This is the mail archive of the libc-help@sourceware.org mailing list for the glibc project.



[PATCH][RFC] Make malloc routines use mmap if heap is corrupt


Hi,

If the heap is found to be corrupt, the default action currently is to
print a backtrace and the memory map and then call abort(). If the
arena lock is still held at that point, this can result in a deadlock,
since __backtrace calls routines in the loader that may in turn call
malloc.
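
To see the re-entry pattern in isolation, here is a minimal analogy
(made-up names, not glibc code): a thread holding a non-recursive lock
calls an error-reporting path that re-enters the same code and blocks
on the lock it already owns.

deadlock-analogy.c:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

static void report_error(void);

static void *alloc_chunk(void)
{
    pthread_mutex_lock(&arena_lock);    /* "arena lock" taken */
    /* ... corruption detected here ... */
    report_error();                     /* still holding the lock */
    pthread_mutex_unlock(&arena_lock);
    return NULL;
}

static void report_error(void)
{
    /* Stands in for the backtrace/loader path, which may allocate:
       the nested call blocks on arena_lock forever. */
    alloc_chunk();
}

int main(void)
{
    alloc_chunk();                      /* never returns */
    puts("not reached");
    return 0;
}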

With the patch below, once a corruption has been detected, any
subsequent malloc and free only act on mmap'ed memory, which keeps the
malloc routines from tripping over themselves. Even in the
multi-threaded case, after heap corruption has been detected in one
thread, the other threads also end up using mmap until abort() is
actually called. This is not necessarily a bad thing, since it keeps
them from messing up an already broken heap and leaves a better chance
of figuring out what went wrong from the resulting core dump.
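
The idea, sketched outside of the glibc internals with made-up wrapper
names (this is an illustration only, not the patch itself):

mmap-fallback-sketch.c:

/* Sketch only: hypothetical wrappers, not the glibc patch. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

static volatile int heap_corrupt;   /* analogue of heap_is_corrupt */

/* Emergency path: a fresh anonymous mapping per request, so the
   existing (corrupt) heap is never read or written.  The mapping is
   simply leaked; the process is about to abort anyway. */
static void *emergency_malloc(size_t bytes)
{
    void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}

void *my_malloc(size_t bytes)
{
    if (heap_corrupt)
        return emergency_malloc(bytes);
    return malloc(bytes);               /* normal arena path */
}

void my_free(void *mem)
{
    if (heap_corrupt) {
        /* Refuse to touch the corrupt heap, as the patch does. */
        fputs("not freeing memory in corrupt heap\n", stderr);
        return;
    }
    free(mem);
}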

I was going to implement this using malloc hooks, but recent commits
show that the hooks are being deprecated.
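
For comparison, a hook-based variant could have looked roughly like
the sketch below (illustration only, not part of the patch), using the
__malloc_hook/__free_hook interface while it is still available:

hook-sketch.c:

/* Sketch only: a hook-based variant, not the submitted patch. */
#include <malloc.h>
#include <stddef.h>
#include <sys/mman.h>

static void *corrupt_malloc_hook(size_t size, const void *caller)
{
    void *p;

    (void) caller;
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}

static void corrupt_free_hook(void *ptr, const void *caller)
{
    /* Do nothing: leave the corrupt heap exactly as it is. */
    (void) ptr;
    (void) caller;
}

/* Hypothetical: would be called from the corruption-detection path,
   instead of setting a heap_is_corrupt flag. */
void divert_to_mmap(void)
{
    __malloc_hook = corrupt_malloc_hook;
    __free_hook = corrupt_free_hook;
}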

Also inline is a small program that corrupts the heap to demonstrate
the resulting deadlock.

Thanks,
Siddhesh

reproducer.c:

/* Deliberately corrupt the heap so that glibc's malloc checking trips;
   the error path then tries to print a backtrace while the arena lock
   is held, which is where the deadlock shows up. */

#include <string.h>
#include <stdlib.h>

int main(void)
{
    unsigned int *p, *q;

    p = malloc(100);
    memset(p, 1, 100);
    q = malloc(120);
    memset(q, 2, 100);

    free(q);
    /* p is only 100 bytes: overflowing it clobbers the header of the
       neighbouring (freed) chunk. */
    memset(p, 5, 120);
    q = malloc(120);
    free(q);
    /* Write through the freed pointer, past its end. */
    memset(q, 6, 132);

    return 0;
}


The patch:

---
 malloc/malloc.c |   68 +++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 64 insertions(+), 4 deletions(-)

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 864c7d9..834d2e7 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1974,6 +1974,14 @@ void weak_variable (*__after_morecore_hook) (void) = NULL;
 
 static int check_action = DEFAULT_CHECK_ACTION;
 
+/* Set if a memory corruption is detected and if we're about to abort().
+   That way, the heap is no longer touched by the alloc routines and we
+   also avoid any deadlocks that may result from the loader or other
+   threads trying to allocate memory in the same heap. This is only set if
+   we're trying to print a backtrace and a memory map information before
+   we abort. */
+static int heap_is_corrupt = 0;
+
 
 /* ------------------ Testing support ----------------------------------*/
 
@@ -2370,6 +2378,8 @@ static void* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
   size_t          pagemask  = GLRO(dl_pagesize) - 1;
   bool            tried_mmap = false;
 
+  if (__builtin_expect(heap_is_corrupt == 1, 0))
+    goto try_mmap;
 
   /*
     If have mmap, and the request size meets the mmap threshold, and
@@ -2432,6 +2442,10 @@ static void* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
     }
   }
 
+  /* mmap failed and heap is corrupt. Nowhere to go but out */
+  if (__builtin_expect(heap_is_corrupt == 1, 0))
+    return 0;
+
   /* Record incoming configuration of top */
 
   old_top  = av->top;
@@ -2916,6 +2930,13 @@ public_mALLOc(size_t bytes)
   if (__builtin_expect (hook != NULL, 0))
     return (*hook)(bytes, RETURN_ADDRESS (0));
 
+  if (__builtin_expect (heap_is_corrupt == 1, 0))
+  {
+    INTERNAL_SIZE_T nb;
+    checked_request2size(bytes, nb);
+    return sYSMALLOc(nb, /* unused */ ar_ptr);
+  }
+
   arena_lookup(ar_ptr);
 
   arena_lock(ar_ptr, bytes);
@@ -2979,8 +3000,13 @@ public_fREe(void* mem)
     return;
   }
 
-  ar_ptr = arena_for_chunk(p);
-  _int_free(ar_ptr, p, 0);
+  if (__builtin_expect (heap_is_corrupt == 1, 0))
+    __libc_message (1, "*** free(): not freeing memory in corrupt heap ***");
+  else
+  {
+    ar_ptr = arena_for_chunk(p);
+    _int_free(ar_ptr, p, 0);
+  }
 }
 libc_hidden_def (public_fREe)
 
@@ -3040,6 +3066,12 @@ public_rEALLOc(void* oldmem, size_t bytes)
     return newmem;
   }
 
+  if (__builtin_expect (heap_is_corrupt == 1, 0))
+  {
+    __libc_message (1, "*** realloc(): not freeing memory in corrupt heap ***");
+    return sYSMALLOc(nb, /* unused */ ar_ptr);
+  }
+
   ar_ptr = arena_for_chunk(oldp);
 #if THREAD_STATS
   if(!mutex_trylock(&ar_ptr->mutex))
@@ -3096,6 +3128,9 @@ public_mEMALIGn(size_t alignment, size_t bytes)
   /* Otherwise, ensure that it is at least a minimum chunk size */
   if (alignment <  MINSIZE) alignment = MINSIZE;
 
+  if (__builtin_expect (heap_is_corrupt == 1, 0))
+    return _int_memalign(ar_ptr, alignment, bytes);
+
   arena_get(ar_ptr, bytes + alignment + MINSIZE);
   if(!ar_ptr)
     return 0;
@@ -3143,6 +3178,9 @@ public_vALLOc(size_t bytes)
   if (__builtin_expect (hook != NULL, 0))
     return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
 
+  if (__builtin_expect (heap_is_corrupt == 1, 0))
+    return _int_memalign(ar_ptr, pagesz, bytes);
+
   arena_get(ar_ptr, bytes + pagesz + MINSIZE);
   if(!ar_ptr)
     return 0;
@@ -3189,6 +3227,9 @@ public_pVALLOc(size_t bytes)
   if (__builtin_expect (hook != NULL, 0))
     return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
 
+  if (__builtin_expect (heap_is_corrupt == 1, 0))
+    return _int_memalign(ar_ptr, pagesz, rounded_bytes);
+
   arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
   p = _int_pvalloc(ar_ptr, bytes);
   (void)mutex_unlock(&ar_ptr->mutex);
@@ -3249,6 +3290,14 @@ public_cALLOc(size_t n, size_t elem_size)
 
   sz = bytes;
 
+  if (__builtin_expect (heap_is_corrupt == 1, 0))
+  {
+    mem = sYSMALLOc(sz, /* unused */ av);
+    if (mem == 0)
+      return 0;
+    return memset(mem, 0, sz);
+  }
+
   arena_get(av, sz);
   if(!av)
     return 0;
@@ -4466,7 +4515,12 @@ _int_memalign(mstate av, size_t alignment, size_t bytes)
 
   /* If need less alignment than we give anyway, just relay to malloc */
 
-  if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
+  if (alignment <= MALLOC_ALIGNMENT) {
+    if (__builtin_expect(heap_is_corrupt == 1, 0))
+      return sYSMALLOc(bytes, av);
+    else
+      return _int_malloc(av, bytes);
+  }
 
   /* Otherwise, ensure that it is at least a minimum chunk size */
 
@@ -4489,7 +4543,10 @@ _int_memalign(mstate av, size_t alignment, size_t bytes)
 
   /* Call malloc with worst case padding to hit alignment. */
 
-  m  = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
+  if (__builtin_expect(heap_is_corrupt == 1, 0))
+    m = (char *)sYSMALLOc(nb + alignment + MINSIZE, av);
+  else
+    m = (char *)_int_malloc(av, nb + alignment + MINSIZE);
 
   if (m == 0) return 0; /* propagate failure */
 
@@ -5004,6 +5061,9 @@ malloc_printerr(int action, const char *str, void *ptr)
       while (cp > buf)
 	*--cp = '0';
 
+      if (action & 2)
+        heap_is_corrupt = 1;
+
       __libc_message (action & 2,
 		      "*** glibc detected *** %s: %s: 0x%s ***\n",
 		      __libc_argv[0] ?: "<unknown>", str, cp);

