This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[RFC] Do not realloc foreign arenas.


Hi,

We currently try to extend the allocation in realloc even when the old memory
belongs to a different arena. This can cause contention and can cause a heap
to live longer because of these allocations.

If we simply do realloc by copy when the old and new arenas differ, we avoid
these issues. It looks likely that there will be more non-transferring
reallocs than transferring ones.

Also not having to worry about realloc synchronization would simplify
future optimizations.

Comments?

diff --git a/malloc/malloc.c b/malloc/malloc.c
index c9d67f3..d1db997 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2860,7 +2860,7 @@ libc_hidden_def (__libc_free)
 void*
 __libc_realloc(void* oldmem, size_t bytes)
 {
-  mstate ar_ptr;
+  mstate ar_ptr, old_ptr;
   size_t    nb;      /* padded request size */
 
   void* newp;             /* chunk to return */
@@ -2913,32 +2913,27 @@ __libc_realloc(void* oldmem, size_t bytes)
     return newmem;
   }
 
-  ar_ptr = arena_for_chunk(oldp);
-#if THREAD_STATS
-  if(!mutex_trylock(&ar_ptr->mutex))
-    ++(ar_ptr->stat_lock_direct);
-  else {
-    (void)mutex_lock(&ar_ptr->mutex);
-    ++(ar_ptr->stat_lock_wait);
-  }
-#else
-  (void)mutex_lock(&ar_ptr->mutex);
-#endif
+  old_ptr = arena_for_chunk(oldp);
 
-#if !defined PER_THREAD
-  LIBC_PROBE (memory_arena_reuse_realloc, 1, ar_ptr);
-  /* As in malloc(), remember this arena for the next allocation. */
-  tsd_setspecific(arena_key, (void *)ar_ptr);
-#endif
+  arena_lookup(ar_ptr);
+  arena_lock(ar_ptr, bytes);
+  if(!ar_ptr)
+    return 0;
 
-  newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
+  if (ar_ptr == old_ptr)
+    {
+      newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
 
-  (void)mutex_unlock(&ar_ptr->mutex);
-  assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
-	 ar_ptr == arena_for_chunk(mem2chunk(newp)));
+      (void)mutex_unlock(&ar_ptr->mutex);
+      assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
+	     ar_ptr == arena_for_chunk(mem2chunk(newp)));
+    } else newp = NULL;
 
   if (newp == NULL)
     {
+      /* Note the extra SIZE_SZ overhead. */
+      if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
+
       /* Try harder to allocate memory in other arenas.  */
       LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
       newp = __libc_malloc(bytes);


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]