


Re: [PING^2][PATCH] Reformat malloc to gnu style.


On Tue, Dec 31, 2013 at 10:57:14PM +1000, Allan McRae wrote:
> On 31/12/13 01:20, Ondřej Bílka wrote:
> > 
> > Hi,
> > 
> > I would like to get this one in before the freeze.
> >
> 
> Looks fine to me.  However, it no longer applies due to changes in
> malloc/malloc.c.
> 
Sorry that I did not respond yesterday. As the reformattings are
independent of each other, most of the patch can still be applied with

patch -p1 < reformat_malloc

There is only one reject, which is included below. I could apply the rest
of the patch now and then reformat the conflicting hunk separately. Should
that be done now or at the start of 2.20?
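
In case it is useful, the reject could be picked up afterwards roughly like
this (just a sketch; I assume patch leaves the failed hunk in
malloc/malloc.c.rej, which is its default behaviour):

cat malloc/malloc.c.rej      # the single rejected hunk, same as quoted below
# redo that hunk by hand against the current malloc/malloc.c,
# or regenerate the formatting for just that region (e.g. with GNU indent)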


--- malloc/malloc.c
+++ malloc/malloc.c
@@ -3845,376 +3956,401 @@
       goto errout;
     }
 
-  check_inuse_chunk(av, p);
+  check_inuse_chunk (av, p);
 
   /*
-    If eligible, place chunk on a fastbin so it can be found
-    and used quickly in malloc.
-  */
+     If eligible, place chunk on a fastbin so it can be found
+     and used quickly in malloc.
+   */
 
-  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
+  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ())
 
 #if TRIM_FASTBINS
       /*
-	If TRIM_FASTBINS set, don't place chunks
-	bordering top into fastbins
-      */
-      && (chunk_at_offset(p, size) != av->top)
+         If TRIM_FASTBINS set, don't place chunks
+         bordering top into fastbins
+       */
+      && (chunk_at_offset (p, size) != av->top)
 #endif
-      ) {
-
-    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
-	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
-			     >= av->system_mem, 0))
-      {
-	/* We might not have a lock at this point and concurrent modifications
-	   of system_mem might have let to a false positive.  Redo the test
-	   after getting the lock.  */
-	if (have_lock
-	    || ({ assert (locked == 0);
-		  mutex_lock(&av->mutex);
-		  locked = 1;
-		  chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
-		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
-	      }))
-	  {
-	    errstr = "free(): invalid next size (fast)";
-	    goto errout;
-	  }
-	if (! have_lock)
-	  {
-	    (void)mutex_unlock(&av->mutex);
-	    locked = 0;
-	  }
-      }
-
-    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
-
-    set_fastchunks(av);
-    unsigned int idx = fastbin_index(size);
-    fb = &fastbin (av, idx);
-
-    mchunkptr fd;
-    mchunkptr old = *fb;
-    unsigned int old_idx = ~0u;
-    do
-      {
-	/* Another simple check: make sure the top of the bin is not the
-	   record we are going to add (i.e., double free).  */
-	if (__builtin_expect (old == p, 0))
-	  {
-	    errstr = "double free or corruption (fasttop)";
-	    goto errout;
-	  }
-	if (old != NULL)
-	  old_idx = fastbin_index(chunksize(old));
-	p->fd = fd = old;
-      }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
-
-    if (fd != NULL && __builtin_expect (old_idx != idx, 0))
-      {
-	errstr = "invalid fastbin entry (free)";
-	goto errout;
-      }
-  }
+      )
+    {
+      if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
+          || __builtin_expect (chunksize (chunk_at_offset (p, size))
+                               >= av->system_mem, 0))
+        {
+          /* We might not have a lock at this point and concurrent modifications
+             of system_mem might have let to a false positive.  Redo the test
+             after getting the lock.  */
+          if (have_lock
+              || ({ assert (locked == 0);
+                    mutex_lock (&av->mutex);
+                    locked = 1;
+                    chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
+                    || chunksize (chunk_at_offset (p, size)) >= av->system_mem; }))
+            {
+              errstr = "free(): invalid next size (fast)";
+              goto errout;
+            }
+          if (!have_lock)
+            {
+              (void) mutex_unlock (&av->mutex);
+              locked = 0;
+            }
+        }
+
+      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
+
+      set_fastchunks (av);
+      unsigned int idx = fastbin_index (size);
+      fb = &fastbin (av, idx);
+
+      mchunkptr fd;
+      mchunkptr old = *fb;
+      unsigned int old_idx = ~0u;
+      do
+        {
+          /* Another simple check: make sure the top of the bin is not the
+             record we are going to add (i.e., double free).  */
+          if (__builtin_expect (old == p, 0))
+            {
+              errstr = "double free or corruption (fasttop)";
+              goto errout;
+            }
+          if (old != NULL)
+            old_idx = fastbin_index (chunksize (old));
+          p->fd = fd = old;
+        }
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
+
+      if (fd != NULL && __builtin_expect (old_idx != idx, 0))
+        {
+          errstr = "invalid fastbin entry (free)";
+          goto errout;
+        }
+    }
 
   /*
-    Consolidate other non-mmapped chunks as they arrive.
-  */
+     Consolidate other non-mmapped chunks as they arrive.
+   */
 
-  else if (!chunk_is_mmapped(p)) {
-    if (! have_lock) {
+  else if (!chunk_is_mmapped (p))
+    {
+      if (!have_lock)
+        {
 #if THREAD_STATS
-      if(!mutex_trylock(&av->mutex))
-	++(av->stat_lock_direct);
-      else {
-	(void)mutex_lock(&av->mutex);
-	++(av->stat_lock_wait);
-      }
+          if (!mutex_trylock (&av->mutex))
+            ++(av->stat_lock_direct);
+          else
+            {
+              (void) mutex_lock (&av->mutex);
+              ++(av->stat_lock_wait);
+            }
 #else
-      (void)mutex_lock(&av->mutex);
+          (void) mutex_lock (&av->mutex);
 #endif
-      locked = 1;
-    }
-
-    nextchunk = chunk_at_offset(p, size);
-
-    /* Lightweight tests: check whether the block is already the
-       top block.  */
-    if (__builtin_expect (p == av->top, 0))
-      {
-	errstr = "double free or corruption (top)";
-	goto errout;
-      }
-    /* Or whether the next chunk is beyond the boundaries of the arena.  */
-    if (__builtin_expect (contiguous (av)
-			  && (char *) nextchunk
-			  >= ((char *) av->top + chunksize(av->top)), 0))
-      {
-	errstr = "double free or corruption (out)";
-	goto errout;
-      }
-    /* Or whether the block is actually not marked used.  */
-    if (__builtin_expect (!prev_inuse(nextchunk), 0))
-      {
-	errstr = "double free or corruption (!prev)";
-	goto errout;
-      }
-
-    nextsize = chunksize(nextchunk);
-    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
-	|| __builtin_expect (nextsize >= av->system_mem, 0))
-      {
-	errstr = "free(): invalid next size (normal)";
-	goto errout;
-      }
-
-    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
-
-    /* consolidate backward */
-    if (!prev_inuse(p)) {
-      prevsize = p->prev_size;
-      size += prevsize;
-      p = chunk_at_offset(p, -((long) prevsize));
-      unlink(p, bck, fwd);
-    }
-
-    if (nextchunk != av->top) {
-      /* get and clear inuse bit */
-      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
-
-      /* consolidate forward */
-      if (!nextinuse) {
-	unlink(nextchunk, bck, fwd);
-	size += nextsize;
-      } else
-	clear_inuse_bit_at_offset(nextchunk, 0);
+          locked = 1;
+        }
+
+      nextchunk = chunk_at_offset (p, size);
+
+      /* Lightweight tests: check whether the block is already the
+         top block.  */
+      if (__builtin_expect (p == av->top, 0))
+        {
+          errstr = "double free or corruption (top)";
+          goto errout;
+        }
+      /* Or whether the next chunk is beyond the boundaries of the arena.  */
+      if (__builtin_expect (contiguous (av)
+                            && (char *) nextchunk
+                            >= ((char *) av->top + chunksize (av->top)), 0))
+        {
+          errstr = "double free or corruption (out)";
+          goto errout;
+        }
+      /* Or whether the block is actually not marked used.  */
+      if (__builtin_expect (!prev_inuse (nextchunk), 0))
+        {
+          errstr = "double free or corruption (!prev)";
+          goto errout;
+        }
+
+      nextsize = chunksize (nextchunk);
+      if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
+          || __builtin_expect (nextsize >= av->system_mem, 0))
+        {
+          errstr = "free(): invalid next size (normal)";
+          goto errout;
+        }
+
+      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
+
+      /* consolidate backward */
+      if (!prev_inuse (p))
+        {
+          prevsize = p->prev_size;
+          size += prevsize;
+          p = chunk_at_offset (p, -((long) prevsize));
+          unlink (p, bck, fwd);
+        }
+
+      if (nextchunk != av->top)
+        {
+          /* get and clear inuse bit */
+          nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
+
+          /* consolidate forward */
+          if (!nextinuse)
+            {
+              unlink (nextchunk, bck, fwd);
+              size += nextsize;
+            }
+          else
+            clear_inuse_bit_at_offset (nextchunk, 0);
+
+          /*
+             Place the chunk in unsorted chunk list. Chunks are
+             not placed into regular bins until after they have
+             been given one chance to be used in malloc.
+           */
+
+          bck = unsorted_chunks (av);
+          fwd = bck->fd;
+          if (__builtin_expect (fwd->bk != bck, 0))
+            {
+              errstr = "free(): corrupted unsorted chunks";
+              goto errout;
+            }
+          p->fd = fwd;
+          p->bk = bck;
+          if (!in_smallbin_range (size))
+            {
+              p->fd_nextsize = NULL;
+              p->bk_nextsize = NULL;
+            }
+          bck->fd = p;
+          fwd->bk = p;
+
+          set_head (p, size | PREV_INUSE);
+          set_foot (p, size);
+
+          check_free_chunk (av, p);
+        }
 
       /*
-	Place the chunk in unsorted chunk list. Chunks are
-	not placed into regular bins until after they have
-	been given one chance to be used in malloc.
-      */
-
-      bck = unsorted_chunks(av);
-      fwd = bck->fd;
-      if (__builtin_expect (fwd->bk != bck, 0))
-	{
-	  errstr = "free(): corrupted unsorted chunks";
-	  goto errout;
-	}
-      p->fd = fwd;
-      p->bk = bck;
-      if (!in_smallbin_range(size))
-	{
-	  p->fd_nextsize = NULL;
-	  p->bk_nextsize = NULL;
-	}
-      bck->fd = p;
-      fwd->bk = p;
-
-      set_head(p, size | PREV_INUSE);
-      set_foot(p, size);
-
-      check_free_chunk(av, p);
-    }
-
-    /*
-      If the chunk borders the current high end of memory,
-      consolidate into top
-    */
-
-    else {
-      size += nextsize;
-      set_head(p, size | PREV_INUSE);
-      av->top = p;
-      check_chunk(av, p);
-    }
-
-    /*
-      If freeing a large space, consolidate possibly-surrounding
-      chunks. Then, if the total unused topmost memory exceeds trim
-      threshold, ask malloc_trim to reduce top.
+         If the chunk borders the current high end of memory,
+         consolidate into top
+       */
 
-      Unless max_fast is 0, we don't know if there are fastbins
-      bordering top, so we cannot tell for sure whether threshold
-      has been reached unless fastbins are consolidated.  But we
-      don't want to consolidate on each free.  As a compromise,
-      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
-      is reached.
-    */
-
-    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
-      if (have_fastchunks(av))
-	malloc_consolidate(av);
+      else
+        {
+          size += nextsize;
+          set_head (p, size | PREV_INUSE);
+          av->top = p;
+          check_chunk (av, p);
+        }
 
-      if (av == &main_arena) {
+      /*
+         If freeing a large space, consolidate possibly-surrounding
+         chunks. Then, if the total unused topmost memory exceeds trim
+         threshold, ask malloc_trim to reduce top.
+
+         Unless max_fast is 0, we don't know if there are fastbins
+         bordering top, so we cannot tell for sure whether threshold
+         has been reached unless fastbins are consolidated.  But we
+         don't want to consolidate on each free.  As a compromise,
+         consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
+         is reached.
+       */
+
+      if ((unsigned long) (size) >= FASTBIN_CONSOLIDATION_THRESHOLD)
+        {
+          if (have_fastchunks (av))
+            malloc_consolidate (av);
+
+          if (av == &main_arena)
+            {
 #ifndef MORECORE_CANNOT_TRIM
-	if ((unsigned long)(chunksize(av->top)) >=
-	    (unsigned long)(mp_.trim_threshold))
-	  systrim(mp_.top_pad, av);
+              if ((unsigned long) (chunksize (av->top)) >=
+                  (unsigned long) (mp_.trim_threshold))
+                systrim (mp_.top_pad, av);
 #endif
-      } else {
-	/* Always try heap_trim(), even if the top chunk is not
-	   large, because the corresponding heap might go away.  */
-	heap_info *heap = heap_for_ptr(top(av));
-
-	assert(heap->ar_ptr == av);
-	heap_trim(heap, mp_.top_pad);
-      }
-    }
-
-    if (! have_lock) {
-      assert (locked);
-      (void)mutex_unlock(&av->mutex);
+            }
+          else
+            {
+              /* Always try heap_trim(), even if the top chunk is not
+                 large, because the corresponding heap might go away.  */
+              heap_info *heap = heap_for_ptr (top (av));
+
+              assert (heap->ar_ptr == av);
+              heap_trim (heap, mp_.top_pad);
+            }
+        }
+
+      if (!have_lock)
+        {
+          assert (locked);
+          (void) mutex_unlock (&av->mutex);
+        }
     }
-  }
   /*
-    If the chunk was allocated via mmap, release via munmap().
-  */
+     If the chunk was allocated via mmap, release via munmap().
+   */
 
-  else {
-    munmap_chunk (p);
-  }
+  else
+    {
+      munmap_chunk (p);
+    }
 }
 
 /*
-  ------------------------- malloc_consolidate -------------------------
+   ------------------------- malloc_consolidate -------------------------
 
-  malloc_consolidate is a specialized version of free() that tears
-  down chunks held in fastbins.  Free itself cannot be used for this
-  purpose since, among other things, it might place chunks back onto
-  fastbins.  So, instead, we need to use a minor variant of the same
-  code.
+   malloc_consolidate is a specialized version of free() that tears
+   down chunks held in fastbins.  Free itself cannot be used for this
+   purpose since, among other things, it might place chunks back onto
+   fastbins.  So, instead, we need to use a minor variant of the same
+   code.
 
-  Also, because this routine needs to be called the first time through
-  malloc anyway, it turns out to be the perfect place to trigger
-  initialization code.
-*/
+   Also, because this routine needs to be called the first time through
+   malloc anyway, it turns out to be the perfect place to trigger
+   initialization code.
+ */
 
-static void malloc_consolidate(mstate av)
+static void
+malloc_consolidate (mstate av)
 {
-  mfastbinptr*    fb;                 /* current fastbin being consolidated */
-  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
-  mchunkptr       p;                  /* current chunk being consolidated */
-  mchunkptr       nextp;              /* next chunk to consolidate */
-  mchunkptr       unsorted_bin;       /* bin header */
-  mchunkptr       first_unsorted;     /* chunk to link to */
+  mfastbinptr *fb;                    /* current fastbin being consolidated */
+  mfastbinptr *maxfb;                 /* last fastbin (for loop control) */
+  mchunkptr p;                        /* current chunk being consolidated */
+  mchunkptr nextp;                    /* next chunk to consolidate */
+  mchunkptr unsorted_bin;             /* bin header */
+  mchunkptr first_unsorted;           /* chunk to link to */
 
   /* These have same use as in free() */
-  mchunkptr       nextchunk;
+  mchunkptr nextchunk;
   INTERNAL_SIZE_T size;
   INTERNAL_SIZE_T nextsize;
   INTERNAL_SIZE_T prevsize;
-  int             nextinuse;
-  mchunkptr       bck;
-  mchunkptr       fwd;
+  int nextinuse;
+  mchunkptr bck;
+  mchunkptr fwd;
 
   /*
-    If max_fast is 0, we know that av hasn't
-    yet been initialized, in which case do so below
-  */
-
-  if (get_max_fast () != 0) {
-    clear_fastchunks(av);
-
-    unsorted_bin = unsorted_chunks(av);
-
-    /*
-      Remove each chunk from fast bin and consolidate it, placing it
-      then in unsorted bin. Among other reasons for doing this,
-      placing in unsorted bin avoids needing to calculate actual bins
-      until malloc is sure that chunks aren't immediately going to be
-      reused anyway.
-    */
-
-    maxfb = &fastbin (av, NFASTBINS - 1);
-    fb = &fastbin (av, 0);
-    do {
-      p = atomic_exchange_acq (fb, 0);
-      if (p != 0) {
-	do {
-	  check_inuse_chunk(av, p);
-	  nextp = p->fd;
-
-	  /* Slightly streamlined version of consolidation code in free() */
-	  size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
-	  nextchunk = chunk_at_offset(p, size);
-	  nextsize = chunksize(nextchunk);
-
-	  if (!prev_inuse(p)) {
-	    prevsize = p->prev_size;
-	    size += prevsize;
-	    p = chunk_at_offset(p, -((long) prevsize));
-	    unlink(p, bck, fwd);
-	  }
-
-	  if (nextchunk != av->top) {
-	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
-
-	    if (!nextinuse) {
-	      size += nextsize;
-	      unlink(nextchunk, bck, fwd);
-	    } else
-	      clear_inuse_bit_at_offset(nextchunk, 0);
-
-	    first_unsorted = unsorted_bin->fd;
-	    unsorted_bin->fd = p;
-	    first_unsorted->bk = p;
-
-	    if (!in_smallbin_range (size)) {
-	      p->fd_nextsize = NULL;
-	      p->bk_nextsize = NULL;
-	    }
-
-	    set_head(p, size | PREV_INUSE);
-	    p->bk = unsorted_bin;
-	    p->fd = first_unsorted;
-	    set_foot(p, size);
-	  }
-
-	  else {
-	    size += nextsize;
-	    set_head(p, size | PREV_INUSE);
-	    av->top = p;
-	  }
-
-	} while ( (p = nextp) != 0);
+     If max_fast is 0, we know that av hasn't
+     yet been initialized, in which case do so below
+   */
 
-      }
-    } while (fb++ != maxfb);
-  }
-  else {
-    malloc_init_state(av);
-    check_malloc_state(av);
-  }
+  if (get_max_fast () != 0)
+    {
+      clear_fastchunks (av);
+
+      unsorted_bin = unsorted_chunks (av);
+
+      /*
+         Remove each chunk from fast bin and consolidate it, placing it
+         then in unsorted bin. Among other reasons for doing this,
+         placing in unsorted bin avoids needing to calculate actual bins
+         until malloc is sure that chunks aren't immediately going to be
+         reused anyway.
+       */
+
+      maxfb = &fastbin (av, NFASTBINS - 1);
+      fb = &fastbin (av, 0);
+      do
+        {
+          p = atomic_exchange_acq (fb, 0);
+          if (p != 0)
+            {
+              do
+                {
+                  check_inuse_chunk (av, p);
+                  nextp = p->fd;
+
+                  /* Slightly streamlined version of consolidation code in free() */
+                  size = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+                  nextchunk = chunk_at_offset (p, size);
+                  nextsize = chunksize (nextchunk);
+
+                  if (!prev_inuse (p))
+                    {
+                      prevsize = p->prev_size;
+                      size += prevsize;
+                      p = chunk_at_offset (p, -((long) prevsize));
+                      unlink (p, bck, fwd);
+                    }
+
+                  if (nextchunk != av->top)
+                    {
+                      nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
+
+                      if (!nextinuse)
+                        {
+                          size += nextsize;
+                          unlink (nextchunk, bck, fwd);
+                        }
+                      else
+                        clear_inuse_bit_at_offset (nextchunk, 0);
+
+                      first_unsorted = unsorted_bin->fd;
+                      unsorted_bin->fd = p;
+                      first_unsorted->bk = p;
+
+                      if (!in_smallbin_range (size))
+                        {
+                          p->fd_nextsize = NULL;
+                          p->bk_nextsize = NULL;
+                        }
+
+                      set_head (p, size | PREV_INUSE);
+                      p->bk = unsorted_bin;
+                      p->fd = first_unsorted;
+                      set_foot (p, size);
+                    }
+
+                  else
+                    {
+                      size += nextsize;
+                      set_head (p, size | PREV_INUSE);
+                      av->top = p;
+                    }
+                }
+              while ((p = nextp) != 0);
+            }
+        }
+      while (fb++ != maxfb);
+    }
+  else
+    {
+      malloc_init_state (av);
+      check_malloc_state (av);
+    }
 }
 
 /*
-  ------------------------------ realloc ------------------------------
-*/
+   ------------------------------ realloc ------------------------------
+ */
 
-void*
-_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
-	     INTERNAL_SIZE_T nb)
+void *
+_int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
+              INTERNAL_SIZE_T nb)
 {
-  mchunkptr        newp;            /* chunk to return */
-  INTERNAL_SIZE_T  newsize;         /* its size */
-  void*          newmem;          /* corresponding user mem */
+  mchunkptr newp;                   /* chunk to return */
+  INTERNAL_SIZE_T newsize;          /* its size */
+  void *newmem;                   /* corresponding user mem */
 
-  mchunkptr        next;            /* next contiguous chunk after oldp */
+  mchunkptr next;                   /* next contiguous chunk after oldp */
 
-  mchunkptr        remainder;       /* extra space at end of newp */
-  unsigned long    remainder_size;  /* its size */
+  mchunkptr remainder;              /* extra space at end of newp */
+  unsigned long remainder_size;     /* its size */
 
-  mchunkptr        bck;             /* misc temp for linking */
-  mchunkptr        fwd;             /* misc temp for linking */
+  mchunkptr bck;                    /* misc temp for linking */
+  mchunkptr fwd;                    /* misc temp for linking */
 
-  unsigned long    copysize;        /* bytes to copy */
-  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
-  INTERNAL_SIZE_T* s;               /* copy source */
-  INTERNAL_SIZE_T* d;               /* copy destination */
+  unsigned long copysize;           /* bytes to copy */
+  unsigned int ncopies;             /* INTERNAL_SIZE_T words to copy */
+  INTERNAL_SIZE_T *s;               /* copy source */
+  INTERNAL_SIZE_T *d;               /* copy destination */
 
   const char *errstr = NULL;
 

