[PING^2][PATCH] Reformat malloc to GNU style.


Hi,

I would like to get this one in before the freeze.
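
For reference, the patch below mostly applies the standard GNU C style:
a space before each argument list, opening braces on their own lines,
two-space indentation steps, and nested preprocessor directives indented
after the '#'.  A minimal sketch of the kind of rewrite involved (the
function names here are made up for illustration, not taken from the
patch):

    #include <stdlib.h>

    /* Compact pre-patch style, similar to the old malloc sources
       (hypothetical example): */
    static void *
    alloc_old(size_t sz) {
      if(sz == 0) return NULL;
      return malloc(sz);
    }

    /* The same logic after conversion to GNU style: space before the
       argument list, function brace at column zero, one statement per
       line, two-space indents.  */
    static void *
    alloc_new (size_t sz)
    {
      if (sz == 0)
        return NULL;

      return malloc (sz);
    }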

On Tue, Dec 17, 2013 at 10:54:18AM +0100, Ondřej Bílka wrote:
> ping
> On Wed, Dec 11, 2013 at 11:27:01AM +0100, Ondřej Bílka wrote:
> > As written before, the malloc code should be reformatted and
> > simplified; I used a formatter to mostly convert it to GNU style.
> > 
> > The patch for this is relatively big, so we could just check that the
> > code is equivalent and fix any remaining inconsistencies as they are
> > discovered.
> > 
> > OK to commit?
> > 
> > 	* malloc/arena.c (malloc_atfork, free_atfork, ptmalloc_lock_all,
> > 	ptmalloc_unlock_all, ptmalloc_unlock_all2, next_env_entry,
> > 	__failing_morecore, ptmalloc_init, dump_heap, new_heap, grow_heap,
> > 	heap_trim, _int_new_arena, get_free_list, reused_arena, arena_get2):
> > 	Convert to GNU style.
> > 	* malloc/hooks.c (memalign_hook_ini, __malloc_check_init,
> > 	mem2mem_check, mem2chunk_check, top_check, realloc_check,
> > 	memalign_check, __malloc_set_state): Likewise.
> > 	* malloc/mallocbug.c (main): Likewise.
> > 	* malloc/malloc.c (__malloc_assert, malloc_init_state, free_perturb,
> > 	do_check_malloced_chunk, do_check_malloc_state, sysmalloc, systrim,
> > 	mremap_chunk, __libc_malloc, __libc_free, __libc_realloc, _mid_memalign,
> > 	_int_malloc, malloc_consolidate, _int_realloc, _int_memalign, mtrim,
> > 	musable, __libc_mallopt, __posix_memalign, malloc_info): Likewise.
> > 	* malloc/malloc.h: Likewise.
> > 	* malloc/mcheck.c (checkhdr, unlink_blk, link_blk, freehook, mallochook,
> > 	memalignhook, reallochook, mabort): Likewise.
> > 	* malloc/mcheck.h: Likewise.
> > 	* malloc/memusage.c (update_data, me, malloc, realloc, calloc, free, mmap,
> > 	mmap64, mremap, munmap, dest): Likewise.
> > 	* malloc/memusagestat.c (main, parse_opt, more_help): Likewise.
> > 	* malloc/morecore.c (__default_morecore): Likewise.
> > 	* malloc/mtrace.c (tr_break, lock_and_info, mtrace): Likewise.
> > 	* malloc/obstack.c (_obstack_begin, _obstack_newchunk,
> > 	_obstack_allocated_p, obstack_free, _obstack_memory_used,
> > 	print_and_abort): Likewise.
> > 	* malloc/obstack.h: Likewise.
> > 	* malloc/set-freeres.c (__libc_freeres): Likewise.
> > 	* malloc/tst-mallocstate.c (main): Likewise.
> > 	* malloc/tst-mtrace.c (main): Likewise.
> > 	* malloc/tst-realloc.c (do_test): Likewise.
> > 
> > 
> > diff --git a/malloc/arena.c b/malloc/arena.c
> > index 9d49f93..fd41f48 100644
> > --- a/malloc/arena.c
> > +++ b/malloc/arena.c
> > @@ -21,12 +21,12 @@
> >  
> >  /* Compile-time constants.  */
> >  
> > -#define HEAP_MIN_SIZE (32*1024)
> > +#define HEAP_MIN_SIZE (32 * 1024)
> >  #ifndef HEAP_MAX_SIZE
> >  # ifdef DEFAULT_MMAP_THRESHOLD_MAX
> >  #  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
> >  # else
> > -#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
> > +#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
> >  # endif
> >  #endif
> >  
> > @@ -39,7 +39,7 @@
> >  
> >  
> >  #ifndef THREAD_STATS
> > -#define THREAD_STATS 0
> > +# define THREAD_STATS 0
> >  #endif
> >  
> >  /* If THREAD_STATS is non-zero, some statistics on mutex locking are
> > @@ -53,12 +53,13 @@
> >     malloc_chunks.  It is allocated with mmap() and always starts at an
> >     address aligned to HEAP_MAX_SIZE.  */
> >  
> > -typedef struct _heap_info {
> > +typedef struct _heap_info
> > +{
> >    mstate ar_ptr; /* Arena for this heap. */
> >    struct _heap_info *prev; /* Previous heap. */
> >    size_t size;   /* Current size in bytes. */
> > -  size_t mprotect_size;	/* Size in bytes that has been mprotected
> > -			   PROT_READ|PROT_WRITE.  */
> > +  size_t mprotect_size; /* Size in bytes that has been mprotected
> > +                           PROT_READ|PROT_WRITE.  */
> >    /* Make sure the following data is properly aligned, particularly
> >       that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
> >       MALLOC_ALIGNMENT. */
> > @@ -68,8 +69,8 @@ typedef struct _heap_info {
> >  /* Get a compile-time error if the heap_info padding is not correct
> >     to make alignment work as expected in sYSMALLOc.  */
> >  extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
> > -					     + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
> > -					    ? -1 : 1];
> > +                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
> > +                                            ? -1 : 1];
> >  
> >  /* Thread specific data */
> >  
> > @@ -80,9 +81,9 @@ static mstate free_list;
> >  
> >  #if THREAD_STATS
> >  static int stat_n_heaps;
> > -#define THREAD_STAT(x) x
> > +# define THREAD_STAT(x) x
> >  #else
> > -#define THREAD_STAT(x) do ; while(0)
> > +# define THREAD_STAT(x) do ; while (0)
> >  #endif
> >  
> >  /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
> > @@ -103,28 +104,28 @@ int __malloc_initialized = -1;
> >     in the new arena. */
> >  
> >  #define arena_get(ptr, size) do { \
> > -  arena_lookup(ptr); \
> > -  arena_lock(ptr, size); \
> > -} while(0)
> > +      arena_lookup (ptr);						      \
> > +      arena_lock (ptr, size);						      \
> > +  } while (0)
> >  
> >  #define arena_lookup(ptr) do { \
> > -  void *vptr = NULL; \
> > -  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
> > -} while(0)
> > +      void *vptr = NULL;						      \
> > +      ptr = (mstate) tsd_getspecific (arena_key, vptr);			      \
> > +  } while (0)
> >  
> > -# define arena_lock(ptr, size) do { \
> > -  if(ptr) \
> > -    (void)mutex_lock(&ptr->mutex); \
> > -  else \
> > -    ptr = arena_get2(ptr, (size), NULL); \
> > -} while(0)
> > +#define arena_lock(ptr, size) do {					      \
> > +      if (ptr)								      \
> > +        (void) mutex_lock (&ptr->mutex);				      \
> > +      else								      \
> > +        ptr = arena_get2 (ptr, (size), NULL);				      \
> > +  } while (0)
> >  
> >  /* find the heap and corresponding arena for a given ptr */
> >  
> >  #define heap_for_ptr(ptr) \
> > - ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
> > +  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
> >  #define arena_for_chunk(ptr) \
> > - (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
> > +  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
> >  
> >  
> >  /**************************************************************************/
> > @@ -133,51 +134,58 @@ int __malloc_initialized = -1;
> >  
> >  /* atfork support.  */
> >  
> > -static void *(*save_malloc_hook) (size_t __size, const void *);
> > +static void *(*save_malloc_hook)(size_t __size, const void *);
> >  static void (*save_free_hook) (void *__ptr, const void *);
> >  static void *save_arena;
> >  
> > -#ifdef ATFORK_MEM
> > +# ifdef ATFORK_MEM
> >  ATFORK_MEM;
> > -#endif
> > +# endif
> >  
> >  /* Magic value for the thread-specific arena pointer when
> >     malloc_atfork() is in use.  */
> >  
> > -#define ATFORK_ARENA_PTR ((void*)-1)
> > +# define ATFORK_ARENA_PTR ((void *) -1)
> >  
> >  /* The following hooks are used while the `atfork' handling mechanism
> >     is active. */
> >  
> > -static void*
> > -malloc_atfork(size_t sz, const void *caller)
> > +static void *
> > +malloc_atfork (size_t sz, const void *caller)
> >  {
> >    void *vptr = NULL;
> >    void *victim;
> >  
> > -  tsd_getspecific(arena_key, vptr);
> > -  if(vptr == ATFORK_ARENA_PTR) {
> > -    /* We are the only thread that may allocate at all.  */
> > -    if(save_malloc_hook != malloc_check) {
> > -      return _int_malloc(&main_arena, sz);
> > -    } else {
> > -      if(top_check()<0)
> > -	return 0;
> > -      victim = _int_malloc(&main_arena, sz+1);
> > -      return mem2mem_check(victim, sz);
> > +  tsd_getspecific (arena_key, vptr);
> > +  if (vptr == ATFORK_ARENA_PTR)
> > +    {
> > +      /* We are the only thread that may allocate at all.  */
> > +      if (save_malloc_hook != malloc_check)
> > +        {
> > +          return _int_malloc (&main_arena, sz);
> > +        }
> > +      else
> > +        {
> > +          if (top_check () < 0)
> > +            return 0;
> > +
> > +          victim = _int_malloc (&main_arena, sz + 1);
> > +          return mem2mem_check (victim, sz);
> > +        }
> > +    }
> > +  else
> > +    {
> > +      /* Suspend the thread until the `atfork' handlers have completed.
> > +         By that time, the hooks will have been reset as well, so that
> > +         mALLOc() can be used again. */
> > +      (void) mutex_lock (&list_lock);
> > +      (void) mutex_unlock (&list_lock);
> > +      return __libc_malloc (sz);
> >      }
> > -  } else {
> > -    /* Suspend the thread until the `atfork' handlers have completed.
> > -       By that time, the hooks will have been reset as well, so that
> > -       mALLOc() can be used again. */
> > -    (void)mutex_lock(&list_lock);
> > -    (void)mutex_unlock(&list_lock);
> > -    return __libc_malloc(sz);
> > -  }
> >  }
> >  
> >  static void
> > -free_atfork(void* mem, const void *caller)
> > +free_atfork (void *mem, const void *caller)
> >  {
> >    void *vptr = NULL;
> >    mstate ar_ptr;
> > @@ -186,17 +194,17 @@ free_atfork(void* mem, const void *caller)
> >    if (mem == 0)                              /* free(0) has no effect */
> >      return;
> >  
> > -  p = mem2chunk(mem);         /* do not bother to replicate free_check here */
> > +  p = mem2chunk (mem);         /* do not bother to replicate free_check here */
> >  
> > -  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
> > -  {
> > -    munmap_chunk(p);
> > -    return;
> > -  }
> > +  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
> > +    {
> > +      munmap_chunk (p);
> > +      return;
> > +    }
> >  
> > -  ar_ptr = arena_for_chunk(p);
> > -  tsd_getspecific(arena_key, vptr);
> > -  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
> > +  ar_ptr = arena_for_chunk (p);
> > +  tsd_getspecific (arena_key, vptr);
> > +  _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
> >  }
> >  
> >  
> > @@ -214,33 +222,36 @@ ptmalloc_lock_all (void)
> >  {
> >    mstate ar_ptr;
> >  
> > -  if(__malloc_initialized < 1)
> > +  if (__malloc_initialized < 1)
> >      return;
> > -  if (mutex_trylock(&list_lock))
> > +
> > +  if (mutex_trylock (&list_lock))
> >      {
> >        void *my_arena;
> > -      tsd_getspecific(arena_key, my_arena);
> > +      tsd_getspecific (arena_key, my_arena);
> >        if (my_arena == ATFORK_ARENA_PTR)
> > -	/* This is the same thread which already locks the global list.
> > -	   Just bump the counter.  */
> > -	goto out;
> > +        /* This is the same thread which already locks the global list.
> > +           Just bump the counter.  */
> > +        goto out;
> >  
> >        /* This thread has to wait its turn.  */
> > -      (void)mutex_lock(&list_lock);
> > +      (void) mutex_lock (&list_lock);
> > +    }
> > +  for (ar_ptr = &main_arena;; )
> > +    {
> > +      (void) mutex_lock (&ar_ptr->mutex);
> > +      ar_ptr = ar_ptr->next;
> > +      if (ar_ptr == &main_arena)
> > +        break;
> >      }
> > -  for(ar_ptr = &main_arena;;) {
> > -    (void)mutex_lock(&ar_ptr->mutex);
> > -    ar_ptr = ar_ptr->next;
> > -    if(ar_ptr == &main_arena) break;
> > -  }
> >    save_malloc_hook = __malloc_hook;
> >    save_free_hook = __free_hook;
> >    __malloc_hook = malloc_atfork;
> >    __free_hook = free_atfork;
> >    /* Only the current thread may perform malloc/free calls now. */
> > -  tsd_getspecific(arena_key, save_arena);
> > -  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
> > - out:
> > +  tsd_getspecific (arena_key, save_arena);
> > +  tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
> > +out:
> >    ++atfork_recursive_cntr;
> >  }
> >  
> > @@ -249,19 +260,23 @@ ptmalloc_unlock_all (void)
> >  {
> >    mstate ar_ptr;
> >  
> > -  if(__malloc_initialized < 1)
> > +  if (__malloc_initialized < 1)
> >      return;
> > +
> >    if (--atfork_recursive_cntr != 0)
> >      return;
> > -  tsd_setspecific(arena_key, save_arena);
> > +
> > +  tsd_setspecific (arena_key, save_arena);
> >    __malloc_hook = save_malloc_hook;
> >    __free_hook = save_free_hook;
> > -  for(ar_ptr = &main_arena;;) {
> > -    (void)mutex_unlock(&ar_ptr->mutex);
> > -    ar_ptr = ar_ptr->next;
> > -    if(ar_ptr == &main_arena) break;
> > -  }
> > -  (void)mutex_unlock(&list_lock);
> > +  for (ar_ptr = &main_arena;; )
> > +    {
> > +      (void) mutex_unlock (&ar_ptr->mutex);
> > +      ar_ptr = ar_ptr->next;
> > +      if (ar_ptr == &main_arena)
> > +        break;
> > +    }
> > +  (void) mutex_unlock (&list_lock);
> >  }
> >  
> >  # ifdef __linux__
> > @@ -276,31 +291,33 @@ ptmalloc_unlock_all2 (void)
> >  {
> >    mstate ar_ptr;
> >  
> > -  if(__malloc_initialized < 1)
> > +  if (__malloc_initialized < 1)
> >      return;
> > -  tsd_setspecific(arena_key, save_arena);
> > +
> > +  tsd_setspecific (arena_key, save_arena);
> >    __malloc_hook = save_malloc_hook;
> >    __free_hook = save_free_hook;
> >    free_list = NULL;
> > -  for(ar_ptr = &main_arena;;) {
> > -    mutex_init(&ar_ptr->mutex);
> > -    if (ar_ptr != save_arena) {
> > -      ar_ptr->next_free = free_list;
> > -      free_list = ar_ptr;
> > +  for (ar_ptr = &main_arena;; )
> > +    {
> > +      mutex_init (&ar_ptr->mutex);
> > +      if (ar_ptr != save_arena)
> > +        {
> > +          ar_ptr->next_free = free_list;
> > +          free_list = ar_ptr;
> > +        }
> > +      ar_ptr = ar_ptr->next;
> > +      if (ar_ptr == &main_arena)
> > +        break;
> >      }
> > -    ar_ptr = ar_ptr->next;
> > -    if(ar_ptr == &main_arena) break;
> > -  }
> > -  mutex_init(&list_lock);
> > +  mutex_init (&list_lock);
> >    atfork_recursive_cntr = 0;
> >  }
> >  
> >  # else
> >  
> >  #  define ptmalloc_unlock_all2 ptmalloc_unlock_all
> > -
> >  # endif
> > -
> >  #endif  /* !NO_THREADS */
> >  
> >  /* Initialization routine. */
> > @@ -317,20 +334,20 @@ next_env_entry (char ***position)
> >    while (*current != NULL)
> >      {
> >        if (__builtin_expect ((*current)[0] == 'M', 0)
> > -	  && (*current)[1] == 'A'
> > -	  && (*current)[2] == 'L'
> > -	  && (*current)[3] == 'L'
> > -	  && (*current)[4] == 'O'
> > -	  && (*current)[5] == 'C'
> > -	  && (*current)[6] == '_')
> > -	{
> > -	  result = &(*current)[7];
> > +          && (*current)[1] == 'A'
> > +          && (*current)[2] == 'L'
> > +          && (*current)[3] == 'L'
> > +          && (*current)[4] == 'O'
> > +          && (*current)[5] == 'C'
> > +          && (*current)[6] == '_')
> > +        {
> > +          result = &(*current)[7];
> >  
> > -	  /* Save current position for next visit.  */
> > -	  *position = ++current;
> > +          /* Save current position for next visit.  */
> > +          *position = ++current;
> >  
> > -	  break;
> > -	}
> > +          break;
> > +        }
> >  
> >        ++current;
> >      }
> > @@ -353,7 +370,9 @@ libc_hidden_proto (_dl_open_hook);
> >  static void
> >  ptmalloc_init (void)
> >  {
> > -  if(__malloc_initialized >= 0) return;
> > +  if (__malloc_initialized >= 0)
> > +    return;
> > +
> >    __malloc_initialized = 0;
> >  
> >  #ifdef SHARED
> > @@ -364,13 +383,13 @@ ptmalloc_init (void)
> >  
> >    if (_dl_open_hook != NULL
> >        || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
> > -	  && l->l_ns != LM_ID_BASE))
> > +          && l->l_ns != LM_ID_BASE))
> >      __morecore = __failing_morecore;
> >  #endif
> >  
> > -  tsd_key_create(&arena_key, NULL);
> > -  tsd_setspecific(arena_key, (void *)&main_arena);
> > -  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
> > +  tsd_key_create (&arena_key, NULL);
> > +  tsd_setspecific (arena_key, (void *) &main_arena);
> > +  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
> >    const char *s = NULL;
> >    if (__builtin_expect (_environ != NULL, 1))
> >      {
> > @@ -378,66 +397,67 @@ ptmalloc_init (void)
> >        char *envline;
> >  
> >        while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
> > -			       0))
> > -	{
> > -	  size_t len = strcspn (envline, "=");
> > -
> > -	  if (envline[len] != '=')
> > -	    /* This is a "MALLOC_" variable at the end of the string
> > -	       without a '=' character.  Ignore it since otherwise we
> > -	       will access invalid memory below.  */
> > -	    continue;
> > -
> > -	  switch (len)
> > -	    {
> > -	    case 6:
> > -	      if (memcmp (envline, "CHECK_", 6) == 0)
> > -		s = &envline[7];
> > -	      break;
> > -	    case 8:
> > -	      if (! __builtin_expect (__libc_enable_secure, 0))
> > -		{
> > -		  if (memcmp (envline, "TOP_PAD_", 8) == 0)
> > -		    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
> > -		  else if (memcmp (envline, "PERTURB_", 8) == 0)
> > -		    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
> > -		}
> > -	      break;
> > -	    case 9:
> > -	      if (! __builtin_expect (__libc_enable_secure, 0))
> > -		{
> > -		  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
> > -		    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
> > -		  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
> > -		    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
> > -		}
> > -	      break;
> > -	    case 10:
> > -	      if (! __builtin_expect (__libc_enable_secure, 0))
> > -		{
> > -		  if (memcmp (envline, "ARENA_TEST", 10) == 0)
> > -		    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
> > -		}
> > -	      break;
> > -	    case 15:
> > -	      if (! __builtin_expect (__libc_enable_secure, 0))
> > -		{
> > -		  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
> > -		    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
> > -		  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
> > -		    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
> > -		}
> > -	      break;
> > -	    default:
> > -	      break;
> > -	    }
> > -	}
> > +                               0))
> > +        {
> > +          size_t len = strcspn (envline, "=");
> > +
> > +          if (envline[len] != '=')
> > +            /* This is a "MALLOC_" variable at the end of the string
> > +               without a '=' character.  Ignore it since otherwise we
> > +               will access invalid memory below.  */
> > +            continue;
> > +
> > +          switch (len)
> > +            {
> > +            case 6:
> > +              if (memcmp (envline, "CHECK_", 6) == 0)
> > +                s = &envline[7];
> > +              break;
> > +            case 8:
> > +              if (!__builtin_expect (__libc_enable_secure, 0))
> > +                {
> > +                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
> > +                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
> > +                  else if (memcmp (envline, "PERTURB_", 8) == 0)
> > +                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
> > +                }
> > +              break;
> > +            case 9:
> > +              if (!__builtin_expect (__libc_enable_secure, 0))
> > +                {
> > +                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
> > +                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
> > +                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
> > +                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
> > +                }
> > +              break;
> > +            case 10:
> > +              if (!__builtin_expect (__libc_enable_secure, 0))
> > +                {
> > +                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
> > +                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
> > +                }
> > +              break;
> > +            case 15:
> > +              if (!__builtin_expect (__libc_enable_secure, 0))
> > +                {
> > +                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
> > +                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
> > +                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
> > +                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
> > +                }
> > +              break;
> > +            default:
> > +              break;
> > +            }
> > +        }
> > +    }
> > +  if (s && s[0])
> > +    {
> > +      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
> > +      if (check_action != 0)
> > +        __malloc_check_init ();
> >      }
> > -  if(s && s[0]) {
> > -    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
> > -    if (check_action != 0)
> > -      __malloc_check_init();
> > -  }
> >    void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
> >    if (hook != NULL)
> >      (*hook)();
> > @@ -446,11 +466,11 @@ ptmalloc_init (void)
> >  
> >  /* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
> >  #ifdef thread_atfork_static
> > -thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
> > -		     ptmalloc_unlock_all2)
> > +thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all,		      \
> > +                      ptmalloc_unlock_all2)
> >  #endif
> >  
> > -
> > +
> >  
> >  /* Managing heaps and arenas (for concurrent threads) */
> >  
> > @@ -459,30 +479,33 @@ thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
> >  /* Print the complete contents of a single heap to stderr. */
> >  
> >  static void
> > -dump_heap(heap_info *heap)
> > +dump_heap (heap_info *heap)
> >  {
> >    char *ptr;
> >    mchunkptr p;
> >  
> > -  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
> > -  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
> > -    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
> > -  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
> > -		  ~MALLOC_ALIGN_MASK);
> > -  for(;;) {
> > -    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
> > -    if(p == top(heap->ar_ptr)) {
> > -      fprintf(stderr, " (top)\n");
> > -      break;
> > -    } else if(p->size == (0|PREV_INUSE)) {
> > -      fprintf(stderr, " (fence)\n");
> > -      break;
> > +  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
> > +  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
> > +        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
> > +  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
> > +                   ~MALLOC_ALIGN_MASK);
> > +  for (;; )
> > +    {
> > +      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
> > +      if (p == top (heap->ar_ptr))
> > +        {
> > +          fprintf (stderr, " (top)\n");
> > +          break;
> > +        }
> > +      else if (p->size == (0 | PREV_INUSE))
> > +        {
> > +          fprintf (stderr, " (fence)\n");
> > +          break;
> > +        }
> > +      fprintf (stderr, "\n");
> > +      p = next_chunk (p);
> >      }
> > -    fprintf(stderr, "\n");
> > -    p = next_chunk(p);
> > -  }
> >  }
> > -
> >  #endif /* MALLOC_DEBUG > 1 */
> >  
> >  /* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
> > @@ -500,18 +523,18 @@ static char *aligned_heap_area;
> >  
> >  static heap_info *
> >  internal_function
> > -new_heap(size_t size, size_t top_pad)
> > +new_heap (size_t size, size_t top_pad)
> >  {
> > -  size_t page_mask = GLRO(dl_pagesize) - 1;
> > +  size_t page_mask = GLRO (dl_pagesize) - 1;
> >    char *p1, *p2;
> >    unsigned long ul;
> >    heap_info *h;
> >  
> > -  if(size+top_pad < HEAP_MIN_SIZE)
> > +  if (size + top_pad < HEAP_MIN_SIZE)
> >      size = HEAP_MIN_SIZE;
> > -  else if(size+top_pad <= HEAP_MAX_SIZE)
> > +  else if (size + top_pad <= HEAP_MAX_SIZE)
> >      size += top_pad;
> > -  else if(size > HEAP_MAX_SIZE)
> > +  else if (size > HEAP_MAX_SIZE)
> >      return 0;
> >    else
> >      size = HEAP_MAX_SIZE;
> > @@ -522,46 +545,55 @@ new_heap(size_t size, size_t top_pad)
> >       mapping (on Linux, this is the case for all non-writable mappings
> >       anyway). */
> >    p2 = MAP_FAILED;
> > -  if(aligned_heap_area) {
> > -    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
> > -		      MAP_NORESERVE);
> > -    aligned_heap_area = NULL;
> > -    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
> > -      __munmap(p2, HEAP_MAX_SIZE);
> > -      p2 = MAP_FAILED;
> > +  if (aligned_heap_area)
> > +    {
> > +      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
> > +                          MAP_NORESERVE);
> > +      aligned_heap_area = NULL;
> > +      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
> > +        {
> > +          __munmap (p2, HEAP_MAX_SIZE);
> > +          p2 = MAP_FAILED;
> > +        }
> >      }
> > -  }
> > -  if(p2 == MAP_FAILED) {
> > -    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
> > -    if(p1 != MAP_FAILED) {
> > -      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
> > -		    & ~(HEAP_MAX_SIZE-1));
> > -      ul = p2 - p1;
> > -      if (ul)
> > -	__munmap(p1, ul);
> > +  if (p2 == MAP_FAILED)
> > +    {
> > +      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
> > +      if (p1 != MAP_FAILED)
> > +        {
> > +          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
> > +                         & ~(HEAP_MAX_SIZE - 1));
> > +          ul = p2 - p1;
> > +          if (ul)
> > +            __munmap (p1, ul);
> > +          else
> > +            aligned_heap_area = p2 + HEAP_MAX_SIZE;
> > +          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
> > +        }
> >        else
> > -	aligned_heap_area = p2 + HEAP_MAX_SIZE;
> > -      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
> > -    } else {
> > -      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
> > -	 is already aligned. */
> > -      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
> > -      if(p2 == MAP_FAILED)
> > -	return 0;
> > -      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
> > -	__munmap(p2, HEAP_MAX_SIZE);
> > -	return 0;
> > -      }
> > +        {
> > +          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
> > +             is already aligned. */
> > +          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
> > +          if (p2 == MAP_FAILED)
> > +            return 0;
> > +
> > +          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
> > +            {
> > +              __munmap (p2, HEAP_MAX_SIZE);
> > +              return 0;
> > +            }
> > +        }
> >      }
> > -  }
> > -  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
> > -    __munmap(p2, HEAP_MAX_SIZE);
> > -    return 0;
> > -  }
> > -  h = (heap_info *)p2;
> > +  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
> > +    {
> > +      __munmap (p2, HEAP_MAX_SIZE);
> > +      return 0;
> > +    }
> > +  h = (heap_info *) p2;
> >    h->size = size;
> >    h->mprotect_size = size;
> > -  THREAD_STAT(stat_n_heaps++);
> > +  THREAD_STAT (stat_n_heaps++);
> >    LIBC_PROBE (memory_heap_new, 2, h, h->size);
> >    return h;
> >  }
> > @@ -570,22 +602,25 @@ new_heap(size_t size, size_t top_pad)
> >     multiple of the page size. */
> >  
> >  static int
> > -grow_heap(heap_info *h, long diff)
> > +grow_heap (heap_info *h, long diff)
> >  {
> > -  size_t page_mask = GLRO(dl_pagesize) - 1;
> > +  size_t page_mask = GLRO (dl_pagesize) - 1;
> >    long new_size;
> >  
> >    diff = (diff + page_mask) & ~page_mask;
> > -  new_size = (long)h->size + diff;
> > -  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
> > +  new_size = (long) h->size + diff;
> > +  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
> >      return -1;
> > -  if((unsigned long) new_size > h->mprotect_size) {
> > -    if (__mprotect((char *)h + h->mprotect_size,
> > -		   (unsigned long) new_size - h->mprotect_size,
> > -		   PROT_READ|PROT_WRITE) != 0)
> > -      return -2;
> > -    h->mprotect_size = new_size;
> > -  }
> > +
> > +  if ((unsigned long) new_size > h->mprotect_size)
> > +    {
> > +      if (__mprotect ((char *) h + h->mprotect_size,
> > +                      (unsigned long) new_size - h->mprotect_size,
> > +                      PROT_READ | PROT_WRITE) != 0)
> > +        return -2;
> > +
> > +      h->mprotect_size = new_size;
> > +    }
> >  
> >    h->size = new_size;
> >    LIBC_PROBE (memory_heap_more, 2, h, h->size);
> > @@ -595,24 +630,26 @@ grow_heap(heap_info *h, long diff)
> >  /* Shrink a heap.  */
> >  
> >  static int
> > -shrink_heap(heap_info *h, long diff)
> > +shrink_heap (heap_info *h, long diff)
> >  {
> >    long new_size;
> >  
> > -  new_size = (long)h->size - diff;
> > -  if(new_size < (long)sizeof(*h))
> > +  new_size = (long) h->size - diff;
> > +  if (new_size < (long) sizeof (*h))
> >      return -1;
> > +
> >    /* Try to re-map the extra heap space freshly to save memory, and make it
> >       inaccessible.  See malloc-sysdep.h to know when this is true.  */
> >    if (__builtin_expect (check_may_shrink_heap (), 0))
> >      {
> > -      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
> > -		      MAP_FIXED) == (char *) MAP_FAILED)
> > -	return -2;
> > +      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
> > +                         MAP_FIXED) == (char *) MAP_FAILED)
> > +        return -2;
> > +
> >        h->mprotect_size = new_size;
> >      }
> >    else
> > -    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
> > +    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
> >    /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
> >  
> >    h->size = new_size;
> > @@ -623,66 +660,70 @@ shrink_heap(heap_info *h, long diff)
> >  /* Delete a heap. */
> >  
> >  #define delete_heap(heap) \
> > -  do {								\
> > -    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
> > -      aligned_heap_area = NULL;					\
> > -    __munmap((char*)(heap), HEAP_MAX_SIZE);			\
> > -  } while (0)
> > +  do {									      \
> > +      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)		      \
> > +        aligned_heap_area = NULL;					      \
> > +      __munmap ((char *) (heap), HEAP_MAX_SIZE);			      \
> > +    } while (0)
> >  
> >  static int
> >  internal_function
> > -heap_trim(heap_info *heap, size_t pad)
> > +heap_trim (heap_info *heap, size_t pad)
> >  {
> >    mstate ar_ptr = heap->ar_ptr;
> > -  unsigned long pagesz = GLRO(dl_pagesize);
> > -  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
> > +  unsigned long pagesz = GLRO (dl_pagesize);
> > +  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
> >    heap_info *prev_heap;
> >    long new_size, top_size, extra, prev_size, misalign;
> >  
> >    /* Can this heap go away completely? */
> > -  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
> > -    prev_heap = heap->prev;
> > -    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
> > -    p = chunk_at_offset(prev_heap, prev_size);
> > -    /* fencepost must be properly aligned.  */
> > -    misalign = ((long) p) & MALLOC_ALIGN_MASK;
> > -    p = chunk_at_offset(prev_heap, prev_size - misalign);
> > -    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
> > -    p = prev_chunk(p);
> > -    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
> > -    assert(new_size>0 && new_size<(long)(2*MINSIZE));
> > -    if(!prev_inuse(p))
> > -      new_size += p->prev_size;
> > -    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
> > -    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
> > -      break;
> > -    ar_ptr->system_mem -= heap->size;
> > -    arena_mem -= heap->size;
> > -    LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
> > -    delete_heap(heap);
> > -    heap = prev_heap;
> > -    if(!prev_inuse(p)) { /* consolidate backward */
> > -      p = prev_chunk(p);
> > -      unlink(p, bck, fwd);
> > +  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
> > +    {
> > +      prev_heap = heap->prev;
> > +      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
> > +      p = chunk_at_offset (prev_heap, prev_size);
> > +      /* fencepost must be properly aligned.  */
> > +      misalign = ((long) p) & MALLOC_ALIGN_MASK;
> > +      p = chunk_at_offset (prev_heap, prev_size - misalign);
> > +      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
> > +      p = prev_chunk (p);
> > +      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
> > +      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
> > +      if (!prev_inuse (p))
> > +        new_size += p->prev_size;
> > +      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
> > +      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
> > +        break;
> > +      ar_ptr->system_mem -= heap->size;
> > +      arena_mem -= heap->size;
> > +      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
> > +      delete_heap (heap);
> > +      heap = prev_heap;
> > +      if (!prev_inuse (p)) /* consolidate backward */
> > +        {
> > +          p = prev_chunk (p);
> > +          unlink (p, bck, fwd);
> > +        }
> > +      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
> > +      assert (((char *) p + new_size) == ((char *) heap + heap->size));
> > +      top (ar_ptr) = top_chunk = p;
> > +      set_head (top_chunk, new_size | PREV_INUSE);
> > +      /*check_chunk(ar_ptr, top_chunk);*/
> >      }
> > -    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
> > -    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
> > -    top(ar_ptr) = top_chunk = p;
> > -    set_head(top_chunk, new_size | PREV_INUSE);
> > -    /*check_chunk(ar_ptr, top_chunk);*/
> > -  }
> > -  top_size = chunksize(top_chunk);
> > +  top_size = chunksize (top_chunk);
> >    extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
> > -  if(extra < (long)pagesz)
> > +  if (extra < (long) pagesz)
> >      return 0;
> > +
> >    /* Try to shrink. */
> > -  if(shrink_heap(heap, extra) != 0)
> > +  if (shrink_heap (heap, extra) != 0)
> >      return 0;
> > +
> >    ar_ptr->system_mem -= extra;
> >    arena_mem -= extra;
> >  
> >    /* Success. Adjust top accordingly. */
> > -  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
> > +  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
> >    /*check_chunk(ar_ptr, top_chunk);*/
> >    return 1;
> >  }
> > @@ -690,52 +731,53 @@ heap_trim(heap_info *heap, size_t pad)
> >  /* Create a new arena with initial size "size".  */
> >  
> >  static mstate
> > -_int_new_arena(size_t size)
> > +_int_new_arena (size_t size)
> >  {
> >    mstate a;
> >    heap_info *h;
> >    char *ptr;
> >    unsigned long misalign;
> >  
> > -  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
> > -	       mp_.top_pad);
> > -  if(!h) {
> > -    /* Maybe size is too large to fit in a single heap.  So, just try
> > -       to create a minimally-sized arena and let _int_malloc() attempt
> > -       to deal with the large request via mmap_chunk().  */
> > -    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
> > -    if(!h)
> > -      return 0;
> > -  }
> > -  a = h->ar_ptr = (mstate)(h+1);
> > -  malloc_init_state(a);
> > +  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
> > +                mp_.top_pad);
> > +  if (!h)
> > +    {
> > +      /* Maybe size is too large to fit in a single heap.  So, just try
> > +         to create a minimally-sized arena and let _int_malloc() attempt
> > +         to deal with the large request via mmap_chunk().  */
> > +      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
> > +      if (!h)
> > +        return 0;
> > +    }
> > +  a = h->ar_ptr = (mstate) (h + 1);
> > +  malloc_init_state (a);
> >    /*a->next = NULL;*/
> >    a->system_mem = a->max_system_mem = h->size;
> >    arena_mem += h->size;
> >  
> >    /* Set up the top chunk, with proper alignment. */
> > -  ptr = (char *)(a + 1);
> > -  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
> > +  ptr = (char *) (a + 1);
> > +  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
> >    if (misalign > 0)
> >      ptr += MALLOC_ALIGNMENT - misalign;
> > -  top(a) = (mchunkptr)ptr;
> > -  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
> > +  top (a) = (mchunkptr) ptr;
> > +  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
> >  
> >    LIBC_PROBE (memory_arena_new, 2, a, size);
> > -  tsd_setspecific(arena_key, (void *)a);
> > -  mutex_init(&a->mutex);
> > -  (void)mutex_lock(&a->mutex);
> > +  tsd_setspecific (arena_key, (void *) a);
> > +  mutex_init (&a->mutex);
> > +  (void) mutex_lock (&a->mutex);
> >  
> > -  (void)mutex_lock(&list_lock);
> > +  (void) mutex_lock (&list_lock);
> >  
> >    /* Add the new arena to the global list.  */
> >    a->next = main_arena.next;
> >    atomic_write_barrier ();
> >    main_arena.next = a;
> >  
> > -  (void)mutex_unlock(&list_lock);
> > +  (void) mutex_unlock (&list_lock);
> >  
> > -  THREAD_STAT(++(a->stat_lock_loop));
> > +  THREAD_STAT (++(a->stat_lock_loop));
> >  
> >    return a;
> >  }
> > @@ -747,19 +789,19 @@ get_free_list (void)
> >    mstate result = free_list;
> >    if (result != NULL)
> >      {
> > -      (void)mutex_lock(&list_lock);
> > +      (void) mutex_lock (&list_lock);
> >        result = free_list;
> >        if (result != NULL)
> > -	free_list = result->next_free;
> > -      (void)mutex_unlock(&list_lock);
> > +        free_list = result->next_free;
> > +      (void) mutex_unlock (&list_lock);
> >  
> >        if (result != NULL)
> > -	{
> > -	  LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
> > -	  (void)mutex_lock(&result->mutex);
> > -	  tsd_setspecific(arena_key, (void *)result);
> > -	  THREAD_STAT(++(result->stat_lock_loop));
> > -	}
> > +        {
> > +          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
> > +          (void) mutex_lock (&result->mutex);
> > +          tsd_setspecific (arena_key, (void *) result);
> > +          THREAD_STAT (++(result->stat_lock_loop));
> > +        }
> >      }
> >  
> >    return result;
> > @@ -779,8 +821,8 @@ reused_arena (mstate avoid_arena)
> >    result = next_to_use;
> >    do
> >      {
> > -      if (!mutex_trylock(&result->mutex))
> > -	goto out;
> > +      if (!mutex_trylock (&result->mutex))
> > +        goto out;
> >  
> >        result = result->next;
> >      }
> > @@ -793,12 +835,12 @@ reused_arena (mstate avoid_arena)
> >  
> >    /* No arena available.  Wait for the next in line.  */
> >    LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
> > -  (void)mutex_lock(&result->mutex);
> > +  (void) mutex_lock (&result->mutex);
> >  
> > - out:
> > +out:
> >    LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
> > -  tsd_setspecific(arena_key, (void *)result);
> > -  THREAD_STAT(++(result->stat_lock_loop));
> > +  tsd_setspecific (arena_key, (void *) result);
> > +  THREAD_STAT (++(result->stat_lock_loop));
> >    next_to_use = result->next;
> >  
> >    return result;
> > @@ -806,7 +848,7 @@ reused_arena (mstate avoid_arena)
> >  
> >  static mstate
> >  internal_function
> > -arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
> > +arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
> >  {
> >    mstate a;
> >  
> > @@ -817,40 +859,40 @@ arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
> >      {
> >        /* Nothing immediately available, so generate a new arena.  */
> >        if (narenas_limit == 0)
> > -	{
> > -	  if (mp_.arena_max != 0)
> > -	    narenas_limit = mp_.arena_max;
> > -	  else if (narenas > mp_.arena_test)
> > -	    {
> > -	      int n  = __get_nprocs ();
> > -
> > -	      if (n >= 1)
> > -		narenas_limit = NARENAS_FROM_NCORES (n);
> > -	      else
> > -		/* We have no information about the system.  Assume two
> > -		   cores.  */
> > -		narenas_limit = NARENAS_FROM_NCORES (2);
> > -	    }
> > -	}
> > +        {
> > +          if (mp_.arena_max != 0)
> > +            narenas_limit = mp_.arena_max;
> > +          else if (narenas > mp_.arena_test)
> > +            {
> > +              int n = __get_nprocs ();
> > +
> > +              if (n >= 1)
> > +                narenas_limit = NARENAS_FROM_NCORES (n);
> > +              else
> > +                /* We have no information about the system.  Assume two
> > +                   cores.  */
> > +                narenas_limit = NARENAS_FROM_NCORES (2);
> > +            }
> > +        }
> >      repeat:;
> >        size_t n = narenas;
> >        /* NB: the following depends on the fact that (size_t)0 - 1 is a
> > -	 very large number and that the underflow is OK.  If arena_max
> > -	 is set the value of arena_test is irrelevant.  If arena_test
> > -	 is set but narenas is not yet larger or equal to arena_test
> > -	 narenas_limit is 0.  There is no possibility for narenas to
> > -	 be too big for the test to always fail since there is not
> > -	 enough address space to create that many arenas.  */
> > +         very large number and that the underflow is OK.  If arena_max
> > +         is set the value of arena_test is irrelevant.  If arena_test
> > +         is set but narenas is not yet larger or equal to arena_test
> > +         narenas_limit is 0.  There is no possibility for narenas to
> > +         be too big for the test to always fail since there is not
> > +         enough address space to create that many arenas.  */
> >        if (__builtin_expect (n <= narenas_limit - 1, 0))
> > -	{
> > -	  if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
> > -	    goto repeat;
> > -	  a = _int_new_arena (size);
> > -	  if (__builtin_expect (a == NULL, 0))
> > -	    catomic_decrement (&narenas);
> > -	}
> > +        {
> > +          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
> > +            goto repeat;
> > +          a = _int_new_arena (size);
> > +          if (__builtin_expect (a == NULL, 0))
> > +            catomic_decrement (&narenas);
> > +        }
> >        else
> > -	a = reused_arena (avoid_arena);
> > +        a = reused_arena (avoid_arena);
> >      }
> >    return a;
> >  }
> > @@ -863,16 +905,19 @@ static mstate
> >  arena_get_retry (mstate ar_ptr, size_t bytes)
> >  {
> >    LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
> > -  if(ar_ptr != &main_arena) {
> > -    (void)mutex_unlock(&ar_ptr->mutex);
> > -    ar_ptr = &main_arena;
> > -    (void)mutex_lock(&ar_ptr->mutex);
> > -  } else {
> > -    /* Grab ar_ptr->next prior to releasing its lock.  */
> > -    mstate prev = ar_ptr->next ? ar_ptr : 0;
> > -    (void)mutex_unlock(&ar_ptr->mutex);
> > -    ar_ptr = arena_get2(prev, bytes, ar_ptr);
> > -  }
> > +  if (ar_ptr != &main_arena)
> > +    {
> > +      (void) mutex_unlock (&ar_ptr->mutex);
> > +      ar_ptr = &main_arena;
> > +      (void) mutex_lock (&ar_ptr->mutex);
> > +    }
> > +  else
> > +    {
> > +      /* Grab ar_ptr->next prior to releasing its lock.  */
> > +      mstate prev = ar_ptr->next ? ar_ptr : 0;
> > +      (void) mutex_unlock (&ar_ptr->mutex);
> > +      ar_ptr = arena_get2 (prev, bytes, ar_ptr);
> > +    }
> >  
> >    return ar_ptr;
> >  }
> > @@ -881,15 +926,15 @@ static void __attribute__ ((section ("__libc_thread_freeres_fn")))
> >  arena_thread_freeres (void)
> >  {
> >    void *vptr = NULL;
> > -  mstate a = tsd_getspecific(arena_key, vptr);
> > -  tsd_setspecific(arena_key, NULL);
> > +  mstate a = tsd_getspecific (arena_key, vptr);
> > +  tsd_setspecific (arena_key, NULL);
> >  
> >    if (a != NULL)
> >      {
> > -      (void)mutex_lock(&list_lock);
> > +      (void) mutex_lock (&list_lock);
> >        a->next_free = free_list;
> >        free_list = a;
> > -      (void)mutex_unlock(&list_lock);
> > +      (void) mutex_unlock (&list_lock);
> >      }
> >  }
> >  text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
> > diff --git a/malloc/hooks.c b/malloc/hooks.c
> > index 7010fe6..75a9b1b 100644
> > --- a/malloc/hooks.c
> > +++ b/malloc/hooks.c
> > @@ -24,29 +24,29 @@
> >  /* Hooks for debugging versions.  The initial hooks just call the
> >     initialization routine, then do the normal work. */
> >  
> > -static void*
> > -malloc_hook_ini(size_t sz, const void *caller)
> > +static void *
> > +malloc_hook_ini (size_t sz, const void *caller)
> >  {
> >    __malloc_hook = NULL;
> > -  ptmalloc_init();
> > -  return __libc_malloc(sz);
> > +  ptmalloc_init ();
> > +  return __libc_malloc (sz);
> >  }
> >  
> > -static void*
> > -realloc_hook_ini(void* ptr, size_t sz, const void *caller)
> > +static void *
> > +realloc_hook_ini (void *ptr, size_t sz, const void *caller)
> >  {
> >    __malloc_hook = NULL;
> >    __realloc_hook = NULL;
> > -  ptmalloc_init();
> > -  return __libc_realloc(ptr, sz);
> > +  ptmalloc_init ();
> > +  return __libc_realloc (ptr, sz);
> >  }
> >  
> > -static void*
> > -memalign_hook_ini(size_t alignment, size_t sz, const void *caller)
> > +static void *
> > +memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
> >  {
> >    __memalign_hook = NULL;
> > -  ptmalloc_init();
> > -  return __libc_memalign(alignment, sz);
> > +  ptmalloc_init ();
> > +  return __libc_memalign (alignment, sz);
> >  }
> >  
> >  /* Whether we are using malloc checking.  */
> > @@ -71,10 +71,11 @@ static int disallow_malloc_check;
> >  void
> >  __malloc_check_init (void)
> >  {
> > -  if (disallow_malloc_check) {
> > -    disallow_malloc_check = 0;
> > -    return;
> > -  }
> > +  if (disallow_malloc_check)
> > +    {
> > +      disallow_malloc_check = 0;
> > +      return;
> > +    }
> >    using_malloc_checking = 1;
> >    __malloc_hook = malloc_check;
> >    __free_hook = free_check;
> > @@ -87,7 +88,7 @@ __malloc_check_init (void)
> >     overruns.  The goal here is to avoid obscure crashes due to invalid
> >     usage, unlike in the MALLOC_DEBUG code. */
> >  
> > -#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
> > +#define MAGICBYTE(p) ((((size_t) p >> 3) ^ ((size_t) p >> 11)) & 0xFF)
> >  
> >  /* Visualize the chunk as being partitioned into blocks of 256 bytes from the
> >     highest address of the chunk, downwards.  The beginning of each block tells
> > @@ -96,53 +97,58 @@ __malloc_check_init (void)
> >     must reach it with this iteration, otherwise we have witnessed a memory
> >     corruption.  */
> >  static size_t
> > -malloc_check_get_size(mchunkptr p)
> > +malloc_check_get_size (mchunkptr p)
> >  {
> >    size_t size;
> >    unsigned char c;
> > -  unsigned char magic = MAGICBYTE(p);
> > +  unsigned char magic = MAGICBYTE (p);
> >  
> > -  assert(using_malloc_checking == 1);
> > +  assert (using_malloc_checking == 1);
> >  
> > -  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
> > -       (c = ((unsigned char*)p)[size]) != magic;
> > -       size -= c) {
> > -    if(c<=0 || size<(c+2*SIZE_SZ)) {
> > -      malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
> > -		      chunk2mem(p));
> > -      return 0;
> > +  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
> > +       (c = ((unsigned char *) p)[size]) != magic;
> > +       size -= c)
> > +    {
> > +      if (c <= 0 || size < (c + 2 * SIZE_SZ))
> > +        {
> > +          malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
> > +                           chunk2mem (p));
> > +          return 0;
> > +        }
> >      }
> > -  }
> >  
> >    /* chunk2mem size.  */
> > -  return size - 2*SIZE_SZ;
> > +  return size - 2 * SIZE_SZ;
> >  }
> >  
> >  /* Instrument a chunk with overrun detector byte(s) and convert it
> >     into a user pointer with requested size sz. */
> >  
> > -static void*
> > +static void *
> >  internal_function
> > -mem2mem_check(void *ptr, size_t sz)
> > +mem2mem_check (void *ptr, size_t sz)
> >  {
> >    mchunkptr p;
> > -  unsigned char* m_ptr = ptr;
> > +  unsigned char *m_ptr = ptr;
> >    size_t i;
> >  
> >    if (!ptr)
> >      return ptr;
> > -  p = mem2chunk(ptr);
> > -  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
> > -      i > sz;
> > -      i -= 0xFF) {
> > -    if(i-sz < 0x100) {
> > -      m_ptr[i] = (unsigned char)(i-sz);
> > -      break;
> > +
> > +  p = mem2chunk (ptr);
> > +  for (i = chunksize (p) - (chunk_is_mmapped (p) ? 2 * SIZE_SZ + 1 : SIZE_SZ + 1);
> > +       i > sz;
> > +       i -= 0xFF)
> > +    {
> > +      if (i - sz < 0x100)
> > +        {
> > +          m_ptr[i] = (unsigned char) (i - sz);
> > +          break;
> > +        }
> > +      m_ptr[i] = 0xFF;
> >      }
> > -    m_ptr[i] = 0xFF;
> > -  }
> > -  m_ptr[sz] = MAGICBYTE(p);
> > -  return (void*)m_ptr;
> > +  m_ptr[sz] = MAGICBYTE (p);
> > +  return (void *) m_ptr;
> >  }
> >  
> >  /* Convert a pointer to be free()d or realloc()ed to a valid chunk
> > @@ -150,53 +156,64 @@ mem2mem_check(void *ptr, size_t sz)
> >  
> >  static mchunkptr
> >  internal_function
> > -mem2chunk_check(void* mem, unsigned char **magic_p)
> > +mem2chunk_check (void *mem, unsigned char **magic_p)
> >  {
> >    mchunkptr p;
> >    INTERNAL_SIZE_T sz, c;
> >    unsigned char magic;
> >  
> > -  if(!aligned_OK(mem)) return NULL;
> > -  p = mem2chunk(mem);
> > -  if (!chunk_is_mmapped(p)) {
> > -    /* Must be a chunk in conventional heap memory. */
> > -    int contig = contiguous(&main_arena);
> > -    sz = chunksize(p);
> > -    if((contig &&
> > -	((char*)p<mp_.sbrk_base ||
> > -	 ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
> > -       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
> > -       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
> > -			    (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
> > -			    next_chunk(prev_chunk(p))!=p) ))
> > -      return NULL;
> > -    magic = MAGICBYTE(p);
> > -    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
> > -      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
> > +  if (!aligned_OK (mem))
> > +    return NULL;
> > +
> > +  p = mem2chunk (mem);
> > +  if (!chunk_is_mmapped (p))
> > +    {
> > +      /* Must be a chunk in conventional heap memory. */
> > +      int contig = contiguous (&main_arena);
> > +      sz = chunksize (p);
> > +      if ((contig &&
> > +           ((char *) p < mp_.sbrk_base ||
> > +            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
> > +          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
> > +          (!prev_inuse (p) && (p->prev_size & MALLOC_ALIGN_MASK ||
> > +                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
> > +                               next_chunk (prev_chunk (p)) != p)))
> > +        return NULL;
> > +
> > +      magic = MAGICBYTE (p);
> > +      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
> > +        {
> > +          if (c <= 0 || sz < (c + 2 * SIZE_SZ))
> > +            return NULL;
> > +        }
> >      }
> > -  } else {
> > -    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;
> > -
> > -    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
> > -       alignment relative to the beginning of a page.  Check this
> > -       first. */
> > -    offset = (unsigned long)mem & page_mask;
> > -    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
> > -	offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
> > -	offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
> > -	offset<0x2000) ||
> > -       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
> > -       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
> > -       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
> > -      return NULL;
> > -    magic = MAGICBYTE(p);
> > -    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
> > -      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
> > +  else
> > +    {
> > +      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;
> > +
> > +      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
> > +         alignment relative to the beginning of a page.  Check this
> > +         first. */
> > +      offset = (unsigned long) mem & page_mask;
> > +      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
> > +           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
> > +           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
> > +           offset < 0x2000) ||
> > +          !chunk_is_mmapped (p) || (p->size & PREV_INUSE) ||
> > +          ((((unsigned long) p - p->prev_size) & page_mask) != 0) ||
> > +          ((sz = chunksize (p)), ((p->prev_size + sz) & page_mask) != 0))
> > +        return NULL;
> > +
> > +      magic = MAGICBYTE (p);
> > +      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
> > +        {
> > +          if (c <= 0 || sz < (c + 2 * SIZE_SZ))
> > +            return NULL;
> > +        }
> >      }
> > -  }
> > -  ((unsigned char*)p)[sz] ^= 0xFF;
> > +  ((unsigned char *) p)[sz] ^= 0xFF;
> >    if (magic_p)
> > -    *magic_p = (unsigned char *)p + sz;
> > +    *magic_p = (unsigned char *) p + sz;
> >    return p;
> >  }
> >  
> > @@ -205,32 +222,32 @@ mem2chunk_check(void* mem, unsigned char **magic_p)
> >  
> >  static int
> >  internal_function
> > -top_check(void)
> > +top_check (void)
> >  {
> > -  mchunkptr t = top(&main_arena);
> > -  char* brk, * new_brk;
> > +  mchunkptr t = top (&main_arena);
> > +  char *brk, *new_brk;
> >    INTERNAL_SIZE_T front_misalign, sbrk_size;
> > -  unsigned long pagesz = GLRO(dl_pagesize);
> > -
> > -  if (t == initial_top(&main_arena) ||
> > -      (!chunk_is_mmapped(t) &&
> > -       chunksize(t)>=MINSIZE &&
> > -       prev_inuse(t) &&
> > -       (!contiguous(&main_arena) ||
> > -	(char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
> > +  unsigned long pagesz = GLRO (dl_pagesize);
> > +
> > +  if (t == initial_top (&main_arena) ||
> > +      (!chunk_is_mmapped (t) &&
> > +       chunksize (t) >= MINSIZE &&
> > +       prev_inuse (t) &&
> > +       (!contiguous (&main_arena) ||
> > +        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
> >      return 0;
> >  
> >    malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
> >  
> >    /* Try to set up a new top chunk. */
> > -  brk = MORECORE(0);
> > -  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
> > +  brk = MORECORE (0);
> > +  front_misalign = (unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK;
> >    if (front_misalign > 0)
> >      front_misalign = MALLOC_ALIGNMENT - front_misalign;
> >    sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
> > -  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
> > -  new_brk = (char*)(MORECORE (sbrk_size));
> > -  if (new_brk == (char*)(MORECORE_FAILURE))
> > +  sbrk_size += pagesz - ((unsigned long) (brk + sbrk_size) & (pagesz - 1));
> > +  new_brk = (char *) (MORECORE (sbrk_size));
> > +  if (new_brk == (char *) (MORECORE_FAILURE))
> >      {
> >        __set_errno (ENOMEM);
> >        return -1;
> > @@ -238,128 +255,148 @@ top_check(void)
> >    /* Call the `morecore' hook if necessary.  */
> >    void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
> >    if (hook)
> > -    (*hook) ();
> > +    (*hook)();
> >    main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
> >  
> > -  top(&main_arena) = (mchunkptr)(brk + front_misalign);
> > -  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
> > +  top (&main_arena) = (mchunkptr) (brk + front_misalign);
> > +  set_head (top (&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
> >  
> >    return 0;
> >  }
> >  
> > -static void*
> > -malloc_check(size_t sz, const void *caller)
> > +static void *
> > +malloc_check (size_t sz, const void *caller)
> >  {
> >    void *victim;
> >  
> > -  if (sz+1 == 0) {
> > -    __set_errno (ENOMEM);
> > -    return NULL;
> > -  }
> > +  if (sz + 1 == 0)
> > +    {
> > +      __set_errno (ENOMEM);
> > +      return NULL;
> > +    }
> >  
> > -  (void)mutex_lock(&main_arena.mutex);
> > -  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
> > -  (void)mutex_unlock(&main_arena.mutex);
> > -  return mem2mem_check(victim, sz);
> > +  (void) mutex_lock (&main_arena.mutex);
> > +  victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
> > +  (void) mutex_unlock (&main_arena.mutex);
> > +  return mem2mem_check (victim, sz);
> >  }
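
For context, malloc_check asks _int_malloc for sz + 1 bytes because
mem2mem_check plants a magic byte derived from the block address just past
the user's sz bytes; free_check and realloc_check verify it later, which is
how MALLOC_CHECK_ catches single-byte overruns.  A minimal sketch of that
scheme with a made-up magic formula (checked_malloc/checked_free are
illustrative names, not the hooks.c API):

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Magic byte derived from the block address; formula is invented
     for illustration and is not the one used in hooks.c.  */
  #define MAGIC(p) ((uint8_t) ((((uintptr_t) (p)) >> 4) & 0xff))

  static void *
  checked_malloc (size_t sz)
  {
    uint8_t *p = malloc (sz + 1);       /* one extra byte for the magic */
    if (p == NULL)
      return NULL;
    p[sz] = MAGIC (p);                  /* byte just past the request */
    return p;
  }

  static void
  checked_free (void *mem, size_t sz)
  {
    uint8_t *p = mem;
    if (p == NULL)
      return;
    if (p[sz] != MAGIC (p))             /* magic clobbered => overrun */
      fprintf (stderr, "block at %p overwritten\n", mem);
    free (p);
  }

  int
  main (void)
  {
    char *s = checked_malloc (4);
    s[4] = 'X';                         /* one-byte overrun */
    checked_free (s, 4);                /* reports the corruption */
    return 0;
  }
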
> >  
> >  static void
> > -free_check(void* mem, const void *caller)
> > +free_check (void *mem, const void *caller)
> >  {
> >    mchunkptr p;
> >  
> > -  if(!mem) return;
> > -  (void)mutex_lock(&main_arena.mutex);
> > -  p = mem2chunk_check(mem, NULL);
> > -  if(!p) {
> > -    (void)mutex_unlock(&main_arena.mutex);
> > -
> > -    malloc_printerr(check_action, "free(): invalid pointer", mem);
> > +  if (!mem)
> >      return;
> > -  }
> > -  if (chunk_is_mmapped(p)) {
> > -    (void)mutex_unlock(&main_arena.mutex);
> > -    munmap_chunk(p);
> > -    return;
> > -  }
> > -  _int_free(&main_arena, p, 1);
> > -  (void)mutex_unlock(&main_arena.mutex);
> > +
> > +  (void) mutex_lock (&main_arena.mutex);
> > +  p = mem2chunk_check (mem, NULL);
> > +  if (!p)
> > +    {
> > +      (void) mutex_unlock (&main_arena.mutex);
> > +
> > +      malloc_printerr (check_action, "free(): invalid pointer", mem);
> > +      return;
> > +    }
> > +  if (chunk_is_mmapped (p))
> > +    {
> > +      (void) mutex_unlock (&main_arena.mutex);
> > +      munmap_chunk (p);
> > +      return;
> > +    }
> > +  _int_free (&main_arena, p, 1);
> > +  (void) mutex_unlock (&main_arena.mutex);
> >  }
> >  
> > -static void*
> > -realloc_check(void* oldmem, size_t bytes, const void *caller)
> > +static void *
> > +realloc_check (void *oldmem, size_t bytes, const void *caller)
> >  {
> >    INTERNAL_SIZE_T nb;
> > -  void* newmem = 0;
> > +  void *newmem = 0;
> >    unsigned char *magic_p;
> >  
> > -  if (bytes+1 == 0) {
> > -    __set_errno (ENOMEM);
> > -    return NULL;
> > -  }
> > -  if (oldmem == 0) return malloc_check(bytes, NULL);
> > -  if (bytes == 0) {
> > -    free_check (oldmem, NULL);
> > -    return NULL;
> > -  }
> > -  (void)mutex_lock(&main_arena.mutex);
> > -  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
> > -  (void)mutex_unlock(&main_arena.mutex);
> > -  if(!oldp) {
> > -    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
> > -    return malloc_check(bytes, NULL);
> > -  }
> > -  const INTERNAL_SIZE_T oldsize = chunksize(oldp);
> > -
> > -  checked_request2size(bytes+1, nb);
> > -  (void)mutex_lock(&main_arena.mutex);
> > -
> > -  if (chunk_is_mmapped(oldp)) {
> > +  if (bytes + 1 == 0)
> > +    {
> > +      __set_errno (ENOMEM);
> > +      return NULL;
> > +    }
> > +  if (oldmem == 0)
> > +    return malloc_check (bytes, NULL);
> > +
> > +  if (bytes == 0)
> > +    {
> > +      free_check (oldmem, NULL);
> > +      return NULL;
> > +    }
> > +  (void) mutex_lock (&main_arena.mutex);
> > +  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
> > +  (void) mutex_unlock (&main_arena.mutex);
> > +  if (!oldp)
> > +    {
> > +      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
> > +      return malloc_check (bytes, NULL);
> > +    }
> > +  const INTERNAL_SIZE_T oldsize = chunksize (oldp);
> > +
> > +  checked_request2size (bytes + 1, nb);
> > +  (void) mutex_lock (&main_arena.mutex);
> > +
> > +  if (chunk_is_mmapped (oldp))
> > +    {
> >  #if HAVE_MREMAP
> > -    mchunkptr newp = mremap_chunk(oldp, nb);
> > -    if(newp)
> > -      newmem = chunk2mem(newp);
> > -    else
> > +      mchunkptr newp = mremap_chunk (oldp, nb);
> > +      if (newp)
> > +        newmem = chunk2mem (newp);
> > +      else
> >  #endif
> > -    {
> > -      /* Note the extra SIZE_SZ overhead. */
> > -      if(oldsize - SIZE_SZ >= nb)
> > -	newmem = oldmem; /* do nothing */
> > -      else {
> > -	/* Must alloc, copy, free. */
> > -	if (top_check() >= 0)
> > -	  newmem = _int_malloc(&main_arena, bytes+1);
> > -	if (newmem) {
> > -	  memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
> > -	  munmap_chunk(oldp);
> > -	}
> > +      {
> > +        /* Note the extra SIZE_SZ overhead. */
> > +        if (oldsize - SIZE_SZ >= nb)
> > +          newmem = oldmem; /* do nothing */
> > +        else
> > +          {
> > +            /* Must alloc, copy, free. */
> > +            if (top_check () >= 0)
> > +              newmem = _int_malloc (&main_arena, bytes + 1);
> > +            if (newmem)
> > +              {
> > +                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
> > +                munmap_chunk (oldp);
> > +              }
> > +          }
> >        }
> >      }
> > -  } else {
> > -    if (top_check() >= 0) {
> > -      INTERNAL_SIZE_T nb;
> > -      checked_request2size(bytes + 1, nb);
> > -      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
> > +  else
> > +    {
> > +      if (top_check () >= 0)
> > +        {
> > +          INTERNAL_SIZE_T nb;
> > +          checked_request2size (bytes + 1, nb);
> > +          newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
> > +        }
> >      }
> > -  }
> >  
> >    /* mem2chunk_check changed the magic byte in the old chunk.
> >       If newmem is NULL, then the old chunk will still be used though,
> >       so we need to invert that change here.  */
> > -  if (newmem == NULL) *magic_p ^= 0xFF;
> > +  if (newmem == NULL)
> > +    *magic_p ^= 0xFF;
> >  
> > -  (void)mutex_unlock(&main_arena.mutex);
> > +  (void) mutex_unlock (&main_arena.mutex);
> >  
> > -  return mem2mem_check(newmem, bytes);
> > +  return mem2mem_check (newmem, bytes);
> >  }
> >  
> > -static void*
> > -memalign_check(size_t alignment, size_t bytes, const void *caller)
> > +static void *
> > +memalign_check (size_t alignment, size_t bytes, const void *caller)
> >  {
> > -  void* mem;
> > +  void *mem;
> > +
> > +  if (alignment <= MALLOC_ALIGNMENT)
> > +    return malloc_check (bytes, NULL);
> >  
> > -  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
> > -  if (alignment <  MINSIZE) alignment = MINSIZE;
> > +  if (alignment < MINSIZE)
> > +    alignment = MINSIZE;
> >  
> >    /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
> >       power of 2 and will cause overflow in the check below.  */
> > @@ -377,17 +414,19 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
> >      }
> >  
> >    /* Make sure alignment is power of 2.  */
> > -  if (!powerof2(alignment)) {
> > -    size_t a = MALLOC_ALIGNMENT * 2;
> > -    while (a < alignment) a <<= 1;
> > -    alignment = a;
> > -  }
> > -
> > -  (void)mutex_lock(&main_arena.mutex);
> > -  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
> > -    NULL;
> > -  (void)mutex_unlock(&main_arena.mutex);
> > -  return mem2mem_check(mem, bytes);
> > +  if (!powerof2 (alignment))
> > +    {
> > +      size_t a = MALLOC_ALIGNMENT * 2;
> > +      while (a < alignment)
> > +        a <<= 1;
> > +      alignment = a;
> > +    }
> > +
> > +  (void) mutex_lock (&main_arena.mutex);
> > +  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
> > +        NULL;
> > +  (void) mutex_unlock (&main_arena.mutex);
> > +  return mem2mem_check (mem, bytes);
> >  }
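
The fix-up loop above rounds a non-power-of-two alignment up by repeated
doubling from MALLOC_ALIGNMENT * 2, so the result is always a power of two.
The same step standalone (16 below merely stands in for that starting
value):

  #include <stddef.h>
  #include <stdio.h>

  /* Round ALIGN up to the next power of two, doubling from a
     power-of-two START.  */
  static size_t
  round_up_pow2 (size_t align, size_t start)
  {
    size_t a = start;
    while (a < align)
      a <<= 1;
    return a;
  }

  int
  main (void)
  {
    printf ("%zu\n", round_up_pow2 (24, 16));   /* prints 32 */
    return 0;
  }
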
> >  
> >  
> > @@ -408,59 +447,63 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
> >     then the hooks are reset to 0.  */
> >  
> >  #define MALLOC_STATE_MAGIC   0x444c4541l
> > -#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
> > -
> > -struct malloc_save_state {
> > -  long          magic;
> > -  long          version;
> > -  mbinptr       av[NBINS * 2 + 2];
> > -  char*         sbrk_base;
> > -  int           sbrked_mem_bytes;
> > +#define MALLOC_STATE_VERSION (0 * 0x100l + 4l) /* major*0x100 + minor */
> > +
> > +struct malloc_save_state
> > +{
> > +  long magic;
> > +  long version;
> > +  mbinptr av[NBINS * 2 + 2];
> > +  char *sbrk_base;
> > +  int sbrked_mem_bytes;
> >    unsigned long trim_threshold;
> >    unsigned long top_pad;
> > -  unsigned int  n_mmaps_max;
> > +  unsigned int n_mmaps_max;
> >    unsigned long mmap_threshold;
> > -  int           check_action;
> > +  int check_action;
> >    unsigned long max_sbrked_mem;
> >    unsigned long max_total_mem;
> > -  unsigned int  n_mmaps;
> > -  unsigned int  max_n_mmaps;
> > +  unsigned int n_mmaps;
> > +  unsigned int max_n_mmaps;
> >    unsigned long mmapped_mem;
> >    unsigned long max_mmapped_mem;
> > -  int           using_malloc_checking;
> > +  int using_malloc_checking;
> >    unsigned long max_fast;
> >    unsigned long arena_test;
> >    unsigned long arena_max;
> >    unsigned long narenas;
> >  };
> >  
> > -void*
> > -__malloc_get_state(void)
> > +void *
> > +__malloc_get_state (void)
> >  {
> > -  struct malloc_save_state* ms;
> > +  struct malloc_save_state *ms;
> >    int i;
> >    mbinptr b;
> >  
> > -  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
> > +  ms = (struct malloc_save_state *) __libc_malloc (sizeof (*ms));
> >    if (!ms)
> >      return 0;
> > -  (void)mutex_lock(&main_arena.mutex);
> > -  malloc_consolidate(&main_arena);
> > +
> > +  (void) mutex_lock (&main_arena.mutex);
> > +  malloc_consolidate (&main_arena);
> >    ms->magic = MALLOC_STATE_MAGIC;
> >    ms->version = MALLOC_STATE_VERSION;
> >    ms->av[0] = 0;
> >    ms->av[1] = 0; /* used to be binblocks, now no longer used */
> > -  ms->av[2] = top(&main_arena);
> > +  ms->av[2] = top (&main_arena);
> >    ms->av[3] = 0; /* used to be undefined */
> > -  for(i=1; i<NBINS; i++) {
> > -    b = bin_at(&main_arena, i);
> > -    if(first(b) == b)
> > -      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
> > -    else {
> > -      ms->av[2*i+2] = first(b);
> > -      ms->av[2*i+3] = last(b);
> > +  for (i = 1; i < NBINS; i++)
> > +    {
> > +      b = bin_at (&main_arena, i);
> > +      if (first (b) == b)
> > +        ms->av[2 * i + 2] = ms->av[2 * i + 3] = 0; /* empty bin */
> > +      else
> > +        {
> > +          ms->av[2 * i + 2] = first (b);
> > +          ms->av[2 * i + 3] = last (b);
> > +        }
> >      }
> > -  }
> >    ms->sbrk_base = mp_.sbrk_base;
> >    ms->sbrked_mem_bytes = main_arena.system_mem;
> >    ms->trim_threshold = mp_.trim_threshold;
> > @@ -475,78 +518,92 @@ __malloc_get_state(void)
> >    ms->mmapped_mem = mp_.mmapped_mem;
> >    ms->max_mmapped_mem = mp_.max_mmapped_mem;
> >    ms->using_malloc_checking = using_malloc_checking;
> > -  ms->max_fast = get_max_fast();
> > +  ms->max_fast = get_max_fast ();
> >    ms->arena_test = mp_.arena_test;
> >    ms->arena_max = mp_.arena_max;
> >    ms->narenas = narenas;
> > -  (void)mutex_unlock(&main_arena.mutex);
> > -  return (void*)ms;
> > +  (void) mutex_unlock (&main_arena.mutex);
> > +  return (void *) ms;
> >  }
> >  
> >  int
> > -__malloc_set_state(void* msptr)
> > +__malloc_set_state (void *msptr)
> >  {
> > -  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
> > +  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
> >    size_t i;
> >    mbinptr b;
> >  
> >    disallow_malloc_check = 1;
> > -  ptmalloc_init();
> > -  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
> > +  ptmalloc_init ();
> > +  if (ms->magic != MALLOC_STATE_MAGIC)
> > +    return -1;
> > +
> >    /* Must fail if the major version is too high. */
> > -  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
> > -  (void)mutex_lock(&main_arena.mutex);
> > +  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
> > +    return -2;
> > +
> > +  (void) mutex_lock (&main_arena.mutex);
> >    /* There are no fastchunks.  */
> > -  clear_fastchunks(&main_arena);
> > +  clear_fastchunks (&main_arena);
> >    if (ms->version >= 4)
> > -    set_max_fast(ms->max_fast);
> > +    set_max_fast (ms->max_fast);
> >    else
> > -    set_max_fast(64);	/* 64 used to be the value we always used.  */
> > -  for (i=0; i<NFASTBINS; ++i)
> > +    set_max_fast (64);  /* 64 used to be the value we always used.  */
> > +  for (i = 0; i < NFASTBINS; ++i)
> >      fastbin (&main_arena, i) = 0;
> > -  for (i=0; i<BINMAPSIZE; ++i)
> > +  for (i = 0; i < BINMAPSIZE; ++i)
> >      main_arena.binmap[i] = 0;
> > -  top(&main_arena) = ms->av[2];
> > +  top (&main_arena) = ms->av[2];
> >    main_arena.last_remainder = 0;
> > -  for(i=1; i<NBINS; i++) {
> > -    b = bin_at(&main_arena, i);
> > -    if(ms->av[2*i+2] == 0) {
> > -      assert(ms->av[2*i+3] == 0);
> > -      first(b) = last(b) = b;
> > -    } else {
> > -      if(ms->version >= 3 &&
> > -	 (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
> > -			   largebin_index(chunksize(ms->av[2*i+3]))==i))) {
> > -	first(b) = ms->av[2*i+2];
> > -	last(b) = ms->av[2*i+3];
> > -	/* Make sure the links to the bins within the heap are correct.  */
> > -	first(b)->bk = b;
> > -	last(b)->fd = b;
> > -	/* Set bit in binblocks.  */
> > -	mark_bin(&main_arena, i);
> > -      } else {
> > -	/* Oops, index computation from chunksize must have changed.
> > -	   Link the whole list into unsorted_chunks.  */
> > -	first(b) = last(b) = b;
> > -	b = unsorted_chunks(&main_arena);
> > -	ms->av[2*i+2]->bk = b;
> > -	ms->av[2*i+3]->fd = b->fd;
> > -	b->fd->bk = ms->av[2*i+3];
> > -	b->fd = ms->av[2*i+2];
> > -      }
> > +  for (i = 1; i < NBINS; i++)
> > +    {
> > +      b = bin_at (&main_arena, i);
> > +      if (ms->av[2 * i + 2] == 0)
> > +        {
> > +          assert (ms->av[2 * i + 3] == 0);
> > +          first (b) = last (b) = b;
> > +        }
> > +      else
> > +        {
> > +          if (ms->version >= 3 &&
> > +              (i < NSMALLBINS || (largebin_index (chunksize (ms->av[2 * i + 2])) == i &&
> > +                                  largebin_index (chunksize (ms->av[2 * i + 3])) == i)))
> > +            {
> > +              first (b) = ms->av[2 * i + 2];
> > +              last (b) = ms->av[2 * i + 3];
> > +              /* Make sure the links to the bins within the heap are correct.  */
> > +              first (b)->bk = b;
> > +              last (b)->fd = b;
> > +              /* Set bit in binblocks.  */
> > +              mark_bin (&main_arena, i);
> > +            }
> > +          else
> > +            {
> > +              /* Oops, index computation from chunksize must have changed.
> > +                 Link the whole list into unsorted_chunks.  */
> > +              first (b) = last (b) = b;
> > +              b = unsorted_chunks (&main_arena);
> > +              ms->av[2 * i + 2]->bk = b;
> > +              ms->av[2 * i + 3]->fd = b->fd;
> > +              b->fd->bk = ms->av[2 * i + 3];
> > +              b->fd = ms->av[2 * i + 2];
> > +            }
> > +        }
> >      }
> > -  }
> > -  if (ms->version < 3) {
> > -    /* Clear fd_nextsize and bk_nextsize fields.  */
> > -    b = unsorted_chunks(&main_arena)->fd;
> > -    while (b != unsorted_chunks(&main_arena)) {
> > -      if (!in_smallbin_range(chunksize(b))) {
> > -	b->fd_nextsize = NULL;
> > -	b->bk_nextsize = NULL;
> > -      }
> > -      b = b->fd;
> > +  if (ms->version < 3)
> > +    {
> > +      /* Clear fd_nextsize and bk_nextsize fields.  */
> > +      b = unsorted_chunks (&main_arena)->fd;
> > +      while (b != unsorted_chunks (&main_arena))
> > +        {
> > +          if (!in_smallbin_range (chunksize (b)))
> > +            {
> > +              b->fd_nextsize = NULL;
> > +              b->bk_nextsize = NULL;
> > +            }
> > +          b = b->fd;
> > +        }
> >      }
> > -  }
> >    mp_.sbrk_base = ms->sbrk_base;
> >    main_arena.system_mem = ms->sbrked_mem_bytes;
> >    mp_.trim_threshold = ms->trim_threshold;
> > @@ -560,28 +617,31 @@ __malloc_set_state(void* msptr)
> >    mp_.mmapped_mem = ms->mmapped_mem;
> >    mp_.max_mmapped_mem = ms->max_mmapped_mem;
> >    /* add version-dependent code here */
> > -  if (ms->version >= 1) {
> > -    /* Check whether it is safe to enable malloc checking, or whether
> > -       it is necessary to disable it.  */
> > -    if (ms->using_malloc_checking && !using_malloc_checking &&
> > -	!disallow_malloc_check)
> > -      __malloc_check_init ();
> > -    else if (!ms->using_malloc_checking && using_malloc_checking) {
> > -      __malloc_hook = NULL;
> > -      __free_hook = NULL;
> > -      __realloc_hook = NULL;
> > -      __memalign_hook = NULL;
> > -      using_malloc_checking = 0;
> > +  if (ms->version >= 1)
> > +    {
> > +      /* Check whether it is safe to enable malloc checking, or whether
> > +         it is necessary to disable it.  */
> > +      if (ms->using_malloc_checking && !using_malloc_checking &&
> > +          !disallow_malloc_check)
> > +        __malloc_check_init ();
> > +      else if (!ms->using_malloc_checking && using_malloc_checking)
> > +        {
> > +          __malloc_hook = NULL;
> > +          __free_hook = NULL;
> > +          __realloc_hook = NULL;
> > +          __memalign_hook = NULL;
> > +          using_malloc_checking = 0;
> > +        }
> >      }
> > -  }
> > -  if (ms->version >= 4) {
> > -    mp_.arena_test = ms->arena_test;
> > -    mp_.arena_max = ms->arena_max;
> > -    narenas = ms->narenas;
> > -  }
> > -  check_malloc_state(&main_arena);
> > -
> > -  (void)mutex_unlock(&main_arena.mutex);
> > +  if (ms->version >= 4)
> > +    {
> > +      mp_.arena_test = ms->arena_test;
> > +      mp_.arena_max = ms->arena_max;
> > +      narenas = ms->narenas;
> > +    }
> > +  check_malloc_state (&main_arena);
> > +
> > +  (void) mutex_unlock (&main_arena.mutex);
> >    return 0;
> >  }
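
The version handling above packs major * 0x100 + minor into a single long
and only rejects a saved state whose major number is newer than the running
one; a newer minor is tolerated, with version-dependent fix-ups applied
afterwards.  A standalone sketch of that rule (names are illustrative):

  #include <stdio.h>

  #define MAKE_VERSION(maj, min)  ((maj) * 0x100l + (min))

  static int
  state_version_ok (long saved, long current)
  {
    return (saved & ~0xffl) <= (current & ~0xffl);  /* compare majors only */
  }

  int
  main (void)
  {
    printf ("0.3 vs 0.4: %d\n",
            state_version_ok (MAKE_VERSION (0, 3), MAKE_VERSION (0, 4)));
    printf ("1.0 vs 0.4: %d\n",
            state_version_ok (MAKE_VERSION (1, 0), MAKE_VERSION (0, 4)));
    return 0;
  }
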
> >  
> > diff --git a/malloc/malloc.c b/malloc/malloc.c
> > index b1668b5..575adf7 100644
> > --- a/malloc/malloc.c
> > +++ b/malloc/malloc.c
> > @@ -353,10 +353,10 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
> >     malloc_set_state than will returning blocks not adequately aligned for
> >     long double objects under -mlong-double-128.  */
> >  
> > -#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ < __alignof__ (long double) \
> > -				  ? __alignof__ (long double) : 2 * SIZE_SZ)
> > +#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ < __alignof__ (long double)   \
> > +                                  ? __alignof__ (long double) : 2 * SIZE_SZ)
> >  # else
> > -#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
> > +#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
> >  # endif
> >  #endif
> >  
> > @@ -463,10 +463,10 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
> >       some systems, if the application first decrements and then
> >       increments the break value, the contents of the reallocated space
> >       are unspecified.
> > -*/
> > + */
> >  
> >  #ifndef MORECORE_CLEARS
> > -#define MORECORE_CLEARS 1
> > +# define MORECORE_CLEARS 1
> >  #endif
> >  
> >  
> > @@ -1232,11 +1232,11 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> >     Check if a request is so large that it would wrap around zero when
> >     padded and aligned. To simplify some other code, the bound is made
> >     low enough so that adding MINSIZE will also not wrap around zero.
> > -*/
> > + */
> >  
> >  #define REQUEST_OUT_OF_RANGE(req)                                 \
> > -  ((unsigned long)(req) >=                                        \
> > -   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
> > +  ((unsigned long) (req) >=						      \
> > +   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
> >  
> >  /* pad request bytes into a usable size -- internal version */
> >  
> > @@ -1248,15 +1248,15 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> >  /*  Same, except also perform argument check */
> >  
> >  #define checked_request2size(req, sz)                             \
> > -  if (REQUEST_OUT_OF_RANGE(req)) {                                \
> > -    __set_errno (ENOMEM);					  \
> > -    return 0;                                                     \
> > -  }                                                               \
> > -  (sz) = request2size(req);
> > +  if (REQUEST_OUT_OF_RANGE (req)) {					      \
> > +      __set_errno (ENOMEM);						      \
> > +      return 0;								      \
> > +    }									      \
> > +  (sz) = request2size (req);
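
checked_request2size pairs the REQUEST_OUT_OF_RANGE test with request2size
so that a near-SIZE_MAX request cannot wrap around to a tiny padded size.
The same guard written as a standalone function (PAD and ALIGN_MASK are
sample values, not the glibc constants):

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PAD         (2 * sizeof (size_t))
  #define ALIGN_MASK  (2 * sizeof (size_t) - 1)

  static int
  request2size_checked (size_t req, size_t *sz)
  {
    if (req > SIZE_MAX - 2 * PAD)       /* request out of range */
      {
        errno = ENOMEM;
        return -1;
      }
    *sz = (req + PAD + ALIGN_MASK) & ~ALIGN_MASK;
    return 0;
  }

  int
  main (void)
  {
    size_t sz;
    printf ("ok=%d\n", request2size_checked (100, &sz) == 0);          /* 1 */
    printf ("ok=%d\n", request2size_checked (SIZE_MAX - 1, &sz) == 0); /* 0 */
    return 0;
  }
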
> >  
> >  /*
> > -  --------------- Physical chunk operations ---------------
> > -*/
> > +   --------------- Physical chunk operations ---------------
> > + */
> >  
> >  
> >  /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
> > @@ -1283,49 +1283,49 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> >  
> >  
> >  /*
> > -  Bits to mask off when extracting size
> > +   Bits to mask off when extracting size
> >  
> > -  Note: IS_MMAPPED is intentionally not masked off from size field in
> > -  macros for which mmapped chunks should never be seen. This should
> > -  cause helpful core dumps to occur if it is tried by accident by
> > -  people extending or adapting this malloc.
> > -*/
> > -#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
> > +   Note: IS_MMAPPED is intentionally not masked off from size field in
> > +   macros for which mmapped chunks should never be seen. This should
> > +   cause helpful core dumps to occur if it is tried by accident by
> > +   people extending or adapting this malloc.
> > + */
> > +#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
> >  
> >  /* Get size, ignoring use bits */
> >  #define chunksize(p)         ((p)->size & ~(SIZE_BITS))
> >  
> >  
> >  /* Ptr to next physical malloc_chunk. */
> > -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
> > +#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
> >  
> >  /* Ptr to previous physical malloc_chunk */
> > -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
> > +#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
> >  
> >  /* Treat space at ptr + offset as a chunk */
> > -#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
> > +#define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))
> >  
> >  /* extract p's inuse bit */
> > -#define inuse(p)\
> > -((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
> > +#define inuse(p)							      \
> > +  ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
> >  
> >  /* set/clear chunk as being inuse without otherwise disturbing */
> > -#define set_inuse(p)\
> > -((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
> > +#define set_inuse(p)							      \
> > +  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
> >  
> > -#define clear_inuse(p)\
> > -((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
> > +#define clear_inuse(p)							      \
> > +  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
> >  
> >  
> >  /* check/set/clear inuse bits in known places */
> > -#define inuse_bit_at_offset(p, s)\
> > - (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
> > +#define inuse_bit_at_offset(p, s)					      \
> > +  (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
> >  
> > -#define set_inuse_bit_at_offset(p, s)\
> > - (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
> > +#define set_inuse_bit_at_offset(p, s)					      \
> > +  (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
> >  
> > -#define clear_inuse_bit_at_offset(p, s)\
> > - (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
> > +#define clear_inuse_bit_at_offset(p, s)					      \
> > +  (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
> >  
> >  
> >  /* Set size at head, without disturbing its use bit */
> > @@ -1335,26 +1335,26 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> >  #define set_head(p, s)       ((p)->size = (s))
> >  
> >  /* Set size at footer (only when chunk is not in use) */
> > -#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
> > +#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
> >  
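
These macros implement classic boundary tags: a chunk's size lives in its
own header, and set_foot replicates a free chunk's size into the prev_size
slot of the chunk that follows, which is what lets prev_chunk walk
backwards.  A flag-free sketch of that layout:

  #include <stddef.h>
  #include <stdio.h>

  struct chunk { size_t prev_size, size; };

  #define next_chunk(p) ((struct chunk *) ((char *) (p) + (p)->size))
  #define prev_chunk(p) ((struct chunk *) ((char *) (p) - (p)->prev_size))

  static size_t heap[12];               /* 96 bytes of fake arena */

  int
  main (void)
  {
    struct chunk *a = (struct chunk *) heap;
    struct chunk *b;

    a->size = 32;
    b = next_chunk (a);                 /* 32 bytes past a */
    b->prev_size = a->size;             /* what set_foot would store */
    b->size = 48;
    printf ("a->b offset: %td\n", (char *) b - (char *) a);   /* 32 */
    printf ("prev of b is a: %d\n", prev_chunk (b) == a);     /* 1 */
    return 0;
  }
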
> >  
> >  /*
> > -  -------------------- Internal data structures --------------------
> > +   -------------------- Internal data structures --------------------
> >  
> >     All internal state is held in an instance of malloc_state defined
> >     below. There are no other static variables, except in two optional
> >     cases:
> > -   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
> > -   * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
> > +   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
> > +   * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
> >       for mmap.
> >  
> >     Beware of lots of tricks that minimize the total bookkeeping space
> >     requirements. The result is a little over 1K bytes (for 4-byte
> >     pointers and size_t.)
> > -*/
> > + */
> >  
> >  /*
> > -  Bins
> > +   Bins
> >  
> >      An array of bin headers for free chunks. Each bin is doubly
> >      linked.  The bins are approximately proportionally (log) spaced.
> > @@ -1387,17 +1387,17 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> >      But to conserve space and improve locality, we allocate
> >      only the fd/bk pointers of bins, and then use repositioning tricks
> >      to treat these as the fields of a malloc_chunk*.
> > -*/
> > + */
> >  
> > -typedef struct malloc_chunk* mbinptr;
> > +typedef struct malloc_chunk *mbinptr;
> >  
> >  /* addressing -- note that bin_at(0) does not exist */
> >  #define bin_at(m, i) \
> > -  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))			      \
> > -	     - offsetof (struct malloc_chunk, fd))
> > +  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))			      \
> > +             - offsetof (struct malloc_chunk, fd))
> >  
> >  /* analog of ++bin */
> > -#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
> > +#define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
> >  
> >  /* Reminders about list directionality within bins */
> >  #define first(b)     ((b)->fd)
> > @@ -1405,36 +1405,36 @@ typedef struct malloc_chunk* mbinptr;
> >  
> >  /* Take a chunk off a bin list */
> >  #define unlink(P, BK, FD) {                                            \
> > -  FD = P->fd;                                                          \
> > -  BK = P->bk;                                                          \
> > -  if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                \
> > -    malloc_printerr (check_action, "corrupted double-linked list", P); \
> > -  else {                                                               \
> > -    FD->bk = BK;                                                       \
> > -    BK->fd = FD;                                                       \
> > -    if (!in_smallbin_range (P->size)				       \
> > -	&& __builtin_expect (P->fd_nextsize != NULL, 0)) {	       \
> > -      assert (P->fd_nextsize->bk_nextsize == P);		       \
> > -      assert (P->bk_nextsize->fd_nextsize == P);		       \
> > -      if (FD->fd_nextsize == NULL) {				       \
> > -	if (P->fd_nextsize == P)				       \
> > -	  FD->fd_nextsize = FD->bk_nextsize = FD;		       \
> > -	else {							       \
> > -	  FD->fd_nextsize = P->fd_nextsize;			       \
> > -	  FD->bk_nextsize = P->bk_nextsize;			       \
> > -	  P->fd_nextsize->bk_nextsize = FD;			       \
> > -	  P->bk_nextsize->fd_nextsize = FD;			       \
> > -	}							       \
> > -      }	else {							       \
> > -	P->fd_nextsize->bk_nextsize = P->bk_nextsize;		       \
> > -	P->bk_nextsize->fd_nextsize = P->fd_nextsize;		       \
> > -      }								       \
> > -    }								       \
> > -  }                                                                    \
> > +    FD = P->fd;								      \
> > +    BK = P->bk;								      \
> > +    if (__builtin_expect (FD->bk != P || BK->fd != P, 0))		      \
> > +      malloc_printerr (check_action, "corrupted double-linked list", P);      \
> > +    else {								      \
> > +        FD->bk = BK;							      \
> > +        BK->fd = FD;							      \
> > +        if (!in_smallbin_range (P->size)				      \
> > +            && __builtin_expect (P->fd_nextsize != NULL, 0)) {		      \
> > +            assert (P->fd_nextsize->bk_nextsize == P);			      \
> > +            assert (P->bk_nextsize->fd_nextsize == P);			      \
> > +            if (FD->fd_nextsize == NULL) {				      \
> > +                if (P->fd_nextsize == P)				      \
> > +                  FD->fd_nextsize = FD->bk_nextsize = FD;		      \
> > +                else {							      \
> > +                    FD->fd_nextsize = P->fd_nextsize;			      \
> > +                    FD->bk_nextsize = P->bk_nextsize;			      \
> > +                    P->fd_nextsize->bk_nextsize = FD;			      \
> > +                    P->bk_nextsize->fd_nextsize = FD;			      \
> > +                  }							      \
> > +              } else {							      \
> > +                P->fd_nextsize->bk_nextsize = P->bk_nextsize;		      \
> > +                P->bk_nextsize->fd_nextsize = P->fd_nextsize;		      \
> > +              }								      \
> > +          }								      \
> > +      }									      \
> >  }
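
The __builtin_expect test in unlink is the list-integrity check: both
neighbours must still point back at the chunk being removed, otherwise the
list has been tampered with.  The same check on a plain circular list (the
fd_nextsize maintenance for large bins is omitted from this sketch):

  #include <stddef.h>
  #include <stdio.h>

  struct node { struct node *fd, *bk; };

  static int
  unlink_node (struct node *p)
  {
    struct node *fd = p->fd, *bk = p->bk;
    if (fd->bk != p || bk->fd != p)     /* neighbours must point back at p */
      {
        fprintf (stderr, "corrupted double-linked list\n");
        return -1;
      }
    fd->bk = bk;                        /* splice p out */
    bk->fd = fd;
    return 0;
  }

  int
  main (void)
  {
    struct node head, n;

    head.fd = head.bk = &n;             /* two-element ring: head <-> n */
    n.fd = n.bk = &head;
    printf ("unlink ok: %d\n", unlink_node (&n) == 0);   /* 1 */

    head.fd = head.bk = &n;             /* rebuild, then corrupt a link */
    n.fd = n.bk = &head;
    head.bk = NULL;                     /* now n->fd->bk != n */
    printf ("unlink ok: %d\n", unlink_node (&n) == 0);   /* 0 */
    return 0;
  }
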
> >  
> >  /*
> > -  Indexing
> > +   Indexing
> >  
> >      Bins for sizes < 512 bytes contain chunks of all the same size, spaced
> >      8 bytes apart. Larger bins are approximately logarithmically spaced:
> > @@ -1455,7 +1455,7 @@ typedef struct malloc_chunk* mbinptr;
> >  
> >      Bin 0 does not exist.  Bin 1 is the unordered list; if that would be
> >      a valid chunk size the small bins are bumped up one.
> > -*/
> > + */
> >  
> >  #define NBINS             128
> >  #define NSMALLBINS         64
> > @@ -1464,38 +1464,38 @@ typedef struct malloc_chunk* mbinptr;
> >  #define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
> >  
> >  #define in_smallbin_range(sz)  \
> > -  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
> > +  ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
> >  
> >  #define smallbin_index(sz) \
> > -  ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
> > +  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
> >     + SMALLBIN_CORRECTION)
> >  
> >  #define largebin_index_32(sz)                                                \
> > -(((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
> > - ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
> > - ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
> > - ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
> > - ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
> > -					126)
> > +  (((((unsigned long) (sz)) >> 6) <= 38) ?  56 + (((unsigned long) (sz)) >> 6) :\
> > +   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
> > +   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
> > +   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
> > +   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
> > +   126)
> >  
> >  #define largebin_index_32_big(sz)                                            \
> > -(((((unsigned long)(sz)) >>  6) <= 45)?  49 + (((unsigned long)(sz)) >>  6): \
> > - ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
> > - ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
> > - ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
> > - ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
> > -                                        126)
> > +  (((((unsigned long) (sz)) >> 6) <= 45) ?  49 + (((unsigned long) (sz)) >> 6) :\
> > +   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
> > +   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
> > +   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
> > +   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
> > +   126)
> >  
> >  // XXX It remains to be seen whether it is good to keep the widths of
> >  // XXX the buckets the same or whether it should be scaled by a factor
> >  // XXX of two as well.
> >  #define largebin_index_64(sz)                                                \
> > -(((((unsigned long)(sz)) >>  6) <= 48)?  48 + (((unsigned long)(sz)) >>  6): \
> > - ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
> > - ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
> > - ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
> > - ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
> > -					126)
> > +  (((((unsigned long) (sz)) >> 6) <= 48) ?  48 + (((unsigned long) (sz)) >> 6) :\
> > +   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
> > +   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
> > +   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
> > +   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
> > +   126)
> >  
> >  #define largebin_index(sz) \
> >    (SIZE_SZ == 8 ? largebin_index_64 (sz)                                     \
> > @@ -1503,11 +1503,11 @@ typedef struct malloc_chunk* mbinptr;
> >     : largebin_index_32 (sz))
> >  
> >  #define bin_index(sz) \
> > - ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
> > +  ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
> >  
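
To see the logarithmic spacing the Indexing comment above describes, the
64-bit variant can be exercised on its own; this is a direct standalone
copy of largebin_index_64 (for SIZE_SZ == 8) plus a small driver:

  #include <stdio.h>

  static unsigned
  largebin_index_64 (unsigned long sz)
  {
    return ((sz >> 6) <= 48) ?  48 + (sz >> 6) :
           ((sz >> 9) <= 20) ?  91 + (sz >> 9) :
           ((sz >> 12) <= 10) ? 110 + (sz >> 12) :
           ((sz >> 15) <= 4) ?  119 + (sz >> 15) :
           ((sz >> 18) <= 2) ?  124 + (sz >> 18) : 126;
  }

  int
  main (void)
  {
    unsigned long sizes[] = { 1024, 4096, 65536, 1048576 };
    int i;

    for (i = 0; i < 4; i++)
      printf ("%8lu bytes -> bin %u\n", sizes[i],
              largebin_index_64 (sizes[i]));
    return 0;
  }
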
> >  
> >  /*
> > -  Unsorted chunks
> > +   Unsorted chunks
> >  
> >      All remainders from chunk splits, as well as all returned chunks,
> >      are first placed in the "unsorted" bin. They are then placed
> > @@ -1518,13 +1518,13 @@ typedef struct malloc_chunk* mbinptr;
> >  
> >      The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
> >      does not have to be taken into account in size comparisons.
> > -*/
> > + */
> >  
> >  /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
> > -#define unsorted_chunks(M)          (bin_at(M, 1))
> > +#define unsorted_chunks(M)          (bin_at (M, 1))
> >  
> >  /*
> > -  Top
> > +   Top
> >  
> >      The top-most available chunk (i.e., the one bordering the end of
> >      available memory) is treated specially. It is never included in
> > @@ -1539,13 +1539,13 @@ typedef struct malloc_chunk* mbinptr;
> >      interval between initialization and the first call to
> >      sysmalloc. (This is somewhat delicate, since it relies on
> >      the 2 preceding words to be zero during this interval as well.)
> > -*/
> > + */
> >  
> >  /* Conveniently, the unsorted bin can be used as dummy top on first call */
> > -#define initial_top(M)              (unsorted_chunks(M))
> > +#define initial_top(M)              (unsorted_chunks (M))
> >  
> >  /*
> > -  Binmap
> > +   Binmap
> >  
> >      To help compensate for the large number of bins, a one-level index
> >      structure is used for bin-by-bin searching.  `binmap' is a
> > @@ -1553,7 +1553,7 @@ typedef struct malloc_chunk* mbinptr;
> >      be skipped over during traversals.  The bits are NOT always
> >      cleared as soon as bins are empty, but instead only
> >      when they are noticed to be empty during traversal in malloc.
> > -*/
> > + */
> >  
> >  /* Conservatively use 32 bits per map word, even if on 64bit system */
> >  #define BINMAPSHIFT      5
> > @@ -1561,14 +1561,14 @@ typedef struct malloc_chunk* mbinptr;
> >  #define BINMAPSIZE       (NBINS / BITSPERMAP)
> >  
> >  #define idx2block(i)     ((i) >> BINMAPSHIFT)
> > -#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
> > +#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
> >  
> > -#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
> > -#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
> > -#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
> > +#define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
> > +#define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
> > +#define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))
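
The binmap is a plain bitvector: idx2block picks the 32-bit map word and
idx2bit the bit within it.  The same bookkeeping outside of malloc_state:

  #include <stdio.h>

  #define BINMAPSHIFT  5
  #define BITSPERMAP   (1U << BINMAPSHIFT)
  #define NBINS        128
  #define BINMAPSIZE   (NBINS / BITSPERMAP)

  #define idx2block(i) ((i) >> BINMAPSHIFT)
  #define idx2bit(i)   (1U << ((i) & (BITSPERMAP - 1)))

  static unsigned int binmap[BINMAPSIZE];

  int
  main (void)
  {
    binmap[idx2block (70)] |= idx2bit (70);              /* mark bin 70 */
    printf ("bin 70: %d\n", (binmap[idx2block (70)] & idx2bit (70)) != 0);
    binmap[idx2block (70)] &= ~idx2bit (70);             /* unmark it */
    printf ("bin 70: %d\n", (binmap[idx2block (70)] & idx2bit (70)) != 0);
    return 0;
  }
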
> >  
> >  /*
> > -  Fastbins
> > +   Fastbins
> >  
> >      An array of lists holding recently freed small chunks.  Fastbins
> >      are not doubly linked.  It is faster to single-link them, and
> > @@ -1582,69 +1582,69 @@ typedef struct malloc_chunk* mbinptr;
> >      be consolidated with other free chunks. malloc_consolidate
> >      releases all chunks in fastbins and consolidates them with
> >      other free chunks.
> > -*/
> > + */
> >  
> > -typedef struct malloc_chunk* mfastbinptr;
> > +typedef struct malloc_chunk *mfastbinptr;
> >  #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
> >  
> >  /* offset 2 to use otherwise unindexable first 2 bins */
> >  #define fastbin_index(sz) \
> > -  ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
> > +  ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
> >  
> >  
> >  /* The maximum fastbin request size we support */
> >  #define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)
> >  
> > -#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
> > +#define NFASTBINS  (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
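
Fastbins, per the comment above, are singly linked and LIFO: free pushes on
the head through fd, malloc pops from the head, and fastbin chunks keep
their inuse bit set so neighbours never try to coalesce with them.  A
sketch of just that push/pop discipline (no sizes, arenas, or locking):

  #include <stddef.h>
  #include <stdio.h>

  struct fchunk { struct fchunk *fd; };

  static struct fchunk *fastbin_head;

  static void
  fastbin_push (struct fchunk *p)       /* what free does: link at head */
  {
    p->fd = fastbin_head;
    fastbin_head = p;
  }

  static struct fchunk *
  fastbin_pop (void)                    /* what malloc does: take the head */
  {
    struct fchunk *p = fastbin_head;
    if (p != NULL)
      fastbin_head = p->fd;
    return p;
  }

  int
  main (void)
  {
    struct fchunk a, b;

    fastbin_push (&a);
    fastbin_push (&b);
    printf ("%s\n", fastbin_pop () == &b ? "LIFO" : "FIFO");   /* LIFO */
    return 0;
  }
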
> >  
> >  /*
> > -  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
> > -  that triggers automatic consolidation of possibly-surrounding
> > -  fastbin chunks. This is a heuristic, so the exact value should not
> > -  matter too much. It is defined at half the default trim threshold as a
> > -  compromise heuristic to only attempt consolidation if it is likely
> > -  to lead to trimming. However, it is not dynamically tunable, since
> > -  consolidation reduces fragmentation surrounding large chunks even
> > -  if trimming is not used.
> > -*/
> > +   FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
> > +   that triggers automatic consolidation of possibly-surrounding
> > +   fastbin chunks. This is a heuristic, so the exact value should not
> > +   matter too much. It is defined at half the default trim threshold as a
> > +   compromise heuristic to only attempt consolidation if it is likely
> > +   to lead to trimming. However, it is not dynamically tunable, since
> > +   consolidation reduces fragmentation surrounding large chunks even
> > +   if trimming is not used.
> > + */
> >  
> >  #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
> >  
> >  /*
> > -  Since the lowest 2 bits in max_fast don't matter in size comparisons,
> > -  they are used as flags.
> > -*/
> > +   Since the lowest 2 bits in max_fast don't matter in size comparisons,
> > +   they are used as flags.
> > + */
> >  
> >  /*
> > -  FASTCHUNKS_BIT held in max_fast indicates that there are probably
> > -  some fastbin chunks. It is set true on entering a chunk into any
> > -  fastbin, and cleared only in malloc_consolidate.
> > +   FASTCHUNKS_BIT held in max_fast indicates that there are probably
> > +   some fastbin chunks. It is set true on entering a chunk into any
> > +   fastbin, and cleared only in malloc_consolidate.
> >  
> > -  The truth value is inverted so that have_fastchunks will be true
> > -  upon startup (since statics are zero-filled), simplifying
> > -  initialization checks.
> > -*/
> > +   The truth value is inverted so that have_fastchunks will be true
> > +   upon startup (since statics are zero-filled), simplifying
> > +   initialization checks.
> > + */
> >  
> >  #define FASTCHUNKS_BIT        (1U)
> >  
> > -#define have_fastchunks(M)     (((M)->flags &  FASTCHUNKS_BIT) == 0)
> > +#define have_fastchunks(M)     (((M)->flags & FASTCHUNKS_BIT) == 0)
> >  #define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
> >  #define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
> >  
> >  /*
> > -  NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
> > -  regions.  Otherwise, contiguity is exploited in merging together,
> > -  when possible, results from consecutive MORECORE calls.
> > +   NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
> > +   regions.  Otherwise, contiguity is exploited in merging together,
> > +   when possible, results from consecutive MORECORE calls.
> >  
> > -  The initial value comes from MORECORE_CONTIGUOUS, but is
> > -  changed dynamically if mmap is ever used as an sbrk substitute.
> > -*/
> > +   The initial value comes from MORECORE_CONTIGUOUS, but is
> > +   changed dynamically if mmap is ever used as an sbrk substitute.
> > + */
> >  
> >  #define NONCONTIGUOUS_BIT     (2U)
> >  
> > -#define contiguous(M)          (((M)->flags &  NONCONTIGUOUS_BIT) == 0)
> > -#define noncontiguous(M)       (((M)->flags &  NONCONTIGUOUS_BIT) != 0)
> > -#define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
> > +#define contiguous(M)          (((M)->flags & NONCONTIGUOUS_BIT) == 0)
> > +#define noncontiguous(M)       (((M)->flags & NONCONTIGUOUS_BIT) != 0)
> > +#define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
> >  #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
> >  
> >  /*
> > @@ -1652,19 +1652,20 @@ typedef struct malloc_chunk* mfastbinptr;
> >     Use impossibly small value if 0.
> >     Precondition: there are no existing fastbin chunks.
> >     Setting the value clears fastchunk bit but preserves noncontiguous bit.
> > -*/
> > + */
> >  
> >  #define set_max_fast(s) \
> > -  global_max_fast = (((s) == 0)						      \
> > -		     ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
> > +  global_max_fast = (((s) == 0)						      \
> > +                     ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
> >  #define get_max_fast() global_max_fast
> >  
> >  
> >  /*
> >     ----------- Internal state representation and initialization -----------
> > -*/
> > + */
> >  
> > -struct malloc_state {
> > +struct malloc_state
> > +{
> >    /* Serialize access.  */
> >    mutex_t mutex;
> >  
> > @@ -1677,19 +1678,19 @@ struct malloc_state {
> >  #endif
> >  
> >    /* Fastbins */
> > -  mfastbinptr      fastbinsY[NFASTBINS];
> > +  mfastbinptr fastbinsY[NFASTBINS];
> >  
> >    /* Base of the topmost chunk -- not otherwise kept in a bin */
> > -  mchunkptr        top;
> > +  mchunkptr top;
> >  
> >    /* The remainder from the most recent split of a small request */
> > -  mchunkptr        last_remainder;
> > +  mchunkptr last_remainder;
> >  
> >    /* Normal bins packed as described above */
> > -  mchunkptr        bins[NBINS * 2 - 2];
> > +  mchunkptr bins[NBINS * 2 - 2];
> >  
> >    /* Bitmap of bins */
> > -  unsigned int     binmap[BINMAPSIZE];
> > +  unsigned int binmap[BINMAPSIZE];
> >  
> >    /* Linked list */
> >    struct malloc_state *next;
> > @@ -1702,32 +1703,33 @@ struct malloc_state {
> >    INTERNAL_SIZE_T max_system_mem;
> >  };
> >  
> > -struct malloc_par {
> > +struct malloc_par
> > +{
> >    /* Tunable parameters */
> > -  unsigned long    trim_threshold;
> > -  INTERNAL_SIZE_T  top_pad;
> > -  INTERNAL_SIZE_T  mmap_threshold;
> > -  INTERNAL_SIZE_T  arena_test;
> > -  INTERNAL_SIZE_T  arena_max;
> > +  unsigned long trim_threshold;
> > +  INTERNAL_SIZE_T top_pad;
> > +  INTERNAL_SIZE_T mmap_threshold;
> > +  INTERNAL_SIZE_T arena_test;
> > +  INTERNAL_SIZE_T arena_max;
> >  
> >    /* Memory map support */
> > -  int              n_mmaps;
> > -  int              n_mmaps_max;
> > -  int              max_n_mmaps;
> > +  int n_mmaps;
> > +  int n_mmaps_max;
> > +  int max_n_mmaps;
> >    /* the mmap_threshold is dynamic, until the user sets
> >       it manually, at which point we need to disable any
> >       dynamic behavior. */
> > -  int              no_dyn_threshold;
> > +  int no_dyn_threshold;
> >  
> >    /* Statistics */
> > -  INTERNAL_SIZE_T  mmapped_mem;
> > +  INTERNAL_SIZE_T mmapped_mem;
> >    /*INTERNAL_SIZE_T  sbrked_mem;*/
> >    /*INTERNAL_SIZE_T  max_sbrked_mem;*/
> > -  INTERNAL_SIZE_T  max_mmapped_mem;
> > -  INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */
> > +  INTERNAL_SIZE_T max_mmapped_mem;
> > +  INTERNAL_SIZE_T max_total_mem;  /* only kept for NO_THREADS */
> >  
> >    /* First address handed out by MORECORE/sbrk.  */
> > -  char*            sbrk_base;
> > +  char *sbrk_base;
> >  };
> >  
> >  /* There are several instances of this struct ("arenas") in this
> > @@ -1737,22 +1739,22 @@ struct malloc_par {
> >     is initialized to all zeroes (as is true of C statics).  */
> >  
> >  static struct malloc_state main_arena =
> > -  {
> > -    .mutex = MUTEX_INITIALIZER,
> > -    .next = &main_arena
> > -  };
> > +{
> > +  .mutex = MUTEX_INITIALIZER,
> > +  .next = &main_arena
> > +};
> >  
> >  /* There is only one instance of the malloc parameters.  */
> >  
> >  static struct malloc_par mp_ =
> > -  {
> > -    .top_pad        = DEFAULT_TOP_PAD,
> > -    .n_mmaps_max    = DEFAULT_MMAP_MAX,
> > -    .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
> > -    .trim_threshold = DEFAULT_TRIM_THRESHOLD,
> > -# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
> > -    .arena_test     = NARENAS_FROM_NCORES (1)
> > -  };
> > +{
> > +  .top_pad = DEFAULT_TOP_PAD,
> > +  .n_mmaps_max = DEFAULT_MMAP_MAX,
> > +  .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
> > +  .trim_threshold = DEFAULT_TRIM_THRESHOLD,
> > +#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
> > +  .arena_test = NARENAS_FROM_NCORES (1)
> > +};
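
NARENAS_FROM_NCORES, defined inline in the initializer above, scales the
arena limit with the core count: two arenas per core on 32-bit, eight on
64-bit.  Standalone:

  #include <stdio.h>

  static long
  narenas_from_ncores (long n)
  {
    return n * (sizeof (long) == 4 ? 2 : 8);
  }

  int
  main (void)
  {
    printf ("4 cores -> at most %ld arenas\n", narenas_from_ncores (4));
    return 0;
  }
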
> >  
> >  
> >  /*  Non public mallopt parameters.  */
> > @@ -1764,44 +1766,46 @@ static struct malloc_par mp_ =
> >  static INTERNAL_SIZE_T global_max_fast;
> >  
> >  /*
> > -  Initialize a malloc_state struct.
> > +   Initialize a malloc_state struct.
> >  
> > -  This is called only from within malloc_consolidate, which needs
> > -  be called in the same contexts anyway.  It is never called directly
> > -  outside of malloc_consolidate because some optimizing compilers try
> > -  to inline it at all call points, which turns out not to be an
> > -  optimization at all. (Inlining it in malloc_consolidate is fine though.)
> > -*/
> > +   This is called only from within malloc_consolidate, which needs
> > +   to be called in the same contexts anyway.  It is never called directly
> > +   outside of malloc_consolidate because some optimizing compilers try
> > +   to inline it at all call points, which turns out not to be an
> > +   optimization at all. (Inlining it in malloc_consolidate is fine though.)
> > + */
> >  
> > -static void malloc_init_state(mstate av)
> > +static void
> > +malloc_init_state (mstate av)
> >  {
> > -  int     i;
> > +  int i;
> >    mbinptr bin;
> >  
> >    /* Establish circular links for normal bins */
> > -  for (i = 1; i < NBINS; ++i) {
> > -    bin = bin_at(av,i);
> > -    bin->fd = bin->bk = bin;
> > -  }
> > +  for (i = 1; i < NBINS; ++i)
> > +    {
> > +      bin = bin_at (av, i);
> > +      bin->fd = bin->bk = bin;
> > +    }
> >  
> >  #if MORECORE_CONTIGUOUS
> >    if (av != &main_arena)
> >  #endif
> > -    set_noncontiguous(av);
> > +  set_noncontiguous (av);
> >    if (av == &main_arena)
> > -    set_max_fast(DEFAULT_MXFAST);
> > +    set_max_fast (DEFAULT_MXFAST);
> >    av->flags |= FASTCHUNKS_BIT;
> >  
> > -  av->top            = initial_top(av);
> > +  av->top = initial_top (av);
> >  }
> >  
> >  /*
> >     Other internal utilities operating on mstates
> > -*/
> > + */
> >  
> > -static void*  sysmalloc(INTERNAL_SIZE_T, mstate);
> > -static int      systrim(size_t, mstate);
> > -static void     malloc_consolidate(mstate);
> > +static void *sysmalloc (INTERNAL_SIZE_T, mstate);
> > +static int      systrim (size_t, mstate);
> > +static void     malloc_consolidate (mstate);
> >  
> >  
> >  /* -------------- Early definitions for debugging hooks ---------------- */
> > @@ -1815,31 +1819,31 @@ static void     malloc_consolidate(mstate);
> >  #endif
> >  
> >  /* Forward declarations.  */
> > -static void* malloc_hook_ini (size_t sz,
> > -			      const void *caller) __THROW;
> > -static void* realloc_hook_ini (void* ptr, size_t sz,
> > -			       const void *caller) __THROW;
> > -static void* memalign_hook_ini (size_t alignment, size_t sz,
> > -				const void *caller) __THROW;
> > +static void *malloc_hook_ini (size_t sz,
> > +                              const void *caller) __THROW;
> > +static void *realloc_hook_ini (void *ptr, size_t sz,
> > +                               const void *caller) __THROW;
> > +static void *memalign_hook_ini (size_t alignment, size_t sz,
> > +                                const void *caller) __THROW;
> >  
> >  void weak_variable (*__malloc_initialize_hook) (void) = NULL;
> >  void weak_variable (*__free_hook) (void *__ptr,
> > -				   const void *) = NULL;
> > +                                   const void *) = NULL;
> >  void *weak_variable (*__malloc_hook)
> > -     (size_t __size, const void *) = malloc_hook_ini;
> > +  (size_t __size, const void *) = malloc_hook_ini;
> >  void *weak_variable (*__realloc_hook)
> > -     (void *__ptr, size_t __size, const void *)
> > -     = realloc_hook_ini;
> > +  (void *__ptr, size_t __size, const void *)
> > +  = realloc_hook_ini;
> >  void *weak_variable (*__memalign_hook)
> > -     (size_t __alignment, size_t __size, const void *)
> > -     = memalign_hook_ini;
> > +  (size_t __alignment, size_t __size, const void *)
> > +  = memalign_hook_ini;
> >  void weak_variable (*__after_morecore_hook) (void) = NULL;
> >  
> >  
> >  /* ---------------- Error behavior ------------------------------------ */
> >  
> >  #ifndef DEFAULT_CHECK_ACTION
> > -#define DEFAULT_CHECK_ACTION 3
> > +# define DEFAULT_CHECK_ACTION 3
> >  #endif
> >  
> >  static int check_action = DEFAULT_CHECK_ACTION;
> > @@ -1871,207 +1875,220 @@ free_perturb (char *p, size_t n)
> >  #include "arena.c"
> >  
> >  /*
> > -  Debugging support
> > +   Debugging support
> >  
> > -  These routines make a number of assertions about the states
> > -  of data structures that should be true at all times. If any
> > -  are not true, it's very likely that a user program has somehow
> > -  trashed memory. (It's also possible that there is a coding error
> > -  in malloc. In which case, please report it!)
> > -*/
> > +   These routines make a number of assertions about the states
> > +   of data structures that should be true at all times. If any
> > +   are not true, it's very likely that a user program has somehow
> > +   trashed memory. (It's also possible that there is a coding error
> > +   in malloc. In which case, please report it!)
> > + */
> >  
> > -#if ! MALLOC_DEBUG
> > +#if !MALLOC_DEBUG
> >  
> > -#define check_chunk(A,P)
> > -#define check_free_chunk(A,P)
> > -#define check_inuse_chunk(A,P)
> > -#define check_remalloced_chunk(A,P,N)
> > -#define check_malloced_chunk(A,P,N)
> > -#define check_malloc_state(A)
> > +# define check_chunk(A, P)
> > +# define check_free_chunk(A, P)
> > +# define check_inuse_chunk(A, P)
> > +# define check_remalloced_chunk(A, P, N)
> > +# define check_malloced_chunk(A, P, N)
> > +# define check_malloc_state(A)
> >  
> >  #else
> >  
> > -#define check_chunk(A,P)              do_check_chunk(A,P)
> > -#define check_free_chunk(A,P)         do_check_free_chunk(A,P)
> > -#define check_inuse_chunk(A,P)        do_check_inuse_chunk(A,P)
> > -#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
> > -#define check_malloced_chunk(A,P,N)   do_check_malloced_chunk(A,P,N)
> > -#define check_malloc_state(A)         do_check_malloc_state(A)
> > +# define check_chunk(A, P)              do_check_chunk (A, P)
> > +# define check_free_chunk(A, P)         do_check_free_chunk (A, P)
> > +# define check_inuse_chunk(A, P)        do_check_inuse_chunk (A, P)
> > +# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
> > +# define check_malloced_chunk(A, P, N)   do_check_malloced_chunk (A, P, N)
> > +# define check_malloc_state(A)         do_check_malloc_state (A)
> >  
> >  /*
> > -  Properties of all chunks
> > -*/
> > +   Properties of all chunks
> > + */
> >  
> > -static void do_check_chunk(mstate av, mchunkptr p)
> > +static void
> > +do_check_chunk (mstate av, mchunkptr p)
> >  {
> > -  unsigned long sz = chunksize(p);
> > +  unsigned long sz = chunksize (p);
> >    /* min and max possible addresses assuming contiguous allocation */
> > -  char* max_address = (char*)(av->top) + chunksize(av->top);
> > -  char* min_address = max_address - av->system_mem;
> > -
> > -  if (!chunk_is_mmapped(p)) {
> > +  char *max_address = (char *) (av->top) + chunksize (av->top);
> > +  char *min_address = max_address - av->system_mem;
> >  
> > -    /* Has legal address ... */
> > -    if (p != av->top) {
> > -      if (contiguous(av)) {
> > -	assert(((char*)p) >= min_address);
> > -	assert(((char*)p + sz) <= ((char*)(av->top)));
> > -      }
> > -    }
> > -    else {
> > -      /* top size is always at least MINSIZE */
> > -      assert((unsigned long)(sz) >= MINSIZE);
> > -      /* top predecessor always marked inuse */
> > -      assert(prev_inuse(p));
> > +  if (!chunk_is_mmapped (p))
> > +    {
> > +      /* Has legal address ... */
> > +      if (p != av->top)
> > +        {
> > +          if (contiguous (av))
> > +            {
> > +              assert (((char *) p) >= min_address);
> > +              assert (((char *) p + sz) <= ((char *) (av->top)));
> > +            }
> > +        }
> > +      else
> > +        {
> > +          /* top size is always at least MINSIZE */
> > +          assert ((unsigned long) (sz) >= MINSIZE);
> > +          /* top predecessor always marked inuse */
> > +          assert (prev_inuse (p));
> > +        }
> >      }
> > -
> > -  }
> > -  else {
> > -    /* address is outside main heap  */
> > -    if (contiguous(av) && av->top != initial_top(av)) {
> > -      assert(((char*)p) < min_address || ((char*)p) >= max_address);
> > +  else
> > +    {
> > +      /* address is outside main heap  */
> > +      if (contiguous (av) && av->top != initial_top (av))
> > +        {
> > +          assert (((char *) p) < min_address || ((char *) p) >= max_address);
> > +        }
> > +      /* chunk is page-aligned */
> > +      assert (((p->prev_size + sz) & (GLRO (dl_pagesize) - 1)) == 0);
> > +      /* mem is aligned */
> > +      assert (aligned_OK (chunk2mem (p)));
> >      }
> > -    /* chunk is page-aligned */
> > -    assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0);
> > -    /* mem is aligned */
> > -    assert(aligned_OK(chunk2mem(p)));
> > -  }
> >  }
> >  
> >  /*
> > -  Properties of free chunks
> > -*/
> > +   Properties of free chunks
> > + */
> >  
> > -static void do_check_free_chunk(mstate av, mchunkptr p)
> > +static void
> > +do_check_free_chunk (mstate av, mchunkptr p)
> >  {
> > -  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
> > -  mchunkptr next = chunk_at_offset(p, sz);
> > +  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
> > +  mchunkptr next = chunk_at_offset (p, sz);
> >  
> > -  do_check_chunk(av, p);
> > +  do_check_chunk (av, p);
> >  
> >    /* Chunk must claim to be free ... */
> > -  assert(!inuse(p));
> > -  assert (!chunk_is_mmapped(p));
> > +  assert (!inuse (p));
> > +  assert (!chunk_is_mmapped (p));
> >  
> >    /* Unless a special marker, must have OK fields */
> > -  if ((unsigned long)(sz) >= MINSIZE)
> > -  {
> > -    assert((sz & MALLOC_ALIGN_MASK) == 0);
> > -    assert(aligned_OK(chunk2mem(p)));
> > -    /* ... matching footer field */
> > -    assert(next->prev_size == sz);
> > -    /* ... and is fully consolidated */
> > -    assert(prev_inuse(p));
> > -    assert (next == av->top || inuse(next));
> > -
> > -    /* ... and has minimally sane links */
> > -    assert(p->fd->bk == p);
> > -    assert(p->bk->fd == p);
> > -  }
> > +  if ((unsigned long) (sz) >= MINSIZE)
> > +    {
> > +      assert ((sz & MALLOC_ALIGN_MASK) == 0);
> > +      assert (aligned_OK (chunk2mem (p)));
> > +      /* ... matching footer field */
> > +      assert (next->prev_size == sz);
> > +      /* ... and is fully consolidated */
> > +      assert (prev_inuse (p));
> > +      assert (next == av->top || inuse (next));
> > +
> > +      /* ... and has minimally sane links */
> > +      assert (p->fd->bk == p);
> > +      assert (p->bk->fd == p);
> > +    }
> >    else /* markers are always of size SIZE_SZ */
> > -    assert(sz == SIZE_SZ);
> > +    assert (sz == SIZE_SZ);
> >  }
> >  
> >  /*
> > -  Properties of inuse chunks
> > -*/
> > +   Properties of inuse chunks
> > + */
> >  
> > -static void do_check_inuse_chunk(mstate av, mchunkptr p)
> > +static void
> > +do_check_inuse_chunk (mstate av, mchunkptr p)
> >  {
> >    mchunkptr next;
> >  
> > -  do_check_chunk(av, p);
> > +  do_check_chunk (av, p);
> >  
> > -  if (chunk_is_mmapped(p))
> > +  if (chunk_is_mmapped (p))
> >      return; /* mmapped chunks have no next/prev */
> >  
> >    /* Check whether it claims to be in use ... */
> > -  assert(inuse(p));
> > +  assert (inuse (p));
> >  
> > -  next = next_chunk(p);
> > +  next = next_chunk (p);
> >  
> >    /* ... and is surrounded by OK chunks.
> > -    Since more things can be checked with free chunks than inuse ones,
> > -    if an inuse chunk borders them and debug is on, it's worth doing them.
> > -  */
> > -  if (!prev_inuse(p))  {
> > -    /* Note that we cannot even look at prev unless it is not inuse */
> > -    mchunkptr prv = prev_chunk(p);
> > -    assert(next_chunk(prv) == p);
> > -    do_check_free_chunk(av, prv);
> > -  }
> > +     Since more things can be checked with free chunks than inuse ones,
> > +     if an inuse chunk borders them and debug is on, it's worth doing them.
> > +   */
> > +  if (!prev_inuse (p))
> > +    {
> > +      /* Note that we cannot even look at prev unless it is not inuse */
> > +      mchunkptr prv = prev_chunk (p);
> > +      assert (next_chunk (prv) == p);
> > +      do_check_free_chunk (av, prv);
> > +    }
> >  
> > -  if (next == av->top) {
> > -    assert(prev_inuse(next));
> > -    assert(chunksize(next) >= MINSIZE);
> > -  }
> > -  else if (!inuse(next))
> > -    do_check_free_chunk(av, next);
> > +  if (next == av->top)
> > +    {
> > +      assert (prev_inuse (next));
> > +      assert (chunksize (next) >= MINSIZE);
> > +    }
> > +  else if (!inuse (next))
> > +    do_check_free_chunk (av, next);
> >  }
> >  
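The "cannot even look at prev unless it is not inuse" comment in this
hunk follows from the same layout: prev_size is only maintained while
the preceding chunk is free.  A rough sketch of the navigation, reusing
the hypothetical sketch_chunk layout above:

    #define SK_PREV_INUSE  0x1
    #define SK_FLAG_BITS   0x7  /* PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA */

    #define sk_prev_inuse(p)  ((p)->size & SK_PREV_INUSE)
    #define sk_chunksize(p)   ((p)->size & ~(size_t) SK_FLAG_BITS)
    #define sk_next_chunk(p) \
      ((struct sketch_chunk *) ((char *) (p) + sk_chunksize (p)))

    /* Valid only when !sk_prev_inuse (p):  */
    #define sk_prev_chunk(p) \
      ((struct sketch_chunk *) ((char *) (p) - (p)->prev_size))
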
> >  /*
> > -  Properties of chunks recycled from fastbins
> > -*/
> > +   Properties of chunks recycled from fastbins
> > + */
> >  
> > -static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
> > +static void
> > +do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
> >  {
> > -  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
> > +  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
> >  
> > -  if (!chunk_is_mmapped(p)) {
> > -    assert(av == arena_for_chunk(p));
> > -    if (chunk_non_main_arena(p))
> > -      assert(av != &main_arena);
> > -    else
> > -      assert(av == &main_arena);
> > -  }
> > +  if (!chunk_is_mmapped (p))
> > +    {
> > +      assert (av == arena_for_chunk (p));
> > +      if (chunk_non_main_arena (p))
> > +        assert (av != &main_arena);
> > +      else
> > +        assert (av == &main_arena);
> > +    }
> >  
> > -  do_check_inuse_chunk(av, p);
> > +  do_check_inuse_chunk (av, p);
> >  
> >    /* Legal size ... */
> > -  assert((sz & MALLOC_ALIGN_MASK) == 0);
> > -  assert((unsigned long)(sz) >= MINSIZE);
> > +  assert ((sz & MALLOC_ALIGN_MASK) == 0);
> > +  assert ((unsigned long) (sz) >= MINSIZE);
> >    /* ... and alignment */
> > -  assert(aligned_OK(chunk2mem(p)));
> > +  assert (aligned_OK (chunk2mem (p)));
> >    /* chunk is less than MINSIZE more than request */
> > -  assert((long)(sz) - (long)(s) >= 0);
> > -  assert((long)(sz) - (long)(s + MINSIZE) < 0);
> > +  assert ((long) (sz) - (long) (s) >= 0);
> > +  assert ((long) (sz) - (long) (s + MINSIZE) < 0);
> >  }
> >  
> >  /*
> > -  Properties of nonrecycled chunks at the point they are malloced
> > -*/
> > +   Properties of nonrecycled chunks at the point they are malloced
> > + */
> >  
> > -static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
> > +static void
> > +do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
> >  {
> >    /* same as recycled case ... */
> > -  do_check_remalloced_chunk(av, p, s);
> > +  do_check_remalloced_chunk (av, p, s);
> >  
> >    /*
> > -    ... plus,  must obey implementation invariant that prev_inuse is
> > -    always true of any allocated chunk; i.e., that each allocated
> > -    chunk borders either a previously allocated and still in-use
> > -    chunk, or the base of its memory arena. This is ensured
> > -    by making all allocations from the `lowest' part of any found
> > -    chunk.  This does not necessarily hold however for chunks
> > -    recycled via fastbins.
> > -  */
> > -
> > -  assert(prev_inuse(p));
> > +     ... plus,  must obey implementation invariant that prev_inuse is
> > +     always true of any allocated chunk; i.e., that each allocated
> > +     chunk borders either a previously allocated and still in-use
> > +     chunk, or the base of its memory arena. This is ensured
> > +     by making all allocations from the `lowest' part of any found
> > +     chunk.  This does not necessarily hold however for chunks
> > +     recycled via fastbins.
> > +   */
> > +
> > +  assert (prev_inuse (p));
> >  }
> >  
> >  
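To make the prev_inuse invariant concrete, on a 64-bit build
(SIZE_SZ == 8, MALLOC_ALIGNMENT == 16) a hypothetical malloc (24) is
served from a 32-byte chunk carved from the low end of a free region:

    /* request2size (24) == (24 + 8 + 15) & ~15 == 32, so the fresh
       chunk's header reads  p->size == 32 | PREV_INUSE == 0x21.
       Chunks recycled via fastbins may not satisfy this, which is why
       do_check_remalloced_chunk does not assert prev_inuse.  */
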
> >  /*
> > -  Properties of malloc_state.
> > +   Properties of malloc_state.
> >  
> > -  This may be useful for debugging malloc, as well as detecting user
> > -  programmer errors that somehow write into malloc_state.
> > +   This may be useful for debugging malloc, as well as detecting user
> > +   programmer errors that somehow write into malloc_state.
> >  
> > -  If you are extending or experimenting with this malloc, you can
> > -  probably figure out how to hack this routine to print out or
> > -  display chunk addresses, sizes, bins, and other instrumentation.
> > -*/
> > +   If you are extending or experimenting with this malloc, you can
> > +   probably figure out how to hack this routine to print out or
> > +   display chunk addresses, sizes, bins, and other instrumentation.
> > + */
> >  
> > -static void do_check_malloc_state(mstate av)
> > +static void
> > +do_check_malloc_state (mstate av)
> >  {
> >    int i;
> >    mchunkptr p;
> > @@ -2083,126 +2100,132 @@ static void do_check_malloc_state(mstate av)
> >    int max_fast_bin;
> >  
> >    /* internal size_t must be no wider than pointer type */
> > -  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
> > +  assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
> >  
> >    /* alignment is a power of 2 */
> > -  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
> > +  assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
> >  
> >    /* cannot run remaining checks until fully initialized */
> > -  if (av->top == 0 || av->top == initial_top(av))
> > +  if (av->top == 0 || av->top == initial_top (av))
> >      return;
> >  
> >    /* pagesize is a power of 2 */
> > -  assert((GLRO(dl_pagesize) & (GLRO(dl_pagesize)-1)) == 0);
> > +  assert ((GLRO (dl_pagesize) & (GLRO (dl_pagesize) - 1)) == 0);
> >  
> >    /* A contiguous main_arena is consistent with sbrk_base.  */
> > -  if (av == &main_arena && contiguous(av))
> > -    assert((char*)mp_.sbrk_base + av->system_mem ==
> > -	   (char*)av->top + chunksize(av->top));
> > +  if (av == &main_arena && contiguous (av))
> > +    assert ((char *) mp_.sbrk_base + av->system_mem ==
> > +            (char *) av->top + chunksize (av->top));
> >  
> >    /* properties of fastbins */
> >  
> >    /* max_fast is in allowed range */
> > -  assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
> > -
> > -  max_fast_bin = fastbin_index(get_max_fast ());
> > -
> > -  for (i = 0; i < NFASTBINS; ++i) {
> > -    p = fastbin (av, i);
> > -
> > -    /* The following test can only be performed for the main arena.
> > -       While mallopt calls malloc_consolidate to get rid of all fast
> > -       bins (especially those larger than the new maximum) this does
> > -       only happen for the main arena.  Trying to do this for any
> > -       other arena would mean those arenas have to be locked and
> > -       malloc_consolidate be called for them.  This is excessive.  And
> > -       even if this is acceptable to somebody it still cannot solve
> > -       the problem completely since if the arena is locked a
> > -       concurrent malloc call might create a new arena which then
> > -       could use the newly invalid fast bins.  */
> > -
> > -    /* all bins past max_fast are empty */
> > -    if (av == &main_arena && i > max_fast_bin)
> > -      assert(p == 0);
> > -
> > -    while (p != 0) {
> > -      /* each chunk claims to be inuse */
> > -      do_check_inuse_chunk(av, p);
> > -      total += chunksize(p);
> > -      /* chunk belongs in this bin */
> > -      assert(fastbin_index(chunksize(p)) == i);
> > -      p = p->fd;
> > +  assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
> > +
> > +  max_fast_bin = fastbin_index (get_max_fast ());
> > +
> > +  for (i = 0; i < NFASTBINS; ++i)
> > +    {
> > +      p = fastbin (av, i);
> > +
> > +      /* The following test can only be performed for the main arena.
> > +         While mallopt calls malloc_consolidate to get rid of all fast
> > +         bins (especially those larger than the new maximum) this does
> > +         only happen for the main arena.  Trying to do this for any
> > +         other arena would mean those arenas have to be locked and
> > +         malloc_consolidate be called for them.  This is excessive.  And
> > +         even if this is acceptable to somebody it still cannot solve
> > +         the problem completely since if the arena is locked a
> > +         concurrent malloc call might create a new arena which then
> > +         could use the newly invalid fast bins.  */
> > +
> > +      /* all bins past max_fast are empty */
> > +      if (av == &main_arena && i > max_fast_bin)
> > +        assert (p == 0);
> > +
> > +      while (p != 0)
> > +        {
> > +          /* each chunk claims to be inuse */
> > +          do_check_inuse_chunk (av, p);
> > +          total += chunksize (p);
> > +          /* chunk belongs in this bin */
> > +          assert (fastbin_index (chunksize (p)) == i);
> > +          p = p->fd;
> > +        }
> >      }
> > -  }
> >  
> >    if (total != 0)
> > -    assert(have_fastchunks(av));
> > -  else if (!have_fastchunks(av))
> > -    assert(total == 0);
> > +    assert (have_fastchunks (av));
> > +  else if (!have_fastchunks (av))
> > +    assert (total == 0);
> >  
> >    /* check normal bins */
> > -  for (i = 1; i < NBINS; ++i) {
> > -    b = bin_at(av,i);
> > -
> > -    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
> > -    if (i >= 2) {
> > -      unsigned int binbit = get_binmap(av,i);
> > -      int empty = last(b) == b;
> > -      if (!binbit)
> > -	assert(empty);
> > -      else if (!empty)
> > -	assert(binbit);
> > -    }
> > -
> > -    for (p = last(b); p != b; p = p->bk) {
> > -      /* each chunk claims to be free */
> > -      do_check_free_chunk(av, p);
> > -      size = chunksize(p);
> > -      total += size;
> > -      if (i >= 2) {
> > -	/* chunk belongs in bin */
> > -	idx = bin_index(size);
> > -	assert(idx == i);
> > -	/* lists are sorted */
> > -	assert(p->bk == b ||
> > -	       (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
> > -
> > -	if (!in_smallbin_range(size))
> > -	  {
> > -	    if (p->fd_nextsize != NULL)
> > -	      {
> > -		if (p->fd_nextsize == p)
> > -		  assert (p->bk_nextsize == p);
> > -		else
> > -		  {
> > -		    if (p->fd_nextsize == first (b))
> > -		      assert (chunksize (p) < chunksize (p->fd_nextsize));
> > -		    else
> > -		      assert (chunksize (p) > chunksize (p->fd_nextsize));
> > -
> > -		    if (p == first (b))
> > -		      assert (chunksize (p) > chunksize (p->bk_nextsize));
> > -		    else
> > -		      assert (chunksize (p) < chunksize (p->bk_nextsize));
> > -		  }
> > -	      }
> > -	    else
> > -	      assert (p->bk_nextsize == NULL);
> > -	  }
> > -      } else if (!in_smallbin_range(size))
> > -	assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
> > -      /* chunk is followed by a legal chain of inuse chunks */
> > -      for (q = next_chunk(p);
> > -	   (q != av->top && inuse(q) &&
> > -	     (unsigned long)(chunksize(q)) >= MINSIZE);
> > -	   q = next_chunk(q))
> > -	do_check_inuse_chunk(av, q);
> > +  for (i = 1; i < NBINS; ++i)
> > +    {
> > +      b = bin_at (av, i);
> > +
> > +      /* binmap is accurate (except for bin 1 == unsorted_chunks) */
> > +      if (i >= 2)
> > +        {
> > +          unsigned int binbit = get_binmap (av, i);
> > +          int empty = last (b) == b;
> > +          if (!binbit)
> > +            assert (empty);
> > +          else if (!empty)
> > +            assert (binbit);
> > +        }
> > +
> > +      for (p = last (b); p != b; p = p->bk)
> > +        {
> > +          /* each chunk claims to be free */
> > +          do_check_free_chunk (av, p);
> > +          size = chunksize (p);
> > +          total += size;
> > +          if (i >= 2)
> > +            {
> > +              /* chunk belongs in bin */
> > +              idx = bin_index (size);
> > +              assert (idx == i);
> > +              /* lists are sorted */
> > +              assert (p->bk == b ||
> > +                      (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
> > +
> > +              if (!in_smallbin_range (size))
> > +                {
> > +                  if (p->fd_nextsize != NULL)
> > +                    {
> > +                      if (p->fd_nextsize == p)
> > +                        assert (p->bk_nextsize == p);
> > +                      else
> > +                        {
> > +                          if (p->fd_nextsize == first (b))
> > +                            assert (chunksize (p) < chunksize (p->fd_nextsize));
> > +                          else
> > +                            assert (chunksize (p) > chunksize (p->fd_nextsize));
> > +
> > +                          if (p == first (b))
> > +                            assert (chunksize (p) > chunksize (p->bk_nextsize));
> > +                          else
> > +                            assert (chunksize (p) < chunksize (p->bk_nextsize));
> > +                        }
> > +                    }
> > +                  else
> > +                    assert (p->bk_nextsize == NULL);
> > +                }
> > +            }
> > +          else if (!in_smallbin_range (size))
> > +            assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
> > +          /* chunk is followed by a legal chain of inuse chunks */
> > +          for (q = next_chunk (p);
> > +               (q != av->top && inuse (q) &&
> > +                (unsigned long) (chunksize (q)) >= MINSIZE);
> > +               q = next_chunk (q))
> > +            do_check_inuse_chunk (av, q);
> > +        }
> >      }
> > -  }
> >  
> >    /* top chunk is OK */
> > -  check_chunk(av, av->top);
> > -
> > +  check_chunk (av, av->top);
> >  }
> >  #endif
> >  
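For the fastbin loop in do_check_malloc_state, each chunk's size has to
map back to the bin it sits in.  Fastbin sizes ascend in 2 * SIZE_SZ
steps starting at MINSIZE, so a simplified 64-bit sketch of the index
computation (the fastbin_index macro in malloc.c is authoritative) is:

    #include <stddef.h>

    /* Simplified sketch, assuming SIZE_SZ == 8.  */
    static unsigned int
    sk_fastbin_index (size_t sz)
    {
      return (unsigned int) (sz >> 4) - 2;
    }
    /* chunk size 32 -> bin 0, 48 -> bin 1, 64 -> bin 2, ...  */
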
> > @@ -2214,461 +2237,482 @@ static void do_check_malloc_state(mstate av)
> >  /* ----------- Routines dealing with system allocation -------------- */
> >  
> >  /*
> > -  sysmalloc handles malloc cases requiring more memory from the system.
> > -  On entry, it is assumed that av->top does not have enough
> > -  space to service request for nb bytes, thus requiring that av->top
> > -  be extended or replaced.
> > -*/
> > +   sysmalloc handles malloc cases requiring more memory from the system.
> > +   On entry, it is assumed that av->top does not have enough
> > +   space to service request for nb bytes, thus requiring that av->top
> > +   be extended or replaced.
> > + */
> >  
> > -static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
> > +static void *
> > +sysmalloc (INTERNAL_SIZE_T nb, mstate av)
> >  {
> > -  mchunkptr       old_top;        /* incoming value of av->top */
> > +  mchunkptr old_top;              /* incoming value of av->top */
> >    INTERNAL_SIZE_T old_size;       /* its size */
> > -  char*           old_end;        /* its end address */
> > +  char *old_end;                  /* its end address */
> >  
> > -  long            size;           /* arg to first MORECORE or mmap call */
> > -  char*           brk;            /* return value from MORECORE */
> > +  long size;                      /* arg to first MORECORE or mmap call */
> > +  char *brk;                      /* return value from MORECORE */
> >  
> > -  long            correction;     /* arg to 2nd MORECORE call */
> > -  char*           snd_brk;        /* 2nd return val */
> > +  long correction;                /* arg to 2nd MORECORE call */
> > +  char *snd_brk;                  /* 2nd return val */
> >  
> >    INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
> >    INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
> > -  char*           aligned_brk;    /* aligned offset into brk */
> > +  char *aligned_brk;              /* aligned offset into brk */
> >  
> > -  mchunkptr       p;              /* the allocated/returned chunk */
> > -  mchunkptr       remainder;      /* remainder from allocation */
> > -  unsigned long   remainder_size; /* its size */
> > +  mchunkptr p;                    /* the allocated/returned chunk */
> > +  mchunkptr remainder;            /* remainder from allocation */
> > +  unsigned long remainder_size;   /* its size */
> >  
> >  
> > -  size_t          pagemask  = GLRO(dl_pagesize) - 1;
> > -  bool            tried_mmap = false;
> > +  size_t pagemask = GLRO (dl_pagesize) - 1;
> > +  bool tried_mmap = false;
> >  
> >  
> >    /*
> > -    If have mmap, and the request size meets the mmap threshold, and
> > -    the system supports mmap, and there are few enough currently
> > -    allocated mmapped regions, try to directly map this request
> > -    rather than expanding top.
> > -  */
> > -
> > -  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
> > -      (mp_.n_mmaps < mp_.n_mmaps_max)) {
> > -
> > -    char* mm;             /* return value from mmap call*/
> > -
> > -  try_mmap:
> > -    /*
> > -      Round up size to nearest page.  For mmapped chunks, the overhead
> > -      is one SIZE_SZ unit larger than for normal chunks, because there
> > -      is no following chunk whose prev_size field could be used.
> > -
> > -      See the front_misalign handling below, for glibc there is no
> > -      need for further alignments unless we have have high alignment.
> > -    */
> > -    if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
> > -      size = (nb + SIZE_SZ + pagemask) & ~pagemask;
> > -    else
> > -      size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
> > -    tried_mmap = true;
> > -
> > -    /* Don't try if size wraps around 0 */
> > -    if ((unsigned long)(size) > (unsigned long)(nb)) {
> > -
> > -      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0));
> > -
> > -      if (mm != MAP_FAILED) {
> > -
> > -	/*
> > -	  The offset to the start of the mmapped region is stored
> > -	  in the prev_size field of the chunk. This allows us to adjust
> > -	  returned start address to meet alignment requirements here
> > -	  and in memalign(), and still be able to compute proper
> > -	  address argument for later munmap in free() and realloc().
> > -	*/
> > -
> > -	if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
> > -	  {
> > -	    /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
> > -	       MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
> > -	       aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
> > -	    assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
> > -	    front_misalign = 0;
> > -	  }
> > -	else
> > -	  front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
> > -	if (front_misalign > 0) {
> > -	  correction = MALLOC_ALIGNMENT - front_misalign;
> > -	  p = (mchunkptr)(mm + correction);
> > -	  p->prev_size = correction;
> > -	  set_head(p, (size - correction) |IS_MMAPPED);
> > -	}
> > -	else
> > -	  {
> > -	    p = (mchunkptr)mm;
> > -	    set_head(p, size|IS_MMAPPED);
> > -	  }
> > -
> > -	/* update statistics */
> > -
> > -	int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
> > -	atomic_max (&mp_.max_n_mmaps, new);
> > -
> > -	unsigned long sum;
> > -	sum = atomic_exchange_and_add(&mp_.mmapped_mem, size) + size;
> > -	atomic_max (&mp_.max_mmapped_mem, sum);
> > -
> > -	check_chunk(av, p);
> > -
> > -	return chunk2mem(p);
> > -      }
> > +     If have mmap, and the request size meets the mmap threshold, and
> > +     the system supports mmap, and there are few enough currently
> > +     allocated mmapped regions, try to directly map this request
> > +     rather than expanding top.
> > +   */
> > +
> > +  if ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) &&
> > +      (mp_.n_mmaps < mp_.n_mmaps_max))
> > +    {
> > +      char *mm;           /* return value from mmap call */
> > +
> > +    try_mmap:
> > +      /*
> > +         Round up size to nearest page.  For mmapped chunks, the overhead
> > +         is one SIZE_SZ unit larger than for normal chunks, because there
> > +         is no following chunk whose prev_size field could be used.
> > +
> > +         See the front_misalign handling below, for glibc there is no
> > +         need for further alignments unless we have high alignment.
> > +       */
> > +      if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
> > +        size = (nb + SIZE_SZ + pagemask) & ~pagemask;
> > +      else
> > +        size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
> > +      tried_mmap = true;
> > +
> > +      /* Don't try if size wraps around 0 */
> > +      if ((unsigned long) (size) > (unsigned long) (nb))
> > +        {
> > +          mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
> > +
> > +          if (mm != MAP_FAILED)
> > +            {
> > +              /*
> > +                 The offset to the start of the mmapped region is stored
> > +                 in the prev_size field of the chunk. This allows us to adjust
> > +                 returned start address to meet alignment requirements here
> > +                 and in memalign(), and still be able to compute proper
> > +                 address argument for later munmap in free() and realloc().
> > +               */
> > +
> > +              if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
> > +                {
> > +                  /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
> > +                     MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
> > +                     aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
> > +                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
> > +                  front_misalign = 0;
> > +                }
> > +              else
> > +                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
> > +              if (front_misalign > 0)
> > +                {
> > +                  correction = MALLOC_ALIGNMENT - front_misalign;
> > +                  p = (mchunkptr) (mm + correction);
> > +                  p->prev_size = correction;
> > +                  set_head (p, (size - correction) | IS_MMAPPED);
> > +                }
> > +              else
> > +                {
> > +                  p = (mchunkptr) mm;
> > +                  set_head (p, size | IS_MMAPPED);
> > +                }
> > +
> > +              /* update statistics */
> > +
> > +              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
> > +              atomic_max (&mp_.max_n_mmaps, new);
> > +
> > +              unsigned long sum;
> > +              sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
> > +              atomic_max (&mp_.max_mmapped_mem, sum);
> > +
> > +              check_chunk (av, p);
> > +
> > +              return chunk2mem (p);
> > +            }
> > +        }
> >      }
> > -  }
> >  
> >    /* Record incoming configuration of top */
> >  
> > -  old_top  = av->top;
> > -  old_size = chunksize(old_top);
> > -  old_end  = (char*)(chunk_at_offset(old_top, old_size));
> > +  old_top = av->top;
> > +  old_size = chunksize (old_top);
> > +  old_end = (char *) (chunk_at_offset (old_top, old_size));
> >  
> > -  brk = snd_brk = (char*)(MORECORE_FAILURE);
> > +  brk = snd_brk = (char *) (MORECORE_FAILURE);
> >  
> >    /*
> >       If not the first time through, we require old_size to be
> >       at least MINSIZE and to have prev_inuse set.
> > -  */
> > +   */
> >  
> > -  assert((old_top == initial_top(av) && old_size == 0) ||
> > -	 ((unsigned long) (old_size) >= MINSIZE &&
> > -	  prev_inuse(old_top) &&
> > -	  ((unsigned long)old_end & pagemask) == 0));
> > +  assert ((old_top == initial_top (av) && old_size == 0) ||
> > +          ((unsigned long) (old_size) >= MINSIZE &&
> > +           prev_inuse (old_top) &&
> > +           ((unsigned long) old_end & pagemask) == 0));
> >  
> >    /* Precondition: not enough current space to satisfy nb request */
> > -  assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
> > -
> > -
> > -  if (av != &main_arena) {
> > -
> > -    heap_info *old_heap, *heap;
> > -    size_t old_heap_size;
> > -
> > -    /* First try to extend the current heap. */
> > -    old_heap = heap_for_ptr(old_top);
> > -    old_heap_size = old_heap->size;
> > -    if ((long) (MINSIZE + nb - old_size) > 0
> > -	&& grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
> > -      av->system_mem += old_heap->size - old_heap_size;
> > -      arena_mem += old_heap->size - old_heap_size;
> > -      set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
> > -	       | PREV_INUSE);
> > -    }
> > -    else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
> > -      /* Use a newly allocated heap.  */
> > -      heap->ar_ptr = av;
> > -      heap->prev = old_heap;
> > -      av->system_mem += heap->size;
> > -      arena_mem += heap->size;
> > -      /* Set up the new top.  */
> > -      top(av) = chunk_at_offset(heap, sizeof(*heap));
> > -      set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
> > -
> > -      /* Setup fencepost and free the old top chunk with a multiple of
> > -	 MALLOC_ALIGNMENT in size. */
> > -      /* The fencepost takes at least MINSIZE bytes, because it might
> > -	 become the top chunk again later.  Note that a footer is set
> > -	 up, too, although the chunk is marked in use. */
> > -      old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
> > -      set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
> > -      if (old_size >= MINSIZE) {
> > -	set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
> > -	set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
> > -	set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
> > -	_int_free(av, old_top, 1);
> > -      } else {
> > -	set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
> > -	set_foot(old_top, (old_size + 2*SIZE_SZ));
> > -      }
> > -    }
> > -    else if (!tried_mmap)
> > -      /* We can at least try to use to mmap memory.  */
> > -      goto try_mmap;
> > -
> > -  } else { /* av == main_arena */
> > -
> > -
> > -  /* Request enough space for nb + pad + overhead */
> > -
> > -  size = nb + mp_.top_pad + MINSIZE;
> > +  assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
> >  
> > -  /*
> > -    If contiguous, we can subtract out existing space that we hope to
> > -    combine with new space. We add it back later only if
> > -    we don't actually get contiguous space.
> > -  */
> > -
> > -  if (contiguous(av))
> > -    size -= old_size;
> > -
> > -  /*
> > -    Round to a multiple of page size.
> > -    If MORECORE is not contiguous, this ensures that we only call it
> > -    with whole-page arguments.  And if MORECORE is contiguous and
> > -    this is not first time through, this preserves page-alignment of
> > -    previous calls. Otherwise, we correct to page-align below.
> > -  */
> > -
> > -  size = (size + pagemask) & ~pagemask;
> > -
> > -  /*
> > -    Don't try to call MORECORE if argument is so big as to appear
> > -    negative. Note that since mmap takes size_t arg, it may succeed
> > -    below even if we cannot call MORECORE.
> > -  */
> > -
> > -  if (size > 0) {
> > -    brk = (char*)(MORECORE(size));
> > -    LIBC_PROBE (memory_sbrk_more, 2, brk, size);
> > -  }
> >  
> > -  if (brk != (char*)(MORECORE_FAILURE)) {
> > -    /* Call the `morecore' hook if necessary.  */
> > -    void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
> > -    if (__builtin_expect (hook != NULL, 0))
> > -      (*hook) ();
> > -  } else {
> > -  /*
> > -    If have mmap, try using it as a backup when MORECORE fails or
> > -    cannot be used. This is worth doing on systems that have "holes" in
> > -    address space, so sbrk cannot extend to give contiguous space, but
> > -    space is available elsewhere.  Note that we ignore mmap max count
> > -    and threshold limits, since the space will not be used as a
> > -    segregated mmap region.
> > -  */
> > -
> > -    /* Cannot merge with old top, so add its size back in */
> > -    if (contiguous(av))
> > -      size = (size + old_size + pagemask) & ~pagemask;
> > -
> > -    /* If we are relying on mmap as backup, then use larger units */
> > -    if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
> > -      size = MMAP_AS_MORECORE_SIZE;
> > -
> > -    /* Don't try if size wraps around 0 */
> > -    if ((unsigned long)(size) > (unsigned long)(nb)) {
> > -
> > -      char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0));
> > -
> > -      if (mbrk != MAP_FAILED) {
> > -
> > -	/* We do not need, and cannot use, another sbrk call to find end */
> > -	brk = mbrk;
> > -	snd_brk = brk + size;
> > -
> > -	/*
> > -	   Record that we no longer have a contiguous sbrk region.
> > -	   After the first time mmap is used as backup, we do not
> > -	   ever rely on contiguous space since this could incorrectly
> > -	   bridge regions.
> > -	*/
> > -	set_noncontiguous(av);
> > -      }
> > +  if (av != &main_arena)
> > +    {
> > +      heap_info *old_heap, *heap;
> > +      size_t old_heap_size;
> > +
> > +      /* First try to extend the current heap. */
> > +      old_heap = heap_for_ptr (old_top);
> > +      old_heap_size = old_heap->size;
> > +      if ((long) (MINSIZE + nb - old_size) > 0
> > +          && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
> > +        {
> > +          av->system_mem += old_heap->size - old_heap_size;
> > +          arena_mem += old_heap->size - old_heap_size;
> > +          set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
> > +                    | PREV_INUSE);
> > +        }
> > +      else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
> > +        {
> > +          /* Use a newly allocated heap.  */
> > +          heap->ar_ptr = av;
> > +          heap->prev = old_heap;
> > +          av->system_mem += heap->size;
> > +          arena_mem += heap->size;
> > +          /* Set up the new top.  */
> > +          top (av) = chunk_at_offset (heap, sizeof (*heap));
> > +          set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
> > +
> > +          /* Setup fencepost and free the old top chunk with a multiple of
> > +             MALLOC_ALIGNMENT in size. */
> > +          /* The fencepost takes at least MINSIZE bytes, because it might
> > +             become the top chunk again later.  Note that a footer is set
> > +             up, too, although the chunk is marked in use. */
> > +          old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
> > +          set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
> > +          if (old_size >= MINSIZE)
> > +            {
> > +              set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
> > +              set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
> > +              set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
> > +              _int_free (av, old_top, 1);
> > +            }
> > +          else
> > +            {
> > +              set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
> > +              set_foot (old_top, (old_size + 2 * SIZE_SZ));
> > +            }
> > +        }
> > +      else if (!tried_mmap)
> > +        /* We can at least try to mmap memory.  */
> > +        goto try_mmap;
> >      }
> > -  }
> > -
> > -  if (brk != (char*)(MORECORE_FAILURE)) {
> > -    if (mp_.sbrk_base == 0)
> > -      mp_.sbrk_base = brk;
> > -    av->system_mem += size;
> > +  else     /* av == main_arena */
> >  
> > -    /*
> > -      If MORECORE extends previous space, we can likewise extend top size.
> > -    */
> >  
> > -    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
> > -      set_head(old_top, (size + old_size) | PREV_INUSE);
> > +    { /* Request enough space for nb + pad + overhead */
> > +      size = nb + mp_.top_pad + MINSIZE;
> >  
> > -    else if (contiguous(av) && old_size && brk < old_end) {
> > -      /* Oops!  Someone else killed our space..  Can't touch anything.  */
> > -      malloc_printerr (3, "break adjusted to free malloc space", brk);
> > -    }
> > +      /*
> > +         If contiguous, we can subtract out existing space that we hope to
> > +         combine with new space. We add it back later only if
> > +         we don't actually get contiguous space.
> > +       */
> >  
> > -    /*
> > -      Otherwise, make adjustments:
> > -
> > -      * If the first time through or noncontiguous, we need to call sbrk
> > -	just to find out where the end of memory lies.
> > -
> > -      * We need to ensure that all returned chunks from malloc will meet
> > -	MALLOC_ALIGNMENT
> > -
> > -      * If there was an intervening foreign sbrk, we need to adjust sbrk
> > -	request size to account for fact that we will not be able to
> > -	combine new space with existing space in old_top.
> > -
> > -      * Almost all systems internally allocate whole pages at a time, in
> > -	which case we might as well use the whole last page of request.
> > -	So we allocate enough more memory to hit a page boundary now,
> > -	which in turn causes future contiguous calls to page-align.
> > -    */
> > -
> > -    else {
> > -      front_misalign = 0;
> > -      end_misalign = 0;
> > -      correction = 0;
> > -      aligned_brk = brk;
> > -
> > -      /* handle contiguous cases */
> > -      if (contiguous(av)) {
> > -
> > -	/* Count foreign sbrk as system_mem.  */
> > -	if (old_size)
> > -	  av->system_mem += brk - old_end;
> > -
> > -	/* Guarantee alignment of first new chunk made from this space */
> > -
> > -	front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
> > -	if (front_misalign > 0) {
> > -
> > -	  /*
> > -	    Skip over some bytes to arrive at an aligned position.
> > -	    We don't need to specially mark these wasted front bytes.
> > -	    They will never be accessed anyway because
> > -	    prev_inuse of av->top (and any chunk created from its start)
> > -	    is always true after initialization.
> > -	  */
> > -
> > -	  correction = MALLOC_ALIGNMENT - front_misalign;
> > -	  aligned_brk += correction;
> > -	}
> > -
> > -	/*
> > -	  If this isn't adjacent to existing space, then we will not
> > -	  be able to merge with old_top space, so must add to 2nd request.
> > -	*/
> > -
> > -	correction += old_size;
> > -
> > -	/* Extend the end address to hit a page boundary */
> > -	end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
> > -	correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
> > -
> > -	assert(correction >= 0);
> > -	snd_brk = (char*)(MORECORE(correction));
> > -
> > -	/*
> > -	  If can't allocate correction, try to at least find out current
> > -	  brk.  It might be enough to proceed without failing.
> > -
> > -	  Note that if second sbrk did NOT fail, we assume that space
> > -	  is contiguous with first sbrk. This is a safe assumption unless
> > -	  program is multithreaded but doesn't use locks and a foreign sbrk
> > -	  occurred between our first and second calls.
> > -	*/
> > -
> > -	if (snd_brk == (char*)(MORECORE_FAILURE)) {
> > -	  correction = 0;
> > -	  snd_brk = (char*)(MORECORE(0));
> > -	} else {
> > -	  /* Call the `morecore' hook if necessary.  */
> > -	  void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
> > -	  if (__builtin_expect (hook != NULL, 0))
> > -	    (*hook) ();
> > -	}
> > -      }
> > +      if (contiguous (av))
> > +        size -= old_size;
> >  
> > -      /* handle non-contiguous cases */
> > -      else {
> > -	if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
> > -	  /* MORECORE/mmap must correctly align */
> > -	  assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
> > -	else {
> > -	  front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
> > -	  if (front_misalign > 0) {
> > -
> > -	    /*
> > -	      Skip over some bytes to arrive at an aligned position.
> > -	      We don't need to specially mark these wasted front bytes.
> > -	      They will never be accessed anyway because
> > -	      prev_inuse of av->top (and any chunk created from its start)
> > -	      is always true after initialization.
> > -	    */
> > -
> > -	    aligned_brk += MALLOC_ALIGNMENT - front_misalign;
> > -	  }
> > -	}
> > -
> > -	/* Find out current end of memory */
> > -	if (snd_brk == (char*)(MORECORE_FAILURE)) {
> > -	  snd_brk = (char*)(MORECORE(0));
> > -	}
> > -      }
> > -
> > -      /* Adjust top based on results of second sbrk */
> > -      if (snd_brk != (char*)(MORECORE_FAILURE)) {
> > -	av->top = (mchunkptr)aligned_brk;
> > -	set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
> > -	av->system_mem += correction;
> > -
> > -	/*
> > -	  If not the first time through, we either have a
> > -	  gap due to foreign sbrk or a non-contiguous region.  Insert a
> > -	  double fencepost at old_top to prevent consolidation with space
> > -	  we don't own. These fenceposts are artificial chunks that are
> > -	  marked as inuse and are in any case too small to use.  We need
> > -	  two to make sizes and alignments work out.
> > -	*/
> > -
> > -	if (old_size != 0) {
> > -	  /*
> > -	     Shrink old_top to insert fenceposts, keeping size a
> > -	     multiple of MALLOC_ALIGNMENT. We know there is at least
> > -	     enough space in old_top to do this.
> > -	  */
> > -	  old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
> > -	  set_head(old_top, old_size | PREV_INUSE);
> > -
> > -	  /*
> > -	    Note that the following assignments completely overwrite
> > -	    old_top when old_size was previously MINSIZE.  This is
> > -	    intentional. We need the fencepost, even if old_top otherwise gets
> > -	    lost.
> > -	  */
> > -	  chunk_at_offset(old_top, old_size            )->size =
> > -	    (2*SIZE_SZ)|PREV_INUSE;
> > -
> > -	  chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
> > -	    (2*SIZE_SZ)|PREV_INUSE;
> > -
> > -	  /* If possible, release the rest. */
> > -	  if (old_size >= MINSIZE) {
> > -	    _int_free(av, old_top, 1);
> > -	  }
> > -
> > -	}
> > -      }
> > -    }
> > -  }
> > +      /*
> > +         Round to a multiple of page size.
> > +         If MORECORE is not contiguous, this ensures that we only call it
> > +         with whole-page arguments.  And if MORECORE is contiguous and
> > +         this is not first time through, this preserves page-alignment of
> > +         previous calls. Otherwise, we correct to page-align below.
> > +       */
> >  
> > -  } /* if (av !=  &main_arena) */
> > +      size = (size + pagemask) & ~pagemask;
> >  
> > -  if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
> > +      /*
> > +         Don't try to call MORECORE if argument is so big as to appear
> > +         negative. Note that since mmap takes size_t arg, it may succeed
> > +         below even if we cannot call MORECORE.
> > +       */
> > +
> > +      if (size > 0)
> > +        {
> > +          brk = (char *) (MORECORE (size));
> > +          LIBC_PROBE (memory_sbrk_more, 2, brk, size);
> > +        }
> > +
> > +      if (brk != (char *) (MORECORE_FAILURE))
> > +        {
> > +          /* Call the `morecore' hook if necessary.  */
> > +          void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
> > +          if (__builtin_expect (hook != NULL, 0))
> > +            (*hook)();
> > +        }
> > +      else
> > +        {
> > +          /*
> > +             If have mmap, try using it as a backup when MORECORE fails or
> > +             cannot be used. This is worth doing on systems that have "holes" in
> > +             address space, so sbrk cannot extend to give contiguous space, but
> > +             space is available elsewhere.  Note that we ignore mmap max count
> > +             and threshold limits, since the space will not be used as a
> > +             segregated mmap region.
> > +           */
> > +
> > +          /* Cannot merge with old top, so add its size back in */
> > +          if (contiguous (av))
> > +            size = (size + old_size + pagemask) & ~pagemask;
> > +
> > +          /* If we are relying on mmap as backup, then use larger units */
> > +          if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
> > +            size = MMAP_AS_MORECORE_SIZE;
> > +
> > +          /* Don't try if size wraps around 0 */
> > +          if ((unsigned long) (size) > (unsigned long) (nb))
> > +            {
> > +              char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
> > +
> > +              if (mbrk != MAP_FAILED)
> > +                {
> > +                  /* We do not need, and cannot use, another sbrk call to find end */
> > +                  brk = mbrk;
> > +                  snd_brk = brk + size;
> > +
> > +                  /*
> > +                     Record that we no longer have a contiguous sbrk region.
> > +                     After the first time mmap is used as backup, we do not
> > +                     ever rely on contiguous space since this could incorrectly
> > +                     bridge regions.
> > +                   */
> > +                  set_noncontiguous (av);
> > +                }
> > +            }
> > +        }
> > +
> > +      if (brk != (char *) (MORECORE_FAILURE))
> > +        {
> > +          if (mp_.sbrk_base == 0)
> > +            mp_.sbrk_base = brk;
> > +          av->system_mem += size;
> > +
> > +          /*
> > +             If MORECORE extends previous space, we can likewise extend top size.
> > +           */
> > +
> > +          if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
> > +            set_head (old_top, (size + old_size) | PREV_INUSE);
> > +
> > +          else if (contiguous (av) && old_size && brk < old_end)
> > +            {
> > +              /* Oops!  Someone else killed our space.  Can't touch anything.  */
> > +              malloc_printerr (3, "break adjusted to free malloc space", brk);
> > +            }
> > +
> > +          /*
> > +             Otherwise, make adjustments:
> > +
> > +           * If the first time through or noncontiguous, we need to call sbrk
> > +              just to find out where the end of memory lies.
> > +
> > +           * We need to ensure that all returned chunks from malloc will meet
> > +              MALLOC_ALIGNMENT
> > +
> > +           * If there was an intervening foreign sbrk, we need to adjust sbrk
> > +              request size to account for fact that we will not be able to
> > +              combine new space with existing space in old_top.
> > +
> > +           * Almost all systems internally allocate whole pages at a time, in
> > +              which case we might as well use the whole last page of request.
> > +              So we allocate enough more memory to hit a page boundary now,
> > +              which in turn causes future contiguous calls to page-align.
> > +           */
> > +
> > +          else
> > +            {
> > +              front_misalign = 0;
> > +              end_misalign = 0;
> > +              correction = 0;
> > +              aligned_brk = brk;
> > +
> > +              /* handle contiguous cases */
> > +              if (contiguous (av))
> > +                {
> > +                  /* Count foreign sbrk as system_mem.  */
> > +                  if (old_size)
> > +                    av->system_mem += brk - old_end;
> > +
> > +                  /* Guarantee alignment of first new chunk made from this space */
> > +
> > +                  front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
> > +                  if (front_misalign > 0)
> > +                    {
> > +                      /*
> > +                         Skip over some bytes to arrive at an aligned position.
> > +                         We don't need to specially mark these wasted front bytes.
> > +                         They will never be accessed anyway because
> > +                         prev_inuse of av->top (and any chunk created from its start)
> > +                         is always true after initialization.
> > +                       */
> > +
> > +                      correction = MALLOC_ALIGNMENT - front_misalign;
> > +                      aligned_brk += correction;
> > +                    }
> > +
> > +                  /*
> > +                     If this isn't adjacent to existing space, then we will not
> > +                     be able to merge with old_top space, so must add to 2nd request.
> > +                   */
> > +
> > +                  correction += old_size;
> > +
> > +                  /* Extend the end address to hit a page boundary */
> > +                  end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
> > +                  correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
> > +
> > +                  assert (correction >= 0);
> > +                  snd_brk = (char *) (MORECORE (correction));
> > +
> > +                  /*
> > +                     If can't allocate correction, try to at least find out current
> > +                     brk.  It might be enough to proceed without failing.
> > +
> > +                     Note that if second sbrk did NOT fail, we assume that space
> > +                     is contiguous with first sbrk. This is a safe assumption unless
> > +                     program is multithreaded but doesn't use locks and a foreign sbrk
> > +                     occurred between our first and second calls.
> > +                   */
> > +
> > +                  if (snd_brk == (char *) (MORECORE_FAILURE))
> > +                    {
> > +                      correction = 0;
> > +                      snd_brk = (char *) (MORECORE (0));
> > +                    }
> > +                  else
> > +                    {
> > +                      /* Call the `morecore' hook if necessary.  */
> > +                      void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
> > +                      if (__builtin_expect (hook != NULL, 0))
> > +                        (*hook)();
> > +                    }
> > +                }
> > +
> > +              /* handle non-contiguous cases */
> > +              else
> > +                {
> > +                  if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
> > +                    /* MORECORE/mmap must correctly align */
> > +                    assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
> > +                  else
> > +                    {
> > +                      front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
> > +                      if (front_misalign > 0)
> > +                        {
> > +                          /*
> > +                             Skip over some bytes to arrive at an aligned position.
> > +                             We don't need to specially mark these wasted front bytes.
> > +                             They will never be accessed anyway because
> > +                             prev_inuse of av->top (and any chunk created from its start)
> > +                             is always true after initialization.
> > +                           */
> > +
> > +                          aligned_brk += MALLOC_ALIGNMENT - front_misalign;
> > +                        }
> > +                    }
> > +
> > +                  /* Find out current end of memory */
> > +                  if (snd_brk == (char *) (MORECORE_FAILURE))
> > +                    {
> > +                      snd_brk = (char *) (MORECORE (0));
> > +                    }
> > +                }
> > +
> > +              /* Adjust top based on results of second sbrk */
> > +              if (snd_brk != (char *) (MORECORE_FAILURE))
> > +                {
> > +                  av->top = (mchunkptr) aligned_brk;
> > +                  set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
> > +                  av->system_mem += correction;
> > +
> > +                  /*
> > +                     If not the first time through, we either have a
> > +                     gap due to foreign sbrk or a non-contiguous region.  Insert a
> > +                     double fencepost at old_top to prevent consolidation with space
> > +                     we don't own. These fenceposts are artificial chunks that are
> > +                     marked as inuse and are in any case too small to use.  We need
> > +                     two to make sizes and alignments work out.
> > +                   */
> > +
> > +                  if (old_size != 0)
> > +                    {
> > +                      /*
> > +                         Shrink old_top to insert fenceposts, keeping size a
> > +                         multiple of MALLOC_ALIGNMENT. We know there is at least
> > +                         enough space in old_top to do this.
> > +                       */
> > +                      old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
> > +                      set_head (old_top, old_size | PREV_INUSE);
> > +
> > +                      /*
> > +                         Note that the following assignments completely overwrite
> > +                         old_top when old_size was previously MINSIZE.  This is
> > +                         intentional. We need the fencepost, even if old_top otherwise gets
> > +                         lost.
> > +                       */
> > +                      chunk_at_offset (old_top, old_size)->size =
> > +                        (2 * SIZE_SZ) | PREV_INUSE;
> > +
> > +                      chunk_at_offset (old_top, old_size + 2 * SIZE_SZ)->size =
> > +                        (2 * SIZE_SZ) | PREV_INUSE;
> > +
> > +                      /* If possible, release the rest. */
> > +                      if (old_size >= MINSIZE)
> > +                        {
> > +                          _int_free (av, old_top, 1);
> > +                        }
> > +                    }
> > +                }
> > +            }
> > +        }
> > +    } /* if (av != &main_arena) */
> > +
> > +  if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
> >      av->max_system_mem = av->system_mem;
> > -  check_malloc_state(av);
> > +  check_malloc_state (av);
> >  
> >    /* finally, do the allocation */
> >    p = av->top;
> > -  size = chunksize(p);
> > +  size = chunksize (p);
> >  
> >    /* check that one of the above allocation paths succeeded */
> > -  if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
> > -    remainder_size = size - nb;
> > -    remainder = chunk_at_offset(p, nb);
> > -    av->top = remainder;
> > -    set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -    set_head(remainder, remainder_size | PREV_INUSE);
> > -    check_malloced_chunk(av, p, nb);
> > -    return chunk2mem(p);
> > -  }
> > +  if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
> > +    {
> > +      remainder_size = size - nb;
> > +      remainder = chunk_at_offset (p, nb);
> > +      av->top = remainder;
> > +      set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +      set_head (remainder, remainder_size | PREV_INUSE);
> > +      check_malloced_chunk (av, p, nb);
> > +      return chunk2mem (p);
> > +    }
> >  
> >    /* catch all failure paths */
> >    __set_errno (ENOMEM);
> > @@ -2677,26 +2721,27 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
> >  
> >  
> >  /*
> > -  systrim is an inverse of sorts to sysmalloc.  It gives memory back
> > -  to the system (via negative arguments to sbrk) if there is unused
> > -  memory at the `high' end of the malloc pool. It is called
> > -  automatically by free() when top space exceeds the trim
> > -  threshold. It is also called by the public malloc_trim routine.  It
> > -  returns 1 if it actually released any memory, else 0.
> > -*/
> > -
> > -static int systrim(size_t pad, mstate av)
> > +   systrim is an inverse of sorts to sysmalloc.  It gives memory back
> > +   to the system (via negative arguments to sbrk) if there is unused
> > +   memory at the `high' end of the malloc pool. It is called
> > +   automatically by free() when top space exceeds the trim
> > +   threshold. It is also called by the public malloc_trim routine.  It
> > +   returns 1 if it actually released any memory, else 0.
> > + */
> > +
> > +static int
> > +systrim (size_t pad, mstate av)
> >  {
> > -  long  top_size;        /* Amount of top-most memory */
> > -  long  extra;           /* Amount to release */
> > -  long  released;        /* Amount actually released */
> > -  char* current_brk;     /* address returned by pre-check sbrk call */
> > -  char* new_brk;         /* address returned by post-check sbrk call */
> > +  long top_size;         /* Amount of top-most memory */
> > +  long extra;            /* Amount to release */
> > +  long released;         /* Amount actually released */
> > +  char *current_brk;     /* address returned by pre-check sbrk call */
> > +  char *new_brk;         /* address returned by post-check sbrk call */
> >    size_t pagesz;
> > -  long  top_area;
> > +  long top_area;
> >  
> > -  pagesz = GLRO(dl_pagesize);
> > -  top_size = chunksize(av->top);
> > +  pagesz = GLRO (dl_pagesize);
> > +  top_size = chunksize (av->top);
> >  
> >    top_area = top_size - MINSIZE - 1;
> >    if (top_area <= pad)
> > @@ -2706,53 +2751,55 @@ static int systrim(size_t pad, mstate av)
> >    extra = (top_area - pad) & ~(pagesz - 1);
> >  
> >    /*
> > -    Only proceed if end of memory is where we last set it.
> > -    This avoids problems if there were foreign sbrk calls.
> > -  */
> > -  current_brk = (char*)(MORECORE(0));
> > -  if (current_brk == (char*)(av->top) + top_size) {
> > -
> > -    /*
> > -      Attempt to release memory. We ignore MORECORE return value,
> > -      and instead call again to find out where new end of memory is.
> > -      This avoids problems if first call releases less than we asked,
> > -      of if failure somehow altered brk value. (We could still
> > -      encounter problems if it altered brk in some very bad way,
> > -      but the only thing we can do is adjust anyway, which will cause
> > -      some downstream failure.)
> > -    */
> > -
> > -    MORECORE(-extra);
> > -    /* Call the `morecore' hook if necessary.  */
> > -    void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
> > -    if (__builtin_expect (hook != NULL, 0))
> > -      (*hook) ();
> > -    new_brk = (char*)(MORECORE(0));
> > -
> > -    LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
> > -
> > -    if (new_brk != (char*)MORECORE_FAILURE) {
> > -      released = (long)(current_brk - new_brk);
> > -
> > -      if (released != 0) {
> > -	/* Success. Adjust top. */
> > -	av->system_mem -= released;
> > -	set_head(av->top, (top_size - released) | PREV_INUSE);
> > -	check_malloc_state(av);
> > -	return 1;
> > -       }
> > +     Only proceed if end of memory is where we last set it.
> > +     This avoids problems if there were foreign sbrk calls.
> > +   */
> > +  current_brk = (char *) (MORECORE (0));
> > +  if (current_brk == (char *) (av->top) + top_size)
> > +    {
> > +      /*
> > +         Attempt to release memory. We ignore MORECORE return value,
> > +         and instead call again to find out where new end of memory is.
> > +         This avoids problems if first call releases less than we asked,
> > +         or if failure somehow altered brk value. (We could still
> > +         encounter problems if it altered brk in some very bad way,
> > +         but the only thing we can do is adjust anyway, which will cause
> > +         some downstream failure.)
> > +       */
> > +
> > +      MORECORE (-extra);
> > +      /* Call the `morecore' hook if necessary.  */
> > +      void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
> > +      if (__builtin_expect (hook != NULL, 0))
> > +        (*hook)();
> > +      new_brk = (char *) (MORECORE (0));
> > +
> > +      LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
> > +
> > +      if (new_brk != (char *) MORECORE_FAILURE)
> > +        {
> > +          released = (long) (current_brk - new_brk);
> > +
> > +          if (released != 0)
> > +            {
> > +              /* Success. Adjust top. */
> > +              av->system_mem -= released;
> > +              set_head (av->top, (top_size - released) | PREV_INUSE);
> > +              check_malloc_state (av);
> > +              return 1;
> > +            }
> > +        }
> >      }
> > -  }
> >    return 0;
> >  }
> >  
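For anyone skimming the hunk above rather than the full file: the trick
systrim relies on is to never trust the return value of the shrinking
MORECORE call, and instead to re-query the break afterwards. A minimal
standalone sketch of the same idea, using plain sbrk from <unistd.h>
instead of glibc's MORECORE abstraction (names hypothetical):

  #include <unistd.h>

  /* Shrink the program break by EXTRA bytes and report how much was
     actually released.  As in systrim: check the break is where we
     left it, shrink, then ask again instead of trusting the return
     value of the shrinking call.  */
  static long
  trim_break (char *expected_end, long extra)
  {
    char *current_brk = (char *) sbrk (0);
    if (current_brk != expected_end)   /* a foreign sbrk moved the break */
      return 0;

    sbrk (-extra);                     /* ignore the result ...  */
    char *new_brk = (char *) sbrk (0); /* ... and re-query instead */
    if (new_brk == (char *) -1)
      return 0;

    return (long) (current_brk - new_brk);
  }
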
> >  static void
> >  internal_function
> > -munmap_chunk(mchunkptr p)
> > +munmap_chunk (mchunkptr p)
> >  {
> > -  INTERNAL_SIZE_T size = chunksize(p);
> > +  INTERNAL_SIZE_T size = chunksize (p);
> >  
> > -  assert (chunk_is_mmapped(p));
> > +  assert (chunk_is_mmapped (p));
> >  
> >    uintptr_t block = (uintptr_t) p - p->prev_size;
> >    size_t total_size = p->prev_size + size;
> > @@ -2761,10 +2808,10 @@ munmap_chunk(mchunkptr p)
> >       page size.  But gcc does not recognize the optimization possibility
> >       (in the moment at least) so we combine the two values into one before
> >       the bit test.  */
> > -  if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
> > +  if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
> >      {
> >        malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
> > -		       chunk2mem (p));
> > +                       chunk2mem (p));
> >        return;
> >      }
> >  
> > @@ -2774,22 +2821,22 @@ munmap_chunk(mchunkptr p)
> >    /* If munmap failed the process virtual memory address space is in a
> >       bad shape.  Just leave the block hanging around, the process will
> >       terminate shortly anyway since not much can be done.  */
> > -  __munmap((char *)block, total_size);
> > +  __munmap ((char *) block, total_size);
> >  }
> >  
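The comment in munmap_chunk about gcc missing the optimization refers
to folding two page-alignment tests into a single branch. In isolation
the idiom is just this (sketch; assumes a power-of-two page size):

  #include <stdint.h>
  #include <stddef.h>

  /* Nonzero iff both the address and the length are page aligned.
     OR-ing the two values first lets the compiler emit one AND and
     one branch instead of two of each.  */
  static int
  both_page_aligned (uintptr_t block, size_t total_size, size_t pagesize)
  {
    return ((block | total_size) & (pagesize - 1)) == 0;
  }
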
> >  #if HAVE_MREMAP
> >  
> >  static mchunkptr
> >  internal_function
> > -mremap_chunk(mchunkptr p, size_t new_size)
> > +mremap_chunk (mchunkptr p, size_t new_size)
> >  {
> > -  size_t page_mask = GLRO(dl_pagesize) - 1;
> > +  size_t page_mask = GLRO (dl_pagesize) - 1;
> >    INTERNAL_SIZE_T offset = p->prev_size;
> > -  INTERNAL_SIZE_T size = chunksize(p);
> > +  INTERNAL_SIZE_T size = chunksize (p);
> >    char *cp;
> >  
> > -  assert (chunk_is_mmapped(p));
> > -  assert(((size + offset) & (GLRO(dl_pagesize)-1)) == 0);
> > +  assert (chunk_is_mmapped (p));
> > +  assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
> >  
> >    /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
> >    new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
> > @@ -2798,31 +2845,31 @@ mremap_chunk(mchunkptr p, size_t new_size)
> >    if (size + offset == new_size)
> >      return p;
> >  
> > -  cp = (char *)__mremap((char *)p - offset, size + offset, new_size,
> > -			MREMAP_MAYMOVE);
> > +  cp = (char *) __mremap ((char *) p - offset, size + offset, new_size,
> > +                          MREMAP_MAYMOVE);
> >  
> > -  if (cp == MAP_FAILED) return 0;
> > +  if (cp == MAP_FAILED)
> > +    return 0;
> >  
> > -  p = (mchunkptr)(cp + offset);
> > +  p = (mchunkptr) (cp + offset);
> >  
> > -  assert(aligned_OK(chunk2mem(p)));
> > +  assert (aligned_OK (chunk2mem (p)));
> >  
> > -  assert((p->prev_size == offset));
> > -  set_head(p, (new_size - offset)|IS_MMAPPED);
> > +  assert ((p->prev_size == offset));
> > +  set_head (p, (new_size - offset) | IS_MMAPPED);
> >  
> >    INTERNAL_SIZE_T new;
> >    new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
> > -	+ new_size - size - offset;
> > +        + new_size - size - offset;
> >    atomic_max (&mp_.max_mmapped_mem, new);
> >    return p;
> >  }
> > -
> >  #endif /* HAVE_MREMAP */
> >  
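The tail of mremap_chunk keeps mp_.mmapped_mem and its high-water mark
mp_.max_mmapped_mem up to date without a lock, via
atomic_exchange_and_add plus atomic_max. In standard C11 atomics the
same pattern is a fetch-add followed by a compare-exchange loop
(sketch only; the globals are hypothetical stand-ins for the mp_
fields):

  #include <stdatomic.h>
  #include <stddef.h>

  static _Atomic size_t mmapped_mem;     /* running total */
  static _Atomic size_t max_mmapped_mem; /* high-water mark */

  static void
  account_mmap (size_t delta)
  {
    /* fetch_add returns the old value; add DELTA for the new total.  */
    size_t total = atomic_fetch_add (&mmapped_mem, delta) + delta;

    /* Raise the recorded maximum; a failed exchange refreshes OLD,
       so the loop retries until TOTAL is no longer an improvement.  */
    size_t old = atomic_load (&max_mmapped_mem);
    while (old < total
           && !atomic_compare_exchange_weak (&max_mmapped_mem, &old, total))
      ;
  }
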
> >  /*------------------------ Public wrappers. --------------------------------*/
> >  
> > -void*
> > -__libc_malloc(size_t bytes)
> > +void *
> > +__libc_malloc (size_t bytes)
> >  {
> >    mstate ar_ptr;
> >    void *victim;
> > @@ -2832,73 +2879,78 @@ __libc_malloc(size_t bytes)
> >    if (__builtin_expect (hook != NULL, 0))
> >      return (*hook)(bytes, RETURN_ADDRESS (0));
> >  
> > -  arena_lookup(ar_ptr);
> > +  arena_lookup (ar_ptr);
> >  
> > -  arena_lock(ar_ptr, bytes);
> > -  if(!ar_ptr)
> > +  arena_lock (ar_ptr, bytes);
> > +  if (!ar_ptr)
> >      return 0;
> > -  victim = _int_malloc(ar_ptr, bytes);
> > -  if(!victim) {
> > -    LIBC_PROBE (memory_malloc_retry, 1, bytes);
> > -    ar_ptr = arena_get_retry(ar_ptr, bytes);
> > -    if (__builtin_expect(ar_ptr != NULL, 1)) {
> > -      victim = _int_malloc(ar_ptr, bytes);
> > -      (void)mutex_unlock(&ar_ptr->mutex);
> > +
> > +  victim = _int_malloc (ar_ptr, bytes);
> > +  if (!victim)
> > +    {
> > +      LIBC_PROBE (memory_malloc_retry, 1, bytes);
> > +      ar_ptr = arena_get_retry (ar_ptr, bytes);
> > +      if (__builtin_expect (ar_ptr != NULL, 1))
> > +        {
> > +          victim = _int_malloc (ar_ptr, bytes);
> > +          (void) mutex_unlock (&ar_ptr->mutex);
> > +        }
> >      }
> > -  } else
> > -    (void)mutex_unlock(&ar_ptr->mutex);
> > -  assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
> > -	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
> > +  else
> > +    (void) mutex_unlock (&ar_ptr->mutex);
> > +  assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
> > +          ar_ptr == arena_for_chunk (mem2chunk (victim)));
> >    return victim;
> >  }
> > -libc_hidden_def(__libc_malloc)
> > +libc_hidden_def (__libc_malloc)
> >  
> >  void
> > -__libc_free(void* mem)
> > +__libc_free (void *mem)
> >  {
> >    mstate ar_ptr;
> >    mchunkptr p;                          /* chunk corresponding to mem */
> >  
> >    void (*hook) (void *, const void *)
> >      = atomic_forced_read (__free_hook);
> > -  if (__builtin_expect (hook != NULL, 0)) {
> > -    (*hook)(mem, RETURN_ADDRESS (0));
> > -    return;
> > -  }
> > +  if (__builtin_expect (hook != NULL, 0))
> > +    {
> > +      (*hook)(mem, RETURN_ADDRESS (0));
> > +      return;
> > +    }
> >  
> >    if (mem == 0)                              /* free(0) has no effect */
> >      return;
> >  
> > -  p = mem2chunk(mem);
> > +  p = mem2chunk (mem);
> >  
> > -  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
> > -  {
> > -    /* see if the dynamic brk/mmap threshold needs adjusting */
> > -    if (!mp_.no_dyn_threshold
> > -	&& p->size > mp_.mmap_threshold
> > -	&& p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
> > -      {
> > -	mp_.mmap_threshold = chunksize (p);
> > -	mp_.trim_threshold = 2 * mp_.mmap_threshold;
> > -	LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
> > -		    mp_.mmap_threshold, mp_.trim_threshold);
> > -      }
> > -    munmap_chunk(p);
> > -    return;
> > -  }
> > +  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
> > +    {
> > +      /* see if the dynamic brk/mmap threshold needs adjusting */
> > +      if (!mp_.no_dyn_threshold
> > +          && p->size > mp_.mmap_threshold
> > +          && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
> > +        {
> > +          mp_.mmap_threshold = chunksize (p);
> > +          mp_.trim_threshold = 2 * mp_.mmap_threshold;
> > +          LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
> > +                      mp_.mmap_threshold, mp_.trim_threshold);
> > +        }
> > +      munmap_chunk (p);
> > +      return;
> > +    }
> >  
> > -  ar_ptr = arena_for_chunk(p);
> > -  _int_free(ar_ptr, p, 0);
> > +  ar_ptr = arena_for_chunk (p);
> > +  _int_free (ar_ptr, p, 0);
> >  }
> >  libc_hidden_def (__libc_free)
> >  
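The dynamic-threshold block in __libc_free above implements a simple
heuristic: freeing an mmapped chunk that is bigger than the current
mmap threshold (but under the hard cap) suggests chunks of that size
are short-lived, so future requests of that size are better served
from the heap. Reduced to its essentials (sketch; the globals and the
cap are hypothetical placeholders for the mp_ fields):

  static size_t mmap_threshold = 128 * 1024;
  static size_t trim_threshold = 256 * 1024;
  #define THRESHOLD_CAP (32UL * 1024 * 1024)  /* hypothetical */

  static void
  maybe_raise_threshold (size_t freed_size)
  {
    if (freed_size > mmap_threshold && freed_size <= THRESHOLD_CAP)
      {
        mmap_threshold = freed_size;          /* mmap less often */
        trim_threshold = 2 * mmap_threshold;  /* and trim less often */
      }
  }
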
> > -void*
> > -__libc_realloc(void* oldmem, size_t bytes)
> > +void *
> > +__libc_realloc (void *oldmem, size_t bytes)
> >  {
> >    mstate ar_ptr;
> > -  INTERNAL_SIZE_T    nb;      /* padded request size */
> > +  INTERNAL_SIZE_T nb;         /* padded request size */
> >  
> > -  void* newp;             /* chunk to return */
> > +  void *newp;             /* chunk to return */
> >  
> >    void *(*hook) (void *, size_t, const void *) =
> >      atomic_forced_read (__realloc_hook);
> > @@ -2906,16 +2958,20 @@ __libc_realloc(void* oldmem, size_t bytes)
> >      return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
> >  
> >  #if REALLOC_ZERO_BYTES_FREES
> > -  if (bytes == 0 && oldmem != NULL) { __libc_free(oldmem); return 0; }
> > +  if (bytes == 0 && oldmem != NULL)
> > +    {
> > +      __libc_free (oldmem); return 0;
> > +    }
> >  #endif
> >  
> >    /* realloc of null is supposed to be same as malloc */
> > -  if (oldmem == 0) return __libc_malloc(bytes);
> > +  if (oldmem == 0)
> > +    return __libc_malloc (bytes);
> >  
> >    /* chunk corresponding to oldmem */
> > -  const mchunkptr oldp    = mem2chunk(oldmem);
> > +  const mchunkptr oldp = mem2chunk (oldmem);
> >    /* its size */
> > -  const INTERNAL_SIZE_T oldsize = chunksize(oldp);
> > +  const INTERNAL_SIZE_T oldsize = chunksize (oldp);
> >  
> >    /* Little security check which won't hurt performance: the
> >       allocator never wraps around at the end of the address space.
> > @@ -2928,63 +2984,69 @@ __libc_realloc(void* oldmem, size_t bytes)
> >        return NULL;
> >      }
> >  
> > -  checked_request2size(bytes, nb);
> > +  checked_request2size (bytes, nb);
> >  
> > -  if (chunk_is_mmapped(oldp))
> > -  {
> > -    void* newmem;
> > +  if (chunk_is_mmapped (oldp))
> > +    {
> > +      void *newmem;
> >  
> >  #if HAVE_MREMAP
> > -    newp = mremap_chunk(oldp, nb);
> > -    if(newp) return chunk2mem(newp);
> > +      newp = mremap_chunk (oldp, nb);
> > +      if (newp)
> > +        return chunk2mem (newp);
> >  #endif
> > -    /* Note the extra SIZE_SZ overhead. */
> > -    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
> > -    /* Must alloc, copy, free. */
> > -    newmem = __libc_malloc(bytes);
> > -    if (newmem == 0) return 0; /* propagate failure */
> > -    memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
> > -    munmap_chunk(oldp);
> > -    return newmem;
> > -  }
> > +      /* Note the extra SIZE_SZ overhead. */
> > +      if (oldsize - SIZE_SZ >= nb)
> > +        return oldmem;                         /* do nothing */
> > +
> > +      /* Must alloc, copy, free. */
> > +      newmem = __libc_malloc (bytes);
> > +      if (newmem == 0)
> > +        return 0;              /* propagate failure */
> >  
> > -  ar_ptr = arena_for_chunk(oldp);
> > +      memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
> > +      munmap_chunk (oldp);
> > +      return newmem;
> > +    }
> > +
> > +  ar_ptr = arena_for_chunk (oldp);
> >  #if THREAD_STATS
> > -  if(!mutex_trylock(&ar_ptr->mutex))
> > +  if (!mutex_trylock (&ar_ptr->mutex))
> >      ++(ar_ptr->stat_lock_direct);
> > -  else {
> > -    (void)mutex_lock(&ar_ptr->mutex);
> > -    ++(ar_ptr->stat_lock_wait);
> > -  }
> > +  else
> > +    {
> > +      (void) mutex_lock (&ar_ptr->mutex);
> > +      ++(ar_ptr->stat_lock_wait);
> > +    }
> >  #else
> > -  (void)mutex_lock(&ar_ptr->mutex);
> > +  (void) mutex_lock (&ar_ptr->mutex);
> >  #endif
> >  
> >  
> > -  newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
> > +  newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
> >  
> > -  (void)mutex_unlock(&ar_ptr->mutex);
> > -  assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
> > -	 ar_ptr == arena_for_chunk(mem2chunk(newp)));
> > +  (void) mutex_unlock (&ar_ptr->mutex);
> > +  assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
> > +          ar_ptr == arena_for_chunk (mem2chunk (newp)));
> >  
> >    if (newp == NULL)
> >      {
> >        /* Try harder to allocate memory in other arenas.  */
> >        LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
> > -      newp = __libc_malloc(bytes);
> > +      newp = __libc_malloc (bytes);
> >        if (newp != NULL)
> > -	{
> > -	  memcpy (newp, oldmem, oldsize - SIZE_SZ);
> > -	  _int_free(ar_ptr, oldp, 0);
> > -	}
> > +        {
> > +          memcpy (newp, oldmem, oldsize - SIZE_SZ);
> > +          _int_free (ar_ptr, oldp, 0);
> > +        }
> >      }
> >  
> >    return newp;
> >  }
> >  libc_hidden_def (__libc_realloc)
> >  
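When _int_realloc fails in the chunk's own arena, the wrapper above
falls back to the plain allocate-copy-free sequence. Stripped of the
arena and chunk bookkeeping, that fallback is just this (sketch;
OLD_USABLE stands in for the oldsize - SIZE_SZ computation):

  #include <stdlib.h>
  #include <string.h>

  static void *
  realloc_fallback (void *oldmem, size_t old_usable, size_t bytes)
  {
    void *newp = malloc (bytes);
    if (newp != NULL)
      {
        /* Copy only what is valid in both blocks, then release.  */
        memcpy (newp, oldmem, old_usable < bytes ? old_usable : bytes);
        free (oldmem);
      }
    return newp;   /* NULL on failure; OLDMEM stays valid then */
  }
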
> > -void*
> > -__libc_memalign(size_t alignment, size_t bytes)
> > +void *
> > +__libc_memalign (size_t alignment, size_t bytes)
> >  {
> >    void *address = RETURN_ADDRESS (0);
> >    return _mid_memalign (alignment, bytes, address);
> > @@ -3002,10 +3064,12 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
> >      return (*hook)(alignment, bytes, address);
> >  
> >    /* If we need less alignment than we give anyway, just relay to malloc.  */
> > -  if (alignment <= MALLOC_ALIGNMENT) return __libc_malloc(bytes);
> > +  if (alignment <= MALLOC_ALIGNMENT)
> > +    return __libc_malloc (bytes);
> >  
> >    /* Otherwise, ensure that it is at least a minimum chunk size */
> > -  if (alignment <  MINSIZE) alignment = MINSIZE;
> > +  if (alignment < MINSIZE)
> > +    alignment = MINSIZE;
> >  
> >    /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
> >       power of 2 and will cause overflow in the check below.  */
> > @@ -3024,58 +3088,63 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
> >  
> >  
> >    /* Make sure alignment is power of 2.  */
> > -  if (!powerof2(alignment)) {
> > -    size_t a = MALLOC_ALIGNMENT * 2;
> > -    while (a < alignment) a <<= 1;
> > -    alignment = a;
> > -  }
> > +  if (!powerof2 (alignment))
> > +    {
> > +      size_t a = MALLOC_ALIGNMENT * 2;
> > +      while (a < alignment)
> > +        a <<= 1;
> > +      alignment = a;
> > +    }
> >  
> > -  arena_get(ar_ptr, bytes + alignment + MINSIZE);
> > -  if(!ar_ptr)
> > +  arena_get (ar_ptr, bytes + alignment + MINSIZE);
> > +  if (!ar_ptr)
> >      return 0;
> > -  p = _int_memalign(ar_ptr, alignment, bytes);
> > -  if(!p) {
> > -    LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
> > -    ar_ptr = arena_get_retry (ar_ptr, bytes);
> > -    if (__builtin_expect(ar_ptr != NULL, 1)) {
> > -      p = _int_memalign(ar_ptr, alignment, bytes);
> > -      (void)mutex_unlock(&ar_ptr->mutex);
> > +
> > +  p = _int_memalign (ar_ptr, alignment, bytes);
> > +  if (!p)
> > +    {
> > +      LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
> > +      ar_ptr = arena_get_retry (ar_ptr, bytes);
> > +      if (__builtin_expect (ar_ptr != NULL, 1))
> > +        {
> > +          p = _int_memalign (ar_ptr, alignment, bytes);
> > +          (void) mutex_unlock (&ar_ptr->mutex);
> > +        }
> >      }
> > -  } else
> > -    (void)mutex_unlock(&ar_ptr->mutex);
> > -  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
> > -	 ar_ptr == arena_for_chunk(mem2chunk(p)));
> > +  else
> > +    (void) mutex_unlock (&ar_ptr->mutex);
> > +  assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
> > +          ar_ptr == arena_for_chunk (mem2chunk (p)));
> >    return p;
> >  }
> >  /* For ISO C11.  */
> >  weak_alias (__libc_memalign, aligned_alloc)
> >  libc_hidden_def (__libc_memalign)
> >  
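_mid_memalign's "make sure alignment is power of 2" loop rounds a
bogus alignment up by repeated doubling; the earlier SIZE_MAX / 2 + 1
check is what guarantees the shift below cannot overflow. In isolation
(sketch; MINIMUM plays the role of MALLOC_ALIGNMENT * 2):

  #include <stddef.h>

  /* Round ALIGNMENT up to the next power of two >= MINIMUM, where
     MINIMUM is itself a power of two and ALIGNMENT has already been
     checked against SIZE_MAX / 2 + 1.  */
  static size_t
  round_up_pow2 (size_t alignment, size_t minimum)
  {
    size_t a = minimum;
    while (a < alignment)
      a <<= 1;
    return a;
  }
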
> > -void*
> > -__libc_valloc(size_t bytes)
> > +void *
> > +__libc_valloc (size_t bytes)
> >  {
> > -  if(__malloc_initialized < 0)
> > +  if (__malloc_initialized < 0)
> >      ptmalloc_init ();
> >  
> >    void *address = RETURN_ADDRESS (0);
> > -  size_t pagesz = GLRO(dl_pagesize);
> > +  size_t pagesz = GLRO (dl_pagesize);
> >    return _mid_memalign (pagesz, bytes, address);
> >  }
> >  
> > -void*
> > -__libc_pvalloc(size_t bytes)
> > +void *
> > +__libc_pvalloc (size_t bytes)
> >  {
> > -
> > -  if(__malloc_initialized < 0)
> > +  if (__malloc_initialized < 0)
> >      ptmalloc_init ();
> >  
> >    void *address = RETURN_ADDRESS (0);
> > -  size_t pagesz = GLRO(dl_pagesize);
> > -  size_t page_mask = GLRO(dl_pagesize) - 1;
> > +  size_t pagesz = GLRO (dl_pagesize);
> > +  size_t page_mask = GLRO (dl_pagesize) - 1;
> >    size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
> >  
> >    /* Check for overflow.  */
> > -  if (bytes > SIZE_MAX - 2*pagesz - MINSIZE)
> > +  if (bytes > SIZE_MAX - 2 * pagesz - MINSIZE)
> >      {
> >        __set_errno (ENOMEM);
> >        return 0;
> > @@ -3084,235 +3153,254 @@ __libc_pvalloc(size_t bytes)
> >    return _mid_memalign (pagesz, rounded_bytes, address);
> >  }
> >  
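__libc_pvalloc rounds the request up to whole pages, and the overflow
check in the hunk above has to run before the rounding, since
bytes + page_mask itself can wrap. The same guard in isolation
(sketch; MINSIZE here is a hypothetical constant, glibc derives the
real one from the chunk layout):

  #include <stddef.h>
  #include <stdint.h>

  #define MINSIZE 32   /* hypothetical */

  /* Round BYTES up to a multiple of PAGESZ (a power of two), or
     return 0 with *OVERFLOW set when the padded request would wrap.
     The 2 * pagesz + MINSIZE slack mirrors the patch.  */
  static size_t
  round_to_pages (size_t bytes, size_t pagesz, int *overflow)
  {
    size_t page_mask = pagesz - 1;
    if (bytes > SIZE_MAX - 2 * pagesz - MINSIZE)
      {
        *overflow = 1;
        return 0;
      }
    *overflow = 0;
    return (bytes + page_mask) & ~page_mask;
  }
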
> > -void*
> > -__libc_calloc(size_t n, size_t elem_size)
> > +void *
> > +__libc_calloc (size_t n, size_t elem_size)
> >  {
> >    mstate av;
> >    mchunkptr oldtop, p;
> >    INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
> > -  void* mem;
> > +  void *mem;
> >    unsigned long clearsize;
> >    unsigned long nclears;
> > -  INTERNAL_SIZE_T* d;
> > +  INTERNAL_SIZE_T *d;
> >  
> >    /* size_t is unsigned so the behavior on overflow is defined.  */
> >    bytes = n * elem_size;
> >  #define HALF_INTERNAL_SIZE_T \
> >    (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
> > -  if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
> > -    if (elem_size != 0 && bytes / elem_size != n) {
> > -      __set_errno (ENOMEM);
> > -      return 0;
> > +  if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
> > +    {
> > +      if (elem_size != 0 && bytes / elem_size != n)
> > +        {
> > +          __set_errno (ENOMEM);
> > +          return 0;
> > +        }
> >      }
> > -  }
> >  
> >    void *(*hook) (size_t, const void *) =
> >      atomic_forced_read (__malloc_hook);
> > -  if (__builtin_expect (hook != NULL, 0)) {
> > -    sz = bytes;
> > -    mem = (*hook)(sz, RETURN_ADDRESS (0));
> > -    if(mem == 0)
> > -      return 0;
> > -    return memset(mem, 0, sz);
> > -  }
> > +  if (__builtin_expect (hook != NULL, 0))
> > +    {
> > +      sz = bytes;
> > +      mem = (*hook)(sz, RETURN_ADDRESS (0));
> > +      if (mem == 0)
> > +        return 0;
> > +
> > +      return memset (mem, 0, sz);
> > +    }
> >  
> >    sz = bytes;
> >  
> > -  arena_get(av, sz);
> > -  if(!av)
> > +  arena_get (av, sz);
> > +  if (!av)
> >      return 0;
> >  
> >    /* Check if we hand out the top chunk, in which case there may be no
> >       need to clear. */
> >  #if MORECORE_CLEARS
> > -  oldtop = top(av);
> > -  oldtopsize = chunksize(top(av));
> > -#if MORECORE_CLEARS < 2
> > +  oldtop = top (av);
> > +  oldtopsize = chunksize (top (av));
> > +# if MORECORE_CLEARS < 2
> >    /* Only newly allocated memory is guaranteed to be cleared.  */
> >    if (av == &main_arena &&
> > -      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
> > -    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
> > -#endif
> > +      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
> > +    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
> > +# endif
> >    if (av != &main_arena)
> >      {
> >        heap_info *heap = heap_for_ptr (oldtop);
> >        if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
> > -	oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
> > +        oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
> >      }
> >  #endif
> > -  mem = _int_malloc(av, sz);
> > +  mem = _int_malloc (av, sz);
> >  
> >  
> > -  assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
> > -	 av == arena_for_chunk(mem2chunk(mem)));
> > +  assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
> > +          av == arena_for_chunk (mem2chunk (mem)));
> >  
> > -  if (mem == 0) {
> > -    LIBC_PROBE (memory_calloc_retry, 1, sz);
> > -    av = arena_get_retry (av, sz);
> > -    if (__builtin_expect(av != NULL, 1)) {
> > -      mem = _int_malloc(av, sz);
> > -      (void)mutex_unlock(&av->mutex);
> > +  if (mem == 0)
> > +    {
> > +      LIBC_PROBE (memory_calloc_retry, 1, sz);
> > +      av = arena_get_retry (av, sz);
> > +      if (__builtin_expect (av != NULL, 1))
> > +        {
> > +          mem = _int_malloc (av, sz);
> > +          (void) mutex_unlock (&av->mutex);
> > +        }
> > +      if (mem == 0)
> > +        return 0;
> >      }
> > -    if (mem == 0) return 0;
> > -  } else
> > -    (void)mutex_unlock(&av->mutex);
> > -  p = mem2chunk(mem);
> > +  else
> > +    (void) mutex_unlock (&av->mutex);
> > +  p = mem2chunk (mem);
> >  
> >    /* Two optional cases in which clearing not necessary */
> >    if (chunk_is_mmapped (p))
> >      {
> >        if (__builtin_expect (perturb_byte, 0))
> > -	return memset (mem, 0, sz);
> > +        return memset (mem, 0, sz);
> > +
> >        return mem;
> >      }
> >  
> > -  csz = chunksize(p);
> > +  csz = chunksize (p);
> >  
> >  #if MORECORE_CLEARS
> > -  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
> > -    /* clear only the bytes from non-freshly-sbrked memory */
> > -    csz = oldtopsize;
> > -  }
> > +  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
> > +    {
> > +      /* clear only the bytes from non-freshly-sbrked memory */
> > +      csz = oldtopsize;
> > +    }
> >  #endif
> >  
> >    /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
> >       contents have an odd number of INTERNAL_SIZE_T-sized words;
> >       minimally 3.  */
> > -  d = (INTERNAL_SIZE_T*)mem;
> > +  d = (INTERNAL_SIZE_T *) mem;
> >    clearsize = csz - SIZE_SZ;
> > -  nclears = clearsize / sizeof(INTERNAL_SIZE_T);
> > -  assert(nclears >= 3);
> > +  nclears = clearsize / sizeof (INTERNAL_SIZE_T);
> > +  assert (nclears >= 3);
> >  
> >    if (nclears > 9)
> > -    return memset(d, 0, clearsize);
> > -
> > -  else {
> > -    *(d+0) = 0;
> > -    *(d+1) = 0;
> > -    *(d+2) = 0;
> > -    if (nclears > 4) {
> > -      *(d+3) = 0;
> > -      *(d+4) = 0;
> > -      if (nclears > 6) {
> > -	*(d+5) = 0;
> > -	*(d+6) = 0;
> > -	if (nclears > 8) {
> > -	  *(d+7) = 0;
> > -	  *(d+8) = 0;
> > -	}
> > -      }
> > +    return memset (d, 0, clearsize);
> > +
> > +  else
> > +    {
> > +      *(d + 0) = 0;
> > +      *(d + 1) = 0;
> > +      *(d + 2) = 0;
> > +      if (nclears > 4)
> > +        {
> > +          *(d + 3) = 0;
> > +          *(d + 4) = 0;
> > +          if (nclears > 6)
> > +            {
> > +              *(d + 5) = 0;
> > +              *(d + 6) = 0;
> > +              if (nclears > 8)
> > +                {
> > +                  *(d + 7) = 0;
> > +                  *(d + 8) = 0;
> > +                }
> > +            }
> > +        }
> >      }
> > -  }
> >  
> >    return mem;
> >  }
> >  
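The HALF_INTERNAL_SIZE_T test at the top of __libc_calloc is the
classic cheap overflow check: if both factors fit in the lower half of
the word the product cannot wrap, and only otherwise is the division
test needed. As a standalone helper (sketch, using size_t in place of
INTERNAL_SIZE_T):

  #include <stddef.h>

  #define HALF_SIZE_T (((size_t) 1) << (8 * sizeof (size_t) / 2))

  /* Compute *BYTES = N * ELEM_SIZE; return 1 iff it overflowed.
     Unsigned multiplication wraps rather than trapping, so the fast
     path costs just one OR and one compare.  */
  static int
  mul_overflows (size_t n, size_t elem_size, size_t *bytes)
  {
    *bytes = n * elem_size;
    return (n | elem_size) >= HALF_SIZE_T
           && elem_size != 0
           && *bytes / elem_size != n;
  }
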
> >  /*
> > -  ------------------------------ malloc ------------------------------
> > -*/
> > +   ------------------------------ malloc ------------------------------
> > + */
> >  
> > -static void*
> > -_int_malloc(mstate av, size_t bytes)
> > +static void *
> > +_int_malloc (mstate av, size_t bytes)
> >  {
> >    INTERNAL_SIZE_T nb;               /* normalized request size */
> > -  unsigned int    idx;              /* associated bin index */
> > -  mbinptr         bin;              /* associated bin */
> > +  unsigned int idx;                 /* associated bin index */
> > +  mbinptr bin;                      /* associated bin */
> >  
> > -  mchunkptr       victim;           /* inspected/selected chunk */
> > +  mchunkptr victim;                 /* inspected/selected chunk */
> >    INTERNAL_SIZE_T size;             /* its size */
> > -  int             victim_index;     /* its bin index */
> > +  int victim_index;                 /* its bin index */
> >  
> > -  mchunkptr       remainder;        /* remainder from a split */
> > -  unsigned long   remainder_size;   /* its size */
> > +  mchunkptr remainder;              /* remainder from a split */
> > +  unsigned long remainder_size;     /* its size */
> >  
> > -  unsigned int    block;            /* bit map traverser */
> > -  unsigned int    bit;              /* bit map traverser */
> > -  unsigned int    map;              /* current word of binmap */
> > +  unsigned int block;               /* bit map traverser */
> > +  unsigned int bit;                 /* bit map traverser */
> > +  unsigned int map;                 /* current word of binmap */
> >  
> > -  mchunkptr       fwd;              /* misc temp for linking */
> > -  mchunkptr       bck;              /* misc temp for linking */
> > +  mchunkptr fwd;                    /* misc temp for linking */
> > +  mchunkptr bck;                    /* misc temp for linking */
> >  
> >    const char *errstr = NULL;
> >  
> >    /*
> > -    Convert request size to internal form by adding SIZE_SZ bytes
> > -    overhead plus possibly more to obtain necessary alignment and/or
> > -    to obtain a size of at least MINSIZE, the smallest allocatable
> > -    size. Also, checked_request2size traps (returning 0) request sizes
> > -    that are so large that they wrap around zero when padded and
> > -    aligned.
> > -  */
> > +     Convert request size to internal form by adding SIZE_SZ bytes
> > +     overhead plus possibly more to obtain necessary alignment and/or
> > +     to obtain a size of at least MINSIZE, the smallest allocatable
> > +     size. Also, checked_request2size traps (returning 0) request sizes
> > +     that are so large that they wrap around zero when padded and
> > +     aligned.
> > +   */
> >  
> > -  checked_request2size(bytes, nb);
> > +  checked_request2size (bytes, nb);
> >  
> >    /*
> > -    If the size qualifies as a fastbin, first check corresponding bin.
> > -    This code is safe to execute even if av is not yet initialized, so we
> > -    can try it without checking, which saves some time on this fast path.
> > -  */
> > -
> > -  if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
> > -    idx = fastbin_index(nb);
> > -    mfastbinptr* fb = &fastbin (av, idx);
> > -    mchunkptr pp = *fb;
> > -    do
> > -      {
> > -	victim = pp;
> > -	if (victim == NULL)
> > -	  break;
> > -      }
> > -    while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
> > -	   != victim);
> > -    if (victim != 0) {
> > -      if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
> > -	{
> > -	  errstr = "malloc(): memory corruption (fast)";
> > -	errout:
> > -	  malloc_printerr (check_action, errstr, chunk2mem (victim));
> > -	  return NULL;
> > -	}
> > -      check_remalloced_chunk(av, victim, nb);
> > -      void *p = chunk2mem(victim);
> > -      alloc_perturb (p, bytes);
> > -      return p;
> > +     If the size qualifies as a fastbin, first check corresponding bin.
> > +     This code is safe to execute even if av is not yet initialized, so we
> > +     can try it without checking, which saves some time on this fast path.
> > +   */
> > +
> > +  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
> > +    {
> > +      idx = fastbin_index (nb);
> > +      mfastbinptr *fb = &fastbin (av, idx);
> > +      mchunkptr pp = *fb;
> > +      do
> > +        {
> > +          victim = pp;
> > +          if (victim == NULL)
> > +            break;
> > +        }
> > +      while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
> > +             != victim);
> > +      if (victim != 0)
> > +        {
> > +          if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
> > +            {
> > +              errstr = "malloc(): memory corruption (fast)";
> > +            errout:
> > +              malloc_printerr (check_action, errstr, chunk2mem (victim));
> > +              return NULL;
> > +            }
> > +          check_remalloced_chunk (av, victim, nb);
> > +          void *p = chunk2mem (victim);
> > +          alloc_perturb (p, bytes);
> > +          return p;
> > +        }
> >      }
> > -  }
> >  
> >    /*
> > -    If a small request, check regular bin.  Since these "smallbins"
> > -    hold one size each, no searching within bins is necessary.
> > -    (For a large request, we need to wait until unsorted chunks are
> > -    processed to find best fit. But for small ones, fits are exact
> > -    anyway, so we can check now, which is faster.)
> > -  */
> > -
> > -  if (in_smallbin_range(nb)) {
> > -    idx = smallbin_index(nb);
> > -    bin = bin_at(av,idx);
> > -
> > -    if ( (victim = last(bin)) != bin) {
> > -      if (victim == 0) /* initialization check */
> > -	malloc_consolidate(av);
> > -      else {
> > -	bck = victim->bk;
> > -	if (__builtin_expect (bck->fd != victim, 0))
> > -	  {
> > -	    errstr = "malloc(): smallbin double linked list corrupted";
> > -	    goto errout;
> > -	  }
> > -	set_inuse_bit_at_offset(victim, nb);
> > -	bin->bk = bck;
> > -	bck->fd = bin;
> > -
> > -	if (av != &main_arena)
> > -	  victim->size |= NON_MAIN_ARENA;
> > -	check_malloced_chunk(av, victim, nb);
> > -	void *p = chunk2mem(victim);
> > -	alloc_perturb (p, bytes);
> > -	return p;
> > -      }
> > +     If a small request, check regular bin.  Since these "smallbins"
> > +     hold one size each, no searching within bins is necessary.
> > +     (For a large request, we need to wait until unsorted chunks are
> > +     processed to find best fit. But for small ones, fits are exact
> > +     anyway, so we can check now, which is faster.)
> > +   */
> > +
> > +  if (in_smallbin_range (nb))
> > +    {
> > +      idx = smallbin_index (nb);
> > +      bin = bin_at (av, idx);
> > +
> > +      if ((victim = last (bin)) != bin)
> > +        {
> > +          if (victim == 0) /* initialization check */
> > +            malloc_consolidate (av);
> > +          else
> > +            {
> > +              bck = victim->bk;
> > +              if (__builtin_expect (bck->fd != victim, 0))
> > +                {
> > +                  errstr = "malloc(): smallbin double linked list corrupted";
> > +                  goto errout;
> > +                }
> > +              set_inuse_bit_at_offset (victim, nb);
> > +              bin->bk = bck;
> > +              bck->fd = bin;
> > +
> > +              if (av != &main_arena)
> > +                victim->size |= NON_MAIN_ARENA;
> > +              check_malloced_chunk (av, victim, nb);
> > +              void *p = chunk2mem (victim);
> > +              alloc_perturb (p, bytes);
> > +              return p;
> > +            }
> > +        }
> >      }
> > -  }
> >  
> >    /*
> >       If this is a large request, consolidate fastbins before continuing.
> > @@ -3323,394 +3411,417 @@ _int_malloc(mstate av, size_t bytes)
> >       large requests, but less often mixtures, so consolidation is not
> >       invoked all that often in most programs. And the programs that
> >       it is called frequently in otherwise tend to fragment.
> > -  */
> > +   */
> >  
> > -  else {
> > -    idx = largebin_index(nb);
> > -    if (have_fastchunks(av))
> > -      malloc_consolidate(av);
> > -  }
> > +  else
> > +    {
> > +      idx = largebin_index (nb);
> > +      if (have_fastchunks (av))
> > +        malloc_consolidate (av);
> > +    }
> >  
> >    /*
> > -    Process recently freed or remaindered chunks, taking one only if
> > -    it is exact fit, or, if this a small request, the chunk is remainder from
> > -    the most recent non-exact fit.  Place other traversed chunks in
> > -    bins.  Note that this step is the only place in any routine where
> > -    chunks are placed in bins.
> > -
> > -    The outer loop here is needed because we might not realize until
> > -    near the end of malloc that we should have consolidated, so must
> > -    do so and retry. This happens at most once, and only when we would
> > -    otherwise need to expand memory to service a "small" request.
> > -  */
> > -
> > -  for(;;) {
> > -
> > -    int iters = 0;
> > -    while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
> > -      bck = victim->bk;
> > -      if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
> > -	  || __builtin_expect (victim->size > av->system_mem, 0))
> > -	malloc_printerr (check_action, "malloc(): memory corruption",
> > -			 chunk2mem (victim));
> > -      size = chunksize(victim);
> > +     Process recently freed or remaindered chunks, taking one only if
> > +     it is an exact fit, or, if this is a small request, the chunk is the remainder from
> > +     the most recent non-exact fit.  Place other traversed chunks in
> > +     bins.  Note that this step is the only place in any routine where
> > +     chunks are placed in bins.
> > +
> > +     The outer loop here is needed because we might not realize until
> > +     near the end of malloc that we should have consolidated, so must
> > +     do so and retry. This happens at most once, and only when we would
> > +     otherwise need to expand memory to service a "small" request.
> > +   */
> > +
> > +  for (;; )
> > +    {
> > +      int iters = 0;
> > +      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
> > +        {
> > +          bck = victim->bk;
> > +          if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
> > +              || __builtin_expect (victim->size > av->system_mem, 0))
> > +            malloc_printerr (check_action, "malloc(): memory corruption",
> > +                             chunk2mem (victim));
> > +          size = chunksize (victim);
> > +
> > +          /*
> > +             If a small request, try to use last remainder if it is the
> > +             only chunk in unsorted bin.  This helps promote locality for
> > +             runs of consecutive small requests. This is the only
> > +             exception to best-fit, and applies only when there is
> > +             no exact fit for a small chunk.
> > +           */
> > +
> > +          if (in_smallbin_range (nb) &&
> > +              bck == unsorted_chunks (av) &&
> > +              victim == av->last_remainder &&
> > +              (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
> > +            {
> > +              /* split and reattach remainder */
> > +              remainder_size = size - nb;
> > +              remainder = chunk_at_offset (victim, nb);
> > +              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
> > +              av->last_remainder = remainder;
> > +              remainder->bk = remainder->fd = unsorted_chunks (av);
> > +              if (!in_smallbin_range (remainder_size))
> > +                {
> > +                  remainder->fd_nextsize = NULL;
> > +                  remainder->bk_nextsize = NULL;
> > +                }
> > +
> > +              set_head (victim, nb | PREV_INUSE |
> > +                        (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +              set_head (remainder, remainder_size | PREV_INUSE);
> > +              set_foot (remainder, remainder_size);
> > +
> > +              check_malloced_chunk (av, victim, nb);
> > +              void *p = chunk2mem (victim);
> > +              alloc_perturb (p, bytes);
> > +              return p;
> > +            }
> > +
> > +          /* remove from unsorted list */
> > +          unsorted_chunks (av)->bk = bck;
> > +          bck->fd = unsorted_chunks (av);
> > +
> > +          /* Take now instead of binning if exact fit */
> > +
> > +          if (size == nb)
> > +            {
> > +              set_inuse_bit_at_offset (victim, size);
> > +              if (av != &main_arena)
> > +                victim->size |= NON_MAIN_ARENA;
> > +              check_malloced_chunk (av, victim, nb);
> > +              void *p = chunk2mem (victim);
> > +              alloc_perturb (p, bytes);
> > +              return p;
> > +            }
> > +
> > +          /* place chunk in bin */
> > +
> > +          if (in_smallbin_range (size))
> > +            {
> > +              victim_index = smallbin_index (size);
> > +              bck = bin_at (av, victim_index);
> > +              fwd = bck->fd;
> > +            }
> > +          else
> > +            {
> > +              victim_index = largebin_index (size);
> > +              bck = bin_at (av, victim_index);
> > +              fwd = bck->fd;
> > +
> > +              /* maintain large bins in sorted order */
> > +              if (fwd != bck)
> > +                {
> > +                  /* Or with inuse bit to speed comparisons */
> > +                  size |= PREV_INUSE;
> > +                  /* if smaller than smallest, bypass loop below */
> > +                  assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
> > +                  if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
> > +                    {
> > +                      fwd = bck;
> > +                      bck = bck->bk;
> > +
> > +                      victim->fd_nextsize = fwd->fd;
> > +                      victim->bk_nextsize = fwd->fd->bk_nextsize;
> > +                      fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
> > +                    }
> > +                  else
> > +                    {
> > +                      assert ((fwd->size & NON_MAIN_ARENA) == 0);
> > +                      while ((unsigned long) size < fwd->size)
> > +                        {
> > +                          fwd = fwd->fd_nextsize;
> > +                          assert ((fwd->size & NON_MAIN_ARENA) == 0);
> > +                        }
> > +
> > +                      if ((unsigned long) size == (unsigned long) fwd->size)
> > +                        /* Always insert in the second position.  */
> > +                        fwd = fwd->fd;
> > +                      else
> > +                        {
> > +                          victim->fd_nextsize = fwd;
> > +                          victim->bk_nextsize = fwd->bk_nextsize;
> > +                          fwd->bk_nextsize = victim;
> > +                          victim->bk_nextsize->fd_nextsize = victim;
> > +                        }
> > +                      bck = fwd->bk;
> > +                    }
> > +                }
> > +              else
> > +                victim->fd_nextsize = victim->bk_nextsize = victim;
> > +            }
> > +
> > +          mark_bin (av, victim_index);
> > +          victim->bk = bck;
> > +          victim->fd = fwd;
> > +          fwd->bk = victim;
> > +          bck->fd = victim;
> > +
> > +#define MAX_ITERS       10000
> > +          if (++iters >= MAX_ITERS)
> > +            break;
> > +        }
> >  
> >        /*
> > -	 If a small request, try to use last remainder if it is the
> > -	 only chunk in unsorted bin.  This helps promote locality for
> > -	 runs of consecutive small requests. This is the only
> > -	 exception to best-fit, and applies only when there is
> > -	 no exact fit for a small chunk.
> > -      */
> > -
> > -      if (in_smallbin_range(nb) &&
> > -	  bck == unsorted_chunks(av) &&
> > -	  victim == av->last_remainder &&
> > -	  (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
> > -
> > -	/* split and reattach remainder */
> > -	remainder_size = size - nb;
> > -	remainder = chunk_at_offset(victim, nb);
> > -	unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
> > -	av->last_remainder = remainder;
> > -	remainder->bk = remainder->fd = unsorted_chunks(av);
> > -	if (!in_smallbin_range(remainder_size))
> > -	  {
> > -	    remainder->fd_nextsize = NULL;
> > -	    remainder->bk_nextsize = NULL;
> > -	  }
> > -
> > -	set_head(victim, nb | PREV_INUSE |
> > -		 (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -	set_head(remainder, remainder_size | PREV_INUSE);
> > -	set_foot(remainder, remainder_size);
> > -
> > -	check_malloced_chunk(av, victim, nb);
> > -	void *p = chunk2mem(victim);
> > -	alloc_perturb (p, bytes);
> > -	return p;
> > -      }
> > -
> > -      /* remove from unsorted list */
> > -      unsorted_chunks(av)->bk = bck;
> > -      bck->fd = unsorted_chunks(av);
> > -
> > -      /* Take now instead of binning if exact fit */
> > -
> > -      if (size == nb) {
> > -	set_inuse_bit_at_offset(victim, size);
> > -	if (av != &main_arena)
> > -	  victim->size |= NON_MAIN_ARENA;
> > -	check_malloced_chunk(av, victim, nb);
> > -	void *p = chunk2mem(victim);
> > -	alloc_perturb (p, bytes);
> > -	return p;
> > -      }
> > -
> > -      /* place chunk in bin */
> > -
> > -      if (in_smallbin_range(size)) {
> > -	victim_index = smallbin_index(size);
> > -	bck = bin_at(av, victim_index);
> > -	fwd = bck->fd;
> > -      }
> > -      else {
> > -	victim_index = largebin_index(size);
> > -	bck = bin_at(av, victim_index);
> > -	fwd = bck->fd;
> > -
> > -	/* maintain large bins in sorted order */
> > -	if (fwd != bck) {
> > -	  /* Or with inuse bit to speed comparisons */
> > -	  size |= PREV_INUSE;
> > -	  /* if smaller than smallest, bypass loop below */
> > -	  assert((bck->bk->size & NON_MAIN_ARENA) == 0);
> > -	  if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
> > -	    fwd = bck;
> > -	    bck = bck->bk;
> > -
> > -	    victim->fd_nextsize = fwd->fd;
> > -	    victim->bk_nextsize = fwd->fd->bk_nextsize;
> > -	    fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
> > -	  }
> > -	  else {
> > -	    assert((fwd->size & NON_MAIN_ARENA) == 0);
> > -	    while ((unsigned long) size < fwd->size)
> > -	      {
> > -		fwd = fwd->fd_nextsize;
> > -		assert((fwd->size & NON_MAIN_ARENA) == 0);
> > -	      }
> > -
> > -	    if ((unsigned long) size == (unsigned long) fwd->size)
> > -	      /* Always insert in the second position.  */
> > -	      fwd = fwd->fd;
> > -	    else
> > -	      {
> > -		victim->fd_nextsize = fwd;
> > -		victim->bk_nextsize = fwd->bk_nextsize;
> > -		fwd->bk_nextsize = victim;
> > -		victim->bk_nextsize->fd_nextsize = victim;
> > -	      }
> > -	    bck = fwd->bk;
> > -	  }
> > -	} else
> > -	  victim->fd_nextsize = victim->bk_nextsize = victim;
> > -      }
> > -
> > -      mark_bin(av, victim_index);
> > -      victim->bk = bck;
> > -      victim->fd = fwd;
> > -      fwd->bk = victim;
> > -      bck->fd = victim;
> > -
> > -#define MAX_ITERS	10000
> > -      if (++iters >= MAX_ITERS)
> > -	break;
> > -    }
> > -
> > -    /*
> > -      If a large request, scan through the chunks of current bin in
> > -      sorted order to find smallest that fits.  Use the skip list for this.
> > -    */
> > -
> > -    if (!in_smallbin_range(nb)) {
> > -      bin = bin_at(av, idx);
> > -
> > -      /* skip scan if empty or largest chunk is too small */
> > -      if ((victim = first(bin)) != bin &&
> > -	  (unsigned long)(victim->size) >= (unsigned long)(nb)) {
> > -
> > -	victim = victim->bk_nextsize;
> > -	while (((unsigned long)(size = chunksize(victim)) <
> > -		(unsigned long)(nb)))
> > -	  victim = victim->bk_nextsize;
> > -
> > -	/* Avoid removing the first entry for a size so that the skip
> > -	   list does not have to be rerouted.  */
> > -	if (victim != last(bin) && victim->size == victim->fd->size)
> > -	  victim = victim->fd;
> > -
> > -	remainder_size = size - nb;
> > -	unlink(victim, bck, fwd);
> > -
> > -	/* Exhaust */
> > -	if (remainder_size < MINSIZE)  {
> > -	  set_inuse_bit_at_offset(victim, size);
> > -	  if (av != &main_arena)
> > -	    victim->size |= NON_MAIN_ARENA;
> > -	}
> > -	/* Split */
> > -	else {
> > -	  remainder = chunk_at_offset(victim, nb);
> > -	  /* We cannot assume the unsorted list is empty and therefore
> > -	     have to perform a complete insert here.  */
> > -	  bck = unsorted_chunks(av);
> > -	  fwd = bck->fd;
> > -	  if (__builtin_expect (fwd->bk != bck, 0))
> > -	    {
> > -	      errstr = "malloc(): corrupted unsorted chunks";
> > -	      goto errout;
> > -	    }
> > -	  remainder->bk = bck;
> > -	  remainder->fd = fwd;
> > -	  bck->fd = remainder;
> > -	  fwd->bk = remainder;
> > -	  if (!in_smallbin_range(remainder_size))
> > -	    {
> > -	      remainder->fd_nextsize = NULL;
> > -	      remainder->bk_nextsize = NULL;
> > -	    }
> > -	  set_head(victim, nb | PREV_INUSE |
> > -		   (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -	  set_head(remainder, remainder_size | PREV_INUSE);
> > -	  set_foot(remainder, remainder_size);
> > -	}
> > -	check_malloced_chunk(av, victim, nb);
> > -	void *p = chunk2mem(victim);
> > -	alloc_perturb (p, bytes);
> > -	return p;
> > -      }
> > -    }
> > -
> > -    /*
> > -      Search for a chunk by scanning bins, starting with next largest
> > -      bin. This search is strictly by best-fit; i.e., the smallest
> > -      (with ties going to approximately the least recently used) chunk
> > -      that fits is selected.
> > -
> > -      The bitmap avoids needing to check that most blocks are nonempty.
> > -      The particular case of skipping all bins during warm-up phases
> > -      when no chunks have been returned yet is faster than it might look.
> > -    */
> > -
> > -    ++idx;
> > -    bin = bin_at(av,idx);
> > -    block = idx2block(idx);
> > -    map = av->binmap[block];
> > -    bit = idx2bit(idx);
> > -
> > -    for (;;) {
> > -
> > -      /* Skip rest of block if there are no more set bits in this block.  */
> > -      if (bit > map || bit == 0) {
> > -	do {
> > -	  if (++block >= BINMAPSIZE)  /* out of bins */
> > -	    goto use_top;
> > -	} while ( (map = av->binmap[block]) == 0);
> > -
> > -	bin = bin_at(av, (block << BINMAPSHIFT));
> > -	bit = 1;
> > -      }
> > -
> > -      /* Advance to bin with set bit. There must be one. */
> > -      while ((bit & map) == 0) {
> > -	bin = next_bin(bin);
> > -	bit <<= 1;
> > -	assert(bit != 0);
> > -      }
> > -
> > -      /* Inspect the bin. It is likely to be non-empty */
> > -      victim = last(bin);
> > -
> > -      /*  If a false alarm (empty bin), clear the bit. */
> > -      if (victim == bin) {
> > -	av->binmap[block] = map &= ~bit; /* Write through */
> > -	bin = next_bin(bin);
> > -	bit <<= 1;
> > -      }
> > -
> > -      else {
> > -	size = chunksize(victim);
> > -
> > -	/*  We know the first chunk in this bin is big enough to use. */
> > -	assert((unsigned long)(size) >= (unsigned long)(nb));
> > -
> > -	remainder_size = size - nb;
> > -
> > -	/* unlink */
> > -	unlink(victim, bck, fwd);
> > -
> > -	/* Exhaust */
> > -	if (remainder_size < MINSIZE) {
> > -	  set_inuse_bit_at_offset(victim, size);
> > -	  if (av != &main_arena)
> > -	    victim->size |= NON_MAIN_ARENA;
> > -	}
> > -
> > -	/* Split */
> > -	else {
> > -	  remainder = chunk_at_offset(victim, nb);
> > -
> > -	  /* We cannot assume the unsorted list is empty and therefore
> > -	     have to perform a complete insert here.  */
> > -	  bck = unsorted_chunks(av);
> > -	  fwd = bck->fd;
> > -	  if (__builtin_expect (fwd->bk != bck, 0))
> > -	    {
> > -	      errstr = "malloc(): corrupted unsorted chunks 2";
> > -	      goto errout;
> > -	    }
> > -	  remainder->bk = bck;
> > -	  remainder->fd = fwd;
> > -	  bck->fd = remainder;
> > -	  fwd->bk = remainder;
> > -
> > -	  /* advertise as last remainder */
> > -	  if (in_smallbin_range(nb))
> > -	    av->last_remainder = remainder;
> > -	  if (!in_smallbin_range(remainder_size))
> > -	    {
> > -	      remainder->fd_nextsize = NULL;
> > -	      remainder->bk_nextsize = NULL;
> > -	    }
> > -	  set_head(victim, nb | PREV_INUSE |
> > -		   (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -	  set_head(remainder, remainder_size | PREV_INUSE);
> > -	  set_foot(remainder, remainder_size);
> > -	}
> > -	check_malloced_chunk(av, victim, nb);
> > -	void *p = chunk2mem(victim);
> > -	alloc_perturb (p, bytes);
> > -	return p;
> > -      }
> > -    }
> > +         If a large request, scan through the chunks of current bin in
> > +         sorted order to find smallest that fits.  Use the skip list for this.
> > +       */
> > +
> > +      if (!in_smallbin_range (nb))
> > +        {
> > +          bin = bin_at (av, idx);
> > +
> > +          /* skip scan if empty or largest chunk is too small */
> > +          if ((victim = first (bin)) != bin &&
> > +              (unsigned long) (victim->size) >= (unsigned long) (nb))
> > +            {
> > +              victim = victim->bk_nextsize;
> > +              while (((unsigned long) (size = chunksize (victim)) <
> > +                      (unsigned long) (nb)))
> > +                victim = victim->bk_nextsize;
> > +
> > +              /* Avoid removing the first entry for a size so that the skip
> > +                 list does not have to be rerouted.  */
> > +              if (victim != last (bin) && victim->size == victim->fd->size)
> > +                victim = victim->fd;
> > +
> > +              remainder_size = size - nb;
> > +              unlink (victim, bck, fwd);
> > +
> > +              /* Exhaust */
> > +              if (remainder_size < MINSIZE)
> > +                {
> > +                  set_inuse_bit_at_offset (victim, size);
> > +                  if (av != &main_arena)
> > +                    victim->size |= NON_MAIN_ARENA;
> > +                }
> > +              /* Split */
> > +              else
> > +                {
> > +                  remainder = chunk_at_offset (victim, nb);
> > +                  /* We cannot assume the unsorted list is empty and therefore
> > +                     have to perform a complete insert here.  */
> > +                  bck = unsorted_chunks (av);
> > +                  fwd = bck->fd;
> > +                  if (__builtin_expect (fwd->bk != bck, 0))
> > +                    {
> > +                      errstr = "malloc(): corrupted unsorted chunks";
> > +                      goto errout;
> > +                    }
> > +                  remainder->bk = bck;
> > +                  remainder->fd = fwd;
> > +                  bck->fd = remainder;
> > +                  fwd->bk = remainder;
> > +                  if (!in_smallbin_range (remainder_size))
> > +                    {
> > +                      remainder->fd_nextsize = NULL;
> > +                      remainder->bk_nextsize = NULL;
> > +                    }
> > +                  set_head (victim, nb | PREV_INUSE |
> > +                            (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +                  set_head (remainder, remainder_size | PREV_INUSE);
> > +                  set_foot (remainder, remainder_size);
> > +                }
> > +              check_malloced_chunk (av, victim, nb);
> > +              void *p = chunk2mem (victim);
> > +              alloc_perturb (p, bytes);
> > +              return p;
> > +            }
> > +        }
> >  
> > -  use_top:
> > -    /*
> > -      If large enough, split off the chunk bordering the end of memory
> > -      (held in av->top). Note that this is in accord with the best-fit
> > -      search rule.  In effect, av->top is treated as larger (and thus
> > -      less well fitting) than any other available chunk since it can
> > -      be extended to be as large as necessary (up to system
> > -      limitations).
> > -
> > -      We require that av->top always exists (i.e., has size >=
> > -      MINSIZE) after initialization, so if it would otherwise be
> > -      exhausted by current request, it is replenished. (The main
> > -      reason for ensuring it exists is that we may need MINSIZE space
> > -      to put in fenceposts in sysmalloc.)
> > -    */
> > -
> > -    victim = av->top;
> > -    size = chunksize(victim);
> > -
> > -    if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
> > -      remainder_size = size - nb;
> > -      remainder = chunk_at_offset(victim, nb);
> > -      av->top = remainder;
> > -      set_head(victim, nb | PREV_INUSE |
> > -	       (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -      set_head(remainder, remainder_size | PREV_INUSE);
> > -
> > -      check_malloced_chunk(av, victim, nb);
> > -      void *p = chunk2mem(victim);
> > -      alloc_perturb (p, bytes);
> > -      return p;
> > -    }
> > +      /*
> > +         Search for a chunk by scanning bins, starting with next largest
> > +         bin. This search is strictly by best-fit; i.e., the smallest
> > +         (with ties going to approximately the least recently used) chunk
> > +         that fits is selected.
> > +
> > +         The bitmap avoids needing to check that most blocks are nonempty.
> > +         The particular case of skipping all bins during warm-up phases
> > +         when no chunks have been returned yet is faster than it might look.
> > +       */
> > +
> > +      ++idx;
> > +      bin = bin_at (av, idx);
> > +      block = idx2block (idx);
> > +      map = av->binmap[block];
> > +      bit = idx2bit (idx);
> > +
> > +      for (;; )
> > +        {
> > +          /* Skip rest of block if there are no more set bits in this block.  */
> > +          if (bit > map || bit == 0)
> > +            {
> > +              do
> > +                {
> > +                  if (++block >= BINMAPSIZE) /* out of bins */
> > +                    goto use_top;
> > +                }
> > +              while ((map = av->binmap[block]) == 0);
> > +
> > +              bin = bin_at (av, (block << BINMAPSHIFT));
> > +              bit = 1;
> > +            }
> > +
> > +          /* Advance to bin with set bit. There must be one. */
> > +          while ((bit & map) == 0)
> > +            {
> > +              bin = next_bin (bin);
> > +              bit <<= 1;
> > +              assert (bit != 0);
> > +            }
> > +
> > +          /* Inspect the bin. It is likely to be non-empty */
> > +          victim = last (bin);
> > +
> > +          /*  If a false alarm (empty bin), clear the bit. */
> > +          if (victim == bin)
> > +            {
> > +              av->binmap[block] = map &= ~bit; /* Write through */
> > +              bin = next_bin (bin);
> > +              bit <<= 1;
> > +            }
> > +
> > +          else
> > +            {
> > +              size = chunksize (victim);
> > +
> > +              /*  We know the first chunk in this bin is big enough to use. */
> > +              assert ((unsigned long) (size) >= (unsigned long) (nb));
> > +
> > +              remainder_size = size - nb;
> > +
> > +              /* unlink */
> > +              unlink (victim, bck, fwd);
> > +
> > +              /* Exhaust */
> > +              if (remainder_size < MINSIZE)
> > +                {
> > +                  set_inuse_bit_at_offset (victim, size);
> > +                  if (av != &main_arena)
> > +                    victim->size |= NON_MAIN_ARENA;
> > +                }
> > +
> > +              /* Split */
> > +              else
> > +                {
> > +                  remainder = chunk_at_offset (victim, nb);
> > +
> > +                  /* We cannot assume the unsorted list is empty and therefore
> > +                     have to perform a complete insert here.  */
> > +                  bck = unsorted_chunks (av);
> > +                  fwd = bck->fd;
> > +                  if (__builtin_expect (fwd->bk != bck, 0))
> > +                    {
> > +                      errstr = "malloc(): corrupted unsorted chunks 2";
> > +                      goto errout;
> > +                    }
> > +                  remainder->bk = bck;
> > +                  remainder->fd = fwd;
> > +                  bck->fd = remainder;
> > +                  fwd->bk = remainder;
> > +
> > +                  /* advertise as last remainder */
> > +                  if (in_smallbin_range (nb))
> > +                    av->last_remainder = remainder;
> > +                  if (!in_smallbin_range (remainder_size))
> > +                    {
> > +                      remainder->fd_nextsize = NULL;
> > +                      remainder->bk_nextsize = NULL;
> > +                    }
> > +                  set_head (victim, nb | PREV_INUSE |
> > +                            (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +                  set_head (remainder, remainder_size | PREV_INUSE);
> > +                  set_foot (remainder, remainder_size);
> > +                }
> > +              check_malloced_chunk (av, victim, nb);
> > +              void *p = chunk2mem (victim);
> > +              alloc_perturb (p, bytes);
> > +              return p;
> > +            }
> > +        }
> > +
> > +    use_top:
> > +      /*
> > +         If large enough, split off the chunk bordering the end of memory
> > +         (held in av->top). Note that this is in accord with the best-fit
> > +         search rule.  In effect, av->top is treated as larger (and thus
> > +         less well fitting) than any other available chunk since it can
> > +         be extended to be as large as necessary (up to system
> > +         limitations).
> > +
> > +         We require that av->top always exists (i.e., has size >=
> > +         MINSIZE) after initialization, so if it would otherwise be
> > +         exhausted by current request, it is replenished. (The main
> > +         reason for ensuring it exists is that we may need MINSIZE space
> > +         to put in fenceposts in sysmalloc.)
> > +       */
> > +
> > +      victim = av->top;
> > +      size = chunksize (victim);
> > +
> > +      if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
> > +        {
> > +          remainder_size = size - nb;
> > +          remainder = chunk_at_offset (victim, nb);
> > +          av->top = remainder;
> > +          set_head (victim, nb | PREV_INUSE |
> > +                    (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +          set_head (remainder, remainder_size | PREV_INUSE);
> > +
> > +          check_malloced_chunk (av, victim, nb);
> > +          void *p = chunk2mem (victim);
> > +          alloc_perturb (p, bytes);
> > +          return p;
> > +        }
> > +
> > +      /* When we are using atomic ops to free fast chunks we can get
> > +         here for all block sizes.  */
> > +      else if (have_fastchunks (av))
> > +        {
> > +          malloc_consolidate (av);
> > +          /* restore original bin index */
> > +          if (in_smallbin_range (nb))
> > +            idx = smallbin_index (nb);
> > +          else
> > +            idx = largebin_index (nb);
> > +        }
> >  
> > -    /* When we are using atomic ops to free fast chunks we can get
> > -       here for all block sizes.  */
> > -    else if (have_fastchunks(av)) {
> > -      malloc_consolidate(av);
> > -      /* restore original bin index */
> > -      if (in_smallbin_range(nb))
> > -	idx = smallbin_index(nb);
> > +      /*
> > +         Otherwise, relay to handle system-dependent cases
> > +       */
> >        else
> > -	idx = largebin_index(nb);
> > +        {
> > +          void *p = sysmalloc (nb, av);
> > +          if (p != NULL)
> > +            alloc_perturb (p, bytes);
> > +          return p;
> > +        }
> >      }
> > -
> > -    /*
> > -       Otherwise, relay to handle system-dependent cases
> > -    */
> > -    else {
> > -      void *p = sysmalloc(nb, av);
> > -      if (p != NULL)
> > -	alloc_perturb (p, bytes);
> > -      return p;
> > -    }
> > -  }
> >  }
> >  
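
To make the use_top hunk above easier to review in isolation: it
carves the request off the front of av->top and lets the remainder
become the new top.  A minimal sketch of that split arithmetic, with
simplified types, no locking and illustrative names (this is not the
glibc code itself):

  #include <stddef.h>

  /* Toy chunk: SIZE is the byte length of the whole block, MEM is
     what the caller gets back.  */
  struct chunk { size_t size; char mem[]; };

  #define MINSZ (2 * sizeof (size_t))

  /* Carve NB bytes off the front of *TOP.  Returns NULL when top is
     too small, mirroring the size >= nb + MINSIZE test; the real
     code then consolidates fastbins or relays to sysmalloc.  */
  static void *
  split_top (struct chunk **top, size_t nb)
  {
    struct chunk *victim = *top;
    if (victim->size < nb + MINSZ)
      return NULL;
    struct chunk *rem = (struct chunk *) ((char *) victim + nb);
    rem->size = victim->size - nb;      /* remainder keeps the rest */
    victim->size = nb;
    *top = rem;                         /* top always survives */
    return victim->mem;
  }

The invariant that top keeps at least MINSIZE bytes after every split
is what the fencepost remark in the comment is about.
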
> >  /*
> > -  ------------------------------ free ------------------------------
> > -*/
> > +   ------------------------------ free ------------------------------
> > + */
> >  
> >  static void
> > -_int_free(mstate av, mchunkptr p, int have_lock)
> > +_int_free (mstate av, mchunkptr p, int have_lock)
> >  {
> >    INTERNAL_SIZE_T size;        /* its size */
> > -  mfastbinptr*    fb;          /* associated fastbin */
> > -  mchunkptr       nextchunk;   /* next contiguous chunk */
> > +  mfastbinptr *fb;             /* associated fastbin */
> > +  mchunkptr nextchunk;         /* next contiguous chunk */
> >    INTERNAL_SIZE_T nextsize;    /* its size */
> > -  int             nextinuse;   /* true if nextchunk is used */
> > +  int nextinuse;               /* true if nextchunk is used */
> >    INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
> > -  mchunkptr       bck;         /* misc temp for linking */
> > -  mchunkptr       fwd;         /* misc temp for linking */
> > +  mchunkptr bck;               /* misc temp for linking */
> > +  mchunkptr fwd;               /* misc temp for linking */
> >  
> >    const char *errstr = NULL;
> >    int locked = 0;
> >  
> > -  size = chunksize(p);
> > +  size = chunksize (p);
> >  
> >    /* Little security check which won't hurt performance: the
> >       allocator never wraps around at the end of the address space.
> > @@ -3721,9 +3832,9 @@ _int_free(mstate av, mchunkptr p, int have_lock)
> >      {
> >        errstr = "free(): invalid pointer";
> >      errout:
> > -      if (! have_lock && locked)
> > -	(void)mutex_unlock(&av->mutex);
> > -      malloc_printerr (check_action, errstr, chunk2mem(p));
> > +      if (!have_lock && locked)
> > +        (void) mutex_unlock (&av->mutex);
> > +      malloc_printerr (check_action, errstr, chunk2mem (p));
> >        return;
> >      }
> >    /* We know that each chunk is at least MINSIZE bytes in size or a
> > @@ -3734,376 +3845,401 @@ _int_free(mstate av, mchunkptr p, int have_lock)
> >        goto errout;
> >      }
> >  
> > -  check_inuse_chunk(av, p);
> > +  check_inuse_chunk (av, p);
> >  
> >    /*
> > -    If eligible, place chunk on a fastbin so it can be found
> > -    and used quickly in malloc.
> > -  */
> > +     If eligible, place chunk on a fastbin so it can be found
> > +     and used quickly in malloc.
> > +   */
> >  
> > -  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
> > +  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ())
> >  
> >  #if TRIM_FASTBINS
> >        /*
> > -	If TRIM_FASTBINS set, don't place chunks
> > -	bordering top into fastbins
> > -      */
> > -      && (chunk_at_offset(p, size) != av->top)
> > +         If TRIM_FASTBINS set, don't place chunks
> > +         bordering top into fastbins
> > +       */
> > +      && (chunk_at_offset (p, size) != av->top)
> >  #endif
> > -      ) {
> > -
> > -    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
> > -	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
> > -			     >= av->system_mem, 0))
> > -      {
> > -	/* We might not have a lock at this point and concurrent modifications
> > -	   of system_mem might have let to a false positive.  Redo the test
> > -	   after getting the lock.  */
> > -	if (have_lock
> > -	    || ({ assert (locked == 0);
> > -		  mutex_lock(&av->mutex);
> > -		  locked = 1;
> > -		  chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
> > -		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
> > -	      }))
> > -	  {
> > -	    errstr = "free(): invalid next size (fast)";
> > -	    goto errout;
> > -	  }
> > -	if (! have_lock)
> > -	  {
> > -	    (void)mutex_unlock(&av->mutex);
> > -	    locked = 0;
> > -	  }
> > -      }
> > -
> > -    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
> > -
> > -    set_fastchunks(av);
> > -    unsigned int idx = fastbin_index(size);
> > -    fb = &fastbin (av, idx);
> > -
> > -    mchunkptr fd;
> > -    mchunkptr old = *fb;
> > -    unsigned int old_idx = ~0u;
> > -    do
> > -      {
> > -	/* Another simple check: make sure the top of the bin is not the
> > -	   record we are going to add (i.e., double free).  */
> > -	if (__builtin_expect (old == p, 0))
> > -	  {
> > -	    errstr = "double free or corruption (fasttop)";
> > -	    goto errout;
> > -	  }
> > -	if (old != NULL)
> > -	  old_idx = fastbin_index(chunksize(old));
> > -	p->fd = fd = old;
> > -      }
> > -    while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
> > -
> > -    if (fd != NULL && __builtin_expect (old_idx != idx, 0))
> > -      {
> > -	errstr = "invalid fastbin entry (free)";
> > -	goto errout;
> > -      }
> > -  }
> > +      )
> > +    {
> > +      if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
> > +          || __builtin_expect (chunksize (chunk_at_offset (p, size))
> > +                               >= av->system_mem, 0))
> > +        {
> > +          /* We might not have a lock at this point and concurrent modifications
> > +             of system_mem might have led to a false positive.  Redo the test
> > +             after getting the lock.  */
> > +          if (have_lock
> > +              || ({ assert (locked == 0);
> > +                    mutex_lock (&av->mutex);
> > +                    locked = 1;
> > +                    chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
> > +                    || chunksize (chunk_at_offset (p, size)) >= av->system_mem; }))
> > +            {
> > +              errstr = "free(): invalid next size (fast)";
> > +              goto errout;
> > +            }
> > +          if (!have_lock)
> > +            {
> > +              (void) mutex_unlock (&av->mutex);
> > +              locked = 0;
> > +            }
> > +        }
> > +
> > +      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
> > +
> > +      set_fastchunks (av);
> > +      unsigned int idx = fastbin_index (size);
> > +      fb = &fastbin (av, idx);
> > +
> > +      mchunkptr fd;
> > +      mchunkptr old = *fb;
> > +      unsigned int old_idx = ~0u;
> > +      do
> > +        {
> > +          /* Another simple check: make sure the top of the bin is not the
> > +             record we are going to add (i.e., double free).  */
> > +          if (__builtin_expect (old == p, 0))
> > +            {
> > +              errstr = "double free or corruption (fasttop)";
> > +              goto errout;
> > +            }
> > +          if (old != NULL)
> > +            old_idx = fastbin_index (chunksize (old));
> > +          p->fd = fd = old;
> > +        }
> > +      while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
> > +
> > +      if (fd != NULL && __builtin_expect (old_idx != idx, 0))
> > +        {
> > +          errstr = "invalid fastbin entry (free)";
> > +          goto errout;
> > +        }
> > +    }
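
The only subtle part of the fastbin path above is the lock-free push;
stripped of the arena plumbing it is a plain CAS loop.  A sketch using
C11 atomics in place of catomic_compare_and_exchange_val_rel
(simplified types, illustrative names):

  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct node { struct node *fd; };

  /* Push P onto the LIFO list *FB, aborting on an immediate double
     free the same way the fasttop check does.  */
  static void
  fastbin_push (struct node *_Atomic *fb, struct node *p)
  {
    struct node *old = atomic_load (fb);
    do
      {
        if (old == p)
          {
            fprintf (stderr, "double free or corruption (fasttop)\n");
            abort ();
          }
        p->fd = old;
      }
    while (!atomic_compare_exchange_weak (fb, &old, p));
  }

The old_idx != idx test after the loop in the real code is a second
corruption check: whatever was on top of the bin must belong to the
same fastbin size class as the chunk being freed.
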
> >  
> >    /*
> > -    Consolidate other non-mmapped chunks as they arrive.
> > -  */
> > +     Consolidate other non-mmapped chunks as they arrive.
> > +   */
> >  
> > -  else if (!chunk_is_mmapped(p)) {
> > -    if (! have_lock) {
> > +  else if (!chunk_is_mmapped (p))
> > +    {
> > +      if (!have_lock)
> > +        {
> >  #if THREAD_STATS
> > -      if(!mutex_trylock(&av->mutex))
> > -	++(av->stat_lock_direct);
> > -      else {
> > -	(void)mutex_lock(&av->mutex);
> > -	++(av->stat_lock_wait);
> > -      }
> > +          if (!mutex_trylock (&av->mutex))
> > +            ++(av->stat_lock_direct);
> > +          else
> > +            {
> > +              (void) mutex_lock (&av->mutex);
> > +              ++(av->stat_lock_wait);
> > +            }
> >  #else
> > -      (void)mutex_lock(&av->mutex);
> > +          (void) mutex_lock (&av->mutex);
> >  #endif
> > -      locked = 1;
> > -    }
> > -
> > -    nextchunk = chunk_at_offset(p, size);
> > -
> > -    /* Lightweight tests: check whether the block is already the
> > -       top block.  */
> > -    if (__builtin_expect (p == av->top, 0))
> > -      {
> > -	errstr = "double free or corruption (top)";
> > -	goto errout;
> > -      }
> > -    /* Or whether the next chunk is beyond the boundaries of the arena.  */
> > -    if (__builtin_expect (contiguous (av)
> > -			  && (char *) nextchunk
> > -			  >= ((char *) av->top + chunksize(av->top)), 0))
> > -      {
> > -	errstr = "double free or corruption (out)";
> > -	goto errout;
> > -      }
> > -    /* Or whether the block is actually not marked used.  */
> > -    if (__builtin_expect (!prev_inuse(nextchunk), 0))
> > -      {
> > -	errstr = "double free or corruption (!prev)";
> > -	goto errout;
> > -      }
> > -
> > -    nextsize = chunksize(nextchunk);
> > -    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
> > -	|| __builtin_expect (nextsize >= av->system_mem, 0))
> > -      {
> > -	errstr = "free(): invalid next size (normal)";
> > -	goto errout;
> > -      }
> > -
> > -    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
> > -
> > -    /* consolidate backward */
> > -    if (!prev_inuse(p)) {
> > -      prevsize = p->prev_size;
> > -      size += prevsize;
> > -      p = chunk_at_offset(p, -((long) prevsize));
> > -      unlink(p, bck, fwd);
> > -    }
> > -
> > -    if (nextchunk != av->top) {
> > -      /* get and clear inuse bit */
> > -      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
> > -
> > -      /* consolidate forward */
> > -      if (!nextinuse) {
> > -	unlink(nextchunk, bck, fwd);
> > -	size += nextsize;
> > -      } else
> > -	clear_inuse_bit_at_offset(nextchunk, 0);
> > +          locked = 1;
> > +        }
> > +
> > +      nextchunk = chunk_at_offset (p, size);
> > +
> > +      /* Lightweight tests: check whether the block is already the
> > +         top block.  */
> > +      if (__builtin_expect (p == av->top, 0))
> > +        {
> > +          errstr = "double free or corruption (top)";
> > +          goto errout;
> > +        }
> > +      /* Or whether the next chunk is beyond the boundaries of the arena.  */
> > +      if (__builtin_expect (contiguous (av)
> > +                            && (char *) nextchunk
> > +                            >= ((char *) av->top + chunksize (av->top)), 0))
> > +        {
> > +          errstr = "double free or corruption (out)";
> > +          goto errout;
> > +        }
> > +      /* Or whether the block is actually not marked used.  */
> > +      if (__builtin_expect (!prev_inuse (nextchunk), 0))
> > +        {
> > +          errstr = "double free or corruption (!prev)";
> > +          goto errout;
> > +        }
> > +
> > +      nextsize = chunksize (nextchunk);
> > +      if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
> > +          || __builtin_expect (nextsize >= av->system_mem, 0))
> > +        {
> > +          errstr = "free(): invalid next size (normal)";
> > +          goto errout;
> > +        }
> > +
> > +      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
> > +
> > +      /* consolidate backward */
> > +      if (!prev_inuse (p))
> > +        {
> > +          prevsize = p->prev_size;
> > +          size += prevsize;
> > +          p = chunk_at_offset (p, -((long) prevsize));
> > +          unlink (p, bck, fwd);
> > +        }
> > +
> > +      if (nextchunk != av->top)
> > +        {
> > +          /* get and clear inuse bit */
> > +          nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
> > +
> > +          /* consolidate forward */
> > +          if (!nextinuse)
> > +            {
> > +              unlink (nextchunk, bck, fwd);
> > +              size += nextsize;
> > +            }
> > +          else
> > +            clear_inuse_bit_at_offset (nextchunk, 0);
> > +
> > +          /*
> > +             Place the chunk in unsorted chunk list. Chunks are
> > +             not placed into regular bins until after they have
> > +             been given one chance to be used in malloc.
> > +           */
> > +
> > +          bck = unsorted_chunks (av);
> > +          fwd = bck->fd;
> > +          if (__builtin_expect (fwd->bk != bck, 0))
> > +            {
> > +              errstr = "free(): corrupted unsorted chunks";
> > +              goto errout;
> > +            }
> > +          p->fd = fwd;
> > +          p->bk = bck;
> > +          if (!in_smallbin_range (size))
> > +            {
> > +              p->fd_nextsize = NULL;
> > +              p->bk_nextsize = NULL;
> > +            }
> > +          bck->fd = p;
> > +          fwd->bk = p;
> > +
> > +          set_head (p, size | PREV_INUSE);
> > +          set_foot (p, size);
> > +
> > +          check_free_chunk (av, p);
> > +        }
> >  
> >        /*
> > -	Place the chunk in unsorted chunk list. Chunks are
> > -	not placed into regular bins until after they have
> > -	been given one chance to be used in malloc.
> > -      */
> > -
> > -      bck = unsorted_chunks(av);
> > -      fwd = bck->fd;
> > -      if (__builtin_expect (fwd->bk != bck, 0))
> > -	{
> > -	  errstr = "free(): corrupted unsorted chunks";
> > -	  goto errout;
> > -	}
> > -      p->fd = fwd;
> > -      p->bk = bck;
> > -      if (!in_smallbin_range(size))
> > -	{
> > -	  p->fd_nextsize = NULL;
> > -	  p->bk_nextsize = NULL;
> > -	}
> > -      bck->fd = p;
> > -      fwd->bk = p;
> > -
> > -      set_head(p, size | PREV_INUSE);
> > -      set_foot(p, size);
> > -
> > -      check_free_chunk(av, p);
> > -    }
> > -
> > -    /*
> > -      If the chunk borders the current high end of memory,
> > -      consolidate into top
> > -    */
> > -
> > -    else {
> > -      size += nextsize;
> > -      set_head(p, size | PREV_INUSE);
> > -      av->top = p;
> > -      check_chunk(av, p);
> > -    }
> > -
> > -    /*
> > -      If freeing a large space, consolidate possibly-surrounding
> > -      chunks. Then, if the total unused topmost memory exceeds trim
> > -      threshold, ask malloc_trim to reduce top.
> > +         If the chunk borders the current high end of memory,
> > +         consolidate into top
> > +       */
> >  
> > -      Unless max_fast is 0, we don't know if there are fastbins
> > -      bordering top, so we cannot tell for sure whether threshold
> > -      has been reached unless fastbins are consolidated.  But we
> > -      don't want to consolidate on each free.  As a compromise,
> > -      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
> > -      is reached.
> > -    */
> > -
> > -    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
> > -      if (have_fastchunks(av))
> > -	malloc_consolidate(av);
> > +      else
> > +        {
> > +          size += nextsize;
> > +          set_head (p, size | PREV_INUSE);
> > +          av->top = p;
> > +          check_chunk (av, p);
> > +        }
> >  
> > -      if (av == &main_arena) {
> > +      /*
> > +         If freeing a large space, consolidate possibly-surrounding
> > +         chunks. Then, if the total unused topmost memory exceeds trim
> > +         threshold, ask malloc_trim to reduce top.
> > +
> > +         Unless max_fast is 0, we don't know if there are fastbins
> > +         bordering top, so we cannot tell for sure whether threshold
> > +         has been reached unless fastbins are consolidated.  But we
> > +         don't want to consolidate on each free.  As a compromise,
> > +         consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
> > +         is reached.
> > +       */
> > +
> > +      if ((unsigned long) (size) >= FASTBIN_CONSOLIDATION_THRESHOLD)
> > +        {
> > +          if (have_fastchunks (av))
> > +            malloc_consolidate (av);
> > +
> > +          if (av == &main_arena)
> > +            {
> >  #ifndef MORECORE_CANNOT_TRIM
> > -	if ((unsigned long)(chunksize(av->top)) >=
> > -	    (unsigned long)(mp_.trim_threshold))
> > -	  systrim(mp_.top_pad, av);
> > +              if ((unsigned long) (chunksize (av->top)) >=
> > +                  (unsigned long) (mp_.trim_threshold))
> > +                systrim (mp_.top_pad, av);
> >  #endif
> > -      } else {
> > -	/* Always try heap_trim(), even if the top chunk is not
> > -	   large, because the corresponding heap might go away.  */
> > -	heap_info *heap = heap_for_ptr(top(av));
> > -
> > -	assert(heap->ar_ptr == av);
> > -	heap_trim(heap, mp_.top_pad);
> > -      }
> > -    }
> > -
> > -    if (! have_lock) {
> > -      assert (locked);
> > -      (void)mutex_unlock(&av->mutex);
> > +            }
> > +          else
> > +            {
> > +              /* Always try heap_trim(), even if the top chunk is not
> > +                 large, because the corresponding heap might go away.  */
> > +              heap_info *heap = heap_for_ptr (top (av));
> > +
> > +              assert (heap->ar_ptr == av);
> > +              heap_trim (heap, mp_.top_pad);
> > +            }
> > +        }
> > +
> > +      if (!have_lock)
> > +        {
> > +          assert (locked);
> > +          (void) mutex_unlock (&av->mutex);
> > +        }
> >      }
> > -  }
> >    /*
> > -    If the chunk was allocated via mmap, release via munmap().
> > -  */
> > +     If the chunk was allocated via mmap, release via munmap().
> > +   */
> >  
> > -  else {
> > -    munmap_chunk (p);
> > -  }
> > +  else
> > +    {
> > +      munmap_chunk (p);
> > +    }
> >  }
> >  
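
Because the whitespace churn makes the control flow easy to lose:
_int_free coalesces backward through prev_size when PREV_INUSE is
clear, then forward when the successor's inuse bit is clear, and only
then links the merged chunk into the unsorted bin (or into top).  The
boundary-tag step in miniature, with bin unlinking elided and
illustrative names:

  #include <stddef.h>

  #define PREV_INUSE ((size_t) 0x1)
  #define CHUNKSIZE(p) ((p)->size & ~PREV_INUSE)

  struct tchunk
  {
    size_t prev_size;     /* size of previous chunk, if it is free */
    size_t size;          /* this chunk's size | PREV_INUSE flag   */
  };

  /* Merge P with its free predecessor, as in the "consolidate
     backward" branch; the real code also unlinks the predecessor
     from its bin.  */
  static struct tchunk *
  coalesce_backward (struct tchunk *p)
  {
    size_t size = CHUNKSIZE (p);
    if (!(p->size & PREV_INUSE))
      {
        size_t prevsize = p->prev_size;
        p = (struct tchunk *) ((char *) p - prevsize);
        p->size = (size + prevsize) | (p->size & PREV_INUSE);
      }
    return p;
  }
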
> >  /*
> > -  ------------------------- malloc_consolidate -------------------------
> > +   ------------------------- malloc_consolidate -------------------------
> >  
> > -  malloc_consolidate is a specialized version of free() that tears
> > -  down chunks held in fastbins.  Free itself cannot be used for this
> > -  purpose since, among other things, it might place chunks back onto
> > -  fastbins.  So, instead, we need to use a minor variant of the same
> > -  code.
> > +   malloc_consolidate is a specialized version of free() that tears
> > +   down chunks held in fastbins.  Free itself cannot be used for this
> > +   purpose since, among other things, it might place chunks back onto
> > +   fastbins.  So, instead, we need to use a minor variant of the same
> > +   code.
> >  
> > -  Also, because this routine needs to be called the first time through
> > -  malloc anyway, it turns out to be the perfect place to trigger
> > -  initialization code.
> > -*/
> > +   Also, because this routine needs to be called the first time through
> > +   malloc anyway, it turns out to be the perfect place to trigger
> > +   initialization code.
> > + */
> >  
> > -static void malloc_consolidate(mstate av)
> > +static void
> > +malloc_consolidate (mstate av)
> >  {
> > -  mfastbinptr*    fb;                 /* current fastbin being consolidated */
> > -  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
> > -  mchunkptr       p;                  /* current chunk being consolidated */
> > -  mchunkptr       nextp;              /* next chunk to consolidate */
> > -  mchunkptr       unsorted_bin;       /* bin header */
> > -  mchunkptr       first_unsorted;     /* chunk to link to */
> > +  mfastbinptr *fb;                    /* current fastbin being consolidated */
> > +  mfastbinptr *maxfb;                 /* last fastbin (for loop control) */
> > +  mchunkptr p;                        /* current chunk being consolidated */
> > +  mchunkptr nextp;                    /* next chunk to consolidate */
> > +  mchunkptr unsorted_bin;             /* bin header */
> > +  mchunkptr first_unsorted;           /* chunk to link to */
> >  
> >    /* These have same use as in free() */
> > -  mchunkptr       nextchunk;
> > +  mchunkptr nextchunk;
> >    INTERNAL_SIZE_T size;
> >    INTERNAL_SIZE_T nextsize;
> >    INTERNAL_SIZE_T prevsize;
> > -  int             nextinuse;
> > -  mchunkptr       bck;
> > -  mchunkptr       fwd;
> > +  int nextinuse;
> > +  mchunkptr bck;
> > +  mchunkptr fwd;
> >  
> >    /*
> > -    If max_fast is 0, we know that av hasn't
> > -    yet been initialized, in which case do so below
> > -  */
> > -
> > -  if (get_max_fast () != 0) {
> > -    clear_fastchunks(av);
> > -
> > -    unsorted_bin = unsorted_chunks(av);
> > -
> > -    /*
> > -      Remove each chunk from fast bin and consolidate it, placing it
> > -      then in unsorted bin. Among other reasons for doing this,
> > -      placing in unsorted bin avoids needing to calculate actual bins
> > -      until malloc is sure that chunks aren't immediately going to be
> > -      reused anyway.
> > -    */
> > -
> > -    maxfb = &fastbin (av, NFASTBINS - 1);
> > -    fb = &fastbin (av, 0);
> > -    do {
> > -      p = atomic_exchange_acq (fb, 0);
> > -      if (p != 0) {
> > -	do {
> > -	  check_inuse_chunk(av, p);
> > -	  nextp = p->fd;
> > -
> > -	  /* Slightly streamlined version of consolidation code in free() */
> > -	  size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
> > -	  nextchunk = chunk_at_offset(p, size);
> > -	  nextsize = chunksize(nextchunk);
> > -
> > -	  if (!prev_inuse(p)) {
> > -	    prevsize = p->prev_size;
> > -	    size += prevsize;
> > -	    p = chunk_at_offset(p, -((long) prevsize));
> > -	    unlink(p, bck, fwd);
> > -	  }
> > -
> > -	  if (nextchunk != av->top) {
> > -	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
> > -
> > -	    if (!nextinuse) {
> > -	      size += nextsize;
> > -	      unlink(nextchunk, bck, fwd);
> > -	    } else
> > -	      clear_inuse_bit_at_offset(nextchunk, 0);
> > -
> > -	    first_unsorted = unsorted_bin->fd;
> > -	    unsorted_bin->fd = p;
> > -	    first_unsorted->bk = p;
> > -
> > -	    if (!in_smallbin_range (size)) {
> > -	      p->fd_nextsize = NULL;
> > -	      p->bk_nextsize = NULL;
> > -	    }
> > -
> > -	    set_head(p, size | PREV_INUSE);
> > -	    p->bk = unsorted_bin;
> > -	    p->fd = first_unsorted;
> > -	    set_foot(p, size);
> > -	  }
> > -
> > -	  else {
> > -	    size += nextsize;
> > -	    set_head(p, size | PREV_INUSE);
> > -	    av->top = p;
> > -	  }
> > -
> > -	} while ( (p = nextp) != 0);
> > +     If max_fast is 0, we know that av hasn't
> > +     yet been initialized, in which case do so below
> > +   */
> >  
> > -      }
> > -    } while (fb++ != maxfb);
> > -  }
> > -  else {
> > -    malloc_init_state(av);
> > -    check_malloc_state(av);
> > -  }
> > +  if (get_max_fast () != 0)
> > +    {
> > +      clear_fastchunks (av);
> > +
> > +      unsorted_bin = unsorted_chunks (av);
> > +
> > +      /*
> > +         Remove each chunk from fast bin and consolidate it, placing it
> > +         then in unsorted bin. Among other reasons for doing this,
> > +         placing in unsorted bin avoids needing to calculate actual bins
> > +         until malloc is sure that chunks aren't immediately going to be
> > +         reused anyway.
> > +       */
> > +
> > +      maxfb = &fastbin (av, NFASTBINS - 1);
> > +      fb = &fastbin (av, 0);
> > +      do
> > +        {
> > +          p = atomic_exchange_acq (fb, 0);
> > +          if (p != 0)
> > +            {
> > +              do
> > +                {
> > +                  check_inuse_chunk (av, p);
> > +                  nextp = p->fd;
> > +
> > +                  /* Slightly streamlined version of consolidation code in free() */
> > +                  size = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
> > +                  nextchunk = chunk_at_offset (p, size);
> > +                  nextsize = chunksize (nextchunk);
> > +
> > +                  if (!prev_inuse (p))
> > +                    {
> > +                      prevsize = p->prev_size;
> > +                      size += prevsize;
> > +                      p = chunk_at_offset (p, -((long) prevsize));
> > +                      unlink (p, bck, fwd);
> > +                    }
> > +
> > +                  if (nextchunk != av->top)
> > +                    {
> > +                      nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
> > +
> > +                      if (!nextinuse)
> > +                        {
> > +                          size += nextsize;
> > +                          unlink (nextchunk, bck, fwd);
> > +                        }
> > +                      else
> > +                        clear_inuse_bit_at_offset (nextchunk, 0);
> > +
> > +                      first_unsorted = unsorted_bin->fd;
> > +                      unsorted_bin->fd = p;
> > +                      first_unsorted->bk = p;
> > +
> > +                      if (!in_smallbin_range (size))
> > +                        {
> > +                          p->fd_nextsize = NULL;
> > +                          p->bk_nextsize = NULL;
> > +                        }
> > +
> > +                      set_head (p, size | PREV_INUSE);
> > +                      p->bk = unsorted_bin;
> > +                      p->fd = first_unsorted;
> > +                      set_foot (p, size);
> > +                    }
> > +
> > +                  else
> > +                    {
> > +                      size += nextsize;
> > +                      set_head (p, size | PREV_INUSE);
> > +                      av->top = p;
> > +                    }
> > +                }
> > +              while ((p = nextp) != 0);
> > +            }
> > +        }
> > +      while (fb++ != maxfb);
> > +    }
> > +  else
> > +    {
> > +      malloc_init_state (av);
> > +      check_malloc_state (av);
> > +    }
> >  }
> >  
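
One behavioural detail worth keeping in mind while reading the
reindented malloc_consolidate loop: each fastbin is detached whole
with atomic_exchange_acq before it is walked, so concurrent frees
pushing onto the same bin cannot interfere with the drain.  The shape
of that loop in isolation (C11 atomics, illustrative names):

  #include <stdatomic.h>
  #include <stddef.h>

  struct node { struct node *fd; };

  /* Hand every element of BINS[0..N) to CONSUME exactly once.  */
  static void
  drain_fastbins (struct node *_Atomic bins[], size_t n,
                  void (*consume) (struct node *))
  {
    for (size_t i = 0; i < n; i++)
      {
        /* Steal the whole list in one shot.  */
        struct node *p = atomic_exchange (&bins[i], NULL);
        while (p != NULL)
          {
            struct node *next = p->fd;  /* save before consuming */
            consume (p);
            p = next;
          }
      }
  }
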
> >  /*
> > -  ------------------------------ realloc ------------------------------
> > -*/
> > +   ------------------------------ realloc ------------------------------
> > + */
> >  
> > -void*
> > -_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
> > -	     INTERNAL_SIZE_T nb)
> > +void *
> > +_int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
> > +              INTERNAL_SIZE_T nb)
> >  {
> > -  mchunkptr        newp;            /* chunk to return */
> > -  INTERNAL_SIZE_T  newsize;         /* its size */
> > -  void*          newmem;          /* corresponding user mem */
> > +  mchunkptr newp;                   /* chunk to return */
> > +  INTERNAL_SIZE_T newsize;          /* its size */
> > +  void *newmem;                   /* corresponding user mem */
> >  
> > -  mchunkptr        next;            /* next contiguous chunk after oldp */
> > +  mchunkptr next;                   /* next contiguous chunk after oldp */
> >  
> > -  mchunkptr        remainder;       /* extra space at end of newp */
> > -  unsigned long    remainder_size;  /* its size */
> > +  mchunkptr remainder;              /* extra space at end of newp */
> > +  unsigned long remainder_size;     /* its size */
> >  
> > -  mchunkptr        bck;             /* misc temp for linking */
> > -  mchunkptr        fwd;             /* misc temp for linking */
> > +  mchunkptr bck;                    /* misc temp for linking */
> > +  mchunkptr fwd;                    /* misc temp for linking */
> >  
> > -  unsigned long    copysize;        /* bytes to copy */
> > -  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
> > -  INTERNAL_SIZE_T* s;               /* copy source */
> > -  INTERNAL_SIZE_T* d;               /* copy destination */
> > +  unsigned long copysize;           /* bytes to copy */
> > +  unsigned int ncopies;             /* INTERNAL_SIZE_T words to copy */
> > +  INTERNAL_SIZE_T *s;               /* copy source */
> > +  INTERNAL_SIZE_T *d;               /* copy destination */
> >  
> >    const char *errstr = NULL;
> >  
> > @@ -4113,17 +4249,17 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
> >      {
> >        errstr = "realloc(): invalid old size";
> >      errout:
> > -      malloc_printerr (check_action, errstr, chunk2mem(oldp));
> > +      malloc_printerr (check_action, errstr, chunk2mem (oldp));
> >        return NULL;
> >      }
> >  
> > -  check_inuse_chunk(av, oldp);
> > +  check_inuse_chunk (av, oldp);
> >  
> >    /* All callers already filter out mmap'ed chunks.  */
> > -  assert (!chunk_is_mmapped(oldp));
> > +  assert (!chunk_is_mmapped (oldp));
> >  
> > -  next = chunk_at_offset(oldp, oldsize);
> > -  INTERNAL_SIZE_T nextsize = chunksize(next);
> > +  next = chunk_at_offset (oldp, oldsize);
> > +  INTERNAL_SIZE_T nextsize = chunksize (next);
> >    if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
> >        || __builtin_expect (nextsize >= av->system_mem, 0))
> >      {
> > @@ -4131,216 +4267,233 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
> >        goto errout;
> >      }
> >  
> > -  if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
> > -    /* already big enough; split below */
> > -    newp = oldp;
> > -    newsize = oldsize;
> > -  }
> > -
> > -  else {
> > -    /* Try to expand forward into top */
> > -    if (next == av->top &&
> > -	(unsigned long)(newsize = oldsize + nextsize) >=
> > -	(unsigned long)(nb + MINSIZE)) {
> > -      set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -      av->top = chunk_at_offset(oldp, nb);
> > -      set_head(av->top, (newsize - nb) | PREV_INUSE);
> > -      check_inuse_chunk(av, oldp);
> > -      return chunk2mem(oldp);
> > -    }
> > -
> > -    /* Try to expand forward into next chunk;  split off remainder below */
> > -    else if (next != av->top &&
> > -	     !inuse(next) &&
> > -	     (unsigned long)(newsize = oldsize + nextsize) >=
> > -	     (unsigned long)(nb)) {
> > +  if ((unsigned long) (oldsize) >= (unsigned long) (nb))
> > +    {
> > +      /* already big enough; split below */
> >        newp = oldp;
> > -      unlink(next, bck, fwd);
> > +      newsize = oldsize;
> >      }
> >  
> > -    /* allocate, copy, free */
> > -    else {
> > -      newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
> > -      if (newmem == 0)
> > -	return 0; /* propagate failure */
> > -
> > -      newp = mem2chunk(newmem);
> > -      newsize = chunksize(newp);
> > -
> > -      /*
> > -	Avoid copy if newp is next chunk after oldp.
> > -      */
> > -      if (newp == next) {
> > -	newsize += oldsize;
> > -	newp = oldp;
> > -      }
> > -      else {
> > -	/*
> > -	  Unroll copy of <= 36 bytes (72 if 8byte sizes)
> > -	  We know that contents have an odd number of
> > -	  INTERNAL_SIZE_T-sized words; minimally 3.
> > -	*/
> > -
> > -	copysize = oldsize - SIZE_SZ;
> > -	s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
> > -	d = (INTERNAL_SIZE_T*)(newmem);
> > -	ncopies = copysize / sizeof(INTERNAL_SIZE_T);
> > -	assert(ncopies >= 3);
> > -
> > -	if (ncopies > 9)
> > -	  memcpy(d, s, copysize);
> > -
> > -	else {
> > -	  *(d+0) = *(s+0);
> > -	  *(d+1) = *(s+1);
> > -	  *(d+2) = *(s+2);
> > -	  if (ncopies > 4) {
> > -	    *(d+3) = *(s+3);
> > -	    *(d+4) = *(s+4);
> > -	    if (ncopies > 6) {
> > -	      *(d+5) = *(s+5);
> > -	      *(d+6) = *(s+6);
> > -	      if (ncopies > 8) {
> > -		*(d+7) = *(s+7);
> > -		*(d+8) = *(s+8);
> > -	      }
> > -	    }
> > -	  }
> > -	}
> > -
> > -	_int_free(av, oldp, 1);
> > -	check_inuse_chunk(av, newp);
> > -	return chunk2mem(newp);
> > -      }
> > +  else
> > +    {
> > +      /* Try to expand forward into top */
> > +      if (next == av->top &&
> > +          (unsigned long) (newsize = oldsize + nextsize) >=
> > +          (unsigned long) (nb + MINSIZE))
> > +        {
> > +          set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +          av->top = chunk_at_offset (oldp, nb);
> > +          set_head (av->top, (newsize - nb) | PREV_INUSE);
> > +          check_inuse_chunk (av, oldp);
> > +          return chunk2mem (oldp);
> > +        }
> > +
> > +      /* Try to expand forward into next chunk;  split off remainder below */
> > +      else if (next != av->top &&
> > +               !inuse (next) &&
> > +               (unsigned long) (newsize = oldsize + nextsize) >=
> > +               (unsigned long) (nb))
> > +        {
> > +          newp = oldp;
> > +          unlink (next, bck, fwd);
> > +        }
> > +
> > +      /* allocate, copy, free */
> > +      else
> > +        {
> > +          newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
> > +          if (newmem == 0)
> > +            return 0; /* propagate failure */
> > +
> > +          newp = mem2chunk (newmem);
> > +          newsize = chunksize (newp);
> > +
> > +          /*
> > +             Avoid copy if newp is next chunk after oldp.
> > +           */
> > +          if (newp == next)
> > +            {
> > +              newsize += oldsize;
> > +              newp = oldp;
> > +            }
> > +          else
> > +            {
> > +              /*
> > +                 Unroll copy of <= 36 bytes (72 if 8-byte sizes)
> > +                 We know that contents have an odd number of
> > +                 INTERNAL_SIZE_T-sized words; minimally 3.
> > +               */
> > +
> > +              copysize = oldsize - SIZE_SZ;
> > +              s = (INTERNAL_SIZE_T *) (chunk2mem (oldp));
> > +              d = (INTERNAL_SIZE_T *) (newmem);
> > +              ncopies = copysize / sizeof (INTERNAL_SIZE_T);
> > +              assert (ncopies >= 3);
> > +
> > +              if (ncopies > 9)
> > +                memcpy (d, s, copysize);
> > +
> > +              else
> > +                {
> > +                  *(d + 0) = *(s + 0);
> > +                  *(d + 1) = *(s + 1);
> > +                  *(d + 2) = *(s + 2);
> > +                  if (ncopies > 4)
> > +                    {
> > +                      *(d + 3) = *(s + 3);
> > +                      *(d + 4) = *(s + 4);
> > +                      if (ncopies > 6)
> > +                        {
> > +                          *(d + 5) = *(s + 5);
> > +                          *(d + 6) = *(s + 6);
> > +                          if (ncopies > 8)
> > +                            {
> > +                              *(d + 7) = *(s + 7);
> > +                              *(d + 8) = *(s + 8);
> > +                            }
> > +                        }
> > +                    }
> > +                }
> > +
> > +              _int_free (av, oldp, 1);
> > +              check_inuse_chunk (av, newp);
> > +              return chunk2mem (newp);
> > +            }
> > +        }
> >      }
> > -  }
> >  
> >    /* If possible, free extra space in old or extended chunk */
> >  
> > -  assert((unsigned long)(newsize) >= (unsigned long)(nb));
> > +  assert ((unsigned long) (newsize) >= (unsigned long) (nb));
> >  
> >    remainder_size = newsize - nb;
> >  
> > -  if (remainder_size < MINSIZE) { /* not enough extra to split off */
> > -    set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -    set_inuse_bit_at_offset(newp, newsize);
> > -  }
> > -  else { /* split remainder */
> > -    remainder = chunk_at_offset(newp, nb);
> > -    set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -    set_head(remainder, remainder_size | PREV_INUSE |
> > -	     (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -    /* Mark remainder as inuse so free() won't complain */
> > -    set_inuse_bit_at_offset(remainder, remainder_size);
> > -    _int_free(av, remainder, 1);
> > -  }
> > +  if (remainder_size < MINSIZE)   /* not enough extra to split off */
> > +    {
> > +      set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +      set_inuse_bit_at_offset (newp, newsize);
> > +    }
> > +  else   /* split remainder */
> > +    {
> > +      remainder = chunk_at_offset (newp, nb);
> > +      set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +      set_head (remainder, remainder_size | PREV_INUSE |
> > +                (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +      /* Mark remainder as inuse so free() won't complain */
> > +      set_inuse_bit_at_offset (remainder, remainder_size);
> > +      _int_free (av, remainder, 1);
> > +    }
> >  
> > -  check_inuse_chunk(av, newp);
> > -  return chunk2mem(newp);
> > +  check_inuse_chunk (av, newp);
> > +  return chunk2mem (newp);
> >  }
> >  
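
For review purposes, _int_realloc is a three-way ladder: keep the old
chunk if it is already big enough, grow it in place (into top or into
a free successor), else fall back to allocate/copy/free.  With the
allocator internals stubbed out it is roughly:

  #include <stdlib.h>
  #include <string.h>

  /* Sketch only: grow_in_place() stands in for both the "expand into
     top" and "expand into next chunk" branches.  */
  static void *
  realloc_sketch (void *old, size_t oldsize, size_t newsize,
                  int (*grow_in_place) (void *, size_t))
  {
    if (oldsize >= newsize)
      return old;                   /* already big enough */
    if (grow_in_place (old, newsize))
      return old;                   /* no copy needed */
    void *new = malloc (newsize);   /* allocate, copy, free */
    if (new == NULL)
      return NULL;                  /* propagate failure */
    memcpy (new, old, oldsize);
    free (old);
    return new;
  }

The hand-unrolled copy in the fallback path exists because the caller
knows the payload is an odd number of INTERNAL_SIZE_T words, at least
3, so short copies can skip the memcpy call overhead.
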
> >  /*
> > -  ------------------------------ memalign ------------------------------
> > -*/
> > +   ------------------------------ memalign ------------------------------
> > + */
> >  
> > -static void*
> > -_int_memalign(mstate av, size_t alignment, size_t bytes)
> > +static void *
> > +_int_memalign (mstate av, size_t alignment, size_t bytes)
> >  {
> >    INTERNAL_SIZE_T nb;             /* padded  request size */
> > -  char*           m;              /* memory returned by malloc call */
> > -  mchunkptr       p;              /* corresponding chunk */
> > -  char*           brk;            /* alignment point within p */
> > -  mchunkptr       newp;           /* chunk to return */
> > +  char *m;                        /* memory returned by malloc call */
> > +  mchunkptr p;                    /* corresponding chunk */
> > +  char *brk;                      /* alignment point within p */
> > +  mchunkptr newp;                 /* chunk to return */
> >    INTERNAL_SIZE_T newsize;        /* its size */
> >    INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
> > -  mchunkptr       remainder;      /* spare room at end to split off */
> > -  unsigned long   remainder_size; /* its size */
> > +  mchunkptr remainder;            /* spare room at end to split off */
> > +  unsigned long remainder_size;   /* its size */
> >    INTERNAL_SIZE_T size;
> >  
> >  
> >  
> > -  checked_request2size(bytes, nb);
> > +  checked_request2size (bytes, nb);
> >  
> >    /*
> > -    Strategy: find a spot within that chunk that meets the alignment
> > -    request, and then possibly free the leading and trailing space.
> > -  */
> > +     Strategy: find a spot within that chunk that meets the alignment
> > +     request, and then possibly free the leading and trailing space.
> > +   */
> >  
> >  
> >    /* Call malloc with worst case padding to hit alignment. */
> >  
> > -  m  = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
> > -
> > -  if (m == 0) return 0; /* propagate failure */
> > -
> > -  p = mem2chunk(m);
> > -
> > -  if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
> > -
> > -    /*
> > -      Find an aligned spot inside chunk.  Since we need to give back
> > -      leading space in a chunk of at least MINSIZE, if the first
> > -      calculation places us at a spot with less than MINSIZE leader,
> > -      we can move to the next aligned spot -- we've allocated enough
> > -      total room so that this is always possible.
> > -    */
> > -
> > -    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
> > -			   -((signed long) alignment));
> > -    if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
> > -      brk += alignment;
> > -
> > -    newp = (mchunkptr)brk;
> > -    leadsize = brk - (char*)(p);
> > -    newsize = chunksize(p) - leadsize;
> > -
> > -    /* For mmapped chunks, just adjust offset */
> > -    if (chunk_is_mmapped(p)) {
> > -      newp->prev_size = p->prev_size + leadsize;
> > -      set_head(newp, newsize|IS_MMAPPED);
> > -      return chunk2mem(newp);
> > +  m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
> > +
> > +  if (m == 0)
> > +    return 0;           /* propagate failure */
> > +
> > +  p = mem2chunk (m);
> > +
> > +  if ((((unsigned long) (m)) % alignment) != 0)   /* misaligned */
> > +    {
> > +      /*
> > +         Find an aligned spot inside chunk.  Since we need to give back
> > +         leading space in a chunk of at least MINSIZE, if the first
> > +         calculation places us at a spot with less than MINSIZE leader,
> > +         we can move to the next aligned spot -- we've allocated enough
> > +         total room so that this is always possible.
> > +       */
> > +      brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
> > +                                - ((signed long) alignment));
> > +      if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
> > +        brk += alignment;
> > +
> > +      newp = (mchunkptr) brk;
> > +      leadsize = brk - (char *) (p);
> > +      newsize = chunksize (p) - leadsize;
> > +
> > +      /* For mmapped chunks, just adjust offset */
> > +      if (chunk_is_mmapped (p))
> > +        {
> > +          newp->prev_size = p->prev_size + leadsize;
> > +          set_head (newp, newsize | IS_MMAPPED);
> > +          return chunk2mem (newp);
> > +        }
> > +
> > +      /* Otherwise, give back leader, use the rest */
> > +      set_head (newp, newsize | PREV_INUSE |
> > +                (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +      set_inuse_bit_at_offset (newp, newsize);
> > +      set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +      _int_free (av, p, 1);
> > +      p = newp;
> > +
> > +      assert (newsize >= nb &&
> > +              (((unsigned long) (chunk2mem (p))) % alignment) == 0);
> >      }
> >  
> > -    /* Otherwise, give back leader, use the rest */
> > -    set_head(newp, newsize | PREV_INUSE |
> > -	     (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -    set_inuse_bit_at_offset(newp, newsize);
> > -    set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -    _int_free(av, p, 1);
> > -    p = newp;
> > -
> > -    assert (newsize >= nb &&
> > -	    (((unsigned long)(chunk2mem(p))) % alignment) == 0);
> > -  }
> > -
> >    /* Also give back spare room at the end */
> > -  if (!chunk_is_mmapped(p)) {
> > -    size = chunksize(p);
> > -    if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
> > -      remainder_size = size - nb;
> > -      remainder = chunk_at_offset(p, nb);
> > -      set_head(remainder, remainder_size | PREV_INUSE |
> > -	       (av != &main_arena ? NON_MAIN_ARENA : 0));
> > -      set_head_size(p, nb);
> > -      _int_free(av, remainder, 1);
> > +  if (!chunk_is_mmapped (p))
> > +    {
> > +      size = chunksize (p);
> > +      if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
> > +        {
> > +          remainder_size = size - nb;
> > +          remainder = chunk_at_offset (p, nb);
> > +          set_head (remainder, remainder_size | PREV_INUSE |
> > +                    (av != &main_arena ? NON_MAIN_ARENA : 0));
> > +          set_head_size (p, nb);
> > +          _int_free (av, remainder, 1);
> > +        }
> >      }
> > -  }
> >  
> > -  check_inuse_chunk(av, p);
> > -  return chunk2mem(p);
> > +  check_inuse_chunk (av, p);
> > +  return chunk2mem (p);
> >  }
> >  
> >  
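
The memalign strategy hinges on one arithmetic trick: over-allocate by
alignment + MINSIZE, then round the returned pointer up with
(m + alignment - 1) & -alignment, which works for any power-of-two
alignment.  A toy version that stashes the raw pointer just below the
aligned block (the real _int_memalign instead gives the leader back to
the allocator as a free chunk):

  #include <stdint.h>
  #include <stdlib.h>

  /* Round ADDR up to a multiple of ALIGN (a power of two).  */
  static inline uintptr_t
  align_up (uintptr_t addr, uintptr_t align)
  {
    return (addr + align - 1) & -align;
  }

  /* Illustrative only; release with free (((void **) mem)[-1]).  */
  static void *
  toy_memalign (size_t alignment, size_t bytes)
  {
    void *raw = malloc (bytes + alignment + sizeof (void *));
    if (raw == NULL)
      return NULL;
    uintptr_t mem = align_up ((uintptr_t) raw + sizeof (void *),
                              alignment);
    ((void **) mem)[-1] = raw;
    return (void *) mem;
  }
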
> >  /*
> > -  ------------------------------ malloc_trim ------------------------------
> > -*/
> > +   ------------------------------ malloc_trim ------------------------------
> > + */
> >  
> > -static int mtrim(mstate av, size_t pad)
> > +static int
> > +mtrim (mstate av, size_t pad)
> >  {
> >    /* Ensure initialization/consolidation */
> >    malloc_consolidate (av);
> >  
> > -  const size_t ps = GLRO(dl_pagesize);
> > +  const size_t ps = GLRO (dl_pagesize);
> >    int psindex = bin_index (ps);
> >    const size_t psm1 = ps - 1;
> >  
> > @@ -4348,42 +4501,43 @@ static int mtrim(mstate av, size_t pad)
> >    for (int i = 1; i < NBINS; ++i)
> >      if (i == 1 || i >= psindex)
> >        {
> > -	mbinptr bin = bin_at (av, i);
> > +        mbinptr bin = bin_at (av, i);
> >  
> > -	for (mchunkptr p = last (bin); p != bin; p = p->bk)
> > -	  {
> > -	    INTERNAL_SIZE_T size = chunksize (p);
> > +        for (mchunkptr p = last (bin); p != bin; p = p->bk)
> > +          {
> > +            INTERNAL_SIZE_T size = chunksize (p);
> >  
> > -	    if (size > psm1 + sizeof (struct malloc_chunk))
> > -	      {
> > -		/* See whether the chunk contains at least one unused page.  */
> > -		char *paligned_mem = (char *) (((uintptr_t) p
> > -						+ sizeof (struct malloc_chunk)
> > -						+ psm1) & ~psm1);
> > +            if (size > psm1 + sizeof (struct malloc_chunk))
> > +              {
> > +                /* See whether the chunk contains at least one unused page.  */
> > +                char *paligned_mem = (char *) (((uintptr_t) p
> > +                                                + sizeof (struct malloc_chunk)
> > +                                                + psm1) & ~psm1);
> >  
> > -		assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
> > -		assert ((char *) p + size > paligned_mem);
> > +                assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
> > +                assert ((char *) p + size > paligned_mem);
> >  
> > -		/* This is the size we could potentially free.  */
> > -		size -= paligned_mem - (char *) p;
> > +                /* This is the size we could potentially free.  */
> > +                size -= paligned_mem - (char *) p;
> >  
> > -		if (size > psm1)
> > -		  {
> > +                if (size > psm1)
> > +                  {
> >  #ifdef MALLOC_DEBUG
> > -		    /* When debugging we simulate destroying the memory
> > -		       content.  */
> > -		    memset (paligned_mem, 0x89, size & ~psm1);
> > +                    /* When debugging we simulate destroying the memory
> > +                       content.  */
> > +                    memset (paligned_mem, 0x89, size & ~psm1);
> >  #endif
> > -		    __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
> > +                    __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
> >  
> > -		    result = 1;
> > -		  }
> > -	      }
> > -	  }
> > +                    result = 1;
> > +                  }
> > +              }
> > +          }
> >        }
> >  
> >  #ifndef MORECORE_CANNOT_TRIM
> >    return result | (av == &main_arena ? systrim (pad, av) : 0);
> > +
> >  #else
> >    return result;
> >  #endif
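
Since the reindentation buries it: the interesting part of mtrim is
that it walks free chunks and hands whole pages inside them back to
the kernel with __madvise (MADV_DONTNEED), without unlinking anything.
The page rounding, compressed into one helper (illustrative; plain
Linux madvise assumed):

  #include <stddef.h>
  #include <stdint.h>
  #include <sys/mman.h>

  /* Release the whole pages strictly inside the free block
     [START, START + LEN); PS is the page size, a power of two.
     Returns 1 if anything was released.  */
  static int
  release_inner_pages (char *start, size_t len, size_t ps)
  {
    size_t psm1 = ps - 1;
    char *aligned = (char *) (((uintptr_t) start + psm1) & ~psm1);
    if (aligned >= start + len)
      return 0;                   /* no page boundary inside */
    size_t avail = (size_t) (start + len - aligned);
    if (avail < ps)
      return 0;                   /* less than one full page */
    madvise (aligned, avail & ~psm1, MADV_DONTNEED);
    return 1;
  }
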
> > @@ -4391,11 +4545,11 @@ static int mtrim(mstate av, size_t pad)
> >  
> >  
> >  int
> > -__malloc_trim(size_t s)
> > +__malloc_trim (size_t s)
> >  {
> >    int result = 0;
> >  
> > -  if(__malloc_initialized < 0)
> > +  if (__malloc_initialized < 0)
> >      ptmalloc_init ();
> >  
> >    mstate ar_ptr = &main_arena;
> > @@ -4414,43 +4568,45 @@ __malloc_trim(size_t s)
> >  
> >  
> >  /*
> > -  ------------------------- malloc_usable_size -------------------------
> > -*/
> > +   ------------------------- malloc_usable_size -------------------------
> > + */
> >  
> >  static size_t
> > -musable(void* mem)
> > +musable (void *mem)
> >  {
> >    mchunkptr p;
> > -  if (mem != 0) {
> > -    p = mem2chunk(mem);
> > -
> > -    if (__builtin_expect(using_malloc_checking == 1, 0))
> > -      return malloc_check_get_size(p);
> > -    if (chunk_is_mmapped(p))
> > -      return chunksize(p) - 2*SIZE_SZ;
> > -    else if (inuse(p))
> > -      return chunksize(p) - SIZE_SZ;
> > -  }
> > +  if (mem != 0)
> > +    {
> > +      p = mem2chunk (mem);
> > +
> > +      if (__builtin_expect (using_malloc_checking == 1, 0))
> > +        return malloc_check_get_size (p);
> > +
> > +      if (chunk_is_mmapped (p))
> > +        return chunksize (p) - 2 * SIZE_SZ;
> > +      else if (inuse (p))
> > +        return chunksize (p) - SIZE_SZ;
> > +    }
> >    return 0;
> >  }
> >  
> >  
> >  size_t
> > -__malloc_usable_size(void* m)
> > +__malloc_usable_size (void *m)
> >  {
> >    size_t result;
> >  
> > -  result = musable(m);
> > +  result = musable (m);
> >    return result;
> >  }
> >  
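
A behavioural note on the musable hunk: the usable size is derived
purely from the chunk header, so an inuse chunk reports
chunksize - SIZE_SZ and an mmapped one chunksize - 2 * SIZE_SZ.
Observable from user code (behaviour, not an API guarantee):

  #include <malloc.h>
  #include <stdio.h>
  #include <stdlib.h>

  int
  main (void)
  {
    void *p = malloc (10);
    /* Typically prints a value >= 10; the exact number depends on
       the chunk layout described above.  */
    printf ("%zu\n", malloc_usable_size (p));
    free (p);
    return 0;
  }
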
> >  /*
> > -  ------------------------------ mallinfo ------------------------------
> > -  Accumulate malloc statistics for arena AV into M.
> > -*/
> > +   ------------------------------ mallinfo ------------------------------
> > +   Accumulate malloc statistics for arena AV into M.
> > + */
> >  
> >  static void
> > -int_mallinfo(mstate av, struct mallinfo *m)
> > +int_mallinfo (mstate av, struct mallinfo *m)
> >  {
> >    size_t i;
> >    mbinptr b;
> > @@ -4461,35 +4617,40 @@ int_mallinfo(mstate av, struct mallinfo *m)
> >    int nfastblocks;
> >  
> >    /* Ensure initialization */
> > -  if (av->top == 0)  malloc_consolidate(av);
> > +  if (av->top == 0)
> > +    malloc_consolidate (av);
> >  
> > -  check_malloc_state(av);
> > +  check_malloc_state (av);
> >  
> >    /* Account for top */
> > -  avail = chunksize(av->top);
> > +  avail = chunksize (av->top);
> >    nblocks = 1;  /* top always exists */
> >  
> >    /* traverse fastbins */
> >    nfastblocks = 0;
> >    fastavail = 0;
> >  
> > -  for (i = 0; i < NFASTBINS; ++i) {
> > -    for (p = fastbin (av, i); p != 0; p = p->fd) {
> > -      ++nfastblocks;
> > -      fastavail += chunksize(p);
> > +  for (i = 0; i < NFASTBINS; ++i)
> > +    {
> > +      for (p = fastbin (av, i); p != 0; p = p->fd)
> > +        {
> > +          ++nfastblocks;
> > +          fastavail += chunksize (p);
> > +        }
> >      }
> > -  }
> >  
> >    avail += fastavail;
> >  
> >    /* traverse regular bins */
> > -  for (i = 1; i < NBINS; ++i) {
> > -    b = bin_at(av, i);
> > -    for (p = last(b); p != b; p = p->bk) {
> > -      ++nblocks;
> > -      avail += chunksize(p);
> > +  for (i = 1; i < NBINS; ++i)
> > +    {
> > +      b = bin_at (av, i);
> > +      for (p = last (b); p != b; p = p->bk)
> > +        {
> > +          ++nblocks;
> > +          avail += chunksize (p);
> > +        }
> >      }
> > -  }
> >  
> >    m->smblks += nfastblocks;
> >    m->ordblks += nblocks;
> > @@ -4502,35 +4663,38 @@ int_mallinfo(mstate av, struct mallinfo *m)
> >        m->hblks = mp_.n_mmaps;
> >        m->hblkhd = mp_.mmapped_mem;
> >        m->usmblks = mp_.max_total_mem;
> > -      m->keepcost = chunksize(av->top);
> > +      m->keepcost = chunksize (av->top);
> >      }
> >  }
> >  
> >  
> > -struct mallinfo __libc_mallinfo()
> > +struct mallinfo
> > +__libc_mallinfo ()
> >  {
> >    struct mallinfo m;
> >    mstate ar_ptr;
> >  
> > -  if(__malloc_initialized < 0)
> > +  if (__malloc_initialized < 0)
> >      ptmalloc_init ();
> >  
> > -  memset(&m, 0, sizeof (m));
> > +  memset (&m, 0, sizeof (m));
> >    ar_ptr = &main_arena;
> > -  do {
> > -    (void)mutex_lock(&ar_ptr->mutex);
> > -    int_mallinfo(ar_ptr, &m);
> > -    (void)mutex_unlock(&ar_ptr->mutex);
> > +  do
> > +    {
> > +      (void) mutex_lock (&ar_ptr->mutex);
> > +      int_mallinfo (ar_ptr, &m);
> > +      (void) mutex_unlock (&ar_ptr->mutex);
> >  
> > -    ar_ptr = ar_ptr->next;
> > -  } while (ar_ptr != &main_arena);
> > +      ar_ptr = ar_ptr->next;
> > +    }
> > +  while (ar_ptr != &main_arena);
> >  
> >    return m;
> >  }
> >  
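
The mallinfo and malloc_stats hunks share the same traversal idiom:
walk the circular arena list starting at main_arena, taking each
arena's mutex in turn.  In isolation (simplified types; a pthread
mutex stands in for glibc's internal one):

  #include <pthread.h>

  struct arena { pthread_mutex_t mutex; struct arena *next; };

  /* Call VISIT on every arena with its lock held; the list is
     circular, so the walk stops when it returns to MAIN_ARENA.  */
  static void
  for_each_arena (struct arena *main_arena,
                  void (*visit) (struct arena *))
  {
    struct arena *ar_ptr = main_arena;
    do
      {
        pthread_mutex_lock (&ar_ptr->mutex);
        visit (ar_ptr);
        pthread_mutex_unlock (&ar_ptr->mutex);
        ar_ptr = ar_ptr->next;
      }
    while (ar_ptr != main_arena);
  }
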
> >  /*
> > -  ------------------------------ malloc_stats ------------------------------
> > -*/
> > +   ------------------------------ malloc_stats ------------------------------
> > + */
> >  
> >  void
> >  __malloc_stats (void)
> > @@ -4542,48 +4706,50 @@ __malloc_stats (void)
> >    long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
> >  #endif
> >  
> > -  if(__malloc_initialized < 0)
> > +  if (__malloc_initialized < 0)
> >      ptmalloc_init ();
> >    _IO_flockfile (stderr);
> >    int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
> >    ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
> > -  for (i=0, ar_ptr = &main_arena;; i++) {
> > -    struct mallinfo mi;
> > -
> > -    memset(&mi, 0, sizeof(mi));
> > -    (void)mutex_lock(&ar_ptr->mutex);
> > -    int_mallinfo(ar_ptr, &mi);
> > -    fprintf(stderr, "Arena %d:\n", i);
> > -    fprintf(stderr, "system bytes     = %10u\n", (unsigned int)mi.arena);
> > -    fprintf(stderr, "in use bytes     = %10u\n", (unsigned int)mi.uordblks);
> > +  for (i = 0, ar_ptr = &main_arena;; i++)
> > +    {
> > +      struct mallinfo mi;
> > +
> > +      memset (&mi, 0, sizeof (mi));
> > +      (void) mutex_lock (&ar_ptr->mutex);
> > +      int_mallinfo (ar_ptr, &mi);
> > +      fprintf (stderr, "Arena %d:\n", i);
> > +      fprintf (stderr, "system bytes     = %10u\n", (unsigned int) mi.arena);
> > +      fprintf (stderr, "in use bytes     = %10u\n", (unsigned int) mi.uordblks);
> >  #if MALLOC_DEBUG > 1
> > -    if (i > 0)
> > -      dump_heap(heap_for_ptr(top(ar_ptr)));
> > +      if (i > 0)
> > +        dump_heap (heap_for_ptr (top (ar_ptr)));
> >  #endif
> > -    system_b += mi.arena;
> > -    in_use_b += mi.uordblks;
> > +      system_b += mi.arena;
> > +      in_use_b += mi.uordblks;
> >  #if THREAD_STATS
> > -    stat_lock_direct += ar_ptr->stat_lock_direct;
> > -    stat_lock_loop += ar_ptr->stat_lock_loop;
> > -    stat_lock_wait += ar_ptr->stat_lock_wait;
> > +      stat_lock_direct += ar_ptr->stat_lock_direct;
> > +      stat_lock_loop += ar_ptr->stat_lock_loop;
> > +      stat_lock_wait += ar_ptr->stat_lock_wait;
> >  #endif
> > -    (void)mutex_unlock(&ar_ptr->mutex);
> > -    ar_ptr = ar_ptr->next;
> > -    if(ar_ptr == &main_arena) break;
> > -  }
> > -  fprintf(stderr, "Total (incl. mmap):\n");
> > -  fprintf(stderr, "system bytes     = %10u\n", system_b);
> > -  fprintf(stderr, "in use bytes     = %10u\n", in_use_b);
> > -  fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
> > -  fprintf(stderr, "max mmap bytes   = %10lu\n",
> > -	  (unsigned long)mp_.max_mmapped_mem);
> > +      (void) mutex_unlock (&ar_ptr->mutex);
> > +      ar_ptr = ar_ptr->next;
> > +      if (ar_ptr == &main_arena)
> > +        break;
> > +    }
> > +  fprintf (stderr, "Total (incl. mmap):\n");
> > +  fprintf (stderr, "system bytes     = %10u\n", system_b);
> > +  fprintf (stderr, "in use bytes     = %10u\n", in_use_b);
> > +  fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
> > +  fprintf (stderr, "max mmap bytes   = %10lu\n",
> > +           (unsigned long) mp_.max_mmapped_mem);
> >  #if THREAD_STATS
> > -  fprintf(stderr, "heaps created    = %10d\n",  stat_n_heaps);
> > -  fprintf(stderr, "locked directly  = %10ld\n", stat_lock_direct);
> > -  fprintf(stderr, "locked in loop   = %10ld\n", stat_lock_loop);
> > -  fprintf(stderr, "locked waiting   = %10ld\n", stat_lock_wait);
> > -  fprintf(stderr, "locked total     = %10ld\n",
> > -	  stat_lock_direct + stat_lock_loop + stat_lock_wait);
> > +  fprintf (stderr, "heaps created    = %10d\n", stat_n_heaps);
> > +  fprintf (stderr, "locked directly  = %10ld\n", stat_lock_direct);
> > +  fprintf (stderr, "locked in loop   = %10ld\n", stat_lock_loop);
> > +  fprintf (stderr, "locked waiting   = %10ld\n", stat_lock_wait);
> > +  fprintf (stderr, "locked total     = %10ld\n",
> > +           stat_lock_direct + stat_lock_loop + stat_lock_wait);
> >  #endif
> >    ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
> >    _IO_funlockfile (stderr);
> > @@ -4591,201 +4757,203 @@ __malloc_stats (void)
> >  
> >  
> >  /*
> > -  ------------------------------ mallopt ------------------------------
> > -*/
> > +   ------------------------------ mallopt ------------------------------
> > + */
> >  
> > -int __libc_mallopt(int param_number, int value)
> > +int
> > +__libc_mallopt (int param_number, int value)
> >  {
> >    mstate av = &main_arena;
> >    int res = 1;
> >  
> > -  if(__malloc_initialized < 0)
> > +  if (__malloc_initialized < 0)
> >      ptmalloc_init ();
> > -  (void)mutex_lock(&av->mutex);
> > +  (void) mutex_lock (&av->mutex);
> >    /* Ensure initialization/consolidation */
> > -  malloc_consolidate(av);
> > +  malloc_consolidate (av);
> >  
> >    LIBC_PROBE (memory_mallopt, 2, param_number, value);
> >  
> > -  switch(param_number) {
> > -  case M_MXFAST:
> > -    if (value >= 0 && value <= MAX_FAST_SIZE)
> > -      {
> > -	LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
> > -	set_max_fast(value);
> > -      }
> > -    else
> > -      res = 0;
> > -    break;
> > -
> > -  case M_TRIM_THRESHOLD:
> > -    LIBC_PROBE (memory_mallopt_trim_threshold, 3, value,
> > -		mp_.trim_threshold, mp_.no_dyn_threshold);
> > -    mp_.trim_threshold = value;
> > -    mp_.no_dyn_threshold = 1;
> > -    break;
> > -
> > -  case M_TOP_PAD:
> > -    LIBC_PROBE (memory_mallopt_top_pad, 3, value,
> > -		mp_.top_pad, mp_.no_dyn_threshold);
> > -    mp_.top_pad = value;
> > -    mp_.no_dyn_threshold = 1;
> > -    break;
> > -
> > -  case M_MMAP_THRESHOLD:
> > -    /* Forbid setting the threshold too high. */
> > -    if((unsigned long)value > HEAP_MAX_SIZE/2)
> > -      res = 0;
> > -    else
> > -      {
> > -	LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value,
> > -		    mp_.mmap_threshold, mp_.no_dyn_threshold);
> > -	mp_.mmap_threshold = value;
> > -	mp_.no_dyn_threshold = 1;
> > -      }
> > -    break;
> > -
> > -  case M_MMAP_MAX:
> > -    LIBC_PROBE (memory_mallopt_mmap_max, 3, value,
> > -		mp_.n_mmaps_max, mp_.no_dyn_threshold);
> > -    mp_.n_mmaps_max = value;
> > -    mp_.no_dyn_threshold = 1;
> > -    break;
> > -
> > -  case M_CHECK_ACTION:
> > -    LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
> > -    check_action = value;
> > -    break;
> > -
> > -  case M_PERTURB:
> > -    LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
> > -    perturb_byte = value;
> > -    break;
> > -
> > -  case M_ARENA_TEST:
> > -    if (value > 0)
> > -      {
> > -	LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
> > -	mp_.arena_test = value;
> > -      }
> > -    break;
> > -
> > -  case M_ARENA_MAX:
> > -    if (value > 0)
> > -      {
> > -	LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
> > -	mp_.arena_max = value;
> > -      }
> > -    break;
> > -  }
> > -  (void)mutex_unlock(&av->mutex);
> > +  switch (param_number)
> > +    {
> > +    case M_MXFAST:
> > +      if (value >= 0 && value <= MAX_FAST_SIZE)
> > +        {
> > +          LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
> > +          set_max_fast (value);
> > +        }
> > +      else
> > +        res = 0;
> > +      break;
> > +
> > +    case M_TRIM_THRESHOLD:
> > +      LIBC_PROBE (memory_mallopt_trim_threshold, 3, value,
> > +                  mp_.trim_threshold, mp_.no_dyn_threshold);
> > +      mp_.trim_threshold = value;
> > +      mp_.no_dyn_threshold = 1;
> > +      break;
> > +
> > +    case M_TOP_PAD:
> > +      LIBC_PROBE (memory_mallopt_top_pad, 3, value,
> > +                  mp_.top_pad, mp_.no_dyn_threshold);
> > +      mp_.top_pad = value;
> > +      mp_.no_dyn_threshold = 1;
> > +      break;
> > +
> > +    case M_MMAP_THRESHOLD:
> > +      /* Forbid setting the threshold too high.  */
> > +      if ((unsigned long) value > HEAP_MAX_SIZE / 2)
> > +        res = 0;
> > +      else
> > +        {
> > +          LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value,
> > +                      mp_.mmap_threshold, mp_.no_dyn_threshold);
> > +          mp_.mmap_threshold = value;
> > +          mp_.no_dyn_threshold = 1;
> > +        }
> > +      break;
> > +
> > +    case M_MMAP_MAX:
> > +      LIBC_PROBE (memory_mallopt_mmap_max, 3, value,
> > +                  mp_.n_mmaps_max, mp_.no_dyn_threshold);
> > +      mp_.n_mmaps_max = value;
> > +      mp_.no_dyn_threshold = 1;
> > +      break;
> > +
> > +    case M_CHECK_ACTION:
> > +      LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
> > +      check_action = value;
> > +      break;
> > +
> > +    case M_PERTURB:
> > +      LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
> > +      perturb_byte = value;
> > +      break;
> > +
> > +    case M_ARENA_TEST:
> > +      if (value > 0)
> > +        {
> > +          LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
> > +          mp_.arena_test = value;
> > +        }
> > +      break;
> > +
> > +    case M_ARENA_MAX:
> > +      if (value > 0)
> > +        {
> > +          LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
> > +          mp_.arena_max = value;
> > +        }
> > +      break;
> > +    }
> > +  (void) mutex_unlock (&av->mutex);
> >    return res;
> >  }
> >  libc_hidden_def (__libc_mallopt)
> >  
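
The mallopt switch above is behavior-preserving; a quick way to
convince yourself is through the public wrapper (values here are
arbitrary):

  #include <malloc.h>
  #include <stdio.h>

  int
  main (void)
  {
    /* Returns 1 on success, 0 on failure, as in __libc_mallopt.  */
    if (mallopt (M_MMAP_THRESHOLD, 256 * 1024) == 0)
      fprintf (stderr, "M_MMAP_THRESHOLD rejected\n");

    /* Negative M_MXFAST values must still be rejected.  */
    if (mallopt (M_MXFAST, -1) == 0)
      fprintf (stderr, "M_MXFAST rejected, as expected\n");

    return 0;
  }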
> >  
> >  /*
> > -  -------------------- Alternative MORECORE functions --------------------
> > -*/
> > +   -------------------- Alternative MORECORE functions --------------------
> > + */
> >  
> >  
> >  /*
> > -  General Requirements for MORECORE.
> > +   General Requirements for MORECORE.
> >  
> > -  The MORECORE function must have the following properties:
> > +   The MORECORE function must have the following properties:
> >  
> > -  If MORECORE_CONTIGUOUS is false:
> > +   If MORECORE_CONTIGUOUS is false:
> >  
> > -    * MORECORE must allocate in multiples of pagesize. It will
> > +    * MORECORE must allocate in multiples of pagesize. It will
> >        only be called with arguments that are multiples of pagesize.
> >  
> > -    * MORECORE(0) must return an address that is at least
> > +    * MORECORE(0) must return an address that is at least
> >        MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
> >  
> > -  else (i.e. If MORECORE_CONTIGUOUS is true):
> > +   else (i.e. If MORECORE_CONTIGUOUS is true):
> >  
> > -    * Consecutive calls to MORECORE with positive arguments
> > +    * Consecutive calls to MORECORE with positive arguments
> >        return increasing addresses, indicating that space has been
> >        contiguously extended.
> >  
> > -    * MORECORE need not allocate in multiples of pagesize.
> > +    * MORECORE need not allocate in multiples of pagesize.
> >        Calls to MORECORE need not have args of multiples of pagesize.
> >  
> > -    * MORECORE need not page-align.
> > +    * MORECORE need not page-align.
> >  
> > -  In either case:
> > +   In either case:
> >  
> > -    * MORECORE may allocate more memory than requested. (Or even less,
> > +    * MORECORE may allocate more memory than requested. (Or even less,
> >        but this will generally result in a malloc failure.)
> >  
> > -    * MORECORE must not allocate memory when given argument zero, but
> > +    * MORECORE must not allocate memory when given argument zero, but
> >        instead return one past the end address of memory from previous
> >        nonzero call. This malloc does NOT call MORECORE(0)
> >        until at least one call with positive arguments is made, so
> >        the initial value returned is not important.
> >  
> > -    * Even though consecutive calls to MORECORE need not return contiguous
> > +    * Even though consecutive calls to MORECORE need not return contiguous
> >        addresses, it must be OK for malloc'ed chunks to span multiple
> >        regions in those cases where they do happen to be contiguous.
> >  
> > -    * MORECORE need not handle negative arguments -- it may instead
> > +    * MORECORE need not handle negative arguments -- it may instead
> >        just return MORECORE_FAILURE when given negative arguments.
> >        Negative arguments are always multiples of pagesize. MORECORE
> >        must not misinterpret negative args as large positive unsigned
> >        args. You can suppress all such calls from even occurring by defining
> >        MORECORE_CANNOT_TRIM,
> >  
> > -  There is some variation across systems about the type of the
> > -  argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
> > -  actually be size_t, because sbrk supports negative args, so it is
> > -  normally the signed type of the same width as size_t (sometimes
> > -  declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
> > -  matter though. Internally, we use "long" as arguments, which should
> > -  work across all reasonable possibilities.
> > -
> > -  Additionally, if MORECORE ever returns failure for a positive
> > -  request, then mmap is used as a noncontiguous system allocator. This
> > -  is a useful backup strategy for systems with holes in address spaces
> > -  -- in this case sbrk cannot contiguously expand the heap, but mmap
> > -  may be able to map noncontiguous space.
> > -
> > -  If you'd like mmap to ALWAYS be used, you can define MORECORE to be
> > -  a function that always returns MORECORE_FAILURE.
> > -
> > -  If you are using this malloc with something other than sbrk (or its
> > -  emulation) to supply memory regions, you probably want to set
> > -  MORECORE_CONTIGUOUS as false.  As an example, here is a custom
> > -  allocator kindly contributed for pre-OSX macOS.  It uses virtually
> > -  but not necessarily physically contiguous non-paged memory (locked
> > -  in, present and won't get swapped out).  You can use it by
> > -  uncommenting this section, adding some #includes, and setting up the
> > -  appropriate defines above:
> > -
> > -      #define MORECORE osMoreCore
> > -      #define MORECORE_CONTIGUOUS 0
> > -
> > -  There is also a shutdown routine that should somehow be called for
> > -  cleanup upon program exit.
> > -
> > -  #define MAX_POOL_ENTRIES 100
> > -  #define MINIMUM_MORECORE_SIZE  (64 * 1024)
> > -  static int next_os_pool;
> > -  void *our_os_pools[MAX_POOL_ENTRIES];
> > -
> > -  void *osMoreCore(int size)
> > -  {
> > +   There is some variation across systems about the type of the
> > +   argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
> > +   actually be size_t, because sbrk supports negative args, so it is
> > +   normally the signed type of the same width as size_t (sometimes
> > +   declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
> > +   matter though. Internally, we use "long" as arguments, which should
> > +   work across all reasonable possibilities.
> > +
> > +   Additionally, if MORECORE ever returns failure for a positive
> > +   request, then mmap is used as a noncontiguous system allocator. This
> > +   is a useful backup strategy for systems with holes in address spaces
> > +   -- in this case sbrk cannot contiguously expand the heap, but mmap
> > +   may be able to map noncontiguous space.
> > +
> > +   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
> > +   a function that always returns MORECORE_FAILURE.
> > +
> > +   If you are using this malloc with something other than sbrk (or its
> > +   emulation) to supply memory regions, you probably want to set
> > +   MORECORE_CONTIGUOUS as false.  As an example, here is a custom
> > +   allocator kindly contributed for pre-OSX macOS.  It uses virtually
> > +   but not necessarily physically contiguous non-paged memory (locked
> > +   in, present and won't get swapped out).  You can use it by
> > +   uncommenting this section, adding some #includes, and setting up the
> > +   appropriate defines above:
> > +
> > +   #define MORECORE osMoreCore
> > +   #define MORECORE_CONTIGUOUS 0
> > +
> > +   There is also a shutdown routine that should somehow be called for
> > +   cleanup upon program exit.
> > +
> > +   #define MAX_POOL_ENTRIES 100
> > +   #define MINIMUM_MORECORE_SIZE  (64 * 1024)
> > +   static int next_os_pool;
> > +   void *our_os_pools[MAX_POOL_ENTRIES];
> > +
> > +   void *osMoreCore(int size)
> > +   {
> >      void *ptr = 0;
> >      static void *sbrk_top = 0;
> >  
> >      if (size > 0)
> >      {
> >        if (size < MINIMUM_MORECORE_SIZE)
> > -	 size = MINIMUM_MORECORE_SIZE;
> > +         size = MINIMUM_MORECORE_SIZE;
> >        if (CurrentExecutionLevel() == kTaskLevel)
> > -	 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
> > +         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
> >        if (ptr == 0)
> >        {
> > -	return (void *) MORECORE_FAILURE;
> > +        return (void *) MORECORE_FAILURE;
> >        }
> >        // save ptrs so they can be freed during cleanup
> >        our_os_pools[next_os_pool] = ptr;
> > @@ -4803,24 +4971,24 @@ libc_hidden_def (__libc_mallopt)
> >      {
> >        return sbrk_top;
> >      }
> > -  }
> > +   }
> >  
> > -  // cleanup any allocated memory pools
> > -  // called as last thing before shutting down driver
> > +   // cleanup any allocated memory pools
> > +   // called as last thing before shutting down driver
> >  
> > -  void osCleanupMem(void)
> > -  {
> > +   void osCleanupMem(void)
> > +   {
> >      void **ptr;
> >  
> >      for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
> >        if (*ptr)
> >        {
> > -	 PoolDeallocate(*ptr);
> > -	 *ptr = 0;
> > +         PoolDeallocate(*ptr);
> > +         *ptr = 0;
> >        }
> > -  }
> > +   }
> >  
> > -*/
> > + */
> >  
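
To make the "always use mmap" remark above concrete: a hypothetical
build-time override is a MORECORE that fails every positive request.
Sketch only -- MORECORE_FAILURE is supplied by malloc.c, and the
defines have to be visible where malloc.c is compiled:

  #include <stddef.h>   /* ptrdiff_t */

  static void *
  fail_morecore (ptrdiff_t increment)
  {
    /* Never extend the heap; sysmalloc then falls back to mmap.  */
    return (void *) MORECORE_FAILURE;
  }

  #define MORECORE fail_morecore
  #define MORECORE_CONTIGUOUS 0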
> >  
> >  /* Helper code.  */
> > @@ -4828,7 +4996,7 @@ libc_hidden_def (__libc_mallopt)
> >  extern char **__libc_argv attribute_hidden;
> >  
> >  static void
> > -malloc_printerr(int action, const char *str, void *ptr)
> > +malloc_printerr (int action, const char *str, void *ptr)
> >  {
> >    if ((action & 5) == 5)
> >      __libc_message (action & 2, "%s\n", str);
> > @@ -4839,10 +5007,10 @@ malloc_printerr(int action, const char *str, void *ptr)
> >        buf[sizeof (buf) - 1] = '\0';
> >        char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
> >        while (cp > buf)
> > -	*--cp = '0';
> > +        *--cp = '0';
> >  
> >        __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
> > -		      __libc_argv[0] ?: "<unknown>", str, cp);
> > +                      __libc_argv[0] ?: "<unknown>", str, cp);
> >      }
> >    else if (action & 2)
> >      abort ();
> > @@ -4865,10 +5033,11 @@ __posix_memalign (void **memptr, size_t alignment, size_t size)
> >    void *address = RETURN_ADDRESS (0);
> >    mem = _mid_memalign (alignment, size, address);
> >  
> > -  if (mem != NULL) {
> > -    *memptr = mem;
> > -    return 0;
> > -  }
> > +  if (mem != NULL)
> > +    {
> > +      *memptr = mem;
> > +      return 0;
> > +    }
> >  
> >    return ENOMEM;
> >  }
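
(The EINVAL check for a bad alignment sits earlier in the function,
outside this hunk; only the tail changes.)  Typical use, for
reference:

  #include <stdlib.h>
  #include <stdio.h>

  int
  main (void)
  {
    void *p = NULL;

    /* Alignment must be a power of two multiple of sizeof (void *).  */
    int err = posix_memalign (&p, 64, 1024);
    if (err != 0)           /* EINVAL or ENOMEM; errno is left alone.  */
      fprintf (stderr, "posix_memalign failed: %d\n", err);
    else
      free (p);
    return 0;
  }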
> > @@ -4892,7 +5061,8 @@ malloc_info (int options, FILE *fp)
> >    size_t total_aspace = 0;
> >    size_t total_aspace_mprotect = 0;
> >  
> > -  void mi_arena (mstate ar_ptr)
> > +  void
> > +  mi_arena (mstate ar_ptr)
> >    {
> >      fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
> >  
> > @@ -4913,28 +5083,28 @@ malloc_info (int options, FILE *fp)
> >  
> >      for (size_t i = 0; i < NFASTBINS; ++i)
> >        {
> > -	mchunkptr p = fastbin (ar_ptr, i);
> > -	if (p != NULL)
> > -	  {
> > -	    size_t nthissize = 0;
> > -	    size_t thissize = chunksize (p);
> > -
> > -	    while (p != NULL)
> > -	      {
> > -		++nthissize;
> > -		p = p->fd;
> > -	      }
> > -
> > -	    fastavail += nthissize * thissize;
> > -	    nfastblocks += nthissize;
> > -	    sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
> > -	    sizes[i].to = thissize;
> > -	    sizes[i].count = nthissize;
> > -	  }
> > -	else
> > -	  sizes[i].from = sizes[i].to = sizes[i].count = 0;
> > -
> > -	sizes[i].total = sizes[i].count * sizes[i].to;
> > +        mchunkptr p = fastbin (ar_ptr, i);
> > +        if (p != NULL)
> > +          {
> > +            size_t nthissize = 0;
> > +            size_t thissize = chunksize (p);
> > +
> > +            while (p != NULL)
> > +              {
> > +                ++nthissize;
> > +                p = p->fd;
> > +              }
> > +
> > +            fastavail += nthissize * thissize;
> > +            nfastblocks += nthissize;
> > +            sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
> > +            sizes[i].to = thissize;
> > +            sizes[i].count = nthissize;
> > +          }
> > +        else
> > +          sizes[i].from = sizes[i].to = sizes[i].count = 0;
> > +
> > +        sizes[i].total = sizes[i].count * sizes[i].to;
> >        }
> >  
> >  
> > @@ -4943,29 +5113,29 @@ malloc_info (int options, FILE *fp)
> >  
> >      for (size_t i = 1; i < NBINS; ++i)
> >        {
> > -	bin = bin_at (ar_ptr, i);
> > -	r = bin->fd;
> > -	sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
> > -	sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
> > -	  = sizes[NFASTBINS - 1 + i].count = 0;
> > -
> > -	if (r != NULL)
> > -	  while (r != bin)
> > -	    {
> > -	      ++sizes[NFASTBINS - 1 + i].count;
> > -	      sizes[NFASTBINS - 1 + i].total += r->size;
> > -	      sizes[NFASTBINS - 1 + i].from
> > -		= MIN (sizes[NFASTBINS - 1 + i].from, r->size);
> > -	      sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
> > -						 r->size);
> > -
> > -	      r = r->fd;
> > -	    }
> > -
> > -	if (sizes[NFASTBINS - 1 + i].count == 0)
> > -	  sizes[NFASTBINS - 1 + i].from = 0;
> > -	nblocks += sizes[NFASTBINS - 1 + i].count;
> > -	avail += sizes[NFASTBINS - 1 + i].total;
> > +        bin = bin_at (ar_ptr, i);
> > +        r = bin->fd;
> > +        sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
> > +        sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
> > +                                        = sizes[NFASTBINS - 1 + i].count = 0;
> > +
> > +        if (r != NULL)
> > +          while (r != bin)
> > +            {
> > +              ++sizes[NFASTBINS - 1 + i].count;
> > +              sizes[NFASTBINS - 1 + i].total += r->size;
> > +              sizes[NFASTBINS - 1 + i].from
> > +                = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
> > +              sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
> > +                                                 r->size);
> > +
> > +              r = r->fd;
> > +            }
> > +
> > +        if (sizes[NFASTBINS - 1 + i].count == 0)
> > +          sizes[NFASTBINS - 1 + i].from = 0;
> > +        nblocks += sizes[NFASTBINS - 1 + i].count;
> > +        avail += sizes[NFASTBINS - 1 + i].total;
> >        }
> >  
> >      mutex_unlock (&ar_ptr->mutex);
> > @@ -4978,51 +5148,51 @@ malloc_info (int options, FILE *fp)
> >  
> >      for (size_t i = 0; i < nsizes; ++i)
> >        if (sizes[i].count != 0 && i != NFASTBINS)
> > -	fprintf (fp, "\
> > +        fprintf (fp, "\
> >  <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
> > -		 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
> > +                 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
> >  
> >      if (sizes[NFASTBINS].count != 0)
> >        fprintf (fp, "\
> >  <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
> > -	       sizes[NFASTBINS].from, sizes[NFASTBINS].to,
> > -	       sizes[NFASTBINS].total, sizes[NFASTBINS].count);
> > +               sizes[NFASTBINS].from, sizes[NFASTBINS].to,
> > +               sizes[NFASTBINS].total, sizes[NFASTBINS].count);
> >  
> >      total_system += ar_ptr->system_mem;
> >      total_max_system += ar_ptr->max_system_mem;
> >  
> >      fprintf (fp,
> > -	     "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
> > -	     "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
> > -	     "<system type=\"current\" size=\"%zu\"/>\n"
> > -	     "<system type=\"max\" size=\"%zu\"/>\n",
> > -	     nfastblocks, fastavail, nblocks, avail,
> > -	     ar_ptr->system_mem, ar_ptr->max_system_mem);
> > +             "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
> > +             "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
> > +             "<system type=\"current\" size=\"%zu\"/>\n"
> > +             "<system type=\"max\" size=\"%zu\"/>\n",
> > +             nfastblocks, fastavail, nblocks, avail,
> > +             ar_ptr->system_mem, ar_ptr->max_system_mem);
> >  
> >      if (ar_ptr != &main_arena)
> >        {
> > -	heap_info *heap = heap_for_ptr(top(ar_ptr));
> > -	fprintf (fp,
> > -		 "<aspace type=\"total\" size=\"%zu\"/>\n"
> > -		 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
> > -		 heap->size, heap->mprotect_size);
> > -	total_aspace += heap->size;
> > -	total_aspace_mprotect += heap->mprotect_size;
> > +        heap_info *heap = heap_for_ptr (top (ar_ptr));
> > +        fprintf (fp,
> > +                 "<aspace type=\"total\" size=\"%zu\"/>\n"
> > +                 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
> > +                 heap->size, heap->mprotect_size);
> > +        total_aspace += heap->size;
> > +        total_aspace_mprotect += heap->mprotect_size;
> >        }
> >      else
> >        {
> > -	fprintf (fp,
> > -		 "<aspace type=\"total\" size=\"%zu\"/>\n"
> > -		 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
> > -		 ar_ptr->system_mem, ar_ptr->system_mem);
> > -	total_aspace += ar_ptr->system_mem;
> > -	total_aspace_mprotect += ar_ptr->system_mem;
> > +        fprintf (fp,
> > +                 "<aspace type=\"total\" size=\"%zu\"/>\n"
> > +                 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
> > +                 ar_ptr->system_mem, ar_ptr->system_mem);
> > +        total_aspace += ar_ptr->system_mem;
> > +        total_aspace_mprotect += ar_ptr->system_mem;
> >        }
> >  
> >      fputs ("</heap>\n", fp);
> >    }
> >  
> > -  if(__malloc_initialized < 0)
> > +  if (__malloc_initialized < 0)
> >      ptmalloc_init ();
> >  
> >    fputs ("<malloc version=\"1\">\n", fp);
> > @@ -5037,16 +5207,16 @@ malloc_info (int options, FILE *fp)
> >    while (ar_ptr != &main_arena);
> >  
> >    fprintf (fp,
> > -	   "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
> > -	   "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
> > -	   "<system type=\"current\" size=\"%zu\"/>\n"
> > -	   "<system type=\"max\" size=\"%zu\"/>\n"
> > -	   "<aspace type=\"total\" size=\"%zu\"/>\n"
> > -	   "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
> > -	   "</malloc>\n",
> > -	   total_nfastblocks, total_fastavail, total_nblocks, total_avail,
> > -	   total_system, total_max_system,
> > -	   total_aspace, total_aspace_mprotect);
> > +           "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
> > +           "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
> > +           "<system type=\"current\" size=\"%zu\"/>\n"
> > +           "<system type=\"max\" size=\"%zu\"/>\n"
> > +           "<aspace type=\"total\" size=\"%zu\"/>\n"
> > +           "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
> > +           "</malloc>\n",
> > +           total_nfastblocks, total_fastavail, total_nblocks, total_avail,
> > +           total_system, total_max_system,
> > +           total_aspace, total_aspace_mprotect);
> >  
> >    return 0;
> >  }
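
Since the XML output is built from the string literals above, a small
caller makes a good before/after regression check -- the emitted
document should be identical:

  #include <malloc.h>
  #include <stdio.h>
  #include <stdlib.h>

  int
  main (void)
  {
    void *p = malloc (4096);
    malloc_info (0, stdout);   /* The options argument is unused.  */
    free (p);
    return 0;
  }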
> > @@ -5073,11 +5243,11 @@ weak_alias (__malloc_set_state, malloc_set_state)
> >  
> >  
> >  /* ------------------------------------------------------------
> > -History:
> > +   History:
> >  
> > -[see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
> > +   [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
> >  
> > -*/
> > + */
> >  /*
> >   * Local variables:
> >   * c-basic-offset: 2
> > diff --git a/malloc/malloc.h b/malloc/malloc.h
> > index b8b0ca3..22a9f3a 100644
> > --- a/malloc/malloc.h
> > +++ b/malloc/malloc.h
> > @@ -39,7 +39,7 @@ extern void *malloc (size_t __size) __THROW __attribute_malloc__ __wur;
> >  
> >  /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
> >  extern void *calloc (size_t __nmemb, size_t __size)
> > -     __THROW __attribute_malloc__ __wur;
> > +__THROW __attribute_malloc__ __wur;
> >  
> >  /* Re-allocate the previously allocated block in __ptr, making the new
> >     block SIZE bytes long.  */
> > @@ -47,7 +47,7 @@ extern void *calloc (size_t __nmemb, size_t __size)
> >     the same pointer that was passed to it, aliasing needs to be allowed
> >     between objects pointed by the old and new pointers.  */
> >  extern void *realloc (void *__ptr, size_t __size)
> > -     __THROW __attribute_warn_unused_result__;
> > +__THROW __attribute_warn_unused_result__;
> >  
> >  /* Free a block allocated by `malloc', `realloc' or `calloc'.  */
> >  extern void free (void *__ptr) __THROW;
> > @@ -57,14 +57,14 @@ extern void cfree (void *__ptr) __THROW;
> >  
> >  /* Allocate SIZE bytes allocated to ALIGNMENT bytes.  */
> >  extern void *memalign (size_t __alignment, size_t __size)
> > -     __THROW __attribute_malloc__ __wur;
> > +__THROW __attribute_malloc__ __wur;
> >  
> >  /* Allocate SIZE bytes on a page boundary.  */
> >  extern void *valloc (size_t __size) __THROW __attribute_malloc__ __wur;
> >  
> >  /* Equivalent to valloc(minimum-page-that-holds(n)), that is, round up
> >     __size to nearest pagesize. */
> > -extern void * pvalloc (size_t __size) __THROW __attribute_malloc__ __wur;
> > +extern void *pvalloc (size_t __size) __THROW __attribute_malloc__ __wur;
> >  
> >  /* Underlying allocation function; successive calls should return
> >     contiguous pieces of memory.  */
> > @@ -72,7 +72,7 @@ extern void *(*__morecore) (ptrdiff_t __size);
> >  
> >  /* Default value of `__morecore'.  */
> >  extern void *__default_morecore (ptrdiff_t __size)
> > -     __THROW __attribute_malloc__;
> > +__THROW __attribute_malloc__;
> >  
> >  /* SVID2/XPG mallinfo structure */
> >  
> > @@ -95,16 +95,16 @@ extern struct mallinfo mallinfo (void) __THROW;
> >  
> >  /* SVID2/XPG mallopt options */
> >  #ifndef M_MXFAST
> > -# define M_MXFAST  1	/* maximum request size for "fastbins" */
> > +# define M_MXFAST  1    /* maximum request size for "fastbins" */
> >  #endif
> >  #ifndef M_NLBLKS
> > -# define M_NLBLKS  2	/* UNUSED in this malloc */
> > +# define M_NLBLKS  2    /* UNUSED in this malloc */
> >  #endif
> >  #ifndef M_GRAIN
> > -# define M_GRAIN   3	/* UNUSED in this malloc */
> > +# define M_GRAIN   3    /* UNUSED in this malloc */
> >  #endif
> >  #ifndef M_KEEP
> > -# define M_KEEP    4	/* UNUSED in this malloc */
> > +# define M_KEEP    4    /* UNUSED in this malloc */
> >  #endif
> >  
> >  /* mallopt options that actually do something */
> > @@ -113,9 +113,9 @@ extern struct mallinfo mallinfo (void) __THROW;
> >  #define M_MMAP_THRESHOLD    -3
> >  #define M_MMAP_MAX          -4
> >  #define M_CHECK_ACTION      -5
> > -#define M_PERTURB	    -6
> > -#define M_ARENA_TEST	    -7
> > -#define M_ARENA_MAX	    -8
> > +#define M_PERTURB           -6
> > +#define M_ARENA_TEST        -7
> > +#define M_ARENA_MAX         -8
> >  
> >  /* General SVID/XPG interface to tunable parameters. */
> >  extern int mallopt (int __param, int __val) __THROW;
> > @@ -145,22 +145,22 @@ extern int malloc_set_state (void *__ptr) __THROW;
> >     the application provides the preferred way to set up the hook
> >     pointers. */
> >  extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void)
> > -     __MALLOC_DEPRECATED;
> > +__MALLOC_DEPRECATED;
> >  /* Hooks for debugging and user-defined versions. */
> >  extern void (*__MALLOC_HOOK_VOLATILE __free_hook) (void *__ptr,
> > -						   const void *)
> > -     __MALLOC_DEPRECATED;
> > -extern void *(*__MALLOC_HOOK_VOLATILE __malloc_hook) (size_t __size,
> > -						      const void *)
> > -     __MALLOC_DEPRECATED;
> > -extern void *(*__MALLOC_HOOK_VOLATILE __realloc_hook) (void *__ptr,
> > -						       size_t __size,
> > -						       const void *)
> > -     __MALLOC_DEPRECATED;
> > -extern void *(*__MALLOC_HOOK_VOLATILE __memalign_hook) (size_t __alignment,
> > -							size_t __size,
> > -							const void *)
> > -     __MALLOC_DEPRECATED;
> > +                                                   const void *)
> > +__MALLOC_DEPRECATED;
> > +extern void *(*__MALLOC_HOOK_VOLATILE __malloc_hook) (size_t __size,
> > +                                                      const void *)
> > +__MALLOC_DEPRECATED;
> > +extern void *(*__MALLOC_HOOK_VOLATILE __realloc_hook) (void *__ptr,
> > +                                                       size_t __size,
> > +                                                       const void *)
> > +__MALLOC_DEPRECATED;
> > +extern void *(*__MALLOC_HOOK_VOLATILE __memalign_hook) (size_t __alignment,
> > +                                                        size_t __size,
> > +                                                        const void *)
> > +__MALLOC_DEPRECATED;
> >  extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
> >  
> >  /* Activate a standard set of debugging hooks. */
> > @@ -168,5 +168,4 @@ extern void __malloc_check_init (void) __THROW __MALLOC_DEPRECATED;
> >  
> >  
> >  __END_DECLS
> > -
> >  #endif /* malloc.h */
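
The hook declarations above are easy to break when re-wrapping, so
for reference, the classic (deprecated) pattern they must keep
supporting -- essentially the manual's example:

  #include <malloc.h>
  #include <stdio.h>

  static void *(*old_malloc_hook) (size_t, const void *);

  static void *
  my_malloc_hook (size_t size, const void *caller)
  {
    void *result;

    __malloc_hook = old_malloc_hook;   /* Avoid recursing.  */
    result = malloc (size);
    old_malloc_hook = __malloc_hook;   /* malloc may have reset it.  */
    fprintf (stderr, "malloc (%zu) -> %p\n", size, result);
    __malloc_hook = my_malloc_hook;
    return result;
  }

  static void
  install_hook (void)
  {
    old_malloc_hook = __malloc_hook;
    __malloc_hook = my_malloc_hook;
  }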
> > diff --git a/malloc/mallocbug.c b/malloc/mallocbug.c
> > index fc607ae..7d19b6f 100644
> > --- a/malloc/mallocbug.c
> > +++ b/malloc/mallocbug.c
> > @@ -22,14 +22,14 @@ main (int argc, char *argv[])
> >    size_t i;
> >  
> >    /* Here's what memory is supposed to look like (hex):
> > -	size  contents
> > -	3000  original_info_table, later fill_info_table1
> > +        size  contents
> > +        3000  original_info_table, later fill_info_table1
> >        3fa000  dummy0
> >        3fa000  dummy1
> > -	6000  info_table_2
> > -	3000  over_top
> > +        6000  info_table_2
> > +        3000  over_top
> >  
> > -	*/
> > +   */
> >    /* mem: original_info_table */
> >    dummy0 = malloc (0x3fa000);
> >    /* mem: original_info_table, dummy0 */
> > @@ -54,15 +54,15 @@ main (int argc, char *argv[])
> >    for (i = 0; i < over_top_size; ++i)
> >      if (over_top[i] != 0)
> >        {
> > -	printf ("FAIL: malloc expands info table\n");
> > -	return 0;
> > +        printf ("FAIL: malloc expands info table\n");
> > +        return 0;
> >        }
> >  
> >    for (i = 0; i < over_top_dup_size; ++i)
> >      if (over_top_dup[i] != 1)
> >        {
> > -	printf ("FAIL: malloc expands info table\n");
> > -	return 0;
> > +        printf ("FAIL: malloc expands info table\n");
> > +        return 0;
> >        }
> >  
> >    printf ("PASS: malloc expands info table\n");
> > diff --git a/malloc/mcheck.c b/malloc/mcheck.c
> > index 2e5eadd..8be0145 100644
> > --- a/malloc/mcheck.c
> > +++ b/malloc/mcheck.c
> > @@ -17,7 +17,7 @@
> >     License along with the GNU C Library; if not, see
> >     <http://www.gnu.org/licenses/>.  */
> >  
> > -#ifndef	_MALLOC_INTERNAL
> > +#ifndef _MALLOC_INTERNAL
> >  # define _MALLOC_INTERNAL
> >  # include <malloc.h>
> >  # include <mcheck.h>
> > @@ -28,32 +28,32 @@
> >  #endif
> >  
> >  /* Old hook values.  */
> > -static void (*old_free_hook) (__ptr_t ptr, const __ptr_t);
> > +static void (*old_free_hook) (__ptr_t ptr, const __ptr_t);
> >  static __ptr_t (*old_malloc_hook) (size_t size, const __ptr_t);
> >  static __ptr_t (*old_memalign_hook) (size_t alignment, size_t size,
> > -				     const __ptr_t);
> > +                                     const __ptr_t);
> >  static __ptr_t (*old_realloc_hook) (__ptr_t ptr, size_t size,
> > -				    const __ptr_t);
> > +                                    const __ptr_t);
> >  
> >  /* Function to call when something awful happens.  */
> >  static void (*abortfunc) (enum mcheck_status);
> >  
> >  /* Arbitrary magical numbers.  */
> > -#define MAGICWORD	0xfedabeeb
> > -#define MAGICFREE	0xd8675309
> > -#define MAGICBYTE	((char) 0xd7)
> > -#define MALLOCFLOOD	((char) 0x93)
> > -#define FREEFLOOD	((char) 0x95)
> > +#define MAGICWORD       0xfedabeeb
> > +#define MAGICFREE       0xd8675309
> > +#define MAGICBYTE       ((char) 0xd7)
> > +#define MALLOCFLOOD     ((char) 0x93)
> > +#define FREEFLOOD       ((char) 0x95)
> >  
> >  struct hdr
> > -  {
> > -    size_t size;		/* Exact size requested by user.  */
> > -    unsigned long int magic;	/* Magic number to check header integrity.  */
> > -    struct hdr *prev;
> > -    struct hdr *next;
> > -    __ptr_t block;		/* Real block allocated, for memalign.  */
> > -    unsigned long int magic2;	/* Extra, keeps us doubleword aligned.  */
> > -  };
> > +{
> > +  size_t size;                  /* Exact size requested by user.  */
> > +  unsigned long int magic;      /* Magic number to check header integrity.  */
> > +  struct hdr *prev;
> > +  struct hdr *next;
> > +  __ptr_t block;                /* Real block allocated, for memalign.  */
> > +  unsigned long int magic2;     /* Extra, keeps us doubleword aligned.  */
> > +};
> >  
> >  /* This is the beginning of the list of all memory blocks allocated.
> >     It is only constructed if the pedantic testing is requested.  */
> > @@ -69,11 +69,10 @@ static int pedantic;
> >  # define flood memset
> >  #else
> >  static void flood (__ptr_t, int, size_t);
> > -static void
> > -flood (ptr, val, size)
> > -     __ptr_t ptr;
> > -     int val;
> > -     size_t size;
> > +static void flood (ptr, val, size)
> > +     __ptr_t ptr;
> > +     int val;
> > +     size_t size;
> >  {
> >    char *cp = ptr;
> >    while (size--)
> > @@ -101,11 +100,11 @@ checkhdr (const struct hdr *hdr)
> >        break;
> >      case MAGICWORD:
> >        if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
> > -	status = MCHECK_TAIL;
> > +        status = MCHECK_TAIL;
> >        else if ((hdr->magic2 ^ (uintptr_t) hdr->block) != MAGICWORD)
> > -	status = MCHECK_HEAD;
> > +        status = MCHECK_HEAD;
> >        else
> > -	status = MCHECK_OK;
> > +        status = MCHECK_OK;
> >        break;
> >      }
> >    if (status != MCHECK_OK)
> > @@ -148,13 +147,13 @@ unlink_blk (struct hdr *ptr)
> >      {
> >        ptr->next->prev = ptr->prev;
> >        ptr->next->magic = MAGICWORD ^ ((uintptr_t) ptr->next->prev
> > -				      + (uintptr_t) ptr->next->next);
> > +                                      + (uintptr_t) ptr->next->next);
> >      }
> >    if (ptr->prev != NULL)
> >      {
> >        ptr->prev->next = ptr->next;
> >        ptr->prev->magic = MAGICWORD ^ ((uintptr_t) ptr->prev->prev
> > -				      + (uintptr_t) ptr->prev->next);
> > +                                      + (uintptr_t) ptr->prev->next);
> >      }
> >    else
> >      root = ptr->next;
> > @@ -173,7 +172,7 @@ link_blk (struct hdr *hdr)
> >      {
> >        hdr->next->prev = hdr;
> >        hdr->next->magic = MAGICWORD ^ ((uintptr_t) hdr
> > -				      + (uintptr_t) hdr->next->next);
> > +                                      + (uintptr_t) hdr->next->next);
> >      }
> >  }
> >  static void
> > @@ -194,7 +193,7 @@ freehook (__ptr_t ptr, const __ptr_t caller)
> >      }
> >    __free_hook = old_free_hook;
> >    if (old_free_hook != NULL)
> > -    (*old_free_hook) (ptr, caller);
> > +    (*old_free_hook)(ptr, caller);
> >    else
> >      free (ptr);
> >    __free_hook = freehook;
> > @@ -216,8 +215,8 @@ mallochook (size_t size, const __ptr_t caller)
> >  
> >    __malloc_hook = old_malloc_hook;
> >    if (old_malloc_hook != NULL)
> > -    hdr = (struct hdr *) (*old_malloc_hook) (sizeof (struct hdr) + size + 1,
> > -					     caller);
> > +    hdr = (struct hdr *) (*old_malloc_hook)(sizeof (struct hdr) + size + 1,
> > +                                            caller);
> >    else
> >      hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
> >    __malloc_hook = mallochook;
> > @@ -235,7 +234,7 @@ mallochook (size_t size, const __ptr_t caller)
> >  
> >  static __ptr_t
> >  memalignhook (size_t alignment, size_t size,
> > -	      const __ptr_t caller)
> > +              const __ptr_t caller)
> >  {
> >    struct hdr *hdr;
> >    size_t slop;
> > @@ -244,7 +243,7 @@ memalignhook (size_t alignment, size_t size,
> >    if (pedantic)
> >      mcheck_check_all ();
> >  
> > -  slop = (sizeof *hdr + alignment - 1) & -alignment;
> > +  slop = (sizeof *hdr + alignment - 1) & -alignment;
> >  
> >    if (size > ~((size_t) 0) - (slop + 1))
> >      {
> > @@ -254,7 +253,7 @@ memalignhook (size_t alignment, size_t size,
> >  
> >    __memalign_hook = old_memalign_hook;
> >    if (old_memalign_hook != NULL)
> > -    block = (*old_memalign_hook) (alignment, slop + size + 1, caller);
> > +    block = (*old_memalign_hook)(alignment, slop + size + 1, caller);
> >    else
> >      block = memalign (alignment, slop + size + 1);
> >    __memalign_hook = memalignhook;
> > @@ -301,7 +300,7 @@ reallochook (__ptr_t ptr, size_t size, const __ptr_t caller)
> >        checkhdr (hdr);
> >        unlink_blk (hdr);
> >        if (size < osize)
> > -	flood ((char *) ptr + size, FREEFLOOD, osize - size);
> > +        flood ((char *) ptr + size, FREEFLOOD, osize - size);
> >      }
> >    else
> >      {
> > @@ -313,12 +312,12 @@ reallochook (__ptr_t ptr, size_t size, const __ptr_t caller)
> >    __memalign_hook = old_memalign_hook;
> >    __realloc_hook = old_realloc_hook;
> >    if (old_realloc_hook != NULL)
> > -    hdr = (struct hdr *) (*old_realloc_hook) ((__ptr_t) hdr,
> > -					      sizeof (struct hdr) + size + 1,
> > -					      caller);
> > +    hdr = (struct hdr *) (*old_realloc_hook)((__ptr_t) hdr,
> > +                                             sizeof (struct hdr) + size + 1,
> > +                                             caller);
> >    else
> >      hdr = (struct hdr *) realloc ((__ptr_t) hdr,
> > -				  sizeof (struct hdr) + size + 1);
> > +                                  sizeof (struct hdr) + size + 1);
> >    __free_hook = freehook;
> >    __malloc_hook = mallochook;
> >    __memalign_hook = memalignhook;
> > @@ -344,19 +343,19 @@ mabort (enum mcheck_status status)
> >    switch (status)
> >      {
> >      case MCHECK_OK:
> > -      msg = _("memory is consistent, library is buggy\n");
> > +      msg = _ ("memory is consistent, library is buggy\n");
> >        break;
> >      case MCHECK_HEAD:
> > -      msg = _("memory clobbered before allocated block\n");
> > +      msg = _ ("memory clobbered before allocated block\n");
> >        break;
> >      case MCHECK_TAIL:
> > -      msg = _("memory clobbered past end of allocated block\n");
> > +      msg = _ ("memory clobbered past end of allocated block\n");
> >        break;
> >      case MCHECK_FREE:
> > -      msg = _("block freed twice\n");
> > +      msg = _ ("block freed twice\n");
> >        break;
> >      default:
> > -      msg = _("bogus mcheck_status, library is buggy\n");
> > +      msg = _ ("bogus mcheck_status, library is buggy\n");
> >        break;
> >      }
> >  #ifdef _LIBC
> > @@ -370,11 +369,10 @@ mabort (enum mcheck_status status)
> >  
> >  /* Memory barrier so that GCC does not optimize out the argument.  */
> >  #define malloc_opt_barrier(x) \
> > -({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; })
> > +  ({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; })
> >  
> > -int
> > -mcheck (func)
> > -     void (*func) (enum mcheck_status);
> > +int mcheck (func)
> > +     void (*func) (enum mcheck_status);
> >  {
> >    abortfunc = (func != NULL) ? func : &mabort;
> >  
> > @@ -404,9 +402,8 @@ mcheck (func)
> >  libc_hidden_def (mcheck)
> >  #endif
> >  
> > -int
> > -mcheck_pedantic (func)
> > -      void (*func) (enum mcheck_status);
> > +int mcheck_pedantic (func)
> > +     void (*func) (enum mcheck_status);
> >  {
> >    int res = mcheck (func);
> >    if (res == 0)
> > diff --git a/malloc/mcheck.h b/malloc/mcheck.h
> > index 204ca33..53b9405 100644
> > --- a/malloc/mcheck.h
> > +++ b/malloc/mcheck.h
> > @@ -16,7 +16,7 @@
> >     <http://www.gnu.org/licenses/>.  */
> >  
> >  #ifndef _MCHECK_H
> > -#define _MCHECK_H	1
> > +#define _MCHECK_H       1
> >  
> >  #include <features.h>
> >  
> > @@ -25,24 +25,24 @@ __BEGIN_DECLS
> >  /* Return values for `mprobe': these are the kinds of inconsistencies that
> >     `mcheck' enables detection of.  */
> >  enum mcheck_status
> > -  {
> > -    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
> > -    MCHECK_OK,                  /* Block is fine.  */
> > -    MCHECK_FREE,                /* Block freed twice.  */
> > -    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
> > -    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
> > -  };
> > +{
> > +  MCHECK_DISABLED = -1,         /* Consistency checking is not turned on.  */
> > +  MCHECK_OK,                    /* Block is fine.  */
> > +  MCHECK_FREE,                  /* Block freed twice.  */
> > +  MCHECK_HEAD,                  /* Memory before the block was clobbered.  */
> > +  MCHECK_TAIL                   /* Memory after the block was clobbered.  */
> > +};
> >  
> >  
> >  /* Activate a standard collection of debugging hooks.  This must be called
> >     before `malloc' is ever called.  ABORTFUNC is called with an error code
> >     (see enum above) when an inconsistency is detected.  If ABORTFUNC is
> >     null, the standard function prints on stderr and then calls `abort'.  */
> > -extern int mcheck (void (*__abortfunc) (enum mcheck_status)) __THROW;
> > +extern int mcheck (void (*__abortfunc) (enum mcheck_status)) __THROW;
> >  
> >  /* Similar to `mcheck' but performs checks for all block whenever one of
> >     the memory handling functions is called.  This can be very slow.  */
> > -extern int mcheck_pedantic (void (*__abortfunc) (enum mcheck_status)) __THROW;
> > +extern int mcheck_pedantic (void (*__abortfunc) (enum mcheck_status)) __THROW;
> >  
> >  /* Force check of all blocks now.  */
> >  extern void mcheck_check_all (void);
> > @@ -57,5 +57,4 @@ extern void mtrace (void) __THROW;
> >  extern void muntrace (void) __THROW;
> >  
> >  __END_DECLS
> > -
> >  #endif /* mcheck.h */
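
The contract documented above (call before the first malloc; NULL
selects the default handler, i.e. mabort) in a minimal test:

  #include <mcheck.h>
  #include <stdlib.h>

  int
  main (void)
  {
    mcheck (NULL);   /* Must precede the first allocation.  */

    char *p = malloc (16);
    p[16] = 0;       /* Clobber the guard byte after the block.  */
    free (p);        /* checkhdr finds MCHECK_TAIL; mabort aborts.  */
    return 0;
  }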
> > diff --git a/malloc/memusage.c b/malloc/memusage.c
> > index e32f6ba..2aaa150 100644
> > --- a/malloc/memusage.c
> > +++ b/malloc/memusage.c
> > @@ -38,7 +38,7 @@
> >  
> >  /* Pointer to the real functions.  These are determined used `dlsym'
> >     when really needed.  */
> > -static void *(*mallocp) (size_t);
> > +static void *(*mallocp) (size_t);
> >  static void *(*reallocp) (void *, size_t);
> >  static void *(*callocp) (size_t, size_t);
> >  static void (*freep) (void *);
> > @@ -89,11 +89,11 @@ static memusage_size_t peak_use[3];
> >  static __thread uintptr_t start_sp;
> >  
> >  /* A few macros to make the source more readable.  */
> > -#define peak_heap	peak_use[0]
> > -#define peak_stack	peak_use[1]
> > -#define peak_total	peak_use[2]
> > +#define peak_heap       peak_use[0]
> > +#define peak_stack      peak_use[1]
> > +#define peak_total      peak_use[2]
> >  
> > -#define DEFAULT_BUFFER_SIZE	32768
> > +#define DEFAULT_BUFFER_SIZE     32768
> >  static size_t buffer_size;
> >  
> >  static int fd = -1;
> > @@ -164,16 +164,16 @@ update_data (struct header *result, size_t len, size_t old_len)
> >      {
> >        uatomic32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
> >        if (idx + 1 >= 2 * buffer_size)
> > -	{
> > -	  /* We try to reset the counter to the correct range.  If
> > -	     this fails because of another thread increasing the
> > -	     counter it does not matter since that thread will take
> > -	     care of the correction.  */
> > -	  uatomic32_t reset = (idx + 1) % (2 * buffer_size);
> > -	  catomic_compare_and_exchange_val_acq (&buffer_cnt, reset, idx + 1);
> > -	  if (idx >= 2 * buffer_size)
> > -	    idx = reset - 1;
> > -	}
> > +        {
> > +          /* We try to reset the counter to the correct range.  If
> > +             this fails because of another thread increasing the
> > +             counter it does not matter since that thread will take
> > +             care of the correction.  */
> > +          uatomic32_t reset = (idx + 1) % (2 * buffer_size);
> > +          catomic_compare_and_exchange_val_acq (&buffer_cnt, reset, idx + 1);
> > +          if (idx >= 2 * buffer_size)
> > +            idx = reset - 1;
> > +        }
> >        assert (idx < 2 * DEFAULT_BUFFER_SIZE);
> >  
> >        buffer[idx].heap = current_heap;
> > @@ -182,9 +182,9 @@ update_data (struct header *result, size_t len, size_t old_len)
> >  
> >        /* Write out buffer if it is full.  */
> >        if (idx + 1 == buffer_size)
> > -	write (fd, buffer, buffer_size * sizeof (struct entry));
> > +        write (fd, buffer, buffer_size * sizeof (struct entry));
> >        else if (idx + 1 == 2 * buffer_size)
> > -	write (fd, &buffer[buffer_size], buffer_size * sizeof (struct entry));
> > +        write (fd, &buffer[buffer_size], buffer_size * sizeof (struct entry));
> >      }
> >  }
> >  
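
The wrap-around logic above keeps buffer_cnt inside [0, 2 *
buffer_size) using nothing but atomics.  In outline, with hypothetical
names and C11 atomics standing in for the internal catomic_* calls:

  #include <stdatomic.h>

  static atomic_uint buffer_cnt;

  static unsigned int
  next_slot (unsigned int buffer_size)
  {
    unsigned int idx = atomic_fetch_add (&buffer_cnt, 1);
    if (idx + 1 >= 2 * buffer_size)
      {
        /* Try to wrap; if another thread bumped the counter first,
           that thread takes care of the correction instead.  */
        unsigned int expected = idx + 1;
        unsigned int reset = (idx + 1) % (2 * buffer_size);
        atomic_compare_exchange_strong (&buffer_cnt, &expected, reset);
        if (idx >= 2 * buffer_size)
          idx = reset - 1;
      }
    return idx;
  }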
> > @@ -221,19 +221,19 @@ me (void)
> >    size_t prog_len = strlen (__progname);
> >  
> >    initialized = -1;
> > -  mallocp = (void *(*) (size_t)) dlsym (RTLD_NEXT, "malloc");
> > -  reallocp = (void *(*) (void *, size_t)) dlsym (RTLD_NEXT, "realloc");
> > -  callocp = (void *(*) (size_t, size_t)) dlsym (RTLD_NEXT, "calloc");
> > -  freep = (void (*) (void *)) dlsym (RTLD_NEXT, "free");
> > +  mallocp = (void *(*)(size_t))dlsym (RTLD_NEXT, "malloc");
> > +  reallocp = (void *(*)(void *, size_t))dlsym (RTLD_NEXT, "realloc");
> > +  callocp = (void *(*)(size_t, size_t))dlsym (RTLD_NEXT, "calloc");
> > +  freep = (void (*)(void *))dlsym (RTLD_NEXT, "free");
> >  
> > -  mmapp = (void *(*) (void *, size_t, int, int, int, off_t)) dlsym (RTLD_NEXT,
> > -								    "mmap");
> > +  mmapp = (void *(*)(void *, size_t, int, int, int, off_t))dlsym (RTLD_NEXT,
> > +                                                                  "mmap");
> >    mmap64p =
> > -    (void *(*) (void *, size_t, int, int, int, off64_t)) dlsym (RTLD_NEXT,
> > -								"mmap64");
> > -  mremapp = (void *(*) (void *, size_t, size_t, int, void *)) dlsym (RTLD_NEXT,
> > -								     "mremap");
> > -  munmapp = (int (*) (void *, size_t)) dlsym (RTLD_NEXT, "munmap");
> > +    (void *(*)(void *, size_t, int, int, int, off64_t))dlsym (RTLD_NEXT,
> > +                                                              "mmap64");
> > +  mremapp = (void *(*)(void *, size_t, size_t, int, void *))dlsym (RTLD_NEXT,
> > +                                                                   "mremap");
> > +  munmapp = (int (*)(void *, size_t))dlsym (RTLD_NEXT, "munmap");
> >    initialized = 1;
> >  
> >    if (env != NULL)
> > @@ -241,8 +241,8 @@ me (void)
> >        /* Check for program name.  */
> >        size_t len = strlen (env);
> >        if (len > prog_len || strcmp (env, &__progname[prog_len - len]) != 0
> > -	  || (prog_len != len && __progname[prog_len - len - 1] != '/'))
> > -	not_me = true;
> > +          || (prog_len != len && __progname[prog_len - len - 1] != '/'))
> > +        not_me = true;
> >      }
> >  
> >    /* Only open the file if it's really us.  */
> > @@ -251,62 +251,62 @@ me (void)
> >        const char *outname;
> >  
> >        if (!start_sp)
> > -	start_sp = GETSP ();
> > +        start_sp = GETSP ();
> >  
> >        outname = getenv ("MEMUSAGE_OUTPUT");
> >        if (outname != NULL && outname[0] != '\0'
> > -	  && (access (outname, R_OK | W_OK) == 0 || errno == ENOENT))
> > -	{
> > -	  fd = creat64 (outname, 0666);
> > -
> > -	  if (fd == -1)
> > -	    /* Don't do anything in future calls if we cannot write to
> > -	       the output file.  */
> > -	    not_me = true;
> > -	  else
> > -	    {
> > -	      /* Write the first entry.  */
> > -	      first.heap = 0;
> > -	      first.stack = 0;
> > -	      GETTIME (first.time_low, first.time_high);
> > -	      /* Write it two times since we need the starting and end time. */
> > -	      write (fd, &first, sizeof (first));
> > -	      write (fd, &first, sizeof (first));
> > -
> > -	      /* Determine the buffer size.  We use the default if the
> > -		 environment variable is not present.  */
> > -	      buffer_size = DEFAULT_BUFFER_SIZE;
> > -	      if (getenv ("MEMUSAGE_BUFFER_SIZE") != NULL)
> > -		{
> > -		  buffer_size = atoi (getenv ("MEMUSAGE_BUFFER_SIZE"));
> > -		  if (buffer_size == 0 || buffer_size > DEFAULT_BUFFER_SIZE)
> > -		    buffer_size = DEFAULT_BUFFER_SIZE;
> > -		}
> > -
> > -	      /* Possibly enable timer-based stack pointer retrieval.  */
> > -	      if (getenv ("MEMUSAGE_NO_TIMER") == NULL)
> > -		{
> > -		  struct sigaction act;
> > -
> > -		  act.sa_handler = (sighandler_t) &int_handler;
> > -		  act.sa_flags = SA_RESTART;
> > -		  sigfillset (&act.sa_mask);
> > -
> > -		  if (sigaction (SIGPROF, &act, NULL) >= 0)
> > -		    {
> > -		      struct itimerval timer;
> > -
> > -		      timer.it_value.tv_sec = 0;
> > -		      timer.it_value.tv_usec = 1;
> > -		      timer.it_interval = timer.it_value;
> > -		      setitimer (ITIMER_PROF, &timer, NULL);
> > -		    }
> > -		}
> > -	    }
> > -	}
> > +          && (access (outname, R_OK | W_OK) == 0 || errno == ENOENT))
> > +        {
> > +          fd = creat64 (outname, 0666);
> > +
> > +          if (fd == -1)
> > +            /* Don't do anything in future calls if we cannot write to
> > +               the output file.  */
> > +            not_me = true;
> > +          else
> > +            {
> > +              /* Write the first entry.  */
> > +              first.heap = 0;
> > +              first.stack = 0;
> > +              GETTIME (first.time_low, first.time_high);
> > +              /* Write it two times since we need the starting and end time. */
> > +              write (fd, &first, sizeof (first));
> > +              write (fd, &first, sizeof (first));
> > +
> > +              /* Determine the buffer size.  We use the default if the
> > +                 environment variable is not present.  */
> > +              buffer_size = DEFAULT_BUFFER_SIZE;
> > +              if (getenv ("MEMUSAGE_BUFFER_SIZE") != NULL)
> > +                {
> > +                  buffer_size = atoi (getenv ("MEMUSAGE_BUFFER_SIZE"));
> > +                  if (buffer_size == 0 || buffer_size > DEFAULT_BUFFER_SIZE)
> > +                    buffer_size = DEFAULT_BUFFER_SIZE;
> > +                }
> > +
> > +              /* Possibly enable timer-based stack pointer retrieval.  */
> > +              if (getenv ("MEMUSAGE_NO_TIMER") == NULL)
> > +                {
> > +                  struct sigaction act;
> > +
> > +                  act.sa_handler = (sighandler_t) &int_handler;
> > +                  act.sa_flags = SA_RESTART;
> > +                  sigfillset (&act.sa_mask);
> > +
> > +                  if (sigaction (SIGPROF, &act, NULL) >= 0)
> > +                    {
> > +                      struct itimerval timer;
> > +
> > +                      timer.it_value.tv_sec = 0;
> > +                      timer.it_value.tv_usec = 1;
> > +                      timer.it_interval = timer.it_value;
> > +                      setitimer (ITIMER_PROF, &timer, NULL);
> > +                    }
> > +                }
> > +            }
> > +        }
> >  
> >        if (!not_me && getenv ("MEMUSAGE_TRACE_MMAP") != NULL)
> > -	trace_mmap = true;
> > +        trace_mmap = true;
> >      }
> >  }
> >  
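
The dlsym (RTLD_NEXT, ...) block above is the standard LD_PRELOAD
interposition pattern.  Stripped of the accounting, and ignoring the
re-entrancy problem that memusage's `initialized' tri-state handles,
it is just (link with -ldl):

  #define _GNU_SOURCE
  #include <dlfcn.h>
  #include <stddef.h>

  static void *(*real_malloc) (size_t);

  void *
  malloc (size_t len)
  {
    if (real_malloc == NULL)
      real_malloc = (void *(*) (size_t)) dlsym (RTLD_NEXT, "malloc");
    return real_malloc (len);
  }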
> > @@ -317,7 +317,7 @@ __attribute__ ((constructor))
> >  init (void)
> >  {
> >    start_sp = GETSP ();
> > -  if (! initialized)
> > +  if (!initialized)
> >      me ();
> >  }
> >  
> > @@ -333,13 +333,14 @@ malloc (size_t len)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return NULL;
> > +        return NULL;
> > +
> >        me ();
> >      }
> >  
> >    /* If this is not the correct program just use the normal function.  */
> >    if (not_me)
> > -    return (*mallocp) (len);
> > +    return (*mallocp)(len);
> >  
> >    /* Keep track of number of calls.  */
> >    catomic_increment (&calls[idx_malloc]);
> > @@ -356,7 +357,7 @@ malloc (size_t len)
> >    catomic_increment (&calls_total);
> >  
> >    /* Do the real work.  */
> > -  result = (struct header *) (*mallocp) (len + sizeof (struct header));
> > +  result = (struct header *) (*mallocp)(len + sizeof (struct header));
> >    if (result == NULL)
> >      {
> >        catomic_increment (&failed[idx_malloc]);
> > @@ -384,13 +385,14 @@ realloc (void *old, size_t len)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return NULL;
> > +        return NULL;
> > +
> >        me ();
> >      }
> >  
> >    /* If this is not the correct program just use the normal function.  */
> >    if (not_me)
> > -    return (*reallocp) (old, len);
> > +    return (*reallocp)(old, len);
> >  
> >    if (old == NULL)
> >      {
> > @@ -402,8 +404,9 @@ realloc (void *old, size_t len)
> >      {
> >        real = ((struct header *) old) - 1;
> >        if (real->magic != MAGIC)
> > -	/* This is no memory allocated here.  */
> > -	return (*reallocp) (old, len);
> > +        /* This is no memory allocated here.  */
> > +        return (*reallocp)(old, len);
> > +
> >        old_len = real->length;
> >      }
> >  
> > @@ -442,7 +445,7 @@ realloc (void *old, size_t len)
> >    catomic_increment (&calls_total);
> >  
> >    /* Do the real work.  */
> > -  result = (struct header *) (*reallocp) (real, len + sizeof (struct header));
> > +  result = (struct header *) (*reallocp)(real, len + sizeof (struct header));
> >    if (result == NULL)
> >      {
> >        catomic_increment (&failed[idx_realloc]);
> > @@ -476,13 +479,14 @@ calloc (size_t n, size_t len)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return NULL;
> > +        return NULL;
> > +
> >        me ();
> >      }
> >  
> >    /* If this is not the correct program just use the normal function.  */
> >    if (not_me)
> > -    return (*callocp) (n, len);
> > +    return (*callocp)(n, len);
> >  
> >    /* Keep track of number of calls.  */
> >    catomic_increment (&calls[idx_calloc]);
> > @@ -499,7 +503,7 @@ calloc (size_t n, size_t len)
> >    ++calls_total;
> >  
> >    /* Do the real work.  */
> > -  result = (struct header *) (*mallocp) (size + sizeof (struct header));
> > +  result = (struct header *) (*mallocp)(size + sizeof (struct header));
> >    if (result == NULL)
> >      {
> >        catomic_increment (&failed[idx_calloc]);
> > @@ -525,7 +529,8 @@ free (void *ptr)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return;
> > +        return;
> > +
> >        me ();
> >      }
> >  
> > @@ -576,17 +581,18 @@ mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return NULL;
> > +        return NULL;
> > +
> >        me ();
> >      }
> >  
> >    /* Always get a block.  We don't need extra memory.  */
> > -  result = (*mmapp) (start, len, prot, flags, fd, offset);
> > +  result = (*mmapp)(start, len, prot, flags, fd, offset);
> >  
> >    if (!not_me && trace_mmap)
> >      {
> >        int idx = (flags & MAP_ANON
> > -		 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
> > +                 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
> >  
> >        /* Keep track of number of calls.  */
> >        catomic_increment (&calls[idx]);
> > @@ -596,20 +602,20 @@ mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
> >        catomic_add (&grand_total, len);
> >        /* Remember the size of the request.  */
> >        if (len < 65536)
> > -	catomic_increment (&histogram[len / 16]);
> > +        catomic_increment (&histogram[len / 16]);
> >        else
> > -	catomic_increment (&large);
> > +        catomic_increment (&large);
> >        /* Total number of calls of any of the functions.  */
> >        catomic_increment (&calls_total);
> >  
> >        /* Check for failures.  */
> >        if (result == NULL)
> > -	catomic_increment (&failed[idx]);
> > +        catomic_increment (&failed[idx]);
> >        else if (idx == idx_mmap_w)
> > -	/* Update the allocation data and write out the records if
> > -	   necessary.  Note the first parameter is NULL which means
> > -	   the size is not tracked.  */
> > -	update_data (NULL, len, 0);
> > +        /* Update the allocation data and write out the records if
> > +           necessary.  Note the first parameter is NULL which means
> > +           the size is not tracked.  */
> > +        update_data (NULL, len, 0);
> >      }
> >  
> >    /* Return the pointer to the user buffer.  */
> > @@ -628,17 +634,18 @@ mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return NULL;
> > +        return NULL;
> > +
> >        me ();
> >      }
> >  
> >    /* Always get a block.  We don't need extra memory.  */
> > -  result = (*mmap64p) (start, len, prot, flags, fd, offset);
> > +  result = (*mmap64p)(start, len, prot, flags, fd, offset);
> >  
> >    if (!not_me && trace_mmap)
> >      {
> >        int idx = (flags & MAP_ANON
> > -		 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
> > +                 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
> >  
> >        /* Keep track of number of calls.  */
> >        catomic_increment (&calls[idx]);
> > @@ -648,20 +655,20 @@ mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
> >        catomic_add (&grand_total, len);
> >        /* Remember the size of the request.  */
> >        if (len < 65536)
> > -	catomic_increment (&histogram[len / 16]);
> > +        catomic_increment (&histogram[len / 16]);
> >        else
> > -	catomic_increment (&large);
> > +        catomic_increment (&large);
> >        /* Total number of calls of any of the functions.  */
> >        catomic_increment (&calls_total);
> >  
> >        /* Check for failures.  */
> >        if (result == NULL)
> > -	catomic_increment (&failed[idx]);
> > +        catomic_increment (&failed[idx]);
> >        else if (idx == idx_mmap_w)
> > -	/* Update the allocation data and write out the records if
> > -	   necessary.  Note the first parameter is NULL which means
> > -	   the size is not tracked.  */
> > -	update_data (NULL, len, 0);
> > +        /* Update the allocation data and write out the records if
> > +           necessary.  Note the first parameter is NULL which means
> > +           the size is not tracked.  */
> > +        update_data (NULL, len, 0);
> >      }
> >  
> >    /* Return the pointer to the user buffer.  */
> > @@ -672,7 +679,7 @@ mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
> >  /* `mremap' replacement.  We do not have to keep track of the size since
> >     `munmap' will get it as a parameter.  */
> >  void *
> > -mremap (void *start, size_t old_len, size_t len, int flags,  ...)
> > +mremap (void *start, size_t old_len, size_t len, int flags, ...)
> >  {
> >    void *result = NULL;
> >    va_list ap;
> > @@ -685,49 +692,50 @@ mremap (void *start, size_t old_len, size_t len, int flags,  ...)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return NULL;
> > +        return NULL;
> > +
> >        me ();
> >      }
> >  
> >    /* Always get a block.  We don't need extra memory.  */
> > -  result = (*mremapp) (start, old_len, len, flags, newaddr);
> > +  result = (*mremapp)(start, old_len, len, flags, newaddr);
> >  
> >    if (!not_me && trace_mmap)
> >      {
> >        /* Keep track of number of calls.  */
> >        catomic_increment (&calls[idx_mremap]);
> >        if (len > old_len)
> > -	{
> > -	  /* Keep track of total memory consumption for `malloc'.  */
> > -	  catomic_add (&total[idx_mremap], len - old_len);
> > -	  /* Keep track of total memory requirement.  */
> > -	  catomic_add (&grand_total, len - old_len);
> > -	}
> > +        {
> > +          /* Keep track of total memory consumption for `malloc'.  */
> > +          catomic_add (&total[idx_mremap], len - old_len);
> > +          /* Keep track of total memory requirement.  */
> > +          catomic_add (&grand_total, len - old_len);
> > +        }
> >        /* Remember the size of the request.  */
> >        if (len < 65536)
> > -	catomic_increment (&histogram[len / 16]);
> > +        catomic_increment (&histogram[len / 16]);
> >        else
> > -	catomic_increment (&large);
> > +        catomic_increment (&large);
> >        /* Total number of calls of any of the functions.  */
> >        catomic_increment (&calls_total);
> >  
> >        /* Check for failures.  */
> >        if (result == NULL)
> > -	catomic_increment (&failed[idx_mremap]);
> > +        catomic_increment (&failed[idx_mremap]);
> >        else
> > -	{
> > -	  /* Record whether the reduction/increase happened in place.  */
> > -	  if (start == result)
> > -	    catomic_increment (&inplace_mremap);
> > -	  /* Was the buffer increased?  */
> > -	  if (old_len > len)
> > -	    catomic_increment (&decreasing_mremap);
> > -
> > -	  /* Update the allocation data and write out the records if
> > -	     necessary.  Note the first parameter is NULL which means
> > -	     the size is not tracked.  */
> > -	  update_data (NULL, len, old_len);
> > -	}
> > +        {
> > +          /* Record whether the reduction/increase happened in place.  */
> > +          if (start == result)
> > +            catomic_increment (&inplace_mremap);
> > +          /* Was the buffer reduced?  */
> > +          if (old_len > len)
> > +            catomic_increment (&decreasing_mremap);
> > +
> > +          /* Update the allocation data and write out the records if
> > +             necessary.  Note the first parameter is NULL which means
> > +             the size is not tracked.  */
> > +          update_data (NULL, len, old_len);
> > +        }
> >      }
> >  
> >    /* Return the pointer to the user buffer.  */
> > @@ -745,12 +753,13 @@ munmap (void *start, size_t len)
> >    if (__builtin_expect (initialized <= 0, 0))
> >      {
> >        if (initialized == -1)
> > -	return -1;
> > +        return -1;
> > +
> >        me ();
> >      }
> >  
> >    /* Do the real work.  */
> > -  result = (*munmapp) (start, len);
> > +  result = (*munmapp)(start, len);
> >  
> >    if (!not_me && trace_mmap)
> >      {
> > @@ -758,16 +767,16 @@ munmap (void *start, size_t len)
> >        catomic_increment (&calls[idx_munmap]);
> >  
> >        if (__builtin_expect (result == 0, 1))
> > -	{
> > -	  /* Keep track of total memory freed using `free'.  */
> > -	  catomic_add (&total[idx_munmap], len);
> > -
> > -	  /* Update the allocation data and write out the records if
> > -	     necessary.  */
> > -	  update_data (NULL, 0, len);
> > -	}
> > +        {
> > +          /* Keep track of total memory freed using `free'.  */
> > +          catomic_add (&total[idx_munmap], len);
> > +
> > +          /* Update the allocation data and write out the records if
> > +             necessary.  */
> > +          update_data (NULL, 0, len);
> > +        }
> >        else
> > -	catomic_increment (&failed[idx_munmap]);
> > +        catomic_increment (&failed[idx_munmap]);
> >      }
> >  
> >    return result;
> > @@ -785,6 +794,7 @@ dest (void)
> >    /* If we haven't done anything here just return.  */
> >    if (not_me)
> >      return;
> > +
> >    /* If we should call any of the memory functions don't do any profiling.  */
> >    not_me = true;
> >  
> > @@ -793,13 +803,13 @@ dest (void)
> >      {
> >        /* Write the partially filled buffer.  */
> >        if (buffer_cnt > buffer_size)
> > -	write (fd, buffer + buffer_size,
> > -	       (buffer_cnt - buffer_size) * sizeof (struct entry));
> > +        write (fd, buffer + buffer_size,
> > +               (buffer_cnt - buffer_size) * sizeof (struct entry));
> >        else
> > -	write (fd, buffer, buffer_cnt * sizeof (struct entry));
> > +        write (fd, buffer, buffer_cnt * sizeof (struct entry));
> >  
> >        /* Go back to the beginning of the file.  We allocated two records
> > -	 here when we opened the file.  */
> > +         here when we opened the file.  */
> >        lseek (fd, 0, SEEK_SET);
> >        /* Write out a record containing the total size.  */
> >        first.stack = peak_total;
> > @@ -824,25 +834,25 @@ dest (void)
> >  \e[00;34mrealloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m  (nomove:%ld, dec:%ld, free:%ld)\n\
> >  \e[00;34m calloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
> >  \e[00;34m   free|\e[0m %10lu   %12llu\n",
> > -	   (unsigned long long int) grand_total, (unsigned long int) peak_heap,
> > -	   (unsigned long int) peak_stack,
> > -	   (unsigned long int) calls[idx_malloc],
> > -	   (unsigned long long int) total[idx_malloc],
> > -	   failed[idx_malloc] ? "\e[01;41m" : "",
> > -	   (unsigned long int) failed[idx_malloc],
> > -	   (unsigned long int) calls[idx_realloc],
> > -	   (unsigned long long int) total[idx_realloc],
> > -	   failed[idx_realloc] ? "\e[01;41m" : "",
> > -	   (unsigned long int) failed[idx_realloc],
> > -	   (unsigned long int) inplace,
> > -	   (unsigned long int) decreasing,
> > -	   (unsigned long int) realloc_free,
> > -	   (unsigned long int) calls[idx_calloc],
> > -	   (unsigned long long int) total[idx_calloc],
> > -	   failed[idx_calloc] ? "\e[01;41m" : "",
> > -	   (unsigned long int) failed[idx_calloc],
> > -	   (unsigned long int) calls[idx_free],
> > -	   (unsigned long long int) total[idx_free]);
> > +           (unsigned long long int) grand_total, (unsigned long int) peak_heap,
> > +           (unsigned long int) peak_stack,
> > +           (unsigned long int) calls[idx_malloc],
> > +           (unsigned long long int) total[idx_malloc],
> > +           failed[idx_malloc] ? "\e[01;41m" : "",
> > +           (unsigned long int) failed[idx_malloc],
> > +           (unsigned long int) calls[idx_realloc],
> > +           (unsigned long long int) total[idx_realloc],
> > +           failed[idx_realloc] ? "\e[01;41m" : "",
> > +           (unsigned long int) failed[idx_realloc],
> > +           (unsigned long int) inplace,
> > +           (unsigned long int) decreasing,
> > +           (unsigned long int) realloc_free,
> > +           (unsigned long int) calls[idx_calloc],
> > +           (unsigned long long int) total[idx_calloc],
> > +           failed[idx_calloc] ? "\e[01;41m" : "",
> > +           (unsigned long int) failed[idx_calloc],
> > +           (unsigned long int) calls[idx_free],
> > +           (unsigned long long int) total[idx_free]);
> >  
> >    if (trace_mmap)
> >      fprintf (stderr, "\
> > @@ -851,28 +861,28 @@ dest (void)
> >  \e[00;34mmmap(a)|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
> >  \e[00;34m mremap|\e[0m %10lu   %12llu   %s%12lu\e[00;00m  (nomove: %ld, dec:%ld)\n\
> >  \e[00;34m munmap|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n",
> > -	     (unsigned long int) calls[idx_mmap_r],
> > -	     (unsigned long long int) total[idx_mmap_r],
> > -	     failed[idx_mmap_r] ? "\e[01;41m" : "",
> > -	     (unsigned long int) failed[idx_mmap_r],
> > -	     (unsigned long int) calls[idx_mmap_w],
> > -	     (unsigned long long int) total[idx_mmap_w],
> > -	     failed[idx_mmap_w] ? "\e[01;41m" : "",
> > -	     (unsigned long int) failed[idx_mmap_w],
> > -	     (unsigned long int) calls[idx_mmap_a],
> > -	     (unsigned long long int) total[idx_mmap_a],
> > -	     failed[idx_mmap_a] ? "\e[01;41m" : "",
> > -	     (unsigned long int) failed[idx_mmap_a],
> > -	     (unsigned long int) calls[idx_mremap],
> > -	     (unsigned long long int) total[idx_mremap],
> > -	     failed[idx_mremap] ? "\e[01;41m" : "",
> > -	     (unsigned long int) failed[idx_mremap],
> > -	     (unsigned long int) inplace_mremap,
> > -	     (unsigned long int) decreasing_mremap,
> > -	     (unsigned long int) calls[idx_munmap],
> > -	     (unsigned long long int) total[idx_munmap],
> > -	     failed[idx_munmap] ? "\e[01;41m" : "",
> > -	     (unsigned long int) failed[idx_munmap]);
> > +             (unsigned long int) calls[idx_mmap_r],
> > +             (unsigned long long int) total[idx_mmap_r],
> > +             failed[idx_mmap_r] ? "\e[01;41m" : "",
> > +             (unsigned long int) failed[idx_mmap_r],
> > +             (unsigned long int) calls[idx_mmap_w],
> > +             (unsigned long long int) total[idx_mmap_w],
> > +             failed[idx_mmap_w] ? "\e[01;41m" : "",
> > +             (unsigned long int) failed[idx_mmap_w],
> > +             (unsigned long int) calls[idx_mmap_a],
> > +             (unsigned long long int) total[idx_mmap_a],
> > +             failed[idx_mmap_a] ? "\e[01;41m" : "",
> > +             (unsigned long int) failed[idx_mmap_a],
> > +             (unsigned long int) calls[idx_mremap],
> > +             (unsigned long long int) total[idx_mremap],
> > +             failed[idx_mremap] ? "\e[01;41m" : "",
> > +             (unsigned long int) failed[idx_mremap],
> > +             (unsigned long int) inplace_mremap,
> > +             (unsigned long int) decreasing_mremap,
> > +             (unsigned long int) calls[idx_munmap],
> > +             (unsigned long long int) total[idx_munmap],
> > +             failed[idx_munmap] ? "\e[01;41m" : "",
> > +             (unsigned long int) failed[idx_munmap]);
> >  
> >    /* Write out a histogram of the sizes of the allocations.  */
> >    fprintf (stderr, "\e[01;32mHistogram for block sizes:\e[0;0m\n");
> > @@ -887,20 +897,20 @@ dest (void)
> >      /* Only write out the nonzero entries.  */
> >      if (histogram[cnt / 16] != 0)
> >        {
> > -	percent = (histogram[cnt / 16] * 100) / calls_total;
> > -	fprintf (stderr, "%5d-%-5d%12lu ", cnt, cnt + 15,
> > -		 (unsigned long int) histogram[cnt / 16]);
> > -	if (percent == 0)
> > -	  fputs (" <1% \e[41;37m", stderr);
> > -	else
> > -	  fprintf (stderr, "%3d%% \e[41;37m", percent);
> > -
> > -	/* Draw a bar with a length corresponding to the current
> > +        percent = (histogram[cnt / 16] * 100) / calls_total;
> > +        fprintf (stderr, "%5d-%-5d%12lu ", cnt, cnt + 15,
> > +                 (unsigned long int) histogram[cnt / 16]);
> > +        if (percent == 0)
> > +          fputs (" <1% \e[41;37m", stderr);
> > +        else
> > +          fprintf (stderr, "%3d%% \e[41;37m", percent);
> > +
> > +        /* Draw a bar with a length corresponding to the current
> >             percentage.  */
> > -	percent = (histogram[cnt / 16] * 50) / maxcalls;
> > -	while (percent-- > 0)
> > -	  fputc ('=', stderr);
> > -	 fputs ("\e[0;0m\n", stderr);
> > +        percent = (histogram[cnt / 16] * 50) / maxcalls;
> > +        while (percent-- > 0)
> > +          fputc ('=', stderr);
> > +        fputs ("\e[0;0m\n", stderr);
> >        }
> >  
> >    if (large != 0)
> > @@ -908,9 +918,9 @@ dest (void)
> >        percent = (large * 100) / calls_total;
> >        fprintf (stderr, "   large   %12lu ", (unsigned long int) large);
> >        if (percent == 0)
> > -	fputs (" <1% \e[41;37m", stderr);
> > +        fputs (" <1% \e[41;37m", stderr);
> >        else
> > -	fprintf (stderr, "%3d%% \e[41;37m", percent);
> > +        fprintf (stderr, "%3d%% \e[41;37m", percent);
> >        percent = (large * 50) / maxcalls;
> >        while (percent-- > 0)
> >          fputc ('=', stderr);
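
Annotation, not part of the patch: the memusage.c hunks above are
whitespace-only, so checking equivalence mostly means re-reading the
same accounting logic at a new indentation.  It reduces to this sketch
(names simplified; the real counters are updated with
catomic_increment because the hooks can run in several threads):

  /* Requests below 64 KiB land in 16-byte-wide buckets, anything
     larger in a single "large" counter.  */
  static size_t histogram[65536 / 16];
  static size_t large;

  static void
  count_request (size_t len)
  {
    if (len < 65536)
      ++histogram[len / 16];
    else
      ++large;
  }
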
> > diff --git a/malloc/memusagestat.c b/malloc/memusagestat.c
> > index b244ef6..8c43b5d 100644
> > --- a/malloc/memusagestat.c
> > +++ b/malloc/memusagestat.c
> > @@ -53,24 +53,24 @@
> >  /* Definitions of arguments for argp functions.  */
> >  static const struct argp_option options[] =
> >  {
> > -  { "output", 'o', N_("FILE"), 0, N_("Name output file") },
> > -  { "string", 's', N_("STRING"), 0, N_("Title string used in output graphic") },
> > -  { "time", 't', NULL, 0, N_("\
> > +  { "output", 'o', N_ ("FILE"), 0, N_ ("Name output file") },
> > +  { "string", 's', N_ ("STRING"), 0, N_ ("Title string used in output graphic") },
> > +  { "time", 't', NULL, 0, N_ ("						      \
> >  Generate output linear to time (default is linear to number of function calls)\
> >  ") },
> >    { "total", 'T', NULL, 0,
> > -    N_("Also draw graph for total memory consumption") },
> > -  { "x-size", 'x', N_("VALUE"), 0,
> > -    N_("Make output graphic VALUE pixels wide") },
> > -  { "y-size", 'y', "VALUE", 0, N_("Make output graphic VALUE pixels high") },
> > +    N_ ("Also draw graph for total memory consumption") },
> > +  { "x-size", 'x', N_ ("VALUE"), 0,
> > +    N_ ("Make output graphic VALUE pixels wide") },
> > +  { "y-size", 'y', "VALUE", 0, N_ ("Make output graphic VALUE pixels high") },
> >    { NULL, 0, NULL, 0, NULL }
> >  };
> >  
> >  /* Short description of program.  */
> > -static const char doc[] = N_("Generate graphic from memory profiling data");
> > +static const char doc[] = N_ ("Generate graphic from memory profiling data");
> >  
> >  /* Strings for arguments in help texts.  */
> > -static const char args_doc[] = N_("DATAFILE [OUTFILE]");
> > +static const char args_doc[] = N_ ("DATAFILE [OUTFILE]");
> >  
> >  /* Prototype for option handler.  */
> >  static error_t parse_opt (int key, char *arg, struct argp_state *state);
> > @@ -152,7 +152,7 @@ main (int argc, char *argv[])
> >    if (remaining >= argc || remaining + 2 < argc)
> >      {
> >        argp_help (&argp, stdout, ARGP_HELP_SEE | ARGP_HELP_EXIT_ERR,
> > -		 program_invocation_short_name);
> > +                 program_invocation_short_name);
> >        exit (1);
> >      }
> >  
> > @@ -197,21 +197,21 @@ main (int argc, char *argv[])
> >    if (maxsize_heap == 0 && maxsize_stack == 0)
> >      {
> >        /* The program aborted before memusage was able to write the
> > -	 information about the maximum heap and stack use.  Repair
> > -	 the file now.  */
> > +         information about the maximum heap and stack use.  Repair
> > +         the file now.  */
> >        struct entry next;
> >  
> >        while (1)
> > -	{
> > -	  if (read (fd, &next, sizeof (next)) == 0)
> > -	    break;
> > -	  if (next.heap > maxsize_heap)
> > -	    maxsize_heap = next.heap;
> > -	  if (next.stack > maxsize_stack)
> > -	    maxsize_stack = next.stack;
> > -	  if (maxsize_heap + maxsize_stack > maxsize_total)
> > -	    maxsize_total = maxsize_heap + maxsize_stack;
> > -	}
> > +        {
> > +          if (read (fd, &next, sizeof (next)) == 0)
> > +            break;
> > +          if (next.heap > maxsize_heap)
> > +            maxsize_heap = next.heap;
> > +          if (next.stack > maxsize_stack)
> > +            maxsize_stack = next.stack;
> > +          if (maxsize_heap + maxsize_stack > maxsize_total)
> > +            maxsize_total = maxsize_heap + maxsize_stack;
> > +        }
> >  
> >        headent[0].stack = maxsize_total;
> >        headent[1].heap = maxsize_heap;
> > @@ -227,7 +227,7 @@ main (int argc, char *argv[])
> >    if (also_total)
> >      {
> >        /* We use one scale and since we also draw the total amount of
> > -	 memory used we have to adapt the maximum.  */
> > +         memory used we have to adapt the maximum.  */
> >        maxsize_heap = maxsize_total;
> >        maxsize_stack = maxsize_total;
> >      }
> > @@ -292,145 +292,145 @@ main (int argc, char *argv[])
> >      }
> >  
> >    gdImageString (im_out, gdFontSmall, 38, ysize - 14, (unsigned char *) "0",
> > -		 blue);
> > +                 blue);
> >    snprintf (buf, sizeof (buf), heap_format, 0);
> >    gdImageString (im_out, gdFontSmall, maxsize_heap < 1024 ? 32 : 26,
> > -		 ysize - 26, (unsigned char *) buf, red);
> > +                 ysize - 26, (unsigned char *) buf, red);
> >    snprintf (buf, sizeof (buf), stack_format, 0);
> >    gdImageString (im_out, gdFontSmall, xsize - 37, ysize - 26,
> > -		 (unsigned char *) buf, green);
> > +                 (unsigned char *) buf, green);
> >  
> >    if (string != NULL)
> >      gdImageString (im_out, gdFontLarge, (xsize - strlen (string) * 8) / 2,
> > -		   2, (unsigned char *) string, green);
> > +                   2, (unsigned char *) string, green);
> >  
> >    gdImageStringUp (im_out, gdFontSmall, 1, ysize / 2 - 10,
> > -		   (unsigned char *) "allocated", red);
> > +                   (unsigned char *) "allocated", red);
> >    gdImageStringUp (im_out, gdFontSmall, 11, ysize / 2 - 10,
> > -		   (unsigned char *) "memory", red);
> > +                   (unsigned char *) "memory", red);
> >  
> >    gdImageStringUp (im_out, gdFontSmall, xsize - 39, ysize / 2 - 10,
> > -		   (unsigned char *) "used", green);
> > +                   (unsigned char *) "used", green);
> >    gdImageStringUp (im_out, gdFontSmall, xsize - 27, ysize / 2 - 10,
> > -		   (unsigned char *) "stack", green);
> > +                   (unsigned char *) "stack", green);
> >  
> >    snprintf (buf, sizeof (buf), heap_format, maxsize_heap / heap_scale);
> >    gdImageString (im_out, gdFontSmall, 39 - strlen (buf) * 6, 14,
> > -		 (unsigned char *) buf, red);
> > +                 (unsigned char *) buf, red);
> >    snprintf (buf, sizeof (buf), stack_format, maxsize_stack / stack_scale);
> >    gdImageString (im_out, gdFontSmall, xsize - 37, 14,
> > -		 (unsigned char *) buf, green);
> > +                 (unsigned char *) buf, green);
> >  
> >    for (line = 1; line <= 3; ++line)
> >      {
> >        if (maxsize_heap > 0)
> > -	{
> > -	  cnt = (((ysize - 40) * (maxsize_heap / 4 * line / heap_scale))
> > -		 / (maxsize_heap / heap_scale));
> > -	  gdImageDashedLine (im_out, 40, ysize - 20 - cnt, xsize - 40,
> > -			     ysize - 20 - cnt, red);
> > -	  snprintf (buf, sizeof (buf), heap_format,
> > -		    maxsize_heap / 4 * line / heap_scale);
> > -	  gdImageString (im_out, gdFontSmall, 39 - strlen (buf) * 6,
> > -			 ysize - 26 - cnt, (unsigned char *) buf, red);
> > -	}
> > +        {
> > +          cnt = (((ysize - 40) * (maxsize_heap / 4 * line / heap_scale))
> > +                 / (maxsize_heap / heap_scale));
> > +          gdImageDashedLine (im_out, 40, ysize - 20 - cnt, xsize - 40,
> > +                             ysize - 20 - cnt, red);
> > +          snprintf (buf, sizeof (buf), heap_format,
> > +                    maxsize_heap / 4 * line / heap_scale);
> > +          gdImageString (im_out, gdFontSmall, 39 - strlen (buf) * 6,
> > +                         ysize - 26 - cnt, (unsigned char *) buf, red);
> > +        }
> >        else
> > -	cnt = 0;
> > +        cnt = 0;
> >  
> >        if (maxsize_stack > 0)
> > -	cnt2 = (((ysize - 40) * (maxsize_stack / 4 * line / stack_scale))
> > -		/ (maxsize_stack / stack_scale));
> > +        cnt2 = (((ysize - 40) * (maxsize_stack / 4 * line / stack_scale))
> > +                / (maxsize_stack / stack_scale));
> >        else
> > -	cnt2 = 0;
> > +        cnt2 = 0;
> >  
> >        if (cnt != cnt2)
> > -	gdImageDashedLine (im_out, 40, ysize - 20 - cnt2, xsize - 40,
> > -			   ysize - 20 - cnt2, green);
> > +        gdImageDashedLine (im_out, 40, ysize - 20 - cnt2, xsize - 40,
> > +                           ysize - 20 - cnt2, green);
> >        snprintf (buf, sizeof (buf), stack_format, maxsize_stack / 4 * line /
> > -		stack_scale);
> > +                stack_scale);
> >        gdImageString (im_out, gdFontSmall, xsize - 37, ysize - 26 - cnt2,
> > -		     (unsigned char *) buf, green);
> > +                     (unsigned char *) buf, green);
> >      }
> >  
> >    snprintf (buf, sizeof (buf), "%llu", (unsigned long long) total);
> >    gdImageString (im_out, gdFontSmall, xsize - 50, ysize - 14,
> > -		 (unsigned char *) buf, blue);
> > +                 (unsigned char *) buf, blue);
> >  
> >    if (!time_based)
> >      {
> >        uint64_t previously = start_time;
> >  
> >        gdImageString (im_out, gdFontSmall, 40 + (xsize - 32 * 6 - 80) / 2,
> > -		     ysize - 12,
> > -		     (unsigned char *) "# memory handling function calls",
> > -		     blue);
> > +                     ysize - 12,
> > +                     (unsigned char *) "# memory handling function calls",
> > +                     blue);
> >  
> >  
> >        last_stack = last_heap = last_total = ysize - 20;
> >        for (cnt = 1; cnt <= total; ++cnt)
> > -	{
> > -	  struct entry entry;
> > -	  size_t new[2];
> > -	  uint64_t now;
> > -
> > -	  read (fd, &entry, sizeof (entry));
> > -
> > -	  now = ((uint64_t) entry.time_high) << 32 | entry.time_low;
> > -
> > -	  if ((((previously - start_time) * 100) / total_time) % 10 < 5)
> > -	    gdImageFilledRectangle (im_out,
> > -				    40 + ((cnt - 1) * (xsize - 80)) / total,
> > -				    ysize - 19,
> > -				    39 + (cnt * (xsize - 80)) / total,
> > -				    ysize - 14, yellow);
> > -	  previously = now;
> > -
> > -	  if (also_total && maxsize_heap > 0)
> > -	    {
> > -	      size_t new3;
> > -
> > -	      new3 = (ysize - 20) - ((((unsigned long long int) (ysize - 40))
> > -				      * (entry.heap + entry.stack))
> > -				     / maxsize_heap);
> > -	      gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
> > -			   last_total,
> > -			   40 + ((xsize - 80) * cnt) / total, new3,
> > -			   black);
> > -	      last_total = new3;
> > -	    }
> > -
> > -	  if (maxsize_heap > 0)
> > -	    {
> > -	      new[0] = ((ysize - 20)
> > -			- ((((unsigned long long int) (ysize - 40))
> > -			    * entry.heap) / maxsize_heap));
> > -	      gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
> > -			   last_heap, 40 + ((xsize - 80) * cnt) / total,
> > -			   new[0], red);
> > -	      last_heap = new[0];
> > -	    }
> > -
> > -	  if (maxsize_stack > 0)
> > -	    {
> > -	      new[1] = ((ysize - 20)
> > -			- ((((unsigned long long int) (ysize - 40))
> > -			    * entry.stack) / maxsize_stack));
> > -	      gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
> > -			   last_stack, 40 + ((xsize - 80) * cnt) / total,
> > -			   new[1], green);
> > -	      last_stack = new[1];
> > -	    }
> > -	}
> > +        {
> > +          struct entry entry;
> > +          size_t new[2];
> > +          uint64_t now;
> > +
> > +          read (fd, &entry, sizeof (entry));
> > +
> > +          now = ((uint64_t) entry.time_high) << 32 | entry.time_low;
> > +
> > +          if ((((previously - start_time) * 100) / total_time) % 10 < 5)
> > +            gdImageFilledRectangle (im_out,
> > +                                    40 + ((cnt - 1) * (xsize - 80)) / total,
> > +                                    ysize - 19,
> > +                                    39 + (cnt * (xsize - 80)) / total,
> > +                                    ysize - 14, yellow);
> > +          previously = now;
> > +
> > +          if (also_total && maxsize_heap > 0)
> > +            {
> > +              size_t new3;
> > +
> > +              new3 = (ysize - 20) - ((((unsigned long long int) (ysize - 40))
> > +                                      * (entry.heap + entry.stack))
> > +                                     / maxsize_heap);
> > +              gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
> > +                           last_total,
> > +                           40 + ((xsize - 80) * cnt) / total, new3,
> > +                           black);
> > +              last_total = new3;
> > +            }
> > +
> > +          if (maxsize_heap > 0)
> > +            {
> > +              new[0] = ((ysize - 20)
> > +                        - ((((unsigned long long int) (ysize - 40))
> > +                            * entry.heap) / maxsize_heap));
> > +              gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
> > +                           last_heap, 40 + ((xsize - 80) * cnt) / total,
> > +                           new[0], red);
> > +              last_heap = new[0];
> > +            }
> > +
> > +          if (maxsize_stack > 0)
> > +            {
> > +              new[1] = ((ysize - 20)
> > +                        - ((((unsigned long long int) (ysize - 40))
> > +                            * entry.stack) / maxsize_stack));
> > +              gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
> > +                           last_stack, 40 + ((xsize - 80) * cnt) / total,
> > +                           new[1], green);
> > +              last_stack = new[1];
> > +            }
> > +        }
> >  
> >        cnt = 0;
> >        while (cnt < total)
> > -	{
> > -	  gdImageLine (im_out, 40 + ((xsize - 80) * cnt) / total, ysize - 20,
> > -		       40 + ((xsize - 80) * cnt) / total, ysize - 15, blue);
> > -	  cnt += MAX (1, total / 20);
> > -	}
> > +        {
> > +          gdImageLine (im_out, 40 + ((xsize - 80) * cnt) / total, ysize - 20,
> > +                       40 + ((xsize - 80) * cnt) / total, ysize - 15, blue);
> > +          cnt += MAX (1, total / 20);
> > +        }
> >        gdImageLine (im_out, xsize - 40, ysize - 20, xsize - 40, ysize - 15,
> > -		   blue);
> > +                   blue);
> >      }
> >    else
> >      {
> > @@ -438,67 +438,67 @@ main (int argc, char *argv[])
> >        size_t last_xpos = 40;
> >  
> >        gdImageString (im_out, gdFontSmall, 40 + (xsize - 39 * 6 - 80) / 2,
> > -		     ysize - 12,
> > -		     (unsigned char *) "\
> > +                     ysize - 12,
> > +                     (unsigned char *) "				      \
> >  # memory handling function calls / time", blue);
> >  
> >        for (cnt = 0; cnt < 20; cnt += 2)
> > -	gdImageFilledRectangle (im_out,
> > -				40 + (cnt * (xsize - 80)) / 20, ysize - 19,
> > -				39 + ((cnt + 1) * (xsize - 80)) / 20,
> > -				ysize - 14, yellow);
> > +        gdImageFilledRectangle (im_out,
> > +                                40 + (cnt * (xsize - 80)) / 20, ysize - 19,
> > +                                39 + ((cnt + 1) * (xsize - 80)) / 20,
> > +                                ysize - 14, yellow);
> >  
> >        last_stack = last_heap = last_total = ysize - 20;
> >        for (cnt = 1; cnt <= total; ++cnt)
> > -	{
> > -	  struct entry entry;
> > -	  size_t new[2];
> > -	  size_t xpos;
> > -	  uint64_t now;
> > -
> > -	  read (fd, &entry, sizeof (entry));
> > -
> > -	  now = ((uint64_t) entry.time_high) << 32 | entry.time_low;
> > -	  xpos = 40 + ((xsize - 80) * (now - start_time)) / total_time;
> > -
> > -	  if (cnt == next_tick)
> > -	    {
> > -	      gdImageLine (im_out, xpos, ysize - 20, xpos, ysize - 15, blue);
> > -	      next_tick += MAX (1, total / 20);
> > -	    }
> > -
> > -	  if (also_total && maxsize_heap > 0)
> > -	    {
> > -	      size_t new3;
> > -
> > -	      new3 = (ysize - 20) - ((((unsigned long long int) (ysize - 40))
> > -				      * (entry.heap + entry.stack))
> > -				     / maxsize_heap);
> > -	      gdImageLine (im_out, last_xpos, last_total, xpos, new3, black);
> > -	      last_total = new3;
> > -	    }
> > -
> > -	  if (maxsize_heap > 0)
> > -	    {
> > -	      new[0] = ((ysize - 20)
> > -			- ((((unsigned long long int) (ysize - 40))
> > -			    * entry.heap) / maxsize_heap));
> > -	      gdImageLine (im_out, last_xpos, last_heap, xpos, new[0], red);
> > -	      last_heap = new[0];
> > -	    }
> > -
> > -	  if (maxsize_stack > 0)
> > -	    {
> > -	      new[1] = ((ysize - 20)
> > -			- ((((unsigned long long int) (ysize - 40))
> > -			    * entry.stack) / maxsize_stack));
> > -	      gdImageLine (im_out, last_xpos, last_stack, xpos, new[1],
> > -			   green);
> > -	      last_stack = new[1];
> > -	    }
> > -
> > -	  last_xpos = xpos;
> > -	}
> > +        {
> > +          struct entry entry;
> > +          size_t new[2];
> > +          size_t xpos;
> > +          uint64_t now;
> > +
> > +          read (fd, &entry, sizeof (entry));
> > +
> > +          now = ((uint64_t) entry.time_high) << 32 | entry.time_low;
> > +          xpos = 40 + ((xsize - 80) * (now - start_time)) / total_time;
> > +
> > +          if (cnt == next_tick)
> > +            {
> > +              gdImageLine (im_out, xpos, ysize - 20, xpos, ysize - 15, blue);
> > +              next_tick += MAX (1, total / 20);
> > +            }
> > +
> > +          if (also_total && maxsize_heap > 0)
> > +            {
> > +              size_t new3;
> > +
> > +              new3 = (ysize - 20) - ((((unsigned long long int) (ysize - 40))
> > +                                      * (entry.heap + entry.stack))
> > +                                     / maxsize_heap);
> > +              gdImageLine (im_out, last_xpos, last_total, xpos, new3, black);
> > +              last_total = new3;
> > +            }
> > +
> > +          if (maxsize_heap > 0)
> > +            {
> > +              new[0] = ((ysize - 20)
> > +                        - ((((unsigned long long int) (ysize - 40))
> > +                            * entry.heap) / maxsize_heap));
> > +              gdImageLine (im_out, last_xpos, last_heap, xpos, new[0], red);
> > +              last_heap = new[0];
> > +            }
> > +
> > +          if (maxsize_stack > 0)
> > +            {
> > +              new[1] = ((ysize - 20)
> > +                        - ((((unsigned long long int) (ysize - 40))
> > +                            * entry.stack) / maxsize_stack));
> > +              gdImageLine (im_out, last_xpos, last_stack, xpos, new[1],
> > +                           green);
> > +              last_stack = new[1];
> > +            }
> > +
> > +          last_xpos = xpos;
> > +        }
> >      }
> >  
> >    /* Write out the result.  */
> > @@ -537,12 +537,12 @@ parse_opt (int key, char *arg, struct argp_state *state)
> >      case 'x':
> >        xsize = atoi (arg);
> >        if (xsize == 0)
> > -	xsize = XSIZE;
> > +        xsize = XSIZE;
> >        break;
> >      case 'y':
> >        ysize = atoi (arg);
> >        if (ysize == 0)
> > -	ysize = XSIZE;
> > +        ysize = XSIZE;
> >        break;
> >      default:
> >        return ARGP_ERR_UNKNOWN;
> > @@ -563,8 +563,10 @@ more_help (int key, const char *text, void *input)
> >        if (asprintf (&tp, gettext ("\
> >  For bug reporting instructions, please see:\n\
> >  %s.\n"), REPORT_BUGS_TO) < 0)
> > -	return NULL;
> > +        return NULL;
> > +
> >        return tp;
> > +
> >      default:
> >        break;
> >      }
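
Annotation, not part of the patch: every plotting branch in the
memusagestat.c hunks maps a sample to a y pixel with the same
expression, so the reindented lines can be checked against one helper
(the name is mine, it does not exist in the source):

  /* Map VALUE in [0, MAXSIZE] onto an YSIZE-pixel-high image,
     leaving the 20-pixel margins used for labels and ticks.  */
  static size_t
  sample_ypos (size_t value, size_t maxsize, size_t ysize)
  {
    return (ysize - 20)
           - ((unsigned long long int) (ysize - 40) * value) / maxsize;
  }
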
> > diff --git a/malloc/morecore.c b/malloc/morecore.c
> > index 0a644c3..bd6e750 100644
> > --- a/malloc/morecore.c
> > +++ b/malloc/morecore.c
> > @@ -15,27 +15,27 @@
> >     License along with the GNU C Library; if not, see
> >     <http://www.gnu.org/licenses/>.  */
> >  
> > -#ifndef	_MALLOC_INTERNAL
> > -#define	_MALLOC_INTERNAL
> > -#include <malloc.h>
> > +#ifndef _MALLOC_INTERNAL
> > +# define _MALLOC_INTERNAL
> > +# include <malloc.h>
> >  #endif
> >  
> > -#ifndef	__GNU_LIBRARY__
> > -#define	__sbrk	sbrk
> > +#ifndef __GNU_LIBRARY__
> > +# define __sbrk  sbrk
> >  #endif
> >  
> >  #ifdef __GNU_LIBRARY__
> >  /* It is best not to declare this and cast its result on foreign operating
> >     systems with potentially hostile include files.  */
> >  
> > -#include <stddef.h>
> > -#include <stdlib.h>
> > +# include <stddef.h>
> > +# include <stdlib.h>
> >  extern void *__sbrk (ptrdiff_t increment) __THROW;
> >  libc_hidden_proto (__sbrk)
> >  #endif
> >  
> >  #ifndef NULL
> > -#define NULL 0
> > +# define NULL 0
> >  #endif
> >  
> >  /* Allocate INCREMENT more bytes of data space,
> > @@ -47,6 +47,7 @@ __default_morecore (ptrdiff_t increment)
> >    void *result = (void *) __sbrk (increment);
> >    if (result == (void *) -1)
> >      return NULL;
> > +
> >    return result;
> >  }
> >  libc_hidden_def (__default_morecore)
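
Annotation, not part of the patch: morecore.c only translates sbrk's
failure convention into malloc's.  A self-contained restatement of the
function above (a sketch, not the glibc build):

  #include <stddef.h>
  #include <unistd.h>

  /* sbrk reports failure as (void *) -1; the MORECORE callers inside
     malloc test for NULL instead, so the wrapper converts.  */
  static void *
  default_morecore_sketch (ptrdiff_t increment)
  {
    void *result = sbrk (increment);
    if (result == (void *) -1)
      return NULL;

    return result;
  }
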
> > diff --git a/malloc/mtrace.c b/malloc/mtrace.c
> > index ee94133..93229ed 100644
> > --- a/malloc/mtrace.c
> > +++ b/malloc/mtrace.c
> > @@ -1,8 +1,8 @@
> >  /* More debugging hooks for `malloc'.
> >     Copyright (C) 1991-2013 Free Software Foundation, Inc.
> >     This file is part of the GNU C Library.
> > -		 Written April 2, 1991 by John Gilmore of Cygnus Support.
> > -		 Based on mcheck.c by Mike Haertel.
> > +                 Written April 2, 1991 by John Gilmore of Cygnus Support.
> > +                 Based on mcheck.c by Mike Haertel.
> >  
> >     The GNU C Library is free software; you can redistribute it and/or
> >     modify it under the terms of the GNU Lesser General Public
> > @@ -18,11 +18,11 @@
> >     License along with the GNU C Library; if not, see
> >     <http://www.gnu.org/licenses/>.  */
> >  
> > -#ifndef	_MALLOC_INTERNAL
> > -#define	_MALLOC_INTERNAL
> > -#include <malloc.h>
> > -#include <mcheck.h>
> > -#include <bits/libc-lock.h>
> > +#ifndef _MALLOC_INTERNAL
> > +# define _MALLOC_INTERNAL
> > +# include <malloc.h>
> > +# include <mcheck.h>
> > +# include <bits/libc-lock.h>
> >  #endif
> >  
> >  #include <dlfcn.h>
> > @@ -48,7 +48,7 @@
> >  #define TRACE_BUFFER_SIZE 512
> >  
> >  static FILE *mallstream;
> > -static const char mallenv[]= "MALLOC_TRACE";
> > +static const char mallenv[] = "MALLOC_TRACE";
> >  static char *malloc_trace_buffer;
> >  
> >  __libc_lock_define_initialized (static, lock);
> > @@ -60,9 +60,9 @@ __ptr_t mallwatch;
> >  static void (*tr_old_free_hook) (__ptr_t ptr, const __ptr_t);
> >  static __ptr_t (*tr_old_malloc_hook) (size_t size, const __ptr_t);
> >  static __ptr_t (*tr_old_realloc_hook) (__ptr_t ptr, size_t size,
> > -				       const __ptr_t);
> > +                                       const __ptr_t);
> >  static __ptr_t (*tr_old_memalign_hook) (size_t __alignment, size_t __size,
> > -					const __ptr_t);
> > +                                        const __ptr_t);
> >  
> >  /* This function is called when the block being alloc'd, realloc'd, or
> >     freed has an address matching the variable "mallwatch".  In a debugger,
> > @@ -79,39 +79,38 @@ libc_hidden_def (tr_break)
> >  
> >  static void tr_where (const __ptr_t, Dl_info *) __THROW internal_function;
> >  static void
> > -internal_function
> > -tr_where (caller, info)
> > -     const __ptr_t caller;
> > -     Dl_info *info;
> > +internal_function tr_where (caller, info)
> > +const __ptr_t caller;
> > +Dl_info *info;
> >  {
> >    if (caller != NULL)
> >      {
> >        if (info != NULL)
> > -	{
> > -	  char *buf = (char *) "";
> > -	  if (info->dli_sname != NULL)
> > -	    {
> > -	      size_t len = strlen (info->dli_sname);
> > -	      buf = alloca (len + 6 + 2 * sizeof (void *));
> > -
> > -	      buf[0] = '(';
> > -	      __stpcpy (_fitoa (caller >= (const __ptr_t) info->dli_saddr
> > -				? caller - (const __ptr_t) info->dli_saddr
> > -				: (const __ptr_t) info->dli_saddr - caller,
> > -				__stpcpy (__mempcpy (buf + 1, info->dli_sname,
> > -						     len),
> > -					  caller >= (__ptr_t) info->dli_saddr
> > -					  ? "+0x" : "-0x"),
> > -				16, 0),
> > -			")");
> > -	    }
> > -
> > -	  fprintf (mallstream, "@ %s%s%s[%p] ",
> > -		   info->dli_fname ?: "", info->dli_fname ? ":" : "",
> > -		   buf, caller);
> > -	}
> > +        {
> > +          char *buf = (char *) "";
> > +          if (info->dli_sname != NULL)
> > +            {
> > +              size_t len = strlen (info->dli_sname);
> > +              buf = alloca (len + 6 + 2 * sizeof (void *));
> > +
> > +              buf[0] = '(';
> > +              __stpcpy (_fitoa (caller >= (const __ptr_t) info->dli_saddr
> > +                                ? caller - (const __ptr_t) info->dli_saddr
> > +                                : (const __ptr_t) info->dli_saddr - caller,
> > +                                __stpcpy (__mempcpy (buf + 1, info->dli_sname,
> > +                                                     len),
> > +                                          caller >= (__ptr_t) info->dli_saddr
> > +                                          ? "+0x" : "-0x"),
> > +                                16, 0),
> > +                        ")");
> > +            }
> > +
> > +          fprintf (mallstream, "@ %s%s%s[%p] ",
> > +                   info->dli_fname ? : "", info->dli_fname ? ":" : "",
> > +                   buf, caller);
> > +        }
> >        else
> > -	fprintf (mallstream, "@ [%p] ", caller);
> > +        fprintf (mallstream, "@ [%p] ", caller);
> >      }
> >  }
> >  
> > @@ -131,10 +130,9 @@ lock_and_info (const __ptr_t caller, Dl_info *mem)
> >  
> >  
> >  static void tr_freehook (__ptr_t, const __ptr_t) __THROW;
> > -static void
> > -tr_freehook (ptr, caller)
> > -     __ptr_t ptr;
> > -     const __ptr_t caller;
> > +static void tr_freehook (ptr, caller)
> > +__ptr_t ptr;
> > +const __ptr_t caller;
> >  {
> >    if (ptr == NULL)
> >      return;
> > @@ -152,7 +150,7 @@ tr_freehook (ptr, caller)
> >      }
> >    __free_hook = tr_old_free_hook;
> >    if (tr_old_free_hook != NULL)
> > -    (*tr_old_free_hook) (ptr, caller);
> > +    (*tr_old_free_hook)(ptr, caller);
> >    else
> >      free (ptr);
> >    __free_hook = tr_freehook;
> > @@ -160,10 +158,9 @@ tr_freehook (ptr, caller)
> >  }
> >  
> >  static __ptr_t tr_mallochook (size_t, const __ptr_t) __THROW;
> > -static __ptr_t
> > -tr_mallochook (size, caller)
> > -     size_t size;
> > -     const __ptr_t caller;
> > +static __ptr_t tr_mallochook (size, caller)
> > +size_t size;
> > +const __ptr_t caller;
> >  {
> >    __ptr_t hdr;
> >  
> > @@ -172,7 +169,7 @@ tr_mallochook (size, caller)
> >  
> >    __malloc_hook = tr_old_malloc_hook;
> >    if (tr_old_malloc_hook != NULL)
> > -    hdr = (__ptr_t) (*tr_old_malloc_hook) (size, caller);
> > +    hdr = (__ptr_t) (*tr_old_malloc_hook)(size, caller);
> >    else
> >      hdr = (__ptr_t) malloc (size);
> >    __malloc_hook = tr_mallochook;
> > @@ -190,12 +187,11 @@ tr_mallochook (size, caller)
> >  }
> >  
> >  static __ptr_t tr_reallochook (__ptr_t, size_t, const __ptr_t)
> > -     __THROW;
> > -static __ptr_t
> > -tr_reallochook (ptr, size, caller)
> > -     __ptr_t ptr;
> > -     size_t size;
> > -     const __ptr_t caller;
> > +__THROW;
> > +static __ptr_t tr_reallochook (ptr, size, caller)
> > +__ptr_t ptr;
> > +size_t size;
> > +const __ptr_t caller;
> >  {
> >    __ptr_t hdr;
> >  
> > @@ -209,7 +205,7 @@ tr_reallochook (ptr, size, caller)
> >    __malloc_hook = tr_old_malloc_hook;
> >    __realloc_hook = tr_old_realloc_hook;
> >    if (tr_old_realloc_hook != NULL)
> > -    hdr = (__ptr_t) (*tr_old_realloc_hook) (ptr, size, caller);
> > +    hdr = (__ptr_t) (*tr_old_realloc_hook)(ptr, size, caller);
> >    else
> >      hdr = (__ptr_t) realloc (ptr, size);
> >    __free_hook = tr_freehook;
> > @@ -220,10 +216,10 @@ tr_reallochook (ptr, size, caller)
> >    if (hdr == NULL)
> >      {
> >        if (size != 0)
> > -	/* Failed realloc.  */
> > -	fprintf (mallstream, "! %p %#lx\n", ptr, (unsigned long int) size);
> > +        /* Failed realloc.  */
> > +        fprintf (mallstream, "! %p %#lx\n", ptr, (unsigned long int) size);
> >        else
> > -	fprintf (mallstream, "- %p\n", ptr);
> > +        fprintf (mallstream, "- %p\n", ptr);
> >      }
> >    else if (ptr == NULL)
> >      fprintf (mallstream, "+ %p %#lx\n", hdr, (unsigned long int) size);
> > @@ -243,11 +239,10 @@ tr_reallochook (ptr, size, caller)
> >  }
> >  
> >  static __ptr_t tr_memalignhook (size_t, size_t,
> > -				const __ptr_t) __THROW;
> > -static __ptr_t
> > -tr_memalignhook (alignment, size, caller)
> > -     size_t alignment, size;
> > -     const __ptr_t caller;
> > +                                const __ptr_t) __THROW;
> > +static __ptr_t tr_memalignhook (alignment, size, caller)
> > +size_t alignment, size;
> > +const __ptr_t caller;
> >  {
> >    __ptr_t hdr;
> >  
> > @@ -257,7 +252,7 @@ tr_memalignhook (alignment, size, caller)
> >    __memalign_hook = tr_old_memalign_hook;
> >    __malloc_hook = tr_old_malloc_hook;
> >    if (tr_old_memalign_hook != NULL)
> > -    hdr = (__ptr_t) (*tr_old_memalign_hook) (alignment, size, caller);
> > +    hdr = (__ptr_t) (*tr_old_memalign_hook)(alignment, size, caller);
> >    else
> >      hdr = (__ptr_t) memalign (alignment, size);
> >    __memalign_hook = tr_memalignhook;
> > @@ -321,44 +316,44 @@ mtrace (void)
> >      {
> >        char *mtb = malloc (TRACE_BUFFER_SIZE);
> >        if (mtb == NULL)
> > -	return;
> > +        return;
> >  
> >        mallstream = fopen (mallfile != NULL ? mallfile : "/dev/null", "wce");
> >        if (mallstream != NULL)
> > -	{
> > +        {
> >  #ifndef __ASSUME_O_CLOEXEC
> > -	  /* Make sure we close the file descriptor on exec.  */
> > -	  int flags = __fcntl (fileno (mallstream), F_GETFD, 0);
> > -	  if (flags >= 0)
> > -	    {
> > -	      flags |= FD_CLOEXEC;
> > -	      __fcntl (fileno (mallstream), F_SETFD, flags);
> > -	    }
> > +          /* Make sure we close the file descriptor on exec.  */
> > +          int flags = __fcntl (fileno (mallstream), F_GETFD, 0);
> > +          if (flags >= 0)
> > +            {
> > +              flags |= FD_CLOEXEC;
> > +              __fcntl (fileno (mallstream), F_SETFD, flags);
> > +            }
> >  #endif
> > -	  /* Be sure it doesn't malloc its buffer!  */
> > -	  malloc_trace_buffer = mtb;
> > -	  setvbuf (mallstream, malloc_trace_buffer, _IOFBF, TRACE_BUFFER_SIZE);
> > -	  fprintf (mallstream, "= Start\n");
> > -	  tr_old_free_hook = __free_hook;
> > -	  __free_hook = tr_freehook;
> > -	  tr_old_malloc_hook = __malloc_hook;
> > -	  __malloc_hook = tr_mallochook;
> > -	  tr_old_realloc_hook = __realloc_hook;
> > -	  __realloc_hook = tr_reallochook;
> > -	  tr_old_memalign_hook = __memalign_hook;
> > -	  __memalign_hook = tr_memalignhook;
> > +          /* Be sure it doesn't malloc its buffer!  */
> > +          malloc_trace_buffer = mtb;
> > +          setvbuf (mallstream, malloc_trace_buffer, _IOFBF, TRACE_BUFFER_SIZE);
> > +          fprintf (mallstream, "= Start\n");
> > +          tr_old_free_hook = __free_hook;
> > +          __free_hook = tr_freehook;
> > +          tr_old_malloc_hook = __malloc_hook;
> > +          __malloc_hook = tr_mallochook;
> > +          tr_old_realloc_hook = __realloc_hook;
> > +          __realloc_hook = tr_reallochook;
> > +          tr_old_memalign_hook = __memalign_hook;
> > +          __memalign_hook = tr_memalignhook;
> >  #ifdef _LIBC
> > -	  if (!added_atexit_handler)
> > -	    {
> > -	      extern void *__dso_handle __attribute__ ((__weak__));
> > -	      added_atexit_handler = 1;
> > -	      __cxa_atexit ((void (*) (void *)) release_libc_mem, NULL,
> > -			     &__dso_handle ? __dso_handle : NULL);
> > -	    }
> > +          if (!added_atexit_handler)
> > +            {
> > +              extern void *__dso_handle __attribute__ ((__weak__));
> > +              added_atexit_handler = 1;
> > +              __cxa_atexit ((void (*)(void *))release_libc_mem, NULL,
> > +                            &__dso_handle ? __dso_handle : NULL);
> > +            }
> >  #endif
> > -	}
> > +        }
> >        else
> > -	free (mtb);
> > +        free (mtb);
> >      }
> >  }
> >  
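
Annotation, not part of the patch: all four tr_*hook functions in the
mtrace.c hunks follow the same save/step-aside/reinstall pattern,
which is easy to lose in a big reindentation.  Reduced to one hook
(types simplified to void *; the output format matches the real code):

  #include <malloc.h>
  #include <stdio.h>

  static void *(*old_malloc_hook) (size_t, const void *);

  static void *
  tracing_malloc (size_t size, const void *caller)
  {
    void *result;

    /* Step aside so the allocation below is not traced again.  */
    __malloc_hook = old_malloc_hook;
    if (old_malloc_hook != NULL)
      result = (*old_malloc_hook) (size, caller);
    else
      result = malloc (size);
    fprintf (stderr, "+ %p %#lx\n", result, (unsigned long int) size);
    /* Reinstall ourselves for the next call.  */
    __malloc_hook = tracing_malloc;
    return result;
  }
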
> > diff --git a/malloc/obstack.c b/malloc/obstack.c
> > index 69320ca..37d98a4 100644
> > --- a/malloc/obstack.c
> > +++ b/malloc/obstack.c
> > @@ -42,7 +42,7 @@
> >     program understand `configure --with-gnu-libc' and omit the object
> >     files, it is simpler to just do this in the source for each such file.  */
> >  
> > -#include <stdio.h>		/* Random thing to get __GNU_LIBRARY__.  */
> > +#include <stdio.h>              /* Random thing to get __GNU_LIBRARY__.  */
> >  #if !defined _LIBC && defined __GNU_LIBRARY__ && __GNU_LIBRARY__ > 1
> >  # include <gnu-versions.h>
> >  # if _GNU_OBSTACK_INTERFACE_VERSION == OBSTACK_INTERFACE_VERSION
> > @@ -78,10 +78,10 @@ struct fooalign
> >     But in fact it might be less smart and round addresses to as much as
> >     DEFAULT_ROUNDING.  So we prepare for it to do that.  */
> >  enum
> > -  {
> > -    DEFAULT_ALIGNMENT = offsetof (struct fooalign, u),
> > -    DEFAULT_ROUNDING = sizeof (union fooround)
> > -  };
> > +{
> > +  DEFAULT_ALIGNMENT = offsetof (struct fooalign, u),
> > +  DEFAULT_ROUNDING = sizeof (union fooround)
> > +};
> >  
> >  /* When we copy a long block of data, this is the unit to do it with.
> >     On some machines, copying successive ints does not work;
> > @@ -127,19 +127,19 @@ compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0);
> >     do not allow (expr) ? void : void.  */
> >  
> >  # define CALL_CHUNKFUN(h, size) \
> > -  (((h) -> use_extra_arg) \
> > -   ? (*(h)->chunkfun) ((h)->extra_arg, (size)) \
> > -   : (*(struct _obstack_chunk *(*) (long)) (h)->chunkfun) ((size)))
> > +  (((h)->use_extra_arg)							      \
> > +   ? (*(h)->chunkfun)((h)->extra_arg, (size))				      \
> > +   : (*(struct _obstack_chunk *(*)(long))(h)->chunkfun)((size)))
> >  
> >  # define CALL_FREEFUN(h, old_chunk) \
> >    do { \
> > -    if ((h) -> use_extra_arg) \
> > -      (*(h)->freefun) ((h)->extra_arg, (old_chunk)); \
> > -    else \
> > -      (*(void (*) (void *)) (h)->freefun) ((old_chunk)); \
> > -  } while (0)
> > +      if ((h)->use_extra_arg)						      \
> > +        (*(h)->freefun)((h)->extra_arg, (old_chunk));			      \
> > +      else								      \
> > +        (*(void (*)(void *))(h)->freefun)((old_chunk));			      \
> > +    } while (0)
> > +
> >  
> > -
> >  /* Initialize an obstack H for use.  Specify chunk size SIZE (0 means default).
> >     Objects start on multiples of ALIGNMENT (0 means use default).
> >     CHUNKFUN is the function to use to allocate chunks,
> > @@ -150,44 +150,44 @@ compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0);
> >  
> >  int
> >  _obstack_begin (struct obstack *h,
> > -		int size, int alignment,
> > -		void *(*chunkfun) (long),
> > -		void (*freefun) (void *))
> > +                int size, int alignment,
> > +                void *(*chunkfun)(long),
> > +                void (*freefun)(void *))
> >  {
> >    struct _obstack_chunk *chunk; /* points to new chunk */
> >  
> >    if (alignment == 0)
> >      alignment = DEFAULT_ALIGNMENT;
> >    if (size == 0)
> > -    /* Default size is what GNU malloc can fit in a 4096-byte block.  */
> > +  /* Default size is what GNU malloc can fit in a 4096-byte block.  */
> >      {
> >        /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
> > -	 Use the values for range checking, because if range checking is off,
> > -	 the extra bytes won't be missed terribly, but if range checking is on
> > -	 and we used a larger request, a whole extra 4096 bytes would be
> > -	 allocated.
> > +         Use the values for range checking, because if range checking is off,
> > +         the extra bytes won't be missed terribly, but if range checking is on
> > +         and we used a larger request, a whole extra 4096 bytes would be
> > +         allocated.
> >  
> > -	 These number are irrelevant to the new GNU malloc.  I suspect it is
> > -	 less sensitive to the size of the request.  */
> > +         These numbers are irrelevant to the new GNU malloc.  I suspect it is
> > +         less sensitive to the size of the request.  */
> >        int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
> > -		    + 4 + DEFAULT_ROUNDING - 1)
> > -		   & ~(DEFAULT_ROUNDING - 1));
> > +                    + 4 + DEFAULT_ROUNDING - 1)
> > +                   & ~(DEFAULT_ROUNDING - 1));
> >        size = 4096 - extra;
> >      }
> >  
> > -  h->chunkfun = (struct _obstack_chunk * (*)(void *, long)) chunkfun;
> > -  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
> > +  h->chunkfun = (struct _obstack_chunk * (*)(void *, long))chunkfun;
> > +  h->freefun = (void (*)(void *, struct _obstack_chunk *))freefun;
> >    h->chunk_size = size;
> >    h->alignment_mask = alignment - 1;
> >    h->use_extra_arg = 0;
> >  
> > -  chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
> > +  chunk = h->chunk = CALL_CHUNKFUN (h, h->chunk_size);
> >    if (!chunk)
> > -    (*obstack_alloc_failed_handler) ();
> > +    (*obstack_alloc_failed_handler)();
> >    h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
> > -					       alignment - 1);
> > +                                               alignment - 1);
> >    h->chunk_limit = chunk->limit
> > -    = (char *) chunk + h->chunk_size;
> > +                     = (char *) chunk + h->chunk_size;
> >    chunk->prev = 0;
> >    /* The initial chunk now contains no empty object.  */
> >    h->maybe_empty_object = 0;
> > @@ -197,45 +197,45 @@ _obstack_begin (struct obstack *h,
> >  
> >  int
> >  _obstack_begin_1 (struct obstack *h, int size, int alignment,
> > -		  void *(*chunkfun) (void *, long),
> > -		  void (*freefun) (void *, void *),
> > -		  void *arg)
> > +                  void *(*chunkfun)(void *, long),
> > +                  void (*freefun)(void *, void *),
> > +                  void *arg)
> >  {
> >    struct _obstack_chunk *chunk; /* points to new chunk */
> >  
> >    if (alignment == 0)
> >      alignment = DEFAULT_ALIGNMENT;
> >    if (size == 0)
> > -    /* Default size is what GNU malloc can fit in a 4096-byte block.  */
> > +  /* Default size is what GNU malloc can fit in a 4096-byte block.  */
> >      {
> >        /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
> > -	 Use the values for range checking, because if range checking is off,
> > -	 the extra bytes won't be missed terribly, but if range checking is on
> > -	 and we used a larger request, a whole extra 4096 bytes would be
> > -	 allocated.
> > +         Use the values for range checking, because if range checking is off,
> > +         the extra bytes won't be missed terribly, but if range checking is on
> > +         and we used a larger request, a whole extra 4096 bytes would be
> > +         allocated.
> >  
> > -	 These number are irrelevant to the new GNU malloc.  I suspect it is
> > -	 less sensitive to the size of the request.  */
> > +         These numbers are irrelevant to the new GNU malloc.  I suspect it is
> > +         less sensitive to the size of the request.  */
> >        int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
> > -		    + 4 + DEFAULT_ROUNDING - 1)
> > -		   & ~(DEFAULT_ROUNDING - 1));
> > +                    + 4 + DEFAULT_ROUNDING - 1)
> > +                   & ~(DEFAULT_ROUNDING - 1));
> >        size = 4096 - extra;
> >      }
> >  
> > -  h->chunkfun = (struct _obstack_chunk * (*)(void *,long)) chunkfun;
> > -  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
> > +  h->chunkfun = (struct _obstack_chunk * (*)(void *, long))chunkfun;
> > +  h->freefun = (void (*)(void *, struct _obstack_chunk *))freefun;
> >    h->chunk_size = size;
> >    h->alignment_mask = alignment - 1;
> >    h->extra_arg = arg;
> >    h->use_extra_arg = 1;
> >  
> > -  chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
> > +  chunk = h->chunk = CALL_CHUNKFUN (h, h->chunk_size);
> >    if (!chunk)
> > -    (*obstack_alloc_failed_handler) ();
> > +    (*obstack_alloc_failed_handler)();
> >    h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
> > -					       alignment - 1);
> > +                                               alignment - 1);
> >    h->chunk_limit = chunk->limit
> > -    = (char *) chunk + h->chunk_size;
> > +                     = (char *) chunk + h->chunk_size;
> >    chunk->prev = 0;
> >    /* The initial chunk now contains no empty object.  */
> >    h->maybe_empty_object = 0;
> > @@ -254,7 +254,7 @@ _obstack_newchunk (struct obstack *h, int length)
> >  {
> >    struct _obstack_chunk *old_chunk = h->chunk;
> >    struct _obstack_chunk *new_chunk;
> > -  long	new_size;
> > +  long new_size;
> >    long obj_size = h->next_free - h->object_base;
> >    long i;
> >    long already;
> > @@ -268,7 +268,7 @@ _obstack_newchunk (struct obstack *h, int length)
> >    /* Allocate and initialize the new chunk.  */
> >    new_chunk = CALL_CHUNKFUN (h, new_size);
> >    if (!new_chunk)
> > -    (*obstack_alloc_failed_handler) ();
> > +    (*obstack_alloc_failed_handler)();
> >    h->chunk = new_chunk;
> >    new_chunk->prev = old_chunk;
> >    new_chunk->limit = h->chunk_limit = (char *) new_chunk + new_size;
> > @@ -283,12 +283,12 @@ _obstack_newchunk (struct obstack *h, int length)
> >    if (h->alignment_mask + 1 >= DEFAULT_ALIGNMENT)
> >      {
> >        for (i = obj_size / sizeof (COPYING_UNIT) - 1;
> > -	   i >= 0; i--)
> > -	((COPYING_UNIT *)object_base)[i]
> > -	  = ((COPYING_UNIT *)h->object_base)[i];
> > +           i >= 0; i--)
> > +        ((COPYING_UNIT *) object_base)[i]
> > +          = ((COPYING_UNIT *) h->object_base)[i];
> >        /* We used to copy the odd few remaining bytes as one extra COPYING_UNIT,
> > -	 but that can cross a page boundary on a machine
> > -	 which does not do strict alignment for COPYING_UNITS.  */
> > +         but that can cross a page boundary on a machine
> > +         which does not do strict alignment for COPYING_UNITS.  */
> >        already = obj_size / sizeof (COPYING_UNIT) * sizeof (COPYING_UNIT);
> >      }
> >    else
> > @@ -300,10 +300,10 @@ _obstack_newchunk (struct obstack *h, int length)
> >    /* If the object just copied was the only data in OLD_CHUNK,
> >       free that chunk and remove it from the chain.
> >       But not if that chunk might contain an empty object.  */
> > -  if (! h->maybe_empty_object
> > +  if (!h->maybe_empty_object
> >        && (h->object_base
> > -	  == __PTR_ALIGN ((char *) old_chunk, old_chunk->contents,
> > -			  h->alignment_mask)))
> > +          == __PTR_ALIGN ((char *) old_chunk, old_chunk->contents,
> > +                          h->alignment_mask)))
> >      {
> >        new_chunk->prev = old_chunk->prev;
> >        CALL_FREEFUN (h, old_chunk);
> > @@ -329,8 +329,8 @@ int _obstack_allocated_p (struct obstack *h, void *obj);
> >  int
> >  _obstack_allocated_p (struct obstack *h, void *obj)
> >  {
> > -  struct _obstack_chunk *lp;	/* below addr of any objects in this chunk */
> > -  struct _obstack_chunk *plp;	/* point to previous chunk if any */
> > +  struct _obstack_chunk *lp;    /* below addr of any objects in this chunk */
> > +  struct _obstack_chunk *plp;   /* point to previous chunk if any */
> >  
> >    lp = (h)->chunk;
> >    /* We use >= rather than > since the object cannot be exactly at
> > @@ -343,7 +343,7 @@ _obstack_allocated_p (struct obstack *h, void *obj)
> >      }
> >    return lp != 0;
> >  }
> > -
> > +
> >  /* Free objects in obstack H, including OBJ and everything allocated
> >     more recently than OBJ.  If OBJ is zero, free everything in H.  */
> >  
> > @@ -352,8 +352,8 @@ _obstack_allocated_p (struct obstack *h, void *obj)
> >  void
> >  obstack_free (struct obstack *h, void *obj)
> >  {
> > -  struct _obstack_chunk *lp;	/* below addr of any objects in this chunk */
> > -  struct _obstack_chunk *plp;	/* point to previous chunk if any */
> > +  struct _obstack_chunk *lp;    /* below addr of any objects in this chunk */
> > +  struct _obstack_chunk *plp;   /* point to previous chunk if any */
> >  
> >    lp = h->chunk;
> >    /* We use >= because there cannot be an object at the beginning of a chunk.
> > @@ -365,7 +365,7 @@ obstack_free (struct obstack *h, void *obj)
> >        CALL_FREEFUN (h, lp);
> >        lp = plp;
> >        /* If we switch chunks, we can't tell whether the new current
> > -	 chunk contains an empty object, so assume that it may.  */
> > +         chunk contains an empty object, so assume that it may.  */
> >        h->maybe_empty_object = 1;
> >      }
> >    if (lp)
> > @@ -384,11 +384,11 @@ obstack_free (struct obstack *h, void *obj)
> >     called by non-GCC compilers.  */
> >  strong_alias (obstack_free, _obstack_free)
> >  # endif
> > -
> > +
> >  int
> >  _obstack_memory_used (struct obstack *h)
> >  {
> > -  struct _obstack_chunk* lp;
> > +  struct _obstack_chunk *lp;
> >    int nbytes = 0;
> >  
> >    for (lp = h->chunk; lp != 0; lp = lp->prev)
> > @@ -397,7 +397,7 @@ _obstack_memory_used (struct obstack *h)
> >      }
> >    return nbytes;
> >  }
> > -
> > +
> >  /* Define the error handler.  */
> >  # ifdef _LIBC
> >  #  include <libintl.h>
> > @@ -429,11 +429,10 @@ print_and_abort (void)
> >       like this and the translation should be reused instead of creating
> >       a very similar string which requires a separate translation.  */
> >  # ifdef _LIBC
> > -  (void) __fxprintf (NULL, "%s\n", _("memory exhausted"));
> > +  (void) __fxprintf (NULL, "%s\n", _ ("memory exhausted"));
> >  # else
> > -  fprintf (stderr, "%s\n", _("memory exhausted"));
> > +  fprintf (stderr, "%s\n", _ ("memory exhausted"));
> >  # endif
> >    exit (obstack_exit_failure);
> >  }
> > -
> > -#endif	/* !ELIDE_CODE */
> > +#endif  /* !ELIDE_CODE */
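
As context for the hunk above: print_and_abort is only the default value of
obstack_alloc_failed_handler, which is a public hook, so a program can install
its own out-of-memory handler. A minimal sketch (not part of the patch; it
assumes the usual malloc-based chunk functions, and die_nicely is an
illustrative name):

#include <obstack.h>
#include <stdio.h>
#include <stdlib.h>

#define obstack_chunk_alloc malloc
#define obstack_chunk_free free

static void
die_nicely (void)
{
  fputs ("obstack: out of memory\n", stderr);
  exit (EXIT_FAILURE);
}

int
main (void)
{
  obstack_alloc_failed_handler = die_nicely;

  struct obstack os;
  obstack_init (&os);       /* calls die_nicely on allocation failure */
  obstack_free (&os, NULL); /* free every object and chunk; re-init before reuse */
  return 0;
}
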
> > diff --git a/malloc/obstack.h b/malloc/obstack.h
> > index e786d1f..6efd47d 100644
> > --- a/malloc/obstack.h
> > +++ b/malloc/obstack.h
> > @@ -18,85 +18,85 @@
> >  
> >  /* Summary:
> >  
> > -All the apparent functions defined here are macros. The idea
> > -is that you would use these pre-tested macros to solve a
> > -very specific set of problems, and they would run fast.
> > -Caution: no side-effects in arguments please!! They may be
> > -evaluated MANY times!!
> > -
> > -These macros operate a stack of objects.  Each object starts life
> > -small, and may grow to maturity.  (Consider building a word syllable
> > -by syllable.)  An object can move while it is growing.  Once it has
> > -been "finished" it never changes address again.  So the "top of the
> > -stack" is typically an immature growing object, while the rest of the
> > -stack is of mature, fixed size and fixed address objects.
> > -
> > -These routines grab large chunks of memory, using a function you
> > -supply, called `obstack_chunk_alloc'.  On occasion, they free chunks,
> > -by calling `obstack_chunk_free'.  You must define them and declare
> > -them before using any obstack macros.
> > -
> > -Each independent stack is represented by a `struct obstack'.
> > -Each of the obstack macros expects a pointer to such a structure
> > -as the first argument.
> > -
> > -One motivation for this package is the problem of growing char strings
> > -in symbol tables.  Unless you are "fascist pig with a read-only mind"
> > ---Gosper's immortal quote from HAKMEM item 154, out of context--you
> > -would not like to put any arbitrary upper limit on the length of your
> > -symbols.
> > -
> > -In practice this often means you will build many short symbols and a
> > -few long symbols.  At the time you are reading a symbol you don't know
> > -how long it is.  One traditional method is to read a symbol into a
> > -buffer, realloc()ating the buffer every time you try to read a symbol
> > -that is longer than the buffer.  This is beaut, but you still will
> > -want to copy the symbol from the buffer to a more permanent
> > -symbol-table entry say about half the time.
> > -
> > -With obstacks, you can work differently.  Use one obstack for all symbol
> > -names.  As you read a symbol, grow the name in the obstack gradually.
> > -When the name is complete, finalize it.  Then, if the symbol exists already,
> > -free the newly read name.
> > -
> > -The way we do this is to take a large chunk, allocating memory from
> > -low addresses.  When you want to build a symbol in the chunk you just
> > -add chars above the current "high water mark" in the chunk.  When you
> > -have finished adding chars, because you got to the end of the symbol,
> > -you know how long the chars are, and you can create a new object.
> > -Mostly the chars will not burst over the highest address of the chunk,
> > -because you would typically expect a chunk to be (say) 100 times as
> > -long as an average object.
> > -
> > -In case that isn't clear, when we have enough chars to make up
> > -the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
> > -so we just point to it where it lies.  No moving of chars is
> > -needed and this is the second win: potentially long strings need
> > -never be explicitly shuffled. Once an object is formed, it does not
> > -change its address during its lifetime.
> > -
> > -When the chars burst over a chunk boundary, we allocate a larger
> > -chunk, and then copy the partly formed object from the end of the old
> > -chunk to the beginning of the new larger chunk.  We then carry on
> > -accreting characters to the end of the object as we normally would.
> > -
> > -A special macro is provided to add a single char at a time to a
> > -growing object.  This allows the use of register variables, which
> > -break the ordinary 'growth' macro.
> > -
> > -Summary:
> > -	We allocate large chunks.
> > -	We carve out one object at a time from the current chunk.
> > -	Once carved, an object never moves.
> > -	We are free to append data of any size to the currently
> > -	  growing object.
> > -	Exactly one object is growing in an obstack at any one time.
> > -	You can run one obstack per control block.
> > -	You may have as many control blocks as you dare.
> > -	Because of the way we do it, you can `unwind' an obstack
> > -	  back to a previous state. (You may remove objects much
> > -	  as you would with a stack.)
> > -*/
> > +   All the apparent functions defined here are macros. The idea
> > +   is that you would use these pre-tested macros to solve a
> > +   very specific set of problems, and they would run fast.
> > +   Caution: no side-effects in arguments please!! They may be
> > +   evaluated MANY times!!
> > +
> > +   These macros operate a stack of objects.  Each object starts life
> > +   small, and may grow to maturity.  (Consider building a word syllable
> > +   by syllable.)  An object can move while it is growing.  Once it has
> > +   been "finished" it never changes address again.  So the "top of the
> > +   stack" is typically an immature growing object, while the rest of the
> > +   stack is of mature, fixed size and fixed address objects.
> > +
> > +   These routines grab large chunks of memory, using a function you
> > +   supply, called `obstack_chunk_alloc'.  On occasion, they free chunks,
> > +   by calling `obstack_chunk_free'.  You must define them and declare
> > +   them before using any obstack macros.
> > +
> > +   Each independent stack is represented by a `struct obstack'.
> > +   Each of the obstack macros expects a pointer to such a structure
> > +   as the first argument.
> > +
> > +   One motivation for this package is the problem of growing char strings
> > +   in symbol tables.  Unless you are "fascist pig with a read-only mind"
> > +   --Gosper's immortal quote from HAKMEM item 154, out of context--you
> > +   would not like to put any arbitrary upper limit on the length of your
> > +   symbols.
> > +
> > +   In practice this often means you will build many short symbols and a
> > +   few long symbols.  At the time you are reading a symbol you don't know
> > +   how long it is.  One traditional method is to read a symbol into a
> > +   buffer, realloc()ating the buffer every time you try to read a symbol
> > +   that is longer than the buffer.  This is beaut, but you still will
> > +   want to copy the symbol from the buffer to a more permanent
> > +   symbol-table entry say about half the time.
> > +
> > +   With obstacks, you can work differently.  Use one obstack for all symbol
> > +   names.  As you read a symbol, grow the name in the obstack gradually.
> > +   When the name is complete, finalize it.  Then, if the symbol exists already,
> > +   free the newly read name.
> > +
> > +   The way we do this is to take a large chunk, allocating memory from
> > +   low addresses.  When you want to build a symbol in the chunk you just
> > +   add chars above the current "high water mark" in the chunk.  When you
> > +   have finished adding chars, because you got to the end of the symbol,
> > +   you know how long the chars are, and you can create a new object.
> > +   Mostly the chars will not burst over the highest address of the chunk,
> > +   because you would typically expect a chunk to be (say) 100 times as
> > +   long as an average object.
> > +
> > +   In case that isn't clear, when we have enough chars to make up
> > +   the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
> > +   so we just point to it where it lies.  No moving of chars is
> > +   needed and this is the second win: potentially long strings need
> > +   never be explicitly shuffled. Once an object is formed, it does not
> > +   change its address during its lifetime.
> > +
> > +   When the chars burst over a chunk boundary, we allocate a larger
> > +   chunk, and then copy the partly formed object from the end of the old
> > +   chunk to the beginning of the new larger chunk.  We then carry on
> > +   accreting characters to the end of the object as we normally would.
> > +
> > +   A special macro is provided to add a single char at a time to a
> > +   growing object.  This allows the use of register variables, which
> > +   break the ordinary 'growth' macro.
> > +
> > +   Summary:
> > +        We allocate large chunks.
> > +        We carve out one object at a time from the current chunk.
> > +        Once carved, an object never moves.
> > +        We are free to append data of any size to the currently
> > +          growing object.
> > +        Exactly one object is growing in an obstack at any one time.
> > +        You can run one obstack per control block.
> > +        You may have as many control blocks as you dare.
> > +        Because of the way we do it, you can `unwind' an obstack
> > +          back to a previous state. (You may remove objects much
> > +          as you would with a stack.)
> > + */
> >  
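
The workflow that summary describes, as a minimal sketch (not part of the
patch; it assumes the usual malloc-based chunk functions):

#include <obstack.h>
#include <stdio.h>
#include <stdlib.h>

#define obstack_chunk_alloc malloc   /* must be defined before use */
#define obstack_chunk_free free

int
main (void)
{
  struct obstack symbols;
  obstack_init (&symbols);

  /* Grow the current object piecemeal; it may move while growing.  */
  obstack_grow (&symbols, "sym_", 4);
  obstack_1grow (&symbols, 'a');
  obstack_1grow (&symbols, '\0');

  /* Finish it; from now on its address is stable.  */
  char *name = obstack_finish (&symbols);
  puts (name);

  /* Unwind back past NAME, freeing it and anything newer.  */
  obstack_free (&symbols, name);
  return 0;
}
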
> >  
> >  /* Don't do the contents of this file more than once.  */
> > @@ -107,7 +107,7 @@ Summary:
> >  #ifdef __cplusplus
> >  extern "C" {
> >  #endif
> > -
> > +
> >  /* We need the type of a pointer subtraction.  If __PTRDIFF_TYPE__ is
> >     defined, as with GNU C, use that; that way we don't pollute the
> >     namespace with <stddef.h>'s symbols.  Otherwise, include <stddef.h>
> > @@ -124,7 +124,7 @@ extern "C" {
> >     aligning P to the next multiple of A + 1.  B and P must be of type
> >     char *.  A + 1 must be a power of 2.  */
> >  
> > -#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A)))
> > +#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A)))
> >  
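
The arithmetic is easier to see with numbers: with A = 7 (8-byte alignment)
and P thirteen bytes past B, (13 + 7) & ~7 = 16, so P rounds up to B + 16.
A standalone sketch of the macro as defined above:

#include <stdio.h>

#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A)))

int
main (void)
{
  char buf[32];
  char *p = buf + 13;
  /* Round P up to the next 8-byte boundary relative to BUF.  */
  char *q = __BPTR_ALIGN (buf, p, 7);
  printf ("%td -> %td\n", p - buf, q - buf);   /* prints 13 -> 16 */
  return 0;
}
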
> >  /* Similar to __BPTR_ALIGN (B, P, A), except optimize the common case
> >     where pointers can be converted to integers, aligned as integers,
> > @@ -133,61 +133,61 @@ extern "C" {
> >     relative to B.  Otherwise, use the faster strategy of computing the
> >     alignment relative to 0.  */
> >  
> > -#define __PTR_ALIGN(B, P, A)						    \
> > +#define __PTR_ALIGN(B, P, A)						      \
> >    __BPTR_ALIGN (sizeof (PTR_INT_TYPE) < sizeof (void *) ? (B) : (char *) 0, \
> > -		P, A)
> > +                P, A)
> >  
> >  #include <string.h>
> >  
> > -struct _obstack_chunk		/* Lives at front of each chunk. */
> > +struct _obstack_chunk           /* Lives at front of each chunk. */
> >  {
> > -  char  *limit;			/* 1 past end of this chunk */
> > -  struct _obstack_chunk *prev;	/* address of prior chunk or NULL */
> > -  char	contents[4];		/* objects begin here */
> > +  char *limit;                  /* 1 past end of this chunk */
> > +  struct _obstack_chunk *prev;  /* address of prior chunk or NULL */
> > +  char contents[4];             /* objects begin here */
> >  };
> >  
> > -struct obstack		/* control current object in current chunk */
> > +struct obstack          /* control current object in current chunk */
> >  {
> > -  long	chunk_size;		/* preferred size to allocate chunks in */
> > -  struct _obstack_chunk *chunk;	/* address of current struct obstack_chunk */
> > -  char	*object_base;		/* address of object we are building */
> > -  char	*next_free;		/* where to add next char to current object */
> > -  char	*chunk_limit;		/* address of char after current chunk */
> > +  long chunk_size;              /* preferred size to allocate chunks in */
> > +  struct _obstack_chunk *chunk; /* address of current struct obstack_chunk */
> > +  char *object_base;            /* address of object we are building */
> > +  char *next_free;              /* where to add next char to current object */
> > +  char *chunk_limit;            /* address of char after current chunk */
> >    union
> >    {
> >      PTR_INT_TYPE tempint;
> >      void *tempptr;
> > -  } temp;			/* Temporary for some macros.  */
> > -  int   alignment_mask;		/* Mask of alignment for each object. */
> > +  } temp;                       /* Temporary for some macros.  */
> > +  int alignment_mask;           /* Mask of alignment for each object. */
> >    /* These prototypes vary based on `use_extra_arg', and we use
> >       casts to the prototypeless function type in all assignments,
> >       but having prototypes here quiets -Wstrict-prototypes.  */
> >    struct _obstack_chunk *(*chunkfun) (void *, long);
> >    void (*freefun) (void *, struct _obstack_chunk *);
> > -  void *extra_arg;		/* first arg for chunk alloc/dealloc funcs */
> > -  unsigned use_extra_arg:1;	/* chunk alloc/dealloc funcs take extra arg */
> > -  unsigned maybe_empty_object:1;/* There is a possibility that the current
> > -				   chunk contains a zero-length object.  This
> > -				   prevents freeing the chunk if we allocate
> > -				   a bigger chunk to replace it. */
> > -  unsigned alloc_failed:1;	/* No longer used, as we now call the failed
> > -				   handler on error, but retained for binary
> > -				   compatibility.  */
> > +  void *extra_arg;              /* first arg for chunk alloc/dealloc funcs */
> > +  unsigned use_extra_arg : 1;     /* chunk alloc/dealloc funcs take extra arg */
> > +  unsigned maybe_empty_object : 1; /* There is a possibility that the current
> > +                                      chunk contains a zero-length object.  This
> > +                                      prevents freeing the chunk if we allocate
> > +                                      a bigger chunk to replace it. */
> > +  unsigned alloc_failed : 1;      /* No longer used, as we now call the failed
> > +                                     handler on error, but retained for binary
> > +                                     compatibility.  */
> >  };
> >  
> >  /* Declare the external functions we use; they are in obstack.c.  */
> >  
> >  extern void _obstack_newchunk (struct obstack *, int);
> >  extern int _obstack_begin (struct obstack *, int, int,
> > -			    void *(*) (long), void (*) (void *));
> > +                           void *(*)(long), void (*)(void *));
> >  extern int _obstack_begin_1 (struct obstack *, int, int,
> > -			     void *(*) (void *, long),
> > -			     void (*) (void *, void *), void *);
> > +                             void *(*)(void *, long),
> > +                             void (*)(void *, void *), void *);
> >  extern int _obstack_memory_used (struct obstack *);
> >  
> >  void obstack_free (struct obstack *__obstack, void *__glibc_block);
> >  
> > -
> > +
> >  /* Error handler called when `obstack_chunk_alloc' failed to allocate
> >     more memory.  This can be set to a user defined function which
> >     should either abort gracefully or use longjump - but shouldn't
> > @@ -196,7 +196,7 @@ extern void (*obstack_alloc_failed_handler) (void);
> >  
> >  /* Exit value used when `print_and_abort' is used.  */
> >  extern int obstack_exit_failure;
> > -
> > +
> >  /* Pointer to beginning of object being allocated or to be allocated next.
> >     Note that this might not be the final address of the object
> >     because a new chunk might be needed to hold the final size.  */
> > @@ -209,45 +209,45 @@ extern int obstack_exit_failure;
> >  
> >  /* Pointer to next byte not yet allocated in current chunk.  */
> >  
> > -#define obstack_next_free(h)	((h)->next_free)
> > +#define obstack_next_free(h)    ((h)->next_free)
> >  
> >  /* Mask specifying low bits that should be clear in address of an object.  */
> >  
> >  #define obstack_alignment_mask(h) ((h)->alignment_mask)
> >  
> >  /* To prevent prototype warnings provide complete argument list.  */
> > -#define obstack_init(h)						\
> > -  _obstack_begin ((h), 0, 0,					\
> > -		  (void *(*) (long)) obstack_chunk_alloc,	\
> > -		  (void (*) (void *)) obstack_chunk_free)
> > +#define obstack_init(h)							      \
> > +  _obstack_begin ((h), 0, 0,						      \
> > +                  (void *(*)(long))obstack_chunk_alloc,			      \
> > +                  (void (*)(void *))obstack_chunk_free)
> >  
> > -#define obstack_begin(h, size)					\
> > -  _obstack_begin ((h), (size), 0,				\
> > -		  (void *(*) (long)) obstack_chunk_alloc,	\
> > -		  (void (*) (void *)) obstack_chunk_free)
> > +#define obstack_begin(h, size)						      \
> > +  _obstack_begin ((h), (size), 0,					      \
> > +                  (void *(*)(long))obstack_chunk_alloc,			      \
> > +                  (void (*)(void *))obstack_chunk_free)
> >  
> >  #define obstack_specify_allocation(h, size, alignment, chunkfun, freefun)  \
> > -  _obstack_begin ((h), (size), (alignment),				   \
> > -		  (void *(*) (long)) (chunkfun),			   \
> > -		  (void (*) (void *)) (freefun))
> > +  _obstack_begin ((h), (size), (alignment),				      \
> > +                  (void *(*)(long))(chunkfun),				      \
> > +                  (void (*)(void *))(freefun))
> >  
> >  #define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \
> > -  _obstack_begin_1 ((h), (size), (alignment),				\
> > -		    (void *(*) (void *, long)) (chunkfun),		\
> > -		    (void (*) (void *, void *)) (freefun), (arg))
> > +  _obstack_begin_1 ((h), (size), (alignment),				      \
> > +                    (void *(*)(void *, long))(chunkfun),		      \
> > +                    (void (*)(void *, void *))(freefun), (arg))
> >  
> >  #define obstack_chunkfun(h, newchunkfun) \
> > -  ((h) -> chunkfun = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun))
> > +  ((h)->chunkfun = (struct _obstack_chunk *(*)(void *, long))(newchunkfun))
> >  
> >  #define obstack_freefun(h, newfreefun) \
> > -  ((h) -> freefun = (void (*)(void *, struct _obstack_chunk *)) (newfreefun))
> > +  ((h)->freefun = (void (*)(void *, struct _obstack_chunk *))(newfreefun))
> >  
> > -#define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = (achar))
> > +#define obstack_1grow_fast(h, achar) (*((h)->next_free)++ = (achar))
> >  
> > -#define obstack_blank_fast(h,n) ((h)->next_free += (n))
> > +#define obstack_blank_fast(h, n) ((h)->next_free += (n))
> >  
> >  #define obstack_memory_used(h) _obstack_memory_used (h)
> > -
> > +
> >  #if defined __GNUC__
> >  /* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and
> >     does not implement __extension__.  But that compiler doesn't define
> > @@ -261,158 +261,158 @@ extern int obstack_exit_failure;
> >     without using a global variable.
> >     Also, we can avoid using the `temp' slot, to make faster code.  */
> >  
> > -# define obstack_object_size(OBSTACK)					\
> > -  __extension__								\
> > -  ({ struct obstack const *__o = (OBSTACK);				\
> > -     (unsigned) (__o->next_free - __o->object_base); })
> > -
> > -# define obstack_room(OBSTACK)						\
> > -  __extension__								\
> > -  ({ struct obstack const *__o = (OBSTACK);				\
> > -     (unsigned) (__o->chunk_limit - __o->next_free); })
> > -
> > -# define obstack_make_room(OBSTACK,length)				\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   int __len = (length);						\
> > -   if (__o->chunk_limit - __o->next_free < __len)			\
> > -     _obstack_newchunk (__o, __len);					\
> > -   (void) 0; })
> > -
> > -# define obstack_empty_p(OBSTACK)					\
> > -  __extension__								\
> > -  ({ struct obstack const *__o = (OBSTACK);				\
> > -     (__o->chunk->prev == 0						\
> > -      && __o->next_free == __PTR_ALIGN ((char *) __o->chunk,		\
> > -					__o->chunk->contents,		\
> > -					__o->alignment_mask)); })
> > -
> > -# define obstack_grow(OBSTACK,where,length)				\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   int __len = (length);						\
> > -   if (__o->next_free + __len > __o->chunk_limit)			\
> > -     _obstack_newchunk (__o, __len);					\
> > -   memcpy (__o->next_free, where, __len);				\
> > -   __o->next_free += __len;						\
> > -   (void) 0; })
> > -
> > -# define obstack_grow0(OBSTACK,where,length)				\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   int __len = (length);						\
> > -   if (__o->next_free + __len + 1 > __o->chunk_limit)			\
> > -     _obstack_newchunk (__o, __len + 1);				\
> > -   memcpy (__o->next_free, where, __len);				\
> > -   __o->next_free += __len;						\
> > -   *(__o->next_free)++ = 0;						\
> > -   (void) 0; })
> > -
> > -# define obstack_1grow(OBSTACK,datum)					\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   if (__o->next_free + 1 > __o->chunk_limit)				\
> > -     _obstack_newchunk (__o, 1);					\
> > -   obstack_1grow_fast (__o, datum);					\
> > -   (void) 0; })
> > +# define obstack_object_size(OBSTACK)					      \
> > +  __extension__								      \
> > +    ({ struct obstack const *__o = (OBSTACK);				      \
> > +       (unsigned) (__o->next_free - __o->object_base); })
> > +
> > +# define obstack_room(OBSTACK)						      \
> > +  __extension__								      \
> > +    ({ struct obstack const *__o = (OBSTACK);				      \
> > +       (unsigned) (__o->chunk_limit - __o->next_free); })
> > +
> > +# define obstack_make_room(OBSTACK, length)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       int __len = (length);						      \
> > +       if (__o->chunk_limit - __o->next_free < __len)			      \
> > +         _obstack_newchunk (__o, __len);				      \
> > +       (void) 0; })
> > +
> > +# define obstack_empty_p(OBSTACK)					      \
> > +  __extension__								      \
> > +    ({ struct obstack const *__o = (OBSTACK);				      \
> > +       (__o->chunk->prev == 0						      \
> > +        && __o->next_free == __PTR_ALIGN ((char *) __o->chunk,		      \
> > +                                          __o->chunk->contents,		      \
> > +                                          __o->alignment_mask)); })
> > +
> > +# define obstack_grow(OBSTACK, where, length)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       int __len = (length);						      \
> > +       if (__o->next_free + __len > __o->chunk_limit)			      \
> > +         _obstack_newchunk (__o, __len);				      \
> > +       memcpy (__o->next_free, where, __len);				      \
> > +       __o->next_free += __len;						      \
> > +       (void) 0; })
> > +
> > +# define obstack_grow0(OBSTACK, where, length)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       int __len = (length);						      \
> > +       if (__o->next_free + __len + 1 > __o->chunk_limit)		      \
> > +         _obstack_newchunk (__o, __len + 1);				      \
> > +       memcpy (__o->next_free, where, __len);				      \
> > +       __o->next_free += __len;						      \
> > +       *(__o->next_free)++ = 0;						      \
> > +       (void) 0; })
> > +
> > +# define obstack_1grow(OBSTACK, datum)					      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       if (__o->next_free + 1 > __o->chunk_limit)			      \
> > +         _obstack_newchunk (__o, 1);					      \
> > +       obstack_1grow_fast (__o, datum);					      \
> > +       (void) 0; })
> >  
> >  /* These assume that the obstack alignment is good enough for pointers
> >     or ints, and that the data added so far to the current object
> >     shares that much alignment.  */
> >  
> > -# define obstack_ptr_grow(OBSTACK,datum)				\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   if (__o->next_free + sizeof (void *) > __o->chunk_limit)		\
> > -     _obstack_newchunk (__o, sizeof (void *));				\
> > -   obstack_ptr_grow_fast (__o, datum); })				\
> > -
> > -# define obstack_int_grow(OBSTACK,datum)				\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   if (__o->next_free + sizeof (int) > __o->chunk_limit)		\
> > -     _obstack_newchunk (__o, sizeof (int));				\
> > -   obstack_int_grow_fast (__o, datum); })
> > -
> > -# define obstack_ptr_grow_fast(OBSTACK,aptr)				\
> > -__extension__								\
> > -({ struct obstack *__o1 = (OBSTACK);					\
> > -   *(const void **) __o1->next_free = (aptr);				\
> > -   __o1->next_free += sizeof (const void *);				\
> > -   (void) 0; })
> > -
> > -# define obstack_int_grow_fast(OBSTACK,aint)				\
> > -__extension__								\
> > -({ struct obstack *__o1 = (OBSTACK);					\
> > -   *(int *) __o1->next_free = (aint);					\
> > -   __o1->next_free += sizeof (int);					\
> > -   (void) 0; })
> > -
> > -# define obstack_blank(OBSTACK,length)					\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   int __len = (length);						\
> > -   if (__o->chunk_limit - __o->next_free < __len)			\
> > -     _obstack_newchunk (__o, __len);					\
> > -   obstack_blank_fast (__o, __len);					\
> > -   (void) 0; })
> > -
> > -# define obstack_alloc(OBSTACK,length)					\
> > -__extension__								\
> > -({ struct obstack *__h = (OBSTACK);					\
> > -   obstack_blank (__h, (length));					\
> > -   obstack_finish (__h); })
> > -
> > -# define obstack_copy(OBSTACK,where,length)				\
> > -__extension__								\
> > -({ struct obstack *__h = (OBSTACK);					\
> > -   obstack_grow (__h, (where), (length));				\
> > -   obstack_finish (__h); })
> > -
> > -# define obstack_copy0(OBSTACK,where,length)				\
> > -__extension__								\
> > -({ struct obstack *__h = (OBSTACK);					\
> > -   obstack_grow0 (__h, (where), (length));				\
> > -   obstack_finish (__h); })
> > +# define obstack_ptr_grow(OBSTACK, datum)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       if (__o->next_free + sizeof (void *) > __o->chunk_limit)		      \
> > +         _obstack_newchunk (__o, sizeof (void *));			      \
> > +       obstack_ptr_grow_fast (__o, datum); })				      \
> > +
> > +# define obstack_int_grow(OBSTACK, datum)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       if (__o->next_free + sizeof (int) > __o->chunk_limit)		      \
> > +         _obstack_newchunk (__o, sizeof (int));				      \
> > +       obstack_int_grow_fast (__o, datum); })
> > +
> > +# define obstack_ptr_grow_fast(OBSTACK, aptr)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o1 = (OBSTACK);				      \
> > +       *(const void **) __o1->next_free = (aptr);			      \
> > +       __o1->next_free += sizeof (const void *);			      \
> > +       (void) 0; })
> > +
> > +# define obstack_int_grow_fast(OBSTACK, aint)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o1 = (OBSTACK);				      \
> > +       *(int *) __o1->next_free = (aint);				      \
> > +       __o1->next_free += sizeof (int);					      \
> > +       (void) 0; })
> > +
> > +# define obstack_blank(OBSTACK, length)					      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       int __len = (length);						      \
> > +       if (__o->chunk_limit - __o->next_free < __len)			      \
> > +         _obstack_newchunk (__o, __len);				      \
> > +       obstack_blank_fast (__o, __len);					      \
> > +       (void) 0; })
> > +
> > +# define obstack_alloc(OBSTACK, length)					      \
> > +  __extension__								      \
> > +    ({ struct obstack *__h = (OBSTACK);					      \
> > +       obstack_blank (__h, (length));					      \
> > +       obstack_finish (__h); })
> > +
> > +# define obstack_copy(OBSTACK, where, length)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__h = (OBSTACK);					      \
> > +       obstack_grow (__h, (where), (length));				      \
> > +       obstack_finish (__h); })
> > +
> > +# define obstack_copy0(OBSTACK, where, length)				      \
> > +  __extension__								      \
> > +    ({ struct obstack *__h = (OBSTACK);					      \
> > +       obstack_grow0 (__h, (where), (length));				      \
> > +       obstack_finish (__h); })
> >  
> >  /* The local variable is named __o1 to avoid a name conflict
> >     when obstack_blank is called.  */
> > -# define obstack_finish(OBSTACK)					\
> > -__extension__								\
> > -({ struct obstack *__o1 = (OBSTACK);					\
> > -   void *__value = (void *) __o1->object_base;				\
> > -   if (__o1->next_free == __value)					\
> > -     __o1->maybe_empty_object = 1;					\
> > -   __o1->next_free							\
> > -     = __PTR_ALIGN (__o1->object_base, __o1->next_free,			\
> > -		    __o1->alignment_mask);				\
> > -   if (__o1->next_free - (char *)__o1->chunk				\
> > -       > __o1->chunk_limit - (char *)__o1->chunk)			\
> > -     __o1->next_free = __o1->chunk_limit;				\
> > -   __o1->object_base = __o1->next_free;					\
> > -   __value; })
> > -
> > -# define obstack_free(OBSTACK, OBJ)					\
> > -__extension__								\
> > -({ struct obstack *__o = (OBSTACK);					\
> > -   void *__obj = (OBJ);							\
> > -   if (__obj > (void *)__o->chunk && __obj < (void *)__o->chunk_limit)  \
> > -     __o->next_free = __o->object_base = (char *)__obj;			\
> > -   else (obstack_free) (__o, __obj); })
> > -
> > +# define obstack_finish(OBSTACK)					      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o1 = (OBSTACK);				      \
> > +       void *__value = (void *) __o1->object_base;			      \
> > +       if (__o1->next_free == __value)					      \
> > +         __o1->maybe_empty_object = 1;					      \
> > +       __o1->next_free							      \
> > +         = __PTR_ALIGN (__o1->object_base, __o1->next_free,		      \
> > +                        __o1->alignment_mask);				      \
> > +       if (__o1->next_free - (char *) __o1->chunk			      \
> > +           > __o1->chunk_limit - (char *) __o1->chunk)			      \
> > +         __o1->next_free = __o1->chunk_limit;				      \
> > +       __o1->object_base = __o1->next_free;				      \
> > +       __value; })
> > +
> > +# define obstack_free(OBSTACK, OBJ)					      \
> > +  __extension__								      \
> > +    ({ struct obstack *__o = (OBSTACK);					      \
> > +       void *__obj = (OBJ);						      \
> > +       if (__obj > (void *) __o->chunk && __obj < (void *) __o->chunk_limit)  \
> > +         __o->next_free = __o->object_base = (char *) __obj;		      \
> > +       else (obstack_free) (__o, __obj); })
> > +
> >  #else /* not __GNUC__ */
> >  
> >  # define obstack_object_size(h) \
> > - (unsigned) ((h)->next_free - (h)->object_base)
> > +  (unsigned) ((h)->next_free - (h)->object_base)
> >  
> > -# define obstack_room(h)		\
> > - (unsigned) ((h)->chunk_limit - (h)->next_free)
> > +# define obstack_room(h)						      \
> > +  (unsigned) ((h)->chunk_limit - (h)->next_free)
> >  
> >  # define obstack_empty_p(h) \
> > - ((h)->chunk->prev == 0							\
> > -  && (h)->next_free == __PTR_ALIGN ((char *) (h)->chunk,		\
> > -				    (h)->chunk->contents,		\
> > -				    (h)->alignment_mask))
> > +  ((h)->chunk->prev == 0						      \
> > +   && (h)->next_free == __PTR_ALIGN ((char *) (h)->chunk,		      \
> > +                                     (h)->chunk->contents,		      \
> > +                                     (h)->alignment_mask))
> >  
> >  /* Note that the call to _obstack_newchunk is enclosed in (..., 0)
> >     so that we can avoid having void expressions
> > @@ -420,88 +420,86 @@ __extension__								\
> >     Casting the third operand to void was tried before,
> >     but some compilers won't accept it.  */
> >  
> > -# define obstack_make_room(h,length)					\
> > -( (h)->temp.tempint = (length),						\
> > -  (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		\
> > +# define obstack_make_room(h, length)					      \
> > +  ((h)->temp.tempint = (length),					      \
> > +   (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		      \
> >     ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0))
> >  
> > -# define obstack_grow(h,where,length)					\
> > -( (h)->temp.tempint = (length),						\
> > -  (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		\
> > -   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		\
> > -  memcpy ((h)->next_free, where, (h)->temp.tempint),			\
> > -  (h)->next_free += (h)->temp.tempint)
> > -
> > -# define obstack_grow0(h,where,length)					\
> > -( (h)->temp.tempint = (length),						\
> > -  (((h)->next_free + (h)->temp.tempint + 1 > (h)->chunk_limit)		\
> > -   ? (_obstack_newchunk ((h), (h)->temp.tempint + 1), 0) : 0),		\
> > -  memcpy ((h)->next_free, where, (h)->temp.tempint),			\
> > -  (h)->next_free += (h)->temp.tempint,					\
> > -  *((h)->next_free)++ = 0)
> > -
> > -# define obstack_1grow(h,datum)						\
> > -( (((h)->next_free + 1 > (h)->chunk_limit)				\
> > -   ? (_obstack_newchunk ((h), 1), 0) : 0),				\
> > -  obstack_1grow_fast (h, datum))
> > -
> > -# define obstack_ptr_grow(h,datum)					\
> > -( (((h)->next_free + sizeof (char *) > (h)->chunk_limit)		\
> > -   ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0),		\
> > -  obstack_ptr_grow_fast (h, datum))
> > -
> > -# define obstack_int_grow(h,datum)					\
> > -( (((h)->next_free + sizeof (int) > (h)->chunk_limit)			\
> > -   ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0),			\
> > -  obstack_int_grow_fast (h, datum))
> > -
> > -# define obstack_ptr_grow_fast(h,aptr)					\
> > +# define obstack_grow(h, where, length)					      \
> > +  ((h)->temp.tempint = (length),					      \
> > +   (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		      \
> > +   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		      \
> > +   memcpy ((h)->next_free, where, (h)->temp.tempint),			      \
> > +   (h)->next_free += (h)->temp.tempint)
> > +
> > +# define obstack_grow0(h, where, length)				      \
> > +  ((h)->temp.tempint = (length),					      \
> > +   (((h)->next_free + (h)->temp.tempint + 1 > (h)->chunk_limit)		      \
> > +   ? (_obstack_newchunk ((h), (h)->temp.tempint + 1), 0) : 0),		      \
> > +   memcpy ((h)->next_free, where, (h)->temp.tempint),			      \
> > +   (h)->next_free += (h)->temp.tempint,					      \
> > +   *((h)->next_free)++ = 0)
> > +
> > +# define obstack_1grow(h, datum)					      \
> > +  ((((h)->next_free + 1 > (h)->chunk_limit)				      \
> > +    ? (_obstack_newchunk ((h), 1), 0) : 0),				      \
> > +   obstack_1grow_fast (h, datum))
> > +
> > +# define obstack_ptr_grow(h, datum)					      \
> > +  ((((h)->next_free + sizeof (char *) > (h)->chunk_limit)		      \
> > +    ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0),		      \
> > +   obstack_ptr_grow_fast (h, datum))
> > +
> > +# define obstack_int_grow(h, datum)					      \
> > +  ((((h)->next_free + sizeof (int) > (h)->chunk_limit)			      \
> > +    ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0),			      \
> > +   obstack_int_grow_fast (h, datum))
> > +
> > +# define obstack_ptr_grow_fast(h, aptr)					      \
> >    (((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr))
> >  
> > -# define obstack_int_grow_fast(h,aint)					\
> > +# define obstack_int_grow_fast(h, aint)					      \
> >    (((int *) ((h)->next_free += sizeof (int)))[-1] = (aint))
> >  
> > -# define obstack_blank(h,length)					\
> > -( (h)->temp.tempint = (length),						\
> > -  (((h)->chunk_limit - (h)->next_free < (h)->temp.tempint)		\
> > -   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		\
> > -  obstack_blank_fast (h, (h)->temp.tempint))
> > -
> > -# define obstack_alloc(h,length)					\
> > - (obstack_blank ((h), (length)), obstack_finish ((h)))
> > -
> > -# define obstack_copy(h,where,length)					\
> > - (obstack_grow ((h), (where), (length)), obstack_finish ((h)))
> > -
> > -# define obstack_copy0(h,where,length)					\
> > - (obstack_grow0 ((h), (where), (length)), obstack_finish ((h)))
> > -
> > -# define obstack_finish(h)						\
> > -( ((h)->next_free == (h)->object_base					\
> > -   ? (((h)->maybe_empty_object = 1), 0)					\
> > -   : 0),								\
> > -  (h)->temp.tempptr = (h)->object_base,					\
> > -  (h)->next_free							\
> > -    = __PTR_ALIGN ((h)->object_base, (h)->next_free,			\
> > -		   (h)->alignment_mask),				\
> > -  (((h)->next_free - (char *) (h)->chunk				\
> > -    > (h)->chunk_limit - (char *) (h)->chunk)				\
> > -   ? ((h)->next_free = (h)->chunk_limit) : 0),				\
> > -  (h)->object_base = (h)->next_free,					\
> > -  (h)->temp.tempptr)
> > -
> > -# define obstack_free(h,obj)						\
> > -( (h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk,		\
> > -  ((((h)->temp.tempint > 0						\
> > -    && (h)->temp.tempint < (h)->chunk_limit - (char *) (h)->chunk))	\
> > -   ? (((h)->next_free = (h)->object_base				\
> > -       = (h)->temp.tempint + (char *) (h)->chunk), 0)			\
> > +# define obstack_blank(h, length)					      \
> > +  ((h)->temp.tempint = (length),					      \
> > +   (((h)->chunk_limit - (h)->next_free < (h)->temp.tempint)		      \
> > +   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		      \
> > +   obstack_blank_fast (h, (h)->temp.tempint))
> > +
> > +# define obstack_alloc(h, length)					      \
> > +  (obstack_blank ((h), (length)), obstack_finish ((h)))
> > +
> > +# define obstack_copy(h, where, length)					      \
> > +  (obstack_grow ((h), (where), (length)), obstack_finish ((h)))
> > +
> > +# define obstack_copy0(h, where, length)				      \
> > +  (obstack_grow0 ((h), (where), (length)), obstack_finish ((h)))
> > +
> > +# define obstack_finish(h)						      \
> > +  (((h)->next_free == (h)->object_base					      \
> > +    ? (((h)->maybe_empty_object = 1), 0)				      \
> > +    : 0),								      \
> > +   (h)->temp.tempptr = (h)->object_base,				      \
> > +   (h)->next_free							      \
> > +     = __PTR_ALIGN ((h)->object_base, (h)->next_free,			      \
> > +                    (h)->alignment_mask),				      \
> > +   (((h)->next_free - (char *) (h)->chunk				      \
> > +     > (h)->chunk_limit - (char *) (h)->chunk)				      \
> > +   ? ((h)->next_free = (h)->chunk_limit) : 0),				      \
> > +   (h)->object_base = (h)->next_free,					      \
> > +   (h)->temp.tempptr)
> > +
> > +# define obstack_free(h, obj)						      \
> > +  ((h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk,		      \
> > +   ((((h)->temp.tempint > 0						      \
> > +      && (h)->temp.tempint < (h)->chunk_limit - (char *) (h)->chunk))	      \
> > +   ? (((h)->next_free = (h)->object_base				      \
> > +                          = (h)->temp.tempint + (char *) (h)->chunk), 0)      \
> >     : ((obstack_free) ((h), (h)->temp.tempint + (char *) (h)->chunk), 0)))
> > -
> >  #endif /* not __GNUC__ */
> >  
> >  #ifdef __cplusplus
> > -}	/* C++ */
> > +}       /* C++ */
> >  #endif
> > -
> >  #endif /* obstack.h */
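
The __GNUC__/non-__GNUC__ split that dominates this file exists because GNU C
statement expressions let a macro keep its argument in a block-local variable,
while the portable versions must funnel values through the shared (h)->temp
slot. A toy illustration of the two techniques (hypothetical DOUBLE macros,
not from obstack):

#include <stdio.h>

/* GNU C: evaluate X once into a local; no shared state needed.  */
#define DOUBLE_GNU(x) __extension__ ({ int __v = (x); __v * 2; })

/* Portable: a comma expression has no locals, so it parks the value in a
   shared slot, the way the non-__GNUC__ obstack macros use (h)->temp.  */
static int temp_slot;
#define DOUBLE_STD(x) ((temp_slot = (x)), temp_slot * 2)

int
main (void)
{
  printf ("%d %d\n", DOUBLE_GNU (21), DOUBLE_STD (21));   /* 42 42 */
  return 0;
}
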
> > diff --git a/malloc/set-freeres.c b/malloc/set-freeres.c
> > index e7ffbe0..f18cb83 100644
> > --- a/malloc/set-freeres.c
> > +++ b/malloc/set-freeres.c
> > @@ -33,17 +33,17 @@ __libc_freeres (void)
> >       protect for multiple executions since these are fatal.  */
> >    static long int already_called;
> >  
> > -  if (! atomic_compare_and_exchange_bool_acq (&already_called, 1, 0))
> > +  if (!atomic_compare_and_exchange_bool_acq (&already_called, 1, 0))
> >      {
> > -      void * const *p;
> > +      void *const *p;
> >  
> >        _IO_cleanup ();
> >  
> >        RUN_HOOK (__libc_subfreeres, ());
> >  
> >        for (p = symbol_set_first_element (__libc_freeres_ptrs);
> > -	   ! symbol_set_end_p (__libc_freeres_ptrs, p); ++p)
> > -	free (*p);
> > +           !symbol_set_end_p (__libc_freeres_ptrs, p); ++p)
> > +        free (*p);
> >      }
> >  }
> >  libc_hidden_def (__libc_freeres)
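
atomic_compare_and_exchange_bool_acq is glibc-internal; the same run-once
guard can be sketched with C11 atomics (names here are illustrative, not the
glibc implementation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long already_called;

static void
cleanup_once (void)
{
  long expected = 0;
  /* Exactly one caller wins the exchange; the rest return early.  */
  if (!atomic_compare_exchange_strong (&already_called, &expected, 1))
    return;
  puts ("running one-shot cleanup");
}

int
main (void)
{
  cleanup_once ();
  cleanup_once ();   /* no-op the second time */
  return 0;
}
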
> > diff --git a/malloc/tst-mallocstate.c b/malloc/tst-mallocstate.c
> > index 8548dad..19e595b 100644
> > --- a/malloc/tst-mallocstate.c
> > +++ b/malloc/tst-mallocstate.c
> > @@ -48,19 +48,19 @@ main (void)
> >  
> >    free (malloc (10));
> >  
> > -  for (i=0; i<100; ++i)
> > +  for (i = 0; i < 100; ++i)
> >      {
> >        save_state = malloc_get_state ();
> >        if (save_state == NULL)
> > -	{
> > -	  merror ("malloc_get_state () failed.");
> > -	  break;
> > -	}
> > +        {
> > +          merror ("malloc_get_state () failed.");
> > +          break;
> > +        }
> >        /*free (malloc (10)); This could change the top chunk! */
> >        malloc_set_state (save_state);
> > -      p1 = realloc (p1, i*4 + 4);
> > +      p1 = realloc (p1, i * 4 + 4);
> >        if (p1 == NULL)
> > -	merror ("realloc (i*4) failed.");
> > +        merror ("realloc (i*4) failed.");
> >        free (save_state);
> >      }
> >  
> > diff --git a/malloc/tst-mtrace.c b/malloc/tst-mtrace.c
> > index 93d560d..606e7f6 100644
> > --- a/malloc/tst-mtrace.c
> > +++ b/malloc/tst-mtrace.c
> > @@ -55,20 +55,20 @@ main (void)
> >        ssize_t n = getline (&line, &linelen, fp);
> >  
> >        if (n < 0)
> > -	break;
> > +        break;
> >  
> >        if (n == 0)
> > -	continue;
> > +        continue;
> >  
> >        copy = strdup (line);
> >        if (copy == NULL)
> > -	abort ();
> > +        abort ();
> >  
> >        p = (char **) tsearch (copy, &root,
> > -			     (int (*) (const void *, const void *)) strcmp);
> > +                             (int (*)(const void *, const void *))strcmp);
> >        if (*p != copy)
> > -	/* This line wasn't added.  */
> > -	free (copy);
> > +        /* This line wasn't added.  */
> > +        free (copy);
> >      }
> >  
> >    fclose (fp);
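
The reformatted cast above, (int (*)(const void *, const void *)) strcmp, is
what tsearch requires for its comparator; with a small wrapper the cast
disappears entirely. A sketch (assuming <search.h>; compare_lines is an
illustrative name):

#include <search.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
compare_lines (const void *a, const void *b)
{
  return strcmp (a, b);
}

int
main (void)
{
  void *root = NULL;
  char *copy = strdup ("some line\n");
  if (copy == NULL)
    abort ();

  char **p = tsearch (copy, &root, compare_lines);
  if (*p != copy)
    /* An equal line was already in the tree; ours wasn't added.  */
    free (copy);

  printf ("stored: %s", *p);
  return 0;
}
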
> > diff --git a/malloc/tst-realloc.c b/malloc/tst-realloc.c
> > index 9d290d2..1df3057 100644
> > --- a/malloc/tst-realloc.c
> > +++ b/malloc/tst-realloc.c
> > @@ -79,11 +79,11 @@ do_test (void)
> >    for (i = 0; i < 20; i++)
> >      {
> >        if (c[i] != 0)
> > -	ok = 0;
> > +        ok = 0;
> >      }
> >  
> >    if (ok == 0)
> > -	merror ("first 20 bytes were not cleared");
> > +    merror ("first 20 bytes were not cleared");
> >  
> >    free (p);
> >  
> > @@ -104,11 +104,11 @@ do_test (void)
> >    for (i = 0; i < 16; i++)
> >      {
> >        if (c[i] != 0xff)
> > -	ok = 0;
> > +        ok = 0;
> >      }
> >  
> >    if (ok == 0)
> > -	merror ("first 16 bytes were not correct");
> > +    merror ("first 16 bytes were not correct");
> >  
> >    /* Check failed realloc leaves original untouched (C89).  */
> >    c = realloc (p, -1);
> > @@ -121,11 +121,11 @@ do_test (void)
> >    for (i = 0; i < 16; i++)
> >      {
> >        if (c[i] != 0xff)
> > -	ok = 0;
> > +        ok = 0;
> >      }
> >  
> >    if (ok == 0)
> > -	merror ("first 16 bytes were not correct after failed realloc");
> > +    merror ("first 16 bytes were not correct after failed realloc");
> >  
> >    /* realloc (p, 0) frees p (C89) and returns NULL (glibc).  */
> >    p = realloc (p, 0);
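
The two guarantees the test exercises — a failed realloc leaves the original
block intact (C89), and under glibc realloc (p, 0) frees p and returns NULL —
can be seen in isolation (realloc (p, 0) returning NULL is glibc behavior, not
portable):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  char *p = malloc (16);
  if (p == NULL)
    return 1;
  memset (p, 0xff, 16);

  /* An impossible request fails, but P must stay valid (C89).  */
  char *q = realloc (p, (size_t) -1);
  if (q == NULL && (unsigned char) p[0] == 0xff)
    puts ("failed realloc left the block untouched");

  /* Under glibc, realloc (p, 0) behaves like free (p) and yields NULL.  */
  p = realloc (p, 0);
  printf ("after realloc to 0: %p\n", (void *) p);
  return 0;
}
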
> 
> -- 
> 
> CPU needs recalibration

-- 

your keyboard's space bar is generating spurious keycodes.

