Re: [PATCH v2] Consolidate valloc/pvalloc code.


On 8 November 2013 21:02, Ondřej Bílka <neleai@seznam.cz> wrote:
> On Fri, Nov 08, 2013 at 10:14:15AM +0000, Will Newton wrote:
>> On 7 November 2013 21:14, Ondřej Bílka <neleai@seznam.cz> wrote:
>> > Hi,
>> >
>> > As valloc/pvalloc are legacy functionality, they should occupy a minimal
>> > amount of code. Here we redirect valloc/pvalloc calls to memalign calls.
>> >
>> > This allows us to simplify _int_memalign, as several checks are already
>> > done in __libc_memalign.
>> >
>> > We could go further with this refactoring, as the hook call is almost
>> > duplicated; we would just need to define a new function and pass the
>> > caller address as an argument.
>> >
>> > Comments?
>>
> I did the refactoring by introducing _mid_memalign, which calls the hook
> with the caller passed as an argument.
>
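
So if I am reading this right, all the entry points now funnel into one
place, roughly:

  valloc (n)                   -> _mid_memalign (pagesize, n, caller)
  pvalloc (n)                  -> _mid_memalign (pagesize, roundup (n, pagesize), caller)
  memalign (align, n)          -> _mid_memalign (align, n, caller)
  posix_memalign (p, align, n) -> _mid_memalign (align, n, caller)

That seems like the right shape.
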
>> I am in favour of this refactor in principle. However, you also need to
>> look at hooks.c, as _int_memalign is called there too. You can test
>> that code by setting MALLOC_CHECK_=3 and running the tests.
>>
> I added the same check to the hook as in _mid_memalign; the rest of the
> checks there are also duplicates.
>
> I addressed the rest of the comments.
>
>         * malloc/hooks.c (memalign_check): Add alignment rounding.
>         * malloc/malloc.c (_mid_memalign): New function.
>         (__libc_valloc, __libc_pvalloc, __libc_memalign, __posix_memalign):
>         Implement by calling _mid_memalign.
>         * manual/probes.texi (Memory Allocation Probes): Remove
>         memory_valloc_retry and memory_pvalloc_retry.
>
> diff --git a/malloc/hooks.c b/malloc/hooks.c
> index 1dbe93f..12727ab 100644
> --- a/malloc/hooks.c
> +++ b/malloc/hooks.c
> @@ -376,6 +376,14 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
>        return 0;
>      }
>
> +  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
> +  if ((alignment & (alignment - 1)) != 0) {

I guess this could use powerof2 for consistency.
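Something like:

  if (!powerof2 (alignment)) {

would match the copy in malloc.c below, which already uses it
(powerof2 comes from <sys/param.h>).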

> +    size_t a = MALLOC_ALIGNMENT * 2;
> +    while (a < alignment) a <<= 1;
> +    alignment = a;
> +  }
> +
> +
>    (void)mutex_lock(&main_arena.mutex);
>    mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
>      NULL;
> diff --git a/malloc/malloc.c b/malloc/malloc.c
> index 897c43a..7297af4 100644
> --- a/malloc/malloc.c
> +++ b/malloc/malloc.c
> @@ -1054,8 +1054,8 @@ static void     _int_free(mstate, mchunkptr, int);
>  static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
>                            INTERNAL_SIZE_T);
>  static void*  _int_memalign(mstate, size_t, size_t);
> -static void*  _int_valloc(mstate, size_t);
> -static void*  _int_pvalloc(mstate, size_t);
> +static void*  _mid_memalign(size_t, size_t, void *);
> +
>  static void malloc_printerr(int action, const char *str, void *ptr);
>
>  static void* internal_function mem2mem_check(void *p, size_t sz);
> @@ -3002,15 +3002,23 @@ libc_hidden_def (__libc_realloc)
>  void*
>  __libc_memalign(size_t alignment, size_t bytes)
>  {
> +  void *address = RETURN_ADDRESS (0);
> +  return _mid_memalign (pagesz, bytes, address);
> +}

This does not compile: pagesz is not defined here; it should be alignment.
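That is:

  return _mid_memalign (alignment, bytes, address);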

> +
> +static void *
> +_mid_memalign (size_t alignment, size_t bytes, void *address)
> +{
> +
>    mstate ar_ptr;
>    void *p;
>
>    void *(*hook) (size_t, size_t, const void *) =
>      force_reg (__memalign_hook);
>    if (__builtin_expect (hook != NULL, 0))
> -    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
> +    return (*hook)(alignment, bytes, address);
>
> -  /* If need less alignment than we give anyway, just relay to malloc */
> +  /* If we need less alignment than we give anyway, just relay to malloc */

If we're going to fix the comment, then we should probably add a full
stop and space to the end.
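Something like:

  /* If we need less alignment than we give anyway, just relay to malloc.  */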

>    if (alignment <= MALLOC_ALIGNMENT) return __libc_malloc(bytes);
>
>    /* Otherwise, ensure that it is at least a minimum chunk size */
> @@ -3031,6 +3039,15 @@ __libc_memalign(size_t alignment, size_t bytes)
>        return 0;
>      }
>
> +
> +  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */

The MINSIZE comment confuses me. AFAICT MINSIZE is not the issue (and
MINSIZE can statically be determined to be a power of 2).
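
FWIW, the loop only does anything for callers passing a non-power-of-2
alignment, which supports that reading. A quick standalone version of
the rounding logic (MALLOC_ALIGNMENT hardcoded to 16 here purely for
illustration; the real value is per-port):

  #include <stdio.h>

  #define MALLOC_ALIGNMENT 16  /* illustration only */

  int
  main (void)
  {
    size_t alignment = 24;  /* not a power of 2 */
    if ((alignment & (alignment - 1)) != 0) {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment) a <<= 1;
      alignment = a;
    }
    printf ("%zu\n", alignment);  /* prints 32 */
    return 0;
  }

So the comment would be clearer as something like "round a
non-power-of-2 alignment up to the next power of 2".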

> +  if (!powerof2(alignment)) {
> +    size_t a = MALLOC_ALIGNMENT * 2;
> +    while (a < alignment) a <<= 1;
> +    alignment = a;
> +  }
> +
> +

There is no need for the extra blank lines around this code.

>    arena_get(ar_ptr, bytes + alignment + MINSIZE);
>    if(!ar_ptr)
>      return 0;
> @@ -3055,54 +3072,22 @@ libc_hidden_def (__libc_memalign)
>  void*
>  __libc_valloc(size_t bytes)
>  {
> -  mstate ar_ptr;
> -  void *p;
> -
>    if(__malloc_initialized < 0)
>      ptmalloc_init ();
>
> +  void *address = RETURN_ADDRESS (0);
>    size_t pagesz = GLRO(dl_pagesize);
> -
> -  /* Check for overflow.  */
> -  if (bytes > SIZE_MAX - pagesz - MINSIZE)
> -    {
> -      __set_errno (ENOMEM);
> -      return 0;
> -    }
> -
> -  void *(*hook) (size_t, size_t, const void *) =
> -    force_reg (__memalign_hook);
> -  if (__builtin_expect (hook != NULL, 0))
> -    return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
> -
> -  arena_get(ar_ptr, bytes + pagesz + MINSIZE);
> -  if(!ar_ptr)
> -    return 0;
> -  p = _int_valloc(ar_ptr, bytes);
> -  if(!p) {
> -    LIBC_PROBE (memory_valloc_retry, 1, bytes);
> -    ar_ptr = arena_get_retry (ar_ptr, bytes);
> -    if (__builtin_expect(ar_ptr != NULL, 1)) {
> -      p = _int_memalign(ar_ptr, pagesz, bytes);
> -      (void)mutex_unlock(&ar_ptr->mutex);
> -    }
> -  } else
> -    (void)mutex_unlock (&ar_ptr->mutex);
> -  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
> -        ar_ptr == arena_for_chunk(mem2chunk(p)));
> -
> -  return p;
> +  return _mid_memalign (pagesz, bytes, address);
>  }
>
>  void*
>  __libc_pvalloc(size_t bytes)
>  {
> -  mstate ar_ptr;
> -  void *p;
>
>    if(__malloc_initialized < 0)
>      ptmalloc_init ();
>
> +  void *address = RETURN_ADDRESS (0);
>    size_t pagesz = GLRO(dl_pagesize);
>    size_t page_mask = GLRO(dl_pagesize) - 1;
>    size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
> @@ -3114,26 +3099,7 @@ __libc_pvalloc(size_t bytes)
>        return 0;
>      }
>
> -  void *(*hook) (size_t, size_t, const void *) =
> -    force_reg (__memalign_hook);
> -  if (__builtin_expect (hook != NULL, 0))
> -    return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
> -
> -  arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
> -  p = _int_pvalloc(ar_ptr, bytes);
> -  if(!p) {
> -    LIBC_PROBE (memory_pvalloc_retry, 1, bytes);
> -    ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
> -    if (__builtin_expect(ar_ptr != NULL, 1)) {
> -      p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
> -      (void)mutex_unlock(&ar_ptr->mutex);
> -    }
> -  } else
> -    (void)mutex_unlock(&ar_ptr->mutex);
> -  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
> -        ar_ptr == arena_for_chunk(mem2chunk(p)));
> -
> -  return p;
> +  return _mid_memalign (pagesz, rounded_bytes, address);
>  }
>
>  void*
> @@ -4318,20 +4284,7 @@ _int_memalign(mstate av, size_t alignment, size_t bytes)
>    unsigned long   remainder_size; /* its size */
>    INTERNAL_SIZE_T size;
>
> -  /* If need less alignment than we give anyway, just relay to malloc */
> -
> -  if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
>
> -  /* Otherwise, ensure that it is at least a minimum chunk size */
> -
> -  if (alignment <  MINSIZE) alignment = MINSIZE;
> -
> -  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
> -  if ((alignment & (alignment - 1)) != 0) {
> -    size_t a = MALLOC_ALIGNMENT * 2;
> -    while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
> -    alignment = a;
> -  }
>
>    checked_request2size(bytes, nb);
>
> @@ -4406,35 +4359,6 @@ _int_memalign(mstate av, size_t alignment, size_t bytes)
>
>
>  /*
> -  ------------------------------ valloc ------------------------------
> -*/
> -
> -static void*
> -_int_valloc(mstate av, size_t bytes)
> -{
> -  /* Ensure initialization/consolidation */
> -  if (have_fastchunks(av)) malloc_consolidate(av);
> -  return _int_memalign(av, GLRO(dl_pagesize), bytes);
> -}
> -
> -/*
> -  ------------------------------ pvalloc ------------------------------
> -*/
> -
> -
> -static void*
> -_int_pvalloc(mstate av, size_t bytes)
> -{
> -  size_t pagesz;
> -
> -  /* Ensure initialization/consolidation */
> -  if (have_fastchunks(av)) malloc_consolidate(av);
> -  pagesz = GLRO(dl_pagesize);
> -  return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
> -}
> -
> -
> -/*
>    ------------------------------ malloc_trim ------------------------------
>  */
>
> @@ -4968,14 +4892,9 @@ __posix_memalign (void **memptr, size_t alignment, size_t size)
>        || alignment == 0)
>      return EINVAL;
>
> -  /* Call the hook here, so that caller is posix_memalign's caller
> -     and not posix_memalign itself.  */
> -  void *(*hook) (size_t, size_t, const void *) =
> -    force_reg (__memalign_hook);
> -  if (__builtin_expect (hook != NULL, 0))
> -    mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
> -  else
> -    mem = __libc_memalign (alignment, size);
> +
> +  void *address = RETURN_ADDRESS (0);
> +  mem = _mid_memalign (alignment, size, address);
>
>    if (mem != NULL) {
>      *memptr = mem;
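
This also preserves what the deleted comment was guaranteeing: the hook
still sees posix_memalign's caller rather than posix_memalign itself.
For the archives, a minimal (untested) tracing hook adapted from the
manual's __malloc_hook example shows why that address matters:

  #include <stdio.h>
  #include <malloc.h>

  static void *(*old_memalign_hook) (size_t, size_t, const void *);

  static void *
  my_memalign_hook (size_t alignment, size_t bytes, const void *caller)
  {
    void *result;
    __memalign_hook = old_memalign_hook;  /* uninstall to avoid recursion */
    result = memalign (alignment, bytes);
    old_memalign_hook = __memalign_hook;
    /* `caller' should be the application's call site, not a frame
       inside malloc.c.  */
    fprintf (stderr, "memalign (%zu, %zu) from %p returns %p\n",
             alignment, bytes, caller, result);
    __memalign_hook = my_memalign_hook;   /* reinstall */
    return result;
  }

(Installed by saving __memalign_hook into old_memalign_hook and pointing
__memalign_hook at my_memalign_hook, as in the manual.) With
RETURN_ADDRESS (0) captured in the exported wrappers and threaded
through _mid_memalign, that stays true for valloc, pvalloc, memalign
and posix_memalign alike.
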
> diff --git a/manual/probes.texi b/manual/probes.texi
> index 5492bb7..556c010 100644
> --- a/manual/probes.texi
> +++ b/manual/probes.texi
> @@ -71,8 +71,6 @@ heap is released.  Argument @var{$arg1} is a pointer to the heap, and
>  @deftp Probe memory_malloc_retry (size_t @var{$arg1})
>  @deftpx Probe memory_realloc_retry (size_t @var{$arg1}, void *@var{$arg2})
>  @deftpx Probe memory_memalign_retry (size_t @var{$arg1}, size_t @var{$arg2})
> -@deftpx Probe memory_valloc_retry (size_t @var{$arg1})
> -@deftpx Probe memory_pvalloc_retry (size_t @var{$arg1})
>  @deftpx Probe memory_calloc_retry (size_t @var{$arg1})
>  These probes are triggered when the corresponding functions fail to
>  obtain the requested amount of memory from the arena in use, before they

It may also be worth adding a line to say that the memalign probe is
called for aligned_alloc, memalign, posix_memalign, pvalloc and
valloc.
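Maybe something like (wording up for grabs):

  The @code{memory_memalign_retry} probe is also used by the
  @code{aligned_alloc}, @code{memalign}, @code{posix_memalign},
  @code{pvalloc} and @code{valloc} functions, since they now share
  the @code{memalign} code path.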

-- 
Will Newton
Toolchain Working Group, Linaro

