This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
Re: [PATCH] Async signal safe TLS accesses
- From: Ondřej Bílka <neleai at seznam dot cz>
- To: Andrew Hunter <ahh at google dot com>
- Cc: libc-alpha at sourceware dot org, carlos at redhat dot com, iant at google dot com, ppluzhnikov at google dot com
- Date: Fri, 4 Oct 2013 09:03:00 +0200
- Subject: Re: [PATCH] Async signal safe TLS accesses
- Authentication-results: sourceware.org; auth=none
- References: <1379977289-21260-1-git-send-email-ahh at google dot com> <1380830518-16721-1-git-send-email-ahh at google dot com>
On Thu, Oct 03, 2013 at 01:01:58PM -0700, Andrew Hunter wrote:
> TLS accesses from initial-exec variables are async-signal-safe. Even
> dynamic-type accesses from shared objects loaded by ld.so at startup
> are. But dynamic accesses from dlopen()ed objects are not, which
> means a lot of trouble for any sort of per-thread state we want to
> use from signal handlers since we can't rely on always having
> initial-exec. Make all TLS access always signal safe.
>
After running a formatter and restricting it to the patch, you need to fix the following
formatting issues:
diff --git a/elf/dl-misc.c b/elf/dl-misc.c
index 974c6fd..c3c09b1 100644
--- a/elf/dl-misc.c
+++ b/elf/dl-misc.c
@@ -374,7 +374,7 @@ internal_function
_dl_mask_all_signals (sigset_t *old)
{
sigset_t new;
- sigfillset(&new);
+ sigfillset (&new);
int ret;
/* So...hmmm. This function just serves as a replacement to pthread_sigmask,
@@ -393,14 +393,14 @@ _dl_mask_all_signals (sigset_t *old)
that's not present.
It's unfortunate there's no simpler solution than duplicating sigmask.
- */
+ */
/* it's very important we don't touch errno here--that's TLS and who
knows what might happen then! */
INTERNAL_SYSCALL_DECL (err);
ret = INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &new, old,
- _NSIG / 8);
+ _NSIG / 8);
assert (ret == 0);
}
@@ -414,7 +414,7 @@ _dl_unmask_signals (sigset_t *old)
INTERNAL_SYSCALL_DECL (err);
ret = INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, old, NULL,
- _NSIG / 8);
+ _NSIG / 8);
assert (ret == 0);
}
@@ -427,12 +427,13 @@ _dl_unmask_signals (sigset_t *old)
efficient, but it will be used rarely (and only in binaries that use
dlopen.) The API matches that of malloc() and friends. */
-struct __signal_safe_allocator_header {
+struct __signal_safe_allocator_header
+{
size_t size;
void *start;
};
-void * weak_function
+void *weak_function
__signal_safe_memalign (size_t boundary, size_t size)
{
struct __signal_safe_allocator_header *header;
@@ -443,7 +444,7 @@ __signal_safe_memalign (size_t boundary, size_t size)
if (boundary & (boundary - 1))
return NULL;
- size_t pg = GLRO(dl_pagesize);
+ size_t pg = GLRO (dl_pagesize);
size_t padded_size;
if (boundary <= pg)
{
@@ -463,7 +464,7 @@ __signal_safe_memalign (size_t boundary, size_t size)
size_t actual_size = roundup (padded_size, pg);
void *actual = mmap (NULL, actual_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (actual == MAP_FAILED)
return NULL;
@@ -473,33 +474,32 @@ __signal_safe_memalign (size_t boundary, size_t size)
}
else
{
- intptr_t actual_pg = ((intptr_t)actual) / pg;
+ intptr_t actual_pg = ((intptr_t) actual) / pg;
intptr_t boundary_pg = boundary / pg;
intptr_t start_pg = actual_pg + boundary_pg;
start_pg -= start_pg % boundary_pg;
if (start_pg > (actual_pg + 1))
- {
- int ret = munmap (actual, (start_pg - actual_pg - 1) * pg);
- assert (ret == 0);
- actual = (void *)((start_pg - 1) * pg);
- }
- char *start = (void *)(start_pg * pg);
- header = start - sizeof(*header);
-
+ {
+ int ret = munmap (actual, (start_pg - actual_pg - 1) * pg);
+ assert (ret == 0);
+ actual = (void *) ((start_pg - 1) * pg);
+ }
+ char *start = (void *) (start_pg * pg);
+ header = start - sizeof (*header);
}
header->size = actual_size;
header->start = actual;
void *ptr = header;
ptr += sizeof (*header);
- if (((intptr_t)ptr) % boundary != 0)
+ if (((intptr_t) ptr) % boundary != 0)
_dl_fatal_printf ("__signal_safe_memalign produced incorrect alignment\n");
return ptr;
}
-void * weak_function
+void *weak_function
__signal_safe_malloc (size_t size)
{
- return __signal_safe_memalign(1, size);
+ return __signal_safe_memalign (1, size);
}
void weak_function
@@ -508,13 +508,13 @@ __signal_safe_free (void *ptr)
if (ptr == NULL)
return;
- struct __signal_safe_allocator_header *header = ((char *)ptr) - sizeof (*header);
+ struct __signal_safe_allocator_header *header = ((char *) ptr) - sizeof (*header);
int ret = munmap (header->start, header->size);
assert (ret == 0);
}
-void * weak_function
+void *weak_function
__signal_safe_realloc (void *ptr, size_t size)
{
if (size == 0)
@@ -525,7 +525,7 @@ __signal_safe_realloc (void *ptr, size_t size)
if (ptr == NULL)
return __signal_safe_malloc (size);
- struct __signal_safe_allocator_header *header = ((char *)ptr) - sizeof (*header);
+ struct __signal_safe_allocator_header *header = ((char *) ptr) - sizeof (*header);
size_t old_size = header->size;
if (old_size - sizeof (*header) >= size)
return ptr;
@@ -540,11 +540,11 @@ __signal_safe_realloc (void *ptr, size_t size)
return new_ptr;
}
-void * weak_function
+void *weak_function
__signal_safe_calloc (size_t nmemb, size_t size)
{
- void *ptr = __signal_safe_malloc(nmemb * size);
+ void *ptr = __signal_safe_malloc (nmemb * size);
if (ptr == NULL)
return NULL;
- return memset(ptr, 0, nmemb * size);
+ return memset (ptr, 0, nmemb * size);
}
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index f9493d7..fa1ca68 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -90,9 +90,9 @@ _dl_try_allocate_static_tls (struct link_map *map)
ptrdiff_t val;
while ((val = map->l_tls_offset) == NO_TLS_OFFSET)
{
- atomic_compare_and_exchange_bool_acq(&map->l_tls_offset,
- offset,
- NO_TLS_OFFSET);
+ atomic_compare_and_exchange_bool_acq (&map->l_tls_offset,
+ offset,
+ NO_TLS_OFFSET);
}
if (val != offset)
{
@@ -102,7 +102,7 @@ _dl_try_allocate_static_tls (struct link_map *map)
}
/* We installed the value; now update the globals. */
#if TLS_TCB_AT_TP
- GL(dl_tls_static_used) = offset;
+ GL (dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
GL(dl_tls_static_used) = used;
@@ -147,7 +147,7 @@ _dl_allocate_static_tls (struct link_map *map)
_dl_unmask_signals (&old);
if (err != 0)
{
- _dl_signal_error (0, map->l_name, NULL, N_("\
+ _dl_signal_error (0, map->l_name, NULL, N_ ("\
cannot allocate memory in static TLS block"));
}
}
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 9b699d7..5af9e63 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -468,7 +468,7 @@ internal_function
_dl_clear_dtv (dtv_t *dtv)
{
for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
- if (! dtv[1 + cnt].pointer.is_static
+ if (!dtv[1 + cnt].pointer.is_static
&& dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
__signal_safe_free (dtv[1 + cnt].pointer.val);
memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));
@@ -597,7 +597,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
update us. */
dtv = THREAD_DTV ();
if (dtv[0].counter >= listp->slotinfo[idx].gen)
- goto out;
+ goto out;
/* We have to look through the entire dtv slotinfo list. */
listp = GL(dl_tls_dtv_slotinfo_list);
do
@@ -626,7 +626,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
/* If this modid was used at some point the memory
might still be allocated. */
if (dtv[-1].counter >= modid
- && !dtv[modid].pointer.is_static
+ && !dtv[modid].pointer.is_static
&& dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
{
__signal_safe_free (dtv[modid].pointer.val);
@@ -708,7 +708,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
/* This will be the new maximum generation counter. */
dtv[0].counter = new_gen;
- out:
+ out:
_dl_unmask_signals (&old);
}
@@ -750,27 +750,27 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
ptrdiff_t offset;
while ((offset = the_map->l_tls_offset) == NO_TLS_OFFSET)
{
- atomic_compare_and_exchange_bool_acq(&the_map->l_tls_offset,
- FORCED_DYNAMIC_TLS_OFFSET,
- NO_TLS_OFFSET);
+ atomic_compare_and_exchange_bool_acq (&the_map->l_tls_offset,
+ FORCED_DYNAMIC_TLS_OFFSET,
+ NO_TLS_OFFSET);
}
if (offset == FORCED_DYNAMIC_TLS_OFFSET)
{
- allocate_and_init(&dtv[GET_ADDR_MODULE], the_map);
+ allocate_and_init (&dtv[GET_ADDR_MODULE], the_map);
}
else
{
void **pp = &dtv[GET_ADDR_MODULE].pointer.val;
while (atomic_forced_read (*pp) == TLS_DTV_UNALLOCATED)
- {
- /* for lack of a better (safe) thing to do, just spin.
- Someone else (not us; it's done under a signal mask) set
- this map to a static TLS offset, and they'll iterate all
- threads to initialize it. They'll eventually set
- is_static=true, at which point we know they've fully
- completed initialization. */
- atomic_delay ();
- }
+ {
+ /* for lack of a better (safe) thing to do, just spin.
+ Someone else (not us; it's done under a signal mask) set
+ this map to a static TLS offset, and they'll iterate all
+ threads to initialize it. They'll eventually set
+ is_static=true, at which point we know they've fully
+ completed initialization. */
+ atomic_delay ();
+ }
/* make sure we've picked up their initialization of the actual block. */
atomic_read_barrier ();
}