/* Initialize static TLS area and DTV for current (only) thread.
   libpthread implementations should provide their own hook to handle
   all threads.

   MAP must already have l_tls_offset assigned into the static TLS
   region; this copies the module's TLS init image into place and
   publishes the block in the thread's DTV. */
void attribute_hidden __attribute_noinline__
_dl_nothread_init_static_tls (struct link_map *map)
{
# ifdef TLS_TCB_AT_TP
  /* TCB at thread pointer: static TLS blocks sit below the TCB, so the
     offset is subtracted from the thread pointer. */
  void *dest = (char *) THREAD_SELF - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
  /* DTV at thread pointer: blocks sit above the pre-TCB area, so the
     offset is added past TLS_PRE_TCB_SIZE. */
  void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.
     dtv[-1].counter holds the DTV capacity in slots; a modid beyond it
     would index out of bounds, so abort hard instead. */
  dtv_t *dtv = THREAD_DTV ();
  if (!(map->l_tls_modid <= dtv[-1].counter))
    {
      _dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n");
      _dl_exit(30);
    }
  dtv[map->l_tls_modid].pointer.val = dest;
  /* Mark the slot static so DTV updates never try to free this block. */
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory: copy the TLS init image, then zero-fill the
     remainder of the block (the tbss part). */
  _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
  _dl_memset((dest + map->l_tls_initimage_size), '\0',
             map->l_tls_blocksize - map->l_tls_initimage_size);
}
/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.

   Resolve a GD/LD TLS access: make sure this thread's DTV is current,
   then return the address of the requested variable, allocating the
   module's TLS block on first touch (deferred allocation). */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  /* Slow path: the DTV's generation lags behind the global generation,
     meaning modules were loaded/unloaded since this thread last looked.
     Bring the DTV up to date (this may reallocate it, so re-read). */
  if (__builtin_expect (dtv[0].counter != _dl_tls_generation, 0))
    {
      the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
      dtv = THREAD_DTV ();
    }

  p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    {
      /* The allocation was deferred.  Do it now. */
      if (the_map == NULL)
        {
          /* Find the link map for this module by walking the chunked
             slotinfo list; idx becomes the offset within the chunk. */
          size_t idx = GET_ADDR_MODULE;
          struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;

          while (idx >= listp->len)
            {
              idx -= listp->len;
              listp = listp->next;
            }

          the_map = listp->slotinfo[idx].map;
        }

      p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
      /* Dynamically allocated block: eligible for freeing on DTV update. */
      dtv[GET_ADDR_MODULE].pointer.is_static = false;
    }

  return (char *) p + GET_ADDR_OFFSET;
}
/* Minimal __tls_get_addr: resolve the access against DTV slot 1
   (presumably the sole initial module in this configuration — confirm
   against the enclosing #ifdef arm) plus the variable's offset. */
void *
__tls_get_addr (tls_index *ti)
{
  char *block = (char *) (THREAD_DTV ())[1].pointer.val;
  return block + ti->ti_offset;
}
/* We are trying to perform a static TLS relocation in MAP, but it was
   dynamically loaded.  This can only work if there is enough surplus in
   the static TLS area already allocated for each running thread.  If
   this object's TLS segment is too big to fit, we fail.  If it fits,
   we set MAP->l_tls_offset and return.

   Returns 0 on success and -1 on failure (NOTE(review): the original
   comment claimed the function "does not return any value", which does
   not match the int return — callers should check the result). */
int
_dl_try_allocate_static_tls (struct link_map *map)
{
  /* If we've already used the variable with dynamic access, or if the
     alignment requirements are too high, fail. */
  if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
      || map->l_tls_align > GL(dl_tls_static_align))
    {
    fail:
      return -1;
    }

#if TLS_TCB_AT_TP
  /* Blocks grow downward from the TCB: compute the highest suitably
     aligned offset that still fits in the unused surplus. */
  size_t freebytes = GL(dl_tls_static_size) - GL(dl_tls_static_used);
  if (freebytes < TLS_TCB_SIZE)
    goto fail;
  freebytes -= TLS_TCB_SIZE;

  size_t blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  /* n = number of whole alignment units left above the block. */
  size_t n = (freebytes - blsize) / map->l_tls_align;

  size_t offset = GL(dl_tls_static_used)
                  + (freebytes - n * map->l_tls_align
                     - map->l_tls_firstbyte_offset);

  map->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
  /* dl_tls_static_used includes the TCB at the beginning.  Blocks grow
     upward: round the current high-water mark up to the required
     alignment (adjusted for the segment's first-byte offset). */
  size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
                            - map->l_tls_firstbyte_offset,
                            map->l_tls_align)
                   + map->l_tls_firstbyte_offset);
  size_t used = offset + map->l_tls_blocksize;

  if (used > GL(dl_tls_static_size))
    goto fail;

  map->l_tls_offset = offset;
  map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
  GL(dl_tls_static_used) = used;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* If the object is not yet relocated we cannot initialize the
     static TLS region.  Delay it. */
  if (map->l_real->l_relocated)
    {
#ifdef SHARED
      if (__builtin_expect (THREAD_DTV()[0].counter != GL(dl_tls_generation),
                            0))
        /* Update the slot information data for at least the generation of
           the DSO we are allocating data for. */
        (void) _dl_update_slotinfo (map->l_tls_modid);
#endif
      GL(dl_init_static_tls) (map);
    }
  else
    map->l_need_tls_init = 1;

  return 0;
}
/* Minimal __tls_get_addr variant: the TLS block recorded in DTV slot 1
   (presumably the sole initial module — confirm against the enclosing
   #ifdef arm) plus the offset extracted via GET_ADDR_OFFSET. */
void *
__tls_get_addr (tls_index *ti)
{
  char *block = (char *) (THREAD_DTV ())[1].pointer.val;
  return block + GET_ADDR_OFFSET;
}
/* Minimal two-argument __tls_get_addr: ignores the module index M and
   resolves OFFSET within the block recorded in DTV slot 1.  Note this
   arch's dtv_t exposes `.pointer` directly (no `.val` member here —
   confirm against the dtv_t definition for this configuration). */
void *
__tls_get_addr (size_t m, size_t offset)
{
  char *block = (char *) (THREAD_DTV ())[1].pointer;
  return block + offset;
}
struct link_map * _dl_update_slotinfo (unsigned long int req_modid) { struct link_map *the_map = NULL; dtv_t *dtv = THREAD_DTV (); /* The global dl_tls_dtv_slotinfo array contains for each module index the generation counter current when the entry was created. This array never shrinks so that all module indices which were valid at some time can be used to access it. Before the first use of a new module index in this function the array was extended appropriately. Access also does not have to be guarded against modifications of the array. It is assumed that pointer-size values can be read atomically even in SMP environments. It is possible that other threads at the same time dynamically load code and therefore add to the slotinfo list. This is a problem since we must not pick up any information about incomplete work. The solution to this is to ignore all dtv slots which were created after the one we are currently interested. We know that dynamic loading for this module is completed and this is the last load operation we know finished. */ unsigned long int idx = req_modid; struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list; _dl_debug_early ("Updating slotinfo for module %d\n", req_modid); while (idx >= listp->len) { idx -= listp->len; listp = listp->next; } if (dtv[0].counter < listp->slotinfo[idx].gen) { /* The generation counter for the slot is higher than what the current dtv implements. We have to update the whole dtv but only those entries with a generation counter <= the one for the entry we need. */ size_t new_gen = listp->slotinfo[idx].gen; size_t total = 0; /* We have to look through the entire dtv slotinfo list. */ listp = _dl_tls_dtv_slotinfo_list; do { size_t cnt; for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt) { size_t gen = listp->slotinfo[cnt].gen; if (gen > new_gen) /* This is a slot for a generation younger than the one we are handling now. It might be incompletely set up so ignore it. 
*/ continue; /* If the entry is older than the current dtv layout we know we don't have to handle it. */ if (gen <= dtv[0].counter) continue; /* If there is no map this means the entry is empty. */ struct link_map *map = listp->slotinfo[cnt].map; if (map == NULL) { /* If this modid was used at some point the memory might still be allocated. */ if (! dtv[total + cnt].pointer.is_static && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED) { _dl_free (dtv[total + cnt].pointer.val); dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED; } continue; } /* Check whether the current dtv array is large enough. */ size_t modid = map->l_tls_modid; _dl_assert (total + cnt == modid); if (dtv[-1].counter < modid) { /* Reallocate the dtv. */ dtv_t *newp; size_t newsize = _dl_tls_max_dtv_idx + DTV_SURPLUS; size_t oldsize = dtv[-1].counter; _dl_assert (map->l_tls_modid <= newsize); if (dtv == _dl_initial_dtv) { /* This is the initial dtv that was allocated during rtld startup using the dl-minimal.c malloc instead of the real malloc. We can't free it, we have to abandon the old storage. */ newp = _dl_malloc ((2 + newsize) * sizeof (dtv_t)); if (newp == NULL) oom (); _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t)); } else { newp = _dl_realloc (&dtv[-1], (2 + newsize) * sizeof (dtv_t)); if (newp == NULL) oom (); } newp[0].counter = newsize; /* Clear the newly allocated part. */ _dl_memset (newp + 2 + oldsize, '\0', (newsize - oldsize) * sizeof (dtv_t)); /* Point dtv to the generation counter. */ dtv = &newp[1]; /* Install this new dtv in the thread data structures. */ INSTALL_NEW_DTV (dtv); } /* If there is currently memory allocate for this dtv entry free it. */ /* XXX Ideally we will at some point create a memory pool. */ if (! dtv[modid].pointer.is_static && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED) /* Note that free is called for NULL is well. We deallocate even if it is this dtv entry we are supposed to load. The reason is that we call memalign and not malloc. 
*/ _dl_free (dtv[modid].pointer.val); /* This module is loaded dynamically- We defer memory allocation. */ dtv[modid].pointer.is_static = false; dtv[modid].pointer.val = TLS_DTV_UNALLOCATED; if (modid == req_modid) the_map = map; } total += listp->len; } while ((listp = listp->next) != NULL); /* This will be the new maximum generation counter. */ dtv[0].counter = new_gen; } return the_map; }
/*
 * We are trying to perform a static TLS relocation in MAP, but it was
 * dynamically loaded.  This can only work if there is enough surplus in
 * the static TLS area already allocated for each running thread.  If this
 * object's TLS segment is too big to fit, we fail (by printing an error
 * and exiting — unlike _dl_try_allocate_static_tls this variant does not
 * return an error code to the caller).
 */
void internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
	/* If the alignment requirements are too high fail.  */
	if (map->l_tls_align > _dl_tls_static_align) {
fail:
		/* NOTE(review): message has no trailing newline. */
		_dl_dprintf(2, "cannot allocate memory in static TLS block");
		_dl_exit(30);
	}

# ifdef TLS_TCB_AT_TP
	size_t freebytes;
	size_t n;
	size_t blsize;

	/* Surplus below the TCB that is still unused. */
	freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;

	blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
	if (freebytes < blsize)
		goto fail;

	/* Round the leftover down to the alignment (assumes l_tls_align
	   is a power of two — TODO confirm). */
	n = (freebytes - blsize) & ~(map->l_tls_align - 1);

	/* Place the block as high as possible while keeping alignment. */
	size_t offset = _dl_tls_static_used +
		(freebytes - n - map->l_tls_firstbyte_offset);

	map->l_tls_offset = _dl_tls_static_used = offset;
# elif defined(TLS_DTV_AT_TP)
	size_t used;
	size_t check;
	/* Blocks grow upward: round the high-water mark up to the
	   alignment (roundup_pow2 — assumes power-of-two alignment). */
	size_t offset = roundup_pow2 (_dl_tls_static_used, map->l_tls_align);

	used = offset + map->l_tls_blocksize;
	check = used;

	/* dl_tls_static_used includes the TCB at the beginning.  */
	if (check > _dl_tls_static_size)
		goto fail;

	map->l_tls_offset = offset;
	_dl_tls_static_used = used;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

	/*
	 * If the object is not yet relocated we cannot initialize the
	 * static TLS region. Delay it.
	 */
	if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE) {
#ifdef SHARED
		/*
		 * Update the slot information data for at least the generation of
		 * the DSO we are allocating data for.
		 */
		if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
			(void) _dl_update_slotinfo (map->l_tls_modid);
#endif
		_dl_init_static_tls (map);
	} else
		map->l_need_tls_init = 1;
}