/* Procedure executed by sideline threads.
 * XXX i#500: Cannot use libc routines (printf) in the child process.
 */
int
run(void *arg)
{
    int threadnum = (int)(long)arg;
    int i = 0;
    /* for CLONE_CHILD_CLEARTID for signaling parent.  if we used raw
     * clone system call we could get kernel to do this for us.
     */
    child[threadnum] = dynamorio_syscall(SYS_gettid, 0);
    dynamorio_syscall(SYS_set_tid_address, 1, &child[threadnum]);
    child_started[threadnum] = true;
    nolibc_print("Sideline thread started\n");
    while (true) {
        /* do nothing for now */
        i++;
        if (i % 25000000 == 0)
            break;
    }
    while (!child_exit[threadnum])
        nolibc_nanosleep(&sleeptime);
    nolibc_print("Sideline thread finished, exiting whole group\n");
    /* We deliberately bring down the whole group.  Note that this is
     * the default on x64 on returning for some reason which seems
     * like a bug in _clone() (xref i#94).
     */
    dynamorio_syscall(SYS_exit_group, 0);
    return 0;
}
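/* Illustrative sketch (not part of the original source): the comment in run()
 * above notes that a raw clone could have the kernel clear and signal the tid
 * slot for us.  This is roughly what that would look like: CLONE_CHILD_SETTID
 * writes the child's tid into child[threadnum], and CLONE_CHILD_CLEARTID makes
 * the kernel zero it and futex-wake any waiter when the thread exits.  The
 * name create_sideline_thread_sketch, the "stack_top" parameter, and the
 * x86-64 argument order (flags, newsp, parent_tidptr, child_tidptr, tls) are
 * assumptions; returning into C code on a fresh stack really requires an asm
 * stub, so treat this as pseudocode-level C.
 */
static pid_t
create_sideline_thread_sketch(void *stack_top, int threadnum)
{
    long flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
                 CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID;
    /* With these flags the parent can futex-wait on &child[threadnum] for
     * thread exit instead of polling child_exit[].
     */
    return dynamorio_syscall(SYS_clone, 5, flags, stack_top, NULL, &child[threadnum],
                             NULL);
}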
int
fork_syscall(void)
{
#if FORK_BROKEN_CASE_4967
    /* FIXME: SYS_fork on dereksha is creating a child whose pid is the
     * same as the parent's but has a different tid, and the abort() to dump
     * core kills the parent process -- it looks just like a separate
     * thread, not a separate process!
     *
     * When I use glibc fork() I get the proper behavior.
     * glibc 2.3.3 fork() calls clone() with flags = 0x01200011
     * == CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, which matches
     * glibc-2.3.3-200405070341/nptl/sysdeps/unix/sysv/linux/i386/fork.c's
     * ARCH_FORK, which is what glibc uses, so why doesn't SYS_fork do the
     * same thing?  Instead it simply sets SIGCHLD and not the CLONE_* flags
     * (see /usr/src/linux/arch/i386/kernel/process.c).  But trying the CLONE_*
     * flags doesn't do the trick -- libc fork() is doing something extra, and
     * glibc-2.3.3-200405070341/nptl/sysdeps/unix/sysv/linux/fork.c's version of it
     * shows it's doing some funny tricks with the pid!
     *
     * Once we figure it out, we need a dynamic check for the threading version
     * to know what to do.
     */
#    if 0
    /* from /usr/include/bits/sched.h ifdef __USE_MISC: */
#        define CLONE_CHILD_CLEARTID                     \
            0x00200000 /* Register exit futex and memory \
                        * location to clear. */
#        define CLONE_CHILD_SETTID                        \
            0x01000000 /* Store TID in userlevel buffer in \
                        * the child. */
    /* i386/fork.c passes a 5th arg, &THREAD_SELF->tid -- is it needed for SETTID? */
    static uint tid;
    return dynamorio_syscall(SYS_clone, 4 /* 5 */,
                             /* flags, newsp (if 0 -> cur esp), parent_tidptr,
                              * child_tidptr, something! */
                             CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, 0,
                             NULL, NULL /*, &tid*/);
#    else
    /* my workaround for now: just use libc -- binaries won't be back-compatible
     * though */
    return fork();
#    endif
#else
#    ifdef SYS_fork
    return dynamorio_syscall(SYS_fork, 0);
#    else
    return dynamorio_syscall(SYS_clone, 5, SIGCHLD, NULL, NULL, NULL, NULL);
#    endif
#endif
}
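/* Illustrative sketch (not part of the original source): how a caller would
 * typically branch on fork_syscall()'s raw return value, the same way libc
 * fork() callers do.  It assumes the nolibc_print() and wait_syscall() helpers
 * shown elsewhere in this collection are visible; the function name is
 * hypothetical.
 */
static void
fork_and_report_sketch(void)
{
    int pid = fork_syscall();
    if (pid < 0) {
        nolibc_print("fork failed\n");
    } else if (pid == 0) {
        /* Child sees 0; exit the whole (single-threaded) child process. */
        nolibc_print("in child\n");
        dynamorio_syscall(SYS_exit_group, 1, 0);
    } else {
        /* Parent sees the child's pid; reap it. */
        wait_syscall(NULL);
    }
}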
/* Safe nanosleep. */
void
nolibc_nanosleep(struct timespec *req)
{
#ifdef MACOS
    /* XXX: share with os_thread_sleep */
    /* The semaphore must be static so it is cached across calls; otherwise the
     * MACH_PORT_NULL test below is always true and we create a semaphore (and
     * leak it) on every call.
     */
    static semaphore_t sem = MACH_PORT_NULL;
    int res;
    if (sem == MACH_PORT_NULL) {
        /* Named kres to avoid shadowing res above. */
        kern_return_t kres =
            semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
        assert(kres == KERN_SUCCESS);
    }
    res = dynamorio_syscall(SYS___semwait_signal_nocancel, 6, sem, MACH_PORT_NULL, 1, 1,
                            (int64_t)req->tv_sec, (int32_t)req->tv_nsec);
#else
    dynamorio_syscall(SYS_nanosleep, 2, req, NULL);
#endif
}
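/* Illustrative sketch (not part of the original source): sleeping for 10ms via
 * nolibc_nanosleep().  The timespec layout is the standard one; the function
 * name is hypothetical.
 */
static void
sleep_10ms_sketch(void)
{
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 10 * 1000 * 1000; /* 10ms in nanoseconds */
    nolibc_nanosleep(&ts);
}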
static void
test_non_rt_sigaction(int sig)
{
    int rc;
    old_sigaction_t first_act;
    old_sigaction_t new_act;
    old_sigaction_t old_act;
    memset((void *)&first_act, 0, sizeof(first_act));
    first_act.handler = (void (*)(int, siginfo_t *, void *))SENTINEL;
    first_act.sa_mask |= (1 << SIGUSR1);
    first_act.sa_mask |= (1 << SIGUSR2);
    rc = dynamorio_syscall(SYS_sigaction, 3, sig, &first_act, NULL);
    assert(rc == 0);
    /* Test with nothing. */
    rc = dynamorio_syscall(SYS_sigaction, 3, sig, NULL, NULL);
    assert(rc == 0);
    /* Test passing NULL to non-rt sigaction, which is used on Android (i#1822) */
    memset((void *)&old_act, 0xff, sizeof(old_act));
    rc = dynamorio_syscall(SYS_sigaction, 3, sig, NULL, &old_act);
    assert(rc == 0 && old_act.handler == first_act.handler &&
           /* The flags do not match due to SA_RESTORER. */
           /* The rest of mask is uninit stack values from the libc wrapper. */
           *(long *)&old_act.sa_mask == *(long *)&first_act.sa_mask);
    /* Test with a new action. */
    memset((void *)&old_act, 0xff, sizeof(old_act));
    memset((void *)&new_act, 0, sizeof(new_act));
    new_act.handler = (void (*)(int, siginfo_t *, void *))SIG_IGN;
    rc = dynamorio_syscall(SYS_sigaction, 3, sig, &new_act, &old_act);
    assert(rc == 0 && old_act.handler == first_act.handler &&
           /* The flags do not match due to SA_RESTORER. */
           /* The rest of mask is uninit stack values from the libc wrapper. */
           *(long *)&old_act.sa_mask == *(long *)&first_act.sa_mask);
    /* Clear handler */
    memset((void *)&new_act, 0, sizeof(new_act));
    rc = dynamorio_syscall(SYS_sigaction, 3, sig, &new_act, NULL);
    assert(rc == 0);
}
/* Safe mmap. */
void *
nolibc_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
{
#if defined(X64) || defined(MACOS)
    int sysnum = SYS_mmap;
#else
    int sysnum = SYS_mmap2;
#endif
    return (void *)dynamorio_syscall(sysnum, 6, addr, length, prot, flags, fd, offset);
}
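/* Illustrative sketch (not part of the original source): an anonymous
 * read-write mapping via nolibc_mmap(), released with nolibc_munmap() (shown
 * later in this collection).  Note that on 32-bit Linux SYS_mmap2 interprets
 * "offset" in 4096-byte units, which does not matter for the 0 offset used
 * here.  The function name is hypothetical; on Mac, MAP_ANON would replace
 * MAP_ANONYMOUS.
 */
static void *
alloc_page_sketch(void)
{
    void *p = nolibc_mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    /* Raw syscalls return -errno in the top 4095 values, not MAP_FAILED. */
    if ((unsigned long)p >= (unsigned long)-4095)
        return NULL;
    return p;
}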
/* Safe print syscall. */
void
nolibc_print(const char *str)
{
    dynamorio_syscall(
#ifdef MACOS
        SYS_write_nocancel,
#else
        SYS_write,
#endif
        3, stderr->_fileno, str, nolibc_strlen(str));
}
/* Wakes up at most one thread waiting on the futex if the kernel supports
 * SYS_futex syscall.  Does nothing if the kernel doesn't support SYS_futex.
 */
ptr_int_t
ksynch_wake(volatile int *futex)
{
    ptr_int_t res;
    ASSERT(ALIGNED(futex, sizeof(int)));
    if (kernel_futex_support) {
        res = dynamorio_syscall(SYS_futex, 6, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
    } else {
        res = -1;
    }
    return res;
}
void
tls_thread_free(tls_type_t tls_type, int index)
{
    if (tls_type == TLS_TYPE_LDT)
        clear_ldt_entry(index);
    else if (tls_type == TLS_TYPE_GDT) {
        our_modify_ldt_t desc;
        clear_ldt_struct(&desc, index);
        DEBUG_DECLARE(int res =)
        dynamorio_syscall(SYS_set_thread_area, 1, &desc);
        ASSERT(res >= 0);
    }
}
void
ksynch_init(void)
{
    /* Determines whether the kernel supports SYS_futex syscall or not.
     * From man futex(2): initial futex support was merged in 2.5.7, in current six
     * argument format since 2.6.7.
     */
    volatile int futex_for_test = 0;
    ptr_int_t res = dynamorio_syscall(SYS_futex, 6, &futex_for_test, FUTEX_WAKE, 1,
                                      NULL, NULL, 0);
    kernel_futex_support = (res >= 0);
    ASSERT_CURIOSITY(kernel_futex_support);
}
/* Waits on the futex until woken if the kernel supports SYS_futex syscall
 * and the futex's value has not been changed from mustbe.  Does not block
 * if the kernel doesn't support SYS_futex.  Returns 0 if woken by another thread,
 * and negative value for all other cases.
 */
ptr_int_t
ksynch_wait(volatile int *futex, int mustbe)
{
    ptr_int_t res;
    ASSERT(ALIGNED(futex, sizeof(int)));
    if (kernel_futex_support) {
        /* XXX: Having debug timeout like win32 os_wait_event() would be useful */
        res = dynamorio_syscall(SYS_futex, 6, futex, FUTEX_WAIT, mustbe, NULL, NULL, 0);
    } else {
        res = -1;
    }
    return res;
}
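/* Illustrative sketch (not part of the original source): the standard pairing
 * of ksynch_wait() above with ksynch_wake().  The waiter re-checks the value
 * in a loop because FUTEX_WAIT can return early (a signal, or a wake racing
 * with the flag write); the flag and function names are hypothetical.
 */
static volatile int ready_flag; /* initially 0 */

static void
waiter_sketch(void)
{
    while (ready_flag == 0)
        ksynch_wait(&ready_flag, 0); /* sleeps only while the value is still 0 */
}

static void
signaler_sketch(void)
{
    ready_flag = 1;
    ksynch_wake(&ready_flag); /* wakes at most one waiter */
}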
/* i#2089: we skip this for non-detach */
void
tls_thread_free(tls_type_t tls_type, int index)
{
    /* XXX i#107 (and i#2088): We need to restore the segment base the
     * app was using when we detach, instead of just clearing.
     */
    if (tls_type == TLS_TYPE_LDT)
        clear_ldt_entry(index);
    else if (tls_type == TLS_TYPE_GDT) {
        our_modify_ldt_t desc;
        clear_ldt_struct(&desc, index);
        DEBUG_DECLARE(int res =)
        dynamorio_syscall(SYS_set_thread_area, 1, &desc);
        ASSERT(res >= 0);
    }
}
/* Waits on the futex until woken if the kernel supports SYS_futex syscall
 * and the futex's value has not been changed from mustbe.  Does not block
 * if the kernel doesn't support SYS_futex.  If timeout_ms is 0, there is no
 * timeout; else returns a negative value on a timeout.  Returns 0 if woken by
 * another thread, and negative value for all other cases.
 */
ptr_int_t
ksynch_wait(volatile int *futex, int mustbe, int timeout_ms)
{
    ptr_int_t res;
    ASSERT(ALIGNED(futex, sizeof(int)));
    if (kernel_futex_support) {
        /* XXX: Having debug timeout like win32 os_wait_event() would be useful */
        struct timespec timeout;
        timeout.tv_sec = (timeout_ms / 1000);
        timeout.tv_nsec = ((int64)timeout_ms % 1000) * 1000000;
        res = dynamorio_syscall(SYS_futex, 6, futex, FUTEX_WAIT, mustbe,
                                timeout_ms > 0 ? &timeout : NULL, NULL, 0);
    } else {
        res = -1;
    }
    return res;
}
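/* Illustrative sketch (not part of the original source): a bounded wait built
 * on the timeout variant above.  It re-checks the flag each iteration since
 * FUTEX_WAIT can also return early on a signal or a racing wake; the function
 * name is hypothetical.
 */
static bool
wait_up_to_1s_sketch(volatile int *flag)
{
    int i;
    for (i = 0; i < 10 && *flag == 0; i++)
        ksynch_wait(flag, 0, 100); /* ten 100ms waits */
    return *flag != 0;
}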
/* Safe print syscall. */
void
nolibc_print(const char *str)
{
    dynamorio_syscall(
#ifdef MACOS
        SYS_write_nocancel,
#else
        SYS_write,
#endif
        3,
#if defined(MACOS) || defined(ANDROID)
        stderr->_file,
#else
        stderr->_fileno,
#endif
        str, nolibc_strlen(str));
}
bool
tls_thread_preinit()
{
#ifdef X64
    /* i#3356: Write a non-zero value to the gs base to work around an AMD bug
     * present on pre-4.7 Linux kernels.  See the call to this in our signal
     * handler for more information.
     */
    if (proc_get_vendor() != VENDOR_AMD)
        return true;
    /* First identify a temp-native thread with a real segment in
     * place but just an invalid .magic field.  We do not want to clobber the
     * legitimate segment base in that case.
     */
    if (safe_read_tls_magic() == TLS_MAGIC_INVALID) {
        os_local_state_t *tls = (os_local_state_t *)safe_read_tls_self();
        if (tls != NULL &&
            tls->state.spill_space.dcontext->owning_thread == get_sys_thread_id())
            return true;
    }
    /* XXX: What about Mac on AMD?  Presumably by the time anyone wants to run
     * that combination the Mac kernel will have fixed this if they haven't already.
     */
    /* We just don't have time to support non-arch_prctl and test it. */
    if (tls_global_type != TLS_TYPE_ARCH_PRCTL) {
        ASSERT_BUG_NUM(3356, tls_global_type == TLS_TYPE_ARCH_PRCTL);
        return false;
    }
    int res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_GS, NON_ZERO_UNINIT_GSBASE);
    LOG(GLOBAL, LOG_THREADS, 1,
        "%s: set non-zero pre-init gs base for thread " TIDFMT "\n", __FUNCTION__,
        get_sys_thread_id());
    return res == 0;
#else
    return true;
#endif
}
void
tls_thread_init(os_local_state_t *os_tls, byte *segment)
{
    /* We have four different ways to obtain TLS, each with its own limitations:
     *
     * 1) Piggyback on the threading system (like we do on Windows): here that would
     *    be pthreads, which uses a segment since at least RH9, and uses gdt-based
     *    segments for NPTL.  The advantage is we won't run out of ldt or gdt entries
     *    (except when the app itself would).  The disadvantage is we're stealing
     *    application slots and we rely on user mode interfaces.
     *
     * 2) Steal an ldt entry via SYS_modify_ldt.  This suffers from the 8K ldt entry
     *    limit and requires that we update manually on a new thread.  For 64-bit
     *    we're limited here to a 32-bit base.  (Strangely, the kernel's
     *    include/asm-x86_64/ldt.h implies that the base is ignored: but it doesn't
     *    seem to be.)
     *
     * 3) Steal a gdt entry via SYS_set_thread_area.  There is a 3rd unused entry
     *    (after pthreads and wine) we could use.  The kernel swaps for us, and with
     *    CLONE_TLS the kernel will set up the entry for a new thread for us.  Xref
     *    PR 192231 and PR 285898.  This system call is disabled on 64-bit 2.6
     *    kernels (though the man page for arch_prctl implies it isn't for 2.5
     *    kernels?!?)
     *
     * 4) Use SYS_arch_prctl.  This is only implemented on 64-bit kernels, and can
     *    only be used to set the gdt entries that fs and gs select for.  Faster to
     *    use <4GB base (obtain with mmap MAP_32BIT) since can use gdt; else have to
     *    use wrmsr.  The man pages say "ARCH_SET_GS is disabled in some kernels".
     */
    uint selector;
    int index = -1;
    int res;
#ifdef X64
    /* First choice is gdt, which means arch_prctl.  Since this may fail
     * on some kernels, we require -heap_in_lower_4GB so we can fall back
     * on modify_ldt.
     */
    byte *cur_gs;
    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_GET_GS, &cur_gs);
    if (res >= 0) {
        LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: cur gs base is "PFX"\n", cur_gs);
        /* If we're a non-initial thread, gs will be set to the parent thread's value */
        if (cur_gs == NULL || is_dynamo_address(cur_gs) ||
            /* By resolving i#107, we can handle gs conflicts between app and dr. */
            INTERNAL_OPTION(mangle_app_seg)) {
            res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_GS, segment);
            if (res >= 0) {
                os_tls->tls_type = TLS_TYPE_ARCH_PRCTL;
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl successful for base "PFX"\n", segment);
                /* Kernel should have written %gs for us if using GDT */
                if (!dynamo_initialized && read_thread_register(SEG_TLS) == 0) {
                    LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: using MSR\n");
                    tls_using_msr = true;
                }
                if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
                    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_FS,
                                            os_tls->os_seg_info.priv_lib_tls_base);
                    /* Assuming set fs must be successful if set gs succeeded. */
                    ASSERT(res >= 0);
                }
            } else {
                /* we've found a kernel where ARCH_SET_GS is disabled */
                ASSERT_CURIOSITY(false && "arch_prctl failed on set but not get");
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl failed: error %d\n", res);
            }
        } else {
            /* FIXME PR 205276: we don't currently handle it: fall back on ldt, but
             * we'll have the same conflict w/ the selector...
             */
            ASSERT_BUG_NUM(205276, cur_gs == NULL);
        }
    }
#endif
    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Second choice is set_thread_area */
        /* PR 285898: if we added CLONE_SETTLS to all clone calls (and emulated vfork
         * with clone) we could avoid having to set tls up for each thread (as well
         * as solve race PR 207903), at least for kernel 2.5.32+.  For now we stick
         * w/ manual setup.
         */
        our_modify_ldt_t desc;
        /* Pick which GDT slots we'll use for DR TLS and for library TLS if
         * using the private loader.
         */
        choose_gdt_slots(os_tls);
        if (tls_gdt_index > -1) {
            /* Now that we know which GDT slot to use, install the per-thread base
             * into it.
             */
            /* Base here must be 32-bit */
            IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) &&
                          segment <= (byte *)UINT_MAX));
            initialize_ldt_struct(&desc, segment, PAGE_SIZE, tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3, "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, tls_gdt_index, res, desc.entry_number);
            ASSERT(res < 0 || desc.entry_number == tls_gdt_index);
        } else {
            res = -1; /* fall back on LDT */
        }
        if (res >= 0) {
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area successful for base "PFX" @index %d\n",
                segment, tls_gdt_index);
            os_tls->tls_type = TLS_TYPE_GDT;
            index = tls_gdt_index;
            selector = GDT_SELECTOR(index);
            WRITE_DR_SEG(selector); /* macro needs lvalue! */
        } else {
            IF_VMX86(ASSERT_NOT_REACHED()); /* since no modify_ldt */
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area failed: error %d\n", res);
        }
#ifdef CLIENT_INTERFACE
        /* Install the library TLS base. */
        if (INTERNAL_OPTION(private_loader) && res >= 0) {
            app_pc base = os_tls->os_seg_info.priv_lib_tls_base;
            /* lib_tls_gdt_index is picked in choose_gdt_slots. */
            ASSERT(lib_tls_gdt_index >= gdt_entry_tls_min);
            initialize_ldt_struct(&desc, base, GDT_NO_SIZE_LIMIT, lib_tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3, "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, lib_tls_gdt_index, res, desc.entry_number);
            if (res >= 0) {
                /* i#558: update lib seg reg to enforce the segment changes */
                selector = GDT_SELECTOR(lib_tls_gdt_index);
                LOG(GLOBAL, LOG_THREADS, 2, "%s: setting %s to selector 0x%x\n",
                    __FUNCTION__, reg_names[LIB_SEG_TLS], selector);
                WRITE_LIB_SEG(selector);
            }
        }
#endif
    }
    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Third choice: modify_ldt, which should be available on kernel 2.3.99+ */
        /* Base here must be 32-bit */
        IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) && segment <= (byte *)UINT_MAX));
        /* we have the thread_initexit_lock so no race here */
        index = find_unused_ldt_index();
        selector = LDT_SELECTOR(index);
        ASSERT(index != -1);
        create_ldt_entry((void *)segment, PAGE_SIZE, index);
        os_tls->tls_type = TLS_TYPE_LDT;
        WRITE_DR_SEG(selector); /* macro needs lvalue! */
        LOG(GLOBAL, LOG_THREADS, 1,
            "os_tls_init: modify_ldt successful for base "PFX" w/ index %d\n",
            segment, index);
    }
    os_tls->ldt_index = index;
}
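/* Illustrative sketch (not part of the original source): how the
 * GDT_SELECTOR/LDT_SELECTOR values used above are typically formed on x86 --
 * the table index in the upper bits, a table-indicator bit (0 = GDT,
 * 1 = LDT), and requested privilege level 3 for user mode.  The macro name is
 * an assumption standing in for the real project macros.
 */
#define SELECTOR_SKETCH(index, is_ldt) \
    ((uint)(((index) << 3) | ((is_ldt) ? 0x4 : 0) | 0x3 /* RPL 3 */))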
/* Queries the set of available GDT slots, and initializes:
 * - tls_gdt_index
 * - gdt_entry_tls_min on ia32
 * - lib_tls_gdt_index if using private loader
 * GDT slots are initialized with a base and limit of zero.  The caller is
 * responsible for setting them to a real base.
 */
static void
choose_gdt_slots(os_local_state_t *os_tls)
{
    our_modify_ldt_t desc;
    int i;
    int avail_index[GDT_NUM_TLS_SLOTS];
    our_modify_ldt_t clear_desc;
    int res;
    /* using local static b/c dynamo_initialized is not set for a client thread
     * when created in client's dr_init routine
     */
    /* FIXME: Could be racy if we have multiple threads initializing during
     * startup.
     */
    if (tls_global_init)
        return;
    tls_global_init = true;
    /* We don't want to break the assumptions of pthreads or wine,
     * so we try to take the last slot.  We don't want to hardcode
     * the index b/c the kernel will let us clobber entries so we want
     * to only pass in -1.
     */
    ASSERT(!dynamo_initialized);
    ASSERT(tls_gdt_index == -1);
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++)
        avail_index[i] = -1;
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        /* We use a base and limit of 0 for testing what's available. */
        initialize_ldt_struct(&desc, NULL, 0, -1);
        res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
        LOG(GLOBAL, LOG_THREADS, 4, "%s: set_thread_area -1 => %d res, %d index\n",
            __FUNCTION__, res, desc.entry_number);
        if (res >= 0) {
            /* We assume monotonic increases */
            avail_index[i] = desc.entry_number;
            ASSERT(avail_index[i] > tls_gdt_index);
            tls_gdt_index = desc.entry_number;
        } else
            break;
    }
#ifndef X64
    /* In x86-64's ia32 emulation,
     * set_thread_area(6 <= entry_number && entry_number <= 8) fails
     * with EINVAL (22) because x86-64 only accepts GDT indices 12 to 14
     * for TLS entries.
     */
    if (tls_gdt_index > (gdt_entry_tls_min + GDT_NUM_TLS_SLOTS))
        gdt_entry_tls_min = GDT_ENTRY_TLS_MIN_64; /* The kernel is x64. */
#endif
    /* Now give up the earlier slots */
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        if (avail_index[i] > -1 && avail_index[i] != tls_gdt_index) {
            LOG(GLOBAL, LOG_THREADS, 4, "clearing set_thread_area index %d\n",
                avail_index[i]);
            clear_ldt_struct(&clear_desc, avail_index[i]);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &clear_desc);
            ASSERT(res >= 0);
        }
    }
#ifndef VMX86_SERVER
    ASSERT_CURIOSITY(tls_gdt_index == (kernel_is_64bit() ? GDT_64BIT : GDT_32BIT));
#endif
#ifdef CLIENT_INTERFACE
    if (INTERNAL_OPTION(private_loader) && tls_gdt_index != -1) {
        /* Use the app's selector with our own TLS base for libraries.  app_fs
         * and app_gs are initialized by the caller in os_tls_app_seg_init().
         */
        int index = SELECTOR_INDEX(os_tls->app_lib_tls_reg);
        if (index == 0) {
            /* An index of zero means the app has no TLS (yet), and happens
             * during early injection.  We use -1 to grab a new entry.  When the
             * app asks for its first table entry with set_thread_area, we give
             * it this one and emulate its usage of the segment.
             */
            ASSERT_CURIOSITY(DYNAMO_OPTION(early_inject) &&
                             "app has no TLS, but we used non-early injection");
            initialize_ldt_struct(&desc, NULL, 0, -1);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 4, "%s: set_thread_area -1 => %d res, %d index\n",
                __FUNCTION__, res, desc.entry_number);
            ASSERT(res >= 0);
            if (res >= 0) {
                return_stolen_lib_tls_gdt = true;
                index = desc.entry_number;
            }
        }
        lib_tls_gdt_index = index;
    }
#endif
}
static int
modify_ldt_syscall(int func, void *ptr, unsigned long bytecount)
{
    return dynamorio_syscall(SYS_modify_ldt, 3, func, ptr, bytecount);
}
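/* Illustrative sketch (not part of the original source): using
 * modify_ldt_syscall() with func=0 (read) to count the LDT entries currently
 * in use, the kind of scan a helper like find_unused_ldt_index() would build
 * on.  The 8-byte descriptor size and 8192-entry limit are the architectural
 * values mentioned in the comments above; the static buffer and function name
 * are assumptions.
 */
static int
count_ldt_entries_sketch(void)
{
    static char buf[8 * 8192]; /* full LDT; static to avoid a huge stack frame */
    int bytes = modify_ldt_syscall(0 /* read */, buf, sizeof(buf));
    return bytes < 0 ? -1 : bytes / 8;
}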
/* Safe nanosleep. */
void
nolibc_nanosleep(struct timespec *req)
{
    dynamorio_syscall(SYS_nanosleep, 2, req, NULL);
}
static int
execve_syscall(const char *exe, const char **argv, char **envp)
{
    return dynamorio_syscall(SYS_execve, 3, exe, argv, envp);
}
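/* Illustrative sketch (not part of the original source): exec'ing /bin/ls
 * through execve_syscall() with manually built argv/envp arrays, as a
 * libc-free caller would.  On success this does not return; nolibc_print()
 * from elsewhere in this collection reports the failure path.  The function
 * name is hypothetical.
 */
static void
exec_ls_sketch(void)
{
    const char *argv[] = { "/bin/ls", "/", NULL };
    char *envp[] = { NULL };
    execve_syscall("/bin/ls", argv, envp);
    nolibc_print("execve failed\n"); /* reached only on failure */
}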
pid_t
wait_syscall(int *status)
{
    return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_wait4), 4, WAIT_ANY, status, 0, NULL);
}
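/* Illustrative sketch (not part of the original source): decoding the raw
 * wait4 status word without libc's WIFEXITED/WEXITSTATUS macros, using the
 * standard Linux encoding (low 7 bits zero for a normal exit, exit code in
 * bits 8-15, otherwise the terminating signal number).  The function name is
 * hypothetical.
 */
static int
wait_for_exit_code_sketch(void)
{
    int status = 0;
    pid_t pid = wait_syscall(&status);
    if (pid < 0)
        return -1;
    if ((status & 0x7f) == 0)
        return (status >> 8) & 0xff; /* normal exit: return the exit code */
    return -(status & 0x7f);         /* killed: return negative signal number */
}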
void *
privload_tls_init(void *app_tls)
{
    void *res;
    /* We have to exactly duplicate the offset of key fields in Android's
     * pthread_internal_t struct.
     */
    ASSERT(PTHREAD_TLS_OFFS == offsetof(android_pthread_internal_t, tls));
    ASSERT(DR_TLS_BASE_OFFSET ==
           offsetof(android_pthread_internal_t, dr_tls_base) -
               PTHREAD_TLS_OFFS /* the self slot */);
    if (!dynamo_initialized) {
        char **e;
        /* We have to duplicate the pthread setup that the Android loader does.
         * We expect app_tls to be either NULL or garbage, as we have early injection.
         */
        init_thread.tid = dynamorio_syscall(SYS_set_tid_address, 1, &init_thread.tid);
        init_thread.cached_pid_ = init_thread.tid;
        /* init_thread.attr is set to all 0 (sched is SCHED_NORMAL==0, and sizes are
         * zeroed out)
         */
        /* init_thread.join_state is set to 0 (THREAD_NOT_JOINED) */
        init_thread.tls[ANDROID_TLS_SLOT_SELF] = init_thread.tls;
        init_thread.tls[ANDROID_TLS_SLOT_THREAD_ID] = &init_thread;
        /* tls[TLS_SLOT_STACK_GUARD] is set to 0 */
        /* Set up the data struct pointing at kernel args that Bionic expects */
        kernel_args.argc = *(int *)kernel_init_sp;
        kernel_args.argv = (char **)kernel_init_sp + 1;
        kernel_args.envp = kernel_args.argv + kernel_args.argc + 1;
        /* The aux vector is after the last environment pointer. */
        for (e = kernel_args.envp; *e != NULL; e++)
            ; /* nothing */
        kernel_args.auxv = (ELF_AUXV_TYPE *)(e + 1);
        init_thread.tls[ANDROID_TLS_SLOT_BIONIC_PREINIT] = &kernel_args;
        /* We use our own alternate signal stack */
        LOG(GLOBAL, LOG_LOADER, 2, "%s: kernel sp is "PFX"; TLS set to "PFX"\n",
            __FUNCTION__, init_thread.tls[ANDROID_TLS_SLOT_BIONIC_PREINIT],
            init_thread.tls[ANDROID_TLS_SLOT_SELF]);
        res = init_thread.tls[ANDROID_TLS_SLOT_SELF];
    } else {
        android_pthread_internal_t *thrd;
        res = heap_mmap(ALIGN_FORWARD(sizeof(android_pthread_internal_t), PAGE_SIZE));
        LOG(GLOBAL, LOG_LOADER, 2,
            "%s: allocated new TLS at "PFX"; copying from "PFX"\n", __FUNCTION__, res,
            app_tls);
        if (app_tls != NULL)
            memcpy(res, app_tls, sizeof(android_pthread_internal_t));
        thrd = (android_pthread_internal_t *)res;
        thrd->tls[ANDROID_TLS_SLOT_SELF] = thrd->tls;
        thrd->tls[ANDROID_TLS_SLOT_THREAD_ID] = thrd;
        thrd->tid = get_thread_id();
        thrd->dr_tls_base = NULL;
        res = thrd->tls[ANDROID_TLS_SLOT_SELF];
        LOG(GLOBAL, LOG_LOADER, 2, "%s: TLS set to "PFX"\n", __FUNCTION__,
            thrd->tls[ANDROID_TLS_SLOT_SELF]);
    }
    /* Android does not yet support per-module TLS */
    return res;
}
/* Safe munmap. */
void
nolibc_munmap(void *addr, size_t length)
{
    dynamorio_syscall(SYS_munmap, 2, addr, length);
}