/*
 * Releases the thread-index slot owned by |natp| so it can be reused
 * by a later thread, then scrubs the TLS-related fields in the saved
 * user register context so stale values cannot survive the thread.
 */
void NaClTlsFree(struct NaClAppThread *natp) {
  uint32_t idx = NaClGetThreadIdx(natp);

  /*
   * BUG FIX: idx is uint32_t, so the matching conversion is %u, not %d
   * (passing an unsigned value to %d is a format/argument mismatch).
   */
  NaClLog(2, "NaClTlsFree: old idx %u\n", idx);

  /* gNaClThreadIdxInUse is shared across threads; guard with gNaClTlsMu. */
  NaClXMutexLock(&gNaClTlsMu);
  gNaClThreadIdxInUse[idx] = 0;
  NaClXMutexUnlock(&gNaClTlsMu);

  /*
   * Clear the saved TLS-carrying register state.  NOTE(review): r9
   * appears to carry the per-thread TLS value on this target — confirm
   * against the context-switch code before relying on this comment.
   */
  natp->user.r9 = 0;
  natp->user.guard_token = 0;
}
/*
 * Trusted-side entry point for a newly created thread that will run
 * untrusted code.  |state| is the NaClAppThread for the new thread.
 * Registers the per-thread signal stack and TLS state, publishes the
 * thread in the global per-index tables and in the app's thread table,
 * notifies the debug stub, marks the thread as untrusted, and finally
 * jumps into untrusted code at natp->user.prog_ctr (does not return).
 */
void WINAPI NaClAppThreadLauncher(void *state) {
  struct NaClAppThread *natp = (struct NaClAppThread *) state;
  uint32_t thread_idx;
  NaClLog(4, "NaClAppThreadLauncher: entered\n");

  /* Install the alternate stack used by the signal handler on this thread. */
  NaClSignalStackRegister(natp->signal_stack);

  NaClLog(4, " natp = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) natp);
  NaClLog(4, " prog_ctr = 0x%016"NACL_PRIxNACL_REG"\n", natp->user.prog_ctr);
  NaClLog(4, "stack_ptr = 0x%016"NACL_PRIxPTR"\n",
          NaClGetThreadCtxSp(&natp->user));

  /* Index 0 is reserved (never a valid thread); see teardown code. */
  thread_idx = NaClGetThreadIdx(natp);
  CHECK(0 < thread_idx);
  CHECK(thread_idx < NACL_THREAD_MAX);

  NaClTlsSetCurrentThread(natp);
  /* Make this thread's user context reachable via its index. */
  nacl_user[thread_idx] = &natp->user;
#if NACL_WINDOWS
  nacl_thread_ids[thread_idx] = GetCurrentThreadId();
#elif NACL_OSX
  NaClSetCurrentMachThreadForThreadIndex(thread_idx);
#endif

  /*
   * We have to hold the threads_mu lock until after thread_num field
   * in this thread has been initialized.  All other threads can only
   * find and examine this natp through the threads table, so the fact
   * that natp is not consistent (no thread_num) will not be visible.
   */
  NaClXMutexLock(&natp->nap->threads_mu);
  natp->thread_num = NaClAddThreadMu(natp->nap, natp);
  NaClXMutexUnlock(&natp->nap->threads_mu);

  NaClVmHoleThreadStackIsSafe(natp->nap);

  NaClStackSafetyNowOnUntrustedStack();

  /*
   * Notify the debug stub that a new thread is available.
   */
  if (NULL != natp->nap->debug_stub_callbacks) {
    natp->nap->debug_stub_callbacks->thread_create_hook(natp);
  }

  /*
   * After this NaClAppThreadSetSuspendState() call, we should not
   * claim any mutexes, otherwise we risk deadlock.
   */
  NaClAppThreadSetSuspendState(natp, NACL_APP_THREAD_TRUSTED,
                               NACL_APP_THREAD_UNTRUSTED);

  /* Transfer control to untrusted code; does not return here. */
  NaClStartThreadInApp(natp, natp->user.prog_ctr);
}
/*
 * Thread entry point (Windows-flavored variant): registers the signal
 * stack and TLS index, publishes the thread in the app's thread table,
 * prepares debugging, and enters untrusted code at natp->user.prog_ctr
 * inside a structured exception handler.  Does not return normally.
 */
void WINAPI NaClThreadLauncher(void *state) {
  struct NaClAppThread *natp = (struct NaClAppThread *) state;
  NaClLog(4, "NaClThreadLauncher: entered\n");

  /* Install the alternate stack used by the signal handler on this thread. */
  NaClSignalStackRegister(natp->signal_stack);

  NaClLog(4, " natp = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) natp);
  NaClLog(4, " prog_ctr = 0x%016"NACL_PRIxNACL_REG"\n", natp->user.prog_ctr);
  NaClLog(4, "stack_ptr = 0x%016"NACL_PRIxPTR"\n",
          NaClGetThreadCtxSp(&natp->user));

  /*
   * Record this thread's index for later lookup; presumably stored in
   * thread-local storage (name-based inference — confirm in nacl_tls.h).
   */
  NaClTlsSetIdx(NaClGetThreadIdx(natp));

  /*
   * We have to hold the threads_mu lock until after thread_num field
   * in this thread has been initialized.  All other threads can only
   * find and examine this natp through the threads table, so the fact
   * that natp is not consistent (no thread_num) will not be visible.
   */
  NaClXMutexLock(&natp->nap->threads_mu);
  natp->thread_num = NaClAddThreadMu(natp->nap, natp);
  NaClXMutexUnlock(&natp->nap->threads_mu);

  /*
   * Notify the debug stub that a new thread is available.
   */
  NaClDebugThreadPrepDebugging(natp);

  /*
   * We need to set an exception handler in every thread we start,
   * otherwise the system's default handler is called and a message box is
   * shown.
   */
  WINDOWS_EXCEPTION_TRY;
  NaClStartThreadInApp(natp, natp->user.prog_ctr);
  WINDOWS_EXCEPTION_CATCH;
}
/*
 * natp should be thread_self(), called while holding no locks.
 *
 * Tears down the calling thread: notifies the debug stub, removes the
 * thread from the global per-index tables and the app's thread table,
 * frees the NaClAppThread object, and exits the host thread.  Never
 * returns.
 *
 * Lock-ordering invariant: the debug stub's lock is acquired (inside
 * thread_exit_hook) BEFORE nap->threads_mu, so the hook must run before
 * we take threads_mu below.
 */
void NaClAppThreadTeardown(struct NaClAppThread *natp) {
  struct NaClApp *nap;
  size_t thread_idx;

  /*
   * mark this thread as dead; doesn't matter if some other thread is
   * asking us to commit suicide.
   */
  NaClLog(3, "NaClAppThreadTeardown(0x%08"NACL_PRIxPTR")\n",
          (uintptr_t) natp);
  nap = natp->nap;
  if (NULL != nap->debug_stub_callbacks) {
    NaClLog(3, " notifying the debug stub of the thread exit\n");
    /*
     * This must happen before deallocating the ID natp->thread_num.
     * We have the invariant that debug stub lock should be acquired before
     * nap->threads_mu lock. Hence we must not hold threads_mu lock while
     * calling debug stub hooks.
     */
    nap->debug_stub_callbacks->thread_exit_hook(natp);
  }

  NaClLog(3, " getting thread table lock\n");
  NaClXMutexLock(&nap->threads_mu);
  NaClLog(3, " getting thread lock\n");
  NaClXMutexLock(&natp->mu);

  /*
   * Remove ourselves from the ldt-indexed global tables.  The ldt
   * entry is released as part of NaClAppThreadDelete(), and if
   * another thread is immediately created (from some other running
   * thread) we want to be sure that any ldt-based lookups will not
   * reach this dying thread's data.
   */
  thread_idx = NaClGetThreadIdx(natp);
  /*
   * On x86-64 and ARM, clearing nacl_user entry ensures that we will
   * fault if another syscall is made with this thread_idx.  In
   * particular, thread_idx 0 is never used.
   */
  nacl_user[thread_idx] = NULL;
#if NACL_WINDOWS
  nacl_thread_ids[thread_idx] = 0;
#elif NACL_OSX
  NaClClearMachThreadForThreadIndex(thread_idx);
#endif

  /*
   * Unset the TLS variable so that if a crash occurs during thread
   * teardown, the signal handler does not dereference a dangling
   * NaClAppThread pointer.
   */
  NaClTlsSetCurrentThread(NULL);

  NaClLog(3, " removing thread from thread table\n");
  /* Deallocate the ID natp->thread_num. */
  NaClRemoveThreadMu(nap, natp->thread_num);
  NaClLog(3, " unlocking thread\n");
  NaClXMutexUnlock(&natp->mu);
  NaClLog(3, " unlocking thread table\n");
  NaClXMutexUnlock(&nap->threads_mu);
  NaClLog(3, " unregistering signal stack\n");
  NaClSignalStackUnregister();
  NaClLog(3, " freeing thread object\n");
  NaClAppThreadDelete(natp);
  NaClLog(3, " NaClThreadExit\n");

  /* Terminate the host OS thread; the natp pointer is now dangling. */
  NaClThreadExit();
  NaClLog(LOG_FATAL,
          "NaClAppThreadTeardown: NaClThreadExit() should not return\n");
  /* NOTREACHED */
}
int NaClAppThreadCtor(struct NaClAppThread *natp, struct NaClApp *nap, int is_privileged, uintptr_t usr_entry, uintptr_t usr_stack_ptr, uint32_t tls_idx, uintptr_t sys_tdb) { int rv; uint64_t thread_idx; struct NaClDescEffectorLdr *effp; NaClLog(4, " natp = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) natp); NaClLog(4, " nap = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) nap); NaClLog(4, "usr_stack_ptr = 0x%016"NACL_PRIxPTR"\n", usr_stack_ptr); NaClThreadContextCtor(&natp->user, nap, usr_entry, usr_stack_ptr, tls_idx); effp = NULL; natp->signal_stack = NULL; if (!NaClMutexCtor(&natp->mu)) { return 0; } if (!NaClCondVarCtor(&natp->cv)) { goto cleanup_mutex; } natp->is_privileged = is_privileged; if (!NaClClosureResultCtor(&natp->result)) { goto cleanup_cv; } natp->sysret = 0; natp->nap = nap; effp = (struct NaClDescEffectorLdr *) malloc(sizeof *effp); if (NULL == effp) { goto cleanup_cv; } if (!NaClDescEffectorLdrCtor(effp, natp)) { goto cleanup_cv; } natp->effp = (struct NaClDescEffector *) effp; effp = NULL; if (!NaClSignalStackAllocate(&natp->signal_stack)) { goto cleanup_cv; } natp->holding_sr_locks = 0; natp->state = NACL_APP_THREAD_ALIVE; natp->thread_num = -1; /* illegal index */ natp->sys_tdb = sys_tdb; natp->dynamic_delete_generation = 0; thread_idx = NaClGetThreadIdx(natp); nacl_thread[thread_idx] = natp; nacl_user[thread_idx] = &natp->user; nacl_sys[thread_idx] = &natp->sys; rv = NaClThreadCtor(&natp->thread, NaClThreadLauncher, (void *) natp, NACL_KERN_STACK_SIZE); if (rv != 0) { return rv; /* Success */ } NaClClosureResultDtor(&natp->result); cleanup_cv: NaClCondVarDtor(&natp->cv); cleanup_mutex: NaClMutexDtor(&natp->mu); free(effp); natp->effp = NULL; if (NULL != natp->signal_stack) { NaClSignalStackFree(&natp->signal_stack); natp->signal_stack = NULL; } return 0; }