/*
 * Extended logging module initializer: construct the log mutex, then
 * apply the caller-supplied verbosity level and output Gio stream.
 *
 * NOTE(review): the NaClMutexCtor result is ignored here — presumably
 * acceptable because this runs before going threaded; confirm.
 */
void NaClLogModuleInitExtended(int initial_verbosity, struct Gio *log_gio) {
  NaClMutexCtor(&log_mu);
  NaClLogSetVerbosity(initial_verbosity);
  NaClLogSetGio(log_gio);
}
/*
 * Initialize the NaCl clock module: snapshot the current wall-clock
 * time into g_nacl_clock_tv and construct the clock mutex.
 *
 * Returns non-zero on success, 0 on failure.
 */
int NaClClockInit(void) {
  if (NaClGetTimeOfDay(&g_nacl_clock_tv) != 0) {
    return 0;
  }
  g_NaClClock_is_initialized = NaClMutexCtor(&g_nacl_clock_mu);
  return g_NaClClock_is_initialized;
}
/*
 * Construct a NaClNameService in place.  The base simple limited
 * service is constructed first; only after the mutex is also
 * constructed is the vtable pointer switched to the name-service
 * vtable and the (empty) entry list initialized.
 *
 * Returns 1 on success, 0 on failure.  On failure the object is left
 * destroyed (the base-class Dtor is invoked on the mutex-ctor failure
 * path).
 */
int NaClNameServiceCtor(struct NaClNameService *self,
                        NaClThreadIfFactoryFunction thread_factory_fn,
                        void *thread_factory_data) {
  int retval = 0; /* fail */

  NaClLog(4, "Entered NaClNameServiceCtor\n");
  if (!NaClSimpleLtdServiceCtor(&self->base, kNaClNameServiceHandlers,
                                NACL_NAME_SERVICE_CONNECTION_MAX,
                                thread_factory_fn, thread_factory_data)) {
    NaClLog(4, "NaClSimpleLtdServiceCtor failed\n");
    goto done;
  }
  if (!NaClMutexCtor(&self->mu)) {
    NaClLog(4, "NaClMutexCtor failed\n");
    goto abort_mu;
  }
  /*
   * Only now is it safe to install the subclass vtable: everything the
   * subclass Dtor would tear down has been constructed.
   */
  NACL_VTBL(NaClRefCount, self) =
      (struct NaClRefCountVtbl *) &kNaClNameServiceVtbl;
  /* success return path */
  self->head = (struct NaClNameServiceEntry *) NULL;
  retval = 1;
  goto done;

  /* cleanup unwind */
 abort_mu:
  /*
   * mutex ctor failed; the vtbl still points at the base class's vtable
   * here, so this invokes the base-class Dtor.
   */
  (*NACL_VTBL(NaClRefCount, self)->Dtor)((struct NaClRefCount *) self);
 done:
  return retval;
}
/*
 * Base-class constructor for NaClDesc objects: set the initial
 * reference count to 1 and construct the per-descriptor mutex.
 *
 * Returns the NaClMutexCtor result: non-zero on success, 0 on failure.
 */
int NaClDescCtor(struct NaClDesc *ndp) {
  /* this should be a compile-time test */
  if (0 != (sizeof(struct NaClInternalHeader) & 0xf)) {
    /* LOG_FATAL aborts, so control does not continue past this call. */
    NaClLog(LOG_FATAL,
            "Internal error. NaClInternalHeader size not a"
            " multiple of 16\n");
  }
  ndp->ref_count = 1;
  return NaClMutexCtor(&ndp->mu);
}
/*
 * Construct an interruptible mutex: an underlying mutex, a condition
 * variable, and a lock-state field initialized to FREE.
 *
 * Returns 1 on success, 0 on failure.  On condvar failure the mutex is
 * destroyed before returning, leaving *mp unconstructed.
 */
int NaClIntrMutexCtor(struct NaClIntrMutex *mp) {
  if (0 == NaClMutexCtor(&mp->mu)) {
    return 0;
  }
  if (0 == NaClCondVarCtor(&mp->cv)) {
    /* unwind the mutex so the caller sees a wholly-unconstructed object */
    NaClMutexDtor(&mp->mu);
    return 0;
  }
  mp->lock_state = NACL_INTR_LOCK_FREE;
  return 1;
}
/*
 * Construct a NaClHostDir around an already-open host directory
 * descriptor.  Ownership of dir_desc transfers to *d on success.
 *
 * Returns 0 on success, -NACL_ABI_ENOMEM if the mutex cannot be
 * constructed.
 */
int NaClHostDirCtor(struct NaClHostDir *d, int dir_desc) {
  if (0 == NaClMutexCtor(&d->mu)) {
    return -NACL_ABI_ENOMEM;
  }
  d->fd = dir_desc;
  /* empty readdir buffer: next NaClHostDirGetdents-style read refills it */
  d->cur_byte = 0;
  d->nbytes = 0;
  NaClLog(3, "NaClHostDirCtor: success.\n");
  return 0;
}
/*
 * Module initializer for the invalid-descriptor singleton: allocate and
 * construct its guard mutex.  Either failure is fatal (NaClLog with
 * LOG_FATAL aborts), so no error is propagated to the caller.
 */
void NaClDescInvalidInit(void) {
  mutex = (struct NaClMutex *) malloc(sizeof *mutex);
  if (NULL == mutex) {
    NaClLog(LOG_FATAL, "Cannot allocate NaClDescInvalid mutex\n");
  }
  if (!NaClMutexCtor(mutex)) {
    /* leave the global in a clean state before aborting */
    free(mutex);
    mutex = NULL;
    NaClLog(LOG_FATAL, "Cannot construct NaClDescInvalid mutex\n");
  }
}
/*
 * Construct a NaClDescImcDesc: base connected-desc ctor, then the two
 * message-direction mutexes, then install the subclass vtable.
 *
 * Returns non-zero on success (the base ctor's status), 0 on failure.
 * On failure the partially-constructed object is unwound via
 * NaClDescUnref / NaClMutexDtor in reverse construction order.
 */
int NaClDescImcDescCtor(struct NaClDescImcDesc *self, NaClHandle h) {
  int ctor_status = NaClDescImcConnectedDescCtor(&self->base, h);

  if (0 == ctor_status) {
    return 0;
  }
  if (0 == NaClMutexCtor(&self->sendmsg_mu)) {
    NaClDescUnref((struct NaClDesc *) self);
    return 0;
  }
  if (0 == NaClMutexCtor(&self->recvmsg_mu)) {
    NaClMutexDtor(&self->sendmsg_mu);
    NaClDescUnref((struct NaClDesc *) self);
    return 0;
  }
  self->base.base.base.vtbl =
      (struct NaClRefCountVtbl const *) &kNaClDescImcDescVtbl;
  return ctor_status;
}
int NaClLdtInitPlatformSpecific(void) { HMODULE hmod = GetModuleHandleA("ntdll.dll"); /* * query_information_process is used to examine LDT entries to find a free * selector, etc. */ query_information_process = (NTQUERY)(GetProcAddress(hmod, "NtQueryInformationProcess")); if (query_information_process == 0) { /* * Unable to get query_information_process, which is needed for querying * the LDT. */ return 0; } /* * set_information_process is one of the methods used to update an LDT * entry for a given selector. */ set_information_process = (NTSETINFO)(GetProcAddress(hmod, "ZwSetInformationProcess")); /* * set_ldt_entries is the other method used to update an LDT entry for a * given selector. */ set_ldt_entries = (NTSETLDT)(GetProcAddress(hmod, "NtSetLdtEntries")); if (NULL == set_ldt_entries) { set_ldt_entries = (NTSETLDT)(GetProcAddress(hmod, "ZwSetLdtEntries")); } if ((NULL == set_ldt_entries) && (NULL == set_information_process)) { /* * Unable to locate either method for setting the LDT. */ return 0; } if (!NaClMutexCtor(&nacl_ldt_mutex)) { return 0; } /* * Allocate the last LDT entry to force the LDT to grow to its maximum size. */ return NaClLdtAllocateSelector(LDT_ENTRIES - 1, 0, NACL_LDT_DESCRIPTOR_DATA, 0, 0, 0); }
/*
 * Initialize the trusted TLS bookkeeping: clear the per-thread index
 * in-use table and construct its guard mutex.
 *
 * Returns 1 on success, 0 if the mutex cannot be constructed.
 */
int NaClTlsInit(void) {
  size_t idx;

  NaClLog(2, "NaClTlsInit\n");
  for (idx = 0; idx < kNumThreads; ++idx) {
    gNaClThreadIdxInUse[idx] = 0;
  }
  if (0 == NaClMutexCtor(&gNaClTlsMu)) {
    NaClLog(LOG_WARNING, "NaClTlsInit: gNaClTlsMu initialization failed\n");
    return 0;
  }
  return 1;
}
/*
 * Subclass constructor for NaClDescQuota, wrapping an existing desc.
 * Assumes the base class has already been constructed by the caller.
 *
 * On success takes ownership of desc, copies the file id, takes a
 * reference on quota_interface (if non-NULL), installs the quota
 * vtable, and returns 1.  On mutex-ctor failure, invokes the
 * (base-class) Dtor through the current vtbl and returns 0; the caller
 * retains ownership of desc and must free the object itself.
 */
int NaClDescQuotaSubclassCtor(struct NaClDescQuota *self,
                              struct NaClDesc *desc,
                              uint8_t const *file_id,
                              struct NaClDescQuotaInterface *quota_interface) {
  if (!NaClMutexCtor(&self->mu)) {
    /* do not NaClRefCountUnref, since we cannot free: caller must do that */
    (*NACL_VTBL(NaClRefCount, self)->Dtor)((struct NaClRefCount *) self);
    return 0;
  }
  self->desc = desc; /* take ownership */
  /* file_id must point at least NACL_DESC_QUOTA_FILE_ID_LEN bytes */
  memcpy(self->file_id, file_id, NACL_DESC_QUOTA_FILE_ID_LEN);
  if (NULL == quota_interface) {
    self->quota_interface = (struct NaClDescQuotaInterface *) NULL;
  } else {
    self->quota_interface = NaClDescQuotaInterfaceRef(quota_interface);
  }
  NACL_VTBL(NaClDesc, self) = &kNaClDescQuotaVtbl;
  return 1;
}
/*
 * Construct a NaClReverseService on top of the simple reverse service.
 * iface must be non-NULL (CHECKed); on success a reference to it is
 * taken.  conn_cap is handed to the base-class ctor.
 *
 * Returns 1 on success, 0 on failure; on failure the object is unwound
 * in reverse construction order and left destroyed.
 */
int NaClReverseServiceCtor(struct NaClReverseService *self,
                           struct NaClReverseInterface *iface,
                           struct NaClDesc *conn_cap) {
  int retval = 0; /* fail */

  CHECK(iface != NULL);

  NaClLog(4, "Entered NaClReverseServiceCtor\n");
  if (!NaClSimpleRevServiceCtor(&self->base, conn_cap,
                                kNaClReverseServiceHandlers,
                                NaClReverseThreadIfFactoryFn,
                                (void *) self)) {
    NaClLog(4, "NaClReverseServiceCtor: NaClSimpleRevServiceCtor failed\n");
    goto done;
  }
  /*
   * The subclass vtable is installed before the mutex/condvar are
   * constructed, so the unwind paths below invoke the subclass Dtor.
   */
  NACL_VTBL(NaClRefCount, self) =
      (struct NaClRefCountVtbl *) &kNaClReverseServiceVtbl;
  if (!NaClMutexCtor(&self->mu)) {
    NaClLog(4, "NaClMutexCtor failed\n");
    goto mutex_ctor_fail;
  }
  if (!NaClCondVarCtor(&self->cv)) {
    NaClLog(4, "NaClCondVar failed\n");
    goto condvar_ctor_fail;
  }
  /* success return path */
  self->iface = (struct NaClReverseInterface *) NaClRefCountRef(
      (struct NaClRefCount *) iface);
  self->thread_count = 0;
  retval = 1;
  goto done;

  /* cleanup unwind */
 condvar_ctor_fail:
  NaClMutexDtor(&self->mu);
 mutex_ctor_fail:
  (*NACL_VTBL(NaClRefCount, self)->Dtor)((struct NaClRefCount *) self);
 done:
  return retval;
}
int NaClLdtInitPlatformSpecific() { HMODULE hmod = GetModuleHandleA("ntdll.dll"); /* * query_information_process is used to examine LDT entries to find a free * selector, etc. */ query_information_process = (NTQUERY)(GetProcAddress(hmod, "NtQueryInformationProcess")); if (query_information_process == 0) { /* * Unable to get query_information_process, which is needed for querying * the LDT. */ return 0; } /* * set_information_process is one of the methods used to update an LDT * entry for a given selector. */ set_information_process = (NTSETINFO)(GetProcAddress(hmod, "ZwSetInformationProcess")); /* * set_ldt_entries is the other method used to update an LDT entry for a * given selector. */ set_ldt_entries = (NTSETLDT)(GetProcAddress(hmod, "NtSetLdtEntries")); if (NULL == set_ldt_entries) { set_ldt_entries = (NTSETLDT)(GetProcAddress(hmod, "ZwSetLdtEntries")); } if ((NULL == set_ldt_entries) && (NULL == set_information_process)) { /* * Unable to locate either method for setting the LDT. */ return 0; } return NaClMutexCtor(&nacl_ldt_mutex); }
/*
 * Construct a NaClSimpleLtdService: a simple service with a cap of
 * max_cli concurrent clients, tracked by num_clients under mu/cv.
 *
 * Returns 1 on success, 0 on failure; on failure the object is unwound
 * in reverse construction order and left destroyed.
 */
int NaClSimpleLtdServiceCtor(
    struct NaClSimpleLtdService *self,
    struct NaClSrpcHandlerDesc const *srpc_handlers,
    int max_cli,
    NaClThreadIfFactoryFunction thread_factory_fn,
    void *thread_factory_data) {
  NaClLog(4, "Entered NaClSimpleLtdServiceCtor\n");
  if (!NaClSimpleServiceCtor((struct NaClSimpleService *) self,
                             srpc_handlers,
                             thread_factory_fn,
                             thread_factory_data)) {
    NaClLog(4, "NaClSimpleServiceCtor failed\n");
    goto base_ctor_fail;
  }
  if (!NaClMutexCtor(&self->mu)) {
    NaClLog(4, "NaClSimpleLtdServiceCtor: NaClMutexCtor failed\n");
    goto mutex_ctor_fail;
  }
  if (!NaClCondVarCtor(&self->cv)) {
    NaClLog(4, "NaClSimpleLtdServiceCtor: NaClCondVarCtor failed\n");
    goto condvar_ctor_fail;
  }
  self->max_clients = max_cli;
  self->num_clients = 0;
  /* vtable installed last: everything the subclass Dtor needs exists now */
  NACL_VTBL(NaClRefCount, self) =
      (struct NaClRefCountVtbl *) &kNaClSimpleLtdServiceVtbl;
  NaClLog(4, "NaClSimpleLtdServiceCtor: success\n");
  return 1;

  /* failure cascade, in reverse construction order */
 condvar_ctor_fail:
  NaClMutexDtor(&self->mu);
 mutex_ctor_fail:
  /* vtbl is still the base class's here, so this runs the base Dtor */
  (*NACL_VTBL(NaClRefCount, self)->Dtor)((struct NaClRefCount *) self);
 base_ctor_fail:
  return 0;
}
/*
 * Windows time-module initializer: raise the multimedia timer
 * resolution to its minimum period, compute the Unix epoch start in
 * Windows FILETIME milliseconds, construct the state mutex, and
 * calibrate the high resolution clock.
 *
 * NOTE(review): the timeGetDevCaps / SystemTimeToFileTime return values
 * are not checked — confirm that is acceptable for init-time code.
 */
void NaClTimeInternalInit(struct NaClTimeState *ntsp) {
  TIMECAPS tc;
  SYSTEMTIME st;
  FILETIME ft;

  /*
   * Maximize timer/Sleep resolution.
   */
  timeGetDevCaps(&tc, sizeof tc);
  ntsp->wPeriodMin = tc.wPeriodMin;
  ntsp->time_resolution_ns = tc.wPeriodMin * NACL_NANOS_PER_MILLI;
  /* verbosity level 0: presumably always emitted — confirm intent */
  NaClLog(0, "NaClTimeInternalInit: timeBeginPeriod(%u)\n", tc.wPeriodMin);
  timeBeginPeriod(ntsp->wPeriodMin);
  /*
   * Compute Unix epoch start; calibrate high resolution clock.
   */
  st.wYear = 1970;
  st.wMonth = 1;
  st.wDay = 1;
  st.wHour = 0;
  st.wMinute = 0;
  st.wSecond = 0;
  st.wMilliseconds = 0;
  SystemTimeToFileTime(&st, &ft);
  ntsp->epoch_start_ms = NaClFileTimeToMs(&ft);
  ntsp->last_reported_time_ms = 0;
  NaClLog(0, "Unix epoch start is %"NACL_PRIu64"ms in Windows epoch time\n",
          ntsp->epoch_start_ms);
  NaClMutexCtor(&ntsp->mu);
  /*
   * We don't actually grab the lock, since the module initializer
   * should be called before going threaded.
   */
  NaClCalibrateWindowsClockMu(ntsp);
}
/* Based on NaClAppThreadCtor() */ static void InitThread(struct NaClApp *nap, struct NaClAppThread *natp) { struct NaClDescEffectorLdr *effp; memset(natp, 0xff, sizeof(*natp)); natp->nap = nap; if (!NaClMutexCtor(&natp->mu)) { ASSERT(0); } if (!NaClCondVarCtor(&natp->cv)) { ASSERT(0); } natp->is_privileged = 0; effp = (struct NaClDescEffectorLdr *) malloc(sizeof *effp); ASSERT_NE(effp, NULL); if (!NaClDescEffectorLdrCtor(effp, natp)) { ASSERT(0); } natp->effp = (struct NaClDescEffector *) effp; }
int main(int ac, char **av) { int exit_status = -1; int opt; size_t num_threads = 16; size_t n; struct NaClThread thr; while (EOF != (opt = getopt(ac, av, "n:s:t:"))) { switch (opt) { case 'n': num_threads = strtoul(optarg, (char **) NULL, 0); break; case 't': gNumTriesSufficient = strtoul(optarg, (char **) NULL, 0); break; default: fprintf(stderr, "Usage: nacl_semaphore_test [args]\n" " -n n number of threads used to test semaphore\n" " -t n number of TryWait operations before blocking Try\n"); goto cleanup0; } } NaClPlatformInit(); if (!NaClSemCtor(&gSem, 0)) { fprintf(stderr, "nacl_semaphore_test: NaClSemCtor failed!\n"); goto cleanup1; } if (!NaClMutexCtor(&gMu)) { fprintf(stderr, "nacl_semaphore_test: NaClMutexCtor failed!\n"); goto cleanup2; } if (!NaClCondVarCtor(&gCv)) { fprintf(stderr, "nacl_semaphore_test: NaClCondVarCtor failed!\n"); goto cleanup3; } for (n = 0; n < num_threads; ++n) { if (!NaClThreadCtor(&thr, ThreadMain, (void *) (uintptr_t) n, STACK_SIZE_BYTES)) { fprintf(stderr, "nacl_semaphore_test: could not create thread %"NACL_PRIdS"\n", n); goto cleanup4; /* osx leak semaphore otherwise */ } } NaClMutexLock(&gMu); while (gNumThreadsTried != num_threads) { NaClCondVarWait(&gCv, &gMu); } NaClMutexUnlock(&gMu); for (n = 0; n < num_threads; ++n) { NaClSemPost(&gSem); /* let a thread go */ } NaClMutexLock(&gMu); while (gNumThreadsDone != num_threads) { NaClCondVarWait(&gCv, &gMu); } exit_status = gFailure; NaClMutexUnlock(&gMu); if (0 == exit_status) { printf("SUCCESS\n"); } cleanup4: /* single exit with (ah hem) simulation of RAII via cleanup sled */ NaClCondVarDtor(&gCv); cleanup3: NaClMutexDtor(&gMu); cleanup2: NaClSemDtor(&gSem); cleanup1: NaClPlatformFini(); cleanup0: return exit_status; }
/*
 * Platform-specific LDT initialization: on this platform only the LDT
 * mutex needs to be constructed.
 *
 * Returns non-zero on success, 0 on failure.
 */
int NaClLdtInitPlatformSpecific(void) {
  return NaClMutexCtor(&nacl_ldt_mutex);
}
/*
 * Allocate and construct a NaClAppThread for nap, with the given user
 * entry point, stack pointer, and TLS values.  Arch-specific context
 * setup is delegated to NaClAppThreadInitArchSpecific.
 *
 * Returns the new thread on success, NULL on failure; on failure all
 * resources acquired so far are released in reverse order via the
 * cleanup labels and the aligned allocation is freed.
 */
struct NaClAppThread *NaClAppThreadMake(struct NaClApp *nap,
                                        uintptr_t usr_entry,
                                        uintptr_t usr_stack_ptr,
                                        uint32_t user_tls1,
                                        uint32_t user_tls2) {
  struct NaClAppThread *natp;

  natp = NaClAlignedMalloc(sizeof *natp, __alignof(struct NaClAppThread));
  if (natp == NULL) {
    return NULL;
  }

  NaClLog(4, " natp = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) natp);
  NaClLog(4, " nap = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) nap);
  NaClLog(4, "usr_stack_ptr = 0x%016"NACL_PRIxPTR"\n", usr_stack_ptr);

  /*
   * Set these early, in case NaClTlsAllocate() wants to examine them.
   */
  natp->nap = nap;
  natp->thread_num = -1; /* illegal index */
  natp->host_thread_is_defined = 0;
  memset(&natp->host_thread, 0, sizeof(natp->host_thread));

  if (!NaClAppThreadInitArchSpecific(natp, usr_entry, usr_stack_ptr)) {
    goto cleanup_free;
  }
  NaClTlsSetTlsValue1(natp, user_tls1);
  NaClTlsSetTlsValue2(natp, user_tls2);

  /* set before the ctors below so cleanup_mu can test it safely */
  natp->signal_stack = NULL;
  natp->exception_stack = 0;
  natp->exception_flag = 0;

  if (!NaClMutexCtor(&natp->mu)) {
    goto cleanup_free;
  }
  if (!NaClSignalStackAllocate(&natp->signal_stack)) {
    goto cleanup_mu;
  }
  if (!NaClMutexCtor(&natp->suspend_mu)) {
    goto cleanup_mu;
  }
  natp->suspend_state = NACL_APP_THREAD_TRUSTED;
  natp->suspended_registers = NULL;
  natp->fault_signal = 0;
  natp->dynamic_delete_generation = 0;

  if (!NaClCondVarCtor(&natp->futex_condvar)) {
    goto cleanup_suspend_mu;
  }
  return natp;

  /* unwind in reverse construction order */
 cleanup_suspend_mu:
  NaClMutexDtor(&natp->suspend_mu);
 cleanup_mu:
  NaClMutexDtor(&natp->mu);
  if (NULL != natp->signal_stack) {
    NaClSignalStackFree(&natp->signal_stack);
    natp->signal_stack = NULL;
  }
 cleanup_free:
  NaClAlignedFree(natp);
  return NULL;
}
/*
 * Allocate and construct a NaClAppThread for nap, with the given user
 * entry point, stack pointer, and TLS values.  A TLS index is allocated
 * up front (see the long comment below) and the full user thread
 * context is constructed with it.
 *
 * Returns the new thread on success, NULL on failure; on failure the
 * resources acquired so far are released via the cleanup labels.
 * NOTE(review): the NaClTlsAllocate'd index does not appear to be
 * released on the later failure paths — confirm whether a matching
 * free is required here.
 */
struct NaClAppThread *NaClAppThreadMake(struct NaClApp *nap,
                                        uintptr_t usr_entry,
                                        uintptr_t usr_stack_ptr,
                                        uint32_t user_tls1,
                                        uint32_t user_tls2) {
  struct NaClAppThread *natp;
  uint32_t tls_idx;

  natp = NaClAlignedMalloc(sizeof *natp, __alignof(struct NaClAppThread));
  if (natp == NULL) {
    return NULL;
  }

  NaClLog(4, " natp = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) natp);
  NaClLog(4, " nap = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) nap);
  NaClLog(4, "usr_stack_ptr = 0x%016"NACL_PRIxPTR"\n", usr_stack_ptr);

  /*
   * Set these early, in case NaClTlsAllocate() wants to examine them.
   */
  natp->nap = nap;
  natp->thread_num = -1; /* illegal index */
  natp->host_thread_is_defined = 0;
  memset(&natp->host_thread, 0, sizeof(natp->host_thread));

  /*
   * Even though we don't know what segment base/range should gs/r9/nacl_tls_idx
   * select, we still need one, since it identifies the thread when we context
   * switch back.  This use of a dummy tls is only needed for the main thread,
   * which is expected to invoke the tls_init syscall from its crt code (before
   * main or much of libc can run).  Other threads are spawned with the thread
   * pointer address as a parameter.
   */
  tls_idx = NaClTlsAllocate(natp);
  if (NACL_TLS_INDEX_INVALID == tls_idx) {
    NaClLog(LOG_ERROR, "No tls for thread, num_thread %d\n", nap->num_threads);
    goto cleanup_free;
  }
  NaClThreadContextCtor(&natp->user, nap, usr_entry, usr_stack_ptr, tls_idx);
  NaClTlsSetTlsValue1(natp, user_tls1);
  NaClTlsSetTlsValue2(natp, user_tls2);

  /* set before the ctors below so cleanup_mu can test it safely */
  natp->signal_stack = NULL;
  natp->exception_stack = 0;
  natp->exception_flag = 0;

  if (!NaClMutexCtor(&natp->mu)) {
    goto cleanup_free;
  }
  if (!NaClSignalStackAllocate(&natp->signal_stack)) {
    goto cleanup_mu;
  }
  if (!NaClMutexCtor(&natp->suspend_mu)) {
    goto cleanup_mu;
  }
  natp->suspend_state = NACL_APP_THREAD_TRUSTED;
  natp->suspended_registers = NULL;
  natp->fault_signal = 0;
  natp->dynamic_delete_generation = 0;
  return natp;

  /* unwind in reverse construction order */
 cleanup_mu:
  NaClMutexDtor(&natp->mu);
  if (NULL != natp->signal_stack) {
    NaClSignalStackFree(&natp->signal_stack);
    natp->signal_stack = NULL;
  }
 cleanup_free:
  NaClAlignedFree(natp);
  return NULL;
}
/*
 * Windows time-module initializer (QPC-capable variant): choose the
 * timer resolution (max period when low resolution is allowed,
 * otherwise raise resolution via timeBeginPeriod), compute the Unix
 * epoch start in Windows FILETIME milliseconds, construct the state
 * mutex, and calibrate either the QueryPerformanceCounter clock or the
 * low-resolution clock.
 *
 * NOTE(review): timeGetDevCaps / SystemTimeToFileTime return values
 * are not checked — confirm that is acceptable for init-time code.
 */
void NaClTimeInternalInit(struct NaClTimeState *ntsp) {
  TIMECAPS tc;
  SYSTEMTIME st;
  FILETIME ft;
  LARGE_INTEGER qpc_freq;

  /*
   * Maximize timer/Sleep resolution.
   */
  timeGetDevCaps(&tc, sizeof tc);
  if (ntsp->allow_low_resolution) {
    /* Set resolution to max so we don't over-promise. */
    ntsp->wPeriodMin = tc.wPeriodMax;
  } else {
    ntsp->wPeriodMin = tc.wPeriodMin;
    timeBeginPeriod(ntsp->wPeriodMin);
    NaClLog(4, "NaClTimeInternalInit: timeBeginPeriod(%u)\n",
            ntsp->wPeriodMin);
  }
  ntsp->time_resolution_ns = ntsp->wPeriodMin * NACL_NANOS_PER_MILLI;
  /*
   * Compute Unix epoch start; calibrate high resolution clock.
   */
  st.wYear = 1970;
  st.wMonth = 1;
  st.wDay = 1;
  st.wHour = 0;
  st.wMinute = 0;
  st.wSecond = 0;
  st.wMilliseconds = 0;
  SystemTimeToFileTime(&st, &ft);
  ntsp->epoch_start_ms = NaClFileTimeToMs(&ft);
  NaClLog(4, "Unix epoch start is %"NACL_PRIu64"ms in Windows epoch time\n",
          ntsp->epoch_start_ms);

  NaClMutexCtor(&ntsp->mu);
  /*
   * We don't actually grab the lock, since the module initializer
   * should be called before going threaded.
   */
  ntsp->can_use_qpc = 0;
  if (!ntsp->allow_low_resolution) {
    ntsp->can_use_qpc = QueryPerformanceFrequency(&qpc_freq);
    /*
     * On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is
     * unreliable.  Fallback to low-res clock.
     */
    if (strstr(CPU_GetBrandString(), "AuthenticAMD") && (CPU_GetFamily() == 15))
      ntsp->can_use_qpc = 0;

    NaClLog(4, "CPU_GetBrandString->[%s] ntsp->can_use_qpc=%d\n",
            CPU_GetBrandString(), ntsp->can_use_qpc);

    if (ntsp->can_use_qpc) {
      ntsp->qpc_frequency = qpc_freq.QuadPart;
      NaClLog(4, "qpc_frequency = %"NACL_PRId64" (counts/s)\n",
              ntsp->qpc_frequency);
      /* calibration failure demotes us to the low-resolution clock */
      if (!NaClCalibrateWindowsClockQpc(ntsp))
        ntsp->can_use_qpc = 0;
    }
    if (!ntsp->can_use_qpc)
      NaClCalibrateWindowsClockMu(ntsp);
  }
}
/*
 * "X" (checked) variant of NaClMutexCtor: construct *mp, aborting the
 * process via NaClLog(LOG_FATAL) if construction fails.
 */
void NaClXMutexCtor(struct NaClMutex *mp) {
  if (0 == NaClMutexCtor(mp)) {
    NaClLog(LOG_FATAL, "NaClXMutexCtor failed\n");
  }
}
int NaClAppThreadCtor(struct NaClAppThread *natp, struct NaClApp *nap, int is_privileged, uintptr_t usr_entry, uintptr_t usr_stack_ptr, uint32_t tls_idx, uintptr_t sys_tdb) { int rv; uint64_t thread_idx; struct NaClDescEffectorLdr *effp; NaClLog(4, " natp = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) natp); NaClLog(4, " nap = 0x%016"NACL_PRIxPTR"\n", (uintptr_t) nap); NaClLog(4, "usr_stack_ptr = 0x%016"NACL_PRIxPTR"\n", usr_stack_ptr); NaClThreadContextCtor(&natp->user, nap, usr_entry, usr_stack_ptr, tls_idx); effp = NULL; natp->signal_stack = NULL; if (!NaClMutexCtor(&natp->mu)) { return 0; } if (!NaClCondVarCtor(&natp->cv)) { goto cleanup_mutex; } natp->is_privileged = is_privileged; if (!NaClClosureResultCtor(&natp->result)) { goto cleanup_cv; } natp->sysret = 0; natp->nap = nap; effp = (struct NaClDescEffectorLdr *) malloc(sizeof *effp); if (NULL == effp) { goto cleanup_cv; } if (!NaClDescEffectorLdrCtor(effp, natp)) { goto cleanup_cv; } natp->effp = (struct NaClDescEffector *) effp; effp = NULL; if (!NaClSignalStackAllocate(&natp->signal_stack)) { goto cleanup_cv; } natp->holding_sr_locks = 0; natp->state = NACL_APP_THREAD_ALIVE; natp->thread_num = -1; /* illegal index */ natp->sys_tdb = sys_tdb; natp->dynamic_delete_generation = 0; thread_idx = NaClGetThreadIdx(natp); nacl_thread[thread_idx] = natp; nacl_user[thread_idx] = &natp->user; nacl_sys[thread_idx] = &natp->sys; rv = NaClThreadCtor(&natp->thread, NaClThreadLauncher, (void *) natp, NACL_KERN_STACK_SIZE); if (rv != 0) { return rv; /* Success */ } NaClClosureResultDtor(&natp->result); cleanup_cv: NaClCondVarDtor(&natp->cv); cleanup_mutex: NaClMutexDtor(&natp->mu); free(effp); natp->effp = NULL; if (NULL != natp->signal_stack) { NaClSignalStackFree(&natp->signal_stack); natp->signal_stack = NULL; } return 0; }