void NaClFileLockManagerLock(struct NaClFileLockManager *self,
                             int desc) {
  struct NaClFileLockEntry key;
  struct NaClFileLockEntry **existing;
  struct NaClFileLockEntry *entry;

  (*self->set_file_identity_data)(&key, desc);
  NaClXMutexLock(&self->mu);
  existing = NaClFileLockManagerFindEntryMu(self, &key);
  if (NULL == existing) {
    /* make new entry */
    entry = NaClFileLockManagerEntryFactory(self, desc);
    entry->next = self->head;
    self->head = entry;
    NaClXMutexUnlock(&self->mu);
  } else {
    entry = *existing;
    NaClXMutexLock(&entry->mu);
    entry->num_waiting++;
    /* arithmetic overflow */
    CHECK(0 != entry->num_waiting);
    /* drop container lock after ensuring that the entry will not be deleted */
    NaClXMutexUnlock(&self->mu);
    while (entry->holding_lock) {
      NaClXCondVarWait(&entry->cv, &entry->mu);
    }
    entry->holding_lock = 1;
    entry->num_waiting--;
    NaClXMutexUnlock(&entry->mu);
  }
  (*self->take_file_lock)(desc);
}
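The release path is not shown in this listing: for the wait loop in the else branch to terminate, the lock holder must clear holding_lock and wake a waiter under entry->mu (in the real code that is the job of the manager's unlock routine). A minimal pthread sketch of the claim/release pairing, with illustrative names that are not part of the NaCl API:

#include <pthread.h>

struct lock_entry {
  pthread_mutex_t mu;
  pthread_cond_t cv;
  int holding_lock;  /* 1 while some thread owns the file lock */
  int num_waiting;   /* threads blocked in the wait loop */
};

/* Claim side: mirrors the else branch above. */
static void entry_claim(struct lock_entry *e) {
  pthread_mutex_lock(&e->mu);
  e->num_waiting++;
  while (e->holding_lock) {
    pthread_cond_wait(&e->cv, &e->mu);
  }
  e->holding_lock = 1;
  e->num_waiting--;
  pthread_mutex_unlock(&e->mu);
}

/* Release side (the path the listing omits): clear the flag and wake
 * one waiter; with a single boolean predicate, signal suffices. */
static void entry_release(struct lock_entry *e) {
  pthread_mutex_lock(&e->mu);
  e->holding_lock = 0;
  pthread_cond_signal(&e->cv);
  pthread_mutex_unlock(&e->mu);
}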
static void NaClManifestProxyConnectionDtor(struct NaClRefCount *vself) {
  struct NaClManifestProxyConnection *self =
      (struct NaClManifestProxyConnection *) vself;

  NaClLog(4,
          "Entered NaClManifestProxyConnectionDtor: self 0x%"NACL_PRIxPTR"\n",
          (uintptr_t) self);
  NaClXMutexLock(&self->mu);
  while (!self->channel_initialized) {
    NaClLog(4,
            "NaClManifestProxyConnectionDtor:"
            " waiting for connection initialization\n");
    NaClXCondVarWait(&self->cv, &self->mu);
  }
  NaClXMutexUnlock(&self->mu);

  NaClLog(4, "NaClManifestProxyConnectionDtor: dtoring\n");
  NaClCondVarDtor(&self->cv);
  NaClMutexDtor(&self->mu);
  NaClSrpcDtor(&self->client_channel);
  NACL_VTBL(NaClSimpleServiceConnection, self) =
      &kNaClSimpleServiceConnectionVtbl;
  (*NACL_VTBL(NaClRefCount, self)->Dtor)(vself);
}
static void NaClManifestWaitForChannel_yield_mu(
    struct NaClManifestProxyConnection *self) {
  NaClLog(4, "Entered NaClManifestWaitForChannel_yield_mu\n");
  NaClXMutexLock(&self->mu);
  NaClLog(4, "NaClManifestWaitForChannel_yield_mu: checking channel\n");
  while (!self->channel_initialized) {
    NaClLog(4, "NaClManifestWaitForChannel_yield_mu: waiting\n");
    NaClXCondVarWait(&self->cv, &self->mu);
  }
  /* NB: returns with self->mu held (the _yield_mu suffix); the caller
   * must unlock. */
  NaClLog(4, "Leaving NaClManifestWaitForChannel_yield_mu\n");
}
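Two details are easy to miss here: the helper deliberately returns with self->mu held, and both this wait loop and the destructor's loop above depend on the channel setter broadcasting on the same condition variable. A distilled pthread sketch of that convention, with illustrative names:

#include <pthread.h>

struct conn {
  pthread_mutex_t mu;
  pthread_cond_t cv;
  int channel_initialized;
};

/* Mirrors the _yield_mu convention: returns with c->mu HELD so the
 * caller can use the channel under the lock, then unlock it. */
static void wait_for_channel_yield_mu(struct conn *c) {
  pthread_mutex_lock(&c->mu);
  while (!c->channel_initialized) {
    pthread_cond_wait(&c->cv, &c->mu);
  }
  /* deliberately no unlock here */
}

/* Setter side: publish the flag and wake ALL waiters, since both a
 * channel user and a destructor may be blocked on the same cv. */
static void set_channel_initialized(struct conn *c) {
  pthread_mutex_lock(&c->mu);
  c->channel_initialized = 1;
  pthread_cond_broadcast(&c->cv);
  pthread_mutex_unlock(&c->mu);
}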
void NaClReverseServiceWaitForServiceThreadsToExit(
    struct NaClReverseService *self) {
  NaClLog(4, "NaClReverseServiceWaitForServiceThreadsToExit\n");
  NaClXMutexLock(&self->mu);
  while (0 != self->thread_count) {
    NaClXCondVarWait(&self->cv, &self->mu);
    NaClLog(5, "NaClReverseServiceWaitForServiceThreadsToExit: woke up\n");
  }
  NaClXMutexUnlock(&self->mu);
  NaClLog(4, "NaClReverseServiceWaitForServiceThreadsToExit: all done\n");
}
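This loop can only terminate if every exiting service thread decrements thread_count and wakes the waiter; that counterpart is not in this listing. A distilled pthread sketch, with illustrative names:

#include <pthread.h>

struct svc {
  pthread_mutex_t mu;
  pthread_cond_t cv;
  int thread_count;
};

/* Run by each service thread as it exits: drop the count under the
 * lock and broadcast so WaitForServiceThreadsToExit re-evaluates its
 * 0 != thread_count predicate. */
static void svc_thread_exiting(struct svc *s) {
  pthread_mutex_lock(&s->mu);
  s->thread_count--;
  pthread_cond_broadcast(&s->cv);
  pthread_mutex_unlock(&s->mu);
}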
NaClSyncStatus NaClIntrMutexLock(struct NaClIntrMutex *mp) {
  NaClSyncStatus rv = NACL_SYNC_INTERNAL_ERROR;

  NaClXMutexLock(&mp->mu);
  while (NACL_INTR_LOCK_HELD == mp->lock_state) {
    NaClXCondVarWait(&mp->cv, &mp->mu);
  }
  if (NACL_INTR_LOCK_FREE == mp->lock_state) {
    mp->lock_state = NACL_INTR_LOCK_HELD;
    rv = NACL_SYNC_OK;
  }
  if (NACL_INTR_LOCK_INTERRUPTED == mp->lock_state) {
    rv = NACL_SYNC_MUTEX_INTERRUPTED;
  }
  NaClXMutexUnlock(&mp->mu);
  return rv;
}
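Note that the interrupted state is sticky as far as this function is concerned: once lock_state is NACL_INTR_LOCK_INTERRUPTED, the while loop no longer blocks and every subsequent call returns NACL_SYNC_MUTEX_INTERRUPTED, so callers should unwind rather than retry. A hedged sketch of a caller, assuming the matching NaClIntrMutexUnlock routine; the wrapper name is illustrative:

/* Hypothetical caller: back out on interruption instead of spinning,
 * since retrying the lock would keep returning the interrupted status. */
static int DoWorkUnderIntrMutex(struct NaClIntrMutex *mp) {
  if (NACL_SYNC_OK != NaClIntrMutexLock(mp)) {
    return 0;  /* interrupted or internal error: caller unwinds */
  }
  /* ... critical section ... */
  NaClIntrMutexUnlock(mp);
  return 1;
}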
int NaClWaitForMainThreadToExit(struct NaClApp *nap) {
  NaClLog(3, "NaClWaitForMainThreadToExit: taking NaClApp lock\n");
  NaClXMutexLock(&nap->mu);
  NaClLog(3, " waiting for exit status\n");
  while (nap->running) {
    NaClXCondVarWait(&nap->cv, &nap->mu);
    NaClLog(3, " wakeup, nap->running %d, nap->exit_status %d\n",
            nap->running, nap->exit_status);
  }
  NaClXMutexUnlock(&nap->mu);

  /*
   * Some thread invoked the exit (exit_group) syscall.
   */

  if (NULL != nap->debug_stub_callbacks) {
    nap->debug_stub_callbacks->process_exit_hook();
  }

  return NACL_ABI_WEXITSTATUS(nap->exit_status);
}
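For this loop to make progress, the exit (exit_group) path must store the exit status and clear running under nap->mu, then broadcast. A sketch of that publication step with a hypothetical helper name; in the real code the update happens inside the exit syscall handling:

static void PublishExitStatus(struct NaClApp *nap, int status) {
  NaClXMutexLock(&nap->mu);
  /* Both stores happen under nap->mu, so the waiter above observes a
   * consistent (running, exit_status) pair when it wakes. */
  nap->exit_status = status;
  nap->running = 0;
  NaClXCondVarBroadcast(&nap->cv);
  NaClXMutexUnlock(&nap->mu);
}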
int NaClSimpleLtdServiceAcceptConnection(
    struct NaClSimpleService *vself,
    struct NaClSimpleServiceConnection **out) {
  struct NaClSimpleLtdService *self = (struct NaClSimpleLtdService *) vself;
  int rv;

  NaClXMutexLock(&self->mu);
  while (self->num_clients >= self->max_clients) {
    NaClXCondVarWait(&self->cv, &self->mu);
  }
  NaClXMutexUnlock(&self->mu);
  /*
   * Other threads can only decrement num_clients, so the admission
   * check above cannot be invalidated between the unlock and the
   * AcceptConnection call.
   */
  rv = (*kNaClSimpleServiceVtbl.AcceptConnection)(vself, out);
  NaClXMutexLock(&self->mu);
  self->num_clients++;
  NaClXMutexUnlock(&self->mu);
  return rv;
}
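The unlock-then-accept window is safe only because num_clients moves monotonically downward from the perspective of other threads, and the decrement side must also signal, or an acceptor parked at the max_clients bound would never wake. A distilled pthread sketch of that teardown side, with illustrative names:

#include <pthread.h>

struct ltd_svc {
  pthread_mutex_t mu;
  pthread_cond_t cv;
  int num_clients;
  int max_clients;
};

/* Connection teardown: drop the count and wake an acceptor that may
 * be parked in the num_clients >= max_clients wait loop. */
static void ltd_client_closed(struct ltd_svc *s) {
  pthread_mutex_lock(&s->mu);
  s->num_clients--;
  pthread_cond_signal(&s->cv);
  pthread_mutex_unlock(&s->mu);
}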
int NaClAppLaunchServiceThreads(struct NaClApp *nap) {
  struct NaClManifestProxy *manifest_proxy = NULL;
  struct NaClKernelService *kernel_service = NULL;
  int rv = 0;
  enum NaClReverseChannelInitializationState init_state;

  NaClNameServiceLaunch(nap->name_service);

  kernel_service = (struct NaClKernelService *) malloc(sizeof *kernel_service);
  if (NULL == kernel_service) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: No memory for kern service\n");
    goto done;
  }
  if (!NaClKernelServiceCtor(kernel_service,
                             NaClAddrSpSquattingThreadIfFactoryFunction,
                             (void *) nap,
                             nap)) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: KernServiceCtor failed\n");
    free(kernel_service);
    kernel_service = NULL;
    goto done;
  }
  if (!NaClSimpleServiceStartServiceThread((struct NaClSimpleService *)
                                           kernel_service)) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: KernService start service failed\n");
    goto done;
  }
  /*
   * NB: StartServiceThread grabbed another reference to kernel_service,
   * used by the service thread.  Closing the connection capability
   * should cause the service thread to shut down and in turn release
   * that reference.
   */

  /*
   * The locking here isn't really needed.  Here is why:
   * reverse_channel_initialized is written in the reverse_setup RPC
   * handler on the secure command channel RPC handler thread, and
   * the RPC order requires that the plugin invoke reverse_setup prior
   * to invoking start_module, so there will have been plenty of other
   * synchronization operations to force cache coherency
   * (module_may_start, for example, is set in the cache of the secure
   * channel RPC handler (in start_module) and read by the main
   * thread, and the synchronization operations needed to propagate
   * its value properly suffice to propagate
   * reverse_channel_initialized as well).  However, reading it while
   * holding a lock is more obviously correct for tools like tsan.
   * Due to the RPC order, it is impossible for
   * reverse_channel_initialized to get set after the unlock and
   * before the if test.
   */
  NaClXMutexLock(&nap->mu);
  /*
   * If no reverse_setup RPC was made, then we do not set up a
   * manifest proxy.  Otherwise, we make sure that the reverse channel
   * setup is done, so that the application can actually use
   * reverse-channel-based services such as the manifest proxy.
   */
  if (NACL_REVERSE_CHANNEL_UNINITIALIZED !=
      (init_state = nap->reverse_channel_initialization_state)) {
    while (NACL_REVERSE_CHANNEL_INITIALIZED !=
           (init_state = nap->reverse_channel_initialization_state)) {
      NaClXCondVarWait(&nap->cv, &nap->mu);
    }
  }
  NaClXMutexUnlock(&nap->mu);
  if (NACL_REVERSE_CHANNEL_INITIALIZED != init_state) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: no reverse channel;"
             " launched kernel services.\n"));
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: no reverse channel;"
             " NOT launching manifest proxy.\n"));
    nap->kernel_service = kernel_service;
    kernel_service = NULL;
    rv = 1;
    goto done;
  }

  /*
   * Allocate/construct the manifest proxy without grabbing global
   * locks.
   */
  NaClLog(3, "NaClAppLaunchServiceThreads: launching manifest proxy\n");

  /*
   * The ReverseClientSetup RPC should be done via the command channel
   * prior to the load_module / start_module RPCs, and this code runs
   * after those, so checking nap->reverse_client suffices for
   * determining whether the proxy is exporting reverse services.
   */
  manifest_proxy = (struct NaClManifestProxy *) malloc(sizeof *manifest_proxy);
  if (NULL == manifest_proxy) {
    NaClLog(LOG_ERROR, "No memory for manifest proxy\n");
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }
  if (!NaClManifestProxyCtor(manifest_proxy,
                             NaClAddrSpSquattingThreadIfFactoryFunction,
                             (void *) nap,
                             nap)) {
    NaClLog(LOG_ERROR, "ManifestProxyCtor failed\n");
    /* do not leave a non-NULL pointer to a not-fully constructed object */
    free(manifest_proxy);
    manifest_proxy = NULL;
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }

  /*
   * NaClSimpleServiceStartServiceThread requires the nap->mu lock.
   */
  if (!NaClSimpleServiceStartServiceThread((struct NaClSimpleService *)
                                           manifest_proxy)) {
    NaClLog(LOG_ERROR, "ManifestProxy start service failed\n");
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }

  NaClXMutexLock(&nap->mu);
  CHECK(NULL == nap->manifest_proxy);
  CHECK(NULL == nap->kernel_service);
  nap->manifest_proxy = manifest_proxy;
  manifest_proxy = NULL;
  nap->kernel_service = kernel_service;
  kernel_service = NULL;
  NaClXMutexUnlock(&nap->mu);
  rv = 1;

 done:
  NaClXMutexLock(&nap->mu);
  if (NULL != nap->manifest_proxy) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: adding manifest proxy to"
             " name service\n"));
    (*NACL_VTBL(NaClNameService, nap->name_service)->
     CreateDescEntry)(nap->name_service,
                      "ManifestNameService", NACL_ABI_O_RDWR,
                      NaClDescRef(nap->manifest_proxy->base.bound_and_cap[1]));
  }
  if (NULL != nap->kernel_service) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: adding kernel service to"
             " name service\n"));
    (*NACL_VTBL(NaClNameService, nap->name_service)->
     CreateDescEntry)(nap->name_service,
                      "KernelService", NACL_ABI_O_RDWR,
                      NaClDescRef(nap->kernel_service->base.bound_and_cap[1]));
  }
  NaClXMutexUnlock(&nap->mu);

  /*
   * Single exit path.
   *
   * Error cleanup invariant.  No service thread should be running
   * (modulo asynchronous shutdown).  Automatic variables refer to
   * fully constructed objects if non-NULL, and when ownership is
   * transferred to the NaClApp object the corresponding automatic
   * variable is set to NULL.
   */
  NaClRefCountSafeUnref((struct NaClRefCount *) manifest_proxy);
  NaClRefCountSafeUnref((struct NaClRefCount *) kernel_service);
  return rv;
}
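The wait loop near the top of NaClAppLaunchServiceThreads is released by whichever thread completes reverse channel setup; the essential step is to advance the state and broadcast on nap->cv under nap->mu. A sketch with a hypothetical helper name; in the real code the transition happens in the reverse_setup RPC handling on the secure command channel thread:

static void PublishReverseChannelInitialized(struct NaClApp *nap) {
  NaClXMutexLock(&nap->mu);
  nap->reverse_channel_initialization_state =
      NACL_REVERSE_CHANNEL_INITIALIZED;
  /* Wake NaClAppLaunchServiceThreads, which may be blocked in its
   * while-not-INITIALIZED loop. */
  NaClXCondVarBroadcast(&nap->cv);
  NaClXMutexUnlock(&nap->mu);
}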
int32_t NaClSysMunmap(struct NaClAppThread *natp,
                      void *start,
                      size_t length) {
  int32_t retval = -NACL_ABI_EINVAL;
  uintptr_t sysaddr;
  int holding_app_lock = 0;
  size_t alloc_rounded_length;

  NaClLog(3, "Entered NaClSysMunmap(0x%08"NACL_PRIxPTR", "
          "0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n",
          (uintptr_t) natp, (uintptr_t) start, length);

  NaClSysCommonThreadSyscallEnter(natp);

  if (!NaClIsAllocPageMultiple((uintptr_t) start)) {
    NaClLog(4, "start addr not allocation multiple\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  if (0 == length) {
    /*
     * A Linux mmap of zero length fails, but an OSX one does not,
     * which would lead to a NaClVmmapUpdate of zero pages; that
     * should not occur.
     */
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    length = alloc_rounded_length;
    NaClLog(LOG_WARNING,
            "munmap: rounded length to 0x%"NACL_PRIxS"\n", length);
  }
  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) start, length);
  if (kNaClBadAddress == sysaddr) {
    NaClLog(4, "region not user addresses\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  NaClXMutexLock(&natp->nap->mu);

  while (0 != natp->nap->threads_launching) {
    NaClXCondVarWait(&natp->nap->cv, &natp->nap->mu);
  }
  natp->nap->vm_hole_may_exist = 1;

  holding_app_lock = 1;
  /*
   * NB: the windows (or generic) version would use the Munmap virtual
   * function from the backing NaClDesc object obtained by iterating
   * through the address map for the region, and those Munmap virtual
   * functions may return -NACL_ABI_E_MOVE_ADDRESS_SPACE.
   *
   * We should hold the application lock while doing this iteration
   * and unmapping, so that the address space is consistent for other
   * threads.
   */

  /*
   * The user should be unable to unmap any executable pages.  We
   * check here.
   */
  if (NaClSysCommonAddrRangeContainsExecutablePages_mu(natp->nap,
                                                       (uintptr_t) start,
                                                       length)) {
    NaClLog(2, "NaClSysMunmap: region contains executable pages\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Overwrite the current mapping with inaccessible, anonymous,
   * zero-filled pages, which should be copy-on-write and thus
   * relatively cheap.  Do not open up an address space hole.
   */
  NaClLog(4,
          ("NaClSysMunmap: mmap(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS","
           " 0x%x, 0x%x, -1, 0)\n"),
          sysaddr, length, PROT_NONE,
          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED);
  if (MAP_FAILED == mmap((void *) sysaddr,
                         length,
                         PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         -1,
                         (off_t) 0)) {
    NaClLog(4, "mmap to put in anonymous memory failed, errno = %d\n", errno);
    retval = -NaClXlateErrno(errno);
    goto cleanup;
  }
  NaClVmmapUpdate(&natp->nap->mem_map,
                  (NaClSysToUser(natp->nap, (uintptr_t) sysaddr)
                   >> NACL_PAGESHIFT),
                  length >> NACL_PAGESHIFT,
                  0,  /* prot */
                  (struct NaClMemObj *) NULL,
                  1);  /* delete mapping */
  retval = 0;

cleanup:
  if (holding_app_lock) {
    natp->nap->vm_hole_may_exist = 0;
    NaClXCondVarBroadcast(&natp->nap->cv);
    NaClXMutexUnlock(&natp->nap->mu);
  }
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
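The threads_launching wait at the top of NaClSysMunmap is one half of a handshake with the thread-creation path: a launcher must not start a thread while a VM hole may exist, and the unmapper must not proceed while a thread is being launched. A hedged sketch of the launcher's bracket, with hypothetical helper names built from the fields used above:

/* Hypothetical launch bracket.  LaunchBegin blocks while an unmapper
 * has vm_hole_may_exist set; once inside, a nonzero threads_launching
 * holds NaClSysMunmap's wait loop back, so no address-space hole can
 * appear under a thread being born. */
static void LaunchBegin(struct NaClApp *nap) {
  NaClXMutexLock(&nap->mu);
  while (nap->vm_hole_may_exist) {
    NaClXCondVarWait(&nap->cv, &nap->mu);
  }
  nap->threads_launching++;
  NaClXMutexUnlock(&nap->mu);
}

static void LaunchEnd(struct NaClApp *nap) {
  NaClXMutexLock(&nap->mu);
  nap->threads_launching--;
  /* Wake an unmapper waiting for threads_launching to reach zero. */
  NaClXCondVarBroadcast(&nap->cv);
  NaClXMutexUnlock(&nap->mu);
}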