struct NaClDescInvalid const *NaClDescInvalidMake(void) { NaClXMutexLock(mutex); if (NULL == singleton) { do { /* Allocate an instance. */ singleton = (struct NaClDescInvalid *) malloc(sizeof(*singleton)); if (NULL == singleton) { break; } /* Do the base class construction. */ if (!NaClDescCtor(&(singleton->base))) { free(singleton); singleton = NULL; break; } /* Construct the derived class (simply set the vtbl). */ singleton->base.base.vtbl = (struct NaClRefCountVtbl const *) &kNaClDescInvalidVtbl; } while (0); } NaClXMutexUnlock(mutex); /* If we reached this point and still have NULL == singleton there was an * error in allocation or construction. Return NULL to indicate error. */ if (NULL == singleton) { return NULL; } return (struct NaClDescInvalid *) NaClDescRef(&(singleton->base)); }
/*
 * The memory map structure is a simple array of memory regions which
 * may have different access protections.  We do not yet merge regions
 * with the same access protections together to reduce the region
 * number, but may do so in the future.
 *
 * Regions are described by (relative) starting page number, the
 * number of pages, and the protection that the pages should have.
 */
/*
 * NaClVmmapEntryMake: allocate and initialize one map entry.
 *
 * Takes a new reference on |desc| when it is non-NULL; the caller
 * retains its own reference.  Returns the new entry, or NULL on
 * allocation failure.
 */
struct NaClVmmapEntry *NaClVmmapEntryMake(uintptr_t page_num,
                                          size_t npages,
                                          int prot,
                                          int flags,
                                          struct NaClDesc *desc,
                                          nacl_off64_t offset,
                                          nacl_off64_t file_size) {
  struct NaClVmmapEntry *entry;

  NaClLog(4,
          "NaClVmmapEntryMake(0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
          "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n",
          page_num, npages, prot, flags, (uintptr_t) desc, offset);
  entry = (struct NaClVmmapEntry *) malloc(sizeof *entry);
  if (NULL == entry) {
    /* fix: was `return 0;` -- use NULL for a pointer-returning function */
    return NULL;
  }
  NaClLog(4, "entry: 0x%"NACL_PRIxPTR"\n", (uintptr_t) entry);
  entry->page_num = page_num;
  entry->npages = npages;
  entry->prot = prot;
  entry->flags = flags;
  entry->removed = 0;
  entry->desc = desc;
  if (desc != NULL) {
    NaClDescRef(desc);
  }
  entry->offset = offset;
  entry->file_size = file_size;
  return entry;
}
/*
 * Placement new copy ctor.
 *
 * Initializes *nmop as a copy of *src with the offset shifted by
 * |additional|; takes a new reference on the shared descriptor.
 * Always returns 1.
 */
int NaClMemObjCopyCtorOff(struct NaClMemObj *nmop,
                          struct NaClMemObj *src,
                          nacl_off64_t additional) {
  /* NaClDescRef returns its argument, so the ref-take folds into the copy */
  nmop->ndp = NaClDescRef(src->ndp);
  nmop->nbytes = src->nbytes;
  nmop->offset = src->offset + additional;
  return 1;
}
/*
 * Constructs a NaClSimpleService that uses the provided, already
 * established |service_port| and |sock_addr| descriptors (a new
 * reference is taken on each).  Returns 1 on success, 0 if the
 * intern ctor fails.
 */
int NaClSimpleServiceWithSocketCtor(
    struct NaClSimpleService         *self,
    struct NaClSrpcHandlerDesc const *srpc_handlers,
    NaClThreadIfFactoryFunction      thread_factory_fn,
    void                             *thread_factory_data,
    struct NaClDesc                  *service_port,
    struct NaClDesc                  *sock_addr) {
  NaClLog(4, "NaClSimpleServiceWithSocketCtor: self 0x%"NACL_PRIxPTR"\n",
          (uintptr_t) self);
  if (NaClSimpleServiceCtorIntern(self,
                                  srpc_handlers,
                                  thread_factory_fn,
                                  thread_factory_data)) {
    self->bound_and_cap[0] = NaClDescRef(service_port);
    self->bound_and_cap[1] = NaClDescRef(sock_addr);
    return 1;
  }
  return 0;
}
/*
 * A wrapper class for NaClSrpcMessageDesc that allows clients to ignore
 * implementation differences.
 *
 * Returns 0 if |desc| is invalid, 1 otherwise.  Untrusted
 * (__native_client__) builds store the raw descriptor; trusted builds
 * take a new reference on it.
 */
static int PortableDescCtor(struct PortableDesc* self,
                            NaClSrpcMessageDesc desc) {
  if (desc == kInvalidDesc) {
    return 0;
  }
#ifdef __native_client__
  self->raw_desc = desc;
#else
  self->raw_desc = NaClDescRef(desc);
#endif /* __native_client__ */
  return 1;
}
/*
 * Placement ctor for NaClMemObj: initializes *nmop to cover |nbytes|
 * bytes of descriptor |ndp| starting at |offset|.  Always returns 1.
 *
 * NOTE(review): the previous header comment claimed this function
 * "takes ownership of the NaClDesc object, so no manipulation of ref
 * count" -- but the body below calls NaClDescRef(ndp), i.e. it takes a
 * NEW reference and the caller keeps its own.  One of comment or code
 * was stale; confirm the intended ownership contract and align them.
 */
int NaClMemObjCtor(struct NaClMemObj *nmop,
                   struct NaClDesc *ndp,
                   nacl_off64_t nbytes,
                   nacl_off64_t offset) {
  if (NULL == ndp) {
    /* presumably LOG_FATAL aborts and does not return -- TODO confirm */
    NaClLog(LOG_FATAL, "NaClMemObjCtor: ndp is NULL\n");
  }
  nmop->ndp = ndp;
  NaClDescRef(ndp);  /* see ownership note in the header comment */
  nmop->nbytes = nbytes;
  nmop->offset = offset;
  return 1;
}
/*
 * Shared ctor worker for NaClGioShm: validates that the shm descriptor
 * |shmp| is at least |shm_size| bytes (per its Fstat), takes a
 * reference on it, installs the Gio vtable, and maps the initial
 * window at offset 0.  Returns 1 on success, 0 on failure; on failure
 * *self is left with NULL vtbl/shmp so it is safe to abandon.
 */
static int NaClGioShmCtorIntern(struct NaClGioShm *self,
                                struct NaClDesc *shmp,
                                size_t shm_size) {
  struct nacl_abi_stat stbuf;
  int vfret;
  int rval = 0;

  /* establish the "not constructed" state first, for the error paths */
  self->base.vtbl = NULL;
  self->shmp = NULL;
  self->cur_window = NULL;

  if (0 != (vfret = (*((struct NaClDescVtbl const *) shmp->base.vtbl)->
                     Fstat)(shmp, &stbuf))) {
    NaClLog(1, "NaClGioShmCtorIntern: Fstat virtual function returned %d\n",
            vfret);
    goto cleanup;
  }
  /*
   * nacl_abi_off_t is signed 32-bit quantity, but we don't want to
   * hardwire in that knowledge here.
   *
   * size_t is unsigned, and may be 32-bits or 64-bits, depending on
   * the underlying host OS.
   *
   * we want to ensure that the shm's size, as reported by the desc
   * abstraction and thus is in nacl_abi_off_t, is at least that
   * claimed by the ctor argument.  so, if (as Integers)
   *
   *   stbuf.nacl_abi_st_size < shm_size
   *
   * holds, this is an error.  however, the value-preserving cast rule
   * makes this harder.
   *
   * Note that for signed sizes (ssize_t), the kernel ABI generally
   * only reserve -1 for error, and asking for an I/O operation via a
   * size_t that would succeed but yield a ssize_t return value that
   * is negative is okay, since -1 is never valid as an I/O size on a
   * von Neuman machine (except for a writev where the iov entries
   * overlap): there just isn't that much data to read/write, when the
   * instructions also take up space in the process address space.
   * Whether requiring the programmer to detect this corner case is
   * advisable is a different argument -- similar to negative ssize_t
   * sizes, the syscall can just succeed with a partial transfer to
   * avoid returning -1 on a success, just as we could avoid returning
   * negative values; in practice, we do the latter, since we often
   * see code written that tests for syscall error by comparing the
   * return value to see if it is less than zero, rather than if it is
   * equal to -1.
   */
  if (stbuf.nacl_abi_st_size < 0) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: actual shm size negative"
             " %"NACL_PRIdNACL_OFF"\n"),
            stbuf.nacl_abi_st_size);
    goto cleanup;
  }
  /* value-preserving comparison: only compare as size_t when the
   * stat'd size fits in one */
  if (stbuf.nacl_abi_st_size <= (nacl_abi_off_t) SIZE_T_MAX
      && (size_t) stbuf.nacl_abi_st_size < shm_size) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: claimed shm file size greater than"
             " actual shm segment size, %"NACL_PRIuS" vs"
             " %"NACL_PRIuNACL_OFF"\n"),
            shm_size,
            stbuf.nacl_abi_st_size);
    goto cleanup;
  }
  /* the I/O offset is kept in an off_t, so shm_size must fit there too */
  if (OFF_T_MAX < SIZE_T_MAX && (size_t) OFF_T_MAX < shm_size) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: claimed shm file size greater than"
             " off_t max value, %"NACL_PRId64"\n"),
            (int64_t) OFF_T_MAX);
    goto cleanup;
  }

  self->shmp = NaClDescRef(shmp);

  self->io_offset = 0;
  self->shm_sz = shm_size;
  self->window_offset = 0;
  self->base.vtbl = &kNaClGioShmVtbl;

  if (!NaClGioShmSetWindow(self, 0)) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: initial seek to beginning failed\n"));
    /* roll back to the unconstructed state before reporting failure */
    NaClDescUnref(self->shmp);
    self->shmp = NULL;
    self->shm_sz = 0;
    self->base.vtbl = NULL;
    goto cleanup;
  }

  rval = 1;
 cleanup:
  return rval;
}
/*
 * Launches the per-app service threads: the name service, the kernel
 * service, and -- only when a reverse channel was set up by the
 * plugin -- the manifest proxy.  Successfully started services are
 * handed off to |nap| (nap->kernel_service / nap->manifest_proxy) and
 * registered in nap->name_service.  Returns 1 on success, 0 on error.
 *
 * Ownership: the local manifest_proxy / kernel_service pointers are
 * NULLed whenever ownership transfers to |nap|; whatever remains
 * non-NULL at the single exit is unref'd there.
 */
int NaClAppLaunchServiceThreads(struct NaClApp *nap) {
  struct NaClManifestProxy *manifest_proxy = NULL;
  struct NaClKernelService *kernel_service = NULL;
  int rv = 0;
  enum NaClReverseChannelInitializationState init_state;

  NaClNameServiceLaunch(nap->name_service);

  kernel_service = (struct NaClKernelService *) malloc(sizeof *kernel_service);
  if (NULL == kernel_service) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: No memory for kern service\n");
    goto done;
  }

  if (!NaClKernelServiceCtor(kernel_service,
                             NaClAddrSpSquattingThreadIfFactoryFunction,
                             (void *) nap,
                             nap)) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: KernServiceCtor failed\n");
    /* not constructed, so plain free rather than unref */
    free(kernel_service);
    kernel_service = NULL;
    goto done;
  }

  if (!NaClSimpleServiceStartServiceThread((struct NaClSimpleService *)
                                           kernel_service)) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: KernService start service failed\n");
    goto done;
  }

  /*
   * NB: StartServiceThread grabbed another reference to kernel_service,
   * used by the service thread.  Closing the connection capability
   * should cause the service thread to shut down and in turn release
   * that reference.
   */

  /*
   * The locking here isn't really needed.  Here is why:
   * reverse_channel_initialized is written in reverse_setup RPC
   * handler of the secure command channel RPC handler thread.  and
   * the RPC order requires that the plugin invoke reverse_setup prior
   * to invoking start_module, so there will have been plenty of other
   * synchronization operations to force cache coherency
   * (module_may_start, for example, is set in the cache of the secure
   * channel RPC handler (in start_module) and read by the main
   * thread, and the synchronization operations needed to propagate
   * its value properly suffices to propagate
   * reverse_channel_initialized as well).  However, reading it while
   * holding a lock is more obviously correct for tools like tsan.
   * Due to the RPC order, it is impossible for
   * reverse_channel_initialized to get set after the unlock and
   * before the if test.
   */
  NaClXMutexLock(&nap->mu);
  /*
   * If no reverse_setup RPC was made, then we do not set up a
   * manifest proxy.  Otherwise, we make sure that the reverse channel
   * setup is done, so that the application can actually use
   * reverse-channel-based services such as the manifest proxy.
   */
  if (NACL_REVERSE_CHANNEL_UNINITIALIZED !=
      (init_state = nap->reverse_channel_initialization_state)) {
    /* setup began: wait for it to finish before proceeding */
    while (NACL_REVERSE_CHANNEL_INITIALIZED !=
           (init_state = nap->reverse_channel_initialization_state)) {
      NaClXCondVarWait(&nap->cv, &nap->mu);
    }
  }
  NaClXMutexUnlock(&nap->mu);
  if (NACL_REVERSE_CHANNEL_INITIALIZED != init_state) {
    /* no reverse channel: kernel service only, no manifest proxy */
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: no reverse channel;"
             " launched kernel services.\n"));
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: no reverse channel;"
             " NOT launching manifest proxy.\n"));
    nap->kernel_service = kernel_service;
    kernel_service = NULL;
    rv = 1;
    goto done;
  }

  /*
   * Allocate/construct the manifest proxy without grabbing global
   * locks.
   */
  NaClLog(3, "NaClAppLaunchServiceThreads: launching manifest proxy\n");

  /*
   * ReverseClientSetup RPC should be done via the command channel
   * prior to the load_module / start_module RPCs, and
   * occurs after that, so checking
   * nap->reverse_client suffices for determining whether the proxy is
   * exporting reverse services.
   */
  manifest_proxy = (struct NaClManifestProxy *) malloc(sizeof *manifest_proxy);
  if (NULL == manifest_proxy) {
    NaClLog(LOG_ERROR, "No memory for manifest proxy\n");
    /* close the kernel service cap so its service thread shuts down */
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }
  if (!NaClManifestProxyCtor(manifest_proxy,
                             NaClAddrSpSquattingThreadIfFactoryFunction,
                             (void *) nap,
                             nap)) {
    NaClLog(LOG_ERROR, "ManifestProxyCtor failed\n");
    /* do not leave a non-NULL pointer to a not-fully constructed object */
    free(manifest_proxy);
    manifest_proxy = NULL;
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }

  /*
   * NaClSimpleServiceStartServiceThread requires the nap->mu lock.
   */
  if (!NaClSimpleServiceStartServiceThread((struct NaClSimpleService *)
                                           manifest_proxy)) {
    NaClLog(LOG_ERROR, "ManifestProxy start service failed\n");
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }

  /* transfer ownership of both services to the NaClApp */
  NaClXMutexLock(&nap->mu);
  CHECK(NULL == nap->manifest_proxy);
  CHECK(NULL == nap->kernel_service);
  nap->manifest_proxy = manifest_proxy;
  manifest_proxy = NULL;
  nap->kernel_service = kernel_service;
  kernel_service = NULL;
  NaClXMutexUnlock(&nap->mu);
  rv = 1;

 done:
  NaClXMutexLock(&nap->mu);
  if (NULL != nap->manifest_proxy) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: adding manifest proxy to"
             " name service\n"));
    (*NACL_VTBL(NaClNameService, nap->name_service)->
     CreateDescEntry)(nap->name_service,
                      "ManifestNameService", NACL_ABI_O_RDWR,
                      NaClDescRef(nap->manifest_proxy->base.bound_and_cap[1]));
  }
  if (NULL != nap->kernel_service) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: adding kernel service to"
             " name service\n"));
    (*NACL_VTBL(NaClNameService, nap->name_service)->
     CreateDescEntry)(nap->name_service,
                      "KernelService", NACL_ABI_O_RDWR,
                      NaClDescRef(nap->kernel_service->base.bound_and_cap[1]));
  }
  NaClXMutexUnlock(&nap->mu);

  /*
   * Single exit path.
   *
   * Error cleanup invariant.  No service thread should be running
   * (modulo asynchronous shutdown).  Automatic variables refer to
   * fully constructed objects if non-NULL, and when ownership is
   * transferred to the NaClApp object the corresponding automatic
   * variable is set to NULL.
   */
  NaClRefCountSafeUnref((struct NaClRefCount *) manifest_proxy);
  NaClRefCountSafeUnref((struct NaClRefCount *) kernel_service);
  return rv;
}
int NaClNameServiceResolveName(struct NaClNameService *nnsp, char const *name, int flags, struct NaClDesc **out) { struct NaClNameServiceEntry *nnsep; int status = NACL_NAME_SERVICE_NAME_NOT_FOUND; NaClLog(3, "NaClNameServiceResolveName: looking up %s, flags %d (0x%x)\n", name, flags, flags); if (0 != (flags & ~NACL_ABI_O_ACCMODE)) { NaClLog(2, "NaClNameServiceResolveName: bad flags!\n"); status = NACL_NAME_SERVICE_PERMISSION_DENIED; goto quit; } NaClXMutexLock(&nnsp->mu); nnsep = *NameServiceSearch(&nnsp->head, name); if (NULL != nnsep) { if (NULL != nnsep->entry) { NaClLog(3, "NaClNameServiceResolveName: found %s, mode %d (0x%x)\n", name, nnsep->mode, nnsep->mode); /* check flags against nnsep->mode */ NaClLog(4, ("NaClNameServiceResolveName: checking mode/flags" " compatibility\n")); switch (flags) { case NACL_ABI_O_RDONLY: if (NACL_ABI_O_WRONLY == nnsep->mode) { status = NACL_NAME_SERVICE_PERMISSION_DENIED; NaClLog(4, "NaClNameServiceResolveName: incompatible," " not readable\n"); goto unlock_and_quit; } break; case NACL_ABI_O_WRONLY: if (NACL_ABI_O_RDONLY == nnsep->mode) { status = NACL_NAME_SERVICE_PERMISSION_DENIED; NaClLog(4, "NaClNameServiceResolveName: incompatible," " not writeable\n"); goto unlock_and_quit; } break; case NACL_ABI_O_RDWR: if (NACL_ABI_O_RDWR != nnsep->mode) { status = NACL_NAME_SERVICE_PERMISSION_DENIED; NaClLog(4, "NaClNameServiceResolveName: incompatible," " not for both read and write\n"); goto unlock_and_quit; } break; default: status = NACL_NAME_SERVICE_INVALID_ARGUMENT; NaClLog(4, "NaClNameServiceResolveName: invalid flag\n"); goto unlock_and_quit; } NaClLog(4, "NaClNameServiceResolveName: mode and flags are compatible\n"); *out = NaClDescRef(nnsep->entry); status = NACL_NAME_SERVICE_SUCCESS; } else { status = (*nnsep->factory)(nnsep->state, name, flags, out); } } unlock_and_quit: nnsep = NULL; NaClXMutexUnlock(&nnsp->mu); quit: return status; }