static void NaClKernelServiceCreateProcessRpc( struct NaClSrpcRpc *rpc, struct NaClSrpcArg **in_args, struct NaClSrpcArg **out_args, struct NaClSrpcClosure *done_cls) { struct NaClKernelService *nksp = (struct NaClKernelService *) rpc->channel->server_instance_data; int status; struct NaClDesc *sock_addr = NULL; struct NaClDesc *app_addr = NULL; UNREFERENCED_PARAMETER(in_args); NaClLog(4, "NaClKernelServiceCreateProcessRpc: creating process\n"); status = (*NACL_VTBL(NaClKernelService, nksp)->CreateProcess)( nksp, &sock_addr, &app_addr); out_args[0]->u.ival = status; out_args[1]->u.hval = (0 == status) ? sock_addr : (struct NaClDesc *) NaClDescInvalidMake(); out_args[2]->u.hval = (0 == status) ? app_addr : (struct NaClDesc *) NaClDescInvalidMake(); NaClLog(4, ("NaClKernelServiceCreateProcessRpc: status %d, sock_addr" " 0x08%"NACL_PRIxPTR", app_addr 0x%08"NACL_PRIxPTR"\n"), status, (uintptr_t) sock_addr, (uintptr_t) app_addr); rpc->result = NACL_SRPC_RESULT_OK; (*done_cls->Run)(done_cls); if (0 == status) { NaClDescUnref(sock_addr); NaClDescUnref(app_addr); } }
/* ### * get portion of data from opened channel (NaClAppThread object) * put (map) it to given memory region (NaClAppThread object) * return count of read bytes when success, otherwise - nacl error code */ int ZMQSysRead(struct NaClAppThread *natp, int d, void *buf, uint32_t count) { ssize_t read_result; uintptr_t sysaddr; struct NaClDesc *ndp; /* ### log to remove. i only use it to debug this class */ NaClLog(1, "int ZMQSysRead(struct NaClAppThread *natp, int d, void *buf, size_t count) -- entered\n"); ndp = NaClGetDesc(natp->nap, d); if (NULL == ndp) return -NACL_ABI_EINVAL; /* ### * dummy code. just to test if class work proper * delete it after zmq integration */ sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) buf, count); if (kNaClBadAddress == sysaddr) { NaClDescUnref(ndp); return -NACL_ABI_EFAULT; } /* * The maximum length for read and write is INT32_MAX--anything larger and * the return value would overflow. Passing larger values isn't an error-- * we'll just clamp the request size if it's too large. */ if (count > INT32_MAX) { count = INT32_MAX; } read_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->Read) (ndp, (void *) sysaddr, count); if (read_result > 0) { NaClLog(4, "read returned %"NACL_PRIdS" bytes\n", read_result); NaClLog(8, "read result: %.*s\n", (int) read_result, (char *) sysaddr); } else { NaClLog(4, "read returned %"NACL_PRIdS"\n", read_result); } NaClDescUnref(ndp); /* This cast is safe because we clamped count above.*/ return (int32_t) read_result; }
/*
 * Close virtual function for mutex descriptors: simply releases the
 * caller's reference.  The effector is unused.  Always succeeds.
 */
int NaClDescMutexClose(struct NaClDesc          *vself,
                       struct NaClDescEffector  *effp) {
  UNREFERENCED_PARAMETER(effp);

  NaClDescUnref(vself);
  return 0;
}
int32_t NaClSysFstat(struct NaClAppThread *natp, int d, uint32_t nasp) { struct NaClApp *nap = natp->nap; int32_t retval = -NACL_ABI_EINVAL; struct NaClDesc *ndp; struct nacl_abi_stat result; NaClLog(3, ("Entered NaClSysFstat(0x%08"NACL_PRIxPTR ", %d, 0x%08"NACL_PRIx32")\n"), (uintptr_t) natp, d, nasp); ndp = NaClAppGetDesc(nap, d); if (NULL == ndp) { NaClLog(4, "bad desc\n"); retval = -NACL_ABI_EBADF; goto cleanup; } retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)-> Fstat)(ndp, &result); if (0 == retval) { if (!NaClFileAccessEnabled()) { result.nacl_abi_st_ino = NACL_FAKE_INODE_NUM; } if (!NaClCopyOutToUser(nap, nasp, &result, sizeof result)) { retval = -NACL_ABI_EFAULT; } } NaClDescUnref(ndp); cleanup: return retval; }
/*
 * Construct |self| over a freshly allocated IMC shared-memory segment
 * of |shm_size| bytes; |shm_size| must already be rounded up to an
 * allocation-page multiple.  Returns 1 on success, 0 on failure, with
 * all partially constructed state torn down.
 */
int NaClGioShmAllocCtor(struct NaClGioShm *self, size_t shm_size) {
  struct NaClDescImcShm *shmp;
  int rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));
  if (!NaClDescEffectorTrustedMemCtor(&self->eff)) {
    return 0;
  }
  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    /* undo the effector ctor before bailing */
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    return 0;
  }
  if (!NaClDescImcShmAllocCtor(shmp, shm_size, /* executable= */ 0)) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    free(shmp);
    return 0;
  }
  rv = NaClGioShmCtorIntern(self, (struct NaClDesc *) shmp, shm_size);
  if (!rv) {
    /*
     * NOTE(review): if this Unref drops the last reference it may
     * already free |shmp|, which would make the free() below a double
     * free -- TODO confirm whether NaClGioShmCtorIntern keeps its own
     * reference on failure, and what NaClDescUnref does at refcount 0.
     */
    NaClDescUnref((struct NaClDesc *) shmp);
    free(shmp);
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
  }
  return rv;
}
static int NaClGioShmClose(struct Gio *vself) { struct NaClGioShm *self = (struct NaClGioShm *) vself; int ret; if (NULL != self->cur_window) { ret = (*((struct NaClDescVtbl const *) self->shmp->base.vtbl)-> UnmapUnsafe)(self->shmp, (struct NaClDescEffector *) &self->eff, (void *) self->cur_window, NACL_MAP_PAGESIZE); if (ret < 0) { errno = EIO; return -1; } } self->cur_window = NULL; if (NULL == self->shmp) { NaClLog(LOG_ERROR, "NaClGioShmClose: double close detected\n"); errno = EIO; return -1; } NaClDescUnref(self->shmp); self->shmp = NULL; /* double close will fault */ return 0; }
int32_t NaClSysImcConnect(struct NaClAppThread *natp, int d) { struct NaClApp *nap = natp->nap; int32_t retval = -NACL_ABI_EINVAL; struct NaClDesc *ndp; NaClLog(3, "Entered NaClSysImcConnectAddr(0x%08"NACL_PRIxPTR", %d)\n", (uintptr_t) natp, d); /* This syscall is not used in Chromium so is disabled by default. */ if (!NaClAclBypassChecks) { return -NACL_ABI_EACCES; } ndp = NaClAppGetDesc(nap, d); if (NULL == ndp) { retval = -NACL_ABI_EBADF; } else { struct NaClDesc *result; retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)-> ConnectAddr)(ndp, &result); if (retval == 0) { retval = NaClAppSetDescAvail(nap, result); } NaClDescUnref(ndp); } return retval; }
int32_t NaClSysFtruncate(struct NaClAppThread *natp, int d, uint32_t lengthp) { struct NaClApp *nap = natp->nap; struct NaClDesc *ndp; nacl_abi_off_t length; int32_t retval = -NACL_ABI_EINVAL; NaClLog(3, ("Entered NaClSysFtruncate(0x%08"NACL_PRIxPTR", %d," " 0x%"NACL_PRIx32")\n"), (uintptr_t) natp, d, lengthp); ndp = NaClAppGetDesc(nap, d); if (NULL == ndp) { retval = -NACL_ABI_EBADF; goto cleanup; } if (!NaClCopyInFromUser(nap, &length, lengthp, sizeof length)) { retval = -NACL_ABI_EFAULT; goto cleanup_unref; } NaClLog(4, "length 0x%08"NACL_PRIx64"\n", (uint64_t) length); retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)-> Ftruncate)(ndp, length); cleanup_unref: NaClDescUnref(ndp); cleanup: return retval; }
/*
 * NULL-tolerant companion to NaClDescUnref: a no-op for NULL,
 * otherwise drops one reference on |ndp|.
 */
void NaClDescSafeUnref(struct NaClDesc *ndp) {
  NaClLog(4, "NaClDescSafeUnref(0x%08"NACL_PRIxPTR").\n",
          (uintptr_t) ndp);
  if (NULL != ndp) {
    NaClDescUnref(ndp);
  }
}
int32_t NaClCommonDescMakeBoundSock(struct NaClDesc *pair[2]) { int32_t retval; struct NaClSocketAddress sa; struct NaClDescConnCap *ccp; NaClHandle h; struct NaClDescImcBoundDesc *idp; retval = -NACL_ABI_ENOMEM; ccp = NULL; idp = NULL; h = NACL_INVALID_HANDLE; /* * create NaClDescConnCap object, invoke NaClBoundSocket, create * an NaClDescImcDesc object. put both into open file table. */ ccp = malloc(sizeof *ccp); if (NULL == ccp) { goto cleanup; } idp = malloc(sizeof *idp); if (NULL == idp) { goto cleanup; } do { NaClGenerateRandomPath(&sa.path[0], NACL_PATH_MAX); h = NaClBoundSocket(&sa); NaClLog(3, "NaClCommonDescMakeBoundSock: sa: %s, h 0x%"NACL_PRIxPTR"\n", sa.path, (uintptr_t) h); } while (NACL_INVALID_HANDLE == h); if (!NaClDescConnCapCtor(ccp, &sa)) { goto cleanup; } if (!NaClDescImcBoundDescCtor(idp, h)) { NaClDescUnref((struct NaClDesc *) ccp); goto cleanup; } h = NACL_INVALID_HANDLE; /* idp took ownership */ pair[0] = (struct NaClDesc *) idp; idp = NULL; pair[1] = (struct NaClDesc *) ccp; ccp = NULL; retval = 0; cleanup: free(idp); free(ccp); if (NACL_INVALID_HANDLE != h) { (void) NaClClose(h); } return retval; }
/*
 * Construct a simple service: make the bound-socket / conn-cap pair,
 * then run the shared ctor internals.  Returns 1 on success, 0 on
 * failure (with the pair released on the intern-ctor failure path).
 */
int NaClSimpleServiceCtor(
    struct NaClSimpleService          *self,
    struct NaClSrpcHandlerDesc const  *srpc_handlers,
    NaClThreadIfFactoryFunction       thread_factory_fn,
    void                              *thread_factory_data) {
  NaClLog(4, "Entered NaClSimpleServiceCtor: self 0x%"NACL_PRIxPTR"\n",
          (uintptr_t) self);

  if (0 != NaClCommonDescMakeBoundSock(self->bound_and_cap)) {
    return 0;
  }
  if (NaClSimpleServiceCtorIntern(self, srpc_handlers,
                                  thread_factory_fn,
                                  thread_factory_data)) {
    return 1;
  }
  /* intern ctor failed: release both halves of the pair */
  NaClDescUnref(self->bound_and_cap[0]);
  NaClDescUnref(self->bound_and_cap[1]);
  return 0;
}
/*
 * Construct an IMC descriptor over connected handle |h|: base ctor,
 * then the two message-direction mutexes, then install the subclass
 * vtable.  Returns nonzero on success, 0 on failure with everything
 * already constructed torn down.
 */
int NaClDescImcDescCtor(struct NaClDescImcDesc *self, NaClHandle h) {
  int ok;

  ok = NaClDescImcConnectedDescCtor(&self->base, h);
  if (!ok) {
    return 0;
  }
  if (!NaClMutexCtor(&self->sendmsg_mu)) {
    NaClDescUnref((struct NaClDesc *) self);
    return 0;
  }
  if (!NaClMutexCtor(&self->recvmsg_mu)) {
    NaClMutexDtor(&self->sendmsg_mu);
    NaClDescUnref((struct NaClDesc *) self);
    return 0;
  }
  self->base.base.base.vtbl =
      (struct NaClRefCountVtbl const *) &kNaClDescImcDescVtbl;
  return ok;
}
/*
 * This implements 64-bit offsets, so we use |offp| as an in/out
 * address so we can have a 64 bit return value.
 *
 * On entry *offp (untrusted memory) holds the requested offset; on
 * success the resulting absolute offset is written back through
 * |offp| and 0 is returned.  Errors return a negated NaCl ABI errno.
 */
int32_t NaClSysLseek(struct NaClAppThread *natp,
                     int                  d,
                     uint32_t             offp,
                     int                  whence) {
  struct NaClApp  *nap = natp->nap;
  nacl_abi_off_t  offset;
  nacl_off64_t    retval64;
  int32_t         retval = -NACL_ABI_EINVAL;
  struct NaClDesc *ndp;

  NaClLog(3,
          ("Entered NaClSysLseek(0x%08"NACL_PRIxPTR", %d,"
           " 0x%08"NACL_PRIx32", %d)\n"),
          (uintptr_t) natp, d, offp, whence);

  ndp = NaClAppGetDesc(nap, d);
  if (NULL == ndp) {
    retval = -NACL_ABI_EBADF;
    goto cleanup;
  }
  /* copy the 64-bit input offset in from untrusted memory */
  if (!NaClCopyInFromUser(nap, &offset, offp, sizeof offset)) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unref;
  }
  NaClLog(4, "offset 0x%08"NACL_PRIx64"\n", (uint64_t) offset);
  retval64 = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
              Seek)(ndp, (nacl_off64_t) offset, whence);
  if (NaClOff64IsNegErrno(&retval64)) {
    retval = (int32_t) retval64;
  } else {
    /*
     * Success: write the 64-bit result back through |offp|.  The
     * address was readable above, so a copyout failure here means the
     * mapping changed underneath us -- treated as fatal.
     */
    if (NaClCopyOutToUser(nap, offp, &retval64, sizeof retval64)) {
      retval = 0;
    } else {
      NaClLog(LOG_FATAL,
              "NaClSysLseek: in/out ptr became invalid at copyout?\n");
    }
  }
cleanup_unref:
  NaClDescUnref(ndp);
cleanup:
  return retval;
}
/*
 * Gio close: unmap any current window, release the shared-memory
 * descriptor, and mark the object closed.  Returns 0 on success,
 * -1 with errno = EIO on double close.
 */
static int NaClGioShmClose(struct Gio *vself) {
  struct NaClGioShm *self = (struct NaClGioShm *) vself;

  /*
   * BUG FIX: check for double close before using self->shmp.  The
   * unmap below passes shmp to NaClDescUnmapUnsafe, which would be
   * handed NULL on a second close that still had a window pointer.
   */
  if (NULL == self->shmp) {
    NaClLog(LOG_ERROR, "NaClGioShmClose: double close detected\n");
    errno = EIO;
    return -1;
  }
  if (NULL != self->cur_window) {
    NaClDescUnmapUnsafe(self->shmp, (void *) self->cur_window,
                        NACL_MAP_PAGESIZE);
  }
  self->cur_window = NULL;
  NaClDescUnref(self->shmp);
  self->shmp = NULL;  /* makes a second close detectable above */
  return 0;
}
/*
 * Internalize (deserialize) a NaClDescQuota from the transfer buffer
 * in |xfer|: read the quota file id, internalize the wrapped
 * descriptor that follows, and construct the quota wrapper around it.
 * On success *out_desc receives the new descriptor (caller owns the
 * reference) and 0 is returned; on failure a negated NaCl ABI errno
 * is returned and all intermediate resources are released.
 */
int NaClDescQuotaInternalize(struct NaClDesc               **out_desc,
                             struct NaClDescXferState      *xfer,
                             struct NaClDescQuotaInterface *quota_interface) {
  int                   rv = -NACL_ABI_EIO;
  uint8_t               file_id[NACL_DESC_QUOTA_FILE_ID_LEN];
  struct NaClDescQuota  *out = NULL;
  struct NaClDesc       *wrapped_desc;

  if (NULL == (out = malloc(sizeof *out))) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  /* the fixed-size file id precedes the wrapped descriptor */
  memcpy(file_id, xfer->next_byte, sizeof file_id);
  xfer->next_byte += sizeof file_id;

  if (1 != NaClDescInternalizeFromXferBuffer(&wrapped_desc, xfer,
                                             quota_interface)) {
    rv = -NACL_ABI_EIO;
    goto cleanup;  /* wrapped_desc not constructed; skip its unref */
  }
  if (!NaClDescQuotaCtor(out, wrapped_desc, file_id, quota_interface)) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup_wrapped;
  }

  *out_desc = (struct NaClDesc *) out;
  rv = 0;
  /* success falls through both labels; the rv checks keep them no-ops */
cleanup_wrapped:
  if (0 != rv) {
    NaClDescUnref(wrapped_desc);
  }
cleanup:
  if (0 != rv) {
    free(out);
  }
  return rv;
}
/*
 * Thread body for the SRPC service: takes ownership of the heap
 * allocated ServiceThreadArgs, runs the server loop on its
 * descriptor, then releases the descriptor and exits the thread.
 */
void WINAPI serviceThread(void* arg) {
  struct ServiceThreadArgs  *args = (struct ServiceThreadArgs *) arg;
  NaClSrpcImcDescType       desc = args->desc;
  NaClSrpcHandlerDesc       handlers[] = {
    { "getNum::i", handleGetNum },
    { NULL, NULL }
  };

  free(args);  /* args was heap-allocated by the spawner */

  if (!NaClSrpcServerLoop(desc, handlers, 0)) {
    failWithErrno("NaClSrpcServerLoop");
    exit(EXIT_FAILURE);
  }

#ifdef __native_client__
  close(desc);
#else
  NaClDescUnref(desc);
#endif

  NaClThreadExit(0);
}
void NaClReplaceDescIfValidationCacheAssertsMappable( struct NaClDesc **desc_in_out, struct NaClValidationCache *validation_cache) { struct NaClDesc *desc = *desc_in_out; struct NaClDesc *replacement; struct NaClFileToken file_token; if (NACL_FI("validation_cache_replacement_bypass", 0, 1)) { NaClDescMarkSafeForMmap(desc); } else if (!NaClDescGetFileToken(desc, &file_token)) { NaClLog(4, "NaClReplaceDescIfValidationCacheAssertsMappable: no valid" " file token\n"); } else { replacement = NaClExchangeFileTokenForMappableDesc(&file_token, validation_cache); if (NULL != replacement) { NaClDescUnref(desc); *desc_in_out = replacement; } } }
/*
 * Wrap |handle| in a read-only IO descriptor and, when |file_path| is
 * non-empty, attach it as rich file info (marking the descriptor safe
 * for mmap).  Returns the new descriptor, or NULL on allocation
 * failure.
 */
struct NaClDesc *NaClDescCreateWithFilePathMetadata(NaClHandle handle,
                                                    const char *file_path) {
  struct NaClDesc         *desc;
  struct NaClRichFileInfo info;
  char                    *path_copy;
  size_t                  path_len;

  desc = NaClDescIoDescFromHandleAllocCtor(handle, NACL_ABI_O_RDONLY);
  if (NULL == desc) {
    return NULL;
  }

  path_len = strlen(file_path);
  if (0 == path_len) {
    /* No path metadata: return the bare descriptor. */
    return desc;
  }

  /* Mark the desc as OK for mmapping. */
  NaClDescMarkSafeForMmap(desc);

  path_copy = (char *) malloc(path_len + 1);
  if (NULL == path_copy) {
    NaClDescUnref(desc);
    return NULL;
  }
  memcpy(path_copy, file_path, path_len + 1);

  /* Provide metadata for validation. */
  NaClRichFileInfoCtor(&info);
  info.known_file = 1;
  info.file_path = path_copy;  /* Takes ownership. */
  info.file_path_length = (uint32_t) path_len;
  NaClSetFileOriginInfo(desc, &info);
  NaClRichFileInfoDtor(&info);
  return desc;
}
int32_t NaClSysIsatty(struct NaClAppThread *natp, int d) { struct NaClApp *nap = natp->nap; int retval = -NACL_ABI_EBADF; struct NaClDesc *ndp; NaClLog(3, "Entered NaClSysIsatty(0x%08"NACL_PRIxPTR", %d)\n", (uintptr_t) natp, d); if (!NaClAclBypassChecks) { return -NACL_ABI_EACCES; } ndp = NaClAppGetDesc(nap, d); if (NULL == ndp) { NaClLog(4, "bad desc\n"); return -NACL_ABI_EBADF; } retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->Isatty)(ndp); NaClDescUnref(ndp); return retval; }
/*
 * fdatasync(2) syscall: forwards to the descriptor's Fdatasync
 * virtual function.  Returns 0 or a negated NaCl ABI errno.
 */
int32_t NaClSysFdatasync(struct NaClAppThread *natp, int d) {
  struct NaClApp  *nap = natp->nap;
  struct NaClDesc *desc;
  int32_t         rv;

  NaClLog(3, ("Entered NaClSysFdatasync(0x%08"NACL_PRIxPTR", %d)\n"),
          (uintptr_t) natp, d);

  desc = NaClAppGetDesc(nap, d);
  if (NULL == desc) {
    return -NACL_ABI_EBADF;
  }
  rv = (*((struct NaClDescVtbl const *) desc->base.vtbl)->
        Fdatasync)(desc);
  NaClDescUnref(desc);
  return rv;
}
int32_t NaClSysClose(struct NaClAppThread *natp, int d) { struct NaClApp *nap = natp->nap; int retval = -NACL_ABI_EBADF; struct NaClDesc *ndp; NaClLog(3, "Entered NaClSysClose(0x%08"NACL_PRIxPTR", %d)\n", (uintptr_t) natp, d); NaClFastMutexLock(&nap->desc_mu); ndp = NaClAppGetDescMu(nap, d); if (NULL != ndp) { NaClAppSetDescMu(nap, d, NULL); /* Unref the desc_tbl */ } NaClFastMutexUnlock(&nap->desc_mu); NaClLog(5, "Invoking Close virtual function of object 0x%08"NACL_PRIxPTR"\n", (uintptr_t) ndp); if (NULL != ndp) { NaClDescUnref(ndp); retval = 0; } return retval; }
int32_t NaClSysImcAccept(struct NaClAppThread *natp, int d) { struct NaClApp *nap = natp->nap; int32_t retval = -NACL_ABI_EINVAL; struct NaClDesc *ndp; NaClLog(3, "Entered NaClSysImcAccept(0x%08"NACL_PRIxPTR", %d)\n", (uintptr_t) natp, d); ndp = NaClAppGetDesc(nap, d); if (NULL == ndp) { retval = -NACL_ABI_EBADF; } else { struct NaClDesc *result_desc; retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)-> AcceptConn)(ndp, &result_desc); if (retval == 0) { retval = NaClAppSetDescAvail(nap, result_desc); } NaClDescUnref(ndp); } return retval; }
/*
 * Construct |self| over a freshly allocated IMC shared-memory segment
 * of |shm_size| bytes; |shm_size| must already be rounded up to an
 * allocation-page multiple.  Returns 1 on success, 0 on failure.
 * (Effector-free variant of the ctor above.)
 */
int NaClGioShmAllocCtor(struct NaClGioShm *self, size_t shm_size) {
  struct NaClDescImcShm *shmp;
  int rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));
  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    return 0;
  }
  if (!NaClDescImcShmAllocCtor(shmp, shm_size, /* executable= */ 0)) {
    free(shmp);
    return 0;
  }
  rv = NaClGioShmCtorIntern(self, (struct NaClDesc *) shmp, shm_size);
  /*
   * NOTE(review): presumably NaClGioShmCtorIntern takes its own
   * reference on success, so this drops only ours.  On failure this
   * Unref may already free |shmp|, which would make the free() below
   * a double free -- TODO confirm against NaClRefCountUnref semantics.
   */
  NaClDescUnref((struct NaClDesc *) shmp);
  if (!rv) {
    free(shmp);
  }
  return rv;
}
/*
 * preconditions:
 * * argc is the length of the argv array
 * * envv may be NULL (this happens on MacOS/Cocoa and in tests)
 * * if envv is non-NULL it is 'consistent', null terminated etc.
 *
 * Builds the initial user stack image (argc/argv/envp strings,
 * pointer tables, and auxv) at the top of the untrusted stack, then
 * spawns the main app thread.  Returns nonzero on success, 0 on
 * failure.
 */
int NaClCreateMainThread(struct NaClApp     *nap,
                         int                argc,
                         char               **argv,
                         char const *const  *envv) {
  /*
   * Compute size of string tables for argv and envv
   */
  int           retval;
  int           envc;
  size_t        size;
  int           auxv_entries;
  size_t        ptr_tbl_size;
  int           i;
  uint32_t      *p;
  char          *strp;
  size_t        *argv_len;
  size_t        *envv_len;
  uintptr_t     stack_ptr;

  retval = 0;  /* fail */
  CHECK(argc >= 0);
  CHECK(NULL != argv || 0 == argc);

  /* count environment entries (envv may legitimately be NULL) */
  envc = 0;
  if (NULL != envv) {
    char const *const *pp;
    for (pp = envv; NULL != *pp; ++pp) {
      ++envc;
    }
  }
  envv_len = 0;
  argv_len = malloc(argc * sizeof argv_len[0]);
  envv_len = malloc(envc * sizeof envv_len[0]);
  if (NULL == argv_len) {
    goto cleanup;
  }
  /* envv_len may be NULL only when there are no env entries */
  if (NULL == envv_len && 0 != envc) {
    goto cleanup;
  }
  size = 0;

  /*
   * The following two loops cannot overflow.  The reason for this is
   * that they are counting the number of bytes used to hold the
   * NUL-terminated strings that comprise the argv and envv tables.
   * If the entire address space consisted of just those strings, then
   * the size variable would overflow; however, since there's the code
   * space required to hold the code below (and we are not targetting
   * Harvard architecture machines), at least one page holds code, not
   * data.  We are assuming that the caller is non-adversarial and the
   * code does not look like string data....
   */
  for (i = 0; i < argc; ++i) {
    argv_len[i] = strlen(argv[i]) + 1;
    size += argv_len[i];
  }
  for (i = 0; i < envc; ++i) {
    envv_len[i] = strlen(envv[i]) + 1;
    size += envv_len[i];
  }

  /*
   * NaCl modules are ILP32, so the argv, envv pointers, as well as
   * the terminating NULL pointers at the end of the argv/envv tables,
   * are 32-bit values.  We also have the auxv to take into account.
   *
   * The argv and envv pointer tables came from trusted code and is
   * part of memory.  Thus, by the same argument above, adding in
   * "ptr_tbl_size" cannot possibly overflow the "size" variable since
   * it is a size_t object.  However, the extra pointers for auxv and
   * the space for argv could cause an overflow.  The fact that we
   * used stack to get here etc means that ptr_tbl_size could not have
   * overflowed.
   *
   * NB: the underlying OS would have limited the amount of space used
   * for argv and envv -- on linux, it is ARG_MAX, or 128KB -- and
   * hence the overflow check is for obvious auditability rather than
   * for correctness.
   */
  auxv_entries = 1;  /* AT_NULL terminator is always present */
  if (0 != nap->user_entry_pt) {
    auxv_entries++;
  }
  if (0 != nap->dynamic_text_start) {
    auxv_entries++;
  }
  ptr_tbl_size = (((NACL_STACK_GETS_ARG ? 1 : 0) +
                   (3 + argc + 1 + envc + 1 + auxv_entries * 2)) *
                  sizeof(uint32_t));

  if (SIZE_T_MAX - size < ptr_tbl_size) {
    NaClLog(LOG_WARNING,
            "NaClCreateMainThread: ptr_tbl_size cause size of"
            " argv / environment copy to overflow!?!\n");
    retval = 0;
    goto cleanup;
  }
  size += ptr_tbl_size;

  /* round up to the stack alignment required by the ABI */
  size = (size + NACL_STACK_ALIGN_MASK) & ~NACL_STACK_ALIGN_MASK;

  if (size > nap->stack_size) {
    retval = 0;
    goto cleanup;
  }

  /*
   * Write strings and char * arrays to stack.
   */
  stack_ptr = NaClUserToSysAddrRange(nap,
                                     NaClGetInitialStackTop(nap) - size,
                                     size);
  if (stack_ptr == kNaClBadAddress) {
    retval = 0;
    goto cleanup;
  }

  NaClLog(2, "setting stack to : %016"NACL_PRIxPTR"\n", stack_ptr);

  VCHECK(0 == (stack_ptr & NACL_STACK_ALIGN_MASK),
         ("stack_ptr not aligned: %016"NACL_PRIxPTR"\n", stack_ptr));

  p = (uint32_t *) stack_ptr;
  strp = (char *) stack_ptr + ptr_tbl_size;  /* strings follow the tables */

  /*
   * For x86-32, we push an initial argument that is the address of
   * the main argument block.  For other machines, this is passed
   * in a register and that's set in NaClStartThreadInApp.
   */
  if (NACL_STACK_GETS_ARG) {
    uint32_t *argloc = p++;
    *argloc = (uint32_t) NaClSysToUser(nap, (uintptr_t) p);
  }

  *p++ = 0;  /* Cleanup function pointer, always NULL. */
  *p++ = envc;
  *p++ = argc;

  for (i = 0; i < argc; ++i) {
    *p++ = (uint32_t) NaClSysToUser(nap, (uintptr_t) strp);
    NaClLog(2, "copying arg %d %p -> %p\n", i, argv[i], strp);
    strcpy(strp, argv[i]);
    strp += argv_len[i];
  }
  *p++ = 0;  /* argv[argc] is NULL. */

  for (i = 0; i < envc; ++i) {
    *p++ = (uint32_t) NaClSysToUser(nap, (uintptr_t) strp);
    NaClLog(2, "copying env %d %p -> %p\n", i, envv[i], strp);
    strcpy(strp, envv[i]);
    strp += envv_len[i];
  }
  *p++ = 0;  /* envp[envc] is NULL. */

  /* Push an auxv */
  if (0 != nap->user_entry_pt) {
    *p++ = AT_ENTRY;
    *p++ = (uint32_t) nap->user_entry_pt;
  }
  if (0 != nap->dynamic_text_start) {
    *p++ = AT_BASE;
    *p++ = (uint32_t) nap->dynamic_text_start;
  }
  *p++ = AT_NULL;
  *p++ = 0;

  /* verify we filled exactly the pointer-table region we reserved */
  CHECK((char *) p == (char *) stack_ptr + ptr_tbl_size);

  /* now actually spawn the thread */
  NaClXMutexLock(&nap->mu);

  /*
   * Unreference the main nexe and irt at this point if no debug stub
   * callbacks have been registered, as these references to the main
   * nexe and irt descriptors are only used when providing file access
   * to the debugger.  In the debug case, let shutdown take care of
   * cleanup.
   */
  if (NULL == nap->debug_stub_callbacks) {
    if (NULL != nap->main_nexe_desc) {
      NaClDescUnref(nap->main_nexe_desc);
      nap->main_nexe_desc = NULL;
    }
    if (NULL != nap->irt_nexe_desc) {
      NaClDescUnref(nap->irt_nexe_desc);
      nap->irt_nexe_desc = NULL;
    }
  }
  nap->running = 1;
  NaClXMutexUnlock(&nap->mu);

  NaClVmHoleWaitToStartThread(nap);

  /*
   * For x86, we adjust the stack pointer down to push a dummy return
   * address.  This happens after the stack pointer alignment.
   * We avoid the otherwise harmless call for the zero case because
   * _FORTIFY_SOURCE memset can warn about zero-length calls.
   */
  if (NACL_STACK_PAD_BELOW_ALIGN != 0) {
    stack_ptr -= NACL_STACK_PAD_BELOW_ALIGN;
    memset((void *) stack_ptr, 0, NACL_STACK_PAD_BELOW_ALIGN);
  }

  NaClLog(2, "system stack ptr : %016"NACL_PRIxPTR"\n", stack_ptr);
  NaClLog(2, " user stack ptr : %016"NACL_PRIxPTR"\n",
          NaClSysToUserStackAddr(nap, stack_ptr));

  /* e_entry is user addr */
  retval = NaClAppThreadSpawn(nap,
                              nap->initial_entry_pt,
                              NaClSysToUserStackAddr(nap, stack_ptr),
                              /* user_tls1= */ (uint32_t) nap->break_addr,
                              /* user_tls2= */ 0);

cleanup:
  free(argv_len);
  free(envv_len);
  return retval;
}
/*
 * Load the untrusted application described by |args| into |nap|:
 * apply embedder configuration, run platform qualification, install
 * OS-specific exception handling, load the nexe and (optionally) the
 * IRT, and start the debug stub when requested.  Returns LOAD_OK on
 * success; on failure reports status through the load-status handler
 * (when present) and returns the error code.
 */
static int LoadApp(struct NaClApp *nap, struct NaClChromeMainArgs *args) {
  NaClErrorCode errcode = LOAD_OK;

  CHECK(g_initialized);

  /* Allow or disallow dyncode API based on args. */
  nap->enable_dyncode_syscalls = args->enable_dyncode_syscalls;
  nap->initial_nexe_max_code_bytes = args->initial_nexe_max_code_bytes;
  nap->pnacl_mode = args->pnacl_mode;

#if NACL_LINUX
  g_prereserved_sandbox_size = args->prereserved_sandbox_size;
#endif
#if NACL_LINUX || NACL_OSX
  /*
   * Overwrite value of sc_nprocessors_onln set in NaClAppCtor.  In
   * the Chrome embedding, the outer sandbox was already enabled when
   * the NaClApp Ctor was invoked, so a bogus value was written in
   * sc_nprocessors_onln.
   */
  if (-1 != args->number_of_cores) {
    nap->sc_nprocessors_onln = args->number_of_cores;
  }
#endif

  if (args->create_memory_object_func != NULL)
    NaClSetCreateMemoryObjectFunc(args->create_memory_object_func);

  /* Inject the validation caching interface, if it exists. */
  nap->validation_cache = args->validation_cache;

  NaClAppInitialDescriptorHookup(nap);

  /*
   * in order to report load error to the browser plugin through the
   * secure command channel, we do not immediate jump to cleanup code
   * on error.  rather, we continue processing (assuming earlier
   * errors do not make it inappropriate) until the secure command
   * channel is set up, and then bail out.
   */

  /*
   * Ensure this operating system platform is supported.
   */
  if (args->skip_qualification) {
    fprintf(stderr, "PLATFORM QUALIFICATION DISABLED - "
            "Native Client's sandbox will be unreliable!\n");
  } else {
    errcode = NACL_FI_VAL("pq", NaClErrorCode,
                          NaClRunSelQualificationTests());
    if (LOAD_OK != errcode) {
      nap->module_load_status = errcode;
      fprintf(stderr, "Error while loading in SelMain: %s\n",
              NaClErrorString(errcode));
      goto error;
    }
  }

  /*
   * Patch the Windows exception dispatcher to be safe in the case
   * of faults inside x86-64 sandboxed code.  The sandbox is not
   * secure on 64-bit Windows without this.
   */
#if (NACL_WINDOWS && NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86 && \
     NACL_BUILD_SUBARCH == 64)
  NaClPatchWindowsExceptionDispatcher();
#endif
  NaClSignalTestCrashOnStartup();

  nap->enable_exception_handling = args->enable_exception_handling;

  /* per-OS hardware-exception / debug-stub plumbing */
  if (args->enable_exception_handling || args->enable_debug_stub) {
#if NACL_LINUX
    /* NaCl's signal handler is always enabled on Linux. */
#elif NACL_OSX
    if (!NaClInterceptMachExceptions()) {
      NaClLog(LOG_FATAL,
              "LoadApp: Failed to set up Mach exception handler\n");
    }
#elif NACL_WINDOWS
    nap->attach_debug_exception_handler_func =
        args->attach_debug_exception_handler_func;
#else
# error Unknown host OS
#endif
  }
#if NACL_LINUX
  NaClSignalHandlerInit();
#endif

  /* Give debuggers a well known point at which xlate_base is known. */
  NaClGdbHook(nap);

  CHECK(args->nexe_desc != NULL);
  NaClAppLoadModule(nap, args->nexe_desc);
  /* ownership of the nexe descriptor stays with us; release it now */
  NaClDescUnref(args->nexe_desc);
  args->nexe_desc = NULL;

  NACL_FI_FATAL("BeforeLoadIrt");

  /*
   * error reporting done; can quit now if there was an error earlier.
   */
  errcode = NaClGetLoadStatus(nap);
  if (LOAD_OK != errcode) {
    goto error;
  }

  /*
   * Load the integrated runtime (IRT) library.
   * Skip if irt_load_optional and the nexe doesn't have the usual 256MB
   * segment gap.  PNaCl's disabling of the segment gap doesn't actually
   * disable the segment gap.  It only only reduces it drastically.
   */
  if (args->irt_load_optional && nap->dynamic_text_end < 0x10000000) {
    NaClLog(1,
            "Skipped NaClLoadIrt, irt_load_optional with dynamic_text_end: %"
            NACL_PRIxPTR"\n",
            nap->dynamic_text_end);
  } else {
    if (args->irt_fd != -1) {
      CHECK(args->irt_desc == NULL);
      args->irt_desc = IrtDescFromFd(args->irt_fd);
      args->irt_fd = -1;
    }
    if (args->irt_desc != NULL) {
      NaClLoadIrt(nap, args->irt_desc);
      NaClDescUnref(args->irt_desc);
      args->irt_desc = NULL;
    }
  }

  if (args->enable_debug_stub) {
#if NACL_LINUX || NACL_OSX
    /* prefer an explicit pipe over a pre-bound socket when both exist */
    if (args->debug_stub_pipe_fd != NACL_INVALID_HANDLE) {
      NaClDebugStubSetPipe(args->debug_stub_pipe_fd);
    } else if (args->debug_stub_server_bound_socket_fd !=
               NACL_INVALID_SOCKET) {
      NaClDebugSetBoundSocket(args->debug_stub_server_bound_socket_fd);
    }
#endif
    if (!NaClDebugInit(nap)) {
      goto error;
    }
#if NACL_WINDOWS
    if (NULL != args->debug_stub_server_port_selected_handler_func) {
      args->debug_stub_server_port_selected_handler_func(
          NaClDebugGetBoundPort());
    }
#endif
  }

  if (args->load_status_handler_func != NULL) {
    args->load_status_handler_func(LOAD_OK);
  }
  return LOAD_OK;

error:
  fflush(stdout);

  /* Don't return LOAD_OK if we had some failure loading. */
  if (LOAD_OK == errcode) {
    errcode = LOAD_INTERNAL;
  }
  /*
   * If there is a load status callback, call that now and transfer logs
   * in preparation for process exit.
   */
  if (args->load_status_handler_func != NULL) {
    args->load_status_handler_func(errcode);
    NaClLog(LOG_ERROR,
            "NaCl LoadApp failed. Transferring logs before exit.\n");
    NaClLogRunAbortBehavior();
  }
  return errcode;
}
/*
 * write(2) syscall: write up to |count| bytes from the untrusted
 * buffer at |buf| to descriptor |d|.  Returns the number of bytes
 * written or a negated NaCl ABI errno.
 */
int32_t NaClSysWrite(struct NaClAppThread *natp,
                     int                  d,
                     uint32_t             buf,
                     uint32_t             count) {
  struct NaClApp  *nap = natp->nap;
  int32_t         retval = -NACL_ABI_EINVAL;
  ssize_t         write_result = -NACL_ABI_EINVAL;
  uintptr_t       sysaddr;
  char const      *ellipsis = "";
  struct NaClDesc *ndp;
  size_t          log_bytes;

  NaClLog(3,
          "Entered NaClSysWrite(0x%08"NACL_PRIxPTR", "
          "%d, 0x%08"NACL_PRIx32", "
          "%"NACL_PRIu32"[0x%"NACL_PRIx32"])\n",
          (uintptr_t) natp, d, buf, count, count);

  ndp = NaClAppGetDesc(nap, d);
  NaClLog(4, " ndp = %"NACL_PRIxPTR"\n", (uintptr_t) ndp);
  if (NULL == ndp) {
    retval = -NACL_ABI_EBADF;
    goto cleanup;
  }

  /* validate the whole user buffer before any I/O */
  sysaddr = NaClUserToSysAddrRange(nap, buf, count);
  if (kNaClBadAddress == sysaddr) {
    NaClDescUnref(ndp);
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  /* limit how much of the payload is echoed into the log */
  log_bytes = count;
  if (log_bytes > INT32_MAX) {
    log_bytes = INT32_MAX;
    ellipsis = "...";
  }
  if (NaClLogGetVerbosity() < 10) {
    if (log_bytes > kdefault_io_buffer_bytes_to_log) {
      log_bytes = kdefault_io_buffer_bytes_to_log;
      ellipsis = "...";
    }
  }
  NaClLog(8, "In NaClSysWrite(%d, %.*s%s, %"NACL_PRIu32")\n",
          d, (int) log_bytes, (char *) sysaddr, ellipsis, count);

  /*
   * The maximum length for read and write is INT32_MAX--anything larger and
   * the return value would overflow.  Passing larger values isn't an error--
   * we'll just clamp the request size if it's too large.
   */
  if (count > INT32_MAX) {
    count = INT32_MAX;
  }

  /* bracket the I/O so mmap operations know this range is in use */
  NaClVmIoWillStart(nap, buf, buf + count - 1);
  write_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
                  Write)(ndp, (void *) sysaddr, count);
  NaClVmIoHasEnded(nap, buf, buf + count - 1);

  NaClDescUnref(ndp);

  /* This cast is safe because we clamped count above.*/
  retval = (int32_t) write_result;
cleanup:
  return retval;
}
/*
 * getdents syscall: read directory entries from descriptor |d| into
 * the untrusted buffer at |dirp| (|count| bytes).  Returns the number
 * of bytes produced or a negated NaCl ABI errno.  Disabled entirely
 * when filesystem access is off.
 */
int32_t NaClSysGetdents(struct NaClAppThread  *natp,
                        int                   d,
                        uint32_t              dirp,
                        size_t                count) {
  struct NaClApp  *nap = natp->nap;
  int32_t         retval = -NACL_ABI_EINVAL;
  ssize_t         getdents_ret;
  uintptr_t       sysaddr;
  struct NaClDesc *ndp;

  NaClLog(3,
          ("Entered NaClSysGetdents(0x%08"NACL_PRIxPTR", "
           "%d, 0x%08"NACL_PRIx32", "
           "%"NACL_PRIuS"[0x%"NACL_PRIxS"])\n"),
          (uintptr_t) natp, d, dirp, count, count);

  if (!NaClFileAccessEnabled()) {
    /*
     * Filesystem access is disabled, so disable the getdents() syscall.
     * We do this for security hardening, though it should be redundant,
     * because untrusted code should not be able to open any directory
     * descriptors (i.e. descriptors with a non-trivial Getdents()
     * implementation).
     */
    return -NACL_ABI_EACCES;
  }

  ndp = NaClAppGetDesc(nap, d);
  if (NULL == ndp) {
    retval = -NACL_ABI_EBADF;
    goto cleanup;
  }

  /*
   * Generic NaClCopyOutToUser is not sufficient, since buffer size
   * |count| is arbitrary and we wouldn't want to have to allocate
   * memory in trusted address space to match.
   */
  sysaddr = NaClUserToSysAddrRange(nap, dirp, count);
  if (kNaClBadAddress == sysaddr) {
    NaClLog(4, " illegal address for directory data\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unref;
  }

  /*
   * Clamp count to INT32_MAX to avoid the possibility of Getdents returning
   * a value that is outside the range of an int32.
   */
  if (count > INT32_MAX) {
    count = INT32_MAX;
  }
  /*
   * Grab addr space lock; getdents should not normally block, though
   * if the directory is on a networked filesystem this could, and
   * cause mmap to be slower on Windows.
   */
  NaClXMutexLock(&nap->mu);
  getdents_ret = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
                  Getdents)(ndp,
                            (void *) sysaddr,
                            count);
  NaClXMutexUnlock(&nap->mu);  /* drop addr space lock */
  if ((getdents_ret < INT32_MIN && !NaClSSizeIsNegErrno(&getdents_ret))
      || INT32_MAX < getdents_ret) {
    /* This should never happen, because we already clamped the input count */
    NaClLog(LOG_FATAL, "Overflow in Getdents: return value is %"NACL_PRIxS,
            (size_t) getdents_ret);
  } else {
    retval = (int32_t) getdents_ret;
  }
  if (retval > 0) {
    NaClLog(4, "getdents returned %d bytes\n", retval);
    NaClLog(8, "getdents result: %.*s\n", retval, (char *) sysaddr);
  } else {
    NaClLog(4, "getdents returned %d\n", retval);
  }

cleanup_unref:
  NaClDescUnref(ndp);
cleanup:
  return retval;
}
/*
 * Create a connected pair of transferable-data descriptors over an
 * IMC socket pair.  On success pair[0] and pair[1] each own one
 * endpoint and 0 is returned; on failure a negated NaCl ABI errno is
 * returned and all intermediate handles/objects are released.
 */
int32_t NaClCommonDescSocketPair(struct NaClDesc *pair[2]) {
  int32_t                         retval = -NACL_ABI_EIO;
  struct NaClDescXferableDataDesc *d0;
  struct NaClDescXferableDataDesc *d1;
  NaClHandle                      sock_pair[2];

  /*
   * mark resources to enable easy cleanup
   */
  d0 = NULL;
  d1 = NULL;
  sock_pair[0] = NACL_INVALID_HANDLE;
  sock_pair[1] = NACL_INVALID_HANDLE;

  if (0 != NaClSocketPair(sock_pair)) {
    NaClLog(1,
            "NaClCommonSysImc_Socket_Pair: IMC socket pair creation failed\n");
    retval = -NACL_ABI_ENFILE;
    goto cleanup;
  }
  if (NULL == (d0 = malloc(sizeof *d0))) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  if (NULL == (d1 = malloc(sizeof *d1))) {
    free((void *) d0);
    d0 = NULL;
    retval = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  if (!NaClDescXferableDataDescCtor(d0, sock_pair[0])) {
    /* ctor failed, so it did NOT take ownership of sock_pair[0] */
    free((void *) d0);
    d0 = NULL;
    free((void *) d1);
    d1 = NULL;
    retval = -NACL_ABI_ENFILE;
    goto cleanup;
  }
  sock_pair[0] = NACL_INVALID_HANDLE;  /* ctor took ownership */

  if (!NaClDescXferableDataDescCtor(d1, sock_pair[1])) {
    free((void *) d1);
    d1 = NULL;
    retval = -NACL_ABI_ENFILE;
    goto cleanup;
  }
  sock_pair[1] = NACL_INVALID_HANDLE;  /* ctor took ownership */

  pair[0] = (struct NaClDesc *) d0;
  d0 = NULL;
  pair[1] = (struct NaClDesc *) d1;
  d1 = NULL;
  retval = 0;

cleanup:
  /*
   * pre: d0 and d1 must either be NULL or point to fully constructed
   * NaClDesc objects
   */
  if (NULL != d0) {
    NaClDescUnref((struct NaClDesc *) d0);
  }
  if (NULL != d1) {
    NaClDescUnref((struct NaClDesc *) d1);
  }
  if (NACL_INVALID_HANDLE != sock_pair[0]) {
    (void) NaClClose(sock_pair[0]);
  }
  if (NACL_INVALID_HANDLE != sock_pair[1]) {
    (void) NaClClose(sock_pair[1]);
  }
  free(d0);
  free(d1);
  return retval;
}
/*
 * Receive one typed message from |channel| into the gather/scatter
 * list and descriptor vector described by |nitmhp|.
 *
 * The wire format is: NaClInternalHeader, followed by
 * descriptor_data_bytes of serialized NaClDesc transfer data, followed
 * by user payload bytes.  Received NaClHandles are internalized into
 * NaClDesc objects and ownership of up to nitmhp->ndesc_length of them
 * is transferred to nitmhp->ndescv; any surplus is destroyed here.
 *
 * |flags| may contain NACL_ABI_IMC_NONBLOCK; any other bits are logged
 * and stripped.  |quota_interface| is passed through to descriptor
 * internalization.
 *
 * Returns the number of user payload bytes copied out (>= 0), or a
 * negative NaCl ABI errno on failure.
 */
ssize_t NaClImcRecvTypedMessage(
    struct NaClDesc               *channel,
    struct NaClImcTypedMsgHdr     *nitmhp,
    int                           flags,
    struct NaClDescQuotaInterface *quota_interface) {
  int                       supported_flags;
  ssize_t                   retval;
  char                      *recv_buf;       /* kernel-side staging buffer */
  size_t                    user_bytes;      /* total capacity of user iov */
  NaClHandle                kern_handle[NACL_ABI_IMC_DESC_MAX];
  struct NaClIOVec          recv_iov;
  struct NaClMessageHeader  recv_hdr;
  ssize_t                   total_recv_bytes;
  struct NaClInternalHeader intern_hdr;
  size_t                    recv_user_bytes_avail;
  size_t                    tmp;
  char                      *user_data;
  size_t                    iov_copy_size;
  struct NaClDescXferState  xfer;
  struct NaClDesc           *new_desc[NACL_ABI_IMC_DESC_MAX];
  int                       xfer_status;
  size_t                    i;
  size_t                    num_user_desc;

  NaClLog(4,
          "Entered NaClImcRecvTypedMsg(0x%08"NACL_PRIxPTR", "
          "0x%08"NACL_PRIxPTR", %d)\n",
          (uintptr_t) channel, (uintptr_t) nitmhp, flags);

  /* Strip (but log) any flag bits we do not understand. */
  supported_flags = NACL_ABI_IMC_NONBLOCK;
  if (0 != (flags & ~supported_flags)) {
    NaClLog(LOG_WARNING,
            "WARNING: NaClImcRecvTypedMsg: unknown IMC flag used: 0x%x\n",
            flags);
    flags &= supported_flags;
  }

  /* Validate caller-supplied vector lengths before touching them. */
  if (nitmhp->iov_length > NACL_ABI_IMC_IOVEC_MAX) {
    NaClLog(4, "gather/scatter array too large\n");
    return -NACL_ABI_EINVAL;
  }
  if (nitmhp->ndesc_length > NACL_ABI_IMC_USER_DESC_MAX) {
    NaClLog(4, "handle vector too long\n");
    return -NACL_ABI_EINVAL;
  }

  /* Sum the iov capacities with an explicit overflow guard. */
  user_bytes = 0;
  for (i = 0; i < nitmhp->iov_length; ++i) {
    if (user_bytes > SIZE_T_MAX - nitmhp->iov[i].length) {
      NaClLog(4, "integer overflow in iov length summation\n");
      return -NACL_ABI_EINVAL;
    }
    user_bytes += nitmhp->iov[i].length;
  }
  /*
   * if user_bytes > NACL_ABI_IMC_USER_BYTES_MAX,
   * we will just never fill up all the buffer space.
   */
  user_bytes = min_size(user_bytes, NACL_ABI_IMC_USER_BYTES_MAX);
  /*
   * user_bytes = \min(\sum_{i=0}{nitmhp->iov_length-1} nitmhp->iov[i].length,
   *                   NACL_ABI_IMC_USER_BYTES_MAX)
   */

  recv_buf = NULL;
  memset(new_desc, 0, sizeof new_desc);
  /*
   * from here on, set retval and jump to cleanup code.
   */

  recv_buf = malloc(NACL_ABI_IMC_BYTES_MAX);
  if (NULL == recv_buf) {
    NaClLog(4, "no memory for receive buffer\n");
    retval = -NACL_ABI_ENOMEM;
    goto cleanup;
  }

  /* Receive the entire message into the single staging buffer. */
  recv_iov.base = (void *) recv_buf;
  recv_iov.length = NACL_ABI_IMC_BYTES_MAX;

  recv_hdr.iov = &recv_iov;
  recv_hdr.iov_length = 1;

  for (i = 0; i < NACL_ARRAY_SIZE(kern_handle); ++i) {
    kern_handle[i] = NACL_INVALID_HANDLE;
  }

  if (NACL_DESC_IMC_SOCKET == ((struct NaClDescVtbl const *)
                               channel->base.vtbl)->typeTag) {
    /*
     * Channel can transfer access rights.
     */
    recv_hdr.handles = kern_handle;
    recv_hdr.handle_count = NACL_ARRAY_SIZE(kern_handle);
    NaClLog(4, "Connected socket, may transfer descriptors\n");
  } else {
    /*
     * Channel cannot transfer access rights.  The syscall would fail
     * if recv_iov.length is non-zero.
     */
    recv_hdr.handles = (NaClHandle *) NULL;
    recv_hdr.handle_count = 0;
    NaClLog(4, "Transferable Data Only socket\n");
  }

  recv_hdr.flags = 0;  /* just to make it obvious; IMC will clear it for us */

  total_recv_bytes = (*((struct NaClDescVtbl const *) channel->base.vtbl)->
                      LowLevelRecvMsg)(channel,
                                       &recv_hdr,
                                       flags);
  if (NaClSSizeIsNegErrno(&total_recv_bytes)) {
    NaClLog(1, "LowLevelRecvMsg failed, returned %"NACL_PRIdS"\n",
            total_recv_bytes);
    retval = total_recv_bytes;
    goto cleanup;
  }
  /* total_recv_bytes >= 0 */

  /*
   * NB: recv_hdr.flags may already contain NACL_ABI_MESSAGE_TRUNCATED
   * and/or NACL_ABI_HANDLES_TRUNCATED.
   *
   * First, parse the NaClInternalHeader and any subsequent fields to
   * extract and internalize the NaClDesc objects from the array of
   * NaClHandle values.
   *
   * Copy out to user buffer.  Possibly additional truncation may occur.
   *
   * Since total_recv_bytes >= 0, the cast to size_t is value preserving.
   */
  if ((size_t) total_recv_bytes < sizeof intern_hdr) {
    NaClLog(4, ("only received %"NACL_PRIdS" (0x%"NACL_PRIxS") bytes,"
                " but internal header is %"NACL_PRIdS" (0x%"NACL_PRIxS
                ") bytes\n"),
            total_recv_bytes, total_recv_bytes,
            sizeof intern_hdr, sizeof intern_hdr);
    retval = -NACL_ABI_EIO;
    goto cleanup;
  }
  memcpy(&intern_hdr, recv_buf, sizeof intern_hdr);
  /*
   * Future code should handle old versions in a backward compatible way.
   */
  if (NACL_HANDLE_TRANSFER_PROTOCOL != intern_hdr.h.xfer_protocol_version) {
    NaClLog(4, ("protocol version mismatch:"
                " got %x, but can only handle %x\n"),
            intern_hdr.h.xfer_protocol_version,
            NACL_HANDLE_TRANSFER_PROTOCOL);
    /*
     * The returned value should be a special version mismatch error
     * code that, along with the recv_buf, permit retrying with later
     * decoders.
     */
    retval = -NACL_ABI_EIO;
    goto cleanup;
  }
  /*
   * Ensure the claimed descriptor payload actually fits inside what
   * was received before computing the user-data region below.
   */
  if ((size_t) total_recv_bytes < (intern_hdr.h.descriptor_data_bytes
                                   + sizeof intern_hdr)) {
    NaClLog(4, ("internal header (size %"NACL_PRIdS" (0x%"NACL_PRIxS")) "
                "says there are "
                "%d (0x%x) NRD xfer descriptor bytes, "
                "but we received %"NACL_PRIdS" (0x%"NACL_PRIxS") bytes\n"),
            sizeof intern_hdr, sizeof intern_hdr,
            intern_hdr.h.descriptor_data_bytes,
            intern_hdr.h.descriptor_data_bytes,
            total_recv_bytes, total_recv_bytes);
    retval = -NACL_ABI_EIO;
    goto cleanup;
  }
  recv_user_bytes_avail = (total_recv_bytes
                           - intern_hdr.h.descriptor_data_bytes
                           - sizeof intern_hdr);
  /*
   * NaCl app asked for user_bytes, and we have recv_user_bytes_avail.
   * Set recv_user_bytes_avail to the min of these two values, as well
   * as inform the caller if data truncation occurred.
   *
   * NOTE(review): data truncation is recorded in recv_hdr.flags here,
   * while descriptor truncation below is recorded in nitmhp->flags;
   * nothing in this function copies recv_hdr.flags back into nitmhp.
   * Verify that callers actually observe DATA_TRUNCATED -- this looks
   * asymmetric.
   */
  if (user_bytes < recv_user_bytes_avail) {
    recv_hdr.flags |= NACL_ABI_RECVMSG_DATA_TRUNCATED;
  }
  recv_user_bytes_avail = min_size(recv_user_bytes_avail, user_bytes);

  retval = recv_user_bytes_avail;  /* default from hence forth */

  /*
   * Let UserDataSize := recv_user_bytes_avail.  (bind to current value)
   */

  user_data = recv_buf + sizeof intern_hdr + intern_hdr.h.descriptor_data_bytes;
  /*
   * Let StartUserData := user_data
   */

  /*
   * Precondition: user_data in [StartUserData, StartUserData + UserDataSize].
   *
   * Invariant:
   *  user_data + recv_user_bytes_avail == StartUserData + UserDataSize
   */
  for (i = 0; i < nitmhp->iov_length && 0 < recv_user_bytes_avail; ++i) {
    iov_copy_size = min_size(nitmhp->iov[i].length, recv_user_bytes_avail);

    memcpy(nitmhp->iov[i].base, user_data, iov_copy_size);

    user_data += iov_copy_size;
    /*
     * subtraction could not underflow due to how recv_user_bytes_avail was
     * computed; however, we are paranoid, in case the code changes.
     */
    tmp = recv_user_bytes_avail - iov_copy_size;
    if (tmp > recv_user_bytes_avail) {
      NaClLog(LOG_FATAL,
              "NaClImcRecvTypedMessage: impossible underflow occurred");
    }
    recv_user_bytes_avail = tmp;
  }
  /*
   * postcondition: recv_user_bytes_avail == 0.
   *
   * NB: 0 < recv_user_bytes_avail \rightarrow i < nitmhp->iov_length
   * must hold, due to how user_bytes is computed.  We leave the
   * unnecessary test in the loop condition to avoid future code
   * changes from causing problems as defensive programming.
   */

  /*
   * Now extract/internalize the NaClHandles as NaClDesc objects.
   * Note that we will extract beyond nitmhp->desc_length, since we
   * must still destroy the ones that are dropped.
   */
  xfer.next_byte = recv_buf + sizeof intern_hdr;
  xfer.byte_buffer_end = xfer.next_byte + intern_hdr.h.descriptor_data_bytes;
  xfer.next_handle = kern_handle;
  xfer.handle_buffer_end = kern_handle + recv_hdr.handle_count;

  i = 0;
  while (xfer.next_byte < xfer.byte_buffer_end) {
    struct NaClDesc *out;

    xfer_status = NaClDescInternalizeFromXferBuffer(&out, &xfer,
                                                    quota_interface);
    NaClLog(4, "NaClDescInternalizeFromXferBuffer: returned %d\n",
            xfer_status);
    if (0 == xfer_status) {
      /* end of descriptors reached */
      break;
    }
    /*
     * The peer is trusted code, so overflowing new_desc[] indicates a
     * protocol violation serious enough to abort.
     */
    if (i >= NACL_ARRAY_SIZE(new_desc)) {
      NaClLog(LOG_FATAL,
              ("NaClImcRecvTypedMsg: trusted peer tried to send too many"
               " descriptors!\n"));
    }
    if (1 != xfer_status) {
      /* xfer_status < 0, out did not receive output */
      retval = -NACL_ABI_EIO;
      goto cleanup;
    }
    new_desc[i] = out;
    out = NULL;
    ++i;
  }
  num_user_desc = i;  /* actual number of descriptors received */

  if (nitmhp->ndesc_length < num_user_desc) {
    /* surplus descriptors stay in new_desc[] and are unref'd in cleanup */
    nitmhp->flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
    num_user_desc = nitmhp->ndesc_length;
  }

  /* transfer ownership to nitmhp->ndescv; some may be left behind */
  for (i = 0; i < num_user_desc; ++i) {
    nitmhp->ndescv[i] = new_desc[i];
    new_desc[i] = NULL;
  }

  /* cast is safe because we clamped num_user_desc earlier to
   * be no greater than the original value of nithmp->ndesc_length.
   */
  nitmhp->ndesc_length = (nacl_abi_size_t)num_user_desc;

  /* retval is number of bytes received */

cleanup:
  free(recv_buf);
  /*
   * Note that we must exercise discipline when constructing NaClDesc
   * objects from NaClHandles -- the NaClHandle values *must* be set
   * to NACL_INVALID_HANDLE after the construction of the NaClDesc
   * where ownership of the NaClHandle is transferred into the NaCDesc
   * object.  Otherwise, between new_desc and kern_handle cleanup code,
   * a NaClHandle might be closed twice.
   */
  for (i = 0; i < NACL_ARRAY_SIZE(new_desc); ++i) {
    if (NULL != new_desc[i]) {
      NaClDescUnref(new_desc[i]);
      new_desc[i] = NULL;
    }
  }
  for (i = 0; i < NACL_ARRAY_SIZE(kern_handle); ++i) {
    if (NACL_INVALID_HANDLE != kern_handle[i]) {
      (void) NaClClose(kern_handle[i]);
    }
  }

  NaClLog(3, "NaClImcRecvTypedMsg: returning %"NACL_PRIdS"\n", retval);
  return retval;
}
static int NaClGioShmCtorIntern(struct NaClGioShm *self, struct NaClDesc *shmp, size_t shm_size) { struct nacl_abi_stat stbuf; int vfret; int rval = 0; self->base.vtbl = NULL; self->shmp = NULL; self->cur_window = NULL; if (0 != (vfret = (*((struct NaClDescVtbl const *) shmp->base.vtbl)-> Fstat)(shmp, &stbuf))) { NaClLog(1, "NaClGioShmCtorIntern: Fstat virtual function returned %d\n", vfret); goto cleanup; } /* * nacl_abi_off_t is signed 32-bit quantity, but we don't want to * hardwire in that knowledge here. * * size_t is unsigned, and may be 32-bits or 64-bits, depending on * the underlying host OS. * * we want to ensure that the shm's size, as reported by the desc * abstraction and thus is in nacl_abi_off_t, is at least that * claimed by the ctor argument. so, if (as Integers) * * stbuf.nacl_abi_st_size < shm_size * * holds, this is an error. however, the value-preserving cast rule * makes this harder. * * Note that for signed sizes (ssize_t), the kernel ABI generally * only reserve -1 for error, and asking for an I/O operation via a * size_t that would succeed but yield a ssize_t return value that * is negative is okay, since -1 is never valid as an I/O size on a * von Neuman machine (except for a writev where the iov entries * overlap): there just isn't that much data to read/write, when the * instructions also take up space in the process address space. * Whether requiring the programmer to detect this corner case is * advisable is a different argument -- similar to negative ssize_t * sizes, the syscall can just succeed with a partial transfer to * avoid returning -1 on a success, just as we could avoid returning * negative values; in practice, we do the latter, since we often * see code written that tests for syscall error by comparing the * return value to see if it is less than zero, rather than if it is * equal to -1. 
*/ if (stbuf.nacl_abi_st_size < 0) { NaClLog(LOG_ERROR, ("NaClGioShmCtorIntern: actual shm size negative" " %"NACL_PRIdNACL_OFF"\n"), stbuf.nacl_abi_st_size); goto cleanup; } if (stbuf.nacl_abi_st_size <= (nacl_abi_off_t) SIZE_T_MAX && (size_t) stbuf.nacl_abi_st_size < shm_size) { NaClLog(LOG_ERROR, ("NaClGioShmCtorIntern: claimed shm file size greater than" " actual shm segment size, %"NACL_PRIuS" vs" " %"NACL_PRIuNACL_OFF"\n"), shm_size, stbuf.nacl_abi_st_size); goto cleanup; } if (OFF_T_MAX < SIZE_T_MAX && (size_t) OFF_T_MAX < shm_size) { NaClLog(LOG_ERROR, ("NaClGioShmCtorIntern: claimed shm file size greater than" " off_t max value, %"NACL_PRId64"\n"), (int64_t) OFF_T_MAX); goto cleanup; } self->shmp = NaClDescRef(shmp); self->io_offset = 0; self->shm_sz = shm_size; self->window_offset = 0; self->base.vtbl = &kNaClGioShmVtbl; if (!NaClGioShmSetWindow(self, 0)) { NaClLog(LOG_ERROR, ("NaClGioShmCtorIntern: initial seek to beginning failed\n")); NaClDescUnref(self->shmp); self->shmp = NULL; self->shm_sz = 0; self->base.vtbl = NULL; goto cleanup; } rval = 1; cleanup: return rval; }