int NaClDescConnCapFdInternalize( struct NaClDesc **out_desc, struct NaClDescXferState *xfer) { struct NaClDescConnCapFd *conn_cap; int rv; conn_cap = malloc(sizeof(*conn_cap)); if (NULL == conn_cap) { return -NACL_ABI_ENOMEM; } if (!NaClDescInternalizeCtor(&conn_cap->base, xfer)) { free(conn_cap); conn_cap = NULL; rv = -NACL_ABI_ENOMEM; goto cleanup; } if (xfer->next_handle == xfer->handle_buffer_end) { rv = -NACL_ABI_EIO; goto cleanup; } rv = NaClDescConnCapFdSubclassCtor(conn_cap, *xfer->next_handle); if (!rv) { rv = -NACL_ABI_ENOMEM; goto cleanup; } *xfer->next_handle++ = NACL_INVALID_HANDLE; *out_desc = &conn_cap->base; rv = 0; cleanup: if (rv < 0) { NaClDescSafeUnref((struct NaClDesc *) conn_cap); } return rv; }
/*
 * Remove |name| from the name service |nnsp|.
 *
 * Returns NACL_NAME_SERVICE_SUCCESS if the entry existed and was
 * removed, NACL_NAME_SERVICE_NAME_NOT_FOUND otherwise.  The entry is
 * unlinked under the lock, but its resources are released after the
 * lock has been dropped.
 */
int NaClNameServiceDeleteName(struct NaClNameService *nnsp,
                              char const *name) {
  struct NaClNameServiceEntry **link;
  struct NaClNameServiceEntry *doomed = NULL;
  int status = NACL_NAME_SERVICE_NAME_NOT_FOUND;

  NaClXMutexLock(&nnsp->mu);
  link = NameServiceSearch(&nnsp->head, name);
  if (NULL != *link) {
    /* Unlink while holding the lock; defer the actual teardown. */
    doomed = *link;
    *link = doomed->next;
    status = NACL_NAME_SERVICE_SUCCESS;
  }
  NaClXMutexUnlock(&nnsp->mu);

  /* Teardown happens lock-free: unref the descriptor, notify the
   * factory (if any) of the deletion, then free the entry storage. */
  if (NULL != doomed) {
    NaClDescSafeUnref(doomed->entry);
    if (NULL != doomed->factory) {
      (void) (*doomed->factory)(doomed->state, doomed->name, 0,
                                (struct NaClDesc **) NULL);
    }
    free((void *) doomed->name);
    free(doomed);
  }
  return status;
}
/* * Look up by string name, resulting in a handle (if name is in the * preimage), a object proxy handle, and an error code. */ static void NaClReverseServiceManifestLookupRpc( struct NaClSrpcRpc *rpc, struct NaClSrpcArg **in_args, struct NaClSrpcArg **out_args, struct NaClSrpcClosure *done_cls) { struct NaClReverseService *nrsp = (struct NaClReverseService *) rpc->channel->server_instance_data; char *url_key = in_args[0]->arrays.str; int flags = in_args[0]->u.ival; struct NaClFileInfo info; struct NaClHostDesc *host_desc; struct NaClDescIoDesc *io_desc = NULL; struct NaClDesc *nacl_desc = NULL; memset(&info, 0, sizeof(info)); NaClLog(4, "Entered ManifestLookupRpc: 0x%08"NACL_PRIxPTR", %s, %d\n", (uintptr_t) nrsp, url_key, flags); NaClLog(4, "ManifestLookupRpc: invoking OpenManifestEntry\n"); if (!(*NACL_VTBL(NaClReverseInterface, nrsp->iface)-> OpenManifestEntry)(nrsp->iface, url_key, &info) || -1 == info.desc) { NaClLog(1, "ManifestLookupRpc: OpenManifestEntry failed.\n"); out_args[0]->u.ival = NACL_ABI_ENOENT; /* failed */ out_args[1]->u.hval = (struct NaClDesc *) NaClDescInvalidMake(); out_args[2]->u.lval = 0; out_args[3]->u.lval = 0; out_args[4]->u.count = 0; goto done; } NaClLog(4, "ManifestLookupRpc: OpenManifestEntry returned desc %d.\n", info.desc); host_desc = (struct NaClHostDesc *) malloc(sizeof *host_desc); CHECK(host_desc != NULL); CHECK(NaClHostDescPosixTake(host_desc, info.desc, NACL_ABI_O_RDONLY) == 0); io_desc = NaClDescIoDescMake(host_desc); CHECK(io_desc != NULL); nacl_desc = (struct NaClDesc *) io_desc; out_args[0]->u.ival = 0; /* OK */ out_args[1]->u.hval = nacl_desc; out_args[2]->u.lval = (int64_t) info.file_token.lo; out_args[3]->u.lval = (int64_t) info.file_token.hi; out_args[4]->u.count = 10; strncpy(out_args[4]->arrays.carr, "123456789", 10); /* * TODO(phosek): the array should be an object reference (issue 3035). */ done: rpc->result = NACL_SRPC_RESULT_OK; (*done_cls->Run)(done_cls); NaClDescSafeUnref((struct NaClDesc *) io_desc); }
/*
 * Reconstruct a NaClDescImcShm from the wire form in |xfer|: one
 * transferred OS handle plus the shm size serialized in the byte
 * stream.  On success *out_desc receives the new descriptor (caller
 * owns the reference) and 0 is returned; otherwise a negative
 * NACL_ABI_* errno is returned.
 */
int NaClDescImcShmInternalize(struct NaClDesc **out_desc,
                              struct NaClDescXferState *xfer,
                              struct NaClDescQuotaInterface *quota_interface) {
  int rv;
  struct NaClDescImcShm *ndisp;
  NaClHandle h;
  nacl_off64_t hsize;

  UNREFERENCED_PARAMETER(quota_interface);

  rv = -NACL_ABI_EIO;  /* catch-all error code */
  ndisp = malloc(sizeof *ndisp);
  if (NULL == ndisp) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  if (!NaClDescInternalizeCtor((struct NaClDesc *) ndisp, xfer)) {
    /* Base was never constructed: plain free, and NULL the pointer so
     * cleanup's SafeUnref is a no-op. */
    free(ndisp);
    ndisp = NULL;
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  /* Validate that the xfer state still holds one handle ... */
  if (xfer->next_handle == xfer->handle_buffer_end) {
    rv = -NACL_ABI_EIO;
    goto cleanup;
  }
  /* ... and enough bytes for the serialized size field.  NOTE(review):
   * the bound uses sizeof ndisp->size while the copy below uses sizeof
   * hsize — assumes both are nacl_off64_t-sized; confirm in the struct
   * declaration. */
  if (xfer->next_byte + sizeof ndisp->size > xfer->byte_buffer_end) {
    rv = -NACL_ABI_EIO;
    goto cleanup;
  }
  /* Consume the handle and invalidate the slot (ownership moves to h). */
  h = *xfer->next_handle;
  *xfer->next_handle++ = NACL_INVALID_HANDLE;
  /* Consume the serialized shm size from the byte stream. */
  memcpy(&hsize, xfer->next_byte, sizeof hsize);
  xfer->next_byte += sizeof hsize;

  if (!NaClDescImcShmSubclassCtor(ndisp, h, hsize)) {
    rv = -NACL_ABI_EIO;
    goto cleanup;
  }
  *out_desc = (struct NaClDesc *) ndisp;
  rv = 0;
 cleanup:
  if (rv < 0) {
    /* SafeUnref tolerates NULL (malloc-failure / base-ctor-failure paths). */
    NaClDescSafeUnref((struct NaClDesc *) ndisp);
  }
  return rv;
}
/*
 * Free a vmmap entry: log it, drop the reference on its backing
 * descriptor (if any), and release the entry storage itself.
 */
void NaClVmmapEntryFree(struct NaClVmmapEntry *entry) {
  NaClLog(4,
          ("NaClVmmapEntryFree(0x%08"NACL_PRIxPTR"): "
           "(0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
           "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n"),
          (uintptr_t) entry,
          entry->page_num, entry->npages,
          entry->prot, entry->flags,
          (uintptr_t) entry->desc, entry->offset);
  /*
   * NaClDescSafeUnref is NULL-tolerant (it is routinely invoked on
   * possibly-NULL pointers in cleanup paths), so the previous
   * `if (entry->desc != NULL)` guard was redundant.
   */
  NaClDescSafeUnref(entry->desc);
  free(entry);
}
int NaClDescXferableDataDescInternalize( struct NaClDesc **baseptr, struct NaClDescXferState *xfer, struct NaClDescQuotaInterface *quota_interface) { int rv; struct NaClDescXferableDataDesc *ndxdp; UNREFERENCED_PARAMETER(quota_interface); NaClLog(4, "Entered NaClDescXferableDataDescInternalize\n"); ndxdp = malloc(sizeof *ndxdp); if (NULL == ndxdp) { NaClLog(LOG_ERROR, "NaClXferableDataDescInternalize: no memory\n"); rv = -NACL_ABI_ENOMEM; goto cleanup; } rv = NaClDescInternalizeCtor((struct NaClDesc *) ndxdp, xfer); if (!rv) { free(ndxdp); ndxdp = NULL; goto cleanup; } if (xfer->next_handle == xfer->handle_buffer_end) { NaClLog(LOG_ERROR, ("NaClXferableDataDescInternalize: no descriptor" " left in xfer state\n")); rv = -NACL_ABI_EIO; goto cleanup; } if (!NaClDescXferableDataDescSubclassesCtor(ndxdp, *xfer->next_handle)) { NaClLog(LOG_ERROR, "NaClXferableDataDescInternalize: descriptor ctor error\n"); rv = -NACL_ABI_EIO; goto cleanup; } *xfer->next_handle++ = NACL_INVALID_HANDLE; *baseptr = (struct NaClDesc *) ndxdp; rv = 0; cleanup: if (rv < 0) { NaClDescSafeUnref((struct NaClDesc *) ndxdp); } return rv; }
int NaClDescConnCapInternalize(struct NaClDesc **out_desc, struct NaClDescXferState *xfer, struct NaClDescQuotaInterface *quota_interface) { int rv; struct NaClSocketAddress nsa; struct NaClDescConnCap *ndccp; UNREFERENCED_PARAMETER(quota_interface); rv = -NACL_ABI_EIO; /* catch-all */ ndccp = malloc(sizeof *ndccp); if (NULL == ndccp) { rv = -NACL_ABI_ENOMEM; goto cleanup; } if (!NaClDescInternalizeCtor((struct NaClDesc *) ndccp, xfer)) { free(ndccp); ndccp = NULL; rv = -NACL_ABI_ENOMEM; goto cleanup; } if (xfer->next_byte + NACL_PATH_MAX > xfer->byte_buffer_end) { rv = -NACL_ABI_EIO; goto cleanup; } memcpy(nsa.path, xfer->next_byte, NACL_PATH_MAX); if (!NaClDescConnCapSubclassCtor(ndccp, &nsa)) { rv = -NACL_ABI_EIO; goto cleanup; } *out_desc = (struct NaClDesc *) ndccp; rv = 0; xfer->next_byte += NACL_PATH_MAX; cleanup: if (rv < 0) { NaClDescSafeUnref((struct NaClDesc *) ndccp); } return rv; }
/*
 * Set up the shared-memory-backed dynamic text (JIT) region for |nap|.
 *
 * Computes the region [round(end-of-static-text), rodata_start) --
 * falling back to data_start, then to an empty region -- allocates an
 * executable shm of that size, maps it over the existing pages at the
 * corresponding system address, and allocates the page-allocation
 * bitmap.  Returns LOAD_OK on success (also for the legitimate
 * empty-region and no-shm cases) or a LOAD_* error code.
 */
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode retval = LOAD_INTERNAL;
  uintptr_t dynamic_text_size;
  struct NaClDescImcShm *shm = NULL;
  uintptr_t shm_vaddr_base;
  int mmap_protections;
  uintptr_t mmap_ret;
  uintptr_t shm_upper_bound;
  uintptr_t text_sysaddr;

  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  /* Dynamic text must start on an allocation-page boundary. */
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            " rodata / data segments not allocation aligned\n");
    NaClLog(4, " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  if (0 == shm_upper_bound) {
    /* Neither rodata nor data present: empty dynamic text region. */
    shm_upper_bound = shm_vaddr_base;
  }
  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    NaClLog(4, "NaClMakeDynamicTextShared: shm object allocation failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    retval = LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
    goto cleanup;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaClPageFree((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
      NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  /* Map the shm over the freed pages via the descriptor's vtable. */
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    /* LOG_FATAL aborts; no cleanup path is taken from here. */
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  /* One bitmap bit per NACL_MAP_PAGESIZE page of dynamic text. */
  nap->dynamic_page_bitmap =
      BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  /* nap takes over the shm reference. */
  nap->text_shm = &shm->base;
  retval = LOAD_OK;

 cleanup:
  if (LOAD_OK != retval) {
    /*
     * NOTE(review): both SafeUnref and free would be a double-free on a
     * fully-ctor'd shm, but every error path that reaches here has shm
     * == NULL (malloc failure, or ctor failure which frees and NULLs),
     * so both calls are no-ops in practice.
     */
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }
  return retval;
}
/*
 * Destructor for a PortableDesc: release the wrapped descriptor and
 * mark the slot invalid so later use is detectable.
 */
static void PortableDescDtor(struct PortableDesc* self) {
#ifndef __native_client__
  /* Trusted build: raw_desc is a refcounted NaClDesc; drop our ref.
   * NOTE(review): in the __native_client__ (untrusted) build this is
   * compiled out — presumably raw_desc is a plain descriptor number
   * there; confirm against the struct definition. */
  NaClDescSafeUnref(self->raw_desc);
#endif /* __native_client__ */
  self->raw_desc = kInvalidDesc;
}
/* set *out_desc to struct NaClDescIo * output */
/*
 * Reconstruct a NaClDescIoDesc from the transfer state |xfer|: one
 * transferred OS handle plus a serialized int flags value.  On
 * success, *out_desc receives the new descriptor (caller owns the
 * reference) and 0 is returned; otherwise a negative NACL_ABI_* errno
 * is returned.
 *
 * Cleanup is a cascade: later labels undo less, and each body is
 * guarded by rv < 0 so the success path falls through harmlessly.
 */
int NaClDescIoInternalize(struct NaClDesc **out_desc,
                          struct NaClDescXferState *xfer,
                          struct NaClDescQuotaInterface *quota_interface) {
  int rv;
  NaClHandle h;
  int d;
  int flags;
  struct NaClHostDesc *nhdp;
  struct NaClDescIoDesc *ndidp;

  UNREFERENCED_PARAMETER(quota_interface);

  rv = -NACL_ABI_EIO;  /* catch-all */
  h = NACL_INVALID_HANDLE;
  nhdp = NULL;
  ndidp = NULL;

  nhdp = malloc(sizeof *nhdp);
  if (NULL == nhdp) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  ndidp = malloc(sizeof *ndidp);
  if (!ndidp) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  if (!NaClDescInternalizeCtor((struct NaClDesc *) ndidp, xfer)) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  /* Need one handle and sizeof(flags) bytes in the xfer state. */
  if (xfer->next_handle == xfer->handle_buffer_end ||
      xfer->next_byte + sizeof ndidp->hd->flags > xfer->byte_buffer_end) {
    rv = -NACL_ABI_EIO;
    goto cleanup_ndidp_dtor;
  }
  NACL_COMPILE_TIME_ASSERT(sizeof flags == sizeof(ndidp->hd->flags));
  memcpy(&flags, xfer->next_byte, sizeof flags);
  xfer->next_byte += sizeof flags;

  /* Consume the handle; invalidate the slot (ownership moves to h). */
  h = *xfer->next_handle;
  *xfer->next_handle++ = NACL_INVALID_HANDLE;

#if NACL_WINDOWS
  /* On Windows, convert the OS HANDLE into a CRT file descriptor. */
  if (-1 == (d = _open_osfhandle((intptr_t) h, _O_RDWR | _O_BINARY))) {
    rv = -NACL_ABI_EIO;
    goto cleanup_ndidp_dtor;
  }
#else
  /* On POSIX, the transferred handle already is the fd. */
  d = h;
#endif
  /*
   * We mark it as read/write, but don't really know for sure until we
   * try to make those syscalls (in which case we'd get EBADF).
   */
  if ((rv = NaClHostDescPosixTake(nhdp, d, flags)) < 0) {
    goto cleanup_ndidp_dtor;
  }
  h = NACL_INVALID_HANDLE;  /* nhdp took ownership of h */

  if (!NaClDescIoDescSubclassCtor(ndidp, nhdp)) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup_nhdp_dtor;
  }
  /*
   * ndidp took ownership of nhdp, now give ownership of ndidp to caller.
   */
  *out_desc = (struct NaClDesc *) ndidp;
  rv = 0;

 cleanup_nhdp_dtor:
  if (rv < 0) {
    /* Close the fd nhdp holds; the struct itself is freed below. */
    if (0 != NaClHostDescClose(nhdp)) {
      NaClLog(LOG_FATAL, "NaClDescIoInternalize: NaClHostDescClose failed\n");
    }
  }
 cleanup_ndidp_dtor:
  if (rv < 0) {
    /* Unref the (base-)constructed ndidp; NULL so the free below is a
     * no-op on this already-released object. */
    NaClDescSafeUnref((struct NaClDesc *) ndidp);
    ndidp = NULL;
  }
 cleanup:
  if (rv < 0) {
    free(nhdp);
    free(ndidp);
    if (NACL_INVALID_HANDLE != h) {
      /* Handle was consumed from xfer but never handed to nhdp. */
      (void) NaClClose(h);
    }
  }
  return rv;
}
/*
 * imc_recvmsg syscall: receive a typed IMC message on descriptor |d|
 * into the user-supplied NaClAbiNaClImcMsgHdr at user address
 * |nanimhp|.
 *
 * Validates and snapshots the user header and iov array, translates
 * iov bases to system addresses, performs the receive through the
 * descriptor's RecvMsg vtable entry, installs any received
 * descriptors into |nap|'s descriptor table, and copies descriptor
 * numbers plus the updated header back out to user space.
 *
 * Returns the number of payload bytes received, or a negative
 * NACL_ABI_* errno.
 */
int32_t NaClSysImcRecvmsg(struct NaClAppThread *natp,
                          int d,
                          uint32_t nanimhp,
                          int flags) {
  struct NaClApp *nap = natp->nap;
  int32_t retval = -NACL_ABI_EINVAL;
  ssize_t ssize_retval;
  uintptr_t sysaddr;
  size_t i;
  struct NaClDesc *ndp;
  struct NaClAbiNaClImcMsgHdr kern_nanimh;        /* kernel snapshot of header */
  struct NaClAbiNaClImcMsgIoVec kern_naiov[NACL_ABI_IMC_IOVEC_MAX];
  struct NaClImcMsgIoVec kern_iov[NACL_ABI_IMC_IOVEC_MAX];
  int32_t usr_desc[NACL_ABI_IMC_USER_DESC_MAX];
  struct NaClImcTypedMsgHdr recv_hdr;
  struct NaClDesc *new_desc[NACL_ABI_IMC_DESC_MAX];  /* received descriptors */
  nacl_abi_size_t num_user_desc;
  struct NaClDesc *invalid_desc = NULL;

  NaClLog(3,
          ("Entered NaClSysImcRecvMsg(0x%08"NACL_PRIxPTR", %d,"
           " 0x%08"NACL_PRIx32")\n"),
          (uintptr_t) natp, d, nanimhp);

  /*
   * First, we validate user-supplied message headers before
   * allocating a receive buffer.
   */
  if (!NaClCopyInFromUser(nap, &kern_nanimh, nanimhp, sizeof kern_nanimh)) {
    NaClLog(4, "NaClImcMsgHdr not in user address space\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_leave;
  }
  /* copy before validating */

  if (kern_nanimh.iov_length > NACL_ABI_IMC_IOVEC_MAX) {
    NaClLog(4, "gather/scatter array too large: %"NACL_PRIdNACL_SIZE"\n",
            kern_nanimh.iov_length);
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }
  if (kern_nanimh.desc_length > NACL_ABI_IMC_USER_DESC_MAX) {
    NaClLog(4, "handle vector too long: %"NACL_PRIdNACL_SIZE"\n",
            kern_nanimh.desc_length);
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }

  if (kern_nanimh.iov_length > 0) {
    /*
     * Copy IOV array into kernel space.  Validate this snapshot and do
     * user->kernel address conversions on this snapshot.
     */
    if (!NaClCopyInFromUser(nap, kern_naiov, (uintptr_t) kern_nanimh.iov,
                            (kern_nanimh.iov_length * sizeof kern_naiov[0]))) {
      NaClLog(4, "gather/scatter array not in user address space\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }
    /*
     * Convert every IOV base from user to system address, validate
     * range of bytes are really in user address space.
     */
    for (i = 0; i < kern_nanimh.iov_length; ++i) {
      sysaddr = NaClUserToSysAddrRange(nap,
                                       (uintptr_t) kern_naiov[i].base,
                                       kern_naiov[i].length);
      if (kNaClBadAddress == sysaddr) {
        NaClLog(4, "iov number %"NACL_PRIuS" not entirely in user space\n", i);
        retval = -NACL_ABI_EFAULT;
        goto cleanup_leave;
      }
      kern_iov[i].base = (void *) sysaddr;
      kern_iov[i].length = kern_naiov[i].length;
    }
  }

  if (kern_nanimh.desc_length > 0) {
    /* Pre-validate the user descv output range before receiving. */
    sysaddr = NaClUserToSysAddrRange(nap,
                                     (uintptr_t) kern_nanimh.descv,
                                     kern_nanimh.desc_length * sizeof(int32_t));
    if (kNaClBadAddress == sysaddr) {
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }
  }

  ndp = NaClAppGetDesc(nap, d);  /* takes a ref; dropped in cleanup */
  if (NULL == ndp) {
    NaClLog(4, "receiving descriptor invalid\n");
    retval = -NACL_ABI_EBADF;
    goto cleanup_leave;
  }

  recv_hdr.iov = kern_iov;
  recv_hdr.iov_length = kern_nanimh.iov_length;
  recv_hdr.ndescv = new_desc;
  recv_hdr.ndesc_length = NACL_ARRAY_SIZE(new_desc);
  memset(new_desc, 0, sizeof new_desc);
  recv_hdr.flags = 0;  /* just to make it obvious; IMC will clear it for us */

  /* lock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoWillStart(nap,
                      kern_naiov[i].base,
                      kern_naiov[i].base + kern_naiov[i].length - 1);
  }
  ssize_retval = NACL_VTBL(NaClDesc, ndp)->RecvMsg(ndp, &recv_hdr, flags);
  /* unlock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoHasEnded(nap,
                     kern_naiov[i].base,
                     kern_naiov[i].base + kern_naiov[i].length - 1);
  }

  /*
   * retval is number of user payload bytes received and excludes the
   * header bytes.
   */
  NaClLog(3, "NaClSysImcRecvMsg: RecvMsg() returned %"NACL_PRIdS"\n",
          ssize_retval);
  if (NaClSSizeIsNegErrno(&ssize_retval)) {
    /* negative error numbers all have valid 32-bit representations,
     * so this cast is safe.
     */
    retval = (int32_t) ssize_retval;
    goto cleanup;
  } else if (ssize_retval > INT32_MAX || ssize_retval < INT32_MIN) {
    retval = -NACL_ABI_EOVERFLOW;
    goto cleanup;
  } else {
    /* cast is safe due to range check above */
    retval = (int32_t) ssize_retval;
  }

  /*
   * NB: recv_hdr.flags may contain NACL_ABI_MESSAGE_TRUNCATED and/or
   * NACL_ABI_HANDLES_TRUNCATED.
   */
  kern_nanimh.flags = recv_hdr.flags;

  /*
   * Now internalize the NaClHandles as NaClDesc objects.
   */
  num_user_desc = recv_hdr.ndesc_length;

  if (kern_nanimh.desc_length < num_user_desc) {
    /* More descriptors arrived than the user asked for: drop the
     * excess and flag the truncation. */
    kern_nanimh.flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
    for (i = kern_nanimh.desc_length; i < num_user_desc; ++i) {
      NaClDescUnref(new_desc[i]);
      new_desc[i] = NULL;
    }
    num_user_desc = kern_nanimh.desc_length;
  }

  invalid_desc = (struct NaClDesc *) NaClDescInvalidMake();
  /* prepare to write out to user space the descriptor numbers */
  for (i = 0; i < num_user_desc; ++i) {
    if (invalid_desc == new_desc[i]) {
      /* Invalid-descriptor singleton maps to a well-known number. */
      usr_desc[i] = kKnownInvalidDescNumber;
      NaClDescUnref(new_desc[i]);
    } else {
      /* Table takes ownership of the new_desc[i] reference. */
      usr_desc[i] = NaClAppSetDescAvail(nap, new_desc[i]);
    }
    new_desc[i] = NULL;
  }

  if (0 != num_user_desc &&
      !NaClCopyOutToUser(nap, (uintptr_t) kern_nanimh.descv, usr_desc,
                         num_user_desc * sizeof usr_desc[0])) {
    /* Range was pre-validated above, so failure here is fatal. */
    NaClLog(LOG_FATAL,
            ("NaClSysImcRecvMsg: in/out ptr (descv %"NACL_PRIxPTR
             ") became invalid at copyout?\n"),
            (uintptr_t) kern_nanimh.descv);
  }

  kern_nanimh.desc_length = num_user_desc;
  if (!NaClCopyOutToUser(nap, nanimhp, &kern_nanimh, sizeof kern_nanimh)) {
    NaClLog(LOG_FATAL,
            "NaClSysImcRecvMsg: in/out ptr (iov) became"
            " invalid at copyout?\n");
  }
  /* copy out updated desc count, flags */

 cleanup:
  if (retval < 0) {
    /* On failure, drop any received descriptors not yet handed off. */
    for (i = 0; i < NACL_ARRAY_SIZE(new_desc); ++i) {
      if (NULL != new_desc[i]) {
        NaClDescUnref(new_desc[i]);
        new_desc[i] = NULL;
      }
    }
  }
  NaClDescUnref(ndp);
  NaClDescSafeUnref(invalid_desc);
  NaClLog(3, "NaClSysImcRecvMsg: returning %d\n", retval);
 cleanup_leave:
  return retval;
}