int NaClGioShmAllocCtor(struct NaClGioShm *self, size_t shm_size) {
  struct NaClDescImcShm *shmp;
  int                   rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  if (!NaClDescEffectorTrustedMemCtor(&self->eff)) {
    return 0;
  }

  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    return 0;
  }
  if (!NaClDescImcShmAllocCtor(shmp, shm_size, /* executable= */ 0)) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    free(shmp);
    return 0;
  }

  rv = NaClGioShmCtorIntern(self, (struct NaClDesc *) shmp, shm_size);
  if (!rv) {
    NaClDescUnref((struct NaClDesc *) shmp);
    free(shmp);
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
  }
  return rv;
}
int32_t NaClSysImcMemObjCreate(struct NaClAppThread *natp,
                               size_t               size) {
  struct NaClApp        *nap = natp->nap;
  int32_t               retval = -NACL_ABI_EINVAL;
  struct NaClDescImcShm *shmp;
  off_t                 size_as_off;

  NaClLog(3,
          ("Entered NaClSysImcMemObjCreate(0x%08"NACL_PRIxPTR
           " 0x%08"NACL_PRIxS")\n"),
          (uintptr_t) natp, size);

  /* This syscall is not used in Chromium so is disabled by default. */
  if (!NaClAclBypassChecks) {
    return -NACL_ABI_EACCES;
  }

  if (0 != (size & (NACL_MAP_PAGESIZE - 1))) {
    return -NACL_ABI_EINVAL;
  }
  /*
   * TODO(bsy): policy about maximum shm object size should be
   * enforced here.
   */

  size_as_off = (off_t) size;
  if (size_as_off < 0) {
    return -NACL_ABI_EINVAL;
  }

  shmp = NULL;

  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup;
  }

  if (!NaClDescImcShmAllocCtor(shmp, size_as_off, /* executable= */ 0)) {
    retval = -NACL_ABI_ENOMEM;  /* is this reasonable? */
    goto cleanup;
  }

  retval = NaClAppSetDescAvail(nap, (struct NaClDesc *) shmp);
  shmp = NULL;

cleanup:
  free(shmp);
  return retval;
}
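For orientation, here is a minimal untrusted-side sketch of how the syscall above is meant to be consumed: the descriptor it returns is mmap()'d to obtain a shared, page-aligned buffer. The imc_mem_obj_create() wrapper name and its declaration are assumptions for illustration, not taken from the excerpt.

/*
 * Hypothetical untrusted-side usage sketch.  imc_mem_obj_create() is the
 * assumed thin wrapper around the NaClSysImcMemObjCreate syscall; it is
 * declared here only to keep the sketch self-contained.
 */
#include <stddef.h>
#include <sys/mman.h>

extern int imc_mem_obj_create(size_t size);   /* assumed wrapper */

int demo_mem_obj(size_t size) {
  int  desc;
  void *buf;

  /* size must be a multiple of NACL_MAP_PAGESIZE, per the check above. */
  desc = imc_mem_obj_create(size);
  if (desc < 0) {
    return -1;   /* e.g. -NACL_ABI_EACCES when ACL bypass is off */
  }
  buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, desc, 0);
  if (MAP_FAILED == buf) {
    return -1;
  }
  /* ... use buf as ordinary shared memory ... */
  return munmap(buf, size);
}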
int NaClGioShmAllocCtor(struct NaClGioShm *self, size_t shm_size) {
  struct NaClDescImcShm *shmp;
  int                   rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    return 0;
  }
  if (!NaClDescImcShmAllocCtor(shmp, shm_size, /* executable= */ 0)) {
    free(shmp);
    return 0;
  }

  rv = NaClGioShmCtorIntern(self, (struct NaClDesc *) shmp, shm_size);
  NaClDescUnref((struct NaClDesc *) shmp);
  if (!rv) {
    free(shmp);
  }
  return rv;
}
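A minimal usage sketch for this constructor, assuming the conventional Gio vtbl (Write/Dtor) behind gio_shm.base; the helper name and the write-then-destroy flow are illustrative, not part of the excerpt.

/*
 * Illustrative only: allocates an shm-backed Gio stream, writes a buffer
 * through it, and tears it down.  Assumes struct NaClGioShm embeds its
 * Gio as the `base` member with the usual Write/Dtor vtbl entries.
 */
static int demo_gio_shm_write(void const *data, size_t len) {
  struct NaClGioShm gio_shm;
  size_t            shm_size = NaClRoundAllocPage(len);

  if (!NaClGioShmAllocCtor(&gio_shm, shm_size)) {
    return 0;
  }
  if ((ssize_t) len != (*gio_shm.base.vtbl->Write)(&gio_shm.base, data, len)) {
    (*gio_shm.base.vtbl->Dtor)(&gio_shm.base);
    return 0;
  }
  (*gio_shm.base.vtbl->Dtor)(&gio_shm.base);
  return 1;
}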
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode    retval = LOAD_INTERNAL;
  uintptr_t             dynamic_text_size;
  struct NaClDescImcShm *shm = NULL;
  uintptr_t             shm_vaddr_base;
  int                   mmap_protections;
  uintptr_t             mmap_ret;

  uintptr_t             shm_upper_bound;
  uintptr_t             text_sysaddr;

  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            " rodata / data segments not allocation aligned\n");
    NaClLog(4, " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  if (0 == shm_upper_bound) {
    shm_upper_bound = shm_vaddr_base;
  }
  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    NaClLog(4, "NaClMakeDynamicTextShared: shm object allocation failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    retval = LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
    goto cleanup;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaClPageFree((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
    NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif

  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  nap->dynamic_page_bitmap =
    BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  nap->text_shm = &shm->base;
  retval = LOAD_OK;

cleanup:
  if (LOAD_OK != retval) {
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }

  return retval;
}
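The bitmap allocated above holds one bit per NACL_MAP_PAGESIZE page of the [dynamic_text_start, dynamic_text_end) range, so translating a user address in the dynamic text region to its bitmap slot is a single subtraction and division. A hypothetical helper (not in the original source) that makes the arithmetic explicit:

/*
 * Hypothetical helper, for illustration only: maps a user virtual address
 * inside the dynamic text region to its index in nap->dynamic_page_bitmap,
 * which was sized as dynamic_text_size / NACL_MAP_PAGESIZE above.
 */
static uint32_t DynamicTextPageIndex(struct NaClApp const *nap,
                                     uintptr_t            usr_addr) {
  CHECK(usr_addr >= nap->dynamic_text_start);
  CHECK(usr_addr < nap->dynamic_text_end);
  return (uint32_t) ((usr_addr - nap->dynamic_text_start) / NACL_MAP_PAGESIZE);
}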