/*
 * Linear search, from high addresses down.
 *
 * Returns the starting page number of a free region of at least
 * num_pages pages, or 0 on failure (see note at the bottom).
 */
uintptr_t NaClVmmapFindSpace(struct NaClVmmap *self,
                             size_t           num_pages) {
  size_t                ix;
  struct NaClVmmapEntry *lower;
  uintptr_t             gap_start;
  uintptr_t             gap_end;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);

  /* Walk adjacent entry pairs from the highest addresses downward. */
  ix = self->nvalid;
  while (--ix > 0) {
    lower = self->vmentry[ix - 1];
    gap_start = lower->page_num + lower->npages;  /* end of lower entry */
    gap_end = self->vmentry[ix]->page_num;        /* start of upper entry */
    if (gap_end - gap_start >= num_pages) {
      /* Place the region at the top of the gap. */
      return gap_end - num_pages;
    }
  }
  return 0;
  /*
   * in user addresses, page 0 is always trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}
/*
 * Invoke fn(state, entry) on every valid map entry, in sorted order.
 */
void NaClVmmapVisit(struct NaClVmmap *self,
                    void             (*fn)(void *state,
                                           struct NaClVmmapEntry *entry),
                    void             *state) {
  size_t ix;
  size_t count;

  NaClVmmapMakeSorted(self);
  count = self->nvalid;
  for (ix = 0; ix < count; ++ix) {
    fn(state, self->vmentry[ix]);
  }
}
/*
 * Binary-search the sorted map for the entry containing page pnum.
 * Returns NULL if no entry contains that page.
 */
struct NaClVmmapEntry const *NaClVmmapFindPage(struct NaClVmmap *self,
                                               uintptr_t        pnum) {
  struct NaClVmmapEntry        key;
  struct NaClVmmapEntry        *key_ptr;
  struct NaClVmmapEntry *const *hit;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  key_ptr = &key;
  /*
   * vmentry is an array of pointers, so bsearch is given a
   * pointer-to-pointer key and a containment comparator.
   */
  hit = (struct NaClVmmapEntry *const *) bsearch(&key_ptr,
                                                 self->vmentry,
                                                 self->nvalid,
                                                 sizeof self->vmentry[0],
                                                 NaClVmmapContainCmpEntries);
  if (NULL == hit) {
    return NULL;
  }
  return *hit;
}
/*
 * Linear search, from uaddr up.
 *
 * Finds a NACL_MAP_PAGESIZE-aligned gap of at least num_pages pages
 * whose start is at or above the hint address uaddr.  Returns the
 * starting page number of such a gap, or 0 if none exists.
 */
uintptr_t NaClVmmapFindMapSpaceAboveHint(struct NaClVmmap *self,
                                         uintptr_t        uaddr,
                                         size_t           num_pages) {
  size_t                nvalid;
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             usr_page;
  uintptr_t             start_page;
  uintptr_t             end_page;

  NaClVmmapMakeSorted(self);

  usr_page = uaddr >> NACL_PAGESHIFT;
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);
  nvalid = self->nvalid;

  /* Examine the gap between each adjacent pair of sorted entries. */
  for (i = 1; i < nvalid; ++i) {
    vmep = self->vmentry[i-1];
    /* end_page: first free page after the lower entry, map-aligned up. */
    end_page = vmep->page_num + vmep->npages;
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);
    start_page = self->vmentry[i]->page_num;
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {
      /* Shrink the gap to map-multiple boundaries; skip if it vanishes. */
      start_page = NaClTruncPageNumDownToMapMultiple(start_page);
      if (start_page <= end_page) {
        continue;
      }
    }
    if (end_page <= usr_page && usr_page < start_page) {
      /* The hint lies inside this gap: start the search at the hint. */
      end_page = usr_page;
    }
    if (usr_page <= end_page && (start_page - end_page) >= num_pages) {
      /* found a gap at or after uaddr that's big enough */
      return end_page;
    }
  }
  return 0;
}
/* * preallocate memory area of given size. abort if fail */ void PreallocateUserMemory(struct NaClApp *nap) { struct SetupList *policy = nap->manifest->user_setup; uintptr_t i = nap->data_end; uint32_t stump = nap->manifest->user_setup->max_mem - nap->stack_size - nap->data_end; uint32_t dead_space; struct NaClVmmapEntry *user_space; /* check if max_mem is specified in manifest and proceed if so */ if(!policy->max_mem) return; /* user memory chunk must be allocated next to the data end */ i = (i + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1); policy->heap_ptr = NaClCommonSysMmapIntern(nap, (void*)i, stump, 3, 0x22, -1, 0); assert(policy->heap_ptr == i); /* * free "whole chunk" block without real memory deallocation * the map entry we need is the last in raw */ user_space = nap->mem_map.vmentry[nap->mem_map.nvalid - 1]; assert(policy->heap_ptr / NACL_PAGESIZE == user_space->page_num); assert(nap->mem_map.is_sorted != 1); /* protect dead space */ dead_space = NaClVmmapFindMaxFreeSpace(&nap->mem_map, 1) * NACL_PAGESIZE; i = (user_space->page_num + user_space->npages) * NACL_PAGESIZE; dead_space = NaClCommonSysMmapIntern(nap, (void*)i, dead_space, 0, 0x22, -1, 0); assert(dead_space == i); /* sort and remove deleted blocks */ user_space->removed = 1; nap->mem_map.is_sorted = 0; /* force sort because we need to get rid of removed blocks */ NaClVmmapMakeSorted(&nap->mem_map); /* why 0xfffff000? 1. 0x1000 reserved for error codes 2. it is still larger then 4gb - stack */ COND_ABORT(policy->heap_ptr > 0xfffff000, "cannot preallocate memory for user\n"); }
/*
 * NaClVmmapCheckMapping checks whether there is an existing mapping with
 * maximum protection equivalent or higher to the given one.
 *
 * Returns 1 if the whole region [page_num, page_num+npages) is covered
 * by existing entries whose max protection includes prot, 0 otherwise.
 */
static int NaClVmmapCheckExistingMapping(struct NaClVmmap  *self,
                                         uintptr_t         page_num,
                                         size_t            npages,
                                         int               prot) {
  size_t    i;
  uintptr_t region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapCheckExistingMapping(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);

  if (0 == self->nvalid) {
    return 0;
  }
  NaClVmmapMakeSorted(self);

  for (i = 0; i < self->nvalid; ++i) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t             ent_end_page = ent->page_num + ent->npages;
    int                   flags = NaClVmmapEntryMaxProt(ent);

    if (ent->page_num <= page_num && region_end_page <= ent_end_page) {
      /* The mapping is inside existing entry. */
      return 0 == (prot & (~flags));
    } else if (ent->page_num <= page_num && page_num < ent_end_page) {
      /* The mapping overlaps the entry. */
      if (0 != (prot & (~flags))) {
        return 0;
      }
      /* Advance past this entry; the remainder is checked against
       * later (higher-addressed) entries. */
      page_num = ent_end_page;
      npages = region_end_page - ent_end_page;
    } else if (page_num < ent->page_num) {
      /* The mapping without backing store. */
      return 0;
    }
  }
  /* Region (or its remainder) extends past the last entry: no backing. */
  return 0;
}
/* * Linear search, from high addresses down. For mmap, so the starting * address of the region found must be NACL_MAP_PAGESIZE aligned. * * For general mmap it is better to use as high an address as * possible, since the stack size for the main thread is currently * fixed, and the heap is the only thing that grows. */ uintptr_t NaClVmmapFindMapSpace(struct NaClVmmap *self, size_t num_pages) { size_t i; struct NaClVmmapEntry *vmep; uintptr_t end_page; uintptr_t start_page; if (0 == self->nvalid) return 0; NaClVmmapMakeSorted(self); num_pages = NaClRoundPageNumUpToMapMultiple(num_pages); for (i = self->nvalid; --i > 0; ) { vmep = self->vmentry[i-1]; end_page = vmep->page_num + vmep->npages; /* end page from previous */ end_page = NaClRoundPageNumUpToMapMultiple(end_page); start_page = self->vmentry[i]->page_num; /* start page from current */ if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) { start_page = NaClTruncPageNumDownToMapMultiple(start_page); if (start_page <= end_page) { continue; } } if (start_page - end_page >= num_pages) { return start_page - num_pages; } } return 0; /* * in user addresses, page 0 is always trampoline, and user * addresses are contained in system addresses, so returning a * system page number of 0 can serve as error indicator: it is at * worst the trampoline page, and likely to be below it. */ }
/*
 * Binary-search the sorted map for the entry containing page pnum and
 * initialize *space as an iterator at that entry.  When no entry
 * contains pnum, the iterator index is set to nvalid (i.e. "end").
 * Returns space.
 */
struct NaClVmmapIter *NaClVmmapFindPageIter(struct NaClVmmap     *self,
                                            uintptr_t            pnum,
                                            struct NaClVmmapIter *space) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *key_ptr;
  struct NaClVmmapEntry **hit;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  key_ptr = &key;
  /* vmentry holds pointers, so search with a pointer-to-pointer key. */
  hit = (struct NaClVmmapEntry **) bsearch(&key_ptr,
                                           self->vmentry,
                                           self->nvalid,
                                           sizeof self->vmentry[0],
                                           NaClVmmapContainCmpEntries);
  space->vmmap = self;
  space->entry_ix = (NULL == hit)
                    ? self->nvalid
                    : (size_t) (hit - self->vmentry);
  return space;
}
/*
 * Update the virtual memory map.  Deletion is handled by a remove
 * flag, since a NULL nmop just means that the memory is backed by the
 * system paging file.
 */
void NaClVmmapUpdate(struct NaClVmmap   *self,
                     uintptr_t          page_num,
                     size_t             npages,
                     int                prot,
                     struct NaClMemObj  *nmop,
                     int                remove) {
  /* update existing entries or create new entry as needed */
  size_t    i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"NACL_PRIxPTR", "
           "0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS", "
           "0x%x, 0x%08"NACL_PRIxPTR", %d)\n"),
          (uintptr_t) self, page_num, npages, prot,
          (uintptr_t) nmop, remove);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t             ent_end_page = ent->page_num + ent->npages;
    /* byte offset of the new region's end relative to this entry's start;
     * only meaningful in the overlap cases that use it below */
    nacl_off64_t          additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      if (!NaClVmmapAdd(self,
                        new_region_end_page,
                        ent_end_page - new_region_end_page,
                        ent->prot,
                        NaClMemObjSplit(ent->nmop, additional_offset))) {
        NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not split entry\n");
      }
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */
      NaClMemObjIncOffset(ent->nmop, additional_offset);
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap */
      assert(new_region_end_page <= ent->page_num ||
             ent_end_page <= page_num);
    }
  }

  if (!remove) {
    if (!NaClVmmapAdd(self, page_num, npages, prot, nmop)) {
      NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not add entry\n");
    }
  }

  /* Compact away entries flagged ->removed above. */
  NaClVmmapRemoveMarked(self);
}
int main(int argc, char **argv) { char *nacl_file; struct GioMemoryFileSnapshot gf; struct NaClApp state; struct NaClAppThread nat, *natp = &nat; int errcode; uint32_t initial_addr; uint32_t addr; struct NaClVmmap *mem_map; char *nacl_verbosity = getenv("NACLVERBOSITY"); if (argc < 2) { printf("No nexe file!\n\nFAIL\n"); } nacl_file = argv[1]; NaClLogModuleInit(); NaClLogSetVerbosity((NULL == nacl_verbosity) ? 0 : strtol(nacl_verbosity, (char **) 0, 0)); errcode = GioMemoryFileSnapshotCtor(&gf, nacl_file); ASSERT_NE(errcode, 0); errcode = NaClAppCtor(&state); ASSERT_NE(errcode, 0); errcode = NaClAppLoadFile((struct Gio *) &gf, &state, NACL_ABI_CHECK_OPTION_CHECK); ASSERT_EQ(errcode, 0); InitThread(&state, natp); /* * Initial mappings: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw Stack * There is no dynamic code area in this case. */ /* Check the initial mappings. */ mem_map = &state.mem_map; ASSERT_EQ(mem_map->nvalid, 5); CheckLowerMappings(mem_map); /* Allocate range */ addr = NaClSysMmap(natp, 0, 3 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); initial_addr = addr; /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 3 pages (new) * 5. rw Stack */ /* Map to overwrite the start of the previously allocated range */ addr = NaClSysMmap(natp, (void *) (uintptr_t) initial_addr, 2 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); ASSERT_EQ(addr, initial_addr); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. 
rw mmap()'d anonymous, 2 pages (new) * 5. rw mmap()'d anonymous, 1 pages (previous) * 6. rw Stack */ /* Allocate new page */ addr = NaClSysMmap(natp, 0, NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); /* * Our allocation strategy is to scan down from stack. This is an * implementation detail and not part of the guaranteed semantics, * but it is good to test that what we expect of our implementation * didn't change. */ ASSERT_EQ_MSG(addr, initial_addr - NACL_MAP_PAGESIZE, "Allocation strategy changed!"); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 1 pages (new) * 5. rw mmap()'d anonymous, 2 pages * 6. rw mmap()'d anonymous, 1 pages * 7. rw Stack */ NaClVmmapMakeSorted(mem_map); ASSERT_EQ(mem_map->nvalid, 8); CheckLowerMappings(mem_map); NaClVmmapDebug(mem_map, "After allocations"); /* Skip mappings 0, 1, 2 and 3. */ ASSERT_EQ(mem_map->vmentry[4]->page_num, (initial_addr - NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[4]->npages, NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[5]->page_num, initial_addr >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[5]->npages, 2 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[6]->page_num, (initial_addr + 2 * NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[6]->npages, NACL_PAGES_PER_MAP); /* * Undo effects of previous mmaps */ errcode = NaClSysMunmap(natp, (void *) (uintptr_t) (initial_addr - NACL_MAP_PAGESIZE), NACL_MAP_PAGESIZE * 4); ASSERT_EQ(errcode, 0); /* * Mappings return to being: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. 
rw Stack */ ASSERT_EQ(mem_map->nvalid, 5); CheckLowerMappings(mem_map); /* Allocate range */ addr = NaClSysMmap(natp, 0, 9 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); initial_addr = addr; /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 9 pages (new) * 5. rw Stack */ /* Map into middle of previously allocated range */ addr = NaClSysMmap(natp, (void *) (uintptr_t) (initial_addr + 2 * NACL_MAP_PAGESIZE), 3 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); ASSERT_EQ(addr, initial_addr + NACL_MAP_PAGESIZE * 2); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 2 pages (previous) * 5. rw mmap()'d anonymous, 3 pages (new) * 6. rw mmap()'d anonymous, 4 pages (previous) * 7. rw Stack */ NaClVmmapMakeSorted(mem_map); ASSERT_EQ(mem_map->nvalid, 8); CheckLowerMappings(mem_map); ASSERT_EQ(mem_map->vmentry[4]->page_num, initial_addr >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[4]->npages, 2 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[5]->page_num, (initial_addr + 2 * NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[5]->npages, 3 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[6]->page_num, (initial_addr + 5 * NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[6]->npages, 4 * NACL_PAGES_PER_MAP); /* * Undo effects of previous mmaps */ errcode = NaClSysMunmap(natp, (void *) (uintptr_t) initial_addr, 9 * NACL_MAP_PAGESIZE); ASSERT_EQ(errcode, 0); ASSERT_EQ(mem_map->nvalid, 5); CheckLowerMappings(mem_map); /* * Mappings return to being: * 0. 
-- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw Stack */ /* * Check use of hint. */ addr = NaClSysMmap(natp, (void *) (uintptr_t) initial_addr, NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); ASSERT_LE(addr, 0xffff0000u); printf("addr=0x%"NACL_PRIx32"\n", addr); ASSERT_LE_MSG(initial_addr, addr, "returned address not at or above hint"); errcode = NaClSysMunmap(natp, (void *) (uintptr_t) addr, NACL_MAP_PAGESIZE); ASSERT_EQ(errcode, 0); /* Check handling of zero-sized mappings. */ addr = NaClSysMmap(natp, 0, 0, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); ASSERT_EQ((int) addr, -NACL_ABI_EINVAL); errcode = NaClSysMunmap(natp, (void *) (uintptr_t) initial_addr, 0); ASSERT_EQ(errcode, -NACL_ABI_EINVAL); printf("PASS\n"); return 0; }
int main(int argc, char **argv) { char *nacl_file; struct NaClApp state; struct NaClApp *nap = &state; struct NaClAppThread nat, *natp = &nat; int errcode; uint32_t initial_addr; uint32_t addr; struct NaClVmmap *mem_map; struct NaClVmmapEntry *ent; char *nacl_verbosity = getenv("NACLVERBOSITY"); NaClHandleBootstrapArgs(&argc, &argv); if (argc < 2) { printf("No nexe file!\n\nFAIL\n"); } nacl_file = argv[1]; NaClAllModulesInit(); NaClLogSetVerbosity((NULL == nacl_verbosity) ? 0 : strtol(nacl_verbosity, (char **) 0, 0)); errcode = NaClAppCtor(&state); ASSERT_NE(errcode, 0); errcode = NaClAppLoadFileFromFilename(nap, nacl_file); ASSERT_EQ(errcode, LOAD_OK); InitThread(&state, natp); /* * Initial mappings: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw Stack * There is no dynamic code area in this case. */ /* Check the initial mappings. */ mem_map = &state.mem_map; ASSERT_EQ(mem_map->nvalid, 5); CheckLowerMappings(mem_map); /* Allocate range */ addr = NaClSysMmapIntern(nap, 0, 3 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); initial_addr = addr; /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 3 pages (new) * 5. rw Stack */ /* Map to overwrite the start of the previously allocated range */ addr = NaClSysMmapIntern(nap, (void *) (uintptr_t) initial_addr, 2 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); ASSERT_EQ(addr, initial_addr); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 2 pages (new) * 5. 
rw mmap()'d anonymous, 1 pages (previous) * 6. rw Stack */ /* Allocate new page */ addr = NaClSysMmapIntern(nap, 0, NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); /* * Our allocation strategy is to scan down from stack. This is an * implementation detail and not part of the guaranteed semantics, * but it is good to test that what we expect of our implementation * didn't change. */ ASSERT_EQ_MSG(addr, initial_addr - NACL_MAP_PAGESIZE, "Allocation strategy changed!"); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 1 pages (new) * 5. rw mmap()'d anonymous, 2 pages * 6. rw mmap()'d anonymous, 1 pages * 7. rw Stack */ NaClVmmapMakeSorted(mem_map); ASSERT_EQ(mem_map->nvalid, 8); CheckLowerMappings(mem_map); NaClVmmapDebug(mem_map, "After allocations"); /* Skip mappings 0, 1, 2 and 3. */ ASSERT_EQ(mem_map->vmentry[4]->page_num, (initial_addr - NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[4]->npages, NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[5]->page_num, initial_addr >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[5]->npages, 2 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[6]->page_num, (initial_addr + 2 * NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[6]->npages, NACL_PAGES_PER_MAP); /* * Undo effects of previous mmaps */ errcode = NaClSysMunmap(natp, (void *) (uintptr_t) (initial_addr - NACL_MAP_PAGESIZE), NACL_MAP_PAGESIZE * 4); ASSERT_EQ(errcode, 0); /* * Mappings return to being: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. 
rw Stack */ ASSERT_EQ(mem_map->nvalid, 5); CheckLowerMappings(mem_map); /* Allocate range */ addr = NaClSysMmapIntern(nap, 0, 9 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); initial_addr = addr; /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 9 pages (new) * 5. rw Stack */ /* Map into middle of previously allocated range */ addr = NaClSysMmapIntern(nap, (void *) (uintptr_t) (initial_addr + 2 * NACL_MAP_PAGESIZE), 3 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); ASSERT_EQ(addr, initial_addr + NACL_MAP_PAGESIZE * 2); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 2 pages (previous) * 5. rw mmap()'d anonymous, 3 pages (new) * 6. rw mmap()'d anonymous, 4 pages (previous) * 7. 
rw Stack */ NaClVmmapMakeSorted(mem_map); ASSERT_EQ(mem_map->nvalid, 8); CheckLowerMappings(mem_map); ASSERT_EQ(mem_map->vmentry[4]->page_num, initial_addr >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[4]->npages, 2 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[5]->page_num, (initial_addr + 2 * NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[5]->npages, 3 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[6]->page_num, (initial_addr + 5 * NACL_MAP_PAGESIZE) >> NACL_PAGESHIFT); ASSERT_EQ(mem_map->vmentry[6]->npages, 4 * NACL_PAGES_PER_MAP); /* Change the memory protection of previously allocated range */ errcode = NaClSysMprotectInternal(nap, (initial_addr + 1 * NACL_MAP_PAGESIZE), 5 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ); ASSERT_EQ(errcode, 0); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 1 pages (previous) * 5. r mmap()'d anonymous, 1 pages (new) * 6. r mmap()'d anonymous, 3 pages (new) * 7. r mmap()'d anonymous, 1 pages (new) * 8. rw mmap()'d anonymous, 3 pages (previous) * 9. 
rw Stack */ NaClVmmapMakeSorted(mem_map); ASSERT_EQ(mem_map->nvalid, 10); CheckLowerMappings(mem_map); ASSERT_EQ(mem_map->vmentry[4]->npages, 1 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[4]->prot, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE); ASSERT_EQ(mem_map->vmentry[5]->npages, 1 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[5]->prot, NACL_ABI_PROT_READ); ASSERT_EQ(mem_map->vmentry[6]->npages, 3 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[6]->prot, NACL_ABI_PROT_READ); ASSERT_EQ(mem_map->vmentry[7]->npages, 1 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[7]->prot, NACL_ABI_PROT_READ); ASSERT_EQ(mem_map->vmentry[8]->npages, 3 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[8]->prot, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE); /* Change the memory protection of previously allocated range */ errcode = NaClSysMprotectInternal(nap, (initial_addr + 2 * NACL_MAP_PAGESIZE), 3 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_NONE); ASSERT_EQ(errcode, 0); /* * The mappings have changed to become: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw mmap()'d anonymous, 1 pages (previous) * 5. r mmap()'d anonymous, 1 pages (previous) * 6. -- mmap()'d anonymous, 3 pages (new) * 7. r mmap()'d anonymous, 1 pages (previous) * 8. rw mmap()'d anonymous, 3 pages (previous) * 9. 
rw Stack */ NaClVmmapMakeSorted(mem_map); ASSERT_EQ(mem_map->nvalid, 10); CheckLowerMappings(mem_map); ASSERT_EQ(mem_map->vmentry[4]->npages, 1 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[4]->prot, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE); ASSERT_EQ(mem_map->vmentry[5]->npages, 1 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[5]->prot, NACL_ABI_PROT_READ); ASSERT_EQ(mem_map->vmentry[6]->npages, 3 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[6]->prot, NACL_ABI_PROT_NONE); ASSERT_EQ(mem_map->vmentry[7]->npages, 1 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[7]->prot, NACL_ABI_PROT_READ); ASSERT_EQ(mem_map->vmentry[8]->npages, 3 * NACL_PAGES_PER_MAP); ASSERT_EQ(mem_map->vmentry[8]->prot, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE); /* * Undo effects of previous mmaps */ errcode = NaClSysMunmap(natp, (void *) (uintptr_t) initial_addr, 9 * NACL_MAP_PAGESIZE); ASSERT_EQ(errcode, 0); ASSERT_EQ(mem_map->nvalid, 5); CheckLowerMappings(mem_map); /* * Mappings return to being: * 0. -- Zero page * 1. rx Static code segment * 2. r Read-only data segment * 3. rw Writable data segment * 4. rw Stack */ /* * Check use of hint. */ addr = NaClSysMmapIntern(nap, (void *) (uintptr_t) initial_addr, NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); ASSERT_LE(addr, 0xffff0000u); printf("addr=0x%"NACL_PRIx32"\n", addr); ASSERT_LE_MSG(initial_addr, addr, "returned address not at or above hint"); errcode = NaClSysMunmap(natp, (void *) (uintptr_t) addr, NACL_MAP_PAGESIZE); ASSERT_EQ(errcode, 0); /* Check handling of zero-sized mappings. 
*/ addr = NaClSysMmapIntern(nap, 0, 0, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); ASSERT_EQ((int) addr, -NACL_ABI_EINVAL); errcode = NaClSysMunmap(natp, (void *) (uintptr_t) initial_addr, 0); ASSERT_EQ(errcode, -NACL_ABI_EINVAL); /* Check changing the memory protection of neighbouring mmaps */ addr = NaClSysMmapIntern(nap, 0, NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); initial_addr = addr; addr = NaClSysMmapIntern(nap, (void *) (uintptr_t) (initial_addr + NACL_MAP_PAGESIZE), NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE, NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED, -1, 0); printf("addr=0x%"NACL_PRIx32"\n", addr); ASSERT_EQ(addr, initial_addr + NACL_MAP_PAGESIZE); errcode = NaClSysMprotectInternal(nap, initial_addr, 2 * NACL_MAP_PAGESIZE, NACL_ABI_PROT_READ); ASSERT_EQ(errcode, 0); /* Undo effects of previous mmaps */ errcode = NaClSysMunmap(natp, (void *) (uintptr_t) initial_addr, 2 * NACL_MAP_PAGESIZE); ASSERT_EQ(errcode, 0); /* Check that we cannot make the read-only data segment writable */ ent = mem_map->vmentry[2]; errcode = NaClSysMprotectInternal(nap, (uint32_t) (ent->page_num << NACL_PAGESHIFT), ent->npages * NACL_MAP_PAGESIZE, NACL_ABI_PROT_WRITE); ASSERT_EQ(errcode, -NACL_ABI_EACCES); #if NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86 && NACL_BUILD_SUBARCH == 64 CheckForGuardRegion(nap->mem_start - ((size_t) 40 << 30), (size_t) 40 << 30); CheckForGuardRegion(nap->mem_start + ((size_t) 4 << 30), (size_t) 40 << 30); #endif NaClAddrSpaceFree(nap); printf("PASS\n"); return 0; }
/*
 * Change the protection of the region [page_num, page_num+npages) to
 * prot, splitting map entries at the region boundaries as needed.
 * Returns 1 on success, 0 if the region is not fully backed by
 * existing mappings with sufficient maximum protection.
 */
int NaClVmmapChangeProt(struct NaClVmmap *self,
                        uintptr_t        page_num,
                        size_t           npages,
                        int              prot) {
  size_t    i;
  size_t    nvalid;
  uintptr_t new_region_end_page = page_num + npages;

  /*
   * NaClVmmapCheckExistingMapping should be always called before
   * NaClVmmapChangeProt proceeds to ensure that valid mapping exists
   * as modifications cannot be rolled back.
   */
  if (!NaClVmmapCheckExistingMapping(self, page_num, npages, prot)) {
    return 0;
  }

  NaClLog(2,
          ("NaClVmmapChangeProt(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);
  NaClVmmapMakeSorted(self);

  /*
   * This loop & interval boundary tests closely follow those in
   * NaClVmmapUpdate.  When updating those, do not forget to update them
   * at both places where appropriate.
   * TODO(phosek): use better data structure which will support intervals
   */
  for (i = 0, nvalid = self->nvalid; i < nvalid && npages > 0; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t             ent_end_page = ent->page_num + ent->npages;
    /* byte offset of the region's end relative to this entry's start */
    nacl_off64_t          additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /* Split existing mapping into two parts */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      /* Add the new mapping into the middle. */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + (page_num - ent->page_num),
                   ent->file_size);
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
      /* Add the overlapping part of the mapping. */
      NaClVmmapAdd(self,
                   page_num,
                   ent_end_page - page_num,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + (page_num - ent->page_num),
                   ent->file_size);
      /* The remaining part (if any) will be added in other iteration. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping, split it. */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset,
                   ent->file_size);
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
      ent->prot = prot;
    } else {
      /* No overlap */
      assert(new_region_end_page <= ent->page_num ||
             ent_end_page <= page_num);
    }
  }
  return 1;
}
/*
 * Update the virtual memory map.  Deletion is handled by a remove
 * flag, since a NULL desc just means that the memory is backed by the
 * system paging file.
 */
static void NaClVmmapUpdate(struct NaClVmmap *self,
                            uintptr_t        page_num,
                            size_t           npages,
                            int              prot,
                            int              flags,
                            int              remove,
                            struct NaClDesc  *desc,
                            nacl_off64_t     offset,
                            nacl_off64_t     file_size) {
  /* update existing entries or create new entry as needed */
  size_t    i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, 0x%x, %d, 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIx64")\n"),
          (uintptr_t) self, page_num, npages, prot, flags, remove,
          (uintptr_t) desc, offset);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t             ent_end_page = ent->page_num + ent->npages;
    /* byte offset of the new region's end relative to this entry's start;
     * only meaningful in the overlap cases that use it below */
    nacl_off64_t          additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap */
      assert(new_region_end_page <= ent->page_num ||
             ent_end_page <= page_num);
    }
  }

  if (!remove) {
    NaClVmmapAdd(self, page_num, npages, prot, flags, desc, offset,
                 file_size);
  }

  /* Compact away entries flagged ->removed above. */
  NaClVmmapRemoveMarked(self);
}