/*
 * Verify that the text / rodata / data segments of |nap| form a sane
 * address-space layout:
 *   - the highest-placed segment present ends exactly at max_vaddr;
 *   - rodata does not overlap data, and static text overlaps neither;
 *   - rodata, when present, begins on an allocation-page boundary.
 * Returns LOAD_OK on success, otherwise a specific LOAD_* error code.
 */
NaClErrorCode NaClCheckAddressSpaceLayoutSanity(struct NaClApp *nap,
                                                uintptr_t rodata_end,
                                                uintptr_t data_end,
                                                uintptr_t max_vaddr) {
  int const have_rodata = 0 != nap->rodata_start;
  int const have_data = 0 != nap->data_start;

  /* Whichever loaded segment is last must account for max_vaddr. */
  if (have_data) {
    if (max_vaddr != data_end) {
      NaClLog(LOG_INFO, "data segment is not last\n");
      return LOAD_DATA_NOT_LAST_SEGMENT;
    }
  } else if (have_rodata && max_vaddr != NaClRoundAllocPage(rodata_end)) {
    /*
     * Believed unreachable, kept for completeness: NaClPhdrChecks only
     * admits text, rodata and data segments, and
     * NaClElfImageValidateProgramHeaders forces every segment past the
     * trampoline region, so rodata always follows text and -- absent a
     * data segment -- is necessarily last.
     */
    NaClLog(LOG_INFO, "no data segment, but rodata segment is not last\n");
    return LOAD_NO_DATA_BUT_RODATA_NOT_LAST_SEGMENT;
  }

  /* rodata must not run into the (page-truncated) start of data. */
  if (have_rodata && have_data
      && rodata_end > NaClTruncAllocPage(nap->data_start)) {
    NaClLog(LOG_INFO, "rodata_overlaps data.\n");
    return LOAD_RODATA_OVERLAPS_DATA;
  }

  /* Static text must not run into whichever segment follows it. */
  if (have_rodata) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->rodata_start) {
      return LOAD_TEXT_OVERLAPS_RODATA;
    }
  } else if (have_data
             && NaClRoundAllocPage(NaClEndOfStaticText(nap))
                > NaClTruncAllocPage(nap->data_start)) {
    return LOAD_TEXT_OVERLAPS_DATA;
  }

  /* rodata, when present, must start on an allocation boundary. */
  if (have_rodata
      && NaClRoundAllocPage(nap->rodata_start) != nap->rodata_start) {
    NaClLog(LOG_INFO, "rodata_start not a multiple of allocation size\n");
    return LOAD_BAD_RODATA_ALIGNMENT;
  }

  return LOAD_OK;
}
/*
 * Reserve the entire untrusted address space (1 << nap->addr_bits bytes)
 * for |nap|, then release and protect the unused "hole" between the end
 * of the data segment and the bottom of the stack.  All failures are
 * fatal (ZLOGFAIL terminates).  On return, nap->mem_start holds the
 * trusted base address of the reservation.
 */
void NaClAllocAddrSpace(struct NaClApp *nap) {
  void *mem;
  uintptr_t hole_start;
  size_t hole_size;
  uintptr_t stack_start;

  ZLOGS(LOG_DEBUG, "calling NaClAllocateSpace(*,0x%016x)",
        ((size_t)1 << nap->addr_bits));

  /* Reserve the full sandbox region; remember its trusted base. */
  NaClAllocateSpace(&mem, (uintptr_t) 1U << nap->addr_bits);

  nap->mem_start = (uintptr_t) mem;
  ZLOGS(LOG_DEBUG, "allocated memory at 0x%08x", nap->mem_start);

  /* The hole begins at the first allocation page past the data segment. */
  hole_start = NaClRoundAllocPage(nap->data_end);

  ZLOGFAIL(nap->stack_size >= ((uintptr_t) 1U) << nap->addr_bits, EFAULT,
           "NaClAllocAddrSpace: stack too large!");

  /*
   * The stack occupies the top of the address space; its start is
   * truncated down to an allocation-page boundary.
   */
  stack_start = (((uintptr_t) 1U) << nap->addr_bits) - nap->stack_size;
  stack_start = NaClTruncAllocPage(stack_start);

  ZLOGFAIL(stack_start < hole_start, EFAULT,
           "Memory 'hole' between end of BSS and start of stack is negative in size");

  hole_size = stack_start - hole_start;
  hole_size = NaClTruncAllocPage(hole_size);

  /*
   * mprotect and madvise unused data space to "free" it up, but
   * retain mapping so no other memory can be mapped into those
   * addresses.
   */
  if(hole_size != 0)
  {
    ZLOGS(LOG_DEBUG, "madvising 0x%08x, 0x%08x, MADV_DONTNEED",
          nap->mem_start + hole_start, hole_size);
    ZLOGFAIL(0 != NaCl_madvise((void*)(nap->mem_start + hole_start), hole_size,
             MADV_DONTNEED), errno, "madvise failed. cannot release unused data segment");
    ZLOGS(LOG_DEBUG, "mprotecting 0x%08x, 0x%08x, PROT_NONE",
          nap->mem_start + hole_start, hole_size);
    ZLOGFAIL(0 != NaCl_mprotect((void *)(nap->mem_start + hole_start), hole_size,
             PROT_NONE), errno, "mprotect failed. cannot protect pages");
  }
  else
    ZLOGS(LOG_DEBUG, "there is no hole between end of data and the beginning of stack");
}
/*
 * Fill from static_text_end to end of that page with halt
 * instruction, which is at least NACL_HALT_LEN in size when no
 * dynamic text is present.  Does not touch dynamic text region, which
 * should be pre-filled with HLTs.
 *
 * By adding NACL_HALT_SLED_SIZE, we ensure that the code region ends
 * with HLTs, just in case the CPU has a bug in which it fails to
 * check for running off the end of the x86 code segment.
 */
void NaClFillEndOfTextRegion(struct NaClApp *nap) {
  size_t page_pad;  /* number of bytes of HLT padding to append */

  /*
   * NOTE: make sure we are not silently overwriting data.  It is the
   * toolchain's responsibility to ensure that a NACL_HALT_SLED_SIZE
   * gap exists.
   */
  if (0 != nap->data_start &&
      nap->static_text_end + NACL_HALT_SLED_SIZE >
          NaClTruncAllocPage(nap->data_start)) {
    NaClLog(LOG_FATAL, "Missing gap between text and data for halt_sled\n");
  }
  if (0 != nap->rodata_start &&
      nap->static_text_end + NACL_HALT_SLED_SIZE > nap->rodata_start) {
    NaClLog(LOG_FATAL, "Missing gap between text and rodata for halt_sled\n");
  }

  if (NULL == nap->text_shm) {
    /*
     * No dynamic text exists.  Space for NACL_HALT_SLED_SIZE must
     * exist.
     */
    page_pad = (NaClRoundAllocPage(nap->static_text_end + NACL_HALT_SLED_SIZE)
                - nap->static_text_end);
    /* pad covers the full sled but never more than a page beyond it */
    CHECK(page_pad >= NACL_HALT_SLED_SIZE);
    CHECK(page_pad < NACL_MAP_PAGESIZE + NACL_HALT_SLED_SIZE);
  } else {
    /*
     * Dynamic text exists; the halt sled resides in the dynamic text
     * region, so all we need to do here is to round out the last
     * static text page with HLT instructions.  It doesn't matter if
     * the size of this region is smaller than NACL_HALT_SLED_SIZE --
     * this is just to fully initialize the page, rather than (later)
     * decoding/validating zero-filled memory as instructions.
     */
    page_pad = NaClRoundAllocPage(nap->static_text_end) - nap->static_text_end;
  }

  NaClLog(4,
          "Filling with halts: %08"NACL_PRIxPTR", %08"NACL_PRIxS" bytes\n",
          nap->mem_start + nap->static_text_end,
          page_pad);

  NaClFillMemoryRegionWithHalt((void *)(nap->mem_start + nap->static_text_end),
                               page_pad);

  /* Account for the pad so later layout sees the padded text end. */
  nap->static_text_end += page_pad;
}
/*
 * Report whether the user address range starting at |usraddr| touches
 * executable pages.  Only the trampoline/text region at the bottom of
 * the address space is executable, so it suffices to compare the
 * allocation-truncated start address against dynamic_text_end; should
 * other pages ever become executable, this check must be revisited.
 *
 * |usraddr| is truncated down to an allocation boundary defensively:
 * callers deal in 64K-granularity mappings and are expected to pass an
 * allocation-aligned address already.  |length| is deliberately unused.
 */
int NaClSysCommonAddrRangeContainsExecutablePages(struct NaClApp *nap,
                                                  uintptr_t usraddr,
                                                  size_t length) {
  uintptr_t aligned_start;

  UNREFERENCED_PARAMETER(length);

  aligned_start = NaClTruncAllocPage(usraddr);
  return aligned_start < nap->dynamic_text_end;
}
/*
 * Scan the address range [min_addr, max_addr) for a free region and
 * reserve+commit one NACL_MAP_PAGESIZE page of read/write memory there.
 * Returns the allocated address, or NULL if no free region was found
 * (or VirtualQuery failed).  Windows-only.
 */
void *AllocatePageInRange(uint8_t *min_addr, uint8_t *max_addr) {
  MEMORY_BASIC_INFORMATION info;
  uint8_t *cursor;

  for (cursor = (uint8_t *) NaClTruncAllocPage((uintptr_t) min_addr);
       cursor < max_addr;
       /*
        * RegionSize can be a 4k multiple but not a 64k multiple, so we
        * have to round up, otherwise our subsequent attempt to
        * VirtualAlloc() a non-64k-aligned address will fail.
        */
       cursor += NaClRoundAllocPage(info.RegionSize)) {
    size_t queried = VirtualQuery(cursor, &info, sizeof(info));
    if (queried == 0) {
      break;  /* query failed; give up the scan */
    }
    CHECK(queried == sizeof(info));
    if (info.State == MEM_FREE) {
      return VirtualAlloc(info.BaseAddress, NACL_MAP_PAGESIZE,
                          MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    }
  }
  return NULL;
}
/*
 * Apply memory protection to memory regions.
 *
 * Walks the sandbox layout in order -- guard pages, trampoline/text,
 * (optional) rodata, (optional) data/heap, stack -- applying the final
 * page protections and recording each region in nap->mem_map.
 * All mprotect failures are fatal (ZLOGFAIL).
 */
void NaClMemoryProtection(struct NaClApp *nap) {
  uintptr_t start_addr;
  size_t region_size;
  int err;

  /*
   * The first NACL_SYSCALL_START_ADDR bytes are mapped as PROT_NONE.
   * This enables NULL pointer checking, and provides additional protection
   * against addr16/data16 prefixed operations being used for attacks.
   *
   * NaClMprotectGuards also sets up guard pages outside of the
   * virtual address space of the NaClApp -- for the ARM and x86-64
   * where data sandboxing only sandbox memory writes and not reads,
   * we need to ensure that certain addressing modes that might
   * otherwise allow the NaCl app to write outside its address space
   * (given how we using masking / base registers to implement data
   * write sandboxing) won't affect the trusted data structures.
   */
  ZLOGS(LOG_DEBUG, "Protecting guard pages for 0x%08x", nap->mem_start);
  NaClMprotectGuards(nap);

  start_addr = nap->mem_start + NACL_SYSCALL_START_ADDR;

  /*
   * The next pages up to NACL_TRAMPOLINE_END are the trampolines.
   * Immediately following that is the loaded text section.
   * These are collectively marked as PROT_READ | PROT_EXEC.
   */
  region_size = NaClRoundPage(nap->static_text_end - NACL_SYSCALL_START_ADDR);
  ZLOGS(LOG_DEBUG, "Trampoline/text region start 0x%08x, size 0x%08x, end 0x%08x",
        start_addr, region_size, start_addr + region_size);

  err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ | PROT_EXEC);
  ZLOGFAIL(0 != err, err, FAILED_MSG);
  SET_MEM_MAP_IDX(nap->mem_map[TextIdx], "Text",
                  start_addr, region_size, PROT_READ | PROT_EXEC);

  /*
   * Page protections for this region have already been set up by
   * nacl_text.c.
   *
   * todo(d'b): since text.c exists no more, protection should be set here
   *
   * We record the mapping for consistency with other fixed
   * mappings, but the record is not actually used.  Overmapping is
   * prevented by a separate range check, which is done by
   * NaClSysCommonAddrRangeContainsExecutablePages_mu().
   */

  /*
   * zerovm does not support dynamic text. the code below will check its
   * existence, log information and fail if needed.
   * todo(d'b): after the dynamic text support will be added or completely
   * removed the block below should be rewritten or removed
   */
  start_addr = NaClUserToSys(nap, nap->dynamic_text_start);
  region_size = nap->dynamic_text_end - nap->dynamic_text_start;
  ZLOGS(LOG_DEBUG, "shm txt region start 0x%08x, size 0x%08x, end 0x%08x",
        start_addr, region_size, start_addr + region_size);
  /* a non-empty dynamic text region is a hard failure under zerovm */
  ZLOGFAIL(0 != region_size, EFAULT,
           "zerovm does not support nexe with dynamic text!");

  if(0 != nap->rodata_start)
  {
    uintptr_t rodata_end;

    /*
     * TODO(mseaborn): Could reduce the number of cases by ensuring
     * that nap->data_start is always non-zero, even if
     * nap->rodata_start == nap->data_start == nap->break_addr.
     */
    if(0 != nap->data_start)
      rodata_end = nap->data_start;
    else
      rodata_end = nap->break_addr;

    start_addr = NaClUserToSys(nap, nap->rodata_start);
    region_size = NaClRoundPage(NaClRoundAllocPage(rodata_end)
                                - NaClSysToUser(nap, start_addr));
    ZLOGS(LOG_DEBUG, "RO data region start 0x%08x, size 0x%08x, end 0x%08x",
          start_addr, region_size, start_addr + region_size);

    err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ);
    ZLOGFAIL(0 != err, err, FAILED_MSG);
    SET_MEM_MAP_IDX(nap->mem_map[RODataIdx], "ROData",
                    start_addr, region_size, PROT_READ);
  }

  /*
   * data_end is max virtual addr seen, so start_addr <= data_end
   * must hold.
   */
  if(0 != nap->data_start)
  {
    start_addr = NaClUserToSys(nap, nap->data_start);
    region_size = NaClRoundPage(NaClRoundAllocPage(nap->data_end)
                                - NaClSysToUser(nap, start_addr));
    ZLOGS(LOG_DEBUG, "RW data region start 0x%08x, size 0x%08x, end 0x%08x",
          start_addr, region_size, start_addr + region_size);

    err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ | PROT_WRITE);
    ZLOGFAIL(0 != err, err, FAILED_MSG);
    SET_MEM_MAP_IDX(nap->mem_map[HeapIdx], "Heap",
                    start_addr, region_size, PROT_READ | PROT_WRITE);
  }

  /* stack is read/write but not execute */
  region_size = nap->stack_size;
  start_addr = NaClUserToSys(nap,
                             NaClTruncAllocPage(((uintptr_t) 1U << nap->addr_bits)
                                                - nap->stack_size));
  ZLOGS(LOG_DEBUG, "RW stack region start 0x%08x, size 0x%08lx, end 0x%08x",
        start_addr, region_size, start_addr + region_size);

  /* note: protected size is the stack size rounded up to an alloc page */
  err = NaCl_mprotect((void *)start_addr,
                      NaClRoundAllocPage(nap->stack_size),
                      PROT_READ | PROT_WRITE);
  ZLOGFAIL(0 != err, err, FAILED_MSG);
  SET_MEM_MAP_IDX(nap->mem_map[StackIdx], "Stack",
                  start_addr, NaClRoundAllocPage(nap->stack_size),
                  PROT_READ | PROT_WRITE);
}
NaClErrorCode NaClElfImageLoad(struct NaClElfImage *image, struct NaClDesc *ndp, struct NaClApp *nap) { int segnum; uintptr_t vaddr; uintptr_t paddr; uintptr_t end_vaddr; ssize_t read_ret; int safe_for_mmap; for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) { const Elf_Phdr *php = &image->phdrs[segnum]; Elf_Off offset = (Elf_Off) NaClTruncAllocPage(php->p_offset); Elf_Off filesz = php->p_offset + php->p_filesz - offset; /* did we decide that we will load this segment earlier? */ if (!image->loadable[segnum]) { continue; } NaClLog(2, "loading segment %d\n", segnum); if (0 == php->p_filesz) { NaClLog(4, "zero-sized segment. ignoring...\n"); continue; } end_vaddr = php->p_vaddr + php->p_filesz; /* integer overflow? */ if (end_vaddr < php->p_vaddr) { NaClLog(LOG_FATAL, "parameter error should have been detected already\n"); } /* * is the end virtual address within the NaCl application's * address space? if it is, it implies that the start virtual * address is also. */ if (end_vaddr >= ((uintptr_t) 1U << nap->addr_bits)) { NaClLog(LOG_FATAL, "parameter error should have been detected already\n"); } vaddr = NaClTruncAllocPage(php->p_vaddr); paddr = NaClUserToSysAddr(nap, vaddr); CHECK(kNaClBadAddress != paddr); /* * Check NaClDescIsSafeForMmap(ndp) to see if it might be okay to * mmap. */ NaClLog(4, "NaClElfImageLoad: checking descriptor mmap safety\n"); safe_for_mmap = NaClDescIsSafeForMmap(ndp); if (safe_for_mmap) { NaClLog(4, "NaClElfImageLoad: safe-for-mmap\n"); } if (!safe_for_mmap && NACL_FI("ELF_LOAD_BYPASS_DESCRIPTOR_SAFETY_CHECK", 0, 1)) { NaClLog(LOG_WARNING, "WARNING: BYPASSING DESCRIPTOR SAFETY CHECK\n"); safe_for_mmap = 1; } if (safe_for_mmap) { NaClErrorCode map_status; NaClLog(4, "NaClElfImageLoad: safe-for-mmap\n"); map_status = NaClElfFileMapSegment(nap, ndp, php->p_flags, offset, filesz, vaddr, paddr); /* * NB: -Werror=switch-enum forces us to not use a switch. 
*/ if (LOAD_OK == map_status) { /* Segment has been handled -- proceed to next segment */ continue; } else if (LOAD_STATUS_UNKNOWN != map_status) { /* * A real error! Return it so that this can be reported to * the embedding code (via start_module status). */ return map_status; } /* Fall through: pread-based fallback requested */ } NaClLog(4, "PReading %"NACL_PRIdElf_Xword" (0x%"NACL_PRIxElf_Xword") bytes to" " address 0x%"NACL_PRIxPTR", position %" NACL_PRIdElf_Off" (0x%"NACL_PRIxElf_Off")\n", filesz, filesz, paddr, offset, offset); /* * Tell valgrind that this memory is accessible and undefined. For more * details see * http://code.google.com/p/nativeclient/wiki/ValgrindMemcheck#Implementation_details */ NACL_MAKE_MEM_UNDEFINED((void *) paddr, filesz); read_ret = (*NACL_VTBL(NaClDesc, ndp)-> PRead)(ndp, (void *) paddr, filesz, (nacl_off64_t) offset); if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != filesz) { NaClLog(LOG_ERROR, "load failure segment %d", segnum); return LOAD_SEGMENT_BAD_PARAM; } /* region from p_filesz to p_memsz should already be zero filled */ /* Tell Valgrind that we've mapped a segment of nacl_file. */ NaClFileMappingForValgrind(paddr, filesz, offset); } return LOAD_OK; }
/*
 * Create the shared-memory-backed dynamic text region for |nap|, spanning
 * from the first allocation page past the static text to the start of the
 * next segment (rodata, else data, else nothing).  On success sets
 * nap->dynamic_text_start/end, nap->text_shm and the dynamic page bitmap,
 * and returns LOAD_OK.  Allocation failures return LOAD_NO_MEMORY /
 * LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT; mapping failures after the shm is
 * constructed are fatal (LOG_FATAL).
 */
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode retval = LOAD_INTERNAL;
  uintptr_t dynamic_text_size;
  struct NaClDescImcShm *shm = NULL;  /* non-NULL only once fully ctor'd */
  uintptr_t shm_vaddr_base;
  int mmap_protections;
  uintptr_t mmap_ret;
  uintptr_t shm_upper_bound;
  uintptr_t text_sysaddr;

  /* dynamic text begins at the first allocation page past static text */
  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4, "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4, "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4, "NaClMakeDynamicTextShared:"
            " rodata / data segments not allocation aligned\n");
    NaClLog(4, " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  if (0 == shm_upper_bound) {
    shm_upper_bound = shm_vaddr_base;
  }
  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4, "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    NaClLog(4, "NaClMakeDynamicTextShared: shm object allocation failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    retval = LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
    goto cleanup;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaClPageFree((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
    NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);

  /* map the shm over the freed range at a fixed address */
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  /* one bit per dynamic-text allocation page */
  nap->dynamic_page_bitmap =
    BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  nap->text_shm = &shm->base;
  retval = LOAD_OK;

cleanup:
  /* error paths reach here with shm == NULL, so these are no-ops then */
  if (LOAD_OK != retval) {
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }

  return retval;
}
/* Warning: sizeof(nacl_abi_off_t)!=sizeof(off_t) on OSX */ int32_t NaClSysMmapIntern(struct NaClApp *nap, void *start, size_t length, int prot, int flags, int d, nacl_abi_off_t offset) { int allowed_flags; struct NaClDesc *ndp; uintptr_t usraddr; uintptr_t usrpage; uintptr_t sysaddr; uintptr_t endaddr; int mapping_code; uintptr_t map_result; int holding_app_lock; struct nacl_abi_stat stbuf; size_t alloc_rounded_length; nacl_off64_t file_size; nacl_off64_t file_bytes; nacl_off64_t host_rounded_file_bytes; size_t alloc_rounded_file_bytes; uint32_t val_flags; holding_app_lock = 0; ndp = NULL; allowed_flags = (NACL_ABI_MAP_FIXED | NACL_ABI_MAP_SHARED | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_ANONYMOUS); usraddr = (uintptr_t) start; if (0 != (flags & ~allowed_flags)) { NaClLog(2, "invalid mmap flags 0%o, ignoring extraneous bits\n", flags); flags &= allowed_flags; } if (0 != (flags & NACL_ABI_MAP_ANONYMOUS)) { /* * anonymous mmap, so backing store is just swap: no descriptor is * involved, and no memory object will be created to represent the * descriptor. */ ndp = NULL; } else { ndp = NaClAppGetDesc(nap, d); if (NULL == ndp) { map_result = -NACL_ABI_EBADF; goto cleanup; } } mapping_code = 0; /* * Check if application is trying to do dynamic code loading by * mmaping a file. */ if (0 != (NACL_ABI_PROT_EXEC & prot) && 0 != (NACL_ABI_MAP_FIXED & flags) && NULL != ndp && NaClSysCommonAddrRangeInAllowedDynamicCodeSpace(nap, usraddr, length)) { if (!nap->enable_dyncode_syscalls) { NaClLog(LOG_WARNING, "NaClSysMmap: PROT_EXEC when dyncode syscalls are disabled.\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } if (0 != (NACL_ABI_PROT_WRITE & prot)) { NaClLog(3, "NaClSysMmap: asked for writable and executable code pages?!?\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } mapping_code = 1; } else if (0 != (prot & NACL_ABI_PROT_EXEC)) { map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * Starting address must be aligned to worst-case allocation * granularity. (Windows.) 
*/ if (!NaClIsAllocPageMultiple(usraddr)) { if ((NACL_ABI_MAP_FIXED & flags) != 0) { NaClLog(2, "NaClSysMmap: address not allocation granularity aligned\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } else { NaClLog(2, "NaClSysMmap: Force alignment of misaligned hint address\n"); usraddr = NaClTruncAllocPage(usraddr); } } /* * Offset should be non-negative (nacl_abi_off_t is signed). This * condition is caught when the file is stat'd and checked, and * offset is ignored for anonymous mappings. */ if (offset < 0) { NaClLog(1, /* application bug */ "NaClSysMmap: negative file offset: %"NACL_PRId64"\n", (int64_t) offset); map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * And offset must be a multiple of the allocation unit. */ if (!NaClIsAllocPageMultiple((uintptr_t) offset)) { NaClLog(1, ("NaClSysMmap: file offset 0x%08"NACL_PRIxPTR" not multiple" " of allocation size\n"), (uintptr_t) offset); map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * Round up to a page size multiple. * * Note that if length > 0xffff0000 (i.e. -NACL_MAP_PAGESIZE), rounding * up the length will wrap around to 0. We check for length == 0 *after* * rounding up the length to simultaneously check for the length * originally being 0 and check for the wraparound. */ alloc_rounded_length = NaClRoundAllocPage(length); if (alloc_rounded_length != length) { if (mapping_code) { NaClLog(3, "NaClSysMmap: length not a multiple of allocation size\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } NaClLog(1, "NaClSysMmap: rounded length to 0x%"NACL_PRIxS"\n", alloc_rounded_length); } if (0 == (uint32_t) alloc_rounded_length) { map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * Sanity check in case any later code behaves badly if * |alloc_rounded_length| is >=4GB. This check shouldn't fail * because |length| was <4GB and we've already checked for overflow * when rounding it up. * TODO(mseaborn): Remove the need for this by using uint32_t for * untrusted sizes more consistently. 
*/ CHECK(alloc_rounded_length == (uint32_t) alloc_rounded_length); if (NULL == ndp) { /* * Note: sentinel values are bigger than the NaCl module addr space. */ file_size = kMaxUsableFileSize; file_bytes = kMaxUsableFileSize; host_rounded_file_bytes = kMaxUsableFileSize; alloc_rounded_file_bytes = kMaxUsableFileSize; } else { /* * We stat the file to figure out its actual size. * * This is necessary because the POSIXy interface we provide * allows mapping beyond the extent of a file but Windows' * interface does not. We simulate the POSIX behaviour on * Windows. */ map_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)-> Fstat)(ndp, &stbuf); if (0 != map_result) { goto cleanup; } /* * Preemptively refuse to map anything that's not a regular file or * shared memory segment. Other types usually report st_size of zero, * which the code below will handle by just doing a dummy PROT_NONE * mapping for the requested size and never attempting the underlying * NaClDesc Map operation. So without this check, the host OS never * gets the chance to refuse the mapping operation on an object that * can't do it. */ if (!NACL_ABI_S_ISREG(stbuf.nacl_abi_st_mode) && !NACL_ABI_S_ISSHM(stbuf.nacl_abi_st_mode)) { map_result = -NACL_ABI_ENODEV; goto cleanup; } /* * BUG(bsy): there's a race between this fstat and the actual mmap * below. It's probably insoluble. Even if we fstat again after * mmap and compared, the mmap could have "seen" the file with a * different size, after which the racing thread restored back to * the same value before the 2nd fstat takes place. 
*/ file_size = stbuf.nacl_abi_st_size; if (file_size < offset) { map_result = -NACL_ABI_EINVAL; goto cleanup; } file_bytes = file_size - offset; if ((nacl_off64_t) kMaxUsableFileSize < file_bytes) { host_rounded_file_bytes = kMaxUsableFileSize; } else { host_rounded_file_bytes = NaClRoundHostAllocPage((size_t) file_bytes); } ASSERT(host_rounded_file_bytes <= (nacl_off64_t) kMaxUsableFileSize); /* * We need to deal with NaClRoundHostAllocPage rounding up to zero * from ~0u - n, where n < 4096 or 65536 (== 1 alloc page). * * Luckily, file_bytes is at most kMaxUsableFileSize which is * smaller than SIZE_T_MAX, so it should never happen, but we * leave the explicit check below as defensive programming. */ alloc_rounded_file_bytes = NaClRoundAllocPage((size_t) host_rounded_file_bytes); if (0 == alloc_rounded_file_bytes && 0 != host_rounded_file_bytes) { map_result = -NACL_ABI_ENOMEM; goto cleanup; } /* * NB: host_rounded_file_bytes and alloc_rounded_file_bytes can be * zero. Such an mmap just makes memory (offset relative to * usraddr) in the range [0, alloc_rounded_length) inaccessible. */ } /* * host_rounded_file_bytes is how many bytes we can map from the * file, given the user-supplied starting offset. It is at least * one page. If it came from a real file, it is a multiple of * host-OS allocation size. it cannot be larger than * kMaxUsableFileSize. */ if (mapping_code && (size_t) file_bytes < alloc_rounded_length) { NaClLog(3, "NaClSysMmap: disallowing partial allocation page extension for" " short files\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } length = size_min(alloc_rounded_length, (size_t) host_rounded_file_bytes); /* * Lock the addr space. */ NaClXMutexLock(&nap->mu); NaClVmHoleOpeningMu(nap); holding_app_lock = 1; if (0 == (flags & NACL_ABI_MAP_FIXED)) { /* * The user wants us to pick an address range. */ if (0 == usraddr) { /* * Pick a hole in addr space of appropriate size, anywhere. * We pick one that's best for the system. 
*/ usrpage = NaClVmmapFindMapSpace(&nap->mem_map, alloc_rounded_length >> NACL_PAGESHIFT); NaClLog(4, "NaClSysMmap: FindMapSpace: page 0x%05"NACL_PRIxPTR"\n", usrpage); if (0 == usrpage) { map_result = -NACL_ABI_ENOMEM; goto cleanup; } usraddr = usrpage << NACL_PAGESHIFT; NaClLog(4, "NaClSysMmap: new starting addr: 0x%08"NACL_PRIxPTR "\n", usraddr); } else {