/*
 * Unmap pages from the untrusted address space (Windows version).
 *
 * Validates that [start, start+length) is an allocation-granularity
 * aligned, non-empty range of user addresses containing no executable
 * pages, then walks the range one NACL_MAP_PAGESIZE allocation at a
 * time: anonymous pages are decommitted via VirtualFree (the address
 * range stays reserved, so no hole opens in the untrusted address
 * space), while descriptor-backed pages are unmapped through the
 * backing NaClDesc's Unmap virtual function.  Each page is then
 * deleted from the untrusted VM map.
 *
 * Holds natp->nap->mu across the map inspection and mutation so other
 * threads observe a consistent address map.
 *
 * Returns 0 on success, or a negative NACL_ABI_* errno value.
 */
int32_t NaClSysMunmap(struct NaClAppThread *natp,
                      void                 *start,
                      size_t               length) {
  int32_t   retval = -NACL_ABI_EINVAL;
  uintptr_t sysaddr;
  uintptr_t addr;
  uintptr_t endaddr;
  int       holding_app_lock = 0;
  size_t    alloc_rounded_length;

  /*
   * BUGFIX: the original passed pointers and a size_t to plain "%x"
   * conversions, which is undefined behavior on 64-bit builds; use
   * NACL_PRIxPTR / NACL_PRIxS with explicit casts, exactly as the
   * POSIX variant of this syscall already does.
   */
  NaClLog(3, "NaClSysMunmap(0x%08"NACL_PRIxPTR", 0x%08"NACL_PRIxPTR
          ", 0x%"NACL_PRIxS")\n",
          (uintptr_t) natp, (uintptr_t) start, length);

  NaClSysCommonThreadSyscallEnter(natp);

  if (!NaClIsAllocPageMultiple((uintptr_t) start)) {
    NaClLog(4, "start addr not allocation multiple\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  if (0 == length) {
    /*
     * linux mmap of zero length yields a failure, but windows code
     * would just iterate through and do nothing, so does not yield a
     * failure.
     */
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  /* Round the length up to a whole number of allocation pages. */
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    length = alloc_rounded_length;
    NaClLog(LOG_WARNING,
            "munmap: rounded length to 0x%"NACL_PRIxS"\n", length);
  }
  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) start, length);
  if (kNaClBadAddress == sysaddr) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  NaClXMutexLock(&natp->nap->mu);
  holding_app_lock = 1;

  /*
   * User should be unable to unmap any executable pages.  We check here.
   */
  if (NaClSysCommonAddrRangeContainsExecutablePages_mu(natp->nap,
                                                       (uintptr_t) start,
                                                       length)) {
    NaClLog(2, "NaClSysMunmap: region contains executable pages\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  endaddr = sysaddr + length;
  for (addr = sysaddr; addr < endaddr; addr += NACL_MAP_PAGESIZE) {
    struct NaClVmmapEntry const *entry;

    entry = NaClVmmapFindPage(&natp->nap->mem_map,
                              NaClSysToUser(natp->nap, addr)
                              >> NACL_PAGESHIFT);
    if (NULL == entry) {
      /* Every page in a validated user range must have a map entry. */
      NaClLog(LOG_FATAL,
              "NaClSysMunmap: could not find VM map entry for addr 0x%08"
              NACL_PRIxPTR"\n",
              addr);
    }
    NaClLog(3,
            ("NaClSysMunmap: addr 0x%08"NACL_PRIxPTR
             ", nmop 0x%08"NACL_PRIxPTR"\n"),
            addr, (uintptr_t) entry->nmop);
    if (NULL == entry->nmop) {
      /* anonymous memory; we just decommit it and thus make it inaccessible */
      if (!VirtualFree((void *) addr,
                       NACL_MAP_PAGESIZE,
                       MEM_DECOMMIT)) {
        int error = GetLastError();
        NaClLog(LOG_FATAL,
                ("NaClSysMunmap: Could not VirtualFree MEM_DECOMMIT"
                 " addr 0x%08"NACL_PRIxPTR", error %d (0x%x)\n"),
                addr, error, error);
      }
    } else {
      /*
       * This should invoke a "safe" version of unmap that fills the
       * memory hole as quickly as possible, and may return
       * -NACL_ABI_E_MOVE_ADDRESS_SPACE.  The "safe" version just
       * minimizes the size of the timing hole for any racers, plus
       * the size of the memory window is only 64KB, rather than
       * whatever size the user is unmapping.
       */
      retval = (*entry->nmop->ndp->vtbl->Unmap)(entry->nmop->ndp,
                                                natp->effp,
                                                (void *) addr,
                                                NACL_MAP_PAGESIZE);
      if (0 != retval) {
        NaClLog(LOG_FATAL,
                ("NaClSysMunmap: Could not unmap via ndp->Unmap 0x%08"
                 NACL_PRIxPTR" and cannot handle address space move\n"),
                addr);
      }
    }
    /* Delete this page's entry from the untrusted VM map. */
    NaClVmmapUpdate(&natp->nap->mem_map,
                    (NaClSysToUser(natp->nap, (uintptr_t) addr)
                     >> NACL_PAGESHIFT),
                    NACL_PAGES_PER_MAP,
                    0,  /* prot */
                    (struct NaClMemObj *) NULL,
                    1); /* delete */
  }
  retval = 0;
cleanup:
  if (holding_app_lock) {
    NaClXMutexUnlock(&natp->nap->mu);
  }
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
/*
 * Unmap pages from the untrusted address space (POSIX version).
 *
 * Rather than calling munmap (which would open a hole in the reserved
 * untrusted address space that another thread could race to fill), the
 * region is overwritten with a PROT_NONE anonymous MAP_FIXED mapping,
 * then deleted from the untrusted VM map.
 *
 * Locking protocol (NOTE: order-sensitive):
 *   - takes natp->nap->mu, then waits on natp->nap->cv until no thread
 *     launches are in flight (threads_launching == 0);
 *   - sets vm_hole_may_exist while the remap is in progress, and clears
 *     it + broadcasts on the condvar before releasing mu in cleanup.
 *
 * Returns 0 on success, or a negative NACL_ABI_* errno value.
 */
int32_t NaClSysMunmap(struct NaClAppThread *natp,
                      void                 *start,
                      size_t               length) {
  int32_t   retval = -NACL_ABI_EINVAL;
  uintptr_t sysaddr;
  int       holding_app_lock = 0;
  size_t    alloc_rounded_length;

  NaClLog(3, "Entered NaClSysMunmap(0x%08"NACL_PRIxPTR", "
          "0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n",
          (uintptr_t) natp, (uintptr_t) start, length);

  NaClSysCommonThreadSyscallEnter(natp);

  /* start must be aligned to the worst-case allocation granularity. */
  if (!NaClIsAllocPageMultiple((uintptr_t) start)) {
    NaClLog(4, "start addr not allocation multiple\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  if (0 == length) {
    /*
     * linux mmap of zero length yields a failure, but osx does not, leading
     * to a NaClVmmapUpdate of zero pages, which should not occur.
     */
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  /* Round the length up to a whole number of allocation pages. */
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    length = alloc_rounded_length;
    NaClLog(LOG_WARNING,
            "munmap: rounded length to 0x%"NACL_PRIxS"\n", length);
  }
  /* Translate and bounds-check the user range against the sandbox. */
  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) start, length);
  if (kNaClBadAddress == sysaddr) {
    NaClLog(4, "region not user addresses\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  NaClXMutexLock(&natp->nap->mu);

  /* Wait for in-flight thread launches before touching the mappings. */
  while (0 != natp->nap->threads_launching) {
    NaClXCondVarWait(&natp->nap->cv, &natp->nap->mu);
  }
  /* Flag the transient window during which the remap is in progress. */
  natp->nap->vm_hole_may_exist = 1;

  holding_app_lock = 1;

  /*
   * NB: windows (or generic) version would use Munmap virtual
   * function from the backing NaClDesc object obtained by iterating
   * through the address map for the region, and those Munmap virtual
   * functions may return -NACL_ABI_E_MOVE_ADDRESS_SPACE.
   *
   * We should hold the application lock while doing this iteration
   * and unmapping, so that the address space is consistent for other
   * threads.
   */

  /*
   * User should be unable to unmap any executable pages.  We check here.
   */
  if (NaClSysCommonAddrRangeContainsExecutablePages_mu(natp->nap,
                                                       (uintptr_t) start,
                                                       length)) {
    NaClLog(2, "NaClSysMunmap: region contains executable pages\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Overwrite current mapping with inaccessible, anonymous
   * zero-filled pages, which should be copy-on-write and thus
   * relatively cheap.  Do not open up an address space hole.
   */
  NaClLog(4,
          ("NaClSysMunmap: mmap(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS","
           " 0x%x, 0x%x, -1, 0)\n"),
          sysaddr, length, PROT_NONE,
          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED);
  if (MAP_FAILED == mmap((void *) sysaddr,
                         length,
                         PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         -1,
                         (off_t) 0)) {
    NaClLog(4, "mmap to put in anonymous memory failed, errno = %d\n", errno);
    retval = -NaClXlateErrno(errno);
    goto cleanup;
  }
  /* Remove the range from the untrusted VM map. */
  NaClVmmapUpdate(&natp->nap->mem_map,
                  (NaClSysToUser(natp->nap, (uintptr_t) sysaddr)
                   >> NACL_PAGESHIFT),
                  length >> NACL_PAGESHIFT,
                  0,  /* prot */
                  (struct NaClMemObj *) NULL,
                  1); /* Delete mapping */
  retval = 0;
cleanup:
  if (holding_app_lock) {
    /* Clear the hole flag and wake waiters before dropping the lock. */
    natp->nap->vm_hole_may_exist = 0;
    NaClXCondVarBroadcast(&natp->nap->cv);
    NaClXMutexUnlock(&natp->nap->mu);
  }
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
/* Warning: sizeof(nacl_abi_off_t)!=sizeof(off_t) on OSX */ int32_t NaClSysMmapIntern(struct NaClApp *nap, void *start, size_t length, int prot, int flags, int d, nacl_abi_off_t offset) { int allowed_flags; struct NaClDesc *ndp; uintptr_t usraddr; uintptr_t usrpage; uintptr_t sysaddr; uintptr_t endaddr; int mapping_code; uintptr_t map_result; int holding_app_lock; struct nacl_abi_stat stbuf; size_t alloc_rounded_length; nacl_off64_t file_size; nacl_off64_t file_bytes; nacl_off64_t host_rounded_file_bytes; size_t alloc_rounded_file_bytes; uint32_t val_flags; holding_app_lock = 0; ndp = NULL; allowed_flags = (NACL_ABI_MAP_FIXED | NACL_ABI_MAP_SHARED | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_ANONYMOUS); usraddr = (uintptr_t) start; if (0 != (flags & ~allowed_flags)) { NaClLog(2, "invalid mmap flags 0%o, ignoring extraneous bits\n", flags); flags &= allowed_flags; } if (0 != (flags & NACL_ABI_MAP_ANONYMOUS)) { /* * anonymous mmap, so backing store is just swap: no descriptor is * involved, and no memory object will be created to represent the * descriptor. */ ndp = NULL; } else { ndp = NaClAppGetDesc(nap, d); if (NULL == ndp) { map_result = -NACL_ABI_EBADF; goto cleanup; } } mapping_code = 0; /* * Check if application is trying to do dynamic code loading by * mmaping a file. */ if (0 != (NACL_ABI_PROT_EXEC & prot) && 0 != (NACL_ABI_MAP_FIXED & flags) && NULL != ndp && NaClSysCommonAddrRangeInAllowedDynamicCodeSpace(nap, usraddr, length)) { if (!nap->enable_dyncode_syscalls) { NaClLog(LOG_WARNING, "NaClSysMmap: PROT_EXEC when dyncode syscalls are disabled.\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } if (0 != (NACL_ABI_PROT_WRITE & prot)) { NaClLog(3, "NaClSysMmap: asked for writable and executable code pages?!?\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } mapping_code = 1; } else if (0 != (prot & NACL_ABI_PROT_EXEC)) { map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * Starting address must be aligned to worst-case allocation * granularity. (Windows.) 
*/ if (!NaClIsAllocPageMultiple(usraddr)) { if ((NACL_ABI_MAP_FIXED & flags) != 0) { NaClLog(2, "NaClSysMmap: address not allocation granularity aligned\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } else { NaClLog(2, "NaClSysMmap: Force alignment of misaligned hint address\n"); usraddr = NaClTruncAllocPage(usraddr); } } /* * Offset should be non-negative (nacl_abi_off_t is signed). This * condition is caught when the file is stat'd and checked, and * offset is ignored for anonymous mappings. */ if (offset < 0) { NaClLog(1, /* application bug */ "NaClSysMmap: negative file offset: %"NACL_PRId64"\n", (int64_t) offset); map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * And offset must be a multiple of the allocation unit. */ if (!NaClIsAllocPageMultiple((uintptr_t) offset)) { NaClLog(1, ("NaClSysMmap: file offset 0x%08"NACL_PRIxPTR" not multiple" " of allocation size\n"), (uintptr_t) offset); map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * Round up to a page size multiple. * * Note that if length > 0xffff0000 (i.e. -NACL_MAP_PAGESIZE), rounding * up the length will wrap around to 0. We check for length == 0 *after* * rounding up the length to simultaneously check for the length * originally being 0 and check for the wraparound. */ alloc_rounded_length = NaClRoundAllocPage(length); if (alloc_rounded_length != length) { if (mapping_code) { NaClLog(3, "NaClSysMmap: length not a multiple of allocation size\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } NaClLog(1, "NaClSysMmap: rounded length to 0x%"NACL_PRIxS"\n", alloc_rounded_length); } if (0 == (uint32_t) alloc_rounded_length) { map_result = -NACL_ABI_EINVAL; goto cleanup; } /* * Sanity check in case any later code behaves badly if * |alloc_rounded_length| is >=4GB. This check shouldn't fail * because |length| was <4GB and we've already checked for overflow * when rounding it up. * TODO(mseaborn): Remove the need for this by using uint32_t for * untrusted sizes more consistently. 
*/ CHECK(alloc_rounded_length == (uint32_t) alloc_rounded_length); if (NULL == ndp) { /* * Note: sentinel values are bigger than the NaCl module addr space. */ file_size = kMaxUsableFileSize; file_bytes = kMaxUsableFileSize; host_rounded_file_bytes = kMaxUsableFileSize; alloc_rounded_file_bytes = kMaxUsableFileSize; } else { /* * We stat the file to figure out its actual size. * * This is necessary because the POSIXy interface we provide * allows mapping beyond the extent of a file but Windows' * interface does not. We simulate the POSIX behaviour on * Windows. */ map_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)-> Fstat)(ndp, &stbuf); if (0 != map_result) { goto cleanup; } /* * Preemptively refuse to map anything that's not a regular file or * shared memory segment. Other types usually report st_size of zero, * which the code below will handle by just doing a dummy PROT_NONE * mapping for the requested size and never attempting the underlying * NaClDesc Map operation. So without this check, the host OS never * gets the chance to refuse the mapping operation on an object that * can't do it. */ if (!NACL_ABI_S_ISREG(stbuf.nacl_abi_st_mode) && !NACL_ABI_S_ISSHM(stbuf.nacl_abi_st_mode)) { map_result = -NACL_ABI_ENODEV; goto cleanup; } /* * BUG(bsy): there's a race between this fstat and the actual mmap * below. It's probably insoluble. Even if we fstat again after * mmap and compared, the mmap could have "seen" the file with a * different size, after which the racing thread restored back to * the same value before the 2nd fstat takes place. 
*/ file_size = stbuf.nacl_abi_st_size; if (file_size < offset) { map_result = -NACL_ABI_EINVAL; goto cleanup; } file_bytes = file_size - offset; if ((nacl_off64_t) kMaxUsableFileSize < file_bytes) { host_rounded_file_bytes = kMaxUsableFileSize; } else { host_rounded_file_bytes = NaClRoundHostAllocPage((size_t) file_bytes); } ASSERT(host_rounded_file_bytes <= (nacl_off64_t) kMaxUsableFileSize); /* * We need to deal with NaClRoundHostAllocPage rounding up to zero * from ~0u - n, where n < 4096 or 65536 (== 1 alloc page). * * Luckily, file_bytes is at most kMaxUsableFileSize which is * smaller than SIZE_T_MAX, so it should never happen, but we * leave the explicit check below as defensive programming. */ alloc_rounded_file_bytes = NaClRoundAllocPage((size_t) host_rounded_file_bytes); if (0 == alloc_rounded_file_bytes && 0 != host_rounded_file_bytes) { map_result = -NACL_ABI_ENOMEM; goto cleanup; } /* * NB: host_rounded_file_bytes and alloc_rounded_file_bytes can be * zero. Such an mmap just makes memory (offset relative to * usraddr) in the range [0, alloc_rounded_length) inaccessible. */ } /* * host_rounded_file_bytes is how many bytes we can map from the * file, given the user-supplied starting offset. It is at least * one page. If it came from a real file, it is a multiple of * host-OS allocation size. it cannot be larger than * kMaxUsableFileSize. */ if (mapping_code && (size_t) file_bytes < alloc_rounded_length) { NaClLog(3, "NaClSysMmap: disallowing partial allocation page extension for" " short files\n"); map_result = -NACL_ABI_EINVAL; goto cleanup; } length = size_min(alloc_rounded_length, (size_t) host_rounded_file_bytes); /* * Lock the addr space. */ NaClXMutexLock(&nap->mu); NaClVmHoleOpeningMu(nap); holding_app_lock = 1; if (0 == (flags & NACL_ABI_MAP_FIXED)) { /* * The user wants us to pick an address range. */ if (0 == usraddr) { /* * Pick a hole in addr space of appropriate size, anywhere. * We pick one that's best for the system. 
*/ usrpage = NaClVmmapFindMapSpace(&nap->mem_map, alloc_rounded_length >> NACL_PAGESHIFT); NaClLog(4, "NaClSysMmap: FindMapSpace: page 0x%05"NACL_PRIxPTR"\n", usrpage); if (0 == usrpage) { map_result = -NACL_ABI_ENOMEM; goto cleanup; } usraddr = usrpage << NACL_PAGESHIFT; NaClLog(4, "NaClSysMmap: new starting addr: 0x%08"NACL_PRIxPTR "\n", usraddr); } else {