/*
 * Map a host-OS-backed I/O descriptor into the address space.
 *
 * vself is downcast to struct NaClDescIoDesc; effp is the effector used
 * by the host mapping layer.  Returns the mapped address on success, or
 * a negated NACL_ABI_ errno value on failure.
 */
static uintptr_t NaClDescIoDescMap(struct NaClDesc *vself,
                                   struct NaClDescEffector *effp,
                                   void *start_addr,
                                   size_t len,
                                   int prot,
                                   int flags,
                                   nacl_off64_t offset) {
  struct NaClDescIoDesc *iodesc = (struct NaClDescIoDesc *) vself;
  uintptr_t map_result;
  uintptr_t space_addr;

  /*
   * prot must only contain NACL_ABI_PROT_* flags.
   */
  if ((prot & ~(NACL_ABI_PROT_MASK)) != 0) {
    NaClLog(LOG_INFO,
            ("NaClDescIoDescMap: prot has other bits"
             " than NACL_ABI_PROT_{READ|WRITE|EXEC}\n"));
    return -NACL_ABI_EINVAL;
  }

  /*
   * Without NACL_ABI_MAP_FIXED the caller-supplied start_addr is only
   * a hint; reserve a fresh region ourselves and then map there fixed.
   */
  if ((flags & NACL_ABI_MAP_FIXED) == 0) {
    if (!NaClFindAddressSpace(&space_addr, len)) {
      NaClLog(1, "NaClDescIoDescMap: no address space?\n");
      return -NACL_ABI_ENOMEM;
    }
    NaClLog(4,
            "NaClDescIoDescMap: NaClFindAddressSpace"
            " returned 0x%"NACL_PRIxPTR"\n",
            space_addr);
    start_addr = (void *) space_addr;
  }
  flags |= NACL_ABI_MAP_FIXED;

  map_result = NaClHostDescMap((NULL == iodesc) ? NULL : iodesc->hd,
                               effp,
                               (void *) start_addr,
                               len,
                               prot,
                               flags,
                               offset);
  if (NACL_MAP_FAILED == (void *) map_result) {
    /* ask the caller to perform the address-space move dance */
    return -NACL_ABI_E_MOVE_ADDRESS_SPACE;
  }
  return (uintptr_t) start_addr;
}
/*
 * Map an IMC shared-memory descriptor into the address space, one
 * NACL_MAP_PAGESIZE page at a time, interleaving unmap and map so the
 * window in which a page is unmapped (exploitable on Windows) is as
 * short as possible.
 *
 * Returns the mapped address on success, or a negated NACL_ABI_ errno
 * value on failure.  Some internal failures are fatal (LOG_FATAL).
 */
static uintptr_t NaClDescImcShmMap(struct NaClDesc *vself,
                                   struct NaClDescEffector *effp,
                                   void *start_addr,
                                   size_t len,
                                   int prot,
                                   int flags,
                                   nacl_off64_t offset) {
  struct NaClDescImcShm *self = (struct NaClDescImcShm *) vself;
  int rv;
  int nacl_imc_prot;
  int nacl_imc_flags;
  uintptr_t addr;       /* current page being remapped */
  uintptr_t end_addr;   /* one past the last byte to map */
  void *result;
  nacl_off64_t tmp_off64;
  off_t tmp_off;        /* 32-bit-safe offset; guarded by INT32_MAX check below */

  NaClLog(4,
          "NaClDescImcShmMmap(,,0x%08"NACL_PRIxPTR",0x%"NACL_PRIxS","
          "0x%x,0x%x,0x%08"NACL_PRIxNACL_OFF64")\n",
          (uintptr_t) start_addr, len, prot, flags, offset);
  /*
   * shm must have NACL_ABI_MAP_SHARED in flags, and all calls through
   * this API must supply a start_addr, so NACL_ABI_MAP_FIXED is
   * assumed.
   */
  if (NACL_ABI_MAP_SHARED != (flags & NACL_ABI_MAP_SHARING_MASK)) {
    NaClLog(LOG_INFO,
            ("NaClDescImcShmMap: Mapping not NACL_ABI_MAP_SHARED,"
             " flags 0x%x\n"),
            flags);
    return -NACL_ABI_EINVAL;
  }
  /* NOTE: a NULL start_addr with MAP_FIXED is only logged, not rejected */
  if (0 != (NACL_ABI_MAP_FIXED & flags) && NULL == start_addr) {
    NaClLog(LOG_INFO,
            ("NaClDescImcShmMap: Mapping NACL_ABI_MAP_FIXED"
             " but start_addr is NULL\n"));
  }
  /* post-condition: if NULL == start_addr, then NACL_ABI_MAP_FIXED not set */
  /*
   * prot must not contain bits other than PROT_{READ|WRITE|EXEC}.
   */
  if (0 != (~(NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE | NACL_ABI_PROT_EXEC)
            & prot)) {
    NaClLog(LOG_INFO,
            "NaClDescImcShmMap: prot has other bits than"
            " PROT_{READ|WRITE|EXEC}\n");
    return -NACL_ABI_EINVAL;
  }
  /*
   * Map from NACL_ABI_ prot and flags bits to IMC library flags,
   * which will later map back into posix-style prot/flags on *x
   * boxen, and to MapViewOfFileEx arguments on Windows.
   */
  nacl_imc_prot = 0;
  if (NACL_ABI_PROT_READ & prot) {
    nacl_imc_prot |= NACL_PROT_READ;
  }
  if (NACL_ABI_PROT_WRITE & prot) {
    nacl_imc_prot |= NACL_PROT_WRITE;
  }
  if (NACL_ABI_PROT_EXEC & prot) {
    nacl_imc_prot |= NACL_PROT_EXEC;
  }
  nacl_imc_flags = NACL_MAP_SHARED;
  if (0 == (NACL_ABI_MAP_FIXED & flags)) {
    /* start_addr is a hint, and we just ignore the hint... */
    if (!NaClFindAddressSpace(&addr, len)) {
      NaClLog(1, "NaClDescImcShmMap: no address space?!?\n");
      return -NACL_ABI_ENOMEM;
    }
    start_addr = (void *) addr;
  }
  nacl_imc_flags |= NACL_MAP_FIXED;

  tmp_off64 = offset + len;
  /* just NaClRoundAllocPage, but in 64 bits */
  tmp_off64 = ((tmp_off64 + NACL_MAP_PAGESIZE - 1)
               & ~(uint64_t) (NACL_MAP_PAGESIZE - 1));
  /* reject mappings whose end offset would not fit in a 32-bit off_t */
  if (tmp_off64 > INT32_MAX) {
    NaClLog(LOG_INFO,
            "NaClDescImcShmMap: total offset exceeds 32-bits\n");
    return -NACL_ABI_EOVERFLOW;
  }
  /*
   * For *x, we just map with MAP_FIXED and the kernel takes care of
   * atomically unmapping any existing memory.  For Windows, we must
   * unmap existing memory first, which creates a race condition,
   * where some other innocent thread puts some other memory into the
   * hole, and that memory becomes vulnerable to attack by the
   * untrusted NaCl application.
   *
   * For now, abort the process.  We will need to figure out how to
   * re-architect this code to do the address space move, since it is
   * deep surgery and we'll need to ensure that all threads have
   * stopped and any addresses derived from the old address space
   * would not be on any thread's call stack, i.e., stop the thread in
   * user space or before entering real service runtime code.  This
   * means that no application thread may be indefinitely blocked
   * performing a service call in the service runtime, since otherwise
   * there is no way for us to stop all threads.
   *
   * TODO(bsy): We will probably return an internal error code
   * -NACL_ABI_E_MOVE_ADDRESS_SPACE to ask the caller to do the address space
   * dance.
   */
  for (addr = (uintptr_t) start_addr,
           end_addr = addr + len,
           tmp_off = (off_t) offset;
       addr < end_addr;
       addr += NACL_MAP_PAGESIZE,
           tmp_off += NACL_MAP_PAGESIZE) {
    /*
     * Minimize the time between the unmap and the map for the same
     * page: we interleave the unmap and map for the pages, rather
     * than do all the unmap first and then do all of the map
     * operations.
     */
    if (0 != (rv = (*effp->vtbl->UnmapMemory)(effp,
                                              addr,
                                              NACL_MAP_PAGESIZE))) {
      NaClLog(LOG_FATAL,
              ("NaClDescImcShmMap: error %d --"
               " could not unmap 0x%08"NACL_PRIxPTR", length 0x%x\n"),
              rv,
              addr,
              NACL_MAP_PAGESIZE);
    }
    result = NaClMap((void *) addr,
                     NACL_MAP_PAGESIZE,
                     nacl_imc_prot,
                     nacl_imc_flags,
                     self->h,
                     tmp_off);
    if (NACL_MAP_FAILED == result) {
      return -NACL_ABI_E_MOVE_ADDRESS_SPACE;
    }
    /* with NACL_MAP_FIXED, landing anywhere else is unrecoverable */
    if (0 != (NACL_ABI_MAP_FIXED & flags) && result != (void *) addr) {
      NaClLog(LOG_FATAL,
              ("NaClDescImcShmMap: NACL_MAP_FIXED but"
               " got 0x%08"NACL_PRIxPTR" instead of 0x%08"NACL_PRIxPTR"\n"),
              (uintptr_t) result,
              addr);
    }
  }
  return (uintptr_t) start_addr;
}
/*
 * Map an IMC shared-memory descriptor into the address space via a
 * single NaClMap call through the effector.
 *
 * Returns the mapped address on success, or a negated NACL_ABI_ errno
 * value on failure; a fixed mapping landing elsewhere is fatal.
 */
static uintptr_t NaClDescImcShmMap(struct NaClDesc *vself,
                                   struct NaClDescEffector *effp,
                                   void *start_addr,
                                   size_t len,
                                   int prot,
                                   int flags,
                                   nacl_off64_t offset) {
  struct NaClDescImcShm *shm = (struct NaClDescImcShm *) vself;
  int imc_prot;
  int imc_flags;
  uintptr_t picked_addr;
  void *map_result;
  nacl_off64_t rounded_end;

  /*
   * shm must have NACL_ABI_MAP_SHARED in flags, and all calls through
   * this API must supply a start_addr, so NACL_ABI_MAP_FIXED is
   * assumed.
   */
  if ((flags & NACL_ABI_MAP_SHARING_MASK) != NACL_ABI_MAP_SHARED) {
    NaClLog(LOG_INFO,
            ("NaClDescImcShmMap: Mapping not NACL_ABI_MAP_SHARED,"
             " flags 0x%x\n"),
            flags);
    return -NACL_ABI_EINVAL;
  }
  if ((flags & NACL_ABI_MAP_FIXED) != 0 && NULL == start_addr) {
    NaClLog(LOG_INFO,
            ("NaClDescImcShmMap: Mapping NACL_ABI_MAP_FIXED"
             " but start_addr is NULL\n"));
  }
  /* post-condition: if NULL == start_addr, then NACL_ABI_MAP_FIXED not set */

  /*
   * prot must not contain bits other than PROT_{READ|WRITE|EXEC}.
   */
  if ((prot & ~(NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE |
                NACL_ABI_PROT_EXEC)) != 0) {
    NaClLog(LOG_INFO,
            "NaClDescImcShmMap: prot has other bits than"
            " PROT_{READ|WRITE|EXEC}\n");
    return -NACL_ABI_EINVAL;
  }

  /*
   * Translate NACL_ABI_ prot and flags bits into the IMC library's
   * encoding, which later maps back into posix-style prot/flags on *x
   * boxen, and to MapViewOfFileEx arguments on Windows.
   */
  imc_prot = 0;
  if (prot & NACL_ABI_PROT_READ) {
    imc_prot |= NACL_PROT_READ;
  }
  if (prot & NACL_ABI_PROT_WRITE) {
    imc_prot |= NACL_PROT_WRITE;
  }
  if (prot & NACL_ABI_PROT_EXEC) {
    imc_prot |= NACL_PROT_EXEC;
  }

  imc_flags = NACL_MAP_SHARED;
  if ((flags & NACL_ABI_MAP_FIXED) == 0) {
    /* start_addr is a hint, and we just ignore the hint... */
    if (!NaClFindAddressSpace(&picked_addr, len)) {
      NaClLog(1, "NaClDescImcShmMap: no address space?!?\n");
      return -NACL_ABI_ENOMEM;
    }
    start_addr = (void *) picked_addr;
  }
  imc_flags |= NACL_MAP_FIXED;

  /* just NaClRoundAllocPage, but in 64 bits */
  rounded_end = offset + len;
  rounded_end = ((rounded_end + NACL_MAP_PAGESIZE - 1)
                 & ~(uint64_t) (NACL_MAP_PAGESIZE - 1));
  if (rounded_end > INT32_MAX) {
    NaClLog(LOG_INFO,
            "NaClDescImcShmMap: total offset exceeds 32-bits\n");
    return -NACL_ABI_EOVERFLOW;
  }

  map_result = NaClMap(effp,
                       (void *) start_addr,
                       len,
                       imc_prot,
                       imc_flags,
                       shm->h,
                       (off_t) offset);
  if (NACL_MAP_FAILED == map_result) {
    return -NACL_ABI_E_MOVE_ADDRESS_SPACE;
  }
  if ((flags & NACL_ABI_MAP_FIXED) != 0 &&
      map_result != (void *) start_addr) {
    NaClLog(LOG_FATAL,
            ("NaClDescImcShmMap: NACL_MAP_FIXED but got %p instead of %p\n"),
            map_result,
            start_addr);
  }
  return (uintptr_t) start_addr;
}
/* * NaClAllocatePow2AlignedMemory is for allocating a large amount of * memory of mem_sz bytes that must be address aligned, so that * log_alignment low-order address bits must be zero. * * Returns the aligned region on success, or NULL on failure. */ static void *NaClAllocatePow2AlignedMemory(size_t mem_sz, size_t log_alignment, enum NaClAslrMode aslr_mode) { uintptr_t pow2align; size_t request_size; uintptr_t unrounded_addr; uintptr_t rounded_addr; size_t extra; int found_memory; pow2align = ((uintptr_t) 1) << log_alignment; request_size = mem_sz + pow2align; NaClLog(4, "%"MSGWIDTH"s %016"NACL_PRIxS"\n", " Ask:", request_size); if (NACL_ENABLE_ASLR == aslr_mode) { found_memory = NaClFindAddressSpaceRandomized( &unrounded_addr, request_size, MAX_ADDRESS_RANDOMIZATION_ATTEMPTS); } else { found_memory = NaClFindAddressSpace(&unrounded_addr, request_size); } if (!found_memory) { NaClLog(LOG_FATAL, "NaClAllocatePow2AlignedMemory: Failed to reserve %"NACL_PRIxS " bytes of address space\n", request_size); } NaClLog(4, "%"MSGWIDTH"s %016"NACL_PRIxPTR"\n", "orig memory at", unrounded_addr); rounded_addr = (unrounded_addr + (pow2align - 1)) & ~(pow2align - 1); extra = rounded_addr - unrounded_addr; if (0 != extra) { NaClLog(4, "%"MSGWIDTH"s %016"NACL_PRIxPTR", %016"NACL_PRIxS"\n", "Freeing front:", unrounded_addr, extra); if (-1 == munmap((void *) unrounded_addr, extra)) { perror("munmap (front)"); NaClLog(LOG_FATAL, "NaClAllocatePow2AlignedMemory: munmap front failed\n"); } } extra = pow2align - extra; if (0 != extra) { NaClLog(4, "%"MSGWIDTH"s %016"NACL_PRIxPTR", %016"NACL_PRIxS"\n", "Freeing tail:", rounded_addr + mem_sz, extra); if (-1 == munmap((void *) (rounded_addr + mem_sz), extra)) { perror("munmap (end)"); NaClLog(LOG_FATAL, "NaClAllocatePow2AlignedMemory: munmap tail failed\n"); } } NaClLog(4, "%"MSGWIDTH"s %016"NACL_PRIxPTR"\n", "Aligned memory:", rounded_addr); /* * we could also mmap again at rounded_addr w/o MAP_NORESERVE etc to * ensure that we have the 
memory, but that's better done in another * utility function. the semantics here is no paging space * reserved, as in Windows MEM_RESERVE without MEM_COMMIT. */ return (void *) rounded_addr; }