Code Example #1
File: manifest_setup.c Project: clatour/zerovm
/* Preallocate a memory area of the given size; abort on failure. */
static void PreallocateUserMemory(struct NaClApp *nap)
{
  uintptr_t i;
  int64_t heap;
  void *p;

  assert(nap != NULL);
  assert(nap->system_manifest != NULL);

  /* abort if max_mem is not specified or is invalid */
  ZLOGFAIL(nap->heap_end == 0 || nap->heap_end > FOURGIG,
      ENOMEM, "invalid memory size");

  /* calculate user heap size (must be allocated next to the data_end) */
  p = (void*)NaClRoundAllocPage(nap->data_end);
  heap = nap->heap_end - nap->stack_size;
  heap = NaClRoundAllocPage(heap) - NaClRoundAllocPage(nap->data_end);
  ZLOGFAIL(heap <= LEAST_USER_HEAP_SIZE, ENOMEM, "user heap size is too small");

  /* since 4gb of user space is already allocated just set protection to the heap */
  p = (void*)NaClUserToSys(nap, (uintptr_t)p);
  i = NaCl_mprotect(p, heap, PROT_READ | PROT_WRITE);
  ZLOGFAIL(0 != i, -i, "cannot set protection on user heap");
  nap->heap_end = NaClSysToUser(nap, (uintptr_t)p + heap);

  nap->mem_map[HeapIdx].size += heap;
  nap->mem_map[HeapIdx].end += heap;
}
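
Every example on this page leans on the same two helpers: NaClRoundAllocPage rounds an address up to the next 64 KiB allocation boundary (NACL_MAP_PAGESIZE) and NaClTruncAllocPage rounds down. The sketch below reimplements that arithmetic standalone, assuming the granularity is a power of two; the My* names are illustrative stand-ins, not the real NaCl declarations. Note that rounding up can wrap to 0 near the top of the address space, which examples #7 and #18 below check for explicitly.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NACL_MAP_PAGESIZE (64 * 1024)  /* assumed worst-case granularity */

/* Round up to the next allocation boundary; wraps to 0 on overflow. */
static uintptr_t MyRoundAllocPage(uintptr_t addr) {
  return (addr + NACL_MAP_PAGESIZE - 1) & ~((uintptr_t) NACL_MAP_PAGESIZE - 1);
}

/* Round down to the enclosing allocation boundary. */
static uintptr_t MyTruncAllocPage(uintptr_t addr) {
  return addr & ~((uintptr_t) NACL_MAP_PAGESIZE - 1);
}

int main(void) {
  assert(MyRoundAllocPage(0) == 0);
  assert(MyRoundAllocPage(1) == NACL_MAP_PAGESIZE);
  assert(MyRoundAllocPage(NACL_MAP_PAGESIZE) == NACL_MAP_PAGESIZE);
  assert(MyTruncAllocPage(NACL_MAP_PAGESIZE + 1) == NACL_MAP_PAGESIZE);
  printf("round(0x12345) = 0x%lx\n",
         (unsigned long) MyRoundAllocPage(0x12345));  /* prints 0x20000 */
  return 0;
}
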
Code Example #2
File: sel_ldr_standard.c Project: bortoq/zerovm
/*
 * Basic address space layout sanity check.
 */
NaClErrorCode NaClCheckAddressSpaceLayoutSanity(struct NaClApp *nap,
                                                uintptr_t rodata_end,
                                                uintptr_t data_end,
                                                uintptr_t max_vaddr) {
  if (0 != nap->data_start) {
    if (data_end != max_vaddr) {
      NaClLog(LOG_INFO, "data segment is not last\n");
      return LOAD_DATA_NOT_LAST_SEGMENT;
    }
  } else if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(rodata_end) != max_vaddr) {
      /*
       * This should be unreachable, but we include it just for
       * completeness.
       *
       * Here is why it is unreachable:
       *
       * NaClPhdrChecks checks the text segment starting address.  The
       * only allowed loaded segments are text, data, and rodata.
       * Thus unless the rodata is in the trampoline region, it must
       * be after the text.  And NaClElfImageValidateProgramHeaders
       * ensures that all segments start after the trampoline region.
       */
      NaClLog(LOG_INFO, "no data segment, but rodata segment is not last\n");
      return LOAD_NO_DATA_BUT_RODATA_NOT_LAST_SEGMENT;
    }
  }
  if (0 != nap->rodata_start && 0 != nap->data_start) {
    if (rodata_end > nap->data_start) {
      NaClLog(LOG_INFO, "rodata overlaps data\n");
      return LOAD_RODATA_OVERLAPS_DATA;
    }
  }
  if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->rodata_start) {
      return LOAD_TEXT_OVERLAPS_RODATA;
    }
  } else if (0 != nap->data_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->data_start) {
      return LOAD_TEXT_OVERLAPS_DATA;
    }
  }
  if (0 != nap->rodata_start &&
      NaClRoundAllocPage(nap->rodata_start) != nap->rodata_start) {
    NaClLog(LOG_INFO, "rodata_start not a multiple of allocation size\n");
    return LOAD_BAD_RODATA_ALIGNMENT;
  }
  if (0 != nap->data_start &&
      NaClRoundAllocPage(nap->data_start) != nap->data_start) {
    NaClLog(LOG_INFO, "data_start not a multiple of allocation size\n");
    return LOAD_BAD_DATA_ALIGNMENT;
  }
  return LOAD_OK;
}
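
To see the ordering and alignment rules in action, the sketch below runs a toy layout through a loose, simplified mirror of the function above. It is not the real API: the 64 KiB granularity and the string return values are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define ALLOC_PAGE (64 * 1024)  /* assumed NACL_MAP_PAGESIZE */

static uintptr_t Round(uintptr_t a) {
  return (a + ALLOC_PAGE - 1) & ~((uintptr_t) ALLOC_PAGE - 1);
}

/*
 * Loose mirror of NaClCheckAddressSpaceLayoutSanity: segments must be
 * ordered text < rodata < data, rodata/data starts must be allocation
 * aligned, and the last present segment must end at max_vaddr.
 */
static const char *CheckLayout(uintptr_t text_end, uintptr_t rodata_start,
                               uintptr_t rodata_end, uintptr_t data_start,
                               uintptr_t data_end, uintptr_t max_vaddr) {
  if (data_start != 0 && data_end != max_vaddr) return "data not last";
  if (rodata_start != 0 && data_start != 0 && rodata_end > data_start)
    return "rodata overlaps data";
  if (rodata_start != 0 && Round(text_end) > rodata_start)
    return "text overlaps rodata";
  if (rodata_start != 0 && Round(rodata_start) != rodata_start)
    return "rodata_start misaligned";
  if (data_start != 0 && Round(data_start) != data_start)
    return "data_start misaligned";
  return "LOAD_OK";
}

int main(void) {
  /* Well-formed layout: text up to 128 KiB, rodata, then data last. */
  printf("%s\n", CheckLayout(0x20000, 0x20000, 0x28000,
                             0x30000, 0x31000, 0x31000));
  /* Misaligned rodata_start, as in LOAD_BAD_RODATA_ALIGNMENT. */
  printf("%s\n", CheckLayout(0x20000, 0x21000, 0x28000,
                             0x30000, 0x31000, 0x31000));
  return 0;
}
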
Code Example #3
/*
 * Fill from static_text_end to end of that page with halt
 * instruction, which is at least NACL_HALT_LEN in size when no
 * dynamic text is present.  Does not touch dynamic text region, which
 * should be pre-filled with HLTs.
 *
 * By adding NACL_HALT_SLED_SIZE, we ensure that the code region ends
 * with HLTs, just in case the CPU has a bug in which it fails to
 * check for running off the end of the x86 code segment.
 */
void NaClFillEndOfTextRegion(struct NaClApp *nap) {
  size_t page_pad;

  /*
   * NOTE: make sure we are not silently overwriting data.  It is the
   * toolchain's responsibility to ensure that a NACL_HALT_SLED_SIZE
   * gap exists.
   */
  if (0 != nap->data_start &&
      nap->static_text_end + NACL_HALT_SLED_SIZE >
      NaClTruncAllocPage(nap->data_start)) {
    NaClLog(LOG_FATAL, "Missing gap between text and data for halt_sled\n");
  }
  if (0 != nap->rodata_start &&
      nap->static_text_end + NACL_HALT_SLED_SIZE > nap->rodata_start) {
    NaClLog(LOG_FATAL, "Missing gap between text and rodata for halt_sled\n");
  }

  if (NULL == nap->text_shm) {
    /*
     * No dynamic text exists.  Space for NACL_HALT_SLED_SIZE must
     * exist.
     */
    page_pad = (NaClRoundAllocPage(nap->static_text_end + NACL_HALT_SLED_SIZE)
                - nap->static_text_end);
    CHECK(page_pad >= NACL_HALT_SLED_SIZE);
    CHECK(page_pad < NACL_MAP_PAGESIZE + NACL_HALT_SLED_SIZE);
  } else {
    /*
     * Dynamic text exists; the halt sled resides in the dynamic text
     * region, so all we need to do here is to round out the last
     * static text page with HLT instructions.  It doesn't matter if
     * the size of this region is smaller than NACL_HALT_SLED_SIZE --
     * this is just to fully initialize the page, rather than (later)
     * decoding/validating zero-filled memory as instructions.
     */
    page_pad = NaClRoundAllocPage(nap->static_text_end) - nap->static_text_end;
  }

  NaClLog(4,
          "Filling with halts: %08"NACL_PRIxPTR", %08"NACL_PRIxS" bytes\n",
          nap->mem_start + nap->static_text_end,
          page_pad);

  NaClFillMemoryRegionWithHalt((void *)(nap->mem_start + nap->static_text_end),
                               page_pad);

  nap->static_text_end += page_pad;
}
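
The page_pad computation in the no-dynamic-text branch is easiest to follow with concrete numbers. The standalone sketch below reproduces it; the two assertions are the CHECK bounds from the code above, and the NACL_HALT_SLED_SIZE value (32) is an assumption for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NACL_MAP_PAGESIZE (64 * 1024)  /* assumed allocation page */
#define NACL_HALT_SLED_SIZE 32         /* assumed sled length */

static uintptr_t Round(uintptr_t a) {
  return (a + NACL_MAP_PAGESIZE - 1) & ~((uintptr_t) NACL_MAP_PAGESIZE - 1);
}

int main(void) {
  /* No dynamic text: pad past the sled, up to the next allocation page. */
  uintptr_t static_text_end = 0x2fff0;  /* example value */
  size_t page_pad =
      Round(static_text_end + NACL_HALT_SLED_SIZE) - static_text_end;

  /* The same bounds the CHECKs above enforce. */
  assert(page_pad >= NACL_HALT_SLED_SIZE);
  assert(page_pad < NACL_MAP_PAGESIZE + NACL_HALT_SLED_SIZE);

  printf("pad %zu bytes: text end 0x%lx -> 0x%lx\n", page_pad,
         (unsigned long) static_text_end,
         (unsigned long) (static_text_end + page_pad));
  return 0;
}
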
Code Example #4
File: gio_shm.c Project: camuel/zvm
int NaClGioShmAllocCtor(struct NaClGioShm *self,
                        size_t            shm_size) {
  struct NaClDescImcShm *shmp;
  int                   rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  if (!NaClDescEffectorTrustedMemCtor(&self->eff)) {
    return 0;
  }

  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    return 0;
  }
  if (!NaClDescImcShmAllocCtor(shmp, shm_size, /* executable= */ 0)) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    free(shmp);
    return 0;
  }

  rv = NaClGioShmCtorIntern(self, (struct NaClDesc *) shmp, shm_size);

  if (!rv) {
    NaClDescUnref((struct NaClDesc *) shmp);
    free(shmp);
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
  }
  return rv;
}
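
This error handling (also in examples #8 and #10 below) follows the usual C constructor idiom: every sub-object constructed so far is torn down, in reverse order, before the failure is reported. A generic sketch of the shape, with hypothetical names:

#include <stdio.h>

struct Widget { int unused; };

static int  PartACtor(struct Widget *w) { (void) w; return 1; }
static void PartADtor(struct Widget *w) { (void) w; }
static int  PartBCtor(struct Widget *w) { (void) w; return 0; /* fails */ }

/* Construct A, then B; if B fails, unwind A before reporting failure. */
static int WidgetCtor(struct Widget *w) {
  if (!PartACtor(w)) return 0;
  if (!PartBCtor(w)) {
    PartADtor(w);  /* tear down in reverse order of construction */
    return 0;
  }
  return 1;
}

int main(void) {
  struct Widget w;
  printf("ctor %s\n", WidgetCtor(&w) ? "succeeded" : "failed cleanly");
  return 0;
}
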
Code Example #5
int NaClGioShmCtor(struct NaClGioShm  *self,
                   struct NaClDesc    *shmp,
                   size_t             shm_size) {

  int rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  rv = NaClGioShmCtorIntern(self, shmp, shm_size);

  return rv;
}
Code Example #6
File: sel_addrspace.c Project: alepharchives/zerovm
void NaClAllocAddrSpace(struct NaClApp *nap)
{
  void        *mem;
  uintptr_t   hole_start;
  size_t      hole_size;
  uintptr_t   stack_start;

  ZLOGS(LOG_DEBUG, "calling NaClAllocateSpace(*,0x%016x)", ((size_t)1 << nap->addr_bits));
  NaClAllocateSpace(&mem, (uintptr_t) 1U << nap->addr_bits);

  nap->mem_start = (uintptr_t) mem;
  ZLOGS(LOG_DEBUG, "allocated memory at 0x%08x", nap->mem_start);

  hole_start = NaClRoundAllocPage(nap->data_end);

  ZLOGFAIL(nap->stack_size >= ((uintptr_t) 1U) << nap->addr_bits,
      EFAULT, "NaClAllocAddrSpace: stack too large!");
  stack_start = (((uintptr_t) 1U) << nap->addr_bits) - nap->stack_size;
  stack_start = NaClTruncAllocPage(stack_start);

  ZLOGFAIL(stack_start < hole_start, EFAULT,
      "Memory 'hole' between end of BSS and start of stack is negative in size");

  hole_size = stack_start - hole_start;
  hole_size = NaClTruncAllocPage(hole_size);

  /*
   * mprotect and madvise unused data space to "free" it up, but
   * retain mapping so no other memory can be mapped into those
   * addresses.
   */
  if(hole_size != 0)
  {
    ZLOGS(LOG_DEBUG, "madvising 0x%08x, 0x%08x, MADV_DONTNEED",
        nap->mem_start + hole_start, hole_size);

    ZLOGFAIL(0 != NaCl_madvise((void*)(nap->mem_start + hole_start), hole_size,
        MADV_DONTNEED), errno, "madvise failed. cannot release unused data segment");

    ZLOGS(LOG_DEBUG, "mprotecting 0x%08x, 0x%08x, PROT_NONE",
        nap->mem_start + hole_start, hole_size);

    ZLOGFAIL(0 != NaCl_mprotect((void *)(nap->mem_start + hole_start), hole_size,
        PROT_NONE), errno, "mprotect failed. cannot protect pages");
  }
  else
    ZLOGS(LOG_DEBUG, "there is no hole between end of data and the beginning of stack");
}
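
The hole computation reduces to three lines of page arithmetic: round the data end up, truncate the stack start down, and the difference is the region to madvise/mprotect away. A standalone sketch with illustrative numbers (1 GiB address space, 16 MiB stack, data ending at 10 MiB):

#include <stdint.h>
#include <stdio.h>

#define ALLOC_PAGE (64 * 1024)  /* assumed NACL_MAP_PAGESIZE */

static uintptr_t Round(uintptr_t a) {
  return (a + ALLOC_PAGE - 1) & ~((uintptr_t) ALLOC_PAGE - 1);
}
static uintptr_t Trunc(uintptr_t a) {
  return a & ~((uintptr_t) ALLOC_PAGE - 1);
}

int main(void) {
  unsigned addr_bits = 30;                        /* 1 GiB user space */
  uintptr_t stack_size = (uintptr_t) 16 << 20;    /* 16 MiB stack */
  uintptr_t data_end = (uintptr_t) 10 << 20;      /* data ends at 10 MiB */

  uintptr_t hole_start = Round(data_end);
  uintptr_t stack_start = Trunc(((uintptr_t) 1 << addr_bits) - stack_size);
  uintptr_t hole_size = Trunc(stack_start - hole_start);

  /* This is the region madvise(MADV_DONTNEED)d and mprotect(PROT_NONE)d. */
  printf("hole: [0x%lx, 0x%lx), %lu MiB\n",
         (unsigned long) hole_start,
         (unsigned long) (hole_start + hole_size),
         (unsigned long) (hole_size >> 20));
  return 0;
}
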
Code Example #7
int NaClSysCommonAddrRangeInAllowedDynamicCodeSpace(struct NaClApp *nap,
                                                    uintptr_t usraddr,
                                                    size_t length) {
  uintptr_t usr_region_end = usraddr + length;

  if (usr_region_end < usraddr) {
    /* Check for unsigned addition overflow */
    return 0;
  }
  usr_region_end = NaClRoundAllocPage(usr_region_end);
  if (usr_region_end < usraddr) {
    /* 32-bit systems only, rounding caused uint32_t overflow */
    return 0;
  }
  return (nap->dynamic_text_start <= usraddr &&
          usr_region_end <= nap->dynamic_text_end);
}
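
This function guards against two distinct wraparounds: the addition usraddr + length, and the rounding itself when the end lands near the top of a 32-bit address space. The sketch below models both guards with explicit uint32_t arithmetic; the 64 KiB granularity is an assumption.

#include <stdint.h>
#include <stdio.h>

#define ALLOC_PAGE 0x10000u  /* assumed 64 KiB allocation page */

/* Mirror of the two wraparound guards, with uint32_t standing in for a
 * 32-bit uintptr_t. */
static int RangeOk(uint32_t usraddr, uint32_t length,
                   uint32_t dyn_start, uint32_t dyn_end) {
  uint32_t end = usraddr + length;
  if (end < usraddr) return 0;                       /* addition wrapped */
  end = (end + ALLOC_PAGE - 1) & ~(ALLOC_PAGE - 1);
  if (end < usraddr) return 0;                       /* rounding wrapped */
  return dyn_start <= usraddr && end <= dyn_end;
}

int main(void) {
  /* Rounding 0xffffffff up wraps to 0; the second guard rejects it. */
  printf("%d\n", RangeOk(0xffff0000u, 0xffffu, 0x100000u, 0xffffffffu));
  /* An in-bounds 64 KiB request is accepted. */
  printf("%d\n", RangeOk(0x100000u, 0x10000u, 0x100000u, 0x200000u));
  return 0;
}
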
Code Example #8
File: gio_shm.c Project: camuel/zvm
int NaClGioShmCtor(struct NaClGioShm  *self,
                   struct NaClDesc    *shmp,
                   size_t             shm_size) {

  int rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  if (!NaClDescEffectorTrustedMemCtor(&self->eff)) {
    return 0;
  }

  rv = NaClGioShmCtorIntern(self, shmp, shm_size);

  if (!rv) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
  }
  return rv;
}
Code Example #9
void *AllocatePageInRange(uint8_t *min_addr, uint8_t *max_addr) {
  MEMORY_BASIC_INFORMATION info;
  uint8_t *addr = (uint8_t *) NaClTruncAllocPage((uintptr_t) min_addr);
  while (addr < max_addr) {
    size_t result;
    result = VirtualQuery(addr, &info, sizeof(info));
    if (result == 0) {
      break;
    }
    CHECK(result == sizeof(info));
    if (info.State == MEM_FREE) {
      return VirtualAlloc(info.BaseAddress, NACL_MAP_PAGESIZE,
                          MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    }
    /*
     * RegionSize can be a 4k multiple but not a 64k multiple, so we
     * have to round up, otherwise our subsequent attempt to
     * VirtualAlloc() a non-64k-aligned address will fail.
     */
    addr += NaClRoundAllocPage(info.RegionSize);
  }
  return NULL;
}
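
A standalone (Windows-only) sketch of the same scan, without the NaCl helpers: walk the address space with VirtualQuery and round each RegionSize up to the 64 KiB allocation granularity, which is exactly the pitfall the comment above warns about.

#include <windows.h>
#include <stdint.h>
#include <stdio.h>

#define ALLOC_GRAN 0x10000u  /* Windows allocation granularity, 64 KiB */

static uint8_t *FindFreeRegion(uint8_t *min_addr, uint8_t *max_addr) {
  MEMORY_BASIC_INFORMATION info;
  uint8_t *addr =
      (uint8_t *) ((uintptr_t) min_addr & ~(uintptr_t) (ALLOC_GRAN - 1));
  while (addr < max_addr &&
         VirtualQuery(addr, &info, sizeof info) == sizeof info) {
    if (info.State == MEM_FREE)
      return (uint8_t *) info.BaseAddress;
    /* RegionSize may be only 4 KiB aligned; round up so the next probe
     * (and any later VirtualAlloc) stays 64 KiB aligned. */
    addr += (info.RegionSize + ALLOC_GRAN - 1) & ~(SIZE_T) (ALLOC_GRAN - 1);
  }
  return NULL;
}

int main(void) {
  uint8_t *p = FindFreeRegion((uint8_t *) 0x10000, (uint8_t *) 0x70000000);
  printf("first free region: %p\n", (void *) p);
  return 0;
}
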
Code Example #10
int NaClGioShmAllocCtor(struct NaClGioShm *self,
                        size_t            shm_size) {
  struct NaClDescImcShm *shmp;
  int                   rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    return 0;
  }
  if (!NaClDescImcShmAllocCtor(shmp, shm_size, /* executable= */ 0)) {
    free(shmp);
    return 0;
  }

  rv = NaClGioShmCtorIntern(self, (struct NaClDesc *) shmp, shm_size);
  NaClDescUnref((struct NaClDesc *) shmp);

  if (!rv) {
    free(shmp);
  }
  return rv;
}
Code Example #11
int32_t NaClSysBrk(struct NaClAppThread *natp,
                   uintptr_t            new_break) {
  struct NaClApp        *nap = natp->nap;
  uintptr_t             break_addr;
  int32_t               rv = -NACL_ABI_EINVAL;
  struct NaClVmmapIter  iter;
  struct NaClVmmapEntry *ent;
  struct NaClVmmapEntry *next_ent;
  uintptr_t             sys_break;
  uintptr_t             sys_new_break;
  uintptr_t             usr_last_data_page;
  uintptr_t             usr_new_last_data_page;
  uintptr_t             last_internal_data_addr;
  uintptr_t             last_internal_page;
  uintptr_t             start_new_region;
  uintptr_t             region_size;

  /*
   * The sysbrk() IRT interface is deprecated and is not enabled for
   * ABI-stable PNaCl pexes, so for security hardening, disable the
   * syscall under PNaCl too.
   */
  if (nap->pnacl_mode)
    return -NACL_ABI_ENOSYS;

  break_addr = nap->break_addr;

  NaClLog(3, "Entered NaClSysBrk(new_break 0x%08"NACL_PRIxPTR")\n",
          new_break);

  sys_new_break = NaClUserToSysAddr(nap, new_break);
  NaClLog(3, "sys_new_break 0x%08"NACL_PRIxPTR"\n", sys_new_break);

  if (kNaClBadAddress == sys_new_break) {
    goto cleanup_no_lock;
  }
  if (NACL_SYNC_OK != NaClMutexLock(&nap->mu)) {
    NaClLog(LOG_ERROR, "Could not get app lock for 0x%08"NACL_PRIxPTR"\n",
            (uintptr_t) nap);
    goto cleanup_no_lock;
  }
  if (new_break < nap->data_end) {
    NaClLog(4, "new_break before data_end (0x%"NACL_PRIxPTR")\n",
            nap->data_end);
    goto cleanup;
  }
  if (new_break <= nap->break_addr) {
    /* freeing memory */
    NaClLog(4, "new_break before break (0x%"NACL_PRIxPTR"); freeing\n",
            nap->break_addr);
    nap->break_addr = new_break;
    break_addr = new_break;
  } else {
    /*
     * See if page containing new_break is in mem_map; if so, we are
     * essentially done -- just update break_addr.  Otherwise, we
     * extend the VM map entry from the page containing the current
     * break to the page containing new_break.
     */

    sys_break = NaClUserToSys(nap, nap->break_addr);

    usr_last_data_page = (nap->break_addr - 1) >> NACL_PAGESHIFT;

    usr_new_last_data_page = (new_break - 1) >> NACL_PAGESHIFT;

    last_internal_data_addr = NaClRoundAllocPage(new_break) - 1;
    last_internal_page = last_internal_data_addr >> NACL_PAGESHIFT;

    NaClLog(4, ("current break sys addr 0x%08"NACL_PRIxPTR", "
                "usr last data page 0x%"NACL_PRIxPTR"\n"),
            sys_break, usr_last_data_page);
    NaClLog(4, "new break usr last data page 0x%"NACL_PRIxPTR"\n",
            usr_new_last_data_page);
    NaClLog(4, "last internal data addr 0x%08"NACL_PRIxPTR"\n",
            last_internal_data_addr);

    if (NULL == NaClVmmapFindPageIter(&nap->mem_map,
                                      usr_last_data_page,
                                      &iter)
        || NaClVmmapIterAtEnd(&iter)) {
      NaClLog(LOG_FATAL, ("current break (0x%08"NACL_PRIxPTR", "
                          "sys 0x%08"NACL_PRIxPTR") "
                          "not in address map\n"),
              nap->break_addr, sys_break);
    }
    ent = NaClVmmapIterStar(&iter);
    NaClLog(4, ("segment containing current break"
                ": page_num 0x%08"NACL_PRIxPTR", npages 0x%"NACL_PRIxS"\n"),
            ent->page_num, ent->npages);
    if (usr_new_last_data_page < ent->page_num + ent->npages) {
      NaClLog(4, "new break within break segment, just bumping addr\n");
      nap->break_addr = new_break;
      break_addr = new_break;
    } else {
      NaClVmmapIterIncr(&iter);
      if (!NaClVmmapIterAtEnd(&iter)
          && ((next_ent = NaClVmmapIterStar(&iter))->page_num
              <= last_internal_page)) {
        /* ran into next segment! */
        NaClLog(4,
                ("new break request of usr address "
                 "0x%08"NACL_PRIxPTR" / usr page 0x%"NACL_PRIxPTR
                 " runs into next region, page_num 0x%"NACL_PRIxPTR", "
                 "npages 0x%"NACL_PRIxS"\n"),
                new_break, usr_new_last_data_page,
                next_ent->page_num, next_ent->npages);
        goto cleanup;
      }
      NaClLog(4,
              "extending segment: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      /* go ahead and extend ent to cover, and make pages accessible */
      start_new_region = (ent->page_num + ent->npages) << NACL_PAGESHIFT;
      ent->npages = (last_internal_page - ent->page_num + 1);
      region_size = (((last_internal_page + 1) << NACL_PAGESHIFT)
                     - start_new_region);
      if (0 != NaClMprotect((void *) NaClUserToSys(nap, start_new_region),
                            region_size,
                            PROT_READ | PROT_WRITE)) {
        NaClLog(LOG_FATAL,
                ("Could not mprotect(0x%08"NACL_PRIxPTR", "
                 "0x%08"NACL_PRIxPTR", "
                 "PROT_READ|PROT_WRITE)\n"),
                start_new_region,
                region_size);
      }
      NaClLog(4, "segment now: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      nap->break_addr = new_break;
      break_addr = new_break;
    }
    /*
     * Zero out memory between old break and new break.
     */
    ASSERT(sys_new_break > sys_break);
    memset((void *) sys_break, 0, sys_new_break - sys_break);
  }

cleanup:
  NaClXMutexUnlock(&nap->mu);
cleanup_no_lock:

  /*
   * This cast is safe because the incoming value (new_break) cannot
   * exceed the user address space--even though its type (uintptr_t)
   * theoretically allows larger values.
   */
  rv = (int32_t) break_addr;

  NaClLog(3, "NaClSysBrk: returning 0x%08"NACL_PRIx32"\n", rv);
  return rv;
}
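
The page bookkeeping in the growth path is worth working through with numbers: the VM map entry is extended not just to the page containing new_break but through the end of the allocation-rounded break, which is what last_internal_page captures. A standalone sketch, assuming 4 KiB VM pages and 64 KiB allocation pages:

#include <stdint.h>
#include <stdio.h>

#define NACL_PAGESHIFT 12    /* assumed 4 KiB VM pages */
#define ALLOC_PAGE 0x10000u  /* assumed 64 KiB allocation page */

int main(void) {
  /* The break sits mid-page; the new break lands two VM pages later. */
  uintptr_t break_addr = 0x101234;
  uintptr_t new_break = 0x103456;

  uintptr_t usr_last_data_page = (break_addr - 1) >> NACL_PAGESHIFT;
  uintptr_t usr_new_last_data_page = (new_break - 1) >> NACL_PAGESHIFT;
  /* The map entry is extended through the allocation-rounded new break,
   * exactly as last_internal_page is computed in NaClSysBrk. */
  uintptr_t last_internal_data_addr =
      ((new_break + ALLOC_PAGE - 1) & ~(uintptr_t) (ALLOC_PAGE - 1)) - 1;
  uintptr_t last_internal_page = last_internal_data_addr >> NACL_PAGESHIFT;

  printf("old last page 0x%lx, new last page 0x%lx, extend through 0x%lx\n",
         (unsigned long) usr_last_data_page,
         (unsigned long) usr_new_last_data_page,
         (unsigned long) last_internal_page);
  return 0;
}
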
Code Example #12
int32_t NaClSysMunmap(struct NaClAppThread  *natp,
                      void                  *start,
                      size_t                length) {
  int32_t   retval = -NACL_ABI_EINVAL;
  uintptr_t sysaddr;
  uintptr_t addr;
  uintptr_t endaddr;
  int       holding_app_lock = 0;
  size_t    alloc_rounded_length;

  NaClLog(3, "NaClSysMunmap(0x%08"NACL_PRIxPTR", 0x%08"NACL_PRIxPTR
          ", 0x%"NACL_PRIxS")\n",
          (uintptr_t) natp, (uintptr_t) start, length);

  NaClSysCommonThreadSyscallEnter(natp);

  if (!NaClIsAllocPageMultiple((uintptr_t) start)) {
    NaClLog(4, "start addr not allocation multiple\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  if (0 == length) {
    /*
     * linux mmap of zero length yields a failure, but windows code
     * would just iterate through and do nothing, so does not yield a
     * failure.
     */
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    length = alloc_rounded_length;
    NaClLog(LOG_WARNING,
            "munmap: rounded length to 0x%"NACL_PRIxS"\n", length);
  }
  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) start, length);
  if (kNaClBadAddress == sysaddr) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  NaClXMutexLock(&natp->nap->mu);
  holding_app_lock = 1;

  /*
   * User should be unable to unmap any executable pages.  We check here.
   */
  if (NaClSysCommonAddrRangeContainsExecutablePages_mu(natp->nap,
                                                       (uintptr_t) start,
                                                       length)) {
    NaClLog(2, "NaClSysMunmap: region contains executable pages\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  endaddr = sysaddr + length;
  for (addr = sysaddr; addr < endaddr; addr += NACL_MAP_PAGESIZE) {
    struct NaClVmmapEntry const *entry;

    entry = NaClVmmapFindPage(&natp->nap->mem_map,
                              NaClSysToUser(natp->nap, addr)
                              >> NACL_PAGESHIFT);
    if (NULL == entry) {
      NaClLog(LOG_FATAL,
              "NaClSysMunmap: could not find VM map entry for addr "
              "0x%08"NACL_PRIxPTR"\n",
              addr);
    }
    NaClLog(3,
            ("NaClSysMunmap: addr 0x%08"NACL_PRIxPTR", "
             "nmop 0x%08"NACL_PRIxPTR"\n"),
            addr, (uintptr_t) entry->nmop);
    if (NULL == entry->nmop) {
      /* anonymous memory; we just decommit it and thus make it inaccessible */
      if (!VirtualFree((void *) addr,
                       NACL_MAP_PAGESIZE,
                       MEM_DECOMMIT)) {
        int error = GetLastError();
        NaClLog(LOG_FATAL,
                ("NaClSysMunmap: Could not VirtualFree MEM_DECOMMIT"
                 " addr 0x%08"NACL_PRIxPTR", error %d (0x%x)\n"),
                addr, error, error);
      }
    } else {
      /*
       * This should invoke a "safe" version of unmap that fills the
       * memory hole as quickly as possible, and may return
       * -NACL_ABI_E_MOVE_ADDRESS_SPACE.  The "safe" version just
       * minimizes the size of the timing hole for any racers, plus
       * the size of the memory window is only 64KB, rather than
       * whatever size the user is unmapping.
       */
      retval = (*entry->nmop->ndp->vtbl->Unmap)(entry->nmop->ndp,
                                                natp->effp,
                                                (void*) addr,
                                                NACL_MAP_PAGESIZE);
      if (0 != retval) {
        NaClLog(LOG_FATAL,
                ("NaClSysMunmap: Could not unmap via ndp->Unmap "
                 "0x%08"NACL_PRIxPTR" and cannot handle address space move\n"),
                addr);
      }
    }
    NaClVmmapUpdate(&natp->nap->mem_map,
                    (NaClSysToUser(natp->nap, (uintptr_t) addr)
                     >> NACL_PAGESHIFT),
                    NACL_PAGES_PER_MAP,
                    0,  /* prot */
                    (struct NaClMemObj *) NULL,
                    1);  /* delete */
  }
  retval = 0;
cleanup:
  if (holding_app_lock) {
    NaClXMutexUnlock(&natp->nap->mu);
  }
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Code Example #13
File: nacl_syscall_handlers.c Project: camuel/zvm
int32_t NaClSysMunmap(struct NaClAppThread  *natp,
                      void                  *start,
                      size_t                length) {
  int32_t   retval = -NACL_ABI_EINVAL;
  uintptr_t sysaddr;
  int       holding_app_lock = 0;
  size_t    alloc_rounded_length;

  NaClLog(3, "Entered NaClSysMunmap(0x%08"NACL_PRIxPTR", "
          "0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n",
          (uintptr_t) natp, (uintptr_t) start, length);

  NaClSysCommonThreadSyscallEnter(natp);

  if (!NaClIsAllocPageMultiple((uintptr_t) start)) {
    NaClLog(4, "start addr not allocation multiple\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  if (0 == length) {
    /*
     * linux mmap of zero length yields a failure, but osx does not, leading
     * to a NaClVmmapUpdate of zero pages, which should not occur.
     */
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    length = alloc_rounded_length;
    NaClLog(LOG_WARNING,
            "munmap: rounded length to 0x%"NACL_PRIxS"\n", length);
  }
  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) start, length);
  if (kNaClBadAddress == sysaddr) {
    NaClLog(4, "region not user addresses\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  NaClXMutexLock(&natp->nap->mu);

  while (0 != natp->nap->threads_launching) {
    NaClXCondVarWait(&natp->nap->cv, &natp->nap->mu);
  }
  natp->nap->vm_hole_may_exist = 1;

  holding_app_lock = 1;
  /*
   * NB: windows (or generic) version would use Munmap virtual
   * function from the backing NaClDesc object obtained by iterating
   * through the address map for the region, and those Munmap virtual
   * functions may return -NACL_ABI_E_MOVE_ADDRESS_SPACE.
   *
   * We should hold the application lock while doing this iteration
   * and unmapping, so that the address space is consistent for other
   * threads.
   */

  /*
   * User should be unable to unmap any executable pages.  We check here.
   */
  if (NaClSysCommonAddrRangeContainsExecutablePages_mu(natp->nap,
                                                       (uintptr_t) start,
                                                       length)) {
    NaClLog(2, "NaClSysMunmap: region contains executable pages\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Overwrite current mapping with inaccessible, anonymous
   * zero-filled pages, which should be copy-on-write and thus
   * relatively cheap.  Do not open up an address space hole.
   */
  NaClLog(4,
          ("NaClSysMunmap: mmap(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS","
           " 0x%x, 0x%x, -1, 0)\n"),
          sysaddr, length, PROT_NONE,
          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED);
  if (MAP_FAILED == mmap((void *) sysaddr,
                         length,
                         PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         -1,
                         (off_t) 0)) {
    NaClLog(4, "mmap to put in anonymous memory failed, errno = %d\n", errno);
    retval = -NaClXlateErrno(errno);
    goto cleanup;
  }
  NaClVmmapUpdate(&natp->nap->mem_map,
                  (NaClSysToUser(natp->nap, (uintptr_t) sysaddr)
                   >> NACL_PAGESHIFT),
                  length >> NACL_PAGESHIFT,
                  0,  /* prot */
                  (struct NaClMemObj *) NULL,
                  1);  /* Delete mapping */
  retval = 0;
cleanup:
  if (holding_app_lock) {
    natp->nap->vm_hole_may_exist = 0;
    NaClXCondVarBroadcast(&natp->nap->cv);
    NaClXMutexUnlock(&natp->nap->mu);
  }
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
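
The key design choice in this POSIX variant is that nothing is ever truly munmap()ed: the range is overmapped with inaccessible anonymous pages, so the address space stays reserved and no hole opens up for another thread to map into. A minimal standalone demonstration of the trick:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
  size_t len = 64 * 1024;
  char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;
  memset(p, 0xaa, len);

  /* "Unmap" by overmapping: the range stays reserved but inaccessible,
   * so no other mapping can race into the hole. */
  if (mmap(p, len, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    return 1;
  printf("range %p..%p now PROT_NONE but still reserved\n",
         (void *) p, (void *) (p + len));
  return 0;
}
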
Code Example #14
File: elf_util.c Project: mariospr/chromium-browser
NaClErrorCode NaClElfImageLoadDynamically(
    struct NaClElfImage *image,
    struct NaClApp *nap,
    struct NaClDesc *ndp,
    struct NaClValidationMetadata *metadata) {
  ssize_t read_ret;
  int segnum;
  for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
    const Elf_Phdr *php = &image->phdrs[segnum];
    Elf_Addr vaddr = php->p_vaddr & ~(NACL_MAP_PAGESIZE - 1);
    Elf_Off offset = php->p_offset & ~(NACL_MAP_PAGESIZE - 1);
    Elf_Off filesz = php->p_offset + php->p_filesz - offset;
    Elf_Off memsz = php->p_offset + php->p_memsz - offset;
    int32_t result;

    /*
     * By checking if filesz is larger than memsz, we no longer run the risk of
     * a malicious ELF object overrunning into the trusted address space when
     * reading data of size "filesz" into a buffer of size "memsz".
     */
    if (filesz > memsz) {
      return LOAD_UNLOADABLE;
    }

    /*
     * We check for PT_LOAD directly rather than using the "loadable"
     * array because we are not using NaClElfImageValidateProgramHeaders()
     * to fill out the "loadable" array for this ELF object.  This ELF
     * object does not have to fit such strict constraints (such as
     * having code at 0x20000), and safety checks are applied by
     * NaClTextDyncodeCreate() and NaClSysMmapIntern().
     */
    if (PT_LOAD != php->p_type) {
      continue;
    }

    if (0 != (php->p_flags & PF_X)) {
      /* Load code segment. */
      /*
       * We make a copy of the code.  This is not ideal given that this
       * code path is used only for loading the IRT, and we could assume
       * that the contents of the irt.nexe file will not change underneath
       * us.  We should be able to mmap() the IRT's code segment instead of
       * copying it.
       * TODO(mseaborn): Reduce the amount of copying here.
       */
      char *code_copy = malloc(filesz);
      if (NULL == code_copy) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: malloc failed\n");
        return LOAD_NO_MEMORY;
      }
      read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                  PRead)(ndp, code_copy, filesz, (nacl_off64_t) offset);
      if (NaClSSizeIsNegErrno(&read_ret) ||
          (size_t) read_ret != filesz) {
        free(code_copy);
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to read code segment\n");
        return LOAD_READ_ERROR;
      }
      if (NULL != metadata) {
        metadata->code_offset = offset;
      }
      result = NaClTextDyncodeCreate(nap, (uint32_t) vaddr,
                                     code_copy, (uint32_t) filesz, metadata);
      free(code_copy);
      if (0 != result) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to load code segment\n");
        return LOAD_UNLOADABLE;
      }
    } else {
      /* Load data segment. */
      void *paddr = (void *) NaClUserToSys(nap, vaddr);
      size_t mapping_size = NaClRoundAllocPage(memsz);
      /*
       * Note that we do not use NACL_ABI_MAP_FIXED because we do not
       * want to silently overwrite any existing mappings, such as the
       * user app's data segment or the stack.  We detect overmapping
       * when mmap chooses not to use the preferred address we supply.
       * (Ideally mmap would provide a MAP_EXCL option for this
       * instead.)
       */
      result = NaClSysMmapIntern(
          nap, (void *) (uintptr_t) vaddr, mapping_size,
          NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
          NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE,
          -1, 0);
      if ((int32_t) vaddr != result) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to map data segment\n");
        return LOAD_UNLOADABLE;
      }
      read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                  PRead)(ndp, paddr, filesz, (nacl_off64_t) offset);
      if (NaClSSizeIsNegErrno(&read_ret) ||
          (size_t) read_ret != filesz) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to read data segment\n");
        return LOAD_READ_ERROR;
      }
      /*
       * Note that we do not need to zero the BSS (the region from
       * p_filesz to p_memsz) because it should already be zero
       * filled.  This would not be the case if we were mapping the
       * data segment from the file.
       */

      if (0 == (php->p_flags & PF_W)) {
        /* Handle read-only data segment. */
        int rc = NaClMprotect(paddr, mapping_size, NACL_ABI_PROT_READ);
        if (0 != rc) {
          NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                  "failed to mprotect read-only data segment\n");
          return LOAD_MPROTECT_FAIL;
        }

        NaClVmmapAddWithOverwrite(&nap->mem_map,
                                  vaddr >> NACL_PAGESHIFT,
                                  mapping_size >> NACL_PAGESHIFT,
                                  NACL_ABI_PROT_READ,
                                  NACL_ABI_MAP_PRIVATE,
                                  NULL,
                                  0,
                                  0);
      }
    }
  }
  return LOAD_OK;
}
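
The arithmetic at the top of the loop truncates p_vaddr and p_offset down to a 64 KiB boundary and widens filesz/memsz by the same slack, then rejects any header where filesz would exceed memsz. A worked example with assumed values:

#include <stdint.h>
#include <stdio.h>

#define NACL_MAP_PAGESIZE 0x10000u  /* assumed 64 KiB */

int main(void) {
  /* Assumed header values for a small read-write segment. */
  uint32_t p_vaddr = 0x123400, p_offset = 0x3400;
  uint32_t p_filesz = 0x1800, p_memsz = 0x2000;

  /* Truncate vaddr/offset to an allocation boundary, widen the sizes by
   * the same slack so the mapping still covers the whole segment. */
  uint32_t vaddr = p_vaddr & ~(NACL_MAP_PAGESIZE - 1);
  uint32_t offset = p_offset & ~(NACL_MAP_PAGESIZE - 1);
  uint32_t filesz = p_offset + p_filesz - offset;
  uint32_t memsz = p_offset + p_memsz - offset;

  printf("map vaddr 0x%x offset 0x%x filesz 0x%x memsz 0x%x\n",
         vaddr, offset, filesz, memsz);
  /* filesz > memsz would mean reading past the buffer: reject. */
  printf("filesz <= memsz: %s\n", filesz <= memsz ? "ok" : "reject");
  return 0;
}
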
Code Example #15
File: elf_util.c Project: camuel/zvm
NaClErrorCode NaClElfImageLoadDynamically(struct NaClElfImage *image,
                                          struct NaClApp      *nap,
                                          struct Gio          *gfile) {
  int segnum;
  for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
    const Elf_Phdr *php = &image->phdrs[segnum];
    int32_t result;

    /*
     * We check for PT_LOAD directly rather than using the "loadable"
     * array because we are not using NaClElfImageValidateProgramHeaders()
     * to fill out the "loadable" array for this ELF object.  This ELF
     * object does not have to fit such strict constraints (such as
     * having code at 0x20000), and safety checks are applied by
     * NaClTextDyncodeCreate() and NaClCommonSysMmapIntern().
     */
    if (PT_LOAD != php->p_type) {
      continue;
    }

    /*
     * Ideally, Gio would have a Pread() method which we would use
     * instead of Seek().  In practice, though, there is no
     * Seek()/Read() race condition here because both
     * GioMemoryFileSnapshot and NaClGioShm use a seek position that
     * is local and not shared between processes.
     */
    if ((*gfile->vtbl->Seek)(gfile, (off_t) php->p_offset,
                             SEEK_SET) == (off_t) -1) {
      NaClLog(1, "NaClElfImageLoadDynamically: seek failed\n");
      return LOAD_READ_ERROR;
    }

    if (0 != (php->p_flags & PF_X)) {
      /* Load code segment. */
      /*
       * We make a copy of the code.  This is not ideal given that
       * GioMemoryFileSnapshot and NaClGioShm already have a copy of
       * the file in memory or mmapped.
       * TODO(mseaborn): Reduce the amount of copying here.
       */
      char *code_copy = malloc(php->p_filesz);
      if (NULL == code_copy) {
        NaClLog(1, "NaClElfImageLoadDynamically: malloc failed\n");
        return LOAD_NO_MEMORY;
      }
      if ((Elf_Word) (*gfile->vtbl->Read)(gfile, code_copy, php->p_filesz)
          != php->p_filesz) {
        free(code_copy);
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to read code segment\n");
        return LOAD_READ_ERROR;
      }
      result = NaClTextDyncodeCreate(nap, (uint32_t) php->p_vaddr,
                                     code_copy, (uint32_t) php->p_filesz);
      free(code_copy);
      if (0 != result) {
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to load code segment\n");
        return LOAD_UNLOADABLE;
      }
    } else {
      /* Load data segment. */
      void *paddr = (void *) NaClUserToSys(nap, php->p_vaddr);
      size_t mapping_size = NaClRoundAllocPage(php->p_memsz);
      /*
       * Note that we do not use NACL_ABI_MAP_FIXED because we do not
       * want to silently overwrite any existing mappings, such as the
       * user app's data segment or the stack.  We detect overmapping
       * when mmap chooses not to use the preferred address we supply.
       * (Ideally mmap would provide a MAP_EXCL option for this
       * instead.)
       */
      result = NaClCommonSysMmapIntern(
          nap, (void *) php->p_vaddr, mapping_size,
          NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
          NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE,
          -1, 0);
      if ((int32_t) php->p_vaddr != result) {
        NaClLog(1, "NaClElfImageLoadDynamically: failed to map data segment\n");
        return LOAD_UNLOADABLE;
      }
      if ((Elf_Word) (*gfile->vtbl->Read)(gfile, paddr, php->p_filesz)
          != php->p_filesz) {
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to read data segment\n");
        return LOAD_READ_ERROR;
      }
      /*
       * Note that we do not need to zero the BSS (the region from
       * p_filesz to p_memsz) because it should already be zero
       * filled.  This would not be the case if we were mapping the
       * data segment from the file.
       */

      if (0 == (php->p_flags & PF_W)) {
        /* Handle read-only data segment. */
        int rc = NaCl_mprotect(paddr, mapping_size, NACL_ABI_PROT_READ);
        if (0 != rc) {
          NaClLog(1, "NaClElfImageLoadDynamically: "
                  "failed to mprotect read-only data segment\n");
          return LOAD_MPROTECT_FAIL;
        }

        NaClVmmapUpdate(&nap->mem_map,
                        php->p_vaddr >> NACL_PAGESHIFT,
                        mapping_size >> NACL_PAGESHIFT,
                        PROT_READ,
                        NULL,
                        0  /* remove: false */);
      }
    }
  }
  return LOAD_OK;
}
Code Example #16
File: sel_ldr_standard.c Project: bortoq/zerovm
NaClErrorCode NaClAppLoadFile(struct Gio       *gp,
                              struct NaClApp   *nap) {
  NaClErrorCode       ret = LOAD_INTERNAL;
  NaClErrorCode       subret;
  uintptr_t           rodata_end;
  uintptr_t           data_end;
  uintptr_t           max_vaddr;
  struct NaClElfImage *image = NULL;
  struct NaClPerfCounter  time_load_file;

  NaClPerfCounterCtor(&time_load_file, "NaClAppLoadFile");

  /* NACL_MAX_ADDR_BITS < 32 */
  if (nap->addr_bits > NACL_MAX_ADDR_BITS) {
    ret = LOAD_ADDR_SPACE_TOO_BIG;
    goto done;
  }

  nap->stack_size = NaClRoundAllocPage(nap->stack_size);

  /* temporary object will be deleted at end of function */
  image = NaClElfImageNew(gp, &subret);
  if (NULL == image) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateElfHeader(image);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateProgramHeaders(image,
                                              nap->addr_bits,
                                              &nap->static_text_end,
                                              &nap->rodata_start,
                                              &rodata_end,
                                              &nap->data_start,
                                              &data_end,
                                              &max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  if (0 == nap->data_start) {
    if (0 == nap->rodata_start) {
      if (NaClRoundAllocPage(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE) {
        /*
         * if no rodata and no data, we make sure that there is space for
         * the halt sled.
         */
        max_vaddr += NACL_MAP_PAGESIZE;
      }
    } else {
      /*
       * no data, but there is rodata.  this means max_vaddr is just
       * where rodata ends.  this might not be at an allocation
       * boundary, and in that case the page would not be writable.  round
       * max_vaddr up to the next allocation boundary so that bss will
       * be at the next writable region.
       */
      ;
    }
    max_vaddr = NaClRoundAllocPage(max_vaddr);
  }
  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  NaClLog(4, "Values from NaClElfImageValidateProgramHeaders:\n");
  NaClLog(4, "rodata_start = 0x%08"NACL_PRIxPTR"\n", nap->rodata_start);
  NaClLog(4, "rodata_end   = 0x%08"NACL_PRIxPTR"\n", rodata_end);
  NaClLog(4, "data_start   = 0x%08"NACL_PRIxPTR"\n", nap->data_start);
  NaClLog(4, "data_end     = 0x%08"NACL_PRIxPTR"\n", data_end);
  NaClLog(4, "max_vaddr    = 0x%08"NACL_PRIxPTR"\n", max_vaddr);

  /* We now support only one bundle size.  */
  nap->bundle_size = NACL_INSTR_BLOCK_SIZE;

  nap->initial_entry_pt = NaClElfImageGetEntryPoint(image);
  NaClLogAddressSpaceLayout(nap);

  if (!NaClAddrIsValidEntryPt(nap, nap->initial_entry_pt)) {
    ret = LOAD_BAD_ENTRY;
    goto done;
  }

  subret = NaClCheckAddressSpaceLayoutSanity(nap, rodata_end, data_end,
                                             max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Allocating address space\n");
  NaClPerfCounterMark(&time_load_file, "PreAllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  subret = NaClAllocAddrSpace(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "AllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   */
  NaClLog(2, "Loading into memory\n");
  ret = NaCl_mprotect((void *) (nap->mem_start + NACL_TRAMPOLINE_START),
                      NaClRoundAllocPage(nap->data_end) - NACL_TRAMPOLINE_START,
                      NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE);
  if (0 != ret) {
    NaClLog(LOG_FATAL,
            "NaClAppLoadFile: Failed to make image pages writable. "
            "Error code 0x%x\n",
            ret);
  }
  subret = NaClElfImageLoad(image, gp, nap->addr_bits, nap->mem_start);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NB: mem_map object has been initialized, but is empty.
   * NaClMakeDynamicTextShared does not touch it.
   *
   * NaClMakeDynamicTextShared also fills the dynamic memory region
   * with the architecture-specific halt instruction.  If/when we use
   * memory mapping to save paging space for the dynamic region and
   * lazily halt fill the memory as the pages become
   * readable/executable, we must make sure that the *last*
   * NACL_MAP_PAGESIZE chunk is nonetheless mapped and written with
   * halts.
   */
  NaClLog(2,
          ("Replacing gap between static text and"
           " (ro)data with shareable memory\n"));
  subret = NaClMakeDynamicTextShared(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "MakeDynText");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NaClFillEndOfTextRegion will fill with halt instructions the
   * padding space after the static text region.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extends to the next page (and not
   * allocation page).  static_text_end is updated to include the
   * padding.
   */
  NaClFillEndOfTextRegion(nap);

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  NaClLog(2, "Validating image\n");
  subret = NaClValidateImage(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "ValidateImg");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }
#endif

  NaClLog(2, "Initializing arch switcher\n");
  NaClInitSwitchToApp(nap);

  NaClLog(2, "Installing trampoline\n");
  NaClLoadTrampoline(nap);

  /*
   * NaClMemoryProtect also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  NaClLog(2, "Applying memory protection\n");
  subret = NaClMemoryProtection(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "NaClAppLoadFile done; ");
  NaClLogAddressSpaceLayout(nap);
  ret = LOAD_OK;
done:
  NaClElfImageDelete(image);

  NaClPerfCounterMark(&time_load_file, "EndLoadFile");
  NaClPerfCounterIntervalTotal(&time_load_file);
  return ret;
}
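
The max_vaddr adjustment for text-only images is subtle: if the slack between max_vaddr and the next allocation boundary cannot hold the halt sled, a whole extra page is added before rounding. A worked example (sled size assumed to be 32 bytes):

#include <stdint.h>
#include <stdio.h>

#define NACL_MAP_PAGESIZE 0x10000u
#define NACL_HALT_SLED_SIZE 32u  /* assumed sled length */

static uint32_t Round(uint32_t a) {
  return (a + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
}

int main(void) {
  /* Text-only image ending 8 bytes short of a boundary: not enough room
   * for the halt sled, so a whole extra page is added before rounding. */
  uint32_t max_vaddr = 0x20000 - 8;
  if (Round(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE)
    max_vaddr += NACL_MAP_PAGESIZE;
  max_vaddr = Round(max_vaddr);
  printf("break/data_end start at 0x%x\n", max_vaddr);  /* 0x30000 */
  return 0;
}
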
Code Example #17
File: nacl_desc_base.c Project: Html5Lexloo/o3d
int NaClDescMapDescriptor(struct NaClDesc         *desc,
                          struct NaClDescEffector *effector,
                          void                    **addr,
                          size_t                  *size) {
    struct nacl_abi_stat  st;
    size_t                rounded_size = 0;
    const int             kMaxTries = 10;
    int                   tries = 0;
    void                  *map_addr = NULL;
    int                   rval;
    uintptr_t             rval_ptr;

    *addr = NULL;
    *size = 0;

    rval = (*desc->vtbl->Fstat)(desc,
                                effector,
                                &st);
    if (0 != rval) {
        /* Failed to get the size - return failure. */
        return rval;
    }

    /*
     * on sane systems, sizeof(size_t) <= sizeof(nacl_abi_off_t) must hold.
     */
    if (st.nacl_abi_st_size < 0) {
        return -NACL_ABI_ENOMEM;
    }
    if (sizeof(size_t) < sizeof(nacl_abi_off_t)) {
        if ((nacl_abi_off_t) SIZE_T_MAX < st.nacl_abi_st_size) {
            return -NACL_ABI_ENOMEM;
        }
    }
    /*
     * size_t and uintptr_t and void * should have the same number of
     * bits (well, void * could be smaller than uintptr_t, and on weird
     * architectures one could imagine the maximum size is smaller than
     * all addr bits, but we're talking sane architectures...).
     */

    /*
     * When probing by VirtualAlloc/mmap, use the same granularity
     * as the Map virtual function (64KB).
     */
    rounded_size = NaClRoundAllocPage(st.nacl_abi_st_size);

    /* Find an address range to map the object into. */
    do {
        ++tries;
#if NACL_WINDOWS
        map_addr = VirtualAlloc(NULL, rounded_size, MEM_RESERVE, PAGE_READWRITE);
        if (NULL == map_addr || !VirtualFree(map_addr, 0, MEM_RELEASE)) {
            continue;
        }
#else
        map_addr = mmap(NULL,
                        rounded_size,
                        PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_ANONYMOUS,
                        0,
                        0);
        if (MAP_FAILED == map_addr || munmap(map_addr, rounded_size)) {
            map_addr = NULL;
            continue;
        }
#endif
        NaClLog(4,
                "NaClDescMapDescriptor: mapping to address %"NACL_PRIxPTR"\n",
                (uintptr_t) map_addr);
        rval_ptr = (*desc->vtbl->Map)(desc,
                                      effector,
                                      map_addr,
                                      rounded_size,
                                      NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                                      NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                                      0);
        NaClLog(4,
                "NaClDescMapDescriptor: result is %"NACL_PRIxPTR"\n",
                rval_ptr);
        if (NaClIsNegErrno(rval_ptr)) {
            /*
             * A nonzero return from NaClIsNegErrno
             * indicates that the value is within the range
             * reserved for errors, which is representable
             * with 32 bits.
             */
            rval = (int) rval_ptr;
        } else {
            /*
             * Map() did not return an error, so set our
             * return code to 0 (success)
             */
            rval = 0;
            map_addr = (void*) rval_ptr;
            break;
        }

    } while (NULL == map_addr && tries < kMaxTries);

    if (NULL == map_addr) {
        return rval;
    }

    *addr = map_addr;
    *size = rounded_size;
    return 0;
}
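
The loop above probes for a free range by reserving and immediately releasing it, then asks the descriptor's Map() to claim the address; the release-to-map window is racy, which is why it retries up to kMaxTries times. A POSIX-only sketch of just the probe step:

#include <stdio.h>
#include <sys/mman.h>

int main(void) {
  size_t rounded_size = 3 * 64 * 1024;

  /* Probe: reserve a range anonymously, then release it immediately. */
  void *probe = mmap(NULL, rounded_size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (probe == MAP_FAILED) return 1;
  munmap(probe, rounded_size);

  /* The real Map() at `probe` would go here; another thread could steal
   * the range between the munmap and the map, hence the retry loop. */
  printf("candidate range: %p (+0x%zx)\n", probe, rounded_size);
  return 0;
}
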
Code Example #18
/* Warning: sizeof(nacl_abi_off_t)!=sizeof(off_t) on OSX */
int32_t NaClSysMmapIntern(struct NaClApp        *nap,
                          void                  *start,
                          size_t                length,
                          int                   prot,
                          int                   flags,
                          int                   d,
                          nacl_abi_off_t        offset) {
  int                         allowed_flags;
  struct NaClDesc             *ndp;
  uintptr_t                   usraddr;
  uintptr_t                   usrpage;
  uintptr_t                   sysaddr;
  uintptr_t                   endaddr;
  int                         mapping_code;
  uintptr_t                   map_result;
  int                         holding_app_lock;
  struct nacl_abi_stat        stbuf;
  size_t                      alloc_rounded_length;
  nacl_off64_t                file_size;
  nacl_off64_t                file_bytes;
  nacl_off64_t                host_rounded_file_bytes;
  size_t                      alloc_rounded_file_bytes;
  uint32_t                    val_flags;

  holding_app_lock = 0;
  ndp = NULL;

  allowed_flags = (NACL_ABI_MAP_FIXED | NACL_ABI_MAP_SHARED
                   | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_ANONYMOUS);

  usraddr = (uintptr_t) start;

  if (0 != (flags & ~allowed_flags)) {
    NaClLog(2, "invalid mmap flags 0%o, ignoring extraneous bits\n", flags);
    flags &= allowed_flags;
  }

  if (0 != (flags & NACL_ABI_MAP_ANONYMOUS)) {
    /*
     * anonymous mmap, so backing store is just swap: no descriptor is
     * involved, and no memory object will be created to represent the
     * descriptor.
     */
    ndp = NULL;
  } else {
    ndp = NaClAppGetDesc(nap, d);
    if (NULL == ndp) {
      map_result = -NACL_ABI_EBADF;
      goto cleanup;
    }
  }

  mapping_code = 0;
  /*
   * Check if application is trying to do dynamic code loading by
   * mmaping a file.
   */
  if (0 != (NACL_ABI_PROT_EXEC & prot) &&
      0 != (NACL_ABI_MAP_FIXED & flags) &&
      NULL != ndp &&
      NaClSysCommonAddrRangeInAllowedDynamicCodeSpace(nap, usraddr, length)) {
    if (!nap->enable_dyncode_syscalls) {
      NaClLog(LOG_WARNING,
              "NaClSysMmap: PROT_EXEC when dyncode syscalls are disabled.\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }
    if (0 != (NACL_ABI_PROT_WRITE & prot)) {
      NaClLog(3,
              "NaClSysMmap: asked for writable and executable code pages?!?\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }
    mapping_code = 1;
  } else if (0 != (prot & NACL_ABI_PROT_EXEC)) {
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Starting address must be aligned to worst-case allocation
   * granularity.  (Windows.)
   */
  if (!NaClIsAllocPageMultiple(usraddr)) {
    if ((NACL_ABI_MAP_FIXED & flags) != 0) {
      NaClLog(2, "NaClSysMmap: address not allocation granularity aligned\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    } else {
      NaClLog(2, "NaClSysMmap: Force alignment of misaligned hint address\n");
      usraddr = NaClTruncAllocPage(usraddr);
    }
  }
  /*
   * Offset should be non-negative (nacl_abi_off_t is signed).  This
   * condition is caught when the file is stat'd and checked, and
   * offset is ignored for anonymous mappings.
   */
  if (offset < 0) {
    NaClLog(1,  /* application bug */
            "NaClSysMmap: negative file offset: %"NACL_PRId64"\n",
            (int64_t) offset);
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  /*
   * And offset must be a multiple of the allocation unit.
   */
  if (!NaClIsAllocPageMultiple((uintptr_t) offset)) {
    NaClLog(1,
            ("NaClSysMmap: file offset 0x%08"NACL_PRIxPTR" not multiple"
             " of allocation size\n"),
            (uintptr_t) offset);
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Round up to a page size multiple.
   *
   * Note that if length > 0xffff0000 (i.e. -NACL_MAP_PAGESIZE), rounding
   * up the length will wrap around to 0.  We check for length == 0 *after*
   * rounding up the length to simultaneously check for the length
   * originally being 0 and check for the wraparound.
   */
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    if (mapping_code) {
      NaClLog(3, "NaClSysMmap: length not a multiple of allocation size\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }
    NaClLog(1,
            "NaClSysMmap: rounded length to 0x%"NACL_PRIxS"\n",
            alloc_rounded_length);
  }
  if (0 == (uint32_t) alloc_rounded_length) {
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  /*
   * Sanity check in case any later code behaves badly if
   * |alloc_rounded_length| is >=4GB.  This check shouldn't fail
   * because |length| was <4GB and we've already checked for overflow
   * when rounding it up.
   * TODO(mseaborn): Remove the need for this by using uint32_t for
   * untrusted sizes more consistently.
   */
  CHECK(alloc_rounded_length == (uint32_t) alloc_rounded_length);

  if (NULL == ndp) {
    /*
     * Note: sentinel values are bigger than the NaCl module addr space.
     */
    file_size                = kMaxUsableFileSize;
    file_bytes               = kMaxUsableFileSize;
    host_rounded_file_bytes  = kMaxUsableFileSize;
    alloc_rounded_file_bytes = kMaxUsableFileSize;
  } else {
    /*
     * We stat the file to figure out its actual size.
     *
     * This is necessary because the POSIXy interface we provide
     * allows mapping beyond the extent of a file but Windows'
     * interface does not.  We simulate the POSIX behaviour on
     * Windows.
     */
    map_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
                  Fstat)(ndp, &stbuf);
    if (0 != map_result) {
      goto cleanup;
    }

    /*
     * Preemptively refuse to map anything that's not a regular file or
     * shared memory segment.  Other types usually report st_size of zero,
     * which the code below will handle by just doing a dummy PROT_NONE
     * mapping for the requested size and never attempting the underlying
     * NaClDesc Map operation.  So without this check, the host OS never
     * gets the chance to refuse the mapping operation on an object that
     * can't do it.
     */
    if (!NACL_ABI_S_ISREG(stbuf.nacl_abi_st_mode) &&
        !NACL_ABI_S_ISSHM(stbuf.nacl_abi_st_mode)) {
      map_result = -NACL_ABI_ENODEV;
      goto cleanup;
    }

    /*
     * BUG(bsy): there's a race between this fstat and the actual mmap
     * below.  It's probably insoluble.  Even if we fstat again after
     * mmap and compared, the mmap could have "seen" the file with a
     * different size, after which the racing thread restored back to
     * the same value before the 2nd fstat takes place.
     */
    file_size = stbuf.nacl_abi_st_size;

    if (file_size < offset) {
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }

    file_bytes = file_size - offset;
    if ((nacl_off64_t) kMaxUsableFileSize < file_bytes) {
      host_rounded_file_bytes = kMaxUsableFileSize;
    } else {
      host_rounded_file_bytes = NaClRoundHostAllocPage((size_t) file_bytes);
    }

    ASSERT(host_rounded_file_bytes <= (nacl_off64_t) kMaxUsableFileSize);
    /*
     * We need to deal with NaClRoundHostAllocPage rounding up to zero
     * from ~0u - n, where n < 4096 or 65536 (== 1 alloc page).
     *
     * Luckily, file_bytes is at most kMaxUsableFileSize which is
     * smaller than SIZE_T_MAX, so it should never happen, but we
     * leave the explicit check below as defensive programming.
     */
    alloc_rounded_file_bytes =
      NaClRoundAllocPage((size_t) host_rounded_file_bytes);

    if (0 == alloc_rounded_file_bytes && 0 != host_rounded_file_bytes) {
      map_result = -NACL_ABI_ENOMEM;
      goto cleanup;
    }

    /*
     * NB: host_rounded_file_bytes and alloc_rounded_file_bytes can be
     * zero.  Such an mmap just makes memory (offset relative to
     * usraddr) in the range [0, alloc_rounded_length) inaccessible.
     */
  }

  /*
   * host_rounded_file_bytes is how many bytes we can map from the
   * file, given the user-supplied starting offset.  It is at least
   * one page.  If it came from a real file, it is a multiple of
   * host-OS allocation size.  It cannot be larger than
   * kMaxUsableFileSize.
   */
  if (mapping_code && (size_t) file_bytes < alloc_rounded_length) {
    NaClLog(3,
            "NaClSysMmap: disallowing partial allocation page extension for"
            " short files\n");
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  length = size_min(alloc_rounded_length, (size_t) host_rounded_file_bytes);

  /*
   * Lock the addr space.
   */
  NaClXMutexLock(&nap->mu);

  NaClVmHoleOpeningMu(nap);

  holding_app_lock = 1;

  if (0 == (flags & NACL_ABI_MAP_FIXED)) {
    /*
     * The user wants us to pick an address range.
     */
    if (0 == usraddr) {
      /*
       * Pick a hole in addr space of appropriate size, anywhere.
       * We pick one that's best for the system.
       */
      usrpage = NaClVmmapFindMapSpace(&nap->mem_map,
                                      alloc_rounded_length >> NACL_PAGESHIFT);
      NaClLog(4, "NaClSysMmap: FindMapSpace: page 0x%05"NACL_PRIxPTR"\n",
              usrpage);
      if (0 == usrpage) {
        map_result = -NACL_ABI_ENOMEM;
        goto cleanup;
      }
      usraddr = usrpage << NACL_PAGESHIFT;
      NaClLog(4, "NaClSysMmap: new starting addr: 0x%08"NACL_PRIxPTR
              "\n", usraddr);
    } else {
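
The comment about lengths above 0xffff0000 deserves a concrete check: with 32-bit arithmetic, rounding such a length up wraps to zero, so testing the rounded value against zero rejects a zero-length request and the wraparound in one go. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define ALLOC_PAGE 0x10000u  /* assumed 64 KiB allocation page */

int main(void) {
  /* A length just above 0xffff0000 wraps to 0 when rounded up in 32-bit
   * arithmetic, so a single `rounded == 0` test catches both a zero
   * length and the wraparound. */
  uint32_t length = 0xffff0001u;
  uint32_t rounded = (length + ALLOC_PAGE - 1) & ~(ALLOC_PAGE - 1);
  printf("length 0x%x rounds to 0x%x -> %s\n", length, rounded,
         rounded == 0 ? "rejected (EINVAL)" : "ok");
  return 0;
}
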
Code Example #19
NaClErrorCode NaClAppLoadFileAslr(struct NaClDesc *ndp,
                                  struct NaClApp *nap,
                                  enum NaClAslrMode aslr_mode) {
  NaClErrorCode       ret = LOAD_INTERNAL;
  NaClErrorCode       subret = LOAD_INTERNAL;
  uintptr_t           rodata_end;
  uintptr_t           data_end;
  uintptr_t           max_vaddr;
  struct NaClElfImage *image = NULL;
  struct NaClPerfCounter  time_load_file;
  struct NaClElfImageInfo info;

  NaClPerfCounterCtor(&time_load_file, "NaClAppLoadFile");

  /* NACL_MAX_ADDR_BITS < 32 */
  if (nap->addr_bits > NACL_MAX_ADDR_BITS) {
    ret = LOAD_ADDR_SPACE_TOO_BIG;
    goto done;
  }

  nap->stack_size = NaClRoundAllocPage(nap->stack_size);

  /* temporary object will be deleted at end of function */
  image = NaClElfImageNew(ndp, &subret);
  if (NULL == image || LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateProgramHeaders(image,
                                              nap->addr_bits,
                                              &info);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  if (nap->initial_nexe_max_code_bytes != 0) {
    size_t code_segment_size = info.static_text_end - NACL_TRAMPOLINE_END;
    if (code_segment_size > nap->initial_nexe_max_code_bytes) {
      NaClLog(LOG_ERROR, "NaClAppLoadFileAslr: "
              "Code segment size (%"NACL_PRIuS" bytes) exceeds limit (%"
              NACL_PRId32" bytes)\n",
              code_segment_size, nap->initial_nexe_max_code_bytes);
      ret = LOAD_CODE_SEGMENT_TOO_LARGE;
      goto done;
    }
  }

  nap->static_text_end = info.static_text_end;
  nap->rodata_start = info.rodata_start;
  rodata_end = info.rodata_end;
  nap->data_start = info.data_start;
  data_end = info.data_end;
  max_vaddr = info.max_vaddr;

  if (0 == nap->data_start) {
    if (0 == nap->rodata_start) {
      if (NaClRoundAllocPage(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE) {
        /*
         * if no rodata and no data, we make sure that there is space for
         * the halt sled.
         */
        max_vaddr += NACL_MAP_PAGESIZE;
      }
    } else {
      /*
       * no data, but there is rodata.  this means max_vaddr is just
       * where rodata ends.  this might not be at an allocation
       * boundary, and in that case the page would not be writable.  round
       * max_vaddr up to the next allocation boundary so that bss will
       * be at the next writable region.
       */
      ;
    }
    max_vaddr = NaClRoundAllocPage(max_vaddr);
  }
  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   *
   * Memory allocation will use NaClRoundPage(nap->break_addr), but
   * the system notion of break is always an exact address.  Even
   * though we must allocate and make accessible multiples of pages,
   * the linux-style brk system call (which returns current break on
   * failure) permits a non-aligned address as argument.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  NaClLog(4, "Values from NaClElfImageValidateProgramHeaders:\n");
  NaClLog(4, "rodata_start = 0x%08"NACL_PRIxPTR"\n", nap->rodata_start);
  NaClLog(4, "rodata_end   = 0x%08"NACL_PRIxPTR"\n", rodata_end);
  NaClLog(4, "data_start   = 0x%08"NACL_PRIxPTR"\n", nap->data_start);
  NaClLog(4, "data_end     = 0x%08"NACL_PRIxPTR"\n", data_end);
  NaClLog(4, "max_vaddr    = 0x%08"NACL_PRIxPTR"\n", max_vaddr);

  /* We now support only one bundle size.  */
  nap->bundle_size = NACL_INSTR_BLOCK_SIZE;

  nap->initial_entry_pt = NaClElfImageGetEntryPoint(image);
  NaClLogAddressSpaceLayout(nap);

  if (!NaClAddrIsValidEntryPt(nap, nap->initial_entry_pt)) {
    ret = LOAD_BAD_ENTRY;
    goto done;
  }

  subret = NaClCheckAddressSpaceLayoutSanity(nap, rodata_end, data_end,
                                             max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Allocating address space\n");
  NaClPerfCounterMark(&time_load_file, "PreAllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  subret = NaClAllocAddrSpaceAslr(nap, aslr_mode);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "AllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   */
  NaClLog(2, "Loading into memory\n");
  ret = NaClMprotect((void *) (nap->mem_start + NACL_TRAMPOLINE_START),
                     NaClRoundAllocPage(nap->data_end) - NACL_TRAMPOLINE_START,
                     PROT_READ | PROT_WRITE);
  if (0 != ret) {
    NaClLog(LOG_FATAL,
            "NaClAppLoadFile: Failed to make image pages writable. "
            "Error code 0x%x\n",
            ret);
  }
  subret = NaClElfImageLoad(image, ndp, nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "NaClElfImageLoad");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NB: mem_map object has been initialized, but is empty.
   * NaClMakeDynamicTextShared does not touch it.
   *
   * NaClMakeDynamicTextShared also fills the dynamic memory region
   * with the architecture-specific halt instruction.  If/when we use
   * memory mapping to save paging space for the dynamic region and
   * lazily halt fill the memory as the pages become
   * readable/executable, we must make sure that the *last*
   * NACL_MAP_PAGESIZE chunk is nonetheless mapped and written with
   * halts.
   */
  NaClLog(2,
          ("Replacing gap between static text and"
           " (ro)data with shareable memory\n"));
  subret = NaClMakeDynamicTextShared(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "MakeDynText");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NaClFillEndOfTextRegion will fill with halt instructions the
   * padding space after the static text region.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extends to the next page (and not
   * allocation page).  static_text_end is updated to include the
   * padding.
   */
  NaClFillEndOfTextRegion(nap);

  if (nap->main_exe_prevalidated) {
    NaClLog(2, "Main executable segment hit validation cache and mapped in,"
            " skipping validation.\n");
    subret = LOAD_OK;
  } else {
    NaClLog(2, "Validating image\n");
    subret = NaClValidateImage(nap);
  }
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "ValidateImg");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Initializing arch switcher\n");
  NaClInitSwitchToApp(nap);

  NaClLog(2, "Installing trampoline\n");
  NaClLoadTrampoline(nap, aslr_mode);

  NaClLog(2, "Installing springboard\n");
  NaClLoadSpringboard(nap);

  /*
   * NaClMemoryProtection also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  NaClLog(2, "Applying memory protection\n");
  subret = NaClMemoryProtection(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "NaClAppLoadFile done; ");
  NaClLogAddressSpaceLayout(nap);
  ret = LOAD_OK;
done:
  NaClElfImageDelete(image);

  NaClPerfCounterMark(&time_load_file, "EndLoadFile");
  NaClPerfCounterIntervalTotal(&time_load_file);
  return ret;
}
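
The loader above leans on allocation-page rounding (NaClRoundAllocPage, NaClTruncAllocPage) to place the break, the bss, and the segment boundaries. Below is a minimal, self-contained sketch of that arithmetic, assuming the 64 KiB allocation granularity NaCl uses; the My* names are illustrative stand-ins, not the real helpers.

#include <assert.h>
#include <stdint.h>

#define MY_MAP_PAGESIZE 0x10000  /* assumed 64 KiB allocation granularity */

static uintptr_t MyRoundAllocPage(uintptr_t addr) {
  /* round up to the next allocation boundary */
  return (addr + MY_MAP_PAGESIZE - 1) & ~(uintptr_t) (MY_MAP_PAGESIZE - 1);
}

static uintptr_t MyTruncAllocPage(uintptr_t addr) {
  /* round down to the enclosing allocation boundary */
  return addr & ~(uintptr_t) (MY_MAP_PAGESIZE - 1);
}

int main(void) {
  uintptr_t rodata_end = 0x2a5b4;  /* arbitrary, not allocation aligned */
  /* Rounding max_vaddr up moves the break to the next writable 64 KiB
   * boundary, as in the "no data, but there is rodata" branch above. */
  assert(MyRoundAllocPage(rodata_end) == 0x30000);
  assert(MyTruncAllocPage(rodata_end) == 0x20000);
  return 0;
}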
Code example #20
/*
 * Basic address space layout sanity check.
 */
NaClErrorCode NaClCheckAddressSpaceLayoutSanity(struct NaClApp *nap,
                                                uintptr_t rodata_end,
                                                uintptr_t data_end,
                                                uintptr_t max_vaddr) {
  if (0 != nap->data_start) {
    if (data_end != max_vaddr) {
      NaClLog(LOG_INFO, "data segment is not last\n");
      return LOAD_DATA_NOT_LAST_SEGMENT;
    }
  } else if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(rodata_end) != max_vaddr) {
      /*
       * This should be unreachable, but we include it just for
       * completeness.
       *
       * Here is why it is unreachable:
       *
       * NaClPhdrChecks checks the text segment starting address.  The
       * only allowed loaded segments are text, data, and rodata.
       * Thus unless the rodata is in the trampoline region, it must
       * be after the text.  And NaClElfImageValidateProgramHeaders
       * ensures that all segments start after the trampoline region.
       */
      NaClLog(LOG_INFO, "no data segment, but rodata segment is not last\n");
      return LOAD_NO_DATA_BUT_RODATA_NOT_LAST_SEGMENT;
    }
  }
  if (0 != nap->rodata_start && 0 != nap->data_start) {
    if (rodata_end > NaClTruncAllocPage(nap->data_start)) {
      NaClLog(LOG_INFO, "rodata_overlaps data.\n");
      return LOAD_RODATA_OVERLAPS_DATA;
    }
  }
  if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->rodata_start) {
      return LOAD_TEXT_OVERLAPS_RODATA;
    }
  } else if (0 != nap->data_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) >
        NaClTruncAllocPage(nap->data_start)) {
      return LOAD_TEXT_OVERLAPS_DATA;
    }
  }
  if (0 != nap->rodata_start &&
      NaClRoundAllocPage(nap->rodata_start) != nap->rodata_start) {
    NaClLog(LOG_INFO, "rodata_start not a multiple of allocation size\n");
    return LOAD_BAD_RODATA_ALIGNMENT;
  }
#if NACL_ARCH(NACL_BUILD_ARCH) == NACL_mips
  /*
   * This check is necessary to make the MIPS sandbox secure, as there is no
   * NX page protection support on MIPS.
   */
  if (nap->rodata_start < NACL_DATA_SEGMENT_START) {
    NaClLog(LOG_INFO,
            "rodata_start is below NACL_DATA_SEGMENT_START (0x%X) address\n",
            NACL_DATA_SEGMENT_START);
    return LOAD_SEGMENT_BAD_LOC;
  }
#endif
  return LOAD_OK;
}
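
A condensed view of the ordering these checks enforce may help: text, rodata, and data must appear in that order without overlap, rodata_start must sit on an allocation boundary, and the data segment must be last. The constants below are made up for the example; only the assertions mirror the checks above.

#include <assert.h>
#include <stdint.h>

int main(void) {
  /* Made-up layout that satisfies the sanity checks. */
  uintptr_t text_end_rounded = 0x30000;  /* NaClRoundAllocPage(text end) */
  uintptr_t rodata_start     = 0x30000;
  uintptr_t rodata_end       = 0x42000;
  uintptr_t data_start       = 0x50000;
  uintptr_t data_end         = 0x6b000;
  uintptr_t max_vaddr        = 0x6b000;

  assert(text_end_rounded <= rodata_start);  /* text does not overlap rodata */
  assert(rodata_end <= data_start);          /* rodata does not overlap data */
  assert(data_end == max_vaddr);             /* data segment is last */
  assert(0 == (rodata_start & 0xffff));      /* allocation aligned (64 KiB) */
  return 0;
}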
Code example #21
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode          retval = LOAD_INTERNAL;
  uintptr_t                   dynamic_text_size;
  struct NaClDescImcShm       *shm = NULL;
  uintptr_t                   shm_vaddr_base;
  int                         mmap_protections;
  uintptr_t                   mmap_ret;

  uintptr_t                   shm_upper_bound;
  uintptr_t                   text_sysaddr;

  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            " rodata / data segments not allocation aligned;"
            " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  if (0 == shm_upper_bound) {
    shm_upper_bound = shm_vaddr_base;
  }

  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    NaClLog(4, "NaClMakeDynamicTextShared: shm object allocation failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    retval = LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
    goto cleanup;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaClPageFree((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
    NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  nap->dynamic_page_bitmap =
    BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  nap->text_shm = &shm->base;
  retval = LOAD_OK;

 cleanup:
  if (LOAD_OK != retval) {
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }

  return retval;
}
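
The Unix/Windows comment above is the load-bearing part: on Unix, address space can be reserved with PROT_NONE and upgraded later with mprotect(). A POSIX-only sketch of that pattern, using an anonymous mapping instead of the shm descriptor the real code maps:

#include <stdio.h>
#include <sys/mman.h>

int main(void) {
  size_t size = 0x10000;
  /* Reserve address space with no access rights... */
  void *p = mmap(NULL, size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (MAP_FAILED == p) {
    perror("mmap");
    return 1;
  }
  /* ...then raise the permissions later, which Unix permits but
   * Windows (uncommitted pages, VirtualProtect upper bound) does not. */
  if (0 != mprotect(p, size, PROT_READ | PROT_WRITE)) {
    perror("mprotect");
    return 1;
  }
  ((char *) p)[0] = 1;  /* now writable */
  munmap(p, size);
  return 0;
}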
Code example #22
File: sel_addrspace.c Project: alepharchives/zerovm
/*
 * Apply memory protection to memory regions.
 */
void NaClMemoryProtection(struct NaClApp *nap)
{
  uintptr_t start_addr;
  size_t    region_size;
  int       err;

  /*
   * The first NACL_SYSCALL_START_ADDR bytes are mapped as PROT_NONE.
   * This enables NULL pointer checking, and provides additional protection
   * against addr16/data16 prefixed operations being used for attacks.
   *
   * NaClMprotectGuards also sets up guard pages outside of the
   * virtual address space of the NaClApp -- on ARM and x86-64,
   * where data sandboxing only sandboxes memory writes and not reads,
   * we need to ensure that certain addressing modes that might
   * otherwise allow the NaCl app to write outside its address space
   * (given how we use masking / base registers to implement data
   * write sandboxing) won't affect the trusted data structures.
   */

  ZLOGS(LOG_DEBUG, "Protecting guard pages for 0x%08x", nap->mem_start);
  NaClMprotectGuards(nap);

  start_addr = nap->mem_start + NACL_SYSCALL_START_ADDR;
  /*
   * The next pages up to NACL_TRAMPOLINE_END are the trampolines.
   * Immediately following that is the loaded text section.
   * These are collectively marked as PROT_READ | PROT_EXEC.
   */
  region_size = NaClRoundPage(nap->static_text_end - NACL_SYSCALL_START_ADDR);
  ZLOGS(LOG_DEBUG, "Trampoline/text region start 0x%08x, size 0x%08x, end 0x%08x",
          start_addr, region_size, start_addr + region_size);

  err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ | PROT_EXEC);
  ZLOGFAIL(0 != err, err, FAILED_MSG);

  SET_MEM_MAP_IDX(nap->mem_map[TextIdx], "Text",
      start_addr, region_size, PROT_READ | PROT_EXEC);

  /*
   * Page protections for this region have already been set up by
   * nacl_text.c.
   *
   * TODO(d'b): since nacl_text.c no longer exists, the protection should be
   * set here
   *
   * We record the mapping for consistency with other fixed
   * mappings, but the record is not actually used.  Overmapping is
   * prevented by a separate range check, which is done by
   * NaClSysCommonAddrRangeContainsExecutablePages_mu().
   */
  /*
   * zerovm does not support dynamic text. the code below checks for its
   * presence, logs information, and fails if needed.
   * TODO(d'b): once dynamic text support is either added or dropped for
   * good, the block below should be rewritten or removed
   */
  start_addr = NaClUserToSys(nap, nap->dynamic_text_start);
  region_size = nap->dynamic_text_end - nap->dynamic_text_start;
  ZLOGS(LOG_DEBUG, "shm txt region start 0x%08x, size 0x%08x, end 0x%08x",
      start_addr, region_size, start_addr + region_size);
  ZLOGFAIL(0 != region_size, EFAULT, "zerovm does not support nexe with dynamic text!");

  if(0 != nap->rodata_start)
  {
    uintptr_t rodata_end;

    /*
     * TODO(mseaborn): Could reduce the number of cases by ensuring
     * that nap->data_start is always non-zero, even if
     * nap->rodata_start == nap->data_start == nap->break_addr.
     */
    if(0 != nap->data_start)
      rodata_end = nap->data_start;
    else rodata_end = nap->break_addr;

    start_addr = NaClUserToSys(nap, nap->rodata_start);
    region_size = NaClRoundPage(NaClRoundAllocPage(rodata_end)
        - NaClSysToUser(nap, start_addr));
    ZLOGS(LOG_DEBUG, "RO data region start 0x%08x, size 0x%08x, end 0x%08x",
        start_addr, region_size, start_addr + region_size);

    err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ);
    ZLOGFAIL(0 != err, err, FAILED_MSG);

    SET_MEM_MAP_IDX(nap->mem_map[RODataIdx], "ROData",
        start_addr, region_size, PROT_READ);
  }

  /*
   * data_end is max virtual addr seen, so start_addr <= data_end
   * must hold.
   */
  if(0 != nap->data_start)
  {
    start_addr = NaClUserToSys(nap, nap->data_start);
    region_size = NaClRoundPage(NaClRoundAllocPage(nap->data_end)
        - NaClSysToUser(nap, start_addr));
    ZLOGS(LOG_DEBUG, "RW data region start 0x%08x, size 0x%08x, end 0x%08x",
        start_addr, region_size, start_addr + region_size);

    err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ | PROT_WRITE);
    ZLOGFAIL(0 != err, err, FAILED_MSG);

    SET_MEM_MAP_IDX(nap->mem_map[HeapIdx], "Heap",
        start_addr, region_size, PROT_READ | PROT_WRITE);
  }

  /* stack is read/write but not execute */
  region_size = nap->stack_size;
  start_addr = NaClUserToSys(nap,
      NaClTruncAllocPage(((uintptr_t) 1U << nap->addr_bits) - nap->stack_size));
  ZLOGS(LOG_DEBUG, "RW stack region start 0x%08x, size 0x%08lx, end 0x%08x",
          start_addr, region_size, start_addr + region_size);

  err = NaCl_mprotect((void *)start_addr, NaClRoundAllocPage(nap->stack_size),
      PROT_READ | PROT_WRITE);
  ZLOGFAIL(0 != err, err, FAILED_MSG);

  SET_MEM_MAP_IDX(nap->mem_map[StackIdx], "Stack",
      start_addr, NaClRoundAllocPage(nap->stack_size), PROT_READ | PROT_WRITE);
}
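
The stack placement at the end of the function is easy to misread: the region starts at the allocation boundary below (1 << addr_bits) - stack_size, while the mprotect'd window uses the rounded-up stack size. A sketch of that arithmetic with made-up values (the 64 KiB granularity is assumed):

#include <assert.h>
#include <stdint.h>

#define MY_MAP_PAGESIZE 0x10000  /* assumed 64 KiB allocation granularity */

int main(void) {
  unsigned addr_bits = 32;        /* 4 GiB sandbox */
  uint64_t stack_size = 0x2f000;  /* made up, not allocation aligned */
  uint64_t top = (uint64_t) 1 << addr_bits;
  /* Stack grows down from the top of the address space; the region
   * start is truncated to an allocation boundary, as above. */
  uint64_t start = (top - stack_size) & ~(uint64_t) (MY_MAP_PAGESIZE - 1);
  uint64_t rounded = (stack_size + MY_MAP_PAGESIZE - 1)
                     & ~(uint64_t) (MY_MAP_PAGESIZE - 1);
  /* The mprotect'd window (rounded size) always covers the stack. */
  assert(start + rounded >= top);
  (void) start;
  return 0;
}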
Code example #23
NaClErrorCode NaClAppLoadFile(struct Gio       *gp,
                              struct NaClApp   *nap,
                              enum NaClAbiCheckOption check_abi) {
  NaClErrorCode       ret = LOAD_INTERNAL;
  NaClErrorCode       subret;
  uintptr_t           rodata_end;
  uintptr_t           data_end;
  uintptr_t           max_vaddr;
  struct NaClElfImage *image = NULL;

  /* NACL_MAX_ADDR_BITS < 32 */
  if (nap->addr_bits > NACL_MAX_ADDR_BITS) {
    ret = LOAD_ADDR_SPACE_TOO_BIG;
    goto done;
  }

  nap->stack_size = NaClRoundAllocPage(nap->stack_size);

  /* temporary object will be deleted at end of function */
  image = NaClElfImageNew(gp, &subret);
  if (NULL == image) {
    ret = subret;
    goto done;
  }

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  check_abi = NACL_ABI_CHECK_OPTION_CHECK;
#endif

  if (NACL_ABI_CHECK_OPTION_CHECK == check_abi) {
    subret = NaClElfImageValidateAbi(image);
    if (subret != LOAD_OK) {
      ret = subret;
      goto done;
    }
  }

  subret = NaClElfImageValidateElfHeader(image);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateProgramHeaders(image,
                                              nap->addr_bits,
                                              &nap->static_text_end,
                                              &nap->rodata_start,
                                              &rodata_end,
                                              &nap->data_start,
                                              &data_end,
                                              &max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  if (0 == nap->data_start) {
    if (0 == nap->rodata_start) {
      if (NaClRoundAllocPage(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE) {
        /*
         * if no rodata and no data, we make sure that there is space for
         * the halt sled.
         */
        max_vaddr += NACL_MAP_PAGESIZE;
      }
    } else {
      /*
       * no data, but there is rodata.  this means max_vaddr is just
       * where rodata ends.  this might not be at an allocation
       * boundary, and in that case the page would not be writable.  round
       * max_vaddr up to the next allocation boundary so that bss will
       * be at the next writable region.
       */
      ;
    }
    max_vaddr = NaClRoundAllocPage(max_vaddr);
  }
  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  NaClLog(4, "Values from NaClElfImageValidateProgramHeaders:\n");
  NaClLog(4, "rodata_start = 0x%08"NACL_PRIxPTR"\n", nap->rodata_start);
  NaClLog(4, "rodata_end   = 0x%08"NACL_PRIxPTR"\n", rodata_end);
  NaClLog(4, "data_start   = 0x%08"NACL_PRIxPTR"\n", nap->data_start);
  NaClLog(4, "data_end     = 0x%08"NACL_PRIxPTR"\n", data_end);
  NaClLog(4, "max_vaddr    = 0x%08"NACL_PRIxPTR"\n", max_vaddr);

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  nap->bundle_size = NaClElfImageGetBundleSize(image);
  if (nap->bundle_size == 0) {
    ret = LOAD_BAD_ABI;
    goto done;
  }
#else
  /* pick some reasonable default for an un-sandboxed nexe */
  nap->bundle_size = 32;
#endif
  nap->entry_pt = NaClElfImageGetEntryPoint(image);

  NaClLog(2,
          "NaClApp addr space layout:\n");
  NaClLog(2,
          "nap->static_text_end    = 0x%016"NACL_PRIxPTR"\n",
          nap->static_text_end);
  NaClLog(2,
          "nap->dynamic_text_start = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_start);
  NaClLog(2,
          "nap->dynamic_text_end   = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_end);
  NaClLog(2,
          "nap->rodata_start       = 0x%016"NACL_PRIxPTR"\n",
          nap->rodata_start);
  NaClLog(2,
          "nap->data_start         = 0x%016"NACL_PRIxPTR"\n",
          nap->data_start);
  NaClLog(2,
          "nap->data_end           = 0x%016"NACL_PRIxPTR"\n",
          nap->data_end);
  NaClLog(2,
          "nap->break_addr         = 0x%016"NACL_PRIxPTR"\n",
          nap->break_addr);
  NaClLog(2,
          "nap->entry_pt           = 0x%016"NACL_PRIxPTR"\n",
          nap->entry_pt);
  NaClLog(2,
          "nap->bundle_size        = 0x%x\n",
          nap->bundle_size);

  if (!NaClAddrIsValidEntryPt(nap, nap->entry_pt)) {
    ret = LOAD_BAD_ENTRY;
    goto done;
  }

  /*
   * Basic address space layout sanity check.
   */
  if (0 != nap->data_start) {
    if (data_end != max_vaddr) {
      NaClLog(LOG_INFO, "data segment is not last\n");
      ret = LOAD_DATA_NOT_LAST_SEGMENT;
      goto done;
    }
  } else if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(rodata_end) != max_vaddr) {
      /*
       * This should be unreachable, but we include it just for
       * completeness.
       *
       * Here is why it is unreachable:
       *
       * NaClPhdrChecks checks the text segment starting address.  The
       * only allowed loaded segments are text, data, and rodata.
       * Thus unless the rodata is in the trampoline region, it must
       * be after the text.  And NaClElfImageValidateProgramHeaders
       * ensures that all segments start after the trampoline region.
       */
      NaClLog(LOG_INFO, "no data segment, but rodata segment is not last\n");
      ret = LOAD_NO_DATA_BUT_RODATA_NOT_LAST_SEGMENT;
      goto done;
    }
  }
  if (0 != nap->rodata_start && 0 != nap->data_start) {
    if (rodata_end > nap->data_start) {
      NaClLog(LOG_INFO, "rodata_overlaps data.\n");
      ret = LOAD_RODATA_OVERLAPS_DATA;
      goto done;
    }
  }
  if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->rodata_start) {
      ret = LOAD_TEXT_OVERLAPS_RODATA;
      goto done;
    }
  } else if (0 != nap->data_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->data_start) {
      ret = LOAD_TEXT_OVERLAPS_DATA;
      goto done;
    }
  }

  if (0 != nap->rodata_start &&
      NaClRoundAllocPage(nap->rodata_start) != nap->rodata_start) {
    NaClLog(LOG_INFO, "rodata_start not a multiple of allocation size\n");
    ret = LOAD_BAD_RODATA_ALIGNMENT;
    goto done;
  }
  if (0 != nap->data_start &&
      NaClRoundAllocPage(nap->data_start) != nap->data_start) {
    NaClLog(LOG_INFO, "data_start not a multiple of allocation size\n");
    ret = LOAD_BAD_DATA_ALIGNMENT;
    goto done;
  }

  NaClLog(2, "Allocating address space\n");
  subret = NaClAllocAddrSpace(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NB: mem_map object has been initialized, but is empty.
   * NaClMakeDynamicTextShared does not touch it.
   *
   * NaClMakeDynamicTextShared also fills the dynamic memory region
   * with the architecture-specific halt instruction.  If/when we use
   * memory mapping to save paging space for the dynamic region and
   * lazily halt fill the memory as the pages become
   * readable/executable, we must make sure that the *last*
   * NACL_MAP_PAGESIZE chunk is nonetheless mapped and written with
   * halts.
   */
  NaClLog(2,
          ("Replacing gap between static text and"
           " (ro)data with shareable memory\n"));
  subret = NaClMakeDynamicTextShared(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   * TODO(ilewis): See if this can be enabled for Win32 as well. (issue 40077)
   */
  NaClLog(2, "Loading into memory\n");
#if NACL_WINDOWS && NACL_ARCH_CPU_X86_64
  ret = NaCl_mprotect((void*) nap->mem_start,
    NaClRoundAllocPage(nap->data_end),
    PROT_READ|PROT_WRITE);
  if (ret) {
      NaClLog(LOG_FATAL, "Couldn't get writeable pages for image. "
                         "Error code 0x%X\n", ret);
  }
#endif
  subret = NaClElfImageLoad(image, gp, nap->addr_bits, nap->mem_start);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NaClFillEndOfTextRegion will fill with halt instructions the
   * padding space after the static text region.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extends to the next page (and not
   * allocation page).  static_text_end is updated to include the
   * padding.
   */
  NaClFillEndOfTextRegion(nap);

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  NaClLog(2, "Validating image\n");
  subret = NaClValidateImage(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }
#endif

  NaClLog(2, "Installing trampoline\n");

  NaClLoadTrampoline(nap);

  NaClLog(2, "Installing springboard\n");

  NaClLoadSpringboard(nap);

  NaClLog(2, "Applying memory protection\n");

  /*
   * NaClMemoryProtection also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  subret = NaClMemoryProtection(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }
  NaClLog(2,
          "NaClAppLoadFile done; addr space layout:\n");
  NaClLog(2,
          "nap->static_text_end    = 0x%016"NACL_PRIxPTR"\n",
          nap->static_text_end);
  NaClLog(2,
          "nap->dynamic_text_start = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_start);
  NaClLog(2,
          "nap->dynamic_text_end   = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_end);
  NaClLog(2,
          "nap->rodata_start       = 0x%016"NACL_PRIxPTR"\n",
          nap->rodata_start);
  NaClLog(2,
          "nap->data_start         = 0x%016"NACL_PRIxPTR"\n",
          nap->data_start);
  NaClLog(2,
          "nap->data_end           = 0x%016"NACL_PRIxPTR"\n",
          nap->data_end);
  NaClLog(2,
          "nap->break_addr         = 0x%016"NACL_PRIxPTR"\n",
          nap->break_addr);
  NaClLog(2,
          "nap->entry_pt           = 0x%016"NACL_PRIxPTR"\n",
          nap->entry_pt);
  NaClLog(2,
          "nap->bundle_size        = 0x%x\n",
          nap->bundle_size);

  ret = LOAD_OK;
done:
  NaClElfImageDelete(image);
  return ret;
}
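
Both loader variants gate on NaClAddrIsValidEntryPt before accepting the layout. Its definition is not shown here; the sketch below is only a guess at the shape of such a check -- bundle alignment plus residence in static text -- with hypothetical values, not the actual implementation:

#include <assert.h>
#include <stdint.h>

/* Illustrative only: entry must be aligned to an instruction bundle
 * and fall inside the static text region. */
static int EntryPtLooksValid(uintptr_t entry_pt, uint32_t bundle_size,
                             uintptr_t static_text_end) {
  if (0 != (entry_pt & (bundle_size - 1))) {
    return 0;  /* not bundle aligned */
  }
  return entry_pt < static_text_end;
}

int main(void) {
  assert(EntryPtLooksValid(0x20000, 32, 0x30000));   /* aligned, in text */
  assert(!EntryPtLooksValid(0x20004, 32, 0x30000));  /* misaligned */
  assert(!EntryPtLooksValid(0x40000, 32, 0x30000));  /* past text end */
  return 0;
}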
Code example #24
File: elf_util.c Project: mariospr/chromium-browser
/*
 * Attempt to map into the NaClApp object nap from the NaCl descriptor
 * ndp an ELF segment of type p_flags that starts at file_offset for
 * segment_size bytes, to memory starting at paddr (system address).
 * If it is a code segment, make a scratch mapping and check
 * validation in readonly_text mode -- if it succeeds, we map into the
 * target address; if it fails, we return failure so that pread-based
 * loading can proceed.  For rodata and data segments, less checking
 * is needed.  In the text and data case, the end of the segment may
 * not land on a NACL_MAP_PAGESIZE boundary; when this occurs, we will
 * map in all whole NACL_MAP_PAGESIZE chunks, and pread in the tail
 * partial chunk.
 *
 * Returns: LOAD_OK, LOAD_STATUS_UNKNOWN, other error codes.
 *
 * LOAD_OK             -- if the segment has been fully handled
 * LOAD_STATUS_UNKNOWN -- if pread-based fallback is required
 * other error codes   -- if a fatal error occurs, and the caller
 *                        should propagate up
 *
 * See NaClSysMmapIntern in nacl_syscall_common.c for corresponding
 * mmap syscall where PROT_EXEC allows shared libraries to be mapped
 * into dynamic code space.
 */
static NaClErrorCode NaClElfFileMapSegment(struct NaClApp *nap,
                                           struct NaClDesc *ndp,
                                           Elf_Word p_flags,
                                           Elf_Off file_offset,
                                           Elf_Off segment_size,
                                           uintptr_t vaddr,
                                           uintptr_t paddr) {
  size_t rounded_filesz;       /* 64k rounded */
  int mmap_prot = 0;
  uintptr_t image_sys_addr;
  NaClValidationStatus validator_status = NaClValidationFailed;
  struct NaClValidationMetadata metadata;
  int read_last_page_if_partial_allocation_page = 1;
  ssize_t read_ret;
  struct NaClPerfCounter time_mmap_segment;
  NaClPerfCounterCtor(&time_mmap_segment, "NaClElfFileMapSegment");

  rounded_filesz = NaClRoundAllocPage(segment_size);

  NaClLog(4,
          "NaClElfFileMapSegment: checking segment flags 0x%x"
          " to determine map checks\n",
          p_flags);
  /*
   * Is this the text segment?  If so, map into scratch memory and
   * run validation (possibly cached result) with !stubout_mode,
   * readonly_text.  If validator says it's okay, map directly into
   * target location with NACL_ABI_PROT_READ|_EXEC.  If anything
   * failed, fall back to PRead.  NB: the assumption is that there
   * is only one PT_LOAD with PF_R|PF_X segment; this assumption is
   * enforced by phdr seen_seg checks above in
   * NaClElfImageValidateProgramHeaders.
   *
   * After this function returns, we will be setting memory protection
   * in NaClMemoryProtection, so the actual memory protection used is
   * immaterial.
   *
   * For rodata and data/bss, we mmap with NACL_ABI_PROT_READ or
   * NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE as appropriate,
   * without doing validation.  There is no fallback to PRead, since
   * we don't validate the contents.
   */
  switch (p_flags) {
    case PF_R | PF_X:
      NaClLog(4,
              "NaClElfFileMapSegment: text segment and"
              " file is safe for mmap\n");
      if (NACL_VTBL(NaClDesc, ndp)->typeTag != NACL_DESC_HOST_IO) {
        NaClLog(4, "NaClElfFileMapSegment: not supported type, got %d\n",
                NACL_VTBL(NaClDesc, ndp)->typeTag);
        return LOAD_STATUS_UNKNOWN;
      }
      /*
       * Unlike the mmap case, we do not re-run validation to
       * allow patching here; instead, we handle validation
       * failure by going to the pread_fallback case.  In the
       * future, we should consider doing an in-place mapping and
       * allowing HLT patch validation, which should be cheaper
       * since those pages that do not require patching (hopefully
       * majority) will remain file-backed and not require swap
       * space, even if we had to fault in every page.
       */
      NaClLog(1, "NaClElfFileMapSegment: mapping for validation\n");
      NaClPerfCounterMark(&time_mmap_segment, "PreMap");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
                        Map)(ndp,
                             NaClDescEffectorTrustedMem(),
                             (void *) NULL,
                             rounded_filesz,
                             NACL_ABI_PROT_READ,
                             NACL_ABI_MAP_PRIVATE,
                             file_offset);
      NaClPerfCounterMark(&time_mmap_segment, "MapForValidate");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      if (NaClPtrIsNegErrno(&image_sys_addr)) {
        NaClLog(LOG_INFO,
                "NaClElfFileMapSegment: Could not make scratch mapping,"
                " falling back to reading\n");
        return LOAD_STATUS_UNKNOWN;
      }
      /* ask validator / validation cache */
      NaClMetadataFromNaClDescCtor(&metadata, ndp);
      CHECK(segment_size == nap->static_text_end - NACL_TRAMPOLINE_END);
      validator_status = NACL_FI_VAL(
          "ELF_LOAD_FORCE_VALIDATION_STATUS",
          enum NaClValidationStatus,
          (*nap->validator->
           Validate)(vaddr,
                     (uint8_t *) image_sys_addr,
                     segment_size,  /* actual size */
                     0,  /* stubout_mode: no */
                     1,  /* readonly_text: yes */
                     nap->cpu_features,
                     &metadata,
                     nap->validation_cache));
      NaClPerfCounterMark(&time_mmap_segment, "ValidateMapped");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      NaClLog(3, "NaClElfFileMapSegment: validator_status %d\n",
              validator_status);
      NaClMetadataDtor(&metadata);
      /*
       * Remove scratch mapping, then map directly into untrusted
       * address space or pread.
       */
      NaClDescUnmapUnsafe(ndp, (void *) image_sys_addr,
                          rounded_filesz);
      NACL_MAKE_MEM_UNDEFINED((void *) paddr, rounded_filesz);

      if (NaClValidationSucceeded != validator_status) {
        NaClLog(3,
                ("NaClElfFileMapSegment: readonly_text validation for mmap"
                 " failed.  Will retry validation allowing HALT stubbing out"
                 " of unsupported instruction extensions.\n"));
        return LOAD_STATUS_UNKNOWN;
      }

      NaClLog(1, "NaClElfFileMapSegment: mapping into code space\n");
      /*
       * Windows appears to not allow RWX mappings.  This interferes
       * with HALT_SLED and having to HALT pad the last page.  We
       * allow partial code pages, so
       * read_last_page_if_partial_allocation_page will ensure that
       * the last page is writable, so we will be able to write HALT
       * instructions as needed.
       */
      mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC;
      /*
       * NB: the log string is used by tests/mmap_main_nexe/nacl.scons
       * and must be logged at a level that is less than or equal to
       * the requested verbosity level there.
       */
      NaClLog(1, "NaClElfFileMapSegment: EXERCISING MMAP LOAD PATH\n");
      nap->main_exe_prevalidated = 1;
      break;

    case PF_R | PF_W:
      /* read-write (initialized data) */
      mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
      /*
       * NB: the partial page processing will result in zeros
       * following the initialized data, so that the BSS will be zero.
       * On a typical system, this page is mapped in and the BSS
       * region is memset to zero, which means that this partial page
       * is faulted in.  Rather than saving a syscall (pread) and
       * faulting it in, we just use the same code path as for code,
       * which is (slightly) simpler.
       */
      break;

    case PF_R:
      /* read-only */
      mmap_prot = NACL_ABI_PROT_READ;
      /*
       * For rodata, we allow mapping in "garbage" past a partial
       * page; this potentially eliminates a disk I/O operation
       * (if data section has no partial page), possibly delaying
       * disk spin-up if the code was in the validation cache.
       * And it saves another 64kB of swap.
       */
      read_last_page_if_partial_allocation_page = 0;
      break;

    default:
      NaClLog(LOG_FATAL, "NaClElfFileMapSegment: unexpected p_flags %d\n",
              p_flags);
  }
  if (rounded_filesz != segment_size &&
      read_last_page_if_partial_allocation_page) {
    uintptr_t tail_offset = rounded_filesz - NACL_MAP_PAGESIZE;
    size_t tail_size = segment_size - tail_offset;
    NaClLog(4, "NaClElfFileMapSegment: pread tail\n");
    read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                PRead)(ndp,
                       (void *) (paddr + tail_offset),
                       tail_size,
                       (nacl_off64_t) (file_offset + tail_offset));
    NaClPerfCounterMark(&time_mmap_segment, "PRead tail");
    NaClPerfCounterIntervalLast(&time_mmap_segment);
    if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != tail_size) {
      NaClLog(LOG_ERROR,
              "NaClElfFileMapSegment: pread load of page tail failed\n");
      return LOAD_SEGMENT_BAD_PARAM;
    }
    rounded_filesz -= NACL_MAP_PAGESIZE;
  }
  /* mmap in */
  if (rounded_filesz == 0) {
    NaClLog(4,
            "NaClElfFileMapSegment: no pages to map, probably because"
            " the segment was a partial page, so it was processed by"
            " reading.\n");
  } else {
    NaClLog(4,
            "NaClElfFileMapSegment: mapping %"NACL_PRIuS" (0x%"
            NACL_PRIxS") bytes to"
            " address 0x%"NACL_PRIxPTR", position %"
            NACL_PRIdElf_Off" (0x%"NACL_PRIxElf_Off")\n",
            rounded_filesz, rounded_filesz,
            paddr,
            file_offset, file_offset);
    image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
                      Map)(ndp,
                           nap->effp,
                           (void *) paddr,
                           rounded_filesz,
                           mmap_prot,
                           NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
                           file_offset);
    NaClPerfCounterMark(&time_mmap_segment, "MapFinal");
    NaClPerfCounterIntervalLast(&time_mmap_segment);
    if (image_sys_addr != paddr) {
      NaClLog(LOG_FATAL,
              ("NaClElfFileMapSegment: map to 0x%"NACL_PRIxPTR" (prot %x) "
               "failed: got 0x%"NACL_PRIxPTR"\n"),
              paddr, mmap_prot, image_sys_addr);
    }
    /* Tell Valgrind that we've mapped a segment of nacl_file. */
    NaClFileMappingForValgrind(paddr, rounded_filesz, file_offset);
  }
  return LOAD_OK;
}
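
The tail handling in NaClElfFileMapSegment -- map whole NACL_MAP_PAGESIZE chunks, pread the partial last chunk over anonymous zero pages -- reduces to plain POSIX calls. A sketch under the assumptions that file_offset is a multiple of the 64 KiB chunk size and that a short read counts as failure:

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#define CHUNK 0x10000  /* 64 KiB mapping granularity, as in NaCl */

/* Returns the mapped base, or NULL on failure. */
static void *map_with_tail(int fd, off_t file_offset, size_t segment_size) {
  size_t rounded = (segment_size + CHUNK - 1) & ~(size_t) (CHUNK - 1);
  size_t whole = (rounded == segment_size) ? rounded : rounded - CHUNK;
  uint8_t *base;

  /* Back the whole region with anonymous zero pages first, so bytes
   * past the file contents in the tail chunk read as zeros (bss). */
  base = mmap(NULL, rounded, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (MAP_FAILED == base) {
    return NULL;
  }
  /* Map the whole chunks directly from the file. */
  if (0 != whole &&
      MAP_FAILED == mmap(base, whole, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_FIXED, fd, file_offset)) {
    munmap(base, rounded);
    return NULL;
  }
  /* pread the partial tail chunk, as the code above does. */
  if (whole < segment_size &&
      (ssize_t) (segment_size - whole) !=
          pread(fd, base + whole, segment_size - whole,
                file_offset + (off_t) whole)) {
    munmap(base, rounded);
    return NULL;
  }
  return base;
}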