Example #1
void TestDEPCheckFailurePath(void) {
  size_t size = NACL_PAGESIZE;
  void *page;
  CHECK(NaCl_page_alloc(&page, size) == 0);

  CHECK(NaCl_mprotect(page, size, PROT_READ | PROT_WRITE | PROT_EXEC) == 0);
  CHECK(!NaClAttemptToExecuteDataAtAddr(page, size));

  /* DEP is not guaranteed to work on x86-32. */
  if (!(NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86 && NACL_BUILD_SUBARCH == 32)) {
    CHECK(NaCl_mprotect(page, size, PROT_READ | PROT_WRITE) == 0);
    CHECK(NaClAttemptToExecuteDataAtAddr(page, size));
  }

  NaCl_page_free(page, size);
}
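
The wrappers used above are not shown in these snippets. As a rough, hedged illustration of the same allocate/protect/free pattern on a POSIX host, the sketch below uses plain mmap/mprotect/munmap; it is not the actual NaCl_page_alloc / NaCl_mprotect implementation, which also handles Windows and alignment concerns.

#include <stddef.h>
#include <sys/mman.h>

/* Illustrative POSIX-only analogue of the pattern in Example #1 (assumption:
 * a Linux-like host). Not the real NaCl wrappers. */
static int demo_alloc_protect_free(size_t size) {
  void *page = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED)
    return -1;

  /* data pages should not stay executable, so keep only read/write */
  if (mprotect(page, size, PROT_READ | PROT_WRITE) != 0) {
    munmap(page, size);
    return -1;
  }
  return munmap(page, size);
}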
Example #2
/* preallocate a memory area of the given size. abort on failure */
static void PreallocateUserMemory(struct NaClApp *nap)
{
  uintptr_t i;
  int64_t heap;
  void *p;

  assert(nap != NULL);
  assert(nap->system_manifest != NULL);

  /* abort if max_mem is not specified or invalid */
  ZLOGFAIL(nap->heap_end == 0 || nap->heap_end > FOURGIG,
      ENOMEM, "invalid memory size");

  /* calculate user heap size (must be allocated next to the data_end) */
  p = (void*)NaClRoundAllocPage(nap->data_end);
  heap = nap->heap_end - nap->stack_size;
  heap = NaClRoundAllocPage(heap) - NaClRoundAllocPage(nap->data_end);
  ZLOGFAIL(heap <= LEAST_USER_HEAP_SIZE, ENOMEM, "user heap size is too small");

  /* since 4gb of user space is already allocated just set protection to the heap */
  p = (void*)NaClUserToSys(nap, (uintptr_t)p);
  i = NaCl_mprotect(p, heap, PROT_READ | PROT_WRITE);
  ZLOGFAIL(0 != i, -i, "cannot set protection on user heap");
  nap->heap_end = NaClSysToUser(nap, (uintptr_t)p + heap);

  nap->mem_map[HeapIdx].size += heap;
  nap->mem_map[HeapIdx].end += heap;
}
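
PreallocateUserMemory relies on NaClRoundAllocPage to round addresses up to the allocation granularity (64 KB in NaCl). A minimal sketch of that arithmetic is shown below; the macro and function names are illustrative stand-ins, not the project's actual definitions.

#include <stdint.h>

#define DEMO_ALLOC_PAGESIZE ((uintptr_t)0x10000)  /* assumed 64 KB granularity */

/* round an address up to the next allocation-page boundary */
static uintptr_t DemoRoundAllocPage(uintptr_t addr) {
  return (addr + DEMO_ALLOC_PAGESIZE - 1) & ~(DEMO_ALLOC_PAGESIZE - 1);
}

/* round an address down to the enclosing allocation-page boundary */
static uintptr_t DemoTruncAllocPage(uintptr_t addr) {
  return addr & ~(DEMO_ALLOC_PAGESIZE - 1);
}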
Example #3
File: trap.c  Project: clatour/zerovm
/* change protection to read / write and return 0 if successful */
static int32_t ZVMUnjailHandle(struct NaClApp *nap, uintptr_t addr, int32_t size)
{
  JAIL_CHECK;

  /* protect */
  result = NaCl_mprotect((void*)sysaddr, size, PROT_READ | PROT_WRITE);
  if(result != 0) return -EACCES;

  return 0;
}
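
This handler and ZVMJailHandle in Example #6 both start with a JAIL_CHECK macro that is not part of these snippets; it evidently declares result and sysaddr and rejects bad arguments before NaCl_mprotect is called. The helper below is a purely hypothetical reconstruction of that kind of prologue, not zerovm's actual macro, and the specific checks and error codes are assumptions.

/* Hypothetical sketch of a JAIL_CHECK-style prologue (illustrative only). */
static int32_t DemoJailCheck(struct NaClApp *nap, uintptr_t addr,
                             int32_t size, uintptr_t *sysaddr)
{
  /* reject empty or page-misaligned regions */
  if (size <= 0 || (addr & (NACL_MAP_PAGESIZE - 1)) != 0)
    return -EINVAL;

  /* translate the user range to a system address; fail if out of range */
  *sysaddr = NaClUserToSysAddrRange(nap, addr, size);
  if (*sysaddr == kNaClBadAddress)
    return -EFAULT;

  return 0;
}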
Example #4
/* protect bumpers (guarding space) */
static void NaClMprotectGuards(struct NaClApp *nap)
{
  int err;

  ZLOGS(LOG_DEBUG, "Protecting bumpers");

  /*
   * make bumpers (guard pages) with "inaccessible" protection. the "left"
   * bumper size is 40gb + 64kb, the "right" one is 40gb
   */
  err = NaCl_mprotect((void *)(nap->mem_start - GUARDSIZE),
      GUARDSIZE + NACL_SYSCALL_START_ADDR, PROT_NONE);
  ZLOGFAIL(err != 0, err, FAILED_MSG);
  err = NaCl_mprotect((void *)(nap->mem_start + FOURGIG), GUARDSIZE, PROT_NONE);
  ZLOGFAIL(err != 0, err, FAILED_MSG);

  /* put information to the memory map */
  SET_MEM_MAP_IDX(nap->mem_map[LeftBumperIdx], "LeftBumper",
      nap->mem_start - GUARDSIZE, GUARDSIZE + NACL_SYSCALL_START_ADDR, PROT_NONE);
  SET_MEM_MAP_IDX(nap->mem_map[RightBumperIdx], "RightBumper",
      nap->mem_start + FOURGIG, GUARDSIZE, PROT_NONE);
}
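
To make the bumper arithmetic concrete, the following helper (illustrative only, not part of zerovm) tests whether a system address falls inside one of the PROT_NONE regions set up above; it only restates the ranges passed to NaCl_mprotect and SET_MEM_MAP_IDX.

#include <stdint.h>

/* Illustrative check against the guarded ranges from NaClMprotectGuards:
 * [mem_start - GUARDSIZE, mem_start + NACL_SYSCALL_START_ADDR) and
 * [mem_start + FOURGIG, mem_start + FOURGIG + GUARDSIZE). */
static int DemoAddrInBumper(uintptr_t mem_start, uintptr_t sysaddr)
{
  uintptr_t left_lo  = mem_start - GUARDSIZE;
  uintptr_t left_hi  = mem_start + NACL_SYSCALL_START_ADDR;
  uintptr_t right_lo = mem_start + FOURGIG;
  uintptr_t right_hi = right_lo + GUARDSIZE;

  return (sysaddr >= left_lo && sysaddr < left_hi) ||
         (sysaddr >= right_lo && sysaddr < right_hi);
}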
Example #5
/* align both bounds of the area to page(s) and protect it. return pointer to the (new) area */
static uintptr_t AlignAndProtect(uintptr_t area, int64_t size, int prot)
{
  uintptr_t page_ptr;
  uint64_t aligned_size;
  int code;

  page_ptr = ROUNDDOWN_64K(area);
  aligned_size = ROUNDUP_64K(size + (area - page_ptr));

  code = NaCl_mprotect((void*)page_ptr, aligned_size, prot);
  ZLOGFAIL(0 != code, code, "cannot protect 0x%x of %d bytes with %d",
      page_ptr, aligned_size, prot);
  return page_ptr;
}
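
A short usage sketch: because AlignAndProtect widens the requested range to whole 64 KB pages, anything else sharing those pages changes protection too. The caller below is illustrative only (the function name and its use are assumptions, not code from the project).

/* Illustrative caller: make an arbitrarily aligned blob read-only. */
static void DemoProtectBlob(uintptr_t blob_addr, int64_t blob_size)
{
  uintptr_t page_start = AlignAndProtect(blob_addr, blob_size, PROT_READ);
  ZLOGS(LOG_DEBUG, "read-only pages start at 0x%lx", (unsigned long)page_start);
}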
Example #6
File: trap.c  Project: clatour/zerovm
/*
 * validate given buffer and, if successful, change protection to
 * read / execute and return 0
 */
static int32_t ZVMJailHandle(struct NaClApp *nap, uintptr_t addr, int32_t size)
{
  JAIL_CHECK;

  /* validate */
  result = NaClSegmentValidates((uint8_t*)sysaddr, size, sysaddr);
  if(result == 0) return -EPERM;

  /* protect */
  result = NaCl_mprotect((void*)sysaddr, size, PROT_READ | PROT_EXEC);
  if(result != 0) return -EACCES;

  return 0;
}
Example #7
void NaClAllocAddrSpace(struct NaClApp *nap)
{
  void        *mem;
  uintptr_t   hole_start;
  size_t      hole_size;
  uintptr_t   stack_start;

  ZLOGS(LOG_DEBUG, "calling NaClAllocateSpace(*,0x%016x)", ((size_t)1 << nap->addr_bits));
  NaClAllocateSpace(&mem, (uintptr_t) 1U << nap->addr_bits);

  nap->mem_start = (uintptr_t) mem;
  ZLOGS(LOG_DEBUG, "allocated memory at 0x%08x", nap->mem_start);

  hole_start = NaClRoundAllocPage(nap->data_end);

  ZLOGFAIL(nap->stack_size >= ((uintptr_t) 1U) << nap->addr_bits,
      EFAULT, "NaClAllocAddrSpace: stack too large!");
  stack_start = (((uintptr_t) 1U) << nap->addr_bits) - nap->stack_size;
  stack_start = NaClTruncAllocPage(stack_start);

  ZLOGFAIL(stack_start < hole_start, EFAULT,
      "Memory 'hole' between end of BSS and start of stack is negative in size");

  hole_size = stack_start - hole_start;
  hole_size = NaClTruncAllocPage(hole_size);

  /*
   * mprotect and madvise unused data space to "free" it up, but
   * retain mapping so no other memory can be mapped into those
   * addresses.
   */
  if(hole_size != 0)
  {
    ZLOGS(LOG_DEBUG, "madvising 0x%08x, 0x%08x, MADV_DONTNEED",
        nap->mem_start + hole_start, hole_size);

    ZLOGFAIL(0 != NaCl_madvise((void*)(nap->mem_start + hole_start), hole_size,
        MADV_DONTNEED), errno, "madvise failed. cannot release unused data segment");

    ZLOGS(LOG_DEBUG, "mprotecting 0x%08x, 0x%08x, PROT_NONE",
        nap->mem_start + hole_start, hole_size);

    ZLOGFAIL(0 != NaCl_mprotect((void *)(nap->mem_start + hole_start), hole_size,
        PROT_NONE), errno, "mprotect failed. cannot protect pages");
  }
  else
    ZLOGS(LOG_DEBUG, "there is no hole between end of data and the beginning of stack");
}
Example #8
NaClErrorCode NaClAppLoadFile(struct Gio       *gp,
                              struct NaClApp   *nap,
                              enum NaClAbiCheckOption check_abi) {
  NaClErrorCode       ret = LOAD_INTERNAL;
  NaClErrorCode       subret;
  uintptr_t           rodata_end;
  uintptr_t           data_end;
  uintptr_t           max_vaddr;
  struct NaClElfImage *image = NULL;

  /* NACL_MAX_ADDR_BITS < 32 */
  if (nap->addr_bits > NACL_MAX_ADDR_BITS) {
    ret = LOAD_ADDR_SPACE_TOO_BIG;
    goto done;
  }

  nap->stack_size = NaClRoundAllocPage(nap->stack_size);

  /* temporary object will be deleted at the end of the function */
  image = NaClElfImageNew(gp, &subret);
  if (NULL == image) {
    ret = subret;
    goto done;
  }

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  check_abi = NACL_ABI_CHECK_OPTION_CHECK;
#endif

  if (NACL_ABI_CHECK_OPTION_CHECK == check_abi) {
    subret = NaClElfImageValidateAbi(image);
    if (subret != LOAD_OK) {
      ret = subret;
      goto done;
    }
  }

  subret = NaClElfImageValidateElfHeader(image);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateProgramHeaders(image,
                                              nap->addr_bits,
                                              &nap->static_text_end,
                                              &nap->rodata_start,
                                              &rodata_end,
                                              &nap->data_start,
                                              &data_end,
                                              &max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  if (0 == nap->data_start) {
    if (0 == nap->rodata_start) {
      if (NaClRoundAllocPage(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE) {
        /*
         * if no rodata and no data, we make sure that there is space for
         * the halt sled.
         */
        max_vaddr += NACL_MAP_PAGESIZE;
      }
    } else {
      /*
       * no data, but there is rodata.  this means max_vaddr is just
       * where rodata ends.  this might not be at an allocation
       * boundary, and in that case the page would not be writable.  round
       * max_vaddr up to the next allocation boundary so that bss will
       * be at the next writable region.
       */
      ;
    }
    max_vaddr = NaClRoundAllocPage(max_vaddr);
  }
  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  NaClLog(4, "Values from NaClElfImageValidateProgramHeaders:\n");
  NaClLog(4, "rodata_start = 0x%08"NACL_PRIxPTR"\n", nap->rodata_start);
  NaClLog(4, "rodata_end   = 0x%08"NACL_PRIxPTR"\n", rodata_end);
  NaClLog(4, "data_start   = 0x%08"NACL_PRIxPTR"\n", nap->data_start);
  NaClLog(4, "data_end     = 0x%08"NACL_PRIxPTR"\n", data_end);
  NaClLog(4, "max_vaddr    = 0x%08"NACL_PRIxPTR"\n", max_vaddr);

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  nap->bundle_size = NaClElfImageGetBundleSize(image);
  if (nap->bundle_size == 0) {
    ret = LOAD_BAD_ABI;
    goto done;
  }
#else
  /* pick some reasonable default for an un-sandboxed nexe */
  nap->bundle_size = 32;
#endif
  nap->entry_pt = NaClElfImageGetEntryPoint(image);

  NaClLog(2,
          "NaClApp addr space layout:\n");
  NaClLog(2,
          "nap->static_text_end    = 0x%016"NACL_PRIxPTR"\n",
          nap->static_text_end);
  NaClLog(2,
          "nap->dynamic_text_start = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_start);
  NaClLog(2,
          "nap->dynamic_text_end   = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_end);
  NaClLog(2,
          "nap->rodata_start       = 0x%016"NACL_PRIxPTR"\n",
          nap->rodata_start);
  NaClLog(2,
          "nap->data_start         = 0x%016"NACL_PRIxPTR"\n",
          nap->data_start);
  NaClLog(2,
          "nap->data_end           = 0x%016"NACL_PRIxPTR"\n",
          nap->data_end);
  NaClLog(2,
          "nap->break_addr         = 0x%016"NACL_PRIxPTR"\n",
          nap->break_addr);
  NaClLog(2,
          "nap->entry_pt           = 0x%016"NACL_PRIxPTR"\n",
          nap->entry_pt);
  NaClLog(2,
          "nap->bundle_size        = 0x%x\n",
          nap->bundle_size);

  if (!NaClAddrIsValidEntryPt(nap, nap->entry_pt)) {
    ret = LOAD_BAD_ENTRY;
    goto done;
  }

  /*
   * Basic address space layout sanity check.
   */
  if (0 != nap->data_start) {
    if (data_end != max_vaddr) {
      NaClLog(LOG_INFO, "data segment is not last\n");
      ret = LOAD_DATA_NOT_LAST_SEGMENT;
      goto done;
    }
  } else if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(rodata_end) != max_vaddr) {
      /*
       * This should be unreachable, but we include it just for
       * completeness.
       *
       * Here is why it is unreachable:
       *
       * NaClPhdrChecks checks the text segment starting address.  The
       * only allowed loaded segments are text, data, and rodata.
       * Thus unless the rodata is in the trampoline region, it must
       * be after the text.  And NaClElfImageValidateProgramHeaders
       * ensures that all segments start after the trampoline region.
       */
      NaClLog(LOG_INFO, "no data segment, but rodata segment is not last\n");
      ret = LOAD_NO_DATA_BUT_RODATA_NOT_LAST_SEGMENT;
      goto done;
    }
  }
  if (0 != nap->rodata_start && 0 != nap->data_start) {
    if (rodata_end > nap->data_start) {
      NaClLog(LOG_INFO, "rodata_overlaps data.\n");
      ret = LOAD_RODATA_OVERLAPS_DATA;
      goto done;
    }
  }
  if (0 != nap->rodata_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->rodata_start) {
      ret = LOAD_TEXT_OVERLAPS_RODATA;
      goto done;
    }
  } else if (0 != nap->data_start) {
    if (NaClRoundAllocPage(NaClEndOfStaticText(nap)) > nap->data_start) {
      ret = LOAD_TEXT_OVERLAPS_DATA;
      goto done;
    }
  }

  if (0 != nap->rodata_start &&
      NaClRoundAllocPage(nap->rodata_start) != nap->rodata_start) {
    NaClLog(LOG_INFO, "rodata_start not a multiple of allocation size\n");
    ret = LOAD_BAD_RODATA_ALIGNMENT;
    goto done;
  }
  if (0 != nap->data_start &&
      NaClRoundAllocPage(nap->data_start) != nap->data_start) {
    NaClLog(LOG_INFO, "data_start not a multiple of allocation size\n");
    ret = LOAD_BAD_DATA_ALIGNMENT;
    goto done;
  }

  NaClLog(2, "Allocating address space\n");
  subret = NaClAllocAddrSpace(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NB: mem_map object has been initialized, but is empty.
   * NaClMakeDynamicTextShared does not touch it.
   *
   * NaClMakeDynamicTextShared also fills the dynamic memory region
   * with the architecture-specific halt instruction.  If/when we use
   * memory mapping to save paging space for the dynamic region and
   * lazily halt fill the memory as the pages become
   * readable/executable, we must make sure that the *last*
   * NACL_MAP_PAGESIZE chunk is nonetheless mapped and written with
   * halts.
   */
  NaClLog(2,
          ("Replacing gap between static text and"
           " (ro)data with shareable memory\n"));
  subret = NaClMakeDynamicTextShared(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   * TODO(ilewis): See if this can be enabled for Win32 as well. (issue 40077)
   */
  NaClLog(2, "Loading into memory\n");
#if NACL_WINDOWS && NACL_ARCH_CPU_X86_64
  ret = NaCl_mprotect((void*) nap->mem_start,
    NaClRoundAllocPage(nap->data_end),
    PROT_READ|PROT_WRITE);
  if (ret) {
      NaClLog(LOG_FATAL, "Couldn't get writeable pages for image. "
                         "Error code 0x%X\n", ret);
  }
#endif
  subret = NaClElfImageLoad(image, gp, nap->addr_bits, nap->mem_start);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NaClFillEndOfTextRegion will fill with halt instructions the
   * padding space after the static text region.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extends to the next page (and not
   * allocation page).  static_text_end is updated to include the
   * padding.
   */
  NaClFillEndOfTextRegion(nap);

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  NaClLog(2, "Validating image\n");
  subret = NaClValidateImage(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }
#endif

  NaClLog(2, "Installing trampoline\n");

  NaClLoadTrampoline(nap);

  NaClLog(2, "Installing springboard\n");

  NaClLoadSpringboard(nap);

  NaClLog(2, "Applying memory protection\n");

  /*
   * NaClMemoryProtect also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  subret = NaClMemoryProtection(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }
  NaClLog(2,
          "NaClAppLoadFile done; addr space layout:\n");
  NaClLog(2,
          "nap->static_text_end    = 0x%016"NACL_PRIxPTR"\n",
          nap->static_text_end);
  NaClLog(2,
          "nap->dynamic_text_start = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_start);
  NaClLog(2,
          "nap->dynamic_text_end   = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_end);
  NaClLog(2,
          "nap->rodata_start       = 0x%016"NACL_PRIxPTR"\n",
          nap->rodata_start);
  NaClLog(2,
          "nap->data_start         = 0x%016"NACL_PRIxPTR"\n",
          nap->data_start);
  NaClLog(2,
          "nap->data_end           = 0x%016"NACL_PRIxPTR"\n",
          nap->data_end);
  NaClLog(2,
          "nap->break_addr         = 0x%016"NACL_PRIxPTR"\n",
          nap->break_addr);
  NaClLog(2,
          "nap->entry_pt           = 0x%016"NACL_PRIxPTR"\n",
          nap->entry_pt);
  NaClLog(2,
          "nap->bundle_size        = 0x%x\n",
          nap->bundle_size);

  ret = LOAD_OK;
done:
  NaClElfImageDelete(image);
  return ret;
}
Example #9
/*
 * Apply memory protection to memory regions.
 */
void NaClMemoryProtection(struct NaClApp *nap)
{
  uintptr_t start_addr;
  size_t    region_size;
  int       err;

  /*
   * The first NACL_SYSCALL_START_ADDR bytes are mapped as PROT_NONE.
   * This enables NULL pointer checking, and provides additional protection
   * against addr16/data16 prefixed operations being used for attacks.
   *
   * NaClMprotectGuards also sets up guard pages outside of the
   * virtual address space of the NaClApp -- for the ARM and x86-64
   * where data sandboxing only sandboxes memory writes and not reads,
   * we need to ensure that certain addressing modes that might
   * otherwise allow the NaCl app to write outside its address space
   * (given how we use masking / base registers to implement data
   * write sandboxing) won't affect the trusted data structures.
   */

  ZLOGS(LOG_DEBUG, "Protecting guard pages for 0x%08x", nap->mem_start);
  NaClMprotectGuards(nap);

  start_addr = nap->mem_start + NACL_SYSCALL_START_ADDR;
  /*
   * The next pages up to NACL_TRAMPOLINE_END are the trampolines.
   * Immediately following that is the loaded text section.
   * These are collectively marked as PROT_READ | PROT_EXEC.
   */
  region_size = NaClRoundPage(nap->static_text_end - NACL_SYSCALL_START_ADDR);
  ZLOGS(LOG_DEBUG, "Trampoline/text region start 0x%08x, size 0x%08x, end 0x%08x",
          start_addr, region_size, start_addr + region_size);

  err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ | PROT_EXEC);
  ZLOGFAIL(0 != err, err, FAILED_MSG);

  SET_MEM_MAP_IDX(nap->mem_map[TextIdx], "Text",
      start_addr, region_size, PROT_READ | PROT_EXEC);

  /*
   * Page protections for this region have already been set up by
   * nacl_text.c.
   *
   * todo(d'b): since nacl_text.c no longer exists, protection should be set here
   *
   * We record the mapping for consistency with other fixed
   * mappings, but the record is not actually used.  Overmapping is
   * prevented by a separate range check, which is done by
   * NaClSysCommonAddrRangeContainsExecutablePages_mu().
   */
  /*
   * zerovm does not support dynamic text. the code below checks for its
   * existence, logs information and fails if needed.
   * todo(d'b): once dynamic text support is either added or completely
   * removed, the block below should be rewritten or removed
   */
  start_addr = NaClUserToSys(nap, nap->dynamic_text_start);
  region_size = nap->dynamic_text_end - nap->dynamic_text_start;
  ZLOGS(LOG_DEBUG, "shm txt region start 0x%08x, size 0x%08x, end 0x%08x",
      start_addr, region_size, start_addr + region_size);
  ZLOGFAIL(0 != region_size, EFAULT, "zerovm does not support nexe with dynamic text!");

  if(0 != nap->rodata_start)
  {
    uintptr_t rodata_end;

    /*
     * TODO(mseaborn): Could reduce the number of cases by ensuring
     * that nap->data_start is always non-zero, even if
     * nap->rodata_start == nap->data_start == nap->break_addr.
     */
    if(0 != nap->data_start)
      rodata_end = nap->data_start;
    else rodata_end = nap->break_addr;

    start_addr = NaClUserToSys(nap, nap->rodata_start);
    region_size = NaClRoundPage(NaClRoundAllocPage(rodata_end)
        - NaClSysToUser(nap, start_addr));
    ZLOGS(LOG_DEBUG, "RO data region start 0x%08x, size 0x%08x, end 0x%08x",
        start_addr, region_size, start_addr + region_size);

    err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ);
    ZLOGFAIL(0 != err, err, FAILED_MSG);

    SET_MEM_MAP_IDX(nap->mem_map[RODataIdx], "ROData",
        start_addr, region_size, PROT_READ);
  }

  /*
   * data_end is max virtual addr seen, so start_addr <= data_end
   * must hold.
   */
  if(0 != nap->data_start)
  {
    start_addr = NaClUserToSys(nap, nap->data_start);
    region_size = NaClRoundPage(NaClRoundAllocPage(nap->data_end)
        - NaClSysToUser(nap, start_addr));
    ZLOGS(LOG_DEBUG, "RW data region start 0x%08x, size 0x%08x, end 0x%08x",
        start_addr, region_size, start_addr + region_size);

    err = NaCl_mprotect((void *)start_addr, region_size, PROT_READ | PROT_WRITE);
    ZLOGFAIL(0 != err, err, FAILED_MSG);

    SET_MEM_MAP_IDX(nap->mem_map[HeapIdx], "Heap",
        start_addr, region_size, PROT_READ | PROT_WRITE);
  }

  /* stack is read/write but not execute */
  region_size = nap->stack_size;
  start_addr = NaClUserToSys(nap,
      NaClTruncAllocPage(((uintptr_t) 1U << nap->addr_bits) - nap->stack_size));
  ZLOGS(LOG_DEBUG, "RW stack region start 0x%08x, size 0x%08lx, end 0x%08x",
          start_addr, region_size, start_addr + region_size);

  err = NaCl_mprotect((void *)start_addr, NaClRoundAllocPage(nap->stack_size),
      PROT_READ | PROT_WRITE);
  ZLOGFAIL(0 != err, err, FAILED_MSG);

  SET_MEM_MAP_IDX(nap->mem_map[StackIdx], "Stack",
      start_addr, NaClRoundAllocPage(nap->stack_size), PROT_READ | PROT_WRITE);
}
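
SET_MEM_MAP_IDX, used above and in Example #4, is not included in these snippets. Judging from the fields touched in Example #2 (.size and .end) and the arguments at the call sites (name, start, size, protection), a hypothetical equivalent could look like the macro below; the field names other than size and end are guesses, not the project's real struct layout.

/* Hypothetical stand-in for SET_MEM_MAP_IDX (illustrative only). */
#define DEMO_SET_MEM_MAP_IDX(entry, entry_name, entry_start, entry_size, entry_prot) \
  do {                                                                               \
    (entry).name  = (entry_name);                                                    \
    (entry).start = (entry_start);                                                   \
    (entry).size  = (entry_size);                                                    \
    (entry).end   = (entry_start) + (entry_size);                                    \
    (entry).prot  = (entry_prot);                                                    \
  } while (0)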
Example #10
NaClErrorCode NaClAppLoadFile(struct Gio       *gp,
                              struct NaClApp   *nap) {
  NaClErrorCode       ret = LOAD_INTERNAL;
  NaClErrorCode       subret;
  uintptr_t           rodata_end;
  uintptr_t           data_end;
  uintptr_t           max_vaddr;
  struct NaClElfImage *image = NULL;
  struct NaClPerfCounter  time_load_file;

  NaClPerfCounterCtor(&time_load_file, "NaClAppLoadFile");

  /* NACL_MAX_ADDR_BITS < 32 */
  if (nap->addr_bits > NACL_MAX_ADDR_BITS) {
    ret = LOAD_ADDR_SPACE_TOO_BIG;
    goto done;
  }

  nap->stack_size = NaClRoundAllocPage(nap->stack_size);

  /* temporary object will be deleted at the end of the function */
  image = NaClElfImageNew(gp, &subret);
  if (NULL == image) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateElfHeader(image);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateProgramHeaders(image,
                                              nap->addr_bits,
                                              &nap->static_text_end,
                                              &nap->rodata_start,
                                              &rodata_end,
                                              &nap->data_start,
                                              &data_end,
                                              &max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  if (0 == nap->data_start) {
    if (0 == nap->rodata_start) {
      if (NaClRoundAllocPage(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE) {
        /*
         * if no rodata and no data, we make sure that there is space for
         * the halt sled.
         */
        max_vaddr += NACL_MAP_PAGESIZE;
      }
    } else {
      /*
       * no data, but there is rodata.  this means max_vaddr is just
       * where rodata ends.  this might not be at an allocation
       * boundary, and in that case the page would not be writable.  round
       * max_vaddr up to the next allocation boundary so that bss will
       * be at the next writable region.
       */
      ;
    }
    max_vaddr = NaClRoundAllocPage(max_vaddr);
  }
  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  NaClLog(4, "Values from NaClElfImageValidateProgramHeaders:\n");
  NaClLog(4, "rodata_start = 0x%08"NACL_PRIxPTR"\n", nap->rodata_start);
  NaClLog(4, "rodata_end   = 0x%08"NACL_PRIxPTR"\n", rodata_end);
  NaClLog(4, "data_start   = 0x%08"NACL_PRIxPTR"\n", nap->data_start);
  NaClLog(4, "data_end     = 0x%08"NACL_PRIxPTR"\n", data_end);
  NaClLog(4, "max_vaddr    = 0x%08"NACL_PRIxPTR"\n", max_vaddr);

  /* We now support only one bundle size.  */
  nap->bundle_size = NACL_INSTR_BLOCK_SIZE;

  nap->initial_entry_pt = NaClElfImageGetEntryPoint(image);
  NaClLogAddressSpaceLayout(nap);

  if (!NaClAddrIsValidEntryPt(nap, nap->initial_entry_pt)) {
    ret = LOAD_BAD_ENTRY;
    goto done;
  }

  subret = NaClCheckAddressSpaceLayoutSanity(nap, rodata_end, data_end,
                                             max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Allocating address space\n");
  NaClPerfCounterMark(&time_load_file, "PreAllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  subret = NaClAllocAddrSpace(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "AllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   */
  NaClLog(2, "Loading into memory\n");
  ret = NaCl_mprotect((void *) (nap->mem_start + NACL_TRAMPOLINE_START),
                      NaClRoundAllocPage(nap->data_end) - NACL_TRAMPOLINE_START,
                      NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE);
  if (0 != ret) {
    NaClLog(LOG_FATAL,
            "NaClAppLoadFile: Failed to make image pages writable. "
            "Error code 0x%x\n",
            ret);
  }
  subret = NaClElfImageLoad(image, gp, nap->addr_bits, nap->mem_start);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NB: mem_map object has been initialized, but is empty.
   * NaClMakeDynamicTextShared does not touch it.
   *
   * NaClMakeDynamicTextShared also fills the dynamic memory region
   * with the architecture-specific halt instruction.  If/when we use
   * memory mapping to save paging space for the dynamic region and
   * lazily halt fill the memory as the pages become
   * readable/executable, we must make sure that the *last*
   * NACL_MAP_PAGESIZE chunk is nonetheless mapped and written with
   * halts.
   */
  NaClLog(2,
          ("Replacing gap between static text and"
           " (ro)data with shareable memory\n"));
  subret = NaClMakeDynamicTextShared(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "MakeDynText");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NaClFillEndOfTextRegion will fill with halt instructions the
   * padding space after the static text region.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extends to the next page (and not
   * allocation page).  static_text_end is updated to include the
   * padding.
   */
  NaClFillEndOfTextRegion(nap);

#if 0 == NACL_DANGEROUS_DEBUG_MODE_DISABLE_INNER_SANDBOX
  NaClLog(2, "Validating image\n");
  subret = NaClValidateImage(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "ValidateImg");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }
#endif

  NaClLog(2, "Initializing arch switcher\n");
  NaClInitSwitchToApp(nap);

  NaClLog(2, "Installing trampoline\n");
  NaClLoadTrampoline(nap);

  /*
   * NaClMemoryProtect also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  NaClLog(2, "Applying memory protection\n");
  subret = NaClMemoryProtection(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "NaClAppLoadFile done; ");
  NaClLogAddressSpaceLayout(nap);
  ret = LOAD_OK;
done:
  NaClElfImageDelete(image);

  NaClPerfCounterMark(&time_load_file, "EndLoadFile");
  NaClPerfCounterIntervalTotal(&time_load_file);
  return ret;
}
Example #11
File: sel.c  Project: Abioy/zerovm
void AppLoadFile(struct Gio *gp, struct NaClApp *nap)
{
  uintptr_t rodata_end;
  uintptr_t data_end;
  uintptr_t max_vaddr;
  struct ElfImage *image = NULL;
  int err;

  /* fail if the address space is too big */
  ZLOGFAIL(nap->addr_bits > NACL_MAX_ADDR_BITS, EFAULT, FAILED_MSG);

  nap->stack_size = ROUNDUP_64K(nap->stack_size);

  /* temporary object will be deleted at the end of the function */
  image = ElfImageNew(gp);
  ValidateElfHeader(image);

  ValidateProgramHeaders(image, nap->addr_bits, &nap->static_text_end,
      &nap->rodata_start, &rodata_end, &nap->data_start, &data_end, &max_vaddr);

  /*
   * if there is no rodata and no data, we make sure that there is space
   * for the halt sled. otherwise, if there is no data but there is
   * rodata, max_vaddr is just where rodata ends.  this might not be at
   * an allocation boundary, and in that case the page would not be
   * writable.  round max_vaddr up to the next allocation boundary so
   * that bss will be at the next writable region.
   */
  if(0 == nap->data_start)
  {
    if(0 == nap->rodata_start)
    {
      if(ROUNDUP_64K(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE)
        max_vaddr += NACL_MAP_PAGESIZE;
    }
    max_vaddr = ROUNDUP_64K(max_vaddr);
  }

  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  ZLOGS(LOG_INSANE, "Values from ValidateProgramHeaders:");
  DUMP(nap->rodata_start);
  DUMP(rodata_end);
  DUMP(nap->data_start);
  DUMP(data_end);
  DUMP(max_vaddr);

  nap->initial_entry_pt = ElfImageGetEntryPoint(image);
  LogAddressSpaceLayout(nap);

  /* Bad program entry point address */
  ZLOGFAIL(!AddrIsValidEntryPt(nap, nap->initial_entry_pt), ENOEXEC, FAILED_MSG);

  CheckAddressSpaceLayoutSanity(nap, rodata_end, data_end, max_vaddr);

  ZLOGS(LOG_DEBUG, "Allocating address space");
  AllocAddrSpace(nap);

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   */
  ZLOGS(LOG_DEBUG, "Loading into memory");
  err = NaCl_mprotect((void *)(nap->mem_start + NACL_TRAMPOLINE_START),
      ROUNDUP_64K(nap->data_end) - NACL_TRAMPOLINE_START,
      PROT_READ | PROT_WRITE);
  ZLOGFAIL(0 != err, EFAULT, "Failed to make image pages writable. errno = %d", err);

  ElfImageLoad(image, gp, nap->addr_bits, nap->mem_start);

  /* d'b: shared memory for the dynamic text disabled */
  nap->dynamic_text_start = ROUNDUP_64K(NaClEndOfStaticText(nap));
  nap->dynamic_text_end = nap->dynamic_text_start;

  /*
   * FillEndOfTextRegion will fill with halt instructions the
   * padding space after the static text region.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extends to the next page (and not
   * allocation page).  static_text_end is updated to include the
   * padding.
   */
  FillEndOfTextRegion(nap);

  ZLOGS(LOG_DEBUG, "Initializing arch switcher");
  InitSwitchToApp(nap);

  ZLOGS(LOG_DEBUG, "Installing trampoline");
  LoadTrampoline(nap);

  /*
   * NaClMemoryProtect also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  ZLOGS(LOG_DEBUG, "Applying memory protection");
  MemoryProtection(nap);

  ZLOGS(LOG_DEBUG, "AppLoadFile done");
  LogAddressSpaceLayout(nap);

  ElfImageDelete(image);
}
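
Unlike the NaClAppLoadFile variants in Examples #8 and #10, which propagate NaClErrorCode values back to the caller, this zerovm port aborts on the first error through ZLOGFAIL. The macro is not shown in these snippets; a hypothetical equivalent (illustrative only, not the project's definition) behaves roughly as follows.

#include <stdlib.h>

/* Hypothetical stand-in for ZLOGFAIL: log a message and terminate with the
 * given code when the condition is true. */
#define DEMO_ZLOGFAIL(cond, code, ...)       \
  do {                                       \
    if (cond) {                              \
      ZLOGS(LOG_ERROR, __VA_ARGS__);         \
      exit(code);                            \
    }                                        \
  } while (0)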
Example #12
File: elf_util.c  Project: camuel/zvm
NaClErrorCode NaClElfImageLoadDynamically(struct NaClElfImage *image,
                                          struct NaClApp      *nap,
                                          struct Gio          *gfile) {
  int segnum;
  for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
    const Elf_Phdr *php = &image->phdrs[segnum];
    int32_t result;

    /*
     * We check for PT_LOAD directly rather than using the "loadable"
     * array because we are not using NaClElfImageValidateProgramHeaders()
     * to fill out the "loadable" array for this ELF object.  This ELF
     * object does not have to fit such strict constraints (such as
     * having code at 0x20000), and safety checks are applied by
     * NaClTextDyncodeCreate() and NaClCommonSysMmapIntern().
     */
    if (PT_LOAD != php->p_type) {
      continue;
    }

    /*
     * Ideally, Gio would have a Pread() method which we would use
     * instead of Seek().  In practice, though, there is no
     * Seek()/Read() race condition here because both
     * GioMemoryFileSnapshot and NaClGioShm use a seek position that
     * is local and not shared between processes.
     */
    if ((*gfile->vtbl->Seek)(gfile, (off_t) php->p_offset,
                             SEEK_SET) == (off_t) -1) {
      NaClLog(1, "NaClElfImageLoadDynamically: seek failed\n");
      return LOAD_READ_ERROR;
    }

    if (0 != (php->p_flags & PF_X)) {
      /* Load code segment. */
      /*
       * We make a copy of the code.  This is not ideal given that
       * GioMemoryFileSnapshot and NaClGioShm already have a copy of
       * the file in memory or mmapped.
       * TODO(mseaborn): Reduce the amount of copying here.
       */
      char *code_copy = malloc(php->p_filesz);
      if (NULL == code_copy) {
        NaClLog(1, "NaClElfImageLoadDynamically: malloc failed\n");
        return LOAD_NO_MEMORY;
      }
      if ((Elf_Word) (*gfile->vtbl->Read)(gfile, code_copy, php->p_filesz)
          != php->p_filesz) {
        free(code_copy);
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to read code segment\n");
        return LOAD_READ_ERROR;
      }
      result = NaClTextDyncodeCreate(nap, (uint32_t) php->p_vaddr,
                                     code_copy, (uint32_t) php->p_filesz);
      free(code_copy);
      if (0 != result) {
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to load code segment\n");
        return LOAD_UNLOADABLE;
      }
    } else {
      /* Load data segment. */
      void *paddr = (void *) NaClUserToSys(nap, php->p_vaddr);
      size_t mapping_size = NaClRoundAllocPage(php->p_memsz);
      /*
       * Note that we do not use NACL_ABI_MAP_FIXED because we do not
       * want to silently overwrite any existing mappings, such as the
       * user app's data segment or the stack.  We detect overmapping
       * when mmap chooses not to use the preferred address we supply.
       * (Ideally mmap would provide a MAP_EXCL option for this
       * instead.)
       */
      result = NaClCommonSysMmapIntern(
          nap, (void *) php->p_vaddr, mapping_size,
          NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
          NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE,
          -1, 0);
      if ((int32_t) php->p_vaddr != result) {
        NaClLog(1, "NaClElfImageLoadDynamically: failed to map data segment\n");
        return LOAD_UNLOADABLE;
      }
      if ((Elf_Word) (*gfile->vtbl->Read)(gfile, paddr, php->p_filesz)
          != php->p_filesz) {
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to read data segment\n");
        return LOAD_READ_ERROR;
      }
      /*
       * Note that we do not need to zero the BSS (the region from
       * p_filesz to p_memsz) because it should already be zero
       * filled.  This would not be the case if we were mapping the
       * data segment from the file.
       */

      if (0 == (php->p_flags & PF_W)) {
        /* Handle read-only data segment. */
        int rc = NaCl_mprotect(paddr, mapping_size, NACL_ABI_PROT_READ);
        if (0 != rc) {
          NaClLog(1, "NaClElfImageLoadDynamically: "
                  "failed to mprotect read-only data segment\n");
          return LOAD_MPROTECT_FAIL;
        }

        NaClVmmapUpdate(&nap->mem_map,
                        php->p_vaddr >> NACL_PAGESHIFT,
                        mapping_size >> NACL_PAGESHIFT,
                        PROT_READ,
                        NULL,
                        0  /* remove: false */);
      }
    }
  }
  return LOAD_OK;
}
Example #13
/*
 * Create thunk for use by syscall trampoline code.
 */
int NaClMakePcrelThunk(struct NaClApp *nap) {
  int                   retval = 0;  /* fail */
  int                   error;
  void                  *thunk_addr = NULL;
  struct NaClPatchInfo  patch_info;
  uintptr_t             patch_rel32[1];  /* NaClSyscallSeg */
  struct NaClPatch      patch_abs32[2];  /* ds, nacl_user */

  /* idempotent */
  nacl_pcrel_globals.user = nacl_user;
  nacl_pcrel_globals.sys = nacl_sys;

  if (0 != (error = NaCl_page_alloc(&thunk_addr, NACL_MAP_PAGESIZE))) {
    NaClLog(LOG_INFO,
            "NaClMakePcrelThunk::NaCl_page_alloc failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }

  patch_rel32[0] = ((uintptr_t) &NaClPcrelThunk_end) - 4;

  patch_abs32[0].target = ((uintptr_t) &NaClPcrelThunk_dseg_patch) - 4;
  patch_abs32[0].value = NaClGetGlobalDs();
  patch_abs32[1].target = ((uintptr_t) &NaClPcrelThunk_globals_patch) - 4;
  patch_abs32[1].value = (uintptr_t) &nacl_pcrel_globals;

  NaClPatchInfoCtor(&patch_info);

  patch_info.rel32 = patch_rel32;
  patch_info.num_rel32 = NACL_ARRAY_SIZE(patch_rel32);

  patch_info.abs32 = patch_abs32;
  patch_info.num_abs32 = NACL_ARRAY_SIZE(patch_abs32);

  patch_info.dst = (uintptr_t) thunk_addr;
  patch_info.src = (uintptr_t) &NaClPcrelThunk;
  patch_info.nbytes = ((uintptr_t) &NaClPcrelThunk_end
                       - (uintptr_t) &NaClPcrelThunk);

  NaClApplyPatchToMemory(&patch_info);

  if (0 != (error = NaCl_mprotect(thunk_addr,
                                  NACL_MAP_PAGESIZE,
                                  PROT_EXEC|PROT_READ))) {
    NaClLog(LOG_INFO,
            "NaClMakePcrelThunk::NaCl_mprotect failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  retval = 1;
cleanup:
  if (0 == retval) {
    if (NULL != thunk_addr) {
      NaCl_page_free(thunk_addr, NACL_MAP_PAGESIZE);
      thunk_addr = NULL;
    }
  } else {
    nap->pcrel_thunk = (uintptr_t) thunk_addr;
  }
  return retval;
}
Example #14
int NaClMakeDispatchThunk(struct NaClApp *nap) {
  int                   retval = 0;  /* fail */
  int                   error;
  void                  *thunk_addr = NULL;
  struct NaClPatchInfo  patch_info;
  struct NaClPatch      jmp_target;

  NaClLog(LOG_WARNING, "Entered NaClMakeDispatchThunk\n");
  if (0 != nap->dispatch_thunk) {
    NaClLog(LOG_ERROR, " dispatch_thunk already initialized!\n");
    return 1;
  }

  if (0 != (error = NaCl_page_alloc_randomized(&thunk_addr,
                                               NACL_MAP_PAGESIZE))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchThunk::NaCl_page_alloc failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  NaClLog(LOG_INFO, "NaClMakeDispatchThunk: got addr 0x%"NACL_PRIxPTR"\n",
          (uintptr_t) thunk_addr);

  if (0 != (error = NaCl_mprotect(thunk_addr,
                                  NACL_MAP_PAGESIZE,
                                  PROT_READ | PROT_WRITE))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchThunk::NaCl_mprotect r/w failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  NaClFillMemoryRegionWithHalt(thunk_addr, NACL_MAP_PAGESIZE);

  jmp_target.target = (((uintptr_t) &NaClDispatchThunk_jmp_target)
                       - sizeof(uintptr_t));
  jmp_target.value = (uintptr_t) NaClSyscallSeg;

  NaClPatchInfoCtor(&patch_info);
  patch_info.abs64 = &jmp_target;
  patch_info.num_abs64 = 1;

  patch_info.dst = (uintptr_t) thunk_addr;
  patch_info.src = (uintptr_t) &NaClDispatchThunk;
  patch_info.nbytes = ((uintptr_t) &NaClDispatchThunkEnd
                       - (uintptr_t) &NaClDispatchThunk);
  NaClApplyPatchToMemory(&patch_info);

  if (0 != (error = NaCl_mprotect(thunk_addr,
                                  NACL_MAP_PAGESIZE,
                                  PROT_EXEC|PROT_READ))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchThunk::NaCl_mprotect r/x failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  retval = 1;
 cleanup:
  if (0 == retval) {
    if (NULL != thunk_addr) {
      NaCl_page_free(thunk_addr, NACL_MAP_PAGESIZE);
      thunk_addr = NULL;
    }
  } else {
    nap->dispatch_thunk = (uintptr_t) thunk_addr;
  }
  return retval;
}
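
Examples #13 and #14 both follow a write-then-execute discipline: the thunk page is writable while it is filled and patched, and only then flipped to PROT_READ | PROT_EXEC, so it is never writable and executable at the same time. The sketch below strips that flow down to its essentials using the same wrappers as the examples; the halt-fill byte and payload handling are illustrative assumptions, not the real thunk template.

#include <stddef.h>
#include <string.h>

/* Minimal W^X sketch (illustrative only): allocate a page, fill and patch it
 * while writable, then make it read/execute. */
static void *DemoMakeExecPage(const void *code, size_t code_len)
{
  void *page = NULL;

  if (code_len > NACL_MAP_PAGESIZE)
    return NULL;
  if (NaCl_page_alloc(&page, NACL_MAP_PAGESIZE) != 0)
    return NULL;

  /* step 1: writable while the contents are being prepared */
  if (NaCl_mprotect(page, NACL_MAP_PAGESIZE, PROT_READ | PROT_WRITE) != 0)
    goto fail;
  memset(page, 0xF4, NACL_MAP_PAGESIZE);  /* assumed x86 hlt filler */
  memcpy(page, code, code_len);

  /* step 2: executable, and no longer writable, once the contents are final */
  if (NaCl_mprotect(page, NACL_MAP_PAGESIZE, PROT_READ | PROT_EXEC) != 0)
    goto fail;
  return page;

fail:
  NaCl_page_free(page, NACL_MAP_PAGESIZE);
  return NULL;
}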