Code example #1
/* Returns bool: 1 if the mapping behaved as expected, 0 otherwise. */
int TryToMap(struct NaClHostDesc *hd, size_t map_bytes, int prot, int flags,
             int expected_errno) {
  uintptr_t addr;

  addr = NaClHostDescMap(hd,
                         NaClDescEffectorTrustedMem(),
                         NULL,
                         map_bytes,
                         prot,
                         flags,
                         0);
  if (0 == expected_errno) {
    /* values in [-4095, -1] (cast to uintptr_t) encode a negative errno */
    if ((uintptr_t) -4095 < addr) {
      NaClLog(LOG_ERROR, "NaClHostDescMap returned errno %d\n", -(int) addr);
      return 0;
    }
    CHECK(0 == NaClHostDescUnmapUnsafe((void *) addr, map_bytes));
    return 1;
  } else {
    if ((uintptr_t) -4095 < addr) {
      if (expected_errno != -(int) addr) {
        NaClLog(LOG_ERROR, "NaClHostDescMap returned errno %d, expected %d\n",
                -(int) addr, expected_errno);
      }
    } else {
      NaClLog(LOG_ERROR, "NaClHostDescMap succeeded, expected errno %d\n",
              expected_errno);
      CHECK(0 == NaClHostDescUnmapUnsafe((void *) addr, map_bytes));
    }
    return expected_errno == -(int) addr;
  }
}
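A minimal driver sketch for TryToMap follows. This is hypothetical, not the original harness: the helper name, the two pre-opened descriptors, and kNumFileBytes are assumptions; the EACCES expectation reflects typical POSIX mmap behavior for a PROT_WRITE shared mapping of a read-only descriptor.

/*
 * Hypothetical driver sketch (not the original harness).  A read/write
 * descriptor should map successfully; a PROT_WRITE | MAP_SHARED mapping
 * of a descriptor opened read-only should fail with EACCES on POSIX
 * hosts.
 */
static void TryToMapSmokeTest(struct NaClHostDesc *rw_desc,
                              struct NaClHostDesc *ro_desc) {
  CHECK(TryToMap(rw_desc, kNumFileBytes,
                 NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                 NACL_ABI_MAP_SHARED, 0));
  CHECK(TryToMap(ro_desc, kNumFileBytes,
                 NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                 NACL_ABI_MAP_SHARED, NACL_ABI_EACCES));
}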
Code example #2
int prot_exec_test(struct NaClHostDesc *d, void *test_specifics) {
  struct NaClDescEffector *null_eff = NaClDescEffectorTrustedMem();
  uintptr_t addr;
  int (*func)(int param);
  int param;
  int value;

  UNREFERENCED_PARAMETER(test_specifics);

  if ((uintptr_t) -4095 <
      (addr = NaClHostDescMap(d,
                              null_eff,
                              NULL,
                              kNumFileBytes,
                              NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC,
                              NACL_ABI_MAP_SHARED,
                              /* offset */ 0))) {
    fprintf(stderr, "prot_exec_test: map failed, errno %d\n", -(int) addr);
    return 1;
  }

  /* The mapped file is expected to contain machine code for f(x) = x + 1. */
  func = (int (*)(int)) addr;
  for (param = 0; param < 16; ++param) {
    printf("%d -> ", param);
    fflush(stdout);
    value = (*func)(param);
    printf("%d\n", value);
    CHECK(value == param+1);
  }

  CHECK(0 == NaClHostDescUnmapUnsafe((void *) addr, kNumFileBytes));

  return 0;
}
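The test presumes the mapped file already holds executable code computing param + 1. For illustration only, a plain-POSIX analogue (an assumption, not part of the NaCl harness) that builds such a file for x86-64 and runs it the same way; it requires an x86-64 System V host and a filesystem that permits executable mappings.

/*
 * Illustrative plain-POSIX analogue of prot_exec_test: write x86-64
 * code for f(x) = x + 1 into a temp file, map it PROT_READ|PROT_EXEC,
 * and call it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  /* lea eax, [rdi + 1]; ret  (System V AMD64: first int arg in edi) */
  static const unsigned char code[] = { 0x8d, 0x47, 0x01, 0xc3 };
  char path[] = "/tmp/protexecXXXXXX";
  size_t len = 0x10000;  /* one 64K window, like kNumFileBytes */
  int fd = mkstemp(path);
  void *addr;
  int (*func)(int);

  if (fd < 0) return 1;
  write(fd, code, sizeof code);
  ftruncate(fd, (off_t) len);  /* pad the file out to the mapping size */
  addr = mmap(NULL, len, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
  if (addr == MAP_FAILED) return 1;
  func = (int (*)(int)) addr;
  printf("3 -> %d\n", func(3));  /* prints "3 -> 4" */
  munmap(addr, len);
  close(fd);
  unlink(path);
  return 0;
}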
Code example #3
/*
 * mmap MAP_SHARED test
 *
 * Make sure two views of the same file see the changes made from one
 * view in the other.
 */
int map_shared_test(struct NaClHostDesc *d, void *test_specifics) {
  struct NaClDescEffector *null_eff = NaClDescEffectorTrustedMem();
  uintptr_t view1;
  uintptr_t view2;
  char *v1ptr;
  char *v2ptr;

  UNREFERENCED_PARAMETER(test_specifics);

  if ((uintptr_t) -4095 <
      (view1 = NaClHostDescMap(d,
                               null_eff,
                               NULL,
                               kNumFileBytes,
                               NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                               NACL_ABI_MAP_SHARED,
                               /* offset */ 0))) {
    fprintf(stderr, "map_shared_test: view1 map failed, errno %d\n",
            -(int) view1);
    return 1;
  }

  if ((uintptr_t) -4095 <
      (view2 = NaClHostDescMap(d,
                               null_eff,
                               NULL,
                               kNumFileBytes,
                               NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                               NACL_ABI_MAP_SHARED,
                               /* offset */ 0))) {
    fprintf(stderr, "map_shared_test: view2 map failed, errno %d\n",
            -(int) view2);
    return 1;
  }

  v1ptr = (char *) view1;
  v2ptr = (char *) view2;

  CHECK(v1ptr[0] == '\0');
  CHECK(v2ptr[0] == '\0');
  v1ptr[0] = 'x';
  CHECK(v2ptr[0] == 'x');
  v2ptr[0x400] = 'y';
  CHECK(v1ptr[0x400] == 'y');

  CHECK(0 == NaClHostDescUnmapUnsafe((void *) view1, kNumFileBytes));
  CHECK(0 == NaClHostDescUnmapUnsafe((void *) view2, kNumFileBytes));

  return 0;
}
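The property under test is ordinary POSIX MAP_SHARED semantics; a minimal standalone sketch of the same check (the temp file and 64K size are assumptions):

/*
 * Standalone POSIX sketch of the same MAP_SHARED property: two shared
 * views of one file observe each other's stores.
 */
#include <assert.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  char path[] = "/tmp/mapsharedXXXXXX";
  size_t len = 0x10000;
  int fd = mkstemp(path);
  char *v1, *v2;

  assert(fd >= 0);
  assert(0 == ftruncate(fd, (off_t) len));  /* zero-filled backing file */
  v1 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  v2 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  assert(v1 != MAP_FAILED && v2 != MAP_FAILED);

  v1[0] = 'x';            /* store via view 1 ... */
  assert(v2[0] == 'x');   /* ... is visible in view 2 */
  v2[0x400] = 'y';
  assert(v1[0x400] == 'y');

  munmap(v1, len);
  munmap(v2, len);
  close(fd);
  unlink(path);
  return 0;
}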
Code example #4
/*
 * mmap MAP_PRIVATE test
 *
 * Make sure that a MAP_PRIVATE view initially sees the changes made
 * in a MAP_SHARED view, but after touching the private view further
 * changes become invisible.
 */
int map_private_test(struct NaClHostDesc *d, void *test_specifics) {
  struct MapPrivateSpecifics *params =
      (struct MapPrivateSpecifics *) test_specifics;
  struct NaClDescEffector *null_eff = NaClDescEffectorTrustedMem();
  uintptr_t view1;
  uintptr_t view2;
  nacl_off64_t off;
  ssize_t bytes_written;
  char *v1ptr;
  char *v2ptr;

  if ((uintptr_t) -4095 <
      (view1 = NaClHostDescMap(d,
                               null_eff,
                               NULL,
                               kNumFileBytes,
                               NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                               NACL_ABI_MAP_SHARED,
                               /* offset */ 0))) {
    fprintf(stderr, "map_private_test: view1 map failed, errno %d\n",
            -(int) view1);
    return 1;
  }

  NaClLog(2, "map_private_test: view1 = 0x%"NACL_PRIxPTR"\n", view1);

  if ((uintptr_t) -4095 <
      (view2 = NaClHostDescMap(d,
                               null_eff,
                               NULL,
                               kNumFileBytes,
                               NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                               NACL_ABI_MAP_PRIVATE,
                               /* offset */ 0))) {
    fprintf(stderr, "map_private_test: view2 map failed, errno %d\n",
            -(int) view2);
    return 1;
  }

  NaClLog(2, "map_private_test: view2 = 0x%"NACL_PRIxPTR"\n", view2);

  v1ptr = (char *) view1;
  v2ptr = (char *) view2;

  CHECK(v1ptr[0] == '\0');
  CHECK(v2ptr[0] == '\0');
  if (params->shm_not_write) {
    NaClLog(2, "map_private_test: changing via shm view\n");
    v1ptr[0] = 'x';  /* write through shared view */
  } else {
    NaClLog(2, "map_private_test: changing via write interface\n");
    off = NaClHostDescSeek(d, 0, 0 /* whence: SEEK_SET */);
    if (off < 0) {
      fprintf(stderr, "Could not seek: NaCl errno %d\n", (int) -off);
      return 1;
    }
    bytes_written = NaClHostDescWrite(d, "x", 1);
    if (1 != bytes_written) {
      fprintf(stderr, "Could not write: NaCl errno %d\n", (int) -bytes_written);
      return 1;
    }
  }
#if NACL_LINUX || NACL_WINDOWS
  /*
   * Most OSes have this behavior: a PRIVATE mapping is copy-on-write,
   * but the COW occurs when the fault occurs on that mapping, not
   * other mappings; otherwise, the page tables just point the system
   * to the buffer cache (or, if evicted, a stub entry that permits
   * faulting in the page).  So, a write through a writable file
   * descriptor or a SHARED mapping would modify the buffer cache, and
   * the PRIVATE mapping would see such changes until a fault occurs.
   */
  CHECK(v2ptr[0] == 'x');  /* visible! */
#elif NACL_OSX
  /*
   * On OSX, however, the underlying Mach primitives provide
   * bidirectional COW.
   */
  CHECK(v2ptr[0] == '\0');  /* NOT visible! */
#else
# error "Unsupported OS"
#endif

  v2ptr[0] = 'z';  /* COW fault */
  v1ptr[0] = 'y';
  CHECK(v2ptr[0] == 'z'); /* private! */

  CHECK(v1ptr[0x400] == '\0');
  v2ptr[0x400] = 'y';
  CHECK(v1ptr[0x400] == '\0');

  CHECK(0 == NaClHostDescUnmapUnsafe((void *) view1, kNumFileBytes));
  CHECK(0 == NaClHostDescUnmapUnsafe((void *) view2, kNumFileBytes));

  return 0;
}
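A standalone POSIX sketch of the MAP_PRIVATE copy-on-write behavior follows. It asserts only the portable part: whether a shared-view store is visible in an untouched private view is OS-dependent, per the #if NACL_LINUX/NACL_OSX discussion above, so that case is deliberately not asserted.

/*
 * Standalone POSIX sketch of MAP_PRIVATE copy-on-write (illustrative,
 * not NaCl code): once the private view has faulted, later stores
 * through the shared view never reach the private copy.
 */
#include <assert.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  char path[] = "/tmp/mapprivXXXXXX";
  size_t len = 0x10000;
  int fd = mkstemp(path);
  char *shared, *priv;

  assert(fd >= 0);
  assert(0 == ftruncate(fd, (off_t) len));
  shared = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  priv = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  assert(shared != MAP_FAILED && priv != MAP_FAILED);

  priv[0] = 'z';           /* COW fault: the page is now private */
  shared[0] = 'y';         /* later shared-view stores ... */
  assert(priv[0] == 'z');  /* ... do not reach the private copy */

  munmap(shared, len);
  munmap(priv, len);
  close(fd);
  unlink(path);
  return 0;
}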
Code example #5
/*
 * Release the current window if it exists, then map in a window at
 * the provided new_win_offset.  This is akin to filbuf.
 *
 * Preconditions: 0 == (new_win_offset & (NACL_MAP_PAGESIZE - 1))
 *                new_win_offset < self->shm_sz
 */
static int NaClGioShmSetWindow(struct NaClGioShm  *self,
                               size_t             new_win_offset) {
  uintptr_t map_result;
  size_t    actual_len;

  NaClLog(4,
          "NaClGioShmSetWindow: new_win_offset 0x%"NACL_PRIxS"\n",
          new_win_offset);
  if (0 != (new_win_offset & (NACL_MAP_PAGESIZE - 1))) {
    NaClLog(LOG_FATAL,
            ("NaClGioShmSetWindow: internal error, requested"
             " new window offset 0x%"NACL_PRIxS" is not aligned.\n"),
            new_win_offset);
  }

  if (new_win_offset >= self->shm_sz) {
    NaClLog(LOG_FATAL,
            ("NaClGioShmSetWindow: setting window beyond end of shm object"
             " offset 0x%"NACL_PRIxS", size 0x%"NACL_PRIxS"\n"),
            new_win_offset, self->shm_sz);
  }

  if (NULL != self->cur_window) {
    NaClDescUnmapUnsafe(self->shmp, (void *) self->cur_window,
                        self->window_size);
  }
  self->cur_window = NULL;
  self->window_size = 0;

  /*
   * The Map virtual function will NOT pad space beyond the end of the
   * memory mapping object with zero-filled pages.  This is done for
   * user code in nacl_syscall_common.c(NaClCommonSysMmap), and the
   * Map virtual function exposes the behavioral inconsistencies wrt
   * allowing but ignoring mapping an offset beyond the end of file
   * (linux) versus disallowing the mapping (MapViewOfFileEx).
   *
   * Here, we know the actual size of the shm object, and can deal
   * with it.
   */
  actual_len = GIO_SHM_WINDOWSIZE;
  if (actual_len > self->shm_sz - new_win_offset) {
    actual_len = self->shm_sz - new_win_offset;
  }
  map_result =
      (*((struct NaClDescVtbl const *) self->shmp->base.vtbl)->
       Map)(self->shmp,
            NaClDescEffectorTrustedMem(),
            (void *) NULL,
            actual_len,
            NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
            NACL_ABI_MAP_SHARED,
            (nacl_off64_t) new_win_offset);
  NaClLog(4,
          "NaClGioShmSetWindow: Map returned 0x%"NACL_PRIxPTR"\n",
          map_result);
  if (NaClPtrIsNegErrno(&map_result)) {
    return 0;
  }

  self->cur_window = (char *) map_result;
  self->window_size = actual_len;
  self->window_offset = new_win_offset;

  return 1;
}
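A hypothetical caller sketch (an assumption, not the actual gio_shm Read path) showing how the window invariants above might be consumed: check whether the requested offset falls inside the current window and refresh it on a miss, rounding down to satisfy the alignment precondition.

/*
 * Hypothetical caller sketch: ensure the window covers |offset| before
 * copying, refreshing via NaClGioShmSetWindow on a miss.
 */
static int NaClGioShmEnsureWindow(struct NaClGioShm *self, size_t offset) {
  if (offset >= self->shm_sz) {
    return 0;  /* beyond the end of the shm object */
  }
  if (NULL == self->cur_window ||
      offset < self->window_offset ||
      offset >= self->window_offset + self->window_size) {
    /* round down to a NACL_MAP_PAGESIZE boundary, per the precondition */
    size_t new_win_offset = offset & ~((size_t) NACL_MAP_PAGESIZE - 1);
    return NaClGioShmSetWindow(self, new_win_offset);
  }
  return 1;
}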
Code example #6
File: elf_util.c  Project: mariospr/chromium-browser
/*
 * Attempt to map into the NaClApp object nap, from the NaCl descriptor
 * ndp, an ELF segment of type p_flags that starts at file_offset and
 * spans segment_size bytes, to memory starting at paddr (system address).
 * If it is a code segment, make a scratch mapping and check
 * validation in readonly_text mode -- if it succeeds, we map into the
 * target address; if it fails, we return failure so that pread-based
 * loading can proceed.  For rodata and data segments, less checking
 * is needed.  In the text and data case, the end of the segment may
 * not land on a NACL_MAP_PAGESIZE boundary; when this occurs, we will
 * map in all whole NACL_MAP_PAGESIZE chunks, and pread in the tail
 * partial chunk.
 *
 * Returns: LOAD_OK, LOAD_STATUS_UNKNOWN, other error codes.
 *
 * LOAD_OK             -- if the segment has been fully handled
 * LOAD_STATUS_UNKNOWN -- if pread-based fallback is required
 * other error codes   -- if a fatal error occurs, and the caller
 *                        should propagate up
 *
 * See NaClSysMmapIntern in nacl_syscall_common.c for corresponding
 * mmap syscall where PROT_EXEC allows shared libraries to be mapped
 * into dynamic code space.
 */
static NaClErrorCode NaClElfFileMapSegment(struct NaClApp *nap,
                                           struct NaClDesc *ndp,
                                           Elf_Word p_flags,
                                           Elf_Off file_offset,
                                           Elf_Off segment_size,
                                           uintptr_t vaddr,
                                           uintptr_t paddr) {
  size_t rounded_filesz;       /* 64k rounded */
  int mmap_prot = 0;
  uintptr_t image_sys_addr;
  NaClValidationStatus validator_status = NaClValidationFailed;
  struct NaClValidationMetadata metadata;
  int read_last_page_if_partial_allocation_page = 1;
  ssize_t read_ret;
  struct NaClPerfCounter time_mmap_segment;
  NaClPerfCounterCtor(&time_mmap_segment, "NaClElfFileMapSegment");

  rounded_filesz = NaClRoundAllocPage(segment_size);

  NaClLog(4,
          "NaClElfFileMapSegment: checking segment flags 0x%x"
          " to determine map checks\n",
          p_flags);
  /*
   * Is this the text segment?  If so, map into scratch memory and
   * run validation (possibly cached result) with !stubout_mode,
   * readonly_text.  If validator says it's okay, map directly into
   * target location with NACL_ABI_PROT_READ|_EXEC.  If anything
   * failed, fall back to PRead.  NB: the assumption is that there
   * is only one PT_LOAD with PF_R|PF_X segment; this assumption is
   * enforced by phdr seen_seg checks above in
   * NaClElfImageValidateProgramHeaders.
   *
   * After this function returns, we will be setting memory protection
   * in NaClMemoryProtection, so the actual memory protection used is
   * immaterial.
   *
   * For rodata and data/bss, we mmap with NACL_ABI_PROT_READ or
   * NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE as appropriate,
   * without doing validation.  There is no fallback to PRead, since
   * we don't validate the contents.
   */
  switch (p_flags) {
    case PF_R | PF_X:
      NaClLog(4,
              "NaClElfFileMapSegment: text segment and"
              " file is safe for mmap\n");
      if (NACL_VTBL(NaClDesc, ndp)->typeTag != NACL_DESC_HOST_IO) {
        NaClLog(4, "NaClElfFileMapSegment: not supported type, got %d\n",
                NACL_VTBL(NaClDesc, ndp)->typeTag);
        return LOAD_STATUS_UNKNOWN;
      }
      /*
       * Unlike the mmap case, we do not re-run validation to
       * allow patching here; instead, we handle validation
       * failure by going to the pread_fallback case.  In the
       * future, we should consider doing an in-place mapping and
       * allowing HLT patch validation, which should be cheaper
       * since those pages that do not require patching (hopefully
       * majority) will remain file-backed and not require swap
       * space, even if we had to fault in every page.
       */
      NaClLog(1, "NaClElfFileMapSegment: mapping for validation\n");
      NaClPerfCounterMark(&time_mmap_segment, "PreMap");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
                        Map)(ndp,
                             NaClDescEffectorTrustedMem(),
                             (void *) NULL,
                             rounded_filesz,
                             NACL_ABI_PROT_READ,
                             NACL_ABI_MAP_PRIVATE,
                             file_offset);
      NaClPerfCounterMark(&time_mmap_segment, "MapForValidate");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      if (NaClPtrIsNegErrno(&image_sys_addr)) {
        NaClLog(LOG_INFO,
                "NaClElfFileMapSegment: Could not make scratch mapping,"
                " falling back to reading\n");
        return LOAD_STATUS_UNKNOWN;
      }
      /* ask validator / validation cache */
      NaClMetadataFromNaClDescCtor(&metadata, ndp);
      CHECK(segment_size == nap->static_text_end - NACL_TRAMPOLINE_END);
      validator_status = NACL_FI_VAL(
          "ELF_LOAD_FORCE_VALIDATION_STATUS",
          enum NaClValidationStatus,
          (*nap->validator->
           Validate)(vaddr,
                     (uint8_t *) image_sys_addr,
                     segment_size,  /* actual size */
                     0,  /* stubout_mode: no */
                     1,  /* readonly_text: yes */
                     nap->cpu_features,
                     &metadata,
                     nap->validation_cache));
      NaClPerfCounterMark(&time_mmap_segment, "ValidateMapped");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      NaClLog(3, "NaClElfFileMapSegment: validator_status %d\n",
              validator_status);
      NaClMetadataDtor(&metadata);
      /*
       * Remove scratch mapping, then map directly into untrusted
       * address space or pread.
       */
      NaClDescUnmapUnsafe(ndp, (void *) image_sys_addr,
                          rounded_filesz);
      NACL_MAKE_MEM_UNDEFINED((void *) paddr, rounded_filesz);

      if (NaClValidationSucceeded != validator_status) {
        NaClLog(3,
                ("NaClElfFileMapSegment: readonly_text validation for mmap"
                 " failed.  Will retry validation allowing HALT stubbing out"
                 " of unsupported instruction extensions.\n"));
        return LOAD_STATUS_UNKNOWN;
      }

      NaClLog(1, "NaClElfFileMapSegment: mapping into code space\n");
      /*
       * Windows appears to not allow RWX mappings.  This interferes
       * with HALT_SLED and having to HALT pad the last page.  We
       * allow partial code pages, so
       * read_last_page_if_partial_allocation_page will ensure that
       * the last page is writable, so we will be able to write HALT
       * instructions as needed.
       */
      mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC;
      /*
       * NB: the log string is used by tests/mmap_main_nexe/nacl.scons
       * and must be logged at a level that is less than or equal to
       * the requested verbosity level there.
       */
      NaClLog(1, "NaClElfFileMapSegment: EXERCISING MMAP LOAD PATH\n");
      nap->main_exe_prevalidated = 1;
      break;

    case PF_R | PF_W:
      /* read-write (initialized data) */
      mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
      /*
       * NB: the partial page processing will result in zeros
       * following the initialized data, so that the BSS will be zero.
       * On a typical system, this page is mapped in and the BSS
       * region is memset to zero, which means that this partial page
       * is faulted in.  Rather than saving a syscall (pread) and
       * faulting it in, we just use the same code path as for code,
       * which is (slightly) simpler.
       */
      break;

    case PF_R:
      /* read-only */
      mmap_prot = NACL_ABI_PROT_READ;
      /*
       * For rodata, we allow mapping in "garbage" past a partial
       * page; this potentially eliminates a disk I/O operation
       * (if data section has no partial page), possibly delaying
       * disk spin-up if the code was in the validation cache.
       * And it saves another 64kB of swap.
       */
      read_last_page_if_partial_allocation_page = 0;
      break;

    default:
      NaClLog(LOG_FATAL, "NaClElfFileMapSegment: unexpected p_flags %d\n",
              p_flags);
  }
  if (rounded_filesz != segment_size &&
      read_last_page_if_partial_allocation_page) {
    uintptr_t tail_offset = rounded_filesz - NACL_MAP_PAGESIZE;
    size_t tail_size = segment_size - tail_offset;
    NaClLog(4, "NaClElfFileMapSegment: pread tail\n");
    read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                PRead)(ndp,
                       (void *) (paddr + tail_offset),
                       tail_size,
                       (nacl_off64_t) (file_offset + tail_offset));
    NaClPerfCounterMark(&time_mmap_segment, "PRead tail");
    NaClPerfCounterIntervalLast(&time_mmap_segment);
    if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != tail_size) {
      NaClLog(LOG_ERROR,
              "NaClElfFileMapSegment: pread load of page tail failed\n");
      return LOAD_SEGMENT_BAD_PARAM;
    }
    rounded_filesz -= NACL_MAP_PAGESIZE;
  }
  /* mmap in */
  if (rounded_filesz == 0) {
    NaClLog(4,
            "NaClElfFileMapSegment: no pages to map, probably because"
            " the segment was a partial page, so it was processed by"
            " reading.\n");
  } else {
    NaClLog(4,
            "NaClElfFileMapSegment: mapping %"NACL_PRIuS" (0x%"
            NACL_PRIxS") bytes to"
            " address 0x%"NACL_PRIxPTR", position %"
            NACL_PRIdElf_Off" (0x%"NACL_PRIxElf_Off")\n",
            rounded_filesz, rounded_filesz,
            paddr,
            file_offset, file_offset);
    image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
                      Map)(ndp,
                           nap->effp,
                           (void *) paddr,
                           rounded_filesz,
                           mmap_prot,
                           NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
                           file_offset);
    NaClPerfCounterMark(&time_mmap_segment, "MapFinal");
    NaClPerfCounterIntervalLast(&time_mmap_segment);
    if (image_sys_addr != paddr) {
      NaClLog(LOG_FATAL,
              ("NaClElfFileMapSegment: map to 0x%"NACL_PRIxPTR" (prot %x) "
               "failed: got 0x%"NACL_PRIxPTR"\n"),
              paddr, mmap_prot, image_sys_addr);
    }
    /* Tell Valgrind that we've mapped a segment of nacl_file. */
    NaClFileMappingForValgrind(paddr, rounded_filesz, file_offset);
  }
  return LOAD_OK;
}
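The partial-page handling above reduces to a little alignment arithmetic; a worked instance (assuming NACL_MAP_PAGESIZE is 0x10000, i.e. 64K, as in NaCl):

/*
 * Worked example of the tail arithmetic, for a segment of 0x24100 bytes:
 *
 *   rounded_filesz = NaClRoundAllocPage(0x24100) = 0x30000
 *   tail_offset    = 0x30000 - 0x10000          = 0x20000
 *   tail_size      = 0x24100 - 0x20000          = 0x04100
 *
 * so the first 0x20000 bytes are mmapped, and the final 0x4100 bytes
 * are pread into the last (partial) 64K allocation page.
 */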
Code example #7
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode          retval = LOAD_INTERNAL;
  uintptr_t                   dynamic_text_size;
  struct NaClDescImcShm       *shm = NULL;
  uintptr_t                   shm_vaddr_base;
  int                         mmap_protections;
  uintptr_t                   mmap_ret;

  uintptr_t                   shm_upper_bound;
  uintptr_t                   text_sysaddr;

  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            "  rodata / data segments not allocation aligned\n");
    NaClLog(4,
            " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  if (0 == shm_upper_bound) {
    shm_upper_bound = shm_vaddr_base;
  }

  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    NaClLog(4, "NaClMakeDynamicTextShared: shm object allocation failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    retval = LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
    goto cleanup;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaClPageFree((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
    NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  nap->dynamic_page_bitmap =
    BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  nap->text_shm = &shm->base;
  retval = LOAD_OK;

 cleanup:
  if (LOAD_OK != retval) {
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }

  return retval;
}
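An illustrative POSIX sketch of the Unix behavior described in the comment above (not NaCl code): reserve address space with PROT_NONE, then raise the permissions later with mprotect(). On Windows, the analogous VirtualProtect() cannot exceed the protections the region was created with, hence the different initial protections in the function above.

/* Reserve with PROT_NONE, then widen permissions with mprotect(). */
#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
  size_t len = 0x10000;
  void *p = mmap(NULL, len, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  /* any access here would fault; first make the pages writable */
  assert(0 == mprotect(p, len, PROT_READ | PROT_WRITE));
  memset(p, 0, len);  /* now legal */
  munmap(p, len);
  return 0;
}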
Code example #8
/*
 * Maps a writable version of the code at [offset, offset+size) and returns a
 * pointer to the new mapping. Internally caches the last mapping between
 * calls. Pass offset=0, size=0 to clear the cache.
 * Caller must hold nap->dynamic_load_mutex.
 */
static uintptr_t CachedMapWritableText(struct NaClApp *nap,
                                       uint32_t offset,
                                       uint32_t size) {
  /*
   * The nap->* variables used in this function can be in two states:
   *
   * 1)
   * nap->dynamic_mapcache_size == 0
   * nap->dynamic_mapcache_ret == 0
   *
   * Initial state, nothing is cached.
   *
   * 2)
   * nap->dynamic_mapcache_size != 0
   * nap->dynamic_mapcache_ret != 0
   *
   * We have a cached mmap result stored, that must be unmapped.
   */
  struct NaClDesc            *shm = nap->text_shm;

  if (offset != nap->dynamic_mapcache_offset
          || size != nap->dynamic_mapcache_size) {
    /*
     * cache miss, first clear the old cache if needed
     */
    if (nap->dynamic_mapcache_size > 0) {
      NaClDescUnmapUnsafe(shm, (void *) nap->dynamic_mapcache_ret,
                          nap->dynamic_mapcache_size);
      nap->dynamic_mapcache_offset = 0;
      nap->dynamic_mapcache_size = 0;
      nap->dynamic_mapcache_ret = 0;
    }

    /*
     * update the cached version
     */
    if (size > 0) {
      uint32_t current_page_index;
      uint32_t end_page_index;

      uintptr_t mapping =
          (*((struct NaClDescVtbl const *) shm->base.vtbl)->
           Map)(shm,
                NaClDescEffectorTrustedMem(),
                NULL,
                size,
                NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                NACL_ABI_MAP_SHARED,
                offset);
      if (NaClPtrIsNegErrno(&mapping)) {
        return 0;
      }

      /*
       * To reduce the number of mprotect() system calls, we coalesce
       * MakeDynamicCodePagesVisible() calls for adjacent pages that
       * have not yet been allocated.
       */
      current_page_index = offset / NACL_MAP_PAGESIZE;
      end_page_index = (offset + size) / NACL_MAP_PAGESIZE;
      while (current_page_index < end_page_index) {
        uint32_t start_page_index = current_page_index;
        /* Find the end of this block of unallocated pages. */
        while (current_page_index < end_page_index &&
               !BitmapIsBitSet(nap->dynamic_page_bitmap, current_page_index)) {
          current_page_index++;
        }
        if (current_page_index > start_page_index) {
          uintptr_t writable_addr =
              mapping + (start_page_index * NACL_MAP_PAGESIZE - offset);
          MakeDynamicCodePagesVisible(nap, start_page_index, current_page_index,
                                      (uint8_t *) writable_addr);
        }
        current_page_index++;
      }

      nap->dynamic_mapcache_offset = offset;
      nap->dynamic_mapcache_size = size;
      nap->dynamic_mapcache_ret = mapping;
    }
  }
  return nap->dynamic_mapcache_ret;
}
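The coalescing loop above generalizes to any bit vector. A toy standalone sketch (hypothetical helper, not NaCl code) that invokes a callback once per maximal run of clear bits, mirroring how MakeDynamicCodePagesVisible is batched over unallocated pages:

/* Toy run-coalescing sketch over a plain byte-array bitmap. */
#include <stdint.h>

static void ForEachClearRun(const unsigned char *bitmap,
                            uint32_t begin, uint32_t end,
                            void (*visit)(uint32_t start, uint32_t stop)) {
  uint32_t i = begin;
  while (i < end) {
    uint32_t start = i;
    /* extend the run while bits stay clear */
    while (i < end && !(bitmap[i / 8] & (1u << (i % 8)))) {
      i++;
    }
    if (i > start) {
      visit(start, i);  /* one call covers the whole run [start, i) */
    }
    i++;  /* skip the set bit that ended the run */
  }
}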