Example #1
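/*
 * Close the shm-backed Gio stream: unmap the current window (if any),
 * then drop the reference on the underlying shm descriptor.  Returns 0
 * on success; on a second close, logs an error, sets errno to EIO, and
 * returns -1.
 */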
static int NaClGioShmClose(struct Gio *vself) {
  struct NaClGioShm *self = (struct NaClGioShm *) vself;

  if (NULL != self->cur_window) {
    NaClDescUnmapUnsafe(self->shmp, (void *) self->cur_window,
                        NACL_MAP_PAGESIZE);
  }
  self->cur_window = NULL;

  if (NULL == self->shmp) {
    NaClLog(LOG_ERROR, "NaClGioShmClose: double close detected\n");
    errno = EIO;
    return -1;
  }

  NaClDescUnref(self->shmp);
  self->shmp = NULL;  /* double close will fault */
  return 0;
}
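A minimal, self-contained sketch of the same close-once idiom follows; struct Resource, ResourceClose, and the handle field are hypothetical stand-ins, not NaCl types. The point is the one made above: the owning pointer is nulled on the first close, so a second close is detected and reported instead of releasing the resource twice.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct Resource {
  void *handle;  /* non-NULL while the resource is open */
};

static int ResourceClose(struct Resource *r) {
  if (NULL == r->handle) {
    /* Second close: report it rather than freeing twice. */
    fprintf(stderr, "ResourceClose: double close detected\n");
    errno = EIO;
    return -1;
  }
  free(r->handle);
  r->handle = NULL;  /* a later close (or use) is caught, not a double free */
  return 0;
}

int main(void) {
  struct Resource r;
  r.handle = malloc(16);
  printf("first close:  %d\n", ResourceClose(&r));  /* 0 */
  printf("second close: %d\n", ResourceClose(&r));  /* -1, errno == EIO */
  return 0;
}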
Example #2
/*
 * Release the current window if it exists, then map in the window at
 * the provided new_win_offset.  This is akin to filbuf.
 *
 * Preconditions: 0 == (new_win_offset & (NACL_MAP_PAGESIZE - 1))
 *                new_win_offset < self->shm_sz
 */
static int NaClGioShmSetWindow(struct NaClGioShm  *self,
                               size_t             new_win_offset) {
  uintptr_t map_result;
  size_t    actual_len;

  NaClLog(4,
          "NaClGioShmSetWindow: new_win_offset 0x%"NACL_PRIxS"\n",
          new_win_offset);
  if (0 != (new_win_offset & (NACL_MAP_PAGESIZE - 1))) {
    NaClLog(LOG_FATAL,
            ("NaClGioShmSetWindow: internal error, requested"
             " new window offset 0x%"NACL_PRIxS" is not aligned.\n"),
            new_win_offset);
  }

  if (new_win_offset >= self->shm_sz) {
    NaClLog(LOG_FATAL,
            ("NaClGioShmSetWindow: setting window beyond end of shm object"
             " offset 0x%"NACL_PRIxS", size 0x%"NACL_PRIxS"\n"),
            new_win_offset, self->shm_sz);
  }

  if (NULL != self->cur_window) {
    NaClDescUnmapUnsafe(self->shmp, (void *) self->cur_window,
                        self->window_size);
  }
  self->cur_window = NULL;
  self->window_size = 0;

  /*
   * The Map virtual function will NOT pad space beyond the end of the
   * memory mapping object with zero-filled pages.  This is done for
   * user code in nacl_syscall_common.c(NaClCommonSysMmap), and the
   * Map virtual function exposes the behavioral inconsistencies wrt
   * allowing but ignoring mapping an offset beyond the end of file
   * (linux) versus disallowing the mapping (MapViewOfFileEx).
   *
   * Here, we know the actual size of the shm object, and can deal
   * with it.
   */
  actual_len = GIO_SHM_WINDOWSIZE;
  if (actual_len > self->shm_sz - new_win_offset) {
    actual_len = self->shm_sz - new_win_offset;
  }
  map_result =
      (*((struct NaClDescVtbl const *) self->shmp->base.vtbl)->
       Map)(self->shmp,
            NaClDescEffectorTrustedMem(),
            (void *) NULL,
            actual_len,
            NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
            NACL_ABI_MAP_SHARED,
            (nacl_off64_t) new_win_offset);
  NaClLog(4,
          "NaClGioShmSetWindow: Map returned 0x%"NACL_PRIxPTR"\n",
          map_result);
  if (NaClPtrIsNegErrno(&map_result)) {
    return 0;
  }

  self->cur_window = (char *) map_result;
  self->window_size = actual_len;
  self->window_offset = new_win_offset;

  return 1;
}
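The alignment precondition and the end-of-object clamp in NaClGioShmSetWindow can be isolated in a small standalone sketch; MAP_PAGESIZE, SHM_WINDOWSIZE, and WindowMapLength below are illustrative stand-ins with made-up values, not the NaCl definitions.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAP_PAGESIZE   0x10000u            /* 64K allocation granularity (stand-in) */
#define SHM_WINDOWSIZE (2 * MAP_PAGESIZE)  /* window size used for mappings (stand-in) */

/*
 * Number of bytes to actually map for a window starting at win_offset
 * into an shm object of shm_sz bytes.  Mirrors the clamp in
 * NaClGioShmSetWindow: never map past the end of the object, since the
 * Map virtual function does not pad with zero-filled pages.
 */
static size_t WindowMapLength(size_t shm_sz, size_t win_offset) {
  size_t len = SHM_WINDOWSIZE;

  assert(0 == (win_offset & (MAP_PAGESIZE - 1)));  /* precondition: aligned */
  assert(win_offset < shm_sz);                     /* precondition: in range */

  if (len > shm_sz - win_offset) {
    len = shm_sz - win_offset;  /* clamp the last window to the object size */
  }
  return len;
}

int main(void) {
  size_t shm_sz = 0x38000;  /* 3.5 allocation pages */

  /* Interior window: full window size. */
  printf("0x%zx\n", WindowMapLength(shm_sz, 0));                 /* 0x20000 */
  /* Last window: clamped to the remaining half page. */
  printf("0x%zx\n", WindowMapLength(shm_sz, 3 * MAP_PAGESIZE));  /* 0x8000 */
  return 0;
}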
Example #3
/*
 * Attempt to map into the NaClApp object nap from the NaCl descriptor
 * ndp an ELF segment of type p_flags that starts at file_offset for
 * segment_size bytes, to memory starting at paddr (system address).
 * If it is a code segment, make a scratch mapping and run
 * validation in readonly_text mode -- if it succeeds, we map into the
 * target address; if it fails, we return failure so that pread-based
 * loading can proceed.  For rodata and data segments, less checking
 * is needed.  In the text and data case, the end of the segment may
 * not land on a NACL_MAP_PAGESIZE boundary; when this occurs, we will
 * map in all whole NACL_MAP_PAGESIZE chunks, and pread in the tail
 * partial chunk.
 *
 * Returns: LOAD_OK, LOAD_STATUS_UNKNOWN, other error codes.
 *
 * LOAD_OK             -- if the segment has been fully handled
 * LOAD_STATUS_UNKNOWN -- if pread-based fallback is required
 * other error codes   -- if a fatal error occurs, and the caller
 *                        should propagate up
 *
 * See NaClSysMmapIntern in nacl_syscall_common.c for corresponding
 * mmap syscall where PROT_EXEC allows shared libraries to be mapped
 * into dynamic code space.
 */
static NaClErrorCode NaClElfFileMapSegment(struct NaClApp *nap,
                                           struct NaClDesc *ndp,
                                           Elf_Word p_flags,
                                           Elf_Off file_offset,
                                           Elf_Off segment_size,
                                           uintptr_t vaddr,
                                           uintptr_t paddr) {
  size_t rounded_filesz;       /* 64k rounded */
  int mmap_prot = 0;
  uintptr_t image_sys_addr;
  NaClValidationStatus validator_status = NaClValidationFailed;
  struct NaClValidationMetadata metadata;
  int read_last_page_if_partial_allocation_page = 1;
  ssize_t read_ret;
  struct NaClPerfCounter time_mmap_segment;
  NaClPerfCounterCtor(&time_mmap_segment, "NaClElfFileMapSegment");

  rounded_filesz = NaClRoundAllocPage(segment_size);

  NaClLog(4,
          "NaClElfFileMapSegment: checking segment flags 0x%x"
          " to determine map checks\n",
          p_flags);
  /*
   * Is this the text segment?  If so, map into scratch memory and
   * run validation (possibly cached result) with !stubout_mode,
   * readonly_text.  If validator says it's okay, map directly into
   * target location with NACL_ABI_PROT_READ|_EXEC.  If anything
   * failed, fall back to PRead.  NB: the assumption is that there
   * is only one PT_LOAD with PF_R|PF_X segment; this assumption is
   * enforced by phdr seen_seg checks above in
   * NaClElfImageValidateProgramHeaders.
   *
   * After this function returns, we will be setting memory protection
   * in NaClMemoryProtection, so the actual memory protection used is
   * immaterial.
   *
   * For rodata and data/bss, we mmap with NACL_ABI_PROT_READ or
   * NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE as appropriate,
   * without doing validation.  There is no fallback to PRead, since
   * we don't validate the contents.
   */
  switch (p_flags) {
    case PF_R | PF_X:
      NaClLog(4,
              "NaClElfFileMapSegment: text segment and"
              " file is safe for mmap\n");
      if (NACL_VTBL(NaClDesc, ndp)->typeTag != NACL_DESC_HOST_IO) {
        NaClLog(4, "NaClElfFileMapSegment: not supported type, got %d\n",
                NACL_VTBL(NaClDesc, ndp)->typeTag);
        return LOAD_STATUS_UNKNOWN;
      }
      /*
       * Unlike the mmap case, we do not re-run validation to
       * allow patching here; instead, we handle validation
       * failure by going to the pread_fallback case.  In the
       * future, we should consider doing an in-place mapping and
       * allowing HLT patch validation, which should be cheaper
       * since those pages that do not require patching (hopefully
       * majority) will remain file-backed and not require swap
       * space, even if we had to fault in every page.
       */
      NaClLog(1, "NaClElfFileMapSegment: mapping for validation\n");
      NaClPerfCounterMark(&time_mmap_segment, "PreMap");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
                        Map)(ndp,
                             NaClDescEffectorTrustedMem(),
                             (void *) NULL,
                             rounded_filesz,
                             NACL_ABI_PROT_READ,
                             NACL_ABI_MAP_PRIVATE,
                             file_offset);
      NaClPerfCounterMark(&time_mmap_segment, "MapForValidate");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      if (NaClPtrIsNegErrno(&image_sys_addr)) {
        NaClLog(LOG_INFO,
                "NaClElfFileMapSegment: Could not make scratch mapping,"
                " falling back to reading\n");
        return LOAD_STATUS_UNKNOWN;
      }
      /* ask validator / validation cache */
      NaClMetadataFromNaClDescCtor(&metadata, ndp);
      CHECK(segment_size == nap->static_text_end - NACL_TRAMPOLINE_END);
      validator_status = NACL_FI_VAL(
          "ELF_LOAD_FORCE_VALIDATION_STATUS",
          enum NaClValidationStatus,
          (*nap->validator->
           Validate)(vaddr,
                     (uint8_t *) image_sys_addr,
                     segment_size,  /* actual size */
                     0,  /* stubout_mode: no */
                     1,  /* readonly_text: yes */
                     nap->cpu_features,
                     &metadata,
                     nap->validation_cache));
      NaClPerfCounterMark(&time_mmap_segment, "ValidateMapped");
      NaClPerfCounterIntervalLast(&time_mmap_segment);
      NaClLog(3, "NaClElfFileMapSegment: validator_status %d\n",
              validator_status);
      NaClMetadataDtor(&metadata);
      /*
       * Remove scratch mapping, then map directly into untrusted
       * address space or pread.
       */
      NaClDescUnmapUnsafe(ndp, (void *) image_sys_addr,
                          rounded_filesz);
      NACL_MAKE_MEM_UNDEFINED((void *) paddr, rounded_filesz);

      if (NaClValidationSucceeded != validator_status) {
        NaClLog(3,
                ("NaClElfFileMapSegment: readonly_text validation for mmap"
                 " failed.  Will retry validation allowing HALT stubbing out"
                 " of unsupported instruction extensions.\n"));
        return LOAD_STATUS_UNKNOWN;
      }

      NaClLog(1, "NaClElfFileMapSegment: mapping into code space\n");
      /*
       * Windows appears to not allow RWX mappings.  This interferes
       * with HALT_SLED and having to HALT pad the last page.  We
       * allow partial code pages, and
       * read_last_page_if_partial_allocation_page ensures that the
       * last page is writable, so we will be able to write HALT
       * instructions as needed.
       */
      mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC;
      /*
       * NB: the log string is used by tests/mmap_main_nexe/nacl.scons
       * and must be logged at a level that is less than or equal to
       * the requested verbosity level there.
       */
      NaClLog(1, "NaClElfFileMapSegment: EXERCISING MMAP LOAD PATH\n");
      nap->main_exe_prevalidated = 1;
      break;

    case PF_R | PF_W:
      /* read-write (initialized data) */
      mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
      /*
       * NB: the partial page processing will result in zeros
       * following the initialized data, so that the BSS will be zero.
       * On a typical system, this page is mapped in and the BSS
       * region is memset to zero, which means that this partial page
       * is faulted in.  Rather than saving a syscall (pread) and
       * faulting it in, we just use the same code path as for code,
       * which is (slightly) simpler.
       */
      break;

    case PF_R:
      /* read-only */
      mmap_prot = NACL_ABI_PROT_READ;
      /*
       * For rodata, we allow mapping in "garbage" past a partial
       * page; this potentially eliminates a disk I/O operation
       * (if data section has no partial page), possibly delaying
       * disk spin-up if the code was in the validation cache.
       * And it saves another 64kB of swap.
       */
      read_last_page_if_partial_allocation_page = 0;
      break;

    default:
      NaClLog(LOG_FATAL, "NaClElfFileMapSegment: unexpected p_flags %d\n",
              p_flags);
  }
  if (rounded_filesz != segment_size &&
      read_last_page_if_partial_allocation_page) {
    uintptr_t tail_offset = rounded_filesz - NACL_MAP_PAGESIZE;
    size_t tail_size = segment_size - tail_offset;
    NaClLog(4, "NaClElfFileMapSegment: pread tail\n");
    read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                PRead)(ndp,
                       (void *) (paddr + tail_offset),
                       tail_size,
                       (nacl_off64_t) (file_offset + tail_offset));
    NaClPerfCounterMark(&time_mmap_segment, "PRead tail");
    NaClPerfCounterIntervalLast(&time_mmap_segment);
    if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != tail_size) {
      NaClLog(LOG_ERROR,
              "NaClElfFileMapSegment: pread load of page tail failed\n");
      return LOAD_SEGMENT_BAD_PARAM;
    }
    rounded_filesz -= NACL_MAP_PAGESIZE;
  }
  /* mmap in */
  if (rounded_filesz == 0) {
    NaClLog(4,
            "NaClElfFileMapSegment: no pages to map, probably because"
            " the segment was a partial page, so it was processed by"
            " reading.\n");
  } else {
    NaClLog(4,
            "NaClElfFileMapSegment: mapping %"NACL_PRIuS" (0x%"
            NACL_PRIxS") bytes to"
            " address 0x%"NACL_PRIxPTR", position %"
            NACL_PRIdElf_Off" (0x%"NACL_PRIxElf_Off")\n",
            rounded_filesz, rounded_filesz,
            paddr,
            file_offset, file_offset);
    image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
                      Map)(ndp,
                           nap->effp,
                           (void *) paddr,
                           rounded_filesz,
                           mmap_prot,
                           NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
                           file_offset);
    NaClPerfCounterMark(&time_mmap_segment, "MapFinal");
    NaClPerfCounterIntervalLast(&time_mmap_segment);
    if (image_sys_addr != paddr) {
      NaClLog(LOG_FATAL,
              ("NaClElfFileMapSegment: map to 0x%"NACL_PRIxPTR" (prot %x) "
               "failed: got 0x%"NACL_PRIxPTR"\n"),
              paddr, mmap_prot, image_sys_addr);
    }
    /* Tell Valgrind that we've mapped a segment of nacl_file. */
    NaClFileMappingForValgrind(paddr, rounded_filesz, file_offset);
  }
  return LOAD_OK;
}
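The partial-page split in the tail-pread path above reduces to a little arithmetic. This standalone sketch shows it with a made-up segment size; RoundAllocPage and MAP_PAGESIZE are simplified stand-ins for NaClRoundAllocPage and NACL_MAP_PAGESIZE.

#include <stddef.h>
#include <stdio.h>

#define MAP_PAGESIZE 0x10000u  /* 64K allocation page (stand-in) */

/* Round sz up to the next allocation-page boundary. */
static size_t RoundAllocPage(size_t sz) {
  return (sz + MAP_PAGESIZE - 1) & ~(size_t) (MAP_PAGESIZE - 1);
}

int main(void) {
  size_t segment_size = 0x24120;  /* two whole pages plus a partial page */
  size_t rounded = RoundAllocPage(segment_size);  /* 0x30000 */

  if (rounded != segment_size) {
    /*
     * The last allocation page is only partially backed by the file:
     * pread the tail into place and mmap only the whole pages, as
     * NaClElfFileMapSegment does when
     * read_last_page_if_partial_allocation_page is set.
     */
    size_t tail_offset = rounded - MAP_PAGESIZE;    /* 0x20000 */
    size_t tail_size = segment_size - tail_offset;  /* 0x4120 */
    printf("pread 0x%zx bytes at segment offset 0x%zx\n",
           tail_size, tail_offset);
    rounded -= MAP_PAGESIZE;  /* leave only whole pages for the mmap */
  }
  printf("mmap 0x%zx bytes\n", rounded);  /* 0x20000 */
  return 0;
}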
Example #4
/*
 * Maps a writable version of the code at [offset, offset+size) and returns a
 * pointer to the new mapping. Internally caches the last mapping between
 * calls. Pass offset=0, size=0 to clear the cache.
 * Caller must hold nap->dynamic_load_mutex.
 */
static uintptr_t CachedMapWritableText(struct NaClApp *nap,
                                       uint32_t offset,
                                       uint32_t size) {
  /*
   * The nap->* variables used in this function can be in two states:
   *
   * 1)
   * nap->dynamic_mapcache_size == 0
   * nap->dynamic_mapcache_ret == 0
   *
   * Initial state, nothing is cached.
   *
   * 2)
   * nap->dynamic_mapcache_size != 0
   * nap->dynamic_mapcache_ret != 0
   *
   * We have a cached mmap result stored that must be unmapped.
   */
  struct NaClDesc            *shm = nap->text_shm;

  if (offset != nap->dynamic_mapcache_offset
          || size != nap->dynamic_mapcache_size) {
    /*
     * cache miss, first clear the old cache if needed
     */
    if (nap->dynamic_mapcache_size > 0) {
      NaClDescUnmapUnsafe(shm, (void *) nap->dynamic_mapcache_ret,
                          nap->dynamic_mapcache_size);
      nap->dynamic_mapcache_offset = 0;
      nap->dynamic_mapcache_size = 0;
      nap->dynamic_mapcache_ret = 0;
    }

    /*
     * update the cached version
     */
    if (size > 0) {
      uint32_t current_page_index;
      uint32_t end_page_index;

      uintptr_t mapping = (*((struct NaClDescVtbl const *)
            shm->base.vtbl)->
              Map)(shm,
                   NaClDescEffectorTrustedMem(),
                   NULL,
                   size,
                   NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                   NACL_ABI_MAP_SHARED,
                   offset);
      if (NaClPtrIsNegErrno(&mapping)) {
        return 0;
      }

      /*
       * To reduce the number of mprotect() system calls, we coalesce
       * MakeDynamicCodePagesVisible() calls for adjacent pages that
       * have not yet been allocated.
       */
      current_page_index = offset / NACL_MAP_PAGESIZE;
      end_page_index = (offset + size) / NACL_MAP_PAGESIZE;
      while (current_page_index < end_page_index) {
        uint32_t start_page_index = current_page_index;
        /* Find the end of this block of unallocated pages. */
        while (current_page_index < end_page_index &&
               !BitmapIsBitSet(nap->dynamic_page_bitmap, current_page_index)) {
          current_page_index++;
        }
        if (current_page_index > start_page_index) {
          uintptr_t writable_addr =
              mapping + (start_page_index * NACL_MAP_PAGESIZE - offset);
          MakeDynamicCodePagesVisible(nap, start_page_index, current_page_index,
                                      (uint8_t *) writable_addr);
        }
        current_page_index++;
      }

      nap->dynamic_mapcache_offset = offset;
      nap->dynamic_mapcache_size = size;
      nap->dynamic_mapcache_ret = mapping;
    }
  }
  return nap->dynamic_mapcache_ret;
}
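The coalescing loop in CachedMapWritableText is worth seeing in isolation: it walks the allocation bitmap and issues one call per maximal run of still-unallocated pages. In this sketch, BitIsSet and MakePagesVisible are hypothetical stand-ins for NaCl's BitmapIsBitSet and MakeDynamicCodePagesVisible.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for NaCl's BitmapIsBitSet. */
static bool BitIsSet(const uint8_t *bitmap, uint32_t index) {
  return (bitmap[index / 8] >> (index % 8)) & 1;
}

/* Stand-in for MakeDynamicCodePagesVisible: just report the run. */
static void MakePagesVisible(uint32_t start, uint32_t end) {
  printf("make pages [%u, %u) visible in one call\n", start, end);
}

/*
 * Walk pages [first, last) and issue one MakePagesVisible call per
 * maximal run of pages whose bitmap bit is still clear, mirroring the
 * coalescing loop in CachedMapWritableText.
 */
static void CoalesceUnallocated(const uint8_t *bitmap,
                                uint32_t first, uint32_t last) {
  uint32_t current = first;
  while (current < last) {
    uint32_t start = current;
    while (current < last && !BitIsSet(bitmap, current)) {
      current++;
    }
    if (current > start) {
      MakePagesVisible(start, current);
    }
    current++;  /* skip the already-allocated page that ended the run */
  }
}

int main(void) {
  uint8_t bitmap[1] = { 0x24 };       /* pages 2 and 5 already allocated */
  CoalesceUnallocated(bitmap, 0, 8);  /* prints runs [0,2), [3,5), [6,8) */
  return 0;
}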