Example #1
static Bool SerializeAllProcessors(void) {
  /*
   * We rely on the OS mprotect() call to issue interprocessor interrupts,
   * which will cause other processors to execute an IRET, which is
   * serializing.
   *
   * This code is based on two main considerations:
   * 1. Only switching the page from exec to non-exec state is guaranteed
   *    to invalidate the processors' instruction caches.
   * 2. It's bad to have a page that is both writable and executable,
   *    even if it is never both at the same time.
   */

  int size = NACL_MAP_PAGESIZE;
  if (NULL == g_squashybuffer) {
    if ((0 != NaClPageAlloc(&g_squashybuffer, size)) ||
        (0 != NaClMprotect(g_squashybuffer, size, PROT_READ|PROT_WRITE))) {
      NaClLog(0,
              ("SerializeAllProcessors: initial squashybuffer allocation"
               " failed\n"));
      return FALSE;
    }

    NaClFillMemoryRegionWithHalt(g_squashybuffer, size);
    g_firstbyte = *(char *) g_squashybuffer;
    NaClLog(0, "SerializeAllProcessors: g_firstbyte is %d\n", g_firstbyte);
  }

  if ((0 != NaClMprotect(g_squashybuffer, size, PROT_READ|PROT_EXEC))) {
    NaClLog(0,
            ("SerializeAllProcessors: interprocessor interrupt"
             " generation failed: could not reverse shield polarity (1)\n"));
    return FALSE;
  }
  /*
   * Perform a read to ensure that any lazy kernel page-table update
   * cannot defeat this hack.
   */
  if (*(char *) g_squashybuffer != g_firstbyte) {
    NaClLog(0,
            ("SerializeAllProcessors: interprocessor interrupt"
             " generation failed: could not reverse shield polarity (2)\n"));
    NaClLog(0, "SerializeAllProcessors: g_firstbyte is %d\n", g_firstbyte);
    NaClLog(0, "SerializeAllProcessors: *g_squashybuffer is %d\n",
            *(char *) g_squashybuffer);
    return FALSE;
  }
  /*
   * We would like to set the protection to PROT_NONE, but on Windows
   * there's an ugly hack in NaClMprotect where PROT_NONE can result
   * in MEM_DECOMMIT, causing the contents of the page(s) to be lost!
   */
  if (0 != NaClMprotect(g_squashybuffer, size, PROT_READ)) {
    NaClLog(0,
            ("SerializeAllProcessors: interprocessor interrupt"
             " generation failed: could not reverse shield polarity (3)\n"));
    return FALSE;
  }
  return TRUE;
}
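A minimal sketch (not from the NaCl source) of how a code-patching path might use SerializeAllProcessors(): after rewriting instruction bytes, force every processor to serialize so that no stale prefetched copy of the old code survives. ReplaceCodeBytes() and its callers are hypothetical.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical caller: patch code in place, then serialize all CPUs. */
static Bool ReplaceCodeBytes(uint8_t *dest, const uint8_t *src, size_t size) {
  memcpy(dest, src, size);          /* rewrite the instruction bytes */
  return SerializeAllProcessors();  /* flush other processors' pipelines */
}

Example #2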
void TestDEPCheckFailurePath(void) {
  size_t size = NACL_PAGESIZE;
  void *page;
  CHECK(NaClPageAlloc(&page, size) == 0);

  CHECK(NaClMprotect(page, size, PROT_READ | PROT_WRITE | PROT_EXEC) == 0);
  CHECK(!NaClAttemptToExecuteDataAtAddr(page, size));

  /* DEP is not guaranteed to work on x86-32. */
  if (!(NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86 && NACL_BUILD_SUBARCH == 32)) {
    CHECK(NaClMprotect(page, size, PROT_READ | PROT_WRITE) == 0);
    CHECK(NaClAttemptToExecuteDataAtAddr(page, size));
  }

  NaClPageFree(page, size);
}
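A POSIX-only sketch of the probe primitive that a helper like NaClAttemptToExecuteDataAtAddr() can be built on (hypothetical; the real helper must also handle Windows structured exception handling). It assumes x86, where 0xC3 encodes RET, and that the page is writable when probed, as in the test above:

#include <setjmp.h>
#include <signal.h>
#include <string.h>

static sigjmp_buf g_probe_env;

static void ProbeSegvHandler(int sig) {
  (void) sig;
  siglongjmp(g_probe_env, 1);
}

/* Returns 1 if executing from |page| faulted (DEP enforced), else 0. */
static int AttemptToExecuteData(void *page) {
  int faulted;
  void (*func)(void);
  ((unsigned char *) page)[0] = 0xC3;  /* x86 RET */
  memcpy(&func, &page, sizeof func);   /* avoid a direct object->func cast */
  signal(SIGSEGV, ProbeSegvHandler);
  if (0 == sigsetjmp(g_probe_env, 1)) {
    func();       /* jumps to the RET we wrote; returns if execution works */
    faulted = 0;
  } else {
    faulted = 1;  /* SIGSEGV was raised: execution of data was blocked */
  }
  signal(SIGSEGV, SIG_DFL);
  return faulted;
}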
Example #3
static void MakeDynamicCodePagesVisible(struct NaClApp *nap,
                                        uint32_t page_index_min,
                                        uint32_t page_index_max,
                                        uint8_t *writable_addr) {
  void *user_addr;
  uint32_t index;
  size_t size = (page_index_max - page_index_min) * NACL_MAP_PAGESIZE;

  for (index = page_index_min; index < page_index_max; index++) {
    CHECK(!BitmapIsBitSet(nap->dynamic_page_bitmap, index));
    BitmapSetBit(nap->dynamic_page_bitmap, index);
  }
  user_addr = (void *) NaClUserToSys(nap, nap->dynamic_text_start
                                     + page_index_min * NACL_MAP_PAGESIZE);

#if NACL_WINDOWS
  NaClUntrustedThreadsSuspendAll(nap, /* save_registers= */ 0);

  /*
   * The VirtualAlloc() call here has two effects:
   *
   *  1) It commits the page in the shared memory (SHM) object,
   *     allocating swap space and making the page accessible.  This
   *     affects our writable mapping of the shared memory object too.
   *     Before the VirtualAlloc() call, dereferencing writable_addr
   *     would fault.
   *  2) It changes the page permissions of the mapping to
   *     read+execute.  Since this exposes the page in its unsafe,
   *     non-HLT-filled state, this must be done with untrusted
   *     threads suspended.
   */
  {
    uintptr_t offset;
    for (offset = 0; offset < size; offset += NACL_MAP_PAGESIZE) {
      void *user_page_addr = (char *) user_addr + offset;
      if (VirtualAlloc(user_page_addr, NACL_MAP_PAGESIZE,
                       MEM_COMMIT, PAGE_EXECUTE_READ) != user_page_addr) {
        NaClLog(LOG_FATAL, "MakeDynamicCodePagesVisible: "
                "VirtualAlloc() failed -- probably out of swap space\n");
      }
    }
  }
#endif

  /* Sanity check:  Ensure the page is not already in use. */
  CHECK(*writable_addr == 0);

  NaClFillMemoryRegionWithHalt(writable_addr, size);

#if NACL_WINDOWS
  NaClUntrustedThreadsResumeAll(nap);
#else
  if (NaClMprotect(user_addr, size, PROT_READ | PROT_EXEC) != 0) {
    NaClLog(LOG_FATAL, "MakeDynamicCodePagesVisible: NaClMprotect() failed\n");
  }
#endif
}
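The bitmap bookkeeping above relies on two small helpers. A minimal sketch, assuming a conventional packed-word layout (the real NaCl Bitmap implementation may use a different storage type):

#include <limits.h>
#include <stdint.h>

#define BITS_PER_WORD (sizeof(uint32_t) * CHAR_BIT)

/* Test whether bit |index| is set in |bitmap|. */
static int BitmapIsBitSet(const uint32_t *bitmap, uint32_t index) {
  return 0 != (bitmap[index / BITS_PER_WORD]
               & ((uint32_t) 1 << (index % BITS_PER_WORD)));
}

/* Set bit |index| in |bitmap|. */
static void BitmapSetBit(uint32_t *bitmap, uint32_t index) {
  bitmap[index / BITS_PER_WORD] |= (uint32_t) 1 << (index % BITS_PER_WORD);
}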
Example #4
NaClErrorCode NaClElfImageLoadDynamically(
    struct NaClElfImage *image,
    struct NaClApp *nap,
    struct NaClDesc *ndp,
    struct NaClValidationMetadata *metadata) {
  ssize_t read_ret;
  int segnum;
  for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
    const Elf_Phdr *php = &image->phdrs[segnum];
    Elf_Addr vaddr = php->p_vaddr & ~(NACL_MAP_PAGESIZE - 1);
    Elf_Off offset = php->p_offset & ~(NACL_MAP_PAGESIZE - 1);
    Elf_Off filesz = php->p_offset + php->p_filesz - offset;
    Elf_Off memsz = php->p_offset + php->p_memsz - offset;
    int32_t result;

    /*
     * By rejecting any filesz larger than memsz, we avoid the risk of a
     * malicious ELF object overrunning into the trusted address space when
     * reading data of size "filesz" into a buffer of size "memsz".
     */
    if (filesz > memsz) {
      return LOAD_UNLOADABLE;
    }

    /*
     * We check for PT_LOAD directly rather than using the "loadable"
     * array because we are not using NaClElfImageValidateProgramHeaders()
     * to fill out the "loadable" array for this ELF object.  This ELF
     * object does not have to fit such strict constraints (such as
     * having code at 0x20000), and safety checks are applied by
     * NaClTextDyncodeCreate() and NaClSysMmapIntern().
     */
    if (PT_LOAD != php->p_type) {
      continue;
    }

    if (0 != (php->p_flags & PF_X)) {
      /* Load code segment. */
      /*
       * We make a copy of the code.  This is not ideal given that this
       * code path is used only for loading the IRT, and we could assume
       * that the contents of the irt.nexe file will not change underneath
       * us.  We should be able to mmap() the IRT's code segment instead of
       * copying it.
       * TODO(mseaborn): Reduce the amount of copying here.
       */
      char *code_copy = malloc(filesz);
      if (NULL == code_copy) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: malloc failed\n");
        return LOAD_NO_MEMORY;
      }
      read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                  PRead)(ndp, code_copy, filesz, (nacl_off64_t) offset);
      if (NaClSSizeIsNegErrno(&read_ret) ||
          (size_t) read_ret != filesz) {
        free(code_copy);
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to read code segment\n");
        return LOAD_READ_ERROR;
      }
      if (NULL != metadata) {
        metadata->code_offset = offset;
      }
      result = NaClTextDyncodeCreate(nap, (uint32_t) vaddr,
                                     code_copy, (uint32_t) filesz, metadata);
      free(code_copy);
      if (0 != result) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to load code segment\n");
        return LOAD_UNLOADABLE;
      }
    } else {
      /* Load data segment. */
      void *paddr = (void *) NaClUserToSys(nap, vaddr);
      size_t mapping_size = NaClRoundAllocPage(memsz);
      /*
       * Note that we do not use NACL_ABI_MAP_FIXED because we do not
       * want to silently overwrite any existing mappings, such as the
       * user app's data segment or the stack.  We detect overmapping
       * when mmap chooses not to use the preferred address we supply.
       * (Ideally mmap would provide a MAP_EXCL option for this
       * instead.)
       */
      result = NaClSysMmapIntern(
          nap, (void *) (uintptr_t) vaddr, mapping_size,
          NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
          NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE,
          -1, 0);
      if ((int32_t) vaddr != result) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to map data segment\n");
        return LOAD_UNLOADABLE;
      }
      read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                  PRead)(ndp, paddr, filesz, (nacl_off64_t) offset);
      if (NaClSSizeIsNegErrno(&read_ret) ||
          (size_t) read_ret != filesz) {
        NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                "failed to read data segment\n");
        return LOAD_READ_ERROR;
      }
      /*
       * Note that we do not need to zero the BSS (the region from
       * p_filesz to p_memsz) because it should already be zero
       * filled.  This would not be the case if we were mapping the
       * data segment from the file.
       */

      if (0 == (php->p_flags & PF_W)) {
        /* Handle read-only data segment. */
        int rc = NaClMprotect(paddr, mapping_size, NACL_ABI_PROT_READ);
        if (0 != rc) {
          NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
                  "failed to mprotect read-only data segment\n");
          return LOAD_MPROTECT_FAIL;
        }

        NaClVmmapAddWithOverwrite(&nap->mem_map,
                                  vaddr >> NACL_PAGESHIFT,
                                  mapping_size >> NACL_PAGESHIFT,
                                  NACL_ABI_PROT_READ,
                                  NACL_ABI_MAP_PRIVATE,
                                  NULL,
                                  0,
                                  0);
      }
    }
  }
  return LOAD_OK;
}
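The segment math at the top of the loop truncates p_vaddr and p_offset down to an allocation-page boundary and sizes the read from the rounded-down offset. A worked sketch of that arithmetic with hypothetical helper names (the loader itself uses the inline expressions together with NaClRoundAllocPage()):

#include <stdint.h>

/* Round |addr| down to the start of its allocation page. */
static uint64_t TruncToAllocPage(uint64_t addr) {
  return addr & ~((uint64_t) NACL_MAP_PAGESIZE - 1);
}

/* Round |addr| up to the next allocation-page boundary. */
static uint64_t RoundUpToAllocPage(uint64_t addr) {
  return (addr + NACL_MAP_PAGESIZE - 1) & ~((uint64_t) NACL_MAP_PAGESIZE - 1);
}

/*
 * Example with a 64KiB allocation page (NACL_MAP_PAGESIZE == 0x10000),
 * p_vaddr = 0x21040, p_offset = 0x1040, p_filesz = 0x100:
 *   vaddr  = TruncToAllocPage(0x21040) = 0x20000
 *   offset = TruncToAllocPage(0x1040)  = 0x0
 *   filesz = 0x1040 + 0x100 - 0x0      = 0x1140
 * so the read starts at the page boundary and covers the leading slack.
 */

Example #5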
NaClErrorCode NaClAppLoadFileAslr(struct NaClDesc *ndp,
                                  struct NaClApp *nap,
                                  enum NaClAslrMode aslr_mode) {
  NaClErrorCode       ret = LOAD_INTERNAL;
  NaClErrorCode       subret = LOAD_INTERNAL;
  uintptr_t           rodata_end;
  uintptr_t           data_end;
  uintptr_t           max_vaddr;
  struct NaClElfImage *image = NULL;
  struct NaClPerfCounter  time_load_file;
  struct NaClElfImageInfo info;

  NaClPerfCounterCtor(&time_load_file, "NaClAppLoadFile");

  /* NACL_MAX_ADDR_BITS < 32 */
  if (nap->addr_bits > NACL_MAX_ADDR_BITS) {
    ret = LOAD_ADDR_SPACE_TOO_BIG;
    goto done;
  }

  nap->stack_size = NaClRoundAllocPage(nap->stack_size);

  /* temporary object will be deleted at end of function */
  image = NaClElfImageNew(ndp, &subret);
  if (NULL == image || LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateProgramHeaders(image,
                                              nap->addr_bits,
                                              &info);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  if (nap->initial_nexe_max_code_bytes != 0) {
    size_t code_segment_size = info.static_text_end - NACL_TRAMPOLINE_END;
    if (code_segment_size > nap->initial_nexe_max_code_bytes) {
      NaClLog(LOG_ERROR, "NaClAppLoadFileAslr: "
              "Code segment size (%"NACL_PRIuS" bytes) exceeds limit (%"
              NACL_PRId32" bytes)\n",
              code_segment_size, nap->initial_nexe_max_code_bytes);
      ret = LOAD_CODE_SEGMENT_TOO_LARGE;
      goto done;
    }
  }

  nap->static_text_end = info.static_text_end;
  nap->rodata_start = info.rodata_start;
  rodata_end = info.rodata_end;
  nap->data_start = info.data_start;
  data_end = info.data_end;
  max_vaddr = info.max_vaddr;

  if (0 == nap->data_start) {
    if (0 == nap->rodata_start) {
      if (NaClRoundAllocPage(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE) {
        /*
         * If there is no rodata and no data, make sure that there is
         * space for the halt sled.
         */
        max_vaddr += NACL_MAP_PAGESIZE;
      }
    } else {
      /*
       * No data, but there is rodata.  This means max_vaddr is just
       * where rodata ends.  This might not be at an allocation
       * boundary, and in that case the page would not be writable.
       * Round max_vaddr up to the next allocation boundary so that
       * bss will land in the next writable region.
       */
      ;
    }
    max_vaddr = NaClRoundAllocPage(max_vaddr);
  }
  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   *
   * Memory allocation will use NaClRoundPage(nap->break_addr), but
   * the system notion of break is always an exact address.  Even
   * though we must allocate and make accessible multiples of pages,
   * the linux-style brk system call (which returns current break on
   * failure) permits a non-aligned address as argument.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  NaClLog(4, "Values from NaClElfImageValidateProgramHeaders:\n");
  NaClLog(4, "rodata_start = 0x%08"NACL_PRIxPTR"\n", nap->rodata_start);
  NaClLog(4, "rodata_end   = 0x%08"NACL_PRIxPTR"\n", rodata_end);
  NaClLog(4, "data_start   = 0x%08"NACL_PRIxPTR"\n", nap->data_start);
  NaClLog(4, "data_end     = 0x%08"NACL_PRIxPTR"\n", data_end);
  NaClLog(4, "max_vaddr    = 0x%08"NACL_PRIxPTR"\n", max_vaddr);

  /* We now support only one bundle size.  */
  nap->bundle_size = NACL_INSTR_BLOCK_SIZE;

  nap->initial_entry_pt = NaClElfImageGetEntryPoint(image);
  NaClLogAddressSpaceLayout(nap);

  if (!NaClAddrIsValidEntryPt(nap, nap->initial_entry_pt)) {
    ret = LOAD_BAD_ENTRY;
    goto done;
  }

  subret = NaClCheckAddressSpaceLayoutSanity(nap, rodata_end, data_end,
                                             max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Allocating address space\n");
  NaClPerfCounterMark(&time_load_file, "PreAllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  subret = NaClAllocAddrSpaceAslr(nap, aslr_mode);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "AllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   */
  NaClLog(2, "Loading into memory\n");
  ret = NaClMprotect((void *) (nap->mem_start + NACL_TRAMPOLINE_START),
                     NaClRoundAllocPage(nap->data_end) - NACL_TRAMPOLINE_START,
                     PROT_READ | PROT_WRITE);
  if (0 != ret) {
    NaClLog(LOG_FATAL,
            "NaClAppLoadFile: Failed to make image pages writable. "
            "Error code 0x%x\n",
            ret);
  }
  subret = NaClElfImageLoad(image, ndp, nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "NaClElfImageLoad");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NB: mem_map object has been initialized, but is empty.
   * NaClMakeDynamicTextShared does not touch it.
   *
   * NaClMakeDynamicTextShared also fills the dynamic memory region
   * with the architecture-specific halt instruction.  If/when we use
   * memory mapping to save paging space for the dynamic region and
   * lazily halt-fill the memory as the pages become
   * readable/executable, we must make sure that the *last*
   * NACL_MAP_PAGESIZE chunk is nonetheless mapped and written with
   * halts.
   */
  NaClLog(2,
          ("Replacing gap between static text and"
           " (ro)data with shareable memory\n"));
  subret = NaClMakeDynamicTextShared(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "MakeDynText");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NaClFillEndOfTextRegion fills the padding space after the static
   * text region with halt instructions.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extends to the next page (not the
   * next allocation page).  static_text_end is updated to include the
   * padding.
   */
  NaClFillEndOfTextRegion(nap);

  if (nap->main_exe_prevalidated) {
    NaClLog(2, "Main executable segment hit validation cache and mapped in,"
            " skipping validation.\n");
    subret = LOAD_OK;
  } else {
    NaClLog(2, "Validating image\n");
    subret = NaClValidateImage(nap);
  }
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "ValidateImg");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Initializing arch switcher\n");
  NaClInitSwitchToApp(nap);

  NaClLog(2, "Installing trampoline\n");
  NaClLoadTrampoline(nap, aslr_mode);

  NaClLog(2, "Installing springboard\n");
  NaClLoadSpringboard(nap);

  /*
   * NaClMemoryProtection also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  NaClLog(2, "Applying memory protection\n");
  subret = NaClMemoryProtection(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "NaClAppLoadFile done; ");
  NaClLogAddressSpaceLayout(nap);
  ret = LOAD_OK;
done:
  NaClElfImageDelete(image);

  NaClPerfCounterMark(&time_load_file, "EndLoadFile");
  NaClPerfCounterIntervalTotal(&time_load_file);
  return ret;
}
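A plausible sketch of the entry-point check invoked above, assuming the usual NaCl constraints of instruction-bundle alignment and placement inside static text; the real NaClAddrIsValidEntryPt may enforce more:

/* Hypothetical re-creation; mirrors the constraints described above. */
static int AddrIsValidEntryPt(const struct NaClApp *nap, uintptr_t vaddr) {
  if (0 != (vaddr & (nap->bundle_size - 1))) {
    return 0;  /* entry must start on an instruction-bundle boundary */
  }
  return vaddr < nap->static_text_end;
}

Example #6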
int NaClMakeDispatchAddrs(struct NaClApp *nap) {
  int                   retval = 0;  /* fail */
  int                   error;
  void                  *page_addr = NULL;
  uintptr_t             next_addr;
  uintptr_t             nacl_syscall_addr = 0;
  uintptr_t             get_tls_fast_path1_addr = 0;
  uintptr_t             get_tls_fast_path2_addr = 0;

  NaClLog(2, "Entered NaClMakeDispatchAddrs\n");
  if (0 != nap->nacl_syscall_addr) {
    NaClLog(LOG_ERROR, "dispatch addrs already initialized!\n");
    return 1;
  }

  if (0 != (error = NaClPageAllocRandomized(&page_addr,
                                            NACL_MAP_PAGESIZE))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchAddrs::NaClPageAlloc failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  NaClLog(2, "NaClMakeDispatchAddrs: got addr 0x%"NACL_PRIxPTR"\n",
          (uintptr_t) page_addr);

  if (0 != (error = NaClMprotect(page_addr,
                                 NACL_MAP_PAGESIZE,
                                 PROT_READ | PROT_WRITE))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchAddrs::NaClMprotect r/w failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }

  next_addr = (uintptr_t) page_addr;
  nacl_syscall_addr =
      AddDispatchAddr(&next_addr, (uintptr_t) &NaClSyscallSeg);
  get_tls_fast_path1_addr =
      AddDispatchAddr(&next_addr, (uintptr_t) &NaClGetTlsFastPath1);
  get_tls_fast_path2_addr =
      AddDispatchAddr(&next_addr, (uintptr_t) &NaClGetTlsFastPath2);

  if (0 != (error = NaClMprotect(page_addr, NACL_MAP_PAGESIZE, PROT_READ))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchAddrs::NaClMprotect read-only failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  retval = 1;
 cleanup:
  if (0 == retval) {
    if (NULL != page_addr) {
      NaClPageFree(page_addr, NACL_MAP_PAGESIZE);
      page_addr = NULL;
    }
  } else {
    nap->nacl_syscall_addr = nacl_syscall_addr;
    nap->get_tls_fast_path1_addr = get_tls_fast_path1_addr;
    nap->get_tls_fast_path2_addr = get_tls_fast_path2_addr;
  }
  return retval;
}
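A minimal sketch of the AddDispatchAddr() helper used above, under the assumption that it stores a trusted routine's address in the next pointer-sized slot of the (temporarily writable) dispatch page and returns the slot's address; the real helper may differ in detail:

static uintptr_t AddDispatchAddr(uintptr_t *next_addr, uintptr_t target) {
  uintptr_t slot = *next_addr;
  *(uintptr_t *) slot = target;        /* page is PROT_READ|PROT_WRITE here */
  *next_addr = slot + sizeof(uintptr_t);
  return slot;
}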
Example #7
int32_t NaClSysBrk(struct NaClAppThread *natp,
                   uintptr_t            new_break) {
  struct NaClApp        *nap = natp->nap;
  uintptr_t             break_addr;
  int32_t               rv = -NACL_ABI_EINVAL;
  struct NaClVmmapIter  iter;
  struct NaClVmmapEntry *ent;
  struct NaClVmmapEntry *next_ent;
  uintptr_t             sys_break;
  uintptr_t             sys_new_break;
  uintptr_t             usr_last_data_page;
  uintptr_t             usr_new_last_data_page;
  uintptr_t             last_internal_data_addr;
  uintptr_t             last_internal_page;
  uintptr_t             start_new_region;
  uintptr_t             region_size;

  /*
   * The sysbrk() IRT interface is deprecated and is not enabled for
   * ABI-stable PNaCl pexes, so for security hardening, disable the
   * syscall under PNaCl too.
   */
  if (nap->pnacl_mode)
    return -NACL_ABI_ENOSYS;

  break_addr = nap->break_addr;

  NaClLog(3, "Entered NaClSysBrk(new_break 0x%08"NACL_PRIxPTR")\n",
          new_break);

  sys_new_break = NaClUserToSysAddr(nap, new_break);
  NaClLog(3, "sys_new_break 0x%08"NACL_PRIxPTR"\n", sys_new_break);

  if (kNaClBadAddress == sys_new_break) {
    goto cleanup_no_lock;
  }
  if (NACL_SYNC_OK != NaClMutexLock(&nap->mu)) {
    NaClLog(LOG_ERROR, "Could not get app lock for 0x%08"NACL_PRIxPTR"\n",
            (uintptr_t) nap);
    goto cleanup_no_lock;
  }
  if (new_break < nap->data_end) {
    NaClLog(4, "new_break before data_end (0x%"NACL_PRIxPTR")\n",
            nap->data_end);
    goto cleanup;
  }
  if (new_break <= nap->break_addr) {
    /* freeing memory */
    NaClLog(4, "new_break before break (0x%"NACL_PRIxPTR"); freeing\n",
            nap->break_addr);
    nap->break_addr = new_break;
    break_addr = new_break;
  } else {
    /*
     * See if page containing new_break is in mem_map; if so, we are
     * essentially done -- just update break_addr.  Otherwise, we
     * extend the VM map entry from the page containing the current
     * break to the page containing new_break.
     */

    sys_break = NaClUserToSys(nap, nap->break_addr);

    usr_last_data_page = (nap->break_addr - 1) >> NACL_PAGESHIFT;

    usr_new_last_data_page = (new_break - 1) >> NACL_PAGESHIFT;

    last_internal_data_addr = NaClRoundAllocPage(new_break) - 1;
    last_internal_page = last_internal_data_addr >> NACL_PAGESHIFT;

    NaClLog(4, ("current break sys addr 0x%08"NACL_PRIxPTR", "
                "usr last data page 0x%"NACL_PRIxPTR"\n"),
            sys_break, usr_last_data_page);
    NaClLog(4, "new break usr last data page 0x%"NACL_PRIxPTR"\n",
            usr_new_last_data_page);
    NaClLog(4, "last internal data addr 0x%08"NACL_PRIxPTR"\n",
            last_internal_data_addr);

    if (NULL == NaClVmmapFindPageIter(&nap->mem_map,
                                      usr_last_data_page,
                                      &iter)
        || NaClVmmapIterAtEnd(&iter)) {
      NaClLog(LOG_FATAL, ("current break (0x%08"NACL_PRIxPTR", "
                          "sys 0x%08"NACL_PRIxPTR") "
                          "not in address map\n"),
              nap->break_addr, sys_break);
    }
    ent = NaClVmmapIterStar(&iter);
    NaClLog(4, ("segment containing current break"
                ": page_num 0x%08"NACL_PRIxPTR", npages 0x%"NACL_PRIxS"\n"),
            ent->page_num, ent->npages);
    if (usr_new_last_data_page < ent->page_num + ent->npages) {
      NaClLog(4, "new break within break segment, just bumping addr\n");
      nap->break_addr = new_break;
      break_addr = new_break;
    } else {
      NaClVmmapIterIncr(&iter);
      if (!NaClVmmapIterAtEnd(&iter)
          && ((next_ent = NaClVmmapIterStar(&iter))->page_num
              <= last_internal_page)) {
        /* ran into next segment! */
        NaClLog(4,
                ("new break request of usr address "
                 "0x%08"NACL_PRIxPTR" / usr page 0x%"NACL_PRIxPTR
                 " runs into next region, page_num 0x%"NACL_PRIxPTR", "
                 "npages 0x%"NACL_PRIxS"\n"),
                new_break, usr_new_last_data_page,
                next_ent->page_num, next_ent->npages);
        goto cleanup;
      }
      NaClLog(4,
              "extending segment: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      /* go ahead and extend ent to cover, and make pages accessible */
      start_new_region = (ent->page_num + ent->npages) << NACL_PAGESHIFT;
      ent->npages = (last_internal_page - ent->page_num + 1);
      region_size = (((last_internal_page + 1) << NACL_PAGESHIFT)
                     - start_new_region);
      if (0 != NaClMprotect((void *) NaClUserToSys(nap, start_new_region),
                            region_size,
                            PROT_READ | PROT_WRITE)) {
        NaClLog(LOG_FATAL,
                ("Could not mprotect(0x%08"NACL_PRIxPTR", "
                 "0x%08"NACL_PRIxPTR", "
                 "PROT_READ|PROT_WRITE)\n"),
                start_new_region,
                region_size);
      }
      NaClLog(4, "segment now: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      nap->break_addr = new_break;
      break_addr = new_break;
    }
    /*
     * Zero out memory between old break and new break.
     */
    ASSERT(sys_new_break > sys_break);
    memset((void *) sys_break, 0, sys_new_break - sys_break);
  }

cleanup:
  NaClXMutexUnlock(&nap->mu);
cleanup_no_lock:

  /*
   * This cast is safe because the incoming value (new_break) cannot
   * exceed the user address space--even though its type (uintptr_t)
   * theoretically allows larger values.
   */
  rv = (int32_t) break_addr;

  NaClLog(3, "NaClSysBrk: returning 0x%08"NACL_PRIx32"\n", rv);
  return rv;
}
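A user-side sketch (hypothetical; not NaCl libc code) of how sbrk() can be layered on the contract implemented above, where brk returns the new break on success and the unchanged current break on failure; brk_stub() stands in for the real syscall wrapper:

#include <errno.h>
#include <stdint.h>

extern uintptr_t brk_stub(uintptr_t new_break);  /* hypothetical stub */

static void *SimpleSbrk(intptr_t increment) {
  /* An invalid argument (0) fails, so brk returns the current break. */
  uintptr_t old_break = brk_stub(0);
  uintptr_t want = old_break + (uintptr_t) increment;
  if (brk_stub(want) != want) {
    errno = ENOMEM;                    /* break did not move */
    return (void *) -1;
  }
  return (void *) old_break;
}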