// Map all loadable segments in process' address space.
// This assumes you already called phdr_table_reserve_memory to
// reserve the address space range for the library.
bool ElfLoader::LoadSegments(Error* error) {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ELF::Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
    ELF::Addr seg_end = seg_start + phdr->p_memsz;

    ELF::Addr seg_page_start = PAGE_START(seg_start);
    ELF::Addr seg_page_end = PAGE_END(seg_end);

    ELF::Addr seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ELF::Addr file_start = phdr->p_offset;
    ELF::Addr file_end = file_start + phdr->p_filesz;

    ELF::Addr file_page_start = PAGE_START(file_start);
    ELF::Addr file_length = file_end - file_page_start;

    LOG("%s: file_offset=%p file_length=%p start_address=%p end_address=%p\n",
        __FUNCTION__,
        file_offset_ + file_page_start,
        file_length,
        seg_page_start,
        seg_page_start + PAGE_END(file_length));

    if (file_length != 0) {
      void* seg_addr = fd_.Map((void*)seg_page_start,
                               file_length,
                               PFLAGS_TO_PROT(phdr->p_flags),
                               MAP_FIXED | MAP_PRIVATE,
                               file_page_start + file_offset_);
      if (seg_addr == MAP_FAILED) {
        error->Format("Could not map segment %d: %s", i, strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        error->Format("Could not zero-fill gap: %s", strerror(errno));
        return false;
      }
    }
  }
  return true;
}
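These examples all rely on the page-alignment helpers PAGE_START, PAGE_END and PAGE_OFFSET, and on PFLAGS_TO_PROT, which turns a segment's ELF permission flags (PF_R/PF_W/PF_X) into mmap protection bits. Their definitions are not shown in the snippets; the sketch below is one plausible set of definitions, assuming a fixed 4 KiB page size, consistent with how the code above uses them. The quoted projects may define them differently.

// Plausible helper definitions (assumption: 4 KiB pages); not taken verbatim
// from the projects the examples come from.
#include <elf.h>       // PF_R, PF_W, PF_X
#include <sys/mman.h>  // PROT_READ, PROT_WRITE, PROT_EXEC

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
#ifndef PAGE_MASK
#define PAGE_MASK (~(PAGE_SIZE - 1))
#endif

// Round an address down to the start of its page.
#define PAGE_START(x)  ((x) & PAGE_MASK)
// Offset of an address within its page.
#define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)
// Round an address up to the next page boundary.
#define PAGE_END(x)    PAGE_START((x) + (PAGE_SIZE - 1))

// Convert one ELF p_flags bit into the corresponding mmap protection bit.
#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)                  \
  (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) |  \
   MAYBE_MAP_FLAG((x), PF_R, PROT_READ) |  \
   MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))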
Example No. 2
/* Map all loadable segments in process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 *   load_bias     -> load offset.
 *   fd            -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         int               fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* if the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                 seg_page_end - seg_file_end,
                                 PFLAGS_TO_PROT(phdr->p_flags),
                                 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                 -1,
                                 0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}
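For context, here is a hypothetical caller of phdr_table_load_segments: it computes the address range spanned by the PT_LOAD segments, reserves it with an anonymous PROT_NONE mapping, and derives load_bias from where the reservation landed. The function name load_library_segments and the overall structure are illustrative assumptions, not code from the quoted projects; the sketch targets a 32-bit process and assumes the PAGE_* helpers sketched after the first example.

/* Hypothetical caller sketch: reserve the load range, then map the segments
 * with phdr_table_load_segments() above. (32-bit example, matching the
 * Elf32 types used above.) */
#include <elf.h>
#include <stdint.h>
#include <sys/mman.h>

static int load_library_segments(const Elf32_Phdr* phdr_table,
                                 int phdr_count,
                                 int fd)
{
    /* Compute the [min_vaddr, max_vaddr) range spanned by all PT_LOAD
     * segments, rounded out to page boundaries. */
    Elf32_Addr min_vaddr = ~(Elf32_Addr)0;
    Elf32_Addr max_vaddr = 0;
    int nn;
    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        if (phdr->p_type != PT_LOAD)
            continue;
        if (phdr->p_vaddr < min_vaddr)
            min_vaddr = phdr->p_vaddr;
        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr)
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
    if (min_vaddr > max_vaddr)
        return -1;  /* no PT_LOAD segment */
    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    /* Reserve the whole range; the MAP_FIXED mappings made by
     * phdr_table_load_segments() then replace pieces of it. */
    size_t load_size = max_vaddr - min_vaddr;
    void* start = mmap(NULL, load_size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (start == MAP_FAILED)
        return -1;

    /* load_bias is the delta between the linked addresses (p_vaddr) and
     * where the reservation actually landed. */
    Elf32_Addr load_bias = (Elf32_Addr)(uintptr_t)start - min_vaddr;
    return phdr_table_load_segments(phdr_table, phdr_count, load_bias, fd);
}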
Example No. 3
// Map all loadable segments in process' address space.
// This assumes you already called phdr_table_reserve_memory to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end   = PAGE_END(seg_end);

    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end   = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);

    void* seg_addr = mmap((void*)seg_page_start,
                          file_end - file_page_start,
                          PFLAGS_TO_PROT(phdr->p_flags),
                          MAP_FIXED|MAP_PRIVATE,
                          fd_,
                          file_page_start);
    if (seg_addr == MAP_FAILED) {
      DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
      return false;
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
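The tail of each loop handles the .bss portion of a writable segment: bytes between p_filesz and p_memsz must read as zero, so the code zero-fills the remainder of the last file-backed page with memset and covers any further whole pages with an anonymous mapping, which the kernel supplies zero-filled. The worked example below just checks that page arithmetic with made-up numbers; it assumes 4 KiB pages and the PAGE_* helpers sketched after the first example.

// Illustrative check of the .bss page arithmetic used above. The segment
// addresses and sizes are invented; assumes the PAGE_* helpers and 4 KiB
// pages from the earlier sketch.
#include <assert.h>

int main(void) {
    // A writable PT_LOAD segment, after adding the load bias:
    unsigned long seg_start = 0x40001000UL;   // page-aligned start
    unsigned long p_filesz  = 0x2345UL;       // bytes backed by the file
    unsigned long p_memsz   = 0x8000UL;       // bytes needed in memory

    unsigned long seg_file_end = seg_start + p_filesz;  // 0x40003345
    unsigned long seg_end      = seg_start + p_memsz;   // 0x40009000
    unsigned long seg_page_end = PAGE_END(seg_end);     // 0x40009000 (already aligned)

    // memset() clears the tail of the last file-backed page:
    // from 0x40003345 up to 0x40004000.
    assert(PAGE_SIZE - PAGE_OFFSET(seg_file_end) == 0xCBBUL);

    // The anonymous MAP_FIXED mapping then covers the remaining whole pages:
    // from PAGE_END(seg_file_end) = 0x40004000 up to seg_page_end.
    assert(PAGE_END(seg_file_end) == 0x40004000UL);
    assert(seg_page_end - PAGE_END(seg_file_end) == 0x5000UL);
    return 0;
}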