Example #1
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
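All of these examples lean on PAGE_START, PAGE_END and PAGE_OFFSET without defining them. A minimal sketch of the usual definitions, assuming fixed 4 KiB pages (the real linker derives PAGE_SIZE from the platform headers):

#include <stdint.h>

#define PAGE_SIZE 4096                 // Assumption: 4 KiB pages.
#define PAGE_MASK (~(PAGE_SIZE - 1))

// Round down to the start of the containing page.
#define PAGE_START(x) ((x) & PAGE_MASK)
// Byte offset within the containing page.
#define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)
// Round up to the next page boundary (identity if already aligned).
#define PAGE_END(x) PAGE_START((x) + (PAGE_SIZE - 1))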
Example #2
/* Used internally. Sets the protection bits of all loaded segments,
 * with optional extra flags (in practice, PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}
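PFLAGS_TO_PROT converts ELF segment flags into mmap/mprotect protection bits, and the two wrappers named in the comment above reduce to one-line calls into this helper. A sketch of both, assuming the conventional flag mapping:

#include <elf.h>
#include <sys/mman.h>

// Translate PF_R/PF_W/PF_X into the corresponding PROT_* bits.
#define PFLAGS_TO_PROT(x) ((((x) & PF_R) ? PROT_READ  : 0) | \
                           (((x) & PF_W) ? PROT_WRITE : 0) | \
                           (((x) & PF_X) ? PROT_EXEC  : 0))

// Restore each segment's own protections (no extra flags)...
int phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                                int phdr_count, Elf32_Addr load_bias) {
    return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

// ...or force them writable, e.g. while applying relocations.
int phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                                  int phdr_count, Elf32_Addr load_bias) {
    return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias,
                                     PROT_WRITE);
}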
Example #3
/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space.
 *
 * This returns 0 if there are no loadable segments.
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    size_t phdr_count)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}
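A typical caller feeds the returned size straight into an address-space reservation; a minimal usage sketch (the helper name is ours), mapping the whole extent PROT_NONE so nothing is accessible until real segments are mapped over it:

#include <sys/mman.h>

// Reserve, without committing, enough address space for every PT_LOAD
// segment. Returns NULL if there is nothing to load or mmap fails.
static void* reserve_load_region(const Elf32_Phdr* phdr_table,
                                 size_t phdr_count) {
    Elf32_Addr size = phdr_table_get_load_size(phdr_table, phdr_count);
    if (size == 0) {
        return NULL;
    }
    void* start = mmap(NULL, size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return (start == MAP_FAILED) ? NULL : start;
}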
Example #4
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfLoader::ReadProgramHeader(Error* error) {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, only accept program header tables smaller than 64 KB.
  if (phdr_num_ < 1 || phdr_num_ > 65536 / sizeof(ELF::Phdr)) {
    error->Format("Invalid program header count: %d", phdr_num_);
    return false;
  }

  ELF::Addr page_min = PAGE_START(header_.e_phoff);
  ELF::Addr page_max =
      PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ELF::Phdr)));
  ELF::Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = fd_.Map(
      NULL, phdr_size_, PROT_READ, MAP_PRIVATE, page_min + file_offset_);
  if (mmap_result == MAP_FAILED) {
    error->Format("Phdr mmap failed: %s", strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ELF::Phdr*>(
      reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
Example #5
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader_ReadProgramHeader(ElfReader* er) {
  er->phdr_num_ = er->header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (er->phdr_num_ < 1 || er->phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", er->name_, er->phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(er->header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(er->header_.e_phoff + (er->phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(er->header_.e_phoff);

  er->phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, er->phdr_size_, PROT_READ, MAP_PRIVATE, er->fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", er->name_, strerror(errno));
    return false;
  }

  er->phdr_mmap_ = mmap_result;
  er->phdr_table_ = (Elf32_Phdr*)((char*)(mmap_result) + page_offset);
  return true;
}
Example #6
bool RPLLibrary::readElfHeader() {
	void* ehdr_map = ::mmap(nullptr, PAGE_END(sizeof(Elf32_Ehdr)),
		PROT_READ, MAP_PRIVATE, fd, 0);
	if (ehdr_map == MAP_FAILED) return false;
	ehdr = (Elf32_Ehdr*) ehdr_map;
	// map the section headers
	// from crazy linker
	Elf32_Addr page_min = PAGE_START(ehdr->e_shoff);
	Elf32_Addr page_max = PAGE_END(ehdr->e_shoff + 
		(ehdr->e_shnum * sizeof(Elf32_Shdr)));
	Elf32_Addr page_offset = PAGE_OFFSET(ehdr->e_shoff);
	void* shdr_map = ::mmap(nullptr, page_max - page_min,
		PROT_READ, MAP_PRIVATE, fd, page_min);
	if (shdr_map == MAP_FAILED) return false;
	shdr = (Elf32_Shdr*) (((uintptr_t) shdr_map) + page_offset);
	return true;
}
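readElfHeader starts using e_shoff and e_shnum as soon as the first page is mapped; a cautious caller would validate the identification bytes first. A sketch of such a check (the helper is ours, not part of RPLLibrary):

#include <elf.h>
#include <string.h>

// Reject anything that is not a 32-bit little-endian ELF image before
// trusting any other header field.
static bool IsValidElf32(const Elf32_Ehdr* ehdr) {
	return memcmp(ehdr->e_ident, ELFMAG, SELFMAG) == 0 &&
	       ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
	       ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
}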
Example #7
static void apply_gnu_relro() {
  Elf32_Phdr* phdr_start = reinterpret_cast<Elf32_Phdr*>(getauxval(AT_PHDR));
  unsigned long int phdr_ct = getauxval(AT_PHNUM);

  for (Elf32_Phdr* phdr = phdr_start; phdr < (phdr_start + phdr_ct); phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr);
    Elf32_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz);

    // Check return value here? What do we do if we fail?
    mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, PROT_READ);
  }
}
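The comment above asks what to do on failure. One plausible answer, sketched below: treat it as fatal, since continuing without RELRO silently drops the hardening the segment exists to provide.

#include <stdlib.h>
#include <sys/mman.h>

// Variant of the call above that refuses to continue if the RELRO
// region cannot be sealed read-only.
static void protect_relro_or_die(Elf32_Addr page_start, Elf32_Addr page_end) {
  if (mprotect(reinterpret_cast<void*>(page_start),
               page_end - page_start, PROT_READ) < 0) {
    abort();  // No safe way to run with writable RELRO data.
  }
}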
Example #8
bool RPLLibrary::reserveAddressSpace() {
	Elf32_Addr min_addr = shdr[0].sh_addr;
	Elf32_Addr max_addr = min_addr + getSectionUncompressedSize(&shdr[0]);
	for (int i = 1; i < ehdr->e_shnum; i++) {
		Elf32_Shdr* s = &shdr[i];
		if (!SectionIsAlloc(s)) continue;
		if (s->sh_addr < min_addr) min_addr = s->sh_addr;
		if (s->sh_addr + getSectionUncompressedSize(s) > max_addr) {
			max_addr = s->sh_addr + getSectionUncompressedSize(s);
		}
	}
	size_t mysize = PAGE_END(max_addr) - PAGE_START(min_addr);
	load_start = mmap(load_start, mysize,
		PROT_READ | PROT_WRITE | PROT_EXEC,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (load_start == MAP_FAILED) return false;
	return true;
}
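Everything later mapped into this reservation must be shifted by the difference between link-time and run-time addresses. A minimal sketch of that computation, assuming the min_addr and load_start values from the function above:

// The bias turns link-time addresses into run-time ones: a section
// linked at sh_addr actually lives at sh_addr + bias.
static Elf32_Addr ComputeLoadBias(void* load_start, Elf32_Addr min_addr) {
	return reinterpret_cast<Elf32_Addr>(load_start) - PAGE_START(min_addr);
}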
Example #9
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}
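The two entry points named in the comment are thin wrappers over this helper, differing only in the requested protection; roughly:

// Seal RELRO regions read-only after relocations are applied...
int phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                                 int phdr_count, Elf32_Addr load_bias) {
    return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count,
                                          load_bias, PROT_READ);
}

// ...and make them writable again, e.g. before re-running relocations.
int phdr_table_unprotect_gnu_relro(const Elf32_Phdr* phdr_table,
                                   int phdr_count, Elf32_Addr load_bias) {
    return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count,
                                          load_bias,
                                          PROT_READ | PROT_WRITE);
}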
Example #10
void* LinkerMemoryAllocator::alloc_mmap(size_t size) {
  size_t allocated_size = PAGE_END(size + sizeof(page_info));
  void* map_ptr = mmap(nullptr, allocated_size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

  if (map_ptr == MAP_FAILED) {
    __libc_fatal("mmap failed");
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "linker_alloc_lob");

  memset(map_ptr, 0, allocated_size);

  page_info* info = reinterpret_cast<page_info*>(map_ptr);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = kLargeObject;
  info->allocated_size = allocated_size;

  return info + 1;
}
Example #11
/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are non-NULL, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                size_t phdr_count,
                                Elf32_Addr* out_min_vaddr,
                                Elf32_Addr* out_max_vaddr)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    bool found_pt_load = false;
    size_t i;
    for (i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }
        found_pt_load = true;

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }
    if (!found_pt_load) {
        min_vaddr = 0x00000000U;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    if (out_min_vaddr != NULL) {
        *out_min_vaddr = min_vaddr;
    }
    if (out_max_vaddr != NULL) {
        *out_max_vaddr = max_vaddr;
    }
    return max_vaddr - min_vaddr;
}
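The out parameters let the caller turn the reservation into a load bias in one step; a usage sketch under that assumption (the helper name is ours):

#include <sys/mman.h>

// Reserve space for all loadable segments and report the bias to add
// to every p_vaddr in the file.
static bool ReserveAndBias(const Elf32_Phdr* phdr_table, size_t phdr_count,
                           Elf32_Addr* out_load_bias) {
    Elf32_Addr min_vaddr;
    size_t size = phdr_table_get_load_size(phdr_table, phdr_count,
                                           &min_vaddr, NULL);
    if (size == 0) {
        return false;
    }
    void* start = mmap(NULL, size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (start == MAP_FAILED) {
        return false;
    }
    *out_load_bias = reinterpret_cast<Elf32_Addr>(start) - min_vaddr;
    return true;
}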
Example #12
/* Load the program header table from an ELF file into a read-only private
 * anonymous mmap-ed block.
 *
 * Input:
 *   fd           -> file descriptor
 *   phdr_offset  -> file offset of phdr table
 *   phdr_num     -> number of entries in the table.
 *
 * Output:
 *   phdr_mmap    -> address of mmap block in memory.
 *   phdr_memsize -> size of mmap block in memory.
 *   phdr_table   -> address of first entry in memory.
 *
 * Return:
 *   -1 on error, or 0 on success.
 */
int phdr_table_load(int                fd,
                    Elf32_Addr         phdr_offset,
                    Elf32_Half         phdr_num,
                    void**             phdr_mmap,
                    Elf32_Addr*        phdr_size,
                    const Elf32_Phdr** phdr_table)
{
    Elf32_Addr  page_min, page_max, page_offset;
    void*       mmap_result;

    /* Just like the kernel, we only accept program header tables that
     * are smaller than 64KB. */
    if (phdr_num < 1 || phdr_num > 65536/sizeof(Elf32_Phdr)) {
        errno = EINVAL;
        return -1;
    }

    page_min = PAGE_START(phdr_offset);
    page_max = PAGE_END(phdr_offset + phdr_num*sizeof(Elf32_Phdr));
    page_offset = PAGE_OFFSET(phdr_offset);

    mmap_result = mmap(NULL,
                       page_max - page_min,
                       PROT_READ,
                       MAP_PRIVATE,
                       fd,
                       page_min);

    if (mmap_result == MAP_FAILED) {
        return -1;
    }

    *phdr_mmap = mmap_result;
    *phdr_size = page_max - page_min;
    *phdr_table = (Elf32_Phdr*)((char*)mmap_result + page_offset);

    return 0;
}
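On success the caller owns the mapping and must release it with the reported size; a usage sketch (ours) that loads the table, scans it, and unmaps it:

#include <elf.h>
#include <sys/mman.h>

// Load, inspect and release a file's program header table.
static int count_load_segments(int fd, const Elf32_Ehdr* ehdr) {
    void* phdr_mmap = NULL;
    Elf32_Addr phdr_size = 0;
    const Elf32_Phdr* phdr_table = NULL;

    if (phdr_table_load(fd, ehdr->e_phoff, ehdr->e_phnum,
                        &phdr_mmap, &phdr_size, &phdr_table) < 0) {
        return -1;  // errno set (EINVAL for a bad e_phnum).
    }

    int loads = 0;
    for (Elf32_Half i = 0; i < ehdr->e_phnum; ++i) {
        if (phdr_table[i].p_type == PT_LOAD)
            ++loads;
    }

    munmap(phdr_mmap, phdr_size);  // Caller owns the mapping.
    return loads;
}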
Example #13
/* Map all loadable segments in process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 *   load_bias     -> load offset.
 *   fd            -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         int               fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* if the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                    seg_page_end - seg_file_end,
                                    PFLAGS_TO_PROT(phdr->p_flags),
                                    MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                    -1,
                                    0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}
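The interplay of the file-backed map, the trailing memset and the anonymous map is easiest to see with concrete numbers. A worked example, assuming 4 KiB pages, load_bias = 0, p_offset = p_vaddr = 0x1000, p_filesz = 0x1800, p_memsz = 0x4000:

/* seg_start      = 0x1000    seg_end      = 0x5000
 * seg_page_start = 0x1000    seg_page_end = 0x5000
 * seg_file_end   = 0x2800    (file bytes stop mid-page)
 *
 * mmap maps 0x1800 bytes from the file at 0x1000; the kernel rounds
 * the mapping up to page granularity, so pages 0x1000..0x3000 exist
 * and 0x2800..0x3000 holds whatever follows the segment in the file.
 * memset zeroes exactly that tail: 0x2800..0x3000.
 * The anonymous map then covers 0x3000..0x5000, the pure-BSS pages.
 */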
Example #14
// Map all loadable segments in process' address space.
// This assumes you already called phdr_table_reserve_memory to
// reserve the address space range for the library.
bool ElfLoader::LoadSegments(Error* error) {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ELF::Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
    ELF::Addr seg_end = seg_start + phdr->p_memsz;

    ELF::Addr seg_page_start = PAGE_START(seg_start);
    ELF::Addr seg_page_end = PAGE_END(seg_end);

    ELF::Addr seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ELF::Addr file_start = phdr->p_offset;
    ELF::Addr file_end = file_start + phdr->p_filesz;

    ELF::Addr file_page_start = PAGE_START(file_start);
    ELF::Addr file_length = file_end - file_page_start;

    LOG("%s: file_offset=%p file_length=%p start_address=%p end_address=%p\n",
        __FUNCTION__,
        file_offset_ + file_page_start,
        file_length,
        seg_page_start,
        seg_page_start + PAGE_END(file_length));

    if (file_length != 0) {
      void* seg_addr = fd_.Map((void*)seg_page_start,
                               file_length,
                               PFLAGS_TO_PROT(phdr->p_flags),
                               MAP_FIXED | MAP_PRIVATE,
                               file_page_start + file_offset_);
      if (seg_addr == MAP_FAILED) {
        error->Format("Could not map segment %d: %s", i, strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        error->Format("Could not zero-fill gap: %s", strerror(errno));
        return false;
      }
    }
  }
  return true;
}
Example #15
// Map all loadable segments in process' address space.
// This assumes you already called phdr_table_reserve_memory to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end   = PAGE_END(seg_end);

    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end   = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);

    void* seg_addr = mmap((void*)seg_page_start,
                          file_end - file_page_start,
                          PFLAGS_TO_PROT(phdr->p_flags),
                          MAP_FIXED|MAP_PRIVATE,
                          fd_,
                          file_page_start);
    if (seg_addr == MAP_FAILED) {
      DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
      return false;
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
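None of the examples show teardown. A sketch of the matching cleanup, assuming the member names from Examples 1 and 15 and that ElfReader owns both fd_ and the phdr mapping:

#include <sys/mman.h>
#include <unistd.h>

// Release everything ReadProgramHeader and the constructor acquired.
ElfReader::~ElfReader() {
  if (phdr_mmap_ != NULL) {
    munmap(phdr_mmap_, phdr_size_);  // Example 1's read-only phdr block.
  }
  if (fd_ != -1) {
    close(fd_);
  }
}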