Example #1
static int
validate_mem (unw_word_t addr)
{
  int i, victim;
#ifdef HAVE_MINCORE
  unsigned char mvec[2]; /* Unaligned access may cross page boundary */
#endif
  size_t len;

  if (PAGE_START(addr + sizeof (unw_word_t) - 1) == PAGE_START(addr))
    len = PAGE_SIZE;
  else
    len = PAGE_SIZE * 2;

  addr = PAGE_START(addr);

  if (addr == 0)
    return -1;

  for (i = 0; i < NLGA; i++)
    {
      if (last_good_addr[i] && (addr == last_good_addr[i]))
        return 0;
    }

#ifdef HAVE_MINCORE
  if (mincore ((void *) addr, len, mvec) == -1)
#else
  if (msync ((void *) addr, len, MS_ASYNC) == -1)
#endif
    return -1;

  victim = lga_victim;
  for (i = 0; i < NLGA; i++) {
    if (!last_good_addr[victim]) {
      last_good_addr[victim++] = addr;
      return 0;
    }
    victim = (victim + 1) % NLGA;
  }

  /* All slots full. Evict the victim. */
  last_good_addr[victim] = addr;
  victim = (victim + 1) % NLGA;
  lga_victim = victim;

  return 0;
}
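The validate_mem() snippet above relies on identifiers defined elsewhere in its project (it appears to come from libunwind). A minimal sketch of the assumed context follows; the names match the code above, but the concrete values and definitions are my assumptions, not the library's exact code:

/* Assumed context for validate_mem(); actual libunwind definitions may differ. */
#include <stdint.h>

typedef uintptr_t unw_word_t;                  /* assumption: pointer-sized word */

#define PAGE_SIZE     4096UL                   /* assumption: 4 KiB pages */
#define PAGE_START(a) ((a) & ~(PAGE_SIZE - 1)) /* round down to page boundary */

#define NLGA 5                                 /* size of the last-good-address cache (assumption) */
static unw_word_t last_good_addr[NLGA];        /* recently validated page addresses */
static int        lga_victim;                  /* round-robin eviction cursor */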
Example #2
void
replace_init(void)
{
    if (options.replace_libc) {
        app_pc addr;
        int i;
        char *s;

        /* replace_module_load will be called for each module to populate the hashtable */
        ASSERT(PAGE_START(get_function_entry((app_pc)replace_memset)) ==
               PAGE_START(get_function_entry((app_pc)replace_memmove)),
               "replace_ routines taking up more than one page");
        ASSERT(sizeof(int) >= sizeof(wchar_t),
               "wchar_t replacement functions assume wchar_t is not larger than int");
        replace_routine_start = (app_pc)
            PAGE_START(get_function_entry((app_pc)replace_memset));
        
        /* PR 485412: we support passing in addresses of libc routines to
         * be replaced if statically included in the executable and if
         * we have no symbols available
         */
        s = options.libc_addrs;
        i = 0;
        while (s != NULL) {
            if (sscanf(s, PIFX, (ptr_uint_t *)&addr) == 1 ||
                /* we save option space by having no 0x prefix but assuming hex */
                sscanf(s, PIFMT, (ptr_uint_t *)&addr) == 1) {
                LOG(2, "replacing %s @"PFX" in executable from options\n",
                    replace_routine_name[i], addr);
                if (!drwrap_replace((app_pc)addr, (app_pc)replace_routine_addr[i], false))
                    ASSERT(false, "failed to replace");
            }
            s = strchr(s, ',');
            if (s != NULL)
                s++;
            i++;
        }

#ifdef USE_DRSYMS
        hashtable_init(&replace_name_table, REPLACE_NAME_TABLE_HASH_BITS, HASH_STRING,
                       false/*!strdup*/);
        for (i=0; i<REPLACE_NUM; i++) {
            hashtable_add(&replace_name_table, (void *) replace_routine_name[i],
                          (void *)(ptr_int_t)(i+1)/*since 0 is "not found"*/);
        }
#endif
    }
}
Example #3
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader_ReadProgramHeader(ElfReader* er) {
  er->phdr_num_ = er->header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (er->phdr_num_ < 1 || er->phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", er->name_, er->phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(er->header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(er->header_.e_phoff + (er->phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(er->header_.e_phoff);

  er->phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, er->phdr_size_, PROT_READ, MAP_PRIVATE, er->fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", er->name_, strerror(errno));
    return false;
  }

  er->phdr_mmap_ = mmap_result;
  er->phdr_table_ = (Elf32_Phdr*)((char*)(mmap_result) + page_offset);
  return true;
}
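The ELF-loader examples in this collection all lean on the same three page-arithmetic macros, which are not shown in any of the snippets. A sketch of the usual mask-based definitions, with the 4 KiB page size being an assumption:

/* Sketch of the page macros assumed by the loader examples (values are assumptions). */
#define PAGE_SIZE  4096
#define PAGE_MASK  (~(PAGE_SIZE - 1))

#define PAGE_START(x)  ((x) & PAGE_MASK)                  /* round down to the page start */
#define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)                 /* byte offset within the page */
#define PAGE_END(x)    PAGE_START((x) + (PAGE_SIZE - 1))  /* round up to a page boundary */

With definitions like these, ReadProgramHeader() maps whole pages from page_min to page_max and then adds page_offset back to locate the first Elf32_Phdr inside the mapping.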
Example #4
static int
validate_mem (unw_word_t addr)
{
  int i, victim;

  addr = PAGE_START(addr);

  for (i = 0; i < NLGA; i++)
    {
      if (last_good_addr[i] && (addr == last_good_addr[i]))
	return 0;
    }

  if (msync ((void *) addr, 1, MS_SYNC) == -1)
    return -1;

  victim = lga_victim;
  for (i = 0; i < NLGA; i++) {
    if (!last_good_addr[victim]) {
      last_good_addr[victim++] = addr;
      return 0;
    }
    victim = (victim + 1) % NLGA;
  }

  /* All slots full. Evict the victim. */
  last_good_addr[victim] = addr;
  victim = (victim + 1) % NLGA;
  lga_victim = victim;

  return 0;
}
Example #5
/* Reserve a virtual address range big enough to hold all loadable
 * segments of a program header table. This is done by creating a
 * private anonymous mmap() with PROT_NONE.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 * Output:
 *   load_start    -> first page of reserved address space range
 *   load_size     -> size in bytes of reserved address space range
 *   load_bias     -> load bias, as described in technical note above.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_reserve_memory(const Elf32_Phdr* phdr_table,
                          size_t phdr_count,
                          void** load_start,
                          Elf32_Addr* load_size,
                          Elf32_Addr* load_bias)
{
    Elf32_Addr size = phdr_table_get_load_size(phdr_table, phdr_count);
    if (size == 0) {
        errno = EINVAL;
        return -1;
    }

    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    void* start = mmap(NULL, size, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
        return -1;
    }

    *load_start = start;
    *load_size  = size;
    *load_bias  = 0;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];
        if (phdr->p_type == PT_LOAD) {
            *load_bias = (Elf32_Addr)start - PAGE_START(phdr->p_vaddr);
            break;
        }
    }
    return 0;
}
Example #6
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfLoader::ReadProgramHeader(Error* error) {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, only accept program header tables smaller than 64 KB.
  if (phdr_num_ < 1 || phdr_num_ > 65536 / sizeof(ELF::Phdr)) {
    error->Format("Invalid program header count: %d", phdr_num_);
    return false;
  }

  ELF::Addr page_min = PAGE_START(header_.e_phoff);
  ELF::Addr page_max =
      PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ELF::Phdr)));
  ELF::Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = fd_.Map(
      NULL, phdr_size_, PROT_READ, MAP_PRIVATE, page_min + file_offset_);
  if (mmap_result == MAP_FAILED) {
    error->Format("Phdr mmap failed: %s", strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ELF::Phdr*>(
      reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
Example #7
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  void* start = mmap(NULL, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    DL_ERR("couldn't reserve %d bytes of address space for \"%s\"", load_size_, name_);
    return false;
  }

  load_start_ = start;
  load_bias_ = 0;

  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_LOAD) {
      load_bias_ = reinterpret_cast<Elf32_Addr>(start) - PAGE_START(phdr->p_vaddr);
      break;
    }
  }
  return true;
}
Example #8
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
Example #9
/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}
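The snippet uses PFLAGS_TO_PROT to translate ELF segment flags into protection bits for mprotect(). A plausible definition, given here as an assumption since the macro itself is not part of the excerpt:

/* Assumed mapping from ELF p_flags bits to mmap()/mprotect() protection bits. */
#include <elf.h>
#include <sys/mman.h>

#define PFLAGS_TO_PROT(x) (((x) & PF_R ? PROT_READ  : 0) | \
                           ((x) & PF_W ? PROT_WRITE : 0) | \
                           ((x) & PF_X ? PROT_EXEC  : 0))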
Example #10
/**  Check that access to a user-space range is permitted (implementation)
     @param[in] as    Virtual address space to check
     @param[in] start Start address of the check
     @param[in] count Length of the range to check (in bytes)
     @param[in] prot  Access rights to check (protection attributes of the virtual memory area)
     @retval    true  Physical pages exist for the range and the required access rights are present
     @retval    false No physical pages exist for the range, or the required access rights are missing
 */
static bool
user_area_can_access_nolock(vm *as, void *start, size_t count, vma_prot prot) {
	int             rc;
	void     *pg_start;
	void       *pg_end;
	vma          *vmap;

	kassert( as != NULL );

	pg_start = (void *)PAGE_START((uintptr_t)start);
	pg_end = ( PAGE_ALIGNED( (uintptr_t)(start + count) ) ? 
	    ( start + count ) : ( (void *)PAGE_NEXT( (uintptr_t)(start + count) ) ) );

	rc = _vm_find_vma_nolock(as, pg_start, &vmap);
	if ( rc != 0 )
		goto can_not_access;

	if ( vmap->end < pg_end )
		goto can_not_access;

	if ( !( vmap->prot & prot ) )
		goto can_not_access;

	return true;

can_not_access:
	return false;
}
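Besides PAGE_START, this kernel helper assumes PAGE_ALIGNED and PAGE_NEXT macros. A minimal sketch of likely definitions, stated as assumptions rather than the kernel's actual header:

/* Assumed page helpers used by user_area_can_access_nolock(). */
#define PAGE_SIZE        4096UL
#define PAGE_START(a)    ((a) & ~(PAGE_SIZE - 1))         /* round down to the page start */
#define PAGE_ALIGNED(a)  (((a) & (PAGE_SIZE - 1)) == 0)   /* true if already on a boundary */
#define PAGE_NEXT(a)     (PAGE_START(a) + PAGE_SIZE)      /* first page boundary above 'a' */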
Example #11
/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space
 *
 * This returns 0 if there are no loadable segments.
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    size_t phdr_count)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}
Example #12
page_info* LinkerMemoryAllocator::get_page_info(void* ptr) {
  page_info* info = reinterpret_cast<page_info*>(PAGE_START(reinterpret_cast<size_t>(ptr)));
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    __libc_fatal("invalid pointer %p (page signature mismatch)", ptr);
  }

  return info;
}
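get_page_info() assumes the allocator writes a small header, tagged with a signature, at the start of every page it manages. A sketch of what that header plausibly looks like; the field layout and signature bytes are assumptions:

/* Assumed per-page header checked by get_page_info(); layout is illustrative only. */
#include <stddef.h>

static const char kSignature[4] = { 'L', 'M', 'A', 1 };

typedef struct {
  char   signature[4];   /* compared against kSignature to catch stray pointers */
  size_t type;           /* allocator bookkeeping (assumption) */
  void*  allocator_addr; /* owning allocator, used to route frees (assumption) */
} page_info;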
Example #13
bool ElfLoader::LoadAt(const char* lib_path,
                       off_t file_offset,
                       uintptr_t wanted_address,
                       Error* error) {

  LOG("%s: lib_path='%s', file_offset=%p, load_address=%p\n",
      __FUNCTION__,
      lib_path,
      file_offset,
      wanted_address);

  // Check that the load address is properly page-aligned.
  if (wanted_address != PAGE_START(wanted_address)) {
    error->Format("Load address is not page aligned (%08x)", wanted_address);
    return false;
  }
  wanted_load_address_ = reinterpret_cast<void*>(wanted_address);

  // Check that the file offset is also properly page-aligned.
  // PAGE_START() can't be used here due to the compiler complaining about
  // comparing signed (off_t) and unsigned (size_t) values.
  if ((file_offset & static_cast<off_t>(PAGE_SIZE - 1)) != 0) {
    error->Format("File offset is not page aligned (%08x)", file_offset);
    return false;
  }
  file_offset_ = file_offset;

  // Open the file.
  if (!fd_.OpenReadOnly(lib_path)) {
    error->Format("Can't open file: %s", strerror(errno));
    return false;
  }

  if (file_offset && fd_.SeekTo(file_offset) < 0) {
    error->Format(
        "Can't seek to file offset %08x: %s", file_offset, strerror(errno));
    return false;
  }

  path_ = lib_path;

  if (!ReadElfHeader(error) || !ReadProgramHeader(error) ||
      !ReserveAddressSpace(error)) {
    return false;
  }

  if (!LoadSegments(error) || !FindPhdr(error)) {
    // An error occurred; clean up the address space by un-mapping the
    // range that was reserved by ReserveAddressSpace().
    if (load_start_ && load_size_)
      munmap(load_start_, load_size_);

    return false;
  }

  return true;
}
Example #14
static int
validate_mem (unw_word_t addr)
{
  int i, victim;
  size_t len;

  if (PAGE_START(addr + sizeof (unw_word_t) - 1) == PAGE_START(addr))
    len = PAGE_SIZE;
  else
    len = PAGE_SIZE * 2;

  addr = PAGE_START(addr);

  if (addr == 0)
    return -1;

  for (i = 0; i < NLGA; i++)
    {
      if (last_good_addr[i] && (addr == last_good_addr[i]))
	return 0;
    }

  if (mem_validate_func ((void *) addr, len) == -1)
    return -1;

  victim = lga_victim;
  for (i = 0; i < NLGA; i++) {
    if (!last_good_addr[victim]) {
      last_good_addr[victim++] = addr;
      return 0;
    }
    victim = (victim + 1) % NLGA;
  }

  /* All slots full. Evict the victim. */
  last_good_addr[victim] = addr;
  victim = (victim + 1) % NLGA;
  lga_victim = victim;

  return 0;
}
Example #15
/* Returns whether it had to change page protections */
static bool
patch_coarse_branch(cache_pc stub, cache_pc tgt, bool hot_patch,
                    coarse_info_t *info /*OPTIONAL*/)
{
	// COMPLETEDD #498 patch_coarse_branch
    bool stubs_readonly = false;
    bool stubs_restore = false;
    if (DYNAMO_OPTION(persist_protect_stubs)) {
        if (info == NULL)
            info = get_stub_coarse_info(stub);
        ASSERT(info != NULL);
        if (info->stubs_readonly) {
            stubs_readonly = true;
            stubs_restore = true;
            /* if we don't preserve mapped-in COW state the protection change
             * will fail (case 10570)
             */
            make_copy_on_writable((byte *)PAGE_START(entrance_stub_jmp(stub)),
                                  /* stub jmp can't cross page boundary (can't
                                   * cross cache line in fact) */
                                  PAGE_SIZE);
            if (DYNAMO_OPTION(persist_protect_stubs_limit) > 0) {
                info->stubs_write_count++;
                if (info->stubs_write_count >
                    DYNAMO_OPTION(persist_protect_stubs_limit)) {
                    SYSLOG_INTERNAL_WARNING_ONCE("pcache stubs over write limit");
                    STATS_INC(pcache_unprot_over_limit);
                    stubs_restore = false;
                    info->stubs_readonly = false;
                }
            }
        }
    }
    patch_branch(entrance_stub_jmp(stub), tgt, HOT_PATCHABLE);
    if (stubs_restore)
        make_unwritable((byte *)PAGE_START(entrance_stub_jmp(stub)), PAGE_SIZE);
    return stubs_readonly;
}
Example #16
linker_vector_t::iterator LinkerSmallObjectAllocator::find_page_record(void* ptr) {
  void* addr = reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(ptr)));
  small_object_page_record boundary;
  boundary.page_addr = addr;
  linker_vector_t::iterator it = std::lower_bound(
      page_records_.begin(), page_records_.end(), boundary);

  if (it == page_records_.end() || it->page_addr != addr) {
    // not found...
    __libc_fatal("page record for %p was not found (block_size=%zd)", ptr, block_size_);
  }

  return it;
}
Example #17
int update_inode_extents(struct inode *inode, struct ext3_extent *entries,
                         e2_blkcnt_t num_entries, int depth)
{
	assert(num_entries <= 4);
	inode->on_disk->extents.hdr.eh_magic = EXT3_EXT_MAGIC;
	inode->on_disk->extents.hdr.eh_entries = num_entries;
	inode->on_disk->extents.hdr.eh_max = 4;
	inode->on_disk->extents.hdr.eh_depth = depth;
	inode->on_disk->extents.hdr.eh_generation = 0;
	memcpy(inode->on_disk->extents.extent, entries,
	       num_entries * sizeof(struct ext3_extent));

	return msync(PAGE_START(inode->on_disk), getpagesize(), MS_SYNC);
}
Example #18
static int update_metadata_move(struct defrag_ctx *c, struct inode *inode,
                                blk64_t from, blk64_t to, __u32 logical,
				blk64_t at_block)
{
	int ret = 0;
	struct ext3_extent_header *header;
	struct ext3_extent_idx *idx;
	if (at_block == 0) {
		header = &inode->on_disk->extents.hdr;
	} else {
		header = malloc(EXT2_BLOCK_SIZE(&c->sb));
		ret = read_block(c, header, at_block);
		if (ret)
			goto out_noupdate;
	}
	if (!header->eh_depth) {
		errno = EINVAL;
		goto out_noupdate;
	}
	for (idx = EXT_FIRST_INDEX(header);
	     idx <= EXT_LAST_INDEX(header); idx++) {
		if (idx->ei_block > logical) {
			errno = EINVAL;
			goto out_noupdate;
		} if (idx->ei_block == logical && EI_BLOCK(idx) == from) {
			EI_LEAF_SET(idx, to);
			goto out_update;
		}
		if (idx + 1 > EXT_LAST_INDEX(header) ||
		    (idx + 1)->ei_block > logical) {
			ret = update_metadata_move(c, inode, from, to, logical,
			                           EI_BLOCK(idx));
			goto out_noupdate;
		}
	}
	errno = EINVAL;
	goto out_noupdate;

out_update:
	if (at_block) {
		ret = write_block(c, header, at_block);
	} else {
		ret = msync(PAGE_START(header), getpagesize(), MS_SYNC);
	}
out_noupdate:
	if (at_block)
		free(header);
	return ret;
}
Example #19
static void apply_gnu_relro() {
  Elf32_Phdr* phdr_start = reinterpret_cast<Elf32_Phdr*>(getauxval(AT_PHDR));
  unsigned long int phdr_ct = getauxval(AT_PHNUM);

  for (Elf32_Phdr* phdr = phdr_start; phdr < (phdr_start + phdr_ct); phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr);
    Elf32_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz);

    // Check return value here? What do we do if we fail?
    mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, PROT_READ);
  }
}
Example #20
bool RPLLibrary::readElfHeader() {
	void* ehdr_map = ::mmap(nullptr, PAGE_END(sizeof(Elf32_Ehdr)),
		PROT_READ, MAP_PRIVATE, fd, 0);
	if (ehdr_map == MAP_FAILED) return false;
	ehdr = (Elf32_Ehdr*) ehdr_map;
	// map the section headers
	// from crazy linker
	Elf32_Addr page_min = PAGE_START(ehdr->e_shoff);
	Elf32_Addr page_max = PAGE_END(ehdr->e_shoff + 
		(ehdr->e_shnum * sizeof(Elf32_Shdr)));
	Elf32_Addr page_offset = PAGE_OFFSET(ehdr->e_shoff);
	void* shdr_map = ::mmap(nullptr, page_max - page_min,
		PROT_READ, MAP_PRIVATE, fd, page_min);
	if (shdr_map == MAP_FAILED) return false;
	shdr = (Elf32_Shdr*) (((uintptr_t) shdr_map) + page_offset);
	return true;
}
Example #21
bool RPLLibrary::reserveAddressSpace() {
	Elf32_Addr min_addr = shdr[0].sh_addr;
	Elf32_Addr max_addr = min_addr + getSectionUncompressedSize(&shdr[0]);
	for (int i = 1; i < ehdr->e_shnum; i++) {
		Elf32_Shdr* s = &shdr[i];
		if (!SectionIsAlloc(s)) continue;
		if (s->sh_addr < min_addr) min_addr = s->sh_addr;
		if (s->sh_addr + getSectionUncompressedSize(s) > max_addr) {
			max_addr = s->sh_addr + getSectionUncompressedSize(s);
		}
	}
	size_t mysize = PAGE_END(max_addr) - PAGE_START(min_addr);
	load_start = mmap(load_start, mysize,
		PROT_READ | PROT_WRITE | PROT_EXEC,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (load_start == MAP_FAILED) return false;
	return true;
}
Example #22
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}
Example #23
/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are non-NULL, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                size_t phdr_count,
                                Elf32_Addr* out_min_vaddr,
                                Elf32_Addr* out_max_vaddr)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    bool found_pt_load = false;
    size_t i;
    for (i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }
        found_pt_load = true;

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }
    if (!found_pt_load) {
        min_vaddr = 0x00000000U;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    if (out_min_vaddr != NULL) {
        *out_min_vaddr = min_vaddr;
    }
    if (out_max_vaddr != NULL) {
        *out_max_vaddr = max_vaddr;
    }
    return max_vaddr - min_vaddr;
}
Example #24
/* Load the program header table from an ELF file into a read-only private
 * anonymous mmap-ed block.
 *
 * Input:
 *   fd           -> file descriptor
 *   phdr_offset  -> file offset of phdr table
 *   phdr_num     -> number of entries in the table.
 *
 * Output:
 *   phdr_mmap    -> address of mmap block in memory.
 *   phdr_memsize -> size of mmap block in memory.
 *   phdr_table   -> address of first entry in memory.
 *
 * Return:
 *   -1 on error, or 0 on success.
 */
int phdr_table_load(int                fd,
                    Elf32_Addr         phdr_offset,
                    Elf32_Half         phdr_num,
                    void**             phdr_mmap,
                    Elf32_Addr*        phdr_size,
                    const Elf32_Phdr** phdr_table)
{
    Elf32_Addr  page_min, page_max, page_offset;
    void*       mmap_result;

    /* Just like the kernel, we only accept program header tables that
     * are smaller than 64KB. */
    if (phdr_num < 1 || phdr_num > 65536/sizeof(Elf32_Phdr)) {
        errno = EINVAL;
        return -1;
    }

    page_min = PAGE_START(phdr_offset);
    page_max = PAGE_END(phdr_offset + phdr_num*sizeof(Elf32_Phdr));
    page_offset = PAGE_OFFSET(phdr_offset);

    mmap_result = mmap(NULL,
                       page_max - page_min,
                       PROT_READ,
                       MAP_PRIVATE,
                       fd,
                       page_min);

    if (mmap_result == MAP_FAILED) {
        return -1;
    }

    *phdr_mmap = mmap_result;
    *phdr_size = page_max - page_min;
    *phdr_table = (Elf32_Phdr*)((char*)mmap_result + page_offset);

    return 0;
}
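A hypothetical caller sketch for phdr_table_load(), assuming 'fd' and an Elf32_Ehdr obtained earlier by open()-ing and read()-ing the ELF file; the function and variable names introduced here are illustrative only:

/* Hypothetical usage of phdr_table_load(); error handling trimmed for brevity. */
#include <elf.h>
#include <sys/mman.h>

static int load_phdrs_example(int fd, const Elf32_Ehdr* ehdr)
{
    void*             phdr_mmap  = NULL;
    Elf32_Addr        phdr_size  = 0;
    const Elf32_Phdr* phdr_table = NULL;

    if (phdr_table_load(fd, ehdr->e_phoff, ehdr->e_phnum,
                        &phdr_mmap, &phdr_size, &phdr_table) < 0) {
        return -1;  /* errno is EINVAL for a bad e_phnum, or the mmap() error */
    }

    /* entries are phdr_table[0 .. e_phnum - 1] ... */

    munmap(phdr_mmap, phdr_size);  /* release the mapping when done */
    return 0;
}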
Example #25
static int write_direct_mapping(struct defrag_ctx *c, struct data_extent *e)
{
	struct inode *inode = c->inodes[e->inode_nr];
	__u32 cur_block = e->start_block;
	__u32 cur_logical = e->start_logical;
	__u32 new_block;
	int sync_inode = 0;

	/* Direct blocks */
	for (cur_logical = e->start_logical;
	     cur_logical < EXT2_IND_LBLOCK(&c->sb) && cur_block <= e->end_block;
	     cur_logical++) {
		if (!is_sparse(inode, cur_logical))
			new_block = cur_block++;
		else
			new_block = 0;
		if (inode->on_disk->i_block[cur_logical] != new_block) {
			inode->on_disk->i_block[cur_logical] = new_block;
			sync_inode = 1;
		}
	}
	if (cur_block > e->end_block)
		goto out;

	/* Singly indirect blocks */
	if (cur_logical == EXT2_IND_LBLOCK(&c->sb)) {
		if (is_sparse(inode, cur_logical))
			new_block = 0;
		else
			new_block = cur_block++;
		cur_logical++;
	} else {
		new_block = inode->on_disk->i_block[EXT2_IND_BLOCK];
	}
	if (cur_logical > EXT2_IND_LBLOCK(&c->sb)
	    && cur_logical < EXT2_DIND_LBLOCK(&c->sb)) {
		write_ind_metadata(c, e, new_block, &cur_logical, &cur_block);
	}
	if (inode->on_disk->i_block[EXT2_IND_BLOCK] != new_block) {
		inode->on_disk->i_block[EXT2_IND_BLOCK] = new_block;
		sync_inode = 1;
	}
	if (cur_block > e->end_block)
		goto out;

	/* Doubly indirect blocks */
	if (cur_logical == EXT2_DIND_LBLOCK(&c->sb)) {
		if (is_sparse(inode, cur_logical) || cur_block > e->end_block)
			new_block = 0;
		else
			new_block = cur_block++;
		cur_logical++;
	} else {
		new_block = inode->on_disk->i_block[EXT2_DIND_BLOCK];
	}
	if (cur_logical > EXT2_DIND_LBLOCK(&c->sb)
	    && cur_logical < EXT2_TIND_LBLOCK(&c->sb)) {
		write_dind_metadata(c, e, new_block, &cur_logical, &cur_block);
	}
	if (inode->on_disk->i_block[EXT2_DIND_BLOCK] != new_block) {
		inode->on_disk->i_block[EXT2_DIND_BLOCK] = new_block;
		sync_inode = 1;
	}
	if (cur_block > e->end_block)
		goto out;

	/* Triply indirect blocks */
	if (cur_logical == EXT2_TIND_LBLOCK(&c->sb)) {
		if (is_sparse(inode, cur_logical) || cur_block > e->end_block)
			new_block = 0;
		else
			new_block = cur_block++;
		cur_logical++;
	} else {
		new_block = inode->on_disk->i_block[EXT2_TIND_BLOCK];
	}
	if (cur_logical > EXT2_TIND_LBLOCK(&c->sb)) {
		write_tind_metadata(c, e, new_block, &cur_logical, &cur_block);
	}
	if (inode->on_disk->i_block[EXT2_TIND_BLOCK] != new_block) {
		inode->on_disk->i_block[EXT2_TIND_BLOCK] = new_block;
		sync_inode = 1;
	}

out:
	if (sync_inode)
		/* Assumes the inode is completely within one page */
		return msync(PAGE_START(inode->on_disk), getpagesize(), MS_SYNC);
	return 0;
}
Example #26
/* drop cache except for the header and the active pages */
void rrd_dontneed(
    rrd_file_t *rrd_file,
    rrd_t *rrd)
{
    rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
#if defined USE_MADVISE || defined HAVE_POSIX_FADVISE
    size_t dontneed_start;
    size_t rra_start;
    size_t active_block;
    size_t i;
    ssize_t   _page_size = sysconf(_SC_PAGESIZE);

    if (rrd_file == NULL) {
#if defined DEBUG && DEBUG
	    fprintf (stderr, "rrd_dontneed: Argument 'rrd_file' is NULL.\n");
#endif
	    return;
    }

#if defined DEBUG && DEBUG > 1
    mincore_print(rrd_file, "before");
#endif

    /* ignoring errors from RRDs that are smaller than the file_len+rounding */
    rra_start = rrd_file->header_len;
    dontneed_start = PAGE_START(rra_start) + _page_size;
    for (i = 0; i < rrd->stat_head->rra_cnt; ++i) {
        active_block =
            PAGE_START(rra_start
                       + rrd->rra_ptr[i].cur_row
                       * rrd->stat_head->ds_cnt * sizeof(rrd_value_t));
        if (active_block > dontneed_start) {
#ifdef USE_MADVISE
            madvise(rrd_simple_file->file_start + dontneed_start,
                    active_block - dontneed_start - 1, MADV_DONTNEED);
#endif
/* in linux at least only fadvise DONTNEED seems to purge pages from cache */
#ifdef HAVE_POSIX_FADVISE
            posix_fadvise(rrd_simple_file->fd, dontneed_start,
                          active_block - dontneed_start - 1,
                          POSIX_FADV_DONTNEED);
#endif
        }
        dontneed_start = active_block;
        /* do not release 'hot' block if update for this RRA will occur
         * within 10 minutes */
        if (rrd->stat_head->pdp_step * rrd->rra_def[i].pdp_cnt -
            rrd->live_head->last_up % (rrd->stat_head->pdp_step *
                                       rrd->rra_def[i].pdp_cnt) < 10 * 60) {
            dontneed_start += _page_size;
        }
        rra_start +=
            rrd->rra_def[i].row_cnt * rrd->stat_head->ds_cnt *
            sizeof(rrd_value_t);
    }

    if (dontneed_start < rrd_file->file_len) {
#ifdef USE_MADVISE
	    madvise(rrd_simple_file->file_start + dontneed_start,
		    rrd_file->file_len - dontneed_start, MADV_DONTNEED);
#endif
#ifdef HAVE_POSIX_FADVISE
	    posix_fadvise(rrd_simple_file->fd, dontneed_start,
			  rrd_file->file_len - dontneed_start,
			  POSIX_FADV_DONTNEED);
#endif
    }

#if defined DEBUG && DEBUG > 1
    mincore_print(rrd_file, "after");
#endif
#endif                          /* without madvise and posix_fadvise it does not make much sense to do anything */
}
Example #27
rrd_file_t *rrd_open( const char *const file_name, rrd_t *rrd, 
		unsigned rdwr, int *ret_p) {
	unsigned long ui;
	int       flags = 0;
	int       version;
	int       ret = 0;

#ifdef HAVE_MMAP
	ssize_t   _page_size = sysconf(_SC_PAGESIZE);
	char     *data = MAP_FAILED;
#endif
	off_t     offset = 0;
	struct stat statb;
	rrd_file_t *rrd_file = NULL;
	rrd_simple_file_t *rrd_simple_file = NULL;
	size_t     newfile_size = 0;
	size_t header_len, value_cnt, data_len;

	/* Are we creating a new file? */
	if((rdwr & RRD_CREAT) && (rrd->stat_head != NULL)) {
		header_len = rrd_get_header_size(rrd);

		value_cnt = 0;
		for (ui = 0; ui < rrd->stat_head->rra_cnt; ui++)
			value_cnt += rrd->stat_head->ds_cnt * rrd->rra_def[ui].row_cnt;

		data_len = sizeof(rrd_value_t) * value_cnt;

		newfile_size = header_len + data_len;
	}

	rrd_file = (rrd_file_t*)malloc(sizeof(rrd_file_t));
	if (rrd_file == NULL) {
		*ret_p = -RRD_ERR_MALLOC7;
		return NULL;
	}
	memset(rrd_file, 0, sizeof(rrd_file_t));

	rrd_file->pvt = malloc(sizeof(rrd_simple_file_t));
	if(rrd_file->pvt == NULL) {
		*ret_p = -RRD_ERR_MALLOC8;
		return NULL;
	}
	memset(rrd_file->pvt, 0, sizeof(rrd_simple_file_t));
	rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;

#ifdef DEBUG
	if ((rdwr & (RRD_READONLY | RRD_READWRITE)) ==
			(RRD_READONLY | RRD_READWRITE)) {
		/* Both READONLY and READWRITE were given, which is invalid.  */
		*ret_p = -RRD_ERR_IO1;
		exit(-1);
	}
#endif

#ifdef HAVE_MMAP
	rrd_simple_file->mm_prot = PROT_READ;
	rrd_simple_file->mm_flags = 0;
#endif

	if (rdwr & RRD_READONLY) {
		flags |= O_RDONLY;
#ifdef HAVE_MMAP
# if !defined(AIX)
		rrd_simple_file->mm_flags = MAP_PRIVATE;
# endif
# ifdef MAP_NORESERVE
		rrd_simple_file->mm_flags |= MAP_NORESERVE;  /* readonly, so no swap backing needed */
# endif
#endif
	} else {
		if (rdwr & RRD_READWRITE) {
			flags |= O_RDWR;
#ifdef HAVE_MMAP 
			rrd_simple_file->mm_flags = MAP_SHARED; 
			rrd_simple_file->mm_prot |= PROT_WRITE; 
#endif 
		}
		if (rdwr & RRD_CREAT) {
			flags |= (O_CREAT | O_TRUNC);
		}
		if (rdwr & RRD_EXCL) {
			flags |= O_EXCL;
		}
	}

#ifdef HAVE_MMAP
	if (rdwr & RRD_READAHEAD) {
#ifdef MAP_POPULATE
		rrd_simple_file->mm_flags |= MAP_POPULATE;   /* populate ptes and data */
#endif
#if defined MAP_NONBLOCK
		rrd_simple_file->mm_flags |= MAP_NONBLOCK;   /* just populate ptes */
#endif
	}
#endif

#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
	flags |= O_BINARY;
#endif

	if ((rrd_simple_file->fd = open(file_name, flags, 0666)) < 0) {
		ret = -RRD_ERR_OPEN_FILE;
		goto out_free;
	}

#ifdef USE_STDIO
	if (rdwr & RRD_READWRITE) {
		rrd_simple_file->fp = fdopen(rrd_simple_file->fd, "r+");
	}else{
		rrd_simple_file->fp = fdopen(rrd_simple_file->fd, "r");
	}
	if (rrd_simple_file->fp == NULL){
		ret = -RRD_ERR_OPEN_FILE;
		goto out_close;
	}
#endif

#if defined(HAVE_MMAP) && defined(HAVE_BROKEN_MS_ASYNC)
	if (rdwr & RRD_READWRITE) {    
		/* On some unices the file's mtime does not get updated
		   on msync MS_ASYNC; in order to help them,
		   we update the timestamp at this point.
		   This happens pretty 'close' to the open
		   call, so the chances of a race should be minimal.

		   Maybe ask your vendor to fix your OS ... */    
		utime(file_name,NULL);  
	}
#endif    

	/* Better try to avoid seeks as much as possible. stat may be heavy but
	 * many concurrent seeks are even worse.  */
	if (newfile_size == 0 && ((fstat(rrd_simple_file->fd, &statb)) < 0)) {
		ret = -RRD_ERR_STAT_FILE;
		goto out_close;
	}
	if (newfile_size == 0) {
		rrd_file->file_len = statb.st_size;
	} else {
		rrd_file->file_len = newfile_size;
#ifdef HAVE_POSIX_FALLOCATE
		if (posix_fallocate(rrd_simple_file->fd, 0, newfile_size) == 0){
			/* if all  is well we skip the seeking below */            
			goto no_lseek_necessary;        
		}
#endif

		lseek(rrd_simple_file->fd, newfile_size - 1, SEEK_SET);
		if (write(rrd_simple_file->fd, "\0", 1) == -1){    /* poke */
			ret = -RRD_ERR_WRITE5;
			goto out_close;
		}
		lseek(rrd_simple_file->fd, 0, SEEK_SET);
	}
no_lseek_necessary:
#ifdef HAVE_POSIX_FADVISE
	/* In general we need no read-ahead when dealing with rrd_files.
	   When we stop reading, it is highly unlikely that we start up again.
	   In this manner we actually save time and disk access (and buffer cache).
	   Thanks to Dave Plonka for the idea of using POSIX_FADV_RANDOM here. */
	posix_fadvise(rrd_simple_file->fd, 0, 0, POSIX_FADV_RANDOM);
#endif

#ifdef HAVE_MMAP
#ifndef HAVE_POSIX_FALLOCATE
	/* force allocating the file on the underlying filesystem to prevent any
	 * future bus error when the filesystem is full and we attempt to write
	 * through the file mapping. Filling the file using memset on the file
	 * mapping can also lead to a bus error, so we use the old-fashioned
	 * write().
	 */
	if (rdwr & RRD_CREAT) {
		char     buf[4096];
		unsigned i;

		memset(buf, DNAN, sizeof buf);
		lseek(rrd_simple_file->fd, offset, SEEK_SET);

		for (i = 0; i < (newfile_size - 1) / sizeof buf; ++i) {
			if (write(rrd_simple_file->fd, buf, sizeof buf) == -1) {
				ret = -RRD_ERR_WRITE5;
				goto out_close;
			}
		}

		if (write(rrd_simple_file->fd, buf,
					(newfile_size - 1) % sizeof buf) == -1) {
			ret = -RRD_ERR_WRITE5;
			goto out_close;
		}

		lseek(rrd_simple_file->fd, 0, SEEK_SET);
	}
#endif

	data = mmap(0, rrd_file->file_len, 
			rrd_simple_file->mm_prot, rrd_simple_file->mm_flags,
			rrd_simple_file->fd, offset);

	/* let's see if the first read worked */
	if (data == MAP_FAILED) {
		ret = -RRD_ERR_MMAP;
		goto out_close;
	}
	rrd_simple_file->file_start = data;
#endif
	if (rdwr & RRD_CREAT)
		goto out_done;
#ifdef USE_MADVISE
	if (rdwr & RRD_COPY) {
		/* We will read everything in a moment (copying) */
		madvise(data, rrd_file->file_len, MADV_WILLNEED );
		madvise(data, rrd_file->file_len, MADV_SEQUENTIAL );
	} else {
		/* We do not need to read anything in for the moment */
		madvise(data, rrd_file->file_len, MADV_RANDOM);
		/* the stat_head will be needed soonish, so hint accordingly */
		madvise(data, sizeof(stat_head_t), MADV_WILLNEED);
		madvise(data, sizeof(stat_head_t), MADV_RANDOM);
	}
#endif

	__rrd_read(rrd->stat_head, stat_head_t, 1);

	/* let's do some tests to check we are on track ... */
	if (memcmp(rrd->stat_head->cookie, RRD_COOKIE, sizeof(RRD_COOKIE)) != 0) {
		ret = -RRD_ERR_FILE;
		goto out_nullify_head;
	}

	if (rrd->stat_head->float_cookie != FLOAT_COOKIE) {
		ret = -RRD_ERR_FILE1;
		goto out_nullify_head;
	}

	version = atoi(rrd->stat_head->version);

	if (version > atoi(RRD_VERSION)) {
		ret = -RRD_ERR_FILE2;
		goto out_nullify_head;
	}
#if defined USE_MADVISE
	/* the ds_def will be needed soonish, so hint accordingly */
	madvise(data + PAGE_START(offset),
			sizeof(ds_def_t) * rrd->stat_head->ds_cnt, MADV_WILLNEED);
#endif
	__rrd_read(rrd->ds_def, ds_def_t, rrd->stat_head->ds_cnt);

#if defined USE_MADVISE
	/* the rra_def will be needed soonish, so hint accordingly */
	madvise(data + PAGE_START(offset),
			sizeof(rra_def_t) * rrd->stat_head->rra_cnt, MADV_WILLNEED);
#endif
	__rrd_read(rrd->rra_def, rra_def_t,
			rrd->stat_head->rra_cnt);

	/* handle different format for the live_head */
	if (version < 3) {
		rrd->live_head = (live_head_t *) malloc(sizeof(live_head_t));
		if (rrd->live_head == NULL) {
			ret = -RRD_ERR_MALLOC9;
			goto out_close;
		}
#if defined USE_MADVISE
		/* the live_head will be needed soonish, so hint accordingly */
		madvise(data + PAGE_START(offset), sizeof(time_t), MADV_WILLNEED);
#endif
		__rrd_read(rrd->legacy_last_up, time_t,
				1);

		rrd->live_head->last_up = *rrd->legacy_last_up;
		rrd->live_head->last_up_usec = 0;
	} else {
#if defined USE_MADVISE
		/* the live_head will be needed soonish, so hint accordingly */
		madvise(data + PAGE_START(offset),
				sizeof(live_head_t), MADV_WILLNEED);
#endif
		__rrd_read(rrd->live_head, live_head_t,
				1);
	}
	__rrd_read(rrd->pdp_prep, pdp_prep_t,
			rrd->stat_head->ds_cnt);
	__rrd_read(rrd->cdp_prep, cdp_prep_t,
			rrd->stat_head->rra_cnt * rrd->stat_head->ds_cnt);
	__rrd_read(rrd->rra_ptr, rra_ptr_t,
			rrd->stat_head->rra_cnt);

	rrd_file->header_len = offset;
	rrd_file->pos = offset;

	{
		unsigned long row_cnt = 0;

		for (ui=0; ui<rrd->stat_head->rra_cnt; ui++)
			row_cnt += rrd->rra_def[ui].row_cnt;

		size_t  correct_len = rrd_file->header_len +
			sizeof(rrd_value_t) * row_cnt * rrd->stat_head->ds_cnt;

		if (correct_len > rrd_file->file_len) {
			ret = -RRD_ERR_FILE3;
			goto out_nullify_head;
		}
	}

out_done:
	return (rrd_file);
out_nullify_head:
	rrd->stat_head = NULL;
out_close:
#ifdef HAVE_MMAP
	if (data != MAP_FAILED)
		munmap(data, rrd_file->file_len);
#endif
#ifdef USE_STDIO
	if (rrd_simple_file->fp)
		fclose(rrd_simple_file->fp);
	else
		close(rrd_simple_file->fd);

#else
	close(rrd_simple_file->fd);
#endif
out_free:
	free(rrd_file->pvt);
	free(rrd_file);
	*ret_p = ret;
	return NULL;
}
Example #28
/* For 32-bit build, supports looking for x64 marker (in WOW64 process).
 * For 64-bit build, only supports looking for x64 marker.
 */
static int
read_and_verify_dr_marker_common(HANDLE process, dr_marker_t *marker, bool x64)
{
    byte buf[8]; /* only needs to be 5, but dword pad just in case */
    size_t res;
    void *target = NULL;
#if !defined(NOT_DYNAMORIO_CORE) && !defined(NOT_DYNAMORIO_CORE_PROPER)
    GET_NTDLL(DR_MARKER_HOOKED_FUNCTION, DR_MARKER_HOOKED_FUNCTION_ARGS);
    void *hook_func = (void *)DR_MARKER_HOOKED_FUNCTION;
#else
    if (IF_X64_ELSE(!x64, x64 && !is_wow64_process(NT_CURRENT_PROCESS)))
        return DR_MARKER_ERROR;
    if (x64) {
# ifndef X64
        uint64 hook_func = get_proc_address_64
            (get_module_handle_64(L_DR_MARKER_HOOKED_DLL),
             DR_MARKER_HOOKED_FUNCTION_STRING);
        uint64 landing_pad = 0;
        if (hook_func == 0)
            return DR_MARKER_ERROR;
        if (!NT_SUCCESS(nt_wow64_read_virtual_memory64(process, hook_func, buf, 5, &res))
            || res != 5) {
            return DR_MARKER_ERROR;
        }
        if (buf[0] != OP_jmp_byte)
            return DR_MARKER_NOT_FOUND;

        /* jmp offset + EIP (after jmp = hook_func + size of jmp (5 bytes)) */
        /* for 64-bit, the target is stored in front of the trampoline */
        landing_pad = *(int *)&buf[1] + hook_func + 5 - 8;
        if (!NT_SUCCESS(nt_wow64_read_virtual_memory64(process, landing_pad, buf, 8,
                                                        &res)) ||
            res != 8U)
            return DR_MARKER_ERROR;
        /* trampoline address is stored at the top of the landing pad for 64-bit */
        target = (void *)PAGE_START(*(ptr_int_t *)buf);
    } else {
# endif /* !X64 */
        void *hook_func = (void *)GetProcAddress(GetModuleHandle(DR_MARKER_HOOKED_DLL),
                                                 DR_MARKER_HOOKED_FUNCTION_STRING);
#endif
        void *landing_pad;
        if (hook_func == NULL)
            return DR_MARKER_ERROR;
        if (!READ_FUNC(process, hook_func, buf, 5, &res) || res != 5)
            return DR_MARKER_ERROR;
        if (buf[0] != OP_jmp_byte)
            return DR_MARKER_NOT_FOUND;

        /* jmp offset + EIP (after jmp = hook_func + size of jmp (5 bytes)) */
        landing_pad = (void *)(*(int *)&buf[1] + (ptr_int_t)hook_func + 5);
        /* for 64-bit, the target is stored in front of the trampoline */
        if (x64)
            landing_pad = (byte *)landing_pad - 8;
        /* see emit_landing_pad_code() for layout of landing pad */
        if (!READ_FUNC(process, landing_pad, buf, (x64 ? 8 : 5), &res) ||
            res != (x64 ? 8U : 5U))
            return DR_MARKER_ERROR;
        if (x64) {
            /* trampoline address is stored at the top of the landing pad for 64-bit */
            target = (void *)PAGE_START(*(ptr_int_t *)buf);
        } else {
            /* jmp offset + EIP (after jmp = landing_pad + size of jmp (5 bytes)) */
            target = (void *)PAGE_START(*(int *)&buf[1] + (ptr_int_t)landing_pad + 5);
        }
#if defined(NOT_DYNAMORIO_CORE) || defined(NOT_DYNAMORIO_CORE_PROPER)
    }
#endif

    if (target == NULL)
        return DR_MARKER_ERROR;
    if (!READ_FUNC(process, target, marker, sizeof(dr_marker_t), &res) ||
        res != sizeof(dr_marker_t)) {
        return DR_MARKER_NOT_FOUND;
    }

    if (dr_marker_verify(process, marker)) {
        return DR_MARKER_FOUND;
    }

    return DR_MARKER_NOT_FOUND; /* probably some other hooker */
}
Example #29
/* Map all loadable segments in process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 *   load_bias     -> load offset.
 *   fd            -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         int               fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* if the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                    seg_page_end - seg_file_end,
                                    PFLAGS_TO_PROT(phdr->p_flags),
                                    MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                    -1,
                                    0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}
Example #30
ULONG
RtlWalkFrameChain (

    OUT PVOID *Callers,
    IN ULONG Count,
    IN ULONG Flags)

/*++

Routine Description:

    RtlWalkFrameChain

Description:

    This function tries to walk the EBP chain and fill out a vector of
    return addresses. The function works only on x86. It is possible that
    the function cannot fill the requested number of callers because somewhere
    on the stack we have a function compiled with FPO (the frame register (EBP)
    is used as a normal register). In this case the function will just return
    with a smaller count than requested. In kernel mode the function should not
    take any exceptions (page faults) because it can be called at all sorts of
    irql levels.

    The `Flags' parameter is used for future extensions. A zero value will be
    compatible with new stack walking algorithms.

    Note. The algorithm can be somewhat improved by unassembling the return
    addresses identified. However this is impractical in kernel mode because
    the function might get called at high irql levels where page faults are
    not allowed.

Return value:

    The number of identified return addresses on the stack. This can be less
    than the Count requested if the stack ends or we encounter an FPO-compiled
    function.

--*/

{
#if defined(_X86_)

    ULONG_PTR Fp, NewFp, ReturnAddress;
    ULONG Index;
    ULONG_PTR StackEnd, StackStart;
    BOOLEAN Result;

    //
    // Get the current EBP pointer which is supposed to
    // be the start of the EBP chain.
    //

    _asm mov Fp, EBP;

    StackStart = Fp;

#if _KERNEL_MODE_STACK_TRACES_

    StackEnd = (ULONG_PTR)(KeGetCurrentThread()->StackBase);

    //
    // bugbug: find a reliable way to get the stack limit in kernel mode.
    // `StackBase' is not a reliable way to get the stack end in kernel
    // mode because we might execute a DPC routine on thread's behalf.
    // There are a few other reasons why we cannot trust this completely.
    //
    // Note. The condition `PAGE_START(StackEnd) - PAGE_START(StackStart) > PAGE_SIZE'
    // is not totally safe. We can encounter a situation where in this case we
    // do not have the same stack. Can we?
    //
    // The DPC stack is actually the stack of the idle thread corresponding to
    // the current processor. Based on that we probably can figure out in almost
    // all contexts what are the real limits of the stack.
    //

    if ((StackStart > StackEnd)
        || (PAGE_START(StackEnd) - PAGE_START(StackStart) > PAGE_SIZE)) {

        StackEnd = (StackStart + PAGE_SIZE) & ~((ULONG_PTR)PAGE_SIZE - 1);
    
        //
        // Try to get one more page if possible. Note that this is not
        // 100% reliable because a non faulting address can fault if
        // appropriate locks are not held.
        //

        if (MmIsAddressValid ((PVOID)StackEnd)) {
            StackEnd += PAGE_SIZE;
        }
    }

#else

    StackEnd = (ULONG_PTR)(NtCurrentTeb()->NtTib.StackBase);

#endif // #if _KERNEL_MODE_STACK_TRACES_

    try {

        for (Index = 0; Index < Count; Index++) {

            if (Fp + sizeof(ULONG_PTR) >= StackEnd) {
                break;
            }

            NewFp = *((PULONG_PTR)(Fp + 0));
            ReturnAddress = *((PULONG_PTR)(Fp + sizeof(ULONG_PTR)));

            //
            // Figure out if the new frame pointer is ok. This validation
            // should avoid all exceptions in kernel mode because we always
            // read within the current thread's stack and the stack is
            // guaranteed to be in memory (no page faults). It is also guaranteed
            // that we do not take random exceptions in user mode because we always
            // keep the frame pointer within stack limits.
            //

            if (! (Fp < NewFp && NewFp < StackEnd)) {
                break;
            }

            //
            // Figure out if the return address is ok. If return address
            // is a stack address or <64k then something is wrong. There is
            // no reason to return garbage to the caller therefore we stop.
            //

            if (StackStart < ReturnAddress && ReturnAddress < StackEnd) {
                break;
            }

            if (ReturnAddress < 64 * SIZE_1_KB) {
                break;
            }

            //
            // Store new fp and return address and move on.
            //

            Fp = NewFp;
            Callers[Index] = (PVOID)ReturnAddress;
        }
    }
    except (EXCEPTION_EXECUTE_HANDLER) {

        //
        // The frame traversal algorithm is written so that we should
        // not get any exception. Therefore if we get some exception
        // we better debug it.
        //
        // bugbug: enable bkpt only on checked builds
        // After we get some coverage on this we should leave it active
        // only on checked builds.
        //

        DbgPrint ("Unexpected exception in RtlWalkFrameChain ...\n");
        DbgBreakPoint ();
    }

    //
    // Return the number of return addresses identified on the stack.
    //

#if _COLLECT_FRAME_WALK_STATISTICS_
    CollectFrameWalkStatistics (Index);
#endif // #if _COLLECT_FRAME_WALK_STATISTICS_

    return Index;

#else

    return 0;

#endif // #if defined(_X86_)
}