Example #1
boolean_t xmon_update_view(const guest_vcpu_t *vcpu_id, view_handle_t view,
			   view_update_type_t update_type, gpa_t base_gpa,
			   hpa_t base_hpa, mam_attributes_t attr,
			   uint64_t eptp_list_index)
{
	boolean_t status;
	gpa_t align_base_gpa;
	mam_attributes_t old_attr;
	hpa_t hpa = 0, align_base_hpa;
	guest_view_t *guest_view = (guest_view_t *)view;

	MON_ASSERT(vcpu_id);
	MON_ASSERT(guest_view);

	/* ensure address is aligned */
	align_base_gpa = ALIGN_BACKWARD(base_gpa, PAGE_4KB_SIZE);

	/* Remove mapping and return */
	if (update_type == VIEW_REMOVE_MAPPING) {
		status = mon_gpm_remove_mapping(guest_view->gpm, align_base_gpa,
			PAGE_4KB_SIZE);
		MON_ASSERT(status);
		return status;
	}

	if (update_type == VIEW_UPDATE_PERMISSIONS_ONLY) {
		status = mon_gpm_gpa_to_hpa(guest_view->gpm, align_base_gpa,
			&hpa, &old_attr);
		MON_ASSERT(status);
	} else if (update_type == VIEW_UPDATE_MAPPING) {
		hpa = base_hpa;
	}

	/* ensure address is aligned */
	align_base_hpa = ALIGN_BACKWARD(hpa, PAGE_4KB_SIZE);

	status = mon_gpm_add_mapping(guest_view->gpm, align_base_gpa,
		align_base_hpa, PAGE_4KB_SIZE, attr);
	MON_ASSERT(status);

	if (update_type == VIEW_UPDATE_PERMISSIONS_ONLY) {
		/* Optimization - Update EPT as well if only permissions are updated */
		if (guest_view->address_space == MAM_INVALID_HANDLE) {
			if (xmon_recreate_view(vcpu_id->guest_id,
				    (view_handle_t)guest_view,
				    eptp_list_index) == FALSE) {
				return FALSE;
			}
		}

		status = mon_mam_overwrite_permissions_in_existing_mapping(
			guest_view->address_space, align_base_gpa,
			PAGE_4KB_SIZE, attr);
		MON_ASSERT(status);
	}

	return status;
}
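
Every example in this list rounds an address down (ALIGN_BACKWARD) or up (ALIGN_FORWARD) to a power-of-two boundary. The exact macros differ between the codebases shown here (and the single-argument ALIGN_BACKWARD in Examples #10 and #11 is a different, Kaffe-specific macro); a minimal sketch of the usual definitions, assuming the alignment is a power of two:

#include <stdint.h>

/* Round x down / up to a multiple of alignment, which must be a power of
 * two (hypothetical sketch, not taken verbatim from any codebase above). */
#define ALIGN_BACKWARD(x, alignment) \
    (((uintptr_t)(x)) & ~((uintptr_t)(alignment) - 1))
#define ALIGN_FORWARD(x, alignment) \
    ((((uintptr_t)(x)) + ((uintptr_t)(alignment) - 1)) & \
     ~((uintptr_t)(alignment) - 1))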
Example #2
static bool
counter_crosses_cache_line(byte *addr, size_t size)
{
    size_t cache_line_size = proc_get_cache_line_size();
    if (ALIGN_BACKWARD(addr, cache_line_size) ==
        ALIGN_BACKWARD(addr+size-1, cache_line_size))
        return false;
    return true;
}
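
As a quick check of the predicate above: an access stays within one cache line iff its first and last byte round down to the same line start. A standalone version with the line size fixed at 64 bytes (hypothetical values in main; proc_get_cache_line_size() is replaced by a constant):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64
#define ALIGN_BACKWARD(x, a) (((uintptr_t)(x)) & ~((uintptr_t)(a) - 1))

static bool crosses_line(uintptr_t addr, size_t size)
{
    /* same idea as counter_crosses_cache_line() above */
    return ALIGN_BACKWARD(addr, CACHE_LINE) !=
           ALIGN_BACKWARD(addr + size - 1, CACHE_LINE);
}

int main(void)
{
    printf("%d\n", crosses_line(0x1000, 64)); /* 0: bytes 0x1000-0x103f, one line */
    printf("%d\n", crosses_line(0x103c, 8));  /* 1: straddles the 0x1040 boundary */
    return 0;
}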
Example #3
static
dr_signal_action_t signal_event_redirect(void *dcontext, dr_siginfo_t *info)
{
    if (info->sig == SIGSEGV) {
        app_pc addr;
        module_data_t *data = dr_lookup_module_by_name("client."EVENTS);
        dr_fprintf(STDERR, "signal event redirect\n");
        if (data == NULL) {
            dr_fprintf(STDERR, "couldn't find client."EVENTS" module\n");
            return DR_SIGNAL_DELIVER;
        }
        addr = (app_pc)dr_get_proc_address(data->handle, "redirect");
        dr_free_module_data(data);
        if (addr == NULL) {
            dr_fprintf(STDERR, "Couldn't find function redirect in client."EVENTS"\n");
            return DR_SIGNAL_DELIVER;
        }
#ifdef X64
        /* align properly in case redirect function relies on conventions (i#384) */
        info->mcontext->xsp = ALIGN_BACKWARD(info->mcontext->xsp, 16) - sizeof(void*);
#endif
        info->mcontext->pc = addr;
        return DR_SIGNAL_REDIRECT;
    }
    return DR_SIGNAL_DELIVER;
}
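
The #ifdef X64 adjustment above (repeated in Example #4 below) mimics what a call instruction leaves behind: the x86-64 ABI wants xsp 16-byte aligned at the call site, so on entry the callee sees xsp ≡ 8 (mod 16). A small illustration of the arithmetic, with a made-up starting value:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_BACKWARD(x, a) (((uintptr_t)(x)) & ~((uintptr_t)(a) - 1))

int main(void)
{
    uintptr_t xsp = 0x7fffffffe4c9;                 /* arbitrary, unaligned */
    xsp = ALIGN_BACKWARD(xsp, 16) - sizeof(void *); /* as in the redirect code */
    /* prints 0x7fffffffe4b8, i.e. xsp % 16 == 8, as after a "call" */
    printf("%#lx (mod 16 = %lu)\n",
           (unsigned long)xsp, (unsigned long)(xsp % 16));
    return 0;
}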
Example #4
static
bool exception_event_redirect(void *dcontext, dr_exception_t *excpt)
{
    app_pc addr;
    dr_mcontext_t mcontext = {sizeof(mcontext),DR_MC_ALL,};
    module_data_t *data = dr_lookup_module_by_name("client."EVENTS".exe");
    dr_fprintf(STDERR, "exception event redirect\n");
    if (data == NULL) {
        dr_fprintf(STDERR, "couldn't find "EVENTS".exe module\n");
        return true;
    }
    addr = (app_pc)dr_get_proc_address(data->handle, "redirect");
    dr_free_module_data(data);
    if (addr == NULL) {
        dr_fprintf(STDERR, "Couldn't find function redirect in "EVENTS".exe\n");
        return true;
    }
    mcontext = *excpt->mcontext;
    mcontext.pc = addr;
#ifdef X64
    /* align properly in case redirect function relies on conventions (i#419) */
    mcontext.xsp = ALIGN_BACKWARD(mcontext.xsp, 16) - sizeof(void*);
#endif
    dr_redirect_execution(&mcontext);
    dr_fprintf(STDERR, "should not be reached, dr_redirect_execution() should not return\n");
    return true;
}
Example #5
bool 
is_readable_without_exception(const byte *pc, size_t size)
{
    /* Case 7967: NtReadVirtualMemory is significantly faster than
     * NtQueryVirtualMemory (probably even for large regions where NtQuery can
     * walk by mbi.RegionSize but we have to walk by page size).  We don't care
     * if multiple threads write into the buffer at once.  Nearly all of our
     * calls ask about areas smaller than a page.
     */
    byte *check_pc = (byte *) ALIGN_BACKWARD(pc, PAGE_SIZE);
    if (size > (size_t)((byte *)POINTER_MAX - pc))
        size = (byte *)POINTER_MAX - pc;
    do {
        size_t bytes_read = 0;
#if defined(NOT_DYNAMORIO_CORE)
        if (!ReadProcessMemory(NT_CURRENT_PROCESS, check_pc, is_readable_buf,
                               sizeof(is_readable_buf), (SIZE_T*) &bytes_read) ||
            bytes_read != sizeof(is_readable_buf))
#else
        if (!nt_read_virtual_memory(NT_CURRENT_PROCESS, check_pc, is_readable_buf,
                                    sizeof(is_readable_buf), &bytes_read) ||
            bytes_read != sizeof(is_readable_buf))
#endif
            return false;
        check_pc += PAGE_SIZE;
    } while (check_pc != 0/*overflow*/ && check_pc < pc+size);
    return true;
}
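
The clamp against POINTER_MAX and the check_pc != 0 test above both guard against pointer wraparound near the top of the address space. A stripped-down sketch of the same page walk, with the NtReadVirtualMemory probe replaced by a hypothetical is_page_readable() stub:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u
#define ALIGN_BACKWARD(x, a) (((uintptr_t)(x)) & ~((uintptr_t)(a) - 1))

/* hypothetical stub standing in for the actual read probe */
static bool is_page_readable(uintptr_t page)
{
    (void)page;
    return true; /* always readable in this sketch */
}

/* same shape as is_readable_without_exception(): clamp, then walk pages */
static bool range_readable(uintptr_t pc, size_t size)
{
    uintptr_t check = ALIGN_BACKWARD(pc, PAGE_SIZE);
    if (size > (size_t)(UINTPTR_MAX - pc)) /* avoid pc + size wrapping */
        size = UINTPTR_MAX - pc;
    do {
        if (!is_page_readable(check))
            return false;
        check += PAGE_SIZE;
    } while (check != 0 /* overflow */ && check < pc + size);
    return true;
}

int main(void)
{
    printf("%d\n", range_readable(0x400123, 0x2500)); /* probes 3 pages */
    return 0;
}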
Example #6
void dc_invalidaterange(void *start, u32 size)
{
	u32 cookie = irq_kill();
	void *end = ALIGN_FORWARD(((u8*)start) + size, LINESIZE);
	start = ALIGN_BACKWARD(start, LINESIZE);
	_dc_inval_entries(start, (end - start) / LINESIZE);
	ahb_flush_to(AHB_STARLET);
	irq_restore(cookie);
}
Example #7
void dc_flushrange(const void *start, u32 size)
{
	u32 cookie = irq_kill();
	if(size > 0x4000) {
		_dc_flush();
	} else {
		void *end = ALIGN_FORWARD(((u8*)start) + size, LINESIZE);
		start = ALIGN_BACKWARD(start, LINESIZE);
		_dc_flush_entries(start, (end - start) / LINESIZE);
	}
	_drain_write_buffer();
	ahb_flush_from(AHB_1);
	irq_restore(cookie);
}
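
Examples #6 and #7 round the range outward, start down and end up, so every cache line overlapping [start, start+size) is covered. With LINESIZE assumed to be 32 bytes (illustrative only), the line count works out as:

#include <stdint.h>
#include <stdio.h>

#define LINESIZE 32u
#define ALIGN_BACKWARD(x, a) (((uintptr_t)(x)) & ~((uintptr_t)(a) - 1))
#define ALIGN_FORWARD(x, a)  (((uintptr_t)(x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
    uintptr_t start = 0x12345678, size = 100;
    uintptr_t end = ALIGN_FORWARD(start + size, LINESIZE); /* 0x123456e0 */
    start = ALIGN_BACKWARD(start, LINESIZE);               /* 0x12345660 */
    printf("%lu lines\n", (unsigned long)((end - start) / LINESIZE)); /* 4 */
    return 0;
}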
Example #8
/* helper for find_vm_areas_via_probe() and get_memory_info_from_os()
 * returns the passed-in pc if the probe was successful; else, returns
 * where the next probe should be (to skip DR memory).
 * if the probe was successful, returns in prot the results.
 */
static app_pc
probe_address(dcontext_t *dcontext, app_pc pc_in, byte *our_heap_start,
              byte *our_heap_end, OUT uint *prot)
{
    app_pc base;
    size_t size;
    app_pc pc = (app_pc)ALIGN_BACKWARD(pc_in, PAGE_SIZE);
    ASSERT(ALIGNED(pc, PAGE_SIZE));
    ASSERT(prot != NULL);
    *prot = MEMPROT_NONE;

    /* skip our own vmheap */
    if (pc >= our_heap_start && pc < our_heap_end)
        return our_heap_end;
    /* if no vmheap and we probe our own stack, the SIGSEGV handler will
     * report stack overflow as it checks that prior to handling TRY
     */
    if (is_stack_overflow(dcontext, pc))
        return pc + PAGE_SIZE;
#ifdef VMX86_SERVER
    /* Workaround for PR 380621 */
    if (is_vmkernel_addr_in_user_space(pc, &base)) {
        LOG(GLOBAL, LOG_VMAREAS, 4, "%s: skipping vmkernel region " PFX "-" PFX "\n",
            __func__, pc, base);
        return base;
    }
#endif
    /* Only for find_vm_areas_via_probe(), skip modules added by
     * dl_iterate_get_areas_cb.  Subsequent probes are about getting
     * info from the OS, so do the actual probe.  See PR 410907.
     */
    if (!dynamo_initialized && get_memory_info(pc, &base, &size, prot))
        return base + size;

    TRY_EXCEPT(dcontext, /* try */
               {
                   PROBE_READ_PC(pc);
                   *prot |= MEMPROT_READ;
               },
               /* except */
               {
                   /* nothing: just continue */
               });
    /* probe done: any readability result is now in *prot */
    return pc;
}
Example #9
bool
handle_mem_ref(uint flags, app_loc_t *loc, byte *addr, size_t sz, dr_mcontext_t *mc)
{
    byte *ptr;
    /* We're piggybacking on Dr. Memory syscall, etc. code.  For reads
     * and writes we want to mark the shadow byte to indicate the
     * memory was accessed.  For an addressability check we do
     * nothing.
     */
    if (TEST(MEMREF_CHECK_ADDRESSABLE, flags))
        return true;
    /* We ignore MEMREF_MOVS, etc.: we don't propagate anything */
    for (ptr = (byte *) ALIGN_BACKWARD(addr, SHADOW_GRANULARITY);
         ptr < (byte *) ALIGN_FORWARD(addr + sz, SHADOW_GRANULARITY);
         ptr += SHADOW_GRANULARITY) {
        shadow_set_byte(ptr, 1);
    }
    return true;
}
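
The loop bounds above use the same outward rounding at shadow granularity, so a reference that straddles a granule boundary marks every granule it touches. A worked example, with SHADOW_GRANULARITY assumed to be 4 bytes (hypothetical value):

#include <stdint.h>
#include <stdio.h>

#define SHADOW_GRANULARITY 4u
#define ALIGN_BACKWARD(x, a) (((uintptr_t)(x)) & ~((uintptr_t)(a) - 1))
#define ALIGN_FORWARD(x, a)  (((uintptr_t)(x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
    uintptr_t addr = 0x1003, sz = 6; /* bytes 0x1003..0x1008 */
    uintptr_t p;
    for (p = ALIGN_BACKWARD(addr, SHADOW_GRANULARITY);
         p < ALIGN_FORWARD(addr + sz, SHADOW_GRANULARITY);
         p += SHADOW_GRANULARITY) {
        printf("granule %#lx\n", (unsigned long)p); /* 0x1000, 0x1004, 0x1008 */
    }
    return 0;
}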
Example #10
/**
 * Call KaffeGC_rmRef with corrected object address. 
 *
 * @param collector the garbage collector
 * @param mem the object
 *
 * @return TRUE if a reference could be removed, otherwise FALSE.
 */
bool
BoehmGC_rmRef(Collector *collector, void* mem)
{
  return KaffeGC_rmRef(collector, ALIGN_BACKWARD(mem));
}
Example #11
/**
 * Call KaffeGC_addRef with corrected object address.
 * 
 * @param collector the garbage collector
 * @param mem the object
 *
 * @return TRUE if a reference could be added, otherwise FALSE.
 */
bool
BoehmGC_addRef(Collector *collector, const void* mem)
{
  return KaffeGC_addRef(collector, ALIGN_BACKWARD(mem));
}
Example #12
/* callback for dl_iterate_phdr() for adding existing modules to our lists */
static int
dl_iterate_get_areas_cb(struct dl_phdr_info *info, size_t size, void *data)
{
    int *count = (int *)data;
    uint i;
    /* see comments in dl_iterate_get_path_cb() */
    app_pc modend;
    app_pc min_vaddr = module_vaddr_from_prog_header((app_pc)info->dlpi_phdr,
                                                     info->dlpi_phnum, NULL, &modend);
    app_pc modbase = info->dlpi_addr + min_vaddr;
    size_t modsize = modend - min_vaddr;
    LOG(GLOBAL, LOG_VMAREAS, 2,
        "dl_iterate_get_areas_cb: addr=" PFX " hdrs=" PFX " base=" PFX " name=%s\n",
        info->dlpi_addr, info->dlpi_phdr, modbase, info->dlpi_name);
    ASSERT(info->dlpi_phnum == module_num_program_headers(modbase));

    ASSERT(count != NULL);
    if (*count == 0) {
        /* since we don't get a name for the executable, for now we
         * assume that the first iter is the executable itself.
         * XXX: this seems to hold, but there's no guarantee: can we do better?
         */
        executable_start = modbase;
    }

#ifndef X64
    if (modsize == PAGE_SIZE && info->dlpi_name[0] == '\0') {
        /* Candidate for VDSO.  Xref PR 289138 on using AT_SYSINFO to locate. */
        /* Xref VSYSCALL_PAGE_START_HARDCODED but later linuxes randomize */
        char *soname;
        if (module_walk_program_headers(modbase, modsize, false,
                                        true, /* i#1589: ld.so relocated .dynamic */
                                        NULL, NULL, NULL, &soname, NULL) &&
            strncmp(soname, VSYSCALL_PAGE_SO_NAME, strlen(VSYSCALL_PAGE_SO_NAME)) == 0) {
            ASSERT(!dynamo_initialized); /* .data should be +w */
            ASSERT(vsyscall_page_start == NULL);
            vsyscall_page_start = modbase;
            LOG(GLOBAL, LOG_VMAREAS, 1, "found vsyscall page @ " PFX "\n",
                vsyscall_page_start);
        }
    }
#endif
    if (modbase != vsyscall_page_start)
        module_list_add(modbase, modsize, false, info->dlpi_name, 0 /*don't have inode*/);

    for (i = 0; i < info->dlpi_phnum; i++) {
        app_pc start, end;
        uint prot;
        size_t align;
        if (module_read_program_header(modbase, i, &start, &end, &prot, &align)) {
            start += info->dlpi_addr;
            end += info->dlpi_addr;
            LOG(GLOBAL, LOG_VMAREAS, 2, "\tsegment %d: " PFX "-" PFX " %s align=%d\n", i,
                start, end, memprot_string(prot), align);
            start = (app_pc)ALIGN_BACKWARD(start, PAGE_SIZE);
            end = (app_pc)ALIGN_FORWARD(end, PAGE_SIZE);
            LOG(GLOBAL, LOG_VMAREAS, 4,
                "find_executable_vm_areas: adding: " PFX "-" PFX " prot=%d\n", start, end,
                prot);
            all_memory_areas_lock();
            update_all_memory_areas(start, end, prot, DR_MEMTYPE_IMAGE);
            all_memory_areas_unlock();
            if (app_memory_allocation(NULL, start, end - start, prot,
                                      true /*image*/
                                      _IF_DEBUG("ELF SO")))
                (*count)++;
        }
    }
    return 0; /* keep iterating */
}
Example #13
int
main(int argc, char *argv[])
{
    byte *dll_1, *dll_2, *p1, *p2, *iat_start1, *iat_end1, *iat_start2, *iat_end2;
    bool has_iat = false;
    MEMORY_BASIC_INFORMATION info;
    void *drcontext = dr_standalone_init();
    uint writable_pages = 0, reserved_pages = 0, IAT_pages = 0;
    uint matched_pages = 0, second_matched_pages = 0, unmatched_pages = 0;
    uint exact_match_pages = 0, exact_no_match_pages = 0;
    char reloc_file[MAX_PATH] = {0}, orig_file[MAX_PATH], *input_file;
    uint old_size = 0, new_size = 0;
    uint old_base = 0, new_base = 0x69000000; /* unlikely to collide */

    /* user specified option defaults */
    uint arg_offs = 1;
    bool use_second_pass = true;
    bool assume_header_match = true;
    uint second_pass_offset = 16; /* FIXME arbitrary, what's a good choice? */
    bool assume_IAT_written = true;
    bool spin_for_debugger = false;

    if (argc < 2)
        return usage(argv[0]);
    while (argv[arg_offs][0] == '-') {
        if (strcmp(argv[arg_offs], "-vv") == 0) {
            vv = true;
        } else if (strcmp(argv[arg_offs], "-v") == 0) {
            v = true;
        } else if (strcmp(argv[arg_offs], "-no_second_pass") == 0) {
            use_second_pass = false;
        } else if (strcmp(argv[arg_offs], "-second_pass_offset") == 0) {
            if ((uint)argc <= arg_offs+1)
                return usage(argv[0]);
            second_pass_offset = atoi(argv[++arg_offs]);
        } else if (strcmp(argv[arg_offs], "-no_assume_IAT_written") == 0) {
            assume_IAT_written = false;
        } else if (strcmp(argv[arg_offs], "-spin_for_debugger") == 0) {
            spin_for_debugger = true;
        } else {
            return usage(argv[0]);
        }
        arg_offs++;
    }   
    input_file = argv[arg_offs++];
    if (arg_offs != argc)
        return usage(argv[0]);
    
    _snprintf(reloc_file, sizeof(reloc_file), "%s.reloc.dll", input_file);
    reloc_file[sizeof(reloc_file)-1] = '\0';
    if (!CopyFile(input_file, reloc_file, FALSE)) {
        LPSTR msg = NULL;
        uint error = GetLastError();
        /* FORMAT_MESSAGE_ALLOCATE_BUFFER needs the address of the buffer
         * pointer; also reuse the saved error code */
        FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_ALLOCATE_BUFFER,
                      0, error, 0, (LPSTR)&msg, 0, NULL);
        VERBOSE_PRINT("Copy Error %d (0x%x) = %s\n", error, error, msg);
        return 1;
    }
    _snprintf(orig_file, sizeof(orig_file), "%s.orig.dll", input_file);
    orig_file[sizeof(orig_file)-1] = '\0';
    if (!CopyFile(input_file, orig_file, FALSE)) {
        LPSTR msg = NULL;
        uint error = GetLastError();
        FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_ALLOCATE_BUFFER,
                      0, error, 0, (LPSTR)&msg, 0, NULL);
        VERBOSE_PRINT("Copy Error %d (0x%x) = %s\n", error, error, msg);
        return 1;
    }
    if (ReBaseImage(reloc_file, "", TRUE, FALSE, FALSE, 0, &old_size, &old_base,
                    &new_size, &new_base, 0)) {
        VERBOSE_PRINT("Rebased imsage \"%s\" from 0x%08x to 0x%08x\n"
                      "Size changed from %d bytes to %d bytes\n",
                      input_file, old_base, new_base, old_size, new_size);
    } else {
        LPSTR msg = NULL;
        uint error = GetLastError();
        FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_ALLOCATE_BUFFER,
                      0, error, 0, (LPSTR)&msg, 0, NULL);
        VERBOSE_PRINT("Rebase Error %d (0x%x) = %s\n", error, error, msg);
        return 1;
    }
    
    dll_1 = (byte *)ALIGN_BACKWARD(LoadLibraryExA(orig_file, NULL,
                                                  DONT_RESOLVE_DLL_REFERENCES),
                                   PAGE_SIZE);
    p1 = dll_1;
    dll_2 = (byte *)ALIGN_BACKWARD(LoadLibraryExA(reloc_file, NULL,
                                                  DONT_RESOLVE_DLL_REFERENCES),
                                   PAGE_SIZE);
    p2 = dll_2;
    VVERBOSE_PRINT("Loaded dll @ 0x%08x and 0x%08x\n", dll_1, dll_2);

    if (dll_1 == NULL || dll_2 == NULL) {
        VERBOSE_PRINT( "Error loading %s\n", input_file);
        return 1;
    }
    
    /* Handle the first page specially since I'm seeing problems with a handful
     * of dlls that aren't really getting rebased, e.g. mcupdate_GenuineIntel.dll
     * (which does have relocations etc.); not sure what's up, but since it's only
     * a couple of dlls we ignore them. If we really rebased, the headers should differ. */
    if (memcmp(dll_1, dll_2, PAGE_SIZE) == 0) {
        printf("%s - ERROR during relocating\n", input_file);
        return 1;
    } else {
        exact_no_match_pages++;
        if (assume_header_match)
            /* We could modify the hash function to catch header pages. */
            matched_pages++;
        else 
            unmatched_pages++;
    }
    p1 += PAGE_SIZE;
    p2 += PAGE_SIZE;

    if (assume_IAT_written && get_IAT_section_bounds(dll_1, &iat_start1, &iat_end1)) {
        has_iat = true;
        ASSERT(get_IAT_section_bounds(dll_2, &iat_start2, &iat_end2) &&
               iat_start1 - dll_1 == iat_start2 - dll_2 &&
               iat_end1 - dll_1 == iat_end2 - dll_2);
    }

    while (dr_virtual_query(p1, &info, sizeof(info)) == sizeof(info) &&
           info.State != MEM_FREE && info.AllocationBase == dll_1) {
        /* we only check read-only pages (assumption: writable pages aren't shareable) */
        ASSERT(p1 == info.BaseAddress);
        if (info.State != MEM_COMMIT) {
            reserved_pages += info.RegionSize / PAGE_SIZE;
            VVERBOSE_PRINT("skipping %d reserved pages\n", info.RegionSize / PAGE_SIZE);
            p1 += info.RegionSize;
            p2 += info.RegionSize;
        } else if (!prot_is_writable(info.Protect)) {
            uint i;
            for (i = 0; i < info.RegionSize / PAGE_SIZE; i++) {
                bool exact = false;
                if (assume_IAT_written && has_iat &&
                    iat_end1 > p1 && iat_start1 < p1 + PAGE_SIZE) {
                    /* overlaps an IAT page */
                    IAT_pages++;
                    p1 += PAGE_SIZE;
                    p2 += PAGE_SIZE;
                    continue;
                }
                if (memcmp(p1, p2, PAGE_SIZE) == 0) {
                    VVERBOSE_PRINT("Page Exact Match\n");
                    exact_match_pages++;
                    exact = true;
                } else {
                    VVERBOSE_PRINT("Page Exact Mismatch\n");
                    exact_no_match_pages++;
                }
                if (compare_pages(drcontext, p1, p2, 0)) {
                    VVERBOSE_PRINT("Matched page\n");
                    matched_pages++;
                } else { 
                    VVERBOSE_PRINT("Failed to match page\n");
                    if (use_second_pass &&
                        compare_pages(drcontext, p1, p2, second_pass_offset)) {
                        second_matched_pages++;
                    } else {
                        unmatched_pages++;
                    }
                    ASSERT(!exact);
                }
                p1 += PAGE_SIZE;
                p2 += PAGE_SIZE;
            }
        } else {
            writable_pages += info.RegionSize / PAGE_SIZE;
            VVERBOSE_PRINT("skipping %d writable pages\n", info.RegionSize / PAGE_SIZE);
            p1 += info.RegionSize;
            p2 += info.RegionSize;
        }
    }

    VERBOSE_PRINT("%d exact match, %d not exact match\n%d hash_match, %d second_hash_match, %d hash_mismatch\n",
                  exact_match_pages, exact_no_match_pages, matched_pages, second_matched_pages, unmatched_pages); 

    printf("%s : %d pages - %d w %d res %d IAT = %d same %d differ : %d hash differ %d first hash differ : %d%% found, %d%% found first hash\n",
           input_file, writable_pages + reserved_pages + IAT_pages + exact_match_pages + exact_no_match_pages,
           writable_pages, reserved_pages, IAT_pages,
           exact_match_pages, exact_no_match_pages,
           unmatched_pages, unmatched_pages + second_matched_pages,
           (100 * (matched_pages + second_matched_pages - exact_match_pages))/exact_no_match_pages,
           (100 * (matched_pages - exact_match_pages))/exact_no_match_pages);

    while (spin_for_debugger)
        Sleep(1000);

    return 0;
}
Example #14
// init memory layout
// This function initializes the "memory layout object" and the primary
// guest memory layout.
// If there are no secondary guests, the memory layout object is not required.
//
// For primary guest:
//   - All memory up to 4G is mapped, except for the VMM and secondary guest areas
//   - Only specified memory above 4G is mapped. Mappings in the >4G region for
//     the primary guest should be added on demand
//
// For secondary guests:
//   - All secondary guests are loaded lower than 4G
BOOLEAN init_memory_layout_from_mbr(
#if 0
                    // JLM(FIX)
                    int num_excluded
#endif
                    const VMM_MEMORY_LAYOUT* vmm_memory_layout,
                    GPM_HANDLE primary_guest_gpm, BOOLEAN are_secondary_guests_exist,
                    const VMM_APPLICATION_PARAMS_STRUCT* application_params)
{
    E820_ABSTRACTION_RANGE_ITERATOR         e820_iter;
    const INT15_E820_MEMORY_MAP_ENTRY_EXT*  e820_entry = NULL;
    BOOLEAN                                 ok;
    UINT64                                  range_start;
    UINT64                                  range_end;
    INT15_E820_RANGE_TYPE                   range_type;
    INT15_E820_MEMORY_MAP_EXT_ATTRIBUTES    range_attr;
    UINT64                                  page_index;
    UINT64                                 *entry_list;

    // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
    VMM_ASSERT(e820_abstraction_is_initialized());

    if (global_policy_uses_vtlb()) {
        mam_rwx_attrs.uint32 = 0x5;
        mam_rw_attrs.uint32 = 0x1;
        mam_ro_attrs.uint32 = 0x0;
    }

    // 1. first map 0-4G host region to primary guest
    ok = gpm_add_mapping( primary_guest_gpm, 0, 0, FOUR_GIGABYTE, mam_rwx_attrs );
    VMM_LOG(mask_anonymous, level_trace,"Primary guest GPM: add 0-4G region\r\n");
    // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
    VMM_ASSERT( ok == TRUE );

    // 2. Add real memory to "memory layout object" and to the primary guest
    //    if this memory range is above 4G
    // if in the post launch mode skip it
    for (e820_iter = e820_abstraction_iterator_get_first(E820_ORIGINAL_MAP);
        e820_iter != E820_ABSTRACTION_NULL_ITERATOR;
        e820_iter = e820_abstraction_iterator_get_next(E820_ORIGINAL_MAP, e820_iter)) {
        e820_entry = e820_abstraction_iterator_get_range_details(e820_iter);

        range_start = e820_entry->basic_entry.base_address;
        range_end   = range_start + e820_entry->basic_entry.length;
        range_type  = e820_entry->basic_entry.address_range_type;
        range_attr  = e820_entry->extended_attributes;

        // align ranges and sizes on 4K boundaries
        range_start = ALIGN_FORWARD(range_start, PAGE_4KB_SIZE);
        range_end   = ALIGN_BACKWARD(range_end, PAGE_4KB_SIZE);

        VMM_DEBUG_CODE({
            if (range_start != e820_entry->basic_entry.base_address) {
                VMM_LOG(mask_anonymous, level_trace,"init_memory_layout_from_mbr WARNING: aligning E820 range start from %P to %P\n",
                    e820_entry->basic_entry.base_address, range_start);
            }

            if (range_end != e820_entry->basic_entry.base_address + e820_entry->basic_entry.length) {
                VMM_LOG(mask_anonymous, level_trace,"init_memory_layout_from_mbr WARNING: aligning E820 range end from %P to %P\n",
                    e820_entry->basic_entry.base_address + e820_entry->basic_entry.length,
                    range_end);
            }
        })

        if (range_end <= range_start) {
            // after alignment the range became invalid
            VMM_LOG(mask_anonymous, level_trace,"init_memory_layout_from_mbr WARNING: skipping invalid E820 memory range FROM %P to %P\n",
                 range_start, range_end);
            continue;
        }

        // add memory to the "memory layout object" if this is a real memory
        // lower 4G
        if (are_secondary_guests_exist && (range_start < FOUR_GIGABYTE) &&
            range_attr.Bits.enabled && (!range_attr.Bits.non_volatile)) {
            UINT64 top = (range_end < FOUR_GIGABYTE) ? range_end : FOUR_GIGABYTE;
	    (void)top;
            if ((range_type == INT15_E820_ADDRESS_RANGE_TYPE_MEMORY) ||
                (range_type == INT15_E820_ADDRESS_RANGE_TYPE_ACPI)) {
                // here we need to add a call to the "memory layout object"
                // to fill it with the range_start-top range;
                // the assignment below just keeps the compiler happy
                top = 0;
            }
        }

        // add memory to the primary guest if this is a memory above 4G
        if (range_end > FOUR_GIGABYTE) {
            UINT64 bottom = (range_start < FOUR_GIGABYTE) ? FOUR_GIGABYTE : range_start;

            if (bottom < range_end) {
                VMM_LOG(mask_anonymous, level_trace,"Primary guest GPM: add memory above 4GB base %p size %p\r\n",
                        bottom, range_end - bottom);
                ok = gpm_add_mapping( primary_guest_gpm, bottom, bottom, range_end - bottom, mam_rwx_attrs );
                // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
                VMM_ASSERT( ok == TRUE );
            }
        }
    }

    return TRUE;
}
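
Note that the rounding direction here is the opposite of Examples #6, #9 and #12: an E820 range is shrunk inward (start up, end down), since only whole 4KB pages fully contained in the reported range may be mapped, and a range can vanish entirely after alignment. A minimal illustration with a made-up range:

#include <stdint.h>
#include <stdio.h>

#define PAGE_4KB_SIZE 0x1000u
#define ALIGN_BACKWARD(x, a) ((x) & ~((uint64_t)(a) - 1))
#define ALIGN_FORWARD(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t start = 0x9fc00, end = 0xa13ff;          /* e.g. an E820 entry  */
    uint64_t s = ALIGN_FORWARD(start, PAGE_4KB_SIZE); /* 0xa0000: rounds up  */
    uint64_t e = ALIGN_BACKWARD(end, PAGE_4KB_SIZE);  /* 0xa1000: rounds down */
    if (e <= s)
        printf("range vanished after alignment\n");
    else
        printf("map %#llx-%#llx\n", (unsigned long long)s,
               (unsigned long long)e);
    return 0;
}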
Example #15
bool page_table_get_page(generic_page_table_entry_t* l4,
                         void* virtual_address,
                         vm_region_t* region,
                         unsigned long* pfn,
                         generic_page_table_entry_t** parent,
                         int *parent_level) {
    virtual_address_t index;
    generic_page_table_entry_t* entry;
    region->present = false;
    *parent = 0;
    *parent_level = -1;

    if (virtual_address >= VM_HOLE_START && virtual_address <= VM_HOLE_END) {
        return false;
    }

    index.virtual_address = virtual_address;
    entry = &l4[index.l4_index];
    *parent = entry;
    *parent_level = 4;

    if (!entry->present) {
        return false;
    }
    region->access.writable = entry->writable;
    region->access.executable = !entry->not_executable;
    region->access.user = entry->user;
    entry = &follow_page_table_entry(entry)[index.l3_index];
    *parent = entry;
    *parent_level = 3;

    if (!entry->present) {
        return false;
    }
    region->access.writable &= entry->writable;
    region->access.executable &= !entry->not_executable;
    region->access.user &= entry->user;
    if (entry->size) {
        region->present = true;
        region->start = (void*) ALIGN_BACKWARD(virtual_address, _1GB);
        region->end = region->start + _1GB - 1;
        *pfn = entry->next_pfn + index.l1_index + (index.l2_index << 9);
        return true;
    }
    entry = &follow_page_table_entry(entry)[index.l2_index];
    *parent = entry;
    *parent_level = 2;

    if (!entry->present) {
        return false;
    }
    region->access.writable &= entry->writable;
    region->access.executable &= !entry->not_executable;
    region->access.user &= entry->user;
    if (entry->size) {
        region->present = true;
        region->start = (void*) ALIGN_BACKWARD(virtual_address, _2MB);
        region->end = region->start + _2MB - 1;
        *pfn = entry->next_pfn + index.l1_index;
        return true;
    }
    entry = &follow_page_table_entry(entry)[index.l1_index];
    *parent = entry;
    *parent_level = 1;

    if (!entry->present) {
        return false;
    }
    region->present = true;
    region->access.writable &= entry->writable;
    region->access.executable &= !entry->not_executable;
    region->access.user &= entry->user;
    region->start = (void*) ALIGN_BACKWARD(virtual_address, _4KB);
    region->end = region->start + _4KB - 1;
    *pfn = entry->next_pfn;
    return true;
}
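
Each level of the walk above rounds the virtual address down to the size of the mapping it found: 1GB at L3, 2MB at L2, 4KB at L1. A sketch of that arithmetic with the usual x86-64 page sizes (the address is illustrative):

#include <stdint.h>
#include <stdio.h>

#define _4KB (1ULL << 12)
#define _2MB (1ULL << 21)
#define _1GB (1ULL << 30)
#define ALIGN_BACKWARD(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t va = 0x7f3a1c2d5e6f;
    printf("4KB page: %#llx\n", (unsigned long long)ALIGN_BACKWARD(va, _4KB));
    printf("2MB page: %#llx\n", (unsigned long long)ALIGN_BACKWARD(va, _2MB));
    printf("1GB page: %#llx\n", (unsigned long long)ALIGN_BACKWARD(va, _1GB));
    return 0;
}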
Example #16
/* pass non-NULL for thandle if you want this routine to use
 *   Get/SetThreadContext to get the context -- you must still pass
 *   in a pointer to a cxt
 */
BOOL
inject_into_thread(HANDLE phandle, CONTEXT *cxt, HANDLE thandle,
                   char *dynamo_path)
{
    size_t              nbytes;
    BOOL                success = FALSE;
    ptr_uint_t          dynamo_entry_esp;
    ptr_uint_t          dynamo_path_esp;
    LPVOID              load_dynamo_code = NULL; /* = base of code allocation */
    ptr_uint_t          addr;
    reg_t               *bufptr;
    char                buf[MAX_PATH];
    uint                old_prot;

    ASSERT(cxt != NULL);

#ifndef NOT_DYNAMORIO_CORE_PROPER
    /* FIXME - if we were early injected we couldn't call inject_init during
     * startup because kernel32 wasn't loaded yet, so we call it here which
     * isn't safe because it uses app locks. If we want to support a mix
     * of early and late follow children injection we should change load_dynamo
     * to use Nt functions (which we can link) rather than kernel32 functions
     * (which we have to look up).  We could also use module.c code to safely
     * walk the exports of kernel32.dll (we can cache its mod handle when it
     * is loaded). */ 
    if (!inject_initialized) {
        SYSLOG_INTERNAL_WARNING("Using late inject follow children from early injected process, unsafe LdrLock usage");
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
        inject_init();
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
#else
    ASSERT(inject_initialized);
#endif

    /* soon we'll start using alternative injection with case 102 - leaving block */
    {
        reg_t app_xsp;
        if (thandle != NULL) {
            /* grab the context of the app's main thread */                 
            cxt->ContextFlags = CONTEXT_DR_STATE;
            if (!NT_SUCCESS(nt_get_context(thandle, cxt))) {
                display_error("GetThreadContext failed");
                goto error;
            }
        }
        app_xsp = cxt->CXT_XSP;

        /* copy load_dynamo() into the address space of the new process */
        ASSERT(BUFFER_SIZE_BYTES(buf) > SIZE_OF_LOAD_DYNAMO);
        memcpy(buf, (char*)load_dynamo, SIZE_OF_LOAD_DYNAMO);
        /* R-X protection is adequate for our non-self modifying code,
         * and we'll update that after we're done with
         * nt_write_virtual_memory() calls */

        /* get allocation, this will be freed by os_heap_free, so make sure
         * is compatible allocation method */
        if (!NT_SUCCESS(nt_remote_allocate_virtual_memory(phandle, &load_dynamo_code, 
                                                          SIZE_OF_LOAD_DYNAMO,
                                                          PAGE_EXECUTE_READWRITE,
                                                          MEMORY_COMMIT))) {
            display_error("Failed to allocate memory for injection code");
            goto error;
        }
        if (!nt_write_virtual_memory(phandle, load_dynamo_code, buf,
                                     SIZE_OF_LOAD_DYNAMO, &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

        /* Xref PR 252745 & PR 252008 - we can use the app's stack to hold our data
         * even on WOW64 and 64-bit since we're using set context to set xsp. */
   
        /* copy the DYNAMORIO_ENTRY string to the app's stack */
        _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s", DYNAMORIO_ENTRY);
        NULL_TERMINATE_BUFFER(buf);
        nbytes = strlen(buf) + 1; // include the trailing '\0'
        /* keep esp at pointer-sized alignment */
        cxt->CXT_XSP -= ALIGN_FORWARD(nbytes, XSP_SZ);
        dynamo_entry_esp = cxt->CXT_XSP;
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, 
                                     buf, nbytes, &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

        /* copy the dynamorio_path string to the app's stack */
        _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s", dynamo_path);
        NULL_TERMINATE_BUFFER(buf);
        nbytes = strlen(buf) + 1; // include the trailing '\0'
        /* keep esp at pointer-sized byte alignment */
        cxt->CXT_XSP -= ALIGN_FORWARD(nbytes, XSP_SZ);
        dynamo_path_esp = cxt->CXT_XSP;
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, 
                                     buf, nbytes, &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

        /* copy the current context to the app's stack. Only need the
         * control registers, so we use a dr_mcontext_t layout.
         */
        bufptr = (reg_t*) buf;
        *bufptr++ = cxt->CXT_XDI;
        *bufptr++ = cxt->CXT_XSI;
        *bufptr++ = cxt->CXT_XBP;
        *bufptr++ = app_xsp;
        *bufptr++ = cxt->CXT_XBX;
        *bufptr++ = cxt->CXT_XDX;
        *bufptr++ = cxt->CXT_XCX;
        *bufptr++ = cxt->CXT_XAX;
#ifdef X64
        *bufptr++ = cxt->R8;
        *bufptr++ = cxt->R9;
        *bufptr++ = cxt->R10;
        *bufptr++ = cxt->R11;
        *bufptr++ = cxt->R12;
        *bufptr++ = cxt->R13;
        *bufptr++ = cxt->R14;
        *bufptr++ = cxt->R15;
#endif
        /* It would be nice to use preserve_xmm_caller_saved(), but we'd need to
         * link proc.c and deal w/ messy dependencies to get it into arch_exports.h,
         * so we do our own check.  We go ahead and put in the xmm slots even
         * if the underlying processor has no xmm support: no harm done.
         */
        if (IF_X64_ELSE(true, is_wow64_process(NT_CURRENT_PROCESS))) {
            /* PR 264138: preserve xmm0-5.  We fill in all slots even though
             * for 32-bit we don't use them (PR 306394).
             */
            int i, j;
            for (i = 0; i < NUM_XMM_SLOTS; i++) {
                for (j = 0; j < IF_X64_ELSE(2,4); j++) {
                    *bufptr++ = CXT_XMM(cxt, i)->reg[j];
                }
            }
        } else {
            /* skip xmm slots */
            bufptr += XMM_SLOTS_SIZE/sizeof(*bufptr);
        }
        *bufptr++ = cxt->CXT_XFLAGS;
        *bufptr++ = cxt->CXT_XIP;
        ASSERT((char *)bufptr - (char *)buf == sizeof(dr_mcontext_t));
        *bufptr++ = (ptr_uint_t)load_dynamo_code;
        *bufptr++ = SIZE_OF_LOAD_DYNAMO;
        nbytes = sizeof(dr_mcontext_t) + 2*sizeof(reg_t);
        cxt->CXT_XSP -= nbytes;
#ifdef X64
        /* We need xsp to be aligned prior to each call, but we can only pad
         * before the context as all later users assume the info they need is
         * at TOS.
         */
        cxt->CXT_XSP = ALIGN_BACKWARD(cxt->CXT_XSP, XMM_ALIGN);
#endif
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP,
                                     buf, nbytes, &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

        /* push the address of the DYNAMORIO_ENTRY string on the app's stack */
        cxt->CXT_XSP -= XSP_SZ;
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, 
                                     &dynamo_entry_esp, sizeof(dynamo_entry_esp),
                                     &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

        /* push the address of GetProcAddress on the app's stack */
        ASSERT(addr_getprocaddr);
        addr = addr_getprocaddr;
        cxt->CXT_XSP -= XSP_SZ;
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, 
                                     &addr, sizeof(addr), &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

        /* push the address of the dynamorio_path string on the app's stack */
        cxt->CXT_XSP -= XSP_SZ;
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, 
                                     &dynamo_path_esp, sizeof(dynamo_path_esp),
                                     &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

        /* push the address of LoadLibraryA on the app's stack */
        ASSERT(addr_loadlibrarya);
        addr = addr_loadlibrarya;
        cxt->CXT_XSP -= XSP_SZ;
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, 
                                     &addr, sizeof(addr), &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }

#ifdef LOAD_DYNAMO_DEBUGBREAK
        /* push the address of DebugBreak on the app's stack */
        ASSERT(addr_debugbreak);
        addr = addr_debugbreak;
        cxt->CXT_XSP -= XSP_SZ;
        if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, 
                                     &addr, sizeof(addr), &nbytes)) {
            display_error("WriteMemory failed");
            goto error;
        }
#endif

        /* make the code R-X now */
        if (!nt_remote_protect_virtual_memory(phandle, load_dynamo_code, 
                                              SIZE_OF_LOAD_DYNAMO,
                                              PAGE_EXECUTE_READ, &old_prot)) {
            display_error("Failed to make injection code R-X");
            goto error;
        }
        ASSERT(old_prot == PAGE_EXECUTE_READWRITE);

        /* now change Eip to point to the entry point of load_dynamo(), so that
           when we resume, load_dynamo is invoked automatically */
        cxt->CXT_XIP = (ptr_uint_t)load_dynamo_code;
        cxt->CXT_XFLAGS = 0;
        if (thandle != NULL) {
            if (!NT_SUCCESS(nt_set_context(thandle, cxt))) {
                display_error("SetThreadContext failed");
                goto error;
            }
        }

        success = TRUE;
    }
    error:
        /* we do not recover any changes in the child's address space */

    return success;
}
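
The injector above repeatedly pushes variable-length strings with cxt->CXT_XSP -= ALIGN_FORWARD(nbytes, XSP_SZ), which keeps xsp pointer-aligned after every push. A local sketch of that pattern (push_blob() and the buffer are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XSP_SZ sizeof(void *)
#define ALIGN_FORWARD(x, a) (((uintptr_t)(x) + (a) - 1) & ~((uintptr_t)(a) - 1))

static uint64_t fake_stack[32]; /* stands in for the target's stack */

/* push len bytes, rounding the displacement up to pointer size so the
 * "stack pointer" stays pointer-aligned (illustrative only) */
static uintptr_t push_blob(uintptr_t sp, const void *data, size_t len)
{
    sp -= ALIGN_FORWARD(len, XSP_SZ);
    memcpy((void *)sp, data, len);
    return sp;
}

int main(void)
{
    uintptr_t sp = (uintptr_t)&fake_stack[32]; /* one past the end */
    sp = push_blob(sp, "some entry name", sizeof("some entry name"));
    printf("sp %% %u = %u\n", (unsigned)XSP_SZ, (unsigned)(sp % XSP_SZ)); /* 0 */
    return 0;
}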