示例#1
0
/*
 * Flush all EPT-derived translations on this CPU with a global INVEPT.
 * A no-op on hardware without EPT support.
 */
void
ept_invalidate_global(void)
{
	if (!cpu_supports_ept())
		return;

	invept(INVEPT_TYPE_GLOBAL, 0);
}
示例#2
0
文件: ept.c 项目: Alkzndr/freebsd
/*
 * Rendezvous-style callback: issue a single-context INVEPT using the
 * descriptor handed over through the opaque argument.
 */
static void
invept_single_context(void *arg)
{
	struct invept_desc *descp = arg;

	invept(INVEPT_TYPE_SINGLE_CONTEXT, *descp);
}
示例#3
0
文件: memory.c 项目: tisma/haxm
/*
 * Map the guest physical range [start_gpa, start_gpa + size) to user virtual
 * address start_uva, or unmap it when flags contains HAX_RAM_INFO_INVALID.
 *
 * Returns 0 on success, -EINVAL on bad parameters, or the error code
 * propagated from memslot_set_mapping().
 *
 * NOTE(review): start_gpa and size are assumed 4KB-aligned — the shifts by
 * PG_ORDER_4K silently drop any sub-page bits. Confirm callers guarantee
 * alignment.
 */
static int handle_set_ram(struct vm_t *vm, uint64 start_gpa, uint64 size,
                          uint64 start_uva, uint8 flags)
{
    bool unmap = flags & HAX_RAM_INFO_INVALID;
    hax_gpa_space *gpa_space;
    uint64 start_gfn, npages;
    int ret;
    hax_ept_tree *ept_tree;

    // HAX_RAM_INFO_INVALID indicates that guest physical address range
    // [start_gpa, start_gpa + size) should be unmapped
    if (unmap && (flags != HAX_RAM_INFO_INVALID || start_uva)) {
        hax_error("%s: Invalid start_uva=0x%llx or flags=0x%x for unmapping\n",
                  __func__, start_uva, flags);
        return -EINVAL;
    }
    // Mapping requires a non-zero user virtual address.
    if (!unmap && !start_uva) {
        hax_error("%s: Cannot map to an invalid UVA\n", __func__);
        return -EINVAL;
    }
    // Empty ranges are rejected rather than treated as a no-op.
    if (!size) {
        hax_error("%s: size == 0\n", __func__);
        return -EINVAL;
    }

    assert(vm != NULL);
    gpa_space = &vm->gpa_space;
    start_gfn = start_gpa >> PG_ORDER_4K;
    npages = size >> PG_ORDER_4K;
    // Record the mapping in the memslot list; the EPT entries themselves are
    // populated elsewhere (lazily, on access).
    ret = memslot_set_mapping(gpa_space, start_gfn, npages, start_uva, flags);
    if (ret) {
        hax_error("%s: memslot_set_mapping() failed: ret=%d, start_gfn=0x%llx,"
                  " npages=0x%llx, start_uva=0x%llx, flags=0x%x\n", __func__,
                  ret, start_gfn, npages, start_uva, flags);
        return ret;
    }
    memslot_dump_list(gpa_space);

    ept_tree = &vm->ept_tree;
    // NOTE(review): branch is taken when hax_test_and_clear_bit() returns 0,
    // which per the comment below means the pending flag WAS set — this
    // return-value convention is inverted relative to common test-and-clear
    // idioms; confirm against the platform bitops implementation.
    if (!hax_test_and_clear_bit(0, (uint64 *)&ept_tree->invept_pending)) {
        // INVEPT pending flag was set
        hax_info("%s: Invoking INVEPT for VM #%d\n", __func__, vm->vm_id);
        invept(vm, EPT_INVEPT_SINGLE_CONTEXT);
    }
    return 0;
}
示例#4
0
/*
 * Invalidate EPT translations covering a guest physical address.
 *
 * Under HYP_PAE the 64-bit guest physical address arrives split into two
 * 32-bit halves (gpaddr0 = low, gpaddr1 = high) and is reassembled below;
 * otherwise it is passed directly as a paddr_t. Note the `__` name prefix
 * is reserved to the implementation in standard C — kept as-is here since
 * renaming would break callers.
 */
static void
#ifdef HYP_PAE
__ept_invalidate_addr(vm_t *v, u32 gpaddr0, u32 gpaddr1)
{
	paddr_t gpaddr = ((paddr_t)gpaddr1 << 32) | gpaddr0;
#else
__ept_invalidate_addr(vm_t *v, paddr_t gpaddr)
{
#endif
	/* There is currently no option to invept to invalidate
	 * a particular page, so gpaddr is ignored */
	u64 eptp = vmread64(VMCE_EPT_PTR);
	un err = invept(INVEPT_TYPE_SINGLE, eptp);
	if (err) {
		kprintf("__ept_invalidate_addr>ERROR eptp 0x%llx\n", eptp);
		return;
	}
	/* Debug-only success trace, emitted solely for the DMA test page. */
	if (TRUNC_PAGE(gpaddr) == kvtophys(dma_test_page)) {
	  kprintf("__ept_invalidate_addr>succeeded gpaddr ="PFMT"\n",
		  gpaddr);
	}
}
示例#5
0
文件: memory.c 项目: tisma/haxm
/*
 * HAX_VM_IOCTL_SET_RAM backend: map or unmap a guest RAM region described
 * by `info` (pa_start, size, va, flags).
 *
 * With CONFIG_HAX_EPT2 the work is delegated to handle_set_ram(); otherwise
 * this legacy path walks the region page by page, resolving each user VA to
 * a host pfn/kva and updating the p2m table and EPT directly.
 *
 * Returns 0 on success, -EINVAL on bad parameters, -ENOMEM on lookup or
 * EPT-update failure.
 */
int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info)
{
#ifdef CONFIG_HAX_EPT2
    return handle_set_ram(vm, info->pa_start, info->size, info->va,
                          info->flags);
#else  // !CONFIG_HAX_EPT2
    int num = info->size >> page_shift;
    uint64_t gpfn = info->pa_start >> page_shift;
    uint64_t cur_va = info->va;
    bool is_unmap = info->flags & HAX_RAM_INFO_INVALID;
    bool is_readonly = info->flags & HAX_RAM_INFO_ROM;
    // Unmapped pages get no memory type/permissions; ROM pages are mapped
    // uncacheable read-only, normal RAM write-back read-write.
    uint emt = is_unmap ? EMT_NONE : (is_readonly ? EMT_UC : EMT_WB);
    uint perm = is_unmap ? EPT_TYPE_NONE
                : (is_readonly ? EPT_TYPE_ROM : EPT_TYPE_MEM);
    bool ept_modified = false;

    // HAX_RAM_INFO_INVALID indicates that guest physical address range
    // [pa_start, pa_start + size) should be unmapped
    if (is_unmap && (info->flags != HAX_RAM_INFO_INVALID || info->va)) {
        hax_error("HAX_VM_IOCTL_SET_RAM called with invalid parameter(s):"
                  " flags=0x%x, va=0x%llx\n", info->flags, info->va);
        return -EINVAL;
    }

    while (num > 0) {
        uint64_t hpfn;
        uint64_t hva;
        bool epte_modified;

        if (is_unmap) {
            // Unmapping: host pfn/kva are irrelevant, pass zeros.
            hpfn = 0;
            hva = 0;
        } else {
            struct hax_vcpu_mem *pmem = get_pmem_range(vm, cur_va);
            if (!pmem) {
                hax_error("Can't find pmem for va %llx", cur_va);
                return -ENOMEM;
            }
            hpfn = get_hpfn_from_pmem(pmem, cur_va);

            // NOTE(review): hpfn is uint64_t, so `hpfn <= 0` is equivalent
            // to `hpfn == 0` — negative error codes from the helper (if any)
            // would wrap to large unsigned values and pass this check.
            if (hpfn <= 0) {
                hax_error("Can't get host address for va %llx", cur_va);
                /*
                 * Shall we revert the already setup one? Assume not since the
                 * QEMU should exit on such situation, although it does not.
                 */
                return -ENOMEM;
            }
            // Translate the user VA to the matching host kernel VA where the
            // platform keeps a kernel mapping (macOS always; Windows 64-bit).
#if defined(__MACH__)
#ifdef __x86_64__
            hva = (uint64_t)pmem->kva + (cur_va - pmem->uva);
#else
            hva = (uint64_t)(uint32_t)pmem->kva + (cur_va - pmem->uva);
#endif
#else   // __MACH
#if defined(_WIN64)
            hva = (uint64_t)pmem->kva + (cur_va - pmem->uva);
#else
            hva = 0;
#endif
#endif
            cur_va += page_size;
        }

        if (!hax_core_set_p2m(vm, gpfn, hpfn, hva, info->flags)) {
            return -ENOMEM;
        }
        if (!ept_set_pte(vm, gpfn << page_shift, hpfn << page_shift, emt, perm,
                         &epte_modified)) {
            hax_error("ept_set_pte() failed at gpfn 0x%llx hpfn 0x%llx\n", gpfn,
                      hpfn);
            return -ENOMEM;
        }
        // Any page actually changing its EPT entry forces an INVEPT below.
        ept_modified = ept_modified || epte_modified;

        gpfn++;
        num--;
    }
    if (ept_modified) {
        /* Invalidate EPT cache (see IASDM Vol. 3C 28.3.3.4) */
        hax_info("Calling INVEPT after EPT update (pa_start=0x%llx, size=0x%x,"
                 " flags=0x%x)\n", info->pa_start, info->size, info->flags);
        invept(vm, EPT_INVEPT_SINGLE_CONTEXT);
    }
    return 0;
#endif  // CONFIG_HAX_EPT2
}