// Map host virtual address hva to guest physical address gpa,
// with permissions perm.  eptrt is a pointer to the extended
// page table root.
//
// Return 0 on success.
// 
// If the mapping already exists and overwrite is set to 0,
//  return -E_INVAL.
// 
// Hint: use ept_lookup_gpa to create the intermediate 
//       ept levels, and return the final epte_t pointer.
//       You should set the type to EPTE_TYPE_WB and set __EPTE_IPAT flag.
int ept_map_hva2gpa(epte_t* eptrt, void* hva, void* gpa, int perm, 
        int overwrite) {
	epte_t *epte_out = NULL;
	int result;

	// Walk the EPT, creating intermediate levels as needed (create = 1).
	result = ept_lookup_gpa(eptrt, gpa, 1, &epte_out);
	if (result < 0) {
		cprintf("ept_map_hva2gpa: ept_lookup_gpa failed (%d)\n", result);
		return result;
	}

	// Refuse to clobber an existing mapping unless overwrite is set.
	if (epte_present(*epte_out) && !overwrite)
		return -E_INVAL;

	// Install the mapping with write-back memory type and the IPAT flag,
	// as the hint requires, for both fresh and overwritten entries.
	*epte_out = (uint64_t)PADDR(hva) | perm |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;

	return 0;
}
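A minimal caller sketch for the function above, assuming the JOS-style helpers (page_alloc, page2kva, page_decref) used elsewhere in this listing; map_one_page is a hypothetical name:

/* Sketch: map a freshly allocated, zeroed page read-only into the guest
 * at guest_pa, backing out if the EPT slot is already occupied. */
static int map_one_page(epte_t *eptrt, void *guest_pa)
{
	struct PageInfo *pp = page_alloc(ALLOC_ZERO);
	int r;

	if (!pp)
		return -E_NO_MEM;

	r = ept_map_hva2gpa(eptrt, page2kva(pp), guest_pa, __EPTE_READ, 0);
	if (r < 0)
		page_decref(pp);	/* mapping failed; free the page */
	else
		pp->pp_ref++;		/* the EPT now references this page */
	return r;
}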
Example #2
/**
 * ept_invalidate_page - removes a page from the EPT
 * @vcpu: the vcpu
 * @mm: the process's mm_struct
 * @addr: the address of the page
 * 
 * Returns 1 if the page was removed, 0 otherwise
 */
static int ept_invalidate_page(struct vmx_vcpu *vcpu,
			       struct mm_struct *mm,
			       unsigned long addr)
{
	int ret;
	epte_t *epte;
	void *gpa = (void *) hva_to_gpa(vcpu, mm, (unsigned long) addr);

	if (gpa == (void *) GPA_ADDR_INVAL) {
		printk(KERN_ERR "ept: hva %lx is out of range\n", addr);
		return 0;
	}

	spin_lock(&vcpu->ept_lock);
	ret = ept_lookup_gpa(vcpu, (void *) gpa, 0, 0, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		return 0;
	}

	ret = ept_clear_epte(epte);
	spin_unlock(&vcpu->ept_lock);

	if (ret)
		vmx_ept_sync_individual_addr(vcpu, (gpa_t) gpa);

	return ret;
}
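In Dune this helper is driven by the Linux mmu_notifier machinery. The sketch below shows the general shape of such a callback; the mmu_notifier field name, the older-kernel callback signature, and the wiring are assumptions here, not taken from this listing:

/* Sketch of an mmu_notifier hook: when the host unmaps a page, drop the
 * corresponding EPT entry so the guest re-faults on its next access. */
static void ept_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct vmx_vcpu *vcpu = container_of(mn, struct vmx_vcpu, mmu_notifier);

	ept_invalidate_page(vcpu, mm, address);
}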
Example #3
/**
 * ept_check_page_accessed - determines if a page was accessed using AD bits
 * @vcpu: the vcpu
 * @mm: the process's mm_struct
 * @addr: the address of the page
 * @flush: if true, clear the A bit
 * 
 * Returns 1 if the page was accessed, 0 otherwise
 */
static int ept_check_page_accessed(struct vmx_vcpu *vcpu,
				   struct mm_struct *mm,
				   unsigned long addr,
				   bool flush)
{
	int ret, accessed;
	epte_t *epte;
	void *gpa = (void *) hva_to_gpa(vcpu, mm, (unsigned long) addr);

	if (gpa == (void *) GPA_ADDR_INVAL) {
		printk(KERN_ERR "ept: hva %lx is out of range\n", addr);
		return 0;
	}

	spin_lock(&vcpu->ept_lock);
	ret = ept_lookup_gpa(vcpu, (void *) gpa, 0, 0, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		return 0;
	}

	accessed = !!(*epte & __EPTE_A);
	if (flush && accessed)
		*epte = (*epte & ~__EPTE_A);
	spin_unlock(&vcpu->ept_lock);

	if (flush && accessed)
		vmx_ept_sync_individual_addr(vcpu, (gpa_t) gpa);

	return accessed;
}
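For context, a hedged sketch of how a scan loop might use this helper to estimate a guest's working set; the function name and range arguments are illustrative only:

/* Count pages in [start, end) whose EPT accessed bit is set, clearing
 * each bit (flush = true) so the next scan measures fresh accesses. */
static unsigned long count_accessed_pages(struct vmx_vcpu *vcpu,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	unsigned long addr, n = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		if (ept_check_page_accessed(vcpu, mm, addr, true))
			n++;
	return n;
}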
Example #4
static int ept_set_pfnmap_epte(struct vmx_vcpu *vcpu, int make_write,
				unsigned long gpa, unsigned long hva)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	epte_t *epte, flags;
	unsigned long pfn;
	int ret;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, hva);
	if (!vma) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	ret = follow_pfn(vma, hva, &pfn);
	if (ret) {
		up_read(&mm->mmap_sem);
		return ret;
	}
	up_read(&mm->mmap_sem);

	/* NOTE: PFN mappings cannot be huge pages, which is quite a relief here */
	spin_lock(&vcpu->ept_lock);
	ret = ept_lookup_gpa(vcpu, (void *) gpa, 0, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	flags = __EPTE_READ | __EPTE_TYPE(EPTE_TYPE_UC) |
		__EPTE_IPAT | __EPTE_PFNMAP;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (epte_present(*epte))
		ept_clear_epte(epte);

	*epte = epte_addr(pfn << PAGE_SHIFT) | flags;
	spin_unlock(&vcpu->ept_lock);

	return 0;
}
Example #5
static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write,
			unsigned long gpa, unsigned long hva)
{
	int ret;
	epte_t *epte, flags;
	struct page *page;

	ret = get_user_pages_fast(hva, 1, make_write, &page);
	if (ret != 1) {
		printk(KERN_ERR "ept: failed to get user page %lx\n", hva);
		return ret;
	}

	spin_lock(&vcpu->ept_lock);

	ret = ept_lookup_gpa(vcpu, (void *) gpa,
			     PageHuge(page) ? 1 : 0, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	if (epte_present(*epte)) {
		if (!epte_big(*epte) && PageHuge(page))
			ept_clear_l1_epte(epte);
		else
			ept_clear_epte(epte);
	}

	flags = __EPTE_READ | __EPTE_EXEC |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (PageHuge(page)) {
		flags |= __EPTE_SZ;
		*epte = epte_addr(page_to_phys(page) & ~((1 << 21) - 1)) |
			flags;
	} else
		*epte = epte_addr(page_to_phys(page)) | flags;

	spin_unlock(&vcpu->ept_lock);

	return 0;
}
Example #6
void ept_gpa2hva(epte_t* eptrt, void *gpa, void **hva) {
    epte_t* pte;
    int ret = ept_lookup_gpa(eptrt, gpa, 0, &pte);
    if(ret < 0) {
        *hva = NULL;
    } else {
        if(!epte_present(*pte)) {
           *hva = NULL;
        } else {
           *hva = KADDR(epte_addr(*pte));
        }
    }
}
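A small sketch of the reverse lookup in use; peek_guest_byte is a hypothetical helper that reads one byte of guest-physical memory through the EPT:

static int peek_guest_byte(epte_t *eptrt, void *gpa, uint8_t *out)
{
	void *hva;

	ept_gpa2hva(eptrt, gpa, &hva);
	if (!hva)
		return -E_INVAL;	/* gpa is not mapped */
	*out = *(uint8_t *)hva;
	return 0;
}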
Example #7
static int
ept_lookup(struct vmx_vcpu *vcpu, struct mm_struct *mm,
	   void *hva, int level, int create, epte_t **epte_out)
{
	void *gpa = (void *) hva_to_gpa(vcpu, mm, (unsigned long) hva);

	if (gpa == (void *) GPA_ADDR_INVAL) {
		printk(KERN_ERR "ept: hva %p is out of range\n", hva);
		printk(KERN_ERR "ept: mem_base %lx, stack_start %lx\n",
		       mm->mmap_base, mm->start_stack);
		return -EINVAL;
	}

	return ept_lookup_gpa(vcpu, gpa, level, create, epte_out);
}
Example #8
/**
 * ept_check_page_mapped - determines if a page is mapped in the ept
 * @vcpu: the vcpu
 * @mm: the process's mm_struct
 * @addr: the address of the page
 * 
 * Returns 1 if the page is mapped, 0 otherwise
 */
static int ept_check_page_mapped(struct vmx_vcpu *vcpu,
				 struct mm_struct *mm,
				 unsigned long addr)
{
	int ret;
	epte_t *epte;
	void *gpa = (void *) hva_to_gpa(vcpu, mm, (unsigned long) addr);

	if (gpa == (void *) GPA_ADDR_INVAL) {
		printk(KERN_ERR "ept: hva %lx is out of range\n", addr);
		return 0;
	}

	spin_lock(&vcpu->ept_lock);
	ret = ept_lookup_gpa(vcpu, (void *) gpa, 0, 0, &epte);
	spin_unlock(&vcpu->ept_lock);

	return !ret;
}
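A trivial sketch of a caller combining ept_check_page_mapped with ept_invalidate_page from Example #2; maybe_invalidate is a made-up name:

/* Sketch: only tear down pages the EPT actually maps. */
static void maybe_invalidate(struct vmx_vcpu *vcpu, struct mm_struct *mm,
			     unsigned long addr)
{
	if (ept_check_page_mapped(vcpu, mm, addr))
		ept_invalidate_page(vcpu, mm, addr);
}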
Example #9
int test_ept_map(void)
{
	struct Env *srcenv, *dstenv;
	struct PageInfo *pp;
	epte_t *epte;
	int r;
	int pp_ref;
	int i;
	epte_t* dir;
	/* Initialize source env */
	if ((r = env_alloc(&srcenv, 0)) < 0)
		panic("Failed to allocate env (%d)\n", r);
	if (!(pp = page_alloc(ALLOC_ZERO)))
		panic("Failed to allocate page\n");
	if ((r = page_insert(srcenv->env_pml4e, pp, UTEMP, 0)) < 0)
		panic("Failed to insert page (%d)\n", r);
	curenv = srcenv;

	/* Check if sys_ept_map correctly verifies the target env */
	if ((r = env_alloc(&dstenv, srcenv->env_id)) < 0)
		panic("Failed to allocate env (%d)\n", r);
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("EPT map to non-guest env failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on non-guest env.\n");

	/*env_destroy(dstenv);*/

	if ((r = env_guest_alloc(&dstenv, srcenv->env_id)) < 0)
		panic("Failed to allocate guest env (%d)\n", r);
	dstenv->env_vmxinfo.phys_sz = (uint64_t)UTEMP + PGSIZE;
	
	/* Check if sys_ept_map can verify srcva correctly */
	if ((r = _export_sys_ept_map(srcenv->env_id, (void *)UTOP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("EPT map from above UTOP area failed as expected (%d).\n", r);
	else
		panic("sys_ept_map from above UTOP area success\n");
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP+1, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("EPT map from unaligned srcva failed as expected (%d).\n", r);
	else
		panic("sys_ept_map from unaligned srcva success\n");

	/* Check if sys_ept_map can verify guest_pa correctly */
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP + PGSIZE, __EPTE_READ)) < 0)
		cprintf("EPT map to out-of-boundary area failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on out-of-boundary area\n");
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP-1, __EPTE_READ)) < 0)
		cprintf("EPT map to unaligned guest_pa failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on unaligned guest_pa\n");

	/* Check if sys_ept_map verifies the permissions correctly */
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, 0)) < 0)
		cprintf("EPT map with empty perm parameter failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on empty perm\n");
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_WRITE)) < 0)
		cprintf("EPT map with write perm parameter failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on write perm\n");
	
	pp_ref = pp->pp_ref;	
	/* Check if sys_ept_map succeeds on a correct setup */
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		panic("Failed to do sys_ept_map (%d)\n", r);
	else
		cprintf("sys_ept_map finished normally.\n");
		
	if (pp->pp_ref != pp_ref + 1)
		panic("pp_ref check failed (expected %d)\n", pp_ref + 1);
	else
		cprintf("pp_ref incremented correctly\n");		
	
	/* Check if sys_ept_map handles remapping correctly */
	pp_ref = pp->pp_ref;
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("Remapping the same page failed as expected (%d).\n", r);
	else
		panic("sys_ept_map succeeded in remapping the same page\n");
	/* Check if sys_ept_map resets pp_ref after failing to remap the same page */
	if (pp->pp_ref == pp_ref)
		cprintf("sys_ept_map handled pp_ref correctly.\n");
	else
		panic("sys_ept_map failed to handle pp_ref.\n");
	
	/* Check if ept_lookup_gpa can handle empty eptrt correctly */
	if ((r = ept_lookup_gpa(NULL, UTEMP, 0, &epte)) < 0)
		cprintf("EPT lookup with a null eptrt failed as expected\n");
	else
		panic ("ept_lookup_gpa success on null eptrt\n");
	
		
	/* Check if the mapping is valid */
	if ((r = ept_lookup_gpa(dstenv->env_pml4e, UTEMP, 0, &epte)) < 0)
		panic("Failed on ept_lookup_gpa (%d)\n", r);
	if (page2pa(pp) != (epte_addr(*epte)))
		panic("EPT mapping address mismatching (%x vs %x).\n",
				page2pa(pp), epte_addr(*epte));
	else
		cprintf("EPT mapping address looks good: %x vs %x.\n",
				page2pa(pp), epte_addr(*epte));
	
	/* Check if map_hva2gpa handles overwrite correctly */
	if ((r = ept_map_hva2gpa(dstenv->env_pml4e, page2kva(pp), UTEMP, __EPTE_READ, 0)) < 0)
		cprintf("map_hva2gpa refused to overwrite as expected\n");
	else
		panic("map_hva2gpa overwrote a mapping despite overwrite == 0\n");
		
	/* Check if map_hva2gpa can map a page */
	if ((r = ept_map_hva2gpa(dstenv->env_pml4e, page2kva(pp), UTEMP, __EPTE_READ, 1)) < 0)
		panic("Failed to map a page from kva to gpa\n");
	else
		cprintf("map_hva2gpa succeeded in mapping a page\n");
		
	/* Check if map_hva2gpa sets permissions correctly */
	if ((r = ept_lookup_gpa(dstenv->env_pml4e, UTEMP, 0, &epte)) < 0)
		panic("Failed on ept_lookup_gpa (%d)\n", r);
	if (((uint64_t)*epte & ~EPTE_ADDR) == (__EPTE_READ | __EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT))
		cprintf("map_hva2gpa passed the perm check\n");
	else
		panic("map_hva2gpa didn't set permissions correctly\n");
	/* Walk the extended page table to check that the intermediate mappings are correct */
	dir = dstenv->env_pml4e;
	for (i = EPT_LEVELS - 1; i > 0; --i) {
		int idx = ADDR_TO_IDX(UTEMP, i);
		if (!epte_present(dir[idx]))
			panic("Failed to find a page table entry at intermediate level %d.", i);
		if (!(dir[idx] & __EPTE_FULL))
			panic("Permission check failed at intermediate level %d.", i);
		dir = (epte_t *) epte_page_vaddr(dir[idx]);
	}
	cprintf("EPT intermediate mapping check passed\n");
		
	
	/* stop running after test, as this is just a test run. */
	panic("Cheers! sys_ept_map seems to work correctly.\n");

	return 0;
}
Example #10
static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write,
			unsigned long gpa, unsigned long hva)
{
	int ret;
	epte_t *epte, flags;
	struct page *page;
	unsigned huge_shift;
	int level;

	ret = get_user_pages_fast(hva, 1, make_write, &page);
	if (ret != 1) {
		ret = ept_set_pfnmap_epte(vcpu, make_write, gpa, hva);
		if (ret)
			printk(KERN_ERR "ept: failed to get user page %lx\n", hva);
		return ret;
	}

	spin_lock(&vcpu->ept_lock);

	huge_shift = compound_order(compound_head(page)) + PAGE_SHIFT;
	level = 0;
	if (huge_shift == 30)
		level = 2;
	else if (huge_shift == 21)
		level = 1;

	ret = ept_lookup_gpa(vcpu, (void *) gpa,
			     level, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		put_page(page);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	if (epte_present(*epte)) {
		if (!epte_big(*epte) && level == 2)
			ept_clear_l2_epte(epte);
		else if (!epte_big(*epte) && level == 1)
			ept_clear_l1_epte(epte);
		else
			ept_clear_epte(epte);
	}

	flags = __EPTE_READ | __EPTE_EXEC |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (level) {
		struct page *tmp = page;
		page = compound_head(page);
		get_page(page);
		put_page(tmp);

		flags |= __EPTE_SZ;
	}

	*epte = epte_addr(page_to_phys(page)) | flags;

	spin_unlock(&vcpu->ept_lock);

	return 0;
}
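For orientation, a sketch of the EPT-violation exit path that would call this helper; the handler name is invented, while the exit-qualification bit layout (bit 1 set for a write access) follows the Intel SDM:

static int handle_ept_violation_sketch(struct vmx_vcpu *vcpu,
				       unsigned long gpa, unsigned long hva,
				       unsigned long exit_qual)
{
	/* Bit 1 of the exit qualification is set when the fault was a write. */
	int make_write = !!(exit_qual & (1 << 1));

	return ept_set_epte(vcpu, make_write, gpa, hva);
}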