Example #1
/*
 * Maps a page (PAE).
 *     uint64_t VirtAddr -> the virtual address at which to map the frame.
 *     uint64_t PhysAddr -> the physical address of the frame to map.
 */
void PAEPagingMap(uint64_t VirtAddr, uint64_t PhysAddr)
{
    char _CONST *ErrorFrameAlloc = "ERROR: Unable to allocate pages for the VMM.";

    PageDirEntry_t *PageDir;
    PageTableEntry_t *PageTable;

    PageDir = (PageDirEntry_t*)(uint32_t)(PDPT[PDPT_INDEX(VirtAddr)] & PAGE_MASK);

    // If page table isn't present, make one.
    if(!(PageDir[PD_INDEX(VirtAddr)] & PRESENT_BIT))
    {
        PageTable = (PageTableEntry_t*)AllocFrameFunc(POOL_BITMAP);
        if(!PageTable)
        {
            // Switch to text mode.
            VideoAPIFunc(VIDEO_VGA_SWITCH_MODE, MODE_80_25_TEXT);

            AbortBootFunc(ErrorFrameAlloc);
        }

        memset(PageTable, 0x00000000, PAGE_SIZE);

        PageDir[PD_INDEX(VirtAddr)] = (PageDirEntry_t)PageTable | PRESENT_BIT;
    }
    else
    {
        PageTable = (PageTableEntry_t*)(uint32_t)(PageDir[PD_INDEX(VirtAddr)] & PAGE_MASK);
    }

    PageTable[PT_INDEX(VirtAddr)] = PhysAddr | PRESENT_BIT;
}
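The snippet relies on helpers defined elsewhere in the project (PDPT_INDEX, PD_INDEX, PT_INDEX, PAGE_MASK, PRESENT_BIT, PAGE_SIZE, AllocFrameFunc, and so on). Purely as an assumption, the index and mask macros could follow the standard PAE split of a 32-bit virtual address (2-bit PDPT index, 9-bit directory index, 9-bit table index, 12-bit page offset):

/*
 * Illustrative sketch only, not the project's actual definitions: standard
 * PAE layout with 4 PDPT entries, 512-entry directories and tables, and
 * 4 KiB pages.
 */
#define PDPT_INDEX(Addr)  (((uint64_t)(Addr) >> 30) & 0x3)
#define PD_INDEX(Addr)    (((uint64_t)(Addr) >> 21) & 0x1FF)
#define PT_INDEX(Addr)    (((uint64_t)(Addr) >> 12) & 0x1FF)
#define PAGE_MASK         0x000FFFFFFFFFF000ULL   /* address bits of an entry */
#define PRESENT_BIT       0x1ULL
#define PAGE_SIZE         0x1000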
Example #2
/*
 * nonpaging_map
 * @vcpu: the virtual CPU whose shadow page table is being updated
 * @gva: the requested guest virtual address
 *
 * In nonpaging mode only the host page table needs to be consulted to
 * construct the shadow page table (SPT).
 */
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t gva)
{
	u32 *root;
	u32 *pt;
	int index;
	
	u32 *spde;
	u32 *spte;

	struct kvm_mmu_spage *spage;

	root = __va(vcpu->arch.mmu.root_hpa);
	index = ROOT_INDEX(gva);
	spde = &root[index];
	if(*spde == NULL_ENTRY) {
		spage = mmu_alloc_pseudo_pt_spage(vcpu, index, spde, 0);
		if(!spage) 
			return MMU_FAILED;
		pt = spage->pt;
		nonpaging_fill_spde(spde, pt);
	} else {
		pt = GET_PT_HVA(*spde);
	}
	
	index = PT_INDEX(gva);
	spte = &pt[index];
	if(*spte == NULL_ENTRY) {
		nonpaging_fill_spte(vcpu, spte, gva);
	}

	return MMU_OK;
}
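None of the helpers used above (ROOT_INDEX, PT_INDEX, GET_PT_HVA, NULL_ENTRY, the nonpaging_fill_* functions) are shown in this snippet. As an illustrative assumption only, for a two-level 32-bit shadow table they could look roughly like this (note that PT_INDEX here indexes a 1024-entry table, unlike the PAE macro in the previous example):

/*
 * Illustrative sketch only, not the real KVM definitions: index helpers for
 * a two-level, 32-bit shadow page table with 1024 entries per level.
 */
#define NULL_ENTRY        0
#define ROOT_INDEX(gva)   (((gva) >> 22) & 0x3FF)   /* top-level (PDE) index */
#define PT_INDEX(gva)     (((gva) >> 12) & 0x3FF)   /* page-table (PTE) index */

/*
 * A shadow PDE holds the physical address of its page table plus flag bits;
 * the host-virtual pointer is recovered by masking the flags and converting
 * with __va().
 */
#define GET_PT_HVA(spde)  ((u32 *)__va((spde) & ~0xFFFu))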
Example #3
/*
 * Initializes PAE paging.
 */
void PAEPagingInit()
{
    char _CONST *ErrorFrameAlloc = "ERROR: Unable to allocate pages for the VMM.";

    // Pointers to the four page directories, one per PDPT entry.
    PageDirEntry_t *PageDir[4];
    for(uint32_t i = 0; i < 4; i++)
    {
        // Allocate a page for the page directory.
        PageDir[i] = (PageDirEntry_t*)AllocFrameFunc(POOL_BITMAP);
        if(!PageDir[i])
        {
            // Switch to text mode.
            VideoAPIFunc(VIDEO_VGA_SWITCH_MODE, MODE_80_25_TEXT);

            AbortBootFunc(ErrorFrameAlloc);
        }

        memset(PageDir[i], 0x00000000, PAGE_SIZE);

        // Mark it in the PDPT.
        PDPT[i] = (PageDirPTEntry_t)PageDir[i] | PRESENT_BIT;
    }

    // Allocate a page for the base page table.
    PageTableEntry_t *BaseTable = (PageTableEntry_t*)AllocFrameFunc(POOL_BITMAP);
    if(!BaseTable)
    {
        // Switch to text mode.
        VideoAPIFunc(VIDEO_VGA_SWITCH_MODE, MODE_80_25_TEXT);

        AbortBootFunc(ErrorFrameAlloc);
    }

    memset(BaseTable, 0x00000000, PAGE_SIZE);

    // Mark the page table in the page directory.
    (PageDir[0])[PD_INDEX(0x00000000)] = (PageDirEntry_t)BaseTable | PRESENT_BIT;

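    // Identity-map the first 1 MiB (0x00000000 - 0x000FFFFF) into the base page table.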
    for(uint32_t Index = 0x0000; Index < 0x100000; Index += 0x1000)
    {
        BaseTable[PT_INDEX(Index)] = Index | PRESENT_BIT;
    }

    // Self-recursive trick, ftw!
    (PageDir[3])[508] = (PageDirEntry_t)PageDir[0] | PRESENT_BIT;
    (PageDir[3])[509] = (PageDirEntry_t)PageDir[1] | PRESENT_BIT;
    (PageDir[3])[510] = (PageDirEntry_t)PageDir[2] | PRESENT_BIT;
    (PageDir[3])[511] = (PageDirEntry_t)PageDir[3] | PRESENT_BIT;
}
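With the four self-referencing entries installed at the end of PageDir[3], every page table is reachable through the top 8 MiB of the virtual address space once this PDPT is loaded: PDPT index 3 with directory index 508 corresponds to virtual address 0xC0000000 + 508 * 0x200000 = 0xFF800000. A hedged sketch, assuming that window base (the helper name is hypothetical), of how the PTE for an arbitrary virtual address could then be located:

/*
 * Sketch under the assumptions above: the page table covering the 2 MiB
 * region that contains VirtAddr is mapped at
 *     0xFF800000 + (VirtAddr >> 21) * 0x1000,
 * and the PTE for VirtAddr is the PT_INDEX(VirtAddr)-th entry of that table.
 * Only valid for regions whose page table is already present.
 */
#define PT_WINDOW_BASE 0xFF800000UL

static PageTableEntry_t *PAEPagingGetPTE(uint64_t VirtAddr)
{
    PageTableEntry_t *Table = (PageTableEntry_t*)
        (PT_WINDOW_BASE + (uint32_t)((VirtAddr >> 21) << 12));

    return &Table[PT_INDEX(VirtAddr)];
}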
Example #4
bool arch_mm_context_read(struct vmm_context *ctx, void *output, addr_t address, size_t length)
{
	int pml4idx = PML4_INDEX(address);
	int pdptidx = PDPT_INDEX(address);
	int pdidx = PD_INDEX(address);

	addr_t destp;
	addr_t offset;
	addr_t *pml4v = (addr_t *)ctx->root_virtual;
	if(!pml4v[pml4idx]) {
		return false;
	}
	addr_t *pdptv = (addr_t *)((pml4v[pml4idx] & PAGE_MASK_PHYSICAL) + PHYS_PAGE_MAP);
	if(!pdptv[pdptidx]) {
		return false;
	}
	addr_t *pdv = (addr_t *)((pdptv[pdptidx] & PAGE_MASK_PHYSICAL) + PHYS_PAGE_MAP);
	if(!(pdv[pdidx] & PAGE_LARGE)) {
		int ptidx = PT_INDEX(address);
		offset = address & (0x1000 - 1);

		if(offset + length > 0x1000)
			PANIC(0, "mm_context_read crossed page boundary", EFAULT);

		if(!pdv[pdidx]) {
			return false;
		}
		addr_t *ptv = (addr_t *)((pdv[pdidx] & PAGE_MASK_PHYSICAL) + PHYS_PAGE_MAP);
		if(!ptv[ptidx]) {
			return false;
		}
		destp = ptv[ptidx] & PAGE_MASK_PHYSICAL;
	} else {
		offset = address & (0x200000 - 1);

		if(offset + length > 0x200000)
			PANIC(0, "mm_context_read crossed page boundary", EFAULT);

		if(!pdv[pdidx]) {
			return false;
		}
		destp = pdv[pdidx] & PAGE_MASK_PHYSICAL;
	}
	memcpy(output, (void *)(destp + PHYS_PAGE_MAP + offset), length);
	return true;
}
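arch_mm_context_read deliberately refuses transfers that cross a page boundary, so a caller needing a larger or unaligned read has to split it. A hedged sketch of such a wrapper (the helper name is hypothetical, not part of the original API):

/*
 * Sketch only: split a read into chunks that never cross a 4 KiB boundary,
 * which also keeps each chunk inside a single 2 MiB page.
 */
static bool mm_context_read_all(struct vmm_context *ctx, void *output,
                                addr_t address, size_t length)
{
	unsigned char *out = output;
	while(length > 0) {
		size_t chunk = 0x1000 - (address & (0x1000 - 1));
		if(chunk > length)
			chunk = length;
		if(!arch_mm_context_read(ctx, out, address, chunk))
			return false;
		out += chunk;
		address += chunk;
		length -= chunk;
	}
	return true;
}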
Example #5
bool arch_mm_context_virtual_getmap(struct vmm_context *ctx, addr_t address, addr_t *phys, int *flags)
{
	int pml4idx = PML4_INDEX(address);
	int pdptidx = PDPT_INDEX(address);
	int pdidx = PD_INDEX(address);

	addr_t destp;
	addr_t *pml4v = (addr_t *)ctx->root_virtual;
	if(!pml4v[pml4idx]) {
		return false;
	}
	addr_t *pdptv = (addr_t *)((pml4v[pml4idx] & PAGE_MASK_PHYSICAL) + PHYS_PAGE_MAP);
	if(!pdptv[pdptidx]) {
		return false;
	}
	addr_t *pdv = (addr_t *)((pdptv[pdptidx] & PAGE_MASK_PHYSICAL) + PHYS_PAGE_MAP);
	if(!(pdv[pdidx] & PAGE_LARGE)) {
		int ptidx = PT_INDEX(address);

		if(!pdv[pdidx]) {
			return false;
		}
		addr_t *ptv = (addr_t *)((pdv[pdidx] & PAGE_MASK_PHYSICAL) + PHYS_PAGE_MAP);
		if(!ptv[ptidx]) {
			return false;
		}
		destp = ptv[ptidx];
	} else {
		if(!pdv[pdidx]) {
			return false;
		}
		destp = pdv[pdidx];
	}
	if(phys)
		*phys = destp & PAGE_MASK_PHYSICAL;
	if(flags)
		*flags = destp & ATTRIB_MASK;
	return true;
}
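A hedged usage sketch (the helper name is hypothetical): combine the returned page base with the in-page offset to get a full physical address. This assumes PAGE_LARGE is among the bits kept by ATTRIB_MASK and that the PAT bit is unused for 4 KiB mappings.

/*
 * Sketch only: translate a virtual address in ctx to a full physical address.
 */
static bool mm_context_virt_to_phys(struct vmm_context *ctx, addr_t vaddr, addr_t *out)
{
	addr_t page_base;
	int entry_flags;

	if(!arch_mm_context_virtual_getmap(ctx, vaddr, &page_base, &entry_flags))
		return false;

	/* page_base is the base of the backing page; add the in-page offset.
	 * A PAGE_LARGE mapping keeps its offset in the low 21 bits. */
	if(entry_flags & PAGE_LARGE)
		*out = page_base + (vaddr & (0x200000 - 1));
	else
		*out = page_base + (vaddr & (0x1000 - 1));
	return true;
}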