Example #1
	/* Copy 'pte' into this object's page-table entry, if PTE() resolves one. */
	__forceinline
	void SetPTE(__in PAGE_TABLE_ENTRY& pte)
	{
		PAGE_TABLE_ENTRY* _pte = PTE();
		if (_pte)
			*_pte = pte;
	}
Example #2
	/* Copy the current page-table entry into 'pte'; returns false when PTE()
	 * resolves no entry, so the caller must check the result. */
	__checkReturn
	bool GetPTE(__out PAGE_TABLE_ENTRY& pte)
	{
		PAGE_TABLE_ENTRY* _pte = PTE();
		if (_pte)
		{
			pte = *_pte;
			return true;
		}
		return false;
	}
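Examples #1 and #2 are the two halves of a copy-in/copy-out pair over whatever entry PTE() resolves: GetPTE copies the live entry out and reports whether one exists, and SetPTE writes a caller-supplied entry back. A minimal usage sketch, assuming the two helpers are members of some wrapper class (called Page here purely for illustration):

	// Hypothetical sketch: copy the page-table entry from one wrapper object
	// to another through the Get/Set pair above.  'Page' is an assumed name
	// for whatever class hosts GetPTE/SetPTE.
	bool ClonePTE(Page& src, Page& dst)
	{
		PAGE_TABLE_ENTRY pte;
		if (!src.GetPTE(pte))	// __checkReturn: the result must be tested
			return false;
		dst.SetPTE(pte);	// write the copied entry into the destination
		return true;
	}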
Example #3
/** 
 * @brief Map the physical page 'pp' into the virtual address 'va' in page
 *        directory 'pgdir'
 *
 * Map the physical page 'pp' at virtual address 'va'.
 * The permissions (the low 12 bits) of the page table
 * entry should be set to 'perm|PTE_P'.
 * 
 * Details:
 *   - If there is already a page mapped at 'va', it is page_remove()d.
 *   - If necessary, on demand, allocates a page table and inserts it into 
 *     'pgdir'.
 *   - page_incref() should be called if the insertion succeeds. 
 *   - The TLB must be invalidated if a page was formerly present at 'va'.
 *     (this is handled in page_remove)
 *
 * No support for jumbos here.  We will need to be careful when trying to
 * insert regular pages into something that was already jumbo.  We will
 * also need to be careful with our overloading of the PTE_PS and 
 * PTE_PAT flags...
 *
 * @param[in] pgdir the page directory to insert the page into
 * @param[in] page  a pointer to the page struct representing the
 *                  physical page that should be inserted.
 * @param[in] va    the virtual address where the page should be
 *                  inserted.
 * @param[in] perm  the permission bits with which to set up the
 *                  virtual mapping.
 *
 * @return ESUCCESS  on success
 * @return -ENOMEM   if a page table could not be allocated
 *                   into which the page should be inserted
 *
 */
int page_insert(pde_t *pgdir, struct page *page, void *va, int perm) 
{
	pte_t* pte = pgdir_walk(pgdir, va, 1);
	if (!pte)
		return -ENOMEM;
	/* Two things here:  First, we need to up the ref count of the page we want
	 * to insert in case it is already mapped at va.  In that case we don't want
	 * page_remove to ultimately free it, and then for us to continue as if pp
	 * wasn't freed. (moral = up the ref asap) */
	kref_get(&page->pg_kref, 1);
	/* Careful, page remove handles the cases where the page is PAGED_OUT. */
	if (!PAGE_UNMAPPED(*pte))
		page_remove(pgdir, va);
	*pte = PTE(page2ppn(page), PTE_P | perm);
	return 0;
}
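A hedged sketch of the calling side: the caller keeps the reference it got from allocation, lets page_insert take an extra one for the mapping, and gives its own reference back if the insert fails. The allocator (alloc_one_page) and the release call (page_decref) are assumed names, not taken from the snippet above.

/* Hypothetical caller sketch: map a freshly allocated physical page at 'va'.
 * alloc_one_page() and page_decref() are assumed helpers, not part of the
 * snippet above. */
static int map_one_page(pde_t *pgdir, void *va, int perm)
{
	struct page *page = alloc_one_page();	/* assumed allocator */
	if (!page)
		return -ENOMEM;
	int ret = page_insert(pgdir, page, va, perm);
	if (ret < 0)
		page_decref(page);	/* mapping failed: drop our allocation ref */
	return ret;
}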
Example #4
/* Identity-map physical memory up to max_addr, placing the page tables in a
 * region that starts right after the kernel image (kernel_end). */
void pagetable_init(uint64_t max_addr, uint32_t kernel_end) {
    uint32_t i;

    page_table_area = kernel_end;
    memset((uint8_t *)page_table_area, 0, PT_NUM_PAGES(max_addr) * PAGE_SIZE);
    printk("page_table_area: 0x%lx\n", page_table_area);
    printk("page_table_end:  0x%lx\n", page_table_area + PT_NUM_PAGES(max_addr) * PAGE_SIZE);

    /* direct map all but the zero page in the page tables */
    for (i = 1; i < NUM_PTES(max_addr); i++) {
        struct dw *pt = (struct dw *)PT(i);
        pt->lo = PTE(i) | ENTRY_RW | ENTRY_PRESENT;
    }

    /* set up the page directories */
    for (i = 0; i < NUM_PTPGS(max_addr); i++) {
        struct dw *pd = (struct dw *)PD(i);
        pd->lo = PDE(i) | ENTRY_RW | ENTRY_PRESENT;
    }

    /* set up the PDPs */
    for (i = 0; i < NUM_PDPGS(max_addr); i++) {
        struct dw *pdp = (struct dw *)PDP(i);
        pdp->lo = PDPE(i) | ENTRY_RW | ENTRY_PRESENT;
    }

    /* set up the PML4 */
    for (i = 0; i < NUM_PDPPGS(max_addr); i++) {
        struct dw *pml4 = (struct dw *)PML4(i);
        pml4->lo = PML4E(i) | ENTRY_RW | ENTRY_PRESENT;
    }

    walk_pagetable(max_addr);

    to64_prep_paging(PML4(0));
    //return PML4(0);
}
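Every level is filled the same way: entry i points at item i of the level below, with only the Present and Read/Write flags set. The ENTRY_* and PTE()/PDE()/PDPE()/PML4E() macros are not shown in the snippet; on x86-64 those two flags are bits 0 and 1 of an entry, and a 4 KiB-aligned physical address occupies bits 12 and up, so a stand-in for the leaf-entry computation could look like this (an assumption, not the project's actual macros):

#include <stdint.h>

/* Hedged stand-ins for the flag macros used above; the real definitions are
 * not part of the snippet.  On x86-64, Present is bit 0 and Read/Write is
 * bit 1 of a paging-structure entry. */
#define X86_PRESENT	0x1ULL
#define X86_RW		0x2ULL

/* Leaf entry that identity-maps the i-th 4 KiB page (phys addr = i * 4096). */
static inline uint64_t identity_pte(uint64_t i)
{
	return (i << 12) | X86_RW | X86_PRESENT;
}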
Example #5
/* Walk the four-level table (PML4 -> PDP -> PD -> PT) that pagetable_init
 * built and assert that every present entry points exactly where the
 * identity-map macros say it should. */
static void walk_pagetable(uint64_t max_addr) {
    uint32_t pml4 = PML4(0);
    int dbg = 0;

    if (dbg) {
        printk("pml4pgs: %d\n", NUM_PML4PGS(max_addr));
        printk("pdppgs:  %d\n", NUM_PDPPGS(max_addr));
        printk("pdpgs:   %d\n", NUM_PDPGS(max_addr));
        printk("ptpgs:   %d\n", NUM_PTPGS(max_addr));
        
        printk("pml4(0) is at 0x%lx\n", PML4(0));
        printk("pml4e(0) is 0x%llx ", PT_ADDR(*(uint64_t *)pml4));
        printk("should be 0x%llx\n", PML4E(0));
    }
    assert(PT_ADDR(*(uint64_t *)pml4) == PML4E(0));

    uint64_t *pdp, *pd, *pt;
    size_t i, j, k;
    for (i = 0; i < NUM_ENTRIES; i++) {
        pdp = (uint64_t *)(PT_ADDR(*(uint64_t *)pml4)) + i;
        if (!(*pdp & 0x1))
            continue;

        if (dbg) {
            printk("PDP(%d) is at ", i);
            printk("0x%llx ", pdp);
            printk("should be 0x%llx\n", PDP(i));
        }
        assert((uint32_t)pdp == PDP(i));
        if (dbg) {
            printk("PDPE(%d) is at ", i);
            printk("0x%llx ", PT_ADDR(*pdp));
            printk("should be 0x%llx\n", PDPE(i));
        }
        assert(PT_ADDR(*pdp) == PDPE(i));

        for (j = 0; j < NUM_ENTRIES; j++) {
            pd = ((uint64_t *)PT_ADDR(*pdp)) + j; 
            if (!(*pd & 0x1))
                continue;
            size_t jdx = i * NUM_ENTRIES + j;
            if (dbg) {
                printk("PD(%d) is at ", jdx);
                printk("0x%llx ", pd);
                printk("should be 0x%llx\n", PD(jdx));
            }
            assert((uint32_t)pd == PD(jdx));
            if (dbg) {
                printk("PDE(%d) is at ", jdx);
                printk("0x%llx ", PT_ADDR(*pd));
                printk("should be 0x%llx\n", PDE(jdx));
            }
            assert(PT_ADDR(*pd) == PDE(jdx));

            for (k = 0; k < NUM_ENTRIES; k++) {
                pt = ((uint64_t *)PT_ADDR(*pd)) + k; 
                if (!(*pt & 0x1))
                    continue;
                size_t idx = jdx * NUM_ENTRIES + k;
                if (dbg) {
                    printk("PT(%d) is at ", idx);
                    printk("0x%llx ", pt);
                    printk("should be 0x%llx\n", PT(idx));
                }
                assert((uint32_t)pt == PT(idx));
                if (dbg) {
                    printk("PTE(%d) is at ", idx);
                    printk("0x%llx ", PT_ADDR(*pt));
                    printk("should be 0x%llx\n", PTE(idx));
                }
                assert(PT_ADDR(*pt) == PTE(idx));
            }
        }
    }
}
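The walk depends on PT_ADDR() to strip the flag bits from an entry and recover the physical address of the next-level table or page. Its definition is not part of the snippet; on x86-64 the address field of a 4 KiB-granular entry spans bits 12 through 51, so a stand-in could look like the following (an assumption, not this code base's macro):

#include <stdint.h>

/* Assumed stand-in for PT_ADDR(): keep the physical-address field (bits
 * 12-51 on x86-64) and mask off the low flag bits and the high NX/ignored
 * bits. */
#define MY_PT_ADDR(entry)	((uint64_t)(entry) & 0x000FFFFFFFFFF000ULL)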