Example #1
// Return the address of the PTE in the last-level page table that
// corresponds to virtual address va.  If alloc != 0, create any
// required intermediate page-table pages.
static pte_t* walkpgdir(pgd_t *pgdbase, const void *va, int alloc)
{
    pgd_t *pgd;
    pmd_t *pmdbase;
    pmd_t *pmd;
    pte_t *ptebase;

    // Level 0: index into the page global directory.
    pgd = &pgdbase[PGD_IDX((uint64)va)];

    // Descend through the existing entry only if both the table and
    // valid bits are set; a zeroed entry falls through to allocation.
    if ((*pgd & (ENTRY_TABLE | ENTRY_VALID)) == (ENTRY_TABLE | ENTRY_VALID)) {
        pmdbase = (pmd_t*) p2v((*pgd) & PG_ADDR_MASK);
    } else {
        if (!alloc || (pmdbase = (pmd_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Zero the new PMD page so every entry starts out invalid.
        memset(pmdbase, 0, PT_SZ);

        // Install it in the PGD as a next-level table descriptor.
        *pgd = v2p(pmdbase) | ENTRY_TABLE | ENTRY_VALID;
    }

    // Level 1: index into the page middle directory.
    pmd = &pmdbase[PMD_IDX((uint64)va)];

    if ((*pmd & (ENTRY_TABLE | ENTRY_VALID)) == (ENTRY_TABLE | ENTRY_VALID)) {
        ptebase = (pte_t*) p2v((*pmd) & PG_ADDR_MASK);
    } else {
        if (!alloc || (ptebase = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all the ENTRY_VALID bits in the new table are zero.
        memset(ptebase, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pmd = v2p(ptebase) | ENTRY_TABLE | ENTRY_VALID;
    }

    // Level 2: return the address of the leaf PTE slot for va.
    return &ptebase[PTE_IDX((uint64)va)];
}
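
For context, walkpgdir() is the kind of helper that is normally driven by a
mappages()-style loop installing one leaf PTE per page of a virtual range.
The sketch below is illustrative only: mappages, PG_SIZE, perm, and panic()
are assumed names modeled on the xv6 convention; only walkpgdir, pgd_t,
pte_t, uint64, and ENTRY_VALID come from the code above.

// Hypothetical caller (a sketch, not part of this codebase): map the
// pages covering [va, va+size) to physical addresses starting at pa.
static int mappages(pgd_t *pgdbase, void *va, uint64 size, uint64 pa, uint64 perm)
{
    char *a    = (char*)((uint64)va & ~((uint64)PG_SIZE - 1));
    char *last = (char*)(((uint64)va + size - 1) & ~((uint64)PG_SIZE - 1));
    pte_t *pte;

    for (;;) {
        // Walk the tables, building intermediate levels on demand.
        if ((pte = walkpgdir(pgdbase, a, 1)) == 0)
            return -1;                   // out of page-table pages
        if (*pte & ENTRY_VALID)
            panic("mappages: remap");    // refuse to clobber a live mapping
        *pte = pa | perm | ENTRY_VALID;  // install the leaf entry
        if (a == last)
            break;
        a  += PG_SIZE;
        pa += PG_SIZE;
    }
    return 0;
}

Note how the alloc flag splits the two use cases: a mapper like this passes 1
so the walk builds missing PMD/PTE pages, while a pure lookup would pass 0
and treat a null return as "unmapped".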
Example #2
unsigned long build_guest_tables(struct vcpu *v, struct domain_setup_info *dsi)
{
	pde_t *pgd;
	pte_t *pgt;
	u32 pgd_idx;
	u32 pgt_idx;
	unsigned long pg;

#define PGT_ENTRIES_PER_PAGE	512

	DECLARE_MAP_TRACK(dsi);

	/* First-level table: ALLOC_GUEST_TABLE(2) is an order-2 (16 KB)
	 * allocation; seed it from the idle page tables. */
	pgd = (pde_t *)ALLOC_GUEST_TABLE(2);
	memcpy(pgd, &idle_pgd[0], PGD_SIZE);

	pgd_idx = PGD_IDX(dsi->v_start);
	pgt_idx = PGT_IDX(dsi->v_start);

	/* Walk the guest image, one page of second-level tables at a time. */
	do {
		pgt = (pte_t *)ALLOC_GUEST_TABLE(0);

		/* Two 256-entry page tables per allocated page, so install
		 * two first-level entries per iteration. */
		pgd[pgd_idx++] = MK_PDE((unsigned long)&pgt[0],   PDE_GUEST_TABLE);
		pgd[pgd_idx++] = MK_PDE((unsigned long)&pgt[256], PDE_GUEST_TABLE);

		/* Fill leaf entries until the image is fully mapped or this
		 * pair of tables is exhausted. */
		do {
			pgt[pgt_idx++] = MK_PTE(ALLOC_GUEST_PAGE(), PTE_GUEST_PAGE);
		} while ((NEXT_TRACK() < dsi->p_end) && (pgt_idx != PGT_ENTRIES_PER_PAGE));

		pgt_idx = 0;

	} while(NEXT_TRACK() < dsi->p_end);

	/* These two allocations back the exception-vector mapping below,
	 * which is currently commented out. */
	pgt = (pte_t *)ALLOC_GUEST_TABLE(0);
	pg = ALLOC_GUEST_TABLE(0);

	/* printf("Src Page = 0x%x\n", GET_HVT_PAGE()); */

	/* copy_page(pg, GET_HVT_PAGE()); */

	/* pgt[PGT_IDX(VECTORS_VIRT_BASE)] = MK_PTE(pg, PTE_VECTOR_PAGE); */

	/* pgd[PGD_IDX(VECTORS_VIRT_BASE)] = MK_PDE((unsigned long)&pgt[0], PDE_VECTOR_TABLE); */

	/* 
	 * For Fixmap Region.
	 * This will be removed as soon as possible.
	 */
	pgd_idx = PGD_IDX(HYPERVISOR_VIRT_START - (1 << PGD_SHIFT) * 2);

	pgt = (pte_t *)ALLOC_GUEST_TABLE(0);
	pgd[pgd_idx++] = MK_PDE((unsigned long)&pgt[0], PDE_GUEST_TABLE);
	pgd[pgd_idx++] = MK_PDE((unsigned long)&pgt[256], PDE_GUEST_TABLE);

#if 0
	install_mapcache_table(v, pgd);
#endif

	zap_low_mappings(pgd);

	/* guest start address (phys/virtual addr) */
	v->arch.guest_pstart = dsi->p_start;
	v->arch.guest_vstart = dsi->v_start;

	/* guest page table addresses (phys and virtual) */
	v->arch.guest_table  = mpt_base;
	v->arch.guest_vtable = PGD_ALIGN(dsi->v_end);

	printk("PT FRAMES = %d\n", TOTAL_GUEST_TABLES());
	return TOTAL_GUEST_TABLES();
}
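
For reference, the MK_PDE()/MK_PTE() constructors used above are, on ARM
ports of this vintage, typically just an OR of a suitably aligned address
with descriptor attribute bits. A minimal sketch under that assumption
follows; the real definitions live in this tree's headers, so the masks here
are illustrative, not authoritative.

/* Hypothetical sketch, assuming classic ARM coarse page tables: a
 * first-level descriptor keeps bits [31:10] of the 1 KB-aligned
 * second-level table, a small-page descriptor bits [31:12] of the page. */
#define MK_PDE(tbl, flags)  ((pde_t)(((unsigned long)(tbl)  & ~0x3FFUL) | (flags)))
#define MK_PTE(page, flags) ((pte_t)(((unsigned long)(page) & ~0xFFFUL) | (flags)))

Each 256-entry table is 1 KB, so the two tables installed per allocation in
the loop above occupy the first 2 KB of the page (&pgt[0] and &pgt[256]),
which is exactly why PGT_ENTRIES_PER_PAGE is 512.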