/*
 * Add a mapping for the machine page at the given virtual address.
 *
 * ma	 - machine (physical under Xen) address of the page to map
 * va	 - virtual address to map it at
 * level - pagetable level of the mapping: 0 for a normal page, > 0
 *	   selects a large-page mapping (PT_PAGESIZE is set in the PTE)
 *
 * Kernel-text-and-above mappings get PT_GLOBAL when the CPU supports
 * global pages, so they survive TLB flushes on context switch.
 */
static void
map_ma_at_va(maddr_t ma, native_ptr_t va, uint_t level)
{
	x86pte_t *ptep;
	x86pte_t pteval;

	pteval = ma | pte_bits;
	if (level > 0)
		pteval |= PT_PAGESIZE;
	if (va >= target_kernel_text && pge_support)
		pteval |= PT_GLOBAL;

	if (map_debug && ma != va)
		dboot_printf("mapping ma=0x%" PRIx64 " va=0x%" PRIx64
		    " pte=0x%" PRIx64 " l=%d\n",
		    (uint64_t)ma, (uint64_t)va, pteval, level);

#if defined(__xpv)
	/*
	 * see if we can avoid find_pte() on the hypervisor: if the
	 * mapping already has all its intermediate tables in place the
	 * hypercall succeeds and we are done.
	 */
	if (HYPERVISOR_update_va_mapping(va, pteval,
	    UVMF_INVLPG | UVMF_LOCAL) == 0)
		return;
#endif

	/*
	 * Find the pte that will map this address. This creates any
	 * missing intermediate level page tables
	 */
	ptep = find_pte(va, NULL, level, 0);

	/*
	 * When paravirtualized, we must use hypervisor calls to modify the
	 * PTE, since paging is active. On real hardware we just write to
	 * the pagetables which aren't in use yet.
	 */
#if defined(__xpv)
	ptep = ptep;	/* shut lint up */

	if (HYPERVISOR_update_va_mapping(va, pteval, UVMF_INVLPG | UVMF_LOCAL))
		dboot_panic("mmu_update failed-map_ma_at_va va=0x%" PRIx64
		    " l=%d ma=0x%" PRIx64 ", pte=0x%" PRIx64 "",
		    (uint64_t)va, level, (uint64_t)ma, pteval);
#else
	if (va < 1024 * 1024)
		pteval |= PT_NOCACHE;		/* for video RAM */
	if (pae_support)
		*ptep = pteval;
	else
		*((x86pte32_t *)ptep) = (x86pte32_t)pteval;
#endif
}
/*
 * D5.3 pgmap! ( pte va -- )
 *
 * Pop a virtual address and a PTE value off the stack, then install the
 * PTE into the pagetable slot mapping that address.  find_pte() is
 * called with alloc set, so missing intermediate tables are created.
 */
static void
pgmap_store(void)
{
	unsigned long vaddr = POP();
	uint32_t new_pte = POP();
	unsigned long slot = find_pte(vaddr, 1);

	*(uint32_t *)slot = new_pte;
	DPRINTF("pgmap!: va 0x%lx pa 0x%lx pte 0x%x\n", vaddr, slot, new_pte);
}
/*
 * D5.3 pgmap@ ( va -- pte )
 *
 * Pop a virtual address and push back the PTE that maps it.  If the
 * lookup fails (find_pte() returns the sentinel values 1 or 2), push 0
 * instead.
 */
static void
pgmap_fetch(void)
{
	uint32_t entry;
	unsigned long vaddr, slot;

	vaddr = POP();
	slot = find_pte(vaddr, 0);
	if (slot == 1 || slot == 2) {
		/* no valid translation for this address */
		PUSH(0);
		return;
	}

	entry = *(uint32_t *)slot;
	DPRINTF("pgmap@: va 0x%lx pa 0x%lx pte 0x%x\n", vaddr, slot, entry);
	PUSH(entry);
}
/*
 * Build page tables to map all of memory used so far as well as the kernel.
 *
 * Order matters here: the simple boot-time allocator (mem_alloc) only
 * grows upward, so the 1:1 mappings created at the end also cover every
 * pagetable page allocated earlier in this routine.
 */
static void
build_page_tables(void)
{
	uint32_t psize;
	uint32_t level;
	uint32_t off;
	uint64_t start;
#if !defined(__xpv)
	uint32_t i;
	uint64_t end;
#endif	/* __xpv */

	/*
	 * If we're on metal, we need to create the top level pagetable.
	 * Under Xen the hypervisor has already built one for us.
	 */
#if defined(__xpv)
	top_page_table = (paddr_t)(uintptr_t)xen_info->pt_base;
#else	/* __xpv */
	top_page_table = (paddr_t)(uintptr_t)mem_alloc(MMU_PAGESIZE);
#endif	/* __xpv */
	DBG((uintptr_t)top_page_table);

	/*
	 * Determine if we'll use large mappings for kernel, then map it.
	 */
	if (largepage_support) {
		psize = lpagesize;
		level = 1;
	} else {
		psize = MMU_PAGESIZE;
		level = 0;
	}

	DBG_MSG("Mapping kernel\n");
	DBG(ktext_phys);
	DBG(target_kernel_text);
	DBG(ksize);
	DBG(psize);
	for (off = 0; off < ksize; off += psize)
		map_pa_at_va(ktext_phys + off, target_kernel_text + off, level);

	/*
	 * The kernel will need a 1 page window to work with page tables,
	 * so allocate it now and record both the window's address and the
	 * PTE that maps it in the boot info handed to the kernel.
	 */
	bi->bi_pt_window = (uintptr_t)mem_alloc(MMU_PAGESIZE);
	DBG(bi->bi_pt_window);
	bi->bi_pte_to_pt_window =
	    (uintptr_t)find_pte(bi->bi_pt_window, NULL, 0, 0);
	DBG(bi->bi_pte_to_pt_window);

#if defined(__xpv)
	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
		/* If this is a domU we're done: no BIOS/low memory to map. */
		DBG_MSG("\nPage tables constructed\n");
		return;
	}
#endif /* __xpv */

	/*
	 * We need 1:1 mappings for the lower 1M of memory to access
	 * BIOS tables used by a couple of drivers during boot.
	 *
	 * The following code works because our simple memory allocator
	 * only grows usage in an upwards direction.
	 *
	 * Note that by this point in boot some mappings for low memory
	 * may already exist because we've already accessed device in low
	 * memory.  (Specifically the video frame buffer and keyboard
	 * status ports.)  If we're booting on raw hardware then GRUB
	 * created these mappings for us.  If we're booting under a
	 * hypervisor then we went ahead and remapped these devices into
	 * memory allocated within dboot itself.
	 */
	if (map_debug)
		dboot_printf("1:1 map pa=0..1Meg\n");
	for (start = 0; start < 1024 * 1024; start += MMU_PAGESIZE) {
#if defined(__xpv)
		map_ma_at_va(start, start, 0);
#else /* __xpv */
		map_pa_at_va(start, start, 0);
#endif /* __xpv */
	}

	/*
	 * 1:1 map every memlist region used so far, up to the allocator's
	 * high-water mark (next_avail_addr); this covers dboot itself and
	 * all pagetable pages allocated above.
	 */
#if !defined(__xpv)
	for (i = 0; i < memlists_used; ++i) {
		start = memlists[i].addr;
		end = start + memlists[i].size;

		if (map_debug)
			dboot_printf("1:1 map pa=%" PRIx64 "..%" PRIx64 "\n",
			    start, end);
		while (start < end && start < next_avail_addr) {
			map_pa_at_va(start, start, 0);
			start += MMU_PAGESIZE;
		}
	}
#endif /* !__xpv */

	DBG_MSG("\nPage tables constructed\n");
}