Example #1
File: paging.c  Project: joe9/barrelfish
/**
 * \brief Map a region of physical memory into the physical memory address space.
 *
 * Maps the region of physical memory that starts at base and is size bytes
 * long onto the same-sized virtual memory region (the kernel's direct
 * mapping). All pages are flagged according to bitmap. This function
 * automatically fills the needed page directory entries in the page
 * hierarchy rooted at pml4. base and size will be made page-aligned by this
 * function.
 *
 * \param base          Physical base address of memory region
 * \param size          Size in bytes of memory region
 * \param bitmap        Bitmap of flags for page tables/directories
 *
 * \return 0 on success, -1 on error (out of range)
 */
static int
paging_map_mem(lpaddr_t base,
               size_t size,
               uint64_t bitmap)
{
    lvaddr_t vaddr, vbase = local_phys_to_mem(base);
    lpaddr_t addr;

    // Align given physical base address
    if (base & X86_64_MEM_PAGE_MASK) {
        base -= base & X86_64_MEM_PAGE_MASK;
    }

    paging_align(&vbase, &base, &size, X86_64_MEM_PAGE_SIZE);

    // Is mapped region out of range?
    assert(base + size <= (lpaddr_t)K1OM_PADDR_SPACE_LIMIT);
    if (base + size > (lpaddr_t) K1OM_PADDR_SPACE_LIMIT) {
        return -1;
    }

    // Map pages, tables and directories
    for (vaddr = vbase, addr = base; vaddr < vbase + size;
         vaddr += X86_64_MEM_PAGE_SIZE, addr += X86_64_MEM_PAGE_SIZE) {
        union x86_64_pdir_entry *pml4_base = &pml4[X86_64_PML4_BASE(vaddr)];

        union x86_64_pdir_entry *pdpt_base =
                &mem_pdpt[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(vaddr)];

        union x86_64_ptable_entry *pdir_base =
                &mem_pdir[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(addr)]
                        [X86_64_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING,
              "Mapping 2M page: vaddr = 0x%"PRIxLVADDR", addr = 0x%lx, "
              "PML4_BASE = %lu, PDPT_BASE = %lu, PDIR_BASE = %lu -- ",
              vaddr, addr, X86_64_PML4_BASE(vaddr), X86_64_PDPT_BASE(vaddr),
              X86_64_PDIR_BASE(vaddr));

        mapit(pml4_base, pdpt_base, pdir_base, addr, bitmap);
    }
    // XXX FIXME: get rid of this TLB flush code, or move it elsewhere
    // uint64_t cr3;
    // __asm__ __volatile__("mov %%cr3,%0" : "=a" (cr3) : );
    // __asm__ __volatile__("mov %0,%%cr3" :  : "a" (cr3));

    return 0;
}
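
/*
 * Usage sketch (illustrative, not part of the original file): map the first
 * gigabyte of physical memory with present/writable/global flags. The
 * X86_64_PTABLE_* flag macros and panic() come from the Barrelfish kernel
 * headers; the helper name, the flag combination and the 1 GiB size are
 * assumptions made for this example.
 */
static void example_map_lowmem(void)
{
    uint64_t flags = X86_64_PTABLE_PRESENT
                   | X86_64_PTABLE_READ_WRITE
                   | X86_64_PTABLE_GLOBAL_PAGE;

    // paging_map_mem() aligns base and size itself and returns -1 if the
    // region would exceed K1OM_PADDR_SPACE_LIMIT.
    if (paging_map_mem(0, 1UL << 30, flags) != 0) {
        panic("could not map low physical memory");
    }
}

/**
 * \brief Return the kernel-virtual address of the leaf page table covering
 *        vaddr in the current dispatcher's address space.
 *
 * Walks the PML4, PDPT and PDIR levels rooted at dcb_current->vspace and
 * returns 0 if any intermediate entry is not present.
 */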
static inline lvaddr_t get_leaf_ptable_for_vaddr(genvaddr_t vaddr)
{
    lvaddr_t root_pt = local_phys_to_mem(dcb_current->vspace);

    // get pdpt
    union x86_64_pdir_entry *pdpt = (union x86_64_pdir_entry *)root_pt + X86_64_PML4_BASE(vaddr);
    if (!pdpt->raw) { return 0; }
    genpaddr_t pdpt_gp = pdpt->d.base_addr << BASE_PAGE_BITS;
    lvaddr_t pdpt_lv = local_phys_to_mem(gen_phys_to_local_phys(pdpt_gp));
    // get pdir
    union x86_64_pdir_entry *pdir = (union x86_64_pdir_entry *)pdpt_lv + X86_64_PDPT_BASE(vaddr);
    if (!pdir->raw) { return 0; }
    genpaddr_t pdir_gp = pdir->d.base_addr << BASE_PAGE_BITS;
    lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));
    // get ptable
    union x86_64_ptable_entry *ptable = (union x86_64_ptable_entry *)pdir_lv + X86_64_PDIR_BASE(vaddr);
    if (!ptable->raw) { return 0; }
    genpaddr_t ptable_gp = ptable->base.base_addr << BASE_PAGE_BITS;
    lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

    return ptable_lv;
}
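
/*
 * Sketch (illustrative, not part of the original file): use the leaf page
 * table returned by get_leaf_ptable_for_vaddr() to test whether a virtual
 * address currently has a mapping. union x86_64_ptable_entry and
 * X86_64_PTABLE_BASE() are the existing Barrelfish definitions; the helper
 * name is hypothetical.
 */
static bool example_vaddr_is_mapped(genvaddr_t vaddr)
{
    lvaddr_t leaf_pt = get_leaf_ptable_for_vaddr(vaddr);
    if (leaf_pt == 0) {
        // some level of the page-table hierarchy is not present
        return false;
    }

    union x86_64_ptable_entry *entry =
            (union x86_64_ptable_entry *)leaf_pt + X86_64_PTABLE_BASE(vaddr);
    return entry->raw != 0;
}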