Example #1
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for the kernel and memory-space and
 * clears out all other mappings. Use it only at system bootup!
 */
void paging_x86_32_reset(void)
{
    // Re-map physical memory
    // XXX: Map in what we get from Multiboot. We should actually map
    // stuff dynamically, whenever raw mem gets retyped into a kernel
    // object
/*     if(paging_map_memory(0, multiboot_info->mem_upper * 1024 + 0x100000) */
    lpaddr_t lpaddr = gen_phys_to_local_phys(X86_32_PADDR_SPACE_LIMIT -
                                             X86_32_DEVICE_SPACE_LIMIT);
    if(paging_x86_32_map_memory(0, lpaddr) != 0) {
        panic("error while mapping physical memory!");
    }

    // Switch to new page layout
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)pdir));
#endif
}
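
The region mapped above spans [0, X86_32_PADDR_SPACE_LIMIT - X86_32_DEVICE_SPACE_LIMIT). As a rough, standalone illustration of that arithmetic only, the sketch below uses assumed values for the two limits; the real constants are defined in the x86_32 paging headers and may differ.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only; not the kernel's actual constants. */
#define EXAMPLE_PADDR_SPACE_LIMIT   0xC0000000u  /* assumed: 3 GiB of mappable physical space */
#define EXAMPLE_DEVICE_SPACE_LIMIT  0x10000000u  /* assumed: 256 MiB reserved for devices */

int main(void)
{
    /* Size of the region a reset would direct-map, i.e. [0, limit). */
    uint32_t limit = EXAMPLE_PADDR_SPACE_LIMIT - EXAMPLE_DEVICE_SPACE_LIMIT;
    printf("direct-mapped physical region: [0x0, 0x%x)\n", (unsigned)limit);
    return 0;
}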
Example #2
/// Map within an x86_32 pdpt
static errval_t x86_32_pdpt(struct capability *dest, cslot_t slot,
                            struct capability * src, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    if (slot >= X86_32_PTABLE_SIZE) { // Slot within page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count > 1) { // disallow multiple pdpt mappings at a time
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_x86_32_pdir) { // Source must be a page directory
        return SYS_ERR_WRONG_MAPPING;
    }

    if(slot >= X86_32_PDPTE_BASE(X86_32_MEMORY_OFFSET)) { // Kernel mapped here
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }

    // Destination
    genpaddr_t dest_gp   = dest->u.vnode_x86_32_pdpt.base;
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    union x86_32_pdpte_entry *entry =
        (union x86_32_pdpte_entry *)dest_lv + slot;

    // Set metadata
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_pdpte_entry),
                       pte_count);

    // Source
    genpaddr_t src_gp   = src->u.vnode_x86_32_pdir.base;
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    paging_x86_32_map_pdpte(entry, src_lp);
    paging_x86_32_context_switch(dcb_current->vspace); // To flush TLB

    return SYS_ERR_OK;
}
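
The destination entry above is found by plain offset arithmetic: the PDPT's base address plus slot * sizeof(union x86_32_pdpte_entry), which is also the address recorded in the mapping cap. Below is a minimal standalone sketch of that computation, with the entry union stood in by a raw 64-bit word (an assumption; the real union carries the present/flags/base bitfields).

#include <stdint.h>
#include <stdio.h>

/* Stand-in for union x86_32_pdpte_entry; assumed to be 8 bytes wide. */
typedef uint64_t example_pdpte_t;

/* Local physical address of the entry occupying `slot` in a PDPT page. */
static uintptr_t example_pdpte_slot_addr(uintptr_t pdpt_base_lp, unsigned slot)
{
    return pdpt_base_lp + slot * sizeof(example_pdpte_t);
}

int main(void)
{
    uintptr_t pdpt_lp = 0x00200000;  /* assumed example base address of the PDPT page */
    unsigned  slot    = 2;
    printf("slot %u entry at 0x%lx\n", slot,
           (unsigned long)example_pdpte_slot_addr(pdpt_lp, slot));
    return 0;
}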
Example #3
static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
#ifdef CONFIG_PAE
    init_pdpte = (void *)local_phys_to_mem(alloc_phys(X86_32_PDPTE_SIZE
                                           * sizeof(union x86_32_pdpte_entry)));
#endif
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_32_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE * sizeof(union x86_32_ptable_entry)));

    /* Page table setup */
    /* Initialize init page tables */
    for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
        paging_x86_32_clear_pdir(&init_pdir[j]);
        for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
            paging_x86_32_clear_ptable(&init_ptable[j * X86_32_PTABLE_SIZE + k]);
        }
    }
    /* Map pagetables into pageCN */
    int     pagecn_pagemap = 0;
#ifdef CONFIG_PAE
    // Map PDPTE into first slot in pagecn
    caps_create_new(ObjType_VNode_x86_32_pdpt,
                    mem_to_local_phys((lvaddr_t)init_pdpte),
                    BASE_PAGE_BITS, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
#endif
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unnecessary special cases, we connect them here.
    for(lvaddr_t vaddr = 0; vaddr < X86_32_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base =
            &init_pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE +
                       X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE *
                         X86_32_PTABLE_SIZE + X86_32_PDIR_BASE(vaddr) *
                         X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr)];

        paging_x86_32_map_pdpte(pdpte_base, mem_to_local_phys((lvaddr_t)pdir_base));
#else
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE +
                         X86_32_PTABLE_BASE(vaddr)];
#endif
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }

    /* Switch to init's VSpace */
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdir));
#endif

    /***** VSpace available *****/

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(ARGS_BASE) + i],
                          st->args_page + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }
}
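
In the non-PAE case, the connect loop above indexes the flat init_ptable array with X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr). The standalone sketch below models that arithmetic with the conventional x86_32 10/10/12 address split; the macro definitions here are assumptions for illustration, and the authoritative ones live in the paging headers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PTABLE_SIZE    1024u                    /* entries per table (assumed) */
#define EXAMPLE_PDIR_BASE(v)   (((v) >> 22) & 0x3FFu)   /* bits 31..22: page-directory index */
#define EXAMPLE_PTABLE_BASE(v) (((v) >> 12) & 0x3FFu)   /* bits 21..12: page-table index */

/* Index of vaddr's entry within a flat array of consecutive page tables. */
static size_t example_init_ptable_index(uint32_t vaddr)
{
    return EXAMPLE_PDIR_BASE(vaddr) * EXAMPLE_PTABLE_SIZE + EXAMPLE_PTABLE_BASE(vaddr);
}

int main(void)
{
    uint32_t vaddr = 0x00801000u;   /* example virtual address */
    printf("vaddr 0x%x -> init_ptable[%zu]\n", vaddr, example_init_ptable_index(vaddr));
    return 0;
}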