Example #1
static inline void mapit(union x86_32_pdpte_entry *pdpte_base,
                         union x86_32_ptable_entry *pdir_base, lpaddr_t addr,
                         uint64_t bitmap)
{
    if(!X86_32_IS_PRESENT(pdpte_base)) {
        paging_x86_32_map_pdpte(pdpte_base,
                                mem_to_local_phys((lvaddr_t)pdir_base));
    }

    if(!X86_32_IS_PRESENT(pdir_base)) {
        debug(SUBSYS_PAGING, "mapped!\n");
    } else {
        // Remap the page anyway; this is important for the memory latency benchmark
        debug(SUBSYS_PAGING, "already existing! remapping it\n");
    }

    paging_x86_32_map_large(pdir_base, addr, bitmap);
}
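Example #1 installs the page directory into the PDPTE only when that entry's present bit is clear, then (re)writes the large-page directory entry unconditionally. For reference, here is a minimal standalone sketch of the present-bit test that a macro like X86_32_IS_PRESENT performs on a raw entry; the Present flag is architecturally bit 0 of every x86 paging-structure entry, and the names PTE_PRESENT and entry_is_present are illustrative stand-ins, not Barrelfish's:

#include <stdbool.h>
#include <stdint.h>

#define PTE_PRESENT 0x1u  /* bit 0 of any x86 paging entry: mapping is valid */

/* Hypothetical stand-in for X86_32_IS_PRESENT on a raw 32-bit entry value. */
static inline bool entry_is_present(uint32_t raw_entry)
{
    return (raw_entry & PTE_PRESENT) != 0;
}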
Example #2
/**
 * \brief Map init user-space memory.
 *
 * This function maps pages of the init user-space module. It expects
 * the virtual base address 'vbase' of a program segment of the init executable,
 * its size 'size' and its ELF64 access control flags. It maps pages
 * to the sequential area of physical memory given by 'base'. If you
 * want to allocate physical memory frames as you go, use
 * startup_alloc_init() instead.
 *
 * \param vbase Virtual base address of program segment.
 * \param base  Physical base address of program segment.
 * \param size  Size of program segment in bytes.
 * \param flags ELF64 access control flags of program segment.
 */
errval_t startup_map_init(lvaddr_t vbase, lpaddr_t base, size_t size,
                          uint32_t flags)
{
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, BASE_PAGE_SIZE);
    assert(vbase + size < X86_32_INIT_SPACE_LIMIT);

    // Map pages
    for(vaddr = vbase; vaddr < vbase + size;
        vaddr += BASE_PAGE_SIZE, base += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_ptable_entry *ptable_base = &init_ptable[
                    X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE * X86_32_PTABLE_SIZE
                    + X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
                    + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%x, base = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u, "
              "PTABLE_BASE = %u -- ", vaddr, base, X86_32_PDPTE_BASE(vaddr),
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#else
        union x86_32_ptable_entry *ptable_base = &init_ptable[
                    X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
                    + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR
                             ", base = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIuLPADDR", "
              "PTABLE_BASE = %"PRIuLPADDR" -- ", vaddr, base,
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#endif

        if(!X86_32_IS_PRESENT(ptable_base)) {
            debug(SUBSYS_PAGING, "mapped!\n");
            paging_x86_32_map(ptable_base, base,
                       INIT_PAGE_BITMAP | paging_elf_to_page_flags(flags));
        } else {
            debug(SUBSYS_PAGING, "already existing!\n");
        }
    }

    return SYS_ERR_OK;
}
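In the non-PAE branch, Example #2 locates the leaf entry by treating init_ptable as one contiguous array of page tables and computing X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr). Below is a standalone sketch of that index arithmetic, assuming the standard two-level x86-32 layout (a 1024-entry directory indexed by address bits 31:22 and 1024-entry page tables indexed by bits 21:12); the helper names are hypothetical, not the Barrelfish macros:

#include <stddef.h>
#include <stdint.h>

#define PTABLE_ENTRIES 1024u

/* Directory index: virtual address bits 31:22. */
static inline uint32_t pdir_index(uint32_t vaddr)   { return (vaddr >> 22) & 0x3ffu; }
/* Page-table index: virtual address bits 21:12. */
static inline uint32_t ptable_index(uint32_t vaddr) { return (vaddr >> 12) & 0x3ffu; }

/* Flat index into a contiguous array of page tables, mirroring the
 * computation used for init_ptable above. */
static inline size_t flat_ptable_index(uint32_t vaddr)
{
    return (size_t)pdir_index(vaddr) * PTABLE_ENTRIES + ptable_index(vaddr);
}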
Example #3
/// Map within a x86_32 pdir
static errval_t x86_32_pdir(struct capability *dest, cslot_t slot,
                            struct capability * src, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    //printf("x86_32_pdir\n");
    if (slot >= X86_32_PTABLE_SIZE) { // Slot must lie within the page directory
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (slot + pte_count > X86_32_PTABLE_SIZE) {
        // check that mapping fits page directory
        return SYS_ERR_VM_MAP_SIZE;
    }

#ifndef CONFIG_PAE
    if(slot >= X86_32_PDIR_BASE(X86_32_MEMORY_OFFSET)) { // Kernel mapped here
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }
#endif

    // large page code
    if(src->type == ObjType_Frame || src->type == ObjType_DevFrame)
    {
        cslot_t last_slot = slot + pte_count;

        // check offset within frame
        if (offset + pte_count * X86_32_LARGE_PAGE_SIZE > get_size(src)) {
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        /* Calculate page access protection flags */
        // Get frame cap rights
        paging_x86_32_flags_t flags_large =
            paging_x86_32_cap_to_page_flags(src->rights);
        // Mask with provided access rights mask
        flags_large = paging_x86_32_mask_attrs(flags_large, X86_32_PTABLE_ACCESS(flags));
        // Add additional arch-specific flags
        flags_large |= X86_32_PTABLE_FLAGS(flags);
        // Unconditionally mark the page present
        flags_large |= X86_32_PTABLE_PRESENT;

        // Convert destination base address
        genpaddr_t dest_gp   = get_address(dest);
        lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
        lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
        // Convert source base address
        genpaddr_t src_gp   = get_address(src);
        lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
        // Set metadata
        create_mapping_cap(mapping_cte, src,
                           dest_lp + slot * sizeof(union x86_32_ptable_entry),
                           offset,
                           pte_count);

        for (; slot < last_slot; slot++, offset += X86_32_LARGE_PAGE_SIZE) {
            union x86_32_ptable_entry *entry =
                (union x86_32_ptable_entry *)dest_lv + slot;

            /* FIXME: Flush the TLB if the page is already present.
             * In the meantime, since we don't do this, we simply refuse
             * to reuse a VA mapping. */
            if (X86_32_IS_PRESENT(entry)) {
                printf("Trying to map into an already present page NYI.\n");
                return SYS_ERR_VNODE_SLOT_INUSE;
            }

            // Carry out the page mapping
            paging_x86_32_map_large(entry, src_lp + offset, flags_large);
        }

        return SYS_ERR_OK;
    }

    if (src->type != ObjType_VNode_x86_32_ptable) { // Source must be a page table vnode
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    genpaddr_t dest_gp   = dest->u.vnode_x86_32_pdir.base;
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    union x86_32_pdir_entry *entry =
        (union x86_32_pdir_entry *)dest_lv + slot;

    // Set metadata
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_pdir_entry),
                       offset,
                       pte_count);


    // Source
    // XXX: offset is ignored
    genpaddr_t src_gp   = src->u.vnode_x86_32_ptable.base;
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    paging_x86_32_map_table(entry, src_lp);

    return SYS_ERR_OK;
}
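Both mapping paths in Example #3 first verify that the requested range stays inside the source frame: offset + pte_count * page_size must not exceed the frame's size. A small standalone sketch of that bounds check, written with an explicit overflow guard; the function name and parameter types are illustrative assumptions, not Barrelfish's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Does mapping pte_count pages of page_size bytes, starting at offset,
 * stay within a frame of frame_size bytes? */
static bool mapping_fits_frame(uint64_t offset, size_t pte_count,
                               uint64_t page_size, uint64_t frame_size)
{
    uint64_t span = (uint64_t)pte_count * page_size;
    if (page_size == 0 || span / page_size != pte_count) {
        return false;  /* zero page size or multiplication overflow */
    }
    return offset <= frame_size && span <= frame_size - offset;
}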
Example #4
/// Map within a x86_32 ptable
static errval_t x86_32_ptable(struct capability *dest, cslot_t slot,
                              struct capability * src, uintptr_t uflags,
                              uintptr_t offset, uintptr_t pte_count,
                              struct cte *mapping_cte)
{
    //printf("x86_32_ptable\n");
    if (slot >= X86_32_PTABLE_SIZE) { // Slot must lie within the page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    cslot_t last_slot = slot + pte_count;

    if (last_slot > X86_32_PTABLE_SIZE) {
        printf("slot = %"PRIuCSLOT", last_slot = %"PRIuCSLOT", PTABLE_SIZE = %d\n", slot, last_slot, X86_32_PTABLE_SIZE);
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_Frame &&
        src->type != ObjType_DevFrame) { // Source must be a (device) frame
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if (offset + pte_count * X86_32_BASE_PAGE_SIZE > get_size(src)) {
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_32_flags_t flags =
        paging_x86_32_cap_to_page_flags(src->rights);
    // Mask with provided access rights mask
    flags = paging_x86_32_mask_attrs(flags, X86_32_PTABLE_ACCESS(uflags));
    // Add additional arch-specific flags
    flags |= X86_32_PTABLE_FLAGS(uflags);
    // Unconditionally mark the page present
    flags |= X86_32_PTABLE_PRESENT;

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    // Set metadata
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_ptable_entry),
                       offset,
                       pte_count);


    for (; slot < last_slot; slot++, offset += X86_32_BASE_PAGE_SIZE) {
        union x86_32_ptable_entry *entry =
            (union x86_32_ptable_entry *)dest_lv + slot;

        /* FIXME: Flush the TLB if the page is already present.
         * In the meantime, since we don't do this, we just panic if
         * a VA mapping is reused. */
        if (X86_32_IS_PRESENT(entry)) {
            panic("Trying to map into an already present page NYI.");
        }

        // Carry out the page mapping
        paging_x86_32_map(entry, src_lp + offset, flags);
    }

    return SYS_ERR_OK;
}
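Examples #3 and #4 derive the hardware flags the same way: start from the rights granted by the source frame capability, intersect them with the access attributes the caller requested, OR in any extra arch-specific flags, and finally force the present bit. A simplified standalone sketch of that pattern follows, using made-up bit values (bit 0 Present and bit 1 Read/Write match the x86 architecture, but the helper names and the rights encoding are hypothetical):

#include <stdint.h>

typedef uint32_t page_flags_t;

#define PG_PRESENT  0x1u  /* bit 0: entry is valid */
#define PG_WRITABLE 0x2u  /* bit 1: writes allowed */

/* Rights granted by the frame capability (hypothetical encoding). */
static page_flags_t cap_rights_to_flags(int cap_writable)
{
    return cap_writable ? PG_WRITABLE : 0;
}

/* Combine capability rights with the caller's requested attributes. */
static page_flags_t derive_page_flags(int cap_writable, page_flags_t requested,
                                      page_flags_t extra_arch_flags)
{
    page_flags_t f = cap_rights_to_flags(cap_writable); /* what the cap allows        */
    f &= requested;                                      /* mask with the request      */
    f |= extra_arch_flags;                               /* non-rights flags pass through */
    f |= PG_PRESENT;                                     /* unconditionally present    */
    return f;
}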