Example no. 1
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping,
                             size_t slot, size_t num_pages)
{
    assert(type_is_vnode(pgtable->type));
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%zu pages)\n", num_pages);

    // get page table entry data
    genpaddr_t paddr;

    read_pt_entry(pgtable, slot, &paddr, NULL, NULL);
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    // get virtual address of first page
    // TODO: error checking
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID
                   && leaf_pt->mapping_info.pte == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else {
            return err;
        }
    }

    if (num_pages != mapping->mapping_info.pte_count) {
        // want to unmap a different amount of pages than was mapped
        return SYS_ERR_VM_MAP_SIZE;
    }

    do_unmap(pt, slot, num_pages);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (num_pages > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    // update mapping info
    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));

    return SYS_ERR_OK;
}
Example no. 2
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping)
{
    assert(type_is_vnode(pgtable->type));
    assert(type_is_mapping(mapping->cap.type));
    struct Frame_Mapping *info = &mapping->cap.u.frame_mapping;
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%hu pages)\n", info->pte_count);

    // calculate page table address
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    cslot_t slot = info->entry;
    // get virtual address of first page
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED && vaddr == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else {
            return err;
        }
    }

    do_unmap(pt, slot, info->pte_count);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (info->pte_count > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    return SYS_ERR_OK;
}
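
Both unmap variants above leave the choice between a selective and a full TLB flush as a TODO. The following standalone sketch shows one plausible threshold-based heuristic; FLUSH_ALL_THRESHOLD, SKETCH_PAGE_SIZE, flush_one_page(), flush_all(), and flush_range() are hypothetical placeholders for illustration and are not part of the kernel code above.

/* Standalone sketch of a selective-vs-full TLB flush heuristic.
 * All names and the threshold value are assumptions made for this sketch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define FLUSH_ALL_THRESHOLD 32u        /* assumed break-even point */
#define SKETCH_PAGE_SIZE    4096u

static void flush_one_page(uintptr_t vaddr) { (void)vaddr; /* e.g. invlpg / tlbi */ }
static void flush_all(void)                 { /* e.g. reload CR3 / tlbi vmalle1 */ }

static void flush_range(uintptr_t vaddr, size_t pages, bool vaddr_known)
{
    /* Without a reconstructed virtual address we can only flush everything. */
    if (!vaddr_known || pages > FLUSH_ALL_THRESHOLD) {
        flush_all();
        return;
    }
    /* Otherwise flush each unmapped page selectively. */
    for (size_t i = 0; i < pages; i++) {
        flush_one_page(vaddr + i * SKETCH_PAGE_SIZE);
    }
}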
Example no. 3
errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
{
    assert(type_is_mapping(mapping->type));
    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    // check flags
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    /* Calculate location of page table entries we need to modify */
    lvaddr_t base = local_phys_to_mem(get_address(&info->ptable->cap)) +
        (info->entry + offset) * sizeof(union armv8_ttable_entry);

    for (int i = 0; i < pages; i++) {
        union armv8_ttable_entry *entry =
            (union armv8_ttable_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);
    }

    return paging_tlb_flush_range(cte_for_cap(mapping), 0, pages);
}
Example no. 4
struct sysret
sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_cptr,
        int source_vbits, uintptr_t flags, uintptr_t offset,
        uintptr_t pte_count)
{
    assert (type_is_vnode(ptable->type));

    errval_t err;

    /* Lookup source cap */
    struct capability *root = &dcb_current->cspace.cap;
    struct cte *src_cte;
    err = caps_lookup_slot(root, source_cptr, source_vbits, &src_cte,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }

    /* Perform map */
    // XXX: this does not check if we do have CAPRIGHTS_READ_WRITE on
    // the destination cap (the page table we're inserting into)
    return SYSRET(caps_copy_to_vnode(cte_for_cap(ptable), slot, src_cte, flags,
                                     offset, pte_count));
}
Example no. 5
/**
 * \brief modify flags of mapping for `frame`.
 *
 * \arg frame the frame whose mapping should be modified
 * \arg offset the offset from the first page table entry in entries
 * \arg pages the number of pages to modify
 * \arg mflags the new flags
 * \arg va_hint a user-supplied virtual address for hinting selective TLB
 *              flushing
 */
errval_t page_mappings_modify_flags(struct capability *frame, size_t offset,
                                    size_t pages, size_t mflags, genvaddr_t va_hint)
{
    struct cte *mapping = cte_for_cap(frame);
    struct mapping_info *info = &mapping->mapping_info;
    struct cte *leaf_pt;
    errval_t err;
    err = mdb_find_cap_for_address(info->pte, &leaf_pt);
    if (err_is_fail(err)) {
        return err;
    }

    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_64_flags_t flags =
        paging_x86_64_cap_to_page_flags(frame->rights);
    // Mask with provided access rights mask
    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
    // Add additional arch-specific flags
    flags |= X86_64_PTABLE_FLAGS(mflags);
    // Unconditionally mark the page present
    flags |= X86_64_PTABLE_PRESENT;

    // check arguments
    if (offset >= X86_64_PTABLE_SIZE) { // Within pagetable
        return SYS_ERR_VNODE_SLOT_INVALID;
    }
    if (offset + pages > X86_64_PTABLE_SIZE) { // mapping size ok
        return SYS_ERR_VM_MAP_SIZE;
    }

    /* Calculate location of first pt entry we need to modify */
    lvaddr_t base = local_phys_to_mem(info->pte) +
        offset * sizeof(union x86_64_ptable_entry);

    size_t pagesize = BASE_PAGE_SIZE;
    switch(leaf_pt->cap.type) {
        case ObjType_VNode_x86_64_ptable :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags(entry, flags);
            }
            break;
        case ObjType_VNode_x86_64_pdir :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags_large(entry, flags);
            }
            pagesize = LARGE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdpt :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags_huge(entry, flags);
            }
            pagesize = HUGE_PAGE_SIZE;
            break;
        default:
            return SYS_ERR_WRONG_MAPPING;
    }

    if (va_hint != 0 && va_hint > BASE_PAGE_SIZE) {
        // use as direct hint
        // invlpg should work for large/huge pages
        for (int i = 0; i < pages; i++) {
            do_one_tlb_flush(va_hint + i * pagesize);
        }
    } else {
        /* do full TLB flush */
        do_full_tlb_flush();
    }
    return SYS_ERR_OK;
}
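
The flag computation at the top of page_mappings_modify_flags (and again in the two mapping functions that follow) always applies the same pattern: start from the rights encoded in the frame capability, mask them with the caller-requested access bits, OR in the remaining architecture-specific attribute bits, and unconditionally set the present bit. The minimal self-contained sketch below illustrates that pattern in isolation; the PTE_* constants and variable names are illustrative placeholders, not the real paging_x86_64_* definitions.

/* Self-contained sketch of the rights-masking pattern used above.
 * All constants here are placeholders, not the kernel's x86-64 definitions. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_flags_t;

#define PTE_PRESENT   (1u << 0)
#define PTE_WRITABLE  (1u << 1)
#define PTE_USER      (1u << 2)
#define PTE_NOCACHE   (1u << 4)

int main(void)
{
    pte_flags_t cap_rights  = PTE_WRITABLE | PTE_USER;   /* what the capability allows */
    pte_flags_t requested   = PTE_WRITABLE;              /* what the caller asked for  */
    pte_flags_t extra_attrs = PTE_NOCACHE;               /* arch-specific attributes   */

    /* 1. Never grant more than the capability allows. */
    pte_flags_t flags = cap_rights & requested;
    /* 2. Add attribute bits that are not access rights. */
    flags |= extra_attrs;
    /* 3. Unconditionally mark the entry present. */
    flags |= PTE_PRESENT;

    printf("final PTE flags: 0x%llx\n", (unsigned long long)flags);
    return 0;
}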
Example no. 6
/// Map within a x86_64 non leaf ptable
static errval_t x86_64_non_ptable(struct capability *dest, cslot_t slot,
                                  struct capability *src, uintptr_t flags,
                                  uintptr_t offset, size_t pte_count)
{
    //printf("page_mappings_arch:x86_64_non_ptable\n");
    if (slot >= X86_64_PTABLE_SIZE) { // Within pagetable
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (type_is_vnode(src->type) && pte_count != 1) { // only allow single ptable mappings
        printf("src type and count mismatch\n");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping size ok
        printf("mapping size invalid (%zd)\n", pte_count);
        return SYS_ERR_VM_MAP_SIZE;
    }

    size_t page_size = 0;
    paging_x86_64_flags_t flags_large = 0;
    switch (dest->type) {
        case ObjType_VNode_x86_64_pml4:
            if (src->type != ObjType_VNode_x86_64_pdpt) { // Right mapping
                printf("src type invalid\n");
                return SYS_ERR_WRONG_MAPPING;
            }
            if(slot >= X86_64_PML4_BASE(MEMORY_OFFSET)) { // Kernel mapped here
                return SYS_ERR_VNODE_SLOT_RESERVED;
            }
            break;
        case ObjType_VNode_x86_64_pdpt:
            // huge page support
            if (src->type != ObjType_VNode_x86_64_pdir) { // Right mapping
                // TODO: check if the system allows 1GB mappings
                page_size = X86_64_HUGE_PAGE_SIZE;
                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_HUGE_PAGE_SIZE > get_size(src)) {
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                // Calculate page access protection flags:
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;
            }
            break;
        case ObjType_VNode_x86_64_pdir:
            // superpage support
            if (src->type != ObjType_VNode_x86_64_ptable) { // Right mapping
                page_size = X86_64_LARGE_PAGE_SIZE;

                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_LARGE_PAGE_SIZE > get_size(src)) {
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                // Calculate page access protection flags:
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;

            }
            break;
        default:
            printf("dest type invalid\n");
            return SYS_ERR_DEST_TYPE_INVALID;
    }

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);

    // set metadata
    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte = dest_lp + slot * sizeof(union x86_64_ptable_entry);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.offset = offset;

    cslot_t last_slot = slot + pte_count;
    for (; slot < last_slot; slot++, offset += page_size) {
        // Destination
        union x86_64_pdir_entry *entry = (union x86_64_pdir_entry *)dest_lv + slot;

        if (X86_64_IS_PRESENT(entry)) {
            // cleanup mapping info
            // TODO: cleanup already mapped pages
            memset(&src_cte->mapping_info, 0, sizeof(struct mapping_info));
            printf("slot in use\n");
            return SYS_ERR_VNODE_SLOT_INUSE;
        }

        // determine if we map a large/huge page or a normal entry
        if (page_size == X86_64_LARGE_PAGE_SIZE)
        {
            //a large page is mapped
            paging_x86_64_map_large((union x86_64_ptable_entry *)entry, src_lp + offset, flags_large);
        } else if (page_size == X86_64_HUGE_PAGE_SIZE) {
            // a huge page is mapped
            paging_x86_64_map_huge((union x86_64_ptable_entry *)entry, src_lp + offset, flags_large);
        } else {
            //a normal paging structure entry is mapped
            paging_x86_64_map_table(entry, src_lp + offset);
        }
    }

    return SYS_ERR_OK;
}
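
x86_64_non_ptable selects the page size from the destination table type (2 MiB large pages under a pdir, 1 GiB huge pages under a pdpt) and then checks that offset + pte_count * page_size still fits inside the source frame. The standalone sketch below spells out that bounds check, with an extra overflow guard added only for illustration; the function name and trailing-underscore constants are placeholders, not kernel code.

/* Standalone illustration of the "offset + pte_count * page_size <= frame size"
 * check used when mapping base, large, and huge pages. */
#include <stdbool.h>
#include <stdint.h>

#define BASE_PAGE_SIZE_  (1ull << 12)   /* 4 KiB  */
#define LARGE_PAGE_SIZE_ (1ull << 21)   /* 2 MiB  */
#define HUGE_PAGE_SIZE_  (1ull << 30)   /* 1 GiB  */

static bool range_fits_in_frame(uint64_t offset, uint64_t pte_count,
                                uint64_t page_size, uint64_t frame_size)
{
    /* Reject arithmetic overflow first, then check the end of the range. */
    if (pte_count != 0 && page_size > (UINT64_MAX - offset) / pte_count) {
        return false;
    }
    return offset + pte_count * page_size <= frame_size;
}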
Example no. 7
/// Map within a x86_64 ptable
static errval_t x86_64_ptable(struct capability *dest, cslot_t slot,
                              struct capability *src, uintptr_t mflags,
                              uintptr_t offset, size_t pte_count)
{
    //printf("page_mappings_arch:x86_64_ptable\n");
    if (slot >= X86_64_PTABLE_SIZE) { // Within pagetable
        printf("    vnode_invalid\n");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping size ok
        printf("mapping size invalid (%zd)\n", pte_count);
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_Frame &&
        src->type != ObjType_DevFrame) { // Right mapping
        printf("src type invalid\n");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    genpaddr_t off = offset;
    if (off + pte_count * X86_64_BASE_PAGE_SIZE > get_size(src)) {
        printf("frame offset invalid\n");
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }


    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_64_flags_t flags =
        paging_x86_64_cap_to_page_flags(src->rights);
    // Mask with provided access rights mask
    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
    // Add additional arch-specific flags
    flags |= X86_64_PTABLE_FLAGS(mflags);
    // Unconditionally mark the page present
    flags |= X86_64_PTABLE_PRESENT;

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    // Set metadata
    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte = dest_lp + slot * sizeof(union x86_64_ptable_entry);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.offset = offset;

    cslot_t last_slot = slot + pte_count;
    for (; slot < last_slot; slot++, offset += X86_64_BASE_PAGE_SIZE) {
        union x86_64_ptable_entry *entry =
            (union x86_64_ptable_entry *)dest_lv + slot;

        /* FIXME: Flush TLB if the page is already present
         * in the meantime, since we don't do this, we just fail to avoid
         * ever reusing a VA mapping */
        if (X86_64_IS_PRESENT(entry)) {
            // TODO: cleanup already mapped pages
            memset(&src_cte->mapping_info, 0, sizeof(struct mapping_info));
            debug(LOG_WARN, "Trying to remap an already-present page is NYI, but "
                  "this is most likely a user-space bug!\n");
            return SYS_ERR_VNODE_SLOT_INUSE;
        }

        // Carry out the page mapping
        paging_x86_64_map(entry, src_lp + offset, flags);
    }

    return SYS_ERR_OK;
}
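
The slot arguments validated against X86_64_PTABLE_SIZE in the two functions above are simply the 9-bit index fields of an x86-64 virtual address, one per paging level, with 512 entries per table and a 12-bit page offset. A short self-contained sketch of that decomposition follows; vaddr_index() and the constants are placeholders used only for this illustration.

/* Decompose an x86-64 virtual address into its four page-table indices
 * and the page offset (9 + 9 + 9 + 9 + 12 bits). */
#include <stdint.h>
#include <stdio.h>

#define PTABLE_BITS      9
#define PTABLE_SIZE      (1u << PTABLE_BITS)   /* 512 entries per table */
#define PAGE_OFFSET_BITS 12

static unsigned vaddr_index(uint64_t vaddr, int level /* 0 = PML4 .. 3 = PT */)
{
    int shift = PAGE_OFFSET_BITS + (3 - level) * PTABLE_BITS;
    return (unsigned)((vaddr >> shift) & (PTABLE_SIZE - 1));
}

int main(void)
{
    uint64_t va = 0x00007f1234567000ull;
    printf("pml4 %u pdpt %u pdir %u pt %u offset 0x%llx\n",
           vaddr_index(va, 0), vaddr_index(va, 1),
           vaddr_index(va, 2), vaddr_index(va, 3),
           (unsigned long long)(va & ((1u << PAGE_OFFSET_BITS) - 1)));
    return 0;
}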
Example no. 8
static errval_t
caps_map_l3(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    // A VMSAv8-64 L3 table occupies a full 4K page and holds 512 eight-byte
    // entries, so the slot must index a single leaf entry.
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        panic("oops: slot >= 512");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if ((offset + BASE_PAGE_SIZE > get_size(src)) ||
        ((offset % BASE_PAGE_SIZE) != 0)) {
        panic("oops: frame offset invalid");
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check mapping does not overlap leaf page table
    if (slot + pte_count > VMSAv8_64_PTABLE_NUM_ENTRIES ) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry *entry = (union armv8_ttable_entry *)dest_lvaddr + slot;
    if (entry->page.valid) {
        panic("Remapping valid page.");
    }

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (int i = 0; i < pte_count; i++) {
        entry->raw = 0;

        entry->page.valid = 1;
        entry->page.mb1 = 1;
        paging_set_flags(entry, kpi_paging_flags);
        entry->page.base = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;

        debug(SUBSYS_PAGING, "L3 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx64"\n",
               dest_lvaddr, slot, entry, entry->raw);

        entry++;
    }

    // Flush TLB if remapping.
    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}
Example no. 9
static errval_t
caps_map_l2(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L3 table at its actual 1K size, we treat
    // it as being 4K. As a result, when we map an "L3" table we actually
    // map a page of memory as if it were 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= 512");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n",(size_t) pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l3) {
        panic("oops: l2 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    if (slot > VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        panic("oops: l2 slot id");
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 4096));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L2 mapping %"PRIuCSLOT". @%p = %08"PRIx32"\n",
              slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}
Example no. 10
static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L2 table at its actual 1K size, we treat
    // it as being 4K. As a result, when we map an "L2" table we actually
    // map a page of memory as if it were 4 consecutive L2 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    const int ARM_L1_SCALE = 4;

    if (slot >= 1024) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        panic("oops: slot id >= 1024");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n",(size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_ARM_l2) {
        //large page mapping goes here
        printf("kernel large page\n");
        //panic("oops: wrong src type");
        assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

        // ARM L1 has 4K entries, but we treat it as if it had 1K
        if (slot >= (256 * 4)) {
            panic("oops: slot >= (256 * 4)");
            return SYS_ERR_VNODE_SLOT_INVALID;
        }

        if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
            panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
            return SYS_ERR_WRONG_MAPPING;
        }

        // check offset within frame
        if ((offset + BYTES_PER_SECTION > get_size(src)) ||
            ((offset % BYTES_PER_SECTION) != 0)) {
            panic("oops: frame offset invalid");
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        // check mapping does not overlap leaf page table
        if (slot + pte_count > (256 * 4)) {
            return SYS_ERR_VM_MAP_SIZE;
        }

        // Destination
        lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
        lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

        union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + slot;
        if (entry->invalid.type != L1_TYPE_INVALID_ENTRY) {
            panic("Remapping valid page.");
        }

        lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
        if ((src_lpaddr & (LARGE_PAGE_SIZE - 1))) {
            panic("Invalid target");
        }

        struct cte *src_cte = cte_for_cap(src);
        src_cte->mapping_info.pte_count = pte_count;
        src_cte->mapping_info.pte = dest_lpaddr;
        src_cte->mapping_info.offset = offset;

        for (int i = 0; i < pte_count; i++) {
            entry->raw = 0;

            entry->section.type = L1_TYPE_SECTION_ENTRY;
            entry->section.bufferable = 1;
            entry->section.cacheable = (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE)? 0: 1;
            entry->section.ap10 = (kpi_paging_flags & KPI_PAGING_FLAGS_READ)? 2:0;
            entry->section.ap10 |= (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE)? 3:0;
            entry->section.ap2 = 0;
            entry->section.base_address = (src_lpaddr + i * BYTES_PER_SECTION) >> 12;

            debug(SUBSYS_PAGING, "L1 section mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
                   dest_lvaddr, slot, entry, entry->raw);

            entry++;
        }

        // Flush TLB if remapping.
        cp15_invalidate_tlb();
        return SYS_ERR_OK;
    }