errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping,
                             size_t slot, size_t num_pages)
{
    assert(type_is_vnode(pgtable->type));
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%zu pages)\n", num_pages);

    // get page table entry data
    genpaddr_t paddr;

    read_pt_entry(pgtable, slot, &paddr, NULL, NULL);
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    // get virtual address of first page
    // TODO: error checking
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID
                   && leaf_pt->mapping_info.pte == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else {
            return err;
        }
    }

    if (num_pages != mapping->mapping_info.pte_count) {
        // refuse to unmap a different number of pages than were originally mapped
        return SYS_ERR_VM_MAP_SIZE;
    }

    do_unmap(pt, slot, num_pages);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (num_pages > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    // update mapping info
    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));

    return SYS_ERR_OK;
}
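
/*
 * Note on compile_vaddr(): it reconstructs the virtual address mapped by a
 * given slot of a leaf page table by walking the mapping chain upwards and
 * collecting the slot index at every paging level.  Purely as an illustration
 * (not part of the build), on x86-64 the recovered 9-bit indices combine into
 * a canonical virtual address roughly as follows; the helper name and
 * signature below are hypothetical.
 */
#if 0
static genvaddr_t x86_64_indices_to_vaddr(uint16_t pml4_idx, uint16_t pdpt_idx,
                                          uint16_t pdir_idx, uint16_t pt_idx)
{
    genvaddr_t va = ((genvaddr_t)(pml4_idx & 0x1ff) << 39) |
                    ((genvaddr_t)(pdpt_idx & 0x1ff) << 30) |
                    ((genvaddr_t)(pdir_idx & 0x1ff) << 21) |
                    ((genvaddr_t)(pt_idx   & 0x1ff) << 12);
    // bits 63..48 must replicate bit 47 for the address to be canonical
    if (va & ((genvaddr_t)1 << 47)) {
        va |= 0xffff000000000000ULL;
    }
    return va;
}
#endif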
/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count)
{
    assert(type_is_vnode(dest_vnode_cte->cap.type));

    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    mapping_handler_t handler_func = handler[dest_cap->type];

    assert(handler_func != NULL);

#if 0
    genpaddr_t paddr = get_address(&src_cte->cap) + offset;
    genvaddr_t vaddr;
    compile_vaddr(dest_vnode_cte, dest_slot, &vaddr);
    printf("mapping 0x%"PRIxGENPADDR" to 0x%"PRIxGENVADDR"\n", paddr, vaddr);
#endif

    if (src_cte->mapping_info.pte) {
        // this cap is already mapped
#if DIAGNOSTIC_ON_ERROR
        printf("caps_copy_to_vnode: this copy is already mapped @pte 0x%lx (paddr = 0x%"PRIxGENPADDR")\n", src_cte->mapping_info.pte, get_address(src_cap));
#endif
#if RETURN_ON_ERROR
        return SYS_ERR_VM_ALREADY_MAPPED;
#endif
    }

    cslot_t last_slot = dest_slot + pte_count;

    if (last_slot > X86_64_PTABLE_SIZE) {
        // requested mapping runs past the end of this leaf page table
#if DIAGNOSTIC_ON_ERROR
        printf("caps_copy_to_vnode: requested mapping spans multiple leaf page tables\n");
#endif
#if RETURN_ON_ERROR
        return SYS_ERR_VM_RETRY_SINGLE;
#endif
    }

    errval_t r = handler_func(dest_cap, dest_slot, src_cap, flags, offset, pte_count);
    if (err_is_fail(r)) {
        printf("caps_copy_to_vnode: handler func returned %"PRIuERRV"\n", r);
    }
#if 0
    else {
        printf("mapping_info.pte       = 0x%lx\n", src_cte->mapping_info.pte);
        printf("mapping_info.offset    = 0x%lx\n", src_cte->mapping_info.offset);
        printf("mapping_info.pte_count = %zu\n", src_cte->mapping_info.pte_count);
    }
#endif
    return r;
}
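
/*
 * For context: handler[] is a dispatch table indexed by the destination
 * VNode's capability type, so every paging level is served by its own
 * mapping routine of type mapping_handler_t.  The sketch below shows how
 * such a table is typically populated with designated initializers; the
 * x86_64_map_* names are placeholders, not the actual handlers in this file.
 */
#if 0
static mapping_handler_t handler[ObjType_Num] = {
    [ObjType_VNode_x86_64_pml4]   = x86_64_map_pdpt_entry,
    [ObjType_VNode_x86_64_pdpt]   = x86_64_map_pdir_entry,
    [ObjType_VNode_x86_64_pdir]   = x86_64_map_ptable_entry,
    [ObjType_VNode_x86_64_ptable] = x86_64_map_frame_entry,
};
#endif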
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping)
{
    assert(type_is_vnode(pgtable->type));
    assert(type_is_mapping(mapping->cap.type));
    struct Frame_Mapping *info = &mapping->cap.u.frame_mapping;
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%hu pages)\n", info->pte_count);

    // calculate page table address
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    cslot_t slot = info->entry;
    // get virtual address of first page
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED && vaddr == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else {
            return err;
        }
    }

    do_unmap(pt, slot, info->pte_count);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (info->pte_count > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    return SYS_ERR_OK;
}
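
/*
 * The selective/full flush decision above relies on do_one_tlb_flush() and
 * do_full_tlb_flush() from the architecture code.  As an illustration only
 * (the real definitions live in the arch-specific headers and may differ),
 * the single-page variant on x86-64 essentially comes down to an invlpg:
 */
#if 0
static inline void sketch_one_tlb_flush(genvaddr_t vaddr)
{
    // invalidate the TLB entry covering this virtual address on this core
    __asm__ volatile("invlpg (%0)" : : "r" (vaddr) : "memory");
}
#endif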
// TODO: cleanup arch compatibility mess for page size selection
errval_t paging_tlb_flush_range(struct cte *mapping_cte, size_t offset, size_t pages)
{
    assert(type_is_mapping(mapping_cte->cap.type));

    struct Frame_Mapping *mapping = &mapping_cte->cap.u.frame_mapping;

    // reconstruct first virtual address for TLB flushing
    struct cte *leaf_pt = mapping->ptable;
    if (!type_is_vnode(leaf_pt->cap.type)) {
        return SYS_ERR_VNODE_TYPE;
    }
    errval_t err;
    genvaddr_t vaddr;
    size_t entry = mapping->entry;
    entry += offset;
    err = compile_vaddr(leaf_pt, entry, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        }
        else {
            return err;
        }
    }
    // flush TLB entries for all modified pages
    size_t page_size = 0;
    switch(leaf_pt->cap.type) {
#if defined(__x86_64__)
        case ObjType_VNode_x86_64_ptable:
            page_size = X86_64_BASE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdir:
            page_size = X86_64_LARGE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdpt:
            page_size = X86_64_HUGE_PAGE_SIZE;
            break;
#elif defined(__i386__)
        case ObjType_VNode_x86_32_ptable:
            page_size = X86_32_BASE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_32_pdir:
            page_size = X86_32_LARGE_PAGE_SIZE;
            break;
#elif defined(__ARM_ARCH_7A__)
        case ObjType_VNode_ARM_l1:
            panic("large page support for ARM NYI!\n");
            break;
        case ObjType_VNode_ARM_l2:
            page_size = BASE_PAGE_SIZE;
            break;
#elif defined(__ARM_ARCH_8A__)
            // TODO: define ARMv8 paging
#else
#error setup page sizes for arch
#endif
        default:
            panic("cannot find page size for cap type: %d\n",
                  leaf_pt->cap.type);
            break;
    }
    assert(page_size);
    debug(SUBSYS_PAGING, "flushing TLB entries for vaddrs 0x%"
            PRIxGENVADDR"--0x%"PRIxGENVADDR"\n",
            vaddr, vaddr + (pages * page_size));
    // TODO: check what tlb flushing instructions expect for large/huge pages
    for (size_t i = 0; i < pages; i++) {
        do_one_tlb_flush(vaddr);
        vaddr += page_size;
    }

    return SYS_ERR_OK;
}
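
/*
 * Usage note: both 'offset' and 'pages' are counted in page-table entries of
 * the mapping's own granularity (4 KiB, 2 MiB or 1 GiB on x86-64), not in
 * bytes.  A hypothetical caller that wants to flush only the third and
 * fourth pages of a mapping would do:
 */
#if 0
    err = paging_tlb_flush_range(mapping_cte, 2, 2);
    // flushes vaddr + 2*page_size and vaddr + 3*page_size only
#endif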
errval_t unmap_capability(struct cte *mem)
{
    errval_t err;

    TRACE_CAP_MSG("unmapping", mem);

    genvaddr_t vaddr = 0;
    bool single_page_flush = false;
    int mapping_count = 0, unmap_count = 0;
    genpaddr_t faddr = get_address(&mem->cap);

    // iterate over all mappings associated with 'mem' and unmap them
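    // (caps referring to the same physical range sit next to each other in the
    //  address-ordered MDB, so we can follow mdb_successor() from 'mem' and
    //  stop at the first cap whose base address no longer matches faddr)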
    struct cte *next = mem;
    struct cte *to_delete = NULL;

    while ((next = mdb_successor(next)) && get_address(&next->cap) == faddr) {
        TRACE_CAP_MSG("looking at", next);
        if (next->cap.type == get_mapping_type(mem->cap.type) &&
            next->cap.u.frame_mapping.cap == &mem->cap)
        {
            TRACE_CAP_MSG("cleaning up mapping", next);
            mapping_count ++;

            // do unmap
            struct Frame_Mapping *mapping = &next->cap.u.frame_mapping;
            struct cte *pgtable = mapping->ptable;
            if (!pgtable) {
                debug(SUBSYS_PAGING, "mapping->ptable == 0: just deleting mapping\n");
                // no page table: nothing to unmap, just delete the mapping cap
                goto delete_mapping;
            }
            if (!type_is_vnode(pgtable->cap.type)) {
                debug(SUBSYS_PAGING,
                        "mapping->ptable.type not vnode (%d): just deleting mapping\n",
                        mapping->ptable->cap.type);
                // not a valid page table: nothing to unmap, just delete the mapping cap
                goto delete_mapping;
            }

            lpaddr_t ptable_lp = gen_phys_to_local_phys(get_address(&pgtable->cap));
            lvaddr_t ptable_lv = local_phys_to_mem(ptable_lp);
            cslot_t slot = mapping->entry;

            // unmap
            do_unmap(ptable_lv, slot, mapping->pte_count);

            unmap_count ++;

            // TLB flush?
            if (unmap_count == 1) {
                err = compile_vaddr(pgtable, slot, &vaddr);
                if (err_is_ok(err) && mapping->pte_count == 1) {
                    single_page_flush = true;
                }
            }

delete_mapping:
            assert(!next->delete_node.next);
            // mark mapping cap for delete: cannot do delete here as it messes
            // up mdb_successor()
            next->delete_node.next = to_delete;
            to_delete = next;
        }
    }

    // delete mapping caps
    while (to_delete) {
        next = to_delete->delete_node.next;
        err = caps_delete(to_delete);
        if (err_is_fail(err)) {
            printk(LOG_NOTE, "caps_delete: %"PRIuERRV"\n", err);
        }
        to_delete = next;
    }

    TRACE_CAP_MSGF(mem, "unmapped %d/%d instances", unmap_count, mapping_count);

    // do TLB flush: a selective flush is only valid if exactly one single-page
    // mapping was unmapped and its virtual address could be reconstructed
    if (single_page_flush && unmap_count == 1) {
        do_one_tlb_flush(vaddr);
    } else {
        do_full_tlb_flush();
    }

    return SYS_ERR_OK;
}
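
/*
 * The delete_node chaining in unmap_capability() is the usual two-phase
 * "collect during traversal, act afterwards" pattern: deleting a cap while
 * walking the MDB would disturb mdb_successor(), so candidates are pushed
 * onto an intrusive singly linked list and only deleted once the walk is
 * complete.  A generic, standalone sketch of the pattern (illustration only,
 * not kernel code):
 */
#if 0
struct item {
    struct item *list_next;   // link owned by the structure being traversed
    struct item *defer_next;  // scratch link for the deferred-work list
};

static void collect_then_process(struct item *head, int (*match)(struct item *),
                                 void (*process)(struct item *))
{
    struct item *pending = NULL;
    // phase 1: traverse; chain matches through defer_next and leave the
    //          traversed structure untouched so the walk stays valid
    for (struct item *it = head; it != NULL; it = it->list_next) {
        if (match(it)) {
            it->defer_next = pending;
            pending = it;
        }
    }
    // phase 2: traversal finished, now it is safe to act on each match
    while (pending != NULL) {
        struct item *next = pending->defer_next;
        process(pending);
        pending = next;
    }
}
#endif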