Example #1
0
// TODO: XXX: multiple mappings?
static inline errval_t find_next_ptable(struct cte *mapping_cte, struct cte **next)
{
    assert(mapping_cte);
    struct Frame_Mapping *mapping = &mapping_cte->cap.u.frame_mapping;
    /*
    errval_t err;
    err = mdb_find_cap_for_address(
            local_phys_to_gen_phys(mapping->pte), next);
    if (err_no(err) == CAPS_ERR_CAP_NOT_FOUND ||
        err_no(err) == SYS_ERR_CAP_NOT_FOUND)
    {
        debug(SUBSYS_PAGING, "could not find cap associated "
                "with 0x%"PRIxLPADDR"\n", mapping->pte);
        return SYS_ERR_VNODE_NOT_INSTALLED;
    }
    if (err_is_fail(err)) {
        debug(SUBSYS_PAGING, "error in compile_vaddr:"
                " mdb_find_range: 0x%"PRIxERRV"\n", err);
        return err;
    }
    */
    if (!mapping->ptable || mapping->ptable->cap.type == ObjType_Null)
    {
        return SYS_ERR_VNODE_NOT_INSTALLED;
    }
    *next = mapping->ptable;

    if (!type_is_vnode((*next)->cap.type)) {
        struct cte *tmp = mdb_predecessor(*next);
        // check if there's a copy of *next that is a vnode, and return that
        // copy, if found.
        while(is_copy(&tmp->cap, &(*next)->cap)) {
            if (type_is_vnode(tmp->cap.type)) {
                *next = tmp;
                return SYS_ERR_OK;
            }
            tmp = mdb_predecessor(tmp);
        }
        tmp = mdb_successor(*next);
        while(is_copy(&tmp->cap, &(*next)->cap)) {
            if (type_is_vnode(tmp->cap.type)) {
                *next = tmp;
                return SYS_ERR_OK;
            }
            tmp = mdb_successor(tmp);
        }

        debug(SUBSYS_CAPS, "found cap not a VNode\n");
        // no copy was vnode
        return SYS_ERR_VNODE_LOOKUP_NEXT;
    }
    return SYS_ERR_OK;
}

errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping,
                             size_t slot, size_t num_pages)
{
    assert(type_is_vnode(pgtable->type));
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%zd pages)\n", num_pages);

    // get page table entry data
    genpaddr_t paddr;

    read_pt_entry(pgtable, slot, &paddr, NULL, NULL);
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    // get virtual address of first page
    // TODO: error checking
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID
                   && leaf_pt->mapping_info.pte == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else {
            return err;
        }
    }

    if (num_pages != mapping->mapping_info.pte_count) {
        // want to unmap a different amount of pages than was mapped
        return SYS_ERR_VM_MAP_SIZE;
    }

    do_unmap(pt, slot, num_pages);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (num_pages > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    // update mapping info
    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));

    return SYS_ERR_OK;
}

/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count)
{
    assert(type_is_vnode(dest_vnode_cte->cap.type));

    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    mapping_handler_t handler_func = handler[dest_cap->type];

    assert(handler_func != NULL);

#if 0
    genpaddr_t paddr = get_address(&src_cte->cap) + offset;
    genvaddr_t vaddr;
    compile_vaddr(dest_vnode_cte, dest_slot, &vaddr);
    printf("mapping 0x%"PRIxGENPADDR" to 0x%"PRIxGENVADDR"\n", paddr, vaddr);
#endif

    if (src_cte->mapping_info.pte) {
        // this cap is already mapped
#if DIAGNOSTIC_ON_ERROR
        printf("caps_copy_to_vnode: this copy is already mapped @pte 0x%lx (paddr = 0x%"PRIxGENPADDR")\n", src_cte->mapping_info.pte, get_address(src_cap));
#endif
#if RETURN_ON_ERROR
        return SYS_ERR_VM_ALREADY_MAPPED;
#endif
    }

    cslot_t last_slot = dest_slot + pte_count;

    if (last_slot > X86_64_PTABLE_SIZE) {
        // requested map overlaps leaf page table
#if DIAGNOSTIC_ON_ERROR
        printf("caps_copy_to_vnode: requested mapping spans multiple leaf page tables\n");
#endif
#if RETURN_ON_ERROR
        return SYS_ERR_VM_RETRY_SINGLE;
#endif
    }

    errval_t r = handler_func(dest_cap, dest_slot, src_cap, flags, offset, pte_count);
    if (err_is_fail(r)) {
        printf("caps_copy_to_vnode: handler func returned %ld\n", r);
    }
#if 0
    else {
        printf("mapping_info.pte       = 0x%lx\n", src_cte->mapping_info.pte);
        printf("mapping_info.offset    = 0x%lx\n", src_cte->mapping_info.offset);
        printf("mapping_info.pte_count = %zu\n", src_cte->mapping_info.pte_count);
    }
#endif
    return r;
}

static inline void read_pt_entry(struct capability *pgtable, size_t slot,
                                 genpaddr_t *mapped_addr, lpaddr_t *pte,
                                 void **entry)
{
    assert(type_is_vnode(pgtable->type));

    genpaddr_t paddr;
    lpaddr_t pte_;
    void *entry_;

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t lp = gen_phys_to_local_phys(gp);
    lvaddr_t lv = local_phys_to_mem(lp);

    // get paddr
    switch (pgtable->type) {
    case ObjType_VNode_x86_64_pml4:
    case ObjType_VNode_x86_64_pdpt:
    case ObjType_VNode_x86_64_pdir: {
        union x86_64_pdir_entry *e =
            (union x86_64_pdir_entry *)lv + slot;
        paddr = (lpaddr_t)e->d.base_addr << BASE_PAGE_BITS;
        entry_ = e;
        pte_ = lp + slot * sizeof(union x86_64_pdir_entry);
        break;
    }
    case ObjType_VNode_x86_64_ptable: {
        union x86_64_ptable_entry *e =
            (union x86_64_ptable_entry *)lv + slot;
        paddr = (lpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
        entry_ = e;
        pte_ = lp + slot * sizeof(union x86_64_ptable_entry);
        break;
    }
    default:
        assert(!"Should not get here");
    }

    if (mapped_addr) {
        *mapped_addr = paddr;
    }
    if (pte) {
        *pte = pte_;
    }
    if (entry) {
        *entry = entry_;
    }
}

Example #5
0
/*
 * 'set_cap()' for mapping caps
 */
void create_mapping_cap(struct cte *mapping_cte, struct capability *cap,
                        struct cte *ptable, cslot_t entry, size_t pte_count)
{
    assert(mapping_cte->cap.type == ObjType_Null);
    assert(type_is_vnode(ptable->cap.type));
    assert(entry < UINT16_MAX);
    // Currently, we have 32 bit offsets with 10 bit minimum page size, hence
    // the offset needs to have no more than 42 significant bits. FIXME
    //assert((offset & ~MASK(42)) == 0);

    mapping_cte->cap.type = get_mapping_type(cap->type);
    mapping_cte->cap.u.frame_mapping.cap = cap;
    mapping_cte->cap.u.frame_mapping.ptable = ptable;
    mapping_cte->cap.u.frame_mapping.entry = entry;
    //mapping_cte->cap.u.frame_mapping.offset = offset >> 10;
    mapping_cte->cap.u.frame_mapping.pte_count = pte_count;
}
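
For context, a minimal usage sketch of the function above, assuming an architecture-specific mapping handler has already written the PTEs; frame_cte, ptable_cte, first_slot and npages are illustrative names, not taken from the sources:

assert(mapping_cte->cap.type == ObjType_Null);
create_mapping_cap(mapping_cte,      // Null cte that becomes the mapping cap
                   &frame_cte->cap,  // capability that was just mapped
                   ptable_cte,       // leaf page table it was mapped into
                   first_slot,       // first PTE slot used by the mapping
                   npages);          // number of consecutive PTEs written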

Example #6
0
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping)
{
    assert(type_is_vnode(pgtable->type));
    assert(type_is_mapping(mapping->cap.type));
    struct Frame_Mapping *info = &mapping->cap.u.frame_mapping;
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%hu pages)\n", info->pte_count);

    // calculate page table address
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    cslot_t slot = info->entry;
    // get virtual address of first page
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED && vaddr == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else {
            return err;
        }
    }

    do_unmap(pt, slot, info->pte_count);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (info->pte_count > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    return SYS_ERR_OK;
}

Example #7
0
/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    assert(type_is_vnode(dest_vnode_cte->cap.type));
    assert(mapping_cte->cap.type == ObjType_Null);

    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    mapping_handler_t handler_func = handler[dest_cap->type];

    assert(handler_func != NULL);

    cslot_t last_slot = dest_slot + pte_count;

    // TODO: PAE
    if (last_slot > X86_32_PTABLE_SIZE) {
        // requested map overlaps leaf page table
        debug(SUBSYS_CAPS,
                "caps_copy_to_vnode: requested mapping spans multiple leaf page tables\n");
        return SYS_ERR_VM_RETRY_SINGLE;
    }

    errval_t r = handler_func(dest_cap, dest_slot, src_cap, flags, offset,
                              pte_count, mapping_cte);
    if (err_is_fail(r)) {
        assert(mapping_cte->cap.type == ObjType_Null);
        debug(SUBSYS_PAGING, "caps_copy_to_vnode: handler func returned %d\n", r);
        return r;
    }

    /* insert mapping cap into mdb */
    errval_t err = mdb_insert(mapping_cte);
    if (err_is_fail(err)) {
        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
    }

    TRACE_CAP_MSG("created", mapping_cte);

    return err;
}
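
The dispatch above goes through a per-type handler table. The following is a sketch of its assumed shape, reconstructed from the call site; the typedef, the table initializer, and the handler names are assumptions for illustration, not verbatim from the sources:

// Assumed handler signature, reconstructed from the call
//   handler_func(dest_cap, dest_slot, src_cap, flags, offset, pte_count, mapping_cte)
typedef errval_t (*mapping_handler_t)(struct capability *dest, cslot_t dest_slot,
                                      struct capability *src, uintptr_t flags,
                                      uintptr_t offset, uintptr_t pte_count,
                                      struct cte *mapping_cte);

// Sketch of the dispatch table, indexed by destination vnode type (x86-32 here,
// matching the X86_32_PTABLE_SIZE check above); the handler names are hypothetical.
static mapping_handler_t handler[ObjType_Num] = {
    [ObjType_VNode_x86_32_pdir]   = x86_32_non_ptable,
    [ObjType_VNode_x86_32_ptable] = x86_32_ptable,
};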

Example #8
0
static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
{
    assert(type_is_vnode(pgtable->type));
    assert(paddr);

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t lp = gen_phys_to_local_phys(gp);
    lvaddr_t lv = local_phys_to_mem(lp);

    switch (pgtable->type) {
        case ObjType_VNode_AARCH64_l0:
        {
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->d.base) << 12;
            return;
        }
        case ObjType_VNode_AARCH64_l1:
        {
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->d.base) << 12;
            return;
        }
        case ObjType_VNode_AARCH64_l2:
        {
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->d.base) << 12;
            return;
        }
        case ObjType_VNode_AARCH64_l3:
        {
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->page.base) << 12;
            return;
        }
        default:
            assert(!"Should not get here");
    }
}

Example #9
0
struct sysret
sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_cptr,
        int source_vbits, uintptr_t flags, uintptr_t offset,
        uintptr_t pte_count)
{
    assert (type_is_vnode(ptable->type));

    errval_t err;

    /* Lookup source cap */
    struct capability *root = &dcb_current->cspace.cap;
    struct cte *src_cte;
    err = caps_lookup_slot(root, source_cptr, source_vbits, &src_cte,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }

    /* Perform map */
    // XXX: this does not check if we do have CAPRIGHTS_READ_WRITE on
    // the destination cap (the page table we're inserting into)
    return SYSRET(caps_copy_to_vnode(cte_for_cap(ptable), slot, src_cte, flags,
                                     offset, pte_count));
}

Example #10
0
/**
 * \brief Cleanup a cap copy but not the object represented by the cap
 */
static errval_t
cleanup_copy(struct cte *cte)
{
    errval_t err;

    TRACE_CAP_MSG("cleaning up copy", cte);

    struct capability *cap = &cte->cap;

    if (type_is_vnode(cap->type) ||
        cap->type == ObjType_Frame ||
        cap->type == ObjType_DevFrame)
    {
        unmap_capability(cte);
    }

    if (distcap_is_foreign(cte)) {
        TRACE_CAP_MSG("cleaning up non-owned copy", cte);
        if (cte->mdbnode.remote_copies || cte->mdbnode.remote_descs) {
            struct cte *ancestor = mdb_find_ancestor(cte);
            if (ancestor) {
                mdb_set_relations(ancestor, RRELS_DESC_BIT, RRELS_DESC_BIT);
            }
        }
    }

    err = mdb_remove(cte);
    if (err_is_fail(err)) {
        return err;
    }
    TRACE_CAP_MSG("cleaned up copy", cte);
    assert(!mdb_reachable(cte));
    memset(cte, 0, sizeof(*cte));

    return SYS_ERR_OK;
}

Example #11
0
/// Map within an x86_64 non-leaf ptable
static errval_t x86_64_non_ptable(struct capability *dest, cslot_t slot,
                                  struct capability *src, uintptr_t flags,
                                  uintptr_t offset, size_t pte_count)
{
    //printf("page_mappings_arch:x86_64_non_ptable\n");
    if (slot >= X86_64_PTABLE_SIZE) { // Within pagetable
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (type_is_vnode(src->type) && pte_count != 1) { // only allow single ptable mappings
        printf("src type and count mismatch\n");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping size ok
        printf("mapping size invalid (%zd)\n", pte_count);
        return SYS_ERR_VM_MAP_SIZE;
    }

    size_t page_size = 0;
    paging_x86_64_flags_t flags_large = 0;
    switch (dest->type) {
        case ObjType_VNode_x86_64_pml4:
            if (src->type != ObjType_VNode_x86_64_pdpt) { // Right mapping
                printf("src type invalid\n");
                return SYS_ERR_WRONG_MAPPING;
            }
            if(slot >= X86_64_PML4_BASE(MEMORY_OFFSET)) { // Kernel mapped here
                return SYS_ERR_VNODE_SLOT_RESERVED;
            }
            break;
        case ObjType_VNode_x86_64_pdpt:
            // huge page support
            if (src->type != ObjType_VNode_x86_64_pdir) { // Right mapping
                // TODO: check if the system allows 1GB mappings
                page_size = X86_64_HUGE_PAGE_SIZE;
                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_HUGE_PAGE_SIZE > get_size(src)) {
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                // Calculate page access protection flags /
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;
            }
            break;
        case ObjType_VNode_x86_64_pdir:
            // superpage support
            if (src->type != ObjType_VNode_x86_64_ptable) { // Right mapping
                page_size = X86_64_LARGE_PAGE_SIZE;

                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_LARGE_PAGE_SIZE > get_size(src)) {
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                // Calculate page access protection flags /
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;

            }
            break;
        default:
            printf("dest type invalid\n");
            return SYS_ERR_DEST_TYPE_INVALID;
    }

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);

    // set metadata
    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte = dest_lp + slot * sizeof(union x86_64_ptable_entry);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.offset = offset;

    cslot_t last_slot = slot + pte_count;
    for (; slot < last_slot; slot++, offset += page_size) {
        // Destination
        union x86_64_pdir_entry *entry = (union x86_64_pdir_entry *)dest_lv + slot;

        if (X86_64_IS_PRESENT(entry)) {
            // cleanup mapping info
            // TODO: cleanup already mapped pages
            memset(&src_cte->mapping_info, 0, sizeof(struct mapping_info));
            printf("slot in use\n");
            return SYS_ERR_VNODE_SLOT_INUSE;
        }

        // determine if we map a large/huge page or a normal entry
        if (page_size == X86_64_LARGE_PAGE_SIZE)
        {
            //a large page is mapped
            paging_x86_64_map_large((union x86_64_ptable_entry *)entry, src_lp + offset, flags_large);
        } else if (page_size == X86_64_HUGE_PAGE_SIZE) {
            // a huge page is mapped
            paging_x86_64_map_huge((union x86_64_ptable_entry *)entry, src_lp + offset, flags_large);
        } else {
            //a normal paging structure entry is mapped
            paging_x86_64_map_table(entry, src_lp + offset);
        }
    }

    return SYS_ERR_OK;
}
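
A worked instance of the frame-size check in the superpage branch above, with illustrative numbers:

/* With a 4 MiB frame (get_size(src) == 0x400000), offset == 0x200000 and
 * pte_count == 1, the check evaluates to
 *     0x200000 + 1 * X86_64_LARGE_PAGE_SIZE == 0x400000, which is not > 0x400000,
 * so a single 2 MiB superpage is mapped; pte_count == 2 would exceed the frame
 * and return SYS_ERR_FRAME_OFFSET_INVALID. */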

Example #12
0
// TODO: cleanup arch compatibility mess for page size selection
errval_t paging_tlb_flush_range(struct cte *mapping_cte, size_t offset, size_t pages)
{
    assert(type_is_mapping(mapping_cte->cap.type));

    struct Frame_Mapping *mapping = &mapping_cte->cap.u.frame_mapping;

    // reconstruct first virtual address for TLB flushing
    struct cte *leaf_pt = mapping->ptable;
    if (!type_is_vnode(leaf_pt->cap.type)) {
        return SYS_ERR_VNODE_TYPE;
    }
    assert(type_is_vnode(leaf_pt->cap.type));
    errval_t err;
    genvaddr_t vaddr;
    size_t entry = mapping->entry;
    entry += offset;
    err = compile_vaddr(leaf_pt, entry, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        }
        else {
            return err;
        }
    }
    debug(SUBSYS_PAGING, "flushing TLB entries for vaddrs 0x%"
            PRIxGENVADDR"--0x%"PRIxGENVADDR"\n",
            vaddr, vaddr+(pages * BASE_PAGE_SIZE));
    // flush TLB entries for all modified pages
    size_t page_size = 0;
    switch(leaf_pt->cap.type) {
#if defined(__x86_64__)
        case ObjType_VNode_x86_64_ptable:
            page_size = X86_64_BASE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdir:
            page_size = X86_64_LARGE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdpt:
            page_size = X86_64_HUGE_PAGE_SIZE;
            break;
#elif defined(__i386__)
        case ObjType_VNode_x86_32_ptable:
            page_size = X86_32_BASE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_32_pdir:
            page_size = X86_32_LARGE_PAGE_SIZE;
            break;
#elif defined(__ARM_ARCH_7A__)
        case ObjType_VNode_ARM_l1:
            panic("large page support for ARM NYI!\n");
            break;
        case ObjType_VNode_ARM_l2:
            page_size = BASE_PAGE_SIZE;
            break;
#elif defined(__ARM_ARCH_8A__)
            // TODO: define ARMv8 paging
#else
#error setup page sizes for arch
#endif
        default:
            panic("cannot find page size for cap type: %d\n",
                  leaf_pt->cap.type);
            break;
    }
    assert(page_size);
    // TODO: check what tlb flushing instructions expect for large/huge pages
    for (int i = 0; i < pages; i++) {
        do_one_tlb_flush(vaddr);
        vaddr += page_size;
    }

    return SYS_ERR_OK;
}
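
A minimal usage sketch, assuming a hypothetical caller that has just modified npages PTEs of an existing mapping starting first entries into it (mapping_cte, first and npages are illustrative names):

errval_t err = paging_tlb_flush_range(mapping_cte, first, npages);
if (err_is_fail(err)) {
    // the affected virtual range could not be reconstructed;
    // fall back to flushing the whole TLB
    do_full_tlb_flush();
}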

Example #13
0
errval_t unmap_capability(struct cte *mem)
{
    errval_t err;

    TRACE_CAP_MSG("unmapping", mem);

    genvaddr_t vaddr = 0;
    bool single_page_flush = false;
    int mapping_count = 0, unmap_count = 0;
    genpaddr_t faddr = get_address(&mem->cap);

    // iterate over all mappings associated with 'mem' and unmap them
    struct cte *next = mem;
    struct cte *to_delete = NULL;

    while ((next = mdb_successor(next)) && get_address(&next->cap) == faddr) {
        TRACE_CAP_MSG("looking at", next);
        if (next->cap.type == get_mapping_type(mem->cap.type) &&
            next->cap.u.frame_mapping.cap == &mem->cap)
        {
            TRACE_CAP_MSG("cleaning up mapping", next);
            mapping_count ++;

            // do unmap
            struct Frame_Mapping *mapping = &next->cap.u.frame_mapping;
            struct cte *pgtable = mapping->ptable;
            if (!pgtable) {
                debug(SUBSYS_PAGING, "mapping->ptable == 0: just deleting mapping\n");
                // no page table recorded, so there is nothing to unmap;
                // just delete the mapping cap
                goto delete_mapping;
            }
            if (!type_is_vnode(pgtable->cap.type)) {
                debug(SUBSYS_PAGING,
                        "mapping->ptable.type not vnode (%d): just deleting mapping\n",
                        mapping->ptable->cap.type);
                // not installed in a valid page table, so there is nothing to
                // unmap; just delete the mapping cap
                goto delete_mapping;
            }

            lpaddr_t ptable_lp = gen_phys_to_local_phys(get_address(&pgtable->cap));
            lvaddr_t ptable_lv = local_phys_to_mem(ptable_lp);
            cslot_t slot = mapping->entry;

            // unmap
            do_unmap(ptable_lv, slot, mapping->pte_count);

            unmap_count ++;

            // TLB flush?
            if (unmap_count == 1) {
                err = compile_vaddr(pgtable, slot, &vaddr);
                if (err_is_ok(err) && mapping->pte_count == 1) {
                    single_page_flush = true;
                }
            }

delete_mapping:
            assert(!next->delete_node.next);
            // mark mapping cap for delete: cannot do delete here as it messes
            // up mdb_successor()
            next->delete_node.next = to_delete;
            to_delete = next;
        }
    }

    // delete mapping caps
    while (to_delete) {
        next = to_delete->delete_node.next;
        err = caps_delete(to_delete);
        if (err_is_fail(err)) {
            printk(LOG_NOTE, "caps_delete: %"PRIuERRV"\n", err);
        }
        to_delete = next;
    }

    TRACE_CAP_MSGF(mem, "unmapped %d/%d instances", unmap_count, mapping_count);

    // do TLB flush
    if (single_page_flush) {
        do_one_tlb_flush(vaddr);
    } else {
        do_full_tlb_flush();
    }

    return SYS_ERR_OK;
}

Example #14
0
/*
 * compile_vaddr returns the lowest address that is addressed by entry 'entry'
 * in page table 'ptable'
 */
errval_t compile_vaddr(struct cte *ptable, size_t entry, genvaddr_t *retvaddr)
{
    if (!type_is_vnode(ptable->cap.type)) {
        return SYS_ERR_VNODE_TYPE;
    }

    genvaddr_t vaddr = 0;
    // shift at least by BASE_PAGE_BITS for first vaddr part
    size_t shift = BASE_PAGE_BITS;

    // figure out how much we need to shift (assuming that
    // compile_vaddr can be used on arbitrary page table types)
    // A couple of cases have fallthroughs in order to avoid having
    // multiple calls to vnode_objbits with the same type argument.
    switch (ptable->cap.type) {
        case ObjType_VNode_x86_64_pml4:
            shift += vnode_objbits(ObjType_VNode_x86_64_pdpt);
        case ObjType_VNode_x86_64_pdpt:
            shift += vnode_objbits(ObjType_VNode_x86_64_pdir);
        case ObjType_VNode_x86_64_pdir:
            shift += vnode_objbits(ObjType_VNode_x86_64_ptable);
        case ObjType_VNode_x86_64_ptable:
            break;

        case ObjType_VNode_x86_32_pdpt:
            shift += vnode_objbits(ObjType_VNode_x86_32_pdir);
        case ObjType_VNode_x86_32_pdir:
            shift += vnode_objbits(ObjType_VNode_x86_32_ptable);
        case ObjType_VNode_x86_32_ptable:
            break;

        case ObjType_VNode_ARM_l2:
            shift += vnode_objbits(ObjType_VNode_ARM_l1);
        case ObjType_VNode_ARM_l1:
            break;

        case ObjType_VNode_AARCH64_l0:
            shift += vnode_objbits(ObjType_VNode_AARCH64_l1);
        case ObjType_VNode_AARCH64_l1:
            shift += vnode_objbits(ObjType_VNode_AARCH64_l2);
        case ObjType_VNode_AARCH64_l2:
            shift += vnode_objbits(ObjType_VNode_AARCH64_l3);
        case ObjType_VNode_AARCH64_l3:
            break;

        default:
            return SYS_ERR_VNODE_TYPE;
    }

    size_t mask = (1ULL<<vnode_objbits(ptable->cap.type))-1;
    vaddr = ((genvaddr_t)(entry & mask)) << shift;

    // add next piece of virtual address until we are at root page table
    struct cte *old = ptable;
    struct cte *next, *mapping = NULL;
    errval_t err;
    while (!is_root_pt(old->cap.type))
    {
        err = find_mapping_for_cap(old, &mapping);
        if (err_is_fail(err)) {
            // no mapping found, cannot reconstruct vaddr
            *retvaddr = 0;
            return SYS_ERR_VNODE_NOT_INSTALLED;
        }
        err = find_next_ptable(mapping, &next);
        // no next page table
        if (err == SYS_ERR_VNODE_NOT_INSTALLED ||
            err == SYS_ERR_VNODE_LOOKUP_NEXT)
        {
            *retvaddr = 0;
            return SYS_ERR_VNODE_NOT_INSTALLED;
        }
        if (err_is_fail(err)) {
            return err;
        }
        // calculate offset into next level ptable
        size_t offset = mapping->cap.u.frame_mapping.entry * get_pte_size();
        // shift new part of vaddr by old shiftwidth + #entries of old ptable
        shift += vnode_entry_bits(old->cap.type);

        mask = (1ULL<<vnode_objbits(next->cap.type))-1;
        vaddr |= ((offset & mask) << shift);
        old = next;
    }

    *retvaddr = vaddr;
    return SYS_ERR_OK;
}
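
A worked example of the reconstruction, assuming vnode_objbits() returns 12 (BASE_PAGE_BITS) for the 4 KiB x86-64 leaf page table:

/* For ObjType_VNode_x86_64_ptable the switch leaves shift at
 * BASE_PAGE_BITS == 12 and mask == 0xfff, so entry 0x1a3 contributes
 *     (0x1a3 & 0xfff) << 12 == 0x1a3000
 * as the low part of the reconstructed address; the while loop then adds each
 * parent table's entry index at successively higher bit positions until the
 * root page table is reached. */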

Example #15
0
/**
 * \brief Cleanup the last cap copy for an object and the object itself
 */
static errval_t
cleanup_last(struct cte *cte, struct cte *ret_ram_cap)
{
    errval_t err;

    TRACE_CAP_MSG("cleaning up last copy", cte);
    struct capability *cap = &cte->cap;

    assert(!has_copies(cte));
    if (cte->mdbnode.remote_copies) {
        printk(LOG_WARN, "cleanup_last but remote_copies is set\n");
    }

    if (ret_ram_cap && ret_ram_cap->cap.type != ObjType_Null) {
        return SYS_ERR_SLOT_IN_USE;
    }

    struct RAM ram = { .bits = 0 };
    size_t len = sizeof(struct RAM) / sizeof(uintptr_t) + 1;

    if (!has_descendants(cte) && !has_ancestors(cte)) {
        // List all RAM-backed capabilities here
        // NB: ObjType_PhysAddr and ObjType_DevFrame caps are *not* RAM-backed!
        switch(cap->type) {
        case ObjType_RAM:
            ram.base = cap->u.ram.base;
            ram.bits = cap->u.ram.bits;
            break;

        case ObjType_Frame:
            ram.base = cap->u.frame.base;
            ram.bits = cap->u.frame.bits;
            break;

        case ObjType_CNode:
            ram.base = cap->u.cnode.cnode;
            ram.bits = cap->u.cnode.bits + OBJBITS_CTE;
            break;

        case ObjType_Dispatcher:
            // Convert to genpaddr
            ram.base = local_phys_to_gen_phys(mem_to_local_phys((lvaddr_t)cap->u.dispatcher.dcb));
            ram.bits = OBJBITS_DISPATCHER;
            break;

        default:
            // Handle VNodes here
            if(type_is_vnode(cap->type)) {
                ram.base = get_address(cap);
                ram.bits = vnode_objbits(cap->type);
            }
            break;
        }
    }

    err = cleanup_copy(cte);
    if (err_is_fail(err)) {
        return err;
    }

    if(ram.bits > 0) {
        // Send back as RAM cap to monitor
        if (ret_ram_cap) {
            if (dcb_current != monitor_ep.u.endpoint.listener) {
                printk(LOG_WARN, "sending fresh ram cap to non-monitor?\n");
            }
            assert(ret_ram_cap->cap.type == ObjType_Null);
            ret_ram_cap->cap.u.ram = ram;
            ret_ram_cap->cap.type = ObjType_RAM;
            err = mdb_insert(ret_ram_cap);
            TRACE_CAP_MSG("reclaimed", ret_ram_cap);
            assert(err_is_ok(err));
            // note: this is a "success" code!
            err = SYS_ERR_RAM_CAP_CREATED;
        }
        else if (monitor_ep.type && monitor_ep.u.endpoint.listener != 0) {
#ifdef TRACE_PMEM_CAPS
            struct cte ramcte;
            memset(&ramcte, 0, sizeof(ramcte));
            ramcte.cap.u.ram = ram;
            ramcte.cap.type = ObjType_RAM;
            TRACE_CAP_MSG("reclaimed", ret_ram_cap);
#endif
            // XXX: This looks pretty ugly. We need an interface.
            err = lmp_deliver_payload(&monitor_ep, NULL,
                                      (uintptr_t *)&ram,
                                      len, false);
        }
        else {
            printk(LOG_WARN, "dropping ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
        }
        if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
            printk(LOG_WARN, "dropped ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
            err = SYS_ERR_OK;

        } else {
            assert(err_is_ok(err));
        }
    }

    return err;
}

/*
 * Mark phase of revoke mark & sweep
 */

static void caps_mark_revoke_copy(struct cte *cte)
{
    errval_t err;
    err = caps_try_delete(cte);
    if (err_is_fail(err)) {
        // this should not happen as there is a copy of the cap
        panic("error while marking/deleting cap copy for revoke:"
              " 0x%"PRIuERRV"\n", err);
    }
}