errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping,
                             size_t slot, size_t num_pages)
{
    assert(type_is_vnode(pgtable->type));
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%zd pages)\n", num_pages);

    // get page table entry data
    genpaddr_t paddr;
    read_pt_entry(pgtable, slot, &paddr, NULL, NULL);
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    // get virtual address of first page
    // TODO: error checking
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID
                   && leaf_pt->mapping_info.pte == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else {
            return err;
        }
    }

    if (num_pages != mapping->mapping_info.pte_count) {
        // trying to unmap a different number of pages than were mapped
        return SYS_ERR_VM_MAP_SIZE;
    }

    do_unmap(pt, slot, num_pages);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (num_pages > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    // clear the mapping info now that the pages are unmapped
    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));

    return SYS_ERR_OK;
}
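
A minimal caller sketch for this variant (hypothetical wrapper, not from the listing; it only uses the declarations shown above):

static errval_t unmap_whole_mapping(struct capability *pgtable,
                                    struct cte *mapping, size_t slot)
{
    // page_mappings_unmap() rejects partial unmaps with SYS_ERR_VM_MAP_SIZE,
    // so pass the page count that was recorded when the mapping was created.
    return page_mappings_unmap(pgtable, mapping, slot,
                               mapping->mapping_info.pte_count);
}
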
Example #2
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping)
{
    assert(type_is_vnode(pgtable->type));
    assert(type_is_mapping(mapping->cap.type));
    struct Frame_Mapping *info = &mapping->cap.u.frame_mapping;
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%hu pages)\n", info->pte_count);

    // calculate page table address
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    cslot_t slot = info->entry;
    // get virtual address of first page
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED && vaddr == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else {
            return err;
        }
    }

    do_unmap(pt, slot, info->pte_count);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (info->pte_count > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    return SYS_ERR_OK;
}
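
This variant reads everything it needs from the mapping capability itself, so callers no longer pass a slot or page count. A minimal sketch (hypothetical wrapper; it relies only on the Frame_Mapping fields used above):

static errval_t unmap_via_mapping_cap(struct cte *mapping)
{
    // The mapping cap records the page table it lives in; derive the
    // ptable capability from that cte and hand both to the unmap path.
    struct cte *ptable = mapping->cap.u.frame_mapping.ptable;
    return page_mappings_unmap(&ptable->cap, mapping);
}
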
/**
 * \brief modify flags of mapping for `frame`.
 *
 * \arg frame the frame whose mapping should be modified
 * \arg offset the offset, in page-table entries, from the mapping's first entry
 * \arg pages the number of pages to modify
 * \arg mflags the new flags
 * \arg va_hint a user-supplied virtual address for hinting selective TLB
 *              flushing
 */
errval_t page_mappings_modify_flags(struct capability *frame, size_t offset,
                                    size_t pages, size_t mflags, genvaddr_t va_hint)
{
    struct cte *mapping = cte_for_cap(frame);
    struct mapping_info *info = &mapping->mapping_info;
    struct cte *leaf_pt;
    errval_t err;
    err = mdb_find_cap_for_address(info->pte, &leaf_pt);
    if (err_is_fail(err)) {
        return err;
    }

    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_64_flags_t flags =
        paging_x86_64_cap_to_page_flags(frame->rights);
    // Mask with provided access rights mask
    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
    // Add additional arch-specific flags
    flags |= X86_64_PTABLE_FLAGS(mflags);
    // Unconditionally mark the page present
    flags |= X86_64_PTABLE_PRESENT;

    // check arguments
    if (offset >= X86_64_PTABLE_SIZE) { // offset must lie within the page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }
    if (offset + pages > X86_64_PTABLE_SIZE) { // range must not run past its end
        return SYS_ERR_VM_MAP_SIZE;
    }

    /* Calculate location of first pt entry we need to modify */
    lvaddr_t base = local_phys_to_mem(info->pte) +
        offset * sizeof(union x86_64_ptable_entry);

    size_t pagesize = BASE_PAGE_SIZE;
    switch (leaf_pt->cap.type) {
        case ObjType_VNode_x86_64_ptable: // 4kB base pages
            for (size_t i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags(entry, flags);
            }
            break;
        case ObjType_VNode_x86_64_pdir: // 2MB large pages
            for (size_t i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags_large(entry, flags);
            }
            pagesize = LARGE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdpt: // 1GB huge pages
            for (size_t i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags_huge(entry, flags);
            }
            pagesize = HUGE_PAGE_SIZE;
            break;
        default:
            return SYS_ERR_WRONG_MAPPING;
    }

    // va_hint > BASE_PAGE_SIZE also implies va_hint != 0
    if (va_hint > BASE_PAGE_SIZE) {
        // use as direct hint; invlpg also works for large/huge pages
        for (size_t i = 0; i < pages; i++) {
            do_one_tlb_flush(va_hint + i * pagesize);
        }
    } else {
        // no usable hint supplied: do a full TLB flush
        do_full_tlb_flush();
    }
    return SYS_ERR_OK;
}
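
A hedged usage sketch: MFLAGS_READONLY is a placeholder (the listing defines no mflags constants), and `frame` is assumed to be mapped at `va` in a 4kB leaf page table:

static errval_t remap_readonly(struct capability *frame, genvaddr_t va,
                               size_t pages)
{
    // Hypothetical access-rights mask; X86_64_PTABLE_ACCESS() extracts
    // the access bits from it inside page_mappings_modify_flags().
    size_t mflags = MFLAGS_READONLY;
    // Modify `pages` entries starting at the mapping's first entry
    // (offset 0); passing `va` enables selective invlpg flushes instead
    // of a full TLB flush.
    return page_mappings_modify_flags(frame, 0, pages, mflags, va);
}
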
Example #4
errval_t unmap_capability(struct cte *mem)
{
    errval_t err;

    TRACE_CAP_MSG("unmapping", mem);

    genvaddr_t vaddr = 0;
    bool single_page_flush = false;
    int mapping_count = 0, unmap_count = 0;
    genpaddr_t faddr = get_address(&mem->cap);

    // iterate over all mappings associated with 'mem' and unmap them
    struct cte *next = mem;
    struct cte *to_delete = NULL;

    while ((next = mdb_successor(next)) && get_address(&next->cap) == faddr) {
        TRACE_CAP_MSG("looking at", next);
        if (next->cap.type == get_mapping_type(mem->cap.type) &&
            next->cap.u.frame_mapping.cap == &mem->cap)
        {
            TRACE_CAP_MSG("cleaning up mapping", next);
            mapping_count ++;

            // do unmap
            struct Frame_Mapping *mapping = &next->cap.u.frame_mapping;
            struct cte *pgtable = mapping->ptable;
            if (!pgtable) {
                debug(SUBSYS_PAGING, "mapping->ptable == 0: just deleting mapping\n");
                // no page table recorded, so there is nothing to unmap
                goto delete_mapping;
            }
            if (!type_is_vnode(pgtable->cap.type)) {
                debug(SUBSYS_PAGING,
                        "mapping->ptable->cap.type not vnode (%d): just deleting mapping\n",
                        mapping->ptable->cap.type);
                // stale page-table reference, so there is nothing to unmap
                goto delete_mapping;
            }

            lpaddr_t ptable_lp = gen_phys_to_local_phys(get_address(&pgtable->cap));
            lvaddr_t ptable_lv = local_phys_to_mem(ptable_lp);
            cslot_t slot = mapping->entry;

            // unmap
            do_unmap(ptable_lv, slot, mapping->pte_count);

            unmap_count ++;

            // TLB flush: a selective flush is only possible when exactly
            // one single-page mapping is unmapped in total
            if (unmap_count == 1) {
                err = compile_vaddr(pgtable, slot, &vaddr);
                if (err_is_ok(err) && mapping->pte_count == 1) {
                    single_page_flush = true;
                }
            } else {
                // a second mapping was unmapped; one invlpg no longer covers
                // everything, so force the full flush below
                single_page_flush = false;
            }

delete_mapping:
            assert(!next->delete_node.next);
            // mark mapping cap for delete: cannot do delete here as it messes
            // up mdb_successor()
            next->delete_node.next = to_delete;
            to_delete = next;
        }
    }

    // delete mapping caps
    while (to_delete) {
        next = to_delete->delete_node.next;
        err = caps_delete(to_delete);
        if (err_is_fail(err)) {
            printk(LOG_NOTE, "caps_delete: %"PRIuERRV"\n", err);
        }
        to_delete = next;
    }

    TRACE_CAP_MSGF(mem, "unmapped %d/%d instances", unmap_count, mapping_count);

    // do TLB flush
    if (single_page_flush) {
        do_one_tlb_flush(vaddr);
    } else {
        do_full_tlb_flush();
    }

    return SYS_ERR_OK;
}
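
The deferred-delete list above is an intrusive singly-linked stack: deleting during the walk would confuse mdb_successor(), so mappings are pushed onto the list first and deleted afterwards. A standalone sketch of the same pattern (generic names, nothing Barrelfish-specific):

struct node {
    struct node *next;
};

// Push nodes during a traversal, process them afterwards. The next
// pointer is saved before fn() runs, since fn() may free the node.
static void process_deferred(struct node *head, void (*fn)(struct node *))
{
    while (head) {
        struct node *next = head->next;
        fn(head);
        head = next;
    }
}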