示例#1
0
struct sysret sys_cap_has_relations(capaddr_t caddr, uint8_t vbits,
                                    uint8_t mask)
{
    // Resolve the capability in the current dispatcher's cspace; read
    // rights suffice since we only inspect MDB relations.
    struct cte *cap;
    errval_t err = caps_lookup_slot(&dcb_current->cspace.cap, caddr, vbits,
                                    &cap, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    // Probe only the relations the caller asked about and accumulate the
    // matching bits into the result value.
    uint8_t relations = 0;
    if ((mask & RRELS_COPY_BIT) && has_copies(cap)) {
        relations |= RRELS_COPY_BIT;
    }
    if ((mask & RRELS_ANCS_BIT) && has_ancestors(cap)) {
        relations |= RRELS_ANCS_BIT;
    }
    if ((mask & RRELS_DESC_BIT) && has_descendants(cap)) {
        relations |= RRELS_DESC_BIT;
    }

    return (struct sysret) { .error = SYS_ERR_OK, .value = relations };
}
示例#2
0
/**
 * \brief Perform one step of background capability deletion.
 *
 * Processes the cap at the head of the global delete queue
 * (delete_head/delete_tail). Exactly one of four cases applies: the cap is
 * locked (fail and retry later), has copies or is foreign (local cleanup),
 * is the last owned copy with remote copies (ownership copy-out), or is the
 * last copy system-wide (full delete).
 *
 * \param ret_next Must be a non-NULL, empty (ObjType_Null) slot; may receive
 *                 a cap copied out by caps_copyout_last.
 *
 * \return SYS_ERR_CAP_NOT_FOUND if the queue is empty; SYS_ERR_CAP_LOCKED if
 *         the head cap is locked; SYS_ERR_DELETE_LAST_OWNED (a non-OK code,
 *         but the element was dequeued) after a successful copy-out; otherwise
 *         the result of cleanup_copy() or caps_delete_last().
 */
errval_t caps_delete_step(struct cte *ret_next)
{
    errval_t err = SYS_ERR_OK;

    assert(ret_next);
    assert(ret_next->cap.type == ObjType_Null);

    // Empty queue: head and tail must agree.
    if (!delete_head) {
        assert(!delete_tail);
        return SYS_ERR_CAP_NOT_FOUND;
    }
    assert(delete_head->mdbnode.in_delete == true);

    TRACE_CAP_MSG("performing delete step", delete_head);
    // Capture the successor now: some branches clobber delete_node.next.
    struct cte *cte = delete_head, *next = cte->delete_node.next;
    if (cte->mdbnode.locked) {
        // Locked cap cannot be deleted; leave it at the head of the queue.
        err = SYS_ERR_CAP_LOCKED;
    }
    else if (distcap_is_foreign(cte) || has_copies(cte)) {
        // Not the last owned copy anywhere: deleting this copy is enough.
        err = cleanup_copy(cte);
    }
    else if (cte->mdbnode.remote_copies) {
        // Last local copy, but remote copies exist: hand ownership off by
        // copying the cap out to ret_next for the monitor to migrate.
        err = caps_copyout_last(cte, ret_next);
        if (err_is_ok(err)) {
            // Dequeue here rather than in the common success path below,
            // because we are about to overwrite err with a non-OK code.
            if (next) {
                delete_head = next;
            } else {
                delete_head = delete_tail = NULL;
            }
            err = SYS_ERR_DELETE_LAST_OWNED;
        }
    }
    else {
        // XXX: need to clear delete_list flag because it's reused for
        // clear_list? -SG
        cte->delete_node.next = NULL;
        err = caps_delete_last(cte, ret_next);
        if (err_is_fail(err)) {
            TRACE_CAP_MSG("delete last failed", cte);
            // if delete_last fails, reinsert in delete list
            cte->delete_node.next = next;
        }
    }

    // Common success path: advance the queue past the handled element.
    if (err_is_ok(err)) {
        if (next) {
            delete_head = next;
        } else {
            delete_head = delete_tail = NULL;
        }
    }
    return err;
}
示例#3
0
/**
 * \brief Try a "simple" delete of a cap. If this fails, the monitor needs to
 * negotiate a delete across the system.
 */
static errval_t caps_try_delete(struct cte *cte)
{
    TRACE_CAP_MSG("trying simple delete", cte);

    // A locked cap, or one already queued for deletion, must not be touched.
    if (distcap_is_in_delete(cte) || cte->mdbnode.locked) {
        return SYS_ERR_CAP_LOCKED;
    }

    // Foreign caps and caps with local copies only need this copy removed.
    if (distcap_is_foreign(cte) || has_copies(cte)) {
        return cleanup_copy(cte);
    }

    // Last owned copy with remote copies, or an object containing further
    // CTEs (CNode/Dispatcher): too complex for a simple delete, hand the
    // job to the monitor.
    if (cte->mdbnode.remote_copies
        || cte->cap.type == ObjType_CNode
        || cte->cap.type == ObjType_Dispatcher) {
        return SYS_ERR_DELETE_LAST_OWNED;
    }

    // Plain last copy system-wide: clean up the object directly.
    return cleanup_last(cte, NULL);
}
示例#4
0
/**
 * \brief Delete the last copy of a cap in the entire system.
 *
 * \param cte         Cap to delete; must have no local copies.
 * \param ret_ram_cap Optional slot passed through to cleanup_last() to
 *                    receive a reclaimed RAM cap.
 *
 * \return SYS_ERR_OK when the object was handed to the clear list
 *         (CNode/Dispatcher), otherwise the result of caps_try_delete() or
 *         cleanup_last().
 *
 * \bug Somewhere in the delete process, the remote_ancs property should be
 *      propagated to (remote) immediate descendants.
 */
errval_t caps_delete_last(struct cte *cte, struct cte *ret_ram_cap)
{
    errval_t err;
    assert(!has_copies(cte));

    if (cte->mdbnode.remote_copies) {
        printk(LOG_WARN, "delete_last but remote_copies is set\n");
    }

    TRACE_CAP_MSG("deleting last", cte);

    // try simple delete
    // XXX: this really should always fail, enforce that? -MN
    // XXX: this is probably not the way we should enforce/check this -SG
    err = caps_try_delete(cte);
    // Only DELETE_LAST_OWNED / CAP_LOCKED mean "needs the complex path";
    // anything else (including SYS_ERR_OK) is a final answer.
    if (err_no(err) != SYS_ERR_DELETE_LAST_OWNED &&
        err_no(err) != SYS_ERR_CAP_LOCKED) {
        return err;
    }

    // CNodes and dcbs contain further CTEs, so cannot simply be deleted
    // instead, we place them in a clear list, which is progressively worked
    // through until each list element contains only ctes that point to
    // other CNodes or dcbs, at which point they are scheduled for final
    // deletion, which only happens when the clear lists are empty.

    if (cte->cap.type == ObjType_CNode) {
        debug(SUBSYS_CAPS, "deleting last copy of cnode: %p\n", cte);
        // Mark all non-Null slots for deletion
        for (cslot_t i = 0; i < (1<<cte->cap.u.cnode.bits); i++) {
            struct cte *slot = caps_locate_slot(cte->cap.u.cnode.cnode, i);
            caps_mark_revoke_generic(slot);
        }

        // delete_node.next is reused as the clear-list link; it may only be
        // non-NULL here if this cte is still the head of the delete queue.
        assert(cte->delete_node.next == NULL || delete_head == cte);
        cte->delete_node.next = NULL;
        clear_list_prepend(cte);

        return SYS_ERR_OK;
    }
    else if (cte->cap.type == ObjType_Dispatcher)
    {
        debug(SUBSYS_CAPS, "deleting last copy of dispatcher: %p\n", cte);
        struct capability *cap = &cte->cap;
        struct dcb *dcb = cap->u.dispatcher.dcb;

        // Remove from queue
        scheduler_remove(dcb);
        // Reset current if it was deleted
        if (dcb_current == dcb) {
            dcb_current = NULL;
        }

        // Remove from wakeup queue
        wakeup_remove(dcb);

        // Notify monitor
        if (monitor_ep.u.endpoint.listener == dcb) {
            // The dying dispatcher IS the monitor: nobody left to notify.
            printk(LOG_ERR, "monitor terminated; expect badness!\n");
            monitor_ep.u.endpoint.listener = NULL;
        } else if (monitor_ep.u.endpoint.listener != NULL) {
            // Tell the monitor which domain exited.
            uintptr_t payload = dcb->domain_id;
            err = lmp_deliver_payload(&monitor_ep, NULL, &payload, 1, false);
            if (err_is_fail(err)) {
                printk(LOG_NOTE, "while notifying monitor about domain exit: %"PRIuERRV".\n", err);
                printk(LOG_NOTE, "please add the console output to the following bug report: https://code.systems.ethz.ch/T78\n");
            }
            assert(err_is_ok(err));
        }

        // Queue the dcb's embedded CTEs for deletion as well.
        caps_mark_revoke_generic(&dcb->cspace);
        caps_mark_revoke_generic(&dcb->disp_cte);
        assert(cte->delete_node.next == NULL || delete_head == cte);
        cte->delete_node.next = NULL;
        clear_list_prepend(cte);

        return SYS_ERR_OK;
    }
    else
    {
        // last copy, perform object cleanup
        return cleanup_last(cte, ret_ram_cap);
    }
}
示例#5
0
/**
 * \brief Cleanup the last cap copy for an object and the object itself
 *
 * If the cap is RAM-backed and has neither descendants nor ancestors, the
 * backing memory is reclaimed: placed in \p ret_ram_cap if provided,
 * otherwise sent to the monitor endpoint, otherwise dropped with a warning.
 *
 * \param cte         Last copy of the cap; must have no local copies.
 * \param ret_ram_cap Optional empty (ObjType_Null) slot to receive the
 *                    reclaimed RAM cap.
 *
 * \return SYS_ERR_OK; SYS_ERR_RAM_CAP_CREATED (a "success" code) when a RAM
 *         cap was placed in ret_ram_cap; SYS_ERR_SLOT_IN_USE if ret_ram_cap
 *         was occupied; or an error from cleanup_copy()/lmp_deliver_payload().
 */
static errval_t
cleanup_last(struct cte *cte, struct cte *ret_ram_cap)
{
    errval_t err;

    TRACE_CAP_MSG("cleaning up last copy", cte);
    struct capability *cap = &cte->cap;

    assert(!has_copies(cte));
    if (cte->mdbnode.remote_copies) {
        printk(LOG_WARN, "cleanup_last but remote_copies is set\n");
    }

    // Refuse to overwrite a slot the caller already has something in.
    if (ret_ram_cap && ret_ram_cap->cap.type != ObjType_Null) {
        return SYS_ERR_SLOT_IN_USE;
    }

    // ram.bits == 0 doubles as the "nothing to reclaim" sentinel below.
    struct RAM ram = { .bits = 0 };
    size_t len = sizeof(struct RAM) / sizeof(uintptr_t) + 1;

    // Memory may only be reclaimed if no related caps (ancestors or
    // descendants) still reference any part of the region.
    if (!has_descendants(cte) && !has_ancestors(cte)) {
        // List all RAM-backed capabilities here
        // NB: ObjType_PhysAddr and ObjType_DevFrame caps are *not* RAM-backed!
        switch(cap->type) {
        case ObjType_RAM:
            ram.base = cap->u.ram.base;
            ram.bits = cap->u.ram.bits;
            break;

        case ObjType_Frame:
            ram.base = cap->u.frame.base;
            ram.bits = cap->u.frame.bits;
            break;

        case ObjType_CNode:
            ram.base = cap->u.cnode.cnode;
            ram.bits = cap->u.cnode.bits + OBJBITS_CTE;
            break;

        case ObjType_Dispatcher:
            // Convert to genpaddr
            ram.base = local_phys_to_gen_phys(mem_to_local_phys((lvaddr_t)cap->u.dispatcher.dcb));
            ram.bits = OBJBITS_DISPATCHER;
            break;

        default:
            // Handle VNodes here
            if(type_is_vnode(cap->type)) {
                ram.base = get_address(cap);
                ram.bits = vnode_objbits(cap->type);
            }
            break;
        }
    }

    // Remove the cap itself; 'ram' still describes the freed region.
    err = cleanup_copy(cte);
    if (err_is_fail(err)) {
        return err;
    }

    if(ram.bits > 0) {
        // Send back as RAM cap to monitor
        if (ret_ram_cap) {
            if (dcb_current != monitor_ep.u.endpoint.listener) {
                printk(LOG_WARN, "sending fresh ram cap to non-monitor?\n");
            }
            assert(ret_ram_cap->cap.type == ObjType_Null);
            ret_ram_cap->cap.u.ram = ram;
            ret_ram_cap->cap.type = ObjType_RAM;
            err = mdb_insert(ret_ram_cap);
            TRACE_CAP_MSG("reclaimed", ret_ram_cap);
            assert(err_is_ok(err));
            // note: this is a "success" code!
            err = SYS_ERR_RAM_CAP_CREATED;
        }
        else if (monitor_ep.type && monitor_ep.u.endpoint.listener != 0) {
#ifdef TRACE_PMEM_CAPS
            struct cte ramcte;
            memset(&ramcte, 0, sizeof(ramcte));
            ramcte.cap.u.ram = ram;
            ramcte.cap.type = ObjType_RAM;
            // Bugfix: trace the locally-built ramcte; ret_ram_cap is NULL
            // on this branch, so tracing it dereferenced a null cte.
            TRACE_CAP_MSG("reclaimed", &ramcte);
#endif
            // XXX: This looks pretty ugly. We need an interface.
            err = lmp_deliver_payload(&monitor_ep, NULL,
                                      (uintptr_t *)&ram,
                                      len, false);
        }
        else {
            printk(LOG_WARN, "dropping ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
        }
        if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
            // Monitor's LMP buffer is full: drop the region rather than fail.
            printk(LOG_WARN, "dropped ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
            err = SYS_ERR_OK;

        } else {
            assert(err_is_ok(err));
        }
    }

    return err;
}

/*
 * Mark phase of revoke mark & sweep
 */

// Delete one copy of a cap during the revoke mark phase. Since a copy
// exists, caps_try_delete() must take the cheap cleanup_copy path and
// cannot fail; any failure indicates corrupted state, so panic.
static void caps_mark_revoke_copy(struct cte *cte)
{
    errval_t err = caps_try_delete(cte);
    if (err_is_ok(err)) {
        return;
    }
    panic("error while marking/deleting cap copy for revoke:"
          " 0x%"PRIuERRV"\n", err);
}