Example #1
/// Destroy the local state associated with a given channel
void lmp_chan_destroy(struct lmp_chan *lc)
{
    lc->connstate = LMP_DISCONNECTED;
    cap_destroy(lc->local_cap);

    if (lc->endpoint != NULL) {
        lmp_endpoint_free(lc->endpoint);
    }

    // remove from send retry queue on dispatcher
    if (waitset_chan_is_registered(&lc->send_waitset)) {
        assert(lc->prev != NULL && lc->next != NULL);
        dispatcher_handle_t handle = disp_disable();
        struct dispatcher_generic *disp = get_dispatcher_generic(handle);
        if (lc->next == lc->prev) {
            assert_disabled(lc->next == lc);
            assert_disabled(disp->lmp_send_events_list == lc);
            disp->lmp_send_events_list = NULL;
        } else {
            lc->prev->next = lc->next;
            lc->next->prev = lc->prev;
        }
        disp_enable(handle);

#ifndef NDEBUG
        lc->next = lc->prev = NULL;
#endif
    }

    waitset_chanstate_destroy(&lc->send_waitset);
}
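
The examples that follow share one idiom: a capability allocated on a path that later fails is destroyed before the error is reported. A minimal sketch of that pattern, using only calls that appear in the examples below (the helper name is invented for illustration):

// Sketch only: allocate a frame, map it, and undo the allocation if the
// mapping fails. frame_alloc/vspace_map_one_frame/cap_destroy and the
// error macros are the same ones used throughout these examples.
static errval_t alloc_and_map_sketch(struct capref *retframe, void **retbuf,
                                     size_t bytes)
{
    errval_t err = frame_alloc(retframe, bytes, &bytes);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    err = vspace_map_one_frame(retbuf, bytes, *retframe, NULL, NULL);
    if (err_is_fail(err)) {
        cap_destroy(*retframe);     // release the frame before failing
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }
    return SYS_ERR_OK;
}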
Example #2
static errval_t do_single_unmap(struct pmap_arm *pmap, genvaddr_t vaddr,
                                size_t pte_count, bool delete_cap)
{
    errval_t err;
    struct vnode *pt = find_ptable(pmap, vaddr);
    if (pt) {
        // analogous to do_single_map, we use 10 bits for tracking pages in user space -SG
        struct vnode *page = find_vnode(pt, ARM_USER_L2_OFFSET(vaddr));
        if (page && page->u.frame.pte_count == pte_count) {
            err = vnode_unmap(pt->u.vnode.cap, page->u.frame.cap,
                              page->entry, page->u.frame.pte_count);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "vnode_unmap");
                return err_push(err, LIB_ERR_VNODE_UNMAP);
            }

            // Free up the resources
            if (delete_cap) {
                err = cap_destroy(page->u.frame.cap);
                if (err_is_fail(err)) {
                    return err_push(err, LIB_ERR_PMAP_DO_SINGLE_UNMAP);
                }
            }
            remove_vnode(pt, page);
            slab_free(&pmap->slab, page);
        }
        else {
            return LIB_ERR_PMAP_FIND_VNODE;
        }
    }

    return SYS_ERR_OK;
}
Example #3
void
update_owner__rx_handler(struct intermon_binding *b, intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;
    struct capref capref;
    struct capability cap;
    caprep_to_capability(&caprep, &cap);

    err = slot_alloc(&capref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate slot for owner update");
    }

    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_ok(err)) {
        err = monitor_set_cap_owner(cap_root, get_cap_addr(capref),
                                    get_cap_valid_bits(capref), from);
    }
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        err = SYS_ERR_OK;
    }

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to update cap ownership");
    }

    cap_destroy(capref);

    err = owner_updated(from, st);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send ownership update response");
    }
}
Example #4
static void monitor_bind_ump_client_request_error(struct monitor_binding *b,
                                                  struct capref frame,
                                                  uintptr_t conn_id,
                                                  uintptr_t domain_id,
                                                  errval_t err)
{
    errval_t err2;

    err2 = cap_destroy(frame);
    if (err_is_fail(err2)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }

    if (conn_id != 0) {
        err2 = remote_conn_free(conn_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "remote_conn_free failed");
        }
    }

    err2 = b->tx_vtbl.bind_ump_reply_client(b, NOP_CONT, 0, domain_id, err,
                                            NULL_CAP);
    if (err_is_fail(err2)) {
        USER_PANIC_ERR(err2, "error reply failed");
    }
}
Example #5
static void send_cap_request(struct interdisp_binding *st,
                             struct capref cap, genvaddr_t info)
{
    errval_t err = SYS_ERR_OK, err2;
    struct capref *dest = (struct capref *)(uintptr_t)info;

    err = cap_copy(*dest, cap);
    if(err_is_fail(err)) {
        err = err_push(err, LIB_ERR_CAP_COPY_FAIL);
        DEBUG_ERR(err, "cap_copy");
        abort();
        goto send_reply;
    }
    err = cap_destroy(cap);
    if(err_is_fail(err)) {
        err = err_push(err, LIB_ERR_CAP_DELETE_FAIL);
        DEBUG_ERR(err, "cap_destroy default");
        abort();
        goto send_reply;
    }

 send_reply:
    err2 = st->tx_vtbl.send_cap_reply(st, NOP_CONT, err);
    if (err_is_fail(err2)) {
        DEBUG_ERR(err, "Failed to send send_cap_reply");
    }
}
Example #6
/**
 * \brief Initialise a new UMP channel to accept an incoming binding request
 *
 * \param uc  Storage for channel state
 * \param mon_id Monitor's connection ID for this channel
 * \param frame Frame capability containing channel
 * \param inchanlen Size of incoming channel, in bytes (multiple of #UMP_MSG_BYTES)
 * \param outchanlen Size of outgoing channel, in bytes (multiple of #UMP_MSG_BYTES)
 */
errval_t ump_chan_accept(struct ump_chan *uc, uintptr_t mon_id,
                         struct capref frame, size_t inchanlen,
                         size_t outchanlen)
{
    errval_t err;

    uc->monitor_id = mon_id;
    uc->frame = frame;

    // check that the frame is big enough
    struct frame_identity frameid;
    err = invoke_frame_identify(frame, &frameid);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }

    // Ids for tracing
    uc->recvid = (uintptr_t)(frameid.base + outchanlen);
    uc->sendid = (uintptr_t)frameid.base;

    size_t framesize = ((uintptr_t)1) << frameid.bits;
    if (framesize < inchanlen + outchanlen) {
        return LIB_ERR_UMP_FRAME_OVERFLOW;
    }

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, framesize, frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) {
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state
    err = ump_chan_init(uc, (char *)buf + outchanlen, inchanlen, buf, outchanlen);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    /* mark connected */
    uc->connstate = UMP_CONNECTED;
    return SYS_ERR_OK;
}
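
A hedged caller sketch for ump_chan_accept: the frame arrives with a bind request from the monitor, and the two channel halves are simply split evenly here. The helper name is invented, and using UMP_CHANNEL_SIZE for each half assumes that constant (seen in Example #21) is a multiple of UMP_MSG_BYTES:

// Sketch only: accept an incoming UMP binding with equal-sized halves.
// On a mapping failure, ump_chan_accept destroys the frame cap itself.
static errval_t accept_ump_sketch(struct ump_chan *uc, uintptr_t mon_id,
                                  struct capref frame)
{
    return ump_chan_accept(uc, mon_id, frame,
                           UMP_CHANNEL_SIZE,   // incoming half
                           UMP_CHANNEL_SIZE);  // outgoing half
}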
Example #7
static void free_revoke_st(struct revoke_st * st)
{
    cap_destroy(st->croot);
    if (st == &static_revoke_state) {
        static_revoke_state_used = false;
    } else {
        free(st);
    }
}
Example #8
errval_t vspace_mmu_aware_unmap(struct vspace_mmu_aware *state,
                                lvaddr_t base, size_t bytes)
{
    errval_t err;
    struct capref frame;
    genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) + state->offset;
    lvaddr_t eaddr = vspace_genvaddr_to_lvaddr(gvaddr);
    genvaddr_t offset;
    genvaddr_t gen_base = vspace_lvaddr_to_genvaddr(base)
        - vregion_get_base_addr(&state->vregion);
    genvaddr_t min_offset = 0;
    bool success = false;

    assert(vspace_lvaddr_to_genvaddr(base) >= vregion_get_base_addr(&state->vregion));
    assert(base + bytes == (lvaddr_t)eaddr);

    assert(bytes <= state->consumed);
    assert(bytes <= state->offset);

    // Reduce offset
    state->offset -= bytes;
    state->consumed -= bytes;

    // Free only in bigger blocks
    if(state->mapoffset - state->offset > MIN_MEM_FOR_FREE) {
        do {
            // Unmap and return (via unfill) frames from base
            err = state->memobj.m.f.unfill(&state->memobj.m, gen_base,
                                           &frame, &offset);
            if(err_is_fail(err) && err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET) {
                return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION);
            }

            // Delete frame cap
            if(err_is_ok(err)) {
                success = true;
                if (min_offset == 0 || min_offset > offset) {
                    min_offset = offset;
                }

                err = cap_destroy(frame);
                if(err_is_fail(err)) {
                    return err;
                }
            }
        } while (err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET);

//    state->consumed -= bytes;
        if (success) {
            state->mapoffset = min_offset;
        }
    }

    return SYS_ERR_OK;
}
Example #9
static void destroy_caps(void)
{
    errval_t err;
    for (int i=0; i<CAPS_PER_CORE; i++) {
        err = cap_destroy(retyped_caps[i]);
        err = cap_revoke(my_caps[i]);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "xcorecap: Retype to frame failed\n");    
        }
    }
}
Example #10
/**
 * \brief initializes a dma descriptor ring and allocates memory for it
 *
 * \param ring  the ring structure to initialize
 * \param size  number of elements in the ring
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring,
                                      uint16_t size)
{
    errval_t err;

    memset(ring, 0, sizeof(*ring));

    assert(size < (XEON_PHI_DMA_DESC_RING_MAX));
    assert(IS_POW2(size));

#ifndef __k1om__
    /*
     * we set the ram affinity to the maximum range mapped by the system memory
     * page tables when being on the host. Otherwise the card cannot access it.
     */
    uint64_t minbase, maxlimit;
    ram_get_affinity(&minbase, &maxlimit);
    ram_set_affinity(0, XEON_PHI_SYSMEM_SIZE-8*XEON_PHI_SYSMEM_PAGE_SIZE);
#endif

    size_t frame_size = ((size_t) size) * XEON_PHI_DMA_DESC_SIZE;
    err = frame_alloc(&ring->cap, frame_size, NULL);

#ifndef __k1om__
    ram_set_affinity(minbase, maxlimit);
#endif

    if (err_is_fail(err)) {
        return err;
    }

    err = vspace_map_one_frame_attr(&ring->vbase,
                                    frame_size,
                                    ring->cap,
                                    VREGION_FLAGS_READ_WRITE,
                                    NULL,
                                    NULL);
    if (err_is_fail(err)) {
        cap_destroy(ring->cap);
        return err;
    }

    struct frame_identity id;
    err = invoke_frame_identify(ring->cap, &id);
    assert(err_is_ok(err));
    ring->pbase = id.base;
    ring->size = size;

    memset(ring->vbase, 0, frame_size);

    return SYS_ERR_OK;
}
Example #11
static errval_t reclaim_memory(genpaddr_t base, uint8_t bits)
{
    /* XXX: mem client is only defined for the bsp core.
     * For app cores, just return */
    if (get_mem_client() == NULL) {
        return SYS_ERR_OK;
    }

    // Fabricate new RAM cap and hand back to mem_serv
    struct capability c = {
        .type = ObjType_RAM,
        .u.ram = {
            .base = base,
            .bits = bits,
        }
    };
    struct capref ramcap;
    errval_t err = slot_alloc(&ramcap);
    if(err_is_fail(err)) {
        return err;
    }

    err = monitor_cap_create(ramcap, &c, my_core_id);
    if(err_is_fail(err)) {
        return err;
    }

    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    errval_t result;
    thread_mutex_lock(&ram_alloc_state->ram_alloc_lock);
    struct mem_rpc_client *b = get_mem_client();
    // XXX: This should not be an RPC! It could stall the monitor, but
    // we trust mem_serv for the moment.
    err = b->vtbl.free_monitor(b, ramcap, base, bits, &result);
    thread_mutex_unlock(&ram_alloc_state->ram_alloc_lock);
    if(err_is_fail(err)) {
        return err;
    }
    if(err_is_fail(result)) {
        return result;
    }

    // XXX: this shouldn't be necessary as free_monitor uses give_away_cap
    err = cap_destroy(ramcap);
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        err = SYS_ERR_OK;
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "destroying reclaimed cap");
    }
    return err;
}
Example #12
static void cleanup_cap(struct capref cap)
{
    errval_t err;

    err = cap_revoke(cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_revoke");
    }
    err = cap_destroy(cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_revoke");
    }
}
Example #13
errval_t spawn_free(struct spawninfo *si)
{
    cap_destroy(si->rootcn_cap);
    cap_destroy(si->taskcn_cap);
    cap_destroy(si->pagecn_cap);
    cap_destroy(si->dispframe);
    cap_destroy(si->dcb);
    cap_destroy(si->argspg);
    cap_destroy(si->vtree);

    return SYS_ERR_OK;
}
Example #14
/**
 * \brief frees up the resources used by the ring.
 *
 * \param ring the descriptor ring to be freed
 *
 * \returns SYS_ERR_OK on success
 */
errval_t xeon_phi_dma_desc_ring_free(struct xdma_ring *ring)
{
    errval_t err;

    if (capref_is_null(ring->cap)) {
        return SYS_ERR_OK;
    }

    if (ring->vbase) {
        vspace_unmap(ring->vbase);
    }


    err = cap_revoke(ring->cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "revokation of ring cap failed\n");
    }
    return cap_destroy(ring->cap);
}
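
Examples #12 and #14 both follow the same two-step teardown: revoke first, so copies and descendants elsewhere are deleted, then destroy the local cap. A generic sketch of that idiom (the helper name is invented; all calls appear in the examples above):

static errval_t cap_cleanup_sketch(struct capref cap)
{
    if (capref_is_null(cap)) {
        return SYS_ERR_OK;          // nothing to free
    }
    errval_t err = cap_revoke(cap); // delete derived and remote copies first
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_revoke");
    }
    return cap_destroy(cap);        // then delete our copy and free the slot
}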
Example #15
static void monitor_bind_ump_reply(struct monitor_binding *dom_binding,
                                   uintptr_t my_mon_id, uintptr_t domain_id,
                                   errval_t msgerr, struct capref notify)
{
    errval_t err;

    struct remote_conn_state *conn = remote_conn_lookup(my_mon_id);
    if (conn == NULL) {
        USER_PANIC("invalid mon_id in UMP bind reply");
        return;
    }

    uintptr_t your_mon_id = conn->mon_id;
    struct intermon_binding *mon_binding = conn->mon_binding;

    if (err_is_ok(msgerr)) {
        /* Connection accepted */
        conn->domain_id = domain_id;
        conn->domain_binding = dom_binding;
    } else {
//error:
        /* Free the cap */
        err = cap_destroy(conn->x.ump.frame);
        assert(err_is_ok(err));

        err = remote_conn_free(my_mon_id);
        assert(err_is_ok(err));
    }

    // Identify notify cap
    struct capability capability;
    err = monitor_cap_identify(notify, &capability);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_cap_identify failed, ignored");
        return;
    }
    assert(capability.type == ObjType_Notify_RCK
           || capability.type == ObjType_Notify_IPI
           || capability.type == ObjType_Null);
    /* assert(capability.u.notify.coreid == my_core_id); */

    bind_ump_reply_cont(mon_binding, your_mon_id, my_mon_id, msgerr, capability);
}
Example #16
static void bind_lmp_client_request_error(struct monitor_binding *b,
                                          errval_t err, uintptr_t domain_id,
                                          struct monitor_binding *serv_binding,
                                          struct capref ep)
{
    errval_t err2;

    err2 = b->tx_vtbl.bind_lmp_reply_client(b, NOP_CONT, err, 0, domain_id,
                                            NULL_CAP);
    if (err_is_fail(err2)) {
        if(err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_lmp_client_request_error_state *me =
                malloc(sizeof(struct bind_lmp_client_request_error_state));
            assert(me != NULL);
            struct monitor_state *ist = b->st;
            assert(ist != NULL);
            me->args.err = err;
            me->args.conn_id = domain_id;
            me->serv_binding = serv_binding;
            me->ep = ep;
            me->elem.cont = bind_lmp_client_request_error_handler;

            err = monitor_enqueue_send(b, &ist->queue,
                                       get_default_waitset(), &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err2, "error reply failed");
        USER_PANIC_ERR(err, "The reason for lmp failure");
    }

    /* Delete the EP cap */
    // Do not delete the cap if client or service is monitor itself
    if (b != &monitor_self_binding && serv_binding != &monitor_self_binding) {
        err = cap_destroy(ep);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy failed");
        }
    }
}
Example #17
static void cap_identify(struct monitor_blocking_binding *b,
                         struct capref cap)
{
    errval_t err, reterr;

    union capability_caprep_u u;
    reterr = monitor_cap_identify(cap, &u.cap);

    /* XXX: shouldn't we skip this if we're being called from the monitor?
     * apparently not: we make a copy of the cap on LMP to self?!?! */
    err = cap_destroy(cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }

    err = b->tx_vtbl.cap_identify_response(b, NOP_CONT, reterr, u.caprepb);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "reply failed");
    }
}
Example #18
static void bind_lmp_reply(struct monitor_binding *b,
                           errval_t msgerr, uintptr_t mon_conn_id,
                           uintptr_t user_conn_id, struct capref ep)
{
    errval_t err;
    struct monitor_binding *client_binding = NULL;

    struct lmp_conn_state *conn = lmp_conn_lookup(mon_conn_id);
    if (conn == NULL) {
        DEBUG_ERR(0, "invalid connection ID");
        goto cleanup;
    }

    client_binding = conn->domain_binding;
    uintptr_t client_conn_id = conn->domain_id;

    err = lmp_conn_free(mon_conn_id);
    assert(err_is_ok(err));

    if (err_is_fail(msgerr)) {
        bind_lmp_reply_client_cont(client_binding, msgerr, 0, client_conn_id,
                                   ep, b);
    } else {
        bind_lmp_reply_client_cont(client_binding, SYS_ERR_OK, mon_conn_id,
                                   client_conn_id, ep, b);
    }
    return;

cleanup:
    /* Delete the ep cap */
    // XXX: Do not delete the cap if client or service is monitor
    if (client_binding != &monitor_self_binding && b != &monitor_self_binding) {
        err = cap_destroy(ep);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy failed");
        }
    }
}
Example #19
/**
 * \brief Handler to continue spanning domain state machine
 */
static void span_domain_reply(struct monitor_binding *mb,
                              errval_t msgerr, uintptr_t domain_id)
{
    /* On success, no further action needed */
    if (err_is_ok(msgerr)) {
        return;
    }

    /* On failure, release resources and notify the caller */
    struct span_domain_state *span_domain_state =
        (struct span_domain_state*)domain_id;
    errval_t err = cap_destroy(span_domain_state->frame);
    if (err_is_fail(err)) {
        msgerr = err_push(msgerr, LIB_ERR_CAP_DESTROY);
    }

    if (span_domain_state->callback) { /* Use the callback to return error */
        span_domain_state->callback(span_domain_state->callback_arg, msgerr);
    } else { /* Use debug_err if no callback registered */
        DEBUG_ERR(msgerr, "Failure in span_domain_reply");
    }
    free(span_domain_state);
}
Example #20
/**
 * \brief tries to free the allocated memory region
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t dma_mem_free(struct dma_mem *mem)
{
    errval_t err;

    if (mem->vaddr) {
        err = vspace_unmap((void*)mem->vaddr);
        if (err_is_fail(err)) {
            /* todo: error handling; ignored for now */
        }
    }

    if (!capref_is_null(mem->frame)) {
        err = cap_destroy(mem->frame);
        if (err_is_fail(err)) {
            /* todo: error handling; ignored for now */
        }
    }

    memset(mem, 0, sizeof(*mem));

    return SYS_ERR_OK;
}
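
dma_mem_free only touches two fields, so the relevant shape of struct dma_mem can be inferred. The sketch below is an assumption based solely on the usage above; the real definition likely carries more members (sizes, physical address, etc.):

// Hypothetical shape, inferred from dma_mem_free above.
struct dma_mem_sketch {
    lvaddr_t      vaddr;  // mapped virtual address; 0 when not mapped
    struct capref frame;  // backing frame capability; NULL_CAP when absent
};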
Example #21
static void intermon_bind_ump_reply(struct intermon_binding *ib, 
                                    uint64_t my_mon_id, uint64_t your_mon_id,
                                    errval_t msgerr, 
                                    intermon_caprep_t caprep)
{
    errval_t err;
    struct remote_conn_state *con = remote_conn_lookup(my_mon_id);
    if (con == NULL) {
        USER_PANIC_ERR(0, "unknown mon_id in UMP bind reply");
        return;
    }

    uintptr_t domain_id = con->domain_id;
    struct monitor_binding *domain_binding = con->domain_binding;
    struct capref notify_cap = NULL_CAP;

    if (err_is_ok(msgerr)) { /* bind succeeded */
        con->mon_id = your_mon_id;
        con->mon_binding = ib;

#if 0
        /* map in UMP channel state */
        void *buf;
        err = vspace_map_one_frame_attr(&buf,
              2 * (UMP_CHANNEL_SIZE + con->localchan.size * sizeof(uintptr_t)),
                                        con->frame, VREGION_FLAGS_READ,
                                        NULL, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "vspace_map_one_frame failed");
            // XXX: should not be an assert, but we don't have any way to do
            // connection teardown here!
            assert(buf != NULL);
        }
        con->sharedchan = buf;
        con->localchan.buf = buf + 2 * UMP_CHANNEL_SIZE;

        // XXX: Put frame cap on a separate allocator as it is not deleted anymore
        struct capref frame_copy;
        err = slot_alloc(&frame_copy);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed to allocator slot from channel_alloc");
        }
        err = cap_copy(frame_copy, con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed create copy of frame cap");
        }
        err = cap_destroy(con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        con->frame = frame_copy;
#endif

        struct capability capability;
        caprep_to_capability(&caprep, &capability);

        if(capability.type != ObjType_Null) {
            // Get core id of sender
            coreid_t core_id = ((struct intermon_state *)ib->st)->core_id;

            // Construct the notify cap
            err = slot_alloc(&notify_cap);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "Failed to allocate slot from channel_alloc");
            }

            err = monitor_cap_create(notify_cap, &capability, core_id);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_cap_create failed");
            }
        }
    } else { /* bind refused */
        err = cap_destroy(con->x.ump.frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        err = remote_conn_free(my_mon_id);
        assert(err_is_ok(err));
    }

    bind_ump_reply_client_cont(domain_binding, my_mon_id, domain_id, msgerr,
                               notify_cap);
}
Example #22
int invalid_mappings(void)
{
    // outline:
    // get pml4, pdpt, pdir, ptable, and frame
    // check all combinations to make sure that restrictions are implemented
    // correctly in kernel space
    // VALID:
    // map pdpt in pml4
    // map pdir in pdpt
    // map pt   in pdir
    // map frame in {pt, pdir, pdpt}
    // INVALID:
    // all other combinations

    errval_t err;
    struct capref caps[7];
    struct capref mapping;

    // allocate slot for mapping cap: can reuse
    err = slot_alloc(&mapping);
    if (err_is_fail(err)) {
        debug_printf("slot_alloc: %s (%ld)\n", err_getstring(err), err);
        return 1;
    }

    // allocate caps
    for (int i = 0; i < 5; i++) {
        // get 4k block
        struct capref mem;
        err = ram_alloc(&mem, BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            debug_printf("ram_alloc: %s (%ld)\n", err_getstring(err), err);
            return 1;
        }

        // get slot for retype dest
        err = slot_alloc(&caps[i]);
        if (err_is_fail(err)) {
            debug_printf("slot_alloc: %s (%ld)\n", err_getstring(err), err);
            return 1;
        }
        // retype to selected type
        err = cap_retype(caps[i], mem, types[i], BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            debug_printf("cap_retype: %s (%ld)\n", err_getstring(err), err);
            return 1;
        }

        // cleanup source cap
        DEBUG_INVALID_MAPPINGS("delete ram cap\n");
        err = cap_destroy(mem);
        if (err_is_fail(err)) {
            debug_printf("cap_delete(mem): %s (%ld)\n", err_getstring(err), err);
            return 1;
        }
    }
    // cap 6: 2M frame
    size_t rb = 0;
    err = frame_alloc(&caps[5], X86_64_LARGE_PAGE_SIZE, &rb);
    if (err_is_fail(err) || rb != X86_64_LARGE_PAGE_SIZE) {
        debug_printf("frame_alloc: %s (%ld)\n", err_getstring(err), err);
        return 1;
    }
    // cap 7: 1G frame
    err = frame_alloc(&caps[6], X86_64_HUGE_PAGE_SIZE, &rb);
    if (err_is_fail(err) || rb != X86_64_HUGE_PAGE_SIZE) {
        debug_printf("frame_alloc: %s (%ld)\n", err_getstring(err), err);
        return 1;
    }

    paging_x86_64_flags_t attr = 0;
    // select dest (ignore frame, asserts)
    for (int i = 0; i < 4; i++) {
        // select source
        for (int j = 0; j < 7; j++) {
            if (j >= 4) {
                // frame
                attr = FRAME_ACCESS_DEFAULT;
            } else {
                // ptable
                attr = PTABLE_ACCESS_DEFAULT;
            }
            // try mapping
            err = vnode_map(caps[i], caps[j], /*slot*/0, attr, /*off*/0,
                            /*count*/1, mapping);
            check_result(err, i, j);
            // unmap if mapping succeeded
            if (err_is_ok(err)) {
                err = vnode_unmap(caps[i], mapping);
                if (err_is_fail(err)) {
                    DEBUG_ERR(err, "vnode_unmap");
                }
                assert(err_is_ok(err));
                // XXX: better API?
                err = cap_delete(mapping);
                assert(err_is_ok(err));
            }
        }
    }

    printf("All tests executed: %d PASSED, %d FAILED\n", pass, fail);

    return 0;
}
Example #23
int map_unmap(void)
{
    errval_t err;
    struct capref mem;

    DEBUG_MAP_UNMAP("ram_alloc\n");
    err = ram_alloc(&mem, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        printf("ram_alloc: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    struct capref frame;
    DEBUG_MAP_UNMAP("retype\n");
    err = slot_alloc(&frame);
    if (err_is_fail(err)) {
        printf("slot_alloc: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    err = cap_retype(frame, mem, ObjType_Frame, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        printf("cap_retype: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    DEBUG_MAP_UNMAP("delete ram cap\n");
    err = cap_destroy(mem);
    if (err_is_fail(err)) {
        printf("cap_delete(mem): %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    struct frame_identity fi;
    err = invoke_frame_identify(frame, &fi);
    if (err_is_fail(err)) {
        printf("frame_identify: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    DEBUG_MAP_UNMAP("frame: base = 0x%"PRIxGENPADDR", bits = %d\n", fi.base, fi.bits);

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    dump_pmap(get_current_pmap());
#endif

    struct vregion *vr;
    struct memobj *memobj;
    void *vaddr;
    DEBUG_MAP_UNMAP("map\n");
    err = vspace_map_one_frame(&vaddr, BASE_PAGE_SIZE, frame, &memobj, &vr);
    if (err_is_fail(err)) {
        printf("vspace_map_one_frame: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    char *memory = vaddr;
    DEBUG_MAP_UNMAP("vaddr = %p\n", vaddr);

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    dump_pmap(get_current_pmap());
#endif

    DEBUG_MAP_UNMAP("write 1\n");
    int i;
    for (i = 0; i < BASE_PAGE_SIZE; i++) {
        memory[i] = i % INT8_MAX;
    }
    DEBUG_MAP_UNMAP("verify 1\n");
    for (i = 0; i < BASE_PAGE_SIZE; i++) {
        assert(memory[i] == i % INT8_MAX);
    }

    DEBUG_MAP_UNMAP("delete frame cap\n");
    err = cap_destroy(frame);
    if (err_is_fail(err)) {
        printf("cap_delete(frame): %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    // no mapping should remain here
    dump_pmap(get_current_pmap());
    err = debug_dump_hw_ptables();
    if (err_is_fail(err)) {
        printf("kernel dump ptables: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
#endif

    printf("%s: done\n", __FUNCTION__);
    return 0;
}
Example #24
static void multiboot_cap_reply(struct monitor_binding *st, struct capref cap,
                                errval_t msgerr)
{
    errval_t err;
    static cslot_t multiboot_slots = 0;

    // All multiboot caps received
    if (err_is_fail(msgerr)) {
        // Request bootinfo frame
        struct bootinfo *bi;
        err = map_bootinfo(&bi);
        assert(err_is_ok(err));

        // Init ramfs
        struct dirent *root = ramfs_init();

        // Populate it with contents of multiboot
        populate_multiboot(root, bi);

        // Start the service
        err = start_service(root);
        assert(err_is_ok(err));
        return;
    }

    // Move the cap into the multiboot cnode
    struct capref dest = {
        .cnode = cnode_module,
        .slot  = multiboot_slots++,
    };
    err = cap_copy(dest, cap);
    assert(err_is_ok(err));
    err = cap_destroy(cap);
    assert(err_is_ok(err));

    err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, multiboot_slots);
    assert(err_is_ok(err));
}

static void bootstrap(void)
{
    errval_t err;

    /* Create the module cnode */
    struct capref modulecn_cap = {
        .cnode = cnode_root,
        .slot  = ROOTCN_SLOT_MODULECN,
    };
    err = cnode_create_raw(modulecn_cap, NULL,
                           ((cslot_t)1 << MODULECN_SIZE_BITS), NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cnode_create_raw failed");
        abort();
    }

    // XXX: Set reply handler
    struct monitor_binding *st = get_monitor_binding();
    st->rx_vtbl.multiboot_cap_reply = multiboot_cap_reply;

    // Make first multiboot cap request
    err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, 0);
    assert(err_is_ok(err));
}
Example #25
errval_t spawn_xcore_monitor(coreid_t coreid, int hwid,
                             enum cpu_type cpu_type,
                             const char *cmdline,
                             struct frame_identity urpc_frame_id,
                             struct capref kcb)
{
    uint64_t start = 0;
    const char *monitorname = NULL, *cpuname = NULL;
    genpaddr_t arch_page_size;
    errval_t err;

    err = get_architecture_config(cpu_type, &arch_page_size,
                                  &monitorname, &cpuname);
    assert(err_is_ok(err));

    DEBUG("loading kernel: %s\n", cpuname);
    DEBUG("loading 1st app: %s\n", monitorname);

    // compute size of frame needed and allocate it
    DEBUG("%s:%s:%d: urpc_frame_id.base=%"PRIxGENPADDR"\n",
           __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.base);
    DEBUG("%s:%s:%d: urpc_frame_id.size=%d\n",
           __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.bits);

    if (benchmark_flag) {
        start = bench_tsc();
    }
    static size_t cpu_binary_size;
    static lvaddr_t cpu_binary = 0;
    static genpaddr_t cpu_binary_phys;
    static const char* cached_cpuname = NULL;
    if (cpu_binary == 0) {
        cached_cpuname = cpuname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(cpuname, &cpu_binary, &cpu_binary_phys,
                            &cpu_binary_size);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");
            return err;
        }
    }
    // Ensure caching actually works and we're
    // always loading same binary. If this starts to fail, get rid of caching.
    assert (strcmp(cached_cpuname, cpuname) == 0);

    static size_t monitor_binary_size;
    static lvaddr_t monitor_binary = 0;
    static genpaddr_t monitor_binary_phys;
    static const char* cached_monitorname = NULL;
    if (monitor_binary == 0) {
        cached_monitorname = monitorname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(monitorname, &monitor_binary,
                            &monitor_binary_phys, &monitor_binary_size);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");
            return err;
        }
    }
    // Again, ensure caching actually worked (see above)
    assert (strcmp(cached_monitorname, monitorname) == 0);

    if (benchmark_flag) {
        bench_data->load = bench_tsc() - start;
        start = bench_tsc();
    }

    struct capref cpu_memory_cap;
    struct frame_identity frameid;
    size_t cpu_memory;
    err = allocate_kernel_memory(cpu_binary, arch_page_size,
                                 &cpu_memory_cap, &cpu_memory, &frameid);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not allocate space for new app kernel.");
        return err;
    }

    err = cap_mark_remote(cpu_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");
        return err;
    }

    void *cpu_buf_memory;
    err = vspace_map_one_frame(&cpu_buf_memory, cpu_memory, cpu_memory_cap,
                               NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }
    if (benchmark_flag) {
        bench_data->alloc_cpu = bench_tsc() - start;
        start = bench_tsc();
    }

    /* Chunk of memory to load monitor on the app core */
    struct capref spawn_memory_cap;
    struct frame_identity spawn_memory_identity;

    err = frame_alloc_identify(&spawn_memory_cap,
                               X86_CORE_DATA_PAGES * arch_page_size,
                               NULL, &spawn_memory_identity);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    err = cap_mark_remote(spawn_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");
        return err;
    }
    if (benchmark_flag) {
        bench_data->alloc_mon = bench_tsc() - start;
        start = bench_tsc();
    }

    /* Load cpu */
    struct elf_allocate_state state;
    state.vbase = (char *)cpu_buf_memory + arch_page_size;
    assert(sizeof(struct x86_core_data) <= arch_page_size);
    state.elfbase = elf_virtual_base(cpu_binary);

    struct Elf64_Ehdr *cpu_head = (struct Elf64_Ehdr *)cpu_binary;
    genvaddr_t cpu_entry;

    err = elf_load(cpu_head->e_machine, elfload_allocate, &state,
                   cpu_binary, cpu_binary_size, &cpu_entry);
    if (err_is_fail(err)) {
        return err;
    }
    if (benchmark_flag) {
        bench_data->elf_load = bench_tsc() - start;
        start = bench_tsc();
    }

    err = relocate_cpu_binary(cpu_binary, cpu_head, state, frameid, arch_page_size);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not relocate new kernel.");
        return err;
    }
    if (benchmark_flag) {
        bench_data->elf_reloc = bench_tsc() - start;
    }

    genvaddr_t cpu_reloc_entry = cpu_entry - state.elfbase
                                 + frameid.base + arch_page_size;
    /* Compute entry point in the foreign address space */
    forvaddr_t foreign_cpu_reloc_entry = (forvaddr_t)cpu_reloc_entry;

    /* Setup the core_data struct in the new kernel */
    struct x86_core_data *core_data = (struct x86_core_data *)cpu_buf_memory;
    switch (cpu_head->e_machine) {
    case EM_X86_64:
    case EM_K1OM:
        core_data->elf.size = sizeof(struct Elf64_Shdr);
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)cpu_head->e_shoff;
        core_data->elf.num  = cpu_head->e_shnum;
        break;
    case EM_386:
        core_data->elf.size = sizeof(struct Elf32_Shdr);
        struct Elf32_Ehdr *head32 = (struct Elf32_Ehdr *)cpu_binary;
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)head32->e_shoff;
        core_data->elf.num  = head32->e_shnum;
        break;
    default:
        return SPAWN_ERR_UNKNOWN_TARGET_ARCH;
    }
    core_data->module_start = cpu_binary_phys;
    core_data->module_end   = cpu_binary_phys + cpu_binary_size;
    core_data->urpc_frame_base = urpc_frame_id.base;
    core_data->urpc_frame_bits = urpc_frame_id.bits;
    core_data->monitor_binary   = monitor_binary_phys;
    core_data->monitor_binary_size = monitor_binary_size;
    core_data->memory_base_start = spawn_memory_identity.base;
    core_data->memory_bits       = spawn_memory_identity.bits;
    core_data->src_core_id       = disp_get_core_id();
    core_data->src_arch_id       = my_arch_id;
    core_data->dst_core_id       = coreid;


    struct frame_identity fid;
    err = invoke_frame_identify(kcb, &fid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Invoke frame identity for KCB failed. "
                            "Did you add the syscall handler for that architecture?");
    }
    DEBUG("%s:%s:%d: fid.base is 0x%"PRIxGENPADDR"\n",
           __FILE__, __FUNCTION__, __LINE__, fid.base);
    core_data->kcb = (genpaddr_t) fid.base;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
    core_data->chan_id           = chanid;
#endif

    if (cmdline != NULL) {
        // copy as much of command line as will fit
        snprintf(core_data->kernel_cmdline, sizeof(core_data->kernel_cmdline),
                "%s %s", cpuname, cmdline);
        // ensure termination
        core_data->kernel_cmdline[sizeof(core_data->kernel_cmdline) - 1] = '\0';

        DEBUG("%s:%s:%d: %s\n", __FILE__, __FUNCTION__, __LINE__, core_data->kernel_cmdline);
    }

    /* Invoke kernel capability to boot new core */
    if (cpu_type == CPU_X86_64 || cpu_type == CPU_K1OM) {
        start_aps_x86_64_start(hwid, foreign_cpu_reloc_entry);
    }

#ifndef __k1om__
    else if (cpu_type == CPU_X86_32) {
        start_aps_x86_32_start(hwid, foreign_cpu_reloc_entry);
    }
#endif

    /* Clean up */
    // XXX: Should not delete the remote caps?
    err = cap_destroy(spawn_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }
    err = vspace_unmap(cpu_buf_memory);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace unmap CPU driver memory failed");
    }
    err = cap_destroy(cpu_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }

    return SYS_ERR_OK;
}
Example #26
static void span_domain_request(struct monitor_binding *mb,
                                uintptr_t domain_id, uint8_t core_id,
                                struct capref vroot, struct capref disp)
{
    errval_t err, err2;

    trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_MONITOR_SPAN0, core_id);

    struct span_state *state;
    uintptr_t state_id = 0;

    err = span_state_alloc(&state, &state_id);
    if (err_is_fail(err)) {
        err = err_push(err, MON_ERR_SPAN_STATE_ALLOC);
        goto reply;
    }

    state->core_id   = core_id;
    state->vroot     = vroot;
    state->mb        = mb;
    state->domain_id = domain_id;

    trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_MONITOR_SPAN1, core_id);

    /* Look up the destination monitor */
    struct intermon_binding *ib;
    err = intermon_binding_get(core_id, &ib);
    if (err_is_fail(err)) {
        goto reply;
    }

    /* Identify vroot */
    struct capability vroot_cap;
    err = monitor_cap_identify(vroot, &vroot_cap);
    if (err_is_fail(err)) {
        err = err_push(err, MON_ERR_CAP_IDENTIFY);
        goto reply;
    }
    if (vroot_cap.type != ObjType_VNode_x86_64_pml4) { /* Check type */
        err = MON_ERR_WRONG_CAP_TYPE;
        goto reply;
    }

    /* Identify the dispatcher frame */
    struct frame_identity frameid;
    err = invoke_frame_identify(disp, &frameid);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_FRAME_IDENTIFY);
        goto reply;
    }

    err = monitor_remote_relations(disp, RRELS_COPY_BIT, RRELS_COPY_BIT, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_remote_relations failed");
        return;
    }
    err = monitor_remote_relations(vroot, RRELS_COPY_BIT, RRELS_COPY_BIT, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_remote_relations failed");
        return;
    }

    /* Send msg to destination monitor */
    err = ib->tx_vtbl.span_domain_request(ib, NOP_CONT, state_id,
                                          vroot_cap.u.vnode_x86_64_pml4.base,
                                          frameid.base, frameid.bits);

    if (err_is_fail(err)) {
        err = err_push(err, MON_ERR_SEND_REMOTE_MSG);
        goto reply;
    }
    goto cleanup;

 reply:
    err2 = mb->tx_vtbl.span_domain_reply(mb, NOP_CONT, err, domain_id);
    if (err_is_fail(err2)) {
        // XXX: Cleanup?
        USER_PANIC_ERR(err2, "Failed to reply to the user domain");
    }
    if(state_id != 0) {
        err2 = span_state_free(state_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Failed to free span state");
        }
    }

 cleanup:
    err2 = cap_destroy(vroot);
    if (err_is_fail(err2)) {
        USER_PANIC_ERR(err2, "Failed to destroy span_vroot cap");
    }
    err2 = cap_destroy(disp);
    if (err_is_fail(err2)) {
        USER_PANIC_ERR(err2, "Failed to destroy disp cap");
    }
}
Example #27
static errval_t spawn(char *path, char *const argv[], char *argbuf,
                      size_t argbytes, char *const envp[],
                      struct capref inheritcn_cap, struct capref argcn_cap,
                      domainid_t *domainid)
{
    errval_t err, msgerr;

    /* read file into memory */
    vfs_handle_t fh;
    err = vfs_open(path, &fh);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    struct vfs_fileinfo info;
    err = vfs_stat(fh, &info);
    if (err_is_fail(err)) {
        vfs_close(fh);
        return err_push(err, SPAWN_ERR_LOAD);
    }

    assert(info.type == VFS_FILE);
    uint8_t *image = malloc(info.size);
    if (image == NULL) {
        vfs_close(fh);
        return SPAWN_ERR_LOAD; // err is SYS_ERR_OK here; nothing to push
    }

    size_t pos = 0, readlen;
    do {
        err = vfs_read(fh, &image[pos], info.size - pos, &readlen);
        if (err_is_fail(err)) {
            vfs_close(fh);
            free(image);
            return err_push(err, SPAWN_ERR_LOAD);
        } else if (readlen == 0) {
            vfs_close(fh);
            free(image);
            return SPAWN_ERR_LOAD; // XXX
        } else {
            pos += readlen;
        }
    } while (err_is_ok(err) && readlen > 0 && pos < info.size);

    err = vfs_close(fh);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to close file %s", path);
    }

    // find short name (last part of path)
    char *name = strrchr(path, VFS_PATH_SEP);
    if (name == NULL) {
        name = path;
    } else {
        name++;
    }

    /* spawn the image */
    struct spawninfo si;
    err = spawn_load_image(&si, (lvaddr_t)image, info.size, CURRENT_CPU_TYPE,
                           name, my_core_id, argv, envp, inheritcn_cap,
                           argcn_cap);
    if (err_is_fail(err)) {
        free(image);
        return err;
    }

    free(image);

    /* request connection from monitor */
    struct monitor_blocking_rpc_client *mrpc = get_monitor_blocking_rpc_client();
    struct capref monep;
    err = mrpc->vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    } else if (err_is_fail(msgerr)) {
        return msgerr;
    }

    /* copy connection into the new domain */
    struct capref destep = {
        .cnode = si.rootcn,
        .slot  = ROOTCN_SLOT_MONITOREP,
    };
    err = cap_copy(destep, monep);
    if (err_is_fail(err)) {
        spawn_free(&si);
        cap_destroy(monep);
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    err = cap_destroy(monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    debug_printf("spawning %s on core %u\n", path, my_core_id);

    /* give the perfmon capability */
    struct capref dest, src;
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, INIT_ERR_COPY_PERF_MON);
    }

    /* run the domain */
    err = spawn_run(&si);
    if (err_is_fail(err)) {
        spawn_free(&si);
        return err_push(err, SPAWN_ERR_RUN);
    }

    // Allocate domain id
    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
    assert(pe != NULL);
    memset(pe, 0, sizeof(struct ps_entry));
    memcpy(pe->argv, argv, MAX_CMDLINE_ARGS*sizeof(*argv));
    pe->argbuf = argbuf;
    pe->argbytes = argbytes;
    /*
     * NB: It's important to keep a copy of the DCB *and* the root
     * CNode around.  We need to revoke both (in the right order, see
     * kill_domain() below), so that we ensure no one else is
     * referring to the domain's CSpace anymore. Especially the loop
     * created by placing rootcn into its own address space becomes a
     * problem here.
     */
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    pe->rootcn = si.rootcn;
    assert(err_is_ok(err));
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;
    err = ps_allocate(pe, domainid);
    if(err_is_fail(err)) {
        free(pe);
    }

    // Store in target dispatcher frame
    struct dispatcher_generic *dg = get_dispatcher_generic(si.handle);
    dg->domain_id = *domainid;

    /* cleanup */
    err = spawn_free(&si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_FREE);
    }

    return SYS_ERR_OK;
}

static void retry_use_local_memserv_response(void *a)
{
    errval_t err;

    struct spawn_binding *b = (struct spawn_binding*)a;

    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);

    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // try again
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_use_local_memserv_response,a));
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply\n");
    }

}
Example #28
/**
 * \brief Setup an initial cspace
 *
 * Create an initial cspace layout
 */
static errval_t spawn_setup_cspace(struct spawninfo *si)
{
    errval_t err;
    struct capref t1;

    /* Create root CNode */
    err = cnode_create(&si->rootcn_cap, &si->rootcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_ROOTCN);
    }

    /* Create taskcn */
    err = cnode_create(&si->taskcn_cap, &si->taskcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_TASKCN);
    }

    // Mint into rootcn setting the guard
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_TASKCN;
    err = cap_mint(t1, si->taskcn_cap, 0,
                   GUARD_REMAINDER(2 * DEFAULT_CNODE_BITS));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_TASKCN);
    }

    /* Create slot_alloc_cnode */
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC0;
    err = cnode_create_raw(t1, NULL, (1<<SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC1;
    err = cnode_create_raw(t1, NULL, (1<<SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC2;
    err = cnode_create_raw(t1, NULL, (1<<SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }

    // Create DCB
    si->dcb.cnode = si->taskcn;
    si->dcb.slot  = TASKCN_SLOT_DISPATCHER;
    err = dispatcher_create(si->dcb);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_DISPATCHER);
    }

    // Give domain endpoint to itself (in taskcn)
    struct capref selfep = {
        .cnode = si->taskcn,
        .slot = TASKCN_SLOT_SELFEP,
    };
    err = cap_retype(selfep, si->dcb, ObjType_EndPoint, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SELFEP);
    }

    // Map root CNode (in taskcn)
    t1.cnode = si->taskcn;
    t1.slot  = TASKCN_SLOT_ROOTCN;
    err = cap_mint(t1, si->rootcn_cap, 0, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_ROOTCN);
    }

#ifdef TRACING_EXISTS
    // Set up tracing for the child
    err = trace_setup_child(si->taskcn, si->handle);
    if (err_is_fail(err)) {
        printf("Warning: error setting up tracing for child domain\n");
        // SYS_DEBUG(err, ...);
    }
#endif

    // XXX: copy over argspg?
    memset(&si->argspg, 0, sizeof(si->argspg));

    /* Fill up basecn */
    struct capref   basecn_cap;
    struct cnoderef basecn;

    // Create basecn in rootcn
    basecn_cap.cnode = si->rootcn;
    basecn_cap.slot  = ROOTCN_SLOT_BASE_PAGE_CN;
    err = cnode_create_raw(basecn_cap, &basecn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }

    // Place the ram caps
    for (uint8_t i = 0; i < DEFAULT_CNODE_SLOTS; i++) {
        struct capref base = {
            .cnode = basecn,
            .slot  = i
        };
        struct capref ram;
        err = ram_alloc(&ram, BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_RAM_ALLOC);
        }
        err = cap_copy(base, ram);

        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_COPY);
        }
        err = cap_destroy(ram);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_DESTROY);
        }
    }

    return SYS_ERR_OK;
}
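
The basecn loop above and the multiboot handler in Example #24 both "move" a capability the same way: copy it into the destination slot, then destroy the source copy. A sketch of that idiom (the helper name is invented; the error codes are the ones used in the loop above):

static errval_t cap_move_sketch(struct capref dest, struct capref src)
{
    errval_t err = cap_copy(dest, src);  // create the new copy first
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CAP_COPY);
    }
    err = cap_destroy(src);              // then delete the original
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CAP_DESTROY);
    }
    return SYS_ERR_OK;
}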

static errval_t spawn_setup_vspace(struct spawninfo *si)
{
    errval_t err;

    /* Create pagecn */
    si->pagecn_cap = (struct capref){.cnode = si->rootcn, .slot = ROOTCN_SLOT_PAGECN};
    err = cnode_create_raw(si->pagecn_cap, &si->pagecn, PAGE_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_PAGECN);
    }

    /* Init pagecn's slot allocator */

    // XXX: satisfy a peculiarity of the single_slot_alloc_init_raw API
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(PAGE_CNODE_SLOTS);
    void *buf = malloc(bufsize);
    assert(buf != NULL);

    err = single_slot_alloc_init_raw(&si->pagecn_slot_alloc, si->pagecn_cap,
                                     si->pagecn, PAGE_CNODE_SLOTS,
                                     buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
    }

    // Create root of pagetable
    err = si->pagecn_slot_alloc.a.alloc(&si->pagecn_slot_alloc.a, &si->vtree);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    // top-level table should always live in slot 0 of pagecn
    assert(si->vtree.slot == 0);

    switch(si->cpu_type) {
    case CPU_X86_64:
    case CPU_K1OM:
        err = vnode_create(si->vtree, ObjType_VNode_x86_64_pml4);
        break;

    case CPU_X86_32:
    case CPU_SCC:
#ifdef CONFIG_PAE
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdpt);
#else
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdir);
#endif
        break;

    case CPU_ARM5:
    case CPU_ARM7:
        err = vnode_create(si->vtree, ObjType_VNode_ARM_l1);
        break;

    default:
        assert(!"Other architecture");
        return err_push(err, SPAWN_ERR_UNKNOWN_TARGET_ARCH);
    }

    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_VNODE);
    }

    err = spawn_vspace_init(si, si->vtree, si->cpu_type);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_INIT);
    }

    return SYS_ERR_OK;
}

#if 0
/**
 * \brief Lookup and map an image
 */
static errval_t spawn_map(const char *name, struct bootinfo *bi,
                          lvaddr_t *binary, size_t *binary_size)
{
    errval_t err;

    /* Get the module from the multiboot */
    struct mem_region *module = multiboot_find_module(bi, name);
    if (module == NULL) {
        return SPAWN_ERR_FIND_MODULE;
    }

    /* Map the image */
    err = spawn_map_module(module, binary_size, binary, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_MODULE);
    }

    return SYS_ERR_OK;
}
Example #29
int main(int argc, char* argv[])
{

    size_t size_wanted = 1<<20;
    size_t runs = 100;
    struct reset_opt *reset = NULL;
    struct measure_opt *measure = NULL;
    bool dump = false;

    assert(argc>0);
    if (argc == 1) {
        usage(argv[0]);
        return 0;
    }

    bool args_ok = true;

    for (int arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "help") == 0
            || strcmp(argv[arg], "--help") == 0
            || strcmp(argv[arg], "-h") == 0)
        {
            usage(argv[0]);
            return 0;
        }
        if (strncmp(argv[arg], "size=", 5) == 0) {
            size_wanted = atol(argv[arg]+5);
        }
        if (strncmp(argv[arg], "logsize=", 8) == 0) {
            size_t logsize = atol(argv[arg]+8);
            if (logsize > 31) {
                printf("ERROR: logsize too big\n");
                args_ok = false;
            }
            else {
                size_wanted = 1 << logsize;
            }
        }
        else if (strncmp(argv[arg], "count=", 6) == 0) {
            size_wanted = atol(argv[arg]+6)*sizeof(struct cte);
        }
        else if (strncmp(argv[arg], "logcount=", 9) == 0) {
            size_t logcount = atol(argv[arg]+9);
            if (logcount > (31-OBJBITS_CTE)) {
                printf("ERROR: logcount too big\n");
                args_ok = false;
            }
            else {
                size_wanted = (1 << logcount)*sizeof(struct cte);
            }
        }
        else if (strncmp(argv[arg], "runs=", 5) == 0) {
            runs = atol(argv[arg]+5);
        }
        else if (strncmp(argv[arg], "reset=", 6) == 0) {
            char *name = argv[arg]+6;
            int i;
            for (i = 0; reset_opts[i].name; i++) {
                if (strcmp(reset_opts[i].name, name) == 0) {
                    reset = &reset_opts[i];
                    break;
                }
            }
            if (!reset_opts[i].name) {
                args_ok = false;
                printf("ERROR: unkown reset \"%s\"\n", name);
            }
        }
        else if (strncmp(argv[arg], "measure=", 8) == 0) {
            char *name = argv[arg]+8;
            if (strcmp(name, "dump") == 0) {
                measure = NULL;
                dump = true;
            }
            else {
                int i;
                for (i = 0; measure_opts[i].name; i++) {
                    if (strcmp(measure_opts[i].name, name) == 0) {
                        measure = &measure_opts[i];
                        break;
                    }
                }

                if (measure_opts[i].name) {
                    dump = false;
                }
                else {
                    args_ok = false;
                    printf("ERROR: unkown measure \"%s\"\n", name);
                }
            }
        }
        else {
            args_ok = false;
            printf("ERROR: unkown argument %s\n", argv[arg]);
        }
    }
    if (!args_ok) {
        usage(argv[0]);
        return 1;
    }

    assert(size_wanted > 0);
    assert(runs > 0);
    assert(reset);
    assert(measure || dump);

    errval_t err;
    struct capref frame;
    size_t size;
    err = frame_alloc(&frame, size_wanted, &size);
    assert_err(err, "alloc");
    assert(size >= size_wanted);
    printf("got %lu bytes\n", size);

    struct memobj *m;
    struct vregion *v;
    void *addr;

    err = vspace_map_one_frame(&addr, size, frame, &m, &v);
    assert_err(err, "map");

    if (dump) {
        reset_and_dump(addr, size_wanted, runs, reset->fn, reset->name);
    }
    else {
        bench_init();

        char *bench_name = malloc(strlen(reset->name)+strlen(measure->name)+2);
        strcpy(bench_name, reset->name);
        strcat(bench_name, ":");
        strcat(bench_name, measure->name);
        test(addr, size_wanted, runs, reset->fn, measure->fn, bench_name);

        free(bench_name);
    }

    printf("client done\n");

    vregion_destroy(v);
    cap_destroy(frame);

    return 0;
}
Example #30
/**
 * \brief Initialise a new UMP channel and initiate a binding
 *
 * \param uc  Storage for channel state
 * \param cont Continuation for bind completion/failure
 * \param qnode Storage for an event queue node (used for queuing bind request)
 * \param iref IREF to which to bind
 * \param monitor_binding Monitor binding to use
 * \param inchanlen Size of incoming channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param outchanlen Size of outgoing channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param notify_cap Capability to use for notifications, or #NULL_CAP
 */
errval_t ump_chan_bind(struct ump_chan *uc, struct ump_bind_continuation cont,
                       struct event_queue_node *qnode,  iref_t iref,
                       struct monitor_binding *monitor_binding,
                       size_t inchanlen, size_t outchanlen,
                       struct capref notify_cap)
{
    errval_t err;

    // round up channel sizes to message size
    inchanlen = ROUND_UP(inchanlen, UMP_MSG_BYTES);
    outchanlen = ROUND_UP(outchanlen, UMP_MSG_BYTES);

    // compute size of frame needed and allocate it
    size_t framesize = inchanlen + outchanlen;
    err = frame_alloc(&uc->frame, framesize, &framesize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, framesize, uc->frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) { 
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state
    err = ump_chan_init(uc, buf, inchanlen, (char *)buf + inchanlen, outchanlen);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    // Ids for tracing
    struct frame_identity id;
    err = invoke_frame_identify(uc->frame, &id);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }
    uc->recvid = (uintptr_t)id.base;
    uc->sendid = (uintptr_t)(id.base + inchanlen);

    // store bind args
    uc->bind_continuation = cont;
    uc->monitor_binding = monitor_binding;
    uc->iref = iref;
    uc->inchanlen = inchanlen;
    uc->outchanlen = outchanlen;
    uc->notify_cap = notify_cap;

    // wait for the ability to use the monitor binding
    uc->connstate = UMP_BIND_WAIT;
    event_mutex_enqueue_lock(&monitor_binding->mutex, qnode,
                             MKCLOSURE(send_bind_cont, uc));

    return SYS_ERR_OK;
}