Example #1
errval_t flounder_stub_send_cap(struct flounder_cap_state *s,
                                struct monitor_binding *mb,
                                uintptr_t monitor_id,
                                struct capref cap, bool give_away,
                                void (*cont)(void *st))
{
    errval_t err;

    s->cap_send_continuation = cont;

    if (give_away) {
        err = mb->tx_vtbl.cap_move_request(mb, MKCONT(cap_send_cont, s),
                                           monitor_id, cap, s->tx_capnum);
    }
    else {
        err = mb->tx_vtbl.cap_send_request(mb, MKCONT(cap_send_cont, s),
                                           monitor_id, cap, s->tx_capnum);
    }
    if (err_is_ok(err)) {
        s->tx_capnum++;
        return err;
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // register to retry
        return mb->register_send(mb, mb->waitset, MKCONT(cap_send_cont, s));
    } else {
        return err_push(err, LIB_ERR_MONITOR_CAP_SEND);
    }
}
Example #2
static errval_t spawn_reply(struct spawn_binding *b, errval_t rerr,
                            domainid_t domainid)
{
    errval_t err;

    err = b->tx_vtbl.spawn_domain_response(b, NOP_CONT, rerr, domainid);

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending spawn_domain reply\n");

        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // this will be freed in the retry handler
            struct pending_spawn_response *sr =
                malloc(sizeof(struct pending_spawn_response));
            if (sr == NULL) {
                return LIB_ERR_MALLOC_FAIL;
            }
            sr->b = b;
            sr->err = rerr;
            sr->domainid = domainid;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_spawn_domain_response, sr));
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                free(sr);
                DEBUG_ERR(err, "register_send failed!");
                return err;
            }
        }
    }

    return SYS_ERR_OK;
}
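The retry handler and its state struct are referenced above but not shown. Below is a minimal sketch of what retry_spawn_domain_response could look like, assuming struct pending_spawn_response carries exactly the three fields saved above (b, err, domainid); it illustrates the pattern and is not the original implementation.

static void retry_spawn_domain_response(void *a)
{
    struct pending_spawn_response *sr = a;
    struct spawn_binding *b = sr->b;

    // re-attempt the reply with the saved arguments
    errval_t err = b->tx_vtbl.spawn_domain_response(b, NOP_CONT, sr->err,
                                                    sr->domainid);
    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // still busy: re-register this handler with the same state
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_spawn_domain_response, sr));
        if (err_is_ok(err)) {
            return; // keep sr alive for the next attempt
        }
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "retrying spawn_domain reply failed");
    }
    free(sr); // freed here, as promised at the allocation site
}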
Example #3
static void
new_monitor_binding_reply_cont(struct monitor_binding *b,
                               errval_t reterr, struct capref retcap,
                               uintptr_t st)
{
    errval_t err =
        b->tx_vtbl.new_monitor_binding_reply(b, NOP_CONT, reterr, retcap, st);

    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct monitor_state *ms = b->st;
            struct new_monitor_binding_reply_state *me =
                malloc(sizeof(struct new_monitor_binding_reply_state));
            assert(me != NULL);
            me->args.err = reterr;
            me->args.ep = retcap;
            me->args.st = st;
            me->elem.cont = new_monitor_binding_reply_handler;
            err = monitor_enqueue_send(b, &ms->queue,
                                       get_default_waitset(), &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err, "failed to send new_monitor_binding_reply");
    }
}
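The queued-send path above depends on new_monitor_binding_reply_handler, which is not shown. A plausible sketch, assuming the send queue invokes the element's cont callback once the binding can transmit again, and that struct new_monitor_binding_reply_state embeds elem so the cast below is valid:

static void new_monitor_binding_reply_handler(struct monitor_binding *b,
                                              struct monitor_msg_queue_elem *e)
{
    // recover the saved arguments and retry via the same continuation;
    // on another TX_BUSY the continuation simply enqueues fresh state
    struct new_monitor_binding_reply_state *me =
        (struct new_monitor_binding_reply_state *)e;
    new_monitor_binding_reply_cont(b, me->args.err, me->args.ep, me->args.st);
    free(me);
}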
Example #4
/**
 * \brief polls the channels of the IOAT DMA device
 *
 * \param dev   IOAT DMA device
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_DEVICE_IDLE if there is nothing completed on the channels
 *          errval on error
 */
errval_t ioat_dma_device_poll_channels(struct dma_device *dev)
{
    errval_t err;

    uint8_t idle = 0x1;

    for (uint8_t i = 0; i < dev->channels.count; ++i) {
        err = ioat_dma_channel_poll(dev->channels.c[i]);
        switch (err_no(err)) {
            case DMA_ERR_CHAN_IDLE:
                break;
            case SYS_ERR_OK:
                idle = 0;
                break;
            default:
                return err;
        }
    }

    if (idle) {
        return DMA_ERR_DEVICE_IDLE;
    }

    return SYS_ERR_OK;
}
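A hypothetical caller illustrating the three-way contract documented above: DMA_ERR_DEVICE_IDLE means "nothing completed yet" and is worth retrying, while any other failure is a real error. The function name is invented for this sketch.

// busy-poll until at least one channel has completed a transfer
static errval_t ioat_dma_device_poll_until_done(struct dma_device *dev)
{
    errval_t err;
    do {
        err = ioat_dma_device_poll_channels(dev);
    } while (err_no(err) == DMA_ERR_DEVICE_IDLE);
    return err; // SYS_ERR_OK, or the first channel error encountered
}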
Example #5
static void get_io_cap(struct monitor_blocking_binding *b)
{
    // XXX: We should not just hand out this cap to everyone
    // who requests it. There is currently no way to determine
    // if the client is a valid recipient
    errval_t err;
    struct capref src = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_IO
    };

    err = b->tx_vtbl.get_io_cap_response(b, NOP_CONT, src,
            SYS_ERR_OK);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT((void (*)(void *))get_io_cap, b));
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "register_send failed");
            }
            // retry registered; don't fall through to the panic below
            return;
        }

        USER_PANIC_ERR(err, "sending get_io_cap_response failed");
    }
}
Example #6
static void ipi_alloc_notify_reply_cont(struct monitor_binding *b,
                                        uintptr_t state,
                                        struct capref notify_cap,
                                        errval_t reterr)
{
    errval_t err =
        b->tx_vtbl.ipi_alloc_notify_reply(b, NOP_CONT, state,
                                          notify_cap, reterr);

    if(err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct monitor_state *st = b->st;
            struct ipi_alloc_notify_reply_state *me =
                malloc(sizeof(struct ipi_alloc_notify_reply_state));
            assert(me != NULL);
            me->args.state = state;
            me->args.notify = notify_cap;
            me->args.err = reterr;
            me->elem.cont = ipi_alloc_notify_reply_handler;
            err = monitor_enqueue_send(b, &st->queue,
                                       get_default_waitset(), &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }
        USER_PANIC_ERR(err, "sending reply");
    }
    assert(err_is_ok(err));
}
Example #7
/**
 * \brief Non-blocking write to a terminal.
 *
 * \param client  Terminal client state.
 * \param data    Buffer holding characters to write.
 * \param length  The number of characters to write.
 * \param cont    Continuation invoked once the write completes.
 *
 * \return SYS_ERR_OK if successful.
 *         TERM_ERR_TX_BUSY if another message is buffered but not yet sent.
 *         TERM_ERR_IO if an I/O error occurred.
 */
errval_t term_client_write(struct term_client *client, const char *data,
                           size_t length, struct event_closure cont)
{
    errval_t err = SYS_ERR_OK;
    char *outdata = NULL;

    assert(client != NULL);
    assert(data != NULL);
    assert(length > 0);

    /* Make a copy of the data, since the output filters might modify them. */
    outdata = malloc(length);
    assert(outdata != NULL);
    memcpy(outdata, data, length);

    /* apply output filters */
    term_filter_apply(client->output_filters, &outdata, &length);

    /* try to send characters */
    err = client->out_binding->tx_vtbl.characters(client->out_binding, cont,
                                                  outdata, length);
    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = err_push(err, TERM_ERR_TX_BUSY);
        goto out;
    } else if (err_is_fail(err)) {
        err = err_push(err, TERM_ERR_IO);
        goto out;
    }

out:
    /* free data */
    free(outdata);
    return err;
}
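Because term_client_write() pushes TERM_ERR_TX_BUSY instead of registering a retry itself, the caller is responsible for trying again. A hedged sketch of one possible caller (write_all is a name invented here):

static errval_t write_all(struct term_client *client, const char *s,
                          size_t len)
{
    errval_t err;
    while (true) {
        err = term_client_write(client, s, len, NOP_CONT);
        if (err_no(err) != TERM_ERR_TX_BUSY) {
            return err; // success or a permanent error
        }
        // previous message still buffered: let the waitset make progress
        errval_t werr = event_dispatch(get_default_waitset());
        if (err_is_fail(werr)) {
            return werr;
        }
    }
}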
Example #8
static void alloc_iref_reply_cont(struct monitor_binding *b,
                                    uintptr_t service_id,
                                    iref_t iref, errval_t reterr)
{
    errval_t err;

    err = b->tx_vtbl.alloc_iref_reply(b, NOP_CONT, service_id, iref, reterr);
    if (err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct alloc_iref_reply_state *me =
                malloc(sizeof(struct alloc_iref_reply_state));
            assert(me != NULL);
            struct monitor_state *ist = b->st;
            assert(ist != NULL);
            me->args.service_id = service_id;
            me->args.iref = iref;
            me->args.err = reterr;
            me->b = b;
            me->elem.cont = alloc_iref_reply_handler;

            err = monitor_enqueue_send(b, &ist->queue,
                                       get_default_waitset(), &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err, "reply failed");
    }
}
Example #9
static void
capsend_mc_send_cont(struct intermon_binding *b, struct intermon_msg_queue_elem *e)
{
    struct capsend_mc_msg_st *msg_st = (struct capsend_mc_msg_st*)e;
    struct capsend_mc_st *mc_st = msg_st->mc_st;
    errval_t err = SYS_ERR_OK;

    // if do_send is false, an error occurred in the multicast setup, so do
    // not send anything
    if (mc_st->do_send) {
        err = mc_st->send_fn(b, &mc_st->caprep, mc_st);
    }

    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = capsend_target(msg_st->dest, (struct msg_queue_elem*)msg_st);
    }

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "sending dequeued capops message");
    }

    // decrement counter of number of queued messages
    if (!--mc_st->num_queued) {
        // if counter is zero, cleanup outgoing memory
        free(mc_st->msg_st_arr);
        mc_st->msg_st_arr = NULL;
        if (!mc_st->do_send || !mc_st->num_pending) {
            // if the send has been aborted, also cleanup cross-call state
            free(mc_st);
        }
    }
}
Example #10
static void handle_notification(void *arg)
{
    struct lmp_endpoint *ep = arg;
    errval_t err;

    do { // consume messages
        struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
        err = lmp_endpoint_recv(ep, &msg.buf, NULL);

        if (err_is_ok(err)) {
            if(msg.buf.msglen == 1) {
                domainid_t domid = msg.words[0];

                // XXX: This is done by spawnd now
                if (domid != 0) {
                    debug_printf("Dispatcher with domain ID %"PRIuDOMAINID" exited\n",
                                 domid);
                }
            } else if(msg.buf.msglen == sizeof(struct RAM) / sizeof(uintptr_t) + 1) {
#ifndef __arm__
                //defined(__x86_64__) || defined(__i386__)
                union rammsg {
                    uintptr_t msgwords[LMP_MSG_LENGTH];
                    struct RAM ram;
                } *u;
                u = (union rammsg *)&msg.words;

                /* printf("%s.%d: RAM cap deleted, base = %" PRIxGENPADDR ", bits = %u\n", */
                /*        disp_name(), disp_get_core_id(), ram->base, ram->bits); */

                err = reclaim_memory(u->ram.base, u->ram.bits);
                if(err_is_fail(err)) {
                    DEBUG_ERR(err, "reclaim_memory");
                }
#else
                /* XXX: Disabling memory reclamation on ARM. I
                 * couldn't get the compiler to accept the above code
                 * due to strict aliasing restrictions. I do believe
                 * though that the above is according to the C99
                 * spec. Please help fix it, so that it can be
                 * enabled.
                 */
#endif
            } else {
                printf("%s: Unknown kernel notification of length %zu received\n",
                       disp_name(), msg.buf.msglen);
            }
        } else if (err_no(err) != LIB_ERR_NO_LMP_MSG) {
            DEBUG_ERR(err, "unexpected error from lmp_endpoint_recv");
        }
    } while(err_is_ok(err));

    // re-register
    struct event_closure cl = {
        .handler = handle_notification,
        .arg = arg,
    };
    err = lmp_endpoint_register(ep, get_default_waitset(), cl);
    assert(err_is_ok(err));
}
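The strict-aliasing problem described in the disabled ARM branch can usually be sidestepped by copying the message words instead of type-punning through a union. A sketch of a drop-in replacement for that branch, assuming struct RAM starts at msg.words[0] exactly as in the x86 path:

struct RAM ram;
memcpy(&ram, msg.words, sizeof(ram)); // memcpy is exempt from aliasing rules
err = reclaim_memory(ram.base, ram.bits);
if (err_is_fail(err)) {
    DEBUG_ERR(err, "reclaim_memory");
}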
Example #11
/**
 * \brief Wake up a thread on a foreign dispatcher while disabled.
 *
 * \param core_id       Core ID to wake up on
 * \param thread        Pointer to thread to wake up
 * \param mydisp        Dispatcher this function is running on
 *
 * \return SYS_ERR_OK on success.
 */
static errval_t domain_wakeup_on_coreid_disabled(coreid_t core_id,
                                                 struct thread *thread,
                                                 dispatcher_handle_t mydisp)
{
    struct domain_state *ds = get_domain_state();

    // XXX: Ugly hack to allow waking up on a core id we don't have a
    // dispatcher handler for
    thread->coreid = core_id;

    // Catch this early
    assert_disabled(ds != NULL);
    if (ds->b[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;
    }

    thread_enqueue(thread, &ds->remote_wakeup_queue);

    // Signal the inter-disp waitset of this event
    struct event_closure closure = {
        .handler = handle_wakeup_on
    };
    errval_t err =
        waitset_chan_trigger_closure_disabled(&ds->interdisp_ws,
                                              &ds->remote_wakeup_event,
                                              closure,
                                              mydisp);
    assert_disabled(err_is_ok(err) ||
                    err_no(err) == LIB_ERR_CHAN_ALREADY_REGISTERED);

    return SYS_ERR_OK;
}
Example #12
/**
 * \brief Continuation function for binding. This function
 *        sends the bind request to the monitor.
 *
 * \param st pointer to the multihop_chan
 */
static void multihop_chan_bind_cont(void *st)
{
    errval_t err;
    struct multihop_chan *mc = st;
    struct monitor_binding *monitor_binding = mc->monitor_binding;

    // send bind request to the monitor
    // we do not get a lock on the monitor binding, as we did not expose it to the application
    MULTIHOP_DEBUG("sending bind request to monitor...\n");
    err = monitor_binding->tx_vtbl.multihop_bind_client_request(monitor_binding,
            NOP_CONT, mc->iref, mc->my_vci);

    if (err_is_ok(err)) {
        // request was successfully sent
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // register to retry
        err = monitor_binding->register_send(monitor_binding,
                monitor_binding->waitset, MKCONT(multihop_chan_bind_cont, st));
        assert(err_is_ok(err));
    } else { // permanent failure sending message
        mc->bind_continuation.handler(mc->bind_continuation.st,
                err_push(err, LIB_ERR_BIND_MULTIHOP_REQ), NULL);
        //TODO destroy channel state?
    }
}
Example #13
static void send_bind_reply(void *arg)
{
    struct bind_lmp_reply_state *st = arg;
    struct monitor_binding *b = st->b;
    errval_t err;

    err = st->b->tx_vtbl.bind_lmp_reply_monitor(st->b, NOP_CONT, st->args.err,
                                                st->args.mon_id, st->args.conn_id, 
                                                st->args.ep);
    if (err_is_ok(err)) {
        event_mutex_unlock(&b->mutex);
        free(st);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = st->b->register_send(st->b, st->b->waitset,
                                   MKCONT(send_bind_reply,st));
        assert(err_is_ok(err)); // shouldn't fail, as we have the mutex
    } else {
        event_mutex_unlock(&b->mutex);
        USER_PANIC_ERR(err, "failed sending back reply to LMP bind request;"
                       " request dropped!");
        if (st->lc != NULL) {
            lmp_chan_destroy(st->lc);
            // FIXME: how do we tell the binding about this!?
        }
        free(st);
    }
}
Example #14
void
update_owner__rx_handler(struct intermon_binding *b, intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;
    struct capref capref;
    struct capability cap;
    caprep_to_capability(&caprep, &cap);

    err = slot_alloc(&capref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate slot for owner update");
    }

    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_ok(err)) {
        err = monitor_set_cap_owner(cap_root, get_cap_addr(capref),
                                    get_cap_valid_bits(capref), from);
    }
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        err = SYS_ERR_OK;
    }

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to update cap ownership");
    }

    cap_destroy(capref);

    err = owner_updated(from, st);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send ownership update response");
    }
}
Example #15
static errval_t mymm_free(struct capref ramcap, genpaddr_t base, uint8_t bits)
{
    errval_t ret;
    genpaddr_t mem_to_add;

    mem_to_add = (genpaddr_t)1 << bits;

    ret = mm_free(&mm_ram, ramcap, base, bits);
    if (err_is_fail(ret)) {
        if (err_no(ret) == MM_ERR_NOT_FOUND) {
            // memory wasn't there initially, add it
            ret = mm_add(&mm_ram, ramcap, bits, base);
            if (err_is_fail(ret)) {
                /* DEBUG_ERR(ret, "failed to add RAM to allocator"); */
                return ret;
            }
            mem_total += mem_to_add;
        } else {
            /* DEBUG_ERR(ret, "failed to free RAM in allocator"); */
            return ret;
        }
    }

    mem_avail += mem_to_add;

    return SYS_ERR_OK;
}
Example #16
void
find_cap_result__rx_handler(struct intermon_binding *b, errval_t result, genvaddr_t st)
{
    // if we receive a positive result, immediately forward to caller
    lvaddr_t lst = (lvaddr_t)st;
    struct find_cap_broadcast_st *fc_bc_st = (struct find_cap_broadcast_st*)lst;
    if (err_is_ok(result)) {
        if (!fc_bc_st->found) {
            fc_bc_st->found = true;
            struct intermon_state *inter_st = (struct intermon_state*)b->st;
            coreid_t from = inter_st->core_id;
            fc_bc_st->result_handler(SYS_ERR_OK, from, fc_bc_st->st);
        }
    }
    else if (err_no(result) != SYS_ERR_CAP_NOT_FOUND) {
        DEBUG_ERR(result, "ignoring bad find_cap_result");
    }

    // check to see if broadcast is complete
    if (capsend_handle_mc_reply(&fc_bc_st->bc)) {
        if (!fc_bc_st->found) {
            // broadcast did not find a core, report notfound to caller
            fc_bc_st->result_handler(SYS_ERR_CAP_NOT_FOUND, 0, fc_bc_st->st);
        }
        free(fc_bc_st);
    }
}
Example #17
static void send_myrpc_response(void *a)
{
    errval_t err;
    struct server_state *st = (struct server_state*)a;

    debug_printf("server: sending myresponse\n");

    struct event_closure txcont = MKCONT(send_myrpc_response_cb, st);
    err = xmplrpc_myrpc_response__tx(st->b, txcont, st->s);

    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            debug_printf("server: re-sending myresponse\n");
            struct waitset *ws = get_default_waitset();
            txcont = MKCONT(send_myrpc_response, st);
            err = st->b->register_send(st->b, ws, txcont);
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                DEBUG_ERR(err, "register_send on binding failed!");
                free_st(st);
            }
        } else {
            DEBUG_ERR(err, "error sending mycall message\n");
            free_st(st);
        }
    }
}
Example #18
static void send_myrpc_call(void *a)
{
    errval_t err;

    debug_printf("client: sending mycall\n");

    struct xmplrpc_binding *b = (struct xmplrpc_binding *)a;

    struct event_closure txcont = MKCONT(send_myrpc_call_cb, b);

    err = xmplrpc_myrpc_call__tx(b, txcont, 42);

    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            debug_printf("client: re-sending mycall\n");
            struct waitset *ws = get_default_waitset();
            txcont = MKCONT(send_myrpc_call, b);
            err = b->register_send(b, ws, txcont);
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                DEBUG_ERR(err, "register_send on binding failed!");
            }
        } else {
            DEBUG_ERR(err, "error sending mycall message\n");
        }
    }
}
Example #19
/**
 * \brief Send a multi-hop message that contains no payload.
 *        It is used to acknowledge received messages.
 *
 * \param mc pointer to the multi-hop channel
 */
static void multihop_send_dummy_message(struct multihop_chan *mc)
{
    assert(mc->connstate == MULTIHOP_CONNECTED);

#if MULTIHOP_FLOW_CONTROL

    MULTIHOP_DEBUG("sending dummy message, ack %d...\n", mc->unacked_received);

    errval_t err;
    struct monitor_binding *monitor_binding = mc->monitor_binding;

    // send message
    err = monitor_binding->tx_vtbl.multihop_message(monitor_binding, NOP_CONT,
            mc->vci, mc->direction, MULTIHOP_MESSAGE_FLAG_DUMMY,
            mc->unacked_received, (uint8_t *) mc, 1);

    if (err_is_ok(err)) {
        // we have just acknowledged all received messages
        mc->unacked_received = 0;
    } else if (err_no(err) != FLOUNDER_ERR_TX_BUSY) {
        USER_PANIC_ERR(err,
                "Could not send dummy message over multi-hop channel\n");

    }

#endif // MULTIHOP_FLOW_CONTROL
}
Example #20
/**
 * \brief Send a capability over the multi-hop channel
 *
 * \param mc pointer to the multi-hop channel
 * \param _continuation callback to be executed after the message is sent
 * \param cap_state pointer to the cap state of the channel
 * \param cap the capability to send
 */
errval_t multihop_send_capability(struct multihop_chan *mc,
        struct event_closure _continuation,
        struct flounder_cap_state *cap_state, struct capref cap)
{
    errval_t err;
    assert(mc->connstate == MULTIHOP_CONNECTED);
    struct monitor_binding *mon_binding = mc->monitor_binding;

    // send the message
    err = mon_binding->tx_vtbl.multihop_cap_send(mon_binding, _continuation,
                                                 mc->vci, mc->direction,
                                                 SYS_ERR_OK, cap,
                                                 cap_state->tx_capnum);

    if (err_is_ok(err)) {
        // increase capability number
        cap_state->tx_capnum++;
        return err;
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        return err;
    } else {
        return err_push(err, LIB_ERR_MONITOR_CAP_SEND);
    }
}
Example #21
static void
revoke_result__rx(errval_t result,
                  struct revoke_master_st *st,
                  bool locked)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    errval_t err;

    if (locked) {
        caplock_unlock(st->cap);
    }

    if (err_is_ok(result)) {
        // clear the remote copies bit
        err = monitor_domcap_remote_relations(st->cap.croot, st->cap.cptr,
                                              st->cap.bits, 0, RRELS_COPY_BIT,
                                              NULL);
        if (err_is_fail(err) && err_no(err) != SYS_ERR_CAP_NOT_FOUND) {
            DEBUG_ERR(err, "resetting remote copies bit after revoke");
        }
    }

    DEBUG_CAPOPS("%s ## revocation completed, calling %p\n", __FUNCTION__,
                 st->result_handler);

    st->result_handler(result, st->st);
    free(st);
}
Example #22
/**
 * \brief Internal function to send a reply back to the monitor
 */
static void send_bind_reply(void *st)
{
    errval_t err;
    struct bind_multihop_reply_state *reply_state = st;
    struct monitor_binding *monitor_binding = reply_state->monitor_binding;

    // send back a bind success / failure message to the monitor
    MULTIHOP_DEBUG("sending reply back to monitor...\n");
    err = monitor_binding->tx_vtbl.multihop_bind_service_reply(monitor_binding,
            NOP_CONT, reply_state->args.receiver_vci,
            reply_state->args.sender_vci, reply_state->args.err);

    if (err_is_ok(err)) {
        event_mutex_unlock(&monitor_binding->mutex);
        free(reply_state);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = monitor_binding->register_send(monitor_binding,
                monitor_binding->waitset, MKCONT(send_bind_reply, reply_state));
        assert(err_is_ok(err));
        // this shouldn't fail, as we have the mutex
    } else {
        event_mutex_unlock(&monitor_binding->mutex);
        USER_PANIC_ERR(
                err,
                "failed sending back reply to multi-hop bind request to monitor");
        free(st);
    }
}
Example #23
static void bind_monitor_reply_scc_cont(struct intermon_binding *b,
                                        errval_t err, chanid_t chanid)
{
    errval_t err2;

    err2 = b->tx_vtbl.bind_monitor_reply_scc(b, NOP_CONT, err,
            chanid, my_core_id);
    if (err_is_fail(err2)) {
        if(err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_monitor_reply_scc_state *me =
                malloc(sizeof(struct bind_monitor_reply_scc_state));
            assert(me != NULL);
            struct intermon_state *ist = b->st;
            assert(ist != NULL);
            me->args.err = err;
            me->args.chan_id = chanid;
            me->elem.cont = bind_monitor_reply_scc_handler;

            err = intermon_enqueue_send(b, &ist->queue,
                                        get_default_waitset(), &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        DEBUG_ERR(err2, "reply failed");
    }
}
Example #24
int access(const char *pathname, int mode)
{
    vfs_handle_t vh;
    errval_t err;
    int ret;

    char *path = vfs_path_mkabs(pathname);
    assert(path != NULL);

    err = vfs_open(path, &vh);
    if (err_is_fail(err)) {
        if(err_no(err) == FS_ERR_NOTFILE) {
            // Is it a directory?
            err = vfs_opendir(path, &vh);
            if(err_is_ok(err)) {
                vfs_closedir(vh);
                ret = 0;
                goto out;
            }
        }
        POSIXCOMPAT_DEBUG("access(%s) failed\n", pathname);
        ret = -1;
    } else {
        POSIXCOMPAT_DEBUG("access(%s): OK\n", pathname);
        vfs_close(vh);
        ret = 0;
    }

 out:
    free(path);
    return ret;
}
Example #25
static void ms_multiboot_cap_request(struct monitor_binding *b, cslot_t slot)
{
    errval_t err1, err2;

    struct capref cap = {
        .cnode = cnode_module,
        .slot  = slot,
    };

    // Call frame_identify to check if cap exists
    struct frame_identity id;
    err1 = invoke_frame_identify(cap, &id);
    if (err_is_fail(err1)) {
        err2 = b->tx_vtbl.multiboot_cap_reply(b, NOP_CONT, NULL_CAP, err1);
    } else {
        err2 = b->tx_vtbl.multiboot_cap_reply(b, NOP_CONT, cap, err1);
    }
    if (err_is_fail(err2)) {
        if (err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            struct monitor_state *mon_state = b->st;
            struct multiboot_cap_state *ms =
                malloc(sizeof(struct multiboot_cap_state));
            assert(ms);
            ms->slot = slot;
            ms->elem.cont = ms_multiboot_cap_request_handler;
            err1 = monitor_enqueue_send(b, &mon_state->queue,
                                       get_default_waitset(), &ms->elem.queue);
            if (err_is_fail(err1)) {
                USER_PANIC_ERR(err1, "monitor_enqueue_send failed");
            }
        } else {
            USER_PANIC_ERR(err2, "sending multiboot_cap_reply failed");
        }
    }
}
Example #26
static void send_bind_cont(void *arg)
{
    struct ump_chan *uc = arg;
    struct monitor_binding *b = uc->monitor_binding;
    errval_t err;

    /* Send bind request to the monitor */
    assert(uc->monitor_binding == b);
    assert(b->tx_vtbl.bind_ump_client_request);
    err = b->tx_vtbl.bind_ump_client_request(b, NOP_CONT, uc->iref,
                                             (uintptr_t)uc, uc->frame,
                                             uc->inchanlen, uc->outchanlen,
                                             uc->notify_cap);
    if (err_is_ok(err)) { // request sent ok
        event_mutex_unlock(&b->mutex);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // register to retry
        err = b->register_send(b, b->waitset, MKCONT(send_bind_cont,uc));
        assert(err_is_ok(err)); // we hold the monitor binding mutex
    } else { // permanent failure sending message
        event_mutex_unlock(&b->mutex);
        uc->bind_continuation.handler(uc->bind_continuation.st,
                                      err_push(err, LIB_ERR_BIND_UMP_REQ),
                                      NULL, NULL_CAP);
    }
}
Example #27
static void bind_ump_service_request_cont(struct monitor_binding *domain_binding,
                                          uintptr_t service_id,
                                          con_id_t my_mon_id,
                                          struct capref frame,
                                          uint32_t channel_length_in,
                                          uint32_t channel_length_out,
                                          struct capref notify_cap,
                                          struct intermon_binding *binding,
                                          con_id_t your_mon_id)
{
    errval_t err, err2;

    /* Proxy the request */
    err = domain_binding->tx_vtbl.
        bind_ump_service_request(domain_binding, NOP_CONT, service_id,
                                 my_mon_id, frame,
                                 channel_length_in, channel_length_out,
                                 notify_cap);
    if (err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_ump_service_request_state *me =
                malloc(sizeof(struct bind_ump_service_request_state));
            struct monitor_state *ist = domain_binding->st;
            me->args.service_id = service_id;
            me->args.mon_id = my_mon_id;
            me->args.frame = frame;
            me->args.channel_length_in = channel_length_in;
            me->args.channel_length_out = channel_length_out;
            me->args.notify = notify_cap;
            me->binding = binding;
            me->your_mon_id = your_mon_id;
            me->elem.cont = bind_ump_service_request_handler;

            err = monitor_enqueue_send(domain_binding, &ist->queue,
                                       get_default_waitset(), &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        err2 = cap_delete(frame);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Cap delete failed");
        }
        err2 = slot_free(frame);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Cap destroy default failed");
        }
        err2 = remote_conn_free(my_mon_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "remote_conn_free failed");
        }
        intermon_caprep_t nullcap = {0,0,0,0};
        err2 = binding->tx_vtbl.bind_ump_reply(binding, NOP_CONT, your_mon_id, 0, err,
                                               nullcap);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Sending bind_ump_reply1 failed");
        }
    }
}
Example #28
/**
 * \brief Client enters a barrier. Blocks until all clients have entered the
 * barrier.
 *
 * Each client creates a (sequential record) based on the provided name.
 * Once a client sees the specified amount (wait_for) of records it
 * creates a record that wakes up all waiting clients.
 *
 * \param[in] name Name of the barrier.
 * \param[out] barrier_record Record created for each client.
 * \param[in] wait_for Number of clients entering the barrier.
 */
errval_t oct_barrier_enter(const char* name, char** barrier_record, size_t wait_for)
{
    errval_t err;
    errval_t exist_err;
    char* record = NULL;
    char** names = NULL;
    uint64_t mode = 0;
    uint64_t state = 0;
    uint64_t fn = 0;
    octopus_trigger_id_t tid;
    size_t current_barriers = 0;
    octopus_trigger_t t = oct_mktrigger(OCT_ERR_NO_RECORD, octopus_BINDING_RPC,
            OCT_ON_SET, NULL, NULL);

    err = oct_set_get(SET_SEQUENTIAL, barrier_record,
            "%s_ { barrier: '%s' }", name, name);
    if (err_is_fail(err)) {
        return err;
    }
    err = oct_get_names(&names, &current_barriers, "_ { barrier: '%s' }",
            name);
    oct_free_names(names, current_barriers);
    if (err_is_fail(err)) {
        return err;
    }
    //debug_printf("current_barriers: %lu wait_for: %lu\n", current_barriers,
    //        wait_for);

    if (current_barriers != wait_for) {
        struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
        err = cl->call_seq.exists(cl, name, t, &tid, &exist_err);
        if (err_is_fail(err)) {
            return err;
        }
        err = exist_err;

        if (err_is_ok(err)) {
            // Barrier already exists
        }
        if (err_no(err) == OCT_ERR_NO_RECORD) {
            // Wait until barrier record is created
            err = cl->recv.trigger(cl, &tid, &fn, &mode, &record, &state);
            free(record);
            assert(mode & OCT_REMOVED);

            err = SYS_ERR_OK;
        }
        else {
            // Some other error happened, return it
        }
    }
    else {
        // We are the last to enter the barrier,
        // wake up the others
        err = oct_set(name);
    }

    return err;
}
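A hypothetical usage of the barrier: each of the wait_for clients makes the same call with the same name and blocks until the last one arrives; oct_barrier_leave() is the matching release call in the octopus client API. This sketch assumes the caller owns (and frees) the returned record string.

char *record = NULL;
errval_t err = oct_barrier_enter("init.barrier", &record, 4);
if (err_is_ok(err)) {
    // all four clients have entered the barrier
    err = oct_barrier_leave(record);
    free(record);
}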
Example #29
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping)
{
    assert(type_is_vnode(pgtable->type));
    assert(type_is_mapping(mapping->cap.type));
    struct Frame_Mapping *info = &mapping->cap.u.frame_mapping;
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%hu pages)\n", info->pte_count);

    // calculate page table address
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    cslot_t slot = info->entry;
    // get virtual address of first page
    genvaddr_t vaddr = 0; // may be read below even if compile_vaddr() fails
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED && vaddr == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else {
            return err;
        }
    }

    do_unmap(pt, slot, info->pte_count);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (info->pte_count > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    return SYS_ERR_OK;
}
Example #30
// FIXME: error handling (not asserts) needed in this function
static void mem_allocate_handler(struct mem_binding *b, uint8_t bits,
                                 genpaddr_t minbase, genpaddr_t maxlimit)
{
    struct capref *cap = malloc(sizeof(struct capref));
    errval_t err, ret;

    trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits);

    /* refill slot allocator if needed */
    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    assert(err_is_ok(err));

    /* refill slab allocator if needed */
    while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) {
        struct capref frame;
        err = msa.a.alloc(&msa.a, &frame);
        assert(err_is_ok(err));
        err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL);
        assert(err_is_ok(err));
        void *buf;
        err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map_one_frame failed");
            assert(buf);
        }
        slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8);
    }

    ret = mymm_alloc(cap, bits, minbase, maxlimit);
    if (err_is_ok(ret)) {
        mem_avail -= 1UL << bits;
    } else {
        // DEBUG_ERR(ret, "allocation of %d bits in % " PRIxGENPADDR "-%" PRIxGENPADDR " failed",
        //          bits, minbase, maxlimit);
        *cap = NULL_CAP;
    }

    /* Reply */
    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap),
                                       ret, *cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            r->cap = cap;
            err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to memory request");
            allocate_response_done(cap);
        }
    }
}