static void send_myrpc_response(void *a) { errval_t err; struct server_state *st = (struct server_state*)a; debug_printf("server: sending myresponse\n"); struct event_closure txcont = MKCONT(send_myrpc_response_cb, st); err = xmplrpc_myrpc_response__tx(st->b, txcont, st->s); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { debug_printf("server: re-sending myresponse\n"); struct waitset *ws = get_default_waitset(); txcont = MKCONT(send_myrpc_response, st); err = st->b->register_send(st->b, ws, txcont); if (err_is_fail(err)) { // note that only one continuation may be registered at a time DEBUG_ERR(err, "register_send on binding failed!"); free_st(st); } } else { DEBUG_ERR(err, "error sending mycall message\n"); free_st(st); } } }
/**
 * \brief Send (or give away) a capability to another domain via the monitor.
 *
 * Issues either a cap_move_request (give_away) or a cap_send_request on the
 * monitor binding. On successful transmit the per-channel capability
 * sequence number (tx_capnum) is advanced.
 *
 * \param s          flounder per-channel capability-transfer state
 * \param mb         monitor binding used for the transfer
 * \param monitor_id monitor-side identifier of the destination channel
 * \param cap        the capability to transmit
 * \param give_away  if true, ownership moves with the cap (cap_move_request)
 * \param cont       continuation stored in s and invoked when the send completes
 *
 * \return SYS_ERR_OK on success, the register_send result when the channel
 *         was busy, or the transmit error wrapped in LIB_ERR_MONITOR_CAP_SEND.
 */
errval_t flounder_stub_send_cap(struct flounder_cap_state *s,
                                struct monitor_binding *mb,
                                uintptr_t monitor_id, struct capref cap,
                                bool give_away, void (*cont)(void *st))
{
    errval_t err;
    // remember the caller's completion continuation for the cap_send_cont path
    s->cap_send_continuation = cont;

    if (give_away) {
        err = mb->tx_vtbl.cap_move_request(mb, MKCONT(cap_send_cont, s),
                                           monitor_id, cap, s->tx_capnum);
    } else {
        err = mb->tx_vtbl.cap_send_request(mb, MKCONT(cap_send_cont, s),
                                           monitor_id, cap, s->tx_capnum);
    }

    if (err_is_ok(err)) {
        // cap accepted for transmission: advance the sequence number
        s->tx_capnum++;
        return err;
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // register to retry
        // NOTE(review): the continuation registered here is cap_send_cont,
        // which looks like the send-*completion* callback rather than a
        // function that re-issues the cap_send/cap_move request. Confirm
        // cap_send_cont really retries the transmit; otherwise the cap is
        // never resent after a busy channel.
        return mb->register_send(mb, mb->waitset, MKCONT(cap_send_cont, s));
    } else {
        return err_push(err, LIB_ERR_MONITOR_CAP_SEND);
    }
}
static void send_myrpc_call(void *a) { errval_t err; debug_printf("client: sending mycall\n"); struct xmplrpc_binding *b = (struct xmplrpc_binding *)a; struct event_closure txcont = MKCONT(send_myrpc_call_cb, b); err = xmplrpc_myrpc_call__tx(b, txcont, 42); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { debug_printf("client: re-sending mycall\n"); struct waitset *ws = get_default_waitset(); txcont = MKCONT(send_myrpc_call, b); err = b->register_send(b, ws, txcont); if (err_is_fail(err)) { // note that only one continuation may be registered at a time DEBUG_ERR(err, "register_send on binding failed!"); } } else { DEBUG_ERR(err, "error sending mycall message\n"); } } }
// FIXME: error handling (not asserts) needed in this function static void mem_allocate_handler(struct mem_binding *b, uint8_t bits, genpaddr_t minbase, genpaddr_t maxlimit) { struct capref *cap = malloc(sizeof(struct capref)); errval_t err, ret; trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits); /* refill slot allocator if needed */ err = slot_prealloc_refill(mm_ram.slot_alloc_inst); assert(err_is_ok(err)); /* refill slab allocator if needed */ while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) { struct capref frame; err = msa.a.alloc(&msa.a, &frame); assert(err_is_ok(err)); err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL); assert(err_is_ok(err)); void *buf; err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL); if (err_is_fail(err)) { DEBUG_ERR(err, "vspace_map_one_frame failed"); assert(buf); } slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8); } ret = mymm_alloc(cap, bits, minbase, maxlimit); if (err_is_ok(ret)) { mem_avail -= 1UL << bits; } else { // DEBUG_ERR(ret, "allocation of %d bits in % " PRIxGENPADDR "-%" PRIxGENPADDR " failed", // bits, minbase, maxlimit); *cap = NULL_CAP; } /* Reply */ err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap), ret, *cap); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { struct pending_reply *r = malloc(sizeof(struct pending_reply)); assert(r != NULL); r->b = b; r->err = ret; r->cap = cap; err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r)); assert(err_is_ok(err)); } else { DEBUG_ERR(err, "failed to reply to memory request"); allocate_response_done(cap); } } }
static void tx_handler(void *arg) { struct serial_binding *b = arg; errval_t err; // free previously-sent buffer, if there is one if (inbuf[!ninbuf].buf != NULL) { free(inbuf[!ninbuf].buf); inbuf[!ninbuf].buf = NULL; } // do we have something to send? if not, bail out if (inbuf[ninbuf].buf == NULL) { return; } // try to send err = b->tx_vtbl.input(b, MKCONT(tx_handler,b), inbuf[ninbuf].buf, inbuf[ninbuf].len); if (err_is_ok(err)) { // swing buffer pointer ninbuf = !ninbuf; assert(inbuf[ninbuf].buf == NULL); } else if (err_is_fail(err)) { DEBUG_ERR(err, "error sending serial input to terminal"); } }
/**
 * \brief Continuation: send the reply to an LMP bind request back over
 *        the monitor binding.
 *
 * Holds the binding's event mutex on entry; the mutex is released once the
 * reply has been sent (or on permanent failure). A busy channel re-registers
 * this function and keeps the mutex held.
 *
 * \param arg the struct bind_lmp_reply_state for this request (freed here)
 */
static void send_bind_reply(void *arg)
{
    struct bind_lmp_reply_state *st = arg;
    struct monitor_binding *b = st->b;
    errval_t err;

    err = st->b->tx_vtbl.bind_lmp_reply_monitor(st->b, NOP_CONT,
                                                st->args.err,
                                                st->args.mon_id,
                                                st->args.conn_id,
                                                st->args.ep);
    if (err_is_ok(err)) {
        // reply sent: release the binding and free the reply state
        event_mutex_unlock(&b->mutex);
        free(st);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // channel busy: retry later; mutex stays held until the send succeeds
        err = st->b->register_send(st->b, st->b->waitset,
                                   MKCONT(send_bind_reply, st));
        assert(err_is_ok(err)); // shouldn't fail, as we have the mutex
    } else {
        event_mutex_unlock(&b->mutex);
        USER_PANIC_ERR(err, "failed sending back reply to LMP bind request;"
                       " request dropped!");
        // NOTE(review): if USER_PANIC_ERR aborts the domain (as it normally
        // does), the cleanup below is unreachable -- confirm intent
        if (st->lc != NULL) {
            lmp_chan_destroy(st->lc);
            // FIXME: how do we tell the binding about this!?
        }
        free(st);
    }
}
/** * \brief Continuation function for binding. This function * send the bind request to the monitor. * \param pointer to the multihop_chan */ static void multihop_chan_bind_cont(void *st) { errval_t err; struct multihop_chan *mc = st; struct monitor_binding *monitor_binding = mc->monitor_binding; // send bind request to the monitor // we do not get a lock on the monitor binding, as we did not expose it to the application MULTIHOP_DEBUG("sending bind request to monitor...\n"); err = monitor_binding->tx_vtbl.multihop_bind_client_request(monitor_binding, NOP_CONT, mc->iref, mc->my_vci); if (err_is_ok(err)) { // request was successfully sent } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { // register to retry err = monitor_binding->register_send(monitor_binding, monitor_binding->waitset, MKCONT(multihop_chan_bind_cont, st)); assert(err_is_ok(err)); } else { // permanent failure sending message mc->bind_continuation.handler(mc->bind_continuation.st, err_push(err, LIB_ERR_BIND_MULTIHOP_REQ), NULL); //TODO destroy channel state? } }
/** * \ brief Internal function to send a reply back to the monitor * */ static void send_bind_reply(void *st) { errval_t err; struct bind_multihop_reply_state *reply_state = st; struct monitor_binding *monitor_binding = reply_state->monitor_binding; // send back a bind success / failure message to the monitor MULTIHOP_DEBUG("sending reply back to monitor...\n"); err = monitor_binding->tx_vtbl.multihop_bind_service_reply(monitor_binding, NOP_CONT, reply_state->args.receiver_vci, reply_state->args.sender_vci, reply_state->args.err); if (err_is_ok(err)) { event_mutex_unlock(&monitor_binding->mutex); free(reply_state); } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { err = monitor_binding->register_send(monitor_binding, monitor_binding->waitset, MKCONT(send_bind_reply, reply_state)); assert(err_is_ok(err)); // this shouldn't fail, as we have the mutex } else { event_mutex_unlock(&monitor_binding->mutex); USER_PANIC_ERR( err, "failed sending back reply to multi-hop bind request to monitor"); free(st); } }
/**
 * \brief Send the spawn_domain response back to the requesting client.
 *
 * A busy channel queues a pending_spawn_response for the retry handler.
 * Permanent (non-busy) send failures are logged but deliberately not
 * propagated to the caller.
 *
 * \param b        client binding to reply on
 * \param rerr     result of the spawn operation to report
 * \param domainid id of the newly spawned domain
 *
 * \return SYS_ERR_OK normally; LIB_ERR_MALLOC_FAIL or the register_send
 *         error when queueing a retry fails.
 */
static errval_t spawn_reply(struct spawn_binding *b, errval_t rerr,
                            domainid_t domainid)
{
    errval_t err = b->tx_vtbl.spawn_domain_response(b, NOP_CONT, rerr,
                                                    domainid);
    if (err_is_ok(err)) {
        return SYS_ERR_OK;
    }

    DEBUG_ERR(err, "error sending spawn_domain reply\n");

    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // this will be freed in the retry handler
        struct pending_spawn_response *sr = malloc(sizeof(*sr));
        if (sr == NULL) {
            return LIB_ERR_MALLOC_FAIL;
        }
        sr->b = b;
        sr->err = rerr;
        sr->domainid = domainid;
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_spawn_domain_response, sr));
        if (err_is_fail(err)) {
            // note that only one continuation may be registered at a time
            free(sr);
            DEBUG_ERR(err, "register_send failed!");
            return err;
        }
    }

    // non-busy send errors are swallowed (already logged above)
    return SYS_ERR_OK;
}
/**
 * \brief Receive handler for the result of a cap-retrieve request.
 *
 * On success, records the remote relations of the retrieved cap and starts
 * an asynchronous ownership update whose continuation
 * (retrieve_ownership_update__fin) completes the RPC. On failure, completes
 * the RPC immediately with the error.
 *
 * \param b         intermon binding the result arrived on
 * \param status    result of the remote retrieve
 * \param relations remote relation bits to record for the cap
 * \param st        the struct retrieve_rpc_st, passed as an integer
 */
void retrieve_result__rx(struct intermon_binding *b, errval_t status,
                         uint8_t relations, genvaddr_t st)
{
    errval_t err;
    struct retrieve_rpc_st *rst = (struct retrieve_rpc_st*)(lvaddr_t)st;

    if (err_is_fail(status)) {
        err = status;
        goto report_error;
    }

    err = monitor_domcap_remote_relations(rst->cap.croot, rst->cap.cptr,
                                          rst->cap.bits, relations, 0xFF,
                                          NULL);
    PANIC_IF_ERR(err, "setting rrels for retrieved cap");

    struct event_closure updated_cont
        = MKCONT(retrieve_ownership_update__fin, rst);
    err = capsend_update_owner(rst->cap, updated_cont);
    PANIC_IF_ERR(err, "updating retrieve ownership");

    // BUGFIX: without this return, the success path fell through into
    // report_error and completed the RPC a second time, in addition to the
    // updated_cont continuation scheduled above
    return;

report_error:
    retrieve_ownership__rx(err, rst);
}
void thc_await_send(struct thc_per_binding_state_t *thc, void *f) { struct common_binding *c = (struct common_binding *)f; DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " > thc_await_send\n")); // Synchronize with thc_send_possible_event callback thc_lock_acquire(&thc->thc_binding_lock); // Request an event when sending is possible if (!thc->send_possible_event_requested) { errval_t err = c->register_send(c, get_default_waitset(), MKCONT(thc_send_possible_event, c)); if (err == FLOUNDER_ERR_TX_BUSY) { goto done; } assert(err_is_ok(err)); thc->send_possible_event_requested = 1; } // Wait // // We release the binding lock before blocking. It is passed back to us // by the notification THCSuspendThen(&thc->waiting_sender, thc_await_send0, (void*) &thc->thc_binding_lock); done: thc_lock_release(&thc->thc_binding_lock); DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " > thc_await_send\n")); }
static void send_bind_cont(void *arg) { struct ump_chan *uc = arg; struct monitor_binding *b = uc->monitor_binding; errval_t err; /* Send bind request to the monitor */ assert(uc->monitor_binding == b); assert(b->tx_vtbl.bind_ump_client_request); err = b->tx_vtbl.bind_ump_client_request(b, NOP_CONT, uc->iref, (uintptr_t)uc, uc->frame, uc->inchanlen, uc->outchanlen, uc->notify_cap); if (err_is_ok(err)) { // request sent ok event_mutex_unlock(&b->mutex); } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { // register to retry err = b->register_send(b, b->waitset, MKCONT(send_bind_cont,uc)); assert(err_is_ok(err)); // we hold the monitor binding mutex } else { // permanent failure sending message event_mutex_unlock(&b->mutex); uc->bind_continuation.handler(uc->bind_continuation.st, err_push(err, LIB_ERR_BIND_UMP_REQ), NULL, NULL_CAP); } }
static void get_io_cap(struct monitor_blocking_binding *b) { // XXX: We should not just hand out this cap to everyone // who requests it. There is currently no way to determine // if the client is a valid recipient errval_t err; struct capref src = { .cnode = cnode_task, .slot = TASKCN_SLOT_IO }; err = b->tx_vtbl.get_io_cap_response(b, NOP_CONT, src, SYS_ERR_OK); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { err = b->register_send(b, get_default_waitset(), MKCONT((void (*)(void *))get_io_cap, b)); if (err_is_fail(err)) { USER_PANIC_ERR(err, "register_send failed"); } } USER_PANIC_ERR(err, "sending get_io_cap_response failed"); } }
/**
 * \brief Benchmark handler: reply to a 4-word 32-bit payload request.
 *
 * Payload arguments are ignored; the reply carries fixed values 1..4.
 */
static void fsb_payload32_4_request(struct bench_binding *b,
                                    int32_t payload0, int32_t payload1,
                                    int32_t payload2, int32_t payload3)
{
    errval_t err = b->tx_vtbl.fsb_payload32_4_reply(
        b, MKCONT(continue_signal, NULL), 1, 2, 3, 4);
    assert(err_is_ok(err));
}
static void fsb_payload64_1_request(struct bench_binding *b, int64_t payload0) { errval_t err; err = b->tx_vtbl.fsb_payload64_1_reply(b, MKCONT(continue_signal, NULL), 1); if (err_is_fail(err)) { USER_PANIC_ERR(err, "error while sending reply message in client\n"); } }
/**
 * \brief Transmit handler for the DMA deregister response.
 *
 * On successful transmit the reply state is freed by the free() send
 * continuation. A busy channel re-registers this function to retry.
 *
 * \param a the struct dma_dereg_resp_st for this reply (heap-allocated)
 */
static void dma_deregister_response_tx(void *a)
{
    errval_t err;
    struct dma_dereg_resp_st *st = a;

    // on successful transmit, the reply state is freed by this continuation
    struct event_closure txcont = MKCONT(free, a);

    err = xeon_phi_dma_deregister_response__tx(st->b, txcont, st->err);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            txcont = MKCONT(dma_deregister_response_tx, a);
            err = st->b->register_send(st->b, get_default_waitset(), txcont);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "could not send reply");
            }
        } else {
            // BUGFIX: a permanent send failure was silently ignored and the
            // reply state leaked (the free() continuation never runs on a
            // failed transmit); log and release it here
            DEBUG_ERR(err, "could not send deregister response");
            free(a);
        }
    }
}
/**
 * \brief Bind continuation: install the receive vtable and start the
 *        benchmark by sending the init message.
 *
 * \param st      unused continuation state
 * \param binderr bind result (not inspected here)
 * \param b       the freshly-bound bench binding
 */
static void bind_cb(void *st, errval_t binderr, struct bench_binding *b)
{
    // copy my message receive handler vtable to the binding
    b->rx_vtbl = rx_vtbl;

    // Send an init message. This will start the benchmark.
    errval_t err = b->tx_vtbl.fsb_init_msg(b, MKCONT(continue_signal, NULL),
                                           my_core_id);
    assert(err_is_ok(err));
}
errval_t thc_await_send_x(struct thc_per_binding_state_t *thc, void *f) { struct thc_await_send_cancel_info cinf; cancel_item_t ci; int canceled = 0; struct common_binding *c = (struct common_binding *)f; DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " > thc_await_send_x\n")); // Synchronize with thc_send_possible_event callback thc_lock_acquire(&thc->thc_binding_lock); // Return THC_CANCELED if already requested if (THCIsCancelRequested()) { canceled = 1; goto done; } // Request an event when sending is possible if (!thc->send_possible_event_requested) { errval_t err = c->register_send(c, get_default_waitset(), MKCONT(thc_send_possible_event, c)); if (err == FLOUNDER_ERR_TX_BUSY) { goto done; } assert(err_is_ok(err)); thc->send_possible_event_requested = 1; } // Wait // // We release the binding lock before blocking. It is passed back to us // by the notification cinf.thc = thc; cinf.was_canceled = 0; THCAddCancelItem(&ci, &thc_await_send_x_cancel_fn, (void*)&cinf); THCSuspendThen(&thc->waiting_sender, thc_await_send0, (void*) &thc->thc_binding_lock); canceled = cinf.was_canceled; if (!canceled) { // Remove cancel item if it did not run if (!THCCancelItemRan(&ci)) { THCRemoveCancelItem(&ci); } } done: thc_lock_release(&thc->thc_binding_lock); DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " < thc_await_send\n")); return canceled ? THC_CANCELED : SYS_ERR_OK; }
static void retry_reply(void *arg) { struct pending_reply *r = arg; assert(r != NULL); struct mem_binding *b = r->b; errval_t err; err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, r->cap), r->err, *r->cap); if (err_is_ok(err)) { b->st = NULL; free(r); } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r)); assert(err_is_ok(err)); } else { DEBUG_ERR(err, "failed to reply to memory request"); allocate_response_done(r->cap); } }
static void dma_exec_response_tx(void *a) { errval_t err; struct dma_exec_resp_st *st = a; struct event_closure txcont = MKCONT(dma_exec_response_sent, a); err = xeon_phi_dma_exec_response__tx(st->b, txcont, st->err, st->id); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { txcont = MKCONT(dma_exec_response_tx, a); XDMA_DEBUG("dma_exec_response_tx: register sending...\n"); err = st->b->register_send(st->b, get_default_waitset(), txcont); if (err_is_fail(err)) { USER_PANIC_ERR(err, "could not send reply"); } } return; } }
/**
 * \brief Transmit handler for the USB transfer_unsetup response.
 *
 * \param a the struct usb_tunsetup_state for this request
 *
 * Error handling (e.g. busy-retry) is delegated to the USB_TX_TRANSER_ERR
 * macro, which presumably expands using the locals declared here
 * (err / st / txcont) by name -- see its definition.
 */
static void usb_tx_transfer_unsetup_response(void *a)
{
    errval_t err;
    struct usb_tunsetup_state *st = (struct usb_tunsetup_state *) a;

    struct event_closure txcont = MKCONT(usb_tx_transfer_generic_cb, st);

    err = usb_manager_transfer_unsetup_response__tx(st->bind, txcont,
                                                    (uint32_t) st->error);

    USB_TX_TRANSER_ERR(usb_tx_transfer_unsetup_response);
}
/**
 * \brief Benchmark handler: reply to a 16-word 32-bit payload request.
 *
 * All payload arguments are ignored; the reply carries fixed values 1..16.
 */
static void fsb_payload32_16_request(struct bench_binding *b,
                                     int32_t payload0, int32_t payload1,
                                     int32_t payload2, int32_t payload3,
                                     int32_t payload4, int32_t payload5,
                                     int32_t payload6, int32_t payload7,
                                     int32_t payload8, int32_t payload9,
                                     int32_t payload10, int32_t payload11,
                                     int32_t payload12, int32_t payload13,
                                     int32_t payload14, int32_t payload15)
{
    errval_t err = b->tx_vtbl.fsb_payload32_16_reply(
        b, MKCONT(continue_signal, NULL),
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
    assert(err_is_ok(err));
}
/**
 * \brief Continuation: forward an LMP bind request to the service's monitor
 *        binding.
 *
 * If the endpoint cap is being forwarded between two real bindings (neither
 * side is the monitor's self-binding), a duplicate capref is attached to the
 * send continuation so the outgoing cap is destroyed after transmission.
 * A busy channel queues the request for retransmission via the monitor send
 * queue; other send failures tear down the connection and report the error
 * back to the client.
 *
 * \param serv_binding monitor binding of the service to forward to
 * \param service_id   service identifier at the service's monitor
 * \param con_id       monitor-side connection identifier
 * \param buflen       requested LMP buffer length
 * \param ep           client's endpoint capability
 * \param b            monitor binding of the requesting client
 * \param domain_id    client's connection/domain identifier
 */
static void bind_lmp_service_request_cont(struct monitor_binding *serv_binding,
                                          uintptr_t service_id,
                                          uintptr_t con_id,
                                          size_t buflen, struct capref ep,
                                          struct monitor_binding *b,
                                          uintptr_t domain_id)
{
    errval_t err, err2;
    struct monitor_state *ist = serv_binding->st;
    struct event_closure send_cont = NOP_CONT;
    struct capref *capp = NULL;

    if (serv_binding != &monitor_self_binding
        && b != &monitor_self_binding) {
        // save EP cap to be destroyed after the send is done
        capp = caprefdup(ep);
        send_cont = MKCONT(destroy_outgoing_cap, capp);
    }

    err = serv_binding->tx_vtbl.
        bind_lmp_service_request(serv_binding, send_cont, service_id,
                                 con_id, buflen, ep);
    if (err_is_fail(err)) {
        // send failed: the destroy continuation will not run, release the dup
        free(capp);

        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // channel busy: queue the request for retransmission
            struct bind_lmp_service_request_state *me =
                malloc(sizeof(struct bind_lmp_service_request_state));
            assert(me != NULL);
            me->args.service_id = service_id;
            me->args.mon_id = con_id;
            me->args.buflen = buflen;
            me->args.ep = ep;
            me->b = b;
            me->domain_id = domain_id;
            me->elem.cont = bind_lmp_service_request_handler;

            err = monitor_enqueue_send(serv_binding, &ist->queue,
                                       get_default_waitset(),
                                       &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        // permanent failure: release the connection and report to the client
        err2 = lmp_conn_free(con_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "lmp_conn_free failed");
        }
        bind_lmp_client_request_error(b, err, domain_id, serv_binding, ep);
        return;
    }
}
/**
 * \brief handles the transmission of the transfer done notification
 *
 * \param a pointer to the transfer done state (struct usb_tdone_state)
 *
 * Sends the driver notification carrying the transfer id, its error code,
 * the data buffer and the actual byte count. Error handling (e.g.
 * busy-retry) is delegated to the USB_TX_TRANSER_ERR macro, which
 * presumably expands using the locals declared here (err / st / txcont)
 * by name -- see its definition.
 */
static void usb_transfer_complete_tx(void *a)
{
    struct usb_tdone_state *st = a;
    errval_t err;

    struct event_closure txcont = MKCONT(usb_transfer_complete_cb, st);

    err = usb_driver_transfer_done_notify__tx(st->bind, txcont,
                                              st->xfer->xfer_id,
                                              st->xfer->error, st->buf,
                                              st->xfer->actual_bytes);

    USB_TX_TRANSER_ERR(usb_transfer_complete_tx);
}
static void send_string_ready(void *a) { errval_t err; struct xmplmsg_binding *b = (struct xmplmsg_binding*)a; struct event_closure txcont = MKCONT(send_string_cb, b); err = xmplmsg_msg_string__tx(b, txcont, "Hello World"); if (err_is_fail(err)) { DEBUG_ERR(err, "error sending msg_string message\n"); } }
static void send_ints_cb(void *a) { errval_t err; struct xmplmsg_binding *b = (struct xmplmsg_binding*)a; struct event_closure txcont = MKCONT(send_string_cb, b); err = xmplmsg_msg_string__tx(b, txcont, "Hello World"); if (err_is_fail(err)) { DEBUG_ERR(err, "error sending msg_string message\n"); if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { struct waitset *ws = get_default_waitset(); txcont = MKCONT(send_string_ready, b); err = b->register_send(b, ws, txcont); if (err_is_fail(err)) { // note that only one continuation may be registered at a time DEBUG_ERR(err, "register_send on binding failed!"); } } } }
/**
 * \brief Transmit handler for the USB request_read response.
 *
 * \param a the struct usb_request_state for this request
 *
 * Sends the read data, its length and the request status back to the
 * client. Error handling (e.g. busy-retry) is delegated to the
 * USB_TX_REQUEST_ERR macro, which presumably expands using the locals
 * declared here (err / st / txcont) by name -- see its definition.
 */
static void usb_tx_request_read_response(void *a)
{
    errval_t err;
    struct usb_request_state *st = (struct usb_request_state *) a;

    USB_DEBUG_IDC("send usb_tx_request_read_response()\n");

    struct event_closure txcont = MKCONT(usb_tx_request_generic_cb, st);

    err = usb_manager_request_read_response__tx(st->bind, txcont, st->data,
                                                st->data_length,
                                                (uint32_t) st->error);

    USB_TX_REQUEST_ERR(usb_tx_request_read_response);
}
/**
 * \brief Continuation: forward the result of an LMP bind to the client.
 *
 * If the endpoint cap travels between two real bindings (neither side is
 * the monitor's self-binding), a duplicate capref is attached to the send
 * continuation so the outgoing cap is destroyed after transmission. A busy
 * channel queues the reply for retransmission via the monitor send queue.
 *
 * \param client_binding monitor binding of the client to reply to
 * \param msgerr         bind result to report to the client
 * \param mon_conn_id    monitor-side connection identifier
 * \param client_conn_id client-side connection identifier
 * \param ep             endpoint capability for the new channel
 * \param b              monitor binding the original request arrived on
 */
static void bind_lmp_reply_client_cont(struct monitor_binding *client_binding,
                                       errval_t msgerr, uintptr_t mon_conn_id,
                                       uintptr_t client_conn_id,
                                       struct capref ep,
                                       struct monitor_binding *b)
{
    errval_t err;
    struct monitor_state *ist = client_binding->st;
    struct event_closure send_cont = NOP_CONT;
    struct capref *capp = NULL;

    if (client_binding != &monitor_self_binding
        && b != &monitor_self_binding) {
        // save EP cap to be destroyed after the send is done
        capp = caprefdup(ep);
        send_cont = MKCONT(destroy_outgoing_cap, capp);
    }

    // BUGFIX: the first attempt previously hard-coded SYS_ERR_OK while the
    // busy-retry path below forwards msgerr (me->args.err); pass msgerr
    // here too so the client learns about bind failures consistently
    err = client_binding->tx_vtbl.
        bind_lmp_reply_client(client_binding, send_cont, msgerr,
                              mon_conn_id, client_conn_id, ep);
    if (err_is_fail(err)) {
        // send failed: the destroy continuation will not run, release the dup
        free(capp);

        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // channel busy: queue the reply for retransmission
            struct bind_lmp_reply_client_state *me =
                malloc(sizeof(struct bind_lmp_reply_client_state));
            assert(me != NULL);
            me->args.err = msgerr;
            me->args.mon_id = mon_conn_id;
            me->args.conn_id = client_conn_id;
            me->args.ep = ep;
            me->b = b;
            me->elem.cont = bind_lmp_reply_client_handler;

            err = monitor_enqueue_send(client_binding, &ist->queue,
                                       get_default_waitset(),
                                       &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err, "failed sending IDC bind reply");
    }

    if(err_is_fail(msgerr)) {
        return;
    }
}
/**
 * \brief Transmit handler for the USB transfer_clear_stall response.
 *
 * \param a the struct usb_tclearstall_state for this request
 *
 * Error handling (e.g. busy-retry) is delegated to the USB_TX_TRANSER_ERR
 * macro, which presumably expands using the locals declared here
 * (err / st / txcont) by name -- see its definition.
 */
static void usb_tx_transfer_clear_stall_response(void *a)
{
    errval_t err;
    struct usb_tclearstall_state *st = (struct usb_tclearstall_state *) a;

    USB_DEBUG_IDC("usb_tx_transfer_clear_stall_response()\n");

    struct event_closure txcont = MKCONT(usb_tx_transfer_generic_cb, st);

    err = usb_manager_transfer_clear_stall_response__tx(st->bind, txcont,
                                                        (uint32_t) st->error);

    USB_TX_TRANSER_ERR(usb_tx_transfer_clear_stall_response);
}
static void send_ints_ready(void *a) { errval_t err; struct xmplmsg_binding *b = (struct xmplmsg_binding*)a; struct event_closure txcont = MKCONT(send_ints_cb, b); err = xmplmsg_msg_ints__tx(b, txcont, 0x1, 0x10); if (err_is_fail(err)) { DEBUG_ERR(err, "error sending msg_ints message\n"); } }