/**
 * \brief Handle the benchmark init message: bind to a private waitset,
 *        configure tracing, and start the experiment.
 */
static void fsb_init_msg(struct bench_binding *b, coreid_t id)
{
    errval_t err;

    // move this binding onto our dedicated waitset
    waitset_init(&signal_waitset);
    err = b->change_waitset(b, &signal_waitset);
    assert(err_is_ok(err));

    binding = b;
    reply_received = true;

#if CONFIG_TRACE
    // arm the trace subsystem with the benchmark start/stop trigger events
    err = trace_control(TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                                    TRACE_EVENT_MULTIHOP_BENCH_START, 0),
                        TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                                    TRACE_EVENT_MULTIHOP_BENCH_STOP, 0),
                        0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_control failed");
    }
#endif

    // emit the start-of-benchmark event, then run the experiment
    err = trace_event(TRACE_SUBSYS_MULTIHOP,
                      TRACE_EVENT_MULTIHOP_BENCH_START, 0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_event failed");
    }
    experiment();
}
void domain_mgmt_init(void) { errval_t err; /* Register notification endpoint with kernel */ struct capref epcap; struct lmp_endpoint *notifyep; // XXX: This has to be huge so we can receive a batch of // notifications when deleting CNodes recursively. err = endpoint_create(100 * 12, &epcap, ¬ifyep); if (err_is_fail(err)) { USER_PANIC_ERR(err, "failed creating endpoint"); } // register to receive on this endpoint struct event_closure cl = { .handler = handle_notification, .arg = notifyep, }; err = lmp_endpoint_register(notifyep, get_default_waitset(), cl); assert(err_is_ok(err)); err = invoke_monitor_register(epcap); if(err_is_fail(err)) { USER_PANIC_ERR(err, "Could not register with kernel"); } else { #ifdef DEBUG_MONITOR_ALL debug_printf("monitor ep registered\n"); #endif } }
/**
 * \brief Clean up after a failed UMP client bind request and report the
 *        error back to the client domain.
 *
 * \param b         binding to the client domain
 * \param frame     shared frame cap to destroy
 * \param conn_id   monitor-side connection id to free (0 = none allocated)
 * \param domain_id client's connection id, echoed back in the reply
 * \param err       the original bind error, forwarded to the client
 */
static void monitor_bind_ump_client_request_error(struct monitor_binding *b,
                                                  struct capref frame,
                                                  uintptr_t conn_id,
                                                  uintptr_t domain_id,
                                                  errval_t err)
{
    errval_t err2;

    err2 = cap_destroy(frame);
    if (err_is_fail(err2)) {
        // BUGFIX: report the cap_destroy failure (err2); this previously
        // panicked with the caller-supplied err, masking the real cause
        USER_PANIC_ERR(err2, "cap_destroy failed");
    }

    if (conn_id != 0) {
        err2 = remote_conn_free(conn_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "remote_conn_free failed");
        }
    }

    // relay the original bind error back to the client
    err2 = b->tx_vtbl.bind_ump_reply_client(b, NOP_CONT, 0, domain_id, err,
                                            NULL_CAP);
    if (err_is_fail(err2)) {
        USER_PANIC_ERR(err2, "error reply failed");
    }
}
/**
 * \brief Inter-monitor handler: update the recorded owner of a capability.
 *
 * If a local copy of the cap exists, its owner is set to the sending core;
 * absence of a local copy is not an error. An acknowledgement is sent back
 * in either case.
 */
void update_owner__rx_handler(struct intermon_binding *b,
                              intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;

    struct capability cap;
    caprep_to_capability(&caprep, &cap);

    struct capref capref;
    err = slot_alloc(&capref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate slot for owner update");
    }

    // if we hold a copy, record the sender as the new owner
    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_ok(err)) {
        err = monitor_set_cap_owner(cap_root, get_cap_addr(capref),
                                    get_cap_valid_bits(capref), from);
    }
    // having no local copy simply means there is nothing to update
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        err = SYS_ERR_OK;
    }
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to update cap ownership");
    }

    cap_destroy(capref);

    // acknowledge the ownership update to the sending core
    err = owner_updated(from, st);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send ownership update response");
    }
}
/**
 * \brief Inter-monitor handler: check whether a capability has local
 *        descendants and queue the result back to the requesting core.
 */
void find_descendants__rx_handler(struct intermon_binding *b,
                                  intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;

    struct capability cap;
    caprep_to_capability(&caprep, &cap);

    bool has_descendants;
    err = monitor_has_descendants(&cap, &has_descendants);
    assert(err_is_ok(err));

    struct find_descendants_result_msg_st *msg_st = malloc(sizeof(*msg_st));
    if (!msg_st) {
        err = LIB_ERR_MALLOC_FAIL;
        USER_PANIC_ERR(err, "could not alloc find_descendants_result_msg_st");
    }

    msg_st->queue_elem.cont = find_descendants_result_send_cont;
    msg_st->st = st;

    // encode the query result as a status code for the reply
    if (err_is_ok(err)) {
        err = has_descendants ? SYS_ERR_OK : SYS_ERR_CAP_NOT_FOUND;
    }
    msg_st->status = err;

    err = capsend_target(from, (struct msg_queue_elem*)msg_st);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not enqueue find_descendants_result msg");
    }
}
/**
 * \brief Send (or queue for retry) an IPI-alloc-notify reply.
 */
static void ipi_alloc_notify_reply_cont(struct monitor_binding *b,
                                        uintptr_t state,
                                        struct capref notify_cap,
                                        errval_t reterr)
{
    errval_t err = b->tx_vtbl.ipi_alloc_notify_reply(b, NOP_CONT, state,
                                                     notify_cap, reterr);
    if (err_is_ok(err)) {
        return;
    }
    if (err_no(err) != FLOUNDER_ERR_TX_BUSY) {
        USER_PANIC_ERR(err, "sending reply");
    }

    // transmit channel busy: stash the arguments and retry later
    struct monitor_state *st = b->st;
    struct ipi_alloc_notify_reply_state *me =
        malloc(sizeof(struct ipi_alloc_notify_reply_state));
    assert(me != NULL);
    me->args.state = state;
    me->args.notify = notify_cap;
    me->args.err = reterr;
    me->elem.cont = ipi_alloc_notify_reply_handler;

    err = monitor_enqueue_send(b, &st->queue, get_default_waitset(),
                               &me->elem.queue);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_enqueue_send failed");
    }
}
/**
 * \brief Hand out a multiboot module cap (or NULL_CAP if the slot is empty).
 */
static void ms_multiboot_cap_request(struct monitor_binding *b, cslot_t slot)
{
    errval_t err1, err2;

    struct capref cap = {
        .cnode = cnode_module,
        .slot = slot,
    };

    // probe for the cap's existence by identifying the frame
    struct frame_identity id;
    err1 = invoke_frame_identify(cap, &id);

    // reply with the cap on success, NULL_CAP plus the error otherwise
    err2 = b->tx_vtbl.multiboot_cap_reply(b, NOP_CONT,
                                          err_is_fail(err1) ? NULL_CAP : cap,
                                          err1);
    if (err_is_fail(err2)) {
        if (err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            // channel busy: queue the request to be re-handled later
            struct monitor_state *mon_state = b->st;
            struct multiboot_cap_state *ms =
                malloc(sizeof(struct multiboot_cap_state));
            assert(ms);
            ms->slot = slot;
            ms->elem.cont = ms_multiboot_cap_request_handler;
            err1 = monitor_enqueue_send(b, &mon_state->queue,
                                        get_default_waitset(),
                                        &ms->elem.queue);
            if (err_is_fail(err1)) {
                USER_PANIC_ERR(err1, "monitor_enqueue_send failed");
            }
        } else {
            USER_PANIC_ERR(err2, "sending multiboot_cap_reply failed");
        }
    }
}
static void get_io_cap(struct monitor_blocking_binding *b) { // XXX: We should not just hand out this cap to everyone // who requests it. There is currently no way to determine // if the client is a valid recipient errval_t err; struct capref src = { .cnode = cnode_task, .slot = TASKCN_SLOT_IO }; err = b->tx_vtbl.get_io_cap_response(b, NOP_CONT, src, SYS_ERR_OK); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { err = b->register_send(b, get_default_waitset(), MKCONT((void (*)(void *))get_io_cap, b)); if (err_is_fail(err)) { USER_PANIC_ERR(err, "register_send failed"); } } USER_PANIC_ERR(err, "sending get_io_cap_response failed"); } }
/**
 * \brief Inter-card transfer test driver: spawn the test domain on two
 *        Xeon Phi cards and enter the message handler loop.
 */
int main(int argc, char **argv)
{
    errval_t err;

    debug_printf("Inter Card Transfer Test started.\n");

    coreid_t core = 2;
    char *name = "k1om/sbin/xeon_phi_inter";

    // spawn the test domain on card 0
    xphi_dom_id_t domid0;
    err = xeon_phi_client_spawn(0, core, name, NULL, NULL_CAP, &domid0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not send the spawn message");
    }

    // spawn the test domain on card 1
    xphi_dom_id_t domid1;
    err = xeon_phi_client_spawn(1, core, name, NULL, NULL_CAP, &domid1);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not send the spawn message");
    }

    debug_printf("Inter Card Transfer Test: Main Loop\n");

    messages_handler_loop();

    debug_printf("Terminated.\n");

    // BUGFIX: main is declared int but previously fell off the end with no
    // return; only reached if the handler loop ever exits
    return 0;
}
static errval_t spawn_child(int rfd) { errval_t err; char *argv[2] = { "net-test", NULL }; domainid_t new_domain = -1; coreid_t core = 0; struct capref fdcap; err = spawn_setup_fds(&fdcap, rfd); if (err_is_fail(err)) { USER_PANIC_ERR(err, "spawn_setup_fds"); } struct capref inheritcn_cap; err = alloc_inheritcn_with_caps(&inheritcn_cap, fdcap, NULL_CAP, NULL_CAP); if (err_is_fail(err)) { USER_PANIC_ERR(err, "failed to setup inheritcn"); } err = spawn_program_with_caps(core, argv[0], argv, NULL, inheritcn_cap, NULL_CAP, SPAWN_FLAGS_NEW_DOMAIN, &new_domain); if (err_is_fail(err)) { DEBUG_ERR(err, "failed spawn on core %d", core); return err; } return SYS_ERR_OK; }
/**
 * \brief Memory benchmark worker: synchronise with the other workers,
 *        run the benchmark, synchronise again.
 */
static int run_worker(coreid_t mycore)
{
    errval_t err;

    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_WAIT, 0);

    // rendezvous with all workers before starting
    err = ns_barrier_worker((int)mycore, "mem_bench_ready");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "barrier_worker failed");
    }

    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_RUN, 0);
    run_benchmark(mycore, MAX_REQUESTS);

    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_WAIT, 0);
    // rendezvous again so all workers finish together
    err = ns_barrier_worker((int)mycore, "mem_bench_finished");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "barrier_worker failed");
    }

    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_DONE, 0);
    return EXIT_SUCCESS;
}
/**
 * \brief Send (or queue for retry) an alloc_iref reply.
 */
static void alloc_iref_reply_cont(struct monitor_binding *b,
                                  uintptr_t service_id, iref_t iref,
                                  errval_t reterr)
{
    errval_t err;

    err = b->tx_vtbl.alloc_iref_reply(b, NOP_CONT, service_id, iref, reterr);
    if (err_is_ok(err)) {
        return;
    }
    if (err_no(err) != FLOUNDER_ERR_TX_BUSY) {
        USER_PANIC_ERR(err, "reply failed");
    }

    // channel busy: save the arguments and queue a retransmission
    struct alloc_iref_reply_state *me =
        malloc(sizeof(struct alloc_iref_reply_state));
    assert(me != NULL);
    struct monitor_state *ist = b->st;
    assert(ist != NULL);
    me->args.service_id = service_id;
    me->args.iref = iref;
    me->args.err = reterr;
    me->b = b;
    me->elem.cont = alloc_iref_reply_handler;

    err = monitor_enqueue_send(b, &ist->queue, get_default_waitset(),
                               &me->elem.queue);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_enqueue_send failed");
    }
}
// send single message over TCP connection int send_message_client(void *msg, size_t len) { err_t err; // printf("send_message(pcb: %p, msg: %p, len: %d)\n", // pcb, msg, (int)len); if (len > 0) { assert(tcp_sndbuf(client_pcb) >= len); err = tcp_write(client_pcb, msg, len, TCP_WRITE_FLAG_COPY); if (err != ERR_OK) { USER_PANIC_ERR(err, "tcp_write failed in send_message"); return -1; } } // FIXME: Do I need this? err = tcp_output(client_pcb); if (err != ERR_OK) { USER_PANIC_ERR(err, "tcp_write failed in send_message"); return -1; } // printf("done send_message()\n"); return 0; } // end function: send_message_client
/**
 * \brief Export callback: register the basic serial interface with the
 *        nameservice under "<driver_name>.SERVICE_SUFFIX".
 *
 * \param st driver name string (cast from void*)
 */
static void export_cb(void *st, errval_t err, iref_t iref)
{
    char *driver_name = (char *) st;

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Exporting basic interface failed.\n");
    }

    // build service name as driver_name.SERVICE_SUFFIX; first snprintf
    // call measures, second one writes
    size_t namelen = snprintf(NULL, 0, "%s.%s", driver_name, SERVICE_SUFFIX);
    char *svcname = (char *) malloc(namelen + 1);
    if (svcname == NULL) {
        USER_PANIC("Error allocating memory.");
    }
    snprintf(svcname, namelen + 1, "%s.%s", driver_name, SERVICE_SUFFIX);

    SERIAL_DEBUG("About to register basic interface '%s' at nameservice.\n",
                 svcname);

    // register basic serial driver service at nameservice
    err = nameservice_register(svcname, iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Registering basic interface at "
                       "nameserver failed.");
    }
    free(svcname);
}
static void export_cb(void *st, errval_t err, iref_t iref) { if (err_is_fail(err)) { USER_PANIC_ERR(err, "export failed"); } // construct name char namebuf[32]; size_t len = snprintf(namebuf, sizeof(namebuf), "%s.%d", SERVICE_BASENAME, my_core_id); assert(len < sizeof(namebuf)); namebuf[sizeof(namebuf) - 1] = '\0'; // register this iref with the name service err = nameservice_register(namebuf, iref); if (err_is_fail(err)) { USER_PANIC_ERR(err, "nameservice_register failed"); } #if !defined(USE_KALUGA_DVM) || defined(__arm__) || defined(__scc__) || defined(__k1om__) // let the master know we are ready err = nsb_register_n(my_core_id, SERVICE_BASENAME); if (err_is_fail(err)) { USER_PANIC_ERR(err, "nsb_register_n failed"); } // wait for boot to finish err = nsb_wait(ALL_SPAWNDS_UP); if (err_is_fail(err)) { USER_PANIC_ERR(err, "failed ns barrier wait for %s", ALL_SPAWNDS_UP); } // debug_printf("got \"%s\", continuing\n", ALL_SPAWNDS_UP); #endif }
/**
 * \brief Send (or queue for retry) a new_monitor_binding reply.
 */
static void new_monitor_binding_reply_cont(struct monitor_binding *b,
                                           errval_t reterr,
                                           struct capref retcap,
                                           uintptr_t st)
{
    errval_t err = b->tx_vtbl.new_monitor_binding_reply(b, NOP_CONT, reterr,
                                                        retcap, st);
    if (err_is_ok(err)) {
        return;
    }
    if (err_no(err) != FLOUNDER_ERR_TX_BUSY) {
        USER_PANIC_ERR(err, "failed to send new_monitor_binding_reply");
    }

    // channel busy: save the reply arguments and queue a retransmission
    struct monitor_state *ms = b->st;
    struct new_monitor_binding_reply_state *me =
        malloc(sizeof(struct new_monitor_binding_reply_state));
    assert(me != NULL);
    me->args.err = reterr;
    me->args.ep = retcap;
    me->args.st = st;
    me->elem.cont = new_monitor_binding_reply_handler;

    err = monitor_enqueue_send(b, &ms->queue, get_default_waitset(),
                               &me->elem.queue);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_enqueue_send failed");
    }
}
/**
 * \brief Proxy a UMP bind request to the service's domain, retrying on a
 *        busy channel and unwinding state on hard failure.
 *
 * On TX_BUSY the arguments are queued for retransmission; on any other
 * send error the frame cap and monitor connection state are released and
 * the failure is reported back to the requesting monitor.
 */
static void bind_ump_service_request_cont(struct monitor_binding *domain_binding,
                                          uintptr_t service_id,
                                          con_id_t my_mon_id,
                                          struct capref frame,
                                          uint32_t channel_length_in,
                                          uint32_t channel_length_out,
                                          struct capref notify_cap,
                                          struct intermon_binding *binding,
                                          con_id_t your_mon_id)
{
    errval_t err, err2;

    /* Proxy the request */
    err = domain_binding->tx_vtbl.
        bind_ump_service_request(domain_binding, NOP_CONT, service_id,
                                 my_mon_id, frame, channel_length_in,
                                 channel_length_out, notify_cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_ump_service_request_state *me =
                malloc(sizeof(struct bind_ump_service_request_state));
            // BUGFIX: check the allocation before dereferencing, matching
            // the other retry continuations in this file
            assert(me != NULL);
            struct monitor_state *ist = domain_binding->st;
            me->args.service_id = service_id;
            me->args.mon_id = my_mon_id;
            me->args.frame = frame;
            me->args.channel_length_in = channel_length_in;
            me->args.channel_length_out = channel_length_out;
            me->args.notify = notify_cap;
            me->binding = binding;
            me->your_mon_id = your_mon_id;
            me->elem.cont = bind_ump_service_request_handler;

            err = monitor_enqueue_send(domain_binding, &ist->queue,
                                       get_default_waitset(),
                                       &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        // hard failure: release the frame cap and its slot
        err2 = cap_delete(frame);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Cap delete failed");
        }
        err2 = slot_free(frame);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Cap destroy default failed");
        }
        err2 = remote_conn_free(my_mon_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "remote_conn_free failed");
        }

        // report the failure back to the requesting monitor
        intermon_caprep_t nullcap = {0,0,0,0};
        err2 = binding->tx_vtbl.bind_ump_reply(binding, NOP_CONT, your_mon_id,
                                               0, err, nullcap);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Sending bind_ump_reply1 failed");
        }
    }
}
static void export_cb(void *st, errval_t err, iref_t iref) { if (err_is_fail(err)) { USER_PANIC_ERR(err, "export failed"); } err = nameservice_register(service_name, iref); if (err_is_fail(err)) { USER_PANIC_ERR(err, "nameservice_register failed"); } }
static void export_cb(void *st, errval_t err, iref_t iref) { if (err_is_fail(err)) { USER_PANIC_ERR(err, "export failed"); } // register this iref with the name service err = nameservice_register("server", iref); if (err_is_fail(err)) { USER_PANIC_ERR(err, "nameservice_register failed"); } }
/**
 * \brief Forward an LMP bind request to the service's binding.
 *
 * If neither end is the monitor's self binding, a duplicate of the EP cap
 * is destroyed by the send continuation once transmission completes. On
 * TX_BUSY the arguments are queued for retry; on any other send failure
 * the LMP connection state is freed and the error is reported back to the
 * client.
 */
static void bind_lmp_service_request_cont(struct monitor_binding *serv_binding,
                                          uintptr_t service_id,
                                          uintptr_t con_id,
                                          size_t buflen, struct capref ep,
                                          struct monitor_binding *b,
                                          uintptr_t domain_id)
{
    errval_t err, err2;
    struct monitor_state *ist = serv_binding->st;
    struct event_closure send_cont = NOP_CONT;
    struct capref *capp = NULL;

    if (serv_binding != &monitor_self_binding && b != &monitor_self_binding) {
        // save EP cap to be destroyed after the send is done
        capp = caprefdup(ep);
        send_cont = MKCONT(destroy_outgoing_cap, capp);
    }

    err = serv_binding->tx_vtbl.
        bind_lmp_service_request(serv_binding, send_cont, service_id, con_id,
                                 buflen, ep);
    if (err_is_fail(err)) {
        // the send continuation will not run; release the duplicated cap ref
        // (free(NULL) is a no-op when no duplicate was made)
        free(capp);

        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // channel busy: stash all arguments and queue a retransmission
            struct bind_lmp_service_request_state *me =
                malloc(sizeof(struct bind_lmp_service_request_state));
            assert(me != NULL);
            me->args.service_id = service_id;
            me->args.mon_id = con_id;
            me->args.buflen = buflen;
            me->args.ep = ep;
            me->b = b;
            me->domain_id = domain_id;
            me->elem.cont = bind_lmp_service_request_handler;

            err = monitor_enqueue_send(serv_binding, &ist->queue,
                                       get_default_waitset(),
                                       &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        // hard failure: tear down the connection and tell the client
        err2 = lmp_conn_free(con_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "lmp_conn_free failed");
        }

        bind_lmp_client_request_error(b, err, domain_id, serv_binding, ep);
        return;
    }
}
/**
 * \brief Bind callback: install the receive vtable and send the shared
 *        memory clock init request.
 */
static void bind_cb(void *st, errval_t err, struct bench_binding *b)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "bind failed");
    }

    // copy my message receive handler vtable to the binding
    b->rx_vtbl = rx_vtbl;

    // BUGFIX: capture the send result; previously the return value was
    // discarded and the stale bind error was re-checked, so a failed
    // shmc_init_request went completely unnoticed
    err = b->tx_vtbl.shmc_init_request(b, NOP_CONT, my_core_id);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "sending shm_init_request failed");
    }
}
static void export_cb(void *st, errval_t err, iref_t iref) { if (err_is_fail(err)) { USER_PANIC_ERR(err, "export failed"); } printf("bfscope: exported at iref %"PRIuIREF"\n", iref); // register this iref with the name service err = nameservice_register("bfscope", iref); if (err_is_fail(err)) { USER_PANIC_ERR(err, "nameservice_register failed"); } }
/**
 * \brief Forward the LMP bind result to the client domain.
 *
 * If neither end is the monitor's self binding, a duplicate of the EP cap
 * is destroyed by the send continuation after transmission. On TX_BUSY
 * the reply is queued for retry.
 */
static void bind_lmp_reply_client_cont(struct monitor_binding *client_binding,
                                       errval_t msgerr, uintptr_t mon_conn_id,
                                       uintptr_t client_conn_id,
                                       struct capref ep,
                                       struct monitor_binding *b)
{
    errval_t err;
    struct monitor_state *ist = client_binding->st;
    struct event_closure send_cont = NOP_CONT;
    struct capref *capp = NULL;

    if (client_binding != &monitor_self_binding && b != &monitor_self_binding) {
        // save EP cap to be destroyed after the send is done
        capp = caprefdup(ep);
        send_cont = MKCONT(destroy_outgoing_cap, capp);
    }

    // BUGFIX: forward the actual bind status (msgerr); this previously sent
    // SYS_ERR_OK unconditionally while the TX_BUSY retry path below stores
    // and resends msgerr -- the two paths now agree, and a failed bind is
    // no longer reported to the client as success
    err = client_binding->tx_vtbl.
        bind_lmp_reply_client(client_binding, send_cont, msgerr, mon_conn_id,
                              client_conn_id, ep);
    if (err_is_fail(err)) {
        // the send continuation will not run; drop the duplicated cap ref
        free(capp);

        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_lmp_reply_client_state *me =
                malloc(sizeof(struct bind_lmp_reply_client_state));
            assert(me != NULL);
            me->args.err = msgerr;
            me->args.mon_id = mon_conn_id;
            me->args.conn_id = client_conn_id;
            me->args.ep = ep;
            me->b = b;
            me->elem.cont = bind_lmp_reply_client_handler;

            err = monitor_enqueue_send(client_binding, &ist->queue,
                                       get_default_waitset(),
                                       &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err, "failed sending IDC bind reply");
    }

    if (err_is_fail(msgerr)) {
        return;
    }
}
/**
 * \brief Called when the "client" connects to "server"
 *
 * Make the connection a "server" connection, free unnecessary state.
 * Send init msg to the dispatcher that spanned this dispatcher.
 */
static void client_connected(void *st, errval_t err,
                             struct interdisp_binding *b)
{
    struct remote_core_state *state = (struct remote_core_state*)st;
    struct domain_state *domain_state = get_domain_state();

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "binding to interdisp service");
        abort();
    }

    /* Set it on the domain library state */
    b->rx_vtbl = interdisp_vtbl;
    domain_state->b[state->cnt] = b;

    // Send it our core id
    err = b->tx_vtbl.span_eager_connect(b, NOP_CONT, disp_get_core_id());
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "sending span_eager_connect");
    }

    // Connect to next active dispatcher
    do {
        state->cnt++;
        if (state->cnt == disp_get_core_id()) {
            state->cnt++;
        }
        // BUGFIX: test the bound before indexing; the old condition read
        // allirefs[state->cnt] first and could access allirefs[MAX_CPUS]
        // (and beyond) out of bounds
    } while (state->cnt < MAX_CPUS && allirefs[state->cnt] == NULL_IREF);

    if (state->cnt < MAX_CPUS) {
        err = interdisp_bind(allirefs[state->cnt], client_connected, state,
                             &domain_state->interdisp_ws,
                             IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Binding to inter-dispatcher service");
        }
    } else {
        struct interdisp_binding *sb = domain_state->b[state->core_id];
        /* Send initialized msg to the dispatcher that spanned us */
        errval_t err2 = sb->tx_vtbl.
            dispatcher_initialized(sb, NOP_CONT,
                                   (uintptr_t)state->span_domain_state);
        if (err_is_fail(err2)) {
            // BUGFIX: report err2 (the actual send failure), not the stale
            // err from the earlier successful send; also fix the
            // "initalized" typo
            DEBUG_ERR(err2, "failed to send initialized msg");
            abort();
        }

        state->initialized = true;
    }
}
/**
 * \brief Dequeued-send continuation for a capsend multicast element.
 *
 * Sends one multicast message (unless the multicast was aborted during
 * setup), re-queues itself while the channel is busy, and releases the
 * shared multicast state once the last queued element has drained.
 */
static void capsend_mc_send_cont(struct intermon_binding *b,
                                 struct intermon_msg_queue_elem *e)
{
    struct capsend_mc_msg_st *msg_st = (struct capsend_mc_msg_st*)e;
    struct capsend_mc_st *mc_st = msg_st->mc_st;
    errval_t err = SYS_ERR_OK;

    // if do_send is false, an error occured in the multicast setup, so do not
    // send anything
    if (mc_st->do_send) {
        err = mc_st->send_fn(b, &mc_st->caprep, mc_st);
    }
    // channel still busy: put this element back on the queue for a retry
    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = capsend_target(msg_st->dest, (struct msg_queue_elem*)msg_st);
    }
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "sending dequeued capops message");
    }

    // decrement counter of number of queued messages
    if (!--mc_st->num_queued) {
        // if counter is zero, cleanup outgoing memory
        free(mc_st->msg_st_arr);
        mc_st->msg_st_arr = NULL;
        if (!mc_st->do_send || !mc_st->num_pending) {
            // if the send has been aborted, also cleanup cross-call state
            free(mc_st);
        }
    }
}
// Get the bootinfo and map it in. static errval_t map_bootinfo(struct bootinfo **bootinfo) { errval_t err, msgerr; struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client(); assert(cl != NULL); struct capref bootinfo_frame; size_t bootinfo_size; msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size); if (err_is_fail(msgerr)) { err = msgerr; } if (err_is_fail(err)) { USER_PANIC_ERR(err, "failed in get_bootinfo"); return err; } err = vspace_map_one_frame((void**)bootinfo, bootinfo_size, bootinfo_frame, NULL, NULL); assert(err_is_ok(err)); return err; }
/**
 * \brief Send a multi-hop message that contains no payload.
 * It is used to acknowledge received messages.
 *
 * \param mc pointer to the multi-hop channel
 */
static void multihop_send_dummy_message(struct multihop_chan *mc)
{
    assert(mc->connstate == MULTIHOP_CONNECTED);

#if MULTIHOP_FLOW_CONTROL

    MULTIHOP_DEBUG("sending dummy message, ack %d...\n", mc->unacked_received);

    errval_t err;
    struct monitor_binding *monitor_binding = mc->monitor_binding;

    // send message (the dummy flag marks it as a pure acknowledgement)
    err = monitor_binding->tx_vtbl.multihop_message(monitor_binding, NOP_CONT,
            mc->vci, mc->direction, MULTIHOP_MESSAGE_FLAG_DUMMY,
            mc->unacked_received, (uint8_t *) mc, 1);

    if (err_is_ok(err)) {
        // we have just acknowledged all received messages
        mc->unacked_received = 0;
    } else if (err_no(err) != FLOUNDER_ERR_TX_BUSY) {
        // TX_BUSY is tolerated: the ack can piggy-back on a later message
        USER_PANIC_ERR(err,
                "Could not send dummy message over multi-hop channel\n");
    }
#endif // MULTIHOP_FLOW_CONTROL
}
/**
 * \brief Internal function to send a reply back to the monitor
 *
 * Sends a multi-hop bind success/failure reply while holding the monitor
 * binding's event mutex; the mutex is released and the state freed once
 * the send completes or fails for good. Re-registers itself on TX_BUSY.
 *
 * \param st pointer to a struct bind_multihop_reply_state (owned here)
 */
static void send_bind_reply(void *st)
{

    errval_t err;
    struct bind_multihop_reply_state *reply_state = st;
    struct monitor_binding *monitor_binding = reply_state->monitor_binding;

    // send back a bind success / failure message to the monitor
    MULTIHOP_DEBUG("sending reply back to monitor...\n");

    err = monitor_binding->tx_vtbl.multihop_bind_service_reply(monitor_binding,
            NOP_CONT, reply_state->args.receiver_vci,
            reply_state->args.sender_vci, reply_state->args.err);

    if (err_is_ok(err)) {
        event_mutex_unlock(&monitor_binding->mutex);
        free(reply_state);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // channel busy: retry this function once the binding can send again
        err = monitor_binding->register_send(monitor_binding,
                monitor_binding->waitset,
                MKCONT(send_bind_reply, reply_state));
        assert(err_is_ok(err)); // this shouldn't fail, as we have the mutex
    } else {
        event_mutex_unlock(&monitor_binding->mutex);
        USER_PANIC_ERR(err,
                "failed sending back reply to multi-hop bind request to monitor");
        free(st);
    }
}
/**
 * \brief Send an LMP bind reply back to the monitor.
 *
 * Runs while holding the binding's event mutex; the mutex is released and
 * the state freed once the reply is sent or fails for good. On TX_BUSY
 * the function re-registers itself to retry.
 *
 * \param arg pointer to a struct bind_lmp_reply_state (owned here)
 */
static void send_bind_reply(void *arg)
{
    struct bind_lmp_reply_state *st = arg;
    struct monitor_binding *b = st->b;
    errval_t err;

    err = st->b->tx_vtbl.bind_lmp_reply_monitor(st->b, NOP_CONT, st->args.err,
                                                st->args.mon_id,
                                                st->args.conn_id,
                                                st->args.ep);
    if (err_is_ok(err)) {
        event_mutex_unlock(&b->mutex);
        free(st);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // channel busy: retry once the binding can send again
        err = st->b->register_send(st->b, st->b->waitset,
                                   MKCONT(send_bind_reply,st));
        assert(err_is_ok(err)); // shouldn't fail, as we have the mutex
    } else {
        event_mutex_unlock(&b->mutex);
        USER_PANIC_ERR(err, "failed sending back reply to LMP bind request;"
                       " request dropped!");
        if (st->lc != NULL) {
            // tear down the half-established channel
            lmp_chan_destroy(st->lc);
            // FIXME: how do we tell the binding about this!?
        }
        free(st);
    }
}
static void vfs_load_file_to_memory (const char *file, void **data, size_t *size) { assert(data != NULL); assert(size != NULL); errval_t err; vfs_handle_t vh; err = vfs_open(file, &vh); if (err_is_fail(err)) { USER_PANIC_ERR(err, "Error opening %s", file); } struct vfs_fileinfo info; err = vfs_stat(vh, &info); assert_err(err, "vfs_stat"); *data = malloc(info.size); assert(*data != NULL); err = vfs_read(vh, *data, info.size, size); assert_err(err, "vfs_read"); assert(*size == info.size); vfs_close(vh); }