static void revoke_no_remote(struct revoke_master_st *st)
{
    assert(num_monitors_online() == 1);

    if (!delete_steps_get_waitset()) {
        delete_steps_init(get_default_waitset());
    }

    errval_t err;
    DEBUG_CAPOPS("%s\n", __FUNCTION__);

    // pause deletion steps
    DEBUG_CAPOPS("%s: delete_steps_pause()\n", __FUNCTION__);
    delete_steps_pause();

    // mark target of revoke
    DEBUG_CAPOPS("%s: mon_revoke_mark_tgt()\n", __FUNCTION__);
    err = monitor_revoke_mark_target(st->cap.croot, st->cap.cptr,
                                     st->cap.bits);
    PANIC_IF_ERR(err, "marking revoke");

    // resume delete steps
    DEBUG_CAPOPS("%s: delete_steps_resume()\n", __FUNCTION__);
    delete_steps_resume();

    // wait on delete queue, marking that remote cores are done
    st->remote_fin = true;
    DEBUG_CAPOPS("%s: delete_queue_wait()\n", __FUNCTION__);
    struct event_closure steps_fin_cont
        = MKCLOSURE(revoke_master_steps__fin, st);
    delete_queue_wait(&st->del_qn, steps_fin_cont);
}
static void cap_send_request_tx_cont(errval_t err,
                                     struct captx_prepare_state *captx_st,
                                     intermon_captx_t *captx, void *st_)
{
    DEBUG_CAPOPS("%s: %s [%p]\n", __FUNCTION__, err_getstring(err),
                 __builtin_return_address(0));

    errval_t queue_err;
    struct send_cap_st *send_st = (struct send_cap_st*)st_;

    if (err_is_fail(err)) {
        // XXX: should forward error here
        DEBUG_ERR(err, "preparing cap tx failed");
        free(send_st);
        return;
    }

    send_st->captx = *captx;

    DEBUG_CAPOPS("%s: enqueueing send\n", __FUNCTION__);
    send_st->qe.cont = cap_send_tx_cont;
    struct remote_conn_state *conn = remote_conn_lookup(send_st->my_mon_id);
    struct intermon_binding *binding = conn->mon_binding;
    struct intermon_state *inter_st = (struct intermon_state*)binding->st;
    queue_err = intermon_enqueue_send(binding, &inter_st->queue,
                                      binding->waitset,
                                      (struct msg_queue_elem*)send_st);
    if (err_is_fail(queue_err)) {
        DEBUG_ERR(queue_err, "enqueuing cap_send_request failed");
        free(send_st);
    }
}
static void revoke_result__rx(errval_t result, struct revoke_master_st *st,
                              bool locked)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    errval_t err;

    if (locked) {
        caplock_unlock(st->cap);
    }

    if (err_is_ok(result)) {
        // clear the remote copies bit
        err = monitor_domcap_remote_relations(st->cap.croot, st->cap.cptr,
                                              st->cap.bits, 0, RRELS_COPY_BIT,
                                              NULL);
        if (err_is_fail(err) && err_no(err) != SYS_ERR_CAP_NOT_FOUND) {
            DEBUG_ERR(err, "resetting remote copies bit after revoke");
        }
    }

    DEBUG_CAPOPS("%s ## revocation completed, calling %p\n", __FUNCTION__,
                 st->result_handler);

    st->result_handler(result, st->st);
    free(st);
}
void capops_revoke(struct domcapref cap,
                   revoke_result_handler_t result_handler, void *st)
{
    errval_t err;

    DEBUG_CAPOPS("%s ## start revocation protocol\n", __FUNCTION__);

    distcap_state_t state;
    err = dom_cnode_get_state(cap, &state);
    GOTO_IF_ERR(err, report_error);

    if (distcap_state_is_busy(state)) {
        err = MON_ERR_REMOTE_CAP_RETRY;
        goto report_error;
    }

    struct revoke_master_st *rst;
    err = calloce(1, sizeof(*rst), &rst);
    GOTO_IF_ERR(err, report_error);
    rst->cap = cap;
    err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.bits,
                                       &rst->rawcap);
    GOTO_IF_ERR(err, free_st);
    rst->result_handler = result_handler;
    rst->st = st;

    if (distcap_state_is_foreign(state)) {
        // need to retrieve ownership
        DEBUG_CAPOPS("%s getting cap ownership\n", __FUNCTION__);
        capops_retrieve(rst->cap, revoke_retrieve__rx, rst);
    } else {
        if (num_monitors_online() == 1) {
            DEBUG_CAPOPS("%s: only one monitor: do simpler revoke\n",
                         __FUNCTION__);
            // no remote monitors exist; do simplified revocation process
            revoke_no_remote(rst);
            return;
        }
        // have ownership, initiate revoke
        revoke_local(rst);
    }

    return;

free_st:
    free(rst);

report_error:
    result_handler(err, st);
}
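/*
 * Illustrative sketch only (not part of the original source): one way a
 * caller inside the monitor might drive capops_revoke. The handler signature
 * (errval_t, void *) is taken from revoke_result_handler_t as used above;
 * my_op_state, my_revoke_done and start_revoke are hypothetical names.
 */
struct my_op_state {
    struct domcapref cap;   // capability being revoked
};

static void my_revoke_done(errval_t status, void *st)
{
    struct my_op_state *op = st;
    if (err_is_fail(status)) {
        DEBUG_ERR(status, "revoke failed");
    }
    // the revocation protocol has finished (or failed); the per-operation
    // state can now be released
    free(op);
}

static void start_revoke(struct domcapref cap)
{
    struct my_op_state *op = calloc(1, sizeof(*op));
    assert(op != NULL);
    op->cap = cap;
    // capops_revoke returns immediately; completion is reported through the
    // handler once the mark/commit/done phases have run on all cores (or
    // directly via revoke_no_remote when only one monitor is online).
    capops_revoke(cap, my_revoke_done, op);
}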
errval_t capops_init(struct waitset *ws, struct intermon_binding *b)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    assert(ws != NULL);

    b->rx_vtbl.capops_request_copy            = request_copy__rx;
    b->rx_vtbl.capops_recv_copy               = recv_copy__rx;
    b->rx_vtbl.capops_recv_copy_result        = recv_copy_result__rx;
    b->rx_vtbl.capops_move_request            = move_request__rx_handler;
    b->rx_vtbl.capops_move_result             = move_result__rx_handler;
    b->rx_vtbl.capops_retrieve_request        = retrieve_request__rx;
    b->rx_vtbl.capops_retrieve_result         = retrieve_result__rx;
    b->rx_vtbl.capops_delete_remote           = delete_remote__rx;
    b->rx_vtbl.capops_delete_remote_result    = delete_remote_result__rx;
    b->rx_vtbl.capops_revoke_mark             = revoke_mark__rx;
    b->rx_vtbl.capops_revoke_ready            = revoke_ready__rx;
    b->rx_vtbl.capops_revoke_commit           = revoke_commit__rx;
    b->rx_vtbl.capops_revoke_done             = revoke_done__rx;
    b->rx_vtbl.capops_request_retype          = retype_request__rx;
    b->rx_vtbl.capops_retype_response         = retype_response__rx;
    b->rx_vtbl.capops_update_owner            = update_owner__rx_handler;
    b->rx_vtbl.capops_owner_updated           = owner_updated__rx_handler;
    b->rx_vtbl.capops_find_cap                = find_cap__rx_handler;
    b->rx_vtbl.capops_find_cap_result         = find_cap_result__rx_handler;
    b->rx_vtbl.capops_find_descendants        = find_descendants__rx_handler;
    b->rx_vtbl.capops_find_descendants_result = find_descendants_result__rx_handler;

    delete_steps_init(ws);

    return SYS_ERR_OK;
}
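/*
 * Illustrative sketch only (assumption, not original code): capops_init is
 * presumably invoked once per intermon binding after the connection is
 * established, so that the rx_vtbl handlers above are installed before any
 * distops traffic arrives. register_capops_for_binding is a hypothetical
 * wrapper.
 */
static errval_t register_capops_for_binding(struct intermon_binding *b)
{
    errval_t err = capops_init(get_default_waitset(), b);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "initializing capops on intermon binding");
    }
    return err;
}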
static void delete_reply_status(errval_t status, void *st)
{
    DEBUG_CAPOPS("sending cap_delete reply msg: %s\n", err_getstring(status));
    struct monitor_blocking_binding *b = (struct monitor_blocking_binding*)st;
    errval_t err = b->tx_vtbl.remote_cap_delete_response(b, NOP_CONT, status);
    assert(err_is_ok(err));
}
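/*
 * Revoke protocol overview, as reconstructed from the handlers in this
 * section (added summary; the phrasing is not from the original source):
 *
 *  1. mark:   the owner (master) marks its local copies and descendants and
 *             multicasts revoke_mark to cores that may hold relations
 *             (revoke_local, handled remotely by revoke_mark__rx).
 *  2. ready:  each slave marks its local relations, pauses delete stepping,
 *             and replies with revoke_ready (enqueued via capsend_target).
 *  3. commit: once all ready replies are in, the master multicasts
 *             revoke_commit and resumes its own delete steps
 *             (revoke_ready__rx).
 *  4. done:   slaves report revoke_done; when that multicast completes and
 *             the local deletes have finished, revoke_result__rx invokes the
 *             caller's result handler (revoke_done__rx).
 *
 * The commit-phase slave handler (revoke_commit__rx) is registered in
 * capops_init above but not shown in this section.
 */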
void revoke_mark__rx(struct intermon_binding *b, intermon_caprep_t caprep,
                     genvaddr_t st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);

    errval_t err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    struct revoke_slave_st *rvk_st;
    err = calloce(1, sizeof(*rvk_st), &rvk_st);
    PANIC_IF_ERR(err, "allocating revoke slave state");

    rvk_st->from = inter_st->core_id;
    rvk_st->st = st;
    caprep_to_capability(&caprep, &rvk_st->rawcap);

    if (!slaves_head) {
        assert(!slaves_tail);
        slaves_head = slaves_tail = rvk_st;
    } else {
        assert(slaves_tail);
        assert(!slaves_tail->next);
        slaves_tail->next = rvk_st;
        slaves_tail = rvk_st;
    }

    // pause any ongoing "delete stepping" as mark phases on other nodes need
    // to delete all foreign copies before we can delete locally owned caps
    delete_steps_pause();

    // XXX: this invocation could create a scheduling hole that could be
    // problematic in RT systems and should probably be done in a loop.
    err = monitor_revoke_mark_relations(&rvk_st->rawcap);
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        // found no copies or descendants of capability on this core,
        // do nothing. -SG
        DEBUG_CAPOPS("no copies on core %d\n", disp_get_core_id());
    } else if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "marking revoke");
    }

    rvk_st->im_qn.cont = revoke_ready__send;
    err = capsend_target(rvk_st->from, (struct msg_queue_elem*)rvk_st);
    PANIC_IF_ERR(err, "enqueuing revoke_ready");
}
void revoke_done__rx(struct intermon_binding *b, genvaddr_t st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);

    struct revoke_master_st *rvk_st = (struct revoke_master_st*)(lvaddr_t)st;
    if (!capsend_handle_mc_reply(&rvk_st->revoke_mc_st)) {
        // multicast not complete
        return;
    }

    DEBUG_CAPOPS("%s ## revocation: fin phase\n", __FUNCTION__);
    rvk_st->remote_fin = true;
    if (rvk_st->local_fin) {
        revoke_result__rx(SYS_ERR_OK, rvk_st, true);
    }
}
static void revoke_local(struct revoke_master_st *st)
{
    DEBUG_CAPOPS("%s: called from %p\n", __FUNCTION__,
                 __builtin_return_address(0));
    errval_t err;

    delete_steps_pause();

    err = monitor_revoke_mark_target(st->cap.croot, st->cap.cptr,
                                     st->cap.bits);
    PANIC_IF_ERR(err, "marking revoke");

    DEBUG_CAPOPS("%s ## revocation: mark phase\n", __FUNCTION__);
    // XXX: could check whether remote copies exist here(?), -SG, 2014-11-05
    err = capsend_relations(&st->rawcap, revoke_mark__send,
                            &st->revoke_mc_st, &st->dests);
    PANIC_IF_ERR(err, "initiating revoke mark multicast");
}
static void cap_send_tx_cont(struct intermon_binding *b,
                             struct intermon_msg_queue_elem *e)
{
    DEBUG_CAPOPS("%s: %p %p\n", __FUNCTION__, b, e);
    errval_t send_err;
    struct send_cap_st *st = (struct send_cap_st*)e;
    struct remote_conn_state *conn = remote_conn_lookup(st->my_mon_id);
    send_err = intermon_cap_send_request__tx(b, NOP_CONT, conn->mon_id,
                                             st->capid, st->captx);
    if (err_is_fail(send_err)) {
        DEBUG_ERR(send_err, "sending cap_send_request failed");
    }
    free(st);
}
void revoke_ready__rx(struct intermon_binding *b, genvaddr_t st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);

    errval_t err;

    struct revoke_master_st *rvk_st = (struct revoke_master_st*)(lvaddr_t)st;
    if (!capsend_handle_mc_reply(&rvk_st->revoke_mc_st)) {
        DEBUG_CAPOPS("%s: waiting for remote cores\n", __FUNCTION__);
        // multicast not complete
        return;
    }

    DEBUG_CAPOPS("%s ## revocation: commit phase\n", __FUNCTION__);
    err = capsend_relations(&rvk_st->rawcap, revoke_commit__send,
                            &rvk_st->revoke_mc_st, &rvk_st->dests);
    PANIC_IF_ERR(err, "enqueuing revoke_commit multicast");

    delete_steps_resume();

    struct event_closure steps_fin_cont
        = MKCLOSURE(revoke_master_steps__fin, rvk_st);
    delete_queue_wait(&rvk_st->del_qn, steps_fin_cont);
}
errval_t capsend_target(coreid_t dest, struct msg_queue_elem *queue_elem)
{
    errval_t err;

    // get destination intermon_binding and _state
    struct intermon_binding *dest_b;
    err = intermon_binding_get(dest, &dest_b);
    if (err_is_fail(err)) {
        return err;
    }
    DEBUG_CAPOPS("capsend_target: ->%d (%p)\n", dest, queue_elem);
    struct intermon_state *inter_st = (struct intermon_state*)dest_b->st;
    if (!inter_st->capops_ready) {
        // XXX: custom error value
        return MON_ERR_CAPOPS_BUSY;
    }

    // enqueue message
    return intermon_enqueue_send(dest_b, &inter_st->queue, dest_b->waitset,
                                 queue_elem);
}
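/*
 * Illustrative sketch only (not original code): the usual calling pattern for
 * capsend_target, mirroring revoke_mark__rx above. The per-operation state
 * embeds an intermon_msg_queue_elem (apparently as its first member, since
 * callers cast the whole state to struct msg_queue_elem*), sets the send
 * continuation, and enqueues it. my_slave_st, my_reply__send and
 * my_enqueue_reply are hypothetical names.
 */
struct my_slave_st {
    struct intermon_msg_queue_elem qe;  // queue element, assumed first member
    coreid_t from;                      // core to reply to
};

static void my_reply__send(struct intermon_binding *b,
                           struct intermon_msg_queue_elem *e);

static void my_enqueue_reply(struct my_slave_st *st)
{
    st->qe.cont = my_reply__send;
    errval_t err = capsend_target(st->from, (struct msg_queue_elem*)st);
    if (err_no(err) == MON_ERR_CAPOPS_BUSY) {
        // destination monitor has not announced capops_ready yet; a real
        // caller would retry later or fail the whole operation
        return;
    }
    PANIC_IF_ERR(err, "enqueuing reply");
}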
static void cap_send_request(struct monitor_binding *b, uintptr_t my_mon_id,
                             struct capref cap, uint32_t capid)
{
    DEBUG_CAPOPS("cap_send_request\n");
    errval_t err;
    struct remote_conn_state *conn = remote_conn_lookup(my_mon_id);

    struct send_cap_st *st;
    st = calloc(1, sizeof(*st));
    if (!st) {
        err = LIB_ERR_MALLOC_FAIL;
        DEBUG_ERR(err, "Failed to allocate cap_send_request state");
        // XXX: should forward error here
        return;
    }
    st->my_mon_id = my_mon_id;
    st->cap = cap;
    st->capid = capid;

    captx_prepare_send(cap, conn->core_id, true, &st->captx_state,
                       cap_send_request_tx_cont, st);
}
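/*
 * Descriptive note (added, not from the original source): capsend_broadcast
 * below enqueues one message per online monitor, skipping the local core.
 * If `dests` is non-NULL and its set is still empty, the set is filled with
 * the cores that actually accepted a message, so a later phase (e.g. the
 * revoke_commit multicast after revoke_mark) can be sent to exactly the same
 * cores by passing the populated set back in. Cores without a monitor
 * connection, or whose monitor has not yet signalled capops_ready, are
 * skipped.
 */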
static errval_t capsend_broadcast(struct capsend_mc_st *bc_st,
                                  struct capsend_destset *dests,
                                  struct capability *cap,
                                  capsend_send_fn send_cont)
{
    errval_t err;
    size_t dest_count;
    bool init_destset = false;
    size_t online_monitors = num_monitors_online();
    // do not count self when calculating #dest cores
    dest_count = online_monitors - 1;
    DEBUG_CAPOPS("%s: dest_count = %zu\n", __FUNCTION__, dest_count);
    DEBUG_CAPOPS("%s: num_queued = %d\n", __FUNCTION__, bc_st->num_queued);
    DEBUG_CAPOPS("%s: num_pending = %d\n", __FUNCTION__, bc_st->num_pending);
    if (dests && dests->set == NULL) {
        dests->set = calloc(dest_count, sizeof(coreid_t));
        dests->capacity = dest_count;
        dests->count = 0;
        init_destset = true;
    } else if (dests) {
        dest_count = dests->count;
    }
    err = capsend_mc_init(bc_st, cap, send_cont, dest_count, true);
    if (err_is_fail(err)) {
        // bc_st must not be used after this point
        free(bc_st);
        return err;
    }

    if (init_destset || !dests) {
        for (coreid_t dest = 0;
             dest < MAX_COREID && bc_st->num_queued < dest_count;
             dest++)
        {
            if (dest == my_core_id) {
                // do not send to self
                continue;
            }
            err = capsend_mc_enqueue(bc_st, dest);
            if (err_is_ok(err) && dests) {
                // if we're initializing destination set, add destination
                // cores that we were able to enqueue msg for to set.
                dests->set[dests->count++] = dest;
            }
            if (err_no(err) == MON_ERR_NO_MONITOR_FOR_CORE) {
                // no connection for this core, skip
                continue;
            } else if (err_no(err) == MON_ERR_CAPOPS_BUSY) {
                debug_printf("monitor.%d not ready to participate in distops, skipping\n", dest);
            } else if (err_is_fail(err)) {
                // failure, disable broadcast
                bc_st->do_send = false;
                if (!bc_st->num_queued) {
                    // only clean up if no messages have been enqueued
                    free(bc_st->msg_st_arr);
                    free(bc_st);
                }
                return err;
            }
        }
    } else {
        for (int i = 0; i < dest_count; i++) {
            coreid_t dest = dests->set[i];
            err = capsend_mc_enqueue(bc_st, dest);
            if (err_no(err) == MON_ERR_NO_MONITOR_FOR_CORE) {
                // no connection for this core, skip
                continue;
            } else if (err_no(err) == MON_ERR_CAPOPS_BUSY) {
                debug_printf("monitor.%d not ready to participate in distops, skipping\n", dest);
            } else if (err_is_fail(err)) {
                // failure, disable broadcast
                bc_st->do_send = false;
                if (!bc_st->num_queued) {
                    // only clean up if no messages have been enqueued
                    free(bc_st->msg_st_arr);
                    free(bc_st);
                }
                return err;
            }
        }
    }

    if (!bc_st->num_pending && dest_count > 1) {
        // XXX: needs sane error -SG
        return MON_ERR_NO_MONITOR_FOR_CORE;
    }

    return SYS_ERR_OK;
}
static errval_t boot_app_core(int argc, char *argv[])
{
    coreid_t parent_core_id;
    struct intermon_binding *intermon_binding;
    errval_t err;

#ifndef __scc__
    /* Create the self endpoint as the kernel doesn't do it */
    err = cap_retype(cap_selfep, cap_dispatcher, ObjType_EndPoint, 0);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Retyping dispatcher to self ep failed");
        return err;
    }
#endif

    err = boot_arch_app_core(argc, argv, &parent_core_id, &intermon_binding);
    if (err_is_fail(err)) {
        return err;
    }

    // connect it to our request handlers
    intermon_init(intermon_binding, parent_core_id);

    /* Request memserv and nameserv iref */
#ifndef __scc__
    err = request_mem_serv_iref(intermon_binding);
    assert(err_is_ok(err));
#endif
    err = request_name_serv_iref(intermon_binding);
    assert(err_is_ok(err));
    err = request_ramfs_serv_iref(intermon_binding);
    assert(err_is_ok(err));

#ifdef BARRELFISH_MULTIHOP_CHAN_H
    // request my part of the routing table
    err = multihop_request_routing_table(intermon_binding);
    assert(err_is_ok(err));
#endif // BARRELFISH_MULTIHOP_CHAN_H

#ifndef __scc__
    /* initialize self ram alloc */
    err = mon_ram_alloc_init(parent_core_id, intermon_binding);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_RAM_ALLOC_SET);
    }
#endif

    /* with memory alloc running, take part in cap ops */
    DEBUG_CAPOPS("sending capops_ready to %"PRIuCOREID"\n", parent_core_id);
    err = intermon_binding->tx_vtbl.capops_ready(intermon_binding, NOP_CONT);
    if (err_is_fail(err)) {
        return err_push(err, MON_ERR_SEND_REMOTE_MSG);
    }
    ((struct intermon_state*)intermon_binding->st)->capops_ready = true;

    /* Set up monitor rpc channel */
    err = monitor_rpc_init();
    if (err_is_fail(err)) {
        return err;
    }

#ifdef TRACING_EXISTS
    // Request trace caps
    err = request_trace_caps(intermon_binding);
    assert(err_is_ok(err));
#endif

    // Spawn local spawnd
#ifdef __scc__
    err = spawn_domain("spawnd");
#else
    err = spawn_spawnd(intermon_binding);
#endif
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "error spawning spawnd");
    }

    /* Signal the monitor that booted us that we have initialized */
    err = intermon_binding->tx_vtbl.monitor_initialized(intermon_binding,
                                                        NOP_CONT);
    if (err_is_fail(err)) {
        return err_push(err, MON_ERR_SEND_REMOTE_MSG);
    }

    return SYS_ERR_OK;
}