/**
 * \brief Completes an LMP bind on the monitor side and forwards the reply
 *        to the client domain.
 *
 * Looks up the monitor-side connection state for \p mon_conn_id, frees it,
 * and forwards either the failure \p msgerr or a success reply (carrying the
 * endpoint cap \p ep) to the client domain's binding.
 *
 * \param b           Binding the reply arrived on.
 * \param msgerr      Error code carried by the reply message.
 * \param mon_conn_id Monitor-side connection identifier to look up and free.
 * \param user_conn_id Unused here; client-side id is taken from the
 *                     connection state instead.
 * \param ep          Endpoint capability for the new channel.
 */
static void bind_lmp_reply(struct monitor_binding *b,
                           errval_t msgerr, uintptr_t mon_conn_id,
                           uintptr_t user_conn_id, struct capref ep)
{
    errval_t err;
    struct monitor_binding *client_binding = NULL;

    // Resolve the monitor-side connection state for this bind attempt
    struct lmp_conn_state *conn = lmp_conn_lookup(mon_conn_id);
    if (conn == NULL) {
        DEBUG_ERR(0, "invalid connection ID");
        goto cleanup;
    }

    client_binding = conn->domain_binding;
    uintptr_t client_conn_id = conn->domain_id;

    // Connection state is no longer needed once the reply is forwarded
    err = lmp_conn_free(mon_conn_id);
    assert(err_is_ok(err));

    if (err_is_fail(msgerr)) {
        // Bind failed: propagate the error; mon_conn_id of 0 marks failure
        bind_lmp_reply_client_cont(client_binding, msgerr, 0, client_conn_id,
                                   ep, b);
    } else {
        bind_lmp_reply_client_cont(client_binding, SYS_ERR_OK, mon_conn_id,
                                   client_conn_id, ep, b);
    }
    return;

cleanup:
    /* Delete the ep cap */
    // XXX: Do not delete the cap if client or service is monitor
    // NOTE(review): this label is only reached before client_binding is
    // assigned, so client_binding is always NULL here and the first half of
    // the guard is vacuously true -- confirm whether the check was meant to
    // read conn->domain_binding.
    if (client_binding != &monitor_self_binding && b != &monitor_self_binding) {
        err = cap_destroy(ep);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy failed");
        }
    }
}
/**
 * \brief Non-blocking name service lookup
 *
 * Queries octopus for the record registered under \p iface and extracts the
 * iref it advertises.
 *
 * \param iface   Name of interface for which to query name server
 * \param retiref Returns pointer to IREF on success (may be NULL)
 *
 * \return SYS_ERR_OK on success;
 *         LIB_ERR_NAMESERVICE_NOT_BOUND if no octopus client is available;
 *         LIB_ERR_NAMESERVICE_UNKNOWN_NAME if no record exists;
 *         LIB_ERR_NAMESERVICE_INVALID_NAME if the record has no usable iref.
 */
errval_t nameservice_lookup(const char *iface, iref_t *retiref)
{
    errval_t err;

    struct octopus_rpc_client *rpc = get_octopus_rpc_client();
    if (rpc == NULL) {
        return LIB_ERR_NAMESERVICE_NOT_BOUND;
    }

    // Fetch the raw record for this interface name
    char *record = NULL;
    octopus_trigger_id_t tid;
    errval_t query_err;
    err = rpc->vtbl.get(rpc, iface, NOP_TRIGGER, &record, &tid, &query_err);
    if (err_is_fail(err)) {
        goto done;
    }

    err = query_err;
    if (err_is_fail(err)) {
        if (err_no(err) == OCT_ERR_NO_RECORD) {
            // Translate "no record" into the nameservice-level error
            err = err_push(err, LIB_ERR_NAMESERVICE_UNKNOWN_NAME);
        }
        goto done;
    }

    // Parse the iref out of the record; 0 is not a valid iref
    uint64_t iref_value = 0;
    err = oct_read(record, "_ { iref: %d }", &iref_value);
    if (err_is_fail(err) || iref_value == 0) {
        err = err_push(err, LIB_ERR_NAMESERVICE_INVALID_NAME);
        goto done;
    }

    if (retiref != NULL) {
        *retiref = iref_value;
    }

done:
    free(record);   // free(NULL) is a safe no-op
    return err;
}
/*
 * RPC wrapper functions
 */

/**
 * \brief Synchronous wrapper around the spawn_arrakis_domain message pair.
 *
 * Sends the call, spins on the RPC waitset until both the reply has arrived
 * and the binding can send again, then copies the reply values out of the
 * binding's rx_union.
 */
static errval_t arrakis_spawn_arrakis_domain__rpc(struct arrakis_rpc_client *_rpc,
        const char *path, const char *argvbuf, size_t argvbytes,
        const char *envbuf, size_t envbytes,
        arrakis_errval_t *err, arrakis_domainid_t *domain_id)
{
    errval_t _err = SYS_ERR_OK;

    // exactly one RPC may be outstanding per client
    assert(!_rpc->rpc_in_progress);
    assert(_rpc->async_error == SYS_ERR_OK);
    _rpc->rpc_in_progress = true;
    _rpc->reply_present = false;

    // issue the call message
    _err = _rpc->b->tx_vtbl.spawn_arrakis_domain_call(_rpc->b, NOP_CONT, path,
            argvbuf, argvbytes, envbuf, envbytes);
    if (err_is_fail(_err)) {
        goto out;
    }

    // block until the reply arrived and the send completed, or an
    // asynchronous error was flagged by the error handler
    while ((!_rpc->reply_present || !_rpc->b->can_send(_rpc->b))
           && _rpc->async_error == SYS_ERR_OK) {
        _err = event_dispatch(&_rpc->rpc_waitset);
        if (err_is_fail(_err)) {
            _err = err_push(_err, LIB_ERR_EVENT_DISPATCH);
            goto out;
        }
    }

    if (err_is_fail(_rpc->async_error)) {
        // surface (and clear) the async error as the RPC's result
        _err = _rpc->async_error;
        _rpc->async_error = SYS_ERR_OK;
        goto out;
    }

    // copy reply values out of the binding
    struct arrakis_binding *_b = _rpc->b;
    *err = _b->rx_union.spawn_arrakis_domain_response.err;
    *domain_id = _b->rx_union.spawn_arrakis_domain_response.domain_id;

out:
    _rpc->rpc_in_progress = false;
    return _err;
}
static void send_ints_ready(void *a) { errval_t err; struct xmplmsg_binding *b = (struct xmplmsg_binding*)a; struct event_closure txcont = MKCONT(send_ints_cb, b); err = xmplmsg_msg_ints__tx(b, txcont, 0x1, 0x10); if (err_is_fail(err)) { DEBUG_ERR(err, "error sending msg_ints message\n"); } }
/**
 * \brief Default driver start function: spawns the driver binary, appending
 *        a "vendor:device" PCI id argument when the record describes a device.
 *
 * \param where  Core on which to spawn the driver.
 * \param mi     Module info describing path and base argument vector.
 * \param record Octopus record; if it contains vendor/device fields, a
 *               "vvvv:dddd" hex argument is appended to the argv.
 *
 * \return SYS_ERR_OK / spawn error, or KALUGA_ERR_* if the driver is already
 *         started or not marked for auto-start.
 */
errval_t default_start_function(coreid_t where, struct module_info* mi,
                                char* record)
{
    assert(mi != NULL);
    errval_t err = SYS_ERR_OK;

    if (is_started(mi)) {
        return KALUGA_ERR_DRIVER_ALREADY_STARTED;
    }
    if (!is_auto_driver(mi)) {
        return KALUGA_ERR_DRIVER_NOT_AUTO;
    }

    // Construct additional command line arguments containing pci-id.
    uint64_t vendor_id, device_id;
    char **argv = mi->argv;
    bool cleanup = false;
    size_t argc = mi->argc;
    err = oct_read(record, "_ { vendor: %d, device_id: %d }",
                   &vendor_id, &device_id);
    if (err_is_ok(err)) {
        // We assume that we're starting a device if the query above succeeds
        // and therefore append the pci vendor and device id to the argument
        // list.
        // Fixed: the array previously had argc+1 slots while argc+2 entries
        // were written (pci-id argument + NULL terminator), overflowing the
        // allocation by one pointer.
        argv = malloc((argc + 2) * sizeof(char *));
        assert(argv != NULL);
        memcpy(argv, mi->argv, argc * sizeof(char *));
        char *pci_id = malloc(10);          // "vvvv:dddd" + NUL = 10 bytes
        assert(pci_id != NULL);
        // Fixed: four hex digits hold values up to 0xffff; the old bound of
        // 0x9999 wrongly rejected valid ids in the range 0x9999..0xffff.
        assert(vendor_id <= 0xffff && device_id <= 0xffff);
        snprintf(pci_id, 10, "%04"PRIx64":%04"PRIx64, vendor_id, device_id);
        argv[argc] = pci_id;
        argv[argc + 1] = NULL;
        cleanup = true;
        // Fixed: mi->argc is no longer permanently incremented -- the old
        // code bumped it without updating mi->argv, leaving the module_info
        // inconsistent for any later use.
    }

    err = spawn_program(where, mi->path, argv, environ, 0, &mi->did);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Spawning %s failed.", mi->path);
    }

    if (cleanup) {
        // alloc'd pci-id string is the last real element of our array
        free(argv[argc]);
        free(argv);
    }
    return err;
}
/**
 * \brief Mounts a FAT32 filesystem at /fat and walks its directory tree.
 *
 * Panics if the mount fails; the mkdir is best-effort (the directory may
 * already exist, and its result was never checked).
 */
int main(int argc, char *argv[])
{
    const char *mountpoint = "/fat";

    vfs_init();
    vfs_mkdir(mountpoint);

    errval_t err = vfs_mount(mountpoint, "fat32://0+0");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vfs_fat_mount failed");
    }

    walk_dir(mountpoint);
    return 0;
}
/**
 * \brief Dumps the bookkeeping list of a single region of a debug queue.
 *
 * \param que Debug queue to search.
 * \param rid Region id identifying the region to dump.
 *
 * \return SYS_ERR_OK on success, or the error from find_region if the
 *         region id is unknown.
 */
errval_t debug_dump_region(struct debug_q *que, regionid_t rid)
{
    errval_t err;

    // find region
    struct memory_list *region = NULL;
    // Fixed: the argument read '®ion' -- a text-encoding mangling of
    // '&region' (the "&re" collapsed into the (R) glyph) that does not compile.
    err = find_region(que, &region, rid);
    if (err_is_fail(err)) {
        return err;
    }

    dump_list(region);
    return SYS_ERR_OK;
}
/**
 * \brief Map init user-space memory.
 *
 * This function maps pages of the init user-space module. It expects
 * the virtual base address 'vbase' of a program segment of the init executable,
 * its size 'size' and its ELF64 access control flags. It maps pages
 * into physical memory that is allocated on the fly and puts
 * corresponding frame caps into init's segcn.
 *
 * \param state  Opaque spawn_state pointer for this boot.
 * \param gvbase Virtual base address of program segment.
 * \param size   Size of program segment in bytes.
 * \param flags  ELF64 access control flags of program segment.
 * \param ret    Used to return base region pointer
 */
errval_t startup_alloc_init(void *state, genvaddr_t gvbase, size_t size,
                            uint32_t flags, void **ret)
{
    errval_t err;
    struct spawn_state *spawn_state = state;

    lvaddr_t vbase = (lvaddr_t)gvbase; /* XXX */
    // Remember the sub-page offset so the caller gets back the original
    // (unaligned) address within the mapped region
    lvaddr_t offset = BASE_PAGE_OFFSET(vbase);

    /* Page align the parameters */
    paging_align(&vbase, NULL, &size, BASE_PAGE_SIZE);

    // pbase records the first physical page allocated; paddr tracks the last.
    // NOTE(review): this assumes the per-page allocations below are
    // physically contiguous -- confirm bsp/app_alloc_phys guarantee that.
    lpaddr_t pbase = 0, paddr = 0;
    for(lvaddr_t i = vbase; i < vbase + size; i += BASE_PAGE_SIZE) {
        // Allocate one physical page from the appropriate boot allocator
        if (apic_is_bsp()) {
            paddr = bsp_alloc_phys(BASE_PAGE_SIZE);
        } else {
            paddr = app_alloc_phys(BASE_PAGE_SIZE);
        }

        if(pbase == 0) {
            pbase = paddr;
        }

        err = startup_map_init(i, paddr, BASE_PAGE_SIZE, flags);
        assert(err_is_ok(err));
    }

    if (apic_is_bsp()) {
        // Create frame caps for segcn
        // Advance past the last allocated page so [pbase, paddr) covers the
        // whole allocated range (exclusive end)
        paddr += BASE_PAGE_SIZE;

        debug(SUBSYS_STARTUP,
              "Allocated physical memory [0x%"PRIxLPADDR", 0x%"PRIxLPADDR"]\n",
              pbase, paddr - pbase);

        err = create_caps_to_cnode(pbase, paddr - pbase,
                                   RegionType_RootTask, spawn_state, bootinfo);
        if (err_is_fail(err)) {
            return err;
        }
    }

    assert(ret != NULL);
    // Return the original (pre-alignment) address inside the mapped region
    *ret = (void *)(vbase + offset);

    return SYS_ERR_OK;
}
/*
 * Error handler
 */

/**
 * \brief Asynchronous error handler for the USB manager RPC client.
 *
 * If an RPC is currently outstanding, the error is stashed for the waiting
 * RPC wrapper and the waitset is kicked so the wait loop observes it.
 * With no RPC in flight there is nobody to consume the error, so we panic.
 */
static void usb_manager_rpc_client_error(struct usb_manager_binding *_binding,
                                         errval_t _err)
{
    // get RPC client state pointer
    struct usb_manager_rpc_client *rpc = _binding->st;

    if (rpc->rpc_in_progress) {
        assert(err_is_fail(_err));
        // hand the error to the blocked RPC wrapper
        rpc->async_error = _err;

        // kick waitset with dummy event so the wait loop wakes up
        flounder_support_register(&rpc->rpc_waitset, &rpc->dummy_chanstate,
                                  dummy_event_closure, true);
    } else {
        USER_PANIC_ERR(_err, "async error in RPC");
    }
}
/**
 * \brief Puts the benchmark into UDP receive mode and dispatches events
 *        forever (or until a dispatch error).
 *
 * \param upcb        UDP control block to bind and receive on.
 * \param listen_ip   IP address to bind to.
 * \param listen_port Port to bind to.
 */
static void udp_receiver(struct udp_pcb *upcb, struct ip_addr *listen_ip,
                         uint16_t listen_port)
{
    printf("U: Going in UDP_RECEIVER mode\n");

    // Bind to specified port
    errval_t r = udp_bind(upcb, listen_ip, listen_port);
    if (err_is_fail(r)) {
        // NOTE(review): execution deliberately continues after a failed bind
        // (benchmark best-effort behavior?) -- confirm this is intended.
        DEBUG_ERR(r, "udp_bind:");
    }

    // Signal the benchmark controller that receiving starts now
    lwip_benchmark_control(connection_type, BMS_START_REQUEST,
                           iterations, rdtsc());

    udp_recv(upcb, udp_recv_handler, 0 /*client data, arg in callback*/);

    // Main event loop: exits only on a dispatch error
    while (true) {
        r = event_dispatch(ws);
        if (err_is_fail(r)) {
            DEBUG_ERR(r, "in event_dispatch");
            break;
        }
    }
} // end function: udp_receiver
/** * \brief Handler thread for inter-dispatcher messages * \param arg Pointer to inter-dispatcher waitset * \return 0 on successful exit */ static int interdisp_msg_handler(void *arg) { struct waitset *ws = arg; assert(ws != NULL); for(;;) { errval_t err = event_dispatch(ws); if(err_is_fail(err)) { USER_PANIC_ERR(err, "error on event dispatch"); } } return 0; }
/**
 * \brief Monitor syscall: performs one step of capability clearing.
 *
 * Resolves the return slot from the given cnode address/bits and slot, then
 * delegates to caps_clear_step.
 */
struct sysret sys_monitor_clear_step(capaddr_t ret_cn_addr,
                                     uint8_t ret_cn_bits,
                                     cslot_t ret_slot)
{
    struct cte *retslot;
    errval_t err = sys_retslot_lookup(ret_cn_addr, ret_cn_bits,
                                      ret_slot, &retslot);
    if (err_is_ok(err)) {
        return SYSRET(caps_clear_step(retslot));
    }
    return SYSRET(err);
}
/**
 * \brief Called when the domain's interdisp service export completes.
 *
 * Records the service iref both in the domain state and in the global
 * per-core iref array, then signals completion via the conditional flag.
 *
 * \param st   Unused closure state.
 * \param err  Export result; aborts the domain on failure.
 * \param iref IREF under which the interdisp service is reachable.
 */
static void server_listening(void *st, errval_t err, iref_t iref)
{
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "interdisp service export");
        abort();
    }

    struct domain_state *dstate = get_domain_state();
    dstate->iref = iref;

    // Also publish in the global per-core array
    allirefs[disp_get_core_id()] = iref;

    // signal waiters that the iref is now valid
    dstate->conditional = true;
}
/**
 * \brief Revokes the previously received capability and acknowledges
 *        completion to the client.
 *
 * \param b Binding on which to send the send_done acknowledgement.
 */
static void revoke_cap(struct xcorecap_binding *b)
{
    errval_t err;

    printf("xcorecapserv do revoke cap\n");
    err = cap_revoke(sent_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "xcorecapserv: revoke failed\n");
    }
    printf("xcorecapserv revoked cap\n");
    fflush(stdout);

    // Fixed: the transmit error of send_done was silently discarded; a failed
    // send would previously leave the client waiting with no diagnostic.
    err = b->tx_vtbl.send_done(b, NOP_CONT);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "xcorecapserv: sending send_done failed\n");
    }
}
/**
 * \brief Monitor syscall: marks a target capability for revocation.
 *
 * Resolves the target cap via a two-level lookup (root cnode, then target
 * address) and delegates to caps_mark_revoke.
 */
struct sysret sys_monitor_revoke_mark_tgt(capaddr_t root_addr,
                                          uint8_t root_bits,
                                          capaddr_t target_addr,
                                          uint8_t target_bits)
{
    struct cte *target;
    errval_t err = sys_double_lookup(root_addr, root_bits,
                                     target_addr, target_bits, &target);
    if (err_is_fail(err)) {
        printf("%s: error in double_lookup: %"PRIuERRV"\n", __FUNCTION__, err);
        return SYSRET(err);
    }

    return SYSRET(caps_mark_revoke(&target->cap, target));
}
/**
 * \brief Unmaps the vregion covering the given buffer from the current vspace.
 *
 * \param buf Address inside the region to unmap; must lie within a known
 *            vregion (asserted).
 *
 * \return SYS_ERR_OK, or LIB_ERR_VREGION_DESTROY pushed onto the underlying
 *         failure.
 */
errval_t vspace_unmap(const void *buf)
{
    struct vregion *vr = vspace_get_region(get_current_vspace(), buf);
    assert(vr);

    errval_t err = vregion_destroy(vr);
    return err_is_fail(err) ? err_push(err, LIB_ERR_VREGION_DESTROY)
                            : SYS_ERR_OK;
}
/**
 * \brief Completion callback for spanning a dispatcher onto another core.
 *
 * Counts successful spans (starting at 1 for the original dispatcher) and
 * raises the all_spanned flag once every requested core has reported in.
 */
static void span_cb(void *arg, errval_t err)
{
    // starts at 1: the original dispatcher counts as already "spanned"
    static int num_spanned = 1;

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "span failed");
        return;
    }

    if (++num_spanned >= num_to_span) {
        all_spanned = true;
    }
}
/** * \brief Tear down connection to terminal server. * * \param client Terminal client state. * * Dispatches the control waitset until the message is sent. */ void term_client_blocking_exit(struct term_client *client) { errval_t err; TERM_DEBUG("Sending disconnect message to terminal device.\n"); /* Inform terminal device (server), that domain terminated. */ err = client->conf_binding->tx_vtbl.disconnect(client->conf_binding, MKCONT(exit_cb, client)); if (err_is_fail(err)) { USER_PANIC_ERR(err, "Error sending disconnect to terminal device.\n"); } /* Wait until message is sent. Necessary to ensure that message is sent * before we terminate. */ TERM_DEBUG("Waiting until disconnect message is sent.\n"); while (client->connected) { err = event_dispatch(client->conf_ws); if (err_is_fail(err)) { USER_PANIC_ERR(err, "Error dispatching events."); } } }
/** * \brief tries to free the allocated memory region * * \returns SYS_ERR_OK on success * errval on error */ errval_t dma_mem_free(struct dma_mem *mem) { errval_t err; if (mem->vaddr) { err = vspace_unmap((void*)mem->vaddr); if (err_is_fail(err)) { /* todo: error handling ignoring for now */ } } if (!capref_is_null(mem->frame)) { err = cap_destroy(mem->frame); if (err_is_fail(err)) { /* todo: error handling ignoring for now */ } } memset(mem, 0, sizeof(*mem)); return SYS_ERR_OK; }
static errval_t usb_manager_device_powersave__rpc(struct usb_manager_rpc_client *_rpc, uint8_t powerstate, uint32_t *ret_error) { errval_t _err = SYS_ERR_OK; assert(!(_rpc->rpc_in_progress)); assert((_rpc->async_error) == SYS_ERR_OK); _rpc->rpc_in_progress = true; _rpc->reply_present = false; // call send function _err = ((((_rpc->b)->tx_vtbl).device_powersave_call)(_rpc->b, NOP_CONT, powerstate)); if (err_is_fail(_err)) { goto out; } // wait for message to be sent and reply or error to be present while (((!(_rpc->reply_present)) || (!(((_rpc->b)->can_send)(_rpc->b)))) && ((_rpc->async_error) == SYS_ERR_OK)) { _err = event_dispatch(&(_rpc->rpc_waitset)); if (err_is_fail(_err)) { _err = err_push(_err, LIB_ERR_EVENT_DISPATCH); goto out; } } if (err_is_fail(_rpc->async_error)) { _err = (_rpc->async_error); _rpc->async_error = SYS_ERR_OK; goto out; } // grab reply variables out of binding struct usb_manager_binding *_binding = _rpc->b; *ret_error = (((_binding->rx_union).device_powersave_response).ret_error); out: _rpc->rpc_in_progress = false; return(_err); }
/**
 * \brief Synchronous wrapper around the mm_free_proxy message pair.
 *
 * Sends the call, spins on the RPC waitset until both the reply has arrived
 * and the binding can send again, then copies the reply value out of the
 * binding's rx_union.
 */
static errval_t acpi_mm_free_proxy__rpc(struct acpi_rpc_client *_rpc,
                                        struct capref devframe, uint64_t base,
                                        uint8_t sizebits, acpi_errval_t *err)
{
    errval_t _err = SYS_ERR_OK;

    // exactly one RPC may be outstanding per client
    assert(!_rpc->rpc_in_progress);
    assert(_rpc->async_error == SYS_ERR_OK);
    _rpc->rpc_in_progress = true;
    _rpc->reply_present = false;

    // issue the call message
    _err = _rpc->b->tx_vtbl.mm_free_proxy_call(_rpc->b, NOP_CONT, devframe,
                                               base, sizebits);
    if (err_is_fail(_err)) {
        goto out;
    }

    // block until the reply arrived and the send completed, or an
    // asynchronous error was flagged by the error handler
    while ((!_rpc->reply_present || !_rpc->b->can_send(_rpc->b))
           && _rpc->async_error == SYS_ERR_OK) {
        _err = event_dispatch(&_rpc->rpc_waitset);
        if (err_is_fail(_err)) {
            _err = err_push(_err, LIB_ERR_EVENT_DISPATCH);
            goto out;
        }
    }

    if (err_is_fail(_rpc->async_error)) {
        // surface (and clear) the async error as the RPC's result
        _err = _rpc->async_error;
        _rpc->async_error = SYS_ERR_OK;
        goto out;
    }

    // copy reply value out of the binding
    struct acpi_binding *_b = _rpc->b;
    *err = _b->rx_union.mm_free_proxy_response.err;

out:
    _rpc->rpc_in_progress = false;
    return _err;
}
/* * RPC wrapper functions */ static errval_t e10k_vf_get_mac_address__rpc(struct e10k_vf_rpc_client *_rpc, uint8_t vfn, uint64_t *mac) { errval_t _err = SYS_ERR_OK; assert(!(_rpc->rpc_in_progress)); assert((_rpc->async_error) == SYS_ERR_OK); _rpc->rpc_in_progress = true; _rpc->reply_present = false; // call send function _err = ((((_rpc->b)->tx_vtbl).get_mac_address_call)(_rpc->b, NOP_CONT, vfn)); if (err_is_fail(_err)) { goto out; } // wait for message to be sent and reply or error to be present while (((!(_rpc->reply_present)) || (!(((_rpc->b)->can_send)(_rpc->b)))) && ((_rpc->async_error) == SYS_ERR_OK)) { _err = event_dispatch(&(_rpc->rpc_waitset)); if (err_is_fail(_err)) { _err = err_push(_err, LIB_ERR_EVENT_DISPATCH); goto out; } } if (err_is_fail(_rpc->async_error)) { _err = (_rpc->async_error); _rpc->async_error = SYS_ERR_OK; goto out; } // grab reply variables out of binding struct e10k_vf_binding *_binding = _rpc->b; *mac = (((_binding->rx_union).get_mac_address_response).mac); out: _rpc->rpc_in_progress = false; return(_err); }
/**
 * \brief Open callback: identifies and maps the message frame offered by the
 *        remote domain, then initializes the channel buffer.
 *
 * Stores the remote frame/base/size in the module-level globals and flags
 * the connection as established.
 *
 * \param domain   Id of the remote domain (stored in the global domid).
 * \param usrdata  User data supplied by the opener (logged only).
 * \param msgframe Frame capability backing the shared message buffer.
 * \param type     Channel type (unused here).
 */
static errval_t msg_open_cb(xphi_dom_id_t domain, uint64_t usrdata,
                            struct capref msgframe, uint8_t type)
{
    errval_t err;

    domid = domain;

    // figure out where the frame lives and how big it is
    struct frame_identity id;
    err = invoke_frame_identify(msgframe, &id);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not identify the frame");
    }

    debug_printf("msg_open_cb | Frame base: %016lx, size=%lx, ud:%lx\n",
                 id.base, 1UL << id.bits, usrdata);

    remote_frame = msgframe;
    remote_base = id.base;
    remote_frame_sz = (1UL << id.bits);

    // map the shared frame into our vspace
    err = vspace_map_one_frame(&remote_buf, remote_frame_sz, msgframe,
                               NULL, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not map the frame");
    }

    init_buffer_c0();

    connected = 0x1;
    return SYS_ERR_OK;
}
/**
 * \brief Initialise a new LMP channel and initiate a binding
 *
 * Stores the bind parameters on the channel, allocates a slot and a local
 * endpoint, then queues for exclusive use of the monitor binding; the bind
 * request itself is sent later by send_bind_cont, and completion is reported
 * through \p cont.
 *
 * \param lc           Storage for channel state
 * \param cont         Continuation for bind completion/failure
 * \param qnode        Storage for an event queue node (used for queuing bind request)
 * \param iref         IREF to which to bind
 * \param buflen_words Size of incoming buffer, in number of words
 */
errval_t lmp_chan_bind(struct lmp_chan *lc, struct lmp_bind_continuation cont,
                       struct event_queue_node *qnode, iref_t iref,
                       size_t buflen_words)
{
    errval_t err;

    lmp_chan_init(lc);

    /* store bind arguments */
    lc->iref = iref;
    lc->buflen_words = buflen_words;
    lc->bind_continuation = cont;

    /* allocate a cap slot for the new endpoint cap */
    err = slot_alloc(&lc->local_cap);
    if (err_is_fail(err)) {
        // undo lmp_chan_init's waitset registration before bailing out
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    /* allocate a local endpoint */
    err = lmp_endpoint_create_in_slot(buflen_words, lc->local_cap,
                                      &lc->endpoint);
    if (err_is_fail(err)) {
        // roll back in reverse order: free the slot, then the waitset state
        slot_free(lc->local_cap);
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_ENDPOINT_CREATE);
    }

    // wait for the ability to use the monitor binding
    lc->connstate = LMP_BIND_WAIT;
    struct monitor_binding *mb = lc->monitor_binding = get_monitor_binding();
    // send_bind_cont runs once we hold the monitor binding's mutex
    event_mutex_enqueue_lock(&mb->mutex, qnode,
                             MKCLOSURE(send_bind_cont, lc));

    return SYS_ERR_OK;
}
/**
 * \brief Synchronous wrapper around the enable_and_route_interrupt message
 *        pair.
 *
 * Sends the call, spins on the RPC waitset until both the reply has arrived
 * and the binding can send again, then copies the reply value out of the
 * binding's rx_union.
 */
static errval_t acpi_enable_and_route_interrupt__rpc(
        struct acpi_rpc_client *_rpc, uint32_t gsi, acpi_coreid_t dest,
        uint32_t vector, acpi_errval_t *error_code)
{
    errval_t _err = SYS_ERR_OK;

    // exactly one RPC may be outstanding per client
    assert(!_rpc->rpc_in_progress);
    assert(_rpc->async_error == SYS_ERR_OK);
    _rpc->rpc_in_progress = true;
    _rpc->reply_present = false;

    // issue the call message
    _err = _rpc->b->tx_vtbl.enable_and_route_interrupt_call(_rpc->b, NOP_CONT,
                                                            gsi, dest, vector);
    if (err_is_fail(_err)) {
        goto out;
    }

    // block until the reply arrived and the send completed, or an
    // asynchronous error was flagged by the error handler
    while ((!_rpc->reply_present || !_rpc->b->can_send(_rpc->b))
           && _rpc->async_error == SYS_ERR_OK) {
        _err = event_dispatch(&_rpc->rpc_waitset);
        if (err_is_fail(_err)) {
            _err = err_push(_err, LIB_ERR_EVENT_DISPATCH);
            goto out;
        }
    }

    if (err_is_fail(_rpc->async_error)) {
        // surface (and clear) the async error as the RPC's result
        _err = _rpc->async_error;
        _rpc->async_error = SYS_ERR_OK;
        goto out;
    }

    // copy reply value out of the binding
    struct acpi_binding *_b = _rpc->b;
    *error_code = _b->rx_union.enable_and_route_interrupt_response.error_code;

out:
    _rpc->rpc_in_progress = false;
    return _err;
}
errval_t blockdevfs_ata_open(void *handle) { VFS_BLK_DEBUG("blockdevfs_ata_open: entering\n"); errval_t err; struct ata_handle *h = handle; h->wait_status = SYS_ERR_OK; h->waiting = true; err = ahci_init(h->port_num, ahci_init_cb, h, get_default_waitset()); if (err_is_fail(err)) { printf("ahci_init failed: '%s'\n", err_getstring(err)); h->waiting = false; return err; } // XXX: block for command completion (broken API!) while (h->waiting) { err = event_dispatch(get_default_waitset()); if (err_is_fail(err)) { USER_PANIC_ERR(err, "error in event_dispatch for blockdevfs_ata_open"); } } struct ahci_ata_rw28_binding *ahci_ata_rw28_binding; ahci_ata_rw28_binding = calloc(1, sizeof(struct ahci_ata_rw28_binding)); ahci_ata_rw28_init(ahci_ata_rw28_binding, get_default_waitset(), h->ahci_binding); h->ata_rw28_binding = (struct ata_rw28_binding*)ahci_ata_rw28_binding; err = ata_rw28_rpc_client_init(&h->ata_rw28_rpc, h->ata_rw28_binding); if (err_is_fail(err)) { // TODO: bindings leak VFS_BLK_DEBUG("blockdevfs_ata_open: failed to init ata_rw28 rpc client\n"); return err; } VFS_BLK_DEBUG("blockdevfs_ata_open: exiting\n"); return h->wait_status; }
/**
 * \brief Intermonitor handler: checks whether a local copy of the given
 *        capability exists and reports the result back to the requester.
 *
 * Allocates a temporary slot, attempts to copy the cap into it (which
 * succeeds only if a local copy exists), then cleans up and sends the
 * outcome of the copy attempt via find_cap_result.
 *
 * \param b      Binding the request arrived on; b->st identifies the sender.
 * \param caprep Wire representation of the capability to look for.
 * \param st     Opaque request state echoed back to the requester.
 */
void find_cap__rx_handler(struct intermon_binding *b,
                          intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err, cleanup_err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;

    struct capability cap;
    caprep_to_capability(&caprep, &cap);

    struct capref capref;
    err = slot_alloc(&capref);
    if (err_is_fail(err)) {
        goto send_err;
    }

    // succeeds iff a copy of the cap exists on this core
    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_fail(err)) {
        goto free_slot;
    }

    cleanup_err = cap_delete(capref);
    if (err_is_fail(cleanup_err)) {
        // Fixed: these panics previously reported 'err' (the lookup result)
        // instead of the cleanup error that actually failed.
        USER_PANIC_ERR(cleanup_err, "failed to delete temporary cap");
    }

free_slot:
    cleanup_err = slot_free(capref);
    if (err_is_fail(cleanup_err)) {
        USER_PANIC_ERR(cleanup_err, "failed to free slot for temporary cap");
    }

send_err:
    // 'err' carries the find outcome (or the slot_alloc failure) back
    cleanup_err = find_cap_result(from, err, st);
    if (err_is_fail(cleanup_err)) {
        USER_PANIC_ERR(cleanup_err, "failed to send find_cap result");
    }
}
int main(int argc, char *argv[]) { errval_t err; if (argc != 2) { printf("Usage %s: <Num additional threads>\n", argv[0]); exit(-1); } //printf("main running on %d\n", disp_get_core_id()); int cores = strtol(argv[1], NULL, 10) + 1; NPROC = cores -1; BARINIT(barrier, NPROC); uint64_t before = rdtsc(); times[0] = before; trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1); for (int i = 1; i < cores; i++) { err = domain_new_dispatcher(i + disp_get_core_id(), domain_spanned_callback, (void*)(uintptr_t)i); if (err_is_fail(err)) { USER_PANIC_ERR(err, "domain_new_dispatcher failed"); } } while (ndispatchers < cores) { thread_yield(); } uint64_t finish = rdtsc(); trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0); //sys_print("\nDone\n", 6); printf("spantest: Done in %"PRIu64" cycles\n", finish-before); //trace_dump(); for(int i = 1; i < cores; i++) { err = domain_thread_create_on(i, remote, NULL); assert(err_is_ok(err)); } messages_handler_loop(); return 0; }
/**
 * \brief Send continuation for a queued intermonitor cap-send request.
 *
 * Resolves the remote connection for the queued element and transmits the
 * cap_send_request message; send failures are logged but not retried.
 * Frees the queue element in all cases.
 *
 * \param b Intermonitor binding to transmit on.
 * \param e Queued element; actually a struct send_cap_st.
 */
static void cap_send_tx_cont(struct intermon_binding *b,
                             struct intermon_msg_queue_elem *e)
{
    DEBUG_CAPOPS("%s: %p %p\n", __FUNCTION__, b, e);
    errval_t send_err;
    struct send_cap_st *st = (struct send_cap_st*)e;
    struct remote_conn_state *conn = remote_conn_lookup(st->my_mon_id);
    // Fixed: conn was dereferenced without checking the lookup result; a
    // stale my_mon_id would have crashed on conn->mon_id.
    assert(conn != NULL);
    send_err = intermon_cap_send_request__tx(b, NOP_CONT, conn->mon_id,
                                             st->capid, st->captx);
    if (err_is_fail(send_err)) {
        DEBUG_ERR(send_err, "sending cap_send_request failed");
    }
    free(st);
}
errval_t capsend_owner(struct domcapref capref, struct msg_queue_elem *queue_elem) { errval_t err; // read cap owner coreid_t owner; err = monitor_get_domcap_owner(capref, &owner); if (err_is_fail(err)) { return err; } // enqueue to owner return capsend_target(owner, queue_elem); }