/**
 * \brief Request the trace capability from a remote monitor and wait for it.
 *
 * Issues a trace_caps_request on the given intermonitor binding, then
 * handles incoming messages until the reply handler has populated the
 * global #trace_cap.
 *
 * \param st Intermonitor binding to send the request on
 *
 * \return SYS_ERR_OK on success, or the send error wrapped in
 *         MON_ERR_SEND_REMOTE_MSG if the request could not be sent.
 */
errval_t request_trace_caps(struct intermon_binding *st)
{
    errval_t send_err = st->tx_vtbl.trace_caps_request(st, NOP_CONT);
    if (err_is_fail(send_err)) {
        return err_push(send_err, MON_ERR_SEND_REMOTE_MSG);
    }

    /* Block until the reply handler fills in the global trace_cap. */
    while (capref_is_null(trace_cap)) {
        messages_wait_and_handle_next();
    }

    return SYS_ERR_OK;
}
/** * \brief Allocate a new receive capability slot for an LMP channel * * This utility function allocates a new receive slot (using #slot_alloc) * and sets it on the channel (using #lmp_chan_set_recv_slot). * * \param lc LMP channel */ errval_t lmp_chan_alloc_recv_slot(struct lmp_chan *lc) { struct capref slot; errval_t err = slot_alloc(&slot); if (err_is_fail(err)) { return err_push(err, LIB_ERR_SLOT_ALLOC); } lmp_chan_set_recv_slot(lc, slot); return SYS_ERR_OK; }
/**
 * Evaluate an array or tuple literal AST node onto the runtime value stack.
 *
 * Pushes a compound header, evaluates each element expression in order on
 * top of it, and finally patches the header with the total byte size of the
 * element data. For arrays, additionally checks that all elements have the
 * same type (homogeneity); tuples may be heterogeneous.
 *
 * On failure an error is pushed via err_push()/bif_text_error_parse_homo()
 * and the function returns early, leaving the header unfinalized.
 */
static void bif_parse_any_ast_literal_compound(
        struct Runtime *rt,
        struct AstLiteralCompound *lit_cpd)
{
    /* size_loc: stack location of the header's size field, filled in at
     * the end. -1 only survives if lit_cpd->type matches no case below. */
    VAL_LOC_T size_loc = -1, data_begin, data_size;
    struct AstNode *current = lit_cpd->exprs;
    bool has_first_elem = false;
    VAL_LOC_T first_elem_loc, elem_loc;

    /* Header. */
    switch (lit_cpd->type) {
    case AST_LIT_CPD_ARRAY:
        rt_val_push_array_init(&rt->stack, &size_loc);
        break;

    case AST_LIT_CPD_TUPLE:
        rt_val_push_tuple_init(&rt->stack, &size_loc);
        break;
    }

    /* Data. */
    data_begin = rt->stack.top;
    while (current) {

        /* Remember where this element's value will start. */
        elem_loc = rt->stack.top;
        bif_parse_any_ast(rt, current);
        if (err_state()) {
            err_push("BIF", "Failed parsing compound AST node");
            return;
        } else {
            current = current->next;
        }

        /* Only arrays require homogeneous element types. */
        if (lit_cpd->type != AST_LIT_CPD_ARRAY) {
            continue;
        }

        /* Homogenity check: compare every element against the first one. */
        if (!has_first_elem) {
            has_first_elem = true;
            first_elem_loc = elem_loc;
        } else if (!rt_val_pair_homo(rt, first_elem_loc, elem_loc)) {
            bif_text_error_parse_homo();
            return;
        }
    }

    /* Fix the header with the written size. */
    data_size = rt->stack.top - data_begin;
    rt_val_push_cpd_final(&rt->stack, size_loc, data_size);
}
/** * Copies caps in inherited cnode into targets cspace. * * \param si Target spawninfo * \param inheritcn_cap Cnode of caps to inherit * \retval SYS_ERR_OK Caps have been copied. */ static errval_t spawn_setup_inherited_caps(struct spawninfo *si, struct capref inheritcn_cap) { errval_t err; struct cnoderef inheritcn; if (capref_is_null(inheritcn_cap)) { return SYS_ERR_OK; } err = cnode_build_cnoderef(&inheritcn, inheritcn_cap); if (err_is_fail(err)) { return err; } /* Copy the file descriptor frame cap over */ err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_FDSPAGE, si->taskcn, TASKCN_SLOT_FDSPAGE); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_FDCAP); } /* Copy the session capability over */ err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_SESSIONID, si->taskcn, TASKCN_SLOT_SESSIONID); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_SIDCAP); } /* Copy the kernel capability over, scary */ err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_KERNELCAP, si->taskcn, TASKCN_SLOT_KERNELCAP); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_KERNEL_CAP); } return SYS_ERR_OK; }
/** * \brief Destroy the given region * * \return SYS_ERR_OK on success, error code on failure * * \bug This only works if the memobj type is memobj_one_frame. */ errval_t vregion_destroy(struct vregion *vregion) { errval_t err; struct vspace *vspace = vregion_get_vspace(vregion); if (vspace != NULL) { err = vspace_remove_vregion(vspace, vregion); if (err_is_fail(err)) { return err_push(err, LIB_ERR_VSPACE_REMOVE_REGION); } } struct memobj *memobj = vregion_get_memobj(vregion); if (memobj != NULL) { err = memobj->f.unmap_region(memobj, vregion); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION); } } return SYS_ERR_OK; }
/**
 * \brief Name service lookup of a Xeon Phi domain via octopus.
 *
 * \param iface    Name of the domain
 * \param retdomid Returns the Xeon Phi domain ID (may be NULL)
 *
 * \return SYS_ERR_OK on success; LIB_ERR_NAMESERVICE_NOT_BOUND when no
 *         octopus client is available; XEON_PHI_ERR_CLIENT_DOMAIN_VOID
 *         (pushed) when the record is missing or carries no valid domid.
 */
errval_t domain_lookup(const char *iface, xphi_dom_id_t *retdomid)
{
    struct octopus_rpc_client *oct = get_octopus_rpc_client();
    if (oct == NULL) {
        return LIB_ERR_NAMESERVICE_NOT_BOUND;
    }

    char *record = NULL;
    octopus_trigger_id_t tid;
    errval_t query_err;
    errval_t err = oct->vtbl.get(oct, iface, NOP_TRIGGER, &record, &tid,
                                 &query_err);
    if (err_is_fail(err)) {
        goto out;
    }

    /* The RPC itself succeeded; now inspect the query's own result. */
    err = query_err;
    if (err_is_fail(err)) {
        if (err_no(err) == OCT_ERR_NO_RECORD) {
            err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
        }
        goto out;
    }

    /* Parse the domain id out of the record; 0 is not a valid id. */
    xphi_dom_id_t domid = 0;
    err = oct_read(record, "_ { domid: %d }", &domid);
    if (err_is_fail(err) || domid == 0) {
        err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
        goto out;
    }

    if (retdomid != NULL) {
        *retdomid = domid;
    }

out:
    /* free(NULL) is a no-op, so this is safe on every path. */
    free(record);
    return err;
}
/** * \brief Span a domain with the given vroot and disp_frame * * Operation similar to spawning a domain but the vroot and disp_frame * are already provided */ errval_t spawn_span_domain(struct spawninfo *si, struct capref vroot, struct capref disp_frame) { errval_t err; struct capref t1; struct cnoderef cnode; /* Spawn cspace */ err = spawn_setup_cspace(si); if (err_is_fail(err)) { return err; } /* Create pagecn */ t1.cnode = si->rootcn; t1.slot = ROOTCN_SLOT_PAGECN; err = cnode_create_raw(t1, &cnode, PAGE_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_PAGECN); } // Copy root of pagetable si->vtree.cnode = cnode; si->vtree.slot = 0; err = cap_copy(si->vtree, vroot); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_COPY_VNODE); } /* Copy dispatcher frame (in taskcn) */ si->dispframe.cnode = si->taskcn; si->dispframe.slot = TASKCN_SLOT_DISPFRAME; err = cap_copy(si->dispframe, disp_frame); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_COPY_VNODE); } return SYS_ERR_OK; }
/*static*/ DATA_BIN_PTR db_make(char *title) { DATA_BIN_PTR dbin; dbin = (DATA_BIN_PTR)memMalloc(sizeof(DATA_BIN), "dbin"); if (dbin == NULL) { err_push(ERR_MEM_LACK, "Data Bin"); return(NULL); } /* Initialize data_bin */ #ifdef FF_CHK_ADDR dbin->check_address = (void*)dbin; #endif if (title) { dbin->title = (char *)memStrdup(title, "dbin->title"); /* (char *) for Think C -rf01 */ if (dbin->title == NULL) { err_push(ERR_MEM_LACK, "Data Bin Title"); memFree(dbin, "dbin"); return(NULL); } } else dbin->title = NULL; dbin->table_list = NULL; dbin->array_conduit_list = NULL; dbin->eqn_info = NULL; return(dbin); }
/**
 * \brief Name service lookup of an interface's IREF via octopus.
 *
 * \param iface   Name of the interface to query
 * \param retiref Returns the IREF on success (may be NULL)
 *
 * \return SYS_ERR_OK on success; LIB_ERR_NAMESERVICE_NOT_BOUND when no
 *         octopus client is available; LIB_ERR_NAMESERVICE_UNKNOWN_NAME /
 *         LIB_ERR_NAMESERVICE_INVALID_NAME (pushed) on lookup failure.
 */
errval_t nameservice_lookup(const char *iface, iref_t *retiref)
{
    struct octopus_rpc_client *oct = get_octopus_rpc_client();
    if (oct == NULL) {
        return LIB_ERR_NAMESERVICE_NOT_BOUND;
    }

    char *record = NULL;
    octopus_trigger_id_t tid;
    errval_t query_err;
    errval_t err = oct->vtbl.get(oct, iface, NOP_TRIGGER, &record, &tid,
                                 &query_err);
    if (err_is_fail(err)) {
        goto out;
    }

    /* The RPC itself succeeded; now inspect the query's own result. */
    err = query_err;
    if (err_is_fail(err)) {
        if (err_no(err) == OCT_ERR_NO_RECORD) {
            err = err_push(err, LIB_ERR_NAMESERVICE_UNKNOWN_NAME);
        }
        goto out;
    }

    /* Parse the iref out of the record; 0 is not a valid iref. */
    uint64_t iref_number = 0;
    err = oct_read(record, "_ { iref: %d }", &iref_number);
    if (err_is_fail(err) || iref_number == 0) {
        err = err_push(err, LIB_ERR_NAMESERVICE_INVALID_NAME);
        goto out;
    }

    if (retiref != NULL) {
        *retiref = iref_number;
    }

out:
    /* free(NULL) is a no-op, so this is safe on every path. */
    free(record);
    return err;
}
/// Map with an alignment constraint errval_t vspace_map_anon_nomalloc(void **retaddr, struct memobj_anon *memobj, struct vregion *vregion, size_t size, size_t *retsize, vregion_flags_t flags, size_t alignment) { errval_t err1, err2; size = ROUND_UP(size, BASE_PAGE_SIZE); if (retsize) { *retsize = size; } // Create a memobj and vregion err1 = memobj_create_anon(memobj, size, 0); if (err_is_fail(err1)) { err1 = err_push(err1, LIB_ERR_MEMOBJ_CREATE_ANON); goto error; } err1 = vregion_map_aligned(vregion, get_current_vspace(), (struct memobj *)memobj, 0, size, flags, alignment); if (err_is_fail(err1)) { err1 = err_push(err1, LIB_ERR_VREGION_MAP); goto error; } *retaddr = (void*)vspace_genvaddr_to_lvaddr(vregion_get_base_addr(vregion)); return SYS_ERR_OK; error: if (err_no(err1) != LIB_ERR_MEMOBJ_CREATE_ANON) { err2 = memobj_destroy_anon((struct memobj *)memobj); if (err_is_fail(err2)) { DEBUG_ERR(err2, "memobj_destroy_anon failed"); } } return err1; }
/**
 * Record the program's command line in the error stack.
 *
 * Formats argv[] as "==>prog arg1 arg2<==" into a local buffer and pushes
 * it via err_push() with ERR_GENERAL.
 *
 * Fix: the original used unbounded sprintf(), which overflows the
 * fixed-size cline buffer when the concatenated arguments exceed
 * 2 * MAX_PATH bytes. snprintf() bounds every write; overly long command
 * lines are now truncated instead of smashing the stack. snprintf always
 * NUL-terminates (size > 0), so the strlen() calls below are safe.
 *
 * \param argc Argument count, as passed to main()
 * \param argv Argument vector, as passed to main()
 */
void show_command_line(int argc, char *argv[])
{
    int i;
    size_t used;
    char cline[2 * MAX_PATH] = {""};

    snprintf(cline, sizeof(cline), "==>%s%s", argv[0], argc > 1 ? " " : "");

    for (i = 1; i < argc; i++)
    {
        used = strlen(cline);
        snprintf(cline + used, sizeof(cline) - used, "%s%s", argv[i],
                 i < argc - 1 ? " " : "");
    }

    used = strlen(cline);
    snprintf(cline + used, sizeof(cline) - used, "<==");

    err_push(ERR_GENERAL, cline);
}
/**
 * \brief Unmap the vregion covering the given address in the current vspace.
 *
 * \param buf Address inside the region to unmap; asserts that a region
 *            actually covers it.
 *
 * \return SYS_ERR_OK on success, or the destroy error wrapped in
 *         LIB_ERR_VREGION_DESTROY.
 */
errval_t vspace_unmap(const void *buf)
{
    struct vregion *region = vspace_get_region(get_current_vspace(), buf);
    assert(region);

    errval_t err = vregion_destroy(region);
    return err_is_fail(err) ? err_push(err, LIB_ERR_VREGION_DESTROY)
                            : SYS_ERR_OK;
}
/**
 * \brief Initialise a new LMP channel and initiate a binding
 *
 * \param lc           Storage for channel state
 * \param cont         Continuation for bind completion/failure
 * \param qnode        Storage for an event queue node (used for queuing
 *                     bind request)
 * \param iref         IREF to which to bind
 * \param buflen_words Size of incoming buffer, in number of words
 *
 * \return SYS_ERR_OK once the bind request is queued; on failure the
 *         partially-initialised channel state is torn down again.
 */
errval_t lmp_chan_bind(struct lmp_chan *lc, struct lmp_bind_continuation cont,
                       struct event_queue_node *qnode, iref_t iref,
                       size_t buflen_words)
{
    lmp_chan_init(lc);

    /* store bind arguments */
    lc->iref = iref;
    lc->buflen_words = buflen_words;
    lc->bind_continuation = cont;

    /* allocate a cap slot for the new endpoint cap */
    errval_t err = slot_alloc(&lc->local_cap);
    if (err_is_fail(err)) {
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    /* allocate a local endpoint; undo the slot allocation on failure */
    err = lmp_endpoint_create_in_slot(buflen_words, lc->local_cap,
                                      &lc->endpoint);
    if (err_is_fail(err)) {
        slot_free(lc->local_cap);
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_ENDPOINT_CREATE);
    }

    /* wait for the ability to use the monitor binding */
    lc->connstate = LMP_BIND_WAIT;
    struct monitor_binding *mb = lc->monitor_binding = get_monitor_binding();
    event_mutex_enqueue_lock(&mb->mutex, qnode, MKCLOSURE(send_bind_cont, lc));

    return SYS_ERR_OK;
}
/**
 * \brief Setup a new vregion with alignment constraints in an address space
 *
 * Picks a suitable virtual address from the pmap, fills in the vregion,
 * then registers it with both the vspace and the memory object.
 *
 * \param vregion   The vregion
 * \param vspace    The vspace to associate with the vregion
 * \param memobj    The memory object to associate with the region
 * \param offset    Offset into the memory object
 * \param size      Size of the memory object to use
 * \param flags     Vregion specific flags
 * \param alignment Minimum required alignment of mapping (may be increased)
 */
errval_t vregion_map_aligned(struct vregion *vregion, struct vspace* vspace,
                             struct memobj *memobj, size_t offset, size_t size,
                             vregion_flags_t flags, size_t alignment)
{
    struct pmap *pmap = vspace_get_pmap(vspace);

    /* Allocate some virtual address space. */
    genvaddr_t address;
    errval_t err = pmap->f.determine_addr(pmap, memobj, alignment, &address);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_DETERMINE_ADDR);
    }

    /* Initialize the region descriptor. */
    vregion->vspace = vspace;
    vregion->memobj = memobj;
    vregion->base   = address;
    vregion->offset = offset;
    vregion->size   = size;
    vregion->flags  = flags;

    /* Register with the vspace... */
    err = vspace_add_vregion(vspace, vregion);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_ADD_REGION);
    }

    /* ...and with the memory object. */
    err = memobj->f.map_region(memobj, vregion);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_MAP_REGION);
    }

    return SYS_ERR_OK;
}
/**
 * Map a contiguous run of pages from a frame into a single L2 page table.
 *
 * Looks up (or creates) the page table covering \p vaddr, records the
 * mapping in the user-level vnode tree, and performs the actual map
 * syscall via vnode_map().
 *
 * \param pmap      The ARM pmap to map into
 * \param vaddr     Start of the virtual range
 * \param vend      End of the virtual range
 *                  NOTE(review): vend is currently unused in this body --
 *                  confirm whether the caller guarantees the range fits in
 *                  one page table.
 * \param frame     Frame capability backing the mapping
 * \param offset    Offset into the frame
 * \param pte_count Number of page-table entries to map
 * \param flags     vregion flags, translated to kernel paging flags
 */
static errval_t do_single_map(struct pmap_arm *pmap, genvaddr_t vaddr,
                              genvaddr_t vend, struct capref frame,
                              size_t offset, size_t pte_count,
                              vregion_flags_t flags)
{
    // Get the page table
    struct vnode *ptable;
    errval_t err = get_ptable(pmap, vaddr, &ptable);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_GET_PTABLE);
    }

    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);
    // XXX: reassess the following note -SG
    // NOTE: strictly speaking a l2 entry only has 8 bits, but due to the way
    // Barrelfish allocates l1 and l2 tables, we use 10 bits for the tracking
    // index here and in the map syscall
    uintptr_t index = ARM_USER_L2_OFFSET(vaddr);

    // Create user level datastructure for the mapping
    // (asserts the entries are still free; slab allocation must succeed)
    bool has_page = has_vnode(ptable, index, pte_count);
    assert(!has_page);
    struct vnode *page = slab_alloc(&pmap->slab);
    assert(page);
    // Link the new page node at the head of the table's child list.
    page->is_vnode = false;
    page->entry = index;
    page->next  = ptable->u.vnode.children;
    ptable->u.vnode.children = page;
    page->u.frame.cap = frame;
    page->u.frame.pte_count = pte_count;

    // Map entry into the page table
    // NOTE(review): if vnode_map fails, the page node above stays linked in
    // the children list, leaving tracking out of sync with the hardware
    // tables -- confirm whether callers tolerate this.
    err = vnode_map(ptable->u.vnode.cap, frame, index, pmap_flags, offset,
                    pte_count);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }
    return SYS_ERR_OK;
}
/** * \brief Change the waitset used for configuration messages. * * \param client Terminal client state. * \param conf_ws New waitset to use. * * \return SYS_ERR_OK if successful. * TERM_ERR_CHANGE_WAITSET on error. */ errval_t term_client_change_config_waitset(struct term_client *client, struct waitset *conf_ws) { errval_t err; assert(client != NULL); assert(conf_ws != NULL); client->conf_ws = conf_ws; err = client->conf_binding->change_waitset(client->conf_binding, conf_ws); if (err_is_fail(err)) { return err_push(err, TERM_ERR_CHANGE_WAITSET); } return SYS_ERR_OK; }
/**
 * \brief Delete a capability table entry.
 *
 * Refuses to delete a locked cap. If this is the last copy of an owned
 * capability, the error is annotated so the caller retries the deletion
 * through the monitor.
 *
 * \param cte The cap table entry to delete
 */
errval_t caps_delete(struct cte *cte)
{
    TRACE_CAP_MSG("deleting", cte);

    if (cte->mdbnode.locked) {
        return SYS_ERR_CAP_LOCKED;
    }

    errval_t ret = caps_try_delete(cte);
    if (err_no(ret) == SYS_ERR_DELETE_LAST_OWNED) {
        ret = err_push(ret, SYS_ERR_RETRY_THROUGH_MONITOR);
    }

    return ret;
}
/**
 * Handle the "-s?" command line option family.
 *
 * \param std_args Standard arguments structure to update
 * \param i        Index of the current argv entry
 *
 * \return 0 on success, ERR_UNKNOWN_OPTION (pushed) for an unrecognized
 *         suffix.
 */
static int option_S(char *argv[], FF_STD_ARGS_PTR std_args, int *i)
{
    int suffix = toupper(argv[*i][2]); /* the '?' in "-s?" */

    if (suffix == STR_END) {
        /* Bare "-s": enable conversion-variable subsetting. */
        std_args->cv_subset = TRUE;
        return 0;
    }

    return err_push(ERR_UNKNOWN_OPTION, "==> %s <==", argv[*i]);
}
/**
 * Prefix every format name in the data bin with its format-type title to
 * make the names unique.
 *
 * For each process-info entry: grow the name buffer by SCRATCH bytes,
 * shift the existing name right to make room, let get_format_type() write
 * the type title at the front, then move the original name up to directly
 * follow the title (closing the gap left by the fixed-size shift).
 */
static int make_unique_format_titles(DATA_BIN_PTR dbin)
{
    PROCESS_INFO_LIST plist = NULL;
    PROCESS_INFO_PTR pinfo = NULL;

    int error = 0;
    /* Room for the longest possible type title (plus NUL). */
    int SCRATCH = strlen("Binary Output Separate Varied Record Header: ") + 1; /* Longest */
    char *cp = NULL;

    FF_VALIDATE(dbin);

    db_ask(dbin, DBASK_PROCESS_INFO, 0, &plist);

    plist = dll_first(plist);
    pinfo = FF_PI(plist);
    while (pinfo && !error)
    {
        FF_VALIDATE(pinfo);

        /* Grow the name buffer so the type title can be prepended. */
        cp = (char *)memRealloc(PINFO_FORMAT(pinfo)->name,
                                strlen(PINFO_FORMAT(pinfo)->name) + SCRATCH + 1,
                                "PINFO_FORMAT(pinfo)->name");
        if (cp)
        {
            PINFO_FORMAT(pinfo)->name = cp;

            /* Shift the existing name (incl. NUL) right by SCRATCH bytes. */
            memmove(cp + SCRATCH, cp, strlen(cp) + 1);
        }
        else
        {
            error = err_push(ERR_MEM_LACK, "");
            break;
        }

        /* Write the type title at the front of the buffer. */
        error = get_format_type(PINFO_FORMAT(pinfo), cp);
        if (error)
            break;

        /* Append the shifted original name right after the title,
           closing the gap left by the fixed-size shift above. */
        memmove(cp + strlen(cp), cp + SCRATCH, strlen(cp + SCRATCH) + 1);

        plist = dll_next(plist);
        pinfo = FF_PI(plist);
    }

    ff_destroy_process_info_list(plist);

    return error;
}
errval_t morecore_init(void) { errval_t err; struct morecore_state *state = get_morecore_state(); thread_mutex_init(&state->mutex); err = vspace_mmu_aware_init(&state->mmu_state, HEAP_REGION); if (err_is_fail(err)) { return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT); } sys_morecore_alloc = morecore_alloc; sys_morecore_free = morecore_free; return SYS_ERR_OK; }
/*
 * Init function
 */

/**
 * Initialize an USB manager RPC client over an existing binding
 * (flounder-generated style stub).
 *
 * Sets up the RPC bookkeeping state, gives the binding its own private
 * waitset, and installs the response/notify RX handlers plus the error
 * handler on the binding.
 *
 * \param rpc     RPC client state to initialize
 * \param binding Established usb_manager binding to wrap
 *
 * \return SYS_ERR_OK on success; FLOUNDER_ERR_CHANGE_WAITSET (pushed) if
 *         the binding could not be moved to the RPC waitset, in which case
 *         the waitset is destroyed again.
 */
errval_t usb_manager_rpc_client_init(struct usb_manager_rpc_client *rpc, struct usb_manager_binding *binding)
{
    errval_t _err;

    // Setup state of RPC client object
    rpc->b = binding;
    rpc->reply_present = false;
    rpc->rpc_in_progress = false;
    rpc->async_error = SYS_ERR_OK;
    waitset_init(&(rpc->rpc_waitset));
    flounder_support_waitset_chanstate_init(&(rpc->dummy_chanstate));
    rpc->vtbl = usb_manager_rpc_vtbl;
    binding->st = rpc;

    // Change waitset on binding
    _err = ((binding->change_waitset)(binding, &(rpc->rpc_waitset)));
    if (err_is_fail(_err)) {
        waitset_destroy(&(rpc->rpc_waitset));
        return(err_push(_err, FLOUNDER_ERR_CHANGE_WAITSET));
    }

    // Set RX handlers on binding object for RPCs
    (binding->rx_vtbl).connect_response = usb_manager_connect__rpc_rx_handler;
    (binding->rx_vtbl).device_disconnect_notify_response = usb_manager_device_disconnect_notify__rpc_rx_handler;
    (binding->rx_vtbl).request_read_response = usb_manager_request_read__rpc_rx_handler;
    (binding->rx_vtbl).request_write_response = usb_manager_request_write__rpc_rx_handler;
    (binding->rx_vtbl).request_response = usb_manager_request__rpc_rx_handler;
    (binding->rx_vtbl).transfer_setup_response = usb_manager_transfer_setup__rpc_rx_handler;
    (binding->rx_vtbl).transfer_unsetup_response = usb_manager_transfer_unsetup__rpc_rx_handler;
    (binding->rx_vtbl).transfer_start_response = usb_manager_transfer_start__rpc_rx_handler;
    (binding->rx_vtbl).transfer_stop_response = usb_manager_transfer_stop__rpc_rx_handler;
    (binding->rx_vtbl).transfer_status_response = usb_manager_transfer_status__rpc_rx_handler;
    (binding->rx_vtbl).transfer_state_response = usb_manager_transfer_state__rpc_rx_handler;
    (binding->rx_vtbl).transfer_clear_stall_response = usb_manager_transfer_clear_stall__rpc_rx_handler;
    (binding->rx_vtbl).transfer_done_notify_response = usb_manager_transfer_done_notify__rpc_rx_handler;
    (binding->rx_vtbl).device_get_speed_response = usb_manager_device_get_speed__rpc_rx_handler;
    (binding->rx_vtbl).device_get_state_response = usb_manager_device_get_state__rpc_rx_handler;
    (binding->rx_vtbl).device_suspend_response = usb_manager_device_suspend__rpc_rx_handler;
    (binding->rx_vtbl).device_resume_response = usb_manager_device_resume__rpc_rx_handler;
    (binding->rx_vtbl).device_powersave_response = usb_manager_device_powersave__rpc_rx_handler;

    // Set error handler on binding object
    binding->error_handler = usb_manager_rpc_client_error;

    return(SYS_ERR_OK);
}
/**
 * \brief Identify a capability on behalf of the monitor.
 *
 * Looks up the capability at \p cptr in \p root and copies its raw
 * contents into the caller-supplied buffer.
 *
 * \param root   Root cnode to resolve against
 * \param cptr   Address of the capability to identify
 * \param bits   Valid bits of the capability address
 * \param retbuf User buffer receiving the raw capability data
 */
struct sysret sys_monitor_identify_cap(struct capability *root,
                                       capaddr_t cptr, uint8_t bits,
                                       struct capability *retbuf)
{
    struct capability *cap = NULL;
    errval_t err = caps_lookup_cap(root, cptr, bits, &cap, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_IDENTIFY_LOOKUP));
    }

    // XXX: Write cap data directly back to user-space
    // FIXME: this should involve a pointer/range check for reliability,
    // but because the monitor is inherently trusted it's not a security hole
    *retbuf = *cap;

    return SYSRET(SYS_ERR_OK);
}
/**
 * \brief Change the protection flags of a mapped range of this memobj.
 *
 * \param memobj  The memory object (unused beyond the vregion association)
 * \param vregion The vregion the range lives in
 * \param offset  Offset of the range within the region
 * \param range   Length of the range in bytes
 * \param flags   New protection flags
 */
static errval_t protect(struct memobj *memobj, struct vregion *vregion,
                        genvaddr_t offset, size_t range, vs_prot_flags_t flags)
{
    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap = vspace_get_pmap(vspace);

    /* Translate the region-relative offset to a virtual address. */
    genvaddr_t addr = vregion_get_base_addr(vregion)
                      + vregion_get_offset(vregion) + offset;

    size_t retsize;
    errval_t err = pmap->f.modify_flags(pmap, addr, range, flags, &retsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
    }

    return SYS_ERR_OK;
}
/**
 * \brief Bind callback for the terminal output interface.
 *
 * On success, wires up the new binding and checks whether all terminal
 * connections are now established; on failure, reports the error to the
 * client's asynchronous error callback.
 *
 * \param st  The term_client this bind belongs to
 * \param err Outcome of the bind attempt
 * \param b   The new terminal binding (valid only on success)
 */
static void out_bind_cb(void *st, errval_t err, struct terminal_binding *b)
{
    struct term_client *client = st;

    if (err_is_ok(err)) {
        client->out_binding = b;
        b->st = client;
        b->rx_vtbl.characters = out_characters_handler;

        /* Check if all connections are already established. */
        check_connection_established(client);
        return;
    }

    /* Bind failed: hand the annotated error to the async error callback. */
    client->err_cb(client->st, err_push(err, TERM_ERR_BIND_OUT_INTERFACE));
}
/**
 * Insert a symbol into the symbol map (a character trie keyed on the
 * symbol name), associating it with a value-stack location.
 *
 * Walks/extends the trie one character at a time; the terminal node is
 * marked set and records stack_loc. Inserting an already-set symbol is a
 * runtime error.
 */
void sym_map_insert(
        struct SymMap *sym_map,
        char *key,
        VAL_LOC_T stack_loc)
{
    struct SymMapNode *node = &sym_map->root;
    char *current = key;

    /* NOTE(review): source_loc is not a parameter -- presumably a global
       tracking the current source position; confirm. */
    LOG_TRACE(
        "sym_map_insert(%s, %td, {%d, %d})",
        key, stack_loc, source_loc.line, source_loc.column);

    while (*current) {
        int i;
        bool found = false;
        /* Look for a child edge labelled with the current character. */
        for (i = 0; i < node->children.size; ++i) {
            if (node->children.data[i].key == *current) {
                node = node->children.data + i;
                found = true;
                break;
            }
        }
        if (!found) {
            struct SymMapNode new_node = { 0, { NULL, 0, 0 }, false, 0 };
            new_node.key = *current;
            ARRAY_APPEND(node->children, new_node);
            /* After the loop above, i == old size, i.e. the index of the
               element just appended; re-read data in case the append
               reallocated the children array. */
            node = node->children.data + i;
        }
        ++current;
    }

    if (node->is_set) {
        /* Duplicate symbol: dump the map for debugging and flag an error. */
        char *string = sym_map_serialize(sym_map);
        LOG_ERROR("Symbol map at error point:\n%s", string);
        mem_free(string);
        err_push("RUNTIME", "Symbol \"%s\" already inserted", key);
    } else {
        node->is_set = true;
        node->stack_loc = stack_loc;
    }
}
/**
 * \brief Continuation invoked once the new monitor binding is created.
 *
 * On success, stores the binding, allocates a virtual circuit identifier
 * for the channel and continues the bind protocol; on failure, reports
 * the error to the user's bind continuation.
 *
 * \param st              Pointer to the multi-hop channel
 * \param err             Success / failure of the binding creation
 * \param monitor_binding The new monitor binding (valid only on success)
 */
static void multihop_new_monitor_binding_continuation(
        void *st, errval_t err, struct monitor_binding *monitor_binding)
{
    struct multihop_chan *mc = st;

    if (err_is_fail(err)) {
        /* report error to user */
        mc->bind_continuation.handler(mc->bind_continuation.st,
                                      err_push(err, LIB_ERR_MONITOR_CLIENT_BIND),
                                      NULL);
        return;
    }

    mc->monitor_binding = monitor_binding;

    /* get a virtual circuit identifier (VCI) for this binding */
    mc->my_vci = multihop_chan_mapping_insert(mc);

    /* send request to the monitor */
    multihop_chan_bind_cont(mc);
}
/**
 * \brief Handle a remote capability retype request from the monitor binding.
 *
 * Identifies the source capability, checks it has not already been retyped
 * (has descendants), and then requests the cross-core lock on it. On
 * success the operation continues asynchronously in
 * remote_cap_retype_phase_2; on any failure an error reply is sent
 * immediately and the saved state is freed.
 *
 * Fix: the original called err_push(err, MON_ERR_CAP_REMOTE) and discarded
 * its return value, so the identify failure was replied with the raw error
 * instead of the wrapped one. err_push returns the new error value; it must
 * be assigned back.
 */
static void remote_cap_retype(struct monitor_blocking_binding *b,
                              struct capref croot, capaddr_t src,
                              uint64_t new_type, uint8_t size_bits,
                              capaddr_t to, capaddr_t slot, int32_t dcn_vbits)
{
    errval_t err;
    bool has_descendants;
    coremask_t on_cores;

    /* Save state for stackripped reply */
    struct retype_st * st = alloc_retype_st(b, croot, src, new_type, size_bits,
                                            to, slot, dcn_vbits);

    /* Get the raw cap from the kernel */
    err = monitor_domains_cap_identify(croot, src, CPTR_BITS,
                                       &(st->rcap_st.capability));
    if (err_is_fail(err)) {
        /* BUG FIX: assign err_push's result; it was previously discarded. */
        err = err_push(err, MON_ERR_CAP_REMOTE);
        goto reply;
    }

    /* Check if cap is retyped, if it is there is no point continuing,
       This will be checked again once we succeed in locking cap */
    err = rcap_db_get_info(&st->rcap_st.capability, &has_descendants,
                           &on_cores);
    assert(err_is_ok(err));
    if (has_descendants) {
        err = MON_ERR_REMOTE_CAP_NEED_REVOKE;
        goto reply;
    }

    /* request lock */
    err = rcap_db_acquire_lock(&st->rcap_st.capability, (struct rcap_st*)st);
    if (err_is_fail(err)) {
        goto reply;
    }
    return;  // continues in remote_cap_retype_phase_2

reply:
    free_retype_st(st);
    err = b->tx_vtbl.remote_cap_retype_response(b, NOP_CONT, err);
    assert(err_is_ok(err));
}
/**
 * \brief System call: create a new capability from nothing.
 *
 * Validates the requested type, resolves the destination cnode and slot,
 * and only then creates the capability. Only a small whitelist of types
 * (currently ObjType_ID) may be created at runtime.
 *
 * \param root            Root cnode to resolve the destination against
 * \param type            Type of capability to create
 * \param objbits         Size bits of the object to create
 * \param dest_cnode_cptr Address of the destination cnode
 * \param dest_slot       Slot in the destination cnode
 * \param dest_vbits      Valid bits of the destination cnode address
 */
struct sysret sys_create(struct capability *root, enum objtype type,
                         uint8_t objbits, capaddr_t dest_cnode_cptr,
                         cslot_t dest_slot, int dest_vbits)
{
    errval_t err;
    /* Runtime-created caps have no backing memory: zero base and bits. */
    uint8_t bits = 0;
    genpaddr_t base = 0;

    /* Paramter checking */
    if (type == ObjType_Null || type >= ObjType_Num) {
        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
    }

    /* Destination CNode */
    struct capability *dest_cnode_cap;
    err = caps_lookup_cap(root, dest_cnode_cptr, dest_vbits, &dest_cnode_cap,
                          CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* Destination slot must be empty. */
    struct cte *dest_cte =
        caps_locate_slot(dest_cnode_cap->u.cnode.cnode, dest_slot);
    if (dest_cte->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOTS_IN_USE);
    }

    /* List capabilities allowed to be created at runtime. */
    switch (type) {
    case ObjType_ID:
        break;
    // only certain types of capabilities can be created at runtime
    default:
        return SYSRET(SYS_ERR_TYPE_NOT_CREATABLE);
    }

    return SYSRET(caps_create_new(type, base, bits, objbits, my_core_id,
                                  dest_cte));
}
/*
 * RPC wrapper functions
 */

/**
 * Blocking RPC wrapper for the ACPI get_pcie_confspace call
 * (flounder-generated style stub).
 *
 * Sends the request, then dispatches events on the client's private
 * waitset until the reply has arrived and the binding can send again.
 * Exactly one RPC may be in flight at a time on this client.
 *
 * \param _rpc     RPC client state (must not have an RPC in progress)
 * \param err      Out: server-side error code of the call
 * \param address  Out: base address of the PCIe config space
 * \param segment  Out: PCIe segment
 * \param startbus Out: first bus number covered
 * \param endbus   Out: last bus number covered
 *
 * \return SYS_ERR_OK, a send error, a pushed LIB_ERR_EVENT_DISPATCH, or a
 *         stored asynchronous binding error.
 */
static errval_t acpi_get_pcie_confspace__rpc(struct acpi_rpc_client *_rpc, acpi_errval_t *err, uint64_t *address, uint16_t *segment, uint8_t *startbus, uint8_t *endbus)
{
    errval_t _err = SYS_ERR_OK;
    assert(!(_rpc->rpc_in_progress));
    assert((_rpc->async_error) == SYS_ERR_OK);
    _rpc->rpc_in_progress = true;
    _rpc->reply_present = false;

    // call send function
    _err = ((((_rpc->b)->tx_vtbl).get_pcie_confspace_call)(_rpc->b, NOP_CONT));
    if (err_is_fail(_err)) {
        goto out;
    }

    // wait for message to be sent and reply or error to be present
    while (((!(_rpc->reply_present)) || (!(((_rpc->b)->can_send)(_rpc->b)))) && ((_rpc->async_error) == SYS_ERR_OK)) {
        _err = event_dispatch(&(_rpc->rpc_waitset));
        if (err_is_fail(_err)) {
            _err = err_push(_err, LIB_ERR_EVENT_DISPATCH);
            goto out;
        }
    }

    // an asynchronous error on the binding takes precedence; clear it
    if (err_is_fail(_rpc->async_error)) {
        _err = (_rpc->async_error);
        _rpc->async_error = SYS_ERR_OK;
        goto out;
    }

    // grab reply variables out of binding
    struct acpi_binding *_binding = _rpc->b;
    *err = (((_binding->rx_union).get_pcie_confspace_response).err);
    *address = (((_binding->rx_union).get_pcie_confspace_response).address);
    *segment = (((_binding->rx_union).get_pcie_confspace_response).segment);
    *startbus = (((_binding->rx_union).get_pcie_confspace_response).startbus);
    *endbus = (((_binding->rx_union).get_pcie_confspace_response).endbus);

out:
    _rpc->rpc_in_progress = false;
    return(_err);
}
/**
 * Blocking RPC wrapper for the USB manager transfer_status call
 * (flounder-generated style stub).
 *
 * Sends the request for transfer \p tid, then dispatches events on the
 * client's private waitset until the reply has arrived and the binding
 * can send again. Exactly one RPC may be in flight at a time.
 *
 * \param _rpc          RPC client state (must not have an RPC in progress)
 * \param tid           Transfer id to query
 * \param ret_error     Out: transfer error code
 * \param ret_actlen    Out: actual length transferred
 * \param ret_length    Out: requested length
 * \param ret_actframes Out: actual number of frames
 * \param ret_numframes Out: requested number of frames
 *
 * \return SYS_ERR_OK, a send error, a pushed LIB_ERR_EVENT_DISPATCH, or a
 *         stored asynchronous binding error.
 */
static errval_t usb_manager_transfer_status__rpc(struct usb_manager_rpc_client *_rpc, uint32_t tid, uint32_t *ret_error, uint32_t *ret_actlen, uint32_t *ret_length, uint32_t *ret_actframes, uint32_t *ret_numframes)
{
    errval_t _err = SYS_ERR_OK;
    assert(!(_rpc->rpc_in_progress));
    assert((_rpc->async_error) == SYS_ERR_OK);
    _rpc->rpc_in_progress = true;
    _rpc->reply_present = false;

    // call send function
    _err = ((((_rpc->b)->tx_vtbl).transfer_status_call)(_rpc->b, NOP_CONT, tid));
    if (err_is_fail(_err)) {
        goto out;
    }

    // wait for message to be sent and reply or error to be present
    while (((!(_rpc->reply_present)) || (!(((_rpc->b)->can_send)(_rpc->b)))) && ((_rpc->async_error) == SYS_ERR_OK)) {
        _err = event_dispatch(&(_rpc->rpc_waitset));
        if (err_is_fail(_err)) {
            _err = err_push(_err, LIB_ERR_EVENT_DISPATCH);
            goto out;
        }
    }

    // an asynchronous error on the binding takes precedence; clear it
    if (err_is_fail(_rpc->async_error)) {
        _err = (_rpc->async_error);
        _rpc->async_error = SYS_ERR_OK;
        goto out;
    }

    // grab reply variables out of binding
    struct usb_manager_binding *_binding = _rpc->b;
    *ret_error = (((_binding->rx_union).transfer_status_response).ret_error);
    *ret_actlen = (((_binding->rx_union).transfer_status_response).ret_actlen);
    *ret_length = (((_binding->rx_union).transfer_status_response).ret_length);
    *ret_actframes = (((_binding->rx_union).transfer_status_response).ret_actframes);
    *ret_numframes = (((_binding->rx_union).transfer_status_response).ret_numframes);

out:
    _rpc->rpc_in_progress = false;
    return(_err);
}