static void get_arch_core_id(struct monitor_blocking_binding *b) { static uintptr_t arch_id = -1; errval_t err; // printf("%s:%s:%d: \n", __FILE__, __FUNCTION__, __LINE__); if (arch_id == -1) { err = invoke_monitor_get_arch_id(&arch_id); assert(err_is_ok(err)); assert(arch_id != -1); } err = b->tx_vtbl.get_arch_core_id_response(b, NOP_CONT, arch_id); assert(err_is_ok(err)); }
/**
 * \brief Handle a remote lock request for a capability on the BSP monitor.
 *
 * Attempts to take the lock in the BSP's rcap database on behalf of the
 * requesting core, then routes a lock reply (including descendant info)
 * back via the routing layer.
 *
 * \param cap            capability the remote core wants locked
 * \param from_core      core id of the requester
 * \param ccast_recordid routing record id used to address the reply
 *
 * \return result of routing the lock reply
 */
errval_t rcap_db_remote_lock_req(struct capability *cap, coreid_t from_core,
                                 recordid_t ccast_recordid)
{
    // only the BSP monitor services remote lock requests
    assert(bsp_monitor);
    errval_t reply_err = bsp_db_lock(cap, from_core);

    bool has_desc;
    coremask_t on_cores;
    errval_t err = rcap_db_get_info(cap, &has_desc, &on_cores);
    assert(err_is_ok(err));

    // if the lock succeeded, report the BSP core as the lock holder;
    // otherwise report an empty core mask
    return route_rcap_lock_reply(reply_err,
                                 err_is_ok(reply_err) ? BSP_CORE_MASK : 0,
                                 has_desc, ccast_recordid);
}
/**
 * \brief Allocate a frame of exactly \p bytes and map it uncached
 *        read/write into our vspace.
 *
 * \param retbuf out: virtual address of the mapping
 * \param retcap out: capref of the allocated frame
 * \param bytes  requested frame size in bytes (must be satisfiable exactly)
 */
static void frame_allocate_and_map(void **retbuf, struct capref *retcap,
                                   size_t bytes)
{
    size_t allocated;
    errval_t err = frame_alloc(retcap, bytes, &allocated);
    assert(err_is_ok(err));
    // we rely on getting exactly the size we asked for
    assert(allocated == bytes);

    err = vspace_map_one_frame_attr(retbuf, bytes, *retcap,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    assert(err_is_ok(err));
}
/**
 * \brief Send a reply back to the monitor. If the error code indicates success, this function
 * creates a new monitor binding and registers to receive messages.
 * \param multihop_chan
 * \param err error code to send back
 * \param vci my vci for ingoing messages
 * \param waitset waitset to use for the channel
 */
void multihop_chan_send_bind_reply(struct multihop_chan *mc,
                                   errval_t msgerr, multihop_vci_t vci,
                                   struct waitset *waitset)
{
    errval_t err;

    struct bind_multihop_reply_state *reply_state = malloc(
            sizeof(struct bind_multihop_reply_state));
    // NOTE(review): malloc result only assert-checked; disabled under NDEBUG
    assert(reply_state != NULL);

    if (err_is_ok(msgerr)) {
        // make sure channel exists
        assert(mc != NULL);
    } else {
        // make sure channel is not created
        assert(mc == NULL);
    }

    reply_state->mc = mc;
    reply_state->args.err = msgerr;
    reply_state->args.receiver_vci = vci;

    if (err_is_ok(msgerr)) {
        // get a vci for this binding
        reply_state->mc->my_vci = multihop_chan_mapping_insert(mc);
        reply_state->args.sender_vci = reply_state->mc->my_vci;
    } else {
        // no channel: sender vci of 0 signals the failure side
        reply_state->args.sender_vci = 0;
    }

    if (err_is_ok(msgerr)) {
        // create a new monitor binding; the continuation sends the reply
        // once the binding is established
        err = monitor_client_new_binding(
                multihop_new_monitor_binding_continuation2, reply_state,
                waitset, DEFAULT_LMP_BUF_WORDS);

        if (err_is_fail(err)) {
            USER_PANIC_ERR(
                    err,
                    "Could not create a new monitor binding in the multi-hop interconnect driver");
        }
    } else {
        // bind failed: reply on the existing (shared) monitor binding
        reply_state->monitor_binding = get_monitor_binding();

        // wait for the ability to use the monitor binding
        event_mutex_enqueue_lock(&reply_state->monitor_binding->mutex,
                &reply_state->qnode,
                MKCLOSURE(send_bind_reply, reply_state));
    }
}
/**
 * \brief Wakeup a thread on a foreign dispatcher while disabled.
 *
 * \param core_id Core ID to wakeup on
 * \param thread Pointer to thread to wakeup
 * \param mydisp Dispatcher this function is running on
 *
 * \return SYS_ERR_OK on success.
 */
static errval_t domain_wakeup_on_coreid_disabled(coreid_t core_id,
                                                 struct thread *thread,
                                                 dispatcher_handle_t mydisp)
{
    struct domain_state *ds = get_domain_state();

    // XXX: Ugly hack to allow waking up on a core id we don't have a
    // dispatcher handler for
    thread->coreid = core_id;

    // Catch this early
    assert_disabled(ds != NULL);
    // no spanned dispatcher on that core -> cannot wake up there
    if (ds->b[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;
    }

    // queue the thread; the remote dispatcher drains this queue
    thread_enqueue(thread, &ds->remote_wakeup_queue);

    // Signal the inter-disp waitset of this event
    struct event_closure closure = {
        .handler = handle_wakeup_on
    };
    errval_t err =
        waitset_chan_trigger_closure_disabled(&ds->interdisp_ws,
                                              &ds->remote_wakeup_event,
                                              closure,
                                              mydisp);
    // an already-registered channel just means a wakeup is pending anyway
    assert_disabled(err_is_ok(err) ||
                    err_no(err) == LIB_ERR_CHAN_ALREADY_REGISTERED);

    return SYS_ERR_OK;
}
/**
 * \brief Send (or queue for retry) a bind_monitor_reply_scc message.
 *
 * \param b      intermonitor binding to reply on
 * \param err    outcome of the bind to report
 * \param chanid channel id to report
 */
static void bind_monitor_reply_scc_cont(struct intermon_binding *b,
                                        errval_t err, chanid_t chanid)
{
    errval_t err2;

    err2 = b->tx_vtbl.bind_monitor_reply_scc(b, NOP_CONT, err, chanid,
                                             my_core_id);
    if (err_is_fail(err2)) {
        if(err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            // tx channel busy: stash the arguments and queue the send
            struct bind_monitor_reply_scc_state *me =
                malloc(sizeof(struct bind_monitor_reply_scc_state));
            assert(me != NULL);
            struct intermon_state *ist = b->st;
            assert(ist != NULL);
            me->args.err = err;
            me->args.chan_id = chanid;
            // retried via this handler once the channel is free
            me->elem.cont = bind_monitor_reply_scc_handler;

            err = intermon_enqueue_send(b, &ist->queue,
                                        get_default_waitset(),
                                        &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        DEBUG_ERR(err2, "reply failed");
    }
}
/**
 * \brief Benchmark init message handler: rebind to a private waitset,
 *        set up tracing, and start the experiment.
 *
 * \param b  bench binding of the peer
 * \param id core id of the peer (unused here)
 */
static void fsb_init_msg(struct bench_binding *b, coreid_t id)
{
    errval_t err;

    // change waitset of the binding so benchmark events are polled
    // on our own signal_waitset rather than the default one
    waitset_init(&signal_waitset);
    err = b->change_waitset(b, &signal_waitset);
    assert(err_is_ok(err));

    binding = b;
    reply_received = true;

#if CONFIG_TRACE
    // configure tracing
    err = trace_control(TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                                    TRACE_EVENT_MULTIHOP_BENCH_START, 0),
                        TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                                    TRACE_EVENT_MULTIHOP_BENCH_STOP, 0), 0);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_control failed");
    }
#endif

    // start tracing
    err = trace_event(TRACE_SUBSYS_MULTIHOP, TRACE_EVENT_MULTIHOP_BENCH_START,
                      0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_event failed");
    }

    experiment();
}
/**
 * \brief Look up the PCIe enhanced configuration space from the ACPI MCFG
 *        table and reply with its location, or with ACPI_ERR_NO_MCFG_TABLE.
 *
 * \param b ACPI binding to reply on
 */
static void get_pcie_confspace(struct acpi_binding* b)
{
    ACPI_DEBUG("get_pcie_confspace\n");

    errval_t err;
    ACPI_STATUS as;
    ACPI_TABLE_HEADER *mcfg_header;

    as = AcpiGetTable("MCFG", 1, &mcfg_header);
    if (ACPI_SUCCESS(as) && mcfg_correct_length(mcfg_header->Length)) {
        // first allocation record follows directly after the MCFG header
        // (arithmetic on void* is a GNU extension: byte-granularity)
        // NOTE(review): only the first ACPI_MCFG_ALLOCATION entry is
        // reported; additional segments, if any, are ignored — confirm
        ACPI_MCFG_ALLOCATION *mcfg =
            (void*) mcfg_header + sizeof(ACPI_TABLE_MCFG);
        ACPI_DEBUG(
                "PCIe enhanced configuration region at 0x%"PRIx64" "
                "(segment %u, buses %u-%u)\n", mcfg->Address,
                mcfg->PciSegment, mcfg->StartBusNumber, mcfg->EndBusNumber);

        err = b->tx_vtbl.get_pcie_confspace_response(b, NOP_CONT,
                SYS_ERR_OK, mcfg->Address, mcfg->PciSegment,
                mcfg->StartBusNumber, mcfg->EndBusNumber);
    } else {
        ACPI_DEBUG("No MCFG table found -> no PCIe enhanced configuration\n");
        err = b->tx_vtbl.get_pcie_confspace_response(b, NOP_CONT,
                ACPI_ERR_NO_MCFG_TABLE, 0, 0, 0, 0);
    }

    assert(err_is_ok(err));
}
/// Handler for LMP bind reply messages from the Monitor static void bind_lmp_reply_handler(struct monitor_binding *b, errval_t success, uintptr_t mon_id, uintptr_t conn_id, struct capref endpoint) { struct lmp_chan *lc = (void *)conn_id; errval_t err; assert(lc->connstate == LMP_BIND_WAIT); if (err_is_ok(success)) { /* bind succeeded */ lc->connstate = LMP_CONNECTED; /* Place the cap in the rootcn, to allow LRPC */ err = move_to_root(endpoint, &lc->remote_cap); if (err_is_fail(err)) { DEBUG_ERR(err, "error moving endpoint cap to root in LMP bind reply"); // leave it where it is, and continue lc->remote_cap = endpoint; } } /* either way, tell the user what happened */ assert(lc->bind_continuation.handler != NULL); lc->bind_continuation.handler(lc->bind_continuation.st, success, lc); }
/**
 * \brief Intermonitor handler: update the owner of a capability.
 *
 * Creates a local copy of the cap if one exists, sets its owner to the
 * sending core, then destroys the temporary copy and acknowledges the
 * update. A missing local copy (SYS_ERR_CAP_NOT_FOUND) is not an error.
 *
 * \param b      intermonitor binding the message arrived on
 * \param caprep wire representation of the capability
 * \param st     opaque state, passed back in the owner_updated ack
 */
void update_owner__rx_handler(struct intermon_binding *b,
                              intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;
    struct capref capref;
    struct capability cap;
    caprep_to_capability(&caprep, &cap);

    // temporary slot to hold a local copy of the cap
    err = slot_alloc(&capref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate slot for owner update");
    }

    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_ok(err)) {
        err = monitor_set_cap_owner(cap_root, get_cap_addr(capref),
                                    get_cap_valid_bits(capref), from);
    }
    // no local copy -> nothing to update, treat as success
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        err = SYS_ERR_OK;
    }

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to update cap ownership");
    }

    // discard the temporary copy
    cap_destroy(capref);

    err = owner_updated(from, st);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send ownership update response");
    }
}
errval_t set_record(struct ast_object* ast, uint64_t mode, struct oct_query_state* sqs) { assert(ast != NULL); assert(sqs != NULL); struct skb_ec_terms sr; errval_t err = transform_record(ast, &sr); if (err_is_ok(err)) { // Calling add_object(Name, Attributes) dident add_object; if (mode & SET_SEQUENTIAL) { add_object = ec_did("add_seq_object", 3); } else { add_object = ec_did("add_object", 3); } pword add_object_term = ec_term(add_object, sr.name, sr.attribute_list, sr.constraint_list); ec_post_goal(add_object_term); err = run_eclipse(sqs); OCT_DEBUG(" set_record:\n"); debug_skb_output(sqs); if (err_no(err) == SKB_ERR_GOAL_FAILURE) { /*OCT_DEBUG("Goal failure during set record. Should not happen!\n"); assert(!"SKB_ERR_GOAL_FAILURE during set?");*/ err = err_push(err, OCT_ERR_CONSTRAINT_MISMATCH); } } return err; }
/** * \brief Send a capability over the multi-hop channel * * \param mc pointer to the multi-hop channel * \param _continuation callback to be executed after the message is sent * \param cap_state pointer to the cap state of the channel * \param cap the capability to send */ errval_t multihop_send_capability(struct multihop_chan *mc, struct event_closure _continuation, struct flounder_cap_state *cap_state, struct capref cap) { errval_t err; assert(mc->connstate == MULTIHOP_CONNECTED); struct monitor_binding *mon_binding = mc->monitor_binding; // send the message err = mon_binding->tx_vtbl.multihop_cap_send(mon_binding, _continuation, mc->vci, mc->direction, SYS_ERR_OK, cap, cap_state->tx_capnum); if (err_is_ok(err)) { // increase capability number cap_state->tx_capnum++; return err; } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { return err; } else { return err_push(err, LIB_ERR_MONITOR_CAP_SEND); } }
/**
 * \brief Tear down the queue: release all registered buffers, then call
 *        the driver's terminate hook.
 *
 * \param cc the net_queue_manager binding (unused here)
 */
static void terminate_queue(struct net_queue_manager_binding *cc)
{
    errval_t err;
    struct buffer_descriptor *buffer;

    // Free buffers
    // NOTE(review): only the mappings and caps are released; the
    // descriptor structs themselves are not freed — confirm intentional
    for (buffer = buffers_list; buffer != NULL; buffer = buffer->next) {
        err = vspace_unmap(buffer->va);
        assert(err_is_ok(err));
        err = cap_delete(buffer->cap);
        assert(err_is_ok(err));
    }

    assert(ether_terminate_queue_ptr != NULL);
    ether_terminate_queue_ptr();
}
/**
 * \brief Allocate fixed-size RAM caps in a loop until allocation fails,
 *        then report how many were obtained.
 *
 * \param core     target core (currently unused)
 * \param requests request budget (currently unused; loop runs to failure)
 */
static void run_benchmark(coreid_t core, int requests)
{
    errval_t err;
    struct capref ramcap;
    int i = -1;               // incremented before first alloc -> counts from 0
    int bits = MEM_BITS;      // fixed allocation size (2^bits bytes)

    debug_printf("starting benchmark. allocating mem of size: %d\n", bits);
    //debug_printf("starting benchmark. allocating mem of size: %d to %d\n",
    //             MINSIZEBITS, MINSIZEBITS+requests-1);

    sleep_init();

    // keep allocating until ram_alloc reports failure (exhaustion)
    do {
        i++;
        // bits = MINSIZEBITS+i;
        trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_ALLOC, i);
        err = ram_alloc(&ramcap, bits);
        // milli_sleep(1);
        /*
        if ((i % 500 == 0) && (i > 0)) {
            debug_printf("allocated %d caps\n", i);
        }
        */
    } while (err_is_ok(err)); // && (i < requests));

    debug_printf("done benchmark. allocated %d caps (%lu bytes)\n",
                 i, i * (1UL << bits));
}
/**
 * \brief Send a multi-hop message that contains no payload.
 * It is used to acknowledge received messages.
 *
 * \param mc pointer to the multi-hop channel
 */
static void multihop_send_dummy_message(struct multihop_chan *mc)
{
    assert(mc->connstate == MULTIHOP_CONNECTED);

#if MULTIHOP_FLOW_CONTROL

    MULTIHOP_DEBUG("sending dummy message, ack %d...\n", mc->unacked_received);

    errval_t err;
    struct monitor_binding *monitor_binding = mc->monitor_binding;

    // send message
    // payload is a 1-byte placeholder (the channel pointer reused as a
    // harmless buffer); the receiver ignores dummy payloads
    err = monitor_binding->tx_vtbl.multihop_message(monitor_binding, NOP_CONT,
            mc->vci, mc->direction, MULTIHOP_MESSAGE_FLAG_DUMMY,
            mc->unacked_received, (uint8_t *) mc, 1);

    if (err_is_ok(err)) {
        // we have just acknowledged all received messages
        mc->unacked_received = 0;
    } else if (err_no(err) != FLOUNDER_ERR_TX_BUSY) {
        // TX_BUSY is tolerated (the ack will piggyback on a later send);
        // any other failure is fatal
        USER_PANIC_ERR(err,
                "Could not send dummy message over multi-hop channel\n");
    }

#endif // MULTIHOP_FLOW_CONTROL
}
/**
 * \brief Deliver the final result of a revoke operation to its initiator.
 *
 * Unlocks the target cap if we still hold the lock, clears the remote
 * copies bit on success, then invokes the stored result handler and frees
 * the master state.
 *
 * \param result outcome of the revocation
 * \param st     revoke master state (freed here)
 * \param locked whether the target cap is still locked by us
 */
static void revoke_result__rx(errval_t result, struct revoke_master_st *st,
                              bool locked)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);

    if (locked) {
        caplock_unlock(st->cap);
    }

    if (err_is_ok(result)) {
        // clear the remote copies bit
        errval_t relerr = monitor_domcap_remote_relations(st->cap.croot,
                                                          st->cap.cptr,
                                                          st->cap.bits, 0,
                                                          RRELS_COPY_BIT,
                                                          NULL);
        // a vanished cap is expected after a revoke; anything else is logged
        bool cap_gone = err_no(relerr) == SYS_ERR_CAP_NOT_FOUND;
        if (err_is_fail(relerr) && !cap_gone) {
            DEBUG_ERR(relerr, "resetting remote copies bit after revoke");
        }
    }

    DEBUG_CAPOPS("%s ## revocation completed, calling %p\n", __FUNCTION__,
                 st->result_handler);

    st->result_handler(result, st->st);
    free(st);
}
/**
 * \brief Send (or queue for retry) an ipi_alloc_notify reply.
 *
 * \param b          monitor binding to reply on
 * \param state      client-provided state echoed back
 * \param notify_cap notification capability being returned
 * \param reterr     result of the allocation to report
 */
static void ipi_alloc_notify_reply_cont(struct monitor_binding *b,
                                        uintptr_t state,
                                        struct capref notify_cap,
                                        errval_t reterr)
{
    errval_t err = b->tx_vtbl.ipi_alloc_notify_reply(b, NOP_CONT, state,
                                                     notify_cap, reterr);
    if(err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // tx channel busy: stash arguments and queue the send for retry
            struct monitor_state *st = b->st;
            struct ipi_alloc_notify_reply_state *me =
                malloc(sizeof(struct ipi_alloc_notify_reply_state));
            assert(me != NULL);
            me->args.state = state;
            me->args.notify = notify_cap;
            me->args.err = reterr;
            // retried via this handler once the channel is free
            me->elem.cont = ipi_alloc_notify_reply_handler;

            err = monitor_enqueue_send(b, &st->queue,
                                       get_default_waitset(),
                                       &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err, "sending reply");
    }
    // reached only when the initial send succeeded
    assert(err_is_ok(err));
}
void thc_await_send(struct thc_per_binding_state_t *thc, void *f) { struct common_binding *c = (struct common_binding *)f; DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " > thc_await_send\n")); // Synchronize with thc_send_possible_event callback thc_lock_acquire(&thc->thc_binding_lock); // Request an event when sending is possible if (!thc->send_possible_event_requested) { errval_t err = c->register_send(c, get_default_waitset(), MKCONT(thc_send_possible_event, c)); if (err == FLOUNDER_ERR_TX_BUSY) { goto done; } assert(err_is_ok(err)); thc->send_possible_event_requested = 1; } // Wait // // We release the binding lock before blocking. It is passed back to us // by the notification THCSuspendThen(&thc->waiting_sender, thc_await_send0, (void*) &thc->thc_binding_lock); done: thc_lock_release(&thc->thc_binding_lock); DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " > thc_await_send\n")); }
/**
 * \brief Handle a remote cap revoke request.
 *
 * Identifies the cap in the kernel, then requests a recursive lock on it
 * and all descendants; the operation continues asynchronously once the
 * lock is granted. On early failure, replies immediately with the error.
 *
 * \param b     binding the request arrived on
 * \param croot cspace root of the target domain
 * \param src   address of the cap to revoke
 * \param vbits number of valid bits in src
 */
static void remote_cap_revoke(struct monitor_blocking_binding *b,
                              struct capref croot, capaddr_t src,
                              uint8_t vbits)
{
    errval_t err;

    /* Save state for stackripped reply */
    struct revoke_st * st = alloc_revoke_st(b, croot, src, vbits);

    /* Get the raw cap from the kernel */
    err = monitor_domains_cap_identify(croot, src, vbits,
                                       &(st->rcap_st.capability));
    if (err_is_fail(err)) {
        // BUG FIX: err_push returns the stacked error; the original
        // discarded the result, losing MON_ERR_CAP_REMOTE from the reply
        err = err_push(err, MON_ERR_CAP_REMOTE);
        goto reply;
    }

    /* request recursive lock on the cap and all of its descendants */
    err = rcap_db_acquire_recursive_lock(&(st->rcap_st.capability),
                                         (struct rcap_st*)st);
    if (err_is_fail(err)) {
        goto reply;
    }

    return;  // continues in remote_cap_retype_phase_2

reply:
    free_revoke_st(st);
    err = b->tx_vtbl.remote_cap_revoke_response(b, NOP_CONT, err);
    assert(err_is_ok(err));
}
/**
 * \brief Completion callback: send the cap_delete result to the client.
 *
 * \param status outcome of the delete operation
 * \param st     the monitor_blocking binding, passed as opaque state
 */
static void delete_reply_status(errval_t status, void *st)
{
    DEBUG_CAPOPS("sending cap_delete reply msg: %s\n",
                 err_getstring(status));

    struct monitor_blocking_binding *binding = st;
    errval_t send_err =
        binding->tx_vtbl.remote_cap_delete_response(binding, NOP_CONT,
                                                    status);
    assert(err_is_ok(send_err));
}
/**
 * \brief Send a multi-hop message
 *
 * \param mc pointer to the multi-hop channel
 * \param _continuation callback to be executed after the message is sent
 * \param msg pointer to the message payload
 * \param msglen length of the message payload (in bytes)
 *
 */
errval_t multihop_send_message(struct multihop_chan *mc,
                               struct event_closure _continuation,
                               void *msg, size_t msglen)
{
    errval_t err;
    struct monitor_binding *monitor_binding = mc->monitor_binding;
    assert(mc->connstate == MULTIHOP_CONNECTED);

#if MULTIHOP_FLOW_CONTROL
    // make sure that we can send another message
    // (window full -> report busy, caller retries later)
    if (mc->unacked_send == MULTIHOP_WINDOW_SIZE) {
        return FLOUNDER_ERR_TX_BUSY;
    }
#endif // MULTIHOP_FLOW_CONTROL

    // send message; piggybacks the count of messages we have received
    // but not yet acknowledged
    err = monitor_binding->tx_vtbl.multihop_message(monitor_binding,
            _continuation, mc->vci, mc->direction,
            MULTIHOP_MESSAGE_FLAG_PAYLOAD, mc->unacked_received,
            (uint8_t *) msg, msglen);

#if MULTIHOP_FLOW_CONTROL
    if (err_is_ok(err)) {
        // update flow control information: receive-acks were piggybacked,
        // and one more of our sends is now outstanding
        mc->unacked_received = 0;
        mc->unacked_send = mc->unacked_send + 1;
    }
#endif // MULTIHOP_FLOW_CONTROL

    return err;
}
// Get the bootinfo and map it in. static errval_t map_bootinfo(struct bootinfo **bootinfo) { errval_t err, msgerr; struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client(); assert(cl != NULL); struct capref bootinfo_frame; size_t bootinfo_size; msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size); if (err_is_fail(msgerr)) { err = msgerr; } if (err_is_fail(err)) { USER_PANIC_ERR(err, "failed in get_bootinfo"); return err; } err = vspace_map_one_frame((void**)bootinfo, bootinfo_size, bootinfo_frame, NULL, NULL); assert(err_is_ok(err)); return err; }
/**
 * \brief Barrier test: enter a named barrier with N participants, run the
 *        protected section, then leave.
 *
 * argv[1] is the number of processes expected at the barrier.
 */
int main(int argc, char *argv[])
{
    assert(argc >= 2);
    errval_t err = SYS_ERR_OK;

    oct_init();

    size_t wait_for = atoi(argv[1]);
    char* record = NULL;
    // BUG FIX: %zu for size_t — %lu is undefined behavior on platforms
    // where size_t is not unsigned long
    debug_printf("Barrier test with: %zu processes:\n", wait_for);
    err = oct_barrier_enter("my_barrier", &record, wait_for);
    if(err_is_ok(err)) {
        // inside the barrier: all participants have arrived
        debug_printf("Execute Barrier code section\n");
        debug_printf("Barrier record is: %s\n", record);
    } else {
        DEBUG_ERR(err, "Barrier enter fail.");
        abort();
    }
    err = oct_barrier_leave(record);
    ASSERT_ERR_OK(err);
    debug_printf("Process no longer inside barrier.\n");

    free(record);

    return EXIT_SUCCESS;
}
/**
 * \brief Set up the kernel-notification endpoint for domain management.
 *
 * Creates an oversized LMP endpoint, registers a handler for incoming
 * notifications on the default waitset, and registers the endpoint with
 * the kernel.
 */
void domain_mgmt_init(void)
{
    errval_t err;

    /* Register notification endpoint with kernel */
    struct capref epcap;
    struct lmp_endpoint *notifyep;
    // XXX: This has to be huge so we can receive a batch of
    // notifications when deleting CNodes recursively.
    err = endpoint_create(100 * 12, &epcap, &notifyep);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed creating endpoint");
    }

    // register to receive on this endpoint
    struct event_closure cl = {
        .handler = handle_notification,
        .arg = notifyep,
    };
    err = lmp_endpoint_register(notifyep, get_default_waitset(), cl);
    assert(err_is_ok(err));

    // tell the kernel to deliver notifications to this endpoint
    err = invoke_monitor_register(epcap);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not register with kernel");
    }
    else {
#ifdef DEBUG_MONITOR_ALL
        debug_printf("monitor ep registered\n");
#endif
    }
}
/**
 * \brief Intermonitor handler: one core's answer to a find_cap broadcast.
 *
 * The first positive answer is forwarded to the caller immediately; when
 * the last expected reply arrives and nothing was found, the caller is
 * told SYS_ERR_CAP_NOT_FOUND and the broadcast state is freed.
 *
 * \param b      binding the reply arrived on (identifies the sender core)
 * \param result SYS_ERR_OK if the sender holds a copy, else an error
 * \param st     pointer to our find_cap_broadcast_st, round-tripped
 */
void find_cap_result__rx_handler(struct intermon_binding *b, errval_t result,
                                 genvaddr_t st)
{
    // if we receive a positive result, immediately forward to caller
    lvaddr_t lst = (lvaddr_t)st;
    struct find_cap_broadcast_st *fc_bc_st =
        (struct find_cap_broadcast_st*)lst;
    if (err_is_ok(result)) {
        if (!fc_bc_st->found) {
            // only the first hit is reported; later hits are ignored
            fc_bc_st->found = true;
            struct intermon_state *inter_st = (struct intermon_state*)b->st;
            coreid_t from = inter_st->core_id;
            fc_bc_st->result_handler(SYS_ERR_OK, from, fc_bc_st->st);
        }
    }
    else if (err_no(result) != SYS_ERR_CAP_NOT_FOUND) {
        // CAP_NOT_FOUND is the expected negative answer; log anything else
        DEBUG_ERR(result, "ignoring bad find_cap_result");
    }

    // check to see if broadcast is complete
    if (capsend_handle_mc_reply(&fc_bc_st->bc)) {
        if (!fc_bc_st->found) {
            // broadcast did not find a core, report notfound to caller
            fc_bc_st->result_handler(SYS_ERR_CAP_NOT_FOUND, 0, fc_bc_st->st);
        }
        free(fc_bc_st);
    }
}
/**
 * \brief Default Kaluga driver start function.
 *
 * Starts the driver binary for a module on the given core. If the device
 * record carries a PCI id, an extra "vvvv:dddd:bb:dd:ff"-style argument
 * is appended to the driver's argv.
 *
 * \param where  base core id; offset by get_core_id_offset(mi)
 * \param mi     module to start (must not be NULL)
 * \param record octopus device record, queried for the PCI id
 *
 * \return KALUGA_ERR_DRIVER_ALREADY_STARTED / KALUGA_ERR_DRIVER_NOT_AUTO,
 *         or the result of spawn_program
 */
errval_t default_start_function(coreid_t where, struct module_info* mi,
                                char* record)
{
    assert(mi != NULL);
    errval_t err = SYS_ERR_OK;
    coreid_t core;

    /*
     * XXX: there may be more device using this driver, so starting it a second time
     * may be needed.
     */
    if (!can_start(mi)) {
        return KALUGA_ERR_DRIVER_ALREADY_STARTED;
    }

    core = where + get_core_id_offset(mi);

    if (!is_auto_driver(mi)) {
        return KALUGA_ERR_DRIVER_NOT_AUTO;
    }

    // Construct additional command line arguments containing pci-id.
    // We need one extra entry for the new argument.
    uint64_t vendor_id, device_id, bus, dev, fun;
    char **argv = mi->argv;
    bool cleanup = false;
    err = oct_read(record, "_ { bus: %d, device: %d, function: %d, vendor: %d, device_id: %d }",
                   &bus, &dev, &fun, &vendor_id, &device_id);
    if (err_is_ok(err)) {
        // We assume that we're starting a device if the query above succeeds
        // and therefore append the pci vendor and device id to the argument
        // list.
        // BUG FIX: need argc+2 slots — argc existing args, the pci id at
        // index argc, and the NULL terminator at index argc+1. The
        // original argc+1 allocation overflowed by one pointer.
        argv = malloc((mi->argc+2) * sizeof(char *));
        memcpy(argv, mi->argv, mi->argc * sizeof(char *));
        // 5 fields of 4 hex digits + 4 colons + NUL = 25 bytes
        char *pci_id = malloc(26);
        // BUG FIX: vendor/device ids are 16-bit values; the old bound of
        // 0x9999 wrongly rejected valid ids in 0x9999..0xFFFF
        assert(vendor_id < 0x10000 && device_id < 0x10000);
        snprintf(pci_id, 26, "%04"PRIx64":%04"PRIx64":%04"PRIx64":%04"
                 PRIx64":%04"PRIx64, vendor_id, device_id, bus, dev, fun);

        argv[mi->argc] = pci_id;
        argv[mi->argc+1] = NULL;
        cleanup = true;
    }
    err = spawn_program(core, mi->path, argv, environ, 0, get_did_ptr(mi));
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Spawning %s failed.", mi->path);
    }
    if (cleanup) {
        // alloc'd string is the last of our array
        free(argv[mi->argc]);
        free(argv);
    }

    return err;
}
/**
 * \brief Look up the first record matching the query and print it.
 *
 * Posts get_first_object(Name, Attrs, Constraints, Y), print_object(Y)
 * to the solver; the printed record lands in the query state's output.
 *
 * \param ast parsed query AST (must not be NULL)
 * \param sqs query state receiving solver output (must not be NULL)
 *
 * \return error from transform/solver; goal failure becomes
 *         OCT_ERR_NO_RECORD
 */
errval_t get_record(struct ast_object* ast, struct oct_query_state* sqs)
{
    assert(ast != NULL);
    assert(sqs != NULL);

    struct skb_ec_terms sr;
    errval_t err = transform_record(ast, &sr);
    if (err_is_ok(err)) {
        // Calling get_object(Name, Attrs, Constraints, Y), print_object(Y).
        dident get_object = ec_did("get_first_object", 4);
        dident print_object = ec_did("print_object", 1);

        // fresh logic variable Y links the two goals
        pword print_var = ec_newvar();
        pword get_object_term = ec_term(get_object, sr.name,
                                        sr.attribute_list,
                                        sr.constraint_list, print_var);
        pword print_term = ec_term(print_object, print_var);

        ec_post_goal(get_object_term);
        ec_post_goal(print_term);

        err = run_eclipse(sqs);
        if (err_no(err) == SKB_ERR_GOAL_FAILURE) {
            // no matching record is a distinct, expected outcome
            err = err_push(err, OCT_ERR_NO_RECORD);
        }

        OCT_DEBUG(" get_record:\n");
        debug_skb_output(sqs);
    }

    return err;
}
/**
 * \brief callback for creating the dispatcher on the remote core
 *
 * \param arg argument for the callback (pointer to a uint32_t done flag)
 * \param err outcome of the spanning request
 */
static void bomp_thread_init_done(void *arg, errval_t err)
{
    // spanning must have succeeded before we signal completion
    assert(err_is_ok(err));

    uint32_t *done_flag = arg;
    *done_flag = 1;
}
/**
 * \brief Delete a record from the SKB.
 *
 * \param ast parsed record AST (must not be NULL)
 * \param dqs query state receiving solver output (must not be NULL)
 *
 * \return error from transform/solver; goal failure becomes
 *         OCT_ERR_NO_RECORD
 */
errval_t del_record(struct ast_object* ast, struct oct_query_state* dqs)
{
    // TODO sr.attributes, sr.constraints currently not used for delete
    // it's just based on the name
    // Think about how to constraints / attributes behave with del
    assert(ast != NULL);
    assert(dqs != NULL);

    struct skb_ec_terms sr;
    errval_t err = transform_record(ast, &sr);
    if (err_is_ok(err)) {
        // Calling del_object(Name, Attributes, Constraints)
        dident del_object = ec_did("del_object", 3);
        pword del_object_term = ec_term(del_object, sr.name,
                                        sr.attribute_list,
                                        sr.constraint_list);

        ec_post_goal(del_object_term);
        err = run_eclipse(dqs);
        if (err_no(err) == SKB_ERR_GOAL_FAILURE) {
            // record did not exist
            err = err_push(err, OCT_ERR_NO_RECORD);
        }

        OCT_DEBUG(" del_record:\n");
        debug_skb_output(dqs);
    }

    return err;
}
/**
 * \brief Establish an RCCE binding to peer \p idx and wait until the
 *        connection handshake completes.
 *
 * Looks up "<my_name><idx>" in the nameservice, binds to it, and spins on
 * the waitset until client_connected flips request_done.
 *
 * \param idx index of the peer to connect to
 */
static void connect(coreid_t idx)
{
    errval_t err;
    char id[100];

    // peer's service name is our name with the peer index appended
    snprintf(id, sizeof(id), "%s%d", my_name, idx);

    iref_t iref;
    err = nameservice_blocking_lookup(id, &iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "nameservice_blocking_lookup failed");
        abort();
    }
    assert(iref != 0);

    // per-connection state; freed by the connection machinery, not here
    struct rcce_state *st = malloc(sizeof(struct rcce_state));
    assert(st != NULL);
    memset(st, 0, sizeof(struct rcce_state));
    st->index = idx;
    st->request_done = false;

    /* printf("%s: rcce_bind\n", my_name); */

    err = rcce_bind(iref, client_connected, st, get_default_waitset(),
                    IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(err));

    /* printf("%s: waiting\n", my_name); */

    // block until client_connected signals completion
    while (!st->request_done) {
        messages_wait_and_handle_next();
    }

    /* printf("%s: done\n", my_name); */
}