/// Destroy the local state associated with a given channel void lmp_chan_destroy(struct lmp_chan *lc) { lc->connstate = LMP_DISCONNECTED; cap_destroy(lc->local_cap); if (lc->endpoint != NULL) { lmp_endpoint_free(lc->endpoint); } // remove from send retry queue on dispatcher if (waitset_chan_is_registered(&lc->send_waitset)) { assert(lc->prev != NULL && lc->next != NULL); dispatcher_handle_t handle = disp_disable(); struct dispatcher_generic *disp = get_dispatcher_generic(handle); if (lc->next == lc->prev) { assert_disabled(lc->next == lc); assert_disabled(disp->lmp_send_events_list == lc); disp->lmp_send_events_list = NULL; } else { lc->prev->next = lc->next; lc->next->prev = lc->prev; } disp_enable(handle); #ifndef NDEBUG lc->next = lc->prev = NULL; #endif } waitset_chanstate_destroy(&lc->send_waitset); }
/**
 * \brief Look up the spawn RPC client for \p core in the dispatcher's
 *        private (core_state) area.
 *
 * \param core Core ID to look up; must be below MAX_CPUS
 * \return Stored client binding (may be NULL if never set)
 */
struct spawn_rpc_client *get_spawn_rpc_client(coreid_t core)
{
    assert(core < MAX_CPUS);
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return dg->core_state.c.spawn_rpc_clients[core];
}
/**
 * \brief Move the calling thread to another core.
 *
 * Removes the current thread from this dispatcher's run queue and hands it
 * to the target core via domain_wakeup_on_coreid_disabled().  On success
 * this function NEVER RETURNS on the calling core: control passes either
 * to the next runnable thread or back to the scheduler via yield.
 *
 * \param thread  Must be the calling thread (asserted)
 * \param core_id Destination core
 * \return Error from domain_wakeup_on_coreid_disabled() on failure
 *         (thread is re-enqueued locally); does not return on success.
 */
errval_t domain_thread_move_to(struct thread *thread, coreid_t core_id)
{
    assert(thread == thread_self());
    // Everything below runs disabled: we are manipulating the run queue
    // and will context-switch away without re-enabling on success.
    dispatcher_handle_t mydisp = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(mydisp);
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(mydisp);

    // Capture the successor BEFORE unlinking ourselves from the queue.
    struct thread *next = thread->next;
    thread_remove_from_queue(&disp_gen->runq, thread);

    errval_t err = domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
    if(err_is_fail(err)) {
        // Migration failed: restore the thread locally and report.
        thread_enqueue(thread, &disp_gen->runq);
        disp_enable(mydisp);
        return err;
    }

    // run the next thread, if any
    if (next != thread) {
        // Another thread is runnable: switch to it directly.
        disp_gen->current = next;
        disp_resume(mydisp, &next->regs);
    } else {
        // We were the only runnable thread: idle this dispatcher.
        disp_gen->current = NULL;
        disp->haswork = havework_disabled(mydisp);
        disp_yield_disabled(mydisp);
    }

    // disp_resume()/disp_yield_disabled() do not return here.
    USER_PANIC("should never be reached");
}
/** * \brief Register an event handler to be notified when messages can be sent * * In the future, call the closure on the given waitset when it is likely that * a message can be sent on the channel. A channel may only be registered * with a single send event handler on a single waitset at any one time. * * \param lc LMP channel * \param ws Waitset * \param closure Event handler */ errval_t lmp_chan_register_send(struct lmp_chan *lc, struct waitset *ws, struct event_closure closure) { assert(lc != NULL); assert(ws != NULL); errval_t err = waitset_chan_register(ws, &lc->send_waitset, closure); if (err_is_fail(err)) { return err; } // enqueue in list of channels with a registered event to retry sending assert(lc->next == NULL && lc->prev == NULL); dispatcher_handle_t handle = disp_disable(); struct dispatcher_generic *dp = get_dispatcher_generic(handle); if (dp->lmp_send_events_list == NULL) { dp->lmp_send_events_list = lc; lc->next = lc->prev = lc; } else { lc->prev = dp->lmp_send_events_list->prev; lc->next = dp->lmp_send_events_list; lc->prev->next = lc; lc->next->prev = lc; } disp_enable(handle); return err; }
/** * \brief Cancel an event registration made with lmp_chan_register_send() * * \param lc LMP channel */ errval_t lmp_chan_deregister_send(struct lmp_chan *lc) { assert(lc != NULL); errval_t err = waitset_chan_deregister(&lc->send_waitset); if (err_is_fail(err)) { return err; } // dequeue from list of channels with send events assert(lc->next != NULL && lc->prev != NULL); dispatcher_handle_t handle = disp_disable(); struct dispatcher_generic *dp = get_dispatcher_generic(handle); if (lc->next == lc->prev) { assert_disabled(dp->lmp_send_events_list == lc); dp->lmp_send_events_list = NULL; } else { lc->prev->next = lc->next; lc->next->prev = lc->prev; if (dp->lmp_send_events_list == lc) { dp->lmp_send_events_list = lc->next; } } #ifndef NDEBUG lc->prev = lc->next = NULL; #endif disp_enable(handle); return err; }
/**
 * \brief Store the Arrakis RPC client binding for \p core in the
 *        dispatcher's private (core_state) area.
 *
 * \param core Core ID; must be below MAX_CPUS
 * \param c    Client binding to store
 */
void set_arrakis_rpc_client(coreid_t core, struct arrakis_rpc_client *c)
{
    assert(core < MAX_CPUS);
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    dg->core_state.c.arrakis_rpc_clients[core] = c;
}
/// Recompute the kernel wakeup time from the head of the deferred-event
/// queue (0 = no wakeup requested).  Must be called while disabled.
static void update_wakeup_disabled(dispatcher_handle_t dh)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(dh);
    struct dispatcher_shared_generic *ds = get_dispatcher_shared_generic(dh);

    // The queue is kept sorted, so the head (if any) holds the earliest
    // deadline; convert it from internal time units for the kernel.
    ds->wakeup = (dg->deferred_events == NULL)
                     ? 0
                     : dg->deferred_events->time / SYSTIME_MULTIPLIER;
}
/**
 * \brief returns the address and the size of the EH frame header
 *
 * Either out-parameter may be NULL if the caller does not need it.
 *
 * \param eh_frame_hdr      returned virtual address of the EH frame header
 * \param eh_frame_hdr_size returned size of the EH frame header
 */
void disp_get_eh_frame_hdr(lvaddr_t *eh_frame_hdr, size_t *eh_frame_hdr_size)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    if (eh_frame_hdr != NULL) {
        *eh_frame_hdr = dg->eh_frame_hdr;
    }
    if (eh_frame_hdr_size != NULL) {
        *eh_frame_hdr_size = dg->eh_frame_hdr_size;
    }
}
/// Interdisp handler: enqueue a thread (identified by its address, sent
/// from another dispatcher) on this dispatcher's run queue.
static void wakeup_thread_request(struct interdisp_binding *b,
                                  genvaddr_t taddr)
{
    struct thread *wakeup = (struct thread *)(uintptr_t)taddr;
    coreid_t core_id = disp_get_core_id();

    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    /* assert_disabled(wakeup->disp == handle); */
    assert_disabled(wakeup->coreid == core_id);
    // adopt the thread onto this dispatcher and make it runnable
    wakeup->disp = handle;
    thread_enqueue(wakeup, &disp_gen->runq);
    disp_enable(handle);
}
/**
 * \brief Register a deferred event
 *
 * Registers the closure on the waitset and inserts the event into the
 * dispatcher's time-sorted queue of pending timers, then updates the
 * kernel wakeup time.  Runs disabled throughout.
 *
 * \param ws      Waitset
 * \param delay   Delay in microseconds
 * \param closure Event closure to execute
 * \param event   Storage for event metadata
 */
errval_t deferred_event_register(struct deferred_event *event,
                                 struct waitset *ws, delayus_t delay,
                                 struct event_closure closure)
{
    errval_t err;

    dispatcher_handle_t dh = disp_disable();
    err = waitset_chan_register_disabled(ws, &event->waitset_state, closure);
    if (err_is_ok(err)) {
        struct dispatcher_generic *dg = get_dispatcher_generic(dh);

        // XXX: determine absolute time for event (ignoring time since dispatch!)
        event->time = get_system_time() + delay;

        // enqueue in sorted list of pending timers
        // Walk with e = candidate successor, p = candidate predecessor;
        // stop at the first element strictly later than us (or at the end).
        for (struct deferred_event *e = dg->deferred_events, *p = NULL; ;
             p = e, e = e->next) {
            if (e == NULL || e->time > event->time) {
                if (p == NULL) { // insert at head
                    assert(e == dg->deferred_events);
                    event->prev = NULL;
                    event->next = e;
                    if (e != NULL) {
                        e->prev = event;
                    }
                    dg->deferred_events = event;
                } else {
                    // insert between p and e (e may be NULL at the tail)
                    event->next = e;
                    event->prev = p;
                    p->next = event;
                    if (e != NULL) {
                        e->prev = event;
                    }
                }
                break;
            }
        }
    }
    // refresh the kernel wakeup deadline even if registration failed
    update_wakeup_disabled(dh);
    disp_enable(dh);
    return err;
}
/// Trigger any pending deferred events, while disabled void trigger_deferred_events_disabled(dispatcher_handle_t dh, systime_t now) { struct dispatcher_generic *dg = get_dispatcher_generic(dh); struct deferred_event *e; errval_t err; now *= SYSTIME_MULTIPLIER; for (e = dg->deferred_events; e != NULL && e->time <= now; e = e->next) { err = waitset_chan_trigger_disabled(&e->waitset_state, dh); assert(err_is_ok(err)); } dg->deferred_events = e; if (e != NULL) { e->prev = NULL; } update_wakeup_disabled(dh); }
/**
 * \brief Trigger send events for all LMP channels that are registered
 *
 * We don't have a good way to determine when we are likely to be able
 * to send on an LMP channel, so this function just trigger all such
 * pending events every time the dispatcher is rescheduled.
 *
 * Must be called while disabled and from dispatcher logic.
 */
void lmp_channels_retry_send_disabled(dispatcher_handle_t handle)
{
    struct dispatcher_generic *dp = get_dispatcher_generic(handle);
    // The channels form a circular doubly-linked ring; remember the head
    // so we can detect when the walk wraps around.
    struct lmp_chan *lc, *first = dp->lmp_send_events_list, *next;
    errval_t err;

    for (lc = first; lc != NULL; lc = next) {
        // capture successor before triggering (trigger may clobber links)
        next = lc->next;
        assert(next != NULL);
        err = waitset_chan_trigger_disabled(&lc->send_waitset, handle);
        assert_disabled(err_is_ok(err)); // shouldn't fail
#ifndef NDEBUG
        lc->next = lc->prev = NULL;
#endif
        if (next == first) {
            break; // wrapped
        }
    }

    // every channel has been triggered and unlinked; empty the ring
    dp->lmp_send_events_list = NULL;
}
/** * \brief Cancel a deferred event that has not yet fired */ errval_t deferred_event_cancel(struct deferred_event *event) { dispatcher_handle_t dh = disp_disable(); errval_t err = waitset_chan_deregister_disabled(&event->waitset_state); if (err_is_ok(err)) { // remove from dispatcher queue struct dispatcher_generic *disp = get_dispatcher_generic(dh); if (event->prev == NULL) { assert(disp->deferred_events == event); disp->deferred_events = event->next; } else { event->prev->next = event->next; } if (event->next != NULL) { event->next->prev = event->prev; } update_wakeup_disabled(dh); } disp_enable(dh); return err; }
/**
 * \brief Store the memory-server client binding in the dispatcher's
 *        private (core_state) area.
 */
void set_mem_client(struct mem_rpc_client *st)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    dg->core_state.c.mem_st = st;
}
/**
 * \brief Return the current vspace stored in the dispatcher's private
 *        (core_state) area.
 */
struct vspace *get_current_vspace(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return &dg->core_state.vspace_state.vspace;
}
/**
 * \brief Load an ELF image from the VFS and spawn it as a new domain.
 *
 * Reads the file at \p path into memory, loads it via spawn_load_image(),
 * wires up a monitor endpoint and the perfmon capability in the child,
 * runs the domain, and records it in the process table.
 *
 * \param path          VFS path of the executable
 * \param argv          Argument vector for the child
 * \param argbuf        Backing buffer for argv (ownership kept in ps_entry)
 * \param argbytes      Size of \p argbuf
 * \param envp          Environment vector for the child
 * \param inheritcn_cap Capability for inherited CNode (passed through)
 * \param argcn_cap     Capability for argument CNode (passed through)
 * \param domainid      Out: allocated domain ID
 */
static errval_t spawn(char *path, char *const argv[], char *argbuf,
                      size_t argbytes, char *const envp[],
                      struct capref inheritcn_cap, struct capref argcn_cap,
                      domainid_t *domainid)
{
    errval_t err, msgerr;

    /* read file into memory */
    vfs_handle_t fh;
    err = vfs_open(path, &fh);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    struct vfs_fileinfo info;
    err = vfs_stat(fh, &info);
    if (err_is_fail(err)) {
        vfs_close(fh);
        return err_push(err, SPAWN_ERR_LOAD);
    }

    assert(info.type == VFS_FILE);
    uint8_t *image = malloc(info.size);
    if (image == NULL) {
        vfs_close(fh);
        // NOTE(review): err is SYS_ERR_OK here (malloc failed, not a prior
        // call) — pushing onto an OK error looks wrong; a malloc-failure
        // code would be more accurate.  TODO confirm intent.
        return err_push(err, SPAWN_ERR_LOAD);
    }

    // read the whole file; vfs_read may return short reads, so loop until
    // the full size is in, EOF (readlen == 0) counts as a load failure
    size_t pos = 0, readlen;
    do {
        err = vfs_read(fh, &image[pos], info.size - pos, &readlen);
        if (err_is_fail(err)) {
            vfs_close(fh);
            free(image);
            return err_push(err, SPAWN_ERR_LOAD);
        } else if (readlen == 0) {
            vfs_close(fh);
            free(image);
            return SPAWN_ERR_LOAD; // XXX
        } else {
            pos += readlen;
        }
    } while (err_is_ok(err) && readlen > 0 && pos < info.size);

    err = vfs_close(fh);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to close file %s", path);
    }

    // find short name (last part of path)
    char *name = strrchr(path, VFS_PATH_SEP);
    if (name == NULL) {
        name = path;
    } else {
        name++;
    }

    /* spawn the image */
    struct spawninfo si;
    err = spawn_load_image(&si, (lvaddr_t)image, info.size, CURRENT_CPU_TYPE,
                           name, my_core_id, argv, envp, inheritcn_cap,
                           argcn_cap);
    if (err_is_fail(err)) {
        free(image);
        return err;
    }

    // the loader has copied what it needs; the raw image can go
    free(image);

    /* request connection from monitor */
    struct monitor_blocking_rpc_client *mrpc =
        get_monitor_blocking_rpc_client();
    struct capref monep;
    err = mrpc->vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    } else if (err_is_fail(msgerr)) {
        return msgerr;
    }

    /* copy connection into the new domain */
    struct capref destep = {
        .cnode = si.rootcn,
        .slot  = ROOTCN_SLOT_MONITOREP,
    };
    err = cap_copy(destep, monep);
    if (err_is_fail(err)) {
        spawn_free(&si);
        cap_destroy(monep);
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }
    err = cap_destroy(monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    debug_printf("spawning %s on core %u\n", path, my_core_id);

    /* give the perfmon capability */
    struct capref dest, src;
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, INIT_ERR_COPY_PERF_MON);
    }

    /* run the domain */
    err = spawn_run(&si);
    if (err_is_fail(err)) {
        spawn_free(&si);
        return err_push(err, SPAWN_ERR_RUN);
    }

    // Allocate domain id
    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
    assert(pe != NULL);
    memset(pe, 0, sizeof(struct ps_entry));
    // NOTE(review): copies MAX_CMDLINE_ARGS pointer slots — assumes the
    // caller's argv array is at least that large; verify against callers.
    memcpy(pe->argv, argv, MAX_CMDLINE_ARGS*sizeof(*argv));
    pe->argbuf = argbuf;
    pe->argbytes = argbytes;
    /*
     * NB: It's important to keep a copy of the DCB *and* the root
     * CNode around. We need to revoke both (in the right order, see
     * kill_domain() below), so that we ensure no one else is
     * referring to the domain's CSpace anymore. Especially the loop
     * created by placing rootcn into its own address space becomes a
     * problem here.
     */
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    pe->rootcn = si.rootcn;
    assert(err_is_ok(err));
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;
    err = ps_allocate(pe, domainid);
    if(err_is_fail(err)) {
        // NOTE(review): *domainid is read below even on this failure path —
        // looks suspicious; TODO confirm ps_allocate's contract on failure.
        free(pe);
    }

    // Store in target dispatcher frame
    struct dispatcher_generic *dg = get_dispatcher_generic(si.handle);
    dg->domain_id = *domainid;

    /* cleanup */
    err = spawn_free(&si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_FREE);
    }

    return SYS_ERR_OK;
}

/**
 * \brief Retry continuation for sending the use_local_memserv response.
 *
 * Re-attempts the send; if the transmit path is still busy, re-registers
 * itself as the send continuation on the default waitset.
 *
 * \param a Opaque pointer; actually the struct spawn_binding in question
 */
static void retry_use_local_memserv_response(void *a)
{
    errval_t err;
    struct spawn_binding *b = (struct spawn_binding*)a;
    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);
    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // try again
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_use_local_memserv_response,a));
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply\n");
    }
}
/** * \brief Setup the dispatcher frame */ static errval_t spawn_setup_dispatcher(struct spawninfo *si, coreid_t core_id, const char *name, genvaddr_t entry, void* arch_info) { errval_t err; /* Create dispatcher frame (in taskcn) */ si->dispframe.cnode = si->taskcn; si->dispframe.slot = TASKCN_SLOT_DISPFRAME; struct capref spawn_dispframe = { .cnode = si->taskcn, .slot = TASKCN_SLOT_DISPFRAME2, }; err = frame_create(si->dispframe, (1 << DISPATCHER_FRAME_BITS), NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_DISPATCHER_FRAME); } err = cap_copy(spawn_dispframe, si->dispframe); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_DISPATCHER_FRAME); } /* Map in dispatcher frame */ dispatcher_handle_t handle; err = vspace_map_one_frame((void**)&handle, 1ul << DISPATCHER_FRAME_BITS, si->dispframe, NULL, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_DISPATCHER_TO_SELF); } genvaddr_t spawn_dispatcher_base; err = spawn_vspace_map_one_frame(si, &spawn_dispatcher_base, spawn_dispframe, 1UL << DISPATCHER_FRAME_BITS); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_DISPATCHER_TO_NEW); } /* Set initial state */ // XXX: Confusion address translation about l/gen/addr in entry struct dispatcher_shared_generic *disp = get_dispatcher_shared_generic(handle); struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle); arch_registers_state_t *enabled_area = dispatcher_get_enabled_save_area(handle); arch_registers_state_t *disabled_area = dispatcher_get_disabled_save_area(handle); /* Place core_id */ disp_gen->core_id = core_id; /* place eh information */ disp_gen->eh_frame = si->eh_frame; disp_gen->eh_frame_size = si->eh_frame_size; disp_gen->eh_frame_hdr = si->eh_frame_hdr; disp_gen->eh_frame_hdr_size = si->eh_frame_hdr_size; /* Setup dispatcher and make it runnable */ disp->udisp = spawn_dispatcher_base; disp->disabled = 1; disp->fpu_trap = 1; #ifdef __k1om__ disp->xeon_phi_id = disp_xeon_phi_id(); #endif // Copy 
the name for debugging const char *copy_name = strrchr(name, '/'); if (copy_name == NULL) { copy_name = name; } else { copy_name++; } strncpy(disp->name, copy_name, DISP_NAME_LEN); spawn_arch_set_registers(arch_info, handle, enabled_area, disabled_area); registers_set_entry(disabled_area, entry); si->handle = handle; return SYS_ERR_OK; } errval_t spawn_map_bootinfo(struct spawninfo *si, genvaddr_t *retvaddr) { errval_t err; struct capref src = { .cnode = cnode_task, .slot = TASKCN_SLOT_BOOTINFO }; struct capref dest = { .cnode = si->taskcn, .slot = TASKCN_SLOT_BOOTINFO }; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, LIB_ERR_CAP_COPY); } err = spawn_vspace_map_one_frame(si, retvaddr, dest, BOOTINFO_SIZE); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_BOOTINFO); } return SYS_ERR_OK; } /** * \brief Retrive the commandline args of #name * * The arguments are malloced into a new space so need to be freed after use */ errval_t spawn_get_cmdline_args(struct mem_region *module, char **retargs) { assert(module != NULL && retargs != NULL); /* Get the cmdline args */ const char *args = getopt_module(module); /* Allocate space */ *retargs = malloc(sizeof(char) * strlen(args)); if (!retargs) { return LIB_ERR_MALLOC_FAIL; } /* Copy args */ strcpy(*retargs, args); return SYS_ERR_OK; }
/**
 * \brief Return the pinned-memory state stored in the dispatcher's
 *        private (core_state) area.
 */
struct pinned_state *get_current_pinned_state(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return &dg->core_state.pinned_state;
}
/**
 * \brief Return the current pmap stored in the dispatcher's private
 *        (core_state) area.
 */
struct pmap *get_current_pmap(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    // the arch-specific pmap state is returned via its generic base type
    return (struct pmap*)&dg->core_state.vspace_state.pmap;
}
/**
 * \brief Store the terminal state in the dispatcher's private
 *        (core_state) area.
 */
void set_terminal_state(struct terminal_state *st)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    dg->core_state.c.terminal_state = st;
}
/**
 * \brief Return the octopus RPC client stored in the dispatcher's
 *        private (core_state) area.
 */
struct octopus_rpc_client *get_octopus_rpc_client(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return dg->core_state.c.octopus_rpc_client;
}
/**
 * \brief Store the octopus RPC client in the dispatcher's private
 *        (core_state) area.
 */
void set_octopus_rpc_client(struct octopus_rpc_client *c)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    dg->core_state.c.octopus_rpc_client = c;
}
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * verion that will create threads on remote core with variable stack size
 *
 * Spans the current domain onto \p core_id: allocates and initializes a
 * new dispatcher frame, creates the initial thread for it, and kicks off
 * the spanning state machine via the monitor.  Blocks dispatching events
 * until the remote dispatcher reports itself initialized.
 *
 * \param core_id      Target core (must differ from the calling core)
 * \param callback     Invoked when spanning completes
 * \param callback_arg Opaque argument for \p callback
 * \param stack_size   Stack size for the new dispatcher's init thread
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg,
                                               size_t stack_size)
{
    assert(core_id != disp_get_core_id());

    errval_t err;
    struct domain_state *domain_state = get_domain_state();
    struct monitor_binding *mb = get_monitor_binding();
    assert(domain_state != NULL);

    /* Set reply handler */
    mb->rx_vtbl.span_domain_reply = span_domain_reply;

    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();
    }

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref    = domain_state->iref;

    /* get the alignment of the morecore state */
    struct morecore_state *state = get_morecore_state();
    remote_core_state->pagesize = state->mmu_state.alignment;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        // NOTE(review): remote_core_state is not freed on this and the
        // following error paths — apparent leak; confirm before changing.
        return LIB_ERR_THREAD_CREATE;
    }

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    span_domain_state->thread = newthread;
    span_domain_state->core_id = core_id;
    span_domain_state->callback = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {
        .cnode = cnode_page,
        .slot = 0
    };

    /* Create new dispatcher frame */
    struct capref frame;
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
    lvaddr_t dispaddr;

    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame,
                               NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp->fpu_trap = 1;
    disp_gen->core_id = span_domain_state->core_id;

    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);

    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

#ifdef __x86_64__
    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64 =
        get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64 =
        get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;
#endif

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    // (static: one interdisp handler thread per dispatcher, created lazily)
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));
    }

#if 0
    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));
        }
    }
#endif

#if 0
    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);
    }
#endif

    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    // queue the actual span request behind the monitor binding's mutex
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                 .arg = span_domain_state });

#if 1
    // block here, dispatching events, until the remote core reports in
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());
    }

    /* Free state */
    free(span_domain_state);
#endif

    return SYS_ERR_OK;
}
/**
 * \brief Return the slot-allocator state stored in the dispatcher's
 *        private (core_state) area.
 */
struct slot_alloc_state *get_slot_alloc_state(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return &dg->core_state.c.slot_alloc_state;
}
/**
 * \brief Store the blocking RPC monitor client binding in the
 *        dispatcher's private (core_state) area.
 */
void set_monitor_blocking_rpc_client(struct monitor_blocking_rpc_client *st)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    dg->core_state.c.monitor_blocking_rpc_client = st;
}
/**
 * \brief Return the spawn state stored in the dispatcher's private
 *        (core_state) area.
 */
struct spawn_state *get_spawn_state(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return dg->core_state.c.spawn_state;
}
/**
 * \brief Return the blocking RPC monitor client binding stored in the
 *        dispatcher's private (core_state) area.
 */
struct monitor_blocking_rpc_client *get_monitor_blocking_rpc_client(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return dg->core_state.c.monitor_blocking_rpc_client;
}
/**
 * \brief Return the morecore (heap-growth) state stored in the
 *        dispatcher's private (core_state) area.
 */
struct morecore_state *get_morecore_state(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return &dg->core_state.c.morecore_state;
}
/**
 * \brief Store the spawn state in the dispatcher's private
 *        (core_state) area.
 */
void set_spawn_state(struct spawn_state *st)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    dg->core_state.c.spawn_state = st;
}
/**
 * \brief Return the terminal state stored in the dispatcher's private
 *        (core_state) area.
 */
struct terminal_state *get_terminal_state(void)
{
    struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
    return dg->core_state.c.terminal_state;
}