/** * \brief Cancel an event registration made with lmp_chan_register_send() * * \param lc LMP channel */ errval_t lmp_chan_deregister_send(struct lmp_chan *lc) { assert(lc != NULL); errval_t err = waitset_chan_deregister(&lc->send_waitset); if (err_is_fail(err)) { return err; } // dequeue from list of channels with send events assert(lc->next != NULL && lc->prev != NULL); dispatcher_handle_t handle = disp_disable(); struct dispatcher_generic *dp = get_dispatcher_generic(handle); if (lc->next == lc->prev) { assert_disabled(dp->lmp_send_events_list == lc); dp->lmp_send_events_list = NULL; } else { lc->prev->next = lc->next; lc->next->prev = lc->prev; if (dp->lmp_send_events_list == lc) { dp->lmp_send_events_list = lc->next; } } #ifndef NDEBUG lc->prev = lc->next = NULL; #endif disp_enable(handle); return err; }
/**
 * \brief Trigger an event callback on a channel
 *
 * Marks the given channel as having a pending event, causing some future call
 * to get_next_event() to return the registered closure.
 * This function must only be called when enabled.
 *
 * \param chan Waitset's per-channel state
 */
errval_t waitset_chan_trigger(struct waitset_chanstate *chan)
{
    // briefly disable the dispatcher and delegate to the disabled variant
    dispatcher_handle_t disp = disp_disable();
    errval_t err = waitset_chan_trigger_disabled(chan, disp);
    disp_enable(disp);
    return err;
}
/** * \brief Register an event handler to be notified when messages can be sent * * In the future, call the closure on the given waitset when it is likely that * a message can be sent on the channel. A channel may only be registered * with a single send event handler on a single waitset at any one time. * * \param lc LMP channel * \param ws Waitset * \param closure Event handler */ errval_t lmp_chan_register_send(struct lmp_chan *lc, struct waitset *ws, struct event_closure closure) { assert(lc != NULL); assert(ws != NULL); errval_t err = waitset_chan_register(ws, &lc->send_waitset, closure); if (err_is_fail(err)) { return err; } // enqueue in list of channels with a registered event to retry sending assert(lc->next == NULL && lc->prev == NULL); dispatcher_handle_t handle = disp_disable(); struct dispatcher_generic *dp = get_dispatcher_generic(handle); if (dp->lmp_send_events_list == NULL) { dp->lmp_send_events_list = lc; lc->next = lc->prev = lc; } else { lc->prev = dp->lmp_send_events_list->prev; lc->next = dp->lmp_send_events_list; lc->prev->next = lc; lc->next->prev = lc; } disp_enable(handle); return err; }
/**
 * \brief Move the calling thread to another core
 *
 * Removes the calling thread (must be thread_self()) from the local run
 * queue and arranges for it to be woken on the given core. On success this
 * function does not return on the calling core: either the next runnable
 * thread is resumed, or the dispatcher yields.
 *
 * \param thread  Thread to move; asserted to be the calling thread
 * \param core_id Destination core
 *
 * \return Error from domain_wakeup_on_coreid_disabled() on failure
 *         (the thread is re-enqueued locally); does not return on success.
 */
errval_t domain_thread_move_to(struct thread *thread, coreid_t core_id)
{
    assert(thread == thread_self());
    dispatcher_handle_t mydisp = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(mydisp);
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(mydisp);

    // capture successor before unlinking ourselves from the run queue
    struct thread *next = thread->next;
    thread_remove_from_queue(&disp_gen->runq, thread);

    errval_t err = domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
    if(err_is_fail(err)) {
        // failed: put ourselves back on the run queue and report the error
        thread_enqueue(thread, &disp_gen->runq);
        disp_enable(mydisp);
        return err;
    }

    // run the next thread, if any
    if (next != thread) {
        disp_gen->current = next;
        disp_resume(mydisp, &next->regs);
    } else {
        // run queue is now empty: recompute haswork and yield the dispatcher
        disp_gen->current = NULL;
        disp->haswork = havework_disabled(mydisp);
        disp_yield_disabled(mydisp);
    }

    // disp_resume/disp_yield_disabled do not return here
    USER_PANIC("should never be reached");
}
/// Destroy the local state associated with a given channel void lmp_chan_destroy(struct lmp_chan *lc) { lc->connstate = LMP_DISCONNECTED; cap_destroy(lc->local_cap); if (lc->endpoint != NULL) { lmp_endpoint_free(lc->endpoint); } // remove from send retry queue on dispatcher if (waitset_chan_is_registered(&lc->send_waitset)) { assert(lc->prev != NULL && lc->next != NULL); dispatcher_handle_t handle = disp_disable(); struct dispatcher_generic *disp = get_dispatcher_generic(handle); if (lc->next == lc->prev) { assert_disabled(lc->next == lc); assert_disabled(disp->lmp_send_events_list == lc); disp->lmp_send_events_list = NULL; } else { lc->prev->next = lc->next; lc->next->prev = lc->prev; } disp_enable(handle); #ifndef NDEBUG lc->next = lc->prev = NULL; #endif } waitset_chanstate_destroy(&lc->send_waitset); }
/** * \brief Called on the inter-disp handler thread, when another thread * on this dispatcher wants to wakeup a thread on a foreign dispatcher. */ static void handle_wakeup_on(void *arg) { struct domain_state *domain_state = get_domain_state(); errval_t err; assert(domain_state != NULL); // Dequeue all (disable to ensure mutual exclusion -- per dispatcher) for(;;) { struct thread *thread = NULL; dispatcher_handle_t disp = disp_disable(); if(domain_state->remote_wakeup_queue != NULL) { thread = thread_dequeue(&domain_state->remote_wakeup_queue); } disp_enable(disp); // Break if queue empty if(thread == NULL) { break; } // XXX: Hack /* coreid_t core_id = disp_handle_get_core_id(thread->disp); */ coreid_t core_id = thread->coreid; assert(domain_state->b[core_id] != NULL); struct interdisp_binding *b = domain_state->b[core_id]; err = b->tx_vtbl.wakeup_thread(b, NOP_CONT, (genvaddr_t)(uintptr_t)thread); if (err_is_fail(err)) { USER_PANIC_ERR(err, "wakeup_thread"); } } }
/**
 * \brief Cancel a previous callback registration
 *
 * Remove the registration for a callback on the given channel.
 * This function must only be called when enabled.
 *
 * \param chan Waitset's per-channel state
 */
errval_t waitset_chan_deregister(struct waitset_chanstate *chan)
{
    // run the disabled variant with the dispatcher disabled
    dispatcher_handle_t dh = disp_disable();
    errval_t result = waitset_chan_deregister_disabled(chan);
    disp_enable(dh);
    return result;
}
/**
 * \brief Wake up a thread on a given (possibly remote) dispatcher
 *
 * Enabled wrapper: disables the current dispatcher and delegates to
 * domain_wakeup_on_disabled().
 *
 * \param disp   Dispatcher the thread belongs to
 * \param thread Thread to wake up
 */
errval_t domain_wakeup_on(dispatcher_handle_t disp, struct thread *thread)
{
    dispatcher_handle_t self = disp_disable();
    errval_t result = domain_wakeup_on_disabled(disp, thread, self);
    disp_enable(self);
    return result;
}
/**
 * \brief Register a closure to be called when a channel is triggered
 *
 * In the future, call the closure on a thread associated with the waitset
 * when the channel is triggered. Only one closure may be registered per
 * channel state at any one time.
 * This function must only be called when enabled.
 *
 * \param ws      Waitset
 * \param chan    Waitset's per-channel state
 * \param closure Event handler
 */
errval_t waitset_chan_register(struct waitset *ws, struct waitset_chanstate *chan,
                               struct event_closure closure)
{
    // run the disabled variant with the dispatcher disabled
    dispatcher_handle_t dh = disp_disable();
    errval_t result = waitset_chan_register_disabled(ws, chan, closure);
    disp_enable(dh);
    return result;
}
/**
 * \brief Trigger a specific event callback on an unregistered channel
 *
 * This function is equivalent to waitset_chan_register()
 * followed by waitset_chan_trigger(), but avoids unnecessary queue
 * manipulation. This function must only be called when enabled.
 *
 * \param ws      Waitset
 * \param chan    Waitset's per-channel state
 * \param closure Event handler
 */
errval_t waitset_chan_trigger_closure(struct waitset *ws,
                                      struct waitset_chanstate *chan,
                                      struct event_closure closure)
{
    // run the disabled variant with the dispatcher disabled
    dispatcher_handle_t dh = disp_disable();
    errval_t result = waitset_chan_trigger_closure_disabled(ws, chan, closure, dh);
    disp_enable(dh);
    return result;
}
/**
 * \brief Mark an idle channel as polled
 *
 * The given channel will periodically have its poll function called.
 * The channel must already be registered.
 *
 * Moves the channel from the waitset's idle ring to its polled ring; if the
 * polled ring was empty and a thread is blocked on the waitset, one blocked
 * thread is woken up to start polling.
 *
 * \param chan Waitset's per-channel state
 *
 * \return LIB_ERR_CHAN_NOT_REGISTERED if the channel has no waitset,
 *         SYS_ERR_OK otherwise (including the no-op cases)
 */
errval_t waitset_chan_start_polling(struct waitset_chanstate *chan)
{
    errval_t err = SYS_ERR_OK;

    dispatcher_handle_t handle = disp_disable();

    struct waitset *ws = chan->waitset;
    if (ws == NULL) {
        err = LIB_ERR_CHAN_NOT_REGISTERED;
        goto out;
    }

    assert(chan->state != CHAN_UNREGISTERED);
    if (chan->state != CHAN_IDLE) {
        goto out; // no-op if polled or pending
    }

    // remove from idle queue (circular doubly-linked ring)
    if (chan->next == chan) {
        // singleton: ring becomes empty
        assert(chan->prev == chan);
        assert(ws->idle == chan);
        ws->idle = NULL;
    } else {
        chan->prev->next = chan->next;
        chan->next->prev = chan->prev;
        // advance the head if we were it
        if (ws->idle == chan) {
            ws->idle = chan->next;
        }
    }

    // enqueue on polled queue
    if (ws->polled == NULL) {
        // polled ring was empty: channel becomes the sole element
        ws->polled = chan;
        chan->next = chan->prev = chan;
        if (ws->waiting_threads != NULL && !ws->polling) {
            // start a blocked thread polling
            ws->polling = true;
            struct thread *t;
            t = thread_unblock_one_disabled(handle, &ws->waiting_threads, NULL);
            assert(t == NULL); // shouldn't see a remote thread: waitsets are per-dispatcher
        }
    } else {
        // insert at the tail of the polled ring (just before the head)
        chan->next = ws->polled;
        chan->prev = ws->polled->prev;
        chan->next->prev = chan;
        chan->prev->next = chan;
    }
    chan->state = CHAN_POLLED;

out:
    disp_enable(handle);
    return err;
}
/**
 * \brief Handle a remote request to wake a thread on this dispatcher
 *
 * Re-homes the thread to the local dispatcher and enqueues it on the
 * local run queue.
 *
 * \param b     Inter-dispatcher binding the request arrived on
 * \param taddr Address of the thread structure, passed as a genvaddr_t
 */
static void wakeup_thread_request(struct interdisp_binding *b,
                                  genvaddr_t taddr)
{
    struct thread *t = (struct thread *)(uintptr_t)taddr;
    coreid_t my_core = disp_get_core_id();

    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_generic *dg = get_dispatcher_generic(handle);
    /* assert_disabled(t->disp == handle); */
    assert_disabled(t->coreid == my_core);
    t->disp = handle;
    thread_enqueue(t, &dg->runq);
    disp_enable(handle);
}
/**
 * \brief Stop polling the given channel, making it idle again
 *
 * Moves the channel from the waitset's polled ring back to its idle ring.
 *
 * \param chan Waitset's per-channel state
 *
 * \return LIB_ERR_CHAN_NOT_REGISTERED if the channel has no waitset,
 *         SYS_ERR_OK otherwise (including the no-op cases)
 */
errval_t waitset_chan_stop_polling(struct waitset_chanstate *chan)
{
    errval_t err = SYS_ERR_OK;

    dispatcher_handle_t handle = disp_disable();

    struct waitset *ws = chan->waitset;
    if (ws == NULL) {
        err = LIB_ERR_CHAN_NOT_REGISTERED;
        goto out;
    }

    assert(chan->state != CHAN_UNREGISTERED);
    if (chan->state != CHAN_POLLED) {
        goto out; // no-op if idle or pending
    }

    // remove from polled queue (circular doubly-linked ring)
    if (chan->next == chan) {
        // singleton: ring becomes empty
        assert(chan->prev == chan);
        assert(ws->polled == chan);
        ws->polled = NULL;
    } else {
        chan->prev->next = chan->next;
        chan->next->prev = chan->prev;
        // advance the head if we were it
        if (ws->polled == chan) {
            ws->polled = chan->next;
        }
    }

    // enqueue on idle queue
    if (ws->idle == NULL) {
        // idle ring was empty: channel becomes the sole element
        ws->idle = chan;
        chan->next = chan->prev = chan;
    } else {
        // insert at the tail of the idle ring (just before the head)
        chan->next = ws->idle;
        chan->prev = ws->idle->prev;
        chan->next->prev = chan;
        chan->prev->next = chan;
    }
    chan->state = CHAN_IDLE;

out:
    disp_enable(handle);
    return err;
}
/**
 * \brief Register a deferred event
 *
 * Registers the closure on the waitset and inserts the event into the
 * dispatcher's time-sorted list of pending timers.
 *
 * \param event   Storage for event metadata
 * \param ws      Waitset
 * \param delay   Delay in microseconds
 * \param closure Event closure to execute
 */
errval_t deferred_event_register(struct deferred_event *event,
                                 struct waitset *ws, delayus_t delay,
                                 struct event_closure closure)
{
    dispatcher_handle_t dh = disp_disable();
    errval_t err = waitset_chan_register_disabled(ws, &event->waitset_state,
                                                  closure);
    if (err_is_ok(err)) {
        struct dispatcher_generic *dg = get_dispatcher_generic(dh);

        // XXX: determine absolute time for event (ignoring time since dispatch!)
        event->time = get_system_time() + delay;

        // walk the sorted pending-timer list to the insertion point:
        // after every event with time <= ours, before the first later one
        struct deferred_event **link = &dg->deferred_events;
        struct deferred_event *before = NULL;
        while (*link != NULL && (*link)->time <= event->time) {
            before = *link;
            link = &(*link)->next;
        }

        // splice in the new event
        event->next = *link;
        event->prev = before;
        if (*link != NULL) {
            (*link)->prev = event;
        }
        *link = event;
    }
    update_wakeup_disabled(dh);
    disp_enable(dh);
    return err;
}
/** * \brief Cancel a deferred event that has not yet fired */ errval_t deferred_event_cancel(struct deferred_event *event) { dispatcher_handle_t dh = disp_disable(); errval_t err = waitset_chan_deregister_disabled(&event->waitset_state); if (err_is_ok(err)) { // remove from dispatcher queue struct dispatcher_generic *disp = get_dispatcher_generic(dh); if (event->prev == NULL) { assert(disp->deferred_events == event); disp->deferred_events = event->next; } else { event->prev->next = event->next; } if (event->next != NULL) { event->next->prev = event->prev; } update_wakeup_disabled(dh); } disp_enable(dh); return err; }
/**
 * \brief Return next event on given waitset, if one is already pending
 *
 * This is essentially a non-blocking variant of get_next_event(). It should be
 * used with great care, to avoid the creation of busy-waiting loops.
 *
 * \param ws         Waitset
 * \param retclosure Pointer to storage space for returned event closure
 *
 * \returns LIB_ERR_NO_EVENT if nothing is pending
 */
errval_t check_for_event(struct waitset *ws, struct event_closure *retclosure)
{
    struct waitset_chanstate *chan;
    // ensures the polled channels are walked at most once
    int pollcount = 0;

    assert(ws != NULL);
    assert(retclosure != NULL);

 recheck: ;
    // are there any pending events on the waitset?
    dispatcher_handle_t handle = disp_disable();
    chan = get_pending_event_disabled(ws);
    disp_enable(handle);
    if (chan != NULL) {
        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }

    // if there are no pending events, poll all channels once
    if (ws->polled != NULL && pollcount++ == 0) {
        // walk the circular polled ring; stop if a channel leaves this
        // waitset or the polled state while we're enabled
        for (chan = ws->polled;
             chan != NULL && chan->waitset == ws && chan->state == CHAN_POLLED;
             chan = chan->next) {

            poll_channel(chan);

            // polling may have made an event pending: pick it up
            if (ws->pending != NULL) {
                goto recheck;
            }

            if (chan->next == ws->polled) {
                // reached the start of the queue
                break;
            }
        }
    }

    return LIB_ERR_NO_EVENT;
}
/**
 * \brief Wait for, poll for, and return the next event on a waitset
 *
 * Core state machine behind get_next_event(): disabled sections check for
 * pending events and manage the polling/blocking decision; the enabled
 * polling loop repeatedly polls the waitset's polled channels until an
 * event becomes pending.
 *
 * \param ws         Waitset
 * \param retclosure Pointer to storage space for returned event closure
 * \param debug      If true, print a caller backtrace when polling exceeds
 *                   the cycle budget (suppressed for the "netd" dispatcher)
 */
static errval_t get_next_event_debug(struct waitset *ws,
                                     struct event_closure *retclosure,
                                     bool debug)
{
    struct waitset_chanstate *chan;
    bool was_polling = false;
    cycles_t pollcycles;

    assert(ws != NULL);
    assert(retclosure != NULL);

    // unconditionally disable ourselves and check for events
    // if we decide we have to start polling, we'll jump back up here
    goto check_for_events;

    /* ------------ POLLING LOOP; RUNS WHILE ENABLED ------------ */
polling_loop:
    was_polling = true;
    assert(ws->polling); // this thread is polling
    // get the amount of cycles we want to poll for
    pollcycles = pollcycles_reset();

    // while there are no pending events, poll channels
    while (ws->polled != NULL && ws->pending == NULL) {
        struct waitset_chanstate *nextchan = NULL;
        // NB: Polling policy is to return as soon as a pending event
        // appears, not bother looking at the rest of the polling queue
        for (chan = ws->polled;
             chan != NULL && chan->waitset == ws
                 && chan->state == CHAN_POLLED && ws->pending == NULL;
             chan = nextchan) {
            // capture successor before polling: poll_channel() may requeue chan
            nextchan = chan->next;
            poll_channel(chan);
            // update pollcycles
            pollcycles = pollcycles_update(pollcycles);
            // yield the thread if we exceed the cycle count limit
            if (ws->pending == NULL && pollcycles_expired(pollcycles)) {
                if (debug) {
                    if (strcmp(disp_name(), "netd") != 0) {
                        // Print the callback trace so that we know which
                        // call is leading to the schedule removal
                        printf("%s: callstack: %p %p %p %p\n", disp_name(),
                               __builtin_return_address(0),
                               __builtin_return_address(1),
                               __builtin_return_address(2),
                               __builtin_return_address(3));
                    }
                }
                thread_yield();
                pollcycles = pollcycles_reset();
            }
        }

        // ensure that we restart polling from the place we left off here,
        // if the next channel is a valid one
        if (nextchan != NULL && nextchan->waitset == ws
            && nextchan->state == CHAN_POLLED) {
            ws->polled = nextchan;
        }
    }

    /* ------------ STATE MACHINERY; RUNS WHILE DISABLED ------------ */
check_for_events: ;
    dispatcher_handle_t handle = disp_disable();

    // are there any pending events on the waitset?
    chan = get_pending_event_disabled(ws);
    if (chan != NULL) {
        // if we need to poll, and we have a blocked thread, wake it up to do so
        if (was_polling && ws->polled != NULL && ws->waiting_threads != NULL) {
            // start a blocked thread polling
            struct thread *t;
            t = thread_unblock_one_disabled(handle, &ws->waiting_threads, NULL);
            assert_disabled(t == NULL); // shouldn't see a remote thread
        } else if (was_polling) {
            // I'm stopping polling, and there is nobody else
            assert_disabled(ws->polling);
            ws->polling = false;
        }
        disp_enable(handle);

        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }

    // If we got here and there are channels to poll but no-one is polling,
    // then either we never polled, or we lost a race on the channel we picked.
    // Either way, we'd better start polling again.
    if (ws->polled != NULL && (was_polling || !ws->polling)) {
        if (!was_polling) {
            ws->polling = true;
        }
        disp_enable(handle);
        goto polling_loop;
    }

    // otherwise block awaiting an event
    chan = thread_block_disabled(handle, &ws->waiting_threads);

    if (chan == NULL) {
        // not a real event, just a wakeup to get us to start polling!
        assert(ws->polling);
        goto polling_loop;
    } else {
        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }
}