/**
 * \brief Wakeup a thread on a foreign dispatcher while disabled.
 *
 * \param core_id Core ID to wakeup on
 * \param thread  Pointer to thread to wakeup
 * \param mydisp  Dispatcher this function is running on
 *
 * \return SYS_ERR_OK on success, LIB_ERR_NO_SPANNED_DISP when no
 *         inter-dispatcher binding exists for the target core.
 */
static errval_t domain_wakeup_on_coreid_disabled(coreid_t core_id,
                                                 struct thread *thread,
                                                 dispatcher_handle_t mydisp)
{
    struct domain_state *dst = get_domain_state();

    // XXX: Ugly hack to allow waking up on a core id we don't have a
    // dispatcher handler for
    thread->coreid = core_id;

    // Catch a missing domain state early
    assert_disabled(dst != NULL);

    if (dst->b[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;
    }

    thread_enqueue(thread, &dst->remote_wakeup_queue);

    // Signal the inter-dispatcher waitset that a remote wakeup is pending
    struct event_closure closure = {
        .handler = handle_wakeup_on
    };
    errval_t err =
        waitset_chan_trigger_closure_disabled(&dst->interdisp_ws,
                                              &dst->remote_wakeup_event,
                                              closure, mydisp);
    // Already-registered is fine: the handler will drain the whole queue
    assert_disabled(err_is_ok(err)
                    || err_no(err) == LIB_ERR_CHAN_ALREADY_REGISTERED);

    return SYS_ERR_OK;
}
/** * \brief Called on the inter-disp handler thread, when another thread * on this dispatcher wants to wakeup a thread on a foreign dispatcher. */ static void handle_wakeup_on(void *arg) { struct domain_state *domain_state = get_domain_state(); errval_t err; assert(domain_state != NULL); // Dequeue all (disable to ensure mutual exclusion -- per dispatcher) for(;;) { struct thread *thread = NULL; dispatcher_handle_t disp = disp_disable(); if(domain_state->remote_wakeup_queue != NULL) { thread = thread_dequeue(&domain_state->remote_wakeup_queue); } disp_enable(disp); // Break if queue empty if(thread == NULL) { break; } // XXX: Hack /* coreid_t core_id = disp_handle_get_core_id(thread->disp); */ coreid_t core_id = thread->coreid; assert(domain_state->b[core_id] != NULL); struct interdisp_binding *b = domain_state->b[core_id]; err = b->tx_vtbl.wakeup_thread(b, NOP_CONT, (genvaddr_t)(uintptr_t)thread); if (err_is_fail(err)) { USER_PANIC_ERR(err, "wakeup_thread"); } } }
/**
 * \brief Send a capability to a dispatcher of this domain on another core.
 *
 * \param core_id Core whose dispatcher should receive the capability
 * \param cap     Capability to send
 *
 * \return LIB_ERR_NO_SPANNED_DISP if no binding exists for \p core_id,
 *         an error pushed with LIB_ERR_SEND_CAP_REQUEST on send failure.
 *
 * NOTE(review): deliberately unfinished — after sending the request the
 * function hits assert(!"NYI") instead of waiting for the reply.
 * send_cap_err and cap_received are presumably file-scope globals set by
 * the reply handler — confirm against the rest of the file.
 */
errval_t domain_send_cap(coreid_t core_id, struct capref cap)
{
    errval_t err;
    struct domain_state *domain_state = get_domain_state();
    if (!domain_state->b[core_id]) {
        return LIB_ERR_NO_SPANNED_DISP;
    }

    // Reset reply state before issuing the request
    send_cap_err = SYS_ERR_OK;
    cap_received = false;

    struct interdisp_binding *b = domain_state->b[core_id];
    // XXX: the address of the stack-local 'cap' is passed as an opaque
    // request identifier — presumably echoed back by the peer; verify
    // the reply arrives before this frame is torn down.
    err = b->tx_vtbl.send_cap_request(b, NOP_CONT, cap, (uintptr_t)&cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SEND_CAP_REQUEST);
    }

    assert(!"NYI");
    // TODO: Handled on different thread
    /* while(!cap_received) { */
    /*     messages_wait_and_handle_next(); */
    /* } */

    return send_cap_err;
}
static void dispatcher_initialized_handler(void *arg) { struct span_domain_state *span_domain_state = arg; #if 0 struct domain_state *domain_state = get_domain_state(); // XXX: Tell currently active interdisp-threads to handle default waitset for(int i = 0; i < MAX_CPUS; i++) { struct interdisp_binding *b = domain_state->b[i]; if(disp_get_core_id() != i && span_domain_state->core_id != i && b != NULL) { errval_t err = b->tx_vtbl.span_slave_done(b, NOP_CONT); assert(err_is_ok(err)); } } #endif /* Upcall into the domain_new_dispatcher callback if registered */ if (span_domain_state->callback) { span_domain_state->callback(span_domain_state->callback_arg, SYS_ERR_OK); } free(span_domain_state); }
/**
 * \brief Handler for a span_eager_connect message: remember the binding
 *        to the dispatcher running on the sending core.
 */
static void span_eager_connect_request(struct interdisp_binding *b,
                                       coreid_t core_id)
{
    /* Store the sending core's connection */
    struct domain_state *ds = get_domain_state();
    ds->b[core_id] = b;
}
/**
 * \brief Create a thread on a specific core, with a given stack size.
 *
 * \param core_id    Core to create the thread on
 * \param start_func Thread entry point
 * \param arg        Argument passed to the entry point
 * \param stacksize  Stack size in bytes; 0 selects the default stack size
 * \param newthread  Out: created thread (may be NULL if not wanted; only
 *                   valid for remote creations once the reply arrives)
 *
 * \return SYS_ERR_OK on success, LIB_ERR_THREAD_CREATE,
 *         LIB_ERR_NO_SPANNED_DISP, LIB_ERR_MALLOC_FAIL, or a send error.
 */
errval_t domain_thread_create_on_varstack(coreid_t core_id,
                                          thread_func_t start_func, void *arg,
                                          size_t stacksize,
                                          struct thread **newthread)
{
    if (disp_get_core_id() == core_id) {
        // Target core is our own: create the thread directly
        struct thread *th = NULL;
        if (stacksize == 0) {
            th = thread_create(start_func, arg);
        } else {
            th = thread_create_varstack(start_func, arg, stacksize);
        }
        if (th != NULL) {
            if (newthread) {
                *newthread = th;
            }
            return SYS_ERR_OK;
        } else {
            return LIB_ERR_THREAD_CREATE;
        }
    } else {
        struct domain_state *domain_state = get_domain_state();
        errval_t err;

        if (domain_state->b[core_id] == NULL) {
            return LIB_ERR_NO_SPANNED_DISP;
        }

        struct interdisp_binding *b = domain_state->b[core_id];
        struct create_thread_req *req = malloc(sizeof(*req));
        if (req == NULL) {
            // BUGFIX: malloc result was dereferenced unchecked
            return LIB_ERR_MALLOC_FAIL;
        }
        req->reply_received = false;

        // use special waitset to make sure loop exits properly.
        struct waitset ws, *old_ws = b->waitset;
        waitset_init(&ws);
        b->change_waitset(b, &ws);
        err = b->tx_vtbl.create_thread_request(b, NOP_CONT,
                                               (genvaddr_t)(uintptr_t)start_func,
                                               (genvaddr_t)(uintptr_t)arg,
                                               stacksize,
                                               (genvaddr_t)(lvaddr_t)req);
        if (err_is_fail(err)) {
            // BUGFIX: restore the binding's waitset and free req on the
            // error path (previously leaked req and left the binding on
            // the about-to-die stack waitset)
            b->change_waitset(b, old_ws);
            free(req);
            return err;
        }

        while (!req->reply_received) {
            event_dispatch(&ws);
        }

        if (newthread) {
            *newthread = req->thread;
        }
        free(req);

        b->change_waitset(b, old_ws);

        return SYS_ERR_OK;
    }
}
/** * \brief Runs enabled on the remote core to initialize the dispatcher */ static int remote_core_init_enabled(void *arg) { errval_t err; struct remote_core_state *remote_core_state = (struct remote_core_state*)arg; /* construct a temporary spawn param to supply the morecore alignment */ struct spawn_domain_params params; memset(¶ms, 0, sizeof(params)); params.pagesize = remote_core_state->pagesize; /* Initialize the barrelfish library */ err = barrelfish_init_onthread(¶ms); if (err_is_fail(err)) { DEBUG_ERR(err, "barrelfish_init_onthread failed"); abort(); return -1; } // Connect to all dispatchers eagerly remote_core_state->cnt = 0; while(allirefs[remote_core_state->cnt] == NULL_IREF && remote_core_state->cnt < MAX_CPUS) { remote_core_state->cnt++; if(remote_core_state->cnt == disp_get_core_id()) { remote_core_state->cnt++; } } // Don't move before barrelfish_init_onthread() struct domain_state *st = get_domain_state(); if(remote_core_state->cnt != MAX_CPUS) { err = interdisp_bind(allirefs[remote_core_state->cnt], client_connected, remote_core_state, &st->interdisp_ws, IDC_BIND_FLAGS_DEFAULT); if(err_is_fail(err)) { USER_PANIC_ERR(err, "Failure binding to inter-dispatcher service"); } } while(!remote_core_state->initialized) { event_dispatch(get_default_waitset()); } /* Free unnecessary state */ free(remote_core_state); /* XXX: create a thread that will handle the default waitset */ st->default_waitset_handler = thread_create(span_slave_thread, NULL); assert(st->default_waitset_handler != NULL); return interdisp_msg_handler(&st->interdisp_ws); }
/**
 * \brief Called when domain gets a interdisp service.
 * It will set it on the domain_state.
 *
 * Also publishes the iref in the global allirefs table and flags the
 * export as complete via domain_state->conditional.
 */
static void server_listening(void *st, errval_t err, iref_t iref)
{
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "interdisp service export");
        abort();
    }

    struct domain_state *ds = get_domain_state();
    ds->iref = iref;

    // Also set in the global array
    allirefs[disp_get_core_id()] = iref;

    ds->conditional = true;
}
/**
 * \brief Called when the "client" connects to "server"
 *
 * Make the connection a "server" connection, free unnecessary state.
 * Send init msg to the dispatcher that spanned this dispatcher.
 *
 * Walks the allirefs table, binding to every exported interdisp service
 * in turn; when the table is exhausted, notifies the spanning dispatcher
 * that this one is fully connected.
 */
static void client_connected(void *st, errval_t err,
                             struct interdisp_binding *b)
{
    struct remote_core_state *state = (struct remote_core_state*)st;
    struct domain_state *domain_state = get_domain_state();

    if(err_is_fail(err)) {
        DEBUG_ERR(err, "binding to interdisp service");
        abort();
    }

    /* Set it on the domain library state */
    b->rx_vtbl = interdisp_vtbl;
    domain_state->b[state->cnt] = b;

    // Send it our core id
    err = b->tx_vtbl.span_eager_connect(b, NOP_CONT, disp_get_core_id());
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "sending span_eager_connect");
    }

    // Connect to next active dispatcher
    do {
        state->cnt++;
        if(state->cnt == disp_get_core_id()) {
            state->cnt++;   // skip our own core
        }
        // BUGFIX: check the bound BEFORE indexing allirefs, so we never
        // read allirefs[MAX_CPUS] off the end of the array
    } while(state->cnt < MAX_CPUS && allirefs[state->cnt] == NULL_IREF);

    if(state->cnt < MAX_CPUS) {
        err = interdisp_bind(allirefs[state->cnt], client_connected,
                             state, &domain_state->interdisp_ws,
                             IDC_BIND_FLAGS_DEFAULT);
        if(err_is_fail(err)) {
            USER_PANIC_ERR(err, "Binding to inter-dispatcher service");
        }
    } else {
        struct interdisp_binding *sb = domain_state->b[state->core_id];
        /* Send initialized msg to the dispatcher that spanned us */
        errval_t err2 = sb->tx_vtbl.
            dispatcher_initialized(sb, NOP_CONT,
                                   (uintptr_t)state->span_domain_state);
        if (err_is_fail(err2)) {
            // BUGFIX: report err2 (the failing send), not the stale err
            DEBUG_ERR(err2, "failed to send initalized msg");
            abort();
        }

        state->initialized = true;
    }
}
errval_t domain_thread_join(struct thread *thread, int *retval) { coreid_t core_id = thread->coreid; if (disp_get_core_id() == core_id) { return thread_join(thread, retval); } else { struct domain_state *domain_state = get_domain_state(); errval_t err; if (domain_state->b[core_id] == NULL) { return LIB_ERR_NO_SPANNED_DISP; } struct interdisp_binding *b = domain_state->b[core_id]; struct join_thread_req *req = malloc(sizeof(*req)); req->reply_received = false; // use special waitset to make sure loop exits properly. struct waitset ws, *old_ws = b->waitset; waitset_init(&ws); b->change_waitset(b, &ws); err = b->tx_vtbl.join_thread_request(b, NOP_CONT, (genvaddr_t)(lvaddr_t)thread, (genvaddr_t)(lvaddr_t)req); if (err_is_fail(err)) { return err; } while (!req->reply_received) { event_dispatch(&ws); } // change waitset back b->change_waitset(b, old_ws); if (retval) { *retval = req->retval; } err = req->err; free(req); return err; } }
/**
 * \brief Deliver all pending, unmasked events to the current domain.
 *
 * Snapshots the domain's pending-event bitmap under disabled interrupts,
 * then invokes each installed handler with interrupts re-enabled, the
 * domain's events flag disabled, and the delivered event masked.
 *
 * NOTE(review): this function uses a hypervisor-style API (xm_current_domain,
 * pt_regs, hw_cli/hw_sti) unlike the surrounding Barrelfish code — it
 * appears to come from a different codebase; confirm file provenance.
 */
static void sync_domain_events (void)
{
    unsigned long hw_flags, event;
    volatile bitmap_t pending;
    struct pt_regs dummy_regs = FAKE_REGS;

    // A finished domain receives no further events
    if (get_domain_state (xm_current_domain) == DOMAIN_FINISHED)
        return;

    // Snapshot and clear unmasked pending events atomically w.r.t. IRQs
    hw_save_flags_and_cli(&hw_flags);
    pending = xm_current_domain -> events -> pending_events;
    // Keep only the masked ones pending; the rest are delivered below
    xm_current_domain -> events -> pending_events &=
        xm_current_domain -> events -> masked_events;

    // Executing all pending events
    while ((event = get_next_set_bit_and_clear (pending,
            xm_current_domain -> events -> masked_events)) != -1) {
        // An event pending when there is not any handler installed
        // catching it??? it must be an error
        //assert (xm_current_domain -> event_handler [event]);
        if (!xm_current_domain -> events -> event_handler [event])
            continue;

        // Before executing an event handler, events' flag is disabled
        // and the event is masked
        set_bit (xm_current_domain -> events -> masked_events, event);
        disable_events_flag (xm_current_domain);
        SET_PT_REG_IRQ (dummy_regs, event);
        hw_sti ();

        // Here, the event handler is executed always
        // with the same conditions, that is, event flag disabled
        // and the executed event masked as well
        (*xm_current_domain -> events -> event_handler[event])
            (event, &dummy_regs);
        hw_cli ();

        // Events flag is automatically enabled (iret emulation)
        enable_events_flag (xm_current_domain);
    }
    hw_restore_flags (hw_flags);
}
/**
 * \brief Create a thread on a given core (fire-and-forget variant).
 *
 * \param core_id    Core to create the thread on
 * \param start_func Thread entry point
 * \param arg        Argument passed to the entry point
 * \param stacksize  Stack size in bytes; 0 selects the default stack size
 *
 * \return SYS_ERR_OK on success, LIB_ERR_THREAD_CREATE,
 *         LIB_ERR_NO_SPANNED_DISP, or a send error.
 *
 * NOTE: the remote path only confirms that the request was sent, not
 * that the thread was actually created.
 */
errval_t domain_thread_create_on_varstack(coreid_t core_id,
                                          thread_func_t start_func,
                                          void *arg, size_t stacksize)
{
    if (disp_get_core_id() != core_id) {
        // Remote core: forward the request over the interdisp binding
        struct domain_state *ds = get_domain_state();
        errval_t err;

        if (ds->b[core_id] == NULL) {
            return LIB_ERR_NO_SPANNED_DISP;
        }

        struct interdisp_binding *binding = ds->b[core_id];
        err = binding->tx_vtbl.create_thread(binding, NOP_CONT,
                                             (genvaddr_t)(uintptr_t)start_func,
                                             (genvaddr_t)(uintptr_t)arg,
                                             stacksize);
        return err_is_fail(err) ? err : SYS_ERR_OK;
    }

    // Local core: create the thread directly
    struct thread *t = (stacksize == 0)
        ? thread_create(start_func, arg)
        : thread_create_varstack(start_func, arg, stacksize);
    return (t != NULL) ? SYS_ERR_OK : LIB_ERR_THREAD_CREATE;
}
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * verion that will create threads on remote core with variable stack size
 *
 * \param core_id      Core to span this domain onto (must differ from ours)
 * \param callback     Invoked when the new dispatcher is up (may be NULL)
 * \param callback_arg Opaque argument passed to \p callback
 * \param stack_size   Stack size for the remote init thread
 *
 * \bug this is a hack
 *
 * NOTE(review): the early error returns leak remote_core_state, the
 * init thread, the dispatcher frame and span_domain_state — left as-is
 * since the spanning state machine's ownership is not fully visible here.
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg, size_t stack_size)
{
    assert(core_id != disp_get_core_id());

    errval_t err;
    struct domain_state *domain_state = get_domain_state();
    struct monitor_binding *mb = get_monitor_binding();
    assert(domain_state != NULL);

    /* Set reply handler */
    mb->rx_vtbl.span_domain_reply = span_domain_reply;

    // Wait until our own interdisp service export has completed
    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();
    }

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref    = domain_state->iref;

    /* get the alignment of the morecore state */
    struct morecore_state *state = get_morecore_state();
    remote_core_state->pagesize = state->mmu_state.alignment;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        return LIB_ERR_THREAD_CREATE;
    }

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    span_domain_state->thread       = newthread;
    span_domain_state->core_id      = core_id;
    span_domain_state->callback     = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {
        .cnode = cnode_page,
        .slot  = 0
    };

    /* Create new dispatcher frame */
    struct capref frame;
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
    lvaddr_t dispaddr;

    // Map the dispatcher frame into our own vspace so we can set it up
    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL,
                               NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp->fpu_trap = 1;
    disp_gen->core_id = span_domain_state->core_id;

    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);

    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

#ifdef __x86_64__
    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64 =
        get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64 =
        get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;
#endif

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));
    }

#if 0
    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));
        }
    }
#endif

#if 0
    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);
    }
#endif

    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    // Serialize access to the monitor binding; the continuation sends
    // the span request once the mutex is acquired
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                 .arg = span_domain_state });

#if 1
    // Block until the remote dispatcher signals it has initialized
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());
    }

    /* Free state */
    free(span_domain_state);
#endif

    return SYS_ERR_OK;
}