Example 1
/**
 * \brief Blocking bind to the name service
 *
 * Should be called once only at init time on each dispatcher.
 */
errval_t nameservice_client_blocking_bind(void)
{
    errval_t err;

    struct bind_state st = { .done = false };

    /* fire off a request for the iref for the name service */
    struct monitor_binding *mb = get_monitor_binding();
    mb->rx_vtbl.get_name_iref_reply = get_name_iref_reply;
    err = mb->tx_vtbl.get_name_iref_request(mb, NOP_CONT, (uintptr_t)&st);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_GET_NAME_IREF);
    }

    /* block on the default waitset until we're bound */
    struct waitset *ws = get_default_waitset();
    while (!st.done) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_EVENT_DISPATCH);
        }
    }

    return st.err;
}
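
A minimal usage sketch (the call and the error macros are the ones used throughout these examples; the init-time context is illustrative):

    /* Illustrative only: call once per dispatcher during init, before any
     * name service lookups. */
    errval_t err = nameservice_client_blocking_bind();
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "binding to the name service failed");
    }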
Example 2
int main(int argc, char **argv)
{
    // this is the bootstrap copy of the domain
    if (strcmp(argv[argc - 1], "SpAwNeD") != 0) {
        bsp_datagatherer = true;
    } else {
        bsp_datagatherer = false;
    }

    core_id = disp_get_core_id();
    skb_client_connect();

#ifdef SPAWN_YOUR_SELF
    if (bsp_datagatherer) {
        spawnmyself();
    }
#endif

    // gather different types of data

    // run cpuid
    gather_cpuid_data(core_id);

    // get the number of running cores and their APIC IDs from the monitor
    if (bsp_datagatherer) {
        gather_nr_running_cores(get_monitor_binding());
    } else {
        nr_cores_done = true;
    }

    // Adding the number of cores is the last operation performed by the
    // datagatherer, so the domain can exit afterwards. Process events until
    // the number of cores has been added to the SKB.
    struct waitset *ws = get_default_waitset();
    while (!nr_cores_done) {
        errval_t err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch");
            break;
        }
    }

    skb_add_fact("datagatherer_done.");

    if (bsp_datagatherer) {
        int length = nr_of_running_cores + 1;
        while (length != nr_of_running_cores) {
            skb_execute_query("findall(X, datagatherer_done, L),length(L,Len),write(Len).");
            skb_read_output("%d", &length);
            thread_yield();
        }


        errval_t err = nameservice_register("datagatherer_done", 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "nameservice_register failed");
        }
    }
    return 0;
}
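
The final loop is effectively a barrier built on the SKB: every datagatherer asserts the fact datagatherer_done., and the BSP polls until the fact count reaches the number of running cores. A hedged, reusable form of that pattern, using only the SKB calls that appear above:

/* Illustrative helper, not part of the original code: block until
 * `expected` cores have asserted datagatherer_done. */
static void wait_for_datagatherers(int expected)
{
    int count = expected + 1; /* force at least one query */
    while (count != expected) {
        skb_execute_query("findall(X, datagatherer_done, L),"
                          "length(L,Len),write(Len).");
        skb_read_output("%d", &count);
        thread_yield();
    }
}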
Example 3
/**
 * \brief Connect the virtual serial port (UART) to a physical serial port
 *        on the host.
 *
 * \param user_data PC16550d driver struct.
 * \param host_uart Name of the host uart driver.
 */
void pc16550d_attach_to_host_uart(struct pc16550d *user_data,
                                  const char *host_uart)
{
    assert(user_data != NULL);
    assert(host_uart != NULL);

    errval_t err;
    iref_t iref;

    // Initialization
    struct pc16550d_forward_uart *state =
        malloc(sizeof(struct pc16550d_forward_uart));
    assert(state != NULL);
    state->connected = false;
    state->ws = get_default_waitset();

    // Adjust PC16550d state
    user_data->forward_state = PC16550d_FORWARD_UART;
    user_data->forward_uart_state = state;

    // Bind to uart driver
    err = nameservice_lookup(host_uart, &iref);
    assert(err_is_ok(err));
    err = serial_bind(iref, serial_bind_cb, user_data, state->ws,
                      IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(err));

    // Dispatch the monitor binding until the bind completes.
    struct monitor_binding *monitor_b = get_monitor_binding();
    struct waitset *monitor_ws = monitor_b->waitset;
    while (!state->connected) {
        err = event_dispatch(monitor_ws);
        assert(err_is_ok(err));
    }
}
Example 4
/**
 * \brief Initialise a new UMP channel
 *
 * Most code should be using one of ump_chan_bind() or ump_chan_accept().
 *
 * \param uc Storage for channel state
 * \param inbuf Pointer to incoming message buffer
 * \param inbufsize Size of inbuf in bytes (must be multiple of UMP message size)
 * \param outbuf Pointer to outgoing message buffer
 * \param outbufsize Size of outbuf in bytes (must be multiple of UMP message size)
 */
errval_t ump_chan_init(struct ump_chan *uc,
                       volatile void *inbuf, size_t inbufsize,
                       volatile void *outbuf, size_t outbufsize)
{
    assert(uc != NULL);
    errval_t err;

    err = ump_endpoint_init(&uc->endpoint, inbuf, inbufsize);
    if (err_is_fail(err)) {
        return err;
    }

    err = ump_chan_state_init(&uc->send_chan, outbuf, outbufsize, UMP_OUTGOING);
    if (err_is_fail(err)) {
        return err;
    }

    uc->max_send_msgs = outbufsize / UMP_MSG_BYTES;
    uc->max_recv_msgs = inbufsize / UMP_MSG_BYTES;

    memset(&uc->cap_handlers, 0, sizeof(uc->cap_handlers));
    uc->iref = 0;
    uc->monitor_binding = get_monitor_binding(); // TODO: expose non-default to caller

    return SYS_ERR_OK;
}
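
A plausible call site, assuming one already-mapped shared frame whose halves serve as receive and send buffers (both sizes must be multiples of UMP_MSG_BYTES, as the doc comment requires):

/* Illustrative only: split a single mapped buffer into in/out halves. */
static errval_t setup_ump(struct ump_chan *uc, volatile void *buf,
                          size_t bufsize)
{
    size_t half = bufsize / 2;  /* each half must hold whole UMP messages */
    return ump_chan_init(uc, buf, half,
                         (volatile char *)buf + half, half);
}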
Example 5
/**
 * \brief Initialize a connection to a terminal server and block until
 *        connection is established.
 *
 * \param client     Terminal client state, initialized by function to default
 *                   values.
 * \param session_id The session the domain is part of.
 *
 * Dispatches the monitor waitset until all the bindings to the terminal server
 * are established.
 */
errval_t term_client_blocking_init(struct term_client *client,
                                   struct capref session_id)
{
    errval_t err;
    iref_t in_iref;
    iref_t out_iref;
    iref_t conf_iref;

    /* Initialize client state to default values. */
    struct_term_client_init(client);

    /* Get the interface references from octopus. */
    err = get_irefs(session_id, &in_iref, &out_iref, &conf_iref);
    if (err_is_fail(err)) {
        return err;
    }

    /* Bind to interface for incoming characters. */
    err = terminal_bind(in_iref, in_bind_cb, client, client->read_ws,
                        IDC_BIND_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err_push(err, TERM_ERR_BIND_IN_INTERFACE);
    }
    TERM_DEBUG("Binding to terminal interface for incoming characters.\n");

    /* Bind to interface for outgoing characters. */
    err = terminal_bind(out_iref, out_bind_cb, client, client->write_ws,
                        IDC_BIND_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err_push(err, TERM_ERR_BIND_OUT_INTERFACE);
    }
    TERM_DEBUG("Binding to terminal interface for outgoing characters.\n");

    /* Bind to interface for configuration messages. */
    err = terminal_config_bind(conf_iref, conf_bind_cb, client,
                               client->conf_ws, IDC_BIND_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err_push(err, TERM_ERR_BIND_CONF_INTERFACE);
    }
    TERM_DEBUG("Binding to terminal configuration interface for configuration "
               "messages.\n");

    /*
     * Dispatch on the monitor binding until the bind completes. Otherwise, we
     * would have to check before every term_client_blocking_read and
     * term_client_blocking_write if we're already connected.
     */
    struct monitor_binding *monitor_b = get_monitor_binding();
    struct waitset *monitor_ws = monitor_b->waitset;
    while (!client->connected) {
        err = event_dispatch(monitor_ws);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Error dispatching events.");
        }
    }
    TERM_DEBUG("Connection to terminal server successfully established.\n");

    return SYS_ERR_OK;
}
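
Examples 1, 3 and 5 all share the same blocking idiom: register a handler that sets a flag, then dispatch a waitset until the flag flips. A generic form of the loop (the helper itself is illustrative; the calls are the same ones used above):

/* Illustrative helper: dispatch events on `ws` until *flag becomes true.
 * The flag must be set by a handler that runs on this same waitset. */
static errval_t dispatch_until(struct waitset *ws, volatile bool *flag)
{
    while (!*flag) {
        errval_t err = event_dispatch(ws);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_EVENT_DISPATCH);
        }
    }
    return SYS_ERR_OK;
}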
Example 6
/**
 * \brief Initialize the multi-hop interconnect driver
 */
void multihop_init(void)
{
    struct monitor_binding *mb = get_monitor_binding();
    // handlers for messages arriving from the monitor: bind requests,
    // bind replies, multi-hop payload messages and capability transfers
    mb->rx_vtbl.multihop_bind_service_request = &multihop_bind_service_request_handler;
    mb->rx_vtbl.multihop_bind_client_reply = &multihop_bind_reply_handler;
    mb->rx_vtbl.multihop_message = &handle_multihop_message;
    mb->rx_vtbl.multihop_cap_send = &multihop_handle_capability;
}
Example 7
/*
 * Called when bfscope has finished flushing and wants to notify the
 * initiator of the flush request.
 */
static void bfscope_send_flush_ack_to_monitor(void)
{
    struct bfscope_ack_send_state *state =
        malloc(sizeof(struct bfscope_ack_send_state));
    assert(state != NULL);

    state->monitor_binding = get_monitor_binding();

    // wait for exclusive access to the monitor binding, then send the ack
    event_mutex_enqueue_lock(&state->monitor_binding->mutex, &state->qnode,
                             MKCLOSURE(&bfscope_send_flush_ack_cont, state));
}
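
The continuation passed to MKCLOSURE has the usual void (*)(void *) closure shape (compare Example 15). A hypothetical skeleton, with the actual send elided since it is not part of this excerpt:

/* Hypothetical skeleton only; the real bfscope_send_flush_ack_cont is not
 * shown here. It runs once the monitor binding's mutex has been acquired. */
static void bfscope_send_flush_ack_cont(void *arg)
{
    struct bfscope_ack_send_state *state = arg;
    struct monitor_binding *mb = state->monitor_binding;
    /* ... send the flush ack on mb->tx_vtbl, then free(state) ... */
    (void)mb;
}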
Example 8
/**
 * \brief Send a bind reply back to the monitor. If the error code indicates
 *        success, this function creates a new monitor binding and registers
 *        to receive messages.
 *
 * \param mc      The multi-hop channel (must be NULL iff msgerr indicates failure)
 * \param msgerr  Error code to send back
 * \param vci     Our VCI for incoming messages
 * \param waitset Waitset to use for the channel
 */
void multihop_chan_send_bind_reply(struct multihop_chan *mc, errval_t msgerr,
        multihop_vci_t vci, struct waitset *waitset)
{
    errval_t err;
    struct bind_multihop_reply_state *reply_state =
        malloc(sizeof(struct bind_multihop_reply_state));
    assert(reply_state != NULL);

    if (err_is_ok(msgerr)) {
        // make sure channel exists
        assert(mc != NULL);
    } else {
        // make sure channel is not created
        assert(mc == NULL);
    }

    reply_state->mc = mc;
    reply_state->args.err = msgerr;
    reply_state->args.receiver_vci = vci;

    if (err_is_ok(msgerr)) {
        // get a vci for this binding
        reply_state->mc->my_vci = multihop_chan_mapping_insert(mc);
        reply_state->args.sender_vci = reply_state->mc->my_vci;
    } else {
        reply_state->args.sender_vci = 0;
    }

    if (err_is_ok(msgerr)) {
        // create a new monitor binding
        // create a new monitor binding
        err = monitor_client_new_binding(
                multihop_new_monitor_binding_continuation2, reply_state,
                waitset, DEFAULT_LMP_BUF_WORDS);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Could not create a new monitor binding "
                           "in the multi-hop interconnect driver");
        }
    } else {
        reply_state->monitor_binding = get_monitor_binding();
        // wait for the ability to use the monitor binding
        event_mutex_enqueue_lock(&reply_state->monitor_binding->mutex,
                &reply_state->qnode, MKCLOSURE(send_bind_reply, reply_state));
    }
}
Example 9
/**
 * \brief Initialise a new LMP channel and initiate a binding
 *
 * \param lc  Storage for channel state
 * \param cont Continuation for bind completion/failure
 * \param qnode Storage for an event queue node (used for queuing bind request)
 * \param iref IREF to which to bind
 * \param buflen_words Size of incoming buffer, in number of words
 */
errval_t lmp_chan_bind(struct lmp_chan *lc, struct lmp_bind_continuation cont,
                       struct event_queue_node *qnode, iref_t iref,
                       size_t buflen_words)
{
    errval_t err;

    lmp_chan_init(lc);

    /* store bind arguments */
    lc->iref = iref;
    lc->buflen_words = buflen_words;
    lc->bind_continuation = cont;

    /* allocate a cap slot for the new endpoint cap */
    err = slot_alloc(&lc->local_cap);
    if (err_is_fail(err)) {
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    /* allocate a local endpoint */
    err = lmp_endpoint_create_in_slot(buflen_words, lc->local_cap,
                                      &lc->endpoint);
    if (err_is_fail(err)) {
        slot_free(lc->local_cap);
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_ENDPOINT_CREATE);
    }

    // wait for the ability to use the monitor binding
    lc->connstate = LMP_BIND_WAIT;
    struct monitor_binding *mb = lc->monitor_binding = get_monitor_binding();
    event_mutex_enqueue_lock(&mb->mutex, qnode,
                             MKCLOSURE(send_bind_cont, lc));

    return SYS_ERR_OK;
}
Example 10
/// Initialise the LMP channel driver
void lmp_init(void)
{
    struct monitor_binding *mcb = get_monitor_binding();
    mcb->rx_vtbl.bind_lmp_reply_client = bind_lmp_reply_handler;
    mcb->rx_vtbl.bind_lmp_service_request = bind_lmp_service_request_handler;
}
Example 11
static void run_server(struct mem_thc_service_binding_t *sv)
{
    mem_service_msg_t msg;
    bool loop = true;
  
    // this is the bitmap of messages we are interested in receiving
    struct mem_service_selector selector = {
        .allocate = 1,
        .available = 1,
        .free = 1,
        .steal = 1,
    };

    while (loop) {
        // receive any message
        sv->recv_any(sv, &msg, selector);

        // dispatch it
        switch(msg.msg) {
        case mem_allocate:
            percore_allocate_handler(sv, msg.args.allocate.in.bits,
                                     msg.args.allocate.in.minbase,
                                     msg.args.allocate.in.maxlimit);
            break;
        case mem_steal:
            percore_steal_handler(sv, msg.args.allocate.in.bits,
                                     msg.args.allocate.in.minbase,
                                     msg.args.allocate.in.maxlimit);
            break;
        case mem_available:
            mem_available_handler(sv);
            break;
        case mem_free_monitor:
            percore_free_handler(sv, msg.args.free.in.mem_cap); 
            break;
        default:
            debug_printf("unexpected message: %d\n", msg.msg);
            loop = false;
            break;
        }
    }
}

errval_t percore_mem_serv(coreid_t core, coreid_t *cores, 
                                 int len_cores, memsize_t ram)
{
    errval_t err;

    struct waitset *ws = get_default_waitset();

    // Init the memory allocator 
    err = initialize_percore_mem_serv(core, cores, len_cores, ram);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "initializing percore mem_serv");
        return err;
    }

    struct mem_thc_export_info e_info;
    struct mem_thc_service_binding_t *sv;
    struct mem_binding *b;
    iref_t iref;

    char service_name[NAME_LEN];
    snprintf(service_name, NAME_LEN, "%s.%d", MEMSERV_DIST, core);
    
    // export without registering a name here; we register with the name
    // service explicitly below, after spawnd's local memserv has been set
    err = mem_thc_export(&e_info, NULL, ws,
                         IDC_EXPORT_FLAGS_DEFAULT,
                         &iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "exporting percore mem interface");
        return err;
    }

    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_mem_iref_request(mb, NOP_CONT, iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "setting monitor's percore mem_serv iref");
        return err;
    }

    // explicitly tell spawnd to use us
    err = set_local_spawnd_memserv(core);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "setting spawnd.%d's local memserv", core);
        return err;
    }

    // register only after spawnd's local memserv has been set
    err = nameservice_register(service_name, iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "nameservice_register failed");
        return err;
    }
    // let the master know we are ready
    err = nsb_register_n(core, MEMSERV_DIST);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "nsb_register_n failed");
    }

    do {
        while (true) {
            err = mem_thc_accept(&e_info, &b);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "thc accept failed");
                continue;
            }

            sv = malloc(sizeof(struct mem_thc_service_binding_t));
            if (sv == NULL) {
                DEBUG_ERR(LIB_ERR_MALLOC_FAIL, "allocating thc service binding");
                continue;
            }

            err = mem_thc_init_service(sv, b, b);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "thc init failed");
                continue;
            }

            async run_server(sv);
        }
    } finish;

    // should never reach here
    return SYS_ERR_OK;
}
Example 12
void setup_routes(int argc, char **argv)
{
    errval_t err;
    struct monitor_binding *st = get_monitor_binding();

    /* printf("%s: setup_routes\n", argv[0]); */

    /* Set core id */
    my_core_id = disp_get_core_id();
    strcpy(my_name, argv[0]);

    // Get number of cores
    coreid_t cores = atoi(argv[1]);

    // Get list of present cores
    for(int i = 3; i < argc; i++) {
        set_present(argv[i]);
    }

    if (strcmp(argv[argc - 1], "dummy")) { /* bsp core */
        // Spawn all copies
        bsp_id = my_core_id;

        /* Spawn on all cores */
        char *spawnargv[argc + 2];
        for (int i = 0; i < argc; i++) {
            spawnargv[i] = argv[i];
        }
        spawnargv[argc] = "dummy";
        spawnargv[argc + 1] = NULL;
        for(coreid_t i = 0; i < MAX_CPUS; i++) {
            if(core_present[i] && i != my_core_id) {
                err = spawn_program(i, my_name, spawnargv, NULL,
                                    SPAWN_FLAGS_DEFAULT, NULL);
                assert(err_is_ok(err));
            }
        }
    }

    /* printf("%s: exporting service\n", argv[0]); */
    /* Setup a server */
    request_done = false;
    err = rcce_export(NULL, _listening, _connected, get_default_waitset(),
                      IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "rcce_export failed");
        abort();
    }
    while (!request_done) {
        event_dispatch(get_default_waitset());
    }

    if (strcmp(argv[argc - 1], "dummy")) { /* bsp core */
        for (coreid_t i = 0; i < MAX_CPUS; i++) {
            /* Connect to all cores */
            if (core_present[i] && i != my_core_id && barray[i] == NULL) {
                /* printf("%s: connecting to core %d\n", argv[0], i); */
                connect(i);
            }
        }
    } else {
        /* printf("%s: waiting for connection\n", argv[0]); */
        // Wait for an incoming connection request
        while(connect_request == NULL) {
            event_dispatch(get_default_waitset());
        }

        /* Connect to all cores to which we have not connected already */
        for (coreid_t i = 0; i < MAX_CPUS; i++) {
            if (core_present[i] && i != my_core_id && barray[i] == NULL) {
                /* printf("%s: slave connecting to core %d\n", argv[0], i); */
                connect(i);
            }
        }

        /* printf("%s: sending connect reply\n", argv[0]); */
        // Send the reply back
        err = connect_request->tx_vtbl.error_reply(connect_request, NOP_CONT,
                                                   SYS_ERR_OK, connect_state);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "init_reply failed");
            abort();
        }
    }

    /* printf("%s: done\n", argv[0]); */

    // Determine maximum core ID
    coreid_t maxcore = 0;
    for(coreid_t i = 0; i < MAX_CPUS; i++) {
        if(core_present[i]) {
            maxcore = i;
        }
    }

    barriers_init(maxcore + 1);
}
Example 13
static void multiboot_cap_reply(struct monitor_binding *st, struct capref cap,
                                errval_t msgerr)
{
    errval_t err;
    static cslot_t multiboot_slots = 0;

    // An error reply indicates that all multiboot caps have been received
    if (err_is_fail(msgerr)) {
        // Request bootinfo frame
        struct bootinfo *bi;
        err = map_bootinfo(&bi);
        assert(err_is_ok(err));

        // Init ramfs
        struct dirent *root = ramfs_init();

        // Populate it with contents of multiboot
        populate_multiboot(root, bi);

        // Start the service
        err = start_service(root);
        assert(err_is_ok(err));
        return;
    }

    // Move the cap into the multiboot cnode
    struct capref dest = {
        .cnode = cnode_module,
        .slot  = multiboot_slots++,
    };
    err = cap_copy(dest, cap);
    assert(err_is_ok(err));
    err = cap_destroy(cap);
    assert(err_is_ok(err));

    err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, multiboot_slots);
    assert(err_is_ok(err));
}

static void bootstrap(void)
{
    errval_t err;

    /* Create the module cnode */
    struct capref modulecn_cap = {
        .cnode = cnode_root,
        .slot  = ROOTCN_SLOT_MODULECN,
    };
    err = cnode_create_raw(modulecn_cap, NULL,
                           ((cslot_t)1 << MODULECN_SIZE_BITS), NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cnode_create_raw failed");
        abort();
    }

    // XXX: Set reply handler
    struct monitor_binding *st = get_monitor_binding();
    st->rx_vtbl.multiboot_cap_reply = multiboot_cap_reply;

    // Make first multiboot cap request
    err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, 0);
    assert(err_is_ok(err));
}
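
Note that bootstrap() only kicks off the request/reply chain; the caller must still dispatch the default waitset so that multiboot_cap_reply actually runs. A hedged sketch of that pump loop (the bootstrap_done flag is hypothetical, not in the original):

    /* Illustrative: assumes a hypothetical `bootstrap_done` flag set once
     * start_service() has run. */
    struct waitset *ws = get_default_waitset();
    while (!bootstrap_done) {
        errval_t err = event_dispatch(ws);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "event_dispatch during bootstrap");
        }
    }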
Example 14
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 *        version that creates threads on a remote core with a variable
 *        stack size.
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg, size_t stack_size)
{
    assert(core_id != disp_get_core_id());

    errval_t err;
    struct domain_state *domain_state = get_domain_state();
    struct monitor_binding *mb = get_monitor_binding();
    assert(domain_state != NULL);

    /* Set reply handler */
    mb->rx_vtbl.span_domain_reply = span_domain_reply;

    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();
    }

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref    = domain_state->iref;

    /* get the alignment of the morecore state */
    struct morecore_state *state = get_morecore_state();
    remote_core_state->pagesize = state->mmu_state.alignment;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        return LIB_ERR_THREAD_CREATE;
    }

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    span_domain_state->thread       = newthread;
    span_domain_state->core_id      = core_id;
    span_domain_state->callback     = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {
        .cnode = cnode_page,
        .slot = 0
    };

    /* Create new dispatcher frame */
    struct capref frame;
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
    lvaddr_t dispaddr;

    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp->fpu_trap = 1;
    disp_gen->core_id = span_domain_state->core_id;
    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);
    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

#ifdef __x86_64__
    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64
        = get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64
        = get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;
#endif

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));
    }

#if 0
    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));
        }
    }
#endif

#if 0
    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);
    }
#endif
    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                 .arg = span_domain_state });

#if 1
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());
    }

    /* Free state */
    free(span_domain_state);
#endif

    return SYS_ERR_OK;
}
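
In the Barrelfish tree this static helper is wrapped by a public entry point with a default stack size. A plausible sketch of that wrapper (THREADS_DEFAULT_STACK_BYTES is an assumed constant name):

/* Plausible public wrapper; sketch only. */
errval_t domain_new_dispatcher(coreid_t core_id,
                               domain_spanned_callback_t callback,
                               void *callback_arg)
{
    return domain_new_dispatcher_varstack(core_id, callback, callback_arg,
                                          THREADS_DEFAULT_STACK_BYTES);
}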
Example 15
static void span_domain_request_sender_wrapper(void *st)
{
    struct monitor_binding *mb = get_monitor_binding();
    mb->st = st;
    span_domain_request_sender(mb);
}
Example 16
int main(int argc, char **argv)
{
#ifndef CONFIG_TRACE
    // bail - no tracing support
    printf("%.*s: Error, no tracing support, cannot start bfscope\n",
           DISP_NAME_LEN, disp_name());
    printf("%.*s: recompile with trace = TRUE in build/hake/Config.hs\n",
           DISP_NAME_LEN, disp_name());
    return -1;
#endif

    // Allocate the outgoing buffer
    if (trace_buf == NULL) {
        trace_buf = malloc(BFSCOPE_BUFLEN);
    }
    assert(trace_buf);

    /* Disable tracing for bfscope */
    dispatcher_handle_t handle = curdispatcher();
    struct dispatcher_generic *disp = get_dispatcher_generic(handle);
    disp->trace_buf = NULL;

    printf("%.*s running on core %d\n", DISP_NAME_LEN, disp_name(),
           disp_get_core_id());

    /* Connect to e1000 driver */
    printf("%.*s: trying to connect to the e1000 driver...\n",
           DISP_NAME_LEN, disp_name());

    lwip_init_auto();

    err_t lwip_err = bfscope_server_init();

    assert(lwip_err == ERR_OK);


    // Export our empty interface
    errval_t err;
    err = empty_export(NULL /* state pointer for connect/export callbacks */,
            export_cb, connect_cb, get_default_waitset(),
            IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "export failed");
    }

    // Register our message handlers with the monitor
    struct monitor_binding *monitor_binding = get_monitor_binding();
    monitor_binding->rx_vtbl.bfscope_flush_send = &bfscope_handle_flush_msg;

    while (1) {
        err = event_dispatch_non_block(lwip_waitset);

        if (err_no(err) == LIB_ERR_NO_EVENT) {
            // It is ok if no event was dispatched.
            err = SYS_ERR_OK;
        }

        DEBUG("bfscope: dispatched event, autoflush: %d\n",
              ((struct trace_buffer *) trace_buffer_master)->autoflush);

        // Check if we are in autoflush mode
        if(((struct trace_buffer*) trace_buffer_master)->autoflush) {
            local_flush = true;
            bfscope_trace_dump();
        }

        thread_yield_dispatcher(NULL_CAP);


        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch");
            break;
        }
    }

    return 0;
}
Example 17
int epoll_wait(int epfd, struct epoll_event *events,
               int maxevents, int timeout)
{
    struct fdtab_entry *mye = fdtab_get(epfd);
    assert(mye->type == FDTAB_TYPE_EPOLL_INSTANCE);
    struct _epoll_fd *efd = mye->handle;
    struct monitor_binding *mb = get_monitor_binding();
    errval_t err;

    /* waitset_init(&efd->ws); */
    assert(maxevents >= 1);

    for(struct _epoll_events_list *i = efd->events; i != NULL; i = i->next) {
        struct fdtab_entry *e = fdtab_get(i->fd);
        struct epoll_event *event = &i->event;

        switch (e->type) {
        case FDTAB_TYPE_LWIP_SOCKET:
        {
            int retval;

            lwip_mutex_lock();
            if(event->events & EPOLLIN) {
                retval = lwip_sock_waitset_register_read(e->fd, &efd->ws);
                assert(retval == 0);
            }
            if(event->events & EPOLLOUT) {
                retval = lwip_sock_waitset_register_write(e->fd, &efd->ws);
                assert(retval == 0);
            }
            lwip_mutex_unlock();
        }
        break;

        case FDTAB_TYPE_UNIX_SOCKET:
        {
            struct _unix_socket *us = e->handle;

            if(event->events & EPOLLIN) {
                if (us->passive) { /* passive side */
                    int j;

                    /* Check for pending connection requests. */
                    for (j = 0; j < us->u.passive.max_backlog; j++)
                    {
                        if (us->u.passive.backlog[j] != NULL) {
                            break;
                        }
                    }

                    /*
                     * If there are no pending connection requests,
                     * wait on the monitor binding.
                     */
                    if (j == us->u.passive.max_backlog) {
                        /* wait on monitor */
                        err = mb->change_waitset(mb, &efd->ws);
                        if (err_is_fail(err)) {
                            USER_PANIC_ERR(err, "change_waitset");
                        }
                    }
                }
            }

            if(event->events & EPOLLOUT) {
                assert(!us->passive);

                if(us->u.active.mode == _UNIX_SOCKET_MODE_CONNECTING) {
                    /* wait on monitor */
                    err = mb->change_waitset(mb, &efd->ws);
                    if (err_is_fail(err)) {
                        USER_PANIC_ERR(err, "change_waitset");
                    }
                }
            }

            assert(event->events & (EPOLLIN | EPOLLOUT));

            // Change waitset
            err = us->u.active.binding->change_waitset
                  (us->u.active.binding, &efd->ws);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "change waitset");
            }

        }
        break;

        default:
        {
            fprintf(stderr, "change waitset on FD type %d NYI.\n",
                    e->type);
            assert(!"NYI");
            errno = EBADF;
            return -1;
        }
        }
    }

    // Timeout handling
    struct timeout_event toe = {
        .fired = false
    };
    struct deferred_event timeout_event;
    if (timeout > 0) {
        deferred_event_init(&timeout_event);
        err = deferred_event_register(&timeout_event, &efd->ws, timeout,
                                      MKCLOSURE(timeout_fired, &toe));
        if (err_is_fail(err)) {
            errno = EINVAL;
            return -1;
        }
    }

    int retevents = 0;
    while(!toe.fired && retevents == 0) {
        if(timeout == 0) {
            // Just poll once, don't block
            err = event_dispatch_non_block(&efd->ws);
            assert(err_is_ok(err) || err_no(err) == LIB_ERR_NO_EVENT);
            toe.fired = true;
        } else {
            err = event_dispatch(&efd->ws);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "Error in event_dispatch.");
            }
        }

        // Return ready file descriptors
        for(struct _epoll_events_list *i = efd->events; i != NULL; i = i->next) {
            struct epoll_event *event = &i->event;
            struct fdtab_entry *e = fdtab_get(i->fd);

            assert(retevents < maxevents);
            events[retevents] = *event;
            events[retevents].events = 0;

            // Check errors (hangup)
            {
                switch (e->type) {
                case FDTAB_TYPE_LWIP_SOCKET:
                {
                    lwip_mutex_lock();
                    if (!lwip_sock_is_open(e->fd)) {
                        events[retevents].events |= EPOLLHUP;
                    }
                    lwip_mutex_unlock();
                }
                break;

                default:
                    // No-Op
                    break;
                }
            }

            // Check readable FDs
            if(event->events & EPOLLIN) {
                switch (e->type) {
                case FDTAB_TYPE_LWIP_SOCKET:
                {
                    lwip_mutex_lock();
                    if (lwip_sock_ready_read(e->fd)) {
                        events[retevents].events |= EPOLLIN;
                    }
                    lwip_mutex_unlock();
                }
                break;

                case FDTAB_TYPE_UNIX_SOCKET:
                {
                    struct _unix_socket *us = e->handle;

                    if (us->passive) { /* passive side */
                        /* Check for pending connection requests. */
                        for (int j = 0; j < us->u.passive.max_backlog; j++)
                        {
                            if (us->u.passive.backlog[j] != NULL) {
                                events[retevents].events |= EPOLLIN;
                                break;
                            }
                        }
                    } else { /* active side */
                        /* Check for incoming data. */
                        if (us->recv_buf_valid > 0) {
                            events[retevents].events |= EPOLLIN;
                        }
                    }
                }
                break;

                default:
                {
                    fprintf(stderr, "epoll_wait() on FD type %d NYI.\n",
                            e->type);
                    assert(!"NYI");
                    errno = EBADF;
                    return -1;
                }
                }
            }

            // Check writeable FDs
            if(event->events & EPOLLOUT) {
                switch (e->type) {
                case FDTAB_TYPE_LWIP_SOCKET:
                {
                    lwip_mutex_lock();
                    if (lwip_sock_ready_write(e->fd)) {
                        events[retevents].events |= EPOLLOUT;
                    }
                    lwip_mutex_unlock();
                }
                break;

                case FDTAB_TYPE_UNIX_SOCKET:
                {
                    struct _unix_socket *us = e->handle;
                    assert(!us->passive);

                    switch (us->u.active.mode) {
                    case _UNIX_SOCKET_MODE_CONNECTING:
                        break;

                    case _UNIX_SOCKET_MODE_CONNECTED:
                        if (us->send_buf == NULL) {
                            events[retevents].events |= EPOLLOUT;
                        }
                        break;
                    }
                }
                break;

                default:
                {
                    fprintf(stderr, "epoll_wait() on FD type %d NYI.\n",
                            e->type);
                    assert(!"NYI");
                    errno = EBADF;
                    return -1;
                }
                }
            }

            // If any events were returned, go to next entry in array
            if(events[retevents].events != 0) {
                retevents++;
            }
        }
    }

    // Remove timeout from waitset if it was set and not fired
    if(timeout > 0 && !toe.fired) {
        deferred_event_cancel(&timeout_event);
    }

    // Restore old waitsets
    for(struct _epoll_events_list *i = efd->events; i != NULL; i = i->next) {
        struct fdtab_entry *e = fdtab_get(i->fd);
        struct epoll_event *event = &i->event;

        switch (e->type) {
        case FDTAB_TYPE_LWIP_SOCKET:
        {
            lwip_mutex_lock();
            if(event->events & EPOLLIN) {
                err = lwip_sock_waitset_deregister_read(e->fd);
                if (err_is_fail(err) &&
                        err_no(err) != LIB_ERR_CHAN_NOT_REGISTERED) {
                    USER_PANIC_ERR(err, "error deregister read channel for "
                                   "lwip socket");
                }
            }
            if(event->events & EPOLLOUT) {
                err = lwip_sock_waitset_deregister_write(e->fd);
                if (err_is_fail(err) &&
                        err_no(err) != LIB_ERR_CHAN_NOT_REGISTERED) {
                    USER_PANIC_ERR(err, "error deregister write channel for "
                                   "lwip socket");
                }
            }
            lwip_mutex_unlock();
        }
        break;

        case FDTAB_TYPE_UNIX_SOCKET:
        {
            // NYI
        }
        break;

        default:
        {
            fprintf(stderr, "change waitset on FD type %d NYI.\n",
                    e->type);
            assert(!"NYI");
            errno = EBADF;
            return -1;
        }
        }
    }

    return retevents;
}

int epoll_pwait(int epfd, struct epoll_event *events,
                int maxevents, int timeout,
                const sigset_t *sigmask)
{
    // Signal masks are not supported yet.
    assert(!"NYI");
    errno = ENOSYS;
    return -1;
}
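
For completeness, standard POSIX-style usage against this implementation might look as follows (assumes the compatibility layer also provides epoll_create() and epoll_ctl(), and sockfd is an already-open socket):

/* Illustrative only: watch one socket for readability, 1s timeout. */
static int wait_readable(int sockfd)
{
    int epfd = epoll_create(1);
    struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
    epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);

    struct epoll_event ready[8];
    return epoll_wait(epfd, ready, 8, 1000 /* ms */);
}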
Example 18
int main(int argc, char *argv[])
{
    errval_t err;
    my_core_id = disp_get_core_id();
    bench_init();

    if (argc == 1) { /* server */
        struct monitor_binding *mb = get_monitor_binding();
        mb->rx_vtbl.num_cores_reply = num_cores_reply;

        // Get number of cores in the system
        err = mb->tx_vtbl.num_cores_request(mb, NOP_CONT);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "error sending num_core_request");
        }

        // Spawn client on another core
        char *xargv[] = {"shared_mem_clock_bench", "dummy", NULL};
        err = spawn_program_on_all_cores(false, xargv[0], xargv, NULL,
                                         SPAWN_FLAGS_DEFAULT, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "error spawning on other cores");
        }

        // Export service
        err = bench_export(NULL, export_cb, connect_cb, get_default_waitset(),
                          IDC_EXPORT_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "export failed");
        }

        // Allocate a cap for the shared memory
        err = frame_alloc(&clock_frame, BASE_PAGE_SIZE, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "frame_alloc failed");
        }
        err = clock_init(clock_frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "clock_init failed");
        }

        // Wait for all connections to be established
        start_experiment_flag = false;
        while(!start_experiment_flag) {
            messages_wait_and_handle_next();
        }

        // Start experiments
        start_experiment();

    } else { /* client */
        // Lookup service
        iref_t iref;
        err = nameservice_blocking_lookup("server", &iref);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "nameservice_blocking_lookup failed");
        }

        // Bind to service
        err = bench_bind(iref, bind_cb, NULL, get_default_waitset(),
                         IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "bind failed");
        }
    }

    messages_handler_loop();
}