Example #1
static void do_experiment_client(void)
{
    errval_t err;
    cycles_t cycles;

    while(!start_sending_caps) {
        messages_wait_and_handle_next();
    }
    cycles = send_caps();
    do {
        err = bindings[0]->tx_vtbl.barrier_done(bindings[0], NOP_CONT, cycles);
    } while (redo_message(err));
    assert(err_is_ok(err));

    while (true) {
        start_retyping_caps = false;
        while(!start_retyping_caps) {
            messages_wait_and_handle_next();
        }
        cycles = retype_caps();
        
        destroy_caps();
        do {
            err = bindings[0]->tx_vtbl.barrier_done(bindings[0], NOP_CONT, cycles);
        } while (redo_message(err));
        assert(err_is_ok(err));
    }
}
Example #2
/* ----- Experiment ------- */
static void do_experiment_bsp(void)
{
    errval_t err;

    printf("Sending Caps\n");
    wait_for = num_cores;
    total_cycles = 0;

    for (int i=1; i<num_cores; i++) {
        do {
            err = bindings[i]->tx_vtbl.start_sending(bindings[i], NOP_CONT);
        } while (redo_message(err));
        
        assert(err_is_ok(err));
    }
    total_cycles += send_caps();
    wait_for--;
    
    // wait for other cores to finish
    while(wait_for) {
        messages_wait_and_handle_next();
    }

    printf("Retyping Caps\n");
    for(int iter=0; iter<10; iter++) {
        wait_for = num_cores;
        total_cycles = 0;
        
        for (int i=1; i<num_cores; i++) {
            do {
                err = bindings[i]->tx_vtbl.start_retyping(bindings[i], NOP_CONT);
            } while (redo_message(err));
            
            assert(err_is_ok(err));
        }
        total_cycles += retype_caps();
        wait_for--;
        
        // wait for other cores to finish
        while(wait_for) {
            messages_wait_and_handle_next();
        }

        destroy_caps();
        
        printf("%" PRIuCYCLES "\n", total_cycles / num_cores);
    }
    printf("Done\n");
}
Example #3
static void connect(coreid_t idx)
{
    errval_t err;
    char id[100];
    snprintf(id, sizeof(id), "%s%d", my_name, idx);

    iref_t iref;
    err = nameservice_blocking_lookup(id, &iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "nameservice_blocking_lookup failed");
        abort();
    }
    assert(iref != 0);

    struct rcce_state *st = malloc(sizeof(struct rcce_state));
    assert(st != NULL);
    memset(st, 0, sizeof(struct rcce_state));
    st->index = idx;
    st->request_done = false;

    /* printf("%s: rcce_bind\n", my_name); */

    err = rcce_bind(iref, client_connected, st, get_default_waitset(),
                    IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(err));

    /* printf("%s: waiting\n", my_name); */

    while (!st->request_done) {
        messages_wait_and_handle_next();
    }

    /* printf("%s: done\n", my_name); */
}
Example #4
errval_t blockdevfs_ahci_init(void)
{
    errval_t err;
    iref_t iref;

    err = nameservice_blocking_lookup("ahcid", &iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "nameservice_blocking_lookup for ahcid");
        return err; // FIXME
    }

    err = ahci_mgmt_bind(iref, ahci_mgmt_bind_cb, NULL, get_default_waitset(),
                  IDC_BIND_FLAG_RPC_CAP_TRANSFER);

    // init DMA pool
    ahci_dma_pool_init(1024*1024);

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "ahci_mgmt bind failed");
        return err; // FIXME
    }

    // XXX: block for bind completion (broken API!)
    while (!ahci_mgmt_bound) {
        messages_wait_and_handle_next();
    }

    return SYS_ERR_OK;
}
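
The busy-wait above only terminates once the bind callback runs from inside messages_wait_and_handle_next() and sets ahci_mgmt_bound. A minimal sketch of what ahci_mgmt_bind_cb could look like, assuming the standard Flounder bind-callback signature; the global that stores the binding is illustrative, not taken from the driver:

static struct ahci_mgmt_binding *ahci_mgmt_binding;  // assumed global
static volatile bool ahci_mgmt_bound = false;        // flag polled above

static void ahci_mgmt_bind_cb(void *st, errval_t err,
                              struct ahci_mgmt_binding *b)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "ahci_mgmt bind failed");
    }
    ahci_mgmt_binding = b;   // keep the binding for later requests
    ahci_mgmt_bound = true;  // unblocks the while (!ahci_mgmt_bound) loop
}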
Example #5
/**
 * \brief Initialize the domain library
 *
 * Registers an iref with the monitor to offer the interdisp service on this
 * core. Blocks until the export has completed.
 */
errval_t domain_init(void)
{
    errval_t err;
    struct domain_state *domain_state = malloc(sizeof(struct domain_state));
    if (!domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    set_domain_state(domain_state);

    domain_state->iref = 0;
    domain_state->default_waitset_handler = NULL;
    domain_state->remote_wakeup_queue = NULL;
    waitset_chanstate_init(&domain_state->remote_wakeup_event,
                           CHANTYPE_EVENT_QUEUE);
    for (int i = 0; i < MAX_CPUS; i++) {
        domain_state->b[i] = NULL;
    }

    waitset_init(&domain_state->interdisp_ws);
    domain_state->conditional = false;
    err = interdisp_export(NULL, server_listening, server_connected,
                           &domain_state->interdisp_ws, IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err;
    }

    // XXX: Wait for the export to finish before returning
    while(!domain_state->conditional) {
        messages_wait_and_handle_next();
    }

    return SYS_ERR_OK;
}
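
The loop above exits when Flounder invokes the export callback passed to interdisp_export(). A minimal sketch of server_listening under that assumption (standard export-callback signature; the error handling is illustrative):

static void server_listening(void *st, errval_t err, iref_t iref)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "interdisp export failed");
    }
    struct domain_state *domain_state = get_domain_state();
    domain_state->iref = iref;         // iref later registered with the monitor
    domain_state->conditional = true;  // ends the wait in domain_init()
}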
Example #6
static inline bool redo_message(errval_t err) {
    if (err == FLOUNDER_ERR_TX_BUSY) {
        messages_wait_and_handle_next();
        return true;
    } else {
        return false;  
    }
}
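
Callers typically wrap a tx_vtbl send in a retry loop built on this helper, as in Examples #1 and #2: resend while the channel reports FLOUNDER_ERR_TX_BUSY, servicing incoming messages in between, and treat any other error as final. A minimal sketch of the idiom, reusing the barrier_done call from Example #1:

errval_t err;
do {
    err = bindings[0]->tx_vtbl.barrier_done(bindings[0], NOP_CONT, 0);
} while (redo_message(err));
assert(err_is_ok(err));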
Example #7
errval_t timing_sync_timer(void)
{
#if defined(__x86_64__) || defined(__i386__)
    uint64_t tscperms;
    errval_t err = sys_debug_get_tsc_per_ms(&tscperms);
    assert(err_is_ok(err));

    // Exponential backoff loop
    for(uint64_t time_offset = MIN_DELAY_MS;
        time_offset <= MAX_DELAY_MS;
        time_offset *= 2) {
        uint64_t synctime = rdtsc() + tscperms * time_offset;
        int waitfor = 0;

        received = 0;
        error = SYS_ERR_OK;

        for(int i = 0; i < MAX_CPUS; i++) {
            struct intermon_binding *b = NULL;
            err = intermon_binding_get(i, &b);
            if(err_no(err) == MON_ERR_NO_MONITOR_FOR_CORE) {
                continue;
            }
            assert(err_is_ok(err));
            err = b->tx_vtbl.rsrc_timer_sync(b, NOP_CONT, synctime);
            assert(err_is_ok(err));

            waitfor++;
        }

        err = invoke_monitor_sync_timer(synctime);
        if(err_is_fail(err)) {
            error = err;
        }

        // Collect success/failure replies
        while(received < waitfor) {
            messages_wait_and_handle_next();
        }

        if(err_is_fail(error)) {
            if(err_no(error) != SYS_ERR_SYNC_MISS) {
                return error;
            }
        } else {
            break;
        }
    }

    return error;
#else
    printf("Phase-locked local clocks not supported on this platform!\n");
    return SYS_ERR_OK;
#endif
}
Example #8
File: main.c Project: 8l/barrelfish
errval_t request_ramfs_serv_iref(struct intermon_binding *st)
{
    errval_t err = st->tx_vtbl.ramfs_serv_iref_request(st, NOP_CONT);
    if (err_is_fail(err)) {
        return err_push(err, MON_ERR_SEND_REMOTE_MSG);
    }
    while(ramfs_serv_iref == 0) {
        messages_wait_and_handle_next();
    }
    return SYS_ERR_OK;
}
Example #9
static void start_experiment(void)
{
    errval_t err;

    for (int i = 1; i < num_cores; i++) {

        int count = 0;
        experiment_flag = false;
        experiment_count = i;
        for (int j = 0; j < MAX_CPUS; j++) {
            if (array[j]) {
                while(1) {
                    err = array[j]->tx_vtbl.shmc_start(array[j], NOP_CONT);
                    if (err_is_ok(err)) {
                        break;
                    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
                        messages_wait_and_handle_next();
                    } else {
                        USER_PANIC_ERR(err, "sending shmc_start failed");
                    }
                }
                count++;
                if (count == i) {
                    break;
                }
            }
        }
        run_experiment();

        printf("Running on %d cores\n", i + 1);
        for (int j = 0; j < MAX_COUNT; j++) {
            printf("page %d took %"PRIuCYCLES"\n", j,
                   timestamps[j].time1 - timestamps[j].time0 - bench_tscoverhead());
        }

        while(!experiment_flag) {
            messages_wait_and_handle_next();
        }
    }
    printf("client done\n");
}
Example #10
File: main.c Project: 8l/barrelfish
errval_t request_trace_caps(struct intermon_binding *st)
{
    errval_t err = st->tx_vtbl.trace_caps_request(st, NOP_CONT);
    if (err_is_fail(err)) {
        return err_push(err, MON_ERR_SEND_REMOTE_MSG);
    }
    while(capref_is_null(trace_cap)) {
        messages_wait_and_handle_next();
    }

    return SYS_ERR_OK;
}
Example #11
errval_t blockdevfs_ahci_write(void *handle, size_t pos, const void *buffer,
        size_t bytes, size_t *bytes_written)
{
    errval_t err;
    struct ahci_handle *h = handle;

    size_t aligned_bytes = bytes / PR_SIZE * PR_SIZE;

    // setup DMA regions and copy data over
    struct ahci_dma_region *bufregion;
    err = ahci_dma_region_alloc(aligned_bytes, &bufregion);
    if (err_is_fail(err)) {
        return err;
    }
    ahci_dma_region_copy_in(bufregion, buffer, 0, aligned_bytes);

    // setup FIS
    struct sata_fis_reg_h2d write_fis;
    memset(&write_fis, 0, sizeof(struct sata_fis_reg_h2d));
    write_fis.type = SATA_FIS_TYPE_H2D;
    write_fis.device = 1 << 6; // LBA mode, not CHS; ???
    sata_set_command(&write_fis, 0xCA); // write dma; ATA Command Set, 7.60
    sata_set_count(&write_fis, aligned_bytes / PR_SIZE); // nr. of sectors/blocks
    sata_set_lba28(&write_fis, pos / PR_SIZE);

    // set handle to waiting
    h->waiting = true;
    h->wait_status = SYS_ERR_OK;
    h->binding->rx_vtbl.command_completed = rx_write_command_completed_cb;

    // load fis and fire commands
    err = ahci_issue_command(h->binding, NOP_CONT, 0,
            (uint8_t*)&write_fis, sizeof(write_fis), true, bufregion, aligned_bytes);
    if (err_is_fail(err)) {
        h->waiting = false;
        ahci_dma_region_free(bufregion);
        return err;
    }

    // XXX: block for command completion (broken API!)
    while (h->waiting) {
        messages_wait_and_handle_next();
    }

    // cleanup and output
    h->binding->rx_vtbl.command_completed = NULL;
    ahci_dma_region_free(bufregion);
    if (err_is_ok(h->wait_status)) {
        *bytes_written = aligned_bytes;
    }
    return h->wait_status;
}
Example #12
static int demo(int argc, char *argv[])
{
    int core;
    int pixwidth = PIXEL_WIDTH;
    int frames = FRAMES;

    if (!pixels_inited) pixels_init();
    
    if (argc == 3) {
        pixwidth = atoi(argv[1]);
        frames = atoi(argv[2]);
    }
    int width = 8 * strlen(scroller);
    
    for (int x = 0; x < width - RENDER_WIDTH; x++) {

        // Repeat each frame a few times to slow down scrolling!
        for (int f = 0; f < frames; f++) {
            trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);
            for (int i = 0; i < RENDER_WIDTH; i++) {
                int xpos = (x + i) % width;
                char ascii = scroller[xpos >> 3];
                char c64char = c64map(ascii);
                int xsub = xpos & 7;

                acks = 0;
                for (core = 0; core < 8; core++) {
                    unsigned char bits = font[c64char*8 + (7-core)];

                    if (bits & (1 << (7-xsub))) {
                        my_pixels_bindings[core+2].tx_vtbl.display(&my_pixels_bindings[core+2], NOP_CONT, pixwidth);
                        acks++;
                    }
                }

                uint64_t now = rdtsc();

                while (acks) {
                    messages_wait_and_handle_next();
                }
                while (rdtsc() - now < pixwidth) ;
            }

            trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0);
        }
    }
    return 0;
}
Example #13
errval_t blockdevfs_ahci_close(void *handle)
{
    struct ahci_handle *h = handle;
    h->waiting = true;
    errval_t err = ahci_close(h->binding, MKCLOSURE(ahci_close_cb, h));
    if (err_is_fail(err)) {
        printf("ahci_init failed: '%s'\n", err_getstring(err));
        h->waiting = false;
        return err;
    }
    while (h->waiting) {
        messages_wait_and_handle_next();
    }
    return SYS_ERR_OK;
}
Example #14
errval_t dma_service_init(struct xeon_phi *phi)
{
    errval_t err;

    XDMA_DEBUG("Initializing DMA service\n");

    struct waitset *ws = get_default_waitset();

    err = xeon_phi_dma_export(phi,
                              svc_export_cb,
                              svc_connect_cb,
                              ws,
                              IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err;
    }

    XDMAV_DEBUG("Waiting for export...\n");
    while (svc_state == XPM_SVC_STATE_EXPORTING) {
        messages_wait_and_handle_next();
    }

    if (svc_state == XPM_SVC_STATE_EXPORT_FAIL) {
        return FLOUNDER_ERR_BIND;
    }

    svc_state = XPM_SVC_STATE_NS_REGISTERING;

    char buf[50];
#ifdef __k1om__
    snprintf(buf, 50, "%s.%u", XEON_PHI_DMA_SERVICE_NAME, 0);
#else
    snprintf(buf, 50, "%s.%u", XEON_PHI_DMA_SERVICE_NAME, phi->id);
#endif

    XDMA_DEBUG("Registering iref [%u] with name [%s]\n", dma_iref, buf);
    err = nameservice_register(buf, dma_iref);
    if (err_is_fail(err)) {
        svc_state = XPM_SVC_STATE_NS_REGISTER_FAIL;
        return err;
    }

    svc_state = XPM_SVC_STATE_RUNNING;

    return SYS_ERR_OK;

}
Example #15
static void pixels_init(void)
{
    // ensure pixels is up
    if (!pixels_started) {
        printf("Starting pixels...\n");
        spawnpixels(0, NULL);
    }

    pixels_connected = 0;

    for (int core = 0; core < NUM_PIXELS; core ++) {
        char name[16];
        iref_t serv_iref;
        errval_t err;
        
        sprintf(name, "pixels.%d", core);
        
        /* Connect to the server */
        err = nameservice_blocking_lookup(name, &serv_iref);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "failed to lookup server");
            exit(EXIT_FAILURE);
        }

        if (serv_iref == 0) {
            DEBUG_ERR(err, "failed to get a valid iref back from lookup");
            exit(EXIT_FAILURE);
        }

        err = pixels_bind(serv_iref,
                          my_pixels_bind_cb,
                          &my_pixels_bindings[core],
                          get_default_waitset(),
                          IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "bind request to pixels server failed immediately");
            exit(EXIT_FAILURE);
        }
    }

    while (pixels_connected < NUM_PIXELS) {
        messages_wait_and_handle_next();
    }
    
    printf("connected to pixels server\n");
    pixels_inited = true;
}
Example #16
/**
 * Connects to the driver domain manager.
 *
 * \param  connect_to iref where to connect.
 * \retval SYS_ERR_OK Connected to the driver manager.
 */
errval_t ddomain_communication_init(iref_t connect_to, uint64_t ident)
{
    rpc_bind.err = SYS_ERR_OK;
    rpc_bind.is_done = false;

    errval_t err = ddomain_bind(connect_to, rpc_bind_cb, NULL, get_default_waitset(), IDC_BIND_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err;
    }
    DRIVERKIT_DEBUG("%s:%s:%d: Trying to connect to kaluga...\n", __FILE__, __FUNCTION__, __LINE__);
    // XXX: broken
    while (!rpc_bind.is_done) {
        messages_wait_and_handle_next();
    }

    DRIVERKIT_DEBUG("%s:%s:%d: Send identify %"PRIu64"\n", __FILE__, __FUNCTION__, __LINE__, ident);
    errval_t send_err = rpc_bind.binding->tx_vtbl.identify(rpc_bind.binding, NOP_CONT, ident);
    assert(err_is_ok(send_err));

    return rpc_bind.err;
}
Example #17
errval_t blockdevfs_ahci_flush(void *handle)
{
    errval_t err = SYS_ERR_OK;
    struct ahci_handle *h = handle;
    h->waiting = true;
    // setup FIS
    struct sata_fis_reg_h2d fis;
    memset(&fis, 0, sizeof(struct sata_fis_reg_h2d));
    fis.type = SATA_FIS_TYPE_H2D;
    fis.device = 1 << 6; /* LBA mode, not CHS; ??? */
    sata_set_command(&fis, 0xE7); /* flush cache; ATA Command Set, 7.24 */
    sata_set_count(&fis, 0); /* nr. of sectors/blocks */
    sata_set_lba28(&fis, 0);

    // set handle to waiting
    h->waiting = true;
    h->wait_status = SYS_ERR_OK;
    h->bytes_transferred = 0;
    h->binding->rx_vtbl.command_completed = rx_flush_command_completed_cb;

    // load fis and fire commands
    err = ahci_issue_command(h->binding, NOP_CONT, 0,
            (uint8_t*)&fis, sizeof(fis), false, NULL, 0);
    if (err_is_fail(err)) {
        printf("bdfs_ahci: read load_fis failed: 0x%" PRIxPTR "\n", err);
        h->waiting = false;
        goto cleanup;
    }

    while (h->waiting) {
        messages_wait_and_handle_next();
    }

cleanup:

    h->binding->rx_vtbl.command_completed = NULL;

    return err;
}
Example #18
/**
 * \brief binds to the Xeon Phi Manager service
 *
 * \returns SYS_ERR_OK on success
 *          FLOUNDER_ERR_* on failure
 */
static errval_t xpm_bind(void)
{
    errval_t err;

    if (xpm_binding != NULL) {
        return SYS_ERR_OK;
    }

    assert(conn_state == XPM_STATE_INVALID);

    conn_state = XPM_STATE_NSLOOKUP;

    DEBUG_XPMC("nameservice lookup: "XEON_PHI_MANAGER_SERVICE_NAME"\n");

    err = nameservice_blocking_lookup(XEON_PHI_MANAGER_SERVICE_NAME, &xpm_iref);
    if (err_is_fail(err)) {
        return err;
    }

    conn_state = XPM_STATE_BINDING;

    DEBUG_XPMC("binding: "XEON_PHI_MANAGER_SERVICE_NAME" @ iref:%u\n", xpm_iref);

    err = xeon_phi_manager_bind(xpm_iref, xpm_bind_cb, NULL, get_default_waitset(),
    IDC_BIND_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err;
    }

    while (conn_state == XPM_STATE_BINDING) {
        messages_wait_and_handle_next();
    }

    if (conn_state == XPM_STATE_BIND_FAIL) {
        return FLOUNDER_ERR_BIND;
    }

    return SYS_ERR_OK;
}
Example #19
errval_t blockdevfs_ahci_open(void *handle)
{
    VFS_BLK_DEBUG("blockdevfs_ahci_open: entering\n");
    errval_t err;
    struct ahci_handle *h = handle;

    h->wait_status = SYS_ERR_OK;
    h->waiting = true;

    err = ahci_init(h->port_num, ahci_init_cb, h, get_default_waitset());
    if (err_is_fail(err)) {
        printf("ahci_init failed: '%s'\n", err_getstring(err));
        h->waiting = false;
        return err;
    }

    // XXX: block for command completion (broken API!)
    while (h->waiting) {
        messages_wait_and_handle_next();
    }

    VFS_BLK_DEBUG("blockdevfs_ahci_open: exiting\n");
    return h->wait_status;
}
Example #20
static void dma_exec_call_rx(struct xeon_phi_dma_binding *_binding,
                             uint64_t src,
                             uint64_t dst,
                             uint64_t length)
{
    XDMAV_DEBUG("memcopy request [0x%016lx]->[0x%016lx] of size 0x%lx\n",
                           src, dst, length);

    struct dma_exec_resp_st st;

    st.b = _binding;
    st.sent = 0x0;
    lpaddr_t dma_src = xdma_mem_verify(_binding, src, length);
    lpaddr_t dma_dst = xdma_mem_verify(_binding, dst, length);
    if (!dma_src || !dma_dst) {
        st.err = XEON_PHI_ERR_DMA_MEM_REGISTERED;
        st.id = 0;
#ifdef XDEBUG_DMA
        if (!dma_src) {
            XDMA_DEBUG("Memory range not registered: [0x%016lx] [0x%016lx]\n",
                       src, src+length);
        }
        if (!dma_dst) {
            XDMA_DEBUG("Memory range not registered: [0x%016lx] [0x%016lx]\n",
                       dst, dst+length);
        }
#endif

        dma_exec_response_tx(&st);
        return;
    }

    /*
     * DMA transfers from host to host are not supported.
     */
    if (dma_src > XEON_PHI_SYSMEM_BASE && dma_dst > XEON_PHI_SYSMEM_BASE) {
        st.err = XEON_PHI_ERR_DMA_NOT_SUPPORTED;
        st.id = 0;
        dma_exec_response_tx(&st);
        return;
    }

    struct dma_req_setup setup = {
        .type = XDMA_REQ_TYPE_MEMCPY,
        .st = _binding,
        .cb = dma_service_send_done,
    };
    setup.info.mem.src = dma_src;
    setup.info.mem.dst = dma_dst;
    setup.info.mem.bytes = length;
    setup.info.mem.dma_id = &st.id;

    struct xeon_phi *phi = xdma_mem_get_phi(_binding);

    st.err = dma_do_request(phi, &setup);

    dma_exec_response_tx(&st);

    /*
     * XXX: we must wait until the message has been sent, otherwise we may
     *      trigger sending the done message when we poll in the main message
     *      loop. This causes the client library to receive a done message
     *      of an invalid id.
     */
    volatile uint8_t *sent_flag = &st.sent;
    while(!(*sent_flag)) {
        messages_wait_and_handle_next();
    }
}
Example #21
int main (int argc, char* argv[]) 
{    
    errval_t err;
    my_coreid = disp_get_core_id();

    exported  = false;
    connected = false;
    start_sending_caps  = false;
    start_retyping_caps = false;

    // munge up a bunch of caps
    create_caps();

    assert (argc >= 2);
    if (!strncmp(argv[1], "client", sizeof("client"))) {
        is_bsp = false;
        assert (argc >= 3);
        num_cores = atoi(argv[2]);
        ram_hack();
    } else {
        is_bsp = true;
        num_cores = atoi(argv[1]);
    }

    // export our binding
    exported = false;
    err = xcorecapbench_export(NULL, export_cb, connect_cb, 
                               get_default_waitset(), IDC_EXPORT_FLAGS_DEFAULT);
    while(!exported) {
        messages_wait_and_handle_next();
    }

    if (is_bsp) {
        wait_for = num_cores;
        // spawn other cores
        printf("Core %d starting xcorecapbench on %i cores\n", my_coreid, 
               num_cores);
        assert(disp_get_core_id() == 0);
        spawn_other_cores(argc, argv);
    } else {
        printf("Starting xcorecapbench on core %i \n", my_coreid);
    }

    // connect to other cores
    connected = false;
    for(int i=0; i<num_cores; i++) {
        if (i != my_coreid) {
            bind_core(i);
        }
    }
    while(!connected) {
        messages_wait_and_handle_next();
    }

    if (is_bsp) {
        wait_for--;
        // wait for cores to connect
        while(wait_for) {
            messages_wait_and_handle_next();
        }
    } else {
        err = bindings[0]->tx_vtbl.barrier_done(bindings[0], NOP_CONT, 0);
        assert(err_is_ok(err));
    }

    if (is_bsp) {
        do_experiment_bsp();
    } else {
        do_experiment_client();
    }

    messages_handler_loop();

    return 0;
}
Example #22
/**
 * \brief initializes the XOMP worker library
 *
 * \param wid   Xomp worker id
 *
 * \returns SYS_ERR_OK on success
 *          errval on failure
 */
errval_t xomp_worker_init(xomp_wid_t wid)
{
    errval_t err;

    worker_id = wid;

    XWI_DEBUG("initializing worker {%016lx} iref:%u\n", worker_id, svc_iref);

#if XOMP_BENCH_WORKER_EN
    bench_init();
#endif

    struct capref frame = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_ARGCN
    };

    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err_push(err, XOMP_ERR_INVALID_MSG_FRAME);
    }

    size_t frame_size = 0;

    if (svc_iref) {
        frame_size = XOMP_TLS_SIZE;
    } else {
        frame_size = XOMP_FRAME_SIZE;
        err = spawn_symval_cache_init(0);
        if (err_is_fail(err)) {
            return err;
        }
    }

    if ((1UL << id.bits) < XOMP_TLS_SIZE) {
        return XOMP_ERR_INVALID_MSG_FRAME;
    }

    msgframe = frame;

    err = vspace_map_one_frame(&msgbuf, frame_size, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }
    if (svc_iref) {
        tls = msgbuf;
    } else {
        tls = ((uint8_t *) msgbuf) + XOMP_MSG_FRAME_SIZE;
    }

    XWI_DEBUG("messaging frame mapped: [%016lx] @ [%016lx]\n", id.base,
              (lvaddr_t )msgbuf);

    struct bomp_thread_local_data *tlsinfo = malloc(sizeof(*tlsinfo));
    tlsinfo->thr = thread_self();
    tlsinfo->work = (struct bomp_work *) tls;
    tlsinfo->work->data = tlsinfo->work + 1;
    g_bomp_state->backend.set_tls(tlsinfo);

#ifdef __k1om__
    if (worker_id & XOMP_WID_GATEWAY_FLAG) {
        err = xomp_gateway_init();
    } else {
        if (!svc_iref) {
            err = xomp_gateway_bind_svc();
        } else {
            err = SYS_ERR_OK;
        }
    }
    if (err_is_fail(err)) {
        return err;
    }
#endif

#ifdef __k1om__
    if (!svc_iref) {
        err = xeon_phi_client_init(disp_xeon_phi_id());
        if (err_is_fail(err)) {
            return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
        }

        xeon_phi_client_set_callbacks(&callbacks);
    }
#endif

    struct waitset *ws = get_default_waitset();

// XXX: disabling DMA on the host as there is no replication used at this moment
#if XOMP_WORKER_ENABLE_DMA && defined(__k1om__)
    /* XXX: use lib numa */

#ifndef __k1om__
    uint8_t numanode = 0;
    if (disp_get_core_id() > 20) {
        numanode = 1;
    }

    err = dma_manager_wait_for_driver(dma_device_type, numanode);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not wait for the DMA driver");
    }
#endif
    char svc_name[30];
#ifdef __k1om__
    snprintf(svc_name, 30, "%s", XEON_PHI_DMA_SERVICE_NAME);
#else
    snprintf(svc_name, 30, "%s.%u", IOAT_DMA_SERVICE_NAME, numanode);
#endif

    struct dma_client_info dma_info = {
        .type = DMA_CLIENT_INFO_TYPE_NAME,
        .device_type = dma_device_type,
        .args.name = svc_name
    };
    err = dma_client_device_init(&dma_info, &dma_dev);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "DMA device initialization");
    }
#endif

    if (svc_iref) {
        err = xomp_bind(svc_iref, master_bind_cb, NULL, ws,
                        IDC_EXPORT_FLAGS_DEFAULT);
    } else {
        struct xomp_frameinfo fi = {
            .sendbase = id.base,
            .inbuf = ((uint8_t *) msgbuf) + XOMP_MSG_CHAN_SIZE,
            .inbufsize = XOMP_MSG_CHAN_SIZE,
            .outbuf = ((uint8_t *) msgbuf),
            .outbufsize = XOMP_MSG_CHAN_SIZE
        };
        err = xomp_connect(&fi, master_bind_cb, NULL, ws,
        IDC_EXPORT_FLAGS_DEFAULT);
    }

    if (err_is_fail(err)) {
        /* TODO: Clean up */
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }

    XWI_DEBUG("Waiting until bound to master...\n");

    while (!is_bound) {
        messages_wait_and_handle_next();
    }

    if (xbinding == NULL) {
        return XOMP_ERR_WORKER_INIT_FAILED;
    }

    return SYS_ERR_OK;
}
Example #23
errval_t send_message(char *msg, size_t size, coreid_t dest)
{
    assert(barray[dest] != NULL);
    struct rcce_state *st = barray[dest]->st;
    errval_t err;

#ifdef RCCE_PERF_MEASURE
    dispatcher_handle_t handle = curdispatcher();
    struct dispatcher_shared_generic* d =
        get_dispatcher_shared_generic(handle);
#endif

    dprintf("%d: S(%lu,%d,%p,%d)\n", my_core_id, size, dest, st, st->waitmsg);

#ifdef BULK_TRANSFER_ENABLED
    // XXX: Assert we can always send a big buffer as bulk data for performance
    // reasons
    if(size > BLOCK_SIZE) {
        /* printf("size = %lu, BLOCK_SIZE = %u\n", size, BLOCK_SIZE); */
    }
    //    assert(size <= BLOCK_SIZE);
#endif

    PERF(0);

    // Wait til previous message has been processed by receiver
#ifdef BULK_TRANSFER_ENABLED
    while(st->waitmsg || st->bulk_waitmsg || !st->recv_ready) {
#else
    while(st->waitmsg) {
#endif
        dprintf("waiting\n");
        messages_wait_and_handle_next();
    }
    st->recv_ready = false;

    PERF(1);

#ifndef BULK_TRANSFER_ENABLED
    st->waitmsg = true;
    // Send via UMP
    st->request_done = false;
    PERF(2);
    err = barray[dest]->
        tx_vtbl.message_request(barray[dest], MKCONT(message_request_cont,st),
                                my_core_id, (uint8_t *)msg, size);
    assert(err_is_ok(err));
    PERF(16);
    while(!st->request_done) {
        /* printf("%d: handling\n", my_core_id); */
        messages_wait_and_handle_next();
    }
    PERF(17);
#else
    /* printf("recv ready, sending %d\n", msg[0]); */
    // Send via bulk transfer
    for(size_t i = 0; i < size; i += BLOCK_SIZE) {
        struct bulk_buf *bb = bulk_alloc(&st->bt);
        assert(bb != NULL);
        void *buf = bulk_buf_get_mem(bb);
        size_t sendsize = i + BLOCK_SIZE < size ? BLOCK_SIZE : size - i;
        bool last_fragment = i + BLOCK_SIZE < size ? false : true;

        memcpy(buf, msg + i, sendsize);
        char *bf = buf;
        /* printf("send to %p (%d), msg = %p, i = %lu, sendsize = %lu\n", buf, bf[0], msg, i, sendsize); */
        uintptr_t id = bulk_prepare_send(bb);
        st->bulk_waitmsg = true;
        err = barray[dest]->tx_vtbl.
            bulk_message_request(barray[dest], NOP_CONT, my_core_id, id,
                                 size, last_fragment);
        assert(err_is_ok(err));
        while(st->bulk_waitmsg) {
            dprintf("waiting for bulk reply\n");
            messages_wait_and_handle_next();
        }
    }
#endif

    return SYS_ERR_OK;
}

static void wait(void)
{
    while (!round) {
        messages_wait_and_handle_next();
    }
    round = false;
}

void barrier_wait(void)
{
    switch(my_role) {
    case MSG_WAIT_MSG_WAIT:
        message();
        wait();
        message();
        wait();
        break;

    case WAIT_MSG_WAIT_MSG:
        wait();
        message();
        wait();
        message();
        break;

    default:
        assert(!"should not get here");
    }
}

void barrier_binding_init(struct rcce_binding *binding)
{
    binding->rx_vtbl.ring_request = ring_request;
    binding->rx_vtbl.ring_reply   = ring_reply;
    binding->rx_vtbl.message_request = message_request;
    binding->rx_vtbl.message_reply = message_reply;
#ifdef BULK_TRANSFER_ENABLED
    binding->rx_vtbl.bulk_message_request = bulk_message_request;
    binding->rx_vtbl.bulk_message_reply = bulk_message_reply;
    binding->rx_vtbl.bulk_recv_ready = bulk_recv_ready;
#endif
}
Example #24
errval_t blockdevfs_ahci_read(void *handle, size_t pos, void *buffer, size_t
        bytes, size_t *bytes_read)
{
    errval_t err;
    struct ahci_handle *h = handle;
    size_t aligned_bytes = bytes / PR_SIZE * PR_SIZE;

    VFS_BLK_DEBUG("bdfs_ahci: read begin: %zu -> %zu\n", bytes, aligned_bytes);

    // setup DMA regions for receiving data
    struct ahci_dma_region *bufregion = NULL;
    err = ahci_dma_region_alloc(aligned_bytes, &bufregion);
    if (err_is_fail(err)) {
        printf("bdfs_ahci: read alloc_region failed: 0x%" PRIxPTR  "\n", err);
        return err;
    }
    VFS_BLK_DEBUG("bdfs_ahci_read: bufregion = %p\n", bufregion);
    VFS_BLK_DEBUG("bdfs_ahci_read: bufregion->vaddr = %p\n", bufregion->vaddr);

    // setup FIS
    struct sata_fis_reg_h2d read_fis;
    memset(&read_fis, 0, sizeof(struct sata_fis_reg_h2d));
    read_fis.type = SATA_FIS_TYPE_H2D;
    read_fis.device = 1 << 6; // LBA mode, not CHS; ???
    sata_set_command(&read_fis, 0xC8); // read dma; ATA Command Set, 7.24
    sata_set_count(&read_fis, aligned_bytes / PR_SIZE); // nr. of sectors/blocks
    sata_set_lba28(&read_fis, pos / PR_SIZE);

    // set handle to waiting
    h->waiting = true;
    h->wait_status = SYS_ERR_OK;
    h->bytes_transferred = 0;
    h->binding->rx_vtbl.command_completed = rx_read_command_completed_cb;

    // load fis and fire commands
    err = ahci_issue_command(h->binding, NOP_CONT, 0,
            (uint8_t*)&read_fis, sizeof(read_fis), false, bufregion, aligned_bytes);
    if (err_is_fail(err)) {
        printf("bdfs_ahci: read load_fis failed: 0x%" PRIxPTR  "\n", err);
        h->waiting = false;
        goto cleanup;
    }

    // XXX: block for command completion (broken API!)
    while (h->waiting) {
        messages_wait_and_handle_next();
    }

    VFS_BLK_DEBUG("bdfs_ahci: read wait status: %lu\n", h->wait_status);

    err = h->wait_status;

    // cleanup and output
    if (err_is_ok(err)) {
        ahci_dma_region_copy_out(bufregion, buffer, 0, aligned_bytes);
        *bytes_read = aligned_bytes;
    }

cleanup:

    h->binding->rx_vtbl.command_completed = NULL;

    VFS_BLK_DEBUG("read: freeing bufregion (%p)\n", bufregion);
    ahci_dma_region_free(bufregion);

    return err;

}
Example #25
void wait_for_connection(void)
{
    while (!connected) {
        messages_wait_and_handle_next();
    }
}
Example #26
static inline void
wait_for_condition (void) {
    while (wait_cond) {
        messages_wait_and_handle_next();
    }
}
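
wait_cond is expected to be cleared by a message handler that runs inside messages_wait_and_handle_next(); a minimal sketch, with a hypothetical binding type and handler name (not taken from the original source):

static volatile bool wait_cond = true;

// Hypothetical rx handler: clearing the flag lets wait_for_condition() return.
static void reply_handler(struct example_binding *b)
{
    wait_cond = false;
}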
Example #27
int main(int argc, char *argv[])
{
    oct_init();
    errval_t err = SYS_ERR_OK;
    octopus_trigger_id_t tid;
    size_t received = 0;

    err = oct_set("obj1 { attr: 1 }");
    ASSERT_ERR_OK(err);
    err = oct_set("obj2 { attr: 2 }");
    ASSERT_ERR_OK(err);
    err = oct_set("obj3 { attr: 3 }");
    ASSERT_ERR_OK(err);

    struct octopus_thc_client_binding_t* c = oct_get_thc_client();

    octopus_trigger_t record_deleted = oct_mktrigger(SYS_ERR_OK,
            octopus_BINDING_EVENT, OCT_ON_DEL, trigger_handler, &received);

    errval_t error_code = SYS_ERR_OK;
    char* output = NULL;
    err = c->call_seq.get(c, "r'^obj.$' { attr: 3 } ", record_deleted, &output,
            &tid, &error_code);
    ASSERT_ERR_OK(err);
    ASSERT_ERR_OK(error_code);
    ASSERT_STRING(output, "obj3 { attr: 3 }");
    debug_printf("tid is: %lu\n", tid);
    free(output);

    oct_del("obj3");
    while (received != 1) {
        messages_wait_and_handle_next();
    }

    received = 0;
    tid = 0;
    octopus_mode_t m = OCT_ON_SET | OCT_ON_DEL | OCT_PERSIST;
    octopus_trigger_t ptrigger = oct_mktrigger(SYS_ERR_OK,
            octopus_BINDING_EVENT, m, persistent_trigger, &received);
    output = NULL;
    err = c->call_seq.get(c, "obj2", ptrigger, &output,
            &tid, &error_code);
    ASSERT_ERR_OK(err);
    ASSERT_ERR_OK(error_code);
    debug_printf("tid is: %lu\n", tid);
    ASSERT_STRING(output, "obj2 { attr: 2 }");

    oct_del("obj2");
    while (received != 1) {
        messages_wait_and_handle_next();
    }

    received = 0;
    oct_set("obj2 { attr: 'asdf' }");
    while (received != 1) {
        messages_wait_and_handle_next();
    }

    received = 0;
    err = oct_remove_trigger(tid);
    DEBUG_ERR(err, "remove trigger");
    ASSERT_ERR_OK(err);
    while (received != 1) {
        messages_wait_and_handle_next();
    }

    printf("d2trigger SUCCESS!\n");
    return EXIT_SUCCESS;
}
Example #28
int RCCE_APP(int argc, char **argv){
  int YOU, ME, round;
  uint64_t timer = 0, sum = 0;

  int core1 = atoi(argv[3]);
  int core2 = atoi(argv[4]);

  RCCE_init(&argc, &argv);

  RCCE_debug_set(RCCE_DEBUG_ALL);
  ME = RCCE_ue();
  printf("Core %d passed RCCE_init\n", ME);
  if (RCCE_num_ues() != 2) {
    if (!ME) printf("Ping pong needs exactly two UEs; try again\n");
    return(1);
  }
  YOU = !ME;

  // synchronize before starting the timer
  RCCE_barrier(&RCCE_COMM_WORLD);

  struct rcce_ump_ipi_binding *ob;

  if(disp_get_core_id() == core1) {
      ob = (struct rcce_ump_ipi_binding *)barray[core2];
  } else {
      ob = (struct rcce_ump_ipi_binding *)barray[core1];
  }
  errval_t err = lmp_endpoint_deregister(ob->ipi_notify.iep);

  struct event_closure cl = {
      .handler = rck_ping_handler,
      .arg = NULL
  };

  for (;;) {
    for (round = 0; round < MAXROUND; round++) {
      if (ME) {
        ipi_notify_raise(&ob->ipi_notify);
        /* RCCE_send(buffer, BUFSIZE, YOU); */

        err = ipi_notify_register(&ob->ipi_notify, get_default_waitset(), cl);
        assert(err_is_ok(err));
        req_done = false;
        while (!req_done) {
          messages_wait_and_handle_next();
        }
        /* RCCE_recv(buffer, BUFSIZE, YOU); */
      } else {
        timer = rdtsc();

        err = ipi_notify_register(&ob->ipi_notify, get_default_waitset(), cl);
        assert(err_is_ok(err));
        req_done = false;
        while (!req_done) {
          messages_wait_and_handle_next();
        }
        /* RCCE_recv(buffer, BUFSIZE, YOU); */

        ipi_notify_raise(&ob->ipi_notify);
        /* RCCE_send(buffer, BUFSIZE, YOU); */

        sum += rdtsc() - timer;

        if (round % ROUNDS_PER_SLICE == 0) {
          yield_timeslices = 10;
          thread_yield();
          yield_timeslices = 0;
        }
      }
    }
    if (!ME) printf("RTL = %"PRIu64"\n", sum/MAXROUND);
    sum = 0;
  }

  return(0);
}
Example #29
File: main.c Project: 8l/barrelfish
/**
 * \brief Initialize monitor running on bsp core
 */
static errval_t boot_bsp_core(int argc, char *argv[])
{
    errval_t err;

    // First argument contains the bootinfo location
    bi = (struct bootinfo*)strtol(argv[1], NULL, 10);

    bsp_monitor = true;

    err = monitor_client_setup_mem_serv();
    assert(err_is_ok(err));

    /* Wait for mem_serv to advertise its iref to us */
    while (mem_serv_iref == 0) {
        messages_wait_and_handle_next();
    }
    update_ram_alloc_binding = false;

    /* Can now connect to and use mem_serv */
    err = ram_alloc_set(NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_RAM_ALLOC_SET);
    }

    // Export ram_alloc service
    err = mon_ram_alloc_serve();
    assert(err_is_ok(err));

    /* Set up monitor rpc channel */
    err = monitor_rpc_init();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor rpc init failed");
        return err;
    }

    /* SKB needs vfs for ECLiPSe so we need to start ramfsd first... */
    err = spawn_domain("ramfsd");
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning ramfsd");
        return err;
    }
    // XXX: Wait for ramfsd to initialize
    while (ramfs_serv_iref == 0) {
        messages_wait_and_handle_next();
    }

    /* Spawn skb (new nameserver) before other domains */
    err = spawn_domain("skb");
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning skb");
        return err;
    }
    // XXX: Wait for name_server to initialize
    while (name_serv_iref == 0) {
        messages_wait_and_handle_next();
    }
#ifdef __k1om__
    char args[40];
    snprintf(args, sizeof(args), "0x%016lx 0x%02x", bi->host_msg,
             bi->host_msg_bits);
    char *mgr_argv[MAX_CMDLINE_ARGS + 1];
    spawn_tokenize_cmdargs(args, mgr_argv, ARRAY_LENGTH(mgr_argv));
    err = spawn_domain_with_args("xeon_phi", mgr_argv,environ);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning xeon_phi");
        return err;
    }
#endif

    /* Spawn boot domains in menu.lst */
    err = spawn_all_domains();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "spawn_all_domains failed");
        return err;
    }

    return SYS_ERR_OK;
}
Example #30
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * version that will create threads on a remote core with a variable stack size
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg, size_t stack_size)
{
    assert(core_id != disp_get_core_id());

    errval_t err;
    struct domain_state *domain_state = get_domain_state();
    struct monitor_binding *mb = get_monitor_binding();
    assert(domain_state != NULL);

    /* Set reply handler */
    mb->rx_vtbl.span_domain_reply = span_domain_reply;

    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();
    }

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref    = domain_state->iref;

    /* get the alignment of the morecore state */
    struct morecore_state *state = get_morecore_state();
    remote_core_state->pagesize = state->mmu_state.alignment;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        return LIB_ERR_THREAD_CREATE;
    }

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    span_domain_state->thread       = newthread;
    span_domain_state->core_id      = core_id;
    span_domain_state->callback     = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {
        .cnode = cnode_page,
        .slot = 0
    };

    /* Create new dispatcher frame */
    struct capref frame;
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
    lvaddr_t dispaddr;

    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp->fpu_trap = 1;
    disp_gen->core_id = span_domain_state->core_id;
    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);
    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

#ifdef __x86_64__
    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64
        = get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64
        = get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;
#endif

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));
    }

#if 0
    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));
        }
    }
#endif

    #if 0
    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);
    }
#endif
    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                     .arg = span_domain_state });

#if 1
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());
    }

    /* Free state */
    free(span_domain_state);
#endif

    return SYS_ERR_OK;
}