Esempio n. 1
0
/**
 * \brief Set up benchmark state for a newly established bench binding.
 *
 * Moves the binding onto a dedicated waitset, publishes it globally,
 * arms tracing between the multihop bench start/stop events and kicks
 * off the experiment.
 */
static void fsb_init_msg(struct bench_binding *b, coreid_t id)
{
    errval_t err;

    // switch the binding over to our private signalling waitset
    waitset_init(&signal_waitset);
    err = b->change_waitset(b, &signal_waitset);
    assert(err_is_ok(err));

    reply_received = true;
    binding = b;

#if CONFIG_TRACE
    // arm the trace window: capture from BENCH_START until BENCH_STOP
    err = trace_control(TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                    TRACE_EVENT_MULTIHOP_BENCH_START, 0),
            TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                    TRACE_EVENT_MULTIHOP_BENCH_STOP, 0), 0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_control failed");
    }
#endif

    // emit the start-of-benchmark trace event
    err = trace_event(TRACE_SUBSYS_MULTIHOP, TRACE_EVENT_MULTIHOP_BENCH_START,
            0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_event failed");
    }

    experiment();
}
Esempio n. 2
0
/**
 * \brief Initialize the domain library
 *
 * Registers an iref with the monitor to offer the interdisp service on
 * this core.
 *
 * NOTE(review): despite the historical claim that this call does not
 * block, it spins below until the interdisp export has completed (see
 * the XXX comment near the end).
 *
 * \return SYS_ERR_OK on success, LIB_ERR_MALLOC_FAIL if allocation of
 *         the domain state fails, or the error from interdisp_export.
 */
errval_t domain_init(void)
{
    errval_t err;
    struct domain_state *domain_state = malloc(sizeof(struct domain_state));
    if (!domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    // publish the state via the dispatcher before filling it in
    set_domain_state(domain_state);

    domain_state->iref = 0;
    domain_state->default_waitset_handler = NULL;
    domain_state->remote_wakeup_queue = NULL;
    waitset_chanstate_init(&domain_state->remote_wakeup_event,
                           CHANTYPE_EVENT_QUEUE);
    // no interdisp bindings to other cores exist yet
    for (int i = 0; i < MAX_CPUS; i++) {
        domain_state->b[i] = NULL;
    }

    waitset_init(&domain_state->interdisp_ws);
    // 'conditional' is flipped by the export callback once the iref
    // has been registered with the monitor
    domain_state->conditional = false;
    err = interdisp_export(NULL, server_listening, server_connected,
                           &domain_state->interdisp_ws, IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        return err;
    }

    // XXX: Wait for the export to finish before returning
    while(!domain_state->conditional) {
        messages_wait_and_handle_next();
    }

    return SYS_ERR_OK;
}
/**
 * \brief Initialize an arrakis RPC client over an existing binding.
 *
 * Sets up the client-side RPC state, moves the binding onto a private
 * waitset and installs the response and error handlers.
 */
errval_t arrakis_rpc_client_init(struct arrakis_rpc_client *rpc, struct arrakis_binding *binding)
{
    errval_t err;

    // client-side RPC bookkeeping
    rpc->b = binding;
    rpc->reply_present = false;
    rpc->rpc_in_progress = false;
    rpc->async_error = SYS_ERR_OK;
    waitset_init(&rpc->rpc_waitset);
    flounder_support_waitset_chanstate_init(&rpc->dummy_chanstate);
    rpc->vtbl = arrakis_rpc_vtbl;
    binding->st = rpc;

    // move the binding onto the client's private waitset
    err = binding->change_waitset(binding, &rpc->rpc_waitset);
    if (err_is_fail(err)) {
        waitset_destroy(&rpc->rpc_waitset);
        return err_push(err, FLOUNDER_ERR_CHANGE_WAITSET);
    }

    // response handler for the single RPC in this interface
    binding->rx_vtbl.spawn_arrakis_domain_response = arrakis_spawn_arrakis_domain__rpc_rx_handler;

    // asynchronous errors are routed to the RPC error handler
    binding->error_handler = arrakis_rpc_client_error;

    return SYS_ERR_OK;
}
Esempio n. 4
0
/**
 * \brief Initialize client state with default values.
 *
 * Allocates the read/write/config waitsets, resets all flags, callbacks
 * and bindings, creates the filter/trigger lists and installs the
 * default filters and triggers.
 */
static void struct_term_client_init(struct term_client *client)
{
    // one waitset per direction of the terminal connection
    client->read_ws = malloc(sizeof(struct waitset));
    assert(client->read_ws != NULL);
    waitset_init(client->read_ws);
    client->write_ws = malloc(sizeof(struct waitset));
    assert(client->write_ws != NULL);
    waitset_init(client->write_ws);
    client->conf_ws = malloc(sizeof(struct waitset));
    assert(client->conf_ws);
    waitset_init(client->conf_ws);
    client->connected = false;
    client->echo = true;
    client->line_mode = true;
    client->non_blocking_read = false;
    client->chars_cb = NULL;
    // was 'default_err_handler,' — comma operator in place of semicolon
    client->err_cb = default_err_handler;
    client->in_binding = NULL;
    client->out_binding = NULL;
    client->conf_binding = NULL;
    client->readbuf = NULL;
    collections_list_create(&client->input_filters, term_filter_free);
    collections_list_create(&client->output_filters, term_filter_free);
    collections_list_create(&client->echo_filters, term_filter_free);
    client->max_input_filter_id = 0;
    client->max_output_filter_id = 0;
    client->max_echo_filter_id = 0;
    collections_list_create(&client->triggers, term_trigger_free);
    client->max_trigger_id = 0;

    /* add default input filters */
    term_client_add_input_filter(client, term_filter_cr2lf);

    /* add default output filters */
    term_client_add_output_filter(client, term_filter_lf2crlf);

    /* add default echo filters */
    term_client_add_echo_filter(client, term_filter_ctrlhat);

    /* add default triggers */
    /* The user can not remove the kill trigger. */
    term_client_add_trigger_type(client, term_trigger_kill,
                                 TERM_TRIGGER_TYPE_BUILT_IN);
    term_client_add_trigger_type(client, term_trigger_int,
                                 TERM_TRIGGER_TYPE_USER);
}
Esempio n. 5
0
/**
 * \brief Create a thread on a (possibly remote) core.
 *
 * \param core_id     core the thread should run on
 * \param start_func  thread entry point
 * \param arg         argument passed to start_func
 * \param stacksize   stack size in bytes, or 0 for the default
 * \param newthread   if non-NULL, receives the new thread handle
 *
 * On the local core the thread is created directly. Otherwise a
 * request is sent over the interdisp binding and this call blocks on a
 * private waitset until the remote core replies.
 */
errval_t domain_thread_create_on_varstack(coreid_t core_id,
                                          thread_func_t start_func,
                                          void *arg, size_t stacksize,
                                          struct thread **newthread)
{
    if (disp_get_core_id() == core_id) {
        struct thread *th = NULL;
        if (stacksize == 0) {
            th = thread_create(start_func, arg);
        } else {
            th = thread_create_varstack(start_func, arg, stacksize);
        }
        if (th != NULL) {
            if (newthread) {
                *newthread = th;
            }
            return SYS_ERR_OK;
        } else {
            return LIB_ERR_THREAD_CREATE;
        }
    } else {
        struct domain_state *domain_state = get_domain_state();
        errval_t err;

        if (domain_state->b[core_id] == NULL) {
            return LIB_ERR_NO_SPANNED_DISP;
        }

        struct interdisp_binding *b = domain_state->b[core_id];
        struct create_thread_req *req = malloc(sizeof(*req));
        if (req == NULL) {
            // was unchecked: a failed malloc dereferenced NULL below
            return LIB_ERR_MALLOC_FAIL;
        }
        req->reply_received = false;
        // use special waitset to make sure loop exits properly.
        struct waitset ws, *old_ws = b->waitset;
        waitset_init(&ws);
        b->change_waitset(b, &ws);
        err = b->tx_vtbl.create_thread_request(b, NOP_CONT,
                                               (genvaddr_t)(uintptr_t)start_func,
                                               (genvaddr_t)(uintptr_t)arg,
                                               stacksize,
                                               (genvaddr_t)(lvaddr_t)req);
        if (err_is_fail(err)) {
            // restore the binding's waitset and free the request;
            // previously both were leaked on this path
            b->change_waitset(b, old_ws);
            free(req);
            return err;
        }

        while (!req->reply_received) {
            event_dispatch(&ws);
        }

        if (newthread) {
            *newthread = req->thread;
        }
        free(req);

        b->change_waitset(b, old_ws);

        return SYS_ERR_OK;
    }
}
/*
 * Init function
 */
 errval_t usb_manager_rpc_client_init(struct usb_manager_rpc_client *rpc, struct usb_manager_binding *binding)
{
    errval_t _err;
    
    // Setup state of RPC client object
    rpc->b = binding;
    rpc->reply_present = false;
    rpc->rpc_in_progress = false;
    rpc->async_error = SYS_ERR_OK;
    waitset_init(&(rpc->rpc_waitset));
    flounder_support_waitset_chanstate_init(&(rpc->dummy_chanstate));
    rpc->vtbl = usb_manager_rpc_vtbl;
    binding->st = rpc;
    
    // Change waitset on binding
    _err = ((binding->change_waitset)(binding, &(rpc->rpc_waitset)));
    if (err_is_fail(_err)) {
        waitset_destroy(&(rpc->rpc_waitset));
        return(err_push(_err, FLOUNDER_ERR_CHANGE_WAITSET));
    }
    
    // Set RX handlers on binding object for RPCs
    (binding->rx_vtbl).connect_response = usb_manager_connect__rpc_rx_handler;
    (binding->rx_vtbl).device_disconnect_notify_response = usb_manager_device_disconnect_notify__rpc_rx_handler;
    (binding->rx_vtbl).request_read_response = usb_manager_request_read__rpc_rx_handler;
    (binding->rx_vtbl).request_write_response = usb_manager_request_write__rpc_rx_handler;
    (binding->rx_vtbl).request_response = usb_manager_request__rpc_rx_handler;
    (binding->rx_vtbl).transfer_setup_response = usb_manager_transfer_setup__rpc_rx_handler;
    (binding->rx_vtbl).transfer_unsetup_response = usb_manager_transfer_unsetup__rpc_rx_handler;
    (binding->rx_vtbl).transfer_start_response = usb_manager_transfer_start__rpc_rx_handler;
    (binding->rx_vtbl).transfer_stop_response = usb_manager_transfer_stop__rpc_rx_handler;
    (binding->rx_vtbl).transfer_status_response = usb_manager_transfer_status__rpc_rx_handler;
    (binding->rx_vtbl).transfer_state_response = usb_manager_transfer_state__rpc_rx_handler;
    (binding->rx_vtbl).transfer_clear_stall_response = usb_manager_transfer_clear_stall__rpc_rx_handler;
    (binding->rx_vtbl).transfer_done_notify_response = usb_manager_transfer_done_notify__rpc_rx_handler;
    (binding->rx_vtbl).device_get_speed_response = usb_manager_device_get_speed__rpc_rx_handler;
    (binding->rx_vtbl).device_get_state_response = usb_manager_device_get_state__rpc_rx_handler;
    (binding->rx_vtbl).device_suspend_response = usb_manager_device_suspend__rpc_rx_handler;
    (binding->rx_vtbl).device_resume_response = usb_manager_device_resume__rpc_rx_handler;
    (binding->rx_vtbl).device_powersave_response = usb_manager_device_powersave__rpc_rx_handler;
    
    // Set error handler on binding object
    binding->error_handler = usb_manager_rpc_client_error;
    
    return(SYS_ERR_OK);
}
Esempio n. 7
0
errval_t domain_thread_join(struct thread *thread, int *retval)
{
    coreid_t core_id = thread->coreid;
    if (disp_get_core_id() == core_id) {
        return thread_join(thread, retval);
    } else {
        struct domain_state *domain_state = get_domain_state();
        errval_t err;

        if (domain_state->b[core_id] == NULL) {
            return LIB_ERR_NO_SPANNED_DISP;
        }

        struct interdisp_binding *b = domain_state->b[core_id];
        struct join_thread_req *req = malloc(sizeof(*req));
        req->reply_received = false;
        // use special waitset to make sure loop exits properly.
        struct waitset ws, *old_ws = b->waitset;
        waitset_init(&ws);
        b->change_waitset(b, &ws);
        err = b->tx_vtbl.join_thread_request(b, NOP_CONT,
                                             (genvaddr_t)(lvaddr_t)thread,
                                             (genvaddr_t)(lvaddr_t)req);
        if (err_is_fail(err)) {
            return err;
        }

        while (!req->reply_received) {
            event_dispatch(&ws);
        }
        // change waitset back
        b->change_waitset(b, old_ws);

        if (retval) {
            *retval = req->retval;
        }
        err = req->err;
        free(req);

        return err;
    }
}
Esempio n. 8
0
/**
 * \brief Create an epoll instance (POSIX epoll_create1 emulation).
 *
 * \param flags  accepted but currently ignored (e.g. EPOLL_CLOEXEC)
 *
 * \return new file descriptor on success, or -1 with errno set.
 */
int epoll_create1(int flags)
{
    struct fdtab_entry e;
    struct _epoll_fd *efd = malloc(sizeof(struct _epoll_fd));
    if (efd == NULL) {
        // was only an assert, which vanishes under NDEBUG
        errno = ENOMEM;
        return -1;
    }

    memset(efd, 0, sizeof(struct _epoll_fd));
    waitset_init(&efd->ws);

    e.type = FDTAB_TYPE_EPOLL_INSTANCE;
    e.handle = efd;
    e.inherited = false;
    e.epoll_fd = -1;

    int fd = fdtab_alloc(&e);
    POSIXCOMPAT_DEBUG("epoll_create1(%d) as fd %d\n", flags, fd);
    if (fd < 0) {
        // fd table exhausted: don't leak the epoll state (was leaked)
        free(efd);
        return -1;
    }
    return fd;
}
/*
 * Init function
 */
 errval_t acpi_rpc_client_init(struct acpi_rpc_client *rpc, struct acpi_binding *binding)
{
    errval_t _err;
    
    // Setup state of RPC client object
    rpc->b = binding;
    rpc->reply_present = false;
    rpc->rpc_in_progress = false;
    rpc->async_error = SYS_ERR_OK;
    waitset_init(&(rpc->rpc_waitset));
    flounder_support_waitset_chanstate_init(&(rpc->dummy_chanstate));
    rpc->vtbl = acpi_rpc_vtbl;
    binding->st = rpc;
    
    // Change waitset on binding
    _err = ((binding->change_waitset)(binding, &(rpc->rpc_waitset)));
    if (err_is_fail(_err)) {
        waitset_destroy(&(rpc->rpc_waitset));
        return(err_push(_err, FLOUNDER_ERR_CHANGE_WAITSET));
    }
    
    // Set RX handlers on binding object for RPCs
    (binding->rx_vtbl).get_pcie_confspace_response = acpi_get_pcie_confspace__rpc_rx_handler;
    (binding->rx_vtbl).read_irq_table_response = acpi_read_irq_table__rpc_rx_handler;
    (binding->rx_vtbl).set_device_irq_response = acpi_set_device_irq__rpc_rx_handler;
    (binding->rx_vtbl).enable_and_route_interrupt_response = acpi_enable_and_route_interrupt__rpc_rx_handler;
    (binding->rx_vtbl).reset_response = acpi_reset__rpc_rx_handler;
    (binding->rx_vtbl).sleep_response = acpi_sleep__rpc_rx_handler;
    (binding->rx_vtbl).get_vbe_bios_cap_response = acpi_get_vbe_bios_cap__rpc_rx_handler;
    (binding->rx_vtbl).mm_alloc_range_proxy_response = acpi_mm_alloc_range_proxy__rpc_rx_handler;
    (binding->rx_vtbl).mm_realloc_range_proxy_response = acpi_mm_realloc_range_proxy__rpc_rx_handler;
    (binding->rx_vtbl).mm_free_proxy_response = acpi_mm_free_proxy__rpc_rx_handler;
    
    // Set error handler on binding object
    binding->error_handler = acpi_rpc_client_error;
    
    return(SYS_ERR_OK);
}
Esempio n. 10
0
/**
 * \brief POSIX recv() emulation over Barrelfish file descriptors.
 *
 * For unix-domain sockets: blocks (on a private waitset) until data is
 * available unless the socket is non-blocking, then drains up to \p len
 * bytes from the receive list. LWIP sockets are forwarded to lwip_recv.
 *
 * \return number of bytes received, or -1 with errno set.
 */
ssize_t recv(int sockfd, void *buf, size_t len, int flags)
{
    struct fdtab_entry *e = fdtab_get(sockfd);

    switch(e->type) {
    case FDTAB_TYPE_UNIX_SOCKET:
        {
            struct _unix_socket *us = e->handle;

            // XXX: Don't support flags
            assert(flags == 0);

            thread_mutex_lock(&us->mutex);

            if(us->passive
               || us->u.active.mode != _UNIX_SOCKET_MODE_CONNECTED) {
                errno = ENOTCONN;
                thread_mutex_unlock(&us->mutex);
                return -1;
            }

            if(us->recv_buf_valid == 0) {
                // No more data
                if(us->nonblocking) {
                    errno = EAGAIN;
                    thread_mutex_unlock(&us->mutex);
                    return -1;
                } else {
                    // block on a private waitset until data arrives
                    struct waitset ws;
                    errval_t err;

                    waitset_init(&ws);

                    err = us->u.active.binding->change_waitset
                        (us->u.active.binding, &ws);
                    if(err_is_fail(err)) {
                        USER_PANIC_ERR(err, "change_waitset");
                    }

                    while(us->recv_buf_valid == 0) {
                        err = event_dispatch(&ws);
                        if(err_is_fail(err)) {
                            // message previously said "waitset_destroy"
                            USER_PANIC_ERR(err, "event_dispatch");
                        }
                    }

                    // XXX: Assume it was on the default waitset
                    err = us->u.active.binding->change_waitset
                        (us->u.active.binding, get_default_waitset());
                    if(err_is_fail(err)) {
                        USER_PANIC_ERR(err, "change_waitset");
                    }

                    err = waitset_destroy(&ws);
                    if(err_is_fail(err)) {
                        USER_PANIC_ERR(err, "waitset_destroy");
                    }
                }
            }

            // drain queued messages into the caller's buffer
            size_t recved = 0;
            while(recved < len && us->recv_list != NULL) {
                struct _unix_socket_recv *usr = us->recv_list;
                size_t consume = MIN(len - recved, usr->size - usr->consumed);

                // cast: arithmetic on void * is a GNU extension
                memcpy((char *)buf + recved, &usr->msg[usr->consumed], consume);
                usr->consumed += consume;
                us->recv_buf_valid -= consume;
                recved += consume;

                if(usr->consumed == usr->size) {
                    // message fully consumed: unlink and free it
                    us->recv_list = usr->next;
                    if(us->recv_list == NULL) {
                        us->recv_list_end = NULL;
                    }
                    free(usr->msg);
                    free(usr);
                } else {
                    assert(recved == len);
                }
            }

            thread_mutex_unlock(&us->mutex);
            return recved;
        }

    case FDTAB_TYPE_LWIP_SOCKET:
        lwip_mutex_lock();
        ssize_t ret = lwip_recv(e->fd, buf, len, flags);
        lwip_mutex_unlock();
        return ret;

    case FDTAB_TYPE_AVAILABLE:
        errno = EBADF;
        return -1;

    default:
        errno = ENOTSOCK;
        return -1;
    }
}
Esempio n. 11
0
/**
 * \brief POSIX send() emulation over Barrelfish file descriptors.
 *
 * For unix-domain sockets the data is copied into a send buffer and
 * transmitted asynchronously; in blocking mode the call waits (on a
 * private waitset) until the transmit completes. LWIP sockets are
 * forwarded to lwip_send.
 *
 * \return number of bytes sent (all-or-nothing for unix sockets),
 *         or -1 with errno set.
 */
ssize_t send(int sockfd, const void *buf, size_t len, int flags)
{
    struct fdtab_entry *e = fdtab_get(sockfd);

    switch(e->type) {
    case FDTAB_TYPE_UNIX_SOCKET:
        {
            struct _unix_socket *us = e->handle;

            // XXX: Don't support flags
            assert(flags == 0);

            thread_mutex_lock(&us->mutex);

            if(us->passive
               || us->u.active.mode != _UNIX_SOCKET_MODE_CONNECTED) {
                errno = ENOTCONN;
                thread_mutex_unlock(&us->mutex);
                return -1;
            }

            // a previous send still in flight?
            if(us->send_buf != NULL) {
                if(us->nonblocking) {
                    errno = EAGAIN;
                    thread_mutex_unlock(&us->mutex);
                    return -1;
                } else {
                    assert(!"NYI");
                }
            }

            // Bleh. Gotta copy here. I can't just wait until the
            // message is fully sent, as that might block
            // indefinitely.
            us->send_buf = malloc(len);
            if(us->send_buf == NULL) {
                // was unchecked: a failed malloc led to memcpy(NULL, ...)
                errno = ENOMEM;
                thread_mutex_unlock(&us->mutex);
                return -1;
            }
            memcpy(us->send_buf, buf, len);

            struct event_closure ec = {
                .handler = unixsock_sent,
                .arg = us,
            };
            errval_t err = us->u.active.binding->tx_vtbl.
                send(us->u.active.binding, ec, us->send_buf, len);
            if(err_is_fail(err)) {
                USER_PANIC_ERR(err, "unixsock->send");
                thread_mutex_unlock(&us->mutex);
                return -1;
            }

            // Wait until all data sent if blocking
            if(!us->nonblocking) {
                struct waitset ws;
                waitset_init(&ws);

                err = us->u.active.binding->change_waitset
                    (us->u.active.binding, &ws);
                if(err_is_fail(err)) {
                    USER_PANIC_ERR(err, "change_waitset");
                }

                while(us->send_buf != NULL) {
                    err = event_dispatch(&ws);
                    if(err_is_fail(err)) {
                        // message previously said "waitset_destroy"
                        USER_PANIC_ERR(err, "event_dispatch");
                    }
                }

                // XXX: Assume it was on the default waitset
                err = us->u.active.binding->change_waitset
                    (us->u.active.binding, get_default_waitset());
                if(err_is_fail(err)) {
                    USER_PANIC_ERR(err, "change_waitset");
                }

                err = waitset_destroy(&ws);
                if(err_is_fail(err)) {
                    USER_PANIC_ERR(err, "waitset_destroy");
                }
            }

            // XXX: We send all or nothing
            thread_mutex_unlock(&us->mutex);
            return len;
        }

    case FDTAB_TYPE_LWIP_SOCKET:
        lwip_mutex_lock();
        ssize_t ret = lwip_send(e->fd, buf, len, flags);
        lwip_mutex_unlock();
        return ret;

    case FDTAB_TYPE_AVAILABLE:
        errno = EBADF;
        return -1;

    default:
        errno = ENOTSOCK;
        return -1;
    }
}
Esempio n. 12
0
/**
 * \brief Message handler loop for a BOMP worker thread.
 *
 * \param arg  pointer to the worker's message frame; the in/out buffers
 *             of size BOMP_CHANNEL_SIZE are derived from it
 *
 * \return -1 if TLS allocation fails, an error code if connecting the
 *         channel fails; otherwise the dispatch loop never returns.
 */
static int bomp_thread_msg_handler(void *arg)
{
    errval_t err;

    struct bomp_tls *tls = calloc(1, sizeof(struct bomp_tls));
    if (tls == NULL) {
        BOMP_ERROR("Could not allocate memory for TLS. %p\n", arg);
        return -1;
    }

    BOMP_DEBUG_THREAD("thread message handler started %p\n", tls);

    tls->role = BOMP_THREAD_ROLE_WORKER;
    tls->self = thread_self();
    tls->r.thread.coreid = disp_get_core_id();
    tls->r.thread.msgbuf = arg;
    tls->r.thread.tls = tls;

    // private waitset for this worker's channel events
    struct waitset local_waitset;
    struct waitset *ws = &local_waitset;
    waitset_init(ws);

    struct bomp_frameinfo fi = {
        .sendbase = (lpaddr_t)arg,
        .inbuf = ((uint8_t *) arg) + BOMP_CHANNEL_SIZE,
        .inbufsize = BOMP_CHANNEL_SIZE,
        .outbuf = ((uint8_t *) arg),
        .outbufsize = BOMP_CHANNEL_SIZE
    };

    err = bomp_connect(&fi, bomp_thread_connect_cb, &tls->r.thread, ws,
                       IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        // release the TLS block allocated above (was leaked here)
        free(tls);
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }

    thread_set_tls(tls);

    // dispatch loop; this never terminates, so the code below the
    // loop is unreachable
    while (1) {
        err = event_dispatch_non_block(ws);
        switch (err_no(err)) {
        case LIB_ERR_NO_EVENT:
            thread_yield();
            continue;
        case SYS_ERR_OK:
            continue;
        default:
            USER_PANIC_ERR(err, "event dispatch");
            break;
        }
    }

    BOMP_NOTICE("thread %" PRIuCOREID " terminated", disp_get_core_id());

    return 0;
}
Esempio n. 13
0
/**
 * \brief Bulk-channel callback: the peer assigned a pool to a channel.
 *
 * For the RX channel the pool is forwarded to the TX channel (unless
 * running in no-copy mode, where a private TX pool is allocated
 * instead). The dispatch loops below are ordering-sensitive — see the
 * inline race-condition notes.
 */
static errval_t cb_pool_assigned(struct bulk_channel *channel,
                                 struct bulk_pool *pool)
{
    if (channel == &rxc) {
        debug_printf("pool_assigned: RX %p [%d,%d,%d]\n", pool,
                     pool->id.machine, pool->id.dom, pool->id.local);
        //there is a race condition between the two channels, so we have to check it here
        while (txc.state != BULK_STATE_CONNECTED) {
            event_dispatch(txc.waitset);
        }
        wait_flag = 0;
        if (!is_no_copy) {
            expect_success(bulk_channel_assign_pool(&txc, pool, wait_cont));
            //XXX: there is still a possible race condition, if we are in receive master mode,
            //but in that case, we don't expect to get a pool over this channel anyway
            while (!wait_flag)
                event_dispatch(txc.waitset);       //wait until pool is assigned
        }
    } else {
        debug_printf("pool_assigned: TX %p [%d,%d,%d]\n", pool,
                     pool->id.machine, pool->id.dom, pool->id.local);
    }

    if (is_no_copy) {
        // no-copy mode: replies travel in buffers from our own TX pool,
        // allocated with the TX channel's trust level
        struct bulk_pool_constraints pool_constraints = {
            .range_min = 0,
            .range_max = 0,
            .alignment = 0,
            .trust = txc.trust, };
        expect_success(bulk_alloc_init(&txalloc, NUMBUFS, BUFSZ, &pool_constraints));
        DEBUG("TX Pool alloc: %p\n", txalloc.pool);

        wait_flag = 0;
        expect_success(bulk_channel_assign_pool(&txc, txalloc.pool, wait_cont));
        while (!wait_flag)
            event_dispatch(txc.waitset);       //wait until pool is assigned
    }

    return SYS_ERR_OK;
}

/**
 * \brief Bulk-channel callback: a buffer was moved to us on RX.
 *
 * Echoes the payload back on the TX channel with its counter value
 * incremented; in no-copy mode the reply uses a buffer from the
 * private TX allocator and the original buffer is passed back.
 */
static void cb_move_received(struct bulk_channel *channel,
                             struct bulk_buffer *buffer,
                             void *meta)
{
    static unsigned count = 0;
    DEBUG("move_received: %d b->p=%p\n", count, buffer->pool);
    count++;
    ctrl_value += *((uint32_t *) buffer->address);
    assert(channel == &rxc);

    if (!is_no_copy) {
        // in-place: bump the counter and move the same buffer back
        *((uint32_t *) buffer->address) = *((uint32_t *) buffer->address) + 1;
        expect_success(bulk_channel_move(&txc, buffer, meta, panic_cont));
        return;
    }

    if (NO_COPY_MOVE_BACK) {
        // reply in a fresh buffer from our own TX pool
        struct bulk_buffer *reply = bulk_alloc_new_buffer(&txalloc);
        assert(reply);
        if (NO_COPY_MOVE_BACK_WITH_COPY) {
            memcpy(reply->address, buffer->address,
                   buffer->pool->buffer_size);
        }
        *((uint32_t *) reply->address) = *((uint32_t *) buffer->address) + 1;
        expect_success(bulk_channel_move(&txc, reply, meta, panic_cont));
    }
    // hand the original buffer back to the RX channel
    expect_success(bulk_channel_pass(&rxc, buffer, meta, panic_cont));
}

/**
 * \brief Bulk-channel callback: the peer returned a buffer on TX.
 *
 * In no-copy mode the buffer came from our private allocator and is
 * recycled there; otherwise it is passed back to the RX channel.
 */
static void cb_buffer_received(struct bulk_channel *channel,
                               struct bulk_buffer *buffer,
                               void *meta)
{
    static unsigned count = 0;
    DEBUG("buffer_received: %d b->p=%p\n", count, buffer->pool);
    count++;
    assert(channel == &txc);

    if (is_no_copy) {
        expect_success(bulk_alloc_return_buffer(&txalloc, buffer));
        return;
    }
    expect_success(bulk_channel_pass(&rxc, buffer, meta, panic_cont));
}

static void init(void)
{
    static struct bulk_allocator rxalloc;
    struct bulk_buffer *buf;
    size_t i;
    debug_printf("init: enter\n");

    if (rxc.role == BULK_ROLE_MASTER) {
        // If we're in receive master mode, we need to allocate and pass buffers
        //set the trust level we want in our pool from the start
        struct bulk_pool_constraints pool_constraints = {
            .range_min = 0,
            .range_max = 0,
            .alignment = 0,
            .trust = rxc.trust, };
        expect_success(bulk_alloc_init(&rxalloc, NUMBUFS, BUFSZ, &pool_constraints));
        DEBUG("RX Pool alloc: %p\n", rxalloc.pool);

        wait_flag = 0;
        expect_success(bulk_channel_assign_pool(&rxc, rxalloc.pool, wait_cont));
        while (!wait_flag)
            event_dispatch(rxc.waitset);

        wait_flag = 0;
        expect_success(bulk_channel_assign_pool(&txc, rxalloc.pool, wait_cont));
        while (!wait_flag)
            event_dispatch(txc.waitset);

        for (i = 0; i < NUMBUFS; i++) {
            buf = bulk_alloc_new_buffer(&rxalloc);
            assert(buf != NULL);
            expect_success(bulk_channel_pass(&rxc, buf, NULL, panic_cont));
        }
    }
    debug_printf("init: done\n");
}

/* callback table shared by the RX and TX bulk channels */
static struct bulk_channel_callbacks cb = {
    .bind_received = cb_bind_received,
    .pool_assigned = cb_pool_assigned,
    .move_received = cb_move_received,
    .buffer_received = cb_buffer_received, };

int main(int argc, char *argv[])
{
    struct waitset *ws;
#if !USE_DEFWAITSET
    struct waitset l_ws;
    waitset_init(&l_ws);
    ws = &l_ws;
#else
    ws = get_default_waitset();
#endif

    bool rx_done = false, tx_done = false;

    debug_printf("bulk echo service starting\n");
    assert(argc == 3);
    debug_printf("Initialzing RX channel... [%s]\n", argv[1]);
    initialize_channel(argv[1], &rxc, &cb, ws, BULK_DIRECTION_RX, BUFSZ, 0,
                       &rx_done);
    debug_printf("Initialzing TX channel... [%s]\n", argv[2]);
    initialize_channel(argv[2], &txc, &cb, ws, BULK_DIRECTION_TX, BUFSZ, 0,
                       &tx_done);

    printf("Benchmark Server Ready!\n");
    while (!rx_done || !tx_done) {
        event_dispatch(ws);
    }

    init();
    while (1) {
        event_dispatch(ws);
    }

    return 0;
}