Example 1
static errval_t bind_to_octopus(void)
{
    errval_t err;
    struct bind_state st = { .done = false };
    err = octopus_bind(name_serv_iref, bind_continuation, &st,
            get_default_waitset(), IDC_BIND_FLAG_RPC_CAP_TRANSFER);
    while (!st.done) {
        err = event_dispatch(get_default_waitset());
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "ev_disp in bind_to_octopus");
        }
    }

    return st.err;
}
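
The code above leaves out the completion record and the bind continuation it waits on. A minimal sketch of what they might look like; the field names and the step that publishes the binding behind get_octopus_rpc_client() are assumptions inferred from the usage, not the actual implementation:

// Completion record shared between bind_to_octopus() and its callback
// (a sketch; the fields follow from the usage above).
struct bind_state {
    bool done;
    errval_t err;
};

// Bind continuation: record the outcome and release the dispatch loop
// in bind_to_octopus().
static void bind_continuation(void *st_arg, errval_t err,
                              struct octopus_binding *b)
{
    struct bind_state *st = st_arg;
    if (err_is_ok(err)) {
        // hypothetical setup step: wrap b in an RPC client so that
        // get_octopus_rpc_client() returns it from now on
    }
    st->err = err;
    st->done = true;
}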

size_t num_monitors_online(void)
{
    errval_t err;
    struct octopus_rpc_client *r = get_octopus_rpc_client();
    if (r == NULL) {
        err = bind_to_octopus();
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "bind_to_octopus");
            debug_printf("no connection to octopus, num_monitors=1\n");
            return 1;
        }
        r = get_octopus_rpc_client();
    }
    assert(r != NULL);

    char* buffer = NULL;
    errval_t error_code;
    octopus_trigger_id_t tid;

    char** names = NULL;
    size_t count = 0;

    static char* spawnds = "r'spawn.[0-9]+' { iref: _ }";
    err = r->vtbl.get_names(r, spawnds, NOP_TRIGGER, &buffer, &tid, &error_code);
    if (err_is_fail(err) || err_is_fail(error_code)) {
        err = err_push(err, SPAWN_ERR_FIND_SPAWNDS);
        goto out;
    }

    err = oct_parse_names(buffer, &names, &count);
    if (err_is_fail(err)) {
        goto out;
    }

out:
    free(buffer);
    oct_free_names(names, count);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "num_spawnds_online");
        debug_printf("error in octopus, setting num_monitors=1\n");
        return 1;
    }
    return count;
}
Example 2
errval_t blockdevfs_ata_init(void)
{
    errval_t err;
    iref_t iref;

    err = nameservice_blocking_lookup("ahcid", &iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "nameservice_blocking_lookup for ahcid");
        return err; // FIXME
    }

    err = ahci_mgmt_bind(iref, ahci_mgmt_bind_cb, NULL, get_default_waitset(),
                  IDC_BIND_FLAG_RPC_CAP_TRANSFER);

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "ahci_mgmt bind failed");
        return err; // FIXME
    }

    // init DMA pool
    ahci_dma_pool_init(1024*1024);

    // XXX: block for bind completion (broken API!)
    while (!ahci_mgmt_bound) {
        err = event_dispatch(get_default_waitset());
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "error in event_dispatch for blockdevfs_ata_init");
        }
    }

    return SYS_ERR_OK;
}
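
The loop at the end of blockdevfs_ata_init() spins until a callback flips ahci_mgmt_bound. A sketch of that callback, assuming it only needs to store the binding and set the flag (the names are taken from the usage above):

static bool ahci_mgmt_bound = false;

static void ahci_mgmt_bind_cb(void *st, errval_t err,
                              struct ahci_mgmt_binding *b)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "ahci_mgmt bind failed in callback");
    }
    // hypothetical: stash the binding for later requests, then release
    // the dispatch loop in blockdevfs_ata_init()
    ahci_mgmt_bound = true;
}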
Example 3
int main(int argc, char *argv[])
{
    errval_t err;

    /* Set my core id */
    my_core_id = disp_get_core_id();
    strcpy(my_name, argv[0]);

    printf("entered\n");
    bench_init();
    printf("bench_init done\n");

    if (argc == 1) { /* server */

        /*
         1. spawn domain,
         2. setup a server,
         3. wait for client to connect,
         4. run experiment
         */

        char *xargv[] = { my_name, "dummy", "dummy", "dummy", NULL };
        err = spawn_program(1, my_name, xargv, NULL, SPAWN_FLAGS_DEFAULT, NULL);
        assert(err_is_ok(err));

        /* Setup a server */
        err = bench_export(NULL, export_cb, connect_cb, get_default_waitset(),
                IDC_BIND_FLAGS_DEFAULT);
        assert(err_is_ok(err));

    } else {
        /* Connect to the server */

        printf("ns lookup\n");
        err = nameservice_blocking_lookup("multihop_server", &iref);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "nameservice_blocking_lookup failed");
            abort();
        }

        printf("bench_bind\n");
        // bind a first time for signaling
        err = bench_bind(iref, bind_signal_cb, NULL, get_default_waitset(),
                IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "bind failed");
            abort();
        }
    }
    messages_handler_loop();
    return 0;
}
Example 4
int main(int argc, char **argv)
{
    // this is the bootstrap copy of the domain
    if (strcmp(argv[argc - 1], "SpAwNeD") != 0) {
        bsp_datagatherer = true;
    } else {
        bsp_datagatherer = false;
    }

    core_id = disp_get_core_id();
    skb_client_connect();

#ifdef SPAWN_YOUR_SELF
    if (bsp_datagatherer) {
        spawnmyself();
    }
#endif

    // gather different types of data

    // run cpuid
    gather_cpuid_data(core_id);

    // get the number of running cores and their APIC IDs from the monitor
    if (bsp_datagatherer) {
        gather_nr_running_cores(get_monitor_binding());
    } else {
        nr_cores_done = true;
    }

    // Adding the number of cores is the last operation performed by the
    // datagatherer, so the domain can exit afterwards. Process events until
    // the number of cores has been added to the SKB.
    struct waitset *ws = get_default_waitset();
    while (!nr_cores_done) {
        errval_t err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch");
            break;
        }
    }

    skb_add_fact("datagatherer_done.");

    if (bsp_datagatherer) {
        // wait until every core's datagatherer has added its fact to the
        // SKB; starting length one above the target forces at least one query
        int length = nr_of_running_cores + 1;
        while (length != nr_of_running_cores) {
            skb_execute_query("findall(X, datagatherer_done, L),length(L,Len),write(Len).");
            skb_read_output("%d", &length);
            thread_yield();
        }


        errval_t err = nameservice_register("datagatherer_done", 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "nameservice_register failed");
        }
    }
    return 0;
}
Example 5
/**
 * \brief Blocking bind to the name service
 *
 * Should be called once only at init time on each dispatcher.
 */
errval_t nameservice_client_blocking_bind(void)
{
    errval_t err;

    struct bind_state st = { .done = false };

    /* fire off a request for the iref for the name service */
    struct monitor_binding *mb = get_monitor_binding();
    mb->rx_vtbl.get_name_iref_reply = get_name_iref_reply;
    err = mb->tx_vtbl.get_name_iref_request(mb, NOP_CONT, (uintptr_t)&st);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_GET_NAME_IREF);
    }

    /* block on the default waitset until we're bound */
    struct waitset *ws = get_default_waitset();
    while (!st.done) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_EVENT_DISPATCH);
        }
    }

    return st.err;
}
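
The request fired off above is answered through the get_name_iref_reply handler installed on the monitor binding. A minimal sketch of such a handler, assuming it only unpacks the state pointer and reports the result (the real one presumably also completes the bind to the returned iref before signalling):

static void get_name_iref_reply(struct monitor_binding *mb, iref_t iref,
                                uintptr_t st_arg)
{
    struct bind_state *st = (struct bind_state *)st_arg;
    // hypothetical error choice: treat a zero iref as lookup failure
    st->err = (iref != 0) ? SYS_ERR_OK : LIB_ERR_GET_NAME_IREF;
    st->done = true;
}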
Example 6
#ifdef LIBRARY
void e1000n_polling_loop(struct waitset *ws)
#else
void e1000n_polling_loop(void)
#endif
{
#ifndef LIBRARY
    struct waitset *ws = get_default_waitset();
#endif
    errval_t err;

    /* INITDEBUG("eventloop()\n"); */

    while (1) {
        err = event_dispatch_non_block(ws);
#ifdef LIBRARY
        check_for_free_txbufs();
#else
        do_pending_work_for_all();
#endif
        if(!use_interrupts) {
            check_for_new_packets(0);
            /* check_for_new_packets(1); */
            check_for_free_txbufs();
        }

#ifdef LIBRARY
        break;
#endif
    }
}
Example 7
static void alloc_iref_reply_cont(struct monitor_binding *b,
                                    uintptr_t service_id,
                                    iref_t iref, errval_t reterr)
{
    errval_t err;

    err = b->tx_vtbl.alloc_iref_reply(b, NOP_CONT, service_id, iref, reterr);
    if (err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct alloc_iref_reply_state *me =
                malloc(sizeof(struct alloc_iref_reply_state));
            assert(me != NULL);
            struct monitor_state *ist = b->st;
            assert(ist != NULL);
            me->args.service_id = service_id;
            me->args.iref = iref;
            me->args.err = reterr;
            me->b = b;
            me->elem.cont = alloc_iref_reply_handler;

            err = monitor_enqueue_send(b, &ist->queue,
                                       get_default_waitset(), &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err, "reply failed");
    }
}
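
This FLOUNDER_ERR_TX_BUSY path recurs in the monitor examples below (Examples 12, 15, 22, 23 and 26): the message arguments are parked in a heap-allocated record and a retry handler is enqueued on the binding's send queue. A sketch of the two pieces Example 7 assumes; the layout (queue element first, so the handler can cast back) and the handler name follow from the usage, the field types are assumptions, and a forward declaration of the handler above alloc_iref_reply_cont() is implied:

struct alloc_iref_reply_state {
    struct monitor_msg_queue_elem elem;  // must be first for the cast below
    struct {
        uintptr_t service_id;
        iref_t iref;
        errval_t err;
    } args;
    struct monitor_binding *b;
};

// Run by the send queue once the channel can transmit again: replay the
// original reply and free the parked state.
static void alloc_iref_reply_handler(struct monitor_binding *b,
                                     struct monitor_msg_queue_elem *e)
{
    struct alloc_iref_reply_state *st = (struct alloc_iref_reply_state *)e;
    alloc_iref_reply_cont(b, st->args.service_id, st->args.iref,
                          st->args.err);
    free(st);
}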
Example 8
static void handle_notification(void *arg)
{
    struct lmp_endpoint *ep = arg;
    errval_t err;

    do { // consume messages
        struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
        err = lmp_endpoint_recv(ep, &msg.buf, NULL);

        if (err_is_ok(err)) {
            if(msg.buf.msglen == 1) {
                domainid_t domid = msg.words[0];

                // XXX: This is done by spawnd now
                if (domid != 0) {
                    debug_printf("Dispatcher with domain ID %"PRIuDOMAINID" exited\n",
                                 domid);
                }
            } else if(msg.buf.msglen == sizeof(struct RAM) / sizeof(uintptr_t) + 1) {
#ifndef __arm__
                //defined(__x86_64__) || defined(__i386__)
                union rammsg {
                    uintptr_t msgwords[LMP_MSG_LENGTH];
                    struct RAM ram;
                } *u;
                u = (union rammsg *)&msg.words;

                /* printf("%s.%d: RAM cap deleted, base = %" PRIxGENPADDR ", bits = %u\n", */
                /*        disp_name(), disp_get_core_id(), ram->base, ram->bits); */

                err = reclaim_memory(u->ram.base, u->ram.bits);
                if(err_is_fail(err)) {
                    DEBUG_ERR(err, "reclaim_memory");
                }
#else
                /* XXX: Disabling memory reclamation on ARM. I
                 * couldn't get the compiler to accept the above code
                 * due to strict aliasing restrictions. I do believe
                 * though that the above is according to the C99
                 * spec. Please help fix it, so that it can be
                 * enabled.
                 */
#endif
            } else {
                printf("%s: Unknown kernel notification of length %zu received\n",
                       disp_name(), msg.buf.msglen);
            }
        } else if (err_no(err) != LIB_ERR_NO_LMP_MSG) {
            DEBUG_ERR(err, "unexpected error from lmp_endpoint_recv");
        }
    } while(err_is_ok(err));

    // re-register
    struct event_closure cl = {
        .handler = handle_notification,
        .arg = arg,
    };
    err = lmp_endpoint_register(ep, get_default_waitset(), cl);
    assert(err_is_ok(err));
}
Example 9
/* handler function that runs when we get a message from either the mem_serv or
 * monitor channels. once we've received caps from both, we send them to each
 * other's opposite to allow them to construct a proper point-to-point channel
 */
static void init_recv_handler(void *arg)
{
    struct lmp_chan *lc = arg;
    struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
    struct capref cap;
    errval_t err;

    err = lmp_chan_recv(lc, &msg, &cap);
    if (err_is_fail(err)) {
        if (lmp_err_is_transient(err)) {
            // re-register
            struct event_closure recv_handler = {
                .handler = init_recv_handler,
                .arg = arg,
            };
            struct waitset *ws = get_default_waitset();
            err = lmp_chan_register_recv(lc, ws, recv_handler);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "in lmp_chan_register_recv");
                abort();
            }
            // after re-registering, wait for the next event
            return;
        } else {
            DEBUG_ERR(err, "in lmp_chan_recv");
            abort();
        }
    }

    /* ... remainder of the handler (forwarding the received caps between
     * the two channels) is omitted in this example ... */
}
Example 10
static void
revoke_no_remote(struct revoke_master_st *st)
{
    assert(num_monitors_online() == 1);

    if (!delete_steps_get_waitset()) {
        delete_steps_init(get_default_waitset());
    }

    errval_t err;
    DEBUG_CAPOPS("%s\n", __FUNCTION__);

    // pause deletion steps
    DEBUG_CAPOPS("%s: delete_steps_pause()\n", __FUNCTION__);
    delete_steps_pause();

    // mark target of revoke
    DEBUG_CAPOPS("%s: mon_revoke_mark_tgt()\n", __FUNCTION__);
    err = monitor_revoke_mark_target(st->cap.croot,
                                     st->cap.cptr,
                                     st->cap.bits);
    PANIC_IF_ERR(err, "marking revoke");


    // resume delete steps
    DEBUG_CAPOPS("%s: delete_steps_resume()\n", __FUNCTION__);
    delete_steps_resume();

    // wait on delete queue, marking that remote cores are done
    st->remote_fin = true;
    DEBUG_CAPOPS("%s: delete_queue_wait()\n", __FUNCTION__);
    struct event_closure steps_fin_cont
        = MKCLOSURE(revoke_master_steps__fin, st);
    delete_queue_wait(&st->del_qn, steps_fin_cont);
}
Example 11
void domain_mgmt_init(void)
{
    errval_t err;

    /* Register notification endpoint with kernel */
    struct capref epcap;
    struct lmp_endpoint *notifyep;
    // XXX: This has to be huge so we can receive a batch of
    // notifications when deleting CNodes recursively.
    err = endpoint_create(100 * 12, &epcap, &notifyep);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed creating endpoint");
    }

    // register to receive on this endpoint
    struct event_closure cl = {
        .handler = handle_notification,
        .arg = notifyep,
    };
    err = lmp_endpoint_register(notifyep, get_default_waitset(), cl);
    assert(err_is_ok(err));

    err = invoke_monitor_register(epcap);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not register with kernel");
    }
    else {
#ifdef DEBUG_MONITOR_ALL
        debug_printf("monitor ep registered\n");
#endif
    }
}
Example 12
static void ipi_alloc_notify_reply_cont(struct monitor_binding *b,
                                        uintptr_t state,
                                        struct capref notify_cap,
                                        errval_t reterr)
{
    errval_t err =
        b->tx_vtbl.ipi_alloc_notify_reply(b, NOP_CONT, state,
                                          notify_cap, reterr);

    if(err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct monitor_state *st = b->st;
            struct ipi_alloc_notify_reply_state *me =
                malloc(sizeof(struct ipi_alloc_notify_reply_state));
            assert(me != NULL);
            me->args.state = state;
            me->args.notify = notify_cap;
            me->args.err = reterr;
            me->elem.cont = ipi_alloc_notify_reply_handler;
            err = monitor_enqueue_send(b, &st->queue,
                                       get_default_waitset(), &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }
        USER_PANIC_ERR(err, "sending reply");
    }
    assert(err_is_ok(err));
}
Example 13
void thc_await_send(struct thc_per_binding_state_t *thc,
                    void *f) {
  struct common_binding *c = (struct common_binding *)f;
  DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " > thc_await_send\n"));
  // Synchronize with thc_send_possible_event callback
  thc_lock_acquire(&thc->thc_binding_lock);

  // Request an event when sending is possible
  if (!thc->send_possible_event_requested) {
    errval_t err = c->register_send(c, 
                                    get_default_waitset(), 
                                    MKCONT(thc_send_possible_event, c));
    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
      goto done;
    }

    assert(err_is_ok(err));
    thc->send_possible_event_requested = 1;
  }
  
  // Wait
  //
  // We release the binding lock before blocking.  It is passed back to us
  // by the notification

  THCSuspendThen(&thc->waiting_sender, 
                 thc_await_send0, 
                 (void*) &thc->thc_binding_lock);

 done:
  thc_lock_release(&thc->thc_binding_lock);
  DEBUG_STUBS(DEBUGPRINTF(DEBUG_STUBS_PREFIX " < thc_await_send\n"));
}
Example 14
static void get_io_cap(struct monitor_blocking_binding *b)
{
    // XXX: We should not just hand out this cap to everyone
    // who requests it. There is currently no way to determine
    // if the client is a valid recipient
    errval_t err;
    struct capref src = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_IO
    };

    err = b->tx_vtbl.get_io_cap_response(b, NOP_CONT, src,
            SYS_ERR_OK);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT((void (*)(void *))get_io_cap, b));
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "register_send failed");
            }
            // the retry is queued; do not fall through to the panic below
            return;
        }

        USER_PANIC_ERR(err, "sending get_io_cap_response failed");
    }
}
Example 15
static void ms_multiboot_cap_request(struct monitor_binding *b, cslot_t slot)
{
    errval_t err1, err2;

    struct capref cap = {
        .cnode = cnode_module,
        .slot  = slot,
    };

    // Call frame_identify to check if cap exists
    struct frame_identity id;
    err1 = invoke_frame_identify(cap, &id);
    if (err_is_fail(err1)) {
        err2 = b->tx_vtbl.multiboot_cap_reply(b, NOP_CONT, NULL_CAP, err1);
    } else {
        err2 = b->tx_vtbl.multiboot_cap_reply(b, NOP_CONT, cap, err1);
    }
    if (err_is_fail(err2)) {
        if (err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            struct monitor_state *mon_state = b->st;
            struct multiboot_cap_state *ms =
                malloc(sizeof(struct multiboot_cap_state));
            assert(ms);
            ms->slot = slot;
            ms->elem.cont = ms_multiboot_cap_request_handler;
            err1 = monitor_enqueue_send(b, &mon_state->queue,
                                       get_default_waitset(), &ms->elem.queue);
            if (err_is_fail(err1)) {
                USER_PANIC_ERR(err1, "monitor_enqueue_send failed");
            }
        } else {
            USER_PANIC_ERR(err2, "sending multiboot_cap_reply failed");
        }
    }
}
Example 16
/**
 * \brief Connect the virtual serial port (UART) to a physical serial port
 *        on the host.
 *
 * \param user_data PC16550d driver struct.
 * \param host_uart Name of the host uart driver.
 */
void
pc16550d_attach_to_host_uart (struct pc16550d *user_data, const char *host_uart)
{
    assert(user_data != NULL);
    assert(host_uart != NULL);

    errval_t err;
    iref_t iref;

    // Initialization
    struct pc16550d_forward_uart *state =
        malloc(sizeof(struct pc16550d_forward_uart));
    assert(state != NULL);
    state->connected = false;
    state->ws = get_default_waitset();

    // Adjust PC16550d state
    user_data->forward_state = PC16550d_FORWARD_UART;
    user_data->forward_uart_state = state;

    // Bind to uart driver
    err = nameservice_lookup(host_uart, &iref);
    assert(err_is_ok(err));
    err = serial_bind(iref, serial_bind_cb, user_data, state->ws,
                      IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(err));

    // Dispatch the monitor binding until the bind completes.
    struct monitor_binding *monitor_b = get_monitor_binding();
    struct waitset *monitor_ws = monitor_b->waitset;
    while (!state->connected) {
        err = event_dispatch(monitor_ws);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "error in event_dispatch");
        }
    }
}
Example 17
// function to handle incoming mac address requests
static void get_mac_addr_qm(struct net_queue_manager_binding *cc,
        uint64_t queueid)
{
    struct q_entry entry;

    memset(&entry, 0, sizeof(struct q_entry));
    entry.handler = wrapper_send_mac_addr_response;
    entry.binding_ptr = (void *) cc;
    struct client_closure *ccl = (struct client_closure *) cc->st;
    assert(ccl->queueid == queueid);

    entry.plist[0] = queueid;
    entry.plist[1] = get_mac_addr_from_device();
    // plist contents: queueid, hwaddr

    struct waitset *ws = get_default_waitset();
    int passed_events = 0;
    while (can_enqueue_cont_q(ccl->q) == false) {
        if (passed_events > 5) {
            USER_PANIC("queue full, can't go further\n");
            // return CONT_ERR_NO_MORE_SLOTS;
        }
        event_dispatch_debug(ws);
        ++passed_events;
    }

    enqueue_cont_q(ccl->q, &entry);
}
Example 18
static void report_register_buffer_result(struct net_queue_manager_binding *cc,
        errval_t err, uint64_t queueid, uint64_t buffer_id)
{
    struct q_entry entry;
    memset(&entry, 0, sizeof(struct q_entry));
    entry.handler = send_new_buffer_id;
    entry.binding_ptr = (void *) cc;
    struct client_closure *ccl = (struct client_closure *) cc->st;

    entry.plist[0] = err;
    entry.plist[1] = queueid;
    entry.plist[2] = buffer_id;
    //   error, queue_id, buffer_id

    struct waitset *ws = get_default_waitset();
    int passed_events = 0;
    while (can_enqueue_cont_q(ccl->q) == false) {
        if (passed_events > 5) {
            USER_PANIC("queue full, can't go further\n");
            // return CONT_ERR_NO_MORE_SLOTS;
        }
        event_dispatch_debug(ws);
        ++passed_events;
    }

    enqueue_cont_q(ccl->q, &entry);
}
Example 19
static void connect(coreid_t idx)
{
    errval_t err;
    char id[100];
    snprintf(id, sizeof(id), "%s%d", my_name, idx);

    iref_t iref;
    err = nameservice_blocking_lookup(id, &iref);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "nameservice_blocking_lookup failed");
        abort();
    }
    assert(iref != 0);

    struct rcce_state *st = malloc(sizeof(struct rcce_state));
    assert(st != NULL);
    memset(st, 0, sizeof(struct rcce_state));
    st->index = idx;
    st->request_done = false;

    /* printf("%s: rcce_bind\n", my_name); */

    err = rcce_bind(iref, client_connected, st, get_default_waitset(),
                    IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(err));

    /* printf("%s: waiting\n", my_name); */

    while (!st->request_done) {
        messages_wait_and_handle_next();
    }

    /* printf("%s: done\n", my_name); */
}
Example 20
static void send_myrpc_response(void *a)
{
    errval_t err;
    struct server_state *st = (struct server_state*)a;

    debug_printf("server: sending myresponse\n");

    struct event_closure txcont = MKCONT(send_myrpc_response_cb, st);
    err = xmplrpc_myrpc_response__tx(st->b, txcont, st->s);

    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            debug_printf("server: re-sending myresponse\n");
            struct waitset *ws = get_default_waitset();
            txcont = MKCONT(send_myrpc_response, st);
            err = st->b->register_send(st->b, ws, txcont);
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                DEBUG_ERR(err, "register_send on binding failed!");
                free_st(st);
            }
        } else {
            DEBUG_ERR(err, "error sending mycall message\n");
            free_st(st);
        }
    }
}
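
A sketch of the send-completion callback assumed by this example; once the response is out, the per-call state can be released. free_st() is the same cleanup used on the error paths above:

static void send_myrpc_response_cb(void *a)
{
    struct server_state *st = (struct server_state *)a;
    debug_printf("server: myresponse sent\n");
    free_st(st);
}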
Example 21
static void send_myrpc_call(void *a)
{
    errval_t err;

    debug_printf("client: sending mycall\n");

    struct xmplrpc_binding *b = (struct xmplrpc_binding *)a;

    struct event_closure txcont = MKCONT(send_myrpc_call_cb, b);

    err = xmplrpc_myrpc_call__tx(b, txcont, 42);

    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            debug_printf("client: re-sending mycall\n");
            struct waitset *ws = get_default_waitset();
            txcont = MKCONT(send_myrpc_call, b);
            err = b->register_send(b, ws, txcont);
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                DEBUG_ERR(err, "register_send on binding failed!");
            }
        } else {
            DEBUG_ERR(err, "error sending mycall message\n");
        }
    }
}
Example 22
static void
new_monitor_binding_reply_cont(struct monitor_binding *b,
                               errval_t reterr, struct capref retcap,
                               uintptr_t st)
{
    errval_t err =
        b->tx_vtbl.new_monitor_binding_reply(b, NOP_CONT, reterr, retcap, st);

    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct monitor_state *ms = b->st;
            struct new_monitor_binding_reply_state *me =
                malloc(sizeof(struct new_monitor_binding_reply_state));
            assert(me != NULL);
            me->args.err = reterr;
            me->args.ep = retcap;
            me->args.st = st;
            me->elem.cont = new_monitor_binding_reply_handler;
            err = monitor_enqueue_send(b, &ms->queue,
                                       get_default_waitset(), &me->elem.queue);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_enqueue_send failed");
            }
            return;
        }

        USER_PANIC_ERR(err, "failed to send new_monitor_binding_reply");
    }
}
Example 23
static void bind_monitor_reply_scc_cont(struct intermon_binding *b,
                                        errval_t err, chanid_t chanid)
{
    errval_t err2;

    err2 = b->tx_vtbl.bind_monitor_reply_scc(b, NOP_CONT, err,
            chanid, my_core_id);
    if (err_is_fail(err2)) {
        if(err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_monitor_reply_scc_state *me =
                malloc(sizeof(struct bind_monitor_reply_scc_state));
            assert(me != NULL);
            struct intermon_state *ist = b->st;
            assert(ist != NULL);
            me->args.err = err;
            me->args.chan_id = chanid;
            me->elem.cont = bind_monitor_reply_scc_handler;

            err = intermon_enqueue_send(b, &ist->queue,
                                        get_default_waitset(), &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        DEBUG_ERR(err2, "reply failed");
    }
}
Example 24
static errval_t spawn_reply(struct spawn_binding *b, errval_t rerr,
                            domainid_t domainid)
{
    errval_t err;

    err = b->tx_vtbl.spawn_domain_response(b, NOP_CONT, rerr, domainid);

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending spawn_domain reply\n");

        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // this will be freed in the retry handler
            struct pending_spawn_response *sr =
                malloc(sizeof(struct pending_spawn_response));
            if (sr == NULL) {
                return LIB_ERR_MALLOC_FAIL;
            }
            sr->b = b;
            sr->err = rerr;
            sr->domainid = domainid;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_spawn_domain_response, sr));
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                free(sr);
                DEBUG_ERR(err, "register_send failed!");
                return err;
            }
        }
    }

    return SYS_ERR_OK;
}
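
The comment above says the pending_spawn_response is freed in the retry handler. A sketch of that handler under the same assumption; spawn_reply() allocates a fresh record if it hits TX_BUSY again, so the parked one can be freed either way:

static void retry_spawn_domain_response(void *a)
{
    struct pending_spawn_response *sr = a;
    errval_t err = spawn_reply(sr->b, sr->err, sr->domainid);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "retrying spawn_domain_response");
    }
    free(sr);
}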
Example 25
static void alloc_monitor_ep(struct monitor_blocking_binding *b)
{
    struct capref retcap = NULL_CAP;
    errval_t err, reterr = SYS_ERR_OK;

    struct monitor_lmp_binding *lmpb =
        malloc(sizeof(struct monitor_lmp_binding));
    assert(lmpb != NULL);

    // setup our end of the binding
    err = monitor_client_lmp_accept(lmpb, get_default_waitset(),
                                    DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        free(lmpb);
        reterr = err_push(err, LIB_ERR_MONITOR_CLIENT_ACCEPT);
        goto out;
    }

    retcap = lmpb->chan.local_cap;
    monitor_server_init(&lmpb->b);

out:
    err = b->tx_vtbl.alloc_monitor_ep_response(b, NOP_CONT, reterr, retcap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send alloc_monitor_ep_reply");
    }
}
Example 26
static void bind_ump_service_request_cont(struct monitor_binding *domain_binding,
                                          uintptr_t service_id,
                                          con_id_t my_mon_id,
                                          struct capref frame,
                                          uint32_t channel_length_in,
                                          uint32_t channel_length_out,
                                          struct capref notify_cap,
                                          struct intermon_binding *binding,
                                          con_id_t your_mon_id)
{
    errval_t err, err2;

    /* Proxy the request */
    err = domain_binding->tx_vtbl.
        bind_ump_service_request(domain_binding, NOP_CONT, service_id,
                                 my_mon_id, frame,
                                 channel_length_in, channel_length_out,
                                 notify_cap);
    if (err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_ump_service_request_state *me =
                malloc(sizeof(struct bind_ump_service_request_state));
            assert(me != NULL);
            struct monitor_state *ist = domain_binding->st;
            me->args.service_id = service_id;
            me->args.mon_id = my_mon_id;
            me->args.frame = frame;
            me->args.channel_length_in = channel_length_in;
            me->args.channel_length_out = channel_length_out;
            me->args.notify = notify_cap;
            me->binding = binding;
            me->your_mon_id = your_mon_id;
            me->elem.cont = bind_ump_service_request_handler;

            err = monitor_enqueue_send(domain_binding, &ist->queue,
                                       get_default_waitset(), &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        err2 = cap_delete(frame);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Cap delete failed");
        }
        err2 = slot_free(frame);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Cap destroy default failed");
        }
        err2 = remote_conn_free(my_mon_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "remote_conn_free failed");
        }
        intermon_caprep_t nullcap = {0,0,0,0};
        err2 = binding->tx_vtbl.bind_ump_reply(binding, NOP_CONT, your_mon_id, 0, err,
                                               nullcap);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Sending bind_ump_reply1 failed");
        }
    }
}
Example 27
static void
slaves_connect(struct task_graph *tg)
{
    char name[128];
    iref_t iref;
    errval_t err;

    for (int sid=0; sid < SlState.num_slaves; sid++) {
        int r = snprintf(name, 128, "replay_slave.%u", sid + 1);
        struct slave *sl = SlState.slaves + sid;
        assert(r != -1);

        err = nameservice_blocking_lookup(name, &iref);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "could not lookup IREF for replay slave");
            abort();
        }

        /* bound to slave  */
        bound = false;
        err = replay_bind(iref, replay_bind_cont, NULL,
                          get_default_waitset(),
                          IDC_BIND_FLAGS_DEFAULT);
        if(err_is_fail(err)) {
            DEBUG_ERR(err, "replay_bind");
        }
        while(!bound) {
            err = event_dispatch(get_default_waitset());
            assert(err_is_ok(err));
        }
        msg("Bound to slave %d\n", sid);

        /* initialize bulk transfer for slave */
        init_ok = false;
        err = bulk_create(BULK_TOTAL_SIZE, BULK_BLOCK_SIZE, &sl->frame, &sl->bt, false);
        assert(err_is_ok(err));
        err = sl->b->tx_vtbl.slave_init(sl->b, NOP_CONT, sl->frame, BULK_TOTAL_SIZE);
        assert(err_is_ok(err));
        while (!init_ok) {
            err = event_dispatch(get_default_waitset());
            assert(err_is_ok(err));
        }

        msg("Slave %d initialized\n", sid);
    }
}
Example 28
int listen(int sockfd, int backlog)
{
    struct fdtab_entry *e = fdtab_get(sockfd);

    switch(e->type) {
    case FDTAB_TYPE_UNIX_SOCKET: {
        POSIXCOMPAT_DEBUG("listen(%d, %d)\n", sockfd, backlog);
        struct _unix_socket *us = e->handle;

        errval_t err =
            unixsock_export(us, unixsock_listening, unixsock_connected,
                            get_default_waitset(), IDC_EXPORT_FLAGS_DEFAULT);
        if(err_is_fail(err)) {
            DEBUG_ERR(err, "unixsock_export failed");
            return -1;
        }
        while(us->u.passive.listen_iref == NULL_IREF) {
            // XXX: Should wait only on monitor
            event_dispatch(get_default_waitset());
        }

        us->passive = true;
        us->u.passive.max_backlog = backlog;
        us->u.passive.backlog = calloc(backlog, sizeof(struct unixsock_binding *));

        char str[128];
        snprintf(str, 128, "%"PRIuIREF, us->u.passive.listen_iref);
        err = vfs_write(us->vfs_handle, str, strlen(str), NULL);
        if(err_is_fail(err)) {
            USER_PANIC_ERR(err, "vfs_write");
        }
        break;
    }

    case FDTAB_TYPE_LWIP_SOCKET:
        lwip_mutex_lock();
        int ret = lwip_listen(e->fd, backlog);
        lwip_mutex_unlock();
        return ret;

    default:
        return -1;
    }

    return 0;
}
Example 29
static errval_t mp_create(struct descq_binding* b, uint32_t slots,
        struct capref rx, struct capref tx, bool notifications, uint8_t role,
        errval_t *err, uint64_t *queue_id)
{
    struct descq* q = (struct descq*) b->st;
    DESCQ_DEBUG("start %p\n", q);

    // switch RX/TX for correct setup
    *err = vspace_map_one_frame_attr((void**) &(q->rx_descs),
                                    slots*DESCQ_ALIGNMENT, tx,
                                    VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(*err)) {
        goto end2;
    }

    *err = vspace_map_one_frame_attr((void**) &(q->tx_descs),
                                    slots*DESCQ_ALIGNMENT, rx,
                                    VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(*err)) {
        goto end1;
    }
 
    q->tx_seq_ack = (void*)q->tx_descs;
    q->rx_seq_ack = (void*)q->rx_descs;
    q->tx_descs++;
    q->rx_descs++;
    q->slots = slots-1;
    q->rx_seq = 1;
    q->tx_seq = 1;

    devq_init(&q->q, true);

    q->q.f.enq = descq_enqueue;
    q->q.f.deq = descq_dequeue;
    q->q.f.notify = descq_notify;
    q->q.f.reg = descq_register;
    q->q.f.dereg = descq_deregister;
    q->q.f.ctrl = descq_control;
    q->q.f.destroy = descq_destroy;

    notificator_init(&q->notificator, q, descq_can_read, descq_can_write);
    *err = waitset_chan_register(get_default_waitset(), &q->notificator.ready_to_read, MKCLOSURE(mp_notify, q));
    assert(err_is_ok(*err));

    *err = q->f.create(q, notifications, role, queue_id);
    if (err_is_ok(*err)) {
        goto end2;
    }

    // creation failed: both rings are mapped and the descriptor pointers
    // were advanced past the seq/ack words above, so unmap via the saved
    // base pointers and preserve the creation error in *err
    vspace_unmap((void *)q->tx_seq_ack);
    vspace_unmap((void *)q->rx_seq_ack);
    goto end2;

end1:
    // reached only if mapping the tx ring failed; the rx ring is still
    // mapped at its original (unincremented) base here
    vspace_unmap((void *)q->rx_descs);
end2:
    DESCQ_DEBUG("end \n");
    return SYS_ERR_OK;
}
Example 30
static errval_t descq_deregister(struct devq* q, regionid_t rid)
{
    errval_t err, err2;
    err2 = SYS_ERR_OK;
    struct descq* queue = (struct descq*) q;

    err = queue->binding->rpc_tx_vtbl.deregister_region(queue->binding, rid, &err2);
    if (err_is_fail(err)) {
        queue->resend_args = rid;
        while(err_is_fail(err)) {
            err = queue->binding->register_send(queue->binding, get_default_waitset(),
                                                MKCONT(try_deregister, queue));
            if (err_is_fail(err)) {
                event_dispatch(get_default_waitset());
            }
        }
    }
    return err2;
}
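
Example 30 parks the region id in resend_args and registers try_deregister to run once the channel is writable. A hypothetical sketch of that continuation, mirroring the first attempt (the field and function names come from the usage above; the body is an assumption):

static void try_deregister(void *a)
{
    struct descq *queue = (struct descq *)a;
    errval_t err2 = SYS_ERR_OK;

    // replay the deregister RPC for the parked region id
    errval_t err = queue->binding->rpc_tx_vtbl.deregister_region(
        queue->binding, queue->resend_args, &err2);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "retrying deregister_region");
    }
}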