Example #1
static void ms_multiboot_cap_request(struct monitor_binding *b, cslot_t slot)
{
    errval_t err1, err2;

    struct capref cap = {
        .cnode = cnode_module,
        .slot  = slot,
    };

    // Call frame_identify to check if cap exists
    struct frame_identity id;
    err1 = invoke_frame_identify(cap, &id);
    if (err_is_fail(err1)) {
        err2 = b->tx_vtbl.multiboot_cap_reply(b, NOP_CONT, NULL_CAP, err1);
    } else {
        err2 = b->tx_vtbl.multiboot_cap_reply(b, NOP_CONT, cap, err1);
    }
    if (err_is_fail(err2)) {
        if (err_no(err2) == FLOUNDER_ERR_TX_BUSY) {
            struct monitor_state *mon_state = b->st;
            struct multiboot_cap_state *ms =
                malloc(sizeof(struct multiboot_cap_state));
            assert(ms);
            ms->slot = slot;
            ms->elem.cont = ms_multiboot_cap_request_handler;
            err1 = monitor_enqueue_send(b, &mon_state->queue,
                                       get_default_waitset(), &ms->elem.queue);
            if (err_is_fail(err1)) {
                USER_PANIC_ERR(err1, "monitor_enqueue_send failed");
            }
        } else {
            USER_PANIC_ERR(err2, "sending multiboot_cap_reply failed");
        }
    }
}
Example #2
static errval_t map_mmio_space(struct xeon_phi *phi)
{
    errval_t err;
    void *mmio;

    struct frame_identity id;
    err = invoke_frame_identify(mmio_cap, &id);
    if (err_is_fail(err)) {
        return err;
    }

    err = vspace_map_one_frame(&mmio, (1UL << id.bits), mmio_cap, NULL, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    XDEBUG("mapped mmio register space @ [%p]\n", mmio);

    phi->mmio.bits = id.bits;
    phi->mmio.vbase = (lvaddr_t) mmio;
    phi->mmio.cap = mmio_cap;
    phi->mmio.pbase = id.base;
    phi->mmio.length = (1UL << id.bits);

    return SYS_ERR_OK;
}
static errval_t alloc_local(void)
{
    errval_t err;

    size_t frame_size = 0;
    if (disp_xeon_phi_id() == 0) {
        frame_size = XPHI_BENCH_FRAME_SIZE_HOST;
    } else {
        frame_size = XPHI_BENCH_FRAME_SIZE_CARD;
    }

    if (!frame_size) {
        frame_size = 4096;
    }

    debug_printf("Allocating a frame of size: %lx\n", frame_size);

    size_t alloced_size = 0;
    err = frame_alloc(&local_frame, frame_size, &alloced_size);
    assert(err_is_ok(err));
    assert(alloced_size >= frame_size);

    struct frame_identity id;
    err = invoke_frame_identify(local_frame, &id);
    assert(err_is_ok(err));
    local_base = id.base;
    local_frame_sz = alloced_size;

    err = vspace_map_one_frame(&local_buf, alloced_size, local_frame, NULL, NULL);

    return err;
}
Example #4
void alloc_local(void)
{
    errval_t err;

#ifndef __k1om__
    uint64_t minbase, maxlimit;
    ram_get_affinity(&minbase, &maxlimit);
    ram_set_affinity(XPHI_BENCH_RAM_MINBASE, XPHI_BENCH_RAM_MAXLIMIT);
#endif
    size_t alloced_size = 0;
    err = frame_alloc(&local_frame, XPHI_BENCH_MSG_FRAME_SIZE, &alloced_size);
    EXPECT_SUCCESS(err, "frame_alloc");

#ifndef __k1om__
    ram_set_affinity(minbase, maxlimit);
#endif

    struct frame_identity id;
    err = invoke_frame_identify(local_frame, &id);
    EXPECT_SUCCESS(err, "invoke_frame_identify");

    local_base = id.base;
    local_frame_sz = alloced_size;

    debug_printf("alloc_local | Frame base: %016lx, size=%lx\n", id.base,
                 1UL << id.bits);

    err = vspace_map_one_frame(&local_buf, alloced_size, local_frame, NULL, NULL);
    EXPECT_SUCCESS(err, "vspace_map_one_frame");
}
Example #5
/**
 * \brief initializes a dma descriptor ring and allocates memory for it
 *
 * \param ring  the ring structure to initialize
 * \param size  number of elements in the ring
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring,
                                      uint16_t size)
{
    errval_t err;

    memset(ring, 0, sizeof(*ring));

    assert(size < (XEON_PHI_DMA_DESC_RING_MAX));
    assert(IS_POW2(size));

#ifndef __k1om__
    /*
     * we set the ram affinity to the maximum range mapped by the system memory
     * page tables when being on the host. Otherwise the card cannot access it.
     */
    uint64_t minbase, maxlimit;
    ram_get_affinity(&minbase, &maxlimit);
    ram_set_affinity(0, XEON_PHI_SYSMEM_SIZE-8*XEON_PHI_SYSMEM_PAGE_SIZE);
#endif

    size_t frame_size = ((size_t) size) * XEON_PHI_DMA_DESC_SIZE;
    err = frame_alloc(&ring->cap, frame_size, NULL);

#ifndef __k1om__
    ram_set_affinity(minbase, maxlimit);
#endif

    if (err_is_fail(err)) {
        return err;
    }

    err = vspace_map_one_frame_attr(&ring->vbase,
                                    frame_size,
                                    ring->cap,
                                    VREGION_FLAGS_READ_WRITE,
                                    NULL,
                                    NULL);
    if (err_is_fail(err)) {
        cap_destroy(ring->cap);
        return err;
    }

    struct frame_identity id;
    err = invoke_frame_identify(ring->cap, &id);
    assert(err_is_ok(err));
    ring->pbase = id.base;
    ring->size = size;

    memset(ring->vbase, 0, frame_size);

    return SYS_ERR_OK;
}
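
A brief usage sketch for the allocator above; the element count is illustrative and must satisfy the power-of-two and maximum-size asserts (names prefixed with example_ are hypothetical):

static errval_t example_ring_setup(struct xdma_ring *ring)
{
    // 256 descriptors: a power of two, below XEON_PHI_DMA_DESC_RING_MAX
    errval_t err = xeon_phi_dma_desc_ring_alloc(ring, 256);
    if (err_is_fail(err)) {
        return err;
    }
    // ring->vbase and ring->pbase are now valid; the memory is zeroed
    return SYS_ERR_OK;
}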
Example #6
// Callback from device manager
void qd_queue_init_data(struct e10k_binding *b, struct capref registers,
        uint64_t macaddr)
{
    struct frame_identity frameid = { .base = 0, .bits = 0 };
    errval_t err;
    void *virt;

    INITDEBUG("idc_queue_init_data\n");

    mac_address = macaddr;

    // Map device registers
    err = invoke_frame_identify(registers, &frameid);
    assert(err_is_ok(err));
    err = vspace_map_one_frame_attr(&virt, 1 << frameid.bits, registers,
            VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
    assert(err_is_ok(err));

    // Initialize mackerel device
    d = malloc(sizeof(*d));
#ifndef VF
    e10k_initialize(d, virt);
#else
    e10k_vf_initialize(d, virt);
#endif

    // Initialize queue
    setup_queue();
}

// Callback from device manager
void qd_queue_memory_registered(struct e10k_binding *b)
{
    initialized = 1;

    hwqueue_initialized();

    // Register queue with queue_mgr library
#ifndef LIBRARY
    ethersrv_init((char*) service_name, qi, get_mac_addr_fn, terminate_queue_fn,
        transmit_pbuf_list_fn, find_tx_free_slot_count_fn,
        handle_free_tx_slot_fn, RXBUFSZ, register_rx_buffer_fn,
        find_rx_free_slot_count_fn);
#else
    ethernetif_backend_init((char*) service_name, qi, get_mac_addr_fn, terminate_queue_fn,
        transmit_pbuf_list_fn, find_tx_free_slot_count_fn,
        handle_free_tx_slot_fn, RXBUFSZ, register_rx_buffer_fn,
        find_rx_free_slot_count_fn);
#endif
}
Example #7
/**
 * \brief Initialise a new UMP channel to accept an incoming binding request
 *
 * \param uc  Storage for channel state
 * \param mon_id Monitor's connection ID for this channel
 * \param frame Frame capability containing channel
 * \param inchanlen Size of incoming channel, in bytes (multiple of #UMP_MSG_BYTES)
 * \param outchanlen Size of outgoing channel, in bytes (multiple of #UMP_MSG_BYTES)
 */
errval_t ump_chan_accept(struct ump_chan *uc, uintptr_t mon_id,
                         struct capref frame, size_t inchanlen,
                         size_t outchanlen)
{
    errval_t err;

    uc->monitor_id = mon_id;
    uc->frame = frame;

    // check that the frame is big enough
    struct frame_identity frameid;
    err = invoke_frame_identify(frame, &frameid);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }

    // Ids for tracing
    uc->recvid = (uintptr_t)(frameid.base + outchanlen);
    uc->sendid = (uintptr_t)frameid.base;

    size_t framesize = ((uintptr_t)1) << frameid.bits;
    if (framesize < inchanlen + outchanlen) {
        return LIB_ERR_UMP_FRAME_OVERFLOW;
    }

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, framesize, frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) {
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state
    err = ump_chan_init(uc, (char *)buf + outchanlen, inchanlen, buf, outchanlen);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    /* mark connected */
    uc->connstate = UMP_CONNECTED;
    return SYS_ERR_OK;
}
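
For context, a minimal accept-side sketch; the shared frame and monitor connection ID are assumed to come from the monitor's bind request, and the half-and-half split of the channel lengths is illustrative:

// Hypothetical caller: split one 4 KiB frame into two 2 KiB channels.
static errval_t example_accept(struct ump_chan *uc, uintptr_t mon_id,
                               struct capref shared_frame)
{
    // both lengths must be multiples of UMP_MSG_BYTES
    return ump_chan_accept(uc, mon_id, shared_frame, 2048, 2048);
}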
Example #8
static errval_t msg_open_cb(xphi_dom_id_t domain,
                     uint64_t usrdata,
                     struct capref msgframe,
                     uint8_t type)
{
    errval_t err;

    domainid = domain;

    struct frame_identity id;
    err = invoke_frame_identify(msgframe, &id);
    EXPECT_SUCCESS(err, "frame identify");

    debug_printf("msg_open_cb | Frame base: %016lx, size=%lx\n", id.base,
                 1UL << id.bits);

    assert((1UL << id.bits) >= XPHI_BENCH_MSG_FRAME_SIZE);

    err = vspace_map_one_frame(&remote_buf, XPHI_BENCH_MSG_FRAME_SIZE, msgframe,
                               NULL, NULL);
    EXPECT_SUCCESS(err, "vspace map frame");

    remote_frame = msgframe;
    remote_base = id.base;
    remote_frame_sz = (1UL << id.bits);

    init_buffer();

    connected = 0x1;

    debug_printf("Initializing UMP channel...\n");

    err = ump_chan_init(&xphi_uc, inbuf, XPHI_BENCH_MSG_CHAN_SIZE, outbuf,
                        XPHI_BENCH_MSG_CHAN_SIZE);
    EXPECT_SUCCESS(err, "initialize ump channel");

    err = ump_chan_init(&xphi_uc_rev, inbuf_rev, XPHI_BENCH_MSG_CHAN_SIZE, outbuf_rev,
                        XPHI_BENCH_MSG_CHAN_SIZE);
    EXPECT_SUCCESS(err, "initialize ump channel");

    return SYS_ERR_OK;
}
static void get_bootinfo(struct monitor_blocking_binding *b)
{
    errval_t err;

    struct capref frame = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_BOOTINFO
    };

    struct frame_identity id = { .base = 0, .bits = 0 };
    err = invoke_frame_identify(frame, &id);
    assert(err_is_ok(err));

    err = b->tx_vtbl.get_bootinfo_response(b, NOP_CONT, SYS_ERR_OK, frame,
                                           (size_t)1 << id.bits);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT((void (*)(void *))get_bootinfo, b));
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "register_send failed");
            }
            // the retry is queued; don't fall through to the panic below
            return;
        }

        USER_PANIC_ERR(err, "sending get_bootinfo_response failed");
    }
}

/* ----------------------- BOOTINFO REQUEST CODE END ----------------------- */

static void get_ipi_cap(struct monitor_blocking_binding *b)
{
    errval_t err;

    // XXX: We should not just hand out this cap to everyone
    // who requests it. There is currently no way to determine
    // if the client is a valid recipient

    err = b->tx_vtbl.get_ipi_cap_response(b, NOP_CONT, cap_ipi);
    assert(err_is_ok(err));
}
Example #10
// populates the given buffer with given capref
static errval_t populate_buffer(struct buffer_descriptor *buffer,
        struct capref cap)
{

    buffer->cap = cap;
    struct frame_identity pa;
    errval_t err = invoke_frame_identify(cap, &pa);
    if (!err_is_ok(err)) {
        printf("invoke_frame_identify failed\n");
        abort();
    }
    buffer->pa = pa.base;
    buffer->bytes = pa.bytes;

    err = vspace_map_one_frame(&buffer->va, buffer->bytes, cap,
            NULL, NULL);

/*
    err = vspace_map_one_frame_attr(&buffer->va, (1L << buffer->bits), cap,
                    VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
*/

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "vspace_map_one_frame failed");
        // FIXME: report more sensible error
        return(ETHERSRV_ERR_TOO_MANY_BUFFERS);
    }
    netd_buffer_count++;
    buffer_id_counter++;
    buffer->buffer_id = buffer_id_counter;
//    printf("### buffer gets id %"PRIu64"\n", buffer->buffer_id);
    if (buffer->buffer_id == 3) {
        first_app_b = buffer;
    }

    buffer->next = buffers_list;
    // Adding the buffer on the top of buffer list.
//    buffers_list = buffer;
    return SYS_ERR_OK;
} // end function: populate_buffer
static errval_t msg_open_cb(xphi_dom_id_t domain,
                            uint64_t usrdata,
                            struct capref msgframe,
                            uint8_t type)
{
    errval_t err;

    domid = domain;

    struct frame_identity id;
    err = invoke_frame_identify(msgframe, &id);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not identify the frame");
    }

    debug_printf("msg_open_cb | Frame base: %016lx, size=%lx, ud:%lx\n", id.base,
                 1UL << id.bits, usrdata);

    remote_frame = msgframe;

    remote_base = id.base;

    remote_frame_sz = (1UL << id.bits);

    err = vspace_map_one_frame(&remote_buf, remote_frame_sz, msgframe,
                               NULL, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not map the frame");
    }

    init_buffer_c0();

    connected = 0x1;

    return SYS_ERR_OK;
}
/**
 * \brief allocates and maps a memory region to be used for DMA purposes
 *
 * \param bytes minimum size of the memory region in bytes
 * \param flags VREGION flags how the region gets mapped
 * \param mem   returns the mapping information
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t dma_mem_alloc(size_t bytes,
                       vregion_flags_t flags,
                       struct dma_mem *mem)
{
    errval_t err;

    if (mem == NULL) {
        return DMA_ERR_ARG_INVALID;
    }

    err = frame_alloc(&mem->frame, bytes, &mem->bytes);
    if (err_is_fail(err)) {
        return err;
    }

    struct frame_identity id;
    err = invoke_frame_identify(mem->frame, &id);
    if (err_is_fail(err)) {
        dma_mem_free(mem);
        return err;
    }

    mem->paddr = id.base;

    void *addr;
    err = vspace_map_one_frame_attr(&addr, mem->bytes, mem->frame, flags, NULL,
                                    NULL);
    if (err_is_fail(err)) {
        dma_mem_free(mem);
        return err;
    }

    mem->vaddr = (lvaddr_t)addr;

    return SYS_ERR_OK;
}
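
A usage sketch for dma_mem_alloc; the 2 MB size is illustrative, and VREGION_FLAGS_READ_WRITE is the same flag used by other examples in this collection:

// Hypothetical caller: set up a zeroed, mapped DMA buffer.
static errval_t example_dma_buffer(struct dma_mem *mem)
{
    errval_t err = dma_mem_alloc(2UL << 20, VREGION_FLAGS_READ_WRITE, mem);
    if (err_is_fail(err)) {
        return err;
    }
    // mem->paddr is what gets programmed into the device,
    // mem->vaddr is what the driver touches locally
    memset((void *)mem->vaddr, 0, mem->bytes);
    return SYS_ERR_OK;
}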
Example #13
/**
 * \brief Notification of a newly booted monitor.
 *  Setup our connection and request the sender to proxy
 *  the bind request to the monitor
 */
static void new_monitor_notify(struct intermon_binding *b,
                               coreid_t core_id)
{
    errval_t err;

    /* Setup the connection */
    ram_set_affinity(SHARED_MEM_MIN + (PERCORE_MEM_SIZE * my_core_id),
                     SHARED_MEM_MIN + (PERCORE_MEM_SIZE * (my_core_id + 1)));
    struct capref frame;
    err = frame_alloc(&frame, MON_URPC_SIZE, NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "frame_alloc failed");
        return; // FIXME: cleanup
    }
    ram_set_affinity(0, 0);     // Reset affinity

    void *buf;
    err = vspace_map_one_frame_attr(&buf, MON_URPC_SIZE, frame, VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "vspace_map_one_frame failed");
        assert(buf); // XXX
    }
    // XXX: Clear the frame (kernel can't do it for us)
    memset(buf, 0, MON_URPC_SIZE);

    // Allocate my own notification caps
    struct capref ep, my_notify_cap;
    struct lmp_endpoint *iep;
    int chanid;
    err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
    assert(err_is_ok(err));
    err = notification_allocate(ep, &chanid);
    assert(err == SYS_ERR_OK);
    err = notification_create_cap(chanid, my_core_id, &my_notify_cap);
    assert(err == SYS_ERR_OK);

    // init our end of the binding and channel
    struct intermon_ump_ipi_binding *ump_binding =
        malloc(sizeof(struct intermon_ump_ipi_binding));
    assert(ump_binding != NULL);
    err = intermon_ump_ipi_init(ump_binding, get_default_waitset(),
                                buf, MON_URPC_CHANNEL_LEN,
                                buf + MON_URPC_CHANNEL_LEN,
                                MON_URPC_CHANNEL_LEN, NULL_CAP, my_notify_cap,
                                ep, iep);
    assert(err_is_ok(err));
    /* if (err_is_fail(err)) { */
    /*     cap_destroy(frame); */
    /*     return err_push(err, LIB_ERR_UMP_CHAN_BIND); */
    /* } */

    // Identify UMP frame for tracing
    struct frame_identity umpid = { .base = 0, .bits = 0 };
    err = invoke_frame_identify(frame, &umpid);
    assert(err_is_ok(err));
    ump_binding->ump_state.chan.recvid = (uintptr_t)umpid.base;
    ump_binding->ump_state.chan.sendid =
        (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN);

    err = intermon_init(&ump_binding->b, core_id);
    assert(err_is_ok(err));

    /* Identify the frame cap */
    struct capability frame_cap;
    err = monitor_cap_identify(frame, &frame_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor_cap_identify failed");
        return; // FIXME: cleanup
    }

    intermon_caprep_t caprep;
    capability_to_caprep(&frame_cap, &caprep);

    /* reply to the sending monitor to proxy request */
    err = b->tx_vtbl.bind_monitor_proxy_scc(b, NOP_CONT, core_id,
                                            caprep, chanid, my_core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "bind proxy request failed");
    }
}

errval_t arch_intermon_init(struct intermon_binding *b)
{
    b->rx_vtbl.bind_monitor_request_scc = bind_monitor_request_scc;
    b->rx_vtbl.bind_monitor_reply_scc = bind_monitor_reply_scc;
    b->rx_vtbl.bind_monitor_proxy_scc = bind_monitor_proxy_scc;
    b->rx_vtbl.new_monitor_notify = new_monitor_notify;

    return SYS_ERR_OK;
}
Example #14
errval_t spawn_xcore_monitor(coreid_t coreid, int hwid,
                             enum cpu_type cpu_type,
                             const char *cmdline,
                             struct frame_identity urpc_frame_id,
                             struct capref kcb)
{
    uint64_t start = 0;
    const char *monitorname = NULL, *cpuname = NULL;
    genpaddr_t arch_page_size;
    errval_t err;

    err = get_architecture_config(cpu_type, &arch_page_size,
                                  &monitorname, &cpuname);
    assert(err_is_ok(err));

    DEBUG("loading kernel: %s\n", cpuname);
    DEBUG("loading 1st app: %s\n", monitorname);

    // compute size of frame needed and allocate it
    DEBUG("%s:%s:%d: urpc_frame_id.base=%"PRIxGENPADDR"\n",
           __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.base);
    DEBUG("%s:%s:%d: urpc_frame_id.size=%d\n",
           __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.bits);

    if (benchmark_flag) {
        start = bench_tsc();
    }
    static size_t cpu_binary_size;
    static lvaddr_t cpu_binary = 0;
    static genpaddr_t cpu_binary_phys;
    static const char* cached_cpuname = NULL;
    if (cpu_binary == 0) {
        cached_cpuname = cpuname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(cpuname, &cpu_binary, &cpu_binary_phys,
                            &cpu_binary_size);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");
            return err;
        }
    }
    // Ensure caching actually works and we're
    // always loading same binary. If this starts to fail, get rid of caching.
    assert (strcmp(cached_cpuname, cpuname) == 0);

    static size_t monitor_binary_size;
    static lvaddr_t monitor_binary = 0;
    static genpaddr_t monitor_binary_phys;
    static const char* cached_monitorname = NULL;
    if (monitor_binary == 0) {
        cached_monitorname = monitorname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(monitorname, &monitor_binary,
                            &monitor_binary_phys, &monitor_binary_size);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");
            return err;
        }
    }
    // Again, ensure caching actually worked (see above)
    assert (strcmp(cached_monitorname, monitorname) == 0);

    if (benchmark_flag) {
        bench_data->load = bench_tsc() - start;
        start = bench_tsc();
    }

    struct capref cpu_memory_cap;
    struct frame_identity frameid;
    size_t cpu_memory;
    err = allocate_kernel_memory(cpu_binary, arch_page_size,
                                 &cpu_memory_cap, &cpu_memory, &frameid);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not allocate space for new app kernel.");
        return err;
    }

    err = cap_mark_remote(cpu_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");
        return err;
    }

    void *cpu_buf_memory;
    err = vspace_map_one_frame(&cpu_buf_memory, cpu_memory, cpu_memory_cap,
                               NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }
    if (benchmark_flag) {
        bench_data->alloc_cpu = bench_tsc() - start;
        start = bench_tsc();
    }

    /* Chunk of memory to load monitor on the app core */
    struct capref spawn_memory_cap;
    struct frame_identity spawn_memory_identity;

    err = frame_alloc_identify(&spawn_memory_cap,
                               X86_CORE_DATA_PAGES * arch_page_size,
                               NULL, &spawn_memory_identity);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    err = cap_mark_remote(spawn_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");
        return err;
    }
    if (benchmark_flag) {
        bench_data->alloc_mon = bench_tsc() - start;
        start = bench_tsc();
    }

    /* Load cpu */
    struct elf_allocate_state state;
    state.vbase = (char *)cpu_buf_memory + arch_page_size;
    assert(sizeof(struct x86_core_data) <= arch_page_size);
    state.elfbase = elf_virtual_base(cpu_binary);

    struct Elf64_Ehdr *cpu_head = (struct Elf64_Ehdr *)cpu_binary;
    genvaddr_t cpu_entry;

    err = elf_load(cpu_head->e_machine, elfload_allocate, &state,
                   cpu_binary, cpu_binary_size, &cpu_entry);
    if (err_is_fail(err)) {
        return err;
    }
    if (benchmark_flag) {
        bench_data->elf_load = bench_tsc() - start;
        start = bench_tsc();
    }

    err = relocate_cpu_binary(cpu_binary, cpu_head, state, frameid, arch_page_size);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not relocate new kernel.");
        return err;
    }
    if (benchmark_flag) {
        bench_data->elf_reloc = bench_tsc() - start;
    }

    genvaddr_t cpu_reloc_entry = cpu_entry - state.elfbase
                                 + frameid.base + arch_page_size;
    /* Compute entry point in the foreign address space */
    forvaddr_t foreign_cpu_reloc_entry = (forvaddr_t)cpu_reloc_entry;

    /* Setup the core_data struct in the new kernel */
    struct x86_core_data *core_data = (struct x86_core_data *)cpu_buf_memory;
    switch (cpu_head->e_machine) {
    case EM_X86_64:
    case EM_K1OM:
        core_data->elf.size = sizeof(struct Elf64_Shdr);
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)cpu_head->e_shoff;
        core_data->elf.num  = cpu_head->e_shnum;
        break;
    case EM_386:
        core_data->elf.size = sizeof(struct Elf32_Shdr);
        struct Elf32_Ehdr *head32 = (struct Elf32_Ehdr *)cpu_binary;
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)head32->e_shoff;
        core_data->elf.num  = head32->e_shnum;
        break;
    default:
        return SPAWN_ERR_UNKNOWN_TARGET_ARCH;
    }
    core_data->module_start = cpu_binary_phys;
    core_data->module_end   = cpu_binary_phys + cpu_binary_size;
    core_data->urpc_frame_base = urpc_frame_id.base;
    core_data->urpc_frame_bits = urpc_frame_id.bits;
    core_data->monitor_binary   = monitor_binary_phys;
    core_data->monitor_binary_size = monitor_binary_size;
    core_data->memory_base_start = spawn_memory_identity.base;
    core_data->memory_bits       = spawn_memory_identity.bits;
    core_data->src_core_id       = disp_get_core_id();
    core_data->src_arch_id       = my_arch_id;
    core_data->dst_core_id       = coreid;


    struct frame_identity fid;
    err = invoke_frame_identify(kcb, &fid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Invoke frame identity for KCB failed. "
                            "Did you add the syscall handler for that architecture?");
    }
    DEBUG("%s:%s:%d: fid.base is 0x%"PRIxGENPADDR"\n",
           __FILE__, __FUNCTION__, __LINE__, fid.base);
    core_data->kcb = (genpaddr_t) fid.base;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
    core_data->chan_id           = chanid;
#endif

    if (cmdline != NULL) {
        // copy as much of command line as will fit
        snprintf(core_data->kernel_cmdline, sizeof(core_data->kernel_cmdline),
                "%s %s", cpuname, cmdline);
        // ensure termination
        core_data->kernel_cmdline[sizeof(core_data->kernel_cmdline) - 1] = '\0';

        DEBUG("%s:%s:%d: %s\n", __FILE__, __FUNCTION__, __LINE__, core_data->kernel_cmdline);
    }

    /* Invoke kernel capability to boot new core */
    if (cpu_type == CPU_X86_64 || cpu_type == CPU_K1OM) {
        start_aps_x86_64_start(hwid, foreign_cpu_reloc_entry);
    }

#ifndef __k1om__
    else if (cpu_type == CPU_X86_32) {
        start_aps_x86_32_start(hwid, foreign_cpu_reloc_entry);
    }
#endif

    /* Clean up */
    // XXX: Should not delete the remote caps?
    err = cap_destroy(spawn_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }
    err = vspace_unmap(cpu_buf_memory);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace unmap CPU driver memory failed");
    }
    err = cap_destroy(cpu_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }

    return SYS_ERR_OK;
}
Example #15
static errval_t spawn_setup_argcn(struct spawninfo *si,
                                  struct capref argumentcn_cap)
{
    errval_t err;

    if (capref_is_null(argumentcn_cap)) {
        return SYS_ERR_OK;
    }

    struct capref dest = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_ARGCN
    };

    err = cap_copy(dest, argumentcn_cap);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_ARGCN);
    }

    return SYS_ERR_OK;
}


/**
 * \brief Load an image
 *
 * \param si            Struct used by the library
 * \param binary        The image to load
 * \param type          The type of arch to load for
 * \param name          Name of the image required only to place it in disp
 *                      struct
 * \param coreid        Coreid to load for, required only to place it in disp
 *                      struct
 * \param argv          Command-line arguments, NULL-terminated
 * \param envp          Environment, NULL-terminated
 * \param inheritcn_cap Cap to a CNode containing capabilities to be inherited
 * \param argcn_cap     Cap to a CNode containing capabilities passed as
 *                      arguments
 */
errval_t spawn_load_image(struct spawninfo *si, lvaddr_t binary,
                          size_t binary_size, enum cpu_type type,
                          const char *name, coreid_t coreid,
                          char *const argv[], char *const envp[],
                          struct capref inheritcn_cap, struct capref argcn_cap)
{
    errval_t err;

    si->cpu_type = type;

    /* Initialize cspace */
    err = spawn_setup_cspace(si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_CSPACE);
    }

    /* Initialize vspace */
    err = spawn_setup_vspace(si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_INIT);
    }

    si->name = name;
    genvaddr_t entry;
    void* arch_info;
    /* Load the image */
    err = spawn_arch_load(si, binary, binary_size, &entry, &arch_info);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    /* Setup dispatcher frame */
    err = spawn_setup_dispatcher(si, coreid, name, entry, arch_info);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_DISPATCHER);
    }

    /* Setup inherited caps */
    err = spawn_setup_inherited_caps(si, inheritcn_cap);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_INHERITED_CAPS);
    }

    /* Setup argument caps */
    err = spawn_setup_argcn(si, argcn_cap);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_ARGCN);
    }

    // Add vspace-pspace mapping to environment
    char envstr[2048];
#ifdef __x86__  // SK: si->vregions only valid on x86
    snprintf(envstr, 2048, "ARRAKIS_PMAP=");
    for(int i = 0; i < si->vregions; i++) {
        struct memobj_anon *m = (struct memobj_anon *)si->vregion[i]->memobj;
        assert(m->m.type == ANONYMOUS);
        for(struct memobj_frame_list *f = m->frame_list; f != NULL; f = f->next) {
            struct frame_identity id;
            err = invoke_frame_identify(f->frame, &id);
            assert(err_is_ok(err));

            char str[128];
            snprintf(str, 128, "%" PRIxGENVADDR ":%" PRIxGENPADDR ":%zx ", si->base[i] + f->offset, id.base, f->size);
            strcat(envstr, str);
        }
    }
#endif /* __x86__ */

    char **myenv = (char **)envp;
    for(int i = 0; i < MAX_ENVIRON_VARS; i++) {
        if(i + 1 == MAX_ENVIRON_VARS) {
            printf("spawnd: Couldn't set environemnt. Out of variables!\n");
            abort();
        }

        if(myenv[i] == NULL) {
            myenv[i] = envstr;
            myenv[i+1] = NULL;
            break;
        }
    }

    /* Setup cmdline args */
    err = spawn_setup_env(si, argv, envp);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_ENV);
    }

    return SYS_ERR_OK;
}
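
A hypothetical call site for spawn_load_image; the binary is assumed to be mapped already, NULL_CAP skips the inherited and argument CNodes (cf. spawn_setup_argcn above), and the envp array needs spare slots because the function appends an ARRAKIS_PMAP entry on x86:

static errval_t example_spawn(struct spawninfo *si, lvaddr_t binary,
                              size_t binary_size)
{
    char *argv[] = { "myapp", NULL };
    // leave room: spawn_load_image writes one extra environment entry
    char *envp[MAX_ENVIRON_VARS] = { NULL };
    return spawn_load_image(si, binary, binary_size, CPU_X86_64, "myapp",
                            0, argv, envp, NULL_CAP, NULL_CAP);
}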
static void add_memory_call_rx(struct xomp_binding *b,
                               struct capref frame,
                               uint64_t addr,
                               uint8_t type)
{
    XWI_DEBUG("add_memory_call_rx: addr:%lx, tyep: %u\n", addr, type);

    struct txq_msg_st *msg_st = txq_msg_st_alloc(&txq);
    assert(msg_st != NULL);

    msg_st->send = add_memory_response_tx;
    msg_st->cleanup = NULL;

    uint32_t map_flags = 0x0;

    switch ((xomp_frame_type_t) type) {
        case XOMP_FRAME_TYPE_MSG:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RW:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RO:
            map_flags = VREGION_FLAGS_READ;
            break;
        default:
            USER_PANIC("unknown type: %u", type)
            break;
    }
    struct frame_identity id;
    msg_st->err = invoke_frame_identify(frame, &id);
    if(err_is_fail(msg_st->err)) {
        txq_send(msg_st);
        return;
    }

#if XOMP_WORKER_ENABLE_DMA
    if (0) {
        // todo: replicate frame on the same node if needed..
        replicate_frame(addr, &frame);
    }
#endif

#if XOMP_BENCH_WORKER_EN
    cycles_t map_start = bench_tsc();
#endif
    if (addr) {
        msg_st->err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits),
                                                      frame, map_flags, NULL, NULL);
    } else {
        void *map_addr;
        msg_st->err = vspace_map_one_frame_attr(&map_addr, (1UL << id.bits),
                                                frame, map_flags, NULL, NULL);
    }
#if XOMP_BENCH_WORKER_EN
    cycles_t timer_end = bench_tsc();
    debug_printf("%lx mem map %016lx took  %lu cycles, %lu ms\n", worker_id, addr,
                     timer_end - map_start, bench_tsc_to_ms(timer_end - map_start));
#endif

    txq_send(msg_st);
}
Example #17
/**
 * \brief Initialise a new UMP channel and initiate a binding
 *
 * \param uc  Storage for channel state
 * \param cont Continuation for bind completion/failure
 * \param qnode Storage for an event queue node (used for queuing bind request)
 * \param iref IREF to which to bind
 * \param monitor_binding Monitor binding to use
 * \param inchanlen Size of incoming channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param outchanlen Size of outgoing channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param notify_cap Capability to use for notifications, or #NULL_CAP
 */
errval_t ump_chan_bind(struct ump_chan *uc, struct ump_bind_continuation cont,
                       struct event_queue_node *qnode,  iref_t iref,
                       struct monitor_binding *monitor_binding,
                       size_t inchanlen, size_t outchanlen,
                       struct capref notify_cap)
{
    errval_t err;

    // round up channel sizes to message size
    inchanlen = ROUND_UP(inchanlen, UMP_MSG_BYTES);
    outchanlen = ROUND_UP(outchanlen, UMP_MSG_BYTES);

    // compute size of frame needed and allocate it
    size_t framesize = inchanlen + outchanlen;
    err = frame_alloc(&uc->frame, framesize, &framesize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, framesize, uc->frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) { 
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state
    err = ump_chan_init(uc, buf, inchanlen, (char *)buf + inchanlen, outchanlen);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    // Ids for tracing
    struct frame_identity id;
    err = invoke_frame_identify(uc->frame, &id);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }
    uc->recvid = (uintptr_t)id.base;
    uc->sendid = (uintptr_t)(id.base + inchanlen);

    // store bind args
    uc->bind_continuation = cont;
    uc->monitor_binding = monitor_binding;
    uc->iref = iref;
    uc->inchanlen = inchanlen;
    uc->outchanlen = outchanlen;
    uc->notify_cap = notify_cap;

    // wait for the ability to use the monitor binding
    uc->connstate = UMP_BIND_WAIT;
    event_mutex_enqueue_lock(&monitor_binding->mutex, qnode,
                             MKCLOSURE(send_bind_cont, uc));

    return SYS_ERR_OK;
}
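
A bind-side counterpart to the accept sketch earlier; all inputs (monitor binding, continuation, queue node, IREF) are supplied by the hypothetical caller, and the channel lengths are illustrative:

// Hypothetical caller: initiate a UMP binding with two 2 KiB channels
// and no notification capability.
static errval_t example_bind(struct ump_chan *uc, iref_t iref,
                             struct monitor_binding *mb,
                             struct ump_bind_continuation cont,
                             struct event_queue_node *qnode)
{
    return ump_chan_bind(uc, cont, qnode, iref, mb, 2048, 2048, NULL_CAP);
}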
Example #18
/// Map in the frame caps for a module into our vspace, return their location
errval_t spawn_map_module(struct mem_region *module, size_t *retsize,
                          lvaddr_t *retaddr, genpaddr_t *retpaddr)
{
    assert(module != NULL);
    assert(module->mr_type == RegionType_Module);

    errval_t err;

    size_t size = module->mrmod_size;

    void *base;
    struct memobj *memobj;
    struct vregion *vregion;

    err = vspace_map_anon_attr(&base, &memobj, &vregion, size, &size,
                               VREGION_FLAGS_READ);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }
    struct capref frame = {
        .cnode = cnode_module,
        .slot  = module->mrmod_slot,
    };

    if (retpaddr != NULL) {
        *retpaddr = module->mr_base;
    }

    if (retsize != NULL) {
        *retsize = size;
    }

    if (retaddr != NULL) {
        *retaddr = (lvaddr_t)base;
    }

    size_t offset = 0;
    while (size > 0) {
        assert((size & BASE_PAGE_MASK) == 0);

        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));

        err = memobj->f.fill(memobj, offset, frame, 1UL << id.bits);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }

        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }

        frame.slot ++;
        size -= (1UL << id.bits);
        offset += (1UL << id.bits);
    }

    return SYS_ERR_OK;
}

errval_t spawn_unmap_module(lvaddr_t mapped_addr)
{
    return vspace_unmap((void *)mapped_addr);
}
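
A sketch of the intended map/unmap pairing (obtaining the mem_region is assumed):

// Hypothetical caller: map a multiboot module, inspect it, unmap it.
static errval_t example_module_access(struct mem_region *module)
{
    size_t size;
    lvaddr_t addr;
    genpaddr_t paddr;
    errval_t err = spawn_map_module(module, &size, &addr, &paddr);
    if (err_is_fail(err)) {
        return err;
    }
    // ... read the module image at addr, e.g. parse its ELF headers ...
    return spawn_unmap_module(addr);
}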

/// Returns a raw pointer to the modules string area string
const char *multiboot_module_rawstring(struct mem_region *region)
{
    if (multiboot_strings == NULL) {
        errval_t err;
        /* Map in multiboot module strings area */
        struct capref mmstrings_cap = {
            .cnode = cnode_module,
            .slot = 0
        };
        err = vspace_map_one_frame_attr((void**)&multiboot_strings,
                                        BASE_PAGE_SIZE, mmstrings_cap,
                                        VREGION_FLAGS_READ,
                                        NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map failed");
            return NULL;
        }
#if 0
        printf("Mapped multiboot_strings at %p\n", multiboot_strings);
        for (int i = 0; i < 256; i++) {
            if ((i & 15) == 0) printf("%04x  ", i);
            printf ("%02x ", multiboot_strings[i]& 0xff);
            if ((i & 15) == 15) printf("\n");
        }
#endif
    }

    if (region == NULL || region->mr_type != RegionType_Module) {
        return NULL;
    }
    return multiboot_strings + region->mrmod_data;
}

errval_t multiboot_cleanup_mapping(void)
{
    errval_t err = vspace_unmap(multiboot_strings);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "multiboot_cleanup_mapping: vspace_unmap() failed\n");
        return err_push(err, LIB_ERR_VSPACE_REMOVE_REGION);
    }
    multiboot_strings = NULL;
    return SYS_ERR_OK;
}
Example #19
errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state,
                                struct capref frame, size_t size)
{
    errval_t err;
    struct vregion *vregion;
    struct capref oldframe;
    void *vbuf;

    // create copy of new region
    err = slot_alloc(&oldframe);
    if (err_is_fail(err)) {
        return err;
    }
    err = cap_copy(oldframe, frame);
    if (err_is_fail(err)) {
        return err;
    }
    err = vspace_map_one_frame_attr_aligned(&vbuf, size, oldframe,
            VREGION_FLAGS_READ_WRITE | VREGION_FLAGS_LARGE, LARGE_PAGE_SIZE,
            NULL, &vregion);
    if (err_is_fail(err)) {
        return err;
    }
    // copy over data to new frame
    genvaddr_t gen_base = vregion_get_base_addr(&state->vregion);
    memcpy(vbuf, (void*)(lvaddr_t)gen_base, state->mapoffset);

    err = vregion_destroy(vregion);
    if (err_is_fail(err)) {
        return err;
    }

    genvaddr_t offset = 0;
    // Unmap backing frames for [0, size) in state.vregion
    do {
        err = state->memobj.m.f.unfill(&state->memobj.m, 0, &oldframe,
                &offset);
        if (err_is_fail(err) &&
            err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET)
        {
            return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION);
        }
        struct frame_identity fi;
        // increase address
        err = invoke_frame_identify(oldframe, &fi);
        if (err_is_fail(err)) {
            return err;
        }
        offset += (1UL<<fi.bits);
        err = cap_destroy(oldframe);
        if (err_is_fail(err)) {
            return err;
        }
    } while(offset < state->mapoffset);

    // Map new frame in
    err = state->memobj.m.f.fill(&state->memobj.m, 0, frame, size);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_FILL);
    }
    err = state->memobj.m.f.pagefault(&state->memobj.m, &state->vregion, 0, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
    }

    state->mapoffset = size;
    return SYS_ERR_OK;
}
Example #20
static void impl_test(void)
{
    errval_t err;

    debug_printf("Doing an implementation test\n");

    struct capref frame;
    err = frame_alloc(&frame, 2 * BUFFER_SIZE, NULL);
    assert(err_is_ok(err));

    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    assert(err_is_ok(err));

    void *buf;
    err = vspace_map_one_frame(&buf, 1UL << id.bits, frame, NULL, NULL);
    assert(err_is_ok(err));

    memset(buf, 0, 1UL << id.bits);
    memset(buf, 0xA5, BUFFER_SIZE);

    struct ioat_dma_req_setup setup = {
        .type = IOAT_DMA_REQ_TYPE_MEMCPY,
        .src = id.base,
        .dst = id.base + BUFFER_SIZE,
        .bytes = BUFFER_SIZE,
        .done_cb = impl_test_cb,
        .arg = buf
    };
    int reps = 10;
    do {
        debug_printf("!!!!!! NEW ROUND\n");
        err = ioat_dma_request_memcpy(dma_ctrl.devices, &setup);
        assert(err_is_ok(err));

        uint32_t i = 10;
        while (i--) {
            ioat_dma_device_poll_channels(dma_ctrl.devices);
        }
    } while (reps--);
}
#endif

int main(int argc,
         char *argv[])
{
    errval_t err;

    debug_printf("I/O AT DMA driver started\n");

    /*
     * Parsing of cmdline arguments.
     *
     * When started by Kaluga, the last element of the cmdline will contain
     * the basic PCI information of the device.
     * VENDORID:DEVICEID:BUS:DEV:FUN
     */
    uint32_t vendor_id, device_id;

    struct pci_addr addr = {
        .bus = PCI_ADDR_DONT_CARE,
        .device = PCI_ADDR_DONT_CARE,
        .function = PCI_ADDR_DONT_CARE
    };

    enum device_type devtype = IOAT_DEVICE_INVAL;

    if (argc > 1) {
        uint32_t parsed = sscanf(argv[argc - 1], "%x:%x:%x:%x:%x", &vendor_id,
                                 &device_id, &addr.bus, &addr.device,
                                 &addr.function);
        if (parsed != 5) {
            DEBUGPRINT("WARNING: cmdline parsing failed. Using PCI Address [0,0,0]");
        } else {
            if (vendor_id != 0x8086) {
                USER_PANIC("unexpected vendor [%x]", vendor_id);
            }
            switch ((device_id & 0xFFF0)) {
                case PCI_DEVICE_IOAT_IVB0:
                    devtype = IOAT_DEVICE_IVB;
                    break;
                case PCI_DEVICE_IOAT_HSW0:
                    devtype = IOAT_DEVICE_HSW;
                    break;
                default:
                    USER_PANIC("unexpected device [%x]", device_id)
                    ;
                    break;
            }

            DEBUGPRINT("Initializing I/O AT DMA device with PCI address [%u,%u,%u]\n",
                       addr.bus, addr.device, addr.function);
        }
    } else {
        DEBUGPRINT("WARNING: Initializing I/O AT DMA device with unknown PCI address "
                   "[0,0,0]\n");
    }

    err = ioat_device_discovery(addr, devtype, IOAT_DMA_OPERATION);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "DMA Device discovery failed");
    }

#if DMA_BENCH_RUN_BENCHMARK
    struct ioat_dma_device *dev = ioat_device_get_next();
    dma_bench_run_default(dev);
#endif

#if IOAT_DMA_OPERATION == IOAT_DMA_OPERATION_SERVICE

    iref_t svc_iref;
    char svc_name[30];
    uint8_t numa_node = (disp_get_core_id() >= 20);
    snprintf(svc_name, 30, "%s.%u", IOAT_DMA_SERVICE_NAME, numa_node);
    err = dma_service_init_with_name(svc_name, &dma_svc_cb, NULL, &svc_iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to start the DMA service");
    }

    err = dma_manager_register_driver(0, 1ULL << 40, DMA_DEV_TYPE_IOAT, svc_iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to register with the DMA manager\n");
    }

    DEBUGPRINT("Driver registered with DMA manager. Serving requests now.\n");

#endif

#if IOAT_DMA_OPERATION == IOAT_DMA_OPERATION_LIBRARY

#endif
    uint8_t idle = 0x1;
    uint32_t idle_counter = 0xFF;
    while (1) {
        err = ioat_device_poll();
        switch (err_no(err)) {
            case DMA_ERR_DEVICE_IDLE:
                idle &= 1;
                break;
            case SYS_ERR_OK:
                idle = 0;
                break;
            default:
                debug_printf("I/O AT DMA driver terminated: in poll, %s\n",
                             err_getstring(err));
                return err;
        }
        err = event_dispatch_non_block(get_default_waitset());
        switch (err_no(err)) {
            case SYS_ERR_OK:
                idle = 0;
                break;
            case LIB_ERR_NO_EVENT:
                idle &= 1;
                break;
            default:
                debug_printf("I/O AT DMA driver terminated in dispatch,  %s\n",
                             err_getstring(err));
                return err;
        }
        if (idle) {
            idle_counter--;
        }
        if (idle_counter == 0) {
            idle_counter = 0xFF;
            thread_yield();
        }
    }

    return 0;
}
static errval_t replicate_frame(lvaddr_t addr, struct capref *frame)
{
    errval_t err;

#if XOMP_BENCH_WORKER_EN
    cycles_t repl_timer = bench_tsc();
#endif

    struct frame_identity id;
    err = invoke_frame_identify(*frame, &id);
    if (err_is_fail(err)) {
        return err;
    }

    XWR_DEBUG("Replicating frame: [%016lx]\n", id.base);

    struct capref replicate;
    err = frame_alloc(&replicate, (1UL << id.bits), NULL);
    if (err_is_fail(err)) {
        return err;
    }

    XWR_DEBUG("registering memory with DMA service\n");

#if XOMP_BENCH_WORKER_EN
    cycles_t register_timer = bench_tsc();
#endif
    err = dma_register_memory((struct dma_device *) dma_dev, *frame);
    if (err_is_fail(err)) {
        return err;
    }

    err = dma_register_memory((struct dma_device *) dma_dev, replicate);
    if (err_is_fail(err)) {
        return err;
    }

#if XOMP_BENCH_WORKER_EN
    cycles_t register_timer_end = bench_tsc();
#endif

    struct dma_req_setup setup = {
        .done_cb = dma_replication_cb,
        .cb_arg = NULL,
        .args = {
            .memcpy = {
                .src = id.base,
                .bytes = (1UL << id.bits)
            }
        }
    };

    err = invoke_frame_identify(replicate, &id);
    if (err_is_fail(err)) {
        return err;
    }
    setup.args.memcpy.dst = id.base;

    dma_replication_done = 0x0;

    XWR_DEBUG("DMA request for replication\n");

    err = dma_request_memcpy((struct dma_device *)dma_dev, &setup, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    while (!dma_replication_done) {
        messages_wait_and_handle_next();
    }

    XWR_DEBUG("Replication done.\n");

    *frame = replicate;

#if XOMP_BENCH_WORKER_EN
    cycles_t timer_end = bench_tsc();
    debug_printf("%lx replication took %lu cycles, %lu ms\n", worker_id,
                 timer_end - repl_timer, bench_tsc_to_ms(timer_end - repl_timer));
    debug_printf("%lx register mem took %lu cycles, %lu ms\n", worker_id,
                 register_timer_end - register_timer, bench_tsc_to_ms(register_timer_end - register_timer));
#endif

    return SYS_ERR_OK;
}
Example #22
/**
 * \brief initializes a IOAT DMA device with the giving capability
 *
 * \param mmio capability representing the device's MMIO registers
 * \param dev  returns a pointer to the device structure
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_init(struct capref mmio,
                              struct ioat_dma_device **dev)
{
    errval_t err;

    struct ioat_dma_device *ioat_device = calloc(1, sizeof(*ioat_device));
    if (ioat_device == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

#if DMA_BENCH_ENABLED
     bench_init();
#endif

    struct dma_device *dma_dev = &ioat_device->common;

    struct frame_identity mmio_id;
    err = invoke_frame_identify(mmio, &mmio_id);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    dma_dev->id = device_id++;
    dma_dev->mmio.paddr = mmio_id.base;
    dma_dev->mmio.bytes = (1UL << mmio_id.bits);
    dma_dev->mmio.frame = mmio;

    IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%u kB}\n",
                  dma_dev->id, mmio_id.base, 1 << mmio_id.bits);

    err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr,
                                    dma_dev->mmio.bytes, dma_dev->mmio.frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL,
                                    NULL);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    ioat_dma_initialize(&ioat_device->device, NULL, (void *) dma_dev->mmio.vaddr);

    ioat_device->version = ioat_dma_cbver_rd(&ioat_device->device);

    IOATDEV_DEBUG("device registers mapped at 0x%016lx. IOAT version: %u.%u\n",
                  dma_dev->id, dma_dev->mmio.vaddr,
                  ioat_dma_cbver_major_extract(ioat_device->version),
                  ioat_dma_cbver_minor_extract(ioat_device->version));

    switch (ioat_dma_cbver_major_extract(ioat_device->version)) {
        case ioat_dma_cbver_1x:
            err = device_init_ioat_v1(ioat_device);
            break;
        case ioat_dma_cbver_2x:
            err = device_init_ioat_v2(ioat_device);
            break;
        case ioat_dma_cbver_3x:
            err = device_init_ioat_v3(ioat_device);
            break;
        default:
            err = DMA_ERR_DEVICE_UNSUPPORTED;
    }

    if (err_is_fail(err)) {
        vspace_unmap((void*) dma_dev->mmio.vaddr);
        free(ioat_device);
        return err;
    }

    dma_dev->f.deregister_memory = NULL;
    dma_dev->f.register_memory = NULL;
    dma_dev->f.poll = ioat_dma_device_poll_channels;

    *dev = ioat_device;

    return err;
}
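
A minimal bring-up sketch, assuming the caller already holds the device's BAR frame capability (e.g. obtained via the PCI driver; the example_ name is hypothetical):

static errval_t example_ioat_bringup(struct capref bar_mmio)
{
    struct ioat_dma_device *dev;
    errval_t err = ioat_dma_device_init(bar_mmio, &dev);
    if (err_is_fail(err)) {
        return err;
    }
    // dev is now mapped and version-checked; hand it to the request path
    return SYS_ERR_OK;
}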
Example #23
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    lvaddr_t vaddr;
    size_t used_size;

    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Step 1: Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Step 2: create copies of the frame capabilities for child vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot = vspace_slot + copy_idx,
        };

        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                    ", dest_slot = %"PRIuCSLOT"\n", frame.slot,
                    spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    // Step 3: map into own vspace

    // Get virtual address range to hold the module
    void *vaddr_range;
    err = paging_alloc(get_current_paging_state(), &vaddr_range, size);
    if (err_is_fail(err)) {
        debug_printf("elf_allocate: paging_alloc failed\n");
        return (err);
    }

    // map allocated physical memory in virtual memory of parent process
    vaddr = (lvaddr_t)vaddr_range;
    used_size = size;

    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(get_current_paging_state(), vaddr, frame, slot_size,
                VREGION_FLAGS_READ_WRITE);

        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr +=  slot_size;
    } // end while:


    // Step 4: map into new process
    struct paging_state *cp = si->vspace;

    // map allocated physical memory in virtual memory of child process
    vaddr = (lvaddr_t)base;
    used_size = size;

    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(cp, vaddr, frame, slot_size,
                elf_to_vregion_flags(flags));

        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr +=  slot_size;
    } // end while:

    *retbase = (void*) vaddr_range + base_offset;

    return SYS_ERR_OK;
} // end function: elf_allocate

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void** arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    //debug_printf("spawn_arch_load: about to load elf %p\n", elf_allocate);
    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    //debug_printf("hello here\n");
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr)
    {
        *arch_info = (void*)got_shdr->sh_addr;
    }
    else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
}
Example #24
/**
 * \brief A monitor receives request to setup a connection
 * with another newly booted monitor from a third monitor
 */
static void bind_monitor_request_scc(struct intermon_binding *b,
                                     coreid_t core_id,
                                     intermon_caprep_t caprep,
                                     chanid_t chan_id,
                                     coreid_t from_core_id)
{
    struct intermon_ump_ipi_binding *umpb = NULL;
    errval_t err;

    /* Create the cap */
    struct capability cap_raw;
    caprep_to_capability(&caprep, &cap_raw);
    if (cap_raw.type != ObjType_Frame) {
        err = MON_ERR_WRONG_CAP_TYPE;
        goto error;
    }

    struct capref frame;
    err = slot_alloc(&frame);
    if (err_is_fail(err)) {
        goto error;
    }

    ram_set_affinity(cap_raw.u.frame.base, cap_raw.u.frame.base + ((genpaddr_t)1 << cap_raw.u.frame.bits));
    err = frame_alloc(&frame, ((genpaddr_t)1 << cap_raw.u.frame.bits), NULL);
    ram_set_affinity(0,0);

    /* err = monitor_cap_create(frame, &cap_raw, core_id); */
    if (err_is_fail(err)) {
        goto error;
    }

    struct frame_identity frameid = { .base = 0, .bits = 0 };
    err = invoke_frame_identify(frame, &frameid);
    assert(err == SYS_ERR_OK);

    printf("bind_monitor_request: URPC physical frame at 0x%llx\n", frameid.base);

    /* Setup the connection */
    void *buf;
    err = vspace_map_one_frame_attr(&buf, MON_URPC_SIZE, frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE, NULL,
                                    NULL);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_VSPACE_MAP);
        goto error;
    }

    // Create remote notify cap
    struct capref notify_cap;
    err = notification_create_cap(chan_id, core_id, &notify_cap);
    assert(err == SYS_ERR_OK);

    // Allocate my own notification caps
    struct capref ep, my_notify_cap;
    struct lmp_endpoint *iep;
    int chanid;
    err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
    assert(err_is_ok(err));
    err = notification_allocate(ep, &chanid);
    assert(err == SYS_ERR_OK);
    err = notification_create_cap(chanid, my_core_id, &my_notify_cap);
    assert(err == SYS_ERR_OK);

    // setup our side of the binding
    umpb = malloc(sizeof(struct intermon_ump_ipi_binding));
    assert(umpb != NULL);

    err = intermon_ump_ipi_init(umpb, get_default_waitset(),
                                buf + MON_URPC_CHANNEL_LEN,
                                MON_URPC_CHANNEL_LEN,
                                buf, MON_URPC_CHANNEL_LEN, notify_cap,
                                my_notify_cap, ep, iep);
    assert(err_is_ok(err));

    // Identify UMP frame for tracing
    struct frame_identity umpid;
    err = invoke_frame_identify(frame, &umpid);
    assert(err_is_ok(err));
    umpb->ump_state.chan.recvid = (uintptr_t)umpid.base;
    umpb->ump_state.chan.sendid =
        (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN);

    // connect it to our request handlers
    err = intermon_init(&umpb->b, core_id);
    assert(err_is_ok(err));

    /* Send reply */
reply:
    assert(umpb != NULL);
    bind_monitor_reply_scc_cont(&umpb->b, err, chanid);
    return;

error:
    assert(!"Argh");
    // FIXME: cleanup!
    goto reply;
}
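
The function above recreates the sender's URPC frame by bracketing frame_alloc with ram_set_affinity. The same trick as a standalone sketch (assuming, as the code above does, that ram_set_affinity(0, 0) restores the unconstrained default):

static errval_t frame_alloc_at(genpaddr_t base, uint8_t bits,
                               struct capref *retframe)
{
    errval_t err;

    // Constrain the RAM allocator to [base, base + 2^bits)
    ram_set_affinity(base, base + ((genpaddr_t)1 << bits));
    err = frame_alloc(retframe, (size_t)1 << bits, NULL);
    // Lift the constraint again, whether or not the allocation succeeded
    ram_set_affinity(0, 0);

    return err;
}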

/**
 * \brief The monitor that proxied the request for one monitor to set up a
 * connection with another monitor receives the reply
 */
static void bind_monitor_reply_scc(struct intermon_binding *binding,
                                   errval_t err, chanid_t chan_id,
                                   coreid_t core_id)
{
    struct intermon_ump_ipi_binding *b = (struct intermon_ump_ipi_binding *)binding;
    errval_t err2;

    // Report a failed bind before the incoming error is overwritten
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Got error in bind monitor reply");
    }

    // Create notify cap to that core
    struct capref notify_cap;
    err2 = notification_create_cap(chan_id, core_id, &notify_cap);
    assert(err2 == SYS_ERR_OK);

    // And assign it to the binding
    err2 = ipi_notify_set(&b->ipi_notify, notify_cap);
    assert(err_is_ok(err2));
}

/******* stack-ripped bind_monitor_proxy_scc *******/

static void bind_monitor_request_scc_handler(struct intermon_binding *b,
        struct intermon_msg_queue_elem *e);

struct bind_monitor_request_scc_state {
    struct intermon_msg_queue_elem elem;
    struct intermon_bind_monitor_request_scc__args args;
};

static void bind_monitor_request_scc_cont(struct intermon_binding *dst_binding,
        coreid_t src_core_id,
        intermon_caprep_t caprep,
        chanid_t chan_id,
        coreid_t core_id)
{
    errval_t err;

    err = dst_binding->tx_vtbl.
          bind_monitor_request_scc(dst_binding, NOP_CONT, src_core_id,
                                   caprep, chan_id, core_id);
    if (err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // TX channel busy: queue the arguments together with a
            // continuation that re-issues this send when TX is free
            struct bind_monitor_request_scc_state *me =
                malloc(sizeof(struct bind_monitor_request_scc_state));
            assert(me != NULL);
            struct intermon_state *ist = dst_binding->st;
            assert(ist != NULL);
            me->args.core_id = src_core_id;
            me->args.cap = caprep;
            me->args.chan_id = chan_id;
            me->args.from_core_id = core_id;
            me->elem.cont = bind_monitor_request_scc_handler;

            err = intermon_enqueue_send(dst_binding, &ist->queue,
                                        get_default_waitset(), &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        DEBUG_ERR(err, "forwarding bind request failed");
    }
}
Beispiel #25
0
int map_unmap(void)
{
    errval_t err;
    struct capref mem;

    DEBUG_MAP_UNMAP("ram_alloc\n");
    err = ram_alloc(&mem, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        printf("ram_alloc: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    struct capref frame;
    DEBUG_MAP_UNMAP("retype\n");
    err = slot_alloc(&frame);
    if (err_is_fail(err)) {
        printf("slot_alloc: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    err = cap_retype(frame, mem, ObjType_Frame, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        printf("cap_retype: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    DEBUG_MAP_UNMAP("delete ram cap\n");
    err = cap_destroy(mem);
    if (err_is_fail(err)) {
        printf("cap_delete(mem): %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    struct frame_identity fi;
    err = invoke_frame_identify(frame, &fi);
    if (err_is_fail(err)) {
        printf("frame_identify: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    DEBUG_MAP_UNMAP("frame: base = 0x%"PRIxGENPADDR", bits = %d\n", fi.base, fi.bits);

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    dump_pmap(get_current_pmap());
#endif

    struct vregion *vr;
    struct memobj *memobj;
    void *vaddr;
    DEBUG_MAP_UNMAP("map\n");
    err = vspace_map_one_frame(&vaddr, BASE_PAGE_SIZE, frame, &memobj, &vr);
    if (err_is_fail(err)) {
        printf("vspace_map_one_frame: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    char *memory = vaddr;
    DEBUG_MAP_UNMAP("vaddr = %p\n", vaddr);

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    dump_pmap(get_current_pmap());
#endif

    DEBUG_MAP_UNMAP("write 1\n");
    int i;
    for (i = 0; i < BASE_PAGE_SIZE; i++) {
        memory[i] = i % INT8_MAX;
    }
    DEBUG_MAP_UNMAP("verify 1\n");
    for (i = 0; i < BASE_PAGE_SIZE; i++) {
        assert(memory[i] == i % INT8_MAX);
    }

    DEBUG_MAP_UNMAP("delete frame cap\n");
    err = cap_destroy(frame);
    if (err_is_fail(err)) {
        printf("cap_delete(frame): %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    // no mapping should remain here
    dump_pmap(get_current_pmap());
    err = debug_dump_hw_ptables();
    if (err_is_fail(err)) {
        printf("kernel dump ptables: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
#endif

    printf("%s: done\n", __FUNCTION__);
    return 0;
}
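
map_unmap derives its frame cap from RAM by hand. The retype chain as a reusable sketch, assuming the same cap_retype signature used above; the helper name is illustrative:

static errval_t ram_to_frame(uint8_t bits, struct capref *retframe)
{
    errval_t err;
    struct capref mem;

    err = ram_alloc(&mem, bits);
    if (err_is_fail(err)) {
        return err;
    }

    // Retype the RAM cap into a Frame cap in a freshly allocated slot
    err = slot_alloc(retframe);
    if (err_is_fail(err)) {
        return err;
    }
    err = cap_retype(*retframe, mem, ObjType_Frame, bits);
    if (err_is_fail(err)) {
        return err;
    }

    // The RAM cap is no longer needed once the Frame exists
    return cap_destroy(mem);
}
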
static void gw_req_memory_call_rx(struct xomp_binding *b,
                                  uint64_t addr,
                                  uint8_t type)
{
    XWI_DEBUG("gw_req_memory_call_rx: addr:%lx, tyep: %u\n", addr, type);

#if XOMP_BENCH_WORKER_EN
    cycles_t mem_timer = bench_tsc();
#endif

    struct txq_msg_st *msg_st = txq_msg_st_alloc(&txq);
    assert(msg_st != NULL);

    struct capref frame;
    if (type == XOMP_FRAME_TYPE_REPL_RW) {
        type = XOMP_FRAME_TYPE_SHARED_RW;
    }
    assert(!(worker_id & XOMP_WID_GATEWAY_FLAG));

    // Failures from here on are reported back to the requester through
    // msg_st->err when the reply goes out via txq_send()
    msg_st->send = gw_req_memory_response_tx;
    msg_st->cleanup = NULL;

    XWR_DEBUG("Requesting frame from gateway: [%016lx]\n", usrdata);

    msg_st->err = xomp_gateway_get_memory(addr, &frame);
    if (err_is_fail(msg_st->err)) {
        txq_send(msg_st);
        return;
    }

    vregion_flags_t map_flags;

    switch ((xomp_frame_type_t) type) {
        case XOMP_FRAME_TYPE_MSG:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RW:
        case XOMP_FRAME_TYPE_REPL_RW:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RO:
            map_flags = VREGION_FLAGS_READ;
            break;
        default:
            USER_PANIC("unknown type: %u", type);
            break;
    }

    struct frame_identity id;
    msg_st->err = invoke_frame_identify(frame, &id);
    if (err_is_fail(msg_st->err)) {
        txq_send(msg_st);
        return;
    }

    if (addr) {
        msg_st->err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits),
                                                      frame, map_flags, NULL, NULL);
    } else {
        void *map_addr;
        msg_st->err = vspace_map_one_frame_attr(&map_addr, (1UL << id.bits),
                                                frame, map_flags, NULL, NULL);
    }

#if XOMP_BENCH_WORKER_EN
    mem_timer = bench_tsc() - mem_timer;
    debug_printf("%lx mem request %016lx took  %lu cycles, %lu ms\n", worker_id,
                 addr, mem_timer, bench_tsc_to_ms(mem_timer));
#endif

    txq_send(msg_st);
}
Beispiel #27
0
static void monitor_bind_ump_client_request(struct monitor_binding *mb,
                                            iref_t iref, uintptr_t domain_id,
                                            struct capref frame,
                                            size_t channel_length_in,
                                            size_t channel_length_out,
                                            struct capref notify)
{
    uint8_t core_id;
    uintptr_t conn_id = 0;
    errval_t err;
    struct remote_conn_state *conn = NULL;

    // Get the core id
    err = iref_get_core_id(iref, &core_id);
    if (err_is_fail(err)) {
        debug_err(__FILE__, __func__, __LINE__, err, "iref_get_core_id failed");
        monitor_bind_ump_client_request_error(mb, frame, conn_id, domain_id, err);
        return;
    }

    if (core_id == my_core_id) {
        USER_PANIC("Same-core UMP binding NYI");
    }

    /* Identify frame */
    struct frame_identity frameid;
    err = invoke_frame_identify(frame, &frameid);
    if (err_is_fail(err)) {
        debug_err(__FILE__, __func__, __LINE__, err, "frame_identify failed");
        monitor_bind_ump_client_request_error(mb, frame, conn_id, domain_id, err);
        return;
    }

    // Identify notify cap
    struct capability capability;
    err = monitor_cap_identify(notify, &capability);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_cap_identify failed, ignored");
        return;
    }
    assert(capability.type == ObjType_Notify_RCK
           || capability.type == ObjType_Notify_IPI
           || capability.type == ObjType_Null);
    /* assert(capability.u.notify.coreid == my_core_id); */

    /* Forward request to the corresponding monitor */
    // Create local state
    err = remote_conn_alloc(&conn, &conn_id, REMOTE_CONN_UMP);
    if (err_is_fail(err)) {
        debug_err(__FILE__, __func__, __LINE__, err, "remote_conn_alloc failed");
        monitor_bind_ump_client_request_error(mb, frame, conn_id, domain_id, err);
        return;
    }

    // Track data
    conn->domain_id = domain_id;
    conn->domain_binding = mb;
    conn->x.ump.frame = frame;
    conn->core_id = core_id;

    // Get connection to the monitor to forward request to
    struct intermon_binding *intermon_binding;
    err = intermon_binding_get(core_id, &intermon_binding);
    if (err_is_fail(err)) {
        debug_err(__FILE__, __func__, __LINE__, err, "intermon_binding_get failed");
        monitor_bind_ump_client_request_error(mb, frame, conn_id, domain_id, err);
        return;
    }

    bind_ump_request_cont(intermon_binding, iref, conn_id, channel_length_in,
                          channel_length_out, frameid, capability, mb, frame,
                          domain_id);
}
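
The handler above identifies the shared frame but forwards the channel lengths unchecked. A hedged sketch of a sanity check one could run before forwarding (not part of the original code; names follow the handler above):

static bool ump_channels_fit(struct frame_identity *frameid,
                             size_t channel_length_in,
                             size_t channel_length_out)
{
    genpaddr_t frame_size = (genpaddr_t)1 << frameid->bits;
    return (genpaddr_t)channel_length_in + channel_length_out <= frame_size;
}
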
/**
 * \brief initializes the XOMP worker library
 *
 * \param wid   Xomp worker id
 *
 * \returns SYS_ERR_OK on success
 *          errval on failure
 */
errval_t xomp_worker_init(xomp_wid_t wid)
{
    errval_t err;

    worker_id = wid;

    XWI_DEBUG("initializing worker {%016lx} iref:%u\n", worker_id, svc_iref);

#if XOMP_BENCH_WORKER_EN
    bench_init();
#endif

    struct capref frame = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_ARGCN
    };

    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err_push(err, XOMP_ERR_INVALID_MSG_FRAME);
    }

    size_t frame_size = 0;

    if (svc_iref) {
        frame_size = XOMP_TLS_SIZE;
    } else {
        frame_size = XOMP_FRAME_SIZE;
        err = spawn_symval_cache_init(0);
        if (err_is_fail(err)) {
            return err;
        }
    }

    if ((1UL << id.bits) < frame_size) {
        return XOMP_ERR_INVALID_MSG_FRAME;
    }

    msgframe = frame;

    err = vspace_map_one_frame(&msgbuf, frame_size, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }
    if (svc_iref) {
        tls = msgbuf;
    } else {
        tls = ((uint8_t *) msgbuf) + XOMP_MSG_FRAME_SIZE;
    }

    XWI_DEBUG("messaging frame mapped: [%016lx] @ [%016lx]\n", id.base,
              (lvaddr_t )msgbuf);

    struct bomp_thread_local_data *tlsinfo = malloc(sizeof(*tlsinfo));
    tlsinfo->thr = thread_self();
    tlsinfo->work = (struct bomp_work *) tls;
    tlsinfo->work->data = tlsinfo->work + 1;
    g_bomp_state->backend.set_tls(tlsinfo);

#ifdef __k1om__
    if (worker_id & XOMP_WID_GATEWAY_FLAG) {
        err = xomp_gateway_init();
    } else {
        if (!svc_iref) {
            err = xomp_gateway_bind_svc();
        } else {
            err = SYS_ERR_OK;
        }
    }
    if (err_is_fail(err)) {
        return err;
    }
#endif

#ifdef __k1om__
    if (!svc_iref) {
        err = xeon_phi_client_init(disp_xeon_phi_id());
        if (err_is_fail(err)) {
            return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
        }

        xeon_phi_client_set_callbacks(&callbacks);
    }
#endif

    struct waitset *ws = get_default_waitset();

// XXX: DMA is disabled on the host, as no replication is used at the moment
#if XOMP_WORKER_ENABLE_DMA && defined(__k1om__)
    /* XXX: use lib numa */

#ifndef __k1om__
    uint8_t numanode = 0;
    if (disp_get_core_id() > 20) {
        numanode = 1;
    }

    err = dma_manager_wait_for_driver(dma_device_type, numanode);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not wait for the DMA driver");
    }
#endif
    char svc_name[30];
#ifdef __k1om__
    snprintf(svc_name, 30, "%s", XEON_PHI_DMA_SERVICE_NAME);
#else
    snprintf(svc_name, 30, "%s.%u", IOAT_DMA_SERVICE_NAME, numanode);
#endif

    struct dma_client_info dma_info = {
        .type = DMA_CLIENT_INFO_TYPE_NAME,
        .device_type = dma_device_type,
        .args.name = svc_name
    };
    err = dma_client_device_init(&dma_info, &dma_dev);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "DMA device initialization");
    }
#endif

    if (svc_iref) {
        err = xomp_bind(svc_iref, master_bind_cb, NULL, ws,
                        IDC_EXPORT_FLAGS_DEFAULT);
    } else {
        struct xomp_frameinfo fi = {
            .sendbase = id.base,
            .inbuf = ((uint8_t *) msgbuf) + XOMP_MSG_CHAN_SIZE,
            .inbufsize = XOMP_MSG_CHAN_SIZE,
            .outbuf = ((uint8_t *) msgbuf),
            .outbufsize = XOMP_MSG_CHAN_SIZE
        };
        err = xomp_connect(&fi, master_bind_cb, NULL, ws,
                           IDC_EXPORT_FLAGS_DEFAULT);
    }

    if (err_is_fail(err)) {
        /* TODO: Clean up */
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }

    XWI_DEBUG("Waiting until bound to master...\n");

    while (!is_bound) {
        messages_wait_and_handle_next();
    }

    if (xbinding == NULL) {
        return XOMP_ERR_WORKER_INIT_FAILED;
    }

    return SYS_ERR_OK;
}
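
A hypothetical caller of xomp_worker_init, assuming the worker id arrives as a hex string in argv[1]; the real spawn protocol is not shown on this page:

int main(int argc, char *argv[])
{
    assert(argc >= 2);

    // Hypothetical handover: worker id as a hex command-line argument
    xomp_wid_t wid = strtoull(argv[1], NULL, 16);

    errval_t err = xomp_worker_init(wid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "xomp_worker_init failed");
    }

    // ... the worker's message loop would follow here ...
    return 0;
}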
Beispiel #29
0
static void span_domain_request(struct monitor_binding *mb,
                                uintptr_t domain_id, uint8_t core_id,
                                struct capref vroot, struct capref disp)
{
    errval_t err, err2;

    trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_MONITOR_SPAN0, core_id);

    struct span_state *state;
    uintptr_t state_id = 0;

    err = span_state_alloc(&state, &state_id);
    if (err_is_fail(err)) {
        err = err_push(err, MON_ERR_SPAN_STATE_ALLOC);
        goto reply;
    }

    state->core_id   = core_id;
    state->vroot     = vroot;
    state->mb        = mb;
    state->domain_id = domain_id;

    trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_MONITOR_SPAN1, core_id);

    /* Look up the destination monitor */
    struct intermon_binding *ib;
    err = intermon_binding_get(core_id, &ib);
    if (err_is_fail(err)) {
        goto reply;
    }

    /* Identify vroot */
    struct capability vroot_cap;
    err = monitor_cap_identify(vroot, &vroot_cap);
    if (err_is_fail(err)) {
        err = err_push(err, MON_ERR_CAP_IDENTIFY);
        goto reply;
    }
    if (vroot_cap.type != ObjType_VNode_x86_64_pml4) { /* Check type */
        err = MON_ERR_WRONG_CAP_TYPE;
        goto reply;
    }

    /* Identify the dispatcher frame */
    struct frame_identity frameid;
    err = invoke_frame_identify(disp, &frameid);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_FRAME_IDENTIFY);
        goto reply;
    }

    err = monitor_remote_relations(disp, RRELS_COPY_BIT, RRELS_COPY_BIT, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_remote_relations failed");
        return;
    }
    err = monitor_remote_relations(vroot, RRELS_COPY_BIT, RRELS_COPY_BIT, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_remote_relations failed");
        return;
    }

    /* Send msg to destination monitor */
    err = ib->tx_vtbl.span_domain_request(ib, NOP_CONT, state_id,
                                          vroot_cap.u.vnode_x86_64_pml4.base,
                                          frameid.base, frameid.bits);

    if (err_is_fail(err)) {
        err = err_push(err, MON_ERR_SEND_REMOTE_MSG);
        goto reply;
    }
    goto cleanup;

 reply:
    err2 = mb->tx_vtbl.span_domain_reply(mb, NOP_CONT, err, domain_id);
    if (err_is_fail(err2)) {
        // XXX: Cleanup?
        USER_PANIC_ERR(err2, "Failed to reply to the user domain");
    }
    if(state_id != 0) {
        err2 = span_state_free(state_id);
        if (err_is_fail(err2)) {
            USER_PANIC_ERR(err2, "Failed to free span state");
        }
    }

 cleanup:
    err2 = cap_destroy(vroot);
    if (err_is_fail(err2)) {
        USER_PANIC_ERR(err2, "Failed to destroy span_vroot cap");
    }
    err2 = cap_destroy(disp);
    if (err_is_fail(err2)) {
        USER_PANIC_ERR(err2, "Failed to destroy disp cap");
    }
}
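
These monitor snippets all use the same error-wrapping idiom: err_push returns a new errval with the higher-level code pushed on top, so its result must be assigned or returned, never discarded. A minimal sketch:

static errval_t identify_disp_frame(struct capref disp,
                                    struct frame_identity *frameid)
{
    errval_t err = invoke_frame_identify(disp, frameid);
    if (err_is_fail(err)) {
        // err_push returns the combined error: assign or return it
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }
    return SYS_ERR_OK;
}
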
/*
 * ----------------------------------------------------------------------------
 * Xeon Phi Channel callbacks
 * ----------------------------------------------------------------------------
 */
static errval_t msg_open_cb(xphi_dom_id_t domain,
                            uint64_t usrdata,
                            struct capref frame,
                            uint8_t type)
{
    errval_t err;

    vregion_flags_t map_flags = 0x0;
    lvaddr_t addr = 0x0;

    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err;
    }

    XWI_DEBUG("msg_open_cb: from domid:%lx, usrdata:%lx, frame:%lx\n", domain,
              usrdata, id.base);

    switch ((xomp_frame_type_t) type) {
        case XOMP_FRAME_TYPE_MSG:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RW:
            addr = (lvaddr_t) usrdata;
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RO:
            map_flags = VREGION_FLAGS_READ;
            break;
        case XOMP_FRAME_TYPE_REPL_RW:
            map_flags = VREGION_FLAGS_READ_WRITE;
#if XOMP_WORKER_ENABLE_DMA
            addr = (lvaddr_t) usrdata;
            err = replicate_frame(addr, &frame);
            if (err_is_fail(err)) {
                return err;
            }
            err = invoke_frame_identify(frame, &id);
#else
            struct capref replicate;
            err = frame_alloc(&replicate, (1UL << id.bits), NULL);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "failed to allocate replicate frame\n");
                return err;
            }
            err = vspace_map_one_frame_fixed_attr((lvaddr_t) usrdata, (1UL << id.bits),
                                                  replicate, map_flags, NULL, NULL);
            if (err_is_fail(err)) {
                return err;
            }
            err = invoke_frame_identify(replicate, &id);
#endif
            if (err_is_fail(err)) {
                return err;
            }
            break;
        default:
            USER_PANIC("unknown type: %u", type);
            break;
    }
    if (addr) {
        if (worker_id & XOMP_WID_GATEWAY_FLAG) {
            XWR_DEBUG("registering memory with gateway: [%016lx]\n", addr);
            err = xomp_gateway_mem_insert(frame, addr);
            if (err_is_fail(err)) {
                /* todo: cleanup */
                return err;
            }
        }
        err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits), frame,
                                              map_flags, NULL, NULL);
    } else {
        err = vspace_map_one_frame_attr((void **) &addr, (1UL << id.bits), frame,
                                        map_flags, NULL, NULL);
    }
    if (err_is_fail(err)) {
        return err;
    }

#if !XOMP_WORKER_ENABLE_DMA
    if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_REPL_RW) {
        memcpy((void *)usrdata, (void *)addr, (1UL << id.bits));
    }
#endif

    XWI_DEBUG("msg_open_cb: frame [%016lx] mapped @ [%016lx, %016lx]\n", id.base,
              addr, addr + (1UL << id.bits));

    if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_MSG) {
        USER_PANIC("NYI: initializing messaging");
    }

    return SYS_ERR_OK;
}
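
Nearly every snippet above turns a frame_identity into a byte count via (1UL << id.bits). As a small readability helper (sketch):

static inline size_t frame_id_size(const struct frame_identity *id)
{
    // Frames cover 2^bits bytes
    return (size_t)1 << id->bits;
}

With it, calls like vspace_map_one_frame(&buf, frame_id_size(&id), frame, NULL, NULL) read a little more directly.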