/**
 * \brief Handler for the create_queue RPC: maps the client's descriptor
 *        frames and initializes the server-side descq state.
 *
 * \param b             binding the request arrived on; b->st holds the descq
 * \param slots         number of descriptor slots (incl. the seq_ack slot)
 * \param rx            frame cap backing the client's RX ring
 * \param tx            frame cap backing the client's TX ring
 * \param notifications whether notifications are enabled for this queue
 * \param role          role of the queue
 * \param err           out: result reported back to the client
 * \param queue_id      out: id assigned to the newly created queue
 *
 * \returns SYS_ERR_OK always; the real outcome travels in *err.
 */
static errval_t mp_create(struct descq_binding* b, uint32_t slots,
                          struct capref rx, struct capref tx,
                          bool notifications, uint8_t role,
                          errval_t *err, uint64_t *queue_id)
{
    struct descq* q = (struct descq*) b->st;
    DESCQ_DEBUG("start %p\n", q);

    // switch RX/TX for correct setup: our RX ring is the client's TX frame
    *err = vspace_map_one_frame_attr((void**) &(q->rx_descs),
                                     slots * DESCQ_ALIGNMENT, tx,
                                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(*err)) {
        goto end2;
    }

    *err = vspace_map_one_frame_attr((void**) &(q->tx_descs),
                                     slots * DESCQ_ALIGNMENT, rx,
                                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(*err)) {
        goto end1;
    }

    // the first slot of each ring holds the sequence-ack header; the
    // descriptor arrays proper start right after it
    q->tx_seq_ack = (void*)q->tx_descs;
    q->rx_seq_ack = (void*)q->rx_descs;
    q->tx_descs++;
    q->rx_descs++;
    q->slots = slots - 1;
    q->rx_seq = 1;
    q->tx_seq = 1;

    devq_init(&q->q, true);

    q->q.f.enq = descq_enqueue;
    q->q.f.deq = descq_dequeue;
    q->q.f.notify = descq_notify;
    q->q.f.reg = descq_register;
    q->q.f.dereg = descq_deregister;
    q->q.f.ctrl = descq_control;
    q->q.f.destroy = descq_destroy;

    notificator_init(&q->notificator, q, descq_can_read, descq_can_write);
    *err = waitset_chan_register(get_default_waitset(),
                                 &q->notificator.ready_to_read,
                                 MKCLOSURE(mp_notify, q));
    assert(err_is_ok(*err));

    *err = q->f.create(q, notifications, role, queue_id);
    if (err_is_ok(*err)) {
        goto end2;
    }

    // BUGFIX: create failed.  Previously control fell into end1, which
    // (a) overwrote *err with the vspace_unmap() result so the caller saw
    // success, and (b) unmapped q->rx_descs AFTER it was advanced past the
    // seq_ack header (wrong address) while leaking the TX mapping.
    // Tear down both mappings via the saved base pointers and keep the
    // original failure code in *err.
    {
        errval_t cleanup_err = vspace_unmap((void *)q->tx_seq_ack);
        assert(err_is_ok(cleanup_err));
        cleanup_err = vspace_unmap((void *)q->rx_seq_ack);
        assert(err_is_ok(cleanup_err));
    }
    goto end2;

end1:
    // TX mapping failed: release the RX mapping (still at its base address)
    // without clobbering the failure code in *err
    {
        errval_t cleanup_err = vspace_unmap(q->rx_descs);
        assert(err_is_ok(cleanup_err));
    }
end2:
    DESCQ_DEBUG("end \n");
    return SYS_ERR_OK;
}
/** * \brief Wrapper for creating and mapping a memory object of type one frame */ errval_t vspace_map_one_frame(void **retaddr, size_t size, struct capref frame, struct memobj **retmemobj, struct vregion **retvregion) { return vspace_map_one_frame_attr(retaddr, size, frame, VREGION_FLAGS_READ_WRITE, retmemobj, retvregion); }
/**
 * \brief initializes a dma descriptor ring and allocates memory for it
 *
 * \param ring the ring structure to initialize
 * \param size number of elements in the ring
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring,
                                      uint16_t size)
{
    errval_t err;

    memset(ring, 0, sizeof(*ring));

    // the ring must fit the hardware limit and be a power of two
    // (presumably so indices can wrap with a mask -- TODO confirm)
    assert(size < (XEON_PHI_DMA_DESC_RING_MAX));
    assert(IS_POW2(size));

#ifndef __k1om__
    /*
     * we set the ram affinity to the maximum range mapped by the system memory
     * page tables when being on the host. Otherwise the card cannot access it.
     */
    uint64_t minbase, maxlimit;
    ram_get_affinity(&minbase, &maxlimit);
    ram_set_affinity(0, XEON_PHI_SYSMEM_SIZE-8*XEON_PHI_SYSMEM_PAGE_SIZE);
#endif

    size_t frame_size = ((size_t) size) * XEON_PHI_DMA_DESC_SIZE;
    err = frame_alloc(&ring->cap, frame_size, NULL);

#ifndef __k1om__
    // restore the previous affinity so later allocations are unaffected
    ram_set_affinity(minbase, maxlimit);
#endif

    if (err_is_fail(err)) {
        return err;
    }

    err = vspace_map_one_frame_attr(&ring->vbase, frame_size, ring->cap,
                                    VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(err)) {
        // undo the allocation if we cannot map it
        cap_destroy(ring->cap);
        return err;
    }

    // record the physical base so the device can be pointed at the ring
    struct frame_identity id;
    err = invoke_frame_identify(ring->cap, &id);
    assert(err_is_ok(err));
    ring->pbase = id.base;
    ring->size = size;

    // descriptors must start out zeroed
    memset(ring->vbase, 0, frame_size);

    return SYS_ERR_OK;
}
// Callback from device manager void qd_queue_init_data(struct e10k_binding *b, struct capref registers, uint64_t macaddr) { struct frame_identity frameid = { .base = 0, .bits = 0 }; errval_t err; void *virt; INITDEBUG("idc_queue_init_data\n"); mac_address = macaddr; // Map device registers invoke_frame_identify(registers, &frameid); err = vspace_map_one_frame_attr(&virt, 1 << frameid.bits, registers, VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL); assert(err_is_ok(err)); // Initialize mackerel device d = malloc(sizeof(*d)); #ifndef VF e10k_initialize(d, virt); #else e10k_vf_initialize(d, virt); #endif // Initialize queue setup_queue(); } // Callback from device manager void qd_queue_memory_registered(struct e10k_binding *b) { initialized = 1; hwqueue_initialized(); // Register queue with queue_mgr library #ifndef LIBRARY ethersrv_init((char*) service_name, qi, get_mac_addr_fn, terminate_queue_fn, transmit_pbuf_list_fn, find_tx_free_slot_count_fn, handle_free_tx_slot_fn, RXBUFSZ, register_rx_buffer_fn, find_rx_free_slot_count_fn); #else ethernetif_backend_init((char*) service_name, qi, get_mac_addr_fn, terminate_queue_fn, transmit_pbuf_list_fn, find_tx_free_slot_count_fn, handle_free_tx_slot_fn, RXBUFSZ, register_rx_buffer_fn, find_rx_free_slot_count_fn); #endif }
// Allocate a frame of at least `bytes` and map it uncached read/write.
// Aborts (assert) on any failure -- callers treat allocation as infallible.
static void frame_allocate_and_map(void **retbuf, struct capref *retcap,
                                   size_t bytes)
{
    errval_t err;
    size_t retbytes;

    err = frame_alloc(retcap, bytes, &retbytes);
    assert(err_is_ok(err));
    // BUGFIX: frame_alloc may round the allocation up to page granularity;
    // the previous strict equality assert fired for non-page-multiple sizes.
    // Only require that we got at least what was asked for (matching the
    // `>=` checks used at this function's sibling call sites).
    assert(retbytes >= bytes);

    err = vspace_map_one_frame_attr(retbuf, bytes, *retcap,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    assert(err_is_ok(err));
}
// Export/bind callback: a client has connected.  Allocate and map the shared
// bulk-transfer pool, initialize sender/receiver halves, and kick off the
// init_request handshake towards the client.
static void client_connected(void *st, errval_t err, struct rcce_binding *b)
{
    struct rcce_state *cs = st;
    assert(err_is_ok(err));

    /* printf("%s: Am connected to client\n", my_name); */

    b->rx_vtbl = rcce_vtbl;
    b->st = cs;

    // Create a Frame Capability
    size_t allocated_size;
    struct capref shared_mem;
#ifdef __scc__
    // constrain the allocation to this core's private section of shared
    // memory (presumably so the peer can address it -- TODO confirm)
    ram_set_affinity(SHARED_MEM_MIN + (PERCORE_MEM_SIZE * disp_get_core_id()),
                     SHARED_MEM_MIN + (PERCORE_MEM_SIZE * (disp_get_core_id() + 1)));
#endif
    errval_t r = frame_alloc(&shared_mem, BULK_SIZE * 2, &allocated_size);
    assert(err_is_ok(r));
#ifdef __scc__
    ram_set_affinity(0, 0);
#endif

    // Map the frame in local memory
    void *pool;
    r = vspace_map_one_frame_attr(&pool, allocated_size, shared_mem,
                                  BULK_PAGE_MAP, NULL, NULL);
    assert(pool != NULL);
    assert(err_is_ok(r));
    assert(allocated_size >= BULK_SIZE * 2);

    // Init sender (first half of the pool)
    err = bulk_init(pool, BULK_SIZE, BLOCK_SIZE, &cs->bt);
    assert(err_is_ok(err));

    // Init receiver (second half of the pool)
    err = bulk_slave_init(pool + BULK_SIZE, BULK_SIZE, &cs->btr);
    assert(err_is_ok(err));

    barrier_binding_init(b);
    barray[cs->index] = b;

    // hand the shared frame to the peer; `cs` travels as an opaque cookie
    err = barray[cs->index]->tx_vtbl.
        init_request(barray[cs->index], NOP_CONT, my_core_id, bsp_id,
                     (uint64_t)(uintptr_t)cs, shared_mem);
    assert(err_is_ok(err));
}
/** * \brief Initialise a new UMP channel to accept an incoming binding request * * \param uc Storage for channel state * \param mon_id Monitor's connection ID for this channel * \param frame Frame capability containing channel * \param inchanlen Size of incoming channel, in bytes (multiple of #UMP_MSG_BYTES) * \param outchanlen Size of outgoing channel, in bytes (multiple of #UMP_MSG_BYTES) */ errval_t ump_chan_accept(struct ump_chan *uc, uintptr_t mon_id, struct capref frame, size_t inchanlen, size_t outchanlen) { errval_t err; uc->monitor_id = mon_id; uc->frame = frame; // check that the frame is big enough struct frame_identity frameid; err = invoke_frame_identify(frame, &frameid); if (err_is_fail(err)) { return err_push(err, LIB_ERR_FRAME_IDENTIFY); } // Ids for tracing uc->recvid = (uintptr_t)(frameid.base + outchanlen); uc->sendid = (uintptr_t)frameid.base; size_t framesize = ((uintptr_t)1) << frameid.bits; if (framesize < inchanlen + outchanlen) { return LIB_ERR_UMP_FRAME_OVERFLOW; } // map it in void *buf; err = vspace_map_one_frame_attr(&buf, framesize, frame, UMP_MAP_ATTR, NULL, &uc->vregion); if (err_is_fail(err)) { cap_destroy(uc->frame); return err_push(err, LIB_ERR_VSPACE_MAP); } // initialise channel state err = ump_chan_init(uc, (char *)buf + outchanlen, inchanlen, buf, outchanlen); if (err_is_fail(err)) { vregion_destroy(uc->vregion); cap_destroy(uc->frame); return err; } /* mark connected */ uc->connstate = UMP_CONNECTED; return SYS_ERR_OK; }
/**
 * \brief allocates and maps a memory region to be used for DMA purposes
 *
 * \param bytes minimum size of the memory region in bytes
 * \param flags VREGION flags how the region gets mapped
 * \param mem   returns the mapping information
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t dma_mem_alloc(size_t bytes, vregion_flags_t flags,
                       struct dma_mem *mem)
{
    if (mem == NULL) {
        return DMA_ERR_ARG_INVALID;
    }

    /* back the region with a freshly allocated frame */
    errval_t err = frame_alloc(&mem->frame, bytes, &mem->bytes);
    if (err_is_fail(err)) {
        return err;
    }

    /* record the physical address for programming the DMA engine */
    struct frame_identity fid;
    err = invoke_frame_identify(mem->frame, &fid);
    if (err_is_fail(err)) {
        dma_mem_free(mem);
        return err;
    }
    mem->paddr = fid.base;

    /* map the frame with the caller-supplied attributes */
    void *vbase;
    err = vspace_map_one_frame_attr(&vbase, mem->bytes, mem->frame, flags,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        dma_mem_free(mem);
        return err;
    }
    mem->vaddr = (lvaddr_t)vbase;

    return SYS_ERR_OK;
}
// Handler for the init_request message: the peer shares its bulk-transfer
// frame; map it and initialize our receiver/sender halves.  Note the layout
// is the mirror image of the sender side (receiver first, sender second).
static void init_request(struct rcce_binding *st, coreid_t id, coreid_t bspid,
                         uint64_t state, struct capref shared_mem)
{
    errval_t err;
    struct rcce_state *rs = st->st;

    // Initialize local state for incoming connection
    barray[id] = st;
    bsp_id = bspid;

    // Map the frame in local memory
    void *pool;
    err = vspace_map_one_frame_attr(&pool, BULK_SIZE * 2, shared_mem,
                                    BULK_PAGE_MAP, NULL, NULL);
    assert(pool != NULL);
    assert(err_is_ok(err));

    // Init receiver (first half of the pool)
    err = bulk_slave_init(pool, BULK_SIZE, &rs->btr);
    assert(err_is_ok(err));

    // Init sender (second half of the pool)
    err = bulk_init(pool + BULK_SIZE, BULK_SIZE, BLOCK_SIZE, &rs->bt);
    assert(err_is_ok(err));

    if(connect_request == NULL && my_core_id != bspid) {
        // not yet connected ourselves: remember the request and reply later
        connect_request = st;
        connect_state = state;
    } else {
        // reply immediately, echoing the peer's opaque state cookie
        err = st->tx_vtbl.error_reply(st, NOP_CONT, SYS_ERR_OK, state);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "init_reply failed");
            abort();
        }
    }
}
/** * \brief initializes a IOAT DMA device with the giving capability * * \param mmio capability representing the device's MMIO registers * \param dev returns a pointer to the device structure * * \returns SYS_ERR_OK on success * errval on error */ errval_t ioat_dma_device_init(struct capref mmio, struct ioat_dma_device **dev) { errval_t err; struct ioat_dma_device *ioat_device = calloc(1, sizeof(*ioat_device)); if (ioat_device == NULL) { return LIB_ERR_MALLOC_FAIL; } #if DMA_BENCH_ENABLED bench_init(); #endif struct dma_device *dma_dev = &ioat_device->common; struct frame_identity mmio_id; err = invoke_frame_identify(mmio, &mmio_id); if (err_is_fail(err)) { free(ioat_device); return err; } dma_dev->id = device_id++; dma_dev->mmio.paddr = mmio_id.base; dma_dev->mmio.bytes = (1UL << mmio_id.bits); dma_dev->mmio.frame = mmio; IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%u kB}\n", dma_dev->id, mmio_id.base, 1 << mmio_id.bits); err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr, dma_dev->mmio.bytes, dma_dev->mmio.frame, VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL); if (err_is_fail(err)) { free(ioat_device); return err; } ioat_dma_initialize(&ioat_device->device, NULL, (void *) dma_dev->mmio.vaddr); ioat_device->version = ioat_dma_cbver_rd(&ioat_device->device); IOATDEV_DEBUG("device registers mapped at 0x%016lx. 
IOAT version: %u.%u\n", dma_dev->id, dma_dev->mmio.vaddr, ioat_dma_cbver_major_extract(ioat_device->version), ioat_dma_cbver_minor_extract(ioat_device->version)); switch (ioat_dma_cbver_major_extract(ioat_device->version)) { case ioat_dma_cbver_1x: err = device_init_ioat_v1(ioat_device); break; case ioat_dma_cbver_2x: err = device_init_ioat_v2(ioat_device); break; case ioat_dma_cbver_3x: err = device_init_ioat_v3(ioat_device); break; default: err = DMA_ERR_DEVICE_UNSUPPORTED; } if (err_is_fail(err)) { vspace_unmap((void*) dma_dev->mmio.vaddr); free(ioat_device); return err; } dma_dev->f.deregister_memory = NULL; dma_dev->f.register_memory = NULL; dma_dev->f.poll = ioat_dma_device_poll_channels; *dev = ioat_device; return err; }
/** * \brief A monitor receives request to setup a connection * with another newly booted monitor from a third monitor */ static void bind_monitor_request_scc(struct intermon_binding *b, coreid_t core_id, intermon_caprep_t caprep, chanid_t chan_id, coreid_t from_core_id) { struct intermon_ump_ipi_binding *umpb = NULL; errval_t err; /* Create the cap */ struct capability cap_raw; caprep_to_capability(&caprep, &cap_raw); if (cap_raw.type != ObjType_Frame) { err = MON_ERR_WRONG_CAP_TYPE; goto error; } struct capref frame; err = slot_alloc(&frame); if (err_is_fail(err)) { goto error; } ram_set_affinity(cap_raw.u.frame.base, cap_raw.u.frame.base + ((genpaddr_t)1 << cap_raw.u.frame.bits)); err = frame_alloc(&frame, ((genpaddr_t)1 << cap_raw.u.frame.bits), NULL); ram_set_affinity(0,0); /* err = monitor_cap_create(frame, &cap_raw, core_id); */ if (err_is_fail(err)) { goto error; } struct frame_identity frameid = { .base = 0, .bits = 0 }; err = invoke_frame_identify(frame, &frameid); assert(err == SYS_ERR_OK); printf("bind_monitor_request: URPC physical frame at 0x%llx\n", frameid.base); /* Setup the connection */ void *buf; err = vspace_map_one_frame_attr(&buf, MON_URPC_SIZE, frame, VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL); if (err_is_fail(err)) { err = err_push(err, LIB_ERR_VSPACE_MAP); goto error; } // Create remote notify cap struct capref notify_cap; err = notification_create_cap(chan_id, core_id, ¬ify_cap); assert(err == SYS_ERR_OK); // Allocate my own notification caps struct capref ep, my_notify_cap; struct lmp_endpoint *iep; int chanid; err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep); assert(err_is_ok(err)); err = notification_allocate(ep, &chanid); assert(err == SYS_ERR_OK); err = notification_create_cap(chanid, my_core_id, &my_notify_cap); assert(err == SYS_ERR_OK); // setup our side of the binding umpb = malloc(sizeof(struct intermon_ump_ipi_binding)); assert(umpb != NULL); err = intermon_ump_ipi_init(umpb, get_default_waitset(), buf + 
MON_URPC_CHANNEL_LEN, MON_URPC_CHANNEL_LEN, buf, MON_URPC_CHANNEL_LEN, notify_cap, my_notify_cap, ep, iep); assert(err_is_ok(err)); // Identify UMP frame for tracing struct frame_identity umpid; err = invoke_frame_identify(frame, &umpid); assert(err_is_ok(err)); umpb->ump_state.chan.recvid = (uintptr_t)umpid.base; umpb->ump_state.chan.sendid = (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN); // connect it to our request handlers err = intermon_init(&umpb->b, core_id); assert(err_is_ok(err)); /* Send reply */ reply: assert(umpb != NULL); bind_monitor_reply_scc_cont(&umpb->b, err, chanid); return; error: assert(!"Argh"); // FIXME: cleanup! goto reply; } /** * \brief The monitor that proxied the request for one monitor to * setup a connection with another monitor gets the reply */ static void bind_monitor_reply_scc(struct intermon_binding *binding, errval_t err, chanid_t chan_id, coreid_t core_id) { struct intermon_ump_ipi_binding *b = (struct intermon_ump_ipi_binding *)binding; // Create notify cap to that core struct capref notify_cap; err = notification_create_cap(chan_id, core_id, ¬ify_cap); assert(err == SYS_ERR_OK); // And assign it to the binding err = ipi_notify_set(&b->ipi_notify, notify_cap); assert(err_is_ok(err)); if (err_is_fail(err)) { // XXX DEBUG_ERR(err, "Got error in bind monitor reply"); } } /******* stack-ripped bind_monitor_proxy_scc *******/ static void bind_monitor_request_scc_handler(struct intermon_binding *b, struct intermon_msg_queue_elem *e); struct bind_monitor_request_scc_state { struct intermon_msg_queue_elem elem; struct intermon_bind_monitor_request_scc__args args; }; static void bind_monitor_request_scc_cont(struct intermon_binding *dst_binding, coreid_t src_core_id, intermon_caprep_t caprep, chanid_t chan_id, coreid_t core_id) { errval_t err; err = dst_binding->tx_vtbl. 
bind_monitor_request_scc(dst_binding, NOP_CONT, src_core_id, caprep, chan_id, core_id); if (err_is_fail(err)) { if(err_no(err) == FLOUNDER_ERR_TX_BUSY) { struct bind_monitor_request_scc_state *me = malloc(sizeof(struct bind_monitor_request_scc_state)); assert(me != NULL); struct intermon_state *ist = dst_binding->st; assert(ist != NULL); me->args.core_id = src_core_id; me->args.cap = caprep; me->args.chan_id = chan_id; me->args.from_core_id = core_id; me->elem.cont = bind_monitor_request_scc_handler; err = intermon_enqueue_send(dst_binding, &ist->queue, get_default_waitset(), &me->elem.queue); assert(err_is_ok(err)); return; } DEBUG_ERR(err, "forwarding bind request failed"); } }
/**
 * \brief Notification of a newly booted monitor.
 *        Setup our connection and request the sender to proxy
 *        the bind request to the monitor
 */
static void new_monitor_notify(struct intermon_binding *b,
                               coreid_t core_id)
{
    errval_t err;

    /* Setup the connection */
    // pin the allocation to this core's private section of shared memory
    // (presumably so the remote core can address the URPC frame -- TODO confirm)
    ram_set_affinity(SHARED_MEM_MIN + (PERCORE_MEM_SIZE * my_core_id),
                     SHARED_MEM_MIN + (PERCORE_MEM_SIZE * (my_core_id + 1)));
    struct capref frame;
    err = frame_alloc(&frame, MON_URPC_SIZE, NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "frame_alloc failed");
        return; // FIXME: cleanup
    }
    ram_set_affinity(0, 0);     // Reset affinity

    void *buf;
    err = vspace_map_one_frame_attr(&buf, MON_URPC_SIZE, frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "vspace_map_one_frame failed");
        assert(buf); // XXX
    }

    // XXX: Clear the frame (kernel can't do it for us)
    memset(buf, 0, MON_URPC_SIZE);

    // Allocate my own notification caps
    struct capref ep, my_notify_cap;
    struct lmp_endpoint *iep;
    int chanid;
    err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
    assert(err_is_ok(err));
    err = notification_allocate(ep, &chanid);
    assert(err == SYS_ERR_OK);
    err = notification_create_cap(chanid, my_core_id, &my_notify_cap);
    assert(err == SYS_ERR_OK);

    // init our end of the binding and channel: we receive on the first
    // half of the frame, send on the second half
    struct intermon_ump_ipi_binding *ump_binding =
        malloc(sizeof(struct intermon_ump_ipi_binding));
    assert(ump_binding != NULL);
    err = intermon_ump_ipi_init(ump_binding, get_default_waitset(),
                                buf, MON_URPC_CHANNEL_LEN,
                                buf + MON_URPC_CHANNEL_LEN,
                                MON_URPC_CHANNEL_LEN, NULL_CAP,
                                my_notify_cap, ep, iep);
    assert(err_is_ok(err));
    /* if (err_is_fail(err)) { */
    /*     cap_destroy(frame); */
    /*     return err_push(err, LIB_ERR_UMP_CHAN_BIND); */
    /* } */

    // Identify UMP frame for tracing
    struct frame_identity umpid = { .base = 0, .bits = 0 };
    err = invoke_frame_identify(frame, &umpid);
    assert(err_is_ok(err));
    ump_binding->ump_state.chan.recvid = (uintptr_t)umpid.base;
    ump_binding->ump_state.chan.sendid =
        (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN);

    err = intermon_init(&ump_binding->b, core_id);
    assert(err_is_ok(err));

    /* Identify the frame cap */
    struct capability frame_cap;
    err = monitor_cap_identify(frame, &frame_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor_cap_identify failed");
        return; // FIXME: cleanup
    }

    intermon_caprep_t caprep;
    capability_to_caprep(&frame_cap, &caprep);

    /* reply to the sending monitor to proxy request */
    err = b->tx_vtbl.bind_monitor_proxy_scc(b, NOP_CONT, core_id,
                                            caprep, chanid, my_core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "bind proxy request failed");
    }
}

// Install the SCC-specific inter-monitor message handlers on a binding.
errval_t arch_intermon_init(struct intermon_binding *b)
{
    b->rx_vtbl.bind_monitor_request_scc = bind_monitor_request_scc;
    b->rx_vtbl.bind_monitor_reply_scc = bind_monitor_reply_scc;
    b->rx_vtbl.bind_monitor_proxy_scc = bind_monitor_proxy_scc;
    b->rx_vtbl.new_monitor_notify = new_monitor_notify;

    return SYS_ERR_OK;
}
/*
 * ----------------------------------------------------------------------------
 * Xeon Phi Channel callbacks
 * ----------------------------------------------------------------------------
 */

/**
 * \brief callback: a domain opened a channel to this worker by sharing a frame
 *
 * \param domain  id of the domain that opened the channel
 * \param usrdata user cookie; for SHARED_RW / REPL_RW frames it carries the
 *                fixed virtual address to map at
 * \param frame   the shared frame capability
 * \param type    xomp_frame_type_t describing how the frame is to be mapped
 *
 * \returns SYS_ERR_OK on success, errval on error
 */
static errval_t msg_open_cb(xphi_dom_id_t domain, uint64_t usrdata,
                            struct capref frame, uint8_t type)
{
    errval_t err;

    uint32_t map_flags = 0x0;
    lvaddr_t addr = 0x0;        // non-zero => map at this fixed address

    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err;
    }

    XWI_DEBUG("msg_open_cb: from domid:%lx, usrdata:%lx, frame:%lx\n", domain,
              usrdata, id.base);

    switch ((xomp_frame_type_t) type) {
        case XOMP_FRAME_TYPE_MSG:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RW:
            addr = (lvaddr_t) usrdata;
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RO:
            map_flags = VREGION_FLAGS_READ;
            break;
        case XOMP_FRAME_TYPE_REPL_RW:
            map_flags = VREGION_FLAGS_READ_WRITE;
#if XOMP_WORKER_ENABLE_DMA
            // replicate via DMA, then re-identify the replica
            addr = (lvaddr_t) usrdata;
            err = replicate_frame(addr, &frame);
            if (err_is_fail(err)) {
                return err;
            }
            err = invoke_frame_identify(frame, &id);
#else
            // no DMA: allocate a local replica and map it at the fixed
            // address; the original frame is mapped separately below and
            // copied over with memcpy
            struct capref replicate;
            err = frame_alloc(&replicate, (1UL << id.bits), NULL);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "failed to allocate replicate frame\n");
                return err;
            }
            err = vspace_map_one_frame_fixed_attr((lvaddr_t) usrdata,
                                                  (1UL << id.bits),
                                                  replicate, map_flags,
                                                  NULL, NULL);
            if (err_is_fail(err)) {
                return err;
            }
            err = invoke_frame_identify(replicate, &id);
#endif
            if (err_is_fail(err)) {
                return err;
            }
            break;
        default:
            USER_PANIC("unknown type: %u", type)
            break;
    }

    if (addr) {
        if (worker_id & XOMP_WID_GATEWAY_FLAG) {
            // gateway workers track the frame so they can serve it to others
            XWR_DEBUG("registering memory with gateway: [%016lx]\n", addr);
            err = xomp_gateway_mem_insert(frame, addr);
            if (err_is_fail(err)) {
                /* todo: cleanup */
                return err;
            }
        }
        err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits), frame,
                                              map_flags, NULL, NULL);
    } else {
        // no fixed address requested: let vspace pick one
        err = vspace_map_one_frame_attr((void **) &addr, (1UL << id.bits),
                                        frame, map_flags, NULL, NULL);
    }

    if (err_is_fail(err)) {
        return err;
    }

#if !XOMP_WORKER_ENABLE_DMA
    if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_REPL_RW) {
        // copy the shared contents into the local replica
        memcpy((void *)usrdata, (void *)addr, (1UL << id.bits));
    }
#endif

    XWI_DEBUG("msg_open_cb: frame [%016lx] mapped @ [%016lx, %016lx]\n",
              id.base, addr, addr + (1UL << id.bits));

    if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_MSG) {
        USER_PANIC("NYI: initializing messaging");
    }

    return SYS_ERR_OK;
}
/**
 * \brief Initialise a new UMP channel and initiate a binding
 *
 * \param uc         Storage for channel state
 * \param cont       Continuation for bind completion/failure
 * \param qnode      Storage for an event queue node (used for queuing bind request)
 * \param iref       IREF to which to bind
 * \param monitor_binding Monitor binding to use
 * \param inchanlen  Size of incoming channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param outchanlen Size of outgoing channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param notify_cap Capability to use for notifications, or #NULL_CAP
 */
errval_t ump_chan_bind(struct ump_chan *uc, struct ump_bind_continuation cont,
                       struct event_queue_node *qnode, iref_t iref,
                       struct monitor_binding *monitor_binding,
                       size_t inchanlen, size_t outchanlen,
                       struct capref notify_cap)
{
    errval_t err;

    // round up channel sizes to message size
    inchanlen = ROUND_UP(inchanlen, UMP_MSG_BYTES);
    outchanlen = ROUND_UP(outchanlen, UMP_MSG_BYTES);

    // compute size of frame needed and allocate it
    size_t framesize = inchanlen + outchanlen;
    err = frame_alloc(&uc->frame, framesize, &framesize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, framesize, uc->frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) {
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state: incoming half at the start of the frame,
    // outgoing half right after it
    err = ump_chan_init(uc, buf, inchanlen, (char *)buf + inchanlen,
                        outchanlen);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    // Ids for tracing
    struct frame_identity id;
    err = invoke_frame_identify(uc->frame, &id);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }
    uc->recvid = (uintptr_t)id.base;
    uc->sendid = (uintptr_t)(id.base + inchanlen);

    // store bind args for the continuation that runs once we hold the
    // monitor binding mutex
    uc->bind_continuation = cont;
    uc->monitor_binding = monitor_binding;
    uc->iref = iref;
    uc->inchanlen = inchanlen;
    uc->outchanlen = outchanlen;
    uc->notify_cap = notify_cap;

    // wait for the ability to use the monitor binding
    uc->connstate = UMP_BIND_WAIT;
    event_mutex_enqueue_lock(&monitor_binding->mutex, qnode,
                             MKCLOSURE(send_bind_cont, uc));

    return SYS_ERR_OK;
}
static void gw_req_memory_call_rx(struct xomp_binding *b, uint64_t addr, uint8_t type) { XWI_DEBUG("gw_req_memory_call_rx: addr:%lx, tyep: %u\n", addr, type); #if XOMP_BENCH_WORKER_EN cycles_t mem_timer = bench_tsc(); #endif struct txq_msg_st *msg_st = txq_msg_st_alloc(&txq); assert(msg_st != NULL); struct capref frame; if (type == XOMP_FRAME_TYPE_REPL_RW) { type = XOMP_FRAME_TYPE_SHARED_RW; } assert(!(worker_id & XOMP_WID_GATEWAY_FLAG)); msg_st->send = gw_req_memory_response_tx; msg_st->cleanup = NULL; XWR_DEBUG("Requesting frame from gateway: [%016lx]\n", usrdata); msg_st->err = xomp_gateway_get_memory(addr, &frame); if (err_is_fail(msg_st->err)) { txq_send(msg_st); return; } vregion_flags_t map_flags; switch ((xomp_frame_type_t) type) { case XOMP_FRAME_TYPE_MSG: map_flags = VREGION_FLAGS_READ_WRITE; break; case XOMP_FRAME_TYPE_SHARED_RW: case XOMP_FRAME_TYPE_REPL_RW: map_flags = VREGION_FLAGS_READ_WRITE; break; case XOMP_FRAME_TYPE_SHARED_RO: map_flags = VREGION_FLAGS_READ; break; default: USER_PANIC("unknown type: %u", type) break; } struct frame_identity id; msg_st->err = invoke_frame_identify(frame, &id); if (err_is_fail(msg_st->err)) { txq_send(msg_st); return; } if (addr) { msg_st->err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits), frame, map_flags, NULL, NULL); } else { void *map_addr; msg_st->err = vspace_map_one_frame_attr(&map_addr, (1UL << id.bits), frame, map_flags, NULL, NULL); } #if XOMP_BENCH_WORKER_EN mem_timer = bench_tsc() - mem_timer; debug_printf("%lx mem request %016lx took %lu cycles, %lu ms\n", worker_id, addr, mem_timer, bench_tsc_to_ms(mem_timer)); #endif txq_send(msg_st); }
// Handler for the inter-monitor UMP bind reply: on success, record the
// peer's connection id and (if supplied) reconstruct the notify cap; on
// refusal, tear down the connection state.  Finally forward the outcome to
// the local domain.
static void intermon_bind_ump_reply(struct intermon_binding *ib,
                                    uint64_t my_mon_id, uint64_t your_mon_id,
                                    errval_t msgerr,
                                    intermon_caprep_t caprep)
{
    errval_t err;
    struct remote_conn_state *con = remote_conn_lookup(my_mon_id);
    if (con == NULL) {
        USER_PANIC_ERR(0, "unknown mon_id in UMP bind reply");
        return;
    }

    uintptr_t domain_id = con->domain_id;
    struct monitor_binding *domain_binding = con->domain_binding;
    struct capref notify_cap = NULL_CAP;

    if (err_is_ok(msgerr)) { /* bind succeeded */
        con->mon_id = your_mon_id;
        con->mon_binding = ib;

#if 0
        /* map in UMP channel state */
        void *buf;
        err = vspace_map_one_frame_attr(&buf,
              2 * (UMP_CHANNEL_SIZE + con->localchan.size * sizeof(uintptr_t)),
              con->frame, VREGION_FLAGS_READ,
              NULL, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "vspace_map_one_frame failed");
            // XXX: should not be an assert, but we don't have any way to do
            // connection teardown here!
            assert(buf != NULL);
        }
        con->sharedchan = buf;
        con->localchan.buf = buf + 2 * UMP_CHANNEL_SIZE;

        // XXX: Put frame cap on a separate allocator as it is not deleted anymore
        struct capref frame_copy;
        err = slot_alloc(&frame_copy);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed to allocator slot from channel_alloc");
        }
        err = cap_copy(frame_copy, con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed create copy of frame cap");
        }
        err = cap_destroy(con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        con->frame = frame_copy;
#endif

        struct capability capability;
        caprep_to_capability(&caprep, &capability);

        if(capability.type != ObjType_Null) {
            // Get core id of sender
            coreid_t core_id = ((struct intermon_state *)ib->st)->core_id;

            // Construct the notify cap
            // BUGFIX: "&notify_cap" had been mangled to "¬ify_cap"
            // (HTML &not entity) -- a compile error; restored.
            err = slot_alloc(&notify_cap);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err,
                               "Failed to allocate slot from channel_alloc");
            }
            err = monitor_cap_create(notify_cap, &capability, core_id);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_cap_create failed");
            }
        }
    } else { /* bind refused */
        err = cap_destroy(con->x.ump.frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        err = remote_conn_free(my_mon_id);
        assert(err_is_ok(err));
    }

    bind_ump_reply_client_cont(domain_binding, my_mon_id, domain_id, msgerr,
                               notify_cap);
}
// Handler for add_memory: map the supplied frame into this worker's vspace
// (at `addr` if non-zero, else anywhere) and reply with the result.
static void add_memory_call_rx(struct xomp_binding *b, struct capref frame,
                               uint64_t addr, uint8_t type)
{
    XWI_DEBUG("add_memory_call_rx: addr:%lx, tyep: %u\n", addr, type);

    struct txq_msg_st *msg_st = txq_msg_st_alloc(&txq);
    assert(msg_st != NULL);

    msg_st->send = add_memory_response_tx;
    msg_st->cleanup = NULL;

    uint32_t map_flags = 0x0;

    // translate the frame type into mapping permissions
    switch ((xomp_frame_type_t) type) {
        case XOMP_FRAME_TYPE_MSG:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RW:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RO:
            map_flags = VREGION_FLAGS_READ;
            break;
        default:
            USER_PANIC("unknown type: %u", type)
            break;
    }

    struct frame_identity id;
    msg_st->err = invoke_frame_identify(frame, &id);
    if(err_is_fail(msg_st->err)) {
        // report the failure to the sender rather than mapping garbage
        txq_send(msg_st);
        return;
    }

#if XOMP_WORKER_ENABLE_DMA
    if (0) {
        // todo: replicate frame on the same node if needed..
        replicate_frame(addr, &frame);
    }
#endif

#if XOMP_BENCH_WORKER_EN
    cycles_t map_start = bench_tsc();
#endif

    if (addr) {
        // non-zero address: the caller dictates where the frame is mapped
        msg_st->err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits),
                                                      frame, map_flags,
                                                      NULL, NULL);
    } else {
        void *map_addr;
        msg_st->err = vspace_map_one_frame_attr(&map_addr, (1UL << id.bits),
                                                frame, map_flags, NULL, NULL);
    }

#if XOMP_BENCH_WORKER_EN
    cycles_t timer_end = bench_tsc();
    debug_printf("%lx mem map %016lx took %lu cycles, %lu ms\n", worker_id,
                 addr, timer_end - map_start,
                 bench_tsc_to_ms(timer_end - map_start));
#endif

    txq_send(msg_st);
}
/// Map in the frame caps for a module into our vspace, return their location
errval_t spawn_map_module(struct mem_region *module, size_t *retsize,
                          lvaddr_t *retaddr, genpaddr_t *retpaddr)
{
    assert(module != NULL);
    assert(module->mr_type == RegionType_Module);

    errval_t err;

    size_t size = module->mrmod_size;

    void *base;
    struct memobj *memobj;
    struct vregion *vregion;

    // reserve an anonymous read-only region large enough for the module;
    // `size` is updated to the actual region size
    err = vspace_map_anon_attr(&base, &memobj, &vregion, size, &size,
                               VREGION_FLAGS_READ);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // the module's frames live in consecutive slots of the module cnode
    struct capref frame = {
        .cnode = cnode_module,
        .slot  = module->mrmod_slot,
    };

    if (retpaddr != NULL) {
        *retpaddr = module->mr_base;
    }

    if (retsize != NULL) {
        *retsize = size;
    }

    if (retaddr != NULL) {
        *retaddr = (lvaddr_t)base;
    }

    // fill the memobj with each frame in turn and fault it in immediately
    size_t offset = 0;
    while (size > 0) {
        assert((size & BASE_PAGE_MASK) == 0);

        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));

        err = memobj->f.fill(memobj, offset, frame, 1UL << id.bits);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }

        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }

        frame.slot ++;
        size -= (1UL << id.bits);
        offset += (1UL << id.bits);
    }

    return SYS_ERR_OK;
}

// Unmap a module previously mapped with spawn_map_module().
errval_t spawn_unmap_module(lvaddr_t mapped_addr)
{
    return vspace_unmap((void *)mapped_addr);
}

/// Returns a raw pointer into the module's strings area
const char *multiboot_module_rawstring(struct mem_region *region)
{
    // lazily map the strings frame on first use (slot 0 of the module cnode)
    if (multiboot_strings == NULL) {
        errval_t err;
        /* Map in multiboot module strings area */
        struct capref mmstrings_cap = {
            .cnode = cnode_module,
            .slot = 0
        };
        err = vspace_map_one_frame_attr((void**)&multiboot_strings,
                                        BASE_PAGE_SIZE, mmstrings_cap,
                                        VREGION_FLAGS_READ,
                                        NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map failed");
            return NULL;
        }
#if 0
        printf("Mapped multiboot_strings at %p\n", multiboot_strings);
        for (int i = 0; i < 256; i++) {
            if ((i & 15) == 0) printf("%04x ", i);
            printf ("%02x ", multiboot_strings[i]& 0xff);
            if ((i & 15) == 15) printf("\n");
        }
#endif
    }

    if (region == NULL || region->mr_type != RegionType_Module) {
        return NULL;
    }

    return multiboot_strings + region->mrmod_data;
}

// Unmap the multiboot strings area and reset the cached pointer.
errval_t multiboot_cleanup_mapping(void)
{
    errval_t err = vspace_unmap(multiboot_strings);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "multiboot_cleanup_mapping: vspace_unmap() failed\n");
        return err_push(err, LIB_ERR_VSPACE_REMOVE_REGION);
    }
    multiboot_strings = NULL;
    return SYS_ERR_OK;
}
/** * @brief initialized a descriptor queue */ errval_t descq_create(struct descq** q, size_t slots, char* name, bool exp, bool notifications, uint8_t role, uint64_t *queue_id, struct descq_func_pointer* f) { DESCQ_DEBUG("create start\n"); errval_t err; struct descq* tmp; struct capref rx; struct capref tx; // Init basic struct fields tmp = malloc(sizeof(struct descq)); assert(tmp != NULL); tmp->name = strdup(name); assert(tmp->name != NULL); if (exp) { // exporting struct descq_endpoint_state* state = malloc(sizeof(struct descq_endpoint_state)); state->name = strdup(name); assert(state->name); state->f.notify = f->notify; state->f.dereg = f->dereg; state->f.reg = f->reg; state->f.create = f->create; state->f.destroy = f->destroy; state->f.control = f->control; err = descq_export(state, export_cb, connect_cb, get_default_waitset(), IDC_BIND_FLAGS_DEFAULT); if (err_is_fail(err)) { goto cleanup1; } while(!state->exp_done) { event_dispatch(get_default_waitset()); } } else { tmp->f.notify = f->notify; tmp->f.dereg = f->dereg; tmp->f.reg = f->reg; tmp->f.create = f->create; tmp->f.destroy = f->destroy; tmp->f.control = f->control; size_t bytes; err = frame_alloc(&rx, DESCQ_ALIGNMENT*slots, &bytes); if (err_is_fail(err)) { goto cleanup1; } assert(bytes >= DESCQ_ALIGNMENT*slots); err = frame_alloc(&tx, DESCQ_ALIGNMENT*slots, &bytes); if (err_is_fail(err)) { goto cleanup2; } assert(bytes >= DESCQ_ALIGNMENT*slots); err = vspace_map_one_frame_attr((void**) &(tmp->rx_descs), slots*DESCQ_ALIGNMENT, rx, VREGION_FLAGS_READ_WRITE, NULL, NULL); if (err_is_fail(err)) { goto cleanup3; } err = vspace_map_one_frame_attr((void**) &(tmp->tx_descs), slots*DESCQ_ALIGNMENT, tx, VREGION_FLAGS_READ_WRITE, NULL, NULL); if (err_is_fail(err)) { goto cleanup4; } memset(tmp->tx_descs, 0, slots*DESCQ_ALIGNMENT); memset(tmp->rx_descs, 0, slots*DESCQ_ALIGNMENT); tmp->bound_done = false; iref_t iref; err = nameservice_blocking_lookup(name, &iref); if (err_is_fail(err)) { goto cleanup5; } err = 
descq_bind(iref, bind_cb, tmp, get_default_waitset(), IDC_BIND_FLAGS_DEFAULT); if (err_is_fail(err)) { goto cleanup5; } while(!tmp->bound_done) { event_dispatch(get_default_waitset()); } tmp->local_bind = tmp->binding->local_binding != NULL; errval_t err2; err = tmp->binding->rpc_tx_vtbl.create_queue(tmp->binding, slots, rx, tx, notifications, role, &err2, queue_id); if (err_is_fail(err) || err_is_fail(err2)) { err = err_is_fail(err) ? err: err2; goto cleanup5; } tmp->tx_seq_ack = (void*)tmp->tx_descs; tmp->rx_seq_ack = (void*)tmp->rx_descs; tmp->tx_seq_ack->value = 0; tmp->rx_seq_ack->value = 0; tmp->tx_descs++; tmp->rx_descs++; tmp->slots = slots-1; tmp->rx_seq = 1; tmp->tx_seq = 1; devq_init(&tmp->q, false); tmp->q.f.enq = descq_enqueue; tmp->q.f.deq = descq_dequeue; tmp->q.f.notify = descq_notify; tmp->q.f.reg = descq_register; tmp->q.f.dereg = descq_deregister; tmp->q.f.ctrl = descq_control; tmp->notifications = notifications; notificator_init(&tmp->notificator, tmp, descq_can_read, descq_can_write); err = waitset_chan_register(get_default_waitset(), &tmp->notificator.ready_to_read, MKCLOSURE(mp_notify, tmp)); assert(err_is_ok(err)); } *q = tmp; DESCQ_DEBUG("create end %p \n", *q); return SYS_ERR_OK; cleanup5: vspace_unmap(tmp->rx_descs); cleanup4: vspace_unmap(tmp->rx_descs); cleanup3: cap_destroy(tx); cleanup2: cap_destroy(rx); cleanup1: free(tmp->name); free(tmp); return err; }
static errval_t cow_init(size_t bufsize, size_t granularity, struct cnoderef *cow_cn, size_t *frame_count) { assert(cow_cn); assert(frame_count); errval_t err; struct capref frame, cncap; struct cnoderef cnode; // get RAM cap bufsize = (bufsize / granularity + 1) * granularity; err = slot_alloc(&frame); assert(err_is_ok(err)); size_t rambits = log2floor(bufsize); debug_printf("bits = %zu\n", rambits); err = ram_alloc(&frame, rambits); assert(err_is_ok(err)); // calculate #slots cslot_t cap_count = bufsize / granularity; cslot_t slots; // get CNode err = cnode_create(&cncap, &cnode, cap_count, &slots); assert(err_is_ok(err)); assert(slots >= cap_count); // retype RAM into Frames struct capref first_frame = (struct capref) { .cnode = cnode, .slot = 0 }; err = cap_retype(first_frame, frame, ObjType_Frame, log2floor(granularity)); assert(err_is_ok(err)); err = cap_destroy(frame); assert(err_is_ok(err)); *frame_count = slots; *cow_cn = cnode; return SYS_ERR_OK; } // create cow-enabled vregion & backing // Can copy-on-write in granularity-sized chunks static errval_t vspace_map_one_frame_cow(void **buf, size_t size, struct capref frame, vregion_flags_t flags, struct memobj **memobj, struct vregion **vregion, size_t granularity) { errval_t err; if (!memobj) { memobj = malloc(sizeof(*memobj)); } assert(memobj); if (!vregion) { vregion = malloc(sizeof(*vregion)); } assert(vregion); err = vspace_map_anon_attr(buf, memobj, vregion, size, &size, flags); assert(err_is_ok(err)); size_t chunks = size / granularity; cslot_t slots; struct capref cncap; struct cnoderef cnode; err = cnode_create(&cncap, &cnode, chunks, &slots); assert(err_is_ok(err)); assert(slots >= chunks); struct capref fc = (struct capref) { .cnode = cnode, .slot = 0 }; for (int i = 0; i < chunks; i++) { err = cap_copy(fc, frame); assert(err_is_ok(err)); err = (*memobj)->f.fill_foff(*memobj, i * granularity, fc, granularity, i*granularity); assert(err_is_ok(err)); err = (*memobj)->f.pagefault(*memobj, *vregion, i 
* granularity, 0); assert(err_is_ok(err)); fc.slot++; } return SYS_ERR_OK; } int main(int argc, char *argv[]) { errval_t err; struct capref frame; size_t retsize; void *vbuf; struct vregion *vregion; uint8_t *buf; debug_printf("%s:%d\n", __FUNCTION__, __LINE__); err = frame_alloc(&frame, BUFSIZE, &retsize); assert(retsize >= BUFSIZE); if (err_is_fail(err)) { debug_printf("frame_alloc: %s\n", err_getstring(err)); return 1; } debug_printf("%s:%d: %zu\n", __FUNCTION__, __LINE__, retsize); // setup region err = vspace_map_one_frame_attr(&vbuf, retsize, frame, VREGION_FLAGS_READ_WRITE, NULL, &vregion); if (err_is_fail(err)) { debug_printf("vspace_map: %s\n", err_getstring(err)); return 1; } debug_printf("vaddr: %p\n", vbuf); // write stuff to region buf = vbuf; debug_printf("%s:%d: %p, %lu pages\n", __FUNCTION__, __LINE__, buf, BUFSIZE / BASE_PAGE_SIZE); memset(buf, 0xAA, BUFSIZE); debug_printf("%s:%d\n", __FUNCTION__, __LINE__); // create cow copy // setup exception handler thread_set_exception_handler(handler, NULL, ex_stack, ex_stack+EX_STACK_SIZE, NULL, NULL); assert(err_is_ok(err)); debug_printf("%s:%d\n", __FUNCTION__, __LINE__); err = cow_init(BUFSIZE, BASE_PAGE_SIZE, &cow_frames, &cow_frame_count); assert(err_is_ok(err)); // create r/o copy of region and tell exception handler bounds debug_printf("%s:%d\n", __FUNCTION__, __LINE__); err = vspace_map_one_frame_cow(&cow_vbuf, retsize, frame, VREGION_FLAGS_READ, NULL, &cow_vregion, BASE_PAGE_SIZE); if (err_is_fail(err)) { debug_printf("vspace_map: %s\n", err_getstring(err)); return 1; } debug_printf("cow_vaddr: %p\n", cow_vbuf); // do stuff cow copy uint8_t *cbuf = cow_vbuf; for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i+=2) { cbuf[i * BASE_PAGE_SIZE + 1] = 0x55; } // verify results for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i++) { printf("page %d\n", i); printf("buf[0] = %d; cbuf[0] = %d\n", buf[i*BASE_PAGE_SIZE], cbuf[i*BASE_PAGE_SIZE]); printf("buf[1] = %d; cbuf[1] = %d\n", buf[i*BASE_PAGE_SIZE+1], 
cbuf[i*BASE_PAGE_SIZE+1]); } debug_dump_hw_ptables(); return EXIT_SUCCESS; }