/**
 * \brief Allocates a new VNode, adding it to the page table and our metadata
 */
errval_t alloc_vnode(struct pmap_x86 *pmap, struct vnode *root,
                     enum objtype type, uint32_t entry,
                     struct vnode **retvnode)
{
    errval_t err;

    struct vnode *newvnode = slab_alloc(&pmap->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }

    // The VNode capability
    err = pmap->p.slot_alloc->alloc(pmap->p.slot_alloc, &newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);
    }

    // XXX: need to make sure that vnode cap that we will invoke is in our cspace!
    if (get_croot_addr(newvnode->u.vnode.cap) != CPTR_ROOTCN) {
        // debug_printf("%s: creating vnode for another domain in that domain's cspace; need to copy vnode cap to our cspace to make it invokable\n", __FUNCTION__);
        err = slot_alloc(&newvnode->u.vnode.invokable);
        assert(err_is_ok(err));
        err = cap_copy(newvnode->u.vnode.invokable, newvnode->u.vnode.cap);
        assert(err_is_ok(err));
    } else {
        // debug_printf("vnode in our cspace: copying capref to invokable\n");
        newvnode->u.vnode.invokable = newvnode->u.vnode.cap;
    }
    assert(!capref_is_null(newvnode->u.vnode.cap));
    assert(!capref_is_null(newvnode->u.vnode.invokable));

    err = pmap->p.slot_alloc->alloc(pmap->p.slot_alloc, &newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    // Map it
    err = vnode_map(root->u.vnode.invokable, newvnode->u.vnode.cap, entry,
                    PTABLE_ACCESS_DEFAULT, 0, 1, newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }

    // The VNode meta data
    newvnode->is_vnode = true;
    newvnode->entry = entry;
    newvnode->next = root->u.vnode.children;
    root->u.vnode.children = newvnode;
    newvnode->u.vnode.children = NULL;

    *retvnode = newvnode;
    return SYS_ERR_OK;
}
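/*
 * A minimal sketch of the "copy into our cspace to make it invokable" step
 * that alloc_vnode() performs above, pulled out as a standalone helper.
 * The helper name make_invokable() is illustrative only; it assumes the
 * slot_alloc()/cap_copy()/get_croot_addr() API used elsewhere in this code.
 */
static errval_t make_invokable(struct capref cap, struct capref *invokable)
{
    // Caps rooted in our own cspace can be invoked directly.
    if (get_croot_addr(cap) == CPTR_ROOTCN) {
        *invokable = cap;
        return SYS_ERR_OK;
    }

    // Otherwise, copy the cap into a freshly allocated slot in our cspace.
    errval_t err = slot_alloc(invokable);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cap_copy(*invokable, cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CAP_COPY);
    }
    return SYS_ERR_OK;
}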
// Callback from device manager
static void idc_queue_terminated(struct e10k_binding *b)
{
    errval_t err;

    INITDEBUG("idc_queue_terminated()\n");

    // Free memory for hardware ring buffers
    err = vspace_unmap(q->tx_ring);
    assert(err_is_ok(err));
    err = vspace_unmap(q->rx_ring);
    assert(err_is_ok(err));
    err = cap_delete(tx_frame);
    assert(err_is_ok(err));
    err = cap_delete(rx_frame);
    assert(err_is_ok(err));

    if (!capref_is_null(txhwb_frame)) {
        err = vspace_unmap(q->tx_hwb);
        assert(err_is_ok(err));
        err = cap_delete(txhwb_frame);
        assert(err_is_ok(err));
    }

    exit(0);
}
static errval_t spawn_setup_inherited_caps(struct spawninfo *si,
                                           struct capref inheritcn_cap)
{
    errval_t err;
    struct cnoderef inheritcn;

    if (capref_is_null(inheritcn_cap)) {
        return SYS_ERR_OK;
    }

    err = cnode_build_cnoderef(&inheritcn, inheritcn_cap);
    if (err_is_fail(err)) {
        return err;
    }

    /* Copy the file descriptor frame cap over */
    err = spawn_setup_fdcap(si, inheritcn);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_FDCAP);
    }

    /* Copy the session capability over */
    err = spawn_setup_sidcap(si, inheritcn);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_SIDCAP);
    }

    return SYS_ERR_OK;
}
errval_t request_trace_caps(struct intermon_binding *st)
{
    errval_t err = st->tx_vtbl.trace_caps_request(st, NOP_CONT);
    if (err_is_fail(err)) {
        return err_push(err, MON_ERR_SEND_REMOTE_MSG);
    }
    while (capref_is_null(trace_cap)) {
        messages_wait_and_handle_next();
    }
    return SYS_ERR_OK;
}
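/*
 * A sketch of the generic "block until a cap shows up" pattern used above:
 * keep handling events on the default waitset until some shared capref has
 * been filled in by a message handler. The helper name and the capref slot
 * are illustrative only; the waitset/event_dispatch API is the one used
 * elsewhere in this code.
 */
static errval_t wait_for_cap(struct capref *slot)
{
    while (capref_is_null(*slot)) {
        // A message handler invoked from event_dispatch() is expected to
        // store the received cap into *slot.
        errval_t err = event_dispatch(get_default_waitset());
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_EVENT_DISPATCH);
        }
    }
    return SYS_ERR_OK;
}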
static void allocate_response_done(void *arg)
{
    struct capref *cap = arg;

    if (!capref_is_null(*cap)) {
        errval_t err = cap_delete(*cap);
        if (err_is_fail(err) && err_no(err) != SYS_ERR_CAP_NOT_FOUND) {
            DEBUG_ERR(err, "cap_delete after send. This memory will leak.");
        }
    }

    free(cap);
}
static void percore_allocate_handler(struct mem_thc_service_binding_t *sv,
                                     uint8_t bits,
                                     genpaddr_t minbase, genpaddr_t maxlimit)
{
    errval_t ret;
    struct capref cap;

    ret = percore_allocate_handler_common(bits, minbase, maxlimit, &cap);

    sv->send.allocate(sv, ret, cap);

    if (!capref_is_null(cap)) {
        ret = cap_delete(cap);
        if (err_is_fail(ret)) {
            DEBUG_ERR(ret, "cap_delete after send. This memory will leak.");
        }
    }

    trace_event(TRACE_SUBSYS_MEMSERV,
                TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}
/**
 * \brief frees up the resources used by the ring.
 *
 * \param ring the descriptor ring to be freed
 *
 * \returns SYS_ERR_OK on success
 */
errval_t xeon_phi_dma_desc_ring_free(struct xdma_ring *ring)
{
    errval_t err;

    if (capref_is_null(ring->cap)) {
        return SYS_ERR_OK;
    }

    if (ring->vbase) {
        vspace_unmap(ring->vbase);
    }

    err = cap_revoke(ring->cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "revocation of ring cap failed\n");
    }

    return cap_destroy(ring->cap);
}
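/*
 * A minimal usage sketch for xeon_phi_dma_desc_ring_free(), assuming a
 * struct xdma_ring that was previously set up by the corresponding ring
 * allocation routine. The helper name and the trailing memset are
 * illustrative only, mirroring the cleanup convention used by
 * dma_mem_free() further below.
 */
static void xdma_ring_teardown(struct xdma_ring *ring)
{
    errval_t err = xeon_phi_dma_desc_ring_free(ring);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "freeing the DMA descriptor ring failed");
    }
    // Clear the struct so a repeated teardown sees a null cap and is a no-op.
    memset(ring, 0, sizeof(*ring));
}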
/**
 * Copies caps in the inherited cnode into the target's cspace.
 *
 * \param si            Target spawninfo
 * \param inheritcn_cap Cnode of caps to inherit
 * \retval SYS_ERR_OK   Caps have been copied.
 */
static errval_t spawn_setup_inherited_caps(struct spawninfo *si,
                                           struct capref inheritcn_cap)
{
    errval_t err;
    struct cnoderef inheritcn;

    if (capref_is_null(inheritcn_cap)) {
        return SYS_ERR_OK;
    }

    err = cnode_build_cnoderef(&inheritcn, inheritcn_cap);
    if (err_is_fail(err)) {
        return err;
    }

    /* Copy the file descriptor frame cap over */
    err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_FDSPAGE,
                                    si->taskcn, TASKCN_SLOT_FDSPAGE);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_FDCAP);
    }

    /* Copy the session capability over */
    err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_SESSIONID,
                                    si->taskcn, TASKCN_SLOT_SESSIONID);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_SIDCAP);
    }

    /* Copy the kernel capability over, scary */
    err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_KERNELCAP,
                                    si->taskcn, TASKCN_SLOT_KERNELCAP);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_KERNEL_CAP);
    }

    return SYS_ERR_OK;
}
/**
 * \brief tries to free the allocated memory region
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t dma_mem_free(struct dma_mem *mem)
{
    errval_t err;

    if (mem->vaddr) {
        err = vspace_unmap((void *)mem->vaddr);
        if (err_is_fail(err)) {
            /* TODO: error handling; ignoring for now */
        }
    }

    if (!capref_is_null(mem->frame)) {
        err = cap_destroy(mem->frame);
        if (err_is_fail(err)) {
            /* TODO: error handling; ignoring for now */
        }
    }

    memset(mem, 0, sizeof(*mem));

    return SYS_ERR_OK;
}
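/*
 * A minimal usage sketch for dma_mem_free(), assuming `mem` was populated by
 * the matching allocation routine; the surrounding function name is
 * illustrative only.
 */
static void release_dma_buffer(struct dma_mem *mem)
{
    // dma_mem_free() unmaps the region, destroys the frame cap and zeroes
    // *mem, so the struct can safely be reused afterwards.
    errval_t err = dma_mem_free(mem);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "dma_mem_free");
    }
}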
errval_t monitor_client_setup(struct spawninfo *si)
{
    errval_t err;

    struct monitor_lmp_binding *b = malloc(sizeof(struct monitor_lmp_binding));
    assert(b != NULL);

    // setup our end of the binding
    err = monitor_client_lmp_accept(b, get_default_waitset(),
                                    DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        free(b);
        return err_push(err, LIB_ERR_MONITOR_CLIENT_ACCEPT);
    }

    // copy the endpoint cap to the recipient
    struct capref dest = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_MONITOREP,
    };
    err = cap_copy(dest, b->chan.local_cap);
    if (err_is_fail(err)) {
        // TODO: destroy binding
        return err_push(err, LIB_ERR_CAP_COPY);
    }

    // Copy the performance monitoring cap to all spawned processes.
    struct capref src;
    dest.cnode = si->taskcn;
    dest.slot  = TASKCN_SLOT_PERF_MON;
    src.cnode  = cnode_task;
    src.slot   = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, INIT_ERR_COPY_PERF_MON);
    }

    // copy our receive vtable to the binding
    monitor_server_init(&b->b);

    return SYS_ERR_OK;
}

errval_t monitor_client_setup_mem_serv(void)
{
    /* construct special-case LMP connection to mem_serv */
    static struct monitor_lmp_binding mcb;
    struct waitset *ws = get_default_waitset();
    errval_t err;

    err = monitor_client_lmp_accept(&mcb, ws, DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_client_setup_mem_serv");
    }
    assert(err_is_ok(err));

    /* Send the cap for this endpoint to init, who will pass it to the monitor */
    err = lmp_ep_send0(cap_initep, 0, mcb.chan.local_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_ep_send0 failed");
    }

    // copy our receive vtable to the binding
    monitor_server_init(&mcb.b);

    // XXX: handle messages (ie. block) until the monitor binding is ready
    while (capref_is_null(mcb.chan.remote_cap)) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch waiting for mem_serv binding");
            return err_push(err, LIB_ERR_EVENT_DISPATCH);
        }
    }

    return SYS_ERR_OK;
}

/// Setup a dummy monitor binding that "sends" all requests to the local handlers
errval_t monitor_client_setup_monitor(void)
{
    monitor_loopback_init(&monitor_self_binding);
    monitor_server_init(&monitor_self_binding);
    set_monitor_binding(&monitor_self_binding);
    caplock_init(get_default_waitset());
    idc_init();
    // XXX: Need a waitset here or loopback won't work as expected
    // when binding to the ram_alloc service
    monitor_self_binding.mutex.equeue.waitset = get_default_waitset();

    return SYS_ERR_OK;
}
static void spawn_with_caps_handler(struct spawn_binding *b, char *path,
                                    char *argbuf, size_t argbytes,
                                    char *envbuf, size_t envbytes,
                                    struct capref inheritcn_cap,
                                    struct capref argcn_cap)
{
    errval_t err;
    domainid_t domainid = 0;

    /* extract arguments from buffer */
    char *argv[MAX_CMDLINE_ARGS + 1];
    int i = 0;
    size_t pos = 0;
    while (pos < argbytes && i < MAX_CMDLINE_ARGS) {
        argv[i++] = &argbuf[pos];
        char *end = memchr(&argbuf[pos], '\0', argbytes - pos);
        if (end == NULL) {
            err = SPAWN_ERR_GET_CMDLINE_ARGS;
            goto finish;
        }
        pos = end - argbuf + 1;
    }
    assert(i <= MAX_CMDLINE_ARGS);
    argv[i] = NULL;

    /* extract environment from buffer */
    char *envp[MAX_CMDLINE_ARGS + 1];
    i = 0;
    pos = 0;
    while (pos < envbytes && i < MAX_CMDLINE_ARGS) {
        envp[i++] = &envbuf[pos];
        char *end = memchr(&envbuf[pos], '\0', envbytes - pos);
        if (end == NULL) {
            err = SPAWN_ERR_GET_CMDLINE_ARGS;
            goto finish;
        }
        pos = end - envbuf + 1;
    }
    assert(i <= MAX_CMDLINE_ARGS);
    envp[i] = NULL;

    vfs_path_normalise(path);

    err = spawn(path, argv, argbuf, argbytes, envp, inheritcn_cap, argcn_cap,
                &domainid);

    if (!capref_is_null(inheritcn_cap)) {
        errval_t err2;
        err2 = cap_delete(inheritcn_cap);
        assert(err_is_ok(err2));
    }
    if (!capref_is_null(argcn_cap)) {
        errval_t err2;
        err2 = cap_delete(argcn_cap);
        assert(err_is_ok(err2));
    }

finish:
    if (err_is_fail(err)) {
        free(argbuf);
        DEBUG_ERR(err, "spawn");
    }

    err = spawn_reply(b, err, domainid);
    if (err_is_fail(err)) {
        // not much we can do about this
        DEBUG_ERR(err, "while sending reply in spawn_handler");
    }

    free(envbuf);
    free(path);
}
static errval_t spawn_setup_argcn(struct spawninfo *si,
                                  struct capref argumentcn_cap)
{
    errval_t err;

    if (capref_is_null(argumentcn_cap)) {
        return SYS_ERR_OK;
    }

    struct capref dest = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_ARGCN
    };

    err = cap_copy(dest, argumentcn_cap);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_ARGCN);
    }

    return SYS_ERR_OK;
}

/**
 * \brief Load an image
 *
 * \param si            Struct used by the library
 * \param binary        The image to load
 * \param type          The type of arch to load for
 * \param name          Name of the image, required only to place it in the
 *                      disp struct
 * \param coreid        Coreid to load for, required only to place it in the
 *                      disp struct
 * \param argv          Command-line arguments, NULL-terminated
 * \param envp          Environment, NULL-terminated
 * \param inheritcn_cap Cap to a CNode containing capabilities to be inherited
 * \param argcn_cap     Cap to a CNode containing capabilities passed as
 *                      arguments
 */
errval_t spawn_load_image(struct spawninfo *si, lvaddr_t binary,
                          size_t binary_size, enum cpu_type type,
                          const char *name, coreid_t coreid,
                          char *const argv[], char *const envp[],
                          struct capref inheritcn_cap, struct capref argcn_cap)
{
    errval_t err;

    si->cpu_type = type;

    /* Initialize cspace */
    err = spawn_setup_cspace(si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_CSPACE);
    }

    /* Initialize vspace */
    err = spawn_setup_vspace(si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_INIT);
    }

    si->name = name;
    genvaddr_t entry;
    void *arch_info;

    /* Load the image */
    err = spawn_arch_load(si, binary, binary_size, &entry, &arch_info);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    /* Setup dispatcher frame */
    err = spawn_setup_dispatcher(si, coreid, name, entry, arch_info);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_DISPATCHER);
    }

    /* Setup inherited caps */
    err = spawn_setup_inherited_caps(si, inheritcn_cap);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_INHERITED_CAPS);
    }

    /* Setup argument caps */
    err = spawn_setup_argcn(si, argcn_cap);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_ARGCN);
    }

    // Add vspace-pspace mapping to environment
    char envstr[2048];
#ifdef __x86__ // SK: si->vregions only valid on x86
    snprintf(envstr, 2048, "ARRAKIS_PMAP=");
    for (int i = 0; i < si->vregions; i++) {
        struct memobj_anon *m = (struct memobj_anon *)si->vregion[i]->memobj;
        assert(m->m.type == ANONYMOUS);
        for (struct memobj_frame_list *f = m->frame_list; f != NULL; f = f->next) {
            struct frame_identity id;
            err = invoke_frame_identify(f->frame, &id);
            assert(err_is_ok(err));
            char str[128];
            snprintf(str, 128, "%" PRIxGENVADDR ":%" PRIxGENPADDR ":%zx ",
                     si->base[i] + f->offset, id.base, f->size);
            strcat(envstr, str);
        }
    }
#endif /* __x86__ */

    char **myenv = (char **)envp;
    for (int i = 0; i < MAX_ENVIRON_VARS; i++) {
        if (i + 1 == MAX_ENVIRON_VARS) {
            printf("spawnd: Couldn't set environment. Out of variables!\n");
            abort();
        }
        if (myenv[i] == NULL) {
            myenv[i] = envstr;
            myenv[i + 1] = NULL;
            break;
        }
    }

    /* Setup cmdline args */
    err = spawn_setup_env(si, argv, envp);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_ENV);
    }

    return SYS_ERR_OK;
}
int main(int argc, char *argv[])
{
    debug_printf("Xeon Phi module started on node [%u].\n", disp_xeon_phi_id());

    errval_t err;

    mmio_cap.cnode = cnode_task;
    sysmem_cap.cnode = cnode_task;

    assert(!capref_is_null(mmio_cap));
    assert(!capref_is_null(sysmem_cap));

    xphi.is_client = 0x1;
    xphi.id = disp_xeon_phi_id();

    for (uint32_t i = 0; i < XEON_PHI_NUM_MAX; ++i) {
        xphi.topology[i].id = i;
        xphi.topology[i].local = &xphi;
    }

    XDEBUG("Initializing system memory cap manager...\n");
    err = sysmem_cap_manager_init(sysmem_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not initialize the cap manager.\n");
    }

    err = map_mmio_space(&xphi);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not map the mmio space");
    }

    err = xdma_service_init(&xphi);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not initialize the dma engine.\n");
    }

    err = smpt_init(&xphi);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not initialize the SMPT.\n");
    }

    /* wait until the kernels are booted and spawnds are ready */
    err = nameservice_blocking_lookup("all_spawnds_up", NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "all_spawnds_up.\n");
    }

    //dma_impl_test(&xphi);

    lpaddr_t host_msg_base = strtol(argv[0], NULL, 16);
    uint8_t host_msg_size = strtol(argv[1], NULL, 16);

    XMESSAGING_DEBUG("Getting the host messaging cap...[%016lx, %02x]\n",
                     host_msg_base, host_msg_size);

    err = sysmem_cap_request(host_msg_base, host_msg_size, &host_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not obtain the system messaging cap\n");
    }

    err = interphi_init(&xphi, host_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Could not initialize the interphi communication\n");
    }

    err = xeon_phi_service_init(&xphi);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not initialize the messaging service");
    }

    XMESSAGING_DEBUG("Start polling for messages...\n");
    while (1) {
        uint8_t idle = 0x1;
        err = xdma_service_poll(&xphi);
        idle = idle && (err_no(err) == DMA_ERR_DEVICE_IDLE);
        err = event_dispatch_non_block(get_default_waitset());
        if (err_is_fail(err)) {
            if ((err_no(err) == LIB_ERR_NO_EVENT) && idle) {
                thread_yield();
                continue;
            }
            if (err_no(err) != LIB_ERR_NO_EVENT) {
                USER_PANIC_ERR(err, "msg loop");
            }
        }
    }

    XDEBUG("Messaging loop terminated...\n");
    return 0;
}
                .handler = init_recv_handler,
                .arg = arg,
            };
            struct waitset *ws = get_default_waitset();
            err = lmp_chan_register_recv(lc, ws, recv_handler);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "in lmp_chan_register_recv");
                abort();
            }
        } else {
            DEBUG_ERR(err, "in lmp_chan_recv");
            abort();
        }
    }

    assert(!capref_is_null(cap));
    assert(recv_state.pos < 2);

    // store cap
    lc->remote_cap = cap;

    // if we now have both caps, send them to each other
    if (++recv_state.pos == 2) {
        err = lmp_chan_send0(&monitor_chan, 0, mem_serv_chan.remote_cap);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "sending cap to monitor");
            abort();
        }
        err = lmp_chan_send0(&mem_serv_chan, 0, monitor_chan.remote_cap);
        if (err_is_fail(err)) {