static void send_cap_request(struct interdisp_binding *st, struct capref cap,
                             genvaddr_t info)
{
    errval_t err = SYS_ERR_OK, err2;
    struct capref *dest = (struct capref *)(uintptr_t)info;

    err = cap_copy(*dest, cap);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_CAP_COPY_FAIL);
        DEBUG_ERR(err, "cap_copy");
        abort();
        goto send_reply;
    }
    err = cap_destroy(cap);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_CAP_DELETE_FAIL);
        DEBUG_ERR(err, "cap_destroy default");
        abort();
        goto send_reply;
    }

send_reply:
    err2 = st->tx_vtbl.send_cap_reply(st, NOP_CONT, err);
    if (err_is_fail(err2)) {
        DEBUG_ERR(err2, "Failed to send send_cap_reply");
    }
}
/**
 * \brief Page fault handler
 *
 * \param memobj  The memory object
 * \param vregion The associated vregion
 * \param offset  Offset into memory object of the page fault
 * \param type    The fault type
 */
static errval_t pagefault(struct memobj *memobj, struct vregion *vregion,
                          genvaddr_t offset, vm_fault_type_t type)
{
    errval_t err;
    struct memobj_one_frame_lazy *lazy = (struct memobj_one_frame_lazy *)memobj;
    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap = vspace_get_pmap(vspace);
    genvaddr_t vregion_base = vregion_get_base_addr(vregion);
    genvaddr_t vregion_off = vregion_get_offset(vregion);
    vregion_flags_t flags = vregion_get_flags(vregion);

    // XXX: ugly --> need to revoke lazy->frame in order to clean up
    // all the copies that are created here
    struct capref frame_copy;
    err = slot_alloc(&frame_copy);
    if (err_is_fail(err)) {
        return err;
    }
    err = cap_copy(frame_copy, lazy->frame);
    if (err_is_fail(err)) {
        return err;
    }

    err = pmap->f.map(pmap, vregion_base + vregion_off + offset, frame_copy,
                      offset, lazy->chunk_size, flags, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_MAP);
    }

    return SYS_ERR_OK;
}
static errval_t move_to_root(struct capref src, struct capref *dest)
{
    errval_t err;

    err = slot_alloc_root(dest);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cap_copy(*dest, src);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CAP_COPY);
    }
    err = cap_delete(src);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_WHILE_DELETING);
    }
    err = slot_free(src);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_WHILE_FREEING_SLOT);
    }

    return SYS_ERR_OK;
}
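A minimal usage sketch of the copy/delete/free idiom above (the caller and its name are hypothetical, not part of the original source): a capability that arrived in a temporary slot is relocated into the root CNode before being handed on.

```c
// Hypothetical caller: assumes `tmp` already holds a valid capability in a
// slot allocated from the default slot allocator.
static errval_t forward_cap(struct capref tmp, struct capref *out)
{
    // move_to_root() copies `tmp` into a freshly allocated root-CNode slot,
    // then deletes the original capability and frees its slot.
    errval_t err = move_to_root(tmp, out);
    if (err_is_fail(err)) {
        return err;
    }
    return SYS_ERR_OK;
}
```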
/**
 * \brief Allocates a new VNode, adding it to the page table and our metadata
 */
errval_t alloc_vnode(struct pmap_x86 *pmap, struct vnode *root,
                     enum objtype type, uint32_t entry,
                     struct vnode **retvnode)
{
    errval_t err;

    struct vnode *newvnode = slab_alloc(&pmap->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }

    // The VNode capability
    err = pmap->p.slot_alloc->alloc(pmap->p.slot_alloc, &newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);
    }

    // XXX: need to make sure that the vnode cap we will invoke is in our cspace!
    if (get_croot_addr(newvnode->u.vnode.cap) != CPTR_ROOTCN) {
        // debug_printf("%s: creating vnode for another domain in that domain's "
        //              "cspace; need to copy vnode cap to our cspace to make it "
        //              "invokable\n", __FUNCTION__);
        err = slot_alloc(&newvnode->u.vnode.invokable);
        assert(err_is_ok(err));
        err = cap_copy(newvnode->u.vnode.invokable, newvnode->u.vnode.cap);
        assert(err_is_ok(err));
    } else {
        // debug_printf("vnode in our cspace: copying capref to invokable\n");
        newvnode->u.vnode.invokable = newvnode->u.vnode.cap;
    }
    assert(!capref_is_null(newvnode->u.vnode.cap));
    assert(!capref_is_null(newvnode->u.vnode.invokable));

    err = pmap->p.slot_alloc->alloc(pmap->p.slot_alloc, &newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    // Map it
    err = vnode_map(root->u.vnode.invokable, newvnode->u.vnode.cap, entry,
                    PTABLE_ACCESS_DEFAULT, 0, 1, newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }

    // The VNode meta data
    newvnode->is_vnode = true;
    newvnode->entry = entry;
    newvnode->next = root->u.vnode.children;
    root->u.vnode.children = newvnode;
    newvnode->u.vnode.children = NULL;

    *retvnode = newvnode;
    return SYS_ERR_OK;
}
/**
 * \brief Span a domain with the given vroot and disp_frame
 *
 * Operation similar to spawning a domain, but the vroot and disp_frame
 * are already provided
 */
errval_t spawn_span_domain(struct spawninfo *si, struct capref vroot,
                           struct capref disp_frame)
{
    errval_t err;
    struct capref t1;
    struct cnoderef cnode;

    /* Spawn cspace */
    err = spawn_setup_cspace(si);
    if (err_is_fail(err)) {
        return err;
    }

    /* Create pagecn */
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_PAGECN;
    err = cnode_create_raw(t1, &cnode, PAGE_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_PAGECN);
    }

    // Copy root of pagetable
    si->vtree.cnode = cnode;
    si->vtree.slot  = 0;
    err = cap_copy(si->vtree, vroot);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_VNODE);
    }

    /* Copy dispatcher frame (in taskcn) */
    si->dispframe.cnode = si->taskcn;
    si->dispframe.slot  = TASKCN_SLOT_DISPFRAME;
    err = cap_copy(si->dispframe, disp_frame);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_VNODE);
    }

    return SYS_ERR_OK;
}
static errval_t spawn_setup_sidcap(struct spawninfo *si,
                                   struct cnoderef inheritcn)
{
    errval_t err;

    struct capref src;
    src.cnode = inheritcn;
    src.slot  = INHERITCN_SLOT_SESSIONID;

    struct capref dest;
    dest.cnode = si->taskcn;
    dest.slot  = TASKCN_SLOT_SESSIONID;

    err = cap_copy(dest, src);
    if (err_no(err) == SYS_ERR_SOURCE_CAP_LOOKUP) {
        // there was no sidcap to inherit, continue
        return SYS_ERR_OK;
    } else if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_SIDCAP);
    }

    return SYS_ERR_OK;
}
static void multiboot_cap_reply(struct monitor_binding *st, struct capref cap, errval_t msgerr) { errval_t err; static cslot_t multiboot_slots = 0; // All multiboot caps received if (err_is_fail(msgerr)) { // Request bootinfo frame struct bootinfo *bi; err = map_bootinfo(&bi); assert(err_is_ok(err)); // Init ramfs struct dirent *root = ramfs_init(); // Populate it with contents of multiboot populate_multiboot(root, bi); // Start the service err = start_service(root); assert(err_is_ok(err)); return; } // Move the cap into the multiboot cnode struct capref dest = { .cnode = cnode_module, .slot = multiboot_slots++, }; err = cap_copy(dest, cap); assert(err_is_ok(err)); err = cap_destroy(cap); assert(err_is_ok(err)); err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, multiboot_slots); assert(err_is_ok(err)); } static void bootstrap(void) { errval_t err; /* Create the module cnode */ struct capref modulecn_cap = { .cnode = cnode_root, .slot = ROOTCN_SLOT_MODULECN, }; err = cnode_create_raw(modulecn_cap, NULL, ((cslot_t)1 << MODULECN_SIZE_BITS), NULL); if (err_is_fail(err)) { DEBUG_ERR(err, "cnode_create_raw failed"); abort(); } // XXX: Set reply handler struct monitor_binding *st = get_monitor_binding(); st->rx_vtbl.multiboot_cap_reply = multiboot_cap_reply; // Make first multiboot cap request err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, 0); assert(err_is_ok(err)); }
/** * \brief Boot a app core of x86_64 type * * The processors are started by a sequency of INIT and STARTUP IPIs * which are sent by this function. * CMOS writes to the shutdown status byte are used to execute * different memory locations. * * \param core_id APIC ID of the core to try booting * \param entry Entry address for new kernel in the destination * architecture's lvaddr_t given in genvaddr_t * * \returns Zero on successful boot, non-zero (error code) on failure */ int start_aps_x86_64_start(uint8_t core_id, genvaddr_t entry) { DEBUG("%s:%d: start_aps_x86_64_start\n", __FILE__, __LINE__); errval_t err; // Copy the startup code to the real-mode address uint8_t *real_src = (uint8_t *) &x86_64_start_ap; uint8_t *real_end = (uint8_t *) &x86_64_start_ap_end; struct capref bootcap; #ifdef __k1om__ struct capref realmodecap; realmodecap.cnode = cnode_task; realmodecap.slot = TASKCN_SLOT_COREBOOT; err = slot_alloc(&bootcap); if (err_is_fail(err)) { USER_PANIC_ERR(err, "Allocating a new slot"); } err = cap_copy(bootcap, realmodecap); if (err_is_fail(err)) { USER_PANIC_ERR(err, "Copying capability"); } #else struct acpi_rpc_client* acl = get_acpi_rpc_client(); errval_t error_code; err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0, &bootcap, &error_code); if (err_is_fail(err)) { USER_PANIC_ERR(err, "mm_alloc_range_proxy failed."); } if (err_is_fail(error_code)) { USER_PANIC_ERR(error_code, "mm_alloc_range_proxy return failed."); } #endif void* real_base; err = vspace_map_one_frame(&real_base, 1<<16, bootcap, NULL, NULL); uint8_t* real_dest = (uint8_t*)real_base + X86_64_REAL_MODE_LINEAR_OFFSET; memcpy(real_dest, real_src, real_end - real_src); /* Pointer to the entry point called from init_ap.S */ volatile uint64_t *absolute_entry_ptr = (volatile uint64_t *) (( (lpaddr_t) &x86_64_init_ap_absolute_entry - (lpaddr_t) &x86_64_start_ap ) + real_dest); //copy the address of the function start (in boot.S) to the long-mode //assembler code to be able to perform an absolute jump *absolute_entry_ptr = entry; // pointer to the shared global variable amongst all kernels volatile uint64_t *ap_global = (volatile uint64_t *) (( (lpaddr_t) &x86_64_init_ap_global - (lpaddr_t) &x86_64_start_ap ) + real_dest); genpaddr_t global; struct monitor_blocking_rpc_client *mc = get_monitor_blocking_rpc_client(); err = mc->vtbl.get_global_paddr(mc, &global); if (err_is_fail(err)) { DEBUG_ERR(err, "invoke spawn core"); return err_push(err, MON_ERR_SPAWN_CORE); } *ap_global = (uint64_t)(genpaddr_t)global; // pointer to the pseudo-lock used to detect boot up of new core volatile uint32_t *ap_wait = (volatile uint32_t *) ((lpaddr_t) &x86_64_init_ap_wait - ((lpaddr_t) &x86_64_start_ap) + real_dest); // Pointer to the lock variable in the realmode code volatile uint8_t *ap_lock = (volatile uint8_t *) ((lpaddr_t) &x86_64_init_ap_lock - ((lpaddr_t) &x86_64_start_ap) + real_dest); *ap_wait = AP_STARTING_UP; end = bench_tsc(); #if defined(__k1om__) delay_ms(10); #endif err = invoke_send_init_ipi(ipi_cap, core_id); if (err_is_fail(err)) { DEBUG_ERR(err, "invoke send init ipi"); return err; } #if defined(__k1om__) delay_ms(200); #endif // x86 protocol actually would like us to do this twice err = invoke_send_start_ipi(ipi_cap, core_id, entry); if (err_is_fail(err)) { DEBUG_ERR(err, "invoke sipi"); return err; } // Give the new core a bit time to start-up and set the lock for (uint64_t i = 0; i < STARTUP_TIMEOUT; i++) { if (*ap_lock != 0) { break; } } // If the lock is set, the core has been started, otherwise assume, that // a 
    // core with this APIC ID doesn't exist.
    if (*ap_lock != 0) {
        while (*ap_wait != AP_STARTED);
        trace_event(TRACE_SUBSYS_KERNEL,
                    TRACE_EVENT_KERNEL_CORE_START_REQUEST_ACK, core_id);
        *ap_lock = 0;
        return 0;
    }

    assert(!"badness");
    return -1;
}
errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state, struct capref frame, size_t size) { errval_t err; struct vregion *vregion; struct capref oldframe; void *vbuf; // create copy of new region err = slot_alloc(&oldframe); if (err_is_fail(err)) { return err; } err = cap_copy(oldframe, frame); if (err_is_fail(err)) { return err; } err = vspace_map_one_frame_attr_aligned(&vbuf, size, oldframe, VREGION_FLAGS_READ_WRITE | VREGION_FLAGS_LARGE, LARGE_PAGE_SIZE, NULL, &vregion); if (err_is_fail(err)) { return err; } // copy over data to new frame genvaddr_t gen_base = vregion_get_base_addr(&state->vregion); memcpy(vbuf, (void*)(lvaddr_t)gen_base, state->mapoffset); err = vregion_destroy(vregion); if (err_is_fail(err)) { return err; } genvaddr_t offset = 0; // Unmap backing frames for [0, size) in state.vregion do { err = state->memobj.m.f.unfill(&state->memobj.m, 0, &oldframe, &offset); if (err_is_fail(err) && err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET) { return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION); } struct frame_identity fi; // increase address err = invoke_frame_identify(oldframe, &fi); if (err_is_fail(err)) { return err; } offset += (1UL<<fi.bits); err = cap_destroy(oldframe); if (err_is_fail(err)) { return err; } } while(offset < state->mapoffset); // Map new frame in err = state->memobj.m.f.fill(&state->memobj.m, 0, frame, size); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = state->memobj.m.f.pagefault(&state->memobj.m, &state->vregion, 0, 0); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } state->mapoffset = size; return SYS_ERR_OK; }
/** * Initialize mem_serv while spawning it. */ errval_t initialize_mem_serv(struct spawninfo *si) { errval_t err; /* copy supercn to memory server */; struct capref init_supercn_cap = { .cnode = cnode_root, .slot = ROOTCN_SLOT_SUPERCN }; struct capref child_supercn_cap = { .cnode = si->rootcn, .slot = ROOTCN_SLOT_SUPERCN }; err = cap_copy(child_supercn_cap, init_supercn_cap); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_SUPERCN_CAP); } return SYS_ERR_OK; } errval_t initialize_monitor(struct spawninfo *si) { errval_t err; /* Give monitor the kernel capability */ struct capref dest, src; dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_KERNELCAP; src.cnode = cnode_task; src.slot = TASKCN_SLOT_KERNELCAP; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_KERNEL_CAP); } /* Give monitor.0 the BSP KCB capability */ dest.cnode = si->rootcn; dest.slot = ROOTCN_SLOT_BSPKCB; src.cnode = cnode_root; src.slot = ROOTCN_SLOT_BSPKCB; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_KERNEL_CAP); } /* Give monitor the perfmon capability */ dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_PERF_MON; src.cnode = cnode_task; src.slot = TASKCN_SLOT_PERF_MON; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_PERF_MON); } /* Give monitor the IPI capability */ dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_IPI; src.cnode = cnode_task; src.slot = TASKCN_SLOT_IPI; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_IPI); } /* Give monitor modulecn */ dest.cnode = si->rootcn; dest.slot = ROOTCN_SLOT_MODULECN; src.cnode = cnode_root; src.slot = ROOTCN_SLOT_MODULECN; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_MODULECN_CAP); } /* Give monitor physaddr cn */ dest.cnode = si->rootcn; dest.slot = ROOTCN_SLOT_PACN; src.cnode = cnode_root; src.slot = ROOTCN_SLOT_PACN; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_PACN_CAP); } #if __x86_64__ || __i386__ || __k1om__ /* Give monitor IRQ */ dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_IRQ; src.cnode = cnode_task; src.slot = TASKCN_SLOT_IRQ; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_IRQ_CAP); } /* Give monitor IO */ dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_IO; src.cnode = cnode_task; src.slot = TASKCN_SLOT_IO; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_IO_CAP); } #endif // __x86_64__ || __i386__ #ifdef __k1om__ /* Give monitor system memory cap */ dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_SYSMEM; src.cnode = cnode_task; src.slot = TASKCN_SLOT_SYSMEM; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_IO_CAP); } dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_COREBOOT; src.cnode = cnode_task; src.slot = TASKCN_SLOT_COREBOOT; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_IO_CAP); } #endif #if __arm__ /* Give monitor IO */ dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_IO; src.cnode = cnode_task; src.slot = TASKCN_SLOT_IO; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_IO_CAP); } /* Give monitor IRQ */ dest.cnode = si->taskcn; dest.slot = TASKCN_SLOT_IRQ; src.cnode = cnode_task; src.slot = TASKCN_SLOT_IRQ; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_IRQ_CAP); } #endif #ifdef 
CONFIG_INTERCONNECT_DRIVER_UMP
#if 0
    // XXX: Disabled until SCC has a decent memory allocator
    /* Give monitor the foreign frame capability */
    dest.cnode = si->taskcn;
    dest.slot  = TASKCN_SLOT_MON_URPC;
    src.cnode  = cnode_task;
    src.slot   = TASKCN_SLOT_MON_URPC;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, INIT_ERR_COPY_UMP_CAP);
    }
#endif
#endif // CONFIG_INTERCONNECT_DRIVER_UMP

    return SYS_ERR_OK;
}
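The body of initialize_monitor repeats the same slot-for-slot copy many times. A small helper along the following lines (hypothetical, not in the original source; the error code to push is chosen per call site) would capture that pattern:

```c
// Hypothetical helper: copy one well-known slot from our CNode into the
// corresponding slot of the child's CNode, pushing `errcode` on failure.
static errval_t copy_to_child_slot(struct cnoderef dest_cn, cslot_t dest_slot,
                                   struct cnoderef src_cn, cslot_t src_slot,
                                   errval_t errcode)
{
    struct capref dest = { .cnode = dest_cn, .slot = dest_slot };
    struct capref src  = { .cnode = src_cn,  .slot = src_slot  };

    errval_t err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, errcode);
    }
    return SYS_ERR_OK;
}

// Example use (mirrors the kernel-capability hand-off above):
// err = copy_to_child_slot(si->taskcn, TASKCN_SLOT_KERNELCAP,
//                          cnode_task, TASKCN_SLOT_KERNELCAP,
//                          INIT_ERR_COPY_KERNEL_CAP);
```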
static errval_t spawn(char *path, char *const argv[], char *argbuf, size_t argbytes, char *const envp[], struct capref inheritcn_cap, struct capref argcn_cap, domainid_t *domainid) { errval_t err, msgerr; /* read file into memory */ vfs_handle_t fh; err = vfs_open(path, &fh); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_LOAD); } struct vfs_fileinfo info; err = vfs_stat(fh, &info); if (err_is_fail(err)) { vfs_close(fh); return err_push(err, SPAWN_ERR_LOAD); } assert(info.type == VFS_FILE); uint8_t *image = malloc(info.size); if (image == NULL) { vfs_close(fh); return err_push(err, SPAWN_ERR_LOAD); } size_t pos = 0, readlen; do { err = vfs_read(fh, &image[pos], info.size - pos, &readlen); if (err_is_fail(err)) { vfs_close(fh); free(image); return err_push(err, SPAWN_ERR_LOAD); } else if (readlen == 0) { vfs_close(fh); free(image); return SPAWN_ERR_LOAD; // XXX } else { pos += readlen; } } while (err_is_ok(err) && readlen > 0 && pos < info.size); err = vfs_close(fh); if (err_is_fail(err)) { DEBUG_ERR(err, "failed to close file %s", path); } // find short name (last part of path) char *name = strrchr(path, VFS_PATH_SEP); if (name == NULL) { name = path; } else { name++; } /* spawn the image */ struct spawninfo si; err = spawn_load_image(&si, (lvaddr_t)image, info.size, CURRENT_CPU_TYPE, name, my_core_id, argv, envp, inheritcn_cap, argcn_cap); if (err_is_fail(err)) { free(image); return err; } free(image); /* request connection from monitor */ struct monitor_blocking_rpc_client *mrpc = get_monitor_blocking_rpc_client(); struct capref monep; err = mrpc->vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MONITOR_CLIENT); } else if (err_is_fail(msgerr)) { return msgerr; } /* copy connection into the new domain */ struct capref destep = { .cnode = si.rootcn, .slot = ROOTCN_SLOT_MONITOREP, }; err = cap_copy(destep, monep); if (err_is_fail(err)) { spawn_free(&si); cap_destroy(monep); return err_push(err, SPAWN_ERR_MONITOR_CLIENT); } err = cap_destroy(monep); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MONITOR_CLIENT); } debug_printf("spawning %s on core %u\n", path, my_core_id); /* give the perfmon capability */ struct capref dest, src; dest.cnode = si.taskcn; dest.slot = TASKCN_SLOT_PERF_MON; src.cnode = cnode_task; src.slot = TASKCN_SLOT_PERF_MON; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_PERF_MON); } /* run the domain */ err = spawn_run(&si); if (err_is_fail(err)) { spawn_free(&si); return err_push(err, SPAWN_ERR_RUN); } // Allocate domain id struct ps_entry *pe = malloc(sizeof(struct ps_entry)); assert(pe != NULL); memset(pe, 0, sizeof(struct ps_entry)); memcpy(pe->argv, argv, MAX_CMDLINE_ARGS*sizeof(*argv)); pe->argbuf = argbuf; pe->argbytes = argbytes; /* * NB: It's important to keep a copy of the DCB *and* the root * CNode around. We need to revoke both (in the right order, see * kill_domain() below), so that we ensure no one else is * referring to the domain's CSpace anymore. Especially the loop * created by placing rootcn into its own address space becomes a * problem here. 
     */
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    pe->rootcn = si.rootcn;
    assert(err_is_ok(err));
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;

    err = ps_allocate(pe, domainid);
    if (err_is_fail(err)) {
        free(pe);
    }

    // Store in target dispatcher frame
    struct dispatcher_generic *dg = get_dispatcher_generic(si.handle);
    dg->domain_id = *domainid;

    /* cleanup */
    err = spawn_free(&si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_FREE);
    }

    return SYS_ERR_OK;
}

static void retry_use_local_memserv_response(void *a)
{
    errval_t err;
    struct spawn_binding *b = (struct spawn_binding *)a;

    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);
    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // try again
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_use_local_memserv_response, a));
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply\n");
    }
}
/** * \brief Setup arguments and environment * * \param argv Command-line arguments, NULL-terminated * \param envp Environment, NULL-terminated */ static errval_t spawn_setup_env(struct spawninfo *si, char *const argv[], char *const envp[]) { errval_t err; // Create frame (actually multiple pages) for arguments si->argspg.cnode = si->taskcn; si->argspg.slot = TASKCN_SLOT_ARGSPAGE; struct capref spawn_argspg = { .cnode = si->taskcn, .slot = TASKCN_SLOT_ARGSPAGE2, }; err = frame_create(si->argspg, ARGS_SIZE, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_ARGSPG); } err = cap_copy(spawn_argspg, si->argspg); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_ARGSPG); } /* Map in args frame */ genvaddr_t spawn_args_base; err = spawn_vspace_map_one_frame(si, &spawn_args_base, spawn_argspg, ARGS_SIZE); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_ARGSPG_TO_NEW); } void *argspg; err = vspace_map_one_frame(&argspg, ARGS_SIZE, si->argspg, NULL, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_ARGSPG_TO_SELF); } /* Layout of arguments page: * struct spawn_domain_params; // contains pointers to other fields * char buf[]; // NUL-terminated strings for arguments and environment * vspace layout data follows the string data */ struct spawn_domain_params *params = argspg; char *buf = (char *)(params + 1); size_t buflen = ARGS_SIZE - (buf - (char *)argspg); /* Copy command-line arguments */ int i; size_t len; for (i = 0; argv[i] != NULL; i++) { len = strlen(argv[i]) + 1; if (len > buflen) { return SPAWN_ERR_ARGSPG_OVERFLOW; } strcpy(buf, argv[i]); params->argv[i] = buf - (char *)argspg + (char *)(lvaddr_t)spawn_args_base; buf += len; buflen -= len; } assert(i <= MAX_CMDLINE_ARGS); int argc = i; params->argv[i] = NULL; /* Copy environment strings */ for (i = 0; envp[i] != NULL; i++) { len = strlen(envp[i]) + 1; if (len > buflen) { return SPAWN_ERR_ARGSPG_OVERFLOW; } strcpy(buf, envp[i]); params->envp[i] = buf - (char *)argspg + (char *)(lvaddr_t)spawn_args_base; buf += len; buflen -= len; } assert(i <= MAX_ENVIRON_VARS); params->envp[i] = NULL; /* Serialise vspace data */ // XXX: align buf to next word char *vspace_buf = (char *)ROUND_UP((lvaddr_t)buf, sizeof(uintptr_t)); buflen -= vspace_buf - buf; // FIXME: currently just the pmap is serialised err = si->vspace->pmap->f.serialise(si->vspace->pmap, vspace_buf, buflen); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SERIALISE_VSPACE); } /* Setup environment pointer and vspace pointer */ params->argc = argc; params->vspace_buf = (char *)vspace_buf - (char *)argspg + (char *)(lvaddr_t)spawn_args_base; params->vspace_buf_len = buflen; // Setup TLS data params->tls_init_base = (void *)vspace_genvaddr_to_lvaddr(si->tls_init_base); params->tls_init_len = si->tls_init_len; params->tls_total_len = si->tls_total_len; arch_registers_state_t *enabled_area = dispatcher_get_enabled_save_area(si->handle); registers_set_param(enabled_area, (uintptr_t)spawn_args_base); return SYS_ERR_OK; } /** * Copies caps from inheritcnode into destination cnode, * ignores caps that to not exist. * * \param inheritcn Source cnode * \param inherit_slot Source cnode slot * \param destcn Target cnode * \param destcn_slot Target cnode slot * * \retval SYS_ERR_OK Copy to target was successful or source cap * did not exist. 
 * \retval SPAWN_ERR_COPY_INHERITCN_CAP Error in cap_copy
 */
static errval_t spawn_setup_inherited_cap(struct cnoderef inheritcn,
                                          capaddr_t inherit_slot,
                                          struct cnoderef destcn,
                                          capaddr_t destcn_slot)
{
    errval_t err;

    struct capref src;
    src.cnode = inheritcn;
    src.slot  = inherit_slot;

    struct capref dest;
    dest.cnode = destcn;
    dest.slot  = destcn_slot;

    err = cap_copy(dest, src);
    if (err_no(err) == SYS_ERR_SOURCE_CAP_LOOKUP) {
        // there was no cap to inherit in this slot, continue
        return SYS_ERR_OK;
    } else if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_INHERITCN_CAP);
    }

    return SYS_ERR_OK;
}
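A hedged usage sketch: a wrapper would call this helper once per well-known slot of the inherit CNode, relying on the "absent cap is not an error" behaviour above. The caller name and the FDSPAGE slot constants are assumptions based on the naming used elsewhere in these snippets.

```c
// Hypothetical caller: propagate the file-descriptor and session-ID caps
// from the inherit CNode into the child's taskcn, skipping absent ones.
static errval_t copy_inherited_caps(struct spawninfo *si,
                                    struct cnoderef inheritcn)
{
    errval_t err;

    err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_FDSPAGE,
                                    si->taskcn, TASKCN_SLOT_FDSPAGE);
    if (err_is_fail(err)) {
        return err;
    }

    err = spawn_setup_inherited_cap(inheritcn, INHERITCN_SLOT_SESSIONID,
                                    si->taskcn, TASKCN_SLOT_SESSIONID);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}
```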
static errval_t spawn_setup_argcn(struct spawninfo *si, struct capref argumentcn_cap) { errval_t err; if (capref_is_null(argumentcn_cap)) { return SYS_ERR_OK; } struct capref dest = { .cnode = si->rootcn, .slot = ROOTCN_SLOT_ARGCN }; err = cap_copy(dest, argumentcn_cap); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_COPY_ARGCN); } return SYS_ERR_OK; } /** * \brief Load an image * * \param si Struct used by the library * \param binary The image to load * \param type The type of arch to load for * \param name Name of the image required only to place it in disp * struct * \param coreid Coreid to load for, required only to place it in disp * struct * \param argv Command-line arguments, NULL-terminated * \param envp Environment, NULL-terminated * \param inheritcn_cap Cap to a CNode containing capabilities to be inherited * \param argcn_cap Cap to a CNode containing capabilities passed as * arguments */ errval_t spawn_load_image(struct spawninfo *si, lvaddr_t binary, size_t binary_size, enum cpu_type type, const char *name, coreid_t coreid, char *const argv[], char *const envp[], struct capref inheritcn_cap, struct capref argcn_cap) { errval_t err; si->cpu_type = type; /* Initialize cspace */ err = spawn_setup_cspace(si); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_CSPACE); } /* Initialize vspace */ err = spawn_setup_vspace(si); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_VSPACE_INIT); } si->name = name; genvaddr_t entry; void* arch_info; /* Load the image */ err = spawn_arch_load(si, binary, binary_size, &entry, &arch_info); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_LOAD); } /* Setup dispatcher frame */ err = spawn_setup_dispatcher(si, coreid, name, entry, arch_info); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_DISPATCHER); } /* Setup inherited caps */ err = spawn_setup_inherited_caps(si, inheritcn_cap); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_INHERITED_CAPS); } /* Setup argument caps */ err = spawn_setup_argcn(si, argcn_cap); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_ARGCN); } // Add vspace-pspace mapping to environment char envstr[2048]; #ifdef __x86__ // SK: si->vregions only valid on x86 snprintf(envstr, 2048, "ARRAKIS_PMAP="); for(int i = 0; i < si->vregions; i++) { struct memobj_anon *m = (struct memobj_anon *)si->vregion[i]->memobj; assert(m->m.type == ANONYMOUS); for(struct memobj_frame_list *f = m->frame_list; f != NULL; f = f->next) { struct frame_identity id; err = invoke_frame_identify(f->frame, &id); assert(err_is_ok(err)); char str[128]; snprintf(str, 128, "%" PRIxGENVADDR ":%" PRIxGENPADDR ":%zx ", si->base[i] + f->offset, id.base, f->size); strcat(envstr, str); } } #endif /* __x86__ */ char **myenv = (char **)envp; for(int i = 0; i < MAX_ENVIRON_VARS; i++) { if(i + 1 == MAX_ENVIRON_VARS) { printf("spawnd: Couldn't set environemnt. Out of variables!\n"); abort(); } if(myenv[i] == NULL) { myenv[i] = envstr; myenv[i+1] = NULL; break; } } /* Setup cmdline args */ err = spawn_setup_env(si, argv, envp); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_SETUP_ENV); } return SYS_ERR_OK; }
/** * \brief Setup the dispatcher frame */ static errval_t spawn_setup_dispatcher(struct spawninfo *si, coreid_t core_id, const char *name, genvaddr_t entry, void* arch_info) { errval_t err; /* Create dispatcher frame (in taskcn) */ si->dispframe.cnode = si->taskcn; si->dispframe.slot = TASKCN_SLOT_DISPFRAME; struct capref spawn_dispframe = { .cnode = si->taskcn, .slot = TASKCN_SLOT_DISPFRAME2, }; err = frame_create(si->dispframe, (1 << DISPATCHER_FRAME_BITS), NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_DISPATCHER_FRAME); } err = cap_copy(spawn_dispframe, si->dispframe); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_DISPATCHER_FRAME); } /* Map in dispatcher frame */ dispatcher_handle_t handle; err = vspace_map_one_frame((void**)&handle, 1ul << DISPATCHER_FRAME_BITS, si->dispframe, NULL, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_DISPATCHER_TO_SELF); } genvaddr_t spawn_dispatcher_base; err = spawn_vspace_map_one_frame(si, &spawn_dispatcher_base, spawn_dispframe, 1UL << DISPATCHER_FRAME_BITS); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_DISPATCHER_TO_NEW); } /* Set initial state */ // XXX: Confusion address translation about l/gen/addr in entry struct dispatcher_shared_generic *disp = get_dispatcher_shared_generic(handle); struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle); arch_registers_state_t *enabled_area = dispatcher_get_enabled_save_area(handle); arch_registers_state_t *disabled_area = dispatcher_get_disabled_save_area(handle); /* Place core_id */ disp_gen->core_id = core_id; /* place eh information */ disp_gen->eh_frame = si->eh_frame; disp_gen->eh_frame_size = si->eh_frame_size; disp_gen->eh_frame_hdr = si->eh_frame_hdr; disp_gen->eh_frame_hdr_size = si->eh_frame_hdr_size; /* Setup dispatcher and make it runnable */ disp->udisp = spawn_dispatcher_base; disp->disabled = 1; disp->fpu_trap = 1; #ifdef __k1om__ disp->xeon_phi_id = disp_xeon_phi_id(); #endif // Copy the name for debugging const char *copy_name = strrchr(name, '/'); if (copy_name == NULL) { copy_name = name; } else { copy_name++; } strncpy(disp->name, copy_name, DISP_NAME_LEN); spawn_arch_set_registers(arch_info, handle, enabled_area, disabled_area); registers_set_entry(disabled_area, entry); si->handle = handle; return SYS_ERR_OK; } errval_t spawn_map_bootinfo(struct spawninfo *si, genvaddr_t *retvaddr) { errval_t err; struct capref src = { .cnode = cnode_task, .slot = TASKCN_SLOT_BOOTINFO }; struct capref dest = { .cnode = si->taskcn, .slot = TASKCN_SLOT_BOOTINFO }; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, LIB_ERR_CAP_COPY); } err = spawn_vspace_map_one_frame(si, retvaddr, dest, BOOTINFO_SIZE); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_BOOTINFO); } return SYS_ERR_OK; } /** * \brief Retrive the commandline args of #name * * The arguments are malloced into a new space so need to be freed after use */ errval_t spawn_get_cmdline_args(struct mem_region *module, char **retargs) { assert(module != NULL && retargs != NULL); /* Get the cmdline args */ const char *args = getopt_module(module); /* Allocate space */ *retargs = malloc(sizeof(char) * strlen(args)); if (!retargs) { return LIB_ERR_MALLOC_FAIL; } /* Copy args */ strcpy(*retargs, args); return SYS_ERR_OK; }
/** * \brief Setup an initial cspace * * Create an initial cspace layout */ static errval_t spawn_setup_cspace(struct spawninfo *si) { errval_t err; struct capref t1; /* Create root CNode */ err = cnode_create(&si->rootcn_cap, &si->rootcn, DEFAULT_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_ROOTCN); } /* Create taskcn */ err = cnode_create(&si->taskcn_cap, &si->taskcn, DEFAULT_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_TASKCN); } // Mint into rootcn setting the guard t1.cnode = si->rootcn; t1.slot = ROOTCN_SLOT_TASKCN; err = cap_mint(t1, si->taskcn_cap, 0, GUARD_REMAINDER(2 * DEFAULT_CNODE_BITS)); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MINT_TASKCN); } /* Create slot_alloc_cnode */ t1.cnode = si->rootcn; t1.slot = ROOTCN_SLOT_SLOT_ALLOC0; err = cnode_create_raw(t1, NULL, (1<<SLOT_ALLOC_CNODE_BITS), NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE); } t1.cnode = si->rootcn; t1.slot = ROOTCN_SLOT_SLOT_ALLOC1; err = cnode_create_raw(t1, NULL, (1<<SLOT_ALLOC_CNODE_BITS), NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE); } t1.cnode = si->rootcn; t1.slot = ROOTCN_SLOT_SLOT_ALLOC2; err = cnode_create_raw(t1, NULL, (1<<SLOT_ALLOC_CNODE_BITS), NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE); } // Create DCB si->dcb.cnode = si->taskcn; si->dcb.slot = TASKCN_SLOT_DISPATCHER; err = dispatcher_create(si->dcb); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_DISPATCHER); } // Give domain endpoint to itself (in taskcn) struct capref selfep = { .cnode = si->taskcn, .slot = TASKCN_SLOT_SELFEP, }; err = cap_retype(selfep, si->dcb, ObjType_EndPoint, 0); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SELFEP); } // Map root CNode (in taskcn) t1.cnode = si->taskcn; t1.slot = TASKCN_SLOT_ROOTCN; err = cap_mint(t1, si->rootcn_cap, 0, 0); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MINT_ROOTCN); } #ifdef TRACING_EXISTS // Set up tracing for the child err = trace_setup_child(si->taskcn, si->handle); if (err_is_fail(err)) { printf("Warning: error setting up tracing for child domain\n"); // SYS_DEBUG(err, ...); } #endif // XXX: copy over argspg? 
memset(&si->argspg, 0, sizeof(si->argspg)); /* Fill up basecn */ struct capref basecn_cap; struct cnoderef basecn; // Create basecn in rootcn basecn_cap.cnode = si->rootcn; basecn_cap.slot = ROOTCN_SLOT_BASE_PAGE_CN; err = cnode_create_raw(basecn_cap, &basecn, DEFAULT_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, LIB_ERR_CNODE_CREATE); } // Place the ram caps for (uint8_t i = 0; i < DEFAULT_CNODE_SLOTS; i++) { struct capref base = { .cnode = basecn, .slot = i }; struct capref ram; err = ram_alloc(&ram, BASE_PAGE_BITS); if (err_is_fail(err)) { return err_push(err, LIB_ERR_RAM_ALLOC); } err = cap_copy(base, ram); if (err_is_fail(err)) { return err_push(err, LIB_ERR_CAP_COPY); } err = cap_destroy(ram); if (err_is_fail(err)) { return err_push(err, LIB_ERR_CAP_DESTROY); } } return SYS_ERR_OK; } static errval_t spawn_setup_vspace(struct spawninfo *si) { errval_t err; /* Create pagecn */ si->pagecn_cap = (struct capref){.cnode = si->rootcn, .slot = ROOTCN_SLOT_PAGECN}; err = cnode_create_raw(si->pagecn_cap, &si->pagecn, PAGE_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_PAGECN); } /* Init pagecn's slot allocator */ // XXX: satisfy a peculiarity of the single_slot_alloc_init_raw API size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(PAGE_CNODE_SLOTS); void *buf = malloc(bufsize); assert(buf != NULL); err = single_slot_alloc_init_raw(&si->pagecn_slot_alloc, si->pagecn_cap, si->pagecn, PAGE_CNODE_SLOTS, buf, bufsize); if (err_is_fail(err)) { return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW); } // Create root of pagetable err = si->pagecn_slot_alloc.a.alloc(&si->pagecn_slot_alloc.a, &si->vtree); if (err_is_fail(err)) { return err_push(err, LIB_ERR_SLOT_ALLOC); } // top-level table should always live in slot 0 of pagecn assert(si->vtree.slot == 0); switch(si->cpu_type) { case CPU_X86_64: case CPU_K1OM: err = vnode_create(si->vtree, ObjType_VNode_x86_64_pml4); break; case CPU_X86_32: case CPU_SCC: #ifdef CONFIG_PAE err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdpt); #else err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdir); #endif break; case CPU_ARM5: case CPU_ARM7: err = vnode_create(si->vtree, ObjType_VNode_ARM_l1); break; default: assert(!"Other architecture"); return err_push(err, SPAWN_ERR_UNKNOWN_TARGET_ARCH); } if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_VNODE); } err = spawn_vspace_init(si, si->vtree, si->cpu_type); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_VSPACE_INIT); } return SYS_ERR_OK; } #if 0 /** * \brief Lookup and map an image */ static errval_t spawn_map(const char *name, struct bootinfo *bi, lvaddr_t *binary, size_t *binary_size) { errval_t err; /* Get the module from the multiboot */ struct mem_region *module = multiboot_find_module(bi, name); if (module == NULL) { return SPAWN_ERR_FIND_MODULE; } /* Map the image */ err = spawn_map_module(module, binary_size, binary, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MAP_MODULE); } return SYS_ERR_OK; }
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size, uint32_t flags, void **retbase) { errval_t err; struct spawninfo *si = state; // Increase size by space wasted on first page due to page-alignment size_t base_offset = BASE_PAGE_OFFSET(base); size += base_offset; base -= base_offset; // Page-align size = ROUND_UP(size, BASE_PAGE_SIZE); cslot_t vspace_slot = si->elfload_slot; // Allocate the frames size_t sz = 0; for (lpaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = frame_create(frame, sz, NULL); if (err_is_fail(err)) { return err_push(err, LIB_ERR_FRAME_CREATE); } } cslot_t spawn_vspace_slot = si->elfload_slot; cslot_t new_slot_count = si->elfload_slot - vspace_slot; // create copies of the frame capabilities for spawn vspace for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) { struct capref frame = { .cnode = si->segcn, .slot = vspace_slot + copy_idx, }; struct capref spawn_frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = cap_copy(spawn_frame, frame); if (err_is_fail(err)) { // TODO: make debug printf printf("cap_copy failed for src_slot = %"PRIuCSLOT", dest_slot = %"PRIuCSLOT"\n", frame.slot, spawn_frame.slot); return err_push(err, LIB_ERR_CAP_COPY); } } /* Map into my vspace */ struct memobj *memobj = malloc(sizeof(struct memobj_anon)); if (!memobj) { return LIB_ERR_MALLOC_FAIL; } struct vregion *vregion = malloc(sizeof(struct vregion)); if (!vregion) { return LIB_ERR_MALLOC_FAIL; } // Create the objects err = memobj_create_anon((struct memobj_anon*)memobj, size, 0); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON); } err = vregion_map(vregion, get_current_vspace(), memobj, 0, size, VREGION_FLAGS_READ_WRITE); if (err_is_fail(err)) { return err_push(err, LIB_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(memobj, genvaddr, frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = memobj->f.pagefault(memobj, vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } /* Map into spawn vspace */ struct memobj *spawn_memobj = NULL; struct vregion *spawn_vregion = NULL; err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion, &spawn_memobj, elf_to_vregion_flags(flags)); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = spawn_vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(spawn_memobj, genvaddr, frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset; *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr); return SYS_ERR_OK; } /** * \brief Load the elf image */ errval_t spawn_arch_load(struct spawninfo *si, lvaddr_t binary, size_t binary_size, 
genvaddr_t *entry, void** arch_info) { errval_t err; // Reset the elfloader_slot si->elfload_slot = 0; struct capref cnode_cap = { .cnode = si->rootcn, .slot = ROOTCN_SLOT_SEGCN, }; err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SEGCN); } // TLS is NYI si->tls_init_base = 0; si->tls_init_len = si->tls_total_len = 0; // Load the binary err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry); if (err_is_fail(err)) { return err; } struct Elf32_Shdr* got_shdr = elf32_find_section_header_name(binary, binary_size, ".got"); if (got_shdr) { *arch_info = (void*)got_shdr->sh_addr; } else { return SPAWN_ERR_LOAD; } return SYS_ERR_OK; } void spawn_arch_set_registers(void *arch_load_info, dispatcher_handle_t handle, arch_registers_state_t *enabled_area, arch_registers_state_t *disabled_area) { assert(arch_load_info != NULL); uintptr_t got_base = (uintptr_t)arch_load_info; struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle); disp_arm->got_base = got_base; enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base; disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base; #ifndef __ARM_ARCH_7M__ //armv7-m does not support these flags enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR; disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR; #endif }
static errval_t init_allocators(void) { errval_t err, msgerr; struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client(); assert(cl != NULL); // Get the bootinfo and map it in. struct capref bootinfo_frame; size_t bootinfo_size; struct bootinfo *bootinfo; msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size); if (err_is_fail(msgerr) || err_is_fail(err)) { USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err, "failed in get_bootinfo"); } err = vspace_map_one_frame((void**)&bootinfo, bootinfo_size, bootinfo_frame, NULL, NULL); assert(err_is_ok(err)); /* Initialize the memory allocator to handle PhysAddr caps */ static struct range_slot_allocator devframes_allocator; err = range_slot_alloc_init(&devframes_allocator, PCI_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, LIB_ERR_SLOT_ALLOC_INIT); } err = mm_init(&pci_mm_physaddr, ObjType_DevFrame, 0, 48, /* This next parameter is important. It specifies the maximum * amount that a cap may be "chunked" (i.e. broken up) at each * level in the allocator. Setting it higher than 1 reduces the * memory overhead of keeping all the intermediate caps around, * but leads to problems if you chunk up a cap too small to be * able to allocate a large subregion. This caused problems * for me with a large framebuffer... -AB 20110810 */ 1, /*was DEFAULT_CNODE_BITS,*/ slab_default_refill, slot_alloc_dynamic, &devframes_allocator, false); if (err_is_fail(err)) { return err_push(err, MM_ERR_MM_INIT); } // Request I/O Cap struct capref requested_caps; errval_t error_code; err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code); assert(err_is_ok(err) && err_is_ok(error_code)); // Copy into correct slot struct capref caps_io = { .cnode = cnode_task, .slot = TASKCN_SLOT_IO }; err = cap_copy(caps_io, requested_caps); // XXX: The code below is confused about gen/l/paddrs. // Caps should be managed in genpaddr, while the bus mgmt must be in lpaddr. err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code); assert(err_is_ok(err) && err_is_ok(error_code)); physical_caps = requested_caps; // Build the capref for the first physical address capability struct capref phys_cap; phys_cap.cnode = build_cnoderef(requested_caps, PHYSADDRCN_BITS); phys_cap.slot = 0; struct cnoderef devcnode; err = slot_alloc(&my_devframes_cnode); assert(err_is_ok(err)); cslot_t slots; err = cnode_create(&my_devframes_cnode, &devcnode, 255, &slots); if (err_is_fail(err)) { USER_PANIC_ERR(err, "cnode create"); } struct capref devframe; devframe.cnode = devcnode; devframe.slot = 0; for (int i = 0; i < bootinfo->regions_length; i++) { struct mem_region *mrp = &bootinfo->regions[i]; if (mrp->mr_type == RegionType_Module) { skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).", mrp->mr_base, 0, mrp->mrmod_size, mrp->mr_type, mrp->mrmod_data); } else { skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).", mrp->mr_base, mrp->mr_bits, ((size_t)1) << mrp->mr_bits, mrp->mr_type, mrp->mrmod_data); } if (mrp->mr_type == RegionType_PhyAddr || mrp->mr_type == RegionType_PlatformData) { ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" %s\n", i, mrp->mr_base, mrp->mr_base + (((size_t)1)<<mrp->mr_bits), mrp->mr_type == RegionType_PhyAddr ? 
"physical address" : "platform data"); err = cap_retype(devframe, phys_cap, ObjType_DevFrame, mrp->mr_bits); if (err_no(err) == SYS_ERR_REVOKE_FIRST) { printf("cannot retype region %d: need to revoke first; ignoring it\n", i); } else { assert(err_is_ok(err)); err = mm_add(&pci_mm_physaddr, devframe, mrp->mr_bits, mrp->mr_base); if (err_is_fail(err)) { USER_PANIC_ERR(err, "adding region %d FAILED\n", i); } } phys_cap.slot++; devframe.slot++; } } return SYS_ERR_OK; }
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size, uint32_t flags, void **retbase) { errval_t err; struct spawninfo *si = state; // Increase size by space wasted on first page due to page-alignment size_t base_offset = BASE_PAGE_OFFSET(base); size += base_offset; base -= base_offset; // Page-align size = ROUND_UP(size, BASE_PAGE_SIZE); cslot_t vspace_slot = si->elfload_slot; // Allocate the frames size_t sz = 0; for (lpaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = frame_create(frame, sz, NULL); if (err_is_fail(err)) { return err_push(err, LIB_ERR_FRAME_CREATE); } } cslot_t spawn_vspace_slot = si->elfload_slot; cslot_t new_slot_count = si->elfload_slot - vspace_slot; // create copies of the frame capabilities for spawn vspace for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) { struct capref frame = { .cnode = si->segcn, .slot = vspace_slot + copy_idx, }; struct capref spawn_frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = cap_copy(spawn_frame, frame); if (err_is_fail(err)) { // TODO: make debug printf printf("cap_copy failed for src_slot = %"PRIuCSLOT", dest_slot = %"PRIuCSLOT"\n", frame.slot, spawn_frame.slot); return err_push(err, LIB_ERR_CAP_COPY); } } /* Map into my vspace */ struct memobj *memobj = malloc(sizeof(struct memobj_anon)); if (!memobj) { return LIB_ERR_MALLOC_FAIL; } struct vregion *vregion = malloc(sizeof(struct vregion)); if (!vregion) { return LIB_ERR_MALLOC_FAIL; } // Create the objects err = memobj_create_anon((struct memobj_anon*)memobj, size, 0); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON); } err = vregion_map(vregion, get_current_vspace(), memobj, 0, size, VREGION_FLAGS_READ_WRITE); if (err_is_fail(err)) { return err_push(err, LIB_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(memobj, genvaddr, frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = memobj->f.pagefault(memobj, vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } /* Map into spawn vspace */ struct memobj *spawn_memobj = NULL; struct vregion *spawn_vregion = NULL; err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion, &spawn_memobj, elf_to_vregion_flags(flags)); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref spawn_frame = { .cnode = si->segcn, .slot = spawn_vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(spawn_memobj, genvaddr, spawn_frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } si->vregion[si->vregions] = vregion; si->base[si->vregions++] = base; genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset; *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr); return SYS_ERR_OK; } /** * \brief Load the elf image */ 
errval_t spawn_arch_load(struct spawninfo *si, lvaddr_t binary, size_t binary_size, genvaddr_t *entry, void** arch_load_info) { errval_t err; // Reset the elfloader_slot si->elfload_slot = 0; si->vregions = 0; struct capref cnode_cap = { .cnode = si->rootcn, .slot = ROOTCN_SLOT_SEGCN, }; // XXX: this code assumes that elf_load never needs more than 32 slots for // text frame capabilities. err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SEGCN); } // Load the binary si->tls_init_base = 0; si->tls_init_len = si->tls_total_len = 0; err = elf_load_tls(EM_HOST, elf_allocate, si, binary, binary_size, entry, &si->tls_init_base, &si->tls_init_len, &si->tls_total_len); if (err_is_fail(err)) { return err; } return SYS_ERR_OK; } void spawn_arch_set_registers(void *arch_load_info, dispatcher_handle_t handle, arch_registers_state_t *enabled_area, arch_registers_state_t *disabled_area) { #if defined(__x86_64__) /* XXX: 1st argument to _start is the dispatcher pointer * see lib/crt/arch/x86_64/crt0.s */ disabled_area->rdi = get_dispatcher_shared_generic(handle)->udisp; #elif defined(__i386__) /* XXX: 1st argument to _start is the dispatcher pointer * see lib/crt/arch/x86_32/crt0.s */ disabled_area->edi = get_dispatcher_shared_generic(handle)->udisp; #endif }
static errval_t do_map(struct pmap_arm *pmap, genvaddr_t vaddr, struct capref frame, size_t offset, size_t size, vregion_flags_t flags, size_t *retoff, size_t *retsize) { errval_t err; size = ROUND_UP(size, BASE_PAGE_SIZE); size_t pte_count = DIVIDE_ROUND_UP(size, BASE_PAGE_SIZE); genvaddr_t vend = vaddr + size; if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) { // fast path err = do_single_map(pmap, vaddr, vend, frame, offset, pte_count, flags); if (err_is_fail(err)) { DEBUG_ERR(err, "[do_map] in fast path"); return err_push(err, LIB_ERR_PMAP_DO_MAP); } } else { // multiple leaf page tables // first leaf uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr); genvaddr_t temp_end = vaddr + c * BASE_PAGE_SIZE; err = do_single_map(pmap, vaddr, temp_end, frame, offset, c, flags); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } // map full leaves while (ARM_L1_OFFSET(temp_end) < ARM_L1_OFFSET(vend)) { // update vars vaddr = temp_end; temp_end = vaddr + ARM_L2_MAX_ENTRIES * BASE_PAGE_SIZE; offset += c * BASE_PAGE_SIZE; c = ARM_L2_MAX_ENTRIES; // copy cap struct capref next; err = slot_alloc(&next); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } err = cap_copy(next, frame); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } frame = next; // do mapping err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } } // map remaining part offset += c * BASE_PAGE_SIZE; c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end); if (c) { // copy cap struct capref next; err = slot_alloc(&next); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } err = cap_copy(next, frame); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } // do mapping err = do_single_map(pmap, temp_end, vend, next, offset, c, flags); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } } } if (retoff) { *retoff = offset; } if (retsize) { *retsize = size; } //has_vnode_debug = false; return SYS_ERR_OK; #if 0 errval_t err; uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags); for (size_t i = offset; i < offset + size; i += BASE_PAGE_SIZE) { vaddr += BASE_PAGE_SIZE; } if (retoff) { *retoff = offset; } if (retsize) { *retsize = size; } return SYS_ERR_OK; #endif }
/** * Act upon request to create a driver instance. * * \param binding Controller binding * \param cls What class to instantiate? * \param cls_len Ignored. * \param name What name the driver instance should have. * \param nlen Ignored. * \param cap Capabilities for the driver instance. * \param flags Flags for the driver instance. */ static void create_handler(struct ddomain_binding* binding, const char* cls, size_t cls_len, const char* name, size_t nlen, const char* a1, size_t a1len, const char* a2, size_t a2len, const char* a3, size_t a3len, const char* a4, size_t a4len, struct capref cap1, struct capref cap2, struct capref cap3, struct capref cap4, struct capref cap5, struct capref cap6, uint64_t flags) { errval_t err; DRIVERKIT_DEBUG("Driver domain got create message from kaluga for cls=%s," "name=%s\n", cls, name); iref_t dev = 0, ctrl = 0; static size_t NR_CAPS = 6; static size_t NR_ARGS = 4; // This array is owned by the driver after create: struct capref* cap_array = calloc(sizeof(struct capref), NR_CAPS); cap_array[0] = cap1; cap_array[1] = cap2; cap_array[2] = cap3; cap_array[3] = cap4; cap_array[4] = cap5; cap_array[5] = cap6; struct capref cnodecap; err = slot_alloc_root(&cnodecap); assert(err_is_ok(err)); err = cap_copy(cnodecap, cap_array[0]); struct capref cap0_0 = { .slot = 0, .cnode = build_cnoderef(cnodecap, CNODE_TYPE_OTHER) }; char debug_msg[100]; debug_print_cap_at_capref(debug_msg, sizeof(debug_msg), cap0_0); DRIVERKIT_DEBUG("Received cap0_0=%s\n", debug_msg); char** args_array = calloc(sizeof(char*), 4); args_array[0] = arg_valid(a1) ? strdup(a1) : NULL; args_array[1] = arg_valid(a2) ? strdup(a2) : NULL; args_array[2] = arg_valid(a3) ? strdup(a3) : NULL; args_array[3] = arg_valid(a4) ? strdup(a4) : NULL; int args_len; for(args_len=0; args_len<NR_ARGS; args_len++) { if(args_array[args_len] == NULL) break; } DRIVERKIT_DEBUG("Instantiate driver\n"); err = driverkit_create_driver(cls, name, cap_array, NR_CAPS, args_array, args_len, flags, &dev, &ctrl); if (err_is_fail(err)) { DEBUG_ERR(err, "Instantiating driver failed, report this back to Kaluga." "name=%s, cls=%s\n", name, cls); } DRIVERKIT_DEBUG("sending create response to kaluga\n"); err = ddomain_create_response__tx(binding, NOP_CONT, dev, ctrl, err); if (err_is_fail(err)) { USER_PANIC_ERR(err, "Sending reply failed.\n"); } } /** * Destroy an existing driver instance. * * \param binding Controller binding. * \param name Name of the driver instance. * \param len Ignored */ static void destroy_handler(struct ddomain_binding* binding, const char* name, size_t len) { DRIVERKIT_DEBUG("Driver domain got destroy message for instance %s\n", name); errval_t err = driverkit_destroy(name); if (err_is_fail(err)) { DEBUG_ERR(err, "Destroying driver failed, report this back to Kaluga."); } err = binding->tx_vtbl.destroy_response(binding, NOP_CONT, err); if (err_is_fail(err)) { USER_PANIC_ERR(err, "Sending reply failed."); } }
static errval_t cow_init(size_t bufsize, size_t granularity,
                         struct cnoderef *cow_cn, size_t *frame_count)
{
    assert(cow_cn);
    assert(frame_count);

    errval_t err;
    struct capref frame, cncap;
    struct cnoderef cnode;

    // get RAM cap
    bufsize = (bufsize / granularity + 1) * granularity;
    err = slot_alloc(&frame);
    assert(err_is_ok(err));
    size_t rambits = log2floor(bufsize);
    debug_printf("bits = %zu\n", rambits);
    err = ram_alloc(&frame, rambits);
    assert(err_is_ok(err));

    // calculate #slots
    cslot_t cap_count = bufsize / granularity;
    cslot_t slots;

    // get CNode
    err = cnode_create(&cncap, &cnode, cap_count, &slots);
    assert(err_is_ok(err));
    assert(slots >= cap_count);

    // retype RAM into Frames
    struct capref first_frame = (struct capref) { .cnode = cnode, .slot = 0 };
    err = cap_retype(first_frame, frame, ObjType_Frame, log2floor(granularity));
    assert(err_is_ok(err));
    err = cap_destroy(frame);
    assert(err_is_ok(err));

    *frame_count = slots;
    *cow_cn = cnode;

    return SYS_ERR_OK;
}

// create cow-enabled vregion & backing
// Can copy-on-write in granularity-sized chunks
static errval_t vspace_map_one_frame_cow(void **buf, size_t size,
                                         struct capref frame,
                                         vregion_flags_t flags,
                                         struct memobj **memobj,
                                         struct vregion **vregion,
                                         size_t granularity)
{
    errval_t err;
    if (!memobj) {
        memobj = malloc(sizeof(*memobj));
    }
    assert(memobj);
    if (!vregion) {
        vregion = malloc(sizeof(*vregion));
    }
    assert(vregion);
    err = vspace_map_anon_attr(buf, memobj, vregion, size, &size, flags);
    assert(err_is_ok(err));

    size_t chunks = size / granularity;
    cslot_t slots;
    struct capref cncap;
    struct cnoderef cnode;
    err = cnode_create(&cncap, &cnode, chunks, &slots);
    assert(err_is_ok(err));
    assert(slots >= chunks);

    struct capref fc = (struct capref) { .cnode = cnode, .slot = 0 };
    for (size_t i = 0; i < chunks; i++) {
        err = cap_copy(fc, frame);
        assert(err_is_ok(err));
        err = (*memobj)->f.fill_foff(*memobj, i * granularity, fc, granularity,
                                     i * granularity);
        assert(err_is_ok(err));
        err = (*memobj)->f.pagefault(*memobj, *vregion, i * granularity, 0);
        assert(err_is_ok(err));
        fc.slot++;
    }

    return SYS_ERR_OK;
}

int main(int argc, char *argv[])
{
    errval_t err;
    struct capref frame;
    size_t retsize;
    void *vbuf;
    struct vregion *vregion;
    uint8_t *buf;

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);

    err = frame_alloc(&frame, BUFSIZE, &retsize);
    assert(retsize >= BUFSIZE);
    if (err_is_fail(err)) {
        debug_printf("frame_alloc: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("%s:%d: %zu\n", __FUNCTION__, __LINE__, retsize);

    // setup region
    err = vspace_map_one_frame_attr(&vbuf, retsize, frame,
                                    VREGION_FLAGS_READ_WRITE, NULL, &vregion);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("vaddr: %p\n", vbuf);

    // write stuff to region
    buf = vbuf;
    debug_printf("%s:%d: %p, %lu pages\n", __FUNCTION__, __LINE__, buf,
                 BUFSIZE / BASE_PAGE_SIZE);
    memset(buf, 0xAA, BUFSIZE);

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);

    // create cow copy
    // setup exception handler
    err = thread_set_exception_handler(handler, NULL, ex_stack,
                                       ex_stack + EX_STACK_SIZE, NULL, NULL);
    assert(err_is_ok(err));
    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = cow_init(BUFSIZE, BASE_PAGE_SIZE, &cow_frames, &cow_frame_count);
    assert(err_is_ok(err));

    // create r/o copy of region and tell exception handler bounds
    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = vspace_map_one_frame_cow(&cow_vbuf, retsize, frame, VREGION_FLAGS_READ,
                                   NULL, &cow_vregion, BASE_PAGE_SIZE);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("cow_vaddr: %p\n", cow_vbuf);

    // do stuff to cow copy
    uint8_t *cbuf = cow_vbuf;
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i += 2) {
        cbuf[i * BASE_PAGE_SIZE + 1] = 0x55;
    }

    // verify results
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i++) {
        printf("page %d\n", i);
        printf("buf[0] = %d; cbuf[0] = %d\n",
               buf[i * BASE_PAGE_SIZE], cbuf[i * BASE_PAGE_SIZE]);
        printf("buf[1] = %d; cbuf[1] = %d\n",
               buf[i * BASE_PAGE_SIZE + 1], cbuf[i * BASE_PAGE_SIZE + 1]);
    }
    debug_dump_hw_ptables();
    return EXIT_SUCCESS;
}
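/*
 * Note: the test installs an exception handler (handler, see main() above) that
 * is not part of this excerpt. As a rough illustration of the copy-on-write step
 * it has to perform, the hypothetical helper below takes the next unused frame
 * from the cow_frames CNode filled by cow_init() and preserves the faulting
 * page's current contents in it; the handler would then remap that frame
 * read-write at the faulting address. The helper's name, its split of
 * responsibilities, and the bookkeeping are assumptions, not the test's actual code.
 */
static errval_t cow_get_private_page(size_t page_idx, struct capref *ret_frame)
{
    static cslot_t next_free_slot = 0;   // assumption: single-threaded test
    assert(next_free_slot < cow_frame_count);

    // Next pre-retyped frame from the CNode created by cow_init()
    struct capref frame = {
        .cnode = cow_frames,
        .slot  = next_free_slot++,
    };

    // Map the private frame at a temporary address so we can copy the data over
    void *scratch;
    errval_t err = vspace_map_one_frame_attr(&scratch, BASE_PAGE_SIZE, frame,
                                             VREGION_FLAGS_READ_WRITE,
                                             NULL, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    // Preserve the page contents the writer is about to diverge from
    memcpy(scratch, (uint8_t *)cow_vbuf + page_idx * BASE_PAGE_SIZE,
           BASE_PAGE_SIZE);

    // (Unmapping the scratch window and remapping `frame` read-write at the
    //  faulting address are left to the exception handler.)
    *ret_frame = frame;
    return SYS_ERR_OK;
}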
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    lvaddr_t vaddr;
    size_t used_size;

    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Step 1: Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Step 2: create copies of the frame capabilities for the child vspace
    for (cslot_t copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                         ", dest_slot = %"PRIuCSLOT"\n", frame.slot,
                         spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    // Step 3: map into our own vspace
    // Get virtual address range to hold the module
    void *vaddr_range;
    err = paging_alloc(get_current_paging_state(), &vaddr_range, size);
    if (err_is_fail(err)) {
        debug_printf("elf_allocate: paging_alloc failed\n");
        return err;
    }

    // map allocated physical memory into the virtual memory of the parent process
    vaddr = (lvaddr_t)vaddr_range;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(get_current_paging_state(), vaddr, frame,
                                    slot_size, VREGION_FLAGS_READ_WRITE);
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while: mapping into parent

    // Step 4: map into the new process
    struct paging_state *cp = si->vspace;

    // map allocated physical memory into the virtual memory of the child process
    vaddr = (lvaddr_t)base;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(cp, vaddr, frame, slot_size,
                                    elf_to_vregion_flags(flags));
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while: mapping into child

    *retbase = (void*)vaddr_range + base_offset;

    return SYS_ERR_OK;
} // end function: elf_allocate

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si, lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    //debug_printf("spawn_arch_load: about to load elf %p\n", elf_allocate);
    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    //debug_printf("hello here\n");
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
}
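/*
 * Note: elf_allocate() above calls elf_to_vregion_flags(), which is not shown in
 * this excerpt. A plausible sketch, assuming the standard ELF p_flags bits
 * (PF_R/PF_W/PF_X) and Barrelfish's VREGION_FLAGS_* mapping flags:
 */
static vregion_flags_t elf_to_vregion_flags(uint32_t flags)
{
    vregion_flags_t vflags = 0;
    if (flags & PF_R) {          // segment readable
        vflags |= VREGION_FLAGS_READ;
    }
    if (flags & PF_W) {          // segment writable
        vflags |= VREGION_FLAGS_WRITE;
    }
    if (flags & PF_X) {          // segment executable
        vflags |= VREGION_FLAGS_EXECUTE;
    }
    return vflags;
}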
errval_t monitor_client_setup(struct spawninfo *si)
{
    errval_t err;

    struct monitor_lmp_binding *b = malloc(sizeof(struct monitor_lmp_binding));
    assert(b != NULL);

    // setup our end of the binding
    err = monitor_client_lmp_accept(b, get_default_waitset(),
                                    DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        free(b);
        return err_push(err, LIB_ERR_MONITOR_CLIENT_ACCEPT);
    }

    // copy the endpoint cap to the recipient
    struct capref dest = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_MONITOREP,
    };
    err = cap_copy(dest, b->chan.local_cap);
    if (err_is_fail(err)) {
        // TODO: destroy binding
        return err_push(err, LIB_ERR_CAP_COPY);
    }

    // Copy the performance monitoring cap to all spawned processes.
    struct capref src;
    dest.cnode = si->taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, INIT_ERR_COPY_PERF_MON);
    }

    // copy our receive vtable to the binding
    monitor_server_init(&b->b);

    return SYS_ERR_OK;
}

errval_t monitor_client_setup_mem_serv(void)
{
    /* construct special-case LMP connection to mem_serv */
    static struct monitor_lmp_binding mcb;
    struct waitset *ws = get_default_waitset();
    errval_t err;

    err = monitor_client_lmp_accept(&mcb, ws, DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_client_setup_mem_serv");
    }
    assert(err_is_ok(err));

    /* Send the cap for this endpoint to init, who will pass it to the monitor */
    err = lmp_ep_send0(cap_initep, 0, mcb.chan.local_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_ep_send0 failed");
    }

    // copy our receive vtable to the binding
    monitor_server_init(&mcb.b);

    // XXX: handle messages (ie. block) until the monitor binding is ready
    while (capref_is_null(mcb.chan.remote_cap)) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch waiting for mem_serv binding");
            return err_push(err, LIB_ERR_EVENT_DISPATCH);
        }
    }

    return SYS_ERR_OK;
}

/// Setup a dummy monitor binding that "sends" all requests to the local handlers
errval_t monitor_client_setup_monitor(void)
{
    monitor_loopback_init(&monitor_self_binding);
    monitor_server_init(&monitor_self_binding);
    set_monitor_binding(&monitor_self_binding);
    caplock_init(get_default_waitset());
    idc_init();
    // XXX: Need a waitset here or loopback won't work as expected
    // when binding to the ram_alloc service
    monitor_self_binding.mutex.equeue.waitset = get_default_waitset();

    return SYS_ERR_OK;
}
static void intermon_bind_ump_reply(struct intermon_binding *ib,
                                    uint64_t my_mon_id, uint64_t your_mon_id,
                                    errval_t msgerr,
                                    intermon_caprep_t caprep)
{
    errval_t err;
    struct remote_conn_state *con = remote_conn_lookup(my_mon_id);
    if (con == NULL) {
        USER_PANIC_ERR(0, "unknown mon_id in UMP bind reply");
        return;
    }

    uintptr_t domain_id = con->domain_id;
    struct monitor_binding *domain_binding = con->domain_binding;
    struct capref notify_cap = NULL_CAP;

    if (err_is_ok(msgerr)) { /* bind succeeded */
        con->mon_id = your_mon_id;
        con->mon_binding = ib;

#if 0
        /* map in UMP channel state */
        void *buf;
        err = vspace_map_one_frame_attr(&buf,
                                        2 * (UMP_CHANNEL_SIZE +
                                             con->localchan.size * sizeof(uintptr_t)),
                                        con->frame, VREGION_FLAGS_READ,
                                        NULL, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "vspace_map_one_frame failed");
            // XXX: should not be an assert, but we don't have any way to do
            // connection teardown here!
            assert(buf != NULL);
        }
        con->sharedchan = buf;
        con->localchan.buf = buf + 2 * UMP_CHANNEL_SIZE;

        // XXX: Put the frame cap on a separate allocator as it is not deleted anymore
        struct capref frame_copy;
        err = slot_alloc(&frame_copy);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed to allocate slot from channel_alloc");
        }
        err = cap_copy(frame_copy, con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed to create copy of frame cap");
        }
        err = cap_destroy(con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        con->frame = frame_copy;
#endif

        struct capability capability;
        caprep_to_capability(&caprep, &capability);

        if (capability.type != ObjType_Null) {
            // Get core id of sender
            coreid_t core_id = ((struct intermon_state *)ib->st)->core_id;

            // Construct the notify cap
            err = slot_alloc(&notify_cap);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "Failed to allocate slot from channel_alloc");
            }

            err = monitor_cap_create(notify_cap, &capability, core_id);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_cap_create failed");
            }
        }
    } else { /* bind refused */
        err = cap_destroy(con->x.ump.frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        err = remote_conn_free(my_mon_id);
        assert(err_is_ok(err));
    }

    bind_ump_reply_client_cont(domain_binding, my_mon_id, domain_id, msgerr,
                               notify_cap);
}