/**
 * \brief Span a domain with the given vroot and disp_frame
 *
 * Operation similar to spawning a domain, but the vroot and disp_frame
 * are already provided.
 */
errval_t spawn_span_domain(struct spawninfo *si, struct capref vroot,
                           struct capref disp_frame)
{
    errval_t err;
    struct capref t1;
    struct cnoderef cnode;

    /* Spawn cspace */
    err = spawn_setup_cspace(si);
    if (err_is_fail(err)) {
        return err;
    }

    /* Create pagecn */
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_PAGECN;
    err = cnode_create_raw(t1, &cnode, PAGE_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_PAGECN);
    }

    // Copy root of pagetable
    si->vtree.cnode = cnode;
    si->vtree.slot  = 0;
    err = cap_copy(si->vtree, vroot);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_VNODE);
    }

    /* Copy dispatcher frame (in taskcn) */
    si->dispframe.cnode = si->taskcn;
    si->dispframe.slot  = TASKCN_SLOT_DISPFRAME;
    err = cap_copy(si->dispframe, disp_frame);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_VNODE);
    }

    return SYS_ERR_OK;
}
/**
 * \brief Allocate a slot from the multi slot allocator
 *
 * \param ca    Instance of the allocator
 * \param ret   Pointer to return the allocated slot
 */
errval_t multi_alloc(struct slot_allocator *ca, struct capref *ret)
{
    errval_t err = SYS_ERR_OK;
    struct multi_slot_allocator *mca = (struct multi_slot_allocator*)ca;

    thread_mutex_lock(&ca->mutex);
    assert(ca->space != 0);
    ca->space--;

    /* Try allocating from the list of single slot allocators */
    struct slot_allocator_list *walk = mca->head;
    while (walk != NULL) {
        err = walk->a.a.alloc(&walk->a.a, ret);
        if (err_no(err) != LIB_ERR_SLOT_ALLOC_NO_SPACE) {
            break;
        }
        walk = walk->next;
    }
    if (err_is_fail(err)) {
        thread_mutex_unlock(&ca->mutex);
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC);
    }

    /* If no more slots are left, grow by pulling in the reserve allocator */
    if (ca->space == 0) {
        ca->space = ca->nslots;

        /* Pull in the reserve */
        mca->reserve->next = mca->head;
        mca->head = mca->reserve;

        /* Set up a new reserve */
        // CNode for the new reserve
        struct capref cap;
        struct cnoderef cnode;
        err = mca->top->alloc(mca->top, &cap);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SLOT_ALLOC);
        }
        thread_mutex_unlock(&ca->mutex);
        // cnode_create_raw uses ram_alloc, which may call slot_alloc,
        // so the mutex must not be held across this call
        err = cnode_create_raw(cap, &cnode, ca->nslots, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CNODE_CREATE);
        }
        thread_mutex_lock(&ca->mutex);

        // Buffer for the new reserve's metadata
        void *buf = slab_alloc(&mca->slab);
        if (buf == NULL) {
            /* Grow the slab backing the metadata */
            // Allocate a slot for the frame out of the list
            mca->a.space--;
            struct capref frame;
            err = mca->head->a.a.alloc(&mca->head->a.a, &frame);
            if (err_is_fail(err)) {
                thread_mutex_unlock(&ca->mutex);
                return err_push(err, LIB_ERR_SLOT_ALLOC);
            }
            thread_mutex_unlock(&ca->mutex);
            // The following functions may call slot_alloc
            void *slab_buf;
            size_t size;
            err = vspace_mmu_aware_map(&mca->mmu_state, frame,
                                       mca->slab.blocksize, &slab_buf, &size);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
            }
            thread_mutex_lock(&ca->mutex);

            // Grow slab
            slab_grow(&mca->slab, slab_buf, size);

            // Try allocating again
            buf = slab_alloc(&mca->slab);
            if (buf == NULL) {
                thread_mutex_unlock(&ca->mutex);
                return LIB_ERR_SLAB_ALLOC_FAIL;
            }
        }

        mca->reserve = buf;
        buf = (char *)buf + sizeof(struct slot_allocator_list);
        size_t bufsize = mca->slab.blocksize - sizeof(struct slot_allocator_list);

        // Initialize the new reserve allocator
        err = single_slot_alloc_init_raw(&mca->reserve->a, cap, cnode,
                                         mca->a.nslots, buf, bufsize);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
        }
    }

    thread_mutex_unlock(&ca->mutex);
    return SYS_ERR_OK;
}
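/*
 * Usage sketch (not part of the allocator itself): once a struct
 * multi_slot_allocator has been initialized, callers are expected to go
 * through the generic struct slot_allocator interface rather than calling
 * multi_alloc() directly. The helper below is hypothetical and only
 * illustrates that calling convention; `mca` is assumed to be an
 * already-initialized allocator.
 */
static errval_t example_alloc_one_slot(struct multi_slot_allocator *mca,
                                       struct capref *slot)
{
    // Dispatches to multi_alloc() via the function pointer installed by
    // multi_slot_alloc_init_raw() below
    errval_t err = mca->a.alloc(&mca->a, slot);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    // ... copy, mint or retype a capability into *slot here ...
    return SYS_ERR_OK;
}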
/**
 * \brief Initializer that does not allocate any space
 *
 * #slot_alloc_init duplicates some of the code below,
 * modify it if making changes here.
 *
 * XXX: top_buf, head_buf and reserve_buf each point to a separate buffer of
 * size bufsize bytes which can be used for backing storage. bufsize evidently
 * needs to be >= sizeof(struct cnode_meta) * nslots / 2. Don't ask me why! -AB
 */
errval_t multi_slot_alloc_init_raw(struct multi_slot_allocator *ret,
                                   cslot_t nslots, struct capref top_cap,
                                   struct cnoderef top_cnode,
                                   void *top_buf, void *head_buf,
                                   void *reserve_buf, size_t bufsize)
{
    errval_t err;
    struct capref cap;
    struct cnoderef cnode;

    /* Generic part */
    ret->a.alloc  = multi_alloc;
    ret->a.free   = multi_free;
    ret->a.space  = nslots;
    ret->a.nslots = nslots;
    thread_mutex_init(&ret->a.mutex);

    ret->head->next    = NULL;
    ret->reserve->next = NULL;

    /* Top */
    err = single_slot_alloc_init_raw((struct single_slot_allocator*)ret->top,
                                     top_cap, top_cnode, nslots, top_buf,
                                     bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Head */
    err = ret->top->alloc(ret->top, &cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cnode_create_raw(cap, &cnode, nslots, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = single_slot_alloc_init_raw(&ret->head->a, cap, cnode, nslots,
                                     head_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Reserve */
    err = ret->top->alloc(ret->top, &cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cnode_create_raw(cap, &cnode, nslots, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = single_slot_alloc_init_raw(&ret->reserve->a, cap, cnode, nslots,
                                     reserve_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Slab */
    size_t allocation_unit = sizeof(struct slot_allocator_list)
                             + SINGLE_SLOT_ALLOC_BUFLEN(nslots);
    slab_init(&ret->slab, allocation_unit, NULL);

    return SYS_ERR_OK;
}
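/*
 * Sizing sketch for the raw initializer above (hypothetical caller, not part
 * of the library): the three backing buffers can be sized with
 * SINGLE_SLOT_ALLOC_BUFLEN, the same macro used for the slab's allocation
 * unit. This sketch assumes the allocator's `top`, `head` and `reserve`
 * pointers already refer to valid storage, since the initializer
 * dereferences them; the non-raw initializer mentioned in the comment above
 * normally takes care of that.
 */
static errval_t example_init_multi_alloc(struct multi_slot_allocator *mca,
                                         cslot_t nslots,
                                         struct capref top_cap,
                                         struct cnoderef top_cnode)
{
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(nslots);  // per-CNode metadata
    void *top_buf     = malloc(bufsize);
    void *head_buf    = malloc(bufsize);
    void *reserve_buf = malloc(bufsize);
    if (top_buf == NULL || head_buf == NULL || reserve_buf == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }
    return multi_slot_alloc_init_raw(mca, nslots, top_cap, top_cnode,
                                     top_buf, head_buf, reserve_buf, bufsize);
}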
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    struct spawninfo *si = state;

    // Increase size by the space wasted on the first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Create copies of the frame capabilities for the spawned domain's vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            // TODO: make this a debug printf
            printf("cap_copy failed for src_slot = %"PRIuCSLOT
                   ", dest_slot = %"PRIuCSLOT"\n", frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Create the objects
    err = memobj_create_anon((struct memobj_anon*)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into the spawned domain's vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = spawn_memobj->f.fill(spawn_memobj, genvaddr, spawn_frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr);

    return SYS_ERR_OK;
}

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_info)
{
    errval_t err;

    // Reset the elfload_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    // The GOT base is passed to the new dispatcher as arch-specific info
    struct Elf32_Shdr *got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm *disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)]  = got_base;
    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;

#ifndef __ARM_ARCH_7M__ // ARMv7-M does not support these CPSR flags
    enabled_area->named.cpsr  = CPSR_F_MASK | ARM_MODE_USR;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
#endif
}
/**
 * \brief Set up an initial cspace
 *
 * Create an initial cspace layout
 */
static errval_t spawn_setup_cspace(struct spawninfo *si)
{
    errval_t err;
    struct capref t1;

    /* Create root CNode */
    err = cnode_create(&si->rootcn_cap, &si->rootcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_ROOTCN);
    }

    /* Create taskcn */
    err = cnode_create(&si->taskcn_cap, &si->taskcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_TASKCN);
    }

    // Mint into rootcn, setting the guard
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_TASKCN;
    err = cap_mint(t1, si->taskcn_cap, 0,
                   GUARD_REMAINDER(2 * DEFAULT_CNODE_BITS));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_TASKCN);
    }

    /* Create the slot_alloc CNodes */
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC0;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }

    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC1;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }

    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC2;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }

    // Create the DCB
    si->dcb.cnode = si->taskcn;
    si->dcb.slot  = TASKCN_SLOT_DISPATCHER;
    err = dispatcher_create(si->dcb);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_DISPATCHER);
    }

    // Give the domain an endpoint to itself (in taskcn)
    struct capref selfep = {
        .cnode = si->taskcn,
        .slot  = TASKCN_SLOT_SELFEP,
    };
    err = cap_retype(selfep, si->dcb, ObjType_EndPoint, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SELFEP);
    }

    // Map the root CNode (in taskcn)
    t1.cnode = si->taskcn;
    t1.slot  = TASKCN_SLOT_ROOTCN;
    err = cap_mint(t1, si->rootcn_cap, 0, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_ROOTCN);
    }

#ifdef TRACING_EXISTS
    // Set up tracing for the child
    err = trace_setup_child(si->taskcn, si->handle);
    if (err_is_fail(err)) {
        printf("Warning: error setting up tracing for child domain\n");
        // SYS_DEBUG(err, ...);
    }
#endif

    // XXX: copy over argspg?
    memset(&si->argspg, 0, sizeof(si->argspg));

    /* Fill up basecn */
    struct capref basecn_cap;
    struct cnoderef basecn;

    // Create basecn in rootcn
    basecn_cap.cnode = si->rootcn;
    basecn_cap.slot  = ROOTCN_SLOT_BASE_PAGE_CN;
    err = cnode_create_raw(basecn_cap, &basecn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }

    // Place the RAM caps
    for (uint8_t i = 0; i < DEFAULT_CNODE_SLOTS; i++) {
        struct capref base = {
            .cnode = basecn,
            .slot  = i
        };
        struct capref ram;
        err = ram_alloc(&ram, BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_RAM_ALLOC);
        }
        err = cap_copy(base, ram);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_COPY);
        }
        err = cap_destroy(ram);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_DESTROY);
        }
    }

    return SYS_ERR_OK;
}

static errval_t spawn_setup_vspace(struct spawninfo *si)
{
    errval_t err;

    /* Create pagecn */
    si->pagecn_cap = (struct capref){.cnode = si->rootcn,
                                     .slot = ROOTCN_SLOT_PAGECN};
    err = cnode_create_raw(si->pagecn_cap, &si->pagecn, PAGE_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_PAGECN);
    }

    /* Init pagecn's slot allocator */
    // XXX: satisfy a peculiarity of the single_slot_alloc_init_raw API
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(PAGE_CNODE_SLOTS);
    void *buf = malloc(bufsize);
    assert(buf != NULL);

    err = single_slot_alloc_init_raw(&si->pagecn_slot_alloc, si->pagecn_cap,
                                     si->pagecn, PAGE_CNODE_SLOTS,
                                     buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
    }

    // Create the root of the page table
    err = si->pagecn_slot_alloc.a.alloc(&si->pagecn_slot_alloc.a, &si->vtree);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    // The top-level table should always live in slot 0 of pagecn
    assert(si->vtree.slot == 0);

    switch (si->cpu_type) {
    case CPU_X86_64:
    case CPU_K1OM:
        err = vnode_create(si->vtree, ObjType_VNode_x86_64_pml4);
        break;

    case CPU_X86_32:
    case CPU_SCC:
#ifdef CONFIG_PAE
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdpt);
#else
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdir);
#endif
        break;

    case CPU_ARM5:
    case CPU_ARM7:
        err = vnode_create(si->vtree, ObjType_VNode_ARM_l1);
        break;

    default:
        assert(!"Other architecture");
        return err_push(err, SPAWN_ERR_UNKNOWN_TARGET_ARCH);
    }
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_VNODE);
    }

    err = spawn_vspace_init(si, si->vtree, si->cpu_type);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_INIT);
    }

    return SYS_ERR_OK;
}

#if 0
/**
 * \brief Look up and map an image
 */
static errval_t spawn_map(const char *name, struct bootinfo *bi,
                          lvaddr_t *binary, size_t *binary_size)
{
    errval_t err;

    /* Get the module from the multiboot image */
    struct mem_region *module = multiboot_find_module(bi, name);
    if (module == NULL) {
        return SPAWN_ERR_FIND_MODULE;
    }

    /* Map the image */
    err = spawn_map_module(module, binary_size, binary, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_MODULE);
    }

    return SYS_ERR_OK;
}
#endif
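/*
 * Illustrative spawn sequence (hypothetical, for orientation only): the real
 * spawn path performs more work (argument and environment pages, dispatcher
 * registers, running the dispatcher), but the helpers above are used roughly
 * in this order. Any names not appearing above are assumptions.
 */
static errval_t spawn_sketch(struct spawninfo *si, lvaddr_t binary,
                             size_t binary_size)
{
    errval_t err;
    genvaddr_t entry;
    void *arch_info;

    err = spawn_setup_cspace(si);       // root CNode, taskcn, DCB, self EP
    if (err_is_fail(err)) {
        return err;
    }

    err = spawn_setup_vspace(si);       // pagecn and top-level page table
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_INIT);
    }

    err = spawn_arch_load(si, binary, binary_size, &entry, &arch_info);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    // ... set up arguments and registers (spawn_arch_set_registers), then run ...
    return SYS_ERR_OK;
}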
static void multiboot_cap_reply(struct monitor_binding *st, struct capref cap,
                                errval_t msgerr)
{
    errval_t err;
    static cslot_t multiboot_slots = 0;

    // An error reply signals that all multiboot caps have been received
    if (err_is_fail(msgerr)) {
        // Request the bootinfo frame
        struct bootinfo *bi;
        err = map_bootinfo(&bi);
        assert(err_is_ok(err));

        // Init ramfs
        struct dirent *root = ramfs_init();

        // Populate it with the contents of the multiboot image
        populate_multiboot(root, bi);

        // Start the service
        err = start_service(root);
        assert(err_is_ok(err));
        return;
    }

    // Move the cap into the multiboot cnode
    struct capref dest = {
        .cnode = cnode_module,
        .slot  = multiboot_slots++,
    };
    err = cap_copy(dest, cap);
    assert(err_is_ok(err));
    err = cap_destroy(cap);
    assert(err_is_ok(err));

    // Request the next multiboot cap
    err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, multiboot_slots);
    assert(err_is_ok(err));
}

static void bootstrap(void)
{
    errval_t err;

    /* Create the module cnode */
    struct capref modulecn_cap = {
        .cnode = cnode_root,
        .slot  = ROOTCN_SLOT_MODULECN,
    };
    err = cnode_create_raw(modulecn_cap, NULL,
                           ((cslot_t)1 << MODULECN_SIZE_BITS), NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cnode_create_raw failed");
        abort();
    }

    // XXX: Set the reply handler
    struct monitor_binding *st = get_monitor_binding();
    st->rx_vtbl.multiboot_cap_reply = multiboot_cap_reply;

    // Make the first multiboot cap request
    err = st->tx_vtbl.multiboot_cap_request(st, NOP_CONT, 0);
    assert(err_is_ok(err));
}
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    struct spawninfo *si = state;

    // Increase size by the space wasted on the first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Create copies of the frame capabilities for the spawned domain's vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            // TODO: make this a debug printf
            printf("cap_copy failed for src_slot = %"PRIuCSLOT
                   ", dest_slot = %"PRIuCSLOT"\n", frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Create the objects
    err = memobj_create_anon((struct memobj_anon*)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into the spawned domain's vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = spawn_memobj->f.fill(spawn_memobj, genvaddr, spawn_frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    // Record the parent-side mapping so it can be found again later
    si->vregion[si->vregions] = vregion;
    si->base[si->vregions++] = base;

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr);

    return SYS_ERR_OK;
}

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_load_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    si->vregions = 0;

    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    // XXX: this code assumes that elf_load never needs more than 32 slots for
    // text frame capabilities.
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // Load the binary
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;
    err = elf_load_tls(EM_HOST, elf_allocate, si, binary, binary_size, entry,
                       &si->tls_init_base, &si->tls_init_len,
                       &si->tls_total_len);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
#if defined(__x86_64__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_64/crt0.s */
    disabled_area->rdi = get_dispatcher_shared_generic(handle)->udisp;
#elif defined(__i386__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_32/crt0.s */
    disabled_area->edi = get_dispatcher_shared_generic(handle)->udisp;
#endif
}
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    lvaddr_t vaddr;
    size_t used_size;
    struct spawninfo *si = state;

    // Increase size by the space wasted on the first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Step 1: Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Step 2: Create copies of the frame capabilities for the child vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                         ", dest_slot = %"PRIuCSLOT"\n",
                         frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    // Step 3: Map into our own vspace

    // Get a virtual address range to hold the module
    void *vaddr_range;
    err = paging_alloc(get_current_paging_state(), &vaddr_range, size);
    if (err_is_fail(err)) {
        debug_printf("elf_allocate: paging_alloc failed\n");
        return err;
    }

    // Map the allocated physical memory into the virtual memory of the parent
    vaddr = (lvaddr_t)vaddr_range;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };

        // Find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // Map the frame to provide physical memory backing
        err = paging_map_fixed_attr(get_current_paging_state(), vaddr, frame,
                                    slot_size, VREGION_FLAGS_READ_WRITE);
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while

    // Step 4: Map into the new process
    struct paging_state *cp = si->vspace;

    // Map the allocated physical memory into the virtual memory of the child
    vaddr = (lvaddr_t)base;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };

        // Find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // Map the frame to provide physical memory backing
        err = paging_map_fixed_attr(cp, vaddr, frame, slot_size,
                                    elf_to_vregion_flags(flags));
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while

    *retbase = (char *)vaddr_range + base_offset;
    return SYS_ERR_OK;
} // end function: elf_allocate

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_info)
{
    errval_t err;

    // Reset the elfload_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    // The GOT base is passed to the new dispatcher as arch-specific info
    struct Elf32_Shdr *got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm *disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
}