static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // create copies of the frame capabilities for spawn vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            // TODO: make debug printf
            printf("cap_copy failed for src_slot = %"PRIuCSLOT
                   ", dest_slot = %"PRIuCSLOT"\n",
                   frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Create the objects
    err = memobj_create_anon((struct memobj_anon*)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into spawn vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(spawn_memobj, genvaddr, spawn_frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    si->vregion[si->vregions] = vregion;
    si->base[si->vregions++] = base;

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr);

    return SYS_ERR_OK;
}

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_load_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    si->vregions = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    // XXX: this code assumes that elf_load never needs more than 32 slots for
    // text frame capabilities.
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // Load the binary
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;
    err = elf_load_tls(EM_HOST, elf_allocate, si, binary, binary_size, entry,
                       &si->tls_init_base, &si->tls_init_len,
                       &si->tls_total_len);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
#if defined(__x86_64__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_64/crt0.s */
    disabled_area->rdi = get_dispatcher_shared_generic(handle)->udisp;
#elif defined(__i386__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_32/crt0.s */
    disabled_area->edi = get_dispatcher_shared_generic(handle)->udisp;
#endif
}
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // create copies of the frame capabilities for spawn vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            // TODO: make debug printf
            printf("cap_copy failed for src_slot = %"PRIuCSLOT
                   ", dest_slot = %"PRIuCSLOT"\n",
                   frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Create the objects
    err = memobj_create_anon((struct memobj_anon*)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into spawn vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(spawn_memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr);

    return SYS_ERR_OK;
}

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;

#ifndef __ARM_ARCH_7M__ // armv7-m does not support these flags
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
#endif
}
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    lvaddr_t vaddr;
    size_t used_size;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Step 1: Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Step 2: create copies of the frame capabilities for child vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                         ", dest_slot = %"PRIuCSLOT"\n",
                         frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    // Step 3: map into own vspace

    // Get virtual address range to hold the module
    void *vaddr_range;
    err = paging_alloc(get_current_paging_state(), &vaddr_range, size);
    if (err_is_fail(err)) {
        debug_printf("elf_allocate: paging_alloc failed\n");
        return err;
    }

    // map allocated physical memory in virtual memory of parent process
    vaddr = (lvaddr_t)vaddr_range;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(get_current_paging_state(), vaddr, frame,
                                    slot_size, VREGION_FLAGS_READ_WRITE);
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while: map into own vspace

    // Step 4: map into new process
    struct paging_state *cp = si->vspace;

    // map allocated physical memory in virtual memory of child process
    vaddr = (lvaddr_t)base;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(cp, vaddr, frame, slot_size,
                                    elf_to_vregion_flags(flags));
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while: map into child vspace

    *retbase = (void*)vaddr_range + base_offset;
    return SYS_ERR_OK;
} // end function: elf_allocate

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    //debug_printf("spawn_arch_load: about to load elf %p\n", elf_allocate);

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    //debug_printf("hello here\n");

    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
}