/**
 * \brief Create page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address to create the mapping for
 * \param frame    The frame cap to map in
 * \param offset   Offset into the frame cap
 * \param size     Size of the mapping
 * \param flags    Flags for the mapping
 * \param retoff   If non-NULL, filled in with adjusted offset of mapped region
 * \param retsize  If non-NULL, filled in with adjusted size of mapped region
 */
static errval_t map(struct pmap *pmap, genvaddr_t vaddr, struct capref frame,
                    size_t offset, size_t size, vregion_flags_t flags,
                    size_t *retoff, size_t *retsize)
{
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;

    size   += BASE_PAGE_OFFSET(offset);
    size    = ROUND_UP(size, BASE_PAGE_SIZE);
    offset -= BASE_PAGE_OFFSET(offset);

    const size_t slabs_reserve  = 3; // == max_slabs_required(1)
    uint64_t     slabs_free     = slab_freecount(&pmap_arm->slab);
    size_t       slabs_required = max_slabs_required(size) + slabs_reserve;

    if (slabs_required > slabs_free) {
        if (get_current_pmap() == pmap) {
            errval_t err = refill_slabs(pmap_arm, slabs_required);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        } else {
            size_t bytes = SLAB_STATIC_SIZE(slabs_required - slabs_free,
                                            sizeof(struct vnode));
            void *buf = malloc(bytes);
            if (!buf) {
                return LIB_ERR_MALLOC_FAIL;
            }
            slab_grow(&pmap_arm->slab, buf, bytes);
        }
    }

    return do_map(pmap_arm, vaddr, frame, offset, size, flags,
                  retoff, retsize);
}
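/*
 * A minimal sketch of the alignment map() performs before calling do_map(),
 * assuming BASE_PAGE_SIZE == 4096; the EX_-prefixed macros stand in for the
 * real BASE_PAGE_OFFSET/ROUND_UP definitions.
 */
#include <assert.h>
#include <stddef.h>

#define EX_PAGE_SIZE      4096
#define EX_PAGE_OFFSET(a) ((a) & (EX_PAGE_SIZE - 1))
#define EX_ROUND_UP(n, s) ((((n) + (s) - 1) / (s)) * (s))

static void map_alignment_example(void)
{
    size_t offset = 0x1234;                    // byte offset into the frame
    size_t size   = 0x100;                     // bytes the caller asked to map

    size   += EX_PAGE_OFFSET(offset);          // 0x100 + 0x234 = 0x334
    size    = EX_ROUND_UP(size, EX_PAGE_SIZE); // -> 0x1000: one whole page
    offset -= EX_PAGE_OFFSET(offset);          // 0x1234 -> 0x1000: page-aligned

    // do_map() therefore always sees a page-aligned offset and a
    // page-multiple size; *retoff/*retsize report these adjusted values.
    assert(offset == 0x1000 && size == 0x1000);
}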
/**
 * \brief Map init user-space memory.
 *
 * This function maps pages of the init user-space module. It expects
 * the virtual base address 'gvbase' of a program segment of the init
 * executable, its size 'size' and its ELF64 access control flags. It maps
 * pages into physical memory that is allocated on the fly and puts
 * corresponding frame caps into init's segcn.
 *
 * \param state  Pointer to the struct spawn_state receiving the frame caps.
 * \param gvbase Virtual base address of program segment.
 * \param size   Size of program segment in bytes.
 * \param flags  ELF64 access control flags of program segment.
 * \param ret    Used to return base region pointer
 */
errval_t startup_alloc_init(void *state, genvaddr_t gvbase, size_t size,
                            uint32_t flags, void **ret)
{
    errval_t err;

    struct spawn_state *spawn_state = state;

    lvaddr_t vbase  = (lvaddr_t)gvbase; /* XXX */
    lvaddr_t offset = BASE_PAGE_OFFSET(vbase);

    /* Page align the parameters */
    paging_align(&vbase, NULL, &size, BASE_PAGE_SIZE);

    lpaddr_t pbase = 0, paddr = 0;
    for (lvaddr_t i = vbase; i < vbase + size; i += BASE_PAGE_SIZE) {
        if (apic_is_bsp()) {
            paddr = bsp_alloc_phys(BASE_PAGE_SIZE);
        } else {
            paddr = app_alloc_phys(BASE_PAGE_SIZE);
        }

        if (pbase == 0) {
            pbase = paddr;
        }

        err = startup_map_init(i, paddr, BASE_PAGE_SIZE, flags);
        assert(err_is_ok(err));
    }

    if (apic_is_bsp()) {
        // Create frame caps for segcn
        paddr += BASE_PAGE_SIZE;

        debug(SUBSYS_STARTUP,
              "Allocated physical memory [0x%"PRIxLPADDR", 0x%"PRIxLPADDR"]\n",
              pbase, paddr - pbase);

        err = create_caps_to_cnode(pbase, paddr - pbase,
                                   RegionType_RootTask, spawn_state, bootinfo);
        if (err_is_fail(err)) {
            return err;
        }
    }

    assert(ret != NULL);
    *ret = (void *)(vbase + offset);
    return SYS_ERR_OK;
}
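/*
 * A worked example of the [pbase, paddr) accounting above, with illustrative
 * addresses, assuming the per-core allocator hands out ascending contiguous
 * pages (which the create_caps_to_cnode() call relies on):
 *
 *   3 pages allocated at 0x80000000, 0x80001000, 0x80002000
 *     pbase = 0x80000000                       // base of the first page
 *     paddr = 0x80002000                       // base of the *last* page
 *   after the loop: paddr += BASE_PAGE_SIZE -> 0x80003000
 *     paddr - pbase = 0x3000 = 3 * BASE_PAGE_SIZE
 *
 * i.e. the post-loop increment turns "base of last page" into "end of
 * region", so the full extent is handed to create_caps_to_cnode().
 */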
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count    = si->elfload_slot - vspace_slot;

    // Create copies of the frame capabilities for the spawn vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            // TODO: make debug printf
            printf("cap_copy failed for src_slot = %"PRIuCSLOT
                   ", dest_slot = %"PRIuCSLOT"\n",
                   frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Create the objects
    err = memobj_create_anon((struct memobj_anon *)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into spawn vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = spawn_memobj->f.fill(spawn_memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void *)vspace_genvaddr_to_lvaddr(genvaddr);
    return SYS_ERR_OK;
}

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len  = si->tls_total_len = 0;

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    struct Elf32_Shdr *got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void *)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm *disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)]  = got_base;
    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;

#ifndef __ARM_ARCH_7M__ // armv7-m does not support these flags
    enabled_area->named.cpsr  = CPSR_F_MASK | ARM_MODE_USR;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
#endif
}
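/*
 * A minimal sketch (with stand-in names) of the power-of-two split that the
 * frame-allocation loops in elf_allocate() perform; ex_log2floor() assumes
 * the library's log2floor() returns floor(log2(n)).
 */
#include <stdio.h>
#include <stddef.h>

static unsigned ex_log2floor(size_t n)
{
    unsigned r = 0;
    while (n >>= 1) {
        r++;
    }
    return r;
}

static void frame_split_example(void)
{
    size_t size = 5 * 4096;   // 20 KiB segment, already page-aligned
    size_t sz   = 0;
    for (size_t offset = 0; offset < size; offset += sz) {
        // Largest power-of-two chunk that still fits in the remainder:
        // a 16 KiB frame at offset 0x0, then a 4 KiB frame at offset 0x4000.
        sz = (size_t)1 << ex_log2floor(size - offset);
        printf("frame at offset 0x%zx, size 0x%zx\n", offset, sz);
    }
    // The loop terminates because size is a page multiple, so the remainder
    // never drops below one page without reaching exactly zero.
}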
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count    = si->elfload_slot - vspace_slot;

    // Create copies of the frame capabilities for the spawn vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            // TODO: make debug printf
            printf("cap_copy failed for src_slot = %"PRIuCSLOT
                   ", dest_slot = %"PRIuCSLOT"\n",
                   frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Create the objects
    err = memobj_create_anon((struct memobj_anon *)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into spawn vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = spawn_memobj->f.fill(spawn_memobj, genvaddr, spawn_frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    // Remember the vregion and segment base so they can be found again later
    si->vregion[si->vregions] = vregion;
    si->base[si->vregions++]  = base;

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void *)vspace_genvaddr_to_lvaddr(genvaddr);
    return SYS_ERR_OK;
}

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_load_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    si->vregions = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    // XXX: this code assumes that elf_load never needs more than 32 slots
    // for text frame capabilities.
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // Load the binary
    si->tls_init_base = 0;
    si->tls_init_len  = si->tls_total_len = 0;
    err = elf_load_tls(EM_HOST, elf_allocate, si, binary, binary_size, entry,
                       &si->tls_init_base, &si->tls_init_len,
                       &si->tls_total_len);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
#if defined(__x86_64__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_64/crt0.s */
    disabled_area->rdi = get_dispatcher_shared_generic(handle)->udisp;
#elif defined(__i386__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_32/crt0.s */
    disabled_area->edi = get_dispatcher_shared_generic(handle)->udisp;
#endif
}
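/*
 * Illustrative segcn layout after one elf_allocate() call that created n
 * frames (slot numbers relative to the vspace_slot / spawn_vspace_slot
 * values captured inside the function):
 *
 *   [vspace_slot,       vspace_slot + n)        original frame caps,
 *                                               mapped into our own vspace
 *   [spawn_vspace_slot, spawn_vspace_slot + n)  cap_copy()s of those frames,
 *                                               mapped into the child vspace
 *
 * Both ranges refer to the same physical memory, so the loader writes the
 * segment contents through its own mapping and the child sees them at 'base'.
 */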
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    lvaddr_t vaddr;
    size_t used_size;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Step 1: Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count    = si->elfload_slot - vspace_slot;

    // Step 2: create copies of the frame capabilities for the child vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                         ", dest_slot = %"PRIuCSLOT"\n",
                         frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    // Step 3: map into own vspace

    // Get a virtual address range to hold the module
    void *vaddr_range;
    err = paging_alloc(get_current_paging_state(), &vaddr_range, size);
    if (err_is_fail(err)) {
        debug_printf("elf_allocate: paging_alloc failed\n");
        return err;
    }

    // Map the allocated physical memory into the virtual memory of the
    // parent process
    vaddr     = (lvaddr_t)vaddr_range;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };

        // Find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // Map the frame to provide physical memory backing
        err = paging_map_fixed_attr(get_current_paging_state(), vaddr, frame,
                                    slot_size, VREGION_FLAGS_READ_WRITE);
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr     += slot_size;
    } // end while

    // Step 4: map into the child process
    struct paging_state *cp = si->vspace;

    // Map the allocated physical memory into the virtual memory of the
    // child process
    vaddr     = (lvaddr_t)base;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };

        // Find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // Map the frame to provide physical memory backing
        err = paging_map_fixed_attr(cp, vaddr, frame, slot_size,
                                    elf_to_vregion_flags(flags));
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr     += slot_size;
    } // end while

    *retbase = (void *)vaddr_range + base_offset;
    return SYS_ERR_OK;
} // end function: elf_allocate

/**
 * \brief Load the elf image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void **arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len  = si->tls_total_len = 0;

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    struct Elf32_Shdr *got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void *)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm *disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
}
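/*
 * A short worked example of the frame-size recovery in the mapping loops of
 * elf_allocate() above: invoke_frame_identify() fills a struct frame_identity
 * whose 'bits' field is the log2 of the frame size (as the code's own
 * slot_size computation shows), so
 *
 *   id.bits == 12  ->  slot_size = 1UL << 12 = 0x1000  ( 4 KiB)
 *   id.bits == 14  ->  slot_size = 1UL << 14 = 0x4000  (16 KiB)
 *
 * matching the power-of-two sizes chosen by frame_create() in Step 1. Each
 * while-loop iteration therefore consumes exactly one frame, advancing vaddr
 * and shrinking used_size by slot_size until the whole segment is mapped.
 */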