/**
 * \brief Resume execution of a given register state
 *
 * This function resumes the execution of the given register state on the
 * current dispatcher. It may only be called while the dispatcher is disabled.
 *
 * \param handle    Handle to the current dispatcher
 * \param archregs  Register state snapshot to resume
 */
void disp_resume(dispatcher_handle_t handle, arch_registers_state_t *archregs)
{
    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(handle);

    // The definition of disp_resume_end is rather flaky. The system uses
    // the location of the PC to determine where to spill the thread context
    // for exceptions and interrupts. There are two safe ways of doing this:
    //
    // 1) Write this entire function in assembler.
    // 2) Write this function in C and write a linker script to emit
    //    function bounds.

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);
    assert_disabled(disp->d.haswork);

#ifdef CONFIG_DEBUG_DEADLOCKS
    ((struct disp_priv *)disp)->yieldcount = 0;
#endif

    disp_resume_context(&disp->d, archregs->regs);
}
/**
 * \brief Save the current register state and optionally yield the CPU
 *
 * This function saves as much of the current register state as necessary
 * (such that, when resumed, it returns to the caller), and then either
 * re-enters the thread scheduler or yields the CPU.
 * It may only be called while the dispatcher is disabled.
 *
 * \param handle    Handle to the current dispatcher
 * \param state     Location to save the current register state
 * \param yield     If true, yield the CPU to the kernel; otherwise re-run the thread scheduler
 * \param yield_to  Endpoint capability for the dispatcher to which we want to yield
 */
void disp_save(dispatcher_handle_t handle, arch_registers_state_t *state,
               bool yield, capaddr_t yield_to)
{
    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);

    disp_save_context(state->regs);
    state->named.pc = (lvaddr_t)disp_save_epilog;

    if (yield) {
        sys_yield(yield_to);
        // may fail if the target doesn't exist; if so, just fall through
    }
    // this code won't run if the yield succeeded

    // enter thread scheduler again
    // this doesn't return, and will call disp_yield if there's nothing to do
    thread_run_disabled(handle);

    __asm volatile("disp_save_epilog:");
}
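/*
 * Illustrative sketch (not part of the original source): one way a user-level
 * threading path might use disp_save() together with disp_resume(). The
 * struct thread and the wait-queue handling here are hypothetical; only the
 * calling convention is the point: the dispatcher is already disabled, and
 * the saved state, once resumed, continues right after disp_save().
 */
struct thread {
    arch_registers_state_t regs;   // per-thread register save area
    struct thread *next;
};

static void example_block(dispatcher_handle_t handle, struct thread *me,
                          struct thread **wait_queue)
{
    // caller has already disabled the dispatcher identified by `handle`
    me->next = *wait_queue;
    *wait_queue = me;

    // Save our registers into me->regs and re-enter the thread scheduler.
    // When another thread later calls disp_resume(handle, &me->regs),
    // execution continues here and disp_save() appears to return normally.
    disp_save(handle, &me->regs, false, 0);
}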
struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
{
    printf("spawn_bsp_init\n");

    /* Only the first core can run this code */
    assert(hal_cpu_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };
    int argc = 2;

    struct dcb *init_dcb = spawn_init_common(name, argc, argv, bootinfo_phys,
                                             alloc_phys);

    // Map bootinfo
    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE, bootinfo_phys,
                   BOOTINFO_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    genvaddr_t init_ep, got_base;
    load_init_image(&l2_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);

    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r10 = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc = init_ep;
#ifndef __ARM_ARCH_7M__ // the armv7-m profile does not have such a mode field
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->disabled_save_area.named.r10 = got_base;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    lpaddr_t init_alloc_end = alloc_phys(0); // XXX
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel

    /*
    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    */

    return init_dcb;
}
/**
 * \brief Architecture-specific dispatcher initialisation
 */
void disp_arch_init(dispatcher_handle_t handle)
{
    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(handle);

    disp->d.dispatcher_run = (lvaddr_t)run_entry;
    disp->d.dispatcher_pagefault = (lvaddr_t)pagefault_entry;
    disp->d.dispatcher_pagefault_disabled = (lvaddr_t)disabled_pagefault_entry;
    disp->d.dispatcher_trap = (lvaddr_t)trap_entry;

    disp->crit_pc_low = (lvaddr_t)disp_resume_context;
    disp->crit_pc_high = (lvaddr_t)disp_resume_context_epilog;
}
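/*
 * Illustrative sketch (not part of the original source): crit_pc_low and
 * crit_pc_high bound the disp_resume_context() critical section, and the
 * fault handlers below consult this window through dispatcher_is_disabled_ip().
 * The check is assumed to combine the explicit disabled flag with the PC
 * window, roughly as follows; this is a sketch, not the authoritative
 * implementation.
 */
static bool example_is_disabled_ip(dispatcher_handle_t handle, uintptr_t ip)
{
    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(handle);
    // disabled either explicitly, or because we interrupted the dispatcher
    // in the middle of restoring a context
    return disp->d.disabled != 0 ||
           (disp->crit_pc_low <= ip && ip < disp->crit_pc_high);
}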
void arm_kernel_startup(void)
{
    printf("arm_kernel_startup entered \n");
    struct dcb *init_dcb;

    if (hal_cpu_is_bsp()) {
        printf("Doing BSP related bootup \n");

        /* Initialize the location to allocate phys memory from */
        bsp_init_alloc_addr = glbl_core_data->start_free_ram;

        // Bring up init
        init_dcb = spawn_bsp_init(BSP_INIT_MODULE_NAME, bsp_alloc_phys);

        // Not available on PandaBoard?
        pit_start(0);
    } else {
        printf("Doing non-BSP related bootup \n");

        /* Initialize the allocator with the information passed to us */
        app_alloc_phys_start = glbl_core_data->memory_base_start;
        app_alloc_phys_end   = app_alloc_phys_start +
                               ((lpaddr_t)1 << glbl_core_data->memory_bits);

        init_dcb = spawn_app_init(glbl_core_data, APP_INIT_MODULE_NAME,
                                  app_alloc_phys);

#ifndef __ARM_ARCH_7M__ // armv7-m does not use a GIC and cannot acknowledge interrupts
        uint32_t irq = gic_get_active_irq();
        gic_ack_irq(irq);
#endif
    }

    /* printf("Trying to enable interrupts\n"); */
    /* __asm volatile ("CPSIE aif"); // Enable interrupts */
    /* printf("Done enabling interrupts\n"); */

    /* printf("HOLD BOOTUP - SPINNING\n"); */
    /* while (1); */
    /* printf("THIS SHOULD NOT HAPPEN\n"); */

    // enable interrupt forwarding to cpu
    // FIXME: PS: enable this as it is needed for multicore setup.
    // gic_cpu_interface_enable();

    // Should not return
    printf("Calling dispatch from arm_kernel_startup, start address is=%"PRIxLVADDR"\n",
           get_dispatcher_shared_arm(init_dcb->disp)->enabled_save_area.named.r0);
    dispatch(init_dcb);
    panic("Error spawning init!");
}
/**
 * \brief Switch execution between two register states
 *
 * This function saves as much of the current register state as necessary
 * (such that, when resumed, it returns to the caller), and switches execution
 * by resuming the given register state. It may only be called while the
 * dispatcher is disabled.
 *
 * \param handle      Handle to the current dispatcher
 * \param from_state  Location to save the current register state
 * \param to_state    Location from which to resume the new register state
 */
void disp_switch(dispatcher_handle_t handle, arch_registers_state_t *from_state,
                 arch_registers_state_t *to_state)
{
    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);
    assert_disabled(disp->d.haswork);
    assert_disabled(to_state != NULL);

    disp_save_context(from_state->regs);
    from_state->named.pc = (lvaddr_t)disp_switch_epilog;

    disp_resume_context(&disp->d, to_state->regs);

    __asm volatile("disp_switch_epilog:");
}
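/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * user-level scheduler fragment using disp_switch(), reusing the illustrative
 * struct thread from the disp_save() sketch above. With the dispatcher
 * disabled, the current thread's state is captured and the next thread's
 * state is resumed in a single step.
 */
static void example_switch_to(dispatcher_handle_t handle,
                              struct thread *current, struct thread *next)
{
    // caller holds the dispatcher disabled
    disp_switch(handle, &current->regs, &next->regs);
    // when current->regs is later resumed, execution continues here
}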
/*
 * \brief Handles undefined instructions, unaligned accesses and division by zero
 */
void handle_user_undef(arch_registers_state_t* save_area)
{
    lvaddr_t fault_address = save_area->named.pc; // not passed as an argument

    union registers_arm resume_area;

    struct dispatcher_shared_arm *disp =
        get_dispatcher_shared_arm(dcb_current->disp);

    bool disabled =
        dispatcher_is_disabled_ip(dcb_current->disp, save_area->named.pc);
    disp->d.disabled = disabled;

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    if (disabled) {
        assert(save_area == &disp->trap_save_area);
    } else {
        assert(save_area == &disp->enabled_save_area);
    }

    printk(LOG_WARN, "user usage fault%s in '%.*s': IP %" PRIuPTR "\n",
           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address);

    struct dispatcher_shared_generic *disp_gen =
        get_dispatcher_shared_generic(dcb_current->disp);

    // make sure we do not accidentally create an IT block when upcalling
    resume_area.named.cpsr = 0;
    resume_area.named.pc   = disp->d.dispatcher_trap;
    resume_area.named.r0   = disp_gen->udisp;
    resume_area.named.r1   = ARM_7M_EVECTOR_USAGE;
    resume_area.named.r2   = 0;
    resume_area.named.r3   = fault_address;
    resume_area.named.rtls = disp_gen->udisp;
    resume_area.named.r10  = disp->got_base;

    // we need some temporary stack to exit handler mode. Memory in userspace
    // would be better, but for the moment we can use this temporary region
    resume_area.named.stack = (uint32_t) &irq_save_pushed_area_top;

    // Upcall user to save area
    disp->d.disabled = true;
    resume(&resume_area);
}
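/*
 * Illustrative sketch (not part of the original source): the upcall above
 * places its arguments in r0-r3, which under the AAPCS arrive as the first
 * four parameters of the user-level trap entry. A hypothetical handler
 * signature matching that convention might look as follows; the name and
 * body are illustrative only.
 */
void example_trap_entry(void *udisp, uintptr_t evector, uintptr_t unused,
                        uintptr_t fault_ip)
{
    // e.g. report the trap against the faulting thread, or kill the domain
    (void)udisp; (void)evector; (void)unused; (void)fault_ip;
}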
struct dcb *spawn_app_init(struct arm_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of the core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    struct dcb *init_dcb = spawn_init_common(name, argc, argv, 0, alloc_phys);

    // URPC frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map URPC frame at MON_URPC_BASE */
    spawn_init_map(init_l2, INIT_VBASE, MON_URPC_VBASE, urpc_ptr,
                   MON_URPC_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    // ELF-load the domain
    genvaddr_t entry_point, got_base = 0;
    err = elf_load(EM_ARM, startup_alloc_init, &l2_info,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(local_phys_to_mem(core_data->monitor_binary),
                                       core_data->monitor_binary_size, ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r10 = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc = entry_point;
#ifndef __ARM_ARCH_7M__ // the armv7-m profile does not have such a mode field
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->disabled_save_area.named.r10 = got_base;
    //disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;

    return init_dcb;
}
static struct dcb *spawn_init_common(const char *name, int argc,
                                     const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    printf("spawn_init_common %s\n", name);

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name, argc, argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, &paramaddr);

    init_page_tables();

    printf("about to call mem_to_local_phys with lvaddr=%"PRIxLVADDR"\n",
           init_l1);
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);

    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    /*
     * We create the capability to the devices at this stage and store it
     * in TASKCN_SLOT_IO, where on x86 the IO capability is stored for
     * device access on PCI. PCI is not available on the PandaBoard, so this
     * should not be a problem.
     */
    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn),
                                         TASKCN_SLOT_IO);
    errval_t err = caps_create_new(ObjType_DevFrame, 0x40000000, 30, 30, iocap);
    assert(err_is_ok(err));

    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);

    /* Initialize dispatcher */
    disp->disabled = true;
    strncpy(disp->name, argv[0], DISP_NAME_LEN);

    /* Tell init the vspace addr of its dispatcher */
    disp->udisp = INIT_DISPATCHER_VBASE;

    disp_arm->enabled_save_area.named.r0 = paramaddr;
#ifndef __ARM_ARCH_7M__ // the armv7-m profile does not have such a mode field
    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;

    printf("spawn_init_common: starting from=%"PRIxLVADDR"\n");

    return init_dcb;
}
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Create copies of the frame capabilities for the spawn vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            // TODO: make debug printf
            printf("cap_copy failed for src_slot = %"PRIuCSLOT
                   ", dest_slot = %"PRIuCSLOT"\n",
                   frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Create the objects
    err = memobj_create_anon((struct memobj_anon*)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into spawn vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }

    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(spawn_memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr);

    return SYS_ERR_OK;
}

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si, lvaddr_t binary,
                         size_t binary_size, genvaddr_t *entry,
                         void **arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;

#ifndef __ARM_ARCH_7M__ // armv7-m does not support these flags
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
#endif
}
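/*
 * Illustrative sketch (not part of the original source): the allocation loops
 * in elf_allocate() carve each page-aligned segment into power-of-two-sized
 * frames, at every step taking the largest power of two that still fits in
 * the remainder. The standalone toy below shows just that decomposition;
 * toy_log2floor() is a stand-in for the library's log2floor().
 */
#include <stdio.h>
#include <stddef.h>

static unsigned toy_log2floor(size_t n)
{
    // index of the highest set bit, i.e. floor(log2(n)) for n > 0
    unsigned r = 0;
    while (n >>= 1) {
        r++;
    }
    return r;
}

int main(void)
{
    size_t size = 0x5000;   // a 20 KiB page-aligned segment
    size_t sz = 0;

    // 0x5000 decomposes into a 0x4000 frame followed by a 0x1000 frame
    for (size_t offset = 0; offset < size; offset += sz) {
        sz = (size_t)1 << toy_log2floor(size - offset);
        printf("frame at offset 0x%zx, size 0x%zx\n", offset, sz);
    }
    return 0;
}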
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    lvaddr_t vaddr;
    size_t used_size;
    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Step 1: Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Step 2: create copies of the frame capabilities for the child vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                         ", dest_slot = %"PRIuCSLOT"\n",
                         frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    // Step 3: map into own vspace

    // Get a virtual address range to hold the module
    void *vaddr_range;
    err = paging_alloc(get_current_paging_state(), &vaddr_range, size);
    if (err_is_fail(err)) {
        debug_printf("elf_allocate: paging_alloc failed\n");
        return (err);
    }

    // Map the allocated physical memory in the virtual memory of the parent process
    vaddr = (lvaddr_t)vaddr_range;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(get_current_paging_state(), vaddr, frame,
                                    slot_size, VREGION_FLAGS_READ_WRITE);
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while

    // Step 4: map into the new process
    struct paging_state *cp = si->vspace;

    // Map the allocated physical memory in the virtual memory of the child process
    vaddr = (lvaddr_t)base;
    used_size = size;
    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(cp, vaddr, frame, slot_size,
                                    elf_to_vregion_flags(flags));
        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr += slot_size;
    } // end while

    *retbase = (void*)vaddr_range + base_offset;

    return SYS_ERR_OK;
} // end function: elf_allocate

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si, lvaddr_t binary,
                         size_t binary_size, genvaddr_t *entry,
                         void **arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    //debug_printf("spawn_arch_load: about to load elf %p\n", elf_allocate);
    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    //debug_printf("hello here\n");
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
}
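/*
 * Illustrative sketch (not part of the original source): both elf_allocate()
 * variants implement the allocator callback that elf_load() invokes once per
 * loadable segment. The callback is handed the segment's virtual base, size
 * and flags, and must return an address in the caller's own address space to
 * which the loader can copy the segment. The standalone toy below shows that
 * contract with a heap-backed allocator; the loader driver and names are
 * assumptions for illustration, not the real elf_load() implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef int (*toy_allocator_fn)(void *state, uint64_t base, size_t size,
                                uint32_t flags, void **retbase);

// Toy allocator: back each segment with zeroed heap memory. The real
// callbacks above instead allocate frame capabilities and map them into
// both the parent's and the child's address space.
static int toy_allocate(void *state, uint64_t base, size_t size,
                        uint32_t flags, void **retbase)
{
    (void)state; (void)base; (void)flags;
    *retbase = calloc(1, size);
    return *retbase ? 0 : -1;
}

// Loader side of the contract: request a destination for the segment, then
// copy the file-backed bytes into it (any .bss tail stays zeroed).
static int toy_load_segment(toy_allocator_fn allocate, void *state,
                            uint64_t vbase, const void *filedata,
                            size_t filesz, size_t memsz, uint32_t flags)
{
    void *dest;
    int err = allocate(state, vbase, memsz, flags, &dest);
    if (err != 0) {
        return err;
    }
    memcpy(dest, filedata, filesz);
    return 0;
}

int main(void)
{
    const char text[] = "segment contents";
    int err = toy_load_segment(toy_allocate, NULL, 0x10000,
                               text, sizeof(text), 0x1000, 0);
    printf("toy_load_segment returned %d\n", err);
    return 0;
}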
/*
 * Try to find out what address generated the page fault, and then tell the
 * dispatcher what happened. Some faults are not recoverable (e.g. stack
 * faults), because the context has already been lost.
 */
void handle_user_page_fault(arch_registers_state_t* save_area)
{
    // not passed as an argument, because there is not just one place to look
    lvaddr_t fault_address;

    /*
    // print out registers for debugging
    printf("page fault. registers:\n");
    for (uint32_t i = 0; i < NUM_REGS; i++) {
        printf("0x%x\n", save_area->regs[i]);
    }
    uint32_t regval;
    __asm volatile ("mrs %[regval], xpsr" : [regval] "=r"(regval));
    printf("current XPSR register: 0x%x\n", regval);

    printf("M3 MMU address: 0x%x\n", *((uint32_t*) &mmu));
    printf("M3 MMU_FAULT_AD register: 0x%x\n", omap44xx_mmu_fault_ad_rd(&mmu));
    printf("M3 MMU_FAULT_STATUS register: 0x%x\n", omap44xx_mmu_fault_status_rd(&mmu));
    printf("M3 MMU_FAULT_PC register: 0x%x\n", omap44xx_mmu_fault_pc_rd(&mmu));
    printf("M3 MMU_IRQSTATUS register: 0x%x\n", omap44xx_mmu_irqstatus_rd(&mmu));

    printf("ICTR: 0x%x\n", omap44xx_cortex_m3_nvic_ICTR_rd(&nvic));
    printf("CPUID_BASE: 0x%x\n", omap44xx_cortex_m3_nvic_CPUID_BASE_rd(&nvic));
    printf("ICSR: 0x%x\n", omap44xx_cortex_m3_nvic_ICSR_rd(&nvic));
    printf("VTOR: 0x%x\n", omap44xx_cortex_m3_nvic_VTOR_rd(&nvic));
    printf("AIRCR: 0x%x\n", omap44xx_cortex_m3_nvic_AIRCR_rd(&nvic));
    printf("CCR: 0x%x\n", omap44xx_cortex_m3_nvic_CCR_rd(&nvic));
    printf("SHCSR: 0x%x\n", omap44xx_cortex_m3_nvic_SHCSR_rd(&nvic));
    printf("CFSR: 0x%x\n", omap44xx_cortex_m3_nvic_CFSR_rd(&nvic));
    printf("BFAR: 0x%x\n", omap44xx_cortex_m3_nvic_BFAR_rd(&nvic));
    printf("SYSTICK_CTRL: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CTRL_rd(&nvic));
    printf("SYSTICK_CALV: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CALV_rd(&nvic));
    */

    if (omap44xx_cortex_m3_nvic_SHCSR_busfaultact_rdf(&nvic)) {
        // triggered by a bus fault
        if (omap44xx_mmu_irqstatus_rd(&mmu)) {
            // L2 MMU triggered fault: either no valid mapping, or two mappings
            // in the TLB
            // XXX: cachemarker: once we have caching enabled, this is the place
            // to look at the table entry for special permission bits.

            // XXX: the MMU_FAULT_ADDR register seems to just contain the last
            // address that was requested. By this time this is probably just a
            // kernelspace address. It is unclear whether the M3 can actually
            // find out what the faulting address really was.
            fault_address = omap44xx_mmu_fault_ad_rd(&mmu);
        } else {
            // "regular" bus fault -> look in NVIC entries
            if (omap44xx_cortex_m3_nvic_CFSR_bfarvalid_rdf(&nvic)) {
                // bus fault address register valid
                fault_address = omap44xx_cortex_m3_nvic_BFAR_rd(&nvic);
            } else {
                // one of the bus faults that do not write the BFAR -> the
                // faulting address is literally unknown to the system
                printk(LOG_WARN, "user bus fault with unknown faulting address\n");
                fault_address = (lvaddr_t) NULL;
            }
        }
    } else {
        // memory management fault (probably an access violation)
        if (omap44xx_cortex_m3_nvic_CFSR_mmarvalid_rdf(&nvic)) {
            // MMAR contains the faulting address
            fault_address = omap44xx_cortex_m3_nvic_MMAR_rd(&nvic);
        } else {
            // MMAR not written; probably executing in a no-execute region
            assert(omap44xx_cortex_m3_nvic_CFSR_iaccviol_rdf(&nvic));
            // so we can assume the pc caused the fault
            fault_address = save_area->named.pc;
        }
    }

    lvaddr_t handler;
    struct dispatcher_shared_arm *disp =
        get_dispatcher_shared_arm(dcb_current->disp);
    uintptr_t saved_pc = save_area->named.pc;

    disp->d.disabled = dispatcher_is_disabled_ip(dcb_current->disp, saved_pc);
    bool disabled = (disp->d.disabled != 0);

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);

    printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
                     " IP %"PRIxPTR"\n",
           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address, saved_pc);

    if (disabled) {
        assert(save_area == &disp->trap_save_area);
        handler = disp->d.dispatcher_pagefault_disabled;
        dcb_current->faults_taken++;
    } else {
        assert(save_area == &disp->enabled_save_area);
        handler = disp->d.dispatcher_pagefault;
    }

    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "handle_user_page_fault: too many faults, "
                         "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    } else {
        // Upcall to dispatcher
        struct dispatcher_shared_generic *disp_gen =
            get_dispatcher_shared_generic(dcb_current->disp);
        union registers_arm resume_area;

        // make sure we do not accidentally create an IT block when upcalling
        resume_area.named.cpsr = 0;
        resume_area.named.pc   = handler;
        resume_area.named.r0   = disp_gen->udisp;
        resume_area.named.r1   = fault_address;
        resume_area.named.r2   = 0;
        resume_area.named.r3   = saved_pc;
        resume_area.named.rtls = disp_gen->udisp;
        resume_area.named.r10  = disp->got_base;

        // we need some temporary stack to exit handler mode. Memory in userspace
        // would be better, but for the moment we can use this temporary region
        resume_area.named.stack = (uint32_t) &irq_save_pushed_area_top;

        // Upcall user to save area
        disp->d.disabled = true;
        resume(&resume_area);
    }
}
spawn_init(const char* name, int32_t kernel_id, const uint8_t* initrd_base,
           size_t initrd_bytes)
{
    assert(0 == kernel_id);

    // Create page tables for init
    init_l1 = (uintptr_t*)alloc_mem_aligned(INIT_L1_BYTES, ARM_L1_ALIGN);
    memset(init_l1, 0, INIT_L1_BYTES);

    init_l2 = (uintptr_t*)alloc_mem_aligned(INIT_L2_BYTES, ARM_L2_ALIGN);
    memset(init_l2, 0, INIT_L2_BYTES);

    STARTUP_PROGRESS();

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    STARTUP_PROGRESS();

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name,
                                        ARRAY_LENGTH(argv), argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, &paramaddr);

    STARTUP_PROGRESS();

    /*
     * Create a capability that allows user-level applications to
     * access device memory. This capability will be passed to Kaluga,
     * split up into smaller pieces and distributed among device
     * drivers.
     *
     * For armv5, this is currently a dummy capability. We do not
     * have support for user-level device drivers in gem5 yet, so we
     * do not allocate any memory as device memory. Some cap_copy
     * operations in the bootup code fail if this capability is not
     * present.
     */
    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn),
                                         TASKCN_SLOT_IO);
    errval_t err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    assert(NULL != disp);

    STARTUP_PROGRESS();

    /* Initialize dispatcher */
    disp->udisp = INIT_DISPATCHER_VBASE;

    STARTUP_PROGRESS();
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);
    STARTUP_PROGRESS();

    /* Page table setup */

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    caps_create_new(
        ObjType_VNode_ARM_l1,
        mem_to_local_phys((lvaddr_t)init_l1),
        vnode_objbits(ObjType_VNode_ARM_l1), 0,
        my_core_id,
        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
    );

    STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(
            ObjType_VNode_ARM_l2,
            mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
            objbits_vnode, 0,
            my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE; vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    paging_make_good((lvaddr_t)init_l1, INIT_L1_BYTES);

    STARTUP_PROGRESS();

    printf("XXX: Debug print to make Bram's code work\n");

    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));

    STARTUP_PROGRESS();

    // Map cmdline arguments in VSpace at ARGS_BASE
    STATIC_ASSERT(0 == (ARGS_SIZE % BASE_PAGE_SIZE), "");

    STARTUP_PROGRESS();

    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    STARTUP_PROGRESS();

    // Map bootinfo
    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    genvaddr_t init_ep, got_base;
    load_init_image(&l2_info, initrd_base, initrd_bytes, &init_ep, &got_base);

    // Set startup arguments (argc, argv)
    disp_arm->enabled_save_area.named.r0   = paramaddr;
    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->enabled_save_area.named.r10  = got_base;
    disp_arm->got_base = got_base;

    struct bootinfo* bootinfo = (struct bootinfo*)INIT_BOOTINFO_VBASE;
    bootinfo->regions_length = 0;

    STARTUP_PROGRESS();

    create_modules_from_initrd(bootinfo, initrd_base, initrd_bytes);
    debug(SUBSYS_STARTUP, "used %"PRIuCSLOT" slots in modulecn\n",
          spawn_state.modulecn_slot);

    STARTUP_PROGRESS();

    create_phys_caps(&spawn_state.physaddrcn->cap, bootinfo);

    STARTUP_PROGRESS();

    bootinfo->mem_spawn_core = ~0; // Size of kernel if bringing up others

    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    STARTUP_PROGRESS();

    // NB libbarrelfish initialization sets up the stack.
    disp_arm->disabled_save_area.named.pc   = init_ep;
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->disabled_save_area.named.r10  = got_base;

#ifdef __XSCALE__
    cp15_disable_cache();
#endif

    printf("Kernel ready.\n");

    pit_start();

    // On to userland...
    STARTUP_PROGRESS();
    dispatch(init_dcb);

    panic("Not reached.");
}
void arm_kernel_startup(void)
{
    printf("arm_kernel_startup entered \n");

    /* Initialize the core_data */
    /* Used when bringing up other cores; must be at a consistent global
     * address seen by all cores */
    struct arm_core_data *core_data =
        (void *)((lvaddr_t)&kernel_first_byte - BASE_PAGE_SIZE);

    struct dcb *init_dcb;

    if (hal_cpu_is_bsp()) {
        printf("Doing BSP related bootup \n");

        /* Initialize the location to allocate phys memory from */
        bsp_init_alloc_addr = glbl_core_data->start_free_ram;

        // Bring up init
        init_dcb = spawn_bsp_init(BSP_INIT_MODULE_NAME, bsp_alloc_phys);

        // Not available on PandaBoard?
        pit_start(0);
    } else {
        printf("Doing non-BSP related bootup \n");

        my_core_id = core_data->dst_core_id;

        /* Initialize the allocator */
        app_alloc_phys_start = core_data->memory_base_start;
        app_alloc_phys_end   = ((lpaddr_t)1 << core_data->memory_bits) +
                               app_alloc_phys_start;

        init_dcb = spawn_app_init(core_data, APP_INIT_MODULE_NAME,
                                  app_alloc_phys);

        uint32_t irq = gic_get_active_irq();
        gic_ack_irq(irq);
    }

    /* printf("Trying to enable interrupts\n"); */
    /* __asm volatile ("CPSIE aif"); // Enable interrupts */
    /* printf("Done enabling interrupts\n"); */

    /* printf("HOLD BOOTUP - SPINNING\n"); */
    /* while (1); */
    /* printf("THIS SHOULD NOT HAPPEN\n"); */

    // enable interrupt forwarding to cpu
    // FIXME: PS: enable this as it is needed for multicore setup.
    // gic_cpu_interface_enable();

    // Should not return
    printf("Calling dispatch from arm_kernel_startup, start address is=%"PRIxLVADDR"\n",
           get_dispatcher_shared_arm(init_dcb->disp)->enabled_save_area.named.r0);
    dispatch(init_dcb);
    panic("Error spawning init!");
}