/**
 * \brief Spawn the first user-space domain ('init') on the BSP core.
 *
 * Allocates and zeroes the bootinfo region, maps it into init's vspace,
 * loads the init image, wires the dispatcher's entry point and GOT
 * registers, and creates the module and physical-memory capabilities
 * init needs to take over memory management.
 *
 * \param name  Name of the init module to load.
 * \return      DCB of the newly created init domain.
 */
struct dcb *spawn_bsp_init(const char *name)
{
    MSG("spawning '%s' on BSP core\n", name);
    /* Only the first core can run this code */
    assert(cpu_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = bsp_alloc_phys_aligned(BOOTINFO_SIZE, BASE_PAGE_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* store pointer to bootinfo in kernel virtual memory */
    bootinfo = (struct bootinfo *) local_phys_to_mem(bootinfo_phys);

    /* Construct cmdline args: init receives the vaddr of its bootinfo mapping */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };
    int argc = 2;

    /* perform common spawning of init domain */
    struct dcb *init_dcb = spawn_init_common(name, argc, argv, bootinfo_phys,
                                             bsp_alloc_phys,
                                             bsp_alloc_phys_aligned);

    /* map boot info into init's VSPACE */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    /* load the image */
    genvaddr_t init_ep, got_base;
    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };
    load_init_image(&l3_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);

    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
        init_ep, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
        get_dispatcher_shared_aarch64(init_dcb->disp);

    /* setting GOT pointers */
    disp_aarch64->got_base = got_base;
    /* XXX - Why does the kernel do this? -DC */
    disp_aarch64->enabled_save_area.named.x10 = got_base;
    disp_aarch64->disabled_save_area.named.x10 = got_base;

    /* setting entry points: start disabled, in EL0, with FIQs masked */
    disp_aarch64->disabled_save_area.named.pc = init_ep;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    /* bsp_alloc_phys(0) returns the current allocator watermark: everything
     * below it is already in use by the kernel/boot data */
    lpaddr_t init_alloc_end = bsp_alloc_phys(0);
    create_phys_caps(armv8_glbl_core_data->start_kernel_ram, init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel

    return init_dcb;
}
/**
 * \brief Bootstrap work common to BSP and APP cores when spawning init.
 *
 * Creates the init DCB via spawn_module(), sets up init's page tables,
 * maps the argument page and the dispatcher frame into init's vspace,
 * and initializes the shared dispatcher structure (disabled, with the
 * parameter address in x0 and IRQs masked in the saved SPSR).
 *
 * \param name                Name of the init binary.
 * \param argc                Argument count handed to init.
 * \param argv                Argument vector handed to init.
 * \param bootinfo_phys       Physical address of the bootinfo region
 *                            (0 on APP cores, which have no bootinfo).
 * \param alloc_phys          Physical memory allocator to use.
 * \param alloc_phys_aligned  Aligned variant of the allocator.
 * \return                    DCB of the new init domain.
 */
static struct dcb *spawn_init_common(const char *name, int argc,
                                     const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys,
                                     alloc_phys_aligned_func alloc_phys_aligned)
{
    struct dispatcher_shared_generic *disp;
    struct dispatcher_shared_aarch64 *disp_aarch64;

    MSG("spawn_init_common %s\n", name);

    lvaddr_t paramaddr;
    /* FIX: the address-of operator here had been mangled into the HTML
     * entity sequence "¶maddr" ("&para;" + "maddr"); restored to
     * &paramaddr, which spawn_module fills with init's parameter address. */
    struct dcb *init_dcb = spawn_module(&spawn_state, name, argc, argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, alloc_phys_aligned,
                                        &paramaddr);

    /* initialize page tables */
    init_page_tables();

    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l0);

    /* Map the argument page into init's address space */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    /* Map dispatcher */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    disp = get_dispatcher_shared_generic(init_dcb->disp);
    disp_aarch64 = get_dispatcher_shared_aarch64(init_dcb->disp);

    /* Initialize dispatcher: init starts out disabled */
    disp->disabled = true;
    /* NOTE(review): strncpy leaves disp->name unterminated when argv[0]
     * is >= DISP_NAME_LEN chars; readers in this file use bounded "%.*s",
     * so this appears tolerated — confirm before changing. */
    strncpy(disp->name, argv[0], DISP_NAME_LEN);

    /* Tell init the vspace addr of its dispatcher. */
    disp->udisp = INIT_DISPATCHER_VBASE;

    /* TODO: write the context ID for init */

    /* Set the thread ID register to point to the shared structure. */
    disp_aarch64->enabled_save_area.named.x0 = paramaddr;
    /* Enter EL0 with IRQs masked until init enables itself */
    disp_aarch64->enabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_I_MASK;
    sysreg_write_tpidrro_el0((uint64_t)disp->udisp);

    dump_dispatcher(disp);

    return init_dcb;
}
/**
 * \brief Handle an undefined-instruction fault taken from user mode.
 *
 * Records whether the dispatcher was disabled at the faulting IP, logs the
 * fault, and upcalls the domain's trap handler with the fault details in
 * x0..x3 (udisp, vector, 0, faulting address).
 *
 * \param fault_address  Address of the undefined instruction.
 * \param save_area      Register save area the exception path stored state in.
 */
void handle_user_undef(lvaddr_t fault_address, arch_registers_state_t* save_area)
{
    union registers_aarch64 resume_area;

    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(dcb_current->disp);

    bool disabled = dispatcher_is_disabled_ip(dcb_current->disp,
                                              save_area->named.pc);
    disp->d.disabled = disabled;

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    if (disabled) {
        //        assert(save_area == &disp->trap_save_area);
    } else {
        assert(save_area == &disp->enabled_save_area);
    }

    /* FIX: was PRIuPTR — printed the faulting IP in decimal; use hex to
     * match handle_user_page_fault's "IP %PRIxPTR" in this file. */
    printk(LOG_WARN, "user undef fault%s in '%.*s': IP %" PRIxPTR "\n",
           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address);

    struct dispatcher_shared_generic *disp_gen =
        get_dispatcher_shared_generic(dcb_current->disp);

    /* Argument registers for the trap upcall */
    resume_area.named.x0 = disp_gen->udisp;
    resume_area.named.x1 = AARCH64_EVECTOR_UNDEF;
    resume_area.named.x2 = 0;
    resume_area.named.x3 = fault_address;
    /* Why does the kernel do this? */
    resume_area.named.x10 = disp->got_base;
    resume_area.named.pc = disp->d.dispatcher_trap;
    resume_area.named.spsr = CPSR_F_MASK | AARCH64_MODE_USR;

    // Upcall user to save area
    disp->d.disabled = true;
    resume(&resume_area);
}
/**
 * \brief Second-stage kernel startup, common entry for BSP and APP cores.
 *
 * On the BSP core: sets up the physical allocator watermark, allocates the
 * initial KCB, spawns the 'init' domain and initializes the GIC. On an APP
 * core: consumes the core_data handed over by the booting core, sets up the
 * per-core allocator range and KCB, and spawns the app-core monitor/init.
 * Finally enables interrupt forwarding and dispatches init (never returns).
 *
 * \param pointer  NULL on the BSP core; on APP cores, the
 *                 struct armv8_core_data passed by the booting core
 *                 (assumption from the cast below — confirm at call site).
 */
void arm_kernel_startup(void *pointer)
{
    /* Initialize the core_data */
    /* Used when bringing up other cores, must be at consistent global address
     * seen by all cores */

    // Initialize system timers
    timers_init(config_timeslice);

    struct dcb *init_dcb;

    if (cpu_is_bsp()) {
        MSG("Doing BSP related bootup \n");

        /* Initialize the location to allocate phys memory from */
        printf("start_free_ram = 0x%lx\n", armv8_glbl_core_data->start_free_ram);
        bsp_init_alloc_addr = armv8_glbl_core_data->start_free_ram;

        /* allocate initial KCB */
        kcb_current= (struct kcb *)local_phys_to_mem(
                bsp_alloc_phys(sizeof(*kcb_current)));
        assert(kcb_current);
        memset(kcb_current, 0, sizeof(*kcb_current));

        init_dcb = spawn_bsp_init(BSP_INIT_MODULE_NAME);

        platform_gic_init();
        //        pit_start(0);
    } else {
        MSG("Doing non-BSP related bootup \n");

        struct armv8_core_data *core_data = (struct armv8_core_data *)pointer;

        my_core_id = core_data->dst_core_id;

        /* Initialize the allocator with the memory range handed to this core */
        app_alloc_phys_start = (core_data->memory.base);
        app_alloc_phys_end = (core_data->memory.length + app_alloc_phys_start);

        MSG("Memory: %lx, %lx, size=%zu kB\n", app_alloc_phys_start,
            app_alloc_phys_end,
            (app_alloc_phys_end - app_alloc_phys_start + 1) >> 10);

        /* KCB was allocated by the booting core and passed in core_data */
        kcb_current= (struct kcb *)local_phys_to_mem(core_data->kcb);

        init_dcb = spawn_app_init(core_data, APP_INIT_MODULE_NAME);

        //   uint32_t irq = gic_get_active_irq();
        //   gic_ack_irq(irq);
    }

    // enable interrupt forwarding to cpu
    platform_gic_cpu_interface_enable();

    MSG("Calling dispatch from arm_kernel_startup, entry point %#"PRIxLVADDR"\n",
        get_dispatcher_shared_aarch64(init_dcb->disp)->disabled_save_area.named.pc);

    // Should not return
    dispatch(init_dcb);

    panic("Error spawning init!");
}
/**
 * \brief Spawn the monitor/init domain on an application (non-BSP) core.
 *
 * Builds init's command line from the booting core's identifiers, performs
 * the common spawn, creates and maps the monitor URPC frame, ELF-loads the
 * monitor binary, and wires its entry point and GOT into the dispatcher.
 *
 * \param core_data  Boot parameters handed over by the core that booted us.
 * \param name       Name of the init/monitor module.
 * \return           DCB of the new init domain.
 */
struct dcb *spawn_app_init(struct armv8_core_data *core_data, const char *name)
{
    errval_t err;

    MSG("spawning '%s' on APP core\n", name);

    /* Only the app core can run this code */
    assert(!cpu_is_bsp());

    /* Construct cmdline args */

    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32,
             core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    /* size 5 leaves one zero-initialized slot after the four arguments */
    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    /* APP cores have no bootinfo region, hence bootinfo_phys == 0 */
    struct dcb *init_dcb= spawn_init_common(name, argc, argv, 0,
                                            app_alloc_phys,
                                            app_alloc_phys_aligned);

    MSG("creating monitor URPC frame cap\n");
    // Urpc frame cap
    struct cte *urpc_frame_cte =
        caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame.base,
                          core_data->urpc_frame.length,
                          core_data->urpc_frame.length,
                          my_core_id, urpc_frame_cte);
    assert(err_is_ok(err));
    /* flip the type back to a regular frame now that creation skipped the
     * zeroing step */
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    MSG("mapping URPC frame cap %" PRIxLPADDR" \n",urpc_ptr );
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, MON_URPC_VBASE, urpc_ptr,
                   MON_URPC_SIZE, INIT_PERM_RW);

    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };

    // elf load the domain
    genvaddr_t entry_point, got_base=0;

    MSG("loading elf '%s' @ %" PRIxLPADDR "\n", name,
        local_phys_to_mem(core_data->monitor_binary.base));

    err = elf_load(EM_AARCH64, startup_alloc_init, &l3_info,
                   local_phys_to_mem(core_data->monitor_binary.base),
                   core_data->monitor_binary.length, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf64_Shdr* got_shdr;
    got_shdr = elf64_find_section_header_name(
        local_phys_to_mem(core_data->monitor_binary.base),
        core_data->monitor_binary.length, ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
        entry_point, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
        get_dispatcher_shared_aarch64(init_dcb->disp);

    /* GOT base goes both into the shared structure and x10 of both save
     * areas (same convention as spawn_bsp_init) */
    disp_aarch64->got_base = got_base;
    disp_aarch64->enabled_save_area.named.x10 = got_base;
    disp_aarch64->disabled_save_area.named.x10 = got_base;

    /* setting entry points */
    disp_aarch64->disabled_save_area.named.pc = entry_point;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;
    //arch_set_thread_register(INIT_DISPATCHER_VBASE);

    MSG("init dcb set up\n");

    return init_dcb;
}
/**
 * \brief Handle a page fault taken from user mode.
 *
 * Selects the domain's enabled or disabled pagefault handler depending on
 * whether the dispatcher was disabled at the faulting IP, counts faults
 * taken while disabled, and either makes the domain unrunnable (after more
 * than two such faults) or upcalls the handler with the fault details in
 * x0..x3.
 *
 * \param fault_address  Faulting address.
 * \param save_area      Register save area for the faulting context.
 */
void handle_user_page_fault(lvaddr_t fault_address,
                            arch_registers_state_t* save_area)
{
    lvaddr_t handler;
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(dcb_current->disp);
    uintptr_t saved_pc = save_area->named.pc;

    disp->d.disabled = dispatcher_is_disabled_ip(dcb_current->disp, saved_pc);
    bool disabled = (disp->d.disabled != 0);

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);

    printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
                     " IP %"PRIxPTR"\n",
           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address, saved_pc);

    if (disabled) {
        assert(save_area == &disp->trap_save_area);
        handler = disp->d.dispatcher_pagefault_disabled;
        dcb_current->faults_taken++;
    } else {
        assert(save_area == &disp->enabled_save_area);
        handler = disp->d.dispatcher_pagefault;
    }

    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "handle_user_page_fault: too many faults, "
               "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    } else {
        //
        // Upcall to dispatcher
        //
        // NB System might be cleaner with a prototype
        // dispatch context that has R0-R3 to be overwritten
        // plus initial stack, thread, and gic registers. Could do
        // a faster resume_for_upcall().
        //

        struct dispatcher_shared_generic *disp_gen =
            get_dispatcher_shared_generic(dcb_current->disp);

        /* XXX - This code leaks the contents of the kernel stack to the
         * user-level fault handler. */
        union registers_aarch64 resume_area;

        resume_area.named.x0 = disp_gen->udisp;
        resume_area.named.x1 = fault_address;
        resume_area.named.x2 = 0;
        resume_area.named.x3 = saved_pc;
        /* Why does the kernel do this? */
        resume_area.named.x10 = disp->got_base;
        resume_area.named.pc = handler;
        resume_area.named.spsr = CPSR_F_MASK | AARCH64_MODE_USR;
        // SP is set by handler routine.

        // Upcall user to save area
        disp->d.disabled = true;
        /* FIX: fault_address and handler are integer lvaddr_t values, not
         * pointers; passing them to %p mismatched the format specifier
         * (undefined behavior). Print them as hex integers instead. */
        printk(LOG_WARN, "page fault at %" PRIxLVADDR
                         " calling handler %" PRIxLVADDR "\n",
               fault_address, handler);
        resume(&resume_area);
    }
}