/*
 * Initialise the system state for the x86 kernel boot path.
 *
 * Builds the root CNode, IRQ/IOPort/domain control caps, the bootinfo frame,
 * the initial thread's address space, IPC buffer, ASID pool, the idle and
 * initial threads, the (optional) IOMMU structures, and finally converts the
 * remaining free memory into untyped caps and device frames.
 *
 * Returns true on success; false aborts the boot (any allocation or cap
 * creation failure).
 *
 * Parameters after boot_mem_reuse_p_reg are not modeled in the abstract
 * specification (IOMMU/DRHU/RMRR details).
 */
BOOT_CODE bool_t init_sys_state(
    cpu_id_t      cpu_id,
    mem_p_regs_t  mem_p_regs,
    dev_p_regs_t* dev_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    /* parameters below not modeled in abstract specification */
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    acpi_rmrr_list_t *rmrr_list
)
{
    cap_t         root_cnode_cap;
    vptr_t        bi_frame_vptr;
    vptr_t        ipcbuf_vptr;
    cap_t         it_vspace_cap;
    cap_t         it_ap_cap;
    cap_t         ipcbuf_cap;
    pptr_t        bi_frame_pptr;
    create_frames_of_region_ret_t create_frames_ret;
#ifdef CONFIG_ENABLE_BENCHMARKS
    vm_attributes_t buffer_attr = {{ 0 }};
    word_t paddr;
    pde_t pde;
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* convert from physical addresses to kernel pptrs */
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end   - ui_info.pv_offset;

    /* IPC buffer sits directly above the user image; bootinfo frame above that */
    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end   = bi_frame_vptr + BIT(PAGE_BITS);

    init_freemem(ui_info.p_reg, mem_p_regs);

    /* initialise virtual-memory-related data structures (not in abstract spec) */
    if (!init_vm_state()) {
        return false;
    }

#ifdef CONFIG_ENABLE_BENCHMARKS
    /* allocate and create the log buffer */
    buffer_attr.words[0] = IA32_PAT_MT_WRITE_THROUGH;
    paddr = pptr_to_paddr((void *) alloc_region(pageBitsForSize(X86_LargePage)));
    /* allocate a large frame for logging */
    pde = x86_make_pde_mapping(paddr, buffer_attr);
    ia32KSGlobalPD[IA32_KSLOG_IDX] = pde;
    /* flush the tlb */
    invalidateTranslationAll();
    /* if we crash here, the log isn't working */
#ifdef CONFIG_DEBUG_BUILD
#if CONFIG_MAX_NUM_TRACE_POINTS > 0
    printf("Testing log\n");
    ksLog[0].data = 0xdeadbeef;
    printf("Wrote to ksLog %x\n", ksLog[0].data);
    assert(ksLog[0].data == 0xdeadbeef);
#endif /* CONFIG_MAX_NUM_TRACE_POINTS */
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOPort),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1  /* last port  */
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame (node 0 of 1) */
    bi_frame_pptr = allocate_bi_frame(0, 1, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /*
     * Initialise the NULL FPU state. This is different from merely zero'ing it
     * out (i.e., the NULL FPU state is non-zero), and must be performed before
     * the first thread is created.
     */
    resetFpu();
    saveFpuState(&x86KSnullFpuState);
    x86KSfpuOwner = NULL;

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

    if (config_set(CONFIG_IOMMU)) {
        /* initialise VTD-related data structures and the IOMMUs */
        if (!vtd_init(cpu_id, num_drhu, rmrr_list)) {
            return false;
        }

        /* write number of IOMMU PT levels into bootinfo */
        ndks_boot.bi_frame->numIOPTLevels = x86KSnumIOPTLevels;

        /* write IOSpace master cap */
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOSpace), master_iospace_cap());
    } else {
        /* -1 signals "no IOMMU" to userland */
        ndks_boot.bi_frame->numIOPTLevels = -1;
    }

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* create device frames */
    if (!create_device_frames(root_cnode_cap, dev_p_regs)) {
        return false;
    }

    /* finalise the bootinfo frame */
    bi_finalise();

    return true;
}
/*
 * Initialise the system state for the x86 kernel boot path (variant with
 * extra bootinfo / VBE support).
 *
 * In addition to the usual boot objects (root CNode, IRQ/IOPort/domain caps,
 * bootinfo frame, initial address space, IPC buffer, ASID pool, idle and
 * initial threads, optional IOMMU structures, untypeds), this variant also
 * allocates an "extra bootinfo" region directly above the bootinfo frame and
 * copies the multiboot VBE information block into it, padding the remainder.
 *
 * Returns true on success; false aborts the boot.
 */
BOOT_CODE bool_t init_sys_state(
    cpu_id_t      cpu_id,
    mem_p_regs_t  mem_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    /* parameters below not modeled in abstract specification */
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    acpi_rmrr_list_t *rmrr_list,
    seL4_X86_BootInfo_VBE *vbe
)
{
    cap_t         root_cnode_cap;
    vptr_t        extra_bi_frame_vptr;
    vptr_t        bi_frame_vptr;
    vptr_t        ipcbuf_vptr;
    cap_t         it_vspace_cap;
    cap_t         it_ap_cap;
    cap_t         ipcbuf_cap;
    pptr_t        bi_frame_pptr;
    /* every extra bootinfo region carries at least one header */
    word_t        extra_bi_size = sizeof(seL4_BootInfoHeader);
    region_t      extra_bi_region;
    pptr_t        extra_bi_offset = 0;
    create_frames_of_region_ret_t create_frames_ret;
    create_frames_of_region_ret_t extra_bi_ret;

    /* convert from physical addresses to kernel pptrs */
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end   - ui_info.pv_offset;

    /* IPC buffer above the user image, bootinfo above that, extra bootinfo on top */
    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);
    extra_bi_frame_vptr = bi_frame_vptr + BIT(PAGE_BITS);

    /* vbeMode == -1 means the bootloader supplied no VBE information */
    if (vbe->vbeMode != -1) {
        extra_bi_size += sizeof(seL4_X86_BootInfo_VBE);
    }

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = ROUND_UP(extra_bi_frame_vptr + extra_bi_size, PAGE_BITS);

    init_freemem(ui_info.p_reg, mem_p_regs);

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOPort),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1, /* last port  */
            VPID_INVALID
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, ksNumCPUs, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    extra_bi_region = allocate_extra_bi_region(extra_bi_size);
    if (extra_bi_region.start == 0) {
        return false;
    }

    /* populate vbe info block */
    if (vbe->vbeMode != -1) {
        vbe->header.id = SEL4_BOOTINFO_HEADER_X86_VBE;
        vbe->header.len = sizeof(seL4_X86_BootInfo_VBE);
        memcpy((void*)(extra_bi_region.start + extra_bi_offset), vbe, sizeof(seL4_X86_BootInfo_VBE));
        extra_bi_offset += sizeof(seL4_X86_BootInfo_VBE);
    }

    /* provide a chunk for any leftover padding in the extended boot info */
    seL4_BootInfoHeader padding_header;
    padding_header.id = SEL4_BOOTINFO_HEADER_PADDING;
    padding_header.len = (extra_bi_region.end - extra_bi_region.start) - extra_bi_offset;
    memcpy((void*)(extra_bi_region.start + extra_bi_offset), &padding_header, sizeof(seL4_BootInfoHeader));

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create and map extra bootinfo region */
    extra_bi_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            extra_bi_region,
            true,
            /* pv_offset so that the region maps at extra_bi_frame_vptr */
            pptr_to_paddr((void*)(extra_bi_region.start - extra_bi_frame_vptr))
        );
    if (!extra_bi_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->extraBIPages = extra_bi_ret.region;

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /* expose TSC frequency to userland via bootinfo */
    ndks_boot.bi_frame->archInfo = tsc_init();

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

    if (config_set(CONFIG_IOMMU)) {
        /* initialise VTD-related data structures and the IOMMUs */
        if (!vtd_init(cpu_id, num_drhu, rmrr_list)) {
            return false;
        }

        /* write number of IOMMU PT levels into bootinfo */
        ndks_boot.bi_frame->numIOPTLevels = x86KSnumIOPTLevels;

        /* write IOSpace master cap */
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOSpace), master_iospace_cap());
    } else {
        /* -1 signals "no IOMMU" to userland */
        ndks_boot.bi_frame->numIOPTLevels = -1;
    }

    /* create all of the untypeds. Both devices and kernel window memory */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* finalise the bootinfo frame */
    bi_finalise();

    return true;
}
/*
 * Entry point of the SBCL runtime.
 *
 * Responsibilities, in order: install signal/interrupt plumbing, detect an
 * embedded core in the executable, parse the runtime's share of the command
 * line (stripping options it consumes), initialise the OS/arch/GC layers,
 * locate and load a core file, install interrupt handlers, and finally hand
 * control to Lisp via create_initial_thread() — which never returns.
 */
int main(int argc, char *argv[], char *envp[])
{
#ifdef LISP_FEATURE_WIN32
    /* Exception handling support structure. Evil Win32 hack. */
    struct lisp_exception_frame exception_frame;
#endif

    /* the name of the core file we're to execute. Note that this is
     * a malloc'ed string which should be freed eventually. */
    char *core = 0;
    char **sbcl_argv = 0;
    os_vm_offset_t embedded_core_offset = 0;
    char *runtime_path = 0;

    /* other command line options */
    boolean noinform = 0;
    boolean end_runtime_options = 0;
    boolean disable_lossage_handler_p = 0;

    lispobj initial_function;
    const char *sbcl_home = getenv("SBCL_HOME");

    interrupt_init();
    block_blockable_signals(0, 0);

    setlocale(LC_ALL, "");

    runtime_options = NULL;

    /* Check early to see if this executable has an embedded core,
     * which also populates runtime_options if the core has runtime
     * options */
    runtime_path = os_get_runtime_executable_path();
    if (runtime_path) {
        os_vm_offset_t offset = search_for_embedded_core(runtime_path);
        if (offset != -1) {
            embedded_core_offset = offset;
            core = runtime_path;
        } else {
            free(runtime_path);
        }
    }

    /* Parse our part of the command line (aka "runtime options"),
     * stripping out those options that we handle. */
    if (runtime_options != NULL) {
        /* embedded core already fixed the sizes; pass argv through untouched */
        dynamic_space_size = runtime_options->dynamic_space_size;
        thread_control_stack_size = runtime_options->thread_control_stack_size;
        sbcl_argv = argv;
    } else {
        int argi = 1;

        runtime_options = successful_malloc(sizeof(struct runtime_options));

        while (argi < argc) {
            char *arg = argv[argi];
            if (0 == strcmp(arg, "--script")) {
                /* This is both a runtime and a toplevel option. As a
                 * runtime option, it is equivalent to --noinform.
                 * This exits, and does not increment argi, so that
                 * TOPLEVEL-INIT sees the option. */
                noinform = 1;
                end_runtime_options = 1;
                disable_lossage_handler_p = 1;
                lose_on_corruption_p = 1;
                break;
            } else if (0 == strcmp(arg, "--noinform")) {
                noinform = 1;
                ++argi;
            } else if (0 == strcmp(arg, "--core")) {
                if (core) {
                    lose("more than one core file specified\n");
                } else {
                    ++argi;
                    if (argi >= argc) {
                        lose("missing filename for --core argument\n");
                    }
                    core = copied_string(argv[argi]);
                    ++argi;
                }
            } else if (0 == strcmp(arg, "--help")) {
                /* I think this is the (or a) usual convention: upon
                 * seeing "--help" we immediately print our help
                 * string and exit, ignoring everything else. */
                print_help();
                exit(0);
            } else if (0 == strcmp(arg, "--version")) {
                /* As in "--help" case, I think this is expected. */
                print_version();
                exit(0);
            } else if (0 == strcmp(arg, "--dynamic-space-size")) {
                ++argi;
                if (argi >= argc)
                    lose("missing argument for --dynamic-space-size");
                errno = 0;
                /* argument is in megabytes; shift converts to bytes */
                dynamic_space_size = strtol(argv[argi++], 0, 0) << 20;
                if (errno)
                    lose("argument to --dynamic-space-size is not a number");
#ifdef MAX_DYNAMIC_SPACE_END
                if (!((DYNAMIC_SPACE_START < DYNAMIC_SPACE_START+dynamic_space_size) &&
                      (DYNAMIC_SPACE_START+dynamic_space_size <= MAX_DYNAMIC_SPACE_END)))
                    lose("specified --dynamic-space-size too large");
#endif
            } else if (0 == strcmp(arg, "--control-stack-size")) {
                ++argi;
                if (argi >= argc)
                    lose("missing argument for --control-stack-size");
                errno = 0;
                /* argument is in megabytes; shift converts to bytes */
                thread_control_stack_size = strtol(argv[argi++], 0, 0) << 20;
                if (errno)
                    lose("argument to --control-stack-size is not a number");
            } else if (0 == strcmp(arg, "--debug-environment")) {
                int n = 0;
                printf("; Commandline arguments:\n");
                while (n < argc) {
                    printf(";  %2d: \"%s\"\n", n, argv[n]);
                    ++n;
                }
                n = 0;
                printf(";\n; Environment:\n");
                while (ENVIRON[n]) {
                    printf(";  %2d: \"%s\"\n", n, ENVIRON[n]);
                    ++n;
                }
                ++argi;
            } else if (0 == strcmp(arg, "--disable-ldb")) {
                disable_lossage_handler_p = 1;
                ++argi;
            } else if (0 == strcmp(arg, "--lose-on-corruption")) {
                lose_on_corruption_p = 1;
                ++argi;
            } else if (0 == strcmp(arg, "--end-runtime-options")) {
                end_runtime_options = 1;
                ++argi;
                break;
            } else {
                /* This option was unrecognized as a runtime option,
                 * so it must be a toplevel option or a user option,
                 * so we must be past the end of the runtime option
                 * section. */
                break;
            }
        }
        /* This is where we strip out those options that we handle. We
         * also take this opportunity to make sure that we don't find
         * an out-of-place "--end-runtime-options" option. */
        {
            char *argi0 = argv[argi];
            int argj = 1;
            /* (argc - argi) for the arguments, one for the binary,
               and one for the terminating NULL. */
            sbcl_argv = successful_malloc((2 + argc - argi) * sizeof(char *));
            sbcl_argv[0] = argv[0];
            while (argi < argc) {
                char *arg = argv[argi++];
                /* If we encounter --end-runtime-options for the first
                 * time after the point where we had to give up on
                 * runtime options, then the point where we had to
                 * give up on runtime options must've been a user
                 * error. */
                if (!end_runtime_options &&
                    0 == strcmp(arg, "--end-runtime-options")) {
                    lose("bad runtime option \"%s\"\n", argi0);
                }
                sbcl_argv[argj++] = arg;
            }
            sbcl_argv[argj] = 0;
        }
    }

    /* Align down to multiple of page_table page size, and to the appropriate
     * stack alignment. */
    dynamic_space_size &= ~(PAGE_BYTES-1);
    thread_control_stack_size &= ~(CONTROL_STACK_ALIGNMENT_BYTES-1);

    /* Preserve the runtime options for possible future core saving */
    runtime_options->dynamic_space_size = dynamic_space_size;
    runtime_options->thread_control_stack_size = thread_control_stack_size;

    /* KLUDGE: os_vm_page_size is set by os_init(), and on some
     * systems (e.g. Alpha) arch_init() needs need os_vm_page_size, so
     * it must follow os_init(). -- WHN 2000-01-26 */
    os_init(argv, envp);
    arch_init();
    gc_init();
    validate();

    /* If no core file was specified, look for one. */
    if (!core) {
        core = search_for_core();
    }

    /* Make sure that SBCL_HOME is set and not the empty string,
       unless loading an embedded core. */
    if (!(sbcl_home && *sbcl_home) && embedded_core_offset == 0) {
        char *envstring, *copied_core, *dir;
        char *stem = "SBCL_HOME=";
        copied_core = copied_string(core);
        dir = dirname(copied_core);
        envstring = (char *) calloc(strlen(stem) +
                                    strlen(dir) +
                                    1,
                                    sizeof(char));
        sprintf(envstring, "%s%s", stem, dir);
        /* NOTE(review): envstring is intentionally not freed — putenv()
         * keeps the string as part of the environment. */
        putenv(envstring);
        free(copied_core);
    }

    if (!noinform && embedded_core_offset == 0) {
        print_banner();
        fflush(stdout);
    }

#if defined(SVR4) || defined(__linux__)
    tzset();
#endif

    define_var("nil", NIL, 1);
    define_var("t", T, 1);

    if (!disable_lossage_handler_p)
        enable_lossage_handler();

    globals_init();

    initial_function = load_core_file(core, embedded_core_offset);
    if (initial_function == NIL) {
        lose("couldn't find initial function\n");
    }
#ifdef LISP_FEATURE_HPUX
    /* -1 = CLOSURE_FUN_OFFSET, 23 = SIMPLE_FUN_CODE_OFFSET, we are
     * not in LANGUAGE_ASSEMBLY so we cant reach them. */
    return_from_lisp_stub = (void *) ((char *)*((unsigned long *)
                 ((char *)initial_function + -1)) + 23);
#endif

    gc_initialize_pointers();

    arch_install_interrupt_handlers();
#ifndef LISP_FEATURE_WIN32
    os_install_interrupt_handlers();
#else
/*     wos_install_interrupt_handlers(handler); */
    wos_install_interrupt_handlers(&exception_frame);
#endif

    /* Pass core filename and the processed argv into Lisp. They'll
     * need to be processed further there, to do locale conversion.
     */
    core_string = core;
    posix_argv = sbcl_argv;

    FSHOW((stderr, "/funcalling initial_function=0x%lx\n",
          (unsigned long)initial_function));
#ifdef LISP_FEATURE_WIN32
    fprintf(stderr, "\n\
This is experimental prerelease support for the Windows platform: use\n\
at your own risk.  \"Your Kitten of Death awaits!\"\n");
    fflush(stdout);
    fflush(stderr);
#endif
    create_initial_thread(initial_function);
    /* unreachable: create_initial_thread never returns on success */
    lose("CATS. CATS ARE NICE.\n");
    return 0;
}
/*
 * Attempt to initialise the ARM seL4 kernel.
 *
 * Sets up the kernel window and CPU, then builds all boot objects for the
 * initial thread: root CNode, IRQ/domain caps, bootinfo and extra-bootinfo
 * frames (the latter carrying the DTB, if one was provided), the initial
 * address space, IPC buffer, user-image frames, ASID pool, idle and initial
 * threads, and the untyped caps. Finishes by flushing caches/TLBs and (on
 * SMP configurations) releasing the secondary cores.
 *
 * Returns true when boot succeeded and the system is ready to drop to
 * userspace; false on any failure.
 */
static BOOT_CODE bool_t try_init_kernel(
    paddr_t ui_p_reg_start,
    paddr_t ui_p_reg_end,
    sword_t pv_offset,
    vptr_t  v_entry,
    paddr_t dtb_addr_start,
    paddr_t dtb_addr_end
)
{
    cap_t root_cnode_cap;
    cap_t it_ap_cap;
    cap_t it_pd_cap;
    cap_t ipcbuf_cap;
    region_t ui_reg = paddr_to_pptr_reg((p_region_t) {
        ui_p_reg_start, ui_p_reg_end
    });
    region_t dtb_reg;
    /* extra bootinfo: one header plus the raw DTB bytes */
    word_t extra_bi_size = sizeof(seL4_BootInfoHeader) + (dtb_addr_end - dtb_addr_start);
    region_t extra_bi_region;
    pptr_t extra_bi_offset = 0;
    vptr_t extra_bi_frame_vptr;
    pptr_t bi_frame_pptr;
    vptr_t bi_frame_vptr;
    vptr_t ipcbuf_vptr;
    create_frames_of_region_ret_t create_frames_ret;
    create_frames_of_region_ret_t extra_bi_ret;

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_p_reg_start - pv_offset;
    ui_v_reg.end   = ui_p_reg_end   - pv_offset;

    /* IPC buffer above the user image, bootinfo above that, extra bootinfo on top */
    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);
    extra_bi_frame_vptr = bi_frame_vptr + BIT(PAGE_BITS);

    /* If no DTB was provided, skip allocating extra bootinfo */
    if (dtb_addr_start == 0) {
        extra_bi_size = 0;
        dtb_reg = (region_t) {
            0, 0
        };
    } else {
        dtb_reg = paddr_to_pptr_reg((p_region_t) {
            dtb_addr_start, ROUND_UP(dtb_addr_end, PAGE_BITS)
        });
    }

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = extra_bi_frame_vptr;

    /* the initial thread must fit below the kernel window */
    if (it_v_reg.end > kernelBase) {
        printf("Userland image virtual end address too high\n");
        return false;
    }

    /* setup virtual memory for the kernel */
    map_kernel_window();

    /* initialise the CPU */
    if (!init_cpu()) {
        return false;
    }

    /* debug output via serial port is only available from here */
    printf("Bootstrapping kernel\n");

    /* initialise the platform */
    init_plat();

    /* make the free memory available to alloc_region() */
    arch_init_freemem(ui_reg, dtb_reg);

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();
    if (cap_get_capType(root_cnode_cap) == cap_null_cap) {
        return false;
    }

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, CONFIG_MAX_NUM_NODES, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* create extra bootinfo region - will return an empty allocation if extra_bi_size = 0 */
    extra_bi_region = allocate_extra_bi_region(extra_bi_size);
    if (extra_bi_region.start == 0) {
        return false;
    }

    /* update initial thread virtual address range for extra bootinfo */
    it_v_reg.end += extra_bi_region.end - extra_bi_region.start;
    if (it_v_reg.end > kernelBase) {
        printf("Userland extra bootinfo end address too high\n");
        return false;
    }

    /* put DTB in the bootinfo block, if present. */
    seL4_BootInfoHeader header;
    if (dtb_reg.start) {
        header.id = SEL4_BOOTINFO_HEADER_FDT;
        header.len = sizeof(header) + dtb_reg.end - dtb_reg.start;
        *(seL4_BootInfoHeader *)(extra_bi_region.start + extra_bi_offset) = header;
        extra_bi_offset += sizeof(header);
        memcpy((void *)(extra_bi_region.start + extra_bi_offset), (void *)dtb_reg.start,
               dtb_reg.end - dtb_reg.start);
        extra_bi_offset += dtb_reg.end - dtb_reg.start;
    }

    if ((extra_bi_region.end - extra_bi_region.start) - extra_bi_offset > 0) {
        /* provide a chunk for any leftover padding in the extended boot info */
        header.id = SEL4_BOOTINFO_HEADER_PADDING;
        header.len = (extra_bi_region.end - extra_bi_region.start) - extra_bi_offset;
        *(seL4_BootInfoHeader *)(extra_bi_region.start + extra_bi_offset) = header;
    }

    if (config_set(CONFIG_ARM_SMMU)) {
        ndks_boot.bi_frame->ioSpaceCaps = create_iospace_caps(root_cnode_cap);
        if (ndks_boot.bi_frame->ioSpaceCaps.start == 0 &&
                ndks_boot.bi_frame->ioSpaceCaps.end == 0) {
            return false;
        }
    } else {
        ndks_boot.bi_frame->ioSpaceCaps = S_REG_EMPTY;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_pd_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_pd_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_pd_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create and map extra bootinfo region */
    if (extra_bi_size > 0) {
        extra_bi_ret =
            create_frames_of_region(
                root_cnode_cap,
                it_pd_cap,
                extra_bi_region,
                true,
                /* pv_offset so that the region maps at extra_bi_frame_vptr */
                pptr_to_paddr((void *)extra_bi_region.start) - extra_bi_frame_vptr
            );
        if (!extra_bi_ret.success) {
            return false;
        }
        ndks_boot.bi_frame->extraBIPages = extra_bi_ret.region;
    }

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_pd_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_pd_cap,
            ui_reg,
            true,
            pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create/initialise the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_pd_cap);

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* Before creating the initial thread (which also switches to it)
     * we clean the cache so that any page table information written
     * as a result of calling create_frames_of_region will be correctly
     * read by the hardware page table walker */
    cleanInvalidateL1Caches();

    /* create the initial thread */
    tcb_t *initial = create_initial_thread(
                         root_cnode_cap,
                         it_pd_cap,
                         v_entry,
                         bi_frame_vptr,
                         ipcbuf_vptr,
                         ipcbuf_cap
                     );

    if (initial == NULL) {
        return false;
    }

    init_core_state(initial);

    /* create all of the untypeds. Both devices and kernel window memory */
    if (!create_untypeds(
                root_cnode_cap,
                (region_t) {
                    kernelBase, (pptr_t)ki_boot_end
                } /* reusable boot code/data */
            )) {
        return false;
    }

    /* no shared-frame caps (ARM has no multikernel support) */
    ndks_boot.bi_frame->sharedFrames = S_REG_EMPTY;

    /* finalise the bootinfo frame */
    bi_finalise();

    /* make everything written by the kernel visible to userland. Cleaning to PoC is not
     * strictly neccessary, but performance is not critical here so clean and invalidate
     * everything to PoC */
    cleanInvalidateL1Caches();
    invalidateLocalTLB();
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        invalidateHypTLB();
    }

    ksNumCPUs = 1;

    /* initialize BKL before booting up other cores */
    SMP_COND_STATEMENT(clh_lock_init());
    SMP_COND_STATEMENT(release_secondary_cpus());

    /* grab BKL before leaving the kernel */
    NODE_LOCK_SYS;

    printf("Booting all finished, dropped to user space\n");

    /* kernel successfully initialized */
    return true;
}
/*
 * Initialise the state of one kernel node (older x86 multikernel boot path).
 *
 * Builds the per-node boot objects: root CNode, IO port/domain/IRQ caps, the
 * bootinfo frame, the initial address space, IPC buffer, user-image frames,
 * ASID pool, NULL FPU state, idle and initial threads, optional IOMMU (VT-d)
 * structures, untypeds, device frames, and shared frames mapped between
 * nodes. Returns true on success; false aborts the boot.
 *
 * Parameters after num_nodes are not modeled in the abstract specification
 * (kernel paging structures and, under CONFIG_IOMMU, VT-d details).
 */
BOOT_CODE bool_t init_node_state(
    p_region_t    avail_p_reg,
    p_region_t    sh_p_reg,
    dev_p_regs_t* dev_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    node_id_t     node_id,
    uint32_t      num_nodes,
    /* parameters below not modeled in abstract specification */
    pdpte_t*      kernel_pdpt,
    pde_t*        kernel_pd,
    pte_t*        kernel_pt
#ifdef CONFIG_IOMMU
    , cpu_id_t      cpu_id,
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    uint32_t      num_passthrough_dev,
    dev_id_t*     passthrough_dev_list,
    uint32_t*     pci_bus_used_bitmap
#endif
)
{
    cap_t         root_cnode_cap;
    vptr_t        bi_frame_vptr;
    vptr_t        ipcbuf_vptr;
    cap_t         it_vspace_cap;
    cap_t         it_ap_cap;
    cap_t         ipcbuf_cap;
    pptr_t        bi_frame_pptr;
    create_frames_of_region_ret_t create_frames_ret;
    int i;
#ifdef CONFIG_BENCHMARK
    vm_attributes_t buffer_attr = {{ 0 }};
    uint32_t paddr;
    pde_t pde;
#endif /* CONFIG_BENCHMARK */

    /* convert from physical addresses to kernel pptrs */
    region_t avail_reg          = paddr_to_pptr_reg(avail_p_reg);
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t sh_reg             = paddr_to_pptr_reg(sh_p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end   - ui_info.pv_offset;

    /* IPC buffer sits directly above the user image; bootinfo frame above that */
    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end   = bi_frame_vptr + BIT(PAGE_BITS);

    /* make the free memory available to alloc_region() */
    ndks_boot.freemem[0] = avail_reg;
    for (i = 1; i < MAX_NUM_FREEMEM_REG; i++) {
        ndks_boot.freemem[i] = REG_EMPTY;
    }

    /* initialise virtual-memory-related data structures (not in abstract spec) */
    if (!init_vm_state(kernel_pdpt, kernel_pd, kernel_pt)) {
        return false;
    }

#ifdef CONFIG_BENCHMARK
    /* allocate and create the log buffer */
    buffer_attr.words[0] = IA32_PAT_MT_WRITE_THROUGH;
    paddr = pptr_to_paddr((void *) alloc_region(pageBitsForSize(IA32_LargePage)));
    /* allocate a large frame for logging */
    pde = pde_pde_large_new(
              paddr,                                   /* page_base_address    */
              vm_attributes_get_ia32PATBit(buffer_attr),      /* pat                  */
              0,                                       /* avl_cte_depth        */
              1,                                       /* global               */
              0,                                       /* dirty                */
              0,                                       /* accessed             */
              vm_attributes_get_ia32PCDBit(buffer_attr),      /* cache_disabled       */
              vm_attributes_get_ia32PWTBit(buffer_attr),      /* write_through        */
              0,                                       /* super_user           */
              1,                                       /* read_write           */
              1                                        /* present              */
          );
    /* TODO this shouldn't be hardcoded */
    ia32KSkernelPD[IA32_KSLOG_IDX] = pde;

    /* flush the tlb */
    invalidatePageStructureCache();

    /* if we crash here, the log isn't working */
#ifdef CONFIG_DEBUG_BUILD
    printf("Testing log\n");
    ksLog[0] = 0xdeadbeef;
    printf("Wrote to ksLog %x\n", ksLog[0]);
    assert(ksLog[0] == 0xdeadbeef);
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_BENCHMARK */

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IO_PORT),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1  /* last port  */
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap;
     * only node 0 owns the hardware IRQs */
    init_irqs(root_cnode_cap, node_id != 0);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(node_id, num_nodes, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->ui_frame_caps = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /*
     * Initialise the NULL FPU state. This is different from merely zero'ing it
     * out (i.e., the NULL FPU state is non-zero), and must be performed before
     * the first thread is created.
     */
    resetFpu();
    saveFpuState(&ia32KSnullFpuState);
    ia32KSfpuOwner = NULL;

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

#ifdef CONFIG_IOMMU
    /* initialise VTD-related data structures and the IOMMUs */
    if (!vtd_init(cpu_id, num_drhu, pci_bus_used_bitmap,
                  num_passthrough_dev, passthrough_dev_list)) {
        return false;
    }

    /* write number of IOMMU PT levels into bootinfo */
    ndks_boot.bi_frame->num_iopt_levels = ia32KSnumIOPTLevels;

    /* write IOSpace master cap */
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IO_SPACE), master_iospace_cap());
#endif

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* create device frames */
    if (!create_device_frames(root_cnode_cap, dev_p_regs)) {
        return false;
    }

    /* create all shared frames (mapped uncached-shared between nodes) */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            sh_reg,
            false,
            0
        );
    if (!create_frames_ret.success) {
        return false;
    }
    /* NOTE(review): stray double semicolon below — harmless empty statement */
    ndks_boot.bi_frame->sh_frame_caps = create_frames_ret.region;;

    /* finalise the bootinfo frame */
    bi_finalise();

#ifdef DEBUG
    ia32KSconsolePort = console_port_of_node(node_id);
    ia32KSdebugPort = debug_port_of_node(node_id);
#endif

    return true;
}