static BOOT_CODE bool_t try_boot_sys_node(cpu_id_t cpu_id)
{
    p_region_t boot_mem_reuse_p_reg;

    if (!map_kernel_window(
            boot_state.num_ioapic,
            boot_state.ioapic_paddr,
            boot_state.num_drhu,
            boot_state.drhu_list
        )) {
        return false;
    }
    setCurrentVSpaceRoot(kpptr_to_paddr(X86_GLOBAL_VSPACE_ROOT), 0);
    /* Sync up the compiler's view of the world here to force the PD to actually
     * be set *right now* instead of delayed */
    asm volatile("" ::: "memory");

    /* reuse boot code/data memory */
    boot_mem_reuse_p_reg.start = PADDR_LOAD;
    boot_mem_reuse_p_reg.end = (paddr_t)ki_boot_end - KERNEL_BASE_OFFSET;

    /* initialise the CPU */
    if (!init_cpu(config_set(CONFIG_IRQ_IOAPIC) ? 1 : 0)) {
        return false;
    }

    /* initialise NDKS and kernel heap */
    if (!init_sys_state(
            cpu_id,
            boot_state.mem_p_regs,
            boot_state.ui_info,
            boot_mem_reuse_p_reg,
            /* parameters below not modeled in abstract specification */
            boot_state.num_drhu,
            boot_state.drhu_list,
            &boot_state.rmrr_list,
            &boot_state.vbe_info
        )) {
        return false;
    }

    return true;
}
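/*
 * Usage sketch (assumption, not part of the original source): the x86 boot
 * path is expected to call try_boot_sys_node() for the boot CPU once
 * boot_state has been populated, treating a false return as a fatal boot
 * error. The field name boot_cpu_id below is illustrative:
 *
 *     if (!try_boot_sys_node(boot_state.boot_cpu_id)) {
 *         fail("boot_sys failed for some reason :(");
 *     }
 *
 * The point is the error-handling convention: each try_* boot function
 * returns bool_t, and the outermost caller converts false into a halt.
 */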
static BOOT_CODE bool_t try_init_kernel(
    paddr_t ui_p_reg_start,
    paddr_t ui_p_reg_end,
    sword_t pv_offset,
    vptr_t  v_entry,
    paddr_t dtb_addr_start,
    paddr_t dtb_addr_end
)
{
    cap_t root_cnode_cap;
    cap_t it_ap_cap;
    cap_t it_pd_cap;
    cap_t ipcbuf_cap;
    region_t ui_reg = paddr_to_pptr_reg((p_region_t) {
        ui_p_reg_start, ui_p_reg_end
    });
    region_t dtb_reg;
    word_t extra_bi_size = sizeof(seL4_BootInfoHeader) + (dtb_addr_end - dtb_addr_start);
    region_t extra_bi_region;
    pptr_t extra_bi_offset = 0;
    vptr_t extra_bi_frame_vptr;
    pptr_t bi_frame_pptr;
    vptr_t bi_frame_vptr;
    vptr_t ipcbuf_vptr;
    create_frames_of_region_ret_t create_frames_ret;
    create_frames_of_region_ret_t extra_bi_ret;

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_p_reg_start - pv_offset;
    ui_v_reg.end   = ui_p_reg_end   - pv_offset;

    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);
    extra_bi_frame_vptr = bi_frame_vptr + BIT(PAGE_BITS);

    /* If no DTB was provided, skip allocating extra bootinfo */
    if (dtb_addr_start == 0) {
        extra_bi_size = 0;
        dtb_reg = (region_t) {
            0, 0
        };
    } else {
        dtb_reg = paddr_to_pptr_reg((p_region_t) {
            dtb_addr_start, ROUND_UP(dtb_addr_end, PAGE_BITS)
        });
    }

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = extra_bi_frame_vptr;

    if (it_v_reg.end > kernelBase) {
        printf("Userland image virtual end address too high\n");
        return false;
    }

    /* setup virtual memory for the kernel */
    map_kernel_window();

    /* initialise the CPU */
    if (!init_cpu()) {
        return false;
    }

    /* debug output via serial port is only available from here */
    printf("Bootstrapping kernel\n");

    /* initialise the platform */
    init_plat();

    /* make the free memory available to alloc_region() */
    arch_init_freemem(ui_reg, dtb_reg);

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();
    if (cap_get_capType(root_cnode_cap) == cap_null_cap) {
        return false;
    }

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, CONFIG_MAX_NUM_NODES, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* create extra bootinfo region - will return an empty allocation if extra_bi_size = 0 */
    extra_bi_region = allocate_extra_bi_region(extra_bi_size);
    if (extra_bi_region.start == 0) {
        return false;
    }

    /* update initial thread virtual address range for extra bootinfo */
    it_v_reg.end += extra_bi_region.end - extra_bi_region.start;
    if (it_v_reg.end > kernelBase) {
        printf("Userland extra bootinfo end address too high\n");
        return false;
    }

    /* put DTB in the bootinfo block, if present */
    seL4_BootInfoHeader header;
    if (dtb_reg.start) {
        header.id = SEL4_BOOTINFO_HEADER_FDT;
        header.len = sizeof(header) + dtb_reg.end - dtb_reg.start;
        *(seL4_BootInfoHeader *)(extra_bi_region.start + extra_bi_offset) = header;
        extra_bi_offset += sizeof(header);
        memcpy((void *)(extra_bi_region.start + extra_bi_offset), (void *)dtb_reg.start,
               dtb_reg.end - dtb_reg.start);
        extra_bi_offset += dtb_reg.end - dtb_reg.start;
    }

    if ((extra_bi_region.end - extra_bi_region.start) - extra_bi_offset > 0) {
        /* provide a chunk for any leftover padding in the extended boot info */
        header.id = SEL4_BOOTINFO_HEADER_PADDING;
        header.len = (extra_bi_region.end - extra_bi_region.start) - extra_bi_offset;
        *(seL4_BootInfoHeader *)(extra_bi_region.start + extra_bi_offset) = header;
    }

    if (config_set(CONFIG_ARM_SMMU)) {
        ndks_boot.bi_frame->ioSpaceCaps = create_iospace_caps(root_cnode_cap);
        if (ndks_boot.bi_frame->ioSpaceCaps.start == 0 &&
            ndks_boot.bi_frame->ioSpaceCaps.end == 0) {
            return false;
        }
    } else {
        ndks_boot.bi_frame->ioSpaceCaps = S_REG_EMPTY;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_pd_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_pd_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_pd_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create and map extra bootinfo region */
    if (extra_bi_size > 0) {
        extra_bi_ret = create_frames_of_region(
                           root_cnode_cap,
                           it_pd_cap,
                           extra_bi_region,
                           true,
                           pptr_to_paddr((void *)extra_bi_region.start) - extra_bi_frame_vptr
                       );
        if (!extra_bi_ret.success) {
            return false;
        }
        ndks_boot.bi_frame->extraBIPages = extra_bi_ret.region;
    }

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_pd_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret = create_frames_of_region(
                            root_cnode_cap,
                            it_pd_cap,
                            ui_reg,
                            true,
                            pv_offset
                        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create/initialise the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_pd_cap);

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* Before creating the initial thread (which also switches to it)
     * we clean the cache so that any page table information written
     * as a result of calling create_frames_of_region will be correctly
     * read by the hardware page table walker */
    cleanInvalidateL1Caches();

    /* create the initial thread */
    tcb_t *initial = create_initial_thread(
                         root_cnode_cap,
                         it_pd_cap,
                         v_entry,
                         bi_frame_vptr,
                         ipcbuf_vptr,
                         ipcbuf_cap
                     );
    if (initial == NULL) {
        return false;
    }
    init_core_state(initial);

    /* create all of the untypeds, both device and kernel window memory */
    if (!create_untypeds(
            root_cnode_cap,
            (region_t) {
                kernelBase, (pptr_t)ki_boot_end /* reusable boot code/data */
            }
        )) {
        return false;
    }

    /* no shared-frame caps (ARM has no multikernel support) */
    ndks_boot.bi_frame->sharedFrames = S_REG_EMPTY;

    /* finalise the bootinfo frame */
    bi_finalise();
    /* Make everything written by the kernel visible to userland. Cleaning to PoC is not
     * strictly necessary, but performance is not critical here, so clean and invalidate
     * everything to PoC */
    cleanInvalidateL1Caches();
    invalidateLocalTLB();
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        invalidateHypTLB();
    }

    ksNumCPUs = 1;

    /* initialize BKL before booting up other cores */
    SMP_COND_STATEMENT(clh_lock_init());
    SMP_COND_STATEMENT(release_secondary_cpus());

    /* grab BKL before leaving the kernel */
    NODE_LOCK_SYS;

    printf("Booting all finished, dropped to user space\n");

    /* kernel successfully initialized */
    return true;
}
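/*
 * Usage sketch (assumption, not part of the original source): try_init_kernel()
 * is a single-shot initialisation attempt; the architecture entry point is
 * expected to wrap it, halt on failure, and then switch to the first scheduled
 * thread, roughly:
 *
 *     BOOT_CODE VISIBLE void init_kernel(paddr_t ui_p_reg_start,
 *                                        paddr_t ui_p_reg_end,
 *                                        sword_t pv_offset,
 *                                        vptr_t  v_entry,
 *                                        paddr_t dtb_addr_start,
 *                                        paddr_t dtb_addr_end)
 *     {
 *         if (!try_init_kernel(ui_p_reg_start, ui_p_reg_end, pv_offset,
 *                              v_entry, dtb_addr_start, dtb_addr_end)) {
 *             fail("Kernel init failed for some reason :(");
 *         }
 *         schedule();
 *         activateThread();
 *     }
 *
 * The wrapper's name and exact signature are assumptions for illustration;
 * only the fail-on-false pattern and the final schedule/activate step are
 * the point.
 */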