/* Provide a frame cap for every 4K page of the given kernel-window region,
 * optionally mapping each frame into the initial thread's address space.
 *
 * root_cnode_cap: root CNode the frame caps are written into
 * pd_cap:         initial thread's VSpace cap (used only when do_map is set)
 * reg:            pptr region to cover with frames
 * do_map:         when true, frames are created already mapped
 * pv_offset:      pptr-to-vptr adjustment applied when computing the map vaddr
 *
 * Returns the slot region covering the provided caps and a success flag;
 * on slot exhaustion the region is S_REG_EMPTY and success is false.
 */
BOOT_CODE create_frames_of_region_ret_t create_frames_of_region(
    cap_t    root_cnode_cap,
    cap_t    pd_cap,
    region_t reg,
    bool_t   do_map,
    int32_t  pv_offset
)
{
    pptr_t     frame_pptr;
    cap_t      frame_cap;
    slot_pos_t first_slot = ndks_boot.slot_pos_cur;

    /* Walk the region one small page at a time. */
    for (frame_pptr = reg.start; frame_pptr < reg.end; frame_pptr += BIT(PAGE_BITS)) {
        if (do_map) {
            frame_cap = create_mapped_it_frame_cap(
                            pd_cap,
                            frame_pptr,
                            frame_pptr - BASE_OFFSET - pv_offset,
                            false,
                            false
                        );
        } else {
            frame_cap = create_unmapped_it_frame_cap(frame_pptr, false);
        }
        if (!provide_cap(root_cnode_cap, frame_cap)) {
            /* Ran out of root CNode slots: report failure. */
            return (create_frames_of_region_ret_t) {
                S_REG_EMPTY, false
            };
        }
    }

    /* All caps provided; report the slot range they occupy. */
    return (create_frames_of_region_ret_t) {
        (slot_region_t) { first_slot, ndks_boot.slot_pos_cur }, true
    };
}
/* Provide frame caps covering every physical device region and record each
 * region (base paddr, frame size, slot range) in the bootinfo frame.
 *
 * root_cnode_cap: root CNode the device frame caps are written into
 * dev_p_regs:     list of physical device regions discovered during boot
 *
 * Returns false if the root CNode runs out of slots, true otherwise.
 */
BOOT_CODE static bool_t create_device_frames(
    cap_t         root_cnode_cap,
    dev_p_regs_t* dev_p_regs
)
{
    uint32_t dev_idx;

    for (dev_idx = 0; dev_idx < dev_p_regs->count; dev_idx++) {
        region_t          reg = paddr_to_pptr_reg(dev_p_regs->list[dev_idx]);
        vm_page_size_t    page_size;
        seL4_SlotPos      first_slot;
        seL4_DeviceRegion bi_reg;
        pptr_t            frame;
        cap_t             frame_cap;

        /* A large frame is only usable when the whole region is large-page
         * aligned at both ends; otherwise fall back to 4K frames. */
        if (IS_ALIGNED(reg.start, LARGE_PAGE_BITS) && IS_ALIGNED(reg.end, LARGE_PAGE_BITS)) {
            page_size = X86_LargePage;
        } else {
            page_size = X86_SmallPage;
        }

        /* Provide unmapped frame caps covering the whole region. */
        first_slot = ndks_boot.slot_pos_cur;
        for (frame = reg.start; frame < reg.end; frame += BIT(pageBitsForSize(page_size))) {
            frame_cap = create_unmapped_it_frame_cap(frame, page_size == X86_LargePage);
            if (!provide_cap(root_cnode_cap, frame_cap)) {
                return false;
            }
        }

        /* Describe this device region to userland via the bootinfo frame. */
        bi_reg.basePaddr     = pptr_to_paddr((void*)reg.start);
        bi_reg.frameSizeBits = pageBitsForSize(page_size);
        bi_reg.frames        = (seL4_SlotRegion) {
            first_slot, ndks_boot.slot_pos_cur
        };
        ndks_boot.bi_frame->deviceRegions[dev_idx] = bi_reg;
    }

    ndks_boot.bi_frame->numDeviceRegions = dev_p_regs->count;
    return true;
}
/* Provide one untyped cap in the root CNode and mirror its physical address
 * and size into the bootinfo untyped-object lists.
 *
 * root_cnode_cap:     root CNode the cap is written into
 * pptr:               kernel pointer to the start of the untyped memory
 * size_bits:          log2 size of the untyped region
 * first_untyped_slot: slot position of the first untyped cap, used to derive
 *                     this cap's index into the bootinfo lists
 *
 * Returns false only if providing the cap fails; when the bootinfo lists are
 * full the region is dropped with a warning and true is returned so boot
 * continues.
 */
BOOT_CODE static bool_t provide_untyped_cap(
    cap_t      root_cnode_cap,
    pptr_t     pptr,
    uint32_t   size_bits,
    slot_pos_t first_untyped_slot
)
{
    /* Index of this untyped within the bootinfo arrays. */
    unsigned int idx = ndks_boot.slot_pos_cur - first_untyped_slot;

    if (idx >= CONFIG_MAX_NUM_BOOTINFO_UNTYPED_CAPS) {
        /* Bootinfo is full: warn and deliberately keep going. */
        printf("Kernel init: Too many untyped regions for boot info\n");
        return true;
    }

    ndks_boot.bi_frame->ut_obj_paddr_list[idx]     = pptr_to_paddr((void*)pptr);
    ndks_boot.bi_frame->ut_obj_size_bits_list[idx] = size_bits;
    return provide_cap(root_cnode_cap, cap_untyped_cap_new(0, size_bits, pptr));
}
/* Create an address space for the initial thread.
 * This includes page directory and page tables.
 *
 * root_cnode_cap: root CNode that receives the paging-structure caps
 * it_v_reg:       virtual region the userland image occupies; page tables
 *                 are created to cover it
 *
 * Returns the VSpace cap (PD cap when PDPT_BITS == 0, PDPT cap otherwise),
 * or a null cap on allocation/slot failure. Also records the PD and PT cap
 * slot ranges in the bootinfo frame.
 */
BOOT_CODE static cap_t
create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    pptr_t     pptr;
    slot_pos_t slot_pos_before;
    slot_pos_t slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;
    if (PDPT_BITS == 0) {
        cap_t pd_cap;
        pptr_t pd_pptr;
        /* just create single PD obj and cap */
        pd_pptr = alloc_region(PD_SIZE_BITS);
        if (!pd_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDE_PTR(pd_pptr), 1 << PD_SIZE_BITS);
        /* Copy the kernel's global mappings into the new PD. */
        copyGlobalMappings(PDE_PTR(pd_pptr));
        pd_cap = create_it_page_directory_cap(cap_null_cap_new(), pd_pptr, 0, IT_ASID);
        if (!provide_cap(root_cnode_cap, pd_cap)) {
            return cap_null_cap_new();
        }
        /* The PD cap doubles as the initial thread's VSpace cap. */
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pd_cap);
        vspace_cap = pd_cap;
    } else {
        cap_t pdpt_cap;
        pptr_t pdpt_pptr;
        unsigned int i;
        /* create a PDPT obj and cap */
        pdpt_pptr = alloc_region(PDPT_SIZE_BITS);
        if (!pdpt_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDPTE_PTR(pdpt_pptr), 1 << PDPT_SIZE_BITS);
        pdpt_cap = cap_pdpt_cap_new(
                       true,       /* capPDPTISMapped */
                       IT_ASID,    /* capPDPTMappedASID */
                       pdpt_pptr   /* capPDPTBasePtr */
                   );
        /* create all PD objs and caps necessary to cover userland image. For simplicity
         * to ensure we also cover the kernel window we create all PDs */
        for (i = 0; i < BIT(PDPT_BITS); i++) {
            /* The compiler is under the mistaken belief here that this shift could be
             * undefined. However, in the case that it would be undefined this code path
             * is not reachable because PDPT_BITS == 0 (see if statement at the top of
             * this function), so to work around it we must both put in a redundant
             * if statement AND place the shift in a variable. While the variable
             * will get compiled away it prevents the compiler from evaluating
             * the 1 << 32 as a constant when it shouldn't
             * tl;dr gcc evaluates constants even if code is unreachable */
            int shift = (PD_BITS + PT_BITS + PAGE_BITS);
            if (shift != 32) {
                vptr = i << shift;
            } else {
                return cap_null_cap_new();
            }
            pptr = alloc_region(PD_SIZE_BITS);
            if (!pptr) {
                return cap_null_cap_new();
            }
            memzero(PDE_PTR(pptr), 1 << PD_SIZE_BITS);
            if (!provide_cap(root_cnode_cap,
                             create_it_page_directory_cap(pdpt_cap, pptr, vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }
        /* now that PDs exist we can copy the global mappings */
        copyGlobalMappings(PDPTE_PTR(pdpt_pptr));
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pdpt_cap);
        vspace_cap = pdpt_cap;
    }

    /* Record the slot range of the PD caps in the bootinfo frame. */
    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pd_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    /* create all PT objs and caps necessary to cover userland image */
    slot_pos_before = ndks_boot.slot_pos_cur;
    for (vptr = ROUND_DOWN(it_v_reg.start, PT_BITS + PAGE_BITS);
            vptr < it_v_reg.end;
            vptr += BIT(PT_BITS + PAGE_BITS)) {
        pptr = alloc_region(PT_SIZE_BITS);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PTE_PTR(pptr), 1 << PT_SIZE_BITS);
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(vspace_cap, pptr, vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    /* Record the slot range of the PT caps in the bootinfo frame. */
    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pt_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    return vspace_cap;
}