/* Allocate and initialise the bootinfo frame.
 *
 * node_id/num_nodes/ipcbuf_vptr are recorded verbatim into the frame.
 * On success returns the kernel pointer to the frame and sets
 * ndks_boot.bi_frame / ndks_boot.slot_pos_cur; returns 0 on allocation
 * failure. */
BOOT_CODE pptr_t allocate_bi_frame(
    node_id_t node_id,
    word_t    num_nodes,
    vptr_t    ipcbuf_vptr
)
{
    pptr_t pptr;

    /* create the bootinfo frame object */
    pptr = alloc_region(BI_FRAME_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failed: could not allocate bootinfo frame\n");
        return 0;
    }
    /* Fix: clear the size we actually allocated (BI_FRAME_SIZE_BITS), not
     * PAGE_BITS, so the clear cannot get out of sync with the allocation
     * if the two constants ever differ. */
    clearMemory((void*)pptr, BI_FRAME_SIZE_BITS);

    /* initialise bootinfo-related global state */
    ndks_boot.bi_frame = BI_PTR(pptr);
    ndks_boot.slot_pos_cur = BI_CAP_DYN_START;

    BI_PTR(pptr)->node_id = node_id;
    BI_PTR(pptr)->num_nodes = num_nodes;
    BI_PTR(pptr)->num_iopt_levels = 0;
    BI_PTR(pptr)->ipcbuf_vptr = ipcbuf_vptr;
    BI_PTR(pptr)->it_cnode_size_bits = CONFIG_ROOT_CNODE_SIZE_BITS;
    BI_PTR(pptr)->it_domain = ksDomSchedule[ksDomScheduleIdx].domain;

    return pptr;
}
/* Allocate and initialise the root CNode of the initial thread.
 * Returns the root CNode cap, or a null cap on allocation failure. */
BOOT_CODE cap_t create_root_cnode(void)
{
    /* record how many slots the root CNode will provide */
    ndks_boot.slot_pos_max = BIT(CONFIG_ROOT_CNODE_SIZE_BITS);

    /* allocate and zero backing memory for the root CNode */
    pptr_t pptr = alloc_region(CONFIG_ROOT_CNODE_SIZE_BITS + CTE_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failing: could not create root cnode\n");
        return cap_null_cap_new();
    }
    memzero(CTE_PTR(pptr), 1U << (CONFIG_ROOT_CNODE_SIZE_BITS + CTE_SIZE_BITS));

    cap_t cap = cap_cnode_cap_new(
                    CONFIG_ROOT_CNODE_SIZE_BITS,            /* radix      */
                    wordBits - CONFIG_ROOT_CNODE_SIZE_BITS, /* guard size */
                    0,                                      /* guard      */
                    pptr                                    /* pptr       */
                );

    /* the root CNode holds a cap to itself */
    write_slot(SLOT_PTR(pptr, BI_CAP_IT_CNODE), cap);
    return cap;
}
void alloc_reg() { if (region_stack_idx < NUM_REGIONS) { alloc_region(®ion_stack[region_stack_idx]); region_stat[region_stack_idx] = 0; from_idx = region_stack_idx; to_idx = region_stack_idx; num_alloc_reg++; region_stack_idx++; } }
/* Configure one eflash SASC protection region: security attribute,
 * address window, cacheability and access permissions, then activate it.
 * Always returns 0. */
static int32_t eflash_region_config(uint32_t addr, sasc_size_e size,
                                    sasc_ap_e sap, sasc_ap_e uap,
                                    sasc_cd_e ucd, uint8_t is_security)
{
    /* claim the next free region index */
    uint8_t idx = alloc_region();

    eflash_region_security_config(idx, is_security);
    eflash_region_address_config(idx, addr, size);
    eflash_region_cd_config(idx, ucd);
    eflash_region_ap_config(idx, sap, uap);
    eflash_region_active_config(idx);

    return 0;
}
/* Create a fresh region of the given dimensions.
 * Returns NULL when the underlying allocator fails. */
VC_REGION_T *vc_region_make (int size_x, int size_y, int on_transparency)
{
    VC_REGION_T *r = alloc_region();
    if (!r) {
        return NULL;
    }

    memset(r, 0, sizeof(VC_REGION_T));
    r->size_x = size_x;
    r->size_y = size_y;
    r->on_transparency = on_transparency;

    // Recompute the bands that make up the (whole) region.
    vc_region_recompute_bands(r);

    return r;
}
/* Allocate and zero one page to serve as the IRQ CNode, and publish it
 * via intStateIRQNode. Returns false on allocation failure. */
BOOT_CODE bool_t create_irq_cnode(void)
{
    pptr_t cnode_pptr = alloc_region(PAGE_BITS);
    if (!cnode_pptr) {
        printf("Kernel init failing: could not create irq cnode\n");
        return false;
    }

    memzero((void*)cnode_pptr, 1 << PAGE_BITS);
    intStateIRQNode = (cte_t*)cnode_pptr;

    return true;
}
/* Configure one SRAM SASC protection region: security attribute, address
 * window, cacheability and access permissions, then activate it.
 * Always returns 0. */
static int32_t sram_region_config(uint32_t addr, sasc_size_e size,
                                  sasc_ap_e sap, sasc_ap_e uap,
                                  sasc_cd_e ucd, uint8_t is_security)
{
    /* claim the next free region index */
    uint8_t idx = alloc_region();

    /* globals consumed by the helper routines below */
    s_sram_base = SRAM_REG_BASE;
    sram_addr_offset = 17;

    sram_region_security_config(idx, is_security);
    sram_region_address_config(idx, addr, size);
    sram_region_cd_config(idx, ucd);
    sram_region_ap_config(idx, sap, uap);
    sram_region_active_config(idx);

    return 0;
}
/* Allocate a TCB block for the idle thread, zero it, and configure the
 * idle thread object. Returns false on allocation failure. */
BOOT_CODE bool_t create_idle_thread(void)
{
    pptr_t tcb_block = alloc_region(TCB_BLOCK_SIZE_BITS);
    if (!tcb_block) {
        printf("Kernel init failed: Unable to allocate tcb for idle thread\n");
        return false;
    }

    memzero((void *)tcb_block, 1 << TCB_BLOCK_SIZE_BITS);
    /* the TCB object lives at a fixed offset inside its block */
    ksIdleThread = TCB_PTR(tcb_block + TCB_OFFSET);
    configureIdleThread(ksIdleThread);

    return true;
}
/* Initialise the dynamic memory subsystem: discover free memory, carve out
 * CONFIG_KDYNAMIC_MEMORY bytes above ebss, and hand the pool to TLSF.
 * Returns the amount of free memory on success, or (unsigned long)-1 on
 * failure (the original error convention is kept for callers). */
unsigned long init_sysmem (void)
{
    unsigned long freemem = init_mem ();

    if (CONFIG_KDYNAMIC_MEMORY > freemem)
        return -1;

    if (!(memory_pool = alloc_region ((ulong)ebss, RAMTOP, CONFIG_KDYNAMIC_MEMORY)))
        return -1;

    /* Setting up TLSF with the largest free area; memory_pool will be
     * zeroed by this function as well.
     * Fix: the old "%d"/"%x" specifiers did not match the long/pointer-sized
     * arguments (undefined behaviour); use %lu/%lx with explicit casts. */
    printf ("\nSetting up the dynamic memory manager (%lu kbytes at 0x%lx)\n",
            (unsigned long)(CONFIG_KDYNAMIC_MEMORY / 1024),
            (unsigned long)memory_pool);

    if (init_memory_pool (CONFIG_KDYNAMIC_MEMORY, memory_pool) == 0)
        return -1;

    return freemem;
}
/* Allocate the initial thread's IPC buffer frame, map it at vptr in the
 * given page directory, and record the frame cap in the root CNode.
 * Returns the frame cap, or a null cap on allocation failure. */
BOOT_CODE cap_t create_ipcbuf_frame(cap_t root_cnode_cap, cap_t pd_cap, vptr_t vptr)
{
    /* allocate the IPC buffer frame */
    pptr_t frame_pptr = alloc_region(PAGE_BITS);
    if (!frame_pptr) {
        printf("Kernel init failing: could not create ipc buffer frame\n");
        return cap_null_cap_new();
    }
    clearMemory((void*)frame_pptr, PAGE_BITS);

    /* create a cap of it and write it into the root CNode */
    cap_t frame_cap = create_mapped_it_frame_cap(pd_cap, frame_pptr, vptr, false, false);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF), frame_cap);

    return frame_cap;
}
// Carve `size_in` bytes (rounded up to ALIGN) out of the current region,
// starting a new region when the current one cannot hold the request.
// Returns nullptr (after reporting an error) for oversized requests.
void *alloc_node( size_t size_in )
{
    if( size_in > Region::SIZE/4 ) {
        doll::basicErrorf( "Requested too much memory for a script node." );
        return nullptr;
    }

    // Round the request up to the allocator's alignment granularity.
    unsigned sz = static_cast< unsigned >( size_in );
    const unsigned rem = sz % ALIGN;
    if( rem != 0 ) {
        sz += ALIGN - rem;
    }

    // Start a fresh region if there is none, or if this one is exhausted.
    if( !m_current_region || m_current_region->used + sz > Region::SIZE ) {
        alloc_region();
    }
    AX_EXPECT_MEMORY( m_current_region );

    char *const node_mem = &m_current_region->data[ m_current_region->used ];
    m_current_region->used += sz;
    return static_cast< void * >( node_mem );
}
/* Create the initial thread's ASID pool and the global ASID control cap,
 * writing both into the root CNode. Returns the ASID pool cap, or a null
 * cap on allocation failure. */
BOOT_CODE cap_t create_it_asid_pool(cap_t root_cnode_cap)
{
    /* create ASID pool */
    pptr_t pool_pptr = alloc_region(ASID_POOL_SIZE_BITS);
    if (!pool_pptr) {
        printf("Kernel init failed: failed to create initial thread asid pool\n");
        return cap_null_cap_new();
    }
    memzero(ASID_POOL_PTR(pool_pptr), 1 << ASID_POOL_SIZE_BITS);

    cap_t pool_cap = cap_asid_pool_cap_new(IT_ASID >> asidLowBits, pool_pptr);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_ASID_POOL), pool_cap);

    /* create ASID control cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_ASID_CTRL),
        cap_asid_control_cap_new()
    );

    return pool_cap;
}
/* Top-level (x86) kernel state initialisation for boot.
 *
 * Lays out the initial thread's virtual address space (user image, then
 * IPC buffer, then bootinfo frame), then creates — strictly in order —
 * the root CNode, IRQ state, bootinfo frame, initial address space,
 * IPC buffer, user-image frames, ASID pool, FPU null-state, idle thread,
 * initial thread, IOMMU state, untypeds and device frames.
 * Returns false on any allocation or sub-init failure.
 *
 * NOTE(review): drhu_list is accepted but not used in this version —
 * presumably consumed by a different vtd_init signature; confirm. */
BOOT_CODE bool_t init_sys_state(
    cpu_id_t      cpu_id,
    mem_p_regs_t  mem_p_regs,
    dev_p_regs_t* dev_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    /* parameters below not modeled in abstract specification */
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    acpi_rmrr_list_t *rmrr_list
)
{
    cap_t  root_cnode_cap;
    vptr_t bi_frame_vptr;
    vptr_t ipcbuf_vptr;
    cap_t  it_vspace_cap;
    cap_t  it_ap_cap;
    cap_t  ipcbuf_cap;
    pptr_t bi_frame_pptr;
    create_frames_of_region_ret_t create_frames_ret;
#ifdef CONFIG_ENABLE_BENCHMARKS
    vm_attributes_t buffer_attr = {{ 0 }};
    word_t paddr;
    pde_t pde;
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* convert from physical addresses to kernel pptrs */
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end - ui_info.pv_offset;

    /* the IPC buffer page sits directly above the user image, and the
     * bootinfo frame directly above that */
    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = bi_frame_vptr + BIT(PAGE_BITS);

    init_freemem(ui_info.p_reg, mem_p_regs);

    /* initialise virtual-memory-related data structures (not in abstract spec) */
    if (!init_vm_state()) {
        return false;
    }

#ifdef CONFIG_ENABLE_BENCHMARKS
    /* allocate and create the log buffer */
    buffer_attr.words[0] = IA32_PAT_MT_WRITE_THROUGH;
    paddr = pptr_to_paddr((void *) alloc_region(pageBitsForSize(X86_LargePage)));
    /* allocate a large frame for logging */
    pde = x86_make_pde_mapping(paddr, buffer_attr);
    ia32KSGlobalPD[IA32_KSLOG_IDX] = pde;

    /* flush the tlb */
    invalidateTranslationAll();

    /* if we crash here, the log isn't working */
#ifdef CONFIG_DEBUG_BUILD
#if CONFIG_MAX_NUM_TRACE_POINTS > 0
    printf("Testing log\n");
    ksLog[0].data = 0xdeadbeef;
    printf("Wrote to ksLog %x\n", ksLog[0].data);
    assert(ksLog[0].data == 0xdeadbeef);
#endif /* CONFIG_MAX_NUM_TRACE_POINTS */
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOPort),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1  /* last port  */
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, 1, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /*
     * Initialise the NULL FPU state. This is different from merely zero'ing it
     * out (i.e., the NULL FPU state is non-zero), and must be performed before
     * the first thread is created.
     */
    resetFpu();
    saveFpuState(&x86KSnullFpuState);
    x86KSfpuOwner = NULL;

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

    if (config_set(CONFIG_IOMMU)) {
        /* initialise VTD-related data structures and the IOMMUs */
        if (!vtd_init(cpu_id, num_drhu, rmrr_list)) {
            return false;
        }

        /* write number of IOMMU PT levels into bootinfo */
        ndks_boot.bi_frame->numIOPTLevels = x86KSnumIOPTLevels;

        /* write IOSpace master cap */
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOSpace), master_iospace_cap());
    } else {
        /* -1 signals "no IOMMU" to userland */
        ndks_boot.bi_frame->numIOPTLevels = -1;
    }

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* create device frames */
    if (!create_device_frames(root_cnode_cap, dev_p_regs)) {
        return false;
    }

    /* finalise the bootinfo frame */
    bi_finalise();

    return true;
}
/* Compiler-generated runtime start-up code (appears to be emitted by an
 * ML-to-C backend — TODO confirm toolchain; do not hand-edit the emitted
 * statements). It zeroes the region/exception globals, builds the five
 * built-in exception records (Div/Interrupt/Overflow/Bind/Match, each a
 * {self-pointer, exception number, name-string pointer} triple), allocates
 * the global regions, installs the top-level handler frame, pushes the
 * program-unit labels, and returns the label of the first basic block. */
static Int32 c_code(){
    /***** Init Link Code *****/
    topRegion = ((Int32)0);
    freelist = ((Int32)0);
    maxStack = ((Int32)0);
    exn_flag = ((Int32)0);
    exn_val = ((Int32)0);

    /* Reset global exn flag.*/
    exn_flag = ((Int32)0);

    /* Exn name*/
    exnameCounter = ((Int32)5);

    /* Exn ptr.*/
    *(Int32 *)(((Int32)(&exnPtr)) + 4*((Int32)0)) = ((Int32)0);

    /*Setup primitive exception: Div*/
    IntReg25 = ((Int32)(&exn_DIV))+((Int32)4);
    *(Int32 *)(((Int32)(&exn_DIV)) + 4*((Int32)0)) = (IntReg25);
    *(Int32 *)(((Int32)(&exn_DIV)) + 4*((Int32)1)) = ((Int32)4);
    IntReg25 = ((Int32)(&String373));
    *(Int32 *)(((Int32)(&exn_DIV)) + 4*((Int32)2)) = (IntReg25);

    /*Setup primitive exception: Interrupt*/
    IntReg25 = ((Int32)(&exn_INTERRUPT))+((Int32)4);
    *(Int32 *)(((Int32)(&exn_INTERRUPT)) + 4*((Int32)0)) = (IntReg25);
    *(Int32 *)(((Int32)(&exn_INTERRUPT)) + 4*((Int32)1)) = ((Int32)3);
    IntReg25 = ((Int32)(&String372));
    *(Int32 *)(((Int32)(&exn_INTERRUPT)) + 4*((Int32)2)) = (IntReg25);

    /*Setup primitive exception: Overflow*/
    IntReg25 = ((Int32)(&exn_OVERFLOW))+((Int32)4);
    *(Int32 *)(((Int32)(&exn_OVERFLOW)) + 4*((Int32)0)) = (IntReg25);
    *(Int32 *)(((Int32)(&exn_OVERFLOW)) + 4*((Int32)1)) = ((Int32)2);
    IntReg25 = ((Int32)(&String371));
    *(Int32 *)(((Int32)(&exn_OVERFLOW)) + 4*((Int32)2)) = (IntReg25);

    /*Setup primitive exception: Bind*/
    IntReg25 = ((Int32)(&exn_BIND))+((Int32)4);
    *(Int32 *)(((Int32)(&exn_BIND)) + 4*((Int32)0)) = (IntReg25);
    *(Int32 *)(((Int32)(&exn_BIND)) + 4*((Int32)1)) = ((Int32)1);
    IntReg25 = ((Int32)(&String370));
    *(Int32 *)(((Int32)(&exn_BIND)) + 4*((Int32)2)) = (IntReg25);

    /*Setup primitive exception: Match*/
    IntReg25 = ((Int32)(&exn_MATCH))+((Int32)4);
    *(Int32 *)(((Int32)(&exn_MATCH)) + 4*((Int32)0)) = (IntReg25);
    *(Int32 *)(((Int32)(&exn_MATCH)) + 4*((Int32)1)) = ((Int32)0);
    IntReg25 = ((Int32)(&String369));
    *(Int32 *)(((Int32)(&exn_MATCH)) + 4*((Int32)2)) = (IntReg25);

    /* Allocate global regions and push them on the stack. */
    IntReg26 = (IntReg30);
    offsetSPDef(((Int32)4));
    DatLab83 = alloc_region((IntReg26));
    IntReg26 = (IntReg30);
    offsetSPDef(((Int32)4));
    DatLab82 = alloc_region((IntReg26));
    IntReg26 = (IntReg30);
    offsetSPDef(((Int32)4));
    DatLab81 = alloc_region((IntReg26));

    /* Setup top level handler code*/
    IntReg25 = (IntReg30);
    IntReg26 = ((Int32)TopLevelHandlerLab);
    pushDef((IntReg26));
    pushDef((IntReg25));
    IntReg5 = *(Int32 *)(((Int32)(&exnPtr)) + 4*((Int32)0));
    pushDef((IntReg5));
    *(Int32 *)(((Int32)(&exnPtr)) + 4*((Int32)0)) = (IntReg30);

    /*Push addresses of program units on stack, starting with*/
    /*the exit label and ending with label for the second program unit.*/
    IntReg26 = ((Int32)Lab78684);
    pushDef((IntReg26));
    IntReg26 = ((Int32)Lab78667);
    pushDef((IntReg26));

    /*Jump to first block*/
    return ((Int32)Lab160);
}
/* Create, configure and start the initial (root) thread.
 *
 * Allocates its TCB block, installs its CNode/VSpace/IPC-buffer caps,
 * points it at the user image entry and the bootinfo frame, marks it
 * runnable and switches to it. The ordering of these steps is
 * significant — the scheduler state is seeded before switchToThread().
 * Returns false on allocation or cap-derivation failure. */
BOOT_CODE bool_t create_initial_thread(
    cap_t  root_cnode_cap,
    cap_t  it_pd_cap,
    vptr_t ui_v_entry,
    vptr_t bi_frame_vptr,
    vptr_t ipcbuf_vptr,
    cap_t  ipcbuf_cap
)
{
    pptr_t pptr;
    cap_t  cap;
    tcb_t* tcb;
    deriveCap_ret_t dc_ret;

    /* allocate TCB */
    pptr = alloc_region(TCB_BLOCK_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failed: Unable to allocate tcb for initial thread\n");
        return false;
    }
    memzero((void*)pptr, 1 << TCB_BLOCK_SIZE_BITS);
    /* the TCB object itself lives at TCB_OFFSET inside the block */
    tcb = TCB_PTR(pptr + TCB_OFFSET);
    tcb->tcbTimeSlice = CONFIG_TIME_SLICE;
    Arch_initContext(&tcb->tcbArch.tcbContext);

    /* derive a copy of the IPC buffer cap for inserting */
    dc_ret = deriveCap(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF), ipcbuf_cap);
    if (dc_ret.status != EXCEPTION_NONE) {
        printf("Failed to derive copy of IPC Buffer\n");
        return false;
    }

    /* initialise TCB (corresponds directly to abstract specification) */
    cteInsert(
        root_cnode_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_CNODE),
        SLOT_PTR(pptr, tcbCTable)
    );
    cteInsert(
        it_pd_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE),
        SLOT_PTR(pptr, tcbVTable)
    );
    cteInsert(
        dc_ret.cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF),
        SLOT_PTR(pptr, tcbBuffer)
    );
    tcb->tcbIPCBuffer = ipcbuf_vptr;
    /* hand the root task its bootinfo pointer and entry point */
    setRegister(tcb, capRegister, bi_frame_vptr);
    setNextPC(tcb, ui_v_entry);

    /* initialise TCB */
    tcb->tcbPriority = seL4_MaxPrio;
    setupReplyMaster(tcb);
    setThreadState(tcb, ThreadState_Running);
    ksSchedulerAction = SchedulerAction_ResumeCurrentThread;
    /* seed ksCurThread with the idle thread so switchToThread() below has
     * a valid "previous" thread to switch away from */
    ksCurThread = ksIdleThread;
    ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain;
    ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length;
    assert(ksCurDomain < CONFIG_NUM_DOMAINS && ksDomainTime > 0);

    /* initialise current thread pointer */
    switchToThread(tcb); /* initialises ksCurThread */

    /* create initial thread's TCB cap */
    cap = cap_thread_cap_new(TCB_REF(tcb));
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_TCB), cap);

#ifdef DEBUG
    setThreadName(tcb, "rootserver");
#endif

    return true;
}
/* Create an address space for the initial thread.
 * This includes page directory and page tables.
 *
 * With PDPT_BITS == 0 a single page directory is the VSpace root;
 * otherwise a PDPT is created with one PD per PDPT slot. In both cases
 * page tables are then created to cover it_v_reg, and the slot ranges of
 * the provided PD/PT caps are recorded in the bootinfo frame.
 * Returns the VSpace root cap, or a null cap on any allocation failure. */
BOOT_CODE static cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    pptr_t     pptr;
    slot_pos_t slot_pos_before;
    slot_pos_t slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;

    if (PDPT_BITS == 0) {
        cap_t pd_cap;
        pptr_t pd_pptr;
        /* just create single PD obj and cap */
        pd_pptr = alloc_region(PD_SIZE_BITS);
        if (!pd_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDE_PTR(pd_pptr), 1 << PD_SIZE_BITS);
        copyGlobalMappings(PDE_PTR(pd_pptr));
        pd_cap = create_it_page_directory_cap(cap_null_cap_new(), pd_pptr, 0, IT_ASID);
        if (!provide_cap(root_cnode_cap, pd_cap)) {
            return cap_null_cap_new();
        }
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pd_cap);
        vspace_cap = pd_cap;
    } else {
        cap_t pdpt_cap;
        pptr_t pdpt_pptr;
        unsigned int i;
        /* create a PDPT obj and cap */
        pdpt_pptr = alloc_region(PDPT_SIZE_BITS);
        if (!pdpt_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDPTE_PTR(pdpt_pptr), 1 << PDPT_SIZE_BITS);
        pdpt_cap = cap_pdpt_cap_new(
                       true,      /* capPDPTISMapped   */
                       IT_ASID,   /* capPDPTMappedASID */
                       pdpt_pptr  /* capPDPTBasePtr    */
                   );
        /* create all PD objs and caps necessary to cover userland image. For simplicity
         * to ensure we also cover the kernel window we create all PDs */
        for (i = 0; i < BIT(PDPT_BITS); i++) {
            /* The compiler is under the mistaken belief here that this shift could be
             * undefined. However, in the case that it would be undefined this code path
             * is not reachable because PDPT_BITS == 0 (see if statement at the top of
             * this function), so to work around it we must both put in a redundant
             * if statement AND place the shift in a variable. While the variable
             * will get compiled away it prevents the compiler from evaluating
             * the 1 << 32 as a constant when it shouldn't
             * tl;dr gcc evaluates constants even if code is unreachable */
            int shift = (PD_BITS + PT_BITS + PAGE_BITS);
            if (shift != 32) {
                vptr = i << shift;
            } else {
                return cap_null_cap_new();
            }
            pptr = alloc_region(PD_SIZE_BITS);
            if (!pptr) {
                return cap_null_cap_new();
            }
            memzero(PDE_PTR(pptr), 1 << PD_SIZE_BITS);
            if (!provide_cap(root_cnode_cap,
                             create_it_page_directory_cap(pdpt_cap, pptr, vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }
        /* now that PDs exist we can copy the global mappings */
        copyGlobalMappings(PDPTE_PTR(pdpt_pptr));
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pdpt_cap);
        vspace_cap = pdpt_cap;
    }

    /* record which root-CNode slots now hold the PD caps */
    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pd_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    /* create all PT objs and caps necessary to cover userland image */
    slot_pos_before = ndks_boot.slot_pos_cur;
    for (vptr = ROUND_DOWN(it_v_reg.start, PT_BITS + PAGE_BITS);
            vptr < it_v_reg.end;
            vptr += BIT(PT_BITS + PAGE_BITS)) {
        pptr = alloc_region(PT_SIZE_BITS);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PTE_PTR(pptr), 1 << PT_SIZE_BITS);
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(vspace_cap, pptr, vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    /* record which root-CNode slots now hold the PT caps */
    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pt_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    return vspace_cap;
}
/* Per-node (IA32 multikernel variant) kernel state initialisation.
 *
 * Mirrors init_sys_state: seeds the boot allocator from avail_p_reg, lays
 * out the initial thread's address space (user image, IPC buffer, bootinfo
 * frame), then creates the root CNode, IRQ state, bootinfo frame, VSpace,
 * IPC buffer, user-image frames, ASID pool, FPU null-state, idle and
 * initial threads, IOMMU state, untypeds, device frames and shared frames,
 * strictly in this order. Returns false on any failure.
 *
 * NOTE(review): drhu_list is accepted but not passed to vtd_init here —
 * confirm against the vtd_init signature in use. */
BOOT_CODE bool_t init_node_state(
    p_region_t    avail_p_reg,
    p_region_t    sh_p_reg,
    dev_p_regs_t* dev_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    node_id_t     node_id,
    uint32_t      num_nodes,
    /* parameters below not modeled in abstract specification */
    pdpte_t*      kernel_pdpt,
    pde_t*        kernel_pd,
    pte_t*        kernel_pt
#ifdef CONFIG_IOMMU
    , cpu_id_t  cpu_id,
    uint32_t    num_drhu,
    paddr_t*    drhu_list,
    uint32_t    num_passthrough_dev,
    dev_id_t*   passthrough_dev_list,
    uint32_t*   pci_bus_used_bitmap
#endif
)
{
    cap_t  root_cnode_cap;
    vptr_t bi_frame_vptr;
    vptr_t ipcbuf_vptr;
    cap_t  it_vspace_cap;
    cap_t  it_ap_cap;
    cap_t  ipcbuf_cap;
    pptr_t bi_frame_pptr;
    create_frames_of_region_ret_t create_frames_ret;
    int i;
#ifdef CONFIG_BENCHMARK
    vm_attributes_t buffer_attr = {{ 0 }};
    uint32_t paddr;
    pde_t pde;
#endif /* CONFIG_BENCHMARK */

    /* convert from physical addresses to kernel pptrs */
    region_t avail_reg          = paddr_to_pptr_reg(avail_p_reg);
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t sh_reg             = paddr_to_pptr_reg(sh_p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end - ui_info.pv_offset;

    /* the IPC buffer page sits directly above the user image, and the
     * bootinfo frame directly above that */
    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = bi_frame_vptr + BIT(PAGE_BITS);

    /* make the free memory available to alloc_region() */
    ndks_boot.freemem[0] = avail_reg;
    for (i = 1; i < MAX_NUM_FREEMEM_REG; i++) {
        ndks_boot.freemem[i] = REG_EMPTY;
    }

    /* initialise virtual-memory-related data structures (not in abstract spec) */
    if (!init_vm_state(kernel_pdpt, kernel_pd, kernel_pt)) {
        return false;
    }

#ifdef CONFIG_BENCHMARK
    /* allocate and create the log buffer */
    buffer_attr.words[0] = IA32_PAT_MT_WRITE_THROUGH;
    paddr = pptr_to_paddr((void *) alloc_region(pageBitsForSize(IA32_LargePage)));
    /* allocate a large frame for logging */
    pde = pde_pde_large_new(
              paddr,                                     /* page_base_address */
              vm_attributes_get_ia32PATBit(buffer_attr), /* pat               */
              0,                                         /* avl_cte_depth     */
              1,                                         /* global            */
              0,                                         /* dirty             */
              0,                                         /* accessed          */
              vm_attributes_get_ia32PCDBit(buffer_attr), /* cache_disabled    */
              vm_attributes_get_ia32PWTBit(buffer_attr), /* write_through     */
              0,                                         /* super_user        */
              1,                                         /* read_write        */
              1                                          /* present           */
          );
    /* TODO this shouldn't be hardcoded */
    ia32KSkernelPD[IA32_KSLOG_IDX] = pde;

    /* flush the tlb */
    invalidatePageStructureCache();

    /* if we crash here, the log isn't working */
#ifdef CONFIG_DEBUG_BUILD
    printf("Testing log\n");
    ksLog[0] = 0xdeadbeef;
    printf("Wrote to ksLog %x\n", ksLog[0]);
    assert(ksLog[0] == 0xdeadbeef);
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_BENCHMARK */

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IO_PORT),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1  /* last port  */
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap, node_id != 0);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(node_id, num_nodes, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->ui_frame_caps = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /*
     * Initialise the NULL FPU state. This is different from merely zero'ing it
     * out (i.e., the NULL FPU state is non-zero), and must be performed before
     * the first thread is created.
     */
    resetFpu();
    saveFpuState(&ia32KSnullFpuState);
    ia32KSfpuOwner = NULL;

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

#ifdef CONFIG_IOMMU
    /* initialise VTD-related data structures and the IOMMUs */
    if (!vtd_init(cpu_id, num_drhu, pci_bus_used_bitmap, num_passthrough_dev, passthrough_dev_list)) {
        return false;
    }

    /* write number of IOMMU PT levels into bootinfo */
    ndks_boot.bi_frame->num_iopt_levels = ia32KSnumIOPTLevels;

    /* write IOSpace master cap */
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IO_SPACE), master_iospace_cap());
#endif

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* create device frames */
    if (!create_device_frames(root_cnode_cap, dev_p_regs)) {
        return false;
    }

    /* create all shared frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            sh_reg,
            false,
            0
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->sh_frame_caps = create_frames_ret.region;;

    /* finalise the bootinfo frame */
    bi_finalise();

#ifdef DEBUG
    /* route kernel console/debug output to this node's serial ports */
    ia32KSconsolePort = console_port_of_node(node_id);
    ia32KSdebugPort = debug_port_of_node(node_id);
#endif

    return true;
}