/**
 * \brief Allocate-and-map callback used while loading the init ELF image.
 *
 * Rounds the requested virtual range [gvbase, gvbase + bytes) out to whole
 * base pages, obtains physical backing from the boot-time allocator for the
 * current core type, maps it into init's address space via the supplied
 * L2 table info, and returns a kernel-virtual pointer to the first byte.
 *
 * \param state  Opaque pointer, actually a const struct startup_l2_info *.
 * \param gvbase Guest-virtual base address requested by the ELF loader.
 * \param bytes  Number of bytes to back with memory.
 * \param flags  ELF segment flags, translated via elf_to_l2_flags().
 * \param ret    Out: kernel-virtual address of gvbase, or 0 on failure.
 *
 * \return SYS_ERR_OK always; failure is signalled through *ret == 0.
 */
static errval_t startup_alloc_init(void *state, genvaddr_t gvbase, size_t bytes,
                                   uint32_t flags, void **ret)
{
    const struct startup_l2_info *l2i = (const struct startup_l2_info *)state;

    /* Expand the requested range to page granularity. */
    lvaddr_t base    = round_down((lvaddr_t)gvbase, BASE_PAGE_SIZE);
    lvaddr_t limit   = round_up((lvaddr_t)gvbase + bytes, BASE_PAGE_SIZE);
    size_t   in_page = (lvaddr_t)gvbase - base;

    //STARTUP_PROGRESS();

    /* Grab physical frames from the allocator for this core type. */
    lpaddr_t phys = hal_cpu_is_bsp()
        ? bsp_alloc_phys_aligned(limit - base, BASE_PAGE_SIZE)
        : app_alloc_phys_aligned(limit - base, BASE_PAGE_SIZE);

    if (limit <= base || phys == 0) {
        /* Empty range or allocation failure: report via null result. */
        *ret = 0;
        return SYS_ERR_OK;
    }

    spawn_init_map(l2i->l2_table, l2i->l2_base, base, phys, limit - base,
                   elf_to_l2_flags(flags));
    /* Hand back the kernel-virtual alias of the requested (unaligned) base. */
    *ret = (void *)(local_phys_to_mem(phys) + in_page);
    return SYS_ERR_OK;
}
/**
 * \brief Spawn the init domain on the bootstrap (BSP) core.
 *
 * Allocates and zeroes the bootinfo region, performs the common init-domain
 * setup, maps bootinfo into init's VSPACE, loads the init ELF image, wires
 * up the dispatcher's GOT pointer and entry point, and creates the module
 * and physical-memory capabilities init will own.
 *
 * \param name  Name of the init module to load (used for lookup/logging).
 *
 * \return The DCB of the newly created init domain.
 */
struct dcb *spawn_bsp_init(const char *name)
{
    MSG("spawning '%s' on BSP core\n", name);
    /* Only the first core can run this code */
    assert(cpu_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = bsp_alloc_phys_aligned(BOOTINFO_SIZE, BASE_PAGE_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* store pointer to bootinfo in kernel virtual memory */
    bootinfo = (struct bootinfo *) local_phys_to_mem(bootinfo_phys);

    /* Construct cmdline args: init receives the VA where bootinfo will be
     * mapped in its own address space as its second argument. */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };
    int argc = 2;

    /* perform common spawning of init domain */
    struct dcb *init_dcb = spawn_init_common(name, argc, argv, bootinfo_phys,
                                             bsp_alloc_phys, bsp_alloc_phys_aligned);

    /* map boot info into init's VSPACE */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    /* load the image */
    genvaddr_t init_ep, got_base;
    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };
    load_init_image(&l3_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);
    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
        init_ep, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
        get_dispatcher_shared_aarch64(init_dcb->disp);

    /* setting GOT pointers */
    disp_aarch64->got_base = got_base;
    /* XXX - Why does the kernel do this? -DC
     * NOTE(review): x10 appears to serve as the GOT register in both save
     * areas here — confirm against the dispatcher ABI for this port. */
    disp_aarch64->enabled_save_area.named.x10  = got_base;
    disp_aarch64->disabled_save_area.named.x10 = got_base;

    /* setting entry points: init resumes from the disabled save area at its
     * ELF entry, in user mode with FIQs masked. */
    disp_aarch64->disabled_save_area.named.pc   = init_ep;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    /* bsp_alloc_phys(0) returns the current allocation watermark; everything
     * above it up to the end of RAM becomes init's free physical memory. */
    lpaddr_t init_alloc_end = bsp_alloc_phys(0);
    create_phys_caps(armv8_glbl_core_data->start_kernel_ram, init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel

    return init_dcb;
}
/*
 * \brief Initialize page tables
 *
 * This includes setting up page tables for the init process.
 */
static void init_page_tables(void)
{
    // Create page table for init: both tables come from the boot allocator
    // appropriate to this core type and are zeroed before use.
    if(hal_cpu_is_bsp()) {
        init_l1 = (union arm_l1_entry *)
            local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)
            local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    } else {
        init_l1 = (union arm_l1_entry *)
            local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)
            local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    }

    printf("init_page_tables done: init_l1=%p init_l2=%p\n", init_l1, init_l2);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    // Slot 0 of the page CN gets the L1 table capability.
    caps_create_new(ObjType_VNode_ARM_l1,
                    mem_to_local_phys((lvaddr_t)init_l1),
                    vnode_objbits(ObjType_VNode_ARM_l1), 0,
                    caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++));

    //STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn: one cap per page-sized group
    // of L2 tables, covering the whole INIT_L2_BYTES region.
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(ObjType_VNode_ARM_l2,
                        mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
                        objbits_vnode, 0,
                        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++));
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE;
         vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        // Each L1 section points at the L2 table covering that section.
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    printf("Calling paging_context_switch with address = %"PRIxLVADDR"\n",
           mem_to_local_phys((lvaddr_t) init_l1));
    // Activate the freshly built address space.
    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
}