/**
 * \brief System-call handler: create a new capability at runtime.
 *
 * Resolves the destination CNode under \p root, checks that the target slot
 * is empty, and creates the new capability there via caps_create_new().
 *
 * \param root            Root capability used to look up the destination CNode.
 * \param type            Object type of the capability to create.
 * \param objbits         Size of the object (passed through to caps_create_new()).
 * \param dest_cnode_cptr Capability address of the destination CNode.
 * \param dest_slot       Slot within the destination CNode to fill.
 * \param dest_vbits      Number of valid bits in \p dest_cnode_cptr.
 *
 * \return SYSRET-wrapped error value (SYS_ERR_OK on success).
 */
struct sysret sys_create(struct capability *root, enum objtype type,
                         uint8_t objbits, capaddr_t dest_cnode_cptr,
                         cslot_t dest_slot, int dest_vbits)
{
    errval_t err;
    // Runtime-creatable types carry no backing memory, so base/bits stay 0.
    uint8_t bits = 0;
    genpaddr_t base = 0;

    /* Parameter checking */
    if (type == ObjType_Null || type >= ObjType_Num) {
        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
    }

    /* Destination CNode: must be reachable with read/write rights */
    struct capability *dest_cnode_cap;
    err = caps_lookup_cap(root, dest_cnode_cptr, dest_vbits,
                          &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* Destination slot: refuse to overwrite an occupied slot */
    struct cte *dest_cte;
    dest_cte = caps_locate_slot(dest_cnode_cap->u.cnode.cnode, dest_slot);
    if (dest_cte->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOTS_IN_USE);
    }

    /* List capabilities allowed to be created at runtime. */
    switch(type) {
    case ObjType_ID:
        break;

    // only certain types of capabilities can be created at runtime
    default:
        return SYSRET(SYS_ERR_TYPE_NOT_CREATABLE);
    }

    return SYSRET(caps_create_new(type, base, bits, objbits, my_core_id,
                                  dest_cte));
}
/**
 * \brief Spawn the monitor/init task on an application (non-BSP) ARM core.
 *
 * Builds a command line describing the booting core, spawns the common init
 * environment, hands the inter-core URPC frame to the new domain, and ELF-loads
 * the monitor binary supplied by the booting core.
 *
 * \param core_data   Boot information passed from the core that booted us.
 * \param name        Name for the new domain.
 * \param alloc_phys  Physical-memory allocator to use during spawn.
 *
 * \return The fully initialized DCB of the new init task.
 */
struct dcb *spawn_app_init(struct arm_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32,
             core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    // bootinfo_phys is 0: app cores get their bootinfo via the monitor.
    struct dcb *init_dcb = spawn_init_common(name, argc, argv,0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    // Retype in place to a regular Frame now that creation is done, so the
    // channel contents set up by the booting core are preserved.
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    spawn_init_map(init_l2, INIT_VBASE, MON_URPC_VBASE, urpc_ptr,
                   MON_URPC_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    // elf load the domain
    genvaddr_t entry_point, got_base=0;
    err = elf_load(EM_ARM, startup_alloc_init, &l2_info,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    // Locate the GOT so the PIC binary can find its globals.
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(local_phys_to_mem(core_data->monitor_binary),
                                       core_data->monitor_binary_size, ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    // Publish GOT base and entry point in the dispatcher save areas; r10 is
    // the PIC register on this ABI.
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r10  = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc   = entry_point;
#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->disabled_save_area.named.r10  = got_base;
    //disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;

    return init_dcb;
}
/**
 * \brief Common part of spawning the init task (ARM).
 *
 * Spawns the module, builds init's page tables, maps the argument page and
 * dispatcher into init's vspace, creates the device-memory capability, and
 * initializes the dispatcher save areas.
 *
 * \param name           Name of the domain.
 * \param argc/argv      Command-line arguments for init.
 * \param bootinfo_phys  Physical address of the bootinfo page (0 on app cores).
 * \param alloc_phys     Physical-memory allocator to use.
 *
 * \return The DCB of the new init task.
 */
static struct dcb *spawn_init_common(const char *name, int argc,
                                     const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    printf("spawn_init_common %s\n", name);

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name, argc, argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, &paramaddr);

    init_page_tables();

    // Cast: init_l1 is a pointer; PRIxLVADDR expects an lvaddr_t.
    printf("about to call mem_to_local_phys with lvaddr=%"PRIxLVADDR"\n",
           (lvaddr_t)init_l1);

    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);

    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    /*
     * we create the capability to the devices at this stage and store it
     * in the TASKCN_SLOT_IO, where on x86 the IO capability is stored for
     * device access on PCI. PCI is not available on the pandaboard so this
     * should not be a problem.
     */
    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn),
                                         TASKCN_SLOT_IO);
    // 0x40000000 / 30 bits: 1GB device region — NOTE(review): platform
    // specific; confirm against the board's memory map.
    errval_t err = caps_create_new(ObjType_DevFrame, 0x40000000, 30, 30,
                                   iocap);
    assert(err_is_ok(err));

    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);

    /* Initialize dispatcher */
    disp->disabled = true;
    strncpy(disp->name, argv[0], DISP_NAME_LEN);
    // strncpy does not guarantee NUL-termination; force it so the name is
    // always a valid C string.
    disp->name[DISP_NAME_LEN - 1] = '\0';

    /* tell init the vspace addr of its dispatcher */
    disp->udisp = INIT_DISPATCHER_VBASE;

    // r0 carries the argument-page address into init.
    disp_arm->enabled_save_area.named.r0   = paramaddr;
#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;

    // BUGFIX: the original printf had a %PRIxLVADDR specifier with no
    // matching argument — undefined behavior. Print a plain message instead.
    printf("spawn_init_common: done\n");

    return init_dcb;
}
/*
 * \brief Initialize page tables
 *
 * This includes setting up page tables for the init process.
 */
static void init_page_tables(void)
{
    // Create page table for init; BSP and app cores draw from different
    // physical allocators.
    if(hal_cpu_is_bsp()) {
        init_l1 = (union arm_l1_entry *)
            local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)
            local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    } else {
        init_l1 = (union arm_l1_entry *)
            local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)
            local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    }

    printf("init_page_tables done: init_l1=%p init_l2=%p\n",
           init_l1, init_l2);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    // NOTE(review): the caps_create_new return values in this function are
    // ignored; other call sites assert success — consider doing so here too.
    caps_create_new(ObjType_VNode_ARM_l1,
                    mem_to_local_phys((lvaddr_t)init_l1),
                    vnode_objbits(ObjType_VNode_ARM_l1), 0,
                    caps_locate_slot(CNODE(spawn_state.pagecn),
                                     pagecn_pagemap++)
                    );

    //STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(ObjType_VNode_ARM_l2,
                        mem_to_local_phys((lvaddr_t)init_l2)
                            + (i << objbits_vnode),
                        objbits_vnode, 0,
                        caps_locate_slot(CNODE(spawn_state.pagecn),
                                         pagecn_pagemap++)
                        );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE;
         vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    printf("Calling paging_context_switch with address = %"PRIxLVADDR"\n",
           mem_to_local_phys((lvaddr_t) init_l1));
    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
}
/// Setup the module cnode, which contains frame caps to all multiboot modules
void create_module_caps(struct spawn_state *st)
{
    errval_t err;

    /* Create caps for multiboot modules */
    struct multiboot_modinfo *module =
        (struct multiboot_modinfo *)local_phys_to_mem(glbl_core_data->mods_addr);

    // Allocate strings area: one page that will hold all module command lines
    lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE);
    lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys);
    lvaddr_t mmstrings = mmstrings_base;

    // create cap for strings area in first slot of modulecn
    assert(st->modulecn_slot == 0);
    err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_BITS,
                          BASE_PAGE_BITS,
                          caps_locate_slot(CNODE(st->modulecn),
                                           st->modulecn_slot++));
    assert(err_is_ok(err));

    /* Walk over multiboot modules, creating frame caps */
    for (int i = 0; i < glbl_core_data->mods_count; i++) {
        struct multiboot_modinfo *m = &module[i];

        // Set memory regions within bootinfo
        struct mem_region *region =
            &bootinfo->regions[bootinfo->regions_length++];

        genpaddr_t remain = MULTIBOOT_MODULE_SIZE(*m);
        genpaddr_t base_addr = local_phys_to_gen_phys(m->mod_start);

        region->mr_type = RegionType_Module;
        region->mr_base = base_addr;
        region->mrmod_slot = st->modulecn_slot;  // first slot containing caps
        region->mrmod_size = remain;  // size of image _in bytes_
        region->mrmod_data = mmstrings - mmstrings_base;  // offset of string in area

        // round up to page size for caps
        remain = ROUND_UP(remain, BASE_PAGE_SIZE);

        // Create max-sized caps to multiboot module in module cnode:
        // each chunk is the largest naturally-aligned power-of-two block
        // that fits at base_addr.
        while (remain > 0) {
            assert((base_addr & BASE_PAGE_MASK) == 0);
            assert((remain & BASE_PAGE_MASK) == 0);

            // determine size of next chunk
            uint8_t block_size = bitaddralign(remain, base_addr);

            assert(st->modulecn_slot < (1UL << st->modulecn->cap.u.cnode.bits));
            // create as DevFrame cap to avoid zeroing memory contents
            err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
                                  block_size,
                                  caps_locate_slot(CNODE(st->modulecn),
                                                   st->modulecn_slot++));
            assert(err_is_ok(err));

            // Advance by that chunk
            base_addr += ((genpaddr_t)1 << block_size);
            remain -= ((genpaddr_t)1 << block_size);
        }

        // Copy multiboot module string to mmstrings area
        strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
        mmstrings += strlen(MBADDR_ASSTRING(m->string)) + 1;
        // NOTE(review): overflow is only detected after the copy; a very long
        // command line could already have run past the page.
        assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);
    }
}
/**
 * \brief Spawn the monitor/init task on an application (non-BSP) ARMv8 core.
 *
 * Builds a command line describing the booting core, spawns the common init
 * environment, hands over the inter-core URPC frame, and ELF-loads the
 * monitor binary supplied by the booting core.
 *
 * \param core_data  Boot information passed from the core that booted us.
 * \param name       Name for the new domain.
 *
 * \return The fully initialized DCB of the new init task.
 */
struct dcb *spawn_app_init(struct armv8_core_data *core_data,
                           const char *name)
{
    errval_t err;

    MSG("spawning '%s' on APP core\n", name);

    /* Only the app core can run this code */
    assert(!cpu_is_bsp());

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32,
             core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    struct dcb *init_dcb= spawn_init_common(name, argc, argv, 0,
                                            app_alloc_phys,
                                            app_alloc_phys_aligned);

    MSG("creating monitor URPC frame cap\n");
    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame.base,
                          core_data->urpc_frame.length,
                          core_data->urpc_frame.length,
                          my_core_id, urpc_frame_cte);
    assert(err_is_ok(err));
    // Retype in place to a regular Frame so the channel contents set up by
    // the booting core are preserved.
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    MSG("mapping URPC frame cap %" PRIxLPADDR" \n",urpc_ptr );
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, MON_URPC_VBASE, urpc_ptr,
                   MON_URPC_SIZE, INIT_PERM_RW);

    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };

    // elf load the domain
    genvaddr_t entry_point, got_base=0;

    MSG("loading elf '%s' @ %" PRIxLPADDR "\n", name,
        local_phys_to_mem(core_data->monitor_binary.base));

    err = elf_load(EM_AARCH64, startup_alloc_init, &l3_info,
                   local_phys_to_mem(core_data->monitor_binary.base),
                   core_data->monitor_binary.length, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    // Locate the GOT so the PIC binary can find its globals.
    struct Elf64_Shdr* got_shdr;
    got_shdr = elf64_find_section_header_name(local_phys_to_mem(core_data->monitor_binary.base),
                                              core_data->monitor_binary.length,
                                              ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
        entry_point, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
        get_dispatcher_shared_aarch64(init_dcb->disp);

    // x10 is the PIC register on this ABI.
    disp_aarch64->got_base = got_base;
    disp_aarch64->enabled_save_area.named.x10 = got_base;
    disp_aarch64->disabled_save_area.named.x10 = got_base;

    /* setting entry points */
    disp_aarch64->disabled_save_area.named.pc = entry_point;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;
    //arch_set_thread_register(INIT_DISPATCHER_VBASE);

    MSG("init dcb set up\n");

    return init_dcb;
}
/**
 * \brief Allocate and wire up the 4-level page tables for the init task
 *        (ARMv8/VMSAv8-64).
 *
 * Allocates L0..L3 tables, creates VNode capabilities for all of them in the
 * page CNode, and pre-links each level to the next for the init vspace window.
 */
static void init_page_tables(void)
{
    // BSP and app cores draw from different physical allocators.
    lpaddr_t (*alloc_phys_aligned)(size_t size, size_t align);
    if (cpu_is_bsp()) {
        alloc_phys_aligned = bsp_alloc_phys_aligned;
    } else {
        alloc_phys_aligned = app_alloc_phys_aligned;
    }

    // Create page table for init
    const size_t l0_size = VMSAv8_64_PTABLE_NUM_ENTRIES * INIT_L0_SIZE * sizeof(union armv8_ttable_entry);
    init_l0 = (void *) local_phys_to_mem(alloc_phys_aligned(l0_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l0, 0, l0_size);

    // Each level holds INIT_Ln_SIZE tables per table of the level above.
    const size_t l1_size = l0_size * INIT_L1_SIZE;
    init_l1 = (void *) local_phys_to_mem(alloc_phys_aligned(l1_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l1, 0, l1_size);

    const size_t l2_size = l1_size * INIT_L2_SIZE;
    init_l2 = (void *) local_phys_to_mem(alloc_phys_aligned(l2_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l2, 0, l2_size);

    const size_t l3_size = l2_size * INIT_L3_SIZE;
    init_l3 = (void *) local_phys_to_mem(alloc_phys_aligned(l3_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l3, 0, l3_size);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * AARCH64 layout for init (entry counts per INIT_Ln_SIZE; each table
     * has 512 8-byte entries = 4KB).
     */
    printk(LOG_NOTE, "init page tables: l0=%p, l1=%p, l2=%p, l3=%p\n",
            init_l0, init_l1, init_l2, init_l3);

    // NOTE(review): caps_create_new return values are ignored throughout this
    // function; other call sites assert success.
    caps_create_new(
            ObjType_VNode_AARCH64_l0,
            mem_to_local_phys((lvaddr_t)init_l0),
            vnode_objsize(ObjType_VNode_AARCH64_l0), 0,
            my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
    );

    // Map L1 tables into successive slots in pagecn
    for (size_t i = 0; i < INIT_L1_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l1);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l1,
                mem_to_local_phys((lvaddr_t)init_l1) + (i * objsize_vnode),
                objsize_vnode, 0,
                my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    //STARTUP_PROGRESS();

    // Map L2 tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_L2_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l2);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l2,
                mem_to_local_phys((lvaddr_t)init_l2) + (i * objsize_vnode),
                objsize_vnode, 0,
                my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    // Map L3 into successive slots in pagecn
    for(size_t i = 0; i < INIT_L3_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l3);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l3,
                mem_to_local_phys((lvaddr_t)init_l3) + (i * objsize_vnode),
                objsize_vnode, 0,
                my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    /*
     * Initialize init page tables - this just wires the L0
     * entries through to the corresponding L1 entries.
     */
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L0_SIZE) {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L0_SIZE;
        uintptr_t l1_off = section * VMSAv8_64_PTABLE_SIZE;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l1) + l1_off;
        paging_map_table_l0(init_l0, vaddr, paddr);
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L1_BLOCK_SIZE) {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L1_BLOCK_SIZE;
        uintptr_t l2_off = section * VMSAv8_64_PTABLE_SIZE;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_table_l1(init_l1, vaddr, paddr);
    }

    /*
     * Initialize init page tables - this just wires the L2
     * entries through to the corresponding L3 entries.
     */
    STATIC_ASSERT(0 == (ARMV8_INIT_VBASE % VMSAv8_64_L2_BLOCK_SIZE), "");
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L2_BLOCK_SIZE) {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L2_BLOCK_SIZE;
        uintptr_t l3_off = section * VMSAv8_64_PTABLE_SIZE;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l3) + l3_off;
        paging_map_table_l2(init_l2, vaddr, paddr);
    }
}
/// Setup the module cnode, which contains frame caps to all multiboot modules void create_module_caps(struct spawn_state *st) { errval_t err; /* Create caps for multiboot modules */ struct multiboot_header_tag *multiboot = (struct multiboot_header_tag *)local_phys_to_mem(armv8_glbl_core_data->multiboot_image.base); // Allocate strings area lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE); lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys); lvaddr_t mmstrings = mmstrings_base; // create cap for strings area in first slot of modulecn assert(st->modulecn_slot == 0); err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE, BASE_PAGE_SIZE, my_core_id, caps_locate_slot(CNODE(st->modulecn), st->modulecn_slot++)); assert(err_is_ok(err)); //Nag bootinfo->regions_length = 0; /* Walk over multiboot modules, creating frame caps */ size_t position = 0; size_t size = armv8_glbl_core_data->multiboot_image.length; struct mem_region *region; lpaddr_t acpi_base = (lpaddr_t)-1; /* add the ACPI regions */ struct multiboot_tag_new_acpi *acpi_new; acpi_new = (struct multiboot_tag_new_acpi *) multiboot2_find_header(multiboot, size, MULTIBOOT_TAG_TYPE_ACPI_NEW); if (acpi_new) { acpi_base = mem_to_local_phys((lvaddr_t)&acpi_new->rsdp[0]); } else { struct multiboot_tag_old_acpi *acpi_old; acpi_old = (struct multiboot_tag_old_acpi *) multiboot2_find_header(multiboot, size, MULTIBOOT_TAG_TYPE_ACPI_OLD); if (acpi_old) { acpi_base = mem_to_local_phys((lvaddr_t)&acpi_old->rsdp[0]); } } if (acpi_base != (lpaddr_t)-1) { region = &bootinfo->regions[bootinfo->regions_length++]; region->mr_base = acpi_base; region->mr_type = RegionType_ACPI_TABLE; } /* add the module regions */ position = 0; struct multiboot_tag_module_64 *module = (struct multiboot_tag_module_64 *) multiboot2_find_header(multiboot, size, MULTIBOOT_TAG_TYPE_MODULE_64); while (module) { // Set memory regions within bootinfo region = &bootinfo->regions[bootinfo->regions_length++]; genpaddr_t remain = 
module->mod_end - module->mod_start; genpaddr_t base_addr = local_phys_to_gen_phys(module->mod_start); region->mr_type = RegionType_Module; region->mr_base = base_addr; region->mrmod_slot = st->modulecn_slot; // first slot containing caps region->mrmod_size = remain; // size of image _in bytes_ region->mrmod_data = mmstrings - mmstrings_base; // offset of string in area // round up to page size for caps remain = ROUND_UP(remain, BASE_PAGE_SIZE); assert((base_addr & BASE_PAGE_MASK) == 0); assert((remain & BASE_PAGE_MASK) == 0); assert(st->modulecn_slot < cnode_get_slots(&st->modulecn->cap)); // create as DevFrame cap to avoid zeroing memory contents err = caps_create_new(ObjType_DevFrame, base_addr, remain, remain, my_core_id, caps_locate_slot(CNODE(st->modulecn), st->modulecn_slot++)); assert(err_is_ok(err)); // Copy multiboot module string to mmstrings area strcpy((char *)mmstrings, module->cmdline); mmstrings += strlen(module->cmdline) + 1; assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE); module = ((void *) module) + module->size; position += module->size; module = (struct multiboot_tag_module_64 *) multiboot2_find_header( (struct multiboot_header_tag *) module, size - position, MULTIBOOT_TAG_TYPE_MODULE_64); } }
/*
 * \brief Spawn the init task on the boot core (ARMv5/gem5 variant).
 *
 * Builds init's page tables, allocates bootinfo, spawns the module, creates
 * the (dummy) IO capability, maps args/bootinfo/dispatcher into init's
 * vspace, loads the init image from the initrd, and finally dispatches init.
 * Does not return.
 *
 * NOTE(review): the return type is declared on a line preceding this chunk.
 */
spawn_init(const char* name, int32_t kernel_id, const uint8_t* initrd_base,
           size_t initrd_bytes)
{
    // Only kernel 0 spawns init in this configuration.
    assert(0 == kernel_id);

    // Create page table for init
    init_l1 = (uintptr_t*)alloc_mem_aligned(INIT_L1_BYTES, ARM_L1_ALIGN);
    memset(init_l1, 0, INIT_L1_BYTES);

    init_l2 = (uintptr_t*)alloc_mem_aligned(INIT_L2_BYTES, ARM_L2_ALIGN);
    memset(init_l2, 0, INIT_L2_BYTES);

    STARTUP_PROGRESS();

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    STARTUP_PROGRESS();

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name,
                                        ARRAY_LENGTH(argv), argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, &paramaddr);

    STARTUP_PROGRESS();

    /*
     * Create a capability that allows user-level applications to
     * access device memory. This capability will be passed to Kaluga,
     * split up into smaller pieces and distributed to among device
     * drivers.
     *
     * For armv5, this is currently a dummy capability. We do not
     * have support for user-level device drivers in gem5 yet, so we
     * do not allocate any memory as device memory. Some cap_copy
     * operations in the bootup code fail if this capability is not
     * present.
     */
    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn),
                                         TASKCN_SLOT_IO);
    errval_t err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    assert(NULL != disp);

    STARTUP_PROGRESS();

    /* Initialize dispatcher */
    disp->udisp = INIT_DISPATCHER_VBASE;

    STARTUP_PROGRESS();
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);
    STARTUP_PROGRESS();

    /* Page table setup */

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    caps_create_new(
        ObjType_VNode_ARM_l1,
        mem_to_local_phys((lvaddr_t)init_l1),
        vnode_objbits(ObjType_VNode_ARM_l1), 0,
        my_core_id,
        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );

    STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(
            ObjType_VNode_ARM_l2,
            mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
            objbits_vnode, 0,
            my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
            );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE;
         vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    paging_make_good((lvaddr_t)init_l1, INIT_L1_BYTES);

    STARTUP_PROGRESS();

    printf("XXX: Debug print to make Bram's code work\n");

    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));

    STARTUP_PROGRESS();

    // Map cmdline arguments in VSpace at ARGS_BASE
    STATIC_ASSERT(0 == (ARGS_SIZE % BASE_PAGE_SIZE), "");

    STARTUP_PROGRESS();

    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    STARTUP_PROGRESS();

    // Map bootinfo
    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    genvaddr_t init_ep, got_base;
    load_init_image(&l2_info, initrd_base, initrd_bytes, &init_ep, &got_base);

    // Set startup arguments (argc, argv); r10 is the PIC/GOT register.
    disp_arm->enabled_save_area.named.r0   = paramaddr;
    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->enabled_save_area.named.r10  = got_base;

    disp_arm->got_base = got_base;

    // After the context switch above, bootinfo is accessed through init's
    // virtual mapping.
    struct bootinfo* bootinfo = (struct bootinfo*)INIT_BOOTINFO_VBASE;
    bootinfo->regions_length = 0;

    STARTUP_PROGRESS();

    create_modules_from_initrd(bootinfo, initrd_base, initrd_bytes);
    debug(SUBSYS_STARTUP, "used %"PRIuCSLOT" slots in modulecn\n",
          spawn_state.modulecn_slot);

    STARTUP_PROGRESS();

    create_phys_caps(&spawn_state.physaddrcn->cap, bootinfo);

    STARTUP_PROGRESS();

    bootinfo->mem_spawn_core  = ~0;     // Size of kernel if bringing up others

    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    STARTUP_PROGRESS();

    // NB libbarrelfish initialization sets up the stack.
    disp_arm->disabled_save_area.named.pc   = init_ep;
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->disabled_save_area.named.r10  = got_base;

#ifdef __XSCALE__
    cp15_disable_cache();
#endif

    printf("Kernel ready.\n");

    pit_start();

    // On to userland...
    STARTUP_PROGRESS();
    dispatch(init_dcb);

    panic("Not reached.");
}
/**
 * \brief Create module frame capabilities from a CPIO initrd image.
 *
 * The first file in the archive ("menu.lst.modules") supplies the command
 * lines; subsequent files are the module images in the same order. Each
 * module is copied out of the archive into freshly allocated RAM and covered
 * by DevFrame capabilities in the module CNode.
 *
 * \param bi           Bootinfo structure to record module regions in.
 * \param initrd_base  Start of the CPIO archive in kernel virtual memory.
 * \param initrd_bytes Size of the archive in bytes.
 */
static void create_modules_from_initrd(struct bootinfo* bi,
                                       const uint8_t* initrd_base,
                                       size_t initrd_bytes)
{
    errval_t err;
    lvaddr_t mmstrings_base = 0;
    lvaddr_t mmstrings      = 0;

    // CPIO archive is crafted such that first file is
    // command-line strings for "modules" - ie menu.lst. The
    // subsequent file follow in the order they appear in
    // menu.lst.arm.
    const uint8_t* data;
    size_t bytes;

    if (cpio_get_file_by_name(initrd_base, initrd_bytes,
                              "menu.lst.modules",
                              &data, &bytes))
    {
        assert(bytes < BASE_PAGE_SIZE);

        mmstrings_base = alloc_mem(BASE_PAGE_SIZE);
        mmstrings      = mmstrings_base;

        STARTUP_PROGRESS();

        // Create cap for strings area in first slot of modulecn
        err = caps_create_new(
                  ObjType_Frame, mem_to_local_phys(mmstrings_base),
                  BASE_PAGE_BITS, BASE_PAGE_BITS,
                  my_core_id,
                  caps_locate_slot(
                      CNODE(spawn_state.modulecn),
                      spawn_state.modulecn_slot++)
                  );
        assert(err_is_ok(err));

        STARTUP_PROGRESS();

        // Copy strings from file into allocated page
        memcpy((void*)mmstrings_base, data, bytes);
        ((char*)mmstrings_base)[bytes] = '\0';

        STARTUP_PROGRESS();

        // Skip first line (corresponds to bootscript in archive)
        // NOTE(review): strtok uses static state and mutates the buffer;
        // safe only because this runs single-threaded at boot.
        strtok((char*)mmstrings_base, "\r\n");

        STARTUP_PROGRESS();

        assert(bi->regions_length == 0);
        int ord = 1;
        const char* name;
        while ((mmstrings = (lvaddr_t)strtok(NULL, "\r\n")) != 0) {
            if (!cpio_get_file_by_ordinal(initrd_base, initrd_bytes, ord,
                                          &name, &data, &bytes)) {
                panic("Failed to find file\n");
            }
            ord++;

            debug(SUBSYS_STARTUP,
                  "Creating caps for \"%s\" (Command-line \"%s\")\n",
                  name, (char*)mmstrings);

            // Copy file from archive into RAM.
            // TODO: Give up archive space.
            size_t pa_bytes = round_up(bytes, BASE_PAGE_SIZE);
            lpaddr_t pa = alloc_phys(pa_bytes);
            memcpy((void*)local_phys_to_mem(pa), data, bytes);

            struct mem_region* region = &bi->regions[bi->regions_length++];
            // NOTE(review): unlike the other create_module paths in this
            // codebase, region->mr_base is never set here — confirm whether
            // userspace consumers of these module regions rely on it.
            region->mr_type    = RegionType_Module;
            region->mrmod_slot = spawn_state.modulecn_slot;
            region->mrmod_size = pa_bytes;
            region->mrmod_data = mmstrings - mmstrings_base;

            assert((pa & BASE_PAGE_MASK) == 0);
            assert((pa_bytes & BASE_PAGE_MASK) == 0);

            // One page-sized cap per page of the module image.
            while (pa_bytes != 0)
            {
                assert(spawn_state.modulecn_slot
                       < (1UL << spawn_state.modulecn->cap.u.cnode.bits));
                // create as DevFrame cap to avoid zeroing memory contents
                err = caps_create_new(
                          ObjType_DevFrame, pa, BASE_PAGE_BITS,
                          BASE_PAGE_BITS,
                          my_core_id,
                          caps_locate_slot(
                              CNODE(spawn_state.modulecn),
                              spawn_state.modulecn_slot++)
                          );
                assert(err_is_ok(err));

                pa       += BASE_PAGE_SIZE;
                pa_bytes -= BASE_PAGE_SIZE;
            }
        }
    }
    else
    {
        panic("No command-line file.\n");
    }
}
/**
 * \brief Spawn the monitor/init task on an application (non-BSP) x86-32 core.
 *
 * Builds a command line describing the booting core, spawns the common init
 * environment, maps the inter-core URPC frame into init's vspace, and
 * ELF-loads the monitor binary supplied by the booting core.
 *
 * \param core_data   Boot information passed from the core that booted us.
 * \param name        Name for the new domain.
 * \param alloc_phys  Physical-memory allocator to use during spawn.
 *
 * \return The fully initialized DCB of the new init task.
 */
struct dcb *spawn_app_init(struct x86_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32,
             core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

#ifdef __scc__
    // On SCC, additionally pass the URPC frame's physical base address.
    char urpc_frame_base_char[30];
    snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
             "frame=%" PRIuGENPADDR, core_data->urpc_frame_base);
    argv[argc++] = urpc_frame_base_char;
#endif

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits,
                          core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    // Retype in place to a regular Frame so the channel contents set up by
    // the booting core are preserved.
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(MON_URPC_BASE) + i],
                          urpc_ptr + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP |
                          paging_elf_to_page_flags(PF_R | PF_W));
    }

    // elf load the domain
    genvaddr_t entry_point;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = entry_point;

    return init_dcb;
}
/**
 * \brief Spawn the init domain on the bootstrap (BSP) core.
 *
 * Allocates and zeroes the bootinfo region, builds init's command line,
 * sets up the common init environment, maps bootinfo into init's VSpace
 * at BOOTINFO_BASE, ELF-loads the init image located via multiboot, and
 * creates the module/physical-memory capabilities init will start with.
 *
 * \param name        Name of the init module to look up in multiboot info.
 * \param alloc_phys  Allocator for physical memory used during setup.
 *
 * \return Pointer to the newly created init DCB.
 */
struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Only the first core can run this code */
    assert(apic_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%"PRIuLPADDR, BOOTINFO_BASE);
    // Room for the two fixed arguments plus up to three optional SCC
    // arguments appended below.
    const char *argv[6] = { "init", bootinfochar };
    int argc = 2;

#ifdef __scc__
    // On SCC a non-zero URPC frame means another core started us; forward
    // its core id, channel id and frame address to init on the command line.
    if(glbl_core_data->urpc_frame_base != 0) {
        char coreidchar[10];
        snprintf(coreidchar, sizeof(coreidchar), "%d",
                 glbl_core_data->src_core_id);
        argv[argc++] = coreidchar;

        char chan_id_char[30];
        snprintf(chan_id_char, sizeof(chan_id_char), "chanid=%"PRIu32,
                 glbl_core_data->chan_id);
        argv[argc++] = chan_id_char;

        char urpc_frame_base_char[30];
        snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
                 "frame=%" PRIuGENPADDR, glbl_core_data->urpc_frame_base);
        argv[argc++] = urpc_frame_base_char;
    }
#endif

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             bootinfo_phys, alloc_phys);

    /* Map bootinfo R/W into VSpace at vaddr 0x200000 (BOOTINFO_BASE) */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[0],
                            mem_to_local_phys((lvaddr_t)init_pdir));
    // NOTE(review): the hard-coded index 1 appears to be
    // X86_32_PDIR_BASE(BOOTINFO_BASE) for PAE (2MB directory granularity)
    // -- confirm it stays in sync if BOOTINFO_BASE changes.
    paging_x86_32_map_table(&init_pdir[1],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i],
                          bootinfo_phys + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP |
                          paging_elf_to_page_flags(PF_R|PF_W));
    }
#else
    paging_x86_32_map_table(&init_pdir[0],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    // NOTE(review): offset 512 appears to be
    // X86_32_PTABLE_BASE(BOOTINFO_BASE) (0x200000 >> 12 within a 4MB
    // table) -- confirm it stays in sync if BOOTINFO_BASE changes.
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i + 512],
                          bootinfo_phys + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP |
                          paging_elf_to_page_flags(PF_R|PF_W));
    }
#endif

    /* Load init ELF32 binary */
    struct multiboot_modinfo *module = multiboot_find_module(name);
    if (module == NULL) {
        panic("Could not find init module!");
    }
    genvaddr_t init_ep;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(module->mod_start),
                   MULTIBOOT_MODULE_SIZE(*module), &init_ep);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // The domain starts disabled; its initial PC lives in the disabled
    // save area of the dispatcher.
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = init_ep;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    // alloc_phys(0) returns the current allocation watermark without
    // allocating; everything above it becomes init's free RAM caps.
    lpaddr_t init_alloc_end = alloc_phys(0); // XXX
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = NEEDED_KERNEL_SPACE; // Size of kernel

    /* for (int i = 0; i < bootinfo->regions_length; i++) { */
    /*     printf("%d region %d: 0x%09" PRIxPTR " - 0x%09lx (%lu MB, %u bits)\n", */
    /*            bootinfo->regions[i].mr_type, i, bootinfo->regions[i].mr_base, */
    /*            bootinfo->regions[i].mr_base + (1UL<<bootinfo->regions[i].mr_bits), */
    /*            bootinfo->regions[i].mr_bits >= 20 */
    /*            ? 1UL << (bootinfo->regions[i].mr_bits - 20) : 0, */
    /*            bootinfo->regions[i].mr_bits); */
    /* } */

#if 0
    // NOTE(review): this disabled call passes five arguments to
    // caps_create_new, while the live call sites in this file pass six
    // (including an owning core id) -- it would need updating before
    // being re-enabled.
    // If app core, map (static) URPC channel
    if(kernel_scckernel != 0) {
        printf("SCC app kernel, frame at: 0x%x\n", kernel_scckernel);
#define TASKCN_SLOT_MON_URPC    (TASKCN_SLOTS_USER+6)   ///< Frame cap for urpc comm.
        err = caps_create_new(ObjType_Frame, kernel_scckernel, 13, 13,
                              caps_locate_slot(CNODE(taskcn), TASKCN_SLOT_MON_URPC));
        assert(err_is_ok(err));
    }
#endif

    return init_dcb;
}
/**
 * \brief Arch-specific common setup shared by BSP and app-core init spawn.
 *
 * Performs the arch-independent spawn, builds init's page tables, maps the
 * dispatcher into init's VSpace at DISPATCHER_BASE, creates the IO
 * capability in the task CNode, and initializes the DCB and dispatcher
 * fields (VSpace root, disabled register save area, segment selectors).
 *
 * \param st             Spawn state tracking CNodes/slots being populated.
 * \param name           Domain name, copied into the dispatcher.
 * \param argc           Number of command-line arguments.
 * \param argv           Command-line argument vector; argv[0] is the name
 *                       recorded in the dispatcher.
 * \param bootinfo_phys  Physical address of bootinfo (0 on app cores).
 * \param alloc_phys     Allocator for physical memory used during setup.
 *
 * \return Pointer to the newly created init DCB.
 */
static struct dcb *spawn_init_common(struct spawn_state *st, const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Perform arch-independent spawn */
    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(st, name, argc, argv, bootinfo_phys,
                                        ARGS_BASE, alloc_phys, &paramaddr);

    /* Init page tables */
    init_page_tables(st, alloc_phys);

    /* Map dispatcher R/W into VSpace starting at vaddr 0x204000
     * (Starting after Bootinfo pages)*/
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < DISPATCHER_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(DISPATCHER_BASE) + i],
                          mem_to_local_phys(init_dcb->disp) +
                          i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP |
                          paging_elf_to_page_flags(PF_R | PF_W));
    }

    struct dispatcher_shared_generic *init_disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);

    // Hand init the address of its command-line/argument page.
    registers_set_param(&init_disp_x86_32->enabled_save_area, paramaddr);

    // Map IO cap in task cnode
    struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
    err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    /* Set fields in DCB */
    // Set Vspace: root of the paging hierarchy is the PDPT with PAE,
    // otherwise the page directory.
#ifdef CONFIG_PAE
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdpte);
#else
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdir);
#endif

    /* Initialize dispatcher */
    init_disp->disabled = true;
    strncpy(init_disp->name, argv[0], DISP_NAME_LEN);
    // strncpy does not NUL-terminate when argv[0] has >= DISP_NAME_LEN
    // characters; terminate explicitly so later readers of the name
    // cannot run off the end of the buffer.
    init_disp->name[DISP_NAME_LEN - 1] = '\0';

    /* tell init the vspace addr of its dispatcher */
    init_disp->udisp = DISPATCHER_BASE;

    // NOTE(review): edi presumably carries the dispatcher address to the
    // entry stub -- confirm against the dispatcher entry code.
    init_disp_x86_32->disabled_save_area.edi = DISPATCHER_BASE;
    init_disp_x86_32->disabled_save_area.fs = 0;
    init_disp_x86_32->disabled_save_area.gs = 0;
    init_disp_x86_32->disabled_save_area.cs = USER_CS;
    init_disp_x86_32->disabled_save_area.ss = USER_SS;
    init_disp_x86_32->disabled_save_area.eflags = USER_EFLAGS;

    return init_dcb;
}
/**
 * \brief Build init's initial page tables and switch to its VSpace.
 *
 * Allocates the (optional PAE) PDPT, the page directories and the page
 * tables, clears them, places capabilities for every table into the page
 * CNode, wires page tables into their directories for the whole init
 * address-space window, switches to init's VSpace, and finally maps the
 * command-line argument page at ARGS_BASE.
 *
 * \param st          Spawn state; provides the page CNode and args page.
 * \param alloc_phys  Allocator for physical memory backing the tables.
 */
static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
#ifdef CONFIG_PAE
    init_pdpte = (void *)local_phys_to_mem(alloc_phys(X86_32_PDPTE_SIZE
                                           * sizeof(union x86_32_pdpte_entry)));
#endif
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_32_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE
                           * sizeof(union x86_32_ptable_entry)));

    /* Page table setup */
    /* Initialize init page tables */
    for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
        paging_x86_32_clear_pdir(&init_pdir[j]);
        for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
            paging_x86_32_clear_ptable(&init_ptable[j * X86_32_PTABLE_SIZE + k]);
        }
    }

    /* Map pagetables into pageCN */
    int pagecn_pagemap = 0;
#ifdef CONFIG_PAE
    // Map PDPTE into first slot in pagecn
    caps_create_new(ObjType_VNode_x86_32_pdpt,
                    mem_to_local_phys((lvaddr_t)init_pdpte),
                    BASE_PAGE_BITS, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
#endif
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir)
                        + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable)
                        + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }

    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unneccessary special cases, we connect them here.
    for(lvaddr_t vaddr = 0; vaddr < X86_32_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        // With PAE there is an extra level: index through the PDPT to
        // select the directory, then the directory to select the table.
        union x86_32_pdpte_entry *pdpte_base =
            &init_pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE
                       + X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE
                         * X86_32_PTABLE_SIZE + X86_32_PDIR_BASE(vaddr)
                         * X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr)];

        paging_x86_32_map_pdpte(pdpte_base,
                                mem_to_local_phys((lvaddr_t)pdir_base));
#else
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
                         + X86_32_PTABLE_BASE(vaddr)];
#endif
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }

    /* Switch to init's VSpace */
    // NOTE: everything after this point runs on init's page tables; the
    // ARGS mapping below must therefore happen after the switch.
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdir));
#endif

    /***** VSpace available *****/

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(ARGS_BASE) + i],
                          st->args_page + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP |
                          paging_elf_to_page_flags(PF_R | PF_W));
    }
}