/*
 * Load an ELF file into physical memory at the given physical address.
 *
 * Return the byte past the last byte of the physical address used.
 */
static paddr_t load_elf(const char *name, void *elf,
                        paddr_t dest_paddr, struct image_info *info)
{
    uint64_t min_vaddr, max_vaddr;
    size_t image_size;

    /* Fetch image info. */
    elf_getMemoryBounds(elf, 0, &min_vaddr, &max_vaddr);
    max_vaddr = ROUND_UP(max_vaddr, PAGE_BITS);
    image_size = (size_t)(max_vaddr - min_vaddr);

    /* Ensure our starting physical address is aligned. */
    if (!IS_ALIGNED(dest_paddr, PAGE_BITS)) {
        printf("Attempting to load ELF at unaligned physical address!\n");
        abort();
    }

    /* Ensure that the ELF file itself is 4-byte aligned in memory, so that
     * libelf can perform word accesses on it.
     *
     * BUG FIX: this previously tested dest_paddr (already page-aligned per
     * the check above) instead of the address of the ELF file in memory. */
    if (!IS_ALIGNED((uintptr_t)elf, 2)) {
        printf("Input ELF file not 4-byte aligned in memory!\n");
        abort();
    }

    /* Print diagnostics. */
    printf("ELF-loading image '%s'\n", name);
    printf("  paddr=[%lx..%lx]\n", dest_paddr, dest_paddr + image_size - 1);
    printf("  vaddr=[%lx..%lx]\n", (vaddr_t)min_vaddr, (vaddr_t)max_vaddr - 1);
    printf("  virt_entry=%lx\n", (vaddr_t)elf_getEntryPoint(elf));

    /* Ensure the ELF file is valid. */
    if (elf_checkFile(elf) != 0) {
        printf("Attempting to load invalid ELF file '%s'.\n", name);
        abort();
    }

    /* Ensure sane alignment of the image. */
    if (!IS_ALIGNED(min_vaddr, PAGE_BITS)) {
        printf("Start of image '%s' is not 4K-aligned!\n", name);
        abort();
    }

    /* Ensure that the region we want to write to is sane. */
    ensure_phys_range_valid(dest_paddr, dest_paddr + image_size);

    /* Copy the data. */
    unpack_elf_to_paddr(elf, dest_paddr);

    /* Record information about the placement of the image. */
    info->phys_region_start = dest_paddr;
    info->phys_region_end = dest_paddr + image_size;
    info->virt_region_start = (vaddr_t)min_vaddr;
    info->virt_region_end = (vaddr_t)max_vaddr;
    info->virt_entry = (vaddr_t)elf_getEntryPoint(elf);
    info->phys_virt_offset = dest_paddr - (vaddr_t)min_vaddr;

    /* Return address of next free physical frame. */
    return ROUND_UP(dest_paddr + image_size, PAGE_BITS);
}
/*
 * Load the entire ELF file into physical frames and map fpages
 * corresponding to the virtual addresses in the ELF file for the process
 * identified by new_tid.
 *
 * Returns 0 on success, -1 on failure. On failure the partially-built
 * address space is torn down via unmap_process().
 */
int load_code_segment_virtual(char *elfFile, L4_ThreadId_t new_tid)
{
    uint32_t min[2];
    uint32_t max[2];

    /* elf_getMemoryBounds() writes 64-bit bounds; min[1]/max[1] hold the
     * 32-bit halves actually used below.
     * NOTE(review): this layout assumes a particular endianness/word order
     * for the uint64_t-to-uint32_t[2] aliasing — confirm on this platform. */
    elf_getMemoryBounds(elfFile, 0, (uint64_t *)min, (uint64_t *)max);

    /* Reserve frames for every page between the (page-rounded) bounds. */
    L4_Word_t lower_address = ((L4_Word_t)min[1] / PAGESIZE) * PAGESIZE;
    L4_Word_t upper_address = ((L4_Word_t)max[1] / PAGESIZE) * PAGESIZE;

    while (lower_address <= upper_address) {
        L4_Word_t frame = frame_alloc();
        if (!frame) {
            /* Out of frames: tear down whatever was mapped so far. */
            unmap_process(new_tid);
            return -1;
        }

        L4_Fpage_t targetpage = L4_FpageLog2(lower_address, 12);
        lower_address += PAGESIZE;

        L4_Set_Rights(&targetpage, L4_FullyAccessible);
        L4_PhysDesc_t phys = L4_PhysDesc(frame, L4_DefaultMemory);

        /* Map the frame into the root task but record the page-table entry
         * against new_tid; the mappings are moved over to new_tid once the
         * ELF loading is done. */
        if (L4_MapFpage(L4_Myself(), targetpage, phys)) {
            page_table[(frame - new_low) / PAGESIZE].tid = new_tid;
            page_table[(frame - new_low) / PAGESIZE].pinned = 1;
            page_table[(frame - new_low) / PAGESIZE].pageNo = targetpage;
        } else {
            /* BUG FIX: previously this tore the process down but kept
             * looping (and eventually reported success). Fail the load. */
            unmap_process(new_tid);
            return -1;
        }
    }

    /* The pages are mapped, so elf_loadFile() can now write to the ELF's
     * virtual addresses directly. */
    if (elf_loadFile(elfFile, 0) == 1) {
        /* ELF file was successfully loaded. Re-map the fpages that were
         * previously mapped to the root task over to new_tid. */
        for (int i = 0; i < numPTE; i++) {
            if (L4_ThreadNo(new_tid) == L4_ThreadNo(page_table[i].tid)) {
                L4_UnmapFpage(L4_Myself(), page_table[i].pageNo);
                L4_PhysDesc_t phys =
                    L4_PhysDesc(new_low + i * PAGESIZE, L4_DefaultMemory);
                if (!L4_MapFpage(new_tid, page_table[i].pageNo, phys)) {
                    unmap_process(new_tid);
                    return -1;
                }
            }
        }
    } else {
        /* BUG FIX: previously this fell through and returned 0 (success)
         * even though the ELF load failed. */
        unmap_process(new_tid);
        return -1;
    }

    /* Remove later */
    L4_CacheFlushAll();
    return 0;
}
/* * ELF-loader for ARM systems. * * We are currently running out of physical memory, with an ELF file for the * kernel and one or more ELF files for the userspace image. (Typically there * will only be one userspace ELF file, though if we are running a multi-core * CPU, we may have multiple userspace images; one per CPU.) These ELF files * are packed into an 'ar' archive. * * The kernel ELF file indicates what physical address it wants to be loaded * at, while userspace images run out of virtual memory, so don't have any * requirements about where they are located. We place the kernel at its * desired location, and then load userspace images straight after it in * physical memory. * * Several things could possibly go wrong: * * 1. The physical load address of the kernel might want to overwrite this * ELF-loader; * * 2. The physical load addresses of the kernel might not actually be in * physical memory; * * 3. Userspace images may not fit in physical memory, or may try to overlap * the ELF-loader. * * We attempt to check for some of these, but some may go unnoticed. */ void load_images(struct image_info *kernel_info, struct image_info *user_info, int max_user_images, int *num_images) { int i; uint64_t kernel_phys_start, kernel_phys_end; paddr_t next_phys_addr; const char *elf_filename; unsigned long unused; /* Load kernel. */ void *kernel_elf = cpio_get_file(_archive_start, "kernel.elf", &unused); if (kernel_elf == NULL) { printf("No kernel image present in archive!\n"); abort(); } if (elf_checkFile(kernel_elf)) { printf("Kernel image not a valid ELF file!\n"); abort(); } elf_getMemoryBounds(kernel_elf, 1, &kernel_phys_start, &kernel_phys_end); next_phys_addr = load_elf("kernel", kernel_elf, (paddr_t)kernel_phys_start, kernel_info); /* * Load userspace images. * * We assume (and check) that the kernel is the first file in the archive, * and then load the (n+1)'th file in the archive onto the (n)'th CPU. 
*/ (void)cpio_get_entry(_archive_start, 0, &elf_filename, &unused); if (strcmp(elf_filename, "kernel.elf") != 0) { printf("Kernel image not first image in archive.\n"); abort(); } *num_images = 0; for (i = 0; i < max_user_images; i++) { /* Fetch info about the next ELF file in the archive. */ void *user_elf = cpio_get_entry(_archive_start, i + 1, &elf_filename, &unused); if (user_elf == NULL) { break; } /* Load the file into memory. */ next_phys_addr = load_elf(elf_filename, user_elf, next_phys_addr, &user_info[*num_images]); *num_images = i + 1; } }
/*
 * Unpack an ELF file to the given physical address.
 */
static void unpack_elf_to_paddr(void *elf, paddr_t dest_paddr)
{
    uint64_t min_vaddr, max_vaddr;

    /* Determine the image's extent and the constant offset between its
     * virtual addresses and the destination physical addresses. */
    elf_getMemoryBounds(elf, 0, &min_vaddr, &max_vaddr);
    size_t image_size = (size_t)(max_vaddr - min_vaddr);
    word_t phys_virt_offset = dest_paddr - (paddr_t)min_vaddr;

    /* The ELF file may be sparse: clear the whole destination region so
     * gaps between segments read as zero. */
    memset((char *)dest_paddr, 0, image_size);

    /* Copy each loadable segment into place. */
    for (uint16_t ph = 0; ph < elf_getNumProgramHeaders(elf); ph++) {
        /* Only PT_LOAD segments occupy memory. */
        if (elf_getProgramHeaderType(elf, ph) != PT_LOAD) {
            continue;
        }

        vaddr_t seg_vaddr = elf_getProgramHeaderVaddr(elf, ph);
        size_t seg_size = elf_getProgramHeaderFileSize(elf, ph);
        size_t seg_offset = elf_getProgramHeaderOffset(elf, ph);

        memcpy((char *)seg_vaddr + phys_virt_offset,
               (char *)elf + seg_offset, seg_size);
    }
}
/*
 * Load the ELF image contained in a multiboot boot module into physical
 * memory at (or after) load_paddr, recording the image's placement in the
 * global boot_state.ui_info.
 *
 * Returns the physical address one past the end of the loaded image, or 0
 * on any validation failure (invalid ELF, bad bounds/alignment, entry point
 * outside the image, or no usable physical region).
 */
BOOT_CODE static paddr_t load_boot_module(multiboot_module_t* boot_module, paddr_t load_paddr)
{
    v_region_t v_reg;
    word_t entry;
    /* The module's payload is the ELF file itself, starting at ->start. */
    Elf_Header_t* elf_file = (Elf_Header_t*)(word_t)boot_module->start;

    if (!elf_checkFile(elf_file)) {
        printf("Boot module does not contain a valid ELF image\n");
        return 0;
    }

    v_reg = elf_getMemoryBounds(elf_file);
    entry = elf_file->e_entry;

    /* An end bound of 0 means the ELF contained no segments at all. */
    if (v_reg.end == 0) {
        printf("ELF image in boot module does not contain any segments\n");
        return 0;
    }
    /* Round the image end up to a page boundary. */
    v_reg.end = ROUND_UP(v_reg.end, PAGE_BITS);

    printf("size=0x%lx v_entry=%p v_start=%p v_end=%p ",
           v_reg.end - v_reg.start,
           (void*)entry,
           (void*)v_reg.start,
           (void*)v_reg.end
          );

    if (!IS_ALIGNED(v_reg.start, PAGE_BITS)) {
        printf("Userland image virtual start address must be 4KB-aligned\n");
        return 0;
    }
    if (v_reg.end + 2 * BIT(PAGE_BITS) > PPTR_USER_TOP) {
        /* for IPC buffer frame and bootinfo frame, need 2*4K of additional userland virtual memory */
        printf("Userland image virtual end address too high\n");
        return 0;
    }
    /* The entry point must lie inside the image's virtual range. */
    if ((entry < v_reg.start) || (entry >= v_reg.end)) {
        printf("Userland image entry point does not lie within userland image\n");
        return 0;
    }

    /* Find a free physical region large enough for the whole image. */
    load_paddr = find_load_paddr(load_paddr, v_reg.end - v_reg.start);
    assert(load_paddr);

    /* fill ui_info struct */
    boot_state.ui_info.pv_offset = load_paddr - v_reg.start;
    boot_state.ui_info.p_reg.start = load_paddr;
    /* Advance load_paddr past the image; it doubles as p_reg.end and as
     * this function's return value. */
    load_paddr += v_reg.end - v_reg.start;
    boot_state.ui_info.p_reg.end = load_paddr;
    boot_state.ui_info.v_entry = entry;

    printf("p_start=0x%lx p_end=0x%lx\n",
           boot_state.ui_info.p_reg.start,
           boot_state.ui_info.p_reg.end
          );

    if (!module_paddr_region_valid(
                boot_state.ui_info.p_reg.start,
                boot_state.ui_info.p_reg.end)) {
        printf("End of loaded userland image lies outside of usable physical memory\n");
        return 0;
    }

    /* initialise all initial userland memory and load potentially sparse ELF image */
    memzero(
        (void*)boot_state.ui_info.p_reg.start,
        boot_state.ui_info.p_reg.end - boot_state.ui_info.p_reg.start
    );
    elf_load(elf_file, boot_state.ui_info.pv_offset);

    return load_paddr;
}