/*
 * Cold-boot entry initialization.
 *
 * Runs before the MMU is enabled: performs the pre-MMU relocations listed in
 * `reloc_list` (this copies .warm_crt0 to its VMA), does the required cache
 * maintenance, records the target firmware version, initializes the DMA
 * controllers (after which TZRAM is accessible), builds the translation
 * tables and turns the MMU on, then performs the remaining (post-MMU)
 * relocations and flushes caches again.
 *
 * NOTE(review): statement order here is load-bearing — relocations, cache
 * flushes, DMA init and MMU enable must happen exactly in this sequence.
 *
 * reloc_list  - relocation descriptor table placed by the linker/crt0.
 * start_cold  - load address of the cold-boot image; becomes the relocation
 *               base for all entries in `reloc_list`.
 */
void coldboot_init(coldboot_crt0_reloc_list_t *reloc_list, uintptr_t start_cold) {
    //MAILBOX_NX_SECMON_BOOT_TIME = TIMERUS_CNTR_1US_0;
    /* Custom approach */
    reloc_list->reloc_base = start_cold;

    /* TODO: Set NX BOOTLOADER clock time field */

    /* This at least copies .warm_crt0 to its VMA. */
    for(size_t i = 0; i < reloc_list->nb_relocs_pre_mmu_init; i++) {
        do_relocation(reloc_list, i);
    }
    /* At this point, we can (and will) access functions located in .warm_crt0 */

    /*
        From https://events.static.linuxfound.org/sites/events/files/slides/slides_17.pdf :
        Caches may write back dirty lines at any time:
            - To make space for new allocations
            - Even if MMU is off
            - Even if Cacheable accesses are disabled (caches are never 'off')

        It should be fine to clear that here and not before.
    */
    flush_dcache_all();
    invalidate_icache_all();

    /* Set target firmware. */
    g_exosphere_target_firmware_for_init = exosphere_get_target_firmware_for_init();

    /* Initialize DMA controllers, and write to AHB_GIZMO_TZRAM. */
    /* TZRAM accesses should work normally after this point. */
    init_dma_controllers(g_exosphere_target_firmware_for_init);

    configure_ttbls();
    set_memory_registers_enable_mmu();

    /* Copy or clear the remaining sections */
    for(size_t i = 0; i < reloc_list->nb_relocs_post_mmu_init; i++) {
        do_relocation(reloc_list, reloc_list->nb_relocs_pre_mmu_init + i);
    }

    flush_dcache_all();
    invalidate_icache_all();
    /* At this point we can access all the mapped segments (all other functions, data...) normally */
}
/* export { */
/**
 * Load a 32-bit little-endian x86 relocatable ELF object (ET_REL) from
 * `filename`, allocate and populate its SHF_ALLOC sections, apply REL
 * relocations (resolving undefined symbols through `getsym_fun(getsym_arg,
 * name)` inside do_relocation), drop local symbols, and set final page
 * protections on every allocated section.
 *
 * Returns a heap-allocated module on success (caller frees via
 * module_unload), or NULL on any failure; all partially-acquired resources
 * are released through the CATCH path of the TRY_* macros.
 */
struct module *module_load(const char *filename, getsym_t getsym_fun,
                           void *getsym_arg) {
  FILE *elf_file = NULL;
  Elf32_Shdr *section_headers = NULL;
  struct module *mod = NULL;
  TRY_PTR(mod = malloc(sizeof(struct module)));
  module_init(mod);

  TRY_PTR(elf_file = fopen(filename, "rb"));
  Elf32_Ehdr elf_header;
  TRY_TRUE(fread(&elf_header, sizeof(Elf32_Ehdr), 1, elf_file) == 1);
  /* Accept only: ELF magic, 32-bit class, little-endian, relocatable, x86. */
  TRY_TRUE(elf_header.e_ident[EI_MAG0] == ELFMAG0 &&
           elf_header.e_ident[EI_MAG1] == ELFMAG1 &&
           elf_header.e_ident[EI_MAG2] == ELFMAG2 &&
           elf_header.e_ident[EI_MAG3] == ELFMAG3);
  TRY_TRUE(elf_header.e_ident[EI_CLASS] == ELFCLASS32);
  TRY_TRUE(elf_header.e_ident[EI_DATA] == ELFDATA2LSB);
  TRY_TRUE(elf_header.e_type == ET_REL);
  TRY_TRUE(elf_header.e_machine == EM_386);

  /* A section header table must be present (e_shoff == 0 means "none"). */
  TRY_PTR(elf_header.e_shoff);
  TRY_TRUE(fseek(elf_file, elf_header.e_shoff, SEEK_SET) == 0);
  TRY_TRUE(elf_header.e_shentsize == sizeof(Elf32_Shdr));
  // If the number of sections is greater than or equal to SHN_LORESERVE
  // (0xff00), e_shnum has the value SHN_UNDEF (0) and the actual number of
  // section header table entries is contained in the sh_size field of the
  // section header at index 0
  // We do not handle this extension
  TRY_TRUE(elf_header.e_shnum != SHN_UNDEF);
  TRY_TRUE(elf_header.e_shnum < SHN_LORESERVE);
  TRY_PTR(section_headers = malloc(sizeof(Elf32_Shdr) * elf_header.e_shnum));
  TRY_TRUE(fread(section_headers, elf_header.e_shentsize, elf_header.e_shnum,
                 elf_file) == elf_header.e_shnum);

  // Count and create sections
  mod->sections_sz = elf_header.e_shnum;
  TRY_PTR(mod->sections = malloc(sizeof(struct section) * mod->sections_sz));
  memset(mod->sections, 0, sizeof(struct section) * mod->sections_sz);

  // Not actually first global but we will treat all of them as global
  size_t global_sym_idx = 0;
  size_t symtab_idx = 0;
  // Load sections
  for (size_t idx = 0; idx < elf_header.e_shnum; idx++) {
    Elf32_Shdr *shdr = section_headers + idx;
    struct section *section;
    TRY_PTR(section = module_get_section(mod, idx));
    switch (shdr->sh_type) {
      case SHT_NULL:
      case SHT_STRTAB:
        // We will read appropriate strings table with symbols table
      case SHT_REL:
        // We will perform relocations later on
        break;
      case SHT_SYMTAB:
        /* Exactly one symbol table is supported. */
        TRY_TRUE(symtab_idx == 0);
        symtab_idx = idx;
        TRY_SYS(module_read_symbols(mod, shdr, elf_file));
        /* sh_info is the index one past the last local symbol. */
        global_sym_idx = shdr->sh_info;
        // Field sh_link contains section header index of associated string table
        TRY_TRUE(IS_VALID_SHNDX(shdr->sh_link) &&
                 shdr->sh_link < elf_header.e_shnum);
        TRY_SYS(module_read_strings(mod, section_headers + shdr->sh_link,
                                    elf_file));
        break;
      case SHT_NOBITS:
        /* .bss-style section: allocate and zero-fill, nothing to read. */
        if ((shdr->sh_flags & SHF_ALLOC) && shdr->sh_size > 0) {
          TRY_SYS(section_alloc(section, shdr));
          memset((void *) section->addr, 0, shdr->sh_size);
        }
        break;
      case SHT_PROGBITS:
      default:
        /* Sections with file contents: allocate and read the payload. */
        if ((shdr->sh_flags & SHF_ALLOC) && shdr->sh_size > 0) {
          TRY_SYS(section_alloc(section, shdr));
          TRY_TRUE(fseek(elf_file, shdr->sh_offset, SEEK_SET) == 0);
          TRY_TRUE(fread((void *) section->addr, shdr->sh_size, 1,
                         elf_file) == 1);
        }
        break;
    }
  }
  // An empty string table section is permitted.
  // TRY_PTR(mod->strings);
  TRY_PTR(mod->symbols);

  // Perform relocations
  for (size_t idx = 0; idx < elf_header.e_shnum; idx++) {
    Elf32_Shdr *shdr = section_headers + idx;
    if (shdr->sh_type == SHT_REL && shdr->sh_link == symtab_idx) {
      // missing check for (shdr->sh_flags & SHF_INFO_LINK)
      TRY_TRUE(fseek(elf_file, shdr->sh_offset, SEEK_SET) == 0);
      TRY_TRUE(sizeof(Elf32_Rel) == shdr->sh_entsize);
      size_t rel_num = shdr->sh_size / shdr->sh_entsize;
      /* Reject a truncated/ragged relocation section. */
      TRY_TRUE(shdr->sh_size == rel_num * shdr->sh_entsize);
      struct section *dest_section;
      TRY_PTR(dest_section = module_get_section(mod, shdr->sh_info));
      if (section_is_alloc(dest_section)) {
        /* rel_idx: renamed from `idx` — the original shadowed the outer
           loop variable. */
        for (size_t rel_idx = 0; rel_idx < rel_num; rel_idx++) {
          Elf32_Rel relocation;
          TRY_TRUE(fread(&relocation, sizeof(Elf32_Rel), 1, elf_file) == 1);
          TRY_SYS(do_relocation(mod, dest_section, &relocation,
                                getsym_fun, getsym_arg));
        }
      }
    }
  }

  // Compress symbol table (remove local symbols after relocations)
  TRY_TRUE(global_sym_idx < mod->symbols_sz);
  mod->symbols_sz -= global_sym_idx;
  memmove(mod->symbols, mod->symbols + global_sym_idx,
          mod->symbols_sz * sizeof(symbol_t));
  /* Shrink via a temporary: assigning realloc's result directly to
     mod->symbols would lose the old pointer on failure (leak, and CATCH's
     module_unload would then free NULL instead of the live buffer). */
  symbol_t *shrunk = realloc(mod->symbols, mod->symbols_sz * sizeof(symbol_t));
  TRY_PTR(shrunk);
  mod->symbols = shrunk;

  // Set-up sections protection
  for (size_t idx = 0; idx < elf_header.e_shnum; idx++) {
    struct section *section;
    TRY_PTR(section = module_get_section(mod, idx));
    if (section_is_alloc(section)) {
      TRY_SYS(mprotect((void *) section->mmap_start, section->mmap_length,
                       section->mmap_prot));
    }
  }

  free(section_headers);
  fclose(elf_file);
  return mod;

CATCH:
  free(section_headers);
  if (elf_file) {
    fclose(elf_file);
  }
  module_unload(mod);
  return NULL;
}