static uint32_t contract(uint32_t new_size, heap_t *heap) {
    // Sanity check.
    ASSERT(new_size < heap->end_address - heap->start_address);

    // Get the nearest following page boundary.
    if(new_size & 0xFFF) {
        new_size &= 0xFFFFF000;
        new_size += 0x1000;
    }

    // Don't contract too far!
    if(new_size < HEAP_MIN_SIZE) {
        new_size = HEAP_MIN_SIZE;
    }

    uint32_t old_size = heap->end_address - heap->start_address;
    uint32_t i = old_size - 0x1000;

    // Release one page at a time until we've shrunk down to the new size.
    while(new_size < i) {
        free_frame(paging_get_page(heap->start_address + i, false, kernel_directory));
        i -= 0x1000;
        pages_wired--;
    }

    heap->end_address = heap->start_address + new_size;
    return new_size;
}
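/*
 * The rounding in contract() and expand() is the usual "round up to the next
 * 4 KiB boundary" idiom. A minimal, self-contained sketch of the same
 * arithmetic; the helper name page_align_up is purely illustrative and is not
 * part of this kernel:
 */
static inline uint32_t page_align_up(uint32_t size) {
    if(size & 0xFFF) {          // not already on a page boundary?
        size &= 0xFFFFF000;     // drop the sub-page bits...
        size += 0x1000;         // ...and move to the next page
    }
    return size;                // e.g. 0x1234 -> 0x2000, 0x3000 -> 0x3000
}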
static void expand(uint32_t new_size, heap_t *heap, int size) {
    // Sanity check.
    ASSERT(new_size > heap->end_address - heap->start_address);

    // Get the nearest following page boundary.
    if(new_size & 0xFFF) {
        new_size &= 0xFFFFF000;
        new_size += 0x1000;
    }

    // Make sure we are not overreaching ourselves.
    ASSERT(heap->start_address + new_size <= heap->max_address);

    // This should always be on a page boundary.
    uint32_t old_size = heap->end_address - heap->start_address;

    // Map frames until enough pages are mapped for the new size.
    uint32_t i = old_size;
    // new_size += 0x1000;

    while(i < new_size) {
        alloc_frame(paging_get_page(heap->start_address + i, true, kernel_directory),
                    (heap->supervisor) ? true : false, true);
        i += 0x1000;
        pages_wired++;
    }

    // kprintf("Expanding heap from 0x%X until 0x%X\n", heap->end_address, heap->start_address + new_size);
    heap->end_address = heap->start_address + new_size;
}
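/*
 * expand() relies on alloc_frame() to back each newly mapped heap page with a
 * physical frame. A minimal sketch of how a bit-per-frame allocator over the
 * frames[] bitmap (set up in paging_init() below) might look; the helper names
 * set_frame(), first_frame() and alloc_frame_sketch() are assumptions for
 * illustration, not necessarily this kernel's implementation:
 */
#define BITMAP_INDEX(frame)  ((frame) / 32)
#define BITMAP_OFFSET(frame) ((frame) % 32)

static void set_frame(uint32_t frame) {
    frames[BITMAP_INDEX(frame)] |= (1 << BITMAP_OFFSET(frame));
}

static int32_t first_frame(void) {
    for(uint32_t i = 0; i < nframes; i++) {
        if(!(frames[BITMAP_INDEX(i)] & (1 << BITMAP_OFFSET(i)))) {
            return i;               // index of the first free frame
        }
    }
    return -1;                      // out of physical memory
}

static void alloc_frame_sketch(page_t *page, bool kernel, bool writeable) {
    if(page->frame != 0) return;    // already backed by a frame

    int32_t idx = first_frame();
    ASSERT(idx != -1);

    set_frame(idx);
    page->present = 1;
    page->rw = writeable ? 1 : 0;
    page->user = kernel ? 0 : 1;
    page->frame = idx;              // frame number, i.e. physical address >> 12
}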
uint32_t kmalloc_int(uint32_t sz, bool align, uint32_t *phys) {
    if(kheap != NULL) {
        // If we have a kernel heap, use that
        void *addr = alloc(sz, align, kheap);

        if(phys != 0) {
            page_t *page = paging_get_page((uint32_t) addr, false, kernel_directory);
            uint32_t physAddr = (page->frame * 0x1000) + ((uint32_t) addr & 0xFFF);
            *phys = physAddr;
        }

        return (uint32_t) addr;
    } else {
        // Otherwise, use the 'dumb' placement allocation
        if(align && (kheap_placement_address & 0xFFF)) {
            // Align the placement address to the next page boundary
            kheap_placement_address &= 0xFFFFF000;
            kheap_placement_address += 0x1000;
        }

        if(phys) {
            *phys = kheap_placement_address;
        }

        uint32_t tmp = kheap_placement_address;
        kheap_placement_address += sz;
        return tmp;
    }
}
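/*
 * kmalloc_int() is the single entry point for both allocation strategies.
 * paging_init() below calls kmalloc() and kmalloc_a(), which presumably are
 * thin wrappers around it; a sketch of what those wrappers would look like
 * (the exact signatures in this kernel are an assumption):
 */
uint32_t kmalloc(uint32_t sz) {                     // plain allocation
    return kmalloc_int(sz, false, NULL);
}

uint32_t kmalloc_a(uint32_t sz) {                   // page-aligned allocation
    return kmalloc_int(sz, true, NULL);
}

uint32_t kmalloc_p(uint32_t sz, uint32_t *phys) {   // also return the physical address
    return kmalloc_int(sz, false, phys);
}

uint32_t kmalloc_ap(uint32_t sz, uint32_t *phys) {  // aligned, plus physical address
    return kmalloc_int(sz, true, phys);
}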
/*
 * Initialises paging and sets up page tables.
 */
void paging_init() {
    kheap = NULL;
    unsigned int i = 0;

    // Hopefully the bootloader got the correct himem size
    uint32_t mem_end_page = sys_multiboot_info->mem_upper * 1024;
    nframes = mem_end_page / 0x1000;
    pages_total = nframes;

    frames = (uint32_t *) kmalloc(INDEX_FROM_BIT(nframes));
    memclr(frames, INDEX_FROM_BIT(nframes));

    // Allocate mem for a page directory.
    kernel_directory = (page_directory_t *) kmalloc_a(sizeof(page_directory_t));
    ASSERT(kernel_directory != NULL);
    memclr(kernel_directory, sizeof(page_directory_t));
    current_directory = kernel_directory;

    // Map some pages in the kernel heap area.
    // Here we call get_page but not alloc_frame. This causes page_table_t's
    // to be created where necessary. We can't allocate frames yet because
    // they need to be identity mapped first below, and we can't increase
    // placement_address between identity mapping and enabling the heap.
    for(i = KHEAP_START; i < KHEAP_START + KHEAP_INITIAL_SIZE; i += 0x1000) {
        page_t *page = paging_get_page(i, true, kernel_directory);

        memclr(page, sizeof(page_t));
        page->rw = 1;
        page->user = 0;
    }

    // This step serves to map the kernel itself.
    // We don't allocate frames here, as that's done below.
    for(i = 0xC0000000; i < 0xC7FFF000; i += 0x1000) {
        page_t *page = paging_get_page(i, true, kernel_directory);

        memclr(page, sizeof(page_t));
        page->present = 1;
        page->rw = 1;
        page->user = 0;
        page->frame = ((i & 0x0FFFF000) >> 12);
    }

    // Allocate enough memory past the kernel heap so we can use the 'smart' allocator.
    // Note that this actually performs identity mapping.
    i = 0x00000000;
    while(i < (kheap_placement_address & 0x0FFFFFFF) + 0x1000) {
        alloc_frame(paging_get_page(i, true, kernel_directory), true, true);

        if(i < (uint32_t) &__kern_size) {
            pages_wired++;
        }

        i += 0x1000;
    }

    // Allocate kernel heap pages.
    for(i = KHEAP_START; i < KHEAP_START + KHEAP_INITIAL_SIZE; i += 0x1000) {
        alloc_frame(paging_get_page(i, false, kernel_directory), true, true);
    }

    // Set page fault handler
    // sys_set_idt_gate(14, (uint32_t) isr14, 0x08, 0x8E);

    // Convert kernel directory address to physical and save it
    kern_dir_phys = (uint32_t) &kernel_directory->tablesPhysical;
    kern_dir_phys -= 0xC0000000;
    kernel_directory->physicalAddr = kern_dir_phys;

    // Enable paging
    paging_switch_directory(kernel_directory);

    // Initialise a kernel heap
    kheap = create_heap(KHEAP_START, KHEAP_START + KHEAP_INITIAL_SIZE, 0xCFFFF000, true, true);
}
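/*
 * paging_switch_directory() is what actually activates the directory built
 * above. A minimal sketch of the usual x86 sequence (load the physical address
 * of the directory into CR3, then set the PG bit in CR0); the real function in
 * this kernel may do more, so treat this as an assumption:
 */
void paging_switch_directory_sketch(page_directory_t *dir) {
    current_directory = dir;

    // CR3 must hold the *physical* address of the page directory
    __asm__ volatile("mov %0, %%cr3" : : "r" (dir->physicalAddr));

    // Set CR0.PG (bit 31) to enable paging
    uint32_t cr0;
    __asm__ volatile("mov %%cr0, %0" : "=r" (cr0));
    cr0 |= 0x80000000;
    __asm__ volatile("mov %0, %%cr0" : : "r" (cr0));
}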
/*
 * Links the module into the kernel.
 */
void module_load(void *elf, char *moduleName) {
    elf_header_t *header = (elf_header_t *) elf;
    elf_section_entry_t *sections = elf + header->sh_offset;

    // Verify header
    if(ELF_CHECK_MAGIC(header->ident.magic)) {
        KERROR("Module '%s' has invalid ELF magic of 0x%X%X%X%X\n", moduleName,
               header->ident.magic[0], header->ident.magic[1],
               header->ident.magic[2], header->ident.magic[3]);
        goto nextModule;
    }

    // Variables used for mapping of sections
    unsigned int progbits_start = 0, progbits_size = 0, progbits_offset = 0, progbits_size_raw = 0;
    unsigned int nobits_start = 0, nobits_size = 0;

    // Symbol table
    elf_symbol_entry_t *symtab = NULL;
    unsigned int symtab_entries = 0;

    // String and section string tables
    char *strtab = NULL;
    char *shstrtab = NULL;

    elf_section_entry_t *shstrtab_sec = &sections[header->sh_str_index];
    shstrtab = elf + shstrtab_sec->sh_offset;

    // Relocation table(s)
    unsigned int currentRtab = 0;

    struct {
        elf_program_relocation_t *rtab;
        unsigned int rtab_entries;
    } rtabs[16];

    // Read the section table
    for(unsigned int s = 0; s < header->sh_entry_count; s++) {
        elf_section_entry_t *section = &sections[s];
        char *section_name = shstrtab + section->sh_name;

        // Does this section have physical memory associated with it?
        if(section->sh_type == SHT_PROGBITS) {
            // Ignore .eh_frame section
            if(strcmp(".eh_frame", section_name)) {
                progbits_size += section->sh_size;

                if(!progbits_offset) {
                    progbits_offset = section->sh_offset;
                }
            }
        } else if(section->sh_type == SHT_NOBITS) { // NOBITS?
            nobits_size += section->sh_size;

            // Ensure consecutive NOBITS sections are properly handled
            if(!nobits_start) {
                nobits_start = section->sh_addr;
            }
        } else if(section->sh_type == SHT_REL) { // relocation
            // Ignore .eh_frame section
            if(strcmp(".rel.eh_frame", section_name)) {
                rtabs[currentRtab].rtab = elf + section->sh_offset;
                rtabs[currentRtab++].rtab_entries = section->sh_size / sizeof(elf_program_relocation_t);
            }
        } else if(section->sh_type == SHT_SYMTAB) { // symbol table
            symtab = elf + section->sh_offset;
            symtab_entries = section->sh_size / sizeof(elf_symbol_entry_t);
        } else if(section->sh_type == SHT_STRTAB) { // string table
            if((elf + section->sh_offset) != shstrtab) {
                strtab = elf + section->sh_offset;
            }
        }
    }

    // Sane-ify section addresses and sizes
    progbits_size_raw = progbits_size;
    progbits_size += 0x1000;
    progbits_size &= 0xFFFFF000;
    progbits_start = module_placement_addr;

    // Traverse the symbol table to find the "compiler" and "supported_kernel" symbols
    unsigned int init_addr = 0;
    char *compilerInfo = NULL, *supportedKernel = NULL;
    bool entry_found = false;

    for(unsigned int s = 0; s < symtab_entries; s++) {
        elf_symbol_entry_t *symbol = &symtab[s];

        // Look for some symbols
        if(symbol->st_info & STT_OBJECT) {
            char *name = strtab + symbol->st_name;

            // Note how sh_offset is used, as we read out of the loaded elf
            if(!strcmp(name, "compiler")) {
                elf_section_entry_t *section = &sections[symbol->st_shndx];
                compilerInfo = elf + section->sh_offset + symbol->st_address;
            } else if(!strcmp(name, "supported_kernel")) {
                elf_section_entry_t *section = &sections[symbol->st_shndx];
                supportedKernel = elf + section->sh_offset + symbol->st_address;
            }
        }
    }

    // Check if we're using compatible compiler versions
    if(strcmp(compilerInfo, KERNEL_COMPILER)) {
        if(!hal_config_get_bool("module_ignore_compiler")) {
            KERROR("'%s' has incompatible compiler of '%s', expected '%s'", moduleName, compilerInfo, KERNEL_COMPILER);
            goto nextModule;
        } else {
            KWARNING("'%s' has incompatible compiler of '%s', but loading anyways", moduleName, compilerInfo);
        }
    }

    // Check if the module is for this kernel version
    if(strcmp(supportedKernel, KERNEL_VERSION)) {
        if(!hal_config_get_bool("module_ignore_version")) {
            KERROR("'%s' requires TSOS version '%s', but kernel is '%s'", moduleName, supportedKernel, KERNEL_VERSION);
            goto nextModule;
        } else {
            KWARNING("'%s' requires TSOS version '%s', but kernel is '%s', loading anyways", moduleName, supportedKernel, KERNEL_VERSION);
        }
    }

    /*
     * To determine the entry function to initialise this module, we
     * depend on the ELF file's entry header field to have the correct
     * value. This means that the module's entry point MUST be extern C,
     * and be named "start".
     */
    unsigned int init_function_addr = progbits_start + header->entry;

    /*
     * In order for the module to be able to properly call into kernel
     * functions, relocation needs to be performed so it has the proper
     * addresses for kernel functions.
     *
     * To do this, the relocation table is searched for entries whose
     * type is R_386_PC32, and which are marked as "undefined" in the
     * symbol table.
     *
     * If all of the above conditions are met for a symbol, it's looked
     * up in the kernel symbol table. If this lookup fails, module
     * loading is aborted, as the module may crash later in hard to
     * debug ways if a function is simply left unrelocated.
     */
    for(unsigned int u = 0; u < currentRtab; u++) {
        elf_program_relocation_t *rtab = rtabs[u].rtab;
        unsigned int rtab_entries = rtabs[u].rtab_entries;

        // Perform relocation for this rtab.
        for(unsigned int r = 0; r < rtab_entries; r++) {
            elf_program_relocation_t *ent = &rtab[r];
            unsigned int symtab_index = ELF32_R_SYM(ent->r_info);

            // Function call relocations?
            if(ELF32_R_TYPE(ent->r_info) == R_386_PC32) {
                /*
                 * The ELF spec says that R_386_PC32 relocation entries must
                 * add the value at the offset to the symbol address, and
                 * subtract the section base address added to the offset.
                 */
                // Look up only non-NULL relocations
                if(symtab_index != STN_UNDEF) {
                    // Get symbol in question
                    elf_symbol_entry_t *symbol = &symtab[symtab_index];
                    char *name = strtab + symbol->st_name;

                    unsigned int *ptr = elf + progbits_offset + ent->r_offset;
                    unsigned int kern_symbol_loc = 0;

                    // Search the module first
                    for(unsigned int i = 0; i < symtab_entries; i++) {
                        elf_symbol_entry_t *entry = &symtab[i];
                        char *symbol_name = strtab + entry->st_name;

                        // Symbol found in module?
                        if(unlikely(!strcmp(name, symbol_name)) && likely(entry->st_shndx != STN_UNDEF)) {
                            kern_symbol_loc = (entry->st_address + module_placement_addr);
                            *ptr = kern_symbol_loc + *ptr - (module_placement_addr + ent->r_offset);

                            #if DEBUG_MOBULE_RELOC
                            KDEBUG("0x%08X -> 0x%08X (%s, module)", (unsigned int) ent->r_offset, *ptr, name);
                            #endif

                            goto linkNext;
                        }
                    }

                    // We drop down here if the symbol isn't in the module
                    kern_symbol_loc = find_symbol_in_kernel(name);

                    if(kern_symbol_loc) {
                        // Perform the relocation.
                        *ptr = kern_symbol_loc + *ptr - (module_placement_addr + ent->r_offset);

                        #if DEBUG_MOBULE_RELOC
                        KDEBUG("0x%08X -> 0x%08X (%s, kernel)", (unsigned int) ent->r_offset, *ptr, name);
                        #endif
                    } else {
                        KERROR("Module %s references '%s', but symbol does not exist", moduleName, name);
                        goto nextModule;
                    }
                } else {
                    KERROR("Module %s has undefined linkage", moduleName);
                    goto nextModule;
                }
            } else if(ELF32_R_TYPE(ent->r_info) == R_386_32) {
                /*
                 * The ELF spec says that R_386_32 relocation entries must
                 * add the value at the offset to the symbol address.
                 */
                elf_symbol_entry_t *symbol = &symtab[symtab_index];

                // If name = 0, we are relocating against a section
                if(symbol->st_name == 0) {
                    // Get the section requested
                    unsigned int sectionIndex = symbol->st_shndx;
                    elf_section_entry_t *section = &sections[sectionIndex];
                    char *name = shstrtab + section->sh_name;

                    // Get virtual address of the section
                    unsigned int addr = section->sh_addr + module_placement_addr;

                    // Perform relocation
                    unsigned int *ptr = elf + progbits_offset + ent->r_offset;
                    *ptr = addr + *ptr;

                    #if DEBUG_MOBULE_RELOC
                    KDEBUG("0x%08X -> 0x%08X (section: %s+0x%X)", (unsigned int) ent->r_offset, *ptr, name, *ptr - addr);
                    #endif
                } else {
                    // Get symbol name and a placeholder address
                    char *name = strtab + symbol->st_name;
                    unsigned int addr = 0;

                    #if DEBUG_MOBULE_RELOC
                    bool inKernel = false;
                    #endif

                    // Search through the module's symbols first
                    for(unsigned int i = 0; i < symtab_entries; i++) {
                        elf_symbol_entry_t *entry = &symtab[i];
                        char *symbol_name = strtab + entry->st_name;

                        // Symbol found in module?
                        if(unlikely(!strcmp(name, symbol_name)) && likely(entry->st_shndx != STN_UNDEF)) {
                            addr = entry->st_address + module_placement_addr;

                            // Take into account the section's address
                            elf_section_entry_t *section = &sections[symbol->st_shndx];
                            addr += section->sh_addr;

                            // Go to the relocation code
                            #if DEBUG_MOBULE_RELOC
                            inKernel = false;
                            #endif
                            goto R_386_32_reloc_good;
                        }
                    }

                    // See if the kernel has the symbol
                    if(unlikely(!(addr = find_symbol_in_kernel(name)))) {
                        KERROR("Module %s references '%s', but symbol does not exist", moduleName, name);
                        goto nextModule;
                    }

                    #if DEBUG_MOBULE_RELOC
                    inKernel = true;
                    #endif

                    // Perform relocation
                    R_386_32_reloc_good: ;
                    unsigned int *ptr = elf + progbits_offset + ent->r_offset;
                    *ptr = addr + *ptr;

                    #if DEBUG_MOBULE_RELOC
                    KDEBUG("0x%08X -> 0x%08X (%s, %s)", (unsigned int) ent->r_offset, addr, name, inKernel ? "kernel" : "module");
                    #endif
                }
            }

            // Drop down here to link the next symbol
            linkNext: ;
        }
    }

    // Move the PROGBITS data to the start of the buffer, removing the file offset
    memmove(elf, elf + progbits_offset, progbits_size_raw);

    // Perform mapping for the PROGBITS section
    #if DEBUG_MODULE_MAPPING
    KDEBUG("Mapping PROGBITS from 0x%08X to 0x%08X", module_placement_addr, module_placement_addr + progbits_size);
    #endif

    unsigned int progbits_end = module_placement_addr + progbits_size;

    for(unsigned int a = module_placement_addr; a < progbits_end; a += 0x1000) {
        unsigned int progbits_offset = a - module_placement_addr;
        unsigned int progbits_virt_addr = ((unsigned int) elf) + progbits_offset;

        // Get the page whose physical address we want
        page_t *elf_page = paging_get_page(progbits_virt_addr, false, kernel_directory);

        // Create a page in the proper virtual space, and assign the physical address of the above
        page_t *new_page = paging_get_page(a, true, kernel_directory);
        new_page->rw = 1;
        new_page->present = 1;
        new_page->frame = elf_page->frame;

        // KDEBUG("0x%08X -> 0x%08X (0x%08X)", a, progbits_offset, progbits_virt_addr);
    }

    module_placement_addr += progbits_size;

    // Perform mapping for the NOBITS section, if needed
    if(nobits_size) {
        nobits_size += 0x1000;
        nobits_size &= 0xFFFFF000;

        unsigned int nobits_end = module_placement_addr + nobits_size;

        #if DEBUG_MODULE_MAPPING
        KDEBUG("Mapping NOBITS from 0x%08X to 0x%08X", module_placement_addr, nobits_end);
        #endif

        // Map the pages, and allocate memory to them
        for(unsigned int a = module_placement_addr; a < nobits_end; a += 0x1000) {
            page_t *page = paging_get_page(a, true, kernel_directory);
            alloc_frame(page, true, true);

            // Zero the memory
            memclr((void *) a, 4096);
        }

        nobits_start = module_placement_addr;
        module_placement_addr += nobits_size;
    } else {
        #if DEBUG_MODULE_MAPPING
        KDEBUG("NOBITS section not required");
        #endif
    }

    // Initialise driver
    module_t *driver = ((module_t* (*)(void)) init_function_addr)();

    // Save locations
    driver->progbits_start = progbits_start;
    driver->map_end = module_placement_addr - 1;

    // If there's no BSS, leave the nobits_start empty
    if(nobits_size) {
        driver->nobits_start = nobits_start;
    } else {
        driver->nobits_start = 0;
    }

    // Register the module
    list_add(loaded_module_names, (char *) driver->name);
    hashmap_insert(loaded_module_map, (char *) driver->name, driver);

    // Drop down here when the module is all happy
    nextModule: ;
}