extern "C" void initialiseBootTimePaging() { PageDirEntry *pde_start = (PageDirEntry*) VIRTUAL_TO_PHYSICAL_BOOT((pointer)kernel_page_directory); PageTableEntry *pte_start = (PageTableEntry*) VIRTUAL_TO_PHYSICAL_BOOT((pointer)kernel_page_tables); uint32 i; // the verdex board has physical ram mapped to 0xA0000000 uint32 base = 0xA00; uint32 base_4k = 0xA0000; // clear the page dir for (i = 0; i < 4096; ++i) *((uint32*)pde_start + i) = 0; // map 16 mb PTs for kernel for (i = 0; i < 16; ++i) { pde_start[2048 + i].pt.size = 1; pde_start[2048 + i].pt.offset = i % 4; pde_start[2048 + i].pt.pt_ppn = (((pointer) &pte_start[PAGE_TABLE_ENTRIES * i]) / PAGE_SIZE); } // clear the page tables for (i = 0; i < 16 * PAGE_TABLE_ENTRIES; ++i) *((uint32*)pte_start + i) = 0; // map kernel into PTs size_t kernel_last_page = (size_t)VIRTUAL_TO_PHYSICAL_BOOT((pointer)&kernel_end_address) / PAGE_SIZE; extern size_t ro_data_end_address; size_t last_ro_data_page = (size_t)VIRTUAL_TO_PHYSICAL_BOOT((pointer)&ro_data_end_address) / PAGE_SIZE; for (i = 0; i < last_ro_data_page - base_4k; ++i) { pte_start[i].size = 2; pte_start[i].permissions = 1; pte_start[i].page_ppn = i + base_4k; } for (; i < kernel_last_page - base_4k; ++i) { pte_start[i].size = 2; pte_start[i].permissions = 1; pte_start[i].page_ppn = i + base_4k; } // 1 : 1 mapping of the first 8 mbs for (i = 0; i < 8; ++i) mapBootTimePage(pde_start, i, base + i); // 1 : 1 mapping of the first 8 mbs of physical ram for (i = 0; i < 8; ++i) mapBootTimePage(pde_start, base + i, base + i); // map first 4 mb for kernel TODO: remove this loop! for (i = 0; i < 4; ++i) mapBootTimePage(pde_start, 0x800 + i, base + i); // 3gb 1:1 mapping for (i = 0; i < 1024; ++i) mapBootTimePage(pde_start, 0xC00 + i, base + i); // map devices from 0x81000000 upwards mapBootTimePage(pde_start,0x860,0x401); // uart device mapBootTimePage(pde_start,0x900,0x440); // lcd controller mapBootTimePage(pde_start,0x840,0x40D); // interrupt controller mapBootTimePage(pde_start,0x830,0x40A); // timer mapBootTimePage(pde_start,0x8C0,0x411); // mmc controller }
size_t ArchMemory::get_PPN_Of_VPN_In_KernelMapping(size_t virtual_page, size_t *physical_page,
                                                   size_t *physical_pte_page)
{
  ArchMemoryMapping m = resolveMapping(((uint64) VIRTUAL_TO_PHYSICAL_BOOT(kernel_page_map_level_4) / PAGE_SIZE),
                                       virtual_page);
  if (physical_page)
    *physical_page = m.page_ppn;
  if (physical_pte_page)
    *physical_pte_page = m.pt_ppn;
  return m.page_size;
}
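// Usage sketch (hypothetical helper, not part of the original code): look up
// where a kernel virtual page currently lives. Presumably the returned
// page_size is 0 when the page is not mapped at all, which is what this
// wrapper tests for.
static bool isKernelPageMapped(size_t virtual_page)
{
  size_t ppn = 0;
  size_t pte_ppn = 0;
  size_t page_size = ArchMemory::get_PPN_Of_VPN_In_KernelMapping(virtual_page, &ppn, &pte_ppn);
  // if page_size != 0, ppn and pte_ppn now describe the mapping
  return page_size != 0;
}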
void ArchMemory::unmapKernelPage(size_t virtual_page)
{
  ArchMemoryMapping mapping = resolveMapping(((uint64) VIRTUAL_TO_PHYSICAL_BOOT(kernel_page_map_level_4) / PAGE_SIZE),
                                             virtual_page);
  PageMapLevel4Entry *pml4 = kernel_page_map_level_4;
  assert(pml4[mapping.pml4i].present);
  PageDirPointerTableEntry *pdpt = (PageDirPointerTableEntry*) getIdentAddressOfPPN(pml4[mapping.pml4i].page_ppn);
  assert(pdpt[mapping.pdpti].pd.present);
  PageDirEntry *pd = (PageDirEntry*) getIdentAddressOfPPN(pdpt[mapping.pdpti].pd.page_ppn);
  assert(pd[mapping.pdi].pt.present);
  PageTableEntry *pt = (PageTableEntry*) getIdentAddressOfPPN(pd[mapping.pdi].pt.page_ppn);
  assert(pt[mapping.pti].present);
  pt[mapping.pti].present = 0;
  pt[mapping.pti].writeable = 0;
  PageManager::instance()->freePPN(pt[mapping.pti].page_ppn);
  // reload CR3 to flush the TLB
  asm volatile ("movq %%cr3, %%rax; movq %%rax, %%cr3;" ::: "%rax");
}
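// The CR3 reload above flushes the entire (non-global) TLB. A more targeted
// alternative, sketched here under the assumption that only this one mapping
// changed, would be to invalidate just the affected page with invlpg:
static inline void flushTlbPage(size_t virtual_page)
{
  asm volatile ("invlpg (%0)" :: "r" (virtual_page * PAGE_SIZE) : "memory");
}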
void ArchThreads::createBaseThreadRegisters(ArchThreadRegisters *&info, void* start_function, void* stack)
{
  info = (ArchThreadRegisters*) new uint8[sizeof(ArchThreadRegisters)];
  memset((void*) info, 0, sizeof(ArchThreadRegisters));
  pointer root_of_kernel_paging_structure = VIRTUAL_TO_PHYSICAL_BOOT((pointer) ArchMemory::getRootOfKernelPagingStructure());

  info->esp = (size_t) stack;
  info->ebp = (size_t) stack;
  info->eflags = 0x200; // interrupts enabled (IF flag set)
  info->eip = (size_t) start_function;
  info->cr3 = root_of_kernel_paging_structure;

  /* fpu (=fninit) */
  info->fpu[0] = 0xFFFF037F;
  info->fpu[1] = 0xFFFF0000;
  info->fpu[2] = 0xFFFFFFFF;
  info->fpu[3] = 0x00000000;
  info->fpu[4] = 0x00000000;
  info->fpu[5] = 0x00000000;
  info->fpu[6] = 0xFFFF0000;
}
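// Usage sketch (hypothetical; my_thread_entry and stack_top are placeholder
// names, not identifiers from this code): prepare the register set for a new
// kernel thread.
static ArchThreadRegisters* makeKernelThreadRegisters(void (*my_thread_entry)(), void* stack_top)
{
  ArchThreadRegisters *regs = 0;
  ArchThreads::createBaseThreadRegisters(regs, (void*) my_thread_entry, stack_top);
  // regs now starts execution at my_thread_entry with interrupts enabled and
  // the kernel paging root loaded into cr3
  return regs;
}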
// returns 1 if page_number is occupied by a boot module, 0 otherwise
uint32 isPageUsed(uint32 page_number)
{
  uint32 i;
  uint32 num_modules = ArchCommon::getNumModules(0);
  for (i = 0; i < num_modules; ++i)
  {
    uint32 start_page = ArchCommon::getModuleStartAddress(i, 0) / PAGE_SIZE;
    uint32 end_page = ArchCommon::getModuleEndAddress(i, 0) / PAGE_SIZE;
    if (start_page <= page_number && end_page >= page_number)
    {
      print(page_number);
      return 1;
    }
  }
  return 0;
}
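// initialiseBootTimePaging() below calls getNextFreePage(), which is not shown
// in this section. A plausible sketch built on isPageUsed(): scan forward from
// first_page and return the first page not occupied by a boot module.
uint32 getNextFreePage(uint32 first_page)
{
  while (isPageUsed(first_page))
    ++first_page;
  return first_page;
}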
void ArchBoardSpecific::frameBufferInit()
{
  // frame buffer initialization as described on http://elinux.org/RPi_Framebuffer#Notes
  for (uint32 i = 0; i < 10 && (fbs.pointer == 0 || fbs.size == 0); ++i)
  {
    fbs.width = 640;
    fbs.height = 480;
    fbs.vwidth = fbs.width;
    fbs.vheight = fbs.height;
    fbs.pitch = 0;
    fbs.depth = 16;
    fbs.xoffset = 0;
    fbs.yoffset = 0;
    fbs.pointer = 0;
    fbs.size = 0;

    uint32* MAIL0_READ = (uint32*) 0x9000b880;
    uint32* MAIL0_WRITE = (uint32*) 0x9000b8A0;
    uint32* MAIL0_STATUS = (uint32*) 0x9000b898;

    memory_barrier();
    while (*MAIL0_STATUS & (1 << 31)); // wait while the mailbox is full
    assert((((uint32) &fbs) & 0xF) == 0); // the message address must be 16-byte aligned
    *MAIL0_WRITE = VIRTUAL_TO_PHYSICAL_BOOT(((uint32) &fbs) & ~0xF) | 0x1; // channel 1: framebuffer
    memory_barrier();

    uint32 read = 0;
    while ((read & 0xF) != 1) // wait for the reply on channel 1
    {
      while (*MAIL0_STATUS & (1 << 30)); // wait while the mailbox is empty
      read = *MAIL0_READ;
    }
    memory_barrier();
    for (uint32 j = 0; j < 0x10000; ++j); // crude delay: give the GPU some time
  }
  assert(fbs.pointer != 0);
  assert(fbs.width == fbs.vwidth);
  assert(fbs.height == fbs.vheight);
  assert(fbs.size == (fbs.width * fbs.height * fbs.depth / 8));
  framebuffer = (fbs.pointer & ~0xC0000000) + 0xC0000000; // translate the GPU bus address into our kernel mapping
}
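// Sketch (not part of the original code): with the 16-bpp framebuffer mapped
// above, a pixel could be written like this. The assertion in frameBufferInit()
// guarantees a packed layout (pitch == width * 2 bytes); the 5-6-5 RGB encoding
// of the 16-bit value is an assumption about what the GPU returns for depth == 16.
static void putPixel(uint32 x, uint32 y, uint16 rgb565)
{
  uint16 *fb = (uint16*) framebuffer;
  fb[y * fbs.width + x] = rgb565;
}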
// Be careful: this works because the beloved compiler generates
// relative calls in this case.
// If the compiler generated an absolute call we'd be screwed, since we
// have not set up paging yet :)
void initialiseBootTimePaging()
{
  uint32 i;

  page_directory_entry *pde_start = (page_directory_entry*) VIRTUAL_TO_PHYSICAL_BOOT((pointer) &kernel_page_directory_start);
  page_table_entry *pte_start = (page_table_entry*) VIRTUAL_TO_PHYSICAL_BOOT((pointer) &kernel_page_tables_start);

  uint32 kernel_last_page = ((uint32) VIRTUAL_TO_PHYSICAL_BOOT((pointer) &kernel_end_address)) / PAGE_SIZE;
  uint32 first_free_page = kernel_last_page + 1;
  print((uint32) VIRTUAL_TO_PHYSICAL_BOOT((pointer) &kernel_end_address));
  print(first_free_page);

  // we do not have to clear the pde since it's in the bss
  for (i = 0; i < 5; ++i)
  {
    pde_start[i].pde4m.present = 1;
    pde_start[i].pde4m.writeable = 1;
    pde_start[i].pde4m.use_4_m_pages = 1;
    pde_start[i].pde4m.page_base_address = i;
  }
  for (i = 0; i < 4; ++i)
  {
    pde_start[i + 512].pde4k.present = 1;
    pde_start[i + 512].pde4k.writeable = 1;
    pde_start[i + 512].pde4k.page_table_base_address = ((pointer) &pte_start[1024 * i]) / PAGE_SIZE;
  }

  // We currently only fill in mappings for the first 4 MiB (i.e. one page table).
  // We do not have to zero out the other page tables since they're already empty
  // thanks to the bss clearing.
  // From now on, all pages up to the last page containing only rodata
  // will be write protected.
  extern uint32 ro_data_end_address;
  pointer rod = (pointer) &ro_data_end_address;
  uint32 last_ro_data_page = (rod - LINK_BASE) / PAGE_SIZE;

  // ppns are offset by 256 pages (1 MiB): the kernel is linked at LINK_BASE
  // but loaded at physical address 1 MiB
  for (i = 0; i < last_ro_data_page; ++i)
  {
    pte_start[i].present = 1;
    pte_start[i].writeable = 0;
    pte_start[i].page_base_address = i + 256;
  }
  print(first_free_page);
  for (i = last_ro_data_page; i < (first_free_page - 256); ++i)
  {
    pte_start[i].present = 1;
    pte_start[i].writeable = 1;
    pte_start[i].page_base_address = i + 256;
  }

  uint32 start_page = first_free_page;
  // HACK example: extend kernel memory (by Bernhard T.):
  // just change 1024 here to anything <= 3*1024 (because we have 3 page tables
  // in the bss which were mapped in the pde above)
  for (i = first_free_page - 256; i < 1024; ++i)
  {
    pte_start[i].present = 1;
    pte_start[i].writeable = 1;
    pte_start[i].page_base_address = getNextFreePage(start_page);
    start_page = pte_start[i].page_base_address + 1;
  }

  if (ArchCommon::haveVESAConsole(0))
  {
    for (i = 0; i < 4; ++i)
    {
      pde_start[764 + i].pde4m.present = 1;
      pde_start[764 + i].pde4m.writeable = 1;
      pde_start[764 + i].pde4m.use_4_m_pages = 1;
      pde_start[764 + i].pde4m.cache_disabled = 1;
      pde_start[764 + i].pde4m.write_through = 1;
      pde_start[764 + i].pde4m.page_base_address = (ArchCommon::getVESAConsoleLFBPtr(0) / (1024 * 1024 * 4)) + i;
    }
  }
  // kernel mapping: the first 1 GiB of physical memory at virtual 3 GiB (4 MiB pages)
  for (i = 0; i < 256; ++i)
  {
    pde_start[i + 768].pde4m.present = 1;
    pde_start[i + 768].pde4m.writeable = 1;
    pde_start[i + 768].pde4m.use_4_m_pages = 1;
    pde_start[i + 768].pde4m.page_base_address = i;
  }
}
extern "C" void initialiseBootTimePaging() { uint32 i; PageDirEntry *pde_start = (PageDirEntry*) VIRTUAL_TO_PHYSICAL_BOOT((pointer )kernel_page_directory); PageTableEntry *pte_start = (PageTableEntry*) VIRTUAL_TO_PHYSICAL_BOOT((pointer )kernel_page_tables); uint32 kernel_last_page = VIRTUAL_TO_PHYSICAL_BOOT((pointer)&kernel_end_address) / PAGE_SIZE; // we do not have to clear the pde since its in the bss for (i = 0; i < 5; ++i) { pde_start[i].page.present = 1; pde_start[i].page.writeable = 1; pde_start[i].page.size = 1; pde_start[i].page.page_ppn = i; } for (i = 0; i < 4; ++i) { pde_start[i + 512].pt.present = 1; pde_start[i + 512].pt.writeable = 1; pde_start[i + 512].pt.page_table_ppn = ((pointer) &pte_start[1024 * i]) / PAGE_SIZE; } // ok, we currently only fill in mappings for the first 4 megs (aka one page table) // we do not have to zero out the other page tables since they're already empty // thanks to the bss clearance. // update, from now on, all pages up to the last page containing only rodata // will be write protected. extern uint32 ro_data_end_address; uint32 last_ro_data_page = VIRTUAL_TO_PHYSICAL_BOOT((pointer)&ro_data_end_address) / PAGE_SIZE; // ppns are 1mb = 256 pages after vpns... for (i = 0; i < last_ro_data_page - 256; ++i) { pte_start[i].present = 1; pte_start[i].writeable = 0; pte_start[i].page_ppn = i + 256; } for (; i < kernel_last_page - 256; ++i) { pte_start[i].present = 1; pte_start[i].writeable = 1; pte_start[i].page_ppn = i + 256; } if (ArchCommon::haveVESAConsole(0)) { for (i = 0; i < 4; ++i) { pde_start[764 + i].page.present = 1; pde_start[764 + i].page.writeable = 1; pde_start[764 + i].page.size = 1; pde_start[764 + i].page.cache_disabled = 1; pde_start[764 + i].page.write_through = 1; pde_start[764 + i].page.page_ppn = (ArchCommon::getVESAConsoleLFBPtr(0) / (1024 * 1024 * 4)) + i; } } // identity mapping for (i = 0; i < 256; ++i) { pde_start[i + 768].page.present = 1; pde_start[i + 768].page.writeable = 1; pde_start[i + 768].page.size = 1; pde_start[i + 768].page.page_ppn = i; } }