/**
 * Page table mapper init function.
 * Allocates and initializes the master (L1) and system (L2) page tables,
 * sets up the fixed kernel memory regions, maps them, and activates the
 * translation tables on the MMU.
 * @note This function should be called by mmu init.
 */
void ptmapper_init(void)
{
    SUBSYS_INIT();
    KERROR(KERROR_INFO, "ptmapper init started");

    /* Allocate memory for mmu_pagetable_master */
    if (ptmapper_alloc(&mmu_pagetable_master)) {
        /* Without a master page table the MMU cannot be enabled at all. */
        panic("Can't allocate memory for master page table.");
    }

    /* The system (coarse) page table hangs off the master table, so it must
     * learn the master table's address before its own allocation. */
    mmu_pagetable_system.master_pt_addr = mmu_pagetable_master.master_pt_addr;
    if (ptmapper_alloc(&mmu_pagetable_system)) {
        panic("Can't allocate memory for system page table.");
    }

    /* Initialize system page tables */
    mmu_init_pagetable(&mmu_pagetable_master);
    mmu_init_pagetable(&mmu_pagetable_system);

    /* Init regions */

    /* Kernel ro region: spans from the kernel start up to the end of
     * read-only data (_rodata_end is a linker-provided symbol; the -1 makes
     * the range end inclusive for MMU_PAGE_CNT_BY_RANGE). */
    mmu_region_kernel.num_pages = MMU_PAGE_CNT_BY_RANGE(
            MMU_VADDR_KERNEL_START, (intptr_t)(&_rodata_end) - 1,
            MMU_PGSIZE_COARSE);

    /* Kernel rw data region: identity-mapped (vaddr == paddr) from the start
     * of writable data to the end of the kernel address range. */
    mmu_region_kdata.vaddr = (intptr_t)(&_data_start);
    mmu_region_kdata.num_pages = MMU_PAGE_CNT_BY_RANGE(
            (intptr_t)(&_data_start), MMU_VADDR_KERNEL_END,
            MMU_PGSIZE_COARSE);
    mmu_region_kdata.paddr = (intptr_t)(&_data_start);

    /* Fill page tables with translations & attributes */
    {
#if configDEBUG >= KERROR_DEBUG
        char buf[80];
        const char str_type[2][9] = {"sections", "pages"};
/* Debug helper: report each mapped region; master tables map 1 MiB sections,
 * coarse tables map small pages. */
#define PRINTMAPREG(region)                            \
        ksprintf(buf, sizeof(buf), "Mapped %s: %u %s", \
                 #region, region.num_pages,            \
                 (region.pt->type == MMU_PTT_MASTER) ? \
                 str_type[0] : str_type[1]);           \
        KERROR(KERROR_DEBUG, buf);
#else
#define PRINTMAPREG(region)
#endif
/* Map a region and (in debug builds) log what was mapped. */
#define MAP_REGION(reg)       \
        mmu_map_region(&reg); \
        PRINTMAPREG(reg)
        //MAP_REGION(mmu_region_tkstack);
        MAP_REGION(mmu_region_kstack);
        MAP_REGION(mmu_region_kernel);
        MAP_REGION(mmu_region_kdata);
        MAP_REGION(mmu_region_page_tables);
        MAP_REGION(mmu_region_rpihw);
#undef MAP_REGION
#undef PRINTMAPREG
    }

    /* Copy system page table to vm version of it, this is the only easy way to
     * solve some issues now. TODO Maybe we'd like to do some major refactoring
     * some day.
     */
    vm_pagetable_system.pt = mmu_pagetable_system;
    vm_pagetable_system.linkcount = 1;

    /* Activate page tables. Order matters: the master table (L1 TTB) must be
     * loaded before the system table's L2 entries are attached to it. */
    mmu_attach_pagetable(&mmu_pagetable_master); /* Load L1 TTB */
#if configDEBUG >= KERROR_DEBUG
    KERROR(KERROR_DEBUG, "Attached TTB mmu_pagetable_master");
#endif
    mmu_attach_pagetable(&mmu_pagetable_system); /* Add L2 pte into L1 mpt */
#if configDEBUG >= KERROR_DEBUG
    KERROR(KERROR_DEBUG, "Attached mmu_pagetable_system");
#endif

    SUBSYS_INITFINI("ptmapper OK");
}
/**
 * Page table mapper init function.
 * Allocates and initializes the master (L1) and system (L2) page tables,
 * configures the fixed kernel memory regions and maps them.
 * @note This function should be called by mmu init.
 * @return Presumably 0 on success — the tail of this function is outside
 *         this view; TODO confirm the return contract.
 */
int ptmapper_init(void)
{
    SUBSYS_INIT("ptmapper");
#if defined(configPTMAPPER_DEBUG)
    kputs("\n");
#endif

    /* Allocate memory for mmu_pagetable_master */
    if (ptmapper_alloc(&mmu_pagetable_master)) {
        /* Critical failure */
        panic("Can't allocate memory for master page table.\n");
    }

    /* The system page table hangs off the master table; it needs the master
     * table's address and its table count before its own allocation. */
    mmu_pagetable_system.master_pt_addr = mmu_pagetable_master.master_pt_addr;
    mmu_pagetable_system.nr_tables =
        (MMU_VADDR_KERNEL_END + 1) / MMU_PGSIZE_SECTION;
    if (ptmapper_alloc(&mmu_pagetable_system)) {
        /* Critical failure */
        panic("Can't allocate memory for system page table.\n");
    }

    /* Initialize system page tables */
    mmu_init_pagetable(&mmu_pagetable_master);
    mmu_init_pagetable(&mmu_pagetable_system);

    /*
     * Init regions
     */

    /* Kernel ro region: kernel start to end of read-only data (_rodata_end is
     * a linker symbol; -1 makes the range end inclusive). */
    mmu_region_kernel.num_pages = MMU_PAGE_CNT_BY_RANGE(
            MMU_VADDR_KERNEL_START, (intptr_t)(&_rodata_end) - 1,
            MMU_PGSIZE_COARSE);

    /* Kernel rw data region: identity-mapped (vaddr == paddr). */
    mmu_region_kdata.vaddr = (intptr_t)(&_data_start);
    mmu_region_kdata.num_pages = MMU_PAGE_CNT_BY_RANGE(
            (intptr_t)(&_data_start), MMU_VADDR_KERNEL_END,
            MMU_PGSIZE_COARSE);
    mmu_region_kdata.paddr = (intptr_t)(&_data_start);

    /* Fill page tables with translations & attributes */
    {
        mmu_region_t ** regp;
#if defined(configPTMAPPER_DEBUG)
        const char str_type[2][9] = {"sections", "pages"};
/* Debug helper: report each mapped region; master tables map sections,
 * coarse tables map small pages. */
#define PRINTMAPREG(region)                                 \
        KERROR(KERROR_DEBUG, "Mapped %s: %u %s\n",          \
               #region, region.num_pages,                   \
               (region.pt->pt_type == MMU_PTT_MASTER) ?     \
               str_type[0] : str_type[1]);
#else
#define PRINTMAPREG(region)
#endif
/* Map a region and (in debug builds) log what was mapped. */
#define MAP_REGION(reg)       \
        mmu_map_region(&reg); \
        PRINTMAPREG(reg)
        MAP_REGION(mmu_region_kstack);
        MAP_REGION(mmu_region_kernel);
        MAP_REGION(mmu_region_kdata);
        MAP_REGION(mmu_region_page_tables);
#undef MAP_REGION
#undef PRINTMAPREG

        /* Map additional fixed regions registered via linker-set style
         * registration (SET_FOREACH over ptmapper_fixed_regions). */
        SET_FOREACH(regp, ptmapper_fixed_regions) {
            mmu_map_region(*regp);
        }
    }
.vaddr = 0,
    .pt_addr = 0,        /* Set later */
    .master_pt_addr = 0, /* Set later */
    .type = MMU_PTT_COARSE,
    .dom = MMU_DOM_KERNEL
};

/* VM-layer view of the system page table; filled in by ptmapper_init(). */
struct vm_pt vm_pagetable_system;

/* Fixed Regions **************************************************************/

/** Kernel mode stacks. Identity-mapped (vaddr == paddr), kernel-RW/user-none,
 * write-back cacheable and execute-never, backed by the system page table. */
const mmu_region_t mmu_region_kstack = {
    .vaddr = MMU_VADDR_KSTACK_START,
    .num_pages = MMU_PAGE_CNT_BY_RANGE(
            MMU_VADDR_KSTACK_START, MMU_VADDR_KSTACK_END,
            MMU_PGSIZE_COARSE),
    .ap = MMU_AP_RWNA,
    .control = MMU_CTRL_MEMTYPE_WB | MMU_CTRL_XN,
    .paddr = MMU_VADDR_KSTACK_START,
    .pt = &mmu_pagetable_system
};

#if 0 /* Disabled: thread kernel stack region is not currently mapped here. */
/* Kernel mode system/thread stack. */
mmu_region_t mmu_region_tkstack = {
    .vaddr = MMU_VADDR_TKSTACK_START,
    .num_pages = MMU_PAGE_CNT_BY_RANGE(
            MMU_VADDR_TKSTACK_START, MMU_VADDR_TKSTACK_END,
            4096),
    .ap = MMU_AP_RWNA,
    .control = MMU_CTRL_XN,
.pt_addr = 0,        /* Will be set later */
    .nr_tables = 0,      /* Will be set later */
    .master_pt_addr = 0, /* Will be set later */
    .pt_type = MMU_PTT_COARSE,
    .pt_dom = MMU_DOM_KERNEL
};

/* VM-layer view of the system page table; filled in by ptmapper_init(). */
struct vm_pt vm_pagetable_system;

/* Fixed Regions **************************************************************/

/** Kernel mode stacks, other than thread kernel stack. Identity-mapped
 * (vaddr == paddr), kernel-RW/user-none, write-back cacheable and
 * execute-never, backed by the system page table. */
const mmu_region_t mmu_region_kstack = {
    .vaddr = MMU_VADDR_KSTACK_START,
    .num_pages = MMU_PAGE_CNT_BY_RANGE(
            MMU_VADDR_KSTACK_START, MMU_VADDR_KSTACK_END,
            MMU_PGSIZE_COARSE),
    .ap = MMU_AP_RWNA,
    .control = MMU_CTRL_MEMTYPE_WB | MMU_CTRL_XN,
    .paddr = MMU_VADDR_KSTACK_START,
    .pt = &mmu_pagetable_system
};

/* Linker-provided end-of-rodata symbol; weak so the file still links if the
 * linker script does not define it. */
extern void * _rodata_end __attribute__((weak));

/** Read-only kernel code & ro-data. num_pages is computed at init time from
 * _rodata_end (see ptmapper_init). Kernel read-only, write-back cacheable. */
mmu_region_t mmu_region_kernel = {
    .vaddr = MMU_VADDR_KERNEL_START,
    .num_pages = 0, /* Set in init */
    .ap = MMU_AP_RONA,
    .control = MMU_CTRL_MEMTYPE_WB,
    .paddr = MMU_VADDR_KERNEL_START,