void print_single_tlb(unsigned long tlb, int single_print) { regType_t pteH; regType_t pteL; unsigned int valid, shared, asid, epn, cb, ppn; char *pSize; char *pProt; /* ** in case of single print <single_print> is true, this implies: ** 1) print the TLB in any case also if NOT VALID ** 2) print out the header */ pteH = getConfigReg(tlb); valid = GET_VALID(pteH); if (single_print) printk(HOW_TO_READ_TLB_CONTENT); else if (!valid) return; pteL = getConfigReg(tlb + 1); shared = GET_SHARED(pteH); asid = GET_ASID(pteH); epn = GET_EPN(pteH); cb = GET_CBEHAVIOR(pteL); pSize = GET_PAGE_SIZE(pteL); pProt = GET_PROTECTION(pteL); ppn = GET_PPN(pteL); printk("[%c%2ld] 0x%08x 0x%08x %03d %02x %02x %4s %s\n", ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP), ppn, epn, asid, shared, cb, pSize, pProt); }
/**
 * bt_mmu_switch() - activate the address space described by @pgd_h.
 * @pgd_h: handle packing the page-directory pointer and the ASID
 *         together (unpacked via GET_PGD()/GET_ASID()).
 *
 * Reloads the user translation-table base only when the physical
 * address of the new directory differs from the one currently
 * installed, avoiding a redundant TTB switch.
 */
void bt_mmu_switch(bt_pgd_t pgd_h)
{
	bt_pgd_t table = GET_PGD(pgd_h);
	BT_u32 asid = GET_ASID(pgd_h);
	bt_paddr_t table_phys = (bt_paddr_t) bt_virt_to_phys(table);

	if (table_phys == current_user_ttb()) {
		/* Already the active table: nothing to do. */
		return;
	}

	bt_mmu_switch_ttb(table_phys, asid);
}
bt_pgd_t bt_mmu_newmap(void) { bt_paddr_t pg; bt_pgd_t pgd; pg = create_pgd(); if(!pg) { return 0; } pgd = (bt_pgd_t) bt_phys_to_virt(GET_PGD(pg)); memset(pgd, 0, MMU_L1TBL_SIZE); /* * At this point the kernel page table will point to valid page tables, * that were created during the bt_mmu_initialise routine. * * The user-space section should all be 0, i.e. cause page faults. * This means process VMs always match the kernel ptes correctly, * as the kernel pgd will never be updated after mmu initialisation. */ memcpy(pgd, g_MMUTable, MMU_L1TBL_SIZE); return (bt_pgd_t) ((BT_u32)pgd | (GET_ASID(pg))); }