/*
 * Flush the entire local TLB.
 *
 * Saves EntryHi (which carries the current ASID), zeroes both EntryLo
 * registers, then invalidates every non-wired entry.  On cores with the
 * tlbinv facility and no wired entries, the whole VTLB and each FTLB set
 * are invalidated with tlbinvf; otherwise each entry is overwritten with
 * a unique, impossible VPN2 so no two entries can ever match.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();			/* stop the HW page-table walker while rewriting the TLB */
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}

		/* FTLB sets are indexed immediately after the VTLB entries */
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);	/* restore the saved ASID */
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
/*
 * Flush the entire local TLB (64-bit kernel variant).
 *
 * Each entry is rewritten with an XKPHYS-based VPN2 unique to its index
 * (XKPHYS addresses are unmapped, so the entries can never match a real
 * translation).  Only the ASID bits of EntryHi are saved and restored.
 * BARRIER covers the mtc0 write hazards on this core.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = (read_c0_entryhi() & ASID_MASK);
	write_c0_entryhi(XKPHYS);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	BARRIER;

	entry = read_c0_wired();	/* skip the wired (pinned) entries */

	/* Blast 'em all away. */
	while(entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ: step VPN2 by one 8KB pair per index */
		write_c0_entryhi(XKPHYS+entry*0x2000);
		write_c0_index(entry);
		BARRIER;
		tlb_write_indexed();
		BARRIER;
		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);	/* restore the saved ASID */
	local_irq_restore(flags);
}
/*
 * Initialize the wired register and all TLB entries to a known good state.
 *
 * Every entry is given a unique VPN2 (offset by 32 so it cannot collide
 * with entries later written by add_tmptlb_entry()/the fault path) and
 * invalid EntryLo values (_PAGE_VALID clear), so no stale translation can
 * match.  Also records the TLB size in tmp_tlb_ent for add_tmptlb_entry(),
 * which allocates temporary entries downward from the top.
 */
void __init early_tlb_init(void)
{
	unsigned long index;
	/* was garbled "¤t_cpu_data" (mojibake of "&current_cpu_data") */
	struct cpuinfo_mips *c = &current_cpu_data;

	tmp_tlb_ent = c->tlbsize;

	/*
	 * initialize entire TLB to unique virtual addresses
	 * but with the PAGE_VALID bit not set
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);

	write_c0_entrylo0(0);	/* not _PAGE_VALID */
	write_c0_entrylo1(0);
	for (index = 0; index < c->tlbsize; index++) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(index+32));
		write_c0_index(index);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}
	tlbw_use_hazard();
}
/*
 * Flush the entire local TLB.
 *
 * Saves EntryHi, zeroes both EntryLo registers, then overwrites every
 * non-wired entry with a unique, impossible VPN2 (UNIQUE_ENTRYHI) so no
 * two entries can match.  ENTER/EXIT_CRITICAL and FLUSH_ITLB are the
 * platform's irq-save and micro-ITLB-flush wrappers.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();	/* leave wired (pinned) entries alone */

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);	/* restore the saved ASID/VPN2 */
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
/*
 * Flush the entire local TLB.
 *
 * Writes CKSEG0 (an unmapped address) into EntryHi and zero EntryLo
 * values, then rewrites every non-wired entry by index.
 *
 * NOTE(review): every entry gets the SAME VPN2 (CKSEG0) -- unlike the
 * other variants that use a unique VPN2 per index.  Presumably this CPU
 * tolerates duplicate invalid entries without a machine check; confirm
 * against the target core's manual.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	unsigned long entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;	/* keep only the ASID */
	write_c0_entryhi(CKSEG0);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();	/* skip the wired (pinned) entries */

	/* Blast 'em all away. */
	while (entry < NTLB_ENTRIES) {
		write_c0_index(entry);
		tlb_write_indexed();
		entry++;
	}
	write_c0_entryhi(old_ctx);	/* restore the saved ASID */
	local_irq_restore(flags);
}
/*
 * Write one TLB entry at the given index.
 *
 * Loads PageMask, EntryHi and both EntryLo registers, then commits the
 * entry with an indexed TLB write.
 *
 * NOTE(review): no mtc0_tlbw_hazard()/tlbw_use_hazard() (or BARRIER)
 * around tlb_write_indexed() here, unlike the other TLB writers in this
 * file -- confirm the caller or the target core handles the CP0 write
 * hazard.  Interrupts are also not disabled; presumably the caller does.
 */
void write_one_tlb(int index, u32 pagemask, u32 hi, u32 low0, u32 low1)
{
	write_c0_entrylo0(low0);
	write_c0_pagemask(pagemask);
	write_c0_entrylo1(low1);
	write_c0_entryhi(hi);
	write_c0_index(index);
	tlb_write_indexed();
}
static void ipu_add_wired_entry(unsigned long pid, unsigned long entrylo0, unsigned long entrylo1, unsigned long entryhi, unsigned long pagemask) { unsigned long flags; unsigned long wired; unsigned long old_pagemask; unsigned long old_ctx; struct task_struct *g, *p; /* We will lock an 4MB page size entry to map the 4MB reserved IPU memory */ wired = read_c0_wired(); if (wired) return; do_each_thread(g, p) { if (p->pid == pid ) g_asid = p->mm->context[0]; } while_each_thread(g, p); local_irq_save(flags); entrylo0 = entrylo0 >> 6; /* PFN */ entrylo0 |= 0x6 | (0 << 3); /* Write-through cacheable, dirty, valid */ /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi() & 0xff; old_pagemask = read_c0_pagemask(); wired = read_c0_wired(); write_c0_wired(wired + 1); write_c0_index(wired); BARRIER; entryhi &= ~0xff; /* new add, 20070906 */ entryhi |= g_asid; /* new add, 20070906 */ // entryhi |= old_ctx; /* new add, 20070906 */ write_c0_pagemask(pagemask); write_c0_entryhi(entryhi); write_c0_entrylo0(entrylo0); write_c0_entrylo1(entrylo1); BARRIER; tlb_write_indexed(); BARRIER; write_c0_entryhi(old_ctx); BARRIER; write_c0_pagemask(old_pagemask); local_flush_tlb_all(); local_irq_restore(flags); #if defined(DEBUG) printk("\nold_ctx=%03d\n", old_ctx); show_tlb(); #endif }
/*
 * Initialize the TLB to a known state.
 *
 * Both EntryLo registers get the default physical address with coherent/
 * valid/dirty attributes but the GLOBAL bit explicitly cleared (the
 * ^TLB_GLOBAL cancels the TLB_GLOBAL OR'd in just before).  Each entry
 * gets a unique VPN2 (index << 13, i.e. one 8KB pair per index) tagged
 * with the invalid ASID, then EntryHi is cleared.
 *
 * NOTE(review): no hazard barriers around tlbwi() and no irq protection
 * here -- presumably this runs early, before interrupts are enabled.
 */
void tlb_init()
{
	int index;

	write_c0_entrylo0((DEFAULT_PHY_ADDR|TLB_COHERENT | TLB_VALID | TLB_DIRTY | TLB_GLOBAL)^TLB_GLOBAL);
	write_c0_entrylo1((TLB_ELO0TO1(DEFAULT_PHY_ADDR)|TLB_COHERENT | TLB_VALID | TLB_DIRTY | TLB_GLOBAL)^TLB_GLOBAL);
	for(index=0;index<TLB_SIZE;index++)
	{
		write_c0_index(index);
		write_c0_entryhi(IVLD_ASID|(index<<13));	/* unique VPN2 per entry */
		tlbwi();
	}
	write_c0_entryhi(0x0);
}
/*
 * Write one temporary TLB entry, allocating slots downward from the top
 * of the TLB (tmp_tlb_ent was seeded with the TLB size by
 * early_tlb_init()).
 *
 * NOTE(review): no bounds check on tmp_tlb_ent -- repeated calls could
 * underflow into wired/low entries; confirm callers bound the count.
 */
void __init add_tmptlb_entry(unsigned long entrylo0, unsigned long entrylo1,
			     unsigned long entryhi, unsigned long pagemask)
{
/* write one tlb entry */
	--tmp_tlb_ent;	/* next free slot, counting down from the top */
	write_c0_index(tmp_tlb_ent);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
}
static void ipu_add_wired_entry(unsigned long pid, unsigned long entrylo0, unsigned long entrylo1, unsigned long entryhi, unsigned long pagemask) { unsigned long flags; unsigned long wired; unsigned long old_pagemask; unsigned long old_ctx; struct task_struct *g, *p; do_each_thread(g, p) { if (p->pid == pid ) g_asid = p->mm->context[0]; } while_each_thread(g, p); local_irq_save(flags); /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi() & 0xff; old_pagemask = read_c0_pagemask(); wired = read_c0_wired(); write_c0_wired(wired + 1); write_c0_index(wired); BARRIER; entryhi &= ~0xff; /* new add, 20070906 */ entryhi |= g_asid; /* new add, 20070906 */ // entryhi |= old_ctx; /* new add, 20070906 */ write_c0_pagemask(pagemask); write_c0_entryhi(entryhi); write_c0_entrylo0(entrylo0); write_c0_entrylo1(entrylo1); BARRIER; tlb_write_indexed(); BARRIER; write_c0_entryhi(old_ctx); BARRIER; write_c0_pagemask(old_pagemask); local_flush_tlb_all(); local_irq_restore(flags); #if defined(DEBUG) printk("\nold_ctx=%03d\n", old_ctx); show_tlb(); #endif }
/*
 * Dump TLB entries [first, last] to the console (R3000-style TLB).
 *
 * On this core the Index register's index field starts at bit 8 (hence
 * i << 8) and the EntryHi ASID field is bits 6-11 (mask 0xfc0).  Entries
 * whose VPN equals KSEG0 (0x80000000) are the "unused" fill pattern and
 * are skipped; only entries matching the current ASID are printed.
 * EntryHi is restored at the end (it was clobbered by tlbr).
 */
void dump_tlb(int first, int last)
{
	int	i;
	unsigned int asid;
	unsigned long entryhi, entrylo0;

	asid = read_c0_entryhi() & 0xfc0;	/* R3k ASID field, bits 6-11 */

	for(i=first;i<=last;i++)
	{
		write_c0_index(i<<8);	/* R3k Index field starts at bit 8 */
		__asm__ __volatile__(
			".set\tnoreorder\n\t"
			"tlbr\n\t"
			"nop\n\t"
			".set\treorder");
		entryhi  = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();

		/* Unused entries have a virtual address of KSEG0.  */
		if ((entryhi & 0xffffe000) != 0x80000000
		    && (entryhi & 0xfc0) == asid) {
			/*
			 * Only print entries in use
			 */
			printk("Index: %2d ", i);

			/* N/D/V/G flag bits live at EntryLo bits 11-8 on the R3k */
			printk("va=%08lx asid=%08lx"
			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
			       (entryhi & 0xffffe000),
			       entryhi & 0xfc0,
			       entrylo0 & PAGE_MASK,
			       (entrylo0 & (1 << 11)) ? 1 : 0,
			       (entrylo0 & (1 << 10)) ? 1 : 0,
			       (entrylo0 & (1 << 9)) ? 1 : 0,
			       (entrylo0 & (1 << 8)) ? 1 : 0);
		}
	}
	printk("\n");

	write_c0_entryhi(asid);	/* restore the ASID clobbered by tlbr */
}
/*
 * Dump all 32 TLB entries to the console for debugging.
 *
 * For each entry: reads it back with tlb_read(), then prints the ASID,
 * the (G)lobal marker (set only when BOTH EntryLo G bits are set), the
 * virtual address, and the decoded attributes of both physical pages
 * (PFN from bits 31-6, cache attribute bits 5-3, D/V/G bits 2-0), plus
 * the raw register values.  EntryHi (ASID) is restored afterwards since
 * tlb_read() clobbers it.
 *
 * NOTE(review): the TLB size is hard-coded at 32 entries -- confirm
 * against the target core.
 */
void show_tlb(void)
{
#define ASID_MASK 0xFF

	unsigned long flags;
	unsigned int old_ctx;
	unsigned int entry;
	unsigned int entrylo0, entrylo1, entryhi;
	unsigned int pagemask;

	local_irq_save(flags);

	/* Save old context */
	old_ctx = (read_c0_entryhi() & 0xff);

	printk("TLB content:\n");
	entry = 0;
	while(entry < 32) {
		write_c0_index(entry);
		BARRIER;
		tlb_read();
		BARRIER;
		entryhi = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();
		pagemask = read_c0_pagemask();
		printk("%02d: ASID=%02d%s VA=0x%08x ",
		       entry,
		       entryhi & ASID_MASK,
		       (entrylo0 & entrylo1 & 1) ? "(G)" : "   ",	/* global iff both G bits set */
		       entryhi & ~ASID_MASK);
		printk("PA0=0x%08x C0=%x %s%s%s\n",
		       (entrylo0>>6)<<12,	/* PFN -> physical address */
		       (entrylo0>>3) & 7,	/* cache attribute */
		       (entrylo0 & 4) ? "Dirty " : "",
		       (entrylo0 & 2) ? "Valid " : "Invalid ",
		       (entrylo0 & 1) ? "Global" : "");
		printk("\t\t\t PA1=0x%08x C1=%x %s%s%s\n",
		       (entrylo1>>6)<<12,
		       (entrylo1>>3) & 7,
		       (entrylo1 & 4) ? "Dirty " : "",
		       (entrylo1 & 2) ? "Valid " : "Invalid ",
		       (entrylo1 & 1) ? "Global" : "");

		printk("\t\tpagemask=0x%08x", pagemask);
		printk("\tentryhi=0x%08x\n", entryhi);
		printk("\t\tentrylo0=0x%08x", entrylo0);
		printk("\tentrylo1=0x%08x\n", entrylo1);

		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);	/* restore the ASID clobbered by tlb_read() */

	local_irq_restore(flags);
}
/*
 * Dump TLB entries [first, last] to the console (R3K TLB).
 *
 * Skips the KSEG0-fill "unused" entries, and prints an entry only when it
 * is global (R3K_ENTRYLO_G) or matches the current ASID.  The Index
 * register's index field starts at bit 8 on this core, hence i << 8.
 * EntryHi is restored at the end because tlbr clobbers it.
 */
static void dump_tlb(int first, int last)
{
	int	i;
	unsigned int asid;
	unsigned long entryhi, entrylo0;

	asid = read_c0_entryhi() & ASID_MASK;

	for (i = first; i <= last; i++) {
		write_c0_index(i<<8);	/* R3k Index field starts at bit 8 */
		__asm__ __volatile__(
			".set\tnoreorder\n\t"
			"tlbr\n\t"
			"nop\n\t"
			".set\treorder");
		entryhi  = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();

		/* Unused entries have a virtual address of KSEG0.  */
		if ((entryhi & PAGE_MASK) != KSEG0
		    && (entrylo0 & R3K_ENTRYLO_G
			|| (entryhi & ASID_MASK) == asid)) {
			/*
			 * Only print entries in use
			 */
			printk("Index: %2d ", i);

			printk("va=%08lx asid=%08lx"
			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
			       entryhi & PAGE_MASK,
			       entryhi & ASID_MASK,
			       entrylo0 & PAGE_MASK,
			       (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
			       (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
			       (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
			       (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
		}
	}
	printk("\n");

	write_c0_entryhi(asid);	/* restore the ASID clobbered by tlbr */
}
/*
 * Claim the next wired TLB slot and program it with the given EntryLo
 * pair, EntryHi, and a page size of @pgsize bytes.
 *
 * Returns 0 on success, -1 when no wired slot is available (either the
 * TLB is full or the Wired register field is exhausted).
 */
static int add_wired_tlb_entry(uint32_t entrylo0, uint32_t entrylo1,
			       uint32_t entryhi, uint32_t pgsize)
{
	const uint32_t slot = read_c0_wired();

	/* Refuse when every slot is taken or the Wired field would overflow. */
	if (slot >= get_tlb_size() || slot >= C0_WIRED_MASK) {
		printk(BIOS_ERR, "Ran out of TLB entries\n");
		return -1;
	}

	/* PageMask encodes (pages-per-entry - 1) shifted into position. */
	const uint32_t mask = ((pgsize / MIN_PAGE_SIZE) - 1) << C0_PAGEMASK_SHIFT;

	write_c0_wired(slot + 1);	/* reserve the slot before writing it */
	write_c0_index(slot);
	write_c0_pagemask(mask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);

	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	return 0;
}
/*
 * Flush the entire local TLB.
 *
 * Disables the hardware page walker for the duration, then overwrites
 * every non-wired entry with a unique, impossible VPN2.  On the VMIPS
 * configuration the EntryHi write goes through the raw 64-bit CP0
 * accessor ($10 is the EntryHi register number).
 */
void local_flush_tlb_all(void)
{
	unsigned long flags, config6_flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	disable_pgwalker(config6_flags);

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();	/* skip the wired (pinned) entries */

#if defined(CONFIG_MAPPED_KERNEL)
	/* With a mapped kernel the kernel's own wired mappings must exist */
	if (!entry)
		printk("[%s] flushing entry=%d in MAPPED_KERNEL mode!\n", __FUNCTION__, entry);
#endif

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
#else
		__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(entry)));
#endif
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);	/* restore the saved ASID/VPN2 */
	FLUSH_ITLB;
	enable_pgwalker(config6_flags);
	EXIT_CRITICAL(flags);
}
/* TLB operations. */

/*
 * Flush the entire local TLB (R3000-class cores).
 *
 * Cores without a Wired register have 8 fixed wired entries, so flushing
 * starts at index 8 in that case.  The Index field starts at bit 8
 * (entry << 8) and each entry is rewritten with a unique KSEG-range VPN
 * ((entry | 0x80000) << 12) so no two entries can match.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi() & ASID_MASK;	/* save current ASID */
	write_c0_entrylo0(0);
	entry = r3k_have_wired_reg ? read_c0_wired() : 8;	/* 8 fixed wired entries otherwise */
	for (; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry << 8);	/* R3k Index field starts at bit 8 */
		write_c0_entryhi((entry | 0x80000) << 12);	/* unique unmapped VPN */
		BARRIER;
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);	/* restore the saved ASID */
	local_irq_restore(flags);
}
/*
 * Dump TLB entries [first, last] to the console (MIPS32/64 with optional
 * EHINV, RIXI and XPA support).
 *
 * Skips entries marked invalid via EHINV, entries still holding the
 * CKSEG0 fill pattern, and non-global entries whose ASID does not match
 * the current one.  For each live entry, prints the page mask, VPN2/ASID,
 * and for both EntryLo halves the physical address (extended through the
 * XPA high bits when enabled), cache attribute, and D/V/G (+RI/XI) bits.
 * Index, EntryHi and PageMask are saved and restored around the loop
 * because tlb_read() clobbers them.
 */
static void dump_tlb(int first, int last)
{
	unsigned long s_entryhi, entryhi, asid;
	unsigned long long entrylo0, entrylo1, pa;
	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
#ifdef CONFIG_32BIT
	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
	int pwidth = xpa ? 11 : 8;	/* hex digits needed for the PA */
	int vwidth = 8;
#else
	bool xpa = false;
	int pwidth = 11;
	int vwidth = 11;
#endif

	/* save the registers tlb_read() will clobber */
	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = s_entryhi & 0xff;

	for (i = first; i <= last; i++) {
		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		pagemask = read_c0_pagemask();
		entryhi	 = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();

		/* EHINV bit marks entire entry as invalid */
		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
			continue;
		/*
		 * Prior to tlbinv, unused entries have a virtual address of
		 * CKSEG0.
		 */
		if ((entryhi & ~0x1ffffUL) == CKSEG0)
			continue;
		/*
		 * ASID takes effect in absence of G (global) bit.
		 * We check both G bits, even though architecturally they should
		 * match one another, because some revisions of the SB1 core may
		 * leave only a single G bit set after a machine check exception
		 * due to duplicate TLB entry.
		 */
		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
		    (entryhi & 0xff) != asid)
			continue;

		/*
		 * Only print entries in use
		 */
		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));

		c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
		c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;

		printk("va=%0*lx asid=%02lx\n",
		       vwidth, (entryhi & ~0x1fffUL),
		       entryhi & 0xff);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo0() << 30;	/* high PA bits */
		pa = (pa << 6) & PAGE_MASK;	/* PFN field -> physical address */
		printk("\t[");
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
		       pwidth, pa, c0,
		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
		pa = (pa << 6) & PAGE_MASK;
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
		       pwidth, pa, c1,
		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
	}
	printk("\n");

	/* restore the clobbered registers */
	write_c0_entryhi(s_entryhi);
	write_c0_index(s_index);
	write_c0_pagemask(s_pagemask);
}