/*
 * Flush the entire local TLB.
 *
 * Every non-wired entry is overwritten with a zeroed EntryLo pair and a
 * unique, never-matching VPN2 (UNIQUE_ENTRYHI), which effectively
 * invalidates it.  Wired entries (indices below the Wired register) are
 * deliberately left untouched.  Runs with the critical-section macros
 * held so the CP0 register sequence is not interrupted.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Start past the wired entries; those must stay resident. */
	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		/* Hazard barrier before the indexed TLB write takes effect. */
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	/* Restore the ASID/context the caller was running with. */
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
/*
 * Report the wired-entry count and dump each wired TLB slot.
 * R3000-class cores lacking a Wired register have 8 fixed wired slots.
 */
void dump_tlb_wired(void)
{
	int wired;

	if (r3k_have_wired_reg)
		wired = read_c0_wired();
	else
		wired = 8;

	printk("Wired: %d", wired);
	dump_tlb(0, wired - 1);
}
/*
 * Release the IPU's wired TLB mapping by resetting the Wired register
 * to zero (only if any entries are currently wired).  Interrupts are
 * disabled around the CP0 access.
 */
static void ipu_del_wired_entry(void)
{
	unsigned long flags;
	unsigned long wired;

	local_irq_save(flags);

	wired = read_c0_wired();
	if (wired != 0)
		write_c0_wired(0);

	local_irq_restore(flags);
}
/*
 * Wire one TLB entry mapping the 4MB reserved IPU memory region,
 * tagged with the ASID of the task identified by @pid.
 *
 * @pid:      pid of the task whose ASID the mapping is tagged with
 * @entrylo0: physical address for the even page (shifted/flagged below)
 * @entrylo1: EntryLo1 value for the odd page, used as given
 * @entryhi:  VPN2 for the mapping; its ASID field is replaced below
 * @pagemask: PageMask value selecting the page size
 *
 * Does nothing if a wired entry already exists (Wired != 0).
 */
static void ipu_add_wired_entry(unsigned long pid,
				unsigned long entrylo0, unsigned long entrylo1,
				unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;
	struct task_struct *g, *p;

	/* We will lock an 4MB page size entry to map the 4MB reserved IPU memory */
	wired = read_c0_wired();
	if (wired)
		return;

	/*
	 * NOTE(review): this walks the task list without tasklist_lock/RCU
	 * and dereferences p->mm unconditionally — kernel threads have a
	 * NULL mm, so this looks unsafe; confirm against the callers.
	 */
	do_each_thread(g, p) {
		if (p->pid == pid)
			g_asid = p->mm->context[0];
	} while_each_thread(g, p);

	local_irq_save(flags);

	entrylo0 = entrylo0 >> 6;		/* PFN */
	entrylo0 |= 0x6 | (0 << 3);		/* Write-through cacheable, dirty, valid */

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & 0xff;	/* keep only the ASID field */
	old_pagemask = read_c0_pagemask();

	/* Claim index `wired` and grow the wired region by one. */
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;

	/* Replace the ASID field with the target task's ASID. */
	entryhi &= ~0xff;			/* new add, 20070906 */
	entryhi |= g_asid;			/* new add, 20070906 */
//	entryhi |= old_ctx;			/* new add, 20070906 */

	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	/* Restore the previous context and page mask. */
	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);

#if defined(DEBUG)
	printk("\nold_ctx=%03d\n", old_ctx);
	show_tlb();
#endif
}
/*
 * Reserve one wired TLB entry for mapping the commpage into the guest
 * kernel's address space, and remember which index was claimed.
 */
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long slot;

	/* The next free wired index becomes the commpage entry. */
	slot = read_c0_wired();
	write_c0_wired(slot + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = slot;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}
/*
 * Flush the entire local TLB.
 *
 * On cores with the tlbinv facility and no wired entries, the VTLB is
 * invalidated with a single tlbinvf and each FTLB set with one tlbinvf
 * apiece.  Otherwise every entry from the wired boundary upward is
 * overwritten with a unique, never-matching VPN2.  The hardware page
 * table walker is stopped for the duration of the flush.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();	/* invalidate VTLB */
		}
		/* One tlbinvf per FTLB set; sets start after the VTLB. */
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();	/* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	/* Restore the caller's context and restart the HTW. */
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
/*
 * Wire one TLB entry for the IPU, tagged with the ASID of the task
 * identified by @pid.
 *
 * @pid:      pid of the task whose ASID the mapping is tagged with
 * @entrylo0: EntryLo0 value for the even page, used as given
 * @entrylo1: EntryLo1 value for the odd page, used as given
 * @entryhi:  VPN2 for the mapping; its ASID field is replaced below
 * @pagemask: PageMask value selecting the page size
 *
 * NOTE(review): unlike other variants of this routine, there is no
 * check that a wired entry already exists — repeated calls keep
 * incrementing the Wired register; confirm this is intended.
 */
static void ipu_add_wired_entry(unsigned long pid,
				unsigned long entrylo0, unsigned long entrylo1,
				unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;
	struct task_struct *g, *p;

	/*
	 * NOTE(review): this walks the task list without tasklist_lock/RCU
	 * and dereferences p->mm unconditionally — kernel threads have a
	 * NULL mm, so this looks unsafe; confirm against the callers.
	 */
	do_each_thread(g, p) {
		if (p->pid == pid)
			g_asid = p->mm->context[0];
	} while_each_thread(g, p);

	local_irq_save(flags);

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & 0xff;	/* keep only the ASID field */
	old_pagemask = read_c0_pagemask();

	/* Claim index `wired` and grow the wired region by one. */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;

	/* Replace the ASID field with the target task's ASID. */
	entryhi &= ~0xff;			/* new add, 20070906 */
	entryhi |= g_asid;			/* new add, 20070906 */
//	entryhi |= old_ctx;			/* new add, 20070906 */

	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	/* Restore the previous context and page mask. */
	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);

#if defined(DEBUG)
	printk("\nold_ctx=%03d\n", old_ctx);
	show_tlb();
#endif
}
/*
 * Print a labelled dump of the interesting CP0 (system control
 * coprocessor) registers via print_cp0().
 *
 * @key: prefix tag passed through to print_cp0(); NULL is treated as "".
 *
 * Fix: the labels for CP0 registers 2 and 3 were off by one — register 2
 * is EntryLo0 and register 3 is EntryLo1, matching the accessors that
 * are actually called (read_c0_entrylo0/read_c0_entrylo1).
 */
void dump_cp0(char *key)
{
	if (key == NULL)
		key = "";

	print_cp0(key, 0, "INDEX ", read_c0_index());
	print_cp0(key, 2, "ENTRYLO0", read_c0_entrylo0());
	print_cp0(key, 3, "ENTRYLO1", read_c0_entrylo1());
	print_cp0(key, 4, "CONTEXT ", read_c0_context());
	print_cp0(key, 5, "PAGEMASK", read_c0_pagemask());
	print_cp0(key, 6, "WIRED ", read_c0_wired());
	//print_cp0(key, 8, "BADVADDR", read_c0_badvaddr());
	print_cp0(key, 9, "COUNT ", read_c0_count());
	print_cp0(key, 10, "ENTRYHI ", read_c0_entryhi());
	print_cp0(key, 11, "COMPARE ", read_c0_compare());
	print_cp0(key, 12, "STATUS ", read_c0_status());
	/* 0xffff87ff masks out bits 11-14 — presumably to hide noisy
	 * interrupt-pending bits; confirm against print_cp0's formatting. */
	print_cp0(key, 13, "CAUSE ", read_c0_cause() & 0xffff87ff);
	print_cp0(key, 16, "CONFIG ", read_c0_config());
}
/*
 * Claim the next free wired TLB slot and program it with the given
 * mapping.
 *
 * @entrylo0: EntryLo0 value for the even page
 * @entrylo1: EntryLo1 value for the odd page
 * @entryhi:  EntryHi (VPN2/ASID) value for the entry
 * @pgsize:   page size in bytes, converted to a PageMask field below
 *
 * Returns 0 on success, -1 if no TLB slot is available.
 */
static int add_wired_tlb_entry(uint32_t entrylo0, uint32_t entrylo1,
			       uint32_t entryhi, uint32_t pgsize)
{
	uint32_t slot = read_c0_wired();

	/* Refuse to exceed the TLB size or the Wired field's capacity. */
	if (slot >= get_tlb_size() || slot >= C0_WIRED_MASK) {
		printk(BIOS_ERR, "Ran out of TLB entries\n");
		return -1;
	}

	write_c0_wired(slot + 1);
	write_c0_index(slot);
	write_c0_pagemask(((pgsize / MIN_PAGE_SIZE) - 1) << C0_PAGEMASK_SHIFT);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);

	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	return 0;
}
/*
 * Flush the entire local TLB.
 *
 * Every non-wired entry is overwritten with a zeroed EntryLo pair and a
 * unique, never-matching VPN2.  The hardware page walker is disabled
 * for the duration (config6_flags saves its state).  On VMIPS the
 * 64-bit EntryHi must be written directly via $10.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags, config6_flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	disable_pgwalker(config6_flags);

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Start past the wired entries; those must stay resident. */
	entry = read_c0_wired();

#if defined(CONFIG_MAPPED_KERNEL)
	/* A mapped kernel relies on wired entries for its own text/data;
	 * wired == 0 here would wipe the kernel's mapping. */
	if (!entry)
		printk("[%s] flushing entry=%d in MAPPED_KERNEL mode!\n",
		       __FUNCTION__, entry);
#endif

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
#else
		__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(entry)));
#endif
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	/* Restore the caller's context and re-enable the page walker. */
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	enable_pgwalker(config6_flags);
	EXIT_CRITICAL(flags);
}
/* TLB operations. */

/*
 * Flush all non-wired entries of the R3000-style TLB.
 *
 * The R3000 has no EntryLo1/PageMask and its Index register carries the
 * entry number in a shifted field, hence the `entry << 8` writes.  Each
 * entry is replaced with a VPN that can never match (see note below).
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	/* Preserve only the current ASID field of EntryHi. */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
	/* Cores without a Wired register have 8 fixed wired entries. */
	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
	for (; entry < current_cpu_data.tlbsize; entry++) {
		/* Index register expects the entry number shifted up. */
		write_c0_index(entry << 8);
		/* (entry | 0x80000) << 12 yields a VPN at 0x80000000+,
		 * i.e. in the unmapped kernel segment, so the entry can
		 * never be matched — each entry also stays unique. */
		write_c0_entryhi((entry | 0x80000) << 12);
		BARRIER;
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
/*
 * Flush every non-wired TLB entry.
 *
 * Each entry is rewritten with a CKSEG0 (unmapped-segment) VPN and
 * zeroed EntryLo values, so it can never be matched.  The caller's
 * ASID is restored afterwards.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long saved_asid;
	unsigned long idx;

	local_irq_save(flags);

	/* Save old context and create impossible VPN2 value */
	saved_asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(CKSEG0);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast everything above the wired region. */
	for (idx = read_c0_wired(); idx < NTLB_ENTRIES; idx++) {
		write_c0_index(idx);
		tlb_write_indexed();
	}

	write_c0_entryhi(saved_asid);
	local_irq_restore(flags);
}
/*
 * Dump every TLB entry beyond the wired region.
 * R3000-class cores lacking a Wired register have 8 fixed wired slots.
 */
void dump_tlb_nonwired(void)
{
	int first;

	if (r3k_have_wired_reg)
		first = read_c0_wired();
	else
		first = 8;

	dump_tlb(first, current_cpu_data.tlbsize - 1);
}