void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry;

#ifdef DEBUG_TLB
    printk("[tlball]");
#endif

    local_irq_save(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = (read_c0_entryhi() & ASID_MASK);
    write_c0_entryhi(XKPHYS);
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);
    BARRIER;

    entry = read_c0_wired();

    /* Blast 'em all away. */
    while (entry < current_cpu_data.tlbsize) {
        /* Make sure all entries differ. */
        write_c0_entryhi(XKPHYS + entry * 0x2000);
        write_c0_index(entry);
        BARRIER;
        tlb_write_indexed();
        BARRIER;
        entry++;
    }
    BARRIER;
    write_c0_entryhi(old_ctx);
    local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
    int cpu = smp_processor_id();
    unsigned long flags;
    int oldpid, newpid;
    signed long idx;

    if (!cpu_context(cpu, vma->vm_mm))
        return;

    newpid = cpu_asid(cpu, vma->vm_mm);
    page &= PAGE_MASK;
    local_irq_save(flags);
    oldpid = read_c0_entryhi();
    write_c0_vaddr(page);
    write_c0_entryhi(newpid);
    tlb_probe();
    idx = read_c0_tlbset();
    if (idx < 0)
        goto finish;

    write_c0_entrylo(0);
    write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
    tlb_write();

finish:
    write_c0_entryhi(oldpid);
    local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry;

    ENTER_CRITICAL(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi();
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);

    entry = read_c0_wired();

    /* Blast 'em all away. */
    while (entry < current_cpu_data.tlbsize) {
        /* Make sure all entries differ. */
        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
        write_c0_index(entry);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        entry++;
    }
    tlbw_use_hazard();
    write_c0_entryhi(old_ctx);
    FLUSH_ITLB;
    EXIT_CRITICAL(flags);
}
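/*
 * Reference note (not code taken from any of the trees above): several of
 * the flush routines here rely on a UNIQUE_ENTRYHI() helper to park
 * invalidated entries at distinct, never-matched virtual addresses.  A
 * minimal sketch of the conventional definition, consistent with the
 * open-coded "CKSEG0 + (idx << (PAGE_SHIFT + 1))" form used by the TFP
 * variants below, is:
 */
#define UNIQUE_ENTRYHI(idx)    (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
/*
 * On cores that support EHINV the arch headers typically OR in
 * MIPS_ENTRYHI_EHINV as well; the exact definition belongs to the
 * respective tree's headers.
 */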
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    unsigned long entry;

#ifdef DEBUG_TLB
    printk("[tlball]");
#endif

    local_irq_save(flags);

    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi() & ASID_MASK;
    write_c0_entryhi(CKSEG0);
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);

    entry = read_c0_wired();

    /* Blast 'em all away. */
    while (entry < NTLB_ENTRIES) {
        write_c0_index(entry);
        tlb_write_indexed();
        entry++;
    }

    write_c0_entryhi(old_ctx);
    local_irq_restore(flags);
}
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
    unsigned long flags;
    pgd_t *pgdp;
    pmd_t *pmdp;
    pte_t *ptep;
    int pid;

    /*
     * Handle debugger faulting in for debugee.
     */
    if (current->active_mm != vma->vm_mm)
        return;

    pid = read_c0_entryhi() & ASID_MASK;

    local_irq_save(flags);
    address &= PAGE_MASK;
    write_c0_vaddr(address);
    write_c0_entryhi(pid);
    pgdp = pgd_offset(vma->vm_mm, address);
    pmdp = pmd_offset(pgdp, address);
    ptep = pte_offset_map(pmdp, address);
    tlb_probe();

    write_c0_entrylo(pte_val(*ptep++) >> 6);
    tlb_write();

    write_c0_entryhi(pid);
    local_irq_restore(flags);
}
static void ipu_add_wired_entry(unsigned long pid,
                                unsigned long entrylo0, unsigned long entrylo1,
                                unsigned long entryhi, unsigned long pagemask)
{
    unsigned long flags;
    unsigned long wired;
    unsigned long old_pagemask;
    unsigned long old_ctx;
    struct task_struct *g, *p;

    /* We will lock a 4MB page size entry to map the 4MB reserved IPU memory */
    wired = read_c0_wired();
    if (wired)
        return;

    do_each_thread(g, p) {
        if (p->pid == pid)
            g_asid = p->mm->context[0];
    } while_each_thread(g, p);

    local_irq_save(flags);

    entrylo0 = entrylo0 >> 6;          /* PFN */
    entrylo0 |= 0x6 | (0 << 3);        /* Write-through cacheable, dirty, valid */

    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi() & 0xff;
    old_pagemask = read_c0_pagemask();
    wired = read_c0_wired();
    write_c0_wired(wired + 1);
    write_c0_index(wired);
    BARRIER;

    entryhi &= ~0xff;                  /* new add, 20070906 */
    entryhi |= g_asid;                 /* new add, 20070906 */
    // entryhi |= old_ctx;             /* new add, 20070906 */

    write_c0_pagemask(pagemask);
    write_c0_entryhi(entryhi);
    write_c0_entrylo0(entrylo0);
    write_c0_entrylo1(entrylo1);
    BARRIER;
    tlb_write_indexed();
    BARRIER;

    write_c0_entryhi(old_ctx);
    BARRIER;
    write_c0_pagemask(old_pagemask);
    local_flush_tlb_all();
    local_irq_restore(flags);

#if defined(DEBUG)
    printk("\nold_ctx=%03lu\n", old_ctx);
    show_tlb();
#endif
}
void tlb_init(void)
{
    int index;

    write_c0_entrylo0((DEFAULT_PHY_ADDR | TLB_COHERENT | TLB_VALID | TLB_DIRTY | TLB_GLOBAL) ^ TLB_GLOBAL);
    write_c0_entrylo1((TLB_ELO0TO1(DEFAULT_PHY_ADDR) | TLB_COHERENT | TLB_VALID | TLB_DIRTY | TLB_GLOBAL) ^ TLB_GLOBAL);

    for (index = 0; index < TLB_SIZE; index++) {
        write_c0_index(index);
        write_c0_entryhi(IVLD_ASID | (index << 13));
        tlbwi();
    }

    write_c0_entryhi(0x0);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
    struct mm_struct *mm = vma->vm_mm;
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long size, flags;
        unsigned long config6_flags;

        ENTER_CRITICAL(flags);
        disable_pgwalker(config6_flags);

        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize/2) {
            int oldpid = read_c0_entryhi();
            int newpid = cpu_asid(cpu, mm);

            start &= (PAGE_MASK << 1);
            end += ((PAGE_SIZE << 1) - 1);
            end &= (PAGE_MASK << 1);
            while (start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                    continue;
                /* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
#else
                __write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx)));
#endif
                mtc0_tlbw_hazard();
                tlb_write_indexed();
            }
            tlbw_use_hazard();
            write_c0_entryhi(oldpid);
        } else {
            drop_mmu_context(mm, cpu);
        }
        FLUSH_ITLB;
        enable_pgwalker(config6_flags);
        EXIT_CRITICAL(flags);
    }
}
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry, ftlbhighset;

    local_irq_save(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi();
    htw_stop();
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);

    entry = read_c0_wired();

    /*
     * Blast 'em all away.
     * If there are any wired entries, fall back to iterating
     */
    if (cpu_has_tlbinv && !entry) {
        if (current_cpu_data.tlbsizevtlb) {
            write_c0_index(0);
            mtc0_tlbw_hazard();
            tlbinvf();  /* invalidate VTLB */
        }
        ftlbhighset = current_cpu_data.tlbsizevtlb +
                      current_cpu_data.tlbsizeftlbsets;
        for (entry = current_cpu_data.tlbsizevtlb;
             entry < ftlbhighset;
             entry++) {
            write_c0_index(entry);
            mtc0_tlbw_hazard();
            tlbinvf();  /* invalidate one FTLB set */
        }
    } else {
        while (entry < current_cpu_data.tlbsize) {
            /* Make sure all entries differ. */
            write_c0_entryhi(UNIQUE_ENTRYHI(entry));
            write_c0_index(entry);
            mtc0_tlbw_hazard();
            tlb_write_indexed();
            entry++;
        }
    }
    tlbw_use_hazard();
    write_c0_entryhi(old_ctx);
    htw_start();
    flush_micro_tlb();
    local_irq_restore(flags);
}
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long flags;
        int size;

#ifdef DEBUG_TLB
        printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK),
               start, end);
#endif
        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize/2) {
            int oldpid = read_c0_entryhi() & ASID_MASK;
            int newpid = cpu_asid(cpu, mm);

            start &= (PAGE_MASK << 1);
            end += ((PAGE_SIZE << 1) - 1);
            end &= (PAGE_MASK << 1);
            while (start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                BARRIER;
                tlb_probe();
                BARRIER;
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                    continue;
                /* Make sure all entries differ. */
                write_c0_entryhi(XKPHYS + idx * 0x2000);
                BARRIER;
                tlb_write_indexed();
                BARRIER;
            }
            write_c0_entryhi(oldpid);
        } else {
            drop_mmu_context(mm, cpu);
        }
        local_irq_restore(flags);
    }
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
    struct mm_struct *mm = vma->vm_mm;
    int cpu = smp_processor_id();
    unsigned long flags;
    int oldpid, newpid, size;

    if (!cpu_context(cpu, mm))
        return;

    size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
    size = (size + 1) >> 1;

    local_irq_save(flags);

    if (size > TFP_TLB_SIZE / 2) {
        drop_mmu_context(mm, cpu);
        goto out_restore;
    }

    oldpid = read_c0_entryhi();
    newpid = cpu_asid(cpu, mm);

    write_c0_entrylo(0);

    start &= PAGE_MASK;
    end += (PAGE_SIZE - 1);
    end &= PAGE_MASK;
    while (start < end) {
        signed long idx;

        write_c0_vaddr(start);
        write_c0_entryhi(start);
        start += PAGE_SIZE;
        tlb_probe();
        idx = read_c0_tlbset();
        if (idx < 0)
            continue;
        write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
        tlb_write();
    }
    write_c0_entryhi(oldpid);

out_restore:
    local_irq_restore(flags);
}
/*
 * Initialize the wired register and all tlb entries to
 * known good state.
 */
void __init early_tlb_init(void)
{
    unsigned long index;
    struct cpuinfo_mips *c = &current_cpu_data;

    tmp_tlb_ent = c->tlbsize;

    /* printk(KERN_ALERT "%s: tlb size %ld\n", __FUNCTION__, c->tlbsize); */

    /*
     * initialize entire TLB to unique virtual addresses
     * but with the PAGE_VALID bit not set
     */
    write_c0_pagemask(PM_DEFAULT_MASK);
    write_c0_wired(0);
    write_c0_entrylo0(0);   /* not _PAGE_VALID */
    write_c0_entrylo1(0);
    for (index = 0; index < c->tlbsize; index++) {
        /* Make sure all entries differ. */
        write_c0_entryhi(UNIQUE_ENTRYHI(index + 32));
        write_c0_index(index);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
    }
    tlbw_use_hazard();
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
    struct mm_struct *mm = vma->vm_mm;
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long size, flags;

        local_irq_save(flags);
        start = round_down(start, PAGE_SIZE << 1);
        end = round_up(end, PAGE_SIZE << 1);
        size = (end - start) >> (PAGE_SHIFT + 1);
        if (size <= (current_cpu_data.tlbsizeftlbsets ?
                     current_cpu_data.tlbsize / 8 :
                     current_cpu_data.tlbsize / 2)) {
            int oldpid = read_c0_entryhi();
            int newpid = cpu_asid(cpu, mm);

            htw_stop();
            while (start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                    continue;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
            }
            tlbw_use_hazard();
            write_c0_entryhi(oldpid);
            htw_start();
        } else {
            drop_mmu_context(mm, cpu);
        }
        flush_micro_tlb();
        local_irq_restore(flags);
    }
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
    struct mm_struct *mm = vma->vm_mm;
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long flags;
        int size;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize/2) {
            int oldpid = read_c0_entryhi();
            int newpid = cpu_asid(cpu, mm);

            start &= (PAGE_MASK << 1);
            end += ((PAGE_SIZE << 1) - 1);
            end &= (PAGE_MASK << 1);
            while (start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                    continue;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
            }
            tlbw_use_hazard();
            write_c0_entryhi(oldpid);
        } else {
            drop_mmu_context(mm, cpu);
        }
        EXIT_CRITICAL(flags);
    }
}
static void ipu_add_wired_entry(unsigned long pid,
                                unsigned long entrylo0, unsigned long entrylo1,
                                unsigned long entryhi, unsigned long pagemask)
{
    unsigned long flags;
    unsigned long wired;
    unsigned long old_pagemask;
    unsigned long old_ctx;
    struct task_struct *g, *p;

    do_each_thread(g, p) {
        if (p->pid == pid)
            g_asid = p->mm->context[0];
    } while_each_thread(g, p);

    local_irq_save(flags);

    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi() & 0xff;
    old_pagemask = read_c0_pagemask();
    wired = read_c0_wired();
    write_c0_wired(wired + 1);
    write_c0_index(wired);
    BARRIER;

    entryhi &= ~0xff;                  /* new add, 20070906 */
    entryhi |= g_asid;                 /* new add, 20070906 */
    // entryhi |= old_ctx;             /* new add, 20070906 */

    write_c0_pagemask(pagemask);
    write_c0_entryhi(entryhi);
    write_c0_entrylo0(entrylo0);
    write_c0_entrylo1(entrylo1);
    BARRIER;
    tlb_write_indexed();
    BARRIER;

    write_c0_entryhi(old_ctx);
    BARRIER;
    write_c0_pagemask(old_pagemask);
    local_flush_tlb_all();
    local_irq_restore(flags);

#if defined(DEBUG)
    printk("\nold_ctx=%03lu\n", old_ctx);
    show_tlb();
#endif
}
u32 do_general_exception(arch_regs_t *uregs)
{
    u32 cp0_cause = read_c0_cause();
    u32 cp0_status = read_c0_status();
    mips32_entryhi_t ehi;
    u32 victim_asid;
    u32 victim_inst;
    struct vmm_vcpu *c_vcpu;
    u8 delay_slot_exception = IS_BD_SET(cp0_cause);

    ehi._entryhi = read_c0_entryhi();
    victim_asid = ehi._s_entryhi.asid >> ASID_SHIFT;
    c_vcpu = vmm_scheduler_current_vcpu();

    /*
     * When the exception happens in the delay slot, we need to emulate
     * the corresponding branch instruction first. If it's one of the
     * "likely" instructions, we don't need to emulate the faulting
     * instruction since "likely" instructions don't allow the slot to be
     * executed if the branch is not taken.
     */
    if (delay_slot_exception) {
        victim_inst = *((u32 *)(uregs->cp0_epc + 4));

        /*
         * If this function returns zero, the branch instruction was a
         * "likely" instruction and the branch wasn't taken. So don't
         * execute the delay slot, just return. The correct EPC to return
         * to will be programmed under our feet.
         */
        if (!cpu_vcpu_emulate_branch_and_jump_inst(c_vcpu, *((u32 *)uregs->cp0_epc), uregs)) {
            return VMM_OK;
        }
    } else {
        victim_inst = *((u32 *)uregs->cp0_epc);
    }

    switch (EXCEPTION_CAUSE(cp0_cause)) {
    case EXEC_CODE_COPU:
        cpu_vcpu_emulate_cop_inst(c_vcpu, victim_inst, uregs);

        if (!delay_slot_exception)
            uregs->cp0_epc += 4;
        break;

    case EXEC_CODE_TLBL:
        if (CPU_IN_USER_MODE(cp0_status) && is_vmm_asid(ehi._s_entryhi.asid)) {
            ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
            write_c0_entryhi(ehi._entryhi);
            vmm_panic("CPU is in user mode and ASID is pointing to VMM!!\n");
        }
        break;
    }

    return VMM_OK;
}
void write_one_tlb(int index, u32 pagemask, u32 hi, u32 low0, u32 low1)
{
    write_c0_entrylo0(low0);
    write_c0_pagemask(pagemask);
    write_c0_entrylo1(low1);
    write_c0_entryhi(hi);
    write_c0_index(index);
    tlb_write_indexed();
}
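/*
 * Illustrative only: write_one_tlb() above neither disables interrupts nor
 * preserves EntryHi/PageMask, so its caller is expected to do that, in the
 * same way the wired-entry helpers elsewhere in this collection do.  The
 * sketch below shows one hypothetical caller; the name setup_fixed_mapping()
 * and its arguments are invented for illustration and do not come from any
 * of the trees quoted here.
 */
static void setup_fixed_mapping(int idx, u32 pagemask, u32 hi, u32 lo0, u32 lo1)
{
    unsigned long flags;
    unsigned long old_hi, old_pm;

    local_irq_save(flags);
    old_hi = read_c0_entryhi();
    old_pm = read_c0_pagemask();

    /* Program one TLB entry at a fixed index. */
    write_one_tlb(idx, pagemask, hi, lo0, lo1);

    /* Restore the previous ASID and page size before re-enabling IRQs. */
    write_c0_entryhi(old_hi);
    write_c0_pagemask(old_pm);
    local_irq_restore(flags);
}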
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long flags;
        int size;

#ifdef DEBUG_TLB
        printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK),
               start, end);
#endif
        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= NTLB_ENTRIES_HALF) {
            int oldpid = (read_c0_entryhi() & ASID_MASK);
            int newpid = (cpu_context(smp_processor_id(), mm) & ASID_MASK);

            start &= (PAGE_MASK << 1);
            end += ((PAGE_SIZE << 1) - 1);
            end &= (PAGE_MASK << 1);
            while (start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                tlb_probe();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                write_c0_entryhi(KSEG0);
                if (idx < 0)
                    continue;
                tlb_write_indexed();
            }
            write_c0_entryhi(oldpid);
        } else {
            drop_mmu_context(mm, cpu);
        }
        local_irq_restore(flags);
    }
}
/**
 * @brief Flush memory range
 *
 * If the memory range is too big, we flush all entries with this ASID
 */
void local_flush_tlb_range(unsigned int asid, unsigned long start,
                           unsigned long end)
{
    unsigned long size, flags;

    ENTER_CRITICAL(flags);
    size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
    size = (size + 1) >> 1;
    if (size <= current_cpu_data.tlbsize / 2) {
        int oldpid = read_c0_entryhi();
        int newpid = asid;

        start &= (PAGE_MASK << 1);
        end += ((PAGE_SIZE << 1) - 1);
        end &= (PAGE_MASK << 1);
        while (start < end) {
            int idx;

            write_c0_entryhi(start | newpid);
            start += (PAGE_SIZE << 1);
            mtc0_tlbw_hazard();
            tlb_probe();
            tlb_probe_hazard();
            idx = read_c0_index();
            write_c0_entrylo0(0);
            write_c0_entrylo1(0);
            if (idx < 0)
                continue;
            /* Make sure all entries differ. */
            write_c0_entryhi(UNIQUE_ENTRYHI(idx));
            mtc0_tlbw_hazard();
            tlb_write_indexed();
        }
        tlbw_use_hazard();
        write_c0_entryhi(oldpid);
    } else
        local_flush_asid(asid);

    FLUSH_ITLB;
    EXIT_CRITICAL(flags);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
    struct mm_struct *mm = vma->vm_mm;
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long flags;
        int size;

#ifdef DEBUG_TLB
        printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
               cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (size <= current_cpu_data.tlbsize) {
            int oldpid = read_c0_entryhi() & ASID_MASK;
            int newpid = cpu_context(cpu, mm) & ASID_MASK;

            start &= PAGE_MASK;
            end += PAGE_SIZE - 1;
            end &= PAGE_MASK;
            while (start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += PAGE_SIZE;     /* BARRIER */
                tlb_probe();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entryhi(KSEG0);
                if (idx < 0)            /* BARRIER */
                    continue;
                tlb_write_indexed();
            }
            write_c0_entryhi(oldpid);
        } else {
            drop_mmu_context(mm, cpu);
        }
        local_irq_restore(flags);
    }
}
void dump_tlb_addr(unsigned long addr)
{
    unsigned long flags, oldpid;
    int index;

    local_irq_save(flags);
    oldpid = read_c0_entryhi() & 0xff;
    write_c0_entryhi((addr & PAGE_MASK) | oldpid);
    tlb_probe();
    index = read_c0_index();
    write_c0_entryhi(oldpid);
    local_irq_restore(flags);

    if (index < 0) {
        printk("No entry for address 0x%08lx in TLB\n", addr);
        return;
    }

    printk("Entry %d maps address 0x%08lx\n", index, addr);
    dump_tlb(index, index);
}
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry;

    local_irq_save(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi();
    write_c0_entrylo(0);

    for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
        write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
        write_c0_vaddr(entry << PAGE_SHIFT);
        write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
        mtc0_tlbw_hazard();
        tlb_write();
    }
    tlbw_use_hazard();
    write_c0_entryhi(old_ctx);
    local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
    unsigned long flags, config6_flags;
    unsigned long old_ctx;
    int entry;

    ENTER_CRITICAL(flags);
    disable_pgwalker(config6_flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi();
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);

    entry = read_c0_wired();

#if defined(CONFIG_MAPPED_KERNEL)
    if (!entry)
        printk("[%s] flushing entry=%d in MAPPED_KERNEL mode!\n",
               __FUNCTION__, entry);
#endif

    /* Blast 'em all away. */
    while (entry < current_cpu_data.tlbsize) {
        /* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
#else
        __write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(entry)));
#endif
        write_c0_index(entry);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        entry++;
    }
    tlbw_use_hazard();
    write_c0_entryhi(old_ctx);
    FLUSH_ITLB;
    enable_pgwalker(config6_flags);
    EXIT_CRITICAL(flags);
}
/* TLB operations. */
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry;

#ifdef DEBUG_TLB
    printk("[tlball]");
#endif

    local_irq_save(flags);
    old_ctx = read_c0_entryhi() & ASID_MASK;
    write_c0_entrylo0(0);
    entry = r3k_have_wired_reg ? read_c0_wired() : 8;
    for (; entry < current_cpu_data.tlbsize; entry++) {
        /* The R3k Index register holds the entry number in bits 13:8. */
        write_c0_index(entry << 8);
        /* Unique KSEG0 address per entry: 0x80000000 + entry * 0x1000. */
        write_c0_entryhi((entry | 0x80000) << 12);
        BARRIER;
        tlb_write_indexed();
    }
    write_c0_entryhi(old_ctx);
    local_irq_restore(flags);
}
/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
    unsigned long flags;
    int size;

    size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
    size = (size + 1) >> 1;

    if (size > TFP_TLB_SIZE / 2) {
        local_flush_tlb_all();
        return;
    }

    local_irq_save(flags);

    write_c0_entrylo(0);

    start &= PAGE_MASK;
    end += (PAGE_SIZE - 1);
    end &= PAGE_MASK;
    while (start < end) {
        signed long idx;

        write_c0_vaddr(start);
        write_c0_entryhi(start);
        start += PAGE_SIZE;
        tlb_probe();
        idx = read_c0_tlbset();
        if (idx < 0)
            continue;
        write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
        tlb_write();
    }

    local_irq_restore(flags);
}
void __init add_tmptlb_entry(unsigned long entrylo0, unsigned long entrylo1,
                             unsigned long entryhi, unsigned long pagemask)
{
    /* write one tlb entry */
    --tmp_tlb_ent;
    write_c0_index(tmp_tlb_ent);
    write_c0_pagemask(pagemask);
    write_c0_entryhi(entryhi);
    write_c0_entrylo0(entrylo0);
    write_c0_entrylo1(entrylo1);
    mtc0_tlbw_hazard();
    tlb_write_indexed();
    tlbw_use_hazard();
}
static void refill_tbl_to(struct km_walk_ctx *ctx, unsigned int asid,
                          int write, int pos)
{
    unsigned long entry, oldl1, oldl2;
    unsigned long G_FLAG;
    int idx;
    int oldpid;

    /*
     * Just test ASID consistency: the current ASID must equal the given
     * ASID; kernel processes do not obey this rule.
     */
    oldpid = read_c0_entryhi();

    /* EntryHi */
    asid = asid & CPU_PAGE_FALG_ASID_MASK;
    entry = get_vpn2(ctx->current_virtual_address);
    entry |= asid;
    write_c0_entryhi(entry);

    mtc0_tlbw_hazard();
    tlb_probe();
    tlb_probe_hazard();
    idx = read_c0_index();

    oldl1 = read_c0_entrylo0();
    oldl2 = read_c0_entrylo1();

    /* Add the G_FLAG if ASID == 0, because the entry is from the kernel
       and shared by all processes */
    G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx) ? 1 : 0;

    /* EntryLo0 and EntryLo1 */
    WRITE_LO;

    /* Write by type; the write is random if the TLB entry is being flushed
       for R/W flag changes */
    mtc0_tlbw_hazard();
    if (unlikely(idx < 0))
        tlb_write_random();
    else {
        if (write == 2) {
            printk("Write is forced index for %x, pos %d, idx %d, asid %d, %x %x.\n",
                   ctx->current_virtual_address, pos, idx, asid, oldl1, oldl2);
        }
        tlb_write_indexed();
    }
    tlbw_use_hazard();

    /* Sanity check: the current ASID must equal the given ASID; kernel
       processes do not obey this rule. */
    if ((oldpid & 0xff) != (asid & 0xff) && asid != 0 /* kernel asid */)
        printk("Why old = %x, asid = %x. ", oldpid, asid);
}
void dump_tlb(int first, int last)
{
    int i;
    unsigned int asid;
    unsigned long entryhi, entrylo0;

    asid = read_c0_entryhi() & 0xfc0;

    for (i = first; i <= last; i++) {
        write_c0_index(i << 8);
        __asm__ __volatile__(
            ".set\tnoreorder\n\t"
            "tlbr\n\t"
            "nop\n\t"
            ".set\treorder");
        entryhi = read_c0_entryhi();
        entrylo0 = read_c0_entrylo0();

        /* Unused entries have a virtual address of KSEG0. */
        if ((entryhi & 0xffffe000) != 0x80000000
            && (entryhi & 0xfc0) == asid) {
            /*
             * Only print entries in use
             */
            printk("Index: %2d ", i);

            printk("va=%08lx asid=%08lx"
                   " [pa=%06lx n=%d d=%d v=%d g=%d]",
                   (entryhi & 0xffffe000),
                   entryhi & 0xfc0,
                   entrylo0 & PAGE_MASK,
                   (entrylo0 & (1 << 11)) ? 1 : 0,
                   (entrylo0 & (1 << 10)) ? 1 : 0,
                   (entrylo0 & (1 << 9)) ? 1 : 0,
                   (entrylo0 & (1 << 8)) ? 1 : 0);
        }
    }
    printk("\n");

    write_c0_entryhi(asid);
}
void show_tlb(void)
{
#define ASID_MASK 0xFF

    unsigned long flags;
    unsigned int old_ctx;
    unsigned int entry;
    unsigned int entrylo0, entrylo1, entryhi;
    unsigned int pagemask;

    local_irq_save(flags);

    /* Save old context */
    old_ctx = (read_c0_entryhi() & 0xff);

    printk("TLB content:\n");
    entry = 0;
    while (entry < 32) {
        write_c0_index(entry);
        BARRIER;
        tlb_read();
        BARRIER;
        entryhi = read_c0_entryhi();
        entrylo0 = read_c0_entrylo0();
        entrylo1 = read_c0_entrylo1();
        pagemask = read_c0_pagemask();
        printk("%02d: ASID=%02d%s VA=0x%08x ",
               entry,
               entryhi & ASID_MASK,
               (entrylo0 & entrylo1 & 1) ? "(G)" : " ",
               entryhi & ~ASID_MASK);
        printk("PA0=0x%08x C0=%x %s%s%s\n",
               (entrylo0 >> 6) << 12,
               (entrylo0 >> 3) & 7,
               (entrylo0 & 4) ? "Dirty " : "",
               (entrylo0 & 2) ? "Valid " : "Invalid ",
               (entrylo0 & 1) ? "Global" : "");
        printk("\t\t\t PA1=0x%08x C1=%x %s%s%s\n",
               (entrylo1 >> 6) << 12,
               (entrylo1 >> 3) & 7,
               (entrylo1 & 4) ? "Dirty " : "",
               (entrylo1 & 2) ? "Valid " : "Invalid ",
               (entrylo1 & 1) ? "Global" : "");

        printk("\t\tpagemask=0x%08x", pagemask);
        printk("\tentryhi=0x%08x\n", entryhi);
        printk("\t\tentrylo0=0x%08x", entrylo0);
        printk("\tentrylo1=0x%08x\n", entrylo1);

        entry++;
    }
    BARRIER;
    write_c0_entryhi(old_ctx);

    local_irq_restore(flags);
}
static void dump_tlb(int first, int last)
{
    int i;
    unsigned int asid;
    unsigned long entryhi, entrylo0;

    asid = read_c0_entryhi() & ASID_MASK;

    for (i = first; i <= last; i++) {
        write_c0_index(i << 8);
        __asm__ __volatile__(
            ".set\tnoreorder\n\t"
            "tlbr\n\t"
            "nop\n\t"
            ".set\treorder");
        entryhi = read_c0_entryhi();
        entrylo0 = read_c0_entrylo0();

        /* Unused entries have a virtual address of KSEG0. */
        if ((entryhi & PAGE_MASK) != KSEG0
            && (entrylo0 & R3K_ENTRYLO_G || (entryhi & ASID_MASK) == asid)) {
            /*
             * Only print entries in use
             */
            printk("Index: %2d ", i);

            printk("va=%08lx asid=%08lx"
                   " [pa=%06lx n=%d d=%d v=%d g=%d]",
                   entryhi & PAGE_MASK,
                   entryhi & ASID_MASK,
                   entrylo0 & PAGE_MASK,
                   (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
                   (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
                   (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
                   (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
        }
    }
    printk("\n");

    write_c0_entryhi(asid);
}