/* Invalidate a single page mapping belonging to the VMA's address space. */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	/* Nothing to do if this mm never ran on this CPU. */
	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)
		goto finish;

	/* Retarget the matching entry at a harmless, unique CKSEG0 address. */
	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}
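/*
 * Context for the function above (a sketch, not taken from this file): on
 * non-SMP MIPS configurations the generic flush_tlb_page() hook is commonly
 * just an alias for the local variant, roughly as below.  The exact macro
 * set lives in the architecture's tlbflush.h and may differ by version.
 */
#ifndef CONFIG_SMP
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
#endif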
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) { int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long flags; int size; #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", cpu_asid(cpu, mm), start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { int oldpid = read_c0_entryhi() & ASID_MASK; int newpid = cpu_asid(cpu, mm); start &= PAGE_MASK; end += PAGE_SIZE - 1; end &= PAGE_MASK; while (start < end) { int idx; write_c0_entryhi(start | newpid); start += PAGE_SIZE; /* BARRIER */ tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entryhi(KSEG0); if (idx < 0) /* BARRIER */ continue; tlb_write_indexed(); } write_c0_entryhi(oldpid); } else { drop_mmu_context(mm, cpu); } local_irq_restore(flags); }
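/*
 * The "too many pages" fallback above relies on drop_mmu_context() to
 * retire the mm's ASID on this CPU.  A minimal sketch of that helper is
 * shown below, assuming the classic MIPS mmu_context.h layout; the real
 * definition varies between kernel versions.
 */
static inline void drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		/* mm is live on this CPU: switch to a fresh ASID right away. */
		get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
	} else {
		/* Not active here: force a new context on next activation. */
		cpu_context(cpu, mm) = 0;
	}
	local_irq_restore(flags);
}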
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long size, flags; unsigned long config6_flags; ENTER_CRITICAL(flags); disable_pgwalker(config6_flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; size = (size + 1) >> 1; if (size <= current_cpu_data.tlbsize/2) { int oldpid = read_c0_entryhi(); int newpid = cpu_asid(cpu, mm); start &= (PAGE_MASK << 1); end += ((PAGE_SIZE << 1) - 1); end &= (PAGE_MASK << 1); while (start < end) { int idx; write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entrylo1(0); if (idx < 0) continue; /* Make sure all entries differ. */ #ifndef CONFIG_NLM_VMIPS write_c0_entryhi(UNIQUE_ENTRYHI(idx)); #else __write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx))); #endif mtc0_tlbw_hazard(); tlb_write_indexed(); } tlbw_use_hazard(); write_c0_entryhi(oldpid); } else { drop_mmu_context(mm, cpu); } FLUSH_ITLB; enable_pgwalker(config6_flags); EXIT_CRITICAL(flags); }
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); unsigned long flags; int oldpid, newpid, size; if (!cpu_context(cpu, mm)) return; size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; size = (size + 1) >> 1; local_irq_save(flags); if (size > TFP_TLB_SIZE / 2) { drop_mmu_context(mm, cpu); goto out_restore; } oldpid = read_c0_entryhi(); newpid = cpu_asid(cpu, mm); write_c0_entrylo(0); start &= PAGE_MASK; end += (PAGE_SIZE - 1); end &= PAGE_MASK; while (start < end) { signed long idx; write_c0_vaddr(start); write_c0_entryhi(start); start += PAGE_SIZE; tlb_probe(); idx = read_c0_tlbset(); if (idx < 0) continue; write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); tlb_write(); } write_c0_entryhi(oldpid); out_restore: local_irq_restore(flags); }
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) { int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long flags; int size; #ifdef DEBUG_TLB printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK), start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; size = (size + 1) >> 1; if(size <= current_cpu_data.tlbsize/2) { int oldpid = read_c0_entryhi() & ASID_MASK; int newpid = cpu_asid(cpu, mm); start &= (PAGE_MASK << 1); end += ((PAGE_SIZE << 1) - 1); end &= (PAGE_MASK << 1); while(start < end) { int idx; write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); BARRIER; tlb_probe(); BARRIER; idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entrylo1(0); if(idx < 0) continue; /* Make sure all entries differ. */ write_c0_entryhi(XKPHYS+idx*0x2000); BARRIER; tlb_write_indexed(); BARRIER; } write_c0_entryhi(oldpid); } else { drop_mmu_context(mm, cpu); } local_irq_restore(flags); }
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long size, flags; local_irq_save(flags); start = round_down(start, PAGE_SIZE << 1); end = round_up(end, PAGE_SIZE << 1); size = (end - start) >> (PAGE_SHIFT + 1); if (size <= (current_cpu_data.tlbsizeftlbsets ? current_cpu_data.tlbsize / 8 : current_cpu_data.tlbsize / 2)) { int oldpid = read_c0_entryhi(); int newpid = cpu_asid(cpu, mm); htw_stop(); while (start < end) { int idx; write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entrylo1(0); if (idx < 0) continue; /* Make sure all entries differ. */ write_c0_entryhi(UNIQUE_ENTRYHI(idx)); mtc0_tlbw_hazard(); tlb_write_indexed(); } tlbw_use_hazard(); write_c0_entryhi(oldpid); htw_start(); } else { drop_mmu_context(mm, cpu); } flush_micro_tlb(); local_irq_restore(flags); }
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long flags; int size; ENTER_CRITICAL(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; size = (size + 1) >> 1; local_irq_save(flags); if (size <= current_cpu_data.tlbsize/2) { int oldpid = read_c0_entryhi(); int newpid = cpu_asid(cpu, mm); start &= (PAGE_MASK << 1); end += ((PAGE_SIZE << 1) - 1); end &= (PAGE_MASK << 1); while (start < end) { int idx; write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entrylo1(0); if (idx < 0) continue; /* Make sure all entries differ. */ write_c0_entryhi(UNIQUE_ENTRYHI(idx)); mtc0_tlbw_hazard(); tlb_write_indexed(); } tlbw_use_hazard(); write_c0_entryhi(oldpid); } else { drop_mmu_context(mm, cpu); } EXIT_CRITICAL(flags); }
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { unsigned int cpu = smp_processor_id(); if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { unsigned long flags; unsigned long asid; unsigned long saved_asid = MMU_NO_ASID; asid = cpu_asid(cpu, vma->vm_mm); page &= PAGE_MASK; local_irq_save(flags); if (vma->vm_mm != current->mm) { saved_asid = get_asid(); set_asid(asid); } local_flush_tlb_one(asid, page); if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); local_irq_restore(flags); } }
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != NO_CONTEXT) { unsigned long flags; int size; local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ cpu_context(cpu, mm) = NO_CONTEXT; if (mm == current->mm) activate_context(mm, cpu); } else { unsigned long asid; unsigned long saved_asid = MMU_NO_ASID; asid = cpu_asid(cpu, mm); start &= PAGE_MASK; end += (PAGE_SIZE - 1); end &= PAGE_MASK; if (mm != current->mm) { saved_asid = get_asid(); set_asid(asid); } while (start < end) { local_flush_tlb_one(asid, start); start += PAGE_SIZE; } if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); } local_irq_restore(flags); }