/*
 * Invalidate every unlocked entry in the local TLB.
 *
 * The current ASID is read from PEVN and restored on the way out, so the
 * running context keeps its address-space ID.  Locked (wired) entries are
 * skipped by starting the sweep at tlblock_get().  Each entry is killed by
 * writing a KSEG1 VPN into it — presumably KSEG1 is an unmapped segment
 * that can never produce a TLB hit (standard trick on MIPS-like MMUs;
 * TODO confirm for this core).  Runs with local interrupts disabled.
 */
void local_flush_tlb_all(void)
{
	unsigned long irq_flags;
	unsigned long saved_asid;
	int idx;

	local_irq_save(irq_flags);
	saved_asid = pevn_get() & ASID_MASK;
	pectx_set(0);			/* clear page-context register */
	idx = tlblock_get();		/* first entry past the locked ones */
	while (idx < TLBSIZE) {
		tlbpt_set(idx);
		pevn_set(KSEG1);
		barrier();		/* order register writes before the TLB op */
		tlb_write_indexed();
		idx++;
	}
	pevn_set(saved_asid);
	local_irq_restore(irq_flags);
}
/*
 * Flush TLB entries mapping the virtual range [start, end) of @vma's
 * address space.
 *
 * If the mm has no context (ASID) yet, nothing can be cached for it and
 * the function is a no-op.  For ranges of at most TLBSIZE pages each page
 * is probed and invalidated individually; larger ranges are handled by
 * allocating a fresh ASID for the whole mm, which implicitly retires all
 * of its old TLB entries.
 *
 * NOTE(review): this chunk appears truncated — the function's closing
 * brace is not visible here; confirm it follows in the full file.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) {
	struct mm_struct *mm = vma->vm_mm;
	unsigned long vma_mm_context = mm->context;

	/* context == 0: no ASID assigned yet, so no TLB entries to flush */
	if (mm->context != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		/* number of pages covered by the range, rounded up */
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= TLBSIZE) {
			/* Small range: probe and invalidate page by page. */
			int oldpid = pevn_get() & ASID_MASK;
			int newpid = vma_mm_context & ASID_MASK;

			/* align the range to whole pages */
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				/* look up this page under the vma's ASID */
				pevn_set(start | newpid);
				start += PAGE_SIZE;
				barrier();	/* order PEVN write before the probe */
				tlb_probe();
				idx = tlbpt_get();
				pectx_set(0);
				pevn_set(KSEG1);	/* unmapped VPN = invalid entry */
				if (idx < 0)
					/* probe missed: no entry for this page */
					continue;
				tlb_write_indexed();
			}
			/* restore the ASID that was live on entry */
			pevn_set(oldpid);
		} else {
			/* Bigger than TLBSIZE, get new ASID directly */
			get_new_mmu_context(mm);
			/* only reload PEVN if this mm is the one currently running */
			if (mm == current->active_mm)
				pevn_set(vma_mm_context & ASID_MASK);
		}
		local_irq_restore(flags);
	}