/*
 * Flush a virtual address range for a given page size, falling back to a
 * full PID flush when the range is large enough that per-page invalidations
 * would cost more than flushing everything.
 */
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		/* Range too large: flush the whole PID instead */
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto out;
	}

	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
out:
	preempt_enable();
}
/*
 * Flush the TLB after a huge PMD has been collapsed back to small pages.
 * Note: the original body called preempt_enable() without a matching
 * preempt_disable(); the pairing is restored here.
 */
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		preempt_enable();
		radix__flush_all_mm(mm);
		return;
	}

	/* Otherwise first do the PWC */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	else
		_tlbie_pid(pid, RIC_FLUSH_PWC);

	/* Then iterate the pages */
	end = addr + HPAGE_PMD_SIZE;
	for (; addr < end; addr += PAGE_SIZE) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
no_context:
	preempt_enable();
}
/*
 * Thread-local (tlbiel) single-VA invalidation. Every call site in this
 * file passes a pre-computed "ap" (actual page size) value, so the
 * parameter is named accordingly here rather than converting psize again,
 * which would have applied mmu_get_ap() twice.
 */
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}
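/*
 * For reference, a minimal sketch of the __tlbiel_va() primitive assumed
 * above, following the rb/rs/prs/r operand encoding that
 * radix__flush_tlb_lpid_va() below spells out for the broadcast case; the
 * real helper may differ in detail. PPC_TLBIEL mirrors PPC_TLBIE_5 but
 * invalidates only the executing thread's TLB, so no tlbsync is needed.
 */
static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));	/* EA, low bits masked off */
	rb |= ap << PPC_BITLSHIFT(58);		/* actual page size */
	rs = pid << PPC_BITLSHIFT(31);		/* PID field of RS */
	prs = 1;				/* process scoped */
	r = 1;					/* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
}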
/*
 * Work around a POWER9 tlbie ordering erratum (CPU_FTR_P9_TLBIE_BUG):
 * after a ptesync, issue one extra tlbie to a harmless address (the top of
 * the EA space) so the preceding invalidations complete correctly.
 */
static inline void fixup_tlbie_lpid(unsigned long lpid)
{
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}
/*
 * Partition-scoped single-VA invalidation: a broadcast tlbie bracketed by
 * the barriers needed to publish prior PTE updates (ptesync) and to wait
 * for the invalidation to complete everywhere (eieio; tlbsync; ptesync).
 */
static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
				  unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_lpid_va(va, lpid, ap, ric);
	fixup_tlbie_lpid(lpid);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
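/*
 * For reference, a sketch of the __tlbie_lpid_va() primitive used above,
 * again assuming the operand encoding shown in radix__flush_tlb_lpid_va():
 * the LPID goes in RS and prs = 0 selects a partition-scoped invalidation.
 * Details may differ from the real helper.
 */
static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
				   unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));	/* EA, low bits masked off */
	rb |= ap << PPC_BITLSHIFT(58);		/* actual page size */
	rs = lpid;				/* LPID in the low bits of RS */
	prs = 0;				/* partition scoped */
	r = 1;					/* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
}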
/*
 * Issue one broadcast tlbie per page over [start, end); the caller is
 * responsible for the surrounding ptesync/tlbsync barriers.
 */
static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}
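/*
 * The _tlbie_va() wrapper called throughout this file is assumed to mirror
 * _tlbie_lpid_va() above: a broadcast tlbie bracketed by ptesync before and
 * eieio; tlbsync; ptesync after. A minimal sketch, assuming a __tlbie_va()
 * primitive with the same shape as __tlbiel_va() but using PPC_TLBIE_5:
 */
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}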
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));	/* guest physical address */
	rb |= ap << PPC_BITLSHIFT(58);		/* actual page size */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0;				/* partition scoped */
	r = 1;					/* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
/*
 * Earlier variant of radix__flush_tlb_page_psize(): on CPUs without
 * MMU_FTR_LOCKLESS_TLBIE, broadcast tlbie must be serialized under
 * native_tlbie_lock.
 */
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}