void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) { int local = mm_is_thread_local(mm); unsigned long ap = mmu_get_ap(mmu_virtual_psize); unsigned long pid, end; pid = mm ? mm->context.id : 0; if (unlikely(pid == MMU_NO_CONTEXT)) goto no_context; /* 4k page size, just blow the world */ if (PAGE_SIZE == 0x1000) { radix__flush_all_mm(mm); return; } /* Otherwise first do the PWC */ if (local) _tlbiel_pid(pid, RIC_FLUSH_PWC); else _tlbie_pid(pid, RIC_FLUSH_PWC); /* Then iterate the pages */ end = addr + HPAGE_PMD_SIZE; for (; addr < end; addr += PAGE_SIZE) { if (local) _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); else _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); } no_context: preempt_enable(); }
/*
 * Flush all TLB entries for an address space (RIC_FLUSH_TLB: TLB only,
 * not the page-walk cache), choosing a CPU-local tlbiel or a broadcast
 * tlbie depending on which CPUs the mm has run on.
 */
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	/*
	 * Order loads of mm_cpumask vs previous stores to clear ptes before
	 * the invalidate. See barrier in switch_mm_irqs_off
	 */
	smp_mb();
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			/*
			 * Only lazy-tlb CPUs still reference this mm: evict
			 * them so the cheaper local flush path suffices.
			 */
			exit_flush_lazy_tlbs(mm);
			goto local;
		}

		/*
		 * Some configurations must escalate a TLB-only broadcast
		 * flush to a full (TLB + PWC) flush — see
		 * mm_needs_flush_escalation().
		 */
		if (mm_needs_flush_escalation(mm))
			_tlbie_pid(pid, RIC_FLUSH_ALL);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize) { unsigned long pid; unsigned long addr; int local = mm_is_thread_local(mm); unsigned long ap = mmu_get_ap(psize); unsigned long page_size = 1UL << mmu_psize_defs[psize].shift; preempt_disable(); pid = mm ? mm->context.id : 0; if (unlikely(pid == MMU_NO_CONTEXT)) goto err_out; if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * page_size) { if (local) _tlbiel_pid(pid, RIC_FLUSH_TLB); else _tlbie_pid(pid, RIC_FLUSH_TLB); goto err_out; } for (addr = start; addr < end; addr += page_size) { if (local) _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); else _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); } err_out: preempt_enable(); }
/*
 * Flush everything for an address space: TLB entries and the page-walk
 * cache (RIC_FLUSH_ALL), local or broadcast as appropriate.
 */
void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm))
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
/*
 * Invalidate the TLB entry for a single page of the given page size,
 * using a local tlbiel when the mm has only run on this CPU.
 */
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm))
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}
/*
 * Invalidate the TLB entry for a single page of the given page size.
 * The actual-page-size code (AP) is derived from psize before the
 * invalidate is issued.
 */
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long ap = mmu_get_ap(psize);
	unsigned long pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;

	if (mm_is_thread_local(mm))
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}
void radix__flush_tlb_mm(struct mm_struct *mm) { unsigned long pid; preempt_disable(); pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) goto no_context; if (!mm_is_thread_local(mm)) { int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); if (lock_tlbie) raw_spin_lock(&native_tlbie_lock); _tlbie_pid(pid, RIC_FLUSH_ALL); if (lock_tlbie) raw_spin_unlock(&native_tlbie_lock); } else _tlbiel_pid(pid, RIC_FLUSH_ALL); no_context: preempt_enable(); }
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize) { unsigned long pid; unsigned long ap = mmu_get_ap(psize); preempt_disable(); pid = mm ? mm->context.id : 0; if (unlikely(pid == MMU_NO_CONTEXT)) goto bail; if (!mm_is_thread_local(mm)) { int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); if (lock_tlbie) raw_spin_lock(&native_tlbie_lock); _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB); if (lock_tlbie) raw_spin_unlock(&native_tlbie_lock); } else _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB); bail: preempt_enable(); }
/*
 * Invalidate the TLB entry for a single page of the given page size,
 * downgrading to a local flush when possible.
 */
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			/*
			 * Only lazy-tlb CPUs still reference this mm: evict
			 * them, then the cheaper local invalidate suffices.
			 */
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
/*
 * Flush everything (TLB + page-walk cache, RIC_FLUSH_ALL) for an
 * address space.
 *
 * NOTE(review): fullmm appears to mean the entire mm is being torn
 * down, in which case lazy-tlb CPUs need not be evicted before the
 * broadcast flush — confirm against the tlb_gather callers.
 */
static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (!fullmm) {
				/*
				 * Evict remaining lazy-tlb CPUs so the
				 * local flush path can be used instead.
				 */
				exit_flush_lazy_tlbs(mm);
				goto local;
			}
		}
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
	preempt_enable();
}
static inline void __radix__flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end, bool flush_all_sizes) { unsigned long pid; unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift; unsigned long page_size = 1UL << page_shift; unsigned long nr_pages = (end - start) >> page_shift; bool local, full; pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) return; preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ if (!mm_is_thread_local(mm)) { if (unlikely(mm_is_singlethreaded(mm))) { if (end != TLB_FLUSH_ALL) { exit_flush_lazy_tlbs(mm); goto is_local; } } local = false; full = (end == TLB_FLUSH_ALL || nr_pages > tlb_single_page_flush_ceiling); } else { is_local: local = true; full = (end == TLB_FLUSH_ALL || nr_pages > tlb_local_single_page_flush_ceiling); } if (full) { if (local) { _tlbiel_pid(pid, RIC_FLUSH_TLB); } else { if (mm_needs_flush_escalation(mm)) _tlbie_pid(pid, RIC_FLUSH_ALL); else _tlbie_pid(pid, RIC_FLUSH_TLB); } } else { bool hflush = flush_all_sizes; bool gflush = flush_all_sizes; unsigned long hstart, hend; unsigned long gstart, gend; if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) hflush = true; if (hflush) { hstart = (start + PMD_SIZE - 1) & PMD_MASK; hend = end & PMD_MASK; if (hstart == hend) hflush = false; } if (gflush) { gstart = (start + PUD_SIZE - 1) & PUD_MASK; gend = end & PUD_MASK; if (gstart == gend) gflush = false; } asm volatile("ptesync": : :"memory"); if (local) { __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbiel_va_range(hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M); if (gflush) __tlbiel_va_range(gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G); asm volatile("ptesync": : :"memory"); } else { __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbie_va_range(hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M); if (gflush) __tlbie_va_range(gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G); fixup_tlbie(); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } }
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned long pid; unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift; unsigned long page_size = 1UL << page_shift; unsigned long nr_pages = (end - start) >> page_shift; bool local, full; #ifdef CONFIG_HUGETLB_PAGE if (is_vm_hugetlb_page(vma)) return radix__flush_hugetlb_tlb_range(vma, start, end); #endif pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) return; preempt_disable(); if (mm_is_thread_local(mm)) { local = true; full = (end == TLB_FLUSH_ALL || nr_pages > tlb_local_single_page_flush_ceiling); } else { local = false; full = (end == TLB_FLUSH_ALL || nr_pages > tlb_single_page_flush_ceiling); } if (full) { if (local) { _tlbiel_pid(pid, RIC_FLUSH_TLB); } else { if (mm_needs_flush_escalation(mm)) _tlbie_pid(pid, RIC_FLUSH_ALL); else _tlbie_pid(pid, RIC_FLUSH_TLB); } } else { bool hflush = false; unsigned long hstart, hend; #ifdef CONFIG_TRANSPARENT_HUGEPAGE hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT; hend = end >> HPAGE_PMD_SHIFT; if (hstart < hend) { hstart <<= HPAGE_PMD_SHIFT; hend <<= HPAGE_PMD_SHIFT; hflush = true; } #endif asm volatile("ptesync": : :"memory"); if (local) { __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbiel_va_range(hstart, hend, pid, HPAGE_PMD_SIZE, MMU_PAGE_2M); asm volatile("ptesync": : :"memory"); } else { __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbie_va_range(hstart, hend, pid, HPAGE_PMD_SIZE, MMU_PAGE_2M); fixup_tlbie(); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } }