void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        struct cpumask *cpu_mask;
        unsigned int pid;

        preempt_disable();
        pid = vma ? vma->vm_mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto bail;
        cpu_mask = mm_cpumask(vma->vm_mm);
        if (!cpumask_equal(cpu_mask, cpumask_of(smp_processor_id()))) {
                /* If broadcast tlbivax is supported, use it */
                if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
                        int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
                        if (lock)
                                spin_lock(&tlbivax_lock);
                        _tlbivax_bcast(vmaddr, pid);
                        if (lock)
                                spin_unlock(&tlbivax_lock);
                        goto bail;
                } else {
                        struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
                        /* Ignores smp_processor_id() even if set in cpu_mask */
                        smp_call_function_many(cpu_mask,
                                               do_flush_tlb_page_ipi, &p, 1);
                }
        }
        _tlbil_va(vmaddr, pid);
 bail:
        preempt_enable();
}
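/*
 * For reference: the low-level invalidation helpers used above are assembly
 * routines provided elsewhere in arch/powerpc. A minimal sketch of their
 * declarations, assuming the two-argument form this version of
 * flush_tlb_page() relies on; later kernels extend _tlbil_va() with
 * tsize/ind arguments, as visible in the set_pte_filter() variant further
 * down.
 */
extern void _tlbil_va(unsigned long address, unsigned int pid);
extern void _tlbivax_bcast(unsigned long address, unsigned int pid);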
/*
 * Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags()
 */
static pte_t do_dcache_icache_coherency(pte_t pte, unsigned long addr)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (unlikely(!pfn_valid(pfn)))
                return pte;
        page = pfn_to_page(pfn);

#ifdef CONFIG_8xx
        /* On 8xx, cache control instructions (particularly
         * "dcbst" from flush_dcache_icache) fault as write
         * operation if there is an unpopulated TLB entry
         * for the address in question. To workaround that,
         * we invalidate the TLB here, thus avoiding dcbst
         * misbehaviour.
         */
        _tlbil_va(addr, 0 /* 8xx doesn't care about PID */);
#endif

        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
                pr_devel("do_dcache_icache_coherency... flushing\n");
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        } else
                pr_devel("do_dcache_icache_coherency... already clean\n");

        return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
        pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
        if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
                                       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
                struct page *pg = maybe_pte_to_page(pte);
                if (!pg)
                        return pte;
                if (!test_bit(PG_arch_1, &pg->flags)) {
#ifdef CONFIG_8xx
                        /* On 8xx, cache control instructions (particularly
                         * "dcbst" from flush_dcache_icache) fault as write
                         * operation if there is an unpopulated TLB entry
                         * for the address in question. To workaround that,
                         * we invalidate the TLB here, thus avoiding dcbst
                         * misbehaviour.
                         */
                        /* 8xx doesn't care about PID, size or ind args */
                        _tlbil_va(addr, 0, 0, 0);
#endif /* CONFIG_8xx */
                        flush_dcache_icache_page(pg);
                        set_bit(PG_arch_1, &pg->flags);
                }
        }
        return pte;
}
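/*
 * For context: set_pte_filter() is applied by the generic PTE setter before
 * the entry is written. A minimal sketch of the usual call site, assuming
 * the set_pte_at() implementation of kernels from this era (the exact body
 * differs between versions):
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                pte_t pte)
{
        /* Filter the incoming PTE (clear HPTE flags, handle i/d-cache
         * coherency and execute permission) before it is installed.
         */
        pte = set_pte_filter(pte, addr);

        /* Perform the actual setting of the PTE; the trailing 0 is the
         * "percpu" flag of powerpc's __set_pte_at().
         */
        __set_pte_at(mm, addr, ptep, pte, 0);
}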
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        unsigned int pid;

        preempt_disable();
        pid = vma ? vma->vm_mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
                _tlbil_va(vmaddr, pid);
        preempt_enable();
}
static void do_flush_tlb_page_ipi(void *param)
{
        struct tlb_flush_param *p = param;

        _tlbil_va(p->addr, p->pid);
}
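/*
 * For reference: the parameter block handed from flush_tlb_page() to
 * do_flush_tlb_page_ipi() via smp_call_function_many() is not shown in the
 * excerpts above. A minimal sketch, assuming only the fields these two
 * functions actually use (.pid and .addr):
 */
struct tlb_flush_param {
        unsigned int pid;
        unsigned long addr;
};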