static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
	struct page *pg;

	if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
		return pte;

	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	/* No exec permission in the first place, move on */
	if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
#ifdef CONFIG_8xx
			/* On 8xx, cache control instructions (particularly
			 * "dcbst" from flush_dcache_icache) fault as write
			 * operations if there is an unpopulated TLB entry
			 * for the address in question. To work around that,
			 * we invalidate the TLB here, thus avoiding dcbst
			 * misbehaviour.
			 */
			/* 8xx doesn't care about PID, size or ind args */
			_tlbil_va(addr, 0, 0, 0);
#endif /* CONFIG_8xx */
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}
static pte_t set_pte_filter(pte_t pte)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}
static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
#ifdef CONFIG_8xx
			/* Invalidate the TLB so dcbst doesn't fault on an
			 * unpopulated entry (see the 8xx workaround above).
			 */
			_tlbil_va(addr, 0, 0, 0);
#endif
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}
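For context, each of these filter variants runs on the new PTE value before it is actually written. A minimal sketch of the caller, assuming a set_pte_at() shaped like powerpc's generic one (the exact DEBUG_VM checks and whether the filter takes addr vary across the versions shown above):

/* Sketch of the calling path: the filter is applied to the new PTE value
 * just before __set_pte_at() writes it. Depending on the MMU family, the
 * filter either strips _PAGE_EXEC or performs I$/D$ coherency maintenance.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(*ptep));
#endif
	pte = set_pte_filter(pte, addr);

	/* Perform the actual write of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}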
/* Other embedded CPUs with per-page HW exec support: we flush on an exec
 * fault if HWEXEC is not set.
 */
static inline int pte_need_exec_flush(pte_t pte, int set_pte)
{
	return pte_looks_normal(pte) && is_exec_fault() &&
		!(pte_val(pte) & _PAGE_HWEXEC);
}
/* Embedded type MMU without HW exec support (8xx only so far): we flush
 * the cache for any present PTE.
 */
static inline int pte_need_exec_flush(pte_t pte, int set_pte)
{
	return set_pte && pte_looks_normal(pte);
}
/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). Else, we always flush
 * valid PTEs in set_pte.
 */
static inline int pte_need_exec_flush(pte_t pte, int set_pte)
{
	return set_pte && pte_looks_normal(pte) &&
		!(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
		  cpu_has_feature(CPU_FTR_NOEXECUTE));
}
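The pte_need_exec_flush() variants only answer "does this PTE need an I$/D$ flush?"; the flush itself and the PG_arch_1 "clean" tracking are left to the caller. A hypothetical sketch of such a caller follows (maybe_flush_exec_page() is an invented name, not from the source), reusing only the helpers that appear in the variants above:

/* Hypothetical caller (name invented for illustration): perform the flush
 * decided by pte_need_exec_flush() and record the page as clean via
 * PG_arch_1, mirroring what the set_pte_filter() variants above do.
 */
static void maybe_flush_exec_page(pte_t pte, int set_pte)
{
	struct page *pg;

	if (!pte_need_exec_flush(pte, set_pte))
		return;

	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return;

	if (!test_bit(PG_arch_1, &pg->flags)) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
	}
}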