/*
 * Keep the caches coherent for a page that set_pte() just mapped at
 * @address.  If the page is flagged dcache-dirty, flush its kernel
 * alias so that either an executable mapping (on cores whose I-cache
 * is not filled from the D-cache) or a cache-aliasing user mapping
 * observes the up-to-date data, then clear the dirty flag.
 */
void __update_cache(unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	unsigned long vaddr;
	struct page *page;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (!Page_dcache_dirty(page))
		return;

	/* Highmem pages need a temporary kernel mapping to be flushed. */
	vaddr = PageHighMem(page) ? (unsigned long)__kmap_atomic(page)
				  : (unsigned long)page_address(page);

	if (exec || pages_do_alias(vaddr, address & PAGE_MASK))
		flush_data_cache_page(vaddr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)vaddr);

	ClearPageDcacheDirty(page);
}
/*
 * Lazy I-cache flush for CPUs with the NX feature enabled.
 * Called from set_pte().
 */
void mic_flush_icache_nx(pte_t *ptep, pte_t pte)
{
	/*
	 * Nothing to do when I-cache snooping works, or when the NX
	 * feature is not available.
	 */
	if (icache_snoop || !is_nx_support)
		return;

	/*
	 * Mirrors the ia64 set_pte() logic: flush (and let
	 * mic_flush_icache_lazy() set PG_arch_1) only when the new PTE
	 * is present, maps a user-accessible executable page backed by
	 * a real page struct, and refers to a different frame than the
	 * old PTE (swap-in, fresh page, migration, or a COW copy).
	 */
	if (!pte_present(pte) || !pte_user(pte))
		return;
	if (!pfn_valid(pte_pfn(pte)) || pte_no_exec(pte))
		return;
	if (pte_present(*ptep) && pte_pfn(*ptep) == pte_pfn(pte))
		return;

	mic_flush_icache_lazy(pte_page(pte));
}