void update_mmu_cache(struct vm_area_struct * vma, unsigned long addr,
		pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate any old entry for this address in both TLBs. */

	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {

		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		unsigned long paddr = (unsigned long) page_address(page);
		unsigned long phys = page_to_phys(page);

		/*
		 * Write back and invalidate the page through its kernel
		 * mapping, then through a temporary mapping with the same
		 * cache color as the user address, and invalidate the
		 * icache for that alias as well.
		 */
		__flush_invalidate_dcache_page(paddr);
		__flush_invalidate_dcache_page_alias(vaddr, phys);
		__invalidate_icache_page_alias(vaddr, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long) page_address(page);

		/*
		 * For an executable mapping, make the page coherent for
		 * instruction fetch once, and record that in PG_arch_1.
		 */
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);

		set_bit(PG_arch_1, &page->flags);
	}
#endif
}
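The second variant below appears to be a later revision of the same routine. It replaces the explicit ITLB/DTLB invalidations with flush_tlb_page(), drops XCHAL_DCACHE_IS_WRITEBACK from the aliasing guard, and flushes the kernel-side alias through a temporary TLBTEMP mapping keyed to the physical address rather than calling page_address(), so pages without a permanent kernel mapping (e.g. highmem) are handled too. For the same reason, the non-aliasing path maps the page with kmap_atomic()/kunmap_atomic().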
void update_mmu_cache(struct vm_area_struct * vma, unsigned long addr,
		pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		/*
		 * Flush the page through temporary mappings: first at the
		 * cache color of the physical address (the kernel view),
		 * then at the color of the user address, where the icache
		 * alias is invalidated as well.
		 */
		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		/*
		 * kmap_atomic() also covers highmem pages that have no
		 * permanent kernel mapping.
		 */
		unsigned long paddr = (unsigned long)kmap_atomic(page);

		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);

		kunmap_atomic((void *)paddr);
	}
#endif
}