/*
 * Flush an anonymous page that may be cached at an aliasing user virtual
 * address @vmaddr as well as at its kernel mapping.
 *
 * Only acts when the kernel and user addresses can alias in a virtually
 * indexed D-cache (pages_do_alias()).  If the page is mapped and not
 * marked dirty in the D-cache, the flush is done through a temporary
 * coherent mapping at the user colour (kmap_coherent); otherwise the
 * kernel address is flushed directly and the dirty mark is cleared.
 *
 * NOTE(review): the CONFIG_RALINK_SOC highmem branch feeds
 * lowmem_page_address() of a *highmem* page into pages_do_alias() — that
 * address is not a valid mapping, presumably only its colour bits matter
 * for the alias test; confirm against the SoC cache layout.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
#ifdef CONFIG_RALINK_SOC
	if (!PageHighMem(page)) {
		unsigned long addr = (unsigned long) page_address(page);

		if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				/* flush via a mapping of the user's colour */
				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				flush_data_cache_page(addr);
				ClearPageDcacheDirty(page);
			}
		}
	} else {
		/* highmem: no permanent kernel mapping available */
		void *laddr = lowmem_page_address(page);

		if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				void *kaddr;

				/* old two-argument kmap_atomic API */
				kaddr = kmap_atomic(page, KM_PTE1);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_atomic(kaddr, KM_PTE1);
				ClearPageDcacheDirty(page);
			}
		}
	}
#else
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
#endif
}
/*
 * Make a just-installed PTE's page coherent in the caches.
 *
 * If the page was left dirty in the D-cache, flush it when either the
 * new mapping is executable (and the I-cache does not fill from the
 * D-cache) or the kernel and user addresses alias, then clear the
 * dirty mark.  Highmem pages are flushed through a transient mapping.
 */
void __update_cache(unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	unsigned long kvaddr;
	struct page *page;
	int highmem;
	int exec;

	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (!Page_dcache_dirty(page))
		return;

	highmem = PageHighMem(page);
	if (highmem)
		kvaddr = (unsigned long)__kmap_atomic(page);
	else
		kvaddr = (unsigned long)page_address(page);

	/* executable mapping needs I-cache coherence unless HW fills I$ from D$ */
	exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
	if (exec || pages_do_alias(kvaddr, address & PAGE_MASK))
		flush_data_cache_page(kvaddr);

	if (highmem)
		__kunmap_atomic((void *)kvaddr);

	ClearPageDcacheDirty(page);
}
/*
 * Bring a freshly mapped page into cache coherence for @vma at @address.
 *
 * Flushes the page's kernel alias when it is mapped and D-cache dirty,
 * and either the VMA is executable (with an I-cache that does not fill
 * from the D-cache) or the CPU has D-cache aliases and the addresses
 * collide.  Note the dirty bit is cleared only when a flush was done.
 *
 * A wmb() is issued on *every* exit path so all outstanding cache-flush
 * writes complete before returning to the user.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn))) {
		/* still order prior flushes before returning */
		wmb();
		return;
	}
	page = pfn_to_page(pfn);
	if (page_mapped(page) && Page_dcache_dirty(page)) {
		void *kaddr = NULL;
		if (PageHighMem(page)) {
			/* transient mapping; remember it for kunmap below */
			addr = (unsigned long)kmap_atomic(page);
			kaddr = (void *)addr;
		} else
			addr = (unsigned long) page_address(page);
		if (exec || (cpu_has_dc_aliases &&
		    pages_do_alias(addr, address & PAGE_MASK))) {
			flush_data_cache_page(addr);
			ClearPageDcacheDirty(page);
		}
		if (kaddr)
			kunmap_atomic((void *)kaddr);
	}
	wmb(); /* finish any outstanding arch cache flushes before ret to user */
}
/*
 * Resolve a deferred D-cache flush for the page just mapped by @pte.
 *
 * Pages flagged dirty by __flush_dcache_page() are flushed here iff
 * their kernel address aliases the new user address; the dirty mark is
 * cleared either way once the page has a mapping.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	unsigned long kaddr;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!page_mapping(page) || !Page_dcache_dirty(page))
		return;

	kaddr = (unsigned long) page_address(page);
	if (pages_do_alias(kaddr, address & PAGE_MASK))
		flush_data_cache_page(kaddr);

	ClearPageDcacheDirty(page);
}
/*
 * Flush a deferred-dirty page described by @pteval.
 *
 * If the page has a mapping and was marked D-cache dirty, flush its
 * kernel address when either the I-cache does not fill from the D-cache
 * or the kernel and user addresses alias, then clear the dirty mark.
 */
static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
{
	unsigned long frame = pte_pfn(pteval);
	unsigned long kvaddr;
	struct page *page;

	if (unlikely(!pfn_valid(frame)))
		return;

	page = pfn_to_page(frame);
	if (!page_mapping(page) || !Page_dcache_dirty(page))
		return;

	kvaddr = (unsigned long) page_address(page);
	if (!cpu_has_ic_fills_f_dc ||
	    pages_do_alias(kvaddr, address & PAGE_MASK))
		flush_data_cache_page(kvaddr);

	ClearPageDcacheDirty(page);
}
/*
 * Complete any deferred D-cache maintenance for the page newly mapped
 * into @vma at @address by @pte.
 *
 * A mapped, D-cache-dirty page is flushed when the VMA is executable
 * (and the I-cache does not fill from the D-cache) or when its kernel
 * and user addresses alias; the dirty mark is then cleared.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	unsigned long kvaddr;
	struct page *page;
	int exec;

	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (!page_mapping(page) || !Page_dcache_dirty(page))
		return;

	exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
	kvaddr = (unsigned long) page_address(page);
	if (exec || pages_do_alias(kvaddr, address & PAGE_MASK))
		flush_data_cache_page(kvaddr);

	ClearPageDcacheDirty(page);
}
/*
 * 2.4-era deferred D-cache flush for the page newly mapped by @pte.
 *
 * Only relevant on CPUs whose D-cache can alias.  A page with a mapping
 * that was left D-cache dirty is flushed when its kernel and user
 * addresses alias; the dirty mark is cleared either way.
 *
 * Fix: use the Page_dcache_dirty() accessor instead of open-coding
 * "page->flags & (1UL << PG_dcache_dirty)" — this file already uses
 * ClearPageDcacheDirty() from the same accessor family, and the raw
 * flag test duplicated the macro's definition.  Also hoist the
 * duplicated page_address() call.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!cpu_has_dc_aliases)
		return;

	page = pte_page(pte);
	if (VALID_PAGE(page) && page->mapping &&
	    Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}
/*
 * Flush a page from the D-cache, or defer the flush if nothing maps it.
 *
 * A page that has a mapping but is not currently mapped into any
 * process is only marked dirty (SetPageDcacheDirty); the real flush
 * happens later from __update_cache() when it is faulted in.
 *
 * The CONFIG_RALINK_SOC variant handles highmem pages via a transient
 * kmap_atomic mapping and clears the dirty mark after flushing; the
 * generic variant bails out on highmem and flushes the lowmem kernel
 * address directly.  Note the two variants also type @addr differently
 * (void * vs unsigned long) — preprocessor branches must stay paired.
 */
void __flush_dcache_page(struct page *page)
{
#ifdef CONFIG_RALINK_SOC
	void *addr;

	if (page_mapping(page) && !page_mapped(page)) {
		SetPageDcacheDirty(page);
		return;
	}
#else
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}
#endif
	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
#ifdef CONFIG_RALINK_SOC
	if (PageHighMem(page)) {
		/* old two-argument kmap_atomic API */
		addr = kmap_atomic(page, KM_PTE1);
		flush_data_cache_page((unsigned long)addr);
		kunmap_atomic(addr, KM_PTE1);
	} else {
		addr = (void *) page_address(page);
		flush_data_cache_page((unsigned long)addr);
	}
	ClearPageDcacheDirty(page);
#else
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
#endif
}
/*
 * Flush a page from the D-cache, deferring when possible.
 *
 * If the page belongs to a mapping but no process currently maps it,
 * just mark it dirty; __update_cache() will flush it when it is next
 * faulted in.  Otherwise flush now — through a transient kmap_atomic
 * mapping for highmem pages — and clear the dirty mark.
 *
 * We could delay the flush for the !page_mapping case too, but that
 * case is for exec env/arg pages and those are 99% certain to get
 * faulted into the TLB (and thus flushed) anyway.
 */
void __flush_dcache_page(struct page *page)
{
	void *kvaddr;
	int highmem;

	if (page_mapping(page) && !page_mapped(page)) {
		SetPageDcacheDirty(page);
		return;
	}

	highmem = PageHighMem(page);
	kvaddr = highmem ? kmap_atomic(page) : page_address(page);
	flush_data_cache_page((unsigned long)kvaddr);
	if (highmem)
		kunmap_atomic(kvaddr);

	ClearPageDcacheDirty(page);
}