/*
 * __flush_anon_page - purge stale d-cache lines for an anonymous page
 * before the kernel touches it through its own (differently coloured)
 * virtual address.
 * @page:   anonymous page about to be accessed by the kernel
 * @vmaddr: user virtual address the page is mapped at
 *
 * Work is only needed when the kernel address and @vmaddr fall into
 * different cache alias colours (pages_do_alias()).  If the page is
 * still mapped and not marked dirty in the d-cache, the flush is done
 * through a kmap_coherent() mapping with the same colour as the user
 * mapping; otherwise the kernel lowmem address is flushed directly.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
#ifdef CONFIG_RALINK_SOC
	if (!PageHighMem(page)) {
		unsigned long addr = (unsigned long) page_address(page);

		if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				/* flush via a same-colour kernel mapping */
				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				flush_data_cache_page(addr);
				ClearPageDcacheDirty(page);
			}
		}
	} else {
		/*
		 * NOTE(review): @page is highmem in this branch, yet
		 * lowmem_page_address() is used for the alias check —
		 * presumably valid for this SoC's memory layout, but
		 * confirm: for a highmem page this address is not a
		 * usable kernel mapping in general.
		 */
		void *laddr = lowmem_page_address(page);

		if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				void *kaddr;

				/* legacy two-argument kmap_atomic() API */
				kaddr = kmap_atomic(page, KM_PTE1);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_atomic(kaddr, KM_PTE1);
				ClearPageDcacheDirty(page);
			}
		}
	}
#else
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			/* flush via a same-colour kernel mapping */
			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
#endif
}
void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *vfrom, *vto; vto = kmap_atomic(to); if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && test_bit(PG_dcache_clean, &from->flags)) { vfrom = kmap_coherent(from, vaddr); copy_page(vto, vfrom); kunmap_coherent(vfrom); } else { vfrom = kmap_atomic(from); copy_page(vto, vfrom); kunmap_atomic(vfrom); } if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || (vma->vm_flags & VM_EXEC)) __flush_purge_region(vto, PAGE_SIZE); kunmap_atomic(vto); /* Make sure this page is cleared on other CPU's too before using it */ smp_wmb(); }
/*
 * __update_cache - make caches coherent after a PTE has been updated.
 * @vma:     VMA the PTE belongs to (VM_EXEC decides i-cache concern)
 * @address: user virtual address covered by @pte
 * @pte:     the new PTE value
 *
 * If the page is mapped and its d-cache is dirty, flush the kernel-side
 * cache lines when either the mapping is executable (and the i-cache
 * does not fill from the d-cache) or the two addresses alias.
 * Highmem pages are temporarily mapped with kmap_atomic() for the flush.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	int want_iflush = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
	struct page *pg;

	if (unlikely(!pfn_valid(pfn))) {
		wmb();
		return;
	}

	pg = pfn_to_page(pfn);
	if (page_mapped(pg) && Page_dcache_dirty(pg)) {
		void *mapped = NULL;
		unsigned long kva;

		if (PageHighMem(pg)) {
			mapped = kmap_atomic(pg);
			kva = (unsigned long)mapped;
		} else {
			kva = (unsigned long)page_address(pg);
		}

		if (want_iflush ||
		    (cpu_has_dc_aliases &&
		     pages_do_alias(kva, address & PAGE_MASK))) {
			flush_data_cache_page(kva);
			ClearPageDcacheDirty(pg);
		}

		if (mapped)
			kunmap_atomic(mapped);
	}

	wmb(); /* finish any outstanding arch cache flushes before ret to user */
}
/*
 * __update_cache - flush a freshly-mapped page's stale d-cache lines.
 * @address: user virtual address covered by @pte
 * @pte:     the new PTE value (executability read via pte_no_exec())
 *
 * Only pages marked d-cache dirty need work.  Highmem pages are mapped
 * temporarily with __kmap_atomic() so they have a flushable address.
 */
void __update_cache(unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	unsigned long kva;
	struct page *pg;
	int highmem;

	if (unlikely(!pfn_valid(pfn)))
		return;

	pg = pfn_to_page(pfn);
	if (!Page_dcache_dirty(pg))
		return;

	highmem = PageHighMem(pg);
	kva = highmem ? (unsigned long)__kmap_atomic(pg)
		      : (unsigned long)page_address(pg);

	/* flush for executable mappings (unless i-cache fills from
	 * d-cache) or when the kernel and user colours alias */
	if ((!pte_no_exec(pte) && !cpu_has_ic_fills_f_dc) ||
	    pages_do_alias(kva, address & PAGE_MASK))
		flush_data_cache_page(kva);

	if (highmem)
		__kunmap_atomic((void *)kva);

	ClearPageDcacheDirty(pg);
}
/*
 * __flush_anon_page - flush an anonymous page's d-cache alias, if any.
 * @page:   the anonymous page
 * @vmaddr: user virtual address it is mapped at
 *
 * Flushes through a kmap_coherent() mapping coloured like the user
 * address, but only when the kernel address actually aliases it.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	void *cmap;

	if (!pages_do_alias((unsigned long)page_address(page), vmaddr))
		return;

	cmap = kmap_coherent(page, vmaddr);
	flush_data_cache_page((unsigned long)cmap);
	kunmap_coherent();
}
/*
 * clear_user_highpage - zero a page destined for userspace, purging the
 * kernel-side cache lines if they alias the user mapping.
 * @page:  the page to clear
 * @vaddr: user virtual address it will be mapped at
 */
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kva = kmap_atomic(page);

	clear_page(kva);

	/* purge when the kernel and user colours differ */
	if (pages_do_alias((unsigned long)kva, vaddr & PAGE_MASK))
		__flush_purge_region(kva, PAGE_SIZE);

	kunmap_atomic(kva);
}
/*
 * check_unmapped_fixed_area - validate a MAP_FIXED address hint.
 * @file:  backing file (unused; kept for the get_unmapped_area shape)
 * @addr:  requested fixed address
 * @len:   mapping length
 * @pgoff: file page offset
 * @flags: mmap flags (MAP_SHARED triggers the alias check)
 *
 * Returns @addr when acceptable, -ENOMEM when the range does not fit
 * below TASK_SIZE, or -EINVAL when misaligned or cache-alias-illegal.
 */
unsigned long check_unmapped_fixed_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	/* the whole range must fit below the task's address-space limit */
	if (addr > TASK_SIZE - len)
		return -ENOMEM;

	/* a fixed mapping must begin on a page boundary */
	if (addr & ~PAGE_MASK)
		return -EINVAL;

	/*
	 * Shared mappings must not create an illegal d-cache alias
	 * between the file-offset colour and the chosen address.
	 */
	if ((flags & MAP_SHARED) && pages_do_alias(pgoff << PAGE_SHIFT, addr))
		return -EINVAL;

	return addr;
}
/*
 * __flush_anon_page - flush an anonymous page's aliasing d-cache lines.
 * @page:   the anonymous page
 * @vmaddr: user virtual address it is mapped at
 *
 * If the page is still mapped and not marked d-cache dirty, the flush
 * goes through a kmap_coherent() mapping with the user colour;
 * otherwise the kernel lowmem address is flushed directly.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long kva = (unsigned long) page_address(page);
	void *cmap;

	if (!pages_do_alias(kva, vmaddr))
		return;

	if (page_mapped(page) && !Page_dcache_dirty(page)) {
		/* mapped and clean: flush via a same-colour mapping */
		cmap = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)cmap);
		kunmap_coherent();
	} else {
		flush_data_cache_page(kva);
	}
}
void __flush_anon_page(struct page *page, unsigned long vmaddr) { unsigned long addr = (unsigned long) page_address(page); if (pages_do_alias(addr, vmaddr)) { if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && test_bit(PG_dcache_clean, &page->flags)) { void *kaddr; kaddr = kmap_coherent(page, vmaddr); /* XXX.. For now kunmap_coherent() does a purge */ /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */ kunmap_coherent(kaddr); } else __flush_purge_region((void *)addr, PAGE_SIZE); } }
/*
 * __update_cache - flush stale d-cache lines after a PTE update.
 * @vma:     VMA the PTE belongs to (unused here)
 * @address: user virtual address covered by @pte
 * @pte:     the new PTE value
 *
 * Restructured from the original comma-operator condition into explicit
 * guard clauses; behaviour is unchanged.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	unsigned long kva;
	struct page *pg;

	if (!pfn_valid(pfn))
		return;

	pg = pfn_to_page(pfn);
	if (!page_mapping(pg) || !Page_dcache_dirty(pg))
		return;

	kva = (unsigned long)page_address(pg);
	if (pages_do_alias(kva, address & PAGE_MASK))
		flush_data_cache_page(kva);

	ClearPageDcacheDirty(pg);
}
/*
 * mips_flush_dcache_from_pte - flush a page's d-cache lines based on a
 * PTE value being installed.
 * @pteval:  the PTE being written
 * @address: user virtual address it maps
 *
 * Flushes when the i-cache cannot fill from the d-cache, or when the
 * kernel and user addresses alias.  (NOTE(review): unlike sibling
 * __update_cache variants, this gate is not conditioned on VM_EXEC —
 * preserved as-is from the original.)
 */
static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
{
	unsigned long pfn = pte_pfn(pteval);
	unsigned long kva;
	struct page *pg;

	if (unlikely(!pfn_valid(pfn)))
		return;

	pg = pfn_to_page(pfn);
	if (!page_mapping(pg) || !Page_dcache_dirty(pg))
		return;

	kva = (unsigned long) page_address(pg);
	if (!cpu_has_ic_fills_f_dc ||
	    pages_do_alias(kva, address & PAGE_MASK))
		flush_data_cache_page(kva);

	ClearPageDcacheDirty(pg);
}
/*
 * __update_cache - flush stale d-cache lines after a PTE update.
 * @vma:     VMA the PTE belongs to (VM_EXEC forces an eventual flush
 *           unless the i-cache fills from the d-cache)
 * @address: user virtual address covered by @pte
 * @pte:     the new PTE value
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	unsigned long kva;
	struct page *pg;

	if (unlikely(!pfn_valid(pfn)))
		return;

	pg = pfn_to_page(pfn);
	if (!page_mapping(pg) || !Page_dcache_dirty(pg))
		return;

	kva = (unsigned long) page_address(pg);
	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
	    pages_do_alias(kva, address & PAGE_MASK))
		flush_data_cache_page(kva);

	ClearPageDcacheDirty(pg);
}
/*
 * __update_cache - flush stale d-cache lines after a PTE update
 * (legacy variant using VALID_PAGE() and raw page->flags tests).
 * @vma:     VMA the PTE belongs to (unused here)
 * @address: user virtual address covered by @pte
 * @pte:     the new PTE value
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	unsigned long kva;
	struct page *pg;

	/* nothing to do when the d-cache has no aliasing colours */
	if (!cpu_has_dc_aliases)
		return;

	pg = pte_page(pte);
	if (!VALID_PAGE(pg) || !pg->mapping ||
	    !(pg->flags & (1UL << PG_dcache_dirty)))
		return;

	kva = (unsigned long)page_address(pg);
	if (pages_do_alias(kva, address & PAGE_MASK))
		flush_data_cache_page(kva);

	ClearPageDcacheDirty(pg);
}
/*
 * __update_cache - purge a page's stale d-cache alias after its PTE is
 * installed, so the new user mapping observes current data.
 * @vma:     VMA the PTE belongs to (unused here)
 * @address: user virtual address covered by @pte
 * @pte:     the new PTE value
 *
 * Only relevant on aliasing (n_aliases != 0) d-caches.  The dirty bit
 * is cleared atomically via test_and_clear_bit(), and the purge is done
 * only when the kernel and user colours actually alias.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	/*
	 * Validate the pfn BEFORE translating it to a struct page:
	 * the original computed pfn_to_page() first, which with a
	 * sparse memory model can index an absent section for an
	 * invalid pfn.
	 */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) &&
	    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		unsigned long addr = (unsigned long)page_address(page);

		if (pages_do_alias(addr, address & PAGE_MASK))
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}