/*
 * Write back the D-cache lines covering @page, or defer the flush when
 * the page is not currently mapped into any user address space (the
 * deferred flush happens later on the fault path via PG_dcache_dirty).
 */
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	const int highmem = PageHighMem(page);
	unsigned long kaddr;

	if (mapping && !mapping_mapped(mapping)) {
		/* No user mappings yet: just mark dirty and flush lazily. */
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	kaddr = highmem ? (unsigned long)__kmap_atomic(page)
			: (unsigned long)page_address(page);

	flush_data_cache_page(kaddr);

	if (highmem)
		__kunmap_atomic((void *)kaddr);
}
/*
 * Write back the D-cache lines covering @page, deferring the flush (via
 * PG_dcache_dirty) when the page has no user mapping yet.
 *
 * Two compile-time variants:
 *  - CONFIG_RALINK_SOC: supports highmem pages through a temporary
 *    kmap_atomic() mapping (legacy two-argument API with an explicit
 *    KM_PTE1 slot) and clears PG_dcache_dirty after flushing.
 *  - otherwise: highmem pages are skipped entirely and the dirty flag
 *    is left set for the fault path to handle.
 *
 * NOTE(review): the two branches also differ in the defer condition
 * (page_mapped() vs mapping_mapped()) — presumably intentional across
 * kernel versions, but verify against the surrounding tree.
 */
void __flush_dcache_page(struct page *page)
{
#ifdef CONFIG_RALINK_SOC
	void *addr;

	/* No user mapping yet: mark dirty and flush lazily at fault time. */
	if (page_mapping(page) && !page_mapped(page)) {
		SetPageDcacheDirty(page);
		return;
	}
#else
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	/* Highmem pages have no permanent kernel mapping to flush through. */
	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}
#endif
	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
#ifdef CONFIG_RALINK_SOC
	if (PageHighMem(page)) {
		/* Temporarily map the highmem page so it can be flushed. */
		addr = kmap_atomic(page, KM_PTE1);
		flush_data_cache_page((unsigned long)addr);
		kunmap_atomic(addr, KM_PTE1);
	} else {
		addr = (void *) page_address(page);
		flush_data_cache_page((unsigned long)addr);
	}
	/* The page is now clean; drop the deferred-flush marker. */
	ClearPageDcacheDirty(page);
#else
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
#endif
}
/*
 * Write back the D-cache lines covering @page.  Highmem pages are
 * skipped (no permanent kernel mapping); pages with no current user
 * mapping are only marked PG_dcache_dirty so the fault path can flush
 * them lazily.
 */
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (PageHighMem(page))
		return;

	if (mapping && !mapping_mapped(mapping)) {
		/* Not mapped by any user: postpone the flush. */
		SetPageDcacheDirty(page);
		return;
	}

	flush_data_cache_page((unsigned long)page_address(page));
}
void flush_dcache_page(struct page *page) { unsigned long addr; if (!cpu_has_dc_aliases) return; if (page->mapping && page->mapping->i_mmap == NULL && page->mapping->i_mmap_shared == NULL) { SetPageDcacheDirty(page); return; } /* * We could delay the flush for the !page->mapping case too. But that * case is for exec env/arg pages and those are 99% certainly going to * get faulted into the tlb (and thus flushed) anyways. */ addr = (unsigned long) page_address(page); flush_data_cache_page(addr); }
void __flush_dcache_page(struct page *page) { void *addr; if (page_mapping(page) && !page_mapped(page)) { SetPageDcacheDirty(page); return; } /* * We could delay the flush for the !page_mapping case too. But that * case is for exec env/arg pages and those are %99 certainly going to * get faulted into the tlb (and thus flushed) anyways. */ if (PageHighMem(page)) { addr = kmap_atomic(page); flush_data_cache_page((unsigned long)addr); kunmap_atomic(addr); } else { addr = (void *) page_address(page); flush_data_cache_page((unsigned long)addr); } ClearPageDcacheDirty(page); }