/* Flush-and-invalidate a page whose home is already known. */
static void homecache_finv_page_home(struct page *page, int home)
{
	/* Use the direct lowmem VA when the page's home already matches. */
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}
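/*
 * A minimal sketch of what the homecache_finv_map_page() fallback
 * plausibly does: install a temporary kernel PTE whose home attribute
 * is the requested one, finv through that VA, then tear the mapping
 * down.  Illustrative only: the fixmap slot (FIX_HOMECACHE_BEGIN) and
 * helpers (virt_to_kpte, __set_pte, pte_set_home) are assumptions
 * here, and the real code must also handle highmem kmap slots, irq
 * disabling, and hypervisor TLB flushes.
 */
static void homecache_finv_map_page_sketch(struct page *page, int home)
{
	unsigned long va = __fix_to_virt(FIX_HOMECACHE_BEGIN +
					 smp_processor_id());
	pte_t *ptep = virt_to_kpte(va);
	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);

	__set_pte(ptep, pte_set_home(pte, home));	/* map with 'home' */
	homecache_finv_page_va((void *)va, home);	/* finv via that VA */
	__set_pte(ptep, __pte(0));			/* unmap again */
}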
/* Flush-and-invalidate a page back to memory, whatever its home. */
static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);

	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		/* Lines may be cached on any cpu, so finv each one. */
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}
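/*
 * The _internal suffix suggests thin public wrappers around this
 * helper.  A sketch of how such an entry point plausibly looks (the
 * exported name homecache_finv_page is an assumption here):
 */
void homecache_finv_page(struct page *page)
{
	/* Normal callers do not need to force the mapping path. */
	homecache_finv_page_internal(page, 0);
}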
/* Make the page ready to be read by the core. */
static void __dma_complete_page(struct page *page, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
#ifdef __tilegx__
	switch (page_home(page)) {
	case PAGE_HOME_HASH:
		/* I/O device delivered data the way the cpu wanted it. */
		break;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Extra read-only copies are not a problem. */
		break;
	default:
		/* Flush the bogus hash-for-home I/O entries to memory. */
		homecache_finv_map_page(page, PAGE_HOME_HASH);
		break;
	}
#endif
}
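/*
 * For context, a sketch of how a DMA-unmap path would plausibly call
 * into this helper once a device-to-memory transfer finishes.  The
 * wrapper name __dma_complete_pfn is an assumption; real callers
 * would also walk scatterlist entries spanning multiple pages.
 */
static void __dma_complete_pfn(unsigned long pfn, unsigned long offset,
			       size_t size, enum dma_data_direction direction)
{
	__dma_complete_page(pfn_to_page(pfn), offset, size, direction);
}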