/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
	int pages = 1 << order;
	int length = cache_flush_length(pages * PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	struct cpumask home_mask;

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}
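/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * caller (example_flush_before_dma is an invented name) that evicts a
 * 2^order-page buffer from all caches before handing it to an agent that
 * will access memory without going through those caches.
 */
static void example_flush_before_dma(struct page *page, int order)
{
	/* Evict every line of the buffer from whichever cache(s) hold it. */
	homecache_flush_cache(page, order);
}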
/*
 * Flush and invalidate a single page from the cache(s) holding it.
 * A page with an incoherent home must be finv'd on every cacheable cpu;
 * otherwise it is finv'd on its home cpu, through a temporary mapping
 * if the caller forces one, or directly if not.
 */
static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);

	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}
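/*
 * Sketch (an assumption, not copied from the original source): the internal
 * helper above is presumably exposed through a thin wrapper along these
 * lines for the common, non-forced case.
 */
void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}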