static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
}
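/*
 * Hedged sketch (not part of the original file): the kmap_pte/kmap_prot
 * globals initialized above are what a kmap_atomic()-style helper
 * indexes per slot.  "my_kmap_atomic_sketch" is a hypothetical name;
 * the real entry point also disables pagefaults/preemption and tracks
 * the slot index per cpu.
 */
static void *my_kmap_atomic_sketch(struct page *page, int idx)
{
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* kmap_pte is the PTE for slot 0; fixmap slots grow downward. */
	set_pte_at(&init_mm, vaddr, kmap_pte - idx, mk_pte(page, kmap_prot));
	local_flush_tlb_page(NULL, vaddr);
	return (void *)vaddr;
}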
void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	/* Borrow a per-cpu kmap_atomic slot for the temporary mapping. */
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	/* No highmem: use this cpu's dedicated homecache fixmap slot. */
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	/* Map the page at "va" with its caching home, flush it, then unmap. */
	ptep = virt_to_kpte(va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	homecache_finv_page_va((void *)va, home);
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}
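/*
 * Illustrative caller (invented for this writeup, not in the original
 * source): flush and invalidate a page's cached data back to its
 * current home before handing the page to another agent.
 * "my_hand_off_page" is a hypothetical helper name.
 */
static void my_hand_off_page(struct page *page)
{
	homecache_finv_map_page(page, page_home(page));
}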
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		/* Highmem pages are always hash-for-home. */
		return PAGE_HOME_HASH;
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_kpte(kva));
	}
}
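/*
 * Sketch only: page_home() pairs naturally with pte_set_home() when a
 * new PTE should inherit an existing page's homing.  "my_mirror_home"
 * is a hypothetical name introduced for illustration.
 */
static pte_t my_mirror_home(struct page *page, pte_t pte)
{
	return pte_set_home(pte, page_home(page));
}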
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	/* Only lowmem pages with no other users may be re-homed. */
	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);

	/* Evict any cached lines everywhere before changing the home. */
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_kpte(kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
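/*
 * Hedged usage sketch: a freshly allocated page satisfies the BUG_ON
 * preconditions above (refcount 1, map count 0), so it can be re-homed
 * immediately, e.g. as uncached memory.  "my_alloc_uncached_page" is a
 * hypothetical helper; the tree's own homecache_alloc_pages() plays a
 * similar role.
 */
static struct page *my_alloc_uncached_page(gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, 0);

	if (page)
		homecache_change_page_home(page, 0, PAGE_HOME_UNCACHED);
	return page;
}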
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_kpte(addr);

		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
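/*
 * Sketch of a typical caller, modeled loosely on free_initmem(): drop
 * the init-text range once boot completes.  _sinittext/_einittext come
 * from the kernel linker script.  Note the real caller also evicts
 * these lines from remote caches first, which the comment inside
 * free_init_pages() above relies on; that step is omitted here.
 */
void my_free_initmem_sketch(void)
{
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext,
			(unsigned long)_einittext);
}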