void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
#ifdef CONFIG_HIGHMEM
        /*
         * kmap_atomic() doesn't set the page virtual address, and
         * kunmap_atomic() takes care of cache flushing already.
         */
        if (page_address(page))
#endif
                __cpuc_flush_dcache_page(page_address(page));
}
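/*
 * A minimal sketch (hypothetical helper, not kernel code) of the guard
 * above with the preprocessor conditional expanded: on CONFIG_HIGHMEM
 * kernels, page_address() returns NULL for a highmem page that has no
 * kernel mapping, so there is nothing to flush; kunmap_atomic() will
 * already have flushed any transient fixmap mapping when tearing it down.
 */
static void flush_page_if_mapped(struct page *page)
{
        void *addr = page_address(page);        /* NULL for unmapped highmem */

        if (addr)
                __cpuc_flush_dcache_page(addr);
}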
/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
        struct page *from, unsigned long vaddr)
{
        void *kto, *kfrom;

        kfrom = kmap_atomic(from, KM_USER0);
        kto = kmap_atomic(to, KM_USER1);
        copy_page(kto, kfrom);
#ifdef CONFIG_HIGHMEM
        /*
         * kmap_atomic() doesn't set the page virtual address, and
         * kunmap_atomic() takes care of cache flushing already.
         */
        if (page_address(to) != NULL)
#endif
                __cpuc_flush_dcache_page(kto);
        kunmap_atomic(kto, KM_USER1);
        kunmap_atomic(kfrom, KM_USER0);
}
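/*
 * A restated sketch of the flush decision above (hypothetical helper,
 * not kernel code). The explicit flush of kto is only needed when the
 * destination page has a kernel virtual address: in that case
 * kmap_atomic() handed back a lowmem or kmap_high_get() address, which
 * kunmap_atomic() does not flush. A fixmap address is flushed by
 * kunmap_atomic() itself, so flushing it here would be redundant.
 */
static void flush_copy_destination(struct page *to, void *kto)
{
#ifdef CONFIG_HIGHMEM
        if (page_address(to) != NULL)
                __cpuc_flush_dcache_page(kto);
#else
        __cpuc_flush_dcache_page(kto);  /* no highmem: kto is always lowmem */
#endif
}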
void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

        if (kvaddr >= (void *)FIXADDR_START) {
                __cpuc_flush_dcache_page((void *)vaddr);
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
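/*
 * A minimal usage sketch (hypothetical helper), assuming the pre-2.6.37
 * kmap_atomic() API that still takes an explicit km_type slot. The
 * mapping is strictly per-CPU and must not be held across a sleep.
 * kunmap_atomic() above copes with every address a caller can hand back:
 * a fixmap address (flushed and unmapped), a pkmap address obtained via
 * kmap_high_get() (refcount dropped through kunmap_high()), or a plain
 * lowmem address (nothing to undo beyond re-enabling pagefaults).
 */
static void zero_page_contents(struct page *page)
{
        void *addr = kmap_atomic(page, KM_USER0);

        memset(addr, 0, PAGE_SIZE);
        kunmap_atomic(addr, KM_USER0);
}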