void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
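For context, a minimal usage sketch, not part of the source above: copying a possibly-highmem page through an atomic (fixmap) mapping. The helper name copy_page_to_buf and its calling context are assumptions; note that on older kernels kmap_atomic() also took a KM_* type argument.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: copy one page into a kernel buffer through an
 * atomic mapping.  Pagefaults are disabled between the two calls, so
 * nothing in between may sleep. */
static void copy_page_to_buf(struct page *page, void *buf)
{
	void *vaddr = kmap_atomic(page);

	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);	/* lands in __kunmap_atomic() above */
}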
struct page *kmap_to_page(void *ptr)
{
	struct page *page;

	/* Addresses below the pkmap area are lowmem: use the linear map. */
	if ((unsigned long)ptr < PKMAP_ADDR(0))
		return virt_to_page(ptr);

	/* Otherwise look the page up in its pkmap page-table slot. */
	page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]);
	return page;
}
void kunmap_virt(void *ptr)
{
	struct page *page;

	/* Nothing to do for lowmem addresses: they were never kmap()ed. */
	if ((unsigned long)ptr < PKMAP_ADDR(0))
		return;

	/* Recover the struct page from the pkmap slot and drop the mapping. */
	page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]);
	kunmap(page);
}
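Both helpers above decode a pkmap virtual address back to its struct page via PKMAP_NR() and pkmap_page_table. A hedged sketch of one way a caller might use this when it kept only the virtual address; pin_backing_page is a hypothetical name, not a kernel API.

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical helper: take a reference on whatever page backs a
 * kernel virtual address, whether it is lowmem or a pkmap mapping. */
static struct page *pin_backing_page(void *vaddr)
{
	struct page *page = kmap_to_page(vaddr);

	get_page(page);
	return page;
}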
void fastcall *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 *
	 * We cannot call this from interrupts, as it may block.
	 */
	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
		BUG();
	spin_unlock(&kmap_lock);
	return (void*) vaddr;
}
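The BUG() guards the pkmap_count invariant: a count of 1 means "mapped but unused", so after taking a reference the count must be at least 2. kmap_high() itself is only the highmem slow path; the per-architecture kmap() wrapper typically looks like the sketch below (details vary by architecture and kernel version; my_kmap is an illustrative name).

#include <linux/highmem.h>
#include <linux/mm.h>

/* Sketch of the usual kmap() wrapper: lowmem pages already live in the
 * linear mapping, so only highmem pages take the locked slow path. */
static inline void *my_kmap(struct page *page)
{
	might_sleep();		/* map_new_virtual() can block waiting for a slot */
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}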
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START) {
		/* Not a fixmap address: either a pkmap mapping or plain lowmem. */
		if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
			return pte_page(pkmap_page_table[PKMAP_NR(vaddr)]);
		else
			return virt_to_page(ptr);
	}

	/* Fixmap address: read the page straight out of its PTE. */
	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}
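A hedged sketch of a caller, assuming a debug context; check_atomic_mapping is a hypothetical helper, not a kernel API.

#include <linux/highmem.h>
#include <linux/bug.h>

/* Hypothetical debug helper: verify that a mapped address resolves
 * back to the expected page regardless of which range it fell in. */
static void check_atomic_mapping(const void *kvaddr, struct page *expected)
{
	BUG_ON(kmap_atomic_to_page(kvaddr) != expected);
}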
void fastcall kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	int need_wakeup;

	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		BUG();
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	spin_unlock(&kmap_lock);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}
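Putting the two halves together, a minimal sleeping-context usage sketch; zero_highmem_page is an illustrative name. kmap() takes the reference through kmap_high() above, and kunmap() drops it through kunmap_high(), which may wake a task waiting in map_new_virtual() once the count falls back to 1.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: zero a (possibly highmem) page from a context
 * that is allowed to sleep. */
static void zero_highmem_page(struct page *page)
{
	void *vaddr = kmap(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);		/* ends in kunmap_high() for highmem pages */
}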