void __kunmap_atomic(void *kvaddr)
{
    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
    int idx, type;

    if (kvaddr >= (void *)FIXADDR_START) {
        type = kmap_atomic_idx();
        idx = type + KM_TYPE_NR * smp_processor_id();

        if (cache_is_vivt())
            __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
        set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
        local_flush_tlb_kernel_page(vaddr);
#else
        (void) idx;  /* to kill a warning */
#endif
        kmap_atomic_idx_pop();
    } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
        /* this address was obtained through kmap_high_get() */
        kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
    }
    pagefault_enable();
}
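For context, __kunmap_atomic() is the tear-down half of the atomic mapping pair. A minimal usage sketch follows; it assumes the single-argument kmap_atomic() API that matches the kmap_atomic_idx()-based implementation above, and the helper name is hypothetical.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustration only: hypothetical helper, assuming the single-argument
 * kmap_atomic() API.  The mapping is per-CPU and must not be held
 * across anything that can sleep.
 */
static void zero_highmem_page(struct page *page)
{
    void *vaddr = kmap_atomic(page);    /* map into a per-CPU fixmap slot */

    memset(vaddr, 0, PAGE_SIZE);        /* no sleeping between map and unmap */

    kunmap_atomic(vaddr);               /* tears the mapping down again */
}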
void __kunmap(struct page *page)
{
    BUG_ON(in_interrupt());
    if (!PageHighMem(page))
        return;
    kunmap_high(page);
}
void kunmap(struct page *page)
{
    if (in_interrupt())
        BUG();
    if (!PageHighMem(page))
        return;
    kunmap_high(page);
}
void kunmap(struct page *page)
{
    if (in_interrupt())
        BUG();
    if (page < highmem_start_page)
        return;
    kunmap_high(page);
}
void kunmap(struct page *page)
{
    BUG_ON(in_interrupt());
    //POS (Cheolhee Lee)
    if (!(PageHighMem(page) || PageNVRAM(page)))
        return;
    kunmap_high(page);
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
    size_t size, enum dma_data_direction dir,
    void (*op)(const void *, size_t, int))
{
    /*
     * A single sg entry may refer to multiple physically contiguous
     * pages.  But we still need to process highmem pages individually.
     * If highmem is not configured then the bulk of this loop gets
     * optimized out.
     */
    size_t left = size;
    do {
        size_t len = left;
        void *vaddr;

        if (PageHighMem(page)) {
            if (len + offset > PAGE_SIZE) {
                if (offset >= PAGE_SIZE) {
                    page += offset / PAGE_SIZE;
                    offset %= PAGE_SIZE;
                }
                len = PAGE_SIZE - offset;
            }
            vaddr = kmap_high_get(page);
            if (vaddr) {
                vaddr += offset;
                op(vaddr, len, dir);
                kunmap_high(page);
            } else if (cache_is_vipt()) {
                pte_t saved_pte;

                vaddr = kmap_high_l1_vipt(page, &saved_pte);
                if (vaddr) { //TI
                    op(vaddr + offset, len, dir);
                    kunmap_high_l1_vipt(page, saved_pte);
                } else //TI
                    printk(KERN_ERR "SSN: 1 vaddr is NULL\n"); //TI
            } else //TI
                printk(KERN_ERR "SSN: 2 vaddr is NULL\n"); //TI
        } else {
            vaddr = page_address(page) + offset;
            if (vaddr) //TI
                op(vaddr, len, dir);
            else //TI
                printk(KERN_ERR "SSN: 3 vaddr is NULL\n"); //TI
        }
        offset = 0;
        page++;
        left -= len;
    } while (left);
}
void __exit kmap_high_exit(void)
{
    if (pages) {
        kunmap_high(pages);      /* remove the kernel mapping of the highmem page */
        printk("<0>kunmap_high succeed!\n");
        __free_pages(pages, 0);  /* free the highmem page allocated with alloc_pages() */
        printk("<0>__free_pages ok!\n");
    }
    printk("<0>exit ok!\n");
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
    size_t size, enum dma_data_direction dir,
    void (*op)(const void *, size_t, int))
{
    unsigned long pfn;
    size_t left = size;

    pfn = page_to_pfn(page) + offset / PAGE_SIZE;
    offset %= PAGE_SIZE;

    /*
     * A single sg entry may refer to multiple physically contiguous
     * pages.  But we still need to process highmem pages individually.
     * If highmem is not configured then the bulk of this loop gets
     * optimized out.
     */
    /* size_t left = size; */
    do {
        size_t len = left;
        void *vaddr;

        page = pfn_to_page(pfn);

        if (PageHighMem(page)) {
            /*
             * if (len + offset > PAGE_SIZE) {
             *     if (offset >= PAGE_SIZE) {
             *         page += offset / PAGE_SIZE;
             *         offset %= PAGE_SIZE;
             *     }
             */
            if (len + offset > PAGE_SIZE)
                len = PAGE_SIZE - offset;
            /* } */

            vaddr = kmap_high_get(page);
            if (vaddr) {
                vaddr += offset;
                op(vaddr, len, dir);
                kunmap_high(page);
            } else if (cache_is_vipt()) {
                /* unmapped pages might still be cached */
                vaddr = kmap_atomic(page);
                op(vaddr + offset, len, dir);
                kunmap_atomic(vaddr);
            }
        } else {
            vaddr = page_address(page) + offset;
            op(vaddr, len, dir);
        }
        offset = 0;
        /* page++; */
        pfn++;
        left -= len;
    } while (left);
}
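These dma_cache_maint_page() variants sit underneath the streaming DMA API on ARM; a driver normally reaches this cache maintenance indirectly through calls such as dma_map_page(). A hedged caller-side sketch follows; the device, page, and helper names are placeholders, and the exact call chain varies between trees.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hedged sketch of the usual entry point for this cache maintenance:
 * mapping a page for device DMA.  Placeholders: dev, page, offset, size.
 */
static int map_page_for_device(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               dma_addr_t *handle)
{
    *handle = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, *handle))
        return -ENOMEM;  /* no valid DMA address was produced */
    return 0;
}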
/**
 * Tear down a permanent kernel mapping previously established by kmap.
 */
void kunmap(struct page *page)
{
    /*
     * Neither kmap nor kunmap may be called from interrupt context.
     */
    if (in_interrupt())
        BUG();
    /*
     * A page that is not highmem is permanently mapped anyway, so no
     * kernel mapping was set up for it and there is nothing to undo.
     */
    if (!PageHighMem(page))
        return;
    /*
     * kunmap_high does the actual unmapping.
     */
    kunmap_high(page);
}
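Unlike the atomic variants, kmap()/kunmap() may sleep and are therefore process-context only. A minimal sketch of the pairing that the kunmap() above completes; the helper is hypothetical, for illustration.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: copy out of a (possibly highmem) page using the
 * sleeping kmap()/kunmap() pair.  Process context only.
 */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
    void *vaddr = kmap(page);   /* pkmap mapping for highmem pages */

    if (len > PAGE_SIZE)
        len = PAGE_SIZE;
    memcpy(dst, vaddr, len);

    kunmap(page);               /* drops the mapping via kunmap_high() */
}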