static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				pte_t saved_pte;

				vaddr = kmap_high_l1_vipt(page, &saved_pte);
				if (vaddr) {						//TI
					op(vaddr + offset, len, dir);
					kunmap_high_l1_vipt(page, saved_pte);
				} else							//TI
					printk(KERN_ERR "SSN: 1 vaddr is NULL\n");	//TI
			} else								//TI
				printk(KERN_ERR "SSN: 2 vaddr is NULL\n");		//TI
		} else {
			vaddr = page_address(page) + offset;
			if (vaddr)							//TI
				op(vaddr, len, dir);
			else								//TI
				printk(KERN_ERR "SSN: 3 vaddr is NULL\n");		//TI
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
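The second listing replaces the struct page pointer arithmetic of the first with pfn-based iteration (an incrementing pfn passed to pfn_to_page() each pass), and it handles the unmapped-highmem case with kmap_atomic()/kunmap_atomic() instead of the kmap_high_l1_vipt() helpers and TI debug printks. The sketch below is a hypothetical, user-space illustration of the per-page chunking arithmetic that loop relies on: folding a multi-page offset into the starting pfn, clamping each chunk at a page boundary, then advancing the pfn. The name walk_pages, the fixed 4096-byte PAGE_SIZE, and the sample numbers are illustrative assumptions, not kernel code.

	/* Hypothetical user-space sketch of the per-page chunking done by
	 * dma_cache_maint_page(); not kernel code. */
	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL	/* assumed page size for illustration */

	/* Walk a physically contiguous buffer described by (pfn, offset, size)
	 * and report the per-page (pfn, offset, len) chunks. */
	static void walk_pages(unsigned long pfn, unsigned long offset, size_t size)
	{
		size_t left = size;

		/* Fold a multi-page offset into the pfn up front, as the
		 * rewritten kernel version does before its loop. */
		pfn += offset / PAGE_SIZE;
		offset %= PAGE_SIZE;

		do {
			size_t len = left;

			/* Clamp the chunk so it never crosses a page boundary. */
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			printf("pfn %lu: offset %lu, len %zu\n", pfn, offset, len);

			offset = 0;	/* only the first page has a nonzero offset */
			pfn++;
			left -= len;
		} while (left);
	}

	int main(void)
	{
		/* A 10000-byte buffer starting 5000 bytes into pfn 42. */
		walk_pages(42, 5000, 10000);
		return 0;
	}

With that sample input the sketch prints three chunks: pfn 43 at offset 904 for 3192 bytes, pfn 44 for 4096 bytes, and pfn 45 for 2712 bytes. Note that the kernel function only clamps len for highmem pages; lowmem pages are contiguously mapped, so page_address() plus the remaining length is handed to op() in a single call there.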