/*
 * dma_map_single - map a kernel buffer for DMA
 * @dev: device performing the DMA (unused here)
 * @ptr: kernel virtual address of the buffer
 * @size: length of the buffer in bytes
 * @direction: DMA transfer direction; DMA_NONE is a caller bug
 *
 * Write back and invalidate the cache lines covering the buffer, then
 * return the bus address corresponding to @ptr.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	/* mapping with no transfer direction makes no sense */
	BUG_ON(direction == DMA_NONE);

	/* flush dirty lines to memory and discard them so the device
	 * observes current data */
	frv_cache_wback_inv(addr, addr + size);

	return virt_to_bus(ptr);
}
/*
 * flush_icache_user_range - flush part of a user page for the icache
 *
 * ICI takes a virtual address, and the page may not currently have a
 * kernel mapping - so temporarily attach the page to a bit of virtual
 * space so that it can be flushed through that mapping.
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long start, unsigned long len)
{
	void *kaddr = kmap_atomic(page, __KM_CACHE);
	unsigned long addr;

	/* keep only the intra-page offset of @start, rebased onto the
	 * temporary kernel mapping */
	addr = ((unsigned long) kaddr) | (start & ~PAGE_MASK);
	frv_cache_wback_inv(addr, addr + len);

	kunmap_atomic(kaddr, __KM_CACHE);
} /* end flush_icache_user_range() */
/*
 * flush_icache_user_range - flush part of a user page for the icache
 *
 * The page may have no kernel mapping, so pin it into a temporary
 * atomic mapping and flush through that.  The DAMPR2/IAMPR2 pair is
 * saved up front and restored afterwards: kmap_atomic_primary() may
 * reuse that mapping register - NOTE(review): presumed from the
 * save/restore bracketing; confirm against kmap_atomic_primary().
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long start, unsigned long len)
{
	unsigned long saved_dampr2 = __get_DAMPR(2);
	unsigned long addr;
	void *kaddr;

	kaddr = kmap_atomic_primary(page, __KM_CACHE);

	/* intra-page offset of @start, rebased onto the temporary
	 * kernel mapping */
	addr = ((unsigned long) kaddr) | (start & ~PAGE_MASK);
	frv_cache_wback_inv(addr, addr + len);

	kunmap_atomic_primary(kaddr, __KM_CACHE);

	/* reinstate the previous mapping if one was live */
	if (saved_dampr2) {
		__set_DAMPR(2, saved_dampr2);
		__set_IAMPR(2, saved_dampr2);
	}
} /* end flush_icache_user_range() */