int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { unsigned long dampr2; void *vaddr; int i; BUG_ON(direction == DMA_NONE); dampr2 = __get_DAMPR(2); for (i = 0; i < nents; i++) { vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE); frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE); } kunmap_atomic_primary(vaddr, __KM_CACHE); if (dampr2) { __set_DAMPR(2, dampr2); __set_IAMPR(2, dampr2); } return nents; }
/*
 * write a page's D-cache contents back to memory
 * - the cache-flush operation needs a virtual address, and the page may not
 *   currently have one, so borrow a kmap_atomic() slot for the duration
 */
void flush_dcache_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page, __KM_CACHE);

	frv_dcache_writeback((unsigned long) addr,
			     (unsigned long) addr + PAGE_SIZE);

	kunmap_atomic(addr, __KM_CACHE);

} /* end flush_dcache_page() */
/*
 * make an area of memory consistent for a DMA transfer
 * - FROMDEVICE: the device wrote the data, so drop stale cache lines
 * - TODEVICE: the CPU wrote the data, so push it out of the D-cache
 * - BIDIRECTIONAL: writeback so the device sees current data
 * - any other direction value is silently ignored
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long) vaddr;
	unsigned long end = start + size;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (direction == PCI_DMA_FROMDEVICE) {
		/* invalidate only */
		frv_cache_invalidate(start, end);
	} else if (direction == PCI_DMA_TODEVICE) {
		/* writeback only */
		frv_dcache_writeback(start, end);
	} else if (direction == PCI_DMA_BIDIRECTIONAL) {
		/* writeback and invalidate */
		frv_dcache_writeback(start, end);
	}
}
/*
 * Install a PMD value: replicate @pmd across every segment-table entry slot
 * in *@pmdptr, then write the updated entry back out of the D-cache.
 * - @pmd == 0 clears the whole entry block
 * - NOTE(review): each successive slot is advanced by __frv_PT_SIZE,
 *   presumably so each slot points at a consecutive page table — confirm
 *   against the FR-V MMU layout
 */
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
{
	unsigned long *__ste_p = pmdptr->ste;
	int loop;

	if (!pmd) {
		/* clearing: zero every slot in the entry block */
		memset(__ste_p, 0, PME_SIZE);
	} else {
		/* reject values with bits set that must be clear
		 * (0x3f00, the xAMPRx_SS field and 0xe) */
		BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));

		/* fill all PME_SIZE bytes, 4 bytes (one slot) at a time,
		 * stepping the target by one page table per slot */
		for (loop = PME_SIZE; loop > 0; loop -= 4) {
			*__ste_p++ = pmd;
			pmd += __frv_PT_SIZE;
		}
	}

	/* flush the modified entry out of the D-cache */
	frv_dcache_writeback((unsigned long) pmdptr,
			     (unsigned long) (pmdptr + 1));
}
/*
 * Map a scatterlist for DMA: write each entry's page back out of the
 * D-cache so the device sees current memory contents.
 * - iterates with for_each_sg() (chained-scatterlist aware)
 * - kmap_atomic_primary() borrows the DAMPR2 register, which is saved
 *   here first
 * NOTE(review): this definition appears truncated in this chunk — the
 * matching kunmap/DAMPR-restore/return tail is not visible here; verify
 * against the full file before relying on this view.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	unsigned long dampr2;
	void *vaddr;
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	/* save DAMPR2 before kmap_atomic_primary() borrows it */
	dampr2 = __get_DAMPR(2);

	for_each_sg(sglist, sg, nents, i) {
		vaddr = kmap_atomic_primary(sg_page(sg));

		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);
	}
/*
 * write a page's D-cache contents back to memory
 * - the page may have no permanent virtual address, so map it temporarily
 *   through the primary kmap slot
 * - the primary slot borrows DAMPR2, so that register is saved beforehand
 *   and restored (together with IAMPR2) afterwards
 */
void flush_dcache_page(struct page *page)
{
	unsigned long saved_dampr2;
	void *kaddr;

	/* capture DAMPR2 before kmap_atomic_primary() overwrites it */
	saved_dampr2 = __get_DAMPR(2);

	kaddr = kmap_atomic_primary(page, __KM_CACHE);

	frv_dcache_writeback((unsigned long) kaddr,
			     (unsigned long) kaddr + PAGE_SIZE);

	kunmap_atomic_primary(kaddr, __KM_CACHE);

	if (saved_dampr2) {
		__set_DAMPR(2, saved_dampr2);
		__set_IAMPR(2, saved_dampr2);
	}
}