/* sys_cacheflush -- flush (part of) the processor cache. */ asmlinkage int sys_cacheflush (unsigned long addr, unsigned long len, int op) { struct vm_area_struct *vma; if ((op < 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I))) return -EINVAL; /* * Verify that the specified address region actually belongs * to this process. */ if (addr + len < addr) return -EFAULT; vma = find_vma (current->mm, addr); if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) return -EFAULT; switch (op & CACHEFLUSH_D_PURGE) { case CACHEFLUSH_D_INVAL: __flush_invalidate_region(addr, len); break; case CACHEFLUSH_D_WB: __flush_wback_region(addr, len); break; case CACHEFLUSH_D_PURGE: __flush_purge_region(addr, len); break; } if (op & CACHEFLUSH_I) { flush_icache_all(); } return 0; }
/*
 * Read 'len' bytes from the NAND data register through a cached I/O
 * window, one CACHEDIO_BLOCK_SIZE chunk at a time.
 *
 * For every chunk the cached window is invalidated first so the copy
 * fetches fresh device data; IRQs are disabled around each
 * invalidate+copy pair so nothing can touch the window in between.
 * The final chunk copies only the bytes that remain.
 */
static void nand_read_buf_cached_block(struct mtd_info *mtd, uint8_t *buf,
				       int len)
{
	struct nand_chip *chip = mtd->priv;
	struct stm_nand_emi *data = chip->priv;
	unsigned long flags;
	int remaining;

	for (remaining = len; remaining > 0;
	     remaining -= CACHEDIO_BLOCK_SIZE) {
		local_irq_save(flags);
		__flush_invalidate_region(data->io_data, CACHEDIO_BLOCK_SIZE);
		memcpy_fromio(buf, data->io_data,
			      min(remaining, CACHEDIO_BLOCK_SIZE));
		local_irq_restore(flags);
		buf += CACHEDIO_BLOCK_SIZE;
	}
}
/*
 * Make a physical range coherent before a DMA transfer, performing the
 * D-cache maintenance appropriate for the transfer direction:
 * invalidate for device-to-memory, write back for memory-to-device,
 * and purge (write back + invalidate) for bidirectional transfers.
 * Any other direction is a caller bug.
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));

	if (dir == DMA_FROM_DEVICE)
		__flush_invalidate_region(addr, size);
	else if (dir == DMA_TO_DEVICE)
		__flush_wback_region(addr, size);
	else if (dir == DMA_BIDIRECTIONAL)
		__flush_purge_region(addr, size);
	else
		BUG();
}
/*
 * Synchronize a virtual range for DMA, choosing the D-cache operation
 * from the transfer direction (invalidate / write back / purge).
 * In 29-bit mode the address is first remapped to its cacheable (P1)
 * alias via CAC_ADDR so the cache ops hit the right lines.
 * An unknown direction is a caller bug.
 */
void sh_sync_dma_for_device(void *vaddr, size_t size,
			    enum dma_data_direction direction)
{
	void *addr = vaddr;

	if (__in_29bit_mode())
		addr = (void *)CAC_ADDR((unsigned long)vaddr);

	if (direction == DMA_FROM_DEVICE)
		__flush_invalidate_region(addr, size);
	else if (direction == DMA_TO_DEVICE)
		__flush_wback_region(addr, size);
	else if (direction == DMA_BIDIRECTIONAL)
		__flush_purge_region(addr, size);
	else
		BUG();
}
/*
 * Flush/invalidate a consistent-DMA buffer according to the transfer
 * direction. On SH-5 and PMB configurations the virtual address is
 * used directly; otherwise it is translated to its P1 (cacheable
 * segment) alias first. An unknown direction is a caller bug.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
	void *cache_addr = vaddr;
#else
	void *cache_addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	if (direction == DMA_FROM_DEVICE)
		__flush_invalidate_region(cache_addr, size);
	else if (direction == DMA_TO_DEVICE)
		__flush_wback_region(cache_addr, size);
	else if (direction == DMA_BIDIRECTIONAL)
		__flush_purge_region(cache_addr, size);
	else
		BUG();
}