void invalidate_dcache_region(void *start, size_t size)
{
    unsigned long v, begin, end, linesz, mask;

    linesz = boot_cpu_data.dcache.linesz;
    mask = linesz - 1;

    /* When the first and/or last cachelines are shared with adjacent
     * data, flush them instead of invalidating ... never discard
     * valid data! */
    begin = (unsigned long)start;
    end = begin + size;

    if (begin & mask) {
        flush_dcache_line(start);
        begin += linesz - (begin & mask);   /* align up to the next line */
    }
    if (end & mask) {
        flush_dcache_line((void *)end);
        end &= ~mask;
    }

    /* The remaining cachelines are fully covered by the region and
     * only need invalidation. */
    for (v = begin; v < end; v += linesz)
        invalidate_dcache_line((void *)v);
    flush_write_buffer();
}
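/*
 * A minimal usage sketch, not part of the original source: a driver
 * consuming a DMA-filled receive buffer. rx_buf, dma_rx_done() and
 * process_packet() are hypothetical names. Invalidation (rather than
 * writeback) is what a DMA-receive path wants: the device's data is in
 * RAM and any cached copy is stale. The partial-line handling above is
 * what makes this safe when the buffer does not start or end on a
 * cacheline boundary.
 */
void process_packet(const unsigned char *pkt, size_t len);  /* hypothetical */

static unsigned char rx_buf[1536];

void dma_rx_done(size_t len)
{
    /* Drop stale cachelines so the CPU reads what the device wrote. */
    invalidate_dcache_region(rx_buf, len);
    process_packet(rx_buf, len);
}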
void flush_dcache_region(void *start, size_t size)
{
    unsigned long v, begin, end, linesz;

    linesz = boot_cpu_data.dcache.linesz;
    begin = (unsigned long)start & ~(linesz - 1);
    end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

    for (v = begin; v < end; v += linesz)
        flush_dcache_line((void *)v);
    flush_write_buffer();
}
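/*
 * The transmit-side counterpart, again only a sketch: send_buffer() and
 * dma_start_tx() are hypothetical names, and virt_to_phys() is assumed to
 * be available from the kernel's headers. Before a device reads a
 * CPU-written buffer, dirty cachelines must reach RAM; nothing needs to
 * be discarded, so writing back the whole rounded-out region is enough.
 */
void dma_start_tx(unsigned long phys, size_t len);  /* hypothetical */

void send_buffer(void *buf, size_t len)
{
    flush_dcache_region(buf, len);          /* push dirty lines to RAM */
    dma_start_tx(virt_to_phys(buf), len);   /* hypothetical DMA kick */
}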
static void r5900_dma_cache_inv_pc(unsigned long addr, unsigned long size)
{
    unsigned long end, a;

    if (size >= dcache_size) {
        flush_cache_all();
    } else {
        unsigned long flags;

        __save_and_cli(flags);
        a = addr & ~(dc_lsize - 1);
        /* Line address of the *last* byte; without the -1 the loop
         * would run one line past the region whenever addr + size is
         * exactly line aligned. */
        end = (addr + size - 1) & ~(dc_lsize - 1);
        while (1) {
            flush_dcache_line(a);   /* Hit_Writeback_Inv_D */
            if (a == end)
                break;
            a += dc_lsize;
        }
        __restore_flags(flags);
    }

    bc_inv(addr, size);
}
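/*
 * A standalone, compilable sanity check of the inclusive loop bounds used
 * above (a worked example assuming a 64-byte dcache line; not from the
 * original source). The last-line address must come from addr + size - 1:
 * rounding addr + size down instead would step one line past the buffer
 * whenever the region ends exactly on a cacheline boundary.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uintptr_t dc_lsize = 64;                /* assumed line size */
    uintptr_t addr = 0x1010, size = 0x70;   /* bytes 0x1010..0x107f */

    uintptr_t a   = addr & ~(dc_lsize - 1);              /* first line */
    uintptr_t end = (addr + size - 1) & ~(dc_lsize - 1); /* last line */

    /* The region touches exactly two lines: 0x1000 and 0x1040. */
    assert(a == 0x1000 && end == 0x1040);

    /* Without the -1, the bound lands at 0x1080 and the while(1) loop
     * would also hit the line just past the buffer. */
    assert(((addr + size) & ~(dc_lsize - 1)) == 0x1080);
    return 0;
}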
void r4k_dcache_wback_inv(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t end, a;

    if (size >= dcache_size) {
        /* Cheaper to write back and invalidate the whole cache by
         * index than to walk a region this large with Hit ops. */
        blast_dcache16();
    } else {
        rt_ubase_t dc_lsize = cpu_dcache_line_size();

        a = addr & ~(dc_lsize - 1);
        end = (addr + size - 1) & ~(dc_lsize - 1);
        while (1) {
            flush_dcache_line(a);
            if (a == end)
                break;
            a += dc_lsize;
        }
    }
}
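/*
 * Sketch exercising both paths above (dcache_example() and the buffer
 * sizes are assumptions, not from the original code): a region at least
 * as large as the data cache takes the index-blast fast path, since
 * walking it line by line would touch more lines than the cache holds;
 * anything smaller is written back and invalidated per line.
 */
void dcache_example(void)
{
    static char big[32 * 1024];  /* >= dcache_size on e.g. a 16 KiB cache */
    static char small[256];

    r4k_dcache_wback_inv((rt_ubase_t)big, sizeof(big));     /* blast_dcache16() */
    r4k_dcache_wback_inv((rt_ubase_t)small, sizeof(small)); /* per-line flush */
}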