/*
 * Invalidate the data cache lines covering [start, start + size).
 *
 * Partial lines at either edge may hold valid data belonging to
 * neighbouring objects, so those lines are written back (flushed)
 * rather than discarded; only lines wholly inside the region are
 * invalidated.  Ends with a write-buffer drain so the flushes have
 * reached memory before the caller proceeds.
 *
 * @start: first byte of the region (any alignment)
 * @size:  length of the region in bytes
 */
void invalidate_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz, mask;

	linesz = boot_cpu_data.dcache.linesz;
	mask = linesz - 1;

	/* when first and/or last cachelines are shared, flush them
	 * instead of invalidating ... never discard valid data! */
	begin = (unsigned long)start;
	end = begin + size;

	if (begin & mask) {
		flush_dcache_line(start);
		/* Round begin UP to the next line boundary.  The previous
		 * "begin += linesz" kept the misaligned offset, so every
		 * invalidate below was issued on an unaligned address. */
		begin += linesz - (begin & mask);
	}
	if (end & mask) {
		/* end is inside a shared line: flush that line, then
		 * align end down so the loop stops before it. */
		flush_dcache_line((void *)end);
		end &= ~mask;
	}

	/* remaining cachelines only need invalidation */
	for (v = begin; v < end; v += linesz)
		invalidate_dcache_line((void *)v);
	flush_write_buffer();
}
/*
 * r4k_dcache_inv - invalidate every data cache line touched by
 * [addr, addr + size).
 *
 * NOTE(review): unlike a flush-based variant, partial lines at the
 * region edges are discarded as well — the caller must guarantee no
 * live data shares the first/last cache line with the buffer.
 *
 * @addr: start address of the region (any alignment)
 * @size: length of the region in bytes
 */
void r4k_dcache_inv(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t end, a;
    rt_ubase_t dc_lsize = cpu_dcache_line_size();

    /* A zero-length request would compute end = (addr - 1) & ~mask,
     * which lies BELOW the first line; the inclusive "a == end" test
     * below would then never hit and the loop would walk until address
     * wraparound.  Bail out early instead. */
    if (size == 0)
        return;

    /* Align the first line down, and take the line containing the last
     * byte (addr + size - 1) as the inclusive terminator. */
    a = addr & ~(dc_lsize - 1);
    end = ((addr + size) - 1) & ~(dc_lsize - 1);
    while (1)
    {
        invalidate_dcache_line(a);
        if (a == end)
            break;
        a += dc_lsize;
    }
}