/**
 * uniphier_cache_maint_range - run a cache maintenance operation on [start, end)
 * @start: start address of the range (rounded down to a cache line)
 * @end: end address of the range (exclusive; rounded up to a cache line)
 * @operation: SSCOQM_CM_* maintenance command code
 *
 * The range is split into chunks of at most SSC_RANGE_OP_MAX_SIZE and
 * handed to __uniphier_cache_maint_range().  If the (line-aligned) range
 * would cover nearly the whole 32-bit address space, fall back to a
 * full-cache operation instead.
 */
static void uniphier_cache_maint_range(u32 start, u32 end, u32 operation)
{
	u32 size;

	/* Round the start address down to the cache-line boundary */
	start = start & ~(SSC_LINE_SIZE - 1);

	size = end - start;

	/*
	 * If rounding the size up to a cache line would overflow u32
	 * (i.e. the range spans almost the entire address space, or the
	 * caller passed end < start), maintain the whole cache instead.
	 * The old check (start == 0 && end >= (u32)(-SSC_LINE_SIZE)) missed
	 * the wrap-around when start was non-zero.
	 */
	if (size >= (u32)(-SSC_LINE_SIZE)) {
		uniphier_cache_maint_all(operation);
		return;
	}

	/* Round the size up to the cache-line boundary (cannot wrap now) */
	size = (size + SSC_LINE_SIZE - 1) & ~(SSC_LINE_SIZE - 1);

	while (size) {
		u32 chunk_size = size > SSC_RANGE_OP_MAX_SIZE ?
						SSC_RANGE_OP_MAX_SIZE : size;

		__uniphier_cache_maint_range(start, chunk_size, operation);

		start += chunk_size;
		size -= chunk_size;
	}

	writel(SSCOPE_CM_SYNC, SSCOPE);	/* drain internal buffers */
	readl(SSCOPE);			/* need a read back to confirm */
}
/**
 * uniphier_cache_maint_range - run a cache maintenance operation on [start, end)
 * @start: start address of the range (rounded down to a cache line)
 * @end: end address of the range (exclusive; rounded up to a cache line)
 * @operation: SSCOQM_CM_* maintenance command code
 *
 * Issues the operation in chunks of at most SSC_RANGE_OP_MAX_SIZE, then
 * syncs the controller.  If the line-aligned range would cover nearly the
 * whole 32-bit address space, a full-cache operation is issued instead.
 */
static void uniphier_cache_maint_range(u32 start, u32 end, u32 operation)
{
	u32 remain;

	/* Round the start address down to the cache-line boundary */
	start &= ~(SSC_LINE_SIZE - 1);

	remain = end - start;

	/*
	 * If rounding the length up to a cache line would overflow u32,
	 * the range effectively spans the whole address space: operate on
	 * the entire cache instead.
	 */
	if (unlikely(remain >= (u32)(-SSC_LINE_SIZE))) {
		uniphier_cache_maint_all(operation);
		return;
	}

	/* Round the length up to the cache-line boundary */
	remain = ALIGN(remain, SSC_LINE_SIZE);

	while (remain) {
		u32 chunk = remain;

		/* The controller accepts at most SSC_RANGE_OP_MAX_SIZE */
		if (chunk > SSC_RANGE_OP_MAX_SIZE)
			chunk = SSC_RANGE_OP_MAX_SIZE;

		__uniphier_cache_maint_range(start, chunk, operation);

		start += chunk;
		remain -= chunk;
	}

	uniphier_cache_sync();
}
/* Invalidate the entire outer (system) cache without write-back. */
void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(SSCOQM_CM_INV);
}
/* Write back and invalidate the entire outer (system) cache. */
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(SSCOQM_CM_WB_INV);
}