static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
			writel(start, base + L2X0_CLEAN_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel(addr, base + L2X0_INV_LINE_PA);
}
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		/* Clean by PA followed by Invalidate by PA */
		cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
		writel(addr, base + L2X0_CLEAN_LINE_PA);
		cache_wait(base + L2X0_INV_LINE_PA, 1);
		writel(addr, base + L2X0_INV_LINE_PA);
	} else {
		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
		writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
	}
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_EMXX_L310_NORAM
	void __iomem *base = l2x0_base;
#endif
	unsigned long flags;

	_l2x0_lock(&l2x0_lock, flags);
#ifndef CONFIG_EMXX_L310_NORAM
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = block_end(start, end);

		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			_l2x0_unlock(&l2x0_lock, flags);
			_l2x0_lock(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
#endif
	cache_sync();
	_l2x0_unlock(&l2x0_lock, flags);
}
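/*
 * block_end() is used by the range routines above and below but is not
 * defined in this excerpt. A minimal sketch, assuming it only caps each
 * batch at 4096 bytes between lock drops, matching the open-coded variants
 * here that compute start + min(end - start, 4096UL); the exact definition
 * in the original tree may differ.
 */
#define block_end(start, end)	((start) + min((end) - (start), 4096UL))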
void l2x0_invalid_range(unsigned long start, unsigned long end)
{
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		l2x0_flush_line(start);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		l2x0_flush_line(end);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}
	}
	cache_wait(L2X0_INV_LINE_PA, 1);
	cache_sync();
}
static inline void l2x0_inv_all(void)
{
	/* invalidate all ways */
	writel(0xff, l2x0_base + L2X0_INV_WAY);
	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
	cache_sync();
}
static void l2x0_clean_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned char way;
	unsigned long flags, value;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		l2x0_lock(&l2x0_lock, flags);
		debug_writel(0x03);
		/* Clean all the ways, one way at a time */
		for (way = 0; way <= 0xf; way++) {
			value = 1 << way;
			writel(value, base + L2X0_CLEAN_WAY);
			cache_wait_always(base + L2X0_CLEAN_WAY, value);
			cache_sync();
		}
		debug_writel(0x00);
		l2x0_unlock(&l2x0_lock, flags);
	} else {
		/* clean all ways */
		spin_lock_irqsave(&l2x0_lock, flags);
		writel(0xff, l2x0_base + L2X0_CLEAN_WAY);
		cache_wait(l2x0_base + L2X0_CLEAN_WAY, 0xff);
		cache_sync();
		spin_unlock_irqrestore(&l2x0_lock, flags);
	}
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	l2x0_lock(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = block_end(start, end);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			l2x0_unlock(&l2x0_lock, flags);
			l2x0_lock(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	l2x0_unlock(&l2x0_lock, flags);
}
void l2x0_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		l2x0_flush_line(start);
		start += CACHE_LINE_SIZE;
	}
	cache_wait(L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
}
void l2x0_clean_inv_all(void)
{
	if (l2x0_status()) {
		/* clean and invalidate all ways */
		writel(l2x0_way_mask, L2X0_CLEAN_INV_WAY);
		cache_wait(L2X0_CLEAN_INV_WAY, l2x0_way_mask);
		cache_sync();
	}
}
void l2x0_inv_all_ref(void)
{
	/* invalidate all ways */
	writel(l2x0_way_mask, L2X0_INV_WAY_ref);
	asm("dsb");
	asm("isb");
	pwr_wait(100);
	cache_wait(L2X0_INV_WAY_ref, l2x0_way_mask);
	cache_sync_ref();
}
void l2x0_clean_all(void)
{
	/* clean all ways */
	writel(l2x0_way_mask, L2X0_CLEAN_WAY);
	asm("dsb");
	asm("isb");
	pwr_wait(100);
	cache_wait(L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
}
static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel(0xff, l2x0_base + L2X0_INV_WAY);
	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void cns3xxx_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(CACHE_LINE_SIZE - 1);
	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
		writel(addr, cns3xxx_l2_base + L2CC_CLEAN_LINE_PA);
	cache_wait(cns3xxx_l2_base + L2CC_CLEAN_LINE_PA, 1);
	cache_sync();
}
/*
 * Invalidate by way is a non-atomic, background operation and
 * has to be protected with the spinlock.
 */
static inline void l2x0_inv_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, base + L2X0_INV_WAY);
	cache_wait(base + L2X0_INV_WAY, l2x0_way_mask);
	atomic_cache_sync(base);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
	}
	cache_wait(l2x0_base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
}
void l2x0_clean_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		/* cap each batch at 4096 bytes, as in the sibling variants */
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}
	}
	cache_wait(L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
}
void l2x0_clean_line(unsigned long addr)
{
	cache_wait(L2X0_CLEAN_LINE_PA, 1);
	writel(addr, L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel(addr, base + L2X0_INV_LINE_PA);
}
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
void l2x0_flush_line(unsigned long addr)
{
	cache_wait(L2X0_CLEAN_INV_LINE_PA, 1);
	writel(addr, L2X0_CLEAN_INV_LINE_PA);
}
void cache_sync(void)
{
	writel(0, L2X0_CACHE_SYNC);
	cache_wait(L2X0_CACHE_SYNC, 1);
}
void cache_sync_rel(void)
{
	writel(0, L2X0_CACHE_SYNC_rel);
	cache_wait(L2X0_CACHE_SYNC_rel, 1);
}
void l2x0_inv_line(unsigned long addr)
{
	cache_wait(L2X0_INV_LINE_PA, 1);
	writel(addr, L2X0_INV_LINE_PA);
}
/*
 * Atomic operations
 *
 * The following are atomic operations:
 * . Clean Line by PA or by Set/Way.
 * . Invalidate Line by PA.
 * . Clean and Invalidate Line by PA or by Set/Way.
 * . Cache Sync.
 * These operations stall the slave ports until they are complete.
 * When these registers are read, bit [0], the C flag, indicates that a
 * background operation is in progress. When written, bit 0 must be zero.
 */
static inline void atomic_cache_sync(void __iomem *base)
{
	cache_wait(base + L2X0_CACHE_SYNC, 1);
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
}
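/*
 * cache_wait() is called throughout these routines but is not defined in
 * this excerpt. A minimal sketch, assuming it polls the C flag (bit [0])
 * described above and spins until the masked bits clear, in the spirit of
 * the mainline cache-l2x0.c helper; the exact name and mask handling in the
 * original trees may differ.
 */
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* wait for the line or way operation to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}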
void l2x0_flush_all(void)
{
	/* clean and invalidate all ways */
	writel(l2x0_way_mask /* 8 ways */, L2X0_CLEAN_INV_WAY);
	cache_wait(L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
}
static inline void atomic_flush_line(void __iomem *base, unsigned long addr)
{
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
void l2x0_flush_all_ref(void)
{
	/* clean and invalidate all ways */
	writel(l2x0_way_mask /* 8 ways */, L2X0_CLEAN_INV_WAY_ref);
	cache_wait(L2X0_CLEAN_INV_WAY_ref, l2x0_way_mask);
	cache_sync_ref();
}