static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	/* Clean and invalidate line by line; line ops are atomic, no lock */
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		atomic_flush_line(base, start);
		start += CACHE_LINE_SIZE;
	}
	atomic_cache_sync(base);
}
/*
 * Invalidate by way is a non-atomic, background operation,
 * so it has to be protected with the spinlock.
 */
static inline void l2x0_inv_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* Invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, base + L2X0_INV_WAY);
	cache_wait(base + L2X0_INV_WAY, l2x0_way_mask);
	atomic_cache_sync(base);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void BCMFASTPATH l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	/* Range edges could contain live dirty data */
	if (start & (CACHE_LINE_SIZE - 1))
		atomic_flush_line(base, start & ~(CACHE_LINE_SIZE - 1));
	if (end & (CACHE_LINE_SIZE - 1))
		atomic_flush_line(base, end & ~(CACHE_LINE_SIZE - 1));

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		atomic_inv_line(base, start);
		start += CACHE_LINE_SIZE;
	}
	atomic_cache_sync(base);
}
static void l2x0_cache_sync(void)
{
	void __iomem *base = l2x0_base;

	atomic_cache_sync(base);
}
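/*
 * For reference only: a minimal sketch of what the atomic_* helpers
 * used above might look like, assuming a PL310-style controller where
 * single-line maintenance operations and CACHE_SYNC are atomic (they
 * never run in the background, so no polling or locking is required,
 * unlike the by-way operations). The register offsets match the
 * standard l2x0 driver; the actual definitions in this tree live
 * elsewhere and may differ.
 */
#if 0	/* illustrative sketch, not part of this file */
static inline void atomic_flush_line(void __iomem *base, unsigned long addr)
{
	/* Clean and invalidate one line by physical address */
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}

static inline void atomic_inv_line(void __iomem *base, unsigned long addr)
{
	/* Invalidate one line by physical address */
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

static inline void atomic_cache_sync(void __iomem *base)
{
	/* Drain the controller's buffers */
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
}
#endif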