static void l2x0_clean_all(void) { void __iomem *base = l2x0_base; unsigned char way; unsigned long flags, value; if (omap_rev() == OMAP4430_REV_ES1_0) { l2x0_lock(&l2x0_lock, flags); debug_writel(0x03); /* Clean all the ways */ for (way = 0; way <= 0xf; way++, value = 0) { value = 1 << way; writel(value, base + L2X0_CLEAN_WAY); cache_wait_always(base + L2X0_CLEAN_WAY, value); cache_sync(); } debug_writel(0x00); l2x0_unlock(&l2x0_lock, flags); } else { /* invalidate all ways */ spin_lock_irqsave(&l2x0_lock, flags); writel(0xff, l2x0_base + L2X0_CLEAN_WAY); cache_wait(l2x0_base + L2X0_CLEAN_WAY, 0xff); cache_sync(); spin_unlock_irqrestore(&l2x0_lock, flags); } }
/*
 * l2x0_invalid_range - invalidate the L2 lines covering [start, end).
 *
 * A partially covered line at either edge is flushed (clean+invalidate)
 * rather than plainly invalidated, so data sharing the line with the
 * range is written back instead of being lost.  The body is walked in
 * blocks of at most 4K — a leftover of a lock-drop scheme; no lock is
 * taken here.
 *
 * Fix vs. original: removed the dead, empty "if (blk_end < end) { }"
 * at the bottom of the outer loop.
 */
void l2x0_invalid_range(unsigned long start, unsigned long end)
{
	/* leading partial line: flush rather than discard */
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		l2x0_flush_line(start);
		start += CACHE_LINE_SIZE;
	}
	/* trailing partial line: flush rather than discard */
	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		l2x0_flush_line(end);
	}
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}
	}
	cache_wait(L2X0_INV_LINE_PA, 1);
	cache_sync();
}
/*
 * l2x0_flush_range - clean+invalidate the L2 lines covering [start, end).
 *
 * Works block by block (block_end()) and drops/retakes the lock between
 * blocks to bound the time spent with the lock held.  When
 * CONFIG_EMXX_L310_NORAM is set the per-line maintenance is compiled out
 * and only the final cache sync is issued.
 */
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_EMXX_L310_NORAM
	void __iomem *base = l2x0_base;
#endif
	unsigned long flags;

	_l2x0_lock(&l2x0_lock, flags);
#ifndef CONFIG_EMXX_L310_NORAM
	start &= ~(CACHE_LINE_SIZE - 1);	/* align down to a cache line */
	while (start < end) {
		unsigned long blk_end = block_end(start, end);

		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		if (blk_end < end) {
			/* release the lock between blocks to bound latency */
			_l2x0_unlock(&l2x0_lock, flags);
			_l2x0_lock(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
#endif
	cache_sync();
	_l2x0_unlock(&l2x0_lock, flags);
}
/*
 * hlfs_flush - flush buffered state for @ctrl via its cache controller.
 * Syncs only when a cache controller is attached; always returns 0.
 */
int hlfs_flush(struct hlfs_ctrl *ctrl)
{
	if (ctrl->cctrl)
		cache_sync(ctrl->cctrl);
	return 0;
}
/*
 * pl310_inv_all - invalidate every way of the PL310 L2 cache.
 * Kicks the Invalidate-by-Way background operation with the full way
 * mask, waits for it to complete, then issues a cache sync.
 */
static inline void pl310_inv_all(void)
{
	/* invalidate all ways */
	sil_wrw_mem((void*)(RMA1_L2CACHE_BASE + PL310_INV_WAY), pl310_way_mask);
	cache_wait_way((uint32_t *)(RMA1_L2CACHE_BASE+PL310_INV_WAY), pl310_way_mask);
	cache_sync();
}
/*
 * l2x0_clean_range - clean (write back) the L2 lines covering [start, end).
 *
 * Walks the range in blocks of at most 4K, dropping and retaking the
 * raw spinlock between blocks so IRQs are not kept disabled for the
 * whole of a large range.
 */
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);	/* align down to a cache line */
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			/* wait for any previous line op before queueing this one */
			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
			writel(start, base + L2X0_CLEAN_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		if (blk_end < end) {
			/* bound IRQ-off time between 4K blocks */
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
/*
 * l2x0_inv_all - invalidate every way (mask 0xff = 8 ways) of the L2.
 * NOTE(review): unlike the locked variants elsewhere, no lock is taken
 * here — presumably only called during single-threaded init; confirm
 * with callers.
 */
static inline void l2x0_inv_all(void)
{
	/* invalidate all ways */
	writel(0xff, l2x0_base + L2X0_INV_WAY);
	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
	cache_sync();
}
/*
 * l2x0_flush_range - clean+invalidate the L2 lines covering [start, end).
 *
 * Per block, the debug register is set to 0x03 around the line loop —
 * presumably the PL310 errata 727915 workaround; confirm against the
 * errata documentation.  The lock is dropped between blocks to bound
 * the time it is held.
 */
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	l2x0_lock(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);	/* align down to a cache line */
	while (start < end) {
		unsigned long blk_end = block_end(start, end);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);
		if (blk_end < end) {
			/* release the lock between blocks to bound latency */
			l2x0_unlock(&l2x0_lock, flags);
			l2x0_lock(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	l2x0_unlock(&l2x0_lock, flags);
}
/*
 * all_subdirs: list the subdirectories below `key` in this XML source.
 *
 * The cache is synced first so that subdirectories removed on disk are
 * dropped from the cache; a sync failure is only logged and lookup
 * proceeds on a best-effort basis.  Returns NULL when the directory is
 * not found (with *err set by cache_lookup).
 */
static GSList*
all_subdirs (GConfSource* source, const gchar* key, GError** err)
{
  XMLSource* xs = (XMLSource*)source;
  GError* sync_err = NULL;
  Dir* dir;

  /* Sync before looking, to see which subdirs have gone away. */
  cache_sync (xs->cache, &sync_err);

  if (sync_err != NULL)
    {
      gconf_log (GCL_WARNING,
                 _("Error syncing the XML backend directory cache: %s"),
                 sync_err->message);
      g_error_free (sync_err);
      /* continue, may as well try our best */
    }

  dir = cache_lookup (xs->cache, key, FALSE, err);

  return dir != NULL ? dir_all_subdirs (dir, err) : NULL;
}
/*
 * l2x0_flush_all - clean+invalidate every way of the L2 cache.
 *
 * Uses the full way mask (0xffff for 16-way parts, 0xff for 8-way).
 * With CONFIG_PL310_ERRATA_727915, bits 0x3 of the debug control
 * register are set around the operation per the errata workaround.
 * With CONFIG_EMXX_L310_NORAM the way operation is compiled out and
 * only the cache sync remains.  NOTE(review): the original comment
 * said "invalidate", but CLEAN_INV_WAY is a clean+invalidate.
 */
static void l2x0_flush_all(void)
{
	unsigned long flags;
#ifdef CONFIG_PL310_ERRATA_727915
	__u32 debug_ctrl;
#endif

	/* clean+invalidate all ways */
	_l2x0_lock(&l2x0_lock, flags);
#ifndef CONFIG_EMXX_L310_NORAM
#ifdef CONFIG_PL310_ERRATA_727915
	debug_ctrl = readl(l2x0_base + L2X0_DEBUG_CTRL);
	writel(debug_ctrl | 0x3, l2x0_base + L2X0_DEBUG_CTRL);
#endif
#ifdef CONFIG_EMXX_L310_16WAY
	writel(0xffff, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_always(l2x0_base + L2X0_CLEAN_INV_WAY, 0xffff);
#else
	writel(0xff, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_always(l2x0_base + L2X0_CLEAN_INV_WAY, 0xff);
#endif
#ifdef CONFIG_PL310_ERRATA_727915
	writel(debug_ctrl, l2x0_base + L2X0_DEBUG_CTRL);
#endif
#endif
	cache_sync();
	_l2x0_unlock(&l2x0_lock, flags);
}
/* Issue an L2 cache sync while holding the controller lock with IRQs off. */
static void cns3xxx_l2_cache_sync(void)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&cns3xxx_l2_lock, irq_flags);
	cache_sync();
	spin_unlock_irqrestore(&cns3xxx_l2_lock, irq_flags);
}
/*
 * sync_all: write every dirty directory in this XML source's cache out
 * to disk.  Returns the result of cache_sync; on failure *err is set.
 */
static gboolean
sync_all (GConfSource* source, GError** err)
{
  XMLSource* xml_source = (XMLSource*) source;
  gboolean ok;

  ok = cache_sync (xml_source->cache, err);

  return ok;
}
/*
 * l2x0_cache_sync - issue a cache sync (drain the L2 buffers) while
 * holding the controller lock.
 */
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	l2x0_lock(&l2x0_lock, flags);
	cache_sync();
	l2x0_unlock(&l2x0_lock, flags);
}
/*
 * Clean+invalidate each L2 line covering [start, end) via synchronous
 * per-line writes, then issue a cache sync.
 */
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	/* align the start down to a cache line boundary */
	start &= ~(CACHE_LINE_SIZE - 1);

	while (start < end) {
		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
		start += CACHE_LINE_SIZE;
	}
	cache_sync();
}
/*
 * Clean+invalidate each L2 line covering [start, end), wait for the
 * last line operation to finish, then issue a cache sync.
 */
void l2x0_flush_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & ~(CACHE_LINE_SIZE - 1); addr < end;
	     addr += CACHE_LINE_SIZE)
		l2x0_flush_line(addr);

	cache_wait(L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
}
/*
 * l2x0_clean_inv_all - clean and invalidate every way of the L2 cache.
 * No-op when the controller is not enabled (l2x0_status() false).
 *
 * Fixes vs. original: the comment wrongly said "invalidate all ways"
 * for what is a clean+invalidate, and the empty parameter list () is
 * now the explicit prototype (void).
 */
void l2x0_clean_inv_all(void)
{
	if (l2x0_status()) {
		/* clean+invalidate all ways in one background operation */
		writel(l2x0_way_mask, L2X0_CLEAN_INV_WAY);
		cache_wait(L2X0_CLEAN_INV_WAY, l2x0_way_mask);
		cache_sync();
	}
}
/*
 * l2x0_inv_all - invalidate every way (mask 0xff = 8 ways) of the L2,
 * under the raw spinlock with IRQs disabled.
 */
static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel(0xff, l2x0_base + L2X0_INV_WAY);
	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
/*
 * l2x0_inv_all - invalidate every way (mask 0xff = 8 ways) of the L2,
 * under the controller lock; waits unconditionally for the way
 * operation to complete before the cache sync.
 */
static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	l2x0_lock(&l2x0_lock, flags);
	writel(0xff, l2x0_base + L2X0_INV_WAY);
	cache_wait_always(l2x0_base + L2X0_INV_WAY, 0xff);
	cache_sync();
	l2x0_unlock(&l2x0_lock, flags);
}
/*
 * l2x0_clean_all - clean (write back) every way of the L2 cache.
 *
 * After kicking the Clean-by-Way operation, explicit dsb/isb barriers
 * and a fixed pwr_wait(100) delay are issued before polling completion
 * — presumably a platform-specific workaround; confirm against the
 * platform errata.
 *
 * Fixes vs. original: the comment wrongly said "invalidate all ways"
 * for a clean operation, and the empty parameter list () is now the
 * explicit prototype (void).
 */
void l2x0_clean_all(void)
{
	/* clean all ways */
	writel(l2x0_way_mask, L2X0_CLEAN_WAY);
	asm("dsb");
	asm("isb");
	pwr_wait(100);
	cache_wait(L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
}
/*
 * l2x0_inv_all - invalidate every way of the L2 cache.
 * The dsb/isb barriers and fixed pwr_wait(100) delay before polling
 * mirror l2x0_clean_all above — presumably a platform-specific
 * workaround; confirm against the platform errata.
 */
void l2x0_inv_all(void)
{
	/* invalidate all ways */
	writel(l2x0_way_mask, L2X0_INV_WAY);
	asm("dsb");
	asm("isb");
	pwr_wait(100);
	cache_wait(L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
}
/*
 * l2x0_flush_all - clean+invalidate every way (mask 0xff = 8 ways) of
 * the L2, under the controller lock; waits unconditionally for the way
 * operation before the cache sync.
 */
static inline void l2x0_flush_all(void)
{
	unsigned long flags;

	/* flush all ways */
	l2x0_lock(&l2x0_lock, flags);
	writel(0xff, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_always(l2x0_base + L2X0_CLEAN_INV_WAY, 0xff);
	cache_sync();
	l2x0_unlock(&l2x0_lock, flags);
}
/*
 * Clean (write back) each L2 line covering [start, end), wait for the
 * last line operation to complete, then issue a cache sync.
 */
static void cns3xxx_l2_clean_range(unsigned long start, unsigned long end)
{
	/* walk the range line by line from an aligned start */
	unsigned long line = start & ~(CACHE_LINE_SIZE - 1);

	while (line < end) {
		writel(line, cns3xxx_l2_base + L2CC_CLEAN_LINE_PA);
		line += CACHE_LINE_SIZE;
	}
	cache_wait(cns3xxx_l2_base + L2CC_CLEAN_LINE_PA, 1);
	cache_sync();
}
/*
 * l2x0_clean_all - clean (write back) every way of the L2 cache:
 * 0xffff for 16-way parts, 0xff for 8-way parts.
 * NOTE(review): the original comment said "invalidate", but
 * L2X0_CLEAN_WAY is a clean operation.
 */
static inline void l2x0_clean_all(void)
{
	/* clean all ways */
#ifdef CONFIG_EMXX_L310_16WAY
	writel(0xffff, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_always(l2x0_base + L2X0_CLEAN_WAY, 0xffff);
#else
	writel(0xff, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_always(l2x0_base + L2X0_CLEAN_WAY, 0xff);
#endif
	cache_sync();
}
/*
 * Clean+invalidate the L2 lines covering [start, end), walked in chunks
 * of at most 4K, then wait for the last line op and sync.
 */
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	unsigned long addr = start & ~(CACHE_LINE_SIZE - 1);

	while (addr < end) {
		/* bound each inner pass to a 4K chunk */
		unsigned long chunk_end = addr + min(end - addr, 4096UL);

		for (; addr < chunk_end; addr += CACHE_LINE_SIZE)
			l2x0_flush_line(addr);
	}
	cache_wait(l2x0_base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
}
/*
 * l2x0_clean_range - clean (write back) the L2 lines covering
 * [start, end), wait for the last line operation, then sync.
 *
 * Fix vs. original: the inner "block" loop computed
 * blk_end = start + end - start, which is simply `end`, so the nested
 * chunking never did anything; the dead arithmetic and redundant inner
 * loop are removed (the sequence of l2x0_clean_line calls is unchanged).
 */
void l2x0_clean_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);	/* align down to a cache line */
	while (start < end) {
		l2x0_clean_line(start);
		start += CACHE_LINE_SIZE;
	}
	cache_wait(L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
}
/*
 * l2x0_clean_range - clean (write back) the L2 lines covering
 * [start, end), walked in chunks of at most 4K, then wait for the
 * last line operation and issue a cache sync.  No lock is taken here.
 */
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);	/* align down to a cache line */
	while (start < end) {
		/* bound each inner pass to a 4K chunk */
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
}
/*
 * l2x0_inv_all - invalidate every way of the L2 cache under the
 * controller lock: 0xffff for 16-way parts, 0xff for 8-way.  With
 * CONFIG_EMXX_L310_NORAM the way operation is compiled out and only
 * the cache sync remains.
 */
static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	_l2x0_lock(&l2x0_lock, flags);
#ifndef CONFIG_EMXX_L310_NORAM
#ifdef CONFIG_EMXX_L310_16WAY
	writel(0xffff, l2x0_base + L2X0_INV_WAY);
	cache_wait_always(l2x0_base + L2X0_INV_WAY, 0xffff);
#else
	writel(0xff, l2x0_base + L2X0_INV_WAY);
	cache_wait_always(l2x0_base + L2X0_INV_WAY, 0xff);
#endif
#endif
	cache_sync();
	_l2x0_unlock(&l2x0_lock, flags);
}
/*
 * Invalidate the L2 lines covering [start, end).  A partially covered
 * line at either edge is cleaned+invalidated instead of plainly
 * invalidated, so neighbouring data sharing the line is written back
 * rather than lost.
 */
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	unsigned long line;

	/* leading partial line: flush rather than discard */
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
		start += CACHE_LINE_SIZE;
	}
	/* trailing partial line: flush rather than discard */
	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
	}

	line = start;
	while (line < end) {
		sync_writel(line, L2X0_INV_LINE_PA, 1);
		line += CACHE_LINE_SIZE;
	}
	cache_sync();
}
/*
 * cns3xxx_l2_inv_range - invalidate the L2 lines covering [start, end).
 * Partially covered lines at either edge are cleaned+invalidated so
 * adjacent data sharing the line is written back, not lost.
 */
static void cns3xxx_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start & (CACHE_LINE_SIZE - 1)) {
		/* leading partial line: flush rather than discard */
		start &= ~(CACHE_LINE_SIZE - 1);
		writel(start, cns3xxx_l2_base + L2CC_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}
	if (end & (CACHE_LINE_SIZE - 1)) {
		/* trailing partial line: flush rather than discard */
		end &= ~(CACHE_LINE_SIZE - 1);
		writel(end, cns3xxx_l2_base + L2CC_CLEAN_INV_LINE_PA);
	}
	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
		writel(addr, cns3xxx_l2_base + L2CC_INV_LINE_PA);
	cache_sync();
}
/*
 * mailbox_delivery - signal the destination core for @channel_id.
 *
 * Scans g_mailbox_vx_cfg_tbl (terminated by MAILBOX_MAILCODE_INVALID)
 * for the entry matching the channel, then raises the configured IPC
 * interrupt toward the destination CPU.  Returns BSP_IPC_IntSend's
 * result on success, or a logged MAILBOX_ERR_VXWORKS_CHANNEL_NOT_FIND
 * error when the channel is unknown.
 */
MAILBOX_EXTERN int mailbox_delivery(unsigned int channel_id)
{
	struct mb_vx_cfg *cfg = &g_mailbox_vx_cfg_tbl[0];
	struct mb_vx_cfg *cfg_find = MAILBOX_NULL;

	while (MAILBOX_MAILCODE_INVALID != cfg->channel_id) {
		/* find the system mailbox config matching the given mailbox ID */
		if (cfg->channel_id == channel_id) {
			cfg_find = cfg;
			break;
		}
		cfg++;
	}

	/* presumably flushes shared mailbox memory before raising the
	 * interrupt — confirm cache_sync() semantics on this platform */
	cache_sync();

	if (MAILBOX_NULL != cfg_find) {
		return (int)BSP_IPC_IntSend((IPC_INT_CORE_E)cfg_find->dst_cpu,
					    (IPC_INT_LEV_E)cfg_find->Int_src);
	}

	return mailbox_logerro_p1(MAILBOX_ERR_VXWORKS_CHANNEL_NOT_FIND, channel_id);
}