/**************************************************************************** * * Invalidate the Data cache for the given address range. * If the bytes specified by the address (adr) are cached by the Data cache, * the cacheline containing that byte is invalidated. If the cacheline * is modified (dirty), the modified contents are lost and are NOT * written to system memory before the line is invalidated. * * @param Start address of range to be invalidated. * @param Length of range to be invalidated in bytes. * * @return None. * * @note None. * ****************************************************************************/ void Xil_DCacheInvalidateRange(unsigned int adr, unsigned len) { const unsigned cacheline = 32; unsigned int end; unsigned int tempadr = adr; unsigned int tempend; unsigned int currmask; volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET); currmask = mfcpsr(); mtcpsr(currmask | IRQ_FIQ_MASK); if (len != 0) { end = tempadr + len; tempend = end; /* Select L1 Data cache in CSSR */ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0); if (tempadr & (cacheline-1)) { tempadr &= ~(cacheline - 1); Xil_L1DCacheFlushLine(tempadr); /* Disable Write-back and line fills */ Xil_L2WriteDebugCtrl(0x3); Xil_L2CacheFlushLine(tempadr); /* Enable Write-back and line fills */ Xil_L2WriteDebugCtrl(0x0); Xil_L2CacheSync(); tempadr += cacheline; } if (tempend & (cacheline-1)) { tempend &= ~(cacheline - 1); Xil_L1DCacheFlushLine(tempend); /* Disable Write-back and line fills */ Xil_L2WriteDebugCtrl(0x3); Xil_L2CacheFlushLine(tempend); /* Enable Write-back and line fills */ Xil_L2WriteDebugCtrl(0x0); Xil_L2CacheSync(); } while (tempadr < tempend) { /* Invalidate L2 cache line */ *L2CCOffset = tempadr; dsb(); #ifdef __GNUC__ /* Invalidate L1 Data cache line */ __asm__ __volatile__("mcr " \ XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr)); #else { volatile register unsigned int Reg __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC); Reg = tempadr; } #endif 
tempadr += cacheline; } }
/****************************************************************************
 *
 * Invalidate the Data cache for the given address range.
 * If the bytes specified by the address (adr) are cached by the Data cache,
 * the cacheline containing that byte is invalidated. If the cacheline
 * is modified (dirty), the modified contents are lost and are NOT
 * written to system memory before the line is invalidated.
 *
 * In this function, if start address or end address is not aligned to
 * cache-line, the particular cache-line containing the unaligned start or
 * end address is flushed first and then the others are invalidated, as
 * invalidating the same unaligned cache line may result in loss of data.
 * This issue raises a few possibilities.
 *
 * If the address to be invalidated is not cache-line aligned, the
 * following choices are available:
 * 1) Invalidate the cache line when required and do not bother much about
 * the side effects. Though it sounds good, it can result in hard-to-debug
 * issues. The problem is, if some other variables are allocated in the
 * same cache line and had been recently updated (in cache), the invalidation
 * would result in loss of data.
 *
 * 2) Flush the cache line first. This will ensure that if any other variable
 * present in the same cache line and updated recently is flushed out to
 * memory. Then it can safely be invalidated. Again it sounds good, but this
 * can result in issues. For example, when the invalidation happens in a
 * typical ISR (after a DMA transfer has updated the memory), then flushing
 * the cache line means losing data that was updated recently before the ISR
 * got invoked.
 *
 * Linux prefers the second one. To have uniform implementation (across
 * standalone and Linux), the second option is implemented.
 * This being the case, the following needs to be taken care of:
 * 1) Whenever possible, the addresses must be cache line aligned. Please
 * note that not just the start address, even the end address must be cache
 * line aligned. If that is taken care of, this will always work.
 * 2) Avoid situations where invalidation has to be done after the data is
 * updated by peripheral/DMA directly into the memory. It is not tough to
 * achieve (may be a bit risky). The common use case to do invalidation is
 * when a DMA happens. Generally for such use cases, buffers can be allocated
 * first and then start the DMA. The practice that needs to be followed here
 * is, immediately after buffer allocation and before starting the DMA, do
 * the invalidation. With this approach, invalidation need not be done after
 * the DMA transfer is over.
 *
 * This is going to always work if done carefully.
 * However, the concern is, there is no guarantee that invalidation will not
 * be needed after DMA is complete. For example, because of some reasons, if
 * the first cache line or last cache line (assuming the buffer in question
 * comprises multiple cache lines) is brought into cache (between the time it
 * is invalidated and DMA completes) because of some speculative prefetching
 * or reading data for a variable present in the same cache line, then we
 * will have to invalidate the cache after DMA is complete.
 *
 * @param adr: Start address of range to be invalidated.
 * @param len: Length of range to be invalidated in bytes.
 *
 * @return None.
 *
 * @note IRQ and FIQ are masked for the duration of the operation and
 *       restored on exit. L2 maintenance is skipped in AMP configurations
 *       (USE_AMP), where the L2 controller is owned by another master.
 *
 ****************************************************************************/
void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
{
	const u32 cacheline = 32U;
	u32 tempadr = adr;
	u32 tempend;
	u32 currmask;
#ifndef USE_AMP
	/* PL310 invalidate-by-PA register: writing a physical address here
	 * invalidates the L2 line containing that address. Guarded so AMP
	 * builds do not emit an unused-variable warning. */
	volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
				XPS_L2CC_CACHE_INVLD_PA_OFFSET);
#endif

	/* Mask IRQ/FIQ so the maintenance sequence is not interrupted. */
	currmask = mfcpsr();
	mtcpsr(currmask | IRQ_FIQ_MASK);

	if (len != 0U) {
		tempend = tempadr + len;

		/* Select L1 Data cache in CSSR */
		mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);

		if ((tempadr & (cacheline - 1U)) != 0U) {
			/* Unaligned start: flush the partial first line so
			 * other data sharing it is written back, then skip
			 * past it. */
			tempadr &= (~(cacheline - 1U));

			Xil_L1DCacheFlushLine(tempadr);
#ifndef USE_AMP
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3U);
			Xil_L2CacheFlushLine(tempadr);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0U);
			Xil_L2CacheSync();
#endif
			tempadr += cacheline;
		}

		if ((tempend & (cacheline - 1U)) != 0U) {
			/* Unaligned end: flush the partial last line for the
			 * same reason, then exclude it from the loop below. */
			tempend &= (~(cacheline - 1U));

			Xil_L1DCacheFlushLine(tempend);
#ifndef USE_AMP
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3U);
			Xil_L2CacheFlushLine(tempend);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0U);
			Xil_L2CacheSync();
#endif
		}

		while (tempadr < tempend) {
#ifndef USE_AMP
			/* Invalidate L2 cache line */
			*L2CCOffset = tempadr;
			Xil_L2CacheSync();
#endif
#ifdef __GNUC__
			/* Invalidate L1 Data cache line */
			__asm__ __volatile__("mcr " \
				XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#elif defined (__ICCARM__)
			__asm volatile ("mcr " \
				XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#else
			{
				volatile register u32 Reg
					__asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
				Reg = tempadr;
			}
#endif
			tempadr += cacheline;
		}
	}

	/* Ensure all maintenance operations have completed, then restore
	 * the caller's interrupt mask (fix: previously never restored,
	 * leaving IRQ/FIQ disabled after return). */
	dsb();
	mtcpsr(currmask);
}