/*****************************************************************************
*
* Disable the MMU for Cortex-A53 processors. This function invalidates the
* TLBs and the Branch Predictor Array and flushes the D Caches before
* disabling the MMU and D cache.
*
* @param	None.
*
* @return	None.
*
******************************************************************************/
void Xil_DisableMMU(void)
{
	u32 Reg;

	mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0U);
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0U);
	Xil_DCacheFlush();

	Reg = mfcp(XREG_CP15_SYS_CONTROL);
	Reg &= (u32)(~0x05U);
	mtcp(XREG_CP15_SYS_CONTROL, Reg);
}
/*****************************************************************************
*
* Enable the background region in the MPU with default memory attributes for
* Cortex-R5 processors.
*
* @param	None.
*
* @return	None.
*
******************************************************************************/
static void Xil_EnableBackgroundRegion(void)
{
	u32 Reg;

	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
	Reg = mfcp(XREG_CP15_SYS_CONTROL);
	Reg |= (0x00000001U << 17U);	/* set SCTLR.BR to enable the background region */
	dsb();
	mtcp(XREG_CP15_SYS_CONTROL, Reg);
	isb();
}
/*****************************************************************************
*
* Set the memory attributes for a section in the translation table.
*
* @param	Addr is the address for which attributes are to be set.
* @param	attrib specifies the attributes for that memory region.
*
* @return	None.
*
* @note		The MMU and D-cache need not be disabled before changing a
*		translation table attribute.
*
******************************************************************************/
void XFsbl_SetTlbAttributes(INTPTR Addr, UINTPTR attrib)
{
	void (*Funcptr)(void);
#ifdef ARMA53_64
	INTPTR *ptr;
	INTPTR section;
	u64 block_size;

	/* if the region is below 4GB, MMU table level 2 needs to be modified */
	if (Addr < ADDRESS_LIMIT_4GB) {
		/* block size is 2MB for addresses below 4GB */
		block_size = BLOCK_SIZE_2MB;
		section = Addr / block_size;
		Funcptr = &MMUTableL2;
		ptr = (INTPTR *)Funcptr + section;
	}
	/* if the region is above 4GB, MMU table level 1 needs to be modified */
	else {
		/* block size is 1GB for addresses above 4GB */
		block_size = BLOCK_SIZE_1GB;
		section = Addr / block_size;
		Funcptr = &MMUTableL1;
		ptr = (INTPTR *)Funcptr + section;
	}
	*ptr = (Addr & (~(block_size - 1))) | attrib;

	mtcptlbi(ALLE3);
	dsb();	/* ensure completion of the TLB invalidation */
	isb();	/* synchronize context on this processor */
#else
	u32 *ptr;
	u32 section;

	section = Addr / 0x100000U;	/* each section covers 1MB */
	Funcptr = &MMUTable;
	ptr = (u32 *)Funcptr + section;
	if (ptr != NULL) {
		*ptr = (Addr & 0xFFF00000U) | attrib;
	}

	mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0U);
	/* Invalidate all branch predictors */
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0U);

	dsb();	/* ensure completion of the BP and TLB invalidation */
	isb();	/* synchronize context on this processor */
#endif
}
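/*
 * Illustrative usage sketch (not part of the original source): marking a
 * 2MB-aligned DDR block as normal non-cacheable memory before sharing it
 * with a DMA engine. The buffer address is hypothetical, and NORM_NONCACHE
 * is assumed to be the attribute macro provided by the BSP's xil_mmu.h;
 * verify the name and encoding against your BSP version.
 */
#define EXAMPLE_DMA_BUF_ADDR	0x10000000U	/* hypothetical, 2MB-aligned */

static void example_make_buffer_noncacheable(void)
{
	XFsbl_SetTlbAttributes(EXAMPLE_DMA_BUF_ADDR, NORM_NONCACHE);
}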
/*****************************************************************************
*
* Set the memory attributes for a region of memory starting at address addr,
* with size encoding reg_size and attributes attrib, using MPU region number
* reg_num.
*
* @param	addr is the base address of the region.
* @param	reg_size specifies the size encoding for the region.
* @param	reg_num specifies the MPU region number to program.
* @param	attrib specifies the attributes for the region.
*
* @return	None.
*
******************************************************************************/
static void Xil_SetAttribute(u32 addr, u32 reg_size, s32 reg_num, u32 attrib)
{
	u32 Local_reg_size = reg_size;

	Local_reg_size = Local_reg_size << 1U;
	Local_reg_size |= REGION_EN;
	dsb();
	mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER, reg_num);
	isb();
	mtcp(XREG_CP15_MPU_REG_BASEADDR, addr);		/* Set base address of the region */
	mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib);	/* Set the control attributes */
	mtcp(XREG_CP15_MPU_REG_SIZE_EN, Local_reg_size);/* Set the region size and enable it */
	dsb();
	isb();	/* synchronize context on this processor */
}
/*****************************************************************************
*
* Disable all MPU regions, whether or not they are currently enabled.
*
* @param	None.
*
* @return	None.
*
******************************************************************************/
static void Xil_DisableMPURegions(void)
{
	u32 Temp;
	u32 Index;

	for (Index = 0; Index <= 15; Index++) {
		mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER, Index);
		Temp = mfcp(XREG_CP15_MPU_REG_SIZE_EN);
		Temp &= (~REGION_EN);
		dsb();
		mtcp(XREG_CP15_MPU_REG_SIZE_EN, Temp);
		dsb();
		isb();
	}
}
/****************************************************************************
*
* Invalidate a Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the modified contents
* are lost and are NOT written to system memory before the line is
* invalidated.
*
* @param	Address to be invalidated.
*
* @return	None.
*
* @note		The bottom 5 bits are set to 0, forced by architecture, so
*		the operation applies to the whole 32-byte cache line.
*
****************************************************************************/
void Xil_DCacheInvalidateLine(INTPTR adr)
{
	u32 currmask;

	currmask = mfcpsr();
	mtcpsr(currmask | IRQ_FIQ_MASK);

	mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
	mtcp(XREG_CP15_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));

	/* Wait for invalidate to complete */
	dsb();
	mtcpsr(currmask);
}
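/*
 * Illustrative usage sketch (not part of the original source): discarding a
 * stale 32-byte DMA descriptor before re-reading it. The descriptor layout
 * is hypothetical; the aligned attribute keeps the descriptor on its own
 * cache line so the invalidate cannot destroy neighbouring data.
 */
typedef struct {
	u32 ctrl;
	u32 status;
	u32 buf_addr;
	u32 next;
} __attribute__((aligned(32))) example_dma_desc_t;

static u32 example_read_desc_status(example_dma_desc_t *desc)
{
	/* drop any stale cached copy so the next read comes from memory */
	Xil_DCacheInvalidateLine((INTPTR)desc);
	return desc->status;
}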
/**
 * Modified SetTlbAttributes that calls MyXil_DCacheFlush in order
 * to prevent L2 cache controller accesses.
 */
void MyXil_SetTlbAttributes(u32 addr, u32 attrib)
{
	u32 *ptr;
	u32 section;

	mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0);
	dsb();

	/* Invalidate all branch predictors */
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
	dsb();

	MyXil_DCacheFlush();

	section = addr / 0x100000;	/* each section covers 1MB */
	ptr = &MMUTable + section;
	*ptr = (addr & 0xFFF00000) | attrib;
	dsb();
}
/****************************************************************************
*
* Get the time from the Cycle Counter Register.
*
* @param	Pointer to the location to be updated with the time.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void XTime_GetTime(XTime *Xtime)
{
	u32 reg;
	u32 low;

	/* loop until we get a consistent result */
	do {
#ifdef __GNUC__
		low = mfcp(XREG_CP15_PERF_CYCLE_COUNTER);
		reg = mfcp(XREG_CP15_V_FLAG_STATUS);
#else
		{
			register unsigned int Reg __asm(XREG_CP15_PERF_CYCLE_COUNTER);
			low = Reg;
		}
		{
			register unsigned int Reg __asm(XREG_CP15_V_FLAG_STATUS);
			reg = Reg;
		}
#endif
		if (reg & CYCLE_COUNTER_MASK) {
			/* clear the overflow flag and account for the wrap */
			mtcp(XREG_CP15_V_FLAG_STATUS, CYCLE_COUNTER_MASK);
			high++;
		}
	} while (reg & CYCLE_COUNTER_MASK);

	*Xtime = (((XTime) high) << 32) | (XTime) low;
}
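/*
 * Illustrative usage sketch (not part of the original source): timing a
 * code block with XTime_GetTime. The result is in cycle-counter ticks;
 * converting to seconds requires the BSP's clock-frequency constant, which
 * is not shown here.
 */
static XTime example_measure_duration(void)
{
	XTime tStart, tEnd;

	XTime_GetTime(&tStart);
	/* ... code under measurement ... */
	XTime_GetTime(&tEnd);

	return tEnd - tStart;	/* elapsed time in cycle-counter ticks */
}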
/*****************************************************************************
*
* Set the memory attributes for a region of memory starting at address addr,
* with size encoding reg_size and attributes attrib, using MPU region number
* reg_num.
*
* @param	addr is the base address of the region.
* @param	reg_size specifies the size encoding for the region.
* @param	reg_num specifies the MPU region number to program.
* @param	attrib specifies the attributes for the region.
*
* @return	None.
*
******************************************************************************/
static void Xil_SetAttribute(u32 addr, u32 reg_size, s32 reg_num, u32 attrib)
{
	u32 Local_reg_size = reg_size;

	Local_reg_size = Local_reg_size << 1U;
	Local_reg_size |= REGION_EN;
	mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER, reg_num);
	mtcp(XREG_CP15_MPU_REG_BASEADDR, addr);		/* Set base address of the region */
	mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib);	/* Set the control attributes */
	mtcp(XREG_CP15_MPU_REG_SIZE_EN, Local_reg_size);/* Set the region size and enable it */
	dsb();	/* ensure completion of the register updates */
	isb();	/* synchronize context on this processor */
}
/****************************************************************************
*
* Invalidate the Data cache for the given address range.
* If the bytes specified by the address (adr) are cached by the Data cache,
* the cacheline containing those bytes is invalidated. If the cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param	Start address of range to be invalidated.
* @param	Length of range to be invalidated in bytes.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void Xil_DCacheInvalidateRange(unsigned int adr, unsigned len)
{
	const unsigned cacheline = 32;
	unsigned int end;
	unsigned int tempadr = adr;
	unsigned int tempend;
	unsigned int currmask;
	volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
				XPS_L2CC_CACHE_INVLD_PA_OFFSET);

	currmask = mfcpsr();
	mtcpsr(currmask | IRQ_FIQ_MASK);

	if (len != 0) {
		end = tempadr + len;
		tempend = end;
		/* Select L1 Data cache in CSSR */
		mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

		if (tempadr & (cacheline - 1)) {
			tempadr &= ~(cacheline - 1);

			Xil_L1DCacheFlushLine(tempadr);
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3);
			Xil_L2CacheFlushLine(tempadr);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0);
			Xil_L2CacheSync();
			tempadr += cacheline;
		}
		if (tempend & (cacheline - 1)) {
			tempend &= ~(cacheline - 1);

			Xil_L1DCacheFlushLine(tempend);
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3);
			Xil_L2CacheFlushLine(tempend);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0);
			Xil_L2CacheSync();
		}

		while (tempadr < tempend) {
			/* Invalidate L2 cache line */
			*L2CCOffset = tempadr;
			dsb();
#ifdef __GNUC__
			/* Invalidate L1 Data cache line */
			__asm__ __volatile__("mcr " \
				XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#else
			{
				volatile register unsigned int Reg
					__asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
				Reg = tempadr;
			}
#endif
			tempadr += cacheline;
		}
	}

	dsb();
	mtcpsr(currmask);
}
/*****************************************************************************
*
* Set the memory attributes for a section in the translation table. Each
* section covers 1MB of memory.
*
* @param	addr is the address for which attributes are to be set.
* @param	attrib specifies the attributes for that memory region.
*
* @return	None.
*
* @note		The MMU and D-cache need not be disabled before changing a
*		translation table attribute.
*
******************************************************************************/
void Xil_SetTlbAttributes(u32 addr, u32 attrib)
{
	u32 *ptr;
	u32 section;

	section = addr / 0x100000;	/* each section covers 1MB */
	ptr = &MMUTable + section;
	*ptr = (addr & 0xFFF00000) | attrib;

	Xil_DCacheFlush();

	mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0);
	/* Invalidate all branch predictors */
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);

	dsb();	/* ensure completion of the BP and TLB invalidation */
	isb();	/* synchronize context on this processor */
}
/*****************************************************************************
*
* Disable all MPU regions, whether or not they are currently enabled.
*
* @param	None.
*
* @return	None.
*
******************************************************************************/
static void Xil_DisableMPURegions(void)
{
	u32 Temp = 0U;
	u32 Index = 0U;

	for (Index = 0; Index <= 15; Index++) {
		mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER, Index);
#if defined (__GNUC__)
		Temp = mfcp(XREG_CP15_MPU_REG_SIZE_EN);
#elif defined (__ICCARM__)
		mfcp(XREG_CP15_MPU_REG_SIZE_EN, Temp);
#endif
		Temp &= (~REGION_EN);
		dsb();
		mtcp(XREG_CP15_MPU_REG_SIZE_EN, Temp);
		dsb();
		isb();
	}
}
/**
 * @brief	Disable the MPU for Cortex-R5 processors. This function
 *		invalidates the I cache, flushes the D Caches, and then
 *		disables the MPU.
 *
 * @param	None.
 *
 * @return	None.
 *
******************************************************************************/
void Xil_DisableMPU(void)
{
	u32 CtrlReg, Reg;
	s32 DCacheStatus = 0, ICacheStatus = 0;

	/* record the current state of the caches */
#if defined (__GNUC__)
	CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#elif defined (__ICCARM__)
	mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
#endif
	if ((CtrlReg & XREG_CP15_CONTROL_C_BIT) != 0x00000000U) {
		DCacheStatus = 1;
	}
	if ((CtrlReg & XREG_CP15_CONTROL_I_BIT) != 0x00000000U) {
		ICacheStatus = 1;
	}

	if (DCacheStatus != 0) {
		Xil_DCacheDisable();
	}
	if (ICacheStatus != 0) {
		Xil_ICacheDisable();
	}

	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
#if defined (__GNUC__)
	Reg = mfcp(XREG_CP15_SYS_CONTROL);
#elif defined (__ICCARM__)
	mfcp(XREG_CP15_SYS_CONTROL, Reg);
#endif
	Reg &= ~(0x00000001U);	/* clear the M bit to disable the MPU */
	dsb();
	mtcp(XREG_CP15_SYS_CONTROL, Reg);
	isb();

	/* re-enable the caches only if they were enabled on entry */
	if (DCacheStatus != 0) {
		Xil_DCacheEnable();
	}
	if (ICacheStatus != 0) {
		Xil_ICacheEnable();
	}
}
/*****************************************************************************
*
* Set the memory attributes for a section in the translation table. Each
* section covers 1MB of memory.
*
* @param	Addr is the address for which attributes are to be set.
* @param	attrib specifies the attributes for that memory region.
*
* @return	None.
*
* @note		The MMU and D-cache need not be disabled before changing a
*		translation table attribute.
*
******************************************************************************/
void Xil_SetTlbAttributes(INTPTR Addr, u32 attrib)
{
	u32 *ptr;
	u32 section;

	section = Addr / 0x100000U;	/* each section covers 1MB */
	ptr = &MMUTable;
	ptr += section;
	if (ptr != NULL) {
		*ptr = (Addr & 0xFFF00000U) | attrib;
	}

	Xil_DCacheFlush();

	mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0U);
	/* Invalidate all branch predictors */
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0U);

	dsb();	/* ensure completion of the BP and TLB invalidation */
	isb();	/* synchronize context on this processor */
}
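/*
 * Illustrative usage sketch (not part of the original source): marking a
 * 1MB section as strongly-ordered memory before polling a memory-mapped
 * peripheral. The peripheral address is hypothetical, and 0xC02 is an
 * assumed short-descriptor encoding (section entry, full access, B=C=0);
 * consult the BSP's xil_mmu.h for the authoritative attribute values.
 */
static void example_map_section_strongly_ordered(void)
{
	/* hypothetical peripheral window at 0x40000000 */
	Xil_SetTlbAttributes(0x40000000U, 0xC02U);
}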
/*****************************************************************************
*
* Disable the MMU for Cortex-A9 processors. This function invalidates the
* TLBs and the Branch Predictor Array and flushes the D Caches before
* disabling the MMU and D cache.
*
* @param	None.
*
* @return	None.
*
******************************************************************************/
void Xil_DisableMMU(void)
{
	u32 Reg;

	mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0);
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
	Xil_DCacheFlush();

#ifdef __GNUC__
	Reg = mfcp(XREG_CP15_SYS_CONTROL);
#else
	{
		volatile register unsigned int Cp15Reg __asm(XREG_CP15_SYS_CONTROL);
		Reg = Cp15Reg;
	}
#endif
	Reg &= ~0x05;
#ifdef CONFIG_ARM_ERRATA_794073
	/* Disable Branch Prediction */
	Reg &= ~0x800;
#endif
	mtcp(XREG_CP15_SYS_CONTROL, Reg);
}
/**
 * @brief	Set the memory attributes for a section of memory and program
 *		it into the next available MPU region.
 *
 * @param	addr: 32-bit base address of the region.
 * @param	size: size of the region in bytes.
 * @param	attrib: attributes for the given memory region.
 *
 * @return	XST_SUCCESS on success, XST_FAILURE if no MPU region is
 *		available.
 *
******************************************************************************/
u32 Xil_SetMPURegion(INTPTR addr, u64 size, u32 attrib)
{
	u32 Regionsize = 0;
	INTPTR Localaddr = addr;
	u32 NextAvailableMemRegion;
	unsigned int i;

	NextAvailableMemRegion = Xil_GetNextMPURegion();
	if (NextAvailableMemRegion == 0xFF) {
		xdbg_printf(DEBUG, "No regions available\r\n");
		return XST_FAILURE;
	}

	Xil_DCacheFlush();
	Xil_ICacheInvalidate();

	mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER, NextAvailableMemRegion);
	isb();

	/* Lookup the size encoding. */
	for (i = 0; i < sizeof region_size / sizeof region_size[0]; i++) {
		if (size <= region_size[i].size) {
			Regionsize = region_size[i].encoding;
			break;
		}
	}
	/* Guard the table access below: if the requested size exceeds every
	 * entry, fall back to the largest encoding. */
	if (i == sizeof region_size / sizeof region_size[0]) {
		i--;
		Regionsize = region_size[i].encoding;
	}

	Localaddr &= ~(region_size[i].size - 1);
	Regionsize <<= 1;
	Regionsize |= REGION_EN;

	dsb();
	mtcp(XREG_CP15_MPU_REG_BASEADDR, Localaddr);	/* Set base address of the region */
	mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib);	/* Set the control attributes */
	mtcp(XREG_CP15_MPU_REG_SIZE_EN, Regionsize);	/* Set the region size and enable it */
	dsb();
	isb();
	Xil_UpdateMPUConfig(NextAvailableMemRegion, Localaddr, Regionsize, attrib);
	return XST_SUCCESS;
}
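/*
 * Illustrative usage sketch (not part of the original source): reserving a
 * 1MB non-cacheable region for a DMA buffer on Cortex-R5. The buffer
 * address is hypothetical, and NORM_NSHARED_NCACHE and PRIV_RW_USER_RW are
 * assumed to be the attribute macros from the BSP's MPU headers; verify
 * the names against your BSP version.
 */
static u32 example_add_dma_region(void)
{
	/* hypothetical 1MB buffer at 0x3ED00000 */
	return Xil_SetMPURegion(0x3ED00000U, 0x100000U,
				NORM_NSHARED_NCACHE | PRIV_RW_USER_RW);
}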
/*****************************************************************************
*
* Invalidate the caches and enable the MMU and D Caches for Cortex-A53
* processors.
*
* @param	None.
* @return	None.
*
******************************************************************************/
void Xil_EnableMMU(void)
{
	u32 Reg;

	Xil_DCacheInvalidate();
	Xil_ICacheInvalidate();

	Reg = mfcp(XREG_CP15_SYS_CONTROL);
	Reg |= (u32)0x05U;
	mtcp(XREG_CP15_SYS_CONTROL, Reg);

	dsb();
	isb();
}
/****************************************************************************
*
* Invalidate the entire Data cache.
*
* @param	None.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void Xil_DCacheInvalidate(void)
{
	u32 currmask;
	u32 stack_start, stack_end, stack_size;

	currmask = mfcpsr();
	mtcpsr(currmask | IRQ_FIQ_MASK);

	stack_end = (u32)&_stack_end;
	stack_start = (u32)&__undef_stack;
	stack_size = stack_start - stack_end;

	/* Flush stack memory to save return address */
	Xil_DCacheFlushRange(stack_end, stack_size);

	mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

	/* invalidate the entire D cache */
	mtcp(XREG_CP15_INVAL_DC_ALL, 0);

	mtcpsr(currmask);
}
/****************************************************************************
*
* Disable the Data cache.
*
* @param	None.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void Xil_DCacheDisable(void)
{
	register u32 CtrlReg;

	/* clean and invalidate the Data cache */
	Xil_DCacheFlush();

	/* disable the Data cache */
	CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
	CtrlReg &= ~(XREG_CP15_CONTROL_C_BIT);
	mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/**
 *
 * @brief	This function resets the Cortex R5 event counters.
 *
 * @param	None.
 *
 * @return	None.
 *
 *****************************************************************************/
void Xpm_ResetEventCounters(void)
{
	u32 Reg;

#ifdef __GNUC__
	Reg = mfcp(XREG_CP15_PERF_MONITOR_CTRL);
#elif defined (__ICCARM__)
	mfcp(XREG_CP15_PERF_MONITOR_CTRL, Reg);
#else
	{
		register u32 C15Reg __asm(XREG_CP15_PERF_MONITOR_CTRL);
		Reg = C15Reg;
	}
#endif
	Reg |= (1U << 2U);	/* reset event counters */
	mtcp(XREG_CP15_PERF_MONITOR_CTRL, Reg);
}
/****************************************************************************
*
* Set the time in the Cycle Counter Register.
*
* @param	Value to be written to the Cycle Counter Register.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void XTime_SetTime(XTime Xtime)
{
	u32 reg;

#ifdef __GNUC__
	/* disable the cycle counter before updating */
	reg = mfcp(XREG_CP15_COUNT_ENABLE_CLR);
#else
	{
		register unsigned int Reg __asm(XREG_CP15_COUNT_ENABLE_CLR);
		reg = Reg;
	}
#endif
	mtcp(XREG_CP15_COUNT_ENABLE_CLR, reg | CYCLE_COUNTER_MASK);

	/* clear the cycle counter overflow flag */
#ifdef __GNUC__
	reg = mfcp(XREG_CP15_V_FLAG_STATUS);
#else
	{
		register unsigned int Reg __asm(XREG_CP15_V_FLAG_STATUS);
		reg = Reg;
	}
#endif
	mtcp(XREG_CP15_V_FLAG_STATUS, reg & CYCLE_COUNTER_MASK);

	/* set the time in the cycle counter reg */
	mtcp(XREG_CP15_PERF_CYCLE_COUNTER, (u32) Xtime);
	high = Xtime >> 32;

	/* enable the cycle counter */
#ifdef __GNUC__
	reg = mfcp(XREG_CP15_COUNT_ENABLE_SET);
#else
	{
		register unsigned int Reg __asm(XREG_CP15_COUNT_ENABLE_SET);
		reg = Reg;
	}
#endif
	mtcp(XREG_CP15_COUNT_ENABLE_SET, reg | CYCLE_COUNTER_MASK);
}
/****************************************************************************
*
* Enable the Data cache.
*
* @param	None.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void Xil_DCacheEnable(void)
{
	register u32 CtrlReg;

	/* enable the cache only if it is currently disabled */
	CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
	if ((CtrlReg & XREG_CP15_CONTROL_C_BIT) == 0x00000000U) {
		/* invalidate the Data cache */
		Xil_DCacheInvalidate();

		/* enable the Data cache */
		CtrlReg |= (XREG_CP15_CONTROL_C_BIT);
		mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
	}
}
/*****************************************************************************
*
* Invalidate the caches and enable the MMU and D Caches for Cortex-A9
* processors.
*
* @param	None.
* @return	None.
*
******************************************************************************/
void Xil_EnableMMU(void)
{
	u32 Reg;

	Xil_DCacheInvalidate();
	Xil_ICacheInvalidate();

#ifdef __GNUC__
	Reg = mfcp(XREG_CP15_SYS_CONTROL);
#else
	{
		volatile register unsigned int Cp15Reg __asm(XREG_CP15_SYS_CONTROL);
		Reg = Cp15Reg;
	}
#endif
	Reg |= 0x05;
	mtcp(XREG_CP15_SYS_CONTROL, Reg);

	dsb();
	isb();
}
/**
 *
 * @brief	This function disables the event counters and returns the
 *		counter values.
 *
 * @param	PmCtrValue: Pointer to an array of type u32 PmCtrValue[6].
 *		It is an output parameter which is used to return the PM
 *		counter values.
 *
 * @return	None.
 *
 *****************************************************************************/
void Xpm_GetEventCounters(u32 *PmCtrValue)
{
	u32 Counter;

	Xpm_DisableEventCounters();

	for (Counter = 0U; Counter < XPM_CTRCOUNT; Counter++) {
		mtcp(XREG_CP15_EVENT_CNTR_SEL, Counter);
#ifdef __GNUC__
		PmCtrValue[Counter] = mfcp(XREG_CP15_PERF_MONITOR_COUNT);
#elif defined (__ICCARM__)
		mfcp(XREG_CP15_PERF_MONITOR_COUNT, PmCtrValue[Counter]);
#else
		{
			register u32 Cp15Reg __asm(XREG_CP15_PERF_MONITOR_COUNT);
			PmCtrValue[Counter] = Cp15Reg;
		}
#endif
	}
}
/****************************************************************************
*
* Invalidate the Data cache for the given address range.
* If the bytes specified by the address (adr) are cached by the Data cache,
* the cacheline containing those bytes is invalidated. If the cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param	Start address of range to be invalidated.
* @param	Length of range to be invalidated in bytes.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
{
	const u32 cacheline = 32U;
	u32 end;
	u32 tempadr = adr;
	u32 tempend;
	u32 currmask;

	currmask = mfcpsr();
	mtcpsr(currmask | IRQ_FIQ_MASK);

	if (len != 0U) {
		end = tempadr + len;
		tempend = end;
		/* Select L1 Data cache in CSSR */
		mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);

		if ((tempadr & (cacheline - 1U)) != 0U) {
			tempadr &= (~(cacheline - 1U));
			Xil_DCacheFlushLine(tempadr);
		}
		if ((tempend & (cacheline - 1U)) != 0U) {
			tempend &= (~(cacheline - 1U));
			Xil_DCacheFlushLine(tempend);
		}

		while (tempadr < tempend) {
			/* Invalidate Data cache line */
			__asm__ __volatile__("mcr " \
				XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
			tempadr += cacheline;
		}
	}

	dsb();
	mtcpsr(currmask);
}
/**
 *
 * @brief	This function enables the Cortex R5 event counters.
 *
 * @param	None.
 *
 * @return	None.
 *
 *****************************************************************************/
void Xpm_EnableEventCounters(void)
{
	/* Enable the event counters */
	mtcp(XREG_CP15_COUNT_ENABLE_SET, 0x3f);
}
/**
 *
 * @brief	This function configures the Cortex R5 event counters with the
 *		event codes, in a configuration selected by the user, and
 *		enables the counters.
 *
 * @param	PmcrCfg: Configuration value based on which the event counters
 *		are configured. XPM_CNTRCFG* values defined in xpm_counter.h
 *		can be used for setting the configuration.
 *
 * @return	None.
 *
 *****************************************************************************/
void Xpm_SetEvents(s32 PmcrCfg)
{
	u32 Counter;
	static PmcrEventCfg32 PmcrEvents[] = {
		{
			XPM_EVENT_SOFTINCR,
			XPM_EVENT_INSRFETCH_CACHEREFILL,
			XPM_EVENT_INSTRFECT_TLBREFILL,
			XPM_EVENT_DATA_CACHEREFILL,
			XPM_EVENT_DATA_CACHEACCESS,
			XPM_EVENT_DATA_TLBREFILL
		},
		{
			XPM_EVENT_DATA_READS,
			XPM_EVENT_DATA_WRITE,
			XPM_EVENT_EXCEPTION,
			XPM_EVENT_EXCEPRETURN,
			XPM_EVENT_CHANGECONTEXT,
			XPM_EVENT_SW_CHANGEPC
		},
		{
			XPM_EVENT_IMMEDBRANCH,
			XPM_EVENT_UNALIGNEDACCESS,
			XPM_EVENT_BRANCHMISS,
			XPM_EVENT_CLOCKCYCLES,
			XPM_EVENT_BRANCHPREDICT,
			XPM_EVENT_JAVABYTECODE
		},
		{
			XPM_EVENT_SWJAVABYTECODE,
			XPM_EVENT_JAVABACKBRANCH,
			XPM_EVENT_COHERLINEMISS,
			XPM_EVENT_COHERLINEHIT,
			XPM_EVENT_INSTRSTALL,
			XPM_EVENT_DATASTALL
		},
		{
			XPM_EVENT_MAINTLBSTALL,
			XPM_EVENT_STREXPASS,
			XPM_EVENT_STREXFAIL,
			XPM_EVENT_DATAEVICT,
			XPM_EVENT_NODISPATCH,
			XPM_EVENT_ISSUEEMPTY
		},
		{
			XPM_EVENT_INSTRRENAME,
			XPM_EVENT_PREDICTFUNCRET,
			XPM_EVENT_MAINEXEC,
			XPM_EVENT_SECEXEC,
			XPM_EVENT_LDRSTR,
			XPM_EVENT_FLOATRENAME
		},
		{
			XPM_EVENT_NEONRENAME,
			XPM_EVENT_PLDSTALL,
			XPM_EVENT_WRITESTALL,
			XPM_EVENT_INSTRTLBSTALL,
			XPM_EVENT_DATATLBSTALL,
			XPM_EVENT_INSTR_uTLBSTALL
		},
		{
			XPM_EVENT_DATA_uTLBSTALL,
			XPM_EVENT_DMB_STALL,
			XPM_EVENT_INT_CLKEN,
			XPM_EVENT_DE_CLKEN,
			XPM_EVENT_INSTRISB,
			XPM_EVENT_INSTRDSB
		},
		{
			XPM_EVENT_INSTRDMB,
			XPM_EVENT_EXTINT,
			XPM_EVENT_PLE_LRC,
			XPM_EVENT_PLE_LRS,
			XPM_EVENT_PLE_FLUSH,
			XPM_EVENT_PLE_CMPL
		},
		{
			XPM_EVENT_PLE_OVFL,
			XPM_EVENT_PLE_PROG,
			XPM_EVENT_PLE_LRC,
			XPM_EVENT_PLE_LRS,
			XPM_EVENT_PLE_FLUSH,
			XPM_EVENT_PLE_CMPL
		},
		{
			XPM_EVENT_DATASTALL,
			XPM_EVENT_INSRFETCH_CACHEREFILL,
			XPM_EVENT_INSTRFECT_TLBREFILL,
			XPM_EVENT_DATA_CACHEREFILL,
			XPM_EVENT_DATA_CACHEACCESS,
			XPM_EVENT_DATA_TLBREFILL
		},
	};
	const u32 *ptr = PmcrEvents[PmcrCfg];

	Xpm_DisableEventCounters();

	for (Counter = 0U; Counter < XPM_CTRCOUNT; Counter++) {
		/* Select the event counter */
		mtcp(XREG_CP15_EVENT_CNTR_SEL, Counter);
		/* Set the event */
		mtcp(XREG_CP15_EVENT_TYPE_SEL, ptr[Counter]);
	}

	Xpm_ResetEventCounters();
	Xpm_EnableEventCounters();
}
/**
 *
 * @brief	This function disables the Cortex R5 event counters.
 *
 * @param	None.
 *
 * @return	None.
 *
 *****************************************************************************/
void Xpm_DisableEventCounters(void)
{
	/* Disable the event counters */
	mtcp(XREG_CP15_COUNT_ENABLE_CLR, 0x3f);
}
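/*
 * Illustrative usage sketch (not part of the original source): profiling a
 * code block with the event counter API shown above. XPM_CNTRCFG1 is
 * assumed to be one of the XPM_CNTRCFG* selectors from xpm_counter.h, and
 * xil_printf is assumed to be available from the standalone BSP.
 */
static void example_profile_block(void)
{
	u32 counters[XPM_CTRCOUNT];
	u32 i;

	Xpm_SetEvents(XPM_CNTRCFG1);	/* configure, reset, and start counting */

	/* ... code under measurement ... */

	Xpm_GetEventCounters(counters);	/* stop the counters and read them back */
	for (i = 0U; i < XPM_CTRCOUNT; i++) {
		xil_printf("counter %d: %d\r\n", (int)i, (int)counters[i]);
	}
}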
/****************************************************************************
*
* Invalidate the Data cache for the given address range.
* If the bytes specified by the address (adr) are cached by the Data cache,
* the cacheline containing those bytes is invalidated. If the cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* In this function, if the start or end address is not aligned to a cache
* line, the cache line containing the unaligned start or end address is
* flushed first and then the lines are invalidated, since directly
* invalidating an unaligned cache line may result in loss of data. This
* issue raises a few possibilities.
*
* If the address to be invalidated is not cache-line aligned, the
* following choices are available:
* 1) Invalidate the cache line when required and do not bother much about
* the side effects. Though it sounds good, it can result in hard-to-debug
* issues. The problem is, if some other variables are allocated in the same
* cache line and have been recently updated (in cache), the invalidation
* would result in loss of data.
*
* 2) Flush the cache line first. This ensures that if any other variables
* present in the same cache line were updated recently, they are flushed
* out to memory. The line can then safely be invalidated. Again it sounds
* good, but this can result in issues. For example, when the invalidation
* happens in a typical ISR (after a DMA transfer has updated the memory),
* flushing the cache line means losing data that was updated recently
* before the ISR got invoked.
*
* Linux prefers the second approach. To have a uniform implementation
* (across standalone and Linux), the second option is implemented here.
* This being the case, the following needs to be taken care of:
* 1) Whenever possible, the addresses must be cache-line aligned. Please
* note that not just the start address, but the end address too must be
* cache-line aligned. If that is taken care of, this will always work.
* 2) Avoid situations where invalidation has to be done after the data is
* updated by a peripheral/DMA directly into the memory. This is not tough
* to achieve (it may be a bit risky). The common use case for invalidation
* is a DMA transfer. Generally, for such use cases, buffers can be
* allocated first and then the DMA started. The practice to follow here
* is: immediately after buffer allocation and before starting the DMA, do
* the invalidation. With this approach, invalidation need not be done
* after the DMA transfer is over.
*
* This will always work if done carefully. However, the concern is that
* there is no guarantee that invalidation will never be needed after the
* DMA is complete. For example, if the first or last cache line (assuming
* the buffer in question comprises multiple cache lines) is brought into
* the cache between the time it is invalidated and the DMA completes,
* because of speculative prefetching or because a variable in the same
* cache line was read, then the cache will have to be invalidated after
* the DMA is complete.
*
* @param	Start address of range to be invalidated.
* @param	Length of range to be invalidated in bytes.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
{
	const u32 cacheline = 32U;
	u32 end;
	u32 tempadr = adr;
	u32 tempend;
	u32 currmask;
	volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
				XPS_L2CC_CACHE_INVLD_PA_OFFSET);

	currmask = mfcpsr();
	mtcpsr(currmask | IRQ_FIQ_MASK);

	if (len != 0U) {
		end = tempadr + len;
		tempend = end;
		/* Select L1 Data cache in CSSR */
		mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);

		if ((tempadr & (cacheline - 1U)) != 0U) {
			tempadr &= (~(cacheline - 1U));

			Xil_L1DCacheFlushLine(tempadr);
#ifndef USE_AMP
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3U);
			Xil_L2CacheFlushLine(tempadr);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0U);
			Xil_L2CacheSync();
#endif
			tempadr += cacheline;
		}
		if ((tempend & (cacheline - 1U)) != 0U) {
			tempend &= (~(cacheline - 1U));

			Xil_L1DCacheFlushLine(tempend);
#ifndef USE_AMP
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3U);
			Xil_L2CacheFlushLine(tempend);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0U);
			Xil_L2CacheSync();
#endif
		}

		while (tempadr < tempend) {
#ifndef USE_AMP
			/* Invalidate L2 cache line */
			*L2CCOffset = tempadr;
			Xil_L2CacheSync();
#endif
#ifdef __GNUC__
			/* Invalidate L1 Data cache line */
			__asm__ __volatile__("mcr " \
				XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#elif defined (__ICCARM__)
			__asm volatile ("mcr " \
				XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#else
			{
				volatile register u32 Reg
					__asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
				Reg = tempadr;
			}
#endif
			tempadr += cacheline;
		}
	}

	dsb();
	mtcpsr(currmask);
}
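/*
 * Illustrative usage sketch (not part of the original source) of the
 * recommended pattern from the note above: allocate a cache-line-aligned
 * buffer, invalidate it before starting the DMA transfer, and do not touch
 * it until the transfer completes. The start_dma() and wait_dma() helpers
 * are hypothetical placeholders for the real driver calls.
 */
#define EXAMPLE_BUF_LEN	1024U

static u8 example_rx_buf[EXAMPLE_BUF_LEN] __attribute__((aligned(32)));

static void example_dma_receive(void)
{
	/* invalidate before the DMA starts, so no dirty or stale lines can
	 * overwrite or mask the incoming data */
	Xil_DCacheInvalidateRange((INTPTR)example_rx_buf, EXAMPLE_BUF_LEN);

	start_dma(example_rx_buf, EXAMPLE_BUF_LEN);	/* hypothetical */
	wait_dma();					/* hypothetical */

	/* example_rx_buf now holds the received data */
}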