/****************************************************************************
 *
 * Get the time from the Cycle Counter Register.
 *
 * @param	Xtime: Pointer to the location to be updated with the 64-bit
 *		time (upper 32 bits come from the software-maintained
 *		overflow counter 'high', lower 32 bits from the hardware
 *		cycle counter).
 *
 * @return	None.
 *
 * @note	Relies on the file-scope counter 'high' that accumulates
 *		cycle-counter overflows.
 *
 ****************************************************************************/
void XTime_GetTime(XTime *Xtime)
{
	u32 reg;
	u32 low;

	/* loop until we got a consistent result */
	do {
#ifdef __GNUC__
		/* Read the 32-bit cycle counter, then the overflow flag. */
		low = mfcp(XREG_CP15_PERF_CYCLE_COUNTER);
		reg = mfcp(XREG_CP15_V_FLAG_STATUS);
#else
		{ register unsigned int Reg __asm(XREG_CP15_PERF_CYCLE_COUNTER);
		  low = Reg; }
		{ register unsigned int Reg __asm(XREG_CP15_V_FLAG_STATUS);
		  reg = Reg; }
#endif
		if (reg & CYCLE_COUNTER_MASK) {
			/* clear overflow */
			mtcp(XREG_CP15_V_FLAG_STATUS, CYCLE_COUNTER_MASK);
			/* account for the wrap in the software upper half */
			high++;
		}
	/* if an overflow was seen, 'low' may be stale - read again */
	} while (reg & CYCLE_COUNTER_MASK);

	*Xtime = (((XTime) high) << 32) | (XTime) low;
}
/****************************************************************************
 *
 * Busy-wait for (n * count) global timer ticks.
 *
 * @param	n: number of units to wait.
 * @param	count: timer ticks per unit.
 *
 * @return	None.
 *
 ****************************************************************************/
static void sleep_common(u32 n, u32 count)
{
	XTime Deadline;
	XTime Now;

	/* Make sure the global timer counter is running
	 * (it is only enabled if it was disabled). */
	XTime_StartTimer();

	Now = mfcp(CNTPCT_EL0);
	Deadline = Now + (((XTime) n) * count);

	/* Spin until the counter reaches the deadline. */
	while (Now < Deadline) {
		Now = mfcp(CNTPCT_EL0);
	}
}
/**
 *
 * Default Prefetch abort handler which prints prefetch fault status register
 * through which information about instruction prefetch fault can be acquired
 *
 * @param	CallBackRef: unused; present to match the generic
 *		exception-handler signature.
 *
 * @return	None (this handler never returns; it spins forever).
 *
 * @note	None.
 *
 ****************************************************************************/
void Xil_PrefetchAbortHandler(void *CallBackRef){
	u32 FaultStatus;

	/* Read the Instruction Fault Status Register (IFSR). */
#ifdef __GNUC__
	FaultStatus = mfcp(XREG_CP15_INST_FAULT_STATUS);
#elif defined (__ICCARM__)
	mfcp(XREG_CP15_INST_FAULT_STATUS,FaultStatus);
#else
	{ volatile register unsigned int Reg __asm(XREG_CP15_INST_FAULT_STATUS);
	  FaultStatus = Reg; }
#endif
	xdbg_printf(XDBG_DEBUG_ERROR, "Prefetch abort with Instruction Fault Status Register %x\n",FaultStatus);
	/* Fatal condition: halt here so the fault state can be inspected. */
	while(1);
}
/** * * @brief This function resets the Cortex R5 event counters. * * @param None. * * @return None. * *****************************************************************************/ void Xpm_ResetEventCounters(void) { u32 Reg; #ifdef __GNUC__ Reg = mfcp(XREG_CP15_PERF_MONITOR_CTRL); #elif defined (__ICCARM__) mfcp(XREG_CP15_PERF_MONITOR_CTRL, Reg); #else { register u32 C15Reg __asm(XREG_CP15_PERF_MONITOR_CTRL); Reg = C15Reg; } #endif Reg |= (1U << 2U); /* reset event counters */ mtcp(XREG_CP15_PERF_MONITOR_CTRL, Reg); }
/** * * Default Prefetch abort handler which prints prefetch fault status register through * which information about instruction prefetch fault can be acquired * * @param None * * @return None. * * @note None. * ****************************************************************************/ void Xil_PrefetchAbortHandler(void *CallBackRef){ u32 FaultStatus; #ifdef __GNUC__ FaultStatus = mfcp(XREG_CP15_INST_FAULT_STATUS); #elif defined (__ICCARM__) mfcp(XREG_CP15_INST_FAULT_STATUS,FaultStatus); #else { volatile register u32 Reg __asm(XREG_CP15_INST_FAULT_STATUS); FaultStatus = Reg; } #endif xdbg_printf(XDBG_DEBUG_GENERAL, "Prefetch abort with Instruction Fault Status Register %x\n",FaultStatus); xdbg_printf(XDBG_DEBUG_GENERAL, "Address of Instrcution causing Prefetch abort %x\n",PrefetchAbortAddr); while(1) { ; } }
/**
 *
 * This API gives a delay in microseconds
 *
 * @param	useconds requested
 *
 * @return	0 if the delay can be achieved, -1 if the requested delay
 *		is out of range
 *
 * @note	None.
 *
 ****************************************************************************/
s32 usleep(u32 useconds)
{
	XTime Deadline;
	XTime Now;

	/* Start global timer counter, it will only be enabled if it is
	 * disabled. */
#if !GUEST
	XTime_StartTimer();
#endif

	Now = mfcp(CNTPCT_EL0);
	Deadline = Now + (((XTime) useconds) * COUNTS_PER_USECOND);

	/* Busy-wait until the counter reaches the deadline. */
	while (Now < Deadline) {
		Now = mfcp(CNTPCT_EL0);
	}

	return 0;
}
void Xil_DataAbortHandler(void *CallBackRef){ u32 FaultStatus; xdbg_printf(XDBG_DEBUG_ERROR, "Data abort \n"); #ifdef __GNUC__ FaultStatus = mfcp(XREG_CP15_DATA_FAULT_STATUS); #elif defined (__ICCARM__) mfcp(XREG_CP15_DATA_FAULT_STATUS,FaultStatus); #else { volatile register u32 Reg __asm(XREG_CP15_DATA_FAULT_STATUS); FaultStatus = Reg; } #endif xdbg_printf(XDBG_DEBUG_GENERAL, "Data abort with Data Fault Status Register %x\n",FaultStatus); xdbg_printf(XDBG_DEBUG_GENERAL, "Address of Instrcution causing Data abort %x\n",DataAbortAddr); while(1) { ; } }
/**
 *
 * @brief	This function disables the event counters and returns the
 *		counter values.
 *
 * @param	PmCtrValue: Pointer to an array of type u32 PmCtrValue[6].
 *		It is an output parameter which is used to return the PM
 *		counter values.
 *
 * @return	None.
 *
 *****************************************************************************/
void Xpm_GetEventCounters(u32 *PmCtrValue)
{
	u32 CtrIdx = 0U;

	/* Stop the counters first so a consistent snapshot is read. */
	Xpm_DisableEventCounters();

	while (CtrIdx < XPM_CTRCOUNT) {
		/* Select the counter, then read its current value. */
		mtcp(XREG_CP15_EVENT_CNTR_SEL, CtrIdx);
#ifdef __GNUC__
		PmCtrValue[CtrIdx] = mfcp(XREG_CP15_PERF_MONITOR_COUNT);
#elif defined (__ICCARM__)
		mfcp(XREG_CP15_PERF_MONITOR_COUNT, PmCtrValue[CtrIdx]);
#else
		{ register u32 PmuReg __asm(XREG_CP15_PERF_MONITOR_COUNT);
		  PmCtrValue[CtrIdx] = PmuReg; }
#endif
		CtrIdx++;
	}
}
/***************************************************************************** * * Disable all the MPU regions if any of them is enabled * * @param None. * * @return None. * * ******************************************************************************/ static void Xil_DisableMPURegions(void) { u32 Temp = 0U; u32 Index = 0U; for (Index = 0; Index <= 15; Index++) { mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,Index); #if defined (__GNUC__) Temp = mfcp(XREG_CP15_MPU_REG_SIZE_EN); #elif defined (__ICCARM__) mfcp(XREG_CP15_MPU_REG_SIZE_EN,Temp); #endif Temp &= (~REGION_EN); dsb(); mtcp(XREG_CP15_MPU_REG_SIZE_EN,Temp); dsb(); isb(); } }
/** * @brief Disable MPU for Cortex R5 processors. This function invalidates I * cache and flush the D Caches, and then disabes the MPU. * * @param None. * * @return None. * ******************************************************************************/ void Xil_DisableMPU(void) { u32 CtrlReg, Reg; s32 DCacheStatus=0, ICacheStatus=0; /* enable caches only if they are disabled */ #if defined (__GNUC__) CtrlReg = mfcp(XREG_CP15_SYS_CONTROL); #elif defined (__ICCARM__) mfcp(XREG_CP15_SYS_CONTROL,CtrlReg); #endif if ((CtrlReg & XREG_CP15_CONTROL_C_BIT) != 0x00000000U) { DCacheStatus=1; } if ((CtrlReg & XREG_CP15_CONTROL_I_BIT) != 0x00000000U) { ICacheStatus=1; } if(DCacheStatus != 0) { Xil_DCacheDisable(); } if(ICacheStatus != 0){ Xil_ICacheDisable(); } mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0); #if defined (__GNUC__) Reg = mfcp(XREG_CP15_SYS_CONTROL); #elif defined (__ICCARM__) mfcp(XREG_CP15_SYS_CONTROL,Reg); #endif Reg &= ~(0x00000001U); dsb(); mtcp(XREG_CP15_SYS_CONTROL, Reg); isb(); /* enable caches only if they are disabled in routine*/ if(DCacheStatus != 0) { Xil_DCacheEnable(); } if(ICacheStatus != 0) { Xil_ICacheEnable(); } }
/***************************************************************************** * * Disable MMU for Cortex A53 processors. This function invalidates the TLBs, * Branch Predictor Array and flushed the D Caches before disabling * the MMU and D cache. * * @param None. * * @return None. * ******************************************************************************/ void Xil_DisableMMU(void) { u32 Reg; mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0U); mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0U); Xil_DCacheFlush(); Reg = mfcp(XREG_CP15_SYS_CONTROL); Reg &= (u32)(~0x05U); mtcp(XREG_CP15_SYS_CONTROL, Reg); }
/*****************************************************************************
 *
 * Invalidate the caches, enable MMU and D Caches for Cortex A53 processor.
 *
 * @param	None.
 * @return	None.
 *
 ******************************************************************************/
void Xil_EnableMMU(void)
{
	u32 SysCtrl;

	/* Caches must hold no stale lines when translation is switched on. */
	Xil_DCacheInvalidate();
	Xil_ICacheInvalidate();

	SysCtrl = mfcp(XREG_CP15_SYS_CONTROL);
	SysCtrl |= (u32)0x05U;	/* set M (MMU) and C (D cache) bits */
	mtcp(XREG_CP15_SYS_CONTROL, SysCtrl);
	dsb();
	isb();
}
/*****************************************************************************
 *
 * Enable a background Region in MPU with default memory attributes for
 * Cortex R5 processor.
 *
 * @param	None.
 *
 * @return	None.
 *
 * @note	The unused local 'CtrlReg' has been removed.
 *
 ******************************************************************************/
static void Xil_EnableBackgroundRegion(void)
{
	u32 Reg;

	/* Invalidate the branch predictor array. */
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);

	/* Set SCTLR.BR (bit 17) to enable the default background region. */
	Reg=mfcp(XREG_CP15_SYS_CONTROL);
	Reg |= (0x00000001U<<17U);
	dsb();
	mtcp(XREG_CP15_SYS_CONTROL,Reg);
	isb();
}
/****************************************************************************
 *
 * Disable the Data cache.
 *
 * @param	None.
 *
 * @return	None.
 *
 * @note	None.
 *
 ****************************************************************************/
void Xil_DCacheDisable(void)
{
	register u32 SysCtrl;

	/* Write back and invalidate everything before turning the cache off. */
	Xil_DCacheFlush();

	/* Clear the C bit in the system control register. */
	SysCtrl = mfcp(XREG_CP15_SYS_CONTROL);
	SysCtrl &= ~(XREG_CP15_CONTROL_C_BIT);
	mtcp(XREG_CP15_SYS_CONTROL, SysCtrl);
}
/*****************************************************************************
 *
 * Disable all the MPU regions if any of them is enabled
 *
 * @param	None.
 *
 * @return	None.
 *
 ******************************************************************************/
static void Xil_DisableMPURegions(void)
{
	u32 RegionNum;
	u32 SizeEn;

	/* Clear the enable bit of each of the 16 MPU regions in turn. */
	for (RegionNum = 0U; RegionNum <= 15U; RegionNum++) {
		mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,RegionNum);
		SizeEn = mfcp(XREG_CP15_MPU_REG_SIZE_EN);
		SizeEn &= (~REGION_EN);
		dsb();
		mtcp(XREG_CP15_MPU_REG_SIZE_EN,SizeEn);
		dsb();
		isb();
	}
}
/****************************************************************************
 *
 * Set the time in the Cycle Counter Register.
 *
 * @param	Xtime: 64-bit value to load; the low 32 bits are written to
 *		the hardware cycle counter, the high 32 bits to the
 *		software overflow counter 'high'.
 *
 * @return	None.
 *
 * @note	The cycle counter is stopped while it is being updated and
 *		re-enabled afterwards.
 *
 ****************************************************************************/
void XTime_SetTime(XTime Xtime)
{
	u32 reg;

#ifdef __GNUC__
	/* disable the cycle counter before updating */
	reg = mfcp(XREG_CP15_COUNT_ENABLE_CLR);
#else
	{ register unsigned int Reg __asm(XREG_CP15_COUNT_ENABLE_CLR);
	  reg = Reg; }
#endif
	mtcp(XREG_CP15_COUNT_ENABLE_CLR, reg | CYCLE_COUNTER_MASK);

	/* clear the cycle counter overflow flag */
#ifdef __GNUC__
	reg = mfcp(XREG_CP15_V_FLAG_STATUS);
#else
	{ register unsigned int Reg __asm(XREG_CP15_V_FLAG_STATUS);
	  reg = Reg; }
#endif
	mtcp(XREG_CP15_V_FLAG_STATUS, reg & CYCLE_COUNTER_MASK);

	/* set the time in cyle counter reg */
	mtcp(XREG_CP15_PERF_CYCLE_COUNTER, (u32) Xtime);
	/* keep the upper 32 bits in the software overflow counter */
	high = Xtime >> 32;

	/* enable the cycle counter */
#ifdef __GNUC__
	reg = mfcp(XREG_CP15_COUNT_ENABLE_SET);
#else
	{ register unsigned int Reg __asm(XREG_CP15_COUNT_ENABLE_SET);
	  reg = Reg; }
#endif
	mtcp(XREG_CP15_COUNT_ENABLE_SET, reg | CYCLE_COUNTER_MASK);
}
/****************************************************************************
 *
 * Enable the Data cache.
 *
 * @param	None.
 *
 * @return	None.
 *
 * @note	None.
 *
 ****************************************************************************/
void Xil_DCacheEnable(void)
{
	register u32 SysCtrl;

	SysCtrl = mfcp(XREG_CP15_SYS_CONTROL);

	/* Nothing to do if the D cache is already enabled. */
	if ((SysCtrl & XREG_CP15_CONTROL_C_BIT) != 0x00000000U) {
		return;
	}

	/* Invalidate the Data cache before turning it on. */
	Xil_DCacheInvalidate();

	/* Set the C bit to enable the Data cache. */
	SysCtrl |= (XREG_CP15_CONTROL_C_BIT);
	mtcp(XREG_CP15_SYS_CONTROL, SysCtrl);
}
/***************************************************************************** * * Invalidate the caches, enable MMU and D Caches for Cortex A9 processor. * * @param None. * @return None. * ******************************************************************************/ void Xil_EnableMMU(void) { u32 Reg; Xil_DCacheInvalidate(); Xil_ICacheInvalidate(); #ifdef __GNUC__ Reg = mfcp(XREG_CP15_SYS_CONTROL); #else { volatile register unsigned int Cp15Reg __asm(XREG_CP15_SYS_CONTROL); Reg = Cp15Reg; } #endif Reg |= 0x05; mtcp(XREG_CP15_SYS_CONTROL, Reg); dsb(); isb(); }
/* Exception handler (fatal).
 * Attempt to print out a backtrace.
 *
 * @param data: offset (cast to int) subtracted from the saved link register
 *              to recover the faulting instruction's address.
 *
 * Never returns; spins forever after printing the register dump.
 */
void FreeRTOS_ExHandler(void *data) {
	unsigned *fp, lr;
	static int exception_count = 0;
	int offset = (int)data;

	xil_printf("\n\rEXCEPTION, HALTED!\n\r");
	fp = (unsigned*)mfgpr(11); /* get current frame pointer */
	if (! ptr_valid(fp)) {
		/* Cannot walk the stack with a bogus frame pointer. */
		goto spin;
	}
	/* Fetch Data Fault Address from CP15 */
	lr = mfcp(XREG_CP15_DATA_FAULT_ADDRESS);
	xil_printf("Data Fault Address: 0x%08x\n\r", lr);
	/* The exception frame is built by DataAbortHandler (for example) in
	 * FreeRTOS/Source/portable/GCC/Zynq/port_asm_vectors.s:
	 *     stmdb sp!,{r0-r3,r12,lr}
	 * and the initial handler function (i.e. DataAbortInterrupt() ) in
	 * standalone_bsp/src/arm/vectors.c, which is the standard compiler EABI :
	 *     push {fp, lr}
	 *
	 * The relative position of the frame build in port_asm_vectors.s is assumed,
	 * as there is no longer any direct reference to it. If this file (or vectors.c)
	 * are modified this location will need to be updated.
	 *
	 * r0+r1+r2+r3+r12+lr = 5 registers to get to the initial link register where
	 * the exception occurred.
	 */
	xil_printf("FP: 0x%08x LR: 0x%08x\n\r", (unsigned)fp, *(fp + 5) - offset);
	xil_printf("R0: 0x%08x R1: 0x%08x\n\r", *(fp + 0), *(fp + 1));
	xil_printf("R2: 0x%08x R3: 0x%08x\n\r", *(fp + 2), *(fp + 3));
	xil_printf("R12: 0x%08x\n\r", *(fp + 4));
spin:
	/* Guard against a second fault raised while dumping state. */
	exception_count++;
	if (exception_count > 1) {
		/* Nested exceptions */
		while (1) {;}
	}
	while (1) {;}
}
/*****************************************************************************
 *
 * Disable MMU for Cortex A9 processors. This function invalidates the TLBs,
 * Branch Predictor Array and flushes the D Caches before disabling
 * the MMU and D cache.
 *
 * @param	None.
 *
 * @return	None.
 *
 ******************************************************************************/
void Xil_DisableMMU(void)
{
	u32 Reg;

	/* Invalidate the unified TLB and branch predictor first. */
	mtcp(XREG_CP15_INVAL_UTLB_UNLOCKED, 0);
	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
	/* Push all dirty data out before the D cache goes away. */
	Xil_DCacheFlush();

#ifdef __GNUC__
	Reg = mfcp(XREG_CP15_SYS_CONTROL);
#else
	{ volatile register unsigned int Cp15Reg __asm(XREG_CP15_SYS_CONTROL);
	  Reg = Cp15Reg; }
#endif
	/* Clear M (MMU) and C (D cache) bits. */
	Reg &= ~0x05;
#ifdef CONFIG_ARM_ERRATA_794073
	/* Disable Branch Prediction */
	Reg &= ~0x800;
#endif
	mtcp(XREG_CP15_SYS_CONTROL, Reg);
}
/**
 * This function initializes the processor and updates the cluster id
 * which indicates CPU on which fsbl is running
 *
 * @param	FsblInstancePtr is pointer to the XFsbl Instance
 *
 * @return	returns the error codes described in xfsbl_error.h on any error
 *		returns XFSBL_SUCCESS on success
 *
 ******************************************************************************/
static u32 XFsbl_ProcessorInit(XFsblPs * FsblInstancePtr)
{
	u32 Status = XFSBL_SUCCESS;
	//u64 ClusterId=0U;
	PTRSIZE ClusterId=0U;
	u32 RegValue;
	u32 Index=0U;

	/**
	 * Read the cluster ID and Update the Processor ID
	 * Initialize the processor settings that are not done in
	 * BSP startup code
	 */
#ifdef XFSBL_A53
	ClusterId = mfcp(MPIDR_EL1);
#else
	ClusterId = mfcp(XREG_CP15_MULTI_PROC_AFFINITY);
#endif

	XFsbl_Printf(DEBUG_INFO,"Cluster ID 0x%0lx\n\r", ClusterId);

	if (XFSBL_PLATFORM == XFSBL_PLATFORM_QEMU) {
		/**
		 * Remapping for R5 in QEMU
		 */
		if (ClusterId == 0x80000004U) {
			ClusterId = 0xC0000100U;
		} else if (ClusterId == 0x80000005U) {
			/* this corresponds to R5-1 */
			Status = XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID;
			XFsbl_Printf(DEBUG_GENERAL,
					"XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID\n\r");
			goto END;
		} else {
			/* For MISRA C compliance */
		}
	}

	/* store the processor ID based on the cluster ID */
	if ((ClusterId & XFSBL_CLUSTER_ID_MASK) == XFSBL_A53_PROCESSOR) {
		XFsbl_Printf(DEBUG_GENERAL,"Running on A53-0 ");
		FsblInstancePtr->ProcessorID = XIH_PH_ATTRB_DEST_CPU_A53_0;
#ifdef __aarch64__
		/* Running on A53 64-bit */
		XFsbl_Printf(DEBUG_GENERAL,"(64-bit) Processor \n\r");
		FsblInstancePtr->A53ExecState = XIH_PH_ATTRB_A53_EXEC_ST_AA64;
#else
		/* Running on A53 32-bit */
		XFsbl_Printf(DEBUG_GENERAL,"(32-bit) Processor \n\r");
		FsblInstancePtr->A53ExecState = XIH_PH_ATTRB_A53_EXEC_ST_AA32;
#endif
	} else if ((ClusterId & XFSBL_CLUSTER_ID_MASK) == XFSBL_R5_PROCESSOR) {
		/* A53ExecState is not valid for R5 */
		FsblInstancePtr->A53ExecState = XIH_INVALID_EXEC_ST;

		RegValue = XFsbl_In32(RPU_RPU_GLBL_CNTL);
		if ((RegValue & RPU_RPU_GLBL_CNTL_SLSPLIT_MASK) == 0U) {
			XFsbl_Printf(DEBUG_GENERAL,
				"Running on R5 Processor in Lockstep \n\r");
			FsblInstancePtr->ProcessorID =
					XIH_PH_ATTRB_DEST_CPU_R5_L;
		} else {
			XFsbl_Printf(DEBUG_GENERAL,
				"Running on R5-0 Processor \n\r");
			FsblInstancePtr->ProcessorID =
					XIH_PH_ATTRB_DEST_CPU_R5_0;
		}

		/**
		 * Update the Vector locations in R5 TCM.
		 *
		 * Fix: the original wrote 0 to each word and then immediately
		 * overwrote the same address with XFSBL_R5_VECTOR_VALUE; the
		 * first store was dead and has been removed.
		 */
		while (Index<32U) {
			XFsbl_Out32(Index, XFSBL_R5_VECTOR_VALUE);
			Index += 4;
		}
	} else {
		Status = XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID;
		XFsbl_Printf(DEBUG_GENERAL,
				"XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID\n\r");
		goto END;
	}

	/**
	 * Register the exception handlers
	 */
	XFsbl_RegisterHandlers();

END:
	return Status;
}
/**
 * This function initializes the processor and updates the cluster id
 * which indicates CPU on which fsbl is running
 *
 * @param	FsblInstancePtr is pointer to the XFsbl Instance
 *
 * @return	returns the error codes described in xfsbl_error.h on any error
 *		returns XFSBL_SUCCESS on success
 *
 ******************************************************************************/
static u32 XFsbl_ProcessorInit(XFsblPs * FsblInstancePtr)
{
	u32 Status = XFSBL_SUCCESS;
	//u64 ClusterId=0U;
	PTRSIZE ClusterId=0U;
	u32 RegValue;
	u32 Index=0U;

	/**
	 * Read the cluster ID and Update the Processor ID
	 * Initialize the processor settings that are not done in
	 * BSP startup code
	 */
#ifdef ARMA53_64
	ClusterId = mfcp(MPIDR_EL1);
#else
	ClusterId = mfcp(XREG_CP15_MULTI_PROC_AFFINITY);
#endif

	XFsbl_Printf(DEBUG_INFO,"Cluster ID 0x%0lx\n\r", ClusterId);

	if (XGet_Zynq_UltraMp_Platform_info() == XPLAT_ZYNQ_ULTRA_MPQEMU) {
		/**
		 * Remapping for R5 in QEMU
		 */
		if (ClusterId == 0x80000004U) {
			ClusterId = 0xC0000100U;
		} else if (ClusterId == 0x80000005U) {
			/* this corresponds to R5-1 */
			Status = XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID;
			XFsbl_Printf(DEBUG_GENERAL,
					"XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID\n\r");
			goto END;
		} else {
			/* For MISRA C compliance */
		}
	}

	/* store the processor ID based on the cluster ID */
	if ((ClusterId & XFSBL_CLUSTER_ID_MASK) == XFSBL_A53_PROCESSOR) {
		XFsbl_Printf(DEBUG_GENERAL,"Running on A53-0 ");
		FsblInstancePtr->ProcessorID = XIH_PH_ATTRB_DEST_CPU_A53_0;
#ifdef __aarch64__
		/* Running on A53 64-bit */
		XFsbl_Printf(DEBUG_GENERAL,"(64-bit) Processor \n\r");
		FsblInstancePtr->A53ExecState = XIH_PH_ATTRB_A53_EXEC_ST_AA64;
#else
		/* Running on A53 32-bit */
		XFsbl_Printf(DEBUG_GENERAL,"(32-bit) Processor \n\r");
		FsblInstancePtr->A53ExecState = XIH_PH_ATTRB_A53_EXEC_ST_AA32;
#endif
	} else if ((ClusterId & XFSBL_CLUSTER_ID_MASK) == XFSBL_R5_PROCESSOR) {
		/* A53ExecState is not valid for R5 */
		FsblInstancePtr->A53ExecState = XIH_INVALID_EXEC_ST;

		RegValue = XFsbl_In32(RPU_RPU_GLBL_CNTL);
		if ((RegValue & RPU_RPU_GLBL_CNTL_SLSPLIT_MASK) == 0U) {
			XFsbl_Printf(DEBUG_GENERAL,
				"Running on R5 Processor in Lockstep \n\r");
			FsblInstancePtr->ProcessorID =
					XIH_PH_ATTRB_DEST_CPU_R5_L;
		} else {
			XFsbl_Printf(DEBUG_GENERAL,
				"Running on R5-0 Processor \n\r");
			FsblInstancePtr->ProcessorID =
					XIH_PH_ATTRB_DEST_CPU_R5_0;
		}

		/**
		 * Update the Vector locations in R5 TCM
		 */
		while (Index<32U) {
			XFsbl_Out32(Index, XFSBL_R5_VECTOR_VALUE);
			Index += 4;
		}
	} else {
		Status = XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID;
		XFsbl_Printf(DEBUG_GENERAL,
				"XFSBL_ERROR_UNSUPPORTED_CLUSTER_ID\n\r");
		goto END;
	}

	/**
	 * Register the exception handlers
	 */
	XFsbl_RegisterHandlers();

	/* Prints for the perf measurement */
#ifdef XFSBL_PERF
#if !defined(ARMR5)
	if (FsblInstancePtr->ProcessorID == XIH_PH_ATTRB_DEST_CPU_A53_0) {
		XFsbl_Printf(DEBUG_PRINT_ALWAYS, "Proc: A53-0 Freq: %d Hz",
				XPAR_CPU_CORTEXA53_0_CPU_CLK_FREQ_HZ);
		if (FsblInstancePtr->A53ExecState ==
				XIH_PH_ATTRB_A53_EXEC_ST_AA32) {
			XFsbl_Printf(DEBUG_PRINT_ALWAYS, " Arch: 32 \r\n");
		} else if (FsblInstancePtr->A53ExecState ==
				XIH_PH_ATTRB_A53_EXEC_ST_AA64) {
			XFsbl_Printf(DEBUG_PRINT_ALWAYS, " Arch: 64 \r\n");
		}
	}
#else
	if (FsblInstancePtr->ProcessorID == XIH_PH_ATTRB_DEST_CPU_R5_0) {
		/* Fix: the original was missing the statement-terminating
		 * semicolon here, which broke the XFSBL_PERF R5 build. */
		XFsbl_Printf(DEBUG_PRINT_ALWAYS, "Proc: R5-0 Freq: %d Hz \r\n",
				XPAR_PSU_CORTEXR5_0_CPU_CLK_FREQ_HZ);
	} else if (FsblInstancePtr->ProcessorID == XIH_PH_ATTRB_DEST_CPU_R5_L) {
		XFsbl_Printf(DEBUG_PRINT_ALWAYS, "Proc: R5-Lockstep "
				"Freq: %d Hz \r\n",
				XPAR_PSU_CORTEXR5_0_CPU_CLK_FREQ_HZ);
	}
#endif
#endif

END:
	return Status;
}