/**
 * Family 15h Kaveri core 0 entry point for performing the necessary steps after
 * a warm reset has occurred.
 *
 * Runs F15KvPmNbAfterResetOnCore on one core of every module of the executing
 * socket (the per-core routine performs the BKDG-prescribed NB P-state save /
 * force-transition / restore sequence), then runs the same task on this core.
 *
 * @param[in]  FamilySpecificServices  The current Family Specific Services.
 * @param[in]  CpuEarlyParamsPtr       Service parameters
 * @param[in]  StdHeader               Config handle for library and services.
 *
 */
VOID
F15KvPmNbAfterReset (
  IN       CPU_SPECIFIC_SERVICES *FamilySpecificServices,
  IN       AMD_CPU_EARLY_PARAMS  *CpuEarlyParamsPtr,
  IN       AMD_CONFIG_PARAMS     *StdHeader
  )
{
  UINT32       MySocket;
  UINT32       MyModule;
  UINT32       MyCore;
  UINT32       ModulePrimaryCore;
  UINT32       UnusedHighCore;
  UINT32       ModuleIndex;
  AP_TASK      ApTask;
  AGESA_STATUS DummySts;

  IDS_HDT_CONSOLE (CPU_TRACE, " F15KvPmNbAfterReset\n");

  IdentifyCore (StdHeader, &MySocket, &MyModule, &MyCore, &DummySts);
  // This entry point must execute on the socket's core 0 only.
  ASSERT (MyCore == 0);

  // Launch one core per node.
  ApTask.FuncAddress.PfApTask = F15KvPmNbAfterResetOnCore;
  ApTask.DataTransfer.DataSizeInDwords = 0;
  ApTask.ExeFlags = WAIT_FOR_CORE;
  for (ModuleIndex = 0; ModuleIndex < GetPlatformNumberOfModules (); ModuleIndex++) {
    // Skip module 0's primary core (core 0 == this core); it runs the task last, below.
    if (GetGivenModuleCoreRange (MySocket, ModuleIndex, &ModulePrimaryCore, &UnusedHighCore, StdHeader) &&
        (ModulePrimaryCore != 0)) {
      ApUtilRunCodeOnSocketCore ((UINT8) MySocket, (UINT8) ModulePrimaryCore, &ApTask, StdHeader);
    }
  }
  // Execute the same task on the current core.
  ApUtilTaskOnExecutingCore (&ApTask, StdHeader, (VOID *) CpuEarlyParamsPtr);
}
/**
 * Family 15h Trinity core 0 entry point for performing the necessary steps after
 * a warm reset has occurred.
 *
 * If NB P-states are enabled, restores the saved NB P-state control register
 * (D18F5x170) value from the AMD_CPU_NB_PSTATE_FIXUP_HANDLE heap buffer, then
 * runs F15TnPmNbAfterResetOnCore on one core of every module of the executing
 * socket (the per-core routine performs the BKDG-prescribed NB P-state save /
 * force-transition / restore sequence), and finally on this core.
 *
 * @param[in]  FamilySpecificServices  The current Family Specific Services.
 * @param[in]  CpuEarlyParamsPtr       Service parameters
 * @param[in]  StdHeader               Config handle for library and services.
 *
 */
VOID
F15TnPmNbAfterReset (
  IN       CPU_SPECIFIC_SERVICES *FamilySpecificServices,
  IN       AMD_CPU_EARLY_PARAMS  *CpuEarlyParamsPtr,
  IN       AMD_CONFIG_PARAMS     *StdHeader
  )
{
  UINT32          MySocket;
  UINT32          MyModule;
  UINT32          MyCore;
  UINT32          ModulePrimaryCore;
  UINT32          UnusedHighCore;
  UINT32          ModuleIndex;
  AP_TASK         ApTask;
  PCI_ADDR        NbPsCtrlAddr;
  AGESA_STATUS    DummySts;
  LOCATE_HEAP_PTR HeapLocate;

  IDS_HDT_CONSOLE (CPU_TRACE, " F15TnPmNbAfterReset\n");

  IdentifyCore (StdHeader, &MySocket, &MyModule, &MyCore, &DummySts);
  // This entry point must execute on the socket's core 0 only.
  ASSERT (MyCore == 0);

  if (FamilySpecificServices->IsNbPstateEnabled (FamilySpecificServices, &CpuEarlyParamsPtr->PlatformConfig, StdHeader)) {
    // Restore the NB P-state control register value that was stashed on the heap
    // before the warm reset.
    NbPsCtrlAddr.AddressValue = NB_PSTATE_CTRL_PCI_ADDR;
    HeapLocate.BufferHandle = AMD_CPU_NB_PSTATE_FIXUP_HANDLE;
    if (HeapLocateBuffer (&HeapLocate, StdHeader) != AGESA_SUCCESS) {
      // The fixup buffer should always exist when NB P-states are enabled.
      ASSERT (FALSE);
    } else {
      LibAmdPciWrite (AccessWidth32, NbPsCtrlAddr, HeapLocate.BufferPtr, StdHeader);
    }
  }

  // Launch one core per node.
  ApTask.FuncAddress.PfApTask = F15TnPmNbAfterResetOnCore;
  ApTask.DataTransfer.DataSizeInDwords = 0;
  ApTask.ExeFlags = WAIT_FOR_CORE;
  for (ModuleIndex = 0; ModuleIndex < GetPlatformNumberOfModules (); ModuleIndex++) {
    // Skip module 0's primary core (core 0 == this core); it runs the task last, below.
    if (GetGivenModuleCoreRange (MySocket, ModuleIndex, &ModulePrimaryCore, &UnusedHighCore, StdHeader) &&
        (ModulePrimaryCore != 0)) {
      ApUtilRunCodeOnSocketCore ((UINT8) MySocket, (UINT8) ModulePrimaryCore, &ApTask, StdHeader);
    }
  }
  // Execute the same task on the current core.
  ApUtilTaskOnExecutingCore (&ApTask, StdHeader, (VOID *) CpuEarlyParamsPtr);
}
/**
 * Performs DQS training on all dies in parallel.
 *
 * The BSP launches the training feature on each remote die (the work runs on an
 * AP core of that die), trains its own die, then polls each AP's remote control
 * byte and pulls the trained data back through the AP data-transfer mechanism
 * into the BSP's die/DCT/channel structures. If any AP has not finished within
 * PARALLEL_TRAINING_TIMEOUT (and timeouts are enabled), a fatal error is logged.
 *
 * @param[in,out] *mmPtr - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @return TRUE - No fatal error occurs.
 * @return FALSE - Fatal error occurs.
 */
BOOLEAN
MemMParallelTraining (
  IN OUT   MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  MEM_DATA_STRUCT *MemPtr;
  MEM_NB_BLOCK *NBPtr;
  DIE_INFO TrainInfo[MAX_NODES_SUPPORTED];
  AP_DATA_TRANSFER ReturnData;
  AGESA_STATUS Status;
  UINT8 ApSts;
  UINT8 Die;
  UINT8 Socket;
  UINT32 Module;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 Time;
  UINT32 TimeOut;
  UINT32 TargetApicId;
  BOOLEAN StillTraining;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 *BufferPtr;
  BOOLEAN TimeoutEn;

  NBPtr = mmPtr->NBPtr;
  MemPtr = mmPtr->MemPtr;
  StdHeader = &(mmPtr->MemPtr->StdHeader);
  Time = 0;
  TimeOut = PARALLEL_TRAINING_TIMEOUT;
  TimeoutEn = TRUE;
  // Allow IDS debug hooks to disable the training timeout.
  IDS_TIMEOUT_CTL (&TimeoutEn);

  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart parallel training\n");
  AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, StdHeader);
  //
  // Initialize Training Info Array
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++) {
    Socket = TrainInfo[Die].Socket = NBPtr[Die].MCTPtr->SocketId;
    Module = NBPtr[Die].MCTPtr->DieId;
    // The die's training runs on the primary (lowest-numbered) core of the module.
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    TrainInfo[Die].Core = (UINT8) (LowCore & 0x000000FF);
    IDS_HDT_CONSOLE (MEM_FLOW, "\tLaunch core %d of socket %d\n", LowCore, Socket);
    TrainInfo[Die].Training = FALSE;
  }
  //
  // Start Training on Each remote die.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (Die != BSP_DIE) {
      NBPtr[Die].BeforeDqsTraining (&(mmPtr->NBPtr[Die]));
      if (NBPtr[Die].MCTPtr->NodeMemSize != 0) {
        if (!NBPtr[Die].FeatPtr->Training (&(mmPtr->NBPtr[Die]))) {
          // Fail to launch code on AP
          // NOTE(review): this log uses NBPtr->Node/Dct/Channel (die 0), not
          // NBPtr[Die] — looks like it attributes the failure to the BSP die;
          // confirm intent.
          PutEventLog (AGESA_ERROR, MEM_ERROR_PARALLEL_TRAINING_LAUNCH_FAIL, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
          SetMemError (AGESA_ERROR, NBPtr[Die].MCTPtr);
          MemPtr->ErrorHandling (NBPtr[Die].MCTPtr, EXCLUDE_ALL_DCT, EXCLUDE_ALL_CHIPSEL, &MemPtr->StdHeader);
        } else {
          TrainInfo[Die].Training = TRUE;
        }
      }
    }
  }
  //
  // Call training on BSP
  //
  IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NBPtr[BSP_DIE].Node);
  NBPtr[BSP_DIE].BeforeDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].TrainingFlow (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].AfterDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  //
  // Get Results from remote processors training
  //
  do {
    StillTraining = FALSE;
    for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
      //
      // For each Die that is training, read the status
      //
      if (TrainInfo[Die].Training == TRUE) {
        GetLocalApicIdForCore (TrainInfo[Die].Socket, TrainInfo[Die].Core, &TargetApicId, StdHeader);
        ApSts = ApUtilReadRemoteControlByte (TargetApicId, StdHeader);
        // Bit 7 clear means the AP has finished and has results to transmit.
        if ((ApSts & 0x80) == 0) {
          //
          // Allocate buffer for received data
          //
          // Size: the DIE_STRUCT, all DCT structs, each channel's CH_DEF_STRUCT
          // and MEM_PS_BLOCK, plus the per-channel delay and failure-mask
          // tables; +3 presumably rounds up for the dword transfer below —
          // TODO confirm.
          AllocHeapParams.RequestedBufferSize = (
            sizeof (DIE_STRUCT) +
            NBPtr[Die].DctCount * (
              sizeof (DCT_STRUCT) + (
                NBPtr[Die].ChannelCount * (
                  sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                    (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                     NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                     NUMBER_OF_DELAY_TABLES) +
                    (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                  )
                )
              )
            )
          ) + 3;
          AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Die, 0, 0);
          AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
          if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
            //
            // Receive Training Results
            //
            ReturnData.DataPtr = AllocHeapParams.BufferPtr;
            // NOTE(review): the (UINT16) cast binds to RequestedBufferSize
            // before the division by 4, so the size is truncated to 16 bits
            // first — confirm buffer sizes never exceed 64KB.
            ReturnData.DataSizeInDwords = (UINT16) AllocHeapParams.RequestedBufferSize / 4;
            ReturnData.DataTransferFlags = 0;
            Status = ApUtilReceiveBuffer (TrainInfo[Die].Socket, TrainInfo[Die].Core, &ReturnData, StdHeader);
            if (Status != AGESA_SUCCESS) {
              SetMemError (Status, NBPtr[Die].MCTPtr);
            }

            // Unpack the flat transfer buffer in the same order the AP packed
            // it: DIE_STRUCT, then DCT+channel structs, then PS blocks, then
            // the delay/failure-mask tables.
            BufferPtr = AllocHeapParams.BufferPtr;
            LibAmdMemCopy (NBPtr[Die].MCTPtr, BufferPtr, sizeof (DIE_STRUCT), StdHeader);
            BufferPtr += sizeof (DIE_STRUCT);
            LibAmdMemCopy ( NBPtr[Die].MCTPtr->DctData,
                            BufferPtr,
                            NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT)),
                            StdHeader);
            BufferPtr += NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT));
            LibAmdMemCopy ( NBPtr[Die].PSBlock,
                            BufferPtr,
                            NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK),
                            StdHeader);
            BufferPtr += NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK);
            LibAmdMemCopy ( NBPtr[Die].MCTPtr->DctData[0].ChData[0].RcvEnDlys,
                            BufferPtr,
                            (NBPtr[Die].DctCount * NBPtr[Die].ChannelCount) *
                            ((NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                              NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                              NUMBER_OF_DELAY_TABLES) +
                             (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                            ),
                            StdHeader);

            HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);
            NBPtr[Die].AfterDqsTraining (&(mmPtr->NBPtr[Die]));
            TrainInfo[Die].Training = FALSE;
          } else {
            PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_RECEIVED_DATA, NBPtr[Die].Node, 0, 0, 0, StdHeader);
            SetMemError (AGESA_FATAL, NBPtr[Die].MCTPtr);
            ASSERT(FALSE); // Insufficient Heap Space allocation for parallel training buffer
          }
        } else if (ApSts == CORE_IDLE) {
          // AP does not have buffer to transmit to BSP
          // AP fails to locate a buffer for data transfer
          TrainInfo[Die].Training = FALSE;
        } else {
          // Signal to loop through again
          StillTraining = TRUE;
        }
      }
    }
    // Wait for 1 us
    MemUWait10ns (100, NBPtr->MemPtr);
    Time ++;
  } while ((StillTraining) && ((Time < TimeOut) || !TimeoutEn)); // Continue until all Dies are finished
                                                                // if cannot finish in 1 s, do fatal exit

  if (StillTraining && TimeoutEn) {
    // Parallel training time out, do fatal exit, as there is at least one AP hangs.
    PutEventLog (AGESA_FATAL, MEM_ERROR_PARALLEL_TRAINING_TIME_OUT, 0, 0, 0, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, NBPtr[BSP_DIE].MCTPtr);
    ASSERT(FALSE); // Timeout occurred while still training
  }

  // Report FALSE if any die recorded a fatal training error.
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      return FALSE;
    }
  }
  return TRUE;
}
/**
 * Performs CPU related initialization at the early entry point
 *
 * This function performs a large list of initialization items.  These items
 * include:
 *
 *    -1      local APIC initialization
 *    -2      MSR table initialization
 *    -3      PCI table initialization
 *    -4      HT Phy PCI table initialization
 *    -5      microcode patch loading
 *    -6      namestring determination/programming
 *    -7      AP initialization
 *    -8      power management initialization
 *    -9      core leveling
 *
 * This routine must be run by all cores in the system.  Please note that
 * all APs that enter will never exit.
 *
 * @param[in]  StdHeader         Config handle for library and services
 * @param[in]  PlatformConfig    Config handle for platform specific information
 *
 * @retval     AGESA_SUCCESS
 *
 */
AGESA_STATUS
AmdCpuEarly (
  IN       AMD_CONFIG_PARAMS      *StdHeader,
  IN       PLATFORM_CONFIGURATION *PlatformConfig
  )
{
  UINT8         WaitStatus;
  UINT8         i;
  UINT8         StartCore;
  UINT8         EndCore;
  UINT32        NodeNum;
  UINT32        PrimaryCore;
  UINT32        SocketNum;
  UINT32        ModuleNum;
  UINT32        HighCore;
  UINT32        ApHeapIndex;
  UINT32        CurrentPerformEarlyFlag;
  UINT32        TargetApicId;
  AP_WAIT_FOR_STATUS WaitForStatus;
  AGESA_STATUS  Status;
  AGESA_STATUS  CalledStatus;
  CPU_SPECIFIC_SERVICES *FamilySpecificServices;
  AMD_CPU_EARLY_PARAMS  CpuEarlyParams;
  S_PERFORM_EARLY_INIT_ON_CORE *EarlyTableOnCore;

  Status = AGESA_SUCCESS;
  CalledStatus = AGESA_SUCCESS;

  AmdCpuEarlyInitializer (StdHeader, PlatformConfig, &CpuEarlyParams);

  IDS_OPTION_HOOK (IDS_CPU_Early_Override, &CpuEarlyParams, StdHeader);

  // Run the family-specific per-core early init table on this core (BSP and
  // APs all pass through this section).
  GetCpuServicesOfCurrentCore ((CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, StdHeader);
  EarlyTableOnCore = NULL;
  FamilySpecificServices->GetEarlyInitOnCoreTable (FamilySpecificServices, (CONST S_PERFORM_EARLY_INIT_ON_CORE **)&EarlyTableOnCore, &CpuEarlyParams, StdHeader);
  if (EarlyTableOnCore != NULL) {
    GetPerformEarlyFlag (&CurrentPerformEarlyFlag, StdHeader);
    // Table is NULL-terminated; each entry runs only when its flag matches
    // the current early-init flag.
    for (i = 0; EarlyTableOnCore[i].PerformEarlyInitOnCore != NULL; i++) {
      if ((EarlyTableOnCore[i].PerformEarlyInitFlag & CurrentPerformEarlyFlag) != 0) {
        IDS_HDT_CONSOLE (CPU_TRACE, " Perform core init step %d\n", i);
        EarlyTableOnCore[i].PerformEarlyInitOnCore (FamilySpecificServices, &CpuEarlyParams, StdHeader);
      }
    }
  }

  // B S P   C O D E   T O   I N I T I A L I Z E   A Ps
  // -------------------------------------------------------
  // -------------------------------------------------------
  // IMPORTANT: Here we determine if we are BSP or AP
  if (IsBsp (StdHeader, &CalledStatus)) {
    // Even though the bsc does not need to send itself a heap index, this sequence performs other important initialization.
    // Use '0' as a dummy heap index value.
    GetSocketModuleOfNode (0, &SocketNum, &ModuleNum, StdHeader);
    GetCpuServicesOfSocket (SocketNum, (CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, StdHeader);
    FamilySpecificServices->SetApCoreNumber (FamilySpecificServices, SocketNum, ModuleNum, 0, StdHeader);
    FamilySpecificServices->TransferApCoreNumber (FamilySpecificServices, StdHeader);

    // Clear BSP's Status Byte
    ApUtilWriteControlByte (CORE_ACTIVE, StdHeader);

    // Launch every non-BSP core in the system, one at a time, handing each a
    // unique heap index and waiting for it to report CORE_IDLE before
    // launching the next.
    NodeNum = 0;
    ApHeapIndex = 1;
    while (NodeNum < MAX_NODES &&
           GetSocketModuleOfNode (NodeNum, &SocketNum, &ModuleNum, StdHeader)) {
      GetCpuServicesOfSocket (SocketNum, (CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, StdHeader);
      GetGivenModuleCoreRange (SocketNum, ModuleNum, &PrimaryCore, &HighCore, StdHeader);
      // On node 0 skip the primary core — that is the BSP itself.
      if (NodeNum == 0) {
        StartCore = (UINT8) PrimaryCore + 1;
      } else {
        StartCore = (UINT8) PrimaryCore;
      }
      EndCore = (UINT8) HighCore;
      for (i = StartCore; i <= EndCore; i++) {
        FamilySpecificServices->SetApCoreNumber (FamilySpecificServices, SocketNum, ModuleNum, ApHeapIndex, StdHeader);
        IDS_HDT_CONSOLE (CPU_TRACE, " Launch socket %d core %d\n", SocketNum, i);
        if (FamilySpecificServices->LaunchApCore (FamilySpecificServices, SocketNum, ModuleNum, i, PrimaryCore, StdHeader)) {
          IDS_HDT_CONSOLE (CPU_TRACE, " Waiting for socket %d core %d\n", SocketNum, i);
          GetLocalApicIdForCore (SocketNum, i, &TargetApicId, StdHeader);
          WaitStatus = CORE_IDLE;
          WaitForStatus.Status = &WaitStatus;
          WaitForStatus.NumberOfElements = 1;
          WaitForStatus.RetryCount = WAIT_INFINITELY;
          WaitForStatus.WaitForStatusFlags = WAIT_STATUS_EQUALITY;
          ApUtilWaitForCoreStatus (TargetApicId, &WaitForStatus, StdHeader);
          // Heap index advances only for cores that actually launched.
          ApHeapIndex++;
        }
      }
      NodeNum++;
    }

    // B S P   P h a s e - 1   E N D

    IDS_OPTION_HOOK (IDS_BEFORE_PM_INIT, &CpuEarlyParams, StdHeader);
    AGESA_TESTPOINT (TpProcCpuBeforePMFeatureInit, StdHeader);
    IDS_HDT_CONSOLE (CPU_TRACE, " Dispatch CPU features before early power mgmt init\n");
    CalledStatus = DispatchCpuFeatures (CPU_FEAT_BEFORE_PM_INIT, PlatformConfig, StdHeader);
    // Keep the most severe status seen so far.
    if (CalledStatus > Status) {
      Status = CalledStatus;
    }

    AGESA_TESTPOINT (TpProcCpuPowerMgmtInit, StdHeader);
    CalledStatus = PmInitializationAtEarly (&CpuEarlyParams, StdHeader);
    if (CalledStatus > Status) {
      Status = CalledStatus;
    }

    AGESA_TESTPOINT (TpProcCpuEarlyFeatureInit, StdHeader);
    IDS_HDT_CONSOLE (CPU_TRACE, " Dispatch CPU features after early power mgmt init\n");
    CalledStatus = DispatchCpuFeatures (CPU_FEAT_AFTER_PM_INIT, PlatformConfig, StdHeader);

    IDS_OPTION_HOOK (IDS_BEFORE_AP_EARLY_HALT, &CpuEarlyParams, StdHeader);

    // Sleep all APs
    IDS_HDT_CONSOLE (CPU_TRACE, " Halting all APs\n");
    ApUtilWriteControlByte (CORE_IDLE_HLT, StdHeader);
  } else {
    // APs enter here and never return (see function header).
    ApEntry (StdHeader, &CpuEarlyParams);
  }

  // Fold in the status from the last dispatch (or from IsBsp on the AP path).
  if (CalledStatus > Status) {
    Status = CalledStatus;
  }

  return (Status);
}
/**
 * Performs core leveling for the system.
 *
 * This function implements the AMD_CPU_EARLY_PARAMS.CoreLevelingMode parameter.
 * The possible modes are:
 *    -0    CORE_LEVEL_LOWEST            Level to lowest common denominator
 *    -1    CORE_LEVEL_TWO               Level to 2 cores
 *    -2    CORE_LEVEL_POWER_OF_TWO      Level to 1,2,4 or 8
 *    -3    CORE_LEVEL_NONE              Do no leveling
 *    -4    CORE_LEVEL_COMPUTE_UNIT      Level cores to one core per compute unit
 *
 * @param[in]  EntryPoint        Timepoint designator.
 * @param[in]  PlatformConfig    Contains the leveling mode parameter
 * @param[in]  StdHeader         Config handle for library and services
 *
 * @return     The most severe status of any family specific service.
 *
 */
AGESA_STATUS
CoreLevelingAtEarly (
  IN       UINT64                 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  UINT32    CoreNumPerComputeUnit;
  UINT32    MinNumOfComputeUnit;
  UINT32    EnabledComputeUnit;
  UINT32    Socket;
  UINT32    Module;
  UINT32    NumberOfSockets;
  UINT32    NumberOfModules;
  UINT32    MinCoreCountOnNode;
  UINT32    MaxCoreCountOnNode;
  UINT32    LowCore;
  UINT32    HighCore;
  UINT32    LeveledCores;
  UINT32    RequestedCores;
  UINT32    TotalEnabledCoresOnNode;
  BOOLEAN   RegUpdated;
  AP_MAIL_INFO ApMailboxInfo;
  CORE_LEVELING_TYPE  CoreLevelMode;
  CPU_CORE_LEVELING_FAMILY_SERVICES  *FamilySpecificServices;
  WARM_RESET_REQUEST Request;

  MaxCoreCountOnNode = 0;
  MinCoreCountOnNode = 0xFFFFFFFF;
  LeveledCores = 0;
  CoreNumPerComputeUnit = 1;
  MinNumOfComputeUnit = 0xFF;

  ASSERT (PlatformConfig->CoreLevelingMode < CoreLevelModeMax);

  // Get OEM IO core level mode
  CoreLevelMode = (CORE_LEVELING_TYPE) PlatformConfig->CoreLevelingMode;

  // Get socket count
  NumberOfSockets = GetPlatformNumberOfSockets ();
  GetApMailbox (&ApMailboxInfo.Info, StdHeader);
  NumberOfModules = ApMailboxInfo.Fields.ModuleType + 1;

  // Collect cpu core info
  for (Socket = 0; Socket < NumberOfSockets; Socket++) {
    if (IsProcessorPresent (Socket, StdHeader)) {
      for (Module = 0; Module < NumberOfModules; Module++) {
        if (GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader)) {
          // Get the highest and lowest core count in all nodes
          TotalEnabledCoresOnNode = HighCore - LowCore + 1;
          if (TotalEnabledCoresOnNode < MinCoreCountOnNode) {
            MinCoreCountOnNode = TotalEnabledCoresOnNode;
          }
          if (TotalEnabledCoresOnNode > MaxCoreCountOnNode) {
            MaxCoreCountOnNode = TotalEnabledCoresOnNode;
          }
          EnabledComputeUnit = TotalEnabledCoresOnNode;
          switch (GetComputeUnitMapping (StdHeader)) {
          case AllCoresMapping:
            // All cores are in their own compute unit.
            break;
          case EvenCoresMapping:
            // Cores are paired in compute units.
            CoreNumPerComputeUnit = 2;
            EnabledComputeUnit = (TotalEnabledCoresOnNode / 2);
            break;
          default:
            ASSERT (FALSE);
          }
          // Get minimum of compute unit.  This will either be the minimum number of cores (AllCoresMapping),
          // or less (EvenCoresMapping).
          if (EnabledComputeUnit < MinNumOfComputeUnit) {
            MinNumOfComputeUnit = EnabledComputeUnit;
          }
        }
      }
    }
  }

  // Get LeveledCores
  switch (CoreLevelMode) {
  case CORE_LEVEL_LOWEST:
    // Nothing to do if every node already has the same core count.
    if (MinCoreCountOnNode == MaxCoreCountOnNode) {
      return (AGESA_SUCCESS);
    }
    // Round down to a whole number of compute units.
    LeveledCores = (MinCoreCountOnNode / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    break;
  case CORE_LEVEL_TWO:
    // Two cores for the whole processor, split across its modules.
    LeveledCores = 2 / NumberOfModules;
    if (LeveledCores != 0) {
      LeveledCores = (LeveledCores <= MinCoreCountOnNode) ? LeveledCores : MinCoreCountOnNode;
    } else {
      return (AGESA_WARNING);
    }
    if ((LeveledCores * NumberOfModules) != 2) {
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      2, (LeveledCores * NumberOfModules), 0, 0, StdHeader
      );
    }
    break;
  case CORE_LEVEL_POWER_OF_TWO:
    // Level to power of 2 (1, 2, 4, 8...)
    LeveledCores = 1;
    while (MinCoreCountOnNode >= (LeveledCores * 2)) {
      LeveledCores = LeveledCores * 2;
    }
    break;
  case CORE_LEVEL_COMPUTE_UNIT:
    // Level cores to one core per compute unit, with additional reduction to level
    // all processors to match the processor with the minimum number of cores.
    if (CoreNumPerComputeUnit == 1) {
      // If there is one core per compute unit, this is the same as CORE_LEVEL_LOWEST.
      if (MinCoreCountOnNode == MaxCoreCountOnNode) {
        return (AGESA_SUCCESS);
      }
      LeveledCores = MinCoreCountOnNode;
    } else {
      // If there are more than one core per compute unit, level to the number of compute units.
      LeveledCores = MinNumOfComputeUnit;
    }
    break;
  case CORE_LEVEL_ONE:
    LeveledCores = 1;
    if (NumberOfModules > 1) {
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      1, NumberOfModules, 0, 0, StdHeader
      );
    }
    break;
  case CORE_LEVEL_THREE:
  case CORE_LEVEL_FOUR:
  case CORE_LEVEL_FIVE:
  case CORE_LEVEL_SIX:
  case CORE_LEVEL_SEVEN:
  case CORE_LEVEL_EIGHT:
  case CORE_LEVEL_NINE:
  case CORE_LEVEL_TEN:
  case CORE_LEVEL_ELEVEN:
  case CORE_LEVEL_TWELVE:
  case CORE_LEVEL_THIRTEEN:
  case CORE_LEVEL_FOURTEEN:
  case CORE_LEVEL_FIFTEEN:
    // MCM processors can not have an odd number of cores. For an odd CORE_LEVEL_N, MCM processors will be
    // leveled as though CORE_LEVEL_N+1 was chosen.
    // Processors with compute units disable all cores in an entire compute unit at a time, or on an MCM processor,
    // two compute units at a time. For example, on an SCM processor with two cores per compute unit, the effective
    // explicit levels are CORE_LEVEL_ONE, CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_SIX, and
    // CORE_LEVEL_EIGHT. The same example for an MCM processor with two cores per compute unit has effective
    // explicit levels of CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_EIGHT, and CORE_LEVEL_TWELVE.
    RequestedCores = CoreLevelMode - CORE_LEVEL_THREE + 3;
    // Per-module share, rounded up, then rounded down to whole compute units
    // and clamped to the smallest node's core count.
    LeveledCores = (RequestedCores + NumberOfModules - 1) / NumberOfModules;
    LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    LeveledCores = (LeveledCores <= MinCoreCountOnNode) ? LeveledCores : MinCoreCountOnNode;
    if (LeveledCores != 1) {
      LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    }
    // NOTE(review): the achieved-count expression multiplies by
    // CoreNumPerComputeUnit even though LeveledCores is already a core count —
    // verify this matches the intended event-log semantics.
    if ((LeveledCores * NumberOfModules * CoreNumPerComputeUnit) != RequestedCores) {
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      RequestedCores, (LeveledCores * NumberOfModules * CoreNumPerComputeUnit), 0, 0, StdHeader
      );
    }
    break;
  default:
    ASSERT (FALSE);
  }

  // Set down core register
  for (Socket = 0; Socket < NumberOfSockets; Socket++) {
    if (IsProcessorPresent (Socket, StdHeader)) {
      GetFeatureServicesOfSocket (&CoreLevelingFamilyServiceTable, Socket, &FamilySpecificServices, StdHeader);
      if (FamilySpecificServices != NULL) {
        for (Module = 0; Module < NumberOfModules; Module++) {
          RegUpdated = FamilySpecificServices->SetDownCoreRegister (FamilySpecificServices, &Socket, &Module, &LeveledCores, CoreLevelMode, StdHeader);
          // If the down core register is updated, trigger a warm reset.
          if (RegUpdated) {
            GetWarmResetFlag (StdHeader, &Request);
            Request.RequestBit = TRUE;
            Request.StateBits = Request.PostStage - 1;
            SetWarmResetFlag (StdHeader, &Request);
          }
        }
      }
    }
  }

  return (AGESA_SUCCESS);
}
/**
 * Family 10h core 0 entry point for performing power plane initialization.
 *
 * The steps are as follows:
 *    1. If single plane, program lower VID code of CpuVid & NbVid for all
 *       enabled P-States.
 *    2. Configure F3xA0[SlamMode] & F3xD8[VsRampTime & VsSlamTime] based on
 *       platform requirements.
 *    3. Configure F3xD4[PowerStepUp & PowerStepDown]
 *    4. Optionally configure F3xA0[PsiVidEn & PsiVid]
 *
 * @param[in]  FamilySpecificServices  The current Family Specific Services.
 * @param[in]  CpuEarlyParams          Service parameters
 * @param[in]  StdHeader               Config handle for library and services.
 *
 */
VOID
F10CpuAmdPmPwrPlaneInit (
  IN       CPU_SPECIFIC_SERVICES *FamilySpecificServices,
  IN       AMD_CPU_EARLY_PARAMS  *CpuEarlyParams,
  IN       AMD_CONFIG_PARAMS     *StdHeader
  )
{
  BOOLEAN   PviModeFlag;
  PCI_ADDR  PciAddress;
  UINT16    PowerStepTime;
  UINT32    PowerStepEncoded;
  UINT32    PciRegister;
  UINT32    VsSlamTime;
  UINT32    Socket;
  UINT32    Module;
  UINT32    Core;
  UINT32    NumOfCores;
  UINT32    LowCore;
  UINT32    AndMask;
  UINT32    OrMask;
  UINT64    MsrRegister;
  AP_TASK   TaskPtr;
  AGESA_STATUS IgnoredSts;
  PLATFORM_FEATS Features;
  CPU_LOGICAL_ID LogicalId;

  // Initialize the union
  Features.PlatformValue = 0;
  GetPlatformFeatures (&Features, &CpuEarlyParams->PlatformConfig, StdHeader);

  IdentifyCore (StdHeader, &Socket, &Module, &Core, &IgnoredSts);
  GetPciAddress (StdHeader, Socket, Module, &PciAddress, &IgnoredSts);
  // This entry point must execute on the socket's core 0 only.
  ASSERT (Core == 0);

  GetLogicalIdOfCurrentCore (&LogicalId, StdHeader);

  // Set SlamVidMode: read F3xA0, then build AND/OR masks through bitfield
  // overlays on local UINT32s and apply with a read-modify-write.
  PciAddress.Address.Function = FUNC_3;
  PciAddress.Address.Register = PW_CTL_MISC_REG;
  LibAmdPciRead (AccessWidth32, PciAddress, &PciRegister, StdHeader);
  AndMask = 0xFFFFFFFF;
  OrMask = 0x00000000;
  if (((POWER_CTRL_MISC_REGISTER *) &PciRegister)->PviMode == 1) {
    // Parallel VID (single plane) platform.
    PviModeFlag = TRUE;
    ((POWER_CTRL_MISC_REGISTER *) &AndMask)->SlamVidMode = 0;

    // Have all single plane cores adjust their NB and CPU VID fields
    TaskPtr.FuncAddress.PfApTask = F10PmPwrPlaneInitPviCore;
    TaskPtr.DataTransfer.DataSizeInDwords = 0;
    TaskPtr.ExeFlags = WAIT_FOR_CORE;
    ApUtilRunCodeOnAllLocalCoresAtEarly (&TaskPtr, StdHeader, CpuEarlyParams);
  } else {
    PviModeFlag = FALSE;
    ((POWER_CTRL_MISC_REGISTER *) &OrMask)->SlamVidMode = 1;
  }
  ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader);

  F10ProgramVSSlamTimeOnSocket (&PciAddress, CpuEarlyParams, StdHeader);

  // Configure PowerStepUp/PowerStepDown
  PciAddress.Address.Register = CPTC0_REG;
  if ((Features.PlatformFeatures.PlatformSingleLink == 1) ||
      (Features.PlatformFeatures.PlatformUma == 1) ||
      (Features.PlatformFeatures.PlatformUmaIfcm == 1) ||
      (Features.PlatformFeatures.PlatformIfcm == 1) ||
      (Features.PlatformFeatures.PlatformIommu == 1)) {
    PowerStepEncoded = 0x8;
  } else {
    // Pick the smallest encoding whose step time covers 400ns divided across
    // the enabled cores of this module.
    GetGivenModuleCoreRange ((UINT32) Socket, (UINT32) Module, &LowCore, &NumOfCores, StdHeader);
    NumOfCores = ((NumOfCores - LowCore) + 1);
    PowerStepTime = (UINT16) (400 / NumOfCores);
    for (PowerStepEncoded = 0xF; PowerStepEncoded > 0; PowerStepEncoded--) {
      if (PowerStepTime <= PowerStepEncodings[PowerStepEncoded]) {
        break;
      }
    }
  }
  AndMask = 0xFFFFFFFF;
  ((CLK_PWR_TIMING_CTRL_REGISTER *) &AndMask)->PowerStepUp = 0;
  ((CLK_PWR_TIMING_CTRL_REGISTER *) &AndMask)->PowerStepDown = 0;
  OrMask = 0x00000000;
  ((CLK_PWR_TIMING_CTRL_REGISTER *) &OrMask)->PowerStepUp = PowerStepEncoded;
  ((CLK_PWR_TIMING_CTRL_REGISTER *) &OrMask)->PowerStepDown = PowerStepEncoded;
  ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader);

  // The remaining programming applies only to revision C3 parts.
  if ((LogicalId.Revision & AMD_F10_C3) != 0) {
    // Set up Pop up P-state register
    PciAddress.Address.Register = CPTC2_REG;
    LibAmdPciRead (AccessWidth32, PciAddress, &PciRegister, StdHeader);
    AndMask = 0xFFFFFFFF;
    ((POPUP_PSTATE_REGISTER *) &AndMask)->PopupPstate = 0;
    ((POPUP_PSTATE_REGISTER *) &AndMask)->PopupCpuVid = 0;
    ((POPUP_PSTATE_REGISTER *) &AndMask)->PopupCpuFid = 0;
    ((POPUP_PSTATE_REGISTER *) &AndMask)->PopupCpuDid = 0;
    OrMask = 0x00000000;
    ((POPUP_PSTATE_REGISTER *) &OrMask)->PopupEn = 0;
    // Pop-up state mirrors the lowest-performance P-state (PstateMaxVal) and
    // its VID/FID/DID from the corresponding P-state MSR.
    ((POPUP_PSTATE_REGISTER *) &OrMask)->PopupPstate = ((CLK_PWR_TIMING_CTRL2_REGISTER *) &PciRegister)->PstateMaxVal;
    LibAmdMsrRead ((((CLK_PWR_TIMING_CTRL2_REGISTER *) &PciRegister)->PstateMaxVal + PS_REG_BASE), &MsrRegister, StdHeader);
    ((POPUP_PSTATE_REGISTER *) &OrMask)->PopupCpuVid = (UINT32) ((PSTATE_MSR *) &MsrRegister)->CpuVid;
    ((POPUP_PSTATE_REGISTER *) &OrMask)->PopupCpuFid = (UINT32) ((PSTATE_MSR *) &MsrRegister)->CpuFid;
    ((POPUP_PSTATE_REGISTER *) &OrMask)->PopupCpuDid = (UINT32) ((PSTATE_MSR *) &MsrRegister)->CpuDid;
    PciAddress.Address.Register = POPUP_PSTATE_REG;
    ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader);

    // Set AltVidStart
    PciAddress.Address.Register = CPTC1_REG;
    AndMask = 0xFFFFFFFF;
    ((CLK_PWR_TIMING_CTRL1_REGISTER *) &AndMask)->AltVidStart = 0;
    OrMask = 0x00000000;
    ((CLK_PWR_TIMING_CTRL1_REGISTER *) &OrMask)->AltVidStart = (UINT32) ((PSTATE_MSR *) &MsrRegister)->CpuVid;
    ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader);

    // Set up Altvid slam time
    PciAddress.Address.Register = CPTC2_REG;
    VsSlamTime = F10CalculateAltvidVSSlamTimeOnCore (PviModeFlag, &PciAddress, CpuEarlyParams, StdHeader);
    AndMask = 0xFFFFFFFF;
    ((CLK_PWR_TIMING_CTRL2_REGISTER *) &AndMask)->AltvidVSSlamTime = 0;
    ((CLK_PWR_TIMING_CTRL2_REGISTER *) &AndMask)->SlamTimeMode = 0;
    OrMask = 0x00000000;
    ((CLK_PWR_TIMING_CTRL2_REGISTER *) &OrMask)->AltvidVSSlamTime = VsSlamTime;
    ((CLK_PWR_TIMING_CTRL2_REGISTER *) &OrMask)->SlamTimeMode = 2;
    ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader);
  }

  if (IsWarmReset (StdHeader) && !PviModeFlag) {
    // Configure PsiVid
    F10PmVrmLowPowerModeEnable (FamilySpecificServices, CpuEarlyParams, StdHeader);
  }
}
/**
 * Performs core leveling for the system.
 *
 * This function implements the AMD_CPU_EARLY_PARAMS.CoreLevelingMode parameter.
 * The possible modes are:
 *    -0    CORE_LEVEL_LOWEST            Level to lowest common denominator
 *    -1    CORE_LEVEL_TWO               Level to 2 cores
 *    -2    CORE_LEVEL_POWER_OF_TWO      Level to 1,2,4 or 8
 *    -3    CORE_LEVEL_NONE              Do no leveling
 *    -4    CORE_LEVEL_COMPUTE_UNIT      Level cores to one core per compute unit
 *
 * (This variant also handles CORE_LEVEL_COMPUTE_UNIT_TWO/_THREE and the
 * explicit CORE_LEVEL_ONE through CORE_LEVEL_FIFTEEN counts, operating on the
 * single node at socket 0 / module 0.)
 *
 * @param[in]  EntryPoint        Timepoint designator.
 * @param[in]  PlatformConfig    Contains the leveling mode parameter
 * @param[in]  StdHeader         Config handle for library and services
 *
 * @return     The most severe status of any family specific service.
 *
 */
AGESA_STATUS
CoreLevelingAtEarly (
  IN       UINT64                 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  UINT32    CoreNumPerComputeUnit;
  UINT32    EnabledComputeUnit;
  UINT32    SocketAndModule;
  UINT32    LowCore;
  UINT32    HighCore;
  UINT32    LeveledCores;
  UINT32    RequestedCores;
  UINT32    TotalEnabledCoresOnNode;
  BOOLEAN   RegUpdated;
  CORE_LEVELING_TYPE  CoreLevelMode;
  CPU_CORE_LEVELING_FAMILY_SERVICES  *FamilySpecificServices;
  WARM_RESET_REQUEST Request;

  IDS_HDT_CONSOLE (CPU_TRACE, "CoreLevelingAtEarly\n CoreLevelMode: %d\n", PlatformConfig->CoreLevelingMode);

  LeveledCores = 0;
  SocketAndModule = 0;

  ASSERT (PlatformConfig->CoreLevelingMode < CoreLevelModeMax);

  // Get OEM IO core level mode
  CoreLevelMode = (CORE_LEVELING_TYPE) PlatformConfig->CoreLevelingMode;

  // Collect cpu core info (single node: socket 0, module 0)
  GetGivenModuleCoreRange (0, 0, &LowCore, &HighCore, StdHeader);
  // Get the highest and lowest core count in all nodes
  TotalEnabledCoresOnNode = HighCore - LowCore + 1;

  // Derive cores-per-compute-unit and the number of enabled compute units
  // from the family's compute unit mapping.
  switch (GetComputeUnitMapping (StdHeader)) {
  case AllCoresMapping:
    // All cores are in their own compute unit.
    CoreNumPerComputeUnit = 1;
    EnabledComputeUnit = TotalEnabledCoresOnNode;
    break;
  case EvenCoresMapping:
    // Cores are paired in compute units.
    CoreNumPerComputeUnit = 2;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 2);
    break;
  case TripleCoresMapping:
    // Three cores are grouped in compute units.
    CoreNumPerComputeUnit = 3;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 3);
    break;
  case QuadCoresMapping:
    // Four cores are grouped in compute units.
    CoreNumPerComputeUnit = 4;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 4);
    break;
  default:
    // Unknown mapping — fall back to one core per compute unit.
    CoreNumPerComputeUnit = 1;
    EnabledComputeUnit = TotalEnabledCoresOnNode;
    ASSERT (FALSE);
  }

  IDS_HDT_CONSOLE (CPU_TRACE, " TotalEnabledCoresOnNode %d EnabledComputeUnit %d\n", \
  TotalEnabledCoresOnNode, EnabledComputeUnit);

  // Get LeveledCores
  switch (CoreLevelMode) {
  case CORE_LEVEL_LOWEST:
    // Single node: nothing to level down to.
    return (AGESA_SUCCESS);
    break;
  case CORE_LEVEL_TWO:
    LeveledCores = 2;
    LeveledCores = (LeveledCores <= TotalEnabledCoresOnNode) ? LeveledCores : TotalEnabledCoresOnNode;
    if (LeveledCores != 2) {
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      2, LeveledCores, 0, 0, StdHeader
      );
    }
    break;
  case CORE_LEVEL_POWER_OF_TWO:
    // Level to power of 2 (1, 2, 4, 8...)
    LeveledCores = 1;
    while (TotalEnabledCoresOnNode >= (LeveledCores * 2)) {
      LeveledCores = LeveledCores * 2;
    }
    break;
  case CORE_LEVEL_COMPUTE_UNIT:
  case CORE_LEVEL_COMPUTE_UNIT_TWO:
  case CORE_LEVEL_COMPUTE_UNIT_THREE:
    // Level cores to 1~3 core(s) per compute unit, with additional reduction to level
    // all processors to match the processor with the minimum number of cores.
    if (CoreNumPerComputeUnit == 1) {
      // If there is one core per compute unit, this is the same as CORE_LEVEL_LOWEST.
      return (AGESA_SUCCESS);
    } else {
      // If there are more than one core per compute unit, level to the number of compute units * cores per compute unit.
      LeveledCores = EnabledComputeUnit * (CoreLevelMode - CORE_LEVEL_COMPUTE_UNIT + 1);
    }
    break;
  case CORE_LEVEL_ONE:
    LeveledCores = 1;
    break;
  case CORE_LEVEL_THREE:
  case CORE_LEVEL_FOUR:
  case CORE_LEVEL_FIVE:
  case CORE_LEVEL_SIX:
  case CORE_LEVEL_SEVEN:
  case CORE_LEVEL_EIGHT:
  case CORE_LEVEL_NINE:
  case CORE_LEVEL_TEN:
  case CORE_LEVEL_ELEVEN:
  case CORE_LEVEL_TWELVE:
  case CORE_LEVEL_THIRTEEN:
  case CORE_LEVEL_FOURTEEN:
  case CORE_LEVEL_FIFTEEN:
    // Processors with compute units disable all cores in an entire compute unit at a time
    // For example, on a processor with two cores per compute unit, the effective
    // explicit levels are CORE_LEVEL_ONE, CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_SIX, and
    // CORE_LEVEL_EIGHT.
    RequestedCores = CoreLevelMode - CORE_LEVEL_THREE + 3;
    LeveledCores = RequestedCores;
    // Round down to a whole number of compute units and clamp to the enabled
    // core count.
    LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    LeveledCores = (LeveledCores <= TotalEnabledCoresOnNode) ? LeveledCores : TotalEnabledCoresOnNode;
    if (LeveledCores != 1) {
      LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    }
    if (LeveledCores != RequestedCores) {
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      RequestedCores, LeveledCores, 0, 0, StdHeader
      );
    }
    break;
  default:
    ASSERT (FALSE);
  }

  // Set down core register
  GetFeatureServicesOfSocket (&CoreLevelingFamilyServiceTable, 0, (CONST VOID **)&FamilySpecificServices, StdHeader);
  if (FamilySpecificServices != NULL) {
    IDS_HDT_CONSOLE (CPU_TRACE, " SetDownCoreRegister: LeveledCores %d CoreLevelMode %d\n", LeveledCores, CoreLevelMode);
    RegUpdated = FamilySpecificServices->SetDownCoreRegister (FamilySpecificServices, &SocketAndModule, &SocketAndModule, &LeveledCores, CoreLevelMode, StdHeader);
    // If the down core register is updated, trigger a warm reset.
    if (RegUpdated) {
      GetWarmResetFlag (StdHeader, &Request);
      Request.RequestBit = TRUE;
      Request.StateBits = Request.PostStage - 1;
      IDS_HDT_CONSOLE (CPU_TRACE, " Request a warm reset.\n");
      SetWarmResetFlag (StdHeader, &Request);
    }
  }

  return (AGESA_SUCCESS);
}
/**
 *
 *
 * This is the training function which set up the environment for remote
 * training on the ap and launches the remote routine.
 *
 * The remote environment (REMOTE_TRAINING_ENV) plus a snapshot of the
 * node's DCT data is packed into a single heap buffer, transferred to the
 * target AP's first core, executed, and the buffer is then freed.
 *
 * @param[in,out]   *NBPtr - Pointer to the MEM_NB_BLOCK
 *
 * @return          TRUE -  Launch training on AP successfully.
 * @return          FALSE - Fail to launch training on AP.
 */
BOOLEAN
MemFParallelTrainingHy (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  DIE_STRUCT *MCTPtr;
  REMOTE_TRAINING_ENV *EnvPtr;
  AP_TASK TrainingTask;
  UINT8 Socket;
  UINT8 Module;
  UINT8 APCore;
  UINT8 p;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 BspSocket;
  UINT32 BspModule;
  UINT32 BspCore;
  AGESA_STATUS Status;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT16 MctDataSize;

  StdHeader = &(NBPtr->MemPtr->StdHeader);
  MCTPtr = NBPtr->MCTPtr;
  Socket = MCTPtr->SocketId;
  Module = MCTPtr->DieId;

  //
  // Allocate buffer for REMOTE_TRAINING_ENV
  //
  // Size of the per-node DCT snapshot that travels behind the env struct:
  // all DCTs, each with its channel definitions and platform-specific blocks.
  MctDataSize = MAX_DCTS_PER_NODE_HY * (
                  sizeof (DCT_STRUCT) + (
                    MAX_CHANNELS_PER_DCT_HY * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                  )
                );
  AllocHeapParams.RequestedBufferSize = MctDataSize + sizeof (REMOTE_TRAINING_ENV);
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Socket, Module, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    EnvPtr = (REMOTE_TRAINING_ENV *) AllocHeapParams.BufferPtr;
    // Advance past the env header; DCT data is copied at this offset below,
    // so env struct and DCT snapshot share one contiguous transfer buffer.
    AllocHeapParams.BufferPtr += sizeof (REMOTE_TRAINING_ENV);

    //
    // Setup Remote training environment
    //
    LibAmdMemCopy (&(EnvPtr->StdHeader), StdHeader, sizeof (AMD_CONFIG_PARAMS), StdHeader);
    LibAmdMemCopy (&(EnvPtr->DieStruct), MCTPtr, sizeof (DIE_STRUCT), StdHeader);
    for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
      EnvPtr->GetPlatformCfg[p] = NBPtr->MemPtr->GetPlatformCfg[p];
    }
    EnvPtr->ErrorHandling = NBPtr->MemPtr->ErrorHandling;
    // The AP reconstructs its NB block with the HY-specific remote constructor.
    EnvPtr->NBBlockCtor = MemConstructRemoteNBBlockHY;
    EnvPtr->FeatPtr = NBPtr->FeatPtr;
    EnvPtr->HoleBase = NBPtr->RefPtr->HoleBase;
    EnvPtr->BottomIo = NBPtr->RefPtr->BottomIo;
    EnvPtr->UmaSize = NBPtr->RefPtr->UmaSize;
    EnvPtr->SysLimit = NBPtr->RefPtr->SysLimit;
    EnvPtr->TableBasedAlterations = NBPtr->RefPtr->TableBasedAlterations;
    EnvPtr->PlatformMemoryConfiguration = NBPtr->RefPtr->PlatformMemoryConfiguration;

    LibAmdMemCopy (AllocHeapParams.BufferPtr, MCTPtr->DctData, MctDataSize, StdHeader);

    //
    // Get Socket, Core of the BSP
    //
    IdentifyCore (StdHeader, &BspSocket, &BspModule, &BspCore, &Status);
    // NOTE(review): the & 0x000000FF after the UINT8 cast is redundant (the
    // cast already truncates); kept as-is.
    EnvPtr->BspSocket = ((UINT8)BspSocket & 0x000000FF);
    EnvPtr->BspCore = ((UINT8)BspCore & 0x000000FF);

    //
    // Set up the remote task structure
    //
    TrainingTask.DataTransfer.DataPtr = EnvPtr;
    // Round the byte size up to whole dwords for the AP transfer.
    TrainingTask.DataTransfer.DataSizeInDwords = (UINT16) (AllocHeapParams.RequestedBufferSize + 3) / 4;
    TrainingTask.DataTransfer.DataTransferFlags = 0;
    TrainingTask.ExeFlags = 0;
    TrainingTask.FuncAddress.PfApTaskI = (PF_AP_TASK_I)MemFParallelTraining;

    //
    // Get Target AP Core
    //
    // Training runs on the first (lowest-numbered) core of the target module.
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    APCore = (UINT8) (LowCore & 0x000000FF);

    //
    // Launch Remote Training
    //
    ApUtilRunCodeOnSocketCore (Socket, APCore, &TrainingTask, StdHeader);

    // The env was copied to the AP as part of the task transfer, so the
    // local buffer can be released immediately after launch.
    HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);

    return TRUE;
  } else {
    // Heap allocation failed: log, record a fatal memory error, and bail.
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_REMOTE_TRAINING_ENV, NBPtr->Node, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocated heap space for "REMOTE_TRAINING_ENV"
    return FALSE;
  }
}
/**
 * Family 15h core 0 entry point for performing the family 15h Processor-
 * Systemboard Power Delivery Check.
 *
 * The steps are as follows:
 *    1. Starting with P0, loop through all P-states until a passing state is
 *       found.  A passing state is one in which the current required by the
 *       CPU is less than the maximum amount of current that the system can
 *       provide to the CPU.  If P0 is under the limit, no further action is
 *       necessary.
 *    2. If at least one P-State is under the limit & at least one P-State is
 *       over the limit, the BIOS must:
 *       a. If the processor's current P-State is disabled by the power check,
 *          then the BIOS must request a transition to an enabled P-state
 *          using MSRC001_0062[PstateCmd] and wait for MSRC001_0063[CurPstate]
 *          to reflect the new value.
 *       b. Copy the contents of the enabled P-state MSRs to the highest
 *          performance P-state locations.
 *       c. Request a P-state transition to the P-state MSR containing the
 *          COF/VID values currently applied.
 *       d. If a subset of boosted P-states are disabled, then copy the contents
 *          of the highest performance boosted P-state still enabled to the
 *          boosted P-states that have been disabled.
 *       e. If all boosted P-states are disabled, then program D18F4x15C[BoostSrc]
 *          to zero.
 *       f. Adjust the following P-state parameters affected by the P-state
 *          MSR copy by subtracting the number of P-states that are disabled
 *          by the power check.
 *          1. F3x64[HtcPstateLimit]
 *          2. F3x68[SwPstateLimit]
 *          3. F3xDC[PstateMaxVal]
 *    3. If all P-States are over the limit, the BIOS must:
 *       a. If the processor's current P-State is !=F3xDC[PstateMaxVal], then
 *          write F3xDC[PstateMaxVal] to MSRC001_0062[PstateCmd] and wait for
 *          MSRC001_0063[CurPstate] to reflect the new value.
 *       b. If MSRC001_0061[PstateMaxVal]!=000b, copy the contents of the P-state
 *          MSR pointed to by F3xDC[PstateMaxVal] to the software P0 MSR.
 *          Write 000b to MSRC001_0062[PstateCmd] and wait for MSRC001_0063
 *          [CurPstate] to reflect the new value.
 *       c. Adjust the following P-state parameters to zero:
 *          1. F3x64[HtcPstateLimit]
 *          2. F3x68[SwPstateLimit]
 *          3. F3xDC[PstateMaxVal]
 *       d. Program D18F4x15C[BoostSrc] to zero.
 *
 * @param[in]  FamilySpecificServices  The current Family Specific Services.
 * @param[in]  CpuEarlyParams          Service parameters
 * @param[in]  StdHeader               Config handle for library and services.
 *
 */
VOID
F15PmPwrCheck (
  IN       CPU_SPECIFIC_SERVICES *FamilySpecificServices,
  IN       AMD_CPU_EARLY_PARAMS  *CpuEarlyParams,
  IN       AMD_CONFIG_PARAMS     *StdHeader
  )
{
  UINT8           DisPsNum;          // Number of P-states disabled by the power check
  UINT8           PsMaxVal;          // Index of the highest-numbered enabled hardware P-state
  UINT8           Pstate;
  UINT32          ProcIddMax;        // Max current (Idd) required by a given P-state
  UINT32          LocalPciRegister;
  UINT32          Socket;
  UINT32          Module;
  UINT32          Core;
  UINT32          AndMask;
  UINT32          OrMask;
  UINT32          PstateLimit;
  PCI_ADDR        PciAddress;
  UINT64          LocalMsrRegister;
  AP_TASK         TaskPtr;
  AGESA_STATUS    IgnoredSts;
  PWRCHK_ERROR_DATA ErrorData;
  UINT32          NumModules;
  UINT32          HighCore;
  UINT32          LowCore;
  UINT32          ModuleIndex;

  // get the socket number
  IdentifyCore (StdHeader, &Socket, &Module, &Core, &IgnoredSts);
  ErrorData.SocketNumber = (UINT8) Socket;
  // This entry point must run on core 0 of the node.
  ASSERT (Core == 0);

  // get the Max P-state value
  // Scan downward from the last P-state MSR; the first one with PsEnable
  // set is the max P-state.  Falls through to 0 if none above P0 is enabled.
  for (PsMaxVal = NM_PS_REG - 1; PsMaxVal != 0; --PsMaxVal) {
    LibAmdMsrRead (PS_REG_BASE + PsMaxVal, &LocalMsrRegister, StdHeader);
    if (((F15_PSTATE_MSR *) &LocalMsrRegister)->PsEnable == 1) {
      break;
    }
  }
  ErrorData.HwPstateNumber = (UINT8) (PsMaxVal + 1);

  // Starting with P0, loop through all P-states until a passing state is
  // found.  A passing state is one in which the current required by the
  // CPU is less than the maximum amount of current that the system can
  // provide to the CPU.  If P0 is under the limit, no further action is
  // necessary.
  DisPsNum = 0;
  for (Pstate = 0; Pstate < ErrorData.HwPstateNumber; Pstate++) {
    if (FamilySpecificServices->GetProcIddMax (FamilySpecificServices, Pstate, &ProcIddMax, StdHeader)) {
      if (ProcIddMax > CpuEarlyParams->PlatformConfig.VrmProperties[CoreVrm].CurrentLimit) {
        // Add to event log the Pstate that exceeded the current limit
        PutEventLog (AGESA_WARNING,
                     CPU_EVENT_PM_PSTATE_OVERCURRENT,
                     Socket, Pstate, 0, 0, StdHeader);
        DisPsNum++;
      } else {
        break;
      }
    }
  }

  ErrorData.AllowablePstateNumber = ((PsMaxVal + 1) - DisPsNum);

  if (ErrorData.AllowablePstateNumber == 0) {
    // Every P-state exceeds the VRM current limit — fatal platform condition.
    PutEventLog (AGESA_FATAL,
                 CPU_EVENT_PM_ALL_PSTATE_OVERCURRENT,
                 Socket, 0, 0, 0, StdHeader);
  }

  if (DisPsNum != 0) {
    GetPciAddress (StdHeader, Socket, Module, &PciAddress, &IgnoredSts);
    PciAddress.Address.Function = FUNC_4;
    PciAddress.Address.Register = CPB_CTRL_REG;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F4x15C
    ErrorData.NumberOfBoostStates = (UINT8) ((F15_CPB_CTRL_REGISTER *) &LocalPciRegister)->NumBoostStates;

    if (DisPsNum >= ErrorData.NumberOfBoostStates) {
      // If all boosted P-states are disabled, then program D18F4x15C[BoostSrc] to zero.
      AndMask = 0xFFFFFFFF;
      ((F15_CPB_CTRL_REGISTER *) &AndMask)->BoostSrc = 0;
      OrMask = 0x00000000;
      OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F4x15C

      // Update the result of isFeatureEnabled in heap.
      UpdateFeatureStatusInHeap (CoreBoost, FALSE, StdHeader);

      // Only the disabled states beyond the boost states reduce the
      // software P-state limits adjusted below.
      ErrorData.NumberOfSwPstatesDisabled = DisPsNum - ErrorData.NumberOfBoostStates;
    } else {
      ErrorData.NumberOfSwPstatesDisabled = 0;
    }

    NumModules = GetPlatformNumberOfModules ();

    // Only execute this loop if this is an MCM.
    if (NumModules > 1) {

      // Since the P-State MSRs are shared across a
      // node, we only need to set one core in the node for the modified number of supported p-states
      // to be reported across all of the cores in the module.

      TaskPtr.FuncAddress.PfApTaskI = F15PmPwrCheckCore;
      TaskPtr.DataTransfer.DataSizeInDwords = SIZE_IN_DWORDS (PWRCHK_ERROR_DATA);
      TaskPtr.DataTransfer.DataPtr = &ErrorData;
      TaskPtr.DataTransfer.DataTransferFlags = 0;
      TaskPtr.ExeFlags = WAIT_FOR_CORE;

      for (ModuleIndex = 0; ModuleIndex < NumModules; ModuleIndex++) {
        // Execute the P-State reduction code on the module's primary core only.
        // Skip this code for the BSC's module.
        if (ModuleIndex != Module) {
          if (GetGivenModuleCoreRange (Socket, ModuleIndex, &LowCore, &HighCore, StdHeader)) {
            ApUtilRunCodeOnSocketCore ((UINT8)Socket, (UINT8)LowCore, &TaskPtr, StdHeader);
          }
        }
      }
    }

    // Path for SCM and the BSC
    F15PmPwrCheckCore (&ErrorData, StdHeader);

    // Final Step
    //    F3x64[HtPstatelimit] -= disPsNum
    //    F3x68[SwPstateLimit] -= disPsNum
    //    F3xDC[PstateMaxVal] -= disPsNum
    // Each register is read, the field reduced by the number of disabled
    // software P-states (floored at 0 via the range guard), then written
    // back on every die of the socket via ModifyCurrSocketPci.
    PciAddress.Address.Function = FUNC_3;
    PciAddress.Address.Register = HTC_REG;
    AndMask = 0xFFFFFFFF;
    ((HTC_REGISTER *) &AndMask)->HtcPstateLimit = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3x64
    PstateLimit = ((HTC_REGISTER *) &LocalPciRegister)->HtcPstateLimit;
    if (PstateLimit > ErrorData.NumberOfSwPstatesDisabled) {
      PstateLimit -= ErrorData.NumberOfSwPstatesDisabled;
      ((HTC_REGISTER *) &OrMask)->HtcPstateLimit = PstateLimit;
    }
    OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3x64

    PciAddress.Address.Register = SW_PS_LIMIT_REG;
    AndMask = 0xFFFFFFFF;
    ((SW_PS_LIMIT_REGISTER *) &AndMask)->SwPstateLimit = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3x68
    PstateLimit = ((SW_PS_LIMIT_REGISTER *) &LocalPciRegister)->SwPstateLimit;
    if (PstateLimit > ErrorData.NumberOfSwPstatesDisabled) {
      PstateLimit -= ErrorData.NumberOfSwPstatesDisabled;
      ((SW_PS_LIMIT_REGISTER *) &OrMask)->SwPstateLimit = PstateLimit;
    }
    OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3x68

    PciAddress.Address.Register = CPTC2_REG;
    AndMask = 0xFFFFFFFF;
    ((CLK_PWR_TIMING_CTRL2_REGISTER *) &AndMask)->PstateMaxVal = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3xDC
    PstateLimit = ((CLK_PWR_TIMING_CTRL2_REGISTER *) &LocalPciRegister)->PstateMaxVal;
    if (PstateLimit > ErrorData.NumberOfSwPstatesDisabled) {
      PstateLimit -= ErrorData.NumberOfSwPstatesDisabled;
      ((CLK_PWR_TIMING_CTRL2_REGISTER *) &OrMask)->PstateMaxVal = PstateLimit;
    }
    OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3xDC
  }
}