/**
 * Enable DIMM on-line sparing on the current node, if requested.
 *
 * Gated by RefPtr->EnableOnLineSpareCtl. Pass 1 walks every DCT and marks it
 * spare-capable only when it has memory and at least two enabled chip-selects
 * (a spare rank needs a rank to spare into); failures are event-logged and
 * flagged in MCTPtr->ErrStatus[EsbSpareDis]. Pass 2 re-stitches the memory map:
 * spare-capable DCTs get their CS base registers cleared and StitchMemory
 * re-run, while the remaining populated DCTs have their sizes re-accumulated
 * into NodeMemSize / NodeSysLimit.
 *
 * @param[in,out] NBPtr  Northbridge block for the node being configured.
 *
 * @retval TRUE   Sparing was requested and at least one DCT supports it;
 *                the node memory map has been re-stitched.
 * @retval FALSE  Sparing not requested, or no DCT qualifies.
 */
BOOLEAN
MemFOnlineSpare (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  UINT8 Dct;
  UINT8 q;
  UINT8 Value8;
  BOOLEAN Flag;
  BOOLEAN OnlineSprEnabled[MAX_CHANNELS_PER_SOCKET];
  MEM_PARAMETER_STRUCT *RefPtr;
  DIE_STRUCT *MCTPtr;

  ASSERT (NBPtr != NULL);

  RefPtr = NBPtr->RefPtr;
  Flag = FALSE;
  if (RefPtr->EnableOnLineSpareCtl != 0) {
    RefPtr->GStatus[GsbEnDIMMSpareNW] = TRUE;
    MCTPtr = NBPtr->MCTPtr;

    // Check if online spare can be enabled on current node
    for (Dct = 0; Dct < NBPtr->DctCount; Dct++) {
      // NOTE(review): bounds check via sizeof is only equivalent to an element
      // count if sizeof (BOOLEAN) == 1 — confirm, or prefer an element-count form.
      ASSERT (Dct < sizeof (OnlineSprEnabled));
      NBPtr->SwitchDCT (NBPtr, Dct);
      OnlineSprEnabled[Dct] = FALSE;
      // In ganged mode only DCT 0 is evaluated; DCT 1 mirrors it.
      if ((MCTPtr->GangedMode == 0) || (MCTPtr->Dct == 0)) {
        if (NBPtr->DCTPtr->Timings.DctMemSize != 0) {
          // Make sure at least two chip-selects are available:
          // highest enabled CS bit must differ from the lowest one.
          Value8 = LibAmdBitScanReverse (NBPtr->DCTPtr->Timings.CsEnabled);
          if (Value8 > LibAmdBitScanForward (NBPtr->DCTPtr->Timings.CsEnabled)) {
            OnlineSprEnabled[Dct] = TRUE;
            Flag = TRUE;
          } else {
            // Only one chip-select: sparing impossible on this DCT. Log it.
            PutEventLog (AGESA_ERROR, MEM_ERROR_DIMM_SPARING_NOT_ENABLED, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
            MCTPtr->ErrStatus[EsbSpareDis] = TRUE;
          }
        }
      }
    }

    // If we don't have spared rank on any DCT, we don't run the rest part of the code.
    if (!Flag) {
      return FALSE;
    }

    // Rebuild the node memory map now that spare ranks will be reserved.
    MCTPtr->NodeMemSize = 0;
    for (Dct = 0; Dct < NBPtr->DctCount; Dct++) {
      NBPtr->SwitchDCT (NBPtr, Dct);
      if (OnlineSprEnabled[Dct]) {
        // Only run StitchMemory if we need to set a spare rank.
        // Clear the DCT size and all CS base address registers first so
        // StitchMemory lays the map out from scratch.
        NBPtr->DCTPtr->Timings.DctMemSize = 0;
        for (q = 0; q < MAX_CS_PER_CHANNEL; q++) {
          NBPtr->SetBitField (NBPtr, BFCSBaseAddr0Reg + q, 0);
        }
        Flag = NBPtr->StitchMemory (NBPtr);
        ASSERT (Flag == TRUE);
      } else if ((MCTPtr->GangedMode == 0) && (NBPtr->DCTPtr->Timings.DctMemSize != 0)) {
        // Otherwise, need to adjust the memory size on the node.
        MCTPtr->NodeMemSize += NBPtr->DCTPtr->Timings.DctMemSize;
        MCTPtr->NodeSysLimit = MCTPtr->NodeMemSize - 1;
      }
    }
    return TRUE;
  } else {
    return FALSE;
  }
}
/**
 * Map a DDI engine to an ATOM external display path.
 *
 * Looks up the engine's start/end lane pair in DdiLaneConfigArray to find the
 * primary (and possibly distinct secondary) display path id, then copies the
 * connector info into DisplayPathList. Dual-link DVI is rejected, and LVDS is
 * only accepted on path 0. Single-link DVI-I additionally clones its path into
 * slot 6 as the analog (CRT) leg of the connector.
 *
 * @param[in]  Engine           DDI engine descriptor to map.
 * @param[out] DisplayPathList  Display path table to populate.
 * @param[in]  Gfx              Platform graphics configuration.
 *
 * @retval AGESA_SUCCESS  Lane range matched and a display path was assigned.
 * @retval AGESA_ERROR    Lane range not found or configuration invalid; an
 *                        event is logged.
 */
AGESA_STATUS
GfxFmMapEngineToDisplayPath (
  IN       PCIe_ENGINE_CONFIG    *Engine,
     OUT   EXT_DISPLAY_PATH      *DisplayPathList,
  IN       GFX_PLATFORM_CONFIG   *Gfx
  )
{
  AGESA_STATUS  Status;
  UINT8         PrimaryDisplayPathId;
  UINT8         SecondaryDisplayPathId;
  UINTN         DisplayPathIndex;

  PrimaryDisplayPathId = 0xff;
  SecondaryDisplayPathId = 0xff;
  // Match the engine's lane range against the lane-config table.
  // Use sizeof-element for the row count instead of a hard-coded '4' so the
  // loop bound stays correct if the row layout of DdiLaneConfigArray changes.
  for (DisplayPathIndex = 0; DisplayPathIndex < (sizeof (DdiLaneConfigArray) / sizeof (DdiLaneConfigArray[0])); DisplayPathIndex++) {
    if (DdiLaneConfigArray[DisplayPathIndex][0] == Engine->EngineData.StartLane &&
        DdiLaneConfigArray[DisplayPathIndex][1] == Engine->EngineData.EndLane) {
      PrimaryDisplayPathId = DdiLaneConfigArray[DisplayPathIndex][2];
      SecondaryDisplayPathId = DdiLaneConfigArray[DisplayPathIndex][3];
      break;
    }
  }
  if (Engine->Type.Ddi.DdiData.ConnectorType == ConnectorTypeDualLinkDVI ||
      (Engine->Type.Ddi.DdiData.ConnectorType == ConnectorTypeLvds && PrimaryDisplayPathId != 0)) {
    // Display config invalid for ON
    PrimaryDisplayPathId = 0xff;
  }
  if (PrimaryDisplayPathId != 0xff) {
    ASSERT (Engine->Type.Ddi.DdiData.AuxIndex <= Aux3);
    IDS_HDT_CONSOLE (GFX_MISC, "  Allocate Display Connector at Primary sPath[%d]\n", PrimaryDisplayPathId);
    Engine->InitStatus |= INIT_STATUS_DDI_ACTIVE;
    // Aux3 is remapped to channel 7 for this platform.
    if (Engine->Type.Ddi.DdiData.AuxIndex == Aux3) {
      Engine->Type.Ddi.DdiData.AuxIndex = 7;
    }
    GfxIntegratedCopyDisplayInfo (
      Engine,
      &DisplayPathList[PrimaryDisplayPathId],
      (PrimaryDisplayPathId != SecondaryDisplayPathId) ? &DisplayPathList[SecondaryDisplayPathId] : NULL,
      Gfx
      );
    if (Engine->Type.Ddi.DdiData.ConnectorType == ConnectorTypeSingleLinkDviI) {
      // DVI-I carries an analog leg: duplicate the path into slot 6 as CRT1.
      LibAmdMemCopy (&DisplayPathList[6], &DisplayPathList[PrimaryDisplayPathId], sizeof (EXT_DISPLAY_PATH), GnbLibGetHeader (Gfx));
      DisplayPathList[6].usDeviceACPIEnum = 0x100;
      DisplayPathList[6].usDeviceTag = ATOM_DEVICE_CRT1_SUPPORT;
    }
    Status = AGESA_SUCCESS;
  } else {
    IDS_HDT_CONSOLE (GFX_MISC, "  ERROR!!! Map DDI lanes %d - %d to display path failed\n",
      Engine->EngineData.StartLane,
      Engine->EngineData.EndLane
      );
    PutEventLog (
      AGESA_ERROR,
      GNB_EVENT_INVALID_DDI_LINK_CONFIGURATION,
      Engine->EngineData.StartLane,
      Engine->EngineData.EndLane,
      0,
      0,
      GnbLibGetHeader (Gfx)
      );
    Status = AGESA_ERROR;
  }
  return Status;
}
/**
 * This function initializes the heap for each CPU core.
 *
 * Check for already initialized. If not, determine offset of local heap in CAS and
 * setup initial heap markers and bookkeeping status. Also create an initial event log.
 *
 * @param[in]  StdHeader          Handle of Header for calling lib functions and services.
 *
 * @retval     AGESA_SUCCESS      This core's heap is initialized
 * @retval     AGESA_FATAL        This core's heap cannot be initialized due to any reasons below:
 *                                - current processor family cannot be identified.
 *
 */
AGESA_STATUS
HeapManagerInit (
  IN       AMD_CONFIG_PARAMS *StdHeader
  )
{
  // First Time Initialization
  // Note: First 16 bytes of buffer is reserved for Heap Manager use
  UINT16 HeapAlreadyInitSizeDword;
  UINT32 HeapAlreadyRead;
  UINT8 L2LineSize;
  UINT8 *HeapBufferPtr;
  UINT8 *HeapInitPtr;
  UINT32 *HeapDataPtr;
  UINT64 MsrData;
  UINT64 MsrMask;
  UINT8 Ignored;
  CPUID_DATA CpuId;
  BUFFER_NODE *FreeSpaceNode;
  CACHE_INFO *CacheInfoPtr;
  CPU_SPECIFIC_SERVICES *FamilySpecificServices;
  CPU_LOGICAL_ID CpuFamilyRevision;

  // Check whether this is a known processor family.
  GetLogicalIdOfCurrentCore (&CpuFamilyRevision, StdHeader);
  if ((CpuFamilyRevision.Family == 0) && (CpuFamilyRevision.Revision == 0)) {
    IDS_ERROR_TRAP;
    return AGESA_FATAL;
  }

  GetCpuServicesOfCurrentCore (&FamilySpecificServices, StdHeader);
  FamilySpecificServices->GetCacheInfo (FamilySpecificServices, (CONST VOID **) &CacheInfoPtr, &Ignored, StdHeader);
  HeapBufferPtr = (UINT8 *) StdHeader->HeapBasePtr;

  // Check whether the heap manager is already initialized:
  // the heap MTRR pair must already point at our buffer AND the heap
  // signature must be valid; only then can we skip re-initialization.
  LibAmdMsrRead (AMD_MTRR_VARIABLE_HEAP_MASK, &MsrData, StdHeader);
  if (!IsSecureS3 (StdHeader)) {
    // NOTE(review): the mask is compared against VariableMtrrMask here but
    // written from VariableMtrrHeapMask below — confirm both fields are meant
    // to carry the same value for the heap MTRR.
    if (MsrData == (CacheInfoPtr->VariableMtrrMask & AMD_HEAP_MTRR_MASK)) {
      LibAmdMsrRead (AMD_MTRR_VARIABLE_HEAP_BASE, &MsrData, StdHeader);
      if ((MsrData & CacheInfoPtr->HeapBaseMask) == ((UINT64) (UINTN) HeapBufferPtr & CacheInfoPtr->HeapBaseMask)) {
        if (((HEAP_MANAGER *) HeapBufferPtr)->Signature == HEAP_SIGNATURE_VALID) {
          // This is not a bug, there are multiple premem basic entry points,
          // and each will call heap init to make sure create struct will succeed.
          // If that is later deemed a problem, there needs to be a reasonable test
          // for the calling code to make to determine if it needs to init heap or not.
          // In the mean time, add this to the event log
          PutEventLog (AGESA_SUCCESS, CPU_ERROR_HEAP_IS_ALREADY_INITIALIZED, 0, 0, 0, 0, StdHeader);
          return AGESA_SUCCESS;
        }
      }
    }

    // Set variable MTRR base and mask
    MsrData = ((UINT64) (UINTN) HeapBufferPtr & CacheInfoPtr->HeapBaseMask);
    MsrMask = CacheInfoPtr->VariableMtrrHeapMask & AMD_HEAP_MTRR_MASK;
    MsrData |= 0x06;   // memory type: write-back
    LibAmdMsrWrite (AMD_MTRR_VARIABLE_HEAP_BASE, &MsrData, StdHeader);
    LibAmdMsrWrite (AMD_MTRR_VARIABLE_HEAP_MASK, &MsrMask, StdHeader);

    // Set top of memory to a temp value
    LibAmdMsrRead (TOP_MEM, &MsrData, StdHeader);
    if (AMD_TEMP_TOM > MsrData) {
      MsrData = (UINT64) (AMD_TEMP_TOM);
      LibAmdMsrWrite (TOP_MEM, &MsrData, StdHeader);
    }
  }

  // Enable variable MTTRs
  LibAmdMsrRead (SYS_CFG, &MsrData, StdHeader);
  MsrData |= AMD_VAR_MTRR_ENABLE_BIT;
  LibAmdMsrWrite (SYS_CFG, &MsrData, StdHeader);

  // Initialize Heap Space
  // BIOS may store to a line only after it has been allocated by a load,
  // so touch each cache line (one read per L2 line) before zero-filling.
  LibAmdCpuidRead (AMD_CPUID_L2L3Cache_L2TLB, &CpuId, StdHeader);
  L2LineSize = (UINT8) (CpuId.ECX_Reg);
  HeapInitPtr = HeapBufferPtr ;
  for (HeapAlreadyRead = 0; HeapAlreadyRead < AMD_HEAP_SIZE_PER_CORE; (HeapAlreadyRead = HeapAlreadyRead + L2LineSize)) {
    Ignored = *HeapInitPtr;   // load allocates the line in cache-as-RAM
    HeapInitPtr += L2LineSize;
  }
  // Zero-fill the whole per-core heap, one DWORD at a time.
  HeapDataPtr = (UINT32 *) HeapBufferPtr;
  for (HeapAlreadyInitSizeDword = 0; HeapAlreadyInitSizeDword < AMD_HEAP_SIZE_DWORD_PER_CORE; HeapAlreadyInitSizeDword++) {
    *HeapDataPtr = 0;
    HeapDataPtr++;
  }

  // Note: We are reserving the first 16 bytes for Heap Manager use
  // UsedSize indicates the size of heap spaced is used for HEAP_MANAGER, BUFFER_NODE,
  // Pad for 16-byte alignment, buffer data, and IDS SENTINEL.
  // FirstActiveBufferOffset is initalized as invalid heap offset, AMD_HEAP_INVALID_HEAP_OFFSET.
  // FirstFreeSpaceOffset is initalized as the byte right after HEAP_MANAGER header.
  // Then we set Signature of HEAP_MANAGER header as valid, HEAP_SIGNATURE_VALID.
  ((HEAP_MANAGER*) HeapBufferPtr)->UsedSize = sizeof (HEAP_MANAGER);
  ((HEAP_MANAGER*) HeapBufferPtr)->FirstActiveBufferOffset = AMD_HEAP_INVALID_HEAP_OFFSET;
  ((HEAP_MANAGER*) HeapBufferPtr)->FirstFreeSpaceOffset = sizeof (HEAP_MANAGER);
  ((HEAP_MANAGER*) HeapBufferPtr)->Signature = HEAP_SIGNATURE_VALID;
  // Create free space link: one node covering everything after the header.
  FreeSpaceNode = (BUFFER_NODE *) (HeapBufferPtr + sizeof (HEAP_MANAGER));
  FreeSpaceNode->BufferSize = AMD_HEAP_SIZE_PER_CORE - sizeof (HEAP_MANAGER) - sizeof (BUFFER_NODE);
  FreeSpaceNode->OffsetOfNextNode = AMD_HEAP_INVALID_HEAP_OFFSET;
  StdHeader->HeapStatus = HEAP_LOCAL_CACHE;
  EventLogInitialization (StdHeader);
  return AGESA_SUCCESS;
}
/**
 * Performs core leveling for the system.
 *
 * This function implements the AMD_CPU_EARLY_PARAMS.CoreLevelingMode parameter.
 * The possible modes are:
 *    -0    CORE_LEVEL_LOWEST            Level to lowest common denominator
 *    -1    CORE_LEVEL_TWO               Level to 2 cores
 *    -2    CORE_LEVEL_POWER_OF_TWO      Level to 1,2,4 or 8
 *    -3    CORE_LEVEL_NONE              Do no leveling
 *    -4    CORE_LEVEL_COMPUTE_UNIT      Level cores to one core per compute unit
 *
 * @param[in]  EntryPoint        Timepoint designator.
 * @param[in]  PlatformConfig    Contains the leveling mode parameter
 * @param[in]  StdHeader         Config handle for library and services
 *
 * @return       The most severe status of any family specific service.
 *
 */
AGESA_STATUS
CoreLevelingAtEarly (
  IN       UINT64                 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  UINT32    CoreNumPerComputeUnit;
  UINT32    MinNumOfComputeUnit;
  UINT32    EnabledComputeUnit;
  UINT32    Socket;
  UINT32    Module;
  UINT32    NumberOfSockets;
  UINT32    NumberOfModules;
  UINT32    MinCoreCountOnNode;
  UINT32    MaxCoreCountOnNode;
  UINT32    LowCore;
  UINT32    HighCore;
  UINT32    LeveledCores;
  UINT32    RequestedCores;
  UINT32    TotalEnabledCoresOnNode;
  BOOLEAN   RegUpdated;
  AP_MAIL_INFO ApMailboxInfo;
  CORE_LEVELING_TYPE  CoreLevelMode;
  CPU_CORE_LEVELING_FAMILY_SERVICES  *FamilySpecificServices;
  WARM_RESET_REQUEST Request;

  MaxCoreCountOnNode = 0;
  MinCoreCountOnNode = 0xFFFFFFFF;
  LeveledCores = 0;
  CoreNumPerComputeUnit = 1;
  MinNumOfComputeUnit = 0xFF;

  ASSERT (PlatformConfig->CoreLevelingMode < CoreLevelModeMax);

  // Get OEM IO core level mode
  CoreLevelMode = (CORE_LEVELING_TYPE) PlatformConfig->CoreLevelingMode;

  // Get socket count
  NumberOfSockets = GetPlatformNumberOfSockets ();
  GetApMailbox (&ApMailboxInfo.Info, StdHeader);
  NumberOfModules = ApMailboxInfo.Fields.ModuleType + 1;

  // Collect cpu core info: find the min/max core count across all nodes and
  // the minimum number of compute units on any node.
  for (Socket = 0; Socket < NumberOfSockets; Socket++) {
    if (IsProcessorPresent (Socket, StdHeader)) {
      for (Module = 0; Module < NumberOfModules; Module++) {
        if (GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader)) {
          // Get the highest and lowest core count in all nodes
          TotalEnabledCoresOnNode = HighCore - LowCore + 1;
          if (TotalEnabledCoresOnNode < MinCoreCountOnNode) {
            MinCoreCountOnNode = TotalEnabledCoresOnNode;
          }
          if (TotalEnabledCoresOnNode > MaxCoreCountOnNode) {
            MaxCoreCountOnNode = TotalEnabledCoresOnNode;
          }
          EnabledComputeUnit = TotalEnabledCoresOnNode;
          switch (GetComputeUnitMapping (StdHeader)) {
          case AllCoresMapping:
            // All cores are in their own compute unit.
            break;
          case EvenCoresMapping:
            // Cores are paired in compute units.
            CoreNumPerComputeUnit = 2;
            EnabledComputeUnit = (TotalEnabledCoresOnNode / 2);
            break;
          default:
            ASSERT (FALSE);
          }
          // Get minimum of compute unit.  This will either be the minimum number of cores (AllCoresMapping),
          // or less (EvenCoresMapping).
          if (EnabledComputeUnit < MinNumOfComputeUnit) {
            MinNumOfComputeUnit = EnabledComputeUnit;
          }
        }
      }
    }
  }

  // Get LeveledCores: translate the requested mode into a per-node core count.
  switch (CoreLevelMode) {
  case CORE_LEVEL_LOWEST:
    // Already level — nothing to do.
    if (MinCoreCountOnNode == MaxCoreCountOnNode) {
      return (AGESA_SUCCESS);
    }
    // Round down to a whole number of compute units.
    LeveledCores = (MinCoreCountOnNode / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    break;
  case CORE_LEVEL_TWO:
    // Integer division: 2 total cores spread across the modules of a socket.
    // With more than 2 modules this yields 0 and the request is rejected.
    LeveledCores = 2 / NumberOfModules;
    if (LeveledCores != 0) {
      LeveledCores = (LeveledCores <= MinCoreCountOnNode) ? LeveledCores : MinCoreCountOnNode;
    } else {
      return (AGESA_WARNING);
    }
    if ((LeveledCores * NumberOfModules) != 2) {
      PutEventLog (
        AGESA_WARNING,
        CPU_WARNING_ADJUSTED_LEVELING_MODE,
        2, (LeveledCores * NumberOfModules), 0, 0, StdHeader
        );
    }
    break;
  case CORE_LEVEL_POWER_OF_TWO:
    // Level to power of 2 (1, 2, 4, 8...): largest power of two <= min count.
    LeveledCores = 1;
    while (MinCoreCountOnNode >= (LeveledCores * 2)) {
      LeveledCores = LeveledCores * 2;
    }
    break;
  case CORE_LEVEL_COMPUTE_UNIT:
    // Level cores to one core per compute unit, with additional reduction to level
    // all processors to match the processor with the minimum number of cores.
    if (CoreNumPerComputeUnit == 1) {
      // If there is one core per compute unit, this is the same as CORE_LEVEL_LOWEST.
      if (MinCoreCountOnNode == MaxCoreCountOnNode) {
        return (AGESA_SUCCESS);
      }
      LeveledCores = MinCoreCountOnNode;
    } else {
      // If there are more than one core per compute unit, level to the number of compute units.
      LeveledCores = MinNumOfComputeUnit;
    }
    break;
  case CORE_LEVEL_ONE:
    LeveledCores = 1;
    // An MCM part cannot level to a single core; log the adjustment.
    if (NumberOfModules > 1) {
      PutEventLog (
        AGESA_WARNING,
        CPU_WARNING_ADJUSTED_LEVELING_MODE,
        1, NumberOfModules, 0, 0, StdHeader
        );
    }
    break;
  case CORE_LEVEL_THREE:
  case CORE_LEVEL_FOUR:
  case CORE_LEVEL_FIVE:
  case CORE_LEVEL_SIX:
  case CORE_LEVEL_SEVEN:
  case CORE_LEVEL_EIGHT:
  case CORE_LEVEL_NINE:
  case CORE_LEVEL_TEN:
  case CORE_LEVEL_ELEVEN:
  case CORE_LEVEL_TWELVE:
  case CORE_LEVEL_THIRTEEN:
  case CORE_LEVEL_FOURTEEN:
  case CORE_LEVEL_FIFTEEN:
    // MCM processors can not have an odd number of cores. For an odd CORE_LEVEL_N, MCM processors will be
    // leveled as though CORE_LEVEL_N+1 was chosen.
    // Processors with compute units disable all cores in an entire compute unit at a time, or on an MCM processor,
    // two compute units at a time. For example, on an SCM processor with two cores per compute unit, the effective
    // explicit levels are CORE_LEVEL_ONE, CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_SIX, and
    // CORE_LEVEL_EIGHT. The same example for an MCM processor with two cores per compute unit has effective
    // explicit levels of CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_EIGHT, and CORE_LEVEL_TWELVE.
    RequestedCores = CoreLevelMode - CORE_LEVEL_THREE + 3;
    LeveledCores = (RequestedCores + NumberOfModules - 1) / NumberOfModules;
    LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    LeveledCores = (LeveledCores <= MinCoreCountOnNode) ? LeveledCores : MinCoreCountOnNode;
    if (LeveledCores != 1) {
      LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    }
    if ((LeveledCores * NumberOfModules * CoreNumPerComputeUnit) != RequestedCores) {
      PutEventLog (
        AGESA_WARNING,
        CPU_WARNING_ADJUSTED_LEVELING_MODE,
        RequestedCores, (LeveledCores * NumberOfModules * CoreNumPerComputeUnit), 0, 0, StdHeader
        );
    }
    break;
  default:
    ASSERT (FALSE);
  }

  // Set down core register on every present processor; request a warm
  // reset whenever a register was actually changed.
  for (Socket = 0; Socket < NumberOfSockets; Socket++) {
    if (IsProcessorPresent (Socket, StdHeader)) {
      GetFeatureServicesOfSocket (&CoreLevelingFamilyServiceTable, Socket, &FamilySpecificServices, StdHeader);
      if (FamilySpecificServices != NULL) {
        for (Module = 0; Module < NumberOfModules; Module++) {
          RegUpdated = FamilySpecificServices->SetDownCoreRegister (FamilySpecificServices, &Socket, &Module, &LeveledCores, CoreLevelMode, StdHeader);
          // If the down core register is updated, trigger a warm reset.
          if (RegUpdated) {
            GetWarmResetFlag (StdHeader, &Request);
            Request.RequestBit = TRUE;
            Request.StateBits = Request.PostStage - 1;
            SetWarmResetFlag (StdHeader, &Request);
          }
        }
      }
    }
  }

  return (AGESA_SUCCESS);
}
/**
 *
 * A sub-function which extracts Slow mode, Address timing and Output driver compensation value
 * from a input table and store those value to a specific address.
 *
 * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK
 * @param[in] *EntryOfTables - Pointer to MEM_PSC_TABLE_BLOCK
 *
 * @return TRUE - Table values can be extracted per dimm population and ranks type.
 * @return FALSE - Table values cannot be extracted per dimm population and ranks type.
 *
 */
BOOLEAN
MemPGetSAO (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 MaxDimmPerCh;
  UINT8 MaxDimmSlotPerCh;
  UINT8 NOD;                       // number-of-DIMMs selector bit mask
  UINT8 TableSize;
  UINT32 CurDDRrate;
  UINT8 DDR3Voltage;
  UINT16 RankTypeOfPopulatedDimm;
  UINT16 RankTypeInTable;
  UINT8 PsoMaskSAO;
  DIMM_TYPE DimmType;
  UINT8 *MotherboardLayerPtr;
  UINT8 MotherboardLayer;
  UINT8 MotherboardPower;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  PSCFG_SAO_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;

  TblPtr = NULL;
  MotherboardLayer = 0;
  MotherboardPower = 0;
  TableSize = 0;
  PackageType = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;

  // Slot counts: soldered-down DIMMs do not occupy physical slots.
  MaxDimmPerCh = GetMaxDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);
  MaxDimmSlotPerCh = MaxDimmPerCh - GetMaxSolderedDownDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);

  // Classify the channel's DIMM type from the presence bitmaps.
  if (CurrentChannel->RegDimmPresent != 0) {
    DimmType = RDIMM_TYPE;
  } else if (CurrentChannel->SODimmPresent != 0) {
    DimmType = SODIMM_TYPE;
  } else if (CurrentChannel->LrDimmPresent != 0) {
    DimmType = LRDIMM_TYPE;
  } else {
    DimmType = UDIMM_TYPE;
  }

  // Check if it is "SODIMM plus soldered-down DRAM" or "Soldered-down DRAM only" configuration,
  // DimmType is changed to 'SODWN_SODIMM_TYPE' if soldered-down DRAM exist
  if (MaxDimmSlotPerCh != MaxDimmPerCh) {
    // SODIMM plus soldered-down DRAM
    DimmType = SODWN_SODIMM_TYPE;
  } else if (FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_SOLDERED_DOWN_SODIMM_TYPE, NBPtr->MCTPtr->SocketId, NBPtr->ChannelPtr->ChannelID, 0, NULL, NULL) != NULL) {
    // Soldered-down DRAM only
    DimmType = SODWN_SODIMM_TYPE;
    MaxDimmSlotPerCh = 0;
  }
  // DLL PD bypass mode is only kept for a pure soldered-down configuration.
  if (DimmType != SODWN_SODIMM_TYPE || MaxDimmSlotPerCh != 0) {
    NBPtr->RefPtr->EnableDllPDBypassMode = FALSE;
  }
  // NOTE(review): the (UINT8) cast binds to the condition, not the ternary
  // result, due to precedence. The truth value is unchanged so behavior is the
  // same, but the intent was presumably to cast the selected value — confirm.
  NOD = (UINT8) (MaxDimmSlotPerCh != 0) ? (1 << (MaxDimmSlotPerCh - 1)) : _DIMM_NONE;

  // Optional selectors: motherboard layer count and power class refine
  // which platform-specific table applies.
  if (NBPtr->IsSupported[SelectMotherboardLayer]) {
    MotherboardLayerPtr = FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_MOTHER_BOARD_LAYERS, 0, 0, 0, NULL, NULL);
    if (MotherboardLayerPtr != NULL) {
      MotherboardLayer = (1 << *MotherboardLayerPtr);
    }
  }

  if (NBPtr->IsSupported[SelectMotherboardPower]) {
    if (NBPtr->RefPtr->EnableDllPDBypassMode) {
      MotherboardPower = 1;
    } else {
      MotherboardPower = 2;
    }
  }

  i = 0;
  // Obtain table pointer, table size, Logical Cpuid and PSC type according to Dimm, NB and package type.
  while (EntryOfTables->TblEntryOfSAO[i] != NULL) {
    if (((EntryOfTables->TblEntryOfSAO[i])->Header.DimmType & DimmType) != 0) {
      if (((EntryOfTables->TblEntryOfSAO[i])->Header.NumOfDimm & NOD) != 0) {
        if (!NBPtr->IsSupported[SelectMotherboardLayer] ||
            ((EntryOfTables->TblEntryOfSAO[i])->Header.MotherboardLayer & MotherboardLayer) != 0) {
          if (!NBPtr->IsSupported[SelectMotherboardPower] ||
              ((EntryOfTables->TblEntryOfSAO[i])->Header.MotherboardPower & MotherboardPower) != 0) {
            //
            // Determine if this is the expected NB Type
            //
            LogicalCpuid = (EntryOfTables->TblEntryOfSAO[i])->Header.LogicalCpuid;
            PackageType = (EntryOfTables->TblEntryOfSAO[i])->Header.PackageType;
            if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
              TblPtr = (PSCFG_SAO_ENTRY *) ((EntryOfTables->TblEntryOfSAO[i])->TBLPtr);
              TableSize = (EntryOfTables->TblEntryOfSAO[i])->TableSize;
              break;
            }
          }
        }
      }
    }
    i++;
  }

  // Check whether no table entry is found.
  if (EntryOfTables->TblEntryOfSAO[i] == NULL) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo SlowAccMode, AddrTmg and ODCCtrl table\n");
    return FALSE;
  }

  // Encode the current operating point as one-hot selectors for the row match.
  CurDDRrate = (UINT32) (1 << (CurrentChannel->DCTPtr->Timings.Speed / 66));
  DDR3Voltage = (UINT8) (1 << CONVERT_VDDIO_TO_ENCODED (NBPtr->RefPtr->DDR3Voltage));
  RankTypeOfPopulatedDimm = MemPGetPsRankType (CurrentChannel);

  // Scan the selected table for a row matching DIMM count, DDR rate,
  // VDDIO, and populated rank types; apply its SAO values on a hit.
  for (i = 0; i < TableSize; i++) {
    MemPConstructRankTypeMap ((UINT16) TblPtr->Dimm0, (UINT16) TblPtr->Dimm1, (UINT16) TblPtr->Dimm2, &RankTypeInTable);
    if ((TblPtr->DimmPerCh & NOD) != 0) {
      if ((TblPtr->DDRrate & CurDDRrate) != 0) {
        if ((TblPtr->VDDIO & DDR3Voltage) != 0) {
          if ((RankTypeInTable & RankTypeOfPopulatedDimm) == RankTypeOfPopulatedDimm) {
            CurrentChannel->DctAddrTmg = TblPtr->AddTmgCtl;
            CurrentChannel->DctOdcCtl = TblPtr->ODC;
            CurrentChannel->SlowMode = (TblPtr->SlowMode == 1) ? TRUE : FALSE;
            NBPtr->PsPtr->ProcessorOnDieTerminationOff = (TblPtr->POdtOff == 1) ? TRUE : FALSE;
            break;
          }
        }
      }
    }
    TblPtr++;
  }

  //
  // If there is no entry, check if overriding values (SlowAccMode, AddrTmg and ODCCtrl) existed. If not, show no entry found.
  //
  PsoMaskSAO = (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_SLOWACCMODE);
  PsoMaskSAO &= (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_ODCCTRL);
  PsoMaskSAO &= (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_ADDRTMG);
  if ((PsoMaskSAO == 0) && (i == TableSize)) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo SlowAccMode, AddrTmg and ODCCtrl entries\n");
  } else {
    return TRUE;
  }
  // If VDDIO has not been finalized yet, defer error handling to a later pass.
  if (NBPtr->SharedPtr->VoltageMap != VDDIO_DETERMINED) {
    return TRUE;
  }
  PutEventLog (AGESA_ERROR, MEM_ERROR_SAO_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
  SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
  if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
    ASSERT (FALSE);
  }
  return FALSE;
}
/**
 * Translate a system address into its Socket / Channel / DIMM location.
 *
 * Scans the installed dies, (re)uses or constructs the per-die MEM_NB_BLOCK
 * array under the AMD_MEM_AUTO_HANDLE heap handle, then asks each installed
 * NB implementation to translate the caller's system address to a node /
 * DCT / chip-select triple, which is finally converted to socket-relative
 * channel and DIMM numbers in the caller's structure.
 *
 * @param[in,out] AmdDimmIdentify  In: system address (plus StdHeader).
 *                                 Out: SocketId, MemChannelId, DimmId, ChipSelect.
 *
 * @retval AGESA_SUCCESS  Translation succeeded.
 * @retval AGESA_FATAL    Socket scan, heap allocation, or NB construction failed.
 */
AGESA_STATUS
AmdIdentifyDimm (
  IN OUT   AMD_IDENTIFY_DIMM *AmdDimmIdentify
  )
{
  UINT8 i;
  AGESA_STATUS RetVal;
  MEM_MAIN_DATA_BLOCK mmData;            // Main Data block
  MEM_NB_BLOCK *NBPtr;
  MEM_DATA_STRUCT MemData;
  LOCATE_HEAP_PTR LocHeap;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 Node;
  UINT8 Dct;
  UINT8 Die;
  UINT8 DieCount;

  // Seed a local MEM_DATA_STRUCT with the caller's config header.
  LibAmdMemCopy (&(MemData.StdHeader), &(AmdDimmIdentify->StdHeader), sizeof (AMD_CONFIG_PARAMS), &(AmdDimmIdentify->StdHeader));
  mmData.MemPtr = &MemData;
  RetVal = MemSocketScan (&mmData);
  if (RetVal == AGESA_FATAL) {
    return RetVal;
  }
  DieCount = mmData.DieCount;

  // Search for AMD_MEM_AUTO_HANDLE on the heap first.
  // Only apply for space on the heap if cannot find AMD_MEM_AUTO_HANDLE on the heap.
  LocHeap.BufferHandle = AMD_MEM_AUTO_HANDLE;
  if (HeapLocateBuffer (&LocHeap, &AmdDimmIdentify->StdHeader) == AGESA_SUCCESS) {
    // NB block has already been constructed by main block.
    // No need to construct it here.
    NBPtr = (MEM_NB_BLOCK *)LocHeap.BufferPtr;
    mmData.NBPtr = NBPtr;
  } else {
    AllocHeapParams.RequestedBufferSize = (DieCount * (sizeof (MEM_NB_BLOCK)));
    AllocHeapParams.BufferHandle = AMD_MEM_AUTO_HANDLE;
    AllocHeapParams.Persist = HEAP_SYSTEM_MEM;
    if (HeapAllocateBuffer (&AllocHeapParams, &AmdDimmIdentify->StdHeader) != AGESA_SUCCESS) {
      PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_IDENTIFY_DIMM_MEM_NB_BLOCK, 0, 0, 0, 0, &AmdDimmIdentify->StdHeader);
      ASSERT(FALSE); // Could not allocate heap space for NB block for Identify DIMM
      return AGESA_FATAL;
    }
    NBPtr = (MEM_NB_BLOCK *)AllocHeapParams.BufferPtr;
    mmData.NBPtr = NBPtr;
    // Construct each die: try every installed NB implementation until one
    // accepts the die; fail fatally if none does.
    for (Die = 0; Die < DieCount; Die ++) {
      i = 0;
      while (memNBInstalled[i].MemIdentifyDimmConstruct != 0) {
        if (memNBInstalled[i].MemIdentifyDimmConstruct (&NBPtr[Die], &MemData, Die)) {
          break;
        }
        i++;
      };
      if (memNBInstalled[i].MemIdentifyDimmConstruct == 0) {
        PutEventLog (AGESA_FATAL, MEM_ERROR_NO_CONSTRUCTOR_FOR_IDENTIFY_DIMM, Die, 0, 0, 0, &AmdDimmIdentify->StdHeader);
        ASSERT(FALSE); // No Identify DIMM constructor found
        return AGESA_FATAL;
      }
    }
  }

  // Try each installed NB's address translator until one succeeds.
  i = 0;
  while (memNBInstalled[i].MemIdentifyDimmConstruct != 0) {
    if ((RetVal = memNBInstalled[i].MemTransSysAddrToCs (AmdDimmIdentify, &mmData)) == AGESA_SUCCESS) {
      // Translate Node, DCT and Chip select number to Socket, Channel and Dimm number.
      // NOTE(review): on return from MemTransSysAddrToCs, SocketId/MemChannelId
      // appear to hold node/DCT indexes that are remapped here — confirm that
      // contract against the translator implementations.
      Node = AmdDimmIdentify->SocketId;
      Dct = AmdDimmIdentify->MemChannelId;
      AmdDimmIdentify->SocketId = MemData.DiesPerSystem[Node].SocketId;
      AmdDimmIdentify->MemChannelId = NBPtr[Node].GetSocketRelativeChannel (&NBPtr[Node], Dct, 0);
      // Two chip-selects per DIMM: CS/2 is the DIMM, CS%2 the rank on it.
      AmdDimmIdentify->DimmId = AmdDimmIdentify->ChipSelect / 2;
      AmdDimmIdentify->ChipSelect %= 2;
      break;
    }
    i++;
  };

  return RetVal;
}
/** * * * This function defines the DDR3 initialization flow * when only DDR3 DIMMs are present in the system * * @param[in,out] *MemMainPtr - Pointer to the MEM_MAIN_DATA_BLOCK * * @return AGESA_STATUS * - AGESA_FATAL * - AGESA_CRITICAL * - AGESA_SUCCESS */ AGESA_STATUS MemMD3FlowKV ( IN OUT MEM_MAIN_DATA_BLOCK *MemMainPtr ) { UINT8 Dct; MEM_NB_BLOCK *NBPtr; MEM_DATA_STRUCT *MemPtr; ID_INFO CallOutIdInfo; INT8 MemPstate; UINT8 LowestMemPstate; UINT8 PmuImage; BOOLEAN ErrorRecovery; BOOLEAN IgnoreErr; NBPtr = &MemMainPtr->NBPtr[BSP_DIE]; MemPtr = MemMainPtr->MemPtr; ErrorRecovery = TRUE; IgnoreErr = FALSE; IDS_HDT_CONSOLE (MEM_FLOW, "DDR3 Mode\n"); //---------------------------------------------------------------- // Defines DDR3 registers //---------------------------------------------------------------- MemNInitNBRegTableD3KV (NBPtr); //---------------------------------------------------------------- // Clock and power gate unsued channels //---------------------------------------------------------------- MemNClockAndPowerGateUnusedDctKV (NBPtr); //---------------------------------------------------------------- // Set DDR3 mode //---------------------------------------------------------------- MemNSetDdrModeD3KV (NBPtr); //---------------------------------------------------------------- // Enable PHY Calibration //---------------------------------------------------------------- for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctDimmValid != 0) { MemNEnablePhyCalibrationKV (NBPtr); } } //---------------------------------------------------------------- // Low voltage DDR3 //---------------------------------------------------------------- // Levelize DDR3 voltage based on socket, as each socket has its own voltage for dimms. 
AGESA_TESTPOINT (TpProcMemLvDdr3, &(MemMainPtr->MemPtr->StdHeader)); if (!MemFeatMain.LvDDR3 (MemMainPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // Find the maximum speed that all DCTs are capable running at //---------------------------------------------------------------- if (!MemTSPDGetTargetSpeed3 (NBPtr->TechPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // Adjust memClkFreq based on MaxDdrRate //---------------------------------------------------------------- MemNAdjustDdrSpeed3Unb (NBPtr); //------------------------------------------------ // Finalize target frequency //------------------------------------------------ if (!MemMLvDdr3PerformanceEnhFinalize (MemMainPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // Program DCT address map //---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "DCT addr map\n"); for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct); MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctDimmValid == 0) { MemNDisableDctKV (NBPtr); } else { IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCS Addr Map\n"); if (MemTSPDSetBanks3 (NBPtr->TechPtr)) { if (MemNStitchMemoryNb (NBPtr)) { if (NBPtr->DCTPtr->Timings.CsEnabled == 0) { MemNDisableDctKV (NBPtr); } else { IDS_HDT_CONSOLE (MEM_FLOW, "\t\tAuto Cfg\n"); MemNAutoConfigKV (NBPtr); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tTraining Cfg\n"); MemNConfigureDctForTrainingD3KV (NBPtr); } } } } } IDS_OPTION_HOOK (IDS_BEFORE_DRAM_INIT, NBPtr, &(MemMainPtr->MemPtr->StdHeader)); //---------------------------------------------------------------- // Init Phy mode //---------------------------------------------------------------- for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { IDS_HDT_CONSOLE (MEM_STATUS, "\tDct 
%d\n", Dct); // 1. Program D18F2x9C_x0002_0099_dct[3:0][PmuReset,PmuStall] = 1,1. // 2. Program D18F2x9C_x0002_000E_dct[3:0][PhyDisable]=0. Tester_mode=0. MemNPmuResetNb (NBPtr); // 3. According to the type of DRAM attached, program D18F2x9C_x00FFF04A_dct[3:0][MajorMode], // D18F2x9C_x0002_000E_dct[3:0][G5_Mode], and D18F2x9C_x0002_0098_dct[3:0][CalG5D3]. // D18F2x9C_x0[3,1:0][F,7:0]1_[F,B:0]04A_dct[3:0]. MemNSetPhyDdrModeKV (NBPtr, DRAM_TYPE_DDR3_KV); // Work-around for CPU A0/A1, PhyReceiverPowerMode if ((NBPtr->MCTPtr->LogicalCpuid.Revision & AMD_F15_KV_A0) != 0) { MemNPrePhyReceiverLowPowerKV (NBPtr); } } } //---------------------------------------------------------------- // Temporary buffer for DRAM CAD Bus Configuration //---------------------------------------------------------------- if (!MemNInitDramCadBusConfigKV (NBPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // Program Mem Pstate dependent registers //---------------------------------------------------------------- IEM_SKIP_CODE (IEM_EARLY_DCT_CONFIG) { // PMU required M1 settings regardless Memory Pstate disabled. LowestMemPstate = 1; } for (MemPstate = LowestMemPstate; MemPstate >= 0; MemPstate--) { // When memory pstate is enabled, this loop will goes through M1 first then M0 // Otherwise, this loop only goes through M0. 
MemNSwitchMemPstateKV (NBPtr, MemPstate); // By default, start up speed is DDR667 for M1 // For M0, we need to set speed to highest possible frequency if (MemPstate == 0) { for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); NBPtr->DCTPtr->Timings.Speed = NBPtr->DCTPtr->Timings.TargetSpeed; } } IDS_HDT_CONSOLE (MEM_FLOW, "MemClkFreq = %d MHz\n", NBPtr->DCTPtr->Timings.Speed); // Program SPD timings and frequency dependent settings for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct); MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { IDS_HDT_CONSOLE (MEM_FLOW, "\t\tSPD timings\n"); if (MemTAutoCycTiming3 (NBPtr->TechPtr)) { IDS_HDT_CONSOLE (MEM_FLOW, "\t\tMemPs Reg\n"); MemNProgramMemPstateRegD3KV (NBPtr, MemPstate); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tPlatform Spec\n"); if (MemNPlatformSpecKV (NBPtr)) { MemNSetBitFieldNb (NBPtr, BFMemClkDis, 0); // 7. Program default CAD bus values. // 8. Program default data bus values. IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCAD Data Bus Cfg\n"); MemNProgramCadDataBusD3KV (NBPtr); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tPredriver\n"); MemNPredriverInitKV (NBPtr); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tMode Register initialization\n"); MemNModeRegisterInitializationKV (NBPtr); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tDRAM PHY Power Savings\n"); MemNDramPhyPowerSavingsKV (NBPtr); } } } } MemFInitTableDrive (NBPtr, MTBeforeDInit); } //---------------------------------------------------------------- // Program Phy //---------------------------------------------------------------- for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct); // 4. Program general phy static configuration. See 2.10.7.3.1. MemNPhyGenCfgKV (NBPtr); // 5. Phy Voltage Level Programming. See 2.10.7.3.2. MemNPhyVoltageLevelKV (NBPtr); // 6. Program DRAM channel frequency. See 2.10.7.3.3. 
MemNProgramChannelFreqKV (NBPtr, DRAM_TYPE_DDR3_KV); // Step 7 and 8 are done in MemPs dependent section // 9. Program FIFO pointer init values. See 2.10.7.3.6. MemNPhyFifoConfigD3KV (NBPtr); } } IEM_INSERT_CODE (IEM_EARLY_DEVICE_INIT, IemEarlyDeviceInitD3KV, (NBPtr)); //------------------------------------------------ // Callout before Dram Init //------------------------------------------------ AGESA_TESTPOINT (TpProcMemBeforeAgesaHookBeforeDramInit, &(MemMainPtr->MemPtr->StdHeader)); CallOutIdInfo.IdField.SocketId = NBPtr->MCTPtr->SocketId; CallOutIdInfo.IdField.ModuleId = NBPtr->MCTPtr->DieId; //------------------------------------------------------------------------ // Callout to Platform BIOS to set the VDDP/VDDR voltage based upon Bit 21 // ProductIdentification Register (Dev18Fun3x1FC) //------------------------------------------------------------------------ if (MemNGetBitFieldNb (NBPtr, BFVddpVddrLowVoltSupp)) { MemMainPtr->MemPtr->ParameterListPtr->VddpVddrVoltage.Voltage = VOLT0_95; MemMainPtr->MemPtr->ParameterListPtr->VddpVddrVoltage.IsValid = TRUE; NBPtr->DCTPtr->Timings.TargetSpeed = DDR1600_FREQUENCY; } else { MemMainPtr->MemPtr->ParameterListPtr->VddpVddrVoltage.IsValid = FALSE; } IDS_HDT_CONSOLE (MEM_FLOW, "\nCalling out to Platform BIOS on Socket %d, Module %d...\n", CallOutIdInfo.IdField.SocketId, CallOutIdInfo.IdField.ModuleId); AgesaHookBeforeDramInit ((UINTN) CallOutIdInfo.IdInformation, MemMainPtr->MemPtr); NBPtr[BSP_DIE].FamilySpecificHook[AmpVoltageDisp] (&NBPtr[BSP_DIE], NULL); IDS_HDT_CONSOLE (MEM_FLOW, "\nVDDIO = 1.%dV\n", (NBPtr->RefPtr->DDR3Voltage == VOLT1_5) ? 5 : (NBPtr->RefPtr->DDR3Voltage == VOLT1_35) ? 35 : (NBPtr->RefPtr->DDR3Voltage == VOLT1_25) ? 
25 : 999); AGESA_TESTPOINT (TpProcMemAfterAgesaHookBeforeDramInit, &(NBPtr->MemPtr->StdHeader)); //---------------------------------------------------------------------------- // Deassert MemResetL //---------------------------------------------------------------------------- for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { // Deassert Procedure: // MemResetL = 0 // Go to LP2 // Go to PS0 MemNSetBitFieldNb (NBPtr, BFMemResetL, 0); MemNSetBitFieldNb (NBPtr, RegPwrStateCmd, 4); MemNSetBitFieldNb (NBPtr, RegPwrStateCmd, 0); } } MemUWait10ns (20000, NBPtr->MemPtr); //---------------------------------------------------------------------------- // Program PMU SRAM Message Block, Initiate PMU based Dram init and training //---------------------------------------------------------------------------- for (PmuImage = 0; PmuImage < MemNNumberOfPmuFirmwareImageKV (NBPtr); ++PmuImage) { NBPtr->PmuFirmwareImage = PmuImage; NBPtr->FeatPtr->LoadPmuFirmware (NBPtr); for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { IDS_HDT_CONSOLE (MEM_STATUS, "Dct %d\n", Dct); IDS_HDT_CONSOLE (MEM_FLOW, "Initialize the PMU SRAM Message Block buffer\n"); if (MemNInitPmuSramMsgBlockKV (NBPtr) == FALSE) { IDS_HDT_CONSOLE (MEM_FLOW, "\tNot able to initialize the PMU SRAM Message Block buffer\n"); // Not able to initialize the PMU SRAM Message Block buffer. Log an event. PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_PMU_SRAM_MSG_BLOCK, 0, 0, 0, 0, &(MemMainPtr->MemPtr->StdHeader)); return AGESA_FATAL; } for (MemPstate = LowestMemPstate; MemPstate >= 0; MemPstate--) { // When memory pstate is enabled, this loop will goes through M1 first then M0 // Otherwise, this loop only goes through M0. 
MemNSwitchMemPstateKV (NBPtr, MemPstate); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tPMU MemPs Reg\n"); MemNPopulatePmuSramTimingsD3KV (NBPtr); } MemNPopulatePmuSramConfigD3KV (NBPtr); MemNSetPmuSequenceControlKV (NBPtr); if (MemNWritePmuSramMsgBlockKV (NBPtr) == FALSE) { IDS_HDT_CONSOLE (MEM_FLOW, "\tNot able to load the PMU SRAM Message Block in to DMEM\n"); // Not able to load the PMU SRAM Message Block in to DMEM. Log an event. PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_LOCATE_FOR_PMU_SRAM_MSG_BLOCK, 0, 0, 0, 0, &(MemMainPtr->MemPtr->StdHeader)); return AGESA_FATAL; } // Query for the calibrate completion. MemNPendOnPhyCalibrateCompletionKV (NBPtr); // Set calibration rate. MemNStartPmuNb (NBPtr); } } for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct); if (MemNPendOnPmuCompletionNb (NBPtr) == FALSE) { PutEventLog (AGESA_FATAL, MEM_ERROR_PMU_TRAINING, 0, 0, 0, 0, &(MemMainPtr->MemPtr->StdHeader)); AGESA_TESTPOINT (TpProcMemPmuFailed, &(MemMainPtr->MemPtr->StdHeader)); IDS_OPTION_HOOK (IDS_MEM_ERROR_RECOVERY, &ErrorRecovery, &(MemMainPtr->MemPtr->StdHeader)); if (ErrorRecovery) { IDS_HDT_CONSOLE (MEM_FLOW, "Chipselects that PMU failed training %x\n",MemNGetBitFieldNb (NBPtr, PmuTestFail)); NBPtr->DCTPtr->Timings.CsTrainFail = (UINT16) MemNGetBitFieldNb (NBPtr, PmuTestFail); NBPtr->MCTPtr->ChannelTrainFail |= (UINT32)1 << Dct; } else { IDS_OPTION_HOOK (IDS_MEM_IGNORE_ERROR, &IgnoreErr, &(MemMainPtr->MemPtr->StdHeader)); if (!(IgnoreErr)) { return AGESA_FATAL; } } } MemNRateOfPhyCalibrateKV (NBPtr); } } } //---------------------------------------------------------------- // De-allocate the PMU SRAM Message Block buffer. 
//---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "De-allocate PMU SRAM Message Block buffer\n"); if (MemNPostPmuSramMsgBlockKV (NBPtr) == FALSE) { IDS_HDT_CONSOLE (MEM_FLOW, "\tNot able to free the PMU SRAM Message Block buffer\n"); // Not able to free the PMU SRAM Message Block buffer. Log an event. PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_DEALLOCATE_FOR_PMU_SRAM_MSG_BLOCK, 0, 0, 0, 0, &(MemMainPtr->MemPtr->StdHeader)); return AGESA_FATAL; } //---------------------------------------------------------------- // De-allocate temporary buffer for DRAM CAD Bus Configuration //---------------------------------------------------------------- if (!MemNPostDramCadBusConfigKV (NBPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // Disable chipselects that failed training //---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "\t\tDIMM Excludes\n"); MemNDimmExcludesKV (NBPtr); //---------------------------------------------------------------- // Synchronize Channels //---------------------------------------------------------------- MemNSyncChannelInitKV (NBPtr); //---------------------------------------------------------------- // Train MaxRdLatency //---------------------------------------------------------------- IEM_SKIP_CODE (IEM_LATE_DCT_CONFIG) { NBPtr->TechPtr->FindMaxDlyForMaxRdLat = MemTFindMaxRcvrEnDlyTrainedByPmuByte; NBPtr->TechPtr->ResetDCTWrPtr = MemNResetRcvFifoKV; MemTTrainMaxLatency (NBPtr->TechPtr); // The fourth loop will restore the Northbridge P-State control register // to the original value. 
for (NBPtr->NbFreqChgState = 1; NBPtr->NbFreqChgState <= 4; NBPtr->NbFreqChgState++) { if (!MemNChangeNbFrequencyWrapUnb (NBPtr, NBPtr->NbFreqChgState) || (NBPtr->NbFreqChgState == 4)) { break; } MemTTrainMaxLatency (NBPtr->TechPtr); } } //---------------------------------------------------------------- // Set MajorMode //---------------------------------------------------------------- for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); // Work-around for CPU A0/A1, PhyReceiverPowerMode if ((NBPtr->MCTPtr->LogicalCpuid.Revision & AMD_F15_KV_A0) != 0) { MemNPostPhyReceiverLowPowerKV (NBPtr); } } //---------------------------------------------------------------- // Configure DCT for normal operation //---------------------------------------------------------------- for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tMission mode cfg\n"); MemNConfigureDctNormalD3KV (NBPtr); //---------------------------------------------------------------- // Program turnaround timings //---------------------------------------------------------------- for (MemPstate = LowestMemPstate; MemPstate >= 0; MemPstate--) { MemNSwitchMemPstateKV (NBPtr, MemPstate); MemNProgramTurnaroundTimingsD3KV (NBPtr); //---------------------------------------------------------------- // After Mem Pstate1 Partial Training Table values //---------------------------------------------------------------- MemFInitTableDrive (NBPtr, MTAfterMemPstate1PartialTrn); } } } IEM_INSERT_CODE (IEM_LATE_DCT_CONFIG, IemLateDctConfigD3KV, (NBPtr)); for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { MemNSwitchDCTNb (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { IDS_HDT_CONSOLE (MEM_FLOW, "\t\tAdditional DRAM PHY Power Savings\n"); MemNAddlDramPhyPowerSavingsKV (NBPtr); } } //---------------------------------------------------------------- // 
Initialize Channels interleave address bit. //---------------------------------------------------------------- MemNInitChannelIntlvAddressBitKV (NBPtr); //---------------------------------------------------------------- // Assign physical address ranges for DCTs and node. Also, enable channel interleaving. //---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "\t\tHT mem map\n"); if (!NBPtr->FeatPtr->InterleaveChannels (NBPtr)) { MemNHtMemMapKV (NBPtr); } //---------------------------------------------------- // If there is no dimm on the system, do fatal exit //---------------------------------------------------- if (NBPtr->RefPtr->SysLimit == 0) { PutEventLog (AGESA_FATAL, MEM_ERROR_NO_DIMM_FOUND_ON_SYSTEM, 0, 0, 0, 0, &(MemMainPtr->MemPtr->StdHeader)); return AGESA_FATAL; } //---------------------------------------------------------------- // CpuMemTyping //---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "\t\tMem typing\n"); MemNCPUMemTypingNb (NBPtr); IDS_OPTION_HOOK (IDS_BEFORE_DQS_TRAINING, MemMainPtr, &(MemMainPtr->MemPtr->StdHeader)); //---------------------------------------------------------------- // After Training Table values //---------------------------------------------------------------- MemFInitTableDrive (NBPtr, MTAfterTrn); //---------------------------------------------------------------- // Interleave banks //---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "\t\tBank Intlv\n"); if (NBPtr->FeatPtr->InterleaveBanks (NBPtr)) { if (NBPtr->MCTPtr->ErrCode == AGESA_FATAL) { return AGESA_FATAL; } } //---------------------------------------------------------------- // After Programming Interleave registers //---------------------------------------------------------------- MemFInitTableDrive (NBPtr, MTAfterInterleave); //---------------------------------------------------------------- // Memory Clear 
//---------------------------------------------------------------- AGESA_TESTPOINT (TpProcMemMemClr, &(MemMainPtr->MemPtr->StdHeader)); if (!MemFeatMain.MemClr (MemMainPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // ECC //---------------------------------------------------------------- if (!MemFeatMain.InitEcc (MemMainPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // C6 and ACP Engine Storage Allocation //---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "\t\tC6 and ACP Engine Storage\n"); MemNAllocateC6AndAcpEngineStorageKV (NBPtr); //---------------------------------------------------------------- // UMA Allocation & UMAMemTyping //---------------------------------------------------------------- AGESA_TESTPOINT (TpProcMemUMAMemTyping, &(MemMainPtr->MemPtr->StdHeader)); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tUMA Alloc\n"); if (!MemFeatMain.UmaAllocation (MemMainPtr)) { return AGESA_FATAL; } //---------------------------------------------------------------- // OnDimm Thermal //---------------------------------------------------------------- if (NBPtr->FeatPtr->OnDimmThermal (NBPtr)) { if (NBPtr->MCTPtr->ErrCode == AGESA_FATAL) { return AGESA_FATAL; } } //---------------------------------------------------------------- // Finalize MCT //---------------------------------------------------------------- MemNFinalizeMctKV (NBPtr); MemFInitTableDrive (NBPtr, MTAfterFinalizeMCT); //---------------------------------------------------------------- // Memory Context Save //---------------------------------------------------------------- MemFeatMain.MemSave (MemMainPtr); //---------------------------------------------------------------- // Memory DMI support //---------------------------------------------------------------- if (!MemFeatMain.MemDmi (MemMainPtr)) { return AGESA_CRITICAL; } 
//---------------------------------------------------------------- // Memory CRAT support //---------------------------------------------------------------- if (!MemFeatMain.MemCrat (MemMainPtr)) { return AGESA_CRITICAL; } //---------------------------------------------------------------- // Save memory S3 data //---------------------------------------------------------------- IDS_HDT_CONSOLE (MEM_FLOW, "\t\tS3 Save\n"); if (!MemMS3Save (MemMainPtr)) { return AGESA_CRITICAL; } //---------------------------------------------------------------- // Switch back to DCT 0 before sending control back //---------------------------------------------------------------- MemNSwitchDCTNb (NBPtr, 0); return AGESA_SUCCESS; }
/**
 * Deallocates a previously allocated buffer in the heap
 *
 * This function will deallocate buffer either by using internal 'AGESA' heapmanager
 * or by using external (IBV) heapmanager.
 *
 * @param[in]  BufferHandle  Handle of the buffer to free.
 * @param[in]  StdHeader     Config handle for library and services.
 *
 * @retval     AGESA_SUCCESS    No error
 * @retval     AGESA_ERROR      Heap is not available, or the IBV callout failed
 * @retval     AGESA_BOUNDS_CHK Handle does not exist on the heap
 *
 */
AGESA_STATUS
HeapDeallocateBuffer (
  IN UINT32 BufferHandle,
  IN AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT8 *BaseAddress;
  UINT32 NodeSize;
  UINT32 OffsetOfFreeSpaceNode;
  UINT32 OffsetOfPreviousNode;
  UINT32 OffsetOfCurrentNode;
  BOOLEAN HeapLocateFlag;
  HEAP_MANAGER *HeapManager;
  BUFFER_NODE *CurrentNode;
  BUFFER_NODE *PreviousNode;
  BUFFER_NODE *FreeSpaceNode;
  AGESA_BUFFER_PARAMS AgesaBuffer;

  ASSERT (StdHeader != NULL);

  HeapLocateFlag = TRUE;
  BaseAddress = (UINT8 *) (UINTN) StdHeader->HeapBasePtr;
  HeapManager = (HEAP_MANAGER *) BaseAddress;
  // Check Heap database is valid
  if ((BaseAddress == NULL) || (HeapManager->Signature != HEAP_SIGNATURE_VALID)) {
    // The base address in StdHeader is incorrect, get base address by itself
    BaseAddress = (UINT8 *)(UINTN) HeapGetBaseAddress (StdHeader);
    HeapManager = (HEAP_MANAGER *) BaseAddress;
    if ((BaseAddress == NULL) || (HeapManager->Signature != HEAP_SIGNATURE_VALID)) {
      // Heap is not available, ASSERT here
      ASSERT (FALSE);
      return AGESA_ERROR;
    }
    // Cache the rediscovered base address for subsequent calls.
    StdHeader->HeapBasePtr = (UINTN)BaseAddress;
  }
  OffsetOfPreviousNode = AMD_HEAP_INVALID_HEAP_OFFSET;
  OffsetOfCurrentNode = HeapManager->FirstActiveBufferOffset;
  CurrentNode = (BUFFER_NODE *) (BaseAddress + OffsetOfCurrentNode);

  // Locate heap: walk the active-buffer chain looking for BufferHandle,
  // remembering the predecessor so the node can be unlinked.
  if ((BaseAddress != NULL) && (HeapManager->Signature == HEAP_SIGNATURE_VALID)) {
    if (OffsetOfCurrentNode == AMD_HEAP_INVALID_HEAP_OFFSET) {
      // Active chain is empty.
      HeapLocateFlag = FALSE;
    } else {
      while (CurrentNode->BufferHandle != BufferHandle) {
        if (CurrentNode->OffsetOfNextNode == AMD_HEAP_INVALID_HEAP_OFFSET) {
          // Reached end of chain without finding the handle.
          HeapLocateFlag = FALSE;
          break;
        } else {
          OffsetOfPreviousNode = OffsetOfCurrentNode;
          OffsetOfCurrentNode = CurrentNode->OffsetOfNextNode;
          CurrentNode = (BUFFER_NODE *) (BaseAddress + OffsetOfCurrentNode);
        }
      }
    }
  } else {
    HeapLocateFlag = FALSE;
  }
  if (HeapLocateFlag == TRUE) {
    // CurrentNode points to the buffer which wanted to be deallocated.
    // Remove deallocated heap from active buffer chain.
    if (OffsetOfPreviousNode == AMD_HEAP_INVALID_HEAP_OFFSET) {
      HeapManager->FirstActiveBufferOffset = CurrentNode->OffsetOfNextNode;
    } else {
      PreviousNode = (BUFFER_NODE *) (BaseAddress + OffsetOfPreviousNode);
      PreviousNode->OffsetOfNextNode = CurrentNode->OffsetOfNextNode;
    }
    // Now, CurrentNode become a free space node.
    HeapManager->UsedSize -= CurrentNode->BufferSize + sizeof (BUFFER_NODE);
    // Loop free space chain to see if any free space node is just before/after CurrentNode, then merge them.
    OffsetOfFreeSpaceNode = HeapManager->FirstFreeSpaceOffset;
    FreeSpaceNode = (BUFFER_NODE *) (BaseAddress + OffsetOfFreeSpaceNode);
    while (OffsetOfFreeSpaceNode != AMD_HEAP_INVALID_HEAP_OFFSET) {
      if ((OffsetOfFreeSpaceNode + sizeof (BUFFER_NODE) + FreeSpaceNode->BufferSize) == OffsetOfCurrentNode) {
        // Free node ends exactly where CurrentNode begins: merge CurrentNode
        // into the free node (free node becomes the new CurrentNode).
        DeleteFreeSpaceNode (StdHeader, OffsetOfFreeSpaceNode);
        NodeSize = FreeSpaceNode->BufferSize + CurrentNode->BufferSize + sizeof (BUFFER_NODE);
        OffsetOfCurrentNode = OffsetOfFreeSpaceNode;
        CurrentNode = FreeSpaceNode;
        CurrentNode->BufferSize = NodeSize;
      } else if (OffsetOfFreeSpaceNode == (OffsetOfCurrentNode + sizeof (BUFFER_NODE) + CurrentNode->BufferSize)) {
        // Free node begins exactly where CurrentNode ends: absorb the free
        // node into CurrentNode.
        DeleteFreeSpaceNode (StdHeader, OffsetOfFreeSpaceNode);
        NodeSize = FreeSpaceNode->BufferSize + CurrentNode->BufferSize + sizeof (BUFFER_NODE);
        CurrentNode->BufferSize = NodeSize;
      }
      // NOTE(review): iteration advances via the (possibly just-deleted)
      // FreeSpaceNode's next link; DeleteFreeSpaceNode presumably leaves
      // OffsetOfNextNode intact — confirm against its implementation.
      OffsetOfFreeSpaceNode = FreeSpaceNode->OffsetOfNextNode;
      FreeSpaceNode = (BUFFER_NODE *) (BaseAddress + OffsetOfFreeSpaceNode);
    }
    // Re-insert the (possibly merged) node into the free-space chain.
    InsertFreeSpaceNode (StdHeader, OffsetOfCurrentNode);
    return AGESA_SUCCESS;
  } else {
    // If HeapStatus == HEAP_SYSTEM_MEM, try callout function
    if (StdHeader->HeapStatus == HEAP_SYSTEM_MEM) {
      AgesaBuffer.StdHeader = *StdHeader;
      AgesaBuffer.BufferHandle = BufferHandle;

      AGESA_TESTPOINT (TpIfBeforeDeallocateHeapBuffer, StdHeader);
      if (AgesaDeallocateBuffer (0, &AgesaBuffer) != AGESA_SUCCESS) {
        return AGESA_ERROR;
      }
      AGESA_TESTPOINT (TpIfAfterDeallocateHeapBuffer, StdHeader);
      return AGESA_SUCCESS;
    }
    // If we are still unable to locate the buffer handle, return AGESA_BOUNDS_CHK
    if ((BaseAddress != NULL) && (HeapManager->Signature == HEAP_SIGNATURE_VALID)) {
      PutEventLog (AGESA_BOUNDS_CHK, CPU_ERROR_HEAP_BUFFER_HANDLE_IS_NOT_PRESENT, BufferHandle, 0, 0, 0, StdHeader);
    } else {
      ASSERT (FALSE);
    }
    return AGESA_BOUNDS_CHK;
  }
}
/**
 * Locates a previously allocated buffer on the heap.
 *
 * Searches the internal heap's active-buffer chain for the requested handle
 * and returns a pointer to the user-visible portion of the buffer. When the
 * handle is not found internally and the heap lives in system memory, the
 * lookup is delegated to the host environment via AgesaLocateBuffer.
 *
 * @param[in,out]  LocateHeap  Structure containing the buffer's handle,
 *                             and the return pointer.
 * @param[in]      StdHeader   Config handle for library and services.
 *
 * @retval         AGESA_SUCCESS    No error
 * @retval         AGESA_ERROR      Heap unavailable, or the callout failed
 * @retval         AGESA_BOUNDS_CHK Handle does not exist on the heap
 *
 */
AGESA_STATUS
HeapLocateBuffer (
  IN OUT LOCATE_HEAP_PTR *LocateHeap,
  IN AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT8 *HeapBase;
  UINT8 Padding;
  UINT32 NodeOffset;
  BOOLEAN BufferFound;
  HEAP_MANAGER *Manager;
  BUFFER_NODE *Node;
  AGESA_BUFFER_PARAMS AgesaBuffer;

  ASSERT (StdHeader != NULL);

  BufferFound = TRUE;
  HeapBase = (UINT8 *) (UINTN) StdHeader->HeapBasePtr;
  Manager = (HEAP_MANAGER *) HeapBase;

  // Validate the cached heap base; if it is stale, rediscover it.
  if ((HeapBase == NULL) || (Manager->Signature != HEAP_SIGNATURE_VALID)) {
    HeapBase = (UINT8 *) (UINTN) HeapGetBaseAddress (StdHeader);
    Manager = (HEAP_MANAGER *) HeapBase;
    if ((HeapBase == NULL) || (Manager->Signature != HEAP_SIGNATURE_VALID)) {
      // No usable heap could be found anywhere.
      ASSERT (FALSE);
      return AGESA_ERROR;
    }
    // Remember the rediscovered base for subsequent calls.
    StdHeader->HeapBasePtr = (UINTN) HeapBase;
  }

  NodeOffset = Manager->FirstActiveBufferOffset;
  Node = (BUFFER_NODE *) (HeapBase + NodeOffset);

  // Walk the active-buffer chain looking for the requested handle.
  if ((HeapBase == NULL) || (Manager->Signature != HEAP_SIGNATURE_VALID)) {
    BufferFound = FALSE;
  } else if (NodeOffset == AMD_HEAP_INVALID_HEAP_OFFSET) {
    // Chain is empty.
    BufferFound = FALSE;
  } else {
    while (Node->BufferHandle != LocateHeap->BufferHandle) {
      if (Node->OffsetOfNextNode == AMD_HEAP_INVALID_HEAP_OFFSET) {
        // Reached the end of the chain without a match.
        BufferFound = FALSE;
        break;
      }
      NodeOffset = Node->OffsetOfNextNode;
      Node = (BUFFER_NODE *) (HeapBase + NodeOffset);
    }
  }

  if (BufferFound) {
    // Step over the node header, sentinel and alignment padding to reach
    // the caller-visible data, and report the usable size.
    Padding = Node->PadSize;
    LocateHeap->BufferPtr = (UINT8 *) ((UINT8 *) Node + sizeof (BUFFER_NODE) + SIZE_OF_SENTINEL + Padding);
    LocateHeap->BufferSize = Node->BufferSize - NUM_OF_SENTINEL * SIZE_OF_SENTINEL - Padding;
    return AGESA_SUCCESS;
  }

  // Not in the internal heap; when the heap lives in system memory,
  // ask the host environment (IBV) to locate it.
  if (StdHeader->HeapStatus == HEAP_SYSTEM_MEM) {
    AgesaBuffer.StdHeader = *StdHeader;
    AgesaBuffer.BufferHandle = LocateHeap->BufferHandle;

    AGESA_TESTPOINT (TpIfBeforeLocateHeapBuffer, StdHeader);
    if (AgesaLocateBuffer (0, &AgesaBuffer) != AGESA_SUCCESS) {
      LocateHeap->BufferPtr = NULL;
      return AGESA_ERROR;
    }
    LocateHeap->BufferSize = AgesaBuffer.BufferLength;
    AGESA_TESTPOINT (TpIfAfterLocateHeapBuffer, StdHeader);

    LocateHeap->BufferPtr = (UINT8 *) (AgesaBuffer.BufferPointer);
    return AGESA_SUCCESS;
  }

  // Handle is unknown everywhere; report a bounds-check failure.
  LocateHeap->BufferPtr = NULL;
  LocateHeap->BufferSize = 0;
  if ((HeapBase != NULL) && (Manager->Signature == HEAP_SIGNATURE_VALID)) {
    PutEventLog (AGESA_BOUNDS_CHK, CPU_ERROR_HEAP_BUFFER_HANDLE_IS_NOT_PRESENT, LocateHeap->BufferHandle, 0, 0, 0, StdHeader);
  } else {
    ASSERT (FALSE);
  }
  return AGESA_BOUNDS_CHK;
}
/**
 * Reads SPD data for every potential DIMM position into a heap buffer.
 *
 * Allocates one SPD_DEF_STRUCT per possible DIMM, then iterates every
 * socket/channel/dimm position, calling out to the host environment
 * (AgesaReadSpd) to fill each record and marking which positions actually
 * hold a DIMM. On allocation failure the error is logged and the routine
 * traps (IDS_ERROR_TRAP) without populating MemPtr->SpdDataStructure.
 *
 * @param[in,out]  MemPtr  Pointer to the MEM_DATA_STRUCT to populate.
 */
VOID STATIC
MemSPDDataProcess (
  IN OUT MEM_DATA_STRUCT *MemPtr
  )
{
  UINT8 Socket;
  UINT8 Channel;
  UINT8 Dimm;
  UINT8 SpdIndex;
  UINT32 ReadStatus;
  UINT8 SocketCount;
  UINT8 ChannelsOnSocket;
  UINT8 DimmsOnChannel;
  SPD_DEF_STRUCT *SpdEntry;
  PSO_TABLE *PsoTable;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  AGESA_READ_SPD_PARAMS SpdParam;

  ASSERT (MemPtr != NULL);
  SocketCount = (UINT8) (0x000000FF & GetPlatformNumberOfSockets ());
  PsoTable = MemPtr->ParameterListPtr->PlatformMemoryConfiguration;

  //
  // Reserve heap space for one SPD record per possible DIMM position.
  //
  AllocHeapParams.RequestedBufferSize = (GetSpdSocketIndex (PsoTable, SocketCount, &MemPtr->StdHeader) * sizeof (SPD_DEF_STRUCT));
  AllocHeapParams.BufferHandle = AMD_MEM_SPD_HANDLE;
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_SPD, 0, 0, 0, 0, &MemPtr->StdHeader);
    //
    // Assert here if unable to allocate heap for SPDs
    //
    IDS_ERROR_TRAP;
    return;
  }

  MemPtr->SpdDataStructure = (SPD_DEF_STRUCT *) AllocHeapParams.BufferPtr;

  //
  // Seed SpdParam with the caller's standard header.
  //
  LibAmdMemCopy ((VOID *)&SpdParam, (VOID *)MemPtr, (UINTN)sizeof (SpdParam.StdHeader), &MemPtr->StdHeader);

  //
  // Walk every socket/channel/dimm position and read its SPD.
  //
  SpdParam.MemData = MemPtr;
  SpdIndex = 0;
  for (Socket = 0; Socket < SocketCount; Socket++) {
    ChannelsOnSocket = GetMaxChannelsPerSocket (PsoTable, Socket, &MemPtr->StdHeader);
    SpdParam.SocketId = Socket;
    for (Channel = 0; Channel < ChannelsOnSocket; Channel++) {
      SpdParam.MemChannelId = Channel;
      DimmsOnChannel = GetMaxDimmsPerChannel (PsoTable, Socket, Channel);
      for (Dimm = 0; Dimm < DimmsOnChannel; Dimm++) {
        SpdParam.DimmId = Dimm;
        // Each position consumes the next record in the flat SPD array.
        SpdEntry = &(MemPtr->SpdDataStructure[SpdIndex++]);
        SpdParam.Buffer = SpdEntry->Data;
        AGESA_TESTPOINT (TpProcMemBeforeAgesaReadSpd, &MemPtr->StdHeader);
        ReadStatus = AgesaReadSpd (0, &SpdParam);
        AGESA_TESTPOINT (TpProcMemAfterAgesaReadSpd, &MemPtr->StdHeader);
        // A successful read means a DIMM is present at this position.
        SpdEntry->DimmPresent = (ReadStatus == AGESA_SUCCESS);
        if (ReadStatus == AGESA_SUCCESS) {
          IDS_HDT_CONSOLE (MEM_FLOW, "SPD Socket %d Channel %d Dimm %d: %08x\n", Socket, Channel, Dimm, (intptr_t)SpdParam.Buffer);
        }
      }
    }
  }
}
/**
 *
 * A sub-function which extracts LRDIMM F0RC8, F1RC0, F1RC1 and F1RC2 value from a input
 * table and stores extracted value to a specific address.
 *
 * @param[in,out]  *NBPtr          - Pointer to the MEM_NB_BLOCK
 * @param[in]      *EntryOfTables  - Pointer to MEM_PSC_TABLE_BLOCK
 *
 * @return         TRUE  - Succeed in extracting the table value
 * @return         FALSE - Fail to extract the table value
 *
 */
BOOLEAN
MemPGetLRIBT (
  IN OUT MEM_NB_BLOCK *NBPtr,
  IN MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 MaxDimmPerCh;
  UINT8 NOD;
  UINT8 TableSize;
  UINT32 CurDDRrate;
  UINT8 DDR3Voltage;
  UINT16 RankTypeOfPopulatedDimm;
  UINT16 RankTypeInTable;
  UINT8 PsoMaskLRIBT;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  PSCFG_L_IBT_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;

  // Nothing to do when the current channel has no LRDIMMs.
  if (CurrentChannel->LrDimmPresent == 0) {
    return TRUE;
  }

  TblPtr = NULL;
  TableSize = 0;
  PackageType = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;
  MaxDimmPerCh = GetMaxDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);
  // NOD is a one-hot bit mask encoding the channel's max-DIMM count,
  // matched against the table headers' NumOfDimm / DimmPerCh bit masks.
  NOD = (UINT8) 1 << (MaxDimmPerCh - 1);

  i = 0;
  // Obtain table pointer, table size, Logical Cpuid and PSC type according to NB type and package type.
  while (EntryOfTables->TblEntryOfLRIBT[i] != NULL) {
    if (((EntryOfTables->TblEntryOfLRIBT[i])->Header.NumOfDimm & NOD) != 0) {
      LogicalCpuid = (EntryOfTables->TblEntryOfLRIBT[i])->Header.LogicalCpuid;
      PackageType = (EntryOfTables->TblEntryOfLRIBT[i])->Header.PackageType;
      //
      // Determine if this is the expected NB Type
      //
      if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
        TblPtr = (PSCFG_L_IBT_ENTRY *) ((EntryOfTables->TblEntryOfLRIBT[i])->TBLPtr);
        TableSize = (EntryOfTables->TblEntryOfLRIBT[i])->TableSize;
        break;
      }
    }
    i++;
  }

  // Check whether no table entry is found.
  if (EntryOfTables->TblEntryOfLRIBT[i] == NULL) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo LRDIMM IBT table\n");
    return FALSE;
  }

  // Encode the current DDR rate and VDDIO as one-hot bit masks so they can
  // be matched against the table rows' DDRrate / VDDIO mask fields.
  CurDDRrate = (UINT32) (1 << (CurrentChannel->DCTPtr->Timings.Speed / 66));
  DDR3Voltage = (UINT8) (1 << CONVERT_VDDIO_TO_ENCODED (NBPtr->RefPtr->DDR3Voltage));
  RankTypeOfPopulatedDimm = MemPGetPsRankType (CurrentChannel);

  // Scan the table for a row matching DIMM count, DDR rate, VDDIO and rank
  // population; on the first match, latch the RC values and stop (i < TableSize).
  for (i = 0; i < TableSize; i++) {
    MemPConstructRankTypeMap ((UINT16) TblPtr->Dimm0, (UINT16) TblPtr->Dimm1, (UINT16) TblPtr->Dimm2, &RankTypeInTable);
    if ((TblPtr->DimmPerCh & NOD) != 0) {
      if ((TblPtr->DDRrate & CurDDRrate) != 0) {
        if ((TblPtr->VDDIO & DDR3Voltage) != 0) {
          if ((RankTypeInTable & RankTypeOfPopulatedDimm) == RankTypeOfPopulatedDimm) {
            NBPtr->PsPtr->F0RC8 = (UINT8) TblPtr->F0RC8;
            NBPtr->PsPtr->F1RC0 = (UINT8) TblPtr->F1RC0;
            NBPtr->PsPtr->F1RC1 = (UINT8) TblPtr->F1RC1;
            NBPtr->PsPtr->F1RC2 = (UINT8) TblPtr->F1RC2;
            break;
          }
        }
      }
    }
    TblPtr++;
  }

  //
  // If there is no entry, check if overriding value existed. If not, return FALSE
  //
  PsoMaskLRIBT = (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_LRDIMM_IBT);
  if ((PsoMaskLRIBT == 0) && (i == TableSize)) {
    // No table row matched and no platform override exists: log, flag the
    // error on the MCT, and let the error handler exclude chip selects.
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo LRDIMM IBT entries\n");
    PutEventLog (AGESA_ERROR, MEM_ERROR_LR_IBT_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
    if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
      ASSERT (FALSE);
    }
    return FALSE;
  }

  return TRUE;
}
/**
 * Family 10h core 0 entry point for performing the family 10h Processor-
 * Systemboard Power Delivery Check.
 *
 * The steps are as follows:
 *    1. Starting with P0, loop through all P-states until a passing state is
 *       found. A passing state is one in which the current required by the
 *       CPU is less than the maximum amount of current that the system can
 *       provide to the CPU. If P0 is under the limit, no further action is
 *       necessary.
 *    2. If at least one P-State is under the limit & at least one P-State is
 *       over the limit, the BIOS must:
 *       a. If the processor's current P-State is disabled by the power check,
 *          then the BIOS must request a transition to an enabled P-state
 *          using MSRC001_0062[PstateCmd] and wait for MSRC001_0063[CurPstate]
 *          to reflect the new value.
 *       b. Copy the contents of the enabled P-state MSRs to the highest
 *          performance P-state locations.
 *       c. Request a P-state transition to the P-state MSR containing the
 *          COF/VID values currently applied.
 *       d. On revision E systems with CPUID Fn8000_0007[CPB]=1, if P0 is disabled then
 *          program F4x15C[BoostSrc]=0. This step uses hardware P-state numbering.
 *       e. Adjust the following P-state parameters affected by the P-state
 *          MSR copy by subtracting the number of P-states that are disabled
 *          by the power check.
 *          1. F3x64[HtcPstateLimit]
 *          2. F3x68[StcPstateLimit]
 *          3. F3xDC[PstateMaxVal]
 *    3. If all P-States are over the limit, the BIOS must:
 *       a. If the processor's current P-State is !=F3xDC[PstateMaxVal], then
 *          write F3xDC[PstateMaxVal] to MSRC001_0062[PstateCmd] and wait for
 *          MSRC001_0063[CurPstate] to reflect the new value.
 *       b. If F3xDC[PstateMaxVal]!= 000b, copy the contents of the P-state
 *          MSR pointed to by F3xDC[PstateMaxVal] to MSRC001_0064 and set
 *          MSRC001_0064[PstateEn]
 *       c. Write 000b to MSRC001_0062[PstateCmd] and wait for MSRC001_0063
 *          [CurPstate] to reflect the new value.
 *       d. Adjust the following P-state parameters to zero on revision D and earlier processors.
 *          On revision E processors adjust the following fields to F4x15C[NumBoostStates]:
 *          1. F3x64[HtcPstateLimit]
 *          2. F3x68[StcPstateLimit]
 *          3. F3xDC[PstateMaxVal]
 *       e. For revision E systems with CPUID Fn8000_0007[CPB]=1, program F4x15C[BoostSrc]=0.
 *
 * @param[in]  FamilySpecificServices  The current Family Specific Services.
 * @param[in]  CpuEarlyParams          Service parameters
 * @param[in]  StdHeader               Config handle for library and services.
 *
 */
VOID
F10PmPwrCheck (
  IN CPU_SPECIFIC_SERVICES *FamilySpecificServices,
  IN AMD_CPU_EARLY_PARAMS *CpuEarlyParams,
  IN AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT8 DisPsNum;
  UINT8 PsMaxVal;
  UINT8 Pstate;
  UINT32 ProcIddMax;
  UINT32 LocalPciRegister;
  UINT32 Socket;
  UINT32 Module;
  UINT32 Core;
  UINT32 AndMask;
  UINT32 OrMask;
  UINT32 PstateLimit;
  PCI_ADDR PciAddress;
  UINT64 LocalMsrRegister;
  AP_TASK TaskPtr;
  AGESA_STATUS IgnoredSts;
  PWRCHK_ERROR_DATA ErrorData;

  // get the socket number
  IdentifyCore (StdHeader, &Socket, &Module, &Core, &IgnoredSts);
  ErrorData.SocketNumber = (UINT8)Socket;
  // This entry point is expected to run on core 0 only.
  ASSERT (Core == 0);

  // get the Max P-state value: scan the P-state MSRs from the top down and
  // stop at the highest-numbered enabled one.
  for (PsMaxVal = NM_PS_REG - 1; PsMaxVal != 0; --PsMaxVal) {
    LibAmdMsrRead (PS_REG_BASE + PsMaxVal, &LocalMsrRegister, StdHeader);
    if (((PSTATE_MSR *) &LocalMsrRegister)->PsEnable == 1) {
      break;
    }
  }

  ErrorData.HwPstateNumber = (UINT8) (PsMaxVal + 1);

  // Count the P-states (starting from P0) whose max current draw exceeds
  // the VRM's current limit; stop at the first passing state.
  DisPsNum = 0;
  for (Pstate = 0; Pstate < ErrorData.HwPstateNumber; Pstate++) {
    if (FamilySpecificServices->GetProcIddMax (FamilySpecificServices, Pstate, &ProcIddMax, StdHeader)) {
      if (ProcIddMax > CpuEarlyParams->PlatformConfig.VrmProperties[CoreVrm].CurrentLimit) {
        // Add to event log the Pstate that exceeded the current limit
        PutEventLog (AGESA_WARNING, CPU_EVENT_PM_PSTATE_OVERCURRENT, Socket, Pstate, 0, 0, StdHeader);
        DisPsNum++;
      } else {
        break;
      }
    }
  }

  // If all P-state registers are disabled, move P[PsMaxVal] to P0
  // and transition to P0, then wait for CurPstate = 0

  ErrorData.AllowablePstateNumber = ((PsMaxVal + 1) - DisPsNum);

  // We only need to log this event on the BSC
  if (ErrorData.AllowablePstateNumber == 0) {
    PutEventLog (AGESA_FATAL, CPU_EVENT_PM_ALL_PSTATE_OVERCURRENT, Socket, 0, 0, 0, StdHeader);
  }

  if (DisPsNum != 0) {
    GetPciAddress (StdHeader, Socket, Module, &PciAddress, &IgnoredSts);

    // Check if CPB is supported. if yes, get the number of boost states.
    ErrorData.NumberofBoostStates = F10GetNumberOfBoostedPstatesOnCore (StdHeader);

    // Run the per-core fixup (MSR copy / P-state transition) on every local
    // core, passing ErrorData by value through the AP task mechanism.
    TaskPtr.FuncAddress.PfApTaskI = F10PmPwrCheckCore;
    TaskPtr.DataTransfer.DataSizeInDwords = SIZE_IN_DWORDS (PWRCHK_ERROR_DATA);
    TaskPtr.DataTransfer.DataPtr = &ErrorData;
    TaskPtr.DataTransfer.DataTransferFlags = 0;
    TaskPtr.ExeFlags = WAIT_FOR_CORE;
    ApUtilRunCodeOnAllLocalCoresAtEarly (&TaskPtr, StdHeader, CpuEarlyParams);

    // Final Step 1
    //   For revision E systems with CPUID Fn8000_0007[CPB]=1, if P0 is disabled then
    //   program F4x15C[BoostSrc]=0. This step uses hardware P-state numbering.
    if (ErrorData.NumberofBoostStates == 1) {
      PciAddress.Address.Function = FUNC_4;
      PciAddress.Address.Register = CPB_CTRL_REG;
      LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader);
      ((CPB_CTRL_REGISTER *) &LocalPciRegister)->BoostSrc = 0;
      LibAmdPciWrite (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader);
    }

    // Final Step 2
    //   F3x64[HtPstatelimit] -= disPsNum
    //   F3x68[StcPstateLimit]-= disPsNum
    //   F3xDC[PstateMaxVal]-= disPsNum
    // AndMask/OrMask are built by overlaying each register's bit-field layout
    // on a UINT32: AndMask clears only the target field, OrMask supplies the
    // new field value for the read-modify-write in ModifyCurrentSocketPci.
    PciAddress.Address.Function = FUNC_3;
    PciAddress.Address.Register = HTC_REG;
    AndMask = 0xFFFFFFFF;
    ((HTC_REGISTER *) &AndMask)->HtcPstateLimit = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3x64
    PstateLimit = ((HTC_REGISTER *) &LocalPciRegister)->HtcPstateLimit;
    if (ErrorData.AllowablePstateNumber != 0) {
      // Some P-states survived: shift the limit down by the disabled count.
      if (PstateLimit > DisPsNum) {
        PstateLimit -= DisPsNum;
        ((HTC_REGISTER *) &OrMask)->HtcPstateLimit = PstateLimit;
      }
    } else {
      // All P-states failed: clamp the limit to the boost-state count.
      ((HTC_REGISTER *) &OrMask)->HtcPstateLimit = ErrorData.NumberofBoostStates;
    }
    ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3x64

    PciAddress.Address.Register = STC_REG;
    AndMask = 0xFFFFFFFF;
    ((STC_REGISTER *) &AndMask)->StcPstateLimit = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3x68
    PstateLimit = ((STC_REGISTER *) &LocalPciRegister)->StcPstateLimit;
    if (ErrorData.AllowablePstateNumber != 0) {
      if (PstateLimit > DisPsNum) {
        PstateLimit -= DisPsNum;
        ((STC_REGISTER *) &OrMask)->StcPstateLimit = PstateLimit;
      }
    } else {
      ((STC_REGISTER *) &OrMask)->StcPstateLimit = ErrorData.NumberofBoostStates;
    }
    ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3x68

    PciAddress.Address.Register = CPTC2_REG;
    AndMask = 0xFFFFFFFF;
    ((CLK_PWR_TIMING_CTRL2_REGISTER *) &AndMask)->PstateMaxVal = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3xDC
    PstateLimit = ((CLK_PWR_TIMING_CTRL2_REGISTER *) &LocalPciRegister)->PstateMaxVal;
    if (ErrorData.AllowablePstateNumber != 0) {
      if (PstateLimit > DisPsNum) {
        PstateLimit -= DisPsNum;
        ((CLK_PWR_TIMING_CTRL2_REGISTER *) &OrMask)->PstateMaxVal = PstateLimit;
      }
    } else {
      ((CLK_PWR_TIMING_CTRL2_REGISTER *) &OrMask)->PstateMaxVal = ErrorData.NumberofBoostStates;
    }
    ModifyCurrentSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3xDC

    // Now that P0 has changed, recalculate VSSlamTime
    F10ProgramVSSlamTimeOnSocket (&PciAddress, CpuEarlyParams, StdHeader);
  }
}
/**
 * Family 15h core 0 entry point for performing the family 15h Processor-
 * Systemboard Power Delivery Check.
 *
 * The steps are as follows:
 *    1. Starting with P0, loop through all P-states until a passing state is
 *       found.  A passing state is one in which the current required by the
 *       CPU is less than the maximum amount of current that the system can
 *       provide to the CPU.  If P0 is under the limit, no further action is
 *       necessary.
 *    2. If at least one P-State is under the limit & at least one P-State is
 *       over the limit, the BIOS must:
 *       a. If the processor's current P-State is disabled by the power check,
 *          then the BIOS must request a transition to an enabled P-state
 *          using MSRC001_0062[PstateCmd] and wait for MSRC001_0063[CurPstate]
 *          to reflect the new value.
 *       b. Copy the contents of the enabled P-state MSRs to the highest
 *          performance P-state locations.
 *       c. Request a P-state transition to the P-state MSR containing the
 *          COF/VID values currently applied.
 *       d. If a subset of boosted P-states are disabled, then copy the contents
 *          of the highest performance boosted P-state still enabled to the
 *          boosted P-states that have been disabled.
 *       e. If all boosted P-states are disabled, then program D18F4x15C[BoostSrc]
 *          to zero.
 *       f. Adjust the following P-state parameters affected by the P-state
 *          MSR copy by subtracting the number of P-states that are disabled
 *          by the power check.
 *          1. F3x64[HtcPstateLimit]
 *          2. F3x68[SwPstateLimit]
 *          3. F3xDC[PstateMaxVal]
 *    3. If all P-States are over the limit, the BIOS must:
 *       a. If the processor's current P-State is !=F3xDC[PstateMaxVal], then
 *          write F3xDC[PstateMaxVal] to MSRC001_0062[PstateCmd] and wait for
 *          MSRC001_0063[CurPstate] to reflect the new value.
 *       b. If MSRC001_0061[PstateMaxVal]!=000b, copy the contents of the P-state
 *          MSR pointed to by F3xDC[PstateMaxVal] to the software P0 MSR.
 *          Write 000b to MSRC001_0062[PstateCmd] and wait for MSRC001_0063
 *          [CurPstate] to reflect the new value.
 *       c. Adjust the following P-state parameters to zero:
 *          1. F3x64[HtcPstateLimit]
 *          2. F3x68[SwPstateLimit]
 *          3. F3xDC[PstateMaxVal]
 *       d. Program D18F4x15C[BoostSrc] to zero.
 *
 * @param[in]  FamilySpecificServices  The current Family Specific Services.
 * @param[in]  CpuEarlyParams          Service parameters
 * @param[in]  StdHeader               Config handle for library and services.
 *
 */
VOID
F15PmPwrCheck (
  IN       CPU_SPECIFIC_SERVICES *FamilySpecificServices,
  IN       AMD_CPU_EARLY_PARAMS  *CpuEarlyParams,
  IN       AMD_CONFIG_PARAMS     *StdHeader
  )
{
  UINT8       DisPsNum;           // Number of P-states disabled by the power check
  UINT8       PsMaxVal;           // Highest enabled hardware P-state number
  UINT8       Pstate;
  UINT32      ProcIddMax;         // Max current (Idd) required by a given P-state
  UINT32      LocalPciRegister;
  UINT32      Socket;
  UINT32      Module;
  UINT32      Core;
  UINT32      AndMask;
  UINT32      OrMask;
  UINT32      PstateLimit;
  PCI_ADDR    PciAddress;
  UINT64      LocalMsrRegister;
  AP_TASK     TaskPtr;
  AGESA_STATUS IgnoredSts;
  PWRCHK_ERROR_DATA ErrorData;
  UINT32      NumModules;
  UINT32      HighCore;
  UINT32      LowCore;
  UINT32      ModuleIndex;

  // get the socket number
  IdentifyCore (StdHeader, &Socket, &Module, &Core, &IgnoredSts);
  ErrorData.SocketNumber = (UINT8) Socket;
  // This entry point runs on core 0 only.
  ASSERT (Core == 0);

  // get the Max P-state value: scan the P-state MSRs from the top down and
  // stop at the first one with PsEnable set.
  for (PsMaxVal = NM_PS_REG - 1; PsMaxVal != 0; --PsMaxVal) {
    LibAmdMsrRead (PS_REG_BASE + PsMaxVal, &LocalMsrRegister, StdHeader);
    if (((F15_PSTATE_MSR *) &LocalMsrRegister)->PsEnable == 1) {
      break;
    }
  }
  ErrorData.HwPstateNumber = (UINT8) (PsMaxVal + 1);

  // Starting with P0, loop through all P-states until a passing state is
  // found.  A passing state is one in which the current required by the
  // CPU is less than the maximum amount of current that the system can
  // provide to the CPU.  If P0 is under the limit, no further action is
  // necessary.
  DisPsNum = 0;
  for (Pstate = 0; Pstate < ErrorData.HwPstateNumber; Pstate++) {
    if (FamilySpecificServices->GetProcIddMax (FamilySpecificServices, Pstate, &ProcIddMax, StdHeader)) {
      if (ProcIddMax > CpuEarlyParams->PlatformConfig.VrmProperties[CoreVrm].CurrentLimit) {
        // Add to event log the Pstate that exceeded the current limit
        PutEventLog (AGESA_WARNING,
                     CPU_EVENT_PM_PSTATE_OVERCURRENT,
                     Socket, Pstate, 0, 0, StdHeader);
        DisPsNum++;
      } else {
        // P-states are ordered by performance; the first passing state means
        // all lower-performance states also pass.
        break;
      }
    }
  }
  ErrorData.AllowablePstateNumber = ((PsMaxVal + 1) - DisPsNum);

  // AllowablePstateNumber == 0 means every P-state failed the check — fatal.
  if (ErrorData.AllowablePstateNumber == 0) {
    PutEventLog (AGESA_FATAL,
                 CPU_EVENT_PM_ALL_PSTATE_OVERCURRENT,
                 Socket, 0, 0, 0, StdHeader);
  }

  if (DisPsNum != 0) {
    GetPciAddress (StdHeader, Socket, Module, &PciAddress, &IgnoredSts);

    // Read the number of boosted P-states from D18F4x15C.
    PciAddress.Address.Function = FUNC_4;
    PciAddress.Address.Register = CPB_CTRL_REG;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F4x15C
    ErrorData.NumberOfBoostStates = (UINT8) ((F15_CPB_CTRL_REGISTER *) &LocalPciRegister)->NumBoostStates;

    if (DisPsNum >= ErrorData.NumberOfBoostStates) {
      // If all boosted P-states are disabled, then program D18F4x15C[BoostSrc] to zero.
      AndMask = 0xFFFFFFFF;
      ((F15_CPB_CTRL_REGISTER *) &AndMask)->BoostSrc = 0;
      OrMask = 0x00000000;
      OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F4x15C
      // Update the result of isFeatureEnabled in heap.
      UpdateFeatureStatusInHeap (CoreBoost, FALSE, StdHeader);
      // Only the non-boost (software) P-states beyond the boost states count
      // toward the limit-register adjustments below.
      ErrorData.NumberOfSwPstatesDisabled = DisPsNum - ErrorData.NumberOfBoostStates;
    } else {
      ErrorData.NumberOfSwPstatesDisabled = 0;
    }

    NumModules = GetPlatformNumberOfModules ();
    // Only execute this loop if this is an MCM.
    if (NumModules > 1) {
      // Since the P-State MSRs are shared across a
      // node, we only need to set one core in the node for the modified number
      // of supported p-states to be reported across all of the cores in the module.
      TaskPtr.FuncAddress.PfApTaskI = F15PmPwrCheckCore;
      TaskPtr.DataTransfer.DataSizeInDwords = SIZE_IN_DWORDS (PWRCHK_ERROR_DATA);
      TaskPtr.DataTransfer.DataPtr = &ErrorData;
      TaskPtr.DataTransfer.DataTransferFlags = 0;
      TaskPtr.ExeFlags = WAIT_FOR_CORE;
      for (ModuleIndex = 0; ModuleIndex < NumModules; ModuleIndex++) {
        // Execute the P-State reduction code on the module's primary core only.
        // Skip this code for the BSC's module.
        if (ModuleIndex != Module) {
          if (GetGivenModuleCoreRange (Socket, ModuleIndex, &LowCore, &HighCore, StdHeader)) {
            ApUtilRunCodeOnSocketCore ((UINT8)Socket, (UINT8)LowCore, &TaskPtr, StdHeader);
          }
        }
      }
    }

    // Path for SCM and the BSC
    F15PmPwrCheckCore (&ErrorData, StdHeader);

    // Final Step
    //   F3x64[HtPstatelimit] -= disPsNum
    //   F3x68[SwPstateLimit] -= disPsNum
    //   F3xDC[PstateMaxVal]  -= disPsNum
    // Each register is handled the same way: clear the limit field via AndMask,
    // then OR in the reduced limit (left at zero when the current limit does not
    // exceed the number of disabled software P-states).
    PciAddress.Address.Function = FUNC_3;
    PciAddress.Address.Register = HTC_REG;
    AndMask = 0xFFFFFFFF;
    ((HTC_REGISTER *) &AndMask)->HtcPstateLimit = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3x64
    PstateLimit = ((HTC_REGISTER *) &LocalPciRegister)->HtcPstateLimit;
    if (PstateLimit > ErrorData.NumberOfSwPstatesDisabled) {
      PstateLimit -= ErrorData.NumberOfSwPstatesDisabled;
      ((HTC_REGISTER *) &OrMask)->HtcPstateLimit = PstateLimit;
    }
    OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3x64

    PciAddress.Address.Register = SW_PS_LIMIT_REG;
    AndMask = 0xFFFFFFFF;
    ((SW_PS_LIMIT_REGISTER *) &AndMask)->SwPstateLimit = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3x68
    PstateLimit = ((SW_PS_LIMIT_REGISTER *) &LocalPciRegister)->SwPstateLimit;
    if (PstateLimit > ErrorData.NumberOfSwPstatesDisabled) {
      PstateLimit -= ErrorData.NumberOfSwPstatesDisabled;
      ((SW_PS_LIMIT_REGISTER *) &OrMask)->SwPstateLimit = PstateLimit;
    }
    OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3x68

    PciAddress.Address.Register = CPTC2_REG;
    AndMask = 0xFFFFFFFF;
    ((CLK_PWR_TIMING_CTRL2_REGISTER *) &AndMask)->PstateMaxVal = 0;
    OrMask = 0x00000000;
    LibAmdPciRead (AccessWidth32, PciAddress, &LocalPciRegister, StdHeader); // F3xDC
    PstateLimit = ((CLK_PWR_TIMING_CTRL2_REGISTER *) &LocalPciRegister)->PstateMaxVal;
    if (PstateLimit > ErrorData.NumberOfSwPstatesDisabled) {
      PstateLimit -= ErrorData.NumberOfSwPstatesDisabled;
      ((CLK_PWR_TIMING_CTRL2_REGISTER *) &OrMask)->PstateMaxVal = PstateLimit;
    }
    OptionMultiSocketConfiguration.ModifyCurrSocketPci (&PciAddress, AndMask, OrMask, StdHeader); // F3xDC
  }
}
/**
 *
 * Look up data Bus config tables and return the pointer to the matched entry.
 *
 * Scans the platform-specific data bus configuration tables for an entry that
 * matches the channel's DIMM slot population, DDR rate, VDDIO and rank type,
 * and applies that entry's RttNom/RttWr and drive strengths.  Table-driven
 * platform overrides (slow access mode, address timing) are always processed,
 * whether or not a table entry matched.
 *
 * @param[in,out]   *NBPtr - Pointer to the MEM_NB_BLOCK
 * @param[in]       *ListOfTables - Pointer to PSC_TBL_ENTRY array of pointers
 *
 * @return          TRUE  - Table values can be extracted per dimm population and ranks type.
 * @return          FALSE - Table values cannot be extracted per dimm population and ranks type.
 *
 */
BOOLEAN
MemPLookupDataBusCfgTabs (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       PSC_TBL_ENTRY *ListOfTables[]
  )
{
  UINT8   Index;
  UINT8   TableSize;
  UINT32  SpeedMask;
  UINT8   VddioMask;
  UINT16  TblRankType;
  UINT8   OverrideMask;
  UINT8   Cs;
  BOOLEAN EntryFound;
  PSCFG_DATABUS_ENTRY *EntryPtr;
  CH_DEF_STRUCT *ChanPtr;

  ChanPtr = NBPtr->ChannelPtr;
  EntryFound = FALSE;
  TableSize = 0;

  EntryPtr = (PSCFG_DATABUS_ENTRY *) MemPGetTableEntry (NBPtr, ListOfTables, &TableSize);
  if (EntryPtr != NULL) {
    // Build one-hot masks for the current DDR rate and VDDIO so each table
    // row can be matched with a simple AND.
    SpeedMask = (UINT32) (1 << (ChanPtr->DCTPtr->Timings.Speed / 66));
    VddioMask = (UINT8) (1 << CONVERT_VDDIO_TO_ENCODED (NBPtr->RefPtr->DDR3Voltage));
    for (Index = 0; Index < TableSize; Index++) {
      if (((EntryPtr->DimmPerCh & NBPtr->PsPtr->NumOfDimmSlots) != 0) &&
          ((EntryPtr->DDRrate & SpeedMask) != 0) &&
          ((EntryPtr->VDDIO & VddioMask) != 0)) {
        // Row matches slot count, speed and voltage; now check rank type.
        TblRankType = ((UINT16) EntryPtr->Dimm0) | ((UINT16) EntryPtr->Dimm1 << 4) | (NP << 8) | (NP << 12);
        if ((TblRankType & NBPtr->PsPtr->RankType) == NBPtr->PsPtr->RankType) {
          // Apply the matched ODT and drive strength settings to every chip select.
          for (Cs = 0; Cs < MAX_CS_PER_CHANNEL; Cs++) {
            NBPtr->PsPtr->RttNom[Cs] = (UINT8) EntryPtr->RttNom;
            NBPtr->PsPtr->RttWr[Cs] = (UINT8) EntryPtr->RttWr;
          }
          NBPtr->PsPtr->DqStrength = (UINT8) EntryPtr->DqStrength;
          NBPtr->PsPtr->DqsStrength = (UINT8) EntryPtr->DqsStrength;
          NBPtr->PsPtr->OdtStrength = (UINT8) EntryPtr->OdtStrength;
          EntryFound = TRUE;
          break;
        }
      }
      EntryPtr++;
    }
  }

  // Both override passes must run unconditionally for their side effects;
  // a nonzero combined mask means overrides supplied the configuration.
  OverrideMask = (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_SLOWACCMODE);
  OverrideMask &= (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_ADDRTMG);

  if (EntryFound || (OverrideMask != 0)) {
    return TRUE;
  }
  IDS_HDT_CONSOLE (MEM_FLOW, "\nNo data bus config entries\n");

  if (NBPtr->SharedPtr->VoltageMap != VDDIO_DETERMINED) {
    // Final VDDIO has not been settled yet; do not raise an error at this pass.
    return TRUE;
  }

  // No table entry and no override at the determined voltage: log, flag the
  // error, and let the error handler try to recover by excluding chip selects.
  PutEventLog (AGESA_ERROR, MEM_ERROR_SAO_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
  SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
  if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
    ASSERT (FALSE);
  }
  return FALSE;
}
/**
 *
 *
 * This function defines the memory initialization flow for
 * systems that only support RB processors.
 *
 * The sequence is: MCT init, DDR3 voltage leveling, DRAM/DCT init and memory
 * map creation, DCT synchronization, CPU memory typing, DQS training (skipped
 * when memory context restore succeeds), timing finalization, interleaving,
 * UMA allocation, ECC, memory clear, and finally context save and DMI support.
 * Any failed stage aborts the flow with AGESA_FATAL (AGESA_CRITICAL for DMI).
 *
 *     @param[in,out]   *MemMainPtr - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 *     @return          AGESA_STATUS
 *                          - AGESA_ALERT
 *                          - AGESA_CRITICAL
 *                          - AGESA_FATAL
 *                          - AGESA_SUCCESS
 *                          - AGESA_WARNING
 */
AGESA_STATUS
MemMFlowDA (
  IN OUT   MEM_MAIN_DATA_BLOCK *MemMainPtr
  )
{
  UINT8            Node;
  UINT8            NodeCnt;
  MEM_NB_BLOCK     *NBPtr;
  MEM_TECH_BLOCK   *TechPtr;

  NBPtr = MemMainPtr->NBPtr;
  TechPtr = MemMainPtr->TechPtr;
  NodeCnt = MemMainPtr->DieCount;

  //----------------------------------------------------------------
  // Initialize MCT
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemInitializeMCT, &(MemMainPtr->MemPtr->StdHeader));
  for (Node = 0; Node < NodeCnt; Node++) {
    if (!NBPtr[Node].InitializeMCT (&NBPtr[Node])) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  // Low voltage DDR3
  //----------------------------------------------------------------
  // Levelize DDR3 voltage based on socket, as each socket has its own voltage for dimms.
  AGESA_TESTPOINT (TpProcMemLvDdr3, &(MemMainPtr->MemPtr->StdHeader));
  if (!MemFeatMain.LvDDR3 (MemMainPtr)) {
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // Initialize DRAM and DCTs, and Create Memory Map
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemInitMCT, &(MemMainPtr->MemPtr->StdHeader));
  for (Node = 0; Node < NodeCnt; Node++) {
    // Initialize Memory Controller and Dram
    // FIX: pass the debug-area flag; every other IDS_HDT_CONSOLE call in this
    // file supplies one (MEM_FLOW/MEM_STATUS/etc.) as the first argument.
    IDS_HDT_CONSOLE (MEM_STATUS, "!Node %d\n", Node);
    if (!NBPtr[Node].InitMCT (&NBPtr[Node])) {
      return AGESA_FATAL; // fatalexit
    }

    // Create memory map
    AGESA_TESTPOINT (TpProcMemSystemMemoryMapping, &(MemMainPtr->MemPtr->StdHeader));
    if (!NBPtr[Node].HtMemMapInit (&NBPtr[Node])) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------
  // If there is no dimm on the system, do fatal exit
  //----------------------------------------------------
  if (NBPtr[BSP_DIE].RefPtr->SysLimit == 0) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_NO_DIMM_FOUND_ON_SYSTEM, 0, 0, 0, 0, &(MemMainPtr->MemPtr->StdHeader));
    ASSERT (FALSE);
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // Synchronize DCTs
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemSynchronizeDcts, &(MemMainPtr->MemPtr->StdHeader));
  for (Node = 0; Node < NodeCnt; Node++) {
    if (!NBPtr[Node].SyncDctsReady (&NBPtr[Node])) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  // CpuMemTyping
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemMtrrConfiguration, &(MemMainPtr->MemPtr->StdHeader));
  if (!NBPtr[BSP_DIE].CpuMemTyping (&NBPtr[BSP_DIE])) {
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // Before Training Table values
  //----------------------------------------------------------------
  for (Node = 0; Node < NodeCnt; Node++) {
    MemFInitTableDrive (&NBPtr[Node], MTBeforeTrn);
  }

  //----------------------------------------------------------------
  // Memory Context Restore
  //----------------------------------------------------------------
  if (!MemFeatMain.MemRestore (MemMainPtr)) {
    // Do DQS training only if memory context restore fails
    //----------------------------------------------------------------
    // Training
    //----------------------------------------------------------------
    AGESA_TESTPOINT (TpProcMemDramTraining, &(MemMainPtr->MemPtr->StdHeader));
    IDS_OPTION_HOOK (IDS_BEFORE_DQS_TRAINING, MemMainPtr, &(MemMainPtr->MemPtr->StdHeader));
    MemMainPtr->mmSharedPtr->DimmExcludeFlag = TRAINING;
    if (!MemFeatMain.Training (MemMainPtr)) {
      return AGESA_FATAL;
    }
    // FIX: pass the debug-area flag (see note above the "!Node" trace).
    IDS_HDT_CONSOLE (MEM_FLOW, "\nEnd DQS training\n\n");
  }

  //----------------------------------------------------------------
  // Disable chipselects that fail training
  //----------------------------------------------------------------
  MemMainPtr->mmSharedPtr->DimmExcludeFlag = END_TRAINING;
  MemFeatMain.ExcludeDIMM (MemMainPtr);
  MemMainPtr->mmSharedPtr->DimmExcludeFlag = NORMAL;

  //----------------------------------------------------------------
  // OtherTiming
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemOtherTiming, &(MemMainPtr->MemPtr->StdHeader));
  for (Node = 0; Node < NodeCnt; Node++) {
    if (!NBPtr[Node].OtherTiming (&NBPtr[Node])) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  // After Training Table values
  //----------------------------------------------------------------
  for (Node = 0; Node < NodeCnt; Node++) {
    MemFInitTableDrive (&NBPtr[Node], MTAfterTrn);
  }

  //----------------------------------------------------------------
  // SetDqsEccTimings
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemSetDqsEccTmgs, &(MemMainPtr->MemPtr->StdHeader));
  for (Node = 0; Node < NodeCnt; Node++) {
    if (!TechPtr[Node].SetDqsEccTmgs (&TechPtr[Node])) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  // Online Spare
  //----------------------------------------------------------------
  if (!MemFeatMain.OnlineSpare (MemMainPtr)) {
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // Interleave banks
  //----------------------------------------------------------------
  for (Node = 0; Node < NodeCnt; Node++) {
    if (NBPtr[Node].FeatPtr->InterleaveBanks (&NBPtr[Node])) {
      if (NBPtr[Node].MCTPtr->ErrCode == AGESA_FATAL) {
        return AGESA_FATAL;
      }
    }
  }

  //----------------------------------------------------------------
  // Interleave Nodes
  //----------------------------------------------------------------
  if (!MemFeatMain.InterleaveNodes (MemMainPtr)) {
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // Interleave channels
  //----------------------------------------------------------------
  for (Node = 0; Node < NodeCnt; Node++) {
    if (NBPtr[Node].FeatPtr->InterleaveChannels (&NBPtr[Node])) {
      if (NBPtr[Node].MCTPtr->ErrCode == AGESA_FATAL) {
        return AGESA_FATAL;
      }
    }
  }

  //----------------------------------------------------------------
  // UMA Allocation & UMAMemTyping
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemUMAMemTyping, &(MemMainPtr->MemPtr->StdHeader));
  if (!MemFeatMain.UmaAllocation (MemMainPtr)) {
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // Interleave region
  //----------------------------------------------------------------
  NBPtr[BSP_DIE].FeatPtr->InterleaveRegion (&NBPtr[BSP_DIE]);

  //----------------------------------------------------------------
  // ECC
  //----------------------------------------------------------------
  if (!MemFeatMain.InitEcc (MemMainPtr)) {
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // Memory Clear
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemMemClr, &(MemMainPtr->MemPtr->StdHeader));
  if (!MemFeatMain.MemClr (MemMainPtr)) {
    return AGESA_FATAL;
  }

  //----------------------------------------------------------------
  // OnDimm Thermal
  //----------------------------------------------------------------
  for (Node = 0; Node < NodeCnt; Node++) {
    if (NBPtr[Node].FeatPtr->OnDimmThermal (&NBPtr[Node])) {
      if (NBPtr[Node].MCTPtr->ErrCode == AGESA_FATAL) {
        return AGESA_FATAL;
      }
    }
  }

  //----------------------------------------------------------------
  // Finalize MCT
  //----------------------------------------------------------------
  for (Node = 0; Node < NodeCnt; Node++) {
    if (!NBPtr[Node].FinalizeMCT (&NBPtr[Node])) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  // Memory Context Save
  //----------------------------------------------------------------
  MemFeatMain.MemSave (MemMainPtr);

  //----------------------------------------------------------------
  // Memory DMI support
  //----------------------------------------------------------------
  if (!MemFeatMain.MemDmi (MemMainPtr)) {
    return AGESA_CRITICAL;
  }

  return AGESA_SUCCESS;
}
/**
 *
 * A sub-function which extracts the value of max frequency supported from an input table and
 * compares it with DCTPtr->Timings.TargetSpeed, lowering the target speed when
 * the table limit is below it.  On an unsupported population the DCT is
 * disabled and its DIMMs excluded.
 *
 * @param[in,out]   *NBPtr   - Pointer to the MEM_NB_BLOCK
 * @param[in]       *EntryOfTables     -  Pointer to MEM_PSC_TABLE_BLOCK
 *
 * @return          TRUE - Succeed in extracting the table value
 * @return          FALSE -  Fail to extract the table value
 *
 */
BOOLEAN
MemPGetMaxFreqSupported (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 MaxDimmSlotPerCh;      // Physical DIMM slots per channel (excludes soldered-down)
  UINT8 MaxDimmPerCh;          // Total DIMMs per channel including soldered-down
  UINT8 NOD;                   // One-hot "number of dimm slots" selector for table matching
  UINT8 TableSize;
  PSCFG_TYPE Type;             // PSCFG_MAXFREQ for normal DIMMs, otherwise the LRDIMM layout
  UINT16 CDN;                  // Packed condition value (dimm/rank counts) to match against table rows
  UINT16 MaxFreqSupported;
  UINT16 *SpeedArray;          // Per-voltage speed limits from the matched row (or PSO override)
  UINT8 DDR3Voltage;
  UINT8 CurrentVoltage;
  DIMM_TYPE DimmType;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  BOOLEAN DisDct;              // Set when this DCT must be disabled
  UINT8 PsoMaskMaxFreq;
  UINT16 PsoMaskMaxFreq16;
  UINT8 NumDimmSlotInTable;
  UINT16 DimmPopInTable;
  PSCFG_MAXFREQ_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;
  PSC_TBL_ENTRY **TblEntryOfMaxFreq;

  CurrentChannel = NBPtr->ChannelPtr;

  DisDct = FALSE;
  Type = PSCFG_MAXFREQ;
  TblPtr = NULL;
  TableSize = 0;
  PackageType = 0;
  NumDimmSlotInTable = 0;
  DimmPopInTable = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;
  SpeedArray = NULL;

  MaxDimmPerCh = GetMaxDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);
  MaxDimmSlotPerCh = MaxDimmPerCh - GetMaxSolderedDownDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);

  // Classify the channel's DIMM technology from presence bitmaps.
  if (CurrentChannel->RegDimmPresent != 0) {
    DimmType = RDIMM_TYPE;
  } else if (CurrentChannel->SODimmPresent != 0) {
    DimmType = SODIMM_TYPE;
  } else if (CurrentChannel->LrDimmPresent != 0) {
    DimmType = LRDIMM_TYPE;
  } else {
    DimmType = UDIMM_TYPE;
  }

  // Check if it is "SODIMM plus soldered-down DRAM" or "Soldered-down DRAM only" configuration,
  // DimmType is changed to 'SODWN_SODIMM_TYPE' if soldered-down DRAM exist
  if (MaxDimmSlotPerCh != MaxDimmPerCh) {
    // SODIMM plus soldered-down DRAM
    DimmType = SODWN_SODIMM_TYPE;
  } else if (FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_SOLDERED_DOWN_SODIMM_TYPE, NBPtr->MCTPtr->SocketId, NBPtr->ChannelPtr->ChannelID, 0, NULL, NULL) != NULL) {
    // Soldered-down DRAM only
    DimmType = SODWN_SODIMM_TYPE;
    MaxDimmSlotPerCh = 0;
  }
  // NOTE(review): the (UINT8) cast binds to the comparison, not to the ternary
  // result, because of C operator precedence; harmless here since NOD is UINT8
  // and the condition is boolean anyway, but (UINT8) ((cond) ? ... : ...) was
  // presumably intended — confirm against other AGESA platform files.
  NOD = (UINT8) (MaxDimmSlotPerCh != 0) ? (1 << (MaxDimmSlotPerCh - 1)) : _DIMM_NONE;

  TblEntryOfMaxFreq = EntryOfTables->TblEntryOfMaxFreq;
  IDS_OPTION_HOOK (IDS_GET_STRETCH_FREQUENCY_LIMIT, &TblEntryOfMaxFreq, &NBPtr->MemPtr->StdHeader);

  i = 0;
  // Obtain table pointer, table size, Logical Cpuid and PSC type according to Dimm, NB and package type.
  while (TblEntryOfMaxFreq[i] != NULL) {
    if (((TblEntryOfMaxFreq[i])->Header.DimmType & DimmType) != 0) {
      if (((TblEntryOfMaxFreq[i])->Header.NumOfDimm & NOD) != 0) {
        //
        // Determine if this is the expected NB Type
        //
        LogicalCpuid = (TblEntryOfMaxFreq[i])->Header.LogicalCpuid;
        PackageType = (TblEntryOfMaxFreq[i])->Header.PackageType;
        if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
          TblPtr = (PSCFG_MAXFREQ_ENTRY *) ((TblEntryOfMaxFreq[i])->TBLPtr);
          TableSize = (TblEntryOfMaxFreq[i])->TableSize;
          Type = (TblEntryOfMaxFreq[i])->Header.PSCType;
          break;
        }
      }
    }
    i++;
  }

  // Check whether no table entry is found.
  if (TblEntryOfMaxFreq[i] == NULL) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nDCT %d: No MaxFreq table. This channel will be disabled.\n", NBPtr->Dct);
    return FALSE;
  }

  MaxFreqSupported = UNSUPPORTED_DDR_FREQUENCY;
  CDN = 0;
  DDR3Voltage = (UINT8) CONVERT_VDDIO_TO_ENCODED (NBPtr->RefPtr->DDR3Voltage);

  // Construct the condition value: pack dimm count plus per-rank-type counts
  // into CDN via the CDNMaxFreq/CDNLMaxFreq bitfield overlays.
  ((CDNMaxFreq *)&CDN)->Dimms = CurrentChannel->Dimms;
  if (Type == PSCFG_MAXFREQ) {
    for (i = 0; i < MAX_DIMMS_PER_CHANNEL; i++) {
      if ((CurrentChannel->DimmSRPresent & (UINT8) (1 << i)) != 0) {
        ((CDNMaxFreq *)&CDN)->SR += 1;
      }
      if ((CurrentChannel->DimmDrPresent & (UINT16) (1 << i)) != 0) {
        ((CDNMaxFreq *)&CDN)->DR += 1;
      }
      if ((CurrentChannel->DimmQrPresent & (UINT16) (1 << i)) != 0) {
        // Quad-rank DIMMs are only counted in the first two slots.
        if (i < 2) {
          ((CDNMaxFreq *)&CDN)->QR += 1;
        }
      }
    }
  } else {
    // LRDIMM layout: only the LR dimm count participates in matching.
    ((CDNLMaxFreq *)&CDN)->LR = CurrentChannel->Dimms;
  }

  // Walk the table rows looking for an exact match on slot count and packed population.
  for (i = 0; i < TableSize; i++) {
    NumDimmSlotInTable = TblPtr->MAXFREQ_ENTRY.DimmSlotPerCh;
    DimmPopInTable = (Type == PSCFG_MAXFREQ) ? TblPtr->MAXFREQ_ENTRY.CDN : ((PSCFG_LR_MAXFREQ_ENTRY *)TblPtr)->LR_MAXFREQ_ENTRY.CDN;
    if (((NumDimmSlotInTable & NOD) != 0) && (CDN == DimmPopInTable)) {
      if (Type == PSCFG_MAXFREQ) {
        SpeedArray = TblPtr->MAXFREQ_ENTRY.Speed;
      } else {
        SpeedArray = ((PSCFG_LR_MAXFREQ_ENTRY *)TblPtr)->LR_MAXFREQ_ENTRY.Speed;
      }
      break;
    }
    TblPtr++;
  }

  // A platform override (PSO_TBLDRV_SPEEDLIMIT) replaces the table's speed
  // array; an invalid override config clears it entirely.
  PsoMaskMaxFreq16 = MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_SPEEDLIMIT);
  if ((PsoMaskMaxFreq16 & INVALID_CONFIG_FLAG) == 0) {
    PsoMaskMaxFreq = (UINT8) PsoMaskMaxFreq16;
    if (PsoMaskMaxFreq != 0) {
      SpeedArray = NBPtr->PsPtr->SpeedLimit;
    }
  } else {
    SpeedArray = NULL;
  }

  if (SpeedArray != NULL) {
    if (NBPtr->SharedPtr->VoltageMap != VDDIO_DETERMINED) {
      // Voltage still being leveled across sockets: record, per candidate
      // VDDIO, the max speed this channel supports so the final voltage can
      // be chosen later.
      IDS_HDT_CONSOLE (MEM_FLOW, "\nCheck speed supported for each VDDIO for Node%d DCT%d: ", NBPtr->Node, NBPtr->Dct);
      for (CurrentVoltage = VOLT1_5_ENCODED_VAL; CurrentVoltage <= VOLT1_25_ENCODED_VAL; CurrentVoltage ++) {
        if (NBPtr->SharedPtr->VoltageMap & (1 << CurrentVoltage)) {
          IDS_HDT_CONSOLE (MEM_FLOW, "%s -> %dMHz ", (CurrentVoltage == VOLT1_5_ENCODED_VAL) ? "1.5V" : ((CurrentVoltage == VOLT1_35_ENCODED_VAL) ? "1.35V" : "1.25V"), SpeedArray[CurrentVoltage]);
          if (NBPtr->DCTPtr->Timings.TargetSpeed > SpeedArray[CurrentVoltage]) {
            MaxFreqSupported = SpeedArray[CurrentVoltage];
          } else {
            MaxFreqSupported = NBPtr->DCTPtr->Timings.TargetSpeed;
          }
          // Keep the minimum across channels for this voltage.
          if (NBPtr->MaxFreqVDDIO[CurrentVoltage] > MaxFreqSupported) {
            NBPtr->MaxFreqVDDIO[CurrentVoltage] = MaxFreqSupported;
          }
        } else {
          NBPtr->MaxFreqVDDIO[CurrentVoltage] = 0;
        }
      }
      IDS_HDT_CONSOLE (MEM_FLOW, "\n");
    }
    ASSERT (DDR3Voltage <= VOLT1_25_ENCODED_VAL);
    MaxFreqSupported = SpeedArray[DDR3Voltage];
  }

  if (MaxFreqSupported == UNSUPPORTED_DDR_FREQUENCY) {
    // No entry in the table for current dimm population is found
    IDS_HDT_CONSOLE (MEM_FLOW, "\nDCT %d: No entry is found in the Max Frequency table\n", NBPtr->Dct);
    DisDct = TRUE;
  } else if (MaxFreqSupported != 0) {
    // Clamp the target speed to the table/override limit.
    if (NBPtr->DCTPtr->Timings.TargetSpeed > MaxFreqSupported) {
      NBPtr->DCTPtr->Timings.TargetSpeed = MaxFreqSupported;
    }
  } else if (NBPtr->SharedPtr->VoltageMap == VDDIO_DETERMINED) {
    // Dimm population is not supported at current voltage
    // Also if there is no performance optimization, disable the DCT
    DisDct = TRUE;
  }

  if (DisDct) {
    NBPtr->DCTPtr->Timings.DimmExclude |= NBPtr->DCTPtr->Timings.DctDimmValid;
    PutEventLog (AGESA_ERROR, MEM_ERROR_UNSUPPORTED_DIMM_CONFIG, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
    // Change target speed to highest value so it won't affect other channels when leveling frequency across the node.
    NBPtr->DCTPtr->Timings.TargetSpeed = UNSUPPORTED_DDR_FREQUENCY;
  }

  return TRUE;
}
/**
 * Performs core leveling for the system.
 *
 * This function implements the AMD_CPU_EARLY_PARAMS.CoreLevelingMode parameter.
 * The possible modes are:
 *    -0    CORE_LEVEL_LOWEST            Level to lowest common denominator
 *    -1    CORE_LEVEL_TWO               Level to 2 cores
 *    -2    CORE_LEVEL_POWER_OF_TWO      Level to 1,2,4 or 8
 *    -3    CORE_LEVEL_NONE              Do no leveling
 *    -4    CORE_LEVEL_COMPUTE_UNIT      Level cores to one core per compute unit
 *
 * @param[in]  EntryPoint        Timepoint designator.
 * @param[in]  PlatformConfig    Contains the leveling mode parameter
 * @param[in]  StdHeader         Config handle for library and services
 *
 * @return     The most severe status of any family specific service.
 *
 */
AGESA_STATUS
CoreLevelingAtEarly (
  IN       UINT64 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT32    CoreNumPerComputeUnit;    // Cores grouped into each compute unit
  UINT32    EnabledComputeUnit;       // Number of enabled compute units on the node
  UINT32    SocketAndModule;
  UINT32    LowCore;
  UINT32    HighCore;
  UINT32    LeveledCores;             // Final number of cores to leave enabled
  UINT32    RequestedCores;           // Explicit count requested by the leveling mode
  UINT32    TotalEnabledCoresOnNode;
  BOOLEAN   RegUpdated;
  CORE_LEVELING_TYPE  CoreLevelMode;
  CPU_CORE_LEVELING_FAMILY_SERVICES  *FamilySpecificServices;
  WARM_RESET_REQUEST Request;

  IDS_HDT_CONSOLE (CPU_TRACE, "CoreLevelingAtEarly\n  CoreLevelMode: %d\n", PlatformConfig->CoreLevelingMode);

  LeveledCores = 0;
  SocketAndModule = 0;
  ASSERT (PlatformConfig->CoreLevelingMode < CoreLevelModeMax);

  // Get OEM IO core level mode
  CoreLevelMode = (CORE_LEVELING_TYPE) PlatformConfig->CoreLevelingMode;

  // Collect cpu core info
  GetGivenModuleCoreRange (0, 0, &LowCore, &HighCore, StdHeader);
  // Get the highest and lowest core count in all nodes
  TotalEnabledCoresOnNode = HighCore - LowCore + 1;

  // Derive compute-unit geometry from the hardware mapping.
  switch (GetComputeUnitMapping (StdHeader)) {
  case AllCoresMapping:
    // All cores are in their own compute unit.
    CoreNumPerComputeUnit = 1;
    EnabledComputeUnit = TotalEnabledCoresOnNode;
    break;
  case EvenCoresMapping:
    // Cores are paired in compute units.
    CoreNumPerComputeUnit = 2;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 2);
    break;
  case TripleCoresMapping:
    // Three cores are grouped in compute units.
    CoreNumPerComputeUnit = 3;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 3);
    break;
  case QuadCoresMapping:
    // Four cores are grouped in compute units.
    CoreNumPerComputeUnit = 4;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 4);
    break;
  default:
    // Unknown mapping: fall back to one core per unit and flag in debug builds.
    CoreNumPerComputeUnit = 1;
    EnabledComputeUnit = TotalEnabledCoresOnNode;
    ASSERT (FALSE);
  }

  IDS_HDT_CONSOLE (CPU_TRACE, "  TotalEnabledCoresOnNode %d EnabledComputeUnit %d\n", TotalEnabledCoresOnNode, EnabledComputeUnit);

  // Get LeveledCores
  switch (CoreLevelMode) {
  case CORE_LEVEL_LOWEST:
    // Lowest-common-denominator leveling needs no work on this code path.
    return (AGESA_SUCCESS);
    break;
  case CORE_LEVEL_TWO:
    LeveledCores = 2;
    // Clamp to the number of cores actually present.
    LeveledCores = (LeveledCores <= TotalEnabledCoresOnNode) ? LeveledCores : TotalEnabledCoresOnNode;
    if (LeveledCores != 2) {
      PutEventLog (
        AGESA_WARNING,
        CPU_WARNING_ADJUSTED_LEVELING_MODE,
        2, LeveledCores, 0, 0, StdHeader
        );
    }
    break;
  case CORE_LEVEL_POWER_OF_TWO:
    // Level to power of 2 (1, 2, 4, 8...)
    LeveledCores = 1;
    while (TotalEnabledCoresOnNode >= (LeveledCores * 2)) {
      LeveledCores = LeveledCores * 2;
    }
    break;
  case CORE_LEVEL_COMPUTE_UNIT:
  case CORE_LEVEL_COMPUTE_UNIT_TWO:
  case CORE_LEVEL_COMPUTE_UNIT_THREE:
    // Level cores to 1~3 core(s) per compute unit, with additional reduction to level
    // all processors to match the processor with the minimum number of cores.
    if (CoreNumPerComputeUnit == 1) {
      // If there is one core per compute unit, this is the same as CORE_LEVEL_LOWEST.
      return (AGESA_SUCCESS);
    } else {
      // If there are more than one core per compute unit, level to the number of compute units * cores per compute unit.
      LeveledCores = EnabledComputeUnit * (CoreLevelMode - CORE_LEVEL_COMPUTE_UNIT + 1);
    }
    break;
  case CORE_LEVEL_ONE:
    LeveledCores = 1;
    break;
  case CORE_LEVEL_THREE:
  case CORE_LEVEL_FOUR:
  case CORE_LEVEL_FIVE:
  case CORE_LEVEL_SIX:
  case CORE_LEVEL_SEVEN:
  case CORE_LEVEL_EIGHT:
  case CORE_LEVEL_NINE:
  case CORE_LEVEL_TEN:
  case CORE_LEVEL_ELEVEN:
  case CORE_LEVEL_TWELVE:
  case CORE_LEVEL_THIRTEEN:
  case CORE_LEVEL_FOURTEEN:
  case CORE_LEVEL_FIFTEEN:
    // Processors with compute units disable all cores in an entire compute unit at a time
    // For example, on a processor with two cores per compute unit, the effective
    // explicit levels are CORE_LEVEL_ONE, CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_SIX, and
    // CORE_LEVEL_EIGHT.
    RequestedCores = CoreLevelMode - CORE_LEVEL_THREE + 3;
    LeveledCores = RequestedCores;
    // Round down to a whole number of compute units.
    LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    // Clamp to the cores actually present.
    LeveledCores = (LeveledCores <= TotalEnabledCoresOnNode) ? LeveledCores : TotalEnabledCoresOnNode;
    if (LeveledCores != 1) {
      // NOTE(review): this second rounding repeats the one above and only has
      // an effect if the clamp broke compute-unit alignment; presumably kept
      // for the clamped case — confirm against the family-specific services.
      LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    }
    if (LeveledCores != RequestedCores) {
      PutEventLog (
        AGESA_WARNING,
        CPU_WARNING_ADJUSTED_LEVELING_MODE,
        RequestedCores, LeveledCores, 0, 0, StdHeader
        );
    }
    break;
  default:
    ASSERT (FALSE);
  }

  // Set down core register
  GetFeatureServicesOfSocket (&CoreLevelingFamilyServiceTable, 0, (CONST VOID **)&FamilySpecificServices, StdHeader);
  if (FamilySpecificServices != NULL) {
    IDS_HDT_CONSOLE (CPU_TRACE, "  SetDownCoreRegister: LeveledCores %d CoreLevelMode %d\n", LeveledCores, CoreLevelMode);
    RegUpdated = FamilySpecificServices->SetDownCoreRegister (FamilySpecificServices, &SocketAndModule, &SocketAndModule, &LeveledCores, CoreLevelMode, StdHeader);
    // If the down core register is updated, trigger a warm reset.
    if (RegUpdated) {
      GetWarmResetFlag (StdHeader, &Request);
      Request.RequestBit = TRUE;
      Request.StateBits = Request.PostStage - 1;
      IDS_HDT_CONSOLE (CPU_TRACE, "  Request a warm reset.\n");
      SetWarmResetFlag (StdHeader, &Request);
    }
  }

  return (AGESA_SUCCESS);
}
/**
 * Constructs and initializes the northbridge block for one Ni die.
 *
 * Verifies that the die's logical CPUID is supported, wires the NB block to
 * the memory data structures, carves a single heap buffer into DCT structs,
 * channel structs and platform-specific blocks, publishes per-DCT pointers in
 * the socket list, and resolves each channel's SPD data pointer.
 *
 * @param[in,out]   *NBPtr     - Pointer to the MEM_NB_BLOCK to construct
 * @param[in,out]   *MemPtr    - Pointer to the MEM_DATA_STRUCT
 * @param[in]       *FeatPtr   - Pointer to the MEM_FEAT_BLOCK_NB
 * @param[in]       *SharedPtr - Pointer to the MEM_SHARED_DATA
 * @param[in]       NodeID     - Node index of the die being constructed
 *
 * @return          TRUE  - NB block constructed successfully
 * @return          FALSE - Unsupported CPUID or heap allocation failure
 */
BOOLEAN
MemConstructNBBlockNi (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN OUT   MEM_DATA_STRUCT *MemPtr,
  IN       MEM_FEAT_BLOCK_NB *FeatPtr,
  IN       MEM_SHARED_DATA *SharedPtr,
  IN       UINT8 NodeID
  )
{
  UINT8 DctIndex;
  UINT8 ChannelIndex;
  UINT8 SpdBaseIndex;
  UINT8 SpdOffset;
  DIE_STRUCT *DiePtr;
  ALLOCATE_HEAP_PARAMS HeapParams;

  //
  // Determine if this is the expected NB Type
  //
  GetLogicalIdOfSocket (MemPtr->DiesPerSystem[NodeID].SocketId, &(MemPtr->DiesPerSystem[NodeID].LogicalCpuid), &(MemPtr->StdHeader));
  if (!MemNIsIdSupportedDA (NBPtr, &(MemPtr->DiesPerSystem[NodeID].LogicalCpuid))) {
    return FALSE;
  }

  // Wire the NB block to the global memory data and shared state.
  NBPtr->MemPtr = MemPtr;
  NBPtr->RefPtr = MemPtr->ParameterListPtr;
  NBPtr->SharedPtr = SharedPtr;

  DiePtr = &(MemPtr->DiesPerSystem[NodeID]);
  NBPtr->MCTPtr = DiePtr;
  NBPtr->MCTPtr->NodeId = NodeID;
  NBPtr->PciAddr.AddressValue = DiePtr->PciAddr.AddressValue;
  NBPtr->VarMtrrHiMsk = GetVarMtrrHiMsk (&(MemPtr->DiesPerSystem[NodeID].LogicalCpuid), &(MemPtr->StdHeader));

  //
  // Allocate buffer for DCT_STRUCTs and CH_DEF_STRUCTs.  The single buffer is
  // laid out as: all DCT structs, then all channel structs, then the PS blocks.
  //
  HeapParams.RequestedBufferSize = MAX_DCTS_PER_NODE_DA * (
                                     sizeof (DCT_STRUCT) + (
                                       MAX_CHANNELS_PER_DCT_DA * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                                     )
                                   );
  HeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_DCT_STRUCT_HANDLE, NodeID, 0, 0);
  HeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&HeapParams, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_DCT_STRUCT_AND_CH_DEF_STRUCTs, NBPtr->Node, 0, 0, 0, &MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, DiePtr);
    return FALSE;
  }

  // Carve the buffer: DCT structs first...
  DiePtr->DctCount = MAX_DCTS_PER_NODE_DA;
  DiePtr->DctData = (DCT_STRUCT *) HeapParams.BufferPtr;
  HeapParams.BufferPtr += MAX_DCTS_PER_NODE_DA * sizeof (DCT_STRUCT);
  // ...then one run of channel structs per DCT...
  for (DctIndex = 0; DctIndex < MAX_DCTS_PER_NODE_DA; DctIndex++) {
    DiePtr->DctData[DctIndex].Dct = DctIndex;
    DiePtr->DctData[DctIndex].ChannelCount = MAX_CHANNELS_PER_DCT_DA;
    DiePtr->DctData[DctIndex].ChData = (CH_DEF_STRUCT *) HeapParams.BufferPtr;
    DiePtr->DctData[DctIndex].ChData[0].Dct = DctIndex;
    HeapParams.BufferPtr += MAX_CHANNELS_PER_DCT_DA * sizeof (CH_DEF_STRUCT);
  }
  // ...and the platform-specific blocks at the tail.
  NBPtr->PSBlock = (MEM_PS_BLOCK *) HeapParams.BufferPtr;

  //
  // Initialize Socket List: publish each DCT's channel and timings pointers.
  //
  for (DctIndex = 0; DctIndex < MAX_DCTS_PER_NODE_DA; DctIndex++) {
    MemPtr->SocketList[DiePtr->SocketId].ChannelPtr[DctIndex] = &(DiePtr->DctData[DctIndex].ChData[0]);
    MemPtr->SocketList[DiePtr->SocketId].TimingsPtr[DctIndex] = &(DiePtr->DctData[DctIndex].Timings);
    DiePtr->DctData[DctIndex].ChData[0].ChannelID = DctIndex;
  }

  MemNInitNBDataNi (NBPtr);
  FeatPtr->InitCPG (NBPtr);
  NBPtr->FeatPtr = FeatPtr;
  FeatPtr->InitHwRxEn (NBPtr);

  //
  // Calculate SPD Offsets per channel and assign pointers to the data.  At this
  // point, we calculate the Node-Dct-Channel centric offsets and store the
  // pointers to the first DIMM of each channel in the Channel Definition struct
  // for that channel.  This pointer is then used later to calculate the offsets
  // to be used for each logical dimm once the dimm types (QR or not) are known.
  // This is done in the Technology block constructor.
  //
  // The socket-level index is computed separately from the channel index to
  // accommodate processors that map DCT-CHANNEL differently.
  //
  SpdBaseIndex = GetSpdSocketIndex (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, &MemPtr->StdHeader);
  for (DctIndex = 0; DctIndex < MAX_DCTS_PER_NODE_DA; DctIndex++) {
    for (ChannelIndex = 0; ChannelIndex < MAX_CHANNELS_PER_DCT_DA; ChannelIndex++) {
      // Map die/dct/channel to the socket-relative channel to find its SPD slot.
      SpdOffset = GetSpdChannelIndex (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, MemNGetSocketRelativeChannelNb (NBPtr, DctIndex, ChannelIndex), &MemPtr->StdHeader);
      NBPtr->MCTPtr->DctData[DctIndex].ChData[ChannelIndex].SpdPtr = &(MemPtr->SpdDataStructure[SpdBaseIndex + SpdOffset]);
    }
  }

  // Leave the block pointing at DCT 0.
  MemNSwitchDCTNb (NBPtr, 0);

  return TRUE;
}
BOOLEAN MemTTrainMaxLatency ( IN OUT MEM_TECH_BLOCK *TechPtr ) { UINT32 TestAddrRJ16; UINT8 Dct; UINT8 ChipSel; UINT8 *PatternBufPtr; UINT8 *TestBufferPtr; UINT8 CurrentNbPstate; UINT16 CalcMaxLatDly; UINT16 MaxLatDly; UINT16 MaxLatLimit; UINT16 Margin; UINT16 CurTest; UINT16 _CL_; UINT8 TimesFail; UINT8 TimesRetrain; UINT16 i; MEM_DATA_STRUCT *MemPtr; DIE_STRUCT *MCTPtr; MEM_NB_BLOCK *NBPtr; NBPtr = TechPtr->NBPtr; MCTPtr = NBPtr->MCTPtr; MemPtr = NBPtr->MemPtr; TechPtr->TrainingType = TRN_MAX_READ_LATENCY; TimesRetrain = DEFAULT_TRAINING_TIMES; IDS_OPTION_HOOK (IDS_MEM_RETRAIN_TIMES, &TimesRetrain, &MemPtr->StdHeader); IDS_HDT_CONSOLE (MEM_STATUS, "\nStart MaxRdLat training\n"); // Set environment settings before training AGESA_TESTPOINT (TpProcMemMaxRdLatencyTraining, &(MemPtr->StdHeader)); MemTBeginTraining (TechPtr); // // Initialize the Training Pattern // if (AGESA_SUCCESS != NBPtr->TrainingPatternInit (NBPtr)) { return (BOOLEAN) (MCTPtr->ErrCode < AGESA_FATAL); } TechPtr->PatternLength = (MCTPtr->Status[Sb128bitmode]) ? 
6 : 3; // // Setup hardware training engine (if applicable) // NBPtr->FamilySpecificHook[SetupHwTrainingEngine] (NBPtr, &TechPtr->TrainingType); MaxLatDly = 0; _CL_ = TechPtr->PatternLength; PatternBufPtr = TechPtr->PatternBufPtr; TestBufferPtr = TechPtr->TestBufPtr; // // Begin max latency training // for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { if (MCTPtr->Status[Sb128bitmode] && (Dct != 0)) { break; } IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct); NBPtr->SwitchDCT (NBPtr, Dct); if (NBPtr->DCTPtr->Timings.DctMemSize != 0) { if (TechPtr->FindMaxDlyForMaxRdLat (TechPtr, &ChipSel)) { TechPtr->ChipSel = ChipSel; if (NBPtr->GetSysAddr (NBPtr, ChipSel, &TestAddrRJ16)) { IDS_HDT_CONSOLE (MEM_STATUS, "\t\tCS %d\n", ChipSel); IDS_HDT_CONSOLE (MEM_FLOW, "\t\t\tWrite to address: %04x0000\n", TestAddrRJ16); // Write the test patterns AGESA_TESTPOINT (TpProcMemMaxRdLatWritePattern, &(MemPtr->StdHeader)); NBPtr->WritePattern (NBPtr, TestAddrRJ16, PatternBufPtr, _CL_); // Sweep max latency delays NBPtr->getMaxLatParams (NBPtr, TechPtr->MaxDlyForMaxRdLat, &CalcMaxLatDly, &MaxLatLimit, &Margin); AGESA_TESTPOINT (TpProcMemMaxRdLatStartSweep, &(MemPtr->StdHeader)); TimesFail = 0; ERROR_HANDLE_RETRAIN_BEGIN (TimesFail, TimesRetrain) { MaxLatDly = CalcMaxLatDly; for (i = 0; i < (MaxLatLimit - CalcMaxLatDly); i++) { NBPtr->SetBitField (NBPtr, BFMaxLatency, MaxLatDly); IDS_HDT_CONSOLE (MEM_FLOW, "\t\t\tDly %3x", MaxLatDly); TechPtr->ResetDCTWrPtr (TechPtr, 6); AGESA_TESTPOINT (TpProcMemMaxRdLatReadPattern, &(MemPtr->StdHeader)); NBPtr->ReadPattern (NBPtr, TestBufferPtr, TestAddrRJ16, _CL_); AGESA_TESTPOINT (TpProcMemMaxRdLatTestPattern, &(MemPtr->StdHeader)); CurTest = NBPtr->CompareTestPattern (NBPtr, TestBufferPtr, PatternBufPtr, _CL_ * 64); NBPtr->FlushPattern (NBPtr, TestAddrRJ16, _CL_); if (NBPtr->IsSupported[ReverseMaxRdLatTrain]) { // Reverse training decrements MaxLatDly whenever the test passes // and uses the last passing MaxLatDly as left edge if (CurTest == 0xFFFF) { 
IDS_HDT_CONSOLE (MEM_FLOW, " P"); if (MaxLatDly == 0) { break; } else { MaxLatDly--; } } } else { // Traditional training increments MaxLatDly until the test passes // and uses it as left edge if (CurTest == 0xFFFF) { IDS_HDT_CONSOLE (MEM_FLOW, " P"); break; } else { MaxLatDly++; } } IDS_HDT_CONSOLE (MEM_FLOW, "\n"); } // End of delay sweep ERROR_HANDLE_RETRAIN_END ((MaxLatDly >= MaxLatLimit), TimesFail) } AGESA_TESTPOINT (TpProcMemMaxRdLatSetDelay, &(MemPtr->StdHeader)); if (MaxLatDly >= MaxLatLimit) { PutEventLog (AGESA_ERROR, MEM_ERROR_MAX_LAT_NO_WINDOW, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader); SetMemError (AGESA_ERROR, MCTPtr); NBPtr->DCTPtr->Timings.CsTrainFail |= NBPtr->DCTPtr->Timings.CsPresent; MCTPtr->ChannelTrainFail |= (UINT32)1 << Dct; if (!NBPtr->MemPtr->ErrorHandling (MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) { ASSERT (FALSE); return FALSE; } } else { NBPtr->FamilySpecificHook[AddlMaxRdLatTrain] (NBPtr, &TestAddrRJ16); MaxLatDly = MaxLatDly + Margin; if (NBPtr->IsSupported[ReverseMaxRdLatTrain]) { MaxLatDly++; // Add 1 to get back to the last passing value } // Set final delays CurrentNbPstate = (UINT8) MemNGetBitFieldNb (NBPtr, BFCurNbPstate); ASSERT (CurrentNbPstate <= 3); NBPtr->ChannelPtr->DctMaxRdLat [CurrentNbPstate] = MaxLatDly; NBPtr->SetBitField (NBPtr, BFMaxLatency, MaxLatDly); IDS_HDT_CONSOLE (MEM_FLOW, "\t\tFinal MaxRdLat: %03x\n", MaxLatDly); } } }
/**
 * Multisocket call to determine the most severe AGESA_STATUS return value after
 * processing the power management initialization tables.
 *
 * Loops over every non-BSC socket that is present, draining each socket's
 * core-0 event queue via an AP task and copying any power-management events
 * into the BSC's event log. The BSC's log is then scanned for the most
 * severe power-management event class, which is returned. Must be called by
 * the BSC only.
 *
 * @param[in]  StdHeader       Config handle for library and services
 *
 * @return The most severe error code from power management init
 *
 */
AGESA_STATUS
GetEarlyPmErrorsMulti (
  IN       AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT16       EventIndex;
  UINT32       BspSocketNum;
  UINT32       BspModuleNum;
  UINT32       BspCoreNum;
  UINT32       SocketNum;
  UINT32       SocketCount;
  AP_TASK      ApTask;
  AGESA_EVENT  LogEntry;
  AGESA_STATUS WorstStatus;
  AGESA_STATUS IgnoredStatus;

  // This routine is only valid on the BSC.
  ASSERT (IsBsp (StdHeader, &WorstStatus));

  WorstStatus = AGESA_SUCCESS;
  LogEntry.EventClass = AGESA_SUCCESS;
  LogEntry.EventInfo = 0;
  LogEntry.DataParam1 = 0;
  LogEntry.DataParam2 = 0;
  LogEntry.DataParam3 = 0;
  LogEntry.DataParam4 = 0;

  SocketCount = GetPlatformNumberOfSockets ();
  IdentifyCore (StdHeader, &BspSocketNum, &BspModuleNum, &BspCoreNum, &IgnoredStatus);

  // AP task: fetch the next logged event from the remote core into LogEntry.
  ApTask.FuncAddress.PfApTaskI = GetNextEvent;
  ApTask.DataTransfer.DataSizeInDwords = SIZE_IN_DWORDS (AGESA_EVENT);
  ApTask.DataTransfer.DataPtr = &LogEntry;
  ApTask.DataTransfer.DataTransferFlags = 0;
  ApTask.ExeFlags = WAIT_FOR_CORE | RETURN_PARAMS;

  for (SocketNum = 0; SocketNum < SocketCount; SocketNum++) {
    if ((SocketNum == BspSocketNum) || !IsProcessorPresent (SocketNum, StdHeader)) {
      continue;
    }
    // Drain the remote socket's event queue; EventInfo == 0 means empty.
    do {
      ApUtilRunCodeOnSocketCore ((UINT8) SocketNum, (UINT8) 0, &ApTask, StdHeader);
      if ((LogEntry.EventInfo & CPU_EVENT_PM_EVENT_MASK) == CPU_EVENT_PM_EVENT_CLASS) {
        // Transfer the remote power-management event into the BSC's log.
        PutEventLog (
          LogEntry.EventClass,
          LogEntry.EventInfo,
          LogEntry.DataParam1,
          LogEntry.DataParam2,
          LogEntry.DataParam3,
          LogEntry.DataParam4,
          StdHeader
          );
      }
    } while (LogEntry.EventInfo != 0);
  }

  // Scan the (now consolidated) BSC log for the worst PM event class.
  for (EventIndex = 0; PeekEventLog (&LogEntry, EventIndex, StdHeader); EventIndex++) {
    if ((LogEntry.EventInfo & CPU_EVENT_PM_EVENT_MASK) == CPU_EVENT_PM_EVENT_CLASS) {
      if (LogEntry.EventClass > WorstStatus) {
        WorstStatus = LogEntry.EventClass;
      }
    }
  }
  return (WorstStatus);
}
/**
 *
 * Find the common supported voltage on all nodes, taking into account the
 * user option for performance and power saving.
 *
 * Determines the memory power policy (platform override > platform profile,
 * then IDS hook), runs low-voltage DDR3 leveling, and — when performance is
 * preferred and no single common voltage exists — marks DDR3Voltage as
 * VOLT_UNSUPPORTED so per-node maximum-frequency optimization can run later.
 *
 * @param[in,out]   *MemMainPtr - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @return          TRUE -  No fatal error occurs.
 * @return          FALSE - Fatal error occurs.
 */
BOOLEAN
MemMLvDdr3PerformanceEnhPre (
  IN OUT   MEM_MAIN_DATA_BLOCK *MemMainPtr
  )
{
  UINT8 Node;
  BOOLEAN RetVal;
  DIMM_VOLTAGE VDDIO;
  MEM_NB_BLOCK *NBPtr;
  MEM_PARAMETER_STRUCT *ParameterPtr;
  MEM_SHARED_DATA *mmSharedPtr;
  PLATFORM_POWER_POLICY PowerPolicy;
  UINT8 *PowerPolicyPtr;

  NBPtr = MemMainPtr->NBPtr;
  mmSharedPtr = MemMainPtr->mmSharedPtr;
  ParameterPtr = MemMainPtr->MemPtr->ParameterListPtr;

  // Platform-specific override table wins over the platform profile default.
  PowerPolicyPtr = FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_MEMORY_POWER_POLICY, 0, 0, 0, NULL, NULL);
  if (PowerPolicyPtr != NULL) {
    PowerPolicy = (PLATFORM_POWER_POLICY) *PowerPolicyPtr;
    IDS_HDT_CONSOLE (MEM_FLOW, "\nPlatform overrides memory power policy");
  } else {
    PowerPolicy = MemMainPtr->MemPtr->PlatFormConfig->PlatformProfile.PlatformPowerPolicy;
  }
  // IDS may override the policy chosen above; hook must run before it is used.
  IDS_OPTION_HOOK (IDS_MEMORY_POWER_POLICY, &PowerPolicy, &NBPtr->MemPtr->StdHeader);
  IDS_HDT_CONSOLE (MEM_FLOW, (PowerPolicy == Performance) ? "\nMaximize Performance\n" : "\nMaximize Battery Life\n");

  if (ParameterPtr->DDR3Voltage != VOLT_INITIAL) {
    // Caller pre-set a voltage: treat VDDIO as already determined and warn.
    mmSharedPtr->VoltageMap = VDDIO_DETERMINED;
    PutEventLog (AGESA_WARNING, MEM_WARNING_INITIAL_DDR3VOLT_NONZERO, 0, 0, 0, 0, &(NBPtr[BSP_DIE].MemPtr->StdHeader));
    SetMemError (AGESA_WARNING, NBPtr[BSP_DIE].MCTPtr);
    IDS_HDT_CONSOLE (MEM_FLOW, "Warning: Initial Value for VDDIO has been changed.\n");
    RetVal = TRUE;
  } else {
    // Run low-voltage DDR3 leveling; it updates DDR3Voltage and VoltageMap.
    RetVal = MemMLvDdr3 (MemMainPtr);
    VDDIO = ParameterPtr->DDR3Voltage;
    if (NBPtr->IsSupported[PerformanceOnly] || ((PowerPolicy == Performance) && (mmSharedPtr->VoltageMap != 0))) {
      // When there is no commonly supported voltage, do not optimize performance
      // For cases where we can maximize performance, do the following
      // When VDDIO is enforced, DDR3Voltage will be overriden by specific VDDIO
      // So cases with DDR3Voltage left to be VOLT_UNSUPPORTED will be open to maximizing performance.
      ParameterPtr->DDR3Voltage = VOLT_UNSUPPORTED;
    }
    // IDS may enforce a specific VDDIO here, closing the optimization window.
    IDS_OPTION_HOOK (IDS_ENFORCE_VDDIO, &(ParameterPtr->DDR3Voltage), &NBPtr->MemPtr->StdHeader);
    if (ParameterPtr->DDR3Voltage != VOLT_UNSUPPORTED) {
      // When Voltage is already determined, do not have further process to choose maximum frequency to optimize performance
      mmSharedPtr->VoltageMap = VDDIO_DETERMINED;
      IDS_HDT_CONSOLE (MEM_FLOW, "VDDIO is determined. No further optimization will be done.\n");
    } else {
      // Open the per-node frequency tables so a later pass can pick the
      // fastest voltage/frequency combination.
      for (Node = 0; Node < MemMainPtr->DieCount; Node++) {
        NBPtr[Node].MaxFreqVDDIO[VOLT1_5_ENCODED_VAL] = UNSUPPORTED_DDR_FREQUENCY;
        NBPtr[Node].MaxFreqVDDIO[VOLT1_35_ENCODED_VAL] = UNSUPPORTED_DDR_FREQUENCY;
        NBPtr[Node].MaxFreqVDDIO[VOLT1_25_ENCODED_VAL] = UNSUPPORTED_DDR_FREQUENCY;
      }
      // Reprogram the leveling result as temporal candidate
      ParameterPtr->DDR3Voltage = VDDIO;
    }
  }
  // On exit DDR3Voltage must always hold a usable (or initial) value.
  ASSERT (ParameterPtr->DDR3Voltage != VOLT_UNSUPPORTED);
  return RetVal;
}
/**
 *
 * Check and disable Chip selects that fail training on all nodes.
 *
 * If any node excludes DIMMs, the system memory map is rebuilt from the first
 * affected node onward: hoisting/interleaving state is cleared, per-node maps
 * are re-initialized, address maps are re-synced, CPU memory typing is re-run
 * (only if TOP_MEM is already programmed), and node/channel interleaving that
 * was active before the remap is re-enabled.
 *
 * @param[in,out]   *MemMainPtr - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @return          TRUE -  No fatal error occurs.
 * @return          FALSE - Fatal error occurs.
 */
BOOLEAN
MemMRASExcludeDIMM (
  IN OUT   MEM_MAIN_DATA_BLOCK *MemMainPtr
  )
{
  UINT8 Node;
  BOOLEAN IsEnabled;
  BOOLEAN RetVal;
  BOOLEAN IsChannelIntlvEnabled[MAX_NODES_SUPPORTED];
  UINT8 FirstEnabledNode;
  UINT32 BottomIO;
  MEM_NB_BLOCK *NBPtr;
  MEM_PARAMETER_STRUCT *RefPtr;
  S_UINT64 SMsr;

  FirstEnabledNode = 0;
  IsEnabled = FALSE;
  RetVal = TRUE;
  NBPtr = MemMainPtr->NBPtr;
  RefPtr = NBPtr[BSP_DIE].RefPtr;

  // Find the first (lowest-numbered) node that excluded any DIMM.
  for (Node = 0; Node < MemMainPtr->DieCount; Node++) {
    if (NBPtr[Node].FeatPtr->ExcludeDIMM (&NBPtr[Node])) {
      if (!IsEnabled) {
        // Record the first node that has exclude dimm enabled
        FirstEnabledNode = Node;
        IsEnabled = TRUE;
      }
    }
  }

  if (IsEnabled) {
    // Check if all nodes have all dimms excluded. If yes, fatal exit
    NBPtr[BSP_DIE].SharedPtr->CurrentNodeSysBase = 0;
    BottomIO = (NBPtr[BSP_DIE].RefPtr->BottomIo & 0xF8) << 8;
    // If the first node that has excluded dimms does not have a system base smaller
    // than bottomIO, then we don't need to reset the GStatus, as we don't need to
    // remap memory hole.
    if (NBPtr[FirstEnabledNode].MCTPtr->NodeSysBase < BottomIO) {
      RefPtr->GStatus[GsbHWHole] = FALSE;
      RefPtr->GStatus[GsbSpIntRemapHole] = FALSE;
      RefPtr->GStatus[GsbSoftHole] = FALSE;
      RefPtr->HoleBase = 0;
      RefPtr->SysLimit = 0;
    }

    // If Node Interleaving has been executed before the remapping then we need to
    // start from the first node.
    // There may be a few scenarios:
    // 1. Node interleaving is not enabled before the remap, and still cannot be enabled after
    //    remap
    // 2. Node interleaving cannot be enabled before the remap, but it can be enabled after
    //    remap
    // 3. Node interleaving is enabled before the remap, but it cannot be enabled after the remap
    if (NBPtr->SharedPtr->NodeIntlv.IsValid) {
      FirstEnabledNode = 0;
    }

    for (Node = 0; Node < MemMainPtr->DieCount; Node++) {
      IsChannelIntlvEnabled [Node] = FALSE;
      // Check if node interleaving has been enabled on this node
      // if yes, disable it.
      if (NBPtr[Node].GetBitField (&NBPtr[Node], BFDramIntlvEn) != 0) {
        NBPtr[Node].SetBitField (&NBPtr[Node], BFDramIntlvEn, 0);
        NBPtr[Node].SetBitField (&NBPtr[Node], BFDramIntlvSel, 0);
      }

      if (Node >= FirstEnabledNode) {
        // Remap memory on nodes with node number larger than the first node that has excluded dimms.
        // If channel interleaving has already been enabled, need to disable it before remapping memory.
        if (NBPtr[Node].GetBitField (&NBPtr[Node], BFDctSelIntLvEn) != 0) {
          NBPtr[Node].SetBitField (&NBPtr[Node], BFDctSelIntLvEn, 0);
          IsChannelIntlvEnabled [Node] = TRUE;
        }
        NBPtr[Node].MCTPtr->Status[SbHWHole] = FALSE;
        NBPtr[Node].MCTPtr->Status[SbSWNodeHole] = FALSE;
        NBPtr[Node].SetBitField (&NBPtr[Node], BFDctSelBaseAddr, 0);
        NBPtr[Node].SetBitField (&NBPtr[Node], BFDctSelHiRngEn, 0);
        NBPtr[Node].SetBitField (&NBPtr[Node], BFDctSelHi, 0);
        NBPtr[Node].SetBitField (&NBPtr[Node], BFDctSelBaseOffset, 0);
        NBPtr[Node].SetBitField (&NBPtr[Node], BFDramHoleAddrReg, 0);
        NBPtr[Node].HtMemMapInit (&NBPtr[Node]);
      } else if (NBPtr[Node].MCTPtr->NodeMemSize != 0) {
        // No change is needed in the memory map of this node.
        // Need to adjust the current system base for other nodes processed later.
        NBPtr[Node].SharedPtr->CurrentNodeSysBase = (NBPtr[Node].MCTPtr->NodeSysLimit + 1) & 0xFFFFFFF0;
        RefPtr->SysLimit = NBPtr[Node].MCTPtr->NodeSysLimit;
        // If the current node does not have the memory hole, then set DramHoleAddrReg to be 0.
        // If memory hoisting is enabled later by other node, SyncAddrMapToAllNodes will set the base
        // and DramMemHoistValid.
        // Otherwise, do not change the register value, as we need to keep DramHoleOffset unchanged, as well
        // as DramHoleValid.
        if (!NBPtr[Node].MCTPtr->Status[SbHWHole]) {
          NBPtr[Node].SetBitField (&NBPtr[Node], BFDramHoleAddrReg, 0);
        }
      }
    }

    for (Node = 0; Node < MemMainPtr->DieCount; Node++) {
      NBPtr[Node].SyncAddrMapToAllNodes (&NBPtr[Node]);
    }

    LibAmdMsrRead (TOP_MEM, (UINT64 *)&SMsr, &NBPtr->MemPtr->StdHeader);
    // Only when TOM is set can CpuMemTyping be re-run.
    // BUGFIX: the check was inverted ((hi == 0) && (lo == 0)), which re-ran
    // memory typing exactly when TOP_MEM was NOT programmed, contradicting
    // the stated intent. TOM is "set" when the MSR holds a nonzero value.
    if ((SMsr.lo != 0) || (SMsr.hi != 0)) {
      if (RefPtr->SysLimit != 0) {
        NBPtr[BSP_DIE].CpuMemTyping (&NBPtr[BSP_DIE]);
      }
    }

    // Re-run node interleaving if it has been executed before the remap
    if (NBPtr->SharedPtr->NodeIntlv.IsValid) {
      MemFeatMain.InterleaveNodes (MemMainPtr);
    }

    // Re-enable channel interleaving if it was enabled before remapping memory
    for (Node = 0; Node < MemMainPtr->DieCount; Node++) {
      if (IsChannelIntlvEnabled [Node]) {
        NBPtr[Node].FeatPtr->InterleaveChannels (&NBPtr[Node]);
      }
    }
  }

  // if all dimms on all nodes are excluded, do fatal exit
  if (RefPtr->SysLimit == 0) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_NO_DIMM_FOUND_ON_SYSTEM, 0, 0, 0, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, NBPtr[BSP_DIE].MCTPtr);
    ASSERT (FALSE);
  }

  // Aggregate per-node severity: FALSE if any node hit a fatal error.
  for (Node = 0; Node < MemMainPtr->DieCount; Node ++) {
    RetVal &= (BOOLEAN) (NBPtr[Node].MCTPtr->ErrCode < AGESA_FATAL);
  }
  return RetVal;
}
/** * * This function executes receiver enable training for a specific die * * @param[in,out] *TechPtr - Pointer to the MEM_TECH_BLOCK * @param[in] Pass - Pass of the receiver training * * @return TRUE - No fatal error occurs. * @return FALSE - Fatal error occurs. */ BOOLEAN STATIC MemTDqsTrainRcvrEnSw ( IN OUT MEM_TECH_BLOCK *TechPtr, IN UINT8 Pass ) { _16BYTE_ALIGN UINT8 PatternBuffer[3 * 64]; UINT8 TestBuffer[120]; UINT8 *PatternBufPtr[4]; UINT8 *TempPtr; UINT32 TestAddrRJ16[4]; UINT32 TempAddrRJ16; UINT32 RealAddr; UINT16 CurTest[4]; UINT8 Dct; UINT8 Receiver; UINT8 i; UINT8 TimesFail; UINT8 TimesRetrain; UINT16 RcvrEnDly; UINT16 MaxRcvrEnDly; UINT16 RcvrEnDlyLimit; UINT16 MaxDelayCha; BOOLEAN IsDualRank; BOOLEAN S0En; BOOLEAN S1En; UINT8 MaxFilterDly; MEM_DATA_STRUCT *MemPtr; DIE_STRUCT *MCTPtr; DCT_STRUCT *DCTPtr; MEM_NB_BLOCK *NBPtr; NBPtr = TechPtr->NBPtr; MemPtr = NBPtr->MemPtr; MCTPtr = NBPtr->MCTPtr; TempAddrRJ16 = 0; TempPtr = NULL; MaxDelayCha = 0; MaxFilterDly = TechPtr->MaxFilterDly; RcvrEnDlyLimit = NBPtr->RcvrEnDlyLimit; TimesRetrain = DEFAULT_TRAINING_TIMES; IDS_OPTION_HOOK (IDS_MEM_RETRAIN_TIMES, &TimesRetrain, &MemPtr->StdHeader); IDS_HDT_CONSOLE ("!\nStart SW RxEn training\n"); // Set environment settings before training MemTBeginTraining (TechPtr); PatternBufPtr[0] = PatternBufPtr[2] = PatternBuffer; MemUFillTrainPattern (TestPattern0, PatternBufPtr[0], 64); PatternBufPtr[1] = PatternBufPtr[3] = PatternBufPtr[0] + 128; MemUFillTrainPattern (TestPattern1, PatternBufPtr[1], 64); // Begin receiver enable training AGESA_TESTPOINT (TpProcMemReceiverEnableTraining, &(MemPtr->StdHeader)); MaxRcvrEnDly = 0; for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { IDS_HDT_CONSOLE ("!\tDct %d\n", Dct); NBPtr->SwitchDCT (NBPtr, Dct); DCTPtr = NBPtr->DCTPtr; // Set training bit NBPtr->SetBitField (NBPtr, BFDqsRcvEnTrain, 1); // Relax Max Latency before training NBPtr->SetMaxLatency (NBPtr, 0xFFFF); if (Pass == FIRST_PASS) { TechPtr->InitDQSPos4RcvrEn (TechPtr); } // 
there are four receiver pairs, loosely associated with chipselects. Receiver = DCTPtr->Timings.CsEnabled ? 0 : 8; for (; Receiver < 8; Receiver += 2) { TechPtr->DqsRcvEnSaved = 0; RcvrEnDly = RcvrEnDlyLimit; S0En = NBPtr->GetSysAddr (NBPtr, Receiver, &TestAddrRJ16[0]); S1En = NBPtr->GetSysAddr (NBPtr, Receiver + 1, &TestAddrRJ16[2]); if (S0En) { TestAddrRJ16[1] = TestAddrRJ16[0] + BIGPAGE_X8_RJ16; } if (S1En) { TestAddrRJ16[3] = TestAddrRJ16[2] + BIGPAGE_X8_RJ16; } if (S0En && S1En) { IsDualRank = TRUE; } else { IsDualRank = FALSE; } if (S0En || S1En) { IDS_HDT_CONSOLE ("!\t\tCS %d\n", Receiver); // Write the test patterns AGESA_TESTPOINT (TpProcMemRcvrWritePattern, &(MemPtr->StdHeader)); IDS_HDT_CONSOLE ("\t\t\tWrite to addresses: "); for (i = (S0En ? 0 : 2); i < (S1En ? 4 : 2); i++) { RealAddr = MemUSetUpperFSbase (TestAddrRJ16[i], MemPtr); MemUWriteCachelines (RealAddr, PatternBufPtr[i], 1); IDS_HDT_CONSOLE (" %04lx0000 ", TestAddrRJ16[i]); } IDS_HDT_CONSOLE ("\n"); // Initialize RcvrEnDly value and other DCT stored values // MCTPtr->DqsRcvEnPass = Pass ? 0xFF : 0; // Sweep receiver enable delays AGESA_TESTPOINT (TpProcMemRcvrStartSweep, &(MemPtr->StdHeader)); TimesFail = 0; ERROR_HANDLE_RETRAIN_BEGIN (TimesFail, TimesRetrain) { for (RcvrEnDly = 0; RcvrEnDly < RcvrEnDlyLimit; RcvrEnDly++) { AGESA_TESTPOINT (TpProcMemRcvrSetDelay, &(MemPtr->StdHeader)); TechPtr->SetRcvrEnDly (TechPtr, Receiver, RcvrEnDly); IDS_HDT_CONSOLE ("\t\t\tDly %3x", RcvrEnDly); // Read and compare the first beat of data for (i = (S0En ? 0 : 2); i < (S1En ? 4 : 2); i++) { AGESA_TESTPOINT (TpProcMemRcvrReadPattern, &(MemPtr->StdHeader)); RealAddr = MemUSetUpperFSbase (TestAddrRJ16[i], MemPtr); MemUReadCachelines (TestBuffer, RealAddr, 1); AGESA_TESTPOINT (TpProcMemRcvrTestPattern, &(MemPtr->StdHeader)); CurTest[i] = TechPtr->Compare1ClPattern (TechPtr, TestBuffer, PatternBufPtr[i]); // Due to speculative execution during MemUReadCachelines, we must // flush one more cache line than we read. 
MemUProcIOClFlush (TestAddrRJ16[i], 2, MemPtr); TechPtr->ResetDCTWrPtr (TechPtr, Receiver); // // Swap the test pointers such that even and odd steps alternate. // if ((i % 2) == 0) { TempPtr = PatternBufPtr[i]; PatternBufPtr[i] = PatternBufPtr[i + 1]; TempAddrRJ16 = TestAddrRJ16[i]; TestAddrRJ16[i] = TestAddrRJ16[i + 1]; } else { PatternBufPtr[i] = TempPtr; TestAddrRJ16[i] = TempAddrRJ16; } } if (TechPtr->SaveRcvrEnDly (TechPtr, Receiver, RcvrEnDly, S0En ? (CurTest[0] & CurTest[1]) : 0xFFFF, S1En ? (CurTest[2] & CurTest[3]) : 0xFFFF)) { // if all bytelanes pass if (MaxRcvrEnDly < (RcvrEnDly - MaxFilterDly)) { MaxRcvrEnDly = RcvrEnDly - MaxFilterDly; } break; } } // End of delay sweep ERROR_HANDLE_RETRAIN_END ((RcvrEnDly > (RcvrEnDlyLimit - 1)), TimesFail) } if (RcvrEnDly == RcvrEnDlyLimit) { // no passing window PutEventLog (AGESA_ERROR, MEM_ERROR_RCVR_EN_NO_PASSING_WINDOW_EQUAL_LIMIT, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader); SetMemError (AGESA_ERROR, MCTPtr); } if (RcvrEnDly > (RcvrEnDlyLimit - 1)) { // passing window too narrow, too far delayed PutEventLog (AGESA_ERROR, MEM_ERROR_RCVR_EN_VALUE_TOO_LARGE_LIMIT_LESS_ONE, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader); SetMemError (AGESA_ERROR, MCTPtr); DCTPtr->Timings.CsTrainFail |= DCTPtr->Timings.CsPresent & (UINT16) (3 << Receiver); MCTPtr->ChannelTrainFail |= (UINT32)1 << Dct; if (!NBPtr->MemPtr->ErrorHandling (MCTPtr, NBPtr->Dct, DCTPtr->Timings.CsTrainFail, &NBPtr->MemPtr->StdHeader)) { return FALSE; } } } TechPtr->LoadRcvrEnDly (TechPtr, Receiver); // set final delays } // End while Receiver < 8 // Clear training bit when done NBPtr->SetBitField (NBPtr, BFDqsRcvEnTrain, 0); // Set Max Latency for both channels MaxRcvrEnDly += 0x20; // @attention - IDS_HDT_CONSOLE ("\t\tMaxRcvrEnDly: %03x\n", MaxRcvrEnDly); if (MCTPtr->GangedMode) { if (Dct == 0) { MaxDelayCha = MaxRcvrEnDly; } else if (MaxRcvrEnDly > MaxDelayCha) { NBPtr->SwitchDCT (NBPtr, 0); 
NBPtr->SetMaxLatency (NBPtr, MaxRcvrEnDly); } } else { NBPtr->SetMaxLatency (NBPtr, MaxRcvrEnDly); } TechPtr->ResetDCTWrPtr (TechPtr, 6); }
/**
 *
 *
 *  This is the main function to perform parallel training on all nodes.
 *  This is the routine which will run on the remote AP.
 *
 *  Rebuilds a local DIE_STRUCT/DCT/channel hierarchy plus training-delay
 *  tables in a single heap buffer, constructs local NB and Tech blocks,
 *  runs the training flow, then transmits the result buffer back to the BSP.
 *
 *  @param[in,out]   *EnvPtr   - Pointer to the Training Environment Data
 *  @param[in,out]   *StdHeader - Pointer to the Standard Header of the AP
 *
 *  @return          TRUE -  This feature is enabled.
 *  @return          FALSE - This feature is not enabled.
 */
BOOLEAN
MemFParallelTraining (
  IN OUT   REMOTE_TRAINING_ENV *EnvPtr,
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  MEM_PARAMETER_STRUCT ParameterList;
  MEM_NB_BLOCK NB;
  MEM_TECH_BLOCK TB;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  MEM_DATA_STRUCT *MemPtr;
  DIE_STRUCT *MCTPtr;
  UINT8 p;
  UINT8 i;
  UINT8 Dct;
  UINT8 Channel;
  UINT8 *BufferPtr;
  UINT8 DctCount;
  UINT8 ChannelCount;
  UINT8 RowCount;
  UINT8 ColumnCount;
  UINT16 SizeOfNewBuffer;
  AP_DATA_TRANSFER ReturnData;

  //
  // Initialize Parameters
  //
  ReturnData.DataPtr = NULL;
  ReturnData.DataSizeInDwords = 0;
  ReturnData.DataTransferFlags = 0;

  ASSERT (EnvPtr != NULL);
  //
  // Replace Standard header of a AP
  //
  LibAmdMemCopy (StdHeader, &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), &(EnvPtr->StdHeader));

  //
  // Allocate buffer for training data
  //
  // Walk the flattened structures transmitted by the BSP to learn the
  // DCT/channel/row/column geometry before sizing the local buffer.
  BufferPtr = (UINT8 *) (&EnvPtr->DieStruct);
  DctCount = EnvPtr->DieStruct.DctCount;
  BufferPtr += sizeof (DIE_STRUCT);
  ChannelCount = ((DCT_STRUCT *) BufferPtr)->ChannelCount;
  BufferPtr += DctCount * sizeof (DCT_STRUCT);
  RowCount = ((CH_DEF_STRUCT *) BufferPtr)->RowCount;
  ColumnCount = ((CH_DEF_STRUCT *) BufferPtr)->ColumnCount;
  SizeOfNewBuffer = sizeof (DIE_STRUCT) +
                    DctCount * (
                      sizeof (DCT_STRUCT) + (
                        ChannelCount * (
                          sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                            RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES +
                            (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
                            (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
                          )
                        )
                      )
                    );
  AllocHeapParams.RequestedBufferSize = SizeOfNewBuffer;
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    BufferPtr = AllocHeapParams.BufferPtr;
    LibAmdMemCopy (
      BufferPtr,
      &(EnvPtr->DieStruct),
      sizeof (DIE_STRUCT) + DctCount * (sizeof (DCT_STRUCT) + ChannelCount * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))),
      StdHeader
    );

    //
    // Fix up pointers
    //
    // The copied structures refer to each other by pointer; re-link them
    // against the new local buffer.
    MCTPtr = (DIE_STRUCT *) BufferPtr;
    BufferPtr += sizeof (DIE_STRUCT);
    MCTPtr->DctData = (DCT_STRUCT *) BufferPtr;
    BufferPtr += MCTPtr->DctCount * sizeof (DCT_STRUCT);
    for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
      MCTPtr->DctData[Dct].ChData = (CH_DEF_STRUCT *) BufferPtr;
      BufferPtr += MCTPtr->DctData[Dct].ChannelCount * sizeof (CH_DEF_STRUCT);
      for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
        MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = MCTPtr;
        MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &MCTPtr->DctData[Dct];
      }
    }
    NB.PSBlock = (MEM_PS_BLOCK *) BufferPtr;
    BufferPtr += DctCount * ChannelCount * sizeof (MEM_PS_BLOCK);

    // The whole buffer (structures + delay tables) is what goes back to the BSP.
    ReturnData.DataPtr = AllocHeapParams.BufferPtr;
    ReturnData.DataSizeInDwords = (SizeOfNewBuffer + 3) / 4;
    ReturnData.DataTransferFlags = 0;

    //
    // Allocate Memory for the MEM_DATA_STRUCT we will use
    //
    AllocHeapParams.RequestedBufferSize = sizeof (MEM_DATA_STRUCT);
    AllocHeapParams.BufferHandle = AMD_MEM_DATA_HANDLE;
    AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
    if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
      MemPtr = (MEM_DATA_STRUCT *)AllocHeapParams.BufferPtr;

      LibAmdMemCopy (&(MemPtr->StdHeader), &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), StdHeader);

      //
      // Copy Parameters from environment
      //
      ParameterList.HoleBase = EnvPtr->HoleBase;
      ParameterList.BottomIo = EnvPtr->BottomIo;
      ParameterList.UmaSize = EnvPtr->UmaSize;
      ParameterList.SysLimit = EnvPtr->SysLimit;
      ParameterList.TableBasedAlterations = EnvPtr->TableBasedAlterations;
      ParameterList.PlatformMemoryConfiguration = EnvPtr->PlatformMemoryConfiguration;
      MemPtr->ParameterListPtr = &ParameterList;

      for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
        MemPtr->GetPlatformCfg[p] = EnvPtr->GetPlatformCfg[p];
      }

      MemPtr->ErrorHandling = EnvPtr->ErrorHandling;
      //
      // Create Local NBBlock and Tech Block
      //
      EnvPtr->NBBlockCtor (&NB, MCTPtr, EnvPtr->FeatPtr);
      NB.RefPtr = &ParameterList;
      NB.MemPtr = MemPtr;
      // Pick the first technology block that claims this NB.
      i = 0;
      while (memTechInstalled[i] != NULL) {
        if (memTechInstalled[i] (&TB, &NB)) {
          break;
        }
        i++;
      }
      NB.TechPtr = &TB;
      NB.TechBlockSwitch (&NB);

      //
      // Setup CPU Mem Type MSRs on the AP
      //
      NB.CpuMemTyping (&NB);

      IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NB.Node);
      //
      // Call Technology Specific Training routine
      //
      NB.TrainingFlow (&NB);
      //
      // Copy training data to ReturnData buffer
      //
      LibAmdMemCopy (
        BufferPtr,
        MCTPtr->DctData[0].ChData[0].RcvEnDlys,
        ((DctCount * ChannelCount) * (
           (RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES) +
           (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
           (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
         )
        ),
        StdHeader);

      HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);
      //
      // Restore pointers
      //
      // Point the copied channel structures back at the BSP-side delay
      // tables before the buffer is transmitted.
      // BUGFIX: the RdDqsDlys restore was duplicated (assigned twice in a
      // row); the redundant second assignment has been removed.
      for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
        for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
          MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = &EnvPtr->DieStruct;
          MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &EnvPtr->DieStruct.DctData[Dct];

          MCTPtr->DctData[Dct].ChData[Channel].RcvEnDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RcvEnDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqs2dDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqs2dDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].FailingBitMask = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].FailingBitMask;
        }
        MCTPtr->DctData[Dct].ChData = EnvPtr->DieStruct.DctData[Dct].ChData;
      }
      MCTPtr->DctData = EnvPtr->DieStruct.DctData;
    }

    //
    // Signal to BSP that training is complete and Send Results
    //
    ASSERT (ReturnData.DataPtr != NULL);
    ApUtilTransmitBuffer (EnvPtr->BspSocket, EnvPtr->BspCore, &ReturnData, StdHeader);

    //
    // Clean up and exit.
    //
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0), StdHeader);
  } else {
    MCTPtr = &EnvPtr->DieStruct;
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_TRAINING_DATA, MCTPtr->NodeId, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocate heap for buffer for parallel training data
  }
  return TRUE;
}
/**
 * Configure engine list to support lane allocation according to configuration ID.
 *
 * PCIE port
 *
 *  1  Check if lane from user port descriptor (PCIe_PORT_DESCRIPTOR) belongs to wrapper (PCIe_WRAPPER_CONFIG)
 *  2  Check if link width from user descriptor less or equal to link width of engine (PCIe_ENGINE_CONFIG)
 *  3  Check if link width is correct. Correct link width for PCIe port x1, x2, x4, x8, x16, correct link width for DDI x4, x8
 *  4  Check if user port device number (PCIe_PORT_DESCRIPTOR) match engine port device number (PCIe_ENGINE_CONFIG)
 *  5  Check if lane can be muxed
 *
 * DDI Link
 *
 *  1  Check if lane from user port descriptor (PCIe_DDI_DESCRIPTOR) belongs to wrapper (PCIe_WRAPPER_CONFIG)
 *  2  Check lane from (PCIe_DDI_DESCRIPTOR) match exactly phy lane (PCIe_ENGINE_CONFIG)
 *
 * @param[in]     ComplexDescriptor   Pointer to used define complex descriptor
 * @param[in,out] Wrapper             Pointer to wrapper config descriptor
 * @param[in]     Pcie                Pointer to global PCIe configuration
 * @retval        AGESA_SUCCESS       Topology successfully mapped
 * @retval        AGESA_ERROR         Topology can not be mapped
 */
AGESA_STATUS
PcieMapTopologyOnWrapper (
  IN       PCIe_COMPLEX_DESCRIPTOR     *ComplexDescriptor,
  IN OUT   PCIe_WRAPPER_CONFIG         *Wrapper,
  IN       PCIe_PLATFORM_CONFIG        *Pcie
  )
{
  AGESA_STATUS        AggregatedStatus;
  AGESA_STATUS        MappingStatus;
  PCIe_ENGINE_CONFIG  *Engine;
  UINT32              AllocatedPhyLanes;
  UINT32              EnginePhyLanes;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieMapTopologyOnWrapper Enter\n");
  AggregatedStatus = AGESA_SUCCESS;

  // Map user PCIe port descriptors onto this wrapper's port engines.
  if (PcieLibIsPcieWrapper (Wrapper)) {
    MappingStatus = PcieEnginesToWrapper (PciePortEngine, ComplexDescriptor, Wrapper);
    AGESA_STATUS_UPDATE (MappingStatus, AggregatedStatus);
    if (MappingStatus == AGESA_ERROR) {
      // If we can not map topology on wrapper we can not enable any engines.
      PutEventLog (
        AGESA_ERROR,
        GNB_EVENT_INVALID_PCIE_TOPOLOGY_CONFIGURATION,
        Wrapper->WrapId,
        Wrapper->StartPhyLane,
        Wrapper->EndPhyLane,
        0,
        GnbLibGetHeader (Pcie)
        );
      PcieConfigDisableAllEngines (PciePortEngine, Wrapper);
    }
  }

  // Map user DDI descriptors onto this wrapper's display engines.
  if (PcieLibIsDdiWrapper (Wrapper)) {
    MappingStatus = PcieEnginesToWrapper (PcieDdiEngine, ComplexDescriptor, Wrapper);
    AGESA_STATUS_UPDATE (MappingStatus, AggregatedStatus);
    if (MappingStatus == AGESA_ERROR) {
      // If we can not map topology on wrapper we can not enable any engines.
      PutEventLog (
        AGESA_ERROR,
        GNB_EVENT_INVALID_DDI_TOPOLOGY_CONFIGURATION,
        Wrapper->WrapId,
        Wrapper->StartPhyLane,
        Wrapper->EndPhyLane,
        0,
        GnbLibGetHeader (Pcie)
        );
      PcieConfigDisableAllEngines (PcieDdiEngine, Wrapper);
    }
  }

  // Copy engine data
  PcieMapInitializeEngineData (ComplexDescriptor, Wrapper, Pcie);

  // Verify if we oversubscribe lanes and PHY link width
  AllocatedPhyLanes = 0;
  for (Engine = PcieConfigGetChildEngine (Wrapper); Engine != NULL; Engine = PcieLibGetNextDescriptor (Engine)) {
    if (!PcieLibIsEngineAllocated (Engine)) {
      continue;
    }
    EnginePhyLanes = PcieConfigGetEnginePhyLaneBitMap (Engine);
    if ((AllocatedPhyLanes & EnginePhyLanes) == 0) {
      AllocatedPhyLanes |= EnginePhyLanes;
    } else {
      // Engine claims a phy lane some earlier engine already owns.
      IDS_HDT_CONSOLE (PCIE_MISC, " ERROR! Lanes double subscribe lanes [Engine Lanes %d..%d]\n",
        Engine->EngineData.StartLane,
        Engine->EngineData.EndLane
        );
      PutEventLog (
        AGESA_ERROR,
        GNB_EVENT_INVALID_LANES_CONFIGURATION,
        Engine->EngineData.StartLane,
        Engine->EngineData.EndLane,
        0,
        0,
        GnbLibGetHeader (Pcie)
        );
      PcieConfigDisableEngine (Engine);
      MappingStatus = AGESA_ERROR;
      AGESA_STATUS_UPDATE (MappingStatus, AggregatedStatus);
    }
  }

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieMapTopologyOnWrapper Exit [%d]\n", AggregatedStatus);
  return AggregatedStatus;
}
/**
 *
 * A sub-function that extracts WL and HW RxEn seeds from the PSCFG tables
 * of an input table block.
 *
 * @param[in,out] *NBPtr         - Pointer to the MEM_NB_BLOCK
 * @param[in]     *EntryOfTables - Pointer to MEM_PSC_TABLE_BLOCK
 *
 * @return TRUE  - Both seeds found; NBPtr->PsPtr->WLSeedVal and
 *                 NBPtr->PsPtr->HWRxENSeedVal are updated.
 * @return FALSE - No matching seed table or table entry was found; an event
 *                 is logged and the error handler is invoked for missing entries.
 *
 */
BOOLEAN
MemPGetTrainingSeeds (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 MaxDimmPerCh;
  UINT8 MaxDimmSlotPerCh;
  UINT8 NOD;
  UINT8 TableSize;
  DIMM_TYPE DimmType;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  UINT8 Seedloop;
  UINT8 CH;
  PSC_TBL_ENTRY **TblEntryPtr;
  PSCFG_SEED_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;

  TblEntryPtr = NULL;
  TblPtr = NULL;
  TableSize = 0;
  PackageType = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;

  MaxDimmPerCh = GetMaxDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);
  // Slots = total DIMMs per channel minus soldered-down DIMMs.
  MaxDimmSlotPerCh = MaxDimmPerCh - GetMaxSolderedDownDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration,
                                                                       NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);
  // Channel select mask used to match PSCFG entries against this channel.
  CH = 1 << (CurrentChannel->ChannelID);

  // Classify the populated DIMM technology on this channel.
  if (CurrentChannel->RegDimmPresent != 0) {
    DimmType = RDIMM_TYPE;
  } else if (CurrentChannel->SODimmPresent != 0) {
    DimmType = SODIMM_TYPE;
  } else if (CurrentChannel->LrDimmPresent != 0) {
    DimmType = LRDIMM_TYPE;
  } else {
    DimmType = UDIMM_TYPE;
  }

  // Check if it is "SODIMM plus soldered-down DRAM" or "Soldered-down DRAM only" configuration,
  // DimmType is changed to 'SODWN_SODIMM_TYPE' if soldered-down DRAM exists.
  if (MaxDimmSlotPerCh != MaxDimmPerCh) {
    // SODIMM plus soldered-down DRAM
    DimmType = SODWN_SODIMM_TYPE;
  } else if (FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_SOLDERED_DOWN_SODIMM_TYPE,
                                  NBPtr->MCTPtr->SocketId, NBPtr->ChannelPtr->ChannelID, 0, NULL, NULL) != NULL) {
    // Soldered-down DRAM only
    DimmType = SODWN_SODIMM_TYPE;
    MaxDimmSlotPerCh = 0;
  }

  // Number-of-DIMM-slots selector bit for PSCFG matching.
  // FIX: in the original, "(UINT8) (MaxDimmSlotPerCh != 0) ? ..." bound the cast
  // to the comparison (the ternary condition) instead of the selected value; it
  // worked only because a casted boolean stays truthy. Parenthesized so the cast
  // applies to the shifted value, as intended.
  NOD = (MaxDimmSlotPerCh != 0) ? (UINT8) (1 << (MaxDimmSlotPerCh - 1)) : _DIMM_NONE;

  // Get seed value of WL (pass 0), then HW RxEn (pass 1).
  for (Seedloop = 0; Seedloop < 2; Seedloop++) {
    TblEntryPtr = (Seedloop == 0) ? EntryOfTables->TblEntryOfWLSeed : EntryOfTables->TblEntryOfHWRxENSeed;
    i = 0;
    // Obtain table pointer, table size, Logical Cpuid and PSC type according to Dimm, NB and package type.
    while (TblEntryPtr[i] != NULL) {
      if (((TblEntryPtr[i])->Header.DimmType & DimmType) != 0) {
        //
        // Determine if this is the expected NB Type
        //
        LogicalCpuid = (TblEntryPtr[i])->Header.LogicalCpuid;
        PackageType = (TblEntryPtr[i])->Header.PackageType;
        if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
          TblPtr = (PSCFG_SEED_ENTRY *) ((TblEntryPtr[i])->TBLPtr);
          TableSize = (TblEntryPtr[i])->TableSize;
          break;
        }
      }
      i++;
    }

    // Check whether no table entry is found.
    if (TblEntryPtr[i] == NULL) {
      IDS_HDT_CONSOLE (MEM_FLOW, "\nNo %s training seeds Config table\n", (Seedloop == 0) ? "WL" : "HW RxEn");
      return FALSE;
    }

    // Scan the selected table for an entry matching this channel's slot count and ID.
    for (i = 0; i < TableSize; i++) {
      if ((TblPtr->DimmPerCh & NOD) != 0) {
        if ((TblPtr->Channel & CH) != 0) {
          if (Seedloop == 0) {
            NBPtr->PsPtr->WLSeedVal = (UINT8) TblPtr->SeedVal;
          } else {
            NBPtr->PsPtr->HWRxENSeedVal = TblPtr->SeedVal;
          }
          break;
        }
      }
      TblPtr++;
    }

    // i == TableSize means the scan above fell off the end without a match.
    if (i == TableSize) {
      IDS_HDT_CONSOLE (MEM_FLOW, "\nNo %s seed entries\n\n", (Seedloop == 0) ? "WL" : "HW RxEn");
      PutEventLog (AGESA_ERROR, MEM_ERROR_TRAINING_SEED_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
      SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
      if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
        ASSERT (FALSE);
      }
      return FALSE;
    }
  }
  return TRUE;
}
/**
 *
 * A sub-function which extracts RC10 operating speed value from an input table and stores the extracted
 * value to a specific address.
 *
 * @param[in,out] *NBPtr         - Pointer to the MEM_NB_BLOCK
 * @param[in]     *EntryOfTables - Pointer to MEM_PSC_TABLE_BLOCK
 *
 * @return TRUE  - Succeed in extracting the table value (or channel has no
 *                 registered DIMMs, in which case RC10 does not apply)
 * @return FALSE - Fail to extract the table value
 *
 */
BOOLEAN
MemPGetRC10OpSpd (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 TableSize;
  UINT32 CurDDRrate;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  UINT8 PsoMaskRC10OpSpeed;
  PSCFG_OPSPD_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;

  // RC10 is an RDIMM register control word; nothing to do without registered DIMMs.
  if (CurrentChannel->RegDimmPresent == 0) {
    return TRUE;
  }

  TblPtr = NULL;
  TableSize = 0;
  PackageType = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;

  i = 0;
  // Obtain table pointer, table size, Logical Cpuid and PSC type according to NB type and package type.
  while (EntryOfTables->TblEntryOfRC10OpSpeed[i] != NULL) {
    LogicalCpuid = (EntryOfTables->TblEntryOfRC10OpSpeed[i])->Header.LogicalCpuid;
    PackageType = (EntryOfTables->TblEntryOfRC10OpSpeed[i])->Header.PackageType;
    //
    // Determine if this is the expected NB Type
    //
    if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
      TblPtr = (PSCFG_OPSPD_ENTRY *) ((EntryOfTables->TblEntryOfRC10OpSpeed[i])->TBLPtr);
      TableSize = (EntryOfTables->TblEntryOfRC10OpSpeed[i])->TableSize;
      break;
    }
    i++;
  }

  // Check whether no table entry is found.
  if (EntryOfTables->TblEntryOfRC10OpSpeed[i] == NULL) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo RC10 Op Speed table\n");
    return FALSE;
  }

  // Encode the current DDR rate as a one-hot bit for matching against the
  // table's DDRrate bitmask. NOTE(review): the divisor 66 presumably maps the
  // Timings.Speed MHz value onto successive DDR rate steps — confirm against
  // the PSCFG table encoding before changing.
  CurDDRrate = (UINT32) (1 << (CurrentChannel->DCTPtr->Timings.Speed / 66));
  for (i = 0; i < TableSize; i++) {
    if ((TblPtr->DDRrate & CurDDRrate) != 0) {
      NBPtr->PsPtr->RC10OpSpd = TblPtr->OPSPD;
      break;
    }
    TblPtr++;
  }

  //
  // If there is no entry, check if an overriding value exists. If not, return FALSE.
  // (i == TableSize means the scan above found no matching DDR rate.)
  //
  PsoMaskRC10OpSpeed = (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_RC10_OPSPEED);
  if ((PsoMaskRC10OpSpeed == 0) && (i == TableSize)) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo RC10 Op Speed entries\n");
    PutEventLog (AGESA_ERROR, MEM_ERROR_RC10_OP_SPEED_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
    if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
      ASSERT (FALSE);
    }
    return FALSE;
  }
  return TRUE;
}
/**
 *
 *
 * This is the training function which sets up the environment for remote
 * training on the AP and launches the remote routine.
 *
 * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK
 *
 * @return TRUE  - Launch training on AP successfully.
 * @return FALSE - Fail to launch training on AP (heap allocation failed).
 */
BOOLEAN
MemFParallelTrainingHy (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  DIE_STRUCT *MCTPtr;
  REMOTE_TRAINING_ENV *EnvPtr;
  AP_TASK TrainingTask;
  UINT8 Socket;
  UINT8 Module;
  UINT8 APCore;
  UINT8 p;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 BspSocket;
  UINT32 BspModule;
  UINT32 BspCore;
  AGESA_STATUS Status;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT16 MctDataSize;

  StdHeader = &(NBPtr->MemPtr->StdHeader);
  MCTPtr = NBPtr->MCTPtr;
  Socket = MCTPtr->SocketId;
  Module = MCTPtr->DieId;

  //
  // Allocate buffer for REMOTE_TRAINING_ENV.
  // The buffer holds the environment header followed by a flat copy of this
  // node's DCT/channel data (MctDataSize bytes).
  //
  MctDataSize = MAX_DCTS_PER_NODE_HY * (
                  sizeof (DCT_STRUCT) + (
                    MAX_CHANNELS_PER_DCT_HY * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                  )
                );
  AllocHeapParams.RequestedBufferSize = MctDataSize + sizeof (REMOTE_TRAINING_ENV);
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Socket, Module, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    EnvPtr = (REMOTE_TRAINING_ENV *) AllocHeapParams.BufferPtr;
    // Advance past the env header; BufferPtr now points at the DCT data area.
    AllocHeapParams.BufferPtr += sizeof (REMOTE_TRAINING_ENV);

    //
    // Setup Remote training environment
    //
    LibAmdMemCopy (&(EnvPtr->StdHeader), StdHeader, sizeof (AMD_CONFIG_PARAMS), StdHeader);
    LibAmdMemCopy (&(EnvPtr->DieStruct), MCTPtr, sizeof (DIE_STRUCT), StdHeader);
    for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
      EnvPtr->GetPlatformCfg[p] = NBPtr->MemPtr->GetPlatformCfg[p];
    }
    EnvPtr->ErrorHandling = NBPtr->MemPtr->ErrorHandling;
    EnvPtr->NBBlockCtor = MemConstructRemoteNBBlockHY;
    EnvPtr->FeatPtr = NBPtr->FeatPtr;
    EnvPtr->HoleBase = NBPtr->RefPtr->HoleBase;
    EnvPtr->BottomIo = NBPtr->RefPtr->BottomIo;
    EnvPtr->UmaSize = NBPtr->RefPtr->UmaSize;
    EnvPtr->SysLimit = NBPtr->RefPtr->SysLimit;
    EnvPtr->TableBasedAlterations = NBPtr->RefPtr->TableBasedAlterations;
    EnvPtr->PlatformMemoryConfiguration = NBPtr->RefPtr->PlatformMemoryConfiguration;

    // Copy this node's DCT data into the area following the env header.
    LibAmdMemCopy (AllocHeapParams.BufferPtr, MCTPtr->DctData, MctDataSize, StdHeader);

    //
    // Get Socket, Core of the BSP
    //
    IdentifyCore (StdHeader, &BspSocket, &BspModule, &BspCore, &Status);
    EnvPtr->BspSocket = ((UINT8)BspSocket & 0x000000FF);
    EnvPtr->BspCore = ((UINT8)BspCore & 0x000000FF);

    //
    // Set up the remote task structure
    //
    TrainingTask.DataTransfer.DataPtr = EnvPtr;
    // Round the byte size up to whole DWORDs.
    // NOTE(review): the (UINT16) cast binds to (size + 3) before the division;
    // this truncates before dividing and differs from (UINT16)((size + 3) / 4)
    // only if the buffer exceeds 64 KiB — presumably it never does; confirm.
    TrainingTask.DataTransfer.DataSizeInDwords = (UINT16) (AllocHeapParams.RequestedBufferSize + 3) / 4;
    TrainingTask.DataTransfer.DataTransferFlags = 0;
    TrainingTask.ExeFlags = 0;
    TrainingTask.FuncAddress.PfApTaskI = (PF_AP_TASK_I)MemFParallelTraining;

    //
    // Get Target AP Core
    //
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    APCore = (UINT8) (LowCore & 0x000000FF);

    //
    // Launch Remote Training
    //
    ApUtilRunCodeOnSocketCore (Socket, APCore, &TrainingTask, StdHeader);

    HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);

    return TRUE;
  } else {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_REMOTE_TRAINING_ENV, NBPtr->Node, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocate heap space for "REMOTE_TRAINING_ENV"
    return FALSE;
  }
}
/**
 * Allocates space for a new buffer in the heap
 *
 * This function will allocate a new buffer either by using the internal 'AGESA'
 * heap manager or by using the external (IBV) heap manager. This function will
 * also determine whether or not there is enough space for the new structure.
 * If so, it will zero out the buffer, and return a pointer to the region.
 *
 * @param[in,out] AllocateHeapParams structure pointer containing the size of the
 *                                   desired new region, its handle, and the
 *                                   return pointer.
 * @param[in,out] StdHeader          Config handle for library and services.
 *
 * @retval        AGESA_SUCCESS      No error
 * @retval        AGESA_BOUNDS_CHK   Handle already exists, or not enough
 *                                   free space
 * @retval        AGESA_UNSUPPORTED  Do not support this kind of heap allocation
 * @retval        AGESA_ERROR        Heap is invalid
 *
 */
AGESA_STATUS
HeapAllocateBuffer (
  IN OUT   ALLOCATE_HEAP_PARAMS *AllocateHeapParams,
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT8 *BaseAddress;
  UINT8 AlignTo16Byte;
  UINT8 CalloutFcnData;
  UINT32 RemainSize;
  UINT32 OffsetOfSplitNode;
  UINT32 OffsetOfNode;
  HEAP_MANAGER *HeapManager;
  BUFFER_NODE *FreeSpaceNode;
  BUFFER_NODE *SplitFreeSpaceNode;
  BUFFER_NODE *CurrentBufferNode;
  BUFFER_NODE *NewBufferNode;
  AGESA_BUFFER_PARAMS AgesaBuffer;

  ASSERT (StdHeader != NULL);

  // Runtime-persistent buffers are only supported once the heap lives in system memory.
  if (AllocateHeapParams->Persist == HEAP_RUNTIME_SYSTEM_MEM) {
    ASSERT (StdHeader->HeapStatus == HEAP_SYSTEM_MEM);
    if (StdHeader->HeapStatus != HEAP_SYSTEM_MEM) {
      return AGESA_UNSUPPORTED;
    }
  }

  // At this stage we will decide to either use the external (IBV) heap manager
  // or the internal (AGESA) heap manager.

  // If (HeapStatus == HEAP_SYSTEM_MEM), then use the callout function to call
  // the external heap manager.
  if (StdHeader->HeapStatus == HEAP_SYSTEM_MEM) {
    AgesaBuffer.StdHeader = *StdHeader;
    AgesaBuffer.BufferHandle = AllocateHeapParams->BufferHandle;
    AgesaBuffer.BufferLength = AllocateHeapParams->RequestedBufferSize;
    if (AllocateHeapParams->Persist == HEAP_RUNTIME_SYSTEM_MEM) {
      CalloutFcnData = HEAP_CALLOUT_RUNTIME;
    } else {
      CalloutFcnData = HEAP_CALLOUT_BOOTTIME;
    }
    AGESA_TESTPOINT (TpIfBeforeAllocateHeapBuffer, StdHeader);
    if (AgesaAllocateBuffer (CalloutFcnData, &AgesaBuffer) != AGESA_SUCCESS) {
      AllocateHeapParams->BufferPtr = NULL;
      return AGESA_ERROR;
    }
    AGESA_TESTPOINT (TpIfAfterAllocateHeapBuffer, StdHeader);
    AllocateHeapParams->BufferPtr = (UINT8 *) (AgesaBuffer.BufferPointer);
    return AGESA_SUCCESS;
  }

  // If (StdHeader->HeapStatus != HEAP_SYSTEM_MEM), then allocate the buffer
  // using the following AGESA Heap Manager code.

  // Buffer pointer is NULL unless we return a buffer.
  AlignTo16Byte = 0;
  AllocateHeapParams->BufferPtr = NULL;
  // Grow the request to cover the debug sentinels placed before and after the payload.
  AllocateHeapParams->RequestedBufferSize += NUM_OF_SENTINEL * SIZE_OF_SENTINEL;

  // Get base address
  BaseAddress = (UINT8 *) (UINTN) StdHeader->HeapBasePtr;
  HeapManager = (HEAP_MANAGER *) BaseAddress;

  // Check Heap database is valid
  if ((BaseAddress == NULL) || (HeapManager->Signature != HEAP_SIGNATURE_VALID)) {
    // The base address in StdHeader is incorrect, get base address by itself
    BaseAddress = (UINT8 *) HeapGetBaseAddress (StdHeader);
    HeapManager = (HEAP_MANAGER *) BaseAddress;
    if ((BaseAddress == NULL) || (HeapManager->Signature != HEAP_SIGNATURE_VALID)) {
      // Heap is not available, ASSERT here
      ASSERT (FALSE);
      return AGESA_ERROR;
    }
    // Cache the recovered base address for subsequent calls.
    StdHeader->HeapBasePtr = (UINT64) BaseAddress;
  }

  // Allocate
  CurrentBufferNode = (BUFFER_NODE *) (BaseAddress + sizeof (HEAP_MANAGER));
  // If there already is a buffer with the incoming BufferHandle, we return AGESA_BOUNDS_CHK.
  if (HeapManager->FirstActiveBufferOffset != AMD_HEAP_INVALID_HEAP_OFFSET) {
    CurrentBufferNode = (BUFFER_NODE *) (BaseAddress + HeapManager->FirstActiveBufferOffset);
    // Walk the active list; the loop condition stops at the last node, so the
    // last node's handle is checked separately below. After this walk,
    // CurrentBufferNode is the list tail, which is reused when linking in the
    // new node further down.
    while (CurrentBufferNode->OffsetOfNextNode != AMD_HEAP_INVALID_HEAP_OFFSET) {
      if (CurrentBufferNode->BufferHandle == AllocateHeapParams->BufferHandle) {
        PutEventLog (AGESA_BOUNDS_CHK, CPU_ERROR_HEAP_BUFFER_HANDLE_IS_ALREADY_USED, AllocateHeapParams->BufferHandle, 0, 0, 0, StdHeader);
        return AGESA_BOUNDS_CHK;
      } else {
        CurrentBufferNode = (BUFFER_NODE *) (BaseAddress + CurrentBufferNode->OffsetOfNextNode);
      }
    }
    if (CurrentBufferNode->BufferHandle == AllocateHeapParams->BufferHandle) {
      PutEventLog (AGESA_BOUNDS_CHK, CPU_ERROR_HEAP_BUFFER_HANDLE_IS_ALREADY_USED, AllocateHeapParams->BufferHandle, 0, 0, 0, StdHeader);
      return AGESA_BOUNDS_CHK;
    }
  }

  // Find the first free node that fits the request (first-fit search).
  OffsetOfNode = HeapManager->FirstFreeSpaceOffset;
  FreeSpaceNode = (BUFFER_NODE *) (BaseAddress + OffsetOfNode);
  while (OffsetOfNode != AMD_HEAP_INVALID_HEAP_OFFSET) {
    // Padding needed so the payload (after node header and leading sentinel)
    // starts on a 16-byte boundary for THIS candidate node.
    AlignTo16Byte = (UINT8) ((0x10 - (((UINTN) (VOID *) FreeSpaceNode + sizeof (BUFFER_NODE) + SIZE_OF_SENTINEL) & 0xF)) & 0xF);
    // Temporarily add the padding to the request for the fit test...
    AllocateHeapParams->RequestedBufferSize = (UINT32) (AllocateHeapParams->RequestedBufferSize + AlignTo16Byte);
    if (FreeSpaceNode->BufferSize >= AllocateHeapParams->RequestedBufferSize) {
      break;
    }
    // ...and remove it again before trying the next node (padding is per-node).
    AllocateHeapParams->RequestedBufferSize = (UINT32) (AllocateHeapParams->RequestedBufferSize - AlignTo16Byte);
    OffsetOfNode = FreeSpaceNode->OffsetOfNextNode;
    FreeSpaceNode = (BUFFER_NODE *) (BaseAddress + OffsetOfNode);
  }
  if (OffsetOfNode == AMD_HEAP_INVALID_HEAP_OFFSET) {
    // We did not find any free space buffer that matches the requested buffer size.
    PutEventLog (AGESA_BOUNDS_CHK, CPU_ERROR_HEAP_IS_FULL, AllocateHeapParams->BufferHandle, 0, 0, 0, StdHeader);
    return AGESA_BOUNDS_CHK;
  } else {
    // We found one matching free space buffer: remove it from the free list
    // and turn it into the new active node.
    DeleteFreeSpaceNode (StdHeader, OffsetOfNode);
    NewBufferNode = FreeSpaceNode;
    // Add the new buffer node to the active buffer chain (as head or after the tail).
    if (HeapManager->FirstActiveBufferOffset == AMD_HEAP_INVALID_HEAP_OFFSET) {
      HeapManager->FirstActiveBufferOffset = sizeof (HEAP_MANAGER);
    } else {
      CurrentBufferNode->OffsetOfNextNode = OffsetOfNode;
    }
    // If enough space remains past the request to hold another node header,
    // split off the tail as a new free node; otherwise hand out the whole buffer.
    RemainSize = FreeSpaceNode->BufferSize - AllocateHeapParams->RequestedBufferSize;
    if (RemainSize > sizeof (BUFFER_NODE)) {
      NewBufferNode->BufferSize = AllocateHeapParams->RequestedBufferSize;
      OffsetOfSplitNode = OffsetOfNode + sizeof (BUFFER_NODE) + NewBufferNode->BufferSize;
      SplitFreeSpaceNode = (BUFFER_NODE *) (BaseAddress + OffsetOfSplitNode);
      SplitFreeSpaceNode->BufferSize = RemainSize - sizeof (BUFFER_NODE);
      InsertFreeSpaceNode (StdHeader, OffsetOfSplitNode);
    } else {
      // Remaining size is less than a BUFFER_NODE; we use the whole size instead of the requested size.
      NewBufferNode->BufferSize = FreeSpaceNode->BufferSize;
    }
  }

  // Initialize the BUFFER_NODE structure of NewBufferNode
  NewBufferNode->BufferHandle = AllocateHeapParams->BufferHandle;
  if ((AllocateHeapParams->Persist == HEAP_TEMP_MEM) ||
      (AllocateHeapParams->Persist == HEAP_SYSTEM_MEM)) {
    NewBufferNode->Persist = AllocateHeapParams->Persist;
  } else {
    NewBufferNode->Persist = HEAP_LOCAL_CACHE;
  }
  NewBufferNode->OffsetOfNextNode = AMD_HEAP_INVALID_HEAP_OFFSET;
  NewBufferNode->PadSize = AlignTo16Byte;

  // Clear the payload to 0x00
  LibAmdMemFill ((VOID *) ((UINT8 *) NewBufferNode + sizeof (BUFFER_NODE)), 0x00, NewBufferNode->BufferSize, StdHeader);

  // Debug feature: place guard sentinels around the payload
  SET_SENTINEL_BEFORE (NewBufferNode, AlignTo16Byte);
  SET_SENTINEL_AFTER (NewBufferNode);

  // Update global variables
  HeapManager->UsedSize += NewBufferNode->BufferSize + sizeof (BUFFER_NODE);

  // Now fill in the incoming structure: the caller's pointer skips the node
  // header, the leading sentinel, and the alignment pad; the reported size
  // excludes sentinels and pad again.
  AllocateHeapParams->BufferPtr = (UINT8 *) ((UINT8 *) NewBufferNode + sizeof (BUFFER_NODE) + SIZE_OF_SENTINEL + AlignTo16Byte);
  AllocateHeapParams->RequestedBufferSize -= (NUM_OF_SENTINEL * SIZE_OF_SENTINEL + AlignTo16Byte);

  return AGESA_SUCCESS;
}
/**
 *
 * Runs DQS training in parallel: launches training on each remote (AP) die,
 * trains the BSP die locally, then polls each AP and copies its training
 * results back over the AP data-transfer mechanism.
 *
 * @param[in,out] *mmPtr - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @return TRUE  - No fatal error occurs.
 * @return FALSE - Fatal error occurs.
 */
BOOLEAN
MemMParallelTraining (
  IN OUT   MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  MEM_DATA_STRUCT *MemPtr;
  MEM_NB_BLOCK *NBPtr;
  DIE_INFO TrainInfo[MAX_NODES_SUPPORTED];
  AP_DATA_TRANSFER ReturnData;
  AGESA_STATUS Status;
  UINT8 ApSts;
  UINT8 Die;
  UINT8 Socket;
  UINT32 Module;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 Time;
  UINT32 TimeOut;
  BOOLEAN StillTraining;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 *BufferPtr;
  BOOLEAN TimeoutEn;

  NBPtr = mmPtr->NBPtr;
  MemPtr = mmPtr->MemPtr;
  StdHeader = &(mmPtr->MemPtr->StdHeader);
  Time = 0;
  TimeOut = PARALLEL_TRAINING_TIMEOUT;
  TimeoutEn = TRUE;
  IDS_TIMEOUT_CTL (&TimeoutEn);

  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart parallel training\n");
  AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, StdHeader);
  //
  // Initialize Training Info Array: record each die's socket and first core.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++) {
    Socket = TrainInfo[Die].Socket = NBPtr[Die].MCTPtr->SocketId;
    Module = NBPtr[Die].MCTPtr->DieId;
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    TrainInfo[Die].Core = (UINT8) (LowCore & 0x000000FF);
    IDS_HDT_CONSOLE (MEM_FLOW, "\tLaunch core %d of socket %d\n", LowCore, Socket);
    TrainInfo[Die].Training = FALSE;
  }
  //
  // Start Training on each remote die.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (Die != BSP_DIE) {
      NBPtr[Die].BeforeDqsTraining (&(mmPtr->NBPtr[Die]));
      if (NBPtr[Die].MCTPtr->NodeMemSize != 0) {
        if (!NBPtr[Die].FeatPtr->Training (&(mmPtr->NBPtr[Die]))) {
          // Failed to launch code on AP.
          // FIX: log the failing die's identifiers (NBPtr[Die]) — the original
          // logged NBPtr->Node/Dct/Channel, i.e. always die 0 (the BSP).
          PutEventLog (AGESA_ERROR, MEM_ERROR_PARALLEL_TRAINING_LAUNCH_FAIL,
                       NBPtr[Die].Node, NBPtr[Die].Dct, NBPtr[Die].Channel, 0, StdHeader);
          SetMemError (AGESA_ERROR, NBPtr[Die].MCTPtr);
          MemPtr->ErrorHandling (NBPtr[Die].MCTPtr, EXCLUDE_ALL_DCT, EXCLUDE_ALL_CHIPSEL, &MemPtr->StdHeader);
        } else {
          TrainInfo[Die].Training = TRUE;
        }
      }
    }
  }
  //
  // Call training on the BSP.
  //
  IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NBPtr[BSP_DIE].Node);
  NBPtr[BSP_DIE].BeforeDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].TrainingFlow (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].AfterDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));

  //
  // Get results from remote processors' training.
  //
  do {
    StillTraining = FALSE;
    for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
      //
      // For each die that is training, read the status.
      //
      if (TrainInfo[Die].Training == TRUE) {
        ApSts = ApUtilReadRemoteControlByte (TrainInfo[Die].Socket, TrainInfo[Die].Core, StdHeader);
        if ((ApSts & 0x80) == 0) {
          //
          // AP has data ready: allocate a buffer for the received data.
          // Layout mirrors the copies below: DIE_STRUCT, then per-DCT data
          // (DCT_STRUCT + per-channel CH_DEF_STRUCT / MEM_PS_BLOCK), then the
          // delay and failure-mask tables; +3 for DWORD rounding.
          //
          AllocHeapParams.RequestedBufferSize = (sizeof (DIE_STRUCT) +
                                                 NBPtr[Die].DctCount * (
                                                   sizeof (DCT_STRUCT) + (
                                                     NBPtr[Die].ChannelCount * (
                                                       sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                                                         (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                                                          NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                                                          NUMBER_OF_DELAY_TABLES) +
                                                         (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                                                       )
                                                     )
                                                   )
                                                 )
                                                ) + 3;
          AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Die, 0, 0);
          AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
          if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
            //
            // Receive training results.
            //
            ReturnData.DataPtr = AllocHeapParams.BufferPtr;
            // NOTE(review): cast binds to RequestedBufferSize before the /4;
            // equivalent to (UINT16)(size/4) only while size fits in 16 bits —
            // presumably guaranteed by the structure sizes; confirm.
            ReturnData.DataSizeInDwords = (UINT16) AllocHeapParams.RequestedBufferSize / 4;
            ReturnData.DataTransferFlags = 0;
            Status = ApUtilReceiveBuffer (TrainInfo[Die].Socket, TrainInfo[Die].Core, &ReturnData, StdHeader);
            if (Status != AGESA_SUCCESS) {
              SetMemError (Status, NBPtr[Die].MCTPtr);
            }

            // Unpack the received blob in the same order it was packed.
            BufferPtr = AllocHeapParams.BufferPtr;
            LibAmdMemCopy (NBPtr[Die].MCTPtr, BufferPtr, sizeof (DIE_STRUCT), StdHeader);
            BufferPtr += sizeof (DIE_STRUCT);
            LibAmdMemCopy (NBPtr[Die].MCTPtr->DctData,
                           BufferPtr,
                           NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT)),
                           StdHeader);
            BufferPtr += NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT));
            LibAmdMemCopy (NBPtr[Die].PSBlock,
                           BufferPtr,
                           NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK),
                           StdHeader);
            BufferPtr += NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK);
            LibAmdMemCopy (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RcvEnDlys,
                           BufferPtr,
                           (NBPtr[Die].DctCount * NBPtr[Die].ChannelCount) *
                           ((NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                             NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                             NUMBER_OF_DELAY_TABLES) +
                            (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)),
                           StdHeader);

            HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);

            NBPtr[Die].AfterDqsTraining (&(mmPtr->NBPtr[Die]));
            TrainInfo[Die].Training = FALSE;
          } else {
            PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_RECEIVED_DATA, NBPtr[Die].Node, 0, 0, 0, StdHeader);
            SetMemError (AGESA_FATAL, NBPtr[Die].MCTPtr);
            ASSERT(FALSE); // Insufficient heap space allocation for parallel training buffer
          }
        } else if (ApSts == CORE_IDLE) {
          // AP does not have a buffer to transmit to the BSP:
          // AP failed to locate a buffer for data transfer.
          TrainInfo[Die].Training = FALSE;
        } else {
          // Signal to loop through again
          StillTraining = TRUE;
        }
      }
    }
    // Wait for 1 us between polls.
    MemUWait10ns (100, MemPtr);
    Time ++;
  } while ((StillTraining) && ((Time < TimeOut) || !TimeoutEn)); // Continue until all dies are finished;
                                                                // if we cannot finish in time, do fatal exit.
  if (StillTraining && TimeoutEn) {
    // Parallel training timed out; do fatal exit, as at least one AP hangs.
    PutEventLog (AGESA_FATAL, MEM_ERROR_PARALLEL_TRAINING_TIME_OUT, 0, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, NBPtr[BSP_DIE].MCTPtr);
    ASSERT(FALSE); // Timeout occurred while still training
  }

  // Any die left with a fatal error code fails the whole flow.
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      return FALSE;
    }
  }
  return TRUE;
}