/**
 * Perform ODT Platform Override
 *
 * Applies a platform-specific on-die-termination override entry when the
 * current DDR speed, the channel DIMM population, and the quad-rank DIMM
 * population all match the entry encoded in Buffer.
 *
 * @param[in]  NBPtr  - Pointer to Current NBBlock
 * @param[in]  Buffer - Pointer to the Action Command Data (w/o Type and Len)
 *
 * @return BOOLEAN - TRUE : Action was performed
 *                   FALSE: Action was not performed
 *
 * ----------------------------------------------------------------------------
 */
BOOLEAN
STATIC
MemPSODoActionODT (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       UINT8 *Buffer
  )
{
  UINT32 SpeedBitmap;
  UINT8  DimmCount;
  UINT8  QrDimmCount;
  UINT8  Index;

  // Translate the current DDR rate into the one-hot bitmap format the
  // override entry uses (one bit per 66 MHz speed grade).
  SpeedBitmap = (UINT32) 1 << (NBPtr->DCTPtr->Timings.Speed / 66);
  DimmCount = NBPtr->ChannelPtr->Dimms;

  // Count quad-rank DIMMs on this channel; only sockets 0 and 1 are counted.
  QrDimmCount = 0;
  for (Index = 0; Index < MAX_DIMMS_PER_CHANNEL; Index++) {
    if ((Index < 2) && ((NBPtr->ChannelPtr->DimmQrPresent & (UINT16) (1 << Index)) != 0)) {
      QrDimmCount++;
    }
  }

  // Buffer bytes 0..3: DDR-speed bitmap this override applies to.
  if ((SpeedBitmap & ((UINT32 *) Buffer)[0]) == 0) {
    return FALSE;
  }

  // Buffer byte 4: DIMM-count bitmap (bit n set => n+1 DIMMs), or ANY_NUM wildcard.
  if ((((UINT8) (1 << (DimmCount - 1)) & Buffer[4]) == 0) && (Buffer[4] != ANY_NUM)) {
    return FALSE;
  }

  // Buffer byte 5: quad-rank population match (NO_DIMM when none, count bitmap
  // when present, or ANY_NUM wildcard).
  if (((QrDimmCount == 0) && (Buffer[5] == NO_DIMM)) ||
      ((QrDimmCount > 0) && (((UINT8) (1 << (QrDimmCount - 1)) & Buffer[5]) != 0)) ||
      (Buffer[5] == ANY_NUM)) {
    // All conditions matched: apply the termination override values.
    NBPtr->PsPtr->DramTerm = Buffer[6];
    NBPtr->PsPtr->QR_DramTerm = Buffer[7];
    NBPtr->PsPtr->DynamicDramTerm = Buffer[8];
    IDS_HDT_CONSOLE (MEM_FLOW, " Platform Override: DramTerm:%02x, QRDramTerm:%02x, DynDramTerm:%02x\n", Buffer[6], Buffer[7], Buffer[8]);
    return TRUE;
  }

  return FALSE;
}
/**
 * Enable IO Cstate feature
 *
 * Dispatches EnableIoCstateOnSocket to core 0 of every socket in the system,
 * handing the entry-point designator down to the AP task.
 *
 * @param[in]  EntryPoint       Timepoint designator.
 * @param[in]  PlatformConfig   Contains the runtime modifiable feature input data.
 * @param[in]  StdHeader        Config Handle for library, services.
 *
 * @retval     AGESA_SUCCESS    Always succeeds.
 *
 */
AGESA_STATUS
STATIC
InitializeIoCstateFeature (
  IN       UINT64                 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  AMD_CPU_EARLY_PARAMS EarlyParams;
  AP_TASK              Task;

  IDS_HDT_CONSOLE (CPU_TRACE, " IO C-state is enabled\n");

  // The AP task only consumes the platform-config portion of the early params.
  EarlyParams.PlatformConfig = *PlatformConfig;

  // Ship the 64-bit EntryPoint (2 dwords) to each target core and pass the
  // early params structure along with the task.
  Task.FuncAddress.PfApTaskIC = EnableIoCstateOnSocket;
  Task.DataTransfer.DataPtr = &EntryPoint;
  Task.DataTransfer.DataSizeInDwords = 2;
  Task.DataTransfer.DataTransferFlags = 0;
  Task.ExeFlags = PASS_EARLY_PARAMS;

  OptionMultiSocketConfiguration.BscRunCodeOnAllSystemCore0s (&Task, StdHeader, &EarlyParams);

  return AGESA_SUCCESS;
}
/**
 * Build the GFX platform configuration at the POST entry point.
 *
 * Allocates the GFX_PLATFORM_CONFIG heap buffer, zero-fills it, and populates
 * it from the POST parameters, user options, and common GNB build options.
 *
 * NOTE(review): this chunk of the file ends inside the function body (the
 * closing of the if-block, the Exit trace, and the return are outside the
 * visible region) — the code below is reproduced unchanged.
 *
 * @param[in]  StdHeader  Standard configuration header (actually the
 *                        AMD_POST_PARAMS block; see the cast below).
 *
 * @return     AGESA_STATUS
 */
AGESA_STATUS
GfxConfigPostInterface (
  IN       AMD_CONFIG_PARAMS       *StdHeader
  )
{
  GFX_PLATFORM_CONFIG       *Gfx;
  AMD_POST_PARAMS           *PostParamsPtr;
  AGESA_STATUS              Status;
  GNB_BUILD_OPTIONS_COMMON  *GnbCommonOptions;

  // StdHeader is the first member of AMD_POST_PARAMS, so this cast recovers
  // the full POST parameter block.
  PostParamsPtr = (AMD_POST_PARAMS *)StdHeader;
  Status = AGESA_SUCCESS;
  IDS_HDT_CONSOLE (GNB_TRACE, "GfxConfigPostInterface Enter\n");
  Gfx = GnbAllocateHeapBuffer (AMD_GFX_PLATFORM_CONFIG_HANDLE, sizeof (GFX_PLATFORM_CONFIG), StdHeader);
  ASSERT (Gfx != NULL);
  if (Gfx != NULL) {
    LibAmdMemFill (Gfx, 0x00, sizeof (GFX_PLATFORM_CONFIG), StdHeader);
    GnbCommonOptions = (GNB_BUILD_OPTIONS_COMMON*) GnbFmGnbBuildOptions (StdHeader);
    // Select the iGPU controller mode and its PCI address: endpoint mode puts
    // the GFX device at 0:1.0, legacy bridge mode behind bridge 0:1 at 5.0.
    if (GnbBuildOptions.IgfxModeAsPcieEp) {
      Gfx->GfxControllerMode = GfxControllerPcieEndpointMode;
      Gfx->GfxPciAddress.AddressValue = MAKE_SBDFO (0, 0, 1, 0, 0);
    } else {
      Gfx->GfxControllerMode = GfxControllerLegacyBridgeMode;
      Gfx->GfxPciAddress.AddressValue = MAKE_SBDFO (0, 1, 5, 0, 0);
    }
    // Copy runtime-configurable display/audio options from the POST params
    // and build-time options from the common GNB option block.
    Gfx->StdHeader = (PVOID) StdHeader;
    Gfx->GnbHdAudio = PostParamsPtr->PlatformConfig.GnbHdAudio;
    Gfx->AbmSupport = PostParamsPtr->PlatformConfig.AbmSupport;
    Gfx->DynamicRefreshRate = PostParamsPtr->PlatformConfig.DynamicRefreshRate;
    Gfx->LcdBackLightControl = PostParamsPtr->PlatformConfig.LcdBackLightControl;
    Gfx->AmdPlatformType = UserOptions.CfgAmdPlatformType;
    Gfx->GmcClockGating = GnbCommonOptions->CfgGmcClockGating;
    Gfx->GmcPowerGating = GnbCommonOptions->GmcPowerGating;
    Gfx->UmaSteering = GnbCommonOptions->CfgUmaSteering;
    GNB_DEBUG_CODE (
      GfxConfigDebugDump (Gfx);
    );
/**
 * Dump gfx integrated info table
 *
 * Prints selected fields of the ATOM fusion system info table (v3) to the
 * HDT debug console: the PowerPlay extended header size and two fields of the
 * integrated system info structure.
 *
 * @param[in] SystemInfoTableV3Ptr    Pointer to integrated info table
 * @param[in] Gfx                     Pointer to global GFX configuration
 *
 */
VOID
GfxIntInfoTableDebugDumpV3 (
  IN       ATOM_FUSION_SYSTEM_INFO_V3   *SystemInfoTableV3Ptr,
  IN       GFX_PLATFORM_CONFIG          *Gfx
  )
{
  ATOM_PPLIB_POWERPLAYTABLE4  *PpTable;
  ATOM_PPLIB_EXTENDEDHEADER   *ExtendedHeader;

  IDS_HDT_CONSOLE (GFX_MISC, "GfxIntInfoTableDebugDumpV3 Enter\n");
  // The PowerPlay table is embedded in the system info table; the extended
  // header lives at a byte offset stored inside the PowerPlay table itself.
  PpTable = (ATOM_PPLIB_POWERPLAYTABLE4*) &SystemInfoTableV3Ptr->ulPowerplayTable;
  ExtendedHeader = (ATOM_PPLIB_EXTENDEDHEADER *) ((UINT8 *) (PpTable) + PpTable->usExtendendedHeaderOffset);
  IDS_HDT_CONSOLE (GFX_MISC, " ExtendedHeader usSize %d\n", ExtendedHeader->usSize);
  IDS_HDT_CONSOLE (GFX_MISC, " SizeOf %d\n", sizeof(ATOM_PPLIB_EXTENDEDHEADER));
  IDS_HDT_CONSOLE (GFX_MISC, " ucHtcTmpLmt 0x%X\n", SystemInfoTableV3Ptr->sIntegratedSysInfo.ucHtcTmpLmt);
  IDS_HDT_CONSOLE (GFX_MISC, " ATOM_INTEGRATED_SYSTEM_INFO_V1_8_fld11 0x%X\n", SystemInfoTableV3Ptr->sIntegratedSysInfo.ATOM_INTEGRATED_SYSTEM_INFO_V1_8_fld11);
  IDS_HDT_CONSOLE (GFX_MISC, "GfxIntInfoTableDebugDumpV3 Exit\n");
}
/**
 * Main entry point for the AMD_INIT_RESET function.
 *
 * This entry point is responsible for establishing the HT links to the program
 * ROM and for performing basic processor initialization.
 *
 * @param[in,out] ResetParams    Required input parameters for the AMD_INIT_RESET
 *                               entry point.
 *
 * @return Aggregated status across all internal AMD reset calls invoked.
 *
 */
AGESA_STATUS
AmdInitReset (
  IN OUT   AMD_RESET_PARAMS *ResetParams
  )
{
  AGESA_STATUS AgesaStatus;
  AGESA_STATUS CalledAgesaStatus;
  WARM_RESET_REQUEST Request;
  UINT8 PrevRequestBit;
  UINT8 PrevStateBits;

  // Fix: validate the parameter BEFORE any member access. The original code
  // asserted only after ResetParams had already been dereferenced by
  // AllocateExecutionCache and the IDS/testpoint macros, making the check
  // ineffective.
  ASSERT (ResetParams != NULL);

  AgesaStatus = AGESA_SUCCESS;

  // Setup ROM execution cache
  CalledAgesaStatus = AllocateExecutionCache (&ResetParams->StdHeader, &ResetParams->CacheRegion[0]);
  if (CalledAgesaStatus > AgesaStatus) {
    AgesaStatus = CalledAgesaStatus;
  }

  // IDS_EXTENDED_HOOK (IDS_INIT_RESET_BEFORE, NULL, NULL, &ResetParams->StdHeader);
  // Init Debug Print function
  IDS_HDT_CONSOLE_INIT (&ResetParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "\nAmdInitReset: Start\n\n");
  IDS_HDT_CONSOLE (MAIN_FLOW, "\n*** %s ***\n\n", (CHAR8 *)&UserOptions.VersionString);
  AGESA_TESTPOINT (TpIfAmdInitResetEntry, &ResetParams->StdHeader);

  // Default: no warm reset was previously pending.
  PrevRequestBit = FALSE;
  PrevStateBits = WR_STATE_COLD;

  // FCH reset initialization is only performed on the BSP.
  if (IsBsp (&ResetParams->StdHeader, &AgesaStatus)) {
    CalledAgesaStatus = BldoptFchFunction.InitReset (ResetParams);
    AgesaStatus = (CalledAgesaStatus > AgesaStatus) ? CalledAgesaStatus : AgesaStatus;
  }

  // If a previously requested warm reset cannot be triggered in the
  // current stage, store the previous state of request and reset the
  // request struct to the current post stage
  GetWarmResetFlag (&ResetParams->StdHeader, &Request);
  if (Request.RequestBit == TRUE) {
    if (Request.StateBits >= Request.PostStage) {
      PrevRequestBit = Request.RequestBit;
      PrevStateBits = Request.StateBits;
      Request.RequestBit = FALSE;
      Request.StateBits = Request.PostStage - 1;
      SetWarmResetFlag (&ResetParams->StdHeader, &Request);
    }
  }

  // Initialize the PCI MMIO access mechanism
  InitializePciMmio (&ResetParams->StdHeader);

  // Initialize Hyper Transport Registers
  if (HtOptionInitReset.HtInitReset != NULL) {
    IDS_HDT_CONSOLE (MAIN_FLOW, "HtInitReset: Start\n");
    CalledAgesaStatus = HtOptionInitReset.HtInitReset (&ResetParams->StdHeader, &ResetParams->HtConfig);
    IDS_HDT_CONSOLE (MAIN_FLOW, "HtInitReset: End\n");
    if (CalledAgesaStatus > AgesaStatus) {
      AgesaStatus = CalledAgesaStatus;
    }
  }

  // Warm Reset, should be at the end of AmdInitReset
  GetWarmResetFlag (&ResetParams->StdHeader, &Request);
  // If a warm reset is requested in the current post stage, trigger the
  // warm reset and ignore the previous request
  if (Request.RequestBit == TRUE) {
    if (Request.StateBits < Request.PostStage) {
      AgesaDoReset (WARM_RESET_WHENEVER, &ResetParams->StdHeader);
    }
  } else {
    // Otherwise, if there's a previous request, restore it
    // so that the subsequent post stage can trigger the warm reset
    if (PrevRequestBit == TRUE) {
      Request.RequestBit = PrevRequestBit;
      Request.StateBits = PrevStateBits;
      SetWarmResetFlag (&ResetParams->StdHeader, &Request);
    }
  }

  // Check for Cache As Ram Corruption
  IDS_CAR_CORRUPTION_CHECK (&ResetParams->StdHeader);

  IDS_HDT_CONSOLE (MAIN_FLOW, "\nAmdInitReset: End\n\n");
  AGESA_TESTPOINT (TpIfAmdInitResetExit, &ResetParams->StdHeader);
  return AgesaStatus;
}
/**
 * Per-port mid-initialization callback for KV.
 *
 * Programs the mid-init register table for a port, applies post-training
 * fixups (slot power limit, IOMMU, x16 SKP workaround), applies KV-A0-only
 * erratum workarounds (UBTS468566) based on per-sublink GPP port counts, then
 * programs the L1 exit latency strap and enables ASPM.
 *
 * @param[in]     Engine  Pointer to engine (port) configuration
 * @param[in,out] Buffer  Pointer to PCIe_INFO_BUFFER with per-sublink and
 *                        per-wrapper info collected earlier
 * @param[in]     Pcie    Pointer to global PCIe configuration
 */
VOID
STATIC
PcieMidPortInitCallbackKV (
  IN       PCIe_ENGINE_CONFIG    *Engine,
  IN OUT   VOID                  *Buffer,
  IN       PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  DxFxx68_STRUCT          DxFxx68;
  D0F0xE4_PIF_0012_STRUCT D0F0xE4_PIF_0012;
  PCIe_SUBLINK_INFO       *SublinkInfo;
  PCIe_WRAPPER_INFO       *WrapperInfo;
  PCIe_WRAPPER_CONFIG     *Wrapper;
  CPU_LOGICAL_ID          LogicalId;
  UINT8                   Count;
  UINT8                   Nibble;

  PciePortProgramRegisterTable (PortInitMidTableKV.Table, PortInitMidTableKV.Length, Engine, TRUE, Pcie);
  // Post-training fixups run when the port trained successfully or is hotplug-capable.
  if (PcieConfigCheckPortStatus (Engine, INIT_STATUS_PCIE_TRAINING_SUCCESS) ||
      Engine->Type.Port.PortData.LinkHotplug != HotplugDisabled) {
    PcieEnableSlotPowerLimitV5 (Engine, Pcie);
    if (GnbFmCheckIommuPresent ((GNB_HANDLE*) PcieConfigGetParentSilicon (Engine), GnbLibGetHeader (Pcie))) {
      PcieInitPortForIommuV4 (Engine, Pcie);
    }
    // After GFX link is trained up and before ASPM is enabled, AGESA needs to check link width,
    // if it equals to x16, then apply the following change to GFX port:
    // Per port register 0xA1 - PCIE LC TRAINING CONTROL, bit16 - LC_EXTEND_WAIT_FOR_SKP = 1
    GnbLibPciRead (
      Engine->Type.Port.Address.AddressValue | DxFxx68_ADDRESS,
      AccessWidth32,
      &DxFxx68,
      GnbLibGetHeader (Pcie)
      );
    if (DxFxx68.Field.NegotiatedLinkWidth == 16) {
      PciePortRegisterRMW (
        Engine,
        DxFxxE4_xA1_ADDRESS,
        DxFxxE4_xA1_LcExtendWaitForSkp_MASK,
        (1 << DxFxxE4_xA1_LcExtendWaitForSkp_OFFSET),
        TRUE,
        Pcie
        );
    }
  }
  Wrapper = PcieConfigGetParentWrapper (Engine);
  // Sublink index is derived from the lowest lane of the engine (4 lanes per sublink).
  SublinkInfo = &(((PCIe_INFO_BUFFER *)Buffer)->SublinkInfo[MIN (Engine->EngineData.StartLane, Engine->EngineData.EndLane) / 4]);
  WrapperInfo = &(((PCIe_INFO_BUFFER *)Buffer)->WrapperInfo[Wrapper->WrapId]);
  GetLogicalIdOfCurrentCore (&LogicalId, (AMD_CONFIG_PARAMS *)Pcie->StdHeader);
  // Check if this CPU is KV A0
  // UBTS468566
  if ((LogicalId.Revision & AMD_F15_KV_A0) != 0) {
    Count = SublinkInfo->GppPortCount;
    IDS_HDT_CONSOLE (GNB_TRACE, "x1x2 PortCount = %02x\n", Count);
    if (Count == 2) {
      // If number of GPP ports under the same sublink is 2, Delay L1 Exit (prolong minimum time spent in L1)
      PciePortRegisterRMW (
        Engine,
        DxFxxE4_xA0_ADDRESS,
        DxFxxE4_xA0_LcDelayCount_MASK | DxFxxE4_xA0_LcDelayL1Exit_MASK,
        (0 << DxFxxE4_xA0_LcDelayCount_OFFSET) | (1 << DxFxxE4_xA0_LcDelayL1Exit_OFFSET),
        TRUE,
        Pcie
        );
    } else if (Count > 2) {
      // If number of GPP ports > 2
      if (SublinkInfo->MaxGenCapability > Gen1) {
        // If at least 1 GPP is Gen2 capable, Disable PLL Power down feature
        // NOTE(review): Wrapper is re-fetched here although it was already
        // assigned above — redundant but harmless.
        Wrapper = PcieConfigGetParentWrapper (Engine);
        Nibble = (UINT8) ((MIN (Engine->EngineData.StartLane, Engine->EngineData.EndLane) % 8) / 4);
        // Only PSD and PPD can have x1/x2 links, so we assume that PIF number is always 0
        D0F0xE4_PIF_0012.Value = PcieRegisterRead (
                                   Wrapper,
                                   PIF_SPACE (Wrapper->WrapId, 0, D0F0xE4_PIF_0012_ADDRESS + Nibble),
                                   Pcie
                                   );
        D0F0xE4_PIF_0012.Field.PllPowerStateInOff = PifPowerStateL0;
        D0F0xE4_PIF_0012.Field.PllPowerStateInTxs2 = PifPowerStateL0;
        PcieRegisterWrite (
          Wrapper,
          PIF_SPACE (Wrapper->WrapId, 0, D0F0xE4_PIF_0012_ADDRESS + Nibble),
          D0F0xE4_PIF_0012.Value,
          TRUE,
          Pcie
          );
      } else {
        // All ports are only Gen1
        PciePortRegisterRMW (
          Engine,
          DxFxxE4_xC0_ADDRESS,
          DxFxxE4_xC0_StrapMedyTSxCount_MASK,
          0x2 << DxFxxE4_xC0_StrapMedyTSxCount_OFFSET,
          TRUE,
          Pcie
          );
      }
    }
  }
  // Program the per-port L1 exit latency strap from the wrapper-level value
  // collected earlier (port register stride is 0x100).
  PcieRegisterRMW (
    Wrapper,
    WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_0802_ADDRESS + (0x100 * Engine->Type.Port.PortId)),
    D0F0xE4_WRAP_0802_StrapBifL1ExitLatency_MASK,
    (WrapperInfo->L1ExitLatencyValue << D0F0xE4_WRAP_0802_StrapBifL1ExitLatency_OFFSET),
    TRUE,
    Pcie
    );
  // Strip ASPM L1 capability on this port if the wrapper-wide flag demands it.
  if (WrapperInfo->DisableL1OnWrapper == TRUE) {
    Engine->Type.Port.PortData.LinkAspm &= ~(AspmL1);
  }
  PcieEnableAspm (Engine, Pcie);
}
/**
 * Select the master PCIe PLL for a wrapper.
 *
 * Walks the wrapper's engines to pick a master lane (an SB engine wins
 * outright; a non-hotplug engine is preferred over a hotplug one), then
 * programs exactly one of the MasterPciePllA/B/C/D strap bits in
 * D0F0xE4_WRAP_8013 according to which 4-lane group the master lane falls in.
 *
 * @param[in]  Wrapper        Pointer to wrapper configuration
 * @param[out] ConfigChanged  Optional; set TRUE if the register value changed
 * @param[in]  Pcie           Pointer to global PCIe configuration
 */
VOID
PcieTopologySelectMasterPll (
  IN       PCIe_WRAPPER_CONFIG   *Wrapper,
     OUT   BOOLEAN               *ConfigChanged,
  IN       PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  PCIe_ENGINE_CONFIG        *EngineList;
  UINT16                    MasterLane;
  UINT16                    MasterHotplugLane;
  D0F0xE4_WRAP_8013_STRUCT  D0F0xE4_WRAP_8013;
  D0F0xE4_WRAP_8013_STRUCT  D0F0xE4_WRAP_8013_BASE;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologySelectMasterPll Enter\n");
  // 0xFFFF marks "no candidate found yet".
  MasterLane = 0xFFFF;
  MasterHotplugLane = 0xFFFF;
  EngineList = PcieConfigGetChildEngine (Wrapper);
  while (EngineList != NULL) {
    if (PcieConfigIsEngineAllocated (EngineList) &&
        EngineList->Type.Port.PortData.PortPresent != PortDisabled &&
        PcieConfigIsPcieEngine (EngineList)) {
      if (EngineList->Type.Port.PortData.LinkHotplug != HotplugDisabled) {
        // Hotplug ports are only a fallback candidate.
        MasterHotplugLane = PcieConfigGetPcieEngineMasterLane (EngineList);
      } else {
        MasterLane = PcieConfigGetPcieEngineMasterLane (EngineList);
        // The southbridge link always wins; stop searching.
        if (PcieConfigIsSbPcieEngine (EngineList)) {
          break;
        }
      }
    }
    EngineList = PcieLibGetNextDescriptor (EngineList);
  }
  // Fallback order: non-hotplug engine, then hotplug engine, then lane 0.
  if (MasterLane == 0xffff) {
    if (MasterHotplugLane != 0xffff) {
      MasterLane = MasterHotplugLane;
    } else {
      MasterLane = 0x0;
    }
  }
  // Read the current strap value and keep a copy to detect changes.
  D0F0xE4_WRAP_8013.Value = PcieRegisterRead (
                              Wrapper,
                              WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8013_ADDRESS),
                              Pcie
                              );
  D0F0xE4_WRAP_8013_BASE.Value = D0F0xE4_WRAP_8013.Value;
  // One PLL per 4-lane group: lanes 0-3 -> PLL A, 4-7 -> B, 8-11 -> C, else D.
  if ( MasterLane <= 3 ) {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x1;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x0;
    Wrapper->MasterPll = 0xA;
  } else if (MasterLane <= 7) {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x1;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x0;
    Wrapper->MasterPll = 0xB;
  } else if (MasterLane <= 11) {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x1;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x0;
    Wrapper->MasterPll = 0xC;
  } else {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x1;
    Wrapper->MasterPll = 0xD;
  }
  // Report whether the selection actually changed the register.
  if (ConfigChanged != NULL) {
    *ConfigChanged = (D0F0xE4_WRAP_8013.Value == D0F0xE4_WRAP_8013_BASE.Value) ? FALSE : TRUE;
  }
  PcieRegisterWrite (
    Wrapper,
    WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8013_ADDRESS),
    D0F0xE4_WRAP_8013.Value,
    FALSE,
    Pcie
    );
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologySelectMasterPll Exit\n");
}
/**
 * Dump the GFX platform configuration to the HDT debug console.
 *
 * Pure diagnostic output; prints each GFX_PLATFORM_CONFIG field with a
 * human-readable decoding where the value is an enum. UMA details are only
 * printed when a UMA region exists.
 *
 * @param[in] Gfx   Pointer to global GFX configuration
 */
VOID
GfxConfigDebugDump (
  IN       GFX_PLATFORM_CONFIG      *Gfx
  )
{
  IDS_HDT_CONSOLE (GFX_MISC, "<-------------- GFX Config Start ------------->\n");
  IDS_HDT_CONSOLE (GFX_MISC, " HD Audio - %s\n", (Gfx->GnbHdAudio == 0) ? "Disabled" : "Enabled");
  IDS_HDT_CONSOLE (GFX_MISC, " DynamicRefreshRate - 0x%x\n", Gfx->DynamicRefreshRate);
  IDS_HDT_CONSOLE (GFX_MISC, " LcdBackLightControl - 0x%x\n", Gfx->LcdBackLightControl);
  IDS_HDT_CONSOLE (GFX_MISC, " AbmSupport - %s\n", (Gfx->AbmSupport == 0) ? "Disabled" : "Enabled");
  IDS_HDT_CONSOLE (GFX_MISC, " GmcClockGating - %s\n", (Gfx->GmcClockGating == 0) ? "Disabled" : "Enabled");
  // Nested ternaries decode the three-valued power-gating enum.
  IDS_HDT_CONSOLE (GFX_MISC, " GmcPowerGating - %s\n",
    (Gfx->GmcPowerGating == GmcPowerGatingDisabled) ? "Disabled" : (
    (Gfx->GmcPowerGating == GmcPowerGatingStutterOnly) ? "GmcPowerGatingStutterOnly" : (
    (Gfx->GmcPowerGating == GmcPowerGatingWidthStutter) ? "GmcPowerGatingWidthStutter" : "Unknown"))
    );
  IDS_HDT_CONSOLE (GFX_MISC, " UmaSteering - %s\n",
    (Gfx->UmaSteering == excel993 ) ? "excel993" : (
    (Gfx->UmaSteering == excel992 ) ? "excel992" : "Unknown")
    );
  IDS_HDT_CONSOLE (GFX_MISC, " iGpuVgaMode - %s\n",
    (Gfx->iGpuVgaMode == iGpuVgaAdapter) ? "VGA" : (
    (Gfx->iGpuVgaMode == iGpuVgaNonAdapter) ? "Non VGA" : "Unknown")
    );
  IDS_HDT_CONSOLE (GFX_MISC, " UmaMode - %s\n", (Gfx->UmaInfo.UmaMode == UMA_NONE) ? "No UMA" : "UMA");
  // UMA geometry is only meaningful when a UMA region was carved out.
  if (Gfx->UmaInfo.UmaMode != UMA_NONE) {
    IDS_HDT_CONSOLE (GFX_MISC, " UmaBase - 0x%x\n", Gfx->UmaInfo.UmaBase);
    IDS_HDT_CONSOLE (GFX_MISC, " UmaSize - 0x%x\n", Gfx->UmaInfo.UmaSize);
    IDS_HDT_CONSOLE (GFX_MISC, " UmaAttributes - 0x%x\n", Gfx->UmaInfo.UmaAttributes);
  }
  IDS_HDT_CONSOLE (GFX_MISC, "<-------------- GFX Config End --------------->\n");
}
/**
 *
 * A sub-function which extracts RC10 operating speed value from an input table
 * and stores the extracted value to a specific address.
 *
 * Only applies to registered DIMMs; returns TRUE immediately for non-RDIMM
 * channels. Selects the platform-specific table matching the NB's logical
 * CPUID and package type, then matches the current DDR rate against each
 * entry's rate bitmap. If no entry matches and no platform-specific override
 * exists, the failure is logged and error handling is invoked.
 *
 * @param[in,out] *NBPtr           - Pointer to the MEM_NB_BLOCK
 * @param[in]     *EntryOfTables   - Pointer to MEM_PSC_TABLE_BLOCK
 *
 * @return TRUE  - Succeed in extracting the table value
 * @return FALSE - Fail to extract the table value
 *
 */
BOOLEAN
MemPGetRC10OpSpd (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 TableSize;
  UINT32 CurDDRrate;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  UINT8 PsoMaskRC10OpSpeed;
  PSCFG_OPSPD_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;

  // RC10 is an RDIMM register control word; nothing to do on non-RDIMM channels.
  if (CurrentChannel->RegDimmPresent == 0) {
    return TRUE;
  }

  TblPtr = NULL;
  TableSize = 0;
  PackageType = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;
  i = 0;
  // Obtain table pointer, table size, Logical Cpuid and PSC type according to NB type and package type.
  while (EntryOfTables->TblEntryOfRC10OpSpeed[i] != NULL) {
    LogicalCpuid = (EntryOfTables->TblEntryOfRC10OpSpeed[i])->Header.LogicalCpuid;
    PackageType = (EntryOfTables->TblEntryOfRC10OpSpeed[i])->Header.PackageType;
    //
    // Determine if this is the expected NB Type
    //
    if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
      TblPtr = (PSCFG_OPSPD_ENTRY *) ((EntryOfTables->TblEntryOfRC10OpSpeed[i])->TBLPtr);
      TableSize = (EntryOfTables->TblEntryOfRC10OpSpeed[i])->TableSize;
      break;
    }
    i++;
  }

  // Check whether no table entry is found.
  // (After a break above, entry [i] is non-NULL; after exhausting the list it is NULL.)
  if (EntryOfTables->TblEntryOfRC10OpSpeed[i] == NULL) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo RC10 Op Speed table\n");
    return FALSE;
  }

  // One-hot bitmap of the current DDR rate (one bit per 66 MHz grade),
  // matched against each entry's DDRrate bitmap.
  CurDDRrate = (UINT32) (1 << (CurrentChannel->DCTPtr->Timings.Speed / 66));
  // i is reused here: on exit, i < TableSize means a matching entry was found.
  for (i = 0; i < TableSize; i++) {
    if ((TblPtr->DDRrate & CurDDRrate) != 0) {
      NBPtr->PsPtr->RC10OpSpd = TblPtr->OPSPD;
      break;
    }
    TblPtr++;
  }

  //
  // If there is no entry, check if overriding value existed. If not, return FALSE.
  //
  PsoMaskRC10OpSpeed = (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_RC10_OPSPEED);
  if ((PsoMaskRC10OpSpeed == 0) && (i == TableSize)) {
    // Neither a table entry nor a platform override: log and run error handling.
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo RC10 Op Speed entries\n");
    PutEventLog (AGESA_ERROR, MEM_ERROR_RC10_OP_SPEED_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
    if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
      ASSERT (FALSE);
    }
    return FALSE;
  }
  return TRUE;
}
/**
 *
 * It will create the ACPI table of WHEA and return the pointer to the table.
 *
 * Lays out the HEST MCE table followed by its banks, then the HEST CMC table
 * followed by its banks, in a single heap buffer (allocated here if the
 * caller did not supply one).
 *
 * @param[in, out] StdHeader     Standard Head Pointer
 * @param[in, out] WheaMcePtr    Point to Whea Hest Mce table
 * @param[in, out] WheaCmcPtr    Point to Whea Hest Cmc table
 *
 * @retval         UINT32        AGESA_STATUS
 */
AGESA_STATUS
GetAcpiWheaMain (
  IN OUT   AMD_CONFIG_PARAMS *StdHeader,
  IN OUT   VOID **WheaMcePtr,
  IN OUT   VOID **WheaCmcPtr
  )
{
  UINT8 BankNum;
  UINT8 Entries;
  UINT16 HestMceTableSize;
  UINT16 HestCmcTableSize;
  UINT64 MsrData;
  AMD_HEST_MCE_TABLE *HestMceTablePtr;
  AMD_HEST_CMC_TABLE *HestCmcTablePtr;
  AMD_HEST_BANK *HestBankPtr;
  AMD_WHEA_INIT_DATA *WheaInitDataPtr;
  ALLOCATE_HEAP_PARAMS AllocParams;
  CPU_SPECIFIC_SERVICES *FamilySpecificServices;

  FamilySpecificServices = NULL;
  IDS_HDT_CONSOLE (CPU_TRACE, " WHEA is created\n");

  // step 1: calculate Hest table size
  // MCG_CAP[Count] gives the number of MCA banks; zero means no MCA support.
  LibAmdMsrRead (MSR_MCG_CAP, &MsrData, StdHeader);
  BankNum = (UINT8) (((MSR_MCG_CAP_STRUCT *) (&MsrData))->Count);
  if (BankNum == 0) {
    return AGESA_ERROR;
  }
  GetCpuServicesOfCurrentCore (&FamilySpecificServices, StdHeader);
  FamilySpecificServices->GetWheaInitData (FamilySpecificServices, &WheaInitDataPtr, &Entries, StdHeader);
  // The family-specific bank count must not exceed the hardware bank count.
  ASSERT (WheaInitDataPtr->HestBankNum <= BankNum);
  HestMceTableSize = sizeof (AMD_HEST_MCE_TABLE) + WheaInitDataPtr->HestBankNum * sizeof (AMD_HEST_BANK);
  HestCmcTableSize = sizeof (AMD_HEST_CMC_TABLE) + WheaInitDataPtr->HestBankNum * sizeof (AMD_HEST_BANK);
  HestMceTablePtr = (AMD_HEST_MCE_TABLE *) *WheaMcePtr;
  HestCmcTablePtr = (AMD_HEST_CMC_TABLE *) *WheaCmcPtr;

  // step 2: allocate a buffer by callback function
  // NOTE(review): if only one of the two incoming pointers is NULL, both are
  // re-pointed into the newly allocated buffer — verify callers always pass
  // both or neither.
  if ((HestMceTablePtr == NULL) || (HestCmcTablePtr == NULL)) {
    AllocParams.RequestedBufferSize = (UINT32) (HestMceTableSize + HestCmcTableSize);
    AllocParams.BufferHandle = AMD_WHEA_BUFFER_HANDLE;
    AllocParams.Persist = HEAP_SYSTEM_MEM;
    AGESA_TESTPOINT (TpProcCpuBeforeAllocateWheaBuffer, StdHeader);
    if (HeapAllocateBuffer (&AllocParams, StdHeader) != AGESA_SUCCESS) {
      return AGESA_ERROR;
    }
    AGESA_TESTPOINT (TpProcCpuAfterAllocateWheaBuffer, StdHeader);
    // CMC table starts right after the MCE table header plus its bank array.
    HestMceTablePtr = (AMD_HEST_MCE_TABLE *) AllocParams.BufferPtr;
    HestCmcTablePtr = (AMD_HEST_CMC_TABLE *) ((UINT8 *) (HestMceTablePtr + 1) + (WheaInitDataPtr->HestBankNum * sizeof (AMD_HEST_BANK)));
  }

  // step 3: fill in Hest MCE table
  HestMceTablePtr->TblLength = HestMceTableSize;
  HestMceTablePtr->GlobCapInitDataLSD = WheaInitDataPtr->GlobCapInitDataLSD;
  HestMceTablePtr->GlobCapInitDataMSD = WheaInitDataPtr->GlobCapInitDataMSD;
  HestMceTablePtr->GlobCtrlInitDataLSD = WheaInitDataPtr->GlobCtrlInitDataLSD;
  HestMceTablePtr->GlobCtrlInitDataMSD = WheaInitDataPtr->GlobCtrlInitDataMSD;
  HestMceTablePtr->NumHWBanks = WheaInitDataPtr->HestBankNum;
  HestBankPtr = (AMD_HEST_BANK *) (HestMceTablePtr + 1);
  CreateHestBank (HestBankPtr, WheaInitDataPtr->HestBankNum, WheaInitDataPtr);

  // step 4: fill in Hest CMC table
  HestCmcTablePtr->NumHWBanks = WheaInitDataPtr->HestBankNum;
  HestCmcTablePtr->TblLength = HestCmcTableSize;
  HestBankPtr = (AMD_HEST_BANK *) (HestCmcTablePtr + 1);
  CreateHestBank (HestBankPtr, WheaInitDataPtr->HestBankNum, WheaInitDataPtr);

  // step 5: fill in the incoming structure
  *WheaMcePtr = HestMceTablePtr;
  *WheaCmcPtr = HestCmcTablePtr;

  return (AGESA_SUCCESS);
}
/**
 * Perform DDR phy fence training (unified northbridge).
 *
 * Runs the three-pass fence-training sequence (TxDll, RxDll, TxPad), each pass
 * selecting a different fence target via FenceTrSel and retargeting BFPhyFence
 * to a different bit range of phy register 0x0C via MAKE_TSEFO, then programs
 * the Fence2 thresholds for clock/command/address and data. Numbered comments
 * follow the BKDG procedure steps.
 *
 * @param[in,out] NBPtr  Pointer to the MEM_NB_BLOCK
 */
VOID
MemNPhyFenceTrainingUnb (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  UINT8 FenceThresholdTxDll;
  UINT8 FenceThresholdRxDll;
  UINT8 FenceThresholdTxPad;
  UINT16 Fence2Data;

  // Clear both fence-2 registers before training.
  MemNSetBitFieldNb (NBPtr, BFDataFence2, 0);
  MemNSetBitFieldNb (NBPtr, BFFence2, 0);

  // 1. Program D18F2x[1,0]9C_x0000_0008[FenceTrSel]=10b.
  // 2. Perform phy fence training.
  // 3. Write the calculated fence value to D18F2x[1,0]9C_x0000_000C[FenceThresholdTxDll].
  MemNSetBitFieldNb (NBPtr, BFFenceTrSel, 2);
  // Retarget BFPhyFence to bits 30:26 of phy reg 0x0C (the TxDll field).
  MAKE_TSEFO (NBPtr->NBRegTable, DCT_PHY_ACCESS, 0x0C, 30, 26, BFPhyFence);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tFenceThresholdTxDll\n");
  MemNTrainPhyFenceNb (NBPtr);
  FenceThresholdTxDll = (UINT8) MemNGetBitFieldNb (NBPtr, BFPhyFence);
  NBPtr->FamilySpecificHook[DetectMemPllError] (NBPtr, &FenceThresholdTxDll);

  // 4. Program D18F2x[1,0]9C_x0D0F_0[F,7:0]0F[AlwaysEnDllClks]=001b.
  MemNSetBitFieldNb (NBPtr, BFAlwaysEnDllClks, 0x1000);

  // 5. Program D18F2x[1,0]9C_x0000_0008[FenceTrSel]=01b.
  // 6. Perform phy fence training.
  // 7. Write the calculated fence value to D18F2x[1,0]9C_x0000_000C[FenceThresholdRxDll].
  MemNSetBitFieldNb (NBPtr, BFFenceTrSel, 1);
  // Retarget BFPhyFence to bits 25:21 (the RxDll field).
  MAKE_TSEFO (NBPtr->NBRegTable, DCT_PHY_ACCESS, 0x0C, 25, 21, BFPhyFence);
  IDS_HDT_CONSOLE (MEM_FLOW, "\n\t\tFenceThresholdRxDll\n");
  MemNTrainPhyFenceNb (NBPtr);
  FenceThresholdRxDll = (UINT8) MemNGetBitFieldNb (NBPtr, BFPhyFence);
  NBPtr->FamilySpecificHook[DetectMemPllError] (NBPtr, &FenceThresholdRxDll);

  // 8. Program D18F2x[1,0]9C_x0D0F_0[F,7:0]0F[AlwaysEnDllClks]=000b.
  MemNSetBitFieldNb (NBPtr, BFAlwaysEnDllClks, 0x0000);

  // 9. Program D18F2x[1,0]9C_x0000_0008[FenceTrSel]=11b.
  // 10. Perform phy fence training.
  // 11. Write the calculated fence value to D18F2x[1,0]9C_x0000_000C[FenceThresholdTxPad].
  MemNSetBitFieldNb (NBPtr, BFFenceTrSel, 3);
  // Retarget BFPhyFence to bits 20:16 (the TxPad field).
  MAKE_TSEFO (NBPtr->NBRegTable, DCT_PHY_ACCESS, 0x0C, 20, 16, BFPhyFence);
  IDS_HDT_CONSOLE (MEM_FLOW, "\n\t\tFenceThresholdTxPad\n");
  MemNTrainPhyFenceNb (NBPtr);
  FenceThresholdTxPad = (UINT8) MemNGetBitFieldNb (NBPtr, BFPhyFence);
  NBPtr->FamilySpecificHook[DetectMemPllError] (NBPtr, &FenceThresholdTxPad);

  // Program Fence2 threshold for Clk, Cmd, and Addr
  // Values < 16 are programmed with bit 4 set as an enable; >= 16 disables (0).
  if (FenceThresholdTxPad < 16) {
    MemNSetBitFieldNb (NBPtr, BFClkFence2, FenceThresholdTxPad | 0x10);
    MemNSetBitFieldNb (NBPtr, BFCmdFence2, FenceThresholdTxPad | 0x10);
    MemNSetBitFieldNb (NBPtr, BFAddrFence2, FenceThresholdTxPad | 0x10);
  } else {
    MemNSetBitFieldNb (NBPtr, BFClkFence2, 0);
    MemNSetBitFieldNb (NBPtr, BFCmdFence2, 0);
    MemNSetBitFieldNb (NBPtr, BFAddrFence2, 0);
  }

  // Program Fence2 threshold for data: TxPad in bits 4:0, TxDll shifted by 5,
  // RxDll shifted by 10.
  Fence2Data = 0;
  if (FenceThresholdTxPad < 16) {
    Fence2Data |= FenceThresholdTxPad | 0x10;
  }
  if (FenceThresholdRxDll < 16) {
    Fence2Data |= (FenceThresholdRxDll | 0x10) << 10;
  }
  if (FenceThresholdTxDll < 16) {
    Fence2Data |= (FenceThresholdTxDll | 0x10) << 5;
  }
  MemNSetBitFieldNb (NBPtr, BFDataFence2, Fence2Data);
  NBPtr->FamilySpecificHook[ProgramFence2RxDll] (NBPtr, &Fence2Data);

  if (NBPtr->MCTPtr->Status[SbLrdimms]) {
    // 18. If motherboard routing requires CS[7:6] to adopt address timings, e.g. 3 LRDIMMs/ch with CS[7:6]
    //     routed across all DIMM sockets, BIOS performs the following:
    if (FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_NO_LRDIMM_CS67_ROUTING, NBPtr->MCTPtr->SocketId, NBPtr->ChannelPtr->ChannelID, 0, NULL, NULL) != NULL) {
      // A. Program D18F2xA8_dct[1:0][CSTimingMux67] = 1.
      MemNSetBitFieldNb (NBPtr, BFCSTimingMux67, 1);
      // B. Program D18F2x9C_x0D0F_8021_dct[1:0]:
      //    - DiffTimingEn = 1.
      //    - IF (D18F2x9C_x0000_0004_dct[1:0][AddrCmdFineDelay] >=
      //      D18F2x9C_x0D0F_E008_dct[1:0][FenceValue]) THEN Fence = 1 ELSE Fence = 0.
      //    - Delay = D18F2x9C_x0000_0004_dct[1:0][AddrCmdFineDelay].
      MemNSetBitFieldNb (NBPtr, BFDiffTimingEn, 1);
      MemNSetBitFieldNb (NBPtr, BFFence, (MemNGetBitFieldNb (NBPtr, BFAddrCmdFineDelay) >= MemNGetBitFieldNb (NBPtr, BFFenceValue)) ? 1 : 0);
      MemNSetBitFieldNb (NBPtr, BFDelay, (MemNGetBitFieldNb (NBPtr, BFAddrCmdFineDelay)));
    }
  }

  // 19. Reprogram F2x9C_04. (Read-back/write-back forces the register to be
  //     re-latched with the newly trained fence values in effect.)
  MemNSetBitFieldNb (NBPtr, BFAddrTmgControl, MemNGetBitFieldNb (NBPtr, BFAddrTmgControl));
}
/**
 * Per wrapper Pcie Init prior training.
 *
 * Optionally enables ARI and ACS straps, then runs the numbered wrapper
 * reconfiguration sequence (prepare, core config, lane mux, master PLL,
 * reconfig execute, TX preset loading, cleanup, link reversal, lane control)
 * and finishes with core init, SSID programming, and HW power gating setup.
 *
 * @param[in]     Wrapper   Pointer to wrapper configuration descriptor
 * @param[in,out] Buffer    Pointer buffer
 * @param[in]     Pcie      Pointer to global PCIe configuration
 */
AGESA_STATUS
STATIC
PcieEarlyInitCallbackCZ (
  IN       PCIe_WRAPPER_CONFIG           *Wrapper,
  IN OUT   VOID                          *Buffer,
  IN       PCIe_PLATFORM_CONFIG          *Pcie
  )
{
  AGESA_STATUS          Status;
  BOOLEAN               CoreConfigChanged;
  BOOLEAN               PllConfigChanged;
  BOOLEAN               AriSupportEnable;
  GNB_BUILD_OPTIONS_CZ  *GnbBuildOptionData;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieEarlyInitCallbackCZ Enter\n");
  CoreConfigChanged = FALSE;
  PllConfigChanged = FALSE;
  GnbBuildOptionData = GnbLocateHeapBuffer (AMD_GNB_BUILD_OPTIONS_HANDLE, GnbLibGetHeader (Pcie));
  ASSERT (GnbBuildOptionData != NULL);
  AriSupportEnable = GnbBuildOptionData->CfgAriSupport;
  if (AriSupportEnable == TRUE) {
    // Enable Alternative Routing-ID Interpretation
    // (both the IOC-level capability bit and the per-wrapper strap).
    GnbLibPciIndirectRMW (
      MAKE_SBDFO (0,0,0,0, D0F0x60_ADDRESS),
      D0F0x64_x46_ADDRESS,
      AccessWidth32,
      (UINT32)~D0F0x64_x46_IocAriSupported_MASK,
      (1 << D0F0x64_x46_IocAriSupported_OFFSET),
      GnbLibGetHeader (Pcie)
      );
    PcieRegisterRMW (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_0000_ADDRESS),
      D0F0xE4_WRAP_0000_StrapBif2AriEn_MASK,
      (1 << D0F0xE4_WRAP_0000_StrapBif2AriEn_OFFSET),
      TRUE,
      Pcie
      );
  }
  if (GnbBuildOptionData->CfgACSEnable == TRUE) {
    // Enable Access Control Services (set bits 2:0, preserve the rest).
    PcieRegisterRMW (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_000A_ADDRESS),
      0xFFFFFFF8,
      (BIT0 | BIT1 | BIT2),
      TRUE,
      Pcie
      );
  }
  IDS_OPTION_HOOK (IDS_BEFORE_RECONFIGURATION, Pcie, (AMD_CONFIG_PARAMS *)Pcie->StdHeader);
  PcieTopologyPrepareForReconfigCZ (Wrapper, Pcie);                         //step 2
  Status = PcieTopologySetCoreConfigCZ (Wrapper, &CoreConfigChanged, Pcie); //step 3
  ASSERT (Status == AGESA_SUCCESS);
  PcieTopologyApplyLaneMuxCZ (Wrapper, Pcie);                               //step 4
  // PciePifSetRxDetectPowerMode (Wrapper, Pcie);
  // PciePifSetLs2ExitTime (Wrapper, Pcie);
  // PciePifApplyGanging (Wrapper, Pcie);
  // PciePhyApplyGangingCZ (Wrapper, Pcie);
  // NOTE(review): PllConfigChanged is written here but never read afterwards
  // in this function — only CoreConfigChanged gates the reconfig below.
  PcieTopologySelectMasterPllCZ (Wrapper, &PllConfigChanged, Pcie);         //step 5
  if (CoreConfigChanged) {
    PcieTopologyExecuteReconfigCZ (Wrapper, Pcie);                          // step 6
  }
  PcieEarlyWrapperTxPresetLoadingSequenceCZ (Wrapper, Pcie);
  PcieTopologyCleanUpReconfigCZ (Wrapper, Pcie);                            // step 7
  PcieTopologySetLinkReversalV4 (Wrapper, Pcie);                            // step 8
  // PciePifPllConfigureCZ (Wrapper, Pcie);
  // Disable all core lanes that were not allocated to a PCIe engine.
  PcieTopologyLaneControlCZ (
    DisableLanes,
    PcieUtilGetWrapperLaneBitMap (LANE_TYPE_CORE_ALL, LANE_TYPE_PCIE_CORE_ALLOC, Wrapper),
    Wrapper,
    Pcie
    );                                                                      //step 9
  // PciePollPifForCompeletion (Wrapper, Pcie);
  // PciePhyAvertClockPickersCZ (Wrapper, Pcie);
  PcieEarlyCoreInitCZ (Wrapper, Pcie);
  PcieSetSsidCZ (UserOptions.CfgGnbPcieSSID, Wrapper, Pcie);
  PcieHwInitPowerGatingCZ (Wrapper, Pcie);
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieEarlyInitCallbackCZ Exit [%x]\n", Status);
  return Status;
}
/**
 * Pcie TxPreset loading sequence
 *
 * Programs the downstream/upstream TX preset straps, pulses the core strap
 * toggle to latch them, then pulses the PIF TX/RX power-in-init controls
 * through state 0x1 and back to the 0x2 default.
 *
 * @param[in]  Wrapper   Pointer to wrapper configuration descriptor
 * @param[in]  Pcie      Pointer to global PCIe configuration
 */
VOID
STATIC
PcieEarlyWrapperTxPresetLoadingSequenceCZ (
  IN       PCIe_WRAPPER_CONFIG           *Wrapper,
  IN       PCIe_PLATFORM_CONFIG          *Pcie
  )
{
  UINT8 Pif;
  UINT8 CoreId;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieEarlyWrapperTxPresetLoadingSequenceCZ Enter\n");
  // Step 1: program TX preset value of PCIE_WRAPPER:PSX80/81_WRP_BIF_LANE_EQUALIZATION_CNTL to 0x7 ( from h/w default 0xF )
  PcieRegisterRMW (
    Wrapper,
    WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_0050_ADDRESS),
    D0F0xE4_WRAP_0050_StrapBifPcieLaneEqCntlDsPortTxPreset_MASK |
    D0F0xE4_WRAP_0050_StrapBifPcieLaneEqCntlUsPortTxPreset_MASK,
    (7 << D0F0xE4_WRAP_0050_StrapBifPcieLaneEqCntlDsPortTxPreset_OFFSET) |
    (7 << D0F0xE4_WRAP_0050_StrapBifPcieLaneEqCntlUsPortTxPreset_OFFSET),
    TRUE,
    Pcie
    );
  IDS_OPTION_CALLOUT (IDS_CALLOUT_GNB_BEFORE_TXPRESET_LOADING, Pcie, (AMD_CONFIG_PARAMS *)Pcie->StdHeader);
  for (CoreId = Wrapper->StartPcieCoreId; CoreId <= Wrapper->EndPcieCoreId; CoreId++) {
    // Step 2: program TOGGLESTRAP bit of PCIE_WRAPPER:PSX80/81_BIF_SWRST_COMMAND_1 to 0x1
    PcieRegisterRMW (
      Wrapper,
      CORE_SPACE (CoreId, D0F0xE4_CORE_0103_ADDRESS),
      D0F0xE4_CORE_0103_Togglestrap_MASK,
      (1 << D0F0xE4_CORE_0103_Togglestrap_OFFSET),
      TRUE,
      Pcie
      );
    // Wait for ~50ns
    // NOTE(review): comment says ~50ns but GnbLibStall is called with the
    // same argument (1) as the "~1ns" wait below — confirm stall units.
    GnbLibStall (1, (AMD_CONFIG_PARAMS *)Pcie->StdHeader);
    // program TOGGLESTRAP bit of PCIE_WRAPPER:PSX80/81_BIF_SWRST_COMMAND_1 to 0x0
    PcieRegisterRMW (
      Wrapper,
      CORE_SPACE (CoreId, D0F0xE4_CORE_0103_ADDRESS),
      D0F0xE4_CORE_0103_Togglestrap_MASK,
      (0 << D0F0xE4_CORE_0103_Togglestrap_OFFSET),
      TRUE,
      Pcie
      );
  }
  for (Pif = 0; Pif < Wrapper->NumberOfPIFs; Pif++) {
    // Step 3: program TXPWR_IN_INIT bit of PCIE_WRAPPER:PSX80/81_PIF0_TX_CTRL to 0x1 ( from h/w default 0x2 )
    //         program RXPWR_IN_INIT bit of PCIE_WRAPPER:PSX80/81_PIF0_RX_CTRL to 0x1 ( from h/w default 0x2 )
    PcieRegisterRMW (
      Wrapper,
      PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_0008_ADDRESS),
      D0F0xE4_PIF_0008_TxpwrInInit_MASK,
      (1 << D0F0xE4_PIF_0008_TxpwrInInit_OFFSET),
      TRUE,
      Pcie
      );
    PcieRegisterRMW (
      Wrapper,
      PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_000A_ADDRESS),
      D0F0xE4_PIF_000A_RxpwrInInit_MASK,
      (1 << D0F0xE4_PIF_000A_RxpwrInInit_OFFSET),
      TRUE,
      Pcie
      );
    // Wait for ~1ns
    GnbLibStall (1, (AMD_CONFIG_PARAMS *)Pcie->StdHeader);
    //Step 5: program TXPWR_IN_INIT bit of PCIE_WRAPPER:PSX80/81_PIF0_TX_CTRL back to 0x2
    //        program RXPWR_IN_INIT bit of PCIE_WRAPPER:PSX80/81_PIF0_RX_CTRL back to 0x2
    PcieRegisterRMW (
      Wrapper,
      PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_0008_ADDRESS),
      D0F0xE4_PIF_0008_TxpwrInInit_MASK,
      (2 << D0F0xE4_PIF_0008_TxpwrInInit_OFFSET),
      TRUE,
      Pcie
      );
    PcieRegisterRMW (
      Wrapper,
      PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_000A_ADDRESS),
      D0F0xE4_PIF_000A_RxpwrInInit_MASK,
      (2 << D0F0xE4_PIF_000A_RxpwrInInit_OFFSET),
      TRUE,
      Pcie
      );
  }
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieEarlyWrapperTxPresetLoadingSequenceCZ Exit\n");
}
/**
 * Configure PCIe hardware-initiated power gating for a wrapper (CZ).
 *
 * Programs the core L1 power-gating enable, the per-PIF TX/RX power states
 * and L1/unused gating controls according to the build options, and finally
 * the core lane-mask registers for lane degrade/unused handling.
 *
 * @param[in]  Wrapper   Pointer to wrapper configuration descriptor
 * @param[in]  Pcie      Pointer to global PCIe configuration
 */
VOID
STATIC
PcieHwInitPowerGatingCZ (
  IN       PCIe_WRAPPER_CONFIG           *Wrapper,
  IN       PCIe_PLATFORM_CONFIG          *Pcie
  )
{
  UINT8                     Pif;
  UINT32                    Value;
  D0F0xE4_PIF_0004_STRUCT   D0F0xE4_PIF_0004;
  D0F0xE4_PIF_0008_STRUCT   D0F0xE4_PIF_0008;
  D0F0xE4_PIF_000A_STRUCT   D0F0xE4_PIF_000A;
  D0F0xE4_CORE_012A_STRUCT  D0F0xE4_CORE_012A;
  D0F0xE4_CORE_012C_STRUCT  D0F0xE4_CORE_012C;
  D0F0xE4_CORE_012D_STRUCT  D0F0xE4_CORE_012D;
  GNB_BUILD_OPTIONS_CZ      *GnbBuildOptionData;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieHwInitPowerGatingCZ Enter\n");
  GnbBuildOptionData = GnbLocateHeapBuffer (AMD_GNB_BUILD_OPTIONS_HANDLE, GnbLibGetHeader (Pcie));
  ASSERT (GnbBuildOptionData != NULL);
  // Core-level L1 power gating enable follows the L1Pg build-option flag.
  Value = 0x0;
  if ((GnbBuildOptionData->CfgPcieHwInitPwerGating & PcieHwInitPwrGatingL1Pg) == PcieHwInitPwrGatingL1Pg) {
    Value = 0x1;
  }
  PcieRegisterWriteField (
    Wrapper,
    CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_003D_ADDRESS),
    D0F0xE4_CORE_003D_LC_L1_POWER_GATING_EN_OFFSET,
    D0F0xE4_CORE_003D_LC_L1_POWER_GATING_EN_WIDTH,
    Value,
    TRUE,
    Pcie
    );
  for (Pif = 0; Pif < Wrapper->NumberOfPIFs; Pif++) {
    // Read-modify-write the PIF TX (0008) and RX (000A) control registers.
    D0F0xE4_PIF_0008.Value = PcieRegisterRead (
                               Wrapper,
                               PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_0008_ADDRESS),
                               Pcie
                               );
    D0F0xE4_PIF_000A.Value = PcieRegisterRead (
                               Wrapper,
                               PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_000A_ADDRESS),
                               Pcie
                               );
    D0F0xE4_PIF_0008.Field.TxpwrInOff = GnbBuildOptionData->CfgPcieTxpwrInOff;
    D0F0xE4_PIF_000A.Field.RxpwrInOff = GnbBuildOptionData->CfgPcieRxpwrInOff;
    D0F0xE4_PIF_000A.Field.RxEiDetInPs2Degrade = 0x0;
    // Gate TX/RX power in L1 only when the L1Pg option flag is set.
    D0F0xE4_PIF_0008.Field.TxpwrGatingInL1 = 0x0;
    D0F0xE4_PIF_000A.Field.RxpwrGatingInL1 = 0x0;
    if ((GnbBuildOptionData->CfgPcieHwInitPwerGating & PcieHwInitPwrGatingL1Pg) == PcieHwInitPwrGatingL1Pg) {
      D0F0xE4_PIF_0008.Field.TxpwrGatingInL1 = 0x1;
      D0F0xE4_PIF_000A.Field.RxpwrGatingInL1 = 0x1;
    }
    // Gate unused-lane power only when the OffPg option flag is set.
    D0F0xE4_PIF_0008.Field.TxpwrGatingInUnused = 0x0;
    D0F0xE4_PIF_000A.Field.RxpwrGatingInUnused = 0x0;
    if ((GnbBuildOptionData->CfgPcieHwInitPwerGating & PcieHwInitPwrGatingOffPg) == PcieHwInitPwrGatingOffPg) {
      D0F0xE4_PIF_0008.Field.TxpwrGatingInUnused = 0x1;
      D0F0xE4_PIF_000A.Field.RxpwrGatingInUnused = 0x1;
    }
    PcieRegisterWrite (
      Wrapper,
      PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_0008_ADDRESS),
      D0F0xE4_PIF_0008.Value,
      TRUE,
      Pcie
      );
    PcieRegisterWrite (
      Wrapper,
      PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_000A_ADDRESS),
      D0F0xE4_PIF_000A.Value,
      TRUE,
      Pcie
      );
    // Clear the PIF degrade-power PLL mode.
    D0F0xE4_PIF_0004.Value = PcieRegisterRead (
                               Wrapper,
                               PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_0004_ADDRESS),
                               Pcie
                               );
    D0F0xE4_PIF_0004.Field.PifDegradePwrPllMode = 0x0;
    PcieRegisterWrite (
      Wrapper,
      PIF_SPACE (Wrapper->WrapId, Pif, D0F0xE4_PIF_0004_ADDRESS),
      D0F0xE4_PIF_0004.Value,
      TRUE,
      Pcie
      );
  }
  // Core lane-mask registers: enable degrade for lane groups 0-3 (012A) and
  // unused-lane handling (012C/012D).
  D0F0xE4_CORE_012A.Value = PcieRegisterRead (
                              Wrapper,
                              CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_012A_ADDRESS),
                              Pcie
                              );
  D0F0xE4_CORE_012C.Value = PcieRegisterRead (
                              Wrapper,
                              CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_012C_ADDRESS),
                              Pcie
                              );
  D0F0xE4_CORE_012D.Value = PcieRegisterRead (
                              Wrapper,
                              CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_012D_ADDRESS),
                              Pcie
                              );
  D0F0xE4_CORE_012A.Field.LMLaneDegrade0 = 1;
  D0F0xE4_CORE_012A.Field.LMLaneDegrade1 = 1;
  D0F0xE4_CORE_012A.Field.LMLaneDegrade2 = 1;
  D0F0xE4_CORE_012A.Field.LMLaneDegrade3 = 1;
  // NOTE(review): LMLaneUnused0-2 are fields of CORE_012C while LMLaneUnused3
  // is a field of CORE_012D — presumably the register layout splits them this
  // way; verify against the register specification.
  D0F0xE4_CORE_012C.Field.LMLaneUnused0 = 1;
  D0F0xE4_CORE_012C.Field.LMLaneUnused1 = 1;
  D0F0xE4_CORE_012C.Field.LMLaneUnused2 = 1;
  D0F0xE4_CORE_012D.Field.LMLaneUnused3 = 1;
  PcieRegisterWrite (
    Wrapper,
    CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_012A_ADDRESS),
    D0F0xE4_CORE_012A.Value,
    TRUE,
    Pcie
    );
  PcieRegisterWrite (
    Wrapper,
    CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_012C_ADDRESS),
    D0F0xE4_CORE_012C.Value,
    TRUE,
    Pcie
    );
  PcieRegisterWrite (
    Wrapper,
    CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_012D_ADDRESS),
    D0F0xE4_CORE_012D.Value,
    TRUE,
    Pcie
    );
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieHwInitPowerGatingCZ Exit\n");
}
/**
 * PHY lane parameter init callback (Carrizo).
 *
 * Skips wrappers with no allocated PCIe device. Otherwise applies the early
 * per-lane init table to every native PCIe PHY lane (lane register offset =
 * table Reg + lane-within-phy * 0x100), then applies the early per-PHY
 * wrapper table once for each PHY of the wrapper.
 *
 * @param[in]  Wrapper  Pointer to wrapper config descriptor
 * @param[in]  Buffer   Pointer to buffer (unused)
 * @param[in]  Pcie     Pointer to global PCIe configuration
 */
AGESA_STATUS
STATIC
PciePhyLaneInitInitCallbackCZ (
  IN       PCIe_WRAPPER_CONFIG   *Wrapper,
  IN       VOID                  *Buffer,
  IN       PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  UINT8   PhyNo;
  UINT8   LaneInPhy;
  UINT8   LaneNo;
  UINT32  ActiveLanes;
  UINTN   TblIdx;
  UINT32  RegValue;

  IDS_HDT_CONSOLE (GNB_TRACE, "PciePhyLaneInitInitCallbackCZ Enter\n");
  // Bail out early when nothing is allocated on this wrapper.
  ActiveLanes = PcieUtilGetWrapperLaneBitMap (LANE_TYPE_PCIE_CORE_ALLOC, 0, Wrapper);
  if (ActiveLanes == 0) {
    IDS_HDT_CONSOLE (GNB_TRACE, "No device allocated in this wrapper\n");
    return AGESA_SUCCESS;
  }
  // Per-lane early init: only lanes that are native PCIe PHY lanes.
  ActiveLanes = PcieUtilGetWrapperLaneBitMap (LANE_TYPE_PCIE_PHY_NATIVE, 0, Wrapper);
  for (LaneNo = 0; LaneNo < Wrapper->NumberOfLanes; ++LaneNo) {
    if ((ActiveLanes & (1 << LaneNo)) == 0) {
      continue;
    }
    PhyNo = LaneNo / MAX_NUM_LANE_PER_PHY;
    LaneInPhy = LaneNo % MAX_NUM_LANE_PER_PHY;
    for (TblIdx = 0; TblIdx < PhyLaneInitEarlyTableCZ.Length; TblIdx++) {
      // Read-modify-write: clear the table mask, then OR in the table data.
      RegValue = PcieRegisterRead (
                   Wrapper,
                   PHY_SPACE (Wrapper->WrapId, PhyNo, PhyLaneInitEarlyTableCZ.Table[TblIdx].Reg + (LaneInPhy * 0x100)),
                   Pcie
                   );
      RegValue = (RegValue & (~PhyLaneInitEarlyTableCZ.Table[TblIdx].Mask)) | PhyLaneInitEarlyTableCZ.Table[TblIdx].Data;
      PcieRegisterWrite (
        Wrapper,
        PHY_SPACE (Wrapper->WrapId, PhyNo, PhyLaneInitEarlyTableCZ.Table[TblIdx].Reg + (LaneInPhy * 0x100)),
        RegValue,
        FALSE,
        Pcie
        );
    }
  }
  // Per-PHY early init: one pass for each PHY (stride of one PHY's lanes).
  for (LaneNo = 0; LaneNo < Wrapper->NumberOfLanes; LaneNo += MAX_NUM_LANE_PER_PHY) {
    PhyNo = LaneNo / MAX_NUM_LANE_PER_PHY;
    for (TblIdx = 0; TblIdx < PhyWrapperInitEarlyTableCZ.Length; TblIdx++) {
      RegValue = PcieRegisterRead (
                   Wrapper,
                   PHY_SPACE (Wrapper->WrapId, PhyNo, PhyWrapperInitEarlyTableCZ.Table[TblIdx].Reg),
                   Pcie
                   );
      RegValue = (RegValue & (~PhyWrapperInitEarlyTableCZ.Table[TblIdx].Mask)) | PhyWrapperInitEarlyTableCZ.Table[TblIdx].Data;
      PcieRegisterWrite (
        Wrapper,
        PHY_SPACE (Wrapper->WrapId, PhyNo, PhyWrapperInitEarlyTableCZ.Table[TblIdx].Reg),
        RegValue,
        FALSE,
        Pcie
        );
    }
  }
  IDS_HDT_CONSOLE (GNB_TRACE, "PciePhyLaneInitInitCallbackCZ Exit\n");
  return AGESA_SUCCESS;
}
/** * * This function executes receiver enable training for a specific die * * @param[in,out] *TechPtr - Pointer to the MEM_TECH_BLOCK * @param[in] Pass - Pass of the receiver training * * @return TRUE - No fatal error occurs. * @return FALSE - Fatal error occurs. */ BOOLEAN STATIC MemTDqsTrainOptRcvrEnSw ( IN OUT MEM_TECH_BLOCK *TechPtr, IN UINT8 Pass ) { _16BYTE_ALIGN UINT8 PatternBuffer[6 * 64]; UINT8 TestBuffer[256]; UINT8 *PatternBufPtr[6]; UINT8 *TempPtr; UINT32 TestAddrRJ16[4]; UINT32 TempAddrRJ16; UINT32 RealAddr; UINT16 CurTest[4]; UINT8 Dct; UINT8 Receiver; UINT8 i; UINT8 TimesFail; UINT8 TimesRetrain; UINT16 RcvrEnDly; UINT16 MaxRcvrEnDly; UINT16 RcvrEnDlyLimit; UINT16 MaxDelayCha; BOOLEAN IsDualRank; BOOLEAN S0En; BOOLEAN S1En; MEM_DATA_STRUCT *MemPtr; DIE_STRUCT *MCTPtr; DCT_STRUCT *DCTPtr; MEM_NB_BLOCK *NBPtr; NBPtr = TechPtr->NBPtr; MemPtr = NBPtr->MemPtr; MCTPtr = NBPtr->MCTPtr; TechPtr->TrainingType = TRN_RCVR_ENABLE; TempAddrRJ16 = 0; TempPtr = NULL; MaxDelayCha = 0; TimesRetrain = DEFAULT_TRAINING_TIMES; IDS_OPTION_HOOK (IDS_MEM_RETRAIN_TIMES, &TimesRetrain, &MemPtr->StdHeader); IDS_HDT_CONSOLE (MEM_STATUS, "\nStart Optimized SW RxEn training\n"); // Set environment settings before training MemTBeginTraining (TechPtr); PatternBufPtr[0] = PatternBufPtr[2] = PatternBuffer; // These two patterns used for first Test Address MemUFillTrainPattern (TestPattern0, PatternBufPtr[0], 64); // Second Cacheline used for Dummy Read is the inverse of // the first so that is is not mistaken for the real read MemUFillTrainPattern (TestPattern1, PatternBufPtr[0] + 64, 64); PatternBufPtr[1] = PatternBufPtr[3] = PatternBufPtr[0] + 128; // These two patterns used for second Test Address MemUFillTrainPattern (TestPattern1, PatternBufPtr[1], 64); // Second Cacheline used for Dummy Read is the inverse of // the first so that is is not mistaken for the real read MemUFillTrainPattern (TestPattern0, PatternBufPtr[1] + 64, 64); // Fill pattern for flush after every sweep 
PatternBufPtr[4] = PatternBufPtr[0] + 256; MemUFillTrainPattern (TestPattern3, PatternBufPtr[4], 64); // Fill pattern for initial dummy read PatternBufPtr[5] = PatternBufPtr[0] + 320; MemUFillTrainPattern (TestPattern4, PatternBufPtr[5], 64); // Begin receiver enable training AGESA_TESTPOINT (TpProcMemReceiverEnableTraining, &(MemPtr->StdHeader)); for (Dct = 0; Dct < NBPtr->DctCount; Dct++) { IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct); NBPtr->SwitchDCT (NBPtr, Dct); DCTPtr = NBPtr->DCTPtr; // Set training bit NBPtr->SetBitField (NBPtr, BFDqsRcvEnTrain, 1); // Relax Max Latency before training NBPtr->SetMaxLatency (NBPtr, 0xFFFF); if (Pass == FIRST_PASS) { TechPtr->InitDQSPos4RcvrEn (TechPtr); } // there are four receiver pairs, loosely associated with chipselects. Receiver = DCTPtr->Timings.CsEnabled ? 0 : 8; for (; Receiver < 8; Receiver += 2) { S0En = NBPtr->GetSysAddr (NBPtr, Receiver, &TestAddrRJ16[0]); S1En = NBPtr->GetSysAddr (NBPtr, Receiver + 1, &TestAddrRJ16[2]); if (S0En) { TestAddrRJ16[1] = TestAddrRJ16[0] + BIGPAGE_X8_RJ16; } if (S1En) { TestAddrRJ16[3] = TestAddrRJ16[2] + BIGPAGE_X8_RJ16; } if (S0En && S1En) { IsDualRank = TRUE; } else { IsDualRank = FALSE; } if (S0En || S1En) { IDS_HDT_CONSOLE (MEM_STATUS, "\t\tCS %d\n", Receiver); RcvrEnDlyLimit = 0x1FF; // @attention - limit depends on proc type TechPtr->DqsRcvEnSaved = 0; RcvrEnDly = RcvrEnDlyLimit; RealAddr = 0; TechPtr->GetFirstPassVal = FALSE; TechPtr->DqsRcvEnFirstPassVal = 0; TechPtr->RevertPassVal = FALSE; TechPtr->InitializeVariablesOpt (TechPtr); // Write the test patterns AGESA_TESTPOINT (TpProcMemRcvrWritePattern, &(MemPtr->StdHeader)); IDS_HDT_CONSOLE (MEM_FLOW, "\t\t\tWrite to addresses: "); for (i = (S0En ? 0 : 2); i < (S1En ? 
4 : 2); i++) { RealAddr = MemUSetUpperFSbase (TestAddrRJ16[i], MemPtr); // One cacheline of data to be tested and one of dummy data MemUWriteCachelines (RealAddr, PatternBufPtr[i], 2); // This is dummy data with a different pattern used for the first dummy read. MemUWriteCachelines (RealAddr + 128, PatternBufPtr[5], 1); IDS_HDT_CONSOLE (MEM_FLOW, " %04x0000 ", TestAddrRJ16[i]); } IDS_HDT_CONSOLE (MEM_FLOW, "\n"); // Sweep receiver enable delays AGESA_TESTPOINT (TpProcMemRcvrStartSweep, &(MemPtr->StdHeader)); TimesFail = 0; ERROR_HANDLE_RETRAIN_BEGIN (TimesFail, TimesRetrain) { TechPtr->LoadInitialRcvrEnDlyOpt (TechPtr, Receiver); while (!TechPtr->CheckRcvrEnDlyLimitOpt (TechPtr)) { AGESA_TESTPOINT (TpProcMemRcvrSetDelay, &(MemPtr->StdHeader)); TechPtr->SetRcvrEnDlyOpt (TechPtr, Receiver, RcvrEnDly); // Read and compare the first beat of data for (i = (S0En ? 0 : 2); i < (S1En ? 4 : 2); i++) { AGESA_TESTPOINT (TpProcMemRcvrReadPattern, &(MemPtr->StdHeader)); RealAddr = MemUSetUpperFSbase (TestAddrRJ16[i], MemPtr); // // Issue dummy cacheline reads // MemUReadCachelines (TestBuffer + 128, RealAddr + 128, 1); MemUReadCachelines (TestBuffer, RealAddr, 1); MemUProcIOClFlush (TestAddrRJ16[i], 2, MemPtr); // // Perform actual read which will be compared // MemUReadCachelines (TestBuffer + 64, RealAddr + 64, 1); AGESA_TESTPOINT (TpProcMemRcvrTestPattern, &(MemPtr->StdHeader)); CurTest[i] = TechPtr->Compare1ClPatternOpt (TechPtr, TestBuffer + 64 , PatternBufPtr[i] + 64, i, Receiver, S1En); // Due to speculative execution during MemUReadCachelines, we must // flush one more cache line than we read. MemUProcIOClFlush (TestAddrRJ16[i], 4, MemPtr); TechPtr->ResetDCTWrPtr (TechPtr, Receiver); // // Swap the test pointers such that even and odd steps alternate. 
// if ((i % 2) == 0) { TempPtr = PatternBufPtr[i]; PatternBufPtr[i] = PatternBufPtr[i + 1]; TempAddrRJ16 = TestAddrRJ16[i]; TestAddrRJ16[i] = TestAddrRJ16[i + 1]; } else { PatternBufPtr[i] = TempPtr; TestAddrRJ16[i] = TempAddrRJ16; } } } // End of delay sweep ERROR_HANDLE_RETRAIN_END (!TechPtr->SetSweepErrorOpt (TechPtr, Receiver, Dct, TRUE), TimesFail) } if (!TechPtr->SetSweepErrorOpt (TechPtr, Receiver, Dct, FALSE)) { return FALSE; } TechPtr->LoadRcvrEnDlyOpt (TechPtr, Receiver); // set final delays // // Flush AA and 55 patterns by reading a dummy pattern to fill in FIFO // // Aquire a new FSBase, based on the last test address that we stored. RealAddr = MemUSetUpperFSbase (TempAddrRJ16, MemPtr); ASSERT (RealAddr != 0); MemUWriteCachelines (RealAddr, PatternBufPtr[4], 1); MemUWriteCachelines (RealAddr + 64, PatternBufPtr[4], 1); MemUReadCachelines (TestBuffer, RealAddr, 2); // Due to speculative execution during MemUReadCachelines, we must // flush one more cache line than we read. MemUProcIOClFlush (TempAddrRJ16, 3, MemPtr); } } // End while Receiver < 8
/**
 * Build the ALIB ACPI SSDT (V2 dispatch path).
 *
 * Copies the family base SSDT into a heap buffer (allocated on first call,
 * reused when *AlibSsdtPtr is already set), stamps the OEM id fields,
 * updates the GNB data area, locates the 'ADAT' transfer block, runs the
 * V2 feature dispatch table against it, and finally re-checksums the table.
 * On dispatch failure the table is shrunk to just its header.
 *
 * @param[in]   StdHeader    Standard configuration header
 * @param[out]  AlibSsdtPtr  Receives (or supplies) the SSDT buffer pointer
 *
 * @retval AGESA_ERROR   Heap allocation for the SSDT buffer failed
 * @retval AGESA_SUCCESS Table built (or shrunk to header on dispatch error)
 */
AGESA_STATUS
PcieAlibBuildAcpiTableV2 (
  IN       AMD_CONFIG_PARAMS  *StdHeader,
     OUT   VOID               **AlibSsdtPtr
  )
{
  AGESA_STATUS  Status;
  AGESA_STATUS  AgesaStatus;
  VOID          *AlibSsdtBuffer;
  VOID          *AlibSsdtTable;
  UINTN         AlibSsdtlength;
  UINT32        AmlObjName;
  VOID          *AmlObjPtr;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieAlibBuildAcpiTableV2 Enter\n");
  AgesaStatus = AGESA_SUCCESS;
  AlibSsdtTable = GnbFmAlibGetBaseTable (StdHeader);
  AlibSsdtlength = ((ACPI_TABLE_HEADER*) AlibSsdtTable)->TableLength;
  if (*AlibSsdtPtr == NULL) {
    // First call: allocate a persistent heap buffer for the SSDT copy.
    AlibSsdtBuffer = GnbAllocateHeapBuffer (
                       AMD_ACPI_ALIB_BUFFER_HANDLE,
                       AlibSsdtlength,
                       StdHeader
                       );
    ASSERT (AlibSsdtBuffer != NULL);
    if (AlibSsdtBuffer == NULL) {
      return AGESA_ERROR;
    }
    *AlibSsdtPtr = AlibSsdtBuffer;
  } else {
    // Caller already owns a buffer; build into it.
    AlibSsdtBuffer = *AlibSsdtPtr;
  }
  // Check length of port data
  ASSERT (sizeof (_ALIB_PORT_DATA) <= 20);
  // Check length of global data
  ASSERT (sizeof (_ALIB_GLOBAL_DATA) <= 32);
  // Copy template to buffer
  LibAmdMemCopy (AlibSsdtBuffer, AlibSsdtTable, AlibSsdtlength, StdHeader);
  // Update table OEM fields.
  LibAmdMemCopy (
    (VOID *) &((ACPI_TABLE_HEADER*) AlibSsdtBuffer)->OemId,
    (VOID *) &GnbBuildOptions.OemIdString,
    sizeof (GnbBuildOptions.OemIdString),
    StdHeader);
  LibAmdMemCopy (
    (VOID *) &((ACPI_TABLE_HEADER*) AlibSsdtBuffer)->OemTableId,
    (VOID *) &GnbBuildOptions.OemTableIdString,
    sizeof (GnbBuildOptions.OemTableIdString),
    StdHeader);
  //
  // Update register base base
  //
  PcieAlibUpdateGnbData (AlibSsdtBuffer, StdHeader);
  //
  // Update transfer block
  //
  AmlObjName = STRING_TO_UINT32 ('A', 'D', 'A', 'T');
  AmlObjPtr = GnbLibFind (AlibSsdtBuffer, AlibSsdtlength, (UINT8*) &AmlObjName, sizeof (AmlObjName));
  if (AmlObjPtr != NULL) {
    // Skip past the AML name/opcode prefix to the data area.
    // NOTE(review): the +10 offset assumes a fixed AML buffer-object header
    // size — verify against the base table's ADAT object encoding.
    AmlObjPtr = (UINT8 *) AmlObjPtr + 10;
  }
  // Dispatch function from table
  // NOTE(review): if 'ADAT' was not found, AmlObjPtr is NULL here and is
  // passed to the dispatcher as-is — confirm callees tolerate NULL.
  Status = GnbLibDispatchFeaturesV2 (&AlibDispatchTableV2[0], AmlObjPtr, StdHeader);
  AGESA_STATUS_UPDATE (Status, AgesaStatus);
  if (AgesaStatus != AGESA_SUCCESS) {
    //Shrink table length to size of the header
    ((ACPI_TABLE_HEADER*) AlibSsdtBuffer)->TableLength = sizeof (ACPI_TABLE_HEADER);
  }
  // Recompute the ACPI table checksum over the (possibly shrunk) table.
  ChecksumAcpiTable ((ACPI_TABLE_HEADER*) AlibSsdtBuffer, StdHeader);
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieAlibBuildAcpiTableV2 Exit [0x%x]\n", AgesaStatus);
  return AgesaStatus;
}
VOID MemNTrainPhyFenceNb ( IN OUT MEM_NB_BLOCK *NBPtr ) { UINT8 Byte; INT16 Avg; UINT8 PREvalue; if (MemNGetBitFieldNb (NBPtr, BFDisDramInterface)) { return; } // 1. BIOS first programs a seed value to the phase recovery // engine registers. // IDS_HDT_CONSOLE (MEM_FLOW, "\t\tSeeds: "); for (Byte = 0; Byte < MAX_BYTELANES_PER_CHANNEL; Byte++) { // This includes ECC as byte 8 MemNSetTrainDlyNb (NBPtr, AccessPhRecDly, DIMM_BYTE_ACCESS (0, Byte), 19); IDS_HDT_CONSOLE (MEM_FLOW, "%02x ", 19); } IDS_HDT_CONSOLE (MEM_FLOW, "\n\t\tPhyFenceTrEn = 1"); // 2. Set F2x[1, 0]9C_x08[PhyFenceTrEn]=1. MemNSetBitFieldNb (NBPtr, BFPhyFenceTrEn, 1); if (!NBPtr->IsSupported[UnifiedNbFence]) { // 3. Wait 200 MEMCLKs. MemNWaitXMemClksNb (NBPtr, 200); } else { // 3. Wait 2000 MEMCLKs. MemNWaitXMemClksNb (NBPtr, 2000); } // 4. Clear F2x[1, 0]9C_x08[PhyFenceTrEn]=0. MemNSetBitFieldNb (NBPtr, BFPhyFenceTrEn, 0); // 5. BIOS reads the phase recovery engine registers // F2x[1, 0]9C_x[51:50] and F2x[1, 0]9C_x52. // 6. Calculate the average value of the fine delay and subtract 8. // Avg = 0; IDS_HDT_CONSOLE (MEM_FLOW, "\n\t\t PRE: "); for (Byte = 0; Byte < MAX_BYTELANES_PER_CHANNEL; Byte++) { // // This includes ECC as byte 8. ECC Byte lane (8) is ignored by MemNGetTrainDlyNb function where // ECC is not supported. // PREvalue = (UINT8) (0x1F & MemNGetTrainDlyNb (NBPtr, AccessPhRecDly, DIMM_BYTE_ACCESS (0, Byte))); Avg = Avg + ((INT16) PREvalue); IDS_HDT_CONSOLE (MEM_FLOW, "%02x ", PREvalue); } Avg = ((Avg + 8) / 9); // round up NBPtr->MemNPFenceAdjustNb (NBPtr, &Avg); IDS_HDT_CONSOLE (MEM_FLOW, "\n\t\tFence: %02x\n", Avg); // 7. Write the value to F2x[1, 0]9C_x0C[PhyFence]. MemNSetBitFieldNb (NBPtr, BFPhyFence, Avg); // 8. BIOS rewrites F2x[1, 0]9C_x04, DRAM Address/Command Timing Control // Register delays for both channels. This forces the phy to recompute // the fence. // MemNSetBitFieldNb (NBPtr, BFAddrTmgControl, MemNGetBitFieldNb (NBPtr, BFAddrTmgControl)); }
/**
 *
 *   Look up CAD Bus config tables and return the pointer to the matched entry.
 *
 *     @param[in,out]   *NBPtr - Pointer to the MEM_NB_BLOCK
 *     @param[in]       *ListOfTables - Pointer to PSC_TBL_ENTRY array of pointers
 *
 *     @return          TRUE - Table values can be extracted per dimm population and ranks type.
 *     @return          FALSE - Table values cannot be extracted per dimm population and ranks type.
 *
 */
BOOLEAN
MemPLookupCadBusCfgTabs (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       PSC_TBL_ENTRY *ListOfTables[]
  )
{
  UINT8 i;
  UINT8 TableSize;
  UINT32 CurDDRrate;
  UINT8 DDR3Voltage;
  UINT16 RankTypeInTable;
  UINT8 PsoMaskSAO;
  PSCFG_CADBUS_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;
  // Select the table that matches this NB/package/dimm configuration.
  TblPtr = (PSCFG_CADBUS_ENTRY *) MemPGetTableEntry (NBPtr, ListOfTables, &TableSize);
  if (TblPtr != NULL) {
    // Encode current speed and VDDIO as one-hot bits for table matching.
    CurDDRrate = (UINT32) (1 << (CurrentChannel->DCTPtr->Timings.Speed / 66));
    DDR3Voltage = (UINT8) (1 << CONVERT_VDDIO_TO_ENCODED (NBPtr->RefPtr->DDR3Voltage));
    // Walk the table; 'break' on the first entry matching slot count,
    // speed, voltage and rank type (i < TableSize then signals a match).
    for (i = 0; i < TableSize; i++) {
      if ((TblPtr->DimmPerCh & NBPtr->PsPtr->NumOfDimmSlots) != 0) {
        if ((TblPtr->DDRrate & CurDDRrate) != 0) {
          if ((TblPtr->VDDIO & DDR3Voltage) != 0) {
            // Only two dimm slots are encoded; upper nibbles are "no dimm".
            RankTypeInTable = ((UINT16) TblPtr->Dimm0) | ((UINT16) TblPtr->Dimm1 << 4) | (NP << 8) | (NP << 12);
            if ((RankTypeInTable & NBPtr->PsPtr->RankType) == NBPtr->PsPtr->RankType) {
              // Matched: latch address timing, slow mode and drive strengths.
              CurrentChannel->DctAddrTmg = TblPtr->AddrCmdCtl;
              CurrentChannel->SlowMode = (TblPtr->SlowMode == 1) ? TRUE : FALSE;
              NBPtr->PsPtr->CkeStrength = TblPtr->CkeStrength;
              NBPtr->PsPtr->CsOdtStrength = TblPtr->CsOdtStrength;
              NBPtr->PsPtr->AddrCmdStrength = TblPtr->AddrCmdStrength;
              NBPtr->PsPtr->ClkStrength = TblPtr->ClkStrength;
              break;
            }
          }
        }
      }
      TblPtr++;
    }
  } else {
    // No table available: force i == TableSize so the "no entry" path runs
    // below unless a platform override supplies the values.
    i = TableSize = 0;
  }

  // Platform overrides may supply slow-access-mode and address timing even
  // when no table entry matched.
  PsoMaskSAO = (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_SLOWACCMODE);
  PsoMaskSAO &= (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_ADDRTMG);
  // i == TableSize means the loop above found no match.
  if ((PsoMaskSAO == 0) && (i == TableSize)) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo CAD bus config entries\n");
  } else {
    return TRUE;
  }

  // Tolerate the miss while VDDIO is still undetermined; a later pass will
  // retry once voltage is settled.
  if (NBPtr->SharedPtr->VoltageMap != VDDIO_DETERMINED) {
    return TRUE;
  }

  // Hard failure: log, flag the error and invoke the error handler.
  PutEventLog (AGESA_ERROR, MEM_ERROR_SAO_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
  SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
  if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
    ASSERT (FALSE);
  }
  return FALSE;
}
/**
 *
 *   A sub-function extracts WL and HW RxEn seeds from PSCFG tables
 *   from a input table
 *
 *     @param[in,out]   *NBPtr - Pointer to the MEM_NB_BLOCK
 *     @param[in]       *EntryOfTables - Pointer to MEM_PSC_TABLE_BLOCK
 *
 *     @return          NBPtr->PsPtr->WLSeedVal
 *     @return          NBPtr->PsPtr->HWRxENSeedVal
 *
 */
BOOLEAN
MemPGetTrainingSeeds (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 MaxDimmPerCh;
  UINT8 NOD;
  UINT8 TableSize;
  DIMM_TYPE DimmType;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  UINT8 Seedloop;
  UINT8 CH;
  PSC_TBL_ENTRY **TblEntryPtr;
  PSCFG_SEED_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;
  TblEntryPtr = NULL;
  TblPtr = NULL;
  TableSize = 0;
  PackageType = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;
  MaxDimmPerCh = GetMaxDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);
  // One-hot encodings used to match table entries: dimms-per-channel (NOD)
  // and channel id (CH).
  NOD = (UINT8) 1 << (MaxDimmPerCh - 1);
  CH = 1 << (CurrentChannel->ChannelID);

  // Classify the dimm population; soldered-down SODIMM is a platform
  // override on top of SODIMM detection.
  if (CurrentChannel->RegDimmPresent != 0) {
    DimmType = RDIMM_TYPE;
  } else if (CurrentChannel->SODimmPresent != 0) {
    DimmType = SODIMM_TYPE;
    if (FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_SOLDERED_DOWN_SODIMM_TYPE, NBPtr->MCTPtr->SocketId, NBPtr->ChannelPtr->ChannelID, 0, NULL, NULL) != NULL) {
      DimmType = SODWN_SODIMM_TYPE;
    }
  } else if (CurrentChannel->LrDimmPresent != 0) {
    DimmType = LRDIMM_TYPE;
  } else {
    DimmType = UDIMM_TYPE;
  }

  // Get seed value of WL, then HW RxEn
  // (pass 0 = write-leveling seed, pass 1 = hardware RxEn seed).
  for (Seedloop = 0; Seedloop < 2; Seedloop++) {
    TblEntryPtr = (Seedloop == 0) ? EntryOfTables->TblEntryOfWLSeed : EntryOfTables->TblEntryOfHWRxENSeed;
    i = 0;
    // Obtain table pointer, table size, Logical Cpuid and PSC type
    // according to Dimm, NB and package type.
    while (TblEntryPtr[i] != NULL) {
      if (((TblEntryPtr[i])->Header.DimmType & DimmType) != 0) {
        //
        // Determine if this is the expected NB Type
        //
        LogicalCpuid = (TblEntryPtr[i])->Header.LogicalCpuid;
        PackageType = (TblEntryPtr[i])->Header.PackageType;
        if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
          TblPtr = (PSCFG_SEED_ENTRY *) ((TblEntryPtr[i])->TBLPtr);
          TableSize = (TblEntryPtr[i])->TableSize;
          break;
        }
      }
      i++;
    }
    // Check whether no table entry is found.
    if (TblEntryPtr[i] == NULL) {
      IDS_HDT_CONSOLE (MEM_FLOW, "\nNo %s training seeds Config table\n", (Seedloop == 0) ? "WL" : "HW RxEn");
      return FALSE;
    }
    // Walk the selected table for an entry matching this dimm count and
    // channel; 'break' leaves i < TableSize on a match.
    for (i = 0; i < TableSize; i++) {
      if ((TblPtr->DimmPerCh & NOD) != 0) {
        if ((TblPtr->Channel & CH) != 0) {
          if (Seedloop == 0) {
            NBPtr->PsPtr->WLSeedVal = (UINT8) TblPtr->SeedVal;
          } else {
            NBPtr->PsPtr->HWRxENSeedVal = TblPtr->SeedVal;
          }
          break;
        }
      }
      TblPtr++;
    }
    // No matching row: log, record the error and run the error handler.
    if (i == TableSize) {
      IDS_HDT_CONSOLE (MEM_FLOW, "\nNo %s seed entries\n\n", (Seedloop == 0) ? "WL" : "HW RxEn");
      PutEventLog (AGESA_ERROR, MEM_ERROR_TRAINING_SEED_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
      SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
      if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
        ASSERT (FALSE);
      }
      return FALSE;
    }
  }
  return TRUE;
}
/**
 * Entry point for enabling Power Status Indicator
 *
 * This function must be run after all P-State routines have been executed
 *
 * For both the core VRM (PSI0_L) and the NB VRM (NBPSI0_L), scan P-states
 * from highest performance downward and enable the PSI VID at the first
 * P-state where this state's and the next state's max current are both at
 * or below the platform's low-power threshold (and the VID changed).
 *
 * @param[in]  PsiServices             The current CPU's family services.
 * @param[in]  EntryPoint              Timepoint designator.
 * @param[in]  PlatformConfig          Contains the runtime modifiable feature input data.
 * @param[in]  StdHeader               Config handle for library and services.
 *
 * @retval     AGESA_SUCCESS           Always succeeds.
 *
 */
AGESA_STATUS
STATIC
F15CzInitializePsi (
  IN       PSI_FAMILY_SERVICES    *PsiServices,
  IN       UINT64                 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  PCI_ADDR                             PciAddress;
  CPU_SPECIFIC_SERVICES                *FamilySpecificServices;
  UINT32                               HwPstateMaxVal;
  F15_CZ_CLK_PWR_TIMING_CTRL2_REGISTER ClkPwrTimingCtrl2;
  UINT32                               CoreVrmLowPowerThreshold;
  UINT32                               Pstate;
  UINT32                               PstateCurrent;
  UINT32                               NextPstateCurrent;
  PSTATE_MSR                           PstateMsr;
  UINT32                               CurrentVid;
  UINT32                               PreviousVid;
  NB_PSTATE_REGISTER                   NbPstateReg;
  NB_PSTATE_CTRL_REGISTER              NbPsCtrl;
  UINT32                               NbVrmLowPowerThreshold;
  UINT32                               NbPstate;
  UINT32                               NbPstateMaxVal;
  UINT32                               NbPstateCurrent;
  UINT32                               NextNbPstateCurrent;
  UINT32                               PreviousNbVid;
  UINT32                               CurrentNbVid;
  SMUSVI_MISC_VID_STATUS_REGISTER      SmuSviMiscVidStatus;
  SMUSVI_POWER_CONTROL_MISC_REGISTER   SmuSviPowerCtrlMisc;

  // Only run at the post-MTRR-sync timepoints (cold boot or resume).
  if ((EntryPoint & (CPU_FEAT_AFTER_POST_MTRR_SYNC | CPU_FEAT_AFTER_RESUME_MTRR_SYNC)) != 0) {
    // Configure PsiVid
    GetCpuServicesOfCurrentCore ((CONST CPU_SPECIFIC_SERVICES **) &FamilySpecificServices, StdHeader);
    IDS_HDT_CONSOLE (CPU_TRACE, " F15CzPmVrmLowPowerModeEnable\n");
    if (PlatformConfig->VrmProperties[CoreVrm].LowPowerThreshold != 0) {
      // Set up PSI0_L for VDD
      CoreVrmLowPowerThreshold = PlatformConfig->VrmProperties[CoreVrm].LowPowerThreshold;
      IDS_HDT_CONSOLE (CPU_TRACE, " Core VRM - LowPowerThreshold: %d \n", CoreVrmLowPowerThreshold);
      // 0xFF sentinel: no previous VID seen yet.
      PreviousVid = 0xFF;
      PciAddress.AddressValue = CPTC2_PCI_ADDR;
      LibAmdPciRead (AccessWidth32, PciAddress, &ClkPwrTimingCtrl2, StdHeader);
      HwPstateMaxVal = ClkPwrTimingCtrl2.HwPstateMaxVal;
      IDS_HDT_CONSOLE (CPU_TRACE, " HwPstateMaxVal %d\n", HwPstateMaxVal);
      for (Pstate = 0; Pstate <= HwPstateMaxVal; Pstate++) {
        // Check only valid P-state
        if (FamilySpecificServices->GetProcIddMax (FamilySpecificServices, (UINT8) Pstate, &PstateCurrent, StdHeader) != TRUE) {
          continue;
        }
        LibAmdMsrRead ((UINT32) (Pstate + PS_REG_BASE), (UINT64 *) &PstateMsr, StdHeader);
        CurrentVid = (UINT32) PstateMsr.CpuVid;
        if (Pstate == HwPstateMaxVal) {
          // Last P-state: there is no next state, treat its current as zero.
          NextPstateCurrent = 0;
        } else {
          // Check P-state from P1 to HwPstateMaxVal
          if (FamilySpecificServices->GetProcIddMax (FamilySpecificServices, (UINT8) (Pstate + 1), &NextPstateCurrent, StdHeader) != TRUE) {
            continue;
          }
        }
        if ((PstateCurrent <= CoreVrmLowPowerThreshold) &&
            (NextPstateCurrent <= CoreVrmLowPowerThreshold) &&
            (CurrentVid != PreviousVid)) {
          // Program PsiVid and PsiVidEn if PSI state is found and stop searching.
          GnbLibPciIndirectRead (MAKE_SBDFO (0, 0, 0, 0, 0xB8), SMUSVI_POWER_CONTROL_MISC, AccessWidth32, &SmuSviPowerCtrlMisc, StdHeader);
          SmuSviPowerCtrlMisc.PSIVID = CurrentVid;
          SmuSviPowerCtrlMisc.PSIVIDEN = 1;
          GnbLibPciIndirectWrite (MAKE_SBDFO (0, 0, 0, 0, 0xB8), SMUSVI_POWER_CONTROL_MISC, AccessWidth32, &SmuSviPowerCtrlMisc, StdHeader);
          IDS_HDT_CONSOLE (CPU_TRACE, " PsiVid is enabled at P-state %d. PsiVid: %d\n", Pstate, CurrentVid);
          break;
        } else {
          PreviousVid = CurrentVid;
        }
      }
    }
    if (PlatformConfig->VrmProperties[NbVrm].LowPowerThreshold != 0) {
      // Set up NBPSI0_L for VDDNB
      NbVrmLowPowerThreshold = PlatformConfig->VrmProperties[NbVrm].LowPowerThreshold;
      IDS_HDT_CONSOLE (CPU_TRACE, " NB VRM - LowPowerThreshold: %d\n", NbVrmLowPowerThreshold);
      PreviousNbVid = 0xFF;
      PciAddress.AddressValue = NB_PSTATE_CTRL_PCI_ADDR;
      LibAmdPciRead (AccessWidth32, PciAddress, &NbPsCtrl, StdHeader);
      NbPstateMaxVal = NbPsCtrl.NbPstateMaxVal;
      ASSERT (NbPstateMaxVal < NM_NB_PS_REG);
      IDS_HDT_CONSOLE (CPU_TRACE, " NbPstateMaxVal %d\n", NbPstateMaxVal);
      for (NbPstate = 0; NbPstate <= NbPstateMaxVal; NbPstate++) {
        // Check only valid NB P-state
        if (FamilySpecificServices->GetNbIddMax (FamilySpecificServices, (UINT8) NbPstate, &NbPstateCurrent, StdHeader) != TRUE) {
          continue;
        }
        PciAddress.Address.Register = (NB_PSTATE_0 + (sizeof (NB_PSTATE_REGISTER) * NbPstate));
        LibAmdPciRead (AccessWidth32, PciAddress, &NbPstateReg, StdHeader);
        CurrentNbVid = (UINT32) GetF15CzNbVid (&NbPstateReg);
        if (NbPstate == NbPstateMaxVal) {
          NextNbPstateCurrent = 0;
        } else {
          // Check only valid NB P-state
          if (FamilySpecificServices->GetNbIddMax (FamilySpecificServices, (UINT8) (NbPstate + 1), &NextNbPstateCurrent, StdHeader) != TRUE) {
            continue;
          }
        }
        if ((NbPstateCurrent <= NbVrmLowPowerThreshold) &&
            (NextNbPstateCurrent <= NbVrmLowPowerThreshold) &&
            (CurrentNbVid != PreviousNbVid)) {
          // Program NbPsi0Vid and NbPsi0VidEn if PSI state is found and stop searching.
          GnbLibPciIndirectRead (MAKE_SBDFO (0, 0, 0, 0, 0xB8), SMUSVI_MISC_VID_STATUS, AccessWidth32, &SmuSviMiscVidStatus, StdHeader);
          SmuSviMiscVidStatus.NB_PSI_VID = CurrentNbVid;
          SmuSviMiscVidStatus.NB_PSI_VID_EN = 1;
          GnbLibPciIndirectWrite (MAKE_SBDFO (0, 0, 0, 0, 0xB8), SMUSVI_MISC_VID_STATUS, AccessWidth32, &SmuSviMiscVidStatus, StdHeader);
          IDS_HDT_CONSOLE (CPU_TRACE, " NbPsi0Vid is enabled at NB P-state %d. NbPsi0Vid: %d\n", NbPstate, CurrentNbVid);
          break;
        } else {
          PreviousNbVid = CurrentNbVid;
        }
      }
    }
  }
  return AGESA_SUCCESS;
}
/**
 * Read SPD data for all dimms during memory recovery.
 *
 * Allocates one heap byte to record the first socket found with memory and a
 * heap table of SPD_DEF_STRUCT per possible dimm, then reads every dimm's
 * SPD via AgesaReadSpdRecovery and marks its presence. Scanning stops at the
 * first socket that has at least one readable dimm; that socket's number is
 * stored in the AMD_REC_MEM_SOCKET_HANDLE heap buffer.
 *
 * @param[in,out]  MemPtr  Pointer to the MEM_DATA_STRUCT
 */
VOID
STATIC
MemRecSPDDataProcess (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  BOOLEAN FindSocketWithMem;
  UINT8 Channel;
  UINT8 Dimm;
  UINT8 MaxSockets;
  UINT8 *SocketWithMem;
  UINT8 Socket;
  AGESA_STATUS AgesaStatus;
  SPD_DEF_STRUCT *DimmSPDPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  AGESA_READ_SPD_PARAMS SpdParam;

  ASSERT (MemPtr != NULL);
  FindSocketWithMem = FALSE;

  //
  // Allocate heap to save socket number with memory on it.
  //
  AllocHeapParams.RequestedBufferSize = sizeof (UINT8);
  AllocHeapParams.BufferHandle = AMD_REC_MEM_SOCKET_HANDLE;
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) == AGESA_SUCCESS) {
    SocketWithMem = (UINT8 *) AllocHeapParams.BufferPtr;
    *SocketWithMem = 0;

    //
    // Allocate heap for the table
    // (one SPD_DEF_STRUCT per socket/channel/dimm slot).
    //
    MaxSockets = (UINT8) GetPlatformNumberOfSockets ();
    AllocHeapParams.RequestedBufferSize = (MaxSockets * MAX_CHANNELS_PER_SOCKET * MAX_DIMMS_PER_CHANNEL * sizeof (SPD_DEF_STRUCT));
    AllocHeapParams.BufferHandle = AMD_MEM_SPD_HANDLE;
    AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
    if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) == AGESA_SUCCESS) {
      MemPtr->SpdDataStructure = (SPD_DEF_STRUCT *) AllocHeapParams.BufferPtr;
      //
      // Initialize SpdParam Structure
      // (copy only the standard header portion from MemPtr).
      //
      LibAmdMemCopy ((VOID *)&SpdParam, (VOID *)MemPtr, (UINTN)sizeof (SpdParam.StdHeader), &MemPtr->StdHeader);
      //
      // Populate SPDDataBuffer
      //
      SpdParam.MemData = MemPtr;
      for (Socket = 0; Socket < MaxSockets; Socket ++) {
        SpdParam.SocketId = Socket;
        for (Channel = 0; Channel < MAX_CHANNELS_PER_SOCKET; Channel++) {
          SpdParam.MemChannelId = Channel;
          for (Dimm = 0; Dimm < MAX_DIMMS_PER_CHANNEL; Dimm++) {
            SpdParam.DimmId = Dimm;
            // Index into the flat (socket, channel, dimm) SPD table.
            DimmSPDPtr = &(MemPtr->SpdDataStructure[(Socket * MAX_CHANNELS_PER_SOCKET + Channel) * MAX_DIMMS_PER_CHANNEL + Dimm]);
            SpdParam.Buffer = DimmSPDPtr->Data;
            AgesaStatus = AgesaReadSpdRecovery (0, &SpdParam);
            if (AgesaStatus == AGESA_SUCCESS) {
              DimmSPDPtr->DimmPresent = TRUE;
              IDS_HDT_CONSOLE (MEM_FLOW, "SPD Socket %d Channel %d Dimm %d: %08x\n", Socket, Channel, Dimm, SpdParam.Buffer);
              if (!FindSocketWithMem) {
                FindSocketWithMem = TRUE;
              }
            } else {
              DimmSPDPtr->DimmPresent = FALSE;
            }
          }
        }
        // Stop at the first socket that has memory; record its number.
        if (FindSocketWithMem) {
          *SocketWithMem = Socket;
          break;
        }
      }
    }
  }
}
/**
 * Program the Tx/Rx lane-mux selectors for a PCIe wrapper.
 *
 * Starts from the identity mapping in LaneMuxSelectorTable, then for every
 * allocated PCIe engine swaps selector entries (in pairs of lanes) so that
 * core lanes map to the engine's phy lanes, honoring link reversal. The
 * resulting 4-bit-per-entry selector words are written to WRAP_8021 (Tx)
 * and WRAP_8022 (Rx).
 *
 * @param[in]  Wrapper  Pointer to wrapper config descriptor
 * @param[in]  Pcie     Pointer to global PCIe configuration
 */
VOID
PcieTopologyApplyLaneMux (
  IN       PCIe_WRAPPER_CONFIG   *Wrapper,
  IN       PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  PCIe_ENGINE_CONFIG  *EngineList;
  UINT8               CurrentPhyLane;
  UINT8               CurrentCoreLane;
  UINT8               CoreLaneIndex;
  UINT8               PhyLaneIndex;
  UINT8               NumberOfPhyLane;
  UINT8               TxLaneMuxSelectorArray [sizeof (LaneMuxSelectorTable)];
  UINT8               RxLaneMuxSelectorArray [sizeof (LaneMuxSelectorTable)];
  UINT8               Index;
  UINT32              TxMaxSelectorValue;
  UINT32              RxMaxSelectorValue;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyApplyLaneMux Enter\n");
  if (PcieLibIsPcieWrapper (Wrapper)) {
    EngineList = PcieConfigGetChildEngine (Wrapper);
    // Start both selector arrays from the identity mapping.
    LibAmdMemCopy (
      &TxLaneMuxSelectorArray[0],
      &LaneMuxSelectorTable[0],
      sizeof (LaneMuxSelectorTable),
      GnbLibGetHeader (Pcie)
      );
    LibAmdMemCopy (
      &RxLaneMuxSelectorArray[0],
      &LaneMuxSelectorTable[0],
      sizeof (LaneMuxSelectorTable),
      GnbLibGetHeader (Pcie)
      );
    while (EngineList != NULL) {
      if (PcieLibIsPcieEngine (EngineList) && PcieLibIsEngineAllocated (EngineList)) {
        // Lane numbers relative to this wrapper.
        CurrentPhyLane = (UINT8) PcieLibGetLoPhyLane (EngineList) - Wrapper->StartPhyLane;
        NumberOfPhyLane = (UINT8) PcieConfigGetNumberOfPhyLane (EngineList);
        CurrentCoreLane = (UINT8) EngineList->Type.Port.StartCoreLane;
        if (PcieUtilIsLinkReversed (FALSE, EngineList, Pcie)) {
          // Reversed link: map from the top of the engine's core lane range.
          CurrentCoreLane = CurrentCoreLane + PcieConfigGetNumberOfCoreLane (EngineList) - NumberOfPhyLane;
        }
        // Selectors cover lane pairs, hence the stride of 2 and the /2 index.
        for (Index = 0; Index < NumberOfPhyLane; Index = Index + 2 ) {
          CoreLaneIndex = (CurrentCoreLane + Index) / 2;
          PhyLaneIndex = (CurrentPhyLane + Index) / 2;
          // Swap entries so CoreLaneIndex selects PhyLaneIndex, preserving
          // the permutation property of the selector array.
          if (RxLaneMuxSelectorArray [CoreLaneIndex] != PhyLaneIndex) {
            RxLaneMuxSelectorArray [PcieTopologyLocateMuxIndex (RxLaneMuxSelectorArray, PhyLaneIndex)] = RxLaneMuxSelectorArray [CoreLaneIndex];
            RxLaneMuxSelectorArray [CoreLaneIndex] = PhyLaneIndex;
          }
          if (TxLaneMuxSelectorArray [PhyLaneIndex] != CoreLaneIndex) {
            TxLaneMuxSelectorArray [PcieTopologyLocateMuxIndex (TxLaneMuxSelectorArray, CoreLaneIndex)] = TxLaneMuxSelectorArray [PhyLaneIndex];
            TxLaneMuxSelectorArray [PhyLaneIndex] = CoreLaneIndex;
          }
        }
      }
      EngineList = PcieLibGetNextDescriptor (EngineList);
    }
    // Pack the selector arrays into 4-bit fields of the register words.
    RxMaxSelectorValue = 0;
    TxMaxSelectorValue = 0;
    for (Index = 0; Index < sizeof (LaneMuxSelectorTable); Index++) {
      RxMaxSelectorValue |= (RxLaneMuxSelectorArray[Index] << (Index * 4));
      TxMaxSelectorValue |= (TxLaneMuxSelectorArray[Index] << (Index * 4));
    }
    PcieRegisterWrite (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8021_ADDRESS),
      TxMaxSelectorValue,
      FALSE,
      Pcie
      );
    PcieRegisterWrite (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8022_ADDRESS),
      RxMaxSelectorValue,
      FALSE,
      Pcie
      );
  }
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyApplyLaneMux Exit\n");
}
/**
 * Select the master PCIe PLL for a wrapper.
 *
 * Scans the wrapper's engines for the last allocated PCIe port and uses its
 * start lane as the master lane; hotplug-capable ports are remembered as a
 * fallback, and if no port qualifies the wrapper's first phy lane is used.
 * The wrapper-relative lane number then selects PLL A/B/C/D (one PLL per
 * group of four lanes) in WRAP_8013.
 *
 * @param[in]  Wrapper  Pointer to wrapper config descriptor
 * @param[in]  Pcie     Pointer to global PCIe configuration
 */
VOID
PcieTopologySelectMasterPll (
  IN       PCIe_WRAPPER_CONFIG   *Wrapper,
  IN       PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  PCIe_ENGINE_CONFIG        *EngineList;
  UINT16                    MasterPhyLane;
  UINT16                    MasterHotplugPhyLane;
  D0F0xE4_WRAP_8013_STRUCT  D0F0xE4_WRAP_8013;

  EngineList = PcieWrapperGetEngineList (Wrapper);
  // 0xffff sentinel: no candidate lane found yet.
  MasterPhyLane = 0xffff;
  MasterHotplugPhyLane = 0xffff;
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologySelectMasterPll Enter\n");
  while (EngineList != NULL) {
    if (PcieLibIsEngineAllocated (EngineList)) {
      if (EngineList->EngineData.EngineType == PciePortEngine) {
        // Last allocated port wins; hotplug ports are also tracked separately.
        MasterPhyLane = EngineList->EngineData.StartLane;
        if (EngineList->Type.Port.PortData.LinkHotplug != HotplugDisabled) {
          MasterHotplugPhyLane = MasterPhyLane;
        }
      }
    }
    EngineList = PcieLibGetNextDescriptor (EngineList);
  }
  if (MasterPhyLane == 0xffff) {
    // No allocated port: fall back to a hotplug lane, then the first phy lane.
    MasterPhyLane = MasterHotplugPhyLane;
    if (MasterPhyLane == 0xffff) {
      MasterPhyLane = Wrapper->StartPhyLane;
    }
  }
  D0F0xE4_WRAP_8013.Value = PcieRegisterRead (
                              Wrapper,
                              WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8013_ADDRESS),
                              Pcie
                              );
  // Convert to a wrapper-relative lane and pick one PLL per 4-lane group.
  MasterPhyLane = MasterPhyLane - Wrapper->StartPhyLane;
  if ( MasterPhyLane <= 3 ) {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x1;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x0;
  } else if (MasterPhyLane <= 7) {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x1;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x0;
  } else if (MasterPhyLane <= 11) {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x1;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x0;
  } else {
    D0F0xE4_WRAP_8013.Field.MasterPciePllA = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllB = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllC = 0x0;
    D0F0xE4_WRAP_8013.Field.MasterPciePllD = 0x1;
  }
  PcieRegisterWrite (
    Wrapper,
    WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8013_ADDRESS),
    D0F0xE4_WRAP_8013.Value,
    FALSE,
    Pcie
    );
  // Fixed: exit trace previously printed "...Enter\n" (copy-paste bug);
  // every Enter/Exit pair in this file logs a matching Exit message.
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologySelectMasterPll Exit\n");
}
/**
 * Execute/clean up reconfiguration
 *
 * Runs the wrapper reconfigure handshake: toggle SRBM reset (FALSE before the
 * sequence, TRUE after), arm ReconfigureEn in WRAP_8062, pulse Reconfigure in
 * WRAP_8060, spin until hardware clears the Reconfigure bit, then set
 * ConfigXferMode and disarm ReconfigureEn.  Statement order is significant;
 * the writes follow the hardware-defined sequence.  No-op for non-PCIe
 * wrappers.
 *
 * @param[in]  Wrapper  Pointer to wrapper config descriptor
 * @param[in]  Pcie     Pointer to global PCIe configuration
 */
VOID
PcieTopologyExecuteReconfig (
  IN      PCIe_WRAPPER_CONFIG   *Wrapper,
  IN      PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  D0F0xE4_WRAP_8062_STRUCT  D0F0xE4_WRAP_8062;
  D0F0xE4_WRAP_8060_STRUCT  D0F0xE4_WRAP_8060;
  // Only PCIe wrappers take part in reconfiguration.
  if (PcieLibIsPcieWrapper (Wrapper)) {
    IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyExecuteReconfig Enter\n");
    PcieTopologyInitSrbmReset (FALSE, Wrapper, Pcie);
    // Read-modify-write both control registers; cache current values first.
    D0F0xE4_WRAP_8062.Value = PcieRegisterRead (
                                Wrapper,
                                WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8062_ADDRESS),
                                Pcie
                                );
    D0F0xE4_WRAP_8060.Value = PcieRegisterRead (
                                Wrapper,
                                WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8060_ADDRESS),
                                Pcie
                                );
    // Step 1: enable reconfiguration.
    D0F0xE4_WRAP_8062.Field.ReconfigureEn = 0x1;
    PcieRegisterWrite (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8062_ADDRESS),
      D0F0xE4_WRAP_8062.Value,
      FALSE,
      Pcie
      );
    // Step 2: trigger the reconfigure pulse.
    D0F0xE4_WRAP_8060.Field.Reconfigure = 0x1;
    PcieRegisterWrite (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8060_ADDRESS),
      D0F0xE4_WRAP_8060.Value,
      FALSE,
      Pcie
      );
    // Step 3: wait for hardware to self-clear Reconfigure.
    // NOTE(review): unbounded busy-wait - this hangs the boot path if the
    // bit never clears; confirm whether a timeout is required here.
    do {
      D0F0xE4_WRAP_8060.Value = PcieRegisterRead (
                                  Wrapper,
                                  WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8060_ADDRESS),
                                  Pcie
                                  );
    } while (D0F0xE4_WRAP_8060.Field.Reconfigure == 1);
    // Step 4: switch to config transfer mode and disarm reconfiguration.
    D0F0xE4_WRAP_8062.Field.ConfigXferMode = 0x1;
    D0F0xE4_WRAP_8062.Field.ReconfigureEn = 0x0;
    PcieRegisterWrite (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8062_ADDRESS),
      D0F0xE4_WRAP_8062.Value,
      FALSE,
      Pcie
      );
    PcieTopologyInitSrbmReset (TRUE, Wrapper, Pcie);
    IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyExecuteReconfig Exit\n");
  }
}
/**
 * Program the non-SPD "other" DRAM timings for TN (per the function-name
 * suffix convention used by the sibling MemN*Nb routines):
 * TrdrdSdSc/SdDc/Dd, TwrwrSdSc/SdDc/Dd, Twrrd, TrwtTO and TrwtWB.
 *
 * All formulas below work in half-MEMCLK units where a CDD term appears;
 * "(x + 1) / 2" implements CEIL(x / 2) for non-negative x.
 *
 * @param[in,out]  *NBPtr  - Pointer to the MEM_NB_BLOCK
 */
VOID
MemNSetOtherTimingTN (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  INT8 ROD;            // Read ODT delay
  INT8 WOD;            // Write ODT delay
  INT8 LD;             // Latency difference Tcl - Tcwl
  INT8 WrEarlyx2;      // WrDqDqsEarly, i.e. 2 * WrEarly
  INT8 CDDTrdrdSdDc;   // CDD terms from MemNCalcCDDNb for each timing pair
  INT8 CDDTrdrdDd;
  INT8 CDDTwrwrDd;
  INT8 CDDTwrwrSdDc;
  INT8 CDDTrwtTO;
  INT8 CDDTwrrd;
  UINT8 TrdrdSdDc;
  UINT8 TrdrdDd;
  UINT8 TwrwrSdDc;
  UINT8 TwrwrDd;
  UINT8 TrdrdSdSc;
  UINT8 TwrwrSdSc;
  UINT8 Twrrd;
  UINT8 TrwtTO;
  BOOLEAN PerRankTimingEn;
  CH_DEF_STRUCT *ChannelPtr;

  ChannelPtr = NBPtr->ChannelPtr;
  PerRankTimingEn = (BOOLEAN) (MemNGetBitFieldNb (NBPtr, BFPerRankTimingEn));
  //
  // Latency Difference (LD) = Tcl - Tcwl
  //
  LD = (INT8) (MemNGetBitFieldNb (NBPtr, BFTcl)) - (INT8) (MemNGetBitFieldNb (NBPtr, BFTcwl));
  //
  // Read ODT Delay (ROD) = MAX ( 0, (RdOdtOnDuration - 6)) + MAX ( 0, (RdOdtTrnOnDly - LD))
  //
  ROD = MAX (0, (INT8) (MemNGetBitFieldNb (NBPtr, BFRdOdtOnDuration) - 6)) +
        MAX ( 0, (INT8) (MemNGetBitFieldNb (NBPtr, BFRdOdtTrnOnDly) - LD));
  //
  // Write ODT Delay (WOD) = MAX (0, (WrOdtOnDuration - 6))
  //
  WOD = MAX (0, (INT8) (MemNGetBitFieldNb (NBPtr, BFWrOdtOnDuration) - 6));
  //
  // WrEarly = ABS (WrDqDqsEarly) / 2
  //
  WrEarlyx2 = (INT8) MemNGetBitFieldNb (NBPtr, BFWrDqDqsEarly);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tLD: %d ROD: %d WOD: %d WrEarlyx2: %d\n\n", LD, ROD, WOD, WrEarlyx2);
  //
  // Read to Read Timing (TrdrdSdSc, TrdrdScDc, TrdrdDd)
  //
  // TrdrdSdSc = 1.
  // TrdrdSdDc (in MEMCLKs) = MAX(TrdrdSdSc, 3 + (IF (D18F2xA8_dct[1:0][PerRankTimingEn])
  //                          THEN CEIL(CDDTrdrdSdDc / 2 ) ELSE 0 ENDIF)).
  // TrdrdDd = MAX(TrdrdSdDc, CEIL(MAX(ROD + 3, CDDTrdrdDd/2 + 3.5)))
  //
  TrdrdSdSc = 1;
  CDDTrdrdSdDc = (INT8) MemNCalcCDDNb (NBPtr, AccessRcvEnDly, AccessRcvEnDly, TRUE, FALSE);
  // "(CDD + 1) / 2" = CEIL(CDD / 2); the CDD term only applies with per-rank timing.
  TrdrdSdDc = MAX (0, PerRankTimingEn ? (3 + (CDDTrdrdSdDc + 1) / 2) : 3);
  TrdrdSdDc = MAX (TrdrdSdSc, TrdrdSdDc);
  CDDTrdrdDd = (INT8) MemNCalcCDDNb (NBPtr, AccessRcvEnDly, AccessRcvEnDly, FALSE, TRUE);
  // "+ 7 + 1" = the 3.5-clock constant expressed in half clocks, plus the ceiling term.
  TrdrdDd = MAX (ROD + 3, (CDDTrdrdDd + 7 + 1) / 2);
  TrdrdDd = MAX (TrdrdSdDc, TrdrdDd);
  MemNSetBitFieldNb (NBPtr, BFTrdrdDd, (UINT32) TrdrdDd);
  MemNSetBitFieldNb (NBPtr, BFTrdrdSdDc, (UINT32) TrdrdSdDc);
  MemNSetBitFieldNb (NBPtr, BFTrdrdSdSc, (UINT32) TrdrdSdSc);
  //
  // Write to Write Timing (TwrwrSdSc, TwrwrScDc, TwrwrDd)
  //
  // TwrwrSdSc = 1.
  // TwrwrSdDc = CEIL(MAX(WOD + 3, CDDTwrwrSdDc / 2 +
  //             (IF (D18F2xA8_dct[1:0][PerRankTimingEn]) THEN 3.5 ELSE 3 ENDIF))).
  //
  // TwrwrDd = CEIL (MAX (WOD + 3, CDDTwrwrDd / 2 + 3.5))
  // TwrwrSdSc <= TwrwrSdDc <= TwrwrDd
  //
  TwrwrSdSc = 1;
  CDDTwrwrSdDc = (INT8) MemNCalcCDDNb (NBPtr, AccessWrDqsDly, AccessWrDqsDly, TRUE, FALSE);
  // 7 half clocks = 3.5, 6 half clocks = 3; "+ 1" provides the ceiling.
  TwrwrSdDc = (UINT8) MAX (WOD + 3, (CDDTwrwrSdDc + (PerRankTimingEn ? 7 : 6) + 1 ) / 2);
  CDDTwrwrDd = (INT8) MemNCalcCDDNb (NBPtr, AccessWrDqsDly, AccessWrDqsDly, FALSE, TRUE);
  TwrwrDd = (UINT8) MAX ((UINT8) (WOD + 3), (CDDTwrwrDd + 7 + 1) / 2);
  // Enforce the ordering constraint TwrwrSdSc <= TwrwrSdDc <= TwrwrDd.
  TwrwrSdDc = (TwrwrSdSc <= TwrwrSdDc) ? TwrwrSdDc : TwrwrSdSc;
  TwrwrDd = (TwrwrSdDc <= TwrwrDd) ? TwrwrDd : TwrwrSdDc;
  MemNSetBitFieldNb (NBPtr, BFTwrwrDd, (UINT32) TwrwrDd);
  MemNSetBitFieldNb (NBPtr, BFTwrwrSdDc, (UINT32) TwrwrSdDc);
  MemNSetBitFieldNb (NBPtr, BFTwrwrSdSc, (UINT32) TwrwrSdSc);
  //
  // Write to Read DIMM Termination Turn-around
  //
  // Twrrd = MAX ( 1, CEIL (MAX (WOD, (CDDTwrrd / 2) + 0.5 - WrEarly) - LD + 3))
  //
  CDDTwrrd = (INT8) MemNCalcCDDNb (NBPtr, AccessWrDqsDly, AccessRcvEnDly, TRUE, TRUE);
  // Half-clock form: (CDD + 1 - WrEarlyx2 + 1) / 2 = CEIL(CDD/2 + 0.5 - WrEarly).
  Twrrd = MAX (1, MAX (WOD, (CDDTwrrd + 1 - WrEarlyx2 + 1) / 2) - LD + 3);
  MemNSetBitFieldNb (NBPtr, BFTwrrd, (UINT32) Twrrd);
  //
  // Read to Write Turnaround for Data, DQS Contention
  //
  // TrwtTO = CEIL (MAX (ROD, (CDDTrwtTO / 2) - 0.5 + WrEarly) + LD + 3)
  //
  CDDTrwtTO = (INT8) MemNCalcCDDNb (NBPtr, AccessRcvEnDly, AccessWrDqsDly, TRUE, TRUE);
  // With a single DIMM the ROD term drops out of the MAX.
  TrwtTO = MAX ((ChannelPtr->Dimms == 1 ? 0 : ROD) , (CDDTrwtTO - 1 + WrEarlyx2 + 1) / 2) + LD + 3;
  MemNSetBitFieldNb (NBPtr, BFTrwtTO, (UINT32) TrwtTO);
  //
  // Read to Write Turnaround for opportunistic Write Bursting
  //
  // TrwtWB = TrwtTO + 1
  //
  MemNSetBitFieldNb (NBPtr, BFTrwtWB, (UINT32) TrwtTO + 1);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\t TrdrdSdSc : %02x\n", TrdrdSdSc);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCDDTrdrdSdDc : %02x TrdrdSdDc : %02x\n", CDDTrdrdSdDc, TrdrdSdDc);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCDDTrdrdDd : %02x TrdrdDd : %02x\n\n", CDDTrdrdDd, TrdrdDd);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\t TwrwrSdSc : %02x\n", TwrwrSdSc);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCDDTwrwrSdDc : %02x TwrwrSdDc : %02x\n", CDDTwrwrSdDc, TwrwrSdDc );
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCDDTwrwrDd : %02x TwrwrDd : %02x\n\n", CDDTwrwrDd, TwrwrDd);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\t TrwtWB : %02x\n", TrwtTO + 1);
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCDDTwrrd : %02x Twrrd : %02x\n", (UINT8) CDDTwrrd, (UINT8) Twrrd );
  IDS_HDT_CONSOLE (MEM_FLOW, "\t\tCDDTrwtTO : %02x TrwtTO : %02x\n\n", (UINT8) CDDTrwtTO, (UINT8) TrwtTO );
}
/**
 *
 * A sub-function which extracts Slow mode, Address timing and Output driver compensation value
 * from a input table and store those value to a specific address.
 *
 * @param[in,out]   *NBPtr   - Pointer to the MEM_NB_BLOCK
 * @param[in]       *EntryOfTables     - Pointer to MEM_PSC_TABLE_BLOCK
 *
 * @return          TRUE - Table values can be extracted per dimm population and ranks type.
 * @return          FALSE - Table values cannot be extracted per dimm population and ranks type.
 *
 */
BOOLEAN
MemPGetSAO (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       MEM_PSC_TABLE_BLOCK *EntryOfTables
  )
{
  UINT8 i;
  UINT8 MaxDimmPerCh;
  UINT8 NOD;                         // Number-of-dimms bit mask (1 << (MaxDimmPerCh - 1))
  UINT8 TableSize;
  UINT32 CurDDRrate;                 // Current DDR rate as a one-hot bit mask
  UINT8 DDR3Voltage;                 // VDDIO as a one-hot bit mask
  UINT16 RankTypeOfPopulatedDimm;
  UINT16 RankTypeInTable;
  UINT8 PsoMaskSAO;
  DIMM_TYPE DimmType;
  CPU_LOGICAL_ID LogicalCpuid;
  UINT8 PackageType;
  PSCFG_SAO_ENTRY *TblPtr;
  CH_DEF_STRUCT *CurrentChannel;

  CurrentChannel = NBPtr->ChannelPtr;
  TblPtr = NULL;
  TableSize = 0;
  PackageType = 0;
  LogicalCpuid.Family = AMD_FAMILY_UNKNOWN;
  MaxDimmPerCh = GetMaxDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, CurrentChannel->ChannelID);
  NOD = (UINT8) 1 << (MaxDimmPerCh - 1);
  // Classify the channel's DIMM type; soldered-down SODIMM is detected via a
  // platform override entry.
  if (CurrentChannel->RegDimmPresent != 0) {
    DimmType = RDIMM_TYPE;
  } else if (CurrentChannel->SODimmPresent != 0) {
    DimmType = SODIMM_TYPE;
    if (FindPSOverrideEntry (NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_SOLDERED_DOWN_SODIMM_TYPE, NBPtr->MCTPtr->SocketId, NBPtr->ChannelPtr->ChannelID, 0, NULL, NULL) != NULL) {
      DimmType = SODWN_SODIMM_TYPE;
    }
  } else if (CurrentChannel->LrDimmPresent != 0) {
    DimmType = LRDIMM_TYPE;
  } else {
    DimmType = UDIMM_TYPE;
  }
  i = 0;
  // Obtain table pointer, table size, Logical Cpuid and PSC type according to Dimm, NB and package type.
  while (EntryOfTables->TblEntryOfSAO[i] != NULL) {
    if (((EntryOfTables->TblEntryOfSAO[i])->Header.DimmType & DimmType) != 0) {
      if (((EntryOfTables->TblEntryOfSAO[i])->Header.NumOfDimm & NOD) != 0) {
        //
        // Determine if this is the expected NB Type
        //
        LogicalCpuid = (EntryOfTables->TblEntryOfSAO[i])->Header.LogicalCpuid;
        PackageType = (EntryOfTables->TblEntryOfSAO[i])->Header.PackageType;
        if (MemPIsIdSupported (NBPtr, LogicalCpuid, PackageType)) {
          TblPtr = (PSCFG_SAO_ENTRY *) ((EntryOfTables->TblEntryOfSAO[i])->TBLPtr);
          TableSize = (EntryOfTables->TblEntryOfSAO[i])->TableSize;
          break;
        }
      }
    }
    i++;
  }
  // Check whether no table entry is found.
  if (EntryOfTables->TblEntryOfSAO[i] == NULL) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo SlowAccMode, AddrTmg and ODCCtrl table\n");
    return FALSE;
  }
  // Encode current speed and voltage as one-hot masks for matching against
  // the table's DDRrate / VDDIO bit fields.
  CurDDRrate = (UINT32) (1 << (CurrentChannel->DCTPtr->Timings.Speed / 66));
  DDR3Voltage = (UINT8) (1 << CONVERT_VDDIO_TO_ENCODED (NBPtr->RefPtr->DDR3Voltage));
  RankTypeOfPopulatedDimm = MemPGetPsRankType (CurrentChannel);
  // Scan the selected table for an entry matching dimm count, speed, voltage
  // and rank population; the first match programs the channel and exits with
  // i < TableSize.
  for (i = 0; i < TableSize; i++) {
    MemPConstructRankTypeMap ((UINT16) TblPtr->Dimm0, (UINT16) TblPtr->Dimm1, (UINT16) TblPtr->Dimm2, &RankTypeInTable);
    if (TblPtr->DimmPerCh == MaxDimmPerCh) {
      if ((TblPtr->DDRrate & CurDDRrate) != 0) {
        if ((TblPtr->VDDIO & DDR3Voltage) != 0) {
          if ((RankTypeInTable & RankTypeOfPopulatedDimm) == RankTypeOfPopulatedDimm) {
            CurrentChannel->DctAddrTmg = TblPtr->AddTmgCtl;
            CurrentChannel->DctOdcCtl = TblPtr->ODC;
            CurrentChannel->SlowMode = (TblPtr->SlowMode == 1) ? TRUE : FALSE;
            break;
          }
        }
      }
    }
    TblPtr++;
  }
  //
  // If there is no entry, check if overriding values (SlowAccMode, AddrTmg and ODCCtrl) existed. If not, show no entry found.
  //
  // PsoMaskSAO is non-zero only when all three overrides were applied
  // (results are AND-ed together).
  PsoMaskSAO = (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_SLOWACCMODE);
  PsoMaskSAO &= (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_ODCCTRL);
  PsoMaskSAO &= (UINT8) MemPProceedTblDrvOverride (NBPtr, NBPtr->RefPtr->PlatformMemoryConfiguration, PSO_TBLDRV_ADDRTMG);
  if ((PsoMaskSAO == 0) && (i == TableSize)) {
    IDS_HDT_CONSOLE (MEM_FLOW, "\nNo SlowAccMode, AddrTmg and ODCCtrl entries\n");
  } else {
    // Either a table entry matched (i < TableSize) or overrides supplied values.
    return TRUE;
  }
  // No values found yet: if the final VDDIO has not been determined, defer
  // instead of reporting an error.
  if (NBPtr->SharedPtr->VoltageMap != VDDIO_DETERMINED) {
    return TRUE;
  }
  PutEventLog (AGESA_ERROR, MEM_ERROR_SAO_NOT_FOUND, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
  SetMemError (AGESA_ERROR, NBPtr->MCTPtr);
  if (!NBPtr->MemPtr->ErrorHandling (NBPtr->MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
    ASSERT (FALSE);
  }
  return FALSE;
}
/**
 *
 * Run DQS training on all dies in parallel: launch training on each remote
 * die's AP, train the BSP die locally, then poll the APs and copy each die's
 * training results back over the AP data-transfer service into the BSP's
 * structures.
 *
 * @param[in,out]   *mmPtr   - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @return          TRUE -  No fatal error occurs.
 * @return          FALSE - Fatal error occurs.
 */
BOOLEAN
MemMParallelTraining (
  IN OUT   MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  MEM_DATA_STRUCT *MemPtr;
  MEM_NB_BLOCK *NBPtr;
  DIE_INFO TrainInfo[MAX_NODES_SUPPORTED];
  AP_DATA_TRANSFER ReturnData;
  AGESA_STATUS Status;
  UINT8 ApSts;
  UINT8 Die;
  UINT8 Socket;
  UINT32 Module;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 Time;                       // Poll-loop iteration counter (~1 us per pass)
  UINT32 TimeOut;
  UINT32 TargetApicId;
  BOOLEAN StillTraining;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 *BufferPtr;
  BOOLEAN TimeoutEn;

  NBPtr = mmPtr->NBPtr;
  MemPtr = mmPtr->MemPtr;
  StdHeader = &(mmPtr->MemPtr->StdHeader);
  Time = 0;
  TimeOut = PARALLEL_TRAINING_TIMEOUT;
  TimeoutEn = TRUE;
  // IDS hook may disable the timeout for debug.
  IDS_TIMEOUT_CTL (&TimeoutEn);
  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart parallel training\n");
  AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, StdHeader);
  //
  // Initialize Training Info Array
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++) {
    Socket = TrainInfo[Die].Socket = NBPtr[Die].MCTPtr->SocketId;
    Module = NBPtr[Die].MCTPtr->DieId;
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    // Training runs on the die's lowest core.
    TrainInfo[Die].Core = (UINT8) (LowCore & 0x000000FF);
    IDS_HDT_CONSOLE (MEM_FLOW, "\tLaunch core %d of socket %d\n", LowCore, Socket);
    TrainInfo[Die].Training = FALSE;
  }
  //
  // Start Training on Each remote die.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (Die != BSP_DIE) {
      NBPtr[Die].BeforeDqsTraining (&(mmPtr->NBPtr[Die]));
      if (NBPtr[Die].MCTPtr->NodeMemSize != 0) {
        if (!NBPtr[Die].FeatPtr->Training (&(mmPtr->NBPtr[Die]))) {
          // Fail to launch code on AP
          PutEventLog (AGESA_ERROR, MEM_ERROR_PARALLEL_TRAINING_LAUNCH_FAIL, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
          SetMemError (AGESA_ERROR, NBPtr[Die].MCTPtr);
          MemPtr->ErrorHandling (NBPtr[Die].MCTPtr, EXCLUDE_ALL_DCT, EXCLUDE_ALL_CHIPSEL, &MemPtr->StdHeader);
        } else {
          TrainInfo[Die].Training = TRUE;
        }
      }
    }
  }
  //
  // Call training on BSP
  //
  IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NBPtr[BSP_DIE].Node);
  NBPtr[BSP_DIE].BeforeDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].TrainingFlow (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].AfterDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  //
  // Get Results from remote processors training
  //
  do {
    StillTraining = FALSE;
    for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
      //
      // For each Die that is training, read the status
      //
      if (TrainInfo[Die].Training == TRUE) {
        GetLocalApicIdForCore (TrainInfo[Die].Socket, TrainInfo[Die].Core, &TargetApicId, StdHeader);
        ApSts = ApUtilReadRemoteControlByte (TargetApicId, StdHeader);
        // Bit 7 clear => AP has finished and has result data to transfer.
        if ((ApSts & 0x80) == 0) {
          //
          // Allocate buffer for received data
          //
          // Buffer layout must mirror the copy sequence below:
          // DIE_STRUCT, then per-DCT DCT_STRUCTs with their CH_DEF_STRUCTs,
          // then MEM_PS_BLOCKs, then the delay/failure-mask tables; "+ 3"
          // pads up for dword-granular transfer.
          AllocHeapParams.RequestedBufferSize = (
            sizeof (DIE_STRUCT) +
            NBPtr[Die].DctCount * (
              sizeof (DCT_STRUCT) + (
                NBPtr[Die].ChannelCount * (
                  sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                    (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                     NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                     NUMBER_OF_DELAY_TABLES) +
                    (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                  )
                )
              )
            )
          ) + 3;
          AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Die, 0, 0);
          AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
          if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
            //
            // Receive Training Results
            //
            ReturnData.DataPtr = AllocHeapParams.BufferPtr;
            ReturnData.DataSizeInDwords = (UINT16) AllocHeapParams.RequestedBufferSize / 4;
            ReturnData.DataTransferFlags = 0;
            Status = ApUtilReceiveBuffer (TrainInfo[Die].Socket, TrainInfo[Die].Core, &ReturnData, StdHeader);
            if (Status != AGESA_SUCCESS) {
              SetMemError (Status, NBPtr[Die].MCTPtr);
            }
            // Unpack the serialized buffer in the same order it was packed.
            BufferPtr = AllocHeapParams.BufferPtr;
            LibAmdMemCopy (NBPtr[Die].MCTPtr, BufferPtr, sizeof (DIE_STRUCT), StdHeader);
            BufferPtr += sizeof (DIE_STRUCT);
            LibAmdMemCopy ( NBPtr[Die].MCTPtr->DctData,
                            BufferPtr,
                            NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT)),
                            StdHeader);
            BufferPtr += NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT));
            LibAmdMemCopy ( NBPtr[Die].PSBlock,
                            BufferPtr,
                            NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK),
                            StdHeader);
            BufferPtr += NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK);
            LibAmdMemCopy ( NBPtr[Die].MCTPtr->DctData[0].ChData[0].RcvEnDlys,
                            BufferPtr,
                            (NBPtr[Die].DctCount * NBPtr[Die].ChannelCount) *
                            ((NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                              NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                              NUMBER_OF_DELAY_TABLES) +
                             (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                            ),
                            StdHeader);
            HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);
            NBPtr[Die].AfterDqsTraining (&(mmPtr->NBPtr[Die]));
            TrainInfo[Die].Training = FALSE;
          } else {
            PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_RECEIVED_DATA, NBPtr[Die].Node, 0, 0, 0, StdHeader);
            SetMemError (AGESA_FATAL, NBPtr[Die].MCTPtr);
            ASSERT(FALSE); // Insufficient Heap Space allocation for parallel training buffer
          }
        } else if (ApSts == CORE_IDLE) {
          // AP does not have buffer to transmit to BSP
          // AP fails to locate a buffer for data transfer
          TrainInfo[Die].Training = FALSE;
        } else {
          // Signal to loop through again
          StillTraining = TRUE;
        }
      }
    }
    // Wait for 1 us
    MemUWait10ns (100, NBPtr->MemPtr);
    Time ++;
  } while ((StillTraining) && ((Time < TimeOut) || !TimeoutEn)); // Continue until all Dies are finished
                                                                 // if cannot finish in 1 s, do fatal exit
  if (StillTraining && TimeoutEn) {
    // Parallel training time out, do fatal exit, as there is at least one AP hangs.
    PutEventLog (AGESA_FATAL, MEM_ERROR_PARALLEL_TRAINING_TIME_OUT, 0, 0, 0, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, NBPtr[BSP_DIE].MCTPtr);
    ASSERT(FALSE); // Timeout occurred while still training
  }
  // Report FALSE if any die ended training with a fatal error.
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      return FALSE;
    }
  }
  return TRUE;
}
/**
 * Main entry point for the AMD_INIT_LATE function.
 *
 * This entry point is responsible for creating any desired ACPI tables, providing
 * information for DMI, and to prepare the processors for the operating system
 * bootstrap load process.
 *
 * @param[in,out] LateParams     Required input parameters for the AMD_INIT_LATE
 *                               entry point.
 *
 * @return        Aggregated status across all internal AMD late calls invoked.
 *
 */
AGESA_STATUS
AmdInitLate (
  IN OUT   AMD_LATE_PARAMS *LateParams
  )
{
  AGESA_STATUS  AgesaStatus;
  AGESA_STATUS  AmdInitLateStatus;

  // Fix: validate the parameter block BEFORE any dereference. Previously this
  // ASSERT ran only after LateParams had already been dereferenced several
  // times (for tracing and test points), defeating its purpose.
  ASSERT (LateParams != NULL);

  IDS_HDT_CONSOLE (MAIN_FLOW, "AmdInitLate: Start\n\n");
  AGESA_TESTPOINT (TpIfAmdInitLateEntry, &LateParams->StdHeader);
  IDS_PERF_TIME_MEASURE (&LateParams->StdHeader);
  AmdInitLateStatus = AGESA_SUCCESS;
  IDS_OPTION_HOOK (IDS_INIT_LATE_BEFORE, LateParams, &LateParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "CreatSystemTable: Start\n");
  // _PSS, XPSS, _PCT, _PSD, _PPC, _CST, _CSD Tables
  if ((LateParams->PlatformConfig.UserOptionPState) ||
      (IsFeatureEnabled (IoCstate, &LateParams->PlatformConfig, &LateParams->StdHeader))) {
    AgesaStatus = ((*(OptionPstateLateConfiguration.SsdtFeature)) (&LateParams->StdHeader, &LateParams->PlatformConfig, &LateParams->AcpiPState));
    if (AgesaStatus > AmdInitLateStatus) {
      AmdInitLateStatus = AgesaStatus;
    }
  }
  // SRAT Table Generation
  if (LateParams->PlatformConfig.UserOptionSrat) {
    AgesaStatus = CreateAcpiSrat (&LateParams->StdHeader, &LateParams->AcpiSrat);
    if (AgesaStatus > AmdInitLateStatus) {
      AmdInitLateStatus = AgesaStatus;
    }
  }
  // SLIT Table Generation
  if (LateParams->PlatformConfig.UserOptionSlit) {
    AgesaStatus = CreateAcpiSlit (&LateParams->StdHeader, &LateParams->PlatformConfig, &LateParams->AcpiSlit);
    if (AgesaStatus > AmdInitLateStatus) {
      AmdInitLateStatus = AgesaStatus;
    }
  }
  // WHEA Table Generation
  if (LateParams->PlatformConfig.UserOptionWhea) {
    AgesaStatus = CreateAcpiWhea (&LateParams->StdHeader, &LateParams->AcpiWheaMce, &LateParams->AcpiWheaCmc);
    if (AgesaStatus > AmdInitLateStatus) {
      AmdInitLateStatus = AgesaStatus;
    }
  }
  // DMI Table Generation
  if (LateParams->PlatformConfig.UserOptionDmi) {
    AgesaStatus = CreateDmiRecords (&LateParams->StdHeader, &LateParams->DmiTable);
    if (AgesaStatus > AmdInitLateStatus) {
      AmdInitLateStatus = AgesaStatus;
    }
  }
  IDS_HDT_CONSOLE (MAIN_FLOW, "CreatSystemTable: End\n");
  // Cpu Features
  IDS_HDT_CONSOLE (MAIN_FLOW, "DispatchCpuFeatures: LateStart\n");
  AgesaStatus = DispatchCpuFeatures (CPU_FEAT_INIT_LATE_END, &LateParams->PlatformConfig, &LateParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "DispatchCpuFeatures: LateEnd\n");
  if (AgesaStatus > AmdInitLateStatus) {
    AmdInitLateStatus = AgesaStatus;
  }
  // It is the last function run by the AGESA CPU module and prepares the processor
  // for the operating system bootstrap load process.
  IDS_HDT_CONSOLE (MAIN_FLOW, "AmdCpuLate: Start\n");
  AgesaStatus = AmdCpuLate (&LateParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "AmdCpuLate: End\n");
  if (AgesaStatus > AmdInitLateStatus) {
    AmdInitLateStatus = AgesaStatus;
  }
  AgesaStatus = GnbInitAtLate (LateParams);
  if (AgesaStatus > AmdInitLateStatus) {
    AmdInitLateStatus = AgesaStatus;
  }
  IDS_OPTION_HOOK (IDS_INIT_LATE_AFTER, LateParams, &LateParams->StdHeader);
  IDS_OPTION_HOOK (IDS_BEFORE_OS, LateParams, &LateParams->StdHeader);
  IDS_PERF_TIME_MEASURE (&LateParams->StdHeader);
  AGESA_TESTPOINT (TpIfAmdInitLateExit, &LateParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "\nAmdInitLate: End\n\n");
  AGESA_TESTPOINT (EndAgesaTps, &LateParams->StdHeader);
  //End Debug Print Service
  IDS_HDT_CONSOLE_EXIT (&LateParams->StdHeader);
  return AmdInitLateStatus;
}
/**
 *
 * This function programs Vref for 2D Read/Write Training
 *
 * @param[in,out]   *NBPtr - Pointer to the MEM_NB_BLOCK
 * @param[in]       *VrefPtr - Pointer to Vref value
 *
 * @return  BOOLEAN
 *              TRUE - Success
 *              FAIL (External Callout only)
 *
 */
BOOLEAN
STATIC
MemFRdWr2DProgramVrefML (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN       VOID *VrefPtr
  )
{
  AGESA_STATUS Status;
  MEM_DATA_STRUCT *MemPtr;
  ID_INFO CallOutIdInfo;
  VOLTAGE_ADJUST Va;
  UINT8 Vref;

  ASSERT (NBPtr != NULL);
  ASSERT (VrefPtr != NULL);
  MemPtr = NBPtr->MemPtr;
  Vref = *(UINT8*)VrefPtr;
  // Identify the socket/die for the platform voltage-adjust callout.
  CallOutIdInfo.IdField.SocketId = NBPtr->MCTPtr->SocketId;
  CallOutIdInfo.IdField.ModuleId = NBPtr->MCTPtr->DieId;
  // Seed the callout block's standard header from MemPtr.
  LibAmdMemCopy ((VOID *)&Va, (VOID *)MemPtr, (UINTN)sizeof (Va.StdHeader), &MemPtr->StdHeader);
  Va.MemData = MemPtr;
  Status = AGESA_SUCCESS;
  if (NBPtr->TechPtr->Direction == DQS_READ_DIR) {
    if (NBPtr->RefPtr->ExternalVrefCtl == FALSE) {
      //
      // Internal vref control
      //
      ASSERT (Vref < 61);
      // Map the 0..60 training step onto the VrefDAC encoding
      // (values below 30 map downward from 62, the rest offset by 30) -
      // assumes the hardware DAC encoding documented for this family; the
      // result is written to DAC bits [7:3].
      if (Vref < 30) {
        Vref = (62 - Vref);
      } else {
        Vref = (Vref - 30);
      }
      NBPtr->SetBitField (NBPtr, BFVrefDAC, Vref << 3);
    } else {
      //
      // External vref control
      //
      AGESA_TESTPOINT (TpProcMemBefore2dTrainExtVrefChange, &(NBPtr->MemPtr->StdHeader));
      NBPtr->MemPtr->ParameterListPtr->ExternalVrefValue = Vref;
      IDS_HDT_CONSOLE (MEM_FLOW, "\n2D Read Training External CPU Vref Callout \n");
      Va.VoltageType = VTYPE_CPU_VREF;
      // Offset by 15 and multiply by 2 (signed percentage step).
      Va.AdjustValue = Vref = (Vref - 15) << 1;
      Status = AgesaExternalVoltageAdjust ((UINTN)CallOutIdInfo.IdInformation, &Va);
      AGESA_TESTPOINT (TpProcMemAfter2dTrainExtVrefChange, &(NBPtr->MemPtr->StdHeader));
    }
  } else {
    //
    // DIMM Vref Control
    //
    Va.VoltageType = VTYPE_DIMM_VREF;
    //
    // Offset by 15 and multiply by 2.
    //
    Va.AdjustValue = Vref = (Vref - 15) << 1;
    Status = AgesaExternalVoltageAdjust ((UINTN)CallOutIdInfo.IdInformation, &Va);
    // Fix: the format string previously ended in "% " - a percent sign with
    // no conversion specifier, which is undefined for printf-style
    // formatters; "%%" prints the intended literal '%'.
    IDS_HDT_CONSOLE (MEM_FLOW, "\t\t\tDimm Vref = %c%d%% ", (Va.AdjustValue < 0) ? '-':'+', (Va.AdjustValue < 0) ? (0 - Va.AdjustValue) : Va.AdjustValue );
    if (Status != AGESA_SUCCESS) {
      IDS_HDT_CONSOLE (MEM_FLOW, "* Dimm Vref Callout Failed *");
    }
    IDS_HDT_CONSOLE (MEM_FLOW, "\n");
  }
  return (Status == AGESA_SUCCESS) ? TRUE : FALSE;
}