/**
 * Family specific check of PsppPolicy to initially enable the appropriate DPM states
 *
 * Builds a bitmask of LCLK DPM states to enable at boot.  DPM0 (the lowest
 * valid state) is always enabled; depending on the platform PSPP policy one
 * additional higher state (DPM0+1 or DPM0+2) may be enabled as well.
 *
 * @param[in] LclkDpmValid  UINT32 Lclk Dpm Valid (bits 0..7 mark valid DPM states)
 * @param[in] StdHeader     Pointer to AMD_CONFIG_PARAMS
 *
 * @retval    Bitmask of DPM states to enable (0 if no state is valid)
 */
UINT32
NbFmDpmStateBootupInit (
  IN       UINT32             LclkDpmValid,
  IN       AMD_CONFIG_PARAMS  *StdHeader
  )
{
  PCIe_PLATFORM_CONFIG  *Pcie;
  UINT32                DpmEnableMask;
  UINT8                 LowestValidDpm;
  UINT8                 ExtraDpmOffset;

  // Nothing to enable when no DPM state in bits 0..7 is valid.
  if ((LclkDpmValid & 0xFF) == 0) {
    IDS_HDT_CONSOLE (NB_MISC, " No valid DPM State Bootup Init\n");
    return 0;
  }

  // DPM0 is the right-most set bit; it is always enabled.
  LowestValidDpm = LibAmdBitScanForward (LclkDpmValid & 0xFF);
  DpmEnableMask = 1 << LowestValidDpm;

  // Policy selects how far above DPM0 an extra state is enabled (0 = none).
  ExtraDpmOffset = 0;
  if (PcieLocateConfigurationData (StdHeader, &Pcie) == AGESA_SUCCESS) {
    switch (Pcie->PsppPolicy) {
    case PsppDisabled:
    case PsppPerformance:
    case PsppBalanceHigh:
      // Enable DPM0 + DPM2
      ExtraDpmOffset = 2;
      break;
    case PsppBalanceLow:
      // Enable DPM0 + DPM1
      ExtraDpmOffset = 1;
      break;
    case PsppPowerSaving:
      // Enable DPM0 only
      break;
    default:
      ASSERT (FALSE);
    }
  } else {
    IDS_HDT_CONSOLE (NB_MISC, " DPM State Bootup Init Pcie Locate ConfigurationData Fail!! -- Enable DPM0 only\n");
  }

  // Only add the extra state if it still fits inside DPM0..DPM7.
  if ((ExtraDpmOffset != 0) && ((LowestValidDpm + ExtraDpmOffset) <= 7)) {
    DpmEnableMask = DpmEnableMask + (1 << (LowestValidDpm + ExtraDpmOffset));
  }
  return DpmEnableMask;
}
/**
 * RX offset cancellation enablement
 *
 * Programs the offset-cancellation window (lower/upper PHY lane IDs, Tx and Rx
 * enables) for the wrapper's native PHY lanes, cycles the PHY lanes off and
 * back on through the SMU, then re-enables all lanes of the wrapper.
 * Only the GFX and GPP wrappers are processed.
 *
 * @param[in] Wrapper  Pointer to Wrapper configuration data area
 * @param[in] Pcie     Pointer to PCIe configuration data area
 */
VOID
PcieOffsetCancelCalibration (
  IN       PCIe_WRAPPER_CONFIG   *Wrapper,
  IN       PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  UINT32                 PhyLaneBitmap;
  D0F0xBC_x1F39C_STRUCT  OffsetCancelCtrl;

  PhyLaneBitmap = PcieUtilGetWrapperLaneBitMap (LANE_TYPE_PHY_NATIVE_ALL, LANE_TYPE_PCIE_SB_CORE_CONFIG, Wrapper);
  // Calibration applies to the GFX and GPP wrappers only.
  if ((Wrapper->WrapId != GFX_WRAP_ID) && (Wrapper->WrapId != GPP_WRAP_ID)) {
    return;
  }
  if (PhyLaneBitmap != 0) {
    // Describe the contiguous PHY lane range covered by the bitmap.
    OffsetCancelCtrl.Value = 0;
    OffsetCancelCtrl.Field.Tx = 1;
    OffsetCancelCtrl.Field.Rx = 1;
    OffsetCancelCtrl.Field.LowerLaneID = LibAmdBitScanForward (PhyLaneBitmap) + Wrapper->StartPhyLane;
    OffsetCancelCtrl.Field.UpperLaneID = LibAmdBitScanReverse (PhyLaneBitmap) + Wrapper->StartPhyLane;
    GnbRegisterWriteTN (D0F0xBC_x1F39C_TYPE, D0F0xBC_x1F39C_ADDRESS, &OffsetCancelCtrl.Value, GNB_REG_ACC_FLAG_S3SAVE, GnbLibGetHeader (Pcie));
    // Power the PHY lanes off and back on so the new settings take effect.
    GnbSmuServiceRequestV4 (
      PcieConfigGetParentSilicon (Wrapper)->Address,
      SMC_MSG_PHY_LN_OFF,
      GNB_REG_ACC_FLAG_S3SAVE,
      GnbLibGetHeader (Pcie)
      );
    GnbSmuServiceRequestV4 (
      PcieConfigGetParentSilicon (Wrapper)->Address,
      SMC_MSG_PHY_LN_ON,
      GNB_REG_ACC_FLAG_S3SAVE,
      GnbLibGetHeader (Pcie)
      );
  }
  // Unconditionally re-enable every lane of the wrapper.
  PcieTopologyLaneControl (
    EnableLanes,
    PcieUtilGetWrapperLaneBitMap (LANE_TYPE_ALL, 0, Wrapper),
    Wrapper,
    Pcie
    );
}
/**
 * Check that the FP2 GFX wrapper does not use lanes on both PHYs
 *
 * On FP2 packages the GFX wrapper may only use one PHY (lanes 0-7 or
 * lanes 8-15).  If native PCIe/DDI lanes are allocated on both, all
 * engines of the wrapper are disabled and an event is logged.
 *
 * Fix: corrected the typo "Wrpper" -> "Wrapper" in both trace messages.
 *
 * @param[in]     Wrapper  Pointer to Wrapper configuration data area
 * @param[in,out] Buffer   Callback buffer (unused here)
 * @param[in]     Pcie     Pointer to PCIe configuration data area
 *
 * @retval AGESA_SUCCESS   Lane configuration is acceptable
 * @retval AGESA_ERROR     Both PHYs are in use on the GFX wrapper
 */
AGESA_STATUS
PcieFP2x8CheckCallbackTN (
  IN       PCIe_WRAPPER_CONFIG   *Wrapper,
  IN OUT   VOID                  *Buffer,
  IN       PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  UINT32        LaneBitmap;
  AGESA_STATUS  Status;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieFP2x8CheckCallbackTN Enter\n");
  Status = AGESA_SUCCESS;
  if (Wrapper->WrapId == GFX_WRAP_ID) {
    LaneBitmap = PcieUtilGetWrapperLaneBitMap (LANE_TYPE_PCIE_PHY_NATIVE | LANE_TYPE_DDI_PHY_NATIVE, 0, Wrapper);
    IDS_HDT_CONSOLE (GNB_TRACE, "FP2 GFX Wrapper phy LaneBitmap = %x\n", LaneBitmap);
    // Bits 0-7 belong to the first PHY, bits 8-15 to the second; both in use is invalid.
    if (((LaneBitmap & 0xFF) != 0) && ((LaneBitmap & 0xFF00) != 0)) {
      IDS_HDT_CONSOLE (GNB_TRACE, "Error!! FP2 GFX Wrapper cannot use both phy#\n");
      Status = AGESA_ERROR;
      PcieConfigDisableAllEngines (PciePortEngine | PcieDdiEngine, Wrapper);
      PutEventLog (
        AGESA_ERROR,
        GNB_EVENT_INVALID_LANES_CONFIGURATION,
        (LibAmdBitScanForward (LaneBitmap) + Wrapper->StartPhyLane),
        (LibAmdBitScanReverse (LaneBitmap) + Wrapper->StartPhyLane),
        0,
        0,
        GnbLibGetHeader (Pcie)
        );
      ASSERT (FALSE);
    }
  }
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieFP2x8CheckCallbackTN Exit\n");
  return Status;
}
/**
 * Program the PCIe lane multiplexers for a CZ wrapper.
 *
 * Starting from the identity mapping in LaneMuxSelectorArrayCZ, builds Tx
 * (core-lane -> PIF-lane) and Rx (PIF-lane -> core-lane) selector tables for
 * every allocated PCIe engine of the wrapper, then writes the tables to the
 * lane-mux core registers.  Each table entry swap keeps the tables a
 * permutation of the original selector values.
 *
 * @param[in]  Wrapper  Pointer to wrapper configuration data area
 * @param[in]  Pcie     Pointer to PCIe platform configuration data area
 */
VOID
STATIC
PcieTopologyApplyLaneMuxCZ (
  IN       PCIe_WRAPPER_CONFIG    *Wrapper,
  IN       PCIe_PLATFORM_CONFIG   *Pcie
  )
{
  PCIe_ENGINE_CONFIG  *EngineList;
  UINT32              Index;
  // One selector byte per lane; sized after the template table.
  UINT8               RxLaneMuxSelectorArray [sizeof (LaneMuxSelectorArrayCZ)];
  UINT8               TxLaneMuxSelectorArray [sizeof (LaneMuxSelectorArrayCZ)];

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyApplyLaneMuxCZ Enter\n");
  if (PcieLibIsPcieWrapper (Wrapper)) {
    // Start both tables from the default (identity) selector layout.
    LibAmdMemCopy (
      &TxLaneMuxSelectorArray[0],
      &LaneMuxSelectorArrayCZ[0],
      sizeof (LaneMuxSelectorArrayCZ),
      GnbLibGetHeader (Pcie)
      );
    LibAmdMemCopy (
      &RxLaneMuxSelectorArray[0],
      &LaneMuxSelectorArrayCZ[0],
      sizeof (LaneMuxSelectorArrayCZ),
      GnbLibGetHeader (Pcie)
      );
    EngineList = PcieConfigGetChildEngine (Wrapper);
    while (EngineList != NULL) {
      if (PcieLibIsPcieEngine (EngineList) && PcieLibIsEngineAllocated (EngineList)) {
        UINT32  CoreLaneBitmap;
        UINT32  PifLaneBitmap;
        UINT8   CurrentCoreLane;
        UINT8   CurrentPifLane;
        CoreLaneBitmap = PcieUtilGetEngineLaneBitMap (LANE_TYPE_PCIE_CORE_ALLOC, 0, EngineList);
        PifLaneBitmap = PcieUtilGetEngineLaneBitMap (LANE_TYPE_PCIE_PHY_NATIVE, 0, EngineList);
        IDS_HDT_CONSOLE (GNB_TRACE, "CoreLaneBitmap - %x, CurrentPifLane - %x\n", CoreLaneBitmap, PifLaneBitmap);
        // Pair off core lanes and PIF lanes lowest-bit-first; assumes both
        // bitmaps carry the same number of set bits -- TODO confirm upstream.
        while (CoreLaneBitmap != 0) {
          CurrentCoreLane = LibAmdBitScanForward (CoreLaneBitmap);
          CurrentPifLane = LibAmdBitScanForward (PifLaneBitmap);
          // Swap entries so Tx[PifLane] selects CoreLane, preserving the
          // permutation by moving the displaced value to CoreLane's old slot.
          if (TxLaneMuxSelectorArray[CurrentPifLane] != CurrentCoreLane) {
            TxLaneMuxSelectorArray[PcieTopologyLocateMuxIndexCZ (TxLaneMuxSelectorArray, CurrentCoreLane)] = TxLaneMuxSelectorArray[CurrentPifLane];
            TxLaneMuxSelectorArray[CurrentPifLane] = CurrentCoreLane;
          }
          // Same swap for the Rx direction: Rx[CoreLane] selects PifLane.
          if (RxLaneMuxSelectorArray[CurrentCoreLane] != CurrentPifLane) {
            RxLaneMuxSelectorArray[PcieTopologyLocateMuxIndexCZ (RxLaneMuxSelectorArray, CurrentPifLane)] = RxLaneMuxSelectorArray[CurrentCoreLane];
            RxLaneMuxSelectorArray[CurrentCoreLane] = CurrentPifLane;
          }
          // Consume the lane pair just processed.
          CoreLaneBitmap &= (~ (1 << CurrentCoreLane));
          PifLaneBitmap &= (~ (1 << CurrentPifLane));
        }
      }
      EngineList = PcieLibGetNextDescriptor (EngineList);
    }
    // Write the byte tables to the core registers as two 32-bit words each
    // (4 selector bytes per register write).
    // NOTE(review): the UINT32* view of a UINT8 array relies on the compiler
    // tolerating this aliasing and on platform endianness -- established
    // firmware convention here, but verify if ever ported.
    for (Index = 0; Index < 2; ++Index) {
      PcieRegisterWrite (
        Wrapper,
        CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_0121_ADDRESS + Index),
        ((UINT32 *) TxLaneMuxSelectorArray) [Index],
        FALSE,
        Pcie
        );
      PcieRegisterWrite (
        Wrapper,
        CORE_SPACE (Wrapper->StartPcieCoreId, D0F0xE4_CORE_0125_ADDRESS + Index),
        ((UINT32 *) RxLaneMuxSelectorArray) [Index],
        FALSE,
        Pcie
        );
    }
  }
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyApplyLaneMuxCZ Exit\n");
}
/**
 * Enable DIMM online sparing on the current node when configured.
 *
 * Pass 1 over all DCTs: a DCT qualifies for sparing when it has memory and at
 * least two enabled chip-selects (one can be reserved as the spare rank);
 * failures are logged.  Pass 2: qualifying DCTs are re-stitched with a spare
 * rank, while non-qualifying populated DCTs keep contributing to the node
 * memory size/limit.  Relies on NBPtr->SwitchDCT () retargeting
 * NBPtr->DCTPtr between iterations.
 *
 * @param[in,out]  NBPtr  Pointer to the MEM_NB_BLOCK of the current node
 *
 * @retval TRUE   Online sparing was enabled on at least one DCT
 * @retval FALSE  Sparing not requested, or no DCT could support it
 */
BOOLEAN
MemFOnlineSpare (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  UINT8 Dct;
  UINT8 q;
  UINT8 Value8;
  BOOLEAN Flag;
  // Per-DCT flag: TRUE when that DCT can dedicate a rank to sparing.
  BOOLEAN OnlineSprEnabled[MAX_CHANNELS_PER_SOCKET];
  MEM_PARAMETER_STRUCT *RefPtr;
  DIE_STRUCT *MCTPtr;

  ASSERT (NBPtr != NULL);
  RefPtr = NBPtr->RefPtr;
  Flag = FALSE;
  if (RefPtr->EnableOnLineSpareCtl != 0) {
    RefPtr->GStatus[GsbEnDIMMSpareNW] = TRUE;
    MCTPtr = NBPtr->MCTPtr;
    // Check if online spare can be enabled on current node
    for (Dct = 0; Dct < NBPtr->DctCount; Dct++) {
      ASSERT (Dct < sizeof (OnlineSprEnabled));
      NBPtr->SwitchDCT (NBPtr, Dct);
      OnlineSprEnabled[Dct] = FALSE;
      // In ganged mode only DCT0 is evaluated.
      if ((MCTPtr->GangedMode == 0) || (MCTPtr->Dct == 0)) {
        if (NBPtr->DCTPtr->Timings.DctMemSize != 0) {
          // Make sure at least two chip-selects are available
          // (highest set bit strictly above lowest set bit).
          Value8 = LibAmdBitScanReverse (NBPtr->DCTPtr->Timings.CsEnabled);
          if (Value8 > LibAmdBitScanForward (NBPtr->DCTPtr->Timings.CsEnabled)) {
            OnlineSprEnabled[Dct] = TRUE;
            Flag = TRUE;
          } else {
            // Only one chip-select: sparing impossible on this DCT.
            PutEventLog (AGESA_ERROR, MEM_ERROR_DIMM_SPARING_NOT_ENABLED, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
            MCTPtr->ErrStatus[EsbSpareDis] = TRUE;
          }
        }
      }
    }
    // If we don't have spared rank on any DCT, we don't run the rest part of the code.
    if (!Flag) {
      return FALSE;
    }
    // Rebuild the node memory map with spare ranks reserved.
    MCTPtr->NodeMemSize = 0;
    for (Dct = 0; Dct < NBPtr->DctCount; Dct++) {
      NBPtr->SwitchDCT (NBPtr, Dct);
      if (OnlineSprEnabled[Dct]) {
        // Only run StitchMemory if we need to set a spare rank.
        // Clear the DCT size and all chip-select base registers first.
        NBPtr->DCTPtr->Timings.DctMemSize = 0;
        for (q = 0; q < MAX_CS_PER_CHANNEL; q++) {
          NBPtr->SetBitField (NBPtr, BFCSBaseAddr0Reg + q, 0);
        }
        Flag = NBPtr->StitchMemory (NBPtr);
        ASSERT (Flag == TRUE);
      } else if ((MCTPtr->GangedMode == 0) && (NBPtr->DCTPtr->Timings.DctMemSize != 0)) {
        // Otherwise, need to adjust the memory size on the node.
        MCTPtr->NodeMemSize += NBPtr->DCTPtr->Timings.DctMemSize;
        MCTPtr->NodeSysLimit = MCTPtr->NodeMemSize - 1;
      }
    }
    return TRUE;
  } else {
    // Online sparing not requested by the platform configuration.
    return FALSE;
  }
}