/**
 * Restores the context of a 'conditional' PCI device, honoring boot-mode gating.
 *
 * Walks the device's register list and, for each entry whose Mask1/Mask2 bits
 * intersect the device descriptor's masks, restores the register from the raw
 * save buffer. An entry is only written back when its BootMode field is zero
 * (always restore) or intersects the current boot mode; the buffer pointer is
 * advanced regardless, because data is saved unconditionally at save time.
 *
 * @param[in]     StdHeader  AMD standard header config param.
 * @param[in]     Device     'conditional' PCI device to restore.
 * @param[in]     CallPoint  Indicates whether this is AMD_INIT_RESUME or
 *                           AMD_S3LATE_RESTORE (selects which register-list
 *                           provider is queried).
 * @param[in,out] OrMask     Current buffer pointer of raw register values;
 *                           advanced past this device's data on return.
 */
VOID
RestoreConditionalPciDevice (
  IN AMD_CONFIG_PARAMS *StdHeader,
  IN CONDITIONAL_PCI_DEVICE_DESCRIPTOR *Device,
  IN CALL_POINTS CallPoint,
  IN OUT VOID **OrMask
  )
{
  UINT8 RegSizeInBytes;
  UINT8 SpecialCaseIndex;
  UINT8 *IntermediatePtr;
  UINT8 BootMode;
  UINT16 i;
  UINT32 Socket;
  UINT32 Module;
  UINT32 RegValueRead;
  UINT32 RegValueWrite;
  UINT32 AndMask;
  ACCESS_WIDTH AccessWidth;
  AGESA_STATUS IgnoredSts;
  PCI_ADDR PciAddress;
  CPCI_REGISTER_BLOCK_HEADER *RegisterHdr;

  // Resolve the device's node to a socket/module, then to a PCI address.
  GetSocketModuleOfNode ((UINT32) Device->Node, &Socket, &Module, StdHeader);
  GetPciAddress (StdHeader, Socket, Module, &PciAddress, &IgnoredSts);
  // The register list differs between the resume path and the late-restore path.
  if (CallPoint == INIT_RESUME) {
    MemFS3GetCPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  } else {
    S3GetCPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  }
  // Determine the current boot mode used to gate per-register restores.
  // NOTE(review): AMD_INIT_POST implies a training-restore/capsule reboot;
  // anything else is treated as a normal S3 resume — confirm against caller.
  BootMode = S3_RESUME_MODE;
  if (StdHeader->Func == AMD_INIT_POST) {
    BootMode = RESTORE_TRAINING_MODE | CAPSULE_REBOOT_MODE;
  }
  for (i = 0; i < RegisterHdr->NumRegisters; i++) {
    // Only entries whose masks intersect this device descriptor apply.
    if (((Device->Mask1 & RegisterHdr->RegisterList[i].Mask1) != 0) &&
        ((Device->Mask2 & RegisterHdr->RegisterList[i].Mask2) != 0)) {
      PciAddress.Address.Function = RegisterHdr->RegisterList[i].Function;
      PciAddress.Address.Register = RegisterHdr->RegisterList[i].Offset;
      RegSizeInBytes = RegisterHdr->RegisterList[i].Type.RegisterSize;
      switch (RegSizeInBytes) {
      case 1:
        // Narrow the AND mask to 8 bits and fetch 1 byte of saved data.
        AndMask = 0xFFFFFFFF & ((UINT8) RegisterHdr->RegisterList[i].AndMask);
        RegValueWrite = **(UINT8 **)OrMask;
        AccessWidth = AccessS3SaveWidth8;
        break;
      case 2:
        // Narrow the AND mask to 16 bits and fetch 2 bytes of saved data.
        AndMask = 0xFFFFFFFF & ((UINT16) RegisterHdr->RegisterList[i].AndMask);
        RegValueWrite = **(UINT16 **)OrMask;
        AccessWidth = AccessS3SaveWidth16;
        break;
      case 3:
        // In this case, we don't need to restore a register. We just need to call a special
        // function to do certain things in the save and resume sequence.
        // This should not be used in a non-special case.
        // RegSizeInBytes = 0 means the buffer pointer is not advanced below.
        AndMask = 0;
        RegValueWrite = 0;
        RegSizeInBytes = 0;
        AccessWidth = 0;
        break;
      default:
        // Any other size encoding is treated as a full 32-bit register.
        AndMask = RegisterHdr->RegisterList[i].AndMask;
        RegSizeInBytes = 4;
        RegValueWrite = **(UINT32 **)OrMask;
        AccessWidth = AccessS3SaveWidth32;
        break;
      }
      // Do not restore the register if not in the right boot mode.
      // Pointer to the saved data buffer still needs to be adjusted below,
      // as data will be saved regardless of boot mode.
      if ((RegisterHdr->RegisterList[i].BootMode == 0) ||
          ((BootMode & RegisterHdr->RegisterList[i].BootMode) != 0)) {
        if (RegisterHdr->RegisterList[i].Type.SpecialCaseFlag == 0) {
          // Read-modify-write: keep the bits outside AndMask, OR in saved bits.
          LibAmdPciRead (AccessWidth, PciAddress, &RegValueRead, StdHeader);
          RegValueWrite |= RegValueRead & (~AndMask);
          LibAmdPciWrite (AccessWidth, PciAddress, &RegValueWrite, StdHeader);
        } else {
          // Special-case register: delegate to paired Save/Restore callbacks.
          SpecialCaseIndex = RegisterHdr->RegisterList[i].Type.SpecialCaseIndex;
          if (AndMask != 0) {
            // Merge current hardware bits outside the mask before restoring.
            RegisterHdr->SpecialCases[SpecialCaseIndex].Save (AccessWidth, PciAddress, &RegValueRead, StdHeader);
            RegValueWrite |= RegValueRead & (~AndMask);
          }
          RegisterHdr->SpecialCases[SpecialCaseIndex].Restore (AccessWidth, PciAddress, &RegValueWrite, StdHeader);
        }
        IDS_OPTION_HOOK (IDS_AFTER_RESTORING_PCI_REG, RegisterHdr, StdHeader);
      }
      // Advance the raw-data pointer past this entry's saved bytes
      // (0 bytes for special case-3 entries).
      IntermediatePtr = (UINT8 *) *OrMask;
      *OrMask = &IntermediatePtr[RegSizeInBytes];
      if ((RegSizeInBytes == 0) && (RegValueWrite == RESTART_FROM_BEGINNING_LIST)) {
        // Restart from the beginning of the register list:
        // i wraps from 0xFFFF to 0 on the loop's i++.
        i = 0xFFFF;
      }
    }
  }
}
/**
 * MemSocketScan - Scan all nodes, recording the physical Socket number,
 * Die Number (relative to the socket), and PCI Device address of each
 * populated socket.
 *
 * This information is used by the northbridge block to map a dram
 * channel on a particular DCT, on a particular CPU Die, in a particular
 * socket to the DRAM SPD Data for the DIMMS physically connected to
 * that channel.
 *
 * This socket scan will always result in Die 0 as the BSP.
 *
 * @param[in,out] mmPtr - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @retval AGESA_SUCCESS  At least one die found and DIE_STRUCT buffer allocated.
 * @retval AGESA_FATAL    No die present, or heap allocation failed.
 */
AGESA_STATUS
MemSocketScan (
  IN OUT MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  MEM_DATA_STRUCT *MemPtr;
  ALLOCATE_HEAP_PARAMS HeapRequest;
  PCI_ADDR PciAddr;
  AGESA_STATUS PciLookupStatus;
  UINT32 SocketId;
  UINT32 DieId;
  UINT8 Node;
  UINT8 NodeCount;
  UINT8 BufIndex;

  ASSERT (mmPtr != NULL);
  ASSERT (mmPtr->MemPtr != NULL);
  MemPtr = mmPtr->MemPtr;

  // First pass: count the populated dies in the system.
  NodeCount = 0;
  for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
    if (GetSocketModuleOfNode ((UINT32)Node, &SocketId, &DieId, (VOID *)MemPtr)) {
      NodeCount++;
    }
  }
  MemPtr->DieCount = NodeCount;
  mmPtr->DieCount = NodeCount;

  if (NodeCount == 0) {
    ASSERT (FALSE); // No die in the system
    return AGESA_FATAL;
  }

  // Allocate one DIE_STRUCT per populated die.
  HeapRequest.RequestedBufferSize = ((UINT16)NodeCount * sizeof (DIE_STRUCT));
  HeapRequest.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0);
  HeapRequest.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&HeapRequest, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    ASSERT (FALSE); // Heap allocation failed for DIE_STRUCTs
    return AGESA_FATAL;
  }
  MemPtr->DiesPerSystem = (DIE_STRUCT *)HeapRequest.BufferPtr;

  // Second pass: record SocketId, DieId, and PCI address of each node.
  BufIndex = 0;
  for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
    if (!GetSocketModuleOfNode ((UINT32)Node, &SocketId, &DieId, (VOID *)MemPtr)) {
      continue;
    }
    if (GetPciAddress ((VOID *)MemPtr, (UINT8)SocketId, (UINT8)DieId, &PciAddr, &PciLookupStatus)) {
      MemPtr->DiesPerSystem[BufIndex].SocketId = (UINT8)SocketId;
      MemPtr->DiesPerSystem[BufIndex].DieId = (UINT8)DieId;
      MemPtr->DiesPerSystem[BufIndex].PciAddr.AddressValue = PciAddr.AddressValue;
      BufIndex++;
    }
  }
  return AGESA_SUCCESS;
}
/**
 * Saves the context of a 'conditional' PCI device.
 *
 * Walks the device's register list and, for each entry whose Mask1/Mask2 bits
 * intersect the device descriptor's masks, reads the register into the raw
 * save buffer and applies the entry's AND mask. The buffer pointer is advanced
 * by the register's size after each entry.
 *
 * @param[in]     StdHeader  AMD standard header config param.
 * @param[in]     Device     'conditional' PCI device to save.
 * @param[in]     CallPoint  Indicates whether this is AMD_INIT_RESUME or
 *                           AMD_S3LATE_RESTORE (selects which register-list
 *                           provider is queried).
 * @param[in,out] OrMask     Current buffer pointer of raw register values;
 *                           advanced past this device's data on return.
 */
VOID
SaveConditionalPciDevice (
  IN AMD_CONFIG_PARAMS *StdHeader,
  IN CONDITIONAL_PCI_DEVICE_DESCRIPTOR *Device,
  IN CALL_POINTS CallPoint,
  IN OUT VOID **OrMask
  )
{
  UINT8 RegSizeInBytes;
  UINT8 SpecialCaseIndex;
  UINT8 *IntermediatePtr;
  UINT16 i;
  UINT32 Socket;
  UINT32 Module;
  UINT32 AndMask;
  ACCESS_WIDTH AccessWidth;
  AGESA_STATUS IgnoredSts;
  PCI_ADDR PciAddress;
  CPCI_REGISTER_BLOCK_HEADER *RegisterHdr;

  // Resolve the device's node to a socket/module, then to a PCI address.
  GetSocketModuleOfNode ((UINT32) Device->Node, &Socket, &Module, StdHeader);
  GetPciAddress (StdHeader, Socket, Module, &PciAddress, &IgnoredSts);
  // The register list differs between the resume path and the late-restore path.
  if (CallPoint == INIT_RESUME) {
    MemFS3GetCPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  } else {
    S3GetCPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  }
  for (i = 0; i < RegisterHdr->NumRegisters; i++) {
    // Only entries whose masks intersect this device descriptor apply.
    if (((Device->Mask1 & RegisterHdr->RegisterList[i].Mask1) != 0) &&
        ((Device->Mask2 & RegisterHdr->RegisterList[i].Mask2) != 0)) {
      PciAddress.Address.Function = RegisterHdr->RegisterList[i].Function;
      PciAddress.Address.Register = RegisterHdr->RegisterList[i].Offset;
      RegSizeInBytes = RegisterHdr->RegisterList[i].Type.RegisterSize;
      switch (RegSizeInBytes) {
      case 1:
        // 8-bit register: narrow AND mask to one byte.
        AndMask = 0xFFFFFFFF & ((UINT8) RegisterHdr->RegisterList[i].AndMask);
        AccessWidth = AccessS3SaveWidth8;
        break;
      case 2:
        // 16-bit register: narrow AND mask to two bytes.
        AndMask = 0xFFFFFFFF & ((UINT16) RegisterHdr->RegisterList[i].AndMask);
        AccessWidth = AccessS3SaveWidth16;
        break;
      case 3:
        // In this case, we don't need to save a register. We just need to call a special
        // function to do certain things in the save and resume sequence.
        // This should not be used in a non-special case.
        // RegSizeInBytes = 0 means the buffer pointer is not advanced below.
        AndMask = 0;
        RegSizeInBytes = 0;
        AccessWidth = 0;
        break;
      default:
        // Any other size encoding is treated as a full 32-bit register.
        AndMask = RegisterHdr->RegisterList[i].AndMask;
        RegSizeInBytes = 4;
        AccessWidth = AccessS3SaveWidth32;
        break;
      }
      if (RegisterHdr->RegisterList[i].Type.SpecialCaseFlag == 0) {
        // Regular registers must have a real mask, size, and width by here.
        ASSERT ((AndMask != 0) && (RegSizeInBytes != 0) && (AccessWidth != 0));
        // Read the register value directly into the save buffer.
        LibAmdPciRead (AccessWidth, PciAddress, *OrMask, StdHeader);
      } else {
        // Special-case register: its Save callback fills the buffer slot.
        SpecialCaseIndex = RegisterHdr->RegisterList[i].Type.SpecialCaseIndex;
        RegisterHdr->SpecialCases[SpecialCaseIndex].Save (AccessWidth, PciAddress, *OrMask, StdHeader);
      }
      if (AndMask != 0) {
        // If AndMask is 0, then it is a not-care. Don't need to apply it to the OrMask.
        // NOTE(review): this masks a full 32 bits even for 1/2-byte entries, so it
        // zeroes up to 3 bytes beyond this entry's slot; later entries overwrite
        // them, but the final entry's tail bytes stay clobbered — confirm the
        // buffer is sized/consumed with this in mind.
        **((UINT32 **) OrMask) &= AndMask;
      }
      if ((RegSizeInBytes == 0) && (**((UINT32 **) OrMask) == RESTART_FROM_BEGINNING_LIST)) {
        // Restart from the beginning of the register list:
        // i wraps from 0xFFFF to 0 on the loop's i++.
        i = 0xFFFF;
      }
      // Advance the raw-data pointer past this entry's saved bytes.
      IntermediatePtr = (UINT8 *) *OrMask;
      *OrMask = &IntermediatePtr[RegSizeInBytes]; // += RegSizeInBytes;
    }
  }
}
/**
 * Performs CPU related initialization at the early entry point.
 *
 * This function performs a large list of initialization items, including:
 *
 *   -1 local APIC initialization
 *   -2 MSR table initialization
 *   -3 PCI table initialization
 *   -4 HT Phy PCI table initialization
 *   -5 microcode patch loading
 *   -6 namestring determination/programming
 *   -7 AP initialization
 *   -8 power management initialization
 *   -9 core leveling
 *
 * This routine must be run by all cores in the system. Please note that
 * all APs that enter will never exit: the BSP launches and then halts them,
 * while APs branch into ApEntry.
 *
 * @param[in] StdHeader       Config handle for library and services.
 * @param[in] PlatformConfig  Config handle for platform specific information.
 *
 * @retval AGESA_SUCCESS  (or the most severe status reported by a sub-step)
 */
AGESA_STATUS
AmdCpuEarly (
  IN AMD_CONFIG_PARAMS *StdHeader,
  IN PLATFORM_CONFIGURATION *PlatformConfig
  )
{
  UINT8 WaitStatus;
  UINT8 i;
  UINT8 StartCore;
  UINT8 EndCore;
  UINT32 NodeNum;
  UINT32 PrimaryCore;
  UINT32 SocketNum;
  UINT32 ModuleNum;
  UINT32 HighCore;
  UINT32 ApHeapIndex;
  UINT32 CurrentPerformEarlyFlag;
  UINT32 TargetApicId;
  AP_WAIT_FOR_STATUS WaitForStatus;
  AGESA_STATUS Status;
  AGESA_STATUS CalledStatus;
  CPU_SPECIFIC_SERVICES *FamilySpecificServices;
  AMD_CPU_EARLY_PARAMS CpuEarlyParams;
  S_PERFORM_EARLY_INIT_ON_CORE *EarlyTableOnCore;

  Status = AGESA_SUCCESS;
  CalledStatus = AGESA_SUCCESS;

  AmdCpuEarlyInitializer (StdHeader, PlatformConfig, &CpuEarlyParams);
  IDS_OPTION_HOOK (IDS_CPU_Early_Override, &CpuEarlyParams, StdHeader);
  GetCpuServicesOfCurrentCore ((CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, StdHeader);

  // Run the family-specific per-core early init table on this core
  // (every core, BSP and AP alike, executes the applicable steps).
  EarlyTableOnCore = NULL;
  FamilySpecificServices->GetEarlyInitOnCoreTable (FamilySpecificServices, (CONST S_PERFORM_EARLY_INIT_ON_CORE **)&EarlyTableOnCore, &CpuEarlyParams, StdHeader);
  if (EarlyTableOnCore != NULL) {
    GetPerformEarlyFlag (&CurrentPerformEarlyFlag, StdHeader);
    for (i = 0; EarlyTableOnCore[i].PerformEarlyInitOnCore != NULL; i++) {
      // Only run steps whose flags match the current early-init flag set.
      if ((EarlyTableOnCore[i].PerformEarlyInitFlag & CurrentPerformEarlyFlag) != 0) {
        IDS_HDT_CONSOLE (CPU_TRACE, " Perform core init step %d\n", i);
        EarlyTableOnCore[i].PerformEarlyInitOnCore (FamilySpecificServices, &CpuEarlyParams, StdHeader);
      }
    }
  }

  // B S P   C O D E   T O   I N I T I A L I Z E   A Ps
  // -------------------------------------------------------
  // -------------------------------------------------------
  // IMPORTANT: Here we determine if we are BSP or AP
  if (IsBsp (StdHeader, &CalledStatus)) {
    // Even though the bsc does not need to send itself a heap index, this sequence
    // performs other important initialization. Use '0' as a dummy heap index value.
    GetSocketModuleOfNode (0, &SocketNum, &ModuleNum, StdHeader);
    GetCpuServicesOfSocket (SocketNum, (CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, StdHeader);
    FamilySpecificServices->SetApCoreNumber (FamilySpecificServices, SocketNum, ModuleNum, 0, StdHeader);
    FamilySpecificServices->TransferApCoreNumber (FamilySpecificServices, StdHeader);
    // Clear BSP's Status Byte
    ApUtilWriteControlByte (CORE_ACTIVE, StdHeader);

    // Launch every AP core in the system, assigning each a unique heap index,
    // and wait for each to report CORE_IDLE before launching the next.
    NodeNum = 0;
    ApHeapIndex = 1;
    while (NodeNum < MAX_NODES && GetSocketModuleOfNode (NodeNum, &SocketNum, &ModuleNum, StdHeader)) {
      GetCpuServicesOfSocket (SocketNum, (CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, StdHeader);
      GetGivenModuleCoreRange (SocketNum, ModuleNum, &PrimaryCore, &HighCore, StdHeader);
      if (NodeNum == 0) {
        // Node 0's primary core is the BSP itself; skip it.
        StartCore = (UINT8) PrimaryCore + 1;
      } else {
        StartCore = (UINT8) PrimaryCore;
      }
      EndCore = (UINT8) HighCore;
      for (i = StartCore; i <= EndCore; i++) {
        FamilySpecificServices->SetApCoreNumber (FamilySpecificServices, SocketNum, ModuleNum, ApHeapIndex, StdHeader);
        IDS_HDT_CONSOLE (CPU_TRACE, " Launch socket %d core %d\n", SocketNum, i);
        if (FamilySpecificServices->LaunchApCore (FamilySpecificServices, SocketNum, ModuleNum, i, PrimaryCore, StdHeader)) {
          IDS_HDT_CONSOLE (CPU_TRACE, " Waiting for socket %d core %d\n", SocketNum, i);
          GetLocalApicIdForCore (SocketNum, i, &TargetApicId, StdHeader);
          // Block until the launched AP signals CORE_IDLE.
          WaitStatus = CORE_IDLE;
          WaitForStatus.Status = &WaitStatus;
          WaitForStatus.NumberOfElements = 1;
          WaitForStatus.RetryCount = WAIT_INFINITELY;
          WaitForStatus.WaitForStatusFlags = WAIT_STATUS_EQUALITY;
          ApUtilWaitForCoreStatus (TargetApicId, &WaitForStatus, StdHeader);
          // Heap indices are only consumed by successfully launched APs.
          ApHeapIndex++;
        }
      }
      NodeNum++;
    }
    // B S P   P h a s e - 1   E N D

    IDS_OPTION_HOOK (IDS_BEFORE_PM_INIT, &CpuEarlyParams, StdHeader);
    AGESA_TESTPOINT (TpProcCpuBeforePMFeatureInit, StdHeader);
    IDS_HDT_CONSOLE (CPU_TRACE, " Dispatch CPU features before early power mgmt init\n");
    CalledStatus = DispatchCpuFeatures (CPU_FEAT_BEFORE_PM_INIT, PlatformConfig, StdHeader);
    // Track the most severe status seen so far.
    if (CalledStatus > Status) {
      Status = CalledStatus;
    }
    AGESA_TESTPOINT (TpProcCpuPowerMgmtInit, StdHeader);
    CalledStatus = PmInitializationAtEarly (&CpuEarlyParams, StdHeader);
    if (CalledStatus > Status) {
      Status = CalledStatus;
    }
    AGESA_TESTPOINT (TpProcCpuEarlyFeatureInit, StdHeader);
    IDS_HDT_CONSOLE (CPU_TRACE, " Dispatch CPU features after early power mgmt init\n");
    // This call's status is merged into Status by the common check below.
    CalledStatus = DispatchCpuFeatures (CPU_FEAT_AFTER_PM_INIT, PlatformConfig, StdHeader);
    IDS_OPTION_HOOK (IDS_BEFORE_AP_EARLY_HALT, &CpuEarlyParams, StdHeader);
    // Sleep all APs
    IDS_HDT_CONSOLE (CPU_TRACE, " Halting all APs\n");
    ApUtilWriteControlByte (CORE_IDLE_HLT, StdHeader);
  } else {
    // AP path: enter the AP service loop; APs do not return from early init
    // in the normal flow (see function header).
    ApEntry (StdHeader, &CpuEarlyParams);
  }
  // Merge the last recorded sub-status (covers the post-PM dispatch on the
  // BSP path and the IsBsp/ApEntry status on the AP path).
  if (CalledStatus > Status) {
    Status = CalledStatus;
  }
  return (Status);
}
/**
 * Restores the context of a PCI device.
 *
 * Walks the device's register list unconditionally (no mask filtering) and
 * restores every entry from the raw save buffer, advancing the buffer pointer
 * by each entry's size.
 *
 * @param[in]     StdHeader  AMD standard header config param.
 * @param[in]     Device     PCI device to restore.
 * @param[in]     CallPoint  Indicates whether this is AMD_INIT_RESUME or
 *                           AMD_S3LATE_RESTORE.
 * @param[in,out] OrMask     Current buffer pointer of raw register values;
 *                           advanced past this device's data on return.
 */
VOID
RestorePciDevice (
  IN AMD_CONFIG_PARAMS *StdHeader,
  IN PCI_DEVICE_DESCRIPTOR *Device,
  IN CALL_POINTS CallPoint,
  IN OUT VOID **OrMask
  )
{
  UINT16 RegIndex;
  UINT8 SlotBytes;
  UINT8 CallbackIndex;
  UINT32 Socket;
  UINT32 Module;
  UINT32 KeepMask;
  UINT32 HwValue;
  UINT32 MergedValue;
  ACCESS_WIDTH Width;
  AGESA_STATUS DummyStatus;
  PCI_ADDR PciAddress;
  PCI_REGISTER_BLOCK_HEADER *RegisterHdr;

  // Map the device's node to a socket/module pair and then to a PCI address.
  GetSocketModuleOfNode ((UINT32) Device->Node, &Socket, &Module, StdHeader);
  GetPciAddress (StdHeader, Socket, Module, &PciAddress, &DummyStatus);

  // Pick the register list appropriate to the current call point.
  if (CallPoint == INIT_RESUME) {
    MemFS3GetPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  } else {
    S3GetPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  }

  for (RegIndex = 0; RegIndex < RegisterHdr->NumRegisters; RegIndex++) {
    PciAddress.Address.Function = RegisterHdr->RegisterList[RegIndex].Function;
    PciAddress.Address.Register = RegisterHdr->RegisterList[RegIndex].Offset;
    SlotBytes = RegisterHdr->RegisterList[RegIndex].Type.RegisterSize;

    // Decode the entry's size into mask width, saved value, and access width.
    if (SlotBytes == 1) {
      KeepMask = 0xFFFFFFFF & ((UINT8) RegisterHdr->RegisterList[RegIndex].AndMask);
      MergedValue = *(UINT8 *) *OrMask;
      Width = AccessS3SaveWidth8;
    } else if (SlotBytes == 2) {
      KeepMask = 0xFFFFFFFF & ((UINT16) RegisterHdr->RegisterList[RegIndex].AndMask);
      MergedValue = *(UINT16 *) *OrMask;
      Width = AccessS3SaveWidth16;
    } else if (SlotBytes == 3) {
      // Size 3 encodes "no register": a special-case hook is invoked instead,
      // and the buffer pointer is not advanced (SlotBytes forced to 0).
      KeepMask = 0;
      MergedValue = 0;
      SlotBytes = 0;
      Width = 0;
    } else {
      // Any other encoding is a full 32-bit register.
      KeepMask = RegisterHdr->RegisterList[RegIndex].AndMask;
      SlotBytes = 4;
      MergedValue = *(UINT32 *) *OrMask;
      Width = AccessS3SaveWidth32;
    }

    if (RegisterHdr->RegisterList[RegIndex].Type.SpecialCaseFlag == 0) {
      // Regular registers must have a real mask, size, and width by here.
      ASSERT ((KeepMask != 0) && (SlotBytes != 0) && (Width != 0));
      // Read-modify-write: preserve bits outside the mask, OR in saved bits.
      LibAmdPciRead (Width, PciAddress, &HwValue, StdHeader);
      MergedValue |= HwValue & (~KeepMask);
      LibAmdPciWrite (Width, PciAddress, &MergedValue, StdHeader);
    } else {
      // Special-case register: merge via the paired Save/Restore callbacks.
      CallbackIndex = RegisterHdr->RegisterList[RegIndex].Type.SpecialCaseIndex;
      RegisterHdr->SpecialCases[CallbackIndex].Save (Width, PciAddress, &HwValue, StdHeader);
      MergedValue |= HwValue & (~KeepMask);
      RegisterHdr->SpecialCases[CallbackIndex].Restore (Width, PciAddress, &MergedValue, StdHeader);
    }

    // Step past this entry's saved bytes in the raw-data buffer.
    *OrMask = (VOID *) ((UINT8 *) *OrMask + SlotBytes);
  }
}
/**
 * Restores the context of a 'conditional' PCI device.
 *
 * Walks the device's register list and, for each entry whose Mask1/Mask2 bits
 * intersect the device descriptor's masks, restores the register from the raw
 * save buffer, advancing the buffer pointer by each entry's size.
 *
 * NOTE(review): this variant differs from the boot-mode-gated variant of the
 * same name elsewhere in this source: it has no size-3 special encoding, no
 * list-restart handling, and no boot-mode gate, and its special-case path
 * calls Save unconditionally — confirm which version is current.
 *
 * @param[in]     StdHeader  AMD standard header config param.
 * @param[in]     Device     'conditional' PCI device to restore.
 * @param[in]     CallPoint  Indicates whether this is AMD_INIT_RESUME or
 *                           AMD_S3LATE_RESTORE.
 * @param[in,out] OrMask     Current buffer pointer of raw register values;
 *                           advanced past this device's data on return.
 */
VOID
RestoreConditionalPciDevice (
  IN AMD_CONFIG_PARAMS *StdHeader,
  IN CONDITIONAL_PCI_DEVICE_DESCRIPTOR *Device,
  IN CALL_POINTS CallPoint,
  IN OUT VOID **OrMask
  )
{
  UINT8 RegSizeInBytes;
  UINT8 SpecialCaseIndex;
  UINT8 *IntermediatePtr;
  UINT16 i;
  UINT32 Socket;
  UINT32 Module;
  UINT32 RegValueRead;
  UINT32 RegValueWrite;
  UINT32 AndMask;
  ACCESS_WIDTH AccessWidth;
  AGESA_STATUS IgnoredSts;
  PCI_ADDR PciAddress;
  CPCI_REGISTER_BLOCK_HEADER *RegisterHdr;

  // Resolve the device's node to a socket/module, then to a PCI address.
  GetSocketModuleOfNode ((UINT32) Device->Node, &Socket, &Module, StdHeader);
  GetPciAddress (StdHeader, Socket, Module, &PciAddress, &IgnoredSts);
  // The register list differs between the resume path and the late-restore path.
  if (CallPoint == INIT_RESUME) {
    MemFS3GetCPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  } else {
    S3GetCPciDeviceRegisterList (Device, &RegisterHdr, StdHeader);
  }
  for (i = 0; i < RegisterHdr->NumRegisters; i++) {
    // Only entries whose masks intersect this device descriptor apply.
    if (((Device->Mask1 & RegisterHdr->RegisterList[i].Mask1) != 0) &&
        ((Device->Mask2 & RegisterHdr->RegisterList[i].Mask2) != 0)) {
      PciAddress.Address.Function = RegisterHdr->RegisterList[i].Function;
      PciAddress.Address.Register = RegisterHdr->RegisterList[i].Offset;
      RegSizeInBytes = RegisterHdr->RegisterList[i].Type.RegisterSize;
      switch (RegSizeInBytes) {
      case 1:
        // 8-bit register: narrow AND mask, fetch one saved byte.
        AndMask = 0xFFFFFFFF & ((UINT8) RegisterHdr->RegisterList[i].AndMask);
        RegValueWrite = **(UINT8 **)OrMask;
        AccessWidth = AccessS3SaveWidth8;
        break;
      case 2:
        // 16-bit register: narrow AND mask, fetch two saved bytes.
        AndMask = 0xFFFFFFFF & ((UINT16) RegisterHdr->RegisterList[i].AndMask);
        RegValueWrite = **(UINT16 **)OrMask;
        AccessWidth = AccessS3SaveWidth16;
        break;
      default:
        // Any other size encoding is treated as a full 32-bit register.
        AndMask = RegisterHdr->RegisterList[i].AndMask;
        RegSizeInBytes = 4;
        RegValueWrite = **(UINT32 **)OrMask;
        AccessWidth = AccessS3SaveWidth32;
        break;
      }
      if (RegisterHdr->RegisterList[i].Type.SpecialCaseFlag == 0) {
        // Read-modify-write: keep the bits outside AndMask, OR in saved bits.
        LibAmdPciRead (AccessWidth, PciAddress, &RegValueRead, StdHeader);
        RegValueWrite |= RegValueRead & (~AndMask);
        LibAmdPciWrite (AccessWidth, PciAddress, &RegValueWrite, StdHeader);
      } else {
        // Special-case register: merge via the paired Save/Restore callbacks.
        SpecialCaseIndex = RegisterHdr->RegisterList[i].Type.SpecialCaseIndex;
        RegisterHdr->SpecialCases[SpecialCaseIndex].Save (AccessWidth, PciAddress, &RegValueRead, StdHeader);
        RegValueWrite |= RegValueRead & (~AndMask);
        RegisterHdr->SpecialCases[SpecialCaseIndex].Restore (AccessWidth, PciAddress, &RegValueWrite, StdHeader);
      }
      // Advance the raw-data pointer past this entry's saved bytes.
      IntermediatePtr = (UINT8 *) *OrMask;
      *OrMask = &IntermediatePtr[RegSizeInBytes];
    }
  }
}
/**
 * MemMStandardTrainingUsingAdjacentDies
 *
 * Implements standard memory training whereby training functions for all
 * nodes are run by the BSP, then 2D training is run per die while adjacent
 * dies on the same socket are enabled as aggressor channels.
 *
 * Fix: the MaxRdLat retrain loop previously invoked
 * mmPtr->NBPtr->ChangeNbFrequency (i.e. node 0's NB block) while passing
 * node Die's block as the argument; every other access in this function is
 * indexed by Die, so the callback is now invoked on NBPtr[Die] as well.
 *
 * @param[in,out] mmPtr - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @return TRUE  - No fatal error occurs.
 * @return FALSE - Fatal error occurs.
 */
BOOLEAN
MemMStandardTrainingUsingAdjacentDies (
  IN OUT MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  UINT8 Die;
  UINT8 AdjacentDie;
  UINT32 AdjacentSocketNum;
  UINT32 TargetSocketNum;
  UINT32 ModuleNum;
  UINT8 i;
  UINT8 Dct;
  UINT8 ChipSel;
  BOOLEAN FirstCsFound;

  //
  // If training is disabled, return success.
  //
  if (!UserOptions.CfgDqsTrainingControl) {
    return TRUE;
  }
  mmPtr->mmSharedPtr->CommonSmallestMaxNegVref = 0x7F;
  mmPtr->mmSharedPtr->CommonSmallestMaxPosVref = 0x7F;
  //
  // Run Northbridge-specific Standard Training feature for each die.
  //
  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart standard serial training\n");
  for (Die = 0 ; Die < mmPtr->DieCount ; Die ++ ) {
    IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", Die);
    AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, &(mmPtr->MemPtr->StdHeader));
    mmPtr->NBPtr[Die].BeforeDqsTraining (&mmPtr->NBPtr[Die]);
    // 1D MaxRdLat training is deferred; it runs after 2D training below.
    mmPtr->NBPtr[Die].Execute1dMaxRdLatTraining = FALSE;
    mmPtr->NBPtr[Die].FeatPtr->Training (&mmPtr->NBPtr[Die]);
    if (mmPtr->NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      break;
    }
  }
  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart 2D training with agressors\n");
  for (Die = 0 ; Die < mmPtr->DieCount ; Die ++ ) {
    IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", Die);
    AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, &(mmPtr->MemPtr->StdHeader));
    GetSocketModuleOfNode (Die, &TargetSocketNum, &ModuleNum, &(mmPtr->MemPtr->StdHeader));
    //
    // Enable every populated die on the same socket as an aggressor, and
    // pre-compute each potential aggressor's initial/maximum CS targets.
    //
    for (AdjacentDie = 0; AdjacentDie < mmPtr->DieCount; AdjacentDie++) {
      mmPtr->NBPtr[Die].DieEnabled[AdjacentDie] = FALSE;
      GetSocketModuleOfNode (AdjacentDie, &AdjacentSocketNum, &ModuleNum, &(mmPtr->MemPtr->StdHeader));
      if (TargetSocketNum == AdjacentSocketNum) {
        if (AdjacentDie != Die) {
          if (mmPtr->NBPtr[AdjacentDie].MCTPtr->NodeMemSize != 0) {
            mmPtr->NBPtr[Die].AdjacentDieNBPtr = &mmPtr->NBPtr[AdjacentDie];
            mmPtr->NBPtr[Die].DieEnabled[AdjacentDie] = TRUE;
          }
        } else {
          if (mmPtr->NBPtr[Die].MCTPtr->NodeMemSize != 0) {
            mmPtr->NBPtr[Die].DieEnabled[Die] = TRUE;
          }
        }
        // Determine the initial target CS, Max Dimms and max CS number for all DCTs (potential aggressors)
        if (mmPtr->NBPtr[AdjacentDie].MCTPtr->NodeMemSize != 0) {
          for (Dct = 0; Dct < mmPtr->NBPtr[AdjacentDie].DctCount; Dct++) {
            FirstCsFound = FALSE;
            mmPtr->NBPtr[AdjacentDie].SwitchDCT (&mmPtr->NBPtr[AdjacentDie], Dct);
            // Step by 2 when per-DIMM aggressors are supported, otherwise by CsPerDelay.
            for (ChipSel = 0; ChipSel < mmPtr->NBPtr[AdjacentDie].CsPerChannel; ChipSel = ChipSel + (mmPtr->NBPtr[Die].IsSupported[PerDimmAggressors2D] ? 2 : mmPtr->NBPtr[AdjacentDie].CsPerDelay) ) {
              if ((mmPtr->NBPtr[AdjacentDie].DCTPtr->Timings.CsEnabled & ((UINT16) 1 << ChipSel)) != 0) {
                if (FirstCsFound == FALSE) {
                  // Set Initial CS value for Current Aggressor CS
                  mmPtr->NBPtr[AdjacentDie].InitialAggressorCSTarget[Dct] = ChipSel;
                  mmPtr->NBPtr[AdjacentDie].CurrentAggressorCSTarget[Dct] = mmPtr->NBPtr[AdjacentDie].InitialAggressorCSTarget[Dct];
                  FirstCsFound = TRUE;
                }
                mmPtr->NBPtr[AdjacentDie].MaxAggressorCSEnabled[Dct] = ChipSel;
                mmPtr->NBPtr[AdjacentDie].MaxAggressorDimms[Dct]++;
              }
            }
          }
        }
      }
    }
    if (mmPtr->NBPtr[Die].MCTPtr->NodeMemSize != 0) {
      // Execute Technology specific 2D training features
      i = 0;
      while (memTrainSequenceDDR3[i].TrainingSequenceEnabled != 0) {
        if (memTrainSequenceDDR3[i].TrainingSequenceEnabled (&mmPtr->NBPtr[Die])) {
          mmPtr->NBPtr[Die].TrainingSequenceIndex = i;
          // Execute 2D RdDqs Training
          memTrainSequenceDDR3[i].MemTechFeatBlock->RdDqs2DTraining (mmPtr->NBPtr[Die].TechPtr);
          // Execute MaxRdLat Training After 2D training; repeat while the NB
          // frequency for this die keeps changing.
          do {
            if (memTrainSequenceDDR3[i].MemTechFeatBlock->MaxRdLatencyTraining (mmPtr->NBPtr[Die].TechPtr)) {
              MemFInitTableDrive (&mmPtr->NBPtr[Die], MTAfterMaxRdLatTrn);
            }
          } while (mmPtr->NBPtr[Die].ChangeNbFrequency (&mmPtr->NBPtr[Die]));
          break;
        }
        i++;
      }
    }
    mmPtr->NBPtr[Die].TechPtr->TechnologySpecificHook[LrdimmSyncTrainedDlys] (mmPtr->NBPtr[Die].TechPtr, NULL);
    mmPtr->NBPtr[Die].AfterDqsTraining (&mmPtr->NBPtr[Die]);
    if (mmPtr->NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      break;
    }
  }
  // TRUE only if the 2D loop completed without a fatal break.
  return (BOOLEAN) (Die == mmPtr->DieCount);
}