/**
 *
 *  This function deallocates heap space allocated in memory S3 resume.
 *
 *  Deallocates the NB register tables, the DIE_STRUCT buffer, and the
 *  AMD_MEM_AUTO / AMD_MEM_S3_NB / AMD_MEM_DATA buffers, and returns the
 *  worst (highest) status seen across all deallocations.
 *
 *     @param[in]  *StdHeader - Config handle for library and services
 *
 *     @return          AGESA_STATUS
 *                          - AGESA_ALERT
 *                          - AGESA_FATAL
 *                          - AGESA_SUCCESS
 *                          - AGESA_WARNING
 */
AGESA_STATUS
MemS3Deallocate (
  IN   AMD_CONFIG_PARAMS *StdHeader
  )
{
  AGESA_STATUS RetVal;
  AGESA_STATUS tempRetVal;
  UINT8 Tab;

  RetVal = AGESA_SUCCESS;

  // NB register table buffers: return status intentionally ignored,
  // as some table handles may not have been allocated on this path.
  for (Tab = 0; Tab < NumberOfNbRegTables; Tab++) {
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_NB_REG_TABLE, Tab, 0, 0), StdHeader);
  }

  tempRetVal = HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0), StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_AUTO_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  // BUGFIX: these two originally assigned HeapDeallocateBuffer's result to
  // RetVal directly, clobbering the accumulated worst status and then
  // comparing against a stale tempRetVal. Use tempRetVal like the calls above.
  tempRetVal = HeapDeallocateBuffer (AMD_MEM_S3_NB_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  return RetVal;
}
/**
 *
 *  This function deallocates heap space allocated in memory S3 resume.
 *
 *  Deallocates the DIE_STRUCT buffer and the AMD_MEM_AUTO /
 *  AMD_MEM_S3_NB / AMD_MEM_DATA buffers, and returns the worst
 *  (highest) status seen across all deallocations.
 *
 *     @param[in]  *StdHeader - Config handle for library and services
 *
 *     @return          AGESA_STATUS
 *                          - AGESA_ALERT
 *                          - AGESA_FATAL
 *                          - AGESA_SUCCESS
 *                          - AGESA_WARNING
 */
AGESA_STATUS
MemS3Deallocate (
  IN   AMD_CONFIG_PARAMS *StdHeader
  )
{
  AGESA_STATUS RetVal;
  AGESA_STATUS tempRetVal;

  RetVal = AGESA_SUCCESS;

  tempRetVal = HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0), StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_AUTO_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  // BUGFIX: these two originally assigned HeapDeallocateBuffer's result to
  // RetVal directly, clobbering the accumulated worst status and then
  // comparing against a stale tempRetVal. Use tempRetVal like the calls above.
  tempRetVal = HeapDeallocateBuffer (AMD_MEM_S3_NB_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  return RetVal;
}
/**
 *
 *  This function deallocates heap buffers that were allocated in AmdMemAuto.
 *
 *  Releases the per-die training-data and DCT-struct buffers, then the
 *  system-wide DIE_STRUCT, S3 save, SPD, and AmdMemAuto buffers.
 *  Deallocation failures are ignored; the function always reports success.
 *
 *     @param[in,out]   *MemPtr   - Pointer to the MEM_DATA_STRUCT
 *
 *     @return          AGESA_STATUS
 *                          - AGESA_ALERT
 *                          - AGESA_FATAL
 *                          - AGESA_SUCCESS
 *                          - AGESA_WARNING
 */
AGESA_STATUS
MemAmdFinalize (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  UINT8 DieIndex;

  StdHeader = &MemPtr->StdHeader;

  // Per-die buffers: one training-data and one DCT-struct buffer per die.
  for (DieIndex = 0; DieIndex < MemPtr->DieCount; DieIndex++) {
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_TRN_DATA_HANDLE, DieIndex, 0, 0), StdHeader);
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DCT_STRUCT_HANDLE, DieIndex, 0, 0), StdHeader);
  }

  // System-wide buffers.
  HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0), StdHeader);
  HeapDeallocateBuffer (AMD_S3_SAVE_HANDLE, StdHeader);
  HeapDeallocateBuffer (AMD_MEM_SPD_HANDLE, StdHeader);
  HeapDeallocateBuffer (AMD_MEM_AUTO_HANDLE, StdHeader);

  return AGESA_SUCCESS;
}
/**
 *  Constructs and initializes the DA-family northbridge block for one node.
 *
 *  Verifies the node's CPU is a supported DA part, wires the NB block to the
 *  global memory data / shared data, then carves one heap allocation into the
 *  per-node DCT_STRUCTs, CH_DEF_STRUCTs and MEM_PS_BLOCKs, and finally
 *  resolves the SPD data pointer for every DCT/channel.
 *
 *     @param[in,out]   *NBPtr     - Pointer to the MEM_NB_BLOCK to construct
 *     @param[in,out]   *MemPtr    - Pointer to the MEM_DATA_STRUCT
 *     @param[in]       *FeatPtr   - Pointer to the NB feature block
 *     @param[in]       *SharedPtr - Pointer to the shared memory data
 *     @param[in]        NodeID    - Node index of this die
 *
 *     @return          TRUE  - NB block constructed
 *     @return          FALSE - CPU not a supported DA part, or heap
 *                              allocation for DCT/channel structures failed
 */
BOOLEAN
MemConstructNBBlockDA (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN OUT   MEM_DATA_STRUCT *MemPtr,
  IN       MEM_FEAT_BLOCK_NB *FeatPtr,
  IN       MEM_SHARED_DATA *SharedPtr,
  IN       UINT8 NodeID
  )
{
  UINT8 Dct;
  UINT8 Channel;
  UINT8 SpdSocketIndex;
  UINT8 SpdChannelIndex;
  DIE_STRUCT *MCTPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;

  //
  // Determine if this is the expected NB Type
  //
  GetLogicalIdOfSocket (MemPtr->DiesPerSystem[NodeID].SocketId, &(MemPtr->DiesPerSystem[NodeID].LogicalCpuid), &(MemPtr->StdHeader));
  if (!MemNIsIdSupportedDA (NBPtr, &(MemPtr->DiesPerSystem[NodeID].LogicalCpuid))) {
    return FALSE;
  }

  // Hook this NB block up to the global memory data and shared data.
  NBPtr->MemPtr = MemPtr;
  NBPtr->RefPtr = MemPtr->ParameterListPtr;
  NBPtr->SharedPtr = SharedPtr;

  MCTPtr = &(MemPtr->DiesPerSystem[NodeID]);
  NBPtr->MCTPtr = MCTPtr;
  NBPtr->MCTPtr->NodeId = NodeID;
  NBPtr->PciAddr.AddressValue = MCTPtr->PciAddr.AddressValue;
  NBPtr->VarMtrrHiMsk = GetVarMtrrHiMsk (&(MemPtr->DiesPerSystem[NodeID].LogicalCpuid), &(MemPtr->StdHeader));

  //
  // Allocate buffer for DCT_STRUCTs and CH_DEF_STRUCTs
  // (one contiguous allocation; carved up below by advancing BufferPtr)
  //
  AllocHeapParams.RequestedBufferSize = MAX_DCTS_PER_NODE_DA * (
                                          sizeof (DCT_STRUCT) + (
                                            MAX_CHANNELS_PER_DCT_DA * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                                          )
                                        );
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_DCT_STRUCT_HANDLE, NodeID, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_DCT_STRUCT_AND_CH_DEF_STRUCTs, NBPtr->Node, 0, 0, 0, &MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    return FALSE;
  }

  // Layout of the allocation: [DCT_STRUCT x DCTs][CH_DEF_STRUCT x DCTs*channels][MEM_PS_BLOCK ...]
  MCTPtr->DctCount = MAX_DCTS_PER_NODE_DA;
  MCTPtr->DctData = (DCT_STRUCT *) AllocHeapParams.BufferPtr;
  AllocHeapParams.BufferPtr += MAX_DCTS_PER_NODE_DA * sizeof (DCT_STRUCT);
  for (Dct = 0; Dct < MAX_DCTS_PER_NODE_DA; Dct++) {
    MCTPtr->DctData[Dct].Dct = Dct;
    MCTPtr->DctData[Dct].ChannelCount = MAX_CHANNELS_PER_DCT_DA;
    MCTPtr->DctData[Dct].ChData = (CH_DEF_STRUCT *) AllocHeapParams.BufferPtr;
    AllocHeapParams.BufferPtr += MAX_CHANNELS_PER_DCT_DA * sizeof (CH_DEF_STRUCT);
  }
  NBPtr->PSBlock = (MEM_PS_BLOCK *) AllocHeapParams.BufferPtr;

  //
  // Initialize Socket List: expose each DCT's channel and timings structures
  // through the customer-visible socket map.
  //
  for (Dct = 0; Dct < MAX_DCTS_PER_NODE_DA; Dct++) {
    MemPtr->SocketList[MCTPtr->SocketId].ChannelPtr[Dct] = &(MCTPtr->DctData[Dct].ChData[0]);
    MemPtr->SocketList[MCTPtr->SocketId].TimingsPtr[Dct] = &(MCTPtr->DctData[Dct].Timings);
    MCTPtr->DctData[Dct].ChData[0].ChannelID = Dct;
  }

  MemNInitNBDataDA (NBPtr);
  FeatPtr->InitCPG (NBPtr);
  NBPtr->FeatPtr = FeatPtr;
  FeatPtr->InitHwRxEn (NBPtr);

  //
  // Calculate SPD Offsets per channel and assign pointers to the data. At this point, we calculate the Node-Dct-Channel
  // centric offsets and store the pointers to the first DIMM of each channel in the Channel Definition struct for that
  // channel. This pointer is then used later to calculate the offsets to be used for each logical dimm once the
  // dimm types (QR or not) are known. This is done in the Technology block constructor.
  //
  // Calculate the SpdSocketIndex separately from the SpdChannelIndex.
  // This will facilitate modifications due to some processors that might
  // map the DCT-CHANNEL differently.
  //
  SpdSocketIndex = GetSpdSocketIndex (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, &MemPtr->StdHeader);
  //
  // Traverse the Dct/Channel structures
  //
  for (Dct = 0; Dct < MAX_DCTS_PER_NODE_DA; Dct++) {
    for (Channel = 0; Channel < MAX_CHANNELS_PER_DCT_DA; Channel++) {
      //
      // Calculate the number of Dimms on this channel using the
      // die/dct/channel to Socket/channel conversion.
      //
      SpdChannelIndex = GetSpdChannelIndex (NBPtr->RefPtr->PlatformMemoryConfiguration,
                                            NBPtr->MCTPtr->SocketId,
                                            MemNGetSocketRelativeChannelNb (NBPtr, Dct, Channel),
                                            &MemPtr->StdHeader);
      NBPtr->MCTPtr->DctData[Dct].ChData[Channel].SpdPtr = &(MemPtr->SpdDataStructure[SpdSocketIndex + SpdChannelIndex]);
    }
  }

  // Leave the NB block pointed at DCT 0.
  MemNSwitchDCTNb (NBPtr, 0);

  return TRUE;
}
/**
 *  BSP-side driver for parallel DRAM training across all dies.
 *
 *  Launches training on every remote (non-BSP) die, runs the BSP's own
 *  training flow, then polls each remote core, receiving its training
 *  results into a temporary heap buffer and copying them back into the
 *  die's DIE_STRUCT / DCT_STRUCT / CH_DEF_STRUCT / MEM_PS_BLOCK / delay
 *  tables. Polling is bounded by PARALLEL_TRAINING_TIMEOUT unless the
 *  IDS timeout control disables the timeout.
 *
 *     @param[in,out]   *mmPtr   - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 *     @return          TRUE  - No fatal error occurs.
 *     @return          FALSE - Fatal error occurs.
 */
BOOLEAN
MemMParallelTraining (
  IN OUT   MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  MEM_DATA_STRUCT *MemPtr;
  MEM_NB_BLOCK *NBPtr;
  DIE_INFO TrainInfo[MAX_NODES_SUPPORTED];
  AP_DATA_TRANSFER ReturnData;
  AGESA_STATUS Status;
  UINT8 ApSts;
  UINT8 Die;
  UINT8 Socket;
  UINT32 Module;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 Time;
  UINT32 TimeOut;
  UINT32 TargetApicId;
  BOOLEAN StillTraining;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 *BufferPtr;
  BOOLEAN TimeoutEn;

  NBPtr = mmPtr->NBPtr;
  MemPtr = mmPtr->MemPtr;
  StdHeader = &(mmPtr->MemPtr->StdHeader);
  Time = 0;
  TimeOut = PARALLEL_TRAINING_TIMEOUT;
  TimeoutEn = TRUE;
  IDS_TIMEOUT_CTL (&TimeoutEn);

  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart parallel training\n");
  AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, StdHeader);
  //
  // Initialize Training Info Array: record the socket and first core of
  // each die; Training flags are armed below as dies are launched.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++) {
    Socket = TrainInfo[Die].Socket = NBPtr[Die].MCTPtr->SocketId;
    Module = NBPtr[Die].MCTPtr->DieId;
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    TrainInfo[Die].Core = (UINT8) (LowCore & 0x000000FF);
    IDS_HDT_CONSOLE (MEM_FLOW, "\tLaunch core %d of socket %d\n", LowCore, Socket);
    TrainInfo[Die].Training = FALSE;
  }
  //
  // Start Training on each remote die.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (Die != BSP_DIE) {
      NBPtr[Die].BeforeDqsTraining (&(mmPtr->NBPtr[Die]));
      if (NBPtr[Die].MCTPtr->NodeMemSize != 0) {
        if (!NBPtr[Die].FeatPtr->Training (&(mmPtr->NBPtr[Die]))) {
          // Fail to launch code on AP.
          // BUGFIX: was NBPtr->Node / NBPtr->Dct / NBPtr->Channel, which always
          // logged die 0's identity; use NBPtr[Die] so the failing die is logged.
          PutEventLog (AGESA_ERROR, MEM_ERROR_PARALLEL_TRAINING_LAUNCH_FAIL, NBPtr[Die].Node, NBPtr[Die].Dct, NBPtr[Die].Channel, 0, &NBPtr[Die].MemPtr->StdHeader);
          SetMemError (AGESA_ERROR, NBPtr[Die].MCTPtr);
          MemPtr->ErrorHandling (NBPtr[Die].MCTPtr, EXCLUDE_ALL_DCT, EXCLUDE_ALL_CHIPSEL, &MemPtr->StdHeader);
        } else {
          TrainInfo[Die].Training = TRUE;
        }
      }
    }
  }
  //
  // Call training on BSP
  //
  IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NBPtr[BSP_DIE].Node);
  NBPtr[BSP_DIE].BeforeDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].TrainingFlow (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].AfterDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  //
  // Get results from remote processors training
  //
  do {
    StillTraining = FALSE;
    for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
      //
      // For each die that is training, read the status byte.
      //
      if (TrainInfo[Die].Training == TRUE) {
        GetLocalApicIdForCore (TrainInfo[Die].Socket, TrainInfo[Die].Core, &TargetApicId, StdHeader);
        ApSts = ApUtilReadRemoteControlByte (TargetApicId, StdHeader);
        if ((ApSts & 0x80) == 0) {
          //
          // AP is ready to transmit. Allocate buffer for received data
          // (+3 so the DWORD division below rounds up).
          //
          AllocHeapParams.RequestedBufferSize = (
            sizeof (DIE_STRUCT) +
            NBPtr[Die].DctCount * (
              sizeof (DCT_STRUCT) + (
                NBPtr[Die].ChannelCount * (
                  sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                    (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                     NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                     NUMBER_OF_DELAY_TABLES) +
                    (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                  )
                )
              )
            )
          ) + 3;
          AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Die, 0, 0);
          AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
          if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
            //
            // Receive training results
            //
            ReturnData.DataPtr = AllocHeapParams.BufferPtr;
            ReturnData.DataSizeInDwords = (UINT16) AllocHeapParams.RequestedBufferSize / 4;
            ReturnData.DataTransferFlags = 0;
            Status = ApUtilReceiveBuffer (TrainInfo[Die].Socket, TrainInfo[Die].Core, &ReturnData, StdHeader);
            if (Status != AGESA_SUCCESS) {
              SetMemError (Status, NBPtr[Die].MCTPtr);
            }
            // Unpack the flat buffer back into the die's structures, in the
            // same order the AP packed them: DIE_STRUCT, DCT+CH structs,
            // PS blocks, then the delay/failure-mask tables.
            BufferPtr = AllocHeapParams.BufferPtr;
            LibAmdMemCopy (NBPtr[Die].MCTPtr, BufferPtr, sizeof (DIE_STRUCT), StdHeader);
            BufferPtr += sizeof (DIE_STRUCT);
            LibAmdMemCopy (NBPtr[Die].MCTPtr->DctData,
                           BufferPtr,
                           NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT)),
                           StdHeader);
            BufferPtr += NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT));
            LibAmdMemCopy (NBPtr[Die].PSBlock,
                           BufferPtr,
                           NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK),
                           StdHeader);
            BufferPtr += NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK);
            LibAmdMemCopy (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RcvEnDlys,
                           BufferPtr,
                           (NBPtr[Die].DctCount * NBPtr[Die].ChannelCount) *
                           ((NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                             NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                             NUMBER_OF_DELAY_TABLES) +
                            (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)),
                           StdHeader);
            HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);
            NBPtr[Die].AfterDqsTraining (&(mmPtr->NBPtr[Die]));
            TrainInfo[Die].Training = FALSE;
          } else {
            PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_RECEIVED_DATA, NBPtr[Die].Node, 0, 0, 0, StdHeader);
            SetMemError (AGESA_FATAL, NBPtr[Die].MCTPtr);
            ASSERT(FALSE); // Insufficient Heap Space allocation for parallel training buffer
          }
        } else if (ApSts == CORE_IDLE) {
          // AP does not have buffer to transmit to BSP
          // AP fails to locate a buffer for data transfer
          TrainInfo[Die].Training = FALSE;
        } else {
          // Signal to loop through again
          StillTraining = TRUE;
        }
      }
    }
    // Wait for 1 us (100 * 10 ns) between polls.
    MemUWait10ns (100, NBPtr->MemPtr);
    Time ++;
  } while ((StillTraining) && ((Time < TimeOut) || !TimeoutEn)); // Continue until all dies are finished

  // If training cannot finish in time, do fatal exit: at least one AP hangs.
  if (StillTraining && TimeoutEn) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_PARALLEL_TRAINING_TIME_OUT, 0, 0, 0, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, NBPtr[BSP_DIE].MCTPtr);
    ASSERT(FALSE); // Timeout occurred while still training
  }

  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      return FALSE;
    }
  }
  return TRUE;
}
/**
 *  Memory recovery entry point.
 *
 *  Reads SPD data, retrieves the socket with memory from the heap,
 *  allocates the DIE_STRUCT array, discovers populated dies on that
 *  socket, selects the first matching recovery NB constructor, attaches
 *  the recovery technology block, and runs the NB recovery flow.
 *
 *     @param[in,out]   *MemPtr   - Pointer to the MEM_DATA_STRUCT
 *
 *     @return          AGESA_STATUS from the NB recovery flow, or
 *                      AGESA_FATAL on heap/NB-discovery failure.
 */
AGESA_STATUS
AmdMemRecovery (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  UINT8 Socket;
  UINT8 Module;
  UINT8 NbIndex;
  AGESA_STATUS AgesaStatus;
  PCI_ADDR Address;
  MEM_NB_BLOCK NBBlock;
  MEM_TECH_BLOCK TechBlock;
  LOCATE_HEAP_PTR SocketWithMem;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;

  // Read SPD data first.
  MemRecSPDDataProcess (MemPtr);

  // Get the socket id from heap; without it recovery cannot proceed.
  SocketWithMem.BufferHandle = AMD_REC_MEM_SOCKET_HANDLE;
  if (HeapLocateBuffer (&SocketWithMem, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    ASSERT(FALSE); // Socket handle not found
    return AGESA_FATAL;
  }
  Socket = *(UINT8 *) SocketWithMem.BufferPtr;

  // Allocate the DIE_STRUCT array used by the memory init structures.
  AllocHeapParams.RequestedBufferSize = MAX_DIES_PER_SOCKET * sizeof (DIE_STRUCT);
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    ASSERT(FALSE); // Heap allocation failed to allocate Die struct
    return AGESA_FATAL;
  }
  MemPtr->DiesPerSystem = (DIE_STRUCT *)AllocHeapParams.BufferPtr;

  // Discover populated dies on the chosen socket.
  for (Module = 0; Module < MAX_DIES_PER_SOCKET; Module++) {
    if (GetPciAddress ((VOID *)MemPtr, Socket, Module, &Address, &AgesaStatus)) {
      MemPtr->DiesPerSystem[Module].SocketId = Socket;
      MemPtr->DiesPerSystem[Module].DieId = Module;
      MemPtr->DiesPerSystem[Module].PciAddr.AddressValue = Address.AddressValue;
    }
  }

  // Select the first recovery NB constructor that claims this platform.
  for (NbIndex = 0; MemRecNBInstalled[NbIndex] != NULL; NbIndex++) {
    if (MemRecNBInstalled[NbIndex] (&NBBlock, MemPtr, 0) == TRUE) {
      break;
    }
  }
  if (MemRecNBInstalled[NbIndex] == NULL) {
    ASSERT(FALSE); // No NB installed
    return AGESA_FATAL;
  }

  // Attach the technology block and run recovery.
  MemRecTechInstalled[0] (&TechBlock, &NBBlock);
  NBBlock.TechPtr = &TechBlock;

  return NBBlock.InitRecovery (&NBBlock);
}
/**
 *
 *  MemSocketScan - Scan all nodes, recording the physical socket number,
 *  die number (relative to the socket), and PCI device address of each
 *  populated socket.
 *
 *  This information is used by the northbridge block to map a dram
 *  channel on a particular DCT, on a particular CPU die, in a particular
 *  socket to the DRAM SPD data for the DIMMs physically connected to
 *  that channel.
 *
 *  Also, the customer socket map is populated with pointers to the
 *  appropriate channel structures, so that the customer can locate the
 *  appropriate channel configuration data.
 *
 *  This socket scan will always result in Die 0 as the BSP.
 *
 *     @param[in,out]   *mmPtr   - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 *     @return          AGESA_SUCCESS - die list built
 *     @return          AGESA_FATAL   - no die found, or heap allocation failed
 */
AGESA_STATUS
MemSocketScan (
  IN OUT   MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  MEM_DATA_STRUCT *MemPtr;
  UINT8 DieIndex;
  UINT8 DieCount;
  UINT32 SocketId;
  UINT32 DieId;
  UINT8 Node;
  PCI_ADDR Address;
  AGESA_STATUS AgesaStatus;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;

  ASSERT (mmPtr != NULL);
  ASSERT (mmPtr->MemPtr != NULL);
  MemPtr = mmPtr->MemPtr;

  // First pass: count the populated dies in the system.
  DieCount = 0;
  for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
    if (GetSocketModuleOfNode ((UINT32)Node, &SocketId, &DieId, (VOID *)MemPtr)) {
      DieCount++;
    }
  }
  MemPtr->DieCount = DieCount;
  mmPtr->DieCount = DieCount;

  if (DieCount == 0) {
    ASSERT(FALSE); // No die in the system
    return AGESA_FATAL;
  }

  // Allocate one DIE_STRUCT per populated die.
  AllocHeapParams.RequestedBufferSize = ((UINT16)DieCount * sizeof (DIE_STRUCT));
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    ASSERT(FALSE); // Heap allocation failed for DIE_STRUCTs
    return AGESA_FATAL;
  }
  MemPtr->DiesPerSystem = (DIE_STRUCT *)AllocHeapParams.BufferPtr;

  // Second pass: record SocketId, DieId, and PCI address of each node.
  DieIndex = 0;
  for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
    if (!GetSocketModuleOfNode ((UINT32)Node, &SocketId, &DieId, (VOID *)MemPtr)) {
      continue;
    }
    if (!GetPciAddress ((VOID *)MemPtr, (UINT8)SocketId, (UINT8)DieId, &Address, &AgesaStatus)) {
      continue;
    }
    MemPtr->DiesPerSystem[DieIndex].SocketId = (UINT8)SocketId;
    MemPtr->DiesPerSystem[DieIndex].DieId = (UINT8)DieId;
    MemPtr->DiesPerSystem[DieIndex].PciAddr.AddressValue = Address.AddressValue;
    DieIndex++;
  }

  return AGESA_SUCCESS;
}
/**
 *
 *  This is the training function which sets up the environment for remote
 *  training on the AP and launches the remote routine.
 *
 *  Packs an AMD_CONFIG_PARAMS copy, the die's DIE_STRUCT, platform
 *  callbacks, and the DCT/channel data into one heap buffer
 *  (REMOTE_TRAINING_ENV followed by the MCT data), then dispatches
 *  MemFParallelTraining to the first core of the target die.
 *
 *     @param[in,out]   *NBPtr   - Pointer to the MEM_NB_BLOCK
 *
 *     @return          TRUE  - Launch training on AP successfully.
 *     @return          FALSE - Fail to launch training on AP.
 */
BOOLEAN
MemFParallelTrainingHy (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  DIE_STRUCT *MCTPtr;
  REMOTE_TRAINING_ENV *EnvPtr;
  AP_TASK TrainingTask;
  UINT8 Socket;
  UINT8 Module;
  UINT8 APCore;
  UINT8 p;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 BspSocket;
  UINT32 BspModule;
  UINT32 BspCore;
  AGESA_STATUS Status;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT16 MctDataSize;

  StdHeader = &(NBPtr->MemPtr->StdHeader);
  MCTPtr = NBPtr->MCTPtr;
  Socket = MCTPtr->SocketId;
  Module = MCTPtr->DieId;

  //
  // Allocate buffer for REMOTE_TRAINING_ENV followed by the MCT data
  // (DCT structs + per-channel structs + PS blocks) in one allocation.
  //
  MctDataSize = MAX_DCTS_PER_NODE_HY * (
                  sizeof (DCT_STRUCT) + (
                    MAX_CHANNELS_PER_DCT_HY * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                  )
                );
  AllocHeapParams.RequestedBufferSize = MctDataSize + sizeof (REMOTE_TRAINING_ENV);
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Socket, Module, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    EnvPtr = (REMOTE_TRAINING_ENV *) AllocHeapParams.BufferPtr;
    AllocHeapParams.BufferPtr += sizeof (REMOTE_TRAINING_ENV);

    //
    // Setup remote training environment: copy config header, die struct,
    // platform callbacks and the reference parameters the AP will need.
    //
    LibAmdMemCopy (&(EnvPtr->StdHeader), StdHeader, sizeof (AMD_CONFIG_PARAMS), StdHeader);
    LibAmdMemCopy (&(EnvPtr->DieStruct), MCTPtr, sizeof (DIE_STRUCT), StdHeader);
    for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
      EnvPtr->GetPlatformCfg[p] = NBPtr->MemPtr->GetPlatformCfg[p];
    }
    EnvPtr->ErrorHandling = NBPtr->MemPtr->ErrorHandling;
    EnvPtr->NBBlockCtor = MemConstructRemoteNBBlockHY;
    EnvPtr->FeatPtr = NBPtr->FeatPtr;
    EnvPtr->HoleBase = NBPtr->RefPtr->HoleBase;
    EnvPtr->BottomIo = NBPtr->RefPtr->BottomIo;
    EnvPtr->UmaSize = NBPtr->RefPtr->UmaSize;
    EnvPtr->SysLimit = NBPtr->RefPtr->SysLimit;
    EnvPtr->TableBasedAlterations = NBPtr->RefPtr->TableBasedAlterations;
    EnvPtr->PlatformMemoryConfiguration = NBPtr->RefPtr->PlatformMemoryConfiguration;

    // Append the MCT data right after the environment structure.
    LibAmdMemCopy (AllocHeapParams.BufferPtr, MCTPtr->DctData, MctDataSize, StdHeader);

    //
    // Get Socket, Core of the BSP so the AP knows where to send results.
    //
    IdentifyCore (StdHeader, &BspSocket, &BspModule, &BspCore, &Status);
    EnvPtr->BspSocket = ((UINT8)BspSocket & 0x000000FF);
    EnvPtr->BspCore = ((UINT8)BspCore & 0x000000FF);

    //
    // Set up the remote task structure (+3 so the DWORD division rounds up).
    //
    TrainingTask.DataTransfer.DataPtr = EnvPtr;
    TrainingTask.DataTransfer.DataSizeInDwords = (UINT16) (AllocHeapParams.RequestedBufferSize + 3) / 4;
    TrainingTask.DataTransfer.DataTransferFlags = 0;
    TrainingTask.ExeFlags = 0;
    TrainingTask.FuncAddress.PfApTaskI = (PF_AP_TASK_I)MemFParallelTraining;

    //
    // Get target AP core: first core of the die.
    //
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    APCore = (UINT8) (LowCore & 0x000000FF);

    //
    // Launch remote training, then release the environment buffer
    // (the task data has been transferred to the AP).
    //
    ApUtilRunCodeOnSocketCore (Socket, APCore, &TrainingTask, StdHeader);

    HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);

    return TRUE;
  } else {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_REMOTE_TRAINING_ENV, NBPtr->Node, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocated heap space for "REMOTE_TRAINING_ENV"
    return FALSE;
  }
}
/**
 *
 *  This is the main function to perform parallel training on all nodes.
 *  This is the routine which will run on the remote AP.
 *
 *  Rebuilds a working DIE_STRUCT (with delay tables) from the packed
 *  REMOTE_TRAINING_ENV, constructs local NB/Tech blocks, runs the
 *  training flow, then packs the results and transmits them back to
 *  the BSP.
 *
 *     @param[in,out]   *EnvPtr    - Pointer to the Training Environment Data
 *     @param[in,out]   *StdHeader - Pointer to the Standard Header of the AP
 *
 *     @return          TRUE  - This feature is enabled.
 *     @return          FALSE - This feature is not enabled.
 */
BOOLEAN
MemFParallelTraining (
  IN OUT   REMOTE_TRAINING_ENV *EnvPtr,
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  MEM_PARAMETER_STRUCT ParameterList;
  MEM_NB_BLOCK NB;
  MEM_TECH_BLOCK TB;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  MEM_DATA_STRUCT *MemPtr;
  DIE_STRUCT *MCTPtr;
  UINT8 p;
  UINT8 i;
  UINT8 Dct;
  UINT8 Channel;
  UINT8 *BufferPtr;
  UINT8 DctCount;
  UINT8 ChannelCount;
  UINT8 RowCount;
  UINT8 ColumnCount;
  UINT16 SizeOfNewBuffer;
  AP_DATA_TRANSFER ReturnData;

  //
  // Initialize parameters
  //
  ReturnData.DataPtr = NULL;
  ReturnData.DataSizeInDwords = 0;
  ReturnData.DataTransferFlags = 0;

  ASSERT (EnvPtr != NULL);
  //
  // Replace standard header of the AP with the one sent by the BSP.
  //
  LibAmdMemCopy (StdHeader, &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), &(EnvPtr->StdHeader));

  //
  // Walk the packed data to recover the DCT/channel geometry, then size a
  // working buffer that also holds the delay and failure-mask tables.
  //
  BufferPtr = (UINT8 *) (&EnvPtr->DieStruct);
  DctCount = EnvPtr->DieStruct.DctCount;
  BufferPtr += sizeof (DIE_STRUCT);
  ChannelCount = ((DCT_STRUCT *) BufferPtr)->ChannelCount;
  BufferPtr += DctCount * sizeof (DCT_STRUCT);
  RowCount = ((CH_DEF_STRUCT *) BufferPtr)->RowCount;
  ColumnCount = ((CH_DEF_STRUCT *) BufferPtr)->ColumnCount;

  SizeOfNewBuffer = sizeof (DIE_STRUCT) +
                    DctCount * (
                      sizeof (DCT_STRUCT) + (
                        ChannelCount * (
                          sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                            RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES +
                            (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
                            (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
                          )
                        )
                      )
                    );
  AllocHeapParams.RequestedBufferSize = SizeOfNewBuffer;
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    BufferPtr = AllocHeapParams.BufferPtr;
    LibAmdMemCopy (
      BufferPtr,
      &(EnvPtr->DieStruct),
      sizeof (DIE_STRUCT) + DctCount * (sizeof (DCT_STRUCT) + ChannelCount * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))),
      StdHeader
    );

    //
    // Fix up pointers so the copied structures reference each other inside
    // the new buffer.
    //
    MCTPtr = (DIE_STRUCT *) BufferPtr;
    BufferPtr += sizeof (DIE_STRUCT);
    MCTPtr->DctData = (DCT_STRUCT *) BufferPtr;
    BufferPtr += MCTPtr->DctCount * sizeof (DCT_STRUCT);
    for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
      MCTPtr->DctData[Dct].ChData = (CH_DEF_STRUCT *) BufferPtr;
      BufferPtr += MCTPtr->DctData[Dct].ChannelCount * sizeof (CH_DEF_STRUCT);
      for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
        MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = MCTPtr;
        MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &MCTPtr->DctData[Dct];
      }
    }
    NB.PSBlock = (MEM_PS_BLOCK *) BufferPtr;
    BufferPtr += DctCount * ChannelCount * sizeof (MEM_PS_BLOCK);

    // The whole working buffer is what gets transmitted back to the BSP.
    ReturnData.DataPtr = AllocHeapParams.BufferPtr;
    ReturnData.DataSizeInDwords = (SizeOfNewBuffer + 3) / 4;
    ReturnData.DataTransferFlags = 0;

    //
    // Allocate memory for the MEM_DATA_STRUCT we will use.
    //
    AllocHeapParams.RequestedBufferSize = sizeof (MEM_DATA_STRUCT);
    AllocHeapParams.BufferHandle = AMD_MEM_DATA_HANDLE;
    AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
    if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
      MemPtr = (MEM_DATA_STRUCT *)AllocHeapParams.BufferPtr;
      LibAmdMemCopy (&(MemPtr->StdHeader), &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), StdHeader);

      //
      // Copy parameters from the environment sent by the BSP.
      //
      ParameterList.HoleBase = EnvPtr->HoleBase;
      ParameterList.BottomIo = EnvPtr->BottomIo;
      ParameterList.UmaSize = EnvPtr->UmaSize;
      ParameterList.SysLimit = EnvPtr->SysLimit;
      ParameterList.TableBasedAlterations = EnvPtr->TableBasedAlterations;
      ParameterList.PlatformMemoryConfiguration = EnvPtr->PlatformMemoryConfiguration;
      MemPtr->ParameterListPtr = &ParameterList;
      for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
        MemPtr->GetPlatformCfg[p] = EnvPtr->GetPlatformCfg[p];
      }
      MemPtr->ErrorHandling = EnvPtr->ErrorHandling;

      //
      // Create local NB block and Tech block.
      //
      EnvPtr->NBBlockCtor (&NB, MCTPtr, EnvPtr->FeatPtr);
      NB.RefPtr = &ParameterList;
      NB.MemPtr = MemPtr;
      i = 0;
      while (memTechInstalled[i] != NULL) {
        if (memTechInstalled[i] (&TB, &NB)) {
          break;
        }
        i++;
      }
      NB.TechPtr = &TB;
      NB.TechBlockSwitch (&NB);

      //
      // Setup CPU Mem Type MSRs on the AP.
      //
      NB.CpuMemTyping (&NB);

      IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NB.Node);
      //
      // Call technology specific training routine.
      //
      NB.TrainingFlow (&NB);

      //
      // Copy training data (delay tables, failure masks) into the
      // transmit buffer, right after the PS blocks.
      //
      LibAmdMemCopy (
        BufferPtr,
        MCTPtr->DctData[0].ChData[0].RcvEnDlys,
        ((DctCount * ChannelCount) * (
          (RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES) +
          (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
          (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
        )),
        StdHeader);

      HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);

      //
      // Restore pointers back into the original environment structures.
      // BUGFIX: the original code assigned RdDqsDlys twice in a row; the
      // redundant duplicate assignment has been removed.
      //
      for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
        for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
          MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = &EnvPtr->DieStruct;
          MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &EnvPtr->DieStruct.DctData[Dct];
          MCTPtr->DctData[Dct].ChData[Channel].RcvEnDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RcvEnDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqs2dDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqs2dDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].FailingBitMask = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].FailingBitMask;
        }
        MCTPtr->DctData[Dct].ChData = EnvPtr->DieStruct.DctData[Dct].ChData;
      }
      MCTPtr->DctData = EnvPtr->DieStruct.DctData;
    }

    //
    // Signal to BSP that training is complete and send results.
    //
    ASSERT (ReturnData.DataPtr != NULL);
    ApUtilTransmitBuffer (EnvPtr->BspSocket, EnvPtr->BspCore, &ReturnData, StdHeader);

    //
    // Clean up and exit.
    //
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0), StdHeader);
  } else {
    MCTPtr = &EnvPtr->DieStruct;
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_TRAINING_DATA, MCTPtr->NodeId, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocate heap for buffer for parallel training data
  }
  return TRUE;
}
/**
 *
 *  This function is the main memory configuration function for DR DDR3
 *
 *  Requirements:
 *
 *  Run-Time Requirements:
 *  1. Complete Hypertransport Bus Configuration
 *  2. AmdMemInitDataStructDef must be run to set default values
 *  3. MSR bit to allow access to high PCI regs set on all nodes
 *  4. BSP in Big Real Mode
 *  5. Stack available
 *  6. MCG_CTL=-1, MC4_EN=0 for all CPUs
 *  7. MCi_STS from shutdown/warm reset recorded (if desired) prior to entry
 *  8. All var MTRRs reset to zero
 *  9. State of NB_CFG.DisDatMsk set properly on all CPUs
 *
 *     @param[in,out]   *MemPtr   - Pointer to the MEM_DATA_STRUCT
 *
 *     @return          AGESA_STATUS
 *                          - AGESA_ALERT
 *                          - AGESA_FATAL
 *                          - AGESA_SUCCESS
 *                          - AGESA_WARNING
 */
AGESA_STATUS
AmdMemAuto (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  MEM_SHARED_DATA mmSharedData;
  MEM_MAIN_DATA_BLOCK mmData;
  MEM_NB_BLOCK *NBPtr;
  MEM_TECH_BLOCK *TechPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  AGESA_STATUS Retval;
  UINT8 i;
  UINT8 Die;
  UINT8 DieCount;
  UINT8 Tab;
  CPU_SPECIFIC_SERVICES *FamilySpecificServices;

  ASSERT (MemPtr != NULL);
  AGESA_TESTPOINT (TpProcMemAmdMemAuto, &MemPtr->StdHeader);

  // Dump the incoming memory parameters to the debug console.
  IDS_HDT_CONSOLE (MEM_FLOW, "MEM PARAMS:\n");
  IDS_HDT_CONSOLE (MEM_FLOW, "\tBottomIo : %04x\n", MemPtr->ParameterListPtr->BottomIo);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemHoleRemap : %d\n", MemPtr->ParameterListPtr->MemHoleRemapping);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tLimitBelow1TB : %d\n", MemPtr->ParameterListPtr->LimitMemoryToBelow1Tb);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tUserTimingMode : %d\n", MemPtr->ParameterListPtr->UserTimingMode);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemClockValue : %d\n", MemPtr->ParameterListPtr->MemClockValue);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tBankIntlv : %d\n", MemPtr->ParameterListPtr->EnableBankIntlv);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tNodeIntlv : %d\n", MemPtr->ParameterListPtr->EnableNodeIntlv);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tChannelIntlv : %d\n", MemPtr->ParameterListPtr->EnableChannelIntlv);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tEccFeature : %d\n", MemPtr->ParameterListPtr->EnableEccFeature);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tPowerDown : %d\n", MemPtr->ParameterListPtr->EnablePowerDown);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tOnLineSpare : %d\n", MemPtr->ParameterListPtr->EnableOnLineSpareCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tParity : %d\n", MemPtr->ParameterListPtr->EnableParity);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tBankSwizzle : %d\n", MemPtr->ParameterListPtr->EnableBankSwizzle);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemClr : %d\n", MemPtr->ParameterListPtr->EnableMemClr);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tUmaMode : %d\n", MemPtr->ParameterListPtr->UmaMode);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tUmaSize : %d\n", MemPtr->ParameterListPtr->UmaSize);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemRestoreCtl : %d\n", MemPtr->ParameterListPtr->MemRestoreCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tSaveMemContextCtl : %d\n", MemPtr->ParameterListPtr->SaveMemContextCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tExternalVrefCtl : %d\n", MemPtr->ParameterListPtr->ExternalVrefCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tForceTrainMode : %d\n\n", MemPtr->ParameterListPtr->ForceTrainMode);

  //----------------------------------------------------------------------------
  // Get TSC rate, which will be used later in Wait10ns routine
  //----------------------------------------------------------------------------
  GetCpuServicesOfCurrentCore ((CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, &MemPtr->StdHeader);
  FamilySpecificServices->GetTscRate (FamilySpecificServices, &MemPtr->TscRate, &MemPtr->StdHeader);

  //----------------------------------------------------------------------------
  // Read In SPD Data
  //----------------------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemBeforeSpdProcessing, &MemPtr->StdHeader);
  MemSPDDataProcess (MemPtr);

  //----------------------------------------------------------------
  // Initialize Main Data Block
  //----------------------------------------------------------------
  mmData.MemPtr = MemPtr;
  mmData.mmSharedPtr = &mmSharedData;
  LibAmdMemFill (&mmSharedData, 0, sizeof (mmSharedData), &MemPtr->StdHeader);
  mmSharedData.DimmExcludeFlag = NORMAL;
  mmSharedData.NodeIntlv.IsValid = FALSE;

  //----------------------------------------------------------------
  // Discover populated CPUs
  //----------------------------------------------------------------
  Retval = MemSocketScan (&mmData);
  if (Retval == AGESA_FATAL) {
    return Retval;
  }
  DieCount = mmData.DieCount;

  //----------------------------------------------------------------
  //
  //  Allocate Memory for NB and Tech Blocks
  //  (one contiguous allocation: NB blocks first, Tech blocks after)
  //
  //    NBPtr[Die]----+
  //                  |
  //                  v
  //  +---+---+---+---+---+---+---+---+
  //  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |  NB Blocks
  //  +---+---+---+---+---+---+---+---+
  //    |   |   |   |   |   |   |   |
  //    v   v   v   v   v   v   v   v
  //  +---+---+---+---+---+---+---+---+
  //  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |  Tech Blocks
  //  +---+---+---+---+---+---+---+---+
  //
  //----------------------------------------------------------------
  AllocHeapParams.RequestedBufferSize = (DieCount * (sizeof (MEM_NB_BLOCK) + sizeof (MEM_TECH_BLOCK)));
  AllocHeapParams.BufferHandle = AMD_MEM_AUTO_HANDLE;
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (AGESA_SUCCESS != HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader)) {
    ASSERT(FALSE); // NB and Tech Block Heap allocate error
    return AGESA_FATAL;
  }
  NBPtr = (MEM_NB_BLOCK *)AllocHeapParams.BufferPtr;
  TechPtr = (MEM_TECH_BLOCK *) (&NBPtr[DieCount]);
  mmData.NBPtr = NBPtr;
  mmData.TechPtr = TechPtr;

  //----------------------------------------------------------------
  // Create NB Blocks: first installed constructor that claims the die wins.
  //----------------------------------------------------------------
  for (Die = 0 ; Die < DieCount ; Die++ ) {
    i = 0;
    while (memNBInstalled[i].MemConstructNBBlock != 0) {
      if (memNBInstalled[i].MemConstructNBBlock (&NBPtr[Die], MemPtr, memNBInstalled[i].MemFeatBlock, &mmSharedData, Die) == TRUE) {
        break;
      }
      i++;
    }
    // Couldn't find a NB which supported this family
    if (memNBInstalled[i].MemConstructNBBlock == 0) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  // Create Technology Blocks: first installed tech that claims the die wins.
  //----------------------------------------------------------------
  for (Die = 0 ; Die < DieCount ; Die++ ) {
    i = 0;
    while (memTechInstalled[i] != NULL) {
      if (memTechInstalled[i] (&TechPtr[Die], &NBPtr[Die])) {
        NBPtr[Die].TechPtr = &TechPtr[Die];
        break;
      }
      i++;
    }
    // Couldn't find a Tech block which supported this family
    if (memTechInstalled[i] == NULL) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  //
  //  MEMORY INITIALIZATION TASKS
  //  Try each installed flow control until one reports it is supported;
  //  Retval carries the status of the flow that ran.
  //
  //----------------------------------------------------------------
  i = 0;
  while (memFlowControlInstalled[i] != NULL) {
    Retval = memFlowControlInstalled[i] (&mmData);
    if (MemPtr->IsFlowControlSupported == TRUE) {
      break;
    }
    i++;
  }

  //----------------------------------------------------------------
  // Deallocate NB register tables
  //----------------------------------------------------------------
  for (Tab = 0; Tab < NumberOfNbRegTables; Tab++) {
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_NB_REG_TABLE, Tab, 0, 0), &MemPtr->StdHeader);
  }

  //----------------------------------------------------------------
  // Check for errors and return the worst per-die error code.
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemEnd, &MemPtr->StdHeader);
  for (Die = 0; Die < DieCount; Die++) {
    if (NBPtr[Die].MCTPtr->ErrCode > Retval) {
      Retval = NBPtr[Die].MCTPtr->ErrCode;
    }
  }
  return Retval;
}
/**
 *
 *  Constructs the recovery-mode northbridge (NB) block for an ON-family
 *  processor: verifies the CPU is supported, allocates and carves a single
 *  heap buffer into the DCT/channel/PS/delay structures, and wires up the
 *  recovery-mode function pointers.
 *
 *  @param[in,out]  *NBPtr   - Pointer to the MEM_NB_BLOCK to construct
 *  @param[in,out]  *MemPtr  - Pointer to the MEM_DATA_STRUCT
 *  @param[in]      NodeID   - Node ID used to tag the heap buffer handle
 *
 *  @return         TRUE  - NB block constructed successfully
 *  @return         FALSE - CPU not supported or heap allocation failed
 */
BOOLEAN
MemRecConstructNBBlockON (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN OUT   MEM_DATA_STRUCT *MemPtr,
  IN       UINT8 NodeID
  )
{
  UINT8 i;
  DIE_STRUCT *MCTPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;

  //
  // Determine if this is the expected NB Type
  //
  // NOTE(review): the logical-ID lookup uses DiesPerSystem[0] (no index) while
  // VarMtrrHiMsk below uses DiesPerSystem[NodeID] — presumably recovery mode
  // only ever runs on node 0; confirm against callers.
  GetLogicalIdOfSocket (MemPtr->DiesPerSystem->SocketId, &(MemPtr->DiesPerSystem->LogicalCpuid), &(MemPtr->StdHeader));
  if (!MemRecNIsIdSupportedON (NBPtr, &(MemPtr->DiesPerSystem->LogicalCpuid))) {
    return FALSE;
  }

  NBPtr->MemPtr = MemPtr;
  NBPtr->RefPtr = MemPtr->ParameterListPtr;

  MCTPtr = MemPtr->DiesPerSystem;
  NBPtr->MCTPtr = MCTPtr;
  NBPtr->MCTPtr->NodeId = 0;
  NBPtr->PciAddr.AddressValue = MCTPtr->PciAddr.AddressValue;

  //
  // Allocate buffer for DCT_STRUCTs and CH_DEF_STRUCTs
  //
  // Size covers one DCT, one channel, one PS block, plus the trailing delay
  // tables (MAX_DIMMS * MAX_DELAYS entries per table; RcvEnDlys consumes two
  // bytes per entry below — assumes NUMBER_OF_DELAY_TABLES accounts for that,
  // TODO confirm).
  AllocHeapParams.RequestedBufferSize = (sizeof (DCT_STRUCT) + sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK)) + (MAX_DIMMS * MAX_DELAYS * NUMBER_OF_DELAY_TABLES);
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_DCT_STRUCT_HANDLE, NodeID, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    ASSERT(FALSE); // Could not allocate buffer for DCT_STRUCTs and CH_DEF_STRUCTs
    return FALSE;
  }

  NBPtr->SPDPtr = MemPtr->SpdDataStructure;
  NBPtr->AllNodeSPDPtr = MemPtr->SpdDataStructure;

  // Carve the single heap buffer into its sub-structures. The order of the
  // BufferPtr increments must match the size calculation above.
  MemPtr->DieCount = 1;
  MCTPtr->Dct = 0;
  MCTPtr->DctCount = 1;
  MCTPtr->DctData = (DCT_STRUCT *) AllocHeapParams.BufferPtr;
  AllocHeapParams.BufferPtr += sizeof (DCT_STRUCT);
  MCTPtr->DctData->ChannelCount = 1;
  MCTPtr->DctData->ChData = (CH_DEF_STRUCT *) AllocHeapParams.BufferPtr;
  AllocHeapParams.BufferPtr += sizeof (CH_DEF_STRUCT);
  NBPtr->PSBlock = (MEM_PS_BLOCK *) AllocHeapParams.BufferPtr;
  AllocHeapParams.BufferPtr += sizeof (MEM_PS_BLOCK);
  MCTPtr->DctData->ChData->RcvEnDlys = (UINT16 *) AllocHeapParams.BufferPtr;
  AllocHeapParams.BufferPtr += (MAX_DIMMS * MAX_DELAYS) * 2;
  MCTPtr->DctData->ChData->WrDqsDlys = AllocHeapParams.BufferPtr;

  //
  // Initialize NB block's variables
  //
  NBPtr->DCTPtr = NBPtr->MCTPtr->DctData;
  NBPtr->DctCachePtr = NBPtr->DctCache;  // (was assigned twice in the original; duplicate removed)
  NBPtr->PsPtr = NBPtr->PSBlock;
  NBPtr->ChannelPtr = NBPtr->DCTPtr->ChData;

  MemRecNInitNBRegTableON (NBPtr->NBRegTable);
  NBPtr->Dct = 0;
  NBPtr->Channel = 0;
  NBPtr->VarMtrrHiMsk = MemRecGetVarMtrrHiMsk (&(MemPtr->DiesPerSystem[NodeID].LogicalCpuid), &(MemPtr->StdHeader));
  NBPtr->FreqChangeParam = (MEM_FREQ_CHANGE_PARAM *) &RecFreqChangeParamON;

  // Clear the register cache and the feature-support flags before use.
  LibAmdMemFill (NBPtr->DctCache, 0, sizeof (NBPtr->DctCache), &NBPtr->MemPtr->StdHeader);
  LibAmdMemFill (NBPtr->IsSupported, FALSE, sizeof (NBPtr->IsSupported), &NBPtr->MemPtr->StdHeader);

  // Default every family-specific hook to the "do nothing, return TRUE" stub;
  // specific hooks are overridden below.
  for (i = 0; i < NumberOfHooks; i++) {
    NBPtr->FamilySpecificHook[i] = (BOOLEAN (*) (MEM_NB_BLOCK *, VOID *)) MemRecDefTrue;
  }

  NBPtr->InitRecovery = MemRecNMemInitON;
  NBPtr->RecModeDefRegArray = RecModeDefRegArrayON;

  NBPtr->SwitchNodeRec = (VOID (*) (MEM_NB_BLOCK *, UINT8)) MemRecDefRet;
  NBPtr->SwitchDCT = (VOID (*) (MEM_NB_BLOCK *, UINT8)) MemRecDefRet;
  NBPtr->SwitchChannel = (VOID (*) (MEM_NB_BLOCK *, UINT8)) MemRecDefRet;
  NBPtr->SetMaxLatency = MemRecNSetMaxLatencyON;
  NBPtr->GetSysAddrRec = MemRecNGetMCTSysAddrNb;
  NBPtr->SendMrsCmd = MemRecNSendMrsCmdNb;
  NBPtr->sendZQCmd = MemRecNSendZQCmdNb;
  NBPtr->SetDramOdtRec = MemRecNSetDramOdtON;
  NBPtr->GetBitField = MemRecNGetBitFieldNb;
  NBPtr->SetBitField = MemRecNSetBitFieldNb;
  NBPtr->GetTrainDly = MemRecNGetTrainDlyNb;
  NBPtr->SetTrainDly = MemRecNSetTrainDlyNb;
  NBPtr->MemRecNCmnGetSetFieldNb = MemRecNCmnGetSetFieldON;
  NBPtr->MemRecNcmnGetSetTrainDlyNb = MemRecNcmnGetSetTrainDlyClientNb;
  NBPtr->MemRecNSwitchDctNb = (VOID (*) (MEM_NB_BLOCK *, UINT8)) MemRecDefRet;
  NBPtr->TrainingFlow = MemNRecTrainingFlowClientNb;
  NBPtr->ReadPattern = MemRecNContReadPatternClientNb;

  NBPtr->IsSupported[DramModeAfterDimmPres] = TRUE;
  NBPtr->FamilySpecificHook[OverrideRcvEnSeed] = MemRecNOverrideRcvEnSeedON;

  return TRUE;
}
/**
 *
 *  Constructs the northbridge (NB) block for an LN-family processor:
 *  verifies the CPU on the given node is supported, allocates and carves a
 *  heap buffer into per-DCT and per-channel structures, initializes the NB
 *  block's member variables, and installs the LN-specific and common
 *  function pointers, feature flags, and family-specific hooks.
 *
 *  @param[in,out]  *NBPtr     - Pointer to the MEM_NB_BLOCK to construct
 *  @param[in,out]  *MemPtr    - Pointer to the MEM_DATA_STRUCT
 *  @param[in]      *FeatPtr   - Pointer to the MEM_FEAT_BLOCK_NB
 *  @param[in]      *SharedPtr - Pointer to the MEM_SHARED_DATA
 *  @param[in]      NodeID     - Node on which to construct the block
 *
 *  @return         TRUE  - NB block constructed successfully
 *  @return         FALSE - CPU not supported or heap allocation failed
 */
BOOLEAN
MemConstructNBBlockLN (
  IN OUT   MEM_NB_BLOCK *NBPtr,
  IN OUT   MEM_DATA_STRUCT *MemPtr,
  IN       MEM_FEAT_BLOCK_NB *FeatPtr,
  IN       MEM_SHARED_DATA *SharedPtr,
  IN       UINT8 NodeID
  )
{
  UINT8 Dct;
  UINT8 Channel;
  UINT8 SpdSocketIndex;
  UINT8 SpdChannelIndex;
  DIE_STRUCT *MCTPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;

  //
  // Determine if this is the expected NB Type
  //
  GetLogicalIdOfSocket (MemPtr->DiesPerSystem[NodeID].SocketId, &(MemPtr->DiesPerSystem[NodeID].LogicalCpuid), &(MemPtr->StdHeader));
  if (!MemNIsIdSupportedLN (NBPtr, &(MemPtr->DiesPerSystem[NodeID].LogicalCpuid))) {
    return FALSE;
  }

  NBPtr->MemPtr = MemPtr;
  NBPtr->RefPtr = MemPtr->ParameterListPtr;
  NBPtr->SharedPtr = SharedPtr;

  MCTPtr = &(MemPtr->DiesPerSystem[NodeID]);
  NBPtr->MCTPtr = MCTPtr;
  NBPtr->MCTPtr->NodeId = NodeID;
  NBPtr->PciAddr.AddressValue = MCTPtr->PciAddr.AddressValue;
  NBPtr->VarMtrrHiMsk = GetVarMtrrHiMsk (&(MemPtr->DiesPerSystem[NodeID].LogicalCpuid), &(MemPtr->StdHeader));

  //
  // Allocate buffer for DCT_STRUCTs and CH_DEF_STRUCTs
  //
  // One DCT_STRUCT per DCT, and one (CH_DEF_STRUCT + MEM_PS_BLOCK) per
  // channel of each DCT. The carving below must consume the buffer in the
  // same order and amounts.
  AllocHeapParams.RequestedBufferSize = MAX_DCTS_PER_NODE_LN * (
                                          sizeof (DCT_STRUCT) + (
                                            MAX_CHANNELS_PER_DCT_LN * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                                          )
                                        );
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_DCT_STRUCT_HANDLE, NodeID, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) != AGESA_SUCCESS) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_DCT_STRUCT_AND_CH_DEF_STRUCTs, NBPtr->Node, 0, 0, 0, &MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocate buffer for DCT_STRUCTs and CH_DEF_STRUCTs
    return FALSE;
  }

  // Carve the heap buffer: DCT_STRUCT array first, then a CH_DEF_STRUCT
  // array per DCT, then the MEM_PS_BLOCK area.
  MCTPtr->DctCount = MAX_DCTS_PER_NODE_LN;
  MCTPtr->DctData = (DCT_STRUCT *) AllocHeapParams.BufferPtr;
  AllocHeapParams.BufferPtr += MAX_DCTS_PER_NODE_LN * sizeof (DCT_STRUCT);
  for (Dct = 0; Dct < MAX_DCTS_PER_NODE_LN; Dct++) {
    MCTPtr->DctData[Dct].Dct = Dct;
    MCTPtr->DctData[Dct].ChannelCount = MAX_CHANNELS_PER_DCT_LN;
    MCTPtr->DctData[Dct].ChData = (CH_DEF_STRUCT *) AllocHeapParams.BufferPtr;
    MCTPtr->DctData[Dct].ChData[0].Dct = Dct;
    AllocHeapParams.BufferPtr += MAX_CHANNELS_PER_DCT_LN * sizeof (CH_DEF_STRUCT);
  }
  NBPtr->PSBlock = (MEM_PS_BLOCK *) AllocHeapParams.BufferPtr;

  //
  // Initialize Socket List
  //
  // Publish channel 0 of each DCT into the socket-level lookup tables.
  for (Dct = 0; Dct < MAX_DCTS_PER_NODE_LN; Dct++) {
    MemPtr->SocketList[MCTPtr->SocketId].ChannelPtr[Dct] = &(MCTPtr->DctData[Dct].ChData[0]);
    MemPtr->SocketList[MCTPtr->SocketId].TimingsPtr[Dct] = &(MCTPtr->DctData[Dct].Timings);
    MCTPtr->DctData[Dct].ChData[0].ChannelID = Dct;
  }

  //
  // Initialize NB block member variables
  //
  NBPtr->DctCachePtr = NBPtr->DctCache;
  NBPtr->PsPtr = NBPtr->PSBlock;

  MemNInitNBRegTableLN (NBPtr, NBPtr->NBRegTable);
  NBPtr->Node = 0;
  NBPtr->Dct = 0;
  NBPtr->Channel = 0;
  NBPtr->DctCount = MAX_DCTS_PER_NODE_LN;
  NBPtr->ChannelCount = MAX_CHANNELS_PER_DCT_LN;
  NBPtr->NodeCount = MAX_NODES_SUPPORTED_LN;
  NBPtr->Ganged = FALSE;
  NBPtr->PosTrnPattern = POS_PATTERN_256B;
  NBPtr->MemCleared = FALSE;
  NBPtr->StartupSpeed = DDR800_FREQUENCY;
  NBPtr->RcvrEnDlyLimit = 0x1FF;
  NBPtr->NbFreqChgState = 0;
  NBPtr->DefDctSelIntLvAddr = 5;
  NBPtr->FreqChangeParam = (MEM_FREQ_CHANGE_PARAM *) &FreqChangeParamLN;
  NBPtr->CsRegMsk = 0x1FF83FE0;        // chip-select register mask — family-specific; see family register reference
  NBPtr->MaxRxEnSeedTotal = 0x33F;
  NBPtr->MinRxEnSeedGross = 0;

  // Zero the register cache before first use.
  LibAmdMemFill (NBPtr->DctCache, 0, sizeof (NBPtr->DctCache), &NBPtr->MemPtr->StdHeader);

  //
  // Install function pointers. Names ending in "LN" are family-specific
  // implementations; "Nb"/"Unb"/"ClientNb" are common ones; memDefRet /
  // memDefTrue are no-op stubs for operations this family does not need.
  //
  NBPtr->SetMaxLatency = MemNSetMaxLatencyLN;
  NBPtr->getMaxLatParams = MemNGetMaxLatParamsClientLN;
  NBPtr->InitializeMCT = (BOOLEAN (*) (MEM_NB_BLOCK *)) memDefTrue;
  NBPtr->FinalizeMCT = MemNFinalizeMctLN;
  NBPtr->SendMrsCmd = MemNSendMrsCmdLN;
  NBPtr->sendZQCmd = MemNSendZQCmdNb;
  NBPtr->WritePattern = MemNWritePatternLN;
  NBPtr->ReadPattern = MemNReadPatternLN;
  NBPtr->GenHwRcvEnReads = (VOID (*) (MEM_NB_BLOCK *, UINT32)) memDefRet;
  NBPtr->CompareTestPattern = MemNCompareTestPatternNb;
  NBPtr->InsDlyCompareTestPattern = MemNInsDlyCompareTestPatternNb;
  NBPtr->InitMCT = MemNInitMCTNb;
  NBPtr->StitchMemory = MemNStitchMemoryNb;
  NBPtr->AutoConfig = MemNAutoConfigLN;
  NBPtr->PlatformSpec = MemNPlatformSpecUnb;
  NBPtr->DisableDCT = MemNDisableDCTClientNb;
  NBPtr->StartupDCT = MemNStartupDCTUnb;
  NBPtr->SyncTargetSpeed = MemNSyncTargetSpeedNb;
  NBPtr->MemNCapSpeedBatteryLife = (VOID (*) (MEM_NB_BLOCK *)) memDefRet;
  NBPtr->ChangeFrequency = MemNChangeFrequencyClientNb;
  NBPtr->RampUpFrequency = MemNRampUpFrequencyNb;
  NBPtr->ChangeNbFrequency = MemNChangeNbFrequencyNb;
  NBPtr->ProgramNbPsDependentRegs = MemNProgramNbPstateDependentRegistersClientNb;
  NBPtr->ProgramCycTimings = MemNProgramCycTimingsClientNb;
  NBPtr->SyncDctsReady = (BOOLEAN (*) (MEM_NB_BLOCK *)) memDefTrue;
  NBPtr->HtMemMapInit = MemNHtMemMapInitLN;
  NBPtr->SyncAddrMapToAllNodes = (VOID (*) (MEM_NB_BLOCK *)) memDefRet;
  NBPtr->CpuMemTyping = MemNCPUMemTypingNb;
  NBPtr->UMAMemTyping = MemNUMAMemTypingNb;
  NBPtr->BeforeDqsTraining = MemNBeforeDQSTrainingLN;
  NBPtr->AfterDqsTraining = MemNAfterDQSTrainingLN;
  NBPtr->OtherTiming = MemNOtherTimingLN;
  NBPtr->GetSocketRelativeChannel = MemNGetSocketRelativeChannelNb;
  NBPtr->TechBlockSwitch = MemNTechBlockSwitchLN;
  NBPtr->SetEccSymbolSize = (VOID (*) (MEM_NB_BLOCK *)) memDefRet;
  NBPtr->TrainingFlow = (VOID (*) (MEM_NB_BLOCK *))(memNTrainFlowControl[DDR3_TRAIN_FLOW]);
  NBPtr->MinDataEyeWidth = MemNMinDataEyeWidthNb;
  NBPtr->ChangeNbFrequencyWrap = MemNChangeNbFrequencyWrapLN;
  NBPtr->AllocateC6Storage = MemNAllocateC6StorageClientNb;

  MemNInitNBDataNb (NBPtr);
  FeatPtr->InitHwRxEn (NBPtr);

  NBPtr->PollBitField = MemNPollBitFieldNb;
  NBPtr->BrdcstCheck = MemNBrdcstCheckNb;
  NBPtr->BrdcstSet = MemNBrdcstSetNb;
  NBPtr->GetTrainDly = MemNGetTrainDlyNb;
  NBPtr->SetTrainDly = MemNSetTrainDlyNb;
  NBPtr->PhyFenceTraining = MemNPhyFenceTrainingUnb;
  NBPtr->GetSysAddr = MemNGetMCTSysAddrNb;
  NBPtr->RankEnabled = MemNRankEnabledNb;
  NBPtr->MemNCmnGetSetFieldNb = MemNCmnGetSetFieldLN;
  NBPtr->MemNBeforeDramInitNb = (VOID (*) (MEM_NB_BLOCK *)) memDefRet;
  NBPtr->MemNBeforePlatformSpecNb = (VOID (*) (MEM_NB_BLOCK *)) memDefRet;
  NBPtr->MemNInitPhyComp = MemNInitPhyCompClientNb;
  NBPtr->MemNcmnGetSetTrainDly = MemNcmnGetSetTrainDlyClientNb;
  NBPtr->MemPPhyFenceTrainingNb = (VOID (*) (MEM_NB_BLOCK *)) memDefRet;
  NBPtr->MemNPlatformSpecificFormFactorInitNb = MemNPlatformSpecificFormFactorInitLN;
  NBPtr->MemNPFenceAdjustNb = MemNPFenceAdjustUnb;
  NBPtr->GetTrainDlyParms = MemNGetTrainDlyParmsClientNb;
  NBPtr->TrainingPatternInit = MemNTrainingPatternInitNb;
  NBPtr->TrainingPatternFinalize = MemNTrainingPatternFinalizeNb;
  NBPtr->GetApproximateWriteDatDelay = MemNGetApproximateWriteDatDelayNb;
  NBPtr->CSPerChannel = MemNCSPerChannelLN;
  NBPtr->CSPerDelay = MemNCSPerDelayNb;
  NBPtr->FlushPattern = MemNFlushPatternNb;
  NBPtr->GetUmaSize = MemNGetUmaSizeLN;
  NBPtr->GetMemClkFreqId = MemNGetMemClkFreqIdClientNb;
  NBPtr->EnableSwapIntlvRgn = MemNEnableSwapIntlvRgnLN;
  NBPtr->WaitXMemClks = MemNWaitXMemClksNb;
  NBPtr->MemNGetDramTerm = MemNGetDramTermNb;
  NBPtr->MemNGetDynDramTerm = MemNGetDynDramTermNb;
  NBPtr->MemNGetMR0CL = MemNGetMR0CLNb;
  NBPtr->MemNGetMR0WR = MemNGetMR0WRLN;
  NBPtr->MemNSaveMR0 = (VOID (*) (MEM_NB_BLOCK *, UINT32)) memDefRet;
  NBPtr->MemNGetMR2CWL = MemNGetMR2CWLNb;

  // Enable the feature workarounds / capabilities that apply to this family.
  NBPtr->IsSupported[SetDllShutDown] = TRUE;
  NBPtr->IsSupported[CheckPhyFenceTraining] = TRUE;
  NBPtr->IsSupported[CheckSendAllMRCmds] = TRUE;
  NBPtr->IsSupported[CheckFindPSDct] = TRUE;
  NBPtr->IsSupported[FenceTrnBeforeDramInit] = TRUE;
  NBPtr->IsSupported[WLSeedAdjust] = TRUE;
  NBPtr->IsSupported[UnifiedNbFence] = TRUE;
  NBPtr->IsSupported[CheckODTControls] = TRUE;
  NBPtr->IsSupported[ReverseMaxRdLatTrain] = TRUE;
  NBPtr->IsSupported[SkipErrTrain] = TRUE;
  NBPtr->IsSupported[DramSrHys] = TRUE;
  NBPtr->IsSupported[CheckMaxDramRate] = TRUE;
  NBPtr->IsSupported[SchedDlySlot1Extra] = TRUE;
  NBPtr->IsSupported[CsrPhyPllPdEn] = TRUE;
  NBPtr->IsSupported[AdjustTrc] = TRUE;
  NBPtr->IsSupported[ProgramCsrComparator] = TRUE;
  NBPtr->IsSupported[CheckDrvImpCtrl] = TRUE;
  NBPtr->IsSupported[EnProcOdtAdvForUDIMM] = TRUE;

  // Family-specific training hooks.
  NBPtr->FamilySpecificHook[AddlMaxRdLatTrain] = MemNSlot1MaxRdLatTrainClientNb;
  NBPtr->FamilySpecificHook[BeforePhyFenceTraining] = MemNBeforePhyFenceTrainingClientNb;
  NBPtr->FamilySpecificHook[ReEnablePhyComp] = MemNReEnablePhyCompNb;
  NBPtr->FamilySpecificHook[AdjustTxpdll] = MemNAdjustTxpdllClientNb;
  NBPtr->FamilySpecificHook[DisLowPwrDrvStr] = MemNDisLowPwrDrvStrLN;
  NBPtr->FamilySpecificHook[CalcWrDqDqsEarly] = MemNCalcWrDqDqsEarlyClientNb;
  NBPtr->FamilySpecificHook[InitializeRxEnSeedlessTraining] = MemNInitializeRxEnSeedlessTrainingUnb;
  NBPtr->FamilySpecificHook[TrackRxEnSeedlessRdWrNoWindBLError] = MemNTrackRxEnSeedlessRdWrNoWindBLErrorUnb;
  NBPtr->FamilySpecificHook[TrackRxEnSeedlessRdWrSmallWindBLError] = MemNTrackRxEnSeedlessRdWrSmallWindBLErrorUnb;
  NBPtr->FamilySpecificHook[InitialzeRxEnSeedlessByteLaneError] = MemNInitialzeRxEnSeedlessByteLaneErrorUnb;
  NBPtr->FamilySpecificHook[OverridePrevPassRcvEnDly] = MemNOverridePrevPassRcvEnDlyLN;
  NBPtr->FamilySpecificHook[ResetRxFifoPtr] = MemNResetRxFifoPtrClientNb;
  NBPtr->FamilySpecificHook[BfAfExcludeDimm] = MemNBfAfExcludeDimmClientNb;

  FeatPtr->InitCPG (NBPtr);
  FeatPtr->InitEarlySampleSupport (NBPtr);
  NBPtr->FeatPtr = FeatPtr;

  //
  // Calculate SPD Offsets per channel and assign pointers
  // to the data.
  //
  SpdSocketIndex = GetSpdSocketIndex (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, &MemPtr->StdHeader);
  //
  // Traverse the Dct/Channel structures
  //
  for (Dct = 0; Dct < MAX_DCTS_PER_NODE_LN; Dct++) {
    for (Channel = 0; Channel < MAX_CHANNELS_PER_DCT_LN; Channel++) {
      //
      // Calculate the number of Dimms on this channel using the
      // die/dct/channel to Socket/channel conversion.
      //
      SpdChannelIndex = GetSpdChannelIndex (NBPtr->RefPtr->PlatformMemoryConfiguration, NBPtr->MCTPtr->SocketId, MemNGetSocketRelativeChannelNb (NBPtr, Dct, Channel), &MemPtr->StdHeader);
      NBPtr->MCTPtr->DctData[Dct].ChData[Channel].SpdPtr = &(MemPtr->SpdDataStructure[SpdSocketIndex + SpdChannelIndex]);
    }
  }

  // Leave the block pointing at DCT 0 / channel 0.
  MemNSwitchDCTNb (NBPtr, 0);
  NBPtr->Channel = 0;
  return TRUE;
}