/**
 *
 * This function deallocates heap space allocated in memory S3 resume.
 *
 * Releases the NB register tables, the die struct buffer, and the memory
 * init buffers, returning the worst (highest) status seen across all
 * deallocations.
 *
 * @param[in]  *StdHeader - Config handle for library and services
 *
 * @return     AGESA_STATUS
 *                 - AGESA_ALERT
 *                 - AGESA_FATAL
 *                 - AGESA_SUCCESS
 *                 - AGESA_WARNING
 */
AGESA_STATUS
MemS3Deallocate (
  IN   AMD_CONFIG_PARAMS *StdHeader
  )
{
  AGESA_STATUS RetVal;
  AGESA_STATUS tempRetVal;
  UINT8 Tab;

  RetVal = AGESA_SUCCESS;

  // Release every NB register table saved for S3 resume; status intentionally
  // not accumulated here (a missing table is not an error at this point).
  for (Tab = 0; Tab < NumberOfNbRegTables; Tab++) {
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_NB_REG_TABLE, Tab, 0, 0), StdHeader);
  }

  tempRetVal = HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0), StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_AUTO_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  // BUG FIX: the original assigned the next two results directly to RetVal,
  // clobbering the accumulated worst status and then comparing against a
  // stale tempRetVal. Accumulate through tempRetVal like the cases above.
  tempRetVal = HeapDeallocateBuffer (AMD_MEM_S3_NB_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  return RetVal;
}
/**
 *
 * This function deallocates heap space allocated in memory S3 resume.
 *
 * Releases the die struct buffer and the memory init buffers, returning the
 * worst (highest) status seen across all deallocations.
 *
 * @param[in]  *StdHeader - Config handle for library and services
 *
 * @return     AGESA_STATUS
 *                 - AGESA_ALERT
 *                 - AGESA_FATAL
 *                 - AGESA_SUCCESS
 *                 - AGESA_WARNING
 */
AGESA_STATUS
MemS3Deallocate (
  IN   AMD_CONFIG_PARAMS *StdHeader
  )
{
  AGESA_STATUS RetVal;
  AGESA_STATUS tempRetVal;

  RetVal = AGESA_SUCCESS;

  tempRetVal = HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0), StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_AUTO_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  // BUG FIX: the original assigned the next two results directly to RetVal,
  // clobbering the accumulated worst status and then comparing against a
  // stale tempRetVal. Accumulate through tempRetVal like the cases above.
  tempRetVal = HeapDeallocateBuffer (AMD_MEM_S3_NB_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  tempRetVal = HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);
  if (tempRetVal > RetVal) {
    RetVal = tempRetVal;
  }

  return RetVal;
}
/*
 *---------------------------------------------------------------------------------------
 *
 *  AmdInitPostDestructor
 *
 *  Destructor for AmdInitPost: refreshes the standard headers, releases the
 *  heap used by memory init, and runs post finalization when memory exists.
 *
 *  @param[in]  StdHeader       The standard header.
 *  @param[in]  PostParamsPtr   AMD init post param.
 *
 *  @retval     AGESA_STATUS
 *
 *---------------------------------------------------------------------------------------
 */
AGESA_STATUS
AmdInitPostDestructor (
  IN   AMD_CONFIG_PARAMS *StdHeader,
  IN   AMD_POST_PARAMS   *PostParamsPtr
  )
{
  ASSERT (PostParamsPtr != NULL);

  // Refresh both copies of the standard header from the caller's handle.
  PostParamsPtr->StdHeader = *StdHeader;
  PostParamsPtr->MemConfig.MemData->StdHeader = *StdHeader;

  //
  // AmdMemAuto completed; release the heap space used for memory init.
  //
  MemAmdFinalize (PostParamsPtr->MemConfig.MemData);
  HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);

  //
  // AmdCpuPost completed. WBINVD can only be executed when memory is
  // available, so skip finalization when no memory was configured.
  //
  if (PostParamsPtr->MemConfig.SysLimit == 0) {
    return AGESA_SUCCESS;
  }
  FinalizeAtPost (StdHeader);
  return AGESA_SUCCESS;
}
/**
 * Releases the PMU SRAM Message Block buffer after PMU processing.
 *
 * @param[in,out]  *NBPtr - Pointer to the MEM_NB_BLOCK
 *
 * @return  TRUE  - buffer deallocated successfully.
 * @return  FALSE - deallocation did not return AGESA_SUCCESS.
 */
BOOLEAN
MemNPostPmuSramMsgBlockKV (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  AGESA_STATUS DeallocStatus;

  // De-allocate the PMU SRAM Message Block buffer.
  DeallocStatus = HeapDeallocateBuffer (AMD_MEM_PMU_SRAM_MSG_BLOCK_HANDLE, &(NBPtr->MemPtr->StdHeader));
  return (BOOLEAN) (DeallocStatus == AGESA_SUCCESS);
}
/**
 * ReleaseSlitBuffer
 *
 * Description:
 *     Deallocate the SLIT hop-count table buffer.
 *
 * Parameters:
 * @param[in, out]  *StdHeader
 *
 * @retval          AGESA_STATUS - always AGESA_SUCCESS; the deallocation
 *                  status is deliberately not propagated (the buffer may
 *                  legitimately be absent).
 */
AGESA_STATUS
ReleaseSlitBuffer (
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  AGESA_STATUS DeallocStatus;

  DeallocStatus = HeapDeallocateBuffer ((UINT32) HOP_COUNT_TABLE_HANDLE, StdHeader);
  // NOTE(review): DeallocStatus intentionally unused — original discarded it too.
  return AGESA_SUCCESS;
}
/**
 * Grow the S3 save-script table by S3_TABLE_LENGTH_INCREMENT bytes.
 *
 * Copies the current table into a temporary heap buffer, reallocates the
 * save-table buffer at the larger size, copies the content back, and
 * updates the caller's table pointer.
 *
 * @param[in]      StdHeader    Pointer to standard header
 * @param[in,out]  S3SaveTable  S3 save table header (pointer updated on success)
 */
STATIC AGESA_STATUS
S3SaveStateExtendTableLenth (
  IN       AMD_CONFIG_PARAMS     *StdHeader,
  IN OUT   S3_SAVE_TABLE_HEADER  **S3SaveTable
  )
{
  AGESA_STATUS Status;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  VOID *TempBuffer;
  UINT16 NewTableLength;
  UINT16 CurrentTableLength;

  // Allocate temporary buffer at the new, larger size
  NewTableLength = (*S3SaveTable)->TableLength + S3_TABLE_LENGTH_INCREMENT;
  AllocHeapParams.RequestedBufferSize = NewTableLength;
  AllocHeapParams.BufferHandle = AMD_S3_SCRIPT_TEMP_BUFFER_HANDLE;
  AllocHeapParams.Persist = StdHeader->HeapStatus;
  Status = HeapAllocateBuffer (&AllocHeapParams, StdHeader);
  if (Status != AGESA_SUCCESS) {
    return Status;
  }
  // Save current table length
  CurrentTableLength = (*S3SaveTable)->TableLength;
  // Update table length BEFORE the copy so the header stored in the
  // temporary buffer already carries the new length.
  (*S3SaveTable)->TableLength = NewTableLength;
  // Copy S3 save table to temporary location
  LibAmdMemCopy (AllocHeapParams.BufferPtr, *S3SaveTable, CurrentTableLength, StdHeader);
  // Save pointer to temp buffer
  TempBuffer = AllocHeapParams.BufferPtr;
  // Free original S3 save buffer
  HeapDeallocateBuffer (AMD_S3_SCRIPT_SAVE_TABLE_HANDLE, StdHeader);
  // Reallocate the save buffer at the new size
  AllocHeapParams.RequestedBufferSize = NewTableLength;
  AllocHeapParams.BufferHandle = AMD_S3_SCRIPT_SAVE_TABLE_HANDLE;
  AllocHeapParams.Persist = StdHeader->HeapStatus;
  Status = HeapAllocateBuffer (&AllocHeapParams, StdHeader);
  if (Status != AGESA_SUCCESS) {
    // BUG FIX: the original returned here without releasing the temporary
    // buffer, leaking it on every failed extension attempt.
    HeapDeallocateBuffer (AMD_S3_SCRIPT_TEMP_BUFFER_HANDLE, StdHeader);
    return Status;
  }
  // Copy the saved content into the new save buffer and publish the pointer
  LibAmdMemCopy (AllocHeapParams.BufferPtr, TempBuffer, AllocHeapParams.RequestedBufferSize, StdHeader);
  *S3SaveTable = (S3_SAVE_TABLE_HEADER*) AllocHeapParams.BufferPtr;
  HeapDeallocateBuffer (AMD_S3_SCRIPT_TEMP_BUFFER_HANDLE, StdHeader);
  return Status;
}
/**
 *
 * Exit function for HDT out Function.
 *
 * Runs the HDTOUT exit task on all cores (restoring debug registers and
 * firing the HDTOUT exit command for the hdtout script), then releases the
 * HDTOUT print buffer.
 *
 * @param[in,out]  StdHeader  The Pointer of AGESA Header
 *
 **/
VOID
AmdIdsHdtOutExit (
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  IDSAPLATETASK IdsApLateTask;

  // Nothing to tear down when HDTOUT is not supported.
  if (!AmdIdsHdtOutSupport ()) {
    return;
  }

  IdsApLateTask.ApTask = (PF_IDS_AP_TASK) AmdIdsHdtOutExitCoreTask;
  IdsApLateTask.ApTaskPara = NULL;
  IdsAgesaRunFcnOnAllCoresLate (&IdsApLateTask, StdHeader);
  HeapDeallocateBuffer (IDS_HDT_OUT_BUFFER_HANDLE, StdHeader);
}
/**
 *
 * Exit function for HDT out Function of S3 Resume.
 *
 * Runs the HDTOUT exit task on the BSP only (AP exit was already done at the
 * end of AmdInitResume) and, on the BSP, releases the HDTOUT print buffer.
 *
 * @param[in,out]  StdHeader  The Pointer of AGESA Header
 *
 **/
VOID
AmdIdsHdtOutS3Exit (
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  AGESA_STATUS AgesaStatus;

  if (!AmdIdsHdtOutSupport ()) {
    return;
  }

  // AP debug print exit was already done at the end of AmdInitResume,
  // so only the BSP is handled here.
  AmdIdsHdtOutExitCoreTask (NULL, StdHeader);
  if (IsBsp (StdHeader, &AgesaStatus)) {
    HeapDeallocateBuffer (IDS_HDT_OUT_BUFFER_HANDLE, StdHeader);
  }
}
/**
 *
 * This function deallocates heap buffers that were allocated in AmdMemAuto.
 *
 * Per-die training and DCT struct buffers are released first, followed by
 * the shared die struct, S3 save, SPD, and memory auto buffers.
 *
 * @param[in,out]  *MemPtr - Pointer to the MEM_DATA_STRUCT
 *
 * @return         AGESA_STATUS
 *                     - AGESA_ALERT
 *                     - AGESA_FATAL
 *                     - AGESA_SUCCESS
 *                     - AGESA_WARNING
 */
AGESA_STATUS
MemAmdFinalize (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  AMD_CONFIG_PARAMS *CfgHandle;
  UINT8 DieIndex;

  CfgHandle = &MemPtr->StdHeader;

  for (DieIndex = 0; DieIndex < MemPtr->DieCount; DieIndex++) {
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_TRN_DATA_HANDLE, DieIndex, 0, 0), CfgHandle);
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DCT_STRUCT_HANDLE, DieIndex, 0, 0), CfgHandle);
  }

  HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_DIE_STRUCT_HANDLE, 0, 0, 0), CfgHandle);
  HeapDeallocateBuffer (AMD_S3_SAVE_HANDLE, CfgHandle);
  HeapDeallocateBuffer (AMD_MEM_SPD_HANDLE, CfgHandle);
  HeapDeallocateBuffer (AMD_MEM_AUTO_HANDLE, CfgHandle);

  return AGESA_SUCCESS;
}
/**
 *
 * This function is the main memory entry point for the S3 resume sequence.
 *
 * Requirements:
 *
 * Run-Time Requirements:
 * 1. Complete Hypertransport Bus Configuration
 * 4. BSP in Big Real Mode
 * 5. Stack available
 *
 * @param[in]  *StdHeader - Config handle for library and services
 *
 * @return     AGESA_STATUS
 *                 - AGESA_ALERT
 *                 - AGESA_FATAL
 *                 - AGESA_SUCCESS
 *                 - AGESA_WARNING
 */
AGESA_STATUS
AmdMemS3Resume (
  IN   AMD_CONFIG_PARAMS *StdHeader
  )
{
  AGESA_STATUS RetVal;
  MEM_MAIN_DATA_BLOCK mmData;
  S3_MEM_NB_BLOCK *S3NBPtr;
  MEM_DATA_STRUCT *MemData;
  UINT8 Die;
  UINT8 DieCount;

  //---------------------------------------------
  // Creation of NB Block for S3 resume
  //---------------------------------------------
  RetVal = MemS3InitNB (&S3NBPtr, &MemData, &mmData, StdHeader);
  if (RetVal == AGESA_FATAL) {
    return RetVal;
  }

  DieCount = mmData.DieCount;

  //---------------------------------------------
  // 1. Errata Before resume sequence
  // 2. S3 Resume sequence
  // 3. Errata After resume sequence
  //---------------------------------------------
  for (Die = 0; Die < DieCount; Die ++) {
    if (!S3NBPtr[Die].MemS3Resume (&S3NBPtr[Die], Die)) {
      return AGESA_FATAL;
    }
    S3NBPtr[Die].MemS3RestoreScrub (S3NBPtr[Die].NBPtr, Die);
  }

  HeapDeallocateBuffer (AMD_MEM_S3_DATA_HANDLE, StdHeader);

  // BUG FIX: the original returned a hard-coded AGESA_SUCCESS, discarding any
  // non-fatal status (ALERT/WARNING) reported by MemS3InitNB even though the
  // documented return set includes them. Propagate the accumulated status.
  return RetVal;
}
/**
 *
 * Runs DQS training on all dies in parallel: launches training on the AP
 * core of each remote die, trains the BSP die locally, then polls each AP
 * and collects its training results over the AP data-transfer mailbox.
 *
 * @param[in,out]   *mmPtr   - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 * @return          TRUE  - No fatal error occurs.
 * @return          FALSE - Fatal error occurs.
 */
BOOLEAN
MemMParallelTraining (
  IN OUT   MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  MEM_DATA_STRUCT *MemPtr;
  MEM_NB_BLOCK *NBPtr;
  DIE_INFO TrainInfo[MAX_NODES_SUPPORTED];
  AP_DATA_TRANSFER ReturnData;
  AGESA_STATUS Status;
  UINT8 ApSts;
  UINT8 Die;
  UINT8 Socket;
  UINT32 Module;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 Time;
  UINT32 TimeOut;
  UINT32 TargetApicId;
  BOOLEAN StillTraining;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 *BufferPtr;
  BOOLEAN TimeoutEn;

  NBPtr = mmPtr->NBPtr;
  MemPtr = mmPtr->MemPtr;
  StdHeader = &(mmPtr->MemPtr->StdHeader);
  Time = 0;
  TimeOut = PARALLEL_TRAINING_TIMEOUT;
  TimeoutEn = TRUE;
  // IDS hook may disable the timeout for debug runs.
  IDS_TIMEOUT_CTL (&TimeoutEn);

  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart parallel training\n");
  AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, StdHeader);
  //
  // Initialize Training Info Array: record the socket and first core of each
  // die, and mark every die as not-yet-training.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++) {
    Socket = TrainInfo[Die].Socket = NBPtr[Die].MCTPtr->SocketId;
    Module = NBPtr[Die].MCTPtr->DieId;
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    TrainInfo[Die].Core = (UINT8) (LowCore & 0x000000FF);
    IDS_HDT_CONSOLE (MEM_FLOW, "\tLaunch core %d of socket %d\n", LowCore, Socket);
    TrainInfo[Die].Training = FALSE;
  }
  //
  // Start Training on each remote die (every die except the BSP's).
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (Die != BSP_DIE) {
      NBPtr[Die].BeforeDqsTraining (&(mmPtr->NBPtr[Die]));
      if (NBPtr[Die].MCTPtr->NodeMemSize != 0) {
        if (!NBPtr[Die].FeatPtr->Training (&(mmPtr->NBPtr[Die]))) {
          // Fail to launch code on AP
          // NOTE(review): event is logged with the BSP NBPtr's Node/Dct/Channel,
          // not NBPtr[Die]'s — looks unintentional; confirm before changing.
          PutEventLog (AGESA_ERROR, MEM_ERROR_PARALLEL_TRAINING_LAUNCH_FAIL, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
          SetMemError (AGESA_ERROR, NBPtr[Die].MCTPtr);
          MemPtr->ErrorHandling (NBPtr[Die].MCTPtr, EXCLUDE_ALL_DCT, EXCLUDE_ALL_CHIPSEL, &MemPtr->StdHeader);
        } else {
          TrainInfo[Die].Training = TRUE;
        }
      }
    }
  }
  //
  // Call training on BSP while the APs run in parallel.
  //
  IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NBPtr[BSP_DIE].Node);
  NBPtr[BSP_DIE].BeforeDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].TrainingFlow (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].AfterDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  //
  // Get Results from remote processors training: poll each AP's control byte
  // until every launched die has reported, or the timeout expires.
  //
  do {
    StillTraining = FALSE;
    for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
      //
      // For each Die that is training, read the status
      //
      if (TrainInfo[Die].Training == TRUE) {
        GetLocalApicIdForCore (TrainInfo[Die].Socket, TrainInfo[Die].Core, &TargetApicId, StdHeader);
        ApSts = ApUtilReadRemoteControlByte (TargetApicId, StdHeader);
        if ((ApSts & 0x80) == 0) {
          // AP signalled completion (bit 7 clear): allocate a receive buffer
          // sized for die + DCT + channel structs plus all delay/failure-mask
          // tables; +3 pads for the dword-granular transfer below.
          AllocHeapParams.RequestedBufferSize = (
            sizeof (DIE_STRUCT) +
            NBPtr[Die].DctCount * (
              sizeof (DCT_STRUCT) + (
                NBPtr[Die].ChannelCount * (
                  sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                    (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                     NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                     NUMBER_OF_DELAY_TABLES) +
                    (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                  )
                )
              )
            )
          ) + 3;
          AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Die, 0, 0);
          AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
          if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
            //
            // Receive Training Results from the AP.
            //
            ReturnData.DataPtr = AllocHeapParams.BufferPtr;
            // NOTE(review): cast binds to RequestedBufferSize before the /4;
            // the +3 padding above keeps the truncating divide from dropping
            // trailing bytes.
            ReturnData.DataSizeInDwords = (UINT16) AllocHeapParams.RequestedBufferSize / 4;
            ReturnData.DataTransferFlags = 0;
            Status = ApUtilReceiveBuffer (TrainInfo[Die].Socket, TrainInfo[Die].Core, &ReturnData, StdHeader);
            if (Status != AGESA_SUCCESS) {
              SetMemError (Status, NBPtr[Die].MCTPtr);
            }
            // Unpack the flat buffer back into the die's structures, in the
            // same order the AP packed them: DIE_STRUCT, DCT/channel structs,
            // PS blocks, then the delay/failure-mask tables.
            BufferPtr = AllocHeapParams.BufferPtr;
            LibAmdMemCopy (NBPtr[Die].MCTPtr, BufferPtr, sizeof (DIE_STRUCT), StdHeader);
            BufferPtr += sizeof (DIE_STRUCT);
            LibAmdMemCopy (
              NBPtr[Die].MCTPtr->DctData,
              BufferPtr,
              NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT)),
              StdHeader);
            BufferPtr += NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT));
            LibAmdMemCopy (
              NBPtr[Die].PSBlock,
              BufferPtr,
              NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK),
              StdHeader);
            BufferPtr += NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK);
            LibAmdMemCopy (
              NBPtr[Die].MCTPtr->DctData[0].ChData[0].RcvEnDlys,
              BufferPtr,
              (NBPtr[Die].DctCount * NBPtr[Die].ChannelCount) *
              ((NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                NUMBER_OF_DELAY_TABLES) +
               (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
              ),
              StdHeader);
            HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);
            NBPtr[Die].AfterDqsTraining (&(mmPtr->NBPtr[Die]));
            TrainInfo[Die].Training = FALSE;
          } else {
            PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_RECEIVED_DATA, NBPtr[Die].Node, 0, 0, 0, StdHeader);
            SetMemError (AGESA_FATAL, NBPtr[Die].MCTPtr);
            ASSERT(FALSE); // Insufficient Heap Space allocation for parallel training buffer
          }
        } else if (ApSts == CORE_IDLE) {
          // AP does not have buffer to transmit to BSP
          // AP fails to locate a buffer for data transfer
          TrainInfo[Die].Training = FALSE;
        } else {
          // Signal to loop through again
          StillTraining = TRUE;
        }
      }
    }
    // Wait for 1 us between polls.
    MemUWait10ns (100, NBPtr->MemPtr);
    Time ++;
  } while ((StillTraining) && ((Time < TimeOut) || !TimeoutEn)); // Continue until all Dies are finished

  // If training cannot finish before the timeout, do fatal exit —
  // at least one AP has hung.
  if (StillTraining && TimeoutEn) {
    PutEventLog (AGESA_FATAL, MEM_ERROR_PARALLEL_TRAINING_TIME_OUT, 0, 0, 0, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, NBPtr[BSP_DIE].MCTPtr);
    ASSERT(FALSE); // Timeout occurred while still training
  }

  // Report failure if any die recorded a fatal error during training.
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      return FALSE;
    }
  }
  return TRUE;
}
/**
 * Save and Restore or Initialize the content of the mailbox registers.
 *
 * The registers used for AP mailbox should have the content related to their
 * function preserved. At CPU_FEAT_AFTER_COHERENT_DISCOVERY the register
 * contents (or zero, on cold boot for families that require it) are saved to
 * a heap buffer; at CPU_FEAT_INIT_LATE_END or
 * CPU_FEAT_AFTER_RESUME_MTRR_SYNC the saved values are written back and the
 * buffer is released.
 *
 * @param[in]  EntryPoint      Timepoint designator.
 * @param[in]  PlatformConfig  Contains the runtime modifiable feature input data.
 * @param[in]  StdHeader       Config Handle for library, services.
 *
 * @return     AGESA_SUCCESS   Always succeeds.
 *
 */
AGESA_STATUS
STATIC
PreserveMailboxes (
  IN       UINT64                 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  PRESERVE_MAILBOX_FAMILY_SERVICES *FamilySpecificServices;
  UINT32 Socket;
  UINT32 Module;
  PCI_ADDR BaseAddress;
  PCI_ADDR MailboxRegister;
  PCI_ADDR *NextRegister;
  AGESA_STATUS IgnoredStatus;
  AGESA_STATUS HeapStatus;
  UINT32 Value;
  ALLOCATE_HEAP_PARAMS AllocateParams;
  LOCATE_HEAP_PTR LocateParams;
  UINT32 RegisterEntryIndex;

  BaseAddress.AddressValue = ILLEGAL_SBDFO;
  if (EntryPoint == CPU_FEAT_AFTER_COHERENT_DISCOVERY) {
    // The save step. Save either the register content or zero (for cold boot,
    // if family specifies that). Buffer is sized for the worst case
    // (max entries * max sockets * max dies) and prefilled with 0xFF.
    AllocateParams.BufferHandle = PRESERVE_MAIL_BOX_HANDLE;
    AllocateParams.RequestedBufferSize = (sizeof (UINT32) * (MAX_PRESERVE_REGISTER_ENTRIES * (MAX_SOCKETS * MAX_DIES)));
    AllocateParams.Persist = HEAP_SYSTEM_MEM;
    HeapStatus = HeapAllocateBuffer (&AllocateParams, StdHeader);
    ASSERT ((HeapStatus == AGESA_SUCCESS) && (AllocateParams.BufferPtr != NULL));
    LibAmdMemFill (AllocateParams.BufferPtr, 0xFF, AllocateParams.RequestedBufferSize, StdHeader);
    RegisterEntryIndex = 0;
    for (Socket = 0; Socket < GetPlatformNumberOfSockets (); Socket++) {
      for (Module = 0; Module < GetPlatformNumberOfModules (); Module++) {
        if (GetPciAddress (StdHeader, Socket, Module, &BaseAddress, &IgnoredStatus)) {
          GetFeatureServicesOfSocket (&PreserveMailboxFamilyServiceTable, Socket, (const VOID **)&FamilySpecificServices, StdHeader);
          ASSERT (FamilySpecificServices != NULL);
          // Walk the family's NULL-terminated (ILLEGAL_SBDFO) register list.
          NextRegister = FamilySpecificServices->RegisterList;
          while (NextRegister->AddressValue != ILLEGAL_SBDFO) {
            ASSERT (RegisterEntryIndex < (MAX_PRESERVE_REGISTER_ENTRIES * GetPlatformNumberOfSockets () * GetPlatformNumberOfModules ()));
            if (FamilySpecificServices->IsZeroOnCold && (!IsWarmReset (StdHeader))) {
              // Cold boot on a family that wants the mailbox cleared: save 0.
              Value = 0;
            } else {
              // Read the current register content, substituting only the
              // function/register fields into the node's base PCI address.
              MailboxRegister = BaseAddress;
              MailboxRegister.Address.Function = NextRegister->Address.Function;
              MailboxRegister.Address.Register = NextRegister->Address.Register;
              LibAmdPciRead (AccessWidth32, MailboxRegister, &Value, StdHeader);
            }
            // MAILBOX_REGISTER_SAVE_ENTRY is presumably a pointer-to-array
            // typedef; the cast+dereference indexes the save buffer — TODO confirm.
            (* (MAILBOX_REGISTER_SAVE_ENTRY) AllocateParams.BufferPtr) [RegisterEntryIndex] = Value;
            RegisterEntryIndex++;
            NextRegister++;
          }
        }
      }
    }
  } else if ((EntryPoint == CPU_FEAT_INIT_LATE_END) || (EntryPoint == CPU_FEAT_AFTER_RESUME_MTRR_SYNC)) {
    // The restore step. Just write out the saved content in the buffer,
    // walking sockets/modules/registers in the same order as the save step.
    LocateParams.BufferHandle = PRESERVE_MAIL_BOX_HANDLE;
    HeapStatus = HeapLocateBuffer (&LocateParams, StdHeader);
    ASSERT ((HeapStatus == AGESA_SUCCESS) && (LocateParams.BufferPtr != NULL));
    RegisterEntryIndex = 0;
    for (Socket = 0; Socket < GetPlatformNumberOfSockets (); Socket++) {
      for (Module = 0; Module < GetPlatformNumberOfModules (); Module++) {
        if (GetPciAddress (StdHeader, Socket, Module, &BaseAddress, &IgnoredStatus)) {
          GetFeatureServicesOfSocket (&PreserveMailboxFamilyServiceTable, Socket, (const VOID **)&FamilySpecificServices, StdHeader);
          NextRegister = FamilySpecificServices->RegisterList;
          while (NextRegister->AddressValue != ILLEGAL_SBDFO) {
            ASSERT (RegisterEntryIndex < (MAX_PRESERVE_REGISTER_ENTRIES * GetPlatformNumberOfSockets () * GetPlatformNumberOfModules ()));
            MailboxRegister = BaseAddress;
            MailboxRegister.Address.Function = NextRegister->Address.Function;
            MailboxRegister.Address.Register = NextRegister->Address.Register;
            Value = (* (MAILBOX_REGISTER_SAVE_ENTRY) LocateParams.BufferPtr) [RegisterEntryIndex];
            LibAmdPciWrite (AccessWidth32, MailboxRegister, &Value, StdHeader);
            RegisterEntryIndex++;
            NextRegister++;
          }
        }
      }
    }
    // Saved values are no longer needed once restored.
    // NOTE(review): HeapStatus result is not checked here — matches save step style.
    HeapStatus = HeapDeallocateBuffer (PRESERVE_MAIL_BOX_HANDLE, StdHeader);
  }
  return AGESA_SUCCESS;
}
/**
 *
 * This is the training function which sets up the environment for remote
 * training on the AP and launches the remote routine.
 *
 * Packs a REMOTE_TRAINING_ENV (plus the die's DCT/channel data) into a heap
 * buffer, transfers it to the first core of the target die, and runs
 * MemFParallelTraining there.
 *
 * @param[in,out]  *NBPtr - Pointer to the MEM_NB_BLOCK
 *
 * @return         TRUE  - Launch training on AP successfully.
 * @return         FALSE - Fail to launch training on AP.
 */
BOOLEAN
MemFParallelTrainingHy (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  DIE_STRUCT *MCTPtr;
  REMOTE_TRAINING_ENV *EnvPtr;
  AP_TASK TrainingTask;
  UINT8 Socket;
  UINT8 Module;
  UINT8 APCore;
  UINT8 p;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 BspSocket;
  UINT32 BspModule;
  UINT32 BspCore;
  AGESA_STATUS Status;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT16 MctDataSize;

  StdHeader = &(NBPtr->MemPtr->StdHeader);
  MCTPtr = NBPtr->MCTPtr;
  Socket = MCTPtr->SocketId;
  Module = MCTPtr->DieId;

  //
  // Allocate buffer for REMOTE_TRAINING_ENV followed by the die's DCT data
  // (DCT structs plus per-channel CH_DEF/PS blocks, HY worst-case counts).
  //
  MctDataSize = MAX_DCTS_PER_NODE_HY * (
                  sizeof (DCT_STRUCT) + (
                    MAX_CHANNELS_PER_DCT_HY * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                  )
                );
  AllocHeapParams.RequestedBufferSize = MctDataSize + sizeof (REMOTE_TRAINING_ENV);
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Socket, Module, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    EnvPtr = (REMOTE_TRAINING_ENV *) AllocHeapParams.BufferPtr;
    // Advance BufferPtr past the env header; the DCT data copy below lands here.
    AllocHeapParams.BufferPtr += sizeof (REMOTE_TRAINING_ENV);

    //
    // Setup Remote training environment
    //
    LibAmdMemCopy (&(EnvPtr->StdHeader), StdHeader, sizeof (AMD_CONFIG_PARAMS), StdHeader);
    LibAmdMemCopy (&(EnvPtr->DieStruct), MCTPtr, sizeof (DIE_STRUCT), StdHeader);
    for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
      EnvPtr->GetPlatformCfg[p] = NBPtr->MemPtr->GetPlatformCfg[p];
    }
    EnvPtr->ErrorHandling = NBPtr->MemPtr->ErrorHandling;
    EnvPtr->NBBlockCtor = MemConstructRemoteNBBlockHY;
    EnvPtr->FeatPtr = NBPtr->FeatPtr;
    EnvPtr->HoleBase = NBPtr->RefPtr->HoleBase;
    EnvPtr->BottomIo = NBPtr->RefPtr->BottomIo;
    EnvPtr->UmaSize = NBPtr->RefPtr->UmaSize;
    EnvPtr->SysLimit = NBPtr->RefPtr->SysLimit;
    EnvPtr->TableBasedAlterations = NBPtr->RefPtr->TableBasedAlterations;
    EnvPtr->PlatformMemoryConfiguration = NBPtr->RefPtr->PlatformMemoryConfiguration;
    LibAmdMemCopy (AllocHeapParams.BufferPtr, MCTPtr->DctData, MctDataSize, StdHeader);

    //
    // Get Socket, Core of the BSP so the AP knows where to send results back.
    //
    IdentifyCore (StdHeader, &BspSocket, &BspModule, &BspCore, &Status);
    EnvPtr->BspSocket = ((UINT8)BspSocket & 0x000000FF);
    EnvPtr->BspCore = ((UINT8)BspCore & 0x000000FF);

    //
    // Set up the remote task structure
    //
    TrainingTask.DataTransfer.DataPtr = EnvPtr;
    // +3 rounds the byte size up to a whole number of dwords for the transfer.
    TrainingTask.DataTransfer.DataSizeInDwords = (UINT16) (AllocHeapParams.RequestedBufferSize + 3) / 4;
    TrainingTask.DataTransfer.DataTransferFlags = 0;
    TrainingTask.ExeFlags = 0;
    TrainingTask.FuncAddress.PfApTaskI = (PF_AP_TASK_I)MemFParallelTraining;

    //
    // Get Target AP Core (first core of the target die).
    //
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    APCore = (UINT8) (LowCore & 0x000000FF);

    //
    // Launch Remote Training, then release the env buffer (the task data has
    // been transferred to the AP by this point).
    //
    ApUtilRunCodeOnSocketCore (Socket, APCore, &TrainingTask, StdHeader);

    HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);

    return TRUE;
  } else {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_REMOTE_TRAINING_ENV, NBPtr->Node, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocated heap space for "REMOTE_TRAINING_ENV"
    return FALSE;
  }
}
/**
 *
 * This is the main function to perform parallel training on all nodes.
 * This is the routine which will run on the remote AP: it rebuilds the die's
 * data structures from the transferred environment, constructs local NB/Tech
 * blocks, runs the training flow, then transmits the results back to the BSP.
 *
 * @param[in,out]  *EnvPtr    - Pointer to the Training Environment Data
 * @param[in,out]  *StdHeader - Pointer to the Standard Header of the AP
 *
 * @return         TRUE  - This feature is enabled.
 * @return         FALSE - This feature is not enabled.
 */
BOOLEAN
MemFParallelTraining (
  IN OUT   REMOTE_TRAINING_ENV *EnvPtr,
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  MEM_PARAMETER_STRUCT ParameterList;
  MEM_NB_BLOCK NB;
  MEM_TECH_BLOCK TB;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  MEM_DATA_STRUCT *MemPtr;
  DIE_STRUCT *MCTPtr;
  UINT8 p;
  UINT8 i;
  UINT8 Dct;
  UINT8 Channel;
  UINT8 *BufferPtr;
  UINT8 DctCount;
  UINT8 ChannelCount;
  UINT8 RowCount;
  UINT8 ColumnCount;
  UINT16 SizeOfNewBuffer;
  AP_DATA_TRANSFER ReturnData;

  //
  // Initialize Parameters
  //
  ReturnData.DataPtr = NULL;
  ReturnData.DataSizeInDwords = 0;
  ReturnData.DataTransferFlags = 0;

  ASSERT (EnvPtr != NULL);
  //
  // Replace Standard header of the AP with the one carried in the environment.
  //
  LibAmdMemCopy (StdHeader, &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), &(EnvPtr->StdHeader));

  //
  // Allocate buffer for training data. The counts are read out of the packed
  // env payload, which was laid out as DIE_STRUCT, then DCT structs, then
  // channel structs (see MemFParallelTrainingHy).
  //
  BufferPtr = (UINT8 *) (&EnvPtr->DieStruct);
  DctCount = EnvPtr->DieStruct.DctCount;
  BufferPtr += sizeof (DIE_STRUCT);
  ChannelCount = ((DCT_STRUCT *) BufferPtr)->ChannelCount;
  BufferPtr += DctCount * sizeof (DCT_STRUCT);
  RowCount = ((CH_DEF_STRUCT *) BufferPtr)->RowCount;
  ColumnCount = ((CH_DEF_STRUCT *) BufferPtr)->ColumnCount;

  SizeOfNewBuffer = sizeof (DIE_STRUCT) +
                    DctCount * (
                      sizeof (DCT_STRUCT) + (
                        ChannelCount * (
                          sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                            RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES +
                            (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
                            (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
                          )
                        )
                      )
                    );
  AllocHeapParams.RequestedBufferSize = SizeOfNewBuffer;
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    BufferPtr = AllocHeapParams.BufferPtr;
    LibAmdMemCopy (
      BufferPtr,
      &(EnvPtr->DieStruct),
      sizeof (DIE_STRUCT) + DctCount * (sizeof (DCT_STRUCT) + ChannelCount * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))),
      StdHeader
    );

    //
    // Fix up pointers: the copied structs contain pointers valid on the BSP;
    // repoint them into this AP-local buffer.
    //
    MCTPtr = (DIE_STRUCT *) BufferPtr;
    BufferPtr += sizeof (DIE_STRUCT);
    MCTPtr->DctData = (DCT_STRUCT *) BufferPtr;
    BufferPtr += MCTPtr->DctCount * sizeof (DCT_STRUCT);
    for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
      MCTPtr->DctData[Dct].ChData = (CH_DEF_STRUCT *) BufferPtr;
      BufferPtr += MCTPtr->DctData[Dct].ChannelCount * sizeof (CH_DEF_STRUCT);
      for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
        MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = MCTPtr;
        MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &MCTPtr->DctData[Dct];
      }
    }
    NB.PSBlock = (MEM_PS_BLOCK *) BufferPtr;
    BufferPtr += DctCount * ChannelCount * sizeof (MEM_PS_BLOCK);

    // The whole training buffer is what gets shipped back to the BSP.
    ReturnData.DataPtr = AllocHeapParams.BufferPtr;
    ReturnData.DataSizeInDwords = (SizeOfNewBuffer + 3) / 4;
    ReturnData.DataTransferFlags = 0;

    //
    // Allocate Memory for the MEM_DATA_STRUCT we will use
    //
    AllocHeapParams.RequestedBufferSize = sizeof (MEM_DATA_STRUCT);
    AllocHeapParams.BufferHandle = AMD_MEM_DATA_HANDLE;
    AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
    if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
      MemPtr = (MEM_DATA_STRUCT *)AllocHeapParams.BufferPtr;
      LibAmdMemCopy (&(MemPtr->StdHeader), &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), StdHeader);

      //
      // Copy Parameters from environment
      //
      ParameterList.HoleBase = EnvPtr->HoleBase;
      ParameterList.BottomIo = EnvPtr->BottomIo;
      ParameterList.UmaSize = EnvPtr->UmaSize;
      ParameterList.SysLimit = EnvPtr->SysLimit;
      ParameterList.TableBasedAlterations = EnvPtr->TableBasedAlterations;
      ParameterList.PlatformMemoryConfiguration = EnvPtr->PlatformMemoryConfiguration;
      MemPtr->ParameterListPtr = &ParameterList;
      for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
        MemPtr->GetPlatformCfg[p] = EnvPtr->GetPlatformCfg[p];
      }
      MemPtr->ErrorHandling = EnvPtr->ErrorHandling;

      //
      // Create Local NBBlock and Tech Block (first installed tech that
      // accepts this NB wins).
      //
      EnvPtr->NBBlockCtor (&NB, MCTPtr, EnvPtr->FeatPtr);
      NB.RefPtr = &ParameterList;
      NB.MemPtr = MemPtr;
      i = 0;
      while (memTechInstalled[i] != NULL) {
        if (memTechInstalled[i] (&TB, &NB)) {
          break;
        }
        i++;
      }
      NB.TechPtr = &TB;
      NB.TechBlockSwitch (&NB);

      //
      // Setup CPU Mem Type MSRs on the AP
      //
      NB.CpuMemTyping (&NB);

      IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NB.Node);
      //
      // Call Technology Specific Training routine
      //
      NB.TrainingFlow (&NB);

      //
      // Copy training data (delay tables, failure masks, per-lane data) to
      // the tail of the ReturnData buffer, right after the PS blocks.
      //
      LibAmdMemCopy (
        BufferPtr,
        MCTPtr->DctData[0].ChData[0].RcvEnDlys,
        ((DctCount * ChannelCount) * (
          (RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES) +
          (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
          (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
        )),
        StdHeader);

      HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);

      //
      // Restore pointers so the env's DIE_STRUCT again references the
      // original (BSP-side) tables before the buffer is transmitted.
      //
      for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
        for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
          MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = &EnvPtr->DieStruct;
          MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &EnvPtr->DieStruct.DctData[Dct];
          MCTPtr->DctData[Dct].ChData[Channel].RcvEnDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RcvEnDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsDlys;
          // NOTE(review): duplicated assignment below — RdDqsDlys is restored
          // twice; harmless, but one of these may have been meant for another
          // delay field. Confirm before removing.
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqs2dDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqs2dDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].FailingBitMask = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].FailingBitMask;
        }
        MCTPtr->DctData[Dct].ChData = EnvPtr->DieStruct.DctData[Dct].ChData;
      }
      MCTPtr->DctData = EnvPtr->DieStruct.DctData;
    }

    //
    // Signal to BSP that training is complete and Send Results
    //
    ASSERT (ReturnData.DataPtr != NULL);
    ApUtilTransmitBuffer (EnvPtr->BspSocket, EnvPtr->BspCore, &ReturnData, StdHeader);

    //
    // Clean up and exit.
    //
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0), StdHeader);
  } else {
    MCTPtr = &EnvPtr->DieStruct;
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_TRAINING_DATA, MCTPtr->NodeId, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocate heap for buffer for parallel training data
  }
  return TRUE;
}
/**
 *
 * This function is the main memory configuration function for DR DDR3.
 *
 * Requirements:
 *
 * Run-Time Requirements:
 * 1. Complete Hypertransport Bus Configuration
 * 2. AmdMemInitDataStructDef must be run to set default values
 * 3. MSR bit to allow access to high PCI regs set on all nodes
 * 4. BSP in Big Real Mode
 * 5. Stack available
 * 6. MCG_CTL=-1, MC4_EN=0 for all CPUs
 * 7. MCi_STS from shutdown/warm reset recorded (if desired) prior to entry
 * 8. All var MTRRs reset to zero
 * 9. State of NB_CFG.DisDatMsk set properly on all CPUs
 *
 * @param[in,out]  *MemPtr - Pointer to the MEM_DATA_STRUCT
 *
 * @return         AGESA_STATUS
 *                     - AGESA_ALERT
 *                     - AGESA_FATAL
 *                     - AGESA_SUCCESS
 *                     - AGESA_WARNING
 */
AGESA_STATUS
AmdMemAuto (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  MEM_SHARED_DATA mmSharedData;
  MEM_MAIN_DATA_BLOCK mmData;
  MEM_NB_BLOCK *NBPtr;
  MEM_TECH_BLOCK *TechPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  AGESA_STATUS Retval;
  UINT8 i;
  UINT8 Die;
  UINT8 DieCount;
  UINT8 Tab;
  CPU_SPECIFIC_SERVICES *FamilySpecificServices;

  ASSERT (MemPtr != NULL);
  AGESA_TESTPOINT (TpProcMemAmdMemAuto, &MemPtr->StdHeader);

  // Dump the caller-supplied memory parameters to the debug console.
  IDS_HDT_CONSOLE (MEM_FLOW, "MEM PARAMS:\n");
  IDS_HDT_CONSOLE (MEM_FLOW, "\tBottomIo : %04x\n", MemPtr->ParameterListPtr->BottomIo);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemHoleRemap : %d\n", MemPtr->ParameterListPtr->MemHoleRemapping);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tLimitBelow1TB : %d\n", MemPtr->ParameterListPtr->LimitMemoryToBelow1Tb);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tUserTimingMode : %d\n", MemPtr->ParameterListPtr->UserTimingMode);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemClockValue : %d\n", MemPtr->ParameterListPtr->MemClockValue);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tBankIntlv : %d\n", MemPtr->ParameterListPtr->EnableBankIntlv);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tNodeIntlv : %d\n", MemPtr->ParameterListPtr->EnableNodeIntlv);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tChannelIntlv : %d\n", MemPtr->ParameterListPtr->EnableChannelIntlv);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tEccFeature : %d\n", MemPtr->ParameterListPtr->EnableEccFeature);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tPowerDown : %d\n", MemPtr->ParameterListPtr->EnablePowerDown);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tOnLineSpare : %d\n", MemPtr->ParameterListPtr->EnableOnLineSpareCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tParity : %d\n", MemPtr->ParameterListPtr->EnableParity);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tBankSwizzle : %d\n", MemPtr->ParameterListPtr->EnableBankSwizzle);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemClr : %d\n", MemPtr->ParameterListPtr->EnableMemClr);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tUmaMode : %d\n", MemPtr->ParameterListPtr->UmaMode);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tUmaSize : %d\n", MemPtr->ParameterListPtr->UmaSize);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tMemRestoreCtl : %d\n", MemPtr->ParameterListPtr->MemRestoreCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tSaveMemContextCtl : %d\n", MemPtr->ParameterListPtr->SaveMemContextCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tExternalVrefCtl : %d\n", MemPtr->ParameterListPtr->ExternalVrefCtl);
  IDS_HDT_CONSOLE (MEM_FLOW, "\tForceTrainMode : %d\n\n", MemPtr->ParameterListPtr->ForceTrainMode);

  //----------------------------------------------------------------------------
  // Get TSC rate, which will be used later in Wait10ns routine
  //----------------------------------------------------------------------------
  GetCpuServicesOfCurrentCore ((CONST CPU_SPECIFIC_SERVICES **)&FamilySpecificServices, &MemPtr->StdHeader);
  FamilySpecificServices->GetTscRate (FamilySpecificServices, &MemPtr->TscRate, &MemPtr->StdHeader);

  //----------------------------------------------------------------------------
  // Read In SPD Data
  //----------------------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemBeforeSpdProcessing, &MemPtr->StdHeader);
  MemSPDDataProcess (MemPtr);

  //----------------------------------------------------------------
  // Initialize Main Data Block
  //----------------------------------------------------------------
  mmData.MemPtr = MemPtr;
  mmData.mmSharedPtr = &mmSharedData;
  LibAmdMemFill (&mmSharedData, 0, sizeof (mmSharedData), &MemPtr->StdHeader);
  mmSharedData.DimmExcludeFlag = NORMAL;
  mmSharedData.NodeIntlv.IsValid = FALSE;

  //----------------------------------------------------------------
  // Discover populated CPUs
  //
  //----------------------------------------------------------------
  Retval = MemSocketScan (&mmData);
  if (Retval == AGESA_FATAL) {
    return Retval;
  }
  DieCount = mmData.DieCount;

  //----------------------------------------------------------------
  //
  // Allocate Memory for NB and Tech Blocks
  //
  //  NBPtr[Die]----+
  //                |
  //                v
  //  +---+---+---+---+---+---+---+---+
  //  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |  NB Blocks
  //  +---+---+---+---+---+---+---+---+
  //    |   |   |   |   |   |   |   |
  //    |   |   |   |   |   |   |   |
  //    v   v   v   v   v   v   v   v
  //  +---+---+---+---+---+---+---+---+
  //  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |  Tech Blocks
  //  +---+---+---+---+---+---+---+---+
  //
  //
  //----------------------------------------------------------------
  AllocHeapParams.RequestedBufferSize = (DieCount * (sizeof (MEM_NB_BLOCK) + sizeof (MEM_TECH_BLOCK)));
  AllocHeapParams.BufferHandle = AMD_MEM_AUTO_HANDLE;
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (AGESA_SUCCESS != HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader)) {
    ASSERT(FALSE); // NB and Tech Block Heap allocate error
    return AGESA_FATAL;
  }
  // Tech blocks live immediately after the NB blocks in the same buffer.
  NBPtr = (MEM_NB_BLOCK *)AllocHeapParams.BufferPtr;
  TechPtr = (MEM_TECH_BLOCK *) (&NBPtr[DieCount]);
  mmData.NBPtr = NBPtr;
  mmData.TechPtr = TechPtr;

  //----------------------------------------------------------------
  // Create NB Blocks: for each die, the first installed NB constructor
  // that recognizes the CPU family wins.
  //----------------------------------------------------------------
  for (Die = 0 ; Die < DieCount ; Die++ ) {
    i = 0;
    while (memNBInstalled[i].MemConstructNBBlock != 0) {
      if (memNBInstalled[i].MemConstructNBBlock (&NBPtr[Die], MemPtr, memNBInstalled[i].MemFeatBlock, &mmSharedData, Die) == TRUE) {
        break;
      }
      i++;
    }
    // Couldn't find a NB which supported this family
    if (memNBInstalled[i].MemConstructNBBlock == 0) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  // Create Technology Blocks: same first-match-wins scheme per die.
  //----------------------------------------------------------------
  for (Die = 0 ; Die < DieCount ; Die++ ) {
    i = 0;
    while (memTechInstalled[i] != NULL) {
      if (memTechInstalled[i] (&TechPtr[Die], &NBPtr[Die])) {
        NBPtr[Die].TechPtr = &TechPtr[Die];
        break;
      }
      i++;
    }
    // Couldn't find a Tech block which supported this family
    if (memTechInstalled[i] == NULL) {
      return AGESA_FATAL;
    }
  }

  //----------------------------------------------------------------
  //
  // MEMORY INITIALIZATION TASKS: run installed flow controls until one
  // reports that flow control is supported for this configuration.
  //
  //----------------------------------------------------------------
  i = 0;
  while (memFlowControlInstalled[i] != NULL) {
    Retval = memFlowControlInstalled[i] (&mmData);
    if (MemPtr->IsFlowControlSupported == TRUE) {
      break;
    }
    i++;
  }

  //----------------------------------------------------------------
  // Deallocate NB register tables
  //----------------------------------------------------------------
  for (Tab = 0; Tab < NumberOfNbRegTables; Tab++) {
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_NB_REG_TABLE, Tab, 0, 0), &MemPtr->StdHeader);
  }

  //----------------------------------------------------------------
  // Check for errors and return the worst per-die error code.
  //----------------------------------------------------------------
  AGESA_TESTPOINT (TpProcMemEnd, &MemPtr->StdHeader);
  for (Die = 0; Die < DieCount; Die++) {
    if (NBPtr[Die].MCTPtr->ErrCode > Retval) {
      Retval = NBPtr[Die].MCTPtr->ErrCode;
    }
  }
  return Retval;
}
/**
 * The top level external interface for Hypertransport Initialization.
 *
 * Create our initial internal state, initialize the coherent fabric,
 * initialize the non-coherent chains, and perform any required fabric tuning or
 * optimization.
 *
 * On the boot core (BSP) this runs the full discovery/init/tuning sequence;
 * on an AP it only rebuilds the Node and Socket maps from the mailbox info.
 * The returned status is the worst (highest-severity) event class observed.
 *
 * @param[in] StdHeader Opaque handle to standard config header
 * @param[in] PlatformConfiguration The platform configuration options.
 * @param[in] AmdHtInterface HT Interface structure.
 *
 * @retval AGESA_SUCCESS Only information events logged.
 * @retval AGESA_ALERT Sync Flood or CRC error logged.
 * @retval AGESA_WARNING Example: expected capability not found
 * @retval AGESA_ERROR logged events indicating some devices may not be available
 * @retval AGESA_FATAL Mixed Family or MP capability mismatch
 *
 */
AGESA_STATUS
AmdHtInitialize (
  IN AMD_CONFIG_PARAMS *StdHeader,
  IN PLATFORM_CONFIGURATION *PlatformConfiguration,
  IN AMD_HT_INTERFACE *AmdHtInterface
  )
{
  STATE_DATA State;
  NORTHBRIDGE Nb;
  HT_FEATURES HtFeatures;
  HT_INTERFACE HtInterface;
  AGESA_STATUS DeallocateStatus;
  AP_MAIL_INFO ApMailboxInfo;
  UINT8 ApNode;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;

  // Wire up the working state block: caller-supplied interface, config
  // handle, and platform options are referenced (not copied) for the
  // duration of this call.
  State.HtBlock = AmdHtInterface;
  State.ConfigHandle = StdHeader;
  State.PlatformConfiguration = PlatformConfiguration;

  // Get the current HT internal interface (to HtBlock data).
  // NOTE: HtInterface/HtFeatures/Nb live on this stack frame; State only
  // holds pointers to them, so State must not outlive this function.
  NewHtInterface (&HtInterface, State.ConfigHandle);
  State.HtInterface = &HtInterface;

  // Get the current HT Feature Set
  NewHtFeatures (&HtFeatures, State.ConfigHandle);
  State.HtFeatures = &HtFeatures;

  // Initialize from static (build-time) options
  State.IsUsingRecoveryHt = OptionHtConfiguration.IsUsingRecoveryHt;
  State.IsSetHtCrcFlood = OptionHtConfiguration.IsSetHtCrcFlood;
  State.IsUsingUnitIdClumping = OptionHtConfiguration.IsUsingUnitIdClumping;

  // Initialize for status and event output; escalated below on failure.
  State.MaxEventClass = AGESA_SUCCESS;

  // Allocate permanent heap structs that are interfaces to other AGESA services.
  // (Done on both BSP and AP paths.)
  State.HtInterface->NewNodeAndSocketTables (&State);

  if (IsBootCore (&State)) {
    // ---- BSP path: full fabric discovery, init, and tuning. ----
    AGESA_TESTPOINT (TpProcHtEntry, State.ConfigHandle);

    // Allocate Bsp only interface heap structs.
    State.HtInterface->NewHopCountTable (&State);

    // Allocate heap for our temporary working space (the port list used
    // during link enumeration). Two PORT_DESCRIPTORs per platform link:
    // presumably one per link endpoint — TODO confirm against PORT_LIST use.
    AllocHeapParams.RequestedBufferSize = (sizeof (PORT_DESCRIPTOR) * (MAX_PLATFORM_LINKS * 2));
    AllocHeapParams.BufferHandle = HT_STATE_DATA_HANDLE;
    AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
    if (HeapAllocateBuffer (&AllocHeapParams, State.ConfigHandle) == AGESA_SUCCESS) {
      State.PortList = (PORT_LIST)AllocHeapParams.BufferPtr;

      // Create the BSP's northbridge.
      NewNorthBridge (0, &State, &Nb);
      State.Nb = &Nb;

      // Ordered init sequence: coherent fabric first, then non-coherent
      // chains, then link optimization and platform tuning.
      CoherentInit (&State);
      NcInit (&State);
      LinkOptimization (&State);
      Tuning (&State);

      // Release the temporary working space before returning.
      DeallocateStatus = HeapDeallocateBuffer (HT_STATE_DATA_HANDLE, State.ConfigHandle);
      ASSERT (DeallocateStatus == AGESA_SUCCESS);
      AGESA_TESTPOINT (TpProcHtDone, State.ConfigHandle);
    } else {
      // Heap allocation failed: HT init cannot proceed on the BSP.
      ASSERT (FALSE);
      State.MaxEventClass = AGESA_ERROR;
      // Cannot Log entry due to heap allocate failed.
    }
  } else {
    // ---- AP path ----
    // Do the AP HT Init, which produces Node and Socket Maps for the AP's use.
    AGESA_TESTPOINT (TpProcHtApMapEntry, State.ConfigHandle);
    GetApMailbox (&ApMailboxInfo.Info, State.ConfigHandle);
    // Mailbox node id must be a valid node index before narrowing to UINT8.
    ASSERT (ApMailboxInfo.Fields.Node < MAX_NODES);
    ApNode = (UINT8)ApMailboxInfo.Fields.Node;
    NewNorthBridge (ApNode, &State, &Nb);
    State.Nb = &Nb;
    InitApMaps (&State);
    AGESA_TESTPOINT (TpProcHtApMapDone, State.ConfigHandle);
  }
  // Worst event severity seen during init (AGESA_SUCCESS if all clean).
  return State.MaxEventClass;
}