/**
 * Main entry point for the AMD_INIT_POST function.
 *
 * This entry point is responsible for initializing all system memory,
 * gathering important data out of the pre-memory cache storage into a
 * temporary holding buffer in main memory. After that APs will be
 * shutdown in preparation for the host environment to take control.
 * Note: pre-memory stack will be disabled also.
 *
 * @param[in,out] PostParams Required input parameters for the AMD_INIT_POST
 *                           entry point.
 *
 * @return Aggregated status across all internal AMD POST calls invoked.
 *
 */
AGESA_STATUS
AmdInitPost (
  IN OUT   AMD_POST_PARAMS *PostParams
  )
{
  AGESA_STATUS        AgesaStatus;        // Status of the most recent sub-call
  AGESA_STATUS        AmdInitPostStatus;  // Worst status seen so far (aggregated return)
  WARM_RESET_REQUEST  Request;
  UINT8               PrevRequestBit;     // Saved warm-reset request deferred from an earlier stage
  UINT8               PrevStateBits;      // Saved warm-reset state bits matching PrevRequestBit

  // Validate the caller's parameter block BEFORE any member is dereferenced.
  // (Fix: previously this ASSERT ran only after several PostParams->StdHeader
  // accesses, so a NULL pointer would have faulted before the check fired.)
  ASSERT (PostParams != NULL);

  IDS_PERF_TIMESTAMP (TP_BEGINPROCAMDINITPOST, &PostParams->StdHeader);

  AGESA_TESTPOINT (TpIfAmdInitPostEntry, &PostParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "AmdInitPost: Start\n\n");
  AmdInitPostStatus = AGESA_SUCCESS;
  PrevRequestBit = FALSE;
  PrevStateBits = WR_STATE_COLD;

  IDS_OPTION_HOOK (IDS_INIT_POST_BEFORE, PostParams, &PostParams->StdHeader);

  // If a previously requested warm reset cannot be triggered in the
  // current stage, store the previous state of request and reset the
  // request struct to the current post stage
  GetWarmResetFlag (&PostParams->StdHeader, &Request);
  if (Request.RequestBit == TRUE) {
    if (Request.StateBits >= Request.PostStage) {
      PrevRequestBit = Request.RequestBit;
      PrevStateBits = Request.StateBits;
      Request.RequestBit = FALSE;
      Request.StateBits = Request.PostStage - 1;
      SetWarmResetFlag (&PostParams->StdHeader, &Request);
    }
  }

  // Northbridge (GNB) init before DRAM is available
  IDS_PERF_TIMESTAMP (TP_BEGINGNBINITATPOST, &PostParams->StdHeader);
  AgesaStatus = GnbInitAtPost (PostParams);
  if (AgesaStatus > AmdInitPostStatus) {
    AmdInitPostStatus = AgesaStatus;
  }
  IDS_PERF_TIMESTAMP (TP_ENDGNBINITATPOST, &PostParams->StdHeader);

  // Memory initialization; the memory code gets its own copy of StdHeader
  IDS_PERF_TIMESTAMP (TP_BEGINAMDMEMAUTO, &PostParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "AmdMemAuto: Start\n");
  PostParams->MemConfig.MemData->StdHeader = PostParams->StdHeader;
  AgesaStatus = AmdMemAuto (PostParams->MemConfig.MemData);
  IDS_HDT_CONSOLE (MAIN_FLOW, "AmdMemAuto: End\n");
  if (AgesaStatus > AmdInitPostStatus) {
    AmdInitPostStatus = AgesaStatus;
  }
  IDS_PERF_TIMESTAMP (TP_ENDAMDMEMAUTO, &PostParams->StdHeader);

  // Everything below requires working DRAM, so skip it on a fatal memory error.
  if (AgesaStatus != AGESA_FATAL) {
    // Check BIST status
    AgesaStatus = CheckBistStatus (&PostParams->StdHeader);
    if (AgesaStatus > AmdInitPostStatus) {
      AmdInitPostStatus = AgesaStatus;
    }

    //
    // P-State data gathered, then, Relinquish APs
    //
    IDS_PERF_TIMESTAMP (TP_BEGINAMDCPUPOST, &PostParams->StdHeader);
    IDS_HDT_CONSOLE (MAIN_FLOW, "AmdCpuPost: Start\n");
    AgesaStatus = AmdCpuPost (&PostParams->StdHeader, &PostParams->PlatformConfig);
    IDS_HDT_CONSOLE (MAIN_FLOW, "AmdCpuPost: End\n");
    if (AgesaStatus > AmdInitPostStatus) {
      AmdInitPostStatus = AgesaStatus;
    }
    IDS_PERF_TIMESTAMP (TP_ENDAMDCPUPOST, &PostParams->StdHeader);

    // Warm Reset
    GetWarmResetFlag (&PostParams->StdHeader, &Request);
    // If a warm reset is requested in the current post stage, trigger the
    // warm reset and ignore the previous request
    if (Request.RequestBit == TRUE) {
      if (Request.StateBits < Request.PostStage) {
        AgesaDoReset (WARM_RESET_WHENEVER, &PostParams->StdHeader);
      }
    } else {
      // Otherwise, if there's a previous request, restore it
      // so that the subsequent post stage can trigger the warm reset
      if (PrevRequestBit == TRUE) {
        Request.RequestBit = PrevRequestBit;
        Request.StateBits = PrevStateBits;
        SetWarmResetFlag (&PostParams->StdHeader, &Request);
      }
    }

    // Northbridge (GNB) init that needs DRAM present
    IDS_PERF_TIMESTAMP (TP_BEGINGNBINITATPOSTAFTERDRAM, &PostParams->StdHeader);
    AgesaStatus = GnbInitAtPostAfterDram (PostParams);
    if (AgesaStatus > AmdInitPostStatus) {
      AmdInitPostStatus = AgesaStatus;
    }
    IDS_PERF_TIMESTAMP (TP_ENDGNBINITATPOSTAFTERDRAM, &PostParams->StdHeader);

    IDS_OPTION_HOOK (IDS_INIT_POST_AFTER, PostParams, &PostParams->StdHeader);
    AGESA_TESTPOINT (TpIfAmdInitPostExit, &PostParams->StdHeader);
    IDS_HDT_CONSOLE (MAIN_FLOW, "\nAmdInitPost: End\n\n");
    IDS_HDT_CONSOLE (MAIN_FLOW, "Heap transfer Start ...\n\n");

    // The heap will be relocated to a new address in the next stage, so flush
    // out the debug print buffer if needed
    IDS_HDT_CONSOLE_FLUSH_BUFFER (&PostParams->StdHeader);

    // WARNING: IDT will be moved from local cache to temp memory, so restore IDTR for BSP here
    IDS_EXCEPTION_TRAP (IDS_IDT_RESTORE_IDTR_FOR_BSC, NULL, &PostParams->StdHeader);

    IDS_PERF_TIMESTAMP (TP_ENDPROCAMDINITPOST, &PostParams->StdHeader);
    // Copies BSP heap content to RAM, and it should be at the end of AmdInitPost
    AgesaStatus = CopyHeapToTempRamAtPost (&(PostParams->StdHeader));
    if (AgesaStatus > AmdInitPostStatus) {
      AmdInitPostStatus = AgesaStatus;
    }
    PostParams->StdHeader.HeapStatus = HEAP_TEMP_MEM;
  }

  // Check for Cache As Ram Corruption
  IDS_CAR_CORRUPTION_CHECK (&PostParams->StdHeader);

  // At the end of AmdInitPost, set StateBits to POST to allow any warm reset that occurs outside
  // of AGESA to be recognized by IsWarmReset()
  GetWarmResetFlag (&PostParams->StdHeader, &Request);
  Request.StateBits = Request.PostStage;
  SetWarmResetFlag (&PostParams->StdHeader, &Request);

  return AmdInitPostStatus;
}
/**
 * Main entry point for the AMD_INIT_RESET function.
 *
 * This entry point is responsible for performing basic processor initialization.
 *
 * @param[in,out] ResetParams Required input parameters for the AMD_INIT_RESET
 *                            entry point.
 *
 * @return Aggregated status across all internal AMD reset calls invoked.
 *
 */
AGESA_STATUS
AmdInitReset (
  IN OUT   AMD_RESET_PARAMS *ResetParams
  )
{
  AGESA_STATUS        AgesaStatus;        // Worst status seen so far (aggregated return)
  AGESA_STATUS        CalledAgesaStatus;  // Status of the most recent sub-call
  WARM_RESET_REQUEST  Request;
  UINT8               PrevRequestBit;     // Saved warm-reset request deferred from an earlier stage
  UINT8               PrevStateBits;      // Saved warm-reset state bits matching PrevRequestBit

  // Validate the caller's parameter block BEFORE any member is dereferenced.
  // (Fix: previously this ASSERT ran only after ResetParams->StdHeader had
  // already been dereferenced several times, so it could never catch a NULL.)
  ASSERT (ResetParams != NULL);

  IDS_PERF_TIMESTAMP (TP_BEGINPROCAMDINITRESET, &ResetParams->StdHeader);

  AgesaStatus = AGESA_SUCCESS;

  // Setup ROM execution cache
  CalledAgesaStatus = AllocateExecutionCache (&ResetParams->StdHeader, &ResetParams->CacheRegion[0]);
  if (CalledAgesaStatus > AgesaStatus) {
    AgesaStatus = CalledAgesaStatus;
  }

  IDS_EXTENDED_HOOK (IDS_INIT_RESET_BEFORE, NULL, NULL, &ResetParams->StdHeader);
  // Init Debug Print function
  IDS_HDT_CONSOLE_INIT (&ResetParams->StdHeader);
  IDS_HDT_CONSOLE (MAIN_FLOW, "\nAmdInitReset: Start\n\n");
  IDS_HDT_CONSOLE (MAIN_FLOW, "\n*** %s ***\n\n", &UserOptions.VersionString);
  AGESA_TESTPOINT (TpIfAmdInitResetEntry, &ResetParams->StdHeader);
  PrevRequestBit = FALSE;
  PrevStateBits = WR_STATE_COLD;

  // FCH reset init is performed by the BSP only
  IDS_PERF_TIMESTAMP (TP_BEGININITRESET, &ResetParams->StdHeader);
  if (IsBsp (&ResetParams->StdHeader, &AgesaStatus)) {
    CalledAgesaStatus = BldoptFchFunction.InitReset (ResetParams);
    AgesaStatus = (CalledAgesaStatus > AgesaStatus) ? CalledAgesaStatus : AgesaStatus;
  }
  IDS_PERF_TIMESTAMP (TP_ENDINITRESET, &ResetParams->StdHeader);

  // If a previously requested warm reset cannot be triggered in the
  // current stage, store the previous state of request and reset the
  // request struct to the current post stage
  GetWarmResetFlag (&ResetParams->StdHeader, &Request);
  if (Request.RequestBit == TRUE) {
    if (Request.StateBits >= Request.PostStage) {
      PrevRequestBit = Request.RequestBit;
      PrevStateBits = Request.StateBits;
      Request.RequestBit = FALSE;
      Request.StateBits = Request.PostStage - 1;
      SetWarmResetFlag (&ResetParams->StdHeader, &Request);
    }
  }

  // Initialize the PCI MMIO access mechanism
  InitializePciMmio (&ResetParams->StdHeader);

  // Warm Reset, should be at the end of AmdInitReset
  GetWarmResetFlag (&ResetParams->StdHeader, &Request);
  // If a warm reset is requested in the current post stage, trigger the
  // warm reset and ignore the previous request
  if (Request.RequestBit == TRUE) {
    if (Request.StateBits < Request.PostStage) {
      AgesaDoReset (WARM_RESET_WHENEVER, &ResetParams->StdHeader);
    }
  } else {
    // Otherwise, if there's a previous request, restore it
    // so that the subsequent post stage can trigger the warm reset
    if (PrevRequestBit == TRUE) {
      Request.RequestBit = PrevRequestBit;
      Request.StateBits = PrevStateBits;
      SetWarmResetFlag (&ResetParams->StdHeader, &Request);
    }
  }

  // Check for Cache As Ram Corruption
  IDS_CAR_CORRUPTION_CHECK (&ResetParams->StdHeader);

  IDS_HDT_CONSOLE (MAIN_FLOW, "\nAmdInitReset: End\n\n");
  AGESA_TESTPOINT (TpIfAmdInitResetExit, &ResetParams->StdHeader);
  IDS_PERF_TIMESTAMP (TP_ENDPROCAMDINITRESET, &ResetParams->StdHeader);
  return  AgesaStatus;
}
/**
 * Performs core leveling for the system.
 *
 * This function implements the AMD_CPU_EARLY_PARAMS.CoreLevelingMode parameter.
 * The possible modes are:
 *    -0    CORE_LEVEL_LOWEST            Level to lowest common denominator
 *    -1    CORE_LEVEL_TWO               Level to 2 cores
 *    -2    CORE_LEVEL_POWER_OF_TWO      Level to 1,2,4 or 8
 *    -3    CORE_LEVEL_NONE              Do no leveling
 *    -4    CORE_LEVEL_COMPUTE_UNIT      Level cores to one core per compute unit
 *
 * @param[in]  EntryPoint        Timepoint designator.
 * @param[in]  PlatformConfig    Contains the leveling mode parameter
 * @param[in]  StdHeader         Config handle for library and services
 *
 * @return     The most severe status of any family specific service.
 *
 */
AGESA_STATUS
CoreLevelingAtEarly (
  IN       UINT64 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT32    CoreNumPerComputeUnit;    // Cores contained in each compute unit (1..4)
  UINT32    EnabledComputeUnit;       // Number of enabled compute units on node 0
  UINT32    SocketAndModule;          // Zero: this variant only operates on socket 0 / module 0
  UINT32    LowCore;                  // Lowest core number on the module
  UINT32    HighCore;                 // Highest core number on the module
  UINT32    LeveledCores;             // Final number of cores to leave enabled
  UINT32    RequestedCores;           // Core count the caller explicitly asked for
  UINT32    TotalEnabledCoresOnNode;  // Enabled cores on node 0 before leveling
  BOOLEAN   RegUpdated;               // TRUE if the down-core register was changed
  CORE_LEVELING_TYPE  CoreLevelMode;
  CPU_CORE_LEVELING_FAMILY_SERVICES  *FamilySpecificServices;
  WARM_RESET_REQUEST Request;

  IDS_HDT_CONSOLE (CPU_TRACE, "CoreLevelingAtEarly\n CoreLevelMode: %d\n", PlatformConfig->CoreLevelingMode);

  LeveledCores = 0;
  SocketAndModule = 0;

  ASSERT (PlatformConfig->CoreLevelingMode < CoreLevelModeMax);

  // Get OEM IO core level mode
  CoreLevelMode = (CORE_LEVELING_TYPE) PlatformConfig->CoreLevelingMode;

  // Collect cpu core info (socket 0, module 0 only in this variant)
  GetGivenModuleCoreRange (0, 0, &LowCore, &HighCore, StdHeader);
  // Get the highest and lowest core count in all nodes
  TotalEnabledCoresOnNode = HighCore - LowCore + 1;

  // Derive compute-unit geometry from the hardware mapping
  switch (GetComputeUnitMapping (StdHeader)) {
  case AllCoresMapping:
    // All cores are in their own compute unit.
    CoreNumPerComputeUnit = 1;
    EnabledComputeUnit = TotalEnabledCoresOnNode;
    break;
  case EvenCoresMapping:
    // Cores are paired in compute units.
    CoreNumPerComputeUnit = 2;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 2);
    break;
  case TripleCoresMapping:
    // Three cores are grouped in compute units.
    CoreNumPerComputeUnit = 3;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 3);
    break;
  case QuadCoresMapping:
    // Four cores are grouped in compute units.
    CoreNumPerComputeUnit = 4;
    EnabledComputeUnit = (TotalEnabledCoresOnNode / 4);
    break;
  default:
    // Unknown mapping: fall back to one core per compute unit and flag in debug builds.
    CoreNumPerComputeUnit = 1;
    EnabledComputeUnit = TotalEnabledCoresOnNode;
    ASSERT (FALSE);
  }
  IDS_HDT_CONSOLE (CPU_TRACE, " TotalEnabledCoresOnNode %d EnabledComputeUnit %d\n", TotalEnabledCoresOnNode, EnabledComputeUnit);

  // Get LeveledCores
  switch (CoreLevelMode) {
  case CORE_LEVEL_LOWEST:
    // Single-node variant: nothing to level against, so this is a no-op.
    return (AGESA_SUCCESS);
    break;
  case CORE_LEVEL_TWO:
    // Level to 2 cores, clamped to what the node actually has.
    LeveledCores = 2;
    LeveledCores = (LeveledCores <= TotalEnabledCoresOnNode) ? LeveledCores : TotalEnabledCoresOnNode;
    if (LeveledCores != 2) {
      // Could not honor the request exactly; log the adjustment.
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      2, LeveledCores, 0, 0, StdHeader
      );
    }
    break;
  case CORE_LEVEL_POWER_OF_TWO:
    // Level to power of 2 (1, 2, 4, 8...)
    LeveledCores = 1;
    while (TotalEnabledCoresOnNode >= (LeveledCores * 2)) {
      LeveledCores = LeveledCores * 2;
    }
    break;
  case CORE_LEVEL_COMPUTE_UNIT:
  case CORE_LEVEL_COMPUTE_UNIT_TWO:
  case CORE_LEVEL_COMPUTE_UNIT_THREE:
    // Level cores to 1~3 core(s) per compute unit, with additional reduction to level
    // all processors to match the processor with the minimum number of cores.
    if (CoreNumPerComputeUnit == 1) {
      // If there is one core per compute unit, this is the same as CORE_LEVEL_LOWEST.
      return (AGESA_SUCCESS);
    } else {
      // If there are more than one core per compute unit, level to the number of
      // compute units * cores per compute unit.
      // (CoreLevelMode - CORE_LEVEL_COMPUTE_UNIT + 1) yields 1, 2, or 3 cores per unit.
      LeveledCores = EnabledComputeUnit * (CoreLevelMode - CORE_LEVEL_COMPUTE_UNIT + 1);
    }
    break;
  case CORE_LEVEL_ONE:
    LeveledCores = 1;
    break;
  case CORE_LEVEL_THREE:
  case CORE_LEVEL_FOUR:
  case CORE_LEVEL_FIVE:
  case CORE_LEVEL_SIX:
  case CORE_LEVEL_SEVEN:
  case CORE_LEVEL_EIGHT:
  case CORE_LEVEL_NINE:
  case CORE_LEVEL_TEN:
  case CORE_LEVEL_ELEVEN:
  case CORE_LEVEL_TWELVE:
  case CORE_LEVEL_THIRTEEN:
  case CORE_LEVEL_FOURTEEN:
  case CORE_LEVEL_FIFTEEN:
    // Processors with compute units disable all cores in an entire compute unit at a time
    // For example, on a processor with two cores per compute unit, the effective
    // explicit levels are CORE_LEVEL_ONE, CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_SIX, and
    // CORE_LEVEL_EIGHT.
    RequestedCores = CoreLevelMode - CORE_LEVEL_THREE + 3;
    LeveledCores = RequestedCores;
    // Round down to a whole number of compute units.
    LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    LeveledCores = (LeveledCores <= TotalEnabledCoresOnNode) ? LeveledCores : TotalEnabledCoresOnNode;
    if (LeveledCores != 1) {
      // NOTE(review): this second rounding repeats the one above; the clamp to
      // TotalEnabledCoresOnNode in between appears to keep it a no-op unless the
      // total itself is not compute-unit aligned — confirm intent before removing.
      LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    }
    if (LeveledCores != RequestedCores) {
      // Could not honor the request exactly; log the adjustment.
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      RequestedCores, LeveledCores, 0, 0, StdHeader
      );
    }
    break;
  default:
    // Includes CORE_LEVEL_NONE and any out-of-range value.
    ASSERT (FALSE);
  }

  // Set down core register
  GetFeatureServicesOfSocket (&CoreLevelingFamilyServiceTable, 0, (CONST VOID **)&FamilySpecificServices, StdHeader);
  if (FamilySpecificServices != NULL) {
    IDS_HDT_CONSOLE (CPU_TRACE, " SetDownCoreRegister: LeveledCores %d CoreLevelMode %d\n", LeveledCores, CoreLevelMode);
    RegUpdated = FamilySpecificServices->SetDownCoreRegister (FamilySpecificServices, &SocketAndModule, &SocketAndModule, &LeveledCores, CoreLevelMode, StdHeader);
    // If the down core register is updated, trigger a warm reset.
    if (RegUpdated) {
      GetWarmResetFlag (StdHeader, &Request);
      Request.RequestBit = TRUE;
      // Rewind StateBits one stage so the request fires in the current stage.
      Request.StateBits = Request.PostStage - 1;
      IDS_HDT_CONSOLE (CPU_TRACE, " Request a warm reset.\n");
      SetWarmResetFlag (StdHeader, &Request);
    }
  }

  return (AGESA_SUCCESS);
}
/**
 * Performs core leveling for the system.
 *
 * This function implements the AMD_CPU_EARLY_PARAMS.CoreLevelingMode parameter.
 * The possible modes are:
 *    -0    CORE_LEVEL_LOWEST            Level to lowest common denominator
 *    -1    CORE_LEVEL_TWO               Level to 2 cores
 *    -2    CORE_LEVEL_POWER_OF_TWO      Level to 1,2,4 or 8
 *    -3    CORE_LEVEL_NONE              Do no leveling
 *    -4    CORE_LEVEL_COMPUTE_UNIT      Level cores to one core per compute unit
 *
 * @param[in]  EntryPoint        Timepoint designator.
 * @param[in]  PlatformConfig    Contains the leveling mode parameter
 * @param[in]  StdHeader         Config handle for library and services
 *
 * @return     The most severe status of any family specific service.
 *
 */
AGESA_STATUS
CoreLevelingAtEarly (
  IN       UINT64 EntryPoint,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN       AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT32    CoreNumPerComputeUnit;  // Cores contained in each compute unit (1 or 2 here)
  UINT32    MinNumOfComputeUnit;    // Fewest enabled compute units across all nodes
  UINT32    EnabledComputeUnit;     // Enabled compute units on the current node
  UINT32    Socket;                 // Socket iterator
  UINT32    Module;                 // Module (node) iterator within a socket
  UINT32    NumberOfSockets;        // Sockets supported by the platform
  UINT32    NumberOfModules;        // Modules per processor (MCM support)
  UINT32    MinCoreCountOnNode;     // Fewest enabled cores found on any node
  UINT32    MaxCoreCountOnNode;     // Most enabled cores found on any node
  UINT32    LowCore;                // Lowest core number on the current module
  UINT32    HighCore;               // Highest core number on the current module
  UINT32    LeveledCores;           // Final number of cores per node to leave enabled
  UINT32    RequestedCores;         // Core count the caller explicitly asked for
  UINT32    TotalEnabledCoresOnNode;  // Enabled cores on the current node
  BOOLEAN   RegUpdated;             // TRUE if a down-core register was changed
  AP_MAIL_INFO ApMailboxInfo;
  CORE_LEVELING_TYPE  CoreLevelMode;
  CPU_CORE_LEVELING_FAMILY_SERVICES *FamilySpecificServices;
  WARM_RESET_REQUEST Request;

  // Sentinel starting values so the min/max scan below converges.
  MaxCoreCountOnNode = 0;
  MinCoreCountOnNode = 0xFFFFFFFF;
  LeveledCores = 0;
  CoreNumPerComputeUnit = 1;
  MinNumOfComputeUnit = 0xFF;

  ASSERT (PlatformConfig->CoreLevelingMode < CoreLevelModeMax);

  // Get OEM IO core level mode
  CoreLevelMode = (CORE_LEVELING_TYPE) PlatformConfig->CoreLevelingMode;

  // Get socket count
  NumberOfSockets = GetPlatformNumberOfSockets ();
  GetApMailbox (&ApMailboxInfo.Info, StdHeader);
  // ModuleType is zero-based; +1 yields the module count per processor.
  NumberOfModules = ApMailboxInfo.Fields.ModuleType + 1;

  // Collect cpu core info: scan every present socket/module to find the
  // min/max core counts and the minimum compute-unit count system-wide.
  for (Socket = 0; Socket < NumberOfSockets; Socket++) {
    if (IsProcessorPresent (Socket, StdHeader)) {
      for (Module = 0; Module < NumberOfModules; Module++) {
        if (GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader)) {
          // Get the highest and lowest core count in all nodes
          TotalEnabledCoresOnNode = HighCore - LowCore + 1;
          if (TotalEnabledCoresOnNode < MinCoreCountOnNode) {
            MinCoreCountOnNode = TotalEnabledCoresOnNode;
          }
          if (TotalEnabledCoresOnNode > MaxCoreCountOnNode) {
            MaxCoreCountOnNode = TotalEnabledCoresOnNode;
          }

          EnabledComputeUnit = TotalEnabledCoresOnNode;
          switch (GetComputeUnitMapping (StdHeader)) {
          case AllCoresMapping:
            // All cores are in their own compute unit.
            break;
          case EvenCoresMapping:
            // Cores are paired in compute units.
            CoreNumPerComputeUnit = 2;
            EnabledComputeUnit = (TotalEnabledCoresOnNode / 2);
            break;
          default:
            // This variant only supports 1- or 2-core compute units.
            ASSERT (FALSE);
          }
          // Get minimum of compute unit. This will either be the minimum number of cores (AllCoresMapping),
          // or less (EvenCoresMapping).
          if (EnabledComputeUnit < MinNumOfComputeUnit) {
            MinNumOfComputeUnit = EnabledComputeUnit;
          }
        }
      }
    }
  }

  // Get LeveledCores
  switch (CoreLevelMode) {
  case CORE_LEVEL_LOWEST:
    // Already level: every node has the same core count, nothing to do.
    if (MinCoreCountOnNode == MaxCoreCountOnNode) {
      return (AGESA_SUCCESS);
    }
    // Round the minimum down to a whole number of compute units.
    LeveledCores = (MinCoreCountOnNode / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    break;
  case CORE_LEVEL_TWO:
    // Two cores total, spread over the modules of each processor.
    LeveledCores = 2 / NumberOfModules;
    if (LeveledCores != 0) {
      LeveledCores = (LeveledCores <= MinCoreCountOnNode) ? LeveledCores : MinCoreCountOnNode;
    } else {
      // More than two modules: cannot express "two cores" as cores-per-module.
      return (AGESA_WARNING);
    }
    if ((LeveledCores * NumberOfModules) != 2) {
      // Could not honor the request exactly; log the adjustment.
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      2, (LeveledCores * NumberOfModules), 0, 0, StdHeader
      );
    }
    break;
  case CORE_LEVEL_POWER_OF_TWO:
    // Level to power of 2 (1, 2, 4, 8...)
    LeveledCores = 1;
    while (MinCoreCountOnNode >= (LeveledCores * 2)) {
      LeveledCores = LeveledCores * 2;
    }
    break;
  case CORE_LEVEL_COMPUTE_UNIT:
    // Level cores to one core per compute unit, with additional reduction to level
    // all processors to match the processor with the minimum number of cores.
    if (CoreNumPerComputeUnit == 1) {
      // If there is one core per compute unit, this is the same as CORE_LEVEL_LOWEST.
      if (MinCoreCountOnNode == MaxCoreCountOnNode) {
        return (AGESA_SUCCESS);
      }
      LeveledCores = MinCoreCountOnNode;
    } else {
      // If there are more than one core per compute unit, level to the number of compute units.
      LeveledCores = MinNumOfComputeUnit;
    }
    break;
  case CORE_LEVEL_ONE:
    LeveledCores = 1;
    if (NumberOfModules > 1) {
      // An MCM processor keeps one core per module, so more than one core total.
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      1, NumberOfModules, 0, 0, StdHeader
      );
    }
    break;
  case CORE_LEVEL_THREE:
  case CORE_LEVEL_FOUR:
  case CORE_LEVEL_FIVE:
  case CORE_LEVEL_SIX:
  case CORE_LEVEL_SEVEN:
  case CORE_LEVEL_EIGHT:
  case CORE_LEVEL_NINE:
  case CORE_LEVEL_TEN:
  case CORE_LEVEL_ELEVEN:
  case CORE_LEVEL_TWELVE:
  case CORE_LEVEL_THIRTEEN:
  case CORE_LEVEL_FOURTEEN:
  case CORE_LEVEL_FIFTEEN:
    // MCM processors can not have an odd number of cores. For an odd CORE_LEVEL_N, MCM processors will be
    // leveled as though CORE_LEVEL_N+1 was chosen.
    // Processors with compute units disable all cores in an entire compute unit at a time, or on an MCM processor,
    // two compute units at a time. For example, on an SCM processor with two cores per compute unit, the effective
    // explicit levels are CORE_LEVEL_ONE, CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_SIX, and
    // CORE_LEVEL_EIGHT. The same example for an MCM processor with two cores per compute unit has effective
    // explicit levels of CORE_LEVEL_TWO, CORE_LEVEL_FOUR, CORE_LEVEL_EIGHT, and CORE_LEVEL_TWELVE.
    RequestedCores = CoreLevelMode - CORE_LEVEL_THREE + 3;
    // Distribute the request across modules, rounding up.
    LeveledCores = (RequestedCores + NumberOfModules - 1) / NumberOfModules;
    // Round down to a whole number of compute units, then clamp to the smallest node.
    LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    LeveledCores = (LeveledCores <= MinCoreCountOnNode) ? LeveledCores : MinCoreCountOnNode;
    if (LeveledCores != 1) {
      LeveledCores = (LeveledCores / CoreNumPerComputeUnit) * CoreNumPerComputeUnit;
    }
    // NOTE(review): the comparison below multiplies by CoreNumPerComputeUnit even
    // though LeveledCores already counts cores (not compute units); verify the
    // intended units against the family-specific SetDownCoreRegister contract.
    if ((LeveledCores * NumberOfModules * CoreNumPerComputeUnit) != RequestedCores) {
      PutEventLog (
      AGESA_WARNING,
      CPU_WARNING_ADJUSTED_LEVELING_MODE,
      RequestedCores, (LeveledCores * NumberOfModules * CoreNumPerComputeUnit), 0, 0, StdHeader
      );
    }
    break;
  default:
    // Includes CORE_LEVEL_NONE and any out-of-range value.
    ASSERT (FALSE);
  }

  // Set down core register on every present socket/module.
  for (Socket = 0; Socket < NumberOfSockets; Socket++) {
    if (IsProcessorPresent (Socket, StdHeader)) {
      GetFeatureServicesOfSocket (&CoreLevelingFamilyServiceTable, Socket, &FamilySpecificServices, StdHeader);
      if (FamilySpecificServices != NULL) {
        for (Module = 0; Module < NumberOfModules; Module++) {
          RegUpdated = FamilySpecificServices->SetDownCoreRegister (FamilySpecificServices, &Socket, &Module, &LeveledCores, CoreLevelMode, StdHeader);
          // If the down core register is updated, trigger a warm reset.
          if (RegUpdated) {
            GetWarmResetFlag (StdHeader, &Request);
            Request.RequestBit = TRUE;
            // Rewind StateBits one stage so the request fires in the current stage.
            Request.StateBits = Request.PostStage - 1;
            SetWarmResetFlag (StdHeader, &Request);
          }
        }
      }
    }
  }

  return (AGESA_SUCCESS);
}
/**
 * Perform initialization services required at the Early Init POST time point.
 *
 * Execution Cache, HyperTransport, and AP Init advanced services are performed.
 *
 * @param[in]  EarlyParams  The interface struct for all early services
 *
 * @return     The most severe AGESA_STATUS returned by any called service.
 *
 */
AGESA_STATUS
AmdInitEarly (
  IN OUT   AMD_EARLY_PARAMS  *EarlyParams
  )
{
  AGESA_STATUS        CalledAgesaStatus;  // Status of the most recent sub-call
  AGESA_STATUS        EarlyInitStatus;    // Worst status seen so far (aggregated return)
  WARM_RESET_REQUEST  Request;

  // Validate the caller's parameter block BEFORE any member is dereferenced.
  // (Fix: previously this ASSERT ran only after EarlyParams->StdHeader had
  // already been dereferenced, so it could never catch a NULL.)
  ASSERT (EarlyParams != NULL);

  AGESA_TESTPOINT (TpIfAmdInitEarlyEntry, &EarlyParams->StdHeader);
  IDS_PERF_TIME_MEASURE (&EarlyParams->StdHeader);

  EarlyInitStatus = AGESA_SUCCESS;

  // Clear any stale warm-reset request before the early services run.
  GetWarmResetFlag (&EarlyParams->StdHeader, &Request);
  Request.RequestBit = FALSE;
  SetWarmResetFlag (&EarlyParams->StdHeader, &Request);

  IDS_OPTION_HOOK (IDS_INIT_EARLY_BEFORE, EarlyParams, &EarlyParams->StdHeader);

  // Setup ROM execution cache
  CalledAgesaStatus = AllocateExecutionCache (&EarlyParams->StdHeader, &EarlyParams->CacheRegion[0]);
  if (CalledAgesaStatus > EarlyInitStatus) {
    EarlyInitStatus = CalledAgesaStatus;
  }

  // Full Hypertransport Initialization
  // IMPORTANT: All AP cores call Ht Init. HT Init handles full init for the BSC, and map init for APs.
  CalledAgesaStatus = AmdHtInitialize (&EarlyParams->StdHeader, &EarlyParams->PlatformConfig, &EarlyParams->HtConfig);
  if (CalledAgesaStatus > EarlyInitStatus) {
    EarlyInitStatus = CalledAgesaStatus;
  }

  // AP launch
  CalledAgesaStatus = AmdCpuEarly (&EarlyParams->StdHeader, &EarlyParams->PlatformConfig);
  if (CalledAgesaStatus > EarlyInitStatus) {
    EarlyInitStatus = CalledAgesaStatus;
  }

  // Warm Reset, should be at the end of AmdInitEarly
  GetWarmResetFlag (&EarlyParams->StdHeader, &Request);
  if (Request.RequestBit == TRUE) {
    // A service above asked for a warm reset: record that EARLY completed,
    // then reset. Execution does not continue past AgesaDoReset.
    Request.RequestBit = FALSE;
    Request.StateBits = WR_STATE_EARLY;
    SetWarmResetFlag (&EarlyParams->StdHeader, &Request);
    AgesaDoReset (WARM_RESET_WHENEVER, &EarlyParams->StdHeader);
  } else {
    // No reset requested: just advance the state machine to EARLY if needed.
    if (Request.StateBits < WR_STATE_EARLY) {
      Request.StateBits = WR_STATE_EARLY;
      SetWarmResetFlag (&EarlyParams->StdHeader, &Request);
    }
  }

  CalledAgesaStatus = GnbInitAtEarly (&EarlyParams->StdHeader, &EarlyParams->PlatformConfig, &EarlyParams->GnbConfig);
  if (CalledAgesaStatus > EarlyInitStatus) {
    EarlyInitStatus = CalledAgesaStatus;
  }

  // Check for Cache As Ram Corruption
  IDS_CAR_CORRUPTION_CHECK (&EarlyParams->StdHeader);

  IDS_OPTION_HOOK (IDS_AFTER_WARM_RESET, EarlyParams, &EarlyParams->StdHeader);

  IDS_OPTION_HOOK (IDS_INIT_EARLY_AFTER, EarlyParams, &EarlyParams->StdHeader);
  IDS_PERF_TIME_MEASURE (&EarlyParams->StdHeader);
  AGESA_TESTPOINT (TpIfAmdInitEarlyExit, &EarlyParams->StdHeader);

  return EarlyInitStatus;
}