Example #1
0
VOID
GfxIntegratedInfoInitSclkTable (
  IN       PP_FUSE_ARRAY                    *PpFuseArray,
  IN       ATOM_INTEGRATED_SYSTEM_INFO_V6   *IntegratedInfoTable,
  IN       GFX_PLATFORM_CONFIG              *Gfx
  )
{
  UINTN                       Index;
  UINTN                       TargetIndex;
  UINTN                       ValidSclkStateMask;
  UINT8                       TempDID;
  UINT8                       SclkVidArray[4];
  UINTN                       AvailSclkIndex;
  ATOM_AVAILABLE_SCLK_LIST    *AvailSclkList;
  BOOLEAN                     Sorting;
  AvailSclkList = &IntegratedInfoTable->sAvail_SCLK[0];
  GnbLibPciRead (
    MAKE_SBDFO ( 0, 0, 0x18, 3, D18F3x15C_ADDRESS),
    AccessWidth32,
    &SclkVidArray[0],
    GnbLibGetHeader (Gfx)
    );
  AvailSclkIndex = 0;
  for (Index = 0; Index < MAX_NUM_OF_FUSED_DPM_STATES; Index++) {
    if (PpFuseArray->SclkDpmDid[Index] != 0) {
      AvailSclkList[AvailSclkIndex].ulSupportedSCLK = GfxLibCalculateClk (PpFuseArray->SclkDpmDid[Index], IntegratedInfoTable->ulDentistVCOFreq);
      AvailSclkList[AvailSclkIndex].usVoltageIndex = PpFuseArray->SclkDpmVid[Index];
      AvailSclkList[AvailSclkIndex].usVoltageID = SclkVidArray [PpFuseArray->SclkDpmVid[Index]];
      AvailSclkIndex++;
    }
  }
  //Sort by VoltageIndex & ulSupportedSCLK
  do {
    Sorting = FALSE;
    for (Index = 0; Index < (AvailSclkIndex - 1); Index++) {
      ATOM_AVAILABLE_SCLK_LIST  Temp;
      BOOLEAN                   Exchange;
      Exchange = FALSE;
      if (AvailSclkList[Index].usVoltageIndex > AvailSclkList[Index + 1].usVoltageIndex) {
        Exchange = TRUE;
      }
      if ((AvailSclkList[Index].usVoltageIndex == AvailSclkList[Index + 1].usVoltageIndex) &&
          (AvailSclkList[Index].ulSupportedSCLK > AvailSclkList[Index + 1].ulSupportedSCLK)) {
        Exchange = TRUE;
      }
      if (Exchange) {
        Sorting = TRUE;
        LibAmdMemCopy (&Temp, &AvailSclkList[Index], sizeof (ATOM_AVAILABLE_SCLK_LIST), GnbLibGetHeader (Gfx));
        LibAmdMemCopy (&AvailSclkList[Index], &AvailSclkList[Index + 1], sizeof (ATOM_AVAILABLE_SCLK_LIST), GnbLibGetHeader (Gfx));
        LibAmdMemCopy (&AvailSclkList[Index + 1], &Temp, sizeof (ATOM_AVAILABLE_SCLK_LIST), GnbLibGetHeader (Gfx));
      }
    }
  } while (Sorting);

  if (PpFuseArray->GpuBoostCap == 1) {
    IntegratedInfoTable->SclkDpmThrottleMargin = PpFuseArray->SclkDpmThrottleMargin;
    IntegratedInfoTable->SclkDpmTdpLimitPG = PpFuseArray->SclkDpmTdpLimitPG;
    IntegratedInfoTable->EnableBoost = PpFuseArray->GpuBoostCap;
    IntegratedInfoTable->SclkDpmBoostMargin = PpFuseArray->SclkDpmBoostMargin;
    IntegratedInfoTable->SclkDpmTdpLimitBoost = (PpFuseArray->SclkDpmTdpLimit)[5];
    IntegratedInfoTable->ulBoostEngineCLock = GfxFmCalculateClock ((PpFuseArray->SclkDpmDid)[5], GnbLibGetHeader (Gfx));
    IntegratedInfoTable->ulBoostVid_2bit = (PpFuseArray->SclkDpmVid)[5];

    ValidSclkStateMask = 0;
    TargetIndex = 0;
    for (Index = 0; Index < 6; Index++) {
      ValidSclkStateMask |= (PpFuseArray->SclkDpmValid)[Index];
    }
    TempDID = 0x7F;
    for (Index = 0; Index < 6; Index++) {
      if ((ValidSclkStateMask & ((UINTN)1 << Index)) != 0) {
        if ((PpFuseArray->SclkDpmDid)[Index] <= TempDID) {
          TempDID = (PpFuseArray->SclkDpmDid)[Index];
          TargetIndex = Index;
        }
      }
    }
    IntegratedInfoTable->GnbTdpLimit = (PpFuseArray->SclkDpmTdpLimit)[TargetIndex];
  }

}
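The loop above is a plain two-key bubble sort: primary key usVoltageIndex, secondary key ulSupportedSCLK, both ascending. Below is a minimal standalone sketch of the same ordering; SCLK_ENTRY and SortSclkEntries are simplified stand-ins invented for illustration, not the AGESA types.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for ATOM_AVAILABLE_SCLK_LIST. */
typedef struct {
  uint32_t SupportedSclk;
  uint16_t VoltageIndex;
} SCLK_ENTRY;

/* Bubble sort on two keys, ascending: VoltageIndex first, SupportedSclk second. */
static void SortSclkEntries (SCLK_ENTRY *List, size_t Count)
{
  bool Sorting;
  if (Count < 2) {
    return;                      /* guards the (Count - 1) underflow for empty lists */
  }
  do {
    Sorting = false;
    for (size_t i = 0; i < Count - 1; i++) {
      bool Exchange =
        (List[i].VoltageIndex > List[i + 1].VoltageIndex) ||
        ((List[i].VoltageIndex == List[i + 1].VoltageIndex) &&
         (List[i].SupportedSclk > List[i + 1].SupportedSclk));
      if (Exchange) {
        SCLK_ENTRY Temp = List[i];
        List[i] = List[i + 1];
        List[i + 1] = Temp;
        Sorting = true;
      }
    }
  } while (Sorting);
}

int main (void)
{
  SCLK_ENTRY List[] = { {600, 2}, {300, 1}, {500, 1}, {400, 2} };
  SortSclkEntries (List, sizeof (List) / sizeof (List[0]));
  for (size_t i = 0; i < sizeof (List) / sizeof (List[0]); i++) {
    printf ("Vid %u Sclk %u\n", (unsigned) List[i].VoltageIndex, (unsigned) List[i].SupportedSclk);
  }
  return 0;
}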
Example #2
0
/**
 *
 * This function generates a complete SLIT table into a memory buffer.
 * After completion, this table must be set by the system BIOS into its
 * internal ACPI namespace, and linked into the RSDT/XSDT
 *
 *    @param[in, out]  StdHeader        Standard Header pointer
 *    @param[in]       PlatformConfig   Config handle for platform specific information
 *    @param[in, out]  SlitPtr          Pointer to the SLIT structure, including buffer address and length
 *
 *    @retval          AGESA_STATUS
 */
AGESA_STATUS
GetAcpiSlitMain (
  IN OUT   AMD_CONFIG_PARAMS      *StdHeader,
  IN       PLATFORM_CONFIGURATION *PlatformConfig,
  IN OUT   VOID                   **SlitPtr
   )
{
  UINT8 MaxHops;
  UINT8 SocketNum;
  UINT8 i;
  UINT8 j;
  UINT8 *BufferPtr;
  UINT8 *SocketTopologyDataPtr;
  UINT8 *SocketTopologyPtr;
  ACPI_TABLE_HEADER *CpuSlitHeaderStructPtr;
  AGESA_STATUS Flag;
  ALLOCATE_HEAP_PARAMS AllocStruct;

  MaxHops = 0;
  SocketTopologyPtr = NULL;
  Flag = AGESA_ERROR;

  // find the buffer, by its handle, that contains the
  // node topology information
  AcpiSlitHBufferFind (StdHeader, &SocketTopologyPtr);
  if (SocketTopologyPtr == 0) {
    return (Flag);
  }

  SocketNum = *SocketTopologyPtr;

  IDS_HDT_CONSOLE (CPU_TRACE, "  SLIT is created\n");

  // create a buffer by calling IBV callout routine
  AllocStruct.RequestedBufferSize = (SocketNum * SocketNum) + AMD_ACPI_SLIT_SOCKET_NUM_LENGTH + sizeof (ACPI_TABLE_HEADER);
  AllocStruct.BufferHandle = AMD_ACPI_SLIT_BUFFER_HANDLE;
  AllocStruct.Persist = HEAP_SYSTEM_MEM;
  if (HeapAllocateBuffer (&AllocStruct, StdHeader) != AGESA_SUCCESS) {
    return (Flag);
  }
  *SlitPtr = AllocStruct.BufferPtr;

  //SLIT header
  LibAmdMemCopy (*SlitPtr, (VOID *) &CpuSlitHdrStruct, (UINTN) (sizeof (ACPI_TABLE_HEADER)), StdHeader);
  CpuSlitHeaderStructPtr = (ACPI_TABLE_HEADER *) *SlitPtr;
  CpuSlitHeaderStructPtr->TableLength = (UINT32) AllocStruct.RequestedBufferSize;
  BufferPtr = *SlitPtr;

  Flag = AGESA_SUCCESS;
  // SLIT body
  // check if probe filter (PF) mode is enabled
  if (!IsFeatureEnabled (HtAssist, PlatformConfig, StdHeader)) {
    // probe filter is disabled

    // get MaxHops
    SocketTopologyDataPtr = SocketTopologyPtr + sizeof (SocketNum);
    for (i = 0; i < SocketNum; i++) {
      for (j = 0; j < SocketNum; j++) {
        if (*SocketTopologyDataPtr > MaxHops) {
          MaxHops = *SocketTopologyDataPtr;
        }
        SocketTopologyDataPtr++;
      }
    }

    // Entries with the maximum hop count get a value of 13;
    // all other entries get 10.
    SocketTopologyDataPtr = SocketTopologyPtr + sizeof (SocketNum);
    for (i = 0; i < SocketNum; i++) {
      for (j = 0; j < SocketNum; j++) {
        if (*SocketTopologyDataPtr++ == MaxHops) {
          *(BufferPtr + sizeof (ACPI_TABLE_HEADER) +
            AMD_ACPI_SLIT_SOCKET_NUM_LENGTH + (i * SocketNum) + j) = 13;
        } else {
          *(BufferPtr + sizeof (ACPI_TABLE_HEADER) +
            AMD_ACPI_SLIT_SOCKET_NUM_LENGTH + (i * SocketNum) + j) = 10;
        }
      }
    }
  } else {
    // probe filter is enabled

    // formula : num_hops * 6 + 10
    SocketTopologyDataPtr = SocketTopologyPtr + sizeof (SocketNum);
    for (i = 0; i < SocketNum; i++) {
      for (j = 0; j < SocketNum; j++) {
        *(BufferPtr + sizeof (ACPI_TABLE_HEADER) +
          AMD_ACPI_SLIT_SOCKET_NUM_LENGTH + (i * SocketNum) + j) =
          ((*SocketTopologyDataPtr++) * 6) + 10;
      }
    }
  }

  BufferPtr += sizeof (ACPI_TABLE_HEADER);
  *((UINT64 *) BufferPtr) = (UINT64) SocketNum;

  //Update SLIT header Checksum
  ChecksumAcpiTable ((ACPI_TABLE_HEADER *) *SlitPtr, StdHeader);

  return (Flag);
}
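The SLIT body above is filled by two simple policies: with the probe filter disabled, every entry at the maximum hop count gets 13 and every other entry gets 10; with the probe filter enabled, each entry is num_hops * 6 + 10. Below is a standalone sketch of just that fill step; BuildSlitMatrix is a hypothetical helper name, not an AGESA function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fill an NxN SLIT distance matrix from an NxN hop-count matrix. */
static void BuildSlitMatrix (const uint8_t *Hops, uint8_t *Slit, unsigned Sockets, bool ProbeFilter)
{
  unsigned Entries = Sockets * Sockets;
  if (ProbeFilter) {
    /* probe filter enabled: distance = num_hops * 6 + 10 */
    for (unsigned i = 0; i < Entries; i++) {
      Slit[i] = (uint8_t) (Hops[i] * 6 + 10);
    }
  } else {
    /* probe filter disabled: max-hop entries get 13, all others 10 */
    uint8_t MaxHops = 0;
    for (unsigned i = 0; i < Entries; i++) {
      if (Hops[i] > MaxHops) {
        MaxHops = Hops[i];
      }
    }
    for (unsigned i = 0; i < Entries; i++) {
      Slit[i] = (Hops[i] == MaxHops) ? 13 : 10;
    }
  }
}

int main (void)
{
  /* Two sockets: 0 hops to self, 1 hop to the other socket. */
  uint8_t Hops[4] = { 0, 1, 1, 0 };
  uint8_t Slit[4];
  BuildSlitMatrix (Hops, Slit, 2, true);
  printf ("%u %u / %u %u\n", (unsigned) Slit[0], (unsigned) Slit[1], (unsigned) Slit[2], (unsigned) Slit[3]);
  return 0;
}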
Example #3
0
/**
 *
 *
 *       This is the training function which sets up the environment for remote
 * training on the AP and launches the remote routine.
 *
 *     @param[in,out]   *NBPtr   - Pointer to the MEM_NB_BLOCK
 *
 *     @return          TRUE -  Launch training on AP successfully.
 *     @return          FALSE - Fail to launch training on AP.
 */
BOOLEAN
MemFParallelTrainingHy (
  IN OUT   MEM_NB_BLOCK *NBPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  DIE_STRUCT *MCTPtr;
  REMOTE_TRAINING_ENV *EnvPtr;
  AP_TASK TrainingTask;
  UINT8 Socket;
  UINT8 Module;
  UINT8 APCore;
  UINT8 p;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 BspSocket;
  UINT32 BspModule;
  UINT32 BspCore;
  AGESA_STATUS Status;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT16 MctDataSize;
  StdHeader = &(NBPtr->MemPtr->StdHeader);
  MCTPtr = NBPtr->MCTPtr;
  Socket = MCTPtr->SocketId;
  Module = MCTPtr->DieId;

  //
  // Allocate buffer for REMOTE_TRAINING_ENV
  //
  MctDataSize = MAX_DCTS_PER_NODE_HY * (
                  sizeof (DCT_STRUCT) + (
                    MAX_CHANNELS_PER_DCT_HY * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))
                  )
                );
  AllocHeapParams.RequestedBufferSize = MctDataSize + sizeof (REMOTE_TRAINING_ENV);
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Socket, Module, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    EnvPtr = (REMOTE_TRAINING_ENV *) AllocHeapParams.BufferPtr;
    AllocHeapParams.BufferPtr += sizeof (REMOTE_TRAINING_ENV);

    //
    // Setup Remote training environment
    //
    LibAmdMemCopy (&(EnvPtr->StdHeader), StdHeader, sizeof (AMD_CONFIG_PARAMS), StdHeader);
    LibAmdMemCopy (&(EnvPtr->DieStruct), MCTPtr, sizeof (DIE_STRUCT), StdHeader);
    for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
      EnvPtr->GetPlatformCfg[p] = NBPtr->MemPtr->GetPlatformCfg[p];
    }
    EnvPtr->ErrorHandling = NBPtr->MemPtr->ErrorHandling;
    EnvPtr->NBBlockCtor = MemConstructRemoteNBBlockHY;
    EnvPtr->FeatPtr = NBPtr->FeatPtr;
    EnvPtr->HoleBase = NBPtr->RefPtr->HoleBase;
    EnvPtr->BottomIo = NBPtr->RefPtr->BottomIo;
    EnvPtr->UmaSize = NBPtr->RefPtr->UmaSize;
    EnvPtr->SysLimit = NBPtr->RefPtr->SysLimit;
    EnvPtr->TableBasedAlterations = NBPtr->RefPtr->TableBasedAlterations;
    EnvPtr->PlatformMemoryConfiguration = NBPtr->RefPtr->PlatformMemoryConfiguration;

    LibAmdMemCopy (AllocHeapParams.BufferPtr, MCTPtr->DctData, MctDataSize, StdHeader);

    //
    // Get Socket, Core of the BSP
    //
    IdentifyCore (StdHeader, &BspSocket, &BspModule, &BspCore, &Status);
    EnvPtr->BspSocket = ((UINT8)BspSocket & 0x000000FF);
    EnvPtr->BspCore = ((UINT8)BspCore & 0x000000FF);

    //
    // Set up the remote task structure
    //
    TrainingTask.DataTransfer.DataPtr = EnvPtr;
    TrainingTask.DataTransfer.DataSizeInDwords = (UINT16) (AllocHeapParams.RequestedBufferSize + 3) / 4;
    TrainingTask.DataTransfer.DataTransferFlags = 0;
    TrainingTask.ExeFlags = 0;
    TrainingTask.FuncAddress.PfApTaskI = (PF_AP_TASK_I)MemFParallelTraining;

    //
    // Get Target AP Core
    //
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    APCore = (UINT8) (LowCore & 0x000000FF);

    //
    // Launch Remote Training
    //
    ApUtilRunCodeOnSocketCore (Socket, APCore, &TrainingTask, StdHeader);

    HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);

    return TRUE;
  } else {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_REMOTE_TRAINING_ENV, NBPtr->Node, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocate heap space for "REMOTE_TRAINING_ENV"
    return FALSE;
  }
}
Example #4
0
/**
 *
 *     This function runs DQS training on the BSP and launches it in parallel
 *     on each remote die, then collects the training results from the APs.
 *
 *     @param[in,out]   *mmPtr   - Pointer to the MEM_MAIN_DATA_BLOCK
 *
 *     @return          TRUE -  No fatal error occurs.
 *     @return          FALSE - Fatal error occurs.
 */
BOOLEAN
MemMParallelTraining (
  IN OUT   MEM_MAIN_DATA_BLOCK *mmPtr
  )
{
  AMD_CONFIG_PARAMS *StdHeader;
  MEM_DATA_STRUCT *MemPtr;
  MEM_NB_BLOCK *NBPtr;
  DIE_INFO TrainInfo[MAX_NODES_SUPPORTED];
  AP_DATA_TRANSFER ReturnData;
  AGESA_STATUS Status;
  UINT8 ApSts;
  UINT8 Die;
  UINT8 Socket;
  UINT32 Module;
  UINT32 LowCore;
  UINT32 HighCore;
  UINT32 Time;
  UINT32 TimeOut;
  BOOLEAN StillTraining;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 *BufferPtr;
  BOOLEAN TimeoutEn;

  NBPtr = mmPtr->NBPtr;
  MemPtr = mmPtr->MemPtr;
  StdHeader = &(mmPtr->MemPtr->StdHeader);
  Time = 0;
  TimeOut = PARALLEL_TRAINING_TIMEOUT;
  TimeoutEn = TRUE;
  IDS_TIMEOUT_CTL (&TimeoutEn);

  IDS_HDT_CONSOLE (MEM_STATUS, "\nStart parallel training\n");
  AGESA_TESTPOINT (TpProcMemBeforeAnyTraining, StdHeader);
  //
  // Initialize Training Info Array
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++) {
    Socket = TrainInfo[Die].Socket = NBPtr[Die].MCTPtr->SocketId;
    Module = NBPtr[Die].MCTPtr->DieId;
    GetGivenModuleCoreRange (Socket, Module, &LowCore, &HighCore, StdHeader);
    TrainInfo[Die].Core = (UINT8) (LowCore & 0x000000FF);
    IDS_HDT_CONSOLE (MEM_FLOW, "\tLaunch core %d of socket %d\n", LowCore, Socket);
    TrainInfo[Die].Training = FALSE;
  }
  //
  // Start Training on Each remote die.
  //
  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (Die != BSP_DIE) {
      NBPtr[Die].BeforeDqsTraining (&(mmPtr->NBPtr[Die]));
      if (NBPtr[Die].MCTPtr->NodeMemSize != 0) {
        if (!NBPtr[Die].FeatPtr->Training (&(mmPtr->NBPtr[Die]))) {
          // Fail to launch code on AP
          PutEventLog (AGESA_ERROR, MEM_ERROR_PARALLEL_TRAINING_LAUNCH_FAIL, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
          SetMemError (AGESA_ERROR, NBPtr[Die].MCTPtr);
          MemPtr->ErrorHandling (NBPtr[Die].MCTPtr, EXCLUDE_ALL_DCT, EXCLUDE_ALL_CHIPSEL, &MemPtr->StdHeader);
        } else {
          TrainInfo[Die].Training = TRUE;
        }
      }
    }
  }
  //
  // Call training on BSP
  //
  IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NBPtr[BSP_DIE].Node);
  NBPtr[BSP_DIE].BeforeDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].TrainingFlow (&(mmPtr->NBPtr[BSP_DIE]));
  NBPtr[BSP_DIE].AfterDqsTraining (&(mmPtr->NBPtr[BSP_DIE]));

  //
  // Get Results from remote processors training
  //
  do {
    StillTraining = FALSE;
    for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
      //
      // For each Die that is training, read the status
      //
      if (TrainInfo[Die].Training == TRUE) {
        ApSts = ApUtilReadRemoteControlByte (TrainInfo[Die].Socket, TrainInfo[Die].Core, StdHeader);
        if ((ApSts & 0x80) == 0) {
          //
          // Allocate buffer for received data
          //
          AllocHeapParams.RequestedBufferSize = (
            sizeof (DIE_STRUCT) +
            NBPtr[Die].DctCount * (
              sizeof (DCT_STRUCT) + (
                NBPtr[Die].ChannelCount * (
                  sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                   (NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                    NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                    NUMBER_OF_DELAY_TABLES) +
                    (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                  )
                )
              )
            )
          ) + 3;
          AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, Die, 0, 0);
          AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
          if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
            //
            // Receive Training Results
            //

            ReturnData.DataPtr = AllocHeapParams.BufferPtr;
            ReturnData.DataSizeInDwords = (UINT16) AllocHeapParams.RequestedBufferSize / 4;
            ReturnData.DataTransferFlags = 0;
            Status = ApUtilReceiveBuffer (TrainInfo[Die].Socket, TrainInfo[Die].Core, &ReturnData, StdHeader);
            if (Status != AGESA_SUCCESS) {
              SetMemError (Status, NBPtr[Die].MCTPtr);
            }

            BufferPtr = AllocHeapParams.BufferPtr;
            LibAmdMemCopy (NBPtr[Die].MCTPtr, BufferPtr, sizeof (DIE_STRUCT), StdHeader);
            BufferPtr += sizeof (DIE_STRUCT);
            LibAmdMemCopy ( NBPtr[Die].MCTPtr->DctData,
                            BufferPtr,
                            NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT)),
                            StdHeader);
            BufferPtr += NBPtr[Die].DctCount * (sizeof (DCT_STRUCT) + NBPtr[Die].ChannelCount * sizeof (CH_DEF_STRUCT));
            LibAmdMemCopy ( NBPtr[Die].PSBlock,
                            BufferPtr,
                            NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK),
                            StdHeader);
            BufferPtr += NBPtr[Die].DctCount * NBPtr[Die].ChannelCount * sizeof (MEM_PS_BLOCK);
            LibAmdMemCopy ( NBPtr[Die].MCTPtr->DctData[0].ChData[0].RcvEnDlys,
                            BufferPtr,
                            (NBPtr[Die].DctCount * NBPtr[Die].ChannelCount) *
                            ((NBPtr[Die].MCTPtr->DctData[0].ChData[0].RowCount *
                              NBPtr[Die].MCTPtr->DctData[0].ChData[0].ColumnCount *
                              NUMBER_OF_DELAY_TABLES) +
                              (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES)
                            ),
                            StdHeader);

            HeapDeallocateBuffer (AllocHeapParams.BufferHandle, StdHeader);

            NBPtr[Die].AfterDqsTraining (&(mmPtr->NBPtr[Die]));
            TrainInfo[Die].Training = FALSE;
          } else {
            PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_RECEIVED_DATA, NBPtr[Die].Node, 0, 0, 0, StdHeader);
            SetMemError (AGESA_FATAL, NBPtr[Die].MCTPtr);
            ASSERT(FALSE); // Insufficient Heap Space allocation for parallel training buffer
          }
        } else if (ApSts == CORE_IDLE) {
          // AP does not have buffer to transmit to BSP
          // AP fails to locate a buffer for data transfer
          TrainInfo[Die].Training = FALSE;
        } else {
          // Signal to loop through again
          StillTraining = TRUE;
        }
      }
    }
    // Wait for 1 us
    MemUWait10ns (100, NBPtr->MemPtr);
    Time ++;
  } while ((StillTraining) && ((Time < TimeOut) || !TimeoutEn)); // Continue until all Dies are finished
                                                // if cannot finish in 1 s, do fatal exit

  if (StillTraining && TimeoutEn) {
    // Parallel training time out, do fatal exit, as there is at least one AP hangs.
    PutEventLog (AGESA_FATAL, MEM_ERROR_PARALLEL_TRAINING_TIME_OUT, 0, 0, 0, 0, &NBPtr->MemPtr->StdHeader);
    SetMemError (AGESA_FATAL, NBPtr[BSP_DIE].MCTPtr);
    ASSERT(FALSE); // Timeout occurred while still training
  }

  for (Die = 0; Die < mmPtr->DieCount; Die ++ ) {
    if (NBPtr[Die].MCTPtr->ErrCode == AGESA_FATAL) {
      return FALSE;
    }
  }
  return TRUE;
}
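The result-collection loop above is a bounded polling loop: one pass over every die, a fixed 1 us delay per pass (MemUWait10ns (100, ...)), and a pass counter checked against PARALLEL_TRAINING_TIMEOUT unless timeouts are disabled. A compact standalone sketch of the same shape follows; ApStillBusy is a stub invented here to stand in for ApUtilReadRemoteControlByte.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DIE_COUNT 4

/* Stub status; in the firmware this comes from ApUtilReadRemoteControlByte. */
static uint32_t RemainingPasses[DIE_COUNT] = { 3, 0, 7, 1 };

static bool ApStillBusy (unsigned Die)
{
  if (RemainingPasses[Die] > 0) {
    RemainingPasses[Die]--;
    return true;
  }
  return false;
}

/* Poll every die once per pass, delay a fixed interval, and stop when all dies
   are done or the pass counter reaches the timeout. */
static bool WaitForAps (unsigned DieCount, uint32_t TimeoutPasses, bool TimeoutEnabled)
{
  uint32_t Passes = 0;
  bool StillTraining;
  do {
    StillTraining = false;
    for (unsigned Die = 0; Die < DieCount; Die++) {
      if (ApStillBusy (Die)) {
        StillTraining = true;
      }
    }
    /* the firmware waits 1 us here before the next pass */
    Passes++;
  } while (StillTraining && ((Passes < TimeoutPasses) || !TimeoutEnabled));
  return !StillTraining;   /* false means at least one AP never finished */
}

int main (void)
{
  printf ("all done: %d\n", (int) WaitForAps (DIE_COUNT, 1000000, true));
  return 0;
}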
Example #5
0
VOID
STATIC
MemSPDDataProcess (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  UINT8 Socket;
  UINT8 Channel;
  UINT8 Dimm;
  UINT8 DimmIndex;
  UINT32 AgesaStatus;
  UINT8 MaxSockets;
  UINT8 MaxChannelsPerSocket;
  UINT8 MaxDimmsPerChannel;
  SPD_DEF_STRUCT *DimmSPDPtr;
  PSO_TABLE *PsoTable;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  AGESA_READ_SPD_PARAMS SpdParam;

  ASSERT (MemPtr != NULL);
  MaxSockets = (UINT8) (0x000000FF & GetPlatformNumberOfSockets ());
  PsoTable = MemPtr->ParameterListPtr->PlatformMemoryConfiguration;
  //
  // Allocate heap for the table
  //
  AllocHeapParams.RequestedBufferSize = (GetSpdSocketIndex (PsoTable, MaxSockets, &MemPtr->StdHeader) * sizeof (SPD_DEF_STRUCT));
  AllocHeapParams.BufferHandle = AMD_MEM_SPD_HANDLE;
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) == AGESA_SUCCESS) {
    MemPtr->SpdDataStructure = (SPD_DEF_STRUCT *) AllocHeapParams.BufferPtr;
    //
    // Initialize SpdParam Structure
    //
    LibAmdMemCopy ((VOID *)&SpdParam, (VOID *)MemPtr, (UINTN)sizeof (SpdParam.StdHeader), &MemPtr->StdHeader);
    //
    // Populate SPDDataBuffer
    //
    SpdParam.MemData = MemPtr;
    DimmIndex = 0;
    for (Socket = 0; Socket < (UINT16)MaxSockets; Socket++) {
      MaxChannelsPerSocket = GetMaxChannelsPerSocket (PsoTable, Socket, &MemPtr->StdHeader);
      SpdParam.SocketId = Socket;
      for (Channel = 0; Channel < MaxChannelsPerSocket; Channel++) {
        SpdParam.MemChannelId = Channel;
        MaxDimmsPerChannel = GetMaxDimmsPerChannel (PsoTable, Socket, Channel);
        for (Dimm = 0; Dimm < MaxDimmsPerChannel; Dimm++) {
          SpdParam.DimmId = Dimm;
          DimmSPDPtr = &(MemPtr->SpdDataStructure[DimmIndex++]);
          SpdParam.Buffer = DimmSPDPtr->Data;
          AGESA_TESTPOINT (TpProcMemBeforeAgesaReadSpd, &MemPtr->StdHeader);
          AgesaStatus = AgesaReadSpd (0, &SpdParam);
          AGESA_TESTPOINT (TpProcMemAfterAgesaReadSpd, &MemPtr->StdHeader);
          if (AgesaStatus == AGESA_SUCCESS) {
            DimmSPDPtr->DimmPresent = TRUE;
            IDS_HDT_CONSOLE (MEM_FLOW, "SPD Socket %d Channel %d Dimm %d: %08x\n", Socket, Channel, Dimm, SpdParam.Buffer);
          } else {
            DimmSPDPtr->DimmPresent = FALSE;
          }
        }
      }
    }
  } else {
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_SPD, 0, 0, 0, 0, &MemPtr->StdHeader);
    //
    // Assert here if unable to allocate heap for SPDs
    //
    IDS_ERROR_TRAP;
  }
}
Example #6
0
/**
 *
 *  Initialization function for the HDT out feature.
 *
 *  Initializes the required debug register and heap, and fires an HDTOUT
 *  command so that the hdtout script can perform the corresponding setup.
 *
 *  @param[in,out] StdHeader    Pointer to the AGESA standard header
 *
 **/
VOID
AmdIdsHdtOutInit (
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  HDTOUT_HEADER HdtoutHeader;
  UINT8  Persist;
  AGESA_STATUS IgnoreSts;
  HDTOUT_HEADER *pHdtoutHeader;

  IDS_FUNCLIST_EXTERN ();
  if (AmdIdsHdtOutSupport ()) {
    AmdIdsHdtOutRegisterInit (StdHeader);
    // Initialize HDTOUT Header
    HdtoutHeader.Signature = HDTOUT_HEADER_SIGNATURE;
    HdtoutHeader.Version = HDTOUT_VERSION;
    HdtoutHeader.BufferSize = HDTOUT_DEFAULT_BUFFER_SIZE;
    HdtoutHeader.DataIndex = 0;
    HdtoutHeader.PrintCtrl = HDTOUT_PRINTCTRL_ON;
    HdtoutHeader.NumBreakpointUnit = 0;
    HdtoutHeader.FuncListAddr = (UINT32) (UINT64) IDS_FUNCLIST_ADDR;
    HdtoutHeader.StatusStr[0] = 0;
    HdtoutHeader.OutBufferMode = HDTOUT_BUFFER_MODE_ON;
    HdtoutHeader.EnableMask = 0;
    HdtoutHeader.ConsoleFilter = IDS_DEBUG_PRINT_MASK;

    // Trigger HDTOUT breakpoint to get inputs from script
    IdsOutPort (HDTOUT_INIT, (UINT32) (UINT64) &HdtoutHeader, 0);
    // Disable AP HDTOUT if BspOnlyFlag is set
    if (HdtoutHeader.BspOnlyFlag == HDTOUT_BSP_ONLY) {
      if (!IsBsp (StdHeader, &IgnoreSts)) {
        AmdIdsHdtOutRegisterRestore (StdHeader);
        return;
      }
    }
    // Convert legacy EnableMask to new ConsoleFilter
    HdtoutHeader.ConsoleFilter |= HdtoutHeader.EnableMask;

    // Disable the buffer if the size is not large enough
    if (HdtoutHeader.BufferSize < 128) {
      HdtoutHeader.BufferSize = 0;
      HdtoutHeader.OutBufferMode = HDTOUT_BUFFER_MODE_OFF;
    } else {
      HdtoutHeader.OutBufferMode = HDTOUT_BUFFER_MODE_ON;
    }

    // Check whether the HDTOUT header has already been initialized; if so, this is the second pass through here
    if (AmdIdsHdtoutGetHeader (&pHdtoutHeader, StdHeader)) {
      Persist = HEAP_SYSTEM_MEM;
    } else {
      Persist = HEAP_LOCAL_CACHE;
    }

    // Allocate heap
    do {
      AllocHeapParams.RequestedBufferSize = HdtoutHeader.BufferSize + sizeof (HdtoutHeader) - 2;
      AllocHeapParams.BufferHandle = IDS_HDT_OUT_BUFFER_HANDLE;
      AllocHeapParams.Persist = Persist;
      if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
        break;
      } else {
        IdsOutPort (HDTOUT_ERROR, HDTOUT_ERROR_HEAP_ALLOCATION, AllocHeapParams.RequestedBufferSize);
        HdtoutHeader.BufferSize -= 256;
      }
    } while ((HdtoutHeader.BufferSize & 0x8000) == 0);
    // Check whether the buffer was successfully allocated
    if ((HdtoutHeader.BufferSize & 0x8000) == 0) {
      LibAmdWriteCpuReg (DR3_REG, (UINT32) (UINT64) AllocHeapParams.BufferPtr);
      LibAmdMemCopy (AllocHeapParams.BufferPtr, &HdtoutHeader, sizeof (HdtoutHeader) - 2, StdHeader);
    } else {
      /// Clear DR3_REG
      IdsOutPort (HDTOUT_ERROR, HDTOUT_ERROR_HEAP_AllOCATE_FAIL, IDS_DEBUG_PRINT_MASK);
      LibAmdWriteCpuReg (DR3_REG, 0);
    }
  }
}
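The allocation loop above retries with a smaller buffer on every failure, shrinking the request by 256 bytes, and uses bit 15 of the 16-bit size as the underflow/give-up test. Below is a small sketch of that backoff pattern; AllocateWithBackoff is an invented name and malloc stands in for HeapAllocateBuffer.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Retry an allocation with a smaller request on every failure, shrinking by
   256 bytes each time; once the 16-bit size wraps below zero its top bit
   (0x8000) becomes 1 and the loop gives up. */
static void *AllocateWithBackoff (uint16_t *Size)
{
  void *Buffer = NULL;
  do {
    Buffer = malloc (*Size);          /* stand-in for HeapAllocateBuffer */
    if (Buffer != NULL) {
      break;
    }
    *Size -= 256;
  } while ((*Size & 0x8000) == 0);
  return ((*Size & 0x8000) == 0) ? Buffer : NULL;
}

int main (void)
{
  uint16_t Size = 4096;
  void *Buffer = AllocateWithBackoff (&Size);
  printf ("got %p with %u bytes\n", Buffer, (unsigned) Size);
  free (Buffer);
  return 0;
}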
Example #7
0
AGESA_STATUS
GfxSamuInit (
  IN      GFX_PLATFORM_CONFIG   *Gfx
  )
{
  UINT32                    D0F0xBC_xC00C0000;
  GNB_HANDLE                *GnbHandle;
  VOID                      *ControlXBuffer;
  VOID                      *AlignedControlXBuffer;
  VOID                      *PatchYBuffer;
  VOID                      *AlignedPatchYBuffer;
  SAMU_BOOT_CONTROL         *SamuBootControl;

  UINT32                    D0F0xBC_x800000A4;
  UINT32                    GMMx22000;
  UINT32                    GMMx22004;
  UINT32                    GMMx22008;
  UINT32                    GMMx2200C;
  UINT32                    LoopCount;
  BOOLEAN                   SamuUseF1dPatch;
  BOOLEAN                   SamuPatchEnabled;

  IDS_HDT_CONSOLE (GNB_TRACE, "GnbSamuInit Enter\n");

  GnbHandle = GnbGetHandle (GnbLibGetHeader (Gfx));
  ASSERT (GnbHandle != NULL);
  GnbRegisterReadKB (GnbHandle, 0x4, 0xc00c0000,
      &D0F0xBC_xC00C0000, 0, GnbLibGetHeader (Gfx));

  SamuPatchEnabled = GnbBuildOptions.CfgSamuPatchEnabled;
  IDS_OPTION_HOOK (IDS_GNB_LOAD_SAMU_PATCH, &SamuPatchEnabled, GnbLibGetHeader (Gfx));

  if ((((D0F0xBC_xC00C0000) & BIT24) == 0) &&
      (SamuPatchEnabled == TRUE)) {

    // Decide which version of the patch to use
    SamuUseF1dPatch = TRUE;

    GMMx22008 = 0x29;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22008,
        &GMMx22008, 0, GnbLibGetHeader (Gfx));
    GnbRegisterReadKB (GnbHandle, 0x12, 0x2200C,
        &GMMx2200C, 0, GnbLibGetHeader (Gfx));
    IDS_HDT_CONSOLE (GNB_TRACE, " SAMSAB:29=%08x\n", GMMx2200C);

    if (GMMx2200C == 0x80000001) {
      SamuUseF1dPatch = FALSE;
    }

    ControlXBuffer = GnbAllocateHeapBufferAndClear (AMD_GNB_SAMU_BOOT_CONTROL_HANDLE, 2 * LENGTH_1MBYTE, GnbLibGetHeader (Gfx));
    ASSERT (ControlXBuffer != NULL);
    if (ControlXBuffer == NULL) {
      return  AGESA_ERROR;
    }
    AlignedControlXBuffer = (VOID *) (((UINTN)ControlXBuffer + LENGTH_1MBYTE) & (~MASK_1MBYTE));
    PatchYBuffer = GnbAllocateHeapBuffer (AMD_GNB_SAMU_PATCH_HANDLE, 2 * LENGTH_1MBYTE, GnbLibGetHeader (Gfx));
    ASSERT (PatchYBuffer != NULL);
    if (PatchYBuffer == NULL) {
      return  AGESA_ERROR;
    }
    AlignedPatchYBuffer = (VOID *) (((UINTN)PatchYBuffer + LENGTH_1MBYTE) & (~MASK_1MBYTE));

    // Copy samu firmware patch to PatchYBuffer
    if (SamuUseF1dPatch == TRUE) {
      LibAmdMemCopy (AlignedPatchYBuffer, &SamuPatchKB[0],
          SamuPatchKBHeader[1], GnbLibGetHeader (Gfx));
    } else {
      LibAmdMemCopy (AlignedPatchYBuffer, &SamuPatchKBUnf1[0],
          SamuPatchKBHeaderUnf1[1], GnbLibGetHeader (Gfx));
    }

    // WBINVD
    LibAmdWriteBackInvalidateCache ();

    // Load boot control structure
    SamuBootControl = (SAMU_BOOT_CONTROL *)AlignedControlXBuffer;
    SamuBootControl->BootControl = 0x3;
    SamuBootControl->KernelAddrLo = (UINT32) (AlignedPatchYBuffer);
    SamuBootControl->KernelAddrHi = 0; //(UINT32) ((((UINT64) AlignedPatchYBuffer) >> 32) & 0xFF);
    if (SamuUseF1dPatch == TRUE) {
      SamuBootControl->TweakSelect = 0xBB027E1F;
      SamuBootControl->KeySelect = 0x8E174F83;
    } else {
      SamuBootControl->TweakSelect = 0x0;
      SamuBootControl->KeySelect = 0x0;
    }


    // Write 0x0 to SAM_CGC_HOST_CTRL to release the clock-gating of SAMU
    GMMx22000 = 0x3;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22000, &GMMx22000, 0, GnbLibGetHeader (Gfx));
    GMMx22004 = 0x0;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22004, &GMMx22004, 0, GnbLibGetHeader (Gfx));

    // Write (physical address of boot control structure)>>8 into SAM_SAB_INIT_TLB_CONFIG (Location X >> 8)
    GMMx22008 = 0x4;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22008, &GMMx22008, 0, GnbLibGetHeader (Gfx));
    GMMx2200C = ((UINT32) ((UINT32) AlignedControlXBuffer)) >> 8;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x2200C, &GMMx2200C, 0, GnbLibGetHeader (Gfx));

    // Write 0x0 to SAM_RST_HOST_SOFT_RESET
    GMMx22000 = 0x1;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22000, &GMMx22000, 0, GnbLibGetHeader (Gfx));
    GMMx22004 = 0x0;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22004, &GMMx22004, 0, GnbLibGetHeader (Gfx));

    // Write 0x2 to SAM_SCRATCH_0 to start the firmware boot
    GMMx22000 = 0x38;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22000, &GMMx22000, 0, GnbLibGetHeader (Gfx));
    GMMx22004 = 0x2;
    GnbRegisterWriteKB (GnbHandle, 0x12, 0x22004, &GMMx22004, 0, GnbLibGetHeader (Gfx));

    // Poll SAM_RST_HOST_SOFT_RST_RDY and wait for HOST_RDY
    do {
      // Select SAM_RST_HOST_SOFT_RST_RDY and read it until HOST_RDY is set
      GMMx22000 = 0x51;
      GnbRegisterWriteKB (GnbHandle, 0x12, 0x22000, &GMMx22000, 0, GnbLibGetHeader (Gfx));
      GnbRegisterReadKB (GnbHandle, 0x12, 0x22004, &GMMx22004, 0, GnbLibGetHeader (Gfx));
    } while ((GMMx22004 & BIT0) == 0);

    // Clear the allocated memory ranges, locations X and Y (write 0), issue WBINVD
    LibAmdMemFill (ControlXBuffer, 0, 2 * LENGTH_1MBYTE, GnbLibGetHeader (Gfx));
    LibAmdMemFill (PatchYBuffer, 0, 2 * LENGTH_1MBYTE, GnbLibGetHeader (Gfx));
    LibAmdWriteBackInvalidateCache ();

    // Confirm read of SMC_DRAM_ACCESS_CNTL is 0x1
    D0F0xBC_x800000A4 = 0;
    for (LoopCount = 0; LoopCount < 0x00FFFFFF; LoopCount++) {
      GnbRegisterReadKB (GnbHandle, 0x4, 0x800000A4, &D0F0xBC_x800000A4, 0, GnbLibGetHeader (Gfx));
      if ((D0F0xBC_x800000A4 & BIT0) != 0)  {
        break;
      }
    }
    ASSERT ((D0F0xBC_x800000A4 & BIT0) != 0);
  }

  return AGESA_SUCCESS;
}
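ControlXBuffer and PatchYBuffer above are made 1 MB aligned by over-allocating 2 MB and rounding the pointer up to the next megabyte boundary. A minimal sketch of that trick follows; the MASK_1MBYTE value is assumed to be the low 20 offset bits, Align1MWithin is an invented helper, and malloc stands in for GnbAllocateHeapBuffer.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LENGTH_1MBYTE  0x100000u
#define MASK_1MBYTE    0x0FFFFFu   /* assumed: the low 20 offset bits of a 1 MB block */

/* Over-allocate by one alignment unit, then round up to the next 1 MB boundary.
   The raw pointer is kept for freeing; only the aligned one is used. */
static void *Align1MWithin (void *Raw)
{
  return (void *) (((uintptr_t) Raw + LENGTH_1MBYTE) & ~(uintptr_t) MASK_1MBYTE);
}

int main (void)
{
  void *Raw = malloc (2 * LENGTH_1MBYTE);   /* stand-in for GnbAllocateHeapBuffer */
  if (Raw != NULL) {
    void *Aligned = Align1MWithin (Raw);
    printf ("raw %p aligned %p\n", Raw, Aligned);
    free (Raw);
  }
  return 0;
}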
Example #8
0
/**
 *
 *
 *      This function initializes the data structures needed for S3 resume.
 *
 *      @param[in, out]  **S3NBPtr - Pointer to the pointer of northbridge block.
 *      @param[in, out]  *MemPtr - Pointer to MEM_DATA_STRUCT.
 *      @param[in, out]  *mmData - Pointer to MEM_MAIN_DATA_BLOCK.
 *      @param[in]       *StdHeader - Config handle for library and services.
 *
 *      @return          AGESA_STATUS
 *                          - AGESA_ALERT
 *                          - AGESA_FATAL
 *                          - AGESA_SUCCESS
 *                          - AGESA_WARNING
 */
AGESA_STATUS
MemS3InitNB (
  IN OUT   S3_MEM_NB_BLOCK **S3NBPtr,
  IN OUT   MEM_DATA_STRUCT **MemPtr,
  IN OUT   MEM_MAIN_DATA_BLOCK *mmData,
  IN       AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT8 i;
  AGESA_STATUS RetVal;
  LOCATE_HEAP_PTR LocHeap;
  MEM_NB_BLOCK *NBPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  UINT8 Die;
  UINT8 DieCount;
  BOOLEAN SkipScan;

  SkipScan = FALSE;
  LocHeap.BufferHandle = AMD_MEM_DATA_HANDLE;
  if (HeapLocateBuffer (&LocHeap, StdHeader) == AGESA_SUCCESS) {
    // MEM_DATA_STRUCT has already been constructed by the main block.
    // No need to construct it here.
    *MemPtr = (MEM_DATA_STRUCT *)LocHeap.BufferPtr;
    SkipScan = TRUE;
  } else {
    AllocHeapParams.RequestedBufferSize = sizeof (MEM_DATA_STRUCT);
    AllocHeapParams.BufferHandle = AMD_MEM_DATA_HANDLE;
    AllocHeapParams.Persist = HEAP_SYSTEM_MEM;
    if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) != AGESA_SUCCESS) {
      ASSERT(FALSE); // Allocate failed for MEM_DATA_STRUCT
      return AGESA_FATAL;
    }
    *MemPtr = (MEM_DATA_STRUCT *)AllocHeapParams.BufferPtr;
  }
  LibAmdMemCopy (&(*MemPtr)->StdHeader, StdHeader, sizeof (AMD_CONFIG_PARAMS), StdHeader);
  mmData->MemPtr = *MemPtr;

  if (!SkipScan) {
    RetVal = MemSocketScan (mmData);
    if (RetVal == AGESA_FATAL) {
      return RetVal;
    }
  } else {
    // The data block has already been initialized, no need to do it again.
    mmData->DieCount = mmData->MemPtr->DieCount;
  }
  DieCount = mmData->DieCount;

  //---------------------------------------------
  //  Creation of NB Block for S3 resume
  //---------------------------------------------
  // Search the heap for an existing block first.
  // Only allocate new heap space if the block cannot be found on the heap.
  LocHeap.BufferHandle = AMD_MEM_S3_NB_HANDLE;
  if (HeapLocateBuffer (&LocHeap, StdHeader) == AGESA_SUCCESS) {
    // NB block has already been constructed by main block.
    // No need to construct it here.
    *S3NBPtr = (S3_MEM_NB_BLOCK *)LocHeap.BufferPtr;
  } else {
    AllocHeapParams.RequestedBufferSize = (DieCount * (sizeof (S3_MEM_NB_BLOCK)));
    AllocHeapParams.BufferHandle = AMD_MEM_S3_NB_HANDLE;
    AllocHeapParams.Persist = HEAP_SYSTEM_MEM;
    if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) != AGESA_SUCCESS) {
      ASSERT(FALSE); // Could not allocate space for "S3_MEM_NB_BLOCK"
      return AGESA_FATAL;
    }
    *S3NBPtr = (S3_MEM_NB_BLOCK *)AllocHeapParams.BufferPtr;

    LocHeap.BufferHandle = AMD_MEM_AUTO_HANDLE;
    if (HeapLocateBuffer (&LocHeap, StdHeader) == AGESA_SUCCESS) {
      // NB block has already been constructed by main block.
      // No need to construct it here.
      NBPtr = (MEM_NB_BLOCK *)LocHeap.BufferPtr;
    } else {
      AllocHeapParams.RequestedBufferSize = (DieCount * (sizeof (MEM_NB_BLOCK)));
      AllocHeapParams.BufferHandle = AMD_MEM_AUTO_HANDLE;
      AllocHeapParams.Persist = HEAP_SYSTEM_MEM;
      if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) != AGESA_SUCCESS) {
        ASSERT(FALSE); // Allocate failed for "MEM_NB_BLOCK"
        return AGESA_FATAL;
      }
      NBPtr = (MEM_NB_BLOCK *)AllocHeapParams.BufferPtr;
    }
    // Construct each die.
    for (Die = 0; Die < DieCount; Die ++) {
      i = 0;
      ((*S3NBPtr)[Die]).NBPtr = &NBPtr[Die];
      while (memNBInstalled[i].MemS3ResumeConstructNBBlock != 0) {
        if (memNBInstalled[i].MemS3ResumeConstructNBBlock ((VOID *)&((*S3NBPtr)[Die]), *MemPtr, Die)) {
          break;
        }
        i++;
      };
      if (memNBInstalled[i].MemS3ResumeConstructNBBlock == 0) {
        ASSERT(FALSE); // S3 resume NB constructor not found
        return AGESA_FATAL;
      }
    }
  }
  return AGESA_SUCCESS;
}
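The function above uses a locate-or-allocate pattern three times: look the handle up on the heap first, and only allocate a fresh block when it is not found. A tiny self-contained sketch of that pattern follows; HeapSlots, HeapLocate, HeapAllocate and LocateOrAllocate are invented stand-ins for the AGESA heap manager.

#include <stdio.h>
#include <stdlib.h>

/* Tiny stand-in for the AGESA heap manager: one slot per handle. */
#define MAX_HANDLES 8
static void *HeapSlots[MAX_HANDLES];

static void *HeapLocate (unsigned Handle)
{
  return (Handle < MAX_HANDLES) ? HeapSlots[Handle] : NULL;
}

static void *HeapAllocate (unsigned Handle, size_t Size)
{
  if (Handle >= MAX_HANDLES || HeapSlots[Handle] != NULL) {
    return NULL;
  }
  HeapSlots[Handle] = calloc (1, Size);
  return HeapSlots[Handle];
}

/* Reuse the block if an earlier phase already created it under this handle,
   otherwise allocate it now. */
static void *LocateOrAllocate (unsigned Handle, size_t Size)
{
  void *Buffer = HeapLocate (Handle);
  return (Buffer != NULL) ? Buffer : HeapAllocate (Handle, Size);
}

int main (void)
{
  void *First  = LocateOrAllocate (3, 128);
  void *Second = LocateOrAllocate (3, 128);   /* same handle: the same block comes back */
  printf ("%s\n", (First == Second) ? "reused" : "new");
  free (HeapSlots[3]);
  return 0;
}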
Example #9
0
/*---------------------------------------------------------------------------------------*/
VOID
OemCustomizeInitEarly (
	IN	OUT AMD_EARLY_PARAMS	*InitEarly
	)
{
	AGESA_STATUS		 Status;
	VOID				 *BrazosPcieComplexListPtr;
	VOID				 *BrazosPciePortPtr;
	VOID				 *BrazosPcieDdiPtr;

	ALLOCATE_HEAP_PARAMS AllocHeapParams;

PCIe_PORT_DESCRIPTOR PortList [] = {
		// Initialize Port descriptor (PCIe port, Lanes 4, PCI Device Number 4, ...)
		{
			0, //Descriptor flags	!!!IMPORTANT!!! Terminate last element of array
			PCIE_ENGINE_DATA_INITIALIZER (PciePortEngine, 4, 4),
			PCIE_PORT_DATA_INITIALIZER (GNB_GPP_PORT4_PORT_PRESENT, GNB_GPP_PORT4_CHANNEL_TYPE, 4, GNB_GPP_PORT4_HOTPLUG_SUPPORT, GNB_GPP_PORT4_SPEED_MODE, GNB_GPP_PORT4_SPEED_MODE, GNB_GPP_PORT4_LINK_ASPM, 46)
		},
		// Initialize Port descriptor (PCIe port, Lanes 5, PCI Device Number 5, ...)
		{
			0, //Descriptor flags	!!!IMPORTANT!!! Terminate last element of array
			PCIE_ENGINE_DATA_INITIALIZER (PciePortEngine, 5, 5),
			PCIE_PORT_DATA_INITIALIZER (GNB_GPP_PORT5_PORT_PRESENT, GNB_GPP_PORT5_CHANNEL_TYPE, 5, GNB_GPP_PORT5_HOTPLUG_SUPPORT, GNB_GPP_PORT5_SPEED_MODE, GNB_GPP_PORT5_SPEED_MODE, GNB_GPP_PORT5_LINK_ASPM, 46)
		},
		// Initialize Port descriptor (PCIe port, Lanes 6, PCI Device Number 6, ...)
		{
			0, //Descriptor flags	!!!IMPORTANT!!! Terminate last element of array
			PCIE_ENGINE_DATA_INITIALIZER (PciePortEngine, 6, 6),
			PCIE_PORT_DATA_INITIALIZER (GNB_GPP_PORT6_PORT_PRESENT, GNB_GPP_PORT6_CHANNEL_TYPE, 6, GNB_GPP_PORT6_HOTPLUG_SUPPORT, GNB_GPP_PORT6_SPEED_MODE, GNB_GPP_PORT6_SPEED_MODE, GNB_GPP_PORT6_LINK_ASPM, 46)
		},
		// Initialize Port descriptor (PCIe port, Lanes 7, PCI Device Number 7, ...)
		{
			0,
			PCIE_ENGINE_DATA_INITIALIZER (PciePortEngine, 7, 7),
			PCIE_PORT_DATA_INITIALIZER (GNB_GPP_PORT7_PORT_PRESENT, GNB_GPP_PORT7_CHANNEL_TYPE, 7, GNB_GPP_PORT7_HOTPLUG_SUPPORT, GNB_GPP_PORT7_SPEED_MODE, GNB_GPP_PORT7_SPEED_MODE, GNB_GPP_PORT7_LINK_ASPM, 0)
		},
		// Initialize Port descriptor (PCIe port, Lanes 8, PCI Device Number 8, ...)
		{
			DESCRIPTOR_TERMINATE_LIST, //Descriptor flags	!!!IMPORTANT!!! Terminate last element of array
			PCIE_ENGINE_DATA_INITIALIZER (PciePortEngine, 0, 3),
			PCIE_PORT_DATA_INITIALIZER (GNB_GPP_PORT8_PORT_PRESENT, GNB_GPP_PORT8_CHANNEL_TYPE, 8, GNB_GPP_PORT8_HOTPLUG_SUPPORT, GNB_GPP_PORT8_SPEED_MODE, GNB_GPP_PORT8_SPEED_MODE, GNB_GPP_PORT8_LINK_ASPM, 0)
		}
};

PCIe_DDI_DESCRIPTOR DdiList [] = {
		// Initialize Ddi descriptor (DDI interface Lanes 8:11, DdA, ...)
		{
			0,	 //Descriptor flags
			PCIE_ENGINE_DATA_INITIALIZER (PcieDdiEngine, 8, 11),
			//PCIE_DDI_DATA_INITIALIZER (ConnectorTypeDP, Aux1, Hdp1)
			{ConnectorTypeLvds, Aux1, Hdp1}
		},
		// Initialize Ddi descriptor (DDI interface Lanes 12:15, DdB, ...)
		{
			DESCRIPTOR_TERMINATE_LIST, //Descriptor flags	!!!IMPORTANT!!! Terminate last element of array
			PCIE_ENGINE_DATA_INITIALIZER (PcieDdiEngine, 12, 15),
			//PCIE_DDI_DATA_INITIALIZER (ConnectorTypeDP, Aux2, Hdp2)
			{ConnectorTypeDP, Aux2, Hdp2}
		}
};

PCIe_COMPLEX_DESCRIPTOR Brazos = {
		DESCRIPTOR_TERMINATE_LIST,
		0,
		&PortList[0],
		&DdiList[0]
};

	// GNB PCIe topology Porting

	//
	// Allocate buffer for PCIe_COMPLEX_DESCRIPTOR , PCIe_PORT_DESCRIPTOR and PCIe_DDI_DESCRIPTOR
	//
	AllocHeapParams.RequestedBufferSize = (sizeof (PCIe_COMPLEX_DESCRIPTOR)	+
										 sizeof (PCIe_PORT_DESCRIPTOR) * 5 +
										 sizeof (PCIe_DDI_DESCRIPTOR)) * 2;

	AllocHeapParams.BufferHandle = AMD_MEM_MISC_HANDLES_START;
	AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
	Status = HeapAllocateBuffer (&AllocHeapParams, &InitEarly->StdHeader);
	if (Status != AGESA_SUCCESS) {
		// Could not allocate buffer for PCIe_COMPLEX_DESCRIPTOR, PCIe_PORT_DESCRIPTOR and PCIe_DDI_DESCRIPTOR
		ASSERT (FALSE);
		return;
	}

	BrazosPcieComplexListPtr	=	(PCIe_COMPLEX_DESCRIPTOR *) AllocHeapParams.BufferPtr;

	AllocHeapParams.BufferPtr += sizeof (PCIe_COMPLEX_DESCRIPTOR);
	BrazosPciePortPtr		 =	(PCIe_PORT_DESCRIPTOR *)AllocHeapParams.BufferPtr;

	AllocHeapParams.BufferPtr += sizeof (PCIe_PORT_DESCRIPTOR) * 5;
	BrazosPcieDdiPtr			=	(PCIe_DDI_DESCRIPTOR *) AllocHeapParams.BufferPtr;

	LibAmdMemFill (BrazosPcieComplexListPtr,
					 0,
					 sizeof (PCIe_COMPLEX_DESCRIPTOR),
					 &InitEarly->StdHeader);

	LibAmdMemFill (BrazosPciePortPtr,
					 0,
					 sizeof (PCIe_PORT_DESCRIPTOR) * 5,
					 &InitEarly->StdHeader);

	LibAmdMemFill (BrazosPcieDdiPtr,
					 0,
					 sizeof (PCIe_DDI_DESCRIPTOR) * 2,
					 &InitEarly->StdHeader);

	LibAmdMemCopy	(BrazosPcieComplexListPtr, &Brazos, sizeof (PCIe_COMPLEX_DESCRIPTOR), &InitEarly->StdHeader);
	LibAmdMemCopy	(BrazosPciePortPtr, &PortList[0], sizeof (PCIe_PORT_DESCRIPTOR) * 5, &InitEarly->StdHeader);
	LibAmdMemCopy	(BrazosPcieDdiPtr, &DdiList[0], sizeof (PCIe_DDI_DESCRIPTOR) *2, &InitEarly->StdHeader);


	((PCIe_COMPLEX_DESCRIPTOR*)BrazosPcieComplexListPtr)->PciePortList =	(PCIe_PORT_DESCRIPTOR*)BrazosPciePortPtr;
	((PCIe_COMPLEX_DESCRIPTOR*)BrazosPcieComplexListPtr)->DdiLinkList	=	(PCIe_DDI_DESCRIPTOR*)BrazosPcieDdiPtr;

	InitEarly->GnbConfig.PcieComplexList = BrazosPcieComplexListPtr;
	InitEarly->GnbConfig.PsppPolicy		= 0;
}
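The PCIe topology above is kept in a single heap block that is carved into three consecutive regions (one complex descriptor, five port descriptors, two DDI descriptors) by advancing the buffer pointer. A small sketch of that carving follows, using hypothetical simplified descriptor types rather than the real PCIe_* structures, with calloc standing in for HeapAllocateBuffer.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical simplified descriptor types, for illustration only. */
typedef struct { int Flags; } COMPLEX_DESC;
typedef struct { int Lane;  } PORT_DESC;
typedef struct { int Aux;   } DDI_DESC;

int main (void)
{
  /* One allocation carved into three consecutive regions by advancing a byte
     pointer, mirroring the complex / port-list / DDI-list layout above. */
  size_t Size = sizeof (COMPLEX_DESC) + sizeof (PORT_DESC) * 5 + sizeof (DDI_DESC) * 2;
  uint8_t *Buffer = calloc (1, Size);               /* stand-in for HeapAllocateBuffer */
  if (Buffer == NULL) {
    return 1;
  }
  COMPLEX_DESC *Complex = (COMPLEX_DESC *) Buffer;
  PORT_DESC    *Ports   = (PORT_DESC *) (Buffer + sizeof (COMPLEX_DESC));
  DDI_DESC     *Ddis    = (DDI_DESC *) (Buffer + sizeof (COMPLEX_DESC) + sizeof (PORT_DESC) * 5);
  Complex->Flags = 1;
  Ports[0].Lane  = 4;
  Ddis[0].Aux    = 1;
  printf ("%p %p %p\n", (void *) Complex, (void *) Ports, (void *) Ddis);
  free (Buffer);
  return 0;
}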
Example #10
0
VOID
PcieTopologyApplyLaneMux (
  IN      PCIe_WRAPPER_CONFIG   *Wrapper,
  IN      PCIe_PLATFORM_CONFIG  *Pcie
  )
{
  PCIe_ENGINE_CONFIG  *EngineList;
  UINT8               CurrentPhyLane;
  UINT8               CurrentCoreLane;
  UINT8               CoreLaneIndex;
  UINT8               PhyLaneIndex;
  UINT8               NumberOfPhyLane;
  UINT8               TxLaneMuxSelectorArray [sizeof (LaneMuxSelectorTable)];
  UINT8               RxLaneMuxSelectorArray [sizeof (LaneMuxSelectorTable)];
  UINT8               Index;
  UINT32              TxMaxSelectorValue;
  UINT32              RxMaxSelectorValue;

  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyApplyLaneMux Enter\n");
  if (PcieLibIsPcieWrapper (Wrapper)) {
    EngineList = PcieConfigGetChildEngine (Wrapper);
    LibAmdMemCopy (
      &TxLaneMuxSelectorArray[0],
      &LaneMuxSelectorTable[0],
      sizeof (LaneMuxSelectorTable),
      GnbLibGetHeader (Pcie)
      );
    LibAmdMemCopy (
      &RxLaneMuxSelectorArray[0],
      &LaneMuxSelectorTable[0],
      sizeof (LaneMuxSelectorTable),
      GnbLibGetHeader (Pcie)
      );
    while (EngineList != NULL) {
      if (PcieLibIsPcieEngine (EngineList) && PcieLibIsEngineAllocated (EngineList)) {
        CurrentPhyLane = (UINT8) PcieLibGetLoPhyLane (EngineList) - Wrapper->StartPhyLane;
        NumberOfPhyLane = (UINT8) PcieConfigGetNumberOfPhyLane (EngineList);
        CurrentCoreLane = (UINT8) EngineList->Type.Port.StartCoreLane;
        if (PcieUtilIsLinkReversed (FALSE, EngineList, Pcie)) {
          CurrentCoreLane = CurrentCoreLane + PcieConfigGetNumberOfCoreLane (EngineList) - NumberOfPhyLane;
        }
        for (Index = 0; Index < NumberOfPhyLane; Index = Index + 2 ) {
          CoreLaneIndex = (CurrentCoreLane + Index) / 2;
          PhyLaneIndex = (CurrentPhyLane + Index) / 2;

          if (RxLaneMuxSelectorArray [CoreLaneIndex] != PhyLaneIndex) {
            RxLaneMuxSelectorArray [PcieTopologyLocateMuxIndex (RxLaneMuxSelectorArray, PhyLaneIndex)] = RxLaneMuxSelectorArray [CoreLaneIndex];
            RxLaneMuxSelectorArray [CoreLaneIndex] = PhyLaneIndex;
          }
          if (TxLaneMuxSelectorArray [PhyLaneIndex] != CoreLaneIndex) {
            TxLaneMuxSelectorArray [PcieTopologyLocateMuxIndex (TxLaneMuxSelectorArray, CoreLaneIndex)] =  TxLaneMuxSelectorArray [PhyLaneIndex];
            TxLaneMuxSelectorArray [PhyLaneIndex] = CoreLaneIndex;
          }
        }
      }
      EngineList = PcieLibGetNextDescriptor (EngineList);
    }
    RxMaxSelectorValue = 0;
    TxMaxSelectorValue = 0;
    for (Index = 0; Index < sizeof (LaneMuxSelectorTable); Index++) {
      RxMaxSelectorValue |= (RxLaneMuxSelectorArray[Index] << (Index * 4));
      TxMaxSelectorValue |= (TxLaneMuxSelectorArray[Index] << (Index * 4));
    }
    PcieRegisterWrite (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8021_ADDRESS),
      TxMaxSelectorValue,
      FALSE,
      Pcie
      );
    PcieRegisterWrite (
      Wrapper,
      WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8022_ADDRESS),
      RxMaxSelectorValue,
      FALSE,
      Pcie
      );
  }
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieTopologyApplyLaneMux Exit\n");
}
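TxMaxSelectorValue and RxMaxSelectorValue above are built by packing one 4-bit selector per lane pair into a 32-bit register image. A minimal sketch of that packing follows; it assumes an 8-entry selector table so the nibbles fill the register exactly, and PackSelectors is an invented helper name.

#include <stdint.h>
#include <stdio.h>

/* Pack one 4-bit selector per lane pair into a 32-bit register image. */
static uint32_t PackSelectors (const uint8_t *Selectors, unsigned Count)
{
  uint32_t Value = 0;
  for (unsigned Index = 0; Index < Count; Index++) {
    Value |= (uint32_t) (Selectors[Index] & 0xF) << (Index * 4);
  }
  return Value;
}

int main (void)
{
  uint8_t Identity[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  printf ("0x%08X\n", PackSelectors (Identity, 8));   /* prints 0x76543210 */
  return 0;
}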
Example #11
0
/**
 *
 *
 *      This function is the entry point for getting the device list for memory registers.
 *
 *      @param[in, out]  **DeviceBlockHdrPtr - Pointer to the memory containing the
 *                                             device descriptor list
 *      @param[in]       *StdHeader - Config handle for library and services
 *      @return          AGESA_STATUS
 *                          - AGESA_ALERT
 *                          - AGESA_FATAL
 *                          - AGESA_SUCCESS
 *                          - AGESA_WARNING
 */
AGESA_STATUS
MemFS3GetDeviceList (
  IN OUT   DEVICE_BLOCK_HEADER **DeviceBlockHdrPtr,
  IN       AMD_CONFIG_PARAMS *StdHeader
  )
{
  UINT8 i;
  UINT16 BufferSize;
  UINT64 BufferOffset;
  S3_MEM_NB_BLOCK *S3NBPtr;
  MEM_DATA_STRUCT *MemData;
  MEM_MAIN_DATA_BLOCK mmData;
  UINT8 Die;
  UINT8 DieCount;
  AGESA_STATUS RetVal;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  DESCRIPTOR_GROUP DeviceDescript[MAX_NODES_SUPPORTED];
  BufferSize = 0;

  //---------------------------------------------
  //  Creation of NB Block for S3 resume
  //---------------------------------------------
  RetVal = MemS3InitNB (&S3NBPtr, &MemData, &mmData, StdHeader);
  if (RetVal == AGESA_FATAL) {
    return RetVal;
  }
  DieCount = mmData.DieCount;

  // Get the mask bit and the register list for each node that is present
  for (Die = 0; Die < DieCount; Die ++) {
    S3NBPtr->MemS3GetConPCIMask (S3NBPtr[Die].NBPtr, (VOID *)&DeviceDescript[Die]);
    S3NBPtr->MemS3GetConMSRMask (S3NBPtr[Die].NBPtr, (VOID *)&DeviceDescript[Die]);
    BufferSize = BufferSize + S3NBPtr->MemS3GetRegLstPtr (S3NBPtr[Die].NBPtr, (VOID *)&DeviceDescript[Die]);
  }

  // Based on the size of the device list, allocate a buffer for it.
  AllocHeapParams.RequestedBufferSize = BufferSize + sizeof (DEVICE_BLOCK_HEADER);
  AllocHeapParams.BufferHandle = AMD_S3_NB_INFO_BUFFER_HANDLE;
  AGESA_TESTPOINT (TpIfBeforeAllocateMemoryS3SaveBuffer, StdHeader);
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) != AGESA_SUCCESS) {
    return AGESA_FATAL;
  }
  AGESA_TESTPOINT (TpIfAfterAllocateMemoryS3SaveBuffer, StdHeader);

  *DeviceBlockHdrPtr = (DEVICE_BLOCK_HEADER *) AllocHeapParams.BufferPtr;
  (*DeviceBlockHdrPtr)->RelativeOrMaskOffset = (UINT16) AllocHeapParams.RequestedBufferSize;

  // Copy device list on the stack to the heap.
  BufferOffset = sizeof (DEVICE_BLOCK_HEADER) + (UINTN) AllocHeapParams.BufferPtr;
  for (Die = 0; Die < DieCount; Die ++) {
    for (i = PRESELFREF; i <= POSTSELFREF; i ++) {
      // Copy PCI device descriptor to the heap if it exists.
      if (DeviceDescript[Die].PCIDevice[i].RegisterListID != 0xFFFFFFFF) {
        LibAmdMemCopy ((VOID *)(UINTN) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (PCI_DEVICE_DESCRIPTOR), StdHeader);
        (*DeviceBlockHdrPtr)->NumDevices ++;
        BufferOffset += sizeof (PCI_DEVICE_DESCRIPTOR);
      }
      // Copy conditional PCI device descriptor to the heap if it exists.
      if (DeviceDescript[Die].CPCIDevice[i].RegisterListID != 0xFFFFFFFF) {
        LibAmdMemCopy ((VOID *)(UINTN) BufferOffset, &(DeviceDescript[Die].CPCIDevice[i]), sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR), StdHeader);
        (*DeviceBlockHdrPtr)->NumDevices ++;
        BufferOffset += sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR);
      }
      // Copy MSR device descriptor to the heap if it exists.
      if (DeviceDescript[Die].MSRDevice[i].RegisterListID != 0xFFFFFFFF) {
        LibAmdMemCopy ((VOID *)(UINTN) BufferOffset, &(DeviceDescript[Die].MSRDevice[i]), sizeof (MSR_DEVICE_DESCRIPTOR), StdHeader);
        (*DeviceBlockHdrPtr)->NumDevices ++;
        BufferOffset += sizeof (MSR_DEVICE_DESCRIPTOR);
      }
      // Copy conditional MSR device descriptor to the heap if it exists.
      if (DeviceDescript[Die].CMSRDevice[i].RegisterListID != 0xFFFFFFFF) {
        LibAmdMemCopy ((VOID *)(UINTN) BufferOffset, &(DeviceDescript[Die].CMSRDevice[i]), sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR), StdHeader);
        (*DeviceBlockHdrPtr)->NumDevices ++;
        BufferOffset += sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR);
      }
    }
  }

  return RetVal;
}
Example #12
0
VOID
STATIC
MemRecSPDDataProcess (
  IN OUT   MEM_DATA_STRUCT *MemPtr
  )
{
  BOOLEAN FindSocketWithMem;
  UINT8 Channel;
  UINT8 Dimm;
  UINT8 MaxSockets;
  UINT8 *SocketWithMem;
  UINT8 Socket;
  AGESA_STATUS AgesaStatus;
  SPD_DEF_STRUCT *DimmSPDPtr;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  AGESA_READ_SPD_PARAMS SpdParam;
  ASSERT (MemPtr != NULL);
  FindSocketWithMem = FALSE;
  //
  // Allocate heap to save the socket number that has memory on it.
  //
  AllocHeapParams.RequestedBufferSize = sizeof (UINT8);
  AllocHeapParams.BufferHandle = AMD_REC_MEM_SOCKET_HANDLE;
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) == AGESA_SUCCESS) {
    SocketWithMem = (UINT8 *) AllocHeapParams.BufferPtr;
    *SocketWithMem = 0;

    //
    // Allocate heap for the table
    //
    MaxSockets = (UINT8) GetPlatformNumberOfSockets ();

    AllocHeapParams.RequestedBufferSize = (MaxSockets * MAX_CHANNELS_PER_SOCKET * MAX_DIMMS_PER_CHANNEL * sizeof (SPD_DEF_STRUCT));
    AllocHeapParams.BufferHandle = AMD_MEM_SPD_HANDLE;
    AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
    if (HeapAllocateBuffer (&AllocHeapParams, &MemPtr->StdHeader) == AGESA_SUCCESS) {
      MemPtr->SpdDataStructure = (SPD_DEF_STRUCT *) AllocHeapParams.BufferPtr;
      //
      // Initialize SpdParam Structure
      //
      LibAmdMemCopy ((VOID *)&SpdParam, (VOID *)MemPtr, (UINTN)sizeof (SpdParam.StdHeader), &MemPtr->StdHeader);
      //
      // Populate SPDDataBuffer
      //

      SpdParam.MemData = MemPtr;
      for (Socket = 0; Socket < MaxSockets; Socket ++) {
        SpdParam.SocketId = Socket;
        for (Channel = 0; Channel < MAX_CHANNELS_PER_SOCKET; Channel++) {
          SpdParam.MemChannelId = Channel;
          for (Dimm = 0; Dimm < MAX_DIMMS_PER_CHANNEL; Dimm++) {
            SpdParam.DimmId = Dimm;
            DimmSPDPtr = &(MemPtr->SpdDataStructure[(Socket * MAX_CHANNELS_PER_SOCKET + Channel) * MAX_DIMMS_PER_CHANNEL + Dimm]);
            SpdParam.Buffer = DimmSPDPtr->Data;
            AgesaStatus = AgesaReadSpdRecovery (0, &SpdParam);
            if (AgesaStatus == AGESA_SUCCESS) {
              DimmSPDPtr->DimmPresent = TRUE;
              IDS_HDT_CONSOLE (MEM_FLOW, "SPD Socket %d Channel %d Dimm %d: %08x\n", Socket, Channel, Dimm, SpdParam.Buffer);
              if (!FindSocketWithMem) {
                FindSocketWithMem = TRUE;
              }
            } else {
              DimmSPDPtr->DimmPresent = FALSE;
            }
          }
        }
        if (FindSocketWithMem) {
          *SocketWithMem = Socket;
          break;
        }
      }
    }
  }
}
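The SPD table above is a flat array indexed in row-major order by socket, channel and DIMM: (Socket * MAX_CHANNELS_PER_SOCKET + Channel) * MAX_DIMMS_PER_CHANNEL + Dimm. A one-function sketch that makes the flattening explicit; the per-platform maximums of 2 channels and 4 DIMMs are assumed only for the example.

#include <stdio.h>

#define MAX_CHANNELS_PER_SOCKET 2   /* assumed values for the example */
#define MAX_DIMMS_PER_CHANNEL   4

/* Row-major flattening of (socket, channel, dimm) into a single SPD table index. */
static unsigned DimmIndex (unsigned Socket, unsigned Channel, unsigned Dimm)
{
  return (Socket * MAX_CHANNELS_PER_SOCKET + Channel) * MAX_DIMMS_PER_CHANNEL + Dimm;
}

int main (void)
{
  /* Socket 1, channel 1, DIMM 2 of a 2 x 2 x 4 layout lands at slot 14. */
  printf ("%u\n", DimmIndex (1, 1, 2));
  return 0;
}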
Example #13
0
/**
 *
 *  CopyHeapToTempRamAtPost
 *
 *     This function copies BSP heap content to RAM
 *
 *    @param[in,out]   StdHeader   - Pointer to AMD_CONFIG_PARAMS struct.
 *
 *    @retval          AGESA_STATUS
 *
 */
AGESA_STATUS
CopyHeapToTempRamAtPost (
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  EFI_PEI_SERVICES    **PeiServices;
  EFI_STATUS          Status;
  UINT32              SizeOfHeapNode;
  UINT32              HeapBaseInCache;
  UINT8               *HeapBaseInTempMem;
  UINT8               *Source;
  UINT8               *Destination;
  UINT8               AlignTo16ByteInCache;
  UINT32              SizeOfNodeData;
  UINT32              TotalSize;
  UINT32              HeapInCacheOffset;
  HEAP_MANAGER        *HeapManagerInCache;
  BUFFER_NODE         *HeapInCache;
  HEAP_MANAGER        *HeapManagerInTempMem;
  BUFFER_NODE         *HeapInTempMem;
  BUFFER_NODE         *EndNodeInTempMem;

  //
  // Only BSP is expected to run this routine
  //
  if (IsBsp (StdHeader)) {
    if (StdHeader->HeapStatus != HEAP_LOCAL_CACHE) {
      //
      // The system memory range is outside PEI; we do not have to copy anything into the HOB.
      //
      return AGESA_UNSUPPORTED;
    }

    PeiServices = (EFI_PEI_SERVICES **) StdHeader->ImageBasePtr;

    Status = EFI_SUCCESS;
    HeapInCache = NULL;
    HeapManagerInCache = NULL;
    SizeOfHeapNode = 0;

    TotalSize = sizeof (HEAP_MANAGER);
    SizeOfNodeData = 0;

    // Heap in cache variables
    HeapBaseInCache = (UINT32) StdHeader->HeapBasePtr;
    HeapManagerInCache = (HEAP_MANAGER *) HeapBaseInCache;
    HeapInCacheOffset = HeapManagerInCache->FirstActiveBufferOffset;
    HeapInCache = (BUFFER_NODE*) (HeapBaseInCache + HeapInCacheOffset);
    // Heap in temp buffer variables
    HeapBaseInTempMem = (UINT8 *) GLOBAL_CPU_UEFI_HOB_LIST_TEMP_ADDR;
    HeapManagerInTempMem = (HEAP_MANAGER *) HeapBaseInTempMem;
    HeapInTempMem = (BUFFER_NODE *) (HeapBaseInTempMem + TotalSize);
    EndNodeInTempMem = HeapInTempMem;

    //
    // Calculate the total size of the heap data that needs to be transferred.
    //
    while (HeapInCacheOffset != AMD_HEAP_INVALID_HEAP_OFFSET) {
      //
      // Copy nodes that will persist in system memory
      //
      if (HeapInCache->Persist > HEAP_LOCAL_CACHE) {
        AlignTo16ByteInCache = HeapInCache->PadSize;

        SizeOfNodeData = HeapInCache->BufferSize - AlignTo16ByteInCache;
        SizeOfHeapNode = SizeOfNodeData + sizeof (BUFFER_NODE);

        TotalSize += SizeOfHeapNode;
        //
        // Copy heap in cache with 16-byte alignment to temp buffer compactly.
        //
        Source = (UINT8 *) HeapInCache + sizeof (BUFFER_NODE) + AlignTo16ByteInCache;
        Destination = (UINT8 *) HeapInTempMem + sizeof (BUFFER_NODE);
        LibAmdMemCopy  (HeapInTempMem, HeapInCache, sizeof (BUFFER_NODE), StdHeader);
        LibAmdMemCopy  (Destination, Source, SizeOfNodeData, StdHeader);
        //
        // Fix buffer size in temp memory which has no 16-byte alignment.
        // Next node offset is the total size used.
        //
        HeapInTempMem->BufferSize = SizeOfNodeData;
        HeapInTempMem->OffsetOfNextNode = TotalSize;
        HeapInTempMem->PadSize = 0;
        EndNodeInTempMem = HeapInTempMem;
        HeapInTempMem = (BUFFER_NODE *) (HeapBaseInTempMem + TotalSize);
      }
      //
      // Point to the next buffer node
      //
      HeapInCacheOffset = HeapInCache->OffsetOfNextNode;
      HeapInCache = (BUFFER_NODE *) (HeapBaseInCache + HeapInCacheOffset);
    }

    //
    // Finalize new heap manager in temp memory
    // No free space node is reserved for temp memory.
    //
    HeapManagerInTempMem->FirstFreeSpaceOffset = AMD_HEAP_INVALID_HEAP_OFFSET;
    if (TotalSize == sizeof (HEAP_MANAGER)) {
      HeapManagerInTempMem->UsedSize = sizeof (HEAP_MANAGER);
      HeapManagerInTempMem->FirstActiveBufferOffset = AMD_HEAP_INVALID_HEAP_OFFSET;
    } else {
      HeapManagerInTempMem->UsedSize = TotalSize;
      HeapManagerInTempMem->FirstActiveBufferOffset = sizeof (HEAP_MANAGER);
      //
      // Last Node has no next node.
      //
      EndNodeInTempMem->OffsetOfNextNode = AMD_HEAP_INVALID_HEAP_OFFSET;
    }
    HeapManagerInCache->Signature = HEAP_SIGNATURE_INVALID;
    HeapManagerInTempMem->Signature = HEAP_SIGNATURE_VALID;

    //
    // Start to copy data into UEFI HOB.
    //
    if (BuildGuidDataHob (&gAmdHeapHobGuid, HeapBaseInTempMem, (UINTN) TotalSize) == NULL) {
      return AGESA_ERROR;
    }
    //
    // Update StdHeader with new heap status and base address
    //
    StdHeader->HeapStatus = HEAP_TEMP_MEM;
    StdHeader->HeapBasePtr = HeapGetBaseAddressInTempMem (StdHeader);
  }
  return AGESA_SUCCESS;
}
Example #14
0
/*
 * Hardware Monitor Fan Control
 * Hardware limitation:
 *  HWM will fail to read the input temperature via I2C if other
 *  software switches the I2C address.  AMD recommends using IMC
 *  to control fans, instead of HWM.
 */
static void oem_fan_control(FCH_DATA_BLOCK *FchParams)
{
	FCH_HWM_FAN_CTR oem_factl[5] = {
		/*temperature input, fan mode, frequency, low_duty, med_duty, multiplier, lowtemp, medtemp, hightemp, LinearRange, LinearHoldCount */
		/* DB-FT3 FanOUT0 Fan header J32 */
		{FAN_INPUT_INTERNAL_DIODE, (FAN_STEPMODE | FAN_POLARITY_HIGH), FREQ_100HZ, 40, 60,  0, 40, 65, 85, 0, 0},
		/* DB-FT3 FanOUT1 Fan header J31*/
		{FAN_INPUT_INTERNAL_DIODE, (FAN_STEPMODE | FAN_POLARITY_HIGH), FREQ_100HZ, 40, 60,  0, 40, 65, 85, 0, 0},
		{FAN_INPUT_INTERNAL_DIODE, (FAN_STEPMODE | FAN_POLARITY_HIGH), FREQ_100HZ, 40, 60,  0, 40, 65, 85, 0, 0},
		{FAN_INPUT_INTERNAL_DIODE, (FAN_STEPMODE | FAN_POLARITY_HIGH), FREQ_100HZ, 40, 60,  0, 40, 65, 85, 0, 0},
		{FAN_INPUT_INTERNAL_DIODE, (FAN_STEPMODE | FAN_POLARITY_HIGH), FREQ_100HZ, 40, 60,  0, 40, 65, 85, 0, 0},
	};
	LibAmdMemCopy ((VOID *)(FchParams->Hwm.HwmFanControl), &oem_factl, (sizeof (FCH_HWM_FAN_CTR) * 5), FchParams->StdHeader);

	/* Enable IMC fan control, the recommended way */
#if IS_ENABLED(CONFIG_HUDSON_IMC_FWM)

	/* HwMonitorEnable = TRUE && HwmFchtsiAutoPoll == FALSE to call FchECfancontrolservice */
	FchParams->Hwm.HwMonitorEnable = TRUE;
	FchParams->Hwm.HwmFchtsiAutoPoll = FALSE;               /* 0 disable, 1 enable TSI Auto Polling */

	FchParams->Imc.ImcEnable = TRUE;
	FchParams->Hwm.HwmControl = 1;                          /* 1 IMC, 0 HWM */
	FchParams->Imc.ImcEnableOverWrite = 1;                  /* 2 disable IMC , 1 enable IMC, 0 following hw strap setting */

	LibAmdMemFill(&(FchParams->Imc.EcStruct), 0, sizeof(FCH_EC), FchParams->StdHeader);

	/* Thermal Zone Parameter */
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg1 = 0x00;    /* Zone */
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg2 = 0x3d;    //BIT0 | BIT2 | BIT5;
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg3 = 0x4e;    //6 | BIT3;
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg4 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg5 = 0x04;
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg6 = 0x9a;    /* SMBUS Address for SMBUS based temperature sensor such as SB-TSI and ADM1032 */
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg7 = 0x01;
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg8 = 0x01;    /* PWM stepping rate in unit of PWM level percentage */
	FchParams->Imc.EcStruct.MsgFun81Zone0MsgReg9 = 0x00;

	/* IMC Fan Policy temperature thresholds */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg1 = 0x00;    /* Zone */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg2 = 0x46;    /*AC0 threshold in Celsius */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg3 = 0x3c;    /*AC1 threshold in Celsius */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg4 = 0x32;    /*AC2 threshold in Celsius */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg5 = 0xff;    /* AC3 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg6 = 0xff;    /* AC4 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg7 = 0xff;    /* AC5 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg8 = 0xff;    /* AC6 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgReg9 = 0xff;    /* AC7 lowest threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgRegA = 0x4b;    /* critical threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone0MsgRegB = 0x00;

	/* IMC Fan Policy PWM Settings */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg1 = 0x00;    /* Zone */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg2 = 0x5a;    /* AL0 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg3 = 0x46;    /* AL1 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg4 = 0x28;    /* AL2 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg5 = 0xff;    /* AL3 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg6 = 0xff;    /* AL4 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg7 = 0xff;    /* AL5 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg8 = 0xff;    /* AL6 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone0MsgReg9 = 0xff;    /* AL7 percentage */

	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg1 = 0x01;    /* Zone */
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg2 = 0x55;    //BIT0 | BIT2 | BIT5;
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg3 = 0x17;
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg4 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg5 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg6 = 0x90;    /* SMBUS Address for SMBUS based temperature sensor such as SB-TSI and ADM1032 */
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg7 = 0;
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg8 = 0;       /* PWM stepping rate in units of PWM level percentage */
	FchParams->Imc.EcStruct.MsgFun81Zone1MsgReg9 = 0;

	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg1 = 0x01;    /* zone */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg2 = 60;      /* AC0 threshold in Celsius */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg3 = 40;      /* AC1 threshold in Celsius */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg4 = 0;       /* AC2 threshold in Celsius */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg5 = 0;       /* AC3 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg6 = 0;       /* AC4 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg7 = 0;       /* AC5 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg8 = 0;       /* AC6 threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgReg9 = 0;       /* AC7 (lowest) threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgRegA = 0;       /* critical threshold in Celsius, 0xFF means not defined */
	FchParams->Imc.EcStruct.MsgFun83Zone1MsgRegB = 0x00;

	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg1 = 0x01;    /*Zone */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg2 = 0;       /* AL0 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg3 = 0;       /* AL1 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg4 = 0;       /* AL2 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg5 = 0x00;    /* AL3 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg6 = 0x00;    /* AL4 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg7 = 0x00;    /* AL5 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg8 = 0x00;    /* AL6 percentage */
	FchParams->Imc.EcStruct.MsgFun85Zone1MsgReg9 = 0x00;    /* AL7 percentage */

	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg1 = 0x2;     /* Zone */
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg2 = 0x0;     //BIT0 | BIT2 | BIT5;
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg3 = 0x0;
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg4 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg5 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg6 = 0x98;    /* SMBUS Address for SMBUS based temperature sensor such as SB-TSI and ADM1032 */
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg7 = 2;
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg8 = 5;       /* PWM stepping rate in units of PWM level percentage */
	FchParams->Imc.EcStruct.MsgFun81Zone2MsgReg9 = 0;

	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg0 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg1 = 0x3;     /* Zone */
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg2 = 0x0;     //BIT0 | BIT2 | BIT5;
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg3 = 0x0;
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg4 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg5 = 0x00;
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg6 = 0x0;     /* SMBUS Address for SMBUS based temperature sensor such as SB-TSI and ADM1032 */
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg7 = 0;
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg8 = 0;       /* PWM stepping rate in units of PWM level percentage */
	FchParams->Imc.EcStruct.MsgFun81Zone3MsgReg9 = 0;

	/* IMC Function */
	FchParams->Imc.EcStruct.IMCFUNSupportBitMap = 0x333;    //BIT0 | BIT4 |BIT8;

	/* NOTE:
	 * FchInitLateHwm will overwrite the EcStruct with EcDefaultMessage.
	 * AGESA places EcDefaultMessage as global data in ROM, so it cannot be
	 * overridden here; that code was removed from AGESA instead.
	 * See FchInitLateHwm.
	 */

#else /* HWM fan control, using the alternative method */
	FchParams->Imc.ImcEnable = FALSE;
	FchParams->Hwm.HwMonitorEnable = TRUE;
	FchParams->Hwm.HwmFchtsiAutoPoll = TRUE;                /* 1 enable, 0 disable TSI Auto Polling */

#endif /* CONFIG_HUDSON_IMC_FWM */
}
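The zone-0 policy above pairs each ACn temperature threshold from message function 83 with an ALn duty-cycle percentage from message function 85: 0x46 (70 °C) → 0x5a (90 %), 0x3c (60 °C) → 0x46 (70 %), 0x32 (50 °C) → 0x28 (40 %); the 0xFF entries are unused. Below is a minimal standalone sketch of that pairing, assuming the conventional reading that a temperature at or above ACn selects duty ALn; the lookup helper is illustrative only and is not an AGESA or coreboot function.

#include <stdint.h>
#include <stdio.h>

struct fan_step {
	uint8_t threshold_c;	/* ACn threshold, degrees Celsius */
	uint8_t duty_pct;	/* ALn duty cycle, percent */
};

/* Zone-0 policy as programmed above, hottest entry first. */
static const struct fan_step zone0_policy[] = {
	{ 0x46, 0x5a },	/* AC0 = 70 degC -> AL0 = 90 % */
	{ 0x3c, 0x46 },	/* AC1 = 60 degC -> AL1 = 70 % */
	{ 0x32, 0x28 },	/* AC2 = 50 degC -> AL2 = 40 % */
};

/*
 * Hypothetical helper: assume a reading at or above ACn selects duty ALn,
 * and anything cooler than AC2 lets the fan idle.
 */
static uint8_t zone0_duty(uint8_t temp_c)
{
	size_t i;

	for (i = 0; i < sizeof(zone0_policy) / sizeof(zone0_policy[0]); i++) {
		if (temp_c >= zone0_policy[i].threshold_c)
			return zone0_policy[i].duty_pct;
	}
	return 0;
}

int main(void)
{
	uint8_t t;

	for (t = 40; t <= 80; t += 10)
		printf("%u degC -> %u%% duty\n", (unsigned)t, (unsigned)zone0_duty(t));
	return 0;
}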
Example #15
/**
 *
 *
 *    This is the main function to perform parallel training on all nodes.
 *    It is the routine that runs on the remote AP.
 *
 *     @param[in,out]   *EnvPtr   - Pointer to the Training Environment Data
 *     @param[in,out]   *StdHeader   - Pointer to the Standard Header of the AP
 *
 *     @return          TRUE -  This feature is enabled.
 *     @return          FALSE - This feature is not enabled.
 */
BOOLEAN
MemFParallelTraining (
  IN OUT   REMOTE_TRAINING_ENV *EnvPtr,
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  MEM_PARAMETER_STRUCT ParameterList;
  MEM_NB_BLOCK NB;
  MEM_TECH_BLOCK TB;
  ALLOCATE_HEAP_PARAMS AllocHeapParams;
  MEM_DATA_STRUCT *MemPtr;
  DIE_STRUCT *MCTPtr;
  UINT8 p;
  UINT8 i;
  UINT8 Dct;
  UINT8 Channel;
  UINT8 *BufferPtr;
  UINT8 DctCount;
  UINT8 ChannelCount;
  UINT8 RowCount;
  UINT8 ColumnCount;
  UINT16 SizeOfNewBuffer;
  AP_DATA_TRANSFER ReturnData;

  //
  // Initialize Parameters
  //
  ReturnData.DataPtr = NULL;
  ReturnData.DataSizeInDwords = 0;
  ReturnData.DataTransferFlags = 0;

  ASSERT (EnvPtr != NULL);
  //
  // Replace the standard header of the AP
  //
  LibAmdMemCopy (StdHeader, &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), &(EnvPtr->StdHeader));


  //
  //  Allocate buffer for training data
  //
  BufferPtr = (UINT8 *) (&EnvPtr->DieStruct);
  DctCount = EnvPtr->DieStruct.DctCount;
  BufferPtr += sizeof (DIE_STRUCT);
  ChannelCount = ((DCT_STRUCT *) BufferPtr)->ChannelCount;
  BufferPtr += DctCount * sizeof (DCT_STRUCT);
  RowCount = ((CH_DEF_STRUCT *) BufferPtr)->RowCount;
  ColumnCount = ((CH_DEF_STRUCT *) BufferPtr)->ColumnCount;

  SizeOfNewBuffer = sizeof (DIE_STRUCT) +
                    DctCount * (
                      sizeof (DCT_STRUCT) + (
                        ChannelCount * (
                          sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK) + (
                            RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES +
                            (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
                            (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
                          )
                        )
                      )
                    );
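  //
  // SizeOfNewBuffer covers one DIE_STRUCT, DctCount DCT_STRUCTs and, per
  // channel, a CH_DEF_STRUCT, a MEM_PS_BLOCK and the delay, failure-mask
  // and per-lane tables that the training flow fills in further down.
  //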
  AllocHeapParams.RequestedBufferSize = SizeOfNewBuffer;
  AllocHeapParams.BufferHandle = GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0);
  AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
  if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
    BufferPtr = AllocHeapParams.BufferPtr;
    LibAmdMemCopy ( BufferPtr,
                    &(EnvPtr->DieStruct),
                    sizeof (DIE_STRUCT) + DctCount * (sizeof (DCT_STRUCT) + ChannelCount * (sizeof (CH_DEF_STRUCT) + sizeof (MEM_PS_BLOCK))),
                    StdHeader
                  );

    //
    //  Fix up pointers
    //
    MCTPtr = (DIE_STRUCT *) BufferPtr;
    BufferPtr += sizeof (DIE_STRUCT);
    MCTPtr->DctData = (DCT_STRUCT *) BufferPtr;
    BufferPtr += MCTPtr->DctCount * sizeof (DCT_STRUCT);
    for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
      MCTPtr->DctData[Dct].ChData = (CH_DEF_STRUCT *) BufferPtr;
      BufferPtr += MCTPtr->DctData[Dct].ChannelCount * sizeof (CH_DEF_STRUCT);
      for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
        MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = MCTPtr;
        MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &MCTPtr->DctData[Dct];
      }
    }
    NB.PSBlock = (MEM_PS_BLOCK *) BufferPtr;
    BufferPtr += DctCount * ChannelCount * sizeof (MEM_PS_BLOCK);
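    //
    // BufferPtr now points at the per-channel delay table area; the training
    // results are copied back here after TrainingFlow so the whole buffer can
    // be sent to the BSP in a single transfer.
    //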

    ReturnData.DataPtr = AllocHeapParams.BufferPtr;
    ReturnData.DataSizeInDwords = (SizeOfNewBuffer + 3) / 4;
    ReturnData.DataTransferFlags = 0;

    //
    // Allocate Memory for the MEM_DATA_STRUCT we will use
    //
    AllocHeapParams.RequestedBufferSize = sizeof (MEM_DATA_STRUCT);
    AllocHeapParams.BufferHandle = AMD_MEM_DATA_HANDLE;
    AllocHeapParams.Persist = HEAP_LOCAL_CACHE;
    if (HeapAllocateBuffer (&AllocHeapParams, StdHeader) == AGESA_SUCCESS) {
      MemPtr = (MEM_DATA_STRUCT *)AllocHeapParams.BufferPtr;

      LibAmdMemCopy (&(MemPtr->StdHeader), &(EnvPtr->StdHeader), sizeof (AMD_CONFIG_PARAMS), StdHeader);

      //
      // Copy Parameters from environment
      //
      ParameterList.HoleBase = EnvPtr->HoleBase;
      ParameterList.BottomIo = EnvPtr->BottomIo;
      ParameterList.UmaSize = EnvPtr->UmaSize;
      ParameterList.SysLimit = EnvPtr->SysLimit;
      ParameterList.TableBasedAlterations = EnvPtr->TableBasedAlterations;
      ParameterList.PlatformMemoryConfiguration = EnvPtr->PlatformMemoryConfiguration;
      MemPtr->ParameterListPtr = &ParameterList;

      for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
        MemPtr->GetPlatformCfg[p] = EnvPtr->GetPlatformCfg[p];
      }

      MemPtr->ErrorHandling = EnvPtr->ErrorHandling;
      //
      // Create Local NBBlock and Tech Block
      //
      EnvPtr->NBBlockCtor (&NB, MCTPtr, EnvPtr->FeatPtr);
      NB.RefPtr = &ParameterList;
      NB.MemPtr = MemPtr;
      i = 0;
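      //
      // Walk the installed memory technology constructors until one accepts
      // this northbridge block.
      //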
      while (memTechInstalled[i] != NULL) {
        if (memTechInstalled[i] (&TB, &NB)) {
          break;
        }
        i++;
      }
      NB.TechPtr = &TB;
      NB.TechBlockSwitch (&NB);

      //
      // Setup CPU Mem Type MSRs on the AP
      //
      NB.CpuMemTyping (&NB);

      IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NB.Node);
      //
      // Call Technology Specific Training routine
      //
      NB.TrainingFlow (&NB);
      //
      // Copy training data to ReturnData buffer
      //
      LibAmdMemCopy ( BufferPtr,
                      MCTPtr->DctData[0].ChData[0].RcvEnDlys,
                      ((DctCount * ChannelCount) * (
                         (RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES) +
                         (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
                         (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
                        )
                      ),
                      StdHeader);

      HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);
      //
      // Restore pointers
      //
      for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
        for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
          MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = &EnvPtr->DieStruct;
          MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &EnvPtr->DieStruct.DctData[Dct];

          MCTPtr->DctData[Dct].ChData[Channel].RcvEnDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RcvEnDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqs2dDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqs2dDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].FailingBitMask = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].FailingBitMask;
        }
        MCTPtr->DctData[Dct].ChData = EnvPtr->DieStruct.DctData[Dct].ChData;
      }
      MCTPtr->DctData = EnvPtr->DieStruct.DctData;
    }

    //
    // Signal to BSP that training is complete and Send Results
    //
    ASSERT (ReturnData.DataPtr != NULL);
    ApUtilTransmitBuffer (EnvPtr->BspSocket, EnvPtr->BspCore, &ReturnData, StdHeader);

    //
    // Clean up and exit.
    //
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0), StdHeader);
  } else {
    MCTPtr = &EnvPtr->DieStruct;
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_TRAINING_DATA, MCTPtr->NodeId, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT(FALSE); // Could not allocate heap for buffer for parallel training data
  }
  return TRUE;
}
Example #16
AGESA_STATUS
PcieMapPortPciAddressTN (
  IN      PCIe_ENGINE_CONFIG     *Engine
  )
{
  AGESA_STATUS            Status;
  TN_COMPLEX_CONFIG       *ComplexConfig;
  PCIe_PLATFORM_CONFIG    *Pcie;
  UINT8                   PortDevMap[6];
  UINT8                   FreeDevMap[6];
  UINT8                   PortIndex;
  UINT8                   EnginePortIndex;
  UINT8                   FreeIndex;
  D0F0x64_x20_STRUCT      D0F0x64_x20;
  D0F0x64_x21_STRUCT      D0F0x64_x21;
  Status = AGESA_SUCCESS;
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieMapPortPciAddressTN Enter\n");
  if (Engine->Type.Port.PortData.DeviceNumber == 0 && Engine->Type.Port.PortData.FunctionNumber == 0) {
    Engine->Type.Port.PortData.DeviceNumber = Engine->Type.Port.NativeDevNumber;
    Engine->Type.Port.PortData.FunctionNumber = Engine->Type.Port.NativeFunNumber;
  }
  if (!PcieConfigIsSbPcieEngine (Engine)) {
    ComplexConfig = (TN_COMPLEX_CONFIG *) PcieConfigGetParent (DESCRIPTOR_SILICON, &Engine->Header);
    Pcie = (PCIe_PLATFORM_CONFIG *) PcieConfigGetParent (DESCRIPTOR_PLATFORM, &Engine->Header);
    LibAmdMemFill (&FreeDevMap[0], 0x0, sizeof (FreeDevMap), GnbLibGetHeader (Pcie));
    LibAmdMemCopy (&PortDevMap[0], &ComplexConfig->FmSilicon.PortDevMap, sizeof (PortDevMap), GnbLibGetHeader (Pcie));
    for (PortIndex = 0; PortIndex < sizeof (PortDevMap); PortIndex++) {
      if (PortDevMap[PortIndex] != 0) {
        FreeDevMap[PortDevMap[PortIndex] - 2] = 1;
      }
    }
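    //
    // PortDevMap[] is indexed by native port (devices 2..7 map to index
    // 0..5) and holds the PCI device number assigned to each port, while
    // FreeDevMap[] marks which device numbers are already taken.
    //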
    EnginePortIndex = Engine->Type.Port.PortData.DeviceNumber - 2;
    if (FreeDevMap[EnginePortIndex] == 0) {
      // Dev number not yet allocated
      ComplexConfig->FmSilicon.PortDevMap[Engine->Type.Port.NativeDevNumber - 2] = Engine->Type.Port.PortData.DeviceNumber;
      FreeDevMap[EnginePortIndex] = 1;
      PortDevMap[Engine->Type.Port.NativeDevNumber - 2] = Engine->Type.Port.PortData.DeviceNumber;
      for (PortIndex = 0; PortIndex < sizeof (PortDevMap); PortIndex++) {
        if (PortDevMap[PortIndex] == 0) {
          for (FreeIndex = 0; FreeIndex < sizeof (FreeDevMap); FreeIndex++) {
            if (FreeDevMap[FreeIndex] == 0) {
              FreeDevMap[FreeIndex] = 1;
              break;
            }
          }
          PortDevMap[PortIndex] = FreeIndex + 2;
        }
      }
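      //
      // Program the remap registers: clear ProgDevMapEn, load the per-port
      // device numbers into D0F0x64_x20/x21, then set ProgDevMapEn so the
      // new mapping takes effect.
      //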

      GnbRegisterReadTN (D0F0x64_x20_TYPE, D0F0x64_x20_ADDRESS, &D0F0x64_x20, 0, GnbLibGetHeader (Pcie));
      D0F0x64_x20.Field.ProgDevMapEn = 0;
      GnbRegisterWriteTN (D0F0x64_x20_TYPE, D0F0x64_x20_ADDRESS, &D0F0x64_x20, 0, GnbLibGetHeader (Pcie));
      GnbRegisterReadTN (D0F0x64_x21_TYPE, D0F0x64_x21_ADDRESS, &D0F0x64_x21, 0, GnbLibGetHeader (Pcie));
      D0F0x64_x21.Field.GfxPortADevmap = PortDevMap[2 - 2];
      D0F0x64_x21.Field.GfxPortBDevmap = PortDevMap[3 - 2];
      D0F0x64_x20.Field.GppPortBDevmap = PortDevMap[4 - 2];
      D0F0x64_x20.Field.GppPortCDevmap = PortDevMap[5 - 2];
      D0F0x64_x20.Field.GppPortDDevmap = PortDevMap[6 - 2];
      D0F0x64_x20.Field.GppPortEDevmap = PortDevMap[7 - 2];
      D0F0x64_x20.Field.ProgDevMapEn = 0x1;
      GnbRegisterWriteTN (D0F0x64_x20_TYPE, D0F0x64_x20_ADDRESS, &D0F0x64_x20, 0, GnbLibGetHeader (Pcie));
      GnbRegisterWriteTN (D0F0x64_x21_TYPE, D0F0x64_x21_ADDRESS, &D0F0x64_x21, 0, GnbLibGetHeader (Pcie));
      D0F0x64_x20.Field.ProgDevMapEn = 1;
      GnbRegisterWriteTN (D0F0x64_x20_TYPE, D0F0x64_x20_ADDRESS, &D0F0x64_x20, 0, GnbLibGetHeader (Pcie));
    } else {
      IDS_HDT_CONSOLE (GNB_TRACE, "  Fail device %d to port %d\n", Engine->Type.Port.PortData.DeviceNumber, Engine->Type.Port.NativeDevNumber);
      Status = AGESA_ERROR;
    }
  }
  IDS_HDT_CONSOLE (GNB_TRACE, "PcieMapPortPciAddressTN Exit [0x%x]\n", Status);
  return  Status;
}