/**
  Worker function setting fixed MTRRs.

  @param  FixedSettings  A buffer to hold fixed MTRRs content.

**/
VOID
MtrrSetFixedMtrrWorker (
  IN MTRR_FIXED_SETTINGS  *FixedSettings
  )
{
  UINT32  Index;

  for (Index = 0; Index < MTRR_NUMBER_OF_FIXED_MTRR; Index++) {
    AsmWriteMsr64 (
      mMtrrLibFixedMtrrTable[Index].Msr,
      FixedSettings->Mtrr[Index]
      );
  }
}
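//
// Hypothetical read-side counterpart (a sketch, not part of the original
// source): capture the current fixed-MTRR contents into the same
// MTRR_FIXED_SETTINGS buffer so they can later be restored with
// MtrrSetFixedMtrrWorker(). mMtrrLibFixedMtrrTable is assumed to be the same
// table used above.
//
VOID
MtrrGetFixedMtrrWorkerSketch (
  OUT MTRR_FIXED_SETTINGS  *FixedSettings
  )
{
  UINT32  Index;

  for (Index = 0; Index < MTRR_NUMBER_OF_FIXED_MTRR; Index++) {
    FixedSettings->Mtrr[Index] = AsmReadMsr64 (mMtrrLibFixedMtrrTable[Index].Msr);
  }
}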
/**
  Internal worker function that is called to complete CPU initialization
  at the end of SmmCpuFeaturesInitializeProcessor().

**/
VOID
FinishSmmCpuFeaturesInitializeProcessor (
  VOID
  )
{
  MSR_IA32_SMM_MONITOR_CTL_REGISTER  SmmMonitorCtl;

  //
  // Set MSEG Base Address in SMM Monitor Control MSR.
  //
  if (mMsegBase > 0) {
    SmmMonitorCtl.Uint64        = 0;
    SmmMonitorCtl.Bits.MsegBase = (UINT32)mMsegBase >> 12;
    SmmMonitorCtl.Bits.Valid    = 1;
    AsmWriteMsr64 (MSR_IA32_SMM_MONITOR_CTL, SmmMonitorCtl.Uint64);
  }
}
VOID
CpuSmmSxWorkAround (
  VOID
  )
{
  UINT64  MsrValue;

  MsrValue = AsmReadMsr64 (0xE2);
  if (MsrValue & BIT15) {
    return;
  }
  if (MsrValue & BIT10) {
    MsrValue &= ~BIT10;
    AsmWriteMsr64 (0xE2, MsrValue);
  }
}
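//
// Hedged reading of the magic numbers above (an assumption, not confirmed by
// the source): MSR 0xE2 is commonly MSR_PKG_CST_CONFIG_CONTROL, where BIT15
// is the configuration-lock bit (once set, further writes #GP) and BIT10
// enables I/O MWAIT redirection. Named constants make the workaround easier
// to audit.
//
#define SX_WORKAROUND_PKG_CST_CONFIG_CONTROL   0xE2
#define SX_WORKAROUND_CFG_LOCK                 BIT15
#define SX_WORKAROUND_IO_MWAIT_REDIRECTION     BIT10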
EFI_STATUS
AmdSmmOpen (
  IN EFI_SMM_ACCESS2_PROTOCOL  *This
  )
{
  UINT64  Value;

  if (mSmmAccess2.LockState) {
    return EFI_ACCESS_DENIED;
  }

  Value = AsmReadMsr64 (0xc0010113) & ~2;   // Clear TValid
  AsmWriteMsr64 (0xc0010113, Value);

  mSmramMap.RegionState = EFI_SMRAM_OPEN;
  mSmmAccess2.OpenState = TRUE;
  return EFI_SUCCESS;
}
EFI_STATUS
AmdSmmClose (
  IN EFI_SMM_ACCESS2_PROTOCOL  *This
  )
{
  UINT64  Value;

  if (mSmmAccess2.LockState) {
    return EFI_ACCESS_DENIED;
  }

  Value = AsmReadMsr64 (0xc0010113) | 2;    // Set TValid
  AsmWriteMsr64 (0xc0010113, Value);

  mSmramMap.RegionState = EFI_SMRAM_CLOSED;
  mSmmAccess2.OpenState = FALSE;
  return EFI_SUCCESS;
}
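//
// Hypothetical caller sketch (not from the original source): a DXE driver
// would normally reach these handlers through EFI_SMM_ACCESS2_PROTOCOL rather
// than calling them directly. Assumes a DXE context with gBS and
// gEfiSmmAccess2ProtocolGuid available; error handling is reduced to a
// status return.
//
EFI_STATUS
OpenSmramViaAccess2Sketch (
  VOID
  )
{
  EFI_STATUS                Status;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;

  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  return SmmAccess->Open (SmmAccess);
}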
/**
  Programs fixed MTRRs registers.

  @param  MemoryCacheType  The memory type to set.
  @param  Base             The base address of memory range.
  @param  Length           The length of memory range.

  @retval RETURN_SUCCESS      The cache type was updated successfully.
  @retval RETURN_UNSUPPORTED  The requested range or cache type was invalid
                              for the fixed MTRRs.

**/
RETURN_STATUS
ProgramFixedMtrr (
  IN     UINT64  MemoryCacheType,
  IN OUT UINT64  *Base,
  IN OUT UINT64  *Length
  )
{
  UINT32  MsrNum;
  UINT32  ByteShift;
  UINT64  TempQword;
  UINT64  OrMask;
  UINT64  ClearMask;

  TempQword = 0;
  OrMask    = 0;
  ClearMask = 0;

  for (MsrNum = 0; MsrNum < MTRR_NUMBER_OF_FIXED_MTRR; MsrNum++) {
    if ((*Base >= mMtrrLibFixedMtrrTable[MsrNum].BaseAddress) &&
        (*Base <
         (
           mMtrrLibFixedMtrrTable[MsrNum].BaseAddress +
           (8 * mMtrrLibFixedMtrrTable[MsrNum].Length)
         )
        )
        ) {
      break;
    }
  }

  if (MsrNum == MTRR_NUMBER_OF_FIXED_MTRR) {
    return RETURN_UNSUPPORTED;
  }

  //
  // We found the fixed MTRR to be programmed
  //
  for (ByteShift = 0; ByteShift < 8; ByteShift++) {
    if (*Base ==
        (
          mMtrrLibFixedMtrrTable[MsrNum].BaseAddress +
          (ByteShift * mMtrrLibFixedMtrrTable[MsrNum].Length)
        )
        ) {
      break;
    }
  }

  if (ByteShift == 8) {
    return RETURN_UNSUPPORTED;
  }

  for (
       ;
       ((ByteShift < 8) && (*Length >= mMtrrLibFixedMtrrTable[MsrNum].Length));
       ByteShift++
       ) {
    OrMask    |= LShiftU64 ((UINT64) MemoryCacheType, (UINT32) (ByteShift * 8));
    ClearMask |= LShiftU64 ((UINT64) 0xFF, (UINT32) (ByteShift * 8));
    *Length   -= mMtrrLibFixedMtrrTable[MsrNum].Length;
    *Base     += mMtrrLibFixedMtrrTable[MsrNum].Length;
  }

  if (ByteShift < 8 && (*Length != 0)) {
    return RETURN_UNSUPPORTED;
  }

  TempQword = (AsmReadMsr64 (mMtrrLibFixedMtrrTable[MsrNum].Msr) & ~ClearMask) | OrMask;
  AsmWriteMsr64 (mMtrrLibFixedMtrrTable[MsrNum].Msr, TempQword);
  return RETURN_SUCCESS;
}
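//
// Hypothetical caller sketch (an assumption, not from the original source):
// a range that spans several fixed-MTRR MSRs is consumed by calling
// ProgramFixedMtrr() repeatedly; Base and Length are advanced in place until
// the range is exhausted or turns out not to be coverable by the fixed MTRRs.
//
RETURN_STATUS
SetFixedMtrrRangeSketch (
  IN UINT64  MemoryCacheType,
  IN UINT64  Base,
  IN UINT64  Length
  )
{
  RETURN_STATUS  Status;

  while (Length != 0) {
    Status = ProgramFixedMtrr (MemoryCacheType, &Base, &Length);
    if (RETURN_ERROR (Status)) {
      return Status;
    }
  }

  return RETURN_SUCCESS;
}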
/**
  This function enables the TXT environment.

  @retval EFI_SUCCESS      TXT environment is enabled.
  @retval EFI_UNSUPPORTED  TXT environment is not supported.

**/
EFI_STATUS
EnableTxt (
  VOID
  )
{
  TXT_GETSEC_CAPABILITIES  TxtCapabilities;
  UINT32                   Index;
  UINT32                   RegEax;
  UINT32                   RegEbx;
  UINT32                   RegEcx;
  BOOLEAN                  ClearMca;
  UINT32                   McaCount;

  //
  // Enable SMX
  //
  AsmWriteCr4 (AsmReadCr4 () | CR4_SMXE);

  //
  // Check TXT Chipset
  //
  Index = 0;
  TxtCapabilities.Uint32 = AsmGetSecCapabilities (Index);
  DumpGetSecCapabilities (Index, TxtCapabilities.Uint32);
  if (TxtCapabilities.Bits.ChipsetPresent == 0) {
    DEBUG ((EFI_D_ERROR, "(TXT) TXT Chipset not present!\n"));
    return EFI_UNSUPPORTED;
  }
  if (TxtCapabilities.Bits.Senter == 0) {
    DEBUG ((EFI_D_ERROR, "(TXT) SENTER not supported!\n"));
    return EFI_UNSUPPORTED;
  }
  while (TxtCapabilities.Bits.ExtendedLeafs != 0) {
    Index++;
    TxtCapabilities.Uint32 = AsmGetSecCapabilities (Index);
    DumpGetSecCapabilities (Index, TxtCapabilities.Uint32);
  }

  //
  // Get parameters
  //
  ClearMca = TRUE;
  Index    = 0;
  while (TRUE) {
    AsmGetSecParameters (Index, &RegEax, &RegEbx, &RegEcx);
    if ((RegEax & GETSEC_PARAMETER_TYPE_MASK) == 0) {
      break;
    }
    DumpGetSecParameters (RegEax, RegEbx, RegEcx);
    if ((RegEax & GETSEC_PARAMETER_TYPE_MASK) == GETSEC_PARAMETER_TYPE_EXTERNSION) {
      if ((RegEax & (1 << 6)) != 0) {
        //
        // No need to clear MCA
        //
        ClearMca = FALSE;
      }
    }
    Index++;
  }

  //
  // Clear MCA
  //
  if (ClearMca) {
    McaCount = (UINT32)AsmReadMsr64 (IA32_MCG_CAP) & 0xFF;
    for (Index = 0; Index < McaCount; Index++) {
      AsmWriteMsr64 (IA32_MC0_STATUS + Index * 4, 0);
    }
  }

  return EFI_SUCCESS;
}
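//
// Hedged sketch (an assumption, not from the original source): setting
// CR4.SMXE on a processor without SMX raises #GP, so a caller may want to
// gate EnableTxt() on CPUID.01H:ECX[6] (the SMX feature flag) first. The bit
// position follows the Intel SDM; CR4_SMXE is the same constant used above.
//
BOOLEAN
IsSmxSupportedSketch (
  VOID
  )
{
  UINT32  RegEcx;

  AsmCpuid (1, NULL, NULL, &RegEcx, NULL);
  return (BOOLEAN)((RegEcx & BIT6) != 0);
}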
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  CPU_REGISTER_TABLE  *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        switch (RegisterTableEntry->Index) {
          case 0:
            Value = AsmReadCr0 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr0 (Value);
            break;
          case 2:
            Value = AsmReadCr2 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr2 (Value);
            break;
          case 3:
            Value = AsmReadCr3 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr3 (Value);
            break;
          case 4:
            Value = AsmReadCr4 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr4 (Value);
            break;
          default:
            break;
        }
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        //
        // If this function is called to restore register setting after INIT signal,
        // there is no need to restore MSRs in register table.
        //
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
        }
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }
        break;
      default:
        break;
    }
  }
}
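//
// Hypothetical table-entry sketch (not from the original source): populate
// one CPU_REGISTER_TABLE_ENTRY that asks SetProcessorRegister() to program a
// 2-bit field of an MSR. Only field names referenced above are used; the MSR
// index and value are illustrative examples, not values from the source.
//
VOID
InitExampleMsrEntrySketch (
  OUT CPU_REGISTER_TABLE_ENTRY  *Entry
  )
{
  Entry->RegisterType   = Msr;
  Entry->Index          = 0x1A0;   // example MSR index only
  Entry->ValidBitStart  = 0;
  Entry->ValidBitLength = 2;       // program bits [1:0]
  Entry->Value          = 0x3;
}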
/**
  This function is the WRMSR handler for SMM.

  @param  Index  CPU index

**/
VOID
SmmWriteMsrHandler (
  IN UINT32  Index
  )
{
  UINT64             Data64;
  UINT32             MsrIndex;
  VM_ENTRY_CONTROLS  VmEntryControls;
  X86_REGISTER       *Reg;
  STM_RSC_MSR_DESC   *MsrDesc;
  STM_RSC_MSR_DESC   LocalMsrDesc;

  Reg      = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
  MsrIndex = ReadUnaligned32 ((UINT32 *)&Reg->Rcx);

  MsrDesc = GetStmResourceMsr (mHostContextCommon.MleProtectedResource.Base, MsrIndex);
  if ((MsrDesc != NULL) && (MsrDesc->WriteMask != 0)) {
    DEBUG ((EFI_D_ERROR, "WRMSR (%x) violation!\n", MsrIndex));
    AddEventLogForResource (EvtHandledProtectionException, (STM_RSC *)MsrDesc);
    SmmExceptionHandler (Index);
    CpuDeadLoop ();
  }

  MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr, MsrIndex);
  if ((MsrDesc == NULL) || (MsrDesc->WriteMask == 0) || (MsrDesc->KernelModeProcessing == 0)) {
    ZeroMem (&LocalMsrDesc, sizeof(LocalMsrDesc));
    LocalMsrDesc.Hdr.RscType = MACHINE_SPECIFIC_REG;
    LocalMsrDesc.Hdr.Length  = sizeof(LocalMsrDesc);
    LocalMsrDesc.MsrIndex    = MsrIndex;
    LocalMsrDesc.ReadMask    = 0;
    LocalMsrDesc.WriteMask   = (UINT64)-1;
    AddEventLogForResource (EvtBiosAccessToUnclaimedResource, (STM_RSC *)&LocalMsrDesc);
  }

  // DEBUG ((EFI_D_INFO, "!!!WriteMsrHandler!!!\n"));

  Data64 = LShiftU64 ((UINT64)ReadUnaligned32 ((UINT32 *)&Reg->Rdx), 32) | (UINT64)ReadUnaligned32 ((UINT32 *)&Reg->Rax);

  switch (MsrIndex) {
  case IA32_EFER_MSR_INDEX:
#if 0
    AcquireSpinLock (&mHostContextCommon.DebugLock);
    if ((Data64 & IA32_EFER_MSR_SCE) != 0) {
      DEBUG ((EFI_D_INFO, "!!!WriteMsrHandler - SCE!!!\n"));
    }
    if ((Data64 & IA32_EFER_MSR_XDE) != 0) {
      DEBUG ((EFI_D_INFO, "!!!WriteMsrHandler - XDE!!!\n"));
    }
    ReleaseSpinLock (&mHostContextCommon.DebugLock);
#endif
    mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer = Data64;

    //
    // Check IA32e mode switch
    //
    VmEntryControls.Uint32 = VmRead32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX);
    if ((Data64 & IA32_EFER_MSR_MLE) != 0) {
      mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLE;
    } else {
      mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLE;
    }
    if (((mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0) &&
        ((VmReadN (VMCS_N_GUEST_CR0_INDEX) & CR0_PG) != 0)) {
      mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
      VmEntryControls.Bits.Ia32eGuest = 1;
    } else {
      mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLA;
      VmEntryControls.Bits.Ia32eGuest = 0;
    }
    VmWrite32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, VmEntryControls.Uint32);
    VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer);
    break;

  case IA32_SYSENTER_CS_MSR_INDEX:
    VmWrite32 (VMCS_32_GUEST_IA32_SYSENTER_CS_INDEX, (UINT32)Data64);
    break;
  case IA32_SYSENTER_ESP_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX, (UINTN)Data64);
    break;
  case IA32_SYSENTER_EIP_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX, (UINTN)Data64);
    break;

  case IA32_FS_BASE_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_FS_BASE_INDEX, (UINTN)Data64);
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use FS
    break;
  case IA32_GS_BASE_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_GS_BASE_INDEX, (UINTN)Data64);
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use GS
    break;

#if 0
  case IA32_KERNAL_GS_BASE_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
  case IA32_STAR_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
  case IA32_LSTAR_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
  case IA32_FMASK_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
#endif

  case IA32_SMM_MONITOR_CTL_MSR_INDEX:
    break;

  case EFI_MSR_NEHALEM_SMRR_PHYS_BASE:
  case EFI_MSR_NEHALEM_SMRR_PHYS_MASK:
    //
    // Ignore the write
    //
    break;

  case IA32_BIOS_UPDT_TRIG_MSR_INDEX:
    //
    // Only write it when BIOS requests a microcode update
    //
    MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr, IA32_BIOS_UPDT_TRIG_MSR_INDEX);
    if (MsrDesc != NULL) {
      AsmWriteMsr64 (MsrIndex, Data64);
    }
    break;

  default:
    //
    // For all other MSRs, pass the write back to BIOS.
    //
    // Need to apply the write mask to the written value.
    //
    if (MsrDesc != NULL) {
      Data64 |= MsrDesc->WriteMask;
    }
    AsmWriteMsr64 (MsrIndex, Data64);
    break;
  }

  VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN (VMCS_N_GUEST_RIP_INDEX) + VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));
  return;
}
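//
// Hedged helper sketch (not from the original source): WRMSR takes the MSR
// index in ECX and the 64-bit value split across EDX:EAX. This mirrors the
// Data64 composition performed in the handler above, in case other VM-exit
// handlers need the same decoding.
//
UINT64
ComposeMsrWriteValueSketch (
  IN UINT32  Eax,
  IN UINT32  Edx
  )
{
  return LShiftU64 ((UINT64)Edx, 32) | (UINT64)Eax;
}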
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  PreSmmInit       Specify the target register table.
                           If TRUE, the target is the pre-SMM-init register table.
                           If FALSE, the target is the post-SMM-init register table.
  @param  ProcessorNumber  Handle number of specified logical processor.

**/
VOID
SetProcessorRegisterEx (
  IN BOOLEAN  PreSmmInit,
  IN UINTN    ProcessorNumber
  )
{
  CPU_REGISTER_TABLE        *RegisterTable;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  UINTN                     StartIndex;
  UINTN                     EndIndex;

  if (PreSmmInit) {
    RegisterTable = &mCpuConfigConextBuffer.PreSmmInitRegisterTable[ProcessorNumber];
  } else {
    RegisterTable = &mCpuConfigConextBuffer.RegisterTable[ProcessorNumber];
  }

  //
  // If microcode patch has been applied, then the first register table entry
  // is for microcode update, so it is skipped.
  //
  StartIndex = 0;

  if (mSetBeforeCpuOnlyReset) {
    EndIndex = StartIndex + RegisterTable->NumberBeforeReset;
  } else {
    StartIndex += RegisterTable->NumberBeforeReset;
    EndIndex    = RegisterTable->TableLength;
  }

  //
  // Traverse Register Table of this logical processor
  //
  for (Index = StartIndex; Index < EndIndex; Index++) {
    RegisterTableEntry = &RegisterTable->RegisterTableEntry[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        switch (RegisterTableEntry->Index) {
          case 0:
            Value = AsmReadCr0 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              RegisterTableEntry->Value
                              );
            AsmWriteCr0 (Value);
            break;
          case 2:
            Value = AsmReadCr2 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              RegisterTableEntry->Value
                              );
            AsmWriteCr2 (Value);
            break;
          case 3:
            Value = AsmReadCr3 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              RegisterTableEntry->Value
                              );
            AsmWriteCr3 (Value);
            break;
          case 4:
            Value = AsmReadCr4 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              RegisterTableEntry->Value
                              );
            AsmWriteCr4 (Value);
            break;
          case 8:
            //
            // Do we need to support CR8?
            //
            break;
          default:
            break;
        }
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        //
        // If this function is called to restore register setting after INIT signal,
        // there is no need to restore MSRs in register table.
        //
        if (!mRestoreSettingAfterInit) {
          if (RegisterTableEntry->ValidBitLength >= 64) {
            //
            // If length is not less than 64 bits, then directly write without reading
            //
            AsmWriteMsr64 (
              RegisterTableEntry->Index,
              RegisterTableEntry->Value
              );
          } else {
            //
            // Set the bit section according to bit start and length
            //
            AsmMsrBitFieldWrite64 (
              RegisterTableEntry->Index,
              RegisterTableEntry->ValidBitStart,
              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
              RegisterTableEntry->Value
              );
          }
        }
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }
        break;
      default:
        break;
    }
  }
}
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTables      Pointer to register table of the running processor.
  @param  RegisterTableCount  Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTables,
  IN UINTN               RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId    = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        switch (RegisterTableEntry->Index) {
          case 0:
            Value = AsmReadCr0 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr0 (Value);
            break;
          case 2:
            Value = AsmReadCr2 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr2 (Value);
            break;
          case 3:
            Value = AsmReadCr3 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr3 (Value);
            break;
          case 4:
            Value = AsmReadCr4 ();
            Value = (UINTN) BitFieldWrite64 (
                              Value,
                              RegisterTableEntry->ValidBitStart,
                              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                              (UINTN) RegisterTableEntry->Value
                              );
            AsmWriteCr4 (Value);
            break;
          default:
            break;
        }
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        //
        // If this function is called to restore register setting after INIT signal,
        // there is no need to restore MSRs in register table.
        //
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Acquire a lock to avoid Package/Core-scoped MSR programming issues
          // in parallel execution mode and to make sure the MSR read/write
          // operation is atomic.
          //
          MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
          AcquireSpinLock (MsrSpinLock);
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
          ReleaseSpinLock (MsrSpinLock);
        }
        break;
      //
      // MemoryMapped operations
      //
      case MemoryMapped:
        AcquireSpinLock (mMemoryMappedLock);
        MmioBitFieldWrite32 (
          (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          (UINT32)RegisterTableEntry->Value
          );
        ReleaseSpinLock (mMemoryMappedLock);
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }
        break;
      default:
        break;
    }
  }
}
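//
// Hypothetical sketch of the lock lookup used above (the real
// GetMsrSpinLockByIndex() is defined elsewhere in the module): map an MSR
// index to a per-MSR spin lock so that read-modify-write programming of
// package- or core-scoped MSRs stays atomic when APs run in parallel. The
// table layout and size here are assumptions for illustration only.
//
typedef struct {
  UINT32     MsrIndex;
  SPIN_LOCK  SpinLock;
} MSR_SPIN_LOCK_ENTRY_SKETCH;

STATIC MSR_SPIN_LOCK_ENTRY_SKETCH  mMsrSpinLocksSketch[16];
STATIC UINTN                       mMsrSpinLockCountSketch;

SPIN_LOCK *
GetMsrSpinLockByIndexSketch (
  IN UINT32  MsrIndex
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMsrSpinLockCountSketch; Index++) {
    if (mMsrSpinLocksSketch[Index].MsrIndex == MsrIndex) {
      return &mMsrSpinLocksSketch[Index].SpinLock;
    }
  }

  return NULL;
}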
/**
  Detect whether the specified processor can find a matching microcode patch
  and load it.

**/
VOID
MicrocodeDetect (
  VOID
  )
{
  UINT64                                  MicrocodePatchAddress;
  UINT64                                  MicrocodePatchRegionSize;
  UINT32                                  ExtendedTableLength;
  UINT32                                  ExtendedTableCount;
  EFI_CPU_MICROCODE_EXTENDED_TABLE        *ExtendedTable;
  EFI_CPU_MICROCODE_EXTENDED_TABLE_HEADER *ExtendedTableHeader;
  EFI_CPU_MICROCODE_HEADER                *MicrocodeEntryPoint;
  UINTN                                   MicrocodeEnd;
  UINTN                                   Index;
  UINT8                                   PlatformId;
  UINT32                                  RegEax;
  UINT32                                  LatestRevision;
  UINTN                                   TotalSize;
  UINT32                                  CheckSum32;
  BOOLEAN                                 CorrectMicrocode;
  INT32                                   CurrentSignature;
  MICROCODE_INFO                          MicrocodeInfo;

  ZeroMem (&MicrocodeInfo, sizeof (MICROCODE_INFO));
  MicrocodePatchAddress    = PcdGet64 (PcdCpuMicrocodePatchAddress);
  MicrocodePatchRegionSize = PcdGet64 (PcdCpuMicrocodePatchRegionSize);
  if (MicrocodePatchRegionSize == 0) {
    //
    // There are no microcode patches
    //
    return;
  }

  ExtendedTableLength = 0;

  //
  // Here data of CPUID leafs have not been collected into context buffer, so
  // GetProcessorCpuid() cannot be used here to retrieve CPUID data.
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);

  //
  // The index of platform information resides in bits 50:52 of MSR IA32_PLATFORM_ID
  //
  PlatformId = (UINT8) AsmMsrBitFieldRead64 (EFI_MSR_IA32_PLATFORM_ID, 50, 52);

  LatestRevision      = 0;
  MicrocodeEnd        = (UINTN) (MicrocodePatchAddress + MicrocodePatchRegionSize);
  MicrocodeEntryPoint = (EFI_CPU_MICROCODE_HEADER *) (UINTN) MicrocodePatchAddress;
  do {
    //
    // Check if the microcode is for this CPU, the version is newer, and the
    // update can be processed on the platform.
    //
    CorrectMicrocode = FALSE;
    if (MicrocodeEntryPoint->HeaderVersion == 0x1) {
      //
      // It is the microcode header. It is not the padding data between microcode patches
      // because the padding data should not include 0x00000001 and it should be in the
      // repeated byte format (like 0xXYXYXYXY....).
      //
      if (MicrocodeEntryPoint->ProcessorId == RegEax &&
          MicrocodeEntryPoint->UpdateRevision > LatestRevision &&
          (MicrocodeEntryPoint->ProcessorFlags & (1 << PlatformId))
          ) {
        if (MicrocodeEntryPoint->DataSize == 0) {
          CheckSum32 = CalculateSum32 ((UINT32 *)MicrocodeEntryPoint, 2048);
        } else {
          CheckSum32 = CalculateSum32 ((UINT32 *)MicrocodeEntryPoint, MicrocodeEntryPoint->DataSize + sizeof(EFI_CPU_MICROCODE_HEADER));
        }
        if (CheckSum32 == 0) {
          CorrectMicrocode = TRUE;
        }
      } else if ((MicrocodeEntryPoint->DataSize != 0) &&
                 (MicrocodeEntryPoint->UpdateRevision > LatestRevision)) {
        ExtendedTableLength = MicrocodeEntryPoint->TotalSize - (MicrocodeEntryPoint->DataSize + sizeof (EFI_CPU_MICROCODE_HEADER));
        if (ExtendedTableLength != 0) {
          //
          // Extended Table exists, check if the CPU is in the support list
          //
          ExtendedTableHeader = (EFI_CPU_MICROCODE_EXTENDED_TABLE_HEADER *)((UINT8 *)(MicrocodeEntryPoint) + MicrocodeEntryPoint->DataSize + sizeof (EFI_CPU_MICROCODE_HEADER));
          //
          // Calculate Extended Checksum
          //
          if ((ExtendedTableLength % 4) == 0) {
            CheckSum32 = CalculateSum32 ((UINT32 *)ExtendedTableHeader, ExtendedTableLength);
            if (CheckSum32 == 0) {
              //
              // Checksum correct
              //
              ExtendedTableCount = ExtendedTableHeader->ExtendedSignatureCount;
              ExtendedTable      = (EFI_CPU_MICROCODE_EXTENDED_TABLE *)(ExtendedTableHeader + 1);
              for (Index = 0; Index < ExtendedTableCount; Index++) {
                CheckSum32 = CalculateSum32 ((UINT32 *)ExtendedTable, sizeof(EFI_CPU_MICROCODE_EXTENDED_TABLE));
                if (CheckSum32 == 0) {
                  //
                  // Verify Header
                  //
                  if ((ExtendedTable->ProcessorSignature == RegEax) &&
                      (ExtendedTable->ProcessorFlag & (1 << PlatformId))
                      ) {
                    //
                    // Found one
                    //
                    CorrectMicrocode = TRUE;
                    break;
                  }
                }
                ExtendedTable++;
              }
            }
          }
        }
      }
    } else {
      //
      // It is the padding data between the microcode patches, used for microcode patch
      // alignment. Because a microcode patch is a multiple of 1 KByte, the padding data
      // should not exist if the microcode patch alignment value is not larger than
      // 1 KByte. So the microcode alignment value should be larger than 1 KByte, and we
      // can skip SIZE_1KB of padding data to find the next possible microcode patch header.
      //
      MicrocodeEntryPoint = (EFI_CPU_MICROCODE_HEADER *) (((UINTN) MicrocodeEntryPoint) + SIZE_1KB);
      continue;
    }

    //
    // Get the next patch.
    //
    if (MicrocodeEntryPoint->DataSize == 0) {
      TotalSize = 2048;
    } else {
      TotalSize = MicrocodeEntryPoint->TotalSize;
    }

    if (CorrectMicrocode) {
      LatestRevision              = MicrocodeEntryPoint->UpdateRevision;
      MicrocodeInfo.MicrocodeData = (VOID *)((UINTN)MicrocodeEntryPoint + sizeof (EFI_CPU_MICROCODE_HEADER));
      MicrocodeInfo.MicrocodeSize = TotalSize;
      MicrocodeInfo.ProcessorId   = RegEax;
    }

    MicrocodeEntryPoint = (EFI_CPU_MICROCODE_HEADER *) (((UINTN) MicrocodeEntryPoint) + TotalSize);
  } while (((UINTN) MicrocodeEntryPoint < MicrocodeEnd));

  if (LatestRevision > 0) {
    //
    // Get microcode update signature of currently loaded microcode update
    //
    CurrentSignature = GetCurrentMicrocodeSignature ();
    //
    // If no microcode update has been loaded, then trigger microcode load.
    //
    if (CurrentSignature == 0) {
      AsmWriteMsr64 (
        EFI_MSR_IA32_BIOS_UPDT_TRIG,
        (UINT64) (UINTN) MicrocodeInfo.MicrocodeData
        );
      MicrocodeInfo.Load = TRUE;
    } else {
      MicrocodeInfo.Load = FALSE;
    }
  }
}
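//
// Hedged sketch of the signature read used above (the real
// GetCurrentMicrocodeSignature() is defined elsewhere): per the Intel SDM,
// clear IA32_BIOS_SIGN_ID (MSR 0x8B), execute CPUID leaf 1, then read the
// update signature from the high 32 bits of the same MSR.
//
UINT32
GetCurrentMicrocodeSignatureSketch (
  VOID
  )
{
  UINT32  RegEax;

  AsmWriteMsr64 (0x8B, 0);
  AsmCpuid (1, &RegEax, NULL, NULL, NULL);
  return (UINT32) RShiftU64 (AsmReadMsr64 (0x8B), 32);
}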