/**
  The page fault handler that on-demand read PI CpuSaveStates for framework use. If the fault
  is not targeted to mFrameworkSmst->CpuSaveState range, the function will return FALSE to let
  PageFaultHandlerHook know it needs to pass the fault over to original page fault handler.

  @retval TRUE     The page fault is correctly handled.
  @retval FALSE    The page fault is not handled and is passed through to original handler.

**/
BOOLEAN
PageFaultHandler (
  VOID
  )
{
  BOOLEAN  IsHandled;
  UINT64   *PageTable;
  UINT64   PFAddress;
  UINTN    NumCpuStatePages;

  //
  // This handler is only meaningful while the page-table hook is active.
  //
  ASSERT (mPageTableHookEnabled);
  //
  // Serialize page-fault handling across processors.
  //
  AcquireSpinLock (&mPFLock);
  //
  // Mask off the low CR3 flag bits to get the page-table base.
  // NOTE(review): PageTable is computed but never used below — looks like a dead store; confirm.
  //
  PageTable = (UINT64*)(UINTN)(AsmReadCr3 () & mPhyMask);
  //
  // CR2 holds the faulting linear address.
  //
  PFAddress = AsmReadCr2 ();
  NumCpuStatePages = EFI_SIZE_TO_PAGES (mNumberOfProcessors * sizeof (EFI_SMM_CPU_SAVE_STATE));
  IsHandled = FALSE;
  //
  // Only claim faults that land in the same 2MB region as the framework CpuSaveState buffer.
  //
  if (((UINTN)mFrameworkSmst->CpuSaveState & ~(SIZE_2MB-1)) == (PFAddress & ~(SIZE_2MB-1))) {
    if ((UINTN)mFrameworkSmst->CpuSaveState <= PFAddress &&
        PFAddress < (UINTN)mFrameworkSmst->CpuSaveState + EFI_PAGES_TO_SIZE (NumCpuStatePages)
        ) {
      //
      // Mark the faulting 4KB page present+writable (bits 12..20 index the page table),
      // flush the TLB so the new mapping is visible, then populate the page content.
      //
      mCpuStatePageTable[BitFieldRead64 (PFAddress, 12, 20)] |= BIT0 | BIT1; // present and rw
      CpuFlushTlb ();
      ReadWriteCpuStatePage (PFAddress & ~(SIZE_4KB-1), TRUE);
      IsHandled = TRUE;
    } else {
      //
      // Inside the save-state 2MB region but outside the buffer itself: unexpected.
      //
      ASSERT (FALSE);
    }
  }
  ReleaseSpinLock (&mPFLock);
  return IsHandled;
}
/**
  Save the volatile registers required to be restored following INIT IPI.

  @param  VolatileRegisters  Returns buffer saved the volatile resisters
**/
VOID
SaveVolatileRegisters (
  OUT CPU_VOLATILE_REGISTERS  *VolatileRegisters
  )
{
  UINT32  VersionInfoEdx;

  //
  // Capture the control registers unconditionally.
  //
  VolatileRegisters->Cr0 = AsmReadCr0 ();
  VolatileRegisters->Cr3 = AsmReadCr3 ();
  VolatileRegisters->Cr4 = AsmReadCr4 ();

  //
  // Debug registers are only saved when CPUID.[EAX=01H]:EDX.BIT2 reports
  // that the Debugging Extensions feature is supported.
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &VersionInfoEdx);
  if ((VersionInfoEdx & BIT2) != 0) {
    VolatileRegisters->Dr0 = AsmReadDr0 ();
    VolatileRegisters->Dr1 = AsmReadDr1 ();
    VolatileRegisters->Dr2 = AsmReadDr2 ();
    VolatileRegisters->Dr3 = AsmReadDr3 ();
    VolatileRegisters->Dr6 = AsmReadDr6 ();
    VolatileRegisters->Dr7 = AsmReadDr7 ();
  }
}
/**
  This function prepare TXT environment.

  @param AcmBase  ACM base
  @param AcmSize  ACM size
**/
VOID
TxtPrepareEnvironment (
  IN UINT32  AcmBase,
  IN UINT32  AcmSize
  )
{
  MLE_PRIVATE_DATA  *MleData;

  DEBUG ((EFI_D_INFO, "(TXT) TxtPrepareEnvironment ...\n"));

  MleData = GetMlePrivateData ();

  //
  // Snapshot the current MTRR state into the MLE private data.
  //
  DEBUG ((EFI_D_INFO, "(TXT) TxtSaveMtrr ...\n"));
  TxtSaveMtrr (MleData);
  DEBUG ((EFI_D_INFO, "(TXT) TxtSaveMtrr Done\n"));

  //
  // Record the current control registers.
  //
  MleData->Cr0 = AsmReadCr0 ();
  MleData->Cr4 = AsmReadCr4 ();
  MleData->Cr3 = (UINT32)AsmReadCr3 ();

  //
  // Program MTRRs so the ACM region is write-back cacheable.
  //
  DEBUG ((EFI_D_INFO, "(TXT) TxtConfigMtrr ...\n"));
  TxtConfigMtrr (AcmBase, AcmSize, MEMORY_TYPE_WB);
  DEBUG ((EFI_D_INFO, "(TXT) TxtConfigMtrr Done\n"));

  //
  // Set the numeric-error (NE) bit in CR0.
  //
  DEBUG ((EFI_D_INFO, "(TXT) AsmWriteCr0 ...\n"));
  AsmWriteCr0 (AsmReadCr0 () | CR0_NE);
  DEBUG ((EFI_D_INFO, "(TXT) AsmWriteCr0 Done\n"));

  DEBUG ((EFI_D_INFO, "(TXT) TxtPrepareEnvironment Done\n"));
  return;
}
/** Flashes TLB caches. */ VOID VmFlashCaches(VOID) { // just reload CR3 AsmWriteCr3(AsmReadCr3()); }
/**
  Flushes all the Translation Lookaside Buffers(TLB) entries in a CPU.

  Flushes all the Translation Lookaside Buffers(TLB) entries in a CPU.

**/
VOID
EFIAPI
CpuFlushTlb (
  VOID
  )
{
  UINTN  CurrentCr3;

  //
  // Reloading CR3 with its own value flushes the TLB.
  //
  CurrentCr3 = AsmReadCr3 ();
  AsmWriteCr3 (CurrentCr3);
}
/**
  Reads CR3 and splits it into the page-map base address and its CR3 cache flags.

  @param PageTable  Receives the base of the current top-level paging structure.
  @param Flags      Receives the PWT/PCD flag bits taken from CR3.
**/
VOID GetCurrentPageTable(PAGE_MAP_AND_DIRECTORY_POINTER **PageTable, UINTN *Flags)
{
  UINTN Cr3Reg;

  Cr3Reg = AsmReadCr3();
  DBG("GetCurrentPageTable: CR3 = 0x%lx\n", Cr3Reg);
  // Low bits of CR3 are cache-control flags, not part of the address.
  *Flags = Cr3Reg & (CR3_FLAG_PWT | CR3_FLAG_PCD);
  *PageTable = (PAGE_MAP_AND_DIRECTORY_POINTER*)(UINTN)(Cr3Reg & CR3_ADDR_MASK);
}
/**
  This function initialize basic context for FRM.
**/
VOID
InitBasicContext (
  VOID
  )
{
  UINT32  RegEax;

  //
  // Discover platform facts from ACPI; each one is mandatory, so a zero/invalid
  // result dead-loops rather than continuing with a broken configuration.
  //
  mHostContextCommon.CpuNum = GetCpuNumFromAcpi ();

  GetPciExpressInfoFromAcpi (&mHostContextCommon.PciExpressBaseAddress, &mHostContextCommon.PciExpressLength);
  PcdSet64 (PcdPciExpressBaseAddress, mHostContextCommon.PciExpressBaseAddress);
  if (mHostContextCommon.PciExpressBaseAddress == 0) {
    CpuDeadLoop ();
  }

  mHostContextCommon.AcpiTimerIoPortBaseAddress = GetAcpiTimerPort (&mHostContextCommon.AcpiTimerWidth);
  PcdSet16 (PcdAcpiTimerIoPortBaseAddress, mHostContextCommon.AcpiTimerIoPortBaseAddress);
  PcdSet8 (PcdAcpiTimerWidth, mHostContextCommon.AcpiTimerWidth);
  if (mHostContextCommon.AcpiTimerIoPortBaseAddress == 0) {
    CpuDeadLoop ();
  }

  mHostContextCommon.ResetIoPortBaseAddress = GetAcpiResetPort ();

  mHostContextCommon.AcpiPmControlIoPortBaseAddress = GetAcpiPmControlPort ();
  if (mHostContextCommon.AcpiPmControlIoPortBaseAddress == 0) {
    CpuDeadLoop ();
  }

  //
  // Allocate per-CPU host/guest context arrays sized by the discovered CPU count.
  //
  mHostContextCommon.HostContextPerCpu   = AllocatePages (FRM_SIZE_TO_PAGES(sizeof(FRM_HOST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
  mGuestContextCommon.GuestContextPerCpu = AllocatePages (FRM_SIZE_TO_PAGES(sizeof(FRM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);

  //
  // Record the low-memory region and allocate a backup buffer for it.
  //
  mHostContextCommon.LowMemoryBase       = mCommunicationData.LowMemoryBase;
  mHostContextCommon.LowMemorySize       = mCommunicationData.LowMemorySize;
  mHostContextCommon.LowMemoryBackupBase = (UINT64)(UINTN)AllocatePages (FRM_SIZE_TO_PAGES ((UINTN)mCommunicationData.LowMemorySize));

  //
  // Save current context
  //
  mBspIndex = ApicToIndex (ReadLocalApicId ());

  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0 = AsmReadCr0 ();
  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3 = AsmReadCr3 ();
  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4 = AsmReadCr4 ();
  AsmReadGdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr);
  AsmReadIdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Idtr);

  //
  // Determine the physical-address width via CPUID; fall back to 36 bits when
  // the extended leaf is not supported.
  //
  AsmCpuid (CPUID_EXTENDED_INFORMATION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_ADDRESS_SIZE) {
    AsmCpuid (CPUID_EXTENDED_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
    mHostContextCommon.PhysicalAddressBits = (UINT8)RegEax;
  } else {
    mHostContextCommon.PhysicalAddressBits = 36;
  }
}
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS  StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  //
  // Patch the 32-bit immediate operand of the flat-mode far jump (+3 skips the
  // opcode bytes) to point at the protected-mode entry in the copied code.
  //
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    //
    // Same patching for the long-mode jump (+2 skips its opcode bytes); only
    // present when the assembly stub includes a long-mode path.
    //
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  // The exchange area lives immediately after the copied startup code.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  //
  // Publish stack, buffer, CR3 and FPU-init entry for the APs to pick up.
  //
  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
/**
  Initialize page table for pages contain HookData.
  The function initialize PDE for 2MB range that contains HookData. If the related PDE points
  to a 2MB page, a page table will be allocated and initialized for 4KB pages. Otherwise we juse
  use the original page table.

  @param[in] HookData  Based on which to initialize page table.

  @return  The pointer to a Page Table that points to 4KB pages which contain HookData.
**/
UINT64 *
InitCpuStatePageTable (
  IN VOID *HookData
  )
{
  UINTN  Index;
  UINT64 *PageTable;
  UINT64 *Pdpte;
  UINT64 HookAddress;
  UINT64 Pde;
  UINT64 Address;

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  // CPUID leaf 0x80000008 EAX[7:0] reports the physical-address width; the mask
  // is then clipped to 48 bits and page-aligned.
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  mPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  mPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Walk the 4-level page tables: PML4 (bits 39..47), then PDPT (bits 30..38),
  // down to the page directory that covers HookAddress.
  //
  HookAddress = (UINT64)(UINTN)HookData;
  PageTable   = (UINT64 *)(UINTN)(AsmReadCr3 () & mPhyMask);
  PageTable   = (UINT64 *)(UINTN)(PageTable[BitFieldRead64 (HookAddress, 39, 47)] & mPhyMask);
  PageTable   = (UINT64 *)(UINTN)(PageTable[BitFieldRead64 (HookAddress, 30, 38)] & mPhyMask);

  //
  // NOTE(review): despite the name, Pdpte actually points at the page directory here.
  //
  Pdpte = (UINT64 *)(UINTN)PageTable;
  Pde   = Pdpte[BitFieldRead64 (HookAddress, 21, 29)];
  ASSERT ((Pde & BIT0) != 0); // Present and 2M Page

  if ((Pde & BIT7) == 0) { // 4KB Page Directory
    //
    // PDE already references a 4KB page table; reuse it as-is.
    //
    PageTable = (UINT64 *)(UINTN)(Pde & mPhyMask);
  } else {
    //
    // PDE maps a 2MB page: split it by building a fresh 4KB page table covering
    // the same 2MB range, then repoint the PDE at it.
    //
    ASSERT ((Pde & mPhyMask) == (HookAddress & ~(SIZE_2MB-1))); // 2MB Page Point to HookAddress
    PageTable = AllocatePages (1);
    ASSERT (PageTable != NULL);
    Address = HookAddress & ~(SIZE_2MB-1);
    for (Index = 0; Index < 512; Index++) {
      PageTable[Index] = Address | BIT0 | BIT1; // Present and RW
      Address += SIZE_4KB;
    }
    Pdpte[BitFieldRead64 (HookAddress, 21, 29)] = (UINT64)(UINTN)PageTable | BIT0 | BIT1; // Present and RW
  }
  return PageTable;
}
/** This function initialize guest BSP in S3. **/ VOID BspS3Init ( VOID ) { UINT32 Index; mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0 = AsmReadCr0 (); mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3 = AsmReadCr3 (); mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4 = AsmReadCr4 (); AsmReadGdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr); AsmReadIdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Idtr); InitHostContextPerCpu (mBspIndex); for (Index = 0; Index < mHostContextCommon.CpuNum; Index++) { mGuestContextCommon.GuestContextPerCpu[Index].Cr0 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0; mGuestContextCommon.GuestContextPerCpu[Index].Cr3 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3; mGuestContextCommon.GuestContextPerCpu[Index].Cr4 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4; CopyMem (&mGuestContextCommon.GuestContextPerCpu[Index].Gdtr, &mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr, sizeof(IA32_DESCRIPTOR)); } InitGuestContextPerCpu (mBspIndex); }
/**
  The Page fault handler to save SMM profile data.

  @param  Rip        The RIP when exception happens.
  @param  ErrorCode  The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN Rip,
  UINTN ErrorCode
  )
{
  UINT64                      *PageTable;
  UINT64                      PFAddress;
  UINTN                       CpuIndex;
  UINTN                       Index;
  UINT64                      InstructionAddress;
  UINTN                       MaxEntryNumber;
  UINTN                       CurrentEntryNumber;
  BOOLEAN                     IsValidPFAddress;
  SMM_PROFILE_ENTRY           *SmmProfileEntry;
  UINT64                      SmiCommand;
  EFI_STATUS                  Status;
  UINT8                       SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO  IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile does not start, call original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  //
  // Pause Branch Trace Store while we work so it does not record the handler itself.
  //
  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  PageTable        = (UINT64 *)AsmReadCr3 ();
  PFAddress        = AsmReadCr2 ();   // faulting linear address
  CpuIndex         = GetCpuIndex ();

  //
  // Restore the mapping using the handler that matches the address range.
  //
  if (PFAddress <= 0xFFFFFFFF) {
    RestorePageTableBelow4G (PageTable, PFAddress, CpuIndex, ErrorCode);
  } else {
    RestorePageTableAbove4G (PageTable, PFAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
  }

  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    //
    // Scan every CPU's save state for an I/O access to the SMI command port to
    // detect a software-triggered SMI.
    //
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand = (UINT64)SoftSmiValue;
        break;
      }
    }

    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode   == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address     == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum      == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd      == SmiCommand)) {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber     = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        //
        // Ring-buffer mode wraps the write index instead of stopping at the limit.
        //
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  CPU_REGISTER_TABLE  *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        //
        // Read-modify-write: only the [ValidBitStart .. ValidBitStart+ValidBitLength-1]
        // field of the register is updated with the table value.
        //
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        // Other control-register indexes are silently ignored.
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;
    default:
      // Unknown register types are ignored.
      break;
    }
  }
}
{ volatile MP_CPU_EXCHANGE_INFO *ExchangeInfo; UINTN Index; PeiCpuMpData->ApFunction = (UINTN) Procedure; PeiCpuMpData->ApFunctionArgument = (UINTN) ProcedureArgument; PeiCpuMpData->FinishedCount = 0; ExchangeInfo = PeiCpuMpData->MpCpuExchangeInfo; ExchangeInfo->Lock = 0; ExchangeInfo->StackStart = PeiCpuMpData->Buffer; ExchangeInfo->StackSize = PeiCpuMpData->CpuApStackSize; ExchangeInfo->BufferStart = PeiCpuMpData->WakeupBuffer; ExchangeInfo->PmodeOffset = PeiCpuMpData->AddressMap.PModeEntryOffset; ExchangeInfo->LmodeOffset = PeiCpuMpData->AddressMap.LModeEntryOffset; ExchangeInfo->Cr3 = AsmReadCr3 (); ExchangeInfo->CFunction = (UINTN) ApCFunction; ExchangeInfo->NumApsExecuting = 0; ExchangeInfo->PeiCpuMpData = PeiCpuMpData; // // Get the BSP's data of GDT and IDT // CopyMem ((VOID *)&ExchangeInfo->GdtrProfile, &mGdt, sizeof(mGdt)); AsmReadIdtr ((IA32_DESCRIPTOR *) &ExchangeInfo->IdtrProfile); if (PeiCpuMpData->ApLoopMode == ApInMwaitLoop) { // // Get AP target C-state each time when waking up AP, // for it maybe updated by platform again //
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  PreSmmInit       Specify the target register table.
                           If TRUE, the target is the pre-SMM-init register table.
                           If FALSE, the target is the post-SMM-init register table.
  @param  ProcessorNumber  Handle number of specified logical processor.

**/
VOID
SetProcessorRegisterEx (
  IN BOOLEAN  PreSmmInit,
  IN UINTN    ProcessorNumber
  )
{
  CPU_REGISTER_TABLE        *RegisterTable;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  UINTN                     StartIndex;
  UINTN                     EndIndex;

  //
  // Select the pre- or post-SMM-init register table for this processor.
  //
  if (PreSmmInit) {
    RegisterTable = &mCpuConfigConextBuffer.PreSmmInitRegisterTable[ProcessorNumber];
  } else {
    RegisterTable = &mCpuConfigConextBuffer.RegisterTable[ProcessorNumber];
  }

  //
  // If microcode patch has been applied, then the first register table entry
  // is for microcode upate, so it is skipped.
  // NOTE(review): StartIndex begins at 0, so no entry is actually skipped here;
  // confirm whether the comment is stale or a skip is missing.
  //
  StartIndex = 0;

  //
  // Partition the table: entries before NumberBeforeReset are applied before a
  // CPU-only reset; the remainder afterwards.
  //
  if (mSetBeforeCpuOnlyReset) {
    EndIndex   = StartIndex + RegisterTable->NumberBeforeReset;
  } else {
    StartIndex += RegisterTable->NumberBeforeReset;
    EndIndex   = RegisterTable->TableLength;
  }

  //
  // Traverse Register Table of this logical processor
  //
  for (Index = StartIndex; Index < EndIndex; Index++) {

    RegisterTableEntry = &RegisterTable->RegisterTableEntry[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        //
        // Read-modify-write only the configured bit field of the register.
        //
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      case 8:
        //
        // Do we need to support CR8?
        //
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (!mRestoreSettingAfterInit) {
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
        }
      }
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;
    default:
      break;
    }
  }
}
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTables      Pointer to register table of the running processor.
  @param  RegisterTableCount  Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTables,
  IN UINTN               RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  //
  // Find the register table whose InitialApicId matches this processor.
  //
  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        //
        // Read-modify-write only the configured bit field of the register.
        //
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // MMIO writes are serialized with a shared lock; Index/HighIndex combine
      // into the 64-bit MMIO address.
      //
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;
    default:
      break;
    }
  }
}
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr       = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp   = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      // (busy-wait on the flag the AP's SMI handler sets).
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
/**
  Update page table to map the memory correctly in order to make the instruction
  which caused page fault execute successfully. And it also save the original page
  table to be restored in single-step exception.

  @param  PageTable         PageTable Address.
  @param  PFAddress         The memory address which caused page fault exception.
  @param  CpuIndex          The index of the processor.
  @param  ErrorCode         The Error code of exception.
  @param  IsValidPFAddress  The flag indicates if SMM profile data need be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64   *PageTable,
  UINT64   PFAddress,
  UINTN    CpuIndex,
  UINTN    ErrorCode,
  BOOLEAN  *IsValidPFAddress
  )
{
  UINTN    PTIndex;
  UINT64   Address;
  BOOLEAN  Nx;
  BOOLEAN  Existed;
  UINTN    Index;
  UINTN    PFIndex;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  //
  // If page fault address is 4GB above.
  //

  //
  // Check if page fault address has existed in page table.
  // If it exists in page table but page fault is generated,
  // there are 2 possible reasons: 1. present flag is set to 0; 2. instruction fetch in protected memory range.
  //
  Existed   = FALSE;
  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex   = BitFieldRead64 (PFAddress, 39, 47);
  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
    // PML4E
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PDPTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      PTIndex   = BitFieldRead64 (PFAddress, 21, 29);
      // PD
      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
        //
        // 2MB page
        //
        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
          Existed = TRUE;
        }
      } else {
        //
        // 4KB page
        //
        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if (PageTable != 0) {
          //
          // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
          //
          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
            Existed = TRUE;
          }
        }
      }
    }
  }

  //
  // If page entry does not existed in page table at all, create a new entry.
  //
  if (!Existed) {
    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If page fault address above 4GB is in protected range but it causes a page fault exception,
      // Will create a page entry for this page fault address, make page table entry as present/rw and execution-disable.
      // this access is not saved into SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in page table for page fault address.
    //
    SmiDefaultPFHandler ();

    //
    // Find the page table entry created just now.
    // Re-read CR3/CR2 and walk down to the PD entry covering the fault.
    //
    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML4E
    PTIndex   = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex   = BitFieldRead64 (PFAddress, 21, 29);
    Address   = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;

    //
    // Check if 2MB-page entry need be changed to 4KB-page entry.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);
      // PTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      //
      // Fill the new 4KB page table; protected pages lose present/rw and may
      // get the XD bit when supported.
      //
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | IA32_PG_RW | IA32_PG_P;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
        }
        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }
        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }
        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
      }
      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record old entries with non-present status
  // Old entries include the memory which instruction is at and the memory which instruction access.
  //
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }

  //
  // Add present flag or clear XD flag to make page fault handler succeed.
  //
  PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If page fault is caused by instruction fetch, clear XD bit in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}