/**
  This function prepares the TXT environment.

  @param AcmBase  ACM base
  @param AcmSize  ACM size
**/
VOID
TxtPrepareEnvironment (
  IN UINT32  AcmBase,
  IN UINT32  AcmSize
  )
{
  MLE_PRIVATE_DATA  *MlePrivateData;

  DEBUG ((EFI_D_INFO, "(TXT) TxtPrepareEnvironment ...\n"));

  MlePrivateData = GetMlePrivateData ();

  DEBUG ((EFI_D_INFO, "(TXT) TxtSaveMtrr ...\n"));
  TxtSaveMtrr (MlePrivateData);
  DEBUG ((EFI_D_INFO, "(TXT) TxtSaveMtrr Done\n"));

  MlePrivateData->Cr0 = AsmReadCr0 ();
  MlePrivateData->Cr4 = AsmReadCr4 ();
  MlePrivateData->Cr3 = (UINT32)AsmReadCr3 ();

  DEBUG ((EFI_D_INFO, "(TXT) TxtConfigMtrr ...\n"));
  TxtConfigMtrr (
    AcmBase,
    AcmSize,
    MEMORY_TYPE_WB
    );
  DEBUG ((EFI_D_INFO, "(TXT) TxtConfigMtrr Done\n"));

  DEBUG ((EFI_D_INFO, "(TXT) AsmWriteCr0 ...\n"));
  AsmWriteCr0 (AsmReadCr0 () | CR0_NE);
  DEBUG ((EFI_D_INFO, "(TXT) AsmWriteCr0 Done\n"));

  DEBUG ((EFI_D_INFO, "(TXT) TxtPrepareEnvironment Done\n"));
  return;
}
/**
  Preparation before programming MTRR.

  This function will do some preparation for programming MTRRs:
  disable cache, invalidate the cache, and disable MTRR caching functionality.

  @param[out] MtrrContext  Pointer to the context to save
**/
VOID
PreMtrrChange (
  OUT MTRR_CONTEXT  *MtrrContext
  )
{
  //
  // Disable interrupts and save current interrupt state
  //
  MtrrContext->InterruptState = SaveAndDisableInterrupts ();

  //
  // Enter no fill cache mode, CD=1 (Bit 30), NW=0 (Bit 29)
  //
  AsmDisableCache ();

  //
  // Save original CR4 value and clear PGE flag (Bit 7)
  //
  MtrrContext->Cr4 = AsmReadCr4 ();
  AsmWriteCr4 (MtrrContext->Cr4 & ~BIT7);

  //
  // Flush all TLBs
  //
  CpuFlushTlb ();

  //
  // Disable MTRRs
  //
  AsmMsrBitFieldWrite64 (MTRR_LIB_IA32_MTRR_DEF_TYPE, 10, 11, 0);
}
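//
// A minimal sketch of the matching restore path (not part of this listing),
// assuming the MTRR_CONTEXT layout used by PreMtrrChange above. It follows
// the usual pattern: re-enable MTRRs, flush TLBs, re-enable the cache, then
// restore CR4 and the saved interrupt state.
//
VOID
PostMtrrChange (
  IN MTRR_CONTEXT  *MtrrContext
  )
{
  //
  // Re-enable fixed-range and variable-range MTRRs
  // (Bits 10 and 11 of IA32_MTRR_DEF_TYPE)
  //
  AsmMsrBitFieldWrite64 (MTRR_LIB_IA32_MTRR_DEF_TYPE, 10, 11, 3);

  //
  // Flush all TLBs again after changing the MTRRs
  //
  CpuFlushTlb ();

  //
  // Return to normal cache mode, CD=0 and NW=0
  //
  AsmEnableCache ();

  //
  // Restore the original CR4 value (including PGE) and interrupt state
  //
  AsmWriteCr4 (MtrrContext->Cr4);
  SetInterruptState (MtrrContext->InterruptState);
}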
/**
  Save the volatile registers required to be restored following an INIT IPI.

  @param VolatileRegisters  Pointer to the buffer in which the volatile
                            registers are saved
**/
VOID
SaveVolatileRegisters (
  OUT CPU_VOLATILE_REGISTERS  *VolatileRegisters
  )
{
  UINT32  RegEdx;

  VolatileRegisters->Cr0 = AsmReadCr0 ();
  VolatileRegisters->Cr3 = AsmReadCr3 ();
  VolatileRegisters->Cr4 = AsmReadCr4 ();

  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
  if ((RegEdx & BIT2) != 0) {
    //
    // The processor supports the Debugging Extensions feature
    // (CPUID.[EAX=01H]:EDX.BIT2), so save the debug registers as well.
    //
    VolatileRegisters->Dr0 = AsmReadDr0 ();
    VolatileRegisters->Dr1 = AsmReadDr1 ();
    VolatileRegisters->Dr2 = AsmReadDr2 ();
    VolatileRegisters->Dr3 = AsmReadDr3 ();
    VolatileRegisters->Dr6 = AsmReadDr6 ();
    VolatileRegisters->Dr7 = AsmReadDr7 ();
  }
}
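//
// A minimal sketch of the matching restore path (not part of this listing),
// assuming the same CPU_VOLATILE_REGISTERS layout. CR3 is written before
// CR4 and CR0, following the usual pattern, so that the paging hierarchy is
// in place before paging-related control bits are restored.
//
VOID
RestoreVolatileRegisters (
  IN CPU_VOLATILE_REGISTERS  *VolatileRegisters
  )
{
  UINT32  RegEdx;

  AsmWriteCr3 (VolatileRegisters->Cr3);
  AsmWriteCr4 (VolatileRegisters->Cr4);
  AsmWriteCr0 (VolatileRegisters->Cr0);

  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
  if ((RegEdx & BIT2) != 0) {
    //
    // Restore the debug registers only if Debugging Extensions are supported
    //
    AsmWriteDr0 (VolatileRegisters->Dr0);
    AsmWriteDr1 (VolatileRegisters->Dr1);
    AsmWriteDr2 (VolatileRegisters->Dr2);
    AsmWriteDr3 (VolatileRegisters->Dr3);
    AsmWriteDr6 (VolatileRegisters->Dr6);
    AsmWriteDr7 (VolatileRegisters->Dr7);
  }
}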
/**
  Preparation before programming MTRR.

  This function will do some preparation for programming MTRRs:
  disable cache, invalidate the cache, and disable MTRR caching functionality.

  @return CR4 value before changing.
**/
UINTN
PreMtrrChange (
  VOID
  )
{
  UINTN  Value;

  //
  // Enter no fill cache mode, CD=1 (Bit 30), NW=0 (Bit 29)
  //
  AsmDisableCache ();

  //
  // Save original CR4 value and clear PGE flag (Bit 7)
  //
  Value = AsmReadCr4 ();
  AsmWriteCr4 (Value & ~BIT7);

  //
  // Flush all TLBs
  //
  CpuFlushTlb ();

  //
  // Disable MTRRs
  //
  AsmMsrBitFieldWrite64 (MTRR_LIB_IA32_MTRR_DEF_TYPE, 10, 11, 0);

  //
  // Return original CR4 value
  //
  return Value;
}
/**
  This function initializes the basic context for FRM.
**/
VOID
InitBasicContext (
  VOID
  )
{
  UINT32  RegEax;

  mHostContextCommon.CpuNum = GetCpuNumFromAcpi ();

  GetPciExpressInfoFromAcpi (&mHostContextCommon.PciExpressBaseAddress, &mHostContextCommon.PciExpressLength);
  PcdSet64 (PcdPciExpressBaseAddress, mHostContextCommon.PciExpressBaseAddress);
  if (mHostContextCommon.PciExpressBaseAddress == 0) {
    CpuDeadLoop ();
  }

  mHostContextCommon.AcpiTimerIoPortBaseAddress = GetAcpiTimerPort (&mHostContextCommon.AcpiTimerWidth);
  PcdSet16 (PcdAcpiTimerIoPortBaseAddress, mHostContextCommon.AcpiTimerIoPortBaseAddress);
  PcdSet8 (PcdAcpiTimerWidth, mHostContextCommon.AcpiTimerWidth);
  if (mHostContextCommon.AcpiTimerIoPortBaseAddress == 0) {
    CpuDeadLoop ();
  }

  mHostContextCommon.ResetIoPortBaseAddress = GetAcpiResetPort ();

  mHostContextCommon.AcpiPmControlIoPortBaseAddress = GetAcpiPmControlPort ();
  if (mHostContextCommon.AcpiPmControlIoPortBaseAddress == 0) {
    CpuDeadLoop ();
  }

  mHostContextCommon.HostContextPerCpu   = AllocatePages (FRM_SIZE_TO_PAGES (sizeof (FRM_HOST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
  mGuestContextCommon.GuestContextPerCpu = AllocatePages (FRM_SIZE_TO_PAGES (sizeof (FRM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);

  mHostContextCommon.LowMemoryBase       = mCommunicationData.LowMemoryBase;
  mHostContextCommon.LowMemorySize       = mCommunicationData.LowMemorySize;
  mHostContextCommon.LowMemoryBackupBase = (UINT64)(UINTN)AllocatePages (FRM_SIZE_TO_PAGES ((UINTN)mCommunicationData.LowMemorySize));

  //
  // Save current context
  //
  mBspIndex = ApicToIndex (ReadLocalApicId ());

  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0 = AsmReadCr0 ();
  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3 = AsmReadCr3 ();
  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4 = AsmReadCr4 ();
  AsmReadGdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr);
  AsmReadIdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Idtr);

  AsmCpuid (CPUID_EXTENDED_INFORMATION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_ADDRESS_SIZE) {
    AsmCpuid (CPUID_EXTENDED_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
    mHostContextCommon.PhysicalAddressBits = (UINT8)RegEax;
  } else {
    mHostContextCommon.PhysicalAddressBits = 36;
  }
}
/**
  This function initializes the host context for the given CPU.

  @param Index  CPU Index
**/
VOID
InitHostContextPerCpu (
  IN UINT32  Index
  )
{
  //
  // VmxOn for this CPU. CR4 and CR0 are OR-ed with the bits that are set in
  // both IA32_VMX_CRx_FIXED0 and IA32_VMX_CRx_FIXED1, i.e. the bits that
  // must be 1 for VMX operation.
  //
  AsmWbinvd ();
  AsmWriteCr3 (mHostContextCommon.PageTable);
  AsmWriteCr4 (
    AsmReadCr4 () | CR4_PAE |
    ((UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX))
    );
  AsmWriteCr0 (
    AsmReadCr0 () |
    ((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX))
    );

  AsmVmxOn (&mHostContextCommon.HostContextPerCpu[Index].Vmcs);
}
/**
  Initialize IDT entries to support source level debug.
**/
VOID
InitializeDebugIdt (
  VOID
  )
{
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  UINTN                     InterruptHandler;
  IA32_DESCRIPTOR           IdtDescriptor;
  UINTN                     Index;
  UINT16                    CodeSegment;
  UINT32                    RegEdx;

  AsmReadIdtr (&IdtDescriptor);

  //
  // Use current CS as the segment selector of interrupt gate in IDT
  //
  CodeSegment = AsmReadCs ();

  IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *)IdtDescriptor.Base;

  for (Index = 0; Index < 20; Index++) {
    if (((PcdGet32 (PcdExceptionsIgnoredByDebugger) & ~(BIT1 | BIT3)) & (1 << Index)) != 0) {
      //
      // If the exception is masked as ignored by the debugger, skip it;
      // INT1 and INT3 can never be masked out.
      //
      continue;
    }
    InterruptHandler = (UINTN)&Exception0Handle + Index * ExceptionStubHeaderSize;
    IdtEntry[Index].Bits.OffsetLow  = (UINT16)(UINTN)InterruptHandler;
    IdtEntry[Index].Bits.OffsetHigh = (UINT16)((UINTN)InterruptHandler >> 16);
    IdtEntry[Index].Bits.Selector   = CodeSegment;
    IdtEntry[Index].Bits.GateType   = IA32_IDT_GATE_TYPE_INTERRUPT_32;
  }

  InterruptHandler = (UINTN)&TimerInterruptHandle;
  IdtEntry[DEBUG_TIMER_VECTOR].Bits.OffsetLow  = (UINT16)(UINTN)InterruptHandler;
  IdtEntry[DEBUG_TIMER_VECTOR].Bits.OffsetHigh = (UINT16)((UINTN)InterruptHandler >> 16);
  IdtEntry[DEBUG_TIMER_VECTOR].Bits.Selector   = CodeSegment;
  IdtEntry[DEBUG_TIMER_VECTOR].Bits.GateType   = IA32_IDT_GATE_TYPE_INTERRUPT_32;

  //
  // If the CPU supports Debug Extensions (CPUID:01 EDX:BIT2), then
  // set the DE flag in CR4 to enable I/O breakpoints
  //
  AsmCpuid (1, NULL, NULL, NULL, &RegEdx);
  if ((RegEdx & BIT2) != 0) {
    AsmWriteCr4 (AsmReadCr4 () | BIT3);
  }
}
/**
  Initialize SSE support.
**/
VOID
InitXMM (
  VOID
  )
{
  UINT32  RegEdx;

  AsmCpuid (EFI_CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
  //
  // Check whether SSE2 is supported (CPUID.01H:EDX.BIT26). If so, enable
  // coprocessor monitoring (CR0.MP, BIT1) plus OS support for FXSAVE/FXRSTOR
  // (CR4.OSFXSR, BIT9) and unmasked SIMD floating-point exceptions
  // (CR4.OSXMMEXCPT, BIT10).
  //
  if ((RegEdx & BIT26) != 0) {
    AsmWriteCr0 (AsmReadCr0 () | BIT1);
    AsmWriteCr4 (AsmReadCr4 () | BIT9 | BIT10);
  }
}
/**
  This function is the XSETBV handler.

  @param Index  CPU index
**/
VOID
XsetbvHandler (
  IN UINT32  Index
  )
{
  UINT64  Data64;
  UINT32  XsetbvIndex;

  //
  // Reassemble the 64-bit XCR value from the guest's EDX:EAX pair
  //
  XsetbvIndex = (UINT32)mGuestContextCommon.GuestContextPerCpu[Index].Register.Rcx;
  Data64 = (UINT64)(mGuestContextCommon.GuestContextPerCpu[Index].Register.Rdx & 0xFFFFFFFF);
  Data64 = LShiftU64 (Data64, 32);
  Data64 |= (UINT64)(mGuestContextCommon.GuestContextPerCpu[Index].Register.Rax & 0xFFFFFFFF);

  if (IsXStateSupoprted ()) {
    AsmWriteCr4 (AsmReadCr4 () | CR4_OSXSAVE);
  }

  AsmXSetBv (XsetbvIndex, Data64);

  //
  // Advance the guest RIP past the XSETBV instruction
  //
  VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN (VMCS_N_GUEST_RIP_INDEX) + VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));

  return;
}
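//
// The body of IsXStateSupoprted () is not shown in this listing. A plausible
// minimal implementation, assuming it simply tests the XSAVE feature flag
// reported in CPUID.01H:ECX[26]:
//
BOOLEAN
IsXStateSupoprted (
  VOID
  )
{
  UINT32  RegEcx;

  //
  // CPUID.01H:ECX.XSAVE[bit 26] indicates XSAVE/XRSTOR/XSETBV/XGETBV support
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, &RegEcx, NULL);
  return (BOOLEAN)((RegEcx & BIT26) != 0);
}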
/**
  This function initializes the guest BSP in S3.
**/
VOID
BspS3Init (
  VOID
  )
{
  UINT32  Index;

  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0 = AsmReadCr0 ();
  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3 = AsmReadCr3 ();
  mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4 = AsmReadCr4 ();
  AsmReadGdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr);
  AsmReadIdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Idtr);

  InitHostContextPerCpu (mBspIndex);

  //
  // Propagate the BSP's control registers and GDTR to all CPUs
  //
  for (Index = 0; Index < mHostContextCommon.CpuNum; Index++) {
    mGuestContextCommon.GuestContextPerCpu[Index].Cr0 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0;
    mGuestContextCommon.GuestContextPerCpu[Index].Cr3 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3;
    mGuestContextCommon.GuestContextPerCpu[Index].Cr4 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4;
    CopyMem (&mGuestContextCommon.GuestContextPerCpu[Index].Gdtr, &mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr, sizeof (IA32_DESCRIPTOR));
  }

  InitGuestContextPerCpu (mBspIndex);
}
/**
  Relocate SmmBases for each processor.
  Executes on the first boot and all S3 resumes.
**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr       = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs.
  // This is the APs' 1st SMI - rebase is done here, and the APs' default SMI
  // handler is overridden by gcSmmInitTemplate.
  //
  mIsBsp   = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]) {
      }
    } else {
      //
      // BSP will be relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]) {
  }

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
/**
  This function enables the TXT environment.

  @retval EFI_SUCCESS      TXT environment is enabled
  @retval EFI_UNSUPPORTED  TXT environment is not supported
**/
EFI_STATUS
EnableTxt (
  VOID
  )
{
  TXT_GETSEC_CAPABILITIES  TxtCapabilities;
  UINT32                   Index;
  UINT32                   RegEax;
  UINT32                   RegEbx;
  UINT32                   RegEcx;
  BOOLEAN                  ClearMca;
  UINT32                   McaCount;

  //
  // Enable SMX
  //
  AsmWriteCr4 (AsmReadCr4 () | CR4_SMXE);

  //
  // Check TXT Chipset
  //
  Index = 0;
  TxtCapabilities.Uint32 = AsmGetSecCapabilities (Index);
  DumpGetSecCapabilities (Index, TxtCapabilities.Uint32);
  if (TxtCapabilities.Bits.ChipsetPresent == 0) {
    DEBUG ((EFI_D_ERROR, "(TXT) TXT Chipset not present!\n"));
    return EFI_UNSUPPORTED;
  }
  if (TxtCapabilities.Bits.Senter == 0) {
    DEBUG ((EFI_D_ERROR, "(TXT) SENTER not supported!\n"));
    return EFI_UNSUPPORTED;
  }
  while (TxtCapabilities.Bits.ExtendedLeafs != 0) {
    Index++;
    TxtCapabilities.Uint32 = AsmGetSecCapabilities (Index);
    DumpGetSecCapabilities (Index, TxtCapabilities.Uint32);
  }

  //
  // Get parameters
  //
  ClearMca = TRUE;
  Index    = 0;
  while (TRUE) {
    AsmGetSecParameters (Index, &RegEax, &RegEbx, &RegEcx);
    if ((RegEax & GETSEC_PARAMETER_TYPE_MASK) == 0) {
      break;
    }
    DumpGetSecParameters (RegEax, RegEbx, RegEcx);
    if ((RegEax & GETSEC_PARAMETER_TYPE_MASK) == GETSEC_PARAMETER_TYPE_EXTERNSION) {
      if ((RegEax & (1 << 6)) != 0) {
        //
        // No need to clear MCA
        //
        ClearMca = FALSE;
      }
    }
    Index++;
  }

  //
  // Clear MCA
  //
  if (ClearMca) {
    McaCount = (UINT32)AsmReadMsr64 (IA32_MCG_CAP) & 0xFF;
    for (Index = 0; Index < McaCount; Index++) {
      //
      // Status MSRs are spaced 4 apart per MCA bank
      //
      AsmWriteMsr64 (IA32_MC0_STATUS + Index * 4, 0);
    }
  }

  return EFI_SUCCESS;
}
/**
  Programs registers for the calling processor.

  @param RegisterTable  Pointer to register table of the running processor.
**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        switch (RegisterTableEntry->Index) {
          case 0:
            Value = AsmReadCr0 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr0 (Value);
            break;
          case 2:
            Value = AsmReadCr2 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr2 (Value);
            break;
          case 3:
            Value = AsmReadCr3 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr3 (Value);
            break;
          case 4:
            Value = AsmReadCr4 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr4 (Value);
            break;
          default:
            break;
        }
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
        }
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }
        break;
      default:
        break;
    }
  }
}
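//
// For illustration only: a hypothetical helper that fills in a register-table
// entry which would set CR4.OSXSAVE (bit 18) through the ControlRegister path
// above. Field usage follows SetProcessorRegister (); the helper name and the
// chosen bit are this sketch's own assumptions, not part of the listing.
//
VOID
ExampleBuildCr4OsxsaveEntry (
  OUT CPU_REGISTER_TABLE_ENTRY  *Entry
  )
{
  Entry->RegisterType   = ControlRegister;
  Entry->Index          = 4;   // CR4
  Entry->ValidBitStart  = 18;  // CR4.OSXSAVE
  Entry->ValidBitLength = 1;   // Single-bit field
  Entry->Value          = 1;   // Set the bit
}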
/**
  Programs registers for the calling processor.

  @param PreSmmInit       Specify the target register table.
                          If TRUE, the target is the pre-SMM-init register table.
                          If FALSE, the target is the post-SMM-init register table.
  @param ProcessorNumber  Handle number of specified logical processor.
**/
VOID
SetProcessorRegisterEx (
  IN BOOLEAN  PreSmmInit,
  IN UINTN    ProcessorNumber
  )
{
  CPU_REGISTER_TABLE        *RegisterTable;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  UINTN                     StartIndex;
  UINTN                     EndIndex;

  if (PreSmmInit) {
    RegisterTable = &mCpuConfigConextBuffer.PreSmmInitRegisterTable[ProcessorNumber];
  } else {
    RegisterTable = &mCpuConfigConextBuffer.RegisterTable[ProcessorNumber];
  }

  //
  // If a microcode patch has been applied, then the first register table entry
  // is for the microcode update, so it is skipped.
  //
  StartIndex = 0;

  if (mSetBeforeCpuOnlyReset) {
    EndIndex = StartIndex + RegisterTable->NumberBeforeReset;
  } else {
    StartIndex += RegisterTable->NumberBeforeReset;
    EndIndex    = RegisterTable->TableLength;
  }

  //
  // Traverse Register Table of this logical processor
  //
  for (Index = StartIndex; Index < EndIndex; Index++) {
    RegisterTableEntry = &RegisterTable->RegisterTableEntry[Index];
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        switch (RegisterTableEntry->Index) {
          case 0:
            Value = AsmReadCr0 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             RegisterTableEntry->Value
                             );
            AsmWriteCr0 (Value);
            break;
          case 2:
            Value = AsmReadCr2 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             RegisterTableEntry->Value
                             );
            AsmWriteCr2 (Value);
            break;
          case 3:
            Value = AsmReadCr3 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             RegisterTableEntry->Value
                             );
            AsmWriteCr3 (Value);
            break;
          case 4:
            Value = AsmReadCr4 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             RegisterTableEntry->Value
                             );
            AsmWriteCr4 (Value);
            break;
          case 8:
            //
            // Do we need to support CR8?
            //
            break;
          default:
            break;
        }
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        //
        // If this function is called to restore register settings after an INIT
        // signal, there is no need to restore MSRs in the register table.
        //
        if (!mRestoreSettingAfterInit) {
          if (RegisterTableEntry->ValidBitLength >= 64) {
            //
            // If length is not less than 64 bits, then directly write without reading
            //
            AsmWriteMsr64 (
              RegisterTableEntry->Index,
              RegisterTableEntry->Value
              );
          } else {
            //
            // Set the bit section according to bit start and length
            //
            AsmMsrBitFieldWrite64 (
              RegisterTableEntry->Index,
              RegisterTableEntry->ValidBitStart,
              RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
              RegisterTableEntry->Value
              );
          }
        }
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }
        break;
      default:
        break;
    }
  }
}
/**
  Programs registers for the calling processor.

  @param RegisterTables      Pointer to register table of the running processor.
  @param RegisterTableCount  Register table count.
**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTables,
  IN UINTN               RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  //
  // Locate the register table that matches this processor's initial APIC ID
  //
  InitApicId    = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        switch (RegisterTableEntry->Index) {
          case 0:
            Value = AsmReadCr0 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr0 (Value);
            break;
          case 2:
            Value = AsmReadCr2 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr2 (Value);
            break;
          case 3:
            Value = AsmReadCr3 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr3 (Value);
            break;
          case 4:
            Value = AsmReadCr4 ();
            Value = (UINTN)BitFieldWrite64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                             (UINTN)RegisterTableEntry->Value
                             );
            AsmWriteCr4 (Value);
            break;
          default:
            break;
        }
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Acquire the lock to avoid programming issues with package- and
          // core-scope MSRs in parallel execution mode, and to make the MSR
          // read-modify-write operation atomic.
          //
          MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
          AcquireSpinLock (MsrSpinLock);
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
          ReleaseSpinLock (MsrSpinLock);
        }
        break;
      //
      // MemoryMapped operations
      //
      case MemoryMapped:
        AcquireSpinLock (mMemoryMappedLock);
        MmioBitFieldWrite32 (
          (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          (UINT32)RegisterTableEntry->Value
          );
        ReleaseSpinLock (mMemoryMappedLock);
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }
        break;
      default:
        break;
    }
  }
}
/**
  Transfers control to DxeCore.

  This function performs CPU architecture specific operations to execute
  the entry point of DxeCore with the parameters of HobList.
  It also installs EFI_END_OF_PEI_PPI to signal the end of the PEI phase.

  @param DxeCoreEntryPoint  The entry point of DxeCore.
  @param HobList            The start of HobList passed to DxeCore.
**/
VOID
HandOffToDxeCore (
  IN EFI_PHYSICAL_ADDRESS  DxeCoreEntryPoint,
  IN EFI_PEI_HOB_POINTERS  HobList
  )
{
  EFI_STATUS                       Status;
  EFI_PHYSICAL_ADDRESS             BaseOfStack;
  EFI_PHYSICAL_ADDRESS             TopOfStack;
  UINTN                            PageTables;
  X64_IDT_GATE_DESCRIPTOR          *IdtTable;
  UINTN                            SizeOfTemplate;
  VOID                             *TemplateBase;
  EFI_PHYSICAL_ADDRESS             VectorAddress;
  UINT32                           Index;
  X64_IDT_TABLE                    *IdtTableForX64;
  EFI_VECTOR_HANDOFF_INFO          *VectorInfo;
  EFI_PEI_VECTOR_HANDOFF_INFO_PPI  *VectorHandoffInfoPpi;
  BOOLEAN                          BuildPageTablesIa32Pae;

  if (IsNullDetectionEnabled ()) {
    ClearFirst4KPage (HobList.Raw);
  }

  Status = PeiServicesAllocatePages (EfiBootServicesData, EFI_SIZE_TO_PAGES (STACK_SIZE), &BaseOfStack);
  ASSERT_EFI_ERROR (Status);

  if (FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    //
    // Compute the top of the stack we were allocated, which is used to load the X64 DxeCore.
    // Pre-allocate 32 bytes, which conforms to the x64 calling convention.
    //
    // The first four parameters to a function are passed in rcx, rdx, r8 and r9.
    // Any further parameters are pushed on the stack. Furthermore, space (4 * 8 bytes) for the
    // register parameters is reserved on the stack, in case the called function
    // wants to spill them; this is important if the function is variadic.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - 32;

    //
    // The x64 calling convention requires that the stack be aligned to 16 bytes
    //
    TopOfStack = (EFI_PHYSICAL_ADDRESS)(UINTN)ALIGN_POINTER (TopOfStack, 16);

    //
    // Load the GDT of Go64. Since the GDT of 32-bit Tiano is located in BS_DATA
    // memory, it may be corrupted when copying the FV to high-end memory.
    //
    AsmWriteGdtr (&gGdt);

    //
    // Create page table and save PageMapLevel4 to CR3
    //
    PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE);

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    //
    // Paging might be already enabled. To avoid a conflicting configuration,
    // disable paging first anyway.
    //
    AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
    AsmWriteCr3 (PageTables);

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);

    Status = PeiServicesAllocatePages (
               EfiBootServicesData,
               EFI_SIZE_TO_PAGES (sizeof (X64_IDT_TABLE) + SizeOfTemplate * IDT_ENTRY_COUNT),
               &VectorAddress
               );
    ASSERT_EFI_ERROR (Status);

    //
    // Store EFI_PEI_SERVICES** in the 4 bytes immediately preceding the IDT,
    // so that it can still be retrieved correctly after the IDT register is re-written.
    //
    IdtTableForX64             = (X64_IDT_TABLE *)(UINTN)VectorAddress;
    IdtTableForX64->PeiService = GetPeiServicesTablePointer ();

    VectorAddress = (EFI_PHYSICAL_ADDRESS)(UINTN)(IdtTableForX64 + 1);
    IdtTable      = IdtTableForX64->IdtTable;
    for (Index = 0; Index < IDT_ENTRY_COUNT; Index++) {
      IdtTable[Index].Ia32IdtEntry.Bits.GateType   = 0x8e;
      IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0 = 0;
      IdtTable[Index].Ia32IdtEntry.Bits.Selector   = SYS_CODE64_SEL;

      IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow  = (UINT16)VectorAddress;
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh = (UINT16)(RShiftU64 (VectorAddress, 16));
      IdtTable[Index].Offset32To63                 = (UINT32)(RShiftU64 (VectorAddress, 32));
      IdtTable[Index].Reserved                     = 0;

      CopyMem ((VOID *)(UINTN)VectorAddress, TemplateBase, SizeOfTemplate);
      AsmVectorFixup ((VOID *)(UINTN)VectorAddress, (UINT8)Index);

      VectorAddress += SizeOfTemplate;
    }

    gLidtDescriptor.Base = (UINTN)IdtTable;

    //
    // Disable the debug timer interrupt, since the new IDT table cannot handle it.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);

    AsmWriteIdtr (&gLidtDescriptor);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Go to Long Mode and transfer control to DxeCore.
    // Interrupts will not get turned on until the CPU AP is loaded.
    // Call x64 drivers passing in single argument, a pointer to the HOBs.
    //
    AsmEnablePaging64 (
      SYS_CODE64_SEL,
      DxeCoreEntryPoint,
      (EFI_PHYSICAL_ADDRESS)(UINTN)(HobList.Raw),
      0,
      TopOfStack
      );
  } else {
    //
    // Get Vector Hand-off Info PPI and build GUIDed HOB
    //
    Status = PeiServicesLocatePpi (
               &gEfiVectorHandoffInfoPpiGuid,
               0,
               NULL,
               (VOID **)&VectorHandoffInfoPpi
               );
    if (Status == EFI_SUCCESS) {
      DEBUG ((EFI_D_INFO, "Vector Hand-off Info PPI is gotten, GUIDed HOB is created!\n"));
      VectorInfo = VectorHandoffInfoPpi->Info;
      Index      = 1;
      while (VectorInfo->Attribute != EFI_VECTOR_HANDOFF_LAST_ENTRY) {
        VectorInfo++;
        Index++;
      }
      BuildGuidDataHob (
        &gEfiVectorHandoffInfoPpiGuid,
        VectorHandoffInfoPpi->Info,
        sizeof (EFI_VECTOR_HANDOFF_INFO) * Index
        );
    }

    //
    // Compute the top of the stack we were allocated. Pre-allocate a UINTN
    // for safety.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT;
    TopOfStack = (EFI_PHYSICAL_ADDRESS)(UINTN)ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);

    PageTables             = 0;
    BuildPageTablesIa32Pae = ToBuildPageTable ();
    if (BuildPageTablesIa32Pae) {
      PageTables = Create4GPageTablesIa32Pae (BaseOfStack, STACK_SIZE);
      if (IsEnableNonExecNeeded ()) {
        EnableExecuteDisableBit ();
      }
    }

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    if (BuildPageTablesIa32Pae) {
      //
      // Paging might be already enabled. To avoid a conflicting configuration,
      // disable paging first anyway.
      //
      AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
      AsmWriteCr3 (PageTables);

      //
      // Set Physical Address Extension (bit 5 of CR4).
      //
      AsmWriteCr4 (AsmReadCr4 () | BIT5);
    }

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Transfer the control to the entry point of DxeCore.
    //
    if (BuildPageTablesIa32Pae) {
      AsmEnablePaging32 (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *)(UINTN)TopOfStack
        );
    } else {
      SwitchStack (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *)(UINTN)TopOfStack
        );
    }
  }
}