/**
  Mark all the CpuSaveStates as not present.

  The function marks the whole CpuSaveStates memory range as not present so
  that a page fault is triggered on any CpuSaveStates access. It is meant to
  be called on each SmmBaseHelper SMI callback before the Framework handler
  is called.

  @param[in] CpuSaveState   The base of CpuSaveStates.

**/
VOID
HookCpuStateMemory (
  IN EFI_SMM_CPU_SAVE_STATE *CpuSaveState
  )
{
  UINT64  FirstPTIndex;
  UINT64  LastPTIndex;
  UINT64  PTIndex;

  //
  // Bits 12..20 of an address select the 4KB page-table entry inside the
  // containing 2MB region; compute the PTE range covering all CpuSaveStates.
  //
  FirstPTIndex = BitFieldRead64 ((UINTN)CpuSaveState, 12, 20);
  LastPTIndex  = BitFieldRead64 (
                   (UINTN)CpuSaveState + mNumberOfProcessors * sizeof (EFI_SMM_CPU_SAVE_STATE) - 1,
                   12,
                   20
                   );

  for (PTIndex = FirstPTIndex; PTIndex <= LastPTIndex; PTIndex++) {
    //
    // Clear Present (BIT0), Accessed (BIT5) and Dirty (BIT6) so the next
    // access faults and the A/D state starts clean for this SMI round.
    //
    mCpuStatePageTable[PTIndex] &= ~(BIT0|BIT5|BIT6);
  }
}
/** The page fault handler that on-demand read PI CpuSaveStates for framework use. If the fault is not targeted to mFrameworkSmst->CpuSaveState range, the function will return FALSE to let PageFaultHandlerHook know it needs to pass the fault over to original page fault handler. @retval TRUE The page fault is correctly handled. @retval FALSE The page fault is not handled and is passed through to original handler. **/ BOOLEAN PageFaultHandler ( VOID ) { BOOLEAN IsHandled; UINT64 *PageTable; UINT64 PFAddress; UINTN NumCpuStatePages; ASSERT (mPageTableHookEnabled); AcquireSpinLock (&mPFLock); PageTable = (UINT64*)(UINTN)(AsmReadCr3 () & mPhyMask); PFAddress = AsmReadCr2 (); NumCpuStatePages = EFI_SIZE_TO_PAGES (mNumberOfProcessors * sizeof (EFI_SMM_CPU_SAVE_STATE)); IsHandled = FALSE; if (((UINTN)mFrameworkSmst->CpuSaveState & ~(SIZE_2MB-1)) == (PFAddress & ~(SIZE_2MB-1))) { if ((UINTN)mFrameworkSmst->CpuSaveState <= PFAddress && PFAddress < (UINTN)mFrameworkSmst->CpuSaveState + EFI_PAGES_TO_SIZE (NumCpuStatePages) ) { mCpuStatePageTable[BitFieldRead64 (PFAddress, 12, 20)] |= BIT0 | BIT1; // present and rw CpuFlushTlb (); ReadWriteCpuStatePage (PFAddress & ~(SIZE_4KB-1), TRUE); IsHandled = TRUE; } else { ASSERT (FALSE); } } ReleaseSpinLock (&mPFLock); return IsHandled; }
/**
  Gets PCI CFG2 PPI.

  This internal function retrieves the PCI CFG2 PPI instance whose Segment
  field matches the segment number encoded in Address from the PPI database.

  @param  Address       The address that encodes the PCI Segment, Bus, Device,
                        Function and Register.

  @return The pointer to PCI CFG2 PPI, or NULL if no instance for the
          requested segment exists (RELEASE builds only; DEBUG builds ASSERT).

**/
EFI_PEI_PCI_CFG2_PPI *
InternalGetPciCfg2Ppi (
  IN UINT64 Address
  )
{
  EFI_STATUS            Status;
  UINTN                 Instance;
  EFI_PEI_PCI_CFG2_PPI  *PciCfg2Ppi;
  UINT64                SegmentNumber;

  Instance      = 0;
  PciCfg2Ppi    = NULL;
  //
  // Segment number is encoded in bits 32..63 of Address.
  //
  SegmentNumber = BitFieldRead64 (Address, 32, 63);

  //
  // Loop through all instances of the PPI and match segment number
  //
  do {
    Status = PeiServicesLocatePpi(
               &gEfiPciCfg2PpiGuid,
               Instance,
               NULL,
               (VOID**) &PciCfg2Ppi
               );
    ASSERT_EFI_ERROR (Status);
    if (EFI_ERROR (Status)) {
      //
      // No (more) instances: in RELEASE builds the ASSERT above is compiled
      // out, and without this check the while-condition would dereference
      // PciCfg2Ppi while it is NULL (first iteration) or stale, looping or
      // crashing. Return NULL instead.
      //
      return NULL;
    }
    Instance++;
  } while (PciCfg2Ppi->Segment != SegmentNumber);

  return PciCfg2Ppi;
}
/**
  Initialize page table for pages that contain HookData.

  The function initializes the PDE for the 2MB range that contains HookData.
  If the related PDE points to a 2MB page, a page table will be allocated and
  initialized for 4KB pages. Otherwise we just use the original page table.

  @param[in] HookData   Based on which to initialize page table.

  @return The pointer to a Page Table that points to 4KB pages which contain
          HookData.

**/
UINT64 *
InitCpuStatePageTable (
  IN VOID *HookData
  )
{
  UINTN  Index;
  UINT64 *PageTable;
  UINT64 *Pdpte;
  UINT64 HookAddress;
  UINT64 Pde;
  UINT64 Address;

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  // CPUID leaf 0x80000008 EAX[7:0] returns the physical address width;
  // Index is reused here as scratch for the EAX output.
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  mPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  //
  // Cap at 48 bits and clear the low 12 bits so the mask extracts only the
  // page-frame portion of a page-table entry.
  //
  mPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Walk CR3 -> PML4E -> PDPTE to reach the page directory covering HookData.
  //
  HookAddress = (UINT64)(UINTN)HookData;
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & mPhyMask);
  PageTable = (UINT64 *)(UINTN)(PageTable[BitFieldRead64 (HookAddress, 39, 47)] & mPhyMask);
  PageTable = (UINT64 *)(UINTN)(PageTable[BitFieldRead64 (HookAddress, 30, 38)] & mPhyMask);

  Pdpte = (UINT64 *)(UINTN)PageTable;
  Pde = Pdpte[BitFieldRead64 (HookAddress, 21, 29)];
  ASSERT ((Pde & BIT0) != 0); // Present and 2M Page

  if ((Pde & BIT7) == 0) { // 4KB Page Directory
    //
    // The PDE already references a 4KB page table: reuse it as-is.
    //
    PageTable = (UINT64 *)(UINTN)(Pde & mPhyMask);
  } else {
    ASSERT ((Pde & mPhyMask) == (HookAddress & ~(SIZE_2MB-1))); // 2MB Page Point to HookAddress
    //
    // The PDE maps a 2MB page: split it by building a new 4KB page table
    // (512 identity-mapped PTEs) and re-pointing the PDE at it.
    //
    PageTable = AllocatePages (1);
    ASSERT (PageTable != NULL);
    Address = HookAddress & ~(SIZE_2MB-1);
    for (Index = 0; Index < 512; Index++) {
      PageTable[Index] = Address | BIT0 | BIT1; // Present and RW
      Address += SIZE_4KB;
    }
    Pdpte[BitFieldRead64 (HookAddress, 21, 29)] = (UINT64)(UINTN)PageTable | BIT0 | BIT1; // Present and RW
  }
  return PageTable;
}
/** Write back the dirty Framework CpuSaveStates to PI. The function scans the page table for dirty pages in mFrameworkSmst->CpuSaveState to write back to PI CpuSaveStates. It is meant to be called on each SmmBaseHelper SMI callback after Framework handler is called. **/ VOID WriteBackDirtyPages ( VOID ) { UINTN NumCpuStatePages; UINTN PTIndex; UINTN PTStartIndex; UINTN PTEndIndex; NumCpuStatePages = EFI_SIZE_TO_PAGES (mNumberOfProcessors * sizeof (EFI_SMM_CPU_SAVE_STATE)); PTStartIndex = (UINTN)BitFieldRead64 ((UINT64) (UINTN) mFrameworkSmst->CpuSaveState, 12, 20); PTEndIndex = (UINTN)BitFieldRead64 ((UINT64) (UINTN) mFrameworkSmst->CpuSaveState + EFI_PAGES_TO_SIZE(NumCpuStatePages) - 1, 12, 20); for (PTIndex = PTStartIndex; PTIndex <= PTEndIndex; PTIndex++) { if ((mCpuStatePageTable[PTIndex] & (BIT0|BIT6)) == (BIT0|BIT6)) { // present and dirty? ReadWriteCpuStatePage (mCpuStatePageTable[PTIndex] & mPhyMask, FALSE); } } }
/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
            0 means there is 1 sub-entry under this entry
            0x1ff means there is 512 sub-entries under this entry

**/
UINT64
GetSubEntriesNum (
  IN UINT64 *Entry
  )
{
  UINT64  SubEntryCount;

  //
  // The count is stashed in the ignored/reserved field BIT52..BIT60 of the
  // paging-structure entry, so it travels with the entry itself.
  //
  SubEntryCount = BitFieldRead64 (*Entry, 52, 60);
  return SubEntryCount;
}
/**
  Update page table to map the memory correctly in order to make the
  instruction which caused the page fault execute successfully. It also saves
  the original page table entry to be restored in the single-step exception.

  @param  PageTable         PageTable Address.
  @param  PFAddress         The memory address which caused page fault exception.
  @param  CpuIndex          The index of the processor.
  @param  ErrorCode         The Error code of exception.

**/
VOID
RestorePageTableBelow4G (
  UINT64        *PageTable,
  UINT64        PFAddress,
  UINTN         CpuIndex,
  UINTN         ErrorCode
  )
{
  UINTN         PTIndex;
  UINTN         PFIndex;

  //
  // PML4
  //
  // Only a 64-bit build has a PML4 level; on 32-bit PAE paging the walk
  // starts at the PDPTE level directly.
  //
  if (sizeof(UINT64) == sizeof(UINTN)) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PDPTE
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
  ASSERT (PageTable[PTIndex] != 0);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

  //
  // PD
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Large page
    //
    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    //
    // ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry
    //
    // Map the faulting 2MB region present/RW; if the fault was an
    // instruction fetch (EC_ID), also clear NX so the fetch can proceed.
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
    PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  } else {
    //
    // Small page
    //
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

    //
    // 4K PTE
    //
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);

    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    //
    // ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry
    //
    // Identity-map the faulting 4KB page with default attributes; clear NX
    // on instruction-fetch faults as above.
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  }
}
/**
  Update page table to map the memory correctly in order to make the
  instruction which caused the page fault execute successfully. It also saves
  the original page table entry to be restored in the single-step exception.

  @param  PageTable         PageTable Address.
  @param  PFAddress         The memory address which caused page fault exception.
  @param  CpuIndex          The index of the processor.
  @param  ErrorCode         The Error code of exception.
  @param  IsValidPFAddress  The flag indicates if SMM profile data need be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64        *PageTable,
  UINT64        PFAddress,
  UINTN         CpuIndex,
  UINTN         ErrorCode,
  BOOLEAN       *IsValidPFAddress
  )
{
  UINTN         PTIndex;
  UINT64        Address;
  BOOLEAN       Nx;
  BOOLEAN       Existed;
  UINTN         Index;
  UINTN         PFIndex;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  //
  // If page fault address is 4GB above.
  //

  //
  // Check if page fault address has existed in page table.
  // If it exists in page table but page fault is generated,
  // there are 2 possible reasons: 1. present flag is set to 0; 2. instruction fetch in protected memory range.
  //
  Existed = FALSE;
  //
  // Re-walk the live page tables from CR3 to see whether an entry already
  // covers PFAddress (as a 2MB or a 4KB mapping).
  //
  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
    // PML4E
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PDPTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
      // PD
      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
        //
        // 2MB page
        //
        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
          Existed = TRUE;
        }
      } else {
        //
        // 4KB page
        //
        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if (PageTable != 0) {
          //
          // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
          //
          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
            Existed = TRUE;
          }
        }
      }
    }
  }

  //
  // If page entry does not existed in page table at all, create a new entry.
  //
  if (!Existed) {
    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If page fault address above 4GB is in protected range but it causes a page fault exception,
      // Will create a page entry for this page fault address, make page table entry as present/rw and execution-disable.
      // this access is not saved into SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in page table for page fault address.
    //
    SmiDefaultPFHandler ();

    //
    // Find the page table entry created just now.
    //
    // Walk again from CR3; re-read CR2 because SmiDefaultPFHandler may have
    // been the one to populate the mapping for the current fault address.
    //
    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML4E
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;

    //
    // Check if 2MB-page entry need be changed to 4KB-page entry.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      //
      // Fill all 512 PTEs of the split 2MB range; strip present/rw and/or
      // set NX on pages inside the protected range per IsAddressValid.
      //
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | IA32_PG_RW | IA32_PG_P;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
        }
        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }
        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          // Remember the PTE covering the faulting page for recording below.
          PTIndex = Index;
        }
        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
      }
      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record old entries with non-present status
  // Old entries include the memory which instruction is at and the memory which instruction access.
  //
  //
  // ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }

  //
  // Add present flag or clear XD flag to make page fault handler succeed.
  //
  PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If page fault is caused by instruction fetch, clear XD bit in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}