Example No. 1
/**
  Preparation before programming MTRR.

  This function will do some preparation for programming MTRRs:
  disable the cache, invalidate the cache, and disable the MTRR caching functionality.

  @param[out]  MtrrContext  Pointer to the context to save.

**/
VOID
PreMtrrChange (
  OUT MTRR_CONTEXT  *MtrrContext
  )
{
  //
  // Disable interrupts and save current interrupt state
  //
  MtrrContext->InterruptState = SaveAndDisableInterrupts();
  
  //
  // Enter no fill cache mode, CD=1(Bit30), NW=0 (Bit29)
  //
  AsmDisableCache ();

  //
  // Save original CR4 value and clear PGE flag (Bit 7)
  //
  MtrrContext->Cr4 = AsmReadCr4 ();
  AsmWriteCr4 (MtrrContext->Cr4 & (~BIT7));

  //
  // Flush all TLBs
  //
  CpuFlushTlb ();

  //
  // Disable MTRRs: clear FE (Bit 10) and E (Bit 11) of IA32_MTRR_DEF_TYPE
  //
  AsmMsrBitFieldWrite64 (MTRR_LIB_IA32_MTRR_DEF_TYPE, 10, 11, 0);
}
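
A minimal usage sketch of how this routine pairs with PostMtrrChangeEnableCache (Example No. 2) to bracket an MTRR update; the wrapper name is illustrative and the actual MSR writes are elided:

VOID
ExampleProgramMtrrs (
  VOID
  )
{
  MTRR_CONTEXT  Context;

  //
  // Save the interrupt state, disable the cache, and disable MTRRs.
  //
  PreMtrrChange (&Context);

  //
  // ... program the MTRR MSRs here, e.g. with AsmWriteMsr64 () ...
  //

  //
  // Flush TLBs, re-enable the cache, and restore CR4 and the interrupt state.
  //
  PostMtrrChangeEnableCache (&Context);
}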
Example No. 2
/**
  Cleaning up after programming MTRRs.

  This function will do some cleanup after programming MTRRs:
  flush all TLBs, re-enable caching, and restore CR4.

  @param[in]  MtrrContext  Pointer to the context to restore.

**/
VOID
PostMtrrChangeEnableCache (
  IN MTRR_CONTEXT  *MtrrContext
  )
{
  //
  // Flush all TLBs 
  //
  CpuFlushTlb ();

  //
  // Enable Normal Mode caching CD=NW=0, CD(Bit30), NW(Bit29)
  //
  AsmEnableCache ();

  //
  // Restore original CR4 value
  //
  AsmWriteCr4 (MtrrContext->Cr4);
  
  //
  // Restore original interrupt state
  //
  SetInterruptState (MtrrContext->InterruptState);
}
Example No. 3
/**
  Cleaning up after programming MTRRs.

  This function will do some cleanup after programming MTRRs:
  re-enable the MTRR caching functionality, flush all TLBs, re-enable the cache, and restore CR4.

  @param  Cr4  The CR4 value to restore.

**/
VOID
PostMtrrChange (
  UINTN Cr4
  )
{
  //
  // Enable MTRRs: set FE (Bit 10) and E (Bit 11) of IA32_MTRR_DEF_TYPE
  //
  AsmMsrBitFieldWrite64 (MTRR_LIB_IA32_MTRR_DEF_TYPE, 10, 11, 3);

  //
  // Flush all TLBs 
  //
  CpuFlushTlb ();

  //
  // Enable Normal Mode caching CD=NW=0, CD(Bit30), NW(Bit29)
  //
  AsmEnableCache ();

  //
  // Restore original CR4 value
  //
  AsmWriteCr4 (Cr4);
}
Example No. 4
/**
  Preparation before programming MTRR.

  This function will do some preparation for programming MTRRs:
  disable the cache, invalidate the cache, and disable the MTRR caching functionality.

  @return  The CR4 value before the change.

**/
UINTN
PreMtrrChange (
  VOID
  )
{
  UINTN  Value;

  //
  // Enter no fill cache mode, CD=1(Bit30), NW=0 (Bit29)
  //
  AsmDisableCache ();

  //
  // Save original CR4 value and clear PGE flag (Bit 7)
  //
  Value = AsmReadCr4 ();
  AsmWriteCr4 (Value & (~BIT7));

  //
  // Flush all TLBs
  //
  CpuFlushTlb ();

  //
  // Disable MTRRs: clear FE (Bit 10) and E (Bit 11) of IA32_MTRR_DEF_TYPE
  //
  AsmMsrBitFieldWrite64 (MTRR_LIB_IA32_MTRR_DEF_TYPE, 10, 11, 0);

  //
  // Return original CR4 value
  //
  return Value;
}
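
A sketch of how this CR4-based variant pairs with PostMtrrChange (Example No. 3); the wrapper name and the choice of default memory type written here are illustrative assumptions:

VOID
ExampleSetDefaultMemoryType (
  VOID
  )
{
  UINTN  Cr4;

  //
  // Disable the cache and MTRRs, saving the original CR4 value.
  //
  Cr4 = PreMtrrChange ();

  //
  // Illustration only: set the default memory type field
  // (Bits 0..2 of IA32_MTRR_DEF_TYPE) to uncacheable (0).
  //
  AsmMsrBitFieldWrite64 (MTRR_LIB_IA32_MTRR_DEF_TYPE, 0, 2, 0);

  //
  // Re-enable MTRRs and the cache, and restore CR4.
  //
  PostMtrrChange (Cr4);
}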
Example No. 5
/**
  The page fault handler that reads PI CpuSaveStates on demand for Framework use. If the fault
  is not targeted at the mFrameworkSmst->CpuSaveState range, the function returns FALSE so that
  PageFaultHandlerHook knows it needs to pass the fault over to the original page fault handler.

  @retval TRUE     The page fault is correctly handled.
  @retval FALSE    The page fault is not handled and is passed through to the original handler.

**/
BOOLEAN
PageFaultHandler (
  VOID
  )
{
  BOOLEAN        IsHandled;
  UINT64         *PageTable;
  UINT64         PFAddress;
  UINTN          NumCpuStatePages;
  
  ASSERT (mPageTableHookEnabled);
  AcquireSpinLock (&mPFLock);

  PageTable = (UINT64*)(UINTN)(AsmReadCr3 () & mPhyMask);
  PFAddress = AsmReadCr2 ();
  NumCpuStatePages = EFI_SIZE_TO_PAGES (mNumberOfProcessors * sizeof (EFI_SMM_CPU_SAVE_STATE));
  IsHandled = FALSE;
  if (((UINTN)mFrameworkSmst->CpuSaveState & ~(SIZE_2MB-1)) == (PFAddress & ~(SIZE_2MB-1))) {
    if ((UINTN)mFrameworkSmst->CpuSaveState <= PFAddress &&
        PFAddress < (UINTN)mFrameworkSmst->CpuSaveState + EFI_PAGES_TO_SIZE (NumCpuStatePages)
        ) {
      mCpuStatePageTable[BitFieldRead64 (PFAddress, 12, 20)] |= BIT0 | BIT1; // present and rw
      CpuFlushTlb ();
      ReadWriteCpuStatePage (PFAddress & ~(SIZE_4KB-1), TRUE);
      IsHandled = TRUE;
    } else {
      ASSERT (FALSE);
    }
  }

  ReleaseSpinLock (&mPFLock);
  return IsHandled;
}
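
A hedged sketch of the PageFaultHandlerHook dispatch described in the comment block above; mOriginalPageFaultHandler is an assumed pointer to the original #PF handler captured when the hook was installed, and the function name is illustrative:

//
// Assumed: saved pointer to the original page fault handler.
//
EFI_CPU_INTERRUPT_HANDLER  mOriginalPageFaultHandler;

VOID
EFIAPI
PageFaultHandlerHookSketch (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  //
  // Give PageFaultHandler the first chance; it only handles faults that
  // hit the CpuSaveState range and returns FALSE for everything else.
  //
  if (!PageFaultHandler ()) {
    mOriginalPageFaultHandler (InterruptType, SystemContext);
  }
}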
Example No. 6
/**
  Initialize everything needed for the on-demand paging hooks used for PI<->Framework
  CpuSaveStates marshalling.

  @param[in] FrameworkSmst   Framework SMM system table pointer.

**/
VOID
InitHook (
  IN EFI_SMM_SYSTEM_TABLE  *FrameworkSmst
  )
{
  UINTN                 NumCpuStatePages;
  UINTN                 CpuStatePage;
  UINTN                 Bottom2MPage;
  UINTN                 Top2MPage;
  
  mPageTableHookEnabled = FALSE;
  NumCpuStatePages = EFI_SIZE_TO_PAGES (mNumberOfProcessors * sizeof (EFI_SMM_CPU_SAVE_STATE));
  //
  // Only hook the page table for X64 images and when no more than 2MB is needed to hold all CPU Save States
  //
  if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64) && NumCpuStatePages <= EFI_SIZE_TO_PAGES (SIZE_2MB)) {
    //
    // Allocate twice the needed pages to make sure all CPU Save States can fit within a single 2MB page.
    //
    CpuStatePage = (UINTN)AllocatePages (NumCpuStatePages * 2);
    ASSERT (CpuStatePage != 0);
    Bottom2MPage = CpuStatePage & ~(SIZE_2MB-1);
    Top2MPage    = (CpuStatePage + EFI_PAGES_TO_SIZE (NumCpuStatePages * 2) - 1) & ~(SIZE_2MB-1);
    if (Bottom2MPage == Top2MPage ||
        CpuStatePage + EFI_PAGES_TO_SIZE (NumCpuStatePages * 2) - Top2MPage >= EFI_PAGES_TO_SIZE (NumCpuStatePages)
        ) {
      //
      // If the allocated 4KB pages fall within the same 2MB page, or the portion above the 2MB boundary is large enough, use the higher portion pages.
      //
      FrameworkSmst->CpuSaveState = (EFI_SMM_CPU_SAVE_STATE *)(CpuStatePage + EFI_PAGES_TO_SIZE (NumCpuStatePages));
      FreePages ((VOID*)CpuStatePage, NumCpuStatePages);
    } else {
      FrameworkSmst->CpuSaveState = (EFI_SMM_CPU_SAVE_STATE *)CpuStatePage;
      FreePages ((VOID*)(CpuStatePage + EFI_PAGES_TO_SIZE (NumCpuStatePages)), NumCpuStatePages);
    }
    //
    // Add temporary working buffer for hooking
    //
    mShadowSaveState = (EFI_SMM_CPU_SAVE_STATE*) AllocatePool (sizeof (EFI_SMM_CPU_SAVE_STATE));
    ASSERT (mShadowSaveState != NULL);
    //
    // Allocate and initialize 4KB Page Table for hooking CpuSaveState.
    // Replace the original 2MB PDE with new 4KB page table.
    //
    mCpuStatePageTable = InitCpuStatePageTable (FrameworkSmst->CpuSaveState);
    //
    // Mark the PTEs for CpuSaveState as not present.
    //
    HookCpuStateMemory (FrameworkSmst->CpuSaveState);
    HookPageFaultHandler ();
    CpuFlushTlb ();
    mPageTableHookEnabled = TRUE;
  }
  mHookInitialized = TRUE;
}
Example No. 7
/**
  SMM profile specific INT 1 (single-step) exception handler.

  @param  InterruptType    Defines the type of interrupt or exception that
                           occurred on the processor. This parameter is processor architecture specific.
  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.
**/
VOID
EFIAPI
DebugExceptionHandler (
    IN EFI_EXCEPTION_TYPE   InterruptType,
    IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN  CpuIndex;
  UINTN  PFEntry;

  if (!mSmmProfileStart) {
    return;
  }
  CpuIndex = GetCpuIndex ();

  //
  // Restore the page table entries that were modified at the last page fault
  //
  for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
    *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
  }

  //
  // Reset page fault exception count for next page fault.
  //
  mPFEntryCount[CpuIndex] = 0;

  //
  // Flush TLB
  //
  CpuFlushTlb ();

  //
  // Clear TF in EFLAGS
  //
  ClearTrapFlag (SystemContext);
}
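
A minimal sketch of what ClearTrapFlag is expected to do here, assuming an X64 system context: clear TF (Bit 8) in the saved RFLAGS so the processor stops single-stepping once the handler returns:

VOID
ClearTrapFlagSketch (
  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  //
  // TF is Bit 8 of RFLAGS; clearing it in the saved context prevents
  // another single-step (#DB) exception after execution resumes.
  //
  SystemContext.SystemContextX64->Rflags &= ~(UINT64)BIT8;
}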
Example No. 8
/**
  Update the page table according to the protected memory ranges and the 4KB-page mapped memory ranges.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64                            *Pml4;
  UINT64                            *Pde;
  UINT64                            *Pte;
  UINT64                            *Pt;
  UINTN                             Address;
  UINTN                             Level1;
  UINTN                             Level2;
  UINTN                             Level3;
  UINTN                             Level4;
  UINTN                             NumberOfPdpEntries;
  UINTN                             NumberOfPml4Entries;
  UINTN                             SizeOfMemorySpace;
  BOOLEAN                           Nx;

  if (sizeof (UINTN) == sizeof (UINT64)) {
    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    if (SizeOfMemorySpace <= 39 ) {
      NumberOfPml4Entries = 1;
      NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
    } else {
      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
      NumberOfPdpEntries = 512;
    }
  } else {
    NumberOfPml4Entries = 1;
    NumberOfPdpEntries  = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is a 1GB page entry, skip it
        //
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        Address = (((Level2 << 9) + Level3) << 21);

        //
        // If it is 2M page, check IsAddressSplit()
        //
        if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
          //
          // Based on current page table, create 4KB page table for split area.
          //
          ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));

          Pt = AllocatePageTableMemory (1);
          ASSERT (Pt != NULL);

          // Split it
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {
            Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
          } // end for PT
          *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        } // end if IsAddressSplit
      } // end for PTE
    } // end for PDE
  }

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is a 1GB page entry; set the NX bit and skip it
        //
        if (mXdSupported) {
          *Pde = *Pde | IA32_PG_NX;
        }
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        Address = (((Level2 << 9) + Level3) << 21);

        if ((*Pte & IA32_PG_PS) != 0) {
          // 2MB page

          if (!IsAddressValid (Address, &Nx)) {
            //
            // Patch to remove Present flag and RW flag
            //
            *Pte = *Pte & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
          }
          if (Nx && mXdSupported) {
            *Pte = *Pte | IA32_PG_NX;
          }
        } else {
          // 4KB page
          Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (Pt == 0) {
            continue;
          }
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
            if (!IsAddressValid (Address, &Nx)) {
              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pt = *Pt | IA32_PG_NX;
            }
            Address += SIZE_4KB;
          } // end for PT
        } // end if PS
      } // end for PTE
    } // end for PDE
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return ;
}
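
A small helper sketch that restates the address calculation used in both loops above: Level2 indexes 1GB-sized PDPT entries and Level3 indexes 2MB-sized PD entries, so each 2MB region's base address is recovered as follows (the helper name is illustrative):

UINTN
ExampleRegionBase (
  IN UINTN  PdptIndex,   // Level2 in InitPaging
  IN UINTN  PdIndex      // Level3 in InitPaging
  )
{
  //
  // Each PDPT entry covers 1GB (Bit 30) and each PD entry covers 2MB
  // (Bit 21), so ((PdptIndex << 9) + PdIndex) << 21 equals
  // (PdptIndex << 30) | (PdIndex << 21).
  //
  return ((PdptIndex << 9) + PdIndex) << 21;
}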
Example No. 9
/**
  The page fault handler that saves SMM profile data.

  @param  Rip        The RIP when the exception happened.
  @param  ErrorCode  The error code of the exception.

**/
VOID
SmmProfilePFHandler (
  UINTN Rip,
  UINTN ErrorCode
  )
{
  UINT64                *PageTable;
  UINT64                PFAddress;
  UINTN                 CpuIndex;
  UINTN                 Index;
  UINT64                InstructionAddress;
  UINTN                 MaxEntryNumber;
  UINTN                 CurrentEntryNumber;
  BOOLEAN               IsValidPFAddress;
  SMM_PROFILE_ENTRY     *SmmProfileEntry;
  UINT64                SmiCommand;
  EFI_STATUS            Status;
  UINT8                 SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO    IoInfo;

  if (!mSmmProfileStart) {
    //
    // If the SMM profile has not started, call the original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress  = FALSE;
  PageTable         = (UINT64 *)AsmReadCr3 ();
  PFAddress         = AsmReadCr2 ();
  CpuIndex          = GetCpuIndex ();

  if (PFAddress <= 0xFFFFFFFF) {
    RestorePageTableBelow4G (PageTable, PFAddress, CpuIndex, ErrorCode);
  } else {
    RestorePageTableAbove4G (PageTable, PFAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
  }

  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused the page fault is not a jump instruction;
        // set the instruction address to the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate that it is not a software SMI
    //
    SmiCommand    = 0xFFFFFFFFFFFFFFFFULL;
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered via the SMI command port has been found; get SmiCommand from the SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand = (UINT64)SoftSmiValue;
        break;
      }
    }

    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check whether the same entry already exists in the profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode   == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address     == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum      == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd      == SmiCommand)) {
        //
        // The same record already exists; no need to save it again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber     = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
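
A sketch of the slot selection used when logging a new entry above: with PcdCpuSmmProfileRingBuffer set the index wraps so the oldest records are overwritten, otherwise logging simply stops once the buffer is full (the helper name is illustrative):

UINTN
ExampleProfileSlot (
  IN UINTN    CurDataEntries,
  IN UINTN    MaxDataEntries,
  IN BOOLEAN  RingBuffer
  )
{
  //
  // Mirrors the CurrentEntryNumber logic in SmmProfilePFHandler: wrap
  // modulo MaxDataEntries for a ring buffer; otherwise the caller's
  // CurrentEntryNumber < MaxEntryNumber check stops further logging.
  //
  return RingBuffer ? (CurDataEntries % MaxDataEntries) : CurDataEntries;
}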