Example #1
File: SmmProfile.c  Project: pmj/edk2
/**
  Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64                            *Pml4;
  UINT64                            *Pde;
  UINT64                            *Pte;
  UINT64                            *Pt;
  UINTN                             Address;
  UINTN                             Level1;
  UINTN                             Level2;
  UINTN                             Level3;
  UINTN                             Level4;
  UINTN                             NumberOfPdpEntries;
  UINTN                             NumberOfPml4Entries;
  UINTN                             SizeOfMemorySpace;
  BOOLEAN                           Nx;

  if (sizeof (UINTN) == sizeof (UINT64)) {
    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    if (SizeOfMemorySpace <= 39 ) {
      NumberOfPml4Entries = 1;
      NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
    } else {
      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
      NumberOfPdpEntries = 512;
    }
  } else {
    NumberOfPml4Entries = 1;
    NumberOfPdpEntries  = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, skip it
        //
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == NULL) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
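        //
        // 2MB-aligned linear address covered by this entry:
        // (PDPT index << 30) + (PD index << 21).
        //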
        Address = (((Level2 << 9) + Level3) << 21);

        //
        // If it is 2M page, check IsAddressSplit()
        //
        if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
          //
          // Based on current page table, create 4KB page table for split area.
          //
          ASSERT (Address == (*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK));

          Pt = AllocatePageTableMemory (1);
          ASSERT (Pt != NULL);

          // Split it
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {
            Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
          } // end for PT
          *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        } // end if IsAddressSplit
      } // end for PTE
    } // end for PDE
  }

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, set NX bit and skip it
        //
        if (mXdSupported) {
          *Pde = *Pde | IA32_PG_NX;
        }
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == NULL) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
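        //
        // 2MB-aligned linear address covered by this entry:
        // (PDPT index << 30) + (PD index << 21).
        //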
        Address = (((Level2 << 9) + Level3) << 21);

        if ((*Pte & IA32_PG_PS) != 0) {
          // 2MB page

          if (!IsAddressValid (Address, &Nx)) {
            //
            // Patch to remove Present flag and RW flag
            //
            *Pte = *Pte & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
          }
          if (Nx && mXdSupported) {
            *Pte = *Pte | IA32_PG_NX;
          }
        } else {
          // 4KB page
          Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (Pt == NULL) {
            continue;
          }
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
            if (!IsAddressValid (Address, &Nx)) {
              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pt = *Pt | IA32_PG_NX;
            }
            Address += SIZE_4KB;
          } // end for PT
        } // end if PS
      } // end for PTE
    } // end for PDE
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return;
}
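Side note on the entry-count calculation at the top of InitPaging(): each PDPTE maps 1GB (2^30 bytes) and each PML4E maps 512GB (2^39 bytes), so a memory space of SizeOfMemorySpace bits needs 2^(SizeOfMemorySpace - 30) PDPTEs under a single PML4E when the space fits in 39 bits, and 2^(SizeOfMemorySpace - 39) PML4Es with full 512-entry PDPTs otherwise. Below is a minimal standalone sketch of the same arithmetic, assuming a 64-bit build; CalcEntryCounts is a hypothetical helper name, and standard C types replace the EDK2 UINTN/UINT64 and LShiftU64().

#include <stdint.h>
#include <stdio.h>

/* Mirror of the PML4E/PDPTE entry-count calculation in InitPaging().
   sizeOfMemorySpace is the number of address bits to cover. */
static void
CalcEntryCounts (unsigned sizeOfMemorySpace, uint32_t *pml4Entries, uint32_t *pdpEntries)
{
  if (sizeOfMemorySpace <= 39) {
    *pml4Entries = 1;                                             /* one PML4E covers up to 512GB */
    *pdpEntries  = (uint32_t)(1ull << (sizeOfMemorySpace - 30));  /* one PDPTE per 1GB            */
  } else {
    *pml4Entries = (uint32_t)(1ull << (sizeOfMemorySpace - 39));  /* one PML4E per 512GB          */
    *pdpEntries  = 512;                                           /* full PDPT under each PML4E   */
  }
}

int
main (void)
{
  uint32_t Pml4Entries, PdpEntries;

  CalcEntryCounts (36, &Pml4Entries, &PdpEntries);  /* 36-bit space: 1 PML4E, 64 PDPTEs          */
  printf ("36-bit: %u PML4E, %u PDPTE\n", Pml4Entries, PdpEntries);

  CalcEntryCounts (48, &Pml4Entries, &PdpEntries);  /* 48-bit space: 512 PML4Es, 512 PDPTEs each */
  printf ("48-bit: %u PML4E, %u PDPTE\n", Pml4Entries, PdpEntries);
  return 0;
}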
Example #2
/**
  Update the page table to map the memory correctly so that the instruction
  which caused the page fault can execute successfully. It also saves the original
  page table entry so that it can be restored in the single-step exception handler.

  @param  PageTable           Page table base address.
  @param  PFAddress           The memory address which caused the page fault exception.
  @param  CpuIndex            The index of the processor.
  @param  ErrorCode           The error code of the exception.
  @param  IsValidPFAddress    The flag that indicates whether SMM profile data needs to be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64        *PageTable,
  UINT64        PFAddress,
  UINTN         CpuIndex,
  UINTN         ErrorCode,
  BOOLEAN       *IsValidPFAddress
  )
{
  UINTN         PTIndex;
  UINT64        Address;
  BOOLEAN       Nx;
  BOOLEAN       Existed;
  UINTN         Index;
  UINTN         PFIndex;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  //
  // The page fault address is above 4GB.
  //

  //
  // Check whether the page fault address already exists in the page table.
  // If it exists in the page table but a page fault was still generated,
  // there are two possible reasons: 1. the present flag is cleared; 2. an instruction fetch in a protected memory range.
  //
  Existed = FALSE;
  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
    // PML4E
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PDPTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
      // PD
      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
        //
        // 2MB page
        //
        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
          Existed = TRUE;
        }
      } else {
        //
        // 4KB page
        //
        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if (PageTable != NULL) {
          //
          // When there is already a valid entry mapping 4KB pages, there is no need to create a new 2MB mapping.
          //
          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
            Existed = TRUE;
          }
        }
      }
    }
  }

  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {

    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If the page fault address above 4GB is in a protected range but still caused a page fault exception,
      // create a page entry for this address and mark it present/RW and execute-disabled.
      // This access is not saved into the SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in page table for page fault address.
    //
    SmiDefaultPFHandler ();
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML4E
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;
    //
    // Check whether the 2MB-page entry needs to be changed to 4KB-page entries.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | IA32_PG_RW | IA32_PG_P;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
        }
        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }
        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }
        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
      }
      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record the old entries with non-present status.
  // The old entries cover both the page the faulting instruction is on and the page the instruction accesses.
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }

  //
  // Set the present/RW flags and, for instruction-fetch faults, clear the XD flag,
  // so that the faulting instruction can be re-executed successfully.
  //
  PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If page fault is caused by instruction fetch, clear XD bit in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}
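For reference, the BitFieldRead64() calls above implement the standard 4-level x86-64 page walk: bits 47:39 of the linear address select the PML4 entry, bits 38:30 the PDPT entry, bits 29:21 the PD entry, and bits 20:12 the PT entry. Below is a minimal standalone sketch of that index extraction in plain C; SplitLinearAddress and PAGING_INDICES are hypothetical names for illustration, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* The four paging-level indices extracted from a 64-bit linear address,
   mirroring the BitFieldRead64 calls in RestorePageTableAbove4G(). */
typedef struct {
  unsigned Pml4Index;   /* bits 47:39 - selects the PML4 entry */
  unsigned PdptIndex;   /* bits 38:30 - selects the PDPT entry */
  unsigned PdIndex;     /* bits 29:21 - selects the PD entry   */
  unsigned PtIndex;     /* bits 20:12 - selects the PT entry   */
} PAGING_INDICES;

static PAGING_INDICES
SplitLinearAddress (uint64_t Address)
{
  PAGING_INDICES Idx;

  /* Each paging structure has 512 entries, hence the 9-bit (0x1FF) masks. */
  Idx.Pml4Index = (unsigned)((Address >> 39) & 0x1FF);
  Idx.PdptIndex = (unsigned)((Address >> 30) & 0x1FF);
  Idx.PdIndex   = (unsigned)((Address >> 21) & 0x1FF);
  Idx.PtIndex   = (unsigned)((Address >> 12) & 0x1FF);
  return Idx;
}

int
main (void)
{
  /* Example fault address above 4GB. */
  uint64_t       PFAddress = 0x0000000123456789ull;
  PAGING_INDICES Idx       = SplitLinearAddress (PFAddress);

  printf ("PML4 %u, PDPT %u, PD %u, PT %u\n",
          Idx.Pml4Index, Idx.PdptIndex, Idx.PdIndex, Idx.PtIndex);
  return 0;
}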