Example #1
/**
  Set the cacheability of the 4KB page at the given memory address.

  @param    PageTable              Page table base address
  @param    Address                Page-aligned memory address whose cacheability is to be changed
  @param    Cacheability           Cacheability attribute bits (PAT/CD/WT) to set

**/
VOID
SetCacheability (
  IN      UINT64                    *PageTable,
  IN      UINTN                     Address,
  IN      UINT8                     Cacheability
  )
{
  UINTN   PTIndex;
  VOID    *NewPageTableAddress;
  UINT64  *NewPageTable;
  UINTN   Index;

  ASSERT ((Address & EFI_PAGE_MASK) == 0);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
    ASSERT (PageTable[PTIndex] & IA32_PG_P);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
  }

  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  //
  // A perfect implementation would compare the original cacheability with the
  // one being set, and break the 2M page entry into 4KB pieces only when they
  // disagree.
  //
  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Allocate a page from SMRAM
    //
    NewPageTableAddress = AllocatePageTableMemory (1);
    ASSERT (NewPageTableAddress != NULL);

    NewPageTable = (UINT64 *)NewPageTableAddress;

    for (Index = 0; Index < 0x200; Index++) {
      NewPageTable[Index] = PageTable[PTIndex];
      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
      }
      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
    }

    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
  PageTable[PTIndex] |= (UINT64)Cacheability;
}
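
For illustration, a minimal usage sketch (not from the source; the helper name and parameters are hypothetical): it walks a page-aligned range and calls SetCacheability () above on each 4KB page to mark it cache-disabled, assuming the standard IA32_PG_CD, EFI_PAGE_MASK, and EFI_PAGE_SIZE definitions.

//
// Hypothetical helper, illustration only: walk a page-aligned range and set
// every 4KB page to cache-disabled using SetCacheability () above.
//
VOID
SetRangeUncacheable (
  IN      UINT64                    *PageTable,
  IN      UINTN                     RangeBase,
  IN      UINTN                     RangeSize
  )
{
  UINTN  Address;

  ASSERT ((RangeBase & EFI_PAGE_MASK) == 0);
  ASSERT ((RangeSize & EFI_PAGE_MASK) == 0);

  for (Address = RangeBase; Address < RangeBase + RangeSize; Address += EFI_PAGE_SIZE) {
    //
    // IA32_PG_CD (bit 4) disables caching for the page.
    //
    SetCacheability (PageTable, Address, IA32_PG_CD);
  }
}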
Example #2
/**
  Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64                            *Pml4;
  UINT64                            *Pde;
  UINT64                            *Pte;
  UINT64                            *Pt;
  UINTN                             Address;
  UINTN                             Level1;
  UINTN                             Level2;
  UINTN                             Level3;
  UINTN                             Level4;
  UINTN                             NumberOfPdpEntries;
  UINTN                             NumberOfPml4Entries;
  UINTN                             SizeOfMemorySpace;
  BOOLEAN                           Nx;

  if (sizeof (UINTN) == sizeof (UINT64)) {
    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    if (SizeOfMemorySpace <= 39 ) {
      NumberOfPml4Entries = 1;
      NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
    } else {
      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
      NumberOfPdpEntries = 512;
    }
  } else {
    NumberOfPml4Entries = 1;
    NumberOfPdpEntries  = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, skip it
        //
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        Address = (((Level2 << 9) + Level3) << 21);

        //
        // If it is 2M page, check IsAddressSplit()
        //
        if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
          //
          // Based on current page table, create 4KB page table for split area.
          //
          ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));

          Pt = AllocatePageTableMemory (1);
          ASSERT (Pt != NULL);

          // Split it
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {
            Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
          } // end for PT
          *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        } // end if IsAddressSplit
      } // end for PTE
    } // end for PDE
  }

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, set NX bit and skip it
        //
        if (mXdSupported) {
          *Pde = *Pde | IA32_PG_NX;
        }
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        Address = (((Level2 << 9) + Level3) << 21);

        if ((*Pte & IA32_PG_PS) != 0) {
          // 2MB page

          if (!IsAddressValid (Address, &Nx)) {
            //
            // Patch to remove Present flag and RW flag
            //
            *Pte = *Pte & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
          }
          if (Nx && mXdSupported) {
            *Pte = *Pte | IA32_PG_NX;
          }
        } else {
          // 4KB page
          Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (Pt == 0) {
            continue;
          }
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
            if (!IsAddressValid (Address, &Nx)) {
              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pt = *Pt | IA32_PG_NX;
            }
            Address += SIZE_4KB;
          } // end for PT
        } // end if PS
      } // end for PTE
    } // end for PDE
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return ;
}
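
A note on the address arithmetic above: Address = (((Level2 << 9) + Level3) << 21) rebuilds the 2MB-aligned address covered by PDPT entry Level2 and PD entry Level3 (Level1 does not enter the computation, so the value is the offset within the current 512GB region). A small sketch, illustrative only and with a hypothetical helper name, checks that this round-trips through the shift-and-mask index extraction used in Example #1:

//
// Illustrative sketch (hypothetical helper): the 2MB-aligned address built by
// InitPaging round-trips back to the same PDPT/PD indices via the shift/mask
// form used elsewhere in these listings (bits 38:30 and 29:21).
//
VOID
CheckPdptPdIndexRoundTrip (
  IN UINTN  Level2,    // PDPT index, 0..511
  IN UINTN  Level3     // PD index,   0..511
  )
{
  UINT64  Address;

  Address = LShiftU64 ((Level2 << 9) + Level3, 21);

  ASSERT (((UINTN)RShiftU64 (Address, 21) & 0x1FF) == Level3);
  ASSERT (((UINTN)RShiftU64 (Address, 30) & 0x1FF) == Level2);
}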
Example #3
/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler.
    // NOTE: the original listing is truncated at this point; the remainder of
    // the function below is reconstructed from the upstream edk2
    // PiSmmCpuDxeSmm implementation.
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to be loaded into CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
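
For clarity, a minimal sketch (hypothetical helper, not part of the module) of the offset packing used for the page-fault IDT gate above: the 64-bit handler address is split across OffsetLow, OffsetHigh, and OffsetUpper, and reassembling the three fields yields the original address. IA32_IDT_GATE_DESCRIPTOR is assumed to be the x64 layout from MdePkg.

//
// Hypothetical helper, illustration only: pack a 64-bit handler address into
// an x64 IDT gate descriptor the same way SmmInitPageTable () does, then
// check that the three offset fields reassemble to the original address.
//
VOID
SetIdtGateOffset (
  IN OUT IA32_IDT_GATE_DESCRIPTOR   *IdtEntry,
  IN     UINTN                      HandlerAddress
  )
{
  IdtEntry->Bits.OffsetLow   = (UINT16)HandlerAddress;          // bits 15:0
  IdtEntry->Bits.OffsetHigh  = (UINT16)(HandlerAddress >> 16);  // bits 31:16
  IdtEntry->Bits.OffsetUpper = (UINT32)(HandlerAddress >> 32);  // bits 63:32

  ASSERT ((((UINT64)IdtEntry->Bits.OffsetUpper << 32) |
           ((UINT64)IdtEntry->Bits.OffsetHigh  << 16) |
            (UINT64)IdtEntry->Bits.OffsetLow) == (UINT64)HandlerAddress);
}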
Example #4
/**
  Set static page table.

  @param[in] PageTable     Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                                        PageAddress;
  UINTN                                         NumberOfPml4EntriesNeeded;
  UINTN                                         NumberOfPdpEntriesNeeded;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT64                                        *PageMapLevel4Entry;
  UINT64                                        *PageMap;
  UINT64                                        *PageDirectoryPointerEntry;
  UINT64                                        *PageDirectory1GEntry;
  UINT64                                        *PageDirectoryEntry;

  if (mPhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // By architecture only one PageMapLevel4 table exists; here it is the page table passed in by the caller.
  //
  PageMap         = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress        = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask)  | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // Allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in the Page Directory Pointer Entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}
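
A worked example of the entry-count calculation at the top of SetStaticPageTable, with illustrative address widths: each PML4 entry spans 2^39 bytes (512GB) and each PDPT entry spans 2^30 bytes (1GB).

//
// Worked examples for the NumberOfPml4EntriesNeeded / NumberOfPdpEntriesNeeded
// calculation (illustrative address widths, not taken from the source):
//
//   mPhysicalAddressBits = 36 (64GB):  36 <= 39, so
//     NumberOfPml4EntriesNeeded = 1
//     NumberOfPdpEntriesNeeded  = 1 << (36 - 30) = 64     // 64 x 1GB
//
//   mPhysicalAddressBits = 48 (256TB): 48 > 39, so
//     NumberOfPml4EntriesNeeded = 1 << (48 - 39) = 512    // 512 x 512GB
//     NumberOfPdpEntriesNeeded  = 512                     // each PDPT fully populated
//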
Example #5
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 4G page table.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of page table.

**/
UINTN
Create4GPageTablesIa32Pae (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PhysicalAddress;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         PageAddress;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is confined to the smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PhysicalAddressBits = 32;

  //
  // Calculate the table entries needed.
  //
  NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 30));

  TotalPagesNum = NumberOfPdpEntriesNeeded + 1;
  PageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
  ASSERT (PageAddress != 0);

  PageMap = (VOID *) PageAddress;
  PageAddress += SIZE_4KB;

  PageDirectoryPointerEntry = PageMap;
  PhysicalAddress = 0;

  for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    //
    // Each Directory Pointer entry points to a page of Page Directory entries.
    // Allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
    //
    PageDirectoryEntry = (VOID *) PageAddress;
    PageAddress += SIZE_4KB;

    //
    // Fill in the Page Directory Pointer Entry
    //
    PageDirectoryPointerEntry->Uint64 = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask;
    PageDirectoryPointerEntry->Bits.Present = 1;

    for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress += SIZE_2MB) {
      if ((IsNullDetectionEnabled () && PhysicalAddress == 0)
          || ((PhysicalAddress < StackBase + StackSize)
              && ((PhysicalAddress + SIZE_2MB) > StackBase))) {
        //
        // Need to split this 2M page that covers stack range.
        //
        Split2MPageTo4K (PhysicalAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
      } else {
        //
        // Fill in the Page Directory entries
        //
        PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress | AddressEncMask;
        PageDirectoryEntry->Bits.ReadWrite = 1;
        PageDirectoryEntry->Bits.Present = 1;
        PageDirectoryEntry->Bits.MustBe1 = 1;
      }
    }
  }

  for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    ZeroMem (
      PageDirectoryPointerEntry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, FALSE);

  return (UINTN) PageMap;
}
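
The page-count arithmetic for this PAE case is small enough to spell out; the values below follow directly from the code above.

//
// Worked page-count example for Create4GPageTablesIa32Pae:
//
//   PhysicalAddressBits      = 32
//   NumberOfPdpEntriesNeeded = 1 << (32 - 30) = 4    // four 1GB regions
//   TotalPagesNum            = 4 + 1 = 5             // one PDPT page + four PD pages
//
//   Coverage: 4 page directories x 512 PDEs x 2MB = 4GB of identity-mapped space.
//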
Example #6
/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                   (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress+= EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 by leaving the Present bit clear
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
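
To make the AllocatePageTableMemory (5 + PagesNeeded) call easier to follow, here is the layout of that block as the code above uses it (a summary of the listing, not an additional allocation):

//
// Layout of the AllocatePageTableMemory (5 + PagesNeeded) block:
//
//   page 0                 : PDPT; Pte[0..3] point to pages 1..4
//   pages 1..4             : page directories; 4 x 512 x 2MB entries = 4GB identity map
//   pages 5..4+PagesNeeded : 4KB page tables that replace the 2MB entries covering
//                            [Low2MBoundary, High2MBoundary] when PcdCpuSmmStackGuard
//                            is set (the guard pages are left non-present there)
//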
Example #7
/**
  Create 4G PageTable in SMRAM.

  @param          ExtraPages       Number of extra pages to reserve immediately before the 4G page table
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      UINTN                     ExtraPages
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + PAGE_ATTRIBUTE_BITS;
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress+= EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}