//
// NOTE(review): the return type of this function precedes this chunk and is
// not visible here; presumably VOID * -- confirm against the full file.
//
EFIAPI
AllocateAlignedRuntimePages (
  IN UINTN  Pages,      // Number of pages to allocate.
  IN UINTN  Alignment   // Requested alignment in bytes.
  )
{
  VOID  *Buffer;

  //
  // Delegate the aligned allocation of EfiRuntimeServicesData pages to the
  // internal worker; NULL is returned on failure.
  //
  Buffer = InternalAllocateAlignedPages (EfiRuntimeServicesData, Pages, Alignment);
  if (Buffer != NULL) {
    //
    // Record successful allocations in the memory profile, tagged with the
    // caller's return address for attribution.
    //
    MemoryProfileLibRecord (
      (PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS(0),
      MEMORY_PROFILE_ACTION_LIB_ALLOCATE_ALIGNED_RUNTIME_PAGES,
      EfiRuntimeServicesData,
      Buffer,
      EFI_PAGES_TO_SIZE (Pages),
      NULL
      );
  }

  return Buffer;
}
/**
  Adjust the base and number of pages to really allocate according to Guard.

  @param[in,out] Memory         Base address of free memory.
  @param[in,out] NumberOfPages  Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  UINTN                 Pages;

  //
  // FindFreePages() has already taken the Guard into account. It's safe to
  // adjust the start address and/or number of pages here, to make sure that
  // the Guards are also "allocated".
  //
  Start = *Memory;
  Pages = *NumberOfPages;

  if (!IsGuardPage (Start + EFI_PAGES_TO_SIZE (Pages))) {
    //
    // No tail Guard present; extend the range by one page to cover it.
    //
    Pages++;
  }

  if (!IsGuardPage (Start - EFI_PAGE_SIZE)) {
    //
    // No head Guard present; move the base down one page and grow the range.
    //
    Start -= EFI_PAGE_SIZE;
    Pages++;
  }

  *Memory        = Start;
  *NumberOfPages = Pages;
}
/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINTN
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   BitCount;
  UINTN   BitmapValue;
  UINTN   BitPosition;
  UINTN   BitsToUnitEnd;

  //
  // The whole result must fit in one map entry's worth of bits.
  //
  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  BitmapValue = 0;
  BitPosition = 0;

  while (NumberOfPages > 0) {
    //
    // Locate the map unit covering Address. BitsToUnitEnd is how many bits
    // remain in that unit, so a single request may span multiple units.
    //
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
    BitCount      = (NumberOfPages > BitsToUnitEnd) ? BitsToUnitEnd : NumberOfPages;

    if (BitMap != NULL) {
      //
      // Merge this unit's bits into the result at the running bit position.
      //
      BitmapValue |= LShiftU64 (GetBits (Address, BitCount, BitMap), BitPosition);
    }

    BitPosition   += BitCount;
    NumberOfPages -= BitCount;
    Address       += EFI_PAGES_TO_SIZE (BitCount);
  }

  return BitmapValue;
}
/**
  Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
  OperationBusMasterCommonBuffer64 mapping.

  @param This         The PPI instance pointer.
  @param MemoryType   The type of memory to allocate, EfiBootServicesData or
                      EfiRuntimeServicesData.
  @param Pages        The number of pages to allocate.
  @param HostAddress  A pointer to store the base system memory address of the
                      allocated range.
  @param Attributes   The requested bit mask of attributes for the allocated
                      range.

  @retval EFI_SUCCESS            The requested memory pages were allocated.
  @retval EFI_UNSUPPORTED        Attributes is unsupported. The only legal
                                 attribute bits are MEMORY_WRITE_COMBINE,
                                 MEMORY_CACHED and DUAL_ADDRESS_CYCLE.
  @retval EFI_INVALID_PARAMETER  One or more parameters are invalid.
  @retval EFI_OUT_OF_RESOURCES   The memory pages could not be allocated.
  @retval EFI_NOT_AVAILABLE_YET  DMA protection has been enabled, but DMA
                                 buffer are not available to be allocated yet.
**/
EFI_STATUS
EFIAPI
PeiIoMmuAllocateBuffer (
  IN EDKII_IOMMU_PPI  *This,
  IN EFI_MEMORY_TYPE  MemoryType,
  IN UINTN            Pages,
  IN OUT VOID         **HostAddress,
  IN UINT64           Attributes
  )
{
  UINTN            ByteCount;
  VOID             *Hob;
  DMA_BUFFER_INFO  *DmaBufferInfo;

  Hob           = GetFirstGuidHob (&mDmaBufferInfoGuid);
  DmaBufferInfo = GET_GUID_HOB_DATA(Hob);

  DEBUG ((DEBUG_VERBOSE, "PeiIoMmuAllocateBuffer - page - %x\n", Pages));
  DEBUG ((DEBUG_VERBOSE, " DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
  DEBUG ((DEBUG_VERBOSE, " DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));

  //
  // A zero top means the DMA buffer window has not been set up yet.
  //
  if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
    return EFI_NOT_AVAILABLE_YET;
  }

  ByteCount = EFI_PAGES_TO_SIZE(Pages);
  if (ByteCount > DmaBufferInfo->DmaBufferCurrentTop - DmaBufferInfo->DmaBufferCurrentBottom) {
    DEBUG ((DEBUG_ERROR, "PeiIoMmuAllocateBuffer - OUT_OF_RESOURCE\n"));
    ASSERT (FALSE);
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // Carve the allocation from the top of the remaining DMA window and move
  // the top down accordingly.
  //
  *HostAddress = (VOID *)(UINTN)(DmaBufferInfo->DmaBufferCurrentTop - ByteCount);
  DmaBufferInfo->DmaBufferCurrentTop -= ByteCount;

  DEBUG ((DEBUG_VERBOSE, "PeiIoMmuAllocateBuffer - allocate - %x\n", *HostAddress));
  return EFI_SUCCESS;
}
/**
  This function finds the table specified by the buffer.

  @param[in] Buffer  Table buffer to find.

  @return ACPI table list.
**/
EFI_ACPI_TABLE_LIST *
FindTableByBuffer (
  IN VOID  *Buffer
  )
{
  EFI_ACPI_TABLE_INSTANCE  *AcpiTableInstance;
  LIST_ENTRY               *Link;
  EFI_ACPI_TABLE_LIST      *TableEntry;
  LIST_ENTRY               *ListHead;
  UINTN                    RangeBase;
  UINTN                    RangeEnd;

  //
  // Get the instance of the ACPI Table
  //
  AcpiTableInstance = SdtGetAcpiTableInstance ();

  //
  // Walk the table list looking for the entry whose page range contains
  // Buffer (base inclusive, end exclusive).
  //
  ListHead = &AcpiTableInstance->TableList;
  for (Link = ListHead->ForwardLink; Link != ListHead; Link = Link->ForwardLink) {
    TableEntry = EFI_ACPI_TABLE_LIST_FROM_LINK (Link);
    RangeBase  = (UINTN)TableEntry->PageAddress;
    RangeEnd   = RangeBase + EFI_PAGES_TO_SIZE ((UINTN)TableEntry->NumberOfPages);
    if ((RangeBase <= (UINTN)Buffer) && (RangeEnd > (UINTN)Buffer)) {
      //
      // Found the owning table entry.
      //
      return TableEntry;
    }
  }

  //
  // Buffer does not belong to any tracked ACPI table allocation.
  //
  return NULL;
}
/**
  Get SMI handler profile database.

  Locates a conventional-memory region from the PI SMM communication region
  table, then issues two SMM communicate requests against the SMI handler
  profile handler: GET_INFO to learn the database size, and a loop of
  GET_DATA_BY_OFFSET calls to copy the database chunk-by-chunk into a pool
  buffer (mSmiHandlerProfileDatabase / mSmiHandlerProfileDatabaseSize).
**/
VOID
GetSmiHandlerProfileDatabase(
  VOID
  )
{
  EFI_STATUS                                        Status;
  UINTN                                             CommSize;
  UINT8                                             *CommBuffer;
  EFI_SMM_COMMUNICATE_HEADER                        *CommHeader;
  SMI_HANDLER_PROFILE_PARAMETER_GET_INFO            *CommGetInfo;
  SMI_HANDLER_PROFILE_PARAMETER_GET_DATA_BY_OFFSET  *CommGetData;
  EFI_SMM_COMMUNICATION_PROTOCOL                    *SmmCommunication;
  UINTN                                             MinimalSizeNeeded;
  EDKII_PI_SMM_COMMUNICATION_REGION_TABLE           *PiSmmCommunicationRegionTable;
  UINT32                                            Index;
  EFI_MEMORY_DESCRIPTOR                             *Entry;
  VOID                                              *Buffer;
  UINTN                                             Size;
  UINTN                                             Offset;

  Status = gBS->LocateProtocol(&gEfiSmmCommunicationProtocolGuid, NULL, (VOID **)&SmmCommunication);
  if (EFI_ERROR(Status)) {
    Print(L"SmiHandlerProfile: Locate SmmCommunication protocol - %r\n", Status);
    return ;
  }

  //
  // The communication buffer must hold at least one page (header + request).
  //
  MinimalSizeNeeded = EFI_PAGE_SIZE;

  Status = EfiGetSystemConfigurationTable(
             &gEdkiiPiSmmCommunicationRegionTableGuid,
             (VOID **)&PiSmmCommunicationRegionTable
             );
  if (EFI_ERROR(Status)) {
    Print(L"SmiHandlerProfile: Get PiSmmCommunicationRegionTable - %r\n", Status);
    return ;
  }
  ASSERT(PiSmmCommunicationRegionTable != NULL);

  //
  // Scan the region table for a conventional-memory descriptor that is big
  // enough for our communication buffer; Size ends up as that region's size.
  //
  Entry = (EFI_MEMORY_DESCRIPTOR *)(PiSmmCommunicationRegionTable + 1);
  Size = 0;
  for (Index = 0; Index < PiSmmCommunicationRegionTable->NumberOfEntries; Index++) {
    if (Entry->Type == EfiConventionalMemory) {
      Size = EFI_PAGES_TO_SIZE((UINTN)Entry->NumberOfPages);
      if (Size >= MinimalSizeNeeded) {
        break;
      }
    }
    Entry = (EFI_MEMORY_DESCRIPTOR *)((UINT8 *)Entry + PiSmmCommunicationRegionTable->DescriptorSize);
  }
  ASSERT(Index < PiSmmCommunicationRegionTable->NumberOfEntries);
  CommBuffer = (UINT8 *)(UINTN)Entry->PhysicalStart;

  //
  // Get Size: issue GET_INFO; on success the handler fills in DataSize.
  //
  CommHeader = (EFI_SMM_COMMUNICATE_HEADER *)&CommBuffer[0];
  CopyMem(&CommHeader->HeaderGuid, &gSmiHandlerProfileGuid, sizeof(gSmiHandlerProfileGuid));
  CommHeader->MessageLength = sizeof(SMI_HANDLER_PROFILE_PARAMETER_GET_INFO);

  CommGetInfo = (SMI_HANDLER_PROFILE_PARAMETER_GET_INFO *)&CommBuffer[OFFSET_OF(EFI_SMM_COMMUNICATE_HEADER, Data)];
  CommGetInfo->Header.Command = SMI_HANDLER_PROFILE_COMMAND_GET_INFO;
  CommGetInfo->Header.DataLength = sizeof(*CommGetInfo);
  // ReturnStatus is pre-set to an impossible value so we can detect whether
  // the SMM handler actually processed the request.
  CommGetInfo->Header.ReturnStatus = (UINT64)-1;
  CommGetInfo->DataSize = 0;

  CommSize = sizeof(EFI_GUID) + sizeof(UINTN) + CommHeader->MessageLength;
  Status = SmmCommunication->Communicate(SmmCommunication, CommBuffer, &CommSize);
  if (EFI_ERROR(Status)) {
    Print(L"SmiHandlerProfile: SmmCommunication - %r\n", Status);
    return ;
  }

  if (CommGetInfo->Header.ReturnStatus != 0) {
    Print(L"SmiHandlerProfile: GetInfo - 0x%0x\n", CommGetInfo->Header.ReturnStatus);
    return ;
  }

  mSmiHandlerProfileDatabaseSize = (UINTN)CommGetInfo->DataSize;

  //
  // Get Data: allocate the destination buffer, then pull the database out of
  // SMM in chunks that fit in the remainder of the communication region.
  //
  mSmiHandlerProfileDatabase = AllocateZeroPool(mSmiHandlerProfileDatabaseSize);
  if (mSmiHandlerProfileDatabase == NULL) {
    Status = EFI_OUT_OF_RESOURCES;
    Print(L"SmiHandlerProfile: AllocateZeroPool (0x%x) for dump buffer - %r\n", mSmiHandlerProfileDatabaseSize, Status);
    return ;
  }

  CommHeader = (EFI_SMM_COMMUNICATE_HEADER *)&CommBuffer[0];
  CopyMem(&CommHeader->HeaderGuid, &gSmiHandlerProfileGuid, sizeof(gSmiHandlerProfileGuid));
  CommHeader->MessageLength = sizeof(SMI_HANDLER_PROFILE_PARAMETER_GET_DATA_BY_OFFSET);

  CommGetData = (SMI_HANDLER_PROFILE_PARAMETER_GET_DATA_BY_OFFSET *)&CommBuffer[OFFSET_OF(EFI_SMM_COMMUNICATE_HEADER, Data)];
  CommGetData->Header.Command = SMI_HANDLER_PROFILE_COMMAND_GET_DATA_BY_OFFSET;
  CommGetData->Header.DataLength = sizeof(*CommGetData);
  CommGetData->Header.ReturnStatus = (UINT64)-1;

  //
  // Buffer is the scratch area inside the communication region just past the
  // request; Size shrinks to the per-call chunk capacity.
  //
  CommSize = sizeof(EFI_GUID) + sizeof(UINTN) + CommHeader->MessageLength;
  Buffer = (UINT8 *)CommHeader + CommSize;
  Size -= CommSize;

  CommGetData->DataBuffer = (PHYSICAL_ADDRESS)(UINTN)Buffer;
  // DataOffset doubles as loop cursor: the SMM handler advances it to the
  // offset of the next chunk on every successful call.
  CommGetData->DataOffset = 0;
  while (CommGetData->DataOffset < mSmiHandlerProfileDatabaseSize) {
    Offset = (UINTN)CommGetData->DataOffset;
    if (Size <= (mSmiHandlerProfileDatabaseSize - CommGetData->DataOffset)) {
      CommGetData->DataSize = (UINT64)Size;
    } else {
      CommGetData->DataSize = (UINT64)(mSmiHandlerProfileDatabaseSize - CommGetData->DataOffset);
    }
    Status = SmmCommunication->Communicate(SmmCommunication, CommBuffer, &CommSize);
    ASSERT_EFI_ERROR(Status);

    if (CommGetData->Header.ReturnStatus != 0) {
      // Abort on handler error: release the partial database.
      FreePool(mSmiHandlerProfileDatabase);
      mSmiHandlerProfileDatabase = NULL;
      Print(L"SmiHandlerProfile: GetData - 0x%x\n", CommGetData->Header.ReturnStatus);
      return ;
    }
    // Copy the chunk we just received to its place in the local database.
    CopyMem((UINT8 *)mSmiHandlerProfileDatabase + Offset, (VOID *)(UINTN)CommGetData->DataBuffer, (UINTN)CommGetData->DataSize);
  }

  DEBUG ((DEBUG_INFO, "SmiHandlerProfileSize - 0x%x\n", mSmiHandlerProfileDatabaseSize));

  return ;
}
/** Allocates pages at a specified alignment that are suitable for an EfiPciIoOperationBusMasterCommonBuffer mapping. If Alignment is not a power of two and Alignment is not zero, then ASSERT(). @param PciIo The PciIo that can be used to access the host controller. @param Pages The number of pages to allocate. @param Alignment The requested alignment of the allocation. Must be a power of two. @param HostAddress The system memory address to map to the PCI controller. @param DeviceAddress The resulting map address for the bus master PCI controller to use to access the hosts HostAddress. @param Mapping A resulting value to pass to Unmap(). @retval EFI_SUCCESS Success to allocate aligned pages. @retval EFI_INVALID_PARAMETER Pages or Alignment is not valid. @retval EFI_OUT_OF_RESOURCES Do not have enough resources to allocate memory. **/ EFI_STATUS UsbHcAllocateAlignedPages ( IN EFI_PCI_IO_PROTOCOL *PciIo, IN UINTN Pages, IN UINTN Alignment, OUT VOID **HostAddress, OUT EFI_PHYSICAL_ADDRESS *DeviceAddress, OUT VOID **Mapping ) { EFI_STATUS Status; VOID *Memory; UINTN AlignedMemory; UINTN AlignmentMask; UINTN UnalignedPages; UINTN RealPages; UINTN Bytes; // // Alignment must be a power of two or zero. // ASSERT ((Alignment & (Alignment - 1)) == 0); if ((Alignment & (Alignment - 1)) != 0) { return EFI_INVALID_PARAMETER; } if (Pages == 0) { return EFI_INVALID_PARAMETER; } if (Alignment > EFI_PAGE_SIZE) { // // Calculate the total number of pages since alignment is larger than page size. // AlignmentMask = Alignment - 1; RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment); // // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow. 
// ASSERT (RealPages > Pages); Status = PciIo->AllocateBuffer ( PciIo, AllocateAnyPages, EfiBootServicesData, Pages, &Memory, 0 ); if (EFI_ERROR (Status)) { return EFI_OUT_OF_RESOURCES; } AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask; UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory); if (UnalignedPages > 0) { // // Free first unaligned page(s). // Status = PciIo->FreeBuffer (PciIo, UnalignedPages, Memory); ASSERT_EFI_ERROR (Status); } Memory = (VOID *)(UINTN)(AlignedMemory + EFI_PAGES_TO_SIZE (Pages)); UnalignedPages = RealPages - Pages - UnalignedPages; if (UnalignedPages > 0) { // // Free last unaligned page(s). // Status = PciIo->FreeBuffer (PciIo, UnalignedPages, Memory); ASSERT_EFI_ERROR (Status); } } else { // // Do not over-allocate pages in this case. // Status = PciIo->AllocateBuffer ( PciIo, AllocateAnyPages, EfiBootServicesData, Pages, &Memory, 0 ); if (EFI_ERROR (Status)) { return EFI_OUT_OF_RESOURCES; } AlignedMemory = (UINTN) Memory; } Bytes = EFI_PAGES_TO_SIZE (Pages); Status = PciIo->Map ( PciIo, EfiPciIoOperationBusMasterCommonBuffer, (VOID *) AlignedMemory, &Bytes, DeviceAddress, Mapping ); if (EFI_ERROR (Status) || (Bytes != EFI_PAGES_TO_SIZE (Pages))) { Status = PciIo->FreeBuffer (PciIo, Pages, (VOID *) AlignedMemory); return EFI_OUT_OF_RESOURCES; } *HostAddress = (VOID *) AlignedMemory; return EFI_SUCCESS; }
/**
  Allocate a block of memory to be used by the buffer pool.

  @param  Pool   The buffer pool to allocate memory for.
  @param  Pages  How many pages to allocate.

  @return The allocated memory block or NULL if failed.
**/
USBHC_MEM_BLOCK *
UsbHcAllocMemBlock (
  IN USBHC_MEM_POOL  *Pool,
  IN UINTN           Pages
  )
{
  USBHC_MEM_BLOCK       *Block;
  EFI_PCI_IO_PROTOCOL   *PciIo;
  VOID                  *HostBuffer;
  VOID                  *Mapping;
  EFI_PHYSICAL_ADDRESS  MappedAddr;
  UINTN                 ByteCount;
  EFI_STATUS            Status;

  PciIo = Pool->PciIo;

  Block = AllocateZeroPool (sizeof (USBHC_MEM_BLOCK));
  if (Block == NULL) {
    return NULL;
  }

  //
  // Each bit in the bit array tracks USBHC_MEM_UNIT bytes of memory in the
  // memory block.
  //
  ASSERT (USBHC_MEM_UNIT * 8 <= EFI_PAGE_SIZE);

  Block->BufLen  = EFI_PAGES_TO_SIZE (Pages);
  Block->BitsLen = Block->BufLen / (USBHC_MEM_UNIT * 8);
  Block->Bits    = AllocateZeroPool (Block->BitsLen);
  if (Block->Bits == NULL) {
    gBS->FreePool (Block);
    return NULL;
  }

  //
  // Allocate the requested pages, then map them for bus master read/write.
  //
  Status = PciIo->AllocateBuffer (
                    PciIo,
                    AllocateAnyPages,
                    EfiBootServicesData,
                    Pages,
                    &HostBuffer,
                    0
                    );
  if (EFI_ERROR (Status)) {
    goto ON_FREE_BITS;
  }

  ByteCount = EFI_PAGES_TO_SIZE (Pages);
  Status    = PciIo->Map (
                       PciIo,
                       EfiPciIoOperationBusMasterCommonBuffer,
                       HostBuffer,
                       &ByteCount,
                       &MappedAddr,
                       &Mapping
                       );
  //
  // The mapping must cover every requested byte in a single run.
  //
  if (EFI_ERROR (Status) || (ByteCount != EFI_PAGES_TO_SIZE (Pages))) {
    goto ON_FREE_BUFFER;
  }

  Block->BufHost = HostBuffer;
  Block->Buf     = (UINT8 *) ((UINTN) MappedAddr);
  Block->Mapping = Mapping;

  return Block;

ON_FREE_BUFFER:
  PciIo->FreeBuffer (PciIo, Pages, HostBuffer);

ON_FREE_BITS:
  gBS->FreePool (Block->Bits);
  gBS->FreePool (Block);
  return NULL;
}
/**
  Process a QEMU_LOADER_ALLOCATE command.

  @param[in] Allocate     The QEMU_LOADER_ALLOCATE command to process.

  @param[in,out] Tracker  The ORDERED_COLLECTION tracking the BLOB user
                          structures created thus far.

  @retval EFI_SUCCESS           An area of whole AcpiNVS pages has been
                                allocated for the blob contents, and the
                                contents have been saved. A BLOB object (user
                                structure) has been allocated from pool memory,
                                referencing the blob contents. The BLOB user
                                structure has been linked into Tracker.

  @retval EFI_PROTOCOL_ERROR    Malformed fw_cfg file name has been found in
                                Allocate, or the Allocate command references a
                                file that is already known by Tracker.

  @retval EFI_UNSUPPORTED       Unsupported alignment request has been found in
                                Allocate.

  @retval EFI_OUT_OF_RESOURCES  Pool allocation failed.

  @return                       Error codes from QemuFwCfgFindFile() and
                                gBS->AllocatePages().
**/
STATIC
EFI_STATUS
EFIAPI
ProcessCmdAllocate (
  IN CONST QEMU_LOADER_ALLOCATE *Allocate,
  IN OUT ORDERED_COLLECTION     *Tracker
  )
{
  FIRMWARE_CONFIG_ITEM FwCfgItem;
  UINTN                FwCfgSize;
  EFI_STATUS           Status;
  UINTN                NumPages;
  EFI_PHYSICAL_ADDRESS Address;
  BLOB                 *Blob;

  //
  // The file name must be NUL-terminated within its fixed-size field.
  //
  if (Allocate->File[QEMU_LOADER_FNAME_SIZE - 1] != '\0') {
    DEBUG ((EFI_D_ERROR, "%a: malformed file name\n", __FUNCTION__));
    return EFI_PROTOCOL_ERROR;
  }

  //
  // Page allocation below satisfies any alignment up to EFI_PAGE_SIZE;
  // anything stricter is not supported.
  //
  if (Allocate->Alignment > EFI_PAGE_SIZE) {
    DEBUG ((EFI_D_ERROR, "%a: unsupported alignment 0x%x\n", __FUNCTION__, Allocate->Alignment));
    return EFI_UNSUPPORTED;
  }

  Status = QemuFwCfgFindFile ((CHAR8 *)Allocate->File, &FwCfgItem, &FwCfgSize);
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "%a: QemuFwCfgFindFile(\"%a\"): %r\n", __FUNCTION__, Allocate->File, Status));
    return Status;
  }

  //
  // Allocate whole AcpiNVS pages below 4 GB for the blob contents.
  //
  NumPages = EFI_SIZE_TO_PAGES (FwCfgSize);
  Address = 0xFFFFFFFF;
  Status = gBS->AllocatePages (AllocateMaxAddress, EfiACPIMemoryNVS, NumPages, &Address);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Blob = AllocatePool (sizeof *Blob);
  if (Blob == NULL) {
    Status = EFI_OUT_OF_RESOURCES;
    goto FreePages;
  }
  CopyMem (Blob->File, Allocate->File, QEMU_LOADER_FNAME_SIZE);
  Blob->Size = FwCfgSize;
  Blob->Base = (VOID *)(UINTN)Address;
  // Assume table-data-only hosting until a later command proves otherwise.
  Blob->HostsOnlyTableData = TRUE;

  //
  // Tracker keys blobs by file name; a duplicate insert means the loader
  // script referenced the same file twice, which is a protocol error.
  //
  Status = OrderedCollectionInsert (Tracker, NULL, Blob);
  if (Status == RETURN_ALREADY_STARTED) {
    DEBUG ((EFI_D_ERROR, "%a: duplicated file \"%a\"\n", __FUNCTION__, Allocate->File));
    Status = EFI_PROTOCOL_ERROR;
  }
  if (EFI_ERROR (Status)) {
    goto FreeBlob;
  }

  //
  // Read the blob contents from fw_cfg and zero the tail of the last page.
  //
  QemuFwCfgSelectItem (FwCfgItem);
  QemuFwCfgReadBytes (FwCfgSize, Blob->Base);
  ZeroMem (Blob->Base + Blob->Size, EFI_PAGES_TO_SIZE (NumPages) - Blob->Size);

  DEBUG ((EFI_D_VERBOSE, "%a: File=\"%a\" Alignment=0x%x Zone=%d Size=0x%Lx "
    "Address=0x%Lx\n", __FUNCTION__, Allocate->File, Allocate->Alignment, Allocate->Zone, (UINT64)Blob->Size, (UINT64)(UINTN)Blob->Base));
  return EFI_SUCCESS;

FreeBlob:
  FreePool (Blob);

FreePages:
  gBS->FreePages (Address, NumPages);

  return Status;
}
/**
  Allocate a block of memory to be used by the buffer pool.

  @param  Pages  How many pages to allocate.

  @return Pointer to the allocated memory block or NULL if failed.
**/
USBHC_MEM_BLOCK *
UsbHcAllocMemBlock (
  IN UINTN  Pages
  )
{
  USBHC_MEM_BLOCK       *Block;
  VOID                  *BufHost;
  VOID                  *Mapping;
  EFI_PHYSICAL_ADDRESS  MappedAddr;
  EFI_STATUS            Status;
  UINTN                 PageCount;
  EFI_PHYSICAL_ADDRESS  Address;

  //
  // Each bit in the bit array tracks USBHC_MEM_UNIT bytes of memory in the
  // memory block, so a page of buffer needs at most a page of bits.
  //
  ASSERT (USBHC_MEM_UNIT * 8 <= EFI_PAGE_SIZE);

  //
  // Allocate and zero the block management structure itself.
  //
  PageCount = EFI_SIZE_TO_PAGES (sizeof (USBHC_MEM_BLOCK));
  Status    = PeiServicesAllocatePages (
                EfiBootServicesData,
                PageCount,
                &Address
                );
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  ZeroMem ((VOID *) (UINTN) Address, EFI_PAGES_TO_SIZE (PageCount));

  Block          = (USBHC_MEM_BLOCK *) (UINTN) Address;
  Block->BufLen  = EFI_PAGES_TO_SIZE (Pages);
  Block->BitsLen = Block->BufLen / (USBHC_MEM_UNIT * 8);

  //
  // Allocate and zero the usage bit array.
  //
  PageCount = EFI_SIZE_TO_PAGES (Block->BitsLen);
  Status    = PeiServicesAllocatePages (
                EfiBootServicesData,
                PageCount,
                &Address
                );
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  ZeroMem ((VOID *) (UINTN) Address, EFI_PAGES_TO_SIZE (PageCount));
  Block->Bits = (UINT8 *) (UINTN) Address;

  //
  // Obtain a DMA-capable buffer for the pool through the IOMMU helper.
  //
  Status = IoMmuAllocateBuffer (
             Pages,
             &BufHost,
             &MappedAddr,
             &Mapping
             );
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  ZeroMem ((VOID *) (UINTN) BufHost, EFI_PAGES_TO_SIZE (Pages));

  Block->BufHost = (UINT8 *) (UINTN) BufHost;
  Block->Buf     = (UINT8 *) (UINTN) MappedAddr;
  Block->Mapping = Mapping;
  Block->Next    = NULL;

  return Block;
}
/**
  Create PRP lists for Data transfer which is larger than 2 memory pages.

  @param[in] Private       The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.
  @param[in] PhysicalAddr  The physical base address of Data Buffer.
  @param[in] Pages         The number of pages to be transfered.

  @retval The pointer Value to the first PRP List of the PRP lists.
**/
UINT64
NvmeCreatePrpList (
  IN PEI_NVME_CONTROLLER_PRIVATE_DATA  *Private,
  IN EFI_PHYSICAL_ADDRESS              PhysicalAddr,
  IN UINTN                             Pages
  )
{
  UINTN                 PrpEntryNo;
  UINTN                 PrpListNo;
  UINT64                PrpListBase;
  VOID                  *PrpListHost;
  UINTN                 PrpListIndex;
  UINTN                 PrpEntryIndex;
  UINT64                Remainder;
  EFI_PHYSICAL_ADDRESS  PrpListPhyAddr;
  UINTN                 Bytes;
  UINT8                 *PrpEntry;
  EFI_PHYSICAL_ADDRESS  NewPhyAddr;

  //
  // The number of Prp Entry in a memory page.
  //
  PrpEntryNo = EFI_PAGE_SIZE / sizeof (UINT64);

  //
  // Calculate total PrpList number.
  //
  // NOTE(review): this divides Pages by PrpEntryNo, but each list except the
  // last one spends its final entry on a chain pointer and so only holds
  // PrpEntryNo - 1 data entries. Verify that multi-list transfers cover the
  // full page count (the DXE NvmExpress driver divides by PrpEntryNo - 1).
  //
  PrpListNo = (UINTN) DivU64x64Remainder ((UINT64)Pages, (UINT64)PrpEntryNo, &Remainder);
  if (Remainder != 0) {
    PrpListNo += 1;
  }

  //
  // The implementation uses a fixed pre-allocated area of NVME_PRP_SIZE pages
  // inside Private; reject transfers needing more lists than that.
  //
  if (PrpListNo > NVME_PRP_SIZE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: The implementation only supports PrpList number up to 4."
      " But %d are needed here.\n",
      __FUNCTION__,
      PrpListNo
      ));
    return 0;
  }
  PrpListHost = (VOID *)(UINTN) NVME_PRP_BASE (Private);

  Bytes = EFI_PAGES_TO_SIZE (PrpListNo);
  PrpListPhyAddr = (UINT64)(UINTN)(PrpListHost);

  //
  // Fill all PRP lists except of last one.
  //
  ZeroMem (PrpListHost, Bytes);
  for (PrpListIndex = 0; PrpListIndex < PrpListNo - 1; ++PrpListIndex) {
    PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;

    for (PrpEntryIndex = 0; PrpEntryIndex < PrpEntryNo; ++PrpEntryIndex) {
      PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));
      if (PrpEntryIndex != PrpEntryNo - 1) {
        //
        // Fill all PRP entries except of last one.
        //
        CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));
        PhysicalAddr += EFI_PAGE_SIZE;
      } else {
        //
        // Fill last PRP entries with next PRP List pointer.
        //
        NewPhyAddr = (PrpListPhyAddr + (PrpListIndex + 1) * EFI_PAGE_SIZE);
        CopyMem (PrpEntry, (VOID *)(UINTN) (&NewPhyAddr), sizeof (UINT64));
      }
    }
  }

  //
  // Fill last PRP list: Remainder data entries, or a full list when the
  // division was exact.
  //
  PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;
  for (PrpEntryIndex = 0; PrpEntryIndex < ((Remainder != 0) ? Remainder : PrpEntryNo); ++PrpEntryIndex) {
    PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));
    CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));
    PhysicalAddr += EFI_PAGE_SIZE;
  }

  return PrpListPhyAddr;
}
/**
  Initialize global data for MP support.

  Sets AP stack guard pages (when PcdCpuStackGuard is enabled), allocates a
  reserved below-4GB buffer and stack area that APs can safely use after
  ExitBootServices, and registers the timer/exit-boot-services/legacy-boot
  events that drive AP status checking and the AP loop-mode switch.

  @param[in] CpuMpData  The pointer to CPU MP Data structure.
**/
VOID
InitMpGlobalData (
  IN CPU_MP_DATA  *CpuMpData
  )
{
  EFI_STATUS                       Status;
  EFI_PHYSICAL_ADDRESS             Address;
  UINTN                            ApSafeBufferSize;
  UINTN                            Index;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  MemDesc;
  UINTN                            StackBase;
  CPU_INFO_IN_HOB                  *CpuInfoInHob;

  SaveCpuMpData (CpuMpData);

  if (CpuMpData->CpuCount == 1) {
    //
    // If only BSP exists, return
    //
    return;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    //
    // One extra page at the bottom of the stack is needed for Guard page.
    //
    if (CpuMpData->CpuApStackSize <= EFI_PAGE_SIZE) {
      DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
      ASSERT (FALSE);
    }

    //
    // DXE will reuse stack allocated for APs at PEI phase if it's available.
    // Let's check it here.
    //
    // Note: BSP's stack guard is set at DxeIpl phase. But for the sake of
    // BSP/AP exchange, stack guard for ApTopOfStack of cpu 0 will still be
    // set here.
    //
    CpuInfoInHob = (CPU_INFO_IN_HOB *)(UINTN)CpuMpData->CpuInfoInHob;
    for (Index = 0; Index < CpuMpData->CpuCount; ++Index) {
      //
      // Prefer the PEI-phase stack recorded in the HOB; otherwise compute the
      // stack base from the DXE-allocated stack buffer.
      //
      if (CpuInfoInHob != NULL && CpuInfoInHob[Index].ApTopOfStack != 0) {
        StackBase = (UINTN)CpuInfoInHob[Index].ApTopOfStack - CpuMpData->CpuApStackSize;
      } else {
        StackBase = CpuMpData->Buffer + Index * CpuMpData->CpuApStackSize;
      }

      Status = gDS->GetMemorySpaceDescriptor (StackBase, &MemDesc);
      ASSERT_EFI_ERROR (Status);

      //
      // Mark the lowest stack page read-protected so overflow traps.
      //
      Status = gDS->SetMemorySpaceAttributes (
                      StackBase,
                      EFI_PAGES_TO_SIZE (1),
                      MemDesc.Attributes | EFI_MEMORY_RP
                      );
      ASSERT_EFI_ERROR (Status);

      DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n", (UINT64)StackBase, (UINT64)Index));
    }
  }

  //
  // Avoid APs access invalid buffer data which allocated by BootServices,
  // so we will allocate reserved data for AP loop code. We also need to
  // allocate this buffer below 4GB due to APs may be transferred to 32bit
  // protected mode on long mode DXE.
  // Allocating it in advance since memory services are not available in
  // Exit Boot Services callback function.
  //
  ApSafeBufferSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (
                       CpuMpData->AddressMap.RelocateApLoopFuncSize
                       ));
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (ApSafeBufferSize),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mReservedApLoopFunc = (VOID *) (UINTN) Address;
  ASSERT (mReservedApLoopFunc != NULL);

  //
  // Make sure that the buffer memory is executable if NX protection is enabled
  // for EfiReservedMemoryType.
  //
  // TODO: Check EFI_MEMORY_XP bit set or not once it's available in DXE GCD
  // service.
  //
  Status = gDS->GetMemorySpaceDescriptor (Address, &MemDesc);
  if (!EFI_ERROR (Status)) {
    gDS->SetMemorySpaceAttributes (
           Address,
           ApSafeBufferSize,
           MemDesc.Attributes & (~EFI_MEMORY_XP)
           );
  }

  //
  // Reserve one safe stack per AP, also below 4GB for the same reason.
  //
  ApSafeBufferSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (
                       CpuMpData->CpuCount * AP_SAFE_STACK_SIZE
                       ));
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (ApSafeBufferSize),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mReservedTopOfApStack = (UINTN) Address + ApSafeBufferSize;
  ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);

  //
  // Copy the relocated AP loop routine into the reserved buffer.
  //
  CopyMem (
    mReservedApLoopFunc,
    CpuMpData->AddressMap.RelocateApLoopFuncAddress,
    CpuMpData->AddressMap.RelocateApLoopFuncSize
    );

  Status = gBS->CreateEvent (
                  EVT_TIMER | EVT_NOTIFY_SIGNAL,
                  TPL_NOTIFY,
                  CheckApsStatus,
                  NULL,
                  &mCheckAllApsEvent
                  );
  ASSERT_EFI_ERROR (Status);

  //
  // Set timer to check all APs status.
  //
  Status = gBS->SetTimer (
                  mCheckAllApsEvent,
                  TimerPeriodic,
                  AP_CHECK_INTERVAL
                  );
  ASSERT_EFI_ERROR (Status);

  //
  // Switch APs into the safe loop at ExitBootServices and at legacy boot.
  //
  Status = gBS->CreateEvent (
                  EVT_SIGNAL_EXIT_BOOT_SERVICES,
                  TPL_CALLBACK,
                  MpInitChangeApLoopCallback,
                  NULL,
                  &mMpInitExitBootServicesEvent
                  );
  ASSERT_EFI_ERROR (Status);

  Status = gBS->CreateEventEx (
                  EVT_NOTIFY_SIGNAL,
                  TPL_CALLBACK,
                  MpInitChangeApLoopCallback,
                  NULL,
                  &gEfiEventLegacyBootGuid,
                  &mLegacyBootEvent
                  );
  ASSERT_EFI_ERROR (Status);
}
/**
  Set static page table.

  Builds identity-mapped page table entries for the physical address range
  above 4 GB (entries below 4 GB are skipped -- presumably set up elsewhere;
  confirm against the rest of this module). Uses 1 GB pages when
  m1GPageTableSupport is set, otherwise 2 MB pages with on-demand allocation
  of page-directory pages.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // Size the table by the platform's physical address width: one PML4 entry
  // covers 512 GB (39 bits), one PDPTE covers 1 GB (30 bits).
  //
  if (mPhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress        = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // Reuse an existing PDPT if the entry is already populated; otherwise
    // allocate and link a fresh zeroed page.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask) | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      //
      // Map with 1 GB pages directly from the PDPT.
      //
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      //
      // Map with 2 MB pages; mapping (re)starts at 4 GB since lower entries
      // are skipped.
      //
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Directory Pointer entries points to a page of Page Directory entires.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in a Page Directory Pointer Entries
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}
/** Creates and initializes the DebugImageInfo Table. Also creates the configuration table and registers it into the system table. **/ VOID CoreInitializeDebugImageInfoTable ( VOID ) { EFI_STATUS Status; UINTN Pages; EFI_PHYSICAL_ADDRESS Memory; UINTN AlignedMemory; UINTN AlignmentMask; UINTN UnalignedPages; UINTN RealPages; // // Allocate 4M aligned page for the structure and fill in the data. // Ideally we would update the CRC now as well, but the service may not yet be available. // See comments in the CoreUpdateDebugTableCrc32() function below for details. // Pages = EFI_SIZE_TO_PAGES (sizeof (EFI_SYSTEM_TABLE_POINTER)); AlignmentMask = SIZE_4MB - 1; RealPages = Pages + EFI_SIZE_TO_PAGES (SIZE_4MB); // // Attempt to allocate memory below PcdMaxEfiSystemTablePointerAddress // If PcdMaxEfiSystemTablePointerAddress is 0, then allocate memory below // MAX_ADDRESS // Memory = PcdGet64 (PcdMaxEfiSystemTablePointerAddress); if (Memory == 0) { Memory = MAX_ADDRESS; } Status = CoreAllocatePages ( AllocateMaxAddress, EfiBootServicesData, RealPages, &Memory ); if (EFI_ERROR (Status)) { /* if (PcdGet64 (PcdMaxEfiSystemTablePointerAddress) != 0) { DEBUG ((EFI_D_INFO, "Allocate memory for EFI_SYSTEM_TABLE_POINTER below PcdMaxEfiSystemTablePointerAddress failed. \ Retry to allocate memroy as close to the top of memory as feasible.\n")); } */ // // If the initial memory allocation fails, then reattempt allocation // as close to the top of memory as feasible. // Status = CoreAllocatePages ( AllocateAnyPages, EfiBootServicesData, RealPages, &Memory ); ASSERT_EFI_ERROR (Status); if (EFI_ERROR (Status)) { return; } } // // Free overallocated pages // AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask; UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory); if (UnalignedPages > 0) { // // Free first unaligned page(s). 
// Status = CoreFreePages (Memory, UnalignedPages); ASSERT_EFI_ERROR (Status); } Memory = (EFI_PHYSICAL_ADDRESS)(AlignedMemory + EFI_PAGES_TO_SIZE (Pages)); UnalignedPages = RealPages - Pages - UnalignedPages; if (UnalignedPages > 0) { // // Free last unaligned page(s). // Status = CoreFreePages (Memory, UnalignedPages); ASSERT_EFI_ERROR (Status); } // // Set mDebugTable to the 4MB aligned allocated pages // mDebugTable = (EFI_SYSTEM_TABLE_POINTER *)(AlignedMemory); ASSERT (mDebugTable != NULL); // // Initialize EFI_SYSTEM_TABLE_POINTER structure // mDebugTable->Signature = EFI_SYSTEM_TABLE_SIGNATURE; mDebugTable->EfiSystemTableBase = (EFI_PHYSICAL_ADDRESS) (UINTN) gDxeCoreST; mDebugTable->Crc32 = 0; // // Install the EFI_SYSTEM_TABLE_POINTER structure in the EFI System // Configuration Table // Status = CoreInstallConfigurationTable (&gEfiDebugImageInfoTableGuid, &mDebugInfoTableHeader); ASSERT_EFI_ERROR (Status); }
/**
  Allocate a block of memory to be used by the buffer pool.

  Use Redirect memory services to allocate memmory so that USB DMA transfers
  do not cause IMR violations on Quark.

  NOTE(review): PEI page allocations cannot be freed, so the management
  structure and bit array leak when a later allocation step fails; this
  matches existing behavior and PEI constraints.

  @param  Pool           The buffer pool to allocate memory for.
  @param  Pages          How many pages to allocate.

  @return The allocated memory block or NULL if failed.
**/
USBHC_MEM_BLOCK *
UsbHcAllocMemBlock (
  IN USBHC_MEM_POOL  *Pool,
  IN UINTN           Pages
  )
{
  USBHC_MEM_BLOCK       *Block;
  VOID                  *BufHost;
  VOID                  *Mapping;
  EFI_PHYSICAL_ADDRESS  MappedAddr;
  EFI_STATUS            Status;
  UINTN                 PageNumber;
  EFI_PHYSICAL_ADDRESS  TempPtr;

  Mapping = NULL;

  //
  // Allocate and zero pages for the block management structure.
  //
  PageNumber = sizeof(USBHC_MEM_BLOCK)/PAGESIZE +1;
  Status = PeiServicesAllocatePages (
             EfiBootServicesCode,
             PageNumber,
             &TempPtr
             );
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  ZeroMem ((VOID *)(UINTN)TempPtr, PageNumber*EFI_PAGE_SIZE);

  //
  // each bit in the bit array represents USBHC_MEM_UNIT
  // bytes of memory in the memory block.
  //
  ASSERT (USBHC_MEM_UNIT * 8 <= EFI_PAGE_SIZE);

  Block = (USBHC_MEM_BLOCK*)(UINTN)TempPtr;
  Block->BufLen = EFI_PAGES_TO_SIZE (Pages);
  Block->BitsLen = Block->BufLen / (USBHC_MEM_UNIT * 8);

  //
  // Allocate and zero pages for the usage bit array.
  //
  PageNumber = (Block->BitsLen)/PAGESIZE +1;
  Status = PeiServicesAllocatePages (
             EfiBootServicesCode,
             PageNumber,
             &TempPtr
             );
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  ZeroMem ((VOID *)(UINTN)TempPtr, PageNumber*EFI_PAGE_SIZE);
  Block->Bits = (UINT8 *)(UINTN)TempPtr;

  //
  // Allocate the data buffer itself.
  //
  Status = PeiServicesAllocatePages (
             EfiBootServicesCode,
             Pages,
             &TempPtr
             );
  //
  // BUGFIX: the original code ignored this Status and went on to zero and
  // use TempPtr, which is stale or uninitialized when the allocation fails.
  //
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  ZeroMem ((VOID *)(UINTN)TempPtr, Pages*EFI_PAGE_SIZE);
  BufHost = (VOID *)(UINTN)TempPtr;
  MappedAddr = (EFI_PHYSICAL_ADDRESS) (UINTN) BufHost;

  //
  // Check whether the data structure used by the host controller
  // should be restricted into the same 4G
  //
  if (Pool->Check4G && (Pool->Which4G != USB_HC_HIGH_32BIT (MappedAddr))) {
    return NULL;
  }

  Block->BufHost = BufHost;
  Block->Buf = (UINT8 *) ((UINTN) MappedAddr);
  Block->Mapping = Mapping;
  Block->Next = NULL;

  return Block;
}
/**
  Remove exec permissions from all regions whose type is identified by
  PcdDxeNxMemoryProtectionPolicy.

  Walks the UEFI memory map and applies the policy-mandated attributes to
  every region whose type has a non-zero permission attribute. Also marks
  page 0 (NULL pointer detection) and the guard page at the stack base
  (stack guard) as EFI_MEMORY_RP where enabled, and finally applies the
  EfiConventionalMemory policy to untested GCD reserved regions that are
  present/initialized but not yet in the UEFI memory map.
**/
STATIC
VOID
InitializeDxeNxMemoryProtectionPolicy (
  VOID
  )
{
  UINTN                      MemoryMapSize;
  UINTN                      MapKey;
  UINTN                      DescriptorSize;
  UINT32                     DescriptorVersion;
  EFI_MEMORY_DESCRIPTOR      *MemoryMap;
  EFI_MEMORY_DESCRIPTOR      *MemoryMapEntry;
  EFI_MEMORY_DESCRIPTOR      *MemoryMapEnd;
  EFI_STATUS                 Status;
  UINT64                     Attributes;
  LIST_ENTRY                 *Link;
  EFI_GCD_MAP_ENTRY          *Entry;
  EFI_PEI_HOB_POINTERS       Hob;
  EFI_HOB_MEMORY_ALLOCATION  *MemoryHob;
  EFI_PHYSICAL_ADDRESS       StackBase;

  //
  // Get the EFI memory map.
  //
  MemoryMapSize = 0;
  MemoryMap     = NULL;

  //
  // First call with a zero-size buffer returns the required size;
  // loop because the allocation below can itself grow the map.
  //
  Status = gBS->GetMemoryMap (
                  &MemoryMapSize,
                  MemoryMap,
                  &MapKey,
                  &DescriptorSize,
                  &DescriptorVersion
                  );
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);
  do {
    MemoryMap = (EFI_MEMORY_DESCRIPTOR *) AllocatePool (MemoryMapSize);
    ASSERT (MemoryMap != NULL);
    Status = gBS->GetMemoryMap (
                    &MemoryMapSize,
                    MemoryMap,
                    &MapKey,
                    &DescriptorSize,
                    &DescriptorVersion
                    );
    if (EFI_ERROR (Status)) {
      // Buffer was too small again (or another error): release and retry.
      FreePool (MemoryMap);
    }
  } while (Status == EFI_BUFFER_TOO_SMALL);
  ASSERT_EFI_ERROR (Status);

  StackBase = 0;
  if (PcdGetBool (PcdCpuStackGuard)) {
    //
    // Get the base of stack from Hob.
    //
    Hob.Raw = GetHobList ();
    while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
      MemoryHob = Hob.MemoryAllocation;
      if (CompareGuid(&gEfiHobMemoryAllocStackGuid, &MemoryHob->AllocDescriptor.Name)) {
        DEBUG ((
          DEBUG_INFO,
          "%a: StackBase = 0x%016lx StackSize = 0x%016lx\n",
          __FUNCTION__,
          MemoryHob->AllocDescriptor.MemoryBaseAddress,
          MemoryHob->AllocDescriptor.MemoryLength
          ));

        StackBase = MemoryHob->AllocDescriptor.MemoryBaseAddress;
        //
        // Ensure the base of the stack is page-size aligned.
        //
        ASSERT ((StackBase & EFI_PAGE_MASK) == 0);
        break;
      }
      Hob.Raw = GET_NEXT_HOB (Hob);
    }

    //
    // Ensure the base of stack can be found from Hob when stack guard is
    // enabled.
    //
    ASSERT (StackBase != 0);
  }

  DEBUG ((
    DEBUG_INFO,
    "%a: applying strict permissions to active memory regions\n",
    __FUNCTION__
    ));

  // Coalesce adjacent entries that map to the same permission attributes.
  MergeMemoryMapForProtectionPolicy (MemoryMap, &MemoryMapSize, DescriptorSize);

  MemoryMapEntry = MemoryMap;
  MemoryMapEnd   = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) MemoryMap + MemoryMapSize);
  while ((UINTN) MemoryMapEntry < (UINTN) MemoryMapEnd) {
    Attributes = GetPermissionAttributeForMemoryType (MemoryMapEntry->Type);
    if (Attributes != 0) {
      SetUefiImageMemoryAttributes (
        MemoryMapEntry->PhysicalStart,
        LShiftU64 (MemoryMapEntry->NumberOfPages, EFI_PAGE_SHIFT),
        Attributes);

      //
      // Add EFI_MEMORY_RP attribute for page 0 if NULL pointer detection is
      // enabled.
      //
      if (MemoryMapEntry->PhysicalStart == 0 &&
          PcdGet8 (PcdNullPointerDetectionPropertyMask) != 0) {
        ASSERT (MemoryMapEntry->NumberOfPages > 0);
        SetUefiImageMemoryAttributes (
          0,
          EFI_PAGES_TO_SIZE (1),
          EFI_MEMORY_RP | Attributes);
      }

      //
      // Add EFI_MEMORY_RP attribute for the first page of the stack if stack
      // guard is enabled.
      //
      if (StackBase != 0 &&
          (StackBase >= MemoryMapEntry->PhysicalStart &&
           StackBase < MemoryMapEntry->PhysicalStart +
                       LShiftU64 (MemoryMapEntry->NumberOfPages, EFI_PAGE_SHIFT)) &&
          PcdGetBool (PcdCpuStackGuard)) {
        SetUefiImageMemoryAttributes (
          StackBase,
          EFI_PAGES_TO_SIZE (1),
          EFI_MEMORY_RP | Attributes);
      }
    }
    MemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
  }
  FreePool (MemoryMap);

  //
  // Apply the policy for RAM regions that we know are present and
  // accessible, but have not been added to the UEFI memory map (yet).
  //
  if (GetPermissionAttributeForMemoryType (EfiConventionalMemory) != 0) {
    DEBUG ((
      DEBUG_INFO,
      "%a: applying strict permissions to inactive memory regions\n",
      __FUNCTION__
      ));

    CoreAcquireGcdMemoryLock ();

    Link = mGcdMemorySpaceMap.ForwardLink;
    while (Link != &mGcdMemorySpaceMap) {
      Entry = CR (Link, EFI_GCD_MAP_ENTRY, Link, EFI_GCD_MAP_SIGNATURE);

      //
      // Untested regions: present and initialized but EFI_MEMORY_TESTED
      // is clear in the capabilities mask.
      //
      if (Entry->GcdMemoryType == EfiGcdMemoryTypeReserved &&
          Entry->EndAddress < MAX_ADDRESS &&
          (Entry->Capabilities & (EFI_MEMORY_PRESENT | EFI_MEMORY_INITIALIZED | EFI_MEMORY_TESTED)) ==
            (EFI_MEMORY_PRESENT | EFI_MEMORY_INITIALIZED)) {
        // Preserve the region's cacheability bits, apply the NX policy.
        Attributes = GetPermissionAttributeForMemoryType (EfiConventionalMemory) |
                     (Entry->Attributes & CACHE_ATTRIBUTE_MASK);

        DEBUG ((DEBUG_INFO,
          "Untested GCD memory space region: - 0x%016lx - 0x%016lx (0x%016lx)\n",
          Entry->BaseAddress, Entry->EndAddress - Entry->BaseAddress + 1,
          Attributes));

        ASSERT(gCpu != NULL);
        gCpu->SetMemoryAttributes (gCpu, Entry->BaseAddress,
          Entry->EndAddress - Entry->BaseAddress + 1, Attributes);
      }

      Link = Link->ForwardLink;
    }
    CoreReleaseGcdMemoryLock ();
  }
}
/**
  Allocates one or more 4KB pages of a certain memory type at a specified alignment.

  Allocates the number of 4KB pages specified by Pages of a certain memory type with an alignment
  specified by Alignment. The allocated buffer is returned.  If Pages is 0, then NULL is returned.
  If there is not enough memory at the specified alignment remaining to satisfy the request, then
  NULL is returned.
  If Alignment is not a power of two and Alignment is not zero, then ASSERT().
  If Pages plus EFI_SIZE_TO_PAGES (Alignment) overflows, then ASSERT().

  @param  MemoryType            The type of memory to allocate.
  @param  Pages                 The number of 4 KB pages to allocate.
  @param  Alignment             The requested alignment of the allocation.  Must be a power of two.
                                If Alignment is zero, then byte alignment is used.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
InternalAllocateAlignedPages (
  IN EFI_MEMORY_TYPE  MemoryType,
  IN UINTN            Pages,
  IN UINTN            Alignment
  )
{
  EFI_PHYSICAL_ADDRESS  Memory;
  EFI_PHYSICAL_ADDRESS  AlignedMemory;
  EFI_PEI_HOB_POINTERS  Hob;
  BOOLEAN               SkipBeforeMemHob;
  BOOLEAN               SkipAfterMemHob;
  EFI_PHYSICAL_ADDRESS  HobBaseAddress;
  UINT64                HobLength;
  EFI_MEMORY_TYPE       HobMemoryType;
  UINTN                 TotalPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  //
  // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
  //
  ASSERT (Pages <= (MAX_ADDRESS - EFI_SIZE_TO_PAGES (Alignment)));

  //
  // We would rather waste some memory to save PEI code size.
  // meaning in addition to the requested size for the aligned mem,
  // we simply reserve an overhead memory equal to Alignment (page-aligned), no matter what.
  // The overhead mem size could be reduced later with more involved malloc mechanisms
  // (e.g., something that can detect the alignment boundary before allocating memory or
  // can request that memory be allocated at a certain address that is already aligned).
  //
  TotalPages = Pages + (Alignment <= EFI_PAGE_SIZE ? 0 : EFI_SIZE_TO_PAGES(Alignment));
  Memory = (EFI_PHYSICAL_ADDRESS) (UINTN) InternalAllocatePages (MemoryType, TotalPages);
  if (Memory == 0) {
    DEBUG((DEBUG_INFO, "Out of memory resource! \n"));
    return NULL;
  }
  DEBUG ((DEBUG_INFO, "Allocated Memory unaligned: Address = 0x%LX, Pages = 0x%X, Type = %d \n", Memory, TotalPages, (UINTN) MemoryType));

  //
  // Alignment calculation
  //
  AlignedMemory = Memory;
  if (Alignment > EFI_PAGE_SIZE) {
    AlignedMemory = ALIGN_VALUE (Memory, Alignment);
  }
  DEBUG ((DEBUG_INFO, "After aligning to 0x%X bytes: Address = 0x%LX, Pages = 0x%X \n", Alignment, AlignedMemory, Pages));

  //
  // In general three HOBs cover the total allocated space.
  // The aligned portion is covered by the aligned mem HOB and
  // the unaligned(to be freed) portions before and after the aligned portion are covered by newly created HOBs.
  //
  // Before mem HOB covers the region between "Memory" and "AlignedMemory"
  // Aligned mem HOB covers the region between "AlignedMemory" and "AlignedMemory + EFI_PAGES_TO_SIZE(Pages)"
  // After mem HOB covers the region between "AlignedMemory + EFI_PAGES_TO_SIZE(Pages)" and "Memory + EFI_PAGES_TO_SIZE(TotalPages)"
  //
  // The before or after mem HOBs need to be skipped under special cases where the aligned portion
  // touches either the top or bottom of the original allocated space.
  //
  SkipBeforeMemHob = FALSE;
  SkipAfterMemHob  = FALSE;
  if (Memory == AlignedMemory) {
    SkipBeforeMemHob = TRUE;
  }
  if ((Memory + EFI_PAGES_TO_SIZE(TotalPages)) == (AlignedMemory + EFI_PAGES_TO_SIZE(Pages))) {
    //
    // This condition is never met in the current implementation.
    // There is always some after-mem since the overhead mem(used in TotalPages)
    // is no less than Alignment.
    //
    SkipAfterMemHob = TRUE;
  }

  //
  // Search for the mem HOB referring to the original(unaligned) allocation
  // and update the size and type if needed.
  //
  Hob.Raw = GetFirstHob (EFI_HOB_TYPE_MEMORY_ALLOCATION);
  while (Hob.Raw != NULL) {
    if (Hob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress == Memory) {
      break;
    }
    // Advance past the current HOB, then find the next memory-allocation HOB.
    Hob.Raw = GET_NEXT_HOB (Hob);
    Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw);
  }
  ASSERT (Hob.Raw != NULL);
  if (SkipBeforeMemHob) {
    //
    // Use this HOB as aligned mem HOB as there is no portion before it.
    //
    HobLength = EFI_PAGES_TO_SIZE(Pages);
    Hob.MemoryAllocation->AllocDescriptor.MemoryLength = HobLength;
  } else {
    //
    // Use this HOB as before mem HOB and create a new HOB for the aligned portion
    //
    HobLength = (AlignedMemory - Memory);
    Hob.MemoryAllocation->AllocDescriptor.MemoryLength = HobLength;
    // The before-mem portion is released back as conventional memory.
    Hob.MemoryAllocation->AllocDescriptor.MemoryType = EfiConventionalMemory;
  }

  HobBaseAddress = Hob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress;
  HobMemoryType  = Hob.MemoryAllocation->AllocDescriptor.MemoryType;

  //
  // Build the aligned mem HOB if needed
  //
  if (!SkipBeforeMemHob) {
    DEBUG((DEBUG_INFO, "Updated before-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
      HobBaseAddress, HobLength, (UINTN) HobMemoryType));

    HobBaseAddress = AlignedMemory;
    HobLength      = EFI_PAGES_TO_SIZE(Pages);
    HobMemoryType  = MemoryType;

    BuildMemoryAllocationHob (
      HobBaseAddress,
      HobLength,
      HobMemoryType
      );

    DEBUG((DEBUG_INFO, "Created aligned-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
      HobBaseAddress, HobLength, (UINTN) HobMemoryType));
  } else {
    if (HobBaseAddress != 0) {
      DEBUG((DEBUG_INFO, "Updated aligned-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
        HobBaseAddress, HobLength, (UINTN) HobMemoryType));
    }
  }

  //
  // Build the after mem HOB if needed
  //
  if (!SkipAfterMemHob) {
    HobBaseAddress = AlignedMemory + EFI_PAGES_TO_SIZE(Pages);
    HobLength      = (Memory + EFI_PAGES_TO_SIZE(TotalPages)) - (AlignedMemory + EFI_PAGES_TO_SIZE(Pages));
    HobMemoryType  = EfiConventionalMemory;

    BuildMemoryAllocationHob (
      HobBaseAddress,
      HobLength,
      HobMemoryType
      );

    DEBUG((DEBUG_INFO, "Created after-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
      HobBaseAddress, HobLength, (UINTN) HobMemoryType));
  }

  return (VOID *) (UINTN) AlignedMemory;
}
/**
  Build the E820 map and EFI info in the Linux boot_params, then exit
  boot services.

  Retrieves the UEFI memory map, converts each descriptor into an E820
  entry (coalescing adjacent ranges of the same type), records the EFI
  system table and memory map locations in boot_params->efi_info, and
  finally calls ExitBootServices() with the matching map key.

  @param[in,out] Bp  The Linux boot_params structure to fill in.
**/
STATIC
VOID
SetupLinuxMemmap (
  IN OUT struct boot_params  *Bp
  )
{
  EFI_STATUS             Status;
  UINT8                  TmpMemoryMap[1];
  UINTN                  MapKey;
  UINTN                  DescriptorSize;
  UINT32                 DescriptorVersion;
  UINTN                  MemoryMapSize;
  EFI_MEMORY_DESCRIPTOR  *MemoryMap;
  EFI_MEMORY_DESCRIPTOR  *MemoryMapPtr;
  UINTN                  Index;
  struct efi_info        *Efi;
  struct e820_entry      *LastE820;
  struct e820_entry      *E820;
  UINTN                  E820EntryCount;
  EFI_PHYSICAL_ADDRESS   LastEndAddr;

  //
  // Get System MemoryMapSize
  //
  MemoryMapSize = sizeof (TmpMemoryMap);
  Status = gBS->GetMemoryMap (
                  &MemoryMapSize,
                  (EFI_MEMORY_DESCRIPTOR *)TmpMemoryMap,
                  &MapKey,
                  &DescriptorSize,
                  &DescriptorVersion
                  );
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);
  //
  // Enlarge space here, because we will allocate pool now.
  //
  MemoryMapSize += EFI_PAGE_SIZE;
  MemoryMap = AllocatePool (MemoryMapSize);
  ASSERT (MemoryMap != NULL);

  //
  // Get System MemoryMap
  //
  Status = gBS->GetMemoryMap (
                  &MemoryMapSize,
                  MemoryMap,
                  &MapKey,
                  &DescriptorSize,
                  &DescriptorVersion
                  );
  ASSERT_EFI_ERROR (Status);

  LastE820       = NULL;
  E820           = &Bp->e820_map[0];
  E820EntryCount = 0;
  LastEndAddr    = 0;
  MemoryMapPtr   = MemoryMap;

  //
  // BUGFIX: the descriptor pointer is advanced in the for-increment now.
  // The original advanced it at the bottom of the loop body, so the
  // 'continue' paths (zero-page or unknown-type descriptors) skipped the
  // advance and re-examined the same descriptor for all remaining
  // iterations, dropping the rest of the map.
  //
  for (Index = 0;
       Index < (MemoryMapSize / DescriptorSize);
       Index++,
       MemoryMap = (EFI_MEMORY_DESCRIPTOR *)((UINTN)MemoryMap + DescriptorSize)) {
    UINTN E820Type = 0;

    if (MemoryMap->NumberOfPages == 0) {
      continue;
    }

    //
    // Map the EFI memory type onto the closest E820 type.
    //
    switch (MemoryMap->Type) {
    case EfiReservedMemoryType:
    case EfiRuntimeServicesCode:
    case EfiRuntimeServicesData:
    case EfiMemoryMappedIO:
    case EfiMemoryMappedIOPortSpace:
    case EfiPalCode:
      E820Type = E820_RESERVED;
      break;

    case EfiUnusableMemory:
      E820Type = E820_UNUSABLE;
      break;

    case EfiACPIReclaimMemory:
      E820Type = E820_ACPI;
      break;

    case EfiLoaderCode:
    case EfiLoaderData:
    case EfiBootServicesCode:
    case EfiBootServicesData:
    case EfiConventionalMemory:
      E820Type = E820_RAM;
      break;

    case EfiACPIMemoryNVS:
      E820Type = E820_NVS;
      break;

    default:
      DEBUG ((
        EFI_D_ERROR,
        "Invalid EFI memory descriptor type (0x%x)!\n",
        MemoryMap->Type
        ));
      continue;
    }

    if ((LastE820 != NULL) &&
        (LastE820->type == (UINT32) E820Type) &&
        (MemoryMap->PhysicalStart == LastEndAddr)) {
      //
      // Coalesce with the previous entry: same type, contiguous range.
      //
      LastE820->size += EFI_PAGES_TO_SIZE (MemoryMap->NumberOfPages);
      LastEndAddr    += EFI_PAGES_TO_SIZE (MemoryMap->NumberOfPages);
    } else {
      //
      // Start a new E820 entry; stop silently if the fixed-size table
      // in boot_params is full.
      //
      if (E820EntryCount >= (sizeof (Bp->e820_map) / sizeof (Bp->e820_map[0]))) {
        break;
      }
      E820->type  = (UINT32) E820Type;
      E820->addr  = MemoryMap->PhysicalStart;
      E820->size  = EFI_PAGES_TO_SIZE (MemoryMap->NumberOfPages);
      LastE820    = E820;
      LastEndAddr = E820->addr + E820->size;
      E820++;
      E820EntryCount++;
    }
  }
  Bp->e820_entries = (UINT8) E820EntryCount;

  //
  // Hand the raw EFI memory map to the kernel as well.
  //
  Efi = &Bp->efi_info;
  Efi->efi_systab          = (UINT32)(UINTN) gST;
  Efi->efi_memdesc_size    = (UINT32) DescriptorSize;
  Efi->efi_memdesc_version = DescriptorVersion;
  Efi->efi_memmap          = (UINT32)(UINTN) MemoryMapPtr;
  Efi->efi_memmap_size     = (UINT32) MemoryMapSize;
#ifdef MDE_CPU_IA32
  Efi->efi_loader_signature = SIGNATURE_32 ('E', 'L', '3', '2');
#else
  Efi->efi_systab_hi = ((UINT64)(UINTN) gST) >> 32;
  Efi->efi_memmap_hi = ((UINT64)(UINTN) MemoryMapPtr) >> 32;
  Efi->efi_loader_signature = SIGNATURE_32 ('E', 'L', '6', '4');
#endif

  gBS->ExitBootServices (gImageHandle, MapKey);
}
/**
  Allocate Pages pages of MemoryType aligned on Alignment, then remap the
  aligned range as write-combined (uncached) via the DXE GCD services.

  @param  MemoryType  The type of memory to allocate.
  @param  Pages       The number of 4 KB pages to allocate.
  @param  Alignment   Requested alignment. Must be a power of two; zero
                      means byte alignment.

  @return Pointer to the aligned, write-combined buffer, or NULL on failure.
**/
VOID *
UncachedInternalAllocateAlignedPages (
  IN EFI_MEMORY_TYPE  MemoryType,
  IN UINTN            Pages,
  IN UINTN            Alignment
  )
{
  EFI_STATUS                       Status;
  EFI_PHYSICAL_ADDRESS             Memory;
  EFI_PHYSICAL_ADDRESS             AlignedMemory;
  UINTN                            AlignmentMask;
  UINTN                            UnalignedPages;
  UINTN                            RealPages;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  Descriptor;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gBS->AllocatePages (AllocateAnyPages, MemoryType, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory  = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gBS->FreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
    Memory         = (EFI_PHYSICAL_ADDRESS) (AlignedMemory + EFI_PAGES_TO_SIZE (Pages));
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gBS->FreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gBS->AllocatePages (AllocateAnyPages, MemoryType, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = (UINTN) Memory;
  }

  //
  // BUGFIX: operate on AlignedMemory, not Memory. In the over-allocation
  // path above, Memory was reused to hold the freed *tail* address, so the
  // original code queried/remapped the wrong region and returned a pointer
  // past the end of the allocation.
  //
  Status = gDS->GetMemorySpaceDescriptor (AlignedMemory, &Descriptor);
  if (!EFI_ERROR (Status)) {
    // We are making an assumption that all of memory has the same default attributes
    gAttributes = Descriptor.Attributes;
  }

  Status = gDS->SetMemorySpaceAttributes (AlignedMemory, EFI_PAGES_TO_SIZE (Pages), EFI_MEMORY_WC);
  ASSERT_EFI_ERROR (Status);

  return (VOID *)(UINTN)AlignedMemory;
}
/**
  Sanitize a UEFI memory map in place (firmware quirk workarounds).

  - Strips the EFI_MEMORY_RUNTIME attribute from EfiReservedMemoryType
    descriptors (seen on some UEFIs when Intel HD3000/HD4000 is used).
  - Forces the legacy area overlapping 0x9E000..0x9FFFF to
    EfiACPIMemoryNVS (sleep/wake fix for GB boards, by Slice).
  - Logs any descriptor whose EFI_MEMORY_RUNTIME flag disagrees with its
    memory type.

  @param[in] MemoryMapSize      Size, in bytes, of MemoryMap.
  @param[in] MemoryMap          Memory map to fix; modified in place.
  @param[in] DescriptorSize     Size, in bytes, of a single descriptor.
  @param[in] DescriptorVersion  Descriptor version (not used here).
**/
VOID
EFIAPI
FixMemMap (
  IN UINTN                  MemoryMapSize,
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMap,
  IN UINTN                  DescriptorSize,
  IN UINT32                 DescriptorVersion
  )
{
  UINTN                  NumEntries;
  UINTN                  Index;
  EFI_MEMORY_DESCRIPTOR  *Desc;
  UINTN                  BlockSize;
  UINTN                  PhysicalEnd;

  DBG("FixMemMap: Size=%d, Addr=%p, DescSize=%d\n", MemoryMapSize, MemoryMap, DescriptorSize);
  DBGnvr("FixMemMap ...\n");

  Desc       = MemoryMap;
  NumEntries = MemoryMapSize / DescriptorSize;

  for (Index = 0; Index < NumEntries; Index++) {
    BlockSize   = EFI_PAGES_TO_SIZE((UINTN)Desc->NumberOfPages);
    PhysicalEnd = Desc->PhysicalStart + BlockSize;

    //
    // Some UEFIs end up with "reserved" area with EFI_MEMORY_RUNTIME flag set
    // when Intel HD3000 or HD4000 is used. We will remove that flag here.
    //
    if ((Desc->Attribute & EFI_MEMORY_RUNTIME) != 0 && Desc->Type == EfiReservedMemoryType) {
      DBGnvr(" %s as RT: %lx (0x%x) - Att: %lx", EfiMemoryTypeDesc[Desc->Type], Desc->PhysicalStart, Desc->NumberOfPages, Desc->Attribute);
      Desc->Attribute = Desc->Attribute & (~EFI_MEMORY_RUNTIME);
      DBGnvr(" -> %lx\n", Desc->Attribute);

      /* This one is not working - blocks during DefragmentRuntimeServices()
      DBGnvr(" %s as RT: %lx (0x%x) - %s", EfiMemoryTypeDesc[Desc->Type], Desc->PhysicalStart, Desc->NumberOfPages, EfiMemoryTypeDesc[Desc->Type]);
      Desc->Type = EfiMemoryMappedIO;
      DBGnvr(" -> %s\n", EfiMemoryTypeDesc[Desc->Type]);
      */

      /* Another possible solution - mark the range as MMIO.
      DBGnvr(" %s as RT: %lx (0x%x) - %s", EfiMemoryTypeDesc[Desc->Type], Desc->PhysicalStart, Desc->NumberOfPages, EfiMemoryTypeDesc[Desc->Type]);
      Desc->Type = EfiRuntimeServicesData;
      DBGnvr(" -> %s\n", EfiMemoryTypeDesc[Desc->Type]);
      */
    }

    //
    // Fix by Slice - fixes sleep/wake on GB boards.
    //
    // if ((Desc->PhysicalStart >= 0x9e000) && (Desc->PhysicalStart < 0xa0000)) {
    if ((Desc->PhysicalStart < 0xa0000) && (PhysicalEnd >= 0x9e000)) {
      Desc->Type      = EfiACPIMemoryNVS;
      Desc->Attribute = 0;
    }

    //
    // Also do some checking
    //
    if ((Desc->Attribute & EFI_MEMORY_RUNTIME) != 0) {
      //
      // block with RT flag.
      // if it is not RT or MMIO, then report to log
      //
      if (Desc->Type != EfiRuntimeServicesCode &&
          Desc->Type != EfiRuntimeServicesData &&
          Desc->Type != EfiMemoryMappedIO &&
          Desc->Type != EfiMemoryMappedIOPortSpace
          ) {
        DBGnvr(" %s with RT flag: %lx (0x%x) - ???\n", EfiMemoryTypeDesc[Desc->Type], Desc->PhysicalStart, Desc->NumberOfPages);
      }
    } else {
      //
      // block without RT flag.
      // if it is RT or MMIO, then report to log
      //
      if (Desc->Type == EfiRuntimeServicesCode ||
          Desc->Type == EfiRuntimeServicesData ||
          Desc->Type == EfiMemoryMappedIO ||
          Desc->Type == EfiMemoryMappedIOPortSpace
          ) {
        DBGnvr(" %s without RT flag: %lx (0x%x) - ???\n", EfiMemoryTypeDesc[Desc->Type], Desc->PhysicalStart, Desc->NumberOfPages);
      }
    }

    Desc = NEXT_MEMORY_DESCRIPTOR(Desc, DescriptorSize);
  }
}
/**
  Allocate some memory from the host controller's memory pool which can be
  used to communicate with the host controller.

  @param  Pool  The host controller's memory pool.
  @param  Size  Size of the memory to allocate.

  @return The allocated memory or NULL.

**/
VOID *
UsbHcAllocateMem (
  IN USBHC_MEM_POOL  *Pool,
  IN UINTN           Size
  )
{
  USBHC_MEM_BLOCK  *Head;
  USBHC_MEM_BLOCK  *Cursor;
  USBHC_MEM_BLOCK  *NewBlock;
  VOID             *Buffer;
  UINTN            RoundedSize;
  UINTN            PageCount;

  RoundedSize = USBHC_MEM_ROUND (Size);
  Head        = Pool->Head;
  ASSERT (Head != NULL);

  //
  // Try to satisfy the request from one of the blocks already in the pool.
  //
  for (Cursor = Head; Cursor != NULL; Cursor = Cursor->Next) {
    Buffer = UsbHcAllocMemFromBlock (Cursor, RoundedSize / USBHC_MEM_UNIT);
    if (Buffer != NULL) {
      ZeroMem (Buffer, Size);
      return Buffer;
    }
  }

  //
  // No existing block has room. Grow the pool: oversize requests get a
  // block just big enough (plus one page), everything else gets the
  // default block size.
  //
  if (RoundedSize > EFI_PAGES_TO_SIZE (USBHC_MEM_DEFAULT_PAGES)) {
    PageCount = EFI_SIZE_TO_PAGES (RoundedSize) + 1;
  } else {
    PageCount = USBHC_MEM_DEFAULT_PAGES;
  }

  NewBlock = UsbHcAllocMemBlock (Pool, PageCount);
  if (NewBlock == NULL) {
    DEBUG ((EFI_D_ERROR, "UsbHcAllocateMem: failed to allocate block\n"));
    return NULL;
  }

  //
  // Link the fresh block into the pool, then carve the allocation from it.
  //
  UsbHcInsertMemBlockToPool (Head, NewBlock);

  Buffer = UsbHcAllocMemFromBlock (NewBlock, RoundedSize / USBHC_MEM_UNIT);
  if (Buffer != NULL) {
    ZeroMem (Buffer, Size);
  }

  return Buffer;
}
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.
  If BootScriptExector driver will run in 64-bit mode, this function will establish the 1:1
  virtual to physical mapping page table.
  If BootScriptExector driver will not run in 64-bit mode, this function will do nothing.

  @return  the 1:1 Virtual to Physical identity mapping page table base address.

**/
EFI_PHYSICAL_ADDRESS
S3CreateIdentityMappingPageTables (
  VOID
  )
{
  if (FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    UINT32                RegEax;
    UINT32                RegEdx;
    UINT8                 PhysicalAddressBits;
    UINT32                NumberOfPml4EntriesNeeded;
    UINT32                NumberOfPdpEntriesNeeded;
    EFI_PHYSICAL_ADDRESS  S3NvsPageTableAddress;
    UINTN                 TotalPageTableSize;
    VOID                  *Hob;
    BOOLEAN               Page1GSupport;

    //
    // Probe CPUID leaf 0x80000001 EDX bit 26 for 1 GB page support,
    // but only if the platform PCD allows using 1 GB pages.
    //
    Page1GSupport = FALSE;
    if (PcdGetBool(PcdUse1GPageTable)) {
      AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
      if (RegEax >= 0x80000001) {
        AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
        if ((RegEdx & BIT26) != 0) {
          Page1GSupport = TRUE;
        }
      }
    }

    //
    // Get physical address bits supported.
    // Prefer the CPU HOB; fall back to CPUID leaf 0x80000008, and to the
    // architectural minimum of 36 bits when that leaf is absent.
    //
    Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
    if (Hob != NULL) {
      PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
    } else {
      AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
      if (RegEax >= 0x80000008) {
        AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
        PhysicalAddressBits = (UINT8) RegEax;
      } else {
        PhysicalAddressBits = 36;
      }
    }

    //
    // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
    //
    ASSERT (PhysicalAddressBits <= 52);
    if (PhysicalAddressBits > 48) {
      PhysicalAddressBits = 48;
    }

    //
    // Calculate the table entries needed.
    //
    if (PhysicalAddressBits <= 39 ) {
      NumberOfPml4EntriesNeeded = 1;
      NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
    } else {
      NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
      NumberOfPdpEntriesNeeded = 512;
    }

    //
    // We need calculate whole page size then allocate once, because S3 restore page table does not know each page in Nvs.
    // With 1 GB pages the PDP level is the leaf, so no PD pages are needed.
    //
    if (!Page1GSupport) {
      TotalPageTableSize = (UINTN)(1 + NumberOfPml4EntriesNeeded + NumberOfPml4EntriesNeeded * NumberOfPdpEntriesNeeded);
    } else {
      TotalPageTableSize = (UINTN)(1 + NumberOfPml4EntriesNeeded);
    }
    DEBUG ((EFI_D_ERROR, "TotalPageTableSize - %x pages\n", TotalPageTableSize));

    //
    // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
    //
    S3NvsPageTableAddress = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateMemoryBelow4G (EfiReservedMemoryType, EFI_PAGES_TO_SIZE(TotalPageTableSize));
    ASSERT (S3NvsPageTableAddress != 0);
    return S3NvsPageTableAddress;
  } else {
    //
    // If DXE is running 32-bit mode, no need to establish page table.
    //
    return (EFI_PHYSICAL_ADDRESS) 0;
  }
}
/**
  This function launch TXT environment.

  Sequence: fill in the MLE load address/size from the private data, enable
  TXT on the CPU, stage the SINIT ACM, build the MLE page table and TXT
  heap in the DPR region, prepare the launch environment, then execute
  GETSEC[SENTER]. On success SENTER does not return.

  @retval EFI_UNSUPPORTED   It is not supported to launch TXT
  @retval EFI_DEVICE_ERROR  SENTER returned (launch failed)
                            not return means launch successfully
**/
EFI_STATUS
LaunchTxtEnvironment (
  VOID
  )
{
  UINT64            PageTableBase;
  UINT32            SinitAcmBase;
  UINT32            SinitAcmSize;
  EFI_STATUS        Status;
  UINT64            MleLoadAddress;
  UINT64            MleLoadSize;
  MLE_PRIVATE_DATA  *MlePrivateData;

  //
  // Fill MLE image base. The MLE image sits in the DPR region directly
  // after the MLE page-table pages.
  //
  MlePrivateData = GetMlePrivateData ();
  MleLoadAddress = (UINT32)(MlePrivateData->DcePrivateData.DprBase + EFI_PAGES_TO_SIZE (MLE_PAGE_TABLE_PAGES));
  MleLoadSize    = EFI_PAGES_TO_SIZE (MLE_LOADER_PAGES) + MlePrivateData->DcePrivateData.MeasuredImageSize;

  //
  // Enable TXT CPU
  //
  Status = EnableTxt ();
  if (EFI_ERROR (Status)) {
    return EFI_UNSUPPORTED;
  }

  //
  // Setup SINIT ACM memory
  //
  Status = TxtSetupSinitAcmMemory (&SinitAcmBase, &SinitAcmSize);
  if (EFI_ERROR (Status)) {
    return EFI_UNSUPPORTED;
  }

  //
  // Setup page table
  //
  PageTableBase = MlePrivateData->DcePrivateData.DprBase;
  DEBUG((EFI_D_INFO, "(TXT) TxtSetupPageTable:\n"));
  DEBUG((EFI_D_INFO, "(TXT) MleLoadAddress: 0x%x\n", MleLoadAddress));
  DEBUG((EFI_D_INFO, "(TXT) MleLoadSize: 0x%x\n", MleLoadSize));
  DEBUG((EFI_D_INFO, "(TXT) PageTableBase: 0x%x\n", PageTableBase));
  TxtSetupPageTable (MleLoadAddress, MleLoadSize, PageTableBase);

  //
  // Setup TXT heap
  //
  TxtSetupHeap (MleLoadAddress, MleLoadSize, PageTableBase);

  DumpMleHeader ((TXT_MLE_HEADER *)(UINTN)MleLoadAddress);
  // The -1 steps back over the UINT64 size field preceding each data area.
  DumpBiosToOsData ((UINT64 *)GetTxtBiosToOsData() - 1);
  DumpOsToSinitData ((UINT64 *)GetTxtOsToSinitData() - 1);

  //
  // Prepare environment
  //
  TxtPrepareEnvironment (SinitAcmBase, SinitAcmSize);

  DEBUG ((EFI_D_INFO, "(TXT) SENTER ...\n"));

  //
  // LaunchTxt. A successful SENTER transfers control to the MLE and does
  // not return; reaching the line below means the launch failed.
  //
  AsmGetSecSenter (SinitAcmBase, SinitAcmSize, 0);

  DEBUG ((EFI_D_ERROR, "(TXT) SENTER fail!\n"));

  return EFI_DEVICE_ERROR;
}
/**
  Install the VBE Info and VBE Mode Info structures, and the VBE service
  handler routine in the C segment. Point the real-mode Int10h interrupt
  vector to the handler. The only advertised mode is 1024x768x32.

  @param[in] CardName         Name of the video card to be exposed in the
                              Product Name field of the VBE Info structure. The
                              parameter must originate from a
                              QEMU_VIDEO_CARD.Name field.

  @param[in] FrameBufferBase  Guest-physical base address of the video card's
                              frame buffer.
**/
VOID
InstallVbeShim (
  IN CONST CHAR16         *CardName,
  IN EFI_PHYSICAL_ADDRESS FrameBufferBase
  )
{
  EFI_PHYSICAL_ADDRESS Segment0, SegmentC, SegmentF;
  UINTN                Segment0Pages;
  IVT_ENTRY            *Int0x10;
  EFI_STATUS           Status;
  UINTN                Pam1Address;
  UINT8                Pam1;
  UINTN                SegmentCPages;
  VBE_INFO             *VbeInfoFull;
  VBE_INFO_BASE        *VbeInfo;
  UINT8                *Ptr;
  UINTN                Printed;
  VBE_MODE_INFO        *VbeModeInfo;

  Segment0 = 0x00000;
  SegmentC = 0xC0000;
  SegmentF = 0xF0000;

  //
  // Attempt to cover the real mode IVT with an allocation. This is a UEFI
  // driver, hence the arch protocols have been installed previously. Among
  // those, the CPU arch protocol has configured the IDT, so we can overwrite
  // the IVT used in real mode.
  //
  // The allocation request may fail, eg. if LegacyBiosDxe has already run.
  //
  Segment0Pages = 1;
  Int0x10       = (IVT_ENTRY *)(UINTN)Segment0 + 0x10;
  Status        = gBS->AllocatePages (AllocateAddress, EfiBootServicesCode,
                         Segment0Pages, &Segment0);

  if (EFI_ERROR (Status)) {
    EFI_PHYSICAL_ADDRESS Handler;

    //
    // Check if a video BIOS handler has been installed previously -- we
    // shouldn't override a real video BIOS with our shim, nor our own shim if
    // it's already present.
    //
    Handler = (Int0x10->Segment << 4) + Int0x10->Offset;
    if (Handler >= SegmentC && Handler < SegmentF) {
      DEBUG ((EFI_D_VERBOSE, "%a: Video BIOS handler found at %04x:%04x\n",
        __FUNCTION__, Int0x10->Segment, Int0x10->Offset));
      return;
    }

    //
    // Otherwise we'll overwrite the Int10h vector, even though we may not own
    // the page at zero.
    //
    DEBUG ((EFI_D_VERBOSE, "%a: failed to allocate page at zero: %r\n",
      __FUNCTION__, Status));
  } else {
    //
    // We managed to allocate the page at zero. SVN r14218 guarantees that it
    // is NUL-filled.
    //
    ASSERT (Int0x10->Segment == 0x0000);
    ASSERT (Int0x10->Offset  == 0x0000);
  }

  //
  // Put the shim in place first.
  //
  Pam1Address = PCI_LIB_ADDRESS (0, 0, 0, 0x5A);
  //
  // low nibble covers 0xC0000 to 0xC3FFF
  // high nibble covers 0xC4000 to 0xC7FFF
  // bit1 in each nibble is Write Enable
  // bit0 in each nibble is Read Enable
  //
  Pam1 = PciRead8 (Pam1Address);
  PciWrite8 (Pam1Address, Pam1 | (BIT1 | BIT0));

  //
  // We never added memory space during PEI or DXE for the C segment, so we
  // don't need to (and can't) allocate from there. Also, guest operating
  // systems will see a hole in the UEFI memory map there.
  //
  SegmentCPages = 4;

  ASSERT (sizeof mVbeShim <= EFI_PAGES_TO_SIZE (SegmentCPages));
  CopyMem ((VOID *)(UINTN)SegmentC, mVbeShim, sizeof mVbeShim);

  //
  // Fill in the VBE INFO structure.
  // NOTE: each *Address field below is a real-mode far pointer, encoded as
  // segment in the high word (SegmentC << 12 == 0xC000 << 16) and offset in
  // the low word.
  //
  VbeInfoFull = (VBE_INFO *)(UINTN)SegmentC;
  VbeInfo     = &VbeInfoFull->Base;
  Ptr         = VbeInfoFull->Buffer;

  CopyMem (VbeInfo->Signature, "VESA", 4);
  VbeInfo->VesaVersion = 0x0300;

  VbeInfo->OemNameAddress = (UINT32)(SegmentC << 12 | (UINT16)(UINTN)Ptr);
  CopyMem (Ptr, "QEMU", 5);
  Ptr += 5;

  VbeInfo->Capabilities = BIT0; // DAC can be switched into 8-bit mode

  VbeInfo->ModeListAddress = (UINT32)(SegmentC << 12 | (UINT16)(UINTN)Ptr);
  *(UINT16*)Ptr = 0x00f1; // mode number
  Ptr += 2;
  *(UINT16*)Ptr = 0xFFFF; // mode list terminator
  Ptr += 2;

  VbeInfo->VideoMem64K = (UINT16)((1024 * 768 * 4 + 65535) / 65536);
  VbeInfo->OemSoftwareVersion = 0x0000;

  VbeInfo->VendorNameAddress = (UINT32)(SegmentC << 12 | (UINT16)(UINTN)Ptr);
  CopyMem (Ptr, "OVMF", 5);
  Ptr += 5;

  VbeInfo->ProductNameAddress = (UINT32)(SegmentC << 12 | (UINT16)(UINTN)Ptr);
  Printed = AsciiSPrint ((CHAR8 *)Ptr,
              sizeof VbeInfoFull->Buffer - (Ptr - VbeInfoFull->Buffer), "%s",
              CardName);
  Ptr += Printed + 1;

  VbeInfo->ProductRevAddress = (UINT32)(SegmentC << 12 | (UINT16)(UINTN)Ptr);
  CopyMem (Ptr, mProductRevision, sizeof mProductRevision);
  Ptr += sizeof mProductRevision;

  ASSERT (sizeof VbeInfoFull->Buffer >= Ptr - VbeInfoFull->Buffer);
  ZeroMem (Ptr, sizeof VbeInfoFull->Buffer - (Ptr - VbeInfoFull->Buffer));

  //
  // Fill in the VBE MODE INFO structure.
  //
  VbeModeInfo = (VBE_MODE_INFO *)(VbeInfoFull + 1);

  //
  // bit0: mode supported by present hardware configuration
  // bit1: optional information available (must be =1 for VBE v1.2+)
  // bit3: set if color, clear if monochrome
  // bit4: set if graphics mode, clear if text mode
  // bit5: mode is not VGA-compatible
  // bit7: linear framebuffer mode supported
  //
  VbeModeInfo->ModeAttr = BIT7 | BIT5 | BIT4 | BIT3 | BIT1 | BIT0;

  //
  // bit0: exists
  // bit1: bit1: readable
  // bit2: writeable
  //
  VbeModeInfo->WindowAAttr = BIT2 | BIT1 | BIT0;

  VbeModeInfo->WindowBAttr = 0x00;
  VbeModeInfo->WindowGranularityKB = 0x0040;
  VbeModeInfo->WindowSizeKB = 0x0040;
  VbeModeInfo->WindowAStartSegment = 0xA000;
  VbeModeInfo->WindowBStartSegment = 0x0000;
  VbeModeInfo->WindowPositioningAddress = 0x0000;
  VbeModeInfo->BytesPerScanLine = 1024 * 4;

  VbeModeInfo->Width = 1024;
  VbeModeInfo->Height = 768;
  VbeModeInfo->CharCellWidth = 8;
  VbeModeInfo->CharCellHeight = 16;
  VbeModeInfo->NumPlanes = 1;
  VbeModeInfo->BitsPerPixel = 32;
  VbeModeInfo->NumBanks = 1;
  VbeModeInfo->MemoryModel = 6; // direct color
  VbeModeInfo->BankSizeKB = 0;
  VbeModeInfo->NumImagePagesLessOne = 0;
  VbeModeInfo->Vbe3 = 0x01;

  VbeModeInfo->RedMaskSize = 8;
  VbeModeInfo->RedMaskPos = 16;
  VbeModeInfo->GreenMaskSize = 8;
  VbeModeInfo->GreenMaskPos = 8;
  VbeModeInfo->BlueMaskSize = 8;
  VbeModeInfo->BlueMaskPos = 0;
  VbeModeInfo->ReservedMaskSize = 8;
  VbeModeInfo->ReservedMaskPos = 24;

  //
  // bit1: Bytes in reserved field may be used by application
  //
  VbeModeInfo->DirectColorModeInfo = BIT1;

  VbeModeInfo->LfbAddress = (UINT32)FrameBufferBase;
  VbeModeInfo->OffScreenAddress = 0;
  VbeModeInfo->OffScreenSizeKB = 0;

  VbeModeInfo->BytesPerScanLineLinear = 1024 * 4;
  VbeModeInfo->NumImagesLessOneBanked = 0;
  VbeModeInfo->NumImagesLessOneLinear = 0;
  VbeModeInfo->RedMaskSizeLinear = 8;
  VbeModeInfo->RedMaskPosLinear = 16;
  VbeModeInfo->GreenMaskSizeLinear = 8;
  VbeModeInfo->GreenMaskPosLinear = 8;
  VbeModeInfo->BlueMaskSizeLinear = 8;
  VbeModeInfo->BlueMaskPosLinear = 0;
  VbeModeInfo->ReservedMaskSizeLinear = 8;
  VbeModeInfo->ReservedMaskPosLinear = 24;
  VbeModeInfo->MaxPixelClockHz = 0;

  ZeroMem (VbeModeInfo->Reserved, sizeof VbeModeInfo->Reserved);

  //
  // Clear Write Enable (bit1), keep Read Enable (bit0) set
  //
  PciWrite8 (Pam1Address, (Pam1 & ~BIT1) | BIT0);

  //
  // Second, point the Int10h vector at the shim.
  //
  Int0x10->Segment = SegmentC >> 4;
  Int0x10->Offset = (EFI_PHYSICAL_ADDRESS)(UINTN)(VbeModeInfo + 1) - SegmentC;

  DEBUG ((EFI_D_INFO, "%a: VBE shim installed\n", __FUNCTION__));
}
/**
  Allocates Pages from the top of memory, below the address specified in
  *Memory. Returns the allocated address in *Memory.

  Walks the UEFI memory map backwards (highest address first) looking for
  a free (EfiConventionalMemory) region large enough, then allocates at a
  fixed address inside it with AllocateAddress.
*/
EFI_STATUS
EFIAPI
AllocatePagesFromTop(
  IN EFI_MEMORY_TYPE           MemoryType,
  IN UINTN                     Pages,
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory
  )
{
  EFI_STATUS             Status;
  UINTN                  MemoryMapSize;
  EFI_MEMORY_DESCRIPTOR  *MemoryMap;
  UINTN                  MapKey;
  UINTN                  DescriptorSize;
  UINT32                 DescriptorVersion;
  EFI_MEMORY_DESCRIPTOR  *MemoryMapEnd;
  EFI_MEMORY_DESCRIPTOR  *Desc;

  Status = GetMemoryMapAlloc(gBS->GetMemoryMap, &MemoryMapSize, &MemoryMap, &MapKey, &DescriptorSize, &DescriptorVersion);
  if (EFI_ERROR(Status)) {
    return Status;
  }

  Status = EFI_NOT_FOUND;

  //PRINT("Search for Pages=%x, TopAddr=%lx\n", Pages, *Memory);
  //PRINT("MEMMAP: Size=%d, Addr=%p, DescSize=%d, DescVersion: 0x%x\n", MemoryMapSize, MemoryMap, DescriptorSize, DescriptorVersion);
  //PRINT("Type       Start            End       VStart               # Pages          Attributes\n");

  //
  // Iterate descriptors from the last one down to the first, so the
  // highest suitable region is found first.
  //
  MemoryMapEnd = NEXT_MEMORY_DESCRIPTOR(MemoryMap, MemoryMapSize);
  Desc = PREV_MEMORY_DESCRIPTOR(MemoryMapEnd, DescriptorSize);

  for ( ; Desc >= MemoryMap; Desc = PREV_MEMORY_DESCRIPTOR(Desc, DescriptorSize)) {
    /*
    PRINT("%-12s %lX-%lX %lX  %lX %lX\n",
      EfiMemoryTypeDesc[Desc->Type],
      Desc->PhysicalStart,
      Desc->PhysicalStart + EFI_PAGES_TO_SIZE(Desc->NumberOfPages) - 1,
      Desc->VirtualStart,
      Desc->NumberOfPages,
      Desc->Attribute
    );
    */
    if (   (Desc->Type == EfiConventionalMemory)                          // free mem
        && (Pages <= Desc->NumberOfPages)                                 // contains enough space
        && (Desc->PhysicalStart + EFI_PAGES_TO_SIZE(Pages) <= *Memory))   // contains space below specified Memory
    {
      // free block found
      if (Desc->PhysicalStart + EFI_PAGES_TO_SIZE((UINTN)Desc->NumberOfPages) <= *Memory) {
        // the whole block is under Memory - allocate from the top of the block
        *Memory = Desc->PhysicalStart + EFI_PAGES_TO_SIZE((UINTN)Desc->NumberOfPages - Pages);
        //PRINT("found the whole block under top mem, allocating at %lx\n", *Memory);
      } else {
        // the block contains enough pages under Memory, but spans above it - allocate below Memory.
        *Memory = *Memory - EFI_PAGES_TO_SIZE(Pages);
        //PRINT("found the whole block under top mem, allocating at %lx\n", *Memory);
      }
      Status = gBS->AllocatePages(AllocateAddress, MemoryType, Pages, Memory);
      //PRINT("Alloc Pages=%x, Addr=%lx, Status=%r\n", Pages, *Memory, Status);
      break;
    }
  }

  // release mem
  FreePool(MemoryMap);

  return Status;
}
/**
  Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
  OperationBusMasterCommonBuffer64 mapping.

  @param Pages                 The number of pages to allocate.
  @param HostAddress           A pointer to store the base system memory
                               address of the allocated range.
  @param DeviceAddress         The resulting map address for the bus master
                               PCI controller to use to access the hosts
                               HostAddress.
  @param Mapping               A resulting value to pass to Unmap().

  @retval EFI_SUCCESS           The requested memory pages were allocated.
  @retval EFI_UNSUPPORTED       Attributes is unsupported. The only legal
                                attribute bits are MEMORY_WRITE_COMBINE and
                                MEMORY_CACHED.
  @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
  @retval EFI_OUT_OF_RESOURCES  The memory pages could not be allocated.
**/
EFI_STATUS
IoMmuAllocateBuffer (
  IN UINTN                  Pages,
  OUT VOID                  **HostAddress,
  OUT EFI_PHYSICAL_ADDRESS  *DeviceAddress,
  OUT VOID                  **Mapping
  )
{
  EFI_STATUS            Status;
  UINTN                 NumberOfBytes;
  EFI_PHYSICAL_ADDRESS  HostPhyAddress;
  EDKII_IOMMU_PPI       *IoMmu;

  //
  // Pre-clear all outputs so failure paths leave them in a known state.
  //
  *HostAddress   = NULL;
  *DeviceAddress = 0;
  *Mapping       = NULL;

  IoMmu = GetIoMmu ();

  if (IoMmu == NULL) {
    //
    // No IOMMU present: plain page allocation, identity device address,
    // no mapping handle.
    //
    Status = PeiServicesAllocatePages (
               EfiBootServicesData,
               Pages,
               &HostPhyAddress
               );
    if (EFI_ERROR (Status)) {
      return EFI_OUT_OF_RESOURCES;
    }
    *HostAddress   = (VOID *) (UINTN) HostPhyAddress;
    *DeviceAddress = HostPhyAddress;
    *Mapping       = NULL;
    return Status;
  }

  //
  // IOMMU present: allocate, map for common-buffer DMA, then grant
  // read+write access. Unwind on any failure.
  //
  Status = IoMmu->AllocateBuffer (
                    IoMmu,
                    EfiBootServicesData,
                    Pages,
                    HostAddress,
                    0
                    );
  if (EFI_ERROR (Status)) {
    return EFI_OUT_OF_RESOURCES;
  }

  NumberOfBytes = EFI_PAGES_TO_SIZE (Pages);
  Status = IoMmu->Map (
                    IoMmu,
                    EdkiiIoMmuOperationBusMasterCommonBuffer,
                    *HostAddress,
                    &NumberOfBytes,
                    DeviceAddress,
                    Mapping
                    );
  if (EFI_ERROR (Status)) {
    IoMmu->FreeBuffer (IoMmu, Pages, *HostAddress);
    *HostAddress = NULL;
    return EFI_OUT_OF_RESOURCES;
  }

  Status = IoMmu->SetAttribute (
                    IoMmu,
                    *Mapping,
                    EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE
                    );
  if (EFI_ERROR (Status)) {
    IoMmu->Unmap (IoMmu, *Mapping);
    IoMmu->FreeBuffer (IoMmu, Pages, *HostAddress);
    *Mapping     = NULL;
    *HostAddress = NULL;
    return Status;
  }

  return Status;
}
/**
  Allocates one or more 4KB pages of a certain memory type at a specified alignment.

  Allocates the number of 4KB pages specified by Pages of a certain memory type
  with an alignment specified by Alignment.  The allocated buffer is returned.
  If Pages is 0, then NULL is returned.  If there is not enough memory at the
  specified alignment remaining to satisfy the request, then NULL is returned.
  If Alignment is not a power of two and Alignment is not zero, then ASSERT().
  If Pages plus EFI_SIZE_TO_PAGES (Alignment) overflows, then ASSERT().

  @param  MemoryType            The type of memory to allocate.
  @param  Pages                 The number of 4 KB pages to allocate.
  @param  Alignment             The requested alignment of the allocation.
                                Must be a power of two.  If Alignment is zero,
                                then byte alignment is used.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
InternalAllocateAlignedPages (
  IN EFI_MEMORY_TYPE  MemoryType,
  IN UINTN            Pages,
  IN UINTN            Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  AllocBase;
  UINTN                 AlignedBase;
  UINTN                 Mask;
  UINTN                 SlackPages;
  UINTN                 TotalPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  if (Alignment <= EFI_PAGE_SIZE) {
    //
    // Page granularity already satisfies the requested alignment;
    // do not over-allocate.
    //
    Status = gBS->AllocatePages (AllocateAnyPages, MemoryType, Pages, &AllocBase);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    return (VOID *) (UINTN) AllocBase;
  }

  //
  // Over-allocate by the alignment so an aligned sub-range is guaranteed
  // to exist, then trim the slack off both ends.
  //
  Mask       = Alignment - 1;
  TotalPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
  //
  // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
  //
  ASSERT (TotalPages > Pages);

  Status = gBS->AllocatePages (AllocateAnyPages, MemoryType, TotalPages, &AllocBase);
  if (EFI_ERROR (Status)) {
    return NULL;
  }

  AlignedBase = ((UINTN) AllocBase + Mask) & ~Mask;
  SlackPages  = EFI_SIZE_TO_PAGES (AlignedBase - (UINTN) AllocBase);
  if (SlackPages > 0) {
    //
    // Free the unaligned page(s) in front of the aligned range.
    //
    Status = gBS->FreePages (AllocBase, SlackPages);
    ASSERT_EFI_ERROR (Status);
  }

  AllocBase  = AlignedBase + EFI_PAGES_TO_SIZE (Pages);
  SlackPages = TotalPages - Pages - SlackPages;
  if (SlackPages > 0) {
    //
    // Free the unaligned page(s) behind the aligned range.
    //
    Status = gBS->FreePages (AllocBase, SlackPages);
    ASSERT_EFI_ERROR (Status);
  }

  return (VOID *) AlignedBase;
}
/**
  Write back the dirty Framework CpuSaveStates to PI.

  The function scans the page table for dirty pages in mFrameworkSmst->CpuSaveState
  to write back to PI CpuSaveStates. It is meant to be called on each SmmBaseHelper
  SMI callback after Framework handler is called.
**/
VOID
WriteBackDirtyPages (
  VOID
  )
{
  UINTN  NumCpuStatePages;
  UINTN  PTIndex;
  UINTN  PTStartIndex;
  UINTN  PTEndIndex;

  // Number of pages covering one EFI_SMM_CPU_SAVE_STATE per processor.
  NumCpuStatePages = EFI_SIZE_TO_PAGES (mNumberOfProcessors * sizeof (EFI_SMM_CPU_SAVE_STATE));

  //
  // Bits 12..20 of the address select the 4 KB page-table-entry index, so
  // these give the PTE range spanning the whole CpuSaveState region.
  // NOTE(review): assumes the region does not cross a 2 MB page-directory
  // boundary (a single page table) — confirm against the allocator.
  //
  PTStartIndex = (UINTN)BitFieldRead64 ((UINT64) (UINTN) mFrameworkSmst->CpuSaveState, 12, 20);
  PTEndIndex   = (UINTN)BitFieldRead64 ((UINT64) (UINTN) mFrameworkSmst->CpuSaveState + EFI_PAGES_TO_SIZE(NumCpuStatePages) - 1, 12, 20);

  for (PTIndex = PTStartIndex; PTIndex <= PTEndIndex; PTIndex++) {
    //
    // BIT0 = Present, BIT6 = Dirty in an x86 page-table entry: only pages the
    // Framework handler actually wrote need to be copied back.
    //
    if ((mCpuStatePageTable[PTIndex] & (BIT0|BIT6)) == (BIT0|BIT6)) { // present and dirty?
      // Mask the PTE down to its physical-frame bits and write that page back.
      ReadWriteCpuStatePage (mCpuStatePageTable[PTIndex] & mPhyMask, FALSE);
    }
  }
}
VOID WriteBootToOsPerformanceData ( VOID ) /*++ Routine Description: Allocates a block of memory and writes performance data of booting to OS into it. Arguments: None Returns: None --*/ { EFI_STATUS Status; EFI_CPU_ARCH_PROTOCOL *Cpu; EFI_PERFORMANCE_PROTOCOL *DrvPerf; UINT32 mAcpiLowMemoryLength; UINT32 LimitCount; EFI_PERF_HEADER mPerfHeader; EFI_PERF_DATA mPerfData; EFI_GAUGE_DATA *DumpData; EFI_HANDLE *Handles; UINTN NoHandles; UINT8 *Ptr; UINT8 *PdbFileName; UINT32 mIndex; UINT64 Ticker; UINT64 Freq; UINT32 Duration; UINT64 CurrentTicker; UINT64 TimerPeriod; // // Retrive time stamp count as early as possilbe // Ticker = EfiReadTsc (); // // Get performance architecture protocol // Status = gBS->LocateProtocol ( &gEfiPerformanceProtocolGuid, NULL, &DrvPerf ); if (EFI_ERROR (Status)) { return ; } // // Get CPU frequency // Status = gBS->LocateProtocol ( &gEfiCpuArchProtocolGuid, NULL, &Cpu ); if (EFI_ERROR (Status)) { return ; } // // Get Cpu Frequency // Status = Cpu->GetTimerValue (Cpu, 0, &CurrentTicker, &TimerPeriod); if (EFI_ERROR (Status)) { return ; } // // Put Detailed performance data into memory // Handles = NULL; Status = gBS->LocateHandleBuffer ( AllHandles, NULL, NULL, &NoHandles, &Handles ); if (EFI_ERROR (Status)) { return ; } // // Allocate a block of memory that contain performance data to OS // if it is not allocated yet. 
// if (mAcpiLowMemoryBase == 0x0FFFFFFFF) { Status = gBS->AllocatePages ( AllocateMaxAddress, EfiReservedMemoryType, 4, &mAcpiLowMemoryBase ); if (EFI_ERROR (Status)) { gBS->FreePool (Handles); return ; } } mAcpiLowMemoryLength = EFI_PAGES_TO_SIZE(4); Ptr = (UINT8 *) ((UINT32) mAcpiLowMemoryBase + sizeof (EFI_PERF_HEADER)); LimitCount = (mAcpiLowMemoryLength - sizeof (EFI_PERF_HEADER)) / sizeof (EFI_PERF_DATA); // // Initialize performance data structure // EfiZeroMem (&mPerfHeader, sizeof (EFI_PERF_HEADER)); Freq = DivU64x32 (1000000000000, (UINTN) TimerPeriod, NULL); mPerfHeader.CpuFreq = Freq; // // Record BDS raw performance data // mPerfHeader.BDSRaw = Ticker; // // Get DXE drivers performance // for (mIndex = 0; mIndex < NoHandles; mIndex++) { Ticker = 0; PdbFileName = NULL; DumpData = DrvPerf->GetGauge ( DrvPerf, // Context NULL, // Handle NULL, // Token NULL, // Host NULL // PrecGauge ); while (DumpData) { if (DumpData->Handle == Handles[mIndex]) { PdbFileName = &(DumpData->PdbFileName[0]); if (DumpData->StartTick < DumpData->EndTick) { Ticker += (DumpData->EndTick - DumpData->StartTick); } } DumpData = DrvPerf->GetGauge ( DrvPerf, // Context NULL, // Handle NULL, // Token NULL, // Host DumpData // PrecGauge ); } Duration = (UINT32) DivU64x32 ( Ticker, (UINT32) Freq, NULL ); if (Duration > 0) { EfiZeroMem (&mPerfData, sizeof (EFI_PERF_DATA)); if (PdbFileName != NULL) { EfiAsciiStrCpy (mPerfData.Token, PdbFileName); } mPerfData.Duration = Duration; EfiCopyMem (Ptr, &mPerfData, sizeof (EFI_PERF_DATA)); Ptr += sizeof (EFI_PERF_DATA); mPerfHeader.Count++; if (mPerfHeader.Count == LimitCount) { goto Done; } } } gBS->FreePool (Handles); // // Get inserted performance data // DumpData = DrvPerf->GetGauge ( DrvPerf, // Context NULL, // Handle NULL, // Token NULL, // Host NULL // PrecGauge ); while (DumpData) { if ((DumpData->Handle) || (DumpData->StartTick > DumpData->EndTick)) { DumpData = DrvPerf->GetGauge ( DrvPerf, // Context NULL, // Handle NULL, // Token 
NULL, // Host DumpData // PrecGauge ); continue; } EfiZeroMem (&mPerfData, sizeof (EFI_PERF_DATA)); ConvertChar16ToChar8 ((UINT8 *) mPerfData.Token, DumpData->Token); mPerfData.Duration = (UINT32) DivU64x32 ( DumpData->EndTick - DumpData->StartTick, (UINT32) Freq, NULL ); EfiCopyMem (Ptr, &mPerfData, sizeof (EFI_PERF_DATA)); Ptr += sizeof (EFI_PERF_DATA); mPerfHeader.Count++; if (mPerfHeader.Count == LimitCount) { goto Done; } DumpData = DrvPerf->GetGauge ( DrvPerf, // Context NULL, // Handle NULL, // Token NULL, // Host DumpData // PrecGauge ); } Done: mPerfHeader.Signiture = 0x66726550; // // Put performance data to memory // EfiCopyMem ( (UINTN *) (UINTN) mAcpiLowMemoryBase, &mPerfHeader, sizeof (EFI_PERF_HEADER) ); gRT->SetVariable ( L"PerfDataMemAddr", &gEfiGenericVariableGuid, EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS, sizeof (UINT32), (VOID *) &mAcpiLowMemoryBase ); return ; }
/**
  Get and dump SMRAM profile data.

  Finds a conventional-memory region in the PI SMM communication region table
  large enough for the exchange, asks the SMM handler for the profile size,
  allocates a normal-memory copy buffer, then pulls the profile across in
  chunks and dumps it.

  @return EFI_SUCCESS   Get the SMRAM profile data successfully.
  @return other         Fail to get the SMRAM profile data.

**/
EFI_STATUS
GetSmramProfileData (
  VOID
  )
{
  EFI_STATUS                                          Status;
  UINTN                                               CommSize;
  UINT8                                               *CommBuffer;
  EFI_SMM_COMMUNICATE_HEADER                          *CommHeader;
  SMRAM_PROFILE_PARAMETER_GET_PROFILE_INFO            *CommGetProfileInfo;
  SMRAM_PROFILE_PARAMETER_GET_PROFILE_DATA_BY_OFFSET  *CommGetProfileData;
  UINTN                                               ProfileSize;
  VOID                                                *ProfileBuffer;
  EFI_SMM_COMMUNICATION_PROTOCOL                      *SmmCommunication;
  UINTN                                               MinimalSizeNeeded;
  EDKII_PI_SMM_COMMUNICATION_REGION_TABLE             *PiSmmCommunicationRegionTable;
  UINT32                                              Index;
  EFI_MEMORY_DESCRIPTOR                               *Entry;
  VOID                                                *Buffer;
  UINTN                                               Size;
  UINTN                                               Offset;

  Status = gBS->LocateProtocol (&gEfiSmmCommunicationProtocolGuid, NULL, (VOID **) &SmmCommunication);
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "SmramProfile: Locate SmmCommunication protocol - %r\n", Status));
    return Status;
  }

  //
  // Smallest communication region that can hold the header plus the larger of
  // the two request structures, plus one record of the largest record type.
  //
  MinimalSizeNeeded = sizeof (EFI_GUID) +
                      sizeof (UINTN) +
                      MAX (sizeof (SMRAM_PROFILE_PARAMETER_GET_PROFILE_INFO),
                           sizeof (SMRAM_PROFILE_PARAMETER_GET_PROFILE_DATA_BY_OFFSET));
  MinimalSizeNeeded += MAX (sizeof (MEMORY_PROFILE_CONTEXT),
                            MAX (sizeof (MEMORY_PROFILE_DRIVER_INFO),
                                 MAX (sizeof (MEMORY_PROFILE_ALLOC_INFO),
                                      MAX (sizeof (MEMORY_PROFILE_DESCRIPTOR),
                                           MAX (sizeof (MEMORY_PROFILE_FREE_MEMORY),
                                                sizeof (MEMORY_PROFILE_MEMORY_RANGE))))));

  Status = EfiGetSystemConfigurationTable (
             &gEdkiiPiSmmCommunicationRegionTableGuid,
             (VOID **) &PiSmmCommunicationRegionTable
             );
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "SmramProfile: Get PiSmmCommunicationRegionTable - %r\n", Status));
    return Status;
  }
  ASSERT (PiSmmCommunicationRegionTable != NULL);

  //
  // Descriptors immediately follow the table header; find the first
  // conventional-memory entry big enough for the exchange.
  //
  Entry = (EFI_MEMORY_DESCRIPTOR *) (PiSmmCommunicationRegionTable + 1);
  Size = 0;
  for (Index = 0; Index < PiSmmCommunicationRegionTable->NumberOfEntries; Index++) {
    if (Entry->Type == EfiConventionalMemory) {
      Size = EFI_PAGES_TO_SIZE ((UINTN) Entry->NumberOfPages);
      if (Size >= MinimalSizeNeeded) {
        break;
      }
    }
    // Advance by DescriptorSize, which may exceed sizeof (EFI_MEMORY_DESCRIPTOR).
    Entry = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) Entry + PiSmmCommunicationRegionTable->DescriptorSize);
  }
  ASSERT (Index < PiSmmCommunicationRegionTable->NumberOfEntries);
  CommBuffer = (UINT8 *) (UINTN) Entry->PhysicalStart;

  //
  // Get Size: ask the SMM handler how large the full profile is.
  //
  CommHeader = (EFI_SMM_COMMUNICATE_HEADER *) &CommBuffer[0];
  CopyMem (&CommHeader->HeaderGuid, &gEdkiiMemoryProfileGuid, sizeof (gEdkiiMemoryProfileGuid));
  CommHeader->MessageLength = sizeof (SMRAM_PROFILE_PARAMETER_GET_PROFILE_INFO);

  CommGetProfileInfo = (SMRAM_PROFILE_PARAMETER_GET_PROFILE_INFO *) &CommBuffer[OFFSET_OF (EFI_SMM_COMMUNICATE_HEADER, Data)];
  CommGetProfileInfo->Header.Command      = SMRAM_PROFILE_COMMAND_GET_PROFILE_INFO;
  CommGetProfileInfo->Header.DataLength   = sizeof (*CommGetProfileInfo);
  // Pre-set to an error value; the handler overwrites it on success.
  CommGetProfileInfo->Header.ReturnStatus = (UINT64)-1;
  CommGetProfileInfo->ProfileSize         = 0;

  CommSize = sizeof (EFI_GUID) + sizeof (UINTN) + CommHeader->MessageLength;
  Status = SmmCommunication->Communicate (SmmCommunication, CommBuffer, &CommSize);
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "SmramProfile: SmmCommunication - %r\n", Status));
    return Status;
  }

  if (CommGetProfileInfo->Header.ReturnStatus != 0) {
    Print (L"SmramProfile: GetProfileInfo - 0x%0x\n", CommGetProfileInfo->Header.ReturnStatus);
    return EFI_SUCCESS;
  }

  ProfileSize = (UINTN) CommGetProfileInfo->ProfileSize;

  //
  // Get Data: copy the profile out of SMRAM in chunks.
  //
  ProfileBuffer = AllocateZeroPool (ProfileSize);
  if (ProfileBuffer == 0) {
    Status = EFI_OUT_OF_RESOURCES;
    Print (L"SmramProfile: AllocateZeroPool (0x%x) for profile buffer - %r\n", ProfileSize, Status);
    return Status;
  }

  CommHeader = (EFI_SMM_COMMUNICATE_HEADER *) &CommBuffer[0];
  CopyMem (&CommHeader->HeaderGuid, &gEdkiiMemoryProfileGuid, sizeof(gEdkiiMemoryProfileGuid));
  CommHeader->MessageLength = sizeof (SMRAM_PROFILE_PARAMETER_GET_PROFILE_DATA_BY_OFFSET);

  CommGetProfileData = (SMRAM_PROFILE_PARAMETER_GET_PROFILE_DATA_BY_OFFSET *) &CommBuffer[OFFSET_OF (EFI_SMM_COMMUNICATE_HEADER, Data)];
  CommGetProfileData->Header.Command      = SMRAM_PROFILE_COMMAND_GET_PROFILE_DATA_BY_OFFSET;
  CommGetProfileData->Header.DataLength   = sizeof (*CommGetProfileData);
  CommGetProfileData->Header.ReturnStatus = (UINT64)-1;

  CommSize = sizeof (EFI_GUID) + sizeof (UINTN) + CommHeader->MessageLength;
  // The chunk staging area is the part of the region after the request itself.
  Buffer = (UINT8 *) CommHeader + CommSize;
  Size -= CommSize;

  CommGetProfileData->ProfileBuffer = (PHYSICAL_ADDRESS) (UINTN) Buffer;
  CommGetProfileData->ProfileOffset = 0;
  //
  // NOTE(review): loop progress relies on the SMM handler advancing
  // ProfileOffset in the shared buffer after each Communicate — confirm
  // against the SMRAM profile handler implementation.
  //
  while (CommGetProfileData->ProfileOffset < ProfileSize) {
    // Capture the offset before the call; the handler updates the shared copy.
    Offset = (UINTN) CommGetProfileData->ProfileOffset;
    if (Size <= (ProfileSize - CommGetProfileData->ProfileOffset)) {
      CommGetProfileData->ProfileSize = (UINT64) Size;
    } else {
      CommGetProfileData->ProfileSize = (UINT64) (ProfileSize - CommGetProfileData->ProfileOffset);
    }
    Status = SmmCommunication->Communicate (SmmCommunication, CommBuffer, &CommSize);
    ASSERT_EFI_ERROR (Status);

    if (CommGetProfileData->Header.ReturnStatus != 0) {
      FreePool (ProfileBuffer);
      Print (L"GetProfileData - 0x%x\n", CommGetProfileData->Header.ReturnStatus);
      return EFI_SUCCESS;
    }
    CopyMem ((UINT8 *) ProfileBuffer + Offset, (VOID *) (UINTN) CommGetProfileData->ProfileBuffer, (UINTN) CommGetProfileData->ProfileSize);
  }

  Print (L"SmramProfileSize - 0x%x\n", ProfileSize);
  Print (L"======= SmramProfile begin =======\n");
  DumpMemoryProfile ((PHYSICAL_ADDRESS) (UINTN) ProfileBuffer, ProfileSize);
  Print (L"======= SmramProfile end =======\n\n\n");

  FreePool (ProfileBuffer);

  return EFI_SUCCESS;
}