/**
  SEC-phase entry for the primary core.

  Builds the PPI list, enables the GIC distributor, optionally kicks the
  secondary cores with an SGI, then carves the temporary RAM layout
  (PPI list | PEI heap | primary stack) and jumps to the PEI core.

  @param[in] PeiCoreEntryPoint  Entry point of the PEI core to transfer
                                control to. This call does not return.
**/
VOID
EFIAPI
PrimaryMain (
  IN  EFI_PEI_CORE_ENTRY_POINT  PeiCoreEntryPoint
  )
{
  EFI_SEC_PEI_HAND_OFF    SecCoreData;
  UINTN                   PpiListSize;
  EFI_PEI_PPI_DESCRIPTOR  *PpiList;
  UINTN                   TemporaryRamBase;
  UINTN                   TemporaryRamSize;

  // Build the combined (common + platform) PPI list at the base of the
  // primary core stack; PpiListSize tells us how much of the stack it uses.
  CreatePpiList (&PpiListSize, &PpiList);

  // Enable the GIC Distributor
  ArmGicEnableDistributor(PcdGet32(PcdGicDistributorBase));

  // If ArmVe has not been built as Standalone then we need to wake up the secondary cores
  if (FeaturePcdGet (PcdSendSgiToBringUpSecondaryCores)) {
    // Sending SGI to all the Secondary CPU interfaces
    ArmGicSendSgiTo (PcdGet32(PcdGicDistributorBase), ARM_GIC_ICDSGIR_FILTER_EVERYONEELSE, 0x0E);
  }

  // Adjust the Temporary Ram as the new Ppi List (Common + Platform Ppi Lists) is created at
  // the base of the primary core stack
  PpiListSize = ALIGN_VALUE(PpiListSize, 0x4);
  TemporaryRamBase = (UINTN)PcdGet32 (PcdCPUCoresStackBase) + PpiListSize;
  TemporaryRamSize = (UINTN)PcdGet32 (PcdCPUCorePrimaryStackSize) - PpiListSize;

  // Make sure the size is 8-byte aligned. Once divided by 2, the size should be 4-byte aligned
  // to ensure the stack pointer is 4-byte aligned.
  TemporaryRamSize = TemporaryRamSize - (TemporaryRamSize & (0x8-1));

  //
  // Bind this information into the SEC hand-off state
  // Note: this must be in sync with the stuff in the asm file
  // Note also: HOBs (pei temp ram) MUST be above stack
  //
  SecCoreData.DataSize               = sizeof(EFI_SEC_PEI_HAND_OFF);
  SecCoreData.BootFirmwareVolumeBase = (VOID *)(UINTN)PcdGet32 (PcdFvBaseAddress);
  SecCoreData.BootFirmwareVolumeSize = PcdGet32 (PcdFvSize);
  SecCoreData.TemporaryRamBase       = (VOID *)TemporaryRamBase; // We run on the primary core (and so we use the first stack)
  SecCoreData.TemporaryRamSize       = TemporaryRamSize;
  // First half of temporary RAM is the PEI heap, second half is the stack.
  SecCoreData.PeiTemporaryRamBase    = SecCoreData.TemporaryRamBase;
  SecCoreData.PeiTemporaryRamSize    = SecCoreData.TemporaryRamSize / 2;
  SecCoreData.StackBase              = (VOID *)ALIGN_VALUE((UINTN)(SecCoreData.TemporaryRamBase) + SecCoreData.PeiTemporaryRamSize, 0x4);
  SecCoreData.StackSize              = (TemporaryRamBase + TemporaryRamSize) - (UINTN)SecCoreData.StackBase;

  // Jump to PEI core entry point
  (PeiCoreEntryPoint)(&SecCoreData, PpiList);
}
/*
 * Read an entire file into a freshly allocated, zero-filled buffer.
 *
 * On success *size holds the number of bytes read and *buffer the
 * allocated data (caller frees with FreePool). On failure *buffer is
 * left NULL / freed, so nothing leaks.
 *
 * NOTE(review): the 1024-byte scratch buffer assumes GetInfo's
 * EFI_FILE_INFO (including the variable-length file name) fits in 1 KiB;
 * very long file names would make GetInfo fail — confirm acceptable.
 */
EFI_STATUS
simple_file_read_all(EFI_FILE *file, UINTN *size, void **buffer)
{
	EFI_STATUS efi_status;
	EFI_FILE_INFO *fi;
	char buf[1024];

	*size = sizeof(buf);
	fi = (void *)buf;

	efi_status = uefi_call_wrapper(file->GetInfo, 4, file, &FILE_INFO,
				       size, fi);
	if (efi_status != EFI_SUCCESS) {
		Print(L"Failed to get file info\n");
		return efi_status;
	}

	*size = fi->FileSize;

	/* might use memory mapped, so align up to nearest page */
	*buffer = AllocateZeroPool(ALIGN_VALUE(*size, 4096));
	if (!*buffer) {
		Print(L"Failed to allocate buffer of size %d\n", *size);
		return EFI_OUT_OF_RESOURCES;
	}

	efi_status = uefi_call_wrapper(file->Read, 3, file, size, *buffer);
	if (efi_status != EFI_SUCCESS) {
		/* Fix: do not leak the buffer when the read fails. */
		FreePool(*buffer);
		*buffer = NULL;
	}
	return efi_status;
}
/** Get Section buffer pointer by SectionType and SectionInstance. @param[in] SectionBuffer The buffer of section @param[in] SectionBufferSize The size of SectionBuffer in bytes @param[in] SectionType The SectionType of Section to be found @param[in] SectionInstance The Instance of Section to be found @param[out] OutSectionBuffer The section found, including SECTION_HEADER @param[out] OutSectionSize The size of section found, including SECTION_HEADER @retval TRUE The FFS buffer is found. @retval FALSE The FFS buffer is not found. **/ BOOLEAN GetSectionByType ( IN VOID *SectionBuffer, IN UINT32 SectionBufferSize, IN EFI_SECTION_TYPE SectionType, IN UINTN SectionInstance, OUT VOID **OutSectionBuffer, OUT UINTN *OutSectionSize ) { EFI_COMMON_SECTION_HEADER *SectionHeader; UINTN SectionSize; UINTN Instance; DEBUG ((DEBUG_INFO, "GetSectionByType - Buffer: 0x%08x - 0x%08x\n", SectionBuffer, SectionBufferSize)); // // Find Section // SectionHeader = SectionBuffer; Instance = 0; while ((UINTN)SectionHeader < (UINTN)SectionBuffer + SectionBufferSize) { DEBUG ((DEBUG_INFO, "GetSectionByType - Section: 0x%08x\n", SectionHeader)); if (IS_SECTION2(SectionHeader)) { SectionSize = SECTION2_SIZE(SectionHeader); } else { SectionSize = SECTION_SIZE(SectionHeader); } if (SectionHeader->Type == SectionType) { if (Instance == SectionInstance) { *OutSectionBuffer = (UINT8 *)SectionHeader; *OutSectionSize = SectionSize; DEBUG((DEBUG_INFO, "GetSectionByType - 0x%x - 0x%x\n", *OutSectionBuffer, *OutSectionSize)); return TRUE; } else { DEBUG((DEBUG_INFO, "GetSectionByType - find section instance %x\n", Instance)); Instance++; } } else { // // Skip other section type // DEBUG ((DEBUG_INFO, "GetSectionByType - other section type 0x%x\n", SectionHeader->Type)); } // // Next Section // SectionHeader = (EFI_COMMON_SECTION_HEADER *)((UINTN)SectionHeader + ALIGN_VALUE(SectionSize, 4)); } return FALSE; }
/**
  SEC-phase entry for the primary core (64-bit PCD variant, no GIC setup).

  Builds the PPI list at the base of the primary core stack, lays out the
  remaining temporary RAM as PEI heap + stack, and jumps to the PEI core.

  @param[in] PeiCoreEntryPoint  Entry point of the PEI core to transfer
                                control to. This call does not return.
**/
VOID
EFIAPI
PrimaryMain (
  IN  EFI_PEI_CORE_ENTRY_POINT  PeiCoreEntryPoint
  )
{
  EFI_SEC_PEI_HAND_OFF    SecCoreData;
  UINTN                   PpiListSize;
  EFI_PEI_PPI_DESCRIPTOR  *PpiList;
  UINTN                   TemporaryRamBase;
  UINTN                   TemporaryRamSize;

  CreatePpiList (&PpiListSize, &PpiList);

  // Adjust the Temporary Ram as the new Ppi List (Common + Platform Ppi Lists) is created at
  // the base of the primary core stack
  PpiListSize = ALIGN_VALUE(PpiListSize, 0x4);
  TemporaryRamBase = (UINTN)PcdGet64 (PcdCPUCoresStackBase) + PpiListSize;
  TemporaryRamSize = (UINTN)PcdGet32 (PcdCPUCorePrimaryStackSize) - PpiListSize;

  // Make sure the size is 8-byte aligned. Once divided by 2, the size should be 4-byte aligned
  // to ensure the stack pointer is 4-byte aligned.
  TemporaryRamSize = TemporaryRamSize - (TemporaryRamSize & (0x8-1));

  //
  // Bind this information into the SEC hand-off state
  // Note: this must be in sync with the stuff in the asm file
  // Note also: HOBs (pei temp ram) MUST be above stack
  //
  SecCoreData.DataSize               = sizeof(EFI_SEC_PEI_HAND_OFF);
  SecCoreData.BootFirmwareVolumeBase = (VOID *)(UINTN)PcdGet64 (PcdFvBaseAddress);
  SecCoreData.BootFirmwareVolumeSize = PcdGet32 (PcdFvSize);
  SecCoreData.TemporaryRamBase       = (VOID *)TemporaryRamBase; // We run on the primary core (and so we use the first stack)
  SecCoreData.TemporaryRamSize       = TemporaryRamSize;
  // First half of temporary RAM is the PEI heap, second half is the stack.
  SecCoreData.PeiTemporaryRamBase    = SecCoreData.TemporaryRamBase;
  SecCoreData.PeiTemporaryRamSize    = SecCoreData.TemporaryRamSize / 2;
  SecCoreData.StackBase              = (VOID *)ALIGN_VALUE((UINTN)(SecCoreData.TemporaryRamBase) + SecCoreData.PeiTemporaryRamSize, 0x4);
  SecCoreData.StackSize              = (TemporaryRamBase + TemporaryRamSize) - (UINTN)SecCoreData.StackBase;

  // Jump to PEI core entry point
  (PeiCoreEntryPoint)(&SecCoreData, PpiList);
}
// Calculate the block size including encryption fields and padding if any. uint Archive::FullHeaderSize(size_t Size) { if (Encrypted) { Size = ALIGN_VALUE(Size, CRYPT_BLOCK_SIZE); // Align to encryption block size. if (Format == RARFMT50) Size += SIZE_INITV; else Size += SIZE_SALT30; } return uint(Size); }
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  // Tentatively place the allocation at the top of the free block.
  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
// Deserialize all shader bytecode blobs from the cache stream and create GPU
// shader objects for them, filling 'shaders' with one handle per shader.
// Stream layout per shader: UINT32 codeSize, codeSize bytes of bytecode,
// then padding up to the next 4-byte boundary.
ERet ShaderCache_d::CreateShaders( const CacheHeader_d& header, AStreamReader& stream, FxShaderHandles &shaders )
{
	// Size each per-type handle array and compute the total for logging.
	UINT32 totalShaderCount = 0;
	for( UINT32 shaderType = 0; shaderType < ShaderTypeCount; shaderType++ )
	{
		totalShaderCount += header.numShaders[ shaderType ];
		shaders.handles[shaderType].SetNum( header.numShaders[shaderType] );
	}

	DBGOUT("Loading %u shaders: %u VS, %u GS, %u PS\n",
		totalShaderCount, header.numShaders[ShaderVertex], header.numShaders[ShaderGeometry], header.numShaders[ShaderFragment]);

	for( UINT32 shaderType = 0; shaderType < ShaderTypeCount; shaderType++ )
	{
		for( UINT32 shaderIndex = 0; shaderIndex < header.numShaders[shaderType]; shaderIndex++ )
		{
			// Read the bytecode into a frame-scoped scratch allocation.
			UINT32 codeSize;
			mxDO(stream.Get( codeSize ));

			ScopedStackAlloc	codeAlloc( gCore.frameAlloc );
			void* codeBuffer = codeAlloc.Alloc( codeSize );
			mxDO(stream.Read( codeBuffer, codeSize ));

			// Blobs are written 4-byte aligned; skip the padding bytes.
			const UINT32 alignedOffset = ALIGN_VALUE(codeSize, 4);
			const UINT32 sizeOfPadding = alignedOffset - codeSize;
			Skip_N_bytes( stream, sizeOfPadding );

			//DBGOUT("Creating '%s' (%u bytes)\n",EShaderTypeToChars((EShaderType)shaderType),codeSize);
			const HShader shaderHandle = llgl::CreateShader( (EShaderType)shaderType, codeBuffer, codeSize );
			shaders.handles[shaderType][shaderIndex] = shaderHandle;
		}
	}

	return ALL_OK;
}
/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested.
                        (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS    Memory,
  IN UINTN                   NoPages,
  IN UINTN                   Size
  )
{
  UINTN  AlignedSize;

  if (Memory == 0) {
    return (VOID *)(UINTN)Memory;
  }

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // BIT7 set: pool head is placed right after the head Guard.
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Otherwise the pool head is pushed up against the tail Guard. Round the
  // requested size up to 8 bytes so the returned address stays 8-byte
  // aligned as the UEFI spec requires for pool allocations.
  //
  AlignedSize = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - AlignedSize);
}
// Construct the RAR5 recovery-volume processor: zero the bookkeeping
// counters, wire each worker slot back to this object, create the thread
// pool (SMP builds), and allocate the SSE-aligned shared work buffer.
RecVolumes5::RecVolumes5()
{
  RealReadBuffer=NULL;

  DataCount=0;
  RecCount=0;
  TotalCount=0;
  RecBufferSize=0;

  // Each per-thread slot needs a back pointer to this object; the RS coder
  // is created lazily in ProcessRS.
  for (uint I=0;I<ASIZE(ThreadData);I++)
  {
    ThreadData[I].RecRSPtr=this;
    ThreadData[I].RS=NULL;
  }

#ifdef RAR_SMP
  RecThreadPool=CreateThreadPool();
#endif

  RealBuf=NULL; // Might be needed in case of exception.
  // NOTE(review): TotalBufferSize is not set in this constructor —
  // presumably a class-level constant; confirm it is initialized before use.
  RealBuf=new byte[TotalBufferSize+SSE_ALIGNMENT];
  // Align the working pointer for SSE loads/stores; RealBuf keeps the
  // original pointer for delete[].
  Buf=(byte *)ALIGN_VALUE(RealBuf,SSE_ALIGNMENT);
}
/**
 * giop_send_buffer_align:
 * @buf: the buffer
 * @boundary: the boundary.
 *
 * Appends zeroed padding to the SendBuffer so that the message size lands
 * on a multiple of @boundary bytes - if any padding is needed at all.
 **/
void
giop_send_buffer_align (GIOPSendBuffer *buf, gulong boundary)
{
	gulong cur_size, padding;

	/* How far past the boundary are we? */
	cur_size = buf->msg.header.message_size + buf->header_size;
	padding  = ALIGN_VALUE (cur_size, boundary) - cur_size;

	if (!padding)
		return;

	/* Grab fresh indirect space if the current chunk is too small. */
	if (buf->indirect_left < padding)
		get_next_indirect (buf, 0);

	/* Append zero bytes and advance the indirect cursor. */
	p_memzero (buf->indirect, padding);
	giop_send_buffer_append_real (buf, buf->indirect, padding);
	buf->indirect += padding;
	buf->indirect_left -= padding;
}
/**
  Migrate temporary RAM (heap + stack) to permanent memory.

  Temporary RAM layout: [heap | stack] starting at TemporaryMemoryBase.
  Permanent layout:     [stack | heap] starting at PermanentMemoryBase
  (the stack moves to the bottom, the heap to the top of the copied region).
  After copying, the running stack is switched by the delta between the old
  and new stack locations.

  @param[in] PeiServices          Pointer to the PEI Services table (unused here).
  @param[in] TemporaryMemoryBase  Base of the temporary RAM being evacuated.
  @param[in] PermanentMemoryBase  Base of the permanent memory destination.
  @param[in] CopySize             Total bytes of temporary RAM to migrate.

  @retval EFI_SUCCESS  Migration completed and stack switched.
**/
EFI_STATUS
EFIAPI
PrePeiCoreTemporaryRamSupport (
  IN CONST EFI_PEI_SERVICES   **PeiServices,
  IN EFI_PHYSICAL_ADDRESS     TemporaryMemoryBase,
  IN EFI_PHYSICAL_ADDRESS     PermanentMemoryBase,
  IN UINTN                    CopySize
  )
{
  VOID   *OldHeap;
  VOID   *NewHeap;
  VOID   *OldStack;
  VOID   *NewStack;
  UINTN  HeapSize;

  // Heap occupies the first (aligned) half of temporary RAM.
  HeapSize = ALIGN_VALUE (CopySize / 2, CPU_STACK_ALIGNMENT);

  OldHeap = (VOID*)(UINTN)TemporaryMemoryBase;
  NewHeap = (VOID*)((UINTN)PermanentMemoryBase + (CopySize - HeapSize));

  OldStack = (VOID*)((UINTN)TemporaryMemoryBase + HeapSize);
  NewStack = (VOID*)(UINTN)PermanentMemoryBase;

  //
  // Migrate the temporary memory stack to permanent memory stack.
  //
  CopyMem (NewStack, OldStack, CopySize - HeapSize);

  //
  // Migrate the temporary memory heap to permanent memory heap.
  //
  CopyMem (NewHeap, OldHeap, HeapSize);

  // Switch the active stack pointer by the old->new displacement; from here
  // on the code runs on the permanent-memory stack.
  SecSwitchStack ((UINTN)NewStack - (UINTN)OldStack);

  return EFI_SUCCESS;
}
/**
  Program the first free SPI Protected Range register (SPIPBR0..2) in the
  Quark PCH root complex with a protection range for the given flash region.

  @param[in]  PchRootComplexBar  Root Complex Base Address Register (must be non-zero).
  @param[in]  DirectValue        If non-zero, raw value to program; if zero, the
                                 range is computed from BaseAddress/Length.
  @param[in]  BaseAddress        Base address of the flash range to protect.
  @param[in]  Length             Length in bytes of the range to protect.
  @param[out] OffsetPtr          Optional: receives the register offset used
                                 (0 if none was free).
**/
EFI_STATUS
WriteFirstFreeSpiProtect (
  IN CONST UINT32                         PchRootComplexBar,
  IN CONST UINT32                         DirectValue,
  IN CONST UINT32                         BaseAddress,
  IN CONST UINT32                         Length,
  OUT UINT32                              *OffsetPtr
  )
{
  UINT32 RegVal;
  UINT32 Offset;
  UINT32 StepLen;

  ASSERT (PchRootComplexBar > 0);

  Offset = 0;
  if (OffsetPtr != NULL) {
    *OffsetPtr = Offset;
  }

  // Find the first of the three protected-range registers that is unused
  // (reads back as zero).
  if (MmioRead32 (PchRootComplexBar + R_QNC_RCRB_SPIPBR0) == 0) {
    Offset = R_QNC_RCRB_SPIPBR0;
  } else {
    if (MmioRead32 (PchRootComplexBar + R_QNC_RCRB_SPIPBR1) == 0) {
      Offset = R_QNC_RCRB_SPIPBR1;
    } else {
      if (MmioRead32 (PchRootComplexBar + R_QNC_RCRB_SPIPBR2) == 0) {
        Offset = R_QNC_RCRB_SPIPBR2;
      }
    }
  }

  if (Offset != 0) {
    if (DirectValue == 0) {
      StepLen = ALIGN_VALUE (Length,SIZE_4KB);   // Bring up to 4K boundary.
      RegVal = BaseAddress + StepLen - 1;
      RegVal &= 0x00FFF000;                     // Set EDS Protected Range Limit (PRL).
      RegVal |= ((BaseAddress >> 12) & 0xfff);  // or in EDS Protected Range Base (PRB).
    } else {
/** ExitBootServices Callback function for memory protection. **/ VOID MemoryProtectionExitBootServicesCallback ( VOID ) { EFI_RUNTIME_IMAGE_ENTRY *RuntimeImage; LIST_ENTRY *Link; // // We need remove the RT protection, because RT relocation need write code segment // at SetVirtualAddressMap(). We cannot assume OS/Loader has taken over page table at that time. // // Firmware does not own page tables after ExitBootServices(), so the OS would // have to relax protection of RT code pages across SetVirtualAddressMap(), or // delay setting protections on RT code pages until after SetVirtualAddressMap(). // OS may set protection on RT based upon EFI_MEMORY_ATTRIBUTES_TABLE later. // if (mImageProtectionPolicy != 0) { for (Link = gRuntime->ImageHead.ForwardLink; Link != &gRuntime->ImageHead; Link = Link->ForwardLink) { RuntimeImage = BASE_CR (Link, EFI_RUNTIME_IMAGE_ENTRY, Link); SetUefiImageMemoryAttributes ((UINT64)(UINTN)RuntimeImage->ImageBase, ALIGN_VALUE(RuntimeImage->ImageSize, EFI_PAGE_SIZE), 0); } } }
/**
  The constructor function

  Runs the common SMM CPU features constructor, caches the MP Services
  protocol, and - when the CPU supports VMX - establishes the MSEG region
  for the STM, either from the MSEG SMRAM HOB or by allocating it from
  SMRAM based on PcdCpuMsegSize.

  @param[in]  ImageHandle  The firmware allocated handle for the EFI image.
  @param[in]  SystemTable  A pointer to the EFI System Table.

  @retval EFI_SUCCESS   The constructor always returns EFI_SUCCESS.
**/
EFI_STATUS
EFIAPI
SmmCpuFeaturesLibStmConstructor (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS              Status;
  CPUID_VERSION_INFO_ECX  RegEcx;
  EFI_HOB_GUID_TYPE       *GuidHob;
  EFI_SMRAM_DESCRIPTOR    *SmramDescriptor;

  //
  // Call the common constructor function
  //
  Status = SmmCpuFeaturesLibConstructor (ImageHandle, SystemTable);
  ASSERT_EFI_ERROR (Status);

  //
  // Lookup the MP Services Protocol
  //
  Status = gBS->LocateProtocol (
                  &gEfiMpServiceProtocolGuid,
                  NULL,
                  (VOID **)&mSmmCpuFeaturesLibMpService
                  );
  ASSERT_EFI_ERROR (Status);

  //
  // If CPU supports VMX, then determine SMRAM range for MSEG.
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, &RegEcx.Uint32, NULL);
  if (RegEcx.Bits.VMX == 1) {
    GuidHob = GetFirstGuidHob (&gMsegSmramGuid);
    if (GuidHob != NULL) {
      //
      // Retrieve MSEG location from MSEG SRAM HOB
      //
      SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);
      if (SmramDescriptor->PhysicalSize > 0) {
        mMsegBase = (UINTN)SmramDescriptor->CpuStart;
        mMsegSize = (UINTN)SmramDescriptor->PhysicalSize;
      }
    } else if (PcdGet32 (PcdCpuMsegSize) > 0) {
      //
      // Allocate MSEG from SMRAM memory
      //
      mMsegBase = (UINTN)AllocatePages (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuMsegSize)));
      if (mMsegBase > 0) {
        // Record the page-rounded size actually backing the allocation.
        mMsegSize = ALIGN_VALUE (PcdGet32 (PcdCpuMsegSize), EFI_PAGE_SIZE);
      } else {
        DEBUG ((DEBUG_ERROR, "Not enough SMRAM resource to allocate MSEG size %08x\n", PcdGet32 (PcdCpuMsegSize)));
      }
    }
    if (mMsegBase > 0) {
      DEBUG ((DEBUG_INFO, "MsegBase: 0x%08x, MsegSize: 0x%08x\n", mMsegBase, mMsegSize));
    }
  }

  return EFI_SUCCESS;
}
// Run the 16-bit Reed-Solomon update over one volume's data chunk, splitting
// the chunk across worker threads. Each thread gets an even-sized (and, with
// SSE, alignment-sized) slice; the per-thread RS coders are created lazily
// and reused across calls.
void RecVolumes5::ProcessRS(RAROptions *Cmd,uint DataNum,const byte *Data,uint MaxRead,bool Encode)
{
/*
  RSCoder16 RS;
  RS.Init(DataCount,RecCount,Encode ? NULL:ValidFlags);
  uint Count=Encode ? RecCount : MissingVolumes;
  for (uint I=0;I<Count;I++)
    RS.UpdateECC(DataNum, I, Data, Buf+I*RecBufferSize, MaxRead);
*/

#ifdef RAR_SMP
  uint ThreadNumber=Cmd->Threads;
#else
  uint ThreadNumber=1;
#endif
  // Don't spawn threads for slices smaller than 4 KiB of work each.
  const uint MinThreadBlock=0x1000;
  ThreadNumber=Min(ThreadNumber,MaxRead/MinThreadBlock);
  if (ThreadNumber<1)
    ThreadNumber=1;
  uint ThreadDataSize=MaxRead/ThreadNumber;
  ThreadDataSize+=(ThreadDataSize&1); // Must be even for 16-bit RS coder.
#ifdef USE_SSE
  ThreadDataSize=ALIGN_VALUE(ThreadDataSize,SSE_ALIGNMENT); // Alignment for SSE operations.
#endif
  if (ThreadDataSize<MinThreadBlock)
    ThreadDataSize=MinThreadBlock;

  for (size_t I=0,CurPos=0;I<ThreadNumber && CurPos<MaxRead;I++)
  {
    RecRSThreadData *td=ThreadData+I;
    if (td->RS==NULL)
    {
      // Lazily build this slot's RS coder the first time it is used.
      td->RS=new RSCoder16;
      td->RS->Init(DataCount,RecCount,Encode ? NULL:ValidFlags);
    }
    td->DataNum=DataNum;
    td->Data=Data;
    td->Encode=Encode;
    td->StartPos=CurPos;

    // Last thread absorbs whatever tail is left after even division.
    size_t EndPos=CurPos+ThreadDataSize;
    if (EndPos>MaxRead || I==ThreadNumber-1)
      EndPos=MaxRead;
    td->Size=EndPos-CurPos;
    CurPos=EndPos;

#ifdef RAR_SMP
    if (ThreadNumber>1)
      RecThreadPool->AddTask(RecThreadRS,(void*)td);
    else
      ProcessAreaRS(td);
#else
    ProcessAreaRS(td);
#endif
  }
#ifdef RAR_SMP
  // Block until all queued slices are finished before returning.
  RecThreadPool->WaitDone();
#endif // RAR_SMP
}
/*
  Read the PartitionName fields from the GPT partition entries, putting them
  into an allocated array that should later be freed.

  @param[in]  BlockIo           Block IO protocol for the flash device.
  @param[out] PartitionEntries  On success, receives a pool-allocated array of
                                partition entries; the caller must free it.

  @retval EFI_SUCCESS           Entries read successfully.
  @retval EFI_OUT_OF_RESOURCES  A pool allocation failed.
  @retval EFI_DEVICE_ERROR      The media does not carry a valid GPT.
  @return                       Error codes from BlockIo->ReadBlocks().
*/
STATIC
EFI_STATUS
ReadPartitionEntries (
  IN  EFI_BLOCK_IO_PROTOCOL *BlockIo,
  OUT EFI_PARTITION_ENTRY  **PartitionEntries
  )
{
  UINTN                       EntrySize;
  UINTN                       NumEntries;
  UINTN                       BufferSize;
  UINT32                      MediaId;
  EFI_PARTITION_TABLE_HEADER *GptHeader;
  EFI_STATUS                  Status;

  MediaId = BlockIo->Media->MediaId;

  //
  // Read size of Partition entry and number of entries from GPT header
  //
  GptHeader = AllocatePool (BlockIo->Media->BlockSize);
  if (GptHeader == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  Status = BlockIo->ReadBlocks (BlockIo, MediaId, 1, BlockIo->Media->BlockSize, (VOID *) GptHeader);
  if (EFI_ERROR (Status)) {
    // Fix: free the header buffer on the read-failure path (was leaked).
    FreePool (GptHeader);
    return Status;
  }

  // Check there is a GPT on the media
  if (GptHeader->Header.Signature != EFI_PTAB_HEADER_ID ||
      GptHeader->MyLBA != 1) {
    DEBUG ((EFI_D_ERROR,
      "Fastboot platform: No GPT on flash. "
      "Fastboot on Versatile Express does not support MBR.\n"
      ));
    // Fix: free the header buffer on the no-GPT path (was leaked).
    FreePool (GptHeader);
    return EFI_DEVICE_ERROR;
  }

  EntrySize = GptHeader->SizeOfPartitionEntry;
  NumEntries = GptHeader->NumberOfPartitionEntries;

  FreePool (GptHeader);

  ASSERT (EntrySize != 0);
  ASSERT (NumEntries != 0);

  BufferSize = ALIGN_VALUE (EntrySize * NumEntries, BlockIo->Media->BlockSize);
  *PartitionEntries = AllocatePool (BufferSize);
  // Fix: test the allocation result (*PartitionEntries). The original tested
  // the out-parameter pointer itself, which is never NULL here, so an
  // allocation failure went undetected.
  if (*PartitionEntries == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  Status = BlockIo->ReadBlocks (BlockIo, MediaId, 2, BufferSize, (VOID *) *PartitionEntries);
  if (EFI_ERROR (Status)) {
    // Fix: free the allocated entry array (*PartitionEntries), not the
    // pointer-to-pointer argument, and clear the caller's pointer.
    FreePool (*PartitionEntries);
    *PartitionEntries = NULL;
    return Status;
  }

  return Status;
}
// Reconstruct missing or damaged RAR volumes of a RAR5 volume set from its
// .rev recovery volumes. Scans the directory for all matching .rar/.rev
// files, verifies their checksums, creates replacement files for the missing
// volumes, and regenerates their contents with the 16-bit Reed-Solomon
// coder. Returns true on successful reconstruction.
bool RecVolumes5::Restore(RAROptions *Cmd,const wchar *Name,bool Silent)
{
  // Build a wildcard mask covering all volumes of this set: strip the
  // numeric volume part and append "*.*".
  wchar ArcName[NM];
  wcscpy(ArcName,Name);
  wchar *Num=GetVolNumPart(ArcName);
  while (Num>ArcName && IsDigit(*(Num-1)))
    Num--;
  wcsncpyz(Num,L"*.*",ASIZE(ArcName)-(Num-ArcName));

  wchar FirstVolName[NM];
  *FirstVolName=0;
  int64 RecFileSize=0;

  // Pass 1: enumerate all candidate files and slot RAR volumes and REV
  // recovery volumes into RecItems by their volume number.
  FindFile VolFind;
  VolFind.SetMask(ArcName);
  FindData fd;
  uint FoundRecVolumes=0;
  while (VolFind.Next(&fd))
  {
    Wait();
    Archive *Vol=new Archive(Cmd);
    int ItemPos=-1;
    if (Vol->WOpen(fd.Name))
    {
      if (CmpExt(fd.Name,L"rev"))
      {
        // REV volume: its header tells us which recovery slot it fills.
        uint RecNum=ReadHeader(Vol,FoundRecVolumes==0);
        if (RecNum!=0)
        {
          if (FoundRecVolumes==0)
            RecFileSize=Vol->FileLength();
          ItemPos=RecNum;
          FoundRecVolumes++;
        }
      }
      else
        if (Vol->IsArchive(true) && (Vol->SFXSize>0 || CmpExt(fd.Name,L"rar")))
        {
          if (!Vol->Volume && !Vol->BrokenHeader)
          {
            uiMsg(UIERROR_NOTVOLUME,ArcName);
            return false;
          }
          // We work with archive as with raw data file, so we do not want
          // to spend time to QOpen I/O redirection.
          Vol->QOpenUnload();
          Vol->Seek(0,SEEK_SET);
          // RAR volume found. Get its number, store the handle in appropriate
          // array slot, clean slots in between if we had to grow the array.
          wchar *Num=GetVolNumPart(fd.Name);
          uint VolNum=0;
          for (uint K=1;Num>=fd.Name && IsDigit(*Num);K*=10,Num--)
            VolNum+=(*Num-'0')*K;
          // NOTE(review): this 'continue' skips the 'delete Vol' below,
          // apparently leaking Vol for out-of-range volume numbers — confirm.
          if (VolNum==0 || VolNum>MaxVolumes)
            continue;
          size_t CurSize=RecItems.Size();
          if (VolNum>CurSize)
          {
            RecItems.Alloc(VolNum);
            for (size_t I=CurSize;I<VolNum;I++)
              RecItems[I].f=NULL;
          }
          ItemPos=VolNum-1;
          if (*FirstVolName==0)
            VolNameToFirstName(fd.Name,FirstVolName,ASIZE(FirstVolName),true);
        }
    }
    if (ItemPos==-1)
      delete Vol; // Skip found file, it is not RAR or REV volume.
    else
      if ((uint)ItemPos<RecItems.Size()) // Check if found more REV than needed.
      {
        // Store found RAR or REV volume.
        RecVolItem *Item=RecItems+ItemPos;
        Item->f=Vol;
        Item->New=false;
        wcsncpyz(Item->Name,fd.Name,ASIZE(Item->Name));
      }
  }

  if (!Silent || FoundRecVolumes!=0)
    uiMsg(UIMSG_RECVOLFOUND,FoundRecVolumes);
  if (FoundRecVolumes==0)
    return false;

  // Pass 2: verify checksums of everything we found and count the missing
  // or damaged data volumes.
  uiMsg(UIMSG_RECVOLCALCCHECKSUM);
  MissingVolumes=0;
  for (uint I=0;I<TotalCount;I++)
  {
    RecVolItem *Item=&RecItems[I];
    if (Item->f!=NULL)
    {
      uiMsg(UIMSG_STRING,Item->Name);
      uint RevCRC;
      CalcFileSum(Item->f,&RevCRC,NULL,Cmd->Threads,INT64NDF,CALCFSUM_CURPOS);
      Item->Valid=RevCRC==Item->CRC;
      if (!Item->Valid)
      {
        uiMsg(UIMSG_CHECKSUM,Item->Name);
        // Close only corrupt REV volumes here. We'll close and rename corrupt
        // RAR volumes later, if we'll know that recovery is possible.
        if (I>=DataCount)
        {
          Item->f->Close();
          Item->f=NULL;
          FoundRecVolumes--;
        }
      }
    }
    if (I<DataCount && (Item->f==NULL || !Item->Valid))
      MissingVolumes++;
  }
  uiMsg(UIMSG_RECVOLMISSING,MissingVolumes);

  if (MissingVolumes==0)
  {
    uiMsg(UIERROR_RECVOLALLEXIST);
    return false;
  }
  if (MissingVolumes>FoundRecVolumes)
  {
    uiMsg(UIERROR_RECVOLFOUND,FoundRecVolumes); // Intentionally not displayed in console mode.
    uiMsg(UIERROR_RECVOLCANNOTFIX);
    return false;
  }

  uiMsg(UIMSG_RECONSTRUCTING);

  // Create missing and rename bad volumes.
  uint64 MaxVolSize=0;
  for (uint I=0;I<DataCount;I++)
  {
    RecVolItem *Item=&RecItems[I];
    if (Item->FileSize>MaxVolSize)
      MaxVolSize=Item->FileSize;
    if (Item->f!=NULL && !Item->Valid)
    {
      // Corrupt RAR volume: move it aside as "<name>.bad".
      Item->f->Close();
      wchar NewName[NM];
      wcscpy(NewName,Item->Name);
      wcscat(NewName,L".bad");
      uiMsg(UIMSG_BADARCHIVE,Item->Name);
      uiMsg(UIMSG_RENAMING,Item->Name,NewName);
      RenameFile(Item->Name,NewName);
      delete Item->f;
      Item->f=NULL;
    }
    if (Item->New=(Item->f==NULL))
    {
      // Create an empty replacement volume to be filled by reconstruction.
      wcsncpyz(Item->Name,FirstVolName,ASIZE(Item->Name));
      uiMsg(UIMSG_CREATING,Item->Name);
      uiMsg(UIEVENT_NEWARCHIVE,Item->Name);
      File *NewVol=new File;
      bool UserReject;
      if (!FileCreate(Cmd,NewVol,Item->Name,ASIZE(Item->Name),&UserReject))
      {
        if (!UserReject)
          ErrHandler.CreateErrorMsg(Item->Name);
        ErrHandler.Exit(UserReject ? RARX_USERBREAK:RARX_CREATE);
      }
      NewVol->Prealloc(Item->FileSize);
      Item->f=NewVol;
      Item->New=true;
    }
    NextVolumeName(FirstVolName,ASIZE(FirstVolName),false);
  }

  int64 ProcessedSize=0;
#ifndef GUI
  int LastPercent=-1;
  mprintf(L" ");
#endif
  // Even though we already preliminary calculated missing volume number,
  // let's do it again now, when we have the final and exact information.
  MissingVolumes=0;
  ValidFlags=new bool[TotalCount];
  for (uint I=0;I<TotalCount;I++)
  {
    ValidFlags[I]=RecItems[I].f!=NULL && !RecItems[I].New;
    if (I<DataCount && !ValidFlags[I])
      MissingVolumes++;
  }

  // Size of per file buffer.
  RecBufferSize=TotalBufferSize/MissingVolumes;
  if ((RecBufferSize&1)==1) // Must be even for our RS16 codec.
    RecBufferSize--;
#ifdef USE_SSE
  RecBufferSize&=~(SSE_ALIGNMENT-1); // Align for SSE.
#endif

  uint *Data=new uint[TotalCount];

  RSCoder16 RS;
  if (!RS.Init(DataCount,RecCount,ValidFlags))
    return false; // Should not happen, we check parameter validity above.

  RealReadBuffer=new byte[RecBufferSize+SSE_ALIGNMENT];
  byte *ReadBuf=(byte *)ALIGN_VALUE(RealReadBuffer,SSE_ALIGNMENT);

  // Main reconstruction loop: read one buffer-sized chunk from each valid
  // volume (substituting REV data for missing RAR volumes), feed it through
  // the RS coder, then write the recovered chunks to the new volumes.
  while (true)
  {
    Wait();
    int MaxRead=0;
    for (uint I=0,J=DataCount;I<DataCount;I++)
    {
      uint VolNum=I;
      if (!ValidFlags[I]) // If next RAR volume is missing or invalid.
      {
        while (!ValidFlags[J]) // Find next valid REV volume.
          J++;
        VolNum=J++; // Use next valid REV volume data instead of RAR.
      }
      RecVolItem *Item=RecItems+VolNum;
      byte *B=&ReadBuf[0];
      int ReadSize=0;
      if (Item->f!=NULL && !Item->New)
        ReadSize=Item->f->Read(B,RecBufferSize);
      if (ReadSize!=RecBufferSize)
        memset(B+ReadSize,0,RecBufferSize-ReadSize);
      if (ReadSize>MaxRead)
        MaxRead=ReadSize;

      // We can have volumes of different size. Let's use data chunk
      // for largest volume size.
      uint DataToProcess=(uint)Min(RecBufferSize,MaxVolSize-ProcessedSize);
      ProcessRS(Cmd,I,B,DataToProcess,false);
    }
    if (MaxRead==0)
      break;
    for (uint I=0,J=0;I<DataCount;I++)
      if (!ValidFlags[I])
      {
        // Write the reconstructed chunk of this missing volume, capped at
        // its remaining expected file size.
        RecVolItem *Item=RecItems+I;
        size_t WriteSize=(size_t)Min(MaxRead,Item->FileSize);
        Item->f->Write(Buf+(J++)*RecBufferSize,WriteSize);
        Item->FileSize-=WriteSize;
      }
    int CurPercent=ToPercent(ProcessedSize,RecFileSize);
    if (!Cmd->DisablePercentage && CurPercent!=LastPercent)
    {
      uiProcessProgress("RV",ProcessedSize,RecFileSize);
      LastPercent=CurPercent;
    }
    ProcessedSize+=MaxRead;
  }

  for (uint I=0;I<TotalCount;I++)
    if (RecItems[I].f!=NULL)
      RecItems[I].f->Close();

  delete[] ValidFlags;
  delete[] Data;
#if !defined(GUI) && !defined(SILENT)
  if (!Cmd->DisablePercentage)
    mprintf(L"\b\b\b\b100%%");
  if (!Silent && !Cmd->DisableDone)
    mprintf(St(MDone));
#endif
  return true;
}
/**
  Protect UEFI PE/COFF image.

  Parses the loaded image's PE/COFF headers, records every section that is
  executable-and-not-writable as a code segment, then applies page
  protections and registers the record so they can be undone later.

  @param[in]  LoadedImage              The loaded image protocol
  @param[in]  LoadedImageDevicePath    The loaded image device path protocol
**/
VOID
ProtectUefiImage (
  IN EFI_LOADED_IMAGE_PROTOCOL   *LoadedImage,
  IN EFI_DEVICE_PATH_PROTOCOL    *LoadedImageDevicePath
  )
{
  VOID                                 *ImageAddress;
  EFI_IMAGE_DOS_HEADER                 *DosHdr;
  UINT32                               PeCoffHeaderOffset;
  UINT32                               SectionAlignment;
  EFI_IMAGE_SECTION_HEADER             *Section;
  EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION  Hdr;
  UINT8                                *Name;
  UINTN                                Index;
  IMAGE_PROPERTIES_RECORD              *ImageRecord;
  CHAR8                                *PdbPointer;
  IMAGE_PROPERTIES_RECORD_CODE_SECTION *ImageRecordCodeSection;
  UINT16                               Magic;
  BOOLEAN                              IsAligned;
  UINT32                               ProtectionPolicy;

  DEBUG ((DEBUG_INFO, "ProtectUefiImageCommon - 0x%x\n", LoadedImage));
  DEBUG ((DEBUG_INFO, "  - 0x%016lx - 0x%016lx\n", (EFI_PHYSICAL_ADDRESS)(UINTN)LoadedImage->ImageBase, LoadedImage->ImageSize));

  // Cannot set attributes without the CPU Arch protocol.
  if (gCpu == NULL) {
    return ;
  }

  ProtectionPolicy = GetUefiImageProtectionPolicy (LoadedImage, LoadedImageDevicePath);
  switch (ProtectionPolicy) {
  case DO_NOT_PROTECT:
    return ;
  case PROTECT_IF_ALIGNED_ELSE_ALLOW:
    break;
  default:
    ASSERT(FALSE);
    return ;
  }

  ImageRecord = AllocateZeroPool (sizeof(*ImageRecord));
  if (ImageRecord == NULL) {
    return ;
  }
  ImageRecord->Signature = IMAGE_PROPERTIES_RECORD_SIGNATURE;

  //
  // Step 1: record whole region
  //
  ImageRecord->ImageBase = (EFI_PHYSICAL_ADDRESS)(UINTN)LoadedImage->ImageBase;
  ImageRecord->ImageSize = LoadedImage->ImageSize;

  ImageAddress = LoadedImage->ImageBase;

  PdbPointer = PeCoffLoaderGetPdbPointer ((VOID*) (UINTN) ImageAddress);
  if (PdbPointer != NULL) {
    DEBUG ((DEBUG_VERBOSE, "  Image - %a\n", PdbPointer));
  }

  //
  // Check PE/COFF image
  //
  DosHdr = (EFI_IMAGE_DOS_HEADER *) (UINTN) ImageAddress;
  PeCoffHeaderOffset = 0;
  if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {
    PeCoffHeaderOffset = DosHdr->e_lfanew;
  }

  Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)((UINT8 *) (UINTN) ImageAddress + PeCoffHeaderOffset);
  if (Hdr.Pe32->Signature != EFI_IMAGE_NT_SIGNATURE) {
    DEBUG ((DEBUG_VERBOSE, "Hdr.Pe32->Signature invalid - 0x%x\n", Hdr.Pe32->Signature));
    // It might be image in SMM.
    goto Finish;
  }

  //
  // Get SectionAlignment
  //
  if (Hdr.Pe32->FileHeader.Machine == IMAGE_FILE_MACHINE_IA64 && Hdr.Pe32->OptionalHeader.Magic == EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC) {
    //
    // NOTE: Some versions of Linux ELILO for Itanium have an incorrect magic value
    //       in the PE/COFF Header. If the MachineType is Itanium(IA64) and the
    //       Magic value in the OptionalHeader is EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC
    //       then override the magic value to EFI_IMAGE_NT_OPTIONAL_HDR64_MAGIC
    //
    Magic = EFI_IMAGE_NT_OPTIONAL_HDR64_MAGIC;
  } else {
    //
    // Get the magic value from the PE/COFF Optional Header
    //
    Magic = Hdr.Pe32->OptionalHeader.Magic;
  }
  if (Magic == EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC) {
    SectionAlignment  = Hdr.Pe32->OptionalHeader.SectionAlignment;
  } else {
    SectionAlignment  = Hdr.Pe32Plus->OptionalHeader.SectionAlignment;
  }

  IsAligned = IsMemoryProtectionSectionAligned (SectionAlignment, LoadedImage->ImageCodeType);
  if (!IsAligned) {
    DEBUG ((DEBUG_VERBOSE, "!!!!!!!!  ProtectUefiImageCommon - Section Alignment(0x%x) is incorrect  !!!!!!!!\n", SectionAlignment));
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID*) (UINTN) ImageAddress);
    if (PdbPointer != NULL) {
      DEBUG ((DEBUG_VERBOSE, "!!!!!!!!  Image - %a  !!!!!!!!\n", PdbPointer));
    }
    goto Finish;
  }

  Section = (EFI_IMAGE_SECTION_HEADER *) (
               (UINT8 *) (UINTN) ImageAddress +
               PeCoffHeaderOffset +
               sizeof(UINT32) +
               sizeof(EFI_IMAGE_FILE_HEADER) +
               Hdr.Pe32->FileHeader.SizeOfOptionalHeader
               );
  ImageRecord->CodeSegmentCount = 0;
  InitializeListHead (&ImageRecord->CodeSegmentList);
  for (Index = 0; Index < Hdr.Pe32->FileHeader.NumberOfSections; Index++) {
    Name = Section[Index].Name;
    DEBUG ((
      DEBUG_VERBOSE,
      "  Section - '%c%c%c%c%c%c%c%c'\n",
      Name[0],
      Name[1],
      Name[2],
      Name[3],
      Name[4],
      Name[5],
      Name[6],
      Name[7]
      ));

    //
    // Instead of assuming that a PE/COFF section of type EFI_IMAGE_SCN_CNT_CODE
    // can always be mapped read-only, classify a section as a code section only
    // if it has the executable attribute set and the writable attribute cleared.
    //
    // This adheres more closely to the PE/COFF spec, and avoids issues with
    // Linux OS loaders that may consist of a single read/write/execute section.
    //
    if ((Section[Index].Characteristics & (EFI_IMAGE_SCN_MEM_WRITE | EFI_IMAGE_SCN_MEM_EXECUTE)) == EFI_IMAGE_SCN_MEM_EXECUTE) {
      DEBUG ((DEBUG_VERBOSE, "  VirtualSize          - 0x%08x\n", Section[Index].Misc.VirtualSize));
      DEBUG ((DEBUG_VERBOSE, "  VirtualAddress       - 0x%08x\n", Section[Index].VirtualAddress));
      DEBUG ((DEBUG_VERBOSE, "  SizeOfRawData        - 0x%08x\n", Section[Index].SizeOfRawData));
      DEBUG ((DEBUG_VERBOSE, "  PointerToRawData     - 0x%08x\n", Section[Index].PointerToRawData));
      DEBUG ((DEBUG_VERBOSE, "  PointerToRelocations - 0x%08x\n", Section[Index].PointerToRelocations));
      DEBUG ((DEBUG_VERBOSE, "  PointerToLinenumbers - 0x%08x\n", Section[Index].PointerToLinenumbers));
      DEBUG ((DEBUG_VERBOSE, "  NumberOfRelocations  - 0x%08x\n", Section[Index].NumberOfRelocations));
      DEBUG ((DEBUG_VERBOSE, "  NumberOfLinenumbers  - 0x%08x\n", Section[Index].NumberOfLinenumbers));
      DEBUG ((DEBUG_VERBOSE, "  Characteristics      - 0x%08x\n", Section[Index].Characteristics));

      //
      // Step 2: record code section
      //
      // NOTE(review): on allocation failure this returns without freeing
      // ImageRecord or the code sections recorded so far — confirm whether
      // the leak is acceptable here.
      ImageRecordCodeSection = AllocatePool (sizeof(*ImageRecordCodeSection));
      if (ImageRecordCodeSection == NULL) {
        return ;
      }
      ImageRecordCodeSection->Signature = IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE;

      ImageRecordCodeSection->CodeSegmentBase = (UINTN)ImageAddress + Section[Index].VirtualAddress;
      ImageRecordCodeSection->CodeSegmentSize = ALIGN_VALUE(Section[Index].SizeOfRawData, SectionAlignment);

      DEBUG ((DEBUG_VERBOSE, "ImageCode: 0x%016lx - 0x%016lx\n", ImageRecordCodeSection->CodeSegmentBase, ImageRecordCodeSection->CodeSegmentSize));

      InsertTailList (&ImageRecord->CodeSegmentList, &ImageRecordCodeSection->Link);
      ImageRecord->CodeSegmentCount++;
    }
  }

  if (ImageRecord->CodeSegmentCount == 0) {
    //
    // If a UEFI executable consists of a single read+write+exec PE/COFF
    // section, that isn't actually an error. The image can be launched
    // alright, only image protection cannot be applied to it fully.
    //
    // One example that elicits this is (some) Linux kernels (with the EFI stub
    // of course).
    //
    DEBUG ((DEBUG_WARN, "!!!!!!!!  ProtectUefiImageCommon - CodeSegmentCount is 0  !!!!!!!!\n"));
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID*) (UINTN) ImageAddress);
    if (PdbPointer != NULL) {
      DEBUG ((DEBUG_WARN, "!!!!!!!!  Image - %a  !!!!!!!!\n", PdbPointer));
    }
    goto Finish;
  }

  //
  // Final
  //
  SortImageRecordCodeSection (ImageRecord);
  //
  // Check overlap all section in ImageBase/Size
  //
  if (!IsImageRecordCodeSectionValid (ImageRecord)) {
    DEBUG ((DEBUG_ERROR, "IsImageRecordCodeSectionValid - FAIL\n"));
    goto Finish;
  }

  //
  // Round up the ImageSize, some CPU arch may return EFI_UNSUPPORTED if ImageSize is not aligned.
  // Given that the loader always allocates full pages, we know the space after the image is not used.
  //
  ImageRecord->ImageSize = ALIGN_VALUE(LoadedImage->ImageSize, EFI_PAGE_SIZE);

  //
  // CPU ARCH present. Update memory attribute directly.
  //
  SetUefiImageProtectionAttributes (ImageRecord);

  //
  // Record the image record in the list so we can undo the protections later
  //
  InsertTailList (&mProtectedImageRecordList, &ImageRecord->Link);

Finish:
  return ;
}
/**
  Get FFS buffer pointer by FileName GUID and FileType.

  Scans the FD image for firmware volumes (probing on 4KB boundaries), then
  walks each FV's FFS file chain looking for a file whose name GUID matches
  FileName and whose type matches Type (or any type if Type is
  EFI_FV_FILETYPE_ALL).

  @param[in]  FdStart           The System Firmware FD image
  @param[in]  FdSize            The size of System Firmware FD image
  @param[in]  FileName          The FileName GUID of FFS to be found
  @param[in]  Type              The FileType of FFS to be found
  @param[out] OutFfsBuffer      The FFS buffer found, including FFS_FILE_HEADER
  @param[out] OutFfsBufferSize  The size of FFS buffer found, including FFS_FILE_HEADER

  @retval TRUE   The FFS buffer is found.
  @retval FALSE  The FFS buffer is not found.
**/
BOOLEAN
GetFfsByName (
  IN VOID             *FdStart,
  IN UINTN            FdSize,
  IN EFI_GUID         *FileName,
  IN EFI_FV_FILETYPE  Type,
  OUT VOID            **OutFfsBuffer,
  OUT UINTN           *OutFfsBufferSize
  )
{
  UINTN                           FvSize;
  EFI_FIRMWARE_VOLUME_HEADER      *FvHeader;
  EFI_FIRMWARE_VOLUME_EXT_HEADER  *FvExtHeader;
  EFI_FFS_FILE_HEADER             *FfsHeader;
  UINT32                          FfsSize;
  UINTN                           TestLength;
  BOOLEAN                         FvFound;

  DEBUG ((DEBUG_INFO, "GetFfsByName - FV: 0x%08x - 0x%08x\n", (UINTN)FdStart, (UINTN)FdSize));

  FvFound  = FALSE;
  FvHeader = (EFI_FIRMWARE_VOLUME_HEADER *)FdStart;
  while ((UINTN)FvHeader < (UINTN)FdStart + FdSize - 1) {
    FvSize = (UINTN)FdStart + FdSize - (UINTN)FvHeader;
    //
    // Probe for a valid FV signature; candidate FV bases are assumed to lie
    // on 4KB boundaries within the FD, so advance in SIZE_4KB steps.
    //
    if (FvHeader->Signature != EFI_FVH_SIGNATURE) {
      FvHeader = (EFI_FIRMWARE_VOLUME_HEADER *)((UINTN)FvHeader + SIZE_4KB);
      continue;
    }
    DEBUG((DEBUG_ERROR, "checking FV....0x%08x - 0x%x\n", FvHeader, FvHeader->FvLength));
    FvFound = TRUE;
    //
    // A declared FvLength larger than the remaining FD space means the image
    // is malformed; bail out rather than read past the end of the buffer.
    //
    if (FvHeader->FvLength > FvSize) {
      DEBUG((DEBUG_ERROR, "GetFfsByName - FvSize: 0x%08x, MaxSize - 0x%08x\n", (UINTN)FvHeader->FvLength, (UINTN)FvSize));
      return FALSE;
    }
    FvSize = (UINTN)FvHeader->FvLength;

    //
    // Find FFS
    //
    // The first FFS file starts after the FV header (and the extended header,
    // when present), rounded up to an 8-byte boundary relative to the FV base.
    //
    if (FvHeader->ExtHeaderOffset != 0) {
      FvExtHeader = (EFI_FIRMWARE_VOLUME_EXT_HEADER *)((UINT8 *)FvHeader + FvHeader->ExtHeaderOffset);
      FfsHeader = (EFI_FFS_FILE_HEADER *)((UINT8 *)FvExtHeader + FvExtHeader->ExtHeaderSize);
    } else {
      FfsHeader = (EFI_FFS_FILE_HEADER *)((UINT8 *)FvHeader + FvHeader->HeaderLength);
    }
    FfsHeader = (EFI_FFS_FILE_HEADER *)((UINTN)FvHeader + ALIGN_VALUE((UINTN)FfsHeader - (UINTN)FvHeader, 8));

    while ((UINTN)FfsHeader < (UINTN)FvHeader + FvSize - 1) {
      DEBUG((DEBUG_INFO, "GetFfsByName - FFS: 0x%08x\n", FfsHeader));
      //
      // An all-erased header marks the end of the used portion of the FV.
      // Cap the probe length so the erase check never reads past the FV.
      //
      TestLength = (UINTN)((UINTN)FvHeader + FvSize - (UINTN)FfsHeader);
      if (TestLength > sizeof(EFI_FFS_FILE_HEADER)) {
        TestLength = sizeof(EFI_FFS_FILE_HEADER);
      }
      if (IsBufferErased(1, FfsHeader, TestLength)) {
        break;
      }

      // FFS2 large files and standard files encode their size differently.
      if (IS_FFS_FILE2(FfsHeader)) {
        FfsSize = FFS_FILE2_SIZE(FfsHeader);
      } else {
        FfsSize = FFS_FILE_SIZE(FfsHeader);
      }

      if (CompareGuid(FileName, &FfsHeader->Name) &&
          ((Type == EFI_FV_FILETYPE_ALL) || (FfsHeader->Type == Type))) {
        //
        // Check section
        //
        // Match found: return the whole FFS file (header included).
        //
        *OutFfsBuffer = FfsHeader;
        *OutFfsBufferSize = FfsSize;
        return TRUE;
      } else {
        //
        // Any other type is not allowed
        //
        DEBUG((DEBUG_INFO, "GetFfsByName - other FFS type 0x%x, name %g\n", FfsHeader->Type, &FfsHeader->Name));
      }

      //
      // Next File
      //
      // FFS files are 8-byte aligned within the FV.
      //
      FfsHeader = (EFI_FFS_FILE_HEADER *)((UINTN)FfsHeader + ALIGN_VALUE(FfsSize, 8));
    }

    //
    // Next FV
    //
    FvHeader = (VOID *)(UINTN)((UINTN)FvHeader + FvHeader->FvLength);
    DEBUG((DEBUG_ERROR, "Next FV....0x%08x - 0x%x\n", FvHeader, FvHeader->FvLength));
  }

  if (!FvFound) {
    DEBUG((DEBUG_ERROR, "GetFfsByName - NO FV Found\n"));
  }

  return FALSE;
}
/**
  Provides the DMA controller-specific addresses needed to access system memory.

  Operation is relative to the DMA bus master.

  @param  Operation             Indicates if the bus master is going to read or write to system memory.
  @param  HostAddress           The system memory address to map to the DMA controller.
  @param  NumberOfBytes         On input the number of bytes to map. On output the number of bytes
                                that were mapped.
  @param  DeviceAddress         The resulting map address for the bus master controller to use to
                                access the hosts HostAddress.
  @param  Mapping               A resulting value to pass to Unmap(). Only valid on EFI_SUCCESS.

  @retval EFI_SUCCESS           The range was mapped for the returned NumberOfBytes.
  @retval EFI_UNSUPPORTED       The HostAddress cannot be mapped as a common buffer.
  @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
  @retval EFI_OUT_OF_RESOURCES  The request could not be completed due to a lack of resources.
  @retval EFI_DEVICE_ERROR      The system hardware could not map the requested address.

**/
EFI_STATUS
EFIAPI
DmaMap (
  IN     DMA_MAP_OPERATION  Operation,
  IN     VOID               *HostAddress,
  IN OUT UINTN              *NumberOfBytes,
  OUT    PHYSICAL_ADDRESS   *DeviceAddress,
  OUT    VOID               **Mapping
  )
{
  EFI_STATUS                       Status;
  MAP_INFO_INSTANCE                *Map;
  VOID                             *Buffer;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  GcdDescriptor;

  if (HostAddress == NULL || NumberOfBytes == NULL || DeviceAddress == NULL || Mapping == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  if (Operation >= MapOperationMaximum) {
    return EFI_INVALID_PARAMETER;
  }

  *DeviceAddress = ConvertToPhysicalAddress (HostAddress);

  // Remember range so we can flush on the other side
  Map = AllocatePool (sizeof (MAP_INFO_INSTANCE));
  if (Map == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // A buffer that is not cache-line aligned (base or length) cannot simply be
  // flushed/invalidated, because neighbouring data shares its cache lines.
  //
  if ((((UINTN)HostAddress & (gCacheAlignment - 1)) != 0) ||
      ((*NumberOfBytes % gCacheAlignment) != 0)) {

    // Get the cacheability of the region
    Status = gDS->GetMemorySpaceDescriptor (*DeviceAddress, &GcdDescriptor);
    if (EFI_ERROR (Status)) {
      //
      // BUGFIX: release the bookkeeping record on the error path instead of
      // leaking it (the original returned with Map still allocated).
      //
      FreePool (Map);
      return Status;
    }

    // If the mapped buffer is not an uncached buffer
    if ((GcdDescriptor.Attributes != EFI_MEMORY_WC) &&
        (GcdDescriptor.Attributes != EFI_MEMORY_UC)) {
      //
      // If the buffer does not fill entire cache lines we must double buffer into
      // uncached memory. Device (PCI) address becomes uncached page.
      //
      Map->DoubleBuffer = TRUE;
      Status = DmaAllocateBuffer (EfiBootServicesData, EFI_SIZE_TO_PAGES (*NumberOfBytes), &Buffer);
      if (EFI_ERROR (Status)) {
        //
        // BUGFIX: release the bookkeeping record on the error path instead of
        // leaking it.
        //
        FreePool (Map);
        return Status;
      }

      // For device-reads, pre-populate the bounce buffer with the host data.
      if ((Operation == MapOperationBusMasterRead) ||
          (Operation == MapOperationBusMasterCommonBuffer)) {
        CopyMem (Buffer, HostAddress, *NumberOfBytes);
      }

      *DeviceAddress = (PHYSICAL_ADDRESS)(UINTN)Buffer;
    } else {
      Map->DoubleBuffer = FALSE;
    }
  } else {
    Map->DoubleBuffer = FALSE;

    // Flush the Data Cache (should not have any effect if the memory region is uncached)
    gCpu->FlushDataCache (gCpu, *DeviceAddress, *NumberOfBytes, EfiCpuFlushTypeWriteBackInvalidate);

    if ((Operation == MapOperationBusMasterRead) ||
        (Operation == MapOperationBusMasterCommonBuffer)) {
      // In case the buffer is used for instance to send command to a PCI
      // controller, we must ensure the memory is uncached
      Status = gDS->SetMemorySpaceAttributes (*DeviceAddress & ~(BASE_4KB - 1),
                                              ALIGN_VALUE (*NumberOfBytes, BASE_4KB),
                                              EFI_MEMORY_WC);
      ASSERT_EFI_ERROR (Status);
    }
  }

  Map->HostAddress   = (UINTN)HostAddress;
  Map->DeviceAddress = *DeviceAddress;
  Map->NumberOfBytes = *NumberOfBytes;
  Map->Operation     = Operation;

  //
  // Publish the mapping to the caller only once every fallible step has
  // succeeded, so a failed Map() never leaves *Mapping pointing at freed
  // or half-initialized memory.
  //
  *Mapping = Map;

  return EFI_SUCCESS;
}
/**
  Configure a virtio ring.

  This function sets up internal storage (the guest-host communication area)
  and lays out several "navigation" (ie. no-ownership) pointers to parts of
  that storage.

  Relevant sections from the virtio-0.9.5 spec:
  - 1.1 Virtqueues,
  - 2.3 Virtqueue Configuration.

  @param[in]  QueueSize The number of descriptors to allocate for the virtio
                        ring, as requested by the host.

  @param[out] Ring      The virtio ring to set up.

  @retval EFI_OUT_OF_RESOURCES  AllocatePages() failed to allocate contiguous
                                pages for the requested QueueSize. Fields of
                                Ring have indeterminate value.

  @retval EFI_SUCCESS           Allocation and setup successful. Ring->Base
                                (and nothing else) is responsible for
                                deallocation.
**/
EFI_STATUS
EFIAPI
VirtioRingInit (
  IN  UINT16 QueueSize,
  OUT VRING  *Ring
  )
{
  UINTN           RingSize;
  volatile UINT8  *RingPagesPtr;

  //
  // Size the ring as two page-aligned areas: the guest-written part
  // (descriptor table + available ring) and the host-written part (used
  // ring). The layout below must mirror these two size computations exactly.
  //
  RingSize = ALIGN_VALUE (
               sizeof *Ring->Desc * QueueSize +
               sizeof *Ring->Avail.Flags +
               sizeof *Ring->Avail.Idx +
               sizeof *Ring->Avail.Ring * QueueSize +
               sizeof *Ring->Avail.UsedEvent,
               EFI_PAGE_SIZE);

  RingSize += ALIGN_VALUE (
                sizeof *Ring->Used.Flags +
                sizeof *Ring->Used.Idx +
                sizeof *Ring->Used.UsedElem * QueueSize +
                sizeof *Ring->Used.AvailEvent,
                EFI_PAGE_SIZE);

  Ring->NumPages = EFI_SIZE_TO_PAGES (RingSize);
  Ring->Base = AllocatePages (Ring->NumPages);
  if (Ring->Base == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  // Start from an all-zero ring so the host sees quiesced indices/flags.
  SetMem (Ring->Base, RingSize, 0x00);
  RingPagesPtr = Ring->Base;

  // Carve the guest-written area: descriptor table first, then the
  // available ring fields, in spec order.
  Ring->Desc = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Desc * QueueSize;

  Ring->Avail.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Flags;

  Ring->Avail.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Idx;

  Ring->Avail.Ring = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Ring * QueueSize;

  Ring->Avail.UsedEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.UsedEvent;

  // The used ring starts on the next page boundary, matching the second
  // ALIGN_VALUE term in the size computation above.
  RingPagesPtr = (volatile UINT8 *) Ring->Base +
                 ALIGN_VALUE (RingPagesPtr - (volatile UINT8 *) Ring->Base,
                   EFI_PAGE_SIZE);

  Ring->Used.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Flags;

  Ring->Used.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Idx;

  Ring->Used.UsedElem = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.UsedElem * QueueSize;

  Ring->Used.AvailEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.AvailEvent;

  Ring->QueueSize = QueueSize;
  return EFI_SUCCESS;
}
/**
  Allocates one or more 4KB pages of a certain memory type at a specified alignment.

  Allocates the number of 4KB pages specified by Pages of a certain memory type with an alignment
  specified by Alignment. The allocated buffer is returned. If Pages is 0, then NULL is returned.
  If there is not enough memory at the specified alignment remaining to satisfy the request, then
  NULL is returned.
  If Alignment is not a power of two and Alignment is not zero, then ASSERT().
  If Pages plus EFI_SIZE_TO_PAGES (Alignment) overflows, then ASSERT().

  @param  MemoryType            The type of memory to allocate.
  @param  Pages                 The number of 4 KB pages to allocate.
  @param  Alignment             The requested alignment of the allocation. Must be a power of two.
                                If Alignment is zero, then byte alignment is used.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
InternalAllocateAlignedPages (
  IN EFI_MEMORY_TYPE  MemoryType,
  IN UINTN            Pages,
  IN UINTN            Alignment
  )
{
  EFI_PHYSICAL_ADDRESS  Memory;
  EFI_PHYSICAL_ADDRESS  AlignedMemory;
  EFI_PEI_HOB_POINTERS  Hob;
  BOOLEAN               SkipBeforeMemHob;
  BOOLEAN               SkipAfterMemHob;
  EFI_PHYSICAL_ADDRESS  HobBaseAddress;
  UINT64                HobLength;
  EFI_MEMORY_TYPE       HobMemoryType;
  UINTN                 TotalPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  //
  // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
  //
  ASSERT (Pages <= (MAX_ADDRESS - EFI_SIZE_TO_PAGES (Alignment)));

  //
  // We would rather waste some memory to save PEI code size.
  // meaning in addition to the requested size for the aligned mem,
  // we simply reserve an overhead memory equal to Alignment(page-aligned), no matter what.
  // The overhead mem size could be reduced later with more involved malloc mechanisms
  // (e.g., something that can detect the alignment boundary before allocating memory or
  // can request that memory be allocated at a certain address that is already aligned).
  //
  TotalPages = Pages + (Alignment <= EFI_PAGE_SIZE ? 0 : EFI_SIZE_TO_PAGES(Alignment));
  Memory = (EFI_PHYSICAL_ADDRESS) (UINTN) InternalAllocatePages (MemoryType, TotalPages);
  if (Memory == 0) {
    DEBUG((DEBUG_INFO, "Out of memory resource! \n"));
    return NULL;
  }
  DEBUG ((DEBUG_INFO, "Allocated Memory unaligned: Address = 0x%LX, Pages = 0x%X, Type = %d \n", Memory, TotalPages, (UINTN) MemoryType));

  //
  // Alignment calculation
  //
  AlignedMemory = Memory;
  if (Alignment > EFI_PAGE_SIZE) {
    AlignedMemory = ALIGN_VALUE (Memory, Alignment);
  }
  DEBUG ((DEBUG_INFO, "After aligning to 0x%X bytes: Address = 0x%LX, Pages = 0x%X \n", Alignment, AlignedMemory, Pages));

  //
  // In general three HOBs cover the total allocated space.
  // The aligned portion is covered by the aligned mem HOB and
  // the unaligned(to be freed) portions before and after the aligned portion are covered by newly created HOBs.
  //
  // Before mem HOB covers the region between "Memory" and "AlignedMemory"
  // Aligned mem HOB covers the region between "AlignedMemory" and "AlignedMemory + EFI_PAGES_TO_SIZE(Pages)"
  // After mem HOB covers the region between "AlignedMemory + EFI_PAGES_TO_SIZE(Pages)" and "Memory + EFI_PAGES_TO_SIZE(TotalPages)"
  //
  // The before or after mem HOBs need to be skipped under special cases where the aligned portion
  // touches either the top or bottom of the original allocated space.
  //
  SkipBeforeMemHob = FALSE;
  SkipAfterMemHob  = FALSE;
  if (Memory == AlignedMemory) {
    SkipBeforeMemHob = TRUE;
  }
  if ((Memory + EFI_PAGES_TO_SIZE(TotalPages)) == (AlignedMemory + EFI_PAGES_TO_SIZE(Pages))) {
    //
    // This condition is never met in the current implementation.
    // There is always some after-mem since the overhead mem(used in TotalPages)
    // is no less than Alignment.
    //
    SkipAfterMemHob = TRUE;
  }

  //
  // Search for the mem HOB referring to the original(unaligned) allocation
  // and update the size and type if needed.
  //
  Hob.Raw = GetFirstHob (EFI_HOB_TYPE_MEMORY_ALLOCATION);
  while (Hob.Raw != NULL) {
    if (Hob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress == Memory) {
      break;
    }
    Hob.Raw = GET_NEXT_HOB (Hob);
    Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw);
  }
  ASSERT (Hob.Raw != NULL);
  //
  // BUGFIX: in release builds the ASSERT above compiles out; without this
  // guard a missing HOB (inconsistent HOB list) would dereference NULL below.
  //
  if (Hob.Raw == NULL) {
    return NULL;
  }

  if (SkipBeforeMemHob) {
    //
    // Use this HOB as aligned mem HOB as there is no portion before it.
    //
    HobLength = EFI_PAGES_TO_SIZE(Pages);
    Hob.MemoryAllocation->AllocDescriptor.MemoryLength = HobLength;
  } else {
    //
    // Use this HOB as before mem HOB and create a new HOB for the aligned portion
    //
    HobLength = (AlignedMemory - Memory);
    Hob.MemoryAllocation->AllocDescriptor.MemoryLength = HobLength;
    Hob.MemoryAllocation->AllocDescriptor.MemoryType   = EfiConventionalMemory;
  }

  HobBaseAddress = Hob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress;
  HobMemoryType  = Hob.MemoryAllocation->AllocDescriptor.MemoryType;

  //
  // Build the aligned mem HOB if needed
  //
  if (!SkipBeforeMemHob) {
    DEBUG((DEBUG_INFO, "Updated before-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
      HobBaseAddress, HobLength, (UINTN) HobMemoryType));

    HobBaseAddress = AlignedMemory;
    HobLength      = EFI_PAGES_TO_SIZE(Pages);
    HobMemoryType  = MemoryType;

    BuildMemoryAllocationHob (
      HobBaseAddress,
      HobLength,
      HobMemoryType
      );

    DEBUG((DEBUG_INFO, "Created aligned-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
      HobBaseAddress, HobLength, (UINTN) HobMemoryType));
  } else {
    if (HobBaseAddress != 0) {
      DEBUG((DEBUG_INFO, "Updated aligned-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
        HobBaseAddress, HobLength, (UINTN) HobMemoryType));
    }
  }

  //
  // Build the after mem HOB if needed
  //
  if (!SkipAfterMemHob) {
    HobBaseAddress = AlignedMemory + EFI_PAGES_TO_SIZE(Pages);
    HobLength      = (Memory + EFI_PAGES_TO_SIZE(TotalPages)) - (AlignedMemory + EFI_PAGES_TO_SIZE(Pages));
    HobMemoryType  = EfiConventionalMemory;

    BuildMemoryAllocationHob (
      HobBaseAddress,
      HobLength,
      HobMemoryType
      );

    DEBUG((DEBUG_INFO, "Created after-mem HOB with BaseAddress = %LX, Length = %LX, MemoryType = %d \n",
      HobBaseAddress, HobLength, (UINTN) HobMemoryType));
  }

  return (VOID *) (UINTN) AlignedMemory;
}
// Loads and initializes a shader effect library from a binary stream:
// deserializes the metadata, creates GPU state objects and buffers, loads
// shader bytecode, links programs, and fixes up handle references inside
// state blocks and techniques.
//
// Returns ALL_OK on success; mxDO() propagates stream/serialization errors.
ERet FxLibrary::Load( AStreamReader& stream )
{
	FxHeader header;
	mxDO(stream.Get(header));
	// Only deserialize the library body when the magic matches; otherwise the
	// stream position is left just past the header.
	if( header.fourCC == FX_Library_MAGIC ) {
		mxDO(Serialization::LoadBinary(stream, FxLibrary::MetaClass(), this));
	}

	// Initialization order:
	// 1) Render targets
	// 2) State objects
	// 3) Shaders
	// 4) Input layouts
	// 5) Everything else
	// render targets should be sorted by size
	// shader resources should be sorted by size
	// constant buffers should be sorted by size
	// Ideally, render states should be sorted by state deltas (so that changes between adjacent states are minimized)

	//CreateRenderTargets(resolution, m_colorTargets, m_depthTargets, false);

	// Create the individual GPU state objects; each array element stores the
	// llgl handle it receives back.
	for( UINT32 iSamplerState = 0; iSamplerState < m_samplerStates.Num(); iSamplerState++ )
	{
		FxSamplerState& samplerState = m_samplerStates[ iSamplerState ];
		samplerState.handle = llgl::CreateSamplerState( samplerState );
	}
	for( UINT32 iDepthStencilState = 0; iDepthStencilState < m_depthStencilStates.Num(); iDepthStencilState++ )
	{
		FxDepthStencilState& depthStencilState = m_depthStencilStates[ iDepthStencilState ];
		depthStencilState.handle = llgl::CreateDepthStencilState( depthStencilState );
	}
	for( UINT32 iRasterizerState = 0; iRasterizerState < m_rasterizerStates.Num(); iRasterizerState++ )
	{
		FxRasterizerState& rasterizerState = m_rasterizerStates[ iRasterizerState ];
		rasterizerState.handle = llgl::CreateRasterizerState( rasterizerState );
	}
	for( UINT32 iBlendState = 0; iBlendState < m_blendStates.Num(); iBlendState++ )
	{
		FxBlendState& blendState = m_blendStates[ iBlendState ];
		blendState.handle = llgl::CreateBlendState( blendState );
	}
	// State blocks were serialized with array indices in the .id fields;
	// replace those indices with the real handles created above.
	for( UINT32 iStateBlock = 0; iStateBlock < m_stateBlocks.Num(); iStateBlock++ )
	{
		FxStateBlock& stateBlock = m_stateBlocks[ iStateBlock ];
		stateBlock.blendState = m_blendStates[ stateBlock.blendState.id ].handle;
		stateBlock.rasterizerState = m_rasterizerStates[ stateBlock.rasterizerState.id ].handle;
		stateBlock.depthStencilState= m_depthStencilStates[ stateBlock.depthStencilState.id ].handle;
	}

	// create constant buffers and shader resources
	for( UINT32 iCB = 0; iCB < m_globalCBuffers.Num(); iCB++ )
	{
		FxCBuffer& rCB = m_globalCBuffers[ iCB ];
		rCB.handle = llgl::CreateBuffer( Buffer_Uniform, rCB.size, NULL );
	}
	//for( UINT32 iSR = 0; iSR < shaderResources.Num(); iSR++ )
	//{
	//	//XShaderResource& rSR = shaderResources[ iSR ];
	//	UNDONE;
	//}

	// Load shaders and create shader programs.
	// Bytecode blobs are stored back-to-back per shader type, each preceded by
	// its size and padded to a 4-byte boundary in the stream.
	for( UINT32 shaderType = 0; shaderType < ShaderTypeCount; shaderType++ )
	{
		for( UINT32 shaderIndex = 0; shaderIndex < header.numShaders[shaderType]; shaderIndex++ )
		{
			UINT32 codeSize;
			stream >> codeSize;

			// Bytecode only needs to live until CreateShader returns, so use
			// the per-frame stack allocator.
			ScopedStackAlloc codeAlloc( gCore.frameAlloc );
			void* codeBuffer = codeAlloc.Alloc( codeSize );
			mxDO(stream.Read( codeBuffer, codeSize ));

			// Skip the padding that aligns the next blob to 4 bytes.
			const UINT32 alignedOffset = ALIGN_VALUE(codeSize, 4);
			const UINT32 sizeOfPadding = alignedOffset - codeSize;
			Skip_N_bytes( stream, sizeOfPadding );

			const HShader shaderHandle = llgl::CreateShader( (EShaderType)shaderType, codeBuffer, codeSize );
			m_shaders[shaderType][shaderIndex] = shaderHandle;

			//DBGOUT("Created '%s' (handle: %u, %u bytes)\n",
			//	EShaderTypeToChars((EShaderType)shaderType), shaderHandle.id, codeSize);
		}
	}

	// Create programs.
	for( UINT32 iProgram = 0; iProgram < m_programs.Num(); iProgram++ )
	{
		FxProgram& program = m_programs[ iProgram ];

		ProgramDescription pd;
		// UINT16(~0) marks "no shader of this type" for the program stage.
		for( UINT32 shaderType = 0; shaderType < ShaderTypeCount; shaderType++ )
		{
			if( program.shaders[shaderType] != UINT16(~0) )
			{
				pd.shaders[shaderType] = m_shaders[shaderType][program.shaders[shaderType]];
			}
		}
		// GL needs explicit resource bindings; other backends resolve them
		// from the shader blobs themselves.
		if(LLGL_Driver_Is_OpenGL)
		{
			pd.bindings = &m_bindings[ program.bindings ];
		}
		program.handle = llgl::CreateProgram( pd );
	}

	// Fixup shader techniques.
	for( UINT32 iTechnique = 0; iTechnique < m_techniques.Num(); iTechnique++ )
	{
		FxShader& technique = m_techniques[ iTechnique ];
		DBGOUT("Loading technique '%s'...\n", technique.name.ToPtr());

		// Create local shader resources.
		{
			// Create local constant buffers.
			for( UINT32 iCB = 0; iCB < technique.locals.Num(); iCB++ )
			{
				FxCBuffer& rCB = technique.locals[ iCB ];
				rCB.handle = llgl::CreateBuffer( Buffer_Uniform, rCB.size, NULL );
			}
		}

		// Fixup pointers to all shader inputs used by the technique.
		//technique.Link(*this);
		{
			// The serialized handle id uses its upper bit to tag references to
			// technique-local buffers (vs. global ones).
			for( UINT32 bufferIndex = 0; bufferIndex < technique.inputs.cbuffers.Num(); bufferIndex++ )
			{
				FxCBufferRef& bufferReference = technique.inputs.cbuffers[ bufferIndex ];
				UINT16 relativeIndex = bufferReference.handle.id;
				if( GetUpperBit( relativeIndex ) )
				{
					ClearUpperBit( relativeIndex );
					bufferReference.handle = technique.locals[ relativeIndex ].handle;
					mxASSERT(bufferReference.handle.IsValid());
				}
				else
				{
					// Global-buffer references are not resolved here yet.
					UNDONE;
				}
			}
			for( UINT32 samplerIndex = 0; samplerIndex < technique.inputs.samplers.Num(); samplerIndex++ )
			{
			}
		}

		// Replace per-pass program indices with the handles created above.
		for( UINT32 iPass = 0; iPass < technique.passes.Num(); iPass++ )
		{
			FxPass& pass = technique.passes[ iPass ];
			for( UINT32 iProgram = 0; iProgram < pass.programs.Num(); iProgram++ )
			{
				mxASSERT( pass.programs[iProgram].id != UINT16(~0) );
				pass.programs[iProgram] = m_programs[ pass.programs[iProgram].id ].handle;
			}
		}
	}

	//DBGOUT("Loading %u shaders: %u VS, %u GS, %u PS\n",
	//	totalShaderCount,
	//	header.numShaders[ShaderVertex],
	//	header.numShaders[ShaderGeometry],
	//	header.numShaders[ShaderFragment]);

	return ALL_OK;
}
/**
  Main entry point to last PEIM.

  This function finds DXE Core in the firmware volume and transfer the control to
  DXE core.

  @param This          Entry point for DXE IPL PPI.
  @param PeiServices   General purpose services available to every PEIM.
  @param HobList       Address to the Pei HOB list.

  @return EFI_SUCCESS              DXE core was successfully loaded.
  @return EFI_OUT_OF_RESOURCES     There are not enough resources to load DXE core.

**/
EFI_STATUS
EFIAPI
DxeLoadCore (
  IN CONST EFI_DXE_IPL_PPI *This,
  IN EFI_PEI_SERVICES      **PeiServices,
  IN EFI_PEI_HOB_POINTERS  HobList
  )
{
  EFI_STATUS                                Status;
  EFI_FV_FILE_INFO                          DxeCoreFileInfo;
  EFI_PHYSICAL_ADDRESS                      DxeCoreAddress;
  UINT64                                    DxeCoreSize;
  EFI_PHYSICAL_ADDRESS                      DxeCoreEntryPoint;
  EFI_BOOT_MODE                             BootMode;
  EFI_PEI_FILE_HANDLE                       FileHandle;
  EFI_PEI_READ_ONLY_VARIABLE2_PPI           *Variable;
  EFI_PEI_LOAD_FILE_PPI                     *LoadFile;
  UINTN                                     Instance;
  UINT32                                    AuthenticationState;
  UINTN                                     DataSize;
  EFI_PEI_S3_RESUME2_PPI                    *S3Resume;
  EFI_PEI_RECOVERY_MODULE_PPI               *PeiRecovery;
  EFI_MEMORY_TYPE_INFORMATION               MemoryData[EfiMaxMemoryType + 1];

  //
  // if in S3 Resume, restore configure
  //
  BootMode = GetBootModeHob ();

  if (BootMode == BOOT_ON_S3_RESUME) {
    //
    // S3 path: hand control to the S3 resume PPI; S3RestoreConfig2 is not
    // expected to return on success.
    //
    Status = PeiServicesLocatePpi (
               &gEfiPeiS3Resume2PpiGuid,
               0,
               NULL,
               (VOID **) &S3Resume
               );
    if (EFI_ERROR (Status)) {
      //
      // Report Status code that S3Resume PPI can not be found
      //
      REPORT_STATUS_CODE (
        EFI_ERROR_CODE | EFI_ERROR_MAJOR,
        (EFI_SOFTWARE_PEI_MODULE | EFI_SW_PEI_EC_S3_RESUME_PPI_NOT_FOUND)
        );
    }
    ASSERT_EFI_ERROR (Status);

    Status = S3Resume->S3RestoreConfig2 (S3Resume);
    ASSERT_EFI_ERROR (Status);
  } else if (BootMode == BOOT_IN_RECOVERY_MODE) {
    //
    // Recovery path: load the recovery capsule; on any failure, report a
    // status code and dead-loop since there is no way to continue the boot.
    //
    REPORT_STATUS_CODE (EFI_PROGRESS_CODE, (EFI_SOFTWARE_PEI_MODULE | EFI_SW_PEI_PC_RECOVERY_BEGIN));
    Status = PeiServicesLocatePpi (
               &gEfiPeiRecoveryModulePpiGuid,
               0,
               NULL,
               (VOID **) &PeiRecovery
               );

    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_ERROR, "Locate Recovery PPI Failed.(Status = %r)\n", Status));
      //
      // Report Status code the failure of locating Recovery PPI
      //
      REPORT_STATUS_CODE (
        EFI_ERROR_CODE | EFI_ERROR_MAJOR,
        (EFI_SOFTWARE_PEI_MODULE | EFI_SW_PEI_EC_RECOVERY_PPI_NOT_FOUND)
        );
      CpuDeadLoop ();
    }

    REPORT_STATUS_CODE (EFI_PROGRESS_CODE, (EFI_SOFTWARE_PEI_MODULE | EFI_SW_PEI_PC_CAPSULE_LOAD));
    Status = PeiRecovery->LoadRecoveryCapsule (PeiServices, PeiRecovery);
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_ERROR, "Load Recovery Capsule Failed.(Status = %r)\n", Status));
      //
      // Report Status code that recovery image can not be found
      //
      REPORT_STATUS_CODE (
        EFI_ERROR_CODE | EFI_ERROR_MAJOR,
        (EFI_SOFTWARE_PEI_MODULE | EFI_SW_PEI_EC_NO_RECOVERY_CAPSULE)
        );
      CpuDeadLoop ();
    }
    REPORT_STATUS_CODE (EFI_PROGRESS_CODE, (EFI_SOFTWARE_PEI_MODULE | EFI_SW_PEI_PC_CAPSULE_START));
    //
    // Now should have a HOB with the DXE core
    //
  }

  if (GetFirstGuidHob ((CONST EFI_GUID *)&gEfiMemoryTypeInformationGuid) == NULL) {
    //
    // Don't build GuidHob if GuidHob has been installed.
    //
    // Seed DXE's memory type information from the non-volatile variable, if
    // both the variable PPI and a valid variable are available.
    //
    Status = PeiServicesLocatePpi (
               &gEfiPeiReadOnlyVariable2PpiGuid,
               0,
               NULL,
               (VOID **)&Variable
               );
    if (!EFI_ERROR (Status)) {
      DataSize = sizeof (MemoryData);
      Status = Variable->GetVariable (
                           Variable,
                           EFI_MEMORY_TYPE_INFORMATION_VARIABLE_NAME,
                           &gEfiMemoryTypeInformationGuid,
                           NULL,
                           &DataSize,
                           &MemoryData
                           );
      if (!EFI_ERROR (Status) && ValidateMemoryTypeInfoVariable(MemoryData, DataSize)) {
        //
        // Build the GUID'd HOB for DXE
        //
        BuildGuidDataHob (
          &gEfiMemoryTypeInformationGuid,
          MemoryData,
          DataSize
          );
      }
    }
  }

  //
  // Look in all the FVs present in PEI and find the DXE Core FileHandle
  //
  FileHandle = DxeIplFindDxeCore ();

  //
  // Load the DXE Core from a Firmware Volume.
  //
  // Try each installed EFI_PEI_LOAD_FILE_PPI instance in turn until one of
  // them can load the DXE Core file handle.
  //
  Instance = 0;
  do {
    Status = PeiServicesLocatePpi (&gEfiPeiLoadFilePpiGuid, Instance++, NULL, (VOID **) &LoadFile);
    //
    // These must exist an instance of EFI_PEI_LOAD_FILE_PPI to support to load DxeCore file handle successfully.
    //
    ASSERT_EFI_ERROR (Status);

    Status = LoadFile->LoadFile (
                         LoadFile,
                         FileHandle,
                         &DxeCoreAddress,
                         &DxeCoreSize,
                         &DxeCoreEntryPoint,
                         &AuthenticationState
                         );
  } while (EFI_ERROR (Status));

  //
  // Get the DxeCore File Info from the FileHandle for the DxeCore GUID file name.
  //
  Status = PeiServicesFfsGetFileInfo (FileHandle, &DxeCoreFileInfo);
  ASSERT_EFI_ERROR (Status);

  //
  // Add HOB for the DXE Core
  //
  BuildModuleHob (
    &DxeCoreFileInfo.FileName,
    DxeCoreAddress,
    ALIGN_VALUE (DxeCoreSize, EFI_PAGE_SIZE),
    DxeCoreEntryPoint
    );

  //
  // Report Status Code EFI_SW_PEI_PC_HANDOFF_TO_NEXT
  //
  REPORT_STATUS_CODE (EFI_PROGRESS_CODE, (EFI_SOFTWARE_PEI_CORE | EFI_SW_PEI_CORE_PC_HANDOFF_TO_NEXT));

  DEBUG ((DEBUG_INFO | DEBUG_LOAD, "Loading DXE CORE at 0x%11p EntryPoint=0x%11p\n", (VOID *)(UINTN)DxeCoreAddress, FUNCTION_ENTRY_POINT (DxeCoreEntryPoint)));

  //
  // Transfer control to the DXE Core
  // The hand off state is simply a pointer to the HOB list
  //
  HandOffToDxeCore (DxeCoreEntryPoint, HobList);
  //
  // If we get here, then the DXE Core returned. This is an error
  // DxeCore should not return.
  //
  ASSERT (FALSE);
  CpuDeadLoop ();
  return EFI_OUT_OF_RESOURCES;
}
/**
  Initialize DMA protection.

  Allocates the DMA bounce-buffer pool aligned to the strictest alignment any
  VT-d engine requires, (re)installs the IOMMU PPI, and programs the VT-d
  protected memory regions so that everything outside the DMA buffer is
  protected from bus-master access.

  @param VTdInfo        The VTd engine context information.

  @retval EFI_SUCCESS           the DMA protection is initialized.
  @retval EFI_OUT_OF_RESOURCES  no enough resource to initialize DMA protection.
**/
EFI_STATUS
InitDmaProtection (
  IN   VTD_INFO                    *VTdInfo
  )
{
  EFI_STATUS                  Status;
  UINT32                      LowMemoryAlignment;
  UINT64                      HighMemoryAlignment;
  UINTN                       MemoryAlignment;
  UINTN                       LowBottom;
  UINTN                       LowTop;
  UINTN                       HighBottom;
  UINT64                      HighTop;
  DMA_BUFFER_INFO             *DmaBufferInfo;
  VOID                        *Hob;
  EFI_PEI_PPI_DESCRIPTOR      *OldDescriptor;
  EDKII_IOMMU_PPI             *OldIoMmuPpi;

  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
  DmaBufferInfo = GET_GUID_HOB_DATA(Hob);

  DEBUG ((DEBUG_INFO, " DmaBufferSize : 0x%x\n", DmaBufferInfo->DmaBufferSize));

  //
  // The DMA buffer must satisfy the strictest (largest) alignment required by
  // any engine, for either the low or the high protected memory region.
  //
  LowMemoryAlignment = GetLowMemoryAlignment (VTdInfo, VTdInfo->EngineMask);
  HighMemoryAlignment = GetHighMemoryAlignment (VTdInfo, VTdInfo->EngineMask);
  if (LowMemoryAlignment < HighMemoryAlignment) {
    MemoryAlignment = (UINTN)HighMemoryAlignment;
  } else {
    MemoryAlignment = LowMemoryAlignment;
  }
  // The configured buffer size must itself be a multiple of that alignment.
  ASSERT (DmaBufferInfo->DmaBufferSize == ALIGN_VALUE(DmaBufferInfo->DmaBufferSize, MemoryAlignment));
  DmaBufferInfo->DmaBufferBase = (UINTN)AllocateAlignedPages (EFI_SIZE_TO_PAGES(DmaBufferInfo->DmaBufferSize), MemoryAlignment);
  ASSERT (DmaBufferInfo->DmaBufferBase != 0);
  if (DmaBufferInfo->DmaBufferBase == 0) {
    DEBUG ((DEBUG_INFO, " InitDmaProtection : OutOfResource\n"));
    return EFI_OUT_OF_RESOURCES;
  }

  DEBUG ((DEBUG_INFO, " DmaBufferBase : 0x%x\n", DmaBufferInfo->DmaBufferBase));

  // The buffer is handed out from both ends: top grows down, bottom grows up.
  DmaBufferInfo->DmaBufferCurrentTop = DmaBufferInfo->DmaBufferBase + DmaBufferInfo->DmaBufferSize;
  DmaBufferInfo->DmaBufferCurrentBottom = DmaBufferInfo->DmaBufferBase;

  //
  // (Re)Install PPI.
  //
  // Reinstall if an IOMMU PPI already exists so consumers get notified of the
  // updated instance; otherwise install it fresh.
  //
  Status = PeiServicesLocatePpi (
             &gEdkiiIoMmuPpiGuid,
             0,
             &OldDescriptor,
             (VOID **) &OldIoMmuPpi
             );
  if (!EFI_ERROR (Status)) {
    Status = PeiServicesReInstallPpi (OldDescriptor, &mIoMmuPpiList);
  } else {
    Status = PeiServicesInstallPpi (&mIoMmuPpiList);
  }
  ASSERT_EFI_ERROR (Status);

  //
  // Protect everything below and above the DMA buffer: the low region covers
  // [0, DmaBufferBase) and the high region covers [buffer end, top of the
  // DMA-addressable space).
  // NOTE(review): HighTop uses HostAddressWidth + 1 — presumably the field is
  // encoded as (width - 1) per the VT-d HAW convention; confirm against the
  // VTD_INFO producer.
  //
  LowBottom = 0;
  LowTop = DmaBufferInfo->DmaBufferBase;
  HighBottom = DmaBufferInfo->DmaBufferBase + DmaBufferInfo->DmaBufferSize;
  HighTop = LShiftU64 (1, VTdInfo->HostAddressWidth + 1);

  Status = SetDmaProtectedRange (
             VTdInfo,
             VTdInfo->EngineMask,
             (UINT32)LowBottom,
             (UINT32)(LowTop - LowBottom),
             HighBottom,
             HighTop - HighBottom
             );

  // Undo the buffer allocation if the protected range could not be programmed.
  if (EFI_ERROR(Status)) {
    FreePages ((VOID *)DmaBufferInfo->DmaBufferBase, EFI_SIZE_TO_PAGES(DmaBufferInfo->DmaBufferSize));
  }

  return Status;
}