// Returns the list of sub-tasks for this quest task.
// The base implementation has no sub-tasks, so an empty array is returned.
TArray<QuestTask*> QuestTask::getSubTasks()
{
	TArray<QuestTask*> result;
	// A default-constructed TArray is already empty; the old single-argument
	// Init(0) call used here was a no-op and that overload is deprecated/removed
	// in modern UE (Init now takes (Element, Count)).
	return result;
}
// Builds a complete DDS file image (magic number + header + raw RGBE texel
// data) from this HDR image into OutDDSFile.
// Layout: 4-byte magic, FDDSFileHeader, then one uint32 per texel, no mips.
void FHDRLoadHelper::ExtractDDSInRGBE(TArray<uint8>& OutDDSFile) const
{
	// header, one uint32 per texel, no mips.
	// Two-argument Init (value, count): the single-argument overload used
	// previously is deprecated in modern UE. Every byte is overwritten below.
	OutDDSFile.Init(0, 4 + sizeof(FDDSFileHeader) + GetWidth() * GetHeight() * sizeof(uint32));

	// create the dds header
	{
		uint32* DDSMagicNumber = (uint32*)OutDDSFile.GetData();

		// to identify DDS file format ("DDS " little-endian)
		*DDSMagicNumber = 0x20534444;

		FDDSFileHeader* header = (FDDSFileHeader*)(OutDDSFile.GetData() + 4);

		FMemory::MemZero(*header);
		header->dwSize = sizeof(FDDSFileHeader);
		header->dwFlags = DDSF_Caps | DDSF_Height | DDSF_Width | DDSF_PixelFormat;
		header->dwWidth = GetWidth();
		header->dwHeight = GetHeight();
		header->dwCaps2 = 0;
		header->dwMipMapCount = 1;
		header->ddpf.dwSize = sizeof(FDDSPixelFormatHeader);
		header->ddpf.dwFlags = DDSPF_RGB;
		// 32 bits per texel, 8 bits per channel in BGR(E) mask order.
		header->ddpf.dwRGBBitCount = 32;
		header->ddpf.dwRBitMask = 0x00ff0000;
		header->ddpf.dwGBitMask = 0x0000ff00;
		header->ddpf.dwBBitMask = 0x000000ff;
	}

	uint32 *DDSData = (uint32*)(OutDDSFile.GetData() + 4 + sizeof(FDDSFileHeader));

	// Get the raw input data as 2d image
	DecompressWholeImage(DDSData);
}
// Reads all currently-available bytes from the pipe into Output.
// Returns true if at least one byte was read; otherwise Output is left empty
// and false is returned.
bool FPipeHandle::ReadToArray(TArray<uint8> & Output)
{
	int BytesAvailable = 0;
	// FIONREAD reports how many bytes can be read without blocking.
	if (ioctl(PipeDesc, FIONREAD, &BytesAvailable) == 0)
	{
		if (BytesAvailable > 0)
		{
			// Two-argument Init (value, count): the single-argument overload
			// used previously is deprecated in modern UE.
			Output.Init(0, BytesAvailable);
			int BytesRead = read(PipeDesc, Output.GetData(), BytesAvailable);
			if (BytesRead > 0)
			{
				// The pipe may deliver fewer bytes than FIONREAD reported;
				// trim so callers only see valid data.
				if (BytesRead < BytesAvailable)
				{
					Output.SetNum(BytesRead);
				}
				return true;
			}
			else
			{
				Output.Empty();
			}
		}
	}

	return false;
}
// Resolves a query context class into a flat list of locations.
// Data is filled only when the context's value type derives from
// UEnvQueryItemType_LocationBase; returns false for a null context or when the
// underlying context preparation fails.
bool FEnvQueryInstance::PrepareContext(UClass* Context, TArray<FVector>& Data)
{
	if (Context == NULL)
	{
		return false;
	}

	FEnvQueryContextData ContextData;
	const bool bSuccess = PrepareContext(Context, ContextData);

	if (bSuccess && ContextData.ValueType && ContextData.ValueType->IsChildOf(UEnvQueryItemType_LocationBase::StaticClass()))
	{
		UEnvQueryItemType_LocationBase* DefTypeOb = (UEnvQueryItemType_LocationBase*)ContextData.ValueType->GetDefaultObject();
		const uint16 DefTypeValueSize = DefTypeOb->GetValueSize();
		uint8* RawData = (uint8*)ContextData.RawData.GetTypedData();

		// Two-argument Init (value, count): the single-argument overload used
		// previously is deprecated in modern UE. Every element is assigned below.
		Data.Init(FVector::ZeroVector, ContextData.NumValues);
		for (int32 i = 0; i < ContextData.NumValues; i++)
		{
			// Decode one value, then advance by the item type's fixed value size.
			Data[i] = DefTypeOb->GetLocation(RawData);
			RawData += DefTypeValueSize;
		}
	}

	return bSuccess;
}
void ABalloonGrid::Float() { SP_Grid NullGrid; TArray<SP_Grid> Highest; Highest.Init(NullGrid, GridSizeX); for (auto G : SearchResult) { if (Highest[G->X] == NullGrid || Highest[G->X]->Y > G->Y) { Highest[G->X] = G; } } ABalloon* SignalBalloon = NULL; int32 OffsetMax = 0; for (auto G : Highest) { int32 FloatOffset = 1; if (G == NullGrid) continue; //UE_LOG(YLog, Warning, TEXT("Float, X:%d,Y:%d"), G->X, G->Y); for (int32 y = G->Y+1; y < GridSizeY; ++y) { SP_Grid GTemp = Grids[y*GridSizeX + G->X]; if (GTemp->Object != NULL) { int32 TargetGridY = GTemp->Y - FloatOffset; if (FloatOffset > OffsetMax) { OffsetMax = FloatOffset; SignalBalloon = GTemp->Object; } GTemp->Object->SetTargetGrid(G->X, TargetGridY); Grids[TargetGridY*GridSizeX + G->X]->Object = GTemp->Object; GTemp->Object = NULL; } else { ++FloatOffset; } } UE_LOG(YLog, Warning, TEXT("FloatOffset:%d"), FloatOffset); if (FloatOffset == GridSizeY) { --MaxBalloonX; UE_LOG(YLog, Warning, TEXT("MaxBalloonX:%d"), MaxBalloonX); } } Join(); if (SignalBalloon) { UE_LOG(YLog, Warning, TEXT("SignalBalloon MaxBalloonX:%d"), MaxBalloonX); SignalBalloon->bIsSignal = true; SignalBalloon->NewBalloonX = MaxBalloonX; } else { SignalBalloon->gBalloonX = MaxBalloonX; } SearchResult.Empty(); }
void UGridMesher::rebuildBaseMeshFromGrid() { ClearAllMeshSections(); numMeshes = 0; if (myGrid != nullptr) { TArray<float> vertexRadii; vertexRadii.Init(baseMeshRadius, myGrid->numNodes); TArray<FColor> vertexColors; vertexColors.Init(FColor::Blue, myGrid->numNodes); if (renderBaseMesh) { buildNewMesh(vertexRadii, vertexColors,TArray<FVector>(), baseMeshMaterial); } } }
void UVaQuoleUIComponent::UpdateUITexture() { // Ignore texture update if (!bEnabled || WebUI == NULL) { return; } // Don't update when WebView resizes or changes texture format if (WebUI->IsPendingVisualEvents()) { return; } if (Texture && Texture->Resource) { // Check that texture is prepared auto rhiRef = static_cast<FTexture2DResource*>(Texture->Resource)->GetTexture2DRHI(); if (!rhiRef) return; // Load data from view const UCHAR* my_data = WebUI->GrabView(); const size_t size = Width * Height * sizeof(uint32); // @TODO This is a bit heavy to keep reallocating/deallocating, but not a big deal. Maybe we can ping pong between buffers instead. TArray<uint32> ViewBuffer; ViewBuffer.Init(Width * Height); FMemory::Memcpy(ViewBuffer.GetData(), my_data, size); // This will be passed off to the render thread, which will delete it when it has finished with it FVaQuoleTextureDataPtr DataPtr = MakeShareable(new FVaQuoleTextureData); DataPtr->SetRawData(Width, Height, sizeof(uint32), ViewBuffer); // Cleanup ViewBuffer.Empty(); my_data = 0; ENQUEUE_UNIQUE_RENDER_COMMAND_THREEPARAMETER( UpdateVaQuoleTexture, FVaQuoleTextureDataPtr, ImageData, DataPtr, FTexture2DRHIRef, TargetTexture, rhiRef, const size_t, DataSize, size, { uint32 stride = 0; void* MipData = GDynamicRHI->RHILockTexture2D(TargetTexture, 0, RLM_WriteOnly, stride, false); if (MipData) { FMemory::Memcpy(MipData, ImageData->GetRawBytesPtr(), ImageData->GetDataSize()); GDynamicRHI->RHIUnlockTexture2D(TargetTexture, 0, false); } ImageData.Reset(); });
// Returns the lowest client ID not used by any connected client, or -1 when
// every ID in [0, Clients.Num()) is taken.
int8 APlayerConnectionHandler::FirstFreeID()
{
	TArray<bool> Available;
	Available.Init(true, Clients.Num());

	// Iterate by const reference to avoid copying client entries.
	for (const auto& Client : Clients)
	{
		// Guard against IDs outside [0, Clients.Num()) which would previously
		// have written out of bounds.
		if (Client.Status != EPlayerStatus::Disconnected && Available.IsValidIndex(Client.ID))
		{
			Available[Client.ID] = false;
		}
	}

	// Use int32 for the counter: the old int8 counter could overflow (UB /
	// infinite loop) for more than 127 clients.
	for (int32 Result = 0; Result < Clients.Num(); Result++)
	{
		if (Available[Result])
		{
			// NOTE(review): callers receive an int8; IDs >= 128 would not be
			// representable - confirm the game caps client count accordingly.
			return (int8)Result;
		}
	}
	return -1;
}
// Refills this magazine from the player's inventory: ammo items of matching
// calibre are moved into empty ammunition slots; items of the wrong calibre
// that were pulled out are re-equipped (or dropped) afterwards.
void AMagazineItem::Reload(APlayerCharacter* player)
{
	// Wrong-calibre items temporarily removed from the inventory.
	TArray<AAmmoItem*> toReadd;

	// int32 matches TArray::Num() and avoids the signed/unsigned comparison
	// the old `unsigned int` counter produced.
	for (int32 i = 0; i < Ammunition.Num(); i++)
	{
		if (!Ammunition[i])
		{
			// Stop if the magazine is filled
			if (GetStoredAmmo() >= Ammunition.Num())
				break;

			ItemSlot slot = player->Inventory->FindItemByType(AAmmoItem::StaticClass());
			if (slot.IsValid())
			{
				AAmmoItem* item = Cast<AAmmoItem>(slot.Get());
				if(item)
				{
					if (item->Calibre == Calibre)
					{
						slot.Delete();
						// Insert the round into slot i: append, swap into
						// place, then drop the trailing duplicate entry.
						Ammunition.Add(item);
						Ammunition.Swap(i, Ammunition.Num() - 1);
						Ammunition.RemoveAt(Ammunition.Num() - 1);
						item->SetState(None);
						item->AttachRootComponentTo(StaticMesh);
						item->SetActorRelativeLocation(FVector::ZeroVector);
						item->SetActorRelativeRotation(FRotator::ZeroRotator);
					}
					else
					{
						// Wrong calibre - set aside and give back below.
						toReadd.Add(item);
						slot.Delete();
					}
				}
				else break;
			}
			else break;
		}
	}

	for (auto& item: toReadd)
	{
		if (item)
		{
			if (player->Inventory->EquipItem(item))
			{
				// Nothing changed! (possibly state, but that is dealt for us)
				// Stop pickup text
				player->Inventory->RecentlyEquiped = NULL;
			}
			else
			{
				// Inventory is full - place the item back into the world.
				item->SetState(ItemState::World);
				item->Unwear(player);
				item->DetachRootComponentFromParent();
				Drop(player);
			}
		}
	}
}
// Builds the APEX destructible asset for DestructibleMesh from its fracture
// settings, preserving any user-overridden materials.
// Returns true only when a new APEX asset was created and applied.
UNREALED_API bool BuildDestructibleMeshFromFractureSettings(UDestructibleMesh& DestructibleMesh, FSkeletalMeshImportData* OutData)
{
	bool Success = false;
#if WITH_APEX
	physx::NxDestructibleAsset* NewApexDestructibleAsset = NULL;

#if WITH_EDITORONLY_DATA
	if (DestructibleMesh.FractureSettings != NULL)
	{
		//save old materials
		// Two-argument Init (value, count): the single-argument overload used
		// previously is deprecated in modern UE.
		TArray<UMaterialInterface*> OverrideMaterials;
		OverrideMaterials.Init(nullptr, DestructibleMesh.Materials.Num());
		for (int32 MaterialIndex = 0; MaterialIndex < DestructibleMesh.Materials.Num(); ++MaterialIndex)
		{
			OverrideMaterials[MaterialIndex] = DestructibleMesh.Materials[MaterialIndex].MaterialInterface;
		}

		// Resize to the fracture settings' material count (SetNum replaces the
		// deprecated single-argument Init).
		DestructibleMesh.Materials.SetNum(DestructibleMesh.FractureSettings->Materials.Num());

		for (int32 MaterialIndex = 0; MaterialIndex < DestructibleMesh.Materials.Num(); ++MaterialIndex)
		{
			if (MaterialIndex < OverrideMaterials.Num())	//if user has overriden materials use it
			{
				DestructibleMesh.Materials[MaterialIndex].MaterialInterface = OverrideMaterials[MaterialIndex];
			}
			else
			{
				DestructibleMesh.Materials[MaterialIndex].MaterialInterface = DestructibleMesh.FractureSettings->Materials[MaterialIndex];
			}
		}

		NxDestructibleAssetCookingDesc DestructibleAssetCookingDesc;
		DestructibleMesh.FractureSettings->BuildDestructibleAssetCookingDesc(DestructibleAssetCookingDesc);
		NewApexDestructibleAsset = DestructibleMesh.FractureSettings->CreateApexDestructibleAsset(DestructibleAssetCookingDesc);
	}
#endif // WITH_EDITORONLY_DATA

	if (NewApexDestructibleAsset != NULL)
	{
		Success = SetApexDestructibleAsset(DestructibleMesh, *NewApexDestructibleAsset, OutData, EDestructibleImportOptions::PreserveSettings);
	}
#endif // WITH_APEX
	return Success;
}
// Receives one pending message from the socket and converts it to a string.
// Returns the empty string when there is no socket, no pending data, or
// nothing was read.
FString UMySocket::receiveMessage()
{
	if (!mySocket)
	{
		return "";
	}

	TArray<uint8> ReceivedData;
	if (pendingDateSize)
	{
		// Clamp the buffer to the maximum UDP datagram payload (65507 bytes).
		ReceivedData.Init('\0', FMath::Min(pendingDateSize, 65507u));
		int32 Read = 0;
		mySocket->Recv(ReceivedData.GetData(), ReceivedData.Num(), Read);
		// BUGFIX: keep only the bytes actually received - previously the
		// untouched tail of the buffer was passed on as message content.
		ReceivedData.SetNum(FMath::Max(Read, 0));
	}

	if (ReceivedData.Num() <= 0)
	{
		return "";
	}

	const FString ReceivedUE4String = StringFromBinaryArray(ReceivedData);
	return ReceivedUE4String;
}
// Drains the socket's pending data and returns the last chunk read.
// NOTE: each loop iteration overwrites the previous buffer, so only the final
// pending chunk is returned (preserved from the original behavior).
TArray<uint8> UMySocket::receiveDataWithPending()
{
	TArray<uint8> ReceivedData;
	if (!mySocket)
	{
		return ReceivedData;
	}

	uint32 Size;
	while (mySocket->HasPendingData(Size))
	{
		// Clamp the buffer to the maximum UDP datagram payload (65507 bytes).
		ReceivedData.Init('\0', FMath::Min(Size, 65507u));
		int32 Read = 0;
		mySocket->Recv(ReceivedData.GetData(), ReceivedData.Num(), Read);
		// BUGFIX: keep only the bytes actually received - previously the
		// untouched tail of the buffer was returned as if it were data.
		ReceivedData.SetNum(FMath::Max(Read, 0));
	}
	return (ReceivedData);
}
void ATitanBotsPlayerController::TCPSocketListener() { /////////////// if (!ConnectionSocket) return; /////////////// //Binary Array TArray<uint8> ReceivedData; uint32 Size; while (ConnectionSocket->HasPendingData(Size)) { ReceivedData.Init(FMath::Min(Size, 65507u), 0); int32 Read = 0; ConnectionSocket->Recv(ReceivedData.GetData(), ReceivedData.Num(), Read); GEngine->AddOnScreenDebugMessage(-1, 5.f, FColor::Red, FString::Printf(TEXT("Data Read! %d"), ReceivedData.Num())); } if (ReceivedData.Num() <= 0) { //No Data Received return; } GEngine->AddOnScreenDebugMessage(-1, 5.f, FColor::Red, FString::Printf(TEXT("Data Bytes Read ~> %d"), ReceivedData.Num())); ///////////////////// // String From Binary Array const FString ReceivedUE4String = StringFromBinaryArray(ReceivedData); //////////////////// GEngine->AddOnScreenDebugMessage(-1, 5.f, FColor::Red, FString::Printf(TEXT("As String Data ~> %s"), *ReceivedUE4String)); if (bIsInGarage) { NFCIDCheck(ReceivedUE4String); } }
// Exports this task's sound file data to disk at ExportPath by streaming it
// through a fixed-size sample buffer.
// NOTE: SOUND_EXPORT_CHECK is assumed to early-out on error - confirm macro.
void FAsyncSoundFileExportTask::DoWork()
{
	// Create a new sound file object which we will pass our data to so we can read from it
	FSoundFile SoundFileInput;
	ESoundFileError::Type Error = SoundFileInput.Initialize(SoundFileData);
	SOUND_EXPORT_CHECK(Error);

	// Create a new sound file object which will be written to disk
	FSoundFile SoundFileOutput;
	FSoundFileDescription OriginalDescription = SoundFileData->GetOriginalDescription();
	const TArray<ESoundFileChannelMap::Type>& ChannelMap = SoundFileData->GetChannelMap();
	Error = SoundFileOutput.OpenEmptyFileForExport(ExportPath, OriginalDescription, ChannelMap);
	SOUND_EXPORT_CHECK(Error);

	// Create a buffer to do the processing (1024 frames worth of samples)
	SoundFileCount ProcessBufferSamples = 1024 * OriginalDescription.NumChannels;
	TArray<double> ProcessBuffer;
	ProcessBuffer.Init(0.0, ProcessBufferSamples);

	// Now perform the encoding to the target file
	SoundFileCount SamplesRead = 0;
	Error = SoundFileInput.ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, SamplesRead);
	SOUND_EXPORT_CHECK(Error);

	// Copy block-by-block until the reader returns zero samples.
	while (SamplesRead)
	{
		SOUND_EXPORT_CHECK(SoundFileInput.GetError());

		SoundFileCount SamplesWritten;
		Error = SoundFileOutput.WriteSamples(ProcessBuffer.GetData(), SamplesRead, SamplesWritten);
		SOUND_EXPORT_CHECK(Error);

		Error = SoundFileInput.ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, SamplesRead);
		SOUND_EXPORT_CHECK(Error);
	}

	// Release the file handles as soon as the copy completes.
	SoundFileInput.ReleaseSoundFileHandle();
	SoundFileOutput.ReleaseSoundFileHandle();
}
// Generates a heightmap that is the element-wise product of the two input
// nodes' heightmaps. Returns an empty array when either input is missing.
TArray<uint16> UWALandscapeNode_Multiply::GenerateHeightmap()
{
	TArray<uint16> OutputA;
	TArray<uint16> OutputB;
	TArray<uint16> RetVal;

	if (InputA && InputB)
	{
		OutputA = InputA->GenerateHeightmap();
		OutputB = InputB->GenerateHeightmap();

		// Guard against mismatched input sizes: the old loop indexed OutputB
		// with OutputA's count and could read out of bounds.
		const int32 NumSamples = FMath::Min(OutputA.Num(), OutputB.Num());
		RetVal.Init(0, NumSamples);
		for (int32 Idx = 0; Idx < NumSamples; Idx++)
		{
			// NOTE(review): uint16 * uint16 can exceed 65535 and wraps on
			// assignment - confirm whether a clamp/normalization is intended.
			RetVal[Idx] = OutputA[Idx] * OutputB[Idx];
		}
	}
	return RetVal;
}
/**
 * Handles input passed by TCP listener: blocks until data arrives, reads it,
 * and publishes it through the InputStr / HasInputStrChanged globals.
 *
 * @param ConnectionSocket The TCP socket connecting the listener and client
 * @param Endpoint The endpoint of the socket connection
 * @return Always true.
 */
bool CloudyWebAPIImpl::InputHandler(FSocket* ConnectionSocket, const FIPv4Endpoint& Endpoint)
{
	TArray<uint8> ReceivedData;
	uint32 Size;

	// wait for data to arrive
	// NOTE(review): this is a busy-wait that spins a core until data is
	// pending; consider FSocket::Wait() - confirm the threading model first.
	while (!(ConnectionSocket->HasPendingData(Size)));

	// handle data - change global InputStr
	// Two-argument Init (value, count): the single-argument overload used
	// previously is deprecated in modern UE. The size is clamped to the
	// 65507-byte maximum datagram payload.
	ReceivedData.Init(0, FMath::Min(Size, 65507u));
	int32 Read = 0;
	ConnectionSocket->Recv(ReceivedData.GetData(), ReceivedData.Num(), Read);

	FString ReceivedString = StringFromBinaryArray(ReceivedData);
	InputStr = ReceivedString;
	HasInputStrChanged = true;

	TCPConnection = ConnectionSocket;

	return true;
}
// Exports all animation sequences of Context.SkelMesh as a glTF "animations"
// json array into Ar, appending time / translation / rotation accessors to
// Context.Data.
static void ExportAnimations(ExportContext& Context, FArchive& Ar)
{
	guard(ExportAnimations);

	const CAnimSet* Anim = Context.SkelMesh->Anim;
	int NumBones = Context.SkelMesh->RefSkeleton.Num();

	// Build mesh to anim bone map
	TArray<int> BoneMap;
	BoneMap.Init(-1, NumBones);
	TArray<int> AnimBones;
	AnimBones.Empty(NumBones);
	for (int i = 0; i < NumBones; i++)
	{
		const CSkelMeshBone &B = Context.SkelMesh->RefSkeleton[i];
		for (int j = 0; j < Anim->TrackBoneNames.Num(); j++)
		{
			// Bone names are matched case-insensitively.
			if (!stricmp(B.Name, Anim->TrackBoneNames[j]))
			{
				BoneMap[i] = j;		// lookup CAnimSet bone by mesh bone index
				AnimBones.Add(i);	// indicate that the bone has animation
				break;
			}
		}
	}

	Ar.Printf(
		" \"animations\" : [\n"
	);

	int FirstDataIndex = Context.Data.Num();

	// Iterate over all animations
	for (int SeqIndex = 0; SeqIndex < Anim->Sequences.Num(); SeqIndex++)
	{
		const CAnimSequence &Seq = *Anim->Sequences[SeqIndex];

		Ar.Printf(
			" {\n"
			" \"name\" : \"%s\",\n",
			*Seq.Name
		);

		// One sampler per animated channel (translation or rotation) per bone.
		struct AnimSampler
		{
			enum ChannelType
			{
				TRANSLATION,
				ROTATION
			};
			int BoneNodeIndex;
			ChannelType Type;
			const CAnimTrack* Track;
		};

		TArray<AnimSampler> Samplers;
		Samplers.Empty(AnimBones.Num() * 2);

		//!! Optimization:
		//!! 1. there will be missing tracks (AnimRotationOnly etc) - drop such samplers
		//!! 2. store all time tracks in a single BufferView, all rotation tracks in another, and all position track in 3rd one - this
		//!!    will reduce amount of BufferViews in json text (combine them by data type)

		// Prepare channels array
		Ar.Printf(" \"channels\" : [\n");
		for (int BoneIndex = 0; BoneIndex < AnimBones.Num(); BoneIndex++)
		{
			int MeshBoneIndex = AnimBones[BoneIndex];
			int AnimBoneIndex = BoneMap[MeshBoneIndex];

			const CAnimTrack* Track = Seq.Tracks[AnimBoneIndex];

			// Allocate a translation sampler, then a rotation sampler, for this bone.
			int TranslationSamplerIndex = Samplers.Num();
			AnimSampler* Sampler = new (Samplers) AnimSampler;
			Sampler->Type = AnimSampler::TRANSLATION;
			Sampler->BoneNodeIndex = MeshBoneIndex + FIRST_BONE_NODE;
			Sampler->Track = Track;

			int RotationSamplerIndex = Samplers.Num();
			Sampler = new (Samplers) AnimSampler;
			Sampler->Type = AnimSampler::ROTATION;
			Sampler->BoneNodeIndex = MeshBoneIndex + FIRST_BONE_NODE;
			Sampler->Track = Track;

			// Print glTF information. Not using usual formatting here to make output a little bit more compact.
			Ar.Printf(
				" { \"sampler\" : %d, \"target\" : { \"node\" : %d, \"path\" : \"%s\" } },\n",
				TranslationSamplerIndex, MeshBoneIndex + FIRST_BONE_NODE, "translation"
			);
			Ar.Printf(
				" { \"sampler\" : %d, \"target\" : { \"node\" : %d, \"path\" : \"%s\" } }%s\n",
				RotationSamplerIndex, MeshBoneIndex + FIRST_BONE_NODE, "rotation", BoneIndex == AnimBones.Num()-1 ? "" : ","
			);
		}
		Ar.Printf(" ],\n");

		// Prepare samplers
		Ar.Printf(" \"samplers\" : [\n");
		for (int SamplerIndex = 0; SamplerIndex < Samplers.Num(); SamplerIndex++)
		{
			const AnimSampler& Sampler = Samplers[SamplerIndex];

			// Prepare time array
			const TArray<float>* TimeArray = (Sampler.Type == AnimSampler::TRANSLATION) ? &Sampler.Track->KeyPosTime : &Sampler.Track->KeyQuatTime;
			if (TimeArray->Num() == 0)
			{
				// For this situation, use track's time array
				TimeArray = &Sampler.Track->KeyTime;
			}
			int NumKeys = Sampler.Type == (AnimSampler::TRANSLATION) ? Sampler.Track->KeyPos.Num() : Sampler.Track->KeyQuat.Num();

			int TimeBufIndex = Context.Data.AddZeroed();
			BufferData& TimeBuf = Context.Data[TimeBufIndex];
			TimeBuf.Setup(NumKeys, "SCALAR", BufferData::FLOAT, sizeof(float));

			// Convert key frame indices / raw key times into seconds.
			float RateScale = 1.0f / Seq.Rate;
			float LastFrameTime = 0;
			if (TimeArray->Num() == 0 || NumKeys == 1)
			{
				// Fill with equally spaced values
				for (int i = 0; i < NumKeys; i++)
				{
					TimeBuf.Put(i * RateScale);
				}
				LastFrameTime = NumKeys-1;
			}
			else
			{
				for (int i = 0; i < TimeArray->Num(); i++)
				{
					TimeBuf.Put((*TimeArray)[i] * RateScale);
				}
				LastFrameTime = (*TimeArray)[TimeArray->Num()-1];
			}
			// Prepare min/max values for time track, it's required by glTF standard
			TimeBuf.BoundsMin = "[ 0 ]";
			char buf[64];
			appSprintf(ARRAY_ARG(buf), "[ %g ]", LastFrameTime * RateScale);
			TimeBuf.BoundsMax = buf;

			// Try to reuse TimeBuf from previous tracks
			TimeBufIndex = Context.GetFinalIndexForLastBlock(FirstDataIndex);

			// Prepare data
			int DataBufIndex = Context.Data.AddZeroed();
			BufferData& DataBuf = Context.Data[DataBufIndex];
			if (Sampler.Type == AnimSampler::TRANSLATION)
			{
				// Translation track
				DataBuf.Setup(NumKeys, "VEC3", BufferData::FLOAT, sizeof(CVec3));
				for (int i = 0; i < NumKeys; i++)
				{
					CVec3 Pos = Sampler.Track->KeyPos[i];
					TransformPosition(Pos);
					DataBuf.Put(Pos);
				}
			}
			else
			{
				// Rotation track
				DataBuf.Setup(NumKeys, "VEC4", BufferData::FLOAT, sizeof(CQuat));
				for (int i = 0; i < NumKeys; i++)
				{
					CQuat Rot = Sampler.Track->KeyQuat[i];
					TransformRotation(Rot);
					// Root bone (node index 0) gets a conjugated rotation.
					if (Sampler.BoneNodeIndex - FIRST_BONE_NODE == 0)
					{
						Rot.Conjugate();
					}
					DataBuf.Put(Rot);
				}
			}

			// Try to reuse data block as well
			DataBufIndex = Context.GetFinalIndexForLastBlock(FirstDataIndex);

			// Write glTF info
			Ar.Printf(
				" { \"input\" : %d, \"output\" : %d }%s\n",
				TimeBufIndex, DataBufIndex, SamplerIndex == Samplers.Num()-1 ? "" : ","
			);
		}
		Ar.Printf(" ]\n");

		Ar.Printf(" }%s\n", SeqIndex == Anim->Sequences.Num()-1 ? "" : ",");
	}

	Ar.Printf(" ],\n");

	unguard;
}
// Exports one mesh section of the given LOD as a glTF primitive: remaps the
// section's indices to a compact local vertex range, fills index / vertex /
// (optional) skin weight buffers in Context.Data, and writes the primitive
// json to Ar. `Verts` is accessed through the VERT() macro using VertexSize.
static void ExportSection(ExportContext& Context, const CBaseMeshLod& Lod, const CMeshVertex* Verts, int SectonIndex, FArchive& Ar)
{
	guard(ExportSection);

	int VertexSize = Context.IsSkeletal() ? sizeof(CSkelMeshVertex) : sizeof(CStaticMeshVertex);

	const CMeshSection& S = Lod.Sections[SectonIndex];
	bool bLast = (SectonIndex == Lod.Sections.Num()-1);

	// Remap section indices to local indices
	CIndexBuffer::IndexAccessor_t GetIndex = Lod.Indices.GetAccessor();
	TArray<int> indexRemap; // old vertex index -> new vertex index
	indexRemap.Init(-1, Lod.NumVerts);
	int numLocalVerts = 0;
	int numLocalIndices = S.NumFaces * 3;
	for (int idx = 0; idx < numLocalIndices; idx++)
	{
		int vertIndex = GetIndex(S.FirstIndex + idx);
		if (indexRemap[vertIndex] == -1)
		{
			// First time this vertex is referenced: assign the next local index.
			indexRemap[vertIndex] = numLocalVerts++;
		}
	}

	// Prepare buffers
	int IndexBufIndex = Context.Data.AddZeroed();
	int PositionBufIndex = Context.Data.AddZeroed();
	int NormalBufIndex = Context.Data.AddZeroed();
	int TangentBufIndex = Context.Data.AddZeroed();

	int BonesBufIndex = -1;
	int WeightsBufIndex = -1;
	if (Context.IsSkeletal())
	{
		BonesBufIndex = Context.Data.AddZeroed();
		WeightsBufIndex = Context.Data.AddZeroed();
	}

	int UVBufIndex[MAX_MESH_UV_SETS];
	for (int i = 0; i < Lod.NumTexCoords; i++)
	{
		UVBufIndex[i] = Context.Data.AddZeroed();
	}

	BufferData& IndexBuf = Context.Data[IndexBufIndex];
	BufferData& PositionBuf = Context.Data[PositionBufIndex];
	BufferData& NormalBuf = Context.Data[NormalBufIndex];
	BufferData& TangentBuf = Context.Data[TangentBufIndex];
	BufferData* UVBuf[MAX_MESH_UV_SETS];
	BufferData* BonesBuf = NULL;
	BufferData* WeightsBuf = NULL;

	PositionBuf.Setup(numLocalVerts, "VEC3", BufferData::FLOAT, sizeof(CVec3));
	NormalBuf.Setup(numLocalVerts, "VEC3", BufferData::FLOAT, sizeof(CVec3));
	TangentBuf.Setup(numLocalVerts, "VEC4", BufferData::FLOAT, sizeof(CVec4));
	for (int i = 0; i < Lod.NumTexCoords; i++)
	{
		UVBuf[i] = &Context.Data[UVBufIndex[i]];
		UVBuf[i]->Setup(numLocalVerts, "VEC2", BufferData::FLOAT, sizeof(CMeshUVFloat));
	}

	if (Context.IsSkeletal())
	{
		BonesBuf = &Context.Data[BonesBufIndex];
		WeightsBuf = &Context.Data[WeightsBufIndex];
		BonesBuf->Setup(numLocalVerts, "VEC4", BufferData::UNSIGNED_SHORT, sizeof(uint16)*4);
		WeightsBuf->Setup(numLocalVerts, "VEC4", BufferData::UNSIGNED_BYTE, sizeof(uint32), /*InNormalized=*/ true);
	}

	// Prepare and build indices
	TArray<int> localIndices;
	localIndices.AddUninitialized(numLocalIndices);
	int* pIndex = localIndices.GetData();
	for (int i = 0; i < numLocalIndices; i++)
	{
		*pIndex++ = GetIndex(S.FirstIndex + i);
	}

	// Use 16-bit indices when they fit, 32-bit otherwise.
	if (numLocalVerts <= 65536)
	{
		IndexBuf.Setup(numLocalIndices, "SCALAR", BufferData::UNSIGNED_SHORT, sizeof(uint16));
		for (int idx = 0; idx < numLocalIndices; idx++)
		{
			IndexBuf.Put<uint16>(indexRemap[localIndices[idx]]);
		}
	}
	else
	{
		IndexBuf.Setup(numLocalIndices, "SCALAR", BufferData::UNSIGNED_INT, sizeof(uint32));
		for (int idx = 0; idx < numLocalIndices; idx++)
		{
			IndexBuf.Put<uint32>(indexRemap[localIndices[idx]]);
		}
	}

	// Build reverse index map for fast lookup of vertex by its new index.
	// It maps new vertex index to old vertex index.
	TArray<int> revIndexMap;
	revIndexMap.AddUninitialized(numLocalVerts);
	for (int i = 0; i < indexRemap.Num(); i++)
	{
		int newIndex = indexRemap[i];
		if (newIndex != -1)
		{
			revIndexMap[newIndex] = i;
		}
	}

	// Build vertices
	for (int i = 0; i < numLocalVerts; i++)
	{
		int vertIndex = revIndexMap[i];
		const CMeshVertex& V = VERT(vertIndex);

		CVec3 Position = V.Position;

		CVec4 Normal, Tangent;
		Unpack(Normal, V.Normal);
		Unpack(Tangent, V.Tangent);
		// Unreal (and we are) using normal.w for computing binormal. glTF
		// uses tangent.w for that. Make this value exactly 1.0 of -1.0 to make glTF
		// validator happy.
#if 0
		// There's some problem: V.Normal.W == 0x80 -> -1.008 instead of -1.0
		if (Normal.w > 1.001 || Normal.w < -1.001)
		{
			appError("%X -> %g\n", V.Normal.Data, Normal.w);
		}
#endif
		Tangent.w = (Normal.w < 0) ? -1 : 1;

		TransformPosition(Position);
		TransformDirection(Normal);
		TransformDirection(Tangent);

		Normal.Normalize();
		Tangent.Normalize();

		// Fill buffers
		PositionBuf.Put(Position);
		NormalBuf.Put(Normal.xyz);
		TangentBuf.Put(Tangent);
		UVBuf[0]->Put(V.UV);
	}

	// Compute bounds for PositionBuf (required by the glTF standard for positions)
	CVec3 Mins, Maxs;
	ComputeBounds((CVec3*)PositionBuf.Data, numLocalVerts, sizeof(CVec3), Mins, Maxs);
	char buf[256];
	appSprintf(ARRAY_ARG(buf), "[ %g, %g, %g ]", VECTOR_ARG(Mins));
	PositionBuf.BoundsMin = buf;
	appSprintf(ARRAY_ARG(buf), "[ %g, %g, %g ]", VECTOR_ARG(Maxs));
	PositionBuf.BoundsMax = buf;

	if (Context.IsSkeletal())
	{
		// Emit bone indices and packed weights per vertex.
		for (int i = 0; i < numLocalVerts; i++)
		{
			int vertIndex = revIndexMap[i];
			const CMeshVertex& V0 = VERT(vertIndex);
			const CSkelMeshVertex& V = static_cast<const CSkelMeshVertex&>(V0);

			int16 Bones[NUM_INFLUENCES];
			static_assert(NUM_INFLUENCES == 4, "Code designed for 4 influences");
			static_assert(sizeof(Bones) == sizeof(V.Bone), "Unexpected V.Bones size");
			memcpy(Bones, V.Bone, sizeof(Bones));
			for (int j = 0; j < NUM_INFLUENCES; j++)
			{
				// We have INDEX_NONE as list terminator, should replace with something else for glTF
				if (Bones[j] == INDEX_NONE)
				{
					Bones[j] = 0;
				}
			}
			BonesBuf->Put(*(uint64*)&Bones);
			WeightsBuf->Put(V.PackedWeights);
		}
	}

	// Secondary UVs
	for (int uvIndex = 1; uvIndex < Lod.NumTexCoords; uvIndex++)
	{
		BufferData* pBuf = UVBuf[uvIndex];
		const CMeshUVFloat* srcUV = Lod.ExtraUV[uvIndex-1];
		for (int i = 0; i < numLocalVerts; i++)
		{
			int vertIndex = revIndexMap[i];
			pBuf->Put(srcUV[vertIndex]);
		}
	}

	// Write primitive information to json
	Ar.Printf(
		" {\n"
		" \"attributes\" : {\n"
		" \"POSITION\" : %d,\n"
		" \"NORMAL\" : %d,\n"
		" \"TANGENT\" : %d,\n",
		PositionBufIndex, NormalBufIndex, TangentBufIndex
	);
	if (Context.IsSkeletal())
	{
		Ar.Printf(
			" \"JOINTS_0\" : %d,\n"
			" \"WEIGHTS_0\" : %d,\n",
			BonesBufIndex, WeightsBufIndex
		);
	}
	for (int i = 0; i < Lod.NumTexCoords; i++)
	{
		Ar.Printf(
			" \"TEXCOORD_%d\" : %d%s\n",
			i, UVBufIndex[i], i < (Lod.NumTexCoords-1) ? "," : ""
		);
	}

	Ar.Printf(
		" },\n"
		" \"indices\" : %d,\n"
		" \"material\" : %d\n"
		" }%s\n",
		IndexBufIndex, SectonIndex, SectonIndex < (Lod.Sections.Num()-1) ? "," : ""
	);

	unguard;
}
void DoWork() { // Synchronously load the input sound file TSharedPtr<ISoundFile> InputSoundFile = AudioModule->LoadSoundFile(*SoundFilePath, false); if (!InputSoundFile.IsValid()) { return; } TSharedPtr<FSoundFileReader> InputSoundFileReader = AudioModule->CreateSoundFileReader(); ESoundFileError::Type Error = InputSoundFileReader->Init(InputSoundFile, false); if (Error != ESoundFileError::NONE) { return; } check(Error == ESoundFileError::NONE); // Cast to an internal object to get access to non-public API FSoundFile* InputSoundFileInternal = static_cast<FSoundFile*>(InputSoundFile.Get()); FSoundFileDescription InputDescription; Error = InputSoundFileInternal->GetDescription(InputDescription); check(Error == ESoundFileError::NONE); TArray<ESoundFileChannelMap::Type> ChannelMap; Error = InputSoundFileInternal->GetChannelMap(ChannelMap); FSoundFileDescription NewSoundFileDescription; NewSoundFileDescription.NumChannels = InputDescription.NumChannels; NewSoundFileDescription.NumFrames = InputDescription.NumFrames; NewSoundFileDescription.FormatFlags = ConvertFormat.Format; NewSoundFileDescription.SampleRate = ConvertFormat.SampleRate; NewSoundFileDescription.NumSections = InputDescription.NumSections; NewSoundFileDescription.bIsSeekable = InputDescription.bIsSeekable; TSharedPtr<FSoundFileWriter> SoundFileWriter = AudioModule->CreateSoundFileWriter(); Error = SoundFileWriter->Init(NewSoundFileDescription, ChannelMap, ConvertFormat.EncodingQuality); check(Error == ESoundFileError::NONE); // Create a buffer to do the processing SoundFileCount ProcessBufferSamples = 1024 * NewSoundFileDescription.NumChannels; TArray<float> ProcessBuffer; ProcessBuffer.Init(0.0f, ProcessBufferSamples); double SampleRateConversionRatio = (double)InputDescription.SampleRate / ConvertFormat.SampleRate; FSampleRateConverter SampleRateConverter; SampleRateConverter.Init(SampleRateConversionRatio, NewSoundFileDescription.NumChannels); TArray<float> OutputBuffer; SoundFileCount 
OutputBufferSamples = ProcessBufferSamples / SampleRateConversionRatio; OutputBuffer.Reserve(OutputBufferSamples); // Find the max value if we've been told to do peak normalization on import float MaxValue = 0.0f; SoundFileCount InputSamplesRead = 0; bool bPerformPeakNormalization = ConvertFormat.bPerformPeakNormalization; if (bPerformPeakNormalization) { Error = InputSoundFileReader->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, InputSamplesRead); SOUND_CONVERT_CHECK(Error); while (InputSamplesRead) { for (SoundFileCount Sample = 0; Sample < InputSamplesRead; ++Sample) { if (ProcessBuffer[Sample] > FMath::Abs(MaxValue)) { MaxValue = ProcessBuffer[Sample]; } } Error = InputSoundFileReader->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, InputSamplesRead); SOUND_CONVERT_CHECK(Error); } // If this happens, it means we have a totally silent file if (MaxValue == 0.0) { bPerformPeakNormalization = false; } // Seek the file back to the beginning SoundFileCount OutOffset; InputSoundFileReader->SeekFrames(0, ESoundFileSeekMode::FROM_START, OutOffset); } bool SamplesProcessed = true; // Read the first block of samples Error = InputSoundFileReader->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, InputSamplesRead); SOUND_CONVERT_CHECK(Error); while (InputSamplesRead != 0) { SampleRateConverter.ProcessBlock(ProcessBuffer.GetData(), InputSamplesRead, OutputBuffer); SoundFileCount SamplesWritten; Error = SoundFileWriter->WriteSamples((const float*)OutputBuffer.GetData(), OutputBuffer.Num(), SamplesWritten); SOUND_CONVERT_CHECK(Error); DEBUG_AUDIO_CHECK(SamplesWritten == OutputBuffer.Num()); OutputBuffer.Reset(); // read more samples Error = InputSoundFileReader->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, InputSamplesRead); SOUND_CONVERT_CHECK(Error); // ... 
normalize the samples if we're told to if (bPerformPeakNormalization) { for (int32 Sample = 0; Sample < InputSamplesRead; ++Sample) { ProcessBuffer[Sample] /= MaxValue; } } } // Release the sound file handles as soon as we finished converting the file InputSoundFileReader->Release(); SoundFileWriter->Release(); // Get the raw binary data TArray<uint8>* Data = nullptr; SoundFileWriter->GetData(&Data); // Write the data to the output file bool bSuccess = FFileHelper::SaveArrayToFile(*Data, *OutSoundFilePath); check(bSuccess); }
// Scores/filters all query items using batched pathfinding data: projects the
// items onto the Recast navmesh, gathers reachable polys around each context
// location, then scores each item either by path cost/length (float mode) or
// by simple reachability (bool mode).
void UEnvQueryTest_PathfindingBatch::RunTest(FEnvQueryInstance& QueryInstance) const
{
	UObject* QueryOwner = QueryInstance.Owner.Get();

	// Resolve instance-bound parameter values for this particular query run.
	BoolValue.BindData(QueryOwner, QueryInstance.QueryID);
	PathFromContext.BindData(QueryOwner, QueryInstance.QueryID);
	SkipUnreachable.BindData(QueryOwner, QueryInstance.QueryID);
	FloatValueMin.BindData(QueryOwner, QueryInstance.QueryID);
	FloatValueMax.BindData(QueryOwner, QueryInstance.QueryID);
	ScanRangeMultiplier.BindData(QueryOwner, QueryInstance.QueryID);

	bool bWantsPath = BoolValue.GetValue();
	bool bPathToItem = PathFromContext.GetValue();
	bool bDiscardFailed = SkipUnreachable.GetValue();
	float MinThresholdValue = FloatValueMin.GetValue();
	float MaxThresholdValue = FloatValueMax.GetValue();
	float RangeMultiplierValue = ScanRangeMultiplier.GetValue();

	// This batched test requires a Recast navmesh; bail out otherwise.
	UNavigationSystem* NavSys = QueryInstance.World->GetNavigationSystem();
	if (NavSys == nullptr)
	{
		return;
	}
	ANavigationData* NavData = FindNavigationData(*NavSys, QueryOwner);
	ARecastNavMesh* NavMeshData = Cast<ARecastNavMesh>(NavData);
	if (NavMeshData == nullptr)
	{
		return;
	}

	TArray<FVector> ContextLocations;
	if (!QueryInstance.PrepareContext(Context, ContextLocations))
	{
		return;
	}

	TArray<FNavigationProjectionWork> TestPoints;
	TArray<float> CollectDistanceSq;
	CollectDistanceSq.Init(0.0f, ContextLocations.Num());

	// Path direction depends on whether we path to or from the items.
	FSharedNavQueryFilter NavigationFilter = FilterClass != nullptr ? UNavigationQueryFilter::GetQueryFilter(*NavMeshData, FilterClass)->GetCopy() : NavMeshData->GetDefaultQueryFilter()->GetCopy();
	NavigationFilter->SetBacktrackingEnabled(!bPathToItem);
	const dtQueryFilter* NavQueryFilter = ((const FRecastQueryFilter*)NavigationFilter->GetImplementation())->GetAsDetourQueryFilter();

	{
		// scope for perf timers

		// can't use FEnvQueryInstance::ItemIterator yet, since it has built in scoring functionality
		for (int32 ItemIdx = 0; ItemIdx < QueryInstance.Items.Num(); ItemIdx++)
		{
			if (QueryInstance.Items[ItemIdx].IsValid())
			{
				const FVector ItemLocation = GetItemLocation(QueryInstance, ItemIdx);
				TestPoints.Add(FNavigationProjectionWork(ItemLocation));

				// Track the farthest item distance per context; used below to
				// bound the poly-collection radius.
				for (int32 ContextIdx = 0; ContextIdx < ContextLocations.Num(); ContextIdx++)
				{
					const float TestDistanceSq = FVector::DistSquared(ItemLocation, ContextLocations[ContextIdx]);
					CollectDistanceSq[ContextIdx] = FMath::Max(CollectDistanceSq[ContextIdx], TestDistanceSq);
				}
			}
		}

		NavMeshData->BatchProjectPoints(TestPoints, NavMeshData->GetDefaultQueryExtent(), NavigationFilter);
	}

	TArray<FRecastDebugPathfindingData> NodePoolData;
	NodePoolData.SetNum(ContextLocations.Num());

	{
		// scope for perf timer

		// Collect, per context, all polys reachable within the scan range.
		TArray<NavNodeRef> Polys;
		for (int32 ContextIdx = 0; ContextIdx < ContextLocations.Num(); ContextIdx++)
		{
			const float MaxPathDistance = FMath::Sqrt(CollectDistanceSq[ContextIdx]) * RangeMultiplierValue;

			Polys.Reset();
			NodePoolData[ContextIdx].Flags = ERecastDebugPathfindingFlags::PathLength;

			NavMeshData->GetPolysWithinPathingDistance(ContextLocations[ContextIdx], MaxPathDistance, Polys, NavigationFilter, nullptr, &NodePoolData[ContextIdx]);
		}
	}

	int32 ProjectedItemIdx = 0;
	if (GetWorkOnFloatValues())
	{
		// Float mode: score by path cost or path length depending on TestMode.
		// Index 0 is unused; TestMode selects cost (1) or length (2).
		NodePoolHelpers::PathParamFunc Func[] = { nullptr, NodePoolHelpers::GetPathCost, NodePoolHelpers::GetPathLength };
		FEnvQueryInstance::ItemIterator It(this, QueryInstance);
		for (It.IgnoreTimeLimit(); It; ++It, ProjectedItemIdx++)
		{
			for (int32 ContextIndex = 0; ContextIndex < ContextLocations.Num(); ContextIndex++)
			{
				const float PathValue = Func[TestMode](NodePoolData[ContextIndex], TestPoints[ProjectedItemIdx], NavQueryFilter);
				It.SetScore(TestPurpose, FilterType, PathValue, MinThresholdValue, MaxThresholdValue);

				// BIG_NUMBER marks an unreachable item.
				if (bDiscardFailed && PathValue >= BIG_NUMBER)
				{
					It.ForceItemState(EEnvItemStatus::Failed);
				}
			}
		}
	}
	else
	{
		// Bool mode: score by whether any path exists at all.
		FEnvQueryInstance::ItemIterator It(this, QueryInstance);
		for (It.IgnoreTimeLimit(); It; ++It, ProjectedItemIdx++)
		{
			for (int32 ContextIndex = 0; ContextIndex < ContextLocations.Num(); ContextIndex++)
			{
				const bool bFoundPath = NodePoolHelpers::HasPath(NodePoolData[ContextIndex], TestPoints[ProjectedItemIdx]);
				It.SetScore(TestPurpose, FilterType, bFoundPath, bWantsPath);
			}
		}
	}
}
void UBlendSpaceBase::FillupGridElements(const TArray<FVector> & PointList, const TArray<FEditorElement> & GridElements) { // need to convert all GridElements indexing to PointList to the data I care // The data I care here is my SampleData // create new Map from PointList index to SampleData TArray<int32> PointListToSampleIndices; PointListToSampleIndices.Init(INDEX_NONE, PointList.Num()); for (int32 I=0; I<PointList.Num(); ++I) { for (int32 J=0; J<SampleData.Num(); ++J) { if (SampleData[J].SampleValue == PointList[I]) { PointListToSampleIndices[I] = J; break; } } } GridSamples.Empty(GridElements.Num()); GridSamples.AddUninitialized(GridElements.Num()); for (int32 I=0; I<GridElements.Num(); ++I) { const FEditorElement& ViewGrid = GridElements[I]; FEditorElement NewGrid; float TotalWeight = 0.f; for (int32 J=0; J<FEditorElement::MAX_VERTICES; ++J) { if(ViewGrid.Indices[J] != INDEX_NONE) { NewGrid.Indices[J] = PointListToSampleIndices[ViewGrid.Indices[J]]; } else { NewGrid.Indices[J] = INDEX_NONE; } if (NewGrid.Indices[J]==INDEX_NONE) { NewGrid.Weights[J] = 0.f; } else { NewGrid.Weights[J] = ViewGrid.Weights[J]; TotalWeight += ViewGrid.Weights[J]; } } // Need to renormalize if (TotalWeight>0.f) { for (int32 J=0; J<FEditorElement::MAX_VERTICES; ++J) { NewGrid.Weights[J]/=TotalWeight; } } GridSamples[I] = NewGrid; } }
/** * Converts a power-of-two image to a square format (ex: 256x512 -> 512x512). Be wary of memory waste when too many texture are not square. * * @param Image The image to be converted to a square * @return true if the image was converted successfully, else false */ static bool SquarifyImage(FImage& Image, uint32 MinSquareSize) { // Early out if (Image.SizeX == Image.SizeY && Image.SizeX >= int32(MinSquareSize)) { return true; } // Figure out the squarified size uint32 SquareSize = FMath::Max(Image.SizeX, Image.SizeY); if(SquareSize < MinSquareSize) { SquareSize = MinSquareSize; } // Calculate how many times to duplicate each row of column uint32 MultX = SquareSize / Image.SizeX; uint32 MultY = SquareSize / Image.SizeY; // Only give memory overhead warning if we're actually going to use a larger image // Small mips that have to be upscaled for compression only save the smaller mip for use if(MultX == 1 || MultY == 1) { float FOverhead = float(FMath::Min(Image.SizeX, Image.SizeY)) / float(SquareSize); int32 POverhead = FMath::RoundToInt(100.0f - (FOverhead * 100.0f)); UE_LOG(LogTextureFormatPVR, Warning, TEXT("Expanding mip (%d,%d) to (%d, %d). Memory overhead: ~%d%%"), Image.SizeX, Image.SizeY, SquareSize, SquareSize, POverhead); } else if (MultX != MultY) { float FOverhead = float(FMath::Min(Image.SizeX, Image.SizeY)) / float(FMath::Max(Image.SizeX, Image.SizeY)); int32 POverhead = FMath::RoundToInt(100.0f - (FOverhead * 100.0f)); UE_LOG(LogTextureFormatPVR, Warning, TEXT("Expanding mip (%d,%d) to (%d, %d). 
Memory overhead: ~%d%%"), Image.SizeX, Image.SizeY, FMath::Max(Image.SizeX, Image.SizeY), FMath::Max(Image.SizeX, Image.SizeY), POverhead); } // Allocate room to fill out into TArray<uint32> SquareRawData; SquareRawData.Init(SquareSize * SquareSize * Image.NumSlices); int32 SourceSliceSize = Image.SizeX * Image.SizeY; int32 DestSliceSize = SquareSize * SquareSize; for ( int32 SliceIndex=0; SliceIndex < Image.NumSlices; ++SliceIndex ) { uint32* RectData = ((uint32*)Image.RawData.GetData()) + SliceIndex * SourceSliceSize; uint32* SquareData = ((uint32*)SquareRawData.GetData()) + SliceIndex * DestSliceSize; for ( int32 Y = 0; Y < Image.SizeY; ++Y ) { for ( int32 X = 0; X < Image.SizeX; ++X ) { uint32 SourceColor = *(RectData + Y * Image.SizeX + X); for ( uint32 YDup = 0; YDup < MultY; ++YDup ) { for ( uint32 XDup = 0; XDup < MultX; ++XDup ) { uint32* DestColor = SquareData + ((Y * MultY + YDup) * SquareSize + (X * MultX + XDup)); *DestColor = SourceColor; } } } } } // Put the new image data into the existing Image (copying from uint32 array to uint8 array) Image.RawData.Empty(SquareSize * SquareSize * Image.NumSlices * sizeof(uint32)); Image.RawData.Init(SquareSize * SquareSize * Image.NumSlices * sizeof(uint32)); uint32* FinalData = (uint32*)Image.RawData.GetData(); FMemory::Memcpy(Image.RawData.GetData(), SquareRawData.GetData(), SquareSize * SquareSize * Image.NumSlices * sizeof(uint32)); Image.SizeX = SquareSize; Image.SizeY = SquareSize; return true; }
void FAsyncSoundFileImportTask::DoWork() { // Create a new sound file object TScopedPointer<FSoundFile> SoundFileInput = TScopedPointer<FSoundFile>(new FSoundFile()); // Open the file ESoundFileError::Type Error = SoundFileInput->OpenFileForReading(ImportSettings.SoundFilePath); SOUND_IMPORT_CHECK(Error); TSharedPtr<ISoundFileData> SoundFileData; Error = SoundFileInput->GetSoundFileData(SoundFileData); SOUND_IMPORT_CHECK(Error); // Get the input file's description const FSoundFileDescription& InputDescription = SoundFileData->GetDescription(); // Get the input file's channel map const TArray<ESoundFileChannelMap::Type>& InputChannelMap = SoundFileData->GetChannelMap(); // Build a description for the new file FSoundFileDescription NewSoundFileDescription; NewSoundFileDescription.NumChannels = InputDescription.NumChannels; NewSoundFileDescription.NumFrames = InputDescription.NumFrames; NewSoundFileDescription.FormatFlags = ImportSettings.Format; NewSoundFileDescription.SampleRate = ImportSettings.SampleRate; NewSoundFileDescription.NumSections = 0; NewSoundFileDescription.bIsSeekable = 1; // Open it as an empty file for reading and writing ISoundFileInternal* SoundFileInternal = (ISoundFileInternal*)SoundFile.Get(); Error = SoundFileInternal->OpenEmptyFileForImport(NewSoundFileDescription, InputChannelMap); SOUND_IMPORT_CHECK(Error); // Set the original description on the new sound file Error = SoundFileInternal->SetImportFileInfo(InputDescription, ImportSettings.SoundFilePath); SOUND_IMPORT_CHECK(Error); // Set the encoding quality (will only do anything if import target is Ogg-Vorbis) Error = SoundFileInternal->SetEncodingQuality(ImportSettings.EncodingQuality); SOUND_IMPORT_CHECK(Error); // Set the state of the sound file to be loading Error = SoundFileInternal->BeginImport(); SOUND_IMPORT_CHECK(Error); // Create a buffer to do the processing SoundFileCount ProcessBufferSamples = 1024 * NewSoundFileDescription.NumChannels; TArray<double> ProcessBuffer; 
ProcessBuffer.Init(0.0, ProcessBufferSamples); // Find the max value if we've been told to do peak normalization on import double MaxValue = 0.0; SoundFileCount SamplesRead = 0; bool bPerformPeakNormalization = ImportSettings.bPerformPeakNormalization; if (bPerformPeakNormalization) { Error = SoundFileInput->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, SamplesRead); SOUND_IMPORT_CHECK(Error); while (SamplesRead) { for (SoundFileCount Sample = 0; Sample < SamplesRead; ++Sample) { if (ProcessBuffer[Sample] > FMath::Abs(MaxValue)) { MaxValue = ProcessBuffer[Sample]; } } Error = SoundFileInput->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, SamplesRead); SOUND_IMPORT_CHECK(Error); } // If this happens, it means we have a totally silent file if (MaxValue == 0.0) { bPerformPeakNormalization = false; } // Seek the file back to the beginning SoundFileCount OutOffset; SoundFileInput->SeekFrames(0, ESoundFileSeekMode::FROM_START, OutOffset); } // Now perform the encoding to the target file Error = SoundFileInput->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, SamplesRead); SOUND_IMPORT_CHECK(Error); while (SamplesRead) { SOUND_IMPORT_CHECK(SoundFileInput->GetError()); // Normalize the samples if we're told to if (bPerformPeakNormalization) { for (int32 Sample = 0; Sample < SamplesRead; ++Sample) { ProcessBuffer[Sample] /= MaxValue; } } SoundFileCount SamplesWritten; Error = SoundFileInternal->WriteSamples((const double*)ProcessBuffer.GetData(), SamplesRead, SamplesWritten); SOUND_IMPORT_CHECK(Error); Error = SoundFileInput->ReadSamples(ProcessBuffer.GetData(), ProcessBufferSamples, SamplesRead); SOUND_IMPORT_CHECK(Error); } SoundFileInput->ReleaseSoundFileHandle(); SoundFileInternal->ReleaseSoundFileHandle(); // We're done doing the encoding Error = SoundFileInternal->EndImport(); SOUND_IMPORT_CHECK(Error); }