Code example #1
void USoundWave::PostLoad()
{
	Super::PostLoad();

	if (GetOutermost()->HasAnyPackageFlags(PKG_ReloadingForCooker))
	{
		return;
	}

	// Compress to whatever formats the active target platforms want
	// static here as an optimization
	ITargetPlatformManagerModule* TPM = GetTargetPlatformManager();
	if (TPM)
	{
		const TArray<ITargetPlatform*>& Platforms = TPM->GetActiveTargetPlatforms();

		for (int32 Index = 0; Index < Platforms.Num(); Index++)
		{
			GetCompressedData(Platforms[Index]->GetWaveFormat(this));
		}
	}

	// We don't precache default objects and we don't precache in the Editor as the latter will
	// most likely cause us to run out of memory.
	if (!GIsEditor && !IsTemplate( RF_ClassDefaultObject ) && GEngine)
	{
		FAudioDevice* AudioDevice = GEngine->GetMainAudioDevice();
		if (AudioDevice && AudioDevice->AreStartupSoundsPreCached())
		{
			// Upload the data to the hardware, but only if we've precached startup sounds already
			AudioDevice->Precache(this);
		}
		// remove bulk data if no AudioDevice is used and no sounds were initialized
		else if(IsRunningGame())
		{
			RawData.RemoveBulkData();
		}
	}

	// Only add this streaming sound if we're not a dedicated server or if there is an audio device manager
	if (IsStreaming() && !IsRunningDedicatedServer() && GEngine && GEngine->GetAudioDeviceManager())
	{
#if WITH_EDITORONLY_DATA
		FinishCachePlatformData();
#endif // #if WITH_EDITORONLY_DATA
		IStreamingManager::Get().GetAudioStreamingManager().AddStreamingSoundWave(this);
	}

#if WITH_EDITORONLY_DATA
	if (!SourceFilePath_DEPRECATED.IsEmpty() && AssetImportData)
	{
		FAssetImportInfo Info;
		Info.Insert(FAssetImportInfo::FSourceFile(SourceFilePath_DEPRECATED));
		AssetImportData->SourceData = MoveTemp(Info);
	}
#endif // #if WITH_EDITORONLY_DATA

	INC_FLOAT_STAT_BY( STAT_AudioBufferTime, Duration );
	INC_FLOAT_STAT_BY( STAT_AudioBufferTimeChannels, NumChannels * Duration );
}
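Every listing on this page follows the same timing idiom: declare a local accumulator inside STAT(), time the work with SCOPE_SECONDS_COUNTER, then add the elapsed seconds to a float counter with INC_FLOAT_STAT_BY. A minimal sketch of that pattern, assuming hypothetical stat names (STATGROUP_MyGroup, STAT_MyWorkTime) in place of the engine-defined counters used in the real examples:

// Illustrative sketch only, not engine source: the stat group, stat name, and
// DoExpensiveWork() are hypothetical stand-ins for the real counters used above
// (e.g. STAT_DDC_PutTime, STAT_AsyncIO_PlatformReadTime).
#include "Stats/Stats.h"

DECLARE_STATS_GROUP(TEXT("MyGroup"), STATGROUP_MyGroup, STATCAT_Advanced);
DECLARE_FLOAT_ACCUMULATOR_STAT(TEXT("MyWork Time"), STAT_MyWorkTime, STATGROUP_MyGroup);

static void DoExpensiveWork()
{
	// Placeholder for the code being measured.
}

void DoTimedWork()
{
	// Local accumulator; STAT() compiles this out entirely when stats are disabled.
	STAT(double ThisTime = 0);
	{
		// Adds the wall-clock seconds spent in this scope to ThisTime.
		SCOPE_SECONDS_COUNTER(ThisTime);
		DoExpensiveWork();
	}
	// Fold the measured time (in seconds) into the float stat counter.
	INC_FLOAT_STAT_BY(STAT_MyWorkTime, (float)ThisTime);
}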
Code example #2
File: SoundWave.cpp Project: 1vanK/AHRUnrealEngine
void USoundWave::PostLoad()
{
	Super::PostLoad();

	if (GetOutermost()->PackageFlags & PKG_ReloadingForCooker)
	{
		return;
	}

	// Compress to whatever formats the active target platforms want
	// static here as an optimization
	ITargetPlatformManagerModule* TPM = GetTargetPlatformManager();
	if (TPM)
	{
		const TArray<ITargetPlatform*>& Platforms = TPM->GetActiveTargetPlatforms();

		for (int32 Index = 0; Index < Platforms.Num(); Index++)
		{
			GetCompressedData(Platforms[Index]->GetWaveFormat(this));
		}
	}

	// We don't precache default objects and we don't precache in the Editor as the latter will
	// most likely cause us to run out of memory.
	if( !GIsEditor && !IsTemplate( RF_ClassDefaultObject ) && GEngine )
	{
		FAudioDevice* AudioDevice = GEngine->GetAudioDevice();
		if( AudioDevice && AudioDevice->bStartupSoundsPreCached)
		{
			// Upload the data to the hardware, but only if we've precached startup sounds already
			AudioDevice->Precache( this );
		}
		// remove bulk data if no AudioDevice is used and no sounds were initialized
		else if( IsRunningGame() )
		{
			RawData.RemoveBulkData();
		}
	}

	if (IsStreaming())
	{
#if WITH_EDITORONLY_DATA
		FinishCachePlatformData();
#endif // #if WITH_EDITORONLY_DATA
		IStreamingManager::Get().GetAudioStreamingManager().AddStreamingSoundWave(this);
	}

	INC_FLOAT_STAT_BY( STAT_AudioBufferTime, Duration );
	INC_FLOAT_STAT_BY( STAT_AudioBufferTimeChannels, NumChannels * Duration );
}
Code example #3
		/** Async worker that checks the cache backend and if that fails, calls the deriver to build the data and then puts the results to the cache **/
		void DoWork()
		{
			bool bGetResult;
			{
				INC_DWORD_STAT(STAT_DDC_NumGets);
				STAT(double ThisTime = 0);
				{
					SCOPE_SECONDS_COUNTER(ThisTime);
					bGetResult = FDerivedDataBackend::Get().GetRoot().GetCachedData(*CacheKey, Data);
				}
				INC_FLOAT_STAT_BY(STAT_DDC_SyncGetTime, bSynchronousForStats ? (float)ThisTime : 0.0f);
			}
			if (bGetResult)
			{
				check(Data.Num());
				bSuccess = true;
				delete DataDeriver;
				DataDeriver = NULL;
			}
			else if (DataDeriver)
			{
				{
					INC_DWORD_STAT(STAT_DDC_NumBuilds);
					STAT(double ThisTime = 0);
					{
						SCOPE_SECONDS_COUNTER(ThisTime);
						bSuccess = DataDeriver->Build(Data);
					}
					INC_FLOAT_STAT_BY(STAT_DDC_SyncBuildTime, bSynchronousForStats ? (float)ThisTime : 0.0f);
				}
				delete DataDeriver;
				DataDeriver = NULL;
				if (bSuccess)
				{
					check(Data.Num());
					INC_DWORD_STAT(STAT_DDC_NumPuts);
					STAT(double ThisTime = 0);
					{
						SCOPE_SECONDS_COUNTER(ThisTime);
						FDerivedDataBackend::Get().GetRoot().PutCachedData(*CacheKey, Data, true);
					}
					INC_FLOAT_STAT_BY(STAT_DDC_PutTime, bSynchronousForStats ? (float)ThisTime : 0.0f);
				}
			}
			if (!bSuccess)
			{
				Data.Empty();
			}
			FDerivedDataBackend::Get().AddToAsyncCompletionCounter(-1);
		}
Code example #4
	virtual void Put(const TCHAR* CacheKey, TArray<uint8>& Data, bool bPutEvenIfExists = false) override
	{
		STAT(double ThisTime = 0);
		{
			SCOPE_SECONDS_COUNTER(ThisTime);
			FDerivedDataBackend::Get().GetRoot().PutCachedData(CacheKey, Data, bPutEvenIfExists);
		}
		INC_FLOAT_STAT_BY(STAT_DDC_PutTime,(float)ThisTime);
		INC_DWORD_STAT(STAT_DDC_NumPuts);
	}
Code example #5
	virtual bool CachedDataProbablyExists(const TCHAR* CacheKey) override
	{
		bool bResult;
		INC_DWORD_STAT(STAT_DDC_NumExist);
		STAT(double ThisTime = 0);
		{
			SCOPE_SECONDS_COUNTER(ThisTime);
			bResult = FDerivedDataBackend::Get().GetRoot().CachedDataProbablyExists(CacheKey);
		}
		INC_FLOAT_STAT_BY(STAT_DDC_ExistTime, (float)ThisTime);
		return bResult;
	}
Code example #6
	virtual void WaitAsynchronousCompletion(uint32 Handle) override
	{
		STAT(double ThisTime = 0);
		{
			SCOPE_SECONDS_COUNTER(ThisTime);
			FAsyncTask<FBuildAsyncWorker>* AsyncTask = NULL;
			{
				FScopeLock ScopeLock(&SynchronizationObject);
				AsyncTask = PendingTasks.FindRef(Handle);
			}
			check(AsyncTask);
			AsyncTask->EnsureCompletion();
		}
		INC_FLOAT_STAT_BY(STAT_DDC_ASyncWaitTime,(float)ThisTime);
	}
Code example #7
File: Shader.cpp Project: amyvmiwei/UnrealEngine4
void FShaderResource::InitializePixelShaderRHI() 
{ 
	if (!IsInitialized())
	{
		STAT(double ShaderInitializationTime = 0);
		{
			SCOPE_CYCLE_COUNTER(STAT_Shaders_FrameRTShaderInitForRenderingTime);
			SCOPE_SECONDS_COUNTER(ShaderInitializationTime);

			InitResourceFromPossiblyParallelRendering();
		}

		INC_FLOAT_STAT_BY(STAT_Shaders_TotalRTShaderInitForRenderingTime,(float)ShaderInitializationTime);
	}

	checkSlow(IsInitialized());
}
Code example #8
void FAsyncIOSystemBase::InternalRead( IFileHandle* FileHandle, int64 Offset, int64 Size, void* Dest )
{
	DECLARE_SCOPE_CYCLE_COUNTER(TEXT("FAsyncIOSystemBase::InternalRead"), STAT_AsyncIOSystemBase_InternalRead, STATGROUP_AsyncIO_Verbose);

	FScopeLock ScopeLock( ExclusiveReadCriticalSection );

	STAT(double ReadTime = 0);
	{	
		SCOPE_SECONDS_COUNTER(ReadTime);
		PlatformReadDoNotCallDirectly( FileHandle, Offset, Size, Dest );
	}	
	INC_FLOAT_STAT_BY(STAT_AsyncIO_PlatformReadTime,(float)ReadTime);

	// The platform might actually read more than Size due to aligning and internal min read sizes
	// though we only really care about throttling requested bandwidth as it's not very accurate
	// to begin with.
	STAT(ConstrainBandwidth(Size, ReadTime));
}
Code example #9
	virtual void WaitAsynchronousCompletion(uint32 Handle) override
	{
		STAT(double ThisTime = 0);
		{
			SCOPE_SECONDS_COUNTER(ThisTime);
			FScopeLock ScopeLock(&SynchronizationObject);
			for (TSet<FDerivedDataRollup*>::TIterator Iter(PendingRollups); Iter; ++Iter)
			{
				if ((*Iter)->Contains(Handle))
				{
					(*Iter)->WaitAsynchronousCompletion(Handle);
					return;
				}
			}
		}
		INC_FLOAT_STAT_BY(STAT_DDC_ASyncWaitTime,(float)ThisTime);
		Super::WaitAsynchronousCompletion(Handle);
	}
Code example #10
File: Shader.cpp Project: amyvmiwei/UnrealEngine4
const FHullShaderRHIRef& FShaderResource::GetHullShader() 
{ 
	checkSlow(Target.Frequency == SF_Hull);
	if (!IsInitialized())
	{
		STAT(double ShaderInitializationTime = 0);
		{
			SCOPE_CYCLE_COUNTER(STAT_Shaders_FrameRTShaderInitForRenderingTime);
			SCOPE_SECONDS_COUNTER(ShaderInitializationTime);

			InitResourceFromPossiblyParallelRendering();
		}

		INC_FLOAT_STAT_BY(STAT_Shaders_TotalRTShaderInitForRenderingTime,(float)ShaderInitializationTime);
	}

	checkSlow(IsInitialized());

	return HullShader; 
}
Code example #11
void FAsyncIOSystemBase::FulfillCompressedRead( const FAsyncIORequest& IORequest, IFileHandle* FileHandle )
{
	DECLARE_SCOPE_CYCLE_COUNTER(TEXT("FAsyncIOSystemBase::FulfillCompressedRead"), STAT_AsyncIOSystemBase_FulfillCompressedRead, STATGROUP_AsyncIO_Verbose);

	if (GbLogAsyncLoading == true)
	{
		LogIORequest(TEXT("FulfillCompressedRead"), IORequest);
	}

	// Initialize variables.
	FAsyncUncompress*		Uncompressor			= NULL;
	uint8*					UncompressedBuffer		= (uint8*) IORequest.Dest;
	// First compression chunk contains information about total size so we skip that one.
	int32						CurrentChunkIndex		= 1;
	int32						CurrentBufferIndex		= 0;
	bool					bHasProcessedAllData	= false;

	// read the first two ints, which will contain the magic bytes (to detect byteswapping)
	// and the original size the chunks were compressed from
	int64						HeaderData[2];
	int32						HeaderSize = sizeof(HeaderData);

	InternalRead(FileHandle, IORequest.Offset, HeaderSize, HeaderData);
	RETURN_IF_EXIT_REQUESTED;

	// if the magic bytes don't match, then we are byteswapped (or corrupted)
	bool bIsByteswapped = HeaderData[0] != PACKAGE_FILE_TAG;
	// if its potentially byteswapped, make sure it's not just corrupted
	if (bIsByteswapped)
	{
		// if it doesn't equal the swapped version, then data is corrupted
		if (HeaderData[0] != PACKAGE_FILE_TAG_SWAPPED)
		{
			UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [header] trying to read %lld bytes at offset %lld from '%s'. Please delete file and recook."),
				IORequest.UncompressedSize, 
				IORequest.Offset ,
				*IORequest.FileName );
			check(0);
			FPlatformMisc::HandleIOFailure(*IORequest.FileName);
		}
		// otherwise, we have a valid byteswapped file, so swap the chunk size
		else
		{
			HeaderData[1] = BYTESWAP_ORDER64(HeaderData[1]);
		}
	}

	int32						CompressionChunkSize	= HeaderData[1];
	
	// handle old packages that don't have the chunk size in the header, in which case
	// we can use the old hardcoded size
	if (CompressionChunkSize == PACKAGE_FILE_TAG)
	{
		CompressionChunkSize = LOADING_COMPRESSION_CHUNK_SIZE;
	}

	// calculate the number of chunks based on the size they were compressed from
	int32						TotalChunkCount = (IORequest.UncompressedSize + CompressionChunkSize - 1) / CompressionChunkSize + 1;

	// allocate chunk info data based on number of chunks
	FCompressedChunkInfo*	CompressionChunks		= (FCompressedChunkInfo*)FMemory::Malloc(sizeof(FCompressedChunkInfo) * TotalChunkCount);
	int32						ChunkInfoSize			= (TotalChunkCount) * sizeof(FCompressedChunkInfo);
	void*					CompressedBuffer[2]		= { 0, 0 };
	
	// Read table of compression chunks after seeking to offset (after the initial header data)
	InternalRead( FileHandle, IORequest.Offset + HeaderSize, ChunkInfoSize, CompressionChunks );
	RETURN_IF_EXIT_REQUESTED;

	// Handle byte swapping. This is required for opening a cooked file on the PC.
	int64 CalculatedUncompressedSize = 0;
	if (bIsByteswapped)
	{
		for( int32 ChunkIndex=0; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			CompressionChunks[ChunkIndex].CompressedSize	= BYTESWAP_ORDER64(CompressionChunks[ChunkIndex].CompressedSize);
			CompressionChunks[ChunkIndex].UncompressedSize	= BYTESWAP_ORDER64(CompressionChunks[ChunkIndex].UncompressedSize);
			if (ChunkIndex > 0)
			{
				CalculatedUncompressedSize += CompressionChunks[ChunkIndex].UncompressedSize;
			}
		}
	}
	else
	{
		for( int32 ChunkIndex=1; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			CalculatedUncompressedSize += CompressionChunks[ChunkIndex].UncompressedSize;
		}
	}

	if (CompressionChunks[0].UncompressedSize != CalculatedUncompressedSize)
	{
		UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [incorrect uncompressed size] calculated %i bytes, requested %i bytes at offset %i from '%s'. Please delete file and recook."),
			CalculatedUncompressedSize,
			IORequest.UncompressedSize, 
			IORequest.Offset ,
			*IORequest.FileName );
		check(0);
		FPlatformMisc::HandleIOFailure(*IORequest.FileName);
	}

	if (ChunkInfoSize + HeaderSize + CompressionChunks[0].CompressedSize > IORequest.Size )
	{
		UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [undershoot] trying to read %lld bytes at offset %lld from '%s'. Please delete file and recook."),
			IORequest.UncompressedSize, 
			IORequest.Offset ,
			*IORequest.FileName );
		check(0);
		FPlatformMisc::HandleIOFailure(*IORequest.FileName);
	}

	if (IORequest.UncompressedSize != CalculatedUncompressedSize)
	{
		UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [incorrect uncompressed size] calculated %lld bytes, requested %lld bytes at offset %lld from '%s'. Please delete file and recook."),
			CalculatedUncompressedSize,
			IORequest.UncompressedSize, 
			IORequest.Offset ,
			*IORequest.FileName );
		check(0);
		FPlatformMisc::HandleIOFailure(*IORequest.FileName);
	}

	// Figure out maximum size of compressed data chunk.
	int64 MaxCompressedSize = 0;
	for (int32 ChunkIndex = 1; ChunkIndex < TotalChunkCount; ChunkIndex++)
	{
		MaxCompressedSize = FMath::Max(MaxCompressedSize, CompressionChunks[ChunkIndex].CompressedSize);
		// Verify the all chunks are 'full size' until the last one...
		if (CompressionChunks[ChunkIndex].UncompressedSize < CompressionChunkSize)
		{
			if (ChunkIndex != (TotalChunkCount - 1))
			{
				checkf(0, TEXT("Calculated too many chunks: %d should be last, there are %d from '%s'"), ChunkIndex, TotalChunkCount, *IORequest.FileName);
			}
		}
		check( CompressionChunks[ChunkIndex].UncompressedSize <= CompressionChunkSize );
	}

	int32 Padding = 0;

	// Allocate memory for compressed data.
	CompressedBuffer[0]	= FMemory::Malloc( MaxCompressedSize + Padding );
	CompressedBuffer[1] = FMemory::Malloc( MaxCompressedSize + Padding );

	// Initial read request.
	InternalRead( FileHandle, FileHandle->Tell(), CompressionChunks[CurrentChunkIndex].CompressedSize, CompressedBuffer[CurrentBufferIndex] );
	RETURN_IF_EXIT_REQUESTED;

	// Loop till we're done decompressing all data.
	while( !bHasProcessedAllData )
	{
		FAsyncTask<FAsyncUncompress> UncompressTask(
			IORequest.CompressionFlags,
			UncompressedBuffer,
			CompressionChunks[CurrentChunkIndex].UncompressedSize,
			CompressedBuffer[CurrentBufferIndex],
			CompressionChunks[CurrentChunkIndex].CompressedSize,
			(Padding > 0)
			);

#if BLOCK_ON_DECOMPRESSION
		UncompressTask.StartSynchronousTask();
#else
		UncompressTask.StartBackgroundTask();
#endif

		// Advance destination pointer.
		UncompressedBuffer += CompressionChunks[CurrentChunkIndex].UncompressedSize;
	
		// Check whether we are already done reading.
		if( CurrentChunkIndex < TotalChunkCount-1 )
		{
			// Can't postincrement in if statement as we need it to remain at valid value for one more loop iteration to finish
			// the decompression.
			CurrentChunkIndex++;
			// Swap compression buffers to read into.
			CurrentBufferIndex = 1 - CurrentBufferIndex;
			// Read more data.
			InternalRead( FileHandle, FileHandle->Tell(), CompressionChunks[CurrentChunkIndex].CompressedSize, CompressedBuffer[CurrentBufferIndex] );
			RETURN_IF_EXIT_REQUESTED;
		}
		// We were already done reading the last time around so we are done processing now.
		else
		{
			bHasProcessedAllData = true;
		}
		
		//@todo async loading: should use event for this
		STAT(double UncompressorWaitTime = 0);
		{
			SCOPE_SECONDS_COUNTER(UncompressorWaitTime);
			UncompressTask.EnsureCompletion(); // just decompress on this thread if it isn't started yet
		}
		INC_FLOAT_STAT_BY(STAT_AsyncIO_UncompressorWaitTime,(float)UncompressorWaitTime);
	}

	FMemory::Free(CompressionChunks);
	FMemory::Free(CompressedBuffer[0]);
	FMemory::Free(CompressedBuffer[1] );
}