Example #1
void FAsyncIOSystemBase::FulfillCompressedRead( const FAsyncIORequest& IORequest, IFileHandle* FileHandle )
{
	DECLARE_SCOPE_CYCLE_COUNTER(TEXT("FAsyncIOSystemBase::FulfillCompressedRead"), STAT_AsyncIOSystemBase_FulfillCompressedRead, STATGROUP_AsyncIO_Verbose);

	if (GbLogAsyncLoading == true)
	{
		LogIORequest(TEXT("FulfillCompressedRead"), IORequest);
	}

	// Initialize variables.
	FAsyncUncompress*		Uncompressor			= NULL;
	uint8*					UncompressedBuffer		= (uint8*) IORequest.Dest;
	// First compression chunk contains information about total size so we skip that one.
	int32						CurrentChunkIndex		= 1;
	int32						CurrentBufferIndex		= 0;
	bool					bHasProcessedAllData	= false;

	// read the first two ints, which will contain the magic bytes (to detect byteswapping)
	// and the original size the chunks were compressed from
	int64						HeaderData[2];
	int32						HeaderSize = sizeof(HeaderData);

	InternalRead(FileHandle, IORequest.Offset, HeaderSize, HeaderData);
	RETURN_IF_EXIT_REQUESTED;

	// if the magic bytes don't match, then we are byteswapped (or corrupted)
	bool bIsByteswapped = HeaderData[0] != PACKAGE_FILE_TAG;
	// if its potentially byteswapped, make sure it's not just corrupted
	if (bIsByteswapped)
	{
		// if it doesn't equal the swapped version, then data is corrupted
		if (HeaderData[0] != PACKAGE_FILE_TAG_SWAPPED)
		{
			UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [header] trying to read %lld bytes at offset %lld from '%s'. Please delete file and recook."),
				IORequest.UncompressedSize, 
				IORequest.Offset ,
				*IORequest.FileName );
			check(0);
			FPlatformMisc::HandleIOFailure(*IORequest.FileName);
		}
		// otherwise, we have a valid byteswapped file, so swap the chunk size
		else
		{
			HeaderData[1] = BYTESWAP_ORDER64(HeaderData[1]);
		}
	}

	int32						CompressionChunkSize	= HeaderData[1];
	
	// handle old packages that don't have the chunk size in the header, in which case
	// we can use the old hardcoded size
	if (CompressionChunkSize == PACKAGE_FILE_TAG)
	{
		CompressionChunkSize = LOADING_COMPRESSION_CHUNK_SIZE;
	}

	// calculate the number of chunks based on the size they were compressed from
	int32						TotalChunkCount = (IORequest.UncompressedSize + CompressionChunkSize - 1) / CompressionChunkSize + 1;

	// allocate chunk info data based on number of chunks
	FCompressedChunkInfo*	CompressionChunks		= (FCompressedChunkInfo*)FMemory::Malloc(sizeof(FCompressedChunkInfo) * TotalChunkCount);
	int32						ChunkInfoSize			= (TotalChunkCount) * sizeof(FCompressedChunkInfo);
	void*					CompressedBuffer[2]		= { 0, 0 };
	
	// Read table of compression chunks after seeking to offset (after the initial header data)
	InternalRead( FileHandle, IORequest.Offset + HeaderSize, ChunkInfoSize, CompressionChunks );
	RETURN_IF_EXIT_REQUESTED;

	// Handle byte swapping. This is required for opening a cooked file on the PC.
	int64 CalculatedUncompressedSize = 0;
	if (bIsByteswapped)
	{
		for( int32 ChunkIndex=0; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			CompressionChunks[ChunkIndex].CompressedSize	= BYTESWAP_ORDER64(CompressionChunks[ChunkIndex].CompressedSize);
			CompressionChunks[ChunkIndex].UncompressedSize	= BYTESWAP_ORDER64(CompressionChunks[ChunkIndex].UncompressedSize);
			if (ChunkIndex > 0)
			{
				CalculatedUncompressedSize += CompressionChunks[ChunkIndex].UncompressedSize;
			}
		}
	}
	else
	{
		for( int32 ChunkIndex=1; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			CalculatedUncompressedSize += CompressionChunks[ChunkIndex].UncompressedSize;
		}
	}

	if (CompressionChunks[0].UncompressedSize != CalculatedUncompressedSize)
	{
		UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [incorrect uncompressed size] calculated %lld bytes, requested %lld bytes at offset %lld from '%s'. Please delete file and recook."),
			CalculatedUncompressedSize,
			IORequest.UncompressedSize, 
			IORequest.Offset ,
			*IORequest.FileName );
		check(0);
		FPlatformMisc::HandleIOFailure(*IORequest.FileName);
	}

	if (ChunkInfoSize + HeaderSize + CompressionChunks[0].CompressedSize > IORequest.Size )
	{
		UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [undershoot] trying to read %lld bytes at offset %lld from '%s'. Please delete file and recook."),
			IORequest.UncompressedSize, 
			IORequest.Offset ,
			*IORequest.FileName );
		check(0);
		FPlatformMisc::HandleIOFailure(*IORequest.FileName);
	}

	if (IORequest.UncompressedSize != CalculatedUncompressedSize)
	{
		UE_LOG(LogStreaming, Warning, TEXT("Detected data corruption [incorrect uncompressed size] calculated %lld bytes, requested %lld bytes at offset %lld from '%s'. Please delete file and recook."),
			CalculatedUncompressedSize,
			IORequest.UncompressedSize, 
			IORequest.Offset ,
			*IORequest.FileName );
		check(0);
		FPlatformMisc::HandleIOFailure(*IORequest.FileName);
	}

	// Figure out maximum size of compressed data chunk.
	int64 MaxCompressedSize = 0;
	for (int32 ChunkIndex = 1; ChunkIndex < TotalChunkCount; ChunkIndex++)
	{
		MaxCompressedSize = FMath::Max(MaxCompressedSize, CompressionChunks[ChunkIndex].CompressedSize);
		// Verify that all chunks are 'full size' until the last one...
		if (CompressionChunks[ChunkIndex].UncompressedSize < CompressionChunkSize)
		{
			if (ChunkIndex != (TotalChunkCount - 1))
			{
				checkf(0, TEXT("Calculated too many chunks: %d should be last, there are %d from '%s'"), ChunkIndex, TotalChunkCount, *IORequest.FileName);
			}
		}
		check( CompressionChunks[ChunkIndex].UncompressedSize <= CompressionChunkSize );
	}

	int32 Padding = 0;

	// Allocate memory for compressed data.
	CompressedBuffer[0]	= FMemory::Malloc( MaxCompressedSize + Padding );
	CompressedBuffer[1] = FMemory::Malloc( MaxCompressedSize + Padding );

	// Initial read request.
	InternalRead( FileHandle, FileHandle->Tell(), CompressionChunks[CurrentChunkIndex].CompressedSize, CompressedBuffer[CurrentBufferIndex] );
	RETURN_IF_EXIT_REQUESTED;

	// Loop till we're done decompressing all data.
	while( !bHasProcessedAllData )
	{
		FAsyncTask<FAsyncUncompress> UncompressTask(
			IORequest.CompressionFlags,
			UncompressedBuffer,
			CompressionChunks[CurrentChunkIndex].UncompressedSize,
			CompressedBuffer[CurrentBufferIndex],
			CompressionChunks[CurrentChunkIndex].CompressedSize,
			(Padding > 0)
			);

#if BLOCK_ON_DECOMPRESSION
		UncompressTask.StartSynchronousTask();
#else
		UncompressTask.StartBackgroundTask();
#endif

		// Advance destination pointer.
		UncompressedBuffer += CompressionChunks[CurrentChunkIndex].UncompressedSize;
	
		// Check whether we are already done reading.
		if( CurrentChunkIndex < TotalChunkCount-1 )
		{
			// Can't post-increment in the if statement as we need it to remain at a valid value for one
			// more loop iteration to finish the decompression.
			CurrentChunkIndex++;
			// Swap compression buffers to read into.
			CurrentBufferIndex = 1 - CurrentBufferIndex;
			// Read more data.
			InternalRead( FileHandle, FileHandle->Tell(), CompressionChunks[CurrentChunkIndex].CompressedSize, CompressedBuffer[CurrentBufferIndex] );
			RETURN_IF_EXIT_REQUESTED;
		}
		// We were already done reading the last time around so we are done processing now.
		else
		{
			bHasProcessedAllData = true;
		}
		
		//@todo async loading: should use event for this
		STAT(double UncompressorWaitTime = 0);
		{
			SCOPE_SECONDS_COUNTER(UncompressorWaitTime);
			UncompressTask.EnsureCompletion(); // just decompress on this thread if it isn't started yet
		}
		INC_FLOAT_STAT_BY(STAT_AsyncIO_UncompressorWaitTime,(float)UncompressorWaitTime);
	}

	FMemory::Free(CompressionChunks);
	FMemory::Free(CompressedBuffer[0]);
	FMemory::Free(CompressedBuffer[1]);
}
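
The function above assumes a payload laid out as a two-int64 header (file tag, chunk size), followed by a table of FCompressedChunkInfo entries whose first entry summarizes the whole payload, followed by the compressed chunks themselves. The sketch below is not engine code; it is a minimal, self-contained illustration of the consistency check FulfillCompressedRead performs on that table. ChunkInfo and ValidateChunkTable are hypothetical stand-ins.

#include <cstddef>
#include <cstdint>
#include <vector>

struct ChunkInfo                       // stand-in for FCompressedChunkInfo
{
	int64_t CompressedSize;
	int64_t UncompressedSize;
};

// Returns true if the chunk table is internally consistent with the requested size:
// the per-chunk uncompressed sizes (entries 1..N-1) must sum to both the summary
// entry (index 0) and the size the caller asked for.
bool ValidateChunkTable(const std::vector<ChunkInfo>& Chunks, int64_t RequestedUncompressedSize)
{
	if (Chunks.empty())
	{
		return false;
	}

	int64_t CalculatedUncompressedSize = 0;
	for (size_t ChunkIndex = 1; ChunkIndex < Chunks.size(); ++ChunkIndex)
	{
		CalculatedUncompressedSize += Chunks[ChunkIndex].UncompressedSize;
	}

	// Mirrors the two "incorrect uncompressed size" corruption checks above.
	return CalculatedUncompressedSize == Chunks[0].UncompressedSize
		&& CalculatedUncompressedSize == RequestedUncompressedSize;
}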
Example #2
void FAsyncIOSystemBase::Tick()
{
	// Create file handles.
	{
		TArray<FString> FileNamesToCacheHandles; 
		// Only enter critical section for copying existing array over. We don't operate on the 
		// real array as creating file handles might take a while and we don't want to have other
		// threads stalling on submission of requests.
		{
			FScopeLock ScopeLock( CriticalSection );

			for( int32 RequestIdx=0; RequestIdx<OutstandingRequests.Num(); RequestIdx++ )
			{
				// Early outs avoid unnecessary work and string copies with implicit allocator churn.
				FAsyncIORequest& OutstandingRequest = OutstandingRequests[RequestIdx];
				if( OutstandingRequest.bHasAlreadyRequestedHandleToBeCached == false
				&&	OutstandingRequest.bIsDestroyHandleRequest == false 
				&&	FindCachedFileHandle( OutstandingRequest.FileNameHash ) == NULL )
				{
					new(FileNamesToCacheHandles)FString(*OutstandingRequest.FileName);
					OutstandingRequest.bHasAlreadyRequestedHandleToBeCached = true;
				}
			}
		}
		// Create file handles for requests down the pipe. This is done here so we can later on
		// use the handles to figure out the sort keys.
		for( int32 FileNameIndex=0; FileNameIndex<FileNamesToCacheHandles.Num(); FileNameIndex++ )
		{
			GetCachedFileHandle( FileNamesToCacheHandles[FileNameIndex] );
		}
	}

	// Copy of request.
	FAsyncIORequest IORequest;
	bool			bIsRequestPending	= false;
	{
		FScopeLock ScopeLock( CriticalSection );
		if( OutstandingRequests.Num() )
		{
			// Gets next request index based on platform specific criteria like layout on disc.
			int32 TheRequestIndex = PlatformGetNextRequestIndex();
			if( TheRequestIndex != INDEX_NONE )
			{					
				// We need to copy as we're going to remove it...
				IORequest = OutstandingRequests[ TheRequestIndex ];
				// ...right here.
				// NOTE: this needs to be a Remove, not a RemoveSwap because the base implementation
				// of PlatformGetNextRequestIndex is a FIFO taking priority into account
				OutstandingRequests.RemoveAt( TheRequestIndex );		
				// We're busy. Updated inside scoped lock to ensure BlockTillAllRequestsFinished works correctly.
				BusyWithRequest.Increment();
				bIsRequestPending = true;
			}
		}
	}

	// We only have work to do if there's a request pending.
	if( bIsRequestPending )
	{
		// handle a destroy handle request from the queue
		if( IORequest.bIsDestroyHandleRequest )
		{
			IFileHandle*	FileHandle = FindCachedFileHandle( IORequest.FileNameHash );
			if( FileHandle )
			{
				// destroy and remove the handle
				delete FileHandle;
				NameHashToHandleMap.Remove(IORequest.FileNameHash);
			}
		}
		else
		{
			// Retrieve cached handle or create it if it wasn't cached. We purposefully don't look at currently
			// set value as it might be stale by now.
			IFileHandle* FileHandle = GetCachedFileHandle( IORequest.FileName );
			if( FileHandle )
			{
				if( IORequest.UncompressedSize )
				{
					// Data is compressed on disc so we need to also decompress.
					FulfillCompressedRead( IORequest, FileHandle );
				}
				else
				{
					// Read data after seeking.
					InternalRead( FileHandle, IORequest.Offset, IORequest.Size, IORequest.Dest );
				}
				INC_DWORD_STAT( STAT_AsyncIO_FulfilledReadCount );
				INC_DWORD_STAT_BY( STAT_AsyncIO_FulfilledReadSize, IORequest.Size );
			}
			else
			{
				//@todo streaming: add warning once we have thread safe logging.
			}

			DEC_DWORD_STAT( STAT_AsyncIO_OutstandingReadCount );
			DEC_DWORD_STAT_BY( STAT_AsyncIO_OutstandingReadSize, IORequest.Size );
		}

		// Request fulfilled.
		if( IORequest.Counter )
		{
			IORequest.Counter->Decrement(); 
		}
		// We're done reading for now.
		BusyWithRequest.Decrement();	
	}
	else
	{
		if( !OutstandingRequests.Num() && FPlatformProcess::SupportsMultithreading() )
		{
			// We're really out of requests now, wait till the calling thread signals further work
			OutstandingRequestsEvent->Wait();
		}
	}
}
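
The counter handling at the end of Tick() implies the usual completion pattern: the submitting thread attaches a thread-safe counter to its request, and the IO thread decrements it once the read is fulfilled. The request-submission API is not shown above, so the following is only a sketch of that wait in standard C++; FakeRequestCounter and WaitForRequest are hypothetical, not engine types.

#include <atomic>
#include <chrono>
#include <thread>

struct FakeRequestCounter              // stand-in for the request's completion counter
{
	std::atomic<int> Value{1};         // one outstanding read

	void Decrement() { Value.fetch_sub(1); }   // called by the IO thread when done
	int  GetValue() const { return Value.load(); }
};

// Blocks until the IO thread has fulfilled the request, i.e. until Tick() has
// called Counter.Decrement() for it.
void WaitForRequest(const FakeRequestCounter& Counter)
{
	while (Counter.GetValue() > 0)
	{
		std::this_thread::sleep_for(std::chrono::milliseconds(1));
	}
}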
void CommandBuffer::Read(char* out, size_t len, size_t offset) {
    // Add an additional SAFETY_OFFSET to catch what the write pointer has written.
    InternalRead(readerOffset + offset + SAFETY_OFFSET, out, len);
}
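
CommandBuffer's internals, SAFETY_OFFSET, and InternalRead are not shown here, so the following is only a guess at the kind of wrapping read InternalRead might perform over a fixed-capacity circular buffer. RingBuffer is a hypothetical stand-in, not the actual implementation.

#include <cstddef>

struct RingBuffer
{
    const char* data;
    size_t      capacity;

    // Copies len bytes starting at the (possibly wrapping) absolute position pos.
    void InternalRead(size_t pos, char* out, size_t len) const
    {
        for (size_t i = 0; i < len; ++i)
        {
            out[i] = data[(pos + i) % capacity];
        }
    }
};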