Example #1
File: MTVU.cpp Project: Fordi/pcsx2
// Called on Saving/Loading states...
void SaveStateBase::mtvuFreeze()
{
    FreezeTag("MTVU");
    pxAssert(vu1Thread.IsDone());
    if (!IsSaving()) vu1Thread.Reset();
    Freeze(vu1Thread.vuCycles);
    Freeze(vu1Thread.vuCycleIdx);
}
void FPacketCaptureArchive::SerializeCaptureHeader()
{
	Header.SerializeHeader(*this);

	if (IsSaving() && bImmediateFlush)
	{
		Flush();
	}
}
Example #3
		void BinarySerializerBase::SetVersion(uint32_t i)
		{
			m_version = i;
			if(IsSaving() && m_bWithMetadata)
			{
				//Update version metadata
				memcpy(m_data + sizeof(int32_t), &i, sizeof(i));
			}
		}
FOodleDictionaryArchive::FOodleDictionaryArchive(FArchive& InInnerArchive)
	: FOodleArchiveBase(InInnerArchive)
	, Header()
{
	if (IsSaving())
	{
		Header.DictionaryVersion = DICTIONARY_FILE_VERSION;
		Header.OodleMajorHeaderVersion = OODLE2_VERSION_MAJOR;
	}
}
void FPacketCaptureArchive::AppendPacketFile(FPacketCaptureArchive& InPacketFile)
{
	check(IsSaving());
	check(Tell() != 0);	// Can't append a packet before writing the header
	check(InPacketFile.IsLoading());
	check(InPacketFile.Tell() == 0);

	// Read past the header
	InPacketFile.SerializeCaptureHeader();

	check(Header.CaptureVersion == InPacketFile.Header.CaptureVersion);

	// For appending, only support 1MB packets
	const uint32 BufferSize = 1024 * 1024;
	uint8* ReadBuffer = new uint8[BufferSize];

	// Iterate through all packets
	while (InPacketFile.Tell() < InPacketFile.TotalSize())
	{
		uint32 PacketSize = BufferSize;

		InPacketFile.SerializePacket((void*)ReadBuffer, PacketSize);

		if (InPacketFile.IsError())
		{
			UE_LOG(OodleHandlerComponentLog, Warning, TEXT("Error reading packet capture data. Skipping rest of file."));

			break;
		}


		SerializePacket(ReadBuffer, PacketSize);
	}

	delete[] ReadBuffer;


	if (IsSaving() && bImmediateFlush)
	{
		Flush();
	}
}
void FPacketCaptureArchive::SerializePacket(void* PacketData, uint32& PacketSize)
{
	check(Header.PacketDataOffset.Get() != 0);
	check(Tell() >= Header.PacketDataOffset.Get());

	uint32 PacketBufferSize = (IsLoading() ? PacketSize : 0);
	uint64 StartPos = Tell();
	InnerArchive << PacketSize;

	if (IsLoading())
	{
		if (Header.CaptureVersion >= CAPTURE_VER_PACKETCOUNT)
		{
			// Added PacketSize here, to deliberately overshoot, in case PacketDataLength is not updated in the file (possible)
			check(Tell() < Header.PacketDataOffset.Get() + Header.PacketDataLength.Get() + (uint32)sizeof(PacketSize) + PacketSize);
		}

		// Max 128MB packet - excessive, but this is not meant to be a perfect security check
		if (PacketBufferSize < PacketSize || !ensure(PacketSize <= 134217728))
		{
			UE_LOG(OodleHandlerComponentLog, Warning, TEXT("Bad PacketSize value '%i' in loading packet capture file"), PacketSize);
			SetError();

			return;
		}

		if (PacketSize > (InnerArchive.TotalSize() - InnerArchive.Tell()))
		{
			UE_LOG(OodleHandlerComponentLog, Warning, TEXT("PacketSize '%i' greater than remaining file data '%i'. Truncated file? ")
					TEXT("(run server with -forcelogflush to reduce chance of truncated capture files)"),
					PacketSize, (InnerArchive.TotalSize() - InnerArchive.Tell()));

			SetError();

			return;
		}
	}

	InnerArchive.Serialize(PacketData, PacketSize);

	if (IsSaving())
	{
		uint32 NewPacketCount = Header.PacketCount.Get() + 1;
		uint32 NewPacketDataLength = Header.PacketDataLength.Get() + (Tell() - StartPos);

		Header.PacketCount.Set(*this, NewPacketCount);
		Header.PacketDataLength.Set(*this, NewPacketDataLength);

		if (bImmediateFlush)
		{
			Flush();
		}
	}
}
uint32 FPacketCaptureArchive::GetPacketCount()
{
	uint32 ReturnVal = 0;

	if (IsSaving())
	{
		ReturnVal = Header.PacketCount.Get();
	}
	else
	{
		if (Header.CaptureVersion >= CAPTURE_VER_PACKETCOUNT)
		{
			ReturnVal = Header.PacketCount.Get();
		}
		// Do it the hard way, by recomputing through stepping through all packets
		// @todo #JohnB: Deprecate this, as part of removing pre-'CAPTURE_VER_PACKETCOUNT' file version support
		else
		{
			int64 ArcTotal = InnerArchive.TotalSize();

			check(Header.PacketDataOffset.Get() != 0);

			SeekPush(Header.PacketDataOffset.Get());

			while ((InnerArchive.Tell() + (int64)sizeof(uint32)) < ArcTotal)
			{
				uint32 PacketSize = 0;

				InnerArchive << PacketSize;

				int64 NewPos = InnerArchive.Tell() + PacketSize;

				if (NewPos <= ArcTotal)
				{
					InnerArchive.Seek(NewPos);
					ReturnVal++;
				}
			}

			SeekPop();
		}
	}

	return ReturnVal;
}
Example #8
		void BinarySerializerBase::serialize_impl(void* var, const uint32_t bytes, bool)
		{
			if(!m_bIsValid)
			{
				m_bError = true;
			}
			if(m_bError)
			{
				return;
			}
			if(IsSaving())
			{
				if(m_pos + bytes >= m_capacity*0.75)
				{
					int oldcap = m_capacity;
					uint32_t newPos = m_pos + bytes;
					while(newPos >= m_capacity*0.75)
					{
						m_capacity = m_capacity * 2 + 1;
					}
					char* newdata = new char[m_capacity];
					memcpy(newdata, m_data, oldcap);
					delete[] m_data;
					m_data = newdata;
				}
				memcpy( m_data + m_pos, var, bytes );

				m_size += (uint32_t)bytes;
				if(m_bWithMetadata)
				{
					memcpy( m_data, &m_size, sizeof(int32_t) );
					m_checksum_stale = true;
				}
			}
			else
			{
				if(m_bInitialized && m_pos + bytes > m_size)
				{
					// Reading past the recorded size would overrun the buffer, so flag the error and bail out.
					m_bError = true;
					return;
				}
				memcpy( var, m_data + m_pos, bytes );
			}

			m_pos += (uint32_t)bytes;
		}
Example #9
		void BinarySerializerBase::Reset()
		{
			if(IsSaving())
			{
				m_capacity = 256;
				m_data = new char[m_capacity];
				if(m_bWithMetadata)
				{
					m_pos = ms_headerSize;
					m_size = m_pos;
					//Zero out the entire contents
					memset(m_data, 0, m_capacity);

					m_checksum = 0;
					m_checksum_stale = false;

					//Size, first int
					memcpy( m_data, &m_size, sizeof(int32_t) );
					//Version, second int
					memcpy(m_data + sizeof(int32_t), &m_version, sizeof(m_version));
					//Checksum, third int
					memcpy( m_data + sizeof(int32_t)*2, &m_checksum, sizeof(int32_t) );
				}
				else
				{
					m_pos = 0;
					m_size = 0;
					memset(m_data, 0, m_capacity);
				}
			}
			else
			{
				if(m_bWithMetadata)
					m_pos = ms_headerSize;
				else
					m_pos = 0;
			}
		}
Example #10
void
CDocument::RemoveWindow(
	CDocWindow *window)
{
	D_WINDOW(("CDocument::RemoveWindow(%s)\n", window->Name()));

	if (window->IsMasterWindow())
	{
		D_WINDOW((" -> '%s' is master window\n", window->Name()));
		RemoveObserver(window);
		if (!IsSaving())
		{
			D_WINDOW((" -> delete self\n"));
			delete this;
		}
	}
	else
	{
		CWriteLock lock(this);
		m_windows.RemoveItem(window);
		RemoveObserver(window);
	}
}
Example #11
void FOodleDictionaryArchive::SetDictionaryHeaderValues(int32 InHashTableSize)
{
	check(IsSaving());

	Header.HashTableSize = InHashTableSize;
}
Example #12
/**
 * Serializes and compresses/uncompresses data. This is a shared helper function for compression
 * support. The data is saved in a way compatible with FIOSystem::LoadCompressedData.
 *
 * @note: the way this code works needs to be in line with FIOSystem::LoadCompressedData implementations
 * @note: the way this code works needs to be in line with FAsyncIOSystemBase::FulfillCompressedRead
 *
 * @param	V		Data pointer to serialize data from/to, or a FileReader if bTreatBufferAsFileReader is true
 * @param	Length	Length of source data if we're saving, unused otherwise
 * @param	Flags	Flags to control what method to use for [de]compression and optionally control memory vs speed when compressing
 * @param	bTreatBufferAsFileReader true if V is actually an FArchive, which is used when saving to read data - helps to avoid single huge allocations of source data
 */
void FArchive::SerializeCompressed( void* V, int64 Length, ECompressionFlags Flags, bool bTreatBufferAsFileReader )
{
	if( IsLoading() )
	{
		// Serialize package file tag used to determine endianness.
		FCompressedChunkInfo PackageFileTag;
		PackageFileTag.CompressedSize	= 0;
		PackageFileTag.UncompressedSize	= 0;
		*this << PackageFileTag;
		bool bWasByteSwapped = PackageFileTag.CompressedSize != PACKAGE_FILE_TAG;

		// Read in base summary.
		FCompressedChunkInfo Summary;
		*this << Summary;

		if (bWasByteSwapped)
		{
			check( PackageFileTag.CompressedSize   == PACKAGE_FILE_TAG_SWAPPED );
			Summary.CompressedSize = BYTESWAP_ORDER64(Summary.CompressedSize);
			Summary.UncompressedSize = BYTESWAP_ORDER64(Summary.UncompressedSize);
			PackageFileTag.UncompressedSize = BYTESWAP_ORDER64(PackageFileTag.UncompressedSize);
		}
		else
		{
			check( PackageFileTag.CompressedSize   == PACKAGE_FILE_TAG );
		}

		// Handle change in compression chunk size in backward compatible way.
		int64 LoadingCompressionChunkSize = PackageFileTag.UncompressedSize;
		if (LoadingCompressionChunkSize == PACKAGE_FILE_TAG)
		{
			LoadingCompressionChunkSize = LOADING_COMPRESSION_CHUNK_SIZE;
		}

		// Figure out how many chunks there are going to be based on uncompressed size and compression chunk size.
		int64	TotalChunkCount	= (Summary.UncompressedSize + LoadingCompressionChunkSize - 1) / LoadingCompressionChunkSize;
		
		// Allocate compression chunk infos and serialize them, keeping track of max size of compression chunks used.
		FCompressedChunkInfo*	CompressionChunks	= new FCompressedChunkInfo[TotalChunkCount];
		int64						MaxCompressedSize	= 0;
		for( int32 ChunkIndex=0; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			*this << CompressionChunks[ChunkIndex];
			if (bWasByteSwapped)
			{
				CompressionChunks[ChunkIndex].CompressedSize	= BYTESWAP_ORDER64( CompressionChunks[ChunkIndex].CompressedSize );
				CompressionChunks[ChunkIndex].UncompressedSize	= BYTESWAP_ORDER64( CompressionChunks[ChunkIndex].UncompressedSize );
			}
			MaxCompressedSize = FMath::Max( CompressionChunks[ChunkIndex].CompressedSize, MaxCompressedSize );
		}

		int64 Padding = 0;

		// Set up destination pointer and allocate memory for compressed chunk[s] (one at a time).
		uint8*	Dest				= (uint8*) V;
		void*	CompressedBuffer	= FMemory::Malloc( MaxCompressedSize + Padding );

		// Iterate over all chunks, serialize them into memory and decompress them directly into the destination pointer
		for( int64 ChunkIndex=0; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			const FCompressedChunkInfo& Chunk = CompressionChunks[ChunkIndex];
			// Read compressed data.
			Serialize( CompressedBuffer, Chunk.CompressedSize );
			// Decompress into dest pointer directly.
			verify( FCompression::UncompressMemory( Flags, Dest, Chunk.UncompressedSize, CompressedBuffer, Chunk.CompressedSize, (Padding > 0) ? true : false ) );
			// And advance it by read amount.
			Dest += Chunk.UncompressedSize;
		}

		// Free up allocated memory.
		FMemory::Free( CompressedBuffer );
		delete [] CompressionChunks;
	}
	else if( IsSaving() )
	{	
		SCOPE_SECONDS_COUNTER(GArchiveSerializedCompressedSavingTime);
		check( Length > 0 );

		// Serialize package file tag used to determine endianness in LoadCompressedData.
		FCompressedChunkInfo PackageFileTag;
		PackageFileTag.CompressedSize	= PACKAGE_FILE_TAG;
		PackageFileTag.UncompressedSize	= GSavingCompressionChunkSize;
		*this << PackageFileTag;

		// Figure out how many chunks there are going to be based on uncompressed size and compression chunk size.
		int64	TotalChunkCount	= (Length + GSavingCompressionChunkSize - 1) / GSavingCompressionChunkSize + 1;
		
		// Keep track of current position so we can later seek back and overwrite stub compression chunk infos.
		int64 StartPosition = Tell();

		// Allocate compression chunk infos and serialize them so we can later overwrite the data.
		FCompressedChunkInfo* CompressionChunks	= new FCompressedChunkInfo[TotalChunkCount];
		for( int64 ChunkIndex=0; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			*this << CompressionChunks[ChunkIndex];
		}

		// The uncompressed size is equal to the passed-in length.
		CompressionChunks[0].UncompressedSize	= Length;
		// Zero initialize compressed size so we can update it during chunk compression.
		CompressionChunks[0].CompressedSize		= 0;

#if WITH_MULTI_THREADED_COMPRESSION

#define MAX_COMPRESSION_JOBS (16)
		// Don't scale more than 16x to avoid going overboard wrt temporary memory.
		FAsyncTask<FAsyncCompressionChunk> AsyncChunks[MAX_COMPRESSION_JOBS];

		// used to keep track of which job is the next one we need to retire
		int32 AsyncChunkIndex[MAX_COMPRESSION_JOBS]={0};

		static uint32 GNumUnusedThreads_SerializeCompressed = -1;
		if (GNumUnusedThreads_SerializeCompressed == (uint32)-1)
		{
			// one-time initialization
			GNumUnusedThreads_SerializeCompressed = 1;
			// if we should use all available cores then we want to compress with all
			if( FParse::Param(FCommandLine::Get(), TEXT("USEALLAVAILABLECORES")) == true )
			{
				GNumUnusedThreads_SerializeCompressed = 0;
			}
		}

		// Maximum number of concurrent async tasks we're going to kick off. This is based on the number of processors
		// available in the system.
		int32 MaxConcurrentAsyncChunks = FMath::Clamp<int32>( FPlatformMisc::NumberOfCores() - GNumUnusedThreads_SerializeCompressed, 1, MAX_COMPRESSION_JOBS );
		if (FParse::Param(FCommandLine::Get(), TEXT("MTCHILD")))
		{
			// throttle this back when doing MT cooks
			MaxConcurrentAsyncChunks = FMath::Min<int32>( MaxConcurrentAsyncChunks,4 );
		}

		// Number of chunks left to finalize.
		int64 NumChunksLeftToFinalize	= (Length + GSavingCompressionChunkSize - 1) / GSavingCompressionChunkSize;
		// Number of chunks left to kick off
		int64 NumChunksLeftToKickOff	= NumChunksLeftToFinalize;
		// Start at index 1 as first chunk info is summary.
		int64	CurrentChunkIndex		= 1;
		// Start at index 1 as first chunk info is summary.
		int64	RetireChunkIndex		= 1;
	
		// Number of bytes remaining to kick off compression for.
		int64 BytesRemainingToKickOff	= Length;
		// Pointer to src data if buffer is memory pointer, NULL if it's an FArchive.
		uint8* SrcBuffer = bTreatBufferAsFileReader ? NULL : (uint8*)V;

		check(!bTreatBufferAsFileReader || ((FArchive*)V)->IsLoading());
		check(NumChunksLeftToFinalize);

		// Loop while there is work left to do based on whether we have finalized all chunks yet.
		while( NumChunksLeftToFinalize )
		{
			// If true we are waiting for async tasks to complete and should wait to complete some
			// if there are no async tasks finishing this iteration.
			bool bNeedToWaitForAsyncTask = false;

			// Try to kick off async tasks if there are chunks left to kick off.
			if( NumChunksLeftToKickOff )
			{
				// Find free index based on looking at uncompressed size. We can't use the thread counter
				// for this as that might be a chunk ready for finalization.
				int32 FreeIndex = INDEX_NONE;
				for( int32 i=0; i<MaxConcurrentAsyncChunks; i++ )
				{
					if( !AsyncChunkIndex[i] )
					{
						FreeIndex = i;
						check(AsyncChunks[FreeIndex].IsIdle()); // this is not supposed to be in use
						break;
					}
				}

				// Kick off async compression task if we found a chunk for it.
				if( FreeIndex != INDEX_NONE )
				{
					FAsyncCompressionChunk& NewChunk = AsyncChunks[FreeIndex].GetTask();
					// 2 times the uncompressed size should be more than enough; the compressed data shouldn't be that much larger
					NewChunk.CompressedSize	= 2 * GSavingCompressionChunkSize;
					// Allocate compressed buffer placeholder on first use.
					if( NewChunk.CompressedBuffer == NULL )
					{
						NewChunk.CompressedBuffer = FMemory::Malloc( NewChunk.CompressedSize	);
					}

					// By default everything is chunked up into GSavingCompressionChunkSize chunks.
					NewChunk.UncompressedSize	= FMath::Min( BytesRemainingToKickOff, (int64)GSavingCompressionChunkSize );
					check(NewChunk.UncompressedSize>0);

					// Need to serialize source data if passed in pointer is an FArchive.
					if( bTreatBufferAsFileReader )
					{
						// Allocate memory on first use. We allocate the maximum amount to allow reuse.
						if( !NewChunk.UncompressedBuffer )
						{
							NewChunk.UncompressedBuffer = FMemory::Malloc(GSavingCompressionChunkSize);
						}
						((FArchive*)V)->Serialize(NewChunk.UncompressedBuffer, NewChunk.UncompressedSize);
					}
					// Advance src pointer by amount to be compressed.
					else
					{
						NewChunk.UncompressedBuffer = SrcBuffer;
						SrcBuffer += NewChunk.UncompressedSize;
					}

					// Update status variables for tracking how much work is left, what to do next.
					BytesRemainingToKickOff -= NewChunk.UncompressedSize;
					AsyncChunkIndex[FreeIndex] = CurrentChunkIndex++;
					NewChunk.Flags = Flags;
					NumChunksLeftToKickOff--;

					AsyncChunks[FreeIndex].StartBackgroundTask();
				}
				// No chunks were available to use, complete some
				else
				{
					bNeedToWaitForAsyncTask = true;
				}
			}

			// Index of oldest chunk, needed as we need to serialize in order.
			int32 OldestAsyncChunkIndex = INDEX_NONE;
			for( int32 i=0; i<MaxConcurrentAsyncChunks; i++ )
			{
				check(AsyncChunkIndex[i] == 0 || AsyncChunkIndex[i] >= RetireChunkIndex);
				check(AsyncChunkIndex[i] < RetireChunkIndex + MaxConcurrentAsyncChunks);
				if (AsyncChunkIndex[i] == RetireChunkIndex)
				{
					OldestAsyncChunkIndex = i;
				}
			}
			check(OldestAsyncChunkIndex != INDEX_NONE);  // the retire chunk better be outstanding


			bool ChunkReady;
			if (bNeedToWaitForAsyncTask)
			{
				// This guarantees that the async work has finished, doing it on this thread if it hasn't been started
				AsyncChunks[OldestAsyncChunkIndex].EnsureCompletion();
				ChunkReady = true;
			}
			else
			{
				ChunkReady = AsyncChunks[OldestAsyncChunkIndex].IsDone();
			}
			if (ChunkReady)
			{
				FAsyncCompressionChunk& DoneChunk = AsyncChunks[OldestAsyncChunkIndex].GetTask();
				// Serialize the data via archive.
				Serialize( DoneChunk.CompressedBuffer, DoneChunk.CompressedSize );

				// Update associated chunk.
				int64 CompressionChunkIndex = RetireChunkIndex++;
				check(CompressionChunkIndex<TotalChunkCount);
				CompressionChunks[CompressionChunkIndex].CompressedSize		= DoneChunk.CompressedSize;
				CompressionChunks[CompressionChunkIndex].UncompressedSize	= DoneChunk.UncompressedSize;

				// Keep track of total compressed size, stored in first chunk.
				CompressionChunks[0].CompressedSize	+= DoneChunk.CompressedSize;

				// Clean up chunk. Src and dst buffer are not touched, as they contain allocations we keep until the end.
				AsyncChunkIndex[OldestAsyncChunkIndex] = 0;
				DoneChunk.CompressedSize	= 0;
				DoneChunk.UncompressedSize = 0;

				// Finalized one :)
				NumChunksLeftToFinalize--;
				bNeedToWaitForAsyncTask = false;
			}
		}

		// Free intermediate buffer storage.
		for( int32 i=0; i<MaxConcurrentAsyncChunks; i++ )
		{
			// Free temporary compressed buffer storage.
			FMemory::Free( AsyncChunks[i].GetTask().CompressedBuffer );
			AsyncChunks[i].GetTask().CompressedBuffer = NULL;
			// Free temporary uncompressed buffer storage if data was serialized in.
			if( bTreatBufferAsFileReader )
			{
				FMemory::Free( AsyncChunks[i].GetTask().UncompressedBuffer );
				AsyncChunks[i].GetTask().UncompressedBuffer = NULL;
			}
		}

#else
		// Set up source pointer and amount of data to copy (in bytes)
		uint8*	Src;
		// allocate memory to read into
		if (bTreatBufferAsFileReader)
		{
			Src = (uint8*)FMemory::Malloc(GSavingCompressionChunkSize);
			check(((FArchive*)V)->IsLoading());
		}
		else
		{
			Src = (uint8*) V;
		}
		int64		BytesRemaining			= Length;
		// Start at index 1 as first chunk info is summary.
		int32		CurrentChunkIndex		= 1;
		// 2 times the uncompressed size should be more than enough; the compressed data shouldn't be that much larger
		int64		CompressedBufferSize	= 2 * GSavingCompressionChunkSize;
		void*	CompressedBuffer		= FMemory::Malloc( CompressedBufferSize );

		while( BytesRemaining > 0 )
		{
			int64 BytesToCompress = FMath::Min( BytesRemaining, (int64)GSavingCompressionChunkSize );
			int64 CompressedSize	= CompressedBufferSize;

			// read in the next chunk from the reader
			if (bTreatBufferAsFileReader)
			{
				((FArchive*)V)->Serialize(Src, BytesToCompress);
			}

			check(CompressedSize < INT_MAX);
			int32 CompressedSizeInt = (int32)CompressedSize;
			verify( FCompression::CompressMemory( Flags, CompressedBuffer, CompressedSizeInt, Src, BytesToCompress ) );
			CompressedSize = CompressedSizeInt;
			// move to next chunk if not reading from file
			if (!bTreatBufferAsFileReader)
			{
				Src += BytesToCompress;
			}
			Serialize( CompressedBuffer, CompressedSize );
			// Keep track of total compressed size, stored in first chunk.
			CompressionChunks[0].CompressedSize	+= CompressedSize;

			// Update current chunk.
			check(CurrentChunkIndex<TotalChunkCount);
			CompressionChunks[CurrentChunkIndex].CompressedSize		= CompressedSize;
			CompressionChunks[CurrentChunkIndex].UncompressedSize	= BytesToCompress;
			CurrentChunkIndex++;
			
			BytesRemaining -= GSavingCompressionChunkSize;
		}

		// free the buffer we read into
		if (bTreatBufferAsFileReader)
		{
			FMemory::Free(Src);
		}

		// Free allocated memory.
		FMemory::Free( CompressedBuffer );
#endif

		// Overwrite chunk infos by seeking to the beginning, serializing the data and then
		// seeking back to the end.
		auto EndPosition = Tell();
		// Seek to the beginning.
		Seek( StartPosition );
		// Serialize chunk infos.
		for( int32 ChunkIndex=0; ChunkIndex<TotalChunkCount; ChunkIndex++ )
		{
			*this << CompressionChunks[ChunkIndex];
		}
		// Seek back to end.
		Seek( EndPosition );

		// Free intermediate data.
		delete [] CompressionChunks;
	}
}
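
For reference, a minimal usage sketch of SerializeCompressed (not taken from any of the examples above): it assumes the UE4-era API in which IFileManager creates reader/writer archives and COMPRESS_ZLIB is a valid ECompressionFlags value; the file name, buffer size, and contents are made up for illustration.

// Hypothetical round trip through FArchive::SerializeCompressed.
// (In a UE4 module this would typically rely on CoreMinimal.h / HAL/FileManager.h being available.)
TArray<uint8> Payload;
Payload.Init(0x2A, 64 * 1024);

if (FArchive* Writer = IFileManager::Get().CreateFileWriter(TEXT("Example.bin")))
{
	// Saving path: Length is the size of the source buffer being compressed.
	Writer->SerializeCompressed(Payload.GetData(), Payload.Num(), COMPRESS_ZLIB, false);
	delete Writer;
}

if (FArchive* Reader = IFileManager::Get().CreateFileReader(TEXT("Example.bin")))
{
	// Loading path: the caller must already know the uncompressed size and supply a
	// destination buffer large enough to hold it (Length is unused when loading, per the comment above).
	TArray<uint8> Restored;
	Restored.SetNumUninitialized(Payload.Num());
	Reader->SerializeCompressed(Restored.GetData(), Restored.Num(), COMPRESS_ZLIB, false);
	delete Reader;
}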