bool FLevelSequenceObjectReferenceMap::Serialize(FArchive& Ar)
{
	// The entry count is written first so the loading path knows how many
	// key/value pairs follow in the stream.
	int32 NumEntries = Map.Num();
	Ar << NumEntries;

	if (Ar.IsLoading())
	{
		for (int32 EntryIndex = 0; EntryIndex < NumEntries; ++EntryIndex)
		{
			FGuid Key;
			Ar << Key;

			FLevelSequenceObjectReference Value;
			Ar << Value;

			Map.Add(Key, Value);
		}
	}
	else if (Ar.IsSaving() || Ar.IsCountingMemory() || Ar.IsObjectReferenceCollector())
	{
		// Write (or report, for memory-counting / reference-collecting archives)
		// every pair currently held in the map.
		for (auto& Pair : Map)
		{
			Ar << Pair.Key;
			Ar << Pair.Value;
		}
	}
	return true;
}
void UClassProperty::Serialize( FArchive& Ar )
{
	Super::Serialize( Ar );

	// The meta class constrains which UClass values this property may reference.
	Ar << MetaClass;

#if USE_CIRCULAR_DEPENDENCY_LOAD_DEFERRING
	if (Ar.IsLoading() || Ar.IsObjectReferenceCollector())
	{
		// While circular-dependency loads are deferred, MetaClass may still be a
		// linker placeholder; register this property so it is patched up once the
		// real class resolves.
		ULinkerPlaceholderClass* PlaceholderClass = Cast<ULinkerPlaceholderClass>(MetaClass);
		if (PlaceholderClass != nullptr)
		{
			PlaceholderClass->AddReferencingProperty(this);
		}
	}
#endif // USE_CIRCULAR_DEPENDENCY_LOAD_DEFERRING

	if (!MetaClass && !HasAnyFlags(RF_ClassDefaultObject))
	{
		// If we failed to load the MetaClass and we're not a CDO, that means we relied on a class that has been removed or doesn't exist.
		// The most likely cause for this is either an incomplete recompile, or if content was migrated between games that had native class dependencies
		// that do not exist in this game. We allow blueprint classes to continue, because compile on load will error out, and stub the class that was using it
		UClass* OwnerClass = dynamic_cast<UClass*>(GetOwnerStruct());
		const bool bIsLiveNativeOwner =
			OwnerClass &&
			OwnerClass->HasAllClassFlags(CLASS_Native) &&
			!OwnerClass->HasAllClassFlags(CLASS_NewerVersionExists) &&
			(OwnerClass->GetOutermost() != GetTransientPackage());
		if (bIsLiveNativeOwner)
		{
			checkf(false, TEXT("Class property tried to serialize a missing class. Did you remove a native class and not fully recompile?"));
		}
	}
}
bool FEdGraphPinType::Serialize(FArchive& Ar)
{
	// Pin types were not custom-serialized before this version; returning false
	// defers to the default (tagged property) serialization path.
	if (Ar.UE4Ver() < VER_UE4_EDGRAPHPINTYPE_SERIALIZATION)
	{
		return false;
	}

	Ar << PinCategory;
	Ar << PinSubCategory;

	// See: FArchive& operator<<( FArchive& Ar, FWeakObjectPtr& WeakObjectPtr )
	// The PinSubCategoryObject should be serialized into the package.
	if(!Ar.IsObjectReferenceCollector() || Ar.IsModifyingWeakAndStrongReferences() || Ar.IsPersistent())
	{
		// Serialize through a raw UObject* so the archive records a hard reference.
		UObject* Object = PinSubCategoryObject.Get(true);
		Ar << Object;
		if( Ar.IsLoading() || Ar.IsModifyingWeakAndStrongReferences() )
		{
			PinSubCategoryObject = Object;
		}
	}

	Ar << bIsArray;
	Ar << bIsReference;
	Ar << bIsWeakPointer;

	if (Ar.UE4Ver() >= VER_UE4_MEMBERREFERENCE_IN_PINTYPE)
	{
		Ar << PinSubCategoryMemberReference;
	}
	else if (Ar.IsLoading() && Ar.IsPersistent())
	{
		// Older assets stored a delegate pin's signature function directly in
		// PinSubCategoryObject; migrate it into the member-reference representation.
		if ((PinCategory == TEXT("delegate")) || (PinCategory == TEXT("mcdelegate")))
		{
			if (const UFunction* Signature = Cast<const UFunction>(PinSubCategoryObject.Get()))
			{
				PinSubCategoryMemberReference.MemberName = Signature->GetFName();
				PinSubCategoryMemberReference.MemberParent = Signature->GetOwnerClass();
				PinSubCategoryObject = NULL;
			}
			else
			{
				// BUGFIX: this was ensure(true), which can never fire. This branch is
				// the failure case — a delegate pin whose sub-category object is
				// missing or not a UFunction — and is exactly what the assertion was
				// meant to report.
				ensure(false);
			}
		}
	}

	if (Ar.UE4Ver() >= VER_UE4_SERIALIZE_PINTYPE_CONST)
	{
		Ar << bIsConst;
	}
	else if (Ar.IsLoading())
	{
		// Assets saved before bIsConst existed default to non-const pins.
		bIsConst = false;
	}

	return true;
}
void UDelegateProperty::Serialize( FArchive& Ar )
{
	Super::Serialize( Ar );

	// The function whose parameter list defines this delegate's signature.
	Ar << SignatureFunction;

#if USE_CIRCULAR_DEPENDENCY_LOAD_DEFERRING
	if (Ar.IsLoading() || Ar.IsObjectReferenceCollector())
	{
		// During deferred dependency loading the signature may still be a linker
		// placeholder; register this property so it is fixed up when the real
		// function resolves.
		ULinkerPlaceholderFunction* PlaceholderFunction = Cast<ULinkerPlaceholderFunction>(SignatureFunction);
		if (PlaceholderFunction != nullptr)
		{
			PlaceholderFunction->AddReferencingProperty(this);
		}
	}
#endif // USE_CIRCULAR_DEPENDENCY_LOAD_DEFERRING
}
void UObjectPropertyBase::Serialize( FArchive& Ar )
{
	Super::Serialize( Ar );

	// Class of object this property is allowed to reference.
	Ar << PropertyClass;

#if USE_CIRCULAR_DEPENDENCY_LOAD_DEFERRING
	const bool bMayHoldPlaceholder = Ar.IsLoading() || Ar.IsObjectReferenceCollector();
	if (bMayHoldPlaceholder)
	{
		// PropertyClass can resolve to a placeholder while circular dependencies
		// are being deferred; register so the placeholder can patch us up later.
		if (auto* Placeholder = Cast<ULinkerPlaceholderClass>(PropertyClass))
		{
			Placeholder->AddReferencingProperty(this);
		}
	}
#endif // USE_CIRCULAR_DEPENDENCY_LOAD_DEFERRING
}
// UObject interface. void UTransBuffer::Serialize( FArchive& Ar ) { check( !Ar.IsPersistent() ); CheckState(); Super::Serialize( Ar ); if ( IsObjectSerializationEnabled() || !Ar.IsObjectReferenceCollector() ) { Ar << UndoBuffer; } Ar << ResetReason << UndoCount << ActiveCount << ActiveRecordCounts; CheckState(); }
void ULazyObjectProperty::SerializeItem( FArchive& Ar, void* Value, void const* Defaults ) const
{
	// We never serialize our reference while the garbage collector is harvesting
	// references to objects, because we don't want lazy pointers to keep objects
	// from being garbage collected.
	if (Ar.IsObjectReferenceCollector() && !Ar.IsModifyingWeakAndStrongReferences())
	{
		return;
	}

	// Remember the previously-referenced object so validation only runs when the
	// serialized value actually changed.
	UObject* PreviousObject = GetObjectPropertyValue(Value);
	Ar << *(FLazyObjectPtr*)Value;

	const bool bValueChanged = (PreviousObject != GetObjectPropertyValue(Value));
	if ((Ar.IsLoading() || Ar.IsModifyingWeakAndStrongReferences()) && bValueChanged)
	{
		CheckValidObject(Value);
	}
}
void UAssetObjectProperty::SerializeItem( FArchive& Ar, void* Value, int32 MaxReadBytes, void const* Defaults ) const
{
	// We never serialize our reference while the garbage collector is harvesting references
	// to objects, because we don't want asset pointers to keep objects from being garbage collected
	// Allow persistent archives so they can keep track of string references. (e.g. FArchiveSaveTagImports)
	// CONSISTENCY FIX: the sibling SerializeItem overload already lets persistent
	// archives through; without IsPersistent() here, string asset references routed
	// via this overload were invisible to archives such as FArchiveSaveTagImports.
	if( !Ar.IsObjectReferenceCollector() || Ar.IsModifyingWeakAndStrongReferences() || Ar.IsPersistent() )
	{
		// Remember the old identifier so we validate the object only when it changed.
		FAssetPtr OldValue = *(FAssetPtr*)Value;
		Ar << *(FAssetPtr*)Value;

		if (Ar.IsLoading() || Ar.IsModifyingWeakAndStrongReferences())
		{
			if (OldValue.GetUniqueID() != ((FAssetPtr*)Value)->GetUniqueID())
			{
				CheckValidObject(Value);
			}
		}
	}
}
void UAssetObjectProperty::SerializeItem( FArchive& Ar, void* Value, void const* Defaults ) const
{
	// Asset pointers are deliberately hidden from the GC's reference collector so
	// they never keep their targets alive; persistent archives are still allowed
	// through so they can keep track of string references (e.g. FArchiveSaveTagImports).
	const bool bSkipForReferenceCollection = Ar.IsObjectReferenceCollector()
		&& !Ar.IsModifyingWeakAndStrongReferences()
		&& !Ar.IsPersistent();
	if (bSkipForReferenceCollection)
	{
		return;
	}

	FAssetPtr* AssetPtr = (FAssetPtr*)Value;
	FAssetPtr PreviousValue = *AssetPtr;
	Ar << *AssetPtr;

	// Only validate the referenced object when the serialized identifier changed.
	if (Ar.IsLoading() || Ar.IsModifyingWeakAndStrongReferences())
	{
		if (PreviousValue.GetUniqueID() != AssetPtr->GetUniqueID())
		{
			CheckValidObject(Value);
		}
	}
}
// UObject interface. void UTransBuffer::Serialize( FArchive& Ar ) { check( !Ar.IsPersistent() ); CheckState(); // Handle garbage collection. Super::Serialize( Ar ); // We cannot support undoing across GC if we allow it to eliminate references so we need // to suppress it. if ( IsObjectSerializationEnabled() || !Ar.IsObjectReferenceCollector() ) { Ar.AllowEliminatingReferences( false ); Ar << UndoBuffer; Ar.AllowEliminatingReferences( true ); } Ar << ResetReason << UndoCount << ActiveCount; CheckState(); }
/** * Serialize function used to serialize this bulk data structure. * * @param Ar Archive to serialize with * @param Owner Object owning the bulk data * @param Idx Index of bulk data item being serialized */ void FUntypedBulkData::Serialize( FArchive& Ar, UObject* Owner, int32 Idx ) { check( LockStatus == LOCKSTATUS_Unlocked ); if(Ar.IsTransacting()) { // Special case for transacting bulk data arrays. // constructing the object during load will save it to the transaction buffer. If it tries to load the bulk data now it will try to break it. bool bActuallySave = Ar.IsSaving() && (!Owner || !Owner->HasAnyFlags(RF_NeedLoad)); Ar << bActuallySave; if (bActuallySave) { if(Ar.IsLoading()) { // Flags for bulk data. Ar << BulkDataFlags; // Number of elements in array. Ar << ElementCount; // Allocate bulk data. check(bShouldFreeOnEmpty); BulkData = FMemory::Realloc( BulkData, GetBulkDataSize() ); // Deserialize bulk data. SerializeBulkData( Ar, BulkData ); } else if(Ar.IsSaving()) { // Flags for bulk data. Ar << BulkDataFlags; // Number of elements in array. Ar << ElementCount; // Don't attempt to load or serialize BulkData if the current size is 0. // This could be a newly constructed BulkData that has not yet been loaded, // and allocating 0 bytes now will cause a crash when we load. if (GetBulkDataSize() > 0) { // Make sure bulk data is loaded. MakeSureBulkDataIsLoaded(); // Serialize bulk data. SerializeBulkData(Ar, BulkData); } } } } else if( Ar.IsPersistent() && !Ar.IsObjectReferenceCollector() && !Ar.ShouldSkipBulkData() ) { #if TRACK_BULKDATA_USE FThreadSafeBulkDataToObjectMap::Get().Add( this, Owner ); #endif // Offset where the bulkdata flags are stored int64 SavedBulkDataFlagsPos = Ar.Tell(); Ar << BulkDataFlags; // Number of elements in array. Ar << ElementCount; // We're loading from the persistent archive. if( Ar.IsLoading() ) { Filename = TEXT(""); // @todo when Landscape (and others?) 
only Lock/Unlock once, we can enable this if (false) // FPlatformProperties::RequiresCookedData()) { // Bulk data that is being serialized via seekfree loading is single use only. This allows us // to free the memory as e.g. the bulk data won't be attached to an archive in the case of // seek free loading. BulkDataFlags |= BULKDATA_SingleUse; } // Size on disk, which in the case of compression is != GetBulkDataSize() Ar << BulkDataSizeOnDisk; Ar << BulkDataOffsetInFile; // fix up the file offset if (Owner != NULL && Owner->GetLinker()) { BulkDataOffsetInFile += Owner->GetLinker()->Summary.BulkDataStartOffset; } // determine whether the payload is stored inline or at the end of the file bool bPayloadInline = !(BulkDataFlags&BULKDATA_PayloadAtEndOfFile); // check( (bPayloadInline && BulkDataOffsetInFile == Ar.Tell()) || // (!bPayloadInline && BulkDataOffsetInFile > Ar.Tell())); // We're allowing defered serialization. if( Ar.IsAllowingLazyLoading() && Owner != NULL) { Linker = Owner->GetLinker(); #if WITH_EDITOR check(Linker); Ar.AttachBulkData( Owner, this ); AttachedAr = &Ar; #else check(Linker.IsValid()); Filename = Linker->Filename; #endif // WITH_EDITOR // only skip over payload, if it's stored inline if (bPayloadInline) { Ar.Seek( Ar.Tell() + BulkDataSizeOnDisk ); } } // Serialize the bulk data right away. else { // memory for bulk data can come from preallocated GPU-accessible resource memory or default to system memory BulkData = GetBulkDataResourceMemory(Owner,Idx); if( !BulkData ) { BulkData = FMemory::Realloc( BulkData, GetBulkDataSize() ); } if (bPayloadInline) { // if the payload is stored inline, just serialize it SerializeBulkData( Ar, BulkData ); } else { // if the payload is NOT stored inline ... 
// store the current file offset int64 CurOffset = Ar.Tell(); // seek to the location in the file where the payload is stored Ar.Seek(BulkDataOffsetInFile); // serialize the payload SerializeBulkData( Ar, BulkData ); // seek to the location we came from Ar.Seek(CurOffset); } } } // We're saving to the persistent archive. else if( Ar.IsSaving() ) { // check if we save the package compressed UPackage* Pkg = Owner ? dynamic_cast<UPackage*>(Owner->GetOutermost()) : nullptr; if (Pkg && !!(Pkg->PackageFlags & PKG_StoreCompressed) ) { ECompressionFlags BaseCompressionMethod = COMPRESS_Default; if (Ar.IsCooking()) { BaseCompressionMethod = Ar.CookingTarget()->GetBaseCompressionMethod(); } StoreCompressedOnDisk(BaseCompressionMethod); } // Remove single element serialization requirement before saving out bulk data flags. BulkDataFlags &= ~BULKDATA_ForceSingleElementSerialization; // Make sure bulk data is loaded. MakeSureBulkDataIsLoaded(); // Only serialize status information if wanted. int64 SavedBulkDataSizeOnDiskPos = INDEX_NONE; int64 SavedBulkDataOffsetInFilePos = INDEX_NONE; // Keep track of position we are going to serialize placeholder BulkDataSizeOnDisk. SavedBulkDataSizeOnDiskPos = Ar.Tell(); BulkDataSizeOnDisk = INDEX_NONE; // And serialize the placeholder which is going to be overwritten later. Ar << BulkDataSizeOnDisk; // Keep track of position we are going to serialize placeholder BulkDataOffsetInFile. SavedBulkDataOffsetInFilePos = Ar.Tell(); BulkDataOffsetInFile = INDEX_NONE; // And serialize the placeholder which is going to be overwritten later. Ar << BulkDataOffsetInFile; // try to get the linkersave object ULinkerSave* LinkerSave = dynamic_cast<ULinkerSave*>(Ar.GetLinker()); // determine whether we are going to store the payload inline or not. 
bool bStoreInline = !!(BulkDataFlags&BULKDATA_ForceInlinePayload) || LinkerSave == NULL; if (!bStoreInline) { // set the flag indicating where the payload is stored BulkDataFlags |= BULKDATA_PayloadAtEndOfFile; // with no LinkerSave we have to store the data inline check(LinkerSave != NULL); // add the bulkdata storage info object to the linkersave int32 Index = LinkerSave->BulkDataToAppend.AddZeroed(1); ULinkerSave::FBulkDataStorageInfo& BulkStore = LinkerSave->BulkDataToAppend[Index]; BulkStore.BulkDataOffsetInFilePos = SavedBulkDataOffsetInFilePos; BulkStore.BulkDataSizeOnDiskPos = SavedBulkDataSizeOnDiskPos; BulkStore.BulkData = this; // Serialize bulk data into the storage info BulkDataSizeOnDisk = -1; } else { // set the flag indicating where the payload is stored BulkDataFlags &= ~BULKDATA_PayloadAtEndOfFile; int64 SavedBulkDataStartPos = Ar.Tell(); // Serialize bulk data. SerializeBulkData( Ar, BulkData ); // store the payload endpos int64 SavedBulkDataEndPos = Ar.Tell(); checkf(SavedBulkDataStartPos >= 0 && SavedBulkDataEndPos >= 0, TEXT("Bad archive positions for bulkdata. StartPos=%d EndPos=%d"), SavedBulkDataStartPos, SavedBulkDataEndPos); BulkDataSizeOnDisk = SavedBulkDataEndPos - SavedBulkDataStartPos; BulkDataOffsetInFile = SavedBulkDataStartPos; } // store current file offset before seeking back int64 CurrentFileOffset = Ar.Tell(); // Seek back and overwrite the flags Ar.Seek(SavedBulkDataFlagsPos); Ar << BulkDataFlags; // Seek back and overwrite placeholder for BulkDataSizeOnDisk Ar.Seek( SavedBulkDataSizeOnDiskPos ); Ar << BulkDataSizeOnDisk; // Seek back and overwrite placeholder for BulkDataOffsetInFile Ar.Seek( SavedBulkDataOffsetInFilePos ); Ar << BulkDataOffsetInFile; // Seek to the end of written data so we don't clobber any data in subsequent write // operations Ar.Seek(CurrentFileOffset); } } }