// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - void IndirectCallPromotion::MergeReturnedValues(BlockList& callBlocks, Block* continuationBlock, CallInstr* instr) { // If the call returns 'void' or the result is not used // there are no values to merge. if(instr->IsVoid() || (instr->HasDestinationOp() == false)) { return; } auto unit = callBlocks[0]->ParentFunction()->ParentUnit(); auto& references = unit->References(); // Create the 'phi' that merges the returned values. auto phiResultOp = Temporary::GetTemporary(instr->ResultOp()->GetType()); auto phiInstr = PhiInstr::GetPhi(phiResultOp, callBlocks.Count()); continuationBlock->InsertInstructionFirst(phiInstr); // Now add the incoming operands. for(int i = 0; i < callBlocks.Count(); i++) { auto beforeGoto = callBlocks[i]->LastInstruction()->PreviousInstruction(); auto callInstr = beforeGoto->As<CallInstr>(); DebugValidator::IsNotNull(callInstr); DebugValidator::IsNotNull(callInstr->ResultOp()); auto blockRef = references.GetBlockRef(callBlocks[i]); phiInstr->AddOperand(callInstr->ResultOp(), blockRef); } // The original returned value is replaced by the 'phi' result. instr->ResultOp()->ReplaceWith(phiResultOp); }
void DRReferredToBy( dr_handle entry, void * data, DRSYMREF callback )
/********************************************************************/
/* Enumerate the symbols that refer back to 'entry', invoking
 * 'callback' (with the client's 'data') through the ByHook filter. */
{
    ByData      searchInfo;

    searchInfo.entry = entry;
    References( REFERREDBY, entry, &searchInfo, ByHook, data, callback );
}
void DRRefersTo( dr_handle entry, void *data, DRSYMREF callback )
/***************************************************************/
/* Enumerate the symbols that 'entry' itself refers to, invoking
 * 'callback' (with the client's 'data') through the ToHook filter. */
{
    ToData      searchInfo;

    searchInfo.entry = entry;
    References( REFERSTO, entry, &searchInfo, ToHook, data, callback );
}
void DRReferencedSymbols( dr_sym_type search, void * data, DRSYMREF callback )
/****************************************************************************/
/* Enumerate referenced symbols of the given symbol type across the whole
 * scope (entry handle 0), reporting each through 'callback' via RefHook. */
{
    RefData     searchInfo;

    searchInfo.search = search;
    References( REFERREDBY, 0L, &searchInfo, RefHook, data, callback );
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - void VariableAnalysis::InsertAggregateCandidates(OperandVariableDict& dict, const VariableList& varList) { auto unit = funct_->ParentUnit(); for(int i = 0; i < varList.Count(); i++) { auto variable = varList[i]; if((variable->IsArray() || variable->IsRecord() || variable->IsPointer()) == false) { continue; } variable->SetIsAddresTaken(false); // Presume it's not. auto variableRefType = unit->Types().GetPointer(variable->GetType()); auto variableRef = unit->References().GetVariableRef(variable, variableRefType); dict.Add(variableRef, OVPair(variableRef, variable)); } }
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - void IndirectCallPromotion::ConnectGeneratedBlocks(BlockList& testBlocks, BlockList& callBlocks) { // Connect the blocks based on the following pattern: // TEST_BLOCK0: // t0 = ucmp targetOp, FUNCT_REF0 // if t0, CALL_BLOCK0, TEST_BLOCK1 // CALL_BLOCK0: // call FUNCT_REF0 // goto CONTINUATION_BLOCK // TEST_BLOCK1: // t1 = ucmp targetOp, FUNCT_REF1 // if t1, CALL_BLOCK1, TEST_BLOCK2 // ... // CALL_BLOCK_N: // only if unpromoted targets // call targetOp // goto CONTINUATION_BLOCK // CONTINUATION_BLOCK: auto unit = callBlocks[0]->ParentFunction()->ParentUnit(); auto& references = unit->References(); for(int i = 0; i < testBlocks.Count(); i++) { auto testBlock = testBlocks[i]; auto ucmpResultOp = testBlocks[i]->LastInstruction()->GetDestinationOp(); auto trueBlockRef = references.GetBlockRef(callBlocks[i]); Block* falseBlock; if((i + 1) < testBlocks.Count()) { // There is a next target test. falseBlock = testBlocks[i + 1]; } else { // Unpromoted targets exist, always do the call. falseBlock = callBlocks[i + 1]; } auto falseBlockRef = references.GetBlockRef(falseBlock); auto ifInstr = IfInstr::GetIf(ucmpResultOp, trueBlockRef, falseBlockRef); testBlock->InsertInstruction(ifInstr); } }
rt_private EIF_BOOLEAN rdeepiter(register EIF_REFERENCE target, register EIF_REFERENCE source)
{
	/* Iterate deep isomorphism on normal objects `target' and `source'.
	 * It assumes that `source' and `target' are not NULL and isomorphic.
	 * Return a boolean.
	 */
	long count;			/* Number of references to walk */
	EIF_REFERENCE s_ref, t_ref;

	/* Walk the references of both objects in lock-step. */
	for (count = References(Dtype(target)); count > 0; count--) {
		s_ref = *(EIF_REFERENCE *) source;
		t_ref = *(EIF_REFERENCE *) target;
		source = (EIF_REFERENCE) ((EIF_REFERENCE *) source + 1);
		target = (EIF_REFERENCE) ((EIF_REFERENCE *) target + 1);

		if (!s_ref && !t_ref)
			continue;			/* Both void: trivially isomorphic */
		if (!s_ref || !t_ref)
			return EIF_FALSE;	/* Only one void: shapes differ */
		if (!rdeepiso(t_ref, s_ref))
			return EIF_FALSE;	/* Referenced objects not deeply isomorphic */
	}
	return EIF_TRUE;
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - GotoInstr* IndirectCallPromotion::CreateGoto(Block* targetBlock) { auto unit = targetBlock->ParentFunction()->ParentUnit(); auto targetBlockRef = unit->References().GetBlockRef(targetBlock); return GotoInstr::GetGoto(targetBlockRef); }
rt_private uint32 pst_store(struct rt_store_context *a_context, EIF_REFERENCE object, uint32 a_object_count)
{
	/* Second pass of the store mechanism: writing on the disk.
	 * Recursively traverses `object' and its references depth-first,
	 * writing each not-yet-stored object through the context's write
	 * function and returning the updated count of stored objects.
	 * NOTE(review): relies on globals declared elsewhere (`need_index',
	 * `make_index', `server', `file_position', `parsing_position') --
	 * their exact contracts are not visible from here. */
	EIF_REFERENCE o_ref;					/* Currently visited reference */
	EIF_REFERENCE o_ptr;					/* Cursor over the object's reference slots */
	long i, nb_references;
	union overhead *zone = HEADER(object);	/* Object's malloc info zone */
	uint16 flags;							/* Object's header flags */
	int is_expanded, has_volatile_attributes = 0;
	EIF_BOOLEAN object_needs_index;
	long saved_file_pos = 0;				/* File position recorded for `make_index' */
	long saved_object_count = a_object_count;

	/* Both index hooks must be set together, or both unset. */
	REQUIRE ("valid need_index and make_index", (need_index && make_index) || (!need_index && !make_index));

	if (need_index) {
		object_needs_index = (EIF_BOOLEAN) ((EIF_BOOLEAN (*)(EIF_REFERENCE, EIF_REFERENCE))need_index) (server,object);
		if (object_needs_index) {
			/* If the object needs an index, the buffer is flushed so that
			 * a new compression header is stored just before the object,
			 * thus the decompression will work when starting the retrieve
			 * there. */
			a_context->flush_buffer_function();
			saved_file_pos = file_position + parsing_position;
		}
	} else {
		object_needs_index = 0;
	}

	flags = zone->ov_flags;
	is_expanded = eif_is_nested_expanded(flags);
	if (!(is_expanded || (flags & EO_STORE)))
		return a_object_count;	/* Unmarked means already stored */
	else if (!is_expanded)
		a_object_count++;		/* Expanded objects are stored within their enclosing object */

	zone->ov_flags &= ~EO_STORE;	/* Unmark it so it is not stored twice */

#ifdef DEBUG
	printf("object 0x%" EIF_POINTER_DISPLAY " [%s %" EIF_POINTER_DISPLAY "]\n", (rt_uint_ptr) object, System(zone->ov_dtype).cn_generator, (rt_uint_ptr) zone->ov_flags);
#endif

	/* Evaluation of the number of references of the object */
	if (flags & EO_SPEC) {		/* Special object */
		if (flags & EO_REF) {	/* Special of reference/composite types */
			EIF_INTEGER count, elem_size;
			EIF_REFERENCE ref;

			count = RT_SPECIAL_COUNT(object);
			if (flags & EO_TUPLE) {
				EIF_TYPED_VALUE * l_item = (EIF_TYPED_VALUE *) object;
				/* Don't forget that first element of TUPLE is the BOOLEAN
				 * `object_comparison' attribute -- skip it. */
				l_item++;
				count--;
				/* Store only the reference-typed items of the TUPLE. */
				for (; count > 0; count--, l_item++) {
					if (eif_is_reference_tuple_item(l_item)) {
						o_ref = eif_reference_tuple_item(l_item);
						if (o_ref) {
							a_object_count = pst_store (a_context, o_ref, a_object_count);
						}
					}
				}
			} else if (!(flags & EO_COMP)) {	/* Special of references */
				/* Each slot is a plain reference: recurse on non-void ones. */
				for (ref = object; count > 0; count--, ref = (EIF_REFERENCE) ((EIF_REFERENCE *) ref + 1)) {
					o_ref = *(EIF_REFERENCE *) ref;
					if (o_ref != (EIF_REFERENCE) 0)
						a_object_count = pst_store (a_context, o_ref,a_object_count);
				}
			} else {	/* Special of composites */
				/* Elements are expanded sub-objects laid out inline;
				 * step over them by element size (header included). */
				elem_size = RT_SPECIAL_ELEM_SIZE(object);
				for (ref = object + OVERHEAD; count > 0; count --, ref += elem_size) {
					a_object_count = pst_store (a_context, ref,a_object_count);
				}
			}
		}
	} else {	/* Normal object */
		nb_references = References(zone->ov_dtype);

		/* Traversal of references of `object' */
		for ( o_ptr = object, i = 0; i < nb_references; i++, o_ptr = (EIF_REFERENCE) (((EIF_REFERENCE *) o_ptr) +1) ) {
			o_ref = *(EIF_REFERENCE *)o_ptr;
			if (o_ref) {
				/* Transient (volatile) attributes are not stored; remember
				 * their presence so the writer can account for them. */
				if (!EIF_IS_TRANSIENT_ATTRIBUTE(System(zone->ov_dtype), i)) {
					a_object_count = pst_store (a_context, o_ref, a_object_count);
				} else {
					has_volatile_attributes = 1;
				}
			}
		}
	}

	if (!is_expanded) {
		a_context->object_write_function(object, has_volatile_attributes);	/* write the object */
	}

	/* Call `make_index' on `server' with `object', reporting the file
	 * position saved earlier and how many objects this subtree stored. */
	if (object_needs_index) {
		(make_index)(server, object, saved_file_pos, a_object_count - saved_object_count);
	}

	return a_object_count;
}
rt_private void expanded_update(EIF_REFERENCE source, EIF_REFERENCE target, int shallow_or_deep)
{
	/*
	 * Update recursively:
	 *   1. expanded references for target `target'.
	 *   2. offsets within subobjects since `source' and `target' may
	 *      come from different containers.
	 * (This is done since copying the source to the target caused the
	 * target's subobjects to have the same offsets and reference points to
	 * its subobjects as the `source').
	 * It assumes that `target' is not a special object and that it
	 * is a composite object.
	 */
	union overhead *zone;			/* Target Object header */
	long nb_ref;					/* Number of references */
	uint16 flags;					/* Target flags */
	EIF_REFERENCE t_reference;		/* Target reference */
	EIF_REFERENCE s_reference;		/* Source reference */
	EIF_REFERENCE t_enclosing;		/* Enclosing object of target */
	EIF_REFERENCE s_enclosing;		/* Enclosing object of source */
	rt_uint_ptr t_offset = 0;		/* Offset within target enclosing object */
	rt_uint_ptr s_offset = 0;		/* Offset within source enclosing object */
	rt_uint_ptr temp_offset = 0;	/* Sub-object offset relative to source object */
	rt_uint_ptr s_sub_offset = 0;	/* Subobject offset within source enclosing */
	rt_uint_ptr offset1 = 0;		/* Source's own offset within its enclosing */
	rt_uint_ptr offset2 = 0;		/* Target's own offset within its enclosing */
	EIF_REFERENCE t_expanded;
	EIF_REFERENCE s_expanded;

	/* Compute the enclosing object (i.e. the object which contains the target).
	 * Normally this is the object itself, unless the target is expanded.
	 */
	t_enclosing = target;			/* Default enclosing object is itself */
	s_enclosing = source;			/* Default enclosing object is itself */

	zone = HEADER(target);			/* Malloc info zone */
	flags = zone->ov_flags;			/* Eiffel object flags */
	if (eif_is_nested_expanded(flags)) {
		/* For a nested expanded object, ov_size & B_SIZE holds its offset
		 * within the enclosing object, not a size. */
		offset2 = zone->ov_size & B_SIZE;
		t_offset = zone->ov_size & B_SIZE;	/* Target expanded offset within object */
		t_enclosing = target - t_offset;	/* Address of target enclosing object */
	}
	zone = HEADER(source);
	flags = zone->ov_flags;			/* Source eiffel object flags */
	if (eif_is_nested_expanded(flags)) {
		offset1 = zone->ov_size & B_SIZE;	/* Expanded offset within object */
		s_offset = zone->ov_size & B_SIZE;
		s_enclosing = source - s_offset;	/* Address of enclosing object */
	}
	nb_ref = References(zone->ov_dtype);	/* References in target */

	/* Iteration on the references of the object; source and target
	 * reference slots are walked in lock-step via the two offsets. */
	for ( /* empty */; nb_ref > 0; nb_ref--, t_offset += REFSIZ, s_offset += REFSIZ ) {
		t_reference = *(EIF_REFERENCE *) (t_enclosing + t_offset);
		if (t_reference == NULL)
			continue;			/* Void reference */
		zone = HEADER(t_reference);
		flags = zone->ov_flags;
		if (eif_is_nested_expanded(flags)) { /* Object is expanded */
			/* We reached an intra reference on an expanded object. There are
			 * two points to consider:
			 *  1/ updating intra reference for garbage collection
			 *  2/ possible recursion on the expanded object itself
			 * The size indicated via the zone header is the offset of the
			 * expanded object within its enclosing father.
			 */
			s_reference = *(EIF_REFERENCE *) (s_enclosing + s_offset);
			s_sub_offset = HEADER(s_reference)->ov_size & B_SIZE;	/* Get offset from source expanded */
			temp_offset = s_sub_offset - offset1;	/* Offset relative to the source object itself */

			/* Corresponding expanded in target object */
			t_expanded = t_enclosing + offset2 + temp_offset;

			/* Update reference point to sub-object */
			*(EIF_REFERENCE *) (t_enclosing + t_offset) = t_expanded;
			t_reference = *(EIF_REFERENCE *) (t_enclosing + t_offset);

			/* Update offset in header so GC sees the right enclosure */
			zone = HEADER(t_reference);
			zone->ov_size = offset2 + temp_offset;
			s_expanded = s_enclosing + s_sub_offset;

			/* Recursive updating is needed if we are in a DEEP clone or if
			 * the object is composite, i.e. contains expanded objects.
			 */
			if (flags & EO_COMP || shallow_or_deep == DEEP)
				expanded_update(s_expanded, t_expanded, shallow_or_deep);
		} else if (shallow_or_deep == DEEP) { /* Not expanded */
			/* Run rdeepclone recursively only if the reference is not a C
			 * pointer, i.e. does not refer to a eif_malloc'ed C object which
			 * happens to have been attached to an Eiffel reference.
			 */
			rdeepclone(t_reference, t_enclosing, t_offset);
		}
	}
}
/**
 * Recursively builds the tree-map node hierarchy for the given set of asset packages,
 * loading each asset to measure its resource size and descending into its references.
 * Assets encountered more than once (from different root-level assets) are reparented
 * into a single "shared" group node under the tree root.
 *
 * NOTE(review): mutates member state `NodeSizeMapDataMap` and reads member
 * `RootAssetPackageNames` -- both declared outside this view; confirm their contracts.
 *
 * @param AssetRegistryModule       Asset registry used to look up asset data/dependencies.
 * @param InAssetThumbnailPool      Thumbnail pool (passed through to recursive calls).
 * @param VisitedAssetPackageNames  In/out map of package name -> tree node already added.
 * @param AssetPackageNames         Package names to process at this level.
 * @param Node                      Parent tree node for newly created children.
 * @param SharedRootNode            In/out "shared" group node; created lazily on first reuse.
 * @param NumAssetsWhichFailedToLoad  In/out counter of assets that could not be loaded.
 */
void SSizeMap::GatherDependenciesRecursively( FAssetRegistryModule& AssetRegistryModule, TSharedPtr<FAssetThumbnailPool>& InAssetThumbnailPool, TMap<FName, TSharedPtr<FTreeMapNodeData>>& VisitedAssetPackageNames, const TArray<FName>& AssetPackageNames, const TSharedPtr<FTreeMapNodeData>& Node, TSharedPtr<FTreeMapNodeData>& SharedRootNode, int32& NumAssetsWhichFailedToLoad )
{
	for( const FName AssetPackageName : AssetPackageNames )
	{
		// Have we already added this asset to the tree?  If so, we'll either move it to a "shared" group or (if it's referenced again by the same
		// root-level asset) ignore it
		if( VisitedAssetPackageNames.Contains( AssetPackageName ) )
		{
			// OK, we've determined that this asset has already been referenced by something else in our tree.  We'll move it to a "shared" group
			// so all of the assets that are referenced in multiple places can be seen together.
			TSharedPtr<FTreeMapNodeData> ExistingNode = VisitedAssetPackageNames[ AssetPackageName ];

			// Is the existing node not already under the "shared" group?  Note that it might still be (indirectly) under
			// the "shared" group, in which case we'll still want to move it up to the root since we've figured out that it is
			// actually shared between multiple assets which themselves may be shared
			if( ExistingNode->Parent != SharedRootNode.Get() )
			{
				// Don't bother moving any of the assets at the root level into a "shared" bucket.  We're only trying to best
				// represent the memory used when all of the root-level assets have become loaded.  It's OK if root-level assets
				// are referenced by other assets in the set -- we don't need to indicate they are shared explicitly
				FTreeMapNodeData* ExistingNodeParent = ExistingNode->Parent;
				check( ExistingNodeParent != nullptr );
				const bool bExistingNodeIsAtRootLevel = ExistingNodeParent->Parent == nullptr || RootAssetPackageNames.Contains( AssetPackageName );
				if( !bExistingNodeIsAtRootLevel )
				{
					// OK, the current asset (AssetPackageName) is definitely not a root level asset, but its already in the tree
					// somewhere as a non-shared, non-root level asset.  We need to make sure that this Node's reference is not from the
					// same root-level asset as the ExistingNodeInTree.  Otherwise, there's no need to move it to a 'shared' group.
					FTreeMapNodeData* MyParentNode = Node.Get();
					check( MyParentNode != nullptr );

					// Climb to the node directly under the root: that is this reference's root-level asset node.
					FTreeMapNodeData* MyRootLevelAssetNode = MyParentNode;
					while( MyRootLevelAssetNode->Parent != nullptr && MyRootLevelAssetNode->Parent->Parent != nullptr )
					{
						MyRootLevelAssetNode = MyRootLevelAssetNode->Parent;
					}
					if( MyRootLevelAssetNode->Parent == nullptr )
					{
						// No root asset (Node must be a root level asset itself!)
						MyRootLevelAssetNode = nullptr;
					}

					// Find the existing node's root level asset node
					FTreeMapNodeData* ExistingNodeRootLevelAssetNode = ExistingNodeParent;
					while( ExistingNodeRootLevelAssetNode->Parent->Parent != nullptr )
					{
						ExistingNodeRootLevelAssetNode = ExistingNodeRootLevelAssetNode->Parent;
					}

					// If we're being referenced by another node within the same asset, no need to move it to a 'shared' group.
					if( MyRootLevelAssetNode != ExistingNodeRootLevelAssetNode )
					{
						// This asset was already referenced by something else (or was in our top level list of assets to display sizes for)
						if( !SharedRootNode.IsValid() )
						{
							// Find the root-most tree node
							FTreeMapNodeData* RootNode = MyParentNode;
							while( RootNode->Parent != nullptr )
							{
								RootNode = RootNode->Parent;
							}

							// Lazily create the "shared" group node directly under the root.
							SharedRootNode = MakeShareable( new FTreeMapNodeData() );
							RootNode->Children.Add( SharedRootNode );
							SharedRootNode->Parent = RootNode;	// Keep back-pointer to parent node
						}

						// Reparent the node that we've now determined to be shared
						ExistingNode->Parent->Children.Remove( ExistingNode );
						SharedRootNode->Children.Add( ExistingNode );
						ExistingNode->Parent = SharedRootNode.Get();
					}
				}
			}
		}
		else
		{
			// This asset is new to us so far!  Let's add it to the tree.  Later as we descend through references, we might find that the
			// asset is referenced by something else as well, in which case we'll pull it out and move it to a "shared" top-level box

			// Don't bother showing code references
			const FString AssetPackageNameString = AssetPackageName.ToString();
			if( !AssetPackageNameString.StartsWith( TEXT( "/Script/" ) ) )
			{
				FTreeMapNodeDataRef ChildTreeMapNode = MakeShareable( new FTreeMapNodeData() );
				Node->Children.Add( ChildTreeMapNode );
				ChildTreeMapNode->Parent = Node.Get();	// Keep back-pointer to parent node

				VisitedAssetPackageNames.Add( AssetPackageName, ChildTreeMapNode );

				FNodeSizeMapData& NodeSizeMapData = NodeSizeMapDataMap.Add( ChildTreeMapNode );

				// Set some defaults for this node.  These will be used if we can't actually locate the asset.
				// @todo sizemap urgent: We need a better indication in the UI when there are one or more missing assets.  Because missing assets have a size
				//    of zero, they are nearly impossible to zoom into.  At the least, we should have some Output Log spew when assets cannot be loaded
				NodeSizeMapData.AssetData.AssetName = AssetPackageName;
				NodeSizeMapData.AssetData.AssetClass = FName( *LOCTEXT( "MissingAsset", "MISSING!" ).ToString() );
				NodeSizeMapData.AssetSize = 0;
				NodeSizeMapData.bHasKnownSize = false;

				// Find the asset using the asset registry
				// @todo sizemap: Asset registry-based reference gathering is faster but possibly not as exhaustive (no PostLoad created references, etc.)  Maybe should be optional?
				// @todo sizemap: With AR-based reference gathering, sometimes the size map is missing root level dependencies until you reopen it a few times (Buggy BP)
				// @todo sizemap: With AR-based reference gathering, reference changes at editor-time do not appear in the Size Map until you restart
				// @todo sizemap: With AR-based reference gathering, opening the size map for all engine content caused the window to not respond until a restart
				// @todo sizemap: We don't really need the asset registry given we need to load the objects to figure out their size, unless we make that AR-searchable.
				//   ---> This would allow us to not have to wait for AR initialization.  But if we made size AR-searchable, we could run very quickly for large data sets!
				const bool bUseAssetRegistryForDependencies = false;

				const FString AssetPathString = AssetPackageNameString + TEXT(".") + FPackageName::GetLongPackageAssetName( AssetPackageNameString );
				const FAssetData FoundAssetData = AssetRegistryModule.Get().GetAssetByObjectPath( FName( *AssetPathString ) );
				if( FoundAssetData.IsValid() )
				{
					NodeSizeMapData.AssetData = FoundAssetData;

					// Now actually load up the asset.  We need it in memory in order to accurately determine its size.
					// @todo sizemap: We could async load these packages to make the editor experience a bit nicer (smoother progress)
					UObject* Asset = StaticLoadObject( UObject::StaticClass(), nullptr, *AssetPathString );
					if( Asset != nullptr )
					{
						// Gather this asset's references, either from the asset registry or by scanning the loaded object.
						TArray<FName> ReferencedAssetPackageNames;
						if( bUseAssetRegistryForDependencies )
						{
							AssetRegistryModule.Get().GetDependencies( AssetPackageName, ReferencedAssetPackageNames );
						}
						else
						{
							SizeMapInternals::FAssetReferenceFinder References( Asset );
							for( UObject* Object : References.GetReferencedAssets() )
							{
								ReferencedAssetPackageNames.Add( FName( *Object->GetOutermost()->GetPathName() ) );
							}
						}

						// For textures, make sure we're getting the worst case size, not the size of the currently loaded set of mips
						// @todo sizemap: We should instead have a special EResourceSizeMode that asks for the worst case size.  Some assets (like UTextureCube) currently always report resident mip size, even when asked for inclusive size
						if( Asset->IsA( UTexture2D::StaticClass() ) )
						{
							NodeSizeMapData.AssetSize = Asset->GetResourceSize( EResourceSizeMode::Inclusive );
						}
						else
						{
							NodeSizeMapData.AssetSize = Asset->GetResourceSize( EResourceSizeMode::Exclusive );
						}

						NodeSizeMapData.bHasKnownSize = NodeSizeMapData.AssetSize != UObject::RESOURCE_SIZE_NONE && NodeSizeMapData.AssetSize != 0;
						if( !NodeSizeMapData.bHasKnownSize )
						{
							// Asset has no meaningful size
							NodeSizeMapData.AssetSize = 0;

							// @todo sizemap urgent: Try to serialize to figure out how big it is (not into sub-assets though!)
							// FObjectMemoryAnalyzer ObjectMemoryAnalyzer( Asset );
						}

						// Now visit all of the assets that we are referencing
						GatherDependenciesRecursively( AssetRegistryModule, InAssetThumbnailPool, VisitedAssetPackageNames, ReferencedAssetPackageNames, ChildTreeMapNode, SharedRootNode, NumAssetsWhichFailedToLoad );
					}
					else
					{
						++NumAssetsWhichFailedToLoad;
					}
				}
				else
				{
					++NumAssetsWhichFailedToLoad;
				}
			}
		}
	}
}