void FSlateRHIResourceManager::CreateTextures( const TArray< const FSlateBrush* >& Resources ) { DECLARE_SCOPE_CYCLE_COUNTER(TEXT("Loading Slate Textures"), STAT_Slate, STATGROUP_LoadTime); TMap<FName,FNewTextureInfo> TextureInfoMap; const uint32 Stride = GPixelFormats[PF_R8G8B8A8].BlockBytes; for( int32 ResourceIndex = 0; ResourceIndex < Resources.Num(); ++ResourceIndex ) { const FSlateBrush& Brush = *Resources[ResourceIndex]; const FName TextureName = Brush.GetResourceName(); if( TextureName != NAME_None && !Brush.HasUObject() && !Brush.IsDynamicallyLoaded() && !ResourceMap.Contains(TextureName) ) { // Find the texture or add it if it doesn't exist (only load the texture once) FNewTextureInfo& Info = TextureInfoMap.FindOrAdd( TextureName ); Info.bSrgb = (Brush.ImageType != ESlateBrushImageType::Linear); // Only atlas the texture if none of the brushes that use it tile it and the image is srgb Info.bShouldAtlas &= ( Brush.Tiling == ESlateBrushTileType::NoTile && Info.bSrgb && AtlasSize > 0 ); // Texture has been loaded if the texture data is valid if( !Info.TextureData.IsValid() ) { uint32 Width = 0; uint32 Height = 0; TArray<uint8> RawData; bool bSucceeded = LoadTexture( Brush, Width, Height, RawData ); Info.TextureData = MakeShareable( new FSlateTextureData( Width, Height, Stride, RawData ) ); const bool bTooLargeForAtlas = (Width >= 256 || Height >= 256 || Width >= AtlasSize || Height >= AtlasSize ); Info.bShouldAtlas &= !bTooLargeForAtlas; if( !bSucceeded || !ensureMsgf( Info.TextureData->GetRawBytes().Num() > 0, TEXT("Slate resource: (%s) contains no data"), *TextureName.ToString() ) ) { TextureInfoMap.Remove( TextureName ); } } } } // Sort textures by size. The largest textures are atlased first which creates a more compact atlas TextureInfoMap.ValueSort( FCompareFNewTextureInfoByTextureSize() ); for( TMap<FName,FNewTextureInfo>::TConstIterator It(TextureInfoMap); It; ++It ) { const FNewTextureInfo& Info = It.Value(); FName TextureName = It.Key(); FString NameStr = TextureName.ToString(); checkSlow( TextureName != NAME_None ); FSlateShaderResourceProxy* NewTexture = GenerateTextureResource( Info ); ResourceMap.Add( TextureName, NewTexture ); } }
/** * Since we don't care about blending, we just change this decoration to OutCurves * @TODO : Fix this if we're saving vectorcurves and blending */ void FRawCurveTracks::EvaluateTransformCurveData(USkeleton * Skeleton, TMap<FName, FTransform>&OutCurves, float CurrentTime, float BlendWeight) const { check (Skeleton); // evaluate the curve data at the CurrentTime and add to Instance for(auto CurveIter = TransformCurves.CreateConstIterator(); CurveIter; ++CurveIter) { const FTransformCurve& Curve = *CurveIter; // if disabled, do not handle if (Curve.GetCurveTypeFlag(ACF_Disabled)) { continue; } FSmartNameMapping* NameMapping = Skeleton->SmartNames.GetContainer(USkeleton::AnimTrackCurveMappingName); // Add or retrieve curve FName CurveName; // make sure it was added if (ensure (NameMapping->GetName(Curve.CurveUid, CurveName))) { // note we're not checking Curve.GetCurveTypeFlags() yet FTransform & Value = OutCurves.FindOrAdd(CurveName); Value = Curve.Evaluate(CurrentTime, BlendWeight); } } }
void FGroupedKeyArea::CopyKeys(FMovieSceneClipboardBuilder& ClipboardBuilder, const TFunctionRef<bool(FKeyHandle, const IKeyArea&)>& KeyMask) const { const FIndexEntry* IndexEntry = GlobalIndex.Find(IndexKey); if (!IndexEntry) { return; } // Since we are a group of nested key areas, we test the key mask for our key handles, and forward on the results to each key area // Using ptr as map key is fine here as we know they will not change TMap<const IKeyArea*, TSet<FKeyHandle>> AllValidHandles; for (auto& Pair : IndexEntry->HandleToGroup) { if (!KeyMask(Pair.Key, *this) || !Groups.IsValidIndex(Pair.Value)) { continue; } const FKeyGrouping& Group = Groups[Pair.Value]; for (const FKeyGrouping::FKeyIndex& KeyIndex : Group.Keys) { const IKeyArea& KeyArea = KeyAreas[KeyIndex.AreaIndex].Get(); AllValidHandles.FindOrAdd(&KeyArea).Add(KeyIndex.KeyHandle); } } for (auto& Pair : AllValidHandles) { Pair.Key->CopyKeys(ClipboardBuilder, [&](FKeyHandle Handle, const IKeyArea&){ return Pair.Value.Contains(Handle); }); } }
void UUMGSequencePlayer::InitSequencePlayer( const UWidgetAnimation& InAnimation, UUserWidget& UserWidget ) { Animation = &InAnimation; UMovieScene* MovieScene = Animation->MovieScene; // Cache the time range of the sequence to determine when we stop TimeRange = MovieScene->GetTimeRange(); RuntimeBindings = NewObject<UMovieSceneBindings>(this); RuntimeBindings->SetRootMovieScene( MovieScene ); UWidgetTree* WidgetTree = UserWidget.WidgetTree; TMap<FGuid, TArray<UObject*> > GuidToRuntimeObjectMap; // Bind to Runtime Objects for (const FWidgetAnimationBinding& Binding : InAnimation.AnimationBindings) { UObject* FoundObject = Binding.FindRuntimeObject( *WidgetTree ); if( FoundObject ) { TArray<UObject*>& Objects = GuidToRuntimeObjectMap.FindOrAdd(Binding.AnimationGuid); Objects.Add(FoundObject); } } for( auto It = GuidToRuntimeObjectMap.CreateConstIterator(); It; ++It ) { RuntimeBindings->AddBinding( It.Key(), It.Value() ); } }
void FOnlineSessionSearchQos::SortSearchResults() { TMap<FString, int32> RegionCounts; static const int32 MaxPerRegion = 5; UE_LOG(LogQos, Verbose, TEXT("Sorting QoS results")); for (int32 SearchResultIdx = 0; SearchResultIdx < SearchResults.Num();) { FString QosRegion; FOnlineSessionSearchResult& SearchResult = SearchResults[SearchResultIdx]; if (!SearchResult.Session.SessionSettings.Get(SETTING_REGION, QosRegion) || QosRegion.IsEmpty()) { UE_LOG(LogQos, Verbose, TEXT("Removed Qos search result, invalid region.")); SearchResults.RemoveAtSwap(SearchResultIdx); continue; } int32& ResultCount = RegionCounts.FindOrAdd(QosRegion); ResultCount++; if (ResultCount > MaxPerRegion) { SearchResults.RemoveAtSwap(SearchResultIdx); continue; } SearchResultIdx++; } for (auto& It : RegionCounts) { UE_LOG(LogQos, Verbose, TEXT("Region: %s Count: %d"), *It.Key, It.Value); } }
void UFaceFXMatineeControl::GetTrackKeyForTime(float InTime, TArray<TPair<int32, const FFaceFXTrackKey*>>& OutResult, TArray<FFaceFXSkelMeshComponentId>* OutNoTracks) const { //build a list of all keys for all skelmesh component ids TMap<int32, TArray<const FFaceFXTrackKey*>> SkelMeshTracks; TMap<int32, FFaceFXSkelMeshComponentId> SkelMeshIds; for(const FFaceFXTrackKey& Key : Keys) { SkelMeshTracks.FindOrAdd(Key.SkelMeshComponentId.Index).Add(&Key); if(OutNoTracks && !SkelMeshIds.Contains(Key.SkelMeshComponentId.Index)) { SkelMeshIds.Add(Key.SkelMeshComponentId.Index, Key.SkelMeshComponentId); } } //then generate the pair results for each skelmesh component for(auto It = SkelMeshTracks.CreateConstIterator(); It; ++It) { const TArray<const FFaceFXTrackKey*>& SkelMeshKeys = It.Value(); const int32 IndexMax = SkelMeshKeys.Num()-1; int32 Index = INDEX_NONE; for(; Index < IndexMax && SkelMeshKeys[Index+1]->Time <= InTime; ++Index); if(Index != INDEX_NONE) { OutResult.Add(TPairInitializer<int32, const FFaceFXTrackKey*>(Index, SkelMeshKeys[Index])); } else if(OutNoTracks) { OutNoTracks->Add(SkelMeshIds.FindChecked(It.Key())); } } }
void FBuildPatchAppManifest::EnumerateChunkPartInventory(const TArray<FGuid>& ChunksRequired, TMap<FGuid, TArray<FFileChunkPart>>& ChunkPartsAvailable) const { ChunkPartsAvailable.Empty(); // Use a set to optimize TSet<FGuid> ChunksReqSet(ChunksRequired); // For each file in the manifest, check what chunks it is made out of, and grab details for the ones in ChunksRequired for (auto FileManifestIt = Data->FileManifestList.CreateConstIterator(); FileManifestIt && !FBuildPatchInstallError::HasFatalError(); ++FileManifestIt) { const FFileManifestData& FileManifest = *FileManifestIt; uint64 FileOffset = 0; for (auto ChunkPartIt = FileManifest.FileChunkParts.CreateConstIterator(); ChunkPartIt && !FBuildPatchInstallError::HasFatalError(); ++ChunkPartIt) { const FChunkPartData& ChunkPart = *ChunkPartIt; if (ChunksReqSet.Contains(ChunkPart.Guid)) { TArray<FFileChunkPart>& FileChunkParts = ChunkPartsAvailable.FindOrAdd(ChunkPart.Guid); FFileChunkPart FileChunkPart; FileChunkPart.Filename = FileManifest.Filename; FileChunkPart.ChunkPart = ChunkPart; FileChunkPart.FileOffset = FileOffset; FileChunkParts.Add(FileChunkPart); } FileOffset += ChunkPart.Size; } } }
void Append(const FNodeClassCounter& Other) { for (auto Iterator : Other.NodeClassUsage) { uint32& Count = NodeClassUsage.FindOrAdd(Iterator.Key); Count += Iterator.Value; } }
void UDestructibleComponent::UpdateDestructibleChunkTM(const TArray<const PxRigidActor*>& ActiveActors) { //We want to consolidate the transforms so that we update each destructible component once by passing it an array of chunks to update. //This helps avoid a lot of duplicated work like marking render dirty, computing inverse world component, etc... TMap<UDestructibleComponent*, TArray<FUpdateChunksInfo> > ComponentUpdateMapping; //prepare map to update destructible components TArray<PxShape*> Shapes; for (const PxRigidActor* RigidActor : ActiveActors) { if (const FDestructibleChunkInfo* DestructibleChunkInfo = FPhysxUserData::Get<FDestructibleChunkInfo>(RigidActor->userData)) { if (GApexModuleDestructible->owns(RigidActor) && DestructibleChunkInfo->OwningComponent.IsValid()) { Shapes.AddUninitialized(RigidActor->getNbShapes()); int32 NumShapes = RigidActor->getShapes(Shapes.GetData(), Shapes.Num()); for (int32 ShapeIdx = 0; ShapeIdx < Shapes.Num(); ++ShapeIdx) { PxShape* Shape = Shapes[ShapeIdx]; int32 ChunkIndex; if (NxDestructibleActor* DestructibleActor = GApexModuleDestructible->getDestructibleAndChunk(Shape, &ChunkIndex)) { const physx::PxMat44 ChunkPoseRT = DestructibleActor->getChunkPose(ChunkIndex); const physx::PxTransform Transform(ChunkPoseRT); if (UDestructibleComponent* DestructibleComponent = Cast<UDestructibleComponent>(FPhysxUserData::Get<UPrimitiveComponent>(DestructibleActor->userData))) { if (DestructibleComponent->IsRegistered()) { TArray<FUpdateChunksInfo>& UpdateInfos = ComponentUpdateMapping.FindOrAdd(DestructibleComponent); FUpdateChunksInfo* UpdateInfo = new (UpdateInfos)FUpdateChunksInfo(ChunkIndex, P2UTransform(Transform)); } } } } Shapes.Empty(Shapes.Num()); //we want to keep largest capacity array to avoid reallocs } } } //update each component for (auto It = ComponentUpdateMapping.CreateIterator(); It; ++It) { UDestructibleComponent* DestructibleComponent = It.Key(); TArray<FUpdateChunksInfo>& UpdateInfos = It.Value(); if (DestructibleComponent->IsFracturedOrInitiallyStatic()) { DestructibleComponent->SetChunksWorldTM(UpdateInfos); } else { //if we haven't fractured it must mean that we're simulating a destructible and so we should update our ComponentToWorld based on the single rigid body DestructibleComponent->SyncComponentToRBPhysics(); } } }
void FSlateD3DTextureManager::CreateTextures( const TArray< const FSlateBrush* >& Resources ) { TMap<FName,FNewTextureInfo> TextureInfoMap; for( int32 ResourceIndex = 0; ResourceIndex < Resources.Num(); ++ResourceIndex ) { const FSlateBrush& Brush = *Resources[ResourceIndex]; const FName TextureName = Brush.GetResourceName(); if( TextureName != NAME_None && !ResourceMap.Contains(TextureName) ) { // Find the texture or add it if it doesn't exist (only load the texture once) FNewTextureInfo& Info = TextureInfoMap.FindOrAdd( TextureName ); Info.bSrgb = (Brush.ImageType != ESlateBrushImageType::Linear); // Only atlas the texture if none of the brushes that use it tile it Info.bShouldAtlas &= (Brush.Tiling == ESlateBrushTileType::NoTile && Info.bSrgb ); if( !Info.TextureData.IsValid()) { uint32 Width = 0; uint32 Height = 0; TArray<uint8> RawData; bool bSucceeded = LoadTexture( Brush, Width, Height, RawData ); const uint32 Stride = 4; // RGBA Info.TextureData = MakeShareable( new FSlateTextureData( Width, Height, Stride, RawData ) ); const bool bTooLargeForAtlas = (Width >= 256 || Height >= 256); Info.bShouldAtlas &= !bTooLargeForAtlas; if( !bSucceeded ) { TextureInfoMap.Remove( TextureName ); } } } } TextureInfoMap.ValueSort( FCompareFNewTextureInfoByTextureSize() ); for( TMap<FName,FNewTextureInfo>::TConstIterator It(TextureInfoMap); It; ++It ) { const FNewTextureInfo& Info = It.Value(); FName TextureName = It.Key(); FString NameStr = TextureName.ToString(); FSlateShaderResourceProxy* NewTexture = GenerateTextureResource( Info ); ResourceMap.Add( TextureName, NewTexture ); } }
void FVisualLoggerHelpers::GetHistogramCategories(const FVisualLogEntry& EntryItem, TMap<FString, TArray<FString> >& OutCategories) { for (const auto& CurrentSample : EntryItem.HistogramSamples) { auto& DataNames = OutCategories.FindOrAdd(CurrentSample.GraphName.ToString()); if (DataNames.Find(CurrentSample.DataName.ToString()) == INDEX_NONE) { DataNames.Add(CurrentSample.DataName.ToString()); } } }
TOptional<ECurrentState> FAutoReimportManager::ProcessAdditions(const FTimeLimit& TimeLimit) { // Override the global feedback context while we do this to avoid popping up dialogs TGuardValue<FFeedbackContext*> ScopedContextOverride(GWarn, FeedbackContextOverride.Get()); TGuardValue<bool> ScopedAssetChangesGuard(bGuardAssetChanges, true); FeedbackContextOverride->GetContent()->SetMainText(GetProgressText()); TMap<FString, TArray<UFactory*>> Factories; TArray<FString> FactoryExtensions; FactoryExtensions.Reserve(16); // Get the list of valid factories for (TObjectIterator<UClass> It ; It ; ++It) { UClass* CurrentClass = (*It); if (CurrentClass->IsChildOf(UFactory::StaticClass()) && !(CurrentClass->HasAnyClassFlags(CLASS_Abstract))) { UFactory* Factory = Cast<UFactory>(CurrentClass->GetDefaultObject()); if (Factory->bEditorImport && Factory->ImportPriority >= 0) { FactoryExtensions.Reset(); Factory->GetSupportedFileExtensions(FactoryExtensions); for (const auto& Ext : FactoryExtensions) { auto& Array = Factories.FindOrAdd(Ext); Array.Add(Factory); } } } } for (auto& Pair : Factories) { Pair.Value.Sort([](const UFactory& A, const UFactory& B) { return A.ImportPriority > B.ImportPriority; }); } const IAssetRegistry& Registry = FModuleManager::LoadModuleChecked<FAssetRegistryModule>("AssetRegistry").Get(); for (auto& Monitor : DirectoryMonitors) { Monitor.ProcessAdditions(Registry, TimeLimit, PackagesToSave, Factories, *FeedbackContextOverride); yield TOptional<ECurrentState>(); } return ECurrentState::ProcessModifications; }
// DEPRECATED void ADEPRECATED_VolumeAdaptiveBuilder::ExpandFrontierTowardsTarget(UDoNNavigationVolumeComponent* current, UDoNNavigationVolumeComponent* neighbor, DoNNavigation::PriorityQueue<UDoNNavigationVolumeComponent*> &frontier, TMap<UDoNNavigationVolumeComponent*, FVector> &entryPointMap, bool &goalFound, UDoNNavigationVolumeComponent* start, UDoNNavigationVolumeComponent* goal, FVector origin, FVector destination, TMap<UDoNNavigationVolumeComponent*, int>& VolumeVsCostMap, bool DrawDebug, TMap<UDoNNavigationVolumeComponent*, TArray<UDoNNavigationVolumeComponent*>> &PathVolumeSolutionMap) { if (DrawDebug) { DisplayDebugVolume(current, FColor::Red); DisplayDebugVolume(neighbor, FColor::Blue); } float SegmentDist = 0; FVector nextEntryPoint; TArray<UDoNNavigationVolumeComponent*> PathSolutionSoFar = PathVolumeSolutionMap.FindOrAdd(current); nextEntryPoint = NavEntryPointsForTraversal(*entryPointMap.Find(current), current, neighbor, SegmentDist, DrawDebug); entryPointMap.Add(neighbor, nextEntryPoint); if (nextEntryPoint == *entryPointMap.Find(current)) // i.e. no traversal solution exists { if (DrawDebug) { DisplayDebugVolume(current, FColor::Red); DisplayDebugVolume(neighbor, FColor::Blue); } UE_LOG(LogTemp, Log, TEXT("Skipping neighbor due to lack of traversal solution")); return; } //int new_cost = *VolumeVsCostMap.Find(current) + graph.cost(current, next); int new_cost = *VolumeVsCostMap.Find(current) + SegmentDist; if (!VolumeVsCostMap.Contains(neighbor) || new_cost < *VolumeVsCostMap.Find(neighbor)) { PathSolutionSoFar.Add(neighbor); PathVolumeSolutionMap.Add(neighbor, PathSolutionSoFar); VolumeVsCostMap.Add(neighbor, new_cost); float heuristic = FVector::Dist(nextEntryPoint, destination); int priority = new_cost + heuristic; if (DrawDebug) { DrawDebugLine(GetWorld(), nextEntryPoint, destination, FColor::Red, true, -1.f, 0, 10.f); FString priorityText = FString::Printf(TEXT("Priority: %d"), priority); UE_LOG(LogTemp, Log, TEXT("%s"), *priorityText); } frontier.put(neighbor, priority); } }
FText UK2Node_InputKey::GetMenuCategory() const { static TMap<FName, FNodeTextCache> CachedCategories; const FName KeyCategory = InputKey.GetMenuCategory(); const FText SubCategoryDisplayName = FText::Format(LOCTEXT("EventsCategory", "{0} Events"), EKeys::GetMenuCategoryDisplayName(KeyCategory)); FNodeTextCache& NodeTextCache = CachedCategories.FindOrAdd(KeyCategory); if (NodeTextCache.IsOutOfDate(this)) { // FText::Format() is slow, so we cache this to save on performance NodeTextCache.SetCachedText(FEditorCategoryUtils::BuildCategoryString(FCommonEditorCategory::Input, SubCategoryDisplayName), this); } return NodeTextCache; }
virtual void RegisterNets(FKismetFunctionContext& Context, UEdGraphNode* Node) { FNodeHandlingFunctor::RegisterNets(Context, Node); const FString BaseNetName = Context.NetNameMap->MakeValidName(Node); // Create a term to store a bool that determines if we're in the first execution of the node or not FBPTerminal* FirstRunTerm = Context.CreateLocalTerminal(); FirstRunTerm->Type.PinCategory = CompilerContext.GetSchema()->PC_Boolean; FirstRunTerm->Source = Node; FirstRunTerm->Name = BaseNetName + TEXT("_FirstRun"); FirstRunTermMap.Add(Node, FirstRunTerm); UK2Node_MultiGate* GateNode = Cast<UK2Node_MultiGate>(Node); // If there is already a data node from expansion phase if (!GateNode || !GateNode->DataNode) { FBPTerminal* DataTerm = Context.CreateLocalTerminal(); DataTerm->Type.PinCategory = CompilerContext.GetSchema()->PC_Int; DataTerm->Source = Node; DataTerm->Name = BaseNetName + TEXT("_Data"); DataTermMap.Add(Node, DataTerm); } FFunctionScopedTerms& FuncLocals = FunctionTermMap.FindOrAdd(Context.Function); // Create a local scratch bool for run-time if there isn't already one if (!FuncLocals.GenericBoolTerm) { FuncLocals.GenericBoolTerm = Context.CreateLocalTerminal(); FuncLocals.GenericBoolTerm->Type.PinCategory = CompilerContext.GetSchema()->PC_Boolean; FuncLocals.GenericBoolTerm->Source = Node; FuncLocals.GenericBoolTerm->Name = BaseNetName + TEXT("_ScratchBool"); } // Create a local scratch int for run-time index tracking if there isn't already one if (!FuncLocals.IndexTerm) { FuncLocals.IndexTerm = Context.CreateLocalTerminal(); FuncLocals.IndexTerm->Type.PinCategory = CompilerContext.GetSchema()->PC_Int; FuncLocals.IndexTerm->Source = Node; FuncLocals.IndexTerm->Name = BaseNetName + TEXT("_ScratchIndex"); } }
void UEdGraphSchema_BehaviorTreeDecorator::GetBreakLinkToSubMenuActions( class FMenuBuilder& MenuBuilder, UEdGraphPin* InGraphPin ) { // Make sure we have a unique name for every entry in the list TMap< FString, uint32 > LinkTitleCount; // Add all the links we could break from for(TArray<class UEdGraphPin*>::TConstIterator Links(InGraphPin->LinkedTo); Links; ++Links) { UEdGraphPin* Pin = *Links; FString TitleString = Pin->GetOwningNode()->GetNodeTitle(ENodeTitleType::ListView).ToString(); FText Title = FText::FromString( TitleString ); if ( Pin->PinName != TEXT("") ) { TitleString = FString::Printf(TEXT("%s (%s)"), *TitleString, *Pin->PinName); // Add name of connection if possible FFormatNamedArguments Args; Args.Add( TEXT("NodeTitle"), Title ); Args.Add( TEXT("PinName"), Pin->GetDisplayName() ); Title = FText::Format( LOCTEXT("BreakDescPin", "{NodeTitle} ({PinName})"), Args ); } uint32 &Count = LinkTitleCount.FindOrAdd( TitleString ); FText Description; FFormatNamedArguments Args; Args.Add( TEXT("NodeTitle"), Title ); Args.Add( TEXT("NumberOfNodes"), Count ); if ( Count == 0 ) { Description = FText::Format( LOCTEXT("BreakDesc", "Break link to {NodeTitle}"), Args ); } else { Description = FText::Format( LOCTEXT("BreakDescMulti", "Break link to {NodeTitle} ({NumberOfNodes})"), Args ); } ++Count; MenuBuilder.AddMenuEntry( Description, Description, FSlateIcon(), FUIAction( FExecuteAction::CreateUObject((UEdGraphSchema_BehaviorTreeDecorator*const)this, &UEdGraphSchema_BehaviorTreeDecorator::BreakSinglePinLink, const_cast< UEdGraphPin* >(InGraphPin), *Links) ) ); } }
void UAirBlueprintLib::LogMessage(const FString &prefix, const FString &suffix, LogDebugLevel level, float persist_sec) { if (log_messages_hidden_) return; static TMap<FString, int> loggingKeys; static int counter = 1; int key = loggingKeys.FindOrAdd(prefix); if (key == 0) { key = counter++; loggingKeys[prefix] = key; } FColor color; switch (level) { case LogDebugLevel::Informational: color = FColor(147, 231, 237); //UE_LOG(LogTemp, Log, TEXT("%s%s"), *prefix, *suffix); break; case LogDebugLevel::Success: color = FColor(156, 237, 147); //UE_LOG(LogTemp, Log, TEXT("%s%s"), *prefix, *suffix); break; case LogDebugLevel::Failure: color = FColor(237, 147, 168); //UE_LOG(LogAirSim, Error, TEXT("%s%s"), *prefix, *suffix); break; case LogDebugLevel::Unimportant: color = FColor(237, 228, 147); //UE_LOG(LogTemp, Verbose, TEXT("%s%s"), *prefix, *suffix); break; default: color = FColor::Black; break; } if (GEngine) { GEngine->AddOnScreenDebugMessage(key, persist_sec, color, prefix + suffix); } //GEngine->AddOnScreenDebugMessage(key + 10, 60.0f, color, FString::FromInt(key)); }
void FAssetTypeActions_SoundCue::ExecuteConsolidateAttenuation(TArray<TWeakObjectPtr<USoundCue>> Objects) { TMap<FAttenuationSettings*,TArray<USoundCue*>> UnmatchedAttenuations; for (auto ObjIt = Objects.CreateConstIterator(); ObjIt; ++ObjIt) { USoundCue* SoundCue = (*ObjIt).Get(); bool bFound = false; if ( SoundCue && SoundCue->bOverrideAttenuation ) { for (auto UnmatchedIt = UnmatchedAttenuations.CreateIterator(); UnmatchedIt; ++UnmatchedIt) { // Found attenuation settings to consolidate together if (SoundCue->AttenuationOverrides == *UnmatchedIt.Key()) { UnmatchedIt.Value().Add(SoundCue); bFound = true; break; } } if (!bFound) { UnmatchedAttenuations.FindOrAdd(&SoundCue->AttenuationOverrides).Add(SoundCue); } } } if (UnmatchedAttenuations.Num() > 0) { FString DefaultSuffix; TArray<UObject*> ObjectsToSync; FAssetToolsModule& AssetToolsModule = FModuleManager::GetModuleChecked<FAssetToolsModule>("AssetTools"); USoundAttenuationFactory* Factory = ConstructObject<USoundAttenuationFactory>(USoundAttenuationFactory::StaticClass()); for (auto UnmatchedIt = UnmatchedAttenuations.CreateConstIterator(); UnmatchedIt; ++UnmatchedIt) { if (UnmatchedIt.Value().Num() > 1) { FString Name; FString PackageName; CreateUniqueAssetName("/Game/Sounds/SoundAttenuations/SharedAttenuation", DefaultSuffix, PackageName, Name); USoundAttenuation* SoundAttenuation = Cast<USoundAttenuation>(AssetToolsModule.Get().CreateAsset(Name, FPackageName::GetLongPackagePath(PackageName), USoundAttenuation::StaticClass(), Factory)); if (SoundAttenuation) { SoundAttenuation->Attenuation = *UnmatchedIt.Key(); for (int32 SoundCueIndex = 0; SoundCueIndex < UnmatchedIt.Value().Num(); ++SoundCueIndex) { USoundCue* SoundCue = UnmatchedIt.Value()[SoundCueIndex]; SoundCue->bOverrideAttenuation = false; SoundCue->AttenuationSettings = SoundAttenuation; SoundCue->MarkPackageDirty(); } } } } if ( ObjectsToSync.Num() > 0 ) { FAssetTools::Get().SyncBrowserToAssets(ObjectsToSync); } } }
// TODO: Refactor this (and some other stuff) into a preprocessing step for use by any compiler? bool FNiagaraCompiler::MergeInFunctionNodes() { struct FReconnectionInfo { public: UEdGraphPin* From; TArray<UEdGraphPin*> To; //Fallback default value if an input connection is not connected. FString FallbackDefault; FReconnectionInfo() : From(NULL) {} }; TMap<FName, FReconnectionInfo> InputConnections; TMap<FName, FReconnectionInfo> OutputConnections; TArray<class UEdGraphPin*> FuncCallInputPins; TArray<class UEdGraphPin*> FuncCallOutputPins; //Copies the function graph into the main graph. //Removes the Function call in the main graph and the input and output nodes in the function graph, reconnecting their pins appropriately. auto MergeFunctionIntoMainGraph = [&](UNiagaraNodeFunctionCall* InFunc, UNiagaraGraph* FuncGraph) { InputConnections.Empty(); OutputConnections.Empty(); FuncCallInputPins.Empty(); FuncCallOutputPins.Empty(); check(InFunc && FuncGraph); if (InFunc->FunctionScript) { //Get all the pins that are connected to the inputs of the function call node in the main graph. InFunc->GetInputPins(FuncCallInputPins); for (UEdGraphPin* FuncCallInputPin : FuncCallInputPins) { FName InputName(*FuncCallInputPin->PinName); FReconnectionInfo& InputConnection = InputConnections.FindOrAdd(InputName); if (FuncCallInputPin->LinkedTo.Num() > 0) { check(FuncCallInputPin->LinkedTo.Num() == 1); UEdGraphPin* LinkFrom = FuncCallInputPin->LinkedTo[0]; check(LinkFrom->Direction == EGPD_Output); InputConnection.From = LinkFrom; } else { //This input has no link so we need the default value from the pin. InputConnection.FallbackDefault = FuncCallInputPin->GetDefaultAsString(); } } //Get all the pins that are connected to the outputs of the function call node in the main graph. InFunc->GetOutputPins(FuncCallOutputPins); for (UEdGraphPin* FuncCallOutputPin : FuncCallOutputPins) { FName OutputName(*FuncCallOutputPin->PinName); for (UEdGraphPin* LinkTo : FuncCallOutputPin->LinkedTo) { check(LinkTo->Direction == EGPD_Input); FReconnectionInfo& OutputConnection = OutputConnections.FindOrAdd(OutputName); OutputConnection.To.Add(LinkTo); } } //Remove the function call node from the graph now that we have everything we need from it. SourceGraph->RemoveNode(InFunc); //Keep a list of the Input and Output nodes we see in the function graph so that we can remove (most of) them later. TArray<UEdGraphNode*, TInlineAllocator<64>> ToRemove; //Search the nodes in the function graph, finding any connections to input or output nodes. for (UEdGraphNode* FuncGraphNode : FuncGraph->Nodes) { if (UNiagaraNodeInput* InputNode = Cast<UNiagaraNodeInput>(FuncGraphNode)) { check(InputNode->Pins.Num() == 1); //Get an array of "To" pins from one or more input nodes referencing each named input. FReconnectionInfo& InputConnection = InputConnections.FindOrAdd(InputNode->Input.Name); if (InputConnection.From) { //We have a connection from the function call so remove the input node and connect to that. ToRemove.Add(InputNode); } else { //This input has no connection from the function call so what do we do here? //For now we just leave the input node and connect back to it. //This will mean unconnected pins from the function call will look for constants or attributes. //In some cases we may want to just take the default value from the function call pin instead? //Maybe have some properties on the function call defining that. 
InputConnection.From = InputNode->Pins[0]; } TArray<UEdGraphPin*>& LinkToPins = InputNode->Pins[0]->LinkedTo; for (UEdGraphPin* ToPin : LinkToPins) { check(ToPin->Direction == EGPD_Input); InputConnection.To.Add(ToPin); } } else if (UNiagaraNodeOutput* OutputNode = Cast<UNiagaraNodeOutput>(FuncGraphNode)) { //Unlike the input nodes, we don't have the option of keeping these if there is no "From" pin. The default values from the node pins should be used. ToRemove.Add(OutputNode); //For each output, get the "From" pin to be reconnected later. for (int32 OutputIdx = 0; OutputIdx < OutputNode->Outputs.Num(); ++OutputIdx) { FName OutputName = OutputNode->Outputs[OutputIdx].Name; UEdGraphPin* OutputNodePin = OutputNode->Pins[OutputIdx]; check(OutputNodePin->LinkedTo.Num() <= 1); FReconnectionInfo& OutputConnection = OutputConnections.FindOrAdd(OutputName); UEdGraphPin* LinkFromPin = OutputNodePin->LinkedTo.Num() == 1 ? OutputNodePin->LinkedTo[0] : NULL; if (LinkFromPin) { check(LinkFromPin->Direction == EGPD_Output); OutputConnection.From = LinkFromPin; } else { //This output is not connected so links to it in the main graph must use its default value. OutputConnection.FallbackDefault = OutputNodePin->GetDefaultAsString(); } } } } //Remove all the In and Out nodes from the function graph. for (UEdGraphNode* Remove : ToRemove) { FuncGraph->RemoveNode(Remove); } //Copy the nodes from the function graph over into the main graph. FuncGraph->MoveNodesToAnotherGraph(SourceGraph, false); //Finally, do all the reconnection. auto MakeConnection = [&](FReconnectionInfo& Info) { for (UEdGraphPin* LinkTo : Info.To) { if (Info.From) { Info.From->MakeLinkTo(LinkTo); } else { LinkTo->DefaultValue = Info.FallbackDefault; } } }; for (TPair<FName, FReconnectionInfo>& ReconnectInfo : InputConnections){ MakeConnection(ReconnectInfo.Value); } for (TPair<FName, FReconnectionInfo>& ReconnectInfo : OutputConnections){ MakeConnection(ReconnectInfo.Value); } } }; //Helper struct for traversing nested function calls. struct FFunctionContext { //True if this context's function has been merged into the main graph. bool bProcessed; //The index of this context into the ContextPool. int32 PoolIdx; //Pointer back to the parent context for traversal. FFunctionContext* Parent; //The function call node for this function in the source/parent graph. UNiagaraNodeFunctionCall* Function; //The graph for this function that we are going to merge into the main graph. UNiagaraGraph* FunctionGraph; //The script from which the graph is copied. Used for re-entrance check. UNiagaraScript* Script; //Contexts for function calls in this function graph. TArray<FFunctionContext*, TInlineAllocator<64>> SubFunctionCalls; FFunctionContext() : bProcessed(false) , PoolIdx(INDEX_NONE) , Parent(NULL) , Function(NULL) , FunctionGraph(NULL) , Script(NULL) { } /** We don't allow re-entrant functions as this would cause an infinite loop of merging in graphs. Maybe in the future if we allow branching in the VM we can allow this. 
*/ bool CheckForReentrance()const { UNiagaraNodeFunctionCall* Func = Function; FFunctionContext* Curr = Parent; while (Curr) { if (Curr->Script == Script) return true; Curr = Curr->Parent; } return false; } FString GetCallstack()const { FString Ret; const FFunctionContext* Curr = this; while (Curr) { if (Curr->Script) { Ret.Append(*(Curr->Script->GetPathName())); } else { Ret.Append(TEXT("Unknown")); } Ret.Append(TEXT("\n")); Curr = Curr->Parent; } return Ret; } }; //A pool of contexts on the stack to avoid loads of needless, small heap allocations. TArray<FFunctionContext, TInlineAllocator<512>> ContextPool; ContextPool.Reserve(512); FFunctionContext RootContext; FFunctionContext* CurrentContext = &RootContext; CurrentContext->FunctionGraph = SourceGraph; CurrentContext->Script = Script; //Depth first traversal of all function calls. while (CurrentContext) { //Find any sub functions and process this function call. if (!CurrentContext->bProcessed) { CurrentContext->bProcessed = true; //Find any sub functions and check for re-entrance. if (CurrentContext->FunctionGraph) { for (UEdGraphNode* Node : CurrentContext->FunctionGraph->Nodes) { UNiagaraNodeFunctionCall* FuncNode = Cast<UNiagaraNodeFunctionCall>(Node); if (FuncNode) { int32 NewIdx = ContextPool.AddZeroed(); FFunctionContext* SubFuncContext = &ContextPool[NewIdx]; CurrentContext->SubFunctionCalls.Push(SubFuncContext); SubFuncContext->Parent = CurrentContext; SubFuncContext->Function = FuncNode; SubFuncContext->PoolIdx = NewIdx; SubFuncContext->Script = FuncNode->FunctionScript; if (SubFuncContext->CheckForReentrance()) { FString Callstack = SubFuncContext->GetCallstack(); MessageLog.Error(TEXT("Reentrant function call!\n%s"), *Callstack); return false; } //Copy the function graph as we'll be modifying it as we merge in with the main graph. UNiagaraScriptSource* FuncSource = CastChecked<UNiagaraScriptSource>(FuncNode->FunctionScript->Source); check(FuncSource); SubFuncContext->FunctionGraph = CastChecked<UNiagaraGraph>(FEdGraphUtilities::CloneGraph(FuncSource->NodeGraph, NULL, &MessageLog)); } } } //Merge this function into the main graph now. if (CurrentContext->Function && CurrentContext->FunctionGraph) { MergeFunctionIntoMainGraph(CurrentContext->Function, CurrentContext->FunctionGraph); } } if (CurrentContext->SubFunctionCalls.Num() > 0) { //Move to the next sub function. CurrentContext = CurrentContext->SubFunctionCalls.Pop(); } else { //Done processing this function so remove it and move back to the parent. if (CurrentContext->PoolIdx != INDEX_NONE) { CurrentContext->FunctionGraph->MarkPendingKill(); ContextPool.RemoveAtSwap(CurrentContext->PoolIdx); } CurrentContext = CurrentContext->Parent; } } return true; }
void FPaperFlipbookHelpers::ExtractFlipbooksFromSprites(TMap<FString, TArray<UPaperSprite*> >& OutSpriteFlipbookMap, const TArray<UPaperSprite*>& Sprites, const TArray<FString>& InSpriteNames) { OutSpriteFlipbookMap.Reset(); // Local copy check((InSpriteNames.Num() == 0) || (InSpriteNames.Num() == Sprites.Num())); TArray<FString> SpriteNames = InSpriteNames; if (InSpriteNames.Num() == 0) { SpriteNames.Reset(); for (int32 SpriteIndex = 0; SpriteIndex < Sprites.Num(); ++SpriteIndex) { check(Sprites[SpriteIndex] != nullptr); SpriteNames.Add(Sprites[SpriteIndex]->GetName()); } } // Group them TMap<FString, UPaperSprite*> SpriteNameMap; TArray<UPaperSprite*> RemainingSprites; for (int32 SpriteIndex = 0; SpriteIndex < Sprites.Num(); ++SpriteIndex) { UPaperSprite* Sprite = Sprites[SpriteIndex]; const FString SpriteName = SpriteNames[SpriteIndex]; SpriteNameMap.Add(SpriteName, Sprite); int32 SpriteNumber = 0; FString SpriteBareString; if (ExtractSpriteNumber(SpriteName, /*out*/ SpriteBareString, /*out*/ SpriteNumber)) { SpriteBareString = ObjectTools::SanitizeObjectName(SpriteBareString); OutSpriteFlipbookMap.FindOrAdd(SpriteBareString).Add(Sprite); } else { RemainingSprites.Add(Sprite); } } // Natural sort using the same method as above struct FSpriteSortPredicate { FSpriteSortPredicate() {} // Sort predicate operator bool operator()(UPaperSprite& LHS, UPaperSprite& RHS) const { FString LeftString; int32 LeftNumber; ExtractSpriteNumber(LHS.GetName(), /*out*/ LeftString, /*out*/ LeftNumber); FString RightString; int32 RightNumber; ExtractSpriteNumber(RHS.GetName(), /*out*/ RightString, /*out*/ RightNumber); return (LeftString == RightString) ? (LeftNumber < RightNumber) : (LeftString < RightString); } }; // Sort sprites TArray<FString> Keys; OutSpriteFlipbookMap.GetKeys(Keys); for (auto SpriteName : Keys) { OutSpriteFlipbookMap[SpriteName].Sort(FSpriteSortPredicate()); } // Create a flipbook from all remaining sprites // Not sure if this is desirable behavior, might want one flipbook per sprite if (RemainingSprites.Num() > 0) { RemainingSprites.Sort(FSpriteSortPredicate()); const FString DesiredName = GetCleanerSpriteName(RemainingSprites[0]->GetName()) + TEXT("_Flipbook"); const FString SanitizedName = ObjectTools::SanitizeObjectName(DesiredName); OutSpriteFlipbookMap.Add(SanitizedName, RemainingSprites); } }
void FFoliageTypePaintingCustomization::ShowFoliagePropertiesForCategory(IDetailLayoutBuilder& DetailLayoutBuilder, const FName CategoryName, TMap<const FName, IDetailPropertyRow*>& OutDetailRowsByPropertyName) { // Properties that have a ReapplyCondition should be disabled behind the specified property when in reapply mode static const FName ReapplyConditionKey("ReapplyCondition"); // Properties with a HideBehind property specified should only be shown if that property is true, non-zero, or not empty static const FName HideBehindKey("HideBehind"); IDetailCategoryBuilder& CategoryBuilder = DetailLayoutBuilder.EditCategory(CategoryName); TArray<TSharedRef<IPropertyHandle>> CategoryProperties; CategoryBuilder.GetDefaultProperties(CategoryProperties, true, true); // Determine whether each property should be shown and how for (auto& PropertyHandle : CategoryProperties) { bool bShowingProperty = false; if (UProperty* Property = PropertyHandle->GetProperty()) { // Check to see if this property can be reapplied TSharedPtr<IPropertyHandle> ReapplyConditionPropertyHandle = DetailLayoutBuilder.GetProperty(*Property->GetMetaData(ReapplyConditionKey)); if (ReapplyConditionPropertyHandle.IsValid() && ReapplyConditionPropertyHandle->IsValidHandle()) { // Create a custom entry that allows explicit enabling/disabling of the property when reapplying TSharedPtr<IPropertyHandle> PropertyHandlePtr = PropertyHandle; OutDetailRowsByPropertyName.FindOrAdd(PropertyHandle->GetProperty()->GetFName()) = &AddFoliageProperty(CategoryBuilder, PropertyHandlePtr, ReapplyConditionPropertyHandle, TAttribute<EVisibility>(), TAttribute<bool>()); } else { TSharedPtr<IPropertyHandle> InvalidProperty; TSharedPtr<IPropertyHandle> PropertyHandlePtr = PropertyHandle; // Check to see if this property is hidden behind another TSharedPtr<IPropertyHandle> HiddenBehindPropertyHandle = DetailLayoutBuilder.GetProperty(*Property->GetMetaData(HideBehindKey)); if (HiddenBehindPropertyHandle.IsValid() && HiddenBehindPropertyHandle->IsValidHandle()) { TAttribute<bool> IsEnabledAttribute; ReapplyConditionPropertyHandle = DetailLayoutBuilder.GetProperty(*HiddenBehindPropertyHandle->GetProperty()->GetMetaData(ReapplyConditionKey)); if (ReapplyConditionPropertyHandle.IsValid() && ReapplyConditionPropertyHandle->IsValidHandle()) { // If the property this is hidden behind has a reapply condition, disable this when the condition is false IsEnabledAttribute = TAttribute<bool>::Create(TAttribute<bool>::FGetter::CreateSP(this, &FFoliageTypePaintingCustomization::IsReapplyPropertyEnabled, ReapplyConditionPropertyHandle)); } TAttribute<EVisibility> VisibilityAttribute; GetHiddenPropertyVisibility(HiddenBehindPropertyHandle, !IsEnabledAttribute.IsSet(), VisibilityAttribute); OutDetailRowsByPropertyName.FindOrAdd(PropertyHandle->GetProperty()->GetFName()) = &AddFoliageProperty(CategoryBuilder, PropertyHandlePtr, InvalidProperty, VisibilityAttribute, IsEnabledAttribute); } else { // This property cannot be reapplied and isn't hidden behind anything, so show it whenever the reapply tool isn't active OutDetailRowsByPropertyName.FindOrAdd(PropertyHandle->GetProperty()->GetFName()) = &AddFoliageProperty(CategoryBuilder, PropertyHandlePtr, InvalidProperty, TAttribute<EVisibility>::Create(TAttribute<EVisibility>::FGetter::CreateSP(this, &FFoliageTypePaintingCustomization::GetNonReapplyPropertyVisibility)), TAttribute<bool>()); } } } } }
void FStatsMemoryDumpCommand::ProcessingUObjectAllocations( const TMap<uint64, FAllocationInfo>& AllocationMap ) { // This code is not optimized. FScopeLogTime SLT( TEXT( "ProcessingUObjectAllocations" ), nullptr, FScopeLogTime::ScopeLog_Seconds ); UE_LOG( LogStats, Warning, TEXT( "Processing UObject allocations" ) ); FDiagnosticTableViewer MemoryReport( *FDiagnosticTableViewer::GetUniqueTemporaryFilePath( TEXT( "MemoryReport-UObject" ) ) ); // Write a row of headings for the table's columns. MemoryReport.AddColumn( TEXT( "Size (bytes)" ) ); MemoryReport.AddColumn( TEXT( "Size (MB)" ) ); MemoryReport.AddColumn( TEXT( "Count" ) ); MemoryReport.AddColumn( TEXT( "UObject class" ) ); MemoryReport.CycleRow(); TMap<FName, FSizeAndCount> UObjectAllocations; // To minimize number of calls to expensive DecodeCallstack. TMap<FName,FName> UObjectCallstackToClassMapping; uint64 NumAllocations = 0; uint64 TotalAllocatedMemory = 0; for( const auto& It : AllocationMap ) { const FAllocationInfo& Alloc = It.Value; FName UObjectClass = UObjectCallstackToClassMapping.FindRef( Alloc.EncodedCallstack ); if( UObjectClass == NAME_None ) { TArray<FString> DecodedCallstack; DecodeCallstack( Alloc.EncodedCallstack, DecodedCallstack ); for( int32 Index = DecodedCallstack.Num() - 1; Index >= 0; --Index ) { NAME_INDEX NameIndex = 0; TTypeFromString<NAME_INDEX>::FromString( NameIndex, *DecodedCallstack[Index] ); const FName LongName = FName( NameIndex, NameIndex, 0 ); const bool bValid = UObjectNames.Contains( LongName ); if( bValid ) { const FString ObjectName = FStatNameAndInfo::GetShortNameFrom( LongName ).GetPlainNameString(); UObjectClass = *ObjectName.Left( ObjectName.Find( TEXT( "//" ) ) ); UObjectCallstackToClassMapping.Add( Alloc.EncodedCallstack, UObjectClass ); break; } } } if( UObjectClass != NAME_None ) { FSizeAndCount& SizeAndCount = UObjectAllocations.FindOrAdd( UObjectClass ); SizeAndCount.Size += Alloc.Size; SizeAndCount.Count += 1; TotalAllocatedMemory += Alloc.Size; NumAllocations++; } } // Dump memory to the log. UObjectAllocations.ValueSort( FSizeAndCountGreater() ); const float MaxPctDisplayed = 0.90f; int32 CurrentIndex = 0; uint64 DisplayedSoFar = 0; UE_LOG( LogStats, Warning, TEXT( "Index, Size (Size MB), Count, UObject class" ) ); for( const auto& It : UObjectAllocations ) { const FSizeAndCount& SizeAndCount = It.Value; const FName& UObjectClass = It.Key; UE_LOG( LogStats, Log, TEXT( "%2i, %llu (%.2f MB), %llu, %s" ), CurrentIndex, SizeAndCount.Size, SizeAndCount.Size / 1024.0f / 1024.0f, SizeAndCount.Count, *UObjectClass.GetPlainNameString() ); // Dump stats MemoryReport.AddColumn( TEXT( "%llu" ), SizeAndCount.Size ); MemoryReport.AddColumn( TEXT( "%.2f MB" ), SizeAndCount.Size / 1024.0f / 1024.0f ); MemoryReport.AddColumn( TEXT( "%llu" ), SizeAndCount.Count ); MemoryReport.AddColumn( *UObjectClass.GetPlainNameString() ); MemoryReport.CycleRow(); CurrentIndex++; DisplayedSoFar += SizeAndCount.Size; const float CurrentPct = (float)DisplayedSoFar / (float)TotalAllocatedMemory; if( CurrentPct > MaxPctDisplayed ) { break; } } UE_LOG( LogStats, Warning, TEXT( "Allocated memory: %llu bytes (%.2f MB)" ), TotalAllocatedMemory, TotalAllocatedMemory / 1024.0f / 1024.0f ); // Add a total row. 
MemoryReport.CycleRow(); MemoryReport.CycleRow(); MemoryReport.CycleRow(); MemoryReport.AddColumn( TEXT( "%llu" ), TotalAllocatedMemory ); MemoryReport.AddColumn( TEXT( "%.2f MB" ), TotalAllocatedMemory / 1024.0f / 1024.0f ); MemoryReport.AddColumn( TEXT( "%llu" ), NumAllocations ); MemoryReport.AddColumn( TEXT( "TOTAL" ) ); MemoryReport.CycleRow(); }
bool ConvertOverlapResults(int32 NumOverlaps, PxOverlapHit* POverlapResults, const PxFilterData& QueryFilter, TArray<FOverlapResult>& OutOverlaps) { SCOPE_CYCLE_COUNTER(STAT_CollisionConvertOverlap); const int32 ExpectedSize = OutOverlaps.Num() + NumOverlaps; OutOverlaps.Reserve(ExpectedSize); bool bBlockingFound = false; if (ExpectedSize >= GNumOverlapsRequiredForTMap) { // Map from an overlap to the position in the result array (the index has one added to it so 0 can be a sentinel) TMap<FOverlapKey, int32, TInlineSetAllocator<64>> OverlapMap; OverlapMap.Reserve(ExpectedSize); // Fill in the map with existing hits for (int32 ExistingIndex = 0; ExistingIndex < OutOverlaps.Num(); ++ExistingIndex) { const FOverlapResult& ExistingOverlap = OutOverlaps[ExistingIndex]; OverlapMap.Add(FOverlapKey(ExistingOverlap.Component.Get(), ExistingOverlap.ItemIndex), ExistingIndex + 1); } for (int32 PResultIndex = 0; PResultIndex < NumOverlaps; ++PResultIndex) { FOverlapResult NewOverlap; ConvertQueryOverlap(POverlapResults[PResultIndex].shape, POverlapResults[PResultIndex].actor, NewOverlap, QueryFilter); if (NewOverlap.bBlockingHit) { bBlockingFound = true; } // Look for it in the map, newly added elements will start with 0, so we know we need to add it to the results array then (the index is stored as +1) int32& DestinationIndex = OverlapMap.FindOrAdd(FOverlapKey(NewOverlap.Component.Get(), NewOverlap.ItemIndex)); if (DestinationIndex == 0) { DestinationIndex = OutOverlaps.Add(NewOverlap) + 1; } else { FOverlapResult& ExistingOverlap = OutOverlaps[DestinationIndex - 1]; // If we had a non-blocking overlap with this component, but now we have a blocking one, use that one instead! if (!ExistingOverlap.bBlockingHit && NewOverlap.bBlockingHit) { ExistingOverlap = NewOverlap; } } } } else { // N^2 approach, no maps for (int32 i = 0; i < NumOverlaps; i++) { FOverlapResult NewOverlap; ConvertQueryOverlap(POverlapResults[i].shape, POverlapResults[i].actor, NewOverlap, QueryFilter); if (NewOverlap.bBlockingHit) { bBlockingFound = true; } AddUniqueOverlap(OutOverlaps, NewOverlap); } } return bBlockingFound; }
void IGameplayCueInterface::HandleGameplayCue(AActor *Self, FGameplayTag GameplayCueTag, EGameplayCueEvent::Type EventType, FGameplayCueParameters Parameters) { SCOPE_CYCLE_COUNTER(STAT_GameplayCueInterface_HandleGameplayCue); // Look up a custom function for this gameplay tag. UClass* Class = Self->GetClass(); IGameplayTagsModule& GameplayTagsModule = IGameplayTagsModule::Get(); FGameplayTagContainer TagAndParentsContainer = GameplayTagsModule.GetGameplayTagsManager().RequestGameplayTagParents(GameplayCueTag); Parameters.OriginalTag = GameplayCueTag; //Find entry for the class FGameplayCueTagFunctionList& GameplayTagFunctionList = PerClassGameplayTagToFunctionMap.FindOrAdd(Class); TArray<FCueNameAndUFunction>* FunctionList = GameplayTagFunctionList.Find(GameplayCueTag); if (FunctionList == NULL) { //generate new function list FunctionList = &GameplayTagFunctionList.Add(GameplayCueTag); for (auto InnerTagIt = TagAndParentsContainer.CreateConstIterator(); InnerTagIt; ++InnerTagIt) { UFunction* Func = NULL; FName CueName = InnerTagIt->GetTagName(); Func = Class->FindFunctionByName(CueName, EIncludeSuperFlag::IncludeSuper); // If the handler calls ForwardGameplayCueToParent, keep calling functions until one consumes the cue and doesn't forward it while (Func) { FCueNameAndUFunction NewCueFunctionPair; NewCueFunctionPair.Tag = *InnerTagIt; NewCueFunctionPair.Func = Func; FunctionList->Add(NewCueFunctionPair); Func = Func->GetSuperFunction(); } // Native functions can't be named with ".", so look for them with _. FName NativeCueFuncName = *CueName.ToString().Replace(TEXT("."), TEXT("_")); Func = Class->FindFunctionByName(NativeCueFuncName, EIncludeSuperFlag::IncludeSuper); while (Func) { FCueNameAndUFunction NewCueFunctionPair; NewCueFunctionPair.Tag = *InnerTagIt; NewCueFunctionPair.Func = Func; FunctionList->Add(NewCueFunctionPair); Func = Func->GetSuperFunction(); } } } //Iterate through all functions in the list until we should no longer continue check(FunctionList); bool bShouldContinue = true; for (int32 FunctionIndex = 0; bShouldContinue && (FunctionIndex < FunctionList->Num()); ++FunctionIndex) { FCueNameAndUFunction& CueFunctionPair = FunctionList->GetData()[FunctionIndex]; UFunction* Func = CueFunctionPair.Func; Parameters.MatchedTagName = CueFunctionPair.Tag; // Reset the forward parameter now, so we can check it after the function call bForwardToParent = false; IGameplayCueInterface::DispatchBlueprintCustomHandler(Self, Func, EventType, Parameters); bShouldContinue = bForwardToParent; } if (bShouldContinue) { TArray<UGameplayCueSet*> Sets; GetGameplayCueSets(Sets); for (UGameplayCueSet* Set : Sets) { bShouldContinue = Set->HandleGameplayCue(Self, GameplayCueTag, EventType, Parameters); if (!bShouldContinue) { break; } } } if (bShouldContinue) { Parameters.MatchedTagName = GameplayCueTag; GameplayCueDefaultHandler(EventType, Parameters); } }
/* Create the top level report widget */ TSharedPtr<SWidget> ConstructReportWidget(const FAsyncReportGenerator& Generator) { TSharedPtr<SVerticalBox> Widget = nullptr; TSharedPtr<SWidget> ReportWidget = SNew(SScrollBox) + SScrollBox::Slot() [ SAssignNew(Widget, SVerticalBox) ]; auto Report = Generator.GetReport(); // First show any errors, if there are any if (Report->ErrorList.Num() > 0) { Widget->AddSlot() .Padding(WidgetPadding) .AutoHeight() [ SNew(SExpandableArea) .AreaTitle(FText::FromString(TEXT("Error Summary"))) .AreaTitleFont(FEditorStyle::GetFontStyle(TEXT("DetailsView.CategoryFontStyle"))) .InitiallyCollapsed(false) .BorderBackgroundColor(FLinearColor(0.5f, 0.5f, 0.5f, 1.0f)) .Padding(WidgetPadding) .BodyContent() [ ConstructErrorWidget(Report->ErrorList) ] ]; } // Next show Midgard reports if there are any if (Report->MidgardReports.Num() > 0) { Widget->AddSlot() .Padding(WidgetPadding) .AutoHeight() [ SNew(SExpandableArea) .AreaTitle(FText::FromString(TEXT("Statistics Summary"))) .AreaTitleFont(FEditorStyle::GetFontStyle(TEXT("DetailsView.CategoryFontStyle"))) .InitiallyCollapsed(false) .BorderBackgroundColor(FLinearColor(0.5f, 0.5f, 0.5f, 1.0f)) .Padding(WidgetPadding) .BodyContent() [ SNew(SVerticalBox) + SVerticalBox::Slot() .Padding(WidgetPadding) .AutoHeight() [ GenerateFStringListView(Report->ShaderSummaryStrings) ] + SVerticalBox::Slot() .AutoHeight() [ ConstructMidgardDumpWidget(Report->MidgardSummaryReports, false) ] ] ]; // Dump the rest of the shaders by vertex factory name TMap<FString, TArray<TSharedRef<FMaliOCReport::FMidgardReport>>> VertexFactoryNames; for (auto& report : Report->MidgardReports) { VertexFactoryNames.FindOrAdd(report->VertexFactoryName).Add(report); } for (const auto& name : VertexFactoryNames) { Widget->AddSlot() .Padding(WidgetPadding) .AutoHeight() [ SNew(SExpandableArea) .AreaTitle(FText::FromString(FString::Printf(TEXT("All %s"), *name.Key))) .AreaTitleFont(FEditorStyle::GetFontStyle(TEXT("DetailsView.CategoryFontStyle"))) .InitiallyCollapsed(true) .BorderBackgroundColor(FLinearColor(0.5f, 0.5f, 0.5f, 1.0f)) .Padding(WidgetPadding) .BodyContent() [ ConstructMidgardDumpWidget(name.Value, true) ] ]; } } // Next show Utgard reports, if there are any. 
// These should be mutually exclusive with Midgard reports
if (Report->UtgardReports.Num() > 0) { Widget->AddSlot() .Padding(WidgetPadding) .AutoHeight() [ SNew(SExpandableArea) .AreaTitle(FText::FromString(TEXT("Statistics Summary"))) .AreaTitleFont(FEditorStyle::GetFontStyle(TEXT("DetailsView.CategoryFontStyle"))) .InitiallyCollapsed(false) .BorderBackgroundColor(FLinearColor(0.5f, 0.5f, 0.5f, 1.0f)) .Padding(WidgetPadding) .BodyContent() [ SNew(SVerticalBox) + SVerticalBox::Slot() .Padding(WidgetPadding) .AutoHeight() [ GenerateFStringListView(Report->ShaderSummaryStrings) ] + SVerticalBox::Slot() .AutoHeight() [ ConstructUtgardDumpWidget(Report->UtgardSummaryReports, false) ] ] ]; // Dump the rest of the shaders by vertex factory name TMap<FString, TArray<TSharedRef<FMaliOCReport::FUtgardReport>>> VertexFactoryNames; for (auto& report : Report->UtgardReports) { VertexFactoryNames.FindOrAdd(report->VertexFactoryName).Add(report); } for (const auto& name : VertexFactoryNames) { Widget->AddSlot() .Padding(WidgetPadding) .AutoHeight() [ SNew(SExpandableArea) .AreaTitle(FText::FromString(FString::Printf(TEXT("All %s"), *name.Key))) .AreaTitleFont(FEditorStyle::GetFontStyle(TEXT("DetailsView.CategoryFontStyle"))) .InitiallyCollapsed(true) .BorderBackgroundColor(FLinearColor(0.5f, 0.5f, 0.5f, 1.0f)) .Padding(WidgetPadding) .BodyContent() [ ConstructUtgardDumpWidget(name.Value, true) ] ]; } } return ReportWidget; }
void FStatsMemoryDumpCommand::ProcessingScopedAllocations( const TMap<uint64, FAllocationInfo>& AllocationMap ) { // This code is not optimized. FScopeLogTime SLT( TEXT( "ProcessingScopedAllocations" ), nullptr, FScopeLogTime::ScopeLog_Seconds ); UE_LOG( LogStats, Warning, TEXT( "Processing scoped allocations" ) ); FDiagnosticTableViewer MemoryReport( *FDiagnosticTableViewer::GetUniqueTemporaryFilePath( TEXT( "MemoryReport-Scoped" ) ) ); // Write a row of headings for the table's columns. MemoryReport.AddColumn( TEXT( "Size (bytes)" ) ); MemoryReport.AddColumn( TEXT( "Size (MB)" ) ); MemoryReport.AddColumn( TEXT( "Count" ) ); MemoryReport.AddColumn( TEXT( "Callstack" ) ); MemoryReport.CycleRow(); TMap<FName, FSizeAndCount> ScopedAllocations; uint64 NumAllocations = 0; uint64 TotalAllocatedMemory = 0; for( const auto& It : AllocationMap ) { const FAllocationInfo& Alloc = It.Value; FSizeAndCount& SizeAndCount = ScopedAllocations.FindOrAdd( Alloc.EncodedCallstack ); SizeAndCount.Size += Alloc.Size; SizeAndCount.Count += 1; TotalAllocatedMemory += Alloc.Size; NumAllocations++; } // Dump memory to the log. ScopedAllocations.ValueSort( FSizeAndCountGreater() ); const float MaxPctDisplayed = 0.90f; int32 CurrentIndex = 0; uint64 DisplayedSoFar = 0; UE_LOG( LogStats, Warning, TEXT( "Index, Size (Size MB), Count, Stat desc" ) ); for( const auto& It : ScopedAllocations ) { const FSizeAndCount& SizeAndCount = It.Value; const FName& EncodedCallstack = It.Key; const FString AllocCallstack = GetCallstack( EncodedCallstack ); UE_LOG( LogStats, Log, TEXT( "%2i, %llu (%.2f MB), %llu, %s" ), CurrentIndex, SizeAndCount.Size, SizeAndCount.Size / 1024.0f / 1024.0f, SizeAndCount.Count, *AllocCallstack ); // Dump stats MemoryReport.AddColumn( TEXT( "%llu" ), SizeAndCount.Size ); MemoryReport.AddColumn( TEXT( "%.2f MB" ), SizeAndCount.Size / 1024.0f / 1024.0f ); MemoryReport.AddColumn( TEXT( "%llu" ), SizeAndCount.Count ); MemoryReport.AddColumn( *AllocCallstack ); MemoryReport.CycleRow(); CurrentIndex++; DisplayedSoFar += SizeAndCount.Size; const float CurrentPct = (float)DisplayedSoFar / (float)TotalAllocatedMemory; if( CurrentPct > MaxPctDisplayed ) { break; } } UE_LOG( LogStats, Warning, TEXT( "Allocated memory: %llu bytes (%.2f MB)" ), TotalAllocatedMemory, TotalAllocatedMemory / 1024.0f / 1024.0f ); // Add a total row. MemoryReport.CycleRow(); MemoryReport.CycleRow(); MemoryReport.CycleRow(); MemoryReport.AddColumn( TEXT( "%llu" ), TotalAllocatedMemory ); MemoryReport.AddColumn( TEXT( "%.2f MB" ), TotalAllocatedMemory / 1024.0f / 1024.0f ); MemoryReport.AddColumn( TEXT( "%llu" ), NumAllocations ); MemoryReport.AddColumn( TEXT( "TOTAL" ) ); MemoryReport.CycleRow(); }
bool ConvertOverlapResults(int32 NumOverlaps, PxOverlapHit* POverlapResults, const PxFilterData& QueryFilter, TArray<FOverlapResult>& OutOverlaps) { SCOPE_CYCLE_COUNTER(STAT_CollisionConvertOverlap); OutOverlaps.Reserve(OutOverlaps.Num() + NumOverlaps); bool bBlockingFound = false; // This number was not empirically determined, just a rough rule of thumb if (OutOverlaps.Num() + NumOverlaps < 6) { // N^2 approach, no maps for (int32 i = 0; i < NumOverlaps; i++) { FOverlapResult NewOverlap; ConvertQueryOverlap(POverlapResults[i].shape, POverlapResults[i].actor, NewOverlap, QueryFilter); if (NewOverlap.bBlockingHit) { bBlockingFound = true; } AddUniqueOverlap(OutOverlaps, NewOverlap); } } else { // Map from an overlap to the position in the result array TMap<FOverlapKey, int32> OverlapMap; OverlapMap.Reserve(OutOverlaps.Num()); // Fill in the map with existing hits for (int32 ExistingIndex = 0; ExistingIndex < OutOverlaps.Num(); ++ExistingIndex) { const FOverlapResult& ExistingOverlap = OutOverlaps[ExistingIndex]; OverlapMap.Add(FOverlapKey(ExistingOverlap.Component.Get(), ExistingOverlap.ItemIndex), ExistingIndex); } for (int32 PResultIndex = 0; PResultIndex < NumOverlaps; ++PResultIndex) { FOverlapResult NewOverlap; ConvertQueryOverlap(POverlapResults[PResultIndex].shape, POverlapResults[PResultIndex].actor, NewOverlap, QueryFilter); if (NewOverlap.bBlockingHit) { bBlockingFound = true; } int32& DestinationIndex = OverlapMap.FindOrAdd(FOverlapKey(NewOverlap.Component.Get(), NewOverlap.ItemIndex)); if (DestinationIndex < OutOverlaps.Num()) { FOverlapResult& ExistingOverlap = OutOverlaps[DestinationIndex]; // If we had a non-blocking overlap with this component, but now we have a blocking one, use that one instead! if (!ExistingOverlap.bBlockingHit && NewOverlap.bBlockingHit) { ExistingOverlap = NewOverlap; } } else { DestinationIndex = OutOverlaps.Add(NewOverlap); } } } return bBlockingFound; }
void FStatsMemoryDumpCommand::InternalRun() { FParse::Value( FCommandLine::Get(), TEXT( "-INFILE=" ), SourceFilepath ); const int64 Size = IFileManager::Get().FileSize( *SourceFilepath ); if( Size < 4 ) { UE_LOG( LogStats, Error, TEXT( "Could not open: %s" ), *SourceFilepath ); return; } TAutoPtr<FArchive> FileReader( IFileManager::Get().CreateFileReader( *SourceFilepath ) ); if( !FileReader ) { UE_LOG( LogStats, Error, TEXT( "Could not open: %s" ), *SourceFilepath ); return; } if( !Stream.ReadHeader( *FileReader ) ) { UE_LOG( LogStats, Error, TEXT( "Could not open, bad magic: %s" ), *SourceFilepath ); return; } UE_LOG( LogStats, Warning, TEXT( "Reading a raw stats file for memory profiling: %s" ), *SourceFilepath ); const bool bIsFinalized = Stream.Header.IsFinalized(); check( bIsFinalized ); check( Stream.Header.Version == EStatMagicWithHeader::VERSION_5 ); StatsThreadStats.MarkAsLoaded(); TArray<FStatMessage> Messages; if( Stream.Header.bRawStatsFile ) { FScopeLogTime SLT( TEXT( "FStatsMemoryDumpCommand::InternalRun" ), nullptr, FScopeLogTime::ScopeLog_Seconds ); // Read metadata. TArray<FStatMessage> MetadataMessages; Stream.ReadFNamesAndMetadataMessages( *FileReader, MetadataMessages ); StatsThreadStats.ProcessMetaDataOnly( MetadataMessages ); // Find all UObject metadata messages. for( const auto& Meta : MetadataMessages ) { FName LongName = Meta.NameAndInfo.GetRawName(); const FString Desc = FStatNameAndInfo::GetShortNameFrom( LongName ).GetPlainNameString(); const bool bContainsUObject = Desc.Contains( TEXT( "//" ) ); if( bContainsUObject ) { UObjectNames.Add( LongName ); } } const int64 CurrentFilePos = FileReader->Tell(); // Update profiler's metadata. CreateThreadsMapping(); // Read frames offsets. Stream.ReadFramesOffsets( *FileReader ); // Buffer used to store the compressed and decompressed data. TArray<uint8> SrcArray; TArray<uint8> DestArray; const bool bHasCompressedData = Stream.Header.HasCompressedData(); check( bHasCompressedData ); TMap<int64, FStatPacketArray> CombinedHistory; int64 TotalDataSize = 0; int64 TotalStatMessagesNum = 0; int64 MaximumPacketSize = 0; int64 TotalPacketsNum = 0; // Read all packets sequentially, forced by the memory profiler which is now a part of the raw stats. // !!CAUTION!! Frame number in the raw stats is pointless, because it is time based, not frame based. // Background threads usually execute time consuming operations, so the frame number won't be valid. // Needs to be combined by the thread and the time, not by the frame number. { // Display log information once per 5 seconds to avoid spamming. double PreviousSeconds = FPlatformTime::Seconds(); const int64 FrameOffset0 = Stream.FramesInfo[0].FrameFileOffset; FileReader->Seek( FrameOffset0 ); const int64 FileSize = FileReader->TotalSize(); while( FileReader->Tell() < FileSize ) { // Read the compressed data. FCompressedStatsData UncompressedData( SrcArray, DestArray ); *FileReader << UncompressedData; if( UncompressedData.HasReachedEndOfCompressedData() ) { break; } FMemoryReader MemoryReader( DestArray, true ); FStatPacket* StatPacket = new FStatPacket(); Stream.ReadStatPacket( MemoryReader, *StatPacket ); const int64 StatPacketFrameNum = StatPacket->Frame; FStatPacketArray& Frame = CombinedHistory.FindOrAdd( StatPacketFrameNum ); // Check if we need to combine packets from the same thread. 
FStatPacket** CombinedPacket = Frame.Packets.FindByPredicate( [&]( FStatPacket* Item ) -> bool { return Item->ThreadId == StatPacket->ThreadId; } ); const int64 PacketSize = StatPacket->StatMessages.GetAllocatedSize(); TotalStatMessagesNum += StatPacket->StatMessages.Num(); if( CombinedPacket ) { TotalDataSize -= (*CombinedPacket)->StatMessages.GetAllocatedSize(); (*CombinedPacket)->StatMessages += StatPacket->StatMessages; TotalDataSize += (*CombinedPacket)->StatMessages.GetAllocatedSize(); delete StatPacket; } else { Frame.Packets.Add( StatPacket ); TotalDataSize += PacketSize; } const double CurrentSeconds = FPlatformTime::Seconds(); if( CurrentSeconds > PreviousSeconds + NumSecondsBetweenLogs ) { const int32 PctPos = int32( 100.0*FileReader->Tell() / FileSize ); UE_LOG( LogStats, Log, TEXT( "%3i%% %10llu (%.1f MB) read messages, last read frame %4i" ), PctPos, TotalStatMessagesNum, TotalDataSize / 1024.0f / 1024.0f, StatPacketFrameNum ); PreviousSeconds = CurrentSeconds; } MaximumPacketSize = FMath::Max( MaximumPacketSize, PacketSize ); TotalPacketsNum++; } } // Dump frame stats for( const auto& It : CombinedHistory ) { const int64 FrameNum = It.Key; int64 FramePacketsSize = 0; int64 FrameStatMessages = 0; int64 FramePackets = It.Value.Packets.Num(); // Threads for( const auto& It2 : It.Value.Packets ) { FramePacketsSize += It2->StatMessages.GetAllocatedSize(); FrameStatMessages += It2->StatMessages.Num(); } UE_LOG( LogStats, Warning, TEXT( "Frame: %10llu/%3lli Size: %.1f MB / %10lli" ), FrameNum, FramePackets, FramePacketsSize / 1024.0f / 1024.0f, FrameStatMessages ); } UE_LOG( LogStats, Warning, TEXT( "TotalPacketSize: %.1f MB, Max: %1f MB" ), TotalDataSize / 1024.0f / 1024.0f, MaximumPacketSize / 1024.0f / 1024.0f ); TArray<int64> Frames; CombinedHistory.GenerateKeyArray( Frames ); Frames.Sort(); const int64 MiddleFrame = Frames[Frames.Num() / 2]; ProcessMemoryOperations( CombinedHistory ); } }
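// A minimal sketch of the packet-combining step above, outside the stats system: bucket packets by
// frame number with a find-or-add lookup, then merge any packet that shares a ThreadId with one
// already in that bucket. StatPacket and CombinePacket are illustrative stand-ins (std::map and
// std::vector instead of TMap/TArray), not the engine's FStatPacket/FStatPacketArray.
#include <cstdint>
#include <map>
#include <vector>

struct StatPacket
{
	uint32_t ThreadId = 0;
	std::vector<int> Messages;   // placeholder for the real stat message array
};

void CombinePacket(std::map<int64_t, std::vector<StatPacket>>& CombinedHistory, int64_t FrameNum, StatPacket&& Incoming)
{
	// operator[] default-constructs the bucket on first use, like TMap::FindOrAdd.
	std::vector<StatPacket>& Frame = CombinedHistory[FrameNum];

	for (StatPacket& Existing : Frame)
	{
		if (Existing.ThreadId == Incoming.ThreadId)
		{
			// Same thread already reported this frame: append messages instead of keeping two packets.
			Existing.Messages.insert(Existing.Messages.end(), Incoming.Messages.begin(), Incoming.Messages.end());
			return;
		}
	}
	Frame.push_back(std::move(Incoming));
}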
TArray<TWeakObjectPtr<AGameplayDebuggingReplicator> >& FGameplayDebugger::GetAllReplicators(UWorld* InWorld)
{
	return AllReplicatorsPerWorlds.FindOrAdd(InWorld);
}
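// The accessor above is the simplest form of the FindOrAdd idiom: the first query for a world lazily
// creates an empty array, and every later query returns a reference to the same array. A hypothetical
// standalone equivalent using standard containers (names and value types are illustrative only):
#include <map>
#include <string>
#include <vector>

struct DebuggerRegistry
{
	std::map<std::string, std::vector<int>> ReplicatorsPerWorld;   // placeholder key/value types

	std::vector<int>& GetAllReplicators(const std::string& WorldName)
	{
		// std::map::operator[] value-initializes on first use, like TMap::FindOrAdd.
		return ReplicatorsPerWorld[WorldName];
	}
};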
void BuildSkeletalMeshChunks( const TArray<FMeshFace>& Faces, const TArray<FSoftSkinBuildVertex>& RawVertices, TArray<FSkeletalMeshVertIndexAndZ>& RawVertIndexAndZ, bool bKeepOverlappingVertices, TArray<FSkinnedMeshChunk*>& OutChunks, bool& bOutTooManyVerts )
{
	TArray<int32> DupVerts;

	TMultiMap<int32, int32> RawVerts2Dupes;
	{
		// Sorting function for vertex Z/index pairs
		struct FCompareFSkeletalMeshVertIndexAndZ
		{
			FORCEINLINE bool operator()(const FSkeletalMeshVertIndexAndZ& A, const FSkeletalMeshVertIndexAndZ& B) const
			{
				return A.Z < B.Z;
			}
		};

		// Sort the vertices by z value
		RawVertIndexAndZ.Sort(FCompareFSkeletalMeshVertIndexAndZ());

		// Search for duplicates, quickly!
		for(int32 i = 0; i < RawVertIndexAndZ.Num(); i++)
		{
			// only need to search forward, since we add pairs both ways
			for(int32 j = i + 1; j < RawVertIndexAndZ.Num(); j++)
			{
				if(FMath::Abs(RawVertIndexAndZ[j].Z - RawVertIndexAndZ[i].Z) > THRESH_POINTS_ARE_SAME)
				{
					// our list is sorted, so there can't be any more dupes
					break;
				}

				// check to see if the points are really overlapping
				if(PointsEqual(
					RawVertices[RawVertIndexAndZ[i].Index].Position,
					RawVertices[RawVertIndexAndZ[j].Index].Position))
				{
					RawVerts2Dupes.Add(RawVertIndexAndZ[i].Index, RawVertIndexAndZ[j].Index);
					RawVerts2Dupes.Add(RawVertIndexAndZ[j].Index, RawVertIndexAndZ[i].Index);
				}
			}
		}
	}

	TMap<FSkinnedMeshChunk*, TMap<int32, int32> > ChunkToFinalVerts;

	uint32 TriangleIndices[3];
	for(int32 FaceIndex = 0; FaceIndex < Faces.Num(); FaceIndex++)
	{
		const FMeshFace& Face = Faces[FaceIndex];

		// Find a chunk which matches this triangle.
		FSkinnedMeshChunk* Chunk = NULL;
		for(int32 i = 0; i < OutChunks.Num(); ++i)
		{
			if(OutChunks[i]->MaterialIndex == Face.MeshMaterialIndex)
			{
				Chunk = OutChunks[i];
				break;
			}
		}
		if(Chunk == NULL)
		{
			Chunk = new FSkinnedMeshChunk();
			Chunk->MaterialIndex = Face.MeshMaterialIndex;
			Chunk->OriginalSectionIndex = OutChunks.Num();
			OutChunks.Add(Chunk);
		}

		TMap<int32, int32>& FinalVerts = ChunkToFinalVerts.FindOrAdd( Chunk );

		for(int32 VertexIndex = 0; VertexIndex < 3; ++VertexIndex)
		{
			int32 WedgeIndex = FaceIndex * 3 + VertexIndex;
			const FSoftSkinBuildVertex& Vertex = RawVertices[WedgeIndex];

			int32 FinalVertIndex = INDEX_NONE;
			if(bKeepOverlappingVertices)
			{
				FinalVertIndex = Chunk->Vertices.Add(RawVertices[WedgeIndex]);
			}
			else
			{
				DupVerts.Reset();
				RawVerts2Dupes.MultiFind(WedgeIndex, DupVerts);
				DupVerts.Sort();

				for(int32 k = 0; k < DupVerts.Num(); k++)
				{
					if(DupVerts[k] >= WedgeIndex)
					{
						// the verts beyond me haven't been placed yet, so these duplicates are not relevant
						break;
					}

					int32 *Location = FinalVerts.Find(DupVerts[k]);
					if(Location != NULL)
					{
						if(SkeletalMeshTools::AreSkelMeshVerticesEqual(Vertex, Chunk->Vertices[*Location]))
						{
							FinalVertIndex = *Location;
							break;
						}
					}
				}
				if(FinalVertIndex == INDEX_NONE)
				{
					FinalVertIndex = Chunk->Vertices.Add(Vertex);
					FinalVerts.Add(WedgeIndex, FinalVertIndex);
				}
			}

			// set the index entry for the newly added vertex
#if DISALLOW_32BIT_INDICES
			if(FinalVertIndex > MAX_uint16)
			{
				bOutTooManyVerts = true;
			}
			TriangleIndices[VertexIndex] = (uint16)FinalVertIndex;
#else
			// TArray internally has int32 for capacity, so no need to test for uint32 as it's larger than int32
			TriangleIndices[VertexIndex] = (uint32)FinalVertIndex;
#endif
		}

		if(TriangleIndices[0] != TriangleIndices[1] && TriangleIndices[0] != TriangleIndices[2] && TriangleIndices[1] != TriangleIndices[2])
		{
			for(uint32 VertexIndex = 0; VertexIndex < 3; VertexIndex++)
			{
				Chunk->Indices.Add(TriangleIndices[VertexIndex]);
			}
		}
	}
}
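// The duplicate search above avoids an O(N^2) scan over all vertex pairs by sorting on Z first and only
// comparing vertices whose Z values fall within the weld tolerance. A minimal standalone sketch of that
// sorted-sweep idea follows; Point, FindOverlappingPoints and Tolerance are illustrative names, and only
// positions are compared (the engine additionally compares full skinned vertex attributes via
// AreSkelMeshVerticesEqual before reusing an index).
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <utility>
#include <vector>

struct Point { float X, Y, Z; };

std::vector<std::pair<size_t, size_t>> FindOverlappingPoints(const std::vector<Point>& Points, float Tolerance)
{
	// Sort vertex indices by Z so potential duplicates end up adjacent.
	std::vector<size_t> Order(Points.size());
	for (size_t i = 0; i < Order.size(); ++i)
	{
		Order[i] = i;
	}
	std::sort(Order.begin(), Order.end(), [&](size_t A, size_t B) { return Points[A].Z < Points[B].Z; });

	std::vector<std::pair<size_t, size_t>> Pairs;
	for (size_t i = 0; i < Order.size(); ++i)
	{
		for (size_t j = i + 1; j < Order.size(); ++j)
		{
			if (Points[Order[j]].Z - Points[Order[i]].Z > Tolerance)
			{
				break;   // sorted by Z, so nothing further can be within tolerance
			}

			const Point& A = Points[Order[i]];
			const Point& B = Points[Order[j]];
			if (std::fabs(A.X - B.X) <= Tolerance && std::fabs(A.Y - B.Y) <= Tolerance && std::fabs(A.Z - B.Z) <= Tolerance)
			{
				Pairs.emplace_back(Order[i], Order[j]);   // record each pair once; callers can mirror it if needed
			}
		}
	}
	return Pairs;
}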