// Mark the instancer identified by \p id dirty with \p bits, and propagate
// DirtyInstancer to every rprim that uses this instancer.
void
HdChangeTracker::MarkInstancerDirty(SdfPath const& id, HdDirtyBits bits)
{
    // Dirtying with no bits set is a coding error.
    if (ARCH_UNLIKELY(bits == HdChangeTracker::Clean)) {
        TF_CODING_ERROR("MarkInstancerDirty called with bits == clean!");
        return;
    }

    _IDStateMap::iterator stateIt = _instancerState.find(id);
    if (!TF_VERIFY(stateIt != _instancerState.end())) {
        return;
    }

    // _PropagateDirtyBits is intentionally not called here. Currently the
    // instancer uses scale, translate and rotate primvars and there's no
    // dependency between them, unlike points and normals on rprims.
    stateIt->second |= bits;

    // Now mark any associated rprims dirty.
    _InstancerRprimMap::iterator mapIt = _instancerRprimMap.find(id);
    if (mapIt != _instancerRprimMap.end()) {
        for (SdfPath const &rprimId : mapIt->second) {
            MarkRprimDirty(rprimId, DirtyInstancer);
        }
    }
}
/// Return a cached UsdSkelAnimQuery for \p prim, creating and caching one if
/// the prim is a skel animation prim and no entry exists yet. Returns an
/// invalid query for null/inactive prims and for non-animation prims.
UsdSkelAnimQuery
UsdSkel_CacheImpl::ReadScope::FindOrCreateAnimQuery(const UsdPrim& prim)
{
    TRACE_FUNCTION();

    // Invalid or inactive prims never produce a query.
    if(ARCH_UNLIKELY(!prim || !prim.IsActive()))
        return UsdSkelAnimQuery();

    // Instance proxies share cache entries with their prim in the master.
    if(prim.IsInstanceProxy())
        return FindOrCreateAnimQuery(prim.GetPrimInMaster());

    {
        // Fast path: read-only lookup. The accessor presumably holds a
        // read lock for the duration of this scope (TBB-style concurrent
        // hash map) — TODO confirm; the braces deliberately limit it.
        _PrimToAnimMap::const_accessor a;
        if(_cache->_animQueryCache.find(a, prim))
            return UsdSkelAnimQuery(a->second);
    }

    if (UsdSkelIsSkelAnimationPrim(prim)) {
        // Write path: insert() either creates a fresh entry (returns true,
        // so we populate it) or finds one concurrently created by another
        // thread; in both cases 'a' refers to the cached value.
        _PrimToAnimMap::accessor a;
        if(_cache->_animQueryCache.insert(a, prim)) {
            a->second = UsdSkel_AnimQueryImpl::New(prim);
        }
        return UsdSkelAnimQuery(a->second);
    }
    return UsdSkelAnimQuery();
}
/// Return a cached skeleton definition for \p prim, creating and caching one
/// if the prim is a UsdSkelSkeleton and no entry exists yet. Returns null for
/// null/inactive prims and for prims that are not skeletons.
UsdSkel_SkelDefinitionRefPtr
UsdSkel_CacheImpl::ReadScope::FindOrCreateSkelDefinition(const UsdPrim& prim)
{
    TRACE_FUNCTION();

    // Invalid or inactive prims never produce a definition.
    if(ARCH_UNLIKELY(!prim || !prim.IsActive()))
        return nullptr;

    // Instance proxies share cache entries with their prim in the master.
    if(prim.IsInstanceProxy())
        return FindOrCreateSkelDefinition(prim.GetPrimInMaster());

    {
        // Fast path: read-only lookup; accessor scope limited by braces so
        // any lock it holds is released before the write path below.
        _PrimToSkelDefinitionMap::const_accessor a;
        if(_cache->_skelDefinitionCache.find(a, prim))
            return a->second;
    }

    if(prim.IsA<UsdSkelSkeleton>()) {
        // Write path: insert() returns true only for the thread that created
        // the entry, which is then responsible for populating it.
        _PrimToSkelDefinitionMap::accessor a;
        if(_cache->_skelDefinitionCache.insert(a, prim)) {
            a->second = UsdSkel_SkelDefinition::New(UsdSkelSkeleton(prim));
        }
        return a->second;
    }
    return nullptr;
}
// Accumulate the given dirty bits onto the tracked state for bprim \p id.
void
HdChangeTracker::MarkBprimDirty(SdfPath const& id, HdDirtyBits bits)
{
    // Dirtying with no bits set is a coding error.
    if (ARCH_UNLIKELY(bits == HdChangeTracker::Clean)) {
        TF_CODING_ERROR("MarkBprimDirty called with bits == clean!");
        return;
    }

    _IDStateMap::iterator stateIt = _bprimState.find(id);
    if (!TF_VERIFY(stateIt != _bprimState.end())) {
        return;
    }

    stateIt->second |= bits;
}
PXR_NAMESPACE_OPEN_SCOPE

// TF_TAGGED_ALLOCATION(Tf_Remnant, false);
// TF_FIXEDSIZE_ALLOCATION(Tf_Remnant, true);
// TF_INSTANTIATE_CLASS_ALLOCATOR(Tf_Remnant);

// Destructor: if a client requested expiry notification (_notify set),
// invoke the expiry notifier as the remnant goes away. The unlikely hint
// reflects that notification is an opt-in, rarely-used feature.
Tf_Remnant::~Tf_Remnant()
{
    if (ARCH_UNLIKELY(_notify)) {
        Tf_ExpiryNotifier::Invoke(this);
    }
}
/// Mark the rprim identified by \p id dirty with \p bits, transitioning it
/// into the varying state if needed and bumping the relevant change counters.
void
HdChangeTracker::MarkRprimDirty(SdfPath const& id, HdDirtyBits bits)
{
    // Dirtying with no bits set is a coding error.
    if (ARCH_UNLIKELY(bits == HdChangeTracker::Clean)) {
        TF_CODING_ERROR("MarkRprimDirty called with bits == clean!");
        return;
    }

    _IDStateMap::iterator it = _rprimState.find(id);
    if (!TF_VERIFY(it != _rprimState.end(), "%s\n", id.GetText())) {
        return;
    }

    // Early out if no new bits are being set.
    if ((bits & (~it->second)) == 0) {
        return;
    }

    // InitRepr is used to ensure the repr has been created; it doesn't
    // represent real scene variation, so don't touch _changeCount or the
    // varying state for it.
    if (bits == HdChangeTracker::InitRepr) {
        it->second |= HdChangeTracker::InitRepr;
        return;
    }

    // Set the Varying bit if it's not already set; the varying-state version
    // only advances on the clean->varying transition.
    HdDirtyBits oldBits = it->second;
    if ((oldBits & HdChangeTracker::Varying) == 0) {
        TF_DEBUG(HD_VARYING_STATE).Msg("New Varying State %s: %s\n",
                                       id.GetText(),
                                       StringifyDirtyBits(bits).c_str());
        // varying state changed.
        bits |= HdChangeTracker::Varying;
        ++_varyingStateVersion;
    }
    it->second = oldBits | bits;
    ++_changeCount;

    if (bits & DirtyVisibility) {
        ++_visChangeCount;
    }

    if (bits & DirtyRenderTag) {
        // Need to treat this like a scene edit
        //  - DirtyLists will filter out prims that don't match render tag,
        //  - Batches filter out prims that don't match render tag,
        // So both need to be rebuilt.
        // So increment the render index version.
        ++_indexVersion;
    }
}
static inline int _GetNonVariantPathElementCount(const SdfPath &path) { //return path.StripAllVariantSelections().GetPathElementCount(); if (ARCH_UNLIKELY(path.ContainsPrimVariantSelection())) { SdfPath cur(path); int result = (not cur.IsPrimVariantSelectionPath()); cur = cur.GetParentPath(); for (; cur.ContainsPrimVariantSelection(); cur = cur.GetParentPath()) result += (not cur.IsPrimVariantSelectionPath()); return result + cur.GetPathElementCount(); } else { return path.GetPathElementCount(); } }
/// Insert an rprim of \p typeId at \p rprimId, owned by \p sceneDelegate and
/// optionally driven by the instancer at \p instancerId. Silently no-ops if
/// an rprim already exists at \p rprimId; errors if \p rprimId is not
/// prefixed by the scene delegate's id.
void
HdRenderIndex::InsertRprim(TfToken const& typeId,
                           HdSceneDelegate* sceneDelegate,
                           SdfPath const& rprimId,
                           SdfPath const& instancerId /*= SdfPath()*/)
{
    HD_TRACE_FUNCTION();
    HF_MALLOC_TAG_FUNCTION();

    // Duplicate insertion is ignored (not an error).
    if (ARCH_UNLIKELY(TfMapLookupPtr(_rprimMap, rprimId))) {
        return;
    }

    SdfPath const &sceneDelegateId = sceneDelegate->GetDelegateID();
    if (!rprimId.HasPrefix(sceneDelegateId)) {
        TF_CODING_ERROR("Scene Delegate Id (%s) must prefix prim Id (%s)",
                        sceneDelegateId.GetText(), rprimId.GetText());
        return;
    }

    // The render delegate may decline to create the prim (e.g. unsupported
    // type), in which case insertion is abandoned.
    HdRprim *rprim = _renderDelegate->CreateRprim(typeId, rprimId, instancerId);
    if (rprim == nullptr) {
        return;
    }

    _rprimIds.Insert(rprimId);

    _tracker.RprimInserted(rprimId, rprim->GetInitialDirtyBitsMask());
    _AllocatePrimId(rprim);

    _RprimInfo info = { sceneDelegate, rprim };
    _rprimMap[rprimId] = std::move(info);

    // Bind a const reference rather than copying the SdfPath; the original
    // made a needless local copy here.
    SdfPath const &rprimInstancerId = rprim->GetInstancerId();
    if (!rprimInstancerId.IsEmpty()) {
        _tracker.InstancerRPrimInserted(rprimInstancerId, rprimId);
    }
}
/// Set this property's default value to \p defaultValue, casting it to the
/// property's declared value type. An empty value clears the default; an
/// SdfValueBlock is authored as-is. Returns false (with a coding error) if
/// the property's type is unknown or the value cannot be cast.
bool
SdfPropertySpec::SetDefaultValue(const VtValue &defaultValue)
{
    // Empty value means "remove the authored default".
    if (defaultValue.IsEmpty()) {
        ClearDefaultValue();
        return true;
    }

    // Value blocks bypass type checking: they block the value regardless of
    // the property's type.
    if (defaultValue.IsHolding<SdfValueBlock>()) {
        return SetField(SdfFieldKeys->Default, defaultValue);
    }

    TfType valueType = GetValueType();
    if (valueType.IsUnknown()) {
        TF_CODING_ERROR("Can't set value on attribute <%s> with "
                        "unknown type \"%s\"",
                        GetPath().GetText(),
                        GetTypeName().GetAsToken().GetText());
        return false;
    }

    if (ARCH_UNLIKELY(valueType.GetTypeid() == typeid(void))) {
        // valueType may be provided by a plugin that has not been loaded.
        // In that case, we cannot get the type info, which is required to cast.
        // So we load the plugin in that case.
        if (PlugPluginPtr p = PlugRegistry::GetInstance().GetPluginForType(valueType)) {
            p->Load();
        }
    }

    // Cast to the declared type; an empty result means the cast failed.
    VtValue value = VtValue::CastToTypeid(defaultValue, valueType.GetTypeid());
    if (value.IsEmpty()) {
        TF_CODING_ERROR("Can't set value on <%s> to %s: "
                        "expected a value of type \"%s\"",
                        GetPath().GetText(),
                        TfStringify(defaultValue).c_str(),
                        valueType.GetTypeName().c_str());
        return false;
    }
    return SetField(SdfFieldKeys->Default, value);
}
bool UsdSkelTopology::Validate(std::string* reason) const { TRACE_FUNCTION(); if(!TF_VERIFY(GetNumJoints() == 0 || _parentIndicesData)) return false; for(size_t i = 0; i < GetNumJoints(); ++i) { int parent = _parentIndicesData[i]; if(parent >= 0) { if(ARCH_UNLIKELY(static_cast<size_t>(parent) >= i)) { if(static_cast<size_t>(parent) == i) { if(reason) { *reason = TfStringPrintf( "Joint %zu has itself as its parent.", i); } return false; } if(reason) { *reason = TfStringPrintf( "Joint %zu has mis-ordered parent %d. Joints are " "expected to be ordered with parent joints always " "coming before children.", i, parent); // XXX: Note that this ordering restriction is a schema // requirement primarily because it simplifies hierarchy // evaluation (see UsdSkelConcatJointTransforms) // But a nice side effect for validation purposes is that // it also ensures that topology is non-cyclic. } return false; } } } return true; }
/// Compose and cache this prim's flag bits (active, loaded, model, group,
/// defined, abstract, instance, master, ...) from its composed metadata and
/// its parent's already-composed flags.
void
Usd_PrimData::_ComposeAndCacheFlags(Usd_PrimDataConstPtr parent, bool isMasterPrim)
{
    // We do not have to clear _flags here since in the pseudo root or instance
    // master case the values never change, and in the ordinary prim case we set
    // every flag.

    // Special-case the root (the only prim which has no parent) and
    // instancing masters.
    if (ARCH_UNLIKELY(!parent || isMasterPrim)) {
        _flags[Usd_PrimActiveFlag] = true;
        _flags[Usd_PrimLoadedFlag] = true;
        _flags[Usd_PrimModelFlag] = true;
        _flags[Usd_PrimGroupFlag] = true;
        _flags[Usd_PrimDefinedFlag] = true;
        _flags[Usd_PrimMasterFlag] = isMasterPrim;
    }
    else {
        // Compose and cache 'active'.
        UsdPrim self(Usd_PrimDataIPtr(this), SdfPath());
        bool active = true;
        self.GetMetadata(SdfFieldKeys->Active, &active);
        _flags[Usd_PrimActiveFlag] = active;

        // Cache whether or not this prim has a payload.
        bool hasPayload = _primIndex->HasPayload();
        _flags[Usd_PrimHasPayloadFlag] = hasPayload;

        // An active prim is loaded if it's loadable and in the load set, or
        // it's not loadable and its parent is loaded.
        _flags[Usd_PrimLoadedFlag] = active &&
            (hasPayload ?
             _stage->_GetPcpCache()->IsPayloadIncluded(_primIndex->GetPath()) :
             parent->IsLoaded());

        // According to Model hierarchy rules, only Model Groups may have Model
        // children (groups or otherwise).  So if our parent is not a Model
        // Group, then this prim cannot be a model (or a model group).
        // Otherwise we look up the kind metadata and consult the kind registry.
        bool isGroup = false, isModel = false;
        if (parent->IsGroup()) {
            static TfToken kindToken("kind");
            TfToken kind;
            self.GetMetadata(kindToken, &kind);
            // Use the kind registry to determine model/groupness.
            if (!kind.IsEmpty()) {
                isGroup = KindRegistry::IsA(kind, KindTokens->group);
                // A group is always a model; otherwise consult the registry.
                isModel = isGroup || KindRegistry::IsA(kind, KindTokens->model);
            }
        }
        _flags[Usd_PrimGroupFlag] = isGroup;
        _flags[Usd_PrimModelFlag] = isModel;

        // Get specifier.
        SdfSpecifier specifier = GetSpecifier();

        // This prim is abstract if its parent is or if it's a class.
        _flags[Usd_PrimAbstractFlag] =
            parent->IsAbstract() || specifier == SdfSpecifierClass;

        // Cache whether or not this prim has an authored defining specifier.
        const bool isDefiningSpec = SdfIsDefiningSpecifier(specifier);
        _flags[Usd_PrimHasDefiningSpecifierFlag] = isDefiningSpec;

        // This prim is defined if its parent is and its specifier is defining.
        _flags[Usd_PrimDefinedFlag] = isDefiningSpec && parent->IsDefined();

        // The presence of clips that may affect attributes on this prim
        // is computed and set in UsdStage. Default to false.
        _flags[Usd_PrimClipsFlag] = false;

        // These flags indicate whether this prim is an instance or an
        // instance master.
        _flags[Usd_PrimInstanceFlag] = active && _primIndex->IsInstanceable();
        _flags[Usd_PrimMasterFlag] = parent->IsInMaster();
    }
}
// Destructor: if a client requested expiry notification (_notify set),
// invoke the expiry notifier as the remnant goes away. The unlikely hint
// reflects that notification is an opt-in, rarely-used feature.
Tf_Remnant::~Tf_Remnant()
{
    if (ARCH_UNLIKELY(_notify)) {
        Tf_ExpiryNotifier::Invoke(this);
    }
}
/// Build the selection-offset buffer used by shaders to decide per-object /
/// per-instance / per-face highlighting, writing the result into \p offsets.
/// Returns false (and resizes \p offsets to the minimum UBO/SSBO size) when
/// nothing is selected or no selected prim resolves to a valid prim id.
/*virtual*/
bool
HdxSelectionTracker::GetBuffers(HdRenderIndex const* index,
                                VtIntArray* offsets) const
{
    TRACE_FUNCTION();
    TfAutoMallocTag2 tag("Hdx", "HdxSelection::GetBuffers");

    // XXX: Set minimum size for UBO/SSBO requirements. Seems like this should
    // be handled by Hydra. Update all uses of minSize below when resolved.
    const int minSize = 8;

    size_t numPrims = _selection ? _selection->selectedPrims.size() : 0;
    if (numPrims == 0) {
        TF_DEBUG(HDX_SELECTION_SETUP).Msg("No selected prims\n");
        offsets->resize(minSize);
        return false;
    }

    // Note that numeric_limits<float>::min() is surprising (it's the smallest
    // positive value, not the most negative), so lowest() is used instead.
    // Doing the same for <int> here to avoid copy-and-paste bugs.
    int min = std::numeric_limits<int>::max(),
        max = std::numeric_limits<int>::lowest();

    std::vector<int> ids;
    ids.resize(numPrims);

    // Resolve selected paths to prim ids in parallel, in chunks of N.
    size_t const N = 1000;
    int const INVALID = -1;
    WorkParallelForN(numPrims/N + 1,
       [&ids, &index, this](size_t begin, size_t end) mutable {
         end = std::min(end*N, ids.size());
         begin = begin*N;
         for (size_t i = begin; i < end; i++) {
            if (auto const& rprim = index->GetRprim(_selection->selectedPrims[i])) {
                ids[i] = rprim->GetPrimId();
            } else {
                // silently ignore non-existing prim
                ids[i] = INVALID;
            }
         }
    });

    // Compute the [min, max] prim-id range of the valid selections.
    for (int id : ids) {
        if (id == INVALID) continue;
        min = std::min(id, min);
        max = std::max(id, max);
    }

    // No valid ids at all.
    if (max < min) {
        offsets->resize(minSize);
        return false;
    }

    // ---------------------------------------------------------------------- //
    // Buffer Layout
    // ---------------------------------------------------------------------- //
    // In the following code, we want to build up a buffer that is capable of
    // driving selection highlighting. To do this, we leverage the fact that
    // all shaders have access to the drawing coord, namely the ObjectID,
    // InstanceID, FaceID, VertexID, etc. The idea is to take one such ID and
    // compare it against a range of known selected values within a given
    // range.
    //
    // For example, imagine the ObjectID is 6, then we can know this object is
    // selected if ID 6 is in the range of selected objects. We then
    // hierarchically re-apply this scheme for instances and faces. The buffer
    // layout is as follows:
    //
    // Object: [ start index | end index | (offset to next level per object) ]
    //
    // So to test if a given object ID is selected, we check if the ID is in
    // the range [start,end), if so, the object's offset in the buffer is
    // ID-start.
    //
    // The value for an object is one of three cases:
    //
    //  0 - indicates the object is not selected
    //  1 - indicates the object is fully selected
    //  N - an offset to the next level of the hierarchy
    //
    // The structure described above for objects is also applied for each level
    // of instancing as well as for faces. All data is aggregated into a single
    // buffer with the following layout:
    //
    // [ object | element | instance level-N | ... | level 0 ]
    //
    // Each section above is prefixed with [start,end) ranges and the values of
    // each range follow the three cases outlined.
    //
    // To see these values built incrementally, enable the TF_DEBUG flag
    // HDX_SELECTION_SETUP.
    // ---------------------------------------------------------------------- //

    // Start with individual arrays. Splice arrays once finished.
    int const SELECT_ALL = 1;
    int const SELECT_NONE = 0;

    _DebugPrintArray("ids", ids);

    // Object section: [min, max+1) header followed by one slot per id.
    std::vector<int> output;
    output.insert(output.end(), 2+1+max-min, SELECT_NONE);

    output[0] = min;
    output[1] = max+1;

    // XXX: currently, _selectedPrims may have duplicated entries
    // (e.g. to instances and to faces) for an objPath.
    // this would cause unreferenced offset buffer allocated in the
    // result buffer.

    _DebugPrintArray("objects", output);

    for (size_t primIndex = 0; primIndex < ids.size(); primIndex++) {
        // TODO: store ID and path in "ids" vector
        SdfPath const& objPath = _selection->selectedPrims[primIndex];
        int id = ids[primIndex];
        if (id == INVALID) continue;

        TF_DEBUG(HDX_SELECTION_SETUP).Msg("Processing: %d - %s\n",
                id, objPath.GetText());

        // ------------------------------------------------------------------ //
        // Elements
        // ------------------------------------------------------------------ //
        // Find element sizes, for this object.
        int elemOffset = output.size();
        if (VtIntArray const *faceIndices
            = TfMapLookupPtr(_selection->selectedFaces, objPath)) {
            if (faceIndices->size()) {
                int minElem = std::numeric_limits<int>::max();
                int maxElem = std::numeric_limits<int>::lowest();

                for (int const& elemId : *faceIndices) {
                    minElem = std::min(minElem, elemId);
                    maxElem = std::max(maxElem, elemId);
                }

                // Grow the element array to hold elements for this object.
                output.insert(output.end(), maxElem-minElem+1+2, SELECT_NONE);
                output[elemOffset+0] = minElem;
                output[elemOffset+1] = maxElem+1;

                for (int elemId : *faceIndices) {
                    // TODO: Add support for edge and point selection.
                    output[2+elemOffset+ (elemId-minElem)] = SELECT_ALL;
                }
                _DebugPrintArray("elements", output);
            } else {
                // Entire object/instance is selected
                elemOffset = SELECT_ALL;
            }
        } else {
            // Entire object/instance is selected
            elemOffset = SELECT_ALL;
        }

        // ------------------------------------------------------------------ //
        // Instances
        // ------------------------------------------------------------------ //
        // Initialize prevLevel to elemOffset which removes a special case in
        // the loops below.
        int prevLevelOffset = elemOffset;
        if (std::vector<VtIntArray> const * a =
            TfMapLookupPtr(_selection->selectedInstances, objPath)) {
            // Different instances can have different number of levels.
            // Use the smallest common depth across all selected instances.
            int numLevels = std::numeric_limits<int>::max();
            size_t numInst= a->size();
            if (numInst == 0) {
                numLevels = 0;
            } else {
                for (size_t instNum = 0; instNum < numInst; ++instNum) {
                    size_t levelsForInst = a->at(instNum).size();
                    numLevels = std::min(numLevels,
                                         static_cast<int>(levelsForInst));
                }
            }

            TF_DEBUG(HDX_SELECTION_SETUP).Msg("NumLevels: %d\n", numLevels);
            if (numLevels == 0) {
                // No instance levels: object slot points straight at elements.
                output[id-min+2] = elemOffset;
            }
            for (int level = 0; level < numLevels; ++level) {
                // Find the required size of the instance vectors.
                int levelMin = std::numeric_limits<int>::max();
                int levelMax = std::numeric_limits<int>::lowest();
                for (VtIntArray const &instVec : *a) {
                    _DebugPrintArray("\tinstVec", instVec, false);
                    int instId = instVec[level];
                    levelMin = std::min(levelMin, instId);
                    levelMax = std::max(levelMax, instId);
                }

                TF_DEBUG(HDX_SELECTION_SETUP).Msg(
                    "level-%d: min(%d) max(%d)\n",
                    level, levelMin, levelMax);

                // Append this level's section: header + one slot per instance
                // id in [levelMin, levelMax]; each selected slot points at the
                // previous (deeper) level's offset.
                int objLevelSize = levelMax - levelMin +2+1;
                int levelOffset = output.size();
                output.insert(output.end(), objLevelSize, SELECT_NONE);
                output[levelOffset + 0] = levelMin;
                output[levelOffset + 1] = levelMax + 1;
                for (VtIntArray const& instVec : *a) {
                    int instId = instVec[level] - levelMin+2;
                    output[levelOffset+instId] = prevLevelOffset;
                }

                if (level == numLevels-1) {
                    // Topmost level: wire the object slot to it.
                    output[id-min+2] = levelOffset;
                }

                if (ARCH_UNLIKELY(TfDebug::IsEnabled(HDX_SELECTION_SETUP))){
                    std::stringstream name;
                    name << "level[" << level << "]";
                    _DebugPrintArray(name.str(), output);
                }
                prevLevelOffset = levelOffset;
            }
        } else {
            // Not instanced: object slot points straight at elements.
            output[id-min+2] = elemOffset;
        }
    }
    _DebugPrintArray("final output", output);

    // Copy the assembled buffer into the caller's VtIntArray.
    offsets->resize(output.size());
    for (size_t i = 0; i < output.size(); i++) {
        (*offsets)[i] = output[i];
    }
    _DebugPrintArray("final output", *offsets);

    return true;
}
// Return this prim's prim index. Masters have no meaningful prim index, so
// they get a shared, empty dummy index instead.
const PcpPrimIndex &
Usd_PrimData::GetPrimIndex() const
{
    static const PcpPrimIndex dummyPrimIndex;
    if (ARCH_UNLIKELY(IsMaster())) {
        return dummyPrimIndex;
    }
    return *_primIndex;
}
/// Compose and cache this prim's flag bits (active, loaded, model, group,
/// defined, abstract, instance, master, ...) from its composed metadata and
/// its parent's already-composed flags.
void
Usd_PrimData::_ComposeAndCacheFlags(Usd_PrimDataConstPtr parent, bool isMasterPrim)
{
    // Special-case the root (the only prim which has no parent) and
    // instancing masters.
    if (ARCH_UNLIKELY(not parent or isMasterPrim)) {
        _flags[Usd_PrimActiveFlag] = true;
        _flags[Usd_PrimLoadedFlag] = true;
        _flags[Usd_PrimModelFlag] = true;
        _flags[Usd_PrimGroupFlag] = true;
        _flags[Usd_PrimAbstractFlag] = false;
        _flags[Usd_PrimDefinedFlag] = true;
        _flags[Usd_PrimClipsFlag] = false;
        _flags[Usd_PrimInstanceFlag] = false;
        _flags[Usd_PrimMasterFlag] = isMasterPrim;
    }
    else {
        // Compose and cache 'active'.
        UsdPrim self(Usd_PrimDataIPtr(this));
        bool active = true;
        self.GetMetadata(SdfFieldKeys->Active, &active);
        _flags[Usd_PrimActiveFlag] = active;

        // An active prim is loaded if it's loadable and in the load set, or
        // it's not loadable and its parent is loaded.
        _flags[Usd_PrimLoadedFlag] = active and
            (self.HasPayload() ?
             _stage->_GetPcpCache()->IsPayloadIncluded(_primIndex->GetPath()) :
             parent->IsLoaded());

        // According to Model hierarchy rules, only Model Groups may have Model
        // children (groups or otherwise).  So if our parent is not a Model
        // Group, then this prim cannot be a model (or a model group).
        // Otherwise we look up the kind metadata and consult the kind registry.
        _flags[Usd_PrimGroupFlag] = _flags[Usd_PrimModelFlag] = false;
        if (parent->IsGroup()) {
            static TfToken kindToken("kind");
            TfToken kind;
            self.GetMetadata(kindToken, &kind);
            // Use the kind registry to determine model/groupness.
            if (not kind.IsEmpty()) {
                _flags[Usd_PrimGroupFlag] =
                    KindRegistry::IsA(kind, KindTokens->group);
                // A group is always a model; otherwise consult the registry.
                _flags[Usd_PrimModelFlag] = _flags[Usd_PrimGroupFlag] or
                    KindRegistry::IsA(kind, KindTokens->model);
            }
        }

        // Get specifier.
        SdfSpecifier specifier = GetSpecifier();

        // This prim is abstract if its parent is or if it's a class.
        _flags[Usd_PrimAbstractFlag] =
            parent->IsAbstract() or specifier == SdfSpecifierClass;

        // This prim is defined if its parent is defined and its specifier is
        // defining.
        const bool specifierIsDefining = SdfIsDefiningSpecifier(specifier);
        _flags[Usd_PrimDefinedFlag] =
            parent->IsDefined() and specifierIsDefining;
        _flags[Usd_PrimHasDefiningSpecifierFlag] = specifierIsDefining;

        // The presence of clips that may affect attributes on this prim
        // is computed and set in UsdStage. Default to false.
        _flags[Usd_PrimClipsFlag] = false;

        // These flags indicate whether this prim is an instance or an
        // instance master.
        _flags[Usd_PrimInstanceFlag] = active and _primIndex->IsInstanceable();
        _flags[Usd_PrimMasterFlag] = parent->IsInMaster();
    }
}
/// Traverse the subtree at \p root, accumulating inherited skel binding
/// properties on a stack and creating a skinning query for every skinnable
/// prim encountered. Returns false only when \p root itself is invalid.
bool
UsdSkel_CacheImpl::ReadScope::Populate(const UsdSkelRoot& root)
{
    TRACE_FUNCTION();

    TF_DEBUG(USDSKEL_CACHE).Msg("[UsdSkelCache] Populate map from <%s>\n",
                                root.GetPrim().GetPath().GetText());

    if(!root) {
        TF_CODING_ERROR("'root' is invalid.");
        return false;
    }

    // Stack of (inherited binding state, prim that introduced it). The
    // initial default-constructed entry holds the empty inherited state.
    std::vector<std::pair<SkinningQueryKey,UsdPrim> > stack(1);

    UsdPrimRange range = UsdPrimRange::PreAndPostVisit(root.GetPrim(), UsdPrimDefaultPredicate); // UsdPrimIsInstance);
    for (auto it = range.begin(); it != range.end(); ++it) {
        if (it.IsPostVisit()) {
            // Leaving a prim's subtree: pop its binding state if it pushed one.
            if (stack.size() > 0 && stack.back().second == *it) {
                stack.pop_back();
            }
            continue;
        }

        // Non-imageable prims (and their subtrees) cannot be skinned; prune.
        if (ARCH_UNLIKELY(!it->IsA<UsdGeomImageable>())) {
            TF_DEBUG(USDSKEL_CACHE).Msg(
                "[UsdSkelCache] %sPruning traversal at <%s> "
                "(prim is not UsdGeomImageable)\n",
                _MakeIndent(stack.size()).c_str(),
                it->GetPath().GetText());

            it.PruneChildren();
            continue;
        }

        // TODO: Consider testing whether or not the API has been applied first.
        UsdSkelBindingAPI binding(*it);

        // Start from the inherited state and override with any binding
        // properties authored directly on this prim.
        SkinningQueryKey key(stack.back().first);

        UsdSkelSkeleton skel;
        if (binding.GetSkeleton(&skel))
            key.skel = skel.GetPrim();

        if (UsdAttribute attr = binding.GetJointIndicesAttr())
            key.jointIndicesAttr = attr;

        if (UsdAttribute attr = binding.GetJointWeightsAttr())
            key.jointWeightsAttr = attr;

        if (UsdAttribute attr = binding.GetGeomBindTransformAttr())
            key.geomBindTransformAttr = attr;

        if (UsdAttribute attr = binding.GetJointsAttr())
            key.jointsAttr = attr;

        if (UsdAttribute attr = binding.GetBlendShapesAttr())
            key.blendShapesAttr = attr;

        if (UsdRelationship rel = binding.GetBlendShapeTargetsRel())
            key.blendShapeTargetsRel = rel;

        if (UsdSkelIsSkinnablePrim(*it)) {
            // Cache a skinning query for this prim; insert() returns true only
            // for the thread that created the entry.
            _PrimToSkinningQueryMap::accessor a;
            if (_cache->_primSkinningQueryCache.insert(a, *it)) {
                a->second = _FindOrCreateSkinningQuery(*it, key);
            }

            TF_DEBUG(USDSKEL_CACHE).Msg(
                "[UsdSkelCache] %sAdded skinning query for prim <%s>\n",
                _MakeIndent(stack.size()).c_str(),
                it->GetPath().GetText());

            // TODO: How should nested skinnable primitives be handled?
            // Should we prune traversal at this point?
        }

        // Push this prim's (possibly overridden) state for its children.
        stack.emplace_back(key, *it);
    }
    return true;
}
// Mark every tracked rprim dirty with \p bits, as if MarkRprimDirty had been
// called on each one, and bump the global change counters.
void
HdChangeTracker::MarkAllRprimsDirty(HdDirtyBits bits)
{
    HD_TRACE_FUNCTION();

    // Dirtying with no bits set is a coding error.
    if (ARCH_UNLIKELY(bits == HdChangeTracker::Clean)) {
        TF_CODING_ERROR("MarkAllRprimsDirty called with bits == clean!");
        return;
    }

    //
    // For each prim: if the request would set bits not already present, set
    // them and, if the prim wasn't already varying, transition it to varying.
    // The varying-state version counter is incremented only when at least one
    // prim made that transition.
    //
    // This careful tracking exists for an important optimization: prims whose
    // dirty bits never get cleaned during sync (invisible prims being the main
    // case) should neither linger in the dirty list (so their varying flag can
    // be cleared elsewhere) nor thrash the varying state by being re-marked
    // varying every frame.
    //
    bool anyVaryingTransition = false;

    for (auto &entry : _rprimState) {
        HdDirtyBits &primBits = entry.second;

        // Only prims gaining at least one new bit are touched at all.
        if ((bits & ~primBits) != 0) {
            primBits |= bits;

            if ((primBits & HdChangeTracker::Varying) == 0) {
                primBits |= HdChangeTracker::Varying;
                anyVaryingTransition = true;
            }
        }
    }

    if (anyVaryingTransition) {
        ++_varyingStateVersion;
    }

    // These counters get updated every time, even if no prims
    // have moved into the dirty state.
    ++_changeCount;

    if (bits & DirtyVisibility) {
        ++_visChangeCount;
    }

    if (bits & DirtyRenderTag) {
        // Render tags affect dirty lists and batching, so they need to be
        // treated like a scene edit: see comment in MarkRprimDirty.
        ++_indexVersion;
    }
}