void ResourceCache::ReleasePackageResources(PackageFile* package, bool force)
{
    HashSet<ShortStringHash> affectedGroups;

    const HashMap<String, PackageEntry>& entries = package->GetEntries();
    for (HashMap<String, PackageEntry>::ConstIterator i = entries.Begin(); i != entries.End(); ++i)
    {
        StringHash nameHash(i->first_);

        // We do not know the actual resource type, so search all type containers
        for (HashMap<ShortStringHash, ResourceGroup>::Iterator j = resourceGroups_.Begin(); j != resourceGroups_.End(); ++j)
        {
            HashMap<StringHash, SharedPtr<Resource> >::Iterator k = j->second_.resources_.Find(nameHash);
            if (k != j->second_.resources_.End())
            {
                // If other references exist, do not release, unless forced
                if (k->second_.Refs() == 1 || force)
                {
                    j->second_.resources_.Erase(k);
                    affectedGroups.Insert(j->first_);
                }
                break;
            }
        }
    }

    for (HashSet<ShortStringHash>::Iterator i = affectedGroups.Begin(); i != affectedGroups.End(); ++i)
        UpdateResourceGroup(*i);
}
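// ReleasePackageResources() above frees a cached resource only when the cache
// holds the sole reference (Refs() == 1), unless 'force' overrides that check.
// A minimal standalone sketch of the same idiom using std::shared_ptr; the
// types and names here are illustrative, not part of the engine API:
#include <map>
#include <memory>
#include <string>

struct Res { std::string name; };

// Erase entries whose only remaining owner is the cache itself
void ReleaseUnreferenced(std::map<std::string, std::shared_ptr<Res> >& cache, bool force)
{
    for (std::map<std::string, std::shared_ptr<Res> >::iterator it = cache.begin(); it != cache.end();)
    {
        // use_count() == 1 means nothing outside the cache still owns the resource
        if (it->second.use_count() == 1 || force)
            it = cache.erase(it);
        else
            ++it;
    }
}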
template <typename T>
void Test(vector<T>& items, HashSet<T>& set)
{
    int count = 0;
    for (T& x : items)
    {
        ++count;
        set.Insert(x);
    }

    // cout << count << endl;
    // cout << set.GetLoadFactor() << endl;
    // set.ForEach([](const T& t) { cout << t << "-"; });
    // cout << "count is :" << count << endl;

    cout << "Number of items: " << set.Size() << endl;
    cout << "Number of buckets: " << set.NumBuckets() << endl;
    cout << "Actual load factor: " << set.GetLoad() << endl;
    cout << "Max number of items in a bucket: " << set.MaxBucketSize() << endl;
    cout << "Percent of empty buckets: " << set.PercentEmptyBuckets() << endl;

    // set.DisplayItems();
    // set.Remove("Mike");
    // cout << "remove one:------------" << endl;
    // set.DisplayItems();
    // cout << "max is:" << set.MaxBucketSize() << endl;
    // cout << "clear:-----------------" << endl;
    // set.Clear();
    // cout << set.IsEmpty() << endl;
    // set.DisplayItems();
    // set.DisplayBucket();
}
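// A minimal driver sketch for Test() above. It assumes 'vector' is std::vector
// (the file already uses unqualified cout/endl) and that HashSet is the custom
// container whose methods Test() already calls; RunTest and the word list are
// illustrative only:
void RunTest()
{
    vector<string> words;
    words.push_back("Mike");
    words.push_back("Anna");
    words.push_back("Tom");
    words.push_back("Mike"); // duplicate on purpose: the set should report 3 items, not 4

    HashSet<string> set;
    Test(words, set);
}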
void Animatable::SetAnimationEnabled(bool enable)
{
    if (objectAnimation_)
    {
        // In object animation there may be targets in hierarchy. Set same enable/disable state in all
        HashSet<Animatable*> targets;
        const HashMap<String, SharedPtr<ValueAnimationInfo> >& infos = objectAnimation_->GetAttributeAnimationInfos();
        for (HashMap<String, SharedPtr<ValueAnimationInfo> >::ConstIterator i = infos.Begin(); i != infos.End(); ++i)
        {
            String outName;
            Animatable* target = FindAttributeAnimationTarget(i->first_, outName);
            if (target && target != this)
                targets.Insert(target);
        }

        for (HashSet<Animatable*>::Iterator i = targets.Begin(); i != targets.End(); ++i)
            (*i)->animationEnabled_ = enable;
    }

    animationEnabled_ = enable;
}
void Object::SendEvent(StringHash eventType, VariantMap& eventData)
{
    if (!Thread::IsMainThread())
    {
        LOGERROR("Sending events is only supported from the main thread");
        return;
    }

    // Make a weak pointer to self to check for destruction during event handling
    WeakPtr<Object> self(this);
    Context* context = context_;
    HashSet<Object*> processed;

    context->BeginSendEvent(this);

    // Check first the specific event receivers
    const HashSet<Object*>* group = context->GetEventReceivers(this, eventType);
    if (group)
    {
        for (HashSet<Object*>::ConstIterator i = group->Begin(); i != group->End();)
        {
            HashSet<Object*>::ConstIterator current = i++;
            Object* receiver = *current;
            Object* next = 0;
            if (i != group->End())
                next = *i;

            unsigned oldSize = group->Size();
            receiver->OnEvent(this, eventType, eventData);

            // If self has been destroyed as a result of event handling, exit
            if (self.Expired())
            {
                context->EndSendEvent();
                return;
            }

            // If group has changed size during iteration (removed/added subscribers) try to recover
            /// \todo This is not entirely foolproof, as a subscriber could have been added to make up for the removed one
            if (group->Size() != oldSize)
                i = group->Find(next);

            processed.Insert(receiver);
        }
    }

    // Then the non-specific receivers
    group = context->GetEventReceivers(eventType);
    if (group)
    {
        if (processed.Empty())
        {
            for (HashSet<Object*>::ConstIterator i = group->Begin(); i != group->End();)
            {
                HashSet<Object*>::ConstIterator current = i++;
                Object* receiver = *current;
                Object* next = 0;
                if (i != group->End())
                    next = *i;

                unsigned oldSize = group->Size();
                receiver->OnEvent(this, eventType, eventData);

                if (self.Expired())
                {
                    context->EndSendEvent();
                    return;
                }

                if (group->Size() != oldSize)
                    i = group->Find(next);
            }
        }
        else
        {
            // If there were specific receivers, check that the event is not sent doubly to them
            for (HashSet<Object*>::ConstIterator i = group->Begin(); i != group->End();)
            {
                HashSet<Object*>::ConstIterator current = i++;
                Object* receiver = *current;
                Object* next = 0;
                if (i != group->End())
                    next = *i;

                if (!processed.Contains(receiver))
                {
                    unsigned oldSize = group->Size();
                    receiver->OnEvent(this, eventType, eventData);

                    if (self.Expired())
                    {
                        context->EndSendEvent();
                        return;
                    }

                    if (group->Size() != oldSize)
                        i = group->Find(next);
                }
            }
        }
    }

    context->EndSendEvent();
}
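// SendEvent() above guards against receivers unsubscribing (or new ones being
// added) mid-dispatch: it advances the iterator before invoking the handler,
// remembers the next element, and re-finds it if the set's size changed. A
// standalone sketch of that recovery pattern, with std::unordered_set standing
// in for the engine's HashSet and handler() as a placeholder callback that may
// mutate the set:
#include <unordered_set>
#include <functional>

void DispatchAll(std::unordered_set<int>& receivers, const std::function<void(int)>& handler)
{
    for (std::unordered_set<int>::iterator i = receivers.begin(); i != receivers.end();)
    {
        std::unordered_set<int>::iterator current = i++;
        int receiver = *current;
        bool hasNext = (i != receivers.end());
        int next = hasNext ? *i : 0;

        size_t oldSize = receivers.size();
        handler(receiver); // may erase or insert receivers

        // If the set changed size, iterators are suspect: recover by re-finding
        // the remembered next element. Same caveat as the engine code: not
        // foolproof if an insertion exactly offsets a removal.
        if (receivers.size() != oldSize)
            i = hasNext ? receivers.find(next) : receivers.end();
    }
}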
void LoadMesh(const String& inputFileName, bool generateTangents, bool splitSubMeshes, bool exportMorphs)
{
    File meshFileSource(context_);
    meshFileSource.Open(inputFileName);
    if (!meshFile_->Load(meshFileSource))
        ErrorExit("Could not load input file " + inputFileName);

    XMLElement root = meshFile_->GetRoot("mesh");
    XMLElement subMeshes = root.GetChild("submeshes");
    XMLElement skeletonLink = root.GetChild("skeletonlink");
    if (root.IsNull())
        ErrorExit("Could not load input file " + inputFileName);

    String skeletonName = skeletonLink.GetAttribute("name");
    if (!skeletonName.Empty())
        LoadSkeleton(GetPath(inputFileName) + GetFileName(skeletonName) + ".skeleton.xml");

    // Check whether there's a benefit in avoiding 32-bit indices by splitting each submesh into its own buffer
    XMLElement subMesh = subMeshes.GetChild("submesh");
    unsigned totalVertices = 0;
    unsigned maxSubMeshVertices = 0;
    while (subMesh)
    {
        materialNames_.Push(subMesh.GetAttribute("material"));
        XMLElement geometry = subMesh.GetChild("geometry");
        if (geometry)
        {
            unsigned vertices = geometry.GetInt("vertexcount");
            totalVertices += vertices;
            if (maxSubMeshVertices < vertices)
                maxSubMeshVertices = vertices;
        }
        ++numSubMeshes_;

        subMesh = subMesh.GetNext("submesh");
    }

    XMLElement sharedGeometry = root.GetChild("sharedgeometry");
    if (sharedGeometry)
    {
        unsigned vertices = sharedGeometry.GetInt("vertexcount");
        totalVertices += vertices;
        if (maxSubMeshVertices < vertices)
            maxSubMeshVertices = vertices;
    }

    if (!sharedGeometry && (splitSubMeshes || (totalVertices > 65535 && maxSubMeshVertices <= 65535)))
    {
        useOneBuffer_ = false;
        vertexBuffers_.Resize(numSubMeshes_);
        indexBuffers_.Resize(numSubMeshes_);
    }
    else
    {
        vertexBuffers_.Resize(1);
        indexBuffers_.Resize(1);
    }

    subMesh = subMeshes.GetChild("submesh");
    unsigned indexStart = 0;
    unsigned vertexStart = 0;
    unsigned subMeshIndex = 0;

    PODVector<unsigned> vertexStarts;
    vertexStarts.Resize(numSubMeshes_);

    while (subMesh)
    {
        XMLElement geometry = subMesh.GetChild("geometry");
        XMLElement faces = subMesh.GetChild("faces");

        // If no submesh vertexbuffer, process the shared geometry, but do it only once
        unsigned vertices = 0;
        if (!geometry)
        {
            vertexStart = 0;
            if (!subMeshIndex)
                geometry = root.GetChild("sharedgeometry");
        }

        if (geometry)
            vertices = geometry.GetInt("vertexcount");

        ModelSubGeometryLodLevel subGeometryLodLevel;
        ModelVertexBuffer* vBuf;
        ModelIndexBuffer* iBuf;

        if (useOneBuffer_)
        {
            vBuf = &vertexBuffers_[0];
            if (vertices)
                vBuf->vertices_.Resize(vertexStart + vertices);
            iBuf = &indexBuffers_[0];

            subGeometryLodLevel.vertexBuffer_ = 0;
            subGeometryLodLevel.indexBuffer_ = 0;
        }
        else
        {
            vertexStart = 0;
            indexStart = 0;

            vBuf = &vertexBuffers_[subMeshIndex];
            vBuf->vertices_.Resize(vertices);
            iBuf = &indexBuffers_[subMeshIndex];

            subGeometryLodLevel.vertexBuffer_ = subMeshIndex;
            subGeometryLodLevel.indexBuffer_ = subMeshIndex;
        }

        // Store the start vertex for later use
        vertexStarts[subMeshIndex] = vertexStart;

        // Ogre may have multiple buffers in one submesh. These will be merged into one
        XMLElement bufferDef;
        if (geometry)
            bufferDef = geometry.GetChild("vertexbuffer");

        while (bufferDef)
        {
            if (bufferDef.HasAttribute("positions"))
                vBuf->elementMask_ |= MASK_POSITION;
            if (bufferDef.HasAttribute("normals"))
                vBuf->elementMask_ |= MASK_NORMAL;
            if (bufferDef.HasAttribute("texture_coords"))
            {
                vBuf->elementMask_ |= MASK_TEXCOORD1;
                if (bufferDef.GetInt("texture_coords") > 1)
                    vBuf->elementMask_ |= MASK_TEXCOORD2;
            }

            unsigned vertexNum = vertexStart;
            if (vertices)
            {
                XMLElement vertex = bufferDef.GetChild("vertex");
                while (vertex)
                {
                    XMLElement position = vertex.GetChild("position");
                    if (position)
                    {
                        // Convert from right- to left-handed
                        float x = position.GetFloat("x");
                        float y = position.GetFloat("y");
                        float z = position.GetFloat("z");
                        Vector3 vec(x, y, -z);

                        vBuf->vertices_[vertexNum].position_ = vec;
                        boundingBox_.Merge(vec);
                    }
                    XMLElement normal = vertex.GetChild("normal");
                    if (normal)
                    {
                        // Convert from right- to left-handed
                        float x = normal.GetFloat("x");
                        float y = normal.GetFloat("y");
                        float z = normal.GetFloat("z");
                        Vector3 vec(x, y, -z);

                        vBuf->vertices_[vertexNum].normal_ = vec;
                    }
                    XMLElement uv = vertex.GetChild("texcoord");
                    if (uv)
                    {
                        float x = uv.GetFloat("u");
                        float y = uv.GetFloat("v");
                        Vector2 vec(x, y);

                        vBuf->vertices_[vertexNum].texCoord1_ = vec;

                        if (vBuf->elementMask_ & MASK_TEXCOORD2)
                        {
                            uv = uv.GetNext("texcoord");
                            if (uv)
                            {
                                float x = uv.GetFloat("u");
                                float y = uv.GetFloat("v");
                                Vector2 vec(x, y);

                                vBuf->vertices_[vertexNum].texCoord2_ = vec;
                            }
                        }
                    }

                    vertexNum++;
                    vertex = vertex.GetNext("vertex");
                }
            }

            bufferDef = bufferDef.GetNext("vertexbuffer");
        }

        unsigned triangles = faces.GetInt("count");
        unsigned indices = triangles * 3;

        XMLElement triangle = faces.GetChild("face");
        while (triangle)
        {
            unsigned v1 = triangle.GetInt("v1");
            unsigned v2 = triangle.GetInt("v2");
            unsigned v3 = triangle.GetInt("v3");
            iBuf->indices_.Push(v3 + vertexStart);
            iBuf->indices_.Push(v2 + vertexStart);
            iBuf->indices_.Push(v1 + vertexStart);
            triangle = triangle.GetNext("face");
        }

        subGeometryLodLevel.indexStart_ = indexStart;
        subGeometryLodLevel.indexCount_ = indices;
        if (vertexStart + vertices > 65535)
            iBuf->indexSize_ = sizeof(unsigned);

        XMLElement boneAssignments = subMesh.GetChild("boneassignments");
        if (bones_.Size())
        {
            if (boneAssignments)
            {
                XMLElement boneAssignment = boneAssignments.GetChild("vertexboneassignment");
                while (boneAssignment)
                {
                    unsigned vertex = boneAssignment.GetInt("vertexindex") + vertexStart;
                    unsigned bone = boneAssignment.GetInt("boneindex");
                    float weight = boneAssignment.GetFloat("weight");

                    BoneWeightAssignment assign;
                    assign.boneIndex_ = bone;
                    assign.weight_ = weight;
                    // Source data might have 0 weights. Disregard these
                    if (assign.weight_ > 0.0f)
                    {
                        subGeometryLodLevel.boneWeights_[vertex].Push(assign);

                        // Require skinning weight to be sufficiently large before vertex contributes to bone hitbox
                        if (assign.weight_ > 0.33f)
                        {
                            // Check distance of vertex from bone to get bone max. radius information
                            Vector3 bonePos = bones_[bone].derivedPosition_;
                            Vector3 vertexPos = vBuf->vertices_[vertex].position_;
                            float distance = (bonePos - vertexPos).Length();
                            if (distance > bones_[bone].radius_)
                            {
                                bones_[bone].collisionMask_ |= 1;
                                bones_[bone].radius_ = distance;
                            }
                            // Build the hitbox for the bone
                            bones_[bone].boundingBox_.Merge(bones_[bone].inverseWorldTransform_ * (vertexPos));
                            bones_[bone].collisionMask_ |= 2;
                        }
                    }
                    boneAssignment = boneAssignment.GetNext("vertexboneassignment");
                }
            }

            if ((subGeometryLodLevel.boneWeights_.Size()) && bones_.Size())
            {
                vBuf->elementMask_ |= MASK_BLENDWEIGHTS | MASK_BLENDINDICES;
                bool sorted = false;

                // If amount of bones is larger than supported by HW skinning, must remap per submesh
                if (bones_.Size() > maxBones_)
                {
                    HashMap<unsigned, unsigned> usedBoneMap;
                    unsigned remapIndex = 0;
                    for (HashMap<unsigned, PODVector<BoneWeightAssignment> >::Iterator i = subGeometryLodLevel.boneWeights_.Begin(); i != subGeometryLodLevel.boneWeights_.End(); ++i)
                    {
                        // Sort the bone assigns by weight
                        Sort(i->second_.Begin(), i->second_.End(), CompareWeights);

                        // Use only the first 4 weights
                        for (unsigned j = 0; j < i->second_.Size() && j < 4; ++j)
                        {
                            unsigned originalIndex = i->second_[j].boneIndex_;
                            if (!usedBoneMap.Contains(originalIndex))
                            {
                                usedBoneMap[originalIndex] = remapIndex;
                                remapIndex++;
                            }
                            i->second_[j].boneIndex_ = usedBoneMap[originalIndex];
                        }
                    }

                    // If still too many bones in one subgeometry, error
                    if (usedBoneMap.Size() > maxBones_)
                        ErrorExit("Too many bones (limit " + String(maxBones_) + ") in submesh " + String(subMeshIndex + 1));

                    // Write mapping of vertex buffer bone indices to original bone indices
                    subGeometryLodLevel.boneMapping_.Resize(usedBoneMap.Size());
                    for (HashMap<unsigned, unsigned>::Iterator j = usedBoneMap.Begin(); j != usedBoneMap.End(); ++j)
                        subGeometryLodLevel.boneMapping_[j->second_] = j->first_;

                    sorted = true;
                }

                for (HashMap<unsigned, PODVector<BoneWeightAssignment> >::Iterator i = subGeometryLodLevel.boneWeights_.Begin(); i != subGeometryLodLevel.boneWeights_.End(); ++i)
                {
                    // Sort the bone assigns by weight, if not sorted yet in bone remapping pass
                    if (!sorted)
                        Sort(i->second_.Begin(), i->second_.End(), CompareWeights);

                    float totalWeight = 0.0f;
                    float normalizationFactor = 0.0f;

                    // Calculate normalization factor in case there are more than 4 blend weights, or they do not add up to 1
                    for (unsigned j = 0; j < i->second_.Size() && j < 4; ++j)
                        totalWeight += i->second_[j].weight_;
                    if (totalWeight > 0.0f)
                        normalizationFactor = 1.0f / totalWeight;

                    for (unsigned j = 0; j < i->second_.Size() && j < 4; ++j)
                    {
                        vBuf->vertices_[i->first_].blendIndices_[j] = i->second_[j].boneIndex_;
                        vBuf->vertices_[i->first_].blendWeights_[j] = i->second_[j].weight_ * normalizationFactor;
                    }

                    // If there are less than 4 blend weights, fill rest with zero
                    for (unsigned j = i->second_.Size(); j < 4; ++j)
                    {
                        vBuf->vertices_[i->first_].blendIndices_[j] = 0;
                        vBuf->vertices_[i->first_].blendWeights_[j] = 0.0f;
                    }

                    vBuf->vertices_[i->first_].hasBlendWeights_ = true;
                }
            }
        }
        else if (boneAssignments)
            PrintLine("No skeleton loaded, skipping skinning information");

        // Calculate center for the subgeometry
        Vector3 center = Vector3::ZERO;
        for (unsigned i = 0; i < iBuf->indices_.Size(); i += 3)
        {
            center += vBuf->vertices_[iBuf->indices_[i]].position_;
            center += vBuf->vertices_[iBuf->indices_[i + 1]].position_;
            center += vBuf->vertices_[iBuf->indices_[i + 2]].position_;
        }
        if (iBuf->indices_.Size())
            center /= (float)iBuf->indices_.Size();
        subGeometryCenters_.Push(center);

        indexStart += indices;
        vertexStart += vertices;

        OptimizeIndices(&subGeometryLodLevel, vBuf, iBuf);

        PrintLine("Processed submesh " + String(subMeshIndex + 1) + ": " + String(vertices) + " vertices " + String(triangles) + " triangles");
        Vector<ModelSubGeometryLodLevel> thisSubGeometry;
        thisSubGeometry.Push(subGeometryLodLevel);
        subGeometries_.Push(thisSubGeometry);

        subMesh = subMesh.GetNext("submesh");
        subMeshIndex++;
    }

    // Process LOD levels, if any
    XMLElement lods = root.GetChild("levelofdetail");
    if (lods)
    {
        try
        {
            // For now, support only generated LODs, where the vertices are the same
            XMLElement lod = lods.GetChild("lodgenerated");
            while (lod)
            {
                float distance = M_EPSILON;
                if (lod.HasAttribute("fromdepthsquared"))
                    distance = sqrtf(lod.GetFloat("fromdepthsquared"));
                if (lod.HasAttribute("value"))
                    distance = lod.GetFloat("value");
                XMLElement lodSubMesh = lod.GetChild("lodfacelist");
                while (lodSubMesh)
                {
                    unsigned subMeshIndex = lodSubMesh.GetInt("submeshindex");
                    unsigned triangles = lodSubMesh.GetInt("numfaces");

                    ModelSubGeometryLodLevel newLodLevel;
                    ModelSubGeometryLodLevel& originalLodLevel = subGeometries_[subMeshIndex][0];

                    // Copy all initial values
                    newLodLevel = originalLodLevel;

                    ModelVertexBuffer* vBuf;
                    ModelIndexBuffer* iBuf;

                    if (useOneBuffer_)
                    {
                        vBuf = &vertexBuffers_[0];
                        iBuf = &indexBuffers_[0];
                    }
                    else
                    {
                        vBuf = &vertexBuffers_[subMeshIndex];
                        iBuf = &indexBuffers_[subMeshIndex];
                    }

                    unsigned indexStart = iBuf->indices_.Size();
                    unsigned indexCount = triangles * 3;
                    unsigned vertexStart = vertexStarts[subMeshIndex];

                    newLodLevel.distance_ = distance;
                    newLodLevel.indexStart_ = indexStart;
                    newLodLevel.indexCount_ = indexCount;

                    // Append indices to the original index buffer
                    XMLElement triangle = lodSubMesh.GetChild("face");
                    while (triangle)
                    {
                        unsigned v1 = triangle.GetInt("v1");
                        unsigned v2 = triangle.GetInt("v2");
                        unsigned v3 = triangle.GetInt("v3");
                        iBuf->indices_.Push(v3 + vertexStart);
                        iBuf->indices_.Push(v2 + vertexStart);
                        iBuf->indices_.Push(v1 + vertexStart);
                        triangle = triangle.GetNext("face");
                    }

                    OptimizeIndices(&newLodLevel, vBuf, iBuf);

                    subGeometries_[subMeshIndex].Push(newLodLevel);
                    PrintLine("Processed LOD level for submesh " + String(subMeshIndex + 1) + ": distance " + String(distance));

                    lodSubMesh = lodSubMesh.GetNext("lodfacelist");
                }

                lod = lod.GetNext("lodgenerated");
            }
        }
        catch (...)
        {
        }
    }

    // Process poses/morphs
    // First find out all pose definitions
    if (exportMorphs)
    {
        try
        {
            Vector<XMLElement> poses;
            XMLElement posesRoot = root.GetChild("poses");
            if (posesRoot)
            {
                XMLElement pose = posesRoot.GetChild("pose");
                while (pose)
                {
                    poses.Push(pose);
                    pose = pose.GetNext("pose");
                }
            }

            // Then process animations using the poses
            XMLElement animsRoot = root.GetChild("animations");
            if (animsRoot)
            {
                XMLElement anim = animsRoot.GetChild("animation");
                while (anim)
                {
                    String name = anim.GetAttribute("name");
                    float length = anim.GetFloat("length");
                    HashSet<unsigned> usedPoses;
                    XMLElement tracks = anim.GetChild("tracks");
                    if (tracks)
                    {
                        XMLElement track = tracks.GetChild("track");
                        while (track)
                        {
                            XMLElement keyframes = track.GetChild("keyframes");
                            if (keyframes)
                            {
                                XMLElement keyframe = keyframes.GetChild("keyframe");
                                while (keyframe)
                                {
                                    float time = keyframe.GetFloat("time");
                                    XMLElement poseref = keyframe.GetChild("poseref");
                                    // Get only the end pose
                                    if (poseref && time == length)
                                        usedPoses.Insert(poseref.GetInt("poseindex"));

                                    keyframe = keyframe.GetNext("keyframe");
                                }
                            }

                            track = track.GetNext("track");
                        }
                    }

                    if (usedPoses.Size())
                    {
                        ModelMorph newMorph;
                        newMorph.name_ = name;

                        if (useOneBuffer_)
                            newMorph.buffers_.Resize(1);
                        else
                            newMorph.buffers_.Resize(usedPoses.Size());

                        unsigned bufIndex = 0;

                        for (HashSet<unsigned>::Iterator i = usedPoses.Begin(); i != usedPoses.End(); ++i)
                        {
                            XMLElement pose = poses[*i];
                            unsigned targetSubMesh = pose.GetInt("index");
                            XMLElement poseOffset = pose.GetChild("poseoffset");

                            if (useOneBuffer_)
                                newMorph.buffers_[bufIndex].vertexBuffer_ = 0;
                            else
                                newMorph.buffers_[bufIndex].vertexBuffer_ = targetSubMesh;

                            newMorph.buffers_[bufIndex].elementMask_ = MASK_POSITION;

                            ModelVertexBuffer* vBuf = &vertexBuffers_[newMorph.buffers_[bufIndex].vertexBuffer_];

                            while (poseOffset)
                            {
                                // Convert from right- to left-handed
                                unsigned vertexIndex = poseOffset.GetInt("index") + vertexStarts[targetSubMesh];
                                float x = poseOffset.GetFloat("x");
                                float y = poseOffset.GetFloat("y");
                                float z = poseOffset.GetFloat("z");
                                Vector3 vec(x, y, -z);

                                if (vBuf->morphCount_ == 0)
                                {
                                    vBuf->morphStart_ = vertexIndex;
                                    vBuf->morphCount_ = 1;
                                }
                                else
                                {
                                    unsigned first = vBuf->morphStart_;
                                    unsigned last = first + vBuf->morphCount_ - 1;
                                    if (vertexIndex < first)
                                        first = vertexIndex;
                                    if (vertexIndex > last)
                                        last = vertexIndex;
                                    vBuf->morphStart_ = first;
                                    vBuf->morphCount_ = last - first + 1;
                                }

                                ModelVertex newVertex;
                                newVertex.position_ = vec;
                                newMorph.buffers_[bufIndex].vertices_.Push(MakePair(vertexIndex, newVertex));
                                poseOffset = poseOffset.GetNext("poseoffset");
                            }

                            if (!useOneBuffer_)
                                ++bufIndex;
                        }
                        morphs_.Push(newMorph);
                        PrintLine("Processed morph " + name + " with " + String(usedPoses.Size()) + " sub-poses");
                    }

                    anim = anim.GetNext("animation");
                }
            }
        }
        catch (...)
        {
        }
    }

    // Check any of the buffers for vertices with missing blend weight assignments
    for (unsigned i = 0; i < vertexBuffers_.Size(); ++i)
    {
        if (vertexBuffers_[i].elementMask_ & MASK_BLENDWEIGHTS)
        {
            for (unsigned j = 0; j < vertexBuffers_[i].vertices_.Size(); ++j)
                if (!vertexBuffers_[i].vertices_[j].hasBlendWeights_)
                    ErrorExit("Found a vertex with missing skinning information");
        }
    }

    // Tangent generation
    if (generateTangents)
    {
        for (unsigned i = 0; i < subGeometries_.Size(); ++i)
        {
            for (unsigned j = 0; j < subGeometries_[i].Size(); ++j)
            {
                ModelVertexBuffer& vBuf = vertexBuffers_[subGeometries_[i][j].vertexBuffer_];
                ModelIndexBuffer& iBuf = indexBuffers_[subGeometries_[i][j].indexBuffer_];
                unsigned indexStart = subGeometries_[i][j].indexStart_;
                unsigned indexCount = subGeometries_[i][j].indexCount_;

                // If already has tangents, do not regenerate
                if (vBuf.elementMask_ & MASK_TANGENT || vBuf.vertices_.Empty() || iBuf.indices_.Empty())
                    continue;

                vBuf.elementMask_ |= MASK_TANGENT;

                if ((vBuf.elementMask_ & (MASK_POSITION | MASK_NORMAL | MASK_TEXCOORD1)) != (MASK_POSITION | MASK_NORMAL | MASK_TEXCOORD1))
                    ErrorExit("To generate tangents, positions, normals and texcoords are required");

                GenerateTangents(&vBuf.vertices_[0], sizeof(ModelVertex), &iBuf.indices_[0], sizeof(unsigned), indexStart, indexCount, offsetof(ModelVertex, normal_), offsetof(ModelVertex, texCoord1_), offsetof(ModelVertex, tangent_));

                PrintLine("Generated tangents");
            }
        }
    }
}
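// LoadMesh() above keeps at most four bone influences per vertex and rescales
// them so they sum to 1. The same arithmetic in isolation, on a plain array;
// it assumes the weights are already sorted descending (as the engine's
// Sort(..., CompareWeights) pass guarantees) and that 'weights' has room for
// at least four entries:
#include <cstddef>

void NormalizeBlendWeights(float* weights, size_t count)
{
    const size_t used = count < 4 ? count : 4;

    float total = 0.0f;
    for (size_t j = 0; j < used; ++j)
        total += weights[j];

    const float factor = total > 0.0f ? 1.0f / total : 0.0f;
    for (size_t j = 0; j < used; ++j)
        weights[j] *= factor; // e.g. {0.5, 0.3} -> {0.625, 0.375}

    // Unused slots are zeroed, matching the fill loop in the code above
    for (size_t j = used; j < 4; ++j)
        weights[j] = 0.0f;
}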
int main()
{
#ifdef _MSC_VER
    _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
#endif

    // Cast sizeof/size_t results to int so the %d format specifiers are valid
    printf("Size of String: %d\n", (int)sizeof(String));
    printf("Size of Vector: %d\n", (int)sizeof(Vector<int>));
    printf("Size of List: %d\n", (int)sizeof(List<int>));
    printf("Size of HashMap: %d\n", (int)sizeof(HashMap<int, int>));
    printf("Size of RefCounted: %d\n", (int)sizeof(RefCounted));

    {
        printf("\nTesting AutoPtr assignment\n");
        AutoPtr<Test> ptr1(new Test);
        AutoPtr<Test> ptr2;
        ptr2 = ptr1;
    }

    {
        printf("\nTesting AutoPtr copy construction\n");
        AutoPtr<Test> ptr1(new Test);
        AutoPtr<Test> ptr2(ptr1);
    }

    {
        printf("\nTesting AutoPtr detaching\n");
        AutoPtr<Test> ptr1(new Test);
        // We would now have a memory leak if we don't manually delete the object
        Test* object = ptr1.Detach();
        delete object;
    }

    {
        printf("\nTesting AutoPtr inside a vector\n");
        Vector<AutoPtr<Test> > vec;
        printf("Filling vector\n");
        for (size_t i = 0; i < 4; ++i)
            vec.Push(new Test());
        printf("Clearing vector\n");
        vec.Clear();
    }

    {
        printf("\nTesting SharedPtr\n");
        SharedPtr<TestRefCounted> ptr1(new TestRefCounted);
        SharedPtr<TestRefCounted> ptr2(ptr1);
        printf("Number of refs: %d\n", ptr1.Refs());
    }

    {
        printf("\nTesting WeakPtr\n");
        TestRefCounted* object = new TestRefCounted;
        WeakPtr<TestRefCounted> ptr1(object);
        WeakPtr<TestRefCounted> ptr2(ptr1);
        printf("Number of weak refs: %d expired: %d\n", ptr1.WeakRefs(), ptr1.IsExpired());
        ptr2.Reset();
        delete object;
        printf("Number of weak refs: %d expired: %d\n", ptr1.WeakRefs(), ptr1.IsExpired());
    }

    {
        printf("\nTesting Vector\n");
        HiresTimer t;
        Vector<int> vec;
        SetRandomSeed(0);
        for (size_t i = 0; i < NUM_ITEMS; ++i)
            vec.Push(Rand());
        int sum = 0;
        int count = 0;
        for (auto it = vec.Begin(); it != vec.End(); ++it)
        {
            sum += *it;
            ++count;
        }
        int usec = (int)t.ElapsedUSec();
        printf("Size: %d capacity: %d\n", (int)vec.Size(), (int)vec.Capacity());
        printf("Counted vector items %d, sum: %d\n", count, sum);
        printf("Processing took %d usec\n", usec);
    }

    {
        printf("\nTesting List\n");
        HiresTimer t;
        List<int> list;
        SetRandomSeed(0);
        for (size_t i = 0; i < NUM_ITEMS; ++i)
            list.Push(Rand());
        int sum = 0;
        int count = 0;
        for (auto it = list.Begin(); it != list.End(); ++it)
        {
            sum += *it;
            ++count;
        }
        int usec = (int)t.ElapsedUSec();
        printf("Size: %d\n", (int)list.Size());
        printf("Counted list items %d, sum: %d\n", count, sum);
        printf("Processing took %d usec\n", usec);

        printf("\nTesting List insertion\n");
        List<int> list2;
        List<int> list3;
        for (int i = 0; i < 10; ++i)
            list3.Push(i);
        list2.Insert(list2.End(), list3);
        for (auto it = list2.Begin(); it != list2.End(); ++it)
            printf("%d ", *it);
        printf("\n");
    }

    {
        printf("\nTesting String\n");
        HiresTimer t;
        String test;
        for (size_t i = 0; i < NUM_ITEMS / 4; ++i)
            test += "Test";
        String test2;
        test2.AppendWithFormat("Size: %d capacity: %d\n", test.Length(), test.Capacity());
        // Print user-built strings via "%s" instead of using them as format strings
        printf("%s", test2.CString());
        test2 = test2.ToUpper();
        printf("%s", test2.CString());
        test2.Replace("SIZE:", "LENGTH:");
        printf("%s", test2.CString());
        int usec = (int)t.ElapsedUSec();
        printf("Processing took %d usec\n", usec);
    }

    {
        printf("\nTesting HashSet\n");
        HiresTimer t;
        size_t found = 0;
        unsigned sum = 0;
        HashSet<int> testHashSet;
        srand(0);
        found = 0;
        sum = 0;
        printf("Insert, search and iteration, %d keys\n", (int)NUM_ITEMS);
        for (size_t i = 0; i < NUM_ITEMS; ++i)
        {
            int number = (rand() & 32767);
            testHashSet.Insert(number);
        }
        for (int i = 0; i < 32768; ++i)
        {
            if (testHashSet.Find(i) != testHashSet.End())
                ++found;
        }
        for (auto it = testHashSet.Begin(); it != testHashSet.End(); ++it)
            sum += *it;
        int usec = (int)t.ElapsedUSec();
        printf("Set size and sum: %d %u\n", (int)testHashSet.Size(), sum);
        printf("Processing took %d usec\n", usec);
    }

    {
        printf("\nTesting HashMap\n");
        HashMap<int, int> testHashMap;
        for (int i = 0; i < 10; ++i)
            testHashMap.Insert(MakePair(i, rand() & 32767));
        printf("Keys: ");
        Vector<int> keys = testHashMap.Keys();
        for (size_t i = 0; i < keys.Size(); ++i)
            printf("%d ", keys[i]);
        printf("\n");
        printf("Values: ");
        Vector<int> values = testHashMap.Values();
        for (size_t i = 0; i < values.Size(); ++i)
            printf("%d ", values[i]);
        printf("\n");
    }

    return 0;
}
void NavigationMesh::CollectGeometries(Vector<NavigationGeometryInfo>& geometryList, Node* node, HashSet<Node*>& processedNodes, bool recursive)
{
    // Make sure nodes are not included twice
    if (processedNodes.Contains(node))
        return;
    // Exclude obstacles and crowd agents from consideration
    if (node->HasComponent<Obstacle>() || node->HasComponent<CrowdAgent>())
        return;
    processedNodes.Insert(node);

    Matrix3x4 inverse = node_->GetWorldTransform().Inverse();

#ifdef ATOMIC_PHYSICS
    // Prefer compatible physics collision shapes (triangle mesh, convex hull, box) if found.
    // Then fallback to visible geometry
    PODVector<CollisionShape*> collisionShapes;
    node->GetComponents<CollisionShape>(collisionShapes);
    bool collisionShapeFound = false;

    for (unsigned i = 0; i < collisionShapes.Size(); ++i)
    {
        CollisionShape* shape = collisionShapes[i];
        if (!shape->IsEnabledEffective())
            continue;

        ShapeType type = shape->GetShapeType();
        if ((type == SHAPE_BOX || type == SHAPE_TRIANGLEMESH || type == SHAPE_CONVEXHULL) && shape->GetCollisionShape())
        {
            Matrix3x4 shapeTransform(shape->GetPosition(), shape->GetRotation(), shape->GetSize());

            NavigationGeometryInfo info;
            info.component_ = shape;
            info.transform_ = inverse * node->GetWorldTransform() * shapeTransform;
            info.boundingBox_ = shape->GetWorldBoundingBox().Transformed(inverse);

            geometryList.Push(info);
            collisionShapeFound = true;
        }
    }

    if (!collisionShapeFound)
#endif
    {
        PODVector<Drawable*> drawables;
        node->GetDerivedComponents<Drawable>(drawables);

        for (unsigned i = 0; i < drawables.Size(); ++i)
        {
            /// \todo Evaluate whether should handle other types. Now StaticModel & TerrainPatch are supported, others skipped
            Drawable* drawable = drawables[i];
            if (!drawable->IsEnabledEffective())
                continue;

            NavigationGeometryInfo info;

            if (drawable->GetType() == StaticModel::GetTypeStatic())
                info.lodLevel_ = static_cast<StaticModel*>(drawable)->GetOcclusionLodLevel();
            else if (drawable->GetType() == TerrainPatch::GetTypeStatic())
                info.lodLevel_ = 0;
            else
                continue;

            info.component_ = drawable;
            info.transform_ = inverse * node->GetWorldTransform();
            info.boundingBox_ = drawable->GetWorldBoundingBox().Transformed(inverse);

            geometryList.Push(info);
        }
    }

    if (recursive)
    {
        const Vector<SharedPtr<Node> >& children = node->GetChildren();
        for (unsigned i = 0; i < children.Size(); ++i)
            CollectGeometries(geometryList, children[i], processedNodes, recursive);
    }
}
void AnimationController::SetNetAnimationsAttr(const PODVector<unsigned char>& value)
{
    MemoryBuffer buf(value);

    AnimatedModel* model = GetComponent<AnimatedModel>();

    // Check which animations we need to remove
    HashSet<StringHash> processedAnimations;

    unsigned numAnimations = buf.ReadVLE();
    while (numAnimations--)
    {
        String animName = buf.ReadString();
        StringHash animHash(animName);
        processedAnimations.Insert(animHash);

        // Check if the animation state exists. If not, add new
        AnimationState* state = GetAnimationState(animHash);
        if (!state)
        {
            Animation* newAnimation = GetSubsystem<ResourceCache>()->GetResource<Animation>(animName);
            state = AddAnimationState(newAnimation);
            if (!state)
            {
                LOGERROR("Animation update applying aborted due to unknown animation");
                return;
            }
        }

        // Check if the internal control structure exists. If not, add new
        unsigned index;
        for (index = 0; index < animations_.Size(); ++index)
        {
            if (animations_[index].hash_ == animHash)
                break;
        }
        if (index == animations_.Size())
        {
            AnimationControl newControl;
            newControl.name_ = animName;
            newControl.hash_ = animHash;
            animations_.Push(newControl);
        }

        unsigned char ctrl = buf.ReadUByte();
        state->SetLayer(buf.ReadUByte());
        state->SetLooped((ctrl & CTRL_LOOPED) != 0);
        animations_[index].speed_ = (float)buf.ReadShort() / 2048.0f; // 11 bits of decimal precision, max. 16x playback speed
        animations_[index].targetWeight_ = (float)buf.ReadUByte() / 255.0f; // 8 bits of decimal precision
        animations_[index].fadeTime_ = (float)buf.ReadUByte() / 64.0f; // 6 bits of decimal precision, max. 4 seconds fade
        if (ctrl & CTRL_STARTBONE)
        {
            StringHash boneHash = buf.ReadStringHash();
            if (model)
                state->SetStartBone(model->GetSkeleton().GetBone(boneHash));
        }
        else
            state->SetStartBone(0);
        if (ctrl & CTRL_AUTOFADE)
            animations_[index].autoFadeTime_ = (float)buf.ReadUByte() / 64.0f; // 6 bits of decimal precision, max. 4 seconds fade
        else
            animations_[index].autoFadeTime_ = 0.0f;

        animations_[index].removeOnCompletion_ = (ctrl & CTRL_REMOVEONCOMPLETION) != 0;

        if (ctrl & CTRL_SETTIME)
        {
            unsigned char setTimeRev = buf.ReadUByte();
            unsigned short setTime = buf.ReadUShort();
            // Apply set time command only if revision differs
            if (setTimeRev != animations_[index].setTimeRev_)
            {
                state->SetTime(((float)setTime / 65535.0f) * state->GetLength());
                animations_[index].setTimeRev_ = setTimeRev;
            }
        }
        if (ctrl & CTRL_SETWEIGHT)
        {
            unsigned char setWeightRev = buf.ReadUByte();
            unsigned char setWeight = buf.ReadUByte();
            // Apply set weight command only if revision differs
            if (setWeightRev != animations_[index].setWeightRev_)
            {
                state->SetWeight((float)setWeight / 255.0f);
                animations_[index].setWeightRev_ = setWeightRev;
            }
        }
    }

    // Set any extra animations to fade out
    for (Vector<AnimationControl>::Iterator i = animations_.Begin(); i != animations_.End(); ++i)
    {
        if (!processedAnimations.Contains(i->hash_))
        {
            i->targetWeight_ = 0.0f;
            i->fadeTime_ = EXTRA_ANIM_FADEOUT_TIME;
        }
    }
}
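// SetNetAnimationsAttr() above decodes quantized animation parameters: speed
// as a signed short with 11 fractional bits (value / 2048), weight as a byte
// (value / 255), and fade times as a byte with 6 fractional bits (value / 64).
// A sketch of the matching encode side, inferred from those decode factors;
// the engine's actual serializer is not shown here, so treat these helpers as
// illustrative:
#include <cstdint>

inline int16_t EncodeSpeed(float speed)   { return (int16_t)(speed * 2048.0f); } // max. ~16x playback speed
inline uint8_t EncodeWeight(float weight) { return (uint8_t)(weight * 255.0f); } // expects 0..1
inline uint8_t EncodeFade(float seconds)  { return (uint8_t)(seconds * 64.0f); } // max. ~4 seconds

inline float DecodeSpeed(int16_t v)  { return (float)v / 2048.0f; }
inline float DecodeWeight(uint8_t v) { return (float)v / 255.0f; }
inline float DecodeFade(uint8_t v)   { return (float)v / 64.0f; }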
void SceneResolver::Resolve()
{
    // Nodes do not have component or node ID attributes, so only have to go through components
    HashSet<StringHash> noIDAttributes;
    for (HashMap<unsigned, WeakPtr<Component> >::ConstIterator i = components_.Begin(); i != components_.End(); ++i)
    {
        Component* component = i->second_;
        if (!component || noIDAttributes.Contains(component->GetType()))
            continue;

        bool hasIDAttributes = false;
        const Vector<AttributeInfo>* attributes = component->GetAttributes();
        if (!attributes)
        {
            noIDAttributes.Insert(component->GetType());
            continue;
        }

        for (unsigned j = 0; j < attributes->Size(); ++j)
        {
            const AttributeInfo& info = attributes->At(j);
            if (info.mode_ & AM_NODEID)
            {
                hasIDAttributes = true;
                unsigned oldNodeID = component->GetAttribute(j).GetUInt();
                if (oldNodeID)
                {
                    HashMap<unsigned, WeakPtr<Node> >::ConstIterator k = nodes_.Find(oldNodeID);
                    if (k != nodes_.End() && k->second_)
                    {
                        unsigned newNodeID = k->second_->GetID();
                        component->SetAttribute(j, Variant(newNodeID));
                    }
                    else
                        URHO3D_LOGWARNING("Could not resolve node ID " + String(oldNodeID));
                }
            }
            else if (info.mode_ & AM_COMPONENTID)
            {
                hasIDAttributes = true;
                unsigned oldComponentID = component->GetAttribute(j).GetUInt();
                if (oldComponentID)
                {
                    HashMap<unsigned, WeakPtr<Component> >::ConstIterator k = components_.Find(oldComponentID);
                    if (k != components_.End() && k->second_)
                    {
                        unsigned newComponentID = k->second_->GetID();
                        component->SetAttribute(j, Variant(newComponentID));
                    }
                    else
                        URHO3D_LOGWARNING("Could not resolve component ID " + String(oldComponentID));
                }
            }
            else if (info.mode_ & AM_NODEIDVECTOR)
            {
                hasIDAttributes = true;
                const VariantVector& oldNodeIDs = component->GetAttribute(j).GetVariantVector();
                if (oldNodeIDs.Size())
                {
                    // The first index stores the number of IDs redundantly. This is for editing
                    unsigned numIDs = oldNodeIDs[0].GetUInt();
                    VariantVector newIDs;
                    newIDs.Push(numIDs);

                    for (unsigned k = 1; k < oldNodeIDs.Size(); ++k)
                    {
                        unsigned oldNodeID = oldNodeIDs[k].GetUInt();
                        HashMap<unsigned, WeakPtr<Node> >::ConstIterator l = nodes_.Find(oldNodeID);
                        if (l != nodes_.End() && l->second_)
                            newIDs.Push(l->second_->GetID());
                        else
                        {
                            // If node was not found, retain number of elements, just store ID 0
                            newIDs.Push(0);
                            URHO3D_LOGWARNING("Could not resolve node ID " + String(oldNodeID));
                        }
                    }

                    component->SetAttribute(j, newIDs);
                }
            }
        }

        // If component type had no ID attributes, cache this fact for optimization
        if (!hasIDAttributes)
            noIDAttributes.Insert(component->GetType());
    }

    // Attributes have been resolved, so no need to remember the nodes after this
    Reset();
}
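// Resolve() above rewrites serialized node/component IDs to the IDs actually
// assigned after loading. The core lookup in isolation, with std::unordered_map
// standing in for the engine's HashMap; as in the code above, 0 marks a
// missing or unresolvable reference:
#include <unordered_map>

unsigned RemapID(const std::unordered_map<unsigned, unsigned>& oldToNew, unsigned oldID)
{
    if (!oldID)
        return 0; // ID 0 means "no reference"; leave it alone

    std::unordered_map<unsigned, unsigned>::const_iterator it = oldToNew.find(oldID);
    return it != oldToNew.end() ? it->second : 0; // unresolved -> 0, caller may warn
}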