// Loads the building from 'fname' via the base class, then performs two
// optional post-load steps: binding a default skeleton (with an initial
// zero-time animation pose) when the top LOD mesh is skeletal, and
// resolving the destruction sound event id when the damage-lib entry
// declares a sound. Returns FALSE only when the base load fails.
BOOL obj_Building::Load(const char* fname)
{
	if (!parent::Load(fname))
		return FALSE;

	// Skeletal meshes get a default bind skeleton and an initial pose.
	if (MeshLOD[0]->IsSkeletal())
	{
		LoadSkeleton(fname, &m_BindSkeleton, &m_IsSkeletonShared);
		if (m_BindSkeleton)
		{
			m_Animation.Init(m_BindSkeleton, &m_AnimPool);
			m_Animation.Update(0.0f, r3dPoint3D(0, 0, 0), mTransform);
		}
	}

	// Resolve the destruction sound event (single combined condition).
	if (m_pDamageLibEntry && m_pDamageLibEntry->HasSound)
	{
		m_DestructionSoundID = SoundSys.GetEventIDByPath(m_pDamageLibEntry->SoundName.c_str());
	}

	return TRUE;
}
// Recursively deserializes one bone (and its whole subtree) from 'file'
// and returns the newly allocated node; 'parent' is null for the root.
//
// Binary layout per bone: uint32 name length, name bytes, Offset matrix,
// OriginalLocalTransform matrix, uint32 child count, then each child.
// Every bone is also registered in BonesByName keyed by its name.
cBone* SceneAnimator::LoadSkeleton(std::ifstream& file, cBone* parent){
	cBone* internalNode = new cBone();// create a node
	internalNode->Parent = parent; // set the parent; for the root node it will be null

	uint32_t nsize = 0;
	file.read(reinterpret_cast<char*>(&nsize), sizeof(uint32_t));// the number of chars in the name

	// FIX: the original read the name into a fixed char[250] stack buffer
	// with no bounds check, so a corrupt or hostile file with a name length
	// >= 250 overflowed the stack. Read into a string sized from the file.
	std::string boneName(nsize, '\0');
	if (nsize)
		file.read(&boneName[0], nsize);// the name of the bone
	internalNode->Name = boneName;
	BonesByName[internalNode->Name] = internalNode;// use the name as a key

	file.read(reinterpret_cast<char*>(&internalNode->Offset), sizeof(internalNode->Offset));// the bone offsets
	file.read(reinterpret_cast<char*>(&internalNode->OriginalLocalTransform), sizeof(internalNode->OriginalLocalTransform));// original bind pose
	internalNode->LocalTransform = internalNode->OriginalLocalTransform;// a copy saved
	CalculateBoneToWorldTransform(internalNode);

	file.read(reinterpret_cast<char*>(&nsize), sizeof(uint32_t));// the number of children
	// recursively call this function on all children; stop early if the
	// stream goes bad so a truncated file cannot spin forever
	for( unsigned int a = 0; a < nsize && file; a++){
		internalNode->Children.push_back(LoadSkeleton(file, internalNode));
	}
	return internalNode;
}
void Model::Load(std::fstream& fileStream) { uint_t size; //read the material data first, since mesh is dependent on material ModelDeSerializer::ReadAsBytes(fileStream, size); mMaterials.reserve(size); for (int i = 0; i < size; i++) { mMaterials.push_back(new ModelMaterial(*this, fileStream)); } //read the mesh data ModelDeSerializer::ReadAsBytes(fileStream, size); mMeshes.reserve(size); for (int i = 0; i < size; i++) { mMeshes.push_back(new Mesh(*this, fileStream)); } bool hasSkeleton; ModelDeSerializer::ReadAsBytes(fileStream, hasSkeleton); if (hasSkeleton) { ModelDeSerializer::ReadAsBytes(fileStream, size); mBones.reserve(size); for (int i = 0; i < size; i++) { Bone* bone = new Bone(*this, fileStream); mBones.push_back(bone); mBoneIndexMapping[bone->Name()] = bone->Index(); } LoadSkeleton(fileStream, mRootNode); ModelDeSerializer::ReadAsBytes(fileStream, size); mAnimations.reserve(size); for (int i = 0; i < size; i++) { AnimationClip* clip = new AnimationClip(*this, fileStream); mAnimations.push_back(clip); mAnimationsByName.insert(std::pair<std::string, AnimationClip*>(clip->Name(), clip)); } } }
void SceneAnimator::Load(std::ifstream& file){ Release();// make sure to clear this before writing new data Skeleton = LoadSkeleton(file, NULL); uint32_t nsize = 0; file.read(reinterpret_cast<char*>(&nsize), sizeof(uint32_t));// the number of animations Animations.resize(nsize); //OUTPUT_DEBUG_MSG("Extracting Animations . . "); for(uint32_t i(0); i< nsize; i++){ Animations[i].Load(file); } for(uint32_t i(0); i< Animations.size(); i++){// get all the animation names so I can reference them by name and get the correct id AnimationNameToId.insert(std::map<std::string, uint32_t>::value_type(Animations[i].Name, i)); } if(Animations.size() >0) CurrentAnimIndex =0;// set it to the first animation if there are any char bname[250]; file.read(reinterpret_cast<char*>(&nsize), sizeof(uint32_t));// the number of bones Bones.resize(nsize); for(uint32_t i(0); i< Bones.size(); i++){ file.read(reinterpret_cast<char*>(&nsize), sizeof(uint32_t));// the size of the bone name file.read(bname, nsize);// the size of the bone name bname[nsize]=0; std::map<std::string, cBone*>::iterator found = BonesByName.find(bname); BonesToIndex[found->first] = i; cBone* tep = found->second; Bones[i]=tep; } Transforms.resize( Bones.size()); float timestep = 1.0f/30.0f;// 30 per second for(size_t i(0); i< Animations.size(); i++){// pre calculate the animations SetAnimIndex((unsigned int)i); float dt = 0; for(float ticks = 0; ticks < Animations[i].Duration; ticks += Animations[i].TicksPerSecond/30.0f){ dt +=timestep; Calculate(dt); Animations[i].Transforms.push_back(std::vector<mat4>()); std::vector<mat4>& trans = Animations[i].Transforms.back(); for( size_t a = 0; a < Transforms.size(); ++a){ mat4 rotationmat = Bones[a]->Offset * Bones[a]->GlobalTransform; trans.push_back(rotationmat); } } } //OUTPUT_DEBUG_MSG("Finished loading animations with "<<Bones.size()<<" bones"); }
//load the entire BVH file void Skeleton::LoadBVHFile(char *filename) { BVH_File = fopen(filename, "r"); //When the file cannot be opened (e.g., we don't have permission or it doesn't exist when opening for reading), fopen() will return NULL. if(BVH_File == NULL) { cout << "ERROR, could not open BVH file" << endl; return; } //load the skeleton LoadSkeleton(); //load the animation animation->LoadAnimation(BVH_File, totalNumChannels); //initliaze joints for mesh skinning SetOutputJoints(); }
void Model::LoadSkeleton(std::fstream& fileStream, SceneNode* parentNode) { bool isSceneNode; ModelDeSerializer::ReadAsBytes(fileStream, isSceneNode); SceneNode* node; if (isSceneNode) { node = new SceneNode(*this, fileStream); } else { std::string name; ModelDeSerializer::ReadAsBytes(fileStream, name); //find bone and do fixup UINT boneIndex = mBoneIndexMapping[name]; node = mBones[boneIndex]; } node->SetParent(parentNode); if (parentNode != nullptr) { parentNode->Children().push_back(node); } else { mRootNode = node; } uint_t childCount; ModelDeSerializer::ReadAsBytes(fileStream, childCount); node->Children().reserve(childCount); for (int i = 0; i < childCount; i++) { LoadSkeleton(fileStream, node); } }
// Loads all graphics for this definition from group 'hGroup':
//  1. every entry matching the skeleton wildcards (binary and XML),
//  2. the main mesh (binary first, then XML), falling back to bitmap
//     graphics when neither mesh form is present,
//  3. additional numbered graphics/mesh entries, chained behind 'this'
//     as C4AdditionalDefGraphics nodes.
// Returns false only when an *additional* bitmap/mesh entry fails to
// load; failures of the primary mesh/bitmap are not checked here.
bool C4DefGraphics::Load(C4Group &hGroup, StdMeshSkeletonLoader &loader, bool fColorByOwner)
{
	char Filename[_MAX_PATH+1]; *Filename=0;

	// load skeletons: scan every entry, keep only skeleton-wildcard matches
	hGroup.ResetSearch();
	while (hGroup.FindNextEntry("*", Filename, NULL, !!*Filename))
	{
		if (!WildcardMatch(C4CFN_DefSkeleton, Filename) && !WildcardMatch(C4CFN_DefSkeletonXml, Filename)) continue;
		LoadSkeleton(hGroup, Filename, loader);
	}

	// Try from Mesh first; bitmap graphics are the last resort
	if (!LoadMesh(hGroup, C4CFN_DefMesh, loader))
		if(!LoadMesh(hGroup, C4CFN_DefMeshXml, loader))
			LoadBitmap(hGroup, C4CFN_DefGraphics, C4CFN_ClrByOwner, C4CFN_NormalMap, fColorByOwner);

	// load additional graphics
	C4DefGraphics *pLastGraphics = this;
	// position of '*' in the overlay wildcard, reused to splice filenames below
	const int32_t iOverlayWildcardPos = SCharPos('*', C4CFN_ClrByOwnerEx);
	hGroup.ResetSearch(); *Filename=0;
	// NULL-terminated list; index 0 (bitmap graphics) is treated specially below
	const char* const AdditionalGraphics[] = { C4CFN_DefGraphicsEx, C4CFN_DefGraphicsExMesh, C4CFN_DefGraphicsExMeshXml, NULL };
	while (hGroup.FindNextEntry("*", Filename, NULL, !!*Filename))
	{
		for(const char* const* szWildcard = AdditionalGraphics; *szWildcard != NULL; ++szWildcard)
		{
			if(!WildcardMatch(*szWildcard, Filename)) continue;
			// skip def graphics (already handled as the primary graphics above)
			if (SEqualNoCase(Filename, C4CFN_DefGraphics) || SEqualNoCase(Filename, C4CFN_DefMesh) || SEqualNoCase(Filename, C4CFN_DefMeshXml)) continue;
			// skip scaled def graphics
			if (WildcardMatch(C4CFN_DefGraphicsScaled, Filename)) continue;
			// get name: the part of the filename matched by the wildcard's '*'
			char GrpName[_MAX_PATH+1];
			const int32_t iWildcardPos = SCharPos('*', *szWildcard);
			SCopy(Filename + iWildcardPos, GrpName, _MAX_PATH);
			RemoveExtension(GrpName);
			// remove trailing number for scaled graphics (e.g. "Name.2" -> "Name")
			int32_t extpos; int scale;
			if ((extpos = SCharLastPos('.', GrpName)) > -1)
				if (sscanf(GrpName+extpos+1, "%d", &scale) == 1)
					GrpName[extpos] = '\0';
			// clip to max length
			GrpName[C4MaxName]=0;
			// create new graphics node and append it to the chain
			pLastGraphics->pNext = new C4AdditionalDefGraphics(pDef, GrpName);
			pLastGraphics = pLastGraphics->pNext;
			if(*szWildcard == AdditionalGraphics[0])
			{
				// create overlay-filename; only filled (and only used) when fColorByOwner
				char OverlayFn[_MAX_PATH+1];
				if(fColorByOwner)
				{
					// GraphicsX.png -> OverlayX.png
					SCopy(C4CFN_ClrByOwnerEx, OverlayFn, _MAX_PATH);
					OverlayFn[iOverlayWildcardPos]=0;
					SAppend(Filename + iWildcardPos, OverlayFn);
					EnforceExtension(OverlayFn, GetExtension(C4CFN_ClrByOwnerEx));
				}
				// create normal filename the same way (GraphicsX -> NormalX)
				char NormalFn[_MAX_PATH+1];
				SCopy(C4CFN_NormalMapEx, NormalFn, _MAX_PATH);
				NormalFn[iOverlayWildcardPos]=0;
				SAppend(Filename + iWildcardPos, NormalFn);
				EnforceExtension(NormalFn, GetExtension(C4CFN_NormalMapEx));
				// load them; failure of an additional bitmap is fatal
				if (!pLastGraphics->LoadBitmap(hGroup, Filename, fColorByOwner ? OverlayFn : NULL, NormalFn, fColorByOwner))
					return false;
			}
			else
			{
				// additional mesh (binary or XML); failure is fatal
				if (!pLastGraphics->LoadMesh(hGroup, Filename, loader)) return false;
			}
		}
	}
	// done, success
	return true;
}
// Parses an Ogre XML .mesh file into the converter's intermediate model
// state: vertex/index buffers, per-submesh LOD levels, skinning weights,
// generated LOD face lists, pose-based morphs and (optionally) tangents.
//  - inputFileName:    path to the source .mesh.xml file
//  - generateTangents: synthesize tangents for buffers that lack them
//  - splitSubMeshes:   force one vertex/index buffer per submesh
//  - exportMorphs:     also extract pose-based vertex morphs
// Calls ErrorExit() (does not return) on unrecoverable input problems.
void LoadMesh(const String& inputFileName, bool generateTangents, bool splitSubMeshes, bool exportMorphs)
{
    File meshFileSource(context_);
    meshFileSource.Open(inputFileName);
    if (!meshFile_->Load(meshFileSource))
        ErrorExit("Could not load input file " + inputFileName);

    XMLElement root = meshFile_->GetRoot("mesh");
    XMLElement subMeshes = root.GetChild("submeshes");
    XMLElement skeletonLink = root.GetChild("skeletonlink");
    if (root.IsNull())
        ErrorExit("Could not load input file " + inputFileName);

    // Load the linked skeleton (if any) from the same directory, so bone
    // data is available for the skinning pass below.
    String skeletonName = skeletonLink.GetAttribute("name");
    if (!skeletonName.Empty())
        LoadSkeleton(GetPath(inputFileName) + GetFileName(skeletonName) + ".skeleton.xml");

    // Check whether there's benefit of avoiding 32bit indices by splitting each submesh into own buffer
    XMLElement subMesh = subMeshes.GetChild("submesh");
    unsigned totalVertices = 0;
    unsigned maxSubMeshVertices = 0;
    while (subMesh)
    {
        materialNames_.Push(subMesh.GetAttribute("material"));
        XMLElement geometry = subMesh.GetChild("geometry");
        if (geometry)
        {
            unsigned vertices = geometry.GetInt("vertexcount");
            totalVertices += vertices;
            if (maxSubMeshVertices < vertices)
                maxSubMeshVertices = vertices;
        }
        ++numSubMeshes_;
        subMesh = subMesh.GetNext("submesh");
    }

    XMLElement sharedGeometry = root.GetChild("sharedgeometry");
    if (sharedGeometry)
    {
        unsigned vertices = sharedGeometry.GetInt("vertexcount");
        totalVertices += vertices;
        if (maxSubMeshVertices < vertices)
            maxSubMeshVertices = vertices;
    }

    // Per-submesh buffers are used when explicitly requested, or when it
    // keeps each buffer within 16-bit index range (and there is no shared
    // geometry, which must live in a single buffer).
    if (!sharedGeometry && (splitSubMeshes || (totalVertices > 65535 && maxSubMeshVertices <= 65535)))
    {
        useOneBuffer_ = false;
        vertexBuffers_.Resize(numSubMeshes_);
        indexBuffers_.Resize(numSubMeshes_);
    }
    else
    {
        vertexBuffers_.Resize(1);
        indexBuffers_.Resize(1);
    }

    // Second pass: actually fill the buffers, submesh by submesh.
    subMesh = subMeshes.GetChild("submesh");
    unsigned indexStart = 0;
    unsigned vertexStart = 0;
    unsigned subMeshIndex = 0;

    PODVector<unsigned> vertexStarts;
    vertexStarts.Resize(numSubMeshes_);

    while (subMesh)
    {
        XMLElement geometry = subMesh.GetChild("geometry");
        XMLElement faces = subMesh.GetChild("faces");

        // If no submesh vertexbuffer, process the shared geometry, but do it only once
        unsigned vertices = 0;
        if (!geometry)
        {
            vertexStart = 0;
            if (!subMeshIndex)
                geometry = root.GetChild("sharedgeometry");
        }
        if (geometry)
            vertices = geometry.GetInt("vertexcount");

        ModelSubGeometryLodLevel subGeometryLodLevel;
        ModelVertexBuffer* vBuf;
        ModelIndexBuffer* iBuf;
        if (useOneBuffer_)
        {
            vBuf = &vertexBuffers_[0];
            if (vertices)
                vBuf->vertices_.Resize(vertexStart + vertices);
            iBuf = &indexBuffers_[0];
            subGeometryLodLevel.vertexBuffer_ = 0;
            subGeometryLodLevel.indexBuffer_ = 0;
        }
        else
        {
            // Separate buffers: each submesh starts at offset 0.
            vertexStart = 0;
            indexStart = 0;
            vBuf = &vertexBuffers_[subMeshIndex];
            vBuf->vertices_.Resize(vertices);
            iBuf = &indexBuffers_[subMeshIndex];
            subGeometryLodLevel.vertexBuffer_ = subMeshIndex;
            subGeometryLodLevel.indexBuffer_ = subMeshIndex;
        }

        // Store the start vertex for later use (LOD face lists and morphs)
        vertexStarts[subMeshIndex] = vertexStart;

        // Ogre may have multiple buffers in one submesh. These will be merged into one
        XMLElement bufferDef;
        if (geometry)
            bufferDef = geometry.GetChild("vertexbuffer");

        while (bufferDef)
        {
            if (bufferDef.HasAttribute("positions"))
                vBuf->elementMask_ |= MASK_POSITION;
            if (bufferDef.HasAttribute("normals"))
                vBuf->elementMask_ |= MASK_NORMAL;
            if (bufferDef.HasAttribute("texture_coords"))
            {
                vBuf->elementMask_ |= MASK_TEXCOORD1;
                if (bufferDef.GetInt("texture_coords") > 1)
                    vBuf->elementMask_ |= MASK_TEXCOORD2;
            }

            unsigned vertexNum = vertexStart;
            if (vertices)
            {
                XMLElement vertex = bufferDef.GetChild("vertex");
                while (vertex)
                {
                    XMLElement position = vertex.GetChild("position");
                    if (position)
                    {
                        // Convert from right- to left-handed (negate Z)
                        float x = position.GetFloat("x");
                        float y = position.GetFloat("y");
                        float z = position.GetFloat("z");
                        Vector3 vec(x, y, -z);
                        vBuf->vertices_[vertexNum].position_ = vec;
                        boundingBox_.Merge(vec);
                    }
                    XMLElement normal = vertex.GetChild("normal");
                    if (normal)
                    {
                        // Convert from right- to left-handed (negate Z)
                        float x = normal.GetFloat("x");
                        float y = normal.GetFloat("y");
                        float z = normal.GetFloat("z");
                        Vector3 vec(x, y, -z);
                        vBuf->vertices_[vertexNum].normal_ = vec;
                    }
                    XMLElement uv = vertex.GetChild("texcoord");
                    if (uv)
                    {
                        float x = uv.GetFloat("u");
                        float y = uv.GetFloat("v");
                        Vector2 vec(x, y);
                        vBuf->vertices_[vertexNum].texCoord1_ = vec;

                        if (vBuf->elementMask_ & MASK_TEXCOORD2)
                        {
                            uv = uv.GetNext("texcoord");
                            if (uv)
                            {
                                float x = uv.GetFloat("u");
                                float y = uv.GetFloat("v");
                                Vector2 vec(x, y);
                                vBuf->vertices_[vertexNum].texCoord2_ = vec;
                            }
                        }
                    }
                    vertexNum++;
                    vertex = vertex.GetNext("vertex");
                }
            }
            bufferDef = bufferDef.GetNext("vertexbuffer");
        }

        // Faces: winding is reversed (v3,v2,v1) for the handedness flip.
        unsigned triangles = faces.GetInt("count");
        unsigned indices = triangles * 3;

        XMLElement triangle = faces.GetChild("face");
        while (triangle)
        {
            unsigned v1 = triangle.GetInt("v1");
            unsigned v2 = triangle.GetInt("v2");
            unsigned v3 = triangle.GetInt("v3");
            iBuf->indices_.Push(v3 + vertexStart);
            iBuf->indices_.Push(v2 + vertexStart);
            iBuf->indices_.Push(v1 + vertexStart);
            triangle = triangle.GetNext("face");
        }

        subGeometryLodLevel.indexStart_ = indexStart;
        subGeometryLodLevel.indexCount_ = indices;
        // Promote to 32-bit indices once the buffer exceeds 16-bit range.
        if (vertexStart + vertices > 65535)
            iBuf->indexSize_ = sizeof(unsigned);

        XMLElement boneAssignments = subMesh.GetChild("boneassignments");
        if (bones_.Size())
        {
            if (boneAssignments)
            {
                XMLElement boneAssignment = boneAssignments.GetChild("vertexboneassignment");
                while (boneAssignment)
                {
                    unsigned vertex = boneAssignment.GetInt("vertexindex") + vertexStart;
                    unsigned bone = boneAssignment.GetInt("boneindex");
                    float weight = boneAssignment.GetFloat("weight");

                    BoneWeightAssignment assign;
                    assign.boneIndex_ = bone;
                    assign.weight_ = weight;
                    // Source data might have 0 weights. Disregard these
                    if (assign.weight_ > 0.0f)
                    {
                        subGeometryLodLevel.boneWeights_[vertex].Push(assign);

                        // Require skinning weight to be sufficiently large before vertex contributes to bone hitbox
                        if (assign.weight_ > 0.33f)
                        {
                            // Check distance of vertex from bone to get bone max. radius information
                            Vector3 bonePos = bones_[bone].derivedPosition_;
                            Vector3 vertexPos = vBuf->vertices_[vertex].position_;
                            float distance = (bonePos - vertexPos).Length();
                            if (distance > bones_[bone].radius_)
                            {
                                bones_[bone].collisionMask_ |= 1;
                                bones_[bone].radius_ = distance;
                            }
                            // Build the hitbox for the bone
                            bones_[bone].boundingBox_.Merge(bones_[bone].inverseWorldTransform_ * (vertexPos));
                            bones_[bone].collisionMask_ |= 2;
                        }
                    }
                    boneAssignment = boneAssignment.GetNext("vertexboneassignment");
                }
            }

            if ((subGeometryLodLevel.boneWeights_.Size()) && bones_.Size())
            {
                vBuf->elementMask_ |= MASK_BLENDWEIGHTS | MASK_BLENDINDICES;
                bool sorted = false;

                // If amount of bones is larger than supported by HW skinning, must remap per submesh
                if (bones_.Size() > maxBones_)
                {
                    HashMap<unsigned, unsigned> usedBoneMap;
                    unsigned remapIndex = 0;
                    for (HashMap<unsigned, PODVector<BoneWeightAssignment> >::Iterator i = subGeometryLodLevel.boneWeights_.Begin(); i != subGeometryLodLevel.boneWeights_.End(); ++i)
                    {
                        // Sort the bone assigns by weight
                        Sort(i->second_.Begin(), i->second_.End(), CompareWeights);

                        // Use only the first 4 weights
                        for (unsigned j = 0; j < i->second_.Size() && j < 4; ++j)
                        {
                            unsigned originalIndex = i->second_[j].boneIndex_;
                            if (!usedBoneMap.Contains(originalIndex))
                            {
                                usedBoneMap[originalIndex] = remapIndex;
                                remapIndex++;
                            }
                            i->second_[j].boneIndex_ = usedBoneMap[originalIndex];
                        }
                    }

                    // If still too many bones in one subgeometry, error
                    if (usedBoneMap.Size() > maxBones_)
                        ErrorExit("Too many bones (limit " + String(maxBones_) + ") in submesh " + String(subMeshIndex + 1));

                    // Write mapping of vertex buffer bone indices to original bone indices
                    subGeometryLodLevel.boneMapping_.Resize(usedBoneMap.Size());
                    for (HashMap<unsigned, unsigned>::Iterator j = usedBoneMap.Begin(); j != usedBoneMap.End(); ++j)
                        subGeometryLodLevel.boneMapping_[j->second_] = j->first_;

                    sorted = true;
                }

                for (HashMap<unsigned, PODVector<BoneWeightAssignment> >::Iterator i = subGeometryLodLevel.boneWeights_.Begin(); i != subGeometryLodLevel.boneWeights_.End(); ++i)
                {
                    // Sort the bone assigns by weight, if not sorted yet in bone remapping pass
                    if (!sorted)
                        Sort(i->second_.Begin(), i->second_.End(), CompareWeights);

                    float totalWeight = 0.0f;
                    float normalizationFactor = 0.0f;

                    // Calculate normalization factor in case there are more than 4 blend weights, or they do not add up to 1
                    for (unsigned j = 0; j < i->second_.Size() && j < 4; ++j)
                        totalWeight += i->second_[j].weight_;
                    if (totalWeight > 0.0f)
                        normalizationFactor = 1.0f / totalWeight;

                    for (unsigned j = 0; j < i->second_.Size() && j < 4; ++j)
                    {
                        vBuf->vertices_[i->first_].blendIndices_[j] = i->second_[j].boneIndex_;
                        vBuf->vertices_[i->first_].blendWeights_[j] = i->second_[j].weight_ * normalizationFactor;
                    }

                    // If there are less than 4 blend weights, fill rest with zero
                    for (unsigned j = i->second_.Size(); j < 4; ++j)
                    {
                        vBuf->vertices_[i->first_].blendIndices_[j] = 0;
                        vBuf->vertices_[i->first_].blendWeights_[j] = 0.0f;
                    }

                    vBuf->vertices_[i->first_].hasBlendWeights_ = true;
                }
            }
        }
        else if (boneAssignments)
            PrintLine("No skeleton loaded, skipping skinning information");

        // Calculate center for the subgeometry
        Vector3 center = Vector3::ZERO;
        for (unsigned i = 0; i < iBuf->indices_.Size(); i += 3)
        {
            center += vBuf->vertices_[iBuf->indices_[i]].position_;
            center += vBuf->vertices_[iBuf->indices_[i + 1]].position_;
            center += vBuf->vertices_[iBuf->indices_[i + 2]].position_;
        }
        if (iBuf->indices_.Size())
            center /= (float)iBuf->indices_.Size();
        subGeometryCenters_.Push(center);

        indexStart += indices;
        vertexStart += vertices;

        OptimizeIndices(&subGeometryLodLevel, vBuf, iBuf);

        PrintLine("Processed submesh " + String(subMeshIndex + 1) + ": " + String(vertices) + " vertices " + String(triangles) + " triangles");
        Vector<ModelSubGeometryLodLevel> thisSubGeometry;
        thisSubGeometry.Push(subGeometryLodLevel);
        subGeometries_.Push(thisSubGeometry);

        subMesh = subMesh.GetNext("submesh");
        subMeshIndex++;
    }

    // Process LOD levels, if any
    XMLElement lods = root.GetChild("levelofdetail");
    if (lods)
    {
        // NOTE(review): the try/catch swallows all exceptions so a malformed
        // LOD section is skipped rather than aborting the whole conversion.
        try
        {
            // For now, support only generated LODs, where the vertices are the same
            XMLElement lod = lods.GetChild("lodgenerated");
            while (lod)
            {
                float distance = M_EPSILON;
                if (lod.HasAttribute("fromdepthsquared"))
                    distance = sqrtf(lod.GetFloat("fromdepthsquared"));
                if (lod.HasAttribute("value"))
                    distance = lod.GetFloat("value");
                XMLElement lodSubMesh = lod.GetChild("lodfacelist");
                while (lodSubMesh)
                {
                    unsigned subMeshIndex = lodSubMesh.GetInt("submeshindex");
                    unsigned triangles = lodSubMesh.GetInt("numfaces");

                    ModelSubGeometryLodLevel newLodLevel;
                    ModelSubGeometryLodLevel& originalLodLevel = subGeometries_[subMeshIndex][0];

                    // Copy all initial values
                    newLodLevel = originalLodLevel;

                    ModelVertexBuffer* vBuf;
                    ModelIndexBuffer* iBuf;
                    if (useOneBuffer_)
                    {
                        vBuf = &vertexBuffers_[0];
                        iBuf = &indexBuffers_[0];
                    }
                    else
                    {
                        vBuf = &vertexBuffers_[subMeshIndex];
                        iBuf = &indexBuffers_[subMeshIndex];
                    }

                    unsigned indexStart = iBuf->indices_.Size();
                    unsigned indexCount = triangles * 3;
                    unsigned vertexStart = vertexStarts[subMeshIndex];

                    newLodLevel.distance_ = distance;
                    newLodLevel.indexStart_ = indexStart;
                    newLodLevel.indexCount_ = indexCount;

                    // Append indices to the original index buffer (reversed
                    // winding, same as the base-LOD faces above)
                    XMLElement triangle = lodSubMesh.GetChild("face");
                    while (triangle)
                    {
                        unsigned v1 = triangle.GetInt("v1");
                        unsigned v2 = triangle.GetInt("v2");
                        unsigned v3 = triangle.GetInt("v3");
                        iBuf->indices_.Push(v3 + vertexStart);
                        iBuf->indices_.Push(v2 + vertexStart);
                        iBuf->indices_.Push(v1 + vertexStart);
                        triangle = triangle.GetNext("face");
                    }

                    OptimizeIndices(&newLodLevel, vBuf, iBuf);

                    subGeometries_[subMeshIndex].Push(newLodLevel);
                    PrintLine("Processed LOD level for submesh " + String(subMeshIndex + 1) + ": distance " + String(distance));

                    lodSubMesh = lodSubMesh.GetNext("lodfacelist");
                }
                lod = lod.GetNext("lodgenerated");
            }
        }
        catch (...)
        {}
    }

    // Process poses/morphs
    // First find out all pose definitions
    if (exportMorphs)
    {
        // NOTE(review): malformed morph data is likewise skipped silently.
        try
        {
            Vector<XMLElement> poses;
            XMLElement posesRoot = root.GetChild("poses");
            if (posesRoot)
            {
                XMLElement pose = posesRoot.GetChild("pose");
                while (pose)
                {
                    poses.Push(pose);
                    pose = pose.GetNext("pose");
                }
            }

            // Then process animations using the poses
            XMLElement animsRoot = root.GetChild("animations");
            if (animsRoot)
            {
                XMLElement anim = animsRoot.GetChild("animation");
                while (anim)
                {
                    String name = anim.GetAttribute("name");
                    float length = anim.GetFloat("length");
                    HashSet<unsigned> usedPoses;
                    XMLElement tracks = anim.GetChild("tracks");
                    if (tracks)
                    {
                        XMLElement track = tracks.GetChild("track");
                        while (track)
                        {
                            XMLElement keyframes = track.GetChild("keyframes");
                            if (keyframes)
                            {
                                XMLElement keyframe = keyframes.GetChild("keyframe");
                                while (keyframe)
                                {
                                    float time = keyframe.GetFloat("time");
                                    XMLElement poseref = keyframe.GetChild("poseref");
                                    // Get only the end pose
                                    if (poseref && time == length)
                                        usedPoses.Insert(poseref.GetInt("poseindex"));
                                    keyframe = keyframe.GetNext("keyframe");
                                }
                            }
                            track = track.GetNext("track");
                        }
                    }

                    if (usedPoses.Size())
                    {
                        ModelMorph newMorph;
                        newMorph.name_ = name;

                        if (useOneBuffer_)
                            newMorph.buffers_.Resize(1);
                        else
                            newMorph.buffers_.Resize(usedPoses.Size());

                        unsigned bufIndex = 0;

                        for (HashSet<unsigned>::Iterator i = usedPoses.Begin(); i != usedPoses.End(); ++i)
                        {
                            XMLElement pose = poses[*i];
                            unsigned targetSubMesh = pose.GetInt("index");
                            XMLElement poseOffset = pose.GetChild("poseoffset");

                            if (useOneBuffer_)
                                newMorph.buffers_[bufIndex].vertexBuffer_ = 0;
                            else
                                newMorph.buffers_[bufIndex].vertexBuffer_ = targetSubMesh;

                            newMorph.buffers_[bufIndex].elementMask_ = MASK_POSITION;
                            ModelVertexBuffer* vBuf = &vertexBuffers_[newMorph.buffers_[bufIndex].vertexBuffer_];

                            while (poseOffset)
                            {
                                // Convert from right- to left-handed
                                unsigned vertexIndex = poseOffset.GetInt("index") + vertexStarts[targetSubMesh];
                                float x = poseOffset.GetFloat("x");
                                float y = poseOffset.GetFloat("y");
                                float z = poseOffset.GetFloat("z");
                                Vector3 vec(x, y, -z);

                                // Grow the buffer's morph range to cover this vertex
                                if (vBuf->morphCount_ == 0)
                                {
                                    vBuf->morphStart_ = vertexIndex;
                                    vBuf->morphCount_ = 1;
                                }
                                else
                                {
                                    unsigned first = vBuf->morphStart_;
                                    unsigned last = first + vBuf->morphCount_ - 1;
                                    if (vertexIndex < first)
                                        first = vertexIndex;
                                    if (vertexIndex > last)
                                        last = vertexIndex;
                                    vBuf->morphStart_ = first;
                                    vBuf->morphCount_ = last - first + 1;
                                }

                                ModelVertex newVertex;
                                newVertex.position_ = vec;
                                newMorph.buffers_[bufIndex].vertices_.Push(MakePair(vertexIndex, newVertex));
                                poseOffset = poseOffset.GetNext("poseoffset");
                            }

                            if (!useOneBuffer_)
                                ++bufIndex;
                        }
                        morphs_.Push(newMorph);
                        PrintLine("Processed morph " + name + " with " + String(usedPoses.Size()) + " sub-poses");
                    }
                    anim = anim.GetNext("animation");
                }
            }
        }
        catch (...)
        {}
    }

    // Check any of the buffers for vertices with missing blend weight assignments
    for (unsigned i = 0; i < vertexBuffers_.Size(); ++i)
    {
        if (vertexBuffers_[i].elementMask_ & MASK_BLENDWEIGHTS)
        {
            for (unsigned j = 0; j < vertexBuffers_[i].vertices_.Size(); ++j)
                if (!vertexBuffers_[i].vertices_[j].hasBlendWeights_)
                    ErrorExit("Found a vertex with missing skinning information");
        }
    }

    // Tangent generation
    if (generateTangents)
    {
        for (unsigned i = 0; i < subGeometries_.Size(); ++i)
        {
            for (unsigned j = 0; j < subGeometries_[i].Size(); ++j)
            {
                ModelVertexBuffer& vBuf = vertexBuffers_[subGeometries_[i][j].vertexBuffer_];
                ModelIndexBuffer& iBuf = indexBuffers_[subGeometries_[i][j].indexBuffer_];
                unsigned indexStart = subGeometries_[i][j].indexStart_;
                unsigned indexCount = subGeometries_[i][j].indexCount_;

                // If already has tangents, do not regenerate
                if (vBuf.elementMask_ & MASK_TANGENT || vBuf.vertices_.Empty() || iBuf.indices_.Empty())
                    continue;

                vBuf.elementMask_ |= MASK_TANGENT;

                if ((vBuf.elementMask_ & (MASK_POSITION | MASK_NORMAL | MASK_TEXCOORD1)) != (MASK_POSITION | MASK_NORMAL | MASK_TEXCOORD1))
                    ErrorExit("To generate tangents, positions normals and texcoords are required");

                GenerateTangents(&vBuf.vertices_[0], sizeof(ModelVertex), &iBuf.indices_[0], sizeof(unsigned), indexStart, indexCount, offsetof(ModelVertex, normal_), offsetof(ModelVertex, texCoord1_), offsetof(ModelVertex, tangent_));

                PrintLine("Generated tangents");
            }
        }
    }
}
// Deserializes an MDL (v0) model from 'stream' and returns a newly
// allocated SMesh, or 0 on a null stream / bad magic / wrong version.
// Layout: file header, optional skeleton, then per object: name, mesh
// header, index stream (header + raw data), vertex streams, and an
// optional skin cluster with per-bone weight arrays.
// NOTE(review): the stream is closed here on success — callers must not
// reuse it; on the early error returns it is left open. TODO confirm.
scene::SMesh* mdlLoaderV0::load(OS::IStream* stream)
{
	if(!stream) return 0;

	core::string tmpStr;
	OS::StreamReader reader(stream);
	MDLLoaderV0::MDLHeader header;
	MDLLoaderV0::MeshHeader meshHeader;
	MDLLoaderV0::SkinClusterHeader skinHeader;
	MDLLoaderV0::SkinClusterBoneHeader skinBoneHeader;
	MDLLoaderV0::StreamHeader streamHeader;
	MDLLoaderV0::IndexStreamHeader indexStreamHeader;

	// Validate the file header before allocating anything.
	stream->read(&header,sizeof(header));
	if(header.magic!=0x1A1B)
	{
		gLogManager.log(mT("mdlLoaderV0::load() - Incorrect file format"),ELL_WARNING);
		return 0;
	}
	if(header.version!=0x10)
	{
		gLogManager.log(mT("mdlLoaderV0::load() - Incorrect file version"),ELL_WARNING);
		return 0;
	}

	scene::SMesh* mesh=new scene::SMesh();
	// Skeleton must be loaded first: skin clusters below reference it.
	if(header.hasSkeleton)
	{
		mesh->setSkeleton(LoadSkeleton(stream));
	}

	for(int buffId=0;buffId<header.objectsCount;++buffId)
	{
		scene::MeshBufferData* buffData=mesh->addNewBuffer();
		scene::IMeshBuffer* buffer=buffData->getMeshBuffer();

		// Per-object name + mesh header.
		core::stringc str=reader.binReadStringC();
		stream->read(&meshHeader,sizeof(meshHeader));
		core::char_to_string(str.c_str(),tmpStr);
		buffer->setMeshName(tmpStr);
		buffer->setBoundingBox(meshHeader.boundingBox);
		buffer->setRenderType(meshHeader.renderType);

		// Index buffer: header, then the raw index data copied straight
		// into the locked hardware buffer. 'software' is forced off —
		// buffers are always created as hardware buffers on load.
		stream->read(&indexStreamHeader,sizeof(indexStreamHeader));
		indexStreamHeader.software=false;
		buffer->createIndexBuffer(indexStreamHeader.indexType,indexStreamHeader.numIndicies,indexStreamHeader.usage,indexStreamHeader.useVirtualBuffer,indexStreamHeader.software);
		buffer->getIndexData()->firstIndex=indexStreamHeader.firstIndex;
		buffer->getIndexData()->indexCount=indexStreamHeader.indexCount;
		video::IHardwareIndexBuffer* indexBuffer=buffer->getIndexData()->indexBuffer.pointer();
		void*data= indexBuffer->lock(0,0,video::IHardwareBuffer::ELO_Discard);
		stream->read(data,indexBuffer->getSizeInBytes());
		indexBuffer->unlock();

		// Vertex streams: same lock-read-unlock pattern per stream.
		for(int i=0;i<meshHeader.streamsCount;++i)
		{
			stream->read(&streamHeader,sizeof(streamHeader));
			streamHeader.software=false;
			video::IHardwareStreamBuffer* streamBuff=buffer->createStream(streamHeader.streamIndex,streamHeader.streamType,streamHeader.dataType,streamHeader.length,streamHeader.usage,streamHeader.useVirtualBuffer,streamHeader.software);
			void* data=streamBuff->lock(0,0,video::IHardwareBuffer::ELO_Discard);
			stream->read(data,streamBuff->getSizeInBytes());
			streamBuff->unlock();
		}

		// Optional skin cluster: per-bone vertex weight arrays.
		if(meshHeader.hasSkin)
		{
			stream->read(&skinHeader,sizeof(skinHeader));
			scene::ISkinCluster* cluster= buffer->getSkinCluster();
			if(!cluster)
			{
				// Cluster is bound to the skeleton loaded above.
				cluster=new scene::ISkinCluster(mesh->getSkeleton());
				buffer->setSkinCluster(cluster);
			}
			for(int i=0;i<skinHeader.bonesCount;++i)
			{
				stream->read(&skinBoneHeader,sizeof(skinBoneHeader));
				scene::ISkinCluster::SBoneWeights* weights= cluster->getBoneWeights(skinBoneHeader.index);
				weights->vertices.resize(skinBoneHeader.vertices);
				if(skinBoneHeader.vertices>0)
					stream->read(&weights->vertices[0],sizeof(float)*skinBoneHeader.vertices);
			}
		}
	}
	stream->close();
	mesh->calcCurrBoundingBox();
	return mesh;
}
WorldObject::WorldObject(
	__in ResourceManager & ResMan,
	__in IDebugTextOut * TextWriter,
	__in const std::vector< std::string > & MDBResRefs,
	__in const std::string & GR2ResRef
	)
/*++

Routine Description:

	This routine constructs a new WorldObject, which represents an object
	with models that is present in model space.

Arguments:

	ResMan - Supplies the resource manager instance to use to load the
	         model data.

	TextWriter - Supplies the debug text output writer.

	MDBResRefs - Supplies the resource names of the model files to load.

	GR2ResRef - Supplies the resource name of the skeleton file to load.

Return Value:

	The newly constructed object.  The routine raises an std::exception on
	failure, such as a failure to load or parse the model in question.

Environment:

	User mode.

--*/
: m_ResMan( ResMan ),
  m_TextWriter( TextWriter ),
  m_Facing( PI / 2) // Straight north
{
	//
	// Begin from the identity world transform.
	//

	memcpy( &m_WorldTrans, &NWN::Matrix44::IDENTITY, sizeof( m_WorldTrans ) );

	//
	// Default orientation and scale: +Z up, +Y heading, unit scale.
	//

	m_Up.x      = 0.0f;
	m_Up.y      = 0.0f;
	m_Up.z      = 1.0f;

	m_Heading.x = 0.0f;
	m_Heading.y = 1.0f;
	m_Heading.z = 0.0f;

	m_Scale.x   = 1.0f;
	m_Scale.y   = 1.0f;
	m_Scale.z   = 1.0f;

	//
	// Now load resources: the skeleton first, then every model part.
	//

	m_Skeleton = LoadSkeleton( m_ResMan.ResRef32FromStr( GR2ResRef ) );

	m_ModelParts.reserve( MDBResRefs.size( ) );

	for (size_t i = 0; i < MDBResRefs.size( ); i += 1)
	{
		m_ModelParts.push_back( LoadModel( m_ResMan.ResRef32FromStr( MDBResRefs[ i ] ) ) );
	}
}