/// Duktape binding: read one value from the script-side Deserializer ("this").
/// The duk "magic" value attached to the bound function selects which typed
/// read is performed. Pushes the read value (or false/undefined on failure)
/// and returns 1 (one value left on the duktape value stack).
static int Deserializer_Read(duk_context* ctx)
{
    duk_int_t magic = duk_get_current_magic(ctx);
    duk_push_this(ctx);
    // safe cast based on type check above
    Deserializer* deserial = CastToDeserializer(ctx, duk_get_top_index(ctx));
    duk_pop(ctx);
    if (!deserial)
    {
        // Not a Deserializer instance: report failure to script as 'false'.
        duk_push_boolean(ctx, 0);
        return 1;
    }
    char* data;
    String str;
    size_t length;
    IO_MAGIC_TYPE v = (IO_MAGIC_TYPE) magic;
    bool success = false;  // NOTE(review): computed but never reported to script
    switch(v)
    {
    case IO_MAGIC_INT:
        duk_push_number(ctx, (double) deserial->ReadInt());
        return 1;
    case IO_MAGIC_STRING:
        // Non-zero-terminated string: consume everything remaining in the stream.
        length = deserial->GetSize() - deserial->GetPosition();
        str.Resize(length + 1);
        deserial->Read(&str[0], length);
        str[length] = '\0';
        duk_push_string(ctx, str.CString());
        return 1;
    case IO_MAGIC_ZEROSTRING:
        // Length-prefixed/zero-terminated string handled by Deserializer itself.
        success = duk_push_string(ctx, deserial->ReadString().CString());
        return 1;
    case IO_MAGIC_BINARY:
        // Read the remainder of the stream into a Uint8Array backed by a fixed buffer.
        length = deserial->GetSize() - deserial->GetPosition();
        duk_push_fixed_buffer(ctx, length);
        duk_push_buffer_object(ctx, -1, 0, length, DUK_BUFOBJ_UINT8ARRAY);
        duk_replace(ctx, -2);
        // NOTE(review): index 0 assumes the call site passes no arguments, so the
        // buffer object just pushed sits at the stack bottom — confirm against bindings.
        data = (char*) duk_require_buffer_data(ctx, 0, &length);
        success = deserial->Read(data, length);
        return 1;
    default:
        break;
    }
    // Unknown magic: return undefined.
    duk_push_undefined(ctx);
    return 1;
}
/// Read serialized tile-cache tiles from the stream until EOF, add them to the
/// tile cache, then build the nav-mesh tiles and (optionally) send one
/// NavigationTileAdded event per distinct tile coordinate.
/// @param source Stream positioned at the first tile record.
/// @param silent When true, suppress the per-tile events.
/// @return False on allocation failure or if the tile cache rejects a tile.
bool DynamicNavigationMesh::ReadTiles(Deserializer& source, bool silent)
{
    tileQueue_.Clear();
    while (!source.IsEof())
    {
        // Each record: layer header, payload size, then the payload itself.
        dtTileCacheLayerHeader header; // NOLINT(hicpp-member-init)
        source.Read(&header, sizeof(dtTileCacheLayerHeader));
        const int dataSize = source.ReadInt();
        // Detour frees this with DT_TILE_FREE_DATA on success; we free it only on error.
        auto* data = (unsigned char*)dtAlloc(dataSize, DT_ALLOC_PERM);
        if (!data)
        {
            URHO3D_LOGERROR("Could not allocate data for navigation mesh tile");
            return false;
        }
        // NOTE(review): Read return values are unchecked — a truncated stream
        // would hand a partially-filled buffer to addTile. Confirm callers validate size.
        source.Read(data, (unsigned)dataSize);
        if (dtStatusFailed(tileCache_->addTile(data, dataSize, DT_TILE_FREE_DATA, nullptr)))
        {
            URHO3D_LOGERROR("Failed to add tile");
            dtFree(data);
            return false;
        }
        // Collect distinct tile coordinates (consecutive duplicates collapsed)
        // for the build/event pass below.
        const IntVector2 tileIdx = IntVector2(header.tx, header.ty);
        if (tileQueue_.Empty() || tileQueue_.Back() != tileIdx)
            tileQueue_.Push(tileIdx);
    }

    for (unsigned i = 0; i < tileQueue_.Size(); ++i)
        tileCache_->buildNavMeshTilesAt(tileQueue_[i].x_, tileQueue_[i].y_, navMesh_);

    tileCache_->update(0, navMesh_);

    // Send event
    if (!silent)
    {
        for (unsigned i = 0; i < tileQueue_.Size(); ++i)
        {
            using namespace NavigationTileAdded;
            VariantMap& eventData = GetContext()->GetEventDataMap();
            eventData[P_NODE] = GetNode();
            eventData[P_MESH] = this;
            eventData[P_TILE] = tileQueue_[i];
            SendEvent(E_NAVIGATION_TILE_ADDED, eventData);
        }
    }

    return true;
}
/// Compress everything remaining in 'src' with LZ4-HC and write it to 'dest',
/// prefixed by the unpacked and packed sizes so DecompressStream() knows how
/// much to buffer. Returns true on success (an empty source is a success and
/// writes a 0/0 header).
bool CompressStream(Serializer& dest, Deserializer& src)
{
    const unsigned inputSize = src.GetSize() - src.GetPosition();
    // Prepend the source and dest. data size in the stream so that we know to buffer & uncompress the right amount
    if (!inputSize)
    {
        dest.WriteUInt(0);
        dest.WriteUInt(0);
        return true;
    }

    const unsigned packedCapacity = LZ4_compressBound(inputSize);
    SharedArrayPtr<unsigned char> inputBuffer(new unsigned char[inputSize]);
    SharedArrayPtr<unsigned char> packedBuffer(new unsigned char[packedCapacity]);

    if (src.Read(inputBuffer, inputSize) != inputSize)
        return false;

    const unsigned packedSize = LZ4_compressHC((const char*)inputBuffer.Get(), (char*)packedBuffer.Get(), inputSize);

    // Deliberately attempt every write (no short-circuit) and report combined status.
    bool allWritten = dest.WriteUInt(inputSize);
    allWritten &= dest.WriteUInt(packedSize);
    allWritten &= dest.Write(packedBuffer, packedSize) == packedSize;
    return allWritten;
}
/// Load Ogg Vorbis data from the stream, keeping it compressed in memory.
/// Stores length, frequency and stereo flag from the stream header.
/// @return False if the data cannot be read in full or is not valid Vorbis.
bool Sound::LoadOggVorbis(Deserializer& source)
{
    unsigned dataSize = source.GetSize();
    SharedArrayPtr<signed char> data(new signed char[dataSize]);
    // Fix: verify the read actually delivered the whole payload. A short read
    // previously handed an uninitialized tail to stb_vorbis.
    if (source.Read(data.Get(), dataSize) != dataSize)
    {
        LOGERROR("Could not read Ogg Vorbis data from " + source.GetName());
        return false;
    }

    // Check for validity of data
    int error;
    stb_vorbis* vorbis = stb_vorbis_open_memory((unsigned char*)data.Get(), dataSize, &error, 0);
    if (!vorbis)
    {
        LOGERROR("Could not read Ogg Vorbis data from " + source.GetName());
        return false;
    }

    // Store length, frequency and stereo flag
    stb_vorbis_info info = stb_vorbis_get_info(vorbis);
    compressedLength_ = stb_vorbis_stream_length_in_seconds(vorbis);
    frequency_ = info.sample_rate;
    stereo_ = info.channels > 1;
    stb_vorbis_close(vorbis);

    // Keep the compressed data; decoding happens on demand.
    data_ = data;
    dataSize_ = dataSize;
    sixteenBit_ = true;
    compressed_ = true;

    SetMemoryUse(dataSize);
    return true;
}
/// Load raw font data from the stream and classify the font type by file
/// extension. In headless mode (no Graphics subsystem) the load is skipped
/// and reported as success.
bool Font::Load(Deserializer& source)
{
    PROFILE(LoadFont);

    // Headless mode: nothing to load, report success.
    Graphics* graphics = GetSubsystem<Graphics>();
    if (!graphics)
        return true;

    faces_.Clear();

    fontDataSize_ = source.GetSize();
    if (!fontDataSize_)
    {
        fontData_.Reset();
        return false;
    }

    fontData_ = new unsigned char[fontDataSize_];
    if (source.Read(&fontData_[0], fontDataSize_) != fontDataSize_)
        return false;

    // Classify by extension (case-insensitive).
    String ext = GetExtension(GetName()).ToLower();
    if (ext == ".ttf")
        fontType_ = FONT_TTF;
    else if (ext == ".xml" || ext == ".fnt")
        fontType_ = FONT_BITMAP;

    SetMemoryUse(fontDataSize_);
    return true;
}
/// Read the whole stream into a null-terminated buffer, parse it with
/// rapidjson, and convert the document into the resource's root JSON value.
/// @return False on empty named source, short read, or parse error.
bool JSONFile::BeginLoad(Deserializer& source)
{
    const unsigned dataSize = source.GetSize();
    if (!dataSize && !source.GetName().Empty())
    {
        LOGERROR("Zero sized JSON data in " + source.GetName());
        return false;
    }

    // One extra byte for the terminator rapidjson's in-situ-free parse expects.
    SharedArrayPtr<char> jsonText(new char[dataSize + 1]);
    if (source.Read(jsonText.Get(), dataSize) != dataSize)
        return false;
    jsonText[dataSize] = '\0';

    rapidjson::Document parsed;
    if (parsed.Parse<0>(jsonText).HasParseError())
    {
        LOGERROR("Could not parse JSON data from " + source.GetName());
        return false;
    }

    ToJSONValue(root_, parsed);

    SetMemoryUse(dataSize);
    return true;
}
/// Read the whole stream into memory and parse it with pugixml.
/// @return False on empty named source, short read, or parse failure.
bool XMLFile::Load(Deserializer& source)
{
    PROFILE(LoadXMLFile);

    const unsigned dataSize = source.GetSize();
    if (!dataSize && !source.GetName().Empty())
    {
        LOGERROR("Zero sized XML data in " + source.GetName());
        return false;
    }

    SharedArrayPtr<char> xmlText(new char[dataSize]);
    if (source.Read(xmlText.Get(), dataSize) != dataSize)
        return false;

    if (!document_->load_buffer(xmlText.Get(), dataSize))
    {
        LOGERROR("Could not parse XML data from " + source.GetName());
        return false;
    }

    // Note: this probably does not reflect internal data structure size accurately
    SetMemoryUse(dataSize);
    return true;
}
/// Replace the buffer contents with up to 'size' bytes read from 'source',
/// shrinking to the number of bytes actually read, and rewind the position.
void VectorBuffer::SetData(Deserializer& source, unsigned size)
{
    buffer_.Resize(size);
    // Fix: don't form &buffer_[0] on an empty buffer (undefined behavior when
    // size == 0); skip the read entirely in that case.
    unsigned actualSize = size ? source.Read(&buffer_[0], size) : 0;
    if (actualSize != size)
        buffer_.Resize(actualSize);

    position_ = 0;
    size_ = actualSize;
}
/// Load the remainder of a binary component data buffer verbatim into
/// binaryAttributes_ (the component type was already consumed by the caller).
/// @return True on success; an empty remainder is a successful no-op load.
bool UnknownComponent::Load(Deserializer& source, bool setInstanceDefault)
{
    useXML_ = false;
    xmlAttributes_.Clear();
    xmlAttributeInfos_.Clear();

    // Assume we are reading from a component data buffer, and the type has already been read
    unsigned remaining = source.GetSize() - source.GetPosition();
    binaryAttributes_.Resize(remaining);
    if (!remaining)
        return true;
    return source.Read(&binaryAttributes_[0], remaining) == remaining;
}
/// Parse the stream with pugixml and, if the root element carries an
/// 'inherit' attribute (RFC 5261 patch file), load the inherited XML file
/// and apply this document as a patch over it.
/// @return False on empty named source, short read, parse failure, or a
///         missing inherited resource.
bool XMLFile::BeginLoad(Deserializer& source)
{
    unsigned dataSize = source.GetSize();
    if (!dataSize && !source.GetName().Empty())
    {
        URHO3D_LOGERROR("Zero sized XML data in " + source.GetName());
        return false;
    }

    SharedArrayPtr<char> buffer(new char[dataSize]);
    if (source.Read(buffer.Get(), dataSize) != dataSize)
        return false;

    if (!document_->load_buffer(buffer.Get(), dataSize))
    {
        URHO3D_LOGERROR("Could not parse XML data from " + source.GetName());
        // Leave the document empty rather than half-parsed.
        document_->reset();
        return false;
    }

    XMLElement rootElem = GetRoot();
    String inherit = rootElem.GetAttribute("inherit");
    if (!inherit.Empty())
    {
        // The existence of this attribute indicates this is an RFC 5261 patch file
        ResourceCache* cache = GetSubsystem<ResourceCache>();
        // If being async loaded, GetResource() is not safe, so use GetTempResource() instead
        XMLFile* inheritedXMLFile = GetAsyncLoadState() == ASYNC_DONE ? cache->GetResource<XMLFile>(inherit) :
            cache->GetTempResource<XMLFile>(inherit);
        if (!inheritedXMLFile)
        {
            URHO3D_LOGERRORF("Could not find inherited XML file: %s", inherit.CString());
            return false;
        }

        // Patch this XMLFile and leave the original inherited XMLFile as it is
        // (copy the base document, then apply the just-parsed document as a patch on top).
        pugi::xml_document* patchDocument = document_;
        document_ = new pugi::xml_document();
        document_->reset(*inheritedXMLFile->document_);
        Patch(rootElem);
        delete patchDocument;

        // Store resource dependencies so we know when to reload/repatch when the inherited resource changes
        cache->StoreResourceDependency(this, inherit);

        // Approximate patched data size
        dataSize += inheritedXMLFile->GetMemoryUse();
    }

    // Note: this probably does not reflect internal data structure size accurately
    SetMemoryUse(dataSize);
    return true;
}
/// Deserialize the particle system component from its main chunk: particle
/// system file path plus the isPlaying / playOnStartup flags.
void ParticleSystemComponent::Deserialize(Deserializer &deserializer)
{
    Serialization::ChunkHeader header;
    deserializer.Read(header);

    if (header.id == Serialization::ChunkID_ParticleSystemComponent_Main)
    {
        String relativeFilePath;
        deserializer.ReadString(relativeFilePath);
        this->SetParticleSystem(relativeFilePath.c_str());

        deserializer.Read(this->isPlaying);
        deserializer.Read(this->playOnStartup);
    }
    else
    {
        g_Context->LogErrorf("Error deserializing particle system component. Unknown chunk ID (%d).", header.id);

        // Fix: skip over the unrecognized chunk so the stream stays aligned for
        // subsequent reads, matching the other Deserialize() implementations.
        deserializer.Seek(header.sizeInBytes);
    }
}
/// Deserialize animation playback state using an iteration-based chunk
/// reader: consume chunks until the null terminator chunk is seen, skipping
/// any chunk type we don't recognize.
void Animation::Deserialize(Deserializer &deserializer)
{
    Serialization::ChunkHeader header;
    do
    {
        deserializer.Read(header);

        if (header.id == Serialization::ChunkID_Animation_PlaybackState)
        {
            this->currentAnimationTrack.Deserialize(deserializer);

            deserializer.Read(this->isPlaying);
            deserializer.Read(this->isPaused);
            deserializer.Read(this->playbackTime);

            uint32_t loopStartQueueIndex;
            deserializer.Read(loopStartQueueIndex);
            this->loopStartIndex = static_cast<size_t>(loopStartQueueIndex);
        }
        else
        {
            // Unknown chunk (including the null chunk's payload): skip past it.
            deserializer.Seek(header.sizeInBytes);
        }
    } while (header.id != Serialization::ChunkID_Null);
}
/// Duktape binding: read one value from the script-side Deserializer ("this").
/// The duk "magic" value attached to the bound function selects which typed
/// read is performed. Pushes the read value (or false/undefined) and returns 1.
static int Deserializer_Read(duk_context* ctx)
{
    duk_int_t magic = duk_get_current_magic(ctx);
    duk_push_this(ctx);
    // safe cast based on type check above
    Deserializer* deserial = CastToDeserializer(ctx, duk_get_top_index(ctx));
    duk_pop(ctx);

    if (!deserial)
    {
        // Not a Deserializer instance: report failure to script as 'false'.
        duk_push_boolean(ctx, 0);
        return 1;
    }

    // Cleanup: the unused PODVector<unsigned char> and the dead 'success'
    // flag were removed; locals now live only in the case that needs them.
    switch ((IO_MAGIC_TYPE) magic)
    {
    case IO_MAGIC_INT:
        duk_push_number(ctx, (double) deserial->ReadInt());
        return 1;

    case IO_MAGIC_STRING:
        {
            // Non-zero-terminated string: consume everything remaining in the stream.
            size_t length = deserial->GetSize() - deserial->GetPosition();
            String str;
            str.Resize(length + 1);
            deserial->Read(&str[0], length);
            str[length] = '\0';
            duk_push_string(ctx, str.CString());
            return 1;
        }

    case IO_MAGIC_ZEROSTRING:
        // Length-prefixed/zero-terminated string handled by Deserializer itself.
        duk_push_string(ctx, deserial->ReadString().CString());
        return 1;

    default:
        break;
    }

    // Unknown magic: return undefined.
    duk_push_undefined(ctx);
    return 1;
}
/// Read the whole Lua source/bytecode stream into memory; actual
/// compilation/execution happens later.
/// @return False if the stream is empty or the read comes up short.
bool LuaFile::BeginLoad(Deserializer& source)
{
    size_ = source.GetSize();
    if (size_ == 0)
        return false;

    // Read all data.
    data_ = new char[size_];
    if (source.Read(data_, size_) != size_)
        return false;

    SetMemoryUse(size_);
    return true;
}
/// Decompress an LZ4 stream written by CompressStream() (unpacked size,
/// packed size, then packed bytes) from 'src' into 'dest'.
/// @return True on success; a 0/0 header is an empty-success.
bool DecompressStream(Serializer& dest, Deserializer& src)
{
    if (src.IsEof())
        return false;

    unsigned destSize = src.ReadUInt();
    unsigned srcSize = src.ReadUInt();
    if (!srcSize || !destSize)
        return true; // No data

    if (srcSize > src.GetSize())
        return false; // Illegal source (packed data) size reported, possibly not valid data

    SharedArrayPtr<unsigned char> srcBuffer(new unsigned char[srcSize]);
    SharedArrayPtr<unsigned char> destBuffer(new unsigned char[destSize]);

    if (src.Read(srcBuffer, srcSize) != srcSize)
        return false;

    // Fix: use the bounds-checked decompressor. LZ4_decompress_fast is
    // deprecated and can read/write out of bounds on corrupt or malicious
    // packed data; _safe validates against both buffer sizes and reports errors.
    if (LZ4_decompress_safe((const char*)srcBuffer.Get(), (char*)destBuffer.Get(), (int)srcSize, (int)destSize) < 0)
        return false;

    return dest.Write(destBuffer, destSize) == destSize;
}
/// Load raw font data and classify the font type by extension
/// (FreeType-rendered vs. prebuilt bitmap, with SDF as a bitmap variant).
/// In headless mode (no Graphics subsystem) the load is skipped and
/// reported as success.
bool Font::BeginLoad(Deserializer& source)
{
    // In headless mode, do not actually load, just return success
    Graphics* graphics = GetSubsystem<Graphics>();
    if (!graphics)
        return true;

    // Reset classification before re-detecting below.
    fontType_ = FONT_NONE;
    faces_.Clear();

    fontDataSize_ = source.GetSize();
    if (fontDataSize_)
    {
        fontData_ = new unsigned char[fontDataSize_];
        if (source.Read(&fontData_[0], fontDataSize_) != fontDataSize_)
            return false;
    }
    else
    {
        // Empty stream: clear any stale data and fail the load.
        fontData_.Reset();
        return false;
    }

    String ext = GetExtension(GetName());
    if (ext == ".ttf" || ext == ".otf" || ext == ".woff")
    {
        fontType_ = FONT_FREETYPE;
        LoadParameters();
    }
    else if (ext == ".xml" || ext == ".fnt" || ext == ".sdf")
        fontType_ = FONT_BITMAP;

    // Signed-distance-field fonts are bitmap fonts with special shading.
    sdfFont_ = ext == ".sdf";

    SetMemoryUse(fontDataSize_);
    return true;
}
/// Load the skeleton's bone list from the stream: per bone its name,
/// parent index, initial transform, offset matrix, and optional collision
/// shape data. A bone whose parent index equals its own index is the root.
/// @return False only if the stream is already at EOF.
bool Skeleton::Load(Deserializer& source)
{
    ClearBones();

    if (source.IsEof())
        return false;

    unsigned bones = source.ReadUInt();
    bones_.Reserve(bones);

    for (unsigned i = 0; i < bones; ++i)
    {
        Bone newBone;
        newBone.name_ = source.ReadString();
        newBone.nameHash_ = newBone.name_;
        newBone.parentIndex_ = source.ReadUInt();
        newBone.initialPosition_ = source.ReadVector3();
        newBone.initialRotation_ = source.ReadQuaternion();
        newBone.initialScale_ = source.ReadVector3();
        // Offset matrix is stored as raw Matrix3x4 bytes.
        source.Read(&newBone.offsetMatrix_.m00_, sizeof(Matrix3x4));

        // Read bone collision data
        newBone.collisionMask_ = source.ReadUByte();
        if (newBone.collisionMask_ & BONECOLLISION_SPHERE)
            newBone.radius_ = source.ReadFloat();
        if (newBone.collisionMask_ & BONECOLLISION_BOX)
            newBone.boundingBox_ = source.ReadBoundingBox();

        // Self-parented bone marks the skeleton root.
        if (newBone.parentIndex_ == i)
            rootBoneIndex_ = i;

        bones_.Push(newBone);
    }

    return true;
}
/// Begin loading an AngelScript file: create a fresh script module, then
/// either buffer precompiled bytecode (fileID "ASBC") for EndLoad(), or
/// pre-scan the source text for #include directives via AddScriptSection().
/// @return False if the module cannot be created or a script section fails.
bool ScriptFile::BeginLoad(Deserializer& source)
{
    ReleaseModule();
    loadByteCode_.Reset();

    asIScriptEngine* engine = script_->GetScriptEngine();

    {
        // Module creation mutates engine-wide state, so serialize access.
        MutexLock lock(script_->GetModuleMutex());

        // Create the module. Discard previous module if there was one
        scriptModule_ = engine->GetModule(GetName().CString(), asGM_ALWAYS_CREATE);
        if (!scriptModule_)
        {
            LOGERROR("Failed to create script module " + GetName());
            return false;
        }
    }

    // Check if this file is precompiled bytecode
    if (source.ReadFileID() == "ASBC")
    {
        // Perform actual parsing in EndLoad(); read data now
        loadByteCodeSize_ = source.GetSize() - source.GetPosition();
        loadByteCode_ = new unsigned char[loadByteCodeSize_];
        source.Read(loadByteCode_.Get(), loadByteCodeSize_);
        return true;
    }
    else
        source.Seek(0); // Rewind: the 4-byte ID probe consumed part of the source text.

    // Not bytecode: add the initial section and check for includes.
    // Perform actual building during EndLoad(), as AngelScript can not multithread module compilation,
    // and static initializers may access arbitrary engine functionality which may not be thread-safe
    return AddScriptSection(engine, source);
}
bool ScriptFile::AddScriptSection(asIScriptEngine* engine, Deserializer& source) { ResourceCache* cache = GetSubsystem<ResourceCache>(); unsigned dataSize = source.GetSize(); SharedArrayPtr<char> buffer(new char[dataSize]); source.Read((void*)buffer.Get(), dataSize); // Pre-parse for includes // Adapted from Angelscript's scriptbuilder add-on Vector<String> includeFiles; unsigned pos = 0; while(pos < dataSize) { int len; asETokenClass t = engine->ParseToken(&buffer[pos], dataSize - pos, &len); if (t == asTC_COMMENT || t == asTC_WHITESPACE) { pos += len; continue; } // Is this a preprocessor directive? if (buffer[pos] == '#') { int start = pos++; asETokenClass t = engine->ParseToken(&buffer[pos], dataSize - pos, &len); if (t == asTC_IDENTIFIER) { String token(&buffer[pos], len); if (token == "include") { pos += len; t = engine->ParseToken(&buffer[pos], dataSize - pos, &len); if (t == asTC_WHITESPACE) { pos += len; t = engine->ParseToken(&buffer[pos], dataSize - pos, &len); } if (t == asTC_VALUE && len > 2 && buffer[pos] == '"') { // Get the include file String includeFile(&buffer[pos+1], len - 2); pos += len; // If the file is not found as it is, add the path of current file but only if it is found there if (!cache->Exists(includeFile)) { String prefixedIncludeFile = GetPath(GetName()) + includeFile; if (cache->Exists(prefixedIncludeFile)) includeFile = prefixedIncludeFile; } String includeFileLower = includeFile.ToLower(); // If not included yet, store it for later processing if (!includeFiles_.Contains(includeFileLower)) { includeFiles_.Insert(includeFileLower); includeFiles.Push(includeFile); } // Overwrite the include directive with space characters to avoid compiler error memset(&buffer[start], ' ', pos - start); } } } } // Don't search includes within statement blocks or between tokens in statements else { int len; // Skip until ; or { whichever comes first while (pos < dataSize && buffer[pos] != ';' && buffer[pos] != '{') { engine->ParseToken(&buffer[pos], 0, 
&len); pos += len; } // Skip entire statement block if (pos < dataSize && buffer[pos] == '{') { ++pos; // Find the end of the statement block int level = 1; while (level > 0 && pos < dataSize) { asETokenClass t = engine->ParseToken(&buffer[pos], 0, &len); if (t == asTC_KEYWORD) { if (buffer[pos] == '{') ++level; else if(buffer[pos] == '}') --level; } pos += len; } } else ++pos; } } // Process includes first for (unsigned i = 0; i < includeFiles.Size(); ++i) { cache->StoreResourceDependency(this, includeFiles[i]); SharedPtr<File> file = cache->GetFile(includeFiles[i]); if (file) { if (!AddScriptSection(engine, *file)) return false; } else { LOGERROR("Could not process all the include directives in " + GetName() + ": missing " + includeFiles[i]); return false; } } // Then add this section if (scriptModule_->AddScriptSection(source.GetName().CString(), (const char*)buffer.Get(), dataSize) < 0) { LOGERROR("Failed to add script section " + source.GetName()); return false; } SetMemoryUse(GetMemoryUse() + dataSize); return true; }
/// Deserialize the staging area from its chunk: four counted lists —
/// inserts (node IDs), deletes (node ID + a serialized node blob each),
/// updates (node IDs), and hierarchy (child/parent ID pairs).
void SceneStateStackStagingArea::Deserialize(Deserializer &deserializer)
{
    // We should clear the staging area just in case.
    this->Clear();

    Serialization::ChunkHeader header;
    deserializer.Read(header);
    {
        assert(header.id == Serialization::ChunkID_SceneStateStackStagingArea);
        {
            switch (header.version)
            {
            case 1:
                {
                    // Inserts.
                    uint32_t insertsCount;
                    deserializer.Read(insertsCount);
                    for (uint32_t i = 0; i < insertsCount; ++i)
                    {
                        uint64_t sceneNodeID;
                        deserializer.Read(sceneNodeID);
                        this->inserts.PushBack(sceneNodeID);
                    }

                    // Deletes.
                    uint32_t deletesCount;
                    deserializer.Read(deletesCount);
                    for (uint32_t i = 0; i < deletesCount; ++i)
                    {
                        uint64_t sceneNodeID;
                        deserializer.Read(sceneNodeID);

                        // The next chunk of data is the serialized data of the scene node. What we do here is ready the data into a temp buffer, and then
                        // write that to a new BasicSerializer object.
                        uint32_t serializerSizeInBytes;
                        deserializer.Read(serializerSizeInBytes);

                        void* serializerData = malloc(serializerSizeInBytes);
                        deserializer.Read(serializerData, serializerSizeInBytes);

                        // 'deletes' takes ownership of the BasicSerializer; the temp buffer is freed here.
                        auto sceneNodeSerializer = new BasicSerializer;
                        sceneNodeSerializer->Write(serializerData, serializerSizeInBytes);

                        this->deletes.Add(sceneNodeID, sceneNodeSerializer);
                        free(serializerData);
                    }

                    // Updates.
                    uint32_t updatesCount;
                    deserializer.Read(updatesCount);
                    for (uint32_t i = 0; i < updatesCount; ++i)
                    {
                        uint64_t sceneNodeID;
                        deserializer.Read(sceneNodeID);
                        this->updates.PushBack(sceneNodeID);
                    }

                    // Hierarchy: child node ID mapped to its parent node ID.
                    uint32_t hierarchyCount;
                    deserializer.Read(hierarchyCount);
                    for (uint32_t i = 0; i < hierarchyCount; ++i)
                    {
                        uint64_t sceneNodeID;
                        deserializer.Read(sceneNodeID);

                        uint64_t parentSceneNodeID;
                        deserializer.Read(parentSceneNodeID);

                        this->hierarchy.Add(sceneNodeID, parentSceneNodeID);
                    }

                    break;
                }
            default:
                {
                    // Unsupported version: log and skip the whole chunk payload.
                    g_Context->Logf("Error deserializing SceneStateStackStagingArea. The main chunk is an unsupported version (%d).", header.version);
                    deserializer.Seek(header.sizeInBytes);
                    break;
                }
            }
        }
    }
}
/// Deserialize a model definition from a chunked stream. Each chunk is
/// Peek()ed first so the header can be inspected without consuming it, then
/// Seek(sizeof(header)) commits to reading it. Recognized chunks: bones,
/// meshes, animation keyframes, animation segments/sequences, convex hulls,
/// and the null terminator. An unknown chunk aborts with false.
bool ModelDefinition::Deserialize(Deserializer &deserializer)
{
    // Clear everything.
    this->ClearMeshes();
    this->ClearBones();
    this->ClearAnimations(true); // <-- 'true' = clear animation segments, too.
    this->ClearConvexHulls();

    // We keep looping until we hit the null or unknown chunk.
    Serialization::ChunkHeader header;
    while (deserializer.Peek(&header, sizeof(header)) == sizeof(header))
    {
        bool finished = false;

        switch (header.id)
        {
        case Serialization::ChunkID_Model_Bones:
            {
                // Commit to this chunk (skip past the peeked header).
                deserializer.Seek(sizeof(header));

                if (header.version == 1)
                {
                    uint32_t boneCount;
                    deserializer.Read(boneCount);

                    for (uint32_t iBone = 0; iBone < boneCount; ++iBone)
                    {
                        // Name.
                        String name;
                        deserializer.ReadString(name);

                        // Local transform.
                        glm::vec3 position;
                        glm::quat rotation;
                        glm::vec3 scale;
                        deserializer.Read(position);
                        deserializer.Read(rotation);
                        deserializer.Read(scale);

                        // 4x4 offset matrix.
                        glm::mat4 offsetMatrix;
                        deserializer.Read(offsetMatrix);

                        auto bone = new Bone;
                        bone->SetName(name.c_str());
                        bone->SetPosition(position);
                        bone->SetRotation(rotation);
                        bone->SetScale(scale);
                        bone->SetOffsetMatrix(offsetMatrix);
                        this->AddBone(bone);

                        // We need to create a channel for this bone. We then need to map that channel to a bone.
                        auto &channel = m_animation.CreateChannel();
                        this->animationChannelBones.Add(bone, &channel);
                    }

                    // Parents: flat index array, 0xFFFFFFFF marks a root (no parent).
                    auto boneParentIndices = static_cast<uint32_t*>(malloc(boneCount * sizeof(uint32_t)));
                    deserializer.Read(boneParentIndices, boneCount * sizeof(uint32_t));

                    for (uint32_t iBone = 0; iBone < boneCount; ++iBone)
                    {
                        uint32_t parentIndex = boneParentIndices[iBone];
                        if (parentIndex != static_cast<uint32_t>(-1))
                        {
                            m_bones[parentIndex]->AttachChild(*m_bones[iBone]);
                        }
                    }

                    free(boneParentIndices);
                }
                else
                {
                    // Unsupported Version.
                    deserializer.Seek(header.sizeInBytes);
                }

                break;
            }

        case Serialization::ChunkID_Model_Meshes:
            {
                deserializer.Seek(sizeof(header));

                if (header.version == 1)
                {
                    uint32_t meshCount;
                    deserializer.Read(meshCount);

                    for (uint32_t iMesh = 0; iMesh < meshCount; ++iMesh)
                    {
                        ModelDefinition::Mesh newMesh(m_context);

                        // Name.
                        deserializer.ReadString(newMesh.name);

                        // Material
                        String materialName;
                        deserializer.ReadString(materialName);
                        newMesh.material = m_context.GetMaterialLibrary().Create(materialName.c_str());

                        // Geometry
                        VertexFormat vertexFormat;
                        vertexFormat.Deserialize(deserializer);
                        newMesh.geometry = Renderer::CreateVertexArray(VertexArrayUsage_Static, vertexFormat);

                        // Vertices. SetVertexData(nullptr, n) sizes the buffer; the
                        // map/read/unmap fills it in place without an extra copy.
                        uint32_t vertexCount;
                        deserializer.Read(vertexCount);
                        if (vertexCount > 0)
                        {
                            newMesh.geometry->SetVertexData(nullptr, static_cast<size_t>(vertexCount));
                            auto vertexData = newMesh.geometry->MapVertexData();
                            {
                                deserializer.Read(vertexData, vertexCount * vertexFormat.GetSizeInBytes());
                            }
                            newMesh.geometry->UnmapVertexData();
                        }

                        // Indices.
                        uint32_t indexCount;
                        deserializer.Read(indexCount);
                        if (indexCount > 0)
                        {
                            newMesh.geometry->SetIndexData(nullptr, static_cast<size_t>(indexCount));
                            auto indexData = newMesh.geometry->MapIndexData();
                            {
                                deserializer.Read(indexData, indexCount * sizeof(uint32_t));
                            }
                            newMesh.geometry->UnmapIndexData();
                        }

                        // Skinning Vertex Attributes: stored as a per-vertex bone-count
                        // array followed by one flat array of all bone/weight pairs.
                        uint32_t skinningVertexAttributeCount;
                        deserializer.Read(skinningVertexAttributeCount);
                        if (skinningVertexAttributeCount > 0)
                        {
                            newMesh.skinningVertexAttributes = new SkinningVertexAttribute[skinningVertexAttributeCount];

                            auto counts = static_cast<uint16_t*>(malloc(skinningVertexAttributeCount * sizeof(uint16_t)));
                            deserializer.Read(counts, skinningVertexAttributeCount * sizeof(uint16_t));

                            uint32_t totalBoneWeights;
                            deserializer.Read(totalBoneWeights);

                            auto boneWeights = static_cast<BoneWeightPair*>(malloc(totalBoneWeights * sizeof(BoneWeightPair)));
                            deserializer.Read(boneWeights, totalBoneWeights * sizeof(BoneWeightPair));

                            auto currentBoneWeight = boneWeights;
                            for (uint32_t iVertex = 0; iVertex < skinningVertexAttributeCount; ++iVertex)
                            {
                                auto count = counts[iVertex];

                                // Here we allocate the buffer for the bones. We trick the vector here by modifying attributes directly.
                                newMesh.skinningVertexAttributes[iVertex].bones.Reserve(count);
                                newMesh.skinningVertexAttributes[iVertex].bones.count = count;

                                for (uint16_t iBone = 0; iBone < count; ++iBone)
                                {
                                    newMesh.skinningVertexAttributes[iVertex].bones[iBone] = *currentBoneWeight++;
                                }
                            }

                            free(counts);
                            free(boneWeights);
                        }

                        // Uniforms.
                        newMesh.defaultUniforms.Deserialize(deserializer);

                        // Finally, add the mesh.
                        this->AddMesh(newMesh);
                    }
                }
                else
                {
                    // Unsupported Version.
                    deserializer.Seek(header.sizeInBytes);
                }

                break;
            }

        case Serialization::ChunkID_Model_Animation:
            {
                deserializer.Seek(sizeof(header));

                if (header.version == 1)
                {
                    uint32_t keyFrameCount;
                    deserializer.Read(keyFrameCount);

                    for (size_t iKeyFrame = 0; iKeyFrame < keyFrameCount; ++iKeyFrame)
                    {
                        float time;
                        deserializer.Read(time);

                        size_t keyFrameIndex = m_animation.AppendKeyFrame(static_cast<double>(time));

                        // With the key frame added, we now need to iterate over each channel in the key frame.
                        uint32_t channelCount;
                        deserializer.Read(channelCount);

                        for (uint32_t iChannel = 0; iChannel < channelCount; ++iChannel)
                        {
                            // Channels are keyed by bone index; the channel object was
                            // created in the bones chunk, which must precede this one.
                            uint32_t boneIndex;
                            deserializer.Read(boneIndex);

                            auto bone = m_bones[boneIndex];
                            assert(bone != nullptr);
                            {
                                auto iChannelBone = this->animationChannelBones.Find(bone);
                                assert(iChannelBone != nullptr);
                                {
                                    auto channel = iChannelBone->value;

                                    glm::vec3 position;
                                    glm::quat rotation;
                                    glm::vec3 scale;
                                    deserializer.Read(position);
                                    deserializer.Read(rotation);
                                    deserializer.Read(scale);

                                    auto key = new TransformAnimationKey(position, rotation, scale);
                                    channel->SetKey(keyFrameIndex, key);

                                    // We need to cache the key.
                                    this->animationKeyCache.PushBack(key);
                                }
                            }
                        }
                    }

                    deserializer.Read(this->animationAABBPadding);
                }
                else
                {
                    // Unsupported Version.
                    deserializer.Seek(header.sizeInBytes);
                }

                break;
            }

        case Serialization::ChunkID_Model_AnimationSegments:
            {
                deserializer.Seek(sizeof(header));

                if (header.version == 1)
                {
                    uint32_t animationSegmentCount;
                    deserializer.Read(animationSegmentCount);

                    for (uint32_t iSegment = 0; iSegment < animationSegmentCount; ++iSegment)
                    {
                        String name;
                        uint32_t startKeyFrame;
                        uint32_t endKeyFrame;

                        deserializer.ReadString(name);
                        deserializer.Read(startKeyFrame);
                        deserializer.Read(endKeyFrame);

                        m_animation.AddNamedSegment(name.c_str(), static_cast<size_t>(startKeyFrame), static_cast<size_t>(endKeyFrame));
                    }
                }
                else
                {
                    // Unsupported Version.
                    deserializer.Seek(header.sizeInBytes);
                }

                break;
            }

        case Serialization::ChunkID_Model_AnimationSequences:
            {
                deserializer.Seek(sizeof(header));

                if (header.version == 1)
                {
                    // Version 1 carries no payload for this chunk.
                }
                else
                {
                    // Unsupported Version.
                    deserializer.Seek(header.sizeInBytes);
                }

                break;
            }

        case Serialization::ChunkID_Model_ConvexHulls:
            {
                deserializer.Seek(sizeof(header));

                if (header.version == 1)
                {
                    uint32_t convexHullCount;
                    deserializer.Read(convexHullCount);

                    // Per-hull counts first, then one flat vertex array and one
                    // flat index array shared by all hulls.
                    uint32_t* vertexCounts = static_cast<uint32_t*>(malloc(convexHullCount * sizeof(uint32_t)));
                    uint32_t* indexCounts  = static_cast<uint32_t*>(malloc(convexHullCount * sizeof(uint32_t)));
                    deserializer.Read(vertexCounts, convexHullCount * sizeof(uint32_t));
                    deserializer.Read(indexCounts,  convexHullCount * sizeof(uint32_t));

                    uint32_t totalVertexCount;
                    deserializer.Read(totalVertexCount);

                    uint32_t totalIndexCount;
                    deserializer.Read(totalIndexCount);

                    auto vertices = static_cast<float*   >(malloc(totalVertexCount * sizeof(float)));
                    auto indices  = static_cast<uint32_t*>(malloc(totalIndexCount * sizeof(uint32_t)));
                    deserializer.Read(vertices, totalVertexCount * sizeof(float));
                    deserializer.Read(indices,  totalIndexCount * sizeof(uint32_t));

                    auto currentVertices = vertices;
                    auto currentIndices  = indices;

                    for (uint32_t iConvexHull = 0; iConvexHull < convexHullCount; ++iConvexHull)
                    {
                        size_t vertexCount = static_cast<size_t>(vertexCounts[iConvexHull]);
                        size_t indexCount  = static_cast<size_t>(indexCounts[iConvexHull]);

                        m_convexHulls.PushBack(new ConvexHull(currentVertices, vertexCount, currentIndices, indexCount));

                        // Now we need to move our pointers forward.
                        // (vertexCount counts 3-float positions, hence the * 3.)
                        currentVertices += vertexCount * 3;
                        currentIndices  += indexCount;
                    }

                    // Build Settings.
                    deserializer.Read(this->convexHullBuildSettings);

                    free(vertexCounts);
                    free(indexCounts);
                    free(vertices);
                    free(indices);
                }
                else
                {
                    // Unsupported Version.
                    deserializer.Seek(header.sizeInBytes);
                }

                break;
            }

        case Serialization::ChunkID_Null:
            {
                // Terminator chunk: consume the header and stop cleanly.
                deserializer.Seek(sizeof(header));
                finished = true;
                break;
            }

        default:
            {
                // Unknown chunk = Error.
                finished = true;
                return false;
            }
        }

        if (finished)
        {
            break;
        }
    }

    // If we get here, we were successful.
    return true;
}
/// Load a binary model ("UMDL"): vertex buffers, index buffers, geometries
/// with LOD levels, vertex morphs, skeleton, bounding box, and geometry
/// centers, accumulating an approximate memory-use figure along the way.
/// @return False on a bad file ID or an out-of-range buffer reference.
bool Model::Load(Deserializer& source)
{
    PROFILE(LoadModel);

    // Check ID
    if (source.ReadFileID() != "UMDL")
    {
        LOGERROR(source.GetName() + " is not a valid model file");
        return false;
    }

    geometries_.Clear();
    geometryBoneMappings_.Clear();
    geometryCenters_.Clear();
    morphs_.Clear();
    vertexBuffers_.Clear();
    indexBuffers_.Clear();

    unsigned memoryUse = sizeof(Model);

    // Read vertex buffers
    unsigned numVertexBuffers = source.ReadUInt();
    vertexBuffers_.Reserve(numVertexBuffers);
    morphRangeStarts_.Resize(numVertexBuffers);
    morphRangeCounts_.Resize(numVertexBuffers);
    for (unsigned i = 0; i < numVertexBuffers; ++i)
    {
        unsigned vertexCount = source.ReadUInt();
        unsigned elementMask = source.ReadUInt();
        // Morph range per buffer: which vertices morphs may touch.
        morphRangeStarts_[i] = source.ReadUInt();
        morphRangeCounts_[i] = source.ReadUInt();

        // Shadowed so the CPU-side copy survives device loss.
        SharedPtr<VertexBuffer> buffer(new VertexBuffer(context_));
        buffer->SetShadowed(true);
        buffer->SetSize(vertexCount, elementMask);
        // NOTE(review): Lock() return is not checked before the raw Read into it — confirm upstream guarantees.
        void* dest = buffer->Lock(0, vertexCount);
        unsigned vertexSize = buffer->GetVertexSize();
        source.Read(dest, vertexCount * vertexSize);
        buffer->Unlock();

        memoryUse += sizeof(VertexBuffer) + vertexCount * vertexSize;
        vertexBuffers_.Push(buffer);
    }

    // Read index buffers
    unsigned numIndexBuffers = source.ReadUInt();
    indexBuffers_.Reserve(numIndexBuffers);
    for (unsigned i = 0; i < numIndexBuffers; ++i)
    {
        unsigned indexCount = source.ReadUInt();
        unsigned indexSize = source.ReadUInt();

        SharedPtr<IndexBuffer> buffer(new IndexBuffer(context_));
        buffer->SetShadowed(true);
        // Second argument: true = 32-bit indices.
        buffer->SetSize(indexCount, indexSize > sizeof(unsigned short));
        void* dest = buffer->Lock(0, indexCount);
        source.Read(dest, indexCount * indexSize);
        buffer->Unlock();

        memoryUse += sizeof(IndexBuffer) + indexCount * indexSize;
        indexBuffers_.Push(buffer);
    }

    // Read geometries
    unsigned numGeometries = source.ReadUInt();
    geometries_.Reserve(numGeometries);
    geometryBoneMappings_.Reserve(numGeometries);
    geometryCenters_.Reserve(numGeometries);
    for (unsigned i = 0; i < numGeometries; ++i)
    {
        // Read bone mappings
        unsigned boneMappingCount = source.ReadUInt();
        PODVector<unsigned> boneMapping(boneMappingCount);
        for (unsigned j = 0; j < boneMappingCount; ++j)
            boneMapping[j] = source.ReadUInt();
        geometryBoneMappings_.Push(boneMapping);

        unsigned numLodLevels = source.ReadUInt();
        Vector<SharedPtr<Geometry> > geometryLodLevels;
        geometryLodLevels.Reserve(numLodLevels);

        for (unsigned j = 0; j < numLodLevels; ++j)
        {
            float distance = source.ReadFloat();
            PrimitiveType type = (PrimitiveType)source.ReadUInt();

            // Buffer references index into the arrays read above; validate them.
            unsigned vertexBufferRef = source.ReadUInt();
            unsigned indexBufferRef = source.ReadUInt();
            unsigned indexStart = source.ReadUInt();
            unsigned indexCount = source.ReadUInt();

            if (vertexBufferRef >= vertexBuffers_.Size())
            {
                LOGERROR("Vertex buffer index out of bounds");
                return false;
            }
            if (indexBufferRef >= indexBuffers_.Size())
            {
                LOGERROR("Index buffer index out of bounds");
                return false;
            }

            SharedPtr<Geometry> geometry(new Geometry(context_));
            geometry->SetVertexBuffer(0, vertexBuffers_[vertexBufferRef]);
            geometry->SetIndexBuffer(indexBuffers_[indexBufferRef]);
            geometry->SetDrawRange(type, indexStart, indexCount);
            geometry->SetLodDistance(distance);

            geometryLodLevels.Push(geometry);
            memoryUse += sizeof(Geometry);
        }

        geometries_.Push(geometryLodLevels);
    }

    // Read morphs
    unsigned numMorphs = source.ReadUInt();
    morphs_.Reserve(numMorphs);
    for (unsigned i = 0; i < numMorphs; ++i)
    {
        ModelMorph newMorph;
        newMorph.name_ = source.ReadString();
        newMorph.nameHash_ = newMorph.name_;
        newMorph.weight_ = 0.0f;
        unsigned nubuffers_ = source.ReadUInt();

        for (unsigned j = 0; j < nubuffers_; ++j)
        {
            VertexBufferMorph newBuffer;
            unsigned bufferIndex = source.ReadUInt();
            newBuffer.elementMask_ = source.ReadUInt();
            newBuffer.vertexCount_ = source.ReadUInt();

            // Base size: size of each vertex index
            unsigned vertexSize = sizeof(unsigned);
            // Add size of individual elements
            if (newBuffer.elementMask_ & MASK_POSITION)
                vertexSize += sizeof(Vector3);
            if (newBuffer.elementMask_ & MASK_NORMAL)
                vertexSize += sizeof(Vector3);
            if (newBuffer.elementMask_ & MASK_TANGENT)
                vertexSize += sizeof(Vector3);

            newBuffer.morphData_ = new unsigned char[newBuffer.vertexCount_ * vertexSize];
            source.Read(&newBuffer.morphData_[0], newBuffer.vertexCount_ * vertexSize);

            newMorph.buffers_[bufferIndex] = newBuffer;
            memoryUse += sizeof(VertexBufferMorph) + newBuffer.vertexCount_ * vertexSize;
        }

        morphs_.Push(newMorph);
        memoryUse += sizeof(ModelMorph);
    }

    // Read skeleton
    skeleton_.Load(source);
    memoryUse += skeleton_.GetNumBones() * sizeof(Bone);

    // Read bounding box
    boundingBox_ = source.ReadBoundingBox();

    // Read geometry centers; older files may lack them, so pad with zero vectors.
    for (unsigned i = 0; i < geometries_.Size() && !source.IsEof(); ++i)
        geometryCenters_.Push(source.ReadVector3());
    while (geometryCenters_.Size() < geometries_.Size())
        geometryCenters_.Push(Vector3::ZERO);
    memoryUse += sizeof(Vector3) * geometries_.Size();

    SetMemoryUse(memoryUse);
    return true;
}
void Mesh::Deserialize(Deserializer &deserializer)
{
    // Consume the chunk stream until the terminating null chunk is seen.
    // Unknown chunk IDs are skipped wholesale so newer files remain loadable.
    Serialization::ChunkHeader chunk;
    do
    {
        deserializer.Read(chunk);

        if (chunk.id == Serialization::ChunkID_Mesh_Material)
        {
            if (chunk.version == 1)
            {
                // The material is referenced by path; load it, then let it
                // consume its own serialized state.
                String materialFilePath;
                deserializer.ReadString(materialFilePath);

                this->SetMaterial(materialFilePath.c_str());
                this->material->Deserialize(deserializer);
            }
            else
            {
                m_context.Logf("Error deserializing Mesh. Material chunk is an unsupported version (%d).", chunk.version);
            }
        }
        else if (chunk.id == Serialization::ChunkID_Mesh_Geometry)
        {
            if (chunk.version == 1)
            {
                // Release any geometry we own before replacing it.
                if (this->deleteGeometry)
                {
                    Renderer::DeleteVertexArray(this->geometry);
                }

                auto replacementVA = Renderer::CreateVertexArray(VertexArrayUsage_Static, VertexFormat());
                replacementVA->Deserialize(deserializer);

                this->SetGeometry(replacementVA);
                this->deleteGeometry = true;
            }
            else
            {
                m_context.Logf("Error deserializing Mesh. Geometry chunk is an unsupported version (%d).", chunk.version);
            }
        }
        else
        {
            // We're not aware of the chunk, so we'll skip it.
            deserializer.Seek(chunk.sizeInBytes);
        }
    } while (chunk.id != Serialization::ChunkID_Null);
}
void ModelComponent::Deserialize(Deserializer &deserializer)
{
    // Accumulates ChangeFlag_* bits so a single OnChanged() notification is
    // fired after deserialization completes.
    uint32_t whatChanged = 0;
    this->LockOnChanged();

    Serialization::ChunkHeader header;
    deserializer.Read(header);
    assert(header.id == Serialization::ChunkID_ModelComponent_Main);
    {
        // Remember where the chunk payload starts so we can skip the
        // remainder of the chunk on a partial-read bailout.
        size_t deserializerStart = deserializer.Tell();

        switch (header.version)
        {
        case 1:
            {
                // Flags are first.
                uint32_t newFlags;
                deserializer.Read(newFlags);
                if (newFlags != this->flags)
                {
                    this->flags = newFlags;
                    whatChanged |= ChangeFlag_Flags;
                }

                // Next is a boolean indicating whether or not a model is defined here.
                bool hasModel;
                deserializer.Read(hasModel);

                // We will only have additional data at this point if we actually have a model defined.
                if (hasModel)
                {
                    auto oldModel = this->model;

                    String modelPath;
                    deserializer.ReadString(modelPath);

                    if (!modelPath.IsEmpty())
                    {
                        this->SetModel(modelPath.c_str());

                        // If we failed to set the model (most likely due to the file not existing) we need to skip this chunk and return.
                        if (this->model == nullptr)
                        {
                            const size_t bytesReadSoFar = deserializer.Tell() - deserializerStart;
                            deserializer.Seek(header.sizeInBytes - bytesReadSoFar);

                            // Fix: the original early return skipped UnlockOnChanged(),
                            // leaving the change-lock taken at the top of this function
                            // held forever. Mirror the normal exit path here.
                            this->UnlockOnChanged();
                            if (whatChanged != 0)
                            {
                                this->OnChanged(whatChanged);
                            }
                            return;
                        }
                    }
                    else
                    {
                        // Empty path means an inline/null model definition.
                        if (this->GetContext() != nullptr)
                        {
                            ModelDefinition nullDefinition(*this->GetContext());
                            this->SetModel(new Model(nullDefinition), true);
                        }
                    }

                    assert(this->model != nullptr);
                    {
                        this->model->Deserialize(deserializer);
                    }

                    if (this->model != oldModel)
                    {
                        whatChanged |= ChangeFlag_Model;
                    }
                }

                break;
            }

        default:
            {
                if (this->GetContext() != nullptr)
                {
                    this->GetContext()->Logf("Error deserializing ModelComponent. Main chunk has an unsupported version (%d).", header.version);
                }

                break;
            }
        }
    }

    this->UnlockOnChanged();

    if (whatChanged != 0)
    {
        this->OnChanged(whatChanged);
    }
}
// Begin loading a model from a binary stream. Reads vertex/index buffers,
// geometries, morphs, the skeleton and bounding data. When the resource is in
// ASYNC_LOADING state, raw buffer data is staged in loadVBData_/loadIBData_/
// loadGeometries_ for GPU upload during EndLoad(); otherwise buffers are
// created and filled immediately via Lock/Unlock. Returns false on a
// malformed file (bad ID or out-of-range buffer references).
bool Model::BeginLoad(Deserializer& source)
{
    // Check ID
    String id = source.ReadFileID();
    bool umdl = false;
    if (id == "UMDL") // we only support UMDL for some current legacy mdl's (ToonTown)
        umdl = true;
    if (!umdl && id != "AMDL")
    {
        LOGERROR(source.GetName() + " is not a valid model file");
        return false;
    }

    // Drop any previously loaded data before re-reading.
    geometries_.Clear();
    geometryBoneMappings_.Clear();
    geometryCenters_.Clear();
    morphs_.Clear();
    vertexBuffers_.Clear();
    indexBuffers_.Clear();

    // Running total reported to the resource system via SetMemoryUse() below.
    unsigned memoryUse = sizeof(Model);
    bool async = GetAsyncLoadState() == ASYNC_LOADING;

    // Read vertex buffers
    unsigned numVertexBuffers = source.ReadUInt();
    vertexBuffers_.Reserve(numVertexBuffers);
    morphRangeStarts_.Resize(numVertexBuffers);
    morphRangeCounts_.Resize(numVertexBuffers);
    loadVBData_.Resize(numVertexBuffers);
    for (unsigned i = 0; i < numVertexBuffers; ++i)
    {
        unsigned vertexCount = source.ReadUInt();
        unsigned elementMask = source.ReadUInt();
        morphRangeStarts_[i] = source.ReadUInt();
        morphRangeCounts_[i] = source.ReadUInt();

        SharedPtr<VertexBuffer> buffer(new VertexBuffer(context_));
        unsigned vertexSize = VertexBuffer::GetVertexSize(elementMask);

        // Prepare vertex buffer data to be uploaded during EndLoad()
        if (async)
        {
            loadVBData_[i].vertexCount_ = vertexCount;
            loadVBData_[i].elementMask_ = elementMask;
            loadVBData_[i].dataSize_ = vertexCount * vertexSize;
            loadVBData_[i].data_ = new unsigned char[loadVBData_[i].dataSize_];
            source.Read(loadVBData_[i].data_.Get(), loadVBData_[i].dataSize_);
        }
        else
        {
            // If not async loading, use locking to avoid extra allocation & copy
            loadVBData_[i].data_.Reset(); // Make sure no previous data
            buffer->SetShadowed(true);
            buffer->SetSize(vertexCount, elementMask);
            void* dest = buffer->Lock(0, vertexCount);
            source.Read(dest, vertexCount * vertexSize);
            buffer->Unlock();
        }

        memoryUse += sizeof(VertexBuffer) + vertexCount * vertexSize;
        vertexBuffers_.Push(buffer);
    }

    // Read index buffers
    unsigned numIndexBuffers = source.ReadUInt();
    indexBuffers_.Reserve(numIndexBuffers);
    loadIBData_.Resize(numIndexBuffers);
    for (unsigned i = 0; i < numIndexBuffers; ++i)
    {
        unsigned indexCount = source.ReadUInt();
        unsigned indexSize = source.ReadUInt();

        SharedPtr<IndexBuffer> buffer(new IndexBuffer(context_));

        // Prepare index buffer data to be uploaded during EndLoad()
        if (async)
        {
            loadIBData_[i].indexCount_ = indexCount;
            loadIBData_[i].indexSize_ = indexSize;
            loadIBData_[i].dataSize_ = indexCount * indexSize;
            loadIBData_[i].data_ = new unsigned char[loadIBData_[i].dataSize_];
            source.Read(loadIBData_[i].data_.Get(), loadIBData_[i].dataSize_);
        }
        else
        {
            // If not async loading, use locking to avoid extra allocation & copy
            loadIBData_[i].data_.Reset(); // Make sure no previous data
            buffer->SetShadowed(true);
            // indexSize > 2 selects 32-bit indices
            buffer->SetSize(indexCount, indexSize > sizeof(unsigned short));
            void* dest = buffer->Lock(0, indexCount);
            source.Read(dest, indexCount * indexSize);
            buffer->Unlock();
        }

        memoryUse += sizeof(IndexBuffer) + indexCount * indexSize;
        indexBuffers_.Push(buffer);
    }

    // Read geometries
    unsigned numGeometries = source.ReadUInt();
    geometries_.Reserve(numGeometries);
    geometryBoneMappings_.Reserve(numGeometries);
    geometryCenters_.Reserve(numGeometries);
    loadGeometries_.Resize(numGeometries);
    for (unsigned i = 0; i < numGeometries; ++i)
    {
        // Read bone mappings
        unsigned boneMappingCount = source.ReadUInt();
        PODVector<unsigned> boneMapping(boneMappingCount);
        for (unsigned j = 0; j < boneMappingCount; ++j)
            boneMapping[j] = source.ReadUInt();
        geometryBoneMappings_.Push(boneMapping);

        unsigned numLodLevels = source.ReadUInt();
        Vector<SharedPtr<Geometry> > geometryLodLevels;
        geometryLodLevels.Reserve(numLodLevels);
        loadGeometries_[i].Resize(numLodLevels);

        for (unsigned j = 0; j < numLodLevels; ++j)
        {
            float distance = source.ReadFloat();
            PrimitiveType type = (PrimitiveType)source.ReadUInt();

            unsigned vbRef = source.ReadUInt();
            unsigned ibRef = source.ReadUInt();
            unsigned indexStart = source.ReadUInt();
            unsigned indexCount = source.ReadUInt();

            // Validate buffer references against what was actually loaded;
            // on failure, discard staged load data so EndLoad() sees nothing.
            if (vbRef >= vertexBuffers_.Size())
            {
                LOGERROR("Vertex buffer index out of bounds");
                loadVBData_.Clear();
                loadIBData_.Clear();
                loadGeometries_.Clear();
                return false;
            }
            if (ibRef >= indexBuffers_.Size())
            {
                LOGERROR("Index buffer index out of bounds");
                loadVBData_.Clear();
                loadIBData_.Clear();
                loadGeometries_.Clear();
                return false;
            }

            SharedPtr<Geometry> geometry(new Geometry(context_));
            geometry->SetLodDistance(distance);

            // Prepare geometry to be defined during EndLoad()
            loadGeometries_[i][j].type_ = type;
            loadGeometries_[i][j].vbRef_ = vbRef;
            loadGeometries_[i][j].ibRef_ = ibRef;
            loadGeometries_[i][j].indexStart_ = indexStart;
            loadGeometries_[i][j].indexCount_ = indexCount;

            geometryLodLevels.Push(geometry);
            memoryUse += sizeof(Geometry);
        }

        geometries_.Push(geometryLodLevels);
    }

    // Read morphs
    unsigned numMorphs = source.ReadUInt();
    morphs_.Reserve(numMorphs);
    for (unsigned i = 0; i < numMorphs; ++i)
    {
        ModelMorph newMorph;
        newMorph.name_ = source.ReadString();
        newMorph.nameHash_ = newMorph.name_;
        newMorph.weight_ = 0.0f;
        unsigned numBuffers = source.ReadUInt();

        for (unsigned j = 0; j < numBuffers; ++j)
        {
            VertexBufferMorph newBuffer;
            unsigned bufferIndex = source.ReadUInt();
            newBuffer.elementMask_ = source.ReadUInt();
            newBuffer.vertexCount_ = source.ReadUInt();

            // Base size: size of each vertex index
            unsigned vertexSize = sizeof(unsigned);
            // Add size of individual elements
            if (newBuffer.elementMask_ & MASK_POSITION)
                vertexSize += sizeof(Vector3);
            if (newBuffer.elementMask_ & MASK_NORMAL)
                vertexSize += sizeof(Vector3);
            if (newBuffer.elementMask_ & MASK_TANGENT)
                vertexSize += sizeof(Vector3);

            newBuffer.dataSize_ = newBuffer.vertexCount_ * vertexSize;
            newBuffer.morphData_ = new unsigned char[newBuffer.dataSize_];
            source.Read(&newBuffer.morphData_[0], newBuffer.vertexCount_ * vertexSize);

            newMorph.buffers_[bufferIndex] = newBuffer;
            memoryUse += sizeof(VertexBufferMorph) + newBuffer.vertexCount_ * vertexSize;
        }

        morphs_.Push(newMorph);
        memoryUse += sizeof(ModelMorph);
    }

    // Read skeleton
    skeleton_.Load(source);
    memoryUse += skeleton_.GetNumBones() * sizeof(Bone);

    // Read bounding box
    boundingBox_ = source.ReadBoundingBox();

    // Read geometry centers; pad with zero if the file has fewer centers than
    // geometries (older/truncated files).
    for (unsigned i = 0; i < geometries_.Size() && !source.IsEof(); ++i)
        geometryCenters_.Push(source.ReadVector3());
    while (geometryCenters_.Size() < geometries_.Size())
        geometryCenters_.Push(Vector3::ZERO);
    memoryUse += sizeof(Vector3) * geometries_.Size();

    // Legacy UMDL files end here (no version/animation trailer).
    if (umdl)
    {
        SetMemoryUse(memoryUse);
        return true;
    }

    // MODEL_VERSION
    // NOTE(review): 'version' is read to advance the stream but is otherwise
    // unused — presumably future format revisions will branch on it; confirm.
    unsigned version = source.ReadUInt();

    // Trailing list of animation resources referenced by this model.
    ResourceRefList animList = source.ReadResourceRefList();
    animationsResources_.Clear();
    ResourceCache* cache = GetSubsystem<ResourceCache>();
    for (unsigned i = 0; i < animList.names_.Size(); ++i)
    {
        AddAnimationResource(cache->GetResource<Animation>(animList.names_[i]));
    }

    SetMemoryUse(memoryUse);
    return true;
}
// Load PCM audio from a RIFF/WAVE stream. Only uncompressed PCM (format tag 1)
// is supported. Returns false when the RIFF/WAVE signature, fmt chunk or data
// chunk cannot be located. 8-bit samples are converted from unsigned to signed.
bool Sound::LoadWav(Deserializer& source)
{
    WavHeader header;

    // Try to open
    memset(&header, 0, sizeof header);
    source.Read(&header.riffText_, 4);
    header.totalLength_ = source.ReadUInt();
    source.Read(&header.waveText_, 4);
    if (memcmp("RIFF", header.riffText_, 4) || memcmp("WAVE", header.waveText_, 4))
    {
        LOGERROR("Could not read WAV data from " + source.GetName());
        return false;
    }

    // Search for the FORMAT chunk
    for (;;)
    {
        source.Read(&header.formatText_, 4);
        header.formatLength_ = source.ReadUInt();
        if (!memcmp("fmt ", &header.formatText_, 4))
            break;

        source.Seek(source.GetPosition() + header.formatLength_);
        if (!header.formatLength_ || source.GetPosition() >= source.GetSize())
        {
            LOGERROR("Could not read WAV data from " + source.GetName());
            return false;
        }
    }

    // Read the FORMAT chunk (16 bytes consumed below)
    header.format_ = source.ReadUShort();
    header.channels_ = source.ReadUShort();
    header.frequency_ = source.ReadUInt();
    header.avgBytes_ = source.ReadUInt();
    header.blockAlign_ = source.ReadUShort();
    header.bits_ = source.ReadUShort();

    // Skip data if the format chunk was bigger than what we use. Guard against
    // a corrupt chunk length < 16, which would otherwise seek backwards.
    if (header.formatLength_ > 16)
        source.Seek(source.GetPosition() + header.formatLength_ - 16);

    // Check for correct format (1 = uncompressed PCM)
    if (header.format_ != 1)
    {
        LOGERROR("Could not read WAV data from " + source.GetName());
        return false;
    }

    // Search for the DATA chunk
    for (;;)
    {
        source.Read(&header.dataText_, 4);
        header.dataLength_ = source.ReadUInt();
        if (!memcmp("data", &header.dataText_, 4))
            break;

        source.Seek(source.GetPosition() + header.dataLength_);
        if (!header.dataLength_ || source.GetPosition() >= source.GetSize())
        {
            LOGERROR("Could not read WAV data from " + source.GetName());
            return false;
        }
    }

    // Allocate sound and load audio data. Clamp the declared data length to
    // the bytes actually remaining in the stream so a truncated or corrupt
    // file cannot cause over-allocation or an uninitialized buffer tail.
    unsigned length = header.dataLength_;
    const unsigned bytesRemaining = source.GetSize() - source.GetPosition();
    if (length > bytesRemaining)
        length = bytesRemaining;

    SetSize(length);
    SetFormat(header.frequency_, header.bits_ == 16, header.channels_ == 2);
    source.Read(data_.Get(), length);

    // Convert 8-bit audio to signed
    if (!sixteenBit_)
    {
        for (unsigned i = 0; i < length; ++i)
            data_[i] -= 128;
    }

    return true;
}
// Load headerless audio: the entire stream is treated as sample data.
// Returns true only when every byte of the stream was read.
bool Sound::LoadRaw(Deserializer& source)
{
    const unsigned totalBytes = source.GetSize();
    SetSize(totalBytes);
    const unsigned bytesRead = source.Read(data_.Get(), totalBytes);
    return bytesRead == totalBytes;
}