void MeshSaverImplv1_0::WriteSubMeshData(MeshTemplate* templ,
		const MeshTemplate::PrimitiveGroup& gr, ChunkOutputStream& outStream) {
	// Decide whether this primitive group uses the mesh-wide shared buffer
	// or carries its own vertex/index data.
	bool sharedMeshBuffer = false;
	MeshBuffer* buffer = templ->GetSharedMeshBuffer();
	if (!gr.buffer || gr.buffer == buffer)
		sharedMeshBuffer = true;
	else
		buffer = gr.buffer;

	{
		uint8 primType = (uint8)buffer->GetPrimitiveType();
		OutputSerializer& ser = outStream.BeginChunk(MCID_SUBMESH_INFO);
		ser << primType;
		outStream.EndChunk();
	}

	if (sharedMeshBuffer) {
		// Only the ranges into the shared buffer need to be stored.
		{
			uint32 start = gr.startVertex;
			uint32 count = gr.vertexCount;
			OutputSerializer& ser = outStream.BeginChunk(MCID_SHARED_VERTEX_DATA);
			ser << start << count;
			outStream.EndChunk();
		}
		{
			uint32 start = gr.startIndex;
			uint32 count = gr.indexCount;
			OutputSerializer& ser = outStream.BeginChunk(MCID_SHARED_INDEX_DATA);
			ser << start << count;
			outStream.EndChunk();
		}
	} else {
		// The group owns its buffer, so write the data inline.
		WriteVertexData(buffer, outStream);
		WriteIndexData(buffer, outStream);
	}

	WriteBoundsInfo(gr.bounds, outStream);

	// Only write a material when it differs from the mesh's shared material.
	if (gr.material && gr.material != templ->GetSharedMaterial()) {
		WriteMaterialData(gr.material, outStream);
	}

	// Empty terminator chunk.
	outStream.BeginChunk(MCID_SUBMESH_END);
	outStream.EndChunk();
}
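// For reference, the per-submesh chunk sequence emitted by WriteSubMeshData()
// above (reconstructed from the writer; chunk IDs are from the code, the
// layout notes are informal):
//
//   MCID_SUBMESH_INFO          u8 primType
//   MCID_SHARED_VERTEX_DATA    u32 start, u32 count   (shared-buffer groups only)
//   MCID_SHARED_INDEX_DATA     u32 start, u32 count   (shared-buffer groups only)
//     -- or inline data via WriteVertexData()/WriteIndexData() --
//   bounds via WriteBoundsInfo()
//   material via WriteMaterialData()                  (only if not the shared one)
//   MCID_SUBMESH_END           empty terminator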
bool DiskCachingFileLoaderCache::MakeCacheSpaceFor(size_t blocks) {
	size_t goal = (size_t)maxBlocks_ - blocks;

	while (cacheSize_ > goal) {
		u16 minGeneration = generation_;

		// Evict blocks at the oldest generation. Entries are invalidated in
		// place rather than erased, so the index-based loop stays valid.
		for (size_t i = 0; i < blockIndexLookup_.size(); ++i) {
			if (blockIndexLookup_[i] == INVALID_INDEX) {
				continue;
			}
			auto &info = index_[blockIndexLookup_[i]];

			// Track the minimum generation seen.
			// TODO: Do this smarter?
			if (info.generation != 0 && info.generation < minGeneration) {
				minGeneration = info.generation;
			}

			// 0 means it was never used yet or was the first read (e.g. block descriptor.)
			if (info.generation == oldestGeneration_ || info.generation == 0) {
				info.block = INVALID_BLOCK;
				info.generation = 0;
				info.hits = 0;
				--cacheSize_;

				// TODO: Doing this in chunks might be a lot better.
				WriteIndexData(blockIndexLookup_[i], info);
				blockIndexLookup_[i] = INVALID_INDEX;

				// Freed enough space already?
				if (cacheSize_ <= goal) {
					break;
				}
			}
		}

		// If nothing was at oldestGeneration_, advance it to the lowest
		// generation we did find so the next pass can make progress.
		oldestGeneration_ = minGeneration;
	}

	return true;
}
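// A standalone sketch of the eviction policy in MakeCacheSpaceFor() above:
// evict entries at the oldest generation, and advance oldestGeneration to the
// lowest generation actually seen so the next pass can make progress. A plain
// vector stands in for the real index/lookup tables; all values illustrative.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
	uint16_t generation = 9;        // current "time"
	uint16_t oldestGeneration = 3;  // current eviction target
	// Generation of each cached block; 0 marks a free slot.
	std::vector<uint16_t> blocks = { 3, 7, 3, 5 };

	uint16_t minGeneration = generation;
	for (auto &gen : blocks) {
		if (gen == 0)
			continue;                // already free
		if (gen < minGeneration)
			minGeneration = gen;     // lowest generation seen this pass
		if (gen == oldestGeneration)
			gen = 0;                 // evict: slot becomes free
	}
	oldestGeneration = minGeneration;

	// Prints: 0 7 0 5 | oldestGeneration = 3
	// (a later pass would find min = 5, making gen-5 the next eviction target).
	for (auto gen : blocks)
		printf("%u ", gen);
	printf("| oldestGeneration = %u\n", oldestGeneration);
	return 0;
}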
void DiskCachingFileLoaderCache::RebalanceGenerations() {
	// To make things easy, we will subtract oldestGeneration_ and cut in half.
	// That should give us more space but not break anything.
	for (size_t i = 0; i < index_.size(); ++i) {
		auto &info = index_[i];
		if (info.block == INVALID_BLOCK) {
			continue;
		}
		if (info.generation > oldestGeneration_) {
			info.generation = (info.generation - oldestGeneration_) / 2;
			// TODO: Doing this all at once would be much better.
			WriteIndexData((u32)i, info);
		}
	}

	oldestGeneration_ = 0;
}
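// A standalone sketch of the rebalancing transform above. Generations are u16,
// so once generation_ nears 65535 the values get compressed toward zero:
// subtract oldestGeneration_ and halve. Sample values are illustrative.
#include <cstdint>
#include <cstdio>

int main() {
	uint16_t oldestGeneration = 40000;
	uint16_t gens[] = { 40001, 50000, 65000 };
	for (auto &g : gens) {
		if (g > oldestGeneration)
			g = (g - oldestGeneration) / 2;  // same transform as RebalanceGenerations()
	}
	// Prints: 0 5000 12500 -- values shrink but their relative order
	// (and thus the eviction order) is preserved.
	printf("%u %u %u\n", gens[0], gens[1], gens[2]);
	return 0;
}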
void MeshSaverImplv1_0::Save(OutputStreamPtr& out, AssetSaver& saver) {
	try {
		MeshTemplate* mesh = static_cast<MeshTemplate*>(
				saver.GetRequestPtr()->GetStreamedObject());

		WriteVersion(out);
		ChunkOutputStream stream(out);
		WriteMeshHeader(mesh, stream);
		WriteBoundsInfo(mesh->GetBounds(), stream);

		// Shared data comes first so submeshes can reference it by range.
		MeshBuffer* buffer = mesh->GetSharedMeshBuffer();
		MaterialTemplatePtr material = mesh->GetSharedMaterial();
		if (buffer) {
			WriteVertexData(buffer, stream);
			WriteIndexData(buffer, stream);
		}
		if (material) {
			WriteMaterialData(material, stream);
		}

		// One MCID_SUBMESH_DATA chunk per primitive group.
		uint32 numPrim = mesh->GetNumPrimitiveGroups();
		for (uint32 i = 0; i < numPrim; ++i) {
			stream.BeginChunk(MCID_SUBMESH_DATA);
			WriteSubMeshData(mesh, mesh->GetPrimitive(i), stream);
			stream.EndChunk();
		}
	} catch (const GracefulErrorExcept& e) {
		saver.GetRequestPtr()->SetCompleted(false);
		NEX_THROW(e);
	}
	saver.GetRequestPtr()->SetCompleted(true);
}
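// Resulting top-level file layout (reconstructed from Save() above):
//
//   version record                      WriteVersion()
//   mesh header chunk                   WriteMeshHeader()
//   mesh bounds                         WriteBoundsInfo()
//   shared vertex + index data          only if the mesh has a shared buffer
//   shared material                     only if the mesh has a shared material
//   MCID_SUBMESH_DATA x numPrim         each wrapping one WriteSubMeshData()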
size_t DiskCachingFileLoaderCache::SaveIntoCache(FileLoader *backend, s64 pos, size_t bytes, void *data, FileLoader::Flags flags) {
	lock_guard guard(lock_);

	if (!f_) {
		// Just to keep things working.
		return backend->ReadAt(pos, bytes, data, flags);
	}

	s64 cacheStartPos = pos / blockSize_;
	s64 cacheEndPos = (pos + bytes - 1) / blockSize_;
	size_t readSize = 0;
	size_t offset = (size_t)(pos - (cacheStartPos * (u64)blockSize_));
	u8 *p = (u8 *)data;

	// Count how many contiguous blocks starting at cacheStartPos are missing.
	size_t blocksToRead = 0;
	for (s64 i = cacheStartPos; i <= cacheEndPos; ++i) {
		auto &info = index_[i];
		if (info.block != INVALID_BLOCK) {
			break;
		}
		++blocksToRead;
		if (blocksToRead >= MAX_BLOCKS_PER_READ) {
			break;
		}
	}

	if (!MakeCacheSpaceFor(blocksToRead) || blocksToRead == 0) {
		return 0;
	}

	if (blocksToRead == 1) {
		auto &info = index_[cacheStartPos];

		u8 *buf = new u8[blockSize_];
		size_t readBytes = backend->ReadAt(cacheStartPos * (u64)blockSize_, blockSize_, buf, flags);

		// Check if it was written while we were busy. Might happen if we thread.
		if (info.block == INVALID_BLOCK && readBytes != 0) {
			info.block = AllocateBlock((u32)cacheStartPos);
			WriteBlockData(info, buf);
			WriteIndexData((u32)cacheStartPos, info);
		}

		size_t toRead = std::min(bytes - readSize, (size_t)blockSize_ - offset);
		memcpy(p + readSize, buf + offset, toRead);
		readSize += toRead;

		delete [] buf;
	} else {
		u8 *wholeRead = new u8[blocksToRead * blockSize_];
		size_t readBytes = backend->ReadAt(cacheStartPos * (u64)blockSize_, blocksToRead * blockSize_, wholeRead, flags);

		for (size_t i = 0; i < blocksToRead; ++i) {
			auto &info = index_[cacheStartPos + i];
			// Check if it was written while we were busy. Might happen if we thread.
			if (info.block == INVALID_BLOCK && readBytes != 0) {
				info.block = AllocateBlock((u32)cacheStartPos + (u32)i);
				WriteBlockData(info, wholeRead + (i * blockSize_));
				// TODO: Doing each index together would probably be better.
				WriteIndexData((u32)cacheStartPos + (u32)i, info);
			}

			size_t toRead = std::min(bytes - readSize, (size_t)blockSize_ - offset);
			memcpy(p + readSize, wholeRead + (i * blockSize_) + offset, toRead);
			readSize += toRead;

			// Don't need an offset after the first block.
			offset = 0;
		}

		delete [] wholeRead;
	}

	cacheSize_ += blocksToRead;
	++generation_;

	// Near the u16 limit, compress the generations so aging keeps working.
	if (generation_ == std::numeric_limits<u16>::max()) {
		RebalanceGenerations();
	}

	return readSize;
}
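// A standalone sketch of the block arithmetic used above: which blocks a
// (pos, bytes) request touches and the byte offset into the first block.
// The sample numbers are illustrative.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
	const uint32_t blockSize = 1024;
	int64_t pos = 1500;
	size_t bytes = 3000;

	int64_t cacheStartPos = pos / blockSize;              // first block: 1
	int64_t cacheEndPos = (pos + bytes - 1) / blockSize;  // last block: 4499 / 1024 = 4
	size_t offset = (size_t)(pos - cacheStartPos * (int64_t)blockSize);  // 476 into block 1

	// Blocks 1..4 are touched; the first copy takes blockSize - offset = 548
	// bytes, and every later copy starts at offset 0 within its block.
	printf("blocks %lld..%lld, first-block offset %zu\n",
	       (long long)cacheStartPos, (long long)cacheEndPos, offset);
	return 0;
}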