// Constructs a read-only file system from a blob file.
// Blob layout (as parsed here): [file data...][header][Terminator], where the
// Terminator at the very end carries a magic signature and the header size.
// The header is a sequence of (name, offset, size) entries terminated by an
// empty name; offsets are relative to the start of the blob.
// Throws (via THROW/THROW_SECONDARY) on truncated data, bad magic, or an
// entry that does not fit into the data area.
BlobFileSystem::BlobFileSystem(ptr<File> file) : file(file)
{
	try
	{
		void* fileData = file->GetData();
		size_t fileSize = file->GetSize();
		size_t size = fileSize;

		// read the terminator at the very end of the blob
		if(size < sizeof(Terminator))
			THROW("Can't read terminator");
		Terminator* terminator = (Terminator*)((char*)fileData + fileSize) - 1;
		size -= sizeof(*terminator);

		// check the signature
		if(memcmp(terminator->magic, Terminator::magicValue, sizeof(terminator->magic)) != 0)
			THROW("Invalid magic");

		// ensure the header fits into the remaining data
		if(size < terminator->headerSize)
			THROW("Can't read header");
		// create a reader over the header area (located just before the terminator)
		ptr<StreamReader> headerReader = NEW(StreamReader(NEW(FileInputStream(
			NEW(PartFile(file, (char*)terminator - terminator->headerSize, terminator->headerSize))))));
		size -= terminator->headerSize;

		// read file entries until an empty name signals the end
		for(;;)
		{
			// read file name
			String fileName = headerReader->ReadString();
			// empty name means end of the header
			if(fileName.empty())
				break;
			// read the entry's offset and size
			// (renamed from fileOffset/fileSize: the old inner fileSize
			// shadowed the outer blob fileSize above)
			size_t entryOffset = headerReader->ReadShortly();
			size_t entrySize = headerReader->ReadShortly();
			// validate bounds; the subtraction form avoids integer overflow
			// that `entryOffset + entrySize > size` could incur
			if(entryOffset > size || size - entryOffset < entrySize)
				THROW("Can't read file " + fileName);
			// register the file as a view into the blob
			files[fileName] = NEW(PartFile(file, (char*)fileData + entryOffset, entrySize));
		}
	}
	catch(Exception* exception)
	{
		THROW_SECONDARY("Can't load blob file system", exception);
	}
}
// Completion callback for an asynchronous receive (presumably invoked from
// the asio service thread — confirm against StartReceiving's async_receive
// call site, which is outside this view).
// Snapshots the handler and buffer under the critical section, then fires
// the handler callbacks OUTSIDE the lock so user code cannot deadlock on cs.
void AsioTcpSocket::Received(const boost::system::error_code& error, size_t transferred)
{
	ptr<ReceiveHandler> receiveHandler;
	ptr<File> receiveFile;
	{
		CriticalCode cc(cs);
		// take local references so the callbacks below run without the lock held
		receiveHandler = this->receiveHandler;
		receiveFile = this->receiveFile;
		if(error)
		{
			// on any error (including eof): close the socket and drop the
			// stored handler and buffer — no further receives will be issued
			CloseNonSynced();
			this->receiveHandler = nullptr;
			this->receiveFile = nullptr;
		}
		else
			// success: immediately queue the next asynchronous receive
			StartReceiving();
	}
	if(receiveHandler)
	{
		if(error)
		{
			// a clean end of stream is not an error:
			// it is reported to the handler as null data
			if(error == boost::asio::error::eof)
				receiveHandler->FireData(nullptr);
			else
				receiveHandler->FireError(AsioService::ConvertError(error));
		}
		else
			// hand the handler only the received prefix of the buffer
			receiveHandler->FireData(NEW(PartFile(receiveFile, receiveFile->GetData(), transferred)));
	}
}
// Performs one push+pull synchronization round with a client, atomically
// (both phases share a single sqlite transaction).
// Wire protocol, as read/written here:
//   in:  client revision, client upper revision, then repeated
//        [keySize, key, valueSize, value] push records terminated by keySize == 0;
//   out: total pull size, pre-push revision, post-push revision, then repeated
//        [keySize, key, valueSize, value, revision] pull records terminated by
//        keySize == 0, then the new client revision.
// reader/writer: client streams; userName: pushing user; writeAccess: whether
// pushes are permitted (a push attempt without it throws).
// Returns true if at least one key was pushed.
// Errors are thrown and wrapped by the BEGIN_TRY/END_TRY machinery.
bool ServerRepo::Sync(StreamReader* reader, StreamWriter* writer, const String& userName, bool writeAccess)
{
	BEGIN_TRY();

	// push & pull should be in a one transaction
	Data::SqliteTransaction transaction(db);

	// get user id
	long long userId = GetUserId(userName);
	// get initial revision
	long long prePushRevision = GetMaxRevision();
	// read client revision
	long long clientRevision = reader->ReadShortlyBig();
	// read client upper revision
	long long clientUpperRevision = reader->ReadShortlyBig();
	// correct upper revision (0 or out-of-range means "up to current maximum")
	if(clientUpperRevision == 0 || clientUpperRevision > prePushRevision)
		clientUpperRevision = prePushRevision;

	// determine total size of the pull
	{
		Data::SqliteQuery queryPullTotalSize(stmtPullTotalSize);
		stmtPullTotalSize->Bind(1, clientRevision);
		if(stmtPullTotalSize->Step() != SQLITE_ROW)
			THROW("Can't determine total pull size");
		// output total size of pull
		long long pullTotalSize = stmtPullTotalSize->ColumnInt64(0);
		writer->WriteShortlyBig(pullTotalSize);
	}

	//*** push phase: receive client records into preallocated buffers
	void* key = keyBufferFile->GetData();
	void* value = valueBufferFile->GetData();
	size_t totalPushSize = 0;
	// output pre-push revision
	writer->WriteShortlyBig(prePushRevision);
	bool pushedSomething = false;
	// loop for push keys
	for(size_t i = 0; ; ++i)
	{
		// read key size
		size_t keySize = reader->ReadShortly();
		// if it's 0, it's signal of end
		if(!keySize)
			break;
		// if there is no write access right, stop
		if(!writeAccess)
			THROW("Write access denied");
		// check number of keys
		if(i >= (size_t)maxPushKeysCount)
			THROW("Too many keys");
		// check key size (must fit keyBufferFile)
		if(keySize > maxKeySize)
			THROW("Too big key");
		// read key value
		reader->Read(key, keySize);
		// read value
		size_t valueSize = reader->ReadShortly();
		if(valueSize > maxValueSize)
			THROW("Too big value");
		if(valueSize)
			reader->Read(value, valueSize);
		// check total push size
		totalPushSize += valueSize;
		if(totalPushSize > maxPushTotalSize)
			THROW("Too big push total size");

		// wrap the key bytes for binding into the sqlite statements
		ptr<File> keyFile = NEW(PartFile(keyBufferFile, key, keySize));

		// clear latest flag for that key
		Data::SqliteQuery queryClearLatest(stmtClearLatest);
		stmtClearLatest->Bind(1, keyFile);
		if(stmtClearLatest->Step() != SQLITE_DONE)
			THROW_SECONDARY("Can't clear latest flag", db->Error());

		// do write
		Data::SqliteQuery queryWrite(stmtWrite);
		stmtWrite->Bind(1, userId);
		stmtWrite->Bind(2, keyFile);
		stmtWrite->Bind(3, NEW(PartFile(valueBufferFile, value, valueSize)));
		if(stmtWrite->Step() != SQLITE_DONE)
			THROW_SECONDARY("Can't do write", db->Error());

		pushedSomething = true;
	}

	// ensure request is over
	reader->ReadEnd();

	// output post-push revision
	writer->WriteShortlyBig(GetMaxRevision());

	//*** pull phase: stream changed records back to the client
	Data::SqliteQuery queryPull(stmtPull);
	stmtPull->Bind(1, clientRevision);
	stmtPull->Bind(2, clientUpperRevision);
	stmtPull->Bind(3, maxPullKeysCount);

	long long lastKnownClientRevision = clientRevision;

	size_t totalPullSize = 0;
	bool done = false;
	while(!done)
	{
		switch(stmtPull->Step())
		{
		case SQLITE_ROW:
			// check total pull size
			// (NOTE: checked before sending, so the row that exceeds the
			// limit is deferred to the next sync round)
			totalPullSize += stmtPull->ColumnBlobSize(2);
			if(totalPullSize > maxPullTotalSize)
			{
				// stop pull, that's enough
				done = true;
				break;
			}
			// output key (column 1)
			{
				ptr<File> key = stmtPull->ColumnBlob(1);
				size_t keySize = key->GetSize();
				writer->WriteShortly(keySize);
				writer->Write(key->GetData(), keySize);
			}
			// output value (column 2)
			{
				ptr<File> value = stmtPull->ColumnBlob(2);
				size_t valueSize = value->GetSize();
				writer->WriteShortly(valueSize);
				writer->Write(value->GetData(), valueSize);
			}
			{
				// output revision (column 0)
				long long revision = stmtPull->ColumnInt64(0);
				writer->WriteShortlyBig(revision);
				// remember last revision
				lastKnownClientRevision = revision;
			}
			break;
		case SQLITE_DONE:
			// end, no more keys
			done = true;
			break;
		default:
			// some error
			THROW_SECONDARY("Error pulling changes", db->Error());
		}
	}

	// write final zero (terminates the pull record stream)
	writer->WriteShortly(0);

	// write new client revision: the revision the client can safely claim
	// to have, given what was actually sent in this round
	{
		Data::SqliteQuery query(stmtGetWeakRevision);
		stmtGetWeakRevision->Bind(1, lastKnownClientRevision);
		stmtGetWeakRevision->Bind(2, clientUpperRevision);
		long long newClientRevision;
		switch(stmtGetWeakRevision->Step())
		{
		case SQLITE_ROW:
			// a later revision exists: client is current up to just before it
			newClientRevision = stmtGetWeakRevision->ColumnInt64(0) - 1;
			break;
		case SQLITE_DONE:
			// nothing newer in range: client is current up to the upper bound
			newClientRevision = clientUpperRevision;
			break;
		default:
			THROW_SECONDARY("Error getting new client revision", db->Error());
		}
		writer->WriteShortlyBig(newClientRevision);
	}

	// commit transaction
	transaction.Commit();

	return pushedSomething;

	END_TRY("Can't sync server repo");
}
// Returns the stream's accumulated contents as a single File.
// The backing storage is first compacted into one buffer; if the buffer is
// only partially filled, a view of the used prefix is returned instead of
// the whole buffer.
ptr<File> MemoryStream::ToFile()
{
	// merge everything into files[0]
	Compact();

	ptr<File> storage = files[0];
	if(dataSize < storage->GetSize())
	{
		// buffer has unused tail: expose only the written part
		return NEW(PartFile(storage, storage->GetData(), dataSize));
	}
	// buffer is exactly full: return it as-is
	return storage;
}