//--------------------------------------------------------------------------- // command handlers //--------------------------------------------------------------------------- bool VersionedSlaveCM::_cmdData( Command& command ) { LB_TS_THREAD( _rcvThread ); LBASSERT( command.getNode().isValid( )); if( !_currentIStream ) _currentIStream = _iStreamCache.alloc(); _currentIStream->addDataPacket( command ); if( _currentIStream->isReady( )) { const uint128_t& version = _currentIStream->getVersion(); #if 0 LBLOG( LOG_OBJECTS ) << "v" << version << ", id " << _object->getID() << "." << _object->getInstanceID() << " ready" << std::endl; #endif #ifndef NDEBUG ObjectDataIStream* debugStream = 0; _queuedVersions.getBack( debugStream ); if ( debugStream ) { LBASSERT( debugStream->getVersion() + 1 == version ); } #endif _queuedVersions.push( _currentIStream ); _object->notifyNewHeadVersion( version ); _currentIStream = 0; } return true; }
bool DataIStreamQueue::addDataPacket( const uint128_t& key, Command& command ) { EQ_TS_THREAD( _thread ); EQASSERTINFO( _pending.size() < 100, "More than 100 pending commits"); ObjectDataIStream* istream = 0; PendingStreams::iterator i = _pending.find( key ); if( i == _pending.end( )) istream = _iStreamCache.alloc(); else istream = i->second; istream->addDataPacket( command ); if( istream->isReady( )) { if( i != _pending.end( )) _pending.erase( i ); _queued.push( QueuedStream( key, istream )); EQASSERTINFO( _queued.getSize() < 100, "More than 100 queued commits" ); //EQLOG( LOG_OBJECTS ) << "Queued commit " << key << std::endl; return true; } if( i == _pending.end( )) { _pending[ key ] = istream; //EQLOG( LOG_OBJECTS ) << "New incomplete commit " << key << std::endl; return false; } //EQLOG(LOG_OBJECTS) << "Add data to incomplete commit " << key <<std::endl; return false; }
bool DataIStreamQueue::addDataCommand(const uint128_t& key, ICommand& command) { LB_TS_THREAD(_thread); LBASSERTINFO(_pending.size() < 100, "More than 100 pending commits"); ObjectDataIStream* is = 0; PendingStreams::iterator i = _pending.find(key); if (i == _pending.end()) is = _iStreamCache.alloc(); else is = i->second; is->addDataCommand(command); if (is->isReady()) { if (i != _pending.end()) _pending.erase(i); _queued.push(QueuedStream(key, is)); return true; } if (i == _pending.end()) { _pending[key] = is; return false; } return false; }
uint128_t VersionedSlaveCM::getHeadVersion() const
{
    // The newest fully-received but not-yet-applied version, if any, is
    // the head; otherwise the currently applied version is.
    ObjectDataIStream* newest = 0;
    if( !_queuedVersions.getBack( newest ))
        return _version; // nothing queued

    LBASSERT( newest );
    return newest->getVersion();
}
/**
 * Synchronize the master instance to data committed by slaves.
 *
 * @param version VERSION_NEXT to apply exactly one queued slave commit,
 *                VERSION_HEAD to apply all currently queued commits, or a
 *                concrete commit version to apply only that commit.
 * @return the object version after synchronization.
 */
uint128_t MasterCM::sync( const uint128_t& version )
{
    EQASSERTINFO( version.high() != 0 || version == VERSION_NEXT ||
                  version == VERSION_HEAD, version );
#if 0
    EQLOG( LOG_OBJECTS ) << "sync to v" << version << ", id "
                         << _object->getID() << "." << _object->getInstanceID()
                         << std::endl;
#endif
    if( version == VERSION_NEXT )
    {
        // NOTE(review): pop() presumably blocks until a slave commit is
        // queued -- confirm against the queue implementation.
        ObjectDataIStream* is = _queuedDeltas.pop();
        _apply( is );
        return _version;
    }
    // else

    if( version == VERSION_HEAD )
    {
        // Drain everything currently queued without blocking.
        ObjectDataIStream* is = 0;
        while( _queuedDeltas.tryPop( is ))
            _apply( is );
        return _version;
    }
    // else apply only concrete slave commit

    // Pop deltas until the requested commit is found; deltas belonging to
    // other commits are set aside and re-queued at the front afterwards so
    // their relative order is preserved for later syncs.
    ObjectDataIStream* is = 0;
    ObjectDataIStreams unusedStreams;
    while( !is )
    {
        ObjectDataIStream* candidate = _queuedDeltas.pop();
        if( candidate->getVersion() == version )
            is = candidate;
        else
            unusedStreams.push_back( candidate );
    }

    _apply( is );
    _queuedDeltas.pushFront( unusedStreams );
    return version;
}
/**
 * Unpack the initial instance data for a newly mapped object.
 *
 * Consumes queued version streams until the stream carrying the requested
 * version is found, applies its instance data to the object and releases
 * the stream. Streams for newer versions received before the map data are
 * discarded (see comment below).
 * NOTE(review): _queuedVersions.pop() presumably blocks until a stream is
 * queued -- confirm against the queue implementation.
 */
void VersionedSlaveCM::applyMapData( const uint128_t& version )
{
    while( true )
    {
        ObjectDataIStream* is = _queuedVersions.pop();
        if( is->getVersion() == version )
        {
            LBASSERTINFO( is->hasInstanceData(), *_object );

            if( is->hasData( ))  // not VERSION_NONE
                _object->applyInstanceData( *is );
            _version = is->getVersion();

            LBASSERT( _version != VERSION_INVALID );
            // The object must consume the complete stream, otherwise its
            // (de)serialization code is out of sync with the master.
            LBASSERTINFO( !is->hasData(),
                          lunchbox::className( _object ) <<
                          " did not unpack all data, " <<
                          is->getRemainingBufferSize() << " bytes, " <<
                          is->nRemainingBuffers() << " buffer(s)" );
            _releaseStream( is );
#if 0
            LBLOG( LOG_OBJECTS ) << "Mapped initial data of " << _object
                                 << std::endl;
#endif
            return;
        }
        else
        {
            // Found the following case:
            // - p1, t1 calls commit
            // - p1, t2 calls mapObject
            // - p1, cmd commits new version
            // - p1, cmd subscribes object
            // - p1, rcv attaches object
            // - p1, cmd receives commit data
            // -> newly attached object recv new commit data before map data,
            //    ignore it
            LBASSERTINFO( is->getVersion() > version,
                          is->getVersion() << " <= " << version );
            _releaseStream( is );
        }
    }
}
void StaticSlaveCM::addInstanceDatas( const ObjectDataIStreamDeque& cache, const uint128_t& /* start */ ) { LB_TS_THREAD( _rcvThread ); LBASSERT( _currentIStream ); LBASSERT( _currentIStream->getDataSize() == 0 ); LBASSERT( cache.size() == 1 ); if( cache.empty( )) return; ObjectDataIStream* stream = cache.front(); LBASSERT( stream ); LBASSERT( stream->isReady( )); LBASSERT( stream->getVersion() == VERSION_FIRST ); if( !stream->isReady() || stream->getVersion() != VERSION_FIRST ) return; LBLOG( LOG_OBJECTS ) << "Adding cached instance data" << std::endl; delete _currentIStream; _currentIStream = new ObjectDataIStream( *stream ); }
/**
 * Splice cached instance data streams into the queue of pending versions.
 *
 * Cached streams with versions older than the oldest queued version are
 * prepended (in reverse, via push_front), streams newer than the newest
 * queued version are appended, so the queue stays version-ordered.
 * Streams older than startVersion or overlapping the queued range are
 * ignored. Copies of the cached streams are queued; the cache keeps
 * ownership of the originals.
 */
void VersionedSlaveCM::addInstanceDatas( const ObjectDataIStreamDeque& cache,
                                         const uint128_t& startVersion )
{
    LB_TS_THREAD( _rcvThread );
#if 0
    LBLOG( LOG_OBJECTS ) << lunchbox::disableFlush << "Adding data front ";
#endif

    // Determine the version range currently covered by the queue.
    uint128_t oldest = VERSION_NONE;
    uint128_t newest = VERSION_NONE;
    if( !_queuedVersions.isEmpty( ))
    {
        ObjectDataIStream* is = 0;

        LBCHECK( _queuedVersions.getFront( is ));
        oldest = is->getVersion();

        LBCHECK( _queuedVersions.getBack( is ));
        newest = is->getVersion();
    }

    // Partition usable cached streams into those older than the queue
    // (head) and those newer than the queue (tail).
    ObjectDataIStreamDeque head;
    ObjectDataIStreams tail;

    for( ObjectDataIStreamDeque::const_iterator i = cache.begin();
         i != cache.end(); ++i )
    {
        ObjectDataIStream* stream = *i;
        const uint128_t& version = stream->getVersion();
        if( version < startVersion )
            continue;

        LBASSERT( stream->isReady( ));
        LBASSERT( stream->hasInstanceData( ));
        if( !stream->isReady( ))
            break; // release-build guard: stop at first incomplete stream

        if( version < oldest )
            head.push_front( stream );
        else if( version > newest )
            tail.push_back( stream );
    }

    // Prepend older streams; head is in descending order so repeated
    // pushFront leaves the queue front in ascending version order.
    for( ObjectDataIStreamDeque::const_iterator i = head.begin();
         i != head.end(); ++i )
    {
        const ObjectDataIStream* stream = *i;
#ifndef NDEBUG
        // Debug-only: the new front must be exactly one version older
        // than the current front.
        ObjectDataIStream* debugStream = 0;
        _queuedVersions.getFront( debugStream );
        if( debugStream )
            LBASSERT( debugStream->getVersion() == stream->getVersion() + 1);
#endif
        _queuedVersions.pushFront( new ObjectDataIStream( *stream ));
#if 0
        LBLOG( LOG_OBJECTS ) << stream->getVersion() << ' ';
#endif
    }

#if 0
    LBLOG( LOG_OBJECTS ) << " back ";
#endif
    // Append newer streams in ascending version order.
    for( ObjectDataIStreams::const_iterator i = tail.begin();
         i != tail.end(); ++i )
    {
        const ObjectDataIStream* stream = *i;
#ifndef NDEBUG
        // Debug-only: the new back must be exactly one version newer
        // than the current back.
        ObjectDataIStream* debugStream = 0;
        _queuedVersions.getBack( debugStream );
        if( debugStream )
        {
            LBASSERT( debugStream->getVersion() + 1 == stream->getVersion( ));
        }
#endif
        _queuedVersions.push( new ObjectDataIStream( *stream ));
#if 0
        LBLOG( LOG_OBJECTS ) << stream->getVersion() << ' ';
#endif
    }
#if 0
    LBLOG( LOG_OBJECTS ) << std::endl << lunchbox::enableFlush;
#endif
}
/**
 * Add one data packet of an object instance version to the cache.
 *
 * Creates or updates the cache item for rev.identifier, appending the
 * packet to the stream for rev.version. Data from a different master
 * mapping, already-cached versions, old versions, and version holes on
 * accessed items are rejected.
 *
 * @return true if the packet was added, false if it was rejected.
 */
bool InstanceCache::add( const ObjectVersion& rev, const uint32_t instanceID,
                         Command& command, const uint32_t usage )
{
#ifdef EQ_INSTRUMENT_CACHE
    ++nWrite;
#endif
    const NodeID nodeID = command.getNode()->getNodeID();

    base::ScopedMutex<> mutex( _items );
    ItemHash::const_iterator i = _items->find( rev.identifier );
    if( i == _items->end( ))
    {
        // First packet for this object: create the item and record which
        // master mapping it belongs to.
        Item& item = _items.data[ rev.identifier ];
        item.data.masterInstanceID = instanceID;
        item.from = nodeID;
    }

    Item& item = _items.data[ rev.identifier ] ;
    if( item.data.masterInstanceID != instanceID || item.from != nodeID )
    {
        EQASSERT( !item.access ); // same master with different instance ID?!
        if( item.access != 0 ) // are accessed - don't add
            return false;
        // trash data from different master mapping
        _releaseStreams( item );
        item.data.masterInstanceID = instanceID;
        item.from = nodeID;
        item.used = usage;
    }
    else
        item.used = EQ_MAX( item.used, usage );

    if( item.data.versions.empty( ))
    {
        // First stream for this item.
        item.data.versions.push_back( new ObjectDataIStream );
        item.times.push_back( _clock.getTime64( ));
    }
    else if( item.data.versions.back()->getPendingVersion() == rev.version )
    {
        // Packet continues the version currently being received.
        if( item.data.versions.back()->isReady( ))
        {
#ifdef EQ_INSTRUMENT_CACHE
            ++nWriteReady;
#endif
            return false; // Already have stream
        }
        // else append data to stream
    }
    else
    {
        // Packet starts a new version; check its relation to the last one.
        const ObjectDataIStream* previous = item.data.versions.back();
        EQASSERT( previous->isReady( ));

        const uint128_t previousVersion = previous->getPendingVersion();
        if( previousVersion > rev.version )
        {
            // Stale data for an older version: drop it.
#ifdef EQ_INSTRUMENT_CACHE
            ++nWriteOld;
#endif
            return false;
        }
        if( ( previousVersion + 1 ) != rev.version ) // hole
        {
            EQASSERT( previousVersion < rev.version );

            if( item.access != 0 ) // are accessed - don't add
                return false;

            // Non-consecutive version: cached streams are useless, trash
            // them and restart from this version.
            _releaseStreams( item );
        }
        else
        {
            EQASSERT( previous->isReady( ));
        }
        item.data.versions.push_back( new ObjectDataIStream );
        item.times.push_back( _clock.getTime64( ));
    }

    EQASSERT( !item.data.versions.empty( ));
    ObjectDataIStream* stream = item.data.versions.back();

    stream->addDataPacket( command );

    // Account the stream's size once its version is complete.
    if( stream->isReady( ))
        _size += stream->getDataSize();

    _releaseItems( 1 );
    _releaseItems( 0 );

#ifdef EQ_INSTRUMENT_CACHE
    if( _items->find( rev.identifier ) != _items->end( ))
        ++nWriteHit;
    else
        ++nWriteMiss;
#endif
    return true;
}
//--------------------------------------------------------------------------- // command handlers //--------------------------------------------------------------------------- bool MasterCM::_cmdSlaveDelta( Command& command ) { EQ_TS_THREAD( _cmdThread ); const ObjectSlaveDeltaPacket* packet = command.get< ObjectSlaveDeltaPacket >(); EQASSERTINFO( _pendingDeltas.size() < 100, "More than 100 unfinished slave commits!?" ); ObjectDataIStream* istream = 0; PendingStreams::iterator i = _pendingDeltas.begin(); for( ; i != _pendingDeltas.end(); ++i ) { PendingStream& pending = *i; if( pending.first == packet->commit ) { istream = pending.second; break; } } if( !istream ) { EQASSERT( i == _pendingDeltas.end( )); istream = _iStreamCache.alloc(); } istream->addDataPacket( command ); if( istream->isReady( )) { if( i != _pendingDeltas.end( )) _pendingDeltas.erase( i ); _queuedDeltas.push( istream ); _object->notifyNewVersion(); EQASSERTINFO( _queuedDeltas.getSize() < 100, "More than 100 queued slave commits!?" ); #if 0 EQLOG( LOG_OBJECTS ) << "Queued slave commit " << packet->commit << " object " << _object->getID() << " " << base::className( _object ) << std::endl; #endif } else if( i == _pendingDeltas.end( )) { _pendingDeltas.push_back( PendingStream( packet->commit, istream )); #if 0 EQLOG( LOG_OBJECTS ) << "New incomplete slave commit " << packet->commit << " object " << _object->getID() << " " << base::className( _object ) << std::endl; #endif } #if 0 else EQLOG( LOG_OBJECTS ) << "Got data for incomplete slave commit " << packet->commit << " object " << _object->getID() << " " << base::className( _object ) << std::endl; #endif return true; }