bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string ns = cmdObj["getShardVersion"].valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "need to specify full namespace"; return false; } result.append( "configServer" , shardingState.getConfigServer() ); result.appendTimestamp( "global" , shardingState.getVersion(ns).toLong() ); ShardedConnectionInfo* info = ShardedConnectionInfo::get( false ); result.appendBool( "inShardedMode" , info != 0 ); if ( info ) result.appendTimestamp( "mine" , info->getVersion(ns).toLong() ); else result.appendTimestamp( "mine" , 0 ); if ( cmdObj["fullMetadata"].trueValue() ) { CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns ); if ( metadata ) result.append( "metadata", metadata->toBSON() ); else result.append( "metadata", BSONObj() ); } return true; }
void run(){ Scope * s = globalScriptEngine->createScope(); BSONObjBuilder b; b.appendTimestamp( "a" , 123456789 ); b.appendMinKey( "b" ); b.appendMaxKey( "c" ); b.appendTimestamp( "d" , 1234000 , 9876 ); { BSONObj t = b.done(); ASSERT_EQUALS( 1234000U , t["d"].timestampTime() ); ASSERT_EQUALS( 9876U , t["d"].timestampInc() ); } s->setObject( "z" , b.obj() ); ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , BSONObj() ) == 0 ); BSONObj out = s->getObject( "y" ); ASSERT_EQUALS( Timestamp , out["a"].type() ); ASSERT_EQUALS( MinKey , out["b"].type() ); ASSERT_EQUALS( MaxKey , out["c"].type() ); ASSERT_EQUALS( Timestamp , out["d"].type() ); ASSERT_EQUALS( 9876U , out["d"].timestampInc() ); ASSERT_EQUALS( 1234000U , out["d"].timestampTime() ); ASSERT_EQUALS( 123456789U , out["a"].date() ); delete s; }
// Build the replSetGetStatus-style summary document into `b`: one entry per
// member (including this node), sorted, plus set-wide fields.
void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
    vector<BSONObj> v;
    const Member *_self = this->_self;
    assert( _self );

    // add self
    {
        BSONObjBuilder bb;
        bb.append("_id", (int) _self->id());
        bb.append("name", _self->fullName());
        // We are always "healthy" from our own point of view.
        bb.append("health", 1.0);
        bb.append("state", (int) box.getState().s);
        bb.append("stateStr", box.getState().toString());
        bb.appendTimestamp("optime", lastOpTimeWritten.asDate());
        // optimeDate is seconds scaled to milliseconds for a Date field.
        bb.appendDate("optimeDate", lastOpTimeWritten.getSecs() * 1000LL);
        string s = _self->lhb();
        if( !s.empty() )
            bb.append("errmsg", s);
        bb.append("self", true);
        v.push_back(bb.obj());
    }

    // add each remote member, using its last-heartbeat info
    Member *m =_members.head();
    while( m ) {
        BSONObjBuilder bb;
        bb.append("_id", (int) m->id());
        bb.append("name", m->fullName());
        double h = m->hbinfo().health;
        bb.append("health", h);
        bb.append("state", (int) m->state().s);
        if( h == 0 ) {
            // if we can't connect the state info is from the past and could be confusing to show
            bb.append("stateStr", "(not reachable/healthy)");
        }
        else {
            bb.append("stateStr", m->state().toString());
        }
        bb.append("uptime", (unsigned) (m->hbinfo().upSince ? (time(0)-m->hbinfo().upSince) : 0));
        bb.appendTimestamp("optime", m->hbinfo().opTime.asDate());
        bb.appendDate("optimeDate", m->hbinfo().opTime.getSecs() * 1000LL);
        bb.appendTimeT("lastHeartbeat", m->hbinfo().lastHeartbeat);
        bb.append("ping", m->hbinfo().ping);
        string s = m->lhb();
        if( !s.empty() )
            bb.append("errmsg", s);
        v.push_back(bb.obj());
        m = m->next();
    }

    // members are sorted (by BSON comparison, i.e. effectively by _id first)
    sort(v.begin(), v.end());
    b.append("set", name());
    b.appendTimeT("date", time(0));
    b.append("myState", box.getState().s);
    if (_currentSyncTarget) {
        b.append("syncingTo", _currentSyncTarget->fullName());
    }
    b.append("members", v);
    if( replSetBlind )
        b.append("blind",true); // to avoid confusion if set...normally never set except for testing.
}
void Chunk::serialize(BSONObjBuilder& to){
    // An unset (zero) lastmod is written via the no-value appendTimestamp
    // placeholder; otherwise the stored modification version is used.
    if ( _lastmod )
        to.appendTimestamp( "lastmod" , _lastmod );
    else
        to.appendTimestamp( "lastmod" );

    // Chunk identity: namespace, key range, and owning shard.
    to << "ns" << _ns
       << "min" << _min
       << "max" << _max
       << "shard" << _shard;
}
BSONObj generateSection(const BSONElement& configElement) const {
    // Without a replica set there is nothing to report.
    if (!theReplSet)
        return BSONObj();

    // Report the newest and oldest optimes this node has written.
    BSONObjBuilder section;
    section.appendTimestamp("latestOptime", theReplSet->lastOpTimeWritten.asDate());
    section.appendTimestamp("earliestOptime", theReplSet->getEarliestOpTimeWritten().asDate());
    return section.obj();
}
void OplogReader::tailingQueryGTE(const char *ns, OpTime optime, const BSONObj* fields ) { BSONObjBuilder gte; gte.appendTimestamp("$gte", optime.asDate()); BSONObjBuilder query; query.append("ts", gte.done()); tailingQuery(ns, query.done(), fields); }
void run() { OpTime o; { mongo::mutex::scoped_lock lk2(OpTime::m); o = OpTime::now(lk2); } BSONObjBuilder b; b.append("ns","dummy"); b.appendTimestamp("ts", o.asLL()); BSONObj obj = b.obj(); MockInitialSync mock; // all three should succeed std::vector<BSONObj> ops; ops.push_back(obj); replset::multiInitialSyncApply(ops, &mock); mock.failOnStep = MockInitialSync::FAIL_FIRST_APPLY; replset::multiInitialSyncApply(ops, &mock); mock.retry = false; replset::multiInitialSyncApply(ops, &mock); drop(); }
void run() { writelock lk(""); OpTime o1 = OpTime::now(); OpTime o2 = OpTime::now(); BSONObjBuilder b; b.appendTimestamp("ts", o2.asLL()); BSONObj obj = b.obj(); MockInitialSync mock; // all three should succeed mock.applyOp(obj, o1); mock.failOnStep = MockInitialSync::FAIL_FIRST_APPLY; mock.applyOp(obj, o1); mock.retry = false; mock.applyOp(obj, o1); // force failure MockInitialSync mock2; mock2.failOnStep = MockInitialSync::FAIL_BOTH_APPLY; ASSERT_THROWS(mock2.applyOp(obj, o2), UserException); }
void addOp(const string& op, BSONObj o, BSONObj* o2 = NULL, const char* coll = NULL, int version = 0) { OpTime ts(getNextGlobalOptime()); BSONObjBuilder b; b.appendTimestamp("ts", ts.asLL()); if (version != 0) { b.append("v", version); } b.append("op", op); b.append("o", o); if (o2) { b.append("o2", *o2); } if (coll) { b.append("ns", coll); } else { b.append("ns", ns()); } _bgsync->addDoc(b.done()); }
void run() { BSONObj o; { BSONObjBuilder b; b.appendTimestamp( "a" ); b.appendTimestamp( "b" ); b.append( "_id", 1 ); o = b.obj(); } BSONObj fixed = fixDocumentForInsert( o ).getValue(); ASSERT_EQUALS( 3, fixed.nFields() ); ASSERT( fixed.firstElement().fieldNameStringData() == "_id" ); ASSERT( fixed.firstElement().number() == 1 ); BSONElement a = fixed["a"]; ASSERT( o["a"].type() == Timestamp ); ASSERT( o["a"].timestampValue() == 0 ); ASSERT( a.type() == Timestamp ); ASSERT( a.timestampValue() > 0 ); BSONElement b = fixed["b"]; ASSERT( o["b"].type() == Timestamp ); ASSERT( o["b"].timestampValue() == 0 ); ASSERT( b.type() == Timestamp ); ASSERT( b.timestampValue() > 0 ); }
// Background thread: once a second, flush accumulated slave replication
// progress ("syncedTo" optimes) into the tracking collection at NS.
void run(){
    Client::initThread( "slaveTracking" );
    DBDirectClient db;
    while ( ! inShutdown() ){
        sleepsecs( 1 );
        // Nothing new since the last flush.
        if ( ! _dirty )
            continue;

        // NOTE(review): the DB write lock is taken before _mutex here;
        // presumably every other path uses the same order — verify to rule
        // out lock-order inversion.
        writelock lk(NS);

        list< pair<BSONObj,BSONObj> > todo;
        {
            // Snapshot and clear the pending per-slave updates under _mutex
            // so the actual DB writes below run without holding it.
            scoped_lock mylk(_mutex);
            for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++ ){
                BSONObjBuilder temp;
                temp.appendTimestamp( "syncedTo" , i->second.loc[0].asDate() );
                // pair: (query identifying the slave, $set update to apply)
                todo.push_back( pair<BSONObj,BSONObj>( i->first.obj.getOwned() , BSON( "$set" << temp.obj() ).getOwned() ) );
            }
            _slaves.clear();
        }
        for ( list< pair<BSONObj,BSONObj> >::iterator i=todo.begin(); i!=todo.end(); i++ ){
            // upsert (last arg true) so first-seen slaves get a document
            db.update( NS , i->first , i->second , true );
        }
        _dirty = false;
    }
}
void Client::appendLastOp( BSONObjBuilder& b ) const {
    // _lastOp is never set if replication is off
    const bool replSetMode =
        repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
        repl::ReplicationCoordinator::modeReplSet;
    if (replSetMode || !_lastOp.isNull()) {
        b.appendTimestamp( "lastOp" , _lastOp.asDate() );
    }
}
void run() { Lock::GlobalWrite lk; OpTime o1,o2; { mongo::mutex::scoped_lock lk2(OpTime::m); o1 = OpTime::now(lk2); o2 = OpTime::now(lk2); } BSONObjBuilder b; b.appendTimestamp("ts", o2.asLL()); BSONObj obj = b.obj(); MockInitialSync mock; // all three should succeed mock.applyOp(obj, o1); mock.failOnStep = MockInitialSync::FAIL_FIRST_APPLY; mock.applyOp(obj, o1); mock.retry = false; mock.applyOp(obj, o1); // force failure MockInitialSync mock2; mock2.failOnStep = MockInitialSync::FAIL_BOTH_APPLY; ASSERT_THROWS(mock2.applyOp(obj, o2), UserException); }
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){ string ns = cmdObj["getShardVersion"].valuestrsafe(); if ( ns.size() == 0 ){ errmsg = "need to speciy fully namespace"; return false; } result.append( "configServer" , shardConfigServer.c_str() ); result.appendTimestamp( "global" , globalVersions[ns] ); if ( clientShardVersions.get() ) result.appendTimestamp( "mine" , (*clientShardVersions.get())[ns] ); else result.appendTimestamp( "mine" , 0 ); return true; }
void run() { BSONObjBuilder b; b.appendTimestamp( "a" ); BSONObj o = b.done(); ASSERT( 0 == o.getField( "a" ).date() ); theDataFileMgr.insert( ns(), o ); ASSERT( 0 != o.getField( "a" ).date() ); }
void Client::appendLastOp( BSONObjBuilder& b ) const {
    if( theReplSet ) {
        // Replica-set mode: always report the raw value.
        b.append("lastOp" , (long long) _lastOp);
    }
    else {
        // Master/slave mode: only report when an op has been recorded.
        OpTime lastOp(_lastOp);
        if ( ! lastOp.isNull() )
            b.appendTimestamp( "lastOp" , lastOp.asDate() );
    }
}
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { string ns = cmdObj["getShardVersion"].valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "need to speciy fully namespace"; return false; } result.append( "configServer" , shardingState.getConfigServer() ); result.appendTimestamp( "global" , shardingState.getVersion(ns) ); ShardedConnectionInfo* info = ShardedConnectionInfo::get( false ); if ( info ) result.appendTimestamp( "mine" , info->getVersion(ns) ); else result.appendTimestamp( "mine" , 0 ); return true; }
void insertSucceed() { BSONObjBuilder b; OpTime ts(getNextGlobalOptime()); b.appendTimestamp("ts", ts.asLL()); b.append("op", "i"); b.append("o", BSON("_id" << 123 << "x" << 456)); b.append("ns", cappedNs()); verify(apply(b.obj())); }
void insertSucceed() { BSONObjBuilder b; { mongo::mutex::scoped_lock lk2(OpTime::m); b.appendTimestamp("ts", OpTime::now(lk2).asLL()); } b.append("op", "i"); b.append("o", BSON("_id" << 123 << "x" << 456)); b.append("ns", cappedNs()); verify(apply(b.obj())); }
// Serialize this shard chunk's bounds and location into `to`.
// NOTE(review): a set _lastmod is written with appendDate while an unset one
// uses the appendTimestamp placeholder, so readers see two different BSON
// types for "lastmod". Compare Chunk::serialize, which uses appendTimestamp
// in both branches — verify whether this asymmetry is intentional.
void Shard::serialize(BSONObjBuilder& to){
    if ( _lastmod )
        to.appendDate( "lastmod" , _lastmod );
    else
        to.appendTimestamp( "lastmod" );
    to << "ns" << _ns;
    to << "min" << _min;
    to << "max" << _max;
    to << "server" << _server;
}
void updateSucceed() { BSONObjBuilder b; OpTime ts(getNextGlobalOptime()); b.appendTimestamp("ts", ts.asLL()); b.append("op", "u"); b.append("o", BSON("$set" << BSON("x" << 789))); b.append("o2", BSON("x" << 456)); b.append("ns", cappedNs()); verify(apply(b.obj())); }
BSONObj ChunkType::toShardBSON() const {
    // A shard-side chunk document requires bounds, owner, and version.
    invariant(_min);
    invariant(_max);
    invariant(_shard);
    invariant(_version);

    BSONObjBuilder builder;
    builder.append(minShardID.name(), getMin());
    builder.append(max.name(), getMax());
    builder.append(shard.name(), getShard().toString());
    builder.appendTimestamp(lastmod.name(), _version->toLong());
    return builder.obj();
}
void updateSucceed() { BSONObjBuilder b; { mongo::mutex::scoped_lock lk2(OpTime::m); b.appendTimestamp("ts", OpTime::now(lk2).asLL()); } b.append("op", "u"); b.append("o", BSON("$set" << BSON("x" << 789))); b.append("o2", BSON("x" << 456)); b.append("ns", cappedNs()); verify(apply(b.obj())); }
void run() { create(); BSONObjBuilder b; b.appendTimestamp("ts", OpTime::now().asLL()); b.append("op", "u"); b.append("o", BSON("$set" << BSON("x" << 456))); b.append("o2", BSON("_id" << 123 << "x" << 123)); b.append("ns", _ns); // in an annoying twist of api, returns true on failure assert(applyOperation_inlock(b.obj(), true)); }
BSONObj updateFail() {
    // Build an update whose o2 criteria cannot match; apply() must fail.
    OpTime ts(getNextGlobalOptime());

    BSONObjBuilder entry;
    entry.appendTimestamp("ts", ts.asLL());
    entry.append("op", "u");
    entry.append("o", BSON("$set" << BSON("x" << 456)));
    entry.append("o2", BSON("_id" << 123 << "x" << 123));
    entry.append("ns", _cappedNs);

    // Return the op so callers can re-apply or inspect it.
    BSONObj op = entry.obj();
    verify(!apply(op));
    return op;
}
BSONObj updateFail() {
    // Build an update whose o2 criteria cannot match; apply() must fail.
    BSONObjBuilder entry;
    {
        // OpTime::now() requires the OpTime mutex.
        mongo::mutex::scoped_lock lk2(OpTime::m);
        entry.appendTimestamp("ts", OpTime::now(lk2).asLL());
    }
    entry.append("op", "u");
    entry.append("o", BSON("$set" << BSON("x" << 456)));
    entry.append("o2", BSON("_id" << 123 << "x" << 123));
    entry.append("ns", _ns);

    // Return the op so callers can re-apply or inspect it.
    BSONObj op = entry.obj();
    verify(!apply(op));
    return op;
}
void run() { BSONObjBuilder b; b.appendTimestamp("a"); b.append("_id", 1); BSONObj o = b.done(); BSONObj fixed = fixDocumentForInsert(_opCtx.getServiceContext(), o).getValue(); ASSERT_EQUALS(2, fixed.nFields()); ASSERT(fixed.firstElement().fieldNameStringData() == "_id"); ASSERT(fixed.firstElement().number() == 1); BSONElement a = fixed["a"]; ASSERT(o["a"].type() == bsonTimestamp); ASSERT(o["a"].timestampValue() == 0); ASSERT(a.type() == bsonTimestamp); ASSERT(a.timestampValue() > 0); }
bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result ){
    // Identify the shard behind this connection up front.
    Shard shard = Shard::make( conn.getServerAddress() );

    // Assemble the setShardVersion admin command.
    BSONObjBuilder cmdBuilder;
    cmdBuilder.append( "setShardVersion" , ns.c_str() );
    cmdBuilder.append( "configdb" , configServer.modelServer() );
    cmdBuilder.appendTimestamp( "version" , version );
    cmdBuilder.appendOID( "serverID" , &serverID );
    if ( authoritative )
        cmdBuilder.appendBool( "authoritative" , 1 );
    cmdBuilder.append( "shard" , shard.getName() );
    cmdBuilder.append( "shardHost" , shard.getConnString() );

    BSONObj cmd = cmdBuilder.obj();
    log(1) << " setShardVersion " << shard.getName() << " " << conn.getServerAddress()
           << " " << ns << " " << cmd << " " << &conn << endl;
    return conn.runCommand( "admin" , cmd , result );
}
void run() { OpTime o(getNextGlobalOptime()); BSONObjBuilder b; b.append("ns","dummy"); b.appendTimestamp("ts", o.asLL()); BSONObj obj = b.obj(); MockInitialSync mock; // all three should succeed std::vector<BSONObj> ops; ops.push_back(obj); repl::multiInitialSyncApply(ops, &mock); mock.failOnStep = MockInitialSync::FAIL_FIRST_APPLY; repl::multiInitialSyncApply(ops, &mock); mock.retry = false; repl::multiInitialSyncApply(ops, &mock); drop(); }
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string ns = cmdObj.firstElement().valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "need to specify fully namespace"; return false; } DBConfigPtr config = grid.getDBConfig( ns ); if ( ! config->isSharded( ns ) ) { errmsg = "ns not sharded."; return false; } ChunkManagerPtr cm = config->getChunkManagerIfExists( ns ); if ( ! cm ) { errmsg = "no chunk manager?"; return false; } cm->_printChunks(); result.appendTimestamp( "version" , cm->getVersion().toLong() ); return 1; }