bool Cloner::finishCloneCollection( const char *fromhost, const char *ns, const BSONObj &query,
                                    long long cursorId, string &errmsg ) {
    char db[256];
    nsToClient( ns, db );

    auto_ptr< DBClientCursor > cur;
    {
        dbtemprelease r;
        auto_ptr< DBClientConnection > c( new DBClientConnection() );
        if ( !c->connect( fromhost, errmsg ) )
            return false;
        if( !replAuthenticate(c.get()) )
            return false;
        conn = c;

        string logNS = "local.temp.oplog." + string( ns );
        if ( cursorId != 0 )
            cur = conn->getMore( logNS.c_str(), cursorId );
        else
            cur = conn->query( logNS.c_str(), Query() );
    }
    replayOpLog( cur.get(), query );
    {
        dbtemprelease t;
        BSONObj info;
        if ( !conn->runCommand( db, BSON( "logCollection" << ns << "validateComplete" << 1 ), info ) ) {
            errmsg = "logCollection failed: " + (string)info;
            return false;
        }
    }
    return true;
}
bool SyncSourceFeedback::_connect(const std::string& hostName) {
    if (hasConnection()) {
        return true;
    }
    log() << "replset setting syncSourceFeedback to " << hostName << rsLog;
    _connection.reset(new DBClientConnection(false, 0, OplogReader::tcp_timeout));
    string errmsg;
    try {
        if (!_connection->connect(hostName.c_str(), errmsg) ||
            (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate())) {
            resetConnection();
            log() << "repl: " << errmsg << endl;
            return false;
        }
    } catch (const DBException& e) {
        log() << "Error connecting to " << hostName << ": " << e.what();
        resetConnection();
        return false;
    }

    if (!replHandshake()) {
        if (!supportsUpdater()) {
            return connectOplogReader(hostName);
        }
        return false;
    }
    return true;
}
bool Cloner::startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query,
                                   string &errmsg, bool logForRepl, bool copyIndexes,
                                   int logSizeMb, long long &cursorId ) {
    char db[256];
    nsToClient( ns, db );

    {
        dbtemprelease r;
        auto_ptr< DBClientConnection > c( new DBClientConnection() );
        if ( !c->connect( fromhost, errmsg ) )
            return false;
        if( !replAuthenticate(c.get()) )
            return false;
        conn = c;

        // Start temporary op log
        BSONObjBuilder cmdSpec;
        cmdSpec << "logCollection" << ns << "start" << 1;
        if ( logSizeMb != INT_MIN )
            cmdSpec << "logSizeMb" << logSizeMb;
        BSONObj info;
        if ( !conn->runCommand( db, cmdSpec.done(), info ) ) {
            errmsg = "logCollection failed: " + (string)info;
            return false;
        }
    }

    BSONObj spec = conn->findOne( string( db ) + ".system.namespaces", BSON( "name" << ns ) );
    if ( !userCreateNS( ns, spec.getObjectField( "options" ), errmsg, true ) )
        return false;

    copy( ns, ns, false, logForRepl, false, false, query );

    if ( copyIndexes ) {
        string indexNs = string( db ) + ".system.indexes";
        copy( indexNs.c_str(), indexNs.c_str(), true, logForRepl, false, false,
              BSON( "ns" << ns << "name" << NE << "_id_" ) );
    }

    auto_ptr< DBClientCursor > c;
    {
        dbtemprelease r;
        string logNS = "local.temp.oplog." + string( ns );
        c = conn->query( logNS.c_str(), Query(), 0, 0, 0, Option_CursorTailable );
    }

    if ( c->more() ) {
        replayOpLog( c.get(), query );
        cursorId = c->getCursorId();
        massert( "Expected valid tailing cursor", cursorId != 0 );
    }
    else {
        massert( "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
        cursorId = 0;
    }

    c->decouple();
    return true;
}
shared_ptr<DBClientConnection> makeConnection( const char *masterHost, string& errmsg ) {
    verify(!masterSameProcess(masterHost));
    ConnectionString cs = ConnectionString::parse(masterHost, errmsg);
    shared_ptr<DBClientConnection> conn(static_cast<DBClientConnection *>(cs.connect(errmsg)));
    if (!replAuthenticate(conn.get())) {
        errmsg = "can't authenticate replication";
        conn.reset();
    }
    return conn;
}
bool OplogReader::commonConnect(const string& hostName) {
    if( conn() == 0 ) {
        _conn = shared_ptr<DBClientConnection>(new DBClientConnection(false, 0, tcp_timeout));
        string errmsg;
        if ( !_conn->connect(hostName.c_str(), errmsg) ||
             (AuthorizationManager::isAuthEnabled() && !replAuthenticate(_conn.get(), true)) ) {
            resetConnection();
            log() << "repl: " << errmsg << endl;
            return false;
        }
    }
    return true;
}
bool OplogReader::commonConnect(const string& hostName, const double default_timeout) {
    if( conn() == 0 ) {
        _conn = shared_ptr<DBClientConnection>(
            new DBClientConnection(false, 0, default_timeout /* tcp timeout */));
        string errmsg;
        if ( !_conn->connect(hostName.c_str(), errmsg) ||
             (!noauth && !replAuthenticate(_conn.get(), true)) ) {
            resetConnection();
            log() << "repl: " << errmsg << endl;
            return false;
        }
    }
    return true;
}
bool OplogReader::connect(const HostAndPort& host) {
    if (conn() == NULL || _host != host) {
        resetConnection();
        _conn = shared_ptr<DBClientConnection>(new DBClientConnection(false, tcp_timeout));
        string errmsg;
        if (!_conn->connect(host, errmsg) ||
            (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate(_conn.get()))) {
            resetConnection();
            log() << "repl: " << errmsg << endl;
            return false;
        }
        _host = host;
    }
    return true;
}
bool OplogReader::connect(string hostName) {
    if( conn() == 0 ) {
        _conn = auto_ptr<DBClientConnection>(new DBClientConnection( false, 0, 0 /* tcp timeout */));
        string errmsg;
        ReplInfo r("trying to connect to sync source");
        if ( !_conn->connect(hostName.c_str(), errmsg) ||
             (!noauth && !replAuthenticate(_conn.get())) ||
             !replHandshake(_conn.get()) ) {
            resetConnection();
            log() << "repl: " << errmsg << endl;
            return false;
        }
    }
    return true;
}
void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* opCtx,
                                                  const NamespaceString& nss) const {
    std::string errmsg;
    std::unique_ptr<DBClientConnection> tmpConn(new DBClientConnection());
    uassert(15908,
            errmsg,
            tmpConn->connect(_source, StringData(), errmsg) && replAuthenticate(tmpConn.get()));

    // cloner owns _conn in unique_ptr
    Cloner cloner;
    cloner.setConnection(tmpConn.release());
    uassert(15909,
            str::stream() << "replSet rollback error resyncing collection " << nss.ns() << ' '
                          << errmsg,
            cloner.copyCollection(opCtx, nss.ns(), BSONObj(), errmsg, true));
}
bool OplogReader::connect(const HostAndPort& host) {
    if (conn() == NULL || _host != host) {
        resetConnection();
        _conn = shared_ptr<DBClientConnection>(
            new DBClientConnection(false, durationCount<Seconds>(kSocketTimeout)));
        string errmsg;
        if (!_conn->connect(host, StringData(), errmsg) || !replAuthenticate(_conn.get())) {
            resetConnection();
            error() << errmsg << endl;
            return false;
        }
        _conn->port().setTag(_conn->port().getTag() |
                             executor::NetworkInterface::kMessagingPortKeepOpen);
        _host = host;
    }
    return true;
}
bool OplogReader::connect(const HostAndPort& host) {
    if (conn() == NULL || _host != host) {
        resetConnection();
        _conn = shared_ptr<DBClientConnection>(
            new DBClientConnection(false, durationCount<Seconds>(kSocketTimeout)));
        string errmsg;
        if (!_conn->connect(host, errmsg) ||
            (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate(_conn.get()))) {
            resetConnection();
            error() << errmsg << endl;
            return false;
        }
        _conn->port().tag |= executor::NetworkInterface::kMessagingPortKeepOpen;
        _host = host;
    }
    return true;
}
bool OplogReader::connect(const HostAndPort& host) {
    if (conn() == NULL || _host != host) {
        resetConnection();
        _conn = std::shared_ptr<DBClientConnection>(
            new DBClientConnection(false, durationCount<Seconds>(kSocketTimeout)));
        std::string errmsg;
        if (!_conn->connect(host, StringData(), errmsg) || !replAuthenticate(_conn.get())) {
            resetConnection();
            error() << errmsg;
            return false;
        }
        _conn->setTags(transport::Session::kKeepOpen);
        _host = host;
    }
    return true;
}
void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* opCtx,
                                                  const NamespaceString& nss) const {
    std::string errmsg;
    auto tmpConn = stdx::make_unique<DBClientConnection>();
    uassert(15908,
            errmsg,
            tmpConn->connect(_source, StringData(), errmsg) && replAuthenticate(tmpConn.get()));

    // cloner owns _conn in unique_ptr
    Cloner cloner;
    cloner.setConnection(std::move(tmpConn));
    uassert(15909,
            str::stream() << "replSet rollback error resyncing collection " << nss.ns() << ' '
                          << errmsg,
            cloner.copyCollection(
                opCtx, nss.ns(), BSONObj(), errmsg, true, CollectionOptions::parseForStorage));
}
bool SyncSourceFeedback::_connect(const std::string& hostName) {
    if (hasConnection()) {
        return true;
    }
    _connection.reset(new DBClientConnection(false, 0, OplogReader::tcp_timeout));
    string errmsg;
    if (!_connection->connect(hostName.c_str(), errmsg) ||
        (AuthorizationManager::isAuthEnabled() && !replAuthenticate(true))) {
        resetConnection();
        log() << "repl: " << errmsg << endl;
        return false;
    }

    if (!replHandshake()) {
        if (!supportsUpdater()) {
            return connectOplogReader(hostName);
        }
        return false;
    }
    return true;
}
bool SyncSourceFeedback::_connect(OperationContext* txn, const HostAndPort& host) {
    if (hasConnection()) {
        return true;
    }
    log() << "replset setting syncSourceFeedback to " << host.toString() << rsLog;
    _connection.reset(new DBClientConnection(false, 0, OplogReader::tcp_timeout));
    string errmsg;
    try {
        if (!_connection->connect(host, errmsg) ||
            (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate())) {
            _resetConnection();
            log() << "repl: " << errmsg << endl;
            return false;
        }
    } catch (const DBException& e) {
        log() << "Error connecting to " << host.toString() << ": " << e.what();
        _resetConnection();
        return false;
    }

    return hasConnection();
}
void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
    ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
    if (replCoord->getSettings().usingReplSets()) {
        if (replCoord->getReplicationMode() != ReplicationCoordinator::modeReplSet ||
            replCoord->getCurrentMemberState().shunned()) {
            result.append("ismaster", false);
            result.append("secondary", false);
            result.append("info", ReplSet::startupStatusMsg.get());
            result.append( "isreplicaset" , true );
        }
        else {
            theReplSet->fillIsMaster(result);
        }
        return;
    }

    if ( replAllDead ) {
        result.append("ismaster", 0);
        string s = string("dead: ") + replAllDead;
        result.append("info", s);
    }
    else {
        result.appendBool("ismaster",
                          getGlobalReplicationCoordinator()->isMasterForReportingPurposes());
    }

    if (level && replCoord->getSettings().usingReplSets()) {
        result.append( "info" , "is replica set" );
    }
    else if ( level ) {
        BSONObjBuilder sources( result.subarrayStart( "sources" ) );

        int n = 0;
        list<BSONObj> src;
        {
            const char* localSources = "local.sources";
            Client::ReadContext ctx(txn, localSources);
            auto_ptr<PlanExecutor> exec(
                InternalPlanner::collectionScan(txn, localSources,
                                                ctx.ctx().db()->getCollection(txn, localSources)));
            BSONObj obj;
            Runner::RunnerState state;
            while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
                src.push_back(obj);
            }
        }

        for( list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++ ) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append( s["host"] );
            string sourcename = s["source"].valuestr();
            if ( sourcename != "main" )
                bb.append( s["source"] );

            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
                t.appendDate( "time" , e.timestampTime() );
                t.append( "inc" , e.timestampInc() );
                t.done();
            }

            if ( level > 1 ) {
                wassert(txn->lockState()->threadState() == 0);
                // note: there is no so-style timeout on this connection; perhaps we should have one.
                ScopedDbConnection conn(s["host"].valuestr());
                DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() );
                if ( cliConn && replAuthenticate(cliConn) ) {
                    BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                   Query().sort( BSON( "$natural" << 1 ) ) );
                    BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                  Query().sort( BSON( "$natural" << -1 ) ) );
                    bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
                    bb.appendDate( "masterLast" , last["ts"].timestampTime() );
                    double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append( "lagSeconds" , lag / 1000 );
                }
                conn.done();
            }

            sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
        }

        sources.done();
    }
}
bool Cloner::startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query,
                                   string &errmsg, bool logForRepl, bool copyIndexes,
                                   int logSizeMb, long long &cursorId ) {
    char db[256];
    nsToClient( ns, db );

    NamespaceDetails *nsd = nsdetails( ns );
    if ( nsd ){
        /** note: it's ok to clone into a collection, but only if the range you're copying
                  doesn't exist on this server */
        string err;
        if ( runCount( ns , BSON( "query" << query ) , err ) > 0 ){
            log() << "WARNING: data already exists for: " << ns << " in range : " << query
                  << " deleting..." << endl;
            deleteObjects( ns , query , false , logForRepl , false );
        }
    }

    {
        dbtemprelease r;
        auto_ptr< DBClientConnection > c( new DBClientConnection() );
        if ( !c->connect( fromhost, errmsg ) )
            return false;
        if( !replAuthenticate(c.get()) )
            return false;
        conn = c;

        // Start temporary op log
        BSONObjBuilder cmdSpec;
        cmdSpec << "logCollection" << ns << "start" << 1;
        if ( logSizeMb != INT_MIN )
            cmdSpec << "logSizeMb" << logSizeMb;
        BSONObj info;
        if ( !conn->runCommand( db, cmdSpec.done(), info ) ) {
            errmsg = "logCollection failed: " + (string)info;
            return false;
        }
    }

    if ( ! nsd ) {
        BSONObj spec = conn->findOne( string( db ) + ".system.namespaces", BSON( "name" << ns ) );
        if ( !userCreateNS( ns, spec.getObjectField( "options" ), errmsg, true ) )
            return false;
    }

    copy( ns, ns, false, logForRepl, false, false, query );

    if ( copyIndexes ) {
        string indexNs = string( db ) + ".system.indexes";
        copy( indexNs.c_str(), indexNs.c_str(), true, logForRepl, false, false,
              BSON( "ns" << ns << "name" << NE << "_id_" ) );
    }

    auto_ptr< DBClientCursor > c;
    {
        dbtemprelease r;
        string logNS = "local.temp.oplog." + string( ns );
        c = conn->query( logNS.c_str(), Query(), 0, 0, 0, Option_CursorTailable );
    }

    if ( c->more() ) {
        replayOpLog( c.get(), query );
        cursorId = c->getCursorId();
        massert( "Expected valid tailing cursor", cursorId != 0 );
    }
    else {
        massert( "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
        cursorId = 0;
    }

    c->decouple();
    return true;
}
bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl,
                bool slaveOk, bool useReplAuth, bool snapshot) {
    massert( "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );

    string todb = cc().database()->name;
    stringstream a,b;
    a << "localhost:" << cmdLine.port;
    b << "127.0.0.1:" << cmdLine.port;
    bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
    if ( masterSameProcess ) {
        if ( fromdb == todb && cc().database()->path == dbpath ) {
            // guard against an "infinite" loop
            /* if you are replicating, the local.sources config may be wrong if you get this */
            errmsg = "can't clone from self (localhost).";
            return false;
        }
    }
    /* todo: we can put these releases inside dbclient or a dbclient specialization.
       or just wait until we get rid of global lock anyway.
    */
    string ns = fromdb + ".system.namespaces";
    list<BSONObj> toClone;
    {
        dbtemprelease r;

        auto_ptr<DBClientCursor> c;
        {
            if ( !masterSameProcess ) {
                auto_ptr< DBClientConnection > c( new DBClientConnection() );
                if ( !c->connect( masterHost, errmsg ) )
                    return false;
                if( !replAuthenticate(c.get()) )
                    return false;
                conn = c;
            }
            else {
                conn.reset( new DBDirectClient() );
            }
            c = conn->query( ns.c_str(), BSONObj(), 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
        }

        if ( c.get() == 0 ) {
            errmsg = "query failed " + ns;
            return false;
        }

        while ( c->more() ){
            BSONObj collection = c->next();

            log(2) << "\t cloner got " << collection << endl;

            BSONElement e = collection.findElement("name");
            if ( e.eoo() ) {
                string s = "bad system.namespaces object " + collection.toString();
                massert(s.c_str(), false);
            }
            assert( !e.eoo() );
            assert( e.type() == String );
            const char *from_name = e.valuestr();

            if( strstr(from_name, ".system.") ) {
                /* system.users is cloned -- but nothing else from system. */
                if( legalClientSystemNS( from_name , true ) == 0 ){
                    log(2) << "\t\t not cloning because system collection" << endl;
                    continue;
                }
            }
            else if( strchr(from_name, '$') ) {
                // don't clone index namespaces -- we take care of those separately below.
                log(2) << "\t\t not cloning because has $ " << endl;
                continue;
            }

            toClone.push_back( collection.getOwned() );
        }
    }

    for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ){
        {
            dbtemprelease r;
        }
        BSONObj collection = *i;
        log(2) << "  really will clone: " << collection << endl;
        const char * from_name = collection["name"].valuestr();
        BSONObj options = collection.getObjectField("options");

        /* change name "<fromdb>.collection" -> <todb>.collection */
        const char *p = strchr(from_name, '.');
        assert(p);
        string to_name = todb + p;

        {
            string err;
            const char *toname = to_name.c_str();
            userCreateNS(toname, options, err, logForRepl);
        }

        log(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
        Query q;
        if( snapshot )
            q.snapshot();
        copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk, q);
    }

    // now build the indexes
    string system_indexes_from = fromdb + ".system.indexes";
    string system_indexes_to = todb + ".system.indexes";
    /* [dm]: is the ID index sometimes not called "_id_"?  There is other code in the system that looks for a "_id" prefix
             rather than this exact value.  we should standardize.  OR, remove names - which is in the bugdb.  Anyway, this
             is dubious here at the moment.
    */
    copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk,
         BSON( "name" << NE << "_id_" ) );

    return true;
}
void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int level) {
    ReplicationCoordinator* replCoord = ReplicationCoordinator::get(opCtx);
    if (replCoord->getSettings().usingReplSets()) {
        IsMasterResponse isMasterResponse;
        replCoord->fillIsMasterForReplSet(&isMasterResponse);
        result.appendElements(isMasterResponse.toBSON());
        if (level) {
            replCoord->appendSlaveInfoData(&result);
        }
        return;
    }

    result.appendBool("ismaster",
                      ReplicationCoordinator::get(opCtx)->isMasterForReportingPurposes());

    if (level) {
        BSONObjBuilder sources(result.subarrayStart("sources"));

        int n = 0;
        list<BSONObj> src;
        {
            const NamespaceString localSources{"local.sources"};
            AutoGetCollectionForReadCommand ctx(opCtx, localSources);
            auto exec = InternalPlanner::collectionScan(
                opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::NO_YIELD);
            BSONObj obj;
            PlanExecutor::ExecState state;
            while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
                src.push_back(obj.getOwned());
            }

            // Non-yielding collection scans from InternalPlanner will never error.
            invariant(PlanExecutor::IS_EOF == state);
        }

        for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append(s["host"]);
            string sourcename = s["source"].valuestr();
            if (sourcename != "main")
                bb.append(s["source"]);

            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t(bb.subobjStart("syncedTo"));
                t.appendDate("time", e.timestampTime());
                t.append("inc", e.timestampInc());
                t.done();
            }

            if (level > 1) {
                invariant(!opCtx->lockState()->isLocked());
                // note: there is no so-style timeout on this connection; perhaps we should have
                // one.
                ScopedDbConnection conn(s["host"].valuestr());
                DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
                if (cliConn && replAuthenticate(cliConn)) {
                    BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
                                                  Query().sort(BSON("$natural" << 1)));
                    BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
                                                 Query().sort(BSON("$natural" << -1)));
                    bb.appendDate("masterFirst", first["ts"].timestampTime());
                    bb.appendDate("masterLast", last["ts"].timestampTime());
                    const auto lag = (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
                }
                conn.done();
            }

            sources.append(BSONObjBuilder::numStr(n++), bb.obj());
        }

        sources.done();

        replCoord->appendSlaveInfoData(&result);
    }
}
bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl,
                bool slaveOk, bool useReplAuth) {
    massert( "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );

    string todb = database->name;
    stringstream a,b;
    a << "localhost:" << port;
    b << "127.0.0.1:" << port;
    bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
    if ( masterSameProcess ) {
        if ( fromdb == todb && database->path == dbpath ) {
            // guard against an "infinite" loop
            /* if you are replicating, the local.sources config may be wrong if you get this */
            errmsg = "can't clone from self (localhost).";
            return false;
        }
    }
    /* todo: we can put these releases inside dbclient or a dbclient specialization.
       or just wait until we get rid of global lock anyway.
    */
    string ns = fromdb + ".system.namespaces";

    auto_ptr<DBClientCursor> c;
    {
        dbtemprelease r;
        if ( !masterSameProcess ) {
            auto_ptr< DBClientConnection > c( new DBClientConnection() );
            if ( !c->connect( masterHost, errmsg ) )
                return false;
            if( !replAuthenticate(c.get()) )
                return false;
            conn = c;
        }
        else {
            conn.reset( new DBDirectClient() );
        }
        c = conn->query( ns.c_str(), BSONObj(), 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
    }
    if ( c.get() == 0 ) {
        errmsg = "query failed " + ns;
        return false;
    }

    while ( 1 ) {
        {
            dbtemprelease r;
            if ( !c->more() )
                break;
        }
        BSONObj collection = c->next();
        BSONElement e = collection.findElement("name");
        if ( e.eoo() ) {
            string s = "bad system.namespaces object " + collection.toString();
            /* temp
            out() << masterHost << endl;
            out() << ns << endl;
            out() << e.toString() << endl;
            exit(1);*/
            massert(s.c_str(), false);
        }
        assert( !e.eoo() );
        assert( e.type() == String );
        const char *from_name = e.valuestr();

        if( strstr(from_name, ".system.") ) {
            /* system.users is cloned -- but nothing else from system. */
            if( strstr(from_name, ".system.users") == 0 )
                continue;
        }
        else if( strchr(from_name, '$') ) {
            // don't clone index namespaces -- we take care of those separately below.
            continue;
        }
        BSONObj options = collection.getObjectField("options");

        /* change name "<fromdb>.collection" -> <todb>.collection */
        const char *p = strchr(from_name, '.');
        assert(p);
        string to_name = todb + p;

        //if( !options.isEmpty() )
        {
            string err;
            const char *toname = to_name.c_str();
            userCreateNS(toname, options, err, logForRepl);

            /* chunks are big enough that we should create the _id index up front, that should
               be faster. perhaps we should do that for everything?  Not doing that yet -- not sure
               how we want to handle _id-less collections, and we might not want to create the index
               there.
            */
            if ( strstr(toname, "._chunks") )
                ensureHaveIdIndex(toname);
        }
        copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk);
    }

    // now build the indexes
    string system_indexes_from = fromdb + ".system.indexes";
    string system_indexes_to = todb + ".system.indexes";
    copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk,
         BSON( "name" << NE << "_id_" ) );

    return true;
}
void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
    ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
    if (replCoord->getSettings().usingReplSets()) {
        IsMasterResponse isMasterResponse;
        replCoord->fillIsMasterForReplSet(&isMasterResponse);
        result.appendElements(isMasterResponse.toBSON());
        if (level) {
            replCoord->appendSlaveInfoData(&result);
        }
        return;
    }

    // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed
    if (replAllDead) {
        result.append("ismaster", 0);
        string s = string("dead: ") + replAllDead;
        result.append("info", s);
    } else {
        result.appendBool("ismaster",
                          getGlobalReplicationCoordinator()->isMasterForReportingPurposes());
    }

    if (level) {
        BSONObjBuilder sources(result.subarrayStart("sources"));

        int n = 0;
        list<BSONObj> src;
        {
            const char* localSources = "local.sources";
            AutoGetCollectionForRead ctx(txn, localSources);
            unique_ptr<PlanExecutor> exec(
                InternalPlanner::collectionScan(txn, localSources, ctx.getCollection()));
            BSONObj obj;
            PlanExecutor::ExecState state;
            while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
                src.push_back(obj);
            }
        }

        for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append(s["host"]);
            string sourcename = s["source"].valuestr();
            if (sourcename != "main")
                bb.append(s["source"]);

            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t(bb.subobjStart("syncedTo"));
                t.appendDate("time", e.timestampTime());
                t.append("inc", e.timestampInc());
                t.done();
            }

            if (level > 1) {
                wassert(!txn->lockState()->isLocked());
                // note: there is no so-style timeout on this connection; perhaps we should have
                // one.
                ScopedDbConnection conn(s["host"].valuestr());
                DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
                if (cliConn && replAuthenticate(cliConn)) {
                    BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
                                                  Query().sort(BSON("$natural" << 1)));
                    BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
                                                 Query().sort(BSON("$natural" << -1)));
                    bb.appendDate("masterFirst", first["ts"].timestampTime());
                    bb.appendDate("masterLast", last["ts"].timestampTime());
                    const auto lag = (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
                }
                conn.done();
            }

            sources.append(BSONObjBuilder::numStr(n++), bb.obj());
        }

        sources.done();

        replCoord->appendSlaveInfoData(&result);
    }
}
void appendReplicationInfo(BSONObjBuilder& result, int level) {
    if ( replSet ) {
        if( theReplSet == 0 || theReplSet->state().shunned() ) {
            result.append("ismaster", false);
            result.append("secondary", false);
            result.append("info", ReplSet::startupStatusMsg.get());
            result.append( "isreplicaset" , true );
        }
        else {
            theReplSet->fillIsMaster(result);
        }
        return;
    }

    if ( replAllDead ) {
        result.append("ismaster", 0);
        string s = string("dead: ") + replAllDead;
        result.append("info", s);
    }
    else {
        result.appendBool("ismaster", _isMaster() );
    }

    if ( level && replSet ) {
        result.append( "info" , "is replica set" );
    }
    else if ( level ) {
        BSONObjBuilder sources( result.subarrayStart( "sources" ) );

        int n = 0;
        list<BSONObj> src;
        {
            Client::ReadContext ctx("local.sources", dbpath);
            shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
            while ( c->ok() ) {
                src.push_back(c->current());
                c->advance();
            }
        }

        for( list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++ ) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append( s["host"] );
            string sourcename = s["source"].valuestr();
            if ( sourcename != "main" )
                bb.append( s["source"] );

            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
                t.appendDate( "time" , e.timestampTime() );
                t.append( "inc" , e.timestampInc() );
                t.done();
            }

            if ( level > 1 ) {
                wassert( !Lock::isLocked() );
                // note: there is no so-style timeout on this connection; perhaps we should have one.
                ScopedDbConnection conn(s["host"].valuestr());
                DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() );
                if ( cliConn && replAuthenticate(cliConn, false) ) {
                    BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                   Query().sort( BSON( "$natural" << 1 ) ) );
                    BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                  Query().sort( BSON( "$natural" << -1 ) ) );
                    bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
                    bb.appendDate( "masterLast" , last["ts"].timestampTime() );
                    double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append( "lagSeconds" , lag / 1000 );
                }
                conn.done();
            }

            sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
        }

        sources.done();
    }
}