void startMasterSlave() {
    oldRepl();
    if( !replSettings.slave && !replSettings.master )
        return;

    {
        Lock::GlobalWrite lk;
        replLocalAuth();
    }

    if ( replSettings.slave ) {
        verify( replSettings.slave == SimpleSlave );
        LOG(1) << "slave=true" << endl;
        boost::thread repl_thread(replSlaveThread);
    }

    if ( replSettings.master ) {
        LOG(1) << "master=true" << endl;
        replSettings.master = true;
        createOplog();
        boost::thread t(replMasterThread);
    }

    while( replSettings.fastsync ) // don't allow writes until we've set up from log
        sleepmillis( 50 );
}
void BackgroundSync::producerThread() {
    Client::initThread("rsBackgroundSync");
    replLocalAuth();

    while (!inShutdown()) {
        if (!theReplSet) {
            log() << "replSet warning did not receive a valid config yet, sleeping 20 seconds " << rsLog;
            sleepsecs(20);
            continue;
        }

        try {
            _producerThread();
        }
        catch (DBException& e) {
            sethbmsg(str::stream() << "db exception in producer: " << e.toString());
            sleepsecs(10);
        }
        catch (std::exception& e2) {
            sethbmsg(str::stream() << "exception in producer: " << e2.what());
            sleepsecs(60);
        }
    }

    cc().shutdown();
}
void replSlaveThread() {
    sleepsecs(1);
    Client::initThread("replslave");

    {
        Lock::GlobalWrite lk;
        replLocalAuth();
    }

    while ( 1 ) {
        try {
            replMain();
            sleepsecs(5);
        }
        catch ( AssertionException& ) {
            ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
            problem() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
            sleepsecs(300);
        }
        catch ( DBException& e ) {
            problem() << "exception in replSlaveThread(): " << e.what()
                      << ", sleeping 5 minutes before retry" << endl;
            sleepsecs(300);
        }
        catch ( ... ) {
            problem() << "error in replSlaveThread(): sleeping 5 minutes before retry" << endl;
            sleepsecs(300);
        }
    }
}
static void replMasterThread() {
    sleepsecs(4);
    Client::initThread("replmaster");
    int toSleep = 10;
    while( 1 ) {
        sleepsecs( toSleep );

        /* write a keep-alive like entry to the log. this will make things like
           printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date
           even when things are idle.
        */
        {
            writelocktry lk(1);
            if ( lk.got() ) {
                toSleep = 10;
                replLocalAuth();
                try {
                    logKeepalive();
                }
                catch(...) {
                    log() << "caught exception in replMasterThread()" << endl;
                }
            }
            else {
                LOG(5) << "couldn't logKeepalive" << endl;
                toSleep = 1;
            }
        }
    }
}
static void initializeWriterThread() {
    // Only do this once per thread
    if (!ClientBasic::getCurrent()) {
        string threadName = str::stream() << "repl writer worker "
                                          << replWriterWorkerId.addAndFetch(1);
        Client::initThread( threadName.c_str() );
        replLocalAuth();
    }
}
void BackgroundSync::notifierThread() {
    Client::initThread("rsSyncNotifier");
    replLocalAuth();

    // This makes the initial connection to our sync source for oplog position
    // notification. It also sets the supportsUpdater flag so we know which method
    // to use. If this function fails, we ignore that situation because it will be
    // taken care of the first time markOplog() is called in the loop below.
    {
        boost::unique_lock<boost::mutex> oplogLockSSF(theReplSet->syncSourceFeedback.oplock);
        connectOplogNotifier();
    }
    theReplSet->syncSourceFeedback.go();

    while (!inShutdown()) {
        bool clearTarget = false;

        if (!theReplSet) {
            sleepsecs(5);
            continue;
        }

        MemberState state = theReplSet->state();
        if (state.primary() || state.fatal() || state.startup()) {
            sleepsecs(5);
            continue;
        }

        try {
            {
                boost::unique_lock<boost::mutex> lock(_lastOpMutex);
                while (_consumedOpTime == theReplSet->lastOpTimeWritten) {
                    _lastOpCond.wait(lock);
                }
            }
            markOplog();
        }
        catch (DBException& e) {
            clearTarget = true;
            log() << "replset tracking exception: " << e.getInfo() << rsLog;
            sleepsecs(1);
        }
        catch (std::exception& e2) {
            clearTarget = true;
            log() << "replset tracking error: " << e2.what() << rsLog;
            sleepsecs(1);
        }

        if (clearTarget) {
            boost::unique_lock<boost::mutex> lock(_mutex);
            _oplogMarkerTarget = NULL;
        }
    }

    cc().shutdown();
}
void IndexBuilder::run() {
    LOG(2) << "building index " << _index << " on " << _ns << endl;
    Client::initThread(name().c_str());
    replLocalAuth();

    Client::WriteContext ctx(_ns);
    build();

    cc().shutdown();
}
void initializeWriterThread() {
    // Only do this once per thread
    if (!ClientBasic::getCurrent()) {
        string threadName = str::stream() << "repl writer worker "
                                          << replWriterWorkerId.addAndFetch(1);
        Client::initThread( threadName.c_str() );
        // allow us to get through the magic barrier
        Lock::ParallelBatchWriterMode::iAmABatchParticipant();
        replLocalAuth();
    }
}
void startSyncThread() {
    static int n;
    if( n != 0 ) {
        log() << "replSet ERROR : more than one sync thread?" << rsLog;
        verify( n == 0 );
    }
    n++;

    Client::initThread("rsSync");
    replLocalAuth();
    theReplSet->syncThread();
    cc().shutdown();
}
void BackgroundSync::notifierThread() {
    Client::initThread("rsSyncNotifier");
    replLocalAuth();
    theReplSet->syncSourceFeedback.go();

    while (!inShutdown()) {
        bool clearTarget = false;

        if (!theReplSet) {
            sleepsecs(5);
            continue;
        }

        MemberState state = theReplSet->state();
        if (state.primary() || state.fatal() || state.startup()) {
            sleepsecs(5);
            continue;
        }

        try {
            {
                boost::unique_lock<boost::mutex> lock(_lastOpMutex);
                while (_consumedOpTime == theReplSet->lastOpTimeWritten) {
                    _lastOpCond.wait(lock);
                }
            }
            markOplog();
        }
        catch (DBException &e) {
            clearTarget = true;
            log() << "replset tracking exception: " << e.getInfo() << rsLog;
            sleepsecs(1);
        }
        catch (std::exception &e2) {
            clearTarget = true;
            log() << "replset tracking error: " << e2.what() << rsLog;
            sleepsecs(1);
        }

        if (clearTarget) {
            boost::unique_lock<boost::mutex> lock(_mutex);
            _oplogMarkerTarget = NULL;
        }
    }

    cc().shutdown();
}
void IndexBuilder::run() {
    LOG(2) << "IndexBuilder building index " << _index;

    Client::initThread(name().c_str());
    replLocalAuth();

    cc().curop()->reset(HostAndPort(), dbInsert);
    NamespaceString ns(_index["ns"].String());
    Client::WriteContext ctx(ns.getSystemIndexesCollection());

    Status status = build( ctx.ctx() );
    if ( !status.isOK() ) {
        log() << "IndexBuilder could not build index: " << status.toString();
    }

    cc().shutdown();
}
void BackgroundSync::applierThread() {
    {
        boost::unique_lock<boost::mutex> lock(_mutex);
        _applierInProgress = true;
    }

    Client::initThread("applier");
    replLocalAuth();

    // we don't want the applier to be interrupted,
    // as it must finish work that it starts
    // done for github issues #770 and #771
    cc().setGloballyUninterruptible(true);

    applyOpsFromOplog();

    cc().shutdown();

    {
        boost::unique_lock<boost::mutex> lock(_mutex);
        _applierInProgress = false;
    }
}
/* forked as a thread during startup
   it can run quite a while looking for config.  but once found,
   a separate thread takes over as ReplSetImpl::Manager, and this thread
   terminates.
*/
void startReplSets(ReplSetSeedList *replSetSeedList) {
    Client::initThread("rsStart");
    OperationContextImpl txn;
    try {
        verify( theReplSet == 0 );
        if( replSetSeedList == 0 ) {
            return;
        }
        replLocalAuth();
        (theReplSet = ReplSet::make(&txn, *replSetSeedList))->go();
    }
    catch(std::exception& e) {
        log() << "replSet caught exception in startReplSets thread: " << e.what() << rsLog;
        if( theReplSet )
            fassertFailedNoTrace(18756);
    }
    cc().shutdown();
}
void BackgroundSync::producerThread() {
    Client::initThread("rsBackgroundSync");
    replLocalAuth();

    while (!inShutdown()) {
        try {
            _producerThread();
        }
        catch (const DBException& e) {
            std::string msg(str::stream() << "sync producer problem: " << e.toString());
            error() << msg << rsLog;
            _replCoord->setMyHeartbeatMessage(msg);
        }
        catch (const std::exception& e2) {
            severe() << "sync producer exception: " << e2.what() << rsLog;
            fassertFailed(28546);
        }
    }

    cc().shutdown();
}
void IndexBuilder::run() {
    LOG(2) << "IndexBuilder building index " << _index;

    Client::initThread(name().c_str());
    Lock::ParallelBatchWriterMode::iAmABatchParticipant();
    replLocalAuth();

    cc().curop()->reset(HostAndPort(), dbInsert);
    NamespaceString ns(_index["ns"].String());
    Client::WriteContext ctx(ns.getSystemIndexesCollection());
    OperationContextImpl txn;

    Database* db = dbHolder().get(ns.db().toString(), storageGlobalParams.dbpath);

    Status status = build(&txn, db);
    if ( !status.isOK() ) {
        log() << "IndexBuilder could not build index: " << status.toString();
    }

    cc().shutdown();
}
void BackgroundSync::producerThread() {
    {
        boost::unique_lock<boost::mutex> lock(_mutex);
        _opSyncInProgress = true;
    }

    Client::initThread("rsBackgroundSync");
    replLocalAuth();

    uint32_t timeToSleep = 0;

    while (!_opSyncShouldExit) {
        try {
            if (timeToSleep) {
                {
                    boost::unique_lock<boost::mutex> lck(_mutex);
                    _opSyncRunning = false;
                    // notify other threads that we are not running
                    _opSyncRunningCondVar.notify_all();
                }
                for (uint32_t i = 0; i < timeToSleep; i++) {
                    sleepsecs(1);
                    // get out if we need to
                    if (_opSyncShouldExit) {
                        break;
                    }
                }
                timeToSleep = 0;
            }
            // get out if we need to
            if (_opSyncShouldExit) {
                break;
            }

            {
                boost::unique_lock<boost::mutex> lck(_mutex);
                _opSyncRunning = false;

                while (!_opSyncShouldRun && !_opSyncShouldExit) {
                    // notify other threads that we are not running
                    _opSyncRunningCondVar.notify_all();
                    // wait for permission that we can run
                    _opSyncCanRunCondVar.wait(lck);
                }

                // notify other threads that we are running
                _opSyncRunningCondVar.notify_all();
                _opSyncRunning = true;
            }

            // get out if we need to
            if (_opSyncShouldExit) {
                break;
            }

            MemberState state = theReplSet->state();
            if (state.fatal() || state.startup()) {
                timeToSleep = 5;
                continue;
            }

            // this does the work of reading a remote oplog
            // and writing it to our oplog
            timeToSleep = produce();
        }
        catch (DBException& e) {
            sethbmsg(str::stream() << "db exception in producer: " << e.toString());
            timeToSleep = 10;
        }
        catch (std::exception& e2) {
            sethbmsg(str::stream() << "exception in producer: " << e2.what());
            timeToSleep = 10;
        }
    }

    cc().shutdown();

    {
        boost::unique_lock<boost::mutex> lock(_mutex);
        _opSyncRunning = false;
        _opSyncInProgress = false;
    }
}
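/* A minimal, self-contained sketch of the run/pause handshake used by the
   producerThread above: one condition variable grants the worker permission
   to run, the other advertises the worker's state so a controller can wait
   for it to pause.  All names here (PausableWorker, runOnce, ...) are
   illustrative placeholders, not MongoDB/TokuMX code; this uses std::thread
   primitives in place of boost and runs one batch per grant for the demo.
*/
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class PausableWorker {
public:
    void threadBody() {
        std::unique_lock<std::mutex> lck(_mutex);
        while (!_shouldExit) {
            _running = false;
            _runningCond.notify_all();             // announce "paused"
            while (!_shouldRun && !_shouldExit) {
                _canRunCond.wait(lck);             // sleep until granted permission
            }
            if (_shouldExit) {
                break;
            }
            _running = true;
            _runningCond.notify_all();             // announce "running"
            lck.unlock();
            std::printf("batch\n");                // stand-in for produce()
            lck.lock();
            _shouldRun = false;                    // one batch per grant
        }
        _running = false;
        _runningCond.notify_all();
    }
    void runOnce() {                               // controller: grant one batch
        std::lock_guard<std::mutex> g(_mutex);
        _shouldRun = true;
        _canRunCond.notify_all();
    }
    void waitUntilPaused() {                       // controller: wait for the pause
        std::unique_lock<std::mutex> lck(_mutex);
        while (_shouldRun || _running) {
            _runningCond.wait(lck);
        }
    }
    void shutdown() {                              // controller: ask the worker to exit
        std::lock_guard<std::mutex> g(_mutex);
        _shouldExit = true;
        _canRunCond.notify_all();
    }
private:
    std::mutex _mutex;
    std::condition_variable _canRunCond;           // signals permission to run
    std::condition_variable _runningCond;          // signals state changes
    bool _shouldRun = false, _shouldExit = false, _running = false;
};

int main() {
    PausableWorker w;
    std::thread t(&PausableWorker::threadBody, &w);
    w.runOnce();
    w.waitUntilPaused();
    w.shutdown();
    t.join();
    return 0;
}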
void runSyncThread() {
    Client::initThread("rsSync");
    replLocalAuth();
    ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();

    // Set initial indexPrefetch setting
    const std::string& prefetch = replCoord->getSettings().rsIndexPrefetch;
    if (!prefetch.empty()) {
        BackgroundSync::IndexPrefetchConfig prefetchConfig = BackgroundSync::PREFETCH_ALL;
        if (prefetch == "none")
            prefetchConfig = BackgroundSync::PREFETCH_NONE;
        else if (prefetch == "_id_only")
            prefetchConfig = BackgroundSync::PREFETCH_ID_ONLY;
        else if (prefetch == "all")
            prefetchConfig = BackgroundSync::PREFETCH_ALL;
        else {
            warning() << "unrecognized indexPrefetch setting " << prefetch
                      << ", defaulting to \"all\"";
        }
        BackgroundSync::get()->setIndexPrefetchConfig(prefetchConfig);
    }

    while (!inShutdown()) {
        // After a reconfig, we may not be in the replica set anymore, so
        // check that we are in the set (and not an arbiter) before
        // trying to sync with other replicas.
        // TODO(spencer): Use a condition variable to await loading a config
        if (replCoord->getReplicationMode() != ReplicationCoordinator::modeReplSet) {
            log() << "replSet warning did not receive a valid config yet, sleeping 5 seconds "
                  << rsLog;
            sleepsecs(5);
            continue;
        }

        const MemberState memberState = replCoord->getCurrentMemberState();
        if (memberState.arbiter()) {
            break;
        }

        try {
            if (memberState.primary()) {
                sleepsecs(1);
                continue;
            }

            bool initialSyncRequested = BackgroundSync::get()->getInitialSyncRequestedFlag();
            // Check criteria for doing an initial sync:
            // 1. If the oplog is empty, do an initial sync
            // 2. If minValid has _initialSyncFlag set, do an initial sync
            // 3. If initialSyncRequested is true
            if (getGlobalReplicationCoordinator()->getMyLastOptime().isNull() ||
                    getInitialSyncFlag() ||
                    initialSyncRequested) {
                syncDoInitialSync();
                continue; // start from top again in case sync failed.
            }
            replCoord->setFollowerMode(MemberState::RS_RECOVERING);

            /* we have some data.  continue tailing. */
            SyncTail tail(BackgroundSync::get(), multiSyncApply);
            tail.oplogApplication();
        }
        catch(const DBException& e) {
            log() << "Received exception while syncing: " << e.toString();
            sleepsecs(10);
        }
        catch(...) {
            sethbmsg("unexpected exception in syncThread()");
            // TODO : SET NOT SECONDARY here?
            sleepsecs(60);
        }
    }
    cc().shutdown();
}
void Manager::starting() {
    Client::initThread("rsMgr");
    replLocalAuth();
}
void GhostSync::starting() {
    Client::initThread("rsGhostSync");
    replLocalAuth();
}
void initializePrefetchThread() {
    if (!ClientBasic::getCurrent()) {
        Client::initThread("repl prefetch worker");
        replLocalAuth();
    }
}
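/* All of the entry points above share one lifecycle, sketched below.
   "myThreadBody" and "doReplicationWork" are illustrative placeholders, not
   MongoDB APIs; everything else (Client::initThread, replLocalAuth,
   cc().shutdown, DBException) appears in the snippets above.
*/
static void myThreadBody() {
    Client::initThread("myThreadName");   // attach a thread-local Client before any DB work
    replLocalAuth();                      // authorize this thread for internal repl operations
    try {
        doReplicationWork();              // the thread's actual job (hypothetical)
    }
    catch (const DBException& e) {
        log() << "exception in myThreadBody: " << e.toString() << endl;
    }
    cc().shutdown();                      // release the thread-local Client on the way out
}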