bool ReplicationCoordinatorImpl::shouldIgnoreUniqueIndex(const IndexDescriptor* idx) { if (!idx->unique()) { return false; } // Never ignore _id index if (idx->isIdIndex()) { return false; } boost::lock_guard<boost::mutex> lock(_mutex); if (_getReplicationMode_inlock() != modeReplSet) { return false; } // see SERVER-6671 MemberState ms = _getCurrentMemberState_inlock(); if (! ((ms == MemberState::RS_STARTUP2) || (ms == MemberState::RS_RECOVERING) || (ms == MemberState::RS_ROLLBACK))) { return false; } // TODO(spencer): SERVER-14233 Remove support for old oplog versions, or move oplogVersion // into the repl coordinator /* // 2 is the oldest oplog version where operations // are fully idempotent. if (theReplSet->oplogVersion < 2) { return false; }*/ return true; }
Status ReplicationCoordinatorImpl::processHandshake(const OperationContext* txn,
                                                    const HandshakeArgs& handshake) {
    // Records identity information (member ID, host/port) for the node identified by
    // the handshake's RID in _slaveInfoMap, so replication progress reported under
    // that RID can later be attributed to the right member.
    //
    // Returns NodeNotFound in replica-set mode if the handshake's member ID is not
    // present in the current replica set config; Status::OK() otherwise.
    LOG(2) << "Received handshake " << handshake.toBSON();

    boost::lock_guard<boost::mutex> lock(_mutex);
    if (_getReplicationMode_inlock() == modeReplSet) {
        int memberID = handshake.getMemberId();
        const MemberConfig* member = _rsConfig.findMemberByID(memberID);
        if (!member) {
            return Status(ErrorCodes::NodeNotFound,
                          str::stream() << "Node with replica set member ID " << memberID <<
                              " could not be found in replica set config during handshake");
        }
        // Create/update the map entry only after the member ID has been validated, so a
        // failed handshake does not leave a stale default-constructed SlaveInfo behind.
        SlaveInfo& slaveInfo = _slaveInfoMap[handshake.getRid()];
        slaveInfo.memberID = memberID;
        slaveInfo.hostAndPort = member->getHostAndPort();

        if (!_getCurrentMemberState_inlock().primary()) {
            // pass along if we are not primary
            _externalState->forwardSlaveHandshake();
        }
    } else {
        // master/slave: there is no config to validate against; member ID is unused (-1)
        // and the slave is identified by the client connection's host and port.
        SlaveInfo& slaveInfo = _slaveInfoMap[handshake.getRid()];
        slaveInfo.memberID = -1;
        slaveInfo.hostAndPort = _externalState->getClientHostAndPort(txn);
    }
    return Status::OK();
}
Status ReplicationCoordinatorImpl::canServeReadsFor(OperationContext* txn,
                                                    const NamespaceString& ns,
                                                    bool slaveOk) {
    // God-mode operations bypass all read gating.
    if (txn->isGod()) {
        return Status::OK();
    }
    // Anywhere we may write, we may also read.
    if (canAcceptWritesForDatabase(ns.db())) {
        return Status::OK();
    }

    boost::lock_guard<boost::mutex> lk(_mutex);
    const Mode mode = _getReplicationMode_inlock();
    // A simple slave always serves reads.
    if (mode == modeMasterSlave && _settings.slave == SimpleSlave) {
        return Status::OK();
    }
    if (!slaveOk) {
        return Status(ErrorCodes::NotMasterNoSlaveOkCode,
                      "not master and slaveOk=false");
    }
    // slaveOk reads are fine outside replica-set mode, or on a secondary.
    if (mode == modeMasterSlave || mode == modeNone) {
        return Status::OK();
    }
    if (_getCurrentMemberState_inlock().secondary()) {
        return Status::OK();
    }
    return Status(ErrorCodes::NotMasterOrSecondaryCode,
                  "not master or secondary; cannot currently read from this replSet member");
}
bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(const StringData& dbName) { // we must check _settings since getReplicationMode() isn't aware of modeReplSet // until a valid replica set config has been loaded boost::lock_guard<boost::mutex> lk(_mutex); if (_settings.usingReplSets()) { if (_getReplicationMode_inlock() == modeReplSet && _getCurrentMemberState_inlock().primary()) { return true; } return dbName == "local"; } if (!_settings.slave) return true; // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed if (replAllDead) { return dbName == "local"; } if (_settings.master) { // if running with --master --slave, allow. return true; } return dbName == "local"; }
bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() { if (_settings.usingReplSets()) { boost::lock_guard<boost::mutex> lock(_mutex); if (_getReplicationMode_inlock() == modeReplSet && _getCurrentMemberState_inlock().primary()) { return true; } return false; } if (!_settings.slave) return true; // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed if (replAllDead) { return false; } if (_settings.master) { // if running with --master --slave, allow. return true; } return false; }
Status ReplicationCoordinatorImpl::setLastOptime(OperationContext* txn,
                                                 const OID& rid,
                                                 const OpTime& ts) {
    // Advances the recorded replication progress for the node identified by "rid".
    // OpTimes only move forward; stale reports are ignored.
    boost::lock_guard<boost::mutex> lk(_mutex);
    OpTime& recordedOpTime = _slaveInfoMap[rid].opTime;
    if (recordedOpTime < ts) {
        recordedOpTime = ts;
        // TODO(spencer): update write concern tags if we're a replSet

        // Wake up any threads waiting for replication that now have their replication
        // check satisfied
        typedef std::vector<WaiterInfo*>::iterator WaiterIterator;
        for (WaiterIterator waiterIt = _replicationWaiterList.begin();
             waiterIt != _replicationWaiterList.end();
             ++waiterIt) {
            WaiterInfo* waiter = *waiterIt;
            if (_opReplicatedEnough_inlock(*waiter->opTime, *waiter->writeConcern)) {
                waiter->condVar->notify_all();
            }
        }
    }

    if (_getReplicationMode_inlock() == modeReplSet &&
            !_getCurrentMemberState_inlock().primary()) {
        // pass along if we are not primary
        _externalState->forwardSlaveProgress();
    }
    return Status::OK();
}
void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
        const ReplicationExecutor::CallbackData& cbData,
        const ReplicaSetConfig& newConfig,
        StatusWith<int> myIndex) {
    // Final stage of a heartbeat-driven reconfig: installs "newConfig" as the current
    // replica set configuration.  "myIndex" is this node's index in the new config, or
    // a failed status if this node could not be located/validated in it.
    if (cbData.status == ErrorCodes::CallbackCanceled) {
        return;
    }
    boost::unique_lock<boost::mutex> lk(_mutex);
    // We must still be in the middle of a heartbeat reconfig, and the new config must
    // strictly advance the config version (or we have no config yet).
    invariant(_rsConfigState == kConfigHBReconfiguring);
    invariant(!_rsConfig.isInitialized() ||
              _rsConfig.getConfigVersion() < newConfig.getConfigVersion());
    if (_getCurrentMemberState_inlock().primary() && !cbData.txn) {
        // Not having an OperationContext in the CallbackData means we definitely aren't holding
        // the global lock.  Since we're primary and this reconfig could cause us to stepdown,
        // reschedule this work with the global exclusive lock so the stepdown is safe.
        // TODO(spencer): When we *do* have an OperationContext, consult it to confirm that
        // we are indeed holding the global lock.
        _replExecutor.scheduleWorkWithGlobalExclusiveLock(
                stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
                           this,
                           stdx::placeholders::_1,
                           newConfig,
                           myIndex));
        return;
    }
    if (!myIndex.isOK()) {
        // Could not determine our own index in the new config; log the reason and fall
        // through with index -1, which removes this node until an acceptable
        // configuration arrives.
        switch (myIndex.getStatus().code()) {
            case ErrorCodes::NodeNotFound:
                log() << "Cannot find self in new replica set configuration; I must be removed; " <<
                    myIndex.getStatus();
                break;
            case ErrorCodes::DuplicateKey:
                error() << "Several entries in new config represent this node; "
                    "Removing self until an acceptable configuration arrives; " <<
                    myIndex.getStatus();
                break;
            default:
                error() << "Could not validate configuration received from remote node; "
                    "Removing self until an acceptable configuration arrives; " <<
                    myIndex.getStatus();
                break;
        }
        myIndex = StatusWith<int>(-1);
    }
    const PostMemberStateUpdateAction action =
            _setCurrentRSConfig_inlock(newConfig, myIndex.getValue());
    // Release _mutex before performing the post-update action, which may take other
    // locks or do work that must not run under the coordinator mutex.
    lk.unlock();
    _performPostMemberStateUpdateAction(action);
}
MemberState ReplicationCoordinatorImpl::getCurrentMemberState() const {
    // Thread-safe accessor: snapshot the member state under the coordinator mutex.
    boost::lock_guard<boost::mutex> guard(_mutex);
    return _getCurrentMemberState_inlock();
}