/**
 * Write an op that is already built (a complete oplog BSON document) to local.oplog.rs.
 * todo : make _logOpRS() call this so we don't repeat ourself?
 *
 * Reads op["ts"] (the op's OpTime) and op["h"] (the op's hash — TODO confirm semantics;
 * it is mirrored into theReplSet->lastH below).
 *
 * Side effects when running as a replica set (theReplSet != NULL):
 *  - updates theReplSet->lastOpTimeWritten and theReplSet->lastH
 *  - records ts as this client's last op and wakes the background sync producer
 *  - advances the global optime via setNewOptime(ts)
 */
void _logOpObjRS(OperationContext* txn, const BSONObj& op) {
    // Exclusive write access to the "local" database for the duration of the insert.
    Lock::DBWrite lk(txn->lockState(), "local");
    // XXX soon this needs to be part of an outer WUOW not its own.
    // We can't do this yet due to locking limitations.
    WriteUnitOfWork wunit(txn);
    const OpTime ts = op["ts"]._opTime();
    long long h = op["h"].numberLong();
    {
        // Lazily resolve and cache the local DB and oplog collection on first use.
        if ( localOplogRSCollection == 0 ) {
            Client::Context ctx(txn, rsoplog);
            localDB = ctx.db();
            verify( localDB );
            localOplogRSCollection = localDB->getCollection(txn, rsoplog);
            massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", localOplogRSCollection);
        }
        Client::Context ctx(txn, rsoplog, localDB);
        // false: presumably "no document validation / id-index maintenance" — confirm
        // against Collection::insertDocument's signature.
        checkOplogInsert(localOplogRSCollection->insertDocument(txn, op, false));
        /* todo: now() has code to handle clock skew. but if the skew server to server
           is large it will get unhappy. this code (or code in now() maybe) should be improved. */
        if( theReplSet ) {
            // Oplog timestamps must be strictly increasing. If this op does not advance
            // the clock, attempt to resync directly from the primary instead of
            // continuing with an out-of-order stream.
            if( !(theReplSet->lastOpTimeWritten<ts) ) {
                log() << "replication oplog stream went back in time. previous timestamp: " << theReplSet->lastOpTimeWritten << " newest timestamp: " << ts << ". attempting to sync directly from primary." << endl;
                BSONObjBuilder result;
                Status status = theReplSet->forceSyncFrom(theReplSet->box.getPrimary()->fullName(), &result);
                if (!status.isOK()) {
                    log() << "Can't sync from primary: " << status;
                }
            }
            theReplSet->lastOpTimeWritten = ts;
            theReplSet->lastH = h;
            ctx.getClient()->setLastOp( ts );
            // Wake the background sync thread so the new op is noticed promptly.
            BackgroundSync::notify();
        }
    }
    setNewOptime(ts);
    // Commit the insert and the bookkeeping above as one unit of work.
    wunit.commit();
}
/**
 * Write an op that is already built (a complete oplog BSON document) to local.oplog.rs.
 * todo : make _logOpRS() call this so we don't repeat ourself?
 *
 * This variant constructs its own OperationContextImpl rather than taking one from
 * the caller. Reads op["ts"] (the op's OpTime) and op["h"] (the op's hash — TODO
 * confirm semantics; it is mirrored into theReplSet->lastH below).
 *
 * Side effects when running as a replica set (theReplSet != NULL):
 *  - updates theReplSet->lastOpTimeWritten and theReplSet->lastH
 *  - records ts as this client's last op and wakes the background sync producer
 *  - advances the global optime via setNewOptime(ts)
 */
void _logOpObjRS(const BSONObj& op) {
    // Local operation context for this write; not supplied by the caller.
    OperationContextImpl txn;
    // Exclusive write access to the "local" database for the duration of the insert.
    Lock::DBWrite lk(txn.lockState(), "local");
    const OpTime ts = op["ts"]._opTime();
    long long h = op["h"].numberLong();
    {
        // Lazily resolve and cache the local DB and oplog collection on first use.
        if ( localOplogRSCollection == 0 ) {
            Client::Context ctx(rsoplog, storageGlobalParams.dbpath);
            localDB = ctx.db();
            verify( localDB );
            localOplogRSCollection = localDB->getCollection( &txn, rsoplog );
            massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", localOplogRSCollection);
        }
        Client::Context ctx(rsoplog, localDB);
        // false: presumably "no document validation / id-index maintenance" — confirm
        // against Collection::insertDocument's signature.
        checkOplogInsert( localOplogRSCollection->insertDocument( &txn, op, false ) );
        /* todo: now() has code to handle clock skew. but if the skew server to server
           is large it will get unhappy. this code (or code in now() maybe) should be improved. */
        if( theReplSet ) {
            // Oplog timestamps must be strictly increasing. If this op does not advance
            // the clock, attempt to resync directly from the primary instead of
            // continuing with an out-of-order stream.
            if( !(theReplSet->lastOpTimeWritten<ts) ) {
                log() << "replication oplog stream went back in time. previous timestamp: " << theReplSet->lastOpTimeWritten << " newest timestamp: " << ts << ". attempting to sync directly from primary." << endl;
                std::string errmsg;
                BSONObjBuilder result;
                if (!theReplSet->forceSyncFrom(theReplSet->box.getPrimary()->fullName(), errmsg, result)) {
                    log() << "Can't sync from primary: " << errmsg << endl;
                }
            }
            theReplSet->lastOpTimeWritten = ts;
            theReplSet->lastH = h;
            ctx.getClient()->setLastOp( ts );
            // Wake the background sync thread so the new op is noticed promptly.
            BackgroundSync::notify();
        }
    }
    setNewOptime(ts);
}
/**
 * Write an op that is already built (a complete oplog BSON document) to local.oplog.rs
 * and return the op's OpTime (op["ts"]).
 * todo : make _logOpRS() call this so we don't repeat ourself?
 *
 * Reads op["ts"] (the op's OpTime) and op["h"] (the op's hash, pushed into
 * BackgroundSync so it is current if this node steps up to primary).
 *
 * Unlike the older variants, a non-monotonic oplog timestamp here is fatal:
 * fassertFailedNoTrace(18905) aborts the process rather than attempting a resync.
 *
 * Side effects:
 *  - stores hash via BackgroundSync::setLastAppliedHash
 *  - records ts as this client's last op and as the ReplicationCoordinator's
 *    "my last optime", then wakes the background sync producer
 *  - advances the global optime via setNewOptime(ts)
 */
OpTime _logOpObjRS(OperationContext* txn, const BSONObj& op) {
    // Exclusive (MODE_X) lock on the "local" database for the duration of the insert.
    Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
    // XXX soon this needs to be part of an outer WUOW not its own.
    // We can't do this yet due to locking limitations.
    WriteUnitOfWork wunit(txn);
    const OpTime ts = op["ts"]._opTime();
    long long hash = op["h"].numberLong();
    {
        // Lazily resolve and cache the local DB and oplog collection on first use.
        if ( localOplogRSCollection == 0 ) {
            Client::Context ctx(txn, rsoplog);
            localDB = ctx.db();
            verify( localDB );
            localOplogRSCollection = localDB->getCollection(txn, rsoplog);
            massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", localOplogRSCollection);
        }
        Client::Context ctx(txn, rsoplog, localDB);
        // false: presumably "no document validation / id-index maintenance" — confirm
        // against Collection::insertDocument's signature.
        checkOplogInsert(localOplogRSCollection->insertDocument(txn, op, false));
        ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
        OpTime myLastOptime = replCoord->getMyLastOptime();
        // Oplog timestamps must be strictly increasing; going backwards means the
        // oplog is corrupt/out-of-order, so crash without a stack trace.
        if (!(myLastOptime < ts)) {
            severe() << "replication oplog stream went back in time. previous timestamp: " << myLastOptime << " newest timestamp: " << ts;
            fassertFailedNoTrace(18905);
        }
        BackgroundSync* bgsync = BackgroundSync::get();
        // Keep this up-to-date, in case we step up to primary.
        bgsync->setLastAppliedHash(hash);
        ctx.getClient()->setLastOp( ts );
        replCoord->setMyLastOptime(txn, ts);
        // Wake the background sync thread so the new op is noticed promptly.
        bgsync->notify();
    }
    setNewOptime(ts);
    // Commit the insert and the bookkeeping above as one unit of work.
    wunit.commit();
    return ts;
}
// Advance the process-wide optime to `newTime` by delegating to the free
// function setNewOptime(); this method only adapts it to the external-state
// interface.
void ReplicationCoordinatorExternalStateImpl::setGlobalTimestamp(const Timestamp& newTime) {
    setNewOptime(newTime);
}
// Advance the process-wide optime to `newTime` by delegating to the free
// function setNewOptime(); this method only adapts it to the external-state
// interface.
void ReplicationCoordinatorExternalStateImpl::setGlobalOpTime(const OpTime& newTime) {
    setNewOptime(newTime);
}