/**
 * Temporarily give up the current operation's db lock so other operations can
 * make progress.  Static variant (no ClientCursor instance required).
 *
 * @param micros  -1 => sleep Client::recommendedYieldMicros();
 *                 0 => minimal yield (yieldOrSleepFor1Microsecond);
 *                >0 => sleep that many microseconds (write-lock case only).
 * @param ns      namespace; used only in the recursive-lock warning message.
 * @param rec     if non-NULL, the record is touch()ed after the yield to page
 *                it back in; a LockMongoFilesShared is held across the yield
 *                so the backing file cannot disappear meanwhile.
 */
void ClientCursor::staticYield(int micros, const StringData& ns, const Record* rec) {
    // Capture before releasing: decides which sleep policy applies below.
    bool haveReadLock = Lock::isReadLocked();
    killCurrentOp.checkForInterrupt();
    {
        auto_ptr<LockMongoFilesShared> lk;
        if ( rec ) {
            // need to lock this else rec->touch won't be safe file could disappear
            lk.reset( new LockMongoFilesShared() );
        }
        dbtempreleasecond unlock;
        if ( unlock.unlocked() ) {
            if ( haveReadLock ) {
                // This sleep helps reader threads yield to writer threads.
                // Without this, the underlying reader/writer lock implementations
                // are not sufficiently writer-greedy.
                // Note: with a read lock we never sleep the full requested
                // 'micros' — at most 1 microsecond — to keep readers snappy.
#ifdef _WIN32
                SwitchToThread();
#else
                if ( micros == 0 ) {
                    yieldOrSleepFor1Microsecond();
                }
                else {
                    sleepmicros(1);
                }
#endif
            }
            else {
                if ( micros == -1 ) {
                    sleepmicros(Client::recommendedYieldMicros());
                }
                else if ( micros == 0 ) {
                    yieldOrSleepFor1Microsecond();
                }
                else if ( micros > 0 ) {
                    sleepmicros( micros );
                }
            }
        }
        else if ( Listener::getTimeTracker() == 0 ) {
            // we aren't running a server, so likely a repair, so don't complain
        }
        else {
            // Could not release because the lock is held recursively; report
            // the outermost (top-level) operation in the warning.
            CurOp * c = cc().curop();
            while ( c->parent() )
                c = c->parent();
            warning() << "ClientCursor::staticYield can't unlock b/c of recursive lock"
                      << " ns: " << ns
                      << " top: " << c->info()
                      << endl;
        }
        if ( rec )
            rec->touch();
        // NOTE(review): explicit reset so the files lock is dropped before
        // 'unlock''s destructor re-acquires the db lock — presumably required
        // by lock-ordering rules; confirm against the Lock hierarchy.
        lk.reset(0); // need to release this before dbtempreleasecond
    }
}
void run() { Timer t; sleepsecs( 1 ); ASSERT_EQUALS( 1 , t.seconds() ); t.reset(); sleepmicros( 1527123 ); ASSERT( t.micros() > 1000000 ); ASSERT( t.micros() < 2000000 ); t.reset(); sleepmillis( 1727 ); ASSERT( t.millis() >= 1000 ); ASSERT( t.millis() <= 2500 ); { int total = 1200; int ms = 2; t.reset(); for ( int i=0; i<(total/ms); i++ ) { sleepmillis( ms ); } { int x = t.millis(); if ( x < 1000 || x > 2500 ) { cout << "sleeptest x: " << x << endl; ASSERT( x >= 1000 ); ASSERT( x <= 20000 ); } } } #ifdef __linux__ { int total = 1200; int micros = 100; t.reset(); int numSleeps = 1000*(total/micros); for ( int i=0; i<numSleeps; i++ ) { sleepmicros( micros ); } { int y = t.millis(); if ( y < 1000 || y > 2500 ) { cout << "sleeptest y: " << y << endl; ASSERT( y >= 1000 ); /* ASSERT( y <= 100000 ); */ } } } #endif }
/**
 * Release the db lock for roughly 'micros' microseconds so other operations
 * can run, then (optionally) page a record back in.
 *
 * @param micros  -1 => Client::recommendedYieldMicros(); <=0 otherwise => no sleep.
 * @param ns      namespace; used only in the recursive-lock warning message.
 * @param rec     if non-NULL, pinned via LockMongoFilesShared across the
 *                yield and touch()ed afterwards.
 */
void ClientCursor::staticYield( int micros , const StringData& ns , Record * rec ) {
    killCurrentOp.checkForInterrupt( false );
    {
        auto_ptr<LockMongoFilesShared> lk;
        if ( rec ) {
            // need to lock this else rec->touch won't be safe file could disappear
            lk.reset( new LockMongoFilesShared() );
        }
        dbtempreleasecond unlock;
        if ( unlock.unlocked() ) {
            if ( micros == -1 )
                micros = Client::recommendedYieldMicros();
            if ( micros > 0 )
                sleepmicros( micros );
        }
        else {
            // Recursive lock: can't temp-release.  Walk up to the top-level
            // CurOp so the warning names the outermost operation.
            CurOp * c = cc().curop();
            while ( c->parent() )
                c = c->parent();
            warning() << "ClientCursor::yield can't unlock b/c of recursive lock"
                      << " ns: " << ns
                      << " top: " << c->info()
                      << endl;
        }
        if ( rec )
            rec->touch();
        // NOTE(review): files lock must be dropped before 'unlock''s destructor
        // re-acquires the db lock — presumably a lock-ordering requirement.
        lk.reset(0); // need to release this before dbtempreleasecond
    }
}
/**
 * Give up the CPU for the shortest practical interval.
 * Uses the platform's thread-yield primitive where one is available
 * (SwitchToThread on Windows, pthread_yield on Linux); elsewhere falls back
 * to an actual 1-microsecond sleep.
 */
void yieldOrSleepFor1Microsecond() {
#ifdef _WIN32
    SwitchToThread();
#elif defined(__linux__)
    pthread_yield();
#else
    sleepmicros(1);
#endif
}
/**
 * Execute every insert in 'request', appending one WriteErrorDetail per failed
 * document to 'errors'.  Stops at the first error when the batch is ordered.
 *
 * @param request  the batched insert command to execute.
 * @param errors   out-param; receives heap-allocated error details (with the
 *                 failing document's index set) — caller takes ownership.
 */
void WriteBatchExecutor::execInserts( const BatchedCommandRequest& request,
                                      std::vector<WriteErrorDetail*>* errors ) {
    // Theory of operation:
    //
    // Instantiates an ExecInsertsState, which represents all of the state involved in the batch
    // insert execution algorithm. Most importantly, encapsulates the lock state.
    //
    // Every iteration of the loop in execInserts() processes one document insertion, by calling
    // insertOne() exactly once for a given value of state.currIndex.
    //
    // If the ExecInsertsState indicates that the requisite write locks are not held, insertOne
    // acquires them and performs lock-acquisition-time checks. However, on non-error
    // execution, it does not release the locks. Therefore, the yielding logic in the while
    // loop in execInserts() is solely responsible for lock release in the non-error case.
    //
    // Internally, insertOne loops performing the single insert until it completes without a
    // PageFaultException, or until it fails with some kind of error. Errors are mostly
    // propagated via the request->error field, but DBExceptions or std::exceptions may escape,
    // particularly on operation interruption. These kinds of errors necessarily prevent
    // further insertOne calls, and stop the batch. As a result, the only expected source of
    // such exceptions are interruptions.
    ExecInsertsState state(&request);
    normalizeInserts(request, &state.normalizedInserts, &state.pregeneratedKeys);

    ElapsedTracker elapsedTracker(128, 10); // 128 hits or 10 ms, matching RunnerYieldPolicy's

    for (state.currIndex = 0;
         state.currIndex < state.request->sizeWriteOps();
         ++state.currIndex) {

        if (elapsedTracker.intervalHasElapsed()) {
            // Consider yielding between inserts.
            if (state.hasLock()) {
                int micros = ClientCursor::suggestYieldMicros();
                if (micros > 0) {
                    // Release the write lock, allow interruption, then sleep
                    // so other operations can run before we re-acquire.
                    state.unlock();
                    killCurrentOp.checkForInterrupt();
                    sleepmicros(micros);
                }
            }
            killCurrentOp.checkForInterrupt();
            elapsedTracker.resetLastTime();
        }

        WriteErrorDetail* error = NULL;
        execOneInsert(&state, &error);
        if (error) {
            errors->push_back(error);
            error->setIndex(state.currIndex);
            // Ordered batches stop at the first failure.
            if (request.getOrdered())
                return;
        }
    }
}
/**
 * Temporarily release the db lock so other operations can run, then check
 * whether this cursor survived the release.
 *
 * @param micros  -1 => sleep Client::recommendedYieldMicros();
 *                >0 => sleep that many microseconds; otherwise no sleep.
 * @return false if the cursor was deleted while the lock was released — in
 *         that case the caller must not touch 'this' again.
 */
bool ClientCursor::yield( int micros ) {
    // need to store on the stack in case this gets deleted
    CursorId id = cursorid;
    // Suspend delete-mode while yielded; restored below only if we survive.
    bool doingDeletes = _doingDeletes;
    _doingDeletes = false;
    // Persist the cursor's position before giving up the lock.
    updateLocation();
    {
        /* a quick test that our temprelease is safe.
           todo: make a YieldingCursor class
           and then make the following code part of a unit test.
           'test' is compile-time 0, so this block is normally dead code. */
        const int test = 0;
        static bool inEmpty = false;
        if( test && !inEmpty ) {
            inEmpty = true;
            log() << "TEST: manipulate collection during cc:yield" << endl;
            if( test == 1 )
                Helpers::emptyCollection(ns.c_str());
            else if( test == 2 ) {
                BSONObjBuilder b;
                string m;
                dropCollection(ns.c_str(), m, b);
            }
            else {
                dropDatabase(ns.c_str());
            }
        }
    }
    {
        dbtempreleasecond unlock;
        if ( unlock.unlocked() ){
            if ( micros == -1 )
                micros = Client::recommendedYieldMicros();
            if ( micros > 0 )
                sleepmicros( micros );
        }
        else {
            log( LL_WARNING ) << "ClientCursor::yield can't unlock b/c of recursive lock" << endl;
        }
    }
    // Re-look up by id: anything may have happened while unlocked.
    if ( ClientCursor::find( id , false ) == 0 ){
        // i was deleted
        return false;
    }
    _doingDeletes = doingDeletes;
    return true;
}
/**
 * Run 'cmd' against database 'db' on 'server' asynchronously on a freshly
 * spawned thread; the caller can join()/inspect the returned CommandResult.
 *
 * Hand-off protocol: the result is published to the worker through the
 * '_grab' slot, and this function spins until the worker clears it.
 * NOTE(review): presumably Future::commandThread copies *_grab and resets
 * _grab to signal start — confirm.  Also note that concurrent spawnCommand
 * calls would race on the single _grab slot unless callers serialize.
 */
shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server ,
                                                        const string& db ,
                                                        const BSONObj& cmd ){
    shared_ptr<Future::CommandResult> res;
    res.reset( new Future::CommandResult( server , db , cmd ) );
    _grab = &res;
    boost::thread thr( Future::commandThread );
    // Busy-wait (2us naps) until the worker has taken ownership of 'res';
    // 'res' is stack-local, so we must not return before the hand-off.
    while ( _grab )
        sleepmicros(2);
    return res;
}
/**
 * Release the db lock for roughly 'micros' microseconds so other operations
 * can run.  micros == -1 means "use the recommended yield duration";
 * a non-positive resolved duration skips the sleep entirely.
 */
void ClientCursor::staticYield( int micros ) {
    {
        dbtempreleasecond unlock;
        if ( ! unlock.unlocked() ) {
            // The lock is held recursively, so a temp release is impossible.
            log( LL_WARNING ) << "ClientCursor::yield can't unlock b/c of recursive lock" << endl;
        }
        else {
            const int sleepTime = ( micros == -1 ) ? Client::recommendedYieldMicros() : micros;
            if ( sleepTime > 0 )
                sleepmicros( sleepTime );
        }
    }
}
/**
 * Release the db lock for roughly 'micros' microseconds so other operations
 * can run.  This variant never sleeps when a read lock was held at entry.
 *
 * @param micros  -1 => Client::recommendedYieldMicros(); <=0 otherwise => no sleep.
 * @param ns      namespace; used only in the recursive-lock warning message.
 * @param rec     if non-NULL, pinned via LockMongoFilesShared across the
 *                yield and touch()ed afterwards to page it back in.
 */
void ClientCursor::staticYield( int micros , const StringData& ns , Record * rec ) {
    // Capture before releasing: read-lock holders skip the sleep below.
    bool haveReadLock = Lock::isReadLocked();
    killCurrentOp.checkForInterrupt( false );
    {
        auto_ptr<LockMongoFilesShared> lk;
        if ( rec ) {
            // need to lock this else rec->touch won't be safe file could disappear
            lk.reset( new LockMongoFilesShared() );
        }
        dbtempreleasecond unlock;
        if ( unlock.unlocked() ) {
            if ( haveReadLock ) {
                // don't sleep with a read lock
            }
            else {
                if ( micros == -1 )
                    micros = Client::recommendedYieldMicros();
                if ( micros > 0 )
                    sleepmicros( micros );
            }
        }
        else if ( Listener::getTimeTracker() == 0 ) {
            // we aren't running a server, so likely a repair, so don't complain
        }
        else {
            // Recursive lock: report the outermost (top-level) operation.
            CurOp * c = cc().curop();
            while ( c->parent() )
                c = c->parent();
            warning() << "ClientCursor::yield can't unlock b/c of recursive lock"
                      << " ns: " << ns
                      << " top: " << c->info()
                      << endl;
        }
        if ( rec )
            rec->touch();
        // NOTE(review): files lock must be dropped before 'unlock''s destructor
        // re-acquires the db lock — presumably a lock-ordering requirement.
        lk.reset(0); // need to release this before dbtempreleasecond
    }
}
void ClientCursor::staticYield( int micros , const StringData& ns ) { killCurrentOp.checkForInterrupt( false ); { dbtempreleasecond unlock; if ( unlock.unlocked() ) { if ( micros == -1 ) micros = Client::recommendedYieldMicros(); if ( micros > 0 ) sleepmicros( micros ); } else { CurOp * c = cc().curop(); while ( c->parent() ) c = c->parent(); warning() << "ClientCursor::yield can't unlock b/c of recursive lock" << " ns: " << ns << " top: " << c->info() << endl; } } }
/** Back off briefly so a busy server can service other work; sleeps for the
    client-recommended yield duration. */
void beNice() {
    const int napMicros = Client::recommendedYieldMicros();
    sleepmicros( napMicros );
}
/** Sanity test for sleepsecs/sleepmillis/sleepmicros accuracy measured with Timer. */
void run() {
    Timer t;
    int matches = 0;
    // Three 1-second sleeps; each should round to ~1 second on the Timer.
    for( int p = 0; p < 3; p++ ) {
        sleepsecs( 1 );
        int sec = (t.millis() + 2)/1000; // +2ms slack before truncating to seconds
        if( sec == 1 )
            matches++;
        else
            mongo::unittest::log() << "temp millis: " << t.millis() << endl;
        ASSERT( sec >= 0 && sec <= 2 );
        t.reset();
    }
    // Tolerate one outlier out of three before failing.
    if ( matches < 2 )
        mongo::unittest::log() << "matches:" << matches << endl;
    ASSERT( matches >= 2 );

    // ~1.53s microsecond sleep: expect between 1s and 2s.
    sleepmicros( 1527123 );
    ASSERT( t.micros() > 1000000 );
    ASSERT( t.micros() < 2000000 );
    t.reset();

    // ~1.7s millisecond sleep: expect between 1s and 2.5s.
    sleepmillis( 1727 );
    ASSERT( t.millis() >= 1000 );
    ASSERT( t.millis() <= 2500 );

    // 600 x 2ms sleeps totalling ~1.2s; only assert when out of range.
    {
        int total = 1200;
        int ms = 2;
        t.reset();
        for ( int i=0; i<(total/ms); i++ ) {
            sleepmillis( ms );
        }
        {
            int x = t.millis();
            if ( x < 1000 || x > 2500 ) {
                cout << "sleeptest finds sleep accuracy to be not great. x: " << x << endl;
                ASSERT( x >= 1000 );
                ASSERT( x <= 20000 );
            }
        }
    }

#ifdef __linux__
    // 12000 x 100us sleeps totalling ~1.2s (linux only).
    {
        int total = 1200;
        int micros = 100;
        t.reset();
        int numSleeps = 1000*(total/micros);
        for ( int i=0; i<numSleeps; i++ ) {
            sleepmicros( micros );
        }
        {
            int y = t.millis();
            if ( y < 1000 || y > 2500 ) {
                cout << "sleeptest y: " << y << endl;
                ASSERT( y >= 1000 );
                /* ASSERT( y <= 100000 ); */
            }
        }
    }
#endif
}
/**
 * Delete every document in 'ns' whose key under 'keyPattern' lies in
 * [min, max) — or [min, max] when maxInclusive — one document per
 * lock acquisition, optionally throttling on secondary replication lag.
 *
 * @param ns                namespace to delete from.
 * @param min, max          range bounds on the index described by keyPattern.
 * @param keyPattern        pattern identifying which index to scan.
 * @param maxInclusive      whether the upper bound itself is deleted.
 * @param secondaryThrottle wait (up to 60s) for 2 nodes to replicate each delete.
 * @param callback          if non-NULL, notified with each doc before deletion.
 * @param fromMigrate       flag propagated into the oplog "d" entry.
 * @return number of documents deleted.
 */
long long Helpers::removeRange( const string& ns ,
                                const BSONObj& min ,
                                const BSONObj& max ,
                                const BSONObj& keyPattern ,
                                bool maxInclusive ,
                                bool secondaryThrottle ,
                                RemoveCallback * callback,
                                bool fromMigrate ) {
    Client& c = cc();

    long long numDeleted = 0;
    PageFaultRetryableSection pgrs;

    long long millisWaitingForReplication = 0;

    while ( 1 ) {
        try {
            // Write lock is scoped to this try block: released before the
            // replication wait / sleep below.
            Client::WriteContext ctx(ns);

            // NOTE(review): this 'c' shadows the outer Client& 'c' for the
            // remainder of the try block; the c.getLastOp() call after the
            // catch refers to the Client again.
            scoped_ptr<Cursor> c;

            {
                NamespaceDetails* nsd = nsdetails( ns.c_str() );
                if ( ! nsd )
                    break;

                int ii = nsd->findIndexByKeyPattern( keyPattern );
                verify( ii >= 0 );

                IndexDetails& i = nsd->idx( ii );

                // Extend min to get (min, MinKey, MinKey, ....)
                BSONObj newMin = Helpers::modifiedRangeBound( min , keyPattern , -1 );
                // If upper bound is included, extend max to get (max, MaxKey, MaxKey, ...)
                // If not included, extend max to get (max, MinKey, MinKey, ....)
                int minOrMax = maxInclusive ? 1 : -1;
                BSONObj newMax = Helpers::modifiedRangeBound( max , keyPattern , minOrMax );

                c.reset( BtreeCursor::make( nsd , ii , i , newMin , newMax , maxInclusive , 1 ) );
            }

            if ( ! c->ok() ) {
                // we're done
                break;
            }

            DiskLoc rloc = c->currLoc();
            BSONObj obj = c->current();

            // this is so that we don't have to handle this cursor in the delete code
            c.reset(0);

            if ( callback )
                callback->goingToDelete( obj );

            logOp( "d" , ns.c_str() , rloc.obj()["_id"].wrap() , 0 , 0 , fromMigrate );
            theDataFileMgr.deleteRecord(ns.c_str() , rloc.rec(), rloc);
            numDeleted++;
        }
        catch( PageFaultException& e ) {
            // Page in the faulted record and retry the whole iteration.
            e.touch();
            continue;
        }

        // From here on the write lock is no longer held.
        Timer secondaryThrottleTime;

        if ( secondaryThrottle ) {
            if ( ! waitForReplication( c.getLastOp(), 2, 60 /* seconds to wait */ ) ) {
                warning() << "replication to secondaries for removeRange at least 60 seconds behind" << endl;
            }
            millisWaitingForReplication += secondaryThrottleTime.millis();
        }

        if ( ! Lock::isLocked() ) {
            // Yield between deletes, crediting time already spent waiting for
            // replication against the sleep budget.
            int micros = ( 2 * Client::recommendedYieldMicros() ) - secondaryThrottleTime.micros();
            if ( micros > 0 ) {
                LOG(1) << "Helpers::removeRangeUnlocked going to sleep for " << micros << " micros" << endl;
                sleepmicros( micros );
            }
        }
    }

    if ( secondaryThrottle )
        log() << "Helpers::removeRangeUnlocked time spent waiting for replication: "
              << millisWaitingForReplication << "ms" << endl;

    return numDeleted;
}
/**
 * Block until the worker thread marks this result done, then return whether
 * the command succeeded.  Busy-waits in 50-microsecond naps.
 * NOTE(review): _done is read here while another thread writes it — presumably
 * it is volatile/atomic or otherwise made visible; confirm in the class decl.
 * @return the command's success flag (_ok).
 */
bool Future::CommandResult::join(){
    while ( ! _done )
        sleepmicros( 50 );
    return _ok;
}