static void handleCursorCommand(CursorId id, BSONObj& cmdObj, BSONObjBuilder& result) {
    BSONElement batchSizeElem = cmdObj.getFieldDotted("cursor.batchSize");
    const long long batchSize = batchSizeElem.isNumber()
                                ? batchSizeElem.numberLong()
                                : 101; // same as query

    // Using limited cursor API that ignores many edge cases. Should be sufficient for commands.
    ClientCursor::Pin pin(id);
    ClientCursor* cursor = pin.c();

    massert(16958, "Cursor shouldn't have been deleted", cursor);

    // Make sure this cursor won't disappear on us
    fassert(16959, !cursor->c()->shouldDestroyOnNSDeletion());
    fassert(16960, !cursor->c()->requiresLock());

    try {
        // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
        BSONArrayBuilder resultsArray;
        const int byteLimit = MaxBytesToReturnToClientAtOnce;
        for (int objs = 0;
             objs < batchSize && cursor->ok() && resultsArray.len() <= byteLimit;
             objs++) {
            // TODO may need special logic if cursor->current() would cause results to be > 16MB
            resultsArray.append(cursor->current());
            cursor->advance();
        }

        // The initial ok() on a cursor may be very expensive so we don't do it when batchSize
        // is 0 since that indicates a desire for a fast return.
        if (batchSize != 0 && !cursor->ok()) {
            // There is no more data. Kill the cursor.
            pin.release();
            ClientCursor::erase(id);
            id = 0;
        }

        BSONObjBuilder cursorObj(result.subobjStart("cursor"));
        cursorObj.append("id", id);
        cursorObj.append("ns", cursor->ns());
        cursorObj.append("firstBatch", resultsArray.arr());
        cursorObj.done();
    }
    catch (...) {
        // Clean up cursor on way out of scope.
        pin.release();
        ClientCursor::erase(id);
        throw;
    }
}
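// A second version of handleCursorCommand(). Compared with the one above, it:
// - captures the namespace in cursorNs up front, so "ns" can still be appended to the reply
//   after an exhausted cursor has been erased (the version above reads cursor->ns() after
//   ClientCursor::erase(id), a use-after-free);
// - checks resultsArray.len() + current.objsize() against the byte limit *before* appending,
//   so an oversized document becomes the first document of the next batch rather than being
//   appended after the limit has already been crossed;
// - NULLs out the cursor pointer once erased, and rolls any remaining maxTimeMS budget over
//   to the cursor for future getmore operations.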
static void handleCursorCommand(CursorId id, BSONObj& cmdObj, BSONObjBuilder& result) {
    BSONElement batchSizeElem = cmdObj.getFieldDotted("cursor.batchSize");
    const long long batchSize = batchSizeElem.isNumber()
                                ? batchSizeElem.numberLong()
                                : 101; // same as query

    // Using limited cursor API that ignores many edge cases. Should be sufficient for commands.
    ClientCursorPin pin(id);
    ClientCursor* cursor = pin.c();

    massert(16958, "Cursor shouldn't have been deleted", cursor);

    // Make sure this cursor won't disappear on us
    fassert(16959, !cursor->c()->shouldDestroyOnNSDeletion());
    fassert(16960, !cursor->c()->requiresLock());

    try {
        const string cursorNs = cursor->ns(); // we need this after cursor may have been deleted

        // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
        BSONArrayBuilder resultsArray;
        const int byteLimit = MaxBytesToReturnToClientAtOnce;
        for (int objCount = 0; objCount < batchSize && cursor->ok(); objCount++) {
            BSONObj current = cursor->current();
            if (resultsArray.len() + current.objsize() > byteLimit)
                break; // too big. current will be the first doc in the second batch

            resultsArray.append(current);
            cursor->advance();
        }

        // The initial ok() on a cursor may be very expensive so we don't do it when batchSize
        // is 0 since that indicates a desire for a fast return.
        if (batchSize != 0 && !cursor->ok()) {
            // There is no more data. Kill the cursor.
            pin.release();
            ClientCursor::erase(id);
            id = 0;
            cursor = NULL; // make it an obvious error to use cursor after this point
        }

        if (cursor) {
            // If a time limit was set on the pipeline, remaining time is "rolled over" to the
            // cursor (for use by future getmore ops).
            cursor->setLeftoverMaxTimeMicros( cc().curop()->getRemainingMaxTimeMicros() );
        }

        BSONObjBuilder cursorObj(result.subobjStart("cursor"));
        cursorObj.append("id", id);
        cursorObj.append("ns", cursorNs);
        cursorObj.append("firstBatch", resultsArray.arr());
        cursorObj.done();
    }
    catch (...) {
        // Clean up cursor on way out of scope.
        pin.release();
        ClientCursor::erase(id);
        throw;
    }
}
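// A minimal, self-contained sketch of the batch-sizing rule used above, with hypothetical
// names and plain std::string documents rather than the MongoDB types: the loop stops
// *before* appending a document that would push the batch past the byte limit, so that
// document leads the following batch.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Build one batch starting at 'pos', honoring both a document-count and a byte limit.
// On return, 'pos' points at the first document of the next batch.
static std::vector<std::string> buildBatch(const std::vector<std::string>& docs,
                                           size_t& pos,
                                           size_t batchSize,
                                           size_t byteLimit) {
    std::vector<std::string> batch;
    size_t bytes = 0;
    while (batch.size() < batchSize && pos < docs.size()) {
        const std::string& current = docs[pos];
        if (bytes + current.size() > byteLimit)
            break; // too big: 'current' becomes the first doc of the next batch
        batch.push_back(current);
        bytes += current.size();
        ++pos;
    }
    return batch;
}

int main() {
    std::vector<std::string> docs = {"aaaa", "bbbb", "cccccccc", "dd"};
    size_t pos = 0;
    while (pos < docs.size()) {
        std::vector<std::string> batch =
            buildBatch(docs, pos, /*batchSize=*/101, /*byteLimit=*/10);
        if (batch.empty())
            break; // a single doc alone exceeds byteLimit; avoid spinning
        std::cout << "batch of " << batch.size() << " docs\n"; // 2 docs, then 2 docs
    }
    return 0;
}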
void DocumentSourceCursor::loadBatch() {
    if (!_cursorId) {
        dispose();
        return;
    }

    // We have already validated the sharding version when we constructed the cursor
    // so we shouldn't check it again.
    Lock::DBRead lk(ns);
    Client::Context ctx(ns, storageGlobalParams.dbpath, /*doVersion=*/false);

    ClientCursorPin pin(_cursorId);
    ClientCursor* cursor = pin.c();

    uassert(16950, "Cursor deleted. Was the collection or database dropped?", cursor);

    cursor->c()->recoverFromYield();

    int memUsageBytes = 0;
    for( ; cursor->ok(); cursor->advance() ) {
        yieldSometimes(cursor);
        if ( !cursor->ok() ) {
            // The cursor was exhausted during the yield.
            break;
        }

        if ( !cursor->currentMatches() || cursor->currentIsDup() )
            continue;

        // grab the matching document
        if (canUseCoveredIndex(cursor)) {
            // Can't have collection metadata if we are here
            BSONObj indexKey = cursor->currKey();
            _currentBatch.push_back(Document(cursor->c()->keyFieldsOnly()->hydrate(indexKey)));
        }
        else {
            BSONObj next = cursor->current();

            // check to see if this is a new object we don't own yet
            // because of a chunk migration
            if (_collMetadata) {
                KeyPattern kp( _collMetadata->getKeyPattern() );
                if ( !_collMetadata->keyBelongsToMe( kp.extractSingleKey( next ) ) )
                    continue;
            }

            _currentBatch.push_back(_projection
                                        ? documentFromBsonWithDeps(next, _dependencies)
                                        : Document(next));
        }

        if (_limit) {
            if (++_docsAddedToBatches == _limit->getLimit()) {
                break;
            }
            verify(_docsAddedToBatches < _limit->getLimit());
        }

        memUsageBytes += _currentBatch.back().getApproximateSize();
        if (memUsageBytes > MaxBytesToReturnToClientAtOnce) {
            // End this batch and prepare cursor for yielding.
            cursor->advance();

            if (cursor->c()->supportYields()) {
                ClientCursor::YieldData data;
                cursor->prepareToYield(data);
            }
            else {
                cursor->c()->noteLocation();
            }
            return;
        }
    }

    // If we got here, there aren't any more documents.
    // The Cursor must be released, see SERVER-6123.
    pin.release();
    ClientCursor::erase(_cursorId);
    _cursorId = 0;
    _collMetadata.reset();
}
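// A minimal sketch (hypothetical types, not the MongoDB classes above) of loadBatch()'s
// memory cap: documents accumulate in _currentBatch until their approximate size exceeds a
// threshold, then the loop steps past the last consumed document and returns, leaving the
// source positioned for the next loadBatch() call.
#include <deque>
#include <iostream>
#include <string>
#include <vector>

class BatchingSource {
public:
    BatchingSource(std::vector<std::string> docs, size_t maxBatchBytes)
        : _docs(std::move(docs)), _pos(0), _maxBatchBytes(maxBatchBytes) {}

    // Analogous to loadBatch(): fill _currentBatch until input is exhausted or the cap is hit.
    void loadBatch() {
        _currentBatch.clear();
        size_t memUsageBytes = 0;
        for (; _pos < _docs.size(); ++_pos) {
            _currentBatch.push_back(_docs[_pos]);
            memUsageBytes += _currentBatch.back().size();
            if (memUsageBytes > _maxBatchBytes) {
                ++_pos; // step past the doc we just consumed before pausing
                return;
            }
        }
        // Input exhausted; nothing left for future batches.
    }

    bool exhausted() const { return _pos >= _docs.size(); }
    const std::deque<std::string>& currentBatch() const { return _currentBatch; }

private:
    std::vector<std::string> _docs;
    size_t _pos;
    size_t _maxBatchBytes;
    std::deque<std::string> _currentBatch;
};

int main() {
    BatchingSource source({"aaaa", "bbbb", "cccc", "dd"}, /*maxBatchBytes=*/6);
    while (true) {
        source.loadBatch();
        std::cout << "batch of " << source.currentBatch().size() << " docs\n";
        if (source.exhausted())
            break;
    }
    return 0; // prints: batch of 2 docs, then batch of 2 docs
}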