// Construct an access plan for one collection: record the query/order/hint
// (deep-copied so the plan owns its storage), default to table scan, and
// compute the hash used to look the plan up in the plan cache.
_optAccessPlan ( _dmsStorageUnit *su, const CHAR *collectionName,
                 const BSONObj &query, const BSONObj &orderBy,
                 const BSONObj &hint )
:_useCount(0)
{
   // Zero the fixed-size name buffers, then copy the collection name in,
   // always leaving room for the terminating NUL.
   ossMemset ( _idxName, 0, sizeof( _idxName ) ) ;
   ossMemset ( _collectionName, 0, sizeof( _collectionName ) ) ;
   ossStrncpy ( _collectionName, collectionName, sizeof( _collectionName ) - 1 ) ;

   // Scan defaults: not initialized yet, full table scan, no index chosen.
   _isInitialized = FALSE ;
   _scanType      = TBSCAN ;
   _indexCBExtent = DMS_INVALID_EXTENT ;
   _indexLID      = DMS_INVALID_EXTENT ;
   _su            = su ;

   // Own deep copies of the caller's BSON objects.
   _query   = query.copy() ;
   _orderBy = orderBy.copy() ;
   _hint    = hint.copy() ;

   _hintFailed   = FALSE ;
   _predList     = NULL ;
   _hashValue    = hash ( query, orderBy, hint ) ;
   _apm          = NULL ;
   _sortRequired = FALSE ;
   _isAutoPlan   = FALSE ;
}
// Mock insert: make sure the document carries an _id, append an owned copy to
// the in-memory collection, and mirror the write into the authz op log.
Status AuthzManagerExternalStateMock::insert(
        OperationContext* txn,
        const NamespaceString& collectionName,
        const BSONObj& document,
        const BSONObj&) {
    BSONObj toInsert;
    if (!document["_id"].eoo()) {
        // Caller supplied an _id; just take an owned copy.
        toInsert = document.copy();
    } else {
        // No _id: generate one and place it first, followed by the payload.
        BSONObjBuilder withId;
        withId.append("_id", OID::gen());
        withId.appendElements(document);
        toInsert = withId.obj();
    }
    _documents[collectionName].push_back(toInsert);

    // Replicate the insert into the authorization manager's op log, if any.
    if (_authzManager) {
        _authzManager->logOp(txn, "i", collectionName.ns().c_str(), toInsert, NULL);
    }
    return Status::OK();
}
// Mock insert without an operation context: remember an owned copy of the
// document in the in-memory collection keyed by namespace.
Status AuthzManagerExternalStateMock::insert(
        const NamespaceString& collectionName,
        const BSONObj& document,
        const BSONObj&) {
    BSONObj owned = document.copy();
    _documents[collectionName].push_back(owned);
    return Status::OK();
}
// Change the fail point's mode/value/payload.
// Sequence: (1) disable so no new readers enter, (2) spin until every
// in-flight reader has drained, (3) install the new state, then re-enable
// if the new mode is active.
void FailPoint::setMode(Mode mode, ValType val, const BSONObj& extra) {
    // Serialize concurrent setMode() callers.
    stdx::lock_guard<stdx::mutex> scoped(_modMutex);

    // Step 1: deactivate so readers see the fail point as off.
    disableFailPoint();

    // Step 2: wait for all current readers of the fail point to finish.
    while (_fpInfo.load() != 0) {
        sleepmillis(50);
    }

    // Step 3: install the new mode, count/probability, and payload copy.
    _mode = mode;
    _timesOrPeriod.store(val);
    _data = extra.copy();

    if (_mode != off) {
        enableFailPoint();
    }
}
virtual bool run(const string& db, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl) { uassert( 16892, "Must be in a multi-statement transaction to begin a load.", cc().hasTxn()); uassert( 16882, "The ns field must be a string.", cmdObj["ns"].type() == mongo::String ); uassert( 16883, "The indexes field must be an array of index objects.", cmdObj["indexes"].type() == mongo::Array ); uassert( 16884, "The options field must be an object.", !cmdObj["options"].ok() || cmdObj["options"].type() == mongo::Object ); LOG(0) << "Beginning bulk load, cmd: " << cmdObj << endl; const string ns = db + "." + cmdObj["ns"].String(); const BSONObj &optionsObj = cmdObj["options"].Obj(); vector<BSONElement> indexElements = cmdObj["indexes"].Array(); vector<BSONObj> indexes; for (vector<BSONElement>::const_iterator i = indexElements.begin(); i != indexElements.end(); i++) { uassert( 16885, "Each index spec must be an object describing the index to be built", i->type() == mongo::Object ); BSONObj obj = i->Obj(); indexes.push_back(obj.copy()); } cc().beginClientLoad(ns, indexes, optionsObj); result.append("status", "load began"); result.append("ok", true); return true; }
// Record the internal-user auth parameters. The first successful call flips
// the "params are set" flag; the copy itself is guarded by authParamMutex.
void setInternalUserAuthParams(const BSONObj& authParamsIn) {
    if (!isInternalAuthSet()) {
        authParamsSet = true;
    }
    boost::mutex::scoped_lock guard(authParamMutex);
    // Deep copy: the caller's object may not outlive this call.
    authParams = authParamsIn.copy();
}
// Change the fail point's mode/value/payload (boost-locking variant).
// Sequence: (1) disable so no new readers enter, (2) spin until in-flight
// readers drain, (3) validate and install the new state, re-enabling if the
// new mode is active.
void FailPoint::setMode(Mode mode, ValType val, const BSONObj& extra) {
    // Serialize concurrent mode changes.
    boost::lock_guard<boost::mutex> scoped(_modMutex);

    // Step 1: deactivate the fail point.
    disableFailPoint();

    // Step 2: wait for all current readers of the fail point to finish.
    while (_fpInfo.load() != 0) {
        sleepmillis(50);
    }

    // Step 3: reject out-of-range modes, then install the new state.
    uassert(16442,
            stream() << "mode not supported " << static_cast<int>(mode),
            mode >= off && mode < numModes);

    _mode = mode;
    _timesOrPeriod.store(val);
    _data = extra.copy();

    if (_mode != off) {
        enableFailPoint();
    }
}
// Return an owned copy of the internal-user auth parameters, or an empty
// object when no parameters have been installed yet.
BSONObj getInternalUserAuthParamsWithFallback() {
    if (!authParamsSet) {
        return BSONObj();
    }
    boost::mutex::scoped_lock guard(authParamMutex);
    return authParams.copy();
}
// Build a key generator from an index key definition.
// The definition is deep-copied so the generator owns its pattern.
_ixmIndexKeyGen::_ixmIndexKeyGen ( const BSONObj &keyDef, IXM_KEYGEN_TYPE genType )
{
   _keyPattern = keyDef.copy () ;
   _keyGenType = genType ;
   _type       = IXM_EXTENT_TYPE_NONE ;
   // Derived state (field names, compiled pattern, ...) is set up here.
   _init () ;
}
/* omagent job */
// Bind this job to a task (may be NULL) and take an owned copy of the job
// info; when a task is present its name is folded into the job name.
_omagentJob::_omagentJob ( _omaTask *pTask, const BSONObj &info, void *ptr )
{
   _pTask   = pTask ;
   _info    = info.copy() ;   // deep copy: the job may outlive the caller's object
   _pointer = ptr ;
   if ( NULL != _pTask )
   {
      _jobName = _jobName + "Omagent job for task[" + _pTask->getTaskName() + "]" ;
   }
}
// Build index metadata from its stored "info" spec document.
// Both the spec and its "key" pattern are deep-copied so this object owns
// them; the unique/sparse/clustering flags default to false when absent
// (trueValue() on a missing element).
IndexDetails::IndexDetails(const BSONObj &info) :
    _info(info.copy()),
    _keyPattern(info["key"].Obj().copy()),
    _unique(info["unique"].trueValue()),
    _sparse(info["sparse"].trueValue()),
    _clustering(info["clustering"].trueValue()),
    // Descriptor is derived from the copied pattern plus the flags above.
    _descriptor(new Descriptor(_keyPattern, false, 0, _sparse, _clustering)) {
    verify(!_info.isEmpty());
    verify(!_keyPattern.isEmpty());
}
// Look up the admin user document for `username` in admin.system.users.
// Returns an owned copy of the document, or an empty object when not found.
BSONObj RestAdminAccess::getAdminUser( const string& username ) const {
    openAdminDb();
    Client::GodScope gs;
    readlocktryassert rl("admin.system.users", 10000);
    Client::Context cx( "admin.system.users" );
    BSONObj user;
    if ( !Helpers::findOne( "admin.system.users" , BSON( "user" << username ) , user ) ) {
        return BSONObj();
    }
    // Copy so the result outlives the read lock/context above.
    return user.copy();
}
// Install the internal-user auth parameters exactly once.
// Returns false (and logs) if they were already set.
bool setInternalUserAuthParams(BSONObj authParams) {
    if (isInternalAuthSet()) {
        log() << "Internal auth params have already been set" << endl;
        return false;
    }
    internalSecurity.authParams = authParams.copy();
    authParamsSet = true;
    return true;
}
// Look up the admin user document for `username` in admin.system.users,
// failing with 16174 if the read lock cannot be acquired within 10s.
// Returns an owned copy of the document, or an empty object when not found.
BSONObj RestAdminAccess::getAdminUser( const string& username ) const {
    openAdminDb();
    Client::GodScope gs;
    readlocktry rl(/*"admin.system.users", */10000);
    uassert( 16174 , "couldn't get read lock to check admin user" , rl.got() );
    Client::Context cx( "admin.system.users" );
    BSONObj user;
    if ( !Helpers::findOne( "admin.system.users" , BSON( "user" << username ) , user ) ) {
        return BSONObj();
    }
    // Copy so the result outlives the read lock/context above.
    return user.copy();
}
// Produce the BSON representation of an ETL host model.
// Non-host models (failed downcast) yield an empty object; the result is
// always an owned copy.
BSONObj CETLHostProcessor::GetBSONObjModel(CETLModel* pETLModel) {
    BSONObj model;
    if (CETLHostModel* pHostModel = dynamic_cast<CETLHostModel*>(pETLModel)) {
        model = pHostModel->GetBSONModel();
    }
    return model.copy();
}
/* omagent job */
// Smart-pointer variant: bind this job to a shared task handle (may be
// empty) and take an owned copy of the job info; when a task is present its
// name is folded into the job name.
_omagentJob::_omagentJob ( omaTaskPtr taskPtr, const BSONObj &info, void *ptr )
{
   _taskPtr = taskPtr ;
   _info    = info.copy() ;   // deep copy: the job may outlive the caller's object
   _pointer = ptr ;
   _omaTask *pTask = _taskPtr.get() ;
   if ( NULL != pTask )
   {
      _jobName = _jobName + "Omagent job for task[" + pTask->getTaskName() + "]" ;
   }
}
void CRackSwitchModel::Load(const BSONObj& boSwitchInfo) { m_strSwitchName = boSwitchInfo.hasField("name") ? boSwitchInfo.getStringField("name") : ""; m_strSwitchSerial = boSwitchInfo.hasField("serial") ? boSwitchInfo.getStringField("serial") : ""; m_strSwitchModel = boSwitchInfo.hasField("model") ? boSwitchInfo.getStringField("model") : ""; if (boSwitchInfo.hasField("vlan_info")) { m_boSwitchInfo = boSwitchInfo.copy(); } CDeviceModel::Load(boSwitchInfo); }
// Transactional variant: look up the admin user for `username` inside a
// read-only snapshot transaction. The transaction commits only on a hit;
// on a miss (or missing namespace) it aborts via its destructor.
BSONObj RestAdminAccess::getAdminUser( const string& username ) const {
    Client::Transaction txn(DB_TXN_READ_ONLY | DB_TXN_SNAPSHOT);
    openAdminDb();
    Client::GodScope gs;
    readlocktry rl(/*"admin.system.users", */10000);
    uassert( 16174 , "couldn't get read lock to check admin user" , rl.got() );
    Client::Context cx( "admin.system.users" );
    NamespaceDetails *d = nsdetails( "admin.system.users" );
    BSONObj user;
    if ( d == NULL || !d->findOne( BSON( "user" << username ) , user ) ) {
        return BSONObj();
    }
    txn.commit();
    return user.copy();
}
// PD_TRACE_DECLARE_FUNCTION ( SDB_CLSCATAMATCHER_LOADPATTERN, "clsCatalogMatcher::loadPattern" ) INT32 clsCatalogMatcher::loadPattern( const BSONObj &matcher ) { INT32 rc = SDB_OK; PD_TRACE_ENTRY ( SDB_CLSCATAMATCHER_LOADPATTERN ) ; _matcher = matcher.copy(); rc = parseAnObj( _matcher, _predicateSet ) ; _predicateSet.adjustByShardingKey(); PD_RC_CHECK( rc, PDERROR, "Failed to load pattern(rc=%d)", rc ); done: PD_TRACE_EXITRC ( SDB_CLSCATAMATCHER_LOADPATTERN, rc ) ; return rc; error: goto done; }
// Build one alert record from the current row of the source MySQL cursor.
// Every column is fetched as a string; the result is an owned copy.
BSONObj CCentralizeSO6AlertProcessor::CreateBSON() {
    BSONObj record = BSON(
        "id"         << m_pSourceMySQLController->FetchString("id") <<
        "serverid"   << m_pSourceMySQLController->FetchString("serverid") <<
        "servername" << m_pSourceMySQLController->FetchString("servername") <<
        "content"    << m_pSourceMySQLController->FetchString("content") <<
        "updatetime" << m_pSourceMySQLController->FetchString("updatetime") <<
        "sendalert"  << m_pSourceMySQLController->FetchString("sendalert") <<
        "reason"     << m_pSourceMySQLController->FetchString("reason") <<
        "priority"   << m_pSourceMySQLController->FetchString("priority") <<
        "type_alert" << m_pSourceMySQLController->FetchString("type_alert")
    );
    return record.copy();
}
// Collection-API variant: look up the admin user for `username` inside a
// read-only snapshot transaction. Commits only on a hit; a miss (or missing
// collection) lets the transaction abort via its destructor.
BSONObj RestAdminAccess::getAdminUser( const string& username ) const {
    Client::GodScope gs;
    LOCK_REASON(lockReason, "restapi: checking admin user");
    readlocktry rl(10000, lockReason);
    uassert( 16174 , "couldn't get read lock to check admin user" , rl.got() );
    Client::Context cx( "admin.system.users" );
    Client::Transaction txn(DB_TXN_READ_ONLY | DB_TXN_SNAPSHOT);
    BSONObj user;
    Collection *cl = getCollection( "admin.system.users" );
    if ( cl == NULL || !cl->findOne( BSON( "user" << username ) , user ) ) {
        return BSONObj();
    }
    txn.commit();
    return user.copy();
}
// Build the lookup condition {hostid, zabbix_server_id} for an ETL host
// model. Non-host models (failed downcast) yield zero-valued keys; the
// result is always an owned copy.
BSONObj CETLHostProcessor::GetBSONObjCondition(CETLModel* pETLModel) {
    long long hostId = 0;
    int zabbixServerId = 0;
    if (CETLHostModel* pHostModel = dynamic_cast<CETLHostModel*>(pETLModel)) {
        hostId = pHostModel->GetHostId();
        zabbixServerId = pHostModel->GetZabbixServerId();
    }
    BSONObj condition = BSON("hostid" << hostId << "zabbix_server_id" << zabbixServerId);
    return condition.copy();
}
// Authenticate `conn` as the internal (cluster) user using the globally
// installed auth parameters. Returns false when no parameters are set or
// when authentication fails.
bool authenticateInternalUser(DBClientWithCommands* conn){
    if (!isInternalAuthSet()) {
        log() << "ERROR: No authentication parameters set for internal user"
              << endl;
        return false;
    }

    try {
        BSONObj outgoingAuthParams;
        {
            // Copy under the lock; auth itself runs unlocked.
            boost::mutex::scoped_lock guard(authParamMutex);
            outgoingAuthParams = authParams.copy();
        }
        conn->auth(outgoingAuthParams);
        return true;
    }
    catch(const UserException& ex) {
        log() << "can't authenticate to " << conn->toString()
              << " as internal user, error: " << ex.what() << endl;
        return false;
    }
}
// Reposition the index scanner onto (keyObj, rid): after validating the
// index control block, locate the key in the B-tree from the root and
// remember the key/RID so a later advance can resume from here.
// Returns SDB_OK or an error code via the PD_CHECK/PD_RC_CHECK goto chain.
INT32 _rtnIXScanner::relocateRID ( const BSONObj &keyObj, const dmsRecordID &rid )
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY ( SDB__RTNIXSCAN_RELORID1 ) ;
   PD_CHECK ( _indexCB, SDB_OOM, error, PDERROR,
              "Failed to allocate memory for indexCB" ) ;
   // sanity check, make sure we are on valid index
   PD_CHECK ( _indexCB->isInitialized(), SDB_RTN_INDEX_NOTEXIST, error,
              PDERROR, "Index does not exist" ) ;
   PD_CHECK ( _indexCB->getFlag() == IXM_INDEX_FLAG_NORMAL,
              SDB_IXM_UNEXPECTED_STATUS, error, PDERROR,
              "Unexpected index status: %d", _indexCB->getFlag() ) ;
   {
      monAppCB * pMonAppCB = _cb ? _cb->getMonAppCB() : NULL ;
      // get root
      dmsExtentID rootExtent = _indexCB->getRoot() ;
      ixmExtent root ( rootExtent, _su->index() ) ;
      BOOLEAN found = FALSE ;
      // locate the new key, the returned RID is stored in _curIndexRID
      rc = root.locate ( keyObj, rid, _order, _curIndexRID, found,
                         _direction, _indexCB ) ;
      PD_RC_CHECK ( rc, PDERROR,
                    "Failed to locate from new keyobj and rid: %s, %d,%d",
                    keyObj.toString().c_str(), rid._extent, rid._offset ) ;
      // remember the relocation point; keyObj is deep-copied because the
      // caller's object may not outlive the scanner.
      _savedObj = keyObj.copy() ;
      _savedRID = rid ;
      DMS_MON_OP_COUNT_INC( pMonAppCB, MON_INDEX_READ, 1 ) ;
      DMS_MON_CONTEXT_COUNT_INC ( _pMonCtxCB, MON_INDEX_READ, 1 ) ;
   }
   // mark _init to true so that advance won't call keyLocate again
   _init = TRUE ;
done :
   PD_TRACE_EXITRC ( SDB__RTNIXSCAN_RELORID1, rc ) ;
   return rc ;
error :
   goto done ;
}
// Install the internal-user auth parameters under authParamMutex.
// For SCRAM-SHA-1, additionally attach a MONGODB-CR "fallbackParams"
// sub-document so authentication can fall back in 2.6/2.8 mixed-version
// clusters (per the inline comment below).
void setInternalUserAuthParams(const BSONObj& authParamsIn) {
    // First caller flips the "params are set" flag.
    if (!isInternalAuthSet()) {
        authParamsSet = true;
    }
    boost::mutex::scoped_lock lk(authParamMutex);
    // NOTE(review): String() throws if "mechanism" is absent or non-string —
    // presumably callers always supply it; confirm against call sites.
    if (authParamsIn["mechanism"].String() != "SCRAM-SHA-1") {
        // Non-SCRAM mechanisms are stored as-is (owned copy).
        authParams = authParamsIn.copy();
        return;
    }

    // Create authParams for legacy MONGODB-CR authentication for 2.6/2.8 mixed
    // mode if applicable.
    // Clone the params with the mechanism rewritten to MONGODB-CR...
    mmb::Document fallback(authParamsIn);
    fallback.root().findFirstChildNamed("mechanism").setValueString("MONGODB-CR");
    // ...and embed that clone as "fallbackParams" in the stored params.
    mmb::Document doc(authParamsIn);
    mmb::Element fallbackEl = doc.makeElementObject("fallbackParams");
    fallbackEl.setValueObject(fallback.getObject());
    doc.root().pushBack(fallbackEl);
    authParams = doc.getObject().copy();
}
// Core update implementation: apply `updateobj` to documents matching
// `patternOrig` in `ns`. Fast-paths simple _id queries, otherwise scans an
// optimized cursor; handles $-operator updates (with multi-update
// deduplication), whole-document replacement, and upsert.
// Interface unchanged; only formatting restored and comments added.
UpdateResult _updateObjects( const char* ns,
                             const BSONObj& updateobj,
                             const BSONObj& patternOrig,
                             bool upsert, bool multi,
                             bool logop , OpDebug& debug, bool fromMigrate,
                             const QueryPlanSelectionPolicy& planPolicy ) {
    TOKULOG(2) << "update: " << ns
               << " update: " << updateobj
               << " query: " << patternOrig
               << " upsert: " << upsert << " multi: " << multi << endl;

    debug.updateobj = updateobj;

    NamespaceDetails *d = getAndMaybeCreateNS(ns, logop);

    auto_ptr<ModSet> mods;
    // A leading '$' on the first field marks an operator ($set/$inc/...) update.
    const bool isOperatorUpdate = updateobj.firstElementFieldName()[0] == '$';
    bool modsAreIndexed = false;

    if ( isOperatorUpdate ) {
        if ( d->indexBuildInProgress() ) {
            // Include the background-build index's keys so mods touching them
            // are treated as indexed.
            set<string> bgKeys;
            d->inProgIdx().keyPattern().getFieldNames(bgKeys);
            mods.reset( new ModSet(updateobj, d->indexKeys(), &bgKeys) );
        }
        else {
            mods.reset( new ModSet(updateobj, d->indexKeys()) );
        }
        modsAreIndexed = mods->isIndexed();
    }

    // Fast path: single-document update addressed by _id with no indexed mods.
    int idIdxNo = -1;
    if ( planPolicy.permitOptimalIdPlan() && !multi && !modsAreIndexed &&
         (idIdxNo = d->findIdIndex()) >= 0 && mayUpdateById(d, patternOrig) ) {
        debug.idhack = true;
        IndexDetails &idx = d->idx(idIdxNo);
        BSONObj pk = idx.getKeyFromQuery(patternOrig);
        TOKULOG(3) << "_updateObjects using simple _id query, pattern "
                   << patternOrig << ", pk " << pk << endl;
        UpdateResult result = _updateById( pk, isOperatorUpdate, mods.get(), d, ns,
                                           updateobj, patternOrig, logop, debug,
                                           fromMigrate);
        if ( result.existing || ! upsert ) {
            return result;
        }
        else if ( upsert && ! isOperatorUpdate && ! logop) {
            // Replacement-style upsert miss: insert the update object directly.
            debug.upsert = true;
            BSONObj objModified = updateobj;
            insertAndLog( ns, d, objModified, logop, fromMigrate );
            return UpdateResult( 0 , 0 , 1 , updateobj );
        }
    }

    // General path: scan an optimized cursor over the match pattern.
    int numModded = 0;
    debug.nscanned = 0;
    shared_ptr<Cursor> c = getOptimizedCursor( ns, patternOrig, BSONObj(), planPolicy );

    if( c->ok() ) {
        set<BSONObj> seenObjects;
        MatchDetails details;
        auto_ptr<ClientCursor> cc;
        do {
            debug.nscanned++;
            if ( mods.get() && mods->hasDynamicArray() ) {
                // The Cursor must have a Matcher to record an elemMatchKey. But currently
                // a modifier on a dynamic array field may be applied even if there is no
                // elemMatchKey, so a matcher cannot be required.
                //verify( c->matcher() );
                details.requestElemMatchKey();
            }
            if ( !c->currentMatches( &details ) ) {
                c->advance();
                continue;
            }
            BSONObj currPK = c->currPK();
            if ( c->getsetdup( currPK ) ) {
                c->advance();
                continue;
            }
            BSONObj currentObj = c->current();
            BSONObj pattern = patternOrig;
            if ( logop ) {
                BSONObjBuilder idPattern;
                BSONElement id;
                // NOTE: If the matching object lacks an id, we'll log
                // with the original pattern. This isn't replay-safe.
                // It might make sense to suppress the log instead
                // if there's no id.
                if ( currentObj.getObjectID( id ) ) {
                    idPattern.append( id );
                    pattern = idPattern.obj();
                }
                else {
                    uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
                }
            }
            /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some regular ones at the moment. */
            struct LogOpUpdateDetails loud;
            loud.logop = logop;
            loud.ns = ns;
            loud.fromMigrate = fromMigrate;
            if ( isOperatorUpdate ) {
                if ( multi ) {
                    // Make our own copies of the currPK and currentObj before we invalidate
                    // them by advancing the cursor.
                    currPK = currPK.copy();
                    currentObj = currentObj.copy();
                    // Advance past the document to be modified. This used to be because of SERVER-5198,
                    // but TokuMX does it because we want to avoid needing to do manual deduplication
                    // of this PK on the next iteration if the current update modifies the next
                    // entry in the index. For example, an index scan over a:1 with mod {$inc: {a:1}}
                    // would cause every other key read to be a duplicate if we didn't advance here.
                    while ( c->ok() && currPK == c->currPK() ) {
                        c->advance();
                    }
                    // Multi updates need to do their own deduplication because updates may modify the
                    // keys the cursor is in the process of scanning over.
                    if ( seenObjects.count( currPK ) ) {
                        continue;
                    } else {
                        seenObjects.insert( currPK );
                    }
                }
                ModSet* useMods = mods.get();
                auto_ptr<ModSet> mymodset;
                if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
                    // Rebind positional ($) mods to the matched array element.
                    useMods = mods->fixDynamicArray( details.elemMatchKey() );
                    mymodset.reset( useMods );
                }
                auto_ptr<ModSetState> mss = useMods->prepare( currentObj );
                updateUsingMods( d, currPK, currentObj, *mss, &loud );
                numModded++;
                if ( ! multi )
                    return UpdateResult( 1 , 1 , numModded , BSONObj() );
                continue;
            } // end if operator is update
            // Whole-document replacement is single-document only.
            uassert( 10158 , "multi update only works with $ operators" , ! multi );
            updateNoMods( d, currPK, currentObj, updateobj, &loud );
            return UpdateResult( 1 , 0 , 1 , BSONObj() );
        } while ( c->ok() );
    } // endif

    if ( numModded )
        return UpdateResult( 1 , 1 , numModded , BSONObj() );

    // Nothing matched: handle upsert.
    if ( upsert ) {
        BSONObj newObj = updateobj;
        if ( updateobj.firstElementFieldName()[0] == '$' ) {
            // upsert of an $operation. build a default object
            BSONObj newObj = mods->createNewFromQuery( patternOrig );
            debug.fastmodinsert = true;
            insertAndLog( ns, d, newObj, logop, fromMigrate );
            return UpdateResult( 0 , 1 , 1 , newObj );
        }
        uassert( 10159 , "multi update only works with $ operators" , ! multi );
        debug.upsert = true;
        insertAndLog( ns, d, newObj, logop, fromMigrate );
        return UpdateResult( 0 , 0 , 1 , newObj );
    }
    return UpdateResult( 0 , isOperatorUpdate , 0 , BSONObj() );
}
// Unconditionally install (an owned copy of) the internal-user auth
// parameters. Always reports success.
bool setInternalUserAuthParams(BSONObj authParams) {
    internalSecurity.authParams = authParams.copy();
    return true;
}
/**
 * Run a query with a cursor provided by the query optimizer, or FindingStartCursor.
 * @returns true if client cursor was saved, false if the query has completed.
 */
// Interface unchanged; only formatting restored and comments added.
bool queryWithQueryOptimizer( int queryOptions, const string& ns,
                              const BSONObj &jsobj, CurOp& curop,
                              const BSONObj &query, const BSONObj &order,
                              const shared_ptr<ParsedQuery> &pq_shared,
                              const ConfigVersion &shardingVersionAtStart,
                              const bool getCachedExplainPlan,
                              const bool inMultiStatementTxn,
                              Message &result ) {
    const ParsedQuery &pq( *pq_shared );
    shared_ptr<Cursor> cursor;
    QueryPlanSummary queryPlan;

    // A numToReturn of 1 implies findOne(), which never tails.
    const bool tailable = pq.hasOption( QueryOption_CursorTailable ) &&
                          pq.getNumToReturn() != 1;

    LOG(1) << "query beginning read-only transaction. tailable: "
           << tailable << endl;

    BSONObj oldPlan;
    if (getCachedExplainPlan) {
        scoped_ptr<MultiPlanScanner> mps( MultiPlanScanner::make( ns.c_str(), query, order ) );
        oldPlan = mps->cachedPlanExplainSummary();
    }

    cursor = getOptimizedCursor( ns.c_str(), query, order,
                                 QueryPlanSelectionPolicy::any(), pq_shared,
                                 false, &queryPlan );
    verify( cursor );

    // Tailable cursors must be marked as such before any use. This is so that
    // the implementation knows that uncommitted data cannot be returned.
    if ( tailable ) {
        cursor->setTailable();
    }

    scoped_ptr<QueryResponseBuilder> queryResponseBuilder(
        QueryResponseBuilder::make( pq, cursor, queryPlan, oldPlan ) );
    bool saveClientCursor = false;
    int options = QueryOption_NoCursorTimeout;
    if (pq.hasOption( QueryOption_OplogReplay )) {
        options |= QueryOption_OplogReplay;
    }
    // create a client cursor that does not create a cursorID.
    // The cursor ID will be created if and only if we save
    // the client cursor further below
    ClientCursor::Holder ccPointer(
        new ClientCursor( options, cursor, ns, BSONObj(), false, false ) );

    // for oplog cursors, we check if we are reading data that is too old and might
    // be stale.
    bool opChecked = false;
    bool slaveLocationUpdated = false;
    BSONObj last;
    bool lastBSONObjSet = false;
    for ( ; cursor->ok(); cursor->advance() ) {
        if ( pq.getMaxScan() && cursor->nscanned() > pq.getMaxScan() ) {
            break;
        }
        if ( !queryResponseBuilder->addMatch() ) {
            continue;
        }
        // Note slave's position in the oplog.
        if ( pq.hasOption( QueryOption_OplogReplay ) ) {
            BSONObj current = cursor->current();
            // Copy: the cursor may invalidate `current` on advance.
            last = current.copy();
            lastBSONObjSet = true;
            // the first row returned is equal to the last element that
            // the slave has synced up to, so we might as well update
            // the slave location
            if (!slaveLocationUpdated) {
                ccPointer->updateSlaveLocation(curop);
                slaveLocationUpdated = true;
            }
            // check if data we are about to return may be too stale
            if (!opChecked) {
                ccPointer->storeOpForSlave(current);
                uassert(16785, "oplog cursor reading data that is too old",
                        !ccPointer->lastOpForSlaveTooOld());
                opChecked = true;
            }
        }
        if ( pq.isExplain() ) {
            if ( queryResponseBuilder->enoughTotalResults() ) {
                break;
            }
        }
        else if ( queryResponseBuilder->enoughForFirstBatch() ) {
            // if only 1 requested, no cursor saved for efficiency...we assume it is findOne()
            if ( pq.wantMore() && pq.getNumToReturn() != 1 ) {
                queryResponseBuilder->finishedFirstBatch();
                if ( cursor->advance() ) {
                    saveClientCursor = true;
                }
            }
            break;
        }
    }

    // If the tailing request succeeded
    if ( cursor->tailable() ) {
        saveClientCursor = true;
    }

    if ( ! shardingState.getVersion( ns ).isWriteCompatibleWith( shardingVersionAtStart ) ) {
        // if the version changed during the query
        // we might be missing some data
        // and its safe to send this as mongos can resend
        // at this point
        throw SendStaleConfigException( ns , "version changed during initial query",
                                        shardingVersionAtStart,
                                        shardingState.getVersion( ns ) );
    }

    int nReturned = queryResponseBuilder->handoff( result );

    ccPointer.reset();
    long long cursorid = 0;
    if ( saveClientCursor ) {
        // Create a new ClientCursor, with a default timeout.
        ccPointer.reset( new ClientCursor( queryOptions, cursor, ns,
                                           jsobj.getOwned(), inMultiStatementTxn ) );
        cursorid = ccPointer->cursorid();
        DEV tlog(2) << "query has more, cursorid: " << cursorid << endl;
        if ( !ccPointer->ok() && ccPointer->c()->tailable() ) {
            DEV tlog() << "query has no more but tailable, cursorid: "
                       << cursorid << endl;
        }
        if( queryOptions & QueryOption_Exhaust ) {
            curop.debug().exhaust = true;
        }
        // Set attributes for getMore.
        ccPointer->setChunkManager( queryResponseBuilder->chunkManager() );
        ccPointer->setPos( nReturned );
        ccPointer->pq = pq_shared;
        ccPointer->fields = pq.getFieldPtr();
        if (pq.hasOption( QueryOption_OplogReplay ) && lastBSONObjSet) {
            ccPointer->storeOpForSlave(last);
        }
        if (!inMultiStatementTxn) {
            // This cursor is not part of a multi-statement transaction, so
            // we pass off the current client's transaction stack to the
            // cursor so that it may be live as long as the cursor.
            cc().swapTransactionStack(ccPointer->transactions);
            verify(!cc().hasTxn());
        }
        ccPointer.release();
    }

    QueryResult *qr = (QueryResult *) result.header();
    qr->cursorId = cursorid;
    curop.debug().cursorid = ( cursorid == 0 ? -1 : qr->cursorId );
    qr->setResultFlagsToOk();
    // qr->len is updated automatically by appendData()
    curop.debug().responseLength = qr->len;
    qr->setOperation(opReply);
    qr->startingFrom = 0;
    qr->nReturned = nReturned;

    curop.debug().nscanned = ( cursor ? cursor->nscanned() : 0LL );
    curop.debug().ntoskip = pq.getSkip();
    curop.debug().nreturned = nReturned;

    return saveClientCursor;
}
// Append an owned copy of `obj` to the mock server's in-memory collection
// for `ns`, under the data spinlock. `flags` is accepted for API parity but
// unused.
void MockRemoteDBServer::insert(const string& ns, BSONObj obj, int flags) {
    scoped_spinlock sLock(_lock);
    _dataMgr[ns].push_back(obj.copy());
}
// Construct the model around a deep copy of the supplied BSON object, so
// the model owns its data independently of the caller's buffer.
CMongodbModel::CMongodbModel(BSONObj objBSON) {
    m_objBSON = objBSON.copy();
}