// Entry point for the aggregation command on mongod (legacy PipelineCursor era).
// Parses the pipeline from 'cmdObj', optionally round-trips it through BSON on
// DEBUG builds, attaches a cursor source, and either streams results through a
// ClientCursor (cursor form) or runs to completion into 'result'.
// Returns false (with 'errmsg' set) only when the pipeline fails to parse.
virtual bool run(const string &db, BSONObj &cmdObj, int options, string &errmsg, BSONObjBuilder &result, bool fromRepl) {
    string ns = parseNs(db, cmdObj);

    intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(InterruptStatusMongod::status, NamespaceString(ns));

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

    if (pPipeline->getSplitMongodPipeline()) {
        // This is only used in testing
        return executeSplitPipeline(result, errmsg, ns, db, pPipeline, pCtx);
    }

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->inShard) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        parsed = pPipeline->serialize().toBson();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    // This does the mongod-specific stuff like creating a cursor
    PipelineD::prepareCursorSource(pPipeline, nsToDatabase(ns), pCtx);
    pPipeline->stitch();

    if (isCursorCommand(cmdObj)) {
        CursorId id;
        {
            // Set up cursor while holding the read lock; only the id escapes the scope.
            Client::ReadContext ctx(ns);
            shared_ptr<Cursor> cursor(new PipelineCursor(pPipeline));
            // cc will be owned by cursor manager
            ClientCursor* cc = new ClientCursor(0, cursor, ns, cmdObj.getOwned());
            id = cc->cursorid();
        }
        handleCursorCommand(id, cmdObj, result);
    }
    else {
        // Non-cursor form: run the whole pipeline now and emit results inline.
        pPipeline->run(result);
    }

    // If the pipeline ends in $out, report the namespace that was written to.
    if (DocumentSourceOut* out = dynamic_cast<DocumentSourceOut*>(pPipeline->output())) {
        result.append("outputNs", out->getOutputNs());
    }

    return true;
}
virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::find); out->push_back(Privilege(parseNs(dbname, cmdObj), actions)); }
// Authorizes the command as a find on the target namespace; the presence of a
// replication term field is forwarded to checkAuthForFind.
Status checkAuthForCommand(ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj) override {
    const NamespaceString targetNss(parseNs(dbname, cmdObj));
    const bool includesTerm = cmdObj.hasField(kTermField);
    return AuthorizationSession::get(client)->checkAuthForFind(targetNss, includesTerm);
}
void WriteCmd::addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(_action); out->push_back(Privilege(parseNs(dbname, cmdObj), actions)); }
// Explains a find command without executing it for real results.
// Pipeline: parse to LiteParsedQuery -> canonicalize (under collection lock,
// required for $where) -> build a PlanExecutor -> delegate to Explain.
// Returns a non-OK Status on any parse/planning failure.
Status FindCmd::explain(OperationContext* txn, const std::string& dbname, const BSONObj& cmdObj, ExplainCommon::Verbosity verbosity, BSONObjBuilder* out) const {
    const string fullns = parseNs(dbname, cmdObj);

    // Parse the command BSON to a LiteParsedQuery.
    LiteParsedQuery* rawLpq;
    bool isExplain = true;
    Status lpqStatus = LiteParsedQuery::make(fullns, cmdObj, isExplain, &rawLpq);
    if (!lpqStatus.isOK()) {
        return lpqStatus;
    }
    auto_ptr<LiteParsedQuery> lpq(rawLpq);

    const NamespaceString nss(fullns);

    // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
    // This requires a lock on the collection in case we're parsing $where: where-specific
    // parsing code assumes we have a lock and creates execution machinery that requires it.
    CanonicalQuery* rawCq;
    WhereCallbackReal whereCallback(txn, nss.db());
    Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
    if (!canonStatus.isOK()) {
        return canonStatus;
    }
    auto_ptr<CanonicalQuery> cq(rawCq);

    AutoGetCollectionForRead ctx(txn, nss);
    // The collection may be NULL. If so, getExecutor() should handle it by returning
    // an execution tree with an EOFStage.
    Collection* collection = ctx.getCollection();

    // We have a parsed query. Time to get the execution plan for it.
    // NOTE: cq.release() transfers ownership into the executor factory; cq must not
    // be dereferenced after the branch that releases it.
    PlanExecutor* rawExec;
    Status execStatus = Status::OK();
    if (cq->getParsed().getOptions().oplogReplay) {
        // oplogReplay queries get a specialized start-point optimization.
        execStatus = getOplogStartHack(txn, collection, cq.release(), &rawExec);
    }
    else {
        size_t options = QueryPlannerParams::DEFAULT;
        if (shardingState.needCollectionMetadata(cq->getParsed().ns())) {
            options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
        }
        execStatus = getExecutor(txn, collection, cq.release(), &rawExec, options);
    }
    if (!execStatus.isOK()) {
        return execStatus;
    }
    scoped_ptr<PlanExecutor> exec(rawExec);
    exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);

    // Got the execution tree. Explain it.
    return Explain::explainStages(exec.get(), verbosity, out);
}
// Legacy group command entry point. Validates that server-side JS is enabled,
// extracts the query ("cond"/"condition"/default), the grouping key (either a
// literal "key" object or a "$keyf" function, mutually exclusive), the required
// "$reduce" function, the required "initial" object, and an optional "finalize"
// function, then delegates to group() under a read lock.
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
    if ( !globalScriptEngine ) {
        errmsg = "server-side JavaScript execution is disabled";
        return false;
    }

    /* db.$cmd.findOne( { group : <p> } ) */
    const BSONObj& p = jsobj.firstElement().embeddedObjectUserCheck();

    // The filter may be spelled "cond", "condition", or via getQuery()'s fallback.
    BSONObj q;
    if ( p["cond"].type() == Object )
        q = p["cond"].embeddedObject();
    else if ( p["condition"].type() == Object )
        q = p["condition"].embeddedObject();
    else
        q = getQuery( p );

    // Exactly one of 'key' (object) or '$keyf' (JS function) may be supplied.
    BSONObj key;
    string keyf;
    if ( p["key"].type() == Object ) {
        key = p["key"].embeddedObjectUserCheck();
        if ( ! p["$keyf"].eoo() ) {
            errmsg = "can't have key and $keyf";
            return false;
        }
    }
    else if ( p["$keyf"].type() ) {
        keyf = p["$keyf"]._asCode();
    }
    else {
        // no key specified, will use entire object as key
    }

    BSONElement reduce = p["$reduce"];
    if ( reduce.eoo() ) {
        errmsg = "$reduce has to be set";
        return false;
    }

    BSONElement initial = p["initial"];
    if ( initial.type() != Object ) {
        errmsg = "initial has to be an object";
        return false;
    }

    string finalize;
    if (p["finalize"].type())
        finalize = p["finalize"]._asCode();

    const string ns = parseNs(dbname, jsobj);
    // Hold a read lock for the duration of the actual grouping work.
    Client::ReadContext ctx(txn, ns);

    // For CodeWScope reduce functions, also pass the attached scope data.
    return group( txn, ctx.ctx().db() , ns , q ,
                  key , keyf , reduce._asCode() ,
                  reduce.type() != CodeWScope ? 0 : reduce.codeWScopeScopeDataUnsafe() ,
                  initial.embeddedObject() , finalize ,
                  errmsg , result );
}
// Maps a command's target namespace to a resource pattern: a bare database name
// (no '.') yields a database pattern, otherwise an exact-namespace pattern.
ResourcePattern Command::parseResourcePattern(const std::string& dbname, const BSONObj& cmdObj) const {
    const std::string target = parseNs(dbname, cmdObj);
    const bool dbOnly = (target.find('.') == std::string::npos);
    return dbOnly ? ResourcePattern::forDatabaseName(target)
                  : ResourcePattern::forExactNamespace(NamespaceString(target));
}
/**
 * Parses a count command object, 'cmdObj'.
 *
 * On success, fills in the out-parameter 'request' and returns an OK status.
 *
 * Returns a failure status if 'cmdObj' is not well formed.
 */
Status parseRequest(const std::string& dbname, const BSONObj& cmdObj, CountRequest* request) const {
    // "skip": optional; must be a non-negative number when present.
    long long skipValue = 0;
    BSONElement skipElt = cmdObj["skip"];
    if (skipElt.isNumber()) {
        skipValue = skipElt.numberLong();
        if (skipValue < 0) {
            return Status(ErrorCodes::BadValue, "skip value is negative in count query");
        }
    }
    else if (skipElt.ok()) {
        return Status(ErrorCodes::BadValue, "skip value is not a valid number");
    }

    // "limit": optional; any number is accepted.
    long long limitValue = 0;
    BSONElement limitElt = cmdObj["limit"];
    if (limitElt.isNumber()) {
        limitValue = limitElt.numberLong();
    }
    else if (limitElt.ok()) {
        return Status(ErrorCodes::BadValue, "limit value is not a valid number");
    }

    // For counts, limit and -limit mean the same thing.
    // NOTE(review): negating LLONG_MIN here is undefined behavior — confirm upstream
    // callers cannot supply it, or reject it explicitly.
    if (limitValue < 0) {
        limitValue = -limitValue;
    }

    // We don't validate that "query" is a nested object due to SERVER-15456.
    BSONObj queryObj = cmdObj.getObjectField("query");

    // "hint" may be an index spec object or an index name string (wrapped as $hint).
    BSONObj hintObj;
    BSONElement hintElt = cmdObj["hint"];
    if (Object == hintElt.type()) {
        hintObj = hintElt.Obj();
    }
    else if (String == hintElt.type()) {
        const std::string hintName = cmdObj.getStringField("hint");
        hintObj = BSON("$hint" << hintName);
    }

    const std::string parsedNs = parseNs(dbname, cmdObj);
    if (!nsIsFull(parsedNs)) {
        return Status(ErrorCodes::BadValue, "collection name missing");
    }

    // Parsed correctly. Fill out 'request' with the results.
    request->ns = parsedNs;
    request->query = queryObj;
    request->hint = hintObj;
    request->limit = limitValue;
    request->skip = skipValue;

    // By default, count requests are regular count not explain of count.
    request->explain = false;
    return Status::OK();
}
// Delegates write-command authorization to the shared helper for this command's write type.
Status checkAuthForCommand( ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj ) {
    const NamespaceString targetNss( parseNs( dbname, cmdObj ) );
    return auth::checkAuthForWriteCommand( client->getAuthorizationSession(),
                                           _writeType,
                                           targetNss,
                                           cmdObj );
}
// Aggregation command entry point (PipelineRunner era). Parses the pipeline,
// optionally round-trips it through BSON on DEBUG builds, attaches a cursor
// source, then services explain, cursor, or inline-result forms of the command.
virtual bool run(const string &db, BSONObj &cmdObj, int options, string &errmsg, BSONObjBuilder &result, bool fromRepl) {
    string ns = parseNs(db, cmdObj);

    intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(InterruptStatusMongod::status, NamespaceString(ns));
    // Scratch directory for stages that spill to disk.
    pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->inShard) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        parsed = pPipeline->serialize().toBson();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    // This does the mongod-specific stuff like creating a cursor
    PipelineD::prepareCursorSource(pPipeline, pCtx);
    pPipeline->stitch();

    if (pPipeline->isExplain()) {
        result << "stages" << Value(pPipeline->writeExplainOps());
        return true; // don't do any actual execution
    }

    if (isCursorCommand(cmdObj)) {
        CursorId id;
        {
            // Set up cursor under the read lock; only the id leaves this scope.
            Client::ReadContext ctx(ns);
            ClientCursor* cc = new ClientCursor(new PipelineRunner(pPipeline));
            cc->isAggCursor = true; // enable special locking and ns deletion behavior
            id = cc->cursorid();
        }
        handleCursorCommand(id, cmdObj, result);
    }
    else {
        // Non-cursor form: run the entire pipeline now.
        pPipeline->run(result);
    }

    return true;
}
// splitVector requires the splitVector action on the exact target namespace.
Status checkAuthForCommand(Client* client, const std::string& dbname, const BSONObj& cmdObj) const override {
    const NamespaceString targetNss(parseNs(dbname, cmdObj));
    const ResourcePattern target = ResourcePattern::forExactNamespace(targetNss);
    if (AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
            target, ActionType::splitVector)) {
        return Status::OK();
    }
    return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
// Running this command requires the 'find' action on the target namespace.
Status checkAuthForCommand(ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj) {
    const NamespaceString targetNss(parseNs(dbname, cmdObj));
    if (client->getAuthorizationSession()->isAuthorizedForActionsOnNamespace(
            targetNss, ActionType::find)) {
        return Status::OK();
    }
    return Status(ErrorCodes::Unauthorized, "unauthorized");
}
// Requires the getShardVersion action on the exact target namespace.
virtual Status checkAuthForCommand(ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj) {
    const ResourcePattern target =
        ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj)));
    if (client->getAuthorizationSession()->isAuthorizedForActionsOnResource(
            target, ActionType::getShardVersion)) {
        return Status::OK();
    }
    return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
// Parses a count command object into 'request'.
// Validates skip (non-negative number), limit (number; sign is ignored), query
// (object when present), and hint (object, or string wrapped as $hint).
// Returns BadValue on any malformed field; OK otherwise.
Status CmdCount::parseRequest(const std::string& dbname, const BSONObj& cmdObj, CountRequest* request) const {
    const string parsedNs = parseNs(dbname, cmdObj);

    long long skipValue = 0;
    BSONElement skipElt = cmdObj["skip"];
    if (skipElt.isNumber()) {
        skipValue = skipElt.numberLong();
        if (skipValue < 0) {
            return Status(ErrorCodes::BadValue, "skip value is negative in count query");
        }
    }
    else if (skipElt.ok()) {
        return Status(ErrorCodes::BadValue, "skip value is not a valid number");
    }

    long long limitValue = 0;
    BSONElement limitElt = cmdObj["limit"];
    if (limitElt.isNumber()) {
        limitValue = limitElt.numberLong();
    }
    else if (limitElt.ok()) {
        return Status(ErrorCodes::BadValue, "limit value is not a valid number");
    }

    // For counts, limit and -limit mean the same thing.
    // NOTE(review): negating LLONG_MIN is undefined behavior — confirm callers
    // cannot supply it, or reject it explicitly.
    if (limitValue < 0) {
        limitValue = -limitValue;
    }

    // "query" must be an object when it is present at all.
    BSONObj queryObj;
    BSONElement queryElt = cmdObj["query"];
    if (!queryElt.eoo()) {
        if (Object != queryElt.type()) {
            return Status(ErrorCodes::BadValue, "query field for count must be an object");
        }
        queryObj = cmdObj.getObjectField("query");
    }

    BSONObj hintObj;
    BSONElement hintElt = cmdObj["hint"];
    if (Object == hintElt.type()) {
        hintObj = hintElt.Obj();
    }
    else if (String == hintElt.type()) {
        const std::string hintName = cmdObj.getStringField("hint");
        hintObj = BSON("$hint" << hintName);
    }

    // Parsed correctly. Fill out 'request' with the results.
    request->ns = parsedNs;
    request->query = queryObj;
    request->hint = hintObj;
    request->limit = limitValue;
    request->skip = skipValue;
    return Status::OK();
}
// Parses the group command's nested parameter object into 'request'.
// Accepts the filter under "cond"/"condition"/"query"/"q" (first match wins),
// a grouping key as either a literal "key" object or a "$keyf" JS function
// (mutually exclusive), a required "$reduce" function (with optional CodeWScope
// scope), a required "initial" object, and an optional "finalize" function.
// All extracted BSON is copied via getOwned() so 'request' outlives 'cmdObj'.
Status GroupCommand::parseRequest(const string& dbname, const BSONObj& cmdObj, GroupRequest* request) const {
    request->ns = parseNs(dbname, cmdObj);

    // By default, group requests are regular group not explain of group.
    const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();

    if (p["cond"].type() == Object) {
        request->query = p["cond"].embeddedObject().getOwned();
    }
    else if (p["condition"].type() == Object) {
        request->query = p["condition"].embeddedObject().getOwned();
    }
    else if (p["query"].type() == Object) {
        request->query = p["query"].embeddedObject().getOwned();
    }
    else if (p["q"].type() == Object) {
        request->query = p["q"].embeddedObject().getOwned();
    }

    if (p["key"].type() == Object) {
        request->keyPattern = p["key"].embeddedObjectUserCheck().getOwned();
        if (!p["$keyf"].eoo()) {
            return Status(ErrorCodes::BadValue, "can't have key and $keyf");
        }
    }
    else if (!p["$keyf"].eoo()) {
        request->keyFunctionCode = p["$keyf"]._asCode();
    }
    else {
        // No key specified.  Use the entire object as the key.
    }

    BSONElement reduce = p["$reduce"];
    if (reduce.eoo()) {
        return Status(ErrorCodes::BadValue, "$reduce has to be set");
    }
    request->reduceCode = reduce._asCode();

    if (reduce.type() == CodeWScope) {
        request->reduceScope = reduce.codeWScopeScopeDataUnsafe();
    }

    if (p["initial"].type() != Object) {
        return Status(ErrorCodes::BadValue, "initial has to be an object");
    }
    request->initial = p["initial"].embeddedObject().getOwned();

    if (!p["finalize"].eoo()) {
        request->finalize = p["finalize"]._asCode();
    }

    return Status::OK();
}
// The explain command: extracts the optional "verbosity" string (defaulting to
// queryPlanner), unwraps the nested command being explained, looks up that
// command's handler, and delegates to its explain() method.
bool CmdExplain::run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    // Get the verbosity.
    Explain::Verbosity verbosity = Explain::QUERY_PLANNER;
    if (!cmdObj["verbosity"].eoo()) {
        const char* verbStr = cmdObj["verbosity"].valuestrsafe();
        if (mongoutils::str::equals(verbStr, "executionStats")) {
            verbosity = Explain::EXEC_STATS;
        }
        else if (mongoutils::str::equals(verbStr, "allPlansExecution")) {
            verbosity = Explain::EXEC_ALL_PLANS;
        }
        else if (!mongoutils::str::equals(verbStr, "queryPlanner")) {
            // Any other string is an error.
            errmsg = "verbosity string must be one of "
                     "{'queryPlanner', 'executionStats', 'allPlansExecution'}";
            return false;
        }
    }

    if (Object != cmdObj.firstElement().type()) {
        errmsg = "explain command requires a nested object";
        return false;
    }

    // This is the nested command which we are explaining.
    BSONObj explainObj = cmdObj.firstElement().Obj();

    // NOTE: the namespace is parsed from the *nested* command, not the explain wrapper.
    const string ns = parseNs(dbname, explainObj);

    Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
    if (NULL == commToExplain) {
        mongoutils::str::stream ss;
        ss << "unknown command: " << explainObj.firstElementFieldName();
        Status explainStatus(ErrorCodes::CommandNotFound, ss);
        return appendCommandStatus(result, explainStatus);
    }

    // Actually call the nested command's explain(...) method.
    Status explainStatus = commToExplain->explain(txn, dbname, explainObj, verbosity, &result);
    if (!explainStatus.isOK()) {
        return appendCommandStatus(result, explainStatus);
    }

    return true;
}
// Authorizes a write command via the shared helper; on failure, also records the
// error through the legacy getLastError mechanism before returning it.
Status WriteCmd::checkAuthForCommand( ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj ) {
    const NamespaceString targetNss( parseNs( dbname, cmdObj ) );
    Status status( auth::checkAuthForWriteCommand( client->getAuthorizationSession(),
                                                   _writeType,
                                                   targetNss,
                                                   cmdObj ));
    if ( !status.isOK() ) {
        setLastError( status.code(), status.reason().c_str() );
    }
    return status;
}
// This command both inserts documents and may implicitly build indexes, so it
// demands insert AND createIndex on the exact target namespace up front.
virtual Status checkAuthForCommand(ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj) {
    ActionSet requiredActions;
    requiredActions.addAction(ActionType::insert);
    requiredActions.addAction(ActionType::createIndex); // SERVER-11418
    const NamespaceString targetNss(parseNs(dbname, cmdObj));
    const bool authorized =
        client->getAuthorizationSession()->isAuthorizedForActionsOnResource(
            ResourcePattern::forExactNamespace(targetNss), requiredActions);
    if (!authorized) {
        return Status(ErrorCodes::Unauthorized, "Unauthorized");
    }
    return Status::OK();
}
// Explains a find command (modern StatusWith era): validate the namespace,
// parse to LiteParsedQuery, canonicalize, build a find executor, and hand the
// stage tree to Explain. Returns a non-OK Status on any parse/planning failure.
Status explain(OperationContext* txn, const std::string& dbname, const BSONObj& cmdObj, ExplainCommon::Verbosity verbosity, const rpc::ServerSelectionMetadata&, BSONObjBuilder* out) const override {
    const std::string fullns = parseNs(dbname, cmdObj);
    const NamespaceString nss(fullns);
    if (!nss.isValid()) {
        return {ErrorCodes::InvalidNamespace,
                str::stream() << "Invalid collection name: " << nss.ns()};
    }

    // Parse the command BSON to a LiteParsedQuery.
    const bool isExplain = true;
    auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
    if (!lpqStatus.isOK()) {
        return lpqStatus.getStatus();
    }

    // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
    ExtensionsCallbackReal extensionsCallback(txn, &nss);
    auto statusWithCQ = CanonicalQuery::canonicalize(lpqStatus.getValue().release(), extensionsCallback);
    if (!statusWithCQ.isOK()) {
        return statusWithCQ.getStatus();
    }
    std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

    AutoGetCollectionForRead ctx(txn, nss);
    // The collection may be NULL. If so, getExecutor() should handle it by returning
    // an execution tree with an EOFStage.
    Collection* collection = ctx.getCollection();

    // We have a parsed query. Time to get the execution plan for it.
    auto statusWithPlanExecutor = getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
    if (!statusWithPlanExecutor.isOK()) {
        return statusWithPlanExecutor.getStatus();
    }
    std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());

    // Got the execution tree. Explain it.
    Explain::explainStages(exec.get(), verbosity, out);
    return Status::OK();
}
// cloneCollection command: copies one collection (optionally filtered by
// "query") from a remote host named in "from" into this server. Refuses to
// clone from itself. Indexes are copied unless "copyindexes" is a false boolean.
virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    string fromhost = cmdObj.getStringField("from");
    if ( fromhost.empty() ) {
        errmsg = "missing 'from' parameter";
        return false;
    }

    {
        // Cloning from ourselves would deadlock/loop; reject it.
        HostAndPort h(fromhost);
        if (repl::isSelf(h)) {
            errmsg = "can't cloneCollection from self";
            return false;
        }
    }

    string collection = parseNs(dbname, cmdObj);
    if ( collection.empty() ) {
        errmsg = "bad 'cloneCollection' value";
        return false;
    }

    BSONObj query = cmdObj.getObjectField("query");
    if ( query.isEmpty() )
        query = BSONObj();

    // "copyindexes" defaults to true unless explicitly a boolean false.
    BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
    bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;

    log() << "cloneCollection.  db:" << dbname << " collection:" << collection << " from: " << fromhost << " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;

    Cloner cloner;
    // The cloner takes ownership of the connection once setConnection succeeds.
    auto_ptr<DBClientConnection> myconn;
    myconn.reset( new DBClientConnection() );
    if ( ! myconn->connect( fromhost , errmsg ) )
        return false;

    cloner.setConnection( myconn.release() );

    return cloner.copyCollection(txn, collection, query, errmsg, true, false, copyIndexes);
}
// Authorizes the write; failures are also mirrored into getLastError state.
Status checkAuthForCommand( ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj ) {
    const NamespaceString targetNss( parseNs( dbname, cmdObj ) );
    const Status status = auth::checkAuthForWriteCommand( client->getAuthorizationSession(),
                                                          _writeType,
                                                          targetNss,
                                                          cmdObj );

    // TODO: Remove this when we standardize GLE reporting from commands
    if ( !status.isOK() ) {
        setLastError( status.code(), status.reason().c_str() );
    }

    return status;
}
bool PipelineCommand::run(const string &db, BSONObj &cmdObj, int options, string &errmsg, BSONObjBuilder &result, bool fromRepl) { intrusive_ptr<ExpressionContext> pCtx( ExpressionContext::create(&InterruptStatusMongod::status)); /* try to parse the command; if this fails, then we didn't run */ intrusive_ptr<Pipeline> pPipeline( Pipeline::parseCommand(errmsg, cmdObj, pCtx)); if (!pPipeline.get()) return false; string ns(parseNs(db, cmdObj)); if (pPipeline->isExplain()) return runExplain(result, errmsg, ns, db, pPipeline, pCtx); else return runExecute(result, errmsg, ns, db, pPipeline, pCtx); }
// resync command: under the global write lock, either delegates to the replica
// set's resync (refusing on primaries) or, for master/slave replication, forces
// a resync of all dead sources (requiring "force" unless replication is already
// marked dead).
virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    const std::string ns = parseNs(dbname, cmdObj);

    // Resync mutates replication state; take the global write lock for the duration.
    Lock::GlobalWrite globalWriteLock;
    Client::Context ctx(ns);
    if (replSettings.usingReplSets()) {
        if (!theReplSet) {
            errmsg = "no replication yet active";
            return false;
        }
        if (theReplSet->isPrimary()) {
            errmsg = "primaries cannot resync";
            return false;
        }
        return theReplSet->resync(errmsg);
    }

    // below this comment pertains only to master/slave replication
    if ( cmdObj.getBoolField( "force" ) ) {
        if ( !waitForSyncToFinish( errmsg ) )
            return false;
        replAllDead = "resync forced";
    }
    // Without "force", resync is only permitted once replication is already dead.
    if ( !replAllDead ) {
        errmsg = "not dead, no need to resync";
        return false;
    }
    if ( !waitForSyncToFinish( errmsg ) )
        return false;

    ReplSource::forceResyncDead( txn, "client" );
    result.append( "info", "triggered resync for all sources" );
    return true;
}
// Aggregation command entry point (toBson round-trip era). Parses the pipeline,
// optionally round-trips it through BSON on DEBUG builds, attaches the cursor
// source, and runs to completion (no cursor/explain branches in this version).
virtual bool run(const string &db, BSONObj &cmdObj, int options, string &errmsg, BSONObjBuilder &result, bool fromRepl) {
    intrusive_ptr<ExpressionContext> pCtx = ExpressionContext::create(&InterruptStatusMongod::status);

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

    string ns = parseNs(db, cmdObj);

    if (pPipeline->getSplitMongodPipeline()) {
        // This is only used in testing
        return executeSplitPipeline(result, errmsg, ns, db, pPipeline, pCtx);
    }

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->getInShard()) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        BSONObjBuilder bb;
        pPipeline->toBson(&bb);
        parsed = bb.obj();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    // This does the mongod-specific stuff like creating a cursor
    PipelineD::prepareCursorSource(pPipeline, nsToDatabase(ns), pCtx);

    return pPipeline->run(result, errmsg);
}
// mongos-side explain for find: parses the query, wraps the original command as
// an explain, fans it out to the owning shards, times the round trip, and
// merges the per-shard explains into a single result.
Status ClusterFindCmd::explain(OperationContext* txn, const std::string& dbname, const BSONObj& cmdObj, ExplainCommon::Verbosity verbosity, BSONObjBuilder* out) const {
    const string fullns = parseNs(dbname, cmdObj);

    // Parse the command BSON to a LiteParsedQuery.
    LiteParsedQuery* rawLpq;
    bool isExplain = true;
    Status lpqStatus = LiteParsedQuery::make(fullns, cmdObj, isExplain, &rawLpq);
    if (!lpqStatus.isOK()) {
        return lpqStatus;
    }
    auto_ptr<LiteParsedQuery> lpq(rawLpq);

    BSONObjBuilder explainCmdBob;
    ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);

    // We will time how long it takes to run the commands on the shards.
    Timer timer;

    // The filter is used to target only the shards that may hold matching data.
    vector<Strategy::CommandResult> shardResults;
    STRATEGY->commandOp(dbname, explainCmdBob.obj(), lpq->getOptions().toInt(), fullns, lpq->getFilter(), &shardResults);

    long long millisElapsed = timer.millis();

    const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);

    return ClusterExplain::buildExplainResult(shardResults, mongosStageName, millisElapsed, out);
}
// Generic write-command entry point: validates the target collection name,
// briefly acquires a read context so CurOp::_ns is set for profiling, parses
// the batch, and hands it to the WriteBatchExecutor.
bool WriteCmd::run(const string& dbName, BSONObj& cmdObj, int options, string& errMsg, BSONObjBuilder& result, bool fromRepl) {
    verify(!fromRepl); // Can't be run on secondaries (logTheOp() == false, slaveOk() == false).

    if (cmdObj.firstElementType() != mongo::String) {
        errMsg = "expected string type for collection name";
        return false;
    }
    string ns = parseNs(dbName, cmdObj);
    if (!NamespaceString(ns).isValid()) {
        errMsg = mongoutils::str::stream() << "invalid namespace: \"" << ns << "\"";
        return false;
    }

    {
        // Commands with locktype == NONE need to acquire a Context in order to set
        // CurOp::_ns.  Setting a CurOp's namespace is necessary for higher-level
        // functionality (e.g. profiling) to operate on the correct database (note that
        // WriteBatchExecutor doesn't do this for us, since its job is to create child CurOp
        // objects and operate on them).
        //
        // Acquire ReadContext momentarily, for satisfying this purpose.
        Client::ReadContext ctx(dbName + ".$cmd");
    }

    WriteBatch writeBatch(ns, _writeType);
    if (!writeBatch.parse(cmdObj, &errMsg)) {
        return false;
    }

    WriteBatchExecutor writeBatchExecutor(&cc(), &globalOpCounters, lastError.get());
    return writeBatchExecutor.executeBatch(writeBatch, &errMsg, &result);
}
// Aggregation command entry point (PlanExecutor/ClientCursorPin era). Parses
// the pipeline, builds a PlanExecutor wrapping it (via PipelineProxyStage),
// registers it in a ClientCursor when a collection exists, and then services
// explain, cursor, or inline-result forms. Cursor cleanup on error paths is
// handled via the pin inside the try/catch.
virtual bool run(OperationContext* txn, const string &db, BSONObj &cmdObj, int options, string &errmsg, BSONObjBuilder &result, bool fromRepl) {
    NamespaceString nss(parseNs(db, cmdObj));
    if (nss.coll().empty()) {
        errmsg = "missing collection name";
        return false;
    }

    intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(txn, nss);
    // Scratch directory for stages that spill to disk.
    pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->inShard) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        parsed = pPipeline->serialize().toBson();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    PlanExecutor* exec = NULL;
    scoped_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
    auto_ptr<PlanExecutor> execHolder;
    {
        // This will throw if the sharding version for this connection is out of date. The
        // lock must be held continuously from now until we have we created both the output
        // ClientCursor and the input executor. This ensures that both are using the same
        // sharding version that we synchronize on here. This is also why we always need to
        // create a ClientCursor even when we aren't outputting to a cursor. See the comment
        // on ShardFilterStage for more details.
        AutoGetCollectionForRead ctx(txn, nss.ns());

        Collection* collection = ctx.getCollection();

        // This does mongod-specific stuff like creating the input PlanExecutor and adding
        // it to the front of the pipeline if needed.
        boost::shared_ptr<PlanExecutor> input = PipelineD::prepareCursorSource(txn, collection, pPipeline, pCtx);
        pPipeline->stitch();

        // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
        // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
        // PlanExecutor.
        auto_ptr<WorkingSet> ws(new WorkingSet());
        auto_ptr<PipelineProxyStage> proxy(
            new PipelineProxyStage(pPipeline, input, ws.get()));
        Status execStatus = Status::OK();
        if (NULL == collection) {
            execStatus = PlanExecutor::make(txn, ws.release(), proxy.release(), nss.ns(), PlanExecutor::YIELD_MANUAL, &exec);
        }
        else {
            execStatus = PlanExecutor::make(txn, ws.release(), proxy.release(), collection, PlanExecutor::YIELD_MANUAL, &exec);
        }
        invariant(execStatus.isOK());
        execHolder.reset(exec);

        if (!collection && input) {
            // If we don't have a collection, we won't be able to register any executors, so
            // make sure that the input PlanExecutor (likely wrapping an EOFStage) doesn't
            // need to be registered.
            invariant(!input->collection());
        }

        if (collection) {
            // XXX
            const bool isAggCursor = true; // enable special locking behavior
            ClientCursor* cursor = new ClientCursor(collection, execHolder.release(), 0, BSONObj(), isAggCursor);
            pin.reset(new ClientCursorPin(collection, cursor->cursorid()));
            // Don't add any code between here and the start of the try block.
        }
    }

    try {
        // Unless set to true, the ClientCursor created above will be deleted on block exit.
        bool keepCursor = false;

        // If both explain and cursor are specified, explain wins.
        if (pPipeline->isExplain()) {
            result << "stages" << Value(pPipeline->writeExplainOps());
        }
        else if (isCursorCommand(cmdObj)) {
            handleCursorCommand(txn, nss.ns(), pin.get(), exec, cmdObj, result);
            keepCursor = true;
        }
        else {
            pPipeline->run(result);
        }

        if (!keepCursor && pin) pin->deleteUnderlying();
    }
    catch (...) {
        // Clean up cursor on way out of scope.
        if (pin) pin->deleteUnderlying();
        throw;
    }
    // Any code that needs the cursor pinned must be inside the try block, above.

    return true;
}
/**
 * Runs a query using the following steps:
 *   1) Parsing.
 *   2) Acquire locks.
 *   3) Plan query, obtaining an executor that can run it.
 *   4) Setup a cursor for the query, which may be used on subsequent getMores.
 *   5) Generate the first batch.
 *   6) Save state for getMore.
 *   7) Generate response to send to the client.
 *
 * TODO: Rather than using the sharding version available in thread-local storage (i.e. the
 * call to ShardingState::needCollectionMetadata() below), shard version information
 * should be passed as part of the command parameter.
 */
bool run(OperationContext* txn, const std::string& dbname, BSONObj& cmdObj, int options, std::string& errmsg, BSONObjBuilder& result) override {
    const std::string fullns = parseNs(dbname, cmdObj);
    const NamespaceString nss(fullns);
    if (!nss.isValid()) {
        return appendCommandStatus(result, {ErrorCodes::InvalidNamespace, str::stream() << "Invalid collection name: " << nss.ns()});
    }

    // Although it is a command, a find command gets counted as a query.
    globalOpCounters.gotQuery();

    if (txn->getClient()->isInDirectClient()) {
        return appendCommandStatus(
            result,
            Status(ErrorCodes::IllegalOperation, "Cannot run find command from eval()"));
    }

    // 1a) Parse the command BSON to a LiteParsedQuery.
    const bool isExplain = false;
    auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
    if (!lpqStatus.isOK()) {
        return appendCommandStatus(result, lpqStatus.getStatus());
    }

    auto& lpq = lpqStatus.getValue();

    // Validate term, if provided. If we are the primary in a replica set, the term must
    // not regress.
    if (auto term = lpq->getReplicationTerm()) {
        auto replCoord = repl::ReplicationCoordinator::get(txn);
        Status status = replCoord->updateTerm(*term);
        // Note: updateTerm returns ok if term stayed the same.
        if (!status.isOK()) {
            return appendCommandStatus(result, status);
        }
    }

    // Fill out curop information.
    long long ntoreturn = lpq->getBatchSize().value_or(0);
    beginQueryOp(txn, nss, cmdObj, ntoreturn, lpq->getSkip());

    // 1b) Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
    WhereCallbackReal whereCallback(txn, nss.db());
    auto statusWithCQ = CanonicalQuery::canonicalize(lpq.release(), whereCallback);
    if (!statusWithCQ.isOK()) {
        return appendCommandStatus(result, statusWithCQ.getStatus());
    }
    std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

    // 2) Acquire locks.
    AutoGetCollectionForRead ctx(txn, nss);
    Collection* collection = ctx.getCollection();

    const int dbProfilingLevel = ctx.getDb() ? ctx.getDb()->getProfilingLevel() : serverGlobalParams.defaultProfile;

    ShardingState* const shardingState = ShardingState::get(txn);

    // It is possible that the sharding version will change during yield while we are
    // retrieving a plan executor. If this happens we will throw an error and mongos will
    // retry.
    const ChunkVersion shardingVersionAtStart = shardingState->getVersion(nss.ns());

    // 3) Get the execution plan for the query.
    auto statusWithPlanExecutor = getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
    if (!statusWithPlanExecutor.isOK()) {
        return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
    }

    std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());

    // TODO: Currently, chunk ranges are kept around until all ClientCursors created while
    // the chunk belonged on this node are gone. Separating chunk lifetime management from
    // ClientCursor should allow this check to go away.
    if (!shardingState->getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
        // Version changed while retrieving a PlanExecutor. Terminate the operation,
        // signaling that mongos should retry.
        throw SendStaleConfigException(nss.ns(), "version changed during find command", shardingVersionAtStart, shardingState->getVersion(nss.ns()));
    }

    if (!collection) {
        // No collection. Just fill out curop indicating that there were zero results and
        // there is no ClientCursor id, and then return.
        const long long numResults = 0;
        const CursorId cursorId = 0;
        endQueryOp(txn, *exec, dbProfilingLevel, numResults, cursorId);
        appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
        return true;
    }

    const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();

    // 4) If possible, register the execution plan inside a ClientCursor, and pin that
    // cursor. In this case, ownership of the PlanExecutor is transferred to the
    // ClientCursor, and 'exec' becomes null.
    //
    // First unregister the PlanExecutor so it can be re-registered with ClientCursor.
    exec->deregisterExec();

    // Create a ClientCursor containing this plan executor. We don't have to worry
    // about leaking it as it's inserted into a global map by its ctor.
    ClientCursor* cursor = new ClientCursor(collection->getCursorManager(), exec.release(), nss.ns(), txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(), pq.getOptions(), pq.getFilter());
    CursorId cursorId = cursor->cursorid();
    ClientCursorPin ccPin(collection->getCursorManager(), cursorId);

    // On early return, get rid of the the cursor.
    ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);

    invariant(!exec);
    PlanExecutor* cursorExec = cursor->getExecutor();

    // 5) Stream query results, adding them to a BSONArray as we go.
    BSONArrayBuilder firstBatch;
    BSONObj obj;
    PlanExecutor::ExecState state;
    long long numResults = 0;
    while (!enoughForFirstBatch(pq, numResults, firstBatch.len()) && PlanExecutor::ADVANCED == (state = cursorExec->getNext(&obj, NULL))) {
        // If adding this object will cause us to exceed the BSON size limit, then we stash
        // it for later.
        if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
            cursorExec->enqueue(obj);
            break;
        }

        // Add result to output buffer.
        firstBatch.append(obj);
        numResults++;
    }

    // Throw an assertion if query execution fails for any reason.
    if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
        const std::unique_ptr<PlanStageStats> stats(cursorExec->getStats());
        error() << "Plan executor error during find command: " << PlanExecutor::statestr(state) << ", stats: " << Explain::statsToBSON(*stats);

        return appendCommandStatus(result, Status(ErrorCodes::OperationFailed, str::stream() << "Executor error during find command: " << WorkingSetCommon::toStatusString(obj)));
    }

    // 6) Set up the cursor for getMore.
    if (shouldSaveCursor(txn, collection, state, cursorExec)) {
        // State will be restored on getMore.
        cursorExec->saveState();
        cursorExec->detachFromOperationContext();

        cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
        cursor->setPos(numResults);
    }
    else {
        cursorId = 0;
    }

    // Fill out curop based on the results.
    endQueryOp(txn, *cursorExec, dbProfilingLevel, numResults, cursorId);

    // 7) Generate the response object to send to the client.
    appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
    if (cursorId) {
        // A live cursor survives this command; don't let the guard delete it.
        cursorFreer.Dismiss();
    }
    return true;
}
// static StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbname, const BSONObj& cmdObj) { invariant(!dbname.empty()); // Required fields. boost::optional<CursorId> cursorid; boost::optional<std::string> fullns; // Optional fields. boost::optional<long long> batchSize; boost::optional<Milliseconds> awaitDataTimeout; boost::optional<long long> term; boost::optional<repl::OpTime> lastKnownCommittedOpTime; for (BSONElement el : cmdObj) { const char* fieldName = el.fieldName(); if (str::equals(fieldName, kGetMoreCommandName)) { if (el.type() != BSONType::NumberLong) { return {ErrorCodes::TypeMismatch, str::stream() << "Field 'getMore' must be of type long in: " << cmdObj}; } cursorid = el.Long(); } else if (str::equals(fieldName, kCollectionField)) { if (el.type() != BSONType::String) { return {ErrorCodes::TypeMismatch, str::stream() << "Field 'collection' must be of type string in: " << cmdObj}; } fullns = parseNs(dbname, cmdObj); } else if (str::equals(fieldName, kBatchSizeField)) { if (!el.isNumber()) { return {ErrorCodes::TypeMismatch, str::stream() << "Field 'batchSize' must be a number in: " << cmdObj}; } batchSize = el.numberLong(); } else if (str::equals(fieldName, kAwaitDataTimeoutField)) { auto maxAwaitDataTime = LiteParsedQuery::parseMaxTimeMS(el); if (!maxAwaitDataTime.isOK()) { return maxAwaitDataTime.getStatus(); } if (maxAwaitDataTime.getValue()) { awaitDataTimeout = Milliseconds(maxAwaitDataTime.getValue()); } } else if (str::equals(fieldName, kTermField)) { if (el.type() != BSONType::NumberLong) { return {ErrorCodes::TypeMismatch, str::stream() << "Field 'term' must be of type NumberLong in: " << cmdObj}; } term = el.Long(); } else if (str::equals(fieldName, kLastKnownCommittedOpTimeField)) { repl::OpTime ot; Status status = bsonExtractOpTimeField(el.wrap(), kLastKnownCommittedOpTimeField, &ot); if (!status.isOK()) { return status; } lastKnownCommittedOpTime = ot; } else if (!str::startsWith(fieldName, "$")) { return 
{ErrorCodes::FailedToParse, str::stream() << "Failed to parse: " << cmdObj << ". " << "Unrecognized field '" << fieldName << "'."}; } } if (!cursorid) { return {ErrorCodes::FailedToParse, str::stream() << "Field 'getMore' missing in: " << cmdObj}; } if (!fullns) { return {ErrorCodes::FailedToParse, str::stream() << "Field 'collection' missing in: " << cmdObj}; } GetMoreRequest request(NamespaceString(*fullns), *cursorid, batchSize, awaitDataTimeout, term, lastKnownCommittedOpTime); Status validStatus = request.isValid(); if (!validStatus.isOK()) { return validStatus; } return request; }
virtual bool run(OperationContext* txn, const string& db, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result) { const std::string ns = parseNs(db, cmdObj); if (nsToCollectionSubstring(ns).empty()) { errmsg = "missing collection name"; return false; } NamespaceString nss(ns); // Parse the options for this request. auto request = AggregationRequest::parseFromBSON(nss, cmdObj); if (!request.isOK()) { return appendCommandStatus(result, request.getStatus()); } // Set up the ExpressionContext. intrusive_ptr<ExpressionContext> expCtx = new ExpressionContext(txn, request.getValue()); expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp"; // Parse the pipeline. auto statusWithPipeline = Pipeline::parse(request.getValue().getPipeline(), expCtx); if (!statusWithPipeline.isOK()) { return appendCommandStatus(result, statusWithPipeline.getStatus()); } auto pipeline = std::move(statusWithPipeline.getValue()); auto resolvedNamespaces = resolveInvolvedNamespaces(txn, pipeline, expCtx); if (!resolvedNamespaces.isOK()) { return appendCommandStatus(result, resolvedNamespaces.getStatus()); } expCtx->resolvedNamespaces = std::move(resolvedNamespaces.getValue()); unique_ptr<ClientCursorPin> pin; // either this OR the exec will be non-null unique_ptr<PlanExecutor> exec; auto curOp = CurOp::get(txn); { // This will throw if the sharding version for this connection is out of date. If the // namespace is a view, the lock will be released before re-running the aggregation. // Otherwise, the lock must be held continuously from now until we have we created both // the output ClientCursor and the input executor. This ensures that both are using the // same sharding version that we synchronize on here. This is also why we always need to // create a ClientCursor even when we aren't outputting to a cursor. See the comment on // ShardFilterStage for more details. 
AutoGetCollectionOrViewForRead ctx(txn, nss); Collection* collection = ctx.getCollection(); // If running $collStats on a view, we do not resolve the view since we want stats // on this view namespace. auto startsWithCollStats = [&pipeline]() { const Pipeline::SourceContainer& sources = pipeline->getSources(); return !sources.empty() && dynamic_cast<DocumentSourceCollStats*>(sources.front().get()); }; // If this is a view, resolve it by finding the underlying collection and stitching view // pipelines and this request's pipeline together. We then release our locks before // recursively calling run, which will re-acquire locks on the underlying collection. // (The lock must be released because recursively acquiring locks on the database will // prohibit yielding.) auto view = ctx.getView(); if (view && !startsWithCollStats()) { auto viewDefinition = ViewShardingCheck::getResolvedViewIfSharded(txn, ctx.getDb(), view); if (!viewDefinition.isOK()) { return appendCommandStatus(result, viewDefinition.getStatus()); } if (!viewDefinition.getValue().isEmpty()) { ViewShardingCheck::appendShardedViewStatus(viewDefinition.getValue(), &result); return false; } auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(txn, nss); if (!resolvedView.isOK()) { return appendCommandStatus(result, resolvedView.getStatus()); } // With the view resolved, we can relinquish locks. ctx.releaseLocksForView(); // Parse the resolved view into a new aggregation request. auto viewCmd = resolvedView.getValue().asExpandedViewAggregation(request.getValue()); if (!viewCmd.isOK()) { return appendCommandStatus(result, viewCmd.getStatus()); } bool status = this->run(txn, db, viewCmd.getValue(), options, errmsg, result); { // Set the namespace of the curop back to the view namespace so ctx records // stats on this view namespace on destruction. 
stdx::lock_guard<Client>(*txn->getClient()); curOp->setNS_inlock(nss.ns()); } return status; } // If the pipeline does not have a user-specified collation, set it from the collection // default. if (request.getValue().getCollation().isEmpty() && collection && collection->getDefaultCollator()) { invariant(!expCtx->getCollator()); expCtx->setCollator(collection->getDefaultCollator()->clone()); } // Propagate the ExpressionContext throughout all of the pipeline's stages and // expressions. pipeline->injectExpressionContext(expCtx); // The pipeline must be optimized after the correct collator has been set on it (by // injecting the ExpressionContext containing the collator). This is necessary because // optimization may make string comparisons, e.g. optimizing {$eq: [<str1>, <str2>]} to // a constant. pipeline->optimizePipeline(); if (kDebugBuild && !expCtx->isExplain && !expCtx->inShard) { // Make sure all operations round-trip through Pipeline::serialize() correctly by // re-parsing every command in debug builds. This is important because sharded // aggregations rely on this ability. Skipping when inShard because this has // already been through the transformation (and this un-sets expCtx->inShard). pipeline = reparsePipeline(pipeline, request.getValue(), expCtx); } // This does mongod-specific stuff like creating the input PlanExecutor and adding // it to the front of the pipeline if needed. PipelineD::prepareCursorSource(collection, pipeline); // Create the PlanExecutor which returns results from the pipeline. The WorkingSet // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created // PlanExecutor. auto ws = make_unique<WorkingSet>(); auto proxy = make_unique<PipelineProxyStage>(txn, pipeline, ws.get()); auto statusWithPlanExecutor = (NULL == collection) ? 
PlanExecutor::make( txn, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL) : PlanExecutor::make( txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL); invariant(statusWithPlanExecutor.isOK()); exec = std::move(statusWithPlanExecutor.getValue()); { auto planSummary = Explain::getPlanSummary(exec.get()); stdx::lock_guard<Client>(*txn->getClient()); curOp->setPlanSummary_inlock(std::move(planSummary)); } if (collection) { PlanSummaryStats stats; Explain::getSummaryStats(*exec, &stats); collection->infoCache()->notifyOfQuery(txn, stats.indexesUsed); } if (collection) { const bool isAggCursor = true; // enable special locking behavior ClientCursor* cursor = new ClientCursor(collection->getCursorManager(), exec.release(), nss.ns(), txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(), 0, cmdObj.getOwned(), isAggCursor); pin.reset(new ClientCursorPin(collection->getCursorManager(), cursor->cursorid())); // Don't add any code between here and the start of the try block. } // At this point, it is safe to release the collection lock. // - In the case where we have a collection: we will need to reacquire the // collection lock later when cleaning up our ClientCursorPin. // - In the case where we don't have a collection: our PlanExecutor won't be // registered, so it will be safe to clean it up outside the lock. invariant(!exec || !collection); } try { // Unless set to true, the ClientCursor created above will be deleted on block exit. bool keepCursor = false; // Use of the aggregate command without specifying to use a cursor is deprecated. // Applications should migrate to using cursors. Cursors are strictly more useful than // outputting the results as a single document, since results that fit inside a single // BSONObj will also fit inside a single batch. // // We occasionally log a deprecation warning. 
if (!request.getValue().isCursorCommand()) { RARELY { warning() << "Use of the aggregate command without the 'cursor' " "option is deprecated. See " "http://dochub.mongodb.org/core/aggregate-without-cursor-deprecation."; } } // If both explain and cursor are specified, explain wins. if (expCtx->isExplain) { result << "stages" << Value(pipeline->writeExplainOps()); } else if (request.getValue().isCursorCommand()) { keepCursor = handleCursorCommand(txn, nss.ns(), pin.get(), pin ? pin->c()->getExecutor() : exec.get(), request.getValue(), result); } else { pipeline->run(result); } if (!expCtx->isExplain) { PlanSummaryStats stats; Explain::getSummaryStats(pin ? *pin->c()->getExecutor() : *exec.get(), &stats); curOp->debug().setPlanSummaryMetrics(stats); curOp->debug().nreturned = stats.nReturned; } // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock // in order to do so. if (pin) { // We acquire locks here with DBLock and CollectionLock instead of using // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the // sharding version is out of date, and we don't care if the sharding version // has changed. Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS); Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS); if (keepCursor) { pin->release(); } else { pin->deleteUnderlying(); } } } catch (...) {