// mongod entry point for the aggregation command (legacy variant using
// PipelineCursor). Parses the pipeline from 'cmdObj', optionally round-trips
// it through BSON on DEBUG builds, attaches the mongod cursor source, then
// either registers a server-side cursor (cursor-style invocation) or runs the
// whole pipeline inline into 'result'.
//
// Returns false (with 'errmsg' set) only when the pipeline fails to parse;
// later failures surface via exceptions or the result builder.
// NOTE(review): 'options' and 'fromRepl' are unused in this variant — presumably
// required by the Command base-class signature; confirm against the base class.
virtual bool run(const string &db, BSONObj &cmdObj, int options, string &errmsg,
                 BSONObjBuilder &result, bool fromRepl) {
    string ns = parseNs(db, cmdObj);

    // Expression context shared by every stage of this pipeline; carries the
    // interrupt-status hook and the target namespace.
    intrusive_ptr<ExpressionContext> pCtx =
        new ExpressionContext(InterruptStatusMongod::status, NamespaceString(ns));

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

    if (pPipeline->getSplitMongodPipeline()) {
        // This is only used in testing
        return executeSplitPipeline(result, errmsg, ns, db, pPipeline, pCtx);
    }

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->inShard) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        parsed = pPipeline->serialize().toBson();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    // This does the mongod-specific stuff like creating a cursor
    PipelineD::prepareCursorSource(pPipeline, nsToDatabase(ns), pCtx);
    pPipeline->stitch();

    if (isCursorCommand(cmdObj)) {
        // Caller asked for cursor-style results: register a ClientCursor and
        // hand back only its id; subsequent getMore calls drain the pipeline.
        CursorId id;
        {
            // Set up cursor. The ReadContext scopes the read lock to cursor
            // creation only.
            Client::ReadContext ctx(ns);
            shared_ptr<Cursor> cursor(new PipelineCursor(pPipeline));
            // cc will be owned by cursor manager
            ClientCursor* cc = new ClientCursor(0, cursor, ns, cmdObj.getOwned());
            id = cc->cursorid();
        }
        handleCursorCommand(id, cmdObj, result);
    }
    else {
        // Inline invocation: execute the whole pipeline now, appending the
        // result set directly into 'result'.
        pPipeline->run(result);
    }

    // If the pipeline ended in $out, report the namespace it wrote to.
    if (DocumentSourceOut* out = dynamic_cast<DocumentSourceOut*>(pPipeline->output())) {
        result.append("outputNs", out->getOutputNs());
    }

    return true;
}
// mongod entry point for the aggregation command (variant using
// PipelineRunner-backed ClientCursors). Parses the pipeline, optionally
// round-trips it through BSON on DEBUG builds, attaches the mongod cursor
// source, then answers explain, cursor-style, or inline invocations.
//
// Returns false (with 'errmsg' set) only when the pipeline fails to parse.
virtual bool run(const string &db, BSONObj &cmdObj, int options, string &errmsg,
                 BSONObjBuilder &result, bool fromRepl) {
    string ns = parseNs(db, cmdObj);

    intrusive_ptr<ExpressionContext> pCtx =
        new ExpressionContext(InterruptStatusMongod::status, NamespaceString(ns));
    // Scratch directory for stages that may spill to disk (e.g. external
    // sort) — placed under the dbpath so it shares the data volume.
    pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->inShard) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        parsed = pPipeline->serialize().toBson();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    // This does the mongod-specific stuff like creating a cursor
    PipelineD::prepareCursorSource(pPipeline, pCtx);
    pPipeline->stitch();

    if (pPipeline->isExplain()) {
        // Explain reports the (possibly optimized) stage list only.
        result << "stages" << Value(pPipeline->writeExplainOps());
        return true; // don't do any actual execution
    }

    if (isCursorCommand(cmdObj)) {
        // Caller asked for cursor-style results: register a ClientCursor and
        // return only its id; getMore drains the pipeline later.
        CursorId id;
        {
            // Set up cursor. The ReadContext scopes the read lock to cursor
            // creation; the ClientCursor is owned by the cursor machinery.
            Client::ReadContext ctx(ns);
            ClientCursor* cc = new ClientCursor(new PipelineRunner(pPipeline));
            cc->isAggCursor = true; // enable special locking and ns deletion behavior
            id = cc->cursorid();
        }
        handleCursorCommand(id, cmdObj, result);
    }
    else {
        // Inline invocation: run the whole pipeline now into 'result'.
        pPipeline->run(result);
    }

    return true;
}
// mongod entry point for the aggregation command (PlanExecutor variant).
// Parses the pipeline, optionally round-trips it through BSON on DEBUG
// builds, builds a PipelineProxyStage-backed PlanExecutor under a single
// collection read lock, and answers explain, cursor-style, or inline
// invocations. A ClientCursor is created whenever a collection exists — even
// for non-cursor invocations — so the sharding version checked at lock time
// covers both input and output sides (see the comment below).
//
// Returns false (with 'errmsg' set) when the collection name is missing or
// the pipeline fails to parse; later failures surface via exceptions.
virtual bool run(OperationContext* txn, const string &db, BSONObj &cmdObj, int options,
                 string &errmsg, BSONObjBuilder &result, bool fromRepl) {
    NamespaceString nss(parseNs(db, cmdObj));
    if (nss.coll().empty()) {
        errmsg = "missing collection name";
        return false;
    }

    intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(txn, nss);
    // Scratch directory for stages that may spill to disk.
    pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->inShard) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        parsed = pPipeline->serialize().toBson();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    // Ownership of the executor: exactly one of 'pin' (registered in the
    // cursor manager) or 'execHolder' (local) owns it at any time; 'exec' is
    // a non-owning view used for execution below.
    PlanExecutor* exec = NULL;
    scoped_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
    auto_ptr<PlanExecutor> execHolder;
    {
        // This will throw if the sharding version for this connection is out of date. The
        // lock must be held continuously from now until we have we created both the output
        // ClientCursor and the input executor. This ensures that both are using the same
        // sharding version that we synchronize on here. This is also why we always need to
        // create a ClientCursor even when we aren't outputting to a cursor. See the comment
        // on ShardFilterStage for more details.
        AutoGetCollectionForRead ctx(txn, nss.ns());

        Collection* collection = ctx.getCollection();

        // This does mongod-specific stuff like creating the input PlanExecutor and adding
        // it to the front of the pipeline if needed.
        boost::shared_ptr<PlanExecutor> input =
            PipelineD::prepareCursorSource(txn, collection, pPipeline, pCtx);
        pPipeline->stitch();

        // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
        // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
        // PlanExecutor.
        auto_ptr<WorkingSet> ws(new WorkingSet());
        auto_ptr<PipelineProxyStage> proxy(
            new PipelineProxyStage(pPipeline, input, ws.get()));
        Status execStatus = Status::OK();
        if (NULL == collection) {
            // No collection: make the executor against the namespace string
            // alone (manual yielding — no collection to coordinate with).
            execStatus = PlanExecutor::make(txn,
                                            ws.release(),
                                            proxy.release(),
                                            nss.ns(),
                                            PlanExecutor::YIELD_MANUAL,
                                            &exec);
        }
        else {
            execStatus = PlanExecutor::make(txn,
                                            ws.release(),
                                            proxy.release(),
                                            collection,
                                            PlanExecutor::YIELD_MANUAL,
                                            &exec);
        }
        invariant(execStatus.isOK());
        execHolder.reset(exec);

        if (!collection && input) {
            // If we don't have a collection, we won't be able to register any executors, so
            // make sure that the input PlanExecutor (likely wrapping an EOFStage) doesn't
            // need to be registered.
            invariant(!input->collection());
        }

        if (collection) {
            // XXX
            const bool isAggCursor = true; // enable special locking behavior
            ClientCursor* cursor = new ClientCursor(collection, execHolder.release(), 0,
                                                    BSONObj(), isAggCursor);
            pin.reset(new ClientCursorPin(collection, cursor->cursorid()));
            // Don't add any code between here and the start of the try block.
        }
    }

    try {
        // Unless set to true, the ClientCursor created above will be deleted on block exit.
        bool keepCursor = false;

        // If both explain and cursor are specified, explain wins.
        if (pPipeline->isExplain()) {
            result << "stages" << Value(pPipeline->writeExplainOps());
        }
        else if (isCursorCommand(cmdObj)) {
            // Cursor-style invocation: first batch is produced here; the
            // pinned cursor is kept alive for subsequent getMore calls.
            handleCursorCommand(txn, nss.ns(), pin.get(), exec, cmdObj, result);
            keepCursor = true;
        }
        else {
            // Inline invocation: run the whole pipeline now into 'result'.
            pPipeline->run(result);
        }

        if (!keepCursor && pin)
            pin->deleteUnderlying();
    }
    catch (...) {
        // Clean up cursor on way out of scope.
        if (pin)
            pin->deleteUnderlying();
        throw;
    }
    // Any code that needs the cursor pinned must be inside the try block, above.

    return true;
}
// mongod entry point for the aggregation command (PipelineRunner variant).
// Parses the pipeline, optionally round-trips it through BSON on DEBUG
// builds, builds the input Runner under a single read lock, and answers
// explain, cursor-style, or inline invocations. A ClientCursor is created
// whenever a collection exists — even for non-cursor invocations — so the
// sharding version checked at lock time covers both sides (see below).
//
// Returns false (with 'errmsg' set) only when the pipeline fails to parse.
// NOTE(review): 'txn' is unused here — presumably required by the Command
// base-class signature; confirm against the base class.
virtual bool run(OperationContext* txn, const string &db, BSONObj &cmdObj, int options,
                 string &errmsg, BSONObjBuilder &result, bool fromRepl) {
    string ns = parseNs(db, cmdObj);

    intrusive_ptr<ExpressionContext> pCtx =
        new ExpressionContext(InterruptStatusMongod::status, NamespaceString(ns));
    // Scratch directory for stages that may spill to disk.
    pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";

    /* try to parse the command; if this fails, then we didn't run */
    intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
    if (!pPipeline.get())
        return false;

#if _DEBUG
    // This is outside of the if block to keep the object alive until the pipeline is finished.
    BSONObj parsed;
    if (!pPipeline->isExplain() && !pCtx->inShard) {
        // Make sure all operations round-trip through Pipeline::toBson()
        // correctly by reparsing every command on DEBUG builds. This is
        // important because sharded aggregations rely on this ability.
        // Skipping when inShard because this has already been through the
        // transformation (and this unsets pCtx->inShard).
        parsed = pPipeline->serialize().toBson();
        pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
        verify(pPipeline);
    }
#endif

    // Ownership of the runner: exactly one of 'pin' (registered in the cursor
    // manager) or 'runnerHolder' (local) owns it at any time; 'runner' is a
    // non-owning view used for execution below.
    PipelineRunner* runner = NULL;
    scoped_ptr<ClientCursorPin> pin; // either this OR the runnerHolder will be non-null
    auto_ptr<PipelineRunner> runnerHolder;
    {
        // This will throw if the sharding version for this connection is out of date. The
        // lock must be held continuously from now until we have we created both the output
        // ClientCursor and the input Runner. This ensures that both are using the same
        // sharding version that we synchronize on here. This is also why we always need to
        // create a ClientCursor even when we aren't outputting to a cursor. See the comment
        // on ShardFilterStage for more details.
        Client::ReadContext ctx(ns);

        Collection* collection = ctx.ctx().db()->getCollection(ns);

        // This does mongod-specific stuff like creating the input Runner and adding to the
        // front of the pipeline if needed.
        boost::shared_ptr<Runner> input =
            PipelineD::prepareCursorSource(collection, pPipeline, pCtx);
        pPipeline->stitch();

        runnerHolder.reset(new PipelineRunner(pPipeline, input));
        runner = runnerHolder.get();

        if (!collection && input) {
            // If we don't have a collection, we won't be able to register any Runners, so
            // make sure that the input Runner (likely an EOFRunner) doesn't need to be
            // registered.
            invariant(!input->collection());
        }

        if (collection) {
            ClientCursor* cursor = new ClientCursor(collection, runnerHolder.release());
            cursor->isAggCursor = true; // enable special locking behavior
            pin.reset(new ClientCursorPin(collection, cursor->cursorid()));
            // Don't add any code between here and the start of the try block.
        }
    }

    try {
        // Unless set to true, the ClientCursor created above will be deleted on block exit.
        bool keepCursor = false;

        // If both explain and cursor are specified, explain wins.
        if (pPipeline->isExplain()) {
            result << "stages" << Value(pPipeline->writeExplainOps());
        }
        else if (isCursorCommand(cmdObj)) {
            // Cursor-style invocation: first batch is produced here; the
            // pinned cursor is kept alive for subsequent getMore calls.
            handleCursorCommand(ns, pin.get(), runner, cmdObj, result);
            keepCursor = true;
        }
        else {
            // Inline invocation: run the whole pipeline now into 'result'.
            pPipeline->run(result);
        }

        if (!keepCursor && pin)
            pin->deleteUnderlying();
    }
    catch (...) {
        // Clean up cursor on way out of scope.
        if (pin)
            pin->deleteUnderlying();
        throw;
    }
    // Any code that needs the cursor pinned must be inside the try block, above.

    return true;
}