void run() { Client::WriteContext ctx(&_txn, ns()); addIndex(BSON("a" << "2d" << "b" << 1)); addIndex(BSON("a" << "2d")); BSONObj query = fromjson("{$or: [{a: {$geoWithin: {$centerSphere: [[0,0],10]}}}," "{a: {$geoWithin: {$centerSphere: [[1,1],10]}}}]}"); CanonicalQuery* cq; ASSERT_OK(CanonicalQuery::canonicalize(ns(), query, &cq)); Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns()); // Get planner params. QueryPlannerParams plannerParams; fillOutPlannerParams(&_txn, collection, cq, &plannerParams); // We expect creation of the subplan stage to fail. WorkingSet ws; SubplanStage* subplan; ASSERT_NOT_OK(SubplanStage::make(&_txn, collection, &ws, plannerParams, cq, &subplan)); ctx.commit(); }
/**
 * Use the MultiPlanRunner to pick the best plan for the query 'cq'. Goes through
 * normal planning to generate solutions and feeds them to the MPR.
 *
 * Takes ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*
 * (it remains owned by the MultiPlanStage held in the '_mps' member).
 */
QuerySolution* pickBestPlan(CanonicalQuery* cq) {
    Client::ReadContext ctx(&_txn, ns);
    Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns);

    QueryPlannerParams plannerParams;
    fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
    // Turn this off otherwise it pops up in some plans.
    plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;

    // Plan. On success, 'solutions' holds at least one heap-allocated
    // QuerySolution; each is handed to the MultiPlanStage below.
    vector<QuerySolution*> solutions;
    Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
    ASSERT(status.isOK());
    ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);

    // Fill out the MPR. '_mps' (a member) takes ownership of 'cq'.
    _mps.reset(new MultiPlanStage(&_txn, collection, cq));
    // NOTE(review): 'ws' is raw-new'd and shared by every candidate plan; the
    // sibling test below holds it in a scoped_ptr and addPlan's own comment
    // there says it takes ownership of solution and root only — presumably
    // this WorkingSet is leaked here (acceptable in a test); verify.
    WorkingSet* ws = new WorkingSet();

    // Put each solution from the planner into the MPR. addPlan takes ownership
    // of 'solutions[i]' and 'root'.
    for (size_t i = 0; i < solutions.size(); ++i) {
        PlanStage* root;
        ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws, &root));
        _mps->addPlan(solutions[i], root, ws);
    }

    _mps->pickBestPlan();  // This is what sets a backup plan, should we test for it.
    ASSERT(_mps->bestPlanChosen());

    size_t bestPlanIdx = _mps->bestPlanIdx();
    ASSERT_LESS_THAN(bestPlanIdx, solutions.size());

    // And return a pointer to the best solution.
    return _mps->bestSolution();
}
void run() { AutoGetCollectionForRead ctx(&_txn, nss.ns()); Collection* collection = ctx.getCollection(); ASSERT(collection); // Query can be answered by either index on "a" or index on "b". auto statusWithCQ = CanonicalQuery::canonicalize(nss, fromjson("{a: {$gte: 8}, b: 1}")); ASSERT_OK(statusWithCQ.getStatus()); const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // We shouldn't have anything in the plan cache for this shape yet. PlanCache* cache = collection->infoCache()->getPlanCache(); ASSERT(cache); CachedSolution* rawCachedSolution; ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution)); // Get planner params. QueryPlannerParams plannerParams; fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); // Set up queued data stage to take a long time before returning EOF. Should be long // enough to trigger a replan. const size_t decisionWorks = 10; const size_t mockWorks = 1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks); auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws); for (size_t i = 0; i < mockWorks; i++) { mockChild->pushBack(PlanStage::NEED_TIME); } CachedPlanStage cachedPlanStage( &_txn, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release()); // This should succeed after triggering a replan. PlanYieldPolicy yieldPolicy(nullptr, PlanExecutor::YIELD_MANUAL); ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy)); // Make sure that we get 2 legit results back. size_t numResults = 0; PlanStage::StageState state = PlanStage::NEED_TIME; while (state != PlanStage::IS_EOF) { WorkingSetID id = WorkingSet::INVALID_ID; state = cachedPlanStage.work(&id); ASSERT_NE(state, PlanStage::FAILURE); ASSERT_NE(state, PlanStage::DEAD); if (state == PlanStage::ADVANCED) { WorkingSetMember* member = _ws.get(id); ASSERT(cq->root()->matchesBSON(member->obj.value())); numResults++; } } ASSERT_EQ(numResults, 2U); // This time we expect to find something in the plan cache. 
Replans after hitting the // works threshold result in a cache entry. ASSERT_OK(cache->get(*cq, &rawCachedSolution)); const std::unique_ptr<CachedSolution> cachedSolution(rawCachedSolution); }
void run() {
    // Verifies that forcing index intersection yields a blocking (sorted)
    // intersection plan with a backup, and that the backup is dropped once the
    // blocking plan produces a result.

    // Data is just a single {_id: 1, a: 1, b: 1} document.
    insert(BSON("_id" << 1 << "a" << 1 << "b" << 1));

    // Indices on 'a' and 'b'.
    addIndex(BSON("a" << 1));
    addIndex(BSON("b" << 1));

    AutoGetCollectionForRead ctx(&_txn, ns());
    Collection* collection = ctx.getCollection();

    // Query for both 'a' and 'b' and sort on 'b'.
    CanonicalQuery* cq;
    verify(CanonicalQuery::canonicalize(ns(),
                                        BSON("a" << 1 << "b" << 1),  // query
                                        BSON("b" << 1),              // sort
                                        BSONObj(),                   // proj
                                        &cq).isOK());
    ASSERT(NULL != cq);
    // Owns 'cq' for the duration of the test (MultiPlanStage does not).
    boost::scoped_ptr<CanonicalQuery> killCq(cq);

    // Force index intersection.
    // NOTE(review): the old value is restored by plain assignment at the end of
    // the function, so a throwing ASSERT mid-test would leak the forced setting
    // into later tests — consider an RAII guard; confirm ASSERT semantics.
    bool forceIxisectOldValue = internalQueryForceIntersectionPlans;
    internalQueryForceIntersectionPlans = true;

    // Get planner params.
    QueryPlannerParams plannerParams;
    fillOutPlannerParams(&_txn, collection, cq, &plannerParams);

    // Turn this off otherwise it pops up in some plans.
    plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;

    // Plan.
    vector<QuerySolution*> solutions;
    Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
    ASSERT(status.isOK());

    // We expect a plan using index {a: 1} and plan using index {b: 1} and
    // an index intersection plan.
    ASSERT_EQUALS(solutions.size(), 3U);

    // Fill out the MultiPlanStage.
    scoped_ptr<MultiPlanStage> mps(new MultiPlanStage(&_txn, collection, cq));
    scoped_ptr<WorkingSet> ws(new WorkingSet());
    // Put each solution from the planner into the MPR.
    for (size_t i = 0; i < solutions.size(); ++i) {
        PlanStage* root;
        ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws.get(), &root));
        // Takes ownership of 'solutions[i]' and 'root'.
        mps->addPlan(solutions[i], root, ws.get());
    }

    // This sets a backup plan. NULL means that 'mps' will not yield.
    mps->pickBestPlan(NULL);
    ASSERT(mps->bestPlanChosen());
    ASSERT(mps->hasBackupPlan());

    // We should have picked the index intersection plan due to forcing ixisect.
    QuerySolution* soln = mps->bestSolution();
    ASSERT(QueryPlannerTestLib::solutionMatches(
        "{sort: {pattern: {b: 1}, limit: 0, node: "
        "{fetch: {filter: null, node: {andSorted: {nodes: ["
        "{ixscan: {filter: null, pattern: {a:1}}},"
        "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
        soln->root.get()));

    // Get the resulting document. Work until the stage ADVANCEs.
    PlanStage::StageState state = PlanStage::NEED_TIME;
    WorkingSetID wsid;
    while (state != PlanStage::ADVANCED) {
        state = mps->work(&wsid);
    }
    WorkingSetMember* member = ws->get(wsid);

    // Check the document returned by the query.
    ASSERT(member->hasObj());
    BSONObj expectedDoc = BSON("_id" << 1 << "a" << 1 << "b" << 1);
    ASSERT(expectedDoc.woCompare(member->obj) == 0);

    // The blocking plan became unblocked, so we should no longer have a backup plan,
    // and the winning plan should still be the index intersection one.
    ASSERT(!mps->hasBackupPlan());
    soln = mps->bestSolution();
    ASSERT(QueryPlannerTestLib::solutionMatches(
        "{sort: {pattern: {b: 1}, limit: 0, node: "
        "{fetch: {filter: null, node: {andSorted: {nodes: ["
        "{ixscan: {filter: null, pattern: {a:1}}},"
        "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
        soln->root.get()));

    // Restore index intersection force parameter.
    internalQueryForceIntersectionPlans = forceIxisectOldValue;
}
/**
 * For a given query, get a runner. The runner could be a SingleSolutionRunner, a
 * CachedQueryRunner, or a MultiPlanRunner, depending on the cache/query solver/etc.
 *
 * Takes ownership of 'rawCanonicalQuery' on every path (success or error).
 * On success, '*out' is a heap-allocated runner owned by the caller.
 */
Status getRunner(Collection* collection,
                 CanonicalQuery* rawCanonicalQuery,
                 Runner** out,
                 size_t plannerOptions) {
    verify(rawCanonicalQuery);
    // Owns the query until a runner takes it over via release().
    auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);

    // This can happen as we're called by internal clients as well.
    if (NULL == collection) {
        const string& ns = canonicalQuery->ns();
        LOG(2) << "Collection " << ns << " does not exist."
               << " Using EOF runner: " << canonicalQuery->toStringShort();
        // EOFRunner takes ownership of the query.
        *out = new EOFRunner(canonicalQuery.release(), ns);
        return Status::OK();
    }

    // If we have an _id index we can use the idhack runner.
    if (IDHackRunner::supportsQuery(*canonicalQuery) &&
        collection->getIndexCatalog()->findIdIndex()) {
        LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
        // IDHackRunner takes ownership of the query.
        *out = new IDHackRunner(collection, canonicalQuery.release());
        return Status::OK();
    }

    // Tailable: If the query requests tailable the collection must be capped.
    if (canonicalQuery->getParsed().hasOption(QueryOption_CursorTailable)) {
        if (!collection->isCapped()) {
            return Status(ErrorCodes::BadValue,
                          "error processing query: " + canonicalQuery->toString() +
                          " tailable cursor requested on non capped collection");
        }

        // If a sort is specified it must be equal to expectedSort.
        const BSONObj expectedSort = BSON("$natural" << 1);
        const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
        if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
            return Status(ErrorCodes::BadValue,
                          "error processing query: " + canonicalQuery->toString() +
                          " invalid sort specified for tailable cursor: " + actualSort.toString());
        }
    }

    // Fill out the planning params. We use these for both cached solutions and non-cached.
    QueryPlannerParams plannerParams;
    plannerParams.options = plannerOptions;
    // 'rawCanonicalQuery' and 'canonicalQuery.get()' are the same pointer here.
    fillOutPlannerParams(collection, rawCanonicalQuery, &plannerParams);

    // See if the cache has what we're looking for.
    Status cacheStatus = getRunnerFromCache(canonicalQuery.get(),
                                            collection,
                                            plannerParams,
                                            out);

    // This can be not-OK and we can carry on. It just means the query wasn't cached.
    if (cacheStatus.isOK()) {
        // We got a cached runner; it holds the query pointer, so relinquish
        // local ownership without deleting.
        canonicalQuery.release();
        return cacheStatus;
    }

    if (internalQueryPlanOrChildrenIndependently
        && SubplanRunner::canUseSubplanRunner(*canonicalQuery)) {

        QLOG() << "Running query as sub-queries: " << canonicalQuery->toStringShort();
        LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();

        SubplanRunner* runner;
        // Passes ownership of the query to SubplanRunner::make.
        Status runnerStatus = SubplanRunner::make(collection, plannerParams,
                                                  canonicalQuery.release(), &runner);
        if (!runnerStatus.isOK()) {
            return runnerStatus;
        }

        *out = runner;
        return Status::OK();
    }

    // Fallback: full planning. Takes ownership of the query.
    return getRunnerAlwaysPlan(collection, canonicalQuery.release(), plannerParams, out);
}
/**
 * Build a PlanExecutor for 'rawCanonicalQuery', trying (in order): EOF plan for a
 * missing collection, idhack, a cached solution, subplanning for rooted $or, and
 * finally full planning.
 *
 * Takes ownership of 'rawCanonicalQuery' on every path. On success, '*out' is a
 * heap-allocated PlanExecutor owned by the caller.
 */
Status getExecutor(OperationContext* txn,
                   Collection* collection,
                   CanonicalQuery* rawCanonicalQuery,
                   PlanExecutor** out,
                   size_t plannerOptions) {
    invariant(rawCanonicalQuery);
    // Owns the query until an executor takes it over via release().
    auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);

    // This can happen as we're called by internal clients as well.
    if (NULL == collection) {
        const string& ns = canonicalQuery->ns();
        LOG(2) << "Collection " << ns << " does not exist."
               << " Using EOF runner: " << canonicalQuery->toStringShort();
        EOFStage* eofStage = new EOFStage();
        WorkingSet* ws = new WorkingSet();
        // PlanExecutor takes ownership of 'ws', 'eofStage', and the query.
        *out = new PlanExecutor(ws, eofStage, canonicalQuery.release(), collection);
        return Status::OK();
    }

    // Fill out the planning params. We use these for both cached solutions and non-cached.
    QueryPlannerParams plannerParams;
    plannerParams.options = plannerOptions;
    fillOutPlannerParams(collection, canonicalQuery.get(), &plannerParams);

    // If we have an _id index we can use the idhack runner.
    if (IDHackStage::supportsQuery(*canonicalQuery.get()) &&
        collection->getIndexCatalog()->findIdIndex()) {
        return getExecutorIDHack(txn, collection, canonicalQuery.release(), plannerParams, out);
    }

    // Tailable: If the query requests tailable the collection must be capped.
    if (canonicalQuery->getParsed().hasOption(QueryOption_CursorTailable)) {
        if (!collection->isCapped()) {
            return Status(ErrorCodes::BadValue,
                          "error processing query: " + canonicalQuery->toString() +
                          " tailable cursor requested on non capped collection");
        }

        // If a sort is specified it must be equal to expectedSort.
        const BSONObj expectedSort = BSON("$natural" << 1);
        const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
        if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
            return Status(ErrorCodes::BadValue,
                          "error processing query: " + canonicalQuery->toString() +
                          " invalid sort specified for tailable cursor: " + actualSort.toString());
        }
    }

    // Try to look up a cached solution for the query.
    CachedSolution* rawCS;
    if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
        collection->infoCache()->getPlanCache()->get(*canonicalQuery.get(), &rawCS).isOK()) {
        // We have a CachedSolution. Have the planner turn it into a QuerySolution.
        boost::scoped_ptr<CachedSolution> cs(rawCS);
        QuerySolution *qs, *backupQs;
        // NOTE(review): 'chosenSolution' only ever aliases 'qs' despite the
        // comment below; the backup solution reaches the executor via the
        // backup root stage instead. Confirm this is intended.
        QuerySolution*& chosenSolution=qs; // either qs or backupQs
        Status status = QueryPlanner::planFromCache(*canonicalQuery.get(),
                                                    plannerParams,
                                                    *cs,
                                                    &qs,
                                                    &backupQs);

        if (status.isOK()) {
            // the working set will be shared by the root and backupRoot plans
            // and owned by the containing single-solution-runner
            //
            WorkingSet* sharedWs = new WorkingSet();

            PlanStage *root, *backupRoot=NULL;
            verify(StageBuilder::build(txn, collection, *qs, sharedWs, &root));
            if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT)
                && turnIxscanIntoCount(qs)) {
                LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
                       << ", planSummary: " << getPlanSummary(*qs);

                // Fast count doesn't need a backup plan; free it.
                if (NULL != backupQs) {
                    delete backupQs;
                }
            }
            else if (NULL != backupQs) {
                verify(StageBuilder::build(txn, collection, *backupQs, sharedWs, &backupRoot));
            }

            // add a CachedPlanStage on top of the previous root
            root = new CachedPlanStage(collection, canonicalQuery.get(), root, backupRoot);
            // PlanExecutor takes ownership of 'sharedWs', 'root', the chosen
            // solution, and the query.
            *out = new PlanExecutor(sharedWs, root, chosenSolution, canonicalQuery.release(),
                                    collection);
            return Status::OK();
        }
    }

    if (internalQueryPlanOrChildrenIndependently
        && SubplanStage::canUseSubplanning(*canonicalQuery)) {

        QLOG() << "Running query as sub-queries: " << canonicalQuery->toStringShort();

        auto_ptr<WorkingSet> ws(new WorkingSet());

        SubplanStage* subplan;
        // SubplanStage::make does not take ownership of 'ws' or the query;
        // both are handed to the PlanExecutor only on success.
        Status subplanStatus = SubplanStage::make(txn, collection, ws.get(), plannerParams,
                                                  canonicalQuery.get(), &subplan);
        if (subplanStatus.isOK()) {
            LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();
            *out = new PlanExecutor(ws.release(), subplan, canonicalQuery.release(), collection);
            return Status::OK();
        }
        else {
            // Subplanning failed; fall through to full planning below.
            QLOG() << "Subplanner: " << subplanStatus.reason();
        }
    }

    // Fallback: full planning. Takes ownership of the query.
    return getExecutorAlwaysPlan(txn, collection, canonicalQuery.release(), plannerParams, out);
}