Example #1
    /**
     * For a given query, get a runner.  The runner could be a SingleSolutionRunner, a
     * CachedPlanRunner, or a MultiPlanRunner, depending on the cache/query solver/etc.
     */
    Status getRunner(Collection* collection,
                     CanonicalQuery* rawCanonicalQuery,
                     Runner** out,
                     size_t plannerOptions) {

        verify(rawCanonicalQuery);
        auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);

        // This can happen as we're called by internal clients as well.
        if (NULL == collection) {
            const string& ns = canonicalQuery->ns();
            *out = new EOFRunner(canonicalQuery.release(), ns);
            return Status::OK();
        }

        // If we have an _id index we can use the idhack runner.
        if (canUseIDHack(*canonicalQuery) && collection->getIndexCatalog()->findIdIndex()) {
            *out = new IDHackRunner(collection, canonicalQuery.release());
            return Status::OK();
        }

        // If it's not NULL, we may have indices.  Access the catalog and fill out IndexEntry(s)
        QueryPlannerParams plannerParams;

        IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(false);
        while (ii.more()) {
            const IndexDescriptor* desc = ii.next();
            plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
                                                       desc->isMultikey(),
                                                       desc->isSparse(),
                                                       desc->indexName(),
                                                       desc->infoObj()));
        }

        // If query supports index filters, filter params.indices by indices in query settings.
        QuerySettings* querySettings = collection->infoCache()->getQuerySettings();
        AllowedIndices* allowedIndicesRaw;

        // Filter index catalog if index filters are specified for query.
        // Also, signal to planner that application hint should be ignored.
        if (querySettings->getAllowedIndices(*canonicalQuery, &allowedIndicesRaw)) {
            boost::scoped_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
            filterAllowedIndexEntries(*allowedIndices, &plannerParams.indices);
            plannerParams.indexFiltersApplied = true;
        }

        // Tailable: If the query requests a tailable cursor, the collection must be capped.
        if (canonicalQuery->getParsed().hasOption(QueryOption_CursorTailable)) {
            if (!collection->isCapped()) {
                return Status(ErrorCodes::BadValue,
                              "error processing query: " + canonicalQuery->toString() +
                              " tailable cursor requested on non capped collection");
            }

            // If a sort is specified it must be equal to expectedSort.
            const BSONObj expectedSort = BSON("$natural" << 1);
            const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
            if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
                return Status(ErrorCodes::BadValue,
                              "error processing query: " + canonicalQuery->toString() +
                              " invalid sort specified for tailable cursor: "
                              + actualSort.toString());
            }
        }

        // Process the planning options.
        plannerParams.options = plannerOptions;
        if (storageGlobalParams.noTableScan) {
            const string& ns = canonicalQuery->ns();
            // There are certain cases where we ignore this restriction:
            bool ignore = canonicalQuery->getQueryObj().isEmpty()
                          || (string::npos != ns.find(".system."))
                          || (0 == ns.find("local."));
            if (!ignore) {
                plannerParams.options |= QueryPlannerParams::NO_TABLE_SCAN;
            }
        }

        if (!(plannerParams.options & QueryPlannerParams::NO_TABLE_SCAN)) {
            plannerParams.options |= QueryPlannerParams::INCLUDE_COLLSCAN;
        }

        // If the caller wants a shard filter, make sure we're actually sharded.
        if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
            CollectionMetadataPtr collMetadata =
                shardingState.getCollectionMetadata(canonicalQuery->ns());

            if (collMetadata) {
                plannerParams.shardKey = collMetadata->getKeyPattern();
            }
            else {
                // If there's no metadata don't bother w/the shard filter since we won't know what
                // the key pattern is anyway...
                plannerParams.options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
            }
        }

        // Try to look up a cached solution for the query.
        //
        // Skip the cache lookup for non-cacheable queries.
        // See PlanCache::shouldCacheQuery()
        //
        // TODO: Can the cache have negative data about a solution?
        CachedSolution* rawCS;
        if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
            collection->infoCache()->getPlanCache()->get(*canonicalQuery, &rawCS).isOK()) {
            // We have a CachedSolution.  Have the planner turn it into a QuerySolution.
            boost::scoped_ptr<CachedSolution> cs(rawCS);
            QuerySolution *qs, *backupQs;
            Status status = QueryPlanner::planFromCache(*canonicalQuery, plannerParams, *cs,
                                                        &qs, &backupQs);

            // See SERVER-12438. Unfortunately we have to defer to the backup solution
            // if both a batch size is set and a sort is requested.
            //
            // TODO: it would be really nice to delete this block in the future.
            if (status.isOK() && NULL != backupQs &&
                0 < canonicalQuery->getParsed().getNumToReturn() &&
                !canonicalQuery->getParsed().getSort().isEmpty()) {
                delete qs;

                WorkingSet* ws;
                PlanStage* root;
                verify(StageBuilder::build(*backupQs, &root, &ws));

                // And, run the plan.
                *out = new SingleSolutionRunner(collection,
                                                canonicalQuery.release(),
                                                backupQs, root, ws);
                return Status::OK();
            }

            if (status.isOK()) {
                if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
                    if (turnIxscanIntoCount(qs)) {
                        WorkingSet* ws;
                        PlanStage* root;
                        verify(StageBuilder::build(*qs, &root, &ws));
                        *out = new SingleSolutionRunner(collection,
                                                        canonicalQuery.release(), qs, root, ws);
                        if (NULL != backupQs) {
                            delete backupQs;
                        }
                        return Status::OK();
                    }
                }

                WorkingSet* ws;
                PlanStage* root;
                verify(StageBuilder::build(*qs, &root, &ws));
                CachedPlanRunner* cpr = new CachedPlanRunner(collection,
                                                             canonicalQuery.release(), qs,
                                                             root, ws);

                if (NULL != backupQs) {
                    WorkingSet* backupWs;
                    PlanStage* backupRoot;
                    verify(StageBuilder::build(*backupQs, &backupRoot, &backupWs));
                    cpr->setBackupPlan(backupQs, backupRoot, backupWs);
                }

                *out = cpr;
                return Status::OK();
            }
        }

        if (enableIndexIntersection) {
            plannerParams.options |= QueryPlannerParams::INDEX_INTERSECTION;
        }

        plannerParams.options |= QueryPlannerParams::KEEP_MUTATIONS;

        vector<QuerySolution*> solutions;
        Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions);
        if (!status.isOK()) {
            return Status(ErrorCodes::BadValue,
                          "error processing query: " + canonicalQuery->toString() +
                          " planner returned error: " + status.reason());
        }

        // We cannot figure out how to answer the query.  Perhaps it requires an index
        // we do not have?
        if (0 == solutions.size()) {
            return Status(ErrorCodes::BadValue,
                          str::stream()
                          << "error processing query: "
                          << canonicalQuery->toString()
                          << " No query solutions");
        }

        // See if one of our solutions is a fast count hack in disguise.
        if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
            for (size_t i = 0; i < solutions.size(); ++i) {
                if (turnIxscanIntoCount(solutions[i])) {
                    // Great, we can use solutions[i].  Clean up the other QuerySolution(s).
                    for (size_t j = 0; j < solutions.size(); ++j) {
                        if (j != i) {
                            delete solutions[j];
                        }
                    }

                    // We're not going to cache anything that's fast count.
                    WorkingSet* ws;
                    PlanStage* root;
                    verify(StageBuilder::build(*solutions[i], &root, &ws));
                    *out = new SingleSolutionRunner(collection,
                                                    canonicalQuery.release(),
                                                    solutions[i],
                                                    root,
                                                    ws);
                    return Status::OK();
                }
            }
        }

        if (1 == solutions.size()) {
            // Only one possible plan.  Run it.  Build the stages from the solution.
            WorkingSet* ws;
            PlanStage* root;
            verify(StageBuilder::build(*solutions[0], &root, &ws));

            // And, run the plan.
            *out = new SingleSolutionRunner(collection,
                                            canonicalQuery.release(), solutions[0], root, ws);
            return Status::OK();
        }
        else {
            // See SERVER-12438. In an ideal world we should not arbitrarily prefer certain
            // solutions over others. But unfortunately for historical reasons we are forced
            // to prefer a solution where the index provides the sort, if the batch size
            // is set and a sort is requested. Read SERVER-12438 for details, if you dare.
            //
            // TODO: it would be really nice to delete this entire block in the future.
            if (0 < canonicalQuery->getParsed().getNumToReturn()
                && !canonicalQuery->getParsed().getSort().isEmpty()) {
                // Look for a solution without a blocking sort stage.
                for (size_t i = 0; i < solutions.size(); ++i) {
                    if (!solutions[i]->hasSortStage) {
                        WorkingSet* ws;
                        PlanStage* root;
                        verify(StageBuilder::build(*solutions[i], &root, &ws));

                        // Free unused solutions.
                        for (size_t j = 0; j < solutions.size(); ++j) {
                            if (j != i) {
                                delete solutions[j];
                            }
                        }

                        // And, run the plan.
                        *out = new SingleSolutionRunner(collection,
                                                       canonicalQuery.release(),
                                                       solutions[i], root, ws);
                        return Status::OK();
                    }
                }
            }

            // Many solutions.  Let the MultiPlanRunner pick the best, update the cache, and so on.
            auto_ptr<MultiPlanRunner> mpr(new MultiPlanRunner(collection, canonicalQuery.release()));

            for (size_t i = 0; i < solutions.size(); ++i) {
                WorkingSet* ws;
                PlanStage* root;
                if (solutions[i]->cacheData.get()) {
                    solutions[i]->cacheData->indexFilterApplied = plannerParams.indexFiltersApplied;
                }
                verify(StageBuilder::build(*solutions[i], &root, &ws));
                // Takes ownership of all arguments.
                mpr->addPlan(solutions[i], root, ws);
            }
            *out = mpr.release();
            return Status::OK();
        }
    }
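The ownership contract above is easy to miss: getRunner takes ownership of rawCanonicalQuery immediately (the auto_ptr) and, on success, releases it into whichever Runner it hands back through out. Below is a minimal caller sketch, not part of the source; the CanonicalQuery::canonicalize overload and the Runner::getNext signature are assumptions based on the same era of the codebase.

    // Hypothetical caller sketch (not from the source).
    CanonicalQuery* rawCq;
    Status canonStatus = CanonicalQuery::canonicalize(ns, queryObj, &rawCq);  // assumed overload
    if (!canonStatus.isOK()) {
        return canonStatus;
    }

    Runner* rawRunner;
    Status runnerStatus = getRunner(collection, rawCq, &rawRunner, /*plannerOptions=*/0);
    if (!runnerStatus.isOK()) {
        // getRunner took ownership of rawCq and has already deleted it.
        return runnerStatus;
    }

    boost::scoped_ptr<Runner> runner(rawRunner);
    BSONObj obj;
    while (Runner::RUNNER_ADVANCED == runner->getNext(&obj, NULL)) {
        // Consume obj.
    }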
Example #2
    Status getRunnerAlwaysPlan(Collection* collection,
                               CanonicalQuery* rawCanonicalQuery,
                               const QueryPlannerParams& plannerParams,
                               Runner** out) {

        invariant(collection);
        invariant(rawCanonicalQuery);
        auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);

        vector<QuerySolution*> solutions;
        Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions);
        if (!status.isOK()) {
            return Status(ErrorCodes::BadValue,
                          "error processing query: " + canonicalQuery->toString() +
                          " planner returned error: " + status.reason());
        }

        // We cannot figure out how to answer the query.  Perhaps it requires an index
        // we do not have?
        if (0 == solutions.size()) {
            return Status(ErrorCodes::BadValue,
                          str::stream()
                          << "error processing query: "
                          << canonicalQuery->toString()
                          << " No query solutions");
        }

        // See if one of our solutions is a fast count hack in disguise.
        if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
            for (size_t i = 0; i < solutions.size(); ++i) {
                if (turnIxscanIntoCount(solutions[i])) {
                    // Great, we can use solutions[i].  Clean up the other QuerySolution(s).
                    for (size_t j = 0; j < solutions.size(); ++j) {
                        if (j != i) {
                            delete solutions[j];
                        }
                    }

                    LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
                           << ", planSummary: " << getPlanSummary(*solutions[i]);

                    // We're not going to cache anything that's fast count.
                    WorkingSet* ws;
                    PlanStage* root;
                    verify(StageBuilder::build(collection, *solutions[i], &root, &ws));
                    *out = new SingleSolutionRunner(collection,
                                                    canonicalQuery.release(),
                                                    solutions[i],
                                                    root,
                                                    ws);
                    return Status::OK();
                }
            }
        }

        if (1 == solutions.size()) {
            LOG(2) << "Only one plan is available; it will be run but will not be cached. "
                   << canonicalQuery->toStringShort()
                   << ", planSummary: " << getPlanSummary(*solutions[0]);

            // Only one possible plan.  Run it.  Build the stages from the solution.
            WorkingSet* ws;
            PlanStage* root;
            verify(StageBuilder::build(collection, *solutions[0], &root, &ws));

            // And, run the plan.
            *out = new SingleSolutionRunner(collection,
                                            canonicalQuery.release(),
                                            solutions[0],
                                            root,
                                            ws);
            return Status::OK();
        }
        else {
            // Many solutions.  Let the MultiPlanRunner pick the best, update the cache, and so on.
            auto_ptr<MultiPlanRunner> mpr(new MultiPlanRunner(collection, canonicalQuery.release()));

            for (size_t i = 0; i < solutions.size(); ++i) {
                WorkingSet* ws;
                PlanStage* root;
                if (solutions[i]->cacheData.get()) {
                    solutions[i]->cacheData->indexFilterApplied = plannerParams.indexFiltersApplied;
                }
                verify(StageBuilder::build(collection, *solutions[i], &root, &ws));
                // Takes ownership of all arguments.
                mpr->addPlan(solutions[i], root, ws);
            }
            *out = mpr.release();
            return Status::OK();
        }
    }
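Unlike Example 1, this variant never consults the plan cache or the idhack path: the caller is expected to hand in an already populated QueryPlannerParams. A hedged setup sketch follows, assuming the fillOutPlannerParams helper seen in Example 5 is available in this branch as well.

    // Hypothetical caller sketch (not from the source).
    QueryPlannerParams plannerParams;
    plannerParams.options = 0;  // default planner options
    fillOutPlannerParams(collection, rawCanonicalQuery, &plannerParams);  // assumed helper

    Runner* rawRunner;
    Status status = getRunnerAlwaysPlan(collection, rawCanonicalQuery, plannerParams, &rawRunner);
    if (!status.isOK()) {
        // The callee took ownership of rawCanonicalQuery and has already deleted it.
        return status;
    }
    boost::scoped_ptr<Runner> runner(rawRunner);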
Example #3
    Status getExecutorAlwaysPlan(OperationContext* txn,
                                 Collection* collection,
                                 CanonicalQuery* rawCanonicalQuery,
                                 const QueryPlannerParams& plannerParams,
                                 PlanExecutor** execOut) {
        invariant(collection);
        invariant(rawCanonicalQuery);
        auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);

        *execOut = NULL;

        vector<QuerySolution*> solutions;
        Status status = QueryPlanner::plan(*canonicalQuery.get(), plannerParams, &solutions);
        if (!status.isOK()) {
            return Status(ErrorCodes::BadValue,
                          "error processing query: " + canonicalQuery->toString() +
                          " planner returned error: " + status.reason());
        }

        // We cannot figure out how to answer the query.  Perhaps it requires an index
        // we do not have?
        if (0 == solutions.size()) {
            return Status(ErrorCodes::BadValue,
                          str::stream()
                          << "error processing query: "
                          << canonicalQuery->toString()
                          << " No query solutions");
        }

        // See if one of our solutions is a fast count hack in disguise.
        if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
            for (size_t i = 0; i < solutions.size(); ++i) {
                if (turnIxscanIntoCount(solutions[i])) {
                    // Great, we can use solutions[i].  Clean up the other QuerySolution(s).
                    for (size_t j = 0; j < solutions.size(); ++j) {
                        if (j != i) {
                            delete solutions[j];
                        }
                    }

                    LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
                           << ", planSummary: " << getPlanSummary(*solutions[i]);

                    // We're not going to cache anything that's fast count.
                    WorkingSet* ws = new WorkingSet();
                    PlanStage* root;

                    verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));

                    *execOut = new PlanExecutor(ws, root, solutions[i], canonicalQuery.release(),
                                                collection);
                    return Status::OK();
                }
            }
        }

        if (1 == solutions.size()) {
            LOG(2) << "Only one plan is available; it will be run but will not be cached. "
                   << canonicalQuery->toStringShort()
                   << ", planSummary: " << getPlanSummary(*solutions[0]);

            // Only one possible plan.  Run it.  Build the stages from the solution.
            WorkingSet* ws = new WorkingSet();
            PlanStage* root;

            verify(StageBuilder::build(txn, collection, *solutions[0], ws, &root));

            *execOut = new PlanExecutor(ws, root, solutions[0], canonicalQuery.release(),
                                        collection);
            return Status::OK();
        }
        else {
            // Many solutions.  Create a MultiPlanStage to pick the best, update the cache, and so on.

            // The working set will be shared by all candidate plans and owned by the
            // PlanExecutor created below.
            WorkingSet* sharedWorkingSet = new WorkingSet();

            MultiPlanStage* multiPlanStage = new MultiPlanStage(collection, canonicalQuery.get());

            for (size_t ix = 0; ix < solutions.size(); ++ix) {
                if (solutions[ix]->cacheData.get()) {
                    solutions[ix]->cacheData->indexFilterApplied = plannerParams.indexFiltersApplied;
                }

                // Version of StageBuilder::build used when the WorkingSet is shared.
                PlanStage* nextPlanRoot;
                verify(StageBuilder::build(txn, collection, *solutions[ix],
                                           sharedWorkingSet, &nextPlanRoot));

                // Owns none of the arguments
                multiPlanStage->addPlan(solutions[ix], nextPlanRoot, sharedWorkingSet);
            }

            // Do the plan selection up front.
            multiPlanStage->pickBestPlan();

            PlanExecutor* exec = new PlanExecutor(sharedWorkingSet, multiPlanStage,
                                                  canonicalQuery.release(), collection);

            *execOut = exec;
            return Status::OK();
        }
    }
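Example 3 is the same always-plan flow as Example 2, migrated from Runner to PlanExecutor and threaded through an OperationContext. A hedged consumer sketch follows; the exact return type of PlanExecutor::getNext changed more than once during this transition, so the Runner::RUNNER_ADVANCED spelling below is an assumption.

    // Hypothetical caller sketch (not from the source).
    PlanExecutor* rawExec;
    Status status = getExecutorAlwaysPlan(txn, collection, rawCq, plannerParams, &rawExec);
    if (!status.isOK()) {
        // The callee took ownership of rawCq and has already deleted it.
        return status;
    }

    boost::scoped_ptr<PlanExecutor> exec(rawExec);
    BSONObj obj;
    while (Runner::RUNNER_ADVANCED == exec->getNext(&obj, NULL)) {  // assumed return type
        // Consume obj.
    }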
Example #4
    Status getRunnerFromCache(CanonicalQuery* canonicalQuery,
                              Collection* collection,
                              const QueryPlannerParams& plannerParams,
                              Runner** out) {
        // Skip the cache lookup for non-cacheable queries.
        if (!PlanCache::shouldCacheQuery(*canonicalQuery)) {
            return Status(ErrorCodes::BadValue, "query is not cacheable");
        }

        CachedSolution* rawCS;
        Status cacheLookupStatus = collection->infoCache()->getPlanCache()->get(*canonicalQuery,
                                                                                &rawCS);
        if (!cacheLookupStatus.isOK()) {
            return cacheLookupStatus;
        }

        // We have a CachedSolution.  Have the planner turn it into a QuerySolution.
        boost::scoped_ptr<CachedSolution> cs(rawCS);
        QuerySolution *qs, *backupQs;
        Status status = QueryPlanner::planFromCache(*canonicalQuery,
                                                    plannerParams,
                                                    *cs,
                                                    &qs,
                                                    &backupQs);
        if (!status.isOK()) {
            return status;
        }

        // If our cached solution is a hit for a count query, try to turn it into a fast count
        // thing.
        if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
            if (turnIxscanIntoCount(qs)) {
                LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
                       << ", planSummary: " << getPlanSummary(*qs);

                WorkingSet* ws;
                PlanStage* root;
                verify(StageBuilder::build(collection, *qs, &root, &ws));
                *out = new SingleSolutionRunner(collection,
                                                canonicalQuery, qs, root, ws);
                if (NULL != backupQs) {
                    delete backupQs;
                }
                return Status::OK();
            }
        }

        // If we're here, we're going to use the cached plan and things are normal.
        LOG(2) << "Using cached query plan: " << canonicalQuery->toStringShort()
               << ", planSummary: " << getPlanSummary(*qs);

        WorkingSet* ws;
        PlanStage* root;
        verify(StageBuilder::build(collection, *qs, &root, &ws));
        CachedPlanRunner* cpr = new CachedPlanRunner(collection,
                                                     canonicalQuery,
                                                     qs,
                                                     root,
                                                     ws);

        // If there's a backup solution, let the CachedPlanRunner know about it.
        if (NULL != backupQs) {
            WorkingSet* backupWs;
            PlanStage* backupRoot;
            verify(StageBuilder::build(collection, *backupQs, &backupRoot, &backupWs));
            cpr->setBackupPlan(backupQs, backupRoot, backupWs);
        }

        *out = cpr;
        return Status::OK();
    }
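Note the ownership difference from Examples 1 through 3: getRunnerFromCache receives a raw CanonicalQuery* and only the runner built on the success path takes ownership of it; on any failing status the caller still owns the query and typically falls back to planning from scratch. A hedged sketch of that calling pattern, with getRunnerAlwaysPlan as the assumed fallback:

    // Hypothetical caller sketch (not from the source).
    auto_ptr<CanonicalQuery> cq(rawCanonicalQuery);

    Runner* rawRunner;
    Status cacheStatus = getRunnerFromCache(cq.get(), collection, plannerParams, &rawRunner);
    if (cacheStatus.isOK()) {
        cq.release();  // the returned runner now owns the CanonicalQuery
        *out = rawRunner;
        return Status::OK();
    }

    // Cache miss, non-cacheable query, or planFromCache failure: plan from scratch.
    return getRunnerAlwaysPlan(collection, cq.release(), plannerParams, out);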
Example #5
    Status getExecutor(OperationContext* txn,
                      Collection* collection,
                      CanonicalQuery* rawCanonicalQuery,
                      PlanExecutor** out,
                      size_t plannerOptions) {
        invariant(rawCanonicalQuery);
        auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);

        // This can happen as we're called by internal clients as well.
        if (NULL == collection) {
            const string& ns = canonicalQuery->ns();
            LOG(2) << "Collection " << ns << " does not exist."
                   << " Using EOF runner: " << canonicalQuery->toStringShort();
            EOFStage* eofStage = new EOFStage();
            WorkingSet* ws = new WorkingSet();
            *out = new PlanExecutor(ws, eofStage, canonicalQuery.release(), collection);
            return Status::OK();
        }

        // Fill out the planning params.  We use these for both cached solutions and non-cached.
        QueryPlannerParams plannerParams;
        plannerParams.options = plannerOptions;
        fillOutPlannerParams(collection, canonicalQuery.get(), &plannerParams);

        // If we have an _id index we can use the idhack runner.
        if (IDHackStage::supportsQuery(*canonicalQuery.get()) &&
            collection->getIndexCatalog()->findIdIndex()) {
            return getExecutorIDHack(txn, collection, canonicalQuery.release(), plannerParams, out);
        }

        // Tailable: If the query requests a tailable cursor, the collection must be capped.
        if (canonicalQuery->getParsed().hasOption(QueryOption_CursorTailable)) {
            if (!collection->isCapped()) {
                return Status(ErrorCodes::BadValue,
                              "error processing query: " + canonicalQuery->toString() +
                              " tailable cursor requested on non capped collection");
            }

            // If a sort is specified it must be equal to expectedSort.
            const BSONObj expectedSort = BSON("$natural" << 1);
            const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
            if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
                return Status(ErrorCodes::BadValue,
                              "error processing query: " + canonicalQuery->toString() +
                              " invalid sort specified for tailable cursor: "
                              + actualSort.toString());
            }
        }

        // Try to look up a cached solution for the query.

        CachedSolution* rawCS;
        if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
            collection->infoCache()->getPlanCache()->get(*canonicalQuery.get(), &rawCS).isOK()) {
            // We have a CachedSolution.  Have the planner turn it into a QuerySolution.
            boost::scoped_ptr<CachedSolution> cs(rawCS);
            QuerySolution *qs, *backupQs;
            QuerySolution*& chosenSolution = qs;  // either qs or backupQs
            Status status = QueryPlanner::planFromCache(*canonicalQuery.get(), plannerParams, *cs,
                                                        &qs, &backupQs);

            if (status.isOK()) {
                // The working set will be shared by the root and backupRoot plans
                // and owned by the PlanExecutor we create below.
                WorkingSet* sharedWs = new WorkingSet();

                PlanStage *root, *backupRoot = NULL;
                verify(StageBuilder::build(txn, collection, *qs, sharedWs, &root));
                if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT)
                    && turnIxscanIntoCount(qs)) {
                    LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
                           << ", planSummary: " << getPlanSummary(*qs);

                    if (NULL != backupQs) {
                        delete backupQs;
                    }
                }
                else if (NULL != backupQs) {
                    verify(StageBuilder::build(txn, collection, *backupQs, sharedWs, &backupRoot));
                }

                // add a CachedPlanStage on top of the previous root
                root = new CachedPlanStage(collection, canonicalQuery.get(), root, backupRoot);

                *out = new PlanExecutor(sharedWs, root, chosenSolution, canonicalQuery.release(),
                                        collection);
                return Status::OK();
            }
        }

        if (internalQueryPlanOrChildrenIndependently
            && SubplanStage::canUseSubplanning(*canonicalQuery)) {

            QLOG() << "Running query as sub-queries: " << canonicalQuery->toStringShort();

            auto_ptr<WorkingSet> ws(new WorkingSet());

            SubplanStage* subplan;
            Status subplanStatus = SubplanStage::make(txn, collection, ws.get(), plannerParams,
                                                      canonicalQuery.get(), &subplan);
            if (subplanStatus.isOK()) {
                LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();
                *out = new PlanExecutor(ws.release(), subplan, canonicalQuery.release(),
                                        collection);
                return Status::OK();
            }
            else {
                QLOG() << "Subplanner: " << subplanStatus.reason();
            }
        }

        return getExecutorAlwaysPlan(txn, collection, canonicalQuery.release(), plannerParams, out);
    }
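Example 5 is the consolidated entry point: the EOF case, idhack, the plan cache, subplanning, and the always-plan fallback are all dispatched from one function. A hedged top-level sketch of driving it follows; the canonicalize overload and the zero plannerOptions value are assumptions.

    // Hypothetical caller sketch (not from the source).
    CanonicalQuery* rawCq;
    Status canonStatus = CanonicalQuery::canonicalize(ns, queryObj, &rawCq);  // assumed overload
    if (!canonStatus.isOK()) {
        return canonStatus;
    }

    PlanExecutor* rawExec;
    Status execStatus = getExecutor(txn, collection, rawCq, &rawExec, /*plannerOptions=*/0);
    if (!execStatus.isOK()) {
        // getExecutor took ownership of rawCq and has already deleted it.
        return execStatus;
    }
    boost::scoped_ptr<PlanExecutor> exec(rawExec);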