static Status _extractFullEqualityMatches(const MatchExpression& root,
                                          const FieldRefSet* fullPathsToExtract,
                                          EqualityMatches* equalities) {
    if (root.matchType() == MatchExpression::EQ) {
        // Extract equality matches
        const EqualityMatchExpression& eqChild =
            static_cast<const EqualityMatchExpression&>(root);

        FieldRef path(eqChild.path());

        if (fullPathsToExtract) {
            FieldRefSet conflictPaths;
            fullPathsToExtract->findConflicts(&path, &conflictPaths);

            // Ignore if this path is unrelated to the full paths
            if (conflictPaths.empty())
                return Status::OK();

            // Make sure we're a prefix of all the conflict paths
            Status status = checkPathIsPrefixOf(path, conflictPaths);
            if (!status.isOK())
                return status;
        }

        Status status = checkEqualityConflicts(*equalities, path);
        if (!status.isOK())
            return status;

        equalities->insert(make_pair(eqChild.path(), &eqChild));
    } else if (root.matchType() == MatchExpression::AND) {
        // Further explore $and matches
        for (size_t i = 0; i < root.numChildren(); ++i) {
            MatchExpression* child = root.getChild(i);
            Status status = _extractFullEqualityMatches(*child, fullPathsToExtract, equalities);
            if (!status.isOK())
                return status;
        }
    }

    return Status::OK();
}
// static
MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
    // root->isLogical() is true now. We care about AND, OR, and NOT. NOR currently scares us.
    if (MatchExpression::AND == root->matchType() || MatchExpression::OR == root->matchType()) {
        // We could have AND of AND of AND. Make sure we clean up our children before merging
        // them.
        // UNITTEST 11738048
        for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
            (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
        }

        // If any of our children are of the same logical operator that we are, we remove the
        // child's children and append them to ourselves after we examine all children.
        std::vector<MatchExpression*> absorbedChildren;

        for (size_t i = 0; i < root->numChildren();) {
            MatchExpression* child = root->getChild(i);
            if (child->matchType() == root->matchType()) {
                // AND of an AND or OR of an OR. Absorb child's children into ourself.
                for (size_t j = 0; j < child->numChildren(); ++j) {
                    absorbedChildren.push_back(child->getChild(j));
                }
                // TODO(opt): this is possibly n^2-ish
                root->getChildVector()->erase(root->getChildVector()->begin() + i);
                child->getChildVector()->clear();
                // Note that this only works because we cleared the child's children
                delete child;
                // Don't increment 'i' as the current child 'i' used to be child 'i+1'
            } else {
                ++i;
            }
        }

        root->getChildVector()->insert(
            root->getChildVector()->end(), absorbedChildren.begin(), absorbedChildren.end());

        // AND of 1 thing is the thing, OR of 1 thing is the thing.
        if (1 == root->numChildren()) {
            MatchExpression* ret = root->getChild(0);
            root->getChildVector()->clear();
            delete root;
            return ret;
        }
    } else if (MatchExpression::NOT == root->matchType()) {
        // Normalize the rest of the tree hanging off this NOT node.
        NotMatchExpression* nme = static_cast<NotMatchExpression*>(root);
        MatchExpression* child = nme->releaseChild();
        // normalizeTree(...) takes ownership of 'child', and then
        // transfers ownership of its return value to 'nme'.
        nme->resetChild(normalizeTree(child));
    } else if (MatchExpression::ELEM_MATCH_VALUE == root->matchType()) {
        // Just normalize our children.
        for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
            (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
        }
    }

    return root;
}
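// A minimal sketch (hypothetical test, not from the source tree) illustrating the effect of
// normalizeTree() above: a nested $and flattens into a single AND node with all the leaf
// predicates as direct children. It assumes the raw-pointer MatchExpressionParser::parse()
// signature used by the tests below; the test name is made up for illustration.
TEST( CanonicalQueryNormalizeSketch, FlattenNestedAnd ) {
    BSONObj query = fromjson( "{$and: [{$and: [{a: 1}, {b: 1}]}, {c: 1}]}" );
    StatusWithMatchExpression result = MatchExpressionParser::parse( query );
    ASSERT_TRUE( result.isOK() );

    // normalizeTree() takes ownership of the parsed tree and returns the normalized root.
    MatchExpression* normalized = CanonicalQuery::normalizeTree( result.getValue() );
    ASSERT_EQUALS( MatchExpression::AND, normalized->matchType() );
    ASSERT_EQUALS( 3U, normalized->numChildren() );

    delete normalized;
}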
TEST( MatchExpressionParserText, Parse1 ) {
    BSONObj query = fromjson( "{$text:{$search:\"awesome\", $language:\"english\"}}" );
    StatusWithMatchExpression result = MatchExpressionParser::parse( query );
    ASSERT_TRUE( result.isOK() );

    MatchExpression* exp = result.getValue();
    ASSERT_EQUALS( MatchExpression::TEXT, exp->matchType() );

    TextMatchExpression* textExp = static_cast<TextMatchExpression*>( exp );
    ASSERT_EQUALS( textExp->getQuery(), "awesome" );
    ASSERT_EQUALS( textExp->getLanguage(), "english" );
}
TEST( MatchExpressionParserGeoNear, ParseNear ) {
    BSONObj query = fromjson("{loc:{$near:{$maxDistance:100, "
                             "$geometry:{type:\"Point\", coordinates:[0,0]}}}}");
    StatusWithMatchExpression result = MatchExpressionParser::parse( query );
    ASSERT_TRUE( result.isOK() );

    MatchExpression* exp = result.getValue();
    ASSERT_EQUALS(MatchExpression::GEO_NEAR, exp->matchType());

    GeoNearMatchExpression* gnexp = static_cast<GeoNearMatchExpression*>(exp);
    ASSERT_EQUALS(gnexp->getData().maxDistance, 100);
}
// For $near, $nearSphere, and $geoNear syntax of:
// {
//   $near/$nearSphere/$geoNear: [ <x>, <y> ],
//   $minDistance: <distance in radians>,
//   $maxDistance: <distance in radians>
// }
TEST(MatchExpressionParserGeoNear, ParseValidNear) {
    BSONObj query = fromjson("{loc: {$near: [0,0], $maxDistance: 100, $minDistance: 50}}");
    StatusWithMatchExpression result = MatchExpressionParser::parse(query);
    ASSERT_TRUE(result.isOK());

    MatchExpression* exp = result.getValue().get();
    ASSERT_EQ(MatchExpression::GEO_NEAR, exp->matchType());

    GeoNearMatchExpression* gnexp = static_cast<GeoNearMatchExpression*>(exp);
    ASSERT_EQ(gnexp->getData().maxDistance, 100);
    ASSERT_EQ(gnexp->getData().minDistance, 50);
}
// For $near, $nearSphere, and $geoNear syntax of:
// {
//   $near/$nearSphere/$geoNear: [ <x>, <y> ],
//   $minDistance: <distance in radians>,
//   $maxDistance: <distance in radians>
// }
TEST(MatchExpressionParserGeoNear, ParseValidNear) {
    BSONObj query = fromjson("{loc: {$near: [0,0], $maxDistance: 100, $minDistance: 50}}");
    const CollatorInterface* collator = nullptr;
    StatusWithMatchExpression result =
        MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
    ASSERT_TRUE(result.isOK());

    MatchExpression* exp = result.getValue().get();
    ASSERT_EQ(MatchExpression::GEO_NEAR, exp->matchType());

    GeoNearMatchExpression* gnexp = static_cast<GeoNearMatchExpression*>(exp);
    ASSERT_EQ(gnexp->getData().maxDistance, 100);
    ASSERT_EQ(gnexp->getData().minDistance, 50);
}
// static
MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
    // root->isLogical() is true now. We care about AND and OR. Negations currently scare us.
    if (MatchExpression::AND == root->matchType() || MatchExpression::OR == root->matchType()) {
        // We could have AND of AND of AND. Make sure we clean up our children before merging
        // them.
        // UNITTEST 11738048
        for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
            (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
        }

        // If any of our children are of the same logical operator that we are, we remove the
        // child's children and append them to ourselves after we examine all children.
        vector<MatchExpression*> absorbedChildren;

        for (size_t i = 0; i < root->numChildren();) {
            MatchExpression* child = root->getChild(i);
            if (child->matchType() == root->matchType()) {
                // AND of an AND or OR of an OR. Absorb child's children into ourself.
                for (size_t j = 0; j < child->numChildren(); ++j) {
                    absorbedChildren.push_back(child->getChild(j));
                }
                // TODO(opt): this is possibly n^2-ish
                root->getChildVector()->erase(root->getChildVector()->begin() + i);
                child->getChildVector()->clear();
                // Note that this only works because we cleared the child's children
                delete child;
                // Don't increment 'i' as the current child 'i' used to be child 'i+1'
            } else {
                ++i;
            }
        }

        root->getChildVector()->insert(root->getChildVector()->end(),
                                       absorbedChildren.begin(),
                                       absorbedChildren.end());

        // AND of 1 thing is the thing, OR of 1 thing is the thing.
        if (1 == root->numChildren()) {
            MatchExpression* ret = root->getChild(0);
            root->getChildVector()->clear();
            delete root;
            return ret;
        }
    }

    return root;
}
TEST(MatchExpressionParserGeoNear, ParseNear) {
    BSONObj query = fromjson(
        "{loc:{$near:{$maxDistance:100, "
        "$geometry:{type:\"Point\", coordinates:[0,0]}}}}");
    const CollatorInterface* collator = nullptr;
    StatusWithMatchExpression result =
        MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
    ASSERT_TRUE(result.isOK());

    MatchExpression* exp = result.getValue().get();
    ASSERT_EQUALS(MatchExpression::GEO_NEAR, exp->matchType());

    GeoNearMatchExpression* gnexp = static_cast<GeoNearMatchExpression*>(exp);
    ASSERT_EQUALS(gnexp->getData().maxDistance, 100);
}
// static
void QueryPlannerIXSelect::stripUnneededAssignments(MatchExpression* node,
                                                    const std::vector<IndexEntry>& indices) {
    if (MatchExpression::AND == node->matchType()) {
        for (size_t i = 0; i < node->numChildren(); i++) {
            MatchExpression* child = node->getChild(i);

            if (MatchExpression::EQ != child->matchType()) {
                continue;
            }

            if (!child->getTag()) {
                continue;
            }

            // We found an EQ child of an AND which is tagged.
            RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());

            // Look through all of the indices for which this predicate can be answered with
            // the leading field of the index.
            for (std::vector<size_t>::const_iterator i = rt->first.begin(); i != rt->first.end();
                 ++i) {
                size_t index = *i;

                if (indices[index].unique && 1 == indices[index].keyPattern.nFields()) {
                    // Found an EQ predicate which can use a single-field unique index.
                    // Clear assignments from the entire tree, and add back a single assignment
                    // for 'child' to the unique index.
                    clearAssignments(node);
                    RelevantTag* newRt = static_cast<RelevantTag*>(child->getTag());
                    newRt->first.push_back(index);

                    // Tag state has been reset in the entire subtree at 'root'; nothing
                    // else for us to do.
                    return;
                }
            }
        }
    }

    for (size_t i = 0; i < node->numChildren(); i++) {
        stripUnneededAssignments(node->getChild(i), indices);
    }
}
TEST(MatchExpressionParserGeoNear, ParseValidNearSphere) {
    BSONObj query = fromjson("{loc: {$nearSphere: [0,0], $maxDistance: 100, $minDistance: 50}}");
    const CollatorInterface* collator = nullptr;
    const boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
    StatusWithMatchExpression result =
        MatchExpressionParser::parse(query,
                                     collator,
                                     expCtx,
                                     ExtensionsCallbackNoop(),
                                     MatchExpressionParser::kAllowAllSpecialFeatures);
    ASSERT_TRUE(result.isOK());

    MatchExpression* exp = result.getValue().get();
    ASSERT_EQ(MatchExpression::GEO_NEAR, exp->matchType());

    GeoNearMatchExpression* gnexp = static_cast<GeoNearMatchExpression*>(exp);
    ASSERT_EQ(gnexp->getData().maxDistance, 100);
    ASSERT_EQ(gnexp->getData().minDistance, 50);
}
/**
 * Traverse the subtree rooted at 'node' to remove invalid RelevantTag assignments to text index
 * 'idx', which has prefix paths 'prefixPaths'.
 */
static void stripInvalidAssignmentsToTextIndex(
    MatchExpression* node,
    size_t idx,
    const unordered_set<StringData, StringData::Hasher>& prefixPaths) {
    // If we're here, there are prefixPaths and node is either:
    // 1. a text pred which we can't use as we have nothing over its prefix, or
    // 2. a non-text pred which we can't use as we don't have a text pred AND-related.
    if (Indexability::nodeCanUseIndexOnOwnField(node)) {
        removeIndexRelevantTag(node, idx);
        return;
    }

    // Do not traverse tree beyond negation node.
    if (node->matchType() == MatchExpression::NOT || node->matchType() == MatchExpression::NOR) {
        return;
    }

    // For anything to use a text index with prefixes, we require that:
    // 1. The text pred exists in an AND,
    // 2. The non-text preds that use the text index's prefixes are also in that AND.
    if (node->matchType() != MatchExpression::AND) {
        // It's an OR or some kind of array operator.
        for (size_t i = 0; i < node->numChildren(); ++i) {
            stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
        }
        return;
    }

    // If we're here, we're an AND. Determine whether the children satisfy the index prefix for
    // the text index.
    invariant(node->matchType() == MatchExpression::AND);

    bool hasText = false;

    // The AND must have an EQ predicate for each prefix path. When we encounter a child with a
    // tag we remove it from childrenPrefixPaths. All children exist if this set is empty at
    // the end.
    unordered_set<StringData, StringData::Hasher> childrenPrefixPaths = prefixPaths;

    for (size_t i = 0; i < node->numChildren(); ++i) {
        MatchExpression* child = node->getChild(i);
        RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());

        if (NULL == tag) {
            // 'child' could be a logical operator. Maybe there are some assignments hiding
            // inside.
            stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
            continue;
        }

        bool inFirst = tag->first.end() != std::find(tag->first.begin(), tag->first.end(), idx);

        bool inNotFirst =
            tag->notFirst.end() != std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);

        if (inFirst || inNotFirst) {
            // Great! 'child' was assigned to our index.
            if (child->matchType() == MatchExpression::TEXT) {
                hasText = true;
            } else {
                childrenPrefixPaths.erase(child->path());
                // One fewer prefix we're looking for, possibly. Note that we could have a
                // suffix assignment on the index and wind up here. In this case the erase
                // above won't do anything since a suffix isn't a prefix.
            }
        } else {
            // Recurse on the children to ensure that they're not hiding any assignments
            // to idx.
            stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
        }
    }

    // Our prereqs for using the text index were not satisfied so we remove the assignments from
    // all children of the AND.
    if (!hasText || !childrenPrefixPaths.empty()) {
        for (size_t i = 0; i < node->numChildren(); ++i) {
            stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
        }
    }
}
static void stripInvalidAssignmentsTo2dsphereIndex(MatchExpression* node, size_t idx) {
    if (Indexability::nodeCanUseIndexOnOwnField(node)) {
        removeIndexRelevantTag(node, idx);
        return;
    }

    const MatchExpression::MatchType nodeType = node->matchType();

    // Don't bother peeking inside of negations.
    if (MatchExpression::NOT == nodeType || MatchExpression::NOR == nodeType) {
        return;
    }

    if (MatchExpression::AND != nodeType) {
        // It's an OR or some kind of array operator.
        for (size_t i = 0; i < node->numChildren(); ++i) {
            stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
        }
        return;
    }

    bool hasGeoField = false;

    for (size_t i = 0; i < node->numChildren(); ++i) {
        MatchExpression* child = node->getChild(i);
        RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());

        if (NULL == tag) {
            // 'child' could be a logical operator. Maybe there are some assignments hiding
            // inside.
            stripInvalidAssignmentsTo2dsphereIndex(child, idx);
            continue;
        }

        bool inFirst = tag->first.end() != std::find(tag->first.begin(), tag->first.end(), idx);

        bool inNotFirst =
            tag->notFirst.end() != std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);

        // If there is an index assignment...
        if (inFirst || inNotFirst) {
            // And it's a geo predicate...
            if (MatchExpression::GEO == child->matchType() ||
                MatchExpression::GEO_NEAR == child->matchType()) {
                hasGeoField = true;
            }
        } else {
            // Recurse on the children to ensure that they're not hiding any assignments
            // to idx.
            stripInvalidAssignmentsTo2dsphereIndex(child, idx);
        }
    }

    // If there isn't a geo predicate our results aren't a subset of what's in the geo index, so
    // if we use the index we'll miss results.
    if (!hasGeoField) {
        for (size_t i = 0; i < node->numChildren(); ++i) {
            stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
        }
    }
}
// static
MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
    if (MatchExpression::AND == root->matchType() || MatchExpression::OR == root->matchType()) {
        // We could have AND of AND of AND. Make sure we clean up our children before merging them.
        for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
            (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
        }

        // If any of our children are of the same logical operator that we are, we remove the
        // child's children and append them to ourselves after we examine all children.
        std::vector<MatchExpression*> absorbedChildren;

        for (size_t i = 0; i < root->numChildren();) {
            MatchExpression* child = root->getChild(i);
            if (child->matchType() == root->matchType()) {
                // AND of an AND or OR of an OR. Absorb child's children into ourself.
                for (size_t j = 0; j < child->numChildren(); ++j) {
                    absorbedChildren.push_back(child->getChild(j));
                }
                // TODO(opt): this is possibly n^2-ish
                root->getChildVector()->erase(root->getChildVector()->begin() + i);
                child->getChildVector()->clear();
                // Note that this only works because we cleared the child's children
                delete child;
                // Don't increment 'i' as the current child 'i' used to be child 'i+1'
            } else {
                ++i;
            }
        }

        root->getChildVector()->insert(
            root->getChildVector()->end(), absorbedChildren.begin(), absorbedChildren.end());

        // AND of 1 thing is the thing, OR of 1 thing is the thing.
        if (1 == root->numChildren()) {
            MatchExpression* ret = root->getChild(0);
            root->getChildVector()->clear();
            delete root;
            return ret;
        }
    } else if (MatchExpression::NOR == root->matchType()) {
        // First clean up children.
        for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
            (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
        }

        // NOR of one thing is NOT of the thing.
        if (1 == root->numChildren()) {
            // Detach the child and assume ownership.
            std::unique_ptr<MatchExpression> child(root->getChild(0));
            root->getChildVector()->clear();

            // Delete the root when this goes out of scope.
            std::unique_ptr<NorMatchExpression> ownedRoot(static_cast<NorMatchExpression*>(root));

            // Make a NOT to be the new root and transfer ownership of the child to it.
            auto newRoot = stdx::make_unique<NotMatchExpression>();
            newRoot->init(child.release()).transitional_ignore();

            return newRoot.release();
        }
    } else if (MatchExpression::NOT == root->matchType()) {
        // Normalize the rest of the tree hanging off this NOT node.
        NotMatchExpression* nme = static_cast<NotMatchExpression*>(root);
        MatchExpression* child = nme->releaseChild();
        // normalizeTree(...) takes ownership of 'child', and then
        // transfers ownership of its return value to 'nme'.
        nme->resetChild(normalizeTree(child));
    } else if (MatchExpression::ELEM_MATCH_OBJECT == root->matchType()) {
        // Normalize the rest of the tree hanging off this ELEM_MATCH_OBJECT node.
        ElemMatchObjectMatchExpression* emome = static_cast<ElemMatchObjectMatchExpression*>(root);
        auto child = emome->releaseChild();
        // normalizeTree(...) takes ownership of 'child', and then
        // transfers ownership of its return value to 'emome'.
        emome->resetChild(std::unique_ptr<MatchExpression>(normalizeTree(child.release())));
    } else if (MatchExpression::ELEM_MATCH_VALUE == root->matchType()) {
        // Just normalize our children.
        for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
            (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
        }
    } else if (MatchExpression::MATCH_IN == root->matchType()) {
        std::unique_ptr<InMatchExpression> in(static_cast<InMatchExpression*>(root));

        // IN of 1 regex is the regex.
        if (in->getRegexes().size() == 1 && in->getEqualities().empty()) {
            RegexMatchExpression* childRe = in->getRegexes().begin()->get();
            invariant(!childRe->getTag());

            // Create a new RegexMatchExpression, because 'childRe' does not have a path.
            auto re = stdx::make_unique<RegexMatchExpression>();
            re->init(in->path(), childRe->getString(), childRe->getFlags()).transitional_ignore();
            if (in->getTag()) {
                re->setTag(in->getTag()->clone());
            }
            return normalizeTree(re.release());
        }

        // IN of 1 equality is the equality.
        if (in->getEqualities().size() == 1 && in->getRegexes().empty()) {
            auto eq = stdx::make_unique<EqualityMatchExpression>();
            eq->init(in->path(), *(in->getEqualities().begin())).transitional_ignore();
            eq->setCollator(in->getCollator());
            if (in->getTag()) {
                eq->setTag(in->getTag()->clone());
            }
            return eq.release();
        }

        return in.release();
    }

    return root;
}
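// A minimal sketch (hypothetical test, not from the source tree) of the $in rewrite above:
// a one-element $in with no regexes normalizes to a plain equality on the same path. The
// parse() call mirrors the collator/expCtx signature used by the ParseValidNearSphere test
// below and is an assumption here, as is the test name.
TEST(CanonicalQueryNormalizeSketch, InWithOneEqualityBecomesEq) {
    BSONObj query = fromjson("{a: {$in: [5]}}");
    const CollatorInterface* collator = nullptr;
    const boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
    StatusWithMatchExpression result =
        MatchExpressionParser::parse(query,
                                     collator,
                                     expCtx,
                                     ExtensionsCallbackNoop(),
                                     MatchExpressionParser::kAllowAllSpecialFeatures);
    ASSERT_TRUE(result.isOK());

    // normalizeTree() takes ownership of the raw pointer and returns the rewritten root.
    MatchExpression* normalized = CanonicalQuery::normalizeTree(result.getValue().release());
    ASSERT_EQUALS(MatchExpression::EQ, normalized->matchType());
    ASSERT_EQUALS(normalized->path(), "a");

    delete normalized;
}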
// static
void QueryPlanner::plan(const CanonicalQuery& query,
                        const QueryPlannerParams& params,
                        vector<QuerySolution*>* out) {
    QLOG() << "=============================\n"
           << "Beginning planning, options = " << optionString(params.options) << endl
           << "Canonical query:\n" << query.toString() << endl
           << "=============================" << endl;

    // The shortcut formerly known as IDHACK. See if it's a simple _id query. If so we might
    // just make an ixscan over the _id index and bypass the rest of planning entirely.
    if (!query.getParsed().isExplain() && !query.getParsed().showDiskLoc() &&
        isSimpleIdQuery(query.getParsed().getFilter()) &&
        !query.getParsed().hasOption(QueryOption_CursorTailable)) {
        // See if we can find an _id index.
        for (size_t i = 0; i < params.indices.size(); ++i) {
            if (isIdIndex(params.indices[i].keyPattern)) {
                const IndexEntry& index = params.indices[i];
                QLOG() << "IDHACK using index " << index.toString() << endl;

                // If so, we make a simple scan to find the doc.
                IndexScanNode* isn = new IndexScanNode();
                isn->indexKeyPattern = index.keyPattern;
                isn->indexIsMultiKey = index.multikey;
                isn->direction = 1;
                isn->bounds.isSimpleRange = true;
                BSONObj key = getKeyFromQuery(index.keyPattern, query.getParsed().getFilter());
                isn->bounds.startKey = isn->bounds.endKey = key;
                isn->bounds.endKeyInclusive = true;
                isn->computeProperties();

                QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, isn);
                if (NULL != soln) {
                    out->push_back(soln);
                    QLOG() << "IDHACK solution is:\n" << (*out)[0]->toString() << endl;
                    // And that's it.
                    return;
                }
            }
        }
    }

    for (size_t i = 0; i < params.indices.size(); ++i) {
        QLOG() << "idx " << i << " is " << params.indices[i].toString() << endl;
    }

    bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);

    // If the query requests a tailable cursor, the only solution is a collscan + filter with
    // tailable set on the collscan. TODO: This is a policy departure. Previously I think you
    // could ask for a tailable cursor and it just tried to give you one. Now, we fail if we
    // can't provide one. Is this what we want?
    if (query.getParsed().hasOption(QueryOption_CursorTailable)) {
        if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
            canTableScan) {
            QuerySolution* soln = buildCollscanSoln(query, true, params);
            if (NULL != soln) {
                out->push_back(soln);
            }
        }
        return;
    }

    // The hint can be $natural: 1. If this happens, output a collscan. It's a weird way of
    // saying "table scan for two, please."
    if (!query.getParsed().getHint().isEmpty()) {
        BSONElement natural = query.getParsed().getHint().getFieldDotted("$natural");
        if (!natural.eoo()) {
            QLOG() << "forcing a table scan due to hinted $natural\n";
            if (canTableScan) {
                QuerySolution* soln = buildCollscanSoln(query, false, params);
                if (NULL != soln) {
                    out->push_back(soln);
                }
            }
            return;
        }
    }

    // NOR and NOT we can't handle well with indices. If we see them here, they weren't
    // rewritten to remove the negation. Just output a collscan for those.
    if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::NOT) ||
        QueryPlannerCommon::hasNode(query.root(), MatchExpression::NOR)) {
        // If there's a near predicate, we can't handle this.
        // TODO: Should canonicalized query detect this?
        if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
            warning() << "Can't handle NOT/NOR with GEO_NEAR";
            return;
        }

        QLOG() << "NOT/NOR in plan, just outputting a collscan\n";
        if (canTableScan) {
            QuerySolution* soln = buildCollscanSoln(query, false, params);
            if (NULL != soln) {
                out->push_back(soln);
            }
        }
        return;
    }

    // Figure out what fields we care about.
    unordered_set<string> fields;
    QueryPlannerIXSelect::getFields(query.root(), "", &fields);

    for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
        QLOG() << "predicate over field " << *it << endl;
    }

    // Filter our indices so we only look at indices that are over our predicates.
    vector<IndexEntry> relevantIndices;

    // Hints require us to only consider the hinted index.
    BSONObj hintIndex = query.getParsed().getHint();

    // Snapshot is a form of a hint. If snapshot is set, try to use _id index to make a real
    // plan. If that fails, just scan the _id index.
    if (query.getParsed().isSnapshot()) {
        // Find the ID index in indexKeyPatterns. It's our hint.
        for (size_t i = 0; i < params.indices.size(); ++i) {
            if (isIdIndex(params.indices[i].keyPattern)) {
                hintIndex = params.indices[i].keyPattern;
                break;
            }
        }
    }

    size_t hintIndexNumber = numeric_limits<size_t>::max();

    if (!hintIndex.isEmpty()) {
        // Sigh. If the hint is specified it might be using the index name.
        BSONElement firstHintElt = hintIndex.firstElement();
        if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
            string hintName = firstHintElt.String();
            for (size_t i = 0; i < params.indices.size(); ++i) {
                if (params.indices[i].name == hintName) {
                    QLOG() << "hint by name specified, restricting indices to "
                           << params.indices[i].keyPattern.toString() << endl;
                    relevantIndices.clear();
                    relevantIndices.push_back(params.indices[i]);
                    hintIndexNumber = i;
                    hintIndex = params.indices[i].keyPattern;
                    break;
                }
            }
        } else {
            for (size_t i = 0; i < params.indices.size(); ++i) {
                if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
                    relevantIndices.clear();
                    relevantIndices.push_back(params.indices[i]);
                    QLOG() << "hint specified, restricting indices to " << hintIndex.toString()
                           << endl;
                    hintIndexNumber = i;
                    break;
                }
            }
        }

        if (hintIndexNumber == numeric_limits<size_t>::max()) {
            // This is supposed to be an error.
            warning() << "Can't find hint for " << hintIndex.toString();
            return;
        }
    } else {
        QLOG() << "Finding relevant indices\n";
        QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
    }

    for (size_t i = 0; i < relevantIndices.size(); ++i) {
        QLOG() << "relevant idx " << i << " is " << relevantIndices[i].toString() << endl;
    }

    // Figure out how useful each index is to each predicate.
    // query.root() is now annotated with RelevantTag(s).
    QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);

    QLOG() << "rated tree" << endl;
    QLOG() << query.root()->toString() << endl;

    // If there is a GEO_NEAR it must have an index it can use directly.
    // XXX: move into data access?
    MatchExpression* gnNode = NULL;
    if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
        // No index for GEO_NEAR? No query.
        RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
        if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
            return;
        }

        GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(gnNode);

        vector<size_t> newFirst;

        // 2d + GEO_NEAR is annoying. Because 2d's GEO_NEAR isn't streaming we have to embed
        // the full query tree inside it as a matcher.
        for (size_t i = 0; i < tag->first.size(); ++i) {
            // GEO_NEAR has a non-2d index it can use. We can deal w/that in normal planning.
            if (!is2DIndex(relevantIndices[tag->first[i]].keyPattern)) {
                newFirst.push_back(i);
                continue;
            }

            // If we're here, GEO_NEAR has a 2d index. We create a 2dgeonear plan with the
            // entire tree as a filter, if possible.
            GeoNear2DNode* solnRoot = new GeoNear2DNode();
            solnRoot->nq = gnme->getData();

            if (MatchExpression::GEO_NEAR != query.root()->matchType()) {
                // root is an AND, clone and delete the GEO_NEAR child.
                MatchExpression* filterTree = query.root()->shallowClone();
                verify(MatchExpression::AND == filterTree->matchType());

                bool foundChild = false;
                for (size_t i = 0; i < filterTree->numChildren(); ++i) {
                    if (MatchExpression::GEO_NEAR == filterTree->getChild(i)->matchType()) {
                        foundChild = true;
                        filterTree->getChildVector()->erase(filterTree->getChildVector()->begin() +
                                                            i);
                        break;
                    }
                }
                verify(foundChild);
                solnRoot->filter.reset(filterTree);
            }

            solnRoot->numWanted = query.getParsed().getNumToReturn();
            if (0 == solnRoot->numWanted) {
                solnRoot->numWanted = 100;
            }
            solnRoot->indexKeyPattern = relevantIndices[tag->first[i]].keyPattern;

            // Remove the 2d index. 2d can only be the first field, and we know there is
            // only one GEO_NEAR, so we don't care if anyone else was assigned it; it'll
            // only be first for gnNode.
            tag->first.erase(tag->first.begin() + i);

            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
            if (NULL != soln) {
                out->push_back(soln);
            }
        }

        // Continue planning w/non-2d indices tagged for this pred.
        tag->first.swap(newFirst);

        if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
            return;
        }
    }

    // Likewise, if there is a TEXT it must have an index it can use directly.
    MatchExpression* textNode;
    if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
        RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());
        if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
            return;
        }
    }

    // If we have any relevant indices, we try to create indexed plans.
    if (0 < relevantIndices.size()) {
        // The enumerator spits out trees tagged with IndexTag(s).
        PlanEnumerator isp(query.root(), &relevantIndices);
        isp.init();

        MatchExpression* rawTree;
        while (isp.getNext(&rawTree)) {
            QLOG() << "about to build solntree from tagged tree:\n" << rawTree->toString() << endl;

            // This can fail if enumeration makes a mistake.
            QuerySolutionNode* solnRoot =
                QueryPlannerAccess::buildIndexedDataAccess(query, rawTree, false, relevantIndices);
            if (NULL == solnRoot) {
                continue;
            }

            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
            if (NULL != soln) {
                QLOG() << "Planner: adding solution:\n" << soln->toString() << endl;
                out->push_back(soln);
            }
        }
    }

    QLOG() << "Planner: outputted " << out->size() << " indexed solutions.\n";

    // An index was hinted. If there are any solutions, they use the hinted index. If not, we
    // scan the entire index to provide results and output that as our plan. This is the
    // desired behavior when an index is hinted that is not relevant to the query.
    if (!hintIndex.isEmpty() && (0 == out->size())) {
        QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber], query, params);
        if (NULL != soln) {
            QLOG() << "Planner: outputting soln that uses hinted index as scan." << endl;
            out->push_back(soln);
        }
        return;
    }

    // If a sort order is requested, there may be an index that provides it, even if that
    // index is not over any predicates in the query.
    //
    // XXX XXX: Can we do this even if the index is sparse? Might we miss things?
    if (!query.getParsed().getSort().isEmpty() &&
        !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
        !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
        // See if we have a sort provided from an index already.
        bool usingIndexToSort = false;
        for (size_t i = 0; i < out->size(); ++i) {
            QuerySolution* soln = (*out)[i];
            if (!soln->hasSortStage) {
                usingIndexToSort = true;
                break;
            }
        }

        if (!usingIndexToSort) {
            for (size_t i = 0; i < params.indices.size(); ++i) {
                const BSONObj& kp = params.indices[i].keyPattern;
                if (providesSort(query, kp)) {
                    QLOG() << "Planner: outputting soln that uses index to provide sort." << endl;
                    QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params);
                    if (NULL != soln) {
                        out->push_back(soln);
                        break;
                    }
                }
                if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
                    QLOG() << "Planner: outputting soln that uses (reverse) index "
                           << "to provide sort." << endl;
                    QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params, -1);
                    if (NULL != soln) {
                        out->push_back(soln);
                        break;
                    }
                }
            }
        }
    }

    // TODO: Do we always want to offer a collscan solution?
    // XXX: currently disabling the always-use-a-collscan in order to find more planner bugs.
    if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
        !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) &&
        ((params.options & QueryPlannerParams::INCLUDE_COLLSCAN) ||
         (0 == out->size() && canTableScan))) {
        QuerySolution* collscan = buildCollscanSoln(query, false, params);
        if (NULL != collscan) {
            out->push_back(collscan);
            QLOG() << "Planner: outputting a collscan:\n";
            QLOG() << collscan->toString() << endl;
        }
    }
}
bool PlanEnumerator::prepMemo(MatchExpression* node) {
    if (Indexability::nodeCanUseIndexOnOwnField(node)) {
        // We only get here if our parent is an OR, an array operator, or we're the root.

        // If we have no index tag there are no indices we can use.
        if (NULL == node->getTag()) {
            return false;
        }

        RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
        // In order to definitely use an index it must be prefixed with our field.
        // We don't consider notFirst indices here because we must be AND-related to a node
        // that uses the first spot in that index, and we currently do not know that
        // unless we're in an AND node.
        if (0 == rt->first.size()) {
            return false;
        }

        // We know we can use an index, so grab a memo spot.
        size_t myMemoID;
        NodeAssignment* assign;
        allocateAssignment(node, &assign, &myMemoID);

        assign->pred.reset(new PredicateAssignment());
        assign->pred->expr = node;
        assign->pred->first.swap(rt->first);
        return true;
    } else if (MatchExpression::OR == node->matchType()) {
        // For an OR to be indexed, all its children must be indexed.
        for (size_t i = 0; i < node->numChildren(); ++i) {
            if (!prepMemo(node->getChild(i))) {
                return false;
            }
        }

        // If we're here we're fully indexed and can be in the memo.
        size_t myMemoID;
        NodeAssignment* assign;
        allocateAssignment(node, &assign, &myMemoID);

        OrAssignment* orAssignment = new OrAssignment();
        for (size_t i = 0; i < node->numChildren(); ++i) {
            orAssignment->subnodes.push_back(_nodeToId[node->getChild(i)]);
        }
        assign->orAssignment.reset(orAssignment);
        return true;
    } else if (MatchExpression::AND == node->matchType() ||
               Indexability::arrayUsesIndexOnChildren(node)) {
        // map from idx id to children that have a pred over it.
        unordered_map<IndexID, vector<MatchExpression*> > idxToFirst;
        unordered_map<IndexID, vector<MatchExpression*> > idxToNotFirst;

        vector<MemoID> subnodes;

        for (size_t i = 0; i < node->numChildren(); ++i) {
            MatchExpression* child = node->getChild(i);

            if (Indexability::nodeCanUseIndexOnOwnField(child)) {
                RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
                for (size_t j = 0; j < rt->first.size(); ++j) {
                    idxToFirst[rt->first[j]].push_back(child);
                }
                for (size_t j = 0; j < rt->notFirst.size(); ++j) {
                    idxToNotFirst[rt->notFirst[j]].push_back(child);
                }
            } else {
                if (prepMemo(child)) {
                    verify(_nodeToId.end() != _nodeToId.find(child));
                    size_t childID = _nodeToId[child];
                    subnodes.push_back(childID);
                }
            }
        }

        if (idxToFirst.empty() && (subnodes.size() == 0)) {
            return false;
        }

        AndAssignment* newAndAssignment = new AndAssignment();
        newAndAssignment->subnodes.swap(subnodes);

        // At this point we know how many indices the AND's predicate children are over.
        newAndAssignment->predChoices.resize(idxToFirst.size());

        // This iterates through the predChoices.
        size_t predChoicesIdx = 0;

        // For each FIRST, we assign nodes to it.
        for (unordered_map<IndexID, vector<MatchExpression*> >::iterator it = idxToFirst.begin();
             it != idxToFirst.end();
             ++it) {
            OneIndexAssignment* assign = &newAndAssignment->predChoices[predChoicesIdx];
            ++predChoicesIdx;

            // Fill out the OneIndexAssignment with the preds that are over the first field.
            assign->index = it->first;
            // We can swap because we're never touching idxToFirst again after this loop over it.
            assign->preds.swap(it->second);

            // If it's a multikey index, we can't intersect the bounds, so we only want one pred.
            if ((*_indices)[it->first].multikey) {
                // XXX: pick a better pred than the first one that happens to wander in.
                // XXX: see and3.js, indexq.js, arrayfind7.js
                QLOG() << "Index " << (*_indices)[it->first].keyPattern.toString()
                       << " is multikey but has >1 pred possible, should be smarter"
                       << " here and pick the best one" << endl;
                assign->preds.resize(1);
            }
            assign->positions.resize(assign->preds.size(), 0);

            //
            // Compound analysis here and below.
            //

            // Don't compound on multikey indices. (XXX: not whole story...)
            if ((*_indices)[it->first].multikey) {
                continue;
            }

            // Grab the expressions that are notFirst for the index whose assignments we're
            // filling out.
            unordered_map<size_t, vector<MatchExpression*> >::const_iterator compoundIt =
                idxToNotFirst.find(it->first);
            if (compoundIt == idxToNotFirst.end()) {
                continue;
            }
            const vector<MatchExpression*>& tryCompound = compoundIt->second;

            // Walk over the key pattern trying to find predicates we can compound on.
            BSONObjIterator kpIt((*_indices)[it->first].keyPattern);
            // Skip the first elt as it's already assigned.
            kpIt.next();
            size_t posInIdx = 0;
            while (kpIt.more()) {
                BSONElement keyElt = kpIt.next();
                ++posInIdx;

                bool fieldAssigned = false;
                for (size_t j = 0; j < tryCompound.size(); ++j) {
                    MatchExpression* maybe = tryCompound[j];
                    // Sigh we grab the full path from the relevant tag.
                    RelevantTag* rt = static_cast<RelevantTag*>(maybe->getTag());
                    if (keyElt.fieldName() == rt->path) {
                        assign->preds.push_back(maybe);
                        assign->positions.push_back(posInIdx);
                        fieldAssigned = true;
                    }
                }

                // If we have (a,b,c) and we can't assign something to 'b' don't try
                // to assign something to 'c'.
                if (!fieldAssigned) {
                    break;
                }
            }
        }

        // Some predicates *require* an index. We stuff these in 'mandatory' inside of the
        // AndAssignment.
        //
        // TODO: We can compute this "on the fly" above somehow, but it's clearer to see what's
        // going on when we do this as a separate step.
        //
        // TODO: Consider annotating mandatory indices in the planner as part of the available
        // index tagging.

        // Note we're not incrementing 'i' in the loop. We may erase the i-th element.
        for (size_t i = 0; i < newAndAssignment->predChoices.size();) {
            const OneIndexAssignment& oie = newAndAssignment->predChoices[i];
            bool hasPredThatRequiresIndex = false;

            for (size_t j = 0; j < oie.preds.size(); ++j) {
                MatchExpression* expr = oie.preds[j];
                if (MatchExpression::GEO_NEAR == expr->matchType()) {
                    hasPredThatRequiresIndex = true;
                    break;
                }
                if (MatchExpression::TEXT == expr->matchType()) {
                    hasPredThatRequiresIndex = true;
                    break;
                }
            }

            if (hasPredThatRequiresIndex) {
                newAndAssignment->mandatory.push_back(oie);
                newAndAssignment->predChoices.erase(newAndAssignment->predChoices.begin() + i);
            } else {
                ++i;
            }
        }

        newAndAssignment->resetEnumeration();

        size_t myMemoID;
        NodeAssignment* assign;
        allocateAssignment(node, &assign, &myMemoID);
        // Takes ownership.
        assign->newAnd.reset(newAndAssignment);
        return true;
    }

    // Don't know what the node is at this point.
    return false;
}
Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery* query,
                                                     mutablebson::Document& doc) const {
    MatchExpression* root = query->root();

    MatchExpression::MatchType rootType = root->matchType();

    // These copies are needed until we apply the modifiers at the end.
    std::vector<BSONObj> copies;

    // We only care about equality and "and"ed equality fields, everything else is ignored
    if (rootType != MatchExpression::EQ && rootType != MatchExpression::AND)
        return Status::OK();

    if (isDocReplacement()) {
        BSONElement idElem = query->getQueryObj().getField("_id");

        // Replacement mods need the _id field copied explicitly.
        if (idElem.ok()) {
            mb::Element elem = doc.makeElement(idElem);
            return doc.root().pushFront(elem);
        }

        return Status::OK();
    }

    // Create a new UpdateDriver to create the base doc from the query
    Options opts;
    opts.logOp = false;
    opts.multi = false;
    opts.upsert = true;
    opts.modOptions = modOptions();

    UpdateDriver insertDriver(opts);
    insertDriver.setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);

    // If we are a single equality match query
    if (root->matchType() == MatchExpression::EQ) {
        EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(root);

        const BSONElement matchData = eqMatch->getData();
        BSONElement childElem = matchData;

        // Make copy to new path if not the same field name (for cases like $all)
        if (!root->path().empty() && matchData.fieldNameStringData() != root->path()) {
            BSONObjBuilder copyBuilder;
            copyBuilder.appendAs(eqMatch->getData(), root->path());
            const BSONObj copy = copyBuilder.obj();
            copies.push_back(copy);
            childElem = copy[root->path()];
        }

        // Add this element as a $set modifier
        Status s = insertDriver.addAndParse(modifiertable::MOD_SET, childElem);
        if (!s.isOK())
            return s;

    } else {
        // parse query $set mods, including only equality stuff
        for (size_t i = 0; i < root->numChildren(); ++i) {
            MatchExpression* child = root->getChild(i);
            if (child->matchType() == MatchExpression::EQ) {
                EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);

                const BSONElement matchData = eqMatch->getData();
                BSONElement childElem = matchData;

                // Make copy to new path if not the same field name (for cases like $all)
                if (!child->path().empty() &&
                    matchData.fieldNameStringData() != child->path()) {
                    BSONObjBuilder copyBuilder;
                    copyBuilder.appendAs(eqMatch->getData(), child->path());
                    const BSONObj copy = copyBuilder.obj();
                    copies.push_back(copy);
                    childElem = copy[child->path()];
                }

                // Add this element as a $set modifier
                Status s = insertDriver.addAndParse(modifiertable::MOD_SET, childElem);
                if (!s.isOK())
                    return s;
            }
        }
    }

    // update the document with base field
    Status s = insertDriver.update(StringData(), &doc);
    copies.clear();
    if (!s.isOK()) {
        return Status(ErrorCodes::UnsupportedFormat,
                      str::stream() << "Cannot create base during"
                                       " insert of update. Caused by :" << s.toString());
    }

    return Status::OK();
}
// static
Status QueryPlanner::plan(const CanonicalQuery& query,
                          const QueryPlannerParams& params,
                          std::vector<QuerySolution*>* out) {
    QLOG() << "=============================\n"
           << "Beginning planning, options = " << optionString(params.options) << endl
           << "Canonical query:\n" << query.toString() << endl
           << "=============================" << endl;

    for (size_t i = 0; i < params.indices.size(); ++i) {
        QLOG() << "idx " << i << " is " << params.indices[i].toString() << endl;
    }

    bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);

    // If the query requests a tailable cursor, the only solution is a collscan + filter with
    // tailable set on the collscan. TODO: This is a policy departure. Previously I think you
    // could ask for a tailable cursor and it just tried to give you one. Now, we fail if we
    // can't provide one. Is this what we want?
    if (query.getParsed().hasOption(QueryOption_CursorTailable)) {
        if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
            canTableScan) {
            QuerySolution* soln = buildCollscanSoln(query, true, params);
            if (NULL != soln) {
                out->push_back(soln);
            }
        }
        return Status::OK();
    }

    // The hint can be $natural: 1. If this happens, output a collscan. It's a weird way of
    // saying "table scan for two, please."
    if (!query.getParsed().getHint().isEmpty()) {
        BSONElement natural = query.getParsed().getHint().getFieldDotted("$natural");
        if (!natural.eoo()) {
            QLOG() << "forcing a table scan due to hinted $natural\n";
            // min/max are incompatible with $natural.
            if (canTableScan && query.getParsed().getMin().isEmpty() &&
                query.getParsed().getMax().isEmpty()) {
                QuerySolution* soln = buildCollscanSoln(query, false, params);
                if (NULL != soln) {
                    out->push_back(soln);
                }
            }
            return Status::OK();
        }
    }

    // Figure out what fields we care about.
    unordered_set<string> fields;
    QueryPlannerIXSelect::getFields(query.root(), "", &fields);

    for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
        QLOG() << "predicate over field " << *it << endl;
    }

    // Filter our indices so we only look at indices that are over our predicates.
    vector<IndexEntry> relevantIndices;

    // Hints require us to only consider the hinted index.
    BSONObj hintIndex = query.getParsed().getHint();

    // Snapshot is a form of a hint. If snapshot is set, try to use _id index to make a real
    // plan. If that fails, just scan the _id index.
    if (query.getParsed().isSnapshot()) {
        // Find the ID index in indexKeyPatterns. It's our hint.
        for (size_t i = 0; i < params.indices.size(); ++i) {
            if (isIdIndex(params.indices[i].keyPattern)) {
                hintIndex = params.indices[i].keyPattern;
                break;
            }
        }
    }

    size_t hintIndexNumber = numeric_limits<size_t>::max();

    if (hintIndex.isEmpty()) {
        QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
    } else {
        // Sigh. If the hint is specified it might be using the index name.
        BSONElement firstHintElt = hintIndex.firstElement();
        if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
            string hintName = firstHintElt.String();
            for (size_t i = 0; i < params.indices.size(); ++i) {
                if (params.indices[i].name == hintName) {
                    QLOG() << "hint by name specified, restricting indices to "
                           << params.indices[i].keyPattern.toString() << endl;
                    relevantIndices.clear();
                    relevantIndices.push_back(params.indices[i]);
                    hintIndexNumber = i;
                    hintIndex = params.indices[i].keyPattern;
                    break;
                }
            }
        } else {
            for (size_t i = 0; i < params.indices.size(); ++i) {
                if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
                    relevantIndices.clear();
                    relevantIndices.push_back(params.indices[i]);
                    QLOG() << "hint specified, restricting indices to " << hintIndex.toString()
                           << endl;
                    hintIndexNumber = i;
                    break;
                }
            }
        }

        if (hintIndexNumber == numeric_limits<size_t>::max()) {
            return Status(ErrorCodes::BadValue, "bad hint");
        }
    }

    // Deal with the .min() and .max() query options. If either exist we can only use an index
    // that matches the object inside.
    if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
        BSONObj minObj = query.getParsed().getMin();
        BSONObj maxObj = query.getParsed().getMax();

        // This is the index into params.indices[...] that we use.
        size_t idxNo = numeric_limits<size_t>::max();

        // If there's an index hinted we need to be able to use it.
        if (!hintIndex.isEmpty()) {
            if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
                QLOG() << "minobj doesnt work w hint";
                return Status(ErrorCodes::BadValue,
                              "hint provided does not work with min query");
            }

            if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
                QLOG() << "maxobj doesnt work w hint";
                return Status(ErrorCodes::BadValue,
                              "hint provided does not work with max query");
            }

            idxNo = hintIndexNumber;
        } else {
            // No hinted index, look for one that is compatible (has same field names and
            // ordering thereof).
            for (size_t i = 0; i < params.indices.size(); ++i) {
                const BSONObj& kp = params.indices[i].keyPattern;

                BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
                if (indexCompatibleMaxMin(toUse, kp)) {
                    idxNo = i;
                    break;
                }
            }
        }

        if (idxNo == numeric_limits<size_t>::max()) {
            QLOG() << "Can't find relevant index to use for max/min query";
            // Can't find an index to use, bail out.
            return Status(ErrorCodes::BadValue,
                          "unable to find relevant index for max/min query");
        }

        // maxObj can be empty; the index scan just goes until the end. minObj can't be empty
        // though, so if it is, we make a minKey object.
        if (minObj.isEmpty()) {
            BSONObjBuilder bob;
            bob.appendMinKey("");
            minObj = bob.obj();
        } else {
            // Must strip off the field names to make an index key.
            minObj = stripFieldNames(minObj);
        }

        if (!maxObj.isEmpty()) {
            // Must strip off the field names to make an index key.
            maxObj = stripFieldNames(maxObj);
        }

        QLOG() << "max/min query using index " << params.indices[idxNo].toString() << endl;

        // Make our scan and output.
        QuerySolutionNode* solnRoot =
            QueryPlannerAccess::makeIndexScan(params.indices[idxNo], query, params, minObj, maxObj);

        QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
        if (NULL != soln) {
            out->push_back(soln);
        }

        return Status::OK();
    }

    for (size_t i = 0; i < relevantIndices.size(); ++i) {
        QLOG() << "relevant idx " << i << " is " << relevantIndices[i].toString() << endl;
    }

    // Figure out how useful each index is to each predicate.
    // query.root() is now annotated with RelevantTag(s).
    QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);

    QLOG() << "rated tree" << endl;
    QLOG() << query.root()->toString() << endl;

    // If there is a GEO_NEAR it must have an index it can use directly.
    // XXX: move into data access?
    MatchExpression* gnNode = NULL;
    if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
        // No index for GEO_NEAR? No query.
        RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
        if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
            QLOG() << "unable to find index for $geoNear query" << endl;
            return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
        }

        GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(gnNode);

        vector<size_t> newFirst;

        // 2d + GEO_NEAR is annoying. Because 2d's GEO_NEAR isn't streaming we have to embed
        // the full query tree inside it as a matcher.
        for (size_t i = 0; i < tag->first.size(); ++i) {
            // GEO_NEAR has a non-2d index it can use. We can deal w/that in normal planning.
            if (!is2DIndex(relevantIndices[tag->first[i]].keyPattern)) {
                newFirst.push_back(i);
                continue;
            }

            // If we're here, GEO_NEAR has a 2d index. We create a 2dgeonear plan with the
            // entire tree as a filter, if possible.
            GeoNear2DNode* solnRoot = new GeoNear2DNode();
            solnRoot->nq = gnme->getData();

            if (NULL != query.getProj()) {
                solnRoot->addPointMeta = query.getProj()->wantGeoNearPoint();
                solnRoot->addDistMeta = query.getProj()->wantGeoNearDistance();
            }

            if (MatchExpression::GEO_NEAR != query.root()->matchType()) {
                // root is an AND, clone and delete the GEO_NEAR child.
                MatchExpression* filterTree = query.root()->shallowClone();
                verify(MatchExpression::AND == filterTree->matchType());

                bool foundChild = false;
                for (size_t i = 0; i < filterTree->numChildren(); ++i) {
                    if (MatchExpression::GEO_NEAR == filterTree->getChild(i)->matchType()) {
                        foundChild = true;
                        filterTree->getChildVector()->erase(filterTree->getChildVector()->begin() +
                                                            i);
                        break;
                    }
                }
                verify(foundChild);
                solnRoot->filter.reset(filterTree);
            }

            solnRoot->numWanted = query.getParsed().getNumToReturn();
            if (0 == solnRoot->numWanted) {
                solnRoot->numWanted = 100;
            }
            solnRoot->indexKeyPattern = relevantIndices[tag->first[i]].keyPattern;

            // Remove the 2d index. 2d can only be the first field, and we know there is
            // only one GEO_NEAR, so we don't care if anyone else was assigned it; it'll
            // only be first for gnNode.
            tag->first.erase(tag->first.begin() + i);

            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
            if (NULL != soln) {
                out->push_back(soln);
            }
        }

        // Continue planning w/non-2d indices tagged for this pred.
        tag->first.swap(newFirst);

        if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
            return Status::OK();
        }
    }

    // Likewise, if there is a TEXT it must have an index it can use directly.
    MatchExpression* textNode;
    if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
        RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());
        if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
            return Status::OK();
        }
    }

    // If we have any relevant indices, we try to create indexed plans.
    if (0 < relevantIndices.size()) {
        // The enumerator spits out trees tagged with IndexTag(s).
        PlanEnumeratorParams enumParams;
        enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
        enumParams.root = query.root();
        enumParams.indices = &relevantIndices;

        PlanEnumerator isp(enumParams);
        isp.init();

        MatchExpression* rawTree;
        // XXX: have limit on # of indexed solns we'll consider. We could have a perverse
        // query and index that could make n^2 very unpleasant.
        while (isp.getNext(&rawTree)) {
            QLOG() << "about to build solntree from tagged tree:\n" << rawTree->toString() << endl;

            // This can fail if enumeration makes a mistake.
            QuerySolutionNode* solnRoot =
                QueryPlannerAccess::buildIndexedDataAccess(query, rawTree, false, relevantIndices);
            if (NULL == solnRoot) {
                continue;
            }

            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
            if (NULL != soln) {
                QLOG() << "Planner: adding solution:\n" << soln->toString() << endl;
                out->push_back(soln);
            }
        }
    }

    QLOG() << "Planner: outputted " << out->size() << " indexed solutions.\n";

    // An index was hinted. If there are any solutions, they use the hinted index. If not, we
    // scan the entire index to provide results and output that as our plan. This is the
    // desired behavior when an index is hinted that is not relevant to the query.
    if (!hintIndex.isEmpty()) {
        if (0 == out->size()) {
            QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber], query, params);
            verify(NULL != soln);
            QLOG() << "Planner: outputting soln that uses hinted index as scan." << endl;
            out->push_back(soln);
        }
        return Status::OK();
    }

    // If a sort order is requested, there may be an index that provides it, even if that
    // index is not over any predicates in the query.
    //
    if (!query.getParsed().getSort().isEmpty() &&
        !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
        !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
        // See if we have a sort provided from an index already.
        bool usingIndexToSort = false;
        for (size_t i = 0; i < out->size(); ++i) {
            QuerySolution* soln = (*out)[i];
            if (!soln->hasSortStage) {
                usingIndexToSort = true;
                break;
            }
        }

        if (!usingIndexToSort) {
            for (size_t i = 0; i < params.indices.size(); ++i) {
                const IndexEntry& index = params.indices[i];
                if (index.sparse) {
                    continue;
                }
                const BSONObj kp = LiteParsedQuery::normalizeSortOrder(index.keyPattern);
                if (providesSort(query, kp)) {
                    QLOG() << "Planner: outputting soln that uses index to provide sort." << endl;
                    QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params);
                    if (NULL != soln) {
                        out->push_back(soln);
                        break;
                    }
                }
                if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
                    QLOG() << "Planner: outputting soln that uses (reverse) index "
                           << "to provide sort." << endl;
                    QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params, -1);
                    if (NULL != soln) {
                        out->push_back(soln);
                        break;
                    }
                }
            }
        }
    }

    // TODO: Do we always want to offer a collscan solution?
    // XXX: currently disabling the always-use-a-collscan in order to find more planner bugs.
    if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
        !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) &&
        hintIndex.isEmpty() &&
        ((params.options & QueryPlannerParams::INCLUDE_COLLSCAN) ||
         (0 == out->size() && canTableScan))) {
        QuerySolution* collscan = buildCollscanSoln(query, false, params);
        if (NULL != collscan) {
            out->push_back(collscan);
            QLOG() << "Planner: outputting a collscan:\n";
            QLOG() << collscan->toString() << endl;
        }
    }

    return Status::OK();
}