Example No. 1
    BSONObj Helpers::modifiedRangeBound( const BSONObj& bound ,
                                         const BSONObj& keyPattern ,
                                         int minOrMax ){
        BSONObjBuilder newBound;

        BSONObjIterator src( bound );
        BSONObjIterator pat( keyPattern );

        while( src.more() ){
            massert( 16341 ,
                     str::stream() << "keyPattern " << keyPattern
                                   << " shorter than bound " << bound ,
                     pat.more() );
            BSONElement srcElt = src.next();
            BSONElement patElt = pat.next();
            massert( 16333 ,
                     str::stream() << "field names of bound " << bound
                                   << " do not match those of keyPattern " << keyPattern ,
                     str::equals( srcElt.fieldName() , patElt.fieldName() ) );
            newBound.appendAs( srcElt , "" );
        }
        while( pat.more() ){
            BSONElement patElt = pat.next();
            verify( patElt.isNumber() );
            if( minOrMax * patElt.numberInt() == 1){
                newBound.appendMaxKey("");
            }
            else {
                newBound.appendMinKey("");
            }
        }
        return newBound.obj();
    }
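For reference, a minimal sketch of the shape this helper produces for a bound of { a: 5 } against the key pattern { a: 1, b: 1 } with minOrMax == 1; the builder calls are the same ones used above, but the function itself is hypothetical and the surrounding includes are assumed.

    // Hypothetical illustration only -- not part of the original file.
    BSONObj paddedUpperBound() {
        BSONObjBuilder newBound;
        newBound.append( "" , 5 );    // the supplied field, with its name stripped
        newBound.appendMaxKey( "" );  // the missing trailing key-pattern field, padded with MaxKey
        return newBound.obj();        // => { "": 5, "": MaxKey }
    }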
Example No. 2
    BSONObj Helpers::modifiedRangeBound( const BSONObj& bound ,
                                         const BSONObj& keyPattern ,
                                         int minOrMax ){
        BSONObjBuilder newBound;

        BSONObjIterator src( bound );
        BSONObjIterator pat( keyPattern );

        while( src.more() ){
            BSONElement srcElt = src.next();
            BSONElement patElt = pat.next();
            massert( 16333 , "bound " + bound.toString() +
                             " not extendible to pattern " + keyPattern.toString(),
                     strcmp( srcElt.fieldName() , patElt.fieldName() ) == 0 );
            newBound.appendAs( srcElt , "" );
        }
        while( pat.more() ){
            BSONElement patElt = pat.next();
            verify( patElt.isNumber() );
            if( minOrMax * patElt.numberInt() == 1){
                newBound.appendMaxKey("");
            }
            else {
                newBound.appendMinKey("");
            }
        }
        return newBound.obj();
    }
Example No. 3
        void run(){
            Scope * s = globalScriptEngine->createScope();

            BSONObjBuilder b;
            b.appendTimestamp( "a" , 123456789 );
            b.appendMinKey( "b" );
            b.appendMaxKey( "c" );
            b.appendTimestamp( "d" , 1234000 , 9876 );
            

            {
                BSONObj t = b.done();
                ASSERT_EQUALS( 1234000U , t["d"].timestampTime() );
                ASSERT_EQUALS( 9876U , t["d"].timestampInc() );
            }

            s->setObject( "z" , b.obj() );
            
            ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , BSONObj() ) == 0 );

            BSONObj out = s->getObject( "y" );
            ASSERT_EQUALS( Timestamp , out["a"].type() );
            ASSERT_EQUALS( MinKey , out["b"].type() );
            ASSERT_EQUALS( MaxKey , out["c"].type() );
            ASSERT_EQUALS( Timestamp , out["d"].type() );

            ASSERT_EQUALS( 9876U , out["d"].timestampInc() );
            ASSERT_EQUALS( 1234000U , out["d"].timestampTime() );
            ASSERT_EQUALS( 123456789U , out["a"].date() );


            delete s;
        }
Example No. 4
 // static
 void IndexBoundsBuilder::allValuesForField(const BSONElement& elt, OrderedIntervalList* out) {
     // ARGH, BSONValue would make this shorter.
     BSONObjBuilder bob;
     bob.appendMinKey("");
     bob.appendMaxKey("");
     out->name = elt.fieldName();
     out->intervals.push_back(makeRangeInterval(bob.obj(), true, true));
 }
Example No. 5
    // static
    void IndexBoundsBuilder::allValuesForField(const BSONElement& elt, OrderedIntervalList* out) {
        // ARGH, BSONValue would make this shorter.
        BSONObjBuilder bob;
        if (-1 == elt.number()) {
            // Index should go from MaxKey to MinKey as it's descending.
            bob.appendMaxKey("");
            bob.appendMinKey("");
        }
        else {
            // Index goes from MinKey to MaxKey as it's ascending.
            bob.appendMinKey("");
            bob.appendMaxKey("");
        }

        out->name = elt.fieldName();
        out->intervals.push_back(makeRangeInterval(bob.obj(), true, true));
    }
Example No. 6
/**
 * "Finishes" the min object for the $min query option by filling in an empty object with
 * MinKey/MaxKey and stripping field names.
 *
 * In the case that 'minObj' is empty, we "finish" it by filling in either MinKey or MaxKey
 * instead. Choosing whether to use MinKey or MaxKey is done by comparing against 'maxObj'.
 * For instance, suppose 'minObj' is empty, 'maxObj' is { a: 3 }, and the key pattern is
 * { a: -1 }. According to the key pattern ordering, { a: 3 } < MinKey. This means that the
 * proper resulting bounds are
 *
 *   start: { '': MaxKey }, end: { '': 3 }
 *
 * as opposed to
 *
 *   start: { '': MinKey }, end: { '': 3 }
 *
 * Suppose instead that the key pattern is { a: 1 }, with the same 'minObj' and 'maxObj'
 * (that is, an empty object and { a: 3 } respectively). In this case, { a: 3 } > MinKey,
 * which means that we use range [{'': MinKey}, {'': 3}]. The proper 'minObj' in this case is
 * MinKey, whereas in the previous example it was MaxKey.
 *
 * If 'minObj' is non-empty, then all we do is strip its field names (because index keys always
 * have empty field names).
 */
static BSONObj finishMinObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
    BSONObjBuilder bob;
    bob.appendMinKey("");
    BSONObj minKey = bob.obj();

    if (minObj.isEmpty()) {
        if (0 > minKey.woCompare(maxObj, kp, false)) {
            BSONObjBuilder minKeyBuilder;
            minKeyBuilder.appendMinKey("");
            return minKeyBuilder.obj();
        } else {
            BSONObjBuilder maxKeyBuilder;
            maxKeyBuilder.appendMaxKey("");
            return maxKeyBuilder.obj();
        }
    } else {
        return stripFieldNames(minObj);
    }
}
Example No. 7
    // static
    void OrderedIntervalList::complement() {
        BSONObjBuilder minBob;
        minBob.appendMinKey("");
        BSONObj minObj = minBob.obj();

        // We complement by scanning the entire range of BSON values
        // from MinKey to MaxKey. The value from which we must begin
        // the next complemented interval is kept in 'curBoundary'.
        BSONElement curBoundary = minObj.firstElement();

        // If 'curInclusive' is true, then 'curBoundary' is
        // included in one of the original intervals, and hence
        // should not be included in the complement (and vice-versa
        // if 'curInclusive' is false).
        bool curInclusive = false;

        // We will build up a list of intervals that represents
        // the inversion of those in the OIL.
        vector<Interval> newIntervals;
        for (size_t j = 0; j < intervals.size(); ++j) {
            Interval curInt = intervals[j];
            if (0 != curInt.start.woCompare(curBoundary) ||
                (!curInclusive && !curInt.startInclusive)) {
                // Make a new interval from 'curBoundary' to
                // the start of 'curInterval'.
                BSONObjBuilder intBob;
                intBob.append(curBoundary);
                intBob.append(curInt.start);
                Interval newInt(intBob.obj(), !curInclusive, !curInt.startInclusive);
                newIntervals.push_back(newInt);
            }

            // Reset the boundary for the next iteration.
            curBoundary = curInt.end;
            curInclusive = curInt.endInclusive;
        }

        // We may have to add a final interval which ends in MaxKey.
        BSONObjBuilder maxBob;
        maxBob.appendMaxKey("");
        BSONObj maxObj = maxBob.obj();
        BSONElement maxKey = maxObj.firstElement();
        if (0 != maxKey.woCompare(curBoundary) || !curInclusive) {
            BSONObjBuilder intBob;
            intBob.append(curBoundary);
            intBob.append(maxKey);
            Interval newInt(intBob.obj(), !curInclusive, true);
            newIntervals.push_back(newInt);
        }

        // Replace the old list of intervals with the new one.
        intervals.clear();
        intervals.insert(intervals.end(), newIntervals.begin(), newIntervals.end());
    }
Example No. 8
/**
 * "Finishes" the min object for the $min query option by filling in an empty object with
 * MinKey/MaxKey and stripping field names. Also translates keys according to the collation, if
 * necessary.
 *
 * In the case that 'minObj' is empty, we "finish" it by filling in either MinKey or MaxKey
 * instead. Choosing whether to use MinKey or MaxKey is done by comparing against 'maxObj'.
 * For instance, suppose 'minObj' is empty, 'maxObj' is { a: 3 }, and the key pattern is
 * { a: -1 }. According to the key pattern ordering, { a: 3 } < MinKey. This means that the
 * proper resulting bounds are
 *
 *   start: { '': MaxKey }, end: { '': 3 }
 *
 * as opposed to
 *
 *   start: { '': MinKey }, end: { '': 3 }
 *
 * Suppose instead that the key pattern is { a: 1 }, with the same 'minObj' and 'maxObj'
 * (that is, an empty object and { a: 3 } respectively). In this case, { a: 3 } > MinKey,
 * which means that we use range [{'': MinKey}, {'': 3}]. The proper 'minObj' in this case is
 * MinKey, whereas in the previous example it was MaxKey.
 *
 * If 'minObj' is non-empty, then all we do is strip its field names (because index keys always
 * have empty field names).
 */
static BSONObj finishMinObj(const IndexEntry& indexEntry,
                            const BSONObj& minObj,
                            const BSONObj& maxObj) {
    BSONObjBuilder bob;
    bob.appendMinKey("");
    BSONObj minKey = bob.obj();

    if (minObj.isEmpty()) {
        if (0 > minKey.woCompare(maxObj, indexEntry.keyPattern, false)) {
            BSONObjBuilder minKeyBuilder;
            minKeyBuilder.appendMinKey("");
            return minKeyBuilder.obj();
        } else {
            BSONObjBuilder maxKeyBuilder;
            maxKeyBuilder.appendMaxKey("");
            return maxKeyBuilder.obj();
        }
    } else {
        return stripFieldNamesAndApplyCollation(minObj, indexEntry.collator);
    }
}
Example No. 9
        bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
            const char* ns = jsobj.getStringField( "splitVector" );
            BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );

            BSONObj min = jsobj.getObjectField( "min" );
            BSONObj max = jsobj.getObjectField( "max" );
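            // If min and max are not provided, use MinKey/MaxKey for each field of the shard key pattern.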
            if ( min.isEmpty() && max.isEmpty() ){
                BSONObjBuilder minBuilder;
                BSONObjBuilder maxBuilder;
                BSONForEach(key, keyPattern){
                    minBuilder.appendMinKey( key.fieldName() );
                    maxBuilder.appendMaxKey( key.fieldName() );
                }
Example No. 10
    ShardKeyPattern::ShardKeyPattern( BSONObj p ) : pattern( p.getOwned() ) {
        pattern.getFieldNames(patternfields);

        BSONObjBuilder min;
        BSONObjBuilder max;

        BSONObjIterator it(p);
        while (it.more()) {
            BSONElement e (it.next());
            min.appendMinKey(e.fieldName());
            max.appendMaxKey(e.fieldName());
        }

        gMin = min.obj();
        gMax = max.obj();
    }
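A hedged sketch of what the constructor above leaves in gMin and gMax for a pattern such as { a: 1, b: 1 }; the helper below is hypothetical, but its iterator and builder calls mirror the constructor.

    // Hypothetical illustration: gMin ends up as { a: MinKey, b: MinKey },
    // and the symmetric appendMaxKey loop yields { a: MaxKey, b: MaxKey }.
    BSONObj globalMinFor( const BSONObj& pattern ) {
        BSONObjBuilder min;
        BSONObjIterator it( pattern );
        while ( it.more() ) {
            min.appendMinKey( it.next().fieldName() );
        }
        return min.obj();
    }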
Example No. 11
    BSONObj IndexBounds::toBSON() const {
        BSONObjBuilder builder;
        if (isSimpleRange) {
            // TODO
        }
        else {
            for (vector<OrderedIntervalList>::const_iterator itField = fields.begin();
                 itField != fields.end();
                 ++itField) {
                BSONArrayBuilder fieldBuilder(builder.subarrayStart(itField->name));
                for (vector<Interval>::const_iterator itInterval = itField->intervals.begin();
                     itInterval != itField->intervals.end();
                     ++itInterval) {
                    BSONArrayBuilder intervalBuilder;

                    // Careful to output $minElement/$maxElement if we don't have bounds.
                    if (itInterval->start.eoo()) {
                        BSONObjBuilder minBuilder;
                        minBuilder.appendMinKey("");
                        BSONObj minKeyObj = minBuilder.obj();
                        intervalBuilder.append(minKeyObj.firstElement());
                    }
                    else {
                        intervalBuilder.append(itInterval->start);
                    }

                    if (itInterval->end.eoo()) {
                        BSONObjBuilder maxBuilder;
                        maxBuilder.appendMaxKey("");
                        BSONObj maxKeyObj = maxBuilder.obj();
                        intervalBuilder.append(maxKeyObj.firstElement());
                    }
                    else {
                        intervalBuilder.append(itInterval->end);
                    }

                    fieldBuilder.append(
                        static_cast<BSONArray>(intervalBuilder.arr().clientReadable()));
                }
            }
        }
        return builder.obj();
    }
Example No. 12
        bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){

            //
            // 1.a We'll parse the parameters in two steps. First, make sure we can use the split index to get
            //     a good approximation of the size of the chunk -- without needing to access the actual data.
            //

            const char* ns = jsobj.getStringField( "splitVector" );
            BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );

            // If min and max are not provided use the "minKey" and "maxKey" for the sharding key pattern.
            BSONObj min = jsobj.getObjectField( "min" );
            BSONObj max = jsobj.getObjectField( "max" );
            if ( min.isEmpty() && max.isEmpty() ){
                BSONObjBuilder minBuilder;
                BSONObjBuilder maxBuilder;
                BSONForEach(key, keyPattern){
                    minBuilder.appendMinKey( key.fieldName() );
                    maxBuilder.appendMaxKey( key.fieldName() );
                }
Example No. 13
    bool appendSpecialDBObject( Convertor * c , BSONObjBuilder& b , const string& name , JSObject * o ){
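        // Recognize special JS wrapper objects (ObjectId, MinKey, MaxKey, Timestamp,
        // Date) and append the corresponding BSON value; return false otherwise.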

        if ( JS_InstanceOf( c->_context , o , &object_id_class , 0 ) ){
            OID oid;
            oid.init( c->getString( o , "str" ) );
            b.append( name.c_str() , oid );
            return true;
        }

        if ( JS_InstanceOf( c->_context , o , &minkey_class , 0 ) ){
            b.appendMinKey( name.c_str() );
            return true;
        }

        if ( JS_InstanceOf( c->_context , o , &maxkey_class , 0 ) ){
            b.appendMaxKey( name.c_str() );
            return true;
        }
        
        if ( JS_InstanceOf( c->_context , o , &timestamp_class , 0 ) ){
            b.appendTimestamp( name.c_str() , (unsigned long long)c->getNumber( o , "t" ) , (unsigned int )c->getNumber( o , "i" ) );
            return true;
        }
        
        {
            jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
            if ( d ){
                b.appendDate( name.c_str() , (unsigned long long)d );
                return true;
            }
        }
        
        

        return false;
    }
Example No. 14
 Interval IndexBoundsBuilder::allValues() {
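     // Build the interval [MinKey, MaxKey], i.e. every BSON value, inclusive at both ends.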
     BSONObjBuilder bob;
     bob.appendMinKey("");
     bob.appendMaxKey("");
     return makeRangeInterval(bob.obj(), true, true);
 }
Example No. 15
    // static
    void IndexBoundsBuilder::translate(const MatchExpression* expr, int direction,
                                       OrderedIntervalList* oilOut, bool* exactOut) {
        Interval interval;
        bool exact = false;

        if (expr->isLeaf()) {
            if (MatchExpression::EQ == expr->matchType()) {
                const EqualityMatchExpression* node = static_cast<const EqualityMatchExpression*>(expr);
                // We have to copy the data out of the parse tree and stuff it into the index bounds.
                // BSONValue will be useful here.
                BSONObj dataObj = objFromElement(node->getData());

                if (dataObj.couldBeArray()) {
                    // XXX: build better bounds
                    warning() << "building lazy bounds for " << expr->toString() << endl;
                    interval = allValues();
                    exact = false;
                }
                else {
                    verify(dataObj.isOwned());
                    interval = makePointInterval(dataObj);
                    exact = true;
                }
            }
            else if (MatchExpression::LTE == expr->matchType()) {
                const LTEMatchExpression* node = static_cast<const LTEMatchExpression*>(expr);
                BSONObjBuilder bob;
                bob.appendMinKey("");
                bob.append(node->getData());
                BSONObj dataObj = bob.obj();
                verify(dataObj.isOwned());
                interval = makeRangeInterval(dataObj, true, true);
                exact = true;
            }
            else if (MatchExpression::LT == expr->matchType()) {
                const LTMatchExpression* node = static_cast<const LTMatchExpression*>(expr);
                BSONObjBuilder bob;
                bob.appendMinKey("");
                bob.append(node->getData());
                BSONObj dataObj = bob.obj();
                verify(dataObj.isOwned());
                interval = makeRangeInterval(dataObj, true, false);
                exact = true;
            }
            else if (MatchExpression::GT == expr->matchType()) {
                const GTMatchExpression* node = static_cast<const GTMatchExpression*>(expr);
                BSONObjBuilder bob;
                bob.append(node->getData());
                bob.appendMaxKey("");
                BSONObj dataObj = bob.obj();
                verify(dataObj.isOwned());
                interval = makeRangeInterval(dataObj, false, true);
                exact = true;
            }
            else if (MatchExpression::GTE == expr->matchType()) {
                const GTEMatchExpression* node = static_cast<const GTEMatchExpression*>(expr);
                BSONObjBuilder bob;
                bob.append(node->getData());
                bob.appendMaxKey("");
                BSONObj dataObj = bob.obj();
                verify(dataObj.isOwned());
                interval = makeRangeInterval(dataObj, true, true);
                exact = true;
            }
            else {
                // XXX: build better bounds
                warning() << "building lazy bounds for " << expr->toString() << endl;
                interval = allValues();
                exact = false;
            }
        }
        else {
            // XXX: build better bounds
            verify(expr->isArray());
            warning() << "building lazy bounds for " << expr->toString() << endl;
            interval = allValues();
            exact = false;
        }

        if (-1 == direction) {
            reverseInterval(&interval);
        }

        oilOut->intervals.push_back(interval);
        *exactOut = exact;
    }
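As a concrete illustration of the LT branch above, the bounds object pairs MinKey with the comparison operand; a minimal standalone sketch (hypothetical function, includes assumed):

    BSONObj ltBoundObj( const BSONElement& data ) {
        BSONObjBuilder bob;
        bob.appendMinKey( "" );  // lower bound: MinKey
        bob.append( data );      // upper bound: the $lt operand, e.g. 5 for { a: { $lt: 5 } }
        return bob.obj();        // makeRangeInterval( obj, true, false ) then gives [MinKey, 5)
    }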
Example No. 16
    // Returns the min and max keys which bound a particular location.
    // The only time these may be equal is when we actually equal the location
    // itself, otherwise our expanding algorithm will fail.
    // static
    bool BtreeLocation::initial(IndexDescriptor* descriptor, const TwoDIndexingParams& params,
            BtreeLocation& min, BtreeLocation& max, GeoHash start) {
        verify(descriptor);

        min._eof = false;
        max._eof = false;

        // Add the range for the 2d indexed field to the keys used.

        // Two scans: one for min one for max.
        IndexScanParams minParams;
        minParams.direction = -1;
        minParams.forceBtreeAccessMethod = true;
        minParams.descriptor = descriptor->clone();
        minParams.bounds.fields.resize(descriptor->keyPattern().nFields());
        minParams.doNotDedup = true;
        // First field of start key goes (MINKEY, start] (in reverse)
        BSONObjBuilder firstBob;
        firstBob.appendMinKey("");
        start.appendToBuilder(&firstBob, "");
        minParams.bounds.fields[0].intervals.push_back(Interval(firstBob.obj(), false, true));

        IndexScanParams maxParams;
        maxParams.forceBtreeAccessMethod = true;
        maxParams.direction = 1;
        maxParams.descriptor = descriptor->clone();
        maxParams.bounds.fields.resize(descriptor->keyPattern().nFields());
        // Don't have the ixscan dedup since we want dup DiskLocs because of multi-point docs.
        maxParams.doNotDedup = true;
        // First field of end key goes (start, MAXKEY)
        BSONObjBuilder secondBob;
        start.appendToBuilder(&secondBob, "");
        secondBob.appendMaxKey("");
        maxParams.bounds.fields[0].intervals.push_back(Interval(secondBob.obj(), false, false));

        BSONObjIterator it(descriptor->keyPattern());
        BSONElement kpElt = it.next();
        maxParams.bounds.fields[0].name = kpElt.fieldName();
        minParams.bounds.fields[0].name = kpElt.fieldName();
        // Fill out the non-2d indexed fields with the "all values" interval, aligned properly.
        size_t idx = 1;
        while (it.more()) {
            kpElt = it.next();
            maxParams.bounds.fields[idx].intervals.push_back(IndexBoundsBuilder::allValues());
            minParams.bounds.fields[idx].intervals.push_back(IndexBoundsBuilder::allValues());
            maxParams.bounds.fields[idx].name = kpElt.fieldName();
            minParams.bounds.fields[idx].name = kpElt.fieldName();
            if (kpElt.number() == -1) {
                IndexBoundsBuilder::reverseInterval(&minParams.bounds.fields[idx].intervals[0]);
                IndexBoundsBuilder::reverseInterval(&maxParams.bounds.fields[idx].intervals[0]);
            }
            ++idx;
        }

        for (size_t i = 0; i < minParams.bounds.fields.size(); ++i) {
            IndexBoundsBuilder::reverseInterval(&minParams.bounds.fields[i].intervals[0]);
        }

        //cout << "keyPattern " << descriptor->keyPattern().toString() << endl;
        //cout << "minBounds " << minParams.bounds.toString() << endl;
        //cout << "maxBounds " << maxParams.bounds.toString() << endl;
        verify(minParams.bounds.isValidFor(descriptor->keyPattern(), -1));
        verify(maxParams.bounds.isValidFor(descriptor->keyPattern(), 1));

        min._ws.reset(new WorkingSet());
        min._scan.reset(new IndexScan(minParams, min._ws.get(), NULL));

        max._ws.reset(new WorkingSet());
        max._scan.reset(new IndexScan(maxParams, max._ws.get(), NULL));

        min.advance();
        max.advance();

        return !max._eof || !min._eof;
    }
Example No. 17
    // static
    Status QueryPlanner::plan(const CanonicalQuery& query,
                              const QueryPlannerParams& params,
                              std::vector<QuerySolution*>* out) {

        QLOG() << "=============================\n"
               << "Beginning planning, options = " << optionString(params.options) << endl
               << "Canonical query:\n" << query.toString() << endl
               << "============================="
               << endl;

        for (size_t i = 0; i < params.indices.size(); ++i) {
            QLOG() << "idx " << i << " is " << params.indices[i].toString() << endl;
        }

        bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);

        // If the query requests a tailable cursor, the only solution is a collscan + filter with
        // tailable set on the collscan.  TODO: This is a policy departure.  Previously I think you
        // could ask for a tailable cursor and it just tried to give you one.  Now, we fail if we
        // can't provide one.  Is this what we want?
        if (query.getParsed().hasOption(QueryOption_CursorTailable)) {
            if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
                && canTableScan) {
                QuerySolution* soln = buildCollscanSoln(query, true, params);
                if (NULL != soln) {
                    out->push_back(soln);
                }
            }
            return Status::OK();
        }

        // The hint can be $natural: 1.  If this happens, output a collscan.  It's a weird way of
        // saying "table scan for two, please."
        if (!query.getParsed().getHint().isEmpty()) {
            BSONElement natural = query.getParsed().getHint().getFieldDotted("$natural");
            if (!natural.eoo()) {
                QLOG() << "forcing a table scan due to hinted $natural\n";
                // min/max are incompatible with $natural.
                if (canTableScan && query.getParsed().getMin().isEmpty()
                                 && query.getParsed().getMax().isEmpty()) {
                    QuerySolution* soln = buildCollscanSoln(query, false, params);
                    if (NULL != soln) {
                        out->push_back(soln);
                    }
                }
                return Status::OK();
            }
        }

        // Figure out what fields we care about.
        unordered_set<string> fields;
        QueryPlannerIXSelect::getFields(query.root(), "", &fields);

        for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
            QLOG() << "predicate over field " << *it << endl;
        }

        // Filter our indices so we only look at indices that are over our predicates.
        vector<IndexEntry> relevantIndices;

        // Hints require us to only consider the hinted index.
        BSONObj hintIndex = query.getParsed().getHint();

        // Snapshot is a form of a hint.  If snapshot is set, try to use _id index to make a real
        // plan.  If that fails, just scan the _id index.
        if (query.getParsed().isSnapshot()) {
            // Find the ID index in indexKeyPatterns.  It's our hint.
            for (size_t i = 0; i < params.indices.size(); ++i) {
                if (isIdIndex(params.indices[i].keyPattern)) {
                    hintIndex = params.indices[i].keyPattern;
                    break;
                }
            }
        }

        size_t hintIndexNumber = numeric_limits<size_t>::max();

        if (hintIndex.isEmpty()) {
            QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
        }
        else {
            // Sigh.  If the hint is specified it might be using the index name.
            BSONElement firstHintElt = hintIndex.firstElement();
            if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
                string hintName = firstHintElt.String();
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    if (params.indices[i].name == hintName) {
                        QLOG() << "hint by name specified, restricting indices to "
                             << params.indices[i].keyPattern.toString() << endl;
                        relevantIndices.clear();
                        relevantIndices.push_back(params.indices[i]);
                        hintIndexNumber = i;
                        hintIndex = params.indices[i].keyPattern;
                        break;
                    }
                }
            }
            else {
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
                        relevantIndices.clear();
                        relevantIndices.push_back(params.indices[i]);
                        QLOG() << "hint specified, restricting indices to " << hintIndex.toString()
                             << endl;
                        hintIndexNumber = i;
                        break;
                    }
                }
            }

            if (hintIndexNumber == numeric_limits<size_t>::max()) {
                return Status(ErrorCodes::BadValue, "bad hint");
            }
        }

        // Deal with the .min() and .max() query options.  If either exist we can only use an index
        // that matches the object inside.
        if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
            BSONObj minObj = query.getParsed().getMin();
            BSONObj maxObj = query.getParsed().getMax();

            // This is the index into params.indices[...] that we use.
            size_t idxNo = numeric_limits<size_t>::max();

            // If there's an index hinted we need to be able to use it.
            if (!hintIndex.isEmpty()) {
                if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
                    QLOG() << "minobj doesnt work w hint";
                    return Status(ErrorCodes::BadValue,
                                  "hint provided does not work with min query");
                }

                if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
                    QLOG() << "maxobj doesnt work w hint";
                    return Status(ErrorCodes::BadValue,
                                  "hint provided does not work with max query");
                }

                idxNo = hintIndexNumber;
            }
            else {
                // No hinted index, look for one that is compatible (has same field names and
                // ordering thereof).
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    const BSONObj& kp = params.indices[i].keyPattern;

                    BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
                    if (indexCompatibleMaxMin(toUse, kp)) {
                        idxNo = i;
                        break;
                    }
                }
            }
            
            if (idxNo == numeric_limits<size_t>::max()) {
                QLOG() << "Can't find relevant index to use for max/min query";
                // Can't find an index to use, bail out.
                return Status(ErrorCodes::BadValue,
                              "unable to find relevant index for max/min query");
            }

            // maxObj can be empty; the index scan just goes until the end.  minObj can't be empty
            // though, so if it is, we make a minKey object.
            if (minObj.isEmpty()) {
                BSONObjBuilder bob;
                bob.appendMinKey("");
                minObj = bob.obj();
            }
            else {
                // Must strip off the field names to make an index key.
                minObj = stripFieldNames(minObj);
            }

            if (!maxObj.isEmpty()) {
                // Must strip off the field names to make an index key.
                maxObj = stripFieldNames(maxObj);
            }

            QLOG() << "max/min query using index " << params.indices[idxNo].toString() << endl;

            // Make our scan and output.
            QuerySolutionNode* solnRoot = QueryPlannerAccess::makeIndexScan(params.indices[idxNo],
                                                                            query,
                                                                            params,
                                                                            minObj,
                                                                            maxObj);

            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
            if (NULL != soln) {
                out->push_back(soln);
            }

            return Status::OK();
        }

        for (size_t i = 0; i < relevantIndices.size(); ++i) {
            QLOG() << "relevant idx " << i << " is " << relevantIndices[i].toString() << endl;
        }

        // Figure out how useful each index is to each predicate.
        // query.root() is now annotated with RelevantTag(s).
        QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);

        QLOG() << "rated tree" << endl;
        QLOG() << query.root()->toString() << endl;

        // If there is a GEO_NEAR it must have an index it can use directly.
        // XXX: move into data access?
        MatchExpression* gnNode = NULL;
        if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
            // No index for GEO_NEAR?  No query.
            RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
            if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
                QLOG() << "unable to find index for $geoNear query" << endl;
                return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
            }

            GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(gnNode);

            vector<size_t> newFirst;

            // 2d + GEO_NEAR is annoying.  Because 2d's GEO_NEAR isn't streaming we have to embed
            // the full query tree inside it as a matcher.
            for (size_t i = 0; i < tag->first.size(); ++i) {
                // GEO_NEAR has a non-2d index it can use.  We can deal w/that in normal planning.
                if (!is2DIndex(relevantIndices[tag->first[i]].keyPattern)) {
                    newFirst.push_back(i);
                    continue;
                }

                // If we're here, GEO_NEAR has a 2d index.  We create a 2dgeonear plan with the
                // entire tree as a filter, if possible.

                GeoNear2DNode* solnRoot = new GeoNear2DNode();
                solnRoot->nq = gnme->getData();
                if (NULL != query.getProj()) {
                    solnRoot->addPointMeta = query.getProj()->wantGeoNearPoint();
                    solnRoot->addDistMeta = query.getProj()->wantGeoNearDistance();
                }

                if (MatchExpression::GEO_NEAR != query.root()->matchType()) {
                    // root is an AND, clone and delete the GEO_NEAR child.
                    MatchExpression* filterTree = query.root()->shallowClone();
                    verify(MatchExpression::AND == filterTree->matchType());

                    bool foundChild = false;
                    for (size_t i = 0; i < filterTree->numChildren(); ++i) {
                        if (MatchExpression::GEO_NEAR == filterTree->getChild(i)->matchType()) {
                            foundChild = true;
                            filterTree->getChildVector()->erase(filterTree->getChildVector()->begin() + i);
                            break;
                        }
                    }
                    verify(foundChild);
                    solnRoot->filter.reset(filterTree);
                }

                solnRoot->numWanted = query.getParsed().getNumToReturn();
                if (0 == solnRoot->numWanted) {
                    solnRoot->numWanted = 100;
                }
                solnRoot->indexKeyPattern = relevantIndices[tag->first[i]].keyPattern;

                // Remove the 2d index.  2d can only be the first field, and we know there is
                // only one GEO_NEAR, so we don't care if anyone else was assigned it; it'll
                // only be first for gnNode.
                tag->first.erase(tag->first.begin() + i);

                QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);

                if (NULL != soln) {
                    out->push_back(soln);
                }
            }

            // Continue planning w/non-2d indices tagged for this pred.
            tag->first.swap(newFirst);

            if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
                return Status::OK();
            }
        }

        // Likewise, if there is a TEXT it must have an index it can use directly.
        MatchExpression* textNode;
        if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
            RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());
            if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
                return Status::OK();
            }
        }

        // If we have any relevant indices, we try to create indexed plans.
        if (0 < relevantIndices.size()) {
            // The enumerator spits out trees tagged with IndexTag(s).
            PlanEnumeratorParams enumParams;
            enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
            enumParams.root = query.root();
            enumParams.indices = &relevantIndices;

            PlanEnumerator isp(enumParams);
            isp.init();

            MatchExpression* rawTree;
            // XXX: have limit on # of indexed solns we'll consider.  We could have a perverse
            // query and index that could make n^2 very unpleasant.
            while (isp.getNext(&rawTree)) {
                QLOG() << "about to build solntree from tagged tree:\n" << rawTree->toString()
                       << endl;

                // This can fail if enumeration makes a mistake.
                QuerySolutionNode* solnRoot =
                    QueryPlannerAccess::buildIndexedDataAccess(query, rawTree, false, relevantIndices);

                if (NULL == solnRoot) { continue; }

                QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
                if (NULL != soln) {
                    QLOG() << "Planner: adding solution:\n" << soln->toString() << endl;
                    out->push_back(soln);
                }
            }
        }

        QLOG() << "Planner: outputted " << out->size() << " indexed solutions.\n";

        // An index was hinted.  If there are any solutions, they use the hinted index.  If not, we
        // scan the entire index to provide results and output that as our plan.  This is the
        // desired behavior when an index is hinted that is not relevant to the query.
        if (!hintIndex.isEmpty()) {
            if (0 == out->size()) {
                QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber], query, params);
                verify(NULL != soln);
                QLOG() << "Planner: outputting soln that uses hinted index as scan." << endl;
                out->push_back(soln);
            }
            return Status::OK();
        }

        // If a sort order is requested, there may be an index that provides it, even if that
        // index is not over any predicates in the query.
        //
        if (!query.getParsed().getSort().isEmpty()
            && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
            && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {

            // See if we have a sort provided from an index already.
            bool usingIndexToSort = false;
            for (size_t i = 0; i < out->size(); ++i) {
                QuerySolution* soln = (*out)[i];
                if (!soln->hasSortStage) {
                    usingIndexToSort = true;
                    break;
                }
            }

            if (!usingIndexToSort) {
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    const IndexEntry& index = params.indices[i];
                    if (index.sparse) {
                        continue;
                    }
                    const BSONObj kp = LiteParsedQuery::normalizeSortOrder(index.keyPattern);
                    if (providesSort(query, kp)) {
                        QLOG() << "Planner: outputting soln that uses index to provide sort."
                               << endl;
                        QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params);
                        if (NULL != soln) {
                            out->push_back(soln);
                            break;
                        }
                    }
                    if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
                        QLOG() << "Planner: outputting soln that uses (reverse) index "
                               << "to provide sort." << endl;
                        QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params, -1);
                        if (NULL != soln) {
                            out->push_back(soln);
                            break;
                        }
                    }
                }
            }
        }

        // TODO: Do we always want to offer a collscan solution?
        // XXX: currently disabling the always-use-a-collscan in order to find more planner bugs.
        if (    !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
             && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
             && hintIndex.isEmpty()
             && ((params.options & QueryPlannerParams::INCLUDE_COLLSCAN) || (0 == out->size() && canTableScan)))
        {
            QuerySolution* collscan = buildCollscanSoln(query, false, params);
            if (NULL != collscan) {
                out->push_back(collscan);
                QLOG() << "Planner: outputting a collscan:\n";
                QLOG() << collscan->toString() << endl;
            }
        }

        return Status::OK();
    }
Example No. 18
 BSONObj objWithMinKey(int start) {
     BSONObjBuilder startKeyBob;
     startKeyBob.append("", start);
     startKeyBob.appendMinKey("");
     return startKeyBob.obj();
 }
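A symmetric sketch for the opposite bound (hypothetical counterpart, not taken from the source):

 BSONObj objWithMaxKey(int start) {
     BSONObjBuilder endKeyBob;
     endKeyBob.append("", start);
     endKeyBob.appendMaxKey("");
     return endKeyBob.obj();
 }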
Example No. 19
        void run() {

            int numShards = 10;
            int numInitialChunks = 5;
            int maxChunks = 100000; // Needed to not overflow the BSONArray's max bytes
            int keySize = 2;

            BSONArrayBuilder chunksB;

            BSONObj lastSplitPt;
            ShardChunkVersion version( 1, 0, OID() );

            //
            // Generate numChunks with a given key size over numShards
            // All chunks have double key values, so we can split them a bunch
            //

            for( int i = -1; i < numInitialChunks; i++ ){

                BSONObjBuilder splitPtB;
                for( int k = 0; k < keySize; k++ ){
                    string field = string( "k" ) + string( 1, (char)('0' + k) );
                    if( i < 0 )
                        splitPtB.appendMinKey( field );
                    else if( i < numInitialChunks - 1 )
                        splitPtB.append( field, (double)i );
                    else
                        splitPtB.appendMaxKey( field );
                }
                BSONObj splitPt = splitPtB.obj();

                if( i >= 0 ){
                    BSONObjBuilder chunkB;

                    chunkB.append( "min", lastSplitPt );
                    chunkB.append( "max", splitPt );

                    int shardNum = rand( numShards );
                    chunkB.append( "shard", "shard" + string( 1, (char)('A' + shardNum) ) );

                    rand( 2 ) ? version.incMajor() : version.incMinor();
                    version.addToBSON( chunkB, "lastmod" );

                    chunksB.append( chunkB.obj() );
                }

                lastSplitPt = splitPt;
            }

            BSONArray chunks = chunksB.arr();

            // log() << "Chunks generated : " << chunks << endl;

            DBClientMockCursor chunksCursor( chunks );

            // Setup the empty ranges and versions first
            RangeMap ranges;
            ShardChunkVersion maxVersion = ShardChunkVersion( 0, 0, OID() );
            VersionMap maxShardVersions;

            // Create a differ which will track our progress
            boost::shared_ptr< DefaultDiffAdapter > differ( _inverse ? new InverseDiffAdapter() : new DefaultDiffAdapter() );
            differ->attach( "test", ranges, maxVersion, maxShardVersions );

            // Validate initial load
            differ->calculateConfigDiff( chunksCursor );
            validate( chunks, ranges, maxVersion, maxShardVersions );

            // Generate a lot of diffs, and keep validating that updating from the diffs always
            // gives us the right ranges and versions

            int numDiffs = 135; // Makes about 100000 chunks overall
            int numChunks = numInitialChunks;
            for( int i = 0; i < numDiffs; i++ ){

                // log() << "Generating new diff... " << i << endl;

                BSONArrayBuilder diffsB;
                BSONArrayBuilder newChunksB;
                BSONObjIterator chunksIt( chunks );

                while( chunksIt.more() ){

                    BSONObj chunk = chunksIt.next().Obj();

                    int randChoice = rand( 10 );

                    if( randChoice < 2 && numChunks < maxChunks ){
                        // Simulate a split

                        // log() << " ...starting a split with chunk " << chunk << endl;

                        BSONObjBuilder leftB;
                        BSONObjBuilder rightB;
                        BSONObjBuilder midB;

                        for( int k = 0; k < keySize; k++ ){
                            string field = string( "k" ) + string( 1, (char)('0' + k) );

                            BSONType maxType = chunk["max"].Obj()[field].type();
                            double max = maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
                            BSONType minType = chunk["min"].Obj()[field].type();
                            double min = minType == NumberDouble ? chunk["min"].Obj()[field].Number() : 0.0;

                            if( minType == MinKey ){
                                midB.append( field, max - 1.0 );
                            }
                            else if( maxType == MaxKey ){
                                midB.append( field, min + 1.0 );
                            }
                            else {
                                midB.append( field, ( max + min ) / 2.0 );
                            }
                        }

                        BSONObj midPt = midB.obj();
                        // Only happens if we can't split the min chunk
                        if( midPt.isEmpty() ) continue;

                        leftB.append( chunk["min"] );
                        leftB.append( "max", midPt );
                        rightB.append( "min", midPt );
                        rightB.append( chunk["max"] );

                        leftB.append( chunk["shard"] );
                        rightB.append( chunk["shard"] );

                        version.incMajor();
                        version._minor = 0;
                        version.addToBSON( leftB, "lastmod" );
                        version.incMinor();
                        version.addToBSON( rightB, "lastmod" );

                        BSONObj left = leftB.obj();
                        BSONObj right = rightB.obj();

                        // log() << " ... split into " << left << " and " << right << endl;

                        newChunksB.append( left );
                        newChunksB.append( right );

                        diffsB.append( right );
                        diffsB.append( left );

                        numChunks++;
                    }
                    else if( randChoice < 4 && chunksIt.more() ){
                        // Simulate a migrate

                        // log() << " ...starting a migrate with chunk " << chunk << endl;

                        BSONObj prevShardChunk;
                        while( chunksIt.more() ){
                            prevShardChunk = chunksIt.next().Obj();
                            if( prevShardChunk["shard"].String() == chunk["shard"].String() ) break;

                            // log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
                            newChunksB.append( prevShardChunk );

                            prevShardChunk = BSONObj();
                        }

                        // We need to move between different shards, hence the weirdness in logic here
                        if( ! prevShardChunk.isEmpty() ){

                            BSONObjBuilder newShardB;
                            BSONObjBuilder prevShardB;

                            newShardB.append( chunk["min"] );
                            newShardB.append( chunk["max"] );
                            prevShardB.append( prevShardChunk["min"] );
                            prevShardB.append( prevShardChunk["max"] );

                            int shardNum = rand( numShards );
                            newShardB.append( "shard", "shard" + string( 1, (char)('A' + shardNum) ) );
                            prevShardB.append( prevShardChunk["shard"] );

                            version.incMajor();
                            version._minor = 0;
                            version.addToBSON( newShardB, "lastmod" );
                            version.incMinor();
                            version.addToBSON( prevShardB, "lastmod" );

                            BSONObj newShard = newShardB.obj();
                            BSONObj prevShard = prevShardB.obj();

                            // log() << " ... migrated to " << newShard << " and updated " << prevShard << endl;

                            newChunksB.append( newShard );
                            newChunksB.append( prevShard );

                            diffsB.append( newShard );
                            diffsB.append( prevShard );

                        }
                        else{
                            // log() << "... appending chunk, no more left: " << chunk << endl;
                            newChunksB.append( chunk );
                        }
                    }
                    else{
                        // log() << "Appending chunk : " << chunk << endl;
                        newChunksB.append( chunk );
                    }

                }

                BSONArray diffs = diffsB.arr();
                chunks = newChunksB.arr();

                // log() << "Diffs generated : " << diffs << endl;
                // log() << "All chunks : " << chunks << endl;

                // Rarely entirely clear out our data
                if( rand( 10 ) < 1 ){
                    diffs = chunks;
                    ranges.clear();
                    maxVersion = ShardChunkVersion( 0, 0, OID() );
                    maxShardVersions.clear();
                }

                // log() << "Total number of chunks : " << numChunks << " iteration " << i << endl;

                DBClientMockCursor diffCursor( diffs );

                differ->calculateConfigDiff( diffCursor );

                validate( chunks, ranges, maxVersion, maxShardVersions );

            }

        }
Example No. 20
    void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value ){
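        // Convert a single V8 value into the BSON field 'sname', handling strings,
        // functions, numbers, arrays, dates, special wrapper objects (Timestamp, MinKey,
        // MaxKey, ObjectId, NumberLong, DBPointer, BinData), regexes, booleans,
        // undefined, null, and nested objects.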
        
        if ( value->IsString() ){
            b.append( sname.c_str() , toSTLString( value ).c_str() );
            return;
        }
        
        if ( value->IsFunction() ){
            b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
            return;
        }
    
        if ( value->IsNumber() ){
            if ( value->IsInt32() )
                b.append( sname.c_str(), int( value->ToInt32()->Value() ) );
            else
                b.append( sname.c_str() , value->ToNumber()->Value() );
            return;
        }
    
        if ( value->IsArray() ){
            BSONObj sub = v8ToMongo( value->ToObject() );
            b.appendArray( sname.c_str() , sub );
            return;
        }
    
        if ( value->IsDate() ){
            b.appendDate( sname.c_str() , Date_t(v8::Date::Cast( *value )->NumberValue()) );
            return;
        }

        if ( value->IsExternal() )
            return;
        
        if ( value->IsObject() ){
            // The user could potentially modify the fields of these special objects,
            // wreaking havoc when we attempt to reinterpret them.  Not doing any validation
            // for now...
            Local< v8::Object > obj = value->ToObject();
            if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
                switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
                    case Timestamp:
                        b.appendTimestamp( sname.c_str(),
                                          Date_t( v8::Date::Cast( *obj->Get( v8::String::New( "time" ) ) )->NumberValue() ),
                                          obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
                        return;
                    case MinKey:
                        b.appendMinKey( sname.c_str() );
                        return;
                    case MaxKey:
                        b.appendMaxKey( sname.c_str() );
                        return;
                    default:
                        assert( "invalid internal field" == 0 );
                }
            }
            string s = toSTLString( value );
            if ( s.size() && s[0] == '/' ){
                s = s.substr( 1 );
                string r = s.substr( 0 , s.rfind( "/" ) );
                string o = s.substr( s.rfind( "/" ) + 1 );
                b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
            }
            else if ( value->ToObject()->GetPrototype()->IsObject() &&
                      value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( v8::String::New( "isObjectId" ) ) ){
                OID oid;
                oid.init( toSTLString( value ) );
                b.appendOID( sname.c_str() , &oid );
            }
            else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__NumberLong" ) ).IsEmpty() ) {
                // TODO might be nice to potentially speed this up with an indexed internal
                // field, but I don't yet know how to use an ObjectTemplate with a
                // constructor.
                unsigned long long val =
                ( (unsigned long long)( value->ToObject()->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
                (unsigned)( value->ToObject()->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
                b.append( sname.c_str(), (long long)val );
            }
            else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
                OID oid;
                oid.init( toSTLString( value->ToObject()->Get( v8::String::New( "id" ) ) ) );
                string ns = toSTLString( value->ToObject()->Get( v8::String::New( "ns" ) ) );
                b.appendDBRef( sname.c_str(), ns.c_str(), oid );                
            }
            else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__BinData" ) ).IsEmpty() ) {
                int len = obj->Get( v8::String::New( "len" ) )->ToInt32()->Value();
                v8::String::Utf8Value data( obj->Get( v8::String::New( "data" ) ) );
                const char *dataArray = *data;
                assert( data.length() == len );
                b.appendBinData( sname.c_str(),
                                len,
                                mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
                                dataArray );
            } else {
                BSONObj sub = v8ToMongo( value->ToObject() );
                b.append( sname.c_str() , sub );
            }
            return;
        }
    
        if ( value->IsBoolean() ){
            b.appendBool( sname.c_str() , value->ToBoolean()->Value() );
            return;
        }
    
        else if ( value->IsUndefined() ){
            b.appendUndefined( sname.c_str() );
            return;
        }
    
        else if ( value->IsNull() ){
            b.appendNull( sname.c_str() );
            return;
        }

        cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
    }
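
The __NumberLong branch above reassembles a 64-bit integer from the wrapper object's 32-bit "top" and "bottom" properties. Below is a minimal, self-contained sketch of that shift-and-add arithmetic; the helpers joinInt64 and splitInt64 are hypothetical names used for illustration and are not part of the MongoDB or v8 APIs.

    #include <iostream>

    // Recombine a 64-bit value from its 32-bit halves, mirroring the
    // ( (unsigned long long)top << 32 ) + (unsigned)bottom expression above.
    static long long joinInt64( int top, int bottom ) {
        return (long long)( ( (unsigned long long)(unsigned)top << 32 )
                            + (unsigned)bottom );
    }

    // Split a 64-bit value into the two 32-bit halves the wrapper stores.
    static void splitInt64( long long value, int* top, int* bottom ) {
        *top = (int)( (unsigned long long)value >> 32 );
        *bottom = (int)( (unsigned long long)value & 0xffffffffULL );
    }

    int main() {
        long long original = 4886718345LL;   // 0x123456789
        int top, bottom;
        splitInt64( original, &top, &bottom );
        std::cout << original << " == " << joinInt64( top, bottom ) << std::endl;
        return 0;
    }
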
Ejemplo n.º 21
0
    // static
    Status QueryPlanner::plan(const CanonicalQuery& query,
                              const QueryPlannerParams& params,
                              std::vector<QuerySolution*>* out) {

        QLOG() << "Beginning planning..." << endl
               << "=============================" << endl
               << "Options = " << optionString(params.options) << endl
               << "Canonical query:" << endl << query.toString()
               << "=============================" << endl;

        for (size_t i = 0; i < params.indices.size(); ++i) {
            QLOG() << "Index " << i << " is " << params.indices[i].toString() << endl;
        }

        bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);

        // If the query requests a tailable cursor, the only solution is a collscan + filter with
        // tailable set on the collscan.  TODO: This is a policy departure.  Previously I think you
        // could ask for a tailable cursor and it just tried to give you one.  Now, we fail if we
        // can't provide one.  Is this what we want?
        if (query.getParsed().hasOption(QueryOption_CursorTailable)) {
            if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
                && canTableScan) {
                QuerySolution* soln = buildCollscanSoln(query, true, params);
                if (NULL != soln) {
                    out->push_back(soln);
                }
            }
            return Status::OK();
        }

        // The hint or sort can be $natural: 1.  If this happens, output a collscan. If both
        // a $natural hint and a $natural sort are specified, then the direction of the collscan
        // is determined by the sign of the sort (not the sign of the hint).
        if (!query.getParsed().getHint().isEmpty() || !query.getParsed().getSort().isEmpty()) {
            BSONObj hintObj = query.getParsed().getHint();
            BSONObj sortObj = query.getParsed().getSort();
            BSONElement naturalHint = hintObj.getFieldDotted("$natural");
            BSONElement naturalSort = sortObj.getFieldDotted("$natural");

            // A hint overrides a $natural sort. This means that we don't force a table
            // scan if there is a $natural sort with a non-$natural hint.
            if (!naturalHint.eoo() || (!naturalSort.eoo() && hintObj.isEmpty())) {
                QLOG() << "Forcing a table scan due to hinted $natural\n";
                // min/max are incompatible with $natural.
                if (canTableScan && query.getParsed().getMin().isEmpty()
                                 && query.getParsed().getMax().isEmpty()) {
                    QuerySolution* soln = buildCollscanSoln(query, false, params);
                    if (NULL != soln) {
                        out->push_back(soln);
                    }
                }
                return Status::OK();
            }
        }

        // Figure out what fields we care about.
        unordered_set<string> fields;
        QueryPlannerIXSelect::getFields(query.root(), "", &fields);

        for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
            QLOG() << "Predicate over field '" << *it << "'" << endl;
        }

        // Filter our indices so we only look at indices that are over our predicates.
        vector<IndexEntry> relevantIndices;

        // Hints require us to only consider the hinted index.
        // If index filters in the query settings were used to override
        // the allowed indices for planning, we should not use the hinted index
        // requested in the query.
        BSONObj hintIndex;
        if (!params.indexFiltersApplied) {
            hintIndex = query.getParsed().getHint();
        }

        // Snapshot is a form of a hint.  If snapshot is set, try to use _id index to make a real
        // plan.  If that fails, just scan the _id index.
        if (query.getParsed().isSnapshot()) {
            // Find the ID index in indexKeyPatterns.  It's our hint.
            for (size_t i = 0; i < params.indices.size(); ++i) {
                if (isIdIndex(params.indices[i].keyPattern)) {
                    hintIndex = params.indices[i].keyPattern;
                    break;
                }
            }
        }

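        // numeric_limits<size_t>::max() doubles as the "no hinted index resolved yet"
        // sentinel; it is checked below to reject a hint that matches no known index.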
        size_t hintIndexNumber = numeric_limits<size_t>::max();

        if (hintIndex.isEmpty()) {
            QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
        }
        else {
            // Sigh.  If the hint is specified it might be using the index name.
            BSONElement firstHintElt = hintIndex.firstElement();
            if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
                string hintName = firstHintElt.String();
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    if (params.indices[i].name == hintName) {
                        QLOG() << "Hint by name specified, restricting indices to "
                               << params.indices[i].keyPattern.toString() << endl;
                        relevantIndices.clear();
                        relevantIndices.push_back(params.indices[i]);
                        hintIndexNumber = i;
                        hintIndex = params.indices[i].keyPattern;
                        break;
                    }
                }
            }
            else {
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
                        relevantIndices.clear();
                        relevantIndices.push_back(params.indices[i]);
                        QLOG() << "Hint specified, restricting indices to " << hintIndex.toString()
                               << endl;
                        hintIndexNumber = i;
                        break;
                    }
                }
            }

            if (hintIndexNumber == numeric_limits<size_t>::max()) {
                return Status(ErrorCodes::BadValue, "bad hint");
            }
        }

        // Deal with the .min() and .max() query options.  If either exist we can only use an index
        // that matches the object inside.
        if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
            BSONObj minObj = query.getParsed().getMin();
            BSONObj maxObj = query.getParsed().getMax();

            // This is the index into params.indices[...] that we use.
            size_t idxNo = numeric_limits<size_t>::max();

            // If there's an index hinted we need to be able to use it.
            if (!hintIndex.isEmpty()) {
                if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
                    QLOG() << "Minobj doesn't work with hint";
                    return Status(ErrorCodes::BadValue,
                                  "hint provided does not work with min query");
                }

                if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
                    QLOG() << "Maxobj doesn't work with hint";
                    return Status(ErrorCodes::BadValue,
                                  "hint provided does not work with max query");
                }

                idxNo = hintIndexNumber;
            }
            else {
                // No hinted index, look for one that is compatible (has same field names and
                // ordering thereof).
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    const BSONObj& kp = params.indices[i].keyPattern;

                    BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
                    if (indexCompatibleMaxMin(toUse, kp)) {
                        idxNo = i;
                        break;
                    }
                }
            }

            if (idxNo == numeric_limits<size_t>::max()) {
                QLOG() << "Can't find relevant index to use for max/min query";
                // Can't find an index to use, bail out.
                return Status(ErrorCodes::BadValue,
                              "unable to find relevant index for max/min query");
            }

            // maxObj can be empty; the index scan just goes until the end.  minObj can't be empty
            // though, so if it is, we make a minKey object.
            if (minObj.isEmpty()) {
                BSONObjBuilder bob;
                bob.appendMinKey("");
                minObj = bob.obj();
            }
            else {
                // Must strip off the field names to make an index key.
                minObj = stripFieldNames(minObj);
            }

            if (!maxObj.isEmpty()) {
                // Must strip off the field names to make an index key.
                maxObj = stripFieldNames(maxObj);
            }

            QLOG() << "Max/min query using index " << params.indices[idxNo].toString() << endl;

            // Make our scan and output.
            QuerySolutionNode* solnRoot = QueryPlannerAccess::makeIndexScan(params.indices[idxNo],
                                                                            query,
                                                                            params,
                                                                            minObj,
                                                                            maxObj);

            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
            if (NULL != soln) {
                out->push_back(soln);
            }

            return Status::OK();
        }

        for (size_t i = 0; i < relevantIndices.size(); ++i) {
            QLOG() << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
            LOG(2) << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
        }

        // Figure out how useful each index is to each predicate.
        QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);
        QueryPlannerIXSelect::stripInvalidAssignments(query.root(), relevantIndices);

        // query.root() is now annotated with RelevantTag(s).
        QLOG() << "Rated tree:" << endl << query.root()->toString();

        // If there is a GEO_NEAR it must have an index it can use directly.
        MatchExpression* gnNode = NULL;
        if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
            // No index for GEO_NEAR?  No query.
            RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
            if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
                QLOG() << "Unable to find index for $geoNear query." << endl;
                // Don't leave tags on query tree.
                query.root()->resetTag();
                return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
            }

            QLOG() << "Rated tree after geonear processing:" << query.root()->toString();
        }

        // Likewise, if there is a TEXT it must have an index it can use directly.
        MatchExpression* textNode = NULL;
        if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
            RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());

            // Exactly one text index required for TEXT.  We need to check this explicitly because
            // the text stage can't be built if no text index exists or there is an ambiguity as to
            // which one to use.
            size_t textIndexCount = 0;
            for (size_t i = 0; i < params.indices.size(); i++) {
                if (INDEX_TEXT == params.indices[i].type) {
                    textIndexCount++;
                }
            }
            if (textIndexCount != 1) {
                // Don't leave tags on query tree.
                query.root()->resetTag();
                return Status(ErrorCodes::BadValue, "need exactly one text index for $text query");
            }

            // Error if the text node is tagged with zero indices.
            if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
                // Don't leave tags on query tree.
                query.root()->resetTag();
                return Status(ErrorCodes::BadValue,
                              "failed to use text index to satisfy $text query (if text index is "
                              "compound, are equality predicates given for all prefix fields?)");
            }

            // At this point, we know that there is only one text index and that the TEXT node is
            // assigned to it.
            invariant(1 == tag->first.size() + tag->notFirst.size());

            QLOG() << "Rated tree after text processing:" << query.root()->toString();
        }

        // If we have any relevant indices, we try to create indexed plans.
        if (0 < relevantIndices.size()) {
            // The enumerator spits out trees tagged with IndexTag(s).
            PlanEnumeratorParams enumParams;
            enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
            enumParams.root = query.root();
            enumParams.indices = &relevantIndices;

            PlanEnumerator isp(enumParams);
            isp.init();

            MatchExpression* rawTree;
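                // Pull tagged trees from the enumerator one at a time, and stop once
                // we have produced params.maxIndexedSolutions indexed plans.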
            while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
                QLOG() << "About to build solntree from tagged tree:" << endl
                       << rawTree->toString();

                // The tagged tree produced by the plan enumerator is not guaranteed
                // to be canonically sorted. In order to be compatible with the cached
                // data, sort the tagged tree according to CanonicalQuery ordering.
                boost::scoped_ptr<MatchExpression> clone(rawTree->shallowClone());
                CanonicalQuery::sortTree(clone.get());

                PlanCacheIndexTree* cacheData;
                Status indexTreeStatus = cacheDataFromTaggedTree(clone.get(), relevantIndices, &cacheData);
                if (!indexTreeStatus.isOK()) {
                    QLOG() << "Query is not cachable: " << indexTreeStatus.reason() << endl;
                }
                auto_ptr<PlanCacheIndexTree> autoData(cacheData);

                // This can fail if enumeration makes a mistake.
                QuerySolutionNode* solnRoot =
                    QueryPlannerAccess::buildIndexedDataAccess(query, rawTree, false, relevantIndices);

                if (NULL == solnRoot) { continue; }

                QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query,
                                                                              params,
                                                                              solnRoot);
                if (NULL != soln) {
                    QLOG() << "Planner: adding solution:" << endl << soln->toString();
                    if (indexTreeStatus.isOK()) {
                        SolutionCacheData* scd = new SolutionCacheData();
                        scd->tree.reset(autoData.release());
                        soln->cacheData.reset(scd);
                    }
                    out->push_back(soln);
                }
            }
        }

        // Don't leave tags on query tree.
        query.root()->resetTag();

        QLOG() << "Planner: outputted " << out->size() << " indexed solutions.\n";

        // Produce legible error message for failed OR planning with a TEXT child.
        // TODO: support collection scan for non-TEXT children of OR.
        if (out->size() == 0 && textNode != NULL &&
            MatchExpression::OR == query.root()->matchType()) {
            MatchExpression* root = query.root();
            for (size_t i = 0; i < root->numChildren(); ++i) {
                if (textNode == root->getChild(i)) {
                    return Status(ErrorCodes::BadValue,
                                  "Failed to produce a solution for TEXT under OR - "
                                  "other non-TEXT clauses under OR have to be indexed as well.");
                }
            }
        }

        // An index was hinted.  If there are any solutions, they use the hinted index.  If not, we
        // scan the entire index to provide results and output that as our plan.  This is the
        // desired behavior when an index is hinted that is not relevant to the query.
        if (!hintIndex.isEmpty()) {
            if (0 == out->size()) {
                QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber],
                                                       query, params);
                verify(NULL != soln);
                QLOG() << "Planner: outputting soln that uses hinted index as scan." << endl;
                out->push_back(soln);
            }
            return Status::OK();
        }

        // If a sort order is requested, there may be an index that provides it, even if that
        // index is not over any predicates in the query.
        //
        if (!query.getParsed().getSort().isEmpty()
            && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
            && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {

            // See if we have a sort provided from an index already.
            // This is implied by the presence of a non-blocking solution.
            bool usingIndexToSort = false;
            for (size_t i = 0; i < out->size(); ++i) {
                QuerySolution* soln = (*out)[i];
                if (!soln->hasBlockingStage) {
                    usingIndexToSort = true;
                    break;
                }
            }

            if (!usingIndexToSort) {
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    const IndexEntry& index = params.indices[i];
                    // Only regular (non-plugin) indexes can be used to provide a sort.
                    if (index.type != INDEX_BTREE) {
                        continue;
                    }
                    // Only non-sparse indexes can be used to provide a sort.
                    if (index.sparse) {
                        continue;
                    }

                    // TODO: Sparse indexes can't normally provide a sort, because non-indexed
                    // documents could potentially be missing from the result set.  However, if the
                    // query predicate can be used to guarantee that all documents to be returned
                    // are indexed, then the index should be able to provide the sort.
                    //
                    // For example:
                    // - Sparse index {a: 1, b: 1} should be able to provide a sort for
                    //   find({b: 1}).sort({a: 1}).  SERVER-13908.
                    // - Index {a: 1, b: "2dsphere"} (which is "geo-sparse", if
                    //   2dsphereIndexVersion=2) should be able to provide a sort for
                    //   find({b: GEO}).sort({a:1}).  SERVER-10801.

                    const BSONObj kp = LiteParsedQuery::normalizeSortOrder(index.keyPattern);
                    if (providesSort(query, kp)) {
                        QLOG() << "Planner: outputting soln that uses index to provide sort."
                               << endl;
                        QuerySolution* soln = buildWholeIXSoln(params.indices[i],
                                                               query, params);
                        if (NULL != soln) {
                            PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
                            indexTree->setIndexEntry(params.indices[i]);
                            SolutionCacheData* scd = new SolutionCacheData();
                            scd->tree.reset(indexTree);
                            scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
                            scd->wholeIXSolnDir = 1;

                            soln->cacheData.reset(scd);
                            out->push_back(soln);
                            break;
                        }
                    }
                    if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
                        QLOG() << "Planner: outputting soln that uses (reverse) index "
                               << "to provide sort." << endl;
                        QuerySolution* soln = buildWholeIXSoln(params.indices[i], query,
                                                               params, -1);
                        if (NULL != soln) {
                            PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
                            indexTree->setIndexEntry(params.indices[i]);
                            SolutionCacheData* scd = new SolutionCacheData();
                            scd->tree.reset(indexTree);
                            scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
                            scd->wholeIXSolnDir = -1;

                            soln->cacheData.reset(scd);
                            out->push_back(soln);
                            break;
                        }
                    }
                }
            }
        }

        // geoNear and text queries *require* an index.
        // Also, if a hint is specified it indicates that we MUST use it.
        bool possibleToCollscan = !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
                               && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
                               && hintIndex.isEmpty();

        // The caller can explicitly ask for a collscan.
        bool collscanRequested = (params.options & QueryPlannerParams::INCLUDE_COLLSCAN);

        // No indexed plans?  We must provide a collscan if possible or else we can't run the query.
        bool collscanNeeded = (0 == out->size() && canTableScan);

        if (possibleToCollscan && (collscanRequested || collscanNeeded)) {
            QuerySolution* collscan = buildCollscanSoln(query, false, params);
            if (NULL != collscan) {
                SolutionCacheData* scd = new SolutionCacheData();
                scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
                collscan->cacheData.reset(scd);
                out->push_back(collscan);
                QLOG() << "Planner: outputting a collscan:" << endl
                       << collscan->toString();
            }
        }

        return Status::OK();
    }
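
The hint-resolution step near the top of plan() accepts a hint given either by index name (as { $hint: "<name>" }) or by key pattern, restricts the candidate indices to the single matching entry, and fails with "bad hint" when nothing matches. The sketch below is a simplified, self-contained model of that lookup; IndexInfo and resolveHint are hypothetical stand-ins for the real IndexEntry and planner code, with plain string comparison standing in for the BSONObj::woCompare used above.

    #include <cstddef>
    #include <limits>
    #include <string>
    #include <vector>

    // Illustrative stand-in for IndexEntry: only the fields the hint lookup needs.
    struct IndexInfo {
        std::string name;        // index name, e.g. "a_1_b_1"
        std::string keyPattern;  // key pattern rendered as text, e.g. "{ a: 1, b: 1 }"
    };

    // Return the position of the hinted index, or size_t max if nothing matches;
    // the caller would then report "bad hint", as plan() does above.
    std::size_t resolveHint( const std::vector<IndexInfo>& indices,
                             const std::string& hintName,          // from { $hint: "<name>" }
                             const std::string& hintKeyPattern ) { // from a key-pattern hint
        for ( std::size_t i = 0; i < indices.size(); ++i ) {
            if ( !hintName.empty() && indices[i].name == hintName )
                return i;
            if ( !hintKeyPattern.empty() && indices[i].keyPattern == hintKeyPattern )
                return i;
        }
        return std::numeric_limits<std::size_t>::max();
    }

The real planner compares key patterns with BSONObj::woCompare rather than by string, and also copies the matching IndexEntry into relevantIndices so that only the hinted index is considered during enumeration.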