Example #1
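    // Implements the server-side "eval" command: compiles the supplied JavaScript in a
    // pooled scope (keyed by database and authenticated user), invokes it with the given
    // args, and returns the script's return value in "retval".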
    bool dbEval(const string& dbName, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
        BSONElement e = cmd.firstElement();
        uassert( 10046 ,  "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );

        const char *code = 0;
        switch ( e.type() ) {
        case String:
        case Code:
            code = e.valuestr();
            break;
        case CodeWScope:
            code = e.codeWScopeCode();
            break;
        default:
            verify(0);
        }
        verify( code );

        if ( ! globalScriptEngine ) {
            errmsg = "db side execution is disabled";
            return false;
        }

        const string userToken = ClientBasic::getCurrent()->getAuthorizationSession()
                                                          ->getAuthenticatedUserNamesToken();
        auto_ptr<Scope> s = globalScriptEngine->getPooledScope( dbName, "dbeval" + userToken );
        ScriptingFunction f = s->createFunction(code);
        if ( f == 0 ) {
            errmsg = (string)"compile failed: " + s->getError();
            return false;
        }

        if ( e.type() == CodeWScope )
            s->init( e.codeWScopeScopeDataUnsafe() );
        s->localConnect( dbName.c_str() );

        BSONObj args;
        {
            BSONElement argsElement = cmd.getField("args");
            if ( argsElement.type() == Array ) {
                args = argsElement.embeddedObject();
                if ( edebug ) {
                    out() << "args:" << args.toString() << endl;
                    out() << "code:\n" << code << endl;
                }
            }
        }

        int res;
        {
            Timer t;
            res = s->invoke(f, &args, 0, cmdLine.quota ? 10 * 60 * 1000 : 0 );
            int m = t.millis();
            if ( m > cmdLine.slowMS ) {
                out() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
                if ( m >= 1000 ) log() << code << endl;
                else OCCASIONALLY log() << code << endl;
            }
        }
        if (res || s->isLastRetNativeCode()) {
            result.append("errno", (double) res);
            errmsg = "invoke failed: ";
            if (s->isLastRetNativeCode())
                errmsg += "cannot return native function";
            else
                errmsg += s->getError();
            return false;
        }

        s->append( result , "retval" , "__returnValue" );

        return true;
    }
Example #2
    /**
     * Run a query with a cursor provided by the query optimizer, or FindingStartCursor.
     * @yields the db lock.
     */
    string queryWithQueryOptimizer( int queryOptions, const string& ns,
                                    const BSONObj &jsobj, CurOp& curop,
                                    const BSONObj &query, const BSONObj &order,
                                    const shared_ptr<ParsedQuery> &pq_shared,
                                    const BSONObj &oldPlan,
                                    const ChunkVersion &shardingVersionAtStart,
                                    scoped_ptr<PageFaultRetryableSection>& parentPageFaultSection,
                                    scoped_ptr<NoPageFaultsAllowed>& noPageFault,
                                    Message &result ) {

        const ParsedQuery &pq( *pq_shared );
        shared_ptr<Cursor> cursor;
        QueryPlanSummary queryPlan;
        
        if ( pq.hasOption( QueryOption_OplogReplay ) ) {
            cursor = FindingStartCursor::getCursor( ns.c_str(), query, order );
        }
        else {
            cursor = getOptimizedCursor( ns.c_str(),
                                         query,
                                         order,
                                         QueryPlanSelectionPolicy::any(),
                                         pq_shared,
                                         false,
                                         &queryPlan );
        }
        verify( cursor );
        
        scoped_ptr<QueryResponseBuilder> queryResponseBuilder
                ( QueryResponseBuilder::make( pq, cursor, queryPlan, oldPlan ) );
        bool saveClientCursor = false;
        OpTime slaveReadTill;
        ClientCursorHolder ccPointer( new ClientCursor( QueryOption_NoCursorTimeout, cursor,
                                                         ns ) );
        
        for( ; cursor->ok(); cursor->advance() ) {

            bool yielded = false;
            if ( !ccPointer->yieldSometimes( ClientCursor::MaybeCovered, &yielded ) ||
                !cursor->ok() ) {
                cursor.reset();
                queryResponseBuilder->noteYield();
                // !!! TODO The queryResponseBuilder still holds cursor.  Currently it will not do
                // anything unsafe with the cursor in handoff(), but this is very fragile.
                //
                // We don't fail the query since we're fine with returning partial data if the
                // collection was dropped.
                // NOTE see SERVER-2454.
                // TODO This is wrong.  The cursor could be gone if the closeAllDatabases command
                // just ran.
                break;
            }

            if ( yielded ) {
                queryResponseBuilder->noteYield();
            }
            
            if ( pq.getMaxScan() && cursor->nscanned() > pq.getMaxScan() ) {
                break;
            }
            
            if ( !queryResponseBuilder->addMatch() ) {
                continue;
            }
            
            // Note slave's position in the oplog.
            if ( pq.hasOption( QueryOption_OplogReplay ) ) {
                BSONObj current = cursor->current();
                BSONElement e = current["ts"];
                if ( e.type() == Date || e.type() == Timestamp ) {
                    slaveReadTill = e._opTime();
                }
            }
            
            if ( !cursor->supportGetMore() || pq.isExplain() ) {
                if ( queryResponseBuilder->enoughTotalResults() ) {
                    break;
                }
            }
            else if ( queryResponseBuilder->enoughForFirstBatch() ) {
                // if only 1 requested, no cursor saved for efficiency...we assume it is findOne()
                if ( pq.wantMore() && pq.getNumToReturn() != 1 ) {
                    queryResponseBuilder->finishedFirstBatch();
                    if ( cursor->advance() ) {
                        saveClientCursor = true;
                    }
                }
                break;
            }
        }
        
        if ( cursor ) {
            if ( pq.hasOption( QueryOption_CursorTailable ) && pq.getNumToReturn() != 1 ) {
                cursor->setTailable();
            }
            
            // If the tailing request succeeded.
            if ( cursor->tailable() ) {
                saveClientCursor = true;
            }
        }
        
        if ( ! shardingState.getVersion( ns ).isWriteCompatibleWith( shardingVersionAtStart ) ) {
            // if the version changed during the query
            // we might be missing some data
            // and its safe to send this as mongos can resend
            // at this point
            throw SendStaleConfigException(ns, "version changed during initial query",
                                           shardingVersionAtStart,
                                           shardingState.getVersion(ns));
        }
        
        parentPageFaultSection.reset(0);
        noPageFault.reset( new NoPageFaultsAllowed() );

        int nReturned = queryResponseBuilder->handoff( result );

        ccPointer.reset();
        long long cursorid = 0;
        if ( saveClientCursor ) {
            // Create a new ClientCursor, with a default timeout.
            ccPointer.reset( new ClientCursor( queryOptions, cursor, ns,
                                              jsobj.getOwned() ) );
            cursorid = ccPointer->cursorid();
            DEV { MONGO_TLOG(2) << "query has more, cursorid: " << cursorid << endl; }
            if ( cursor->supportYields() ) {
                ClientCursor::YieldData data;
                ccPointer->prepareToYield( data );
            }
            else {
                ccPointer->c()->noteLocation();
            }
            
            // Save slave's position in the oplog.
            if ( pq.hasOption( QueryOption_OplogReplay ) && !slaveReadTill.isNull() ) {
                ccPointer->slaveReadTill( slaveReadTill );
            }
            
            if ( !ccPointer->ok() && ccPointer->c()->tailable() ) {
                DEV {
                    MONGO_TLOG(0) << "query has no more but tailable, cursorid: " << cursorid <<
                        endl;
                }
            }
            
            if( queryOptions & QueryOption_Exhaust ) {
                curop.debug().exhaust = true;
            }
            
            // Set attributes for getMore.
            ccPointer->setCollMetadata( queryResponseBuilder->collMetadata() );
            ccPointer->setPos( nReturned );
            ccPointer->pq = pq_shared;
            ccPointer->fields = pq.getFieldPtr();

            // If the query had a time limit, remaining time is "rolled over" to the cursor (for
            // use by future getmore ops).
            ccPointer->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() );

            ccPointer.release();
        }
Example #3
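    // Converts a single BSONElement into the corresponding V8 JavaScript value,
    // handling each BSON type explicitly.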
    Handle<v8::Value> mongoToV8Element( const BSONElement &f ) {
        switch ( f.type() ){

        case mongo::Code:
            cout << "warning, code saved in database just turned into string right now" << endl;
        case mongo::String: 
            return v8::String::New( f.valuestr() );
            
        case mongo::jstOID: {
            v8::Function * idCons = getObjectIdCons();
            v8::Handle<v8::Value> argv[1];
            argv[0] = v8::String::New( f.__oid().str().c_str() );
            return idCons->NewInstance( 1 , argv );
        }
            
        case mongo::NumberDouble:
        case mongo::NumberInt:
            return v8::Number::New( f.number() );
            
        case mongo::Array:
        case mongo::Object:
            return mongoToV8( f.embeddedObject() , f.type() == mongo::Array );
            
        case mongo::Date:
            return v8::Date::New( f.date() );
            
        case mongo::Bool:
            return v8::Boolean::New( f.boolean() );

        case mongo::EOO:            
        case mongo::jstNULL:
            return v8::Null();
            
        case mongo::RegEx: {
            v8::Function * regex = getNamedCons( "RegExp" );
            
            v8::Handle<v8::Value> argv[2];
            argv[0] = v8::String::New( f.regex() );
            argv[1] = v8::String::New( f.regexFlags() );
            
            return regex->NewInstance( 2 , argv );
            break;
        }
            
        case mongo::BinData: {
            Local<v8::Object> b = v8::Object::New();
            
            int len;
            f.binData( len );
            
            b->Set( v8::String::New( "subtype" ) , v8::Number::New( f.binDataType() ) );
            b->Set( v8::String::New( "length" ) , v8::Number::New( len ) );
            
            return b;
        }
            
        case mongo::Timestamp: {
            Local<v8::Object> sub = v8::Object::New();            
            
            sub->Set( v8::String::New( "time" ) , v8::Date::New( f.timestampTime() ) );
            sub->Set( v8::String::New( "i" ) , v8::Number::New( f.timestampInc() ) );
            
            return sub;
        }
            
        case mongo::MinKey:
            // TODO: make a special type
            return v8::String::New( "MinKey" );
            
        case mongo::MaxKey:
            // TODO: make a special type
            return v8::String::New( "MaxKey" );
            
        case mongo::Undefined:
            return v8::Undefined();
            
        default:
            cout << "can't handle type: ";
			cout  << f.type() << " ";
			cout  << f.toString();
			cout  << endl;
            break;
        }    
        
        return v8::Undefined();
    }
Example #4
intrusive_ptr<DocumentSource> DocumentSourceSort::createFromBson(
    BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
    uassert(15973, "the $sort key specification must be an object", elem.type() == Object);
    return create(pExpCtx, elem.embeddedObject());
}
Example #5
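    // Applies one oplog entry to the target connection: validates the 'ts', 'op', 'ns',
    // and 'o' fields, batches inserts, and replays commands, updates, and deletes,
    // returning false on any format or replay error.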
    bool processObj(const BSONObj &obj) {
        if (obj.hasField("$err")) {
            log() << "error getting oplog: " << obj << endl;
            return false;
        }

        static const char *names[] = {"ts", "op", "ns", "o", "b"};
        BSONElement fields[5];
        obj.getFields(5, names, fields);

        BSONElement &tsElt = fields[0];
        if (!tsElt.ok()) {
            log() << "oplog format error: " << obj << " missing 'ts' field." << endl;
            return false;
        }
        if (tsElt.type() != Date && tsElt.type() != Timestamp) {
            log() << "oplog format error: " << obj << " wrong 'ts' field type." << endl;
            return false;
        }
        _thisTime = OpTime(tsElt.date());

        BSONElement &opElt = fields[1];
        if (!opElt.ok()) {
            log() << "oplog format error: " << obj << " missing 'op' field." << endl;
            return false;
        }
        StringData op = opElt.Stringdata();

        // nop
        if (op == "n") {
            if (!_insertBuf.empty()) {
                flushInserts();
            }
            _maxOpTimeSynced = _thisTime;
            _thisTime = OpTime();
            return true;
        }
        // "presence of a database"
        if (op == "db") {
            if (!_insertBuf.empty()) {
                flushInserts();
            }
            _maxOpTimeSynced = _thisTime;
            _thisTime = OpTime();
            return true;
        }
        if (op != "c" && op != "i" && op != "u" && op != "d") {
            log() << "oplog format error: " << obj << " has an invalid 'op' field of '" << op << "'." << endl;
            return false;
        }

        if (op != "i" && !_insertBuf.empty()) {
            flushInserts();
        }

        BSONElement &nsElt = fields[2];
        if (!nsElt.ok()) {
            log() << "oplog format error: " << obj << " missing 'ns' field." << endl;
            return false;
        }
        StringData ns = nsElt.Stringdata();
        size_t i = ns.find('.');
        if (i == string::npos) {
            log() << "oplog format error: invalid namespace '" << ns << "' in op " << obj << "." << endl;
            return false;
        }
        StringData dbname = ns.substr(0, i);
        StringData collname = ns.substr(i + 1);

        BSONElement &oElt = fields[3];
        if (!oElt.ok()) {
            log() << "oplog format error: " << obj << " missing 'o' field." << endl;
            return false;
        }
        BSONObj o = obj["o"].Obj();

        if (op == "c") {
            if (collname != "$cmd") {
                log() << "oplog format error: invalid namespace '" << ns << "' for command in op " << obj << "." << endl;
                return false;
            }
            BSONObj info;
            bool ok = _conn.runCommand(dbname.toString(), o, info);
            if (!ok) {
                StringData fieldName = o.firstElementFieldName();
                BSONElement errmsgElt = info["errmsg"];
                StringData errmsg = errmsgElt.type() == String ? errmsgElt.Stringdata() : "";
                bool isDropIndexes = (fieldName == "dropIndexes" || fieldName == "deleteIndexes");
                if (((fieldName == "drop" || isDropIndexes) && errmsg == "ns not found") ||
                    (isDropIndexes && (errmsg == "index not found" || errmsg.find("can't find index with key:") == 0))) {
                    // This is actually ok.  We don't mind dropping something that's not there.
                    LOG(1) << "Tried to replay " << o << ", got " << info << ", ignoring." << endl;
                }
                else {
                    log() << "replay of command " << o << " failed: " << info << endl;
                    return false;
                }
            }
        }
        else {
            string nsstr = ns.toString();
            if (op == "i") {
                if (collname == "system.indexes") {
                    // Can't ensure multiple indexes in the same batch.
                    flushInserts();

                    // For now, we need to strip out any background fields from
                    // ensureIndex.  Once we do hot indexing we can do something more
                    // like what vanilla applyOperation_inlock does.
                    if (o["background"].trueValue()) {
                        BSONObjBuilder builder;
                        BSONObjIterator it(o);
                        while (it.more()) {
                            BSONElement e = it.next();
                            if (strncmp(e.fieldName(), "background", sizeof("background")) != 0) {
                                builder.append(e);
                            }
                        }
                        o = builder.obj();
                    }
                    // We need to warn very carefully about dropDups.
                    if (o["dropDups"].trueValue()) {
                        BSONObjBuilder builder;
                        BSONObjIterator it(o);
                        while (it.more()) {
                            BSONElement e = it.next();
                            if (strncmp(e.fieldName(), "dropDups", sizeof("dropDups")) != 0) {
                                builder.append(e);
                            }
                        }
                        warning() << "Detected an ensureIndex with dropDups: true in " << o << "." << endl;
                        warning() << "This option is not supported in TokuMX, because it deletes arbitrary data." << endl;
                        warning() << "If it were replayed, it could result in a completely different data set than the source database." << endl;
                        warning() << "We will attempt to replay it without dropDups, but if that fails, you must restart your migration process." << endl;
                        _conn.insert(nsstr, o);
                        string err = _conn.getLastError(dbname.toString(), false, false);
                        if (!err.empty()) {
                            log() << "replay of operation " << obj << " failed: " << err << endl;
                            warning() << "You cannot continue processing this replication stream.  You need to restart the migration process." << endl;
                            _running = false;
                            _logAtExit = false;
                            return true;
                        }
                    }
                }
                pushInsert(nsstr, o);
                // Don't call GLE or update _maxOpTimeSynced yet.
                _thisTime = OpTime();
                return true;
            }
            else if (op == "u") {
                BSONElement o2Elt = obj["o2"];
                if (!o2Elt.ok()) {
                    log() << "oplog format error: " << obj << " missing 'o2' field." << endl;
                    return false;
                }
                BSONElement &bElt = fields[4];
                bool upsert = bElt.booleanSafe();
                BSONObj o2 = o2Elt.Obj();
                _conn.update(nsstr, o2, o, upsert, false);
            }
            else if (op == "d") {
                BSONElement &bElt = fields[4];
                bool justOne = bElt.booleanSafe();
                _conn.remove(nsstr, o, justOne);
            }
            string err = _conn.getLastError(dbname.toString(), false, false);
            if (!err.empty()) {
                log() << "replay of operation " << obj << " failed: " << err << endl;
                return false;
            }
        }

        // If we got here, we completed the operation successfully.
        _maxOpTimeSynced = _thisTime;
        _thisTime = OpTime();
        return true;
    }
Example #6
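// Dispatches a command received by mongos: unwraps a $query-wrapped command (preserving
// any read preference under $queryOptions), runs it against the registered command
// handlers, and retries a limited number of times on StaleConfigException.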
void Strategy::clientCommandOp(OperationContext* txn, Request& request) {
    QueryMessage q(request.d());

    LOG(3) << "command: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn
           << " options: " << q.queryOptions;

    if (q.queryOptions & QueryOption_Exhaust) {
        uasserted(18527,
                  string("the 'exhaust' query option is invalid for mongos commands: ") + q.ns +
                      " " + q.query.toString());
    }

    NamespaceString nss(request.getns());
    // Regular queries are handled in strategy_shard.cpp
    verify(nss.isCommand() || nss.isSpecialCommand());

    if (handleSpecialNamespaces(txn, request, q))
        return;

    int loops = 5;

    while (true) {
        try {
            BSONObj cmdObj = q.query;
            {
                BSONElement e = cmdObj.firstElement();
                if (e.type() == Object &&
                    (e.fieldName()[0] == '$' ? str::equals("query", e.fieldName() + 1)
                                             : str::equals("query", e.fieldName()))) {
                    // Extract the embedded query object.

                    if (cmdObj.hasField(Query::ReadPrefField.name())) {
                        // The command has a read preference setting. We don't want
                        // to lose this information so we copy this to a new field
                        // called $queryOptions.$readPreference
                        BSONObjBuilder finalCmdObjBuilder;
                        finalCmdObjBuilder.appendElements(e.embeddedObject());

                        BSONObjBuilder queryOptionsBuilder(
                            finalCmdObjBuilder.subobjStart("$queryOptions"));
                        queryOptionsBuilder.append(cmdObj[Query::ReadPrefField.name()]);
                        queryOptionsBuilder.done();

                        cmdObj = finalCmdObjBuilder.obj();
                    } else {
                        cmdObj = e.embeddedObject();
                    }
                }
            }

            OpQueryReplyBuilder reply;
            {
                BSONObjBuilder builder(reply.bufBuilderForResults());
                Command::runAgainstRegistered(txn, q.ns, cmdObj, builder, q.queryOptions);
            }
            reply.sendCommandReply(request.p(), request.m());
            return;
        } catch (const StaleConfigException& e) {
            if (loops <= 0)
                throw e;

            loops--;
            log() << "retrying command: " << q.query;

            // For legacy reasons, ns may not actually be set in the exception :-(
            string staleNS = e.getns();
            if (staleNS.size() == 0)
                staleNS = q.ns;

            ShardConnection::checkMyConnectionVersions(txn, staleNS);
            if (loops < 4)
                versionManager.forceRemoteCheckShardVersionCB(txn, staleNS);
        } catch (const DBException& e) {
            OpQueryReplyBuilder reply;
            {
                BSONObjBuilder builder(reply.bufBuilderForResults());
                Command::appendCommandStatus(builder, e.toStatus());
            }
            reply.sendCommandReply(request.p(), request.m());
            return;
        }
    }
}
Example #7
// Parses a write concern document ({w, j, fsync, wtimeout, ...}) into WriteConcernOptions,
// validating field types and rejecting unrecognized fields.
Status WriteConcernOptions::parse(const BSONObj& obj) {
    reset();
    if (obj.isEmpty()) {
        return Status(ErrorCodes::FailedToParse, "write concern object cannot be empty");
    }

    BSONElement jEl;
    BSONElement fsyncEl;
    BSONElement wEl;


    for (auto e : obj) {
        const auto fieldName = e.fieldNameStringData();
        if (fieldName == kJFieldName) {
            jEl = e;
            if (!jEl.isNumber() && jEl.type() != Bool) {
                return Status(ErrorCodes::FailedToParse, "j must be numeric or a boolean value");
            }
        } else if (fieldName == kFSyncFieldName) {
            fsyncEl = e;
            if (!fsyncEl.isNumber() && fsyncEl.type() != Bool) {
                return Status(ErrorCodes::FailedToParse,
                              "fsync must be numeric or a boolean value");
            }
        } else if (fieldName == kWFieldName) {
            wEl = e;
        } else if (fieldName == kWTimeoutFieldName) {
            wTimeout = e.numberInt();
        } else if (fieldName == kWElectionIdFieldName) {
            // Ignore.
        } else if (fieldName == kWOpTimeFieldName) {
            // Ignore.
        } else if (fieldName.equalCaseInsensitive(kGetLastErrorFieldName)) {
            // Ignore GLE field.
        } else {
            return Status(ErrorCodes::FailedToParse,
                          str::stream() << "unrecognized write concern field: " << fieldName);
        }
    }

    const bool j = jEl.trueValue();
    const bool fsync = fsyncEl.trueValue();

    if (j && fsync)
        return Status(ErrorCodes::FailedToParse, "fsync and j options cannot be used together");

    if (j) {
        syncMode = SyncMode::JOURNAL;
    } else if (fsync) {
        syncMode = SyncMode::FSYNC;
    } else if (!jEl.eoo()) {
        syncMode = SyncMode::NONE;
    }

    if (wEl.isNumber()) {
        wNumNodes = wEl.numberInt();
    } else if (wEl.type() == String) {
        wMode = wEl.valuestrsafe();
    } else if (wEl.eoo() || wEl.type() == jstNULL || wEl.type() == Undefined) {
        wNumNodes = 1;
    } else {
        return Status(ErrorCodes::FailedToParse, "w has to be a number or a string");
    }

    return Status::OK();
}
Example #8
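// Another version of Strategy::clientCommandOp that, in addition to the handling shown
// above, validates the maxTimeMS command option and sets the operation deadline before
// dispatching the command.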
void Strategy::clientCommandOp(OperationContext* txn, Request& request) {
    const QueryMessage q(request.d());

    LOG(3) << "command: " << q.ns << " " << redact(q.query) << " ntoreturn: " << q.ntoreturn
           << " options: " << q.queryOptions;

    if (q.queryOptions & QueryOption_Exhaust) {
        uasserted(18527,
                  string("the 'exhaust' query option is invalid for mongos commands: ") + q.ns +
                      " " + q.query.toString());
    }

    const NamespaceString nss(request.getns());
    invariant(nss.isCommand() || nss.isSpecialCommand());

    if (handleSpecialNamespaces(txn, request, q))
        return;

    BSONObj cmdObj = q.query;

    {
        BSONElement e = cmdObj.firstElement();
        if (e.type() == Object && (e.fieldName()[0] == '$' ? str::equals("query", e.fieldName() + 1)
                                                           : str::equals("query", e.fieldName()))) {
            // Extract the embedded query object.
            if (cmdObj.hasField(Query::ReadPrefField.name())) {
                // The command has a read preference setting. We don't want to lose this information
                // so we copy this to a new field called $queryOptions.$readPreference
                BSONObjBuilder finalCmdObjBuilder;
                finalCmdObjBuilder.appendElements(e.embeddedObject());

                BSONObjBuilder queryOptionsBuilder(finalCmdObjBuilder.subobjStart("$queryOptions"));
                queryOptionsBuilder.append(cmdObj[Query::ReadPrefField.name()]);
                queryOptionsBuilder.done();

                cmdObj = finalCmdObjBuilder.obj();
            } else {
                cmdObj = e.embeddedObject();
            }
        }
    }

    // Handle command option maxTimeMS.
    uassert(ErrorCodes::InvalidOptions,
            "no such command option $maxTimeMs; use maxTimeMS instead",
            cmdObj[QueryRequest::queryOptionMaxTimeMS].eoo());

    const int maxTimeMS =
        uassertStatusOK(QueryRequest::parseMaxTimeMS(cmdObj[QueryRequest::cmdOptionMaxTimeMS]));
    if (maxTimeMS > 0) {
        txn->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
    }

    int loops = 5;

    while (true) {
        try {
            OpQueryReplyBuilder reply;
            {
                BSONObjBuilder builder(reply.bufBuilderForResults());
                runAgainstRegistered(txn, q.ns, cmdObj, builder, q.queryOptions);
            }
            reply.sendCommandReply(request.session(), request.m());
            return;
        } catch (const StaleConfigException& e) {
            if (loops <= 0)
                throw e;

            loops--;

            log() << "Retrying command " << redact(q.query) << causedBy(e);

            // For legacy reasons, ns may not actually be set in the exception :-(
            string staleNS = e.getns();
            if (staleNS.size() == 0)
                staleNS = q.ns;

            ShardConnection::checkMyConnectionVersions(txn, staleNS);
            if (loops < 4)
                versionManager.forceRemoteCheckShardVersionCB(txn, staleNS);
        } catch (const DBException& e) {
            OpQueryReplyBuilder reply;
            {
                BSONObjBuilder builder(reply.bufBuilderForResults());
                Command::appendCommandStatus(builder, e.toStatus());
            }
            reply.sendCommandReply(request.session(), request.m());
            return;
        }
    }
}
Example #9
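        // Implements the applyOps command: validates the ops array, checks any
        // preConditions under a global write lock, applies each operation under a nested
        // per-namespace lock, and logs the preCondition-stripped command for replication
        // when not applied from replication itself.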
        virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {

            if ( cmdObj.firstElement().type() != Array ) {
                errmsg = "ops has to be an array";
                return false;
            }

            BSONObj ops = cmdObj.firstElement().Obj();

            {
                // check input
                BSONObjIterator i( ops );
                while ( i.more() ) {
                    BSONElement e = i.next();
                    if ( e.type() == Object )
                        continue;
                    errmsg = "op not an object: ";
                    errmsg += e.fieldName();
                    return false;
                }
            }

            // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple 
            // ns used so locking individually requires more analysis
            Lock::GlobalWrite globalWriteLock;

            // Preconditions check reads the database state, so needs to be done locked
            if ( cmdObj["preCondition"].type() == Array ) {
                BSONObjIterator i( cmdObj["preCondition"].Obj() );
                while ( i.more() ) {
                    BSONObj f = i.next().Obj();

                    BSONObj realres = db.findOne( f["ns"].String() , f["q"].Obj() );

                    // Apply-ops would never have a $where matcher, so use the default callback,
                    // which will throw an error if $where is found.
                    Matcher m(f["res"].Obj());
                    if ( ! m.matches( realres ) ) {
                        result.append( "got" , realres );
                        result.append( "whatFailed" , f );
                        errmsg = "pre-condition failed";
                        return false;
                    }
                }
            }

            // apply
            int num = 0;
            int errors = 0;
            
            BSONObjIterator i( ops );
            BSONArrayBuilder ab;
            const bool alwaysUpsert = cmdObj.hasField("alwaysUpsert") ?
                    cmdObj["alwaysUpsert"].trueValue() : true;
            
            while ( i.more() ) {
                BSONElement e = i.next();
                const BSONObj& temp = e.Obj();

                string ns = temp["ns"].String();

                // Run operations under a nested lock as a hack to prevent them from yielding.
                //
                // The list of operations is supposed to be applied atomically; yielding would break
                // atomicity by allowing an interruption or a shutdown to occur after only some
                // operations are applied.  We are already locked globally at this point, so taking
                // a DBWrite on the namespace creates a nested lock, and yields are disallowed for
                // operations that hold a nested lock.
                Lock::DBWrite lk(ns);
                invariant(Lock::nested());

                Client::Context ctx(ns);
                bool failed = applyOperation_inlock(txn, ctx.db(), temp, false, alwaysUpsert);
                ab.append(!failed);
                if ( failed )
                    errors++;

                num++;

                logOpForDbHash(ns.c_str());
            }

            result.append( "applied" , num );
            result.append( "results" , ab.arr() );

            if ( ! fromRepl ) {
                // We want this applied atomically on slaves
                // so we re-wrap without the pre-condition for speed

                string tempNS = str::stream() << dbname << ".$cmd";

                // TODO: possibly use mutable BSON to remove preCondition field
                // once it is available
                BSONObjIterator iter(cmdObj);
                BSONObjBuilder cmdBuilder;

                while (iter.more()) {
                    BSONElement elem(iter.next());
                    if (strcmp(elem.fieldName(), "preCondition") != 0) {
                        cmdBuilder.append(elem);
                    }
                }

                logOp(txn, "c", tempNS.c_str(), cmdBuilder.done());
            }

            return errors == 0;
        }
Example #10
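// Appends one element of a source document to the projection output, recursing into
// sub-objects and handling the $ positional operator for arrays.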
Status ProjectionExec::append(BSONObjBuilder* bob,
                              const BSONElement& elt,
                              const MatchDetails* details,
                              const ArrayOpType arrayOpType) const {
    // Skip if the field name matches a computed $meta field.
    // $meta projection fields can exist at the top level of
    // the result document and the field names cannot be dotted.
    if (_meta.find(elt.fieldName()) != _meta.end()) {
        return Status::OK();
    }

    FieldMap::const_iterator field = _fields.find(elt.fieldName());
    if (field == _fields.end()) {
        if (_include) {
            bob->append(elt);
        }
        return Status::OK();
    }

    ProjectionExec& subfm = *field->second;
    if ((subfm._fields.empty() && !subfm._special) ||
        !(elt.type() == Object || elt.type() == Array)) {
        // field map empty, or element is not an array/object
        if (subfm._include) {
            bob->append(elt);
        }
    } else if (elt.type() == Object) {
        BSONObjBuilder subBob;
        BSONObjIterator it(elt.embeddedObject());
        while (it.more()) {
            subfm.append(&subBob, it.next(), details, arrayOpType);
        }
        bob->append(elt.fieldName(), subBob.obj());
    } else {
        // Array
        BSONObjBuilder matchedBuilder;
        if (details && arrayOpType == ARRAY_OP_POSITIONAL) {
            // $ positional operator specified
            if (!details->hasElemMatchKey()) {
                mongoutils::str::stream error;
                error << "positional operator (" << elt.fieldName()
                      << ".$) requires corresponding field"
                      << " in query specifier";
                return Status(ErrorCodes::BadValue, error);
            }

            if (elt.embeddedObject()[details->elemMatchKey()].eoo()) {
                return Status(ErrorCodes::BadValue, "positional operator element mismatch");
            }

            // append as the first and only element in the projected array
            matchedBuilder.appendAs(elt.embeddedObject()[details->elemMatchKey()], "0");
        } else {
            // append exact array; no subarray matcher specified
            subfm.appendArray(&matchedBuilder, elt.embeddedObject());
        }
        bob->appendArray(elt.fieldName(), matchedBuilder.obj());
    }

    return Status::OK();
}
Example #11
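// Parses a projection specification, recording $slice, $elemMatch, and $meta operators,
// the _id setting, and whether the projection is inclusive or exclusive.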
ProjectionExec::ProjectionExec(const BSONObj& spec,
                               const MatchExpression* queryExpression,
                               const CollatorInterface* collator,
                               const ExtensionsCallback& extensionsCallback)
    : _include(true),
      _special(false),
      _source(spec),
      _includeID(true),
      _skip(0),
      _limit(-1),
      _arrayOpType(ARRAY_OP_NORMAL),
      _queryExpression(queryExpression),
      _hasReturnKey(false),
      _collator(collator) {
    // Whether we're including or excluding fields.
    enum class IncludeExclude { kUninitialized, kInclude, kExclude };
    IncludeExclude includeExclude = IncludeExclude::kUninitialized;

    BSONObjIterator it(_source);
    while (it.more()) {
        BSONElement e = it.next();

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            verify(1 == obj.nFields());

            BSONElement e2 = obj.firstElement();
            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    int i = e2.numberInt();
                    if (i < 0) {
                        add(e.fieldName(), i, -i);  // limit is now positive
                    } else {
                        add(e.fieldName(), 0, i);
                    }
                } else {
                    verify(e2.type() == Array);
                    BSONObj arr = e2.embeddedObject();
                    verify(2 == arr.nFields());

                    BSONObjIterator it(arr);
                    int skip = it.next().numberInt();
                    int limit = it.next().numberInt();

                    verify(limit > 0);

                    add(e.fieldName(), skip, limit);
                }
            } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                _arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());
                _elemMatchObjs.push_back(elemMatchObj);
                StatusWithMatchExpression statusWithMatcher =
                    MatchExpressionParser::parse(elemMatchObj, extensionsCallback, _collator);
                verify(statusWithMatcher.isOK());
                // And store it in _matchers.
                _matchers[mongoutils::str::before(e.fieldName(), '.').c_str()] =
                    statusWithMatcher.getValue().release();

                add(e.fieldName(), true);
            } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                verify(String == e2.type());
                if (e2.valuestr() == QueryRequest::metaTextScore) {
                    _meta[e.fieldName()] = META_TEXT_SCORE;
                } else if (e2.valuestr() == QueryRequest::metaSortKey) {
                    _sortKeyMetaFields.push_back(e.fieldName());
                    _meta[_sortKeyMetaFields.back()] = META_SORT_KEY;
                } else if (e2.valuestr() == QueryRequest::metaRecordId) {
                    _meta[e.fieldName()] = META_RECORDID;
                } else if (e2.valuestr() == QueryRequest::metaGeoNearPoint) {
                    _meta[e.fieldName()] = META_GEONEAR_POINT;
                } else if (e2.valuestr() == QueryRequest::metaGeoNearDistance) {
                    _meta[e.fieldName()] = META_GEONEAR_DIST;
                } else if (e2.valuestr() == QueryRequest::metaIndexKey) {
                    _hasReturnKey = true;
                } else {
                    // This shouldn't happen, should be caught by parsing.
                    verify(0);
                }
            } else {
                verify(0);
            }
        } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            _includeID = false;
        } else {
            add(e.fieldName(), e.trueValue());

            // If we haven't specified an include/exclude, initialize includeExclude.
            if (includeExclude == IncludeExclude::kUninitialized) {
                includeExclude =
                    e.trueValue() ? IncludeExclude::kInclude : IncludeExclude::kExclude;
                _include = !e.trueValue();
            }
        }

        if (mongoutils::str::contains(e.fieldName(), ".$")) {
            _arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }
}
Example #12
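    // Produces candidate QuerySolutions for a canonical query: handles tailable,
    // $natural, hinted, snapshot, and min/max cases up front, then enumerates plans over
    // the relevant indices and optionally adds a sort-providing or hinted whole-index scan.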
    // static
    Status QueryPlanner::plan(const CanonicalQuery& query,
                              const QueryPlannerParams& params,
                              std::vector<QuerySolution*>* out) {

        QLOG() << "Beginning planning..." << endl
               << "=============================" << endl
               << "Options = " << optionString(params.options) << endl
               << "Canonical query:" << endl << query.toString()
               << "=============================" << endl;

        for (size_t i = 0; i < params.indices.size(); ++i) {
            QLOG() << "Index " << i << " is " << params.indices[i].toString() << endl;
        }

        bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);

        // If the query requests a tailable cursor, the only solution is a collscan + filter with
        // tailable set on the collscan.  TODO: This is a policy departure.  Previously I think you
        // could ask for a tailable cursor and it just tried to give you one.  Now, we fail if we
        // can't provide one.  Is this what we want?
        if (query.getParsed().hasOption(QueryOption_CursorTailable)) {
            if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
                && canTableScan) {
                QuerySolution* soln = buildCollscanSoln(query, true, params);
                if (NULL != soln) {
                    out->push_back(soln);
                }
            }
            return Status::OK();
        }

        // The hint or sort can be $natural: 1.  If this happens, output a collscan. If both
        // a $natural hint and a $natural sort are specified, then the direction of the collscan
        // is determined by the sign of the sort (not the sign of the hint).
        if (!query.getParsed().getHint().isEmpty() || !query.getParsed().getSort().isEmpty()) {
            BSONObj hintObj = query.getParsed().getHint();
            BSONObj sortObj = query.getParsed().getSort();
            BSONElement naturalHint = hintObj.getFieldDotted("$natural");
            BSONElement naturalSort = sortObj.getFieldDotted("$natural");

            // A hint overrides a $natural sort. This means that we don't force a table
            // scan if there is a $natural sort with a non-$natural hint.
            if (!naturalHint.eoo() || (!naturalSort.eoo() && hintObj.isEmpty())) {
                QLOG() << "Forcing a table scan due to hinted $natural\n";
                // min/max are incompatible with $natural.
                if (canTableScan && query.getParsed().getMin().isEmpty()
                                 && query.getParsed().getMax().isEmpty()) {
                    QuerySolution* soln = buildCollscanSoln(query, false, params);
                    if (NULL != soln) {
                        out->push_back(soln);
                    }
                }
                return Status::OK();
            }
        }

        // Figure out what fields we care about.
        unordered_set<string> fields;
        QueryPlannerIXSelect::getFields(query.root(), "", &fields);

        for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
            QLOG() << "Predicate over field '" << *it << "'" << endl;
        }

        // Filter our indices so we only look at indices that are over our predicates.
        vector<IndexEntry> relevantIndices;

        // Hints require us to only consider the hinted index.
        // If index filters in the query settings were used to override
        // the allowed indices for planning, we should not use the hinted index
        // requested in the query.
        BSONObj hintIndex;
        if (!params.indexFiltersApplied) {
            hintIndex = query.getParsed().getHint();
        }

        // Snapshot is a form of a hint.  If snapshot is set, try to use _id index to make a real
        // plan.  If that fails, just scan the _id index.
        if (query.getParsed().isSnapshot()) {
            // Find the ID index in indexKeyPatterns.  It's our hint.
            for (size_t i = 0; i < params.indices.size(); ++i) {
                if (isIdIndex(params.indices[i].keyPattern)) {
                    hintIndex = params.indices[i].keyPattern;
                    break;
                }
            }
        }

        size_t hintIndexNumber = numeric_limits<size_t>::max();

        if (hintIndex.isEmpty()) {
            QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
        }
        else {
            // Sigh.  If the hint is specified it might be using the index name.
            BSONElement firstHintElt = hintIndex.firstElement();
            if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
                string hintName = firstHintElt.String();
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    if (params.indices[i].name == hintName) {
                        QLOG() << "Hint by name specified, restricting indices to "
                               << params.indices[i].keyPattern.toString() << endl;
                        relevantIndices.clear();
                        relevantIndices.push_back(params.indices[i]);
                        hintIndexNumber = i;
                        hintIndex = params.indices[i].keyPattern;
                        break;
                    }
                }
            }
            else {
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
                        relevantIndices.clear();
                        relevantIndices.push_back(params.indices[i]);
                        QLOG() << "Hint specified, restricting indices to " << hintIndex.toString()
                               << endl;
                        hintIndexNumber = i;
                        break;
                    }
                }
            }

            if (hintIndexNumber == numeric_limits<size_t>::max()) {
                return Status(ErrorCodes::BadValue, "bad hint");
            }
        }

        // Deal with the .min() and .max() query options.  If either exist we can only use an index
        // that matches the object inside.
        if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
            BSONObj minObj = query.getParsed().getMin();
            BSONObj maxObj = query.getParsed().getMax();

            // This is the index into params.indices[...] that we use.
            size_t idxNo = numeric_limits<size_t>::max();

            // If there's an index hinted we need to be able to use it.
            if (!hintIndex.isEmpty()) {
                if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
                    QLOG() << "Minobj doesn't work with hint";
                    return Status(ErrorCodes::BadValue,
                                  "hint provided does not work with min query");
                }

                if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
                    QLOG() << "Maxobj doesn't work with hint";
                    return Status(ErrorCodes::BadValue,
                                  "hint provided does not work with max query");
                }

                idxNo = hintIndexNumber;
            }
            else {
                // No hinted index, look for one that is compatible (has same field names and
                // ordering thereof).
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    const BSONObj& kp = params.indices[i].keyPattern;

                    BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
                    if (indexCompatibleMaxMin(toUse, kp)) {
                        idxNo = i;
                        break;
                    }
                }
            }

            if (idxNo == numeric_limits<size_t>::max()) {
                QLOG() << "Can't find relevant index to use for max/min query";
                // Can't find an index to use, bail out.
                return Status(ErrorCodes::BadValue,
                              "unable to find relevant index for max/min query");
            }

            // maxObj can be empty; the index scan just goes until the end.  minObj can't be empty
            // though, so if it is, we make a minKey object.
            if (minObj.isEmpty()) {
                BSONObjBuilder bob;
                bob.appendMinKey("");
                minObj = bob.obj();
            }
            else {
                // Must strip off the field names to make an index key.
                minObj = stripFieldNames(minObj);
            }

            if (!maxObj.isEmpty()) {
                // Must strip off the field names to make an index key.
                maxObj = stripFieldNames(maxObj);
            }

            QLOG() << "Max/min query using index " << params.indices[idxNo].toString() << endl;

            // Make our scan and output.
            QuerySolutionNode* solnRoot = QueryPlannerAccess::makeIndexScan(params.indices[idxNo],
                                                                            query,
                                                                            params,
                                                                            minObj,
                                                                            maxObj);

            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
            if (NULL != soln) {
                out->push_back(soln);
            }

            return Status::OK();
        }

        for (size_t i = 0; i < relevantIndices.size(); ++i) {
            QLOG() << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
            LOG(2) << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
        }

        // Figure out how useful each index is to each predicate.
        QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);
        QueryPlannerIXSelect::stripInvalidAssignments(query.root(), relevantIndices);

        // query.root() is now annotated with RelevantTag(s).
        QLOG() << "Rated tree:" << endl << query.root()->toString();

        // If there is a GEO_NEAR it must have an index it can use directly.
        MatchExpression* gnNode = NULL;
        if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
            // No index for GEO_NEAR?  No query.
            RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
            if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
                QLOG() << "Unable to find index for $geoNear query." << endl;
                // Don't leave tags on query tree.
                query.root()->resetTag();
                return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
            }

            QLOG() << "Rated tree after geonear processing:" << query.root()->toString();
        }

        // Likewise, if there is a TEXT it must have an index it can use directly.
        MatchExpression* textNode = NULL;
        if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
            RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());

            // Exactly one text index required for TEXT.  We need to check this explicitly because
            // the text stage can't be built if no text index exists or there is an ambiguity as to
            // which one to use.
            size_t textIndexCount = 0;
            for (size_t i = 0; i < params.indices.size(); i++) {
                if (INDEX_TEXT == params.indices[i].type) {
                    textIndexCount++;
                }
            }
            if (textIndexCount != 1) {
                // Don't leave tags on query tree.
                query.root()->resetTag();
                return Status(ErrorCodes::BadValue, "need exactly one text index for $text query");
            }

            // Error if the text node is tagged with zero indices.
            if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
                // Don't leave tags on query tree.
                query.root()->resetTag();
                return Status(ErrorCodes::BadValue,
                              "failed to use text index to satisfy $text query (if text index is "
                              "compound, are equality predicates given for all prefix fields?)");
            }

            // At this point, we know that there is only one text index and that the TEXT node is
            // assigned to it.
            invariant(1 == tag->first.size() + tag->notFirst.size());

            QLOG() << "Rated tree after text processing:" << query.root()->toString();
        }

        // If we have any relevant indices, we try to create indexed plans.
        if (0 < relevantIndices.size()) {
            // The enumerator spits out trees tagged with IndexTag(s).
            PlanEnumeratorParams enumParams;
            enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
            enumParams.root = query.root();
            enumParams.indices = &relevantIndices;

            PlanEnumerator isp(enumParams);
            isp.init();

            MatchExpression* rawTree;
            while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
                QLOG() << "About to build solntree from tagged tree:" << endl
                       << rawTree->toString();

                // The tagged tree produced by the plan enumerator is not guaranteed
                // to be canonically sorted. In order to be compatible with the cached
                // data, sort the tagged tree according to CanonicalQuery ordering.
                boost::scoped_ptr<MatchExpression> clone(rawTree->shallowClone());
                CanonicalQuery::sortTree(clone.get());

                PlanCacheIndexTree* cacheData;
                Status indexTreeStatus = cacheDataFromTaggedTree(clone.get(), relevantIndices, &cacheData);
                if (!indexTreeStatus.isOK()) {
                    QLOG() << "Query is not cachable: " << indexTreeStatus.reason() << endl;
                }
                auto_ptr<PlanCacheIndexTree> autoData(cacheData);

                // This can fail if enumeration makes a mistake.
                QuerySolutionNode* solnRoot =
                    QueryPlannerAccess::buildIndexedDataAccess(query, rawTree, false, relevantIndices);

                if (NULL == solnRoot) { continue; }

                QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query,
                                                                              params,
                                                                              solnRoot);
                if (NULL != soln) {
                    QLOG() << "Planner: adding solution:" << endl << soln->toString();
                    if (indexTreeStatus.isOK()) {
                        SolutionCacheData* scd = new SolutionCacheData();
                        scd->tree.reset(autoData.release());
                        soln->cacheData.reset(scd);
                    }
                    out->push_back(soln);
                }
            }
        }

        // Don't leave tags on query tree.
        query.root()->resetTag();

        QLOG() << "Planner: outputted " << out->size() << " indexed solutions.\n";

        // Produce legible error message for failed OR planning with a TEXT child.
        // TODO: support collection scan for non-TEXT children of OR.
        if (out->size() == 0 && textNode != NULL &&
            MatchExpression::OR == query.root()->matchType()) {
            MatchExpression* root = query.root();
            for (size_t i = 0; i < root->numChildren(); ++i) {
                if (textNode == root->getChild(i)) {
                    return Status(ErrorCodes::BadValue,
                                  "Failed to produce a solution for TEXT under OR - "
                                  "other non-TEXT clauses under OR have to be indexed as well.");
                }
            }
        }

        // An index was hinted.  If there are any solutions, they use the hinted index.  If not, we
        // scan the entire index to provide results and output that as our plan.  This is the
        // desired behavior when an index is hinted that is not relevant to the query.
        if (!hintIndex.isEmpty()) {
            if (0 == out->size()) {
                QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber],
                                                       query, params);
                verify(NULL != soln);
                QLOG() << "Planner: outputting soln that uses hinted index as scan." << endl;
                out->push_back(soln);
            }
            return Status::OK();
        }

        // If a sort order is requested, there may be an index that provides it, even if that
        // index is not over any predicates in the query.
        //
        if (!query.getParsed().getSort().isEmpty()
            && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
            && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {

            // See if we have a sort provided from an index already.
            // This is implied by the presence of a non-blocking solution.
            bool usingIndexToSort = false;
            for (size_t i = 0; i < out->size(); ++i) {
                QuerySolution* soln = (*out)[i];
                if (!soln->hasBlockingStage) {
                    usingIndexToSort = true;
                    break;
                }
            }

            if (!usingIndexToSort) {
                for (size_t i = 0; i < params.indices.size(); ++i) {
                    const IndexEntry& index = params.indices[i];
                    // Only regular (non-plugin) indexes can be used to provide a sort.
                    if (index.type != INDEX_BTREE) {
                        continue;
                    }
                    // Only non-sparse indexes can be used to provide a sort.
                    if (index.sparse) {
                        continue;
                    }

                    // TODO: Sparse indexes can't normally provide a sort, because non-indexed
                    // documents could potentially be missing from the result set.  However, if the
                    // query predicate can be used to guarantee that all documents to be returned
                    // are indexed, then the index should be able to provide the sort.
                    //
                    // For example:
                    // - Sparse index {a: 1, b: 1} should be able to provide a sort for
                    //   find({b: 1}).sort({a: 1}).  SERVER-13908.
                    // - Index {a: 1, b: "2dsphere"} (which is "geo-sparse", if
                    //   2dsphereIndexVersion=2) should be able to provide a sort for
                    //   find({b: GEO}).sort({a:1}).  SERVER-10801.

                    const BSONObj kp = LiteParsedQuery::normalizeSortOrder(index.keyPattern);
                    if (providesSort(query, kp)) {
                        QLOG() << "Planner: outputting soln that uses index to provide sort."
                               << endl;
                        QuerySolution* soln = buildWholeIXSoln(params.indices[i],
                                                               query, params);
                        if (NULL != soln) {
                            PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
                            indexTree->setIndexEntry(params.indices[i]);
                            SolutionCacheData* scd = new SolutionCacheData();
                            scd->tree.reset(indexTree);
                            scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
                            scd->wholeIXSolnDir = 1;

                            soln->cacheData.reset(scd);
                            out->push_back(soln);
                            break;
                        }
                    }
                    if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
                        QLOG() << "Planner: outputting soln that uses (reverse) index "
                               << "to provide sort." << endl;
                        QuerySolution* soln = buildWholeIXSoln(params.indices[i], query,
                                                               params, -1);
                        if (NULL != soln) {
                            PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
                            indexTree->setIndexEntry(params.indices[i]);
                            SolutionCacheData* scd = new SolutionCacheData();
                            scd->tree.reset(indexTree);
                            scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
                            scd->wholeIXSolnDir = -1;

                            soln->cacheData.reset(scd);
                            out->push_back(soln);
                            break;
                        }
                    }
                }
            }
        }

        // geoNear and text queries *require* an index.
        // Also, if a hint is specified it indicates that we MUST use it.
        bool possibleToCollscan = !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
                               && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
                               && hintIndex.isEmpty();

        // The caller can explicitly ask for a collscan.
        bool collscanRequested = (params.options & QueryPlannerParams::INCLUDE_COLLSCAN);

        // No indexed plans?  We must provide a collscan if possible or else we can't run the query.
        bool collscanNeeded = (0 == out->size() && canTableScan);

        if (possibleToCollscan && (collscanRequested || collscanNeeded)) {
            QuerySolution* collscan = buildCollscanSoln(query, false, params);
            if (NULL != collscan) {
                SolutionCacheData* scd = new SolutionCacheData();
                scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
                collscan->cacheData.reset(scd);
                out->push_back(collscan);
                QLOG() << "Planner: outputting a collscan:" << endl
                       << collscan->toString();
            }
        }

        return Status::OK();
    }
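A brief, hedged illustration of the sort-providing branch above; only reverseSortObj, providesSort, and wholeIXSolnDir come from this excerpt, while the key patterns are illustrative.

    // Requested sort {a: 1} with a btree index {a: 1, b: 1}: the sort is a prefix of the
    // (normalized) key pattern, so buildWholeIXSoln() can emit a whole index scan with
    // wholeIXSolnDir = 1 instead of a blocking in-memory sort.
    BSONObj keyPattern = BSON("a" << 1 << "b" << 1);
    BSONObj reversed   = QueryPlannerCommon::reverseSortObj(keyPattern);   // {a: -1, b: -1}
    // A sort of {a: -1} would match 'reversed' instead and use wholeIXSolnDir = -1.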
    Status ModifierCurrentDate::init(const BSONElement& modExpr, const Options& opts,
                                     bool* positional) {

        _updatePath.parse(modExpr.fieldName());
        Status status = fieldchecker::isUpdatable(_updatePath);
        if (!status.isOK()) {
            return status;
        }

        // If a $-positional operator was used, get the index in which it occurred
        // and ensure only one occurrence.
        size_t foundCount;
        bool foundDollar = fieldchecker::isPositional(_updatePath,
                                                      &_pathReplacementPosition,
                                                      &foundCount);

        if (positional)
            *positional = foundDollar;

        if (foundDollar && foundCount > 1) {
            return Status(ErrorCodes::BadValue,
                          str::stream() << "Too many positional (i.e. '$') elements found in path '"
                                        << _updatePath.dottedField() << "'");
        }

        // Validate and store the type to produce
        switch (modExpr.type()) {
            case Bool:
                _typeIsDate = true;
                break;
            case Object: {
                const BSONObj argObj = modExpr.embeddedObject();
                const BSONElement typeElem = argObj.getField(kType);
                bool badInput = typeElem.eoo() || !(typeElem.type() == String);

                if (!badInput) {
                    std::string typeVal = typeElem.String();
                    badInput = !(typeElem.String() == kDate || typeElem.String() == kTimestamp);
                    if (!badInput)
                        _typeIsDate = (typeVal == kDate);

                    if (!badInput) {
                        // Check to make sure only the $type field was given as an arg
                        BSONObjIterator i( argObj );
                        const bool onlyHasTypeField = ((i.next().fieldNameStringData() == kType)
                                                            && i.next().eoo());
                        if (!onlyHasTypeField) {
                            return Status(ErrorCodes::BadValue,
                                          str::stream() <<
                                          "The only valid field of the option is '$type': "
                                          "{$currentDate: {field : {$type: 'date/timestamp'}}}; "
                                          << "arg: " << argObj);
                        }

                    }

                }

                if (badInput) {
                    return Status(ErrorCodes::BadValue,
                                  "The '$type' string field is required "
                                  "to be 'date' or 'timestamp': "
                                  "{$currentDate: {field : {$type: 'date'}}}");
                }
                break;
            }
            default:
                return Status(ErrorCodes::BadValue,
                              str::stream() << typeName(modExpr.type())
                              << " is not valid type for $currentDate."
                                 " Please use a boolean ('true')"
                                 " or a $type expression ({$type: 'timestamp/date'}).");
        }

        return Status::OK();
    }
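Hedged examples of the two argument shapes this init() accepts, built with the BSON() macro; the field name "lastSeen" is illustrative only.

    // Boolean form: produces a Date.
    BSONObj dateForm = BSON("$currentDate" << BSON("lastSeen" << true));
    // Object form: the only valid option is $type, either "date" or "timestamp".
    BSONObj tsForm = BSON("$currentDate" << BSON("lastSeen" << BSON("$type" << "timestamp")));
    // init() receives the element under $currentDate:
    BSONElement modExpr = dateForm.firstElement().embeddedObject().firstElement();   // lastSeen: true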
Example #14
static bool isHashedPatternEl(const BSONElement& el) {
    return el.type() == String && el.String() == IndexNames::HASHED;
}
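A hedged usage sketch; IndexNames::HASHED is the string constant compared above (conventionally "hashed"), and the key patterns are illustrative.

    BSONObj hashedPattern = BSON("userId" << "hashed");
    bool isHashed = isHashedPatternEl(hashedPattern.firstElement());            // true
    bool isBtree  = isHashedPatternEl(BSON("userId" << 1).firstElement());      // false: numeric element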
Example #15
    jsval toval( const BSONElement& e ) {

        switch( e.type() ) {
        case EOO:
        case jstNULL:
        case Undefined:
            return JSVAL_NULL;
        case NumberDouble:
        case NumberInt:
            return toval( e.number() );
        case Symbol: // TODO: should we make a special class for this
        case String:
            return toval( e.valuestr() );
        case Bool:
            return e.boolean() ? JSVAL_TRUE : JSVAL_FALSE;
        case Object: {
            BSONObj embed = e.embeddedObject().getOwned();
            return toval( &embed );
        }
        case Array: {

            BSONObj embed = e.embeddedObject().getOwned();

            if ( embed.isEmpty() ) {
                return OBJECT_TO_JSVAL( JS_NewArrayObject( _context , 0 , 0 ) );
            }

            int n = embed.nFields();

            JSObject * array = JS_NewArrayObject( _context , n , 0 );
            assert( array );

            jsval myarray = OBJECT_TO_JSVAL( array );

            for ( int i=0; i<n; i++ ) {
                jsval v = toval( embed[i] );
                assert( JS_SetElement( _context , array , i , &v ) );
            }

            return myarray;
        }
        case jstOID: {
            OID oid = e.__oid();
            JSObject * o = JS_NewObject( _context , &object_id_class , 0 , 0 );
            setProperty( o , "str" , toval( oid.str().c_str() ) );
            return OBJECT_TO_JSVAL( o );
        }
        case RegEx: {
            const char * flags = e.regexFlags();
            uintN flagNumber = 0;
            while ( *flags ) {
                switch ( *flags ) {
                case 'g':
                    flagNumber |= JSREG_GLOB;
                    break;
                case 'i':
                    flagNumber |= JSREG_FOLD;
                    break;
                case 'm':
                    flagNumber |= JSREG_MULTILINE;
                    break;
                //case 'y': flagNumber |= JSREG_STICKY; break;

                default:
                    log() << "warning: unknown regex flag:" << *flags << endl;
                }
                flags++;
            }

            JSObject * r = JS_NewRegExpObject( _context , (char*)e.regex() , strlen( e.regex() ) , flagNumber );
            assert( r );
            return OBJECT_TO_JSVAL( r );
        }
        case Code: {
            JSFunction * func = compileFunction( e.valuestr() );
            return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
        }
        case CodeWScope: {
            JSFunction * func = compileFunction( e.codeWScopeCode() );

            BSONObj extraScope = e.codeWScopeObject();
            if ( ! extraScope.isEmpty() ) {
                log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
            }

            return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
        }
        case Date:
            return OBJECT_TO_JSVAL( js_NewDateObjectMsec( _context , (jsdouble) e.date().millis ) );

        case MinKey:
            return OBJECT_TO_JSVAL( JS_NewObject( _context , &minkey_class , 0 , 0 ) );

        case MaxKey:
            return OBJECT_TO_JSVAL( JS_NewObject( _context , &maxkey_class , 0 , 0 ) );

        case Timestamp: {
            JSObject * o = JS_NewObject( _context , &timestamp_class , 0 , 0 );
            setProperty( o , "t" , toval( (double)(e.timestampTime()) ) );
            setProperty( o , "i" , toval( (double)(e.timestampInc()) ) );
            return OBJECT_TO_JSVAL( o );
        }
        case NumberLong: {
            boost::uint64_t val = (boost::uint64_t)e.numberLong();
            JSObject * o = JS_NewObject( _context , &numberlong_class , 0 , 0 );
            setProperty( o , "floatApprox" , toval( (double)(boost::int64_t)( val ) ) );
            if ( (boost::int64_t)val != (boost::int64_t)(double)(boost::int64_t)( val ) ) {
                // using 2 doubles here instead of a single double because certain double
                // bit patterns represent undefined values and sm might trash them
                setProperty( o , "top" , toval( (double)(boost::uint32_t)( val >> 32 ) ) );
                setProperty( o , "bottom" , toval( (double)(boost::uint32_t)( val & 0x00000000ffffffff ) ) );
            }
            return OBJECT_TO_JSVAL( o );
        }
        case DBRef: {
            JSObject * o = JS_NewObject( _context , &dbpointer_class , 0 , 0 );
            setProperty( o , "ns" , toval( e.dbrefNS() ) );

            JSObject * oid = JS_NewObject( _context , &object_id_class , 0 , 0 );
            setProperty( oid , "str" , toval( e.dbrefOID().str().c_str() ) );

            setProperty( o , "id" , OBJECT_TO_JSVAL( oid ) );
            return OBJECT_TO_JSVAL( o );
        }
        case BinData: {
            JSObject * o = JS_NewObject( _context , &bindata_class , 0 , 0 );
            int len;
            const char * data = e.binData( len );
            assert( JS_SetPrivate( _context , o , new BinDataHolder( data ) ) );

            setProperty( o , "len" , toval( len ) );
            setProperty( o , "type" , toval( (int)e.binDataType() ) );
            return OBJECT_TO_JSVAL( o );
        }
        }
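The NumberLong branch above stores a 64-bit value as two 32-bit halves whenever a single double cannot represent it exactly; a minimal, self-contained sketch of that split (no SpiderMonkey types involved):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t val = 9007199254740993ULL;                  // 2^53 + 1: not exactly representable as a double
        double floatApprox = (double)(int64_t)val;           // what the code stores as "floatApprox"
        bool lossy = (int64_t)val != (int64_t)(double)(int64_t)val;
        if (lossy) {
            double top    = (double)(uint32_t)(val >> 32);             // high 32 bits
            double bottom = (double)(uint32_t)(val & 0xffffffffULL);   // low 32 bits
            std::printf("floatApprox=%.0f top=%.0f bottom=%.0f\n", floatApprox, top, bottom);
        }
        return 0;
    }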
Example #16
INT32 _fmpController::_runLoop()
{
   INT32 rc = SDB_OK ;
   BSONObj obj ;
   BSONElement ele ;

   while ( TRUE )
   {
      INT32 step = FMP_CONTROL_STEP_INVALID ;
      rc = _readMsg( obj ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to read msg:%d",rc ) ;
         goto error ;
      }

      ele = obj.getField( FMP_CONTROL_FIELD ) ;
      if ( ele.eoo() )
      {
         step = FMP_CONTROL_STEP_DOWNLOAD ;
      }
      else if ( NumberInt != ele.type() )
      {
         PD_LOG( PDERROR, "failed to find control filed:%s",
                 obj.toString().c_str() ) ;
         rc = SDB_SYS ;
         goto error ;
      }
      else
      {
         step = ele.Int() ;
      }

      if ( FMP_CONTROL_STEP_QUIT == step )
      {
         _clear() ;
         rc = _writeMsg( OK_RES ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG( PDERROR, "failed to write res of reset:%d", rc  ) ;
            goto error ;
         }
         break ;
      }
      else if ( FMP_CONTROL_STEP_RESET == step )
      {
         _clear() ;
         rc = _writeMsg( OK_RES ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG( PDERROR, "failed to write res of reset:%d", rc  ) ;
            goto error ;
         }
         continue ;
      }
      else
      {
      }

      if ( !FMP_VALID_STEP(step) )
      {
         PD_LOG( PDERROR, "invalid step number[%d], now step[%d]",
                 ele.Int(), _step ) ;
         rc = SDB_SYS ;
         goto error ;
      }

      rc = _handleOneLoop( obj, step ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to handle one loop:%d", rc) ;
         _clear() ;
      }

      FMP_STEP_AUTO_CHANGE( step ) ;
   }
done:
   return rc ;
error:
   goto done ;
}
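A hedged sketch of the control messages this loop consumes; FMP_CONTROL_FIELD and the FMP_CONTROL_STEP_* constants come from the code above, while the download payload field is a placeholder.

   // No control field at all is treated as FMP_CONTROL_STEP_DOWNLOAD.
   BSONObj download = BSON( "func" << "..." ) ;                        // placeholder payload
   // An explicit step must be a NumberInt, otherwise SDB_SYS is returned.
   BSONObj reset = BSON( FMP_CONTROL_FIELD << FMP_CONTROL_STEP_RESET ) ;
   BSONObj quit  = BSON( FMP_CONTROL_FIELD << FMP_CONTROL_STEP_QUIT ) ;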
        bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result,
                 bool /*fromRepl*/) {

            BSONElement first = cmdObj.firstElement();
            uassert(
                28528,
                str::stream() << "Argument to listIndexes must be of type String, not "
                              << typeName(first.type()),
                first.type() == String);
            const NamespaceString ns(parseNs(dbname, cmdObj));
            uassert(
                28529,
                str::stream() << "Argument to listIndexes must be a collection name, "
                              << "not the empty string",
                !ns.coll().empty());

            const long long defaultBatchSize = std::numeric_limits<long long>::max();
            long long batchSize;
            Status parseCursorStatus = parseCommandCursorOptions(cmdObj,
                                                                 defaultBatchSize,
                                                                 &batchSize);
            if (!parseCursorStatus.isOK()) {
                return appendCommandStatus(result, parseCursorStatus);
            }

            AutoGetCollectionForRead autoColl(txn, ns);
            if (!autoColl.getDb()) {
                return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound,
                                                            "no database" ) );
            }

            const Collection* collection = autoColl.getCollection();
            if (!collection) {
                return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound,
                                                            "no collection" ) );
            }

            const CollectionCatalogEntry* cce = collection->getCatalogEntry();
            invariant(cce);

            vector<string> indexNames;
            MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                indexNames.clear();
                cce->getAllIndexes( txn, &indexNames );
            } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());

            std::auto_ptr<WorkingSet> ws(new WorkingSet());
            std::auto_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));

            for ( size_t i = 0; i < indexNames.size(); i++ ) {
                BSONObj indexSpec;
                MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                    indexSpec = cce->getIndexSpec( txn, indexNames[i] );
                } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());

                WorkingSetMember member;
                member.state = WorkingSetMember::OWNED_OBJ;
                member.keyData.clear();
                member.loc = RecordId();
                member.obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec.getOwned());
                root->pushBack(member);
            }

            std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name << "."
                                                        << ns.coll();
            dassert(NamespaceString(cursorNamespace).isValid());
            dassert(NamespaceString(cursorNamespace).isListIndexesGetMore());
            dassert(ns == NamespaceString(cursorNamespace).getTargetNSForListIndexesGetMore());

            PlanExecutor* rawExec;
            Status makeStatus = PlanExecutor::make(txn,
                                                   ws.release(),
                                                   root.release(),
                                                   cursorNamespace,
                                                   PlanExecutor::YIELD_MANUAL,
                                                   &rawExec);
            std::auto_ptr<PlanExecutor> exec(rawExec);
            if (!makeStatus.isOK()) {
                return appendCommandStatus( result, makeStatus );
            }

            BSONArrayBuilder firstBatch;

            const int byteLimit = MaxBytesToReturnToClientAtOnce;
            for (long long objCount = 0;
                 objCount < batchSize && firstBatch.len() < byteLimit;
                 objCount++) {
                BSONObj next;
                PlanExecutor::ExecState state = exec->getNext(&next, NULL);
                if ( state == PlanExecutor::IS_EOF ) {
                    break;
                }
                invariant( state == PlanExecutor::ADVANCED );
                firstBatch.append(next);
            }

            CursorId cursorId = 0LL;
            if ( !exec->isEOF() ) {
                exec->saveState();
                ClientCursor* cursor = new ClientCursor(CursorManager::getGlobalCursorManager(),
                                                        exec.release(),
                                                        cursorNamespace);
                cursorId = cursor->cursorid();
            }

            Command::appendCursorResponseObject( cursorId, cursorNamespace, firstBatch.arr(),
                                                 &result );

            return true;
        }
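A hedged sketch of the command document this handler parses and the cursor-style reply it builds; the "cursor"/"firstBatch" framing is an assumption about appendCursorResponseObject.

    // First element must be a String naming a non-empty collection; cursor options are optional.
    BSONObj cmd = BSON("listIndexes" << "coll" << "cursor" << BSON("batchSize" << 2));
    // Assumed reply shape:
    //   { cursor: { id: <cursorId>,
    //               ns: "db.$cmd.listIndexes.coll",          // cursorNamespace built above
    //               firstBatch: [ { v: 1, key: { _id: 1 }, name: "_id_", ns: "db.coll" }, ... ] },
    //     ok: 1 }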
Example #18
INT32 _fmpController::_handleOneLoop( const BSONObj &obj,
                                      INT32 step )
{
   INT32 rc = SDB_OK ;
   BSONObj res ;

   if ( FMP_CONTROL_STEP_BEGIN == step )
   {
      UINT32 seqID = 1 ;
      BSONElement beSeq = obj.getField( FMP_SEQ_ID ) ;
      if ( beSeq.isNumber() )
      {
         seqID = (UINT32)beSeq.numberInt() ;
      }
      BSONElement diag = obj.getField( FMP_DIAG_PATH ) ;
      if ( !diag.eoo() && String == diag.type() )
      {
         CHAR diaglogShort[ OSS_MAX_PATHSIZE + 1 ] = { 0 } ;
         ossSnprintf( diaglogShort, OSS_MAX_PATHSIZE, "%s_%u.%s",
                      PD_FMP_DIAGLOG_PREFIX, seqID, PD_FMP_DIAGLOG_SUBFIX ) ;

         CHAR diaglog[ OSS_MAX_PATHSIZE + 1 ] = {0} ;
         engine::utilBuildFullPath( diag.valuestrsafe(), diaglogShort,
                                    OSS_MAX_PATHSIZE, diaglog ) ;
         sdbEnablePD( diaglog ) ;
      }
      BSONElement localService = obj.getField( FMP_LOCAL_SERVICE ) ;
      if ( !localService.eoo() && String == localService.type() &&
           0 == ossStrlen(FMP_COORD_SERVICE) )
      {
         ossMemcpy( FMP_COORD_SERVICE, localService.valuestrsafe(),
                    ossStrlen( localService.valuestrsafe() ) + 1 ) ;
      }
      BSONElement localUser = obj.getField( FMP_LOCAL_USERNAME ) ;
      if ( String == localUser.type() )
      {
         ossStrncpy( g_UserName, localUser.valuestrsafe(),
                     OSS_MAX_PATHSIZE ) ;
      }
      BSONElement localPass = obj.getField( FMP_LOCAL_PASSWORD ) ;
      if ( String == localPass.type() )
      {
         ossStrncpy( g_Password, localPass.valuestrsafe(),
                     OSS_MAX_PATHSIZE ) ;
      }
      BSONElement fType = obj.getField( FMP_FUNC_TYPE ) ;
      if ( fType.eoo() )
      {
         rc = _createVM( FMP_FUNC_TYPE_JS ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to create vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to create vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }

         rc = _vm->init( obj ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to init vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to init vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }
      }
      else if ( NumberInt != fType.type() )
      {
         PD_LOG( PDERROR, "invalid type of func type:%s",
                 fType.toString().c_str() ) ;
         rc = SDB_SYS ;
         res = BSON( FMP_ERR_MSG << "invalid type of func type" <<
                     FMP_RES_CODE << SDB_SYS ) ;
         goto error ;
      }
      else
      {
         rc = _createVM( fType.Int() ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to create vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to create vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }

         rc = _vm->init( obj ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to init vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to init vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }
      }
   }
   else if ( FMP_CONTROL_STEP_DOWNLOAD == step )
   {
      SDB_ASSERT( NULL != _vm, "impossible" ) ;
      rc = _vm->eval( obj, res ) ;
      if ( SDB_OK  != rc )
      {
         PD_LOG( PDERROR, "failed to pre eval func:%s, rc:%d",
                 obj.toString(FALSE, TRUE).c_str(), rc ) ;
         if ( res.isEmpty() )
         {
            res = BSON( FMP_ERR_MSG << "failed to pre eval func" <<
                        FMP_RES_CODE << rc ) ;
         }
         goto error ;
      }
   }
   else if ( FMP_CONTROL_STEP_EVAL == step )
   {
      rc = _vm->initGlobalDB( res ) ;
      if ( rc )
      {
         PD_LOG( PDWARNING, "Failed to init global db: %s",
                 res.toString( FALSE, TRUE ).c_str() ) ;
      }

      rc = _vm->eval( obj, res ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to eval func:%s, rc:%d",
                 obj.toString(FALSE, TRUE).c_str(), rc ) ;
         if ( res.isEmpty() )
         {
            res = BSON( FMP_ERR_MSG << "failed to eval func" <<
                        FMP_RES_CODE << rc ) ;
         }
         goto error ;
      }
   }
   else if ( FMP_CONTROL_STEP_FETCH == step )
   {
      BSONObj next ;
      rc = _vm->fetch( next ) ;
      if ( !next.isEmpty() )
      {
         res = next ;
      }
      else
      {
         PD_LOG( PDERROR, "a empty obj was fetched out" ) ;
         rc = SDB_SYS ;
         res = BSON( FMP_ERR_MSG << "a empty obj was fetched out" <<
                     FMP_RES_CODE << rc ) ;
         goto error ;
      }

      if ( SDB_DMS_EOC == rc )
      {
         _clear() ;
      }
      else if ( SDB_OK != rc )
      {
         goto error ;
      }
   }
   else
   {
      SDB_ASSERT( FALSE, "impossible" ) ;
   }

done:
   {
   INT32 rrc = SDB_OK ;
   if ( !res.isEmpty() )
   {
      rrc = _writeMsg( res ) ;
   }
   else
   {
      rrc = _writeMsg( BSON( FMP_RES_CODE << rc ) ) ;
   }
   if ( SDB_OK != rrc )
   {
      rc = rrc ;
      PD_LOG( PDERROR, "failed to write msg:%d", rc ) ;
   }
   }
   return rc ;
error:
   goto done ;
}
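Each call above writes back exactly one reply through the done block; a hedged sketch of the two envelope shapes, reusing the FMP_* field macros from the code:

   BSONObj okReply  = BSON( FMP_RES_CODE << SDB_OK ) ;                         // no payload produced
   BSONObj errReply = BSON( FMP_ERR_MSG << "failed to eval func" <<
                            FMP_RES_CODE << SDB_SYS ) ;                        // error path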
    void Projection::init( const BSONObj& o ) {
        massert( 10371 , "can only add to Projection once", _source.isEmpty());
        _source = o;

        BSONObjIterator i( o );
        int true_false = -1;
        while ( i.more() ) {
            BSONElement e = i.next();

            if ( ! e.isNumber() )
                _hasNonSimple = true;

            if (e.type() == Object) {
                BSONObj obj = e.embeddedObject();
                BSONElement e2 = obj.firstElement();
                if ( strcmp(e2.fieldName(), "$slice") == 0 ) {
                    if (e2.isNumber()) {
                        int i = e2.numberInt();
                        if (i < 0)
                            add(e.fieldName(), i, -i); // limit is now positive
                        else
                            add(e.fieldName(), 0, i);

                    }
                    else if (e2.type() == Array) {
                        BSONObj arr = e2.embeddedObject();
                        uassert(13099, "$slice array wrong size", arr.nFields() == 2 );

                        BSONObjIterator it(arr);
                        int skip = it.next().numberInt();
                        int limit = it.next().numberInt();
                        uassert(13100, "$slice limit must be positive", limit > 0 );
                        add(e.fieldName(), skip, limit);

                    }
                    else {
                        uassert(13098, "$slice only supports numbers and [skip, limit] arrays", false);
                    }
                }
                else {
                    uassert(13097, string("Unsupported projection option: ") + obj.firstElementFieldName(), false);
                }

            }
            else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
                _includeID = false;

            }
            else {

                add (e.fieldName(), e.trueValue());

                // validate input
                if (true_false == -1) {
                    true_false = e.trueValue();
                    _include = !e.trueValue();
                }
                else {
                    uassert( 10053 , "You cannot currently mix including and excluding fields. Contact us if this is an issue." ,
                             (bool)true_false == e.trueValue() );
                }
            }
        }
    }
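Hedged examples of projection specs this parser accepts; field names are illustrative.

    // Inclusion / exclusion (mixing is rejected, except for _id):
    BSONObj incl = BSON("a" << 1 << "b" << 1);                                  // include a, b (and _id)
    BSONObj noId = BSON("_id" << 0 << "a" << 1);                                // include a, drop _id
    // $slice forms handled above:
    BSONObj last5 = BSON("comments" << BSON("$slice" << -5));                   // last 5 elements
    BSONObj page  = BSON("comments" << BSON("$slice" << BSON_ARRAY(20 << 10))); // skip 20, limit 10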
Example #20
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            BSONObjBuilder b;
            vector<Shard> shards;

            ClientBasic* client = ClientBasic::getCurrent();
            AuthorizationSession* authSession = client->getAuthorizationSession();
            if ( strcmp( ns , "inprog" ) == 0 ) {
                const bool isAuthorized = authSession->checkAuthorization(
                        AuthorizationManager::SERVER_RESOURCE_NAME, ActionType::inprog);
                audit::logInProgAuthzCheck(
                        client, q.query, isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run inprog", isAuthorized);

                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    ScopedDbConnection conn(shard.getConnString());
                    BSONObj temp = conn->findOne( r.getns() , q.query );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn.done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                const bool isAuthorized = authSession->checkAuthorization(
                        AuthorizationManager::SERVER_RESOURCE_NAME, ActionType::killop);
                audit::logKillOpAuthzCheck(
                        client,
                        q.query,
                        isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run killop", isAuthorized);

                BSONElement e = q.query["op"];
                if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        ScopedDbConnection conn(s.getConnString());
                        conn->findOne( r.getns() , BSON( "op" << opid ) );
                        conn.done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                warning() << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }
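A hedged summary of the pseudo-namespaces this mongos handler recognizes, following the field checks above.

            // "<db>.$cmd.sys.inprog": aggregates each shard's inprog; opids are rewritten to
            // "<shardName>:<opid>" and the client field is returned as "client_s".
            // "<db>.$cmd.sys.killop": expects {op: "<shardName>:<opid>"}; a missing ':' yields {err: "bad opid"}.
            BSONObj killOp = BSON("op" << "shard0000:12345");
            // "<db>.$cmd.sys.unlock": always answered with {err: "can't do unlock through mongos"}.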
Example #21
Status MemberConfig::initialize(const BSONObj& mcfg, ReplicaSetTagConfig* tagConfig) {
    Status status = bsonCheckOnlyHasFields(
        "replica set member configuration", mcfg, kLegalMemberConfigFieldNames);
    if (!status.isOK())
        return status;

    //
    // Parse _id field.
    //
    BSONElement idElement = mcfg[kIdFieldName];
    if (idElement.eoo()) {
        return Status(ErrorCodes::NoSuchKey, str::stream() << kIdFieldName << " field is missing");
    }
    if (!idElement.isNumber()) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kIdFieldName << " field has non-numeric type "
                                    << typeName(idElement.type()));
    }
    _id = idElement.numberInt();

    //
    // Parse h field.
    //
    std::string hostAndPortString;
    status = bsonExtractStringField(mcfg, kHostFieldName, &hostAndPortString);
    if (!status.isOK())
        return status;
    boost::trim(hostAndPortString);
    status = _host.initialize(hostAndPortString);
    if (!status.isOK())
        return status;
    if (!_host.hasPort()) {
        // make port explicit even if default.
        _host = HostAndPort(_host.host(), _host.port());
    }

    //
    // Parse votes field.
    //
    BSONElement votesElement = mcfg[kVotesFieldName];
    if (votesElement.eoo()) {
        _votes = kVotesFieldDefault;
    } else if (votesElement.isNumber()) {
        _votes = votesElement.numberInt();
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kVotesFieldName << " field value has non-numeric type "
                                    << typeName(votesElement.type()));
    }

    //
    // Parse priority field.
    //
    BSONElement priorityElement = mcfg[kPriorityFieldName];
    if (priorityElement.eoo()) {
        _priority = kPriorityFieldDefault;
    } else if (priorityElement.isNumber()) {
        _priority = priorityElement.numberDouble();
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kPriorityFieldName << " field has non-numeric type "
                                    << typeName(priorityElement.type()));
    }

    //
    // Parse arbiterOnly field.
    //
    status = bsonExtractBooleanFieldWithDefault(
        mcfg, kArbiterOnlyFieldName, kArbiterOnlyFieldDefault, &_arbiterOnly);
    if (!status.isOK())
        return status;

    //
    // Parse slaveDelay field.
    //
    BSONElement slaveDelayElement = mcfg[kSlaveDelayFieldName];
    if (slaveDelayElement.eoo()) {
        _slaveDelay = kSlaveDelayFieldDefault;
    } else if (slaveDelayElement.isNumber()) {
        _slaveDelay = Seconds(slaveDelayElement.numberInt());
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kSlaveDelayFieldName << " field value has non-numeric type "
                                    << typeName(slaveDelayElement.type()));
    }

    //
    // Parse hidden field.
    //
    status =
        bsonExtractBooleanFieldWithDefault(mcfg, kHiddenFieldName, kHiddenFieldDefault, &_hidden);
    if (!status.isOK())
        return status;

    //
    // Parse buildIndexes field.
    //
    status = bsonExtractBooleanFieldWithDefault(
        mcfg, kBuildIndexesFieldName, kBuildIndexesFieldDefault, &_buildIndexes);
    if (!status.isOK())
        return status;

    //
    // Parse "tags" field.
    //
    _tags.clear();
    BSONElement tagsElement;
    status = bsonExtractTypedField(mcfg, kTagsFieldName, Object, &tagsElement);
    if (status.isOK()) {
        for (BSONObj::iterator tagIter(tagsElement.Obj()); tagIter.more();) {
            const BSONElement& tag = tagIter.next();
            if (tag.type() != String) {
                return Status(ErrorCodes::TypeMismatch,
                              str::stream() << "tags." << tag.fieldName()
                                            << " field has non-string value of type "
                                            << typeName(tag.type()));
            }
            _tags.push_back(tagConfig->makeTag(tag.fieldNameStringData(), tag.valueStringData()));
        }
    } else if (ErrorCodes::NoSuchKey != status) {
        return status;
    }

    //
    // Add internal tags based on other member properties.
    //

    // Add a voter tag if this non-arbiter member votes; use _id for uniqueness.
    const std::string id = str::stream() << _id;
    if (isVoter() && !_arbiterOnly) {
        _tags.push_back(tagConfig->makeTag(kInternalVoterTagName, id));
    }

    // Add an electable tag if this member is electable.
    if (isElectable()) {
        _tags.push_back(tagConfig->makeTag(kInternalElectableTagName, id));
    }

    // Add a tag for generic counting of this node.
    if (!_arbiterOnly) {
        _tags.push_back(tagConfig->makeTag(kInternalAllTagName, id));
    }

    return Status::OK();
}
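A hedged sketch of a member document this initializer accepts; the literal field names are assumptions about the k*FieldName constants (they match the usual replica-set configuration fields), and MemberConfig is assumed to be default-constructible here.

    BSONObj member = BSON("_id" << 1
                          << "host" << "rs1.example.net"            // port is made explicit if missing
                          << "votes" << 1
                          << "priority" << 0.5
                          << "arbiterOnly" << false
                          << "slaveDelay" << 3600
                          << "hidden" << false
                          << "buildIndexes" << true
                          << "tags" << BSON("dc" << "east" << "use" << "reporting"));
    ReplicaSetTagConfig tagConfig;
    MemberConfig mc;
    Status s = mc.initialize(member, &tagConfig);   // also adds the internal voter/electable/all tags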
Example #22
        virtual void queryOp( Request& r ) {
            QueryMessage q( r.d() );

            LOG(3) << "single query: " << q.ns << "  " << q.query << "  ntoreturn: " << q.ntoreturn << " options : " << q.queryOptions << endl;

            NamespaceString nss( r.getns() );
            // Regular queries are handled in strategy_shard.cpp
            verify( nss.isCommand() || nss.isSpecialCommand() );

            if ( handleSpecialNamespaces( r , q ) )
                return;

            int loops = 5;
            while ( true ) {
                BSONObjBuilder builder;
                try {
                    BSONObj cmdObj = q.query;
                    {
                        BSONElement e = cmdObj.firstElement();
                        if (e.type() == Object && (e.fieldName()[0] == '$'
                                                     ? str::equals("query", e.fieldName()+1)
                                                     : str::equals("query", e.fieldName()))) {
                            // Extract the embedded query object.

                            if (cmdObj.hasField(Query::ReadPrefField.name())) {
                                // The command has a read preference setting. We don't want
                                // to lose this information so we copy this to a new field
                                // called $queryOptions.$readPreference
                                BSONObjBuilder finalCmdObjBuilder;
                                finalCmdObjBuilder.appendElements(e.embeddedObject());

                                BSONObjBuilder queryOptionsBuilder(
                                        finalCmdObjBuilder.subobjStart("$queryOptions"));
                                queryOptionsBuilder.append(cmdObj[Query::ReadPrefField.name()]);
                                queryOptionsBuilder.done();

                                cmdObj = finalCmdObjBuilder.obj();
                            }
                            else {
                                cmdObj = e.embeddedObject();
                            }
                        }
                    }

                    Command::runAgainstRegistered(q.ns, cmdObj, builder, q.queryOptions);
                    BSONObj x = builder.done();
                    replyToQuery(0, r.p(), r.m(), x);
                    return;
                }
                catch ( StaleConfigException& e ) {
                    if ( loops <= 0 )
                        throw e;

                    loops--;
                    log() << "retrying command: " << q.query << endl;

                    // For legacy reasons, ns may not actually be set in the exception :-(
                    string staleNS = e.getns();
                    if( staleNS.size() == 0 ) staleNS = q.ns;

                    ShardConnection::checkMyConnectionVersions( staleNS );
                    if( loops < 4 ) versionManager.forceRemoteCheckShardVersionCB( staleNS );
                }
                catch ( AssertionException& e ) {
                    e.getInfo().append( builder , "assertion" , "assertionCode" );
                    builder.append( "errmsg" , "db assertion failure" );
                    builder.append( "ok" , 0 );
                    BSONObj x = builder.done();
                    replyToQuery(0, r.p(), r.m(), x);
                    return;
                }
            }
        }
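A hedged illustration of the $query unwrapping above: a command wrapped with a read preference is rewritten so the preference survives under $queryOptions (assuming Query::ReadPrefField names "$readPreference").

            BSONObj incoming = BSON("$query" << BSON("count" << "coll")
                                    << "$readPreference" << BSON("mode" << "secondaryPreferred"));
            // Forwarded to Command::runAgainstRegistered roughly as:
            //   { count: "coll", $queryOptions: { $readPreference: { mode: "secondaryPreferred" } } }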
Example #23
    int ConfigServer::checkConfigVersion( bool upgrade ) {
        int cur = dbConfigVersion();
        if ( cur == VERSION )
            return 0;

        if ( cur == 0 ) {
            ScopedDbConnection conn( _primary );
            conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
            pool.flush();
            verify( VERSION == dbConfigVersion( conn.conn() ) );
            conn.done();
            return 0;
        }

        if ( cur == 2 ) {

            // need to upgrade
            verify( VERSION == 3 );
            if ( ! upgrade ) {
                log() << "newer version of mongo meta data\n"
                      << "need to --upgrade after shutting all mongos down"
                      << endl;
                return -9;
            }

            ScopedDbConnection conn( _primary );

            // do a backup
            string backupName;
            {
                stringstream ss;
                ss << "config-backup-" << terseCurrentTime(false);
                backupName = ss.str();
            }
            log() << "backing up config to: " << backupName << endl;
            conn->copyDatabase( "config" , backupName );

            map<string,string> hostToShard;
            set<string> shards;
            // shards
            {
                unsigned n = 0;
                auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , BSONObj() );
                while ( c->more() ) {
                    BSONObj o = c->next();
                    string host = o["host"].String();

                    string name = "";

                    BSONElement id = o["_id"];
                    if ( id.type() == String ) {
                        name = id.String();
                    }
                    else {
                        stringstream ss;
                        ss << "shard" << hostToShard.size();
                        name = ss.str();
                    }

                    hostToShard[host] = name;
                    shards.insert( name );
                    n++;
                }

                verify( n == hostToShard.size() );
                verify( n == shards.size() );

                conn->remove( ShardNS::shard , BSONObj() );

                for ( map<string,string>::iterator i=hostToShard.begin(); i != hostToShard.end(); i++ ) {
                    conn->insert( ShardNS::shard , BSON( "_id" << i->second << "host" << i->first ) );
                }
            }

            // databases
            {
                auto_ptr<DBClientCursor> c = conn->query( ShardNS::database , BSONObj() );
                map<string,BSONObj> newDBs;
                unsigned n = 0;
                while ( c->more() ) {
                    BSONObj old = c->next();
                    n++;

                    if ( old["name"].eoo() ) {
                        // already done
                        newDBs[old["_id"].String()] = old;
                        continue;
                    }

                    BSONObjBuilder b(old.objsize());
                    b.appendAs( old["name"] , "_id" );

                    BSONObjIterator i(old);
                    while ( i.more() ) {
                        BSONElement e = i.next();
                        if ( strcmp( "_id" , e.fieldName() ) == 0 ||
                                strcmp( "name" , e.fieldName() ) == 0 ) {
                            continue;
                        }

                        b.append( e );
                    }

                    BSONObj x = b.obj();
                    log() << old << "\n\t" << x << endl;
                    newDBs[old["name"].String()] = x;
                }

                verify( n == newDBs.size() );

                conn->remove( ShardNS::database , BSONObj() );

                for ( map<string,BSONObj>::iterator i=newDBs.begin(); i!=newDBs.end(); i++ ) {
                    conn->insert( ShardNS::database , i->second );
                }

            }

            // chunks
            {
                unsigned num = 0;
                map<string,BSONObj> chunks;
                auto_ptr<DBClientCursor> c = conn->query( ShardNS::chunk , BSONObj() );
                while ( c->more() ) {
                    BSONObj x = c->next();
                    BSONObjBuilder b;

                    string id = Chunk::genID( x["ns"].String() , x["min"].Obj() );
                    b.append( "_id" , id );

                    BSONObjIterator i(x);
                    while ( i.more() ) {
                        BSONElement e = i.next();
                        if ( strcmp( e.fieldName() , "_id" ) == 0 )
                            continue;
                        b.append( e );
                    }

                    BSONObj n = b.obj();
                    log() << x << "\n\t" << n << endl;
                    chunks[id] = n;
                    num++;
                }

                verify( num == chunks.size() );

                conn->remove( ShardNS::chunk , BSONObj() );
                for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ) {
                    conn->insert( ShardNS::chunk , i->second );
                }

            }

            conn->update( "config.version" , BSONObj() , BSON( "_id" << 1 << "version" << VERSION ) );
            conn.done();
            pool.flush();
            return 1;
        }

        log() << "don't know how to upgrade " << cur << " to " << VERSION << endl;
        return -8;
    }
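A hedged before/after sketch of the v2-to-v3 rewrites performed above; the extra database fields are illustrative.

    // config.shards: an _id is synthesized when the old document only had a host.
    BSONObj oldShard = BSON("host" << "s1.example.net:27018");
    // rewritten as: { _id: "shard0", host: "s1.example.net:27018" }
    // config.databases: "name" becomes "_id", every other field is copied.
    BSONObj oldDb = BSON("name" << "test" << "partitioned" << true << "primary" << "shard0");
    // rewritten as: { _id: "test", partitioned: true, primary: "shard0" }
    // config.chunks: documents get _id = Chunk::genID(ns, min) and keep their other fields.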
Example #24
StatusWith<MongosType> MongosType::fromBSON(const BSONObj& source) {
    MongosType mt;

    {
        std::string mtName;
        Status status = bsonExtractStringField(source, name.name(), &mtName);
        if (!status.isOK())
            return status;
        mt._name = mtName;
    }

    {
        BSONElement mtPingElem;
        Status status = bsonExtractTypedField(source, ping.name(), BSONType::Date, &mtPingElem);
        if (!status.isOK())
            return status;
        mt._ping = mtPingElem.date();
    }

    {
        long long mtUptime;
        Status status = bsonExtractIntegerField(source, uptime.name(), &mtUptime);
        if (!status.isOK())
            return status;
        mt._uptime = mtUptime;
    }

    {
        bool mtWaiting;
        Status status = bsonExtractBooleanField(source, waiting.name(), &mtWaiting);
        if (!status.isOK())
            return status;
        mt._waiting = mtWaiting;
    }

    if (source.hasField(mongoVersion.name())) {
        std::string mtMongoVersion;
        Status status = bsonExtractStringField(source, mongoVersion.name(), &mtMongoVersion);
        if (!status.isOK())
            return status;
        mt._mongoVersion = mtMongoVersion;
    }

    if (source.hasField(configVersion.name())) {
        long long mtConfigVersion;
        Status status = bsonExtractIntegerField(source, configVersion.name(), &mtConfigVersion);
        if (!status.isOK())
            return status;
        mt._configVersion = mtConfigVersion;
    }

    if (source.hasField(advisoryHostFQDNs.name())) {
        mt._advisoryHostFQDNs = std::vector<std::string>();
        BSONElement array;
        Status status = bsonExtractTypedField(source, advisoryHostFQDNs.name(), Array, &array);
        if (!status.isOK())
            return status;

        BSONObjIterator it(array.Obj());
        while (it.more()) {
            BSONElement arrayElement = it.next();
            if (arrayElement.type() != String) {
                return Status(ErrorCodes::TypeMismatch,
                              str::stream() << "Elements in \"" << advisoryHostFQDNs.name()
                                            << "\" array must be strings but found "
                                            << typeName(arrayElement.type()));
            }
            mt._advisoryHostFQDNs->push_back(arrayElement.String());
        }
    }

    return mt;
}
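A hedged sketch of building a mongos ping document that round-trips through fromBSON, using the BSONField accessors referenced above; jsTime() and the exact builder overloads are assumptions.

    BSONObjBuilder b;
    b.append(MongosType::name.name(), "host.example.net:27017");
    b.appendDate(MongosType::ping.name(), jsTime());
    b.append(MongosType::uptime.name(), 3600LL);
    b.append(MongosType::waiting.name(), true);
    b.append(MongosType::mongoVersion.name(), "3.2.0");       // optional
    b.append(MongosType::configVersion.name(), 6LL);          // optional
    StatusWith<MongosType> parsed = MongosType::fromBSON(b.obj());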
Example #25
Status ModifierAddToSet::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
    // Perform standard field name and updateable checks.
    _fieldRef.parse(modExpr.fieldName());
    Status status = fieldchecker::isUpdatable(_fieldRef);
    if (!status.isOK()) {
        return status;
    }

    // If a $-positional operator was used, get the index in which it occurred
    // and ensure only one occurrence.
    size_t foundCount;
    bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);

    if (positional)
        *positional = foundDollar;

    if (foundDollar && foundCount > 1) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Too many positional (i.e. '$') elements found in path '"
                                    << _fieldRef.dottedField()
                                    << "'");
    }

    // TODO: The driver could potentially do this re-writing.

    // If the type of the value is 'Object', we might be dealing with a $each. See if that
    // is the case.
    if (modExpr.type() == mongo::Object) {
        BSONElement modExprObjPayload = modExpr.embeddedObject().firstElement();
        if (!modExprObjPayload.eoo() && StringData(modExprObjPayload.fieldName()) == "$each") {
            // It is a $each. Verify that the payload is an array as is required for $each,
            // set our flag, and store the array as our value.
            if (modExprObjPayload.type() != mongo::Array) {
                return Status(ErrorCodes::BadValue,
                              str::stream() << "The argument to $each in $addToSet must "
                                               "be an array but it was of type "
                                            << typeName(modExprObjPayload.type()));
            }

            status = _valDoc.root().appendElement(modExprObjPayload);
            if (!status.isOK())
                return status;

            _val = _valDoc.root().leftChild();
        }
    }

    // If this wasn't an 'each', turn it into one. No need to sort or de-dup since we only
    // have one element.
    if (_val == _valDoc.end()) {
        mb::Element each = _valDoc.makeElementArray("$each");

        status = each.appendElement(modExpr);
        if (!status.isOK())
            return status;

        status = _valDoc.root().pushBack(each);
        if (!status.isOK())
            return status;

        _val = each;
    }

    // Check if no invalid data (such as fields with '$'s) are being used in the $each
    // clause.
    mb::ConstElement valCursor = _val.leftChild();
    while (valCursor.ok()) {
        const BSONType type = valCursor.getType();
        dassert(valCursor.hasValue());
        switch (type) {
            case mongo::Object: {
                Status s = valCursor.getValueObject().storageValidEmbedded();
                if (!s.isOK())
                    return s;

                break;
            }
            case mongo::Array: {
                Status s = valCursor.getValueArray().storageValidEmbedded();
                if (!s.isOK())
                    return s;

                break;
            }
            default:
                break;
        }

        valCursor = valCursor.rightSibling();
    }

    setCollator(opts.collator);
    return Status::OK();
}
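Hedged examples of the two $addToSet shapes the code above normalizes into a $each array; the field name "tags" is illustrative.

    // Single value: internally rewritten as {$each: ["scifi"]}.
    BSONObj single = BSON("$addToSet" << BSON("tags" << "scifi"));
    // Explicit $each: the payload must be an array, otherwise BadValue is returned.
    BSONObj each = BSON("$addToSet"
                        << BSON("tags" << BSON("$each" << BSON_ARRAY("scifi" << "fantasy"))));
    // init() is handed the element under $addToSet:
    BSONElement modExpr = each.firstElement().embeddedObject().firstElement();   // tags: {$each: [...]}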
Example #26
    void Projection::init(const BSONObj& o,
                          const MatchExpressionParser::WhereCallback& whereCallback) {
        massert( 10371 , "can only add to Projection once", _source.isEmpty());
        _source = o;

        BSONObjIterator i( o );
        int true_false = -1;
        while ( i.more() ) {
            BSONElement e = i.next();

            if ( ! e.isNumber() )
                _hasNonSimple = true;

            if (e.type() == Object) {
                BSONObj obj = e.embeddedObject();
                BSONElement e2 = obj.firstElement();
                if ( mongoutils::str::equals( e2.fieldName(), "$slice" ) ) {
                    if (e2.isNumber()) {
                        int i = e2.numberInt();
                        if (i < 0)
                            add(e.fieldName(), i, -i); // limit is now positive
                        else
                            add(e.fieldName(), 0, i);

                    }
                    else if (e2.type() == Array) {
                        BSONObj arr = e2.embeddedObject();
                        uassert(13099, "$slice array wrong size", arr.nFields() == 2 );

                        BSONObjIterator it(arr);
                        int skip = it.next().numberInt();
                        int limit = it.next().numberInt();
                        uassert(13100, "$slice limit must be positive", limit > 0 );
                        add(e.fieldName(), skip, limit);

                    }
                    else {
                        uassert(13098, "$slice only supports numbers and [skip, limit] arrays", false);
                    }
                }
                else if ( mongoutils::str::equals( e2.fieldName(), "$elemMatch" ) ) {
                    // validate $elemMatch arguments and dependencies
                    uassert( 16342, "elemMatch: invalid argument.  object required.",
                             e2.type() == Object );
                    uassert( 16343, "Cannot specify positional operator and $elemMatch"
                                    " (currently unsupported).",
                             _arrayOpType != ARRAY_OP_POSITIONAL );
                    uassert( 16344, "Cannot use $elemMatch projection on a nested field"
                                    " (currently unsupported).",
                             ! mongoutils::str::contains( e.fieldName(), '.' ) );
                    _arrayOpType = ARRAY_OP_ELEM_MATCH;

                    // initialize new Matcher object(s)

                    _matchers[mongoutils::str::before(e.fieldName(), '.').c_str()]
                        = boost::make_shared<Matcher>(e.wrap(), whereCallback);
                    add( e.fieldName(), true );
                }
                else {
                    uasserted(13097, string("Unsupported projection option: ") +
                                     obj.firstElementFieldName() );
                }

            }
            else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
                _includeID = false;
            }
            else {
                add( e.fieldName(), e.trueValue() );

                // validate input
                if (true_false == -1) {
                    true_false = e.trueValue();
                    _include = !e.trueValue();
                }
                else {
                    uassert( 10053 , "You cannot currently mix including and excluding fields. "
                                     "Contact us if this is an issue." ,
                             (bool)true_false == e.trueValue() );
                }
            }
            if ( mongoutils::str::contains( e.fieldName(), ".$" ) ) {
                // positional op found; verify dependencies
                uassert( 16345, "Cannot exclude array elements with the positional operator"
                                " (currently unsupported).", e.trueValue() );
                uassert( 16346, "Cannot specify more than one positional array element per query"
                                " (currently unsupported).", _arrayOpType != ARRAY_OP_POSITIONAL );
                uassert( 16347, "Cannot specify positional operator and $elemMatch"
                                " (currently unsupported).", _arrayOpType != ARRAY_OP_ELEM_MATCH );
                _arrayOpType = ARRAY_OP_POSITIONAL;
            }
        }
    }
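The $slice branch above normalizes a single negative number n into a (skip, limit) pair, so {$slice: -3} selects the last three array elements. A minimal standalone sketch of that normalization (the function name is illustrative, not from the source):

#include <utility>

// Normalize a single-number $slice argument into (skip, limit), mirroring the
// e2.isNumber() branch above: a negative n selects the last -n elements.
std::pair<int, int> normalizeSlice(int n) {
    if (n < 0)
        return std::make_pair(n, -n);  // {$slice: -3} -> skip = -3, limit = 3
    return std::make_pair(0, n);       // {$slice: 5}  -> skip = 0,  limit = 5
}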
Example #27
0
    virtual bool run(OperationContext* txn,
                     const string& dbname,
                     BSONObj& cmdObj,
                     int options,
                     string& errmsg,
                     BSONObjBuilder& result) {
        // ---  parse

        NamespaceString ns(dbname, cmdObj[name].String());
        Status status = userAllowedWriteNS(ns);
        if (!status.isOK())
            return appendCommandStatus(result, status);

        if (cmdObj["indexes"].type() != Array) {
            errmsg = "indexes has to be an array";
            result.append("cmdObj", cmdObj);
            return false;
        }

        std::vector<BSONObj> specs;
        {
            BSONObjIterator i(cmdObj["indexes"].Obj());
            while (i.more()) {
                BSONElement e = i.next();
                if (e.type() != Object) {
                    errmsg = "everything in indexes has to be an Object";
                    result.append("cmdObj", cmdObj);
                    return false;
                }
                specs.push_back(e.Obj());
            }
        }

        if (specs.size() == 0) {
            errmsg = "no indexes to add";
            return false;
        }

        // check specs
        for (size_t i = 0; i < specs.size(); i++) {
            BSONObj spec = specs[i];
            if (spec["ns"].eoo()) {
                spec = _addNsToSpec(ns, spec);
                specs[i] = spec;
            }

            if (spec["ns"].type() != String) {
                errmsg = "spec has no ns";
                result.append("spec", spec);
                return false;
            }
            if (ns != spec["ns"].String()) {
                errmsg = "namespace mismatch";
                result.append("spec", spec);
                return false;
            }
        }

        // now we know we have to create index(es)
        // Note: createIndexes command does not currently respect shard versioning.
        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
        if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
            return appendCommandStatus(
                result,
                Status(ErrorCodes::NotMaster,
                       str::stream() << "Not primary while creating indexes in " << ns.ns()));
        }

        Database* db = dbHolder().get(txn, ns.db());
        if (!db) {
            db = dbHolder().openDb(txn, ns.db());
        }

        Collection* collection = db->getCollection(ns.ns());
        result.appendBool("createdCollectionAutomatically", collection == NULL);
        if (!collection) {
            MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                WriteUnitOfWork wunit(txn);
                collection = db->createCollection(txn, ns.ns(), CollectionOptions());
                invariant(collection);
                wunit.commit();
            }
            MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
        }
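For reference, a sketch of a command document this handler accepts, assuming the BSON()/BSON_ARRAY() builder macros used elsewhere in this codebase; the collection name and index spec below are made up:

// Hedged sketch (not from the source): a createIndexes command with one index
// spec. The "ns" field may be omitted; _addNsToSpec() fills it in from the
// command's namespace before the specs are validated.
BSONObj cmd = BSON("createIndexes" << "test"
                                   << "indexes"
                                   << BSON_ARRAY(BSON("key" << BSON("a" << 1)
                                                            << "name" << "a_1")));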
Example #28
0
void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryResponse>& result,
                                              Fetcher::NextAction* nextAction,
                                              BSONObjBuilder* getMoreBob) {
    if (!result.isOK()) {
        _finishCallback(result.getStatus());
        return;
    }

    auto batchData(result.getValue());
    auto&& documents = batchData.documents;

    // We may be called with multiple batches leading to a need to grow _collectionInfos.
    _collectionInfos.reserve(_collectionInfos.size() + documents.size());
    std::copy_if(documents.begin(),
                 documents.end(),
                 std::back_inserter(_collectionInfos),
                 _listCollectionsPredicate);

    // The fetcher will continue to call with kGetMore until an error or the last batch.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
        return;
    }

    // Nothing to do for an empty database.
    if (_collectionInfos.empty()) {
        _finishCallback(Status::OK());
        return;
    }

    _collectionNamespaces.reserve(_collectionInfos.size());
    std::set<std::string> seen;
    for (auto&& info : _collectionInfos) {
        BSONElement nameElement = info.getField(kNameFieldName);
        if (nameElement.eoo()) {
            _finishCallback(Status(ErrorCodes::FailedToParse,
                                   str::stream() << "collection info must contain '"
                                                 << kNameFieldName << "' "
                                                 << "field : " << info));
            return;
        }
        if (nameElement.type() != mongo::String) {
            _finishCallback(Status(ErrorCodes::TypeMismatch,
                                   str::stream() << "'" << kNameFieldName
                                                 << "' field must be a string: " << info));
            return;
        }
        const std::string collectionName = nameElement.String();
        if (seen.find(collectionName) != seen.end()) {
            _finishCallback(Status(ErrorCodes::DuplicateKey,
                                   str::stream()
                                       << "collection info contains duplicate collection name "
                                       << "'" << collectionName << "': " << info));
            return;
        }

        BSONElement optionsElement = info.getField(kOptionsFieldName);
        if (optionsElement.eoo()) {
            _finishCallback(Status(ErrorCodes::FailedToParse,
                                   str::stream() << "collection info must contain '"
                                                 << kOptionsFieldName << "' "
                                                 << "field : " << info));
            return;
        }
        if (!optionsElement.isABSONObj()) {
            _finishCallback(Status(ErrorCodes::TypeMismatch,
                                   str::stream() << "'" << kOptionsFieldName
                                                 << "' field must be an object: " << info));
            return;
        }
        const BSONObj optionsObj = optionsElement.Obj();
        CollectionOptions options;
        Status parseStatus = options.parse(optionsObj);
        if (!parseStatus.isOK()) {
            _finishCallback(parseStatus);
            return;
        }
        seen.insert(collectionName);

        _collectionNamespaces.emplace_back(_dbname, collectionName);
        auto&& nss = *_collectionNamespaces.crbegin();

        try {
            _collectionCloners.emplace_back(
                _executor,
                _source,
                nss,
                options,
                stdx::bind(
                    &DatabaseCloner::_collectionClonerCallback, this, stdx::placeholders::_1, nss),
                _storageInterface);
        } catch (const UserException& ex) {
            _finishCallback(ex.toStatus());
            return;
        }
    }

    for (auto&& collectionCloner : _collectionCloners) {
        collectionCloner.setScheduleDbWorkFn(_scheduleDbWorkFn);
    }

    // Start first collection cloner.
    _currentCollectionClonerIter = _collectionCloners.begin();

    LOG(1) << "    cloning collection " << _currentCollectionClonerIter->getSourceNamespace();

    Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
    if (!startStatus.isOK()) {
        LOG(1) << "    failed to start collection cloning on "
               << _currentCollectionClonerIter->getSourceNamespace() << ": " << startStatus;
        _finishCallback(startStatus);
        return;
    }
}
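The duplicate-name check in the loop above relies on std::set::insert reporting whether the key was already present. A minimal standalone sketch of the same technique (simplified types, not the DatabaseCloner API):

#include <set>
#include <string>
#include <vector>

// Return the first duplicate collection name, or an empty string if every
// name is unique -- the same "seen" set technique used in the callback above.
std::string firstDuplicateName(const std::vector<std::string>& names) {
    std::set<std::string> seen;
    for (const std::string& name : names) {
        if (!seen.insert(name).second) {
            return name;  // insert() signals an already-present key via .second == false
        }
    }
    return std::string();  // empty string: no duplicates found
}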
Example #29
0
Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term) {
    // Old versions set this even though they returned not "ok"
    _mismatch = doc[kMismatchFieldName].trueValue();
    if (_mismatch)
        return Status(ErrorCodes::InconsistentReplicaSetNames, "replica set name doesn't match.");

    // Old versions sometimes set the replica set name ("set") but ok:0
    const BSONElement replSetNameElement = doc[kReplSetFieldName];
    if (replSetNameElement.eoo()) {
        _setName.clear();
    } else if (replSetNameElement.type() != String) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kReplSetFieldName
                                    << "\" field in response to replSetHeartbeat to have "
                                       "type String, but found "
                                    << typeName(replSetNameElement.type()));
    } else {
        _setName = replSetNameElement.String();
    }

    if (_setName.empty() && !doc[kOkFieldName].trueValue()) {
        std::string errMsg = doc[kErrMsgFieldName].str();

        BSONElement errCodeElem = doc[kErrorCodeFieldName];
        if (errCodeElem.ok()) {
            if (!errCodeElem.isNumber())
                return Status(ErrorCodes::BadValue, "Error code is not a number!");

            int errorCode = errCodeElem.numberInt();
            return Status(ErrorCodes::Error(errorCode), errMsg);
        }
        return Status(ErrorCodes::UnknownError, errMsg);
    }

    const BSONElement hasDataElement = doc[kHasDataFieldName];
    _hasDataSet = !hasDataElement.eoo();
    _hasData = hasDataElement.trueValue();

    const BSONElement electionTimeElement = doc[kElectionTimeFieldName];
    if (electionTimeElement.eoo()) {
        _electionTimeSet = false;
    } else if (electionTimeElement.type() == bsonTimestamp) {
        _electionTimeSet = true;
        _electionTime = electionTimeElement.timestamp();
    } else if (electionTimeElement.type() == Date) {
        _electionTimeSet = true;
        _electionTime = Timestamp(electionTimeElement.date());
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kElectionTimeFieldName
                                    << "\" field in response to replSetHeartbeat "
                                       "command to have type Date or Timestamp, but found type "
                                    << typeName(electionTimeElement.type()));
    }

    const BSONElement timeElement = doc[kTimeFieldName];
    if (timeElement.eoo()) {
        _timeSet = false;
    } else if (timeElement.isNumber()) {
        _timeSet = true;
        _time = Seconds(timeElement.numberLong());
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kTimeFieldName
                                    << "\" field in response to replSetHeartbeat "
                                       "command to have a numeric type, but found type "
                                    << typeName(timeElement.type()));
    }

    _isReplSet = doc[kIsReplSetFieldName].trueValue();

    Status termStatus = bsonExtractIntegerField(doc, kTermFieldName, &_term);
    if (!termStatus.isOK() && termStatus != ErrorCodes::NoSuchKey) {
        return termStatus;
    }

    Status status = bsonExtractOpTimeField(doc, kDurableOpTimeFieldName, &_durableOpTime);
    if (!status.isOK()) {
        if (status != ErrorCodes::NoSuchKey) {
            return status;
        }
    } else {
        _durableOpTimeSet = true;
    }

    // In order to support both the 3.0(V0) and 3.2(V1) heartbeats we must parse the OpTime
    // field based on its type. If it is a Date, we parse it as the timestamp and use
    // initialize's term argument to complete the OpTime type. If it is an Object, then it's
    // V1 and we construct an OpTime out of its nested fields.
    const BSONElement appliedOpTimeElement = doc[kAppliedOpTimeFieldName];
    if (appliedOpTimeElement.eoo()) {
        _appliedOpTimeSet = false;
    } else if (appliedOpTimeElement.type() == bsonTimestamp) {
        _appliedOpTimeSet = true;
        _appliedOpTime = OpTime(appliedOpTimeElement.timestamp(), term);
    } else if (appliedOpTimeElement.type() == Date) {
        _appliedOpTimeSet = true;
        _appliedOpTime = OpTime(Timestamp(appliedOpTimeElement.date()), term);
    } else if (appliedOpTimeElement.type() == Object) {
        Status status = bsonExtractOpTimeField(doc, kAppliedOpTimeFieldName, &_appliedOpTime);
        if (!status.isOK()) {
            return status;
        }
        _appliedOpTimeSet = true;
        // since a v1 OpTime was in the response, the member must be part of a replset
        _isReplSet = true;
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kAppliedOpTimeFieldName
                                    << "\" field in response to replSetHeartbeat "
                                       "command to have type Date or Timestamp, but found type "
                                    << typeName(appliedOpTimeElement.type()));
    }

    const BSONElement electableElement = doc[kIsElectableFieldName];
    if (electableElement.eoo()) {
        _electableSet = false;
    } else {
        _electableSet = true;
        _electable = electableElement.trueValue();
    }

    const BSONElement memberStateElement = doc[kMemberStateFieldName];
    if (memberStateElement.eoo()) {
        _stateSet = false;
    } else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
        return Status(
            ErrorCodes::TypeMismatch,
            str::stream() << "Expected \"" << kMemberStateFieldName
                          << "\" field in response to replSetHeartbeat "
                             "command to have type NumberInt or NumberLong, but found type "
                          << typeName(memberStateElement.type()));
    } else {
        long long stateInt = memberStateElement.numberLong();
        if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
            return Status(
                ErrorCodes::BadValue,
                str::stream() << "Value for \"" << kMemberStateFieldName
                              << "\" in response to replSetHeartbeat is "
                                 "out of range; legal values are non-negative and no more than "
                              << MemberState::RS_MAX);
        }
        _stateSet = true;
        _state = MemberState(static_cast<int>(stateInt));
    }

    _stateDisagreement = doc[kHasStateDisagreementFieldName].trueValue();


    // Not required for the case of uninitialized members -- they have no config
    const BSONElement configVersionElement = doc[kConfigVersionFieldName];

    // If we have an optime then we must have a configVersion
    if (_appliedOpTimeSet && configVersionElement.eoo()) {
        return Status(ErrorCodes::NoSuchKey,
                      str::stream() << "Response to replSetHeartbeat missing required \""
                                    << kConfigVersionFieldName
                                    << "\" field even though initialized");
    }

    // If there is a "v" (config version) then it must be an int.
    if (!configVersionElement.eoo() && configVersionElement.type() != NumberInt) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kConfigVersionFieldName
                                    << "\" field in response to replSetHeartbeat to have "
                                       "type NumberInt, but found "
                                    << typeName(configVersionElement.type()));
    }
    _configVersion = configVersionElement.numberInt();

    const BSONElement hbMsgElement = doc[kHbMessageFieldName];
    if (hbMsgElement.eoo()) {
        _hbmsg.clear();
    } else if (hbMsgElement.type() != String) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kHbMessageFieldName
                                    << "\" field in response to replSetHeartbeat to have "
                                       "type String, but found "
                                    << typeName(hbMsgElement.type()));
    } else {
        _hbmsg = hbMsgElement.String();
    }

    const BSONElement syncingToElement = doc[kSyncSourceFieldName];
    if (syncingToElement.eoo()) {
        _syncingTo = HostAndPort();
    } else if (syncingToElement.type() != String) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kSyncSourceFieldName
                                    << "\" field in response to replSetHeartbeat to "
                                       "have type String, but found "
                                    << typeName(syncingToElement.type()));
    } else {
        _syncingTo = HostAndPort(syncingToElement.String());
    }

    const BSONElement rsConfigElement = doc[kConfigFieldName];
    if (rsConfigElement.eoo()) {
        _configSet = false;
        _config = ReplicaSetConfig();
        return Status::OK();
    } else if (rsConfigElement.type() != Object) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kConfigFieldName
                                    << "\" in response to replSetHeartbeat to have type "
                                       "Object, but found "
                                    << typeName(rsConfigElement.type()));
    }
    _configSet = true;

    return _config.initialize(rsConfigElement.Obj());
}
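The appliedOpTime handling above is the V0/V1 compatibility point: a Timestamp or Date value is a 3.0-style response whose term must come from the caller, while an Object is a 3.2-style OpTime that already carries its own term. A simplified standalone sketch of that dispatch (the enum and names are illustrative, not the BSON API):

#include <cstdint>
#include <stdexcept>

enum class FieldType { Timestamp, Date, Object, Missing };

struct OpTimeSketch {
    uint64_t ts;
    long long term;
};

// Mirrors the type dispatch in initialize(): V0 responses carry only a
// timestamp (term supplied by the caller); V1 responses carry a full OpTime.
OpTimeSketch parseAppliedOpTime(FieldType type,
                                uint64_t ts,
                                OpTimeSketch v1Value,
                                long long callerTerm) {
    switch (type) {
        case FieldType::Timestamp:
        case FieldType::Date:
            return {ts, callerTerm};  // V0: complete the OpTime with the caller's term
        case FieldType::Object:
            return v1Value;           // V1: nested {ts, t} already holds the term
        default:
            throw std::invalid_argument("unexpected type for appliedOpTime");
    }
}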
Example #30
0
// static
Status ParsedProjection::make(const BSONObj& spec,
                              const MatchExpression* const query,
                              ParsedProjection** out,
                              const ExtensionsCallback& extensionsCallback) {
    // Whether we're including or excluding fields.
    enum class IncludeExclude { kUninitialized, kInclude, kExclude };
    IncludeExclude includeExclude = IncludeExclude::kUninitialized;

    bool requiresDocument = false;

    bool includeID = true;

    bool hasIndexKeyProjection = false;

    bool wantGeoNearPoint = false;
    bool wantGeoNearDistance = false;
    bool wantSortKey = false;

    // Until we see a positional or elemMatch operator we're normal.
    ArrayOpType arrayOpType = ARRAY_OP_NORMAL;

    BSONObjIterator it(spec);
    while (it.more()) {
        BSONElement e = it.next();

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            if (1 != obj.nFields()) {
                return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
            }

            BSONElement e2 = obj.firstElement();
            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    // This is A-OK.
                } else if (e2.type() == Array) {
                    BSONObj arr = e2.embeddedObject();
                    if (2 != arr.nFields()) {
                        return Status(ErrorCodes::BadValue, "$slice array wrong size");
                    }

                    BSONObjIterator it(arr);
                    // Skip over 'skip'.
                    it.next();
                    int limit = it.next().numberInt();
                    if (limit <= 0) {
                        return Status(ErrorCodes::BadValue, "$slice limit must be positive");
                    }
                } else {
                    return Status(ErrorCodes::BadValue,
                                  "$slice only supports numbers and [skip, limit] arrays");
                }

                // Projections with $slice aren't covered.
                requiresDocument = true;
            } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                // Validate $elemMatch arguments and dependencies.
                if (Object != e2.type()) {
                    return Status(ErrorCodes::BadValue,
                                  "elemMatch: Invalid argument, object required.");
                }

                if (ARRAY_OP_POSITIONAL == arrayOpType) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot specify positional operator and $elemMatch.");
                }

                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot use $elemMatch projection on a nested field.");
                }

                arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());

                // TODO: Is there a faster way of validating the elemMatchObj?
                StatusWithMatchExpression statusWithMatcher =
                    MatchExpressionParser::parse(elemMatchObj, extensionsCallback);
                if (!statusWithMatcher.isOK()) {
                    return statusWithMatcher.getStatus();
                }

                // Projections with $elemMatch aren't covered.
                requiresDocument = true;
            } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                // Field for meta must be top level.  We can relax this at some point.
                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
                }

                // Make sure the argument to $meta is something we recognize.
                // e.g. {x: {$meta: "textScore"}}
                if (String != e2.type()) {
                    return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
                }

                if (e2.valuestr() != LiteParsedQuery::metaTextScore &&
                    e2.valuestr() != LiteParsedQuery::metaRecordId &&
                    e2.valuestr() != LiteParsedQuery::metaIndexKey &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearDistance &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearPoint &&
                    e2.valuestr() != LiteParsedQuery::metaSortKey) {
                    return Status(ErrorCodes::BadValue, "unsupported $meta operator: " + e2.str());
                }

                // This clobbers everything else.
                if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
                    hasIndexKeyProjection = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
                    wantGeoNearDistance = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
                    wantGeoNearPoint = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaSortKey) {
                    wantSortKey = true;
                }

                // Of the $meta projections, only sortKey can be covered.
                if (e2.valuestr() != LiteParsedQuery::metaSortKey) {
                    requiresDocument = true;
                }
            } else {
                return Status(ErrorCodes::BadValue,
                              string("Unsupported projection option: ") + e.toString());
            }
        } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            includeID = false;
        } else {
            // Projections of dotted fields aren't covered.
            if (mongoutils::str::contains(e.fieldName(), '.')) {
                requiresDocument = true;
            }

            // If we haven't specified an include/exclude, initialize includeExclude. We expect
            // further include/excludes to match it.
            if (includeExclude == IncludeExclude::kUninitialized) {
                includeExclude =
                    e.trueValue() ? IncludeExclude::kInclude : IncludeExclude::kExclude;
            } else if ((includeExclude == IncludeExclude::kInclude && !e.trueValue()) ||
                       (includeExclude == IncludeExclude::kExclude && e.trueValue())) {
                return Status(ErrorCodes::BadValue,
                              "Projection cannot have a mix of inclusion and exclusion.");
            }
        }

        if (_isPositionalOperator(e.fieldName())) {
            // Validate the positional op.
            if (!e.trueValue()) {
                return Status(ErrorCodes::BadValue,
                              "Cannot exclude array elements with the positional operator.");
            }

            if (ARRAY_OP_POSITIONAL == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify more than one positional proj. per query.");
            }

            if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify positional operator and $elemMatch.");
            }

            std::string after = mongoutils::str::after(e.fieldName(), ".$");
            if (mongoutils::str::contains(after, ".$")) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' contains "
                   << "the positional operator more than once.";
                return Status(ErrorCodes::BadValue, ss);
            }

            std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
            if (!_hasPositionalOperatorMatch(query, matchfield)) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' does not "
                   << "match the query document.";
                return Status(ErrorCodes::BadValue, ss);
            }

            arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }

    // If includeExclude is uninitialized or set to exclude fields, then we can't use an index
    // because we don't know what fields we're missing.
    if (includeExclude == IncludeExclude::kUninitialized ||
        includeExclude == IncludeExclude::kExclude) {
        requiresDocument = true;
    }

    // Fill out the returned obj.
    unique_ptr<ParsedProjection> pp(new ParsedProjection());

    // The positional operator uses the MatchDetails from the query
    // expression to know which array element was matched.
    pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;

    // Save the raw spec.  It should be owned by the LiteParsedQuery.
    verify(spec.isOwned());
    pp->_source = spec;
    pp->_returnKey = hasIndexKeyProjection;
    pp->_requiresDocument = requiresDocument;

    // Add meta-projections.
    pp->_wantGeoNearPoint = wantGeoNearPoint;
    pp->_wantGeoNearDistance = wantGeoNearDistance;
    pp->_wantSortKey = wantSortKey;

    // If it's possible to compute the projection in a covered fashion, populate _requiredFields
    // so the planner can perform projection analysis.
    if (!pp->_requiresDocument) {
        if (includeID) {
            pp->_requiredFields.push_back("_id");
        }

        // The only way we could be here is if spec is only simple non-dotted-field inclusions or
        // the $meta sortKey projection. Therefore we can iterate over spec to get the fields
        // required.
        BSONObjIterator srcIt(spec);
        while (srcIt.more()) {
            BSONElement elt = srcIt.next();
            // We've already handled the _id field before entering this loop.
            if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
                continue;
            }
            // $meta sortKey should not be checked as a part of _requiredFields, since it can
            // potentially produce a covered projection as long as the sort key is covered.
            if (BSONType::Object == elt.type()) {
                dassert(elt.Obj() == BSON("$meta"
                                          << "sortKey"));
                continue;
            }
            if (elt.trueValue()) {
                pp->_requiredFields.push_back(elt.fieldName());
            }
        }
    }

    // returnKey clobbers everything except for sortKey meta-projection.
    if (hasIndexKeyProjection && !wantSortKey) {
        pp->_requiresDocument = false;
    }

    *out = pp.release();
    return Status::OK();
}
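As a worked illustration of the _requiredFields block at the end of make(): an inclusion spec such as {a: 1, b: 1} with _id not excluded yields the field list {_id, a, b}, which the planner compares against index keys to decide whether the projection is covered. A standalone sketch under those simplifying assumptions (no operators, no dotted paths; names are illustrative):

#include <string>
#include <utility>
#include <vector>

// Simplified: a projection spec is a list of (field, include?) pairs -- the
// only shape the code above treats as potentially covered.
std::vector<std::string> requiredFieldsSketch(
    const std::vector<std::pair<std::string, bool>>& spec, bool includeID) {
    std::vector<std::string> fields;
    if (includeID)
        fields.push_back("_id");
    for (const auto& field : spec) {
        if (field.first == "_id")
            continue;  // handled above, mirroring the loop in make()
        if (field.second)
            fields.push_back(field.first);
    }
    return fields;
}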