void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int level) {
    ReplicationCoordinator* replCoord = ReplicationCoordinator::get(opCtx);
    if (replCoord->getSettings().usingReplSets()) {
        IsMasterResponse isMasterResponse;
        replCoord->fillIsMasterForReplSet(&isMasterResponse);
        result.appendElements(isMasterResponse.toBSON());
        if (level) {
            replCoord->appendSlaveInfoData(&result);
        }
        return;
    }

    result.appendBool("ismaster",
                      ReplicationCoordinator::get(opCtx)->isMasterForReportingPurposes());

    if (level) {
        BSONObjBuilder sources(result.subarrayStart("sources"));

        int n = 0;
        list<BSONObj> src;
        {
            const NamespaceString localSources{"local.sources"};
            AutoGetCollectionForReadCommand ctx(opCtx, localSources);
            auto exec = InternalPlanner::collectionScan(
                opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::NO_YIELD);
            BSONObj obj;
            PlanExecutor::ExecState state;
            while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
                src.push_back(obj.getOwned());
            }

            // Non-yielding collection scans from InternalPlanner will never error.
            invariant(PlanExecutor::IS_EOF == state);
        }

        for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append(s["host"]);
            string sourcename = s["source"].valuestr();
            if (sourcename != "main")
                bb.append(s["source"]);
            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t(bb.subobjStart("syncedTo"));
                t.appendDate("time", e.timestampTime());
                t.append("inc", e.timestampInc());
                t.done();
            }

            if (level > 1) {
                invariant(!opCtx->lockState()->isLocked());
                // note: there is no socket-level (SO_* style) timeout on this connection; perhaps
                // we should have one.
                ScopedDbConnection conn(s["host"].valuestr());

                DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
                if (cliConn && replAuthenticate(cliConn)) {
                    BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
                                                  Query().sort(BSON("$natural" << 1)));
                    BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
                                                 Query().sort(BSON("$natural" << -1)));
                    bb.appendDate("masterFirst", first["ts"].timestampTime());
                    bb.appendDate("masterLast", last["ts"].timestampTime());
                    const auto lag = (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
                }
                conn.done();
            }

            sources.append(BSONObjBuilder::numStr(n++), bb.obj());
        }

        sources.done();

        replCoord->appendSlaveInfoData(&result);
    }
}
template <class ValType, class ShardType>
Query ConfigDiffTracker<ValType, ShardType>::configDiffQuery( const set<ChunkVersion>& extraMinorVersions ) const
{
    verifyAttached();

    //
    // The basic idea behind the query is to find all the chunks $gt the current max version, and
    // then also pick up the chunks we need minor versions for - splits and (2.0) max chunks on
    // shards
    //

    static const int maxMinorVersionClauses = 50;
    BSONObjBuilder queryB;

    int numStaleMinorClauses = extraMinorVersions.size() + _maxShardVersions->size();

#ifdef _DEBUG
    // In debug builds, randomly trigger full reloads to exercise both codepaths
    if( rand() % 2 ) numStaleMinorClauses = maxMinorVersionClauses;
#endif

    queryB.append(ChunkType::ns(), _ns);

    //
    // If we have only a few minor versions to refresh, we can be more selective in our query
    //
    if( numStaleMinorClauses < maxMinorVersionClauses ) {

        //
        // Get any version changes higher than we know currently
        //
        BSONArrayBuilder queryOrB( queryB.subarrayStart( "$or" ) );
        {
            BSONObjBuilder queryNewB( queryOrB.subobjStart() );
            {
                BSONObjBuilder ts(queryNewB.subobjStart(ChunkType::DEPRECATED_lastmod()));
                // We should *always* pull at least a single chunk back; this lets us quickly
                // detect if our collection was unsharded (and, most of the time, if it was
                // resharded) in the meantime
                ts.appendTimestamp( "$gte", _maxVersion->toLong() );
                ts.done();
            }

            queryNewB.done();
        }

        // Get any shard version changes higher than we know currently
        // Needed since there could have been a split of the max version chunk of any shard
        // TODO: Ideally, we shouldn't care about these
        for( typename map<ShardType, ChunkVersion>::const_iterator it = _maxShardVersions->begin();
             it != _maxShardVersions->end();
             it++ ) {

            BSONObjBuilder queryShardB( queryOrB.subobjStart() );
            queryShardB.append(ChunkType::shard(), nameFrom( it->first ) );
            {
                BSONObjBuilder ts(queryShardB.subobjStart(ChunkType::DEPRECATED_lastmod()));
                ts.appendTimestamp( "$gt", it->second.toLong() );
                ts.done();
            }
            queryShardB.done();
        }

        // Get any minor version changes we've marked as interesting
        // TODO: Ideally we shouldn't care about these
        for( set<ChunkVersion>::const_iterator it = extraMinorVersions.begin(); it != extraMinorVersions.end(); it++ ) {

            BSONObjBuilder queryShardB( queryOrB.subobjStart() );
            {
                BSONObjBuilder ts(queryShardB.subobjStart(ChunkType::DEPRECATED_lastmod()));
                ts.appendTimestamp( "$gt", it->toLong() );
                ts.appendTimestamp( "$lt",
                                    ChunkVersion( it->majorVersion() + 1, 0, OID() ).toLong() );
                ts.done();
            }
            queryShardB.done();
        }

        queryOrB.done();
    }

    BSONObj query = queryB.obj();

    LOG(2) << "major version query from " << *_maxVersion << " and over "
           << _maxShardVersions->size() << " shards is " << query << endl;

    //
    // NOTE: IT IS IMPORTANT FOR CONSISTENCY THAT WE SORT BY ASC VERSION, TO HANDLE
    // CURSOR YIELDING BETWEEN CHUNKS BEING MIGRATED.
    //
    // This ensures that changes to chunk version (which will always be higher) will always
    // come *after* our current position in the chunk cursor.
    //

    Query queryObj(query);
    queryObj.sort(BSON( "lastmod" << 1 ));

    return queryObj;
}
Example #3
    bool run(OperationContext* txn,
             const string& dbname,
             BSONObj& cmdObj,
             int,
             string& errmsg,
             BSONObjBuilder& result) {
        if (!cmdObj["start"].eoo()) {
            errmsg = "using deprecated 'start' argument to geoNear";
            return false;
        }

        const NamespaceString nss(parseNs(dbname, cmdObj));
        AutoGetCollectionForRead ctx(txn, nss);

        Collection* collection = ctx.getCollection();
        if (!collection) {
            errmsg = "can't find ns";
            return false;
        }

        IndexCatalog* indexCatalog = collection->getIndexCatalog();

        // cout << "raw cmd " << cmdObj.toString() << endl;

        // We seek to populate this.
        string nearFieldName;
        bool using2DIndex = false;
        if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
            return false;
        }

        PointWithCRS point;
        uassert(17304,
                "'near' field must be point",
                GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());

        bool isSpherical = cmdObj["spherical"].trueValue();
        if (!using2DIndex) {
            uassert(17301, "2dsphere index must have spherical: true", isSpherical);
        }

        // Build the $near expression for the query.
        BSONObjBuilder nearBob;
        if (isSpherical) {
            nearBob.append("$nearSphere", cmdObj["near"].Obj());
        } else {
            nearBob.append("$near", cmdObj["near"].Obj());
        }

        if (!cmdObj["maxDistance"].eoo()) {
            uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
            nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
        }

        if (!cmdObj["minDistance"].eoo()) {
            uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
            uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
            nearBob.append("$minDistance", cmdObj["minDistance"].number());
        }

        if (!cmdObj["uniqueDocs"].eoo()) {
            warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
        }

        // And, build the full query expression.
        BSONObjBuilder queryBob;
        queryBob.append(nearFieldName, nearBob.obj());
        if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
            queryBob.appendElements(cmdObj["query"].Obj());
        }
        BSONObj rewritten = queryBob.obj();

        // Extract the collation, if it exists.
        // TODO SERVER-23473: Pass this collation spec object down so that it can be converted into
        // a CollatorInterface.
        BSONObj collation;
        {
            BSONElement collationElt;
            Status collationEltStatus =
                bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElt);
            if (!collationEltStatus.isOK() && (collationEltStatus != ErrorCodes::NoSuchKey)) {
                return appendCommandStatus(result, collationEltStatus);
            }
            if (collationEltStatus.isOK()) {
                collation = collationElt.Obj();
            }
        }

        long long numWanted = 100;
        const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
        BSONElement eNumWanted = cmdObj[limitName];
        if (!eNumWanted.eoo()) {
            uassert(17303, "limit must be number", eNumWanted.isNumber());
            numWanted = eNumWanted.safeNumberLong();
            uassert(17302, "limit must be >=0", numWanted >= 0);
        }

        bool includeLocs = false;
        if (!cmdObj["includeLocs"].eoo()) {
            includeLocs = cmdObj["includeLocs"].trueValue();
        }

        double distanceMultiplier = 1.0;
        BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
        if (!eDistanceMultiplier.eoo()) {
            uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
            distanceMultiplier = eDistanceMultiplier.number();
            uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
        }

        BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis"
                                     << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

        const ExtensionsCallbackReal extensionsCallback(txn, &nss);
        auto statusWithCQ = CanonicalQuery::canonicalize(
            nss, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), extensionsCallback);
        if (!statusWithCQ.isOK()) {
            errmsg = "Can't parse filter / create query";
            return false;
        }
        unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

        // Prevent chunks from being cleaned up during yields - this allows us to only check the
        // version on initial entry into geoNear.
        RangePreserver preserver(collection);

        auto statusWithPlanExecutor =
            getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
        if (!statusWithPlanExecutor.isOK()) {
            errmsg = "can't get query executor";
            return false;
        }

        unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());

        double totalDistance = 0;
        BSONObjBuilder resultBuilder(result.subarrayStart("results"));
        double farthestDist = 0;

        BSONObj currObj;
        long long results = 0;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&currObj, NULL))) {
            // Come up with the correct distance.
            double dist = currObj["$dis"].number() * distanceMultiplier;
            totalDistance += dist;
            if (dist > farthestDist) {
                farthestDist = dist;
            }

            // Strip out '$dis' and '$pt' from the result obj.  The rest gets added as 'obj'
            // in the command result.
            BSONObjIterator resIt(currObj);
            BSONObjBuilder resBob;
            while (resIt.more()) {
                BSONElement elt = resIt.next();
                if (!mongoutils::str::equals("$pt", elt.fieldName()) &&
                    !mongoutils::str::equals("$dis", elt.fieldName())) {
                    resBob.append(elt);
                }
            }
            BSONObj resObj = resBob.obj();

            // Don't make a too-big result object.
            if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
                warning() << "Too many geoNear results for query " << rewritten.toString()
                          << ", truncating output.";
                break;
            }

            // Add the next result to the result builder.
            BSONObjBuilder oneResultBuilder(
                resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
            oneResultBuilder.append("dis", dist);
            if (includeLocs) {
                oneResultBuilder.appendAs(currObj["$pt"], "loc");
            }
            oneResultBuilder.append("obj", resObj);
            oneResultBuilder.done();

            ++results;

            // Break if we have the number of requested result documents.
            if (results >= numWanted) {
                break;
            }
        }

        resultBuilder.done();

        // Return an error if execution fails for any reason.
        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            log() << "Plan executor error during geoNear command: " << PlanExecutor::statestr(state)
                  << ", stats: " << Explain::getWinningPlanStats(exec.get());

            return appendCommandStatus(result,
                                       Status(ErrorCodes::OperationFailed,
                                              str::stream()
                                                  << "Executor error during geoNear command: "
                                                  << WorkingSetCommon::toStatusString(currObj)));
        }

        PlanSummaryStats summary;
        Explain::getSummaryStats(*exec, &summary);

        // Fill out the stats subobj.
        BSONObjBuilder stats(result.subobjStart("stats"));

        stats.appendNumber("nscanned", summary.totalKeysExamined);
        stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

        if (results > 0) {
            stats.append("avgDistance", totalDistance / results);
        }
        stats.append("maxDistance", farthestDist);
        stats.append("time", CurOp::get(txn)->elapsedMillis());
        stats.done();

        collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);

        CurOp::get(txn)->debug().setPlanSummaryMetrics(summary);

        return true;
    }
Example #4
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            r.checkAuth( Auth::WRITE );

            BSONObjBuilder b;
            vector<Shard> shards;

            if ( strcmp( ns , "inprog" ) == 0 ) {
                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    ScopedDbConnection conn( shard );
                    BSONObj temp = conn->findOne( r.getns() , BSONObj() );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn.done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                BSONElement e = q.query["op"];
                if ( strstr( r.getns() , "admin." ) == 0 ) {
                    b.append( "err" , "unauthorized" );
                }
                else if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        ScopedDbConnection conn( s );
                        conn->findOne( r.getns() , BSON( "op" << opid ) );
                        conn.done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                log( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }
Example #5
    bool run(OperationContext* txn,
             const std::string& db,
             BSONObj& cmdObj,
             int options,
             std::string& errmsg,
             BSONObjBuilder& result) final {
        const bool includeAll = cmdObj["$all"].trueValue();
        const bool ownOpsOnly = cmdObj["$ownOps"].trueValue();

        // Filter the output
        BSONObj filter;
        {
            BSONObjBuilder b;
            BSONObjIterator i(cmdObj);
            invariant(i.more());
            i.next();  // skip {currentOp: 1} which is required to be the first element
            while (i.more()) {
                BSONElement e = i.next();
                if (str::equals("$all", e.fieldName())) {
                    continue;
                } else if (str::equals("$ownOps", e.fieldName())) {
                    continue;
                }

                b.append(e);
            }
            filter = b.obj();
        }

        // We use ExtensionsCallbackReal here instead of ExtensionsCallbackNoop in order to support
        // the use case of having a $where filter with currentOp. However, since we don't have a
        // collection, we pass in a fake collection name (and this is okay, because $where parsing
        // only relies on the database part of the namespace).
        const NamespaceString fakeNS(db, "$cmd");
        const CollatorInterface* collator = nullptr;
        const Matcher matcher(filter, ExtensionsCallbackReal(txn, &fakeNS), collator);

        BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog"));

        for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
                Client* client = cursor.next();) {
            invariant(client);

            stdx::lock_guard<Client> lk(*client);

            if (ownOpsOnly &&
                    !AuthorizationSession::get(txn->getClient())->isCoauthorizedWithClient(client)) {
                continue;
            }

            const OperationContext* opCtx = client->getOperationContext();

            if (!includeAll) {
                // Skip over inactive connections.
                if (!opCtx)
                    continue;
            }

            BSONObjBuilder infoBuilder;

            // The client information
            client->reportState(infoBuilder);

            const auto& clientMetadata =
                ClientMetadataIsMasterState::get(txn->getClient()).getClientMetadata();
            if (clientMetadata) {
                auto appName = clientMetadata.get().getApplicationName();
                if (!appName.empty()) {
                    infoBuilder.append("appName", appName);
                }
            }

            // Operation context specific information
            infoBuilder.appendBool("active", static_cast<bool>(opCtx));
            if (opCtx) {
                infoBuilder.append("opid", opCtx->getOpID());
                if (opCtx->isKillPending()) {
                    infoBuilder.append("killPending", true);
                }

                CurOp::get(opCtx)->reportState(&infoBuilder);

                // LockState
                Locker::LockerInfo lockerInfo;
                opCtx->lockState()->getLockerInfo(&lockerInfo);
                fillLockerInfo(lockerInfo, infoBuilder);
            }

            infoBuilder.done();

            const BSONObj info = infoBuilder.obj();

            if (includeAll || matcher.matches(info)) {
                inprogBuilder.append(info);
            }
        }

        inprogBuilder.done();

        if (lockedForWriting()) {
            result.append("fsyncLock", true);
            result.append("info",
                          "use db.fsyncUnlock() to terminate the fsync write/snapshot lock");
        }

        return true;
    }
Example #6
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            BSONObjBuilder b;
            vector<Shard> shards;

            ClientBasic* client = ClientBasic::getCurrent();
            AuthorizationSession* authSession = client->getAuthorizationSession();
            if ( strcmp( ns , "inprog" ) == 0 ) {
                const bool isAuthorized = authSession->isAuthorizedForActionsOnResource(
                        ResourcePattern::forClusterResource(), ActionType::inprog);
                audit::logInProgAuthzCheck(
                        client, q.query, isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run inprog", isAuthorized);

                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    ScopedDbConnection conn(shard.getConnString());
                    BSONObj temp = conn->findOne( r.getns() , q.query );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn.done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                const bool isAuthorized = authSession->isAuthorizedForActionsOnResource(
                        ResourcePattern::forClusterResource(), ActionType::killop);
                audit::logKillOpAuthzCheck(
                        client,
                        q.query,
                        isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run killop", isAuthorized);

                BSONElement e = q.query["op"];
                if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        ScopedDbConnection conn(s.getConnString());
                        conn->findOne( r.getns() , BSON( "op" << opid ) );
                        conn.done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                warning() << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }
Example #7

        /*
         * Runs the text search described by searchString on namespace ns and puts the
         * matching documents and stats in result.
         * @param dbname, name of db
         * @param cmdObj, object that contains the entire command
         * @param cmdOptions, command options
         * @param ns, full namespace to search
         * @param searchString, the text query string
         * @param language, search language ("" if not set)
         * @param limit, maximum number of results to return
         * @param filter, additional (non-text) query filter
         * @param projection, projection applied to returned documents
         * @param errmsg, reference to error message
         * @param result, reference to builder for result
         * @return true if successful, false otherwise
         */
        bool FTSCommand::_run(const string& dbname,
                              BSONObj& cmdObj,
                              int cmdOptions,
                              const string& ns,
                              const string& searchString,
                              string language, // "" for not-set
                              int limit,
                              BSONObj& filter,
                              BSONObj& projection,
                              string& errmsg,
                              BSONObjBuilder& result ) {

            Timer comm;

            scoped_ptr<Projection> pr;
            if ( !projection.isEmpty() ) {
                pr.reset( new Projection() );
                pr->init( projection );
            }

            // priority queue for results
            Results results;

            Database* db = cc().database();
            Collection* collection = db->getCollection( ns );

            if ( !collection ) {
                errmsg = "can't find ns";
                return false;
            }

            vector<int> idxMatches;
            collection->details()->findIndexByType( INDEX_NAME, idxMatches );
            if ( idxMatches.size() == 0 ) {
                errmsg = str::stream() << "no text index for: " << ns;
                return false;
            }
            if ( idxMatches.size() > 1 ) {
                errmsg = str::stream() << "too many text indexes for: " << ns;
                return false;
            }

            BSONObj indexPrefix;

            IndexDescriptor* descriptor = collection->getIndexCatalog()->getDescriptor(idxMatches[0]);
            auto_ptr<FTSAccessMethod> fam(new FTSAccessMethod(descriptor));
            if ( language == "" ) {
                language = fam->getSpec().defaultLanguage().str();
            }
            Status s = fam->getSpec().getIndexPrefix( filter, &indexPrefix );
            if ( !s.isOK() ) {
                errmsg = s.toString();
                return false;
            }


            FTSQuery query;
            if ( !query.parse( searchString, language ).isOK() ) {
                errmsg = "can't parse search";
                return false;
            }
            result.append( "queryDebugString", query.debugString() );
            result.append( "language", language );

            FTSSearch search(descriptor, fam->getSpec(), indexPrefix, query, filter );
            search.go( &results, limit );

            // grab underlying container inside priority queue
            vector<ScoredLocation> r( results.dangerous() );

            // sort results by score (not always in correct order, especially w.r.t. multiterm)
            sort( r.begin(), r.end() );

            // build the results bson array shown to user
            BSONArrayBuilder a( result.subarrayStart( "results" ) );

            int tempSize = 1024 * 1024; // leave a mb for other things
            long long numReturned = 0;

            for ( unsigned n = 0; n < r.size(); n++ ) {
                BSONObj obj = BSONObj::make(r[n].rec);
                BSONObj toSendBack = obj;

                if ( pr ) {
                    toSendBack = pr->transform(obj);
                }

                if ( ( tempSize + toSendBack.objsize() ) >= BSONObjMaxUserSize ) {
                    break;
                }

                BSONObjBuilder x( a.subobjStart() );
                x.append( "score" , r[n].score );
                x.append( "obj", toSendBack );

                BSONObj xobj = x.done();
                tempSize += xobj.objsize();

                numReturned++;
            }

            a.done();

            // returns some stats to the user
            BSONObjBuilder bb( result.subobjStart( "stats" ) );
            bb.appendNumber( "nscanned" , search.getKeysLookedAt() );
            bb.appendNumber( "nscannedObjects" , search.getObjLookedAt() );
            bb.appendNumber( "n" , numReturned );
            bb.appendNumber( "nfound" , r.size() );
            bb.append( "timeMicros", (int)comm.micros() );
            bb.done();

            return true;
        }
Example #8
        bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            string ns = dbname + "." + cmdObj.firstElement().valuestr();

            if (!cmdObj["start"].eoo()) {
                errmsg = "using deprecated 'start' argument to geoNear";
                return false;
            }

            Database* db = cc().database();
            if ( !db ) {
                errmsg = "can't find ns";
                return false;
            }

            Collection* collection = db->getCollection( ns );
            if ( !collection ) {
                errmsg = "can't find ns";
                return false;
            }

            IndexCatalog* indexCatalog = collection->getIndexCatalog();

            // cout << "raw cmd " << cmdObj.toString() << endl;

            // We seek to populate this.
            string nearFieldName;
            bool using2DIndex = false;
            if (!getFieldName(collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
                return false;
            }

            uassert(17304, "'near' field must be point",
                    !cmdObj["near"].eoo() && cmdObj["near"].isABSONObj()
                    && GeoParser::isPoint(cmdObj["near"].Obj()));

            bool isSpherical = cmdObj["spherical"].trueValue();
            if (!using2DIndex) {
                uassert(17301, "2dsphere index must have spherical: true", isSpherical);
            }

            // Build the $near expression for the query.
            BSONObjBuilder nearBob;
            if (isSpherical) {
                nearBob.append("$nearSphere", cmdObj["near"].Obj());
            }
            else {
                nearBob.append("$near", cmdObj["near"].Obj());
            }

            if (!cmdObj["maxDistance"].eoo()) {
                uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber());
                nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
            }

            if (!cmdObj["minDistance"].eoo()) {
                uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
                uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber());
                nearBob.append("$minDistance", cmdObj["minDistance"].number());
            }

            if (!cmdObj["uniqueDocs"].eoo()) {
                nearBob.append("$uniqueDocs", cmdObj["uniqueDocs"].trueValue());
            }

            // And, build the full query expression.
            BSONObjBuilder queryBob;
            queryBob.append(nearFieldName, nearBob.obj());
            if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
                queryBob.appendElements(cmdObj["query"].Obj());
            }
            BSONObj rewritten = queryBob.obj();

            // cout << "rewritten query: " << rewritten.toString() << endl;

            int numWanted = 100;
            const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
            BSONElement eNumWanted = cmdObj[limitName];
            if (!eNumWanted.eoo()) {
                uassert(17303, "limit must be number", eNumWanted.isNumber());
                numWanted = eNumWanted.numberInt();
                uassert(17302, "limit must be >=0", numWanted >= 0);
            }

            bool includeLocs = false;
            if (!cmdObj["includeLocs"].eoo()) {
                includeLocs = cmdObj["includeLocs"].trueValue();
            }

            double distanceMultiplier = 1.0;
            BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
            if (!eDistanceMultiplier.eoo()) {
                uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
                distanceMultiplier = eDistanceMultiplier.number();
                uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
            }

            BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
                                   "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

            CanonicalQuery* cq;
            if (!CanonicalQuery::canonicalize(ns, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), &cq).isOK()) {
                errmsg = "Can't parse filter / create query";
                return false;
            }

            Runner* rawRunner;
            if (!getRunner(cq, &rawRunner, 0).isOK()) {
                errmsg = "can't get query runner";
                return false;
            }

            auto_ptr<Runner> runner(rawRunner);

            double totalDistance = 0;
            BSONObjBuilder resultBuilder(result.subarrayStart("results"));
            double farthestDist = 0;

            BSONObj currObj;
            int results = 0;
            while ((results < numWanted) && Runner::RUNNER_ADVANCED == runner->getNext(&currObj, NULL)) {
                // cout << "result is " << currObj.toString() << endl;

                double dist = currObj["$dis"].number() * distanceMultiplier;
                // cout << std::setprecision(10) << "HK GEON mul'd dist is " << dist << " raw dist is " << currObj["$dis"].number() << endl;
                totalDistance += dist;
                if (dist > farthestDist) { farthestDist = dist; }

                BSONObjBuilder oneResultBuilder(
                    resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
                oneResultBuilder.append("dis", dist);
                if (includeLocs) {
                    oneResultBuilder.appendAs(currObj["$pt"], "loc");
                }

                // strip out '$dis' and '$pt' and the rest gets added as 'obj'.
                BSONObjIterator resIt(currObj);
                BSONObjBuilder resBob;
                while (resIt.more()) {
                    BSONElement elt = resIt.next();
                    if (!mongoutils::str::equals("$pt", elt.fieldName())
                        && !mongoutils::str::equals("$dis", elt.fieldName())) {
                        resBob.append(elt);
                    }
                }
                oneResultBuilder.append("obj", resBob.obj());
                oneResultBuilder.done();
                ++results;
            }

            resultBuilder.done();

            // Fill out the stats subobj.
            BSONObjBuilder stats(result.subobjStart("stats"));

            // Fill in nscanned from the explain.
            TypeExplain* bareExplain;
            Status res = runner->getExplainPlan(&bareExplain);
            if (res.isOK()) {
                auto_ptr<TypeExplain> explain(bareExplain);
                stats.append("nscanned", explain->getNScanned());
                stats.append("objectsLoaded", explain->getNScannedObjects());
            }

            stats.append("avgDistance", totalDistance / results);
            stats.append("maxDistance", farthestDist);
            stats.append("time", cc().curop()->elapsedMillis());
            stats.done();

            return true;
        }
Example #9
void ProtobufBsonFormatter::formatSingleField(const google::protobuf::Message& message,
    const google::protobuf::FieldDescriptor* field,
    BSONObjBuilder& builder) {
    std::string fieldName("");

    if (field->is_extension()) {
        //TODO
    }
    else if (field->type() == google::protobuf::FieldDescriptor::TYPE_GROUP) {
        // Groups must be serialized with their original capitalization.
        fieldName = field->message_type()->name().c_str();
        //...append values
    }
    else {
        fieldName = field->camelcase_name();
        const google::protobuf::Reflection* reflection = message.GetReflection();

        if (field->is_repeated()) {
            int fieldsize = reflection->FieldSize(message, field);

            switch (field->cpp_type()) {
            case FieldDescriptor::CPPTYPE_INT32:    {       //= 1,     // TYPE_INT32, TYPE_SINT32, TYPE_SFIXED32
                std::vector<int32> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedInt32(message,field,i));
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_INT64:    {       //= 2,     // TYPE_INT64, TYPE_SINT64, TYPE_SFIXED64
                std::vector<long long> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedInt64(message,field,i));
                }

                builder.append(fieldName, values);

                break;
            }

            case FieldDescriptor::CPPTYPE_UINT32:   {       //= 3,     // TYPE_UINT32, TYPE_FIXED32
                std::vector<uint32> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedUInt32(message,field,i));
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_UINT64:   {       //= 4,     // TYPE_UINT64, TYPE_FIXED64
                std::vector<long long> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back((long long)reflection->GetRepeatedUInt64(message,field,i));
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_DOUBLE:   {       //= 5,     // TYPE_DOUBLE
                std::vector<double> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedDouble(message,field,i));
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_FLOAT:    {       //= 6,     // TYPE_FLOAT
                std::vector<float> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedFloat(message,field,i));
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_BOOL:     {       //= 7,     // TYPE_BOOL
                std::vector<bool> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedBool(message,field,i));
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_STRING:   {       //= 9,     // TYPE_STRING, TYPE_BYTES

                std::vector<std::string> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedString(message,field,i));
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_ENUM:     {       //= 8,     // TYPE_ENUM
                std::vector<std::string> values;
                values.reserve(fieldsize);

                for (int i = 0; i < fieldsize; ++i) {
                    values.push_back(reflection->GetRepeatedEnum(message,field,i)->name());
                }

                builder.append(fieldName,values);

                break;
            }

            case FieldDescriptor::CPPTYPE_MESSAGE:  {       //= 10,    // TYPE_MESSAGE, TYPE_GROUP
                BSONObjBuilder sub(builder.subarrayStart(fieldName));

                for (int i = 0; i < fieldsize; ++i)  {
                    char number[16] = {0};
                    sprintf(number, "%d", i);
                    BSONObjBuilder obj(sub.subobjStart(number));
                    formatMessage(reflection->GetRepeatedMessage(message, field, i), obj);
                    obj.done();
                }

                sub.done();

                break;
            }

            default:                                {
                break;
            }
            }// end switch
        }
        else { //not repeated
            switch (/*cppType*/field->cpp_type()) {
            case FieldDescriptor::CPPTYPE_INT32:    {       //= 1,     // TYPE_INT32, TYPE_SINT32, TYPE_SFIXED32
                builder.append(fieldName, reflection->GetInt32(message,field));
                break;
            }

            case FieldDescriptor::CPPTYPE_INT64:    {       //= 2,     // TYPE_INT64, TYPE_SINT64, TYPE_SFIXED64
                builder.append(fieldName,
                    static_cast<long long>(reflection->GetInt64(message,field)));
                break;
            }

            case FieldDescriptor::CPPTYPE_UINT32:   {       //= 3,     // TYPE_UINT32, TYPE_FIXED32
                builder.append(fieldName,reflection->GetUInt32(message,field));
                break;
            }

            case FieldDescriptor::CPPTYPE_UINT64:   {       //= 4,     // TYPE_UINT64, TYPE_FIXED64
                builder.append(fieldName,
                    static_cast<long long>(reflection->GetUInt64(message,field)));
                break;
            }

            case FieldDescriptor::CPPTYPE_DOUBLE:   {       //= 5,     // TYPE_DOUBLE
                builder.append(fieldName,reflection->GetDouble(message,field));
                break;
            }

            case FieldDescriptor::CPPTYPE_FLOAT:    {       //= 6,     // TYPE_FLOAT
                builder.append(fieldName,reflection->GetFloat(message,field));
                break;
            }

            case FieldDescriptor::CPPTYPE_BOOL:     {       //= 7,     // TYPE_BOOL
                builder.append(fieldName,reflection->GetBool(message,field));
                break;
            }

            case FieldDescriptor::CPPTYPE_STRING:   {       //= 9,     // TYPE_STRING, TYPE_BYTES
                builder.append(fieldName,reflection->GetString(message,field));
                break;
            }

            case FieldDescriptor::CPPTYPE_ENUM:     {       //= 8,     // TYPE_ENUM
                builder.append(fieldName,reflection->GetEnum(message,field)->name());
                break;
            }

            case FieldDescriptor::CPPTYPE_MESSAGE:  {       //= 10,    // TYPE_MESSAGE, TYPE_GROUP
                BSONObjBuilder sub(builder.subobjStart(fieldName));
                formatMessage(reflection->GetMessage(message, field), sub);
                sub.done();
                break;
            }

            default:                                {
                break;
            }
            }// end switch
        }
    } //end else
}
Example #10
        bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            
            _runCalled = true;

            long long start = Listener::getElapsedTimeMillis();
            BSONObjBuilder timeBuilder(256);

            const ClientBasic* myClientBasic = ClientBasic::getCurrent();
            AuthorizationManager* authManager = myClientBasic->getAuthorizationManager();
            
            // --- basic fields that are global

            result.append("host", prettyHostName() );
            result.append("version", versionString);
            result.append("process",cmdLine.binaryName);
            result.append("pid", (int)getpid());
            result.append("uptime",(double) (time(0)-cmdLine.started));
            result.append("uptimeMillis", (long long)(curTimeMillis64()-_started));
            result.append("uptimeEstimate",(double) (start/1000));
            result.appendDate( "localTime" , jsTime() );

            timeBuilder.appendNumber( "after basic" , Listener::getElapsedTimeMillis() - start );
            
            // --- all sections
            
            for ( SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i ) {
                ServerStatusSection* section = i->second;
                
                std::vector<Privilege> requiredPrivileges;
                section->addRequiredPrivileges(&requiredPrivileges);
                if (!authManager->checkAuthForPrivileges(requiredPrivileges).isOK())
                    continue;

                bool include = section->includeByDefault();
                
                BSONElement e = cmdObj[section->getSectionName()];
                if ( e.type() ) {
                    include = e.trueValue();
                }
                
                if ( ! include )
                    continue;
                
                BSONObj data = section->generateSection(e);
                if ( data.isEmpty() )
                    continue;

                result.append( section->getSectionName(), data );
                timeBuilder.appendNumber( static_cast<string>(str::stream() << "after " << section->getSectionName()), 
                                          Listener::getElapsedTimeMillis() - start );
            }

            // --- counters
            
            if ( MetricTree::theMetricTree ) {
                MetricTree::theMetricTree->appendTo( result );
            }

            // --- some hard coded global things hard to pull out

            {
                RamLog* rl = RamLog::get( "warnings" );
                massert(15880, "no ram log for warnings?" , rl);
                
                if (rl->lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
                    vector<const char*> lines;
                    rl->get( lines );
                    
                    BSONArrayBuilder arr( result.subarrayStart( "warnings" ) );
                    for ( unsigned i=std::max(0,(int)lines.size()-10); i<lines.size(); i++ )
                        arr.append( lines[i] );
                    arr.done();
                }
            }
            
            timeBuilder.appendNumber( "at end" , Listener::getElapsedTimeMillis() - start );
            if ( Listener::getElapsedTimeMillis() - start > 1000 ) {
                BSONObj t = timeBuilder.obj();
                log() << "serverStatus was very slow: " << t << endl;
                result.append( "timing" , t );
            }

            return true;
        }
Example #11
void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
    ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
    if (replCoord->getSettings().usingReplSets()) {
        IsMasterResponse isMasterResponse;
        replCoord->fillIsMasterForReplSet(&isMasterResponse);
        result.appendElements(isMasterResponse.toBSON());
        if (level) {
            replCoord->appendSlaveInfoData(&result);
        }
        return;
    }

    // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed
    if (replAllDead) {
        result.append("ismaster", 0);
        string s = string("dead: ") + replAllDead;
        result.append("info", s);
    } else {
        result.appendBool("ismaster",
                          getGlobalReplicationCoordinator()->isMasterForReportingPurposes());
    }

    if (level) {
        BSONObjBuilder sources(result.subarrayStart("sources"));

        int n = 0;
        list<BSONObj> src;
        {
            const char* localSources = "local.sources";
            AutoGetCollectionForRead ctx(txn, localSources);
            unique_ptr<PlanExecutor> exec(
                InternalPlanner::collectionScan(txn, localSources, ctx.getCollection()));
            BSONObj obj;
            PlanExecutor::ExecState state;
            while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
                src.push_back(obj.getOwned());
            }
        }

        for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append(s["host"]);
            string sourcename = s["source"].valuestr();
            if (sourcename != "main")
                bb.append(s["source"]);
            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t(bb.subobjStart("syncedTo"));
                t.appendDate("time", e.timestampTime());
                t.append("inc", e.timestampInc());
                t.done();
            }

            if (level > 1) {
                wassert(!txn->lockState()->isLocked());
                // note: there is no socket-level (SO_* style) timeout on this connection; perhaps
                // we should have one.
                ScopedDbConnection conn(s["host"].valuestr());

                DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
                if (cliConn && replAuthenticate(cliConn)) {
                    BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
                                                  Query().sort(BSON("$natural" << 1)));
                    BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
                                                 Query().sort(BSON("$natural" << -1)));
                    bb.appendDate("masterFirst", first["ts"].timestampTime());
                    bb.appendDate("masterLast", last["ts"].timestampTime());
                    const auto lag = (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
                }
                conn.done();
            }

            sources.append(BSONObjBuilder::numStr(n++), bb.obj());
        }

        sources.done();

        replCoord->appendSlaveInfoData(&result);
    }
}
Example #12
BSONObj ReplSetConfig::toBSON() const {
    BSONObjBuilder configBuilder;
    configBuilder.append(kIdFieldName, _replSetName);
    configBuilder.appendIntOrLL(kVersionFieldName, _version);
    if (_configServer) {
        // Only include "configsvr" field if true
        configBuilder.append(kConfigServerFieldName, _configServer);
    }

    // Only include writeConcernMajorityJournalDefault if it is not the default value for this
    // protocol version, to prevent breaking cross-version (3.2.1) compatibility of ReplSetConfigs.
    if (_protocolVersion > 0) {
        configBuilder.append(kProtocolVersionFieldName, _protocolVersion);
        if (!_writeConcernMajorityJournalDefault) {
            configBuilder.append(kWriteConcernMajorityJournalDefaultFieldName,
                                 _writeConcernMajorityJournalDefault);
        }
    } else if (_writeConcernMajorityJournalDefault) {
        configBuilder.append(kWriteConcernMajorityJournalDefaultFieldName,
                             _writeConcernMajorityJournalDefault);
    }

    BSONArrayBuilder members(configBuilder.subarrayStart(kMembersFieldName));
    for (MemberIterator mem = membersBegin(); mem != membersEnd(); mem++) {
        members.append(mem->toBSON(getTagConfig()));
    }
    members.done();

    BSONObjBuilder settingsBuilder(configBuilder.subobjStart(kSettingsFieldName));
    settingsBuilder.append(kChainingAllowedFieldName, _chainingAllowed);
    settingsBuilder.appendIntOrLL(kHeartbeatIntervalFieldName,
                                  durationCount<Milliseconds>(_heartbeatInterval));
    settingsBuilder.appendIntOrLL(kHeartbeatTimeoutFieldName,
                                  durationCount<Seconds>(_heartbeatTimeoutPeriod));
    settingsBuilder.appendIntOrLL(kElectionTimeoutFieldName,
                                  durationCount<Milliseconds>(_electionTimeoutPeriod));
    settingsBuilder.appendIntOrLL(kCatchUpTimeoutFieldName,
                                  durationCount<Milliseconds>(_catchUpTimeoutPeriod));
    settingsBuilder.appendIntOrLL(kCatchUpTakeoverDelayFieldName,
                                  durationCount<Milliseconds>(_catchUpTakeoverDelay));


    BSONObjBuilder gleModes(settingsBuilder.subobjStart(kGetLastErrorModesFieldName));
    for (StringMap<ReplSetTagPattern>::const_iterator mode = _customWriteConcernModes.begin();
         mode != _customWriteConcernModes.end();
         ++mode) {
        if (mode->first[0] == '$') {
            // Filter out internal modes
            continue;
        }
        BSONObjBuilder modeBuilder(gleModes.subobjStart(mode->first));
        for (ReplSetTagPattern::ConstraintIterator itr = mode->second.constraintsBegin();
             itr != mode->second.constraintsEnd();
             itr++) {
            modeBuilder.append(_tagConfig.getTagKey(ReplSetTag(itr->getKeyIndex(), 0)),
                               itr->getMinCount());
        }
        modeBuilder.done();
    }
    gleModes.done();

    settingsBuilder.append(kGetLastErrorDefaultsFieldName, _defaultWriteConcern.toBSON());

    if (_replicaSetId.isSet()) {
        settingsBuilder.append(kReplicaSetIdFieldName, _replicaSetId);
    }

    settingsBuilder.done();
    return configBuilder.obj();
}
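
// For orientation, a hedged sketch of the rough document shape toBSON() emits. The set name,
// member hosts, and settings values below are invented, some settings fields are elided, and
// optional fields such as "configsvr" and "writeConcernMajorityJournalDefault" are omitted
// because they hold their defaults.
BSONObj sampleConfig =
    BSON("_id" << "rs0"
         << "version" << 3
         << "protocolVersion" << 1
         << "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "node1:27017")
                                    << BSON("_id" << 1 << "host" << "node2:27017"))
         << "settings" << BSON("chainingAllowed" << true
                               << "heartbeatIntervalMillis" << 2000
                               << "heartbeatTimeoutSecs" << 10
                               << "electionTimeoutMillis" << 10000
                               << "getLastErrorModes" << BSONObj()
                               << "getLastErrorDefaults" << BSON("w" << 1 << "wtimeout" << 0)));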
Example #13
    bool run2DSphereGeoNear(const IndexDetails &id, BSONObj& cmdObj, string& errmsg,
                            BSONObjBuilder& result) {
        S2IndexType *idxType = static_cast<S2IndexType*>(id.getSpec().getType());
        verify(&id == idxType->getDetails());

        // We support both "num" and "limit" options to control limit
        int numWanted = 100;
        const char* limitName = cmdObj["num"].isNumber() ? "num" : "limit";
        if (cmdObj[limitName].isNumber()) {
            numWanted = cmdObj[limitName].numberInt();
            verify(numWanted >= 0);
        }

        // Don't count any docs twice.  Isn't this default behavior?  Or will yields screw this up?
        //bool uniqueDocs = false;
        //if (!cmdObj["uniqueDocs"].eoo()) uniqueDocs = cmdObj["uniqueDocs"].trueValue();

        // Add the location information to each result as a field with name 'loc'.
        bool includeLocs = false;
        if (!cmdObj["includeLocs"].eoo()) includeLocs = cmdObj["includeLocs"].trueValue();

        // The actual query point
        uassert(16551, "'near' param missing/invalid", !cmdObj["near"].eoo());
        BSONObj nearObj = cmdObj["near"].embeddedObject();

        // nearObj must be a point.
        uassert(16571, "near must be called with a point, called with " + nearObj.toString(),
                GeoParser::isPoint(nearObj));

        // The non-near query part.
        BSONObj query;
        if (cmdObj["query"].isABSONObj())
            query = cmdObj["query"].embeddedObject();

        // The farthest away we're willing to look.
        double maxDistance = numeric_limits<double>::max();
        if (cmdObj["maxDistance"].isNumber())
            maxDistance = cmdObj["maxDistance"].number();

        vector<string> geoFieldNames;
        idxType->getGeoFieldNames(&geoFieldNames);
        uassert(16552, "geoNear called but no indexed geo fields?", 1 == geoFieldNames.size());
        QueryGeometry queryGeo(geoFieldNames[0]);
        uassert(16553, "geoNear couldn't parse geo: " + nearObj.toString(), queryGeo.parseFrom(nearObj));
        vector<QueryGeometry> regions;
        regions.push_back(queryGeo);

        scoped_ptr<S2NearCursor> cursor(new S2NearCursor(idxType->keyPattern(), idxType->getDetails(),
                                                         query, regions, idxType->getParams(),
                                                         numWanted, maxDistance));

        double totalDistance = 0;
        int results = 0;
        BSONObjBuilder resultBuilder(result.subarrayStart("results"));
        double farthestDist = 0;

        while (cursor->ok()) {
            double dist = cursor->currentDistance();
            totalDistance += dist;
            if (dist > farthestDist) { farthestDist = dist; }

            BSONObjBuilder oneResultBuilder(resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
            oneResultBuilder.append("dis", dist);
            if (includeLocs) {
                BSONElementSet geoFieldElements;
                cursor->current().getFieldsDotted(geoFieldNames[0], geoFieldElements, false);
                for (BSONElementSet::iterator oi = geoFieldElements.begin();
                        oi != geoFieldElements.end(); ++oi) {
                    if (oi->isABSONObj()) {
                        oneResultBuilder.appendAs(*oi, "loc");
                    }
                }
            }

            oneResultBuilder.append("obj", cursor->current());
            oneResultBuilder.done();
            ++results;
            cursor->advance();
        }

        resultBuilder.done();

        BSONObjBuilder stats(result.subobjStart("stats"));
        stats.append("time", cc().curop()->elapsedMillis());
        stats.appendNumber("nscanned", cursor->nscanned());
        stats.append("avgDistance", totalDistance / results);
        stats.append("maxDistance", farthestDist);
        stats.done();

        return true;
    }
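
// A hedged sketch of the kind of command document this handler accepts; the collection name,
// coordinates, and limits are invented, and either "num" or "limit" may carry the result cap.
BSONObj geoNearCmd =
    BSON("geoNear" << "places"
         << "near" << BSON("type" << "Point"
                           << "coordinates" << BSON_ARRAY(-73.97 << 40.77))
         << "num" << 50                    // "limit" would be honored as well
         << "maxDistance" << 500.0
         << "includeLocs" << true
         << "query" << BSON("category" << "cafe"));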
Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
                                                 const string& ns,
                                                 const ShardKeyPattern& fieldsAndOrder,
                                                 bool unique,
                                                 const vector<BSONObj>& initPoints,
                                                 const set<ShardId>& initShardIds) {
    // Lock the collection globally so that no other mongos can try to shard or drop the collection
    // at the same time.
    auto scopedDistLock = getDistLockManager()->lock(ns, "shardCollection");
    if (!scopedDistLock.isOK()) {
        return scopedDistLock.getStatus();
    }

    auto status = getDatabase(txn, nsToDatabase(ns));
    if (!status.isOK()) {
        return status.getStatus();
    }

    ShardId dbPrimaryShardId = status.getValue().value.getPrimary();
    const auto primaryShard = grid.shardRegistry()->getShard(txn, dbPrimaryShardId);

    {
        // In 3.0 and prior we include this extra safety check that the collection is not getting
        // sharded concurrently by two different mongos instances. It is not 100%-proof, but it
        // reduces the chance that two invocations of shard collection will step on each other's
        // toes.  Now we take the distributed lock so going forward this check won't be necessary
        // but we leave it around for compatibility with other mongoses from 3.0.
        // TODO(spencer): Remove this after 3.2 ships.
        auto countStatus = _runCountCommandOnConfig(
            txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::ns(ns)));
        if (!countStatus.isOK()) {
            return countStatus.getStatus();
        }
        if (countStatus.getValue() > 0) {
            return Status(ErrorCodes::AlreadyInitialized,
                          str::stream() << "collection " << ns << " already sharded with "
                                        << countStatus.getValue() << " chunks.");
        }
    }

    // Record start in changelog
    {
        BSONObjBuilder collectionDetail;
        collectionDetail.append("shardKey", fieldsAndOrder.toBSON());
        collectionDetail.append("collection", ns);
        collectionDetail.append("primary", primaryShard->toString());

        {
            BSONArrayBuilder initialShards(collectionDetail.subarrayStart("initShards"));
            for (const ShardId& shardId : initShardIds) {
                initialShards.append(shardId);
            }
        }

        collectionDetail.append("numChunks", static_cast<int>(initPoints.size() + 1));

        logChange(txn,
                  txn->getClient()->clientAddress(true),
                  "shardCollection.start",
                  ns,
                  collectionDetail.obj());
    }

    shared_ptr<ChunkManager> manager(new ChunkManager(ns, fieldsAndOrder, unique));
    manager->createFirstChunks(txn, dbPrimaryShardId, &initPoints, &initShardIds);
    manager->loadExistingRanges(txn, nullptr);

    CollectionInfo collInfo;
    collInfo.useChunkManager(manager);
    collInfo.save(txn, ns);
    manager->reload(txn, true);

    // Tell the primary mongod to refresh its data
    // TODO:  Think the real fix here is for mongos to just
    //        assume that all collections are sharded, when we get there
    SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioningNoPersist(
        grid.shardRegistry()->getConfigServerConnectionString(),
        dbPrimaryShardId,
        primaryShard->getConnString(),
        NamespaceString(ns),
        manager->getVersion(),
        true);

    auto ssvStatus = grid.shardRegistry()->runCommandWithNotMasterRetries(
        txn, dbPrimaryShardId, "admin", ssv.toBSON());
    if (!ssvStatus.isOK()) {
        warning() << "could not update initial version of " << ns << " on shard primary "
                  << dbPrimaryShardId << ssvStatus.getStatus();
    }

    logChange(txn,
              txn->getClient()->clientAddress(true),
              "shardCollection",
              ns,
              BSON("version" << manager->getVersion().toString()));

    return Status::OK();
}
Example #15
        bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            const string ns = dbname + "." + cmdObj.firstElement().valuestr();

            if (!cmdObj["start"].eoo()) {
                errmsg = "using deprecated 'start' argument to geoNear";
                return false;
            }

            Client::ReadContext ctx(txn, ns);

            Database* db = ctx.ctx().db();
            if ( !db ) {
                errmsg = "can't find ns";
                return false;
            }

            Collection* collection = db->getCollection( txn, ns );
            if ( !collection ) {
                errmsg = "can't find ns";
                return false;
            }

            IndexCatalog* indexCatalog = collection->getIndexCatalog();

            // cout << "raw cmd " << cmdObj.toString() << endl;

            // We seek to populate this.
            string nearFieldName;
            bool using2DIndex = false;
            if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
                return false;
            }

            PointWithCRS point;
            uassert(17304, "'near' field must be point",
                    GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());

            bool isSpherical = cmdObj["spherical"].trueValue();
            if (!using2DIndex) {
                uassert(17301, "2dsphere index must have spherical: true", isSpherical);
            }

            // Build the $near expression for the query.
            BSONObjBuilder nearBob;
            if (isSpherical) {
                nearBob.append("$nearSphere", cmdObj["near"].Obj());
            }
            else {
                nearBob.append("$near", cmdObj["near"].Obj());
            }

            if (!cmdObj["maxDistance"].eoo()) {
                uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber());
                nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
            }

            if (!cmdObj["minDistance"].eoo()) {
                uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
                uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber());
                nearBob.append("$minDistance", cmdObj["minDistance"].number());
            }

            if (!cmdObj["uniqueDocs"].eoo()) {
                warning() << ns << ": ignoring deprecated uniqueDocs option in geoNear command";
            }

            // And, build the full query expression.
            BSONObjBuilder queryBob;
            queryBob.append(nearFieldName, nearBob.obj());
            if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
                queryBob.appendElements(cmdObj["query"].Obj());
            }
            BSONObj rewritten = queryBob.obj();

            // cout << "rewritten query: " << rewritten.toString() << endl;

            int numWanted = 100;
            const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
            BSONElement eNumWanted = cmdObj[limitName];
            if (!eNumWanted.eoo()) {
                uassert(17303, "limit must be number", eNumWanted.isNumber());
                numWanted = eNumWanted.numberInt();
                uassert(17302, "limit must be >=0", numWanted >= 0);
            }

            bool includeLocs = false;
            if (!cmdObj["includeLocs"].eoo()) {
                includeLocs = cmdObj["includeLocs"].trueValue();
            }

            double distanceMultiplier = 1.0;
            BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
            if (!eDistanceMultiplier.eoo()) {
                uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
                distanceMultiplier = eDistanceMultiplier.number();
                uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
            }

            BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
                                   "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

            CanonicalQuery* cq;

            const NamespaceString nss(dbname);
            const WhereCallbackReal whereCallback(txn, nss.db());

            if (!CanonicalQuery::canonicalize(ns,
                                              rewritten,
                                              BSONObj(),
                                              projObj,
                                              0,
                                              numWanted,
                                              BSONObj(),
                                              &cq,
                                              whereCallback).isOK()) {
                errmsg = "Can't parse filter / create query";
                return false;
            }

            PlanExecutor* rawExec;
            if (!getExecutor(txn, collection, cq, &rawExec, 0).isOK()) {
                errmsg = "can't get query runner";
                return false;
            }

            auto_ptr<PlanExecutor> exec(rawExec);
            const ScopedExecutorRegistration safety(exec.get());

            double totalDistance = 0;
            BSONObjBuilder resultBuilder(result.subarrayStart("results"));
            double farthestDist = 0;

            BSONObj currObj;
            int results = 0;
            while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {

                // Come up with the correct distance.
                double dist = currObj["$dis"].number() * distanceMultiplier;
                totalDistance += dist;
                if (dist > farthestDist) { farthestDist = dist; }

                // Strip out '$dis' and '$pt' from the result obj.  The rest gets added as 'obj'
                // in the command result.
                BSONObjIterator resIt(currObj);
                BSONObjBuilder resBob;
                while (resIt.more()) {
                    BSONElement elt = resIt.next();
                    if (!mongoutils::str::equals("$pt", elt.fieldName())
                        && !mongoutils::str::equals("$dis", elt.fieldName())) {
                        resBob.append(elt);
                    }
                }
                BSONObj resObj = resBob.obj();

                // Don't make a too-big result object.
                if (resultBuilder.len() + resObj.objsize()> BSONObjMaxUserSize) {
                    warning() << "Too many geoNear results for query " << rewritten.toString()
                              << ", truncating output.";
                    break;
                }

                // Add the next result to the result builder.
                BSONObjBuilder oneResultBuilder(
                    resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
                oneResultBuilder.append("dis", dist);
                if (includeLocs) {
                    oneResultBuilder.appendAs(currObj["$pt"], "loc");
                }
                oneResultBuilder.append("obj", resObj);
                oneResultBuilder.done();
                ++results;
            }

            resultBuilder.done();

            // Fill out the stats subobj.
            BSONObjBuilder stats(result.subobjStart("stats"));

            // Fill in nscanned from the explain.
            PlanSummaryStats summary;
            Explain::getSummaryStats(exec.get(), &summary);
            stats.appendNumber("nscanned", summary.totalKeysExamined);
            stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

            stats.append("avgDistance", totalDistance / results);
            stats.append("maxDistance", farthestDist);
            stats.append("time", txn->getCurOp()->elapsedMillis());
            stats.done();

            return true;
        }
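
            // A hedged illustration of the rewrite performed above (field names and values
            // invented, assuming the indexed near-field is "loc"): the command turns its
            // arguments into a normal find filter plus a $meta projection that carries the
            // matched point and distance back out of the executor. The $meta keyword strings
            // are assumed values of the LiteParsedQuery constants.
            BSONObj rewrittenExample =
                BSON("loc" << BSON("$nearSphere" << BSON("type" << "Point"
                                                         << "coordinates"
                                                         << BSON_ARRAY(2.35 << 48.85))
                                   << "$maxDistance" << 1000.0)
                     << "status" << "open");  // user-supplied "query" fields are appended last
            BSONObj projExample =
                BSON("$pt" << BSON("$meta" << "geoNearPoint")
                     << "$dis" << BSON("$meta" << "geoNearDistance"));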
Example #16
StatusWith<BSONObj> ParsedDistinct::asAggregationCommand() const {
    BSONObjBuilder aggregationBuilder;

    invariant(_query);
    const QueryRequest& qr = _query->getQueryRequest();
    aggregationBuilder.append("aggregate", qr.nss().coll());

    // Build a pipeline that accomplishes the distinct request. The building code constructs a
    // pipeline that looks like this, assuming the distinct is on the key "a.b.c"
    //
    //      [
    //          { $match: { ... } },
    //          { $unwind: { path: "a", preserveNullAndEmptyArrays: true } },
    //          { $unwind: { path: "a.b", preserveNullAndEmptyArrays: true } },
    //          { $unwind: { path: "a.b.c", preserveNullAndEmptyArrays: true } },
    //          { $match: {"a": {$_internalSchemaType: "object"},
    //                     "a.b": {$_internalSchemaType: "object"}}}
    //          { $group: { _id: null, distinct: { $addToSet: "$<key>" } } }
    //      ]
    //
    // The purpose of the intermediate $unwind stages is to deal with cases where there is an array
    // along the distinct path. For example, if we're distincting on "a.b" and have a document like
    // {a: [{b: 1}, {b: 2}]}, distinct() should produce two values: 1 and 2. If we were to only
    // unwind on "a.b", the document would pass through the $unwind unmodified, and the $group
    // stage would treat the entire array as a key, rather than each element.
    //
    // The reason for the $match with $_internalSchemaType is to deal with cases of nested
    // arrays. The distinct command will not traverse paths inside of nested arrays. For example, a
    // distinct on "a.b" with the following document will produce no results:
    // {a: [[{b: 1}]]}
    //
    // Any arrays remaining after the $unwinds must have been nested arrays, so in order to match
    // the behavior of the distinct() command, we filter them out before the $group.
    BSONArrayBuilder pipelineBuilder(aggregationBuilder.subarrayStart("pipeline"));
    if (!qr.getFilter().isEmpty()) {
        BSONObjBuilder matchStageBuilder(pipelineBuilder.subobjStart());
        matchStageBuilder.append("$match", qr.getFilter());
        matchStageBuilder.doneFast();
    }

    FieldPath path(_key);
    addNestedUnwind(&pipelineBuilder, path);
    addMatchRemovingNestedArrays(&pipelineBuilder, path);

    BSONObjBuilder groupStageBuilder(pipelineBuilder.subobjStart());
    {
        BSONObjBuilder groupBuilder(groupStageBuilder.subobjStart("$group"));
        groupBuilder.appendNull("_id");
        {
            BSONObjBuilder distinctBuilder(groupBuilder.subobjStart("distinct"));
            distinctBuilder.append("$addToSet", str::stream() << "$" << _key);
            distinctBuilder.doneFast();
        }
        groupBuilder.doneFast();
    }
    groupStageBuilder.doneFast();
    pipelineBuilder.doneFast();

    aggregationBuilder.append(kCollationField, qr.getCollation());

    if (qr.getMaxTimeMS() > 0) {
        aggregationBuilder.append(QueryRequest::cmdOptionMaxTimeMS, qr.getMaxTimeMS());
    }

    if (!qr.getReadConcern().isEmpty()) {
        aggregationBuilder.append(repl::ReadConcernArgs::kReadConcernFieldName,
                                  qr.getReadConcern());
    }

    if (!qr.getUnwrappedReadPref().isEmpty()) {
        aggregationBuilder.append(QueryRequest::kUnwrappedReadPrefField, qr.getUnwrappedReadPref());
    }

    if (!qr.getComment().empty()) {
        aggregationBuilder.append(kCommentField, qr.getComment());
    }

    // Specify the 'cursor' option so that aggregation uses the cursor interface.
    aggregationBuilder.append("cursor", BSONObj());

    return aggregationBuilder.obj();
}
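
// For a concrete, hedged picture: a distinct on key "a.b" over an assumed collection "coll"
// with filter {status: "A"} would come out roughly as the command below. Stage contents
// follow the comment block above, with the unwind paths "$"-prefixed as the pipeline expects.
BSONObj distinctAsAgg =
    BSON("aggregate" << "coll"
         << "pipeline"
         << BSON_ARRAY(
                BSON("$match" << BSON("status" << "A"))
                << BSON("$unwind" << BSON("path" << "$a"
                                          << "preserveNullAndEmptyArrays" << true))
                << BSON("$unwind" << BSON("path" << "$a.b"
                                          << "preserveNullAndEmptyArrays" << true))
                << BSON("$match" << BSON("a" << BSON("$_internalSchemaType" << "object")))
                << BSON("$group" << BSON("_id" << BSONNULL
                                         << "distinct" << BSON("$addToSet" << "$a.b"))))
         << "collation" << BSONObj()
         << "cursor" << BSONObj());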
Example #17
    bool run(OperationContext* opCtx,
             const string& dbname,
             const BSONObj& cmdObj,
             BSONObjBuilder& result) {
        _runCalled = true;

        const auto service = opCtx->getServiceContext();
        const auto clock = service->getFastClockSource();
        const auto runStart = clock->now();
        BSONObjBuilder timeBuilder(256);

        const auto authSession = AuthorizationSession::get(Client::getCurrent());

        // --- basic fields that are global

        result.append("host", prettyHostName());
        result.append("version", VersionInfoInterface::instance().version());
        result.append("process", serverGlobalParams.binaryName);
        result.append("pid", ProcessId::getCurrent().asLongLong());
        result.append("uptime", (double)(time(0) - serverGlobalParams.started));
        auto uptime = clock->now() - _started;
        result.append("uptimeMillis", durationCount<Milliseconds>(uptime));
        result.append("uptimeEstimate", durationCount<Seconds>(uptime));
        result.appendDate("localTime", jsTime());

        timeBuilder.appendNumber("after basic",
                                 durationCount<Milliseconds>(clock->now() - runStart));

        // --- all sections

        for (SectionMap::const_iterator i = _sections.begin(); i != _sections.end(); ++i) {
            ServerStatusSection* section = i->second;

            std::vector<Privilege> requiredPrivileges;
            section->addRequiredPrivileges(&requiredPrivileges);
            if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
                continue;

            bool include = section->includeByDefault();
            const auto& elem = cmdObj[section->getSectionName()];
            if (elem.type()) {
                include = elem.trueValue();
            }

            if (!include) {
                continue;
            }

            section->appendSection(opCtx, elem, &result);
            timeBuilder.appendNumber(
                static_cast<string>(str::stream() << "after " << section->getSectionName()),
                durationCount<Milliseconds>(clock->now() - runStart));
        }

        // --- counters
        bool includeMetricTree = MetricTree::theMetricTree != NULL;
        if (cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue())
            includeMetricTree = false;

        if (includeMetricTree) {
            MetricTree::theMetricTree->appendTo(result);
        }

        // --- some hard coded global things hard to pull out

        {
            RamLog::LineIterator rl(RamLog::get("warnings"));
            if (rl.lastWrite() >= time(0) - (10 * 60)) {  // only show warnings from last 10 minutes
                BSONArrayBuilder arr(result.subarrayStart("warnings"));
                while (rl.more()) {
                    arr.append(rl.next());
                }
                arr.done();
            }
        }

        auto runElapsed = clock->now() - runStart;
        timeBuilder.appendNumber("at end", durationCount<Milliseconds>(runElapsed));
        if (runElapsed > Milliseconds(1000)) {
            BSONObj t = timeBuilder.obj();
            log() << "serverStatus was very slow: " << t;
            result.append("timing", t);
        }

        return true;
    }
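
    // A hedged sketch of how a caller steers the section loop above; the section names here
    // are only examples. A field named after a section overrides includeByDefault(), and
    // "metrics: 0" suppresses the counter tree appended after the sections.
    BSONObj serverStatusCmd =
        BSON("serverStatus" << 1
             << "repl" << 0           // hide a section that is on by default
             << "rangeDeleter" << 1   // request a section that is off by default
             << "metrics" << 0);      // skip MetricTree::appendTo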
Example #18
    Query ConfigDiffTracker<ValType,ShardType>::
        configDiffQuery( const set<ShardChunkVersion>& extraMinorVersions ) const
    {
        verifyAttached();

        //
        // Basic idea behind the query is to find all the chunks $gt the current max version, and
        // then also update chunks that we need minor versions - splits and (2.0) max chunks on
        // shards
        //

        static const int maxMinorVersionClauses = 50;
        BSONObjBuilder queryB;

        int numStaleMinorClauses = extraMinorVersions.size() + _maxShardVersions->size();

#ifdef _DEBUG
        // In debug builds, randomly trigger full reloads to exercise both codepaths
        if( rand() % 2 ) numStaleMinorClauses = maxMinorVersionClauses;
#endif

        if( numStaleMinorClauses < maxMinorVersionClauses ){

            BSONArrayBuilder queryOrB( queryB.subarrayStart( "$or" ) );

            //
            // Get any version changes higher than we know currently
            //

            {
                BSONObjBuilder queryNewB( queryOrB.subobjStart() );

                queryNewB.append( "ns", _ns );
                {
                    BSONObjBuilder ts( queryNewB.subobjStart( "lastmod" ) );
                    // We should *always* pull at least a single chunk back, this lets us quickly
                    // detect if our collection was unsharded (and most of the time if it was
                    // resharded) in the meantime
                    ts.appendTimestamp( "$gte", _maxVersion->toLong() );
                    ts.done();
                }

                queryNewB.done();
            }

            // Get any shard version changes higher than we know currently
            // Needed since there could have been a split of the max version chunk of any shard
            // TODO: Ideally, we shouldn't care about these
            for( typename map<ShardType, ShardChunkVersion>::const_iterator it = _maxShardVersions->begin(); it != _maxShardVersions->end(); it++ ){
                BSONObjBuilder queryShardB( queryOrB.subobjStart() );

                queryShardB.append( "ns", _ns );
                queryShardB.append( "shard", nameFrom( it->first ) );
                {
                    BSONObjBuilder ts( queryShardB.subobjStart( "lastmod" ) );
                    ts.appendTimestamp( "$gt", it->second.toLong() );
                    ts.done();
                }
                queryShardB.done();
            }

            // Get any minor version changes we've marked as interesting
            // TODO: Ideally we shouldn't care about these
            for( set<ShardChunkVersion>::const_iterator it = extraMinorVersions.begin(); it != extraMinorVersions.end(); it++ ){
                BSONObjBuilder queryShardB( queryOrB.subobjStart() );

                queryShardB.append( "ns", _ns );
                {
                    BSONObjBuilder ts( queryShardB.subobjStart( "lastmod" ) );
                    ts.appendTimestamp( "$gt", it->toLong() );
                    ts.appendTimestamp( "$lt",
                                        ShardChunkVersion( it->majorVersion() + 1, 0, OID() ).toLong() );
                    ts.done();
                }
                queryShardB.done();
            }

            queryOrB.done();
        }
        else{

            //
            // We don't want to send a giant $or query to the server, so just get all the chunks
            //

            queryB.append( "ns", _ns );
        }

        BSONObj query = queryB.obj();

        // log() << "major version query from " << *_maxVersion << " and over " << _maxShardVersions->size() << " shards is " << query << endl;

        return Query( query );
    }
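
    // A hedged sketch of the query shape produced when the clause count stays under
    // maxMinorVersionClauses. The namespace, shard names, and versions are invented, and the
    // modern Timestamp type stands in for the raw toLong() values the builder actually appends.
    BSONObj exampleDiffQuery =
        BSON("$or" << BSON_ARRAY(
                 // everything at or above the highest chunk version we have seen
                 BSON("ns" << "test.coll" << "lastmod" << BSON("$gte" << Timestamp(5, 0)))
                 // one clause per shard, catching splits of that shard's max chunk
                 << BSON("ns" << "test.coll" << "shard" << "shard0000"
                         << "lastmod" << BSON("$gt" << Timestamp(4, 3)))
                 // extra minor versions, bounded to the same major version
                 << BSON("ns" << "test.coll"
                         << "lastmod" << BSON("$gt" << Timestamp(3, 1)
                                              << "$lt" << Timestamp(4, 0)))));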
Example #19
        void searchCommand( NamespaceDetails* nsd , int idxNo ,
                            const BSONObj& n /*near*/ , double maxDistance , const BSONObj& search ,
                            BSONObjBuilder& result , unsigned limit ) {

            Timer t;

            log(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance << " search: " << search << endl;
            int x,y;
            {
                BSONObjIterator i( n );
                x = hash( i.next() );
                y = hash( i.next() );
            }
            int scale = (int)ceil( maxDistance / _bucketSize );

            GeoHaystackSearchHopper hopper(n,maxDistance,limit,_geo);

            long long btreeMatches = 0;

            for ( int a=-scale; a<=scale; a++ ) {
                for ( int b=-scale; b<=scale; b++ ) {

                    BSONObjBuilder bb;
                    bb.append( "" , makeString( x + a , y + b ) );
                    for ( unsigned i=0; i<_other.size(); i++ ) {
                        BSONElement e = search.getFieldDotted( _other[i] );
                        if ( e.eoo() )
                            bb.appendNull( "" );
                        else
                            bb.appendAs( e , "" );
                    }

                    BSONObj key = bb.obj();

                    GEOQUADDEBUG( "KEY: " << key );

                    set<DiskLoc> thisPass;
                    scoped_ptr<BtreeCursor> cursor( BtreeCursor::make( nsd , idxNo , *getDetails() , key , key , true , 1 ) );
                    while ( cursor->ok() ) {
                        pair<set<DiskLoc>::iterator, bool> p = thisPass.insert( cursor->currLoc() );
                        if ( p.second ) {
                            hopper.got( cursor->currLoc() );
                            GEOQUADDEBUG( "\t" << cursor->current() );
                            btreeMatches++;
                        }
                        cursor->advance();
                    }
                }

            }

            BSONArrayBuilder arr( result.subarrayStart( "results" ) );
            int num = hopper.append( arr );
            arr.done();

            {
                BSONObjBuilder b( result.subobjStart( "stats" ) );
                b.append( "time" , t.millis() );
                b.appendNumber( "btreeMatches" , btreeMatches );
                b.append( "n" , num );
                b.done();
            }
        }
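
        // A hedged sketch of one btree key probed by the nested loops above, assuming hash()
        // mapped the query point to cell (5, 7) and the index carries a single extra field
        // whose searched value is "coffee"; as in the real keys, the field names are left empty.
        BSONObjBuilder keyBuilder;
        keyBuilder.append("", "5_7");           // makeString(x + a, y + b)
        keyBuilder.append("", "coffee");        // value pulled from search for _other[0]
        BSONObj exampleKey = keyBuilder.obj();  // { "" : "5_7", "" : "coffee" }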
Example #20
        static bool run2DSphereGeoNear(NamespaceDetails* nsDetails, int idxNo, BSONObj& cmdObj,
                                       const GeoNearArguments &parsedArgs, string& errmsg,
                                       BSONObjBuilder& result) {
            auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(nsDetails, idxNo));
            auto_ptr<S2AccessMethod> sam(new S2AccessMethod(descriptor.get()));
            const S2IndexingParams& params = sam->getParams();
            auto_ptr<S2NearIndexCursor> nic(new S2NearIndexCursor(descriptor.get(), params));

            vector<string> geoFieldNames;
            BSONObjIterator i(descriptor->keyPattern());
            while (i.more()) {
                BSONElement e = i.next();
                if (e.type() == String && IndexNames::GEO_2DSPHERE == e.valuestr()) {
                    geoFieldNames.push_back(e.fieldName());
                }
            }

            // NOTE(hk): If we add a new argument to geoNear, we could have a
            // 2dsphere index with multiple indexed geo fields, and the geoNear
            // could pick the one to run over.  Right now, we just require one.
            uassert(16552, "geoNear requires exactly one indexed geo field", 1 == geoFieldNames.size());
            NearQuery nearQuery(geoFieldNames[0]);
            uassert(16679, "Invalid geometry given as arguments to geoNear: " + cmdObj.toString(),
                    nearQuery.parseFromGeoNear(cmdObj, params.radius));
            uassert(16683, "geoNear on 2dsphere index requires spherical",
                    parsedArgs.isSpherical);

            // NOTE(hk): For a speedup, we could look through the query to see if
            // we've geo-indexed any of the fields in it.
            vector<GeoQuery> regions;

            nic->seek(parsedArgs.query, nearQuery, regions);

            // We do pass in the query above, but it's just so we can possibly use it in our index
            // scan.  We have to do our own matching.
            auto_ptr<Matcher> matcher(new Matcher(parsedArgs.query));

            double totalDistance = 0;
            BSONObjBuilder resultBuilder(result.subarrayStart("results"));
            double farthestDist = 0;

            int results;
            for (results = 0; results < parsedArgs.numWanted && !nic->isEOF(); ++results) {
                BSONObj currObj = nic->getValue().obj();
                if (!matcher->matches(currObj)) {
                    --results;
                    nic->next();
                    continue;
                }

                double dist = nic->currentDistance();
                // If we got the distance in radians, output it in radians too.
                if (nearQuery.fromRadians) { dist /= params.radius; }
                dist *= parsedArgs.distanceMultiplier;
                totalDistance += dist;
                if (dist > farthestDist) { farthestDist = dist; }

                BSONObjBuilder oneResultBuilder(
                    resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
                oneResultBuilder.append("dis", dist);
                if (parsedArgs.includeLocs) {
                    BSONElementSet geoFieldElements;
                    currObj.getFieldsDotted(geoFieldNames[0], geoFieldElements, false);
                    for (BSONElementSet::iterator oi = geoFieldElements.begin();
                            oi != geoFieldElements.end(); ++oi) {
                        if (oi->isABSONObj()) {
                            oneResultBuilder.appendAs(*oi, "loc");
                        }
                    }
                }

                oneResultBuilder.append("obj", currObj);
                oneResultBuilder.done();
                nic->next();
            }

            resultBuilder.done();

            BSONObjBuilder stats(result.subobjStart("stats"));
            stats.appendNumber("nscanned", nic->nscanned());
            stats.append("avgDistance", totalDistance / results);
            stats.append("maxDistance", farthestDist);
            stats.append("time", cc().curop()->elapsedMillis());
            stats.done();

            return true;
        }
Example #21
void msg_callback(const rviz_intel::TriangleMesh::ConstPtr& msg)
{
  BSONObjBuilder document;

  Date_t stamp = msg->header.stamp.sec * 1000.0 + msg->header.stamp.nsec / 1000000.0;
  document.append("header", BSON(   "seq" << msg->header.seq
				 << "stamp" << stamp
				 << "frame_id" << msg->header.frame_id));

  BSONArrayBuilder pointsb(document.subarrayStart("points"));
  std::vector<geometry_msgs::Point32>::const_iterator p;
  for (p = msg->points.begin(); p != msg->points.end(); ++p) {
    pointsb.append(BSON(   "x" << p->x
			<< "y" << p->y
			<< "z" << p->z));
  }
  pointsb.doneFast();

  BSONArrayBuilder normalsb(document.subarrayStart("normals"));
  for (p = msg->normals.begin(); p != msg->normals.end(); ++p) {
    normalsb.append(BSON(   "x" << p->x
		         << "y" << p->y
		         << "z" << p->z));
  }
  normalsb.doneFast();

  BSONArrayBuilder colorsb(document.subarrayStart("colors"));
  std::vector<uint32_t>::const_iterator u;
  for (u = msg->colors.begin(); u != msg->colors.end(); ++u) {
    colorsb.append(*u);
  }
  colorsb.doneFast();

  BSONArrayBuilder color_indsb(document.subarrayStart("color_inds"));
  for (u = msg->color_inds.begin(); u != msg->color_inds.end(); ++u) {
    color_indsb.append(*u);
  }
  color_indsb.doneFast();

  BSONArrayBuilder trianglesb(document.subarrayStart("triangles"));
  std::vector<rviz_intel::Triangle>::const_iterator t;
  for (t = msg->triangles.begin(); t != msg->triangles.end(); ++t) {
    trianglesb.append(BSON(   "i" << t->i
		           << "j" << t->j
		           << "k" << t->k));
  }
  trianglesb.doneFast();
  document.append("sending_node", msg->sending_node);

  mongodb_store::add_meta_for_msg<rviz_intel::TriangleMesh>(msg, document);
  mongodb_conn->insert(collection, document.obj());

  // If we'd get access to the message queue this could be more useful
  // https://code.ros.org/trac/ros/ticket/744
  pthread_mutex_lock(&in_counter_mutex);
  ++in_counter;
  pthread_mutex_unlock(&in_counter_mutex);
  pthread_mutex_lock(&out_counter_mutex);
  ++out_counter;
  pthread_mutex_unlock(&out_counter_mutex);
}
Example #22
    bool ClientInfo::getLastError( const BSONObj& options , BSONObjBuilder& result , bool fromWriteBackListener ) {
        set<string> * shards = getPrev();

        if ( shards->size() == 0 ) {
            result.appendNull( "err" );
            return true;
        }

        vector<WBInfo> writebacks;

        // handle single server
        if ( shards->size() == 1 ) {
            string theShard = *(shards->begin() );

            ShardConnection conn( theShard , "", true );
            
            BSONObj res;
            bool ok = false;
            try {
                ok = conn->runCommand( "admin" , options , res );
            }
            catch( std::exception &e ){
                warning() << "could not get last error." << causedBy( e ) << endl;

                // Catch everything that happens here, since we need to ensure we return our
                // connection when we're finished.
                conn.done();

                return false;
            }

            res = res.getOwned();
            conn.done();
            

            _addWriteBack( writebacks , res );

            // hit other machines just to block
            for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
                string temp = *i;
                if ( temp == theShard )
                    continue;

                ShardConnection conn( temp , "" );
                _addWriteBack( writebacks , conn->getLastErrorDetailed() );
                conn.done();
            }
            clearSinceLastGetError();
            
            if ( writebacks.size() ){
                vector<BSONObj> v = _handleWriteBacks( writebacks , fromWriteBackListener );
                if ( v.size() == 0 && fromWriteBackListener ) {
                    // ok
                }
                else {
                    assert( v.size() == 1 );
                    result.appendElements( v[0] );
                    result.appendElementsUnique( res );
                    result.append( "writebackGLE" , v[0] );
                    result.append( "initialGLEHost" , theShard );
                }
            }
            else {
                result.append( "singleShard" , theShard );
                result.appendElements( res );
            }
            
            return ok;
        }

        BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
        BSONObjBuilder shardRawGLE;

        long long n = 0;
        
        int updatedExistingStat = 0; // 0 is none, -1 has but false, 1 has true

        // hit each shard
        vector<string> errors;
        vector<BSONObj> errorObjects;
        for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
            string theShard = *i;
            bbb.append( theShard );
            ShardConnection conn( theShard , "", true );
            BSONObj res;
            bool ok = false;
            try {
                ok = conn->runCommand( "admin" , options , res );
                shardRawGLE.append( theShard , res );
            }
            catch( std::exception &e ){

                // Safe to return here, since we haven't started any extra processing yet, just
                // collecting responses.
                warning() << "could not get last error." << causedBy( e ) << endl;
                conn.done();
                
                return false;
            }
            
            _addWriteBack( writebacks, res );
            
            string temp = DBClientWithCommands::getLastErrorString( res );
            if ( conn->type() != ConnectionString::SYNC && ( ok == false || temp.size() ) ) {
                errors.push_back( temp );
                errorObjects.push_back( res );
            }

            n += res["n"].numberLong();
            if ( res["updatedExisting"].type() ) {
                if ( res["updatedExisting"].trueValue() )
                    updatedExistingStat = 1;
                else if ( updatedExistingStat == 0 )
                    updatedExistingStat = -1;
            }

            conn.done();
        }

        bbb.done();
        result.append( "shardRawGLE" , shardRawGLE.obj() );

        result.appendNumber( "n" , n );
        if ( updatedExistingStat )
            result.appendBool( "updatedExisting" , updatedExistingStat > 0 );

        // hit other machines just to block
        for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
            string temp = *i;
            if ( shards->count( temp ) )
                continue;

            ShardConnection conn( temp , "" );
            _addWriteBack( writebacks, conn->getLastErrorDetailed() );
            conn.done();
        }
        clearSinceLastGetError();

        if ( errors.size() == 0 ) {
            result.appendNull( "err" );
            _handleWriteBacks( writebacks , fromWriteBackListener );
            return true;
        }

        result.append( "err" , errors[0].c_str() );

        {
            // errs
            BSONArrayBuilder all( result.subarrayStart( "errs" ) );
            for ( unsigned i=0; i<errors.size(); i++ ) {
                all.append( errors[i].c_str() );
            }
            all.done();
        }

        {
            // errObjects
            BSONArrayBuilder all( result.subarrayStart( "errObjects" ) );
            for ( unsigned i=0; i<errorObjects.size(); i++ ) {
                all.append( errorObjects[i] );
            }
            all.done();
        }
        _handleWriteBacks( writebacks , fromWriteBackListener );
        return true;
    }
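
    // A hedged sketch (shard names and counts invented) of the aggregate result this method
    // assembles on the multi-shard path when exactly one shard reports an error.
    BSONObj gleExample =
        BSON("shards" << BSON_ARRAY("shard0000" << "shard0001")
             << "shardRawGLE"
             << BSON("shard0000" << BSON("err" << BSONNULL << "n" << 3 << "ok" << 1)
                     << "shard0001" << BSON("err" << "timeout" << "n" << 0 << "ok" << 1))
             << "n" << 3
             << "updatedExisting" << true
             << "err" << "timeout"                  // first collected error wins
             << "errs" << BSON_ARRAY("timeout")
             << "errObjects" << BSON_ARRAY(BSON("err" << "timeout" << "n" << 0 << "ok" << 1)));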
Example #23
    bool run(OperationContext* txn,
             const string& dbname,
             BSONObj& cmdObj,
             int,
             string& errmsg,
             BSONObjBuilder& result) {
        _runCalled = true;

        long long start = Listener::getElapsedTimeMillis();
        BSONObjBuilder timeBuilder(256);

        const auto authSession = AuthorizationSession::get(ClientBasic::getCurrent());

        // --- basic fields that are global

        result.append("host", prettyHostName());
        result.append("version", versionString);
        result.append("process", serverGlobalParams.binaryName);
        result.append("pid", ProcessId::getCurrent().asLongLong());
        result.append("uptime", (double)(time(0) - serverGlobalParams.started));
        result.append("uptimeMillis", (long long)(curTimeMillis64() - _started));
        result.append("uptimeEstimate", (double)(start / 1000));
        result.appendDate("localTime", jsTime());

        timeBuilder.appendNumber("after basic", Listener::getElapsedTimeMillis() - start);

        // --- all sections

        for (SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i) {
            ServerStatusSection* section = i->second;

            std::vector<Privilege> requiredPrivileges;
            section->addRequiredPrivileges(&requiredPrivileges);
            if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
                continue;

            bool include = section->includeByDefault();

            BSONElement e = cmdObj[section->getSectionName()];
            if (e.type()) {
                include = e.trueValue();
            }

            if (!include)
                continue;

            BSONObj data = section->generateSection(txn, e);
            if (data.isEmpty())
                continue;

            result.append(section->getSectionName(), data);
            timeBuilder.appendNumber(
                static_cast<string>(str::stream() << "after " << section->getSectionName()),
                Listener::getElapsedTimeMillis() - start);
        }

        // --- counters
        bool includeMetricTree = MetricTree::theMetricTree != NULL;
        if (cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue())
            includeMetricTree = false;

        if (includeMetricTree) {
            MetricTree::theMetricTree->appendTo(result);
        }

        // --- some hard coded global things hard to pull out

        {
            RamLog::LineIterator rl(RamLog::get("warnings"));
            if (rl.lastWrite() >= time(0) - (10 * 60)) {  // only show warnings from last 10 minutes
                BSONArrayBuilder arr(result.subarrayStart("warnings"));
                while (rl.more()) {
                    arr.append(rl.next());
                }
                arr.done();
            }
        }

        timeBuilder.appendNumber("at end", Listener::getElapsedTimeMillis() - start);
        if (Listener::getElapsedTimeMillis() - start > 1000) {
            BSONObj t = timeBuilder.obj();
            log() << "serverStatus was very slow: " << t << endl;
            result.append("timing", t);
        }

        return true;
    }
Example #24
 void appendReplicationInfo(BSONObjBuilder& result, int level) {
     if ( replSet ) {
         if( theReplSet == 0 || theReplSet->state().shunned() ) {
             result.append("ismaster", false);
             result.append("secondary", false);
             result.append("info", ReplSet::startupStatusMsg.get());
             result.append( "isreplicaset" , true );
         }
         else {
             theReplSet->fillIsMaster(result);
         }
         return;
     }
     
     if ( replAllDead ) {
         result.append("ismaster", 0);
         string s = string("dead: ") + replAllDead;
         result.append("info", s);
     }
     else {
         result.appendBool("ismaster", _isMaster() );
     }
     
     if ( level && replSet ) {
         result.append( "info" , "is replica set" );
     }
     else if ( level ) {
         BSONObjBuilder sources( result.subarrayStart( "sources" ) );
         
         int n = 0;
         list<BSONObj> src;
         {
             Client::ReadContext ctx("local.sources", dbpath);
             shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
             while ( c->ok() ) {
                 src.push_back(c->current());
                 c->advance();
             }
         }
         
         for( list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++ ) {
             BSONObj s = *i;
             BSONObjBuilder bb;
             bb.append( s["host"] );
             string sourcename = s["source"].valuestr();
             if ( sourcename != "main" )
                 bb.append( s["source"] );
             {
                 BSONElement e = s["syncedTo"];
                 BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
                 t.appendDate( "time" , e.timestampTime() );
                 t.append( "inc" , e.timestampInc() );
                 t.done();
             }
             
             if ( level > 1 ) {
                 wassert( !Lock::isLocked() );
                 // note: there is no so-style timeout on this connection; perhaps we should have one.
                 ScopedDbConnection conn(s["host"].valuestr());
                 
                 DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() );
                 if ( cliConn && replAuthenticate(cliConn, false) ) {
                     BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                           Query().sort( BSON( "$natural" << 1 ) ) );
                     BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                          Query().sort( BSON( "$natural" << -1 ) ) );
                     bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
                     bb.appendDate( "masterLast" , last["ts"].timestampTime() );
                     double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                     bb.append( "lagSeconds" , lag / 1000 );
                 }
                 conn.done();
             }
             
             sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
         }
         
         sources.done();
     }
 }
Example #25
    bool ClientInfo::getLastError( const string& dbName,
                                   const BSONObj& options,
                                   BSONObjBuilder& result,
                                   string& errmsg,
                                   bool fromWriteBackListener)
    {

        set<string> * shards = getPrev();

        if ( shards->size() == 0 ) {
            result.appendNull( "err" );
            return true;
        }

        vector<WBInfo> writebacks;

        //
        // TODO: These branches should be collapsed into a single codepath
        //

        // handle single server
        if ( shards->size() == 1 ) {
            string theShard = *(shards->begin() );

            BSONObj res;
            bool ok = false;
            {
                LOG(5) << "gathering response for gle from: " << theShard << endl;

                ShardConnection conn( theShard , "" );
                try {
                    ok = conn->runCommand( dbName , options , res );
                }
                catch( std::exception &e ) {

                    string message =
                            str::stream() << "could not get last error from shard " << theShard
                                          << causedBy( e );

                    warning() << message << endl;
                    errmsg = message;

                    // Catch everything that happens here, since we need to ensure we return our connection when we're
                    // finished.
                    conn.done();

                    return false;
                }


                res = res.getOwned();
                conn.done();
            }

            _addWriteBack( writebacks , res );

            LOG(4) << "gathering writebacks from " << sinceLastGetError().size() << " hosts for"
                   << " gle (" << theShard << ")" << endl;

            // hit other machines just to block
            for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
                string temp = *i;
                if ( temp == theShard )
                    continue;

                LOG(5) << "gathering writebacks for single-shard gle from: " << temp << endl;

                try {
                    ShardConnection conn( temp , "" );
                    ON_BLOCK_EXIT_OBJ( conn, &ShardConnection::done );
                    _addWriteBack( writebacks , conn->getLastErrorDetailed() );

                }
                catch( std::exception &e ){
                    warning() << "could not clear last error from shard " << temp << causedBy( e ) << endl;
                }

            }
            clearSinceLastGetError();

            LOG(4) << "checking " << writebacks.size() << " writebacks for"
                   << " gle (" << theShard << ")" << endl;

            if ( writebacks.size() ){
                vector<BSONObj> v = _handleWriteBacks( writebacks , fromWriteBackListener );
                if ( v.size() == 0 && fromWriteBackListener ) {
                    // ok
                }
                else {
                    // this will usually be 1
                    // it can be greater than 1 if a write to a different shard
                    // than the last write op had a writeback
                    // all we're going to report is the first
                    // since that's the current write
                    // but we block for all
                    verify( v.size() >= 1 );
                    result.appendElements( v[0] );
                    result.appendElementsUnique( res );
                    result.append( "writebackGLE" , v[0] );
                    result.append( "initialGLEHost" , theShard );
                }
            }
            else {
                result.append( "singleShard" , theShard );
                result.appendElements( res );
            }

            return ok;
        }

        BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
        BSONObjBuilder shardRawGLE;

        long long n = 0;

        int updatedExistingStat = 0; // 0 is none, -1 has but false, 1 has true

        // hit each shard
        vector<string> errors;
        vector<BSONObj> errorObjects;
        for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
            string theShard = *i;
            bbb.append( theShard );

            LOG(5) << "gathering a response for gle from: " << theShard << endl;

            boost::scoped_ptr<ShardConnection> conn;
            BSONObj res;
            bool ok = false;
            try {
                conn.reset( new ShardConnection( theShard , "" ) ); // constructor can throw if shard is down
                ok = (*conn)->runCommand( dbName , options , res );
                shardRawGLE.append( theShard , res );
            }
            catch( std::exception &e ){

                // Safe to return here, since we haven't started any extra processing yet, just collecting
                // responses.

                string message =
                        str::stream() << "could not get last error from a shard " << theShard
                                      << causedBy( e );

                warning() << message << endl;
                errmsg = message;

                if (conn)
                    conn->done();

                return false;
            }

            _addWriteBack( writebacks, res );

            string temp = DBClientWithCommands::getLastErrorString( res );
            if ( (*conn)->type() != ConnectionString::SYNC && ( ok == false || temp.size() ) ) {
                errors.push_back( temp );
                errorObjects.push_back( res );
            }

            n += res["n"].numberLong();
            if ( res["updatedExisting"].type() ) {
                if ( res["updatedExisting"].trueValue() )
                    updatedExistingStat = 1;
                else if ( updatedExistingStat == 0 )
                    updatedExistingStat = -1;
            }

            conn->done();
        }

        bbb.done();
        result.append( "shardRawGLE" , shardRawGLE.obj() );

        result.appendNumber( "n" , n );
        if ( updatedExistingStat )
            result.appendBool( "updatedExisting" , updatedExistingStat > 0 );

        LOG(4) << "gathering writebacks from " << sinceLastGetError().size() << " hosts for"
               << " gle (" << shards->size() << " shards)" << endl;

        // hit other machines just to block
        for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
            string temp = *i;
            if ( shards->count( temp ) )
                continue;

            LOG(5) << "gathering writebacks for multi-shard gle from: " << temp << endl;

            ShardConnection conn( temp , "" );
            try {
                _addWriteBack( writebacks, conn->getLastErrorDetailed() );
            }
            catch( std::exception &e ){
                warning() << "could not clear last error from a shard " << temp << causedBy( e ) << endl;
            }
            conn.done();
        }
        clearSinceLastGetError();

        LOG(4) << "checking " << writebacks.size() << " writebacks for"
                << " gle (" << shards->size() << " shards)" << endl;

        if ( errors.size() == 0 ) {
            result.appendNull( "err" );
            _handleWriteBacks( writebacks , fromWriteBackListener );
            return true;
        }

        result.append( "err" , errors[0].c_str() );

        {
            // errs
            BSONArrayBuilder all( result.subarrayStart( "errs" ) );
            for ( unsigned i=0; i<errors.size(); i++ ) {
                all.append( errors[i].c_str() );
            }
            all.done();
        }

        {
            // errObjects
            BSONArrayBuilder all( result.subarrayStart( "errObjects" ) );
            for ( unsigned i=0; i<errorObjects.size(); i++ ) {
                all.append( errorObjects[i] );
            }
            all.done();
        }

        _handleWriteBacks( writebacks , fromWriteBackListener );
        return true;
    }
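For reference, a minimal sketch of the aggregate document the loop above assembles on the success path (shard names and counters are illustrative and built with the BSON macros rather than taken from a live cluster; the "errs"/"errObjects" arrays are appended only when per-shard errors were collected):

// Illustrative only: shape of the aggregated getLastError reply built above.
BSONObj exampleReply = BSON("shards" << BSON_ARRAY("shard0000" << "shard0001")
                            << "shardRawGLE"
                            << BSON("shard0000" << BSON("n" << 1 << "err" << BSONNULL << "ok" << 1)
                                    << "shard0001" << BSON("n" << 0 << "err" << BSONNULL << "ok" << 1))
                            << "n" << 1
                            << "updatedExisting" << true
                            << "err" << BSONNULL);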
Example #26
        /*
         * Runs the text search described by searchString against the namespace ns and streams
         * the matching documents into result.
         * @param txn, operation context for the command
         * @param dbname, name of the db
         * @param cmdObj, object that contains the entire command
         * @param cmdOptions, command flags
         * @param ns, full namespace to search
         * @param searchString, the text search expression
         * @param language, search language ("" when not set)
         * @param limit, maximum number of results to return
         * @param filter, additional predicate ANDed with the text match
         * @param projection, projection applied to matching documents
         * @param errmsg, reference to error message
         * @param result, reference to builder for result
         * @return true if successful, false otherwise
         */
        bool FTSCommand::_run(OperationContext* txn,
                              const string& dbname,
                              BSONObj& cmdObj,
                              int cmdOptions,
                              const string& ns,
                              const string& searchString,
                              string language, // "" for not-set
                              int limit,
                              BSONObj& filter,
                              BSONObj& projection,
                              string& errmsg,
                              BSONObjBuilder& result ) {

            Timer comm;

            // Rewrite the cmd as a normal query.
            BSONObjBuilder queryBob;
            queryBob.appendElements(filter);

            BSONObjBuilder textBob;
            textBob.append("$search", searchString);
            if (!language.empty()) {
                textBob.append("$language", language);
            }
            queryBob.append("$text", textBob.obj());

            // This is the query we exec.
            BSONObj queryObj = queryBob.obj();

            // We sort by the score.
            BSONObj sortSpec = BSON("$s" << BSON("$meta" << LiteParsedQuery::metaTextScore));

            // We also project the score into the document and strip it out later during the reformatting
            // of the results.
            BSONObjBuilder projBob;
            projBob.appendElements(projection);
            projBob.appendElements(sortSpec);
            BSONObj projObj = projBob.obj();

            AutoGetCollectionForRead ctx(txn, ns);

            CanonicalQuery* cq;
            Status canonicalizeStatus = 
                    CanonicalQuery::canonicalize(ns, 
                                                 queryObj,
                                                 sortSpec,
                                                 projObj, 
                                                 0,
                                                 limit,
                                                 BSONObj(),
                                                 &cq,
                                                 WhereCallbackReal(txn, dbname));
            if (!canonicalizeStatus.isOK()) {
                errmsg = canonicalizeStatus.reason();
                return false;
            }

            PlanExecutor* rawExec;
            Status getExecStatus = getExecutor(txn, ctx.getCollection(), cq, &rawExec);
            if (!getExecStatus.isOK()) {
                errmsg = getExecStatus.reason();
                return false;
            }

            auto_ptr<PlanExecutor> exec(rawExec);

            BSONArrayBuilder resultBuilder(result.subarrayStart("results"));

            // Start the count at 1 MB to leave room in the response for fields other than the results.
            int resultSize = 1024 * 1024;

            int numReturned = 0;

            BSONObj obj;
            while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
                if ((resultSize + obj.objsize()) >= BSONObjMaxUserSize) {
                    break;
                }
                // We return an array of results.  Add another element.
                BSONObjBuilder oneResultBuilder(resultBuilder.subobjStart());
                oneResultBuilder.append("score", obj["$s"].number());

                // Strip out the score from the returned obj.
                BSONObjIterator resIt(obj);
                BSONObjBuilder resBob;
                while (resIt.more()) {
                    BSONElement elt = resIt.next();
                    if (!mongoutils::str::equals("$s", elt.fieldName())) {
                        resBob.append(elt);
                    }
                }
                oneResultBuilder.append("obj", resBob.obj());
                BSONObj addedArrayObj = oneResultBuilder.done();
                resultSize += addedArrayObj.objsize();
                numReturned++;
            }

            resultBuilder.done();

            // returns some stats to the user
            BSONObjBuilder stats(result.subobjStart("stats"));

            // Fill in nscanned from the explain.
            PlanSummaryStats summary;
            Explain::getSummaryStats(exec.get(), &summary);
            stats.appendNumber("nscanned", summary.totalKeysExamined);
            stats.appendNumber("nscannedObjects", summary.totalDocsExamined);

            stats.appendNumber( "n" , numReturned );
            stats.append( "timeMicros", (int)comm.micros() );
            stats.done();

            return true;
        }
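Concretely, for a hypothetical call with searchString "coffee", an empty filter, and an empty projection, the rewrite above produces roughly the following query, sort, and projection objects (a sketch using the same BSON macros; "textScore" is assumed to be the string behind LiteParsedQuery::metaTextScore):

// Sketch of the effective query/sort/projection for searchString "coffee".
BSONObj queryObj = BSON("$text" << BSON("$search" << "coffee"));
BSONObj sortSpec = BSON("$s" << BSON("$meta" << "textScore"));
BSONObj projObj = sortSpec;  // the projection was empty, so only the score meta-projection remains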
Example #27
            virtual bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
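                // Legacy mongos getLastError: aggregate GLE results from every shard touched by
                // the previous operation on this client, collecting writebacks along the way.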
                LastError *le = lastError.disableForCommand();
                {
                    assert( le );
                    if ( le->msg.size() && le->nPrev == 1 ){
                        le->appendSelf( result );
                        return true;
                    }
                }
                
                ClientInfo * client = ClientInfo::get();
                set<string> * shards = client->getPrev();
                
                if ( shards->size() == 0 ){
                    result.appendNull( "err" );
                    return true;
                }

                //log() << "getlasterror enter: " << shards->size() << endl;


                vector<OID> writebacks;
                
                // handle single server
                if ( shards->size() == 1 ){
                    string theShard = *(shards->begin() );
                    result.append( "theshard" , theShard.c_str() );
                    ShardConnection conn( theShard , "" );
                    BSONObj res;
                    bool ok = conn->runCommand( dbName , cmdObj , res );
                    //log() << "\t" << res << endl;
                    result.appendElements( res );
                    conn.done();
                    result.append( "singleShard" , theShard );
                    addWriteBack( writebacks , res );
                    
                    // hit other machines just to block
                    for ( set<string>::const_iterator i=client->sinceLastGetError().begin(); i!=client->sinceLastGetError().end(); ++i ){
                        string temp = *i;
                        if ( temp == theShard )
                            continue;
                        
                        ShardConnection conn( temp , "" );
                        addWriteBack( writebacks , conn->getLastErrorDetailed() );
                        conn.done();
                    }
                    client->clearSinceLastGetError();
                    handleWriteBacks( writebacks );
                    return ok;
                }
                
                BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );

                long long n = 0;

                // hit each shard
                vector<string> errors;
                vector<BSONObj> errorObjects;
                for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ){
                    string theShard = *i;
                    bbb.append( theShard );
                    ShardConnection conn( theShard , "" );
                    BSONObj res;
                    bool ok = conn->runCommand( dbName , cmdObj , res );
                    addWriteBack( writebacks, res );
                    string temp = DBClientWithCommands::getLastErrorString( res );
                    if ( ok == false || temp.size() ){
                        errors.push_back( temp );
                        errorObjects.push_back( res );
                    }
                    n += res["n"].numberLong();
                    conn.done();
                }
                
                bbb.done();
                
                result.appendNumber( "n" , n );

                // hit other machines just to block
                for ( set<string>::const_iterator i=client->sinceLastGetError().begin(); i!=client->sinceLastGetError().end(); ++i ){
                    string temp = *i;
                    if ( shards->count( temp ) )
                        continue;
                    
                    ShardConnection conn( temp , "" );
                    addWriteBack( writebacks, conn->getLastErrorDetailed() );
                    conn.done();
                }
                client->clearSinceLastGetError();

                if ( errors.size() == 0 ){
                    result.appendNull( "err" );
                    handleWriteBacks( writebacks );
                    return true;
                }
                
                result.append( "err" , errors[0].c_str() );
                
                { // errs
                    BSONArrayBuilder all( result.subarrayStart( "errs" ) );
                    for ( unsigned i=0; i<errors.size(); i++ ){
                        all.append( errors[i].c_str() );
                    }
                    all.done();
                }

                { // errObjects
                    BSONArrayBuilder all( result.subarrayStart( "errObjects" ) );
                    for ( unsigned i=0; i<errorObjects.size(); i++ ){
                        all.append( errorObjects[i] );
                    }
                    all.done();
                }
                handleWriteBacks( writebacks );
                return true;
            }
Example #28
        bool FTSCommand::_run(const string& dbName,
                              BSONObj& cmdObj,
                              int cmdOptions,
                              const string& ns,
                              const string& searchString,
                              string language, // "" for not-set
                              int limit,
                              BSONObj& filter,
                              BSONObj& projection,
                              string& errmsg,
                              BSONObjBuilder& result ) {

            Timer timer;

            map<Shard, BSONObj> results;
            SHARDED->commandOp( dbName, cmdObj, cmdOptions, ns, filter, results );
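            // One raw reply per shard that the filter targets; each reply is merged below.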

            vector<Scored> all;
            long long nscanned = 0;
            long long nscannedObjects = 0;

            BSONObjBuilder shardStats;

            for ( map<Shard,BSONObj>::const_iterator i = results.begin(); i != results.end(); ++i ) {
                BSONObj r = i->second;

                LOG(2) << "fts result for shard: " << i->first << "\n" << r << endl;

                if ( !r["ok"].trueValue() ) {
                    errmsg = str::stream() << "failure on shard: " << i->first.toString()
                                           << ": " << r["errmsg"];
                    result.append( "rawresult", r );
                    return false;
                }

                if ( r["stats"].isABSONObj() ) {
                    BSONObj x = r["stats"].Obj();
                    nscanned += x["nscanned"].numberLong();
                    nscannedObjects += x["nscannedObjects"].numberLong();

                    shardStats.append( i->first.getName(), x );
                }

                if ( r["results"].isABSONObj() ) {
                    BSONObjIterator j( r["results"].Obj() );
                    while ( j.more() ) {
                        BSONElement e = j.next();
                        all.push_back( Scored(e.Obj()) );
                    }
                }
            }

            sort( all.begin(), all.end() );
            long long n = 0;
            {
                BSONArrayBuilder arr( result.subarrayStart( "results" ) );
                for ( unsigned i = 0; i < all.size(); i++ ) {
                    arr.append( all[i].full );
                    if ( ++n >= limit )
                        break;
                }
                arr.done();
            }

            {
                BSONObjBuilder stats( result.subobjStart( "stats" ) );
                stats.appendNumber( "nscanned", nscanned );
                stats.appendNumber( "nscannedObjects", nscannedObjects );
                stats.appendNumber( "n", n );
                stats.append( "timeMicros", (int)timer.micros() );

                stats.append( "shards", shardStats.obj() );

                stats.done();
            }

            return true;
        }
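Each element consumed from r["results"] above has the shape produced by the mongod-side command shown in Example #26 (a score plus the document with the score field stripped); a sketch with illustrative values:

// Illustrative shape of one per-shard result element merged above.
BSONObj oneResult = BSON("score" << 1.5
                         << "obj" << BSON("_id" << 1 << "title" << "coffee shop"));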
StatusWith<InsertGroup::ConstIterator> InsertGroup::groupAndApplyInserts(ConstIterator it) {
    const auto& entry = **it;

    // The following conditions must be met before attempting to group the oplog entries starting
    // at 'it':
    // 1) The CRUD operation must be an insert;
    // 2) The namespace that we are inserting into cannot be a capped collection;
    // 3) We have not attempted to group this insert during a previous call to this function.
    if (entry.getOpType() != OpTypeEnum::kInsert) {
        return Status(ErrorCodes::TypeMismatch, "Can only group insert operations.");
    }
    if (entry.isForCappedCollection) {
        return Status(ErrorCodes::InvalidOptions,
                      "Cannot group insert operations on capped collections.");
    }
    if (it <= _doNotGroupBeforePoint) {
        return Status(ErrorCodes::InvalidPath,
                      "Cannot group an insert operation that we previously attempted to group.");
    }

    // Attempt to group 'insert' ops if possible.
    std::vector<BSONObj> toInsert;

    // Make sure to include the first op in the batch size.
    auto batchSize = entry.getObject().objsize();
    auto batchCount = OperationPtrs::size_type(1);
    auto batchNamespace = entry.getNss();

    /**
     * Search for the op that delimits this insert batch, and save its position
     * in endOfGroupableOpsIterator. For example, given the following list of oplog
     * entries with a sequence of groupable inserts:
     *
     *                S--------------E
     *       u, u, u, i, i, i, i, i, d, d
     *
     *       S: start of insert group
     *       E: end of groupable ops
     *
     * E is the position of endOfGroupableOpsIterator. i.e. endOfGroupableOpsIterator
     * will point to the first op that *can't* be added to the current insert group.
     */
    auto endOfGroupableOpsIterator =
        std::find_if(it + 1, _end, [&](const OplogEntry* nextEntry) -> bool {
            auto opNamespace = nextEntry->getNss();
            batchSize += nextEntry->getObject().objsize();
            batchCount += 1;

            // Only add the op to this batch if it passes the criteria.
            return nextEntry->getOpType() != OpTypeEnum::kInsert  // Must be an insert.
                || opNamespace != batchNamespace                  // Must be in the same namespace.
                || batchSize > kInsertGroupMaxBatchSize    // Must not create too large an object.
                || batchCount > kInsertGroupMaxBatchCount;  // Limit number of ops in a single group.
        });

    // See if we were able to create a group that contains more than a single op.
    if (std::distance(it, endOfGroupableOpsIterator) == 1) {
        return Status(ErrorCodes::NoSuchKey,
                      "Not able to create a group with more than a single insert operation");
    }

    // Since we found more than one document, create grouped insert of many docs.
    // We are going to group many 'i' ops into one big 'i' op, with array fields for
    // 'ts', 't', and 'o', corresponding to each individual op.
    // For example:
    // { ts: Timestamp(1,1), t:1, ns: "test.foo", op:"i", o: {_id:1} }
    // { ts: Timestamp(1,2), t:1, ns: "test.foo", op:"i", o: {_id:2} }
    // become:
    // { ts: [Timestamp(1, 1), Timestamp(1, 2)],
    //    t: [1, 1],
    //    o: [{_id: 1}, {_id: 2}],
    //   ns: "test.foo",
    //   op: "i" }
    BSONObjBuilder groupedInsertBuilder;

    // Populate the "ts" field with an array of all the grouped inserts' timestamps.
    {
        BSONArrayBuilder tsArrayBuilder(groupedInsertBuilder.subarrayStart("ts"));
        for (auto groupingIt = it; groupingIt != endOfGroupableOpsIterator; ++groupingIt) {
            tsArrayBuilder.append((*groupingIt)->getTimestamp());
        }
    }

    // Populate the "t" (term) field with an array of all the grouped inserts' terms.
    {
        BSONArrayBuilder tArrayBuilder(groupedInsertBuilder.subarrayStart("t"));
        for (auto groupingIt = it; groupingIt != endOfGroupableOpsIterator; ++groupingIt) {
            auto parsedTerm = (*groupingIt)->getTerm();
            long long term = OpTime::kUninitializedTerm;
            // Term may not be present (protocol version 0).
            if (parsedTerm) {
                term = parsedTerm.get();
            }
            tArrayBuilder.append(term);
        }
    }

    // Populate the "o" field with an array of all the grouped inserts.
    {
        BSONArrayBuilder oArrayBuilder(groupedInsertBuilder.subarrayStart("o"));
        for (auto groupingIt = it; groupingIt != endOfGroupableOpsIterator; ++groupingIt) {
            oArrayBuilder.append((*groupingIt)->getObject());
        }
    }

    // Generate an op object of all elements except for "ts", "t", and "o", since we
    // need to make those fields arrays of all the ts's, t's, and o's.
    groupedInsertBuilder.appendElementsUnique(entry.raw);

    auto groupedInsertObj = groupedInsertBuilder.done();
    try {
        // Apply the group of inserts.
        uassertStatusOK(SyncTail::syncApply(_opCtx, groupedInsertObj, _mode));
        // It succeeded: return the position of the last op applied as part of the
        // group so the caller can advance its iterator past the entire group of inserts.
        return endOfGroupableOpsIterator - 1;
    } catch (...) {
        // The group insert failed, log an error and fall through to the
        // application of an individual op.
        auto status = exceptionToStatus().withContext(
            str::stream() << "Error applying inserts in bulk: " << redact(groupedInsertObj)
                          << ". Trying first insert as a lone insert: "
                          << redact(entry.raw));

        // It's not an error during initial sync to encounter DuplicateKey errors.
        if (Mode::kInitialSync == _mode && ErrorCodes::DuplicateKey == status) {
            LOG(2) << status;
        } else {
            error() << status;
        }

        // Avoid quadratic run time from failed insert by not retrying until we
        // are beyond this group of ops.
        _doNotGroupBeforePoint = endOfGroupableOpsIterator - 1;

        return status;
    }

    MONGO_UNREACHABLE;
}
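A minimal sketch of how a caller might drive groupAndApplyInserts over a batch of oplog entries; the InsertGroup constructor arguments and the surrounding loop are assumptions inferred from the return-value contract above, not code from this file:

// Hypothetical caller: let the group apply runs of inserts, and fall back to
// per-op application when grouping is not possible ('ops', 'opCtx', and 'mode'
// are assumed to be in scope).
InsertGroup insertGroup(&ops, opCtx, mode);
for (auto it = ops.cbegin(); it != ops.cend(); ++it) {
    auto groupResult = insertGroup.groupAndApplyInserts(it);
    if (groupResult.isOK()) {
        // The whole group was applied; the returned iterator points at the last
        // grouped op, so the loop's ++it moves past the entire group.
        it = groupResult.getValue();
        continue;
    }
    // Otherwise apply **it as an individual operation (not shown).
}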
Example #30
    bool run(OperationContext* txn,
             const string& dbname,
             BSONObj& cmdObj,
             int,
             string& errmsg,
             BSONObjBuilder& result) {
        BSONElement argElt = cmdObj["stageDebug"];
        if (argElt.eoo() || !argElt.isABSONObj()) {
            return false;
        }
        BSONObj argObj = argElt.Obj();

        // Pull out the collection name.
        BSONElement collElt = argObj["collection"];
        if (collElt.eoo() || (String != collElt.type())) {
            return false;
        }

        const NamespaceString nss(dbname, collElt.String());
        uassert(ErrorCodes::InvalidNamespace,
                str::stream() << nss.toString() << " is not a valid namespace",
                nss.isValid());

        // Need a context to get the actual Collection*
        // TODO A write lock is currently taken here to accommodate stages that perform writes
        //      (e.g. DeleteStage).  This should be changed to use a read lock for read-only
        //      execution trees.
        ScopedTransaction transaction(txn, MODE_IX);
        AutoGetCollection autoColl(txn, nss, MODE_IX);

        // Make sure the collection is valid.
        Collection* collection = autoColl.getCollection();
        uassert(ErrorCodes::NamespaceNotFound,
                str::stream() << "Couldn't find collection " << nss.ns(),
                collection);

        // Pull out the plan
        BSONElement planElt = argObj["plan"];
        if (planElt.eoo() || !planElt.isABSONObj()) {
            return false;
        }
        BSONObj planObj = planElt.Obj();

        // Parse the plan into these.
        OwnedPointerVector<MatchExpression> exprs;
        unique_ptr<WorkingSet> ws(new WorkingSet());

        PlanStage* userRoot = parseQuery(txn, collection, planObj, ws.get(), &exprs);
        uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), NULL != userRoot);

        // Add a fetch at the top for the user so we can get obj back for sure.
        // TODO: Do we want to do this for the user?  I think so.
        unique_ptr<PlanStage> rootFetch =
            make_unique<FetchStage>(txn, ws.get(), userRoot, nullptr, collection);

        auto statusWithPlanExecutor = PlanExecutor::make(
            txn, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO);
        fassert(28536, statusWithPlanExecutor.getStatus());
        std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());

        BSONArrayBuilder resultBuilder(result.subarrayStart("results"));

        BSONObj obj;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
            resultBuilder.append(obj);
        }

        resultBuilder.done();

        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            error() << "Plan executor error during StageDebug command: "
                    << PlanExecutor::statestr(state)
                    << ", stats: " << Explain::getWinningPlanStats(exec.get());

            return appendCommandStatus(
                result,
                Status(ErrorCodes::OperationFailed,
                       str::stream()
                           << "Executor error during "
                           << "StageDebug command: " << WorkingSetCommon::toStatusString(obj)));
        }

        return true;
    }
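For reference, a sketch of a command document this handler accepts; the "stageDebug", "collection", and "plan" keys come from the parsing above, while the inner stage specification is a placeholder since parseQuery() is not shown here:

// Illustrative command object; the inner "someStage" spec is hypothetical.
BSONObj cmdObj = BSON("stageDebug" << BSON("collection" << "foo"
                                           << "plan" << BSON("someStage" << BSONObj())));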