Example #1
        BSONObj FTSSpec::fixSpec( const BSONObj& spec ) {
            if ( spec["textIndexVersion"].numberInt() == TEXT_INDEX_VERSION_1 ) {
                return _fixSpecV1( spec );
            }

            map<string,int> m;

            BSONObj keyPattern;
            {
                BSONObjBuilder b;

                // Populate m and keyPattern.
                {
                    bool addedFtsStuff = false;
                    BSONObjIterator i( spec["key"].Obj() );
                    while ( i.more() ) {
                        BSONElement e = i.next();
                        if ( str::equals( e.fieldName(), "_fts" ) ) {
                            uassert( 17271,
                                     "expecting _fts:\"text\"",
                                     INDEX_NAME == e.valuestrsafe() );
                            addedFtsStuff = true;
                            b.append( e );
                        }
                        else if ( str::equals( e.fieldName(), "_ftsx" ) ) {
                            uassert( 17272, "expecting _ftsx:1", e.numberInt() == 1 );
                            b.append( e );
                        }
                        else if ( e.type() == String && INDEX_NAME == e.valuestr() ) {

                            if ( !addedFtsStuff ) {
                                _addFTSStuff( &b );
                                addedFtsStuff = true;
                            }

                            m[e.fieldName()] = 1;
                        }
                        else {
                            uassert( 17273,
                                     "expected value 1 or -1 for non-text key in compound index",
                                     e.numberInt() == 1 || e.numberInt() == -1 );
                            b.append( e );
                        }
                    }
                    verify( addedFtsStuff );
                }
                keyPattern = b.obj();

                // Verify that index key is in the correct format: extraBefore fields, then text
                // fields, then extraAfter fields.
                {
                    BSONObjIterator i( spec["key"].Obj() );
                    BSONElement e;

                    // extraBefore fields
                    do {
                        verify( i.more() );
                        e = i.next();
                    } while ( INDEX_NAME != e.valuestrsafe() );

                    // text fields
                    bool alreadyFixed = str::equals( e.fieldName(), "_fts" );
                    if ( alreadyFixed ) {
                        uassert( 17288, "expected _ftsx after _fts", i.more() );
                        e = i.next();
                        uassert( 17274,
                                 "expected _ftsx after _fts",
                                 str::equals( e.fieldName(), "_ftsx" ) );
                        e = i.next();
                    }
                    else {
                        do {
                            uassert( 17289,
                                     "text index with reserved fields _fts/ftsx not allowed",
                                     !str::equals( e.fieldName(), "_fts" ) &&
                                         !str::equals( e.fieldName(), "_ftsx" ) );
                            e = i.next();
                        } while ( !e.eoo() && INDEX_NAME == e.valuestrsafe() );
                    }

                    // extraAfter fields
                    while ( !e.eoo() ) {
                        uassert( 17290,
                                 "compound text index key suffix fields must have value 1",
                                 e.numberInt() == 1 && !str::equals( "_ftsx", e.fieldName() ) );
                        e = i.next();
                    }
                }

            }

            if ( spec["weights"].type() == Object ) {
                BSONObjIterator i( spec["weights"].Obj() );
                while ( i.more() ) {
                    BSONElement e = i.next();
                    uassert( 17283,
                             "weight for text index needs numeric type",
                             e.isNumber() );
                    m[e.fieldName()] = e.numberInt();

                    // Verify weight refers to a valid field.
                    if ( str::equals( e.fieldName(), "$**" ) ) {
                        continue;
                    }
                    FieldRef keyField( e.fieldName() );
                    uassert( 17294,
                             "weight cannot be on an empty field",
                             keyField.numParts() != 0 );
                    for ( size_t i = 0; i < keyField.numParts(); i++ ) {
                        StringData part = keyField.getPart(i);
                        uassert( 17291, "weight cannot have empty path component", !part.empty() );
                        uassert( 17292,
                                 "weight cannot have path component with $ prefix",
                                 !part.startsWith( "$" ) );
                    }
                }
            }
            else if ( spec["weights"].str() == WILDCARD ) {
                m[WILDCARD] = 1;
            }
            else if ( !spec["weights"].eoo() ) {
                uasserted( 17284, "text index option 'weights' must be an object" );
            }

            BSONObj weights;
            {
                BSONObjBuilder b;
                for ( map<string,int>::iterator i = m.begin(); i != m.end(); ++i ) {
                    uassert( 16674, "score for word too high",
                             i->second > 0 && i->second < MAX_WORD_WEIGHT );
                    b.append( i->first, i->second );
                }
                weights = b.obj();
            }

            BSONElement default_language_elt = spec["default_language"];
            string default_language( default_language_elt.str() );
            if ( default_language_elt.eoo() ) {
                default_language = moduleDefaultLanguage;
            }
            else {
                uassert( 17263,
                         "default_language needs a string type",
                         default_language_elt.type() == String );
            }
            uassert( 17264,
                     "default_language is not valid",
                     FTSLanguage::make( default_language,
                                        TEXT_INDEX_VERSION_2 ).getStatus().isOK() );

            BSONElement language_override_elt = spec["language_override"];
            string language_override( language_override_elt.str() );
            if ( language_override_elt.eoo() ) {
                language_override = "language";
            }
            else {
                uassert( 17136,
                         "language_override is not valid",
                         language_override_elt.type() == String
                             && validateOverride( language_override ) );
            }

            int version = -1;
            int textIndexVersion = TEXT_INDEX_VERSION_2;

            BSONObjBuilder b;
            BSONObjIterator i( spec );
            while ( i.more() ) {
                BSONElement e = i.next();
                if ( str::equals( e.fieldName(), "key" ) ) {
                    b.append( "key", keyPattern );
                }
                else if ( str::equals( e.fieldName(), "weights" ) ) {
                    b.append( "weights", weights );
                    weights = BSONObj();
                }
                else if ( str::equals( e.fieldName(), "default_language" ) ) {
                    b.append( "default_language", default_language);
                    default_language = "";
                }
                else if ( str::equals( e.fieldName(), "language_override" ) ) {
                    b.append( "language_override", language_override);
                    language_override = "";
                }
                else if ( str::equals( e.fieldName(), "v" ) ) {
                    version = e.numberInt();
                }
                else if ( str::equals( e.fieldName(), "textIndexVersion" ) ) {
                    uassert( 17293,
                             "text index option 'textIndexVersion' must be a number",
                             e.isNumber() );
                    textIndexVersion = e.numberInt();
                    uassert( 16730,
                             str::stream() << "bad textIndexVersion: " << textIndexVersion,
                             textIndexVersion == TEXT_INDEX_VERSION_2 );
                }
                else {
                    b.append( e );
                }
            }

            if ( !weights.isEmpty() ) {
                b.append( "weights", weights );
            }
            if ( !default_language.empty() ) {
                b.append( "default_language", default_language);
            }
            if ( !language_override.empty() ) {
                b.append( "language_override", language_override);
            }
            if ( version >= 0 ) {
                b.append( "v", version );
            }
            b.append( "textIndexVersion", textIndexVersion );

            return b.obj();
        }
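A minimal usage sketch (hedged: the spec literal is illustrative, and fixSpec is assumed to be a static member, as the qualified definition above suggests):

            // Illustrative only: normalize a user-supplied text index spec.
            // A key of {comments: "text"} comes back in the canonical
            // {_fts: "text", _ftsx: 1} form, with a weights map and the
            // language defaults filled in.
            BSONObj userSpec = BSON( "key" << BSON( "comments" << "text" ) );
            BSONObj fixed = FTSSpec::fixSpec( userSpec );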
Example #2
        void run(){

            BSONObj bound = BSON( "a" << 55 );
            BSONObj longBound = BSON("a" << 55 << "b" << 66);

            //test keyPattern shorter than bound, should fail
            {
                BSONObj keyPat = BSONObj();
                ASSERT_THROWS( Helpers::modifiedRangeBound( bound , keyPat , 1 ) ,
                               MsgAssertionException );

            }

            //test keyPattern doesn't match bound, should fail
            {
                BSONObj keyPat = BSON( "b" << 1 );
                ASSERT_THROWS( Helpers::modifiedRangeBound( bound , keyPat , 1 ) ,
                               MsgAssertionException );
            }
            {
                BSONObj keyPat = BSON( "a" << 1 << "c" << 1);
                ASSERT_THROWS( Helpers::modifiedRangeBound( longBound , keyPat , 1 ) ,
                               MsgAssertionException );

            }

            //test keyPattern same as bound
            {
                BSONObj keyPat = BSON( "a" << 1 );
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , -1 );
                ASSERT_EQUALS( newB , BSON("" << 55) );
            }
            {
                BSONObj keyPat = BSON( "a" << 1 );
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , 1 );
                ASSERT_EQUALS( newB , BSON("" << 55) );
            }

            //test keyPattern longer than bound, simple
            {
                BSONObj keyPat = BSON( "a" << 1 << "b" << 1);
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , -1 );
                ASSERT_EQUALS( newB , BSON("" << 55 << "" << MINKEY ) );
            }
            {
                BSONObj keyPat = BSON( "a" << 1 << "b" << 1);
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , 1 );
                ASSERT_EQUALS( newB , BSON("" << 55 << "" << MAXKEY ) );
            }

            //test keyPattern longer than bound, more complex pattern directions
            {
                BSONObj keyPat = BSON( "a" << 1 << "b" << -1);
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , -1 );
                ASSERT_EQUALS( newB , BSON("" << 55 << "" << MAXKEY ) );
            }
            {
                BSONObj keyPat = BSON( "a" << 1 << "b" << -1);
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , 1 );
                ASSERT_EQUALS( newB , BSON("" << 55 << "" << MINKEY ) );
            }
            {

                BSONObj keyPat = BSON( "a" << 1 << "b" << -1 << "c" << 1 );
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , -1 );
                ASSERT_EQUALS( newB , BSON("" << 55 << "" << MAXKEY << "" << MINKEY ) );
            }
            {
                BSONObj keyPat = BSON( "a" << 1 << "b" << -1 << "c" << 1 );
                BSONObj newB = Helpers::modifiedRangeBound( bound , keyPat , 1 );
                ASSERT_EQUALS( newB , BSON("" << 55 << "" << MINKEY << "" << MAXKEY ) );
            }

        }
Example #3
File: config.cpp Project: cangove/mongo
    bool DBConfig::dropDatabase( string& errmsg ) {
        /**
         * 1) make sure everything is up
         * 2) update config server
         * 3) drop and reset sharded collections
         * 4) drop and reset primary
         * 5) drop everywhere to clean up loose ends
         */

        log() << "DBConfig::dropDatabase: " << _name << endl;
        configServer.logChange( "dropDatabase.start" , _name , BSONObj() );

        // 1
        if ( ! configServer.allUp( errmsg ) ) {
            log(1) << "\t DBConfig::dropDatabase not all up" << endl;
            return false;
        }

        // 2
        grid.removeDB( _name );
        {
            ScopedDbConnection conn( configServer.modelServer(), 30.0 );
            conn->remove( ShardNS::database , BSON( "_id" << _name ) );
            errmsg = conn->getLastError();
            if ( ! errmsg.empty() ) {
                log() << "could not drop '" << _name << "': " << errmsg << endl;
                conn.done();
                return false;
            }

            conn.done();
        }

        if ( ! configServer.allUp( errmsg ) ) {
            log() << "error removing from config server even after checking!" << endl;
            return false;
        }
        log(1) << "\t removed entry from config server for: " << _name << endl;

        set<Shard> allServers;

        // 3
        while ( true ) {
            int num = 0;
            if ( ! _dropShardedCollections( num , allServers , errmsg ) )
                return false;
            log() << "   DBConfig::dropDatabase: " << _name << " dropped sharded collections: " << num << endl;
            if ( num == 0 )
                break;
        }

        // 4
        {
            ScopedDbConnection conn( _primary, 30.0 );
            BSONObj res;
            if ( ! conn->dropDatabase( _name , &res ) ) {
                errmsg = res.toString();
                return false;
            }
            conn.done();
        }

        // 5
        for ( set<Shard>::iterator i=allServers.begin(); i!=allServers.end(); i++ ) {
            ScopedDbConnection conn( *i, 30.0 );
            BSONObj res;
            if ( ! conn->dropDatabase( _name , &res ) ) {
                errmsg = res.toString();
                return false;
            }
            conn.done();
        }

        log(1) << "\t dropped primary db for: " << _name << endl;

        configServer.logChange( "dropDatabase" , _name , BSONObj() );
        return true;
    }
Example #4
 BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
     vector<BSONObj> v;
     findN(v, ns, query, 1, 0, fieldsToReturn, queryOptions);
     return v.empty() ? BSONObj() : v[0];
 }
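A hedged usage sketch of the wrapper above (host, namespace, and query are illustrative; QUERY is the legacy client macro):

     // Illustrative only: fetch one matching document; an empty BSONObj means no match.
     DBClientConnection conn;
     conn.connect("localhost:27017"); // hypothetical address
     BSONObj doc = conn.findOne("test.users", QUERY("name" << "alice"));
     if (doc.isEmpty()) { /* no match */ }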
Example #5
Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const NamespaceString& ns) {
    logChange(
        txn, txn->getClient()->clientAddress(true), "dropCollection.start", ns.ns(), BSONObj());

    vector<ShardType> allShards;
    Status status = getAllShards(txn, &allShards);
    if (!status.isOK()) {
        return status;
    }

    LOG(1) << "dropCollection " << ns << " started";

    // Lock the collection globally so that split/migrate cannot run
    auto scopedDistLock = getDistLockManager()->lock(ns.ns(), "drop");
    if (!scopedDistLock.isOK()) {
        return scopedDistLock.getStatus();
    }

    LOG(1) << "dropCollection " << ns << " locked";

    std::map<string, BSONObj> errors;
    auto* shardRegistry = grid.shardRegistry();

    for (const auto& shardEntry : allShards) {
        auto dropResult = shardRegistry->runCommandWithNotMasterRetries(
            txn, shardEntry.getName(), ns.db().toString(), BSON("drop" << ns.coll()));

        if (!dropResult.isOK()) {
            return dropResult.getStatus();
        }

        auto dropStatus = getStatusFromCommandResult(dropResult.getValue());
        if (!dropStatus.isOK()) {
            if (dropStatus.code() == ErrorCodes::NamespaceNotFound) {
                continue;
            }

            errors.emplace(shardEntry.getHost(), dropResult.getValue());
        }
    }

    if (!errors.empty()) {
        StringBuilder sb;
        sb << "Dropping collection failed on the following hosts: ";

        for (auto it = errors.cbegin(); it != errors.cend(); ++it) {
            if (it != errors.cbegin()) {
                sb << ", ";
            }

            sb << it->first << ": " << it->second;
        }

        return {ErrorCodes::OperationFailed, sb.str()};
    }

    LOG(1) << "dropCollection " << ns << " shard data deleted";

    // Remove chunk data
    Status result = remove(txn, ChunkType::ConfigNS, BSON(ChunkType::ns(ns.ns())), 0, nullptr);
    if (!result.isOK()) {
        return result;
    }

    LOG(1) << "dropCollection " << ns << " chunk data deleted";

    // Mark the collection as dropped
    CollectionType coll;
    coll.setNs(ns);
    coll.setDropped(true);
    coll.setEpoch(ChunkVersion::DROPPED().epoch());
    coll.setUpdatedAt(grid.shardRegistry()->getNetwork()->now());

    result = updateCollection(txn, ns.ns(), coll);
    if (!result.isOK()) {
        return result;
    }

    LOG(1) << "dropCollection " << ns << " collection marked as dropped";

    // We just called updateCollection above and this would have advanced the config op time, so use
    // the latest value. On the MongoD side, we need to load the latest config metadata, which
    // indicates that the collection was dropped.
    const ChunkVersionAndOpTime droppedVersion(ChunkVersion::DROPPED(),
                                               grid.shardRegistry()->getConfigOpTime());

    for (const auto& shardEntry : allShards) {
        SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioningNoPersist(
            grid.shardRegistry()->getConfigServerConnectionString(),
            shardEntry.getName(),
            fassertStatusOK(28781, ConnectionString::parse(shardEntry.getHost())),
            ns,
            droppedVersion,
            true);

        auto ssvResult = shardRegistry->runCommandWithNotMasterRetries(
            txn, shardEntry.getName(), "admin", ssv.toBSON());

        if (!ssvResult.isOK()) {
            return ssvResult.getStatus();
        }

        auto ssvStatus = getStatusFromCommandResult(ssvResult.getValue());
        if (!ssvStatus.isOK()) {
            return ssvStatus;
        }

        auto unsetShardingStatus = shardRegistry->runCommandWithNotMasterRetries(
            txn, shardEntry.getName(), "admin", BSON("unsetSharding" << 1));

        if (!unsetShardingStatus.isOK()) {
            return unsetShardingStatus.getStatus();
        }

        auto unsetShardingResult = getStatusFromCommandResult(unsetShardingStatus.getValue());
        if (!unsetShardingResult.isOK()) {
            return unsetShardingResult;
        }
    }

    LOG(1) << "dropCollection " << ns << " completed";

    logChange(txn, txn->getClient()->clientAddress(true), "dropCollection", ns.ns(), BSONObj());

    return Status::OK();
}
Example #6
File: parallel.cpp Project: hongz1/mongo
BSONObj ParallelConnectionMetadata::toBSON() const {
    return BSON("state" << (pcState ? pcState->toBSON() : BSONObj()) << "retryNext" << retryNext
                        << "init" << initialized << "finish" << finished << "errored" << errored);
}
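For reference, a hedged sketch of the document shape this produces (values illustrative, assuming a default-constructed object):

    // Illustrative only:
    ParallelConnectionMetadata md;
    // md.toBSON() yields something like:
    // { state: {}, retryNext: false, init: false, finish: false, errored: false }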
Example #7
   INT32 _rtnIXScanner::advance ( dmsRecordID &rid, BOOLEAN isReadOnly )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY ( SDB__RTNIXSCAN_ADVANCE ) ;
      monAppCB * pMonAppCB = _cb ? _cb->getMonAppCB() : NULL ;

   begin:
      SDB_ASSERT ( _indexCB, "_indexCB can't be NULL, call resumeScan first" ) ;
      if ( !_init )
      {
         dmsExtentID rootExtent = _indexCB->getRoot() ;
         ixmExtent root ( rootExtent, _su->index() ) ;
         rc = root.keyLocate ( _curIndexRID, BSONObj(),0,FALSE,
                               _listIterator.cmp(), _listIterator.inc(),
                               _order, _direction, _cb ) ;
         if ( rc )
         {
            PD_LOG ( PDERROR, "failed to locate first key, rc: %d", rc ) ;
            goto error ;
         }
         _init = TRUE ;
      }
      else
      {
         if ( _curIndexRID.isNull() )
         {
            return SDB_IXM_EOC ;
         }
         ixmExtent indexExtent ( _curIndexRID._extent, _su->index() ) ;


         if ( _savedRID.isNull() )
         {
            rc = indexExtent.advance ( _curIndexRID, _direction ) ;
            if ( rc )
            {
               PD_LOG ( PDERROR, "Failed to advance to next key, rc: %d", rc ) ;
               goto error ;
            }
         }
         else if ( !isReadOnly )
         {
            dmsRecordID onDiskRID = indexExtent.getRID (
                  _curIndexRID._slot ) ;
            if ( onDiskRID.isNull() )
            {
               rc = relocateRID () ;
               if ( rc )
               {
                  PD_LOG ( PDERROR, "Failed to relocate RID, rc: %d", rc ) ;
                  goto error ;
               }
            }
            else if ( onDiskRID == _savedRID )
            {
               CHAR *dataBuffer = indexExtent.getKeyData ( _curIndexRID._slot );
               if ( dataBuffer )
               {
                  try
                  {
                     _curKeyObj = ixmKey(dataBuffer).toBson() ;
                     DMS_MON_OP_COUNT_INC( pMonAppCB, MON_INDEX_READ, 1 ) ;
                     DMS_MON_CONTEXT_COUNT_INC ( _pMonCtxCB, MON_INDEX_READ, 1 ) ;
                     if ( _curKeyObj.shallowEqual(_savedObj) )
                     {
                        rc = indexExtent.advance ( _curIndexRID, _direction ) ;
                        if ( rc )
                        {
                           PD_LOG ( PDERROR, "Failed to advance, rc: %d", rc ) ;
                           goto error ;
                        }
                     }
                     else
                     {
                        rc = relocateRID () ;
                        if ( rc )
                        {
                           PD_LOG ( PDERROR, "Failed to relocate RID, rc: :%d",
                                    rc ) ;
                           goto error ;
                        }
                     }
                  }
                  catch ( std::exception &e )
                  {
                     PD_LOG ( PDERROR, "Failed to convert buffer to bson from "
                              "current rid: %d,%d: %s", _curIndexRID._extent,
                              _curIndexRID._slot, e.what() ) ;
                     rc = SDB_SYS ;
                     goto error ;
                  }
               } // if ( dataBuffer )
               else
               {
                  PD_LOG ( PDERROR, "Unable to get buffer" ) ;
                  rc = SDB_SYS ;
                  goto error ;
               }
            } // if ( onDiskRID == _savedRID )
            else
            {
               rc = relocateRID () ;
               if ( rc )
               {
                  PD_LOG ( PDERROR, "Failed to relocate RID, rc: %d", rc ) ;
                  goto error ;
               }
            }
         } // if ( !isReadOnly )
         else
         {
            _savedRID.reset() ;
         }
      }
      while ( TRUE )
      {
         if ( _curIndexRID.isNull() )
         {
            return SDB_IXM_EOC ;
         }
         ixmExtent indexExtent ( _curIndexRID._extent, _su->index() ) ;
         CHAR *dataBuffer = indexExtent.getKeyData ( _curIndexRID._slot ) ;
         if ( !dataBuffer )
         {
            PD_LOG ( PDERROR, "Failed to get buffer from current rid: %d,%d",
                     _curIndexRID._extent, _curIndexRID._slot ) ;
            rc = SDB_SYS ;
            goto error ;
         }
         try
         {
            try
            {
               _curKeyObj = ixmKey(dataBuffer).toBson() ;
               DMS_MON_OP_COUNT_INC( pMonAppCB, MON_INDEX_READ, 1 ) ;
               DMS_MON_CONTEXT_COUNT_INC ( _pMonCtxCB, MON_INDEX_READ, 1 ) ;
            }
            catch ( std::exception &e )
            {
               PD_RC_CHECK ( SDB_SYS, PDERROR,
                             "Failed to convert from buffer "
                             "to bson, rid: %d,%d: %s",
                             _curIndexRID._extent,
                             _curIndexRID._slot, e.what() ) ;
            }
            rc = _listIterator.advance ( _curKeyObj ) ;
            if ( -2 == rc )
            {
               rc = SDB_IXM_EOC ;
               goto done ;
            }
            else if ( rc >= 0 )
            {
               rc = indexExtent.keyAdvance ( _curIndexRID, _curKeyObj, rc,
                                             _listIterator.after(),
                                             _listIterator.cmp(),
                                             _listIterator.inc(),
                                             _order, _direction, _cb ) ;
               PD_RC_CHECK ( rc, PDERROR,
                             "Failed to advance, rc = %d", rc ) ;
               continue ;
            }
            else
            {
               _savedRID =
                     indexExtent.getRID ( _curIndexRID._slot ) ;
               if ( _savedRID.isNull() ||
                    _dupBuffer.end() != _dupBuffer.find ( _savedRID ) )
               {

                  _savedRID.reset() ;
                  goto begin ;
               }
               /*if ( _dupBuffer.size() >= _dedupBufferSize )
               {
                  rc = SDB_IXM_DEDUP_BUF_MAX ;
                  goto error ;
               }*/
               _dupBuffer.insert ( _savedRID ) ;
               rid = _savedRID ;
               if ( !isReadOnly )
               {
                  _savedObj = _curKeyObj.getOwned() ;
               }
               else
               {
                  _savedRID.reset() ;
               }
               rc = SDB_OK ;
               break ;
            }
         } // try
         catch ( std::exception &e )
         {
            PD_RC_CHECK ( SDB_SYS, PDERROR,
                          "exception during advance index tree: %s",
                          e.what() ) ;
         }
      } // while ( TRUE )
   done :
      PD_TRACE_EXITRC( SDB__RTNIXSCAN_ADVANCE, rc ) ;
      return rc ;
   error :
      goto done ;
   }
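A hedged driver loop for the scanner above (the setup via resumeScan and the record consumer are hypothetical; the return codes are the ones used in advance):

      // Illustrative only: pull record IDs until end-of-collection.
      dmsRecordID rid ;
      INT32 rc = SDB_OK ;
      while ( SDB_OK == ( rc = scanner.advance ( rid, TRUE ) ) )
      {
         // consume rid here (hypothetical)
      }
      if ( SDB_IXM_EOC != rc )
      {
         PD_LOG ( PDERROR, "index scan terminated abnormally, rc: %d", rc ) ;
      }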
Example #8
 void Helpers::putSingleton(const char *ns, BSONObj obj) {
     OpDebug debug;
     Client::Context context(ns);
     updateObjects(ns, obj, /*pattern=*/BSONObj(), /*upsert=*/true, /*multi=*/false , /*logtheop=*/true , debug );
     context.getClient()->curop()->done();
 }
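A hedged call sketch (namespace and document are illustrative):

     // Illustrative only: upsert the sole document of a singleton collection.
     Helpers::putSingleton( "local.replset.minvalid", BSON( "ts" << 1 ) );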
Example #9
 void Helpers::putSingletonGod(const char *ns, BSONObj obj, bool logTheOp) {
     OpDebug debug;
     Client::Context context(ns);
     _updateObjects(/*god=*/true, ns, obj, /*pattern=*/BSONObj(), /*upsert=*/true, /*multi=*/false , logTheOp , debug );
     context.getClient()->curop()->done();
 }
Example #10
    void ReplSetImpl::loadConfig(OperationContext* txn) {
        startupStatus = LOADINGCONFIG;
        startupStatusMsg.set("loading " + rsConfigNs + " config (LOADINGCONFIG)");
        LOG(1) << "loadConfig() " << rsConfigNs << endl;

        while (1) {
            try {
                OwnedPointerVector<ReplSetConfig> configs;
                try {
                    configs.mutableVector().push_back(ReplSetConfig::makeDirect());
                }
                catch (DBException& e) {
                    log() << "replSet exception loading our local replset configuration object : "
                          << e.toString() << rsLog;
                }
                for (vector<HostAndPort>::const_iterator i = _seeds->begin();
                        i != _seeds->end();
                        i++) {
                    try {
                        configs.mutableVector().push_back(ReplSetConfig::make(*i));
                    }
                    catch (DBException& e) {
                        log() << "replSet exception trying to load config from " << *i
                              << " : " << e.toString() << rsLog;
                    }
                }
                ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
                {
                    scoped_lock lck(replSettings.discoveredSeeds_mx);
                    if (replSettings.discoveredSeeds.size() > 0) {
                        for (set<string>::iterator i = replSettings.discoveredSeeds.begin(); 
                             i != replSettings.discoveredSeeds.end(); 
                             i++) {
                            try {
                                configs.mutableVector().push_back(
                                                            ReplSetConfig::make(HostAndPort(*i)));
                            }
                            catch (DBException&) {
                                LOG(1) << "replSet exception trying to load config from discovered "
                                          "seed " << *i << rsLog;
                                replSettings.discoveredSeeds.erase(*i);
                            }
                        }
                    }
                }

                if (!replSettings.reconfig.isEmpty()) {
                    try {
                        configs.mutableVector().push_back(ReplSetConfig::make(replSettings.reconfig,
                                                                       true));
                    }
                    catch (DBException& re) {
                        log() << "replSet couldn't load reconfig: " << re.what() << rsLog;
                        replSettings.reconfig = BSONObj();
                    }
                }

                int nok = 0;
                int nempty = 0;
                for (vector<ReplSetConfig*>::iterator i = configs.mutableVector().begin();
                     i != configs.mutableVector().end(); i++) {
                    if ((*i)->ok())
                        nok++;
                    if ((*i)->empty())
                        nempty++;
                }
                if (nok == 0) {

                    if (nempty == (int) configs.mutableVector().size()) {
                        startupStatus = EMPTYCONFIG;
                        startupStatusMsg.set("can't get " + rsConfigNs +
                                             " config from self or any seed (EMPTYCONFIG)");
                        log() << "replSet can't get " << rsConfigNs
                              << " config from self or any seed (EMPTYCONFIG)" << rsLog;
                        static unsigned once;
                        if (++once == 1) {
                            log() << "replSet info you may need to run replSetInitiate -- rs.initia"
                                     "te() in the shell -- if that is not already done" << rsLog;
                        }
                        if (_seeds->size() == 0) {
                            LOG(1) << "replSet info no seed hosts were specified on the --replSet "
                                      "command line" << rsLog;
                        }
                    }
                    else {
                        startupStatus = EMPTYUNREACHABLE;
                        startupStatusMsg.set("can't currently get " + rsConfigNs +
                                             " config from self or any seed (EMPTYUNREACHABLE)");
                        log() << "replSet can't get " << rsConfigNs
                              << " config from self or any seed (yet)" << rsLog;
                    }

                    sleepsecs(1);
                    continue;
                }

                if (!_loadConfigFinish(txn, configs.mutableVector())) {
                    log() << "replSet info Couldn't load config yet. Sleeping 20sec and will try "
                             "again." << rsLog;
                    sleepsecs(20);
                    continue;
                }
            }
            catch (DBException& e) {
                startupStatus = BADCONFIG;
                startupStatusMsg.set("replSet error loading set config (BADCONFIG)");
                log() << "replSet error loading configurations " << e.toString() << rsLog;
                log() << "replSet error replication will not start" << rsLog;
                sethbmsg("error loading set config");
                _fatal();
                throw;
            }
            break;
        }
        startupStatusMsg.set("? started");
        startupStatus = STARTED;
    }
Example #11
TEST(FindAndModifyRequest, ParseWithUpdateOnlyRequiredFields) {
    BSONObj cmdObj(fromjson(R"json({
            query: { x: 1 },
            update: { y: 1 }
        })json"));

    auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
    ASSERT_OK(parseStatus.getStatus());

    auto request = parseStatus.getValue();
    ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
    ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getQuery());
    ASSERT_BSONOBJ_EQ(BSON("y" << 1), request.getUpdateObj());
    ASSERT_EQUALS(false, request.isUpsert());
    ASSERT_EQUALS(false, request.isRemove());
    ASSERT_BSONOBJ_EQ(BSONObj(), request.getFields());
    ASSERT_BSONOBJ_EQ(BSONObj(), request.getSort());
    ASSERT_BSONOBJ_EQ(BSONObj(), request.getCollation());
    ASSERT_EQUALS(0u, request.getArrayFilters().size());
    ASSERT_EQUALS(false, request.shouldReturnNew());
}

TEST(FindAndModifyRequest, ParseWithUpdateFullSpec) {
    BSONObj cmdObj(fromjson(R"json({
            query: { x: 1 },
            update: { y: 1 },
            upsert: true,
            fields: { x: 1, y: 1 },
            sort: { z: -1 },
            collation: {locale: 'en_US' },
            arrayFilters: [ { i: 0 } ],
            new: true
        })json"));
    // Truncated in the source from here on; this completion mirrors the test above.
    auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
    ASSERT_OK(parseStatus.getStatus());
    ASSERT_EQUALS(true, parseStatus.getValue().isUpsert());
}
Example #12
    long long runCount( const string& ns, const BSONObj &cmd, string &err, int &errCode ) {
        // Lock 'ns'.
        Client::Context cx(ns);
        Collection* collection = cx.db()->getCollection(ns);

        if (NULL == collection) {
            err = "ns missing";
            return -1;
        }

        BSONObj query = cmd.getObjectField("query");
        const std::string hint = cmd.getStringField("hint");
        const BSONObj hintObj = hint.empty() ? BSONObj() : BSON("$hint" << hint);
        
        // count of all objects
        if (query.isEmpty()) {
            return applySkipLimit(collection->numRecords(), cmd);
        }

        Runner* rawRunner;
        long long skip = cmd["skip"].numberLong();
        long long limit = cmd["limit"].numberLong();

        if (limit < 0) {
            limit = -limit;
        }

        uassertStatusOK(getRunnerCount(collection, query, hintObj, &rawRunner));
        auto_ptr<Runner> runner(rawRunner);

        try {
            const ScopedRunnerRegistration safety(runner.get());
            runner->setYieldPolicy(Runner::YIELD_AUTO);

            long long count = 0;
            Runner::RunnerState state;
            while (Runner::RUNNER_ADVANCED == (state = runner->getNext(NULL, NULL))) {
                if (skip > 0) {
                    --skip;
                }
                else {
                    ++count;
                    // Fast-path. There's no point in iterating all over the runner if limit
                    // is set.
                    if (count >= limit && limit != 0) {
                        break;
                    }
                }
            }

            // Emulate old behavior and return the count even if the runner was killed.  This
            // happens when the underlying collection is dropped.
            return count;
        }
        catch (const DBException &e) {
            err = e.toString();
            errCode = e.getCode();
        } 
        catch (const std::exception &e) {
            err = e.what();
            errCode = 0;
        } 

        // Historically we have returned zero in many count assertion cases - see SERVER-2291.
        log() << "Count with ns: " << ns << " and query: " << query
              << " failed with exception: " << err << " code: " << errCode
              << endl;

        return -2;
    }
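A hedged invocation sketch (the command document carries exactly the fields the function reads: query, hint, skip, limit):

        // Illustrative only: count documents matching {x: 1}, skipping 5, capped at 10.
        string err;
        int errCode = 0;
        BSONObj cmd = BSON( "query" << BSON( "x" << 1 ) << "skip" << 5 << "limit" << 10 );
        long long n = runCount( "test.coll", cmd, err, errCode );
        // n == -1: ns missing; n == -2: failure, details in err/errCode.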
Example #13
    string DBHashCmd::hashCollection( OperationContext* opCtx, Database* db, const string& fullCollectionName, bool* fromCache ) {
        scoped_ptr<scoped_lock> cachedHashedLock;

        if ( isCachable( fullCollectionName ) ) {
            cachedHashedLock.reset( new scoped_lock( _cachedHashedMutex ) );
            string hash = _cachedHashed[fullCollectionName];
            if ( hash.size() > 0 ) {
                *fromCache = true;
                return hash;
            }
        }

        *fromCache = false;
        Collection* collection = db->getCollection( opCtx, fullCollectionName );
        if ( !collection )
            return "";

        IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex( opCtx );

        auto_ptr<PlanExecutor> exec;
        if ( desc ) {
            exec.reset(InternalPlanner::indexScan(opCtx,
                                                  collection,
                                                  desc,
                                                  BSONObj(),
                                                  BSONObj(),
                                                  false,
                                                  InternalPlanner::FORWARD,
                                                  InternalPlanner::IXSCAN_FETCH));
        }
        else if ( collection->isCapped() ) {
            exec.reset(InternalPlanner::collectionScan(opCtx,
                                                       fullCollectionName,
                                                       collection));
        }
        else {
            log() << "can't find _id index for: " << fullCollectionName << endl;
            return "no _id _index";
        }

        md5_state_t st;
        md5_init(&st);

        long long n = 0;
        PlanExecutor::ExecState state;
        BSONObj c;
        verify(NULL != exec.get());
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
            md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
            n++;
        }
        if (PlanExecutor::IS_EOF != state) {
            warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
        }
        md5digest d;
        md5_finish(&st, d);
        string hash = digestToString( d );

        if ( cachedHashedLock.get() ) {
            _cachedHashed[fullCollectionName] = hash;
        }

        return hash;
    }
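A hedged caller sketch, as it might look from within DBHashCmd (the OperationContext and Database pointers are assumed to be in scope):

        // Illustrative only: hash one collection and note whether the cache served it.
        bool fromCache = false;
        string hash = hashCollection( opCtx, db, "test.coll", &fromCache );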
Example #14
File: utils.cpp Project: alanw/mongo
 BSONObj JSVersion( const BSONObj& args ){
     cout << "version: " << versionString << endl;
     if ( strstr( versionString , "+" ) )
         printGitVersion();
     return BSONObj();
 }
Example #15
    UpdateResult update(UpdateRequest& request, UpdateDriver* driver) {

        const NamespaceString& nsString = request.getNamespaceString();

        validateUpdate( nsString.ns().c_str(), request.getUpdates(), request.getQuery() );

        NamespaceDetails* nsDetails = nsdetails( nsString.ns() );
        NamespaceDetailsTransient* nsDetailsTransient =
            &NamespaceDetailsTransient::get( nsString.ns().c_str() );

        OpDebug& debug = request.getDebug();

        // TODO: This seems a bit circuitous.
        debug.updateobj = request.getUpdates();

        driver->refreshIndexKeys( nsDetailsTransient->indexKeys() );

        shared_ptr<Cursor> cursor = getOptimizedCursor(
            nsString.ns(), request.getQuery(), BSONObj(), request.getQueryPlanSelectionPolicy() );

        // If the update was marked with '$isolated' (a.k.a '$atomic'), we are not allowed to
        // yield while evaluating the update loop below.
        //
        // TODO: Old code checks this repeatedly within the update loop. Is that necessary? It seems
        // that once atomic, it should always be atomic.
        const bool isolated =
            cursor->ok() &&
            cursor->matcher() &&
            cursor->matcher()->docMatcher().atomic();

        // The 'cursor' the optimizer gave us may contain query plans that generate duplicate
        // disklocs. We set up here the mechanisms that will prevent us from processing those
        // twice if we see them. We also set up a 'ClientCursor' so that we can support
        // yielding.
        //
        // TODO: Is it valid to call this on a non-ok cursor?
        const bool dedupHere = cursor->autoDedup();

        //
        // We'll start assuming we have one or more documents for this update. (Otherwise,
        // we'll fall back to upserting.)
        //

        // We record that this will not be an upsert, in case a mod doesn't want to be applied
        // when in strict update mode.
        driver->setContext( ModifierInterface::ExecInfo::UPDATE_CONTEXT );

        // Let's fetch each of them and pipe them through the update expression, making sure to
        // keep track of the necessary stats. Recall that we'll be pulling documents out of
        // cursors and some of them do not deduplicate the entries they generate. We have
        // deduping logic in here, too -- for now.
        unordered_set<DiskLoc, DiskLoc::Hasher> seenLocs;
        int numMatched = 0;
        debug.nscanned = 0;

        Client& client = cc();

        mutablebson::Document doc;

        // If we are going to be yielding, we will need a ClientCursor scoped to this loop. We
        // only loop as long as the underlying cursor is OK.
        for ( auto_ptr<ClientCursor> clientCursor; cursor->ok(); ) {

            // If we haven't constructed a ClientCursor, and if the client allows us to throw
            // page faults, and if we are referring to a location that is likely not in
            // physical memory, then throw a PageFaultException. The entire operation will be
            // restarted.
            if ( clientCursor.get() == NULL &&
                 client.allowedToThrowPageFaultException() &&
                 !cursor->currLoc().isNull() &&
                 !cursor->currLoc().rec()->likelyInPhysicalMemory() ) {
                // We should never throw a PFE if we have already updated items.
                dassert((numMatched == 0) || (numMatched == debug.nupdateNoops));
                throw PageFaultException( cursor->currLoc().rec() );
            }

            if ( !isolated && debug.nscanned != 0 ) {

                // We are permitted to yield. To do so we need a ClientCursor, so create one
                // now if we have not yet done so.
                if ( !clientCursor.get() )
                    clientCursor.reset(
                        new ClientCursor( QueryOption_NoCursorTimeout, cursor, nsString.ns() ) );

                // Ask the client cursor to yield. We get two bits of state back: whether or not
                // we yielded, and whether or not we correctly recovered from yielding.
                bool yielded = false;
                const bool recovered = clientCursor->yieldSometimes(
                    ClientCursor::WillNeed, &yielded );

                if ( !recovered ) {
                    // If we failed to recover from the yield, then the ClientCursor is already
                    // gone. Release it so we don't destroy it a second time.
                    clientCursor.release();
                    break;
                }

                if ( !cursor->ok() ) {
                    // If the cursor died while we were yielded, just get out of the update loop.
                    break;
                }

                if ( yielded ) {
                    // We yielded and recovered OK, and our cursor is still good. Details about
                    // our namespace may have changed while we were yielded, so we re-acquire
                    // them here. If we can't do so, escape the update loop. Otherwise, refresh
                    // the driver so that it knows about what is currently indexed.
                    nsDetails = nsdetails( nsString.ns() );
                    if ( !nsDetails )
                        break;
                    nsDetailsTransient = &NamespaceDetailsTransient::get( nsString.ns().c_str() );

                    // TODO: This copies the index keys, but it may not need to do so.
                    driver->refreshIndexKeys( nsDetailsTransient->indexKeys() );
                }

            }

            // Let's fetch the next candidate object for this update.
            Record* record = cursor->_current();
            DiskLoc loc = cursor->currLoc();
            const BSONObj oldObj = loc.obj();

            // We count how many documents we scanned even though we may skip those that are
            // deemed duplicated. The final 'numUpdated' and 'nscanned' numbers may differ for
            // that reason.
            debug.nscanned++;

            // Skips this document if it:
            // a) doesn't match the query portion of the update
            // b) was deemed duplicate by the underlying cursor machinery
            //
            // Now, if we are going to update the document,
            // c) we don't want to do so while the cursor is at it, as that may invalidate
            // the cursor. So, we advance to next document, before issuing the update.
            MatchDetails matchDetails;
            matchDetails.requestElemMatchKey();
            if ( !cursor->currentMatches( &matchDetails ) ) {
                // a)
                cursor->advance();
                continue;
            }
            else if ( cursor->getsetdup( loc ) && dedupHere ) {
                // b)
                cursor->advance();
                continue;
            }
            else if (!driver->isDocReplacement() && request.isMulti()) {
                // c)
                cursor->advance();
                if ( dedupHere ) {
                    if ( seenLocs.count( loc ) ) {
                        continue;
                    }
                }

                // There are certain kind of cursors that hold multiple pointers to data
                // underneath. $or cursors is one example. In a $or cursor, it may be the case
                // that when we did the last advance(), we finished consuming documents from
                // one of $or child and started consuming the next one. In that case, it is
                // possible that the last document of the previous child is the same as the
                // first document of the next (see SERVER-5198 and jstests/orp.js).
                //
                // So we advance the cursor here until we see a new diskloc.
                //
                // Note that we won't be yielding, and we may not do so for a while if we find
                // a particularly duplicated sequence of loc's. That is highly unlikely,
                // though.  (See SERVER-5725, if curious, but "stage" based $or will make that
                // ticket moot).
                while( cursor->ok() && loc == cursor->currLoc() ) {
                    cursor->advance();
                }
            }

            // For some (unfortunate) historical reasons, not all cursors would be valid after
            // a write simply because we advanced them to a document not affected by the write.
            // To protect in those cases, not only we engaged in the advance() logic above, but
            // we also tell the cursor we're about to write a document that we've just seen.
            // prepareToTouchEarlierIterate() requires calling later
            // recoverFromTouchingEarlierIterate(), so we make a note here to do so.
            bool touchPreviousDoc = request.isMulti() && cursor->ok();
            if ( touchPreviousDoc ) {
                if ( clientCursor.get() )
                    clientCursor->setDoingDeletes( true );
                cursor->prepareToTouchEarlierIterate();
            }

            // Found a matching document
            numMatched++;

            // Ask the driver to apply the mods. It may be that the driver can apply those "in
            // place", that is, some values of the old document just get adjusted without any
            // change to the binary layout on the bson layer. It may be that a whole new
            // document is needed to accommodate the new bson layout of the resulting document.
            doc.reset( oldObj, mutablebson::Document::kInPlaceEnabled );
            BSONObj logObj;

            // If there was a matched field, obtain it.
            string matchedField;
            if (matchDetails.hasElemMatchKey())
                matchedField = matchDetails.elemMatchKey();

            Status status = driver->update( matchedField, &doc, &logObj );
            if ( !status.isOK() ) {
                uasserted( 16837, status.reason() );
            }

            // If the driver applied the mods in place, we can ask the mutable for what
            // changed. We call those changes "damages". :) We use the damages to inform the
            // journal what was changed, and then apply them to the original document
            // ourselves. If, however, the driver applied the mods out of place, we ask it to
            // generate a new, modified document for us. In that case, the file manager will
            // take care of the journaling details for us.
            //
            // This code flow is admittedly odd. But, right now, journaling is baked in the file
            // manager. And if we aren't using the file manager, we have to do journaling
            // ourselves.
            bool objectWasChanged = false;
            BSONObj newObj;
            const char* source = NULL;
            mutablebson::DamageVector damages;
            bool inPlace = doc.getInPlaceUpdates(&damages, &source);
            if ( inPlace && !damages.empty() && !driver->modsAffectIndices() ) {
                nsDetails->paddingFits();

                // All updates were in place. Apply them via durability and writing pointer.
                mutablebson::DamageVector::const_iterator where = damages.begin();
                const mutablebson::DamageVector::const_iterator end = damages.end();
                for( ; where != end; ++where ) {
                    const char* sourcePtr = source + where->sourceOffset;
                    void* targetPtr = getDur().writingPtr(
                        const_cast<char*>(oldObj.objdata()) + where->targetOffset,
                        where->size);
                    std::memcpy(targetPtr, sourcePtr, where->size);
                }
                newObj = oldObj;
                debug.fastmod = true;

                objectWasChanged = true;
            }
            else {

                // The updates were not in place. Apply them through the file manager.
                newObj = doc.getObject();
                DiskLoc newLoc = theDataFileMgr.updateRecord(nsString.ns().c_str(),
                                                             nsDetails,
                                                             nsDetailsTransient,
                                                             record,
                                                             loc,
                                                             newObj.objdata(),
                                                             newObj.objsize(),
                                                             debug);

                // If we've moved this object to a new location, make sure we don't apply
                // that update again if our traversal picks the object again.
                //
                // We also take note of the diskloc if the updates affect indices.
                // Chances are that we're traversing one of them and they may be multi key and
                // therefore duplicate disklocs.
                if ( newLoc != loc || driver->modsAffectIndices()  ) {
                    seenLocs.insert( newLoc );
                }

                objectWasChanged = true;
            }

            // Log Obj
            if ( request.shouldUpdateOpLog() ) {
                if ( driver->isDocReplacement() || !logObj.isEmpty() ) {
                    BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request.isMulti());
                    logOp("u", nsString.ns().c_str(), logObj , &idQuery,
                          NULL, request.isFromMigration(), &newObj);
                }
            }

            // If it was noop since the document didn't change, record that.
            if (!objectWasChanged)
                debug.nupdateNoops++;

            if (!request.isMulti()) {
                break;
            }

            // If we used the cursor mechanism that prepares an earlier seen document for a
            // write we need to tell such mechanisms that the write is over.
            if ( touchPreviousDoc ) {
                cursor->recoverFromTouchingEarlierIterate();
            }

            getDur().commitIfNeeded();

        }

        // TODO: Can this be simplified?
        if ((numMatched > 0) || (numMatched == 0 && !request.isUpsert()) ) {
            debug.nupdated = numMatched;
            return UpdateResult( numMatched > 0 /* updated existing object(s) */,
                                 !driver->isDocReplacement() /* $mod or obj replacement */,
                                 numMatched /* # of documents updated, even no-ops */,
                                 BSONObj() );
        }

        //
        // We haven't found any existing document so an insert is done
        // (upsert is true).
        //
        debug.upsert = true;

        // Since this is an insert (no docs found and upsert:true), we will be logging it
        // as an insert in the oplog. We don't need the driver's help to build the
        // oplog record, then. We also set the context of the update driver to the INSERT_CONTEXT.
        // Some mods may only work in that context (e.g. $setOnInsert).
        driver->setLogOp( false );
        driver->setContext( ModifierInterface::ExecInfo::INSERT_CONTEXT );

        BSONObj baseObj;

        // Reset the document we will be writing to
        doc.reset( baseObj, mutablebson::Document::kInPlaceDisabled );
        if ( request.getQuery().hasElement("_id") ) {
            uassertStatusOK(doc.root().appendElement(request.getQuery().getField("_id")));
        }


        // If this is a $mod base update, we need to generate a document by examining the
        // query and the mods. Otherwise, we can use the object replacement sent by the user
        // update command that was parsed by the driver before.
        // In the following block we handle the query part, and then do the regular mods after.
        if ( *request.getUpdates().firstElementFieldName() == '$' ) {
            uassertStatusOK(UpdateDriver::createFromQuery(request.getQuery(), doc));
            debug.fastmodinsert = true;
        }

        // Apply the update modifications and then log the update as an insert manually.
        Status status = driver->update( StringData(), &doc, NULL /* no oplog record */);
        if ( !status.isOK() ) {
            uasserted( 16836, status.reason() );
        }

        BSONObj newObj = doc.getObject();
        theDataFileMgr.insertWithObjMod( nsString.ns().c_str(), newObj, false, request.isGod() );
        if ( request.shouldUpdateOpLog() ) {
            logOp( "i", nsString.ns().c_str(), newObj,
                   NULL, NULL, request.isFromMigration(), &newObj );
        }

        debug.nupdated = 1;
        return UpdateResult( false /* did not update an existing document (upserted) */,
                             !driver->isDocReplacement() /* $mod or obj replacement? */,
                             1 /* count of updated documents */,
                             newObj /* object that was upserted */ );
    }
Example #16
 void Helpers::emptyCollection(const char *ns) {
     Client::Context context(ns);
     deleteObjects(ns, BSONObj(), false);
 }
Example #17
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = r.getns();
            ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            BSONObjBuilder b;
            vector<Shard> shards;

            if ( strcmp( ns , "inprog" ) == 0 ) {
                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    ScopedDbConnection conn( shard );
                    BSONObj temp = conn->findOne( r.getns() , BSONObj() );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn.done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                BSONElement e = q.query["op"];
                if ( strstr( r.getns() , "admin." ) != 0 ) {
                    b.append( "err" , "unauthorized" );
                }
                else if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        ScopedDbConnection conn( s );
                        conn->findOne( r.getns() , BSON( "op" << opid ) );
                        conn.done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                log( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }
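For orientation, the handler above only fires for queries against a ".$cmd.sys." virtual namespace. A minimal client-side sketch of reaching it through mongos (the address and opid are made-up values, and the client header is an assumption):

        #include "mongo/client/dbclient.h"

        using namespace mongo;

        void querySysNamespaces() {
            DBClientConnection conn;
            std::string err;
            if ( ! conn.connect( "localhost:27017" , err ) ) // assumed mongos address
                return;
            // Routed to the "inprog" branch above.
            BSONObj inprog = conn.findOne( "admin.$cmd.sys.inprog" , Query() );
            // Routed to the "killop" branch, which expects op as "<shard>:<opid>"
            // and performs its own authorization check.
            BSONObj killed = conn.findOne( "admin.$cmd.sys.killop" ,
                                           QUERY( "op" << "shard0000:12345" ) );
        }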
Example #18
    void QueryPlanSet::init() {
        const char *ns = _frsp->ns(); // declared before its first use in DEBUGQO
        DEBUGQO( "QueryPlanSet::init " << ns << "\t" << _originalQuery );
        _plans.clear();
        _mayRecordPlan = true;
        _usingPrerecordedPlan = false;

        NamespaceDetails *d = nsdetails( ns );
        if ( !d || !_frsp->matchPossible() ) {
            // Table scan plan, when no matches are possible
            _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
            return;
        }

        BSONElement hint = _hint.firstElement();
        if ( !hint.eoo() ) {
            _mayRecordPlan = false;
            IndexDetails *id = parseHint( hint, d );
            if ( id ) {
                addHint( *id );
            }
            else {
                massert( 10366 ,  "natural order cannot be specified with $min/$max", _min.isEmpty() && _max.isEmpty() );
                // Table scan plan
                _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
            }
            return;
        }

        if ( !_min.isEmpty() || !_max.isEmpty() ) {
            string errmsg;
            BSONObj keyPattern;
            IndexDetails *idx = indexDetailsForRange( ns, errmsg, _min, _max, keyPattern );
            massert( 10367 ,  errmsg, idx );
            _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(*idx), *_frsp, *_originalFrsp, _originalQuery, _order, _min, _max ) ) );
            return;
        }

        if ( isSimpleIdQuery( _originalQuery ) ) {
            int idx = d->findIdIndex();
            if ( idx >= 0 ) {
                _usingPrerecordedPlan = true;
                _mayRecordPlan = false;
                _plans.push_back( QueryPlanPtr( new QueryPlan( d , idx , *_frsp , *_originalFrsp , _originalQuery, _order ) ) );
                return;
            }
        }

        if ( _originalQuery.isEmpty() && _order.isEmpty() ) {
            _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
            return;
        }

        DEBUGQO( "\t special : " << _frsp->getSpecial() );
        if ( _frsp->getSpecial().size() ) {
            _special = _frsp->getSpecial();
            NamespaceDetails::IndexIterator i = d->ii();
            while( i.more() ) {
                int j = i.pos();
                IndexDetails& ii = i.next();
                const IndexSpec& spec = ii.getSpec();
                if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , _order ) ) {
                    _usingPrerecordedPlan = true;
                    _mayRecordPlan = false;
                    _plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_frsp , *_originalFrsp , _originalQuery, _order ,
                                                    BSONObj() , BSONObj() , _special ) ) );
                    return;
                }
            }
            uassert( 13038 , (string)"can't find special index: " + _special + " for: " + _originalQuery.toString() , 0 );
        }

        if ( _honorRecordedPlan ) {
            pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( *_frsp, _order );
            BSONObj bestIndex = best.first;
            long long oldNScanned = best.second;
            if ( !bestIndex.isEmpty() ) {
                QueryPlanPtr p;
                _oldNScanned = oldNScanned;
                if ( !strcmp( bestIndex.firstElement().fieldName(), "$natural" ) ) {
                    // Table scan plan
                    p.reset( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) );
                }

                NamespaceDetails::IndexIterator i = d->ii();
                while( i.more() ) {
                    int j = i.pos();
                    IndexDetails& ii = i.next();
                    if( ii.keyPattern().woCompare(bestIndex) == 0 ) {
                        p.reset( new QueryPlan( d, j, *_frsp, *_originalFrsp, _originalQuery, _order ) );
                    }
                }

                massert( 10368 ,  "Unable to locate previously recorded index", p.get() );
                if ( !( _bestGuessOnly && p->scanAndOrderRequired() ) ) {
                    _usingPrerecordedPlan = true;
                    _mayRecordPlan = false;
                    _plans.push_back( p );
                    return;
                }
            }
        }

        addOtherPlans( false );
    }
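For reference, the hint branch above copes with both spellings a client can send: an index key pattern (resolved by parseHint) and { $natural: 1 }, for which parseHint yields no index and init() falls back to the table-scan plan, provided $min/$max are unset. A small client-side sketch (collection and field names are illustrative):

        // Hint by key pattern: resolved to an IndexDetails by parseHint.
        Query byPattern = QUERY( "a" << GT << 0 ).hint( BSON( "a" << 1 ) );
        // Hint by $natural: forces the table-scan QueryPlan.
        Query byNatural = QUERY( "a" << GT << 0 ).hint( BSON( "$natural" << 1 ) );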
Example #19
        void KillMongoProgramInstances() {
            vector< int > ports;
            registry.getRegisteredPorts( ports );
            for( vector< int >::iterator i = ports.begin(); i != ports.end(); ++i )
                killDb( *i, ProcessId::fromNative(0), SIGTERM );
            vector< ProcessId > pids;
            registry.getRegisteredPids( pids );
            for( vector< ProcessId >::iterator i = pids.begin(); i != pids.end(); ++i )
                killDb( 0, *i, SIGTERM );
        }

        MongoProgramScope::~MongoProgramScope() {
            DESTRUCTOR_GUARD(
                KillMongoProgramInstances();
                ClearRawMongoProgramOutput( BSONObj(), 0 );
            )
        }

        void installShellUtilsLauncher( Scope& scope ) {
            scope.injectNative( "_startMongoProgram", StartMongoProgram );
            scope.injectNative( "runProgram", RunProgram );
            scope.injectNative( "run", RunProgram );
            scope.injectNative( "_runMongoProgram", RunMongoProgram );
            scope.injectNative( "stopMongod", StopMongoProgram );
            scope.injectNative( "stopMongoProgram", StopMongoProgram );
            scope.injectNative( "stopMongoProgramByPid", StopMongoProgramByPid );
            scope.injectNative( "rawMongoProgramOutput", RawMongoProgramOutput );
            scope.injectNative( "clearRawMongoProgramOutput", ClearRawMongoProgramOutput );
            scope.injectNative( "waitProgram" , WaitProgram );
            scope.injectNative( "checkProgram" , CheckProgram );
Example #20
    UpdateResult _updateObjects( bool su,
                                 const char* ns,
                                 const BSONObj& updateobj,
                                 const BSONObj& patternOrig,
                                 bool upsert,
                                 bool multi,
                                 bool logop ,
                                 OpDebug& debug,
                                 RemoveSaver* rs,
                                 bool fromMigrate,
                                 const QueryPlanSelectionPolicy& planPolicy,
                                 bool forReplication ) {

        DEBUGUPDATE( "update: " << ns
                     << " update: " << updateobj
                     << " query: " << patternOrig
                     << " upsert: " << upsert << " multi: " << multi );

        Client& client = cc();
        int profile = client.database()->profile;

        debug.updateobj = updateobj;

        // The idea with these here is to make them loop invariant for
        // multi updates, and thus a bit faster for that case.  The
        // pointers may be left invalid on a failed or terminal yield
        // recovery.
        NamespaceDetails* d = nsdetails(ns); // can be null if an upsert...
        NamespaceDetailsTransient* nsdt = &NamespaceDetailsTransient::get(ns);

        auto_ptr<ModSet> mods;
        bool isOperatorUpdate = updateobj.firstElementFieldName()[0] == '$';
        int modsIsIndexed = 0; // really the # of indexes
        if ( isOperatorUpdate ) {
            if( d && d->indexBuildInProgress ) {
                set<string> bgKeys;
                d->inProgIdx().keyPattern().getFieldNames(bgKeys);
                mods.reset( new ModSet(updateobj, nsdt->indexKeys(), &bgKeys, forReplication) );
            }
            else {
                mods.reset( new ModSet(updateobj, nsdt->indexKeys(), NULL, forReplication) );
            }
            modsIsIndexed = mods->isIndexed();
        }

        if( planPolicy.permitOptimalIdPlan() && !multi && isSimpleIdQuery(patternOrig) && d &&
           !modsIsIndexed ) {
            int idxNo = d->findIdIndex();
            if( idxNo >= 0 ) {
                debug.idhack = true;

                UpdateResult result = _updateById( isOperatorUpdate,
                                                   idxNo,
                                                   mods.get(),
                                                   profile,
                                                   d,
                                                   nsdt,
                                                   su,
                                                   ns,
                                                   updateobj,
                                                   patternOrig,
                                                   logop,
                                                   debug,
                                                   fromMigrate);
                if ( result.existing || ! upsert ) {
                    return result;
                }
                else if ( upsert && ! isOperatorUpdate && ! logop) {
                    // this handles repl inserts
                    checkNoMods( updateobj );
                    debug.upsert = true;
                    BSONObj no = updateobj;
                    theDataFileMgr.insertWithObjMod(ns, no, false, su);
                    return UpdateResult( 0 , 0 , 1 , no );
                }
            }
        }

        int numModded = 0;
        debug.nscanned = 0;
        shared_ptr<Cursor> c =
            NamespaceDetailsTransient::getCursor( ns, patternOrig, BSONObj(), planPolicy );
        d = nsdetails(ns);
        nsdt = &NamespaceDetailsTransient::get(ns);
        bool autoDedup = c->autoDedup();

        if( c->ok() ) {
            set<DiskLoc> seenObjects;
            MatchDetails details;
            auto_ptr<ClientCursor> cc;
            do {

                if ( cc.get() == 0 &&
                     client.allowedToThrowPageFaultException() &&
                     ! c->currLoc().isNull() &&
                     ! c->currLoc().rec()->likelyInPhysicalMemory() ) {
                    throw PageFaultException( c->currLoc().rec() );
                }

                bool atomic = c->matcher() && c->matcher()->docMatcher().atomic();

                if ( ! atomic && debug.nscanned > 0 ) {
                    // we need to use a ClientCursor to yield
                    if ( cc.get() == 0 ) {
                        shared_ptr< Cursor > cPtr = c;
                        cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
                    }

                    bool didYield;
                    if ( ! cc->yieldSometimes( ClientCursor::WillNeed, &didYield ) ) {
                        cc.release();
                        break;
                    }
                    if ( !c->ok() ) {
                        break;
                    }

                    if ( didYield ) {
                        d = nsdetails(ns);
                        if ( ! d )
                            break;
                        nsdt = &NamespaceDetailsTransient::get(ns);
                        if ( mods.get() && ! mods->isIndexed() ) {
                            // we need to re-check indexes
                            set<string> bgKeys;
                            if ( d->indexBuildInProgress )
                                d->inProgIdx().keyPattern().getFieldNames(bgKeys);
                            mods->updateIsIndexed( nsdt->indexKeys() , &bgKeys );
                            modsIsIndexed = mods->isIndexed();
                        }

                    }

                } // end yielding block

                debug.nscanned++;

                if ( mods.get() && mods->hasDynamicArray() ) {
                    // The Cursor must have a Matcher to record an elemMatchKey.  But currently
                    // a modifier on a dynamic array field may be applied even if there is no
                    // elemMatchKey, so a matcher cannot be required.
                    //verify( c->matcher() );
                    details.requestElemMatchKey();
                }

                if ( !c->currentMatches( &details ) ) {
                    c->advance();
                    continue;
                }

                Record* r = c->_current();
                DiskLoc loc = c->currLoc();

                if ( c->getsetdup( loc ) && autoDedup ) {
                    c->advance();
                    continue;
                }

                BSONObj js = BSONObj::make(r);

                BSONObj pattern = patternOrig;

                if ( logop ) {
                    BSONObjBuilder idPattern;
                    BSONElement id;
                    // NOTE: If the matching object lacks an id, we'll log
                    // with the original pattern.  This isn't replay-safe.
                    // It might make sense to suppress the log instead
                    // if there's no id.
                    if ( js.getObjectID( id ) ) {
                        idPattern.append( id );
                        pattern = idPattern.obj();
                    }
                    else {
                        uassert( 10157 ,  "multi-update requires all modified objects to have an _id" , ! multi );
                    }
                }

                /* Look for $inc etc.  Note that in an operator update all fields must use
                    update operators; you can't mix in plain field assignments at the moment. */
                if ( isOperatorUpdate ) {

                    if ( multi ) {
                        // go to next record in case this one moves
                        c->advance();

                        // Update operations are deduped for cursors that implement their own
                        // deduplication.  In particular, some geo cursors are excluded.
                        if ( autoDedup ) {

                            if ( seenObjects.count( loc ) ) {
                                continue;
                            }

                            // SERVER-5198 Advance past the document to be modified, provided
                            // deduplication is enabled, but see SERVER-5725.
                            while( c->ok() && loc == c->currLoc() ) {
                                c->advance();
                            }
                        }
                    }

                    const BSONObj& onDisk = loc.obj();

                    ModSet* useMods = mods.get();

                    auto_ptr<ModSet> mymodset;
                    if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
                        useMods = mods->fixDynamicArray( details.elemMatchKey() );
                        mymodset.reset( useMods );
                    }

                    auto_ptr<ModSetState> mss = useMods->prepare( onDisk );

                    bool willAdvanceCursor = multi && c->ok() && ( modsIsIndexed || ! mss->canApplyInPlace() );

                    if ( willAdvanceCursor ) {
                        if ( cc.get() ) {
                            cc->setDoingDeletes( true );
                        }
                        c->prepareToTouchEarlierIterate();
                    }

                    if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ) {
                        mss->applyModsInPlace( true ); // applies the mods to the on-disk object directly

                        DEBUGUPDATE( "\t\t\t doing in place update" );
                        if ( profile && !multi )
                            debug.fastmod = true;

                        if ( modsIsIndexed ) {
                            seenObjects.insert( loc );
                        }

                        d->paddingFits();
                    }
                    else {
                        if ( rs )
                            rs->goingToDelete( onDisk );

                        BSONObj newObj = mss->createNewFromMods();
                        checkTooLarge(newObj);
                        DiskLoc newLoc = theDataFileMgr.updateRecord(ns,
                                                                     d,
                                                                     nsdt,
                                                                     r,
                                                                     loc,
                                                                     newObj.objdata(),
                                                                     newObj.objsize(),
                                                                     debug);

                        if ( newLoc != loc || modsIsIndexed ){
                            // log() << "Moved obj " << newLoc.obj()["_id"] << " from " << loc << " to " << newLoc << endl;
                            // object moved, need to make sure we don't see it again
                            seenObjects.insert( newLoc );
                        }

                    }

                    if ( logop ) {
                        DEV verify( mods->size() );
                        BSONObj logObj = mss->getOpLogRewrite();
                        DEBUGUPDATE( "\t rewrite update: " << logObj );

                        // It is possible that the entire mod set was a no-op over this
                        // document.  We would have an empty log record in that case. If we
                        // call logOp, with an empty record, that would be replicated as "clear
                        // this record", which is not what we want. Therefore, to get a no-op
                        // in the replica, we simply don't log.
                        if ( logObj.nFields() ) {
                            logOp("u", ns, logObj , &pattern, 0, fromMigrate );
                        }
                    }
                    numModded++;
                    if ( ! multi )
                        return UpdateResult( 1 , 1 , numModded , BSONObj() );
                    if ( willAdvanceCursor )
                        c->recoverFromTouchingEarlierIterate();

                    getDur().commitIfNeeded();

                    continue;
                }

                uassert( 10158 ,  "multi update only works with $ operators" , ! multi );

                BSONElementManipulator::lookForTimestamps( updateobj );
                checkNoMods( updateobj );
                theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug, su);
                if ( logop ) {
                    DEV wassert( !su ); // super-user (su) writes don't get logged; that would be bad.
                    logOp("u", ns, updateobj, &pattern, 0, fromMigrate );
                }
                return UpdateResult( 1 , 0 , 1 , BSONObj() );
            } while ( c->ok() );
        } // endif

        if ( numModded )
            return UpdateResult( 1 , 1 , numModded , BSONObj() );

        if ( upsert ) {
            if ( updateobj.firstElementFieldName()[0] == '$' ) {
                // upsert of an $operation. build a default object
                BSONObj newObj = mods->createNewFromQuery( patternOrig );
                checkNoMods( newObj );
                debug.fastmodinsert = true;
                theDataFileMgr.insertWithObjMod(ns, newObj, false, su);
                if ( logop )
                    logOp( "i", ns, newObj, 0, 0, fromMigrate );

                return UpdateResult( 0 , 1 , 1 , newObj );
            }
            uassert( 10159 ,  "multi update only works with $ operators" , ! multi );
            checkNoMods( updateobj );
            debug.upsert = true;
            BSONObj no = updateobj;
            theDataFileMgr.insertWithObjMod(ns, no, false, su);
            if ( logop )
                logOp( "i", ns, no, 0, 0, fromMigrate );
            return UpdateResult( 0 , 0 , 1 , no );
        }

        return UpdateResult( 0 , isOperatorUpdate , 0 , BSONObj() );
    }
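For readers decoding the positional UpdateResult values returned throughout the function above, a short key (argument names are paraphrased from the call-site comments, not taken from the class definition):

    // UpdateResult( updatedExisting , usedModifiers , numDocs , upsertedObj )
    // e.g. the multi-update exit returns  UpdateResult( 1 , 1 , numModded , BSONObj() )
    //      a plain upsert insert returns  UpdateResult( 0 , 0 , 1 , no )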
Example #21
 BSONObj Query::getHint() const {
     if ( ! isComplex() )
         return BSONObj();
     return obj.getObjectField( "$hint" );
 }
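A small round trip for the accessor above (values are illustrative). hint() is what makes a Query complex by moving the predicate under $query and adding $hint, so getHint() returns a non-empty object only in that case:

     Query q = QUERY( "a" << GT << 5 ).hint( BSON( "a" << 1 ) );
     BSONObj h = q.getHint();                    // { a: 1 }
     BSONObj none = QUERY( "a" << 5 ).getHint(); // not complex -> empty BSONObj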
Example #22
    /* note: this is only (as-is) called when:

             - not multi
             - mods are not indexed
             - not upsert
    */
    static UpdateResult _updateById(bool isOperatorUpdate,
                                    int idIdxNo,
                                    ModSet* mods,
                                    int profile,
                                    NamespaceDetails* d,
                                    NamespaceDetailsTransient *nsdt,
                                    bool su,
                                    const char* ns,
                                    const BSONObj& updateobj,
                                    BSONObj patternOrig,
                                    bool logop,
                                    OpDebug& debug,
                                    bool fromMigrate = false) {

        DiskLoc loc;
        {
            IndexDetails& i = d->idx(idIdxNo);
            BSONObj key = i.getKeyFromQuery( patternOrig );
            loc = i.idxInterface().findSingle(i, i.head, key);
            if( loc.isNull() ) {
                // no upsert support in _updateById yet, so we are done.
                return UpdateResult( 0 , 0 , 0 , BSONObj() );
            }
        }
        Record* r = loc.rec();

        if ( cc().allowedToThrowPageFaultException() && ! r->likelyInPhysicalMemory() ) {
            throw PageFaultException( r );
        }

        /* Look for $inc etc.  Note that in an operator update all fields must use
           update operators; you can't mix in plain field assignments at the moment. */
        if ( isOperatorUpdate ) {
            const BSONObj& onDisk = loc.obj();
            auto_ptr<ModSetState> mss = mods->prepare( onDisk );

            if( mss->canApplyInPlace() ) {
                mss->applyModsInPlace(true);
                DEBUGUPDATE( "\t\t\t updateById doing in place update" );
            }
            else {
                BSONObj newObj = mss->createNewFromMods();
                checkTooLarge(newObj);
                verify(nsdt);
                theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
            }

            if ( logop ) {
                DEV verify( mods->size() );
                BSONObj pattern = patternOrig;
                BSONObj logObj = mss->getOpLogRewrite();
                DEBUGUPDATE( "\t rewrite update: " << logObj );

                // It is possible that the entire mod set was a no-op over this document.  We
                // would have an empty log record in that case. If we call logOp, with an empty
                // record, that would be replicated as "clear this record", which is not what
                // we want. Therefore, to get a no-op in the replica, we simply don't log.
                if ( logObj.nFields() ) {
                    logOp("u", ns, logObj, &pattern, 0, fromMigrate );
                }
            }
            return UpdateResult( 1 , 1 , 1 , BSONObj() );

        } // end $operator update

        // regular update
        BSONElementManipulator::lookForTimestamps( updateobj );
        checkNoMods( updateobj );
        verify(nsdt);
        theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug );
        if ( logop ) {
            logOp("u", ns, updateobj, &patternOrig, 0, fromMigrate );
        }
        return UpdateResult( 1 , 0 , 1 , BSONObj() );
    }
Example #23
    Status getRunnerDistinct(Collection* collection,
                             const BSONObj& query,
                             const string& field,
                             Runner** out) {

        Database* db = cc().database();
        verify(db);

        // This should've been checked by the distinct command.
        verify(collection);

        // TODO: check for idhack here?

        // When can we do a fast distinct hack?
        // 1. There is a plan with just one leaf and that leaf is an ixscan.
        // 2. The ixscan indexes the field we're interested in.
        // 2a: We would be correct if the index merely contains the field, but for now we only look for it as a prefix.
        // 3. The query is covered/no fetch.
        //
        // We go through normal planning (with limited parameters) to see if we can produce
        // a soln with the above properties.

        QueryPlannerParams plannerParams;
        plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;

        IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(false);
        while (ii.more()) {
            const IndexDescriptor* desc = ii.next();
            // The distinct hack can work if any field is in the index but it's not always clear
            // if it's a win unless it's the first field.
            if (desc->keyPattern().firstElement().fieldName() == field) {
                plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
                                                           desc->isMultikey(),
                                                           desc->isSparse(),
                                                           desc->indexName(),
                                                           desc->infoObj()));
            }
        }

        // We only care about the field that we're projecting over.  Have to drop the _id field
        // explicitly because those are .find() semantics.
        //
        // Applying a projection allows the planner to try to give us covered plans.
        BSONObj projection;
        if ("_id" == field) {
            projection = BSON("_id" << 1);
        }
        else {
            projection = BSON("_id" << 0 << field << 1);
        }

        // Apply a projection of the key.  Empty BSONObj() is for the sort.
        CanonicalQuery* cq;
        Status status = CanonicalQuery::canonicalize(collection->ns().ns(), query, BSONObj(), projection, &cq);
        if (!status.isOK()) {
            return status;
        }

        // No index has the field we're looking for.  Punt to normal planning.
        if (plannerParams.indices.empty()) {
            // Takes ownership of cq.
            return getRunner(cq, out);
        }

        // If we're here, we have an index prefixed by the field we're distinct-ing over.

        // If there's no query, we can just distinct-scan one of the indices.
        if (query.isEmpty()) {
            DistinctNode* dn = new DistinctNode();
            dn->indexKeyPattern = plannerParams.indices[0].keyPattern;
            dn->direction = 1;
            IndexBoundsBuilder::allValuesBounds(dn->indexKeyPattern, &dn->bounds);
            dn->fieldNo = 0;

            QueryPlannerParams params;

            // Takes ownership of 'dn'.
            QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
            verify(soln);

            WorkingSet* ws;
            PlanStage* root;
            verify(StageBuilder::build(*soln, &root, &ws));
            *out = new SingleSolutionRunner(collection, cq, soln, root, ws);
            return Status::OK();
        }

        // See if we can answer the query in a fast-distinct compatible fashion.
        vector<QuerySolution*> solutions;
        status = QueryPlanner::plan(*cq, plannerParams, &solutions);
        if (!status.isOK()) {
            return getRunner(cq, out);
        }

        // XXX: why do we need to do this?  planner should prob do this internally
        cq->root()->resetTag();

        // We look for a solution that has an ixscan we can turn into a distinctixscan
        for (size_t i = 0; i < solutions.size(); ++i) {
            if (turnIxscanIntoDistinctIxscan(solutions[i], field)) {
                // Great, we can use solutions[i].  Clean up the other QuerySolution(s).
                for (size_t j = 0; j < solutions.size(); ++j) {
                    if (j != i) {
                        delete solutions[j];
                    }
                }

                // Build and return the SSR over solutions[i].
                WorkingSet* ws;
                PlanStage* root;
                verify(StageBuilder::build(*solutions[i], &root, &ws));
                *out = new SingleSolutionRunner(collection, cq, solutions[i], root, ws);
                return Status::OK();
            }
        }

        // If we're here, the planner made a soln with the restricted index set but we couldn't
        // translate any of them into a distinct-compatible soln.  So, delete the solutions and just
        // go through normal planning.
        for (size_t i = 0; i < solutions.size(); ++i) {
            delete solutions[i];
        }

        return getRunner(cq, out);
    }
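A hedged sketch of a call site for the function above, mirroring how the distinct command might drive it (the collection pointer and the query/field values are assumptions):

        // Hypothetical call site for getRunnerDistinct().
        Runner* rawRunner = NULL;
        Status s = getRunnerDistinct(collection, BSON("x" << GT << 0), "x", &rawRunner);
        if (s.isOK()) {
            std::auto_ptr<Runner> runner(rawRunner); // the caller owns the runner
            // Iterate runner->getNext(...) to pull the (possibly covered) results.
        }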
Example #24
BSONObj KeyPattern::globalMin() const {
    return extendRangeBound(BSONObj(), false);
}
Example #25
 ReplyBuilderInterface&
 ReplyBuilderInterface::setCommandReply(StatusWith<BSONObj> commandReply) {
     auto reply = commandReply.isOK() ? std::move(commandReply.getValue()) : BSONObj();
     return setRawCommandReply(augmentReplyWithStatus(commandReply.getStatus(), reply));
 }
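A minimal sketch of the two ways this funnel is typically fed (the replyBuilder instance and field values are assumptions). A successful command passes its reply object; a failing one passes just a Status, which converts to an error StatusWith:

     replyBuilder.setCommandReply(StatusWith<BSONObj>(BSON("ok" << 1)));
     replyBuilder.setCommandReply(Status(ErrorCodes::BadValue, "unexpected field"));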
Example #26
BSONObj KeyPattern::globalMax() const {
    return extendRangeBound(BSONObj(), true);
}
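Taken together with globalMin() in Example #24, a small sketch of the bounds these produce for a concrete pattern (assuming extendRangeBound fills each key field with MinKey or MaxKey):

    KeyPattern pat(BSON("a" << 1 << "b" << 1));
    BSONObj lo = pat.globalMin(); // expected: { a: MinKey, b: MinKey }
    BSONObj hi = pat.globalMax(); // expected: { a: MaxKey, b: MaxKey }
    // Together [lo, hi] spans every possible key for this pattern.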
Example #27
 void IndexCursor::findKey(const BSONObj &key) {
     const bool isSecondary = !_cl->isPKIndex(_idx);
     const BSONObj &pk = forward() ? minKey : maxKey;
     setPosition(key, isSecondary ? pk : BSONObj());
 }
Example #28
void DatabasesCloner::_onListDatabaseFinish(
    const executor::TaskExecutor::RemoteCommandCallbackArgs& cbd) {
    Status respStatus = cbd.response.status;
    if (respStatus.isOK()) {
        respStatus = getStatusFromCommandResult(cbd.response.data);
    }

    UniqueLock lk(_mutex);
    if (!respStatus.isOK()) {
        LOG(1) << "'listDatabases' failed: " << respStatus;
        _fail_inlock(&lk, respStatus);
        return;
    }

    // There should not be any cloners yet.
    invariant(_databaseCloners.size() == 0);
    const auto respBSON = cbd.response.data;

    auto databasesArray = _parseListDatabasesResponse(respBSON);
    if (!databasesArray.isOK()) {
        LOG(1) << "'listDatabases' returned a malformed response: "
               << databasesArray.getStatus().toString();
        _fail_inlock(&lk, databasesArray.getStatus());
        return;
    }

    auto dbsArray = databasesArray.getValue();
    // Ensure that the 'admin' database is the first element in the array of databases so that it
    // will be the first to be cloned. This allows users to authenticate against a database while
    // initial sync is occurring.
    _setAdminAsFirst(dbsArray);

    for (BSONElement arrayElement : dbsArray) {
        const BSONObj dbBSON = arrayElement.Obj();

        // Check to see if we want to exclude this db from the clone.
        if (!_includeDbFn(dbBSON)) {
            LOG(1) << "Excluding database from the 'listDatabases' response: " << dbBSON;
            continue;
        }

        if (!dbBSON.hasField("name")) {
            LOG(1) << "Excluding database due to the 'listDatabases' response not containing a "
                      "'name' field for this entry: "
                   << dbBSON;
            continue;
        }

        const std::string dbName = dbBSON["name"].str();
        std::shared_ptr<DatabaseCloner> dbCloner{nullptr};

        // filters for DatabasesCloner.
        const auto collectionFilterPred = [dbName](const BSONObj& collInfo) {
            const auto collName = collInfo["name"].str();
            const NamespaceString ns(dbName, collName);
            if (ns.isSystem() && !ns.isLegalClientSystemNS()) {
                LOG(1) << "Skipping 'system' collection: " << ns.ns();
                return false;
            }
            if (!ns.isNormal()) {
                LOG(1) << "Skipping non-normal collection: " << ns.ns();
                return false;
            }

            LOG(2) << "Allowing cloning of collectionInfo: " << collInfo;
            return true;
        };
        const auto onCollectionFinish = [](const Status& status, const NamespaceString& srcNss) {
            if (status.isOK()) {
                LOG(1) << "collection clone finished: " << srcNss;
            } else {
                error() << "collection clone for '" << srcNss << "' failed due to "
                        << status.toString();
            }
        };
        const auto onDbFinish = [this, dbName](const Status& status) {
            _onEachDBCloneFinish(status, dbName);
        };
        Status startStatus = Status::OK();
        try {
            dbCloner.reset(new DatabaseCloner(
                _exec,
                _dbWorkThreadPool,
                _source,
                dbName,
                BSONObj(),  // do not filter collections out during listCollections call.
                collectionFilterPred,
                _storage,  // use storage provided.
                onCollectionFinish,
                onDbFinish));
            if (_scheduleDbWorkFn) {
                dbCloner->setScheduleDbWorkFn_forTest(_scheduleDbWorkFn);
            }
            if (_startCollectionClonerFn) {
                dbCloner->setStartCollectionClonerFn(_startCollectionClonerFn);
            }
            // Start first database cloner.
            if (_databaseCloners.empty()) {
                startStatus = dbCloner->startup();
            }
        } catch (...) {
            startStatus = exceptionToStatus();
        }

        if (!startStatus.isOK()) {
            std::string err = str::stream() << "could not create cloner for database: " << dbName
                                            << " due to: " << startStatus.toString();
            _setStatus_inlock({ErrorCodes::InitialSyncFailure, err});
            error() << err;
            break;  // exit for_each loop
        }

        // add cloner to list.
        _databaseCloners.push_back(dbCloner);
    }
    if (_databaseCloners.size() == 0) {
        if (_status.isOK()) {
            _succeed_inlock(&lk);
        } else {
            _fail_inlock(&lk, _status);
        }
    }
}
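The _includeDbFn filter consulted above is just a predicate over each 'listDatabases' entry. A hedged sketch of one such predicate (excluding "local" is an illustrative policy, not taken from this file):

    // Hypothetical include-db predicate of the shape _includeDbFn expects:
    // clone every database except "local".
    const auto includeDbPred = [](const BSONObj& dbInfo) {
        return dbInfo["name"].str() != "local";
    };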
Example #29
File: config.cpp  Project: cangove/mongo
    bool ConfigServer::checkConfigServersConsistent( string& errmsg , int tries ) const {
        if ( tries <= 0 )
            return false;

        unsigned firstGood = 0;
        int up = 0;
        vector<BSONObj> res;
        for ( unsigned i=0; i<_config.size(); i++ ) {
            BSONObj x;
            try {
                ScopedDbConnection conn( _config[i], 30.0 );
                if ( ! conn->simpleCommand( "config" , &x , "dbhash" ) )
                    x = BSONObj();
                else {
                    x = x.getOwned();
                    if ( up == 0 )
                        firstGood = i;
                    up++;
                }
                conn.done();
            }
            catch ( SocketException& e ) {
                warning() << " couldn't check on config server:" << _config[i] << " ok for now : " << e.toString() << endl;
            }
            res.push_back(x);
        }

        if ( _config.size() == 1 )
            return true;

        if ( up == 0 ) {
            errmsg = "no config servers reachable";
            return false;
        }

        if ( up == 1 ) {
            log( LL_WARNING ) << "only 1 config server reachable, continuing" << endl;
            return true;
        }

        BSONObj base = res[firstGood];
        for ( unsigned i=firstGood+1; i<res.size(); i++ ) {
            if ( res[i].isEmpty() )
                continue;

            string c1 = base.getFieldDotted( "collections.chunks" );
            string c2 = res[i].getFieldDotted( "collections.chunks" );

            string d1 = base.getFieldDotted( "collections.databases" );
            string d2 = res[i].getFieldDotted( "collections.databases" );

            if ( c1 == c2 && d1 == d2 )
                continue;

            stringstream ss;
            ss << "config servers " << _config[firstGood] << " and " << _config[i] << " differ";
            log( LL_WARNING ) << ss.str();
            if ( tries <= 1 ) {
                ss << "\n" << c1 << "\t" << c2 << "\n" << d1 << "\t" << d2;
                errmsg = ss.str();
                return false;
            }

            return checkConfigServersConsistent( errmsg , tries - 1 );
        }

        return true;
    }
Example #30
 BSONObj ReplicationCoordinatorMock::getGetLastErrorDefault() {
     // TODO
     return BSONObj();
 }
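Since the mock just returns an empty object for now, a hedged sketch of what a test could assert against it (the mock's construction is an assumption):

 ReplicationCoordinatorMock mock;                  // assumes default construction
 BSONObj defaults = mock.getGetLastErrorDefault();
 invariant( defaults.isEmpty() );                  // empty until the TODO is resolved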