Example #1
bool BSONObjBuilder::hasField(StringData name) const {
    BSONObjIterator i = iterator();
    while (i.more())
        if (name == i.next().fieldName())
            return true;
    return false;
}
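A minimal usage sketch (hypothetical field names): the builder scans its own buffer, so a field is found as soon as it has been appended.
BSONObjBuilder b;
b.append("uid", 42);
bool present = b.hasField("uid");   // true: the iterator finds the field
bool missing = b.hasField("name");  // false: the iterator is exhausted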
Example #2
File: run.cpp Project: bsu-lps/tretretre
Graph* buildGraph(DBClientBase* conn)
{
    auto_ptr<DBClientCursor> cursor = conn->query("vkuu.friends_list", BSONObj());

    if (!cursor.get()) {
        cout << "query failure" << endl;
        return nullptr;
    }

    unique_ptr<Graph> g(new Graph);

    for (int i=0; cursor->more(); ++i ) {
        BSONObj r = cursor->next();

        int uid = r.getField("uid").numberInt();
        BSONObj friends = r.getObjectField("friends");

        BSONObjIterator frIter (friends);

        while (frIter.more()) {
            int fr = frIter.next().numberInt();
            g->addEdge(uid, fr);
        }

        if (i%5000==0)
            cout << "Vertex candidate " << i << " passed" << endl;
    }

    return g.release();
}
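A hedged usage sketch, assuming the legacy C++ driver's DBClientConnection (whose connect() throws on failure) and a local mongod:
DBClientConnection conn;
conn.connect("localhost");  // the legacy driver throws a DBException on failure
std::unique_ptr<Graph> g(buildGraph(&conn));  // re-own the released pointer
if (!g) { /* query failed */ }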
Example #3
    // Skip the key comprised of the first k fields of currentKey and the
    // rest set to max/min key for direction > 0 or < 0 respectively.
    void IndexCursor::skipPrefix(const BSONObj &key, const int k) {
        TOKULOG(3) << "skipPrefix skipping first " << k << " elements in key " << key << endl;
        BSONObjBuilder b(key.objsize());
        BSONObjIterator it = key.begin();
        const int nFields = key.nFields();
        for ( int i = 0; i < nFields; i++ ) {
            if ( i < k ) {
                b.append( it.next() );
            } else {
                if ( _ordering.descending( 1 << i ) ) {
                    // Descending sort order, so min key skips forward.
                    forward() ? b.appendMinKey( "" ) : b.appendMaxKey( "" );
                } else {
                    // Regular ascending order. Max key skips forward.
                    forward() ? b.appendMaxKey( "" ) : b.appendMinKey( "" );
                }
            }
        }

        // This differs from findKey in that we set PK to max to move forward and min
        // to move backward, resulting in a "skip" of the key prefix, not a "find".
        const bool isSecondary = !_cl->isPKIndex(_idx);
        const BSONObj &pk = forward() ? maxKey : minKey;
        setPosition( b.done(), isSecondary ? pk : BSONObj() );
    }
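A concrete trace of the skip, with hypothetical inputs:
// Hypothetical trace: key = {a:5, b:7, c:9}, k = 1, forward cursor,
// all-ascending ordering. The loop appends a:5 verbatim, then MaxKey
// for the b and c positions, so setPosition() lands just past every
// key whose first field is a:5 (with the PK also set to maxKey for
// secondary indexes).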
Example #4
void CollectionMetadata::fillKeyPatternFields() {
    // Parse the shard keys into the states 'keys' and 'keySet' members.
    BSONObjIterator patternIter = _keyPattern.begin();
    while (patternIter.more()) {
        BSONElement current = patternIter.next();

        _keyFields.mutableVector().push_back(new FieldRef);
        FieldRef* const newFieldRef = _keyFields.mutableVector().back();
        newFieldRef->parse(current.fieldNameStringData());
    }
}
Example #5
static void bson_to_array(lua_State *L, const BSONObj &obj) {
    BSONObjIterator it(obj);

    lua_newtable(L);

    int n = 1;
    while (it.more()) {
        BSONElement elem = it.next();

        lua_push_value(L, elem);
        lua_rawseti(L, -2, n++);
    }
}
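A minimal sketch, assuming a live lua_State* L, the lua_push_value helper referenced above, and the BSON_ARRAY builder macro:
bson_to_array(L, BSON_ARRAY("x" << "y"));  // leaves {"x", "y"} on the stack, indexed from 1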
Example #6
static void bson_to_table(lua_State *L, const BSONObj &obj) {
    BSONObjIterator it(obj);

    lua_newtable(L);

    while (it.more()) {
        BSONElement elem = it.next();
        const char *key = elem.fieldName();

        lua_pushstring(L, key);
        lua_push_value(L, elem);
        lua_rawset(L, -3);
    }
}
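The document counterpart, assuming the BSON builder macro: field names become string keys of the resulting Lua table.
bson_to_table(L, BSON("name" << "x" << "n" << 3));
lua_setglobal(L, "doc");  // e.g. expose the table to scripts as `doc`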
Example #7
 INT32 _ixmIndexCB::keyPatternOffset( const CHAR *key ) const
 {
    SDB_ASSERT ( _isInitialized,
                 "index details must be initialized first" ) ;
    BSONObjIterator i ( keyPattern() ) ;
    INT32 n = 0 ;
    while ( i.more() )
    {
       BSONElement e = i.next() ;
       if ( ossStrcmp ( key, e.fieldName() ) == 0 )
          return n ;
       n++ ;
    }
    return -1 ;
 }
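A hedged sketch of the lookup, assuming an initialized index control block icb whose key pattern is {a:1, b:1}:
INT32 posB = icb.keyPatternOffset( "b" ) ;  // 1: second field of the pattern
INT32 posZ = icb.keyPatternOffset( "z" ) ;  // -1: not part of the pattern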
Example #8
/* add all the fields from the object specified to this object if they don't exist */
BSONObjBuilder& BSONObjBuilder::appendElementsUnique(BSONObj x) {
    std::set<std::string> have;
    {
        BSONObjIterator i = iterator();
        while (i.more())
            have.insert(i.next().fieldName());
    }

    BSONObjIterator it(x);
    while (it.more()) {
        BSONElement e = it.next();
        if (have.count(e.fieldName()))
            continue;
        append(e);
    }
    return *this;
}
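A minimal sketch with hypothetical values: merging defaults into a builder without overwriting fields that are already present.
BSONObjBuilder b;
b.append("host", "localhost");
b.appendElementsUnique(BSON("host" << "example.com" << "port" << 27017));
BSONObj merged = b.obj();  // {host: "localhost", port: 27017}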
Example #9
 BOOLEAN _ixmIndexKeyGen::validateKeyDef ( const BSONObj &keyDef )
 {
    BSONObjIterator i ( keyDef ) ;
    INT32 count = 0 ;
    while ( i.more () )
    {
       ++count ;
       BSONElement ie = i.next () ;
       if ( ie.type() != NumberInt ||
            ( ie.numberInt() != -1 &&
              ie.numberInt() != 1 ) )
       {
          return FALSE ;
       }
    }
    return 0 != count ;
 }
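A short sketch of the accepted shapes, assuming an _ixmIndexKeyGen instance gen and the BSON builder macro: every element must be the NumberInt 1 or -1, and the definition must be non-empty.
gen.validateKeyDef( BSON( "a" << 1 << "b" << -1 ) ) ;  // TRUE
gen.validateKeyDef( BSON( "a" << "hashed" ) ) ;        // FALSE: value is not NumberInt
gen.validateKeyDef( BSONObj() ) ;                      // FALSE: empty key definition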
Example #10
ExitCode _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    _initWireSpec();
    auto globalServiceContext = getGlobalServiceContext();

    globalServiceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
    globalServiceContext->setOpObserver(stdx::make_unique<OpObserver>());

    DBDirectClientFactory::get(globalServiceContext)
        .registerImplementation([](OperationContext* txn) {
            return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
        });

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if (replSettings.isMaster())
            l << " master=" << replSettings.isMaster();
        if (replSettings.isSlave())
            l << " slave=" << (int)replSettings.isSlave();

        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

#if defined(_WIN32)
    VersionInfoInterface::instance().logTargetMinOS();
#endif

    logProcessDetails();

    checked_cast<ServiceContextMongoD*>(getGlobalServiceContext())->createLockFile();

    transport::TransportLayerLegacy::Options options;
    options.port = listenPort;
    options.ipList = serverGlobalParams.bind_ip;

    auto sep =
        stdx::make_unique<ServiceEntryPointMongod>(getGlobalServiceContext()->getTransportLayer());
    auto sepPtr = sep.get();

    getGlobalServiceContext()->setServiceEntryPoint(std::move(sep));

    // Create, start, and attach the TL
    auto transportLayer = stdx::make_unique<transport::TransportLayerLegacy>(options, sepPtr);
    auto res = transportLayer->setup();
    if (!res.isOK()) {
        error() << "Failed to set up listener: " << res;
        return EXIT_NET_ERROR;
    }

    std::shared_ptr<DbWebServer> dbWebServer;
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        dbWebServer.reset(new DbWebServer(serverGlobalParams.bind_ip,
                                          serverGlobalParams.port + 1000,
                                          getGlobalServiceContext(),
                                          new RestAdminAccess()));
        if (!dbWebServer->setupSockets()) {
            error() << "Failed to set up sockets for HTTP interface during startup.";
            return EXIT_NET_ERROR;
        }
    }

    getGlobalServiceContext()->initializeGlobalStorageEngine();

#ifdef MONGO_CONFIG_WIREDTIGER_ENABLED
    if (WiredTigerCustomizationHooks::get(getGlobalServiceContext())->restartRequired()) {
        exitCleanly(EXIT_CLEAN);
    }
#endif

    // Warn if we detect configurations for multiple registered storage engines in
    // the same configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        BSONObj storageParamsObj = storageElement.Obj();
        BSONObjIterator i = storageParamsObj.begin();
        while (i.more()) {
            BSONElement e = i.next();
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    if (!getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager()) {
        if (moe::startupOptionsParsed.count("replication.enableMajorityReadConcern") &&
            moe::startupOptionsParsed["replication.enableMajorityReadConcern"].as<bool>()) {
            // Note: we are intentionally only erroring if the user explicitly requested that we
            // enable majority read concern. We do not error if they are implicitly enabled for
            // CSRS because a required step in the upgrade procedure can involve an mmapv1 node in
            // the CSRS in the REMOVED state. This is handled by the TopologyCoordinator.
            invariant(replSettings.isMajorityReadConcernEnabled());
            severe() << "Majority read concern requires a storage engine that supports"
                     << " snapshots, such as wiredTiger. " << storageGlobalParams.engine
                     << " does not support snapshots.";
            exitCleanly(EXIT_BADOPTIONS);
        }
    }

    logMongodStartupWarnings(storageGlobalParams, serverGlobalParams);

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO:  This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    if (!storageGlobalParams.readOnly) {
        boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
    }

    if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalRecoverOnly)
        return EXIT_NET_ERROR;

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
    }

    auto startupOpCtx = getGlobalServiceContext()->makeOperationContext(&cc());

    repairDatabasesAndCheckVersion(startupOpCtx.get());

    if (storageGlobalParams.upgrade) {
        log() << "finished checking dbs";
        exitCleanly(EXIT_CLEAN);
    }

    uassertStatusOK(getGlobalAuthorizationManager()->initialize(startupOpCtx.get()));

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned)(curTimeMicros64() ^ startupSrandTimer.micros()));

    // The snapshot thread provides historical collection level and lock statistics for use
    // by the web interface. Only needed when HTTP is enabled.
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        statsSnapshotThread.go();

        invariant(dbWebServer);
        stdx::thread web(stdx::bind(&webServerListenThread, dbWebServer));
        web.detach();
    }

#ifndef _WIN32
    mongo::signalForkSuccess();
#endif
    AuthorizationManager* globalAuthzManager = getGlobalAuthorizationManager();
    if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
        Status status = authindex::verifySystemIndexes(startupOpCtx.get());
        if (!status.isOK()) {
            log() << redact(status);
            exitCleanly(EXIT_NEED_UPGRADE);
        }

        // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
        int foundSchemaVersion;
        status =
            globalAuthzManager->getAuthorizationVersion(startupOpCtx.get(), &foundSchemaVersion);
        if (!status.isOK()) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but startup could not verify schema version: " << status;
            exitCleanly(EXIT_NEED_UPGRADE);
        }
        if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but found " << foundSchemaVersion << ". In order to upgrade "
                  << "the auth schema, first downgrade MongoDB binaries to version "
                  << "2.6 and then run the authSchemaUpgrade command.";
            exitCleanly(EXIT_NEED_UPGRADE);
        }
    } else if (globalAuthzManager->isAuthEnabled()) {
        error() << "Auth must be disabled when starting without auth schema validation";
        exitCleanly(EXIT_BADOPTIONS);
    } else {
        // If authSchemaValidation is disabled and server is running without auth,
        // warn the user and continue startup without authSchema metadata checks.
        log() << startupWarningsLog;
        log() << "** WARNING: Startup auth schema validation checks are disabled for the "
                 "database."
              << startupWarningsLog;
        log() << "**          This mode should only be used to manually repair corrupted auth "
                 "data."
              << startupWarningsLog;
    }

    auto shardingInitialized =
        uassertStatusOK(ShardingState::get(startupOpCtx.get())
                            ->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));
    if (shardingInitialized) {
        reloadShardRegistryUntilSuccess(startupOpCtx.get());
    }

    if (!storageGlobalParams.readOnly) {
        logStartup(startupOpCtx.get());

        startFTDC();

        getDeleter()->startWorkers();

        restartInProgressIndexesFromLastShutdown(startupOpCtx.get());

        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
            // Note: For replica sets, ShardingStateRecovery happens on transition to primary.
            if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
                uassertStatusOK(ShardingStateRecovery::recover(startupOpCtx.get()));
            }
        } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
            uassertStatusOK(
                initializeGlobalShardingStateForMongod(startupOpCtx.get(),
                                                       ConnectionString::forLocal(),
                                                       kDistLockProcessIdForConfigServer));
            Balancer::create(startupOpCtx->getServiceContext());
        }

        repl::getGlobalReplicationCoordinator()->startup(startupOpCtx.get());

        const unsigned long long missingRepl =
            checkIfReplMissingFromCommandLine(startupOpCtx.get());
        if (missingRepl) {
            log() << startupWarningsLog;
            log() << "** WARNING: mongod started without --replSet yet " << missingRepl
                  << " documents are present in local.system.replset" << startupWarningsLog;
            log() << "**          Restart with --replSet unless you are doing maintenance and "
                  << " no other clients are connected." << startupWarningsLog;
            log() << "**          The TTL collection monitor will not start because of this."
                  << startupWarningsLog;
            log() << "**         ";
            log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
            log() << startupWarningsLog;
        } else {
            startTTLBackgroundJob();
        }

        if (!replSettings.usingReplSets() && !replSettings.isSlave() &&
            storageGlobalParams.engine != "devnull") {
            ScopedTransaction transaction(startupOpCtx.get(), MODE_X);
            Lock::GlobalWrite lk(startupOpCtx.get()->lockState());
            FeatureCompatibilityVersion::setIfCleanStartup(
                startupOpCtx.get(), repl::StorageInterface::get(getGlobalServiceContext()));
        }
    }

    startClientCursorMonitor();

    PeriodicTask::startRunningPeriodicTasks();

    // MessageServer::run will return when exit code closes its socket and we don't need the
    // operation context anymore
    startupOpCtx.reset();

    auto start = getGlobalServiceContext()->addAndStartTransportLayer(std::move(transportLayer));
    if (!start.isOK()) {
        error() << "Failed to start the listener: " << start.toString();
        return EXIT_NET_ERROR;
    }

    return waitForShutdown();
}
Example #11
    void IndexScanNode::computeProperties() {
        _sorts.clear();

        BSONObj sortPattern = QueryPlannerAnalysis::getSortPattern(indexKeyPattern);
        if (direction == -1) {
            sortPattern = QueryPlannerCommon::reverseSortObj(sortPattern);
        }

        _sorts.insert(sortPattern);

        const int nFields = sortPattern.nFields();
        if (nFields > 1) {
            // We're sorted not only by sortPattern but also by all prefixes of it.
            for (int i = 0; i < nFields; ++i) {
                // Make obj out of fields [0,i]
                BSONObjIterator it(sortPattern);
                BSONObjBuilder prefixBob;
                for (int j = 0; j <= i; ++j) {
                    prefixBob.append(it.next());
                }
                _sorts.insert(prefixBob.obj());
            }
        }

        // If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's sorted
        // both by the index key pattern and by the pattern {b: 1}.

        // See if there are any fields with equalities for bounds.  We can drop these
        // from any sort orders created.
        set<string> equalityFields;
        if (!bounds.isSimpleRange) {
            // Figure out how many fields are point intervals.
            for (size_t i = 0; i < bounds.fields.size(); ++i) {
                const OrderedIntervalList& oil = bounds.fields[i];
                if (oil.intervals.size() != 1) {
                    continue;
                }
                const Interval& ival = oil.intervals[0];
                if (!ival.isPoint()) {
                    continue;
                }
                equalityFields.insert(oil.name);
            }
        }

        if (equalityFields.empty()) {
            return;
        }

        // TODO: Each field in equalityFields could be dropped from the sort order since it is
        // a point interval.  The full set of sort orders is as follows:
        // For each sort in _sorts:
        //    For each drop in powerset(equalityFields):
        //        Remove fields in 'drop' from 'sort' and add resulting sort to output.
        //
        // Since this involves a powerset, we don't generate the full set of possibilities.
        // Instead, we generate sort orders by removing possible contiguous prefixes of equality
        // predicates. For example, if the key pattern is {a: 1, b: 1, c: 1, d: 1, e: 1}
        // and there are equality predicates on 'a', 'b', and 'c', then here we add the sort
        // orders {b: 1, c: 1, d: 1, e: 1} and {c: 1, d: 1, e: 1}. (We also end up adding
        // {d: 1, e: 1} and {d: 1}, but this is done later on.)
        BSONObjIterator it(sortPattern);
        BSONObjBuilder suffixBob;
        while (it.more()) {
            BSONElement elt = it.next();
            // TODO: string slowness.  fix when bounds are stringdata not string.
            if (equalityFields.end() == equalityFields.find(string(elt.fieldName()))) {
                suffixBob.append(elt);
                // This field isn't a point interval, can't drop.
                break;
            }

            // We add the sort obtained by dropping 'elt' and all preceding elements from the index
            // key pattern.
            BSONObjIterator droppedPrefixIt = it;
            BSONObjBuilder droppedPrefixBob;
            while (droppedPrefixIt.more()) {
                droppedPrefixBob.append(droppedPrefixIt.next());
            }
            _sorts.insert(droppedPrefixBob.obj());
        }

        while (it.more()) {
            suffixBob.append(it.next());
        }

        // We've found the suffix following the contiguous prefix of equality fields.
        //   Ex. For index {a: 1, b: 1, c: 1, d: 1} and query {a: 3, b: 5}, this suffix
        //   of the key pattern is {c: 1, d: 1}.
        //
        // Now we have to add all prefixes of this suffix as possible sort orders.
        //   Ex. Continuing the example from above, we have to include sort orders
        //   {c: 1} and {c: 1, d: 1}.
        BSONObj filterPointsObj = suffixBob.obj();
        for (int i = 0; i < filterPointsObj.nFields(); ++i) {
            // Make obj out of fields [0,i]
            BSONObjIterator it(filterPointsObj);
            BSONObjBuilder prefixBob;
            for (int j = 0; j <= i; ++j) {
                prefixBob.append(it.next());
            }
            _sorts.insert(prefixBob.obj());
        }
    }
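A concrete trace of the sort-order computation, with hypothetical inputs:
// For indexKeyPattern {a:1, b:1, c:1}, direction 1, and a point (equality)
// interval on 'a' only, _sorts ends up containing:
//   {a:1, b:1, c:1}       the full sort pattern
//   {a:1}, {a:1, b:1}     its prefixes
//   {b:1, c:1}            the pattern with the equality prefix 'a' dropped
//   {b:1}                 prefixes of that suffix, added by the final loop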
Example #12
File: db.cpp Project: WeetLee/mongo
static void _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    // Due to SERVER-15389, we must setupSockets first thing at startup in order to avoid
    // obtaining too high a file descriptor for our calls to select().
    MessageServer::Options options;
    options.port = listenPort;
    options.ipList = serverGlobalParams.bind_ip;

    MessageServer* server = createServer(options, new MyMessageHandler());
    server->setAsTimeTracker();

    // This is what actually creates the sockets, but does not yet listen on them because we
    // do not want connections to just hang if recovery takes a very long time.
    server->setupSockets();

    std::shared_ptr<DbWebServer> dbWebServer;
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        dbWebServer.reset(new DbWebServer(
            serverGlobalParams.bind_ip, serverGlobalParams.port + 1000, new RestAdminAccess()));
        dbWebServer->setupSockets();
    }

    getGlobalServiceContext()->initializeGlobalStorageEngine();

    // Warn if we detect configurations for multiple registered storage engines in
    // the same configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        BSONObj storageParamsObj = storageElement.Obj();
        BSONObjIterator i = storageParamsObj.begin();
        while (i.more()) {
            BSONElement e = i.next();
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    getGlobalServiceContext()->setOpObserver(stdx::make_unique<OpObserver>());

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if (replSettings.master)
            l << " master=" << replSettings.master;
        if (replSettings.slave)
            l << " slave=" << (int)replSettings.slave;

        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;
    logMongodStartupWarnings(storageGlobalParams);

#if defined(_WIN32)
    printTargetMinOS();
#endif

    logProcessDetails();

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO:  This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");

    if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalRecoverOnly)
        return;

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
    }

    repairDatabasesAndCheckVersion();

    if (storageGlobalParams.upgrade) {
        log() << "finished checking dbs" << endl;
        exitCleanly(EXIT_CLEAN);
    }

    {
        OperationContextImpl txn;
        uassertStatusOK(getGlobalAuthorizationManager()->initialize(&txn));
    }

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned)(curTimeMicros64() ^ startupSrandTimer.micros()));

    // The snapshot thread provides historical collection level and lock statistics for use
    // by the web interface. Only needed when HTTP is enabled.
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        statsSnapshotThread.go();

        invariant(dbWebServer);
        stdx::thread web(stdx::bind(&webServerListenThread, dbWebServer));
        web.detach();
    }

    {
        OperationContextImpl txn;

#ifndef _WIN32
        mongo::signalForkSuccess();
#endif

        Status status = authindex::verifySystemIndexes(&txn);
        if (!status.isOK()) {
            log() << status.reason();
            exitCleanly(EXIT_NEED_UPGRADE);
        }

        // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
        int foundSchemaVersion;
        status =
            getGlobalAuthorizationManager()->getAuthorizationVersion(&txn, &foundSchemaVersion);
        if (!status.isOK()) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but startup could not verify schema version: " << status.toString() << endl;
            exitCleanly(EXIT_NEED_UPGRADE);
        }
        if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but found " << foundSchemaVersion << ". In order to upgrade "
                  << "the auth schema, first downgrade MongoDB binaries to version "
                  << "2.6 and then run the authSchemaUpgrade command." << endl;
            exitCleanly(EXIT_NEED_UPGRADE);
        }

        getDeleter()->startWorkers();

        restartInProgressIndexesFromLastShutdown(&txn);

        repl::getGlobalReplicationCoordinator()->startReplication(&txn);

        const unsigned long long missingRepl = checkIfReplMissingFromCommandLine(&txn);
        if (missingRepl) {
            log() << startupWarningsLog;
            log() << "** WARNING: mongod started without --replSet yet " << missingRepl
                  << " documents are present in local.system.replset" << startupWarningsLog;
            log() << "**          Restart with --replSet unless you are doing maintenance and "
                  << " no other clients are connected." << startupWarningsLog;
            log() << "**          The TTL collection monitor will not start because of this."
                  << startupWarningsLog;
            log() << "**         ";
            log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
            log() << startupWarningsLog;
        } else {
            startTTLBackgroundJob();
        }
    }

    startClientCursorMonitor();

    PeriodicTask::startRunningPeriodicTasks();

    logStartup();

    // MessageServer::run will return when exit code closes its socket
    server->run();
}
Example #13
File: model.cpp Project: 7segments/mongo
    void Model::save( bool safe ) {
        scoped_ptr<ScopedDbConnection> conn(
                ScopedDbConnection::getScopedDbConnection (modelServer() ) );

        BSONObjBuilder b;
        serialize( b );

        BSONElement myId;
        {
            BSONObjIterator i = b.iterator();
            while ( i.more() ) {
                BSONElement e = i.next();
                if ( strcmp( e.fieldName() , "_id" ) == 0 ) {
                    myId = e;
                    break;
                }
            }
        }

        if ( myId.type() ) {
            if ( _id.isEmpty() ) {
                _id = myId.wrap();
            }
            else if ( myId.woCompare( _id.firstElement() ) ) {
                stringstream ss;
                ss << "_id from serialize and stored differ: ";
                ss << '[' << myId << "] != ";
                ss << '[' << _id.firstElement() << ']';
                throw UserException( 13121 , ss.str() );
            }
        }

        if ( _id.isEmpty() ) {
            OID oid;
            oid.init();
            b.appendOID( "_id" , &oid );

            BSONObj o = b.obj();
            conn->get()->insert( getNS() , o );
            _id = o["_id"].wrap().getOwned();

            LOG(4) << "inserted new model " << getNS() << "  " << o << endl;
        }
        else {
            if ( myId.eoo() ) {
                myId = _id["_id"];
                b.append( myId );
            }

            verify( ! myId.eoo() );

            BSONObjBuilder qb;
            qb.append( myId );

            BSONObj q = qb.obj();
            BSONObj o = b.obj();

        LOG(4) << "updated model " << getNS() << "  " << q << " " << o << endl;

            conn->get()->update( getNS() , q , o , true );

        }

        string errmsg = "";
        if ( safe )
            errmsg = conn->get()->getLastError();

        conn->done();

        if ( safe && errmsg.size() )
            throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
    }
Example #14
   INT32 _omAgentNodeMgr::_addANode( const CHAR *arg1, const CHAR *arg2,
                                     BOOLEAN needLock, BOOLEAN isModify,
                                     string *omsvc )
   {
      INT32 rc = SDB_OK ;
      const CHAR *pSvcName = NULL ;
      const CHAR *pDBPath = NULL ;
      string otherCfg ;

      CHAR dbPath[ OSS_MAX_PATHSIZE + 1 ] = { 0 } ;
      CHAR cfgPath[ OSS_MAX_PATHSIZE + 1 ] = { 0 } ;
      CHAR cfgFile[ OSS_MAX_PATHSIZE + 1 ] = { 0 } ;

      BOOLEAN createDBPath    = FALSE ;
      BOOLEAN createCfgPath   = FALSE ;
      BOOLEAN createCfgFile   = FALSE ;
      BOOLEAN hasLock         = FALSE ;

      try
      {
         stringstream ss ;
         BSONObj objArg1( arg1 ) ;
         BSONObjIterator it ( objArg1 ) ;
         while ( it.more() )
         {
            BSONElement e = it.next() ;
            if ( 0 == ossStrcmp( e.fieldName(), PMD_OPTION_SVCNAME ) )
            {
               if ( e.type() != String )
               {
                  PD_LOG( PDERROR, "Param[%s] type[%d] is not string",
                          PMD_OPTION_SVCNAME, e.type() ) ;
                  rc = SDB_INVALIDARG ;
                  goto error ;
               }
               pSvcName = e.valuestrsafe() ;
            }
            else if ( 0 == ossStrcmp( e.fieldName(), PMD_OPTION_DBPATH ) )
            {
               if ( e.type() != String )
               {
                  PD_LOG( PDERROR, "Param[%s] type[%d] is not string",
                          PMD_OPTION_DBPATH, e.type() ) ;
                  rc = SDB_INVALIDARG ;
                  goto error ;
               }
               pDBPath = e.valuestrsafe() ;
            }
            else
            {
               ss << e.fieldName() << "=" ;
               switch( e.type() )
               {
                  case NumberDouble :
                     ss << e.numberDouble () ;
                     break ;
                  case NumberInt :
                     ss << e.numberInt () ;
                     break ;
                  case NumberLong :
                     ss << e.numberLong () ;
                     break ;
                  case String :
                     ss << e.valuestrsafe () ;
                     break ;
                  case Bool :
                     ss << ( e.boolean() ? "TRUE" : "FALSE" ) ;
                     break ;
                  default :
                     PD_LOG ( PDERROR, "Unexpected type[%d] for %s",
                              e.type(), e.toString().c_str() ) ;
                     rc = SDB_INVALIDARG ;
                     goto error ;
               }
               ss << endl ;
            }
         }
         otherCfg = ss.str() ;
      }
      catch( std::exception &e )
      {
         PD_LOG( PDERROR, "Occur exception: %s", e.what() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

      if ( !pSvcName || !pDBPath )
      {
         PD_LOG( PDERROR, "Param [%s] or [%s] is not config",
                 PMD_OPTION_SVCNAME, PMD_OPTION_DBPATH ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

      if ( !ossGetRealPath( pDBPath, dbPath, OSS_MAX_PATHSIZE ) )
      {
         PD_LOG( PDERROR, "Invalid db path: %s", pDBPath ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

      if ( needLock )
      {
         lockBucket( pSvcName ) ;
         hasLock = TRUE ;
      }

      if ( isModify && !getNodeProcessInfo( pSvcName ) )
      {
         rc = SDBCM_NODE_NOTEXISTED ;
         goto error ;
      }

      rc = ossAccess( dbPath, W_OK ) ;
      if ( SDB_PERM == rc )
      {
         PD_LOG ( PDERROR, "Permission error for path: %s", dbPath ) ;
         goto error ;
      }
      else if ( SDB_FNE == rc )
      {
         rc = ossMkdir ( dbPath, OSS_CREATE|OSS_READWRITE ) ;
         if ( rc )
         {
            PD_LOG ( PDERROR, "Failed to create config file in path: %s, "
                     "rc: %d", dbPath, rc ) ;
            goto error ;
         }
         createDBPath = TRUE ;
      }
      else if ( rc )
      {
         PD_LOG ( PDERROR, "System error for access path: %s, rc: %d",
                  dbPath, rc ) ;
         goto error ;
      }

      rc = utilBuildFullPath( sdbGetOMAgentOptions()->getLocalCfgPath(),
                              pSvcName, OSS_MAX_PATHSIZE, cfgPath ) ;
      if ( rc )
      {
         PD_LOG( PDERROR, "Build config path for service[%s] failed, rc: %d",
                 pSvcName, rc ) ;
         goto error ;
      }

      rc = ossAccess( cfgPath, W_OK ) ;
      if ( SDB_PERM == rc )
      {
         PD_LOG ( PDERROR, "Permission error for path[%s]", cfgPath ) ;
         goto error ;
      }
      else if ( SDB_FNE == rc )
      {
         rc = ossMkdir ( cfgPath, OSS_CREATE|OSS_READWRITE ) ;
         if ( rc )
         {
            PD_LOG ( PDERROR, "Failed to create directory: %s, rc: %d",
                     cfgPath, rc ) ;
            goto error ;
         }
         createCfgPath = TRUE ;
      }
      else if ( rc )
      {
         PD_LOG ( PDERROR, "System error for access path: %s, rc: %d",
                  cfgPath, rc ) ;
         goto error ;
      }
      else if ( !isModify )
      {
         PD_LOG ( PDERROR, "service[%s] node existed", pSvcName ) ;
         rc = SDBCM_NODE_EXISTED ;
         goto error ;
      }

      rc = utilBuildFullPath( cfgPath, PMD_DFT_CONF, OSS_MAX_PATHSIZE,
                              cfgFile ) ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Build config file for service[%s] failed, rc: %d",
                  pSvcName, rc ) ;
         goto error ;
      }
      {
         pmdOptionsCB nodeOptions ;
         stringstream ss ;
         ss << PMD_OPTION_SVCNAME << "=" << pSvcName << endl ;
         ss << PMD_OPTION_DBPATH << "=" << dbPath << endl ;
         ss << otherCfg ;

         rc = utilWriteConfigFile( cfgFile, ss.str().c_str(),
                                   isModify ? FALSE : TRUE ) ;
         if ( rc )
         {
            PD_LOG( PDERROR, "Write config file[%s] failed, rc: %d",
                    cfgFile, rc ) ;
            goto error ;
         }
         createCfgFile = TRUE ;

         rc = nodeOptions.initFromFile( cfgFile, FALSE ) ;
         if ( rc )
         {
            PD_LOG( PDERROR, "Extract node[%s] config failed, rc: %d",
                    pSvcName, rc ) ;
            goto error ;
         }
         if ( omsvc )
         {
            *omsvc = nodeOptions.getOMService() ;
         }
      }

      if ( isModify || !arg2 )
      {
         goto done ;
      }

      try
      {
         CHAR cataCfgFile[ OSS_MAX_PATHSIZE + 1 ] = { 0 } ;
         BSONObj objArg2( arg2 ) ;
         stringstream ss ;
         if ( objArg2.isEmpty() )
         {
            goto done ;
         }
         rc = utilBuildFullPath( cfgPath, PMD_DFT_CAT, OSS_MAX_PATHSIZE,
                                 cataCfgFile ) ;
         if ( rc )
         {
            PD_LOG( PDERROR, "Build cat config file failed in service[%s], "
                    "rc: %d", pSvcName, rc ) ;
            goto error ;
         }
         ss << objArg2 << endl ;

         rc = utilWriteConfigFile( cataCfgFile, ss.str().c_str(), TRUE ) ;
         if ( rc )
         {
            PD_LOG( PDERROR, "Write cat file[%s] failed in service[%s], rc: %d",
                    cataCfgFile, pSvcName, rc ) ;
            goto error ;
         }
      }
      catch( std::exception &e )
      {
         PD_LOG( PDERROR, "Occur exception extracting the second arg for "
                 "service[%s]: %s", pSvcName, e.what() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

   done:
      if ( SDB_OK == rc )
      {
         if ( !isModify )
         {
            addNodeProcessInfo( pSvcName ) ;
            PD_LOG( PDEVENT, "Add node[%s] succeed", pSvcName ) ;
         }
         else
         {
            PD_LOG( PDEVENT, "Modify node[%s] succeed", pSvcName ) ;
         }
      }
      if ( hasLock )
      {
         releaseBucket( pSvcName ) ;
      }
      return rc ;
   error:
      if ( createCfgFile )
      {
         ossDelete( cfgFile ) ;
      }
      if ( createCfgPath )
      {
         ossDelete( cfgPath ) ;
      }
      if ( createDBPath )
      {
         ossDelete( dbPath ) ;
      }
      goto done ;
   }
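For illustration, assuming PMD_OPTION_SVCNAME and PMD_OPTION_DBPATH expand to "svcname" and "dbpath", a hypothetical arg1 of {svcname: "11810", dbpath: "/data/11810", logfilenum: 20} would yield a node config file of the form:
svcname=11810
dbpath=/data/11810
logfilenum=20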
Example #15
    int IndexCursor::skipToNextKey( const BSONObj &currentKey ) {
        int skipPrefixIndex = _boundsIterator->advance( currentKey );
        if ( skipPrefixIndex == -2 ) { 
            // We are done iterating completely.
            _ok = false;
            return -2;
        }
        else if ( skipPrefixIndex == -1 ) { 
            // We should skip nothing.
            return -1;
        }
    
        // We should skip to a further key, efficiently.
        //
        // If after(), skip to the first key greater/less than the key comprised
        // of the first "skipPrefixIndex" elements of currentKey, and the rest
        // set to MaxKey/MinKey for direction > 0 and direction < 0 respectively.
        // eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1}, direction > 0,  so we skip
        // to the first key greater than {a:1, b:maxkey, c:maxkey}
        //
        // If after() is false, we use the same key prefix but set the remaining
        // elements to the elements described by cmp(), in order.
        // eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1} and cmp() [b:5, c:11],
        // so we skip to {a:1, b:5, c:11}, also noting direction.
        if ( _boundsIterator->after() ) {
            skipPrefix( currentKey, skipPrefixIndex );
        } else {
            BSONObjBuilder b(currentKey.objsize());
            BSONObjIterator it = currentKey.begin();
            const vector<const BSONElement *> &endKeys = _boundsIterator->cmp();
            const int nFields = currentKey.nFields();
            for ( int i = 0; i < nFields; i++ ) {
                if ( i < skipPrefixIndex ) {
                    verify( it.more() );
                    b.append( it.next() );
                } else {
                    b.appendAs( *endKeys[i] , "" );
                }
            }
            findKey( b.done() );

            // Skip past key prefixes that are not supposed to be inclusive
            // as described by _boundsIterator->inc() and endKeys
            //
            // We'll spend at worst nFields^2 time ensuring all key elements
            // are properly set if all the inclusive bits are false and we
            // keep landing on keys where the ith element of curr == endkeys[i].
            //
            // This complexity is usually ok, since this skipping is supposed to
            // save us from really big linear scans across the key space in
            // some pathological cases. It's not clear whether or not small
            // cases are hurt too badly by this algorithm.
            bool allInclusive = true;
            const vector<bool> &inclusive = _boundsIterator->inc();
            for ( int i = 0; i < nFields; i++ ) {
                if ( !inclusive[i] ) {
                    allInclusive = false;
                    break;
                }
            }
again:      while ( !allInclusive && ok() ) {
                BSONObj key = _currKey;
                it = key.begin();
                dassert( nFields == key.nFields() );
                for ( int i = 0; i < nFields; i++ ) {
                    const BSONElement e = it.next();
                    if ( i >= skipPrefixIndex && !inclusive[i] && e.valuesEqual(*endKeys[i]) ) {
                        // The ith element equals the ith endKey but it's not supposed to be inclusive.
                        // Skipping to the next value for the ith element involves skipping a prefix 
                        // with i + 1 elements.
                        skipPrefix( key, i + 1 );
                        goto again;
                    }
                }
                break;
            }
        }
        return 0;
    }