Example #1
    Status handlePreValidationMongosOptions(const moe::Environment& params,
                                            const std::vector<std::string>& args) {
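        // Informational flags are handled before normal option validation: print
        // the requested output and terminate the process immediately.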
        if (params.count("help")) {
            printMongosHelp(serverOptions);
            ::_exit(EXIT_SUCCESS);
        }
        if (params.count("version")) {
            printShardingVersionInfo(true);
            ::_exit(EXIT_SUCCESS);
        }

        return Status::OK();
    }
Example #2
    bool handlePreValidationMongosOptions(const moe::Environment& params,
                                          const std::vector<std::string>& args) {
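        // Informational flags are handled before normal option validation; returning
        // false tells the caller that startup should not continue.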
        if (params.count("help")) {
            printMongosHelp(moe::startupOptions);
            return false;
        }
        if (params.count("version")) {
            printShardingVersionInfo(true);
            return false;
        }
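        // --test bumps log verbosity and runs the registered startup self-tests.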
        if ( params.count( "test" ) ) {
            ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
                    ::mongo::logger::LogSeverity::Debug(5));
            StartupTest::runTests();
            return false;
        }

        return true;
    }
Example #3
ExitCode runMongosServer(ServiceContext* serviceContext) {
    Client::initThread("mongosMain");
    printShardingVersionInfo(false);

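    // Initialize the wire protocol version range advertised by this mongos.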
    initWireSpec();

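    // The service entry point dispatches incoming client requests to the routing layer.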
    serviceContext->setServiceEntryPoint(
        stdx::make_unique<ServiceEntryPointMongos>(serviceContext));

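    // Build the transport layer from the parsed server parameters; setup() binds the
    // listening sockets, while start() (called later) begins accepting connections.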
    auto tl =
        transport::TransportLayerManager::createWithConfig(&serverGlobalParams, serviceContext);
    auto res = tl->setup();
    if (!res.isOK()) {
        error() << "Failed to set up listener: " << res;
        return EXIT_NET_ERROR;
    }
    serviceContext->setTransportLayer(std::move(tl));

    auto unshardedHookList = stdx::make_unique<rpc::EgressMetadataHookList>();
    unshardedHookList->addHook(stdx::make_unique<rpc::LogicalTimeMetadataHook>(serviceContext));
    unshardedHookList->addHook(
        stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext));
    // TODO SERVER-33053: readReplyMetadata is not called on hooks added through
    // ShardingConnectionHook with _shardedConnections=false, so this hook will not run for
    // connections using globalConnPool.
    unshardedHookList->addHook(stdx::make_unique<rpc::CommittedOpTimeMetadataHook>(serviceContext));

    // Add sharding hooks to both connection pools - ShardingConnectionHook includes auth hooks
    globalConnPool.addHook(new ShardingConnectionHook(false, std::move(unshardedHookList)));

    auto shardedHookList = stdx::make_unique<rpc::EgressMetadataHookList>();
    shardedHookList->addHook(stdx::make_unique<rpc::LogicalTimeMetadataHook>(serviceContext));
    shardedHookList->addHook(
        stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext));
    shardedHookList->addHook(stdx::make_unique<rpc::CommittedOpTimeMetadataHook>(serviceContext));

    shardConnectionPool.addHook(new ShardingConnectionHook(true, std::move(shardedHookList)));

    // Hook up a listener for changes from the ReplicaSetMonitor.
    // It lasts for the scope of this function, i.e. until shutdown finishes.
    auto shardingRSCL =
        ReplicaSetMonitor::getNotifier().makeListener<ShardingReplicaSetChangeListener>(
            serviceContext);

    // The mongos connection pools already take care of authenticating new connections,
    // so the replica set connection shouldn't need to.
    DBClientReplicaSet::setAuthPooledSecondaryConn(false);

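    // mongos cannot run without a hostname; bail out early if it cannot be determined.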
    if (getHostName().empty()) {
        quickExit(EXIT_BADOPTIONS);
    }

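    // Operation context used for the remainder of startup on this thread.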
    auto opCtx = cc().makeOperationContext();

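    // Install the logical clock that tracks cluster time for this mongos.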
    auto logicalClock = stdx::make_unique<LogicalClock>(opCtx->getServiceContext());
    LogicalClock::set(opCtx->getServiceContext(), std::move(logicalClock));

    {
        Status status = initializeSharding(opCtx.get());
        if (!status.isOK()) {
            if (status == ErrorCodes::CallbackCanceled) {
                invariant(globalInShutdownDeprecated());
                log() << "Shutdown called before mongos finished starting up";
                return EXIT_CLEAN;
            }
            error() << "Error initializing sharding system: " << status;
            return EXIT_SHARDING_ERROR;
        }

        Grid::get(opCtx.get())
            ->getBalancerConfiguration()
            ->refreshAndCheck(opCtx.get())
            .transitional_ignore();
    }

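    // Start full-time diagnostic data capture (FTDC).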
    startMongoSFTDC();

    Status status = AuthorizationManager::get(serviceContext)->initialize(opCtx.get());
    if (!status.isOK()) {
        error() << "Initializing authorization data failed: " << status;
        return EXIT_SHARDING_ERROR;
    }

    // Construct the sharding uptime reporter after the startup parameters have been
    // parsed in order to ensure that it picks up the server port instead of reporting
    // the default value.
    shardingUptimeReporter.emplace();
    shardingUptimeReporter->startPeriodicThread();

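    // Background job that reaps timed-out cluster cursors.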
    clusterCursorCleanupJob.go();

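    // Periodically invalidates cached user privilege data so that authorization
    // changes made on the config servers are picked up by this mongos.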
    UserCacheInvalidator cacheInvalidatorThread(AuthorizationManager::get(serviceContext));
    {
        cacheInvalidatorThread.initialize(opCtx.get());
        cacheInvalidatorThread.go();
    }

    PeriodicTask::startRunningPeriodicTasks();

    // Set up the periodic runner for background job execution
    auto runner = makePeriodicRunner(serviceContext);
    runner->startup();
    serviceContext->setPeriodicRunner(std::move(runner));

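    // Wire up killSessions support and the cache that tracks logical sessions routed
    // through this mongos.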
    SessionKiller::set(serviceContext,
                       std::make_shared<SessionKiller>(serviceContext, killSessionsRemote));

    LogicalSessionCache::set(
        serviceContext,
        stdx::make_unique<LogicalSessionCacheImpl>(stdx::make_unique<ServiceLiaisonMongos>(),
                                                   stdx::make_unique<SessionsCollectionSharded>(),
                                                   RouterSessionCatalog::reapSessionsOlderThan));

    status = serviceContext->getServiceExecutor()->start();
    if (!status.isOK()) {
        error() << "Failed to start the service executor: " << redact(status);
        return EXIT_NET_ERROR;
    }

    status = serviceContext->getServiceEntryPoint()->start();
    if (!status.isOK()) {
        error() << "Failed to start the service entry point: " << redact(status);
        return EXIT_NET_ERROR;
    }

    status = serviceContext->getTransportLayer()->start();
    if (!status.isOK()) {
        error() << "Failed to start the transport layer: " << redact(status);
        return EXIT_NET_ERROR;
    }

    serviceContext->notifyStartupComplete();

#if !defined(_WIN32)
    signalForkSuccess();
#else
    if (ntservice::shouldStartService()) {
        ntservice::reportStatus(SERVICE_RUNNING);
        log() << "Service running";
    }
#endif

    // Block until shutdown.
    MONGO_IDLE_THREAD_BLOCK;
    return waitForShutdown();
}