XmlOutputter::XmlOutputter(CppUnit::TextTestRunner* runner, std::ostream& stream)
    : CppUnit::XmlOutputter(&runner->result(), stream) {
    m_results = new CppUnit::TimerTestResult();
    m_listener = new CppUnit::TimerTestListener(m_results);
    m_hook = new CppUnit::TimerXmlOutputterHook(m_results);
    runner->eventManager().addListener(m_listener);
    addHook(m_hook);
}
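// A hedged usage sketch, assuming the stock CppUnit harness: setOutputter()
// and TestFactoryRegistry are standard CppUnit, while runTimedSuite() and the
// "results.xml" path are illustrative. The outputter is constructed before
// run() so that the timer listener it registers sees every test event.
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TextTestRunner.h>
#include <fstream>

static int runTimedSuite() {
    CppUnit::TextTestRunner runner;
    runner.addTest(CppUnit::TestFactoryRegistry::getRegistry().makeTest());
    std::ofstream xml("results.xml");
    runner.setOutputter(new XmlOutputter(&runner, xml));  // runner takes ownership
    return runner.run() ? 0 : 1;
}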
void loadHookList(Buffer &b) {
    int n;
    freeHookList();
    b.read((char*)&n, sizeof(n));
    for (int i = 0; i < n; i++) {
        unsigned int addr;
        b.read((char*)&addr, sizeof(addr));
        char *name;
        b.readString(&name);
        hookfunc hf = findAvailableHookFunc(name);
        if (hf) {
            //need to find a way to pass valid id here
            msg("x86emu: Adding hook for %s at %X\n", name, addr);
            addHook(name, addr, hf, 0);
        }
        free(name);
    }
}
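// loadHookList() appears to expect a count-prefixed record stream: an int
// count, then per hook a raw unsigned address followed by a name that
// readString() can recover (assumed NUL-terminated here). A hypothetical
// writer for that layout, using standard streams in place of the plugin's
// Buffer class - the format details are an assumption, not the plugin's
// documented wire format:
#include <ostream>
#include <string>
#include <utility>
#include <vector>

void writeHookList(std::ostream& out,
                   const std::vector<std::pair<unsigned int, std::string>>& hooks) {
    int n = static_cast<int>(hooks.size());
    out.write(reinterpret_cast<const char*>(&n), sizeof(n));  // record count
    for (const auto& [addr, name] : hooks) {
        out.write(reinterpret_cast<const char*>(&addr), sizeof(addr));  // hook address
        out.write(name.c_str(), name.size() + 1);  // name plus terminating NUL
    }
}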
void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& settings) {
    stdx::lock_guard<stdx::mutex> lk(_threadMutex);
    if (_startedThreads) {
        return;
    }

    log() << "Starting replication storage threads";
    _service->getGlobalStorageEngine()->setJournalListener(this);

    auto hookList = stdx::make_unique<rpc::EgressMetadataHookList>();
    hookList->addHook(stdx::make_unique<rpc::LogicalTimeMetadataHook>(_service));
    _taskExecutor = stdx::make_unique<executor::ThreadPoolTaskExecutor>(
        makeThreadPool(),
        executor::makeNetworkInterface("NetworkInterfaceASIO-RS", nullptr, std::move(hookList)));
    _taskExecutor->startup();

    _writerPool = SyncTail::makeWriterPool();

    _startedThreads = true;
}
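// rpc::EgressMetadataHookList is used here as a composite: addHook() appends
// an owned hook, and each metadata callback fans out to every registered hook
// in order. A self-contained sketch of that pattern - the interface below is
// illustrative, not MongoDB's actual EgressMetadataHook signature:
#include <memory>
#include <utility>
#include <vector>

class EgressHook {
public:
    virtual ~EgressHook() = default;
    virtual void writeRequestMetadata() = 0;
};

class EgressHookList : public EgressHook {
public:
    // Takes ownership of the hook and appends it to the fan-out list.
    void addHook(std::unique_ptr<EgressHook> hook) {
        _hooks.push_back(std::move(hook));
    }

    // Forwards the callback to every registered hook, in registration order.
    void writeRequestMetadata() override {
        for (auto& hook : _hooks)
            hook->writeRequestMetadata();
    }

private:
    std::vector<std::unique_ptr<EgressHook>> _hooks;
};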
void ScriptManager::addHookFromScript(const std::string& hook, const std::string& func) {
    auto* ctx = asGetActiveContext();
    if (mRegisteredHooks.count(hook) == 0) {
        ctx->SetException(("No hook named '" + hook + "'").c_str());
        return;
    }

    asIScriptObject* obj = (asIScriptObject*)ctx->GetThisPointer();
    auto* funcptr = obj->GetObjectType()->GetMethodByName(func.c_str());

    if (!addHook(hook, funcptr, obj)) {
        std::string decl = funcptr->GetDeclaration(false);
        std::string name = funcptr->GetName();
        auto off = decl.find(name);
        decl.replace(off, name.length(), "f");

        ctx->SetException(("Invalid declaration for hook '" + hook + "'\n" +
                           "Got " + decl + " but expects " + mRegisteredHooks.at(hook)).c_str());
        return;
    }

    static std::function<void(asIScriptObject* oldObj, asIScriptFunction* func,
                              asIScriptObject* newObj)>
        updateChangeNotice;
    if (!updateChangeNotice)
        updateChangeNotice = [this](asIScriptObject* oldObj,
                                    asIScriptFunction* func,
                                    asIScriptObject* newObj) {
            for (auto& hooks : mScriptHooks) {
                auto it = std::find_if(hooks.second.begin(), hooks.second.end(),
                                       [oldObj, func](const ScriptHook& h) {
                                           return h.Function == func && h.Object == oldObj;
                                       });
                if (it != hooks.second.end()) {
                    auto* decl = it->Function->GetDeclaration(false);

                    if (newObj)
                        newObj->GetWeakRefFlag()->AddRef();
                    it->WeakRef->Release();

                    removeChangeNotice(it->Object);

                    if (newObj) {
                        it->Object = newObj;
                        it->Function = newObj->GetObjectType()->GetMethodByDecl(decl);
                        it->WeakRef = it->Object->GetWeakRefFlag();

                        addChangeNotice(newObj, [=](asIScriptObject* evenNewerObj) {
                            updateChangeNotice(newObj, func, evenNewerObj);
                        });
                    }

                    hooks.second.erase(it);
                }
            }
        };

    addChangeNotice(obj, [=](asIScriptObject* newObj) {
        updateChangeNotice(obj, funcptr, newObj);
    });
}
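// Stripped of the AngelScript bookkeeping, the structure above is a name-keyed
// callback registry: hooks must be declared before callbacks can attach to
// them, and firing a hook invokes every attached callback. A minimal
// standalone sketch (no AngelScript, all names illustrative):
#include <functional>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

class HookRegistry {
public:
    // Declare a hook name so callbacks may attach to it later.
    void registerHook(const std::string& name) { mRegistered.insert(name); }

    // Attach a callback; fails for undeclared hook names.
    bool addHook(const std::string& name, std::function<void()> fn) {
        if (mRegistered.count(name) == 0)
            return false;
        mHooks[name].push_back(std::move(fn));
        return true;
    }

    // Invoke every callback attached to the named hook.
    void runHook(const std::string& name) {
        auto it = mHooks.find(name);
        if (it == mHooks.end())
            return;
        for (auto& fn : it->second)
            fn();
    }

private:
    std::unordered_set<std::string> mRegistered;
    std::unordered_map<std::string, std::vector<std::function<void()>>> mHooks;
};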
ExitCode runMongosServer(ServiceContext* serviceContext) {
    Client::initThread("mongosMain");
    printShardingVersionInfo(false);

    initWireSpec();

    serviceContext->setServiceEntryPoint(
        stdx::make_unique<ServiceEntryPointMongos>(serviceContext));

    auto tl =
        transport::TransportLayerManager::createWithConfig(&serverGlobalParams, serviceContext);
    auto res = tl->setup();
    if (!res.isOK()) {
        error() << "Failed to set up listener: " << res;
        return EXIT_NET_ERROR;
    }
    serviceContext->setTransportLayer(std::move(tl));

    auto unshardedHookList = stdx::make_unique<rpc::EgressMetadataHookList>();
    unshardedHookList->addHook(stdx::make_unique<rpc::LogicalTimeMetadataHook>(serviceContext));
    unshardedHookList->addHook(
        stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext));
    // TODO SERVER-33053: readReplyMetadata is not called on hooks added through
    // ShardingConnectionHook with _shardedConnections=false, so this hook will not run for
    // connections using globalConnPool.
    unshardedHookList->addHook(stdx::make_unique<rpc::CommittedOpTimeMetadataHook>(serviceContext));

    // Add sharding hooks to both connection pools - ShardingConnectionHook includes auth hooks
    globalConnPool.addHook(new ShardingConnectionHook(false, std::move(unshardedHookList)));

    auto shardedHookList = stdx::make_unique<rpc::EgressMetadataHookList>();
    shardedHookList->addHook(stdx::make_unique<rpc::LogicalTimeMetadataHook>(serviceContext));
    shardedHookList->addHook(
        stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext));
    shardedHookList->addHook(stdx::make_unique<rpc::CommittedOpTimeMetadataHook>(serviceContext));

    shardConnectionPool.addHook(new ShardingConnectionHook(true, std::move(shardedHookList)));

    // Hook up a listener for changes from the ReplicaSetMonitor.
    // This will last for the scope of this function, i.e. until shutdown finishes.
    auto shardingRSCL =
        ReplicaSetMonitor::getNotifier().makeListener<ShardingReplicaSetChangeListener>(
            serviceContext);

    // Mongos connection pools already take care of authenticating new connections, so the
    // replica set connection shouldn't need to.
    DBClientReplicaSet::setAuthPooledSecondaryConn(false);

    if (getHostName().empty()) {
        quickExit(EXIT_BADOPTIONS);
    }

    auto opCtx = cc().makeOperationContext();

    auto logicalClock = stdx::make_unique<LogicalClock>(opCtx->getServiceContext());
    LogicalClock::set(opCtx->getServiceContext(), std::move(logicalClock));

    {
        Status status = initializeSharding(opCtx.get());
        if (!status.isOK()) {
            if (status == ErrorCodes::CallbackCanceled) {
                invariant(globalInShutdownDeprecated());
                log() << "Shutdown called before mongos finished starting up";
                return EXIT_CLEAN;
            }
            error() << "Error initializing sharding system: " << status;
            return EXIT_SHARDING_ERROR;
        }

        Grid::get(opCtx.get())
            ->getBalancerConfiguration()
            ->refreshAndCheck(opCtx.get())
            .transitional_ignore();
    }

    startMongoSFTDC();

    Status status = AuthorizationManager::get(serviceContext)->initialize(opCtx.get());
    if (!status.isOK()) {
        error() << "Initializing authorization data failed: " << status;
        return EXIT_SHARDING_ERROR;
    }

    // Construct the sharding uptime reporter after the startup parameters have been parsed in
    // order to ensure that it picks up the server port instead of reporting the default value.
    shardingUptimeReporter.emplace();
    shardingUptimeReporter->startPeriodicThread();

    clusterCursorCleanupJob.go();

    UserCacheInvalidator cacheInvalidatorThread(AuthorizationManager::get(serviceContext));
    {
        cacheInvalidatorThread.initialize(opCtx.get());
        cacheInvalidatorThread.go();
    }

    PeriodicTask::startRunningPeriodicTasks();

    // Set up the periodic runner for background job execution
    auto runner = makePeriodicRunner(serviceContext);
    runner->startup();
    serviceContext->setPeriodicRunner(std::move(runner));

    SessionKiller::set(serviceContext,
                       std::make_shared<SessionKiller>(serviceContext, killSessionsRemote));

    LogicalSessionCache::set(
        serviceContext,
        stdx::make_unique<LogicalSessionCacheImpl>(stdx::make_unique<ServiceLiaisonMongos>(),
                                                   stdx::make_unique<SessionsCollectionSharded>(),
                                                   RouterSessionCatalog::reapSessionsOlderThan));

    status = serviceContext->getServiceExecutor()->start();
    if (!status.isOK()) {
        error() << "Failed to start the service executor: " << redact(status);
        return EXIT_NET_ERROR;
    }

    status = serviceContext->getServiceEntryPoint()->start();
    if (!status.isOK()) {
        error() << "Failed to start the service entry point: " << redact(status);
        return EXIT_NET_ERROR;
    }

    status = serviceContext->getTransportLayer()->start();
    if (!status.isOK()) {
        error() << "Failed to start the transport layer: " << redact(status);
        return EXIT_NET_ERROR;
    }

    serviceContext->notifyStartupComplete();

#if !defined(_WIN32)
    signalForkSuccess();
#else
    if (ntservice::shouldStartService()) {
        ntservice::reportStatus(SERVICE_RUNNING);
        log() << "Service running";
    }
#endif

    // Block until shutdown.
    MONGO_IDLE_THREAD_BLOCK;
    return waitForShutdown();
}
Status initializeSharding(OperationContext* opCtx) {
    auto targeterFactory = stdx::make_unique<RemoteCommandTargeterFactoryImpl>();
    auto targeterFactoryPtr = targeterFactory.get();

    ShardFactory::BuilderCallable setBuilder =
        [targeterFactoryPtr](const ShardId& shardId, const ConnectionString& connStr) {
            return stdx::make_unique<ShardRemote>(
                shardId, connStr, targeterFactoryPtr->create(connStr));
        };

    ShardFactory::BuilderCallable masterBuilder =
        [targeterFactoryPtr](const ShardId& shardId, const ConnectionString& connStr) {
            return stdx::make_unique<ShardRemote>(
                shardId, connStr, targeterFactoryPtr->create(connStr));
        };

    ShardFactory::BuildersMap buildersMap{
        {ConnectionString::SET, std::move(setBuilder)},
        {ConnectionString::MASTER, std::move(masterBuilder)},
    };

    auto shardFactory =
        stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory));

    CatalogCacheLoader::set(opCtx->getServiceContext(),
                            stdx::make_unique<ConfigServerCatalogCacheLoader>());

    Status status = initializeGlobalShardingState(
        opCtx,
        mongosGlobalParams.configdbs,
        generateDistLockProcessId(opCtx),
        std::move(shardFactory),
        stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(opCtx)),
        [opCtx]() {
            auto hookList = stdx::make_unique<rpc::EgressMetadataHookList>();
            hookList->addHook(
                stdx::make_unique<rpc::LogicalTimeMetadataHook>(opCtx->getServiceContext()));
            hookList->addHook(
                stdx::make_unique<rpc::CommittedOpTimeMetadataHook>(opCtx->getServiceContext()));
            hookList->addHook(stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(
                opCtx->getServiceContext()));
            return hookList;
        },
        boost::none);

    if (!status.isOK()) {
        return status;
    }

    status = waitForShardRegistryReload(opCtx);
    if (!status.isOK()) {
        return status;
    }

    status = waitForSigningKeys(opCtx);
    if (!status.isOK()) {
        return status;
    }

    Grid::get(opCtx)->setShardingInitialized();

    return Status::OK();
}
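// ShardFactory's BuildersMap dispatches construction on the connection-string
// type: each kind of connection string maps to a builder callable. A
// stripped-down sketch of that shape (all names here are illustrative, not
// MongoDB's API):
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct ShardLike {
    virtual ~ShardLike() = default;
};

enum class ConnKind { kSet, kMaster };

class ShardLikeFactory {
public:
    using Builder = std::function<std::unique_ptr<ShardLike>(const std::string& connStr)>;
    using BuildersMap = std::map<ConnKind, Builder>;

    explicit ShardLikeFactory(BuildersMap builders) : _builders(std::move(builders)) {}

    // Look up the builder registered for this connection kind and run it.
    std::unique_ptr<ShardLike> create(ConnKind kind, const std::string& connStr) const {
        auto it = _builders.find(kind);
        if (it == _builders.end())
            throw std::out_of_range("no builder registered for this connection kind");
        return it->second(connStr);
    }

private:
    BuildersMap _builders;
};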