/**
 * Test that client cursors which have been marked as killed but are still pinned *do not* time
 * out.
 */
TEST_F(CursorManagerTest, InactiveKilledCursorsThatAreStillPinnedShouldNotTimeout) {
    CursorManager* cursorManager = useCursorManager();
    auto clock = useClock();

    // Make a cursor from the plan executor, and immediately kill it.
    auto cursorPin = cursorManager->registerCursor(
        _opCtx.get(),
        {makeFakePlanExecutor(), NamespaceString{"test.collection"}, {}, false, BSONObj()});
    const bool collectionGoingAway = false;
    cursorManager->invalidateAll(
        _opCtx.get(), collectionGoingAway, "KilledCursorsShouldTimeoutTest");

    // Advance the clock to simulate time passing.
    clock->advance(Milliseconds(CursorManager::kDefaultCursorTimeoutMinutes));

    // The pin is still in scope, so it should not time out.
    ASSERT_EQ(0UL, cursorManager->timeoutCursors(_opCtx.get(), clock->now()));
}
void FrameHistory::record(const TimePoint& now, float zoom, const Duration& duration) {
    int16_t zoomIndex = std::floor(zoom * 10.0);

    if (firstFrame) {
        changeTimes.fill(now);
        for (int16_t z = 0; z <= zoomIndex; z++) {
            opacities[z] = 255u;
        }
        firstFrame = false;
    }

    if (zoomIndex < previousZoomIndex) {
        for (int16_t z = zoomIndex + 1; z <= previousZoomIndex; z++) {
            changeTimes[z] = now;
            changeOpacities[z] = opacities[z];
        }
    } else {
        for (int16_t z = zoomIndex; z > previousZoomIndex; z--) {
            changeTimes[z] = now;
            changeOpacities[z] = opacities[z];
        }
    }

    for (int16_t z = 0; z <= 255; z++) {
        std::chrono::duration<float> timeDiff = now - changeTimes[z];
        int32_t opacityChange = (duration == Milliseconds(0) ? 1 : (timeDiff / duration)) * 255;
        if (z <= zoomIndex) {
            opacities[z] = util::min(255, changeOpacities[z] + opacityChange);
        } else {
            opacities[z] = util::max(0, changeOpacities[z] - opacityChange);
        }
    }

    changed = true;

    if (zoomIndex != previousZoomIndex) {
        previousZoomIndex = zoomIndex;
        previousTime = now;
    }
    time = now;
}
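// Editor's sketch (not from the codebase above): the fade math in
// FrameHistory::record reduces to a linear ramp from a recorded start
// opacity toward 0 or 255 over `duration`. A minimal standalone version,
// with illustrative names, assuming a zero duration means "complete
// immediately":
#include <algorithm>
#include <chrono>
#include <cstdint>

uint8_t fadedOpacity(std::chrono::milliseconds elapsed,
                     std::chrono::milliseconds duration,
                     uint8_t startOpacity,
                     bool fadingIn) {
    // Fraction of the fade that has elapsed; a zero duration completes at once.
    const double fraction = duration.count() == 0
        ? 1.0
        : static_cast<double>(elapsed.count()) / duration.count();
    const int change = static_cast<int>(fraction * 255);
    const int result = fadingIn ? startOpacity + change : startOpacity - change;
    return static_cast<uint8_t>(std::clamp(result, 0, 255));  // clamp to valid range
}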
namespace mongo {

const WriteConcernOptions DistLockCatalog::kLocalWriteConcern(
    1, WriteConcernOptions::SyncMode::UNSET, Milliseconds(0));

const WriteConcernOptions DistLockCatalog::kMajorityWriteConcern(
    WriteConcernOptions::kMajority,
    // Note: Even though we're setting UNSET here, kMajority implies JOURNAL if journaling is
    // supported by this mongod.
    WriteConcernOptions::SyncMode::UNSET,
    WriteConcernOptions::kWriteConcernTimeoutSystem);

DistLockCatalog::DistLockCatalog() = default;

DistLockCatalog::ServerInfo::ServerInfo(Date_t time, OID _electionId)
    : serverTime(std::move(time)), electionId(std::move(_electionId)) {}

}  // namespace mongo
void NoopWriter::_writeNoop(OperationContext* opCtx) {
    // Use GlobalLock + lockMMAPV1Flush instead of DBLock to allow return when the lock is not
    // available. This may happen when the primary steps down and a shared global lock is
    // acquired.
    Lock::GlobalLock lock(
        opCtx, MODE_IX, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kLeaveUnlocked);
    if (!lock.isLocked()) {
        LOG(1) << "Global lock is not available, skipping noopWrite";
        return;
    }
    opCtx->lockState()->lockMMAPV1Flush();

    auto replCoord = ReplicationCoordinator::get(opCtx);
    // It's a proxy for being a primary.
    if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
        LOG(1) << "Not a primary, skipping the noop write";
        return;
    }

    auto lastAppliedOpTime = replCoord->getMyLastAppliedOpTime();

    // _lastKnownOpTime is not protected by a lock as it's used only by one thread.
    if (lastAppliedOpTime != _lastKnownOpTime) {
        LOG(1) << "Not scheduling a noop write. Last known OpTime: " << _lastKnownOpTime
               << " != last primary OpTime: " << lastAppliedOpTime;
    } else {
        if (writePeriodicNoops.load()) {
            const auto logLevel = getTestCommandsEnabled() ? 0 : 1;
            LOG(logLevel)
                << "Writing noop to oplog as there have been no writes to this replica set in "
                   "over "
                << _writeInterval;
            writeConflictRetry(
                opCtx, "writeNoop", NamespaceString::kRsOplogNamespace.ns(), [&opCtx] {
                    WriteUnitOfWork uow(opCtx);
                    opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(opCtx,
                                                                                          kMsgObj);
                    uow.commit();
                });
        }
    }

    _lastKnownOpTime = replCoord->getMyLastAppliedOpTime();
    LOG(1) << "Set last known op time to " << _lastKnownOpTime;
}
TEST(AsyncTimerMock, Cancel) {
    AsyncTimerFactoryMock factory;

    // Set a timer.
    bool fired = false;
    auto timer = factory.make(Milliseconds(100));
    timer->asyncWait([&fired](std::error_code ec) {
        // This timer should have been canceled.
        ASSERT(ec);
        ASSERT(ec == asio::error::operation_aborted);
        fired = true;
    });

    // Cancel the timer.
    timer->cancel();

    // Ensure that its handler was called.
    ASSERT(fired);
}
void ScheduleTasks() override {
    scheduler.Schedule(Seconds(8), Seconds(10), [this](TaskContext task)
    {
        DoCastAOE(SPELL_ARCANE_BARRAGE_VOLLEY);
        task.Repeat(Seconds(8), Seconds(10));
    });

    scheduler.Schedule(Seconds(10), Seconds(11), [this](TaskContext task)
    {
        if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 45.0f, true))
            DoCast(target, SPELL_ARCANE_BUFFET);
        task.Repeat(Seconds(15), Seconds(20));
    });

    scheduler.Schedule(Seconds(5), [this](TaskContext task)
    {
        Talk(SAY_REPEAT_SUMMON);

        std::list<uint8> summonSpells = { 0, 1, 2 };

        uint8 spell = Trinity::Containers::SelectRandomContainerElement(summonSpells);
        DoCast(me, EtherealSphereSummonSpells[spell]);
        summonSpells.remove(spell);

        if (IsHeroic())
        {
            spell = Trinity::Containers::SelectRandomContainerElement(summonSpells);
            task.Schedule(Milliseconds(2500), [this, spell](TaskContext /*task*/)
            {
                DoCast(me, EtherealSphereHeroicSummonSpells[spell]);
            });
        }

        task.Schedule(Seconds(33), Seconds(35), [this](TaskContext /*task*/)
        {
            DummyEntryCheckPredicate pred;
            summons.DoAction(ACTION_SUMMON, pred);
        });

        task.Repeat(Seconds(45), Seconds(47));
    });
}
/**
 * Test that client cursors time out and get deleted.
 */
TEST_F(CursorManagerTest, InactiveCursorShouldTimeout) {
    CursorManager* cursorManager = useCursorManager();
    auto clock = useClock();

    cursorManager->registerCursor(
        _opCtx.get(),
        {makeFakePlanExecutor(), NamespaceString{"test.collection"}, {}, false, BSONObj()});

    ASSERT_EQ(0UL, cursorManager->timeoutCursors(_opCtx.get(), Date_t()));

    clock->advance(Milliseconds(CursorManager::kDefaultCursorTimeoutMinutes));
    ASSERT_EQ(1UL, cursorManager->timeoutCursors(_opCtx.get(), clock->now()));
    ASSERT_EQ(0UL, cursorManager->numCursors());

    cursorManager->registerCursor(
        _opCtx.get(),
        {makeFakePlanExecutor(), NamespaceString{"test.collection"}, {}, false, BSONObj()});
    ASSERT_EQ(1UL, cursorManager->timeoutCursors(_opCtx.get(), Date_t::max()));
    ASSERT_EQ(0UL, cursorManager->numCursors());
}
void NetworkInterfaceMock::_enqueueOperation_inlock(
    mongo::executor::NetworkInterfaceMock::NetworkOperation&& op) {
    auto insertBefore =
        std::upper_bound(std::begin(_unscheduled),
                         std::end(_unscheduled),
                         op,
                         [](const NetworkOperation& a, const NetworkOperation& b) {
                             return a.getNextConsiderationDate() < b.getNextConsiderationDate();
                         });

    // Capture what we need from the operation before moving it into the queue.
    const auto timeout = op.getRequest().timeout;
    auto cbh = op.getCallbackHandle();

    _unscheduled.emplace(insertBefore, std::move(op));

    if (timeout != RemoteCommandRequest::kNoTimeout) {
        invariant(timeout >= Milliseconds(0));
        ResponseStatus response(ErrorCodes::NetworkTimeout, "Network timeout");
        auto action = stdx::bind(
            &NetworkInterfaceMock::_cancelCommand_inlock, this, cbh, response);
        _alarms.emplace(_now_inlock() + timeout, action);
    }
}
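// Editor's sketch (illustrative, not MongoDB code): the upper_bound +
// emplace pairing above is the standard idiom for keeping a sequence
// sorted on insertion; upper_bound (rather than lower_bound) places new
// entries after existing entries with an equal key, so equal-keyed items
// stay in arrival order.
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

struct Job {
    int readyAt;  // stand-in for getNextConsiderationDate()
};

void insertSorted(std::vector<Job>& jobs, Job job) {
    auto pos = std::upper_bound(
        jobs.begin(), jobs.end(), job,
        [](const Job& a, const Job& b) { return a.readyAt < b.readyAt; });
    jobs.insert(pos, std::move(job));
}

int main() {
    std::vector<Job> jobs;
    for (int t : {5, 1, 5, 3})
        insertSorted(jobs, Job{t});
    assert(jobs[0].readyAt == 1 && jobs[1].readyAt == 3);  // sorted by readyAt
}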
namespace util {

constexpr float tileSize = 512;

/*
 * The maximum extent of a feature that can be safely stored in the buffer.
 * In practice, all features are converted to this extent before being added.
 *
 * Positions are stored as signed 16-bit integers.
 * One bit is lost for signedness to support features extending past the left edge of the tile.
 * One bit is lost because the line vertex buffer packs 1 bit of other data into the int.
 * One bit is lost to support features extending past the extent on the right edge of the tile.
 * This leaves us with 2^13 = 8192.
 */
constexpr int32_t EXTENT = 8192;

constexpr double DEG2RAD = M_PI / 180.0;
constexpr double RAD2DEG = 180.0 / M_PI;
constexpr double M2PI = M_PI * 2;
constexpr double EARTH_RADIUS_M = 6378137;
constexpr double LATITUDE_MAX = 85.051128779806604;
constexpr double LONGITUDE_MAX = 180;
constexpr double DEGREES_MAX = 360;
constexpr double PITCH_MAX = M_PI / 3;
constexpr double MIN_ZOOM = 0.0;
constexpr double MAX_ZOOM = 25.5;
constexpr float MIN_ZOOM_F = MIN_ZOOM;
constexpr float MAX_ZOOM_F = MAX_ZOOM;
constexpr uint64_t DEFAULT_MAX_CACHE_SIZE = 50 * 1024 * 1024;

constexpr Duration DEFAULT_FADE_DURATION = Milliseconds(300);
constexpr Seconds CLOCK_SKEW_RETRY_TIMEOUT { 30 };
constexpr UnitBezier DEFAULT_TRANSITION_EASE = { 0, 0, 0.25, 1 };
constexpr int DEFAULT_RATE_LIMIT_TIMEOUT = 5;

constexpr const char* API_BASE_URL = "https://api.mapbox.com";

} // namespace util
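// Editor's sketch restating the EXTENT comment as checked arithmetic:
// 16 bits, minus one for the sign, one packed with other vertex data,
// and one of headroom past the right edge, leaves 13 usable bits.
#include <cstdint>

constexpr int32_t usableBits = 16 - 1 /*sign*/ - 1 /*packed bit*/ - 1 /*right-edge headroom*/;
static_assert((INT32_C(1) << usableBits) == 8192, "EXTENT must match the 13-bit budget");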
TEST_F(TransactionCoordinatorServiceTest,
       CoordinatorIsNotCanceledIfDeadlinePassesButHasReceivedParticipantList) {
    auto coordinatorService = TransactionCoordinatorService::get(operationContext());
    const auto deadline = executor()->now() + Milliseconds(1000 * 60 * 10 /* 10 minutes */);
    coordinatorService->createCoordinator(operationContext(), _lsid, _txnNumber, deadline);

    // Deliver the participant list before the deadline.
    ASSERT(boost::none !=
           coordinatorService->coordinateCommit(
               operationContext(), _lsid, _txnNumber, kTwoShardIdSet));

    // Reach the deadline.
    network()->enterNetwork();
    network()->advanceTime(deadline);
    network()->exitNetwork();

    // The coordinator should still exist.
    ASSERT(boost::none !=
           coordinatorService->coordinateCommit(
               operationContext(), _lsid, _txnNumber, kTwoShardIdSet));
}
void Reset() override {
    me->SetReactState(REACT_PASSIVE);
    me->SetDisplayId(me->GetCreatureTemplate()->Modelid2);
    DoCastSelf(SPELL_PUTRID_MUSHROOM);
    DoCastSelf(SPELL_SHRINK, true);
    DoCastSelf(SPELL_GROW, true);

    if (me->GetEntry() == NPC_HEALTHY_MUSHROOM)
    {
        DoCastSelf(SPELL_POWER_MUSHROOM_VISUAL_AURA);
        _active = true;
    }
    else
        DoCastSelf(SPELL_POISONOUS_MUSHROOM_VISUAL_AURA);

    _scheduler.Schedule(Milliseconds(800), [this](TaskContext /*context*/)
    {
        DoCastSelf(SPELL_GROW, true);
    });
}
void tick( Milliseconds current_time_in_milliseconds )
{
    if ( m_last_flush_time == Milliseconds( 0 ) )
    {
        // initialize last_flush_time
        m_last_flush_time = current_time_in_milliseconds;
    }
    else
    {
        // is it time to check?
        if ( ( current_time_in_milliseconds - m_last_flush_time ) >
             m_flush_period_in_milliseconds )
        {
            // yes, remember when we did
            m_last_flush_time = current_time_in_milliseconds;

            // flush any expired addresses
            flushKnownAddressesFromMap( m_known_entity_addresses,
                                        m_expiry_time_in_milliseconds,
                                        current_time_in_milliseconds );
        }
    }
}
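// Editor's sketch (illustrative names): the same periodic-flush pattern
// as tick() above, trimmed to its essentials -- treat time zero as
// "uninitialized", then fire once per elapsed period.
#include <chrono>

class PeriodicFlusher {
public:
    explicit PeriodicFlusher(std::chrono::milliseconds period) : m_period(period) {}

    // Returns true when a flush is due at `now`; the timer rearms itself.
    bool tick(std::chrono::milliseconds now) {
        if (m_lastFlush == std::chrono::milliseconds(0)) {
            m_lastFlush = now;  // first call only initializes the clock
            return false;
        }
        if (now - m_lastFlush > m_period) {
            m_lastFlush = now;  // remember when we flushed
            return true;
        }
        return false;
    }

private:
    std::chrono::milliseconds m_period;
    std::chrono::milliseconds m_lastFlush{0};
};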
AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
                                     const NamespaceString& nss,
                                     LockMode modeDB,
                                     LockMode modeColl,
                                     ViewMode viewMode)
    : _viewMode(viewMode),
      _autoDb(opCtx, nss.db(), modeDB),
      _collLock(opCtx->lockState(), nss.ns(), modeColl),
      _coll(_autoDb.getDb() ? _autoDb.getDb()->getCollection(opCtx, nss) : nullptr) {
    Database* db = _autoDb.getDb();

    // If the database exists, but not the collection, check for views.
    if (_viewMode == ViewMode::kViewsForbidden && db && !_coll &&
        db->getViewCatalog()->lookup(opCtx, nss.ns()))
        uasserted(ErrorCodes::CommandNotSupportedOnView,
                  str::stream() << "Namespace " << nss.ns() << " is a view, not a collection");

    // Wait for a configured amount of time after acquiring locks if the failpoint is enabled.
    MONGO_FAIL_POINT_BLOCK(setAutoGetCollectionWait, customWait) {
        const BSONObj& data = customWait.getData();
        sleepFor(Milliseconds(data["waitForMillis"].numberInt()));
    }
}
void UpdateAI(uint32 diff) override {
    if (!UpdateVictim())
        return;

    events.Update(diff);

    if (me->HasUnitState(UNIT_STATE_CASTING))
        return;

    while (uint32 eventId = events.ExecuteEvent())
    {
        switch (eventId)
        {
            case EVENT_POISON:
                if (!me->HasAura(SPELL_WIDOWS_EMBRACE_HELPER))
                    DoCastAOE(SPELL_POISON_BOLT_VOLLEY);
                events.Repeat(randtime(Seconds(8), Seconds(15)));
                break;
            case EVENT_FIRE:
                if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
                    DoCast(target, SPELL_RAIN_OF_FIRE);
                events.Repeat(randtime(Seconds(6), Seconds(18)));
                break;
            case EVENT_FRENZY:
                if (Aura* widowsEmbrace = me->GetAura(SPELL_WIDOWS_EMBRACE_HELPER))
                    events.ScheduleEvent(EVENT_FRENZY, Milliseconds(widowsEmbrace->GetDuration() + 1));
                else
                {
                    DoCast(SPELL_FRENZY);
                    Talk(EMOTE_FRENZY);
                    events.Repeat(Minutes(1) + randtime(Seconds(0), Seconds(20)));
                }
                break;
        }
    }

    DoMeleeAttackIfReady();
}
bool WindowImpl::PopEvent(Event& event, bool block)
{
    // If the event queue is empty, let's first check if new events are available from the OS
    if (myEvents.empty())
    {
        if (!block)
        {
            // Non-blocking mode: process events and continue
            ProcessJoystickEvents();
            ProcessEvents();
        }
        else
        {
            // Blocking mode: process events until one is triggered

            // Here we use a manual wait loop instead of the optimized
            // wait-event provided by the OS, so that we don't skip joystick
            // events (which require polling)
            while (myEvents.empty())
            {
                ProcessJoystickEvents();
                ProcessEvents();
                Sleep(Milliseconds(10));
            }
        }
    }

    // Pop the first event of the queue, if it is not empty
    if (!myEvents.empty())
    {
        event = myEvents.front();
        myEvents.pop();

        return true;
    }

    return false;
}
Status SyncSourceFeedback::_updateUpstream(OperationContext* txn, BackgroundSync* bgsync) {
    Reporter* reporter;
    {
        stdx::lock_guard<stdx::mutex> lock(_mtx);
        reporter = _reporter;
    }

    auto syncTarget = reporter->getTarget();

    auto triggerStatus = reporter->trigger();
    if (!triggerStatus.isOK()) {
        warning() << "unable to schedule reporter to update replication progress on " << syncTarget
                  << ": " << triggerStatus;
        return triggerStatus;
    }

    auto status = reporter->join();

    if (!status.isOK()) {
        log() << "SyncSourceFeedback error sending update to " << syncTarget << ": " << status;

        // Some errors should not result in blacklisting the sync source.
        if (status == ErrorCodes::InvalidSyncSource) {
            // The command could not be created because the node is now primary.
        } else if (status == ErrorCodes::NodeNotFound) {
            // The command could not be created, likely because this node was removed from the
            // set.
        } else {
            // Blacklist the sync target for 0.5 seconds and find a new one.
            stdx::lock_guard<stdx::mutex> lock(_mtx);
            auto replCoord = repl::ReplicationCoordinator::get(txn);
            replCoord->blacklistSyncSource(syncTarget, Date_t::now() + Milliseconds(500));
            bgsync->clearSyncTarget();
        }
    }

    return status;
}
Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* txn,
                                                        Collection* collection,
                                                        BSONArrayBuilder* arrBuilder) {
    dassert(txn->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));

    ElapsedTracker tracker(txn->getServiceContext()->getFastClockSource(),
                           internalQueryExecYieldIterations,
                           Milliseconds(internalQueryExecYieldPeriodMS.load()));

    stdx::lock_guard<stdx::mutex> sl(_mutex);

    std::set<RecordId>::iterator it;

    for (it = _cloneLocs.begin(); it != _cloneLocs.end(); ++it) {
        // We must always make progress in this method by at least one document because empty
        // return indicates there is no more initial clone data.
        if (arrBuilder->arrSize() && tracker.intervalHasElapsed()) {
            break;
        }

        Snapshotted<BSONObj> doc;
        if (collection->findDoc(txn, *it, &doc)) {
            // Use the builder size instead of accumulating the document sizes directly so that we
            // take into consideration the overhead of BSONArray indices.
            if (arrBuilder->arrSize() &&
                (arrBuilder->len() + doc.value().objsize() + 1024) > BSONObjMaxUserSize) {
                break;
            }

            arrBuilder->append(doc.value());
        }
    }

    _cloneLocs.erase(_cloneLocs.begin(), it);

    return Status::OK();
}
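// Editor's sketch (generic, not MongoDB code): the loop above follows a
// common batching pattern -- always emit at least one item so the caller
// can distinguish "no data left" from "budget exhausted", then stop once
// a size budget would be exceeded. Sizes here are illustrative.
#include <cstddef>
#include <vector>

std::vector<int> nextBatch(std::vector<int>& pending, std::size_t maxBatchBytes) {
    std::vector<int> batch;
    std::size_t batchBytes = 0;
    while (!pending.empty()) {
        const std::size_t itemBytes = sizeof(int) + 16;  // payload + per-item overhead
        if (!batch.empty() && batchBytes + itemBytes > maxBatchBytes)
            break;  // over budget, but only after making progress
        batch.push_back(pending.back());
        pending.pop_back();
        batchBytes += itemBytes;
    }
    return batch;
}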
void NetworkTestEnv::onCommandWithMetadata(OnCommandWithMetadataFunction func) {
    _mockNetwork->enterNetwork();

    const NetworkInterfaceMock::NetworkOperationIterator noi = _mockNetwork->getNextReadyRequest();
    const RemoteCommandRequest& request = noi->getRequest();

    const auto cmdResponseStatus = func(request);

    BSONObjBuilder result;
    if (cmdResponseStatus.isOK()) {
        result.appendElements(cmdResponseStatus.data);
        Command::appendCommandStatus(result, cmdResponseStatus.status);
        const RemoteCommandResponse response(
            result.obj(), cmdResponseStatus.metadata, Milliseconds(1));

        _mockNetwork->scheduleResponse(noi, _mockNetwork->now(), response);
    } else {
        _mockNetwork->scheduleResponse(noi, _mockNetwork->now(), cmdResponseStatus.status);
    }

    _mockNetwork->runReadyNetworkOperations();
    _mockNetwork->exitNetwork();
}
// This test has been temporarily disabled due to MESOS-1257.
TEST_F(ExternalContainerizerTest, DISABLED_Launch)
{
  Try<PID<Master> > master = this->StartMaster();
  ASSERT_SOME(master);

  Flags testFlags;

  slave::Flags flags = this->CreateSlaveFlags();
  flags.isolation = "external";
  flags.containerizer_path =
    testFlags.build_dir + "/src/examples/python/test-containerizer";

  MockExternalContainerizer containerizer(flags);

  Try<PID<Slave> > slave = this->StartSlave(&containerizer, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);

  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("isolator_test");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());

  Resources resources(offers.get()[0].resources());
  Option<Bytes> mem = resources.mem();
  ASSERT_SOME(mem);
  Option<double> cpus = resources.cpus();
  ASSERT_SOME(cpus);

  const std::string& file = path::join(flags.work_dir, "ready");

  // This task induces user/system load by running top in a child process.
  task.mutable_command()->set_value(
#ifdef __APPLE__
      // Use logging mode with 30,000 samples with no interval.
      "top -l 30000 -s 0 2>&1 > /dev/null & "
#else
      // Batch mode, with 30,000 samples with no interval.
      "top -b -d 0 -n 30000 2>&1 > /dev/null & "
#endif
      "touch " + file + "; " // Signals that the top command is running.
      "sleep 60");

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status))
    .WillRepeatedly(Return()); // Ignore rest for now.

  Future<ContainerID> containerId;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureArg<0>(&containerId),
                    Invoke(&containerizer,
                           &MockExternalContainerizer::_launch)));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(containerId);
  AWAIT_READY(status);
  EXPECT_EQ(TASK_RUNNING, status.get().state());

  // Wait for the task to begin inducing cpu time.
  while (!os::exists(file));

  ExecutorID executorId;
  executorId.set_value(task.task_id().value());

  // We'll wait up to 10 seconds for the child process to induce
  // 1/8 of a second of user and system cpu time in total.
  // TODO(bmahler): Also induce rss memory consumption, by re-using
  // the balloon framework.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = containerizer.usage(containerId.get());
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    // NOTE: We are currently getting dummy-data from the test-
    // containerizer python script matching these expectations.
    // TODO(tillt): Consider working with real data.
    if (statistics.cpus_user_time_secs() >= 0.120 &&
        statistics.cpus_system_time_secs() >= 0.05 &&
        statistics.mem_rss_bytes() >= 1024u) {
      break;
    }

    os::sleep(Milliseconds(100));
    waited += Milliseconds(100);
  } while (waited < Seconds(10));

  EXPECT_GE(statistics.cpus_user_time_secs(), 0.120);
  EXPECT_GE(statistics.cpus_system_time_secs(), 0.05);
  EXPECT_EQ(statistics.cpus_limit(), cpus.get());
  EXPECT_GE(statistics.mem_rss_bytes(), 1024u);
  EXPECT_EQ(statistics.mem_limit_bytes(), mem.get().bytes());

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.killTask(task.task_id());

  AWAIT_READY(status);
  EXPECT_EQ(TASK_KILLED, status.get().state());

  driver.stop();
  driver.join();

  this->Shutdown();
}
void StatisticsWriterHtml::Write(const Statistics *stats) {
  *stream() <<
    "<!DOCTYPE html>\n"
    "<html>\n"
    "<head>\n"
    " <title>" << "Profile of '" << script_name() << "'</title>\n"
    " <script type=\"text/javascript\"\n"
    " src=\"http://code.jquery.com/jquery-latest.min.js\">\n"
    " </script>\n"
    " <script type=\"text/javascript\"\n"
    " src=\"http://tablesorter.com/__jquery.tablesorter.min.js\">\n"
    " </script>\n"
    " <script type=\"text/javascript\">\n"
    " $(document).ready(function() {\n"
    " $('#data').tablesorter();\n"
    " });\n"
    " </script>\n"
    "</head>\n"
    "<body>\n"
  ;

  *stream() <<
    " <style type=\"text/css\">\n"
    " table {\n"
    " border-spacing: 0;\n"
    " border-collapse: collapse;\n"
    " }\n"
    " table#data {\n"
    " width: 100%;\n"
    " }\n"
    " table, th, td {\n"
    " border-width: thin;\n"
    " border-style: solid;\n"
    " border-color: #aaaaaa;\n"
    " }\n"
    " th {\n"
    " text-align: left;\n"
    " background-color: #cccccc;\n"
    " }\n"
    " th.group {\n"
    " text-align: center;\n"
    " }\n"
    " td {\n"
    " text-align: left;\n"
    " font-family: Consolas, \"DejaVu Sans Mono\", \"Courier New\", Monospace;\n"
    " }\n"
    " tbody tr:nth-child(odd) {\n"
    " background-color: #eeeeee;\n"
    " }\n"
    " tbody tr:hover {\n"
    " background-color: #c0e3eb;\n"
    " }\n"
    " </style>\n"
    " <table id=\"meta\">\n"
    " <thead>\n"
    " <tr>\n"
    " <th>Name</th>\n"
    " <th>Value</th>\n"
    " </tr>\n"
    " </thead>\n"
    " <tbody>\n"
  ;

  if (print_date()) {
    *stream() <<
      " <tr>\n"
      " <td>Date</td>\n"
      " <td>" << CTime() << "</td>\n"
      " </tr>\n"
    ;
  }

  if (print_run_time()) {
    *stream() <<
      " <tr>\n"
      " <td>Duration</td>\n"
      " <td>" << TimeSpan(stats->GetTotalRunTime()) << "</td>\n"
      " </tr>\n"
    ;
  }

  *stream() <<
    " </tbody>\n"
    " </table>\n"
    " <br/>\n"
    " <table id=\"data\" class=\"tablesorter\">\n"
    " <thead>\n"
    " <tr>\n"
    " <th rowspan=\"2\">Type</th>\n"
    " <th rowspan=\"2\">Name</th>\n"
    " <th rowspan=\"2\">Calls</th>\n"
    " <th colspan=\"4\" class=\"group\">Self Time</th>\n"
    " <th colspan=\"4\" class=\"group\">Total Time</th>\n"
    " </tr>\n"
    " <tr>\n"
    " <th>%</th>\n"
    " <th>Overall</th>\n"
    " <th>Average</th>\n"
    " <th>Worst</th>\n"
    " <th>%</th>\n"
    " <th>Overall</th>\n"
    " <th>Average</th>\n"
    " <th>Worst</th>\n"
    " </tr>\n"
    " </thead>\n"
    " <tbody>\n"
  ;

  std::vector<FunctionStatistics*> all_fn_stats;
  stats->GetStatistics(all_fn_stats);

  Nanoseconds self_time_all;
  for (std::vector<FunctionStatistics*>::const_iterator iterator = all_fn_stats.begin();
       iterator != all_fn_stats.end(); ++iterator) {
    const FunctionStatistics *fn_stats = *iterator;
    self_time_all += fn_stats->self_time();
  }

  Nanoseconds total_time_all;
  for (std::vector<FunctionStatistics*>::const_iterator iterator = all_fn_stats.begin();
       iterator != all_fn_stats.end(); ++iterator) {
    const FunctionStatistics *fn_stats = *iterator;
    total_time_all += fn_stats->total_time();
  }

  std::ostream::fmtflags flags = stream()->flags();
  stream()->flags(flags | std::ostream::fixed);

  for (std::vector<FunctionStatistics*>::const_iterator iterator = all_fn_stats.begin();
       iterator != all_fn_stats.end(); ++iterator) {
    const FunctionStatistics *fn_stats = *iterator;

    double self_time_percent =
      fn_stats->self_time().count() * 100 / self_time_all.count();
    double total_time_percent =
      fn_stats->total_time().count() * 100 / total_time_all.count();

    double self_time = Seconds(fn_stats->self_time()).count();
    double total_time = Seconds(fn_stats->total_time()).count();

    double avg_self_time =
      Milliseconds(fn_stats->self_time()).count() / fn_stats->num_calls();
    double avg_total_time =
      Milliseconds(fn_stats->total_time()).count() / fn_stats->num_calls();

    double worst_self_time = Milliseconds(fn_stats->worst_self_time()).count();
    double worst_total_time = Milliseconds(fn_stats->worst_total_time()).count();

    *stream() <<
      " <tr>\n" <<
      " <td>" << fn_stats->function()->GetTypeString() << "</td>\n" <<
      " <td>" << fn_stats->function()->name() << "</td>\n" <<
      " <td>" << fn_stats->num_calls() << "</td>\n" <<
      " <td>" << std::setprecision(2) << self_time_percent << "%</td>\n" <<
      " <td>" << std::setprecision(1) << self_time << "</td>\n" <<
      " <td>" << std::setprecision(1) << avg_self_time << "</td>\n" <<
      " <td>" << std::setprecision(1) << worst_self_time << "</td>\n" <<
      " <td>" << std::setprecision(2) << total_time_percent << "%</td>\n" <<
      " <td>" << std::setprecision(1) << total_time << "</td>\n" <<
      " <td>" << std::setprecision(1) << avg_total_time << "</td>\n" <<
      " <td>" << std::setprecision(1) << worst_total_time << "</td>\n" <<
      " </tr>\n";
  }

  stream()->flags(flags);

  *stream() <<
    " </tbody>\n"
    " </table>\n"
    "</body>\n"
    "</html>\n"
  ;
}
ExitCode _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    _initWireSpec();
    auto globalServiceContext = getGlobalServiceContext();

    globalServiceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
    globalServiceContext->setOpObserver(stdx::make_unique<OpObserver>());

    DBDirectClientFactory::get(globalServiceContext)
        .registerImplementation([](OperationContext* txn) {
            return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
        });

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if (replSettings.isMaster())
            l << " master=" << replSettings.isMaster();
        if (replSettings.isSlave())
            l << " slave=" << (int)replSettings.isSlave();

        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

#if defined(_WIN32)
    VersionInfoInterface::instance().logTargetMinOS();
#endif

    logProcessDetails();

    checked_cast<ServiceContextMongoD*>(getGlobalServiceContext())->createLockFile();

    transport::TransportLayerLegacy::Options options;
    options.port = listenPort;
    options.ipList = serverGlobalParams.bind_ip;

    auto sep =
        stdx::make_unique<ServiceEntryPointMongod>(getGlobalServiceContext()->getTransportLayer());
    auto sepPtr = sep.get();

    getGlobalServiceContext()->setServiceEntryPoint(std::move(sep));

    // Create, start, and attach the TL
    auto transportLayer = stdx::make_unique<transport::TransportLayerLegacy>(options, sepPtr);
    auto res = transportLayer->setup();
    if (!res.isOK()) {
        error() << "Failed to set up listener: " << res;
        return EXIT_NET_ERROR;
    }

    std::shared_ptr<DbWebServer> dbWebServer;
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        dbWebServer.reset(new DbWebServer(serverGlobalParams.bind_ip,
                                          serverGlobalParams.port + 1000,
                                          getGlobalServiceContext(),
                                          new RestAdminAccess()));
        if (!dbWebServer->setupSockets()) {
            error() << "Failed to set up sockets for HTTP interface during startup.";
            return EXIT_NET_ERROR;
        }
    }

    getGlobalServiceContext()->initializeGlobalStorageEngine();

#ifdef MONGO_CONFIG_WIREDTIGER_ENABLED
    if (WiredTigerCustomizationHooks::get(getGlobalServiceContext())->restartRequired()) {
        exitCleanly(EXIT_CLEAN);
    }
#endif

    // Warn if we detect configurations for multiple registered storage engines in
    // the same configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        BSONObj storageParamsObj = storageElement.Obj();
        BSONObjIterator i = storageParamsObj.begin();
        while (i.more()) {
            BSONElement e = i.next();
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    if (!getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager()) {
        if (moe::startupOptionsParsed.count("replication.enableMajorityReadConcern") &&
            moe::startupOptionsParsed["replication.enableMajorityReadConcern"].as<bool>()) {
            // Note: we are intentionally only erroring if the user explicitly requested that we
            // enable majority read concern. We do not error if they are implicitly enabled for
            // CSRS because a required step in the upgrade procedure can involve an mmapv1 node in
            // the CSRS in the REMOVED state. This is handled by the TopologyCoordinator.
            invariant(replSettings.isMajorityReadConcernEnabled());
            severe() << "Majority read concern requires a storage engine that supports"
                     << " snapshots, such as wiredTiger. " << storageGlobalParams.engine
                     << " does not support snapshots.";
            exitCleanly(EXIT_BADOPTIONS);
        }
    }

    logMongodStartupWarnings(storageGlobalParams, serverGlobalParams);

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    if (!storageGlobalParams.readOnly) {
        boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
    }

    if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalRecoverOnly)
        return EXIT_NET_ERROR;

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
    }

    auto startupOpCtx = getGlobalServiceContext()->makeOperationContext(&cc());

    repairDatabasesAndCheckVersion(startupOpCtx.get());

    if (storageGlobalParams.upgrade) {
        log() << "finished checking dbs";
        exitCleanly(EXIT_CLEAN);
    }

    uassertStatusOK(getGlobalAuthorizationManager()->initialize(startupOpCtx.get()));

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned)(curTimeMicros64() ^ startupSrandTimer.micros()));

    // The snapshot thread provides historical collection level and lock statistics for use
    // by the web interface. Only needed when HTTP is enabled.
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        statsSnapshotThread.go();

        invariant(dbWebServer);
        stdx::thread web(stdx::bind(&webServerListenThread, dbWebServer));
        web.detach();
    }

#ifndef _WIN32
    mongo::signalForkSuccess();
#endif

    AuthorizationManager* globalAuthzManager = getGlobalAuthorizationManager();
    if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
        Status status = authindex::verifySystemIndexes(startupOpCtx.get());
        if (!status.isOK()) {
            log() << redact(status);
            exitCleanly(EXIT_NEED_UPGRADE);
        }

        // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
        int foundSchemaVersion;
        status =
            globalAuthzManager->getAuthorizationVersion(startupOpCtx.get(), &foundSchemaVersion);
        if (!status.isOK()) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but startup could not verify schema version: " << status;
            exitCleanly(EXIT_NEED_UPGRADE);
        }
        if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but found " << foundSchemaVersion << ". In order to upgrade "
                  << "the auth schema, first downgrade MongoDB binaries to version "
                  << "2.6 and then run the authSchemaUpgrade command.";
            exitCleanly(EXIT_NEED_UPGRADE);
        }
    } else if (globalAuthzManager->isAuthEnabled()) {
        error() << "Auth must be disabled when starting without auth schema validation";
        exitCleanly(EXIT_BADOPTIONS);
    } else {
        // If authSchemaValidation is disabled and server is running without auth,
        // warn the user and continue startup without authSchema metadata checks.
        log() << startupWarningsLog;
        log() << "** WARNING: Startup auth schema validation checks are disabled for the "
                 "database."
              << startupWarningsLog;
        log() << "** This mode should only be used to manually repair corrupted auth "
                 "data."
              << startupWarningsLog;
    }

    auto shardingInitialized =
        uassertStatusOK(ShardingState::get(startupOpCtx.get())
                            ->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));
    if (shardingInitialized) {
        reloadShardRegistryUntilSuccess(startupOpCtx.get());
    }

    if (!storageGlobalParams.readOnly) {
        logStartup(startupOpCtx.get());

        startFTDC();

        getDeleter()->startWorkers();

        restartInProgressIndexesFromLastShutdown(startupOpCtx.get());

        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
            // Note: For replica sets, ShardingStateRecovery happens on transition to primary.
            if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
                uassertStatusOK(ShardingStateRecovery::recover(startupOpCtx.get()));
            }
        } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
            uassertStatusOK(
                initializeGlobalShardingStateForMongod(startupOpCtx.get(),
                                                       ConnectionString::forLocal(),
                                                       kDistLockProcessIdForConfigServer));
            Balancer::create(startupOpCtx->getServiceContext());
        }

        repl::getGlobalReplicationCoordinator()->startup(startupOpCtx.get());

        const unsigned long long missingRepl =
            checkIfReplMissingFromCommandLine(startupOpCtx.get());
        if (missingRepl) {
            log() << startupWarningsLog;
            log() << "** WARNING: mongod started without --replSet yet " << missingRepl
                  << " documents are present in local.system.replset" << startupWarningsLog;
            log() << "** Restart with --replSet unless you are doing maintenance and "
                  << " no other clients are connected." << startupWarningsLog;
            log() << "** The TTL collection monitor will not start because of this."
                  << startupWarningsLog;
            log() << "** ";
            log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
            log() << startupWarningsLog;
        } else {
            startTTLBackgroundJob();
        }

        if (!replSettings.usingReplSets() && !replSettings.isSlave() &&
            storageGlobalParams.engine != "devnull") {
            ScopedTransaction transaction(startupOpCtx.get(), MODE_X);
            Lock::GlobalWrite lk(startupOpCtx.get()->lockState());
            FeatureCompatibilityVersion::setIfCleanStartup(
                startupOpCtx.get(), repl::StorageInterface::get(getGlobalServiceContext()));
        }
    }

    startClientCursorMonitor();

    PeriodicTask::startRunningPeriodicTasks();

    // MessageServer::run will return when exit code closes its socket and we don't need the
    // operation context anymore.
    startupOpCtx.reset();

    auto start = getGlobalServiceContext()->addAndStartTransportLayer(std::move(transportLayer));
    if (!start.isOK()) {
        error() << "Failed to start the listener: " << start.toString();
        return EXIT_NET_ERROR;
    }

    return waitForShutdown();
}
StatusWith<ReadPreferenceSetting> ReadPreferenceSetting::fromBSON(const BSONObj& readPrefObj) {
    std::string modeStr;
    auto modeExtractStatus = bsonExtractStringField(readPrefObj, kModeFieldName, &modeStr);
    if (!modeExtractStatus.isOK()) {
        return modeExtractStatus;
    }

    ReadPreference mode;
    auto swReadPrefMode = parseReadPreferenceMode(modeStr);
    if (!swReadPrefMode.isOK()) {
        return swReadPrefMode.getStatus();
    }
    mode = std::move(swReadPrefMode.getValue());

    TagSet tags;
    BSONElement tagsElem;
    auto tagExtractStatus =
        bsonExtractTypedField(readPrefObj, kTagsFieldName, mongo::Array, &tagsElem);
    if (tagExtractStatus.isOK()) {
        tags = TagSet{BSONArray(tagsElem.Obj().getOwned())};

        // In accordance with the read preference spec, passing the default wildcard tagset
        // '[{}]' is the same as not passing a TagSet at all. Furthermore, passing an empty
        // TagSet with a non-primary ReadPreference is equivalent to passing the wildcard
        // ReadPreference.
        if (tags == TagSet() || tags == TagSet::primaryOnly()) {
            tags = defaultTagSetForMode(mode);
        }

        // If we are using a user supplied TagSet, check that it is compatible with
        // the readPreference mode.
        else if (ReadPreference::PrimaryOnly == mode && (tags != TagSet::primaryOnly())) {
            return Status(ErrorCodes::BadValue,
                          "Only empty tags are allowed with primary read preference");
        }
    } else if (ErrorCodes::NoSuchKey == tagExtractStatus) {
        tags = defaultTagSetForMode(mode);
    } else {
        return tagExtractStatus;
    }

    long long maxStalenessMSValue;
    auto maxStalenessMSExtractStatus = bsonExtractIntegerFieldWithDefault(
        readPrefObj, kMaxStalenessMSFieldName, 0, &maxStalenessMSValue);

    if (!maxStalenessMSExtractStatus.isOK()) {
        return maxStalenessMSExtractStatus;
    }

    if (maxStalenessMSValue < 0) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << kMaxStalenessMSFieldName
                                    << " must be a non-negative integer");
    }

    if (maxStalenessMSValue >= Milliseconds::max().count()) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << kMaxStalenessMSFieldName << " value cannot exceed "
                                    << Milliseconds::max().count());
    }

    return ReadPreferenceSetting(mode, tags, Milliseconds(maxStalenessMSValue));
}
/// <summary>
/// Returns the elapsed time of the stopwatch.
/// </summary>
/// <returns>
/// The elapsed time of the stopwatch
/// </returns>
Milliseconds elapsed() const { return Milliseconds(ns() / 1'000'000LL); }
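// Editor's note: the manual division by 1'000'000 above is the same
// truncating conversion std::chrono performs; a standalone equivalent:
#include <cassert>
#include <chrono>

int main() {
    using namespace std::chrono;
    const nanoseconds elapsedNs{1'234'567'890LL};
    const milliseconds elapsedMs = duration_cast<milliseconds>(elapsedNs);
    assert(elapsedMs.count() == 1234);  // fractional milliseconds truncated
}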
void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    invariant(_freshnessChecker);
    invariant(!_electCmdRunner);
    LoseElectionGuard lossGuard(_topCoord.get(),
                                _replExecutor.get(),
                                &_freshnessChecker,
                                &_electCmdRunner,
                                &_electionFinishedEvent);

    if (_freshnessChecker->isCanceled()) {
        LOG(2) << "Election canceled during freshness check phase";
        return;
    }

    const Date_t now(_replExecutor->now());
    const FreshnessChecker::ElectionAbortReason abortReason =
        _freshnessChecker->shouldAbortElection();

    // Avoid sleeping for two consecutive elections.
    switch (abortReason) {
        case FreshnessChecker::None:
            break;
        case FreshnessChecker::FreshnessTie:
            if ((_selfIndex != 0) && !_sleptLastElection) {
                const auto ms = Milliseconds(_nextRandomInt64_inlock(1000) + 50);
                const Date_t nextCandidateTime = now + ms;
                log() << "possible election tie; sleeping " << ms << " until "
                      << dateToISOStringLocal(nextCandidateTime);
                _topCoord->setElectionSleepUntil(nextCandidateTime);
                _scheduleWorkAt(nextCandidateTime,
                                stdx::bind(&ReplicationCoordinatorImpl::_recoverFromElectionTie,
                                           this,
                                           stdx::placeholders::_1));
                _sleptLastElection = true;
                return;
            }
            _sleptLastElection = false;
            break;
        case FreshnessChecker::FresherNodeFound:
            log() << "not electing self, we are not freshest";
            return;
        case FreshnessChecker::QuorumUnreachable:
            log() << "not electing self, we could not contact enough voting members";
            return;
        default:
            log() << "not electing self due to election abort message: "
                  << static_cast<int>(abortReason);
            return;
    }

    log() << "running for election"
          << (abortReason == FreshnessChecker::FreshnessTie
                  ? "; slept last election, so running regardless of possible tie"
                  : "");

    // Secure our vote for ourself first.
    if (!_topCoord->voteForMyself(now)) {
        return;
    }

    _electCmdRunner.reset(new ElectCmdRunner);
    StatusWith<executor::TaskExecutor::EventHandle> nextPhaseEvh = _electCmdRunner->start(
        _replExecutor.get(), _rsConfig, _selfIndex, _topCoord->getMaybeUpHostAndPorts());
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(18685, nextPhaseEvh.getStatus());

    _replExecutor
        ->onEvent(nextPhaseEvh.getValue(),
                  stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this))
        .status_with_transitional_ignore();
    lossGuard.dismiss();
}
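// Editor's sketch (assumed semantics): _nextRandomInt64_inlock(1000) is
// taken here to return a uniform draw in [0, 1000), so the tie sleep is
// a randomized backoff in [50, 1049] ms -- tied candidates are unlikely
// to retry at the same instant. A standalone equivalent:
#include <chrono>
#include <cstdint>
#include <random>

std::chrono::milliseconds electionTieBackoff(std::mt19937_64& rng) {
    std::uniform_int_distribution<std::int64_t> dist(0, 999);
    return std::chrono::milliseconds(dist(rng) + 50);
}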
void checkTime(int expected, int now_time, int period) {
    ASSERT_TRUE(Date_t::fromMillisSinceEpoch(expected) ==
                FTDCUtil::roundTime(Date_t::fromMillisSinceEpoch(now_time), Milliseconds(period)));
}
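// Editor's sketch of what the assertion appears to exercise: rounding a
// time up to the next multiple of a period. This is an assumption about
// FTDCUtil::roundTime's contract, not its actual implementation.
#include <cassert>
#include <cstdint>

int64_t roundUpToPeriod(int64_t nowMs, int64_t periodMs) {
    const int64_t remainder = nowMs % periodMs;
    return remainder == 0 ? nowMs : nowMs + (periodMs - remainder);
}

int main() {
    assert(roundUpToPeriod(1003, 100) == 1100);  // rounds up to the boundary
    assert(roundUpToPeriod(1100, 100) == 1100);  // already aligned
}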
void NetworkInterfaceASIO::_networkErrorCallback(AsyncOp* op, const std::error_code& ec) {
    ErrorCodes::Error errorCode = (ec.category() == mongoErrorCategory())
        ? ErrorCodes::fromInt(ec.value())
        : ErrorCodes::HostUnreachable;
    _completeOperation(op, {errorCode, ec.message(), Milliseconds(now() - op->_start)});
}
void OplogReader::connectToSyncSource(OperationContext* txn,
                                      const OpTime& lastOpTimeFetched,
                                      ReplicationCoordinator* replCoord) {
    const Timestamp sentinelTimestamp(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
    const OpTime sentinel(sentinelTimestamp, std::numeric_limits<long long>::max());
    OpTime oldestOpTimeSeen = sentinel;

    invariant(conn() == NULL);

    while (true) {
        HostAndPort candidate = replCoord->chooseNewSyncSource(lastOpTimeFetched.getTimestamp());

        if (candidate.empty()) {
            if (oldestOpTimeSeen == sentinel) {
                // If, in this invocation of connectToSyncSource(), we did not successfully
                // connect to any node ahead of us, we apparently have no sync sources to
                // connect to. This situation is common; e.g. if there are no writes to the
                // primary at the moment.
                return;
            }

            // Connected to at least one member, but in all cases we were too stale to use them
            // as a sync source.
            error() << "too stale to catch up";
            log() << "our last optime : " << lastOpTimeFetched;
            log() << "oldest available is " << oldestOpTimeSeen;
            log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
            setMinValid(txn, oldestOpTimeSeen);
            bool worked = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
            if (!worked) {
                warning() << "Failed to transition into "
                          << MemberState(MemberState::RS_RECOVERING)
                          << ". Current state: " << replCoord->getMemberState();
            }
            return;
        }

        if (!connect(candidate)) {
            LOG(2) << "can't connect to " << candidate.toString() << " to read operations";
            resetConnection();
            replCoord->blacklistSyncSource(candidate, Date_t::now() + Seconds(10));
            continue;
        }

        // Read the first (oldest) op and confirm that it's not newer than our last
        // fetched op. Otherwise, we have fallen off the back of that source's oplog.
        BSONObj remoteOldestOp(findOne(rsOplogName.c_str(), Query()));
        OpTime remoteOldOpTime = fassertStatusOK(28776, OpTime::parseFromBSON(remoteOldestOp));

        // remoteOldOpTime may come from a very old config, so we cannot compare their terms.
        if (!lastOpTimeFetched.isNull() &&
            lastOpTimeFetched.getTimestamp() < remoteOldOpTime.getTimestamp()) {
            // We're too stale to use this sync source.
            resetConnection();
            replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(1));
            if (oldestOpTimeSeen.getTimestamp() > remoteOldOpTime.getTimestamp()) {
                warning() << "we are too stale to use " << candidate.toString()
                          << " as a sync source";
                oldestOpTimeSeen = remoteOldOpTime;
            }
            continue;
        }

        // Got a valid sync source.
        return;
    }  // while (true)
}
// Calculate how many bytes will fit in our intermediate buffer based on
// the baud rate and propagation delay.
static unsigned calculateCapacity (Milliseconds propagationDelay, Baud baud) {
    return std::ceil(double(baud / 8) * propagationDelay / Milliseconds(1000));
}
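// Editor's worked example (assuming 8 bits per byte on the wire; a real
// UART frame also carries start/stop bits): at 115200 baud the link moves
// 14400 bytes/s, so a 50 ms propagation delay keeps at most 720 bytes in
// flight -- the capacity the buffer above must absorb.
#include <cassert>

int main() {
    const unsigned baud = 115200;
    const unsigned bytesPerSecond = baud / 8;  // 14400
    const unsigned propagationDelayMs = 50;
    const unsigned capacity = bytesPerSecond * propagationDelayMs / 1000;  // 720
    assert(capacity == 720);
}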
ReplicationCoordinator::StatusAndDuration
ReplicationCoordinatorMock::awaitReplicationOfLastOpApplied(
    const OperationContext* txn, const WriteConcernOptions& writeConcern) {
    return StatusAndDuration(Status::OK(), Milliseconds(0));
}
void NetworkInterfaceMock::_connectThenEnqueueOperation_inlock(const HostAndPort& target,
                                                               NetworkOperation&& op) {
    invariant(_hook);  // if there is no hook, we shouldn't even hit this codepath
    invariant(!_connections.count(target));

    auto handshakeReplyIter = _handshakeReplies.find(target);

    auto handshakeReply = (handshakeReplyIter != std::end(_handshakeReplies))
        ? handshakeReplyIter->second
        : RemoteCommandResponse(BSONObj(), BSONObj(), Milliseconds(0));

    auto valid = _hook->validateHost(target, handshakeReply);
    if (!valid.isOK()) {
        op.setResponse(_now_inlock(), valid);
        op.finishResponse();
        return;
    }

    auto swHookPostconnectCommand = _hook->makeRequest(target);

    if (!swHookPostconnectCommand.isOK()) {
        op.setResponse(_now_inlock(), swHookPostconnectCommand.getStatus());
        op.finishResponse();
        return;
    }

    boost::optional<RemoteCommandRequest> hookPostconnectCommand =
        std::move(swHookPostconnectCommand.getValue());

    if (!hookPostconnectCommand) {
        // If we don't have a post connect command, enqueue the actual command.
        // Use `target` here: `op` must not be touched after being moved into the queue.
        _enqueueOperation_inlock(std::move(op));
        _connections.emplace(target);
        return;
    }

    // The completion handler for the postconnect command schedules the original command.
    // Capture `target` by value so we never read from `op` after moving it.
    auto postconnectCompletionHandler = [this, target, op](ResponseStatus rs) mutable {
        stdx::lock_guard<stdx::mutex> lk(_mutex);
        if (!rs.isOK()) {
            op.setResponse(_now_inlock(), rs);
            op.finishResponse();
            return;
        }

        auto handleStatus = _hook->handleReply(target, std::move(rs));

        if (!handleStatus.isOK()) {
            op.setResponse(_now_inlock(), handleStatus);
            op.finishResponse();
            return;
        }

        _enqueueOperation_inlock(std::move(op));
        _connections.emplace(target);
    };

    auto postconnectOp = NetworkOperation(op.getCallbackHandle(),
                                          std::move(*hookPostconnectCommand),
                                          _now_inlock(),
                                          std::move(postconnectCompletionHandler));

    _enqueueOperation_inlock(std::move(postconnectOp));
}