bool TicketHolder::waitForTicketUntil(OperationContext* opCtx, Date_t until) {
    stdx::unique_lock<stdx::mutex> lk(_mutex);

    if (opCtx) {
        return opCtx->waitForConditionOrInterruptUntil(
            _newTicket, lk, until, [this] { return _tryAcquire(); });
    } else {
        return _newTicket.wait_until(
            lk, until.toSystemTimePoint(), [this] { return _tryAcquire(); });
    }
}
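// A minimal caller-side sketch (not part of the original source). It assumes access to a
// TicketHolder instance and an OperationContext, and shows how the two branches above are
// reached: with an opCtx the wait is interruptible; with a null opCtx it degrades to a
// plain wait_until against the system clock. The helper name is hypothetical.
bool acquireTicketWithTimeout(OperationContext* opCtx,
                              TicketHolder* ticketHolder,
                              Milliseconds timeout) {
    const Date_t deadline = Date_t::now() + timeout;
    // Returns false if the deadline passes before a ticket can be acquired.
    return ticketHolder->waitForTicketUntil(opCtx, deadline);
}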
void UserCacheInvalidator::run() {
    Client::initThread("UserCacheInvalidator");
    lastInvalidationTime = Date_t::now();

    while (true) {
        stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
        Date_t sleepUntil =
            lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs.load());
        Date_t now = Date_t::now();
        while (now < sleepUntil) {
            MONGO_IDLE_THREAD_BLOCK;
            invalidationIntervalChangedCondition.wait_until(lock,
                                                            sleepUntil.toSystemTimePoint());
            sleepUntil =
                lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs.load());
            now = Date_t::now();
        }
        lastInvalidationTime = now;
        lock.unlock();

        if (globalInShutdownDeprecated()) {
            break;
        }

        auto opCtx = cc().makeOperationContext();
        StatusWith<OID> currentGeneration = getCurrentCacheGeneration(opCtx.get());
        if (!currentGeneration.isOK()) {
            if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
                warning() << "_getUserCacheGeneration command not found on config server(s), "
                             "this most likely means you are running an outdated version of "
                             "mongod on the config servers";
            } else {
                warning() << "An error occurred while fetching current user cache generation "
                             "to check if user cache needs invalidation: "
                          << currentGeneration.getStatus();
            }
            // When in doubt, invalidate the cache
            _authzManager->invalidateUserCache();
            continue;
        }

        if (currentGeneration.getValue() != _previousCacheGeneration) {
            log() << "User cache generation changed from " << _previousCacheGeneration << " to "
                  << currentGeneration.getValue() << "; invalidating user cache";
            _authzManager->invalidateUserCache();
            _previousCacheGeneration = currentGeneration.getValue();
        }
    }
}
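// A simplified restatement (not part of the original source) of the sleep pattern in the loop
// above: the thread waits on a condition variable until a recomputed deadline, so another
// thread can change the interval and notify the condition variable to have the new value take
// effect immediately. The class and member names here are hypothetical.
class IntervalSleeper {
public:
    // Blocks for roughly one interval, re-reading _interval after every wakeup so that a
    // concurrent setInterval() call is honored without waiting out the old deadline.
    void sleepOneInterval() {
        stdx::unique_lock<stdx::mutex> lock(_mutex);
        const Date_t start = Date_t::now();
        Date_t sleepUntil = start + _interval;
        while (Date_t::now() < sleepUntil) {
            _intervalChanged.wait_until(lock, sleepUntil.toSystemTimePoint());
            sleepUntil = start + _interval;
        }
    }

    void setInterval(Seconds interval) {
        stdx::lock_guard<stdx::mutex> lock(_mutex);
        _interval = interval;
        _intervalChanged.notify_all();
    }

private:
    stdx::mutex _mutex;
    stdx::condition_variable _intervalChanged;
    Seconds _interval{600};
};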
Status NetworkInterfaceASIO::setAlarm(Date_t when, const stdx::function<void()>& action) {
    if (inShutdown()) {
        return {ErrorCodes::ShutdownInProgress, "NetworkInterfaceASIO shutdown in progress"};
    }

    // "alarm" must stay alive until it expires, hence the shared_ptr.
    auto alarm = std::make_shared<asio::system_timer>(_io_service, when.toSystemTimePoint());
    alarm->async_wait([alarm, this, action](std::error_code ec) {
        if (!ec) {
            return action();
        } else if (ec != asio::error::operation_aborted) {
            // When the network interface is shut down, it will cancel all pending
            // alarms, raising an "operation_aborted" error here, which we ignore.
            warning() << "setAlarm() received an error: " << ec.message();
        }
    });

    return Status::OK();
}
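// A minimal scheduling sketch (not part of the original source), assuming a pointer to the
// NetworkInterfaceASIO instance. The callback runs on the interface's io_service thread; the
// returned Status only reports whether the alarm was scheduled, not whether it fired. The
// helper name and the 500ms delay are arbitrary choices for illustration.
Status scheduleLogAlarm(NetworkInterfaceASIO* net) {
    return net->setAlarm(Date_t::now() + Milliseconds(500),
                         [] { log() << "alarm fired"; });
}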
// Theory of operation for waitForConditionOrInterruptNoAssertUntil and markKilled:
//
// An operation indicates to potential killers that it is waiting on a condition variable by
// setting _waitMutex and _waitCV, while holding the lock on its parent Client. It then unlocks
// its Client, unblocking any killers, which are required to have locked the Client before calling
// markKilled.
//
// When _waitMutex and _waitCV are set, killers must lock _waitMutex before setting the _killCode,
// and must signal _waitCV before releasing _waitMutex. Unfortunately, they must lock _waitMutex
// without holding a lock on Client to avoid a deadlock with callers of
// waitForConditionOrInterruptNoAssertUntil(). So, in the event that _waitMutex is set, the killer
// increments _numKillers, drops the Client lock, acquires _waitMutex and then re-acquires the
// Client lock. We know that the Client, its OperationContext and _waitMutex will remain valid
// during this period because the caller of waitForConditionOrInterruptNoAssertUntil will not
// return while _numKillers > 0 and will not return until it has itself reacquired _waitMutex.
// Instead, that caller will keep waiting on _waitCV until _numKillers drops to 0.
//
// In essence, when _waitMutex is set, _killCode is guarded by _waitMutex and _waitCV, but when
// _waitMutex is not set, it is guarded by the Client spinlock. Changing _waitMutex is itself
// guarded by the Client spinlock and _numKillers.
//
// When _numKillers does drop to 0, the waiter will null out _waitMutex and _waitCV.
//
// This implementation adds a minimum of two spinlock acquire-release pairs to every condition
// variable wait.
StatusWith<stdx::cv_status> OperationContext::waitForConditionOrInterruptNoAssertUntil(
    stdx::condition_variable& cv, stdx::unique_lock<stdx::mutex>& m, Date_t deadline) noexcept {
    invariant(getClient());
    {
        stdx::lock_guard<Client> clientLock(*getClient());
        invariant(!_waitMutex);
        invariant(!_waitCV);
        invariant(0 == _numKillers);

        // This interrupt check must be done while holding the client lock, so as not to race
        // with a concurrent caller of markKilled.
        auto status = checkForInterruptNoAssert();
        if (!status.isOK()) {
            return status;
        }
        _waitMutex = m.mutex();
        _waitCV = &cv;
    }

    if (hasDeadline()) {
        deadline = std::min(deadline, getDeadline());
    }

    const auto waitStatus = [&] {
        if (Date_t::max() == deadline) {
            cv.wait(m);
            return stdx::cv_status::no_timeout;
        }
        const auto clockSource = getServiceContext()->getPreciseClockSource();
        if (clockSource->tracksSystemClock()) {
            return cv.wait_until(m, deadline.toSystemTimePoint());
        }

        // The following cases only occur during testing, when the precise clock source is
        // virtualized and does not track the system clock.
        return cvWaitUntilWithClockSource(clockSource, cv, m, deadline);
    }();

    // Continue waiting on cv until no other thread is attempting to kill this one.
    cv.wait(m, [this] {
        stdx::lock_guard<Client> clientLock(*getClient());
        if (0 == _numKillers) {
            _waitMutex = nullptr;
            _waitCV = nullptr;
            return true;
        }
        return false;
    });

    auto status = checkForInterruptNoAssert();
    if (!status.isOK()) {
        return status;
    }

    if (hasDeadline() && waitStatus == stdx::cv_status::timeout && deadline == getDeadline()) {
        // It's possible that the system clock used in stdx::condition_variable::wait_until
        // is slightly ahead of the FastClock used in checkForInterrupt. In this case,
        // we treat the operation as though it has exceeded its time limit, just as if the
        // FastClock and system clock had agreed.
        markKilled(ErrorCodes::ExceededTimeLimit);
        return Status(ErrorCodes::ExceededTimeLimit, "operation exceeded time limit");
    }

    return waitStatus;
}
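// A caller-side sketch (not part of the original source) of how the NoAssert variant is
// typically consumed: loop on the predicate, propagate an interruption Status, and map a
// cv_status::timeout to "condition not satisfied". The helper name and parameters are
// hypothetical; 'ready' stands for whatever condition the caller is waiting on.
StatusWith<bool> waitForReadyUntil(OperationContext* opCtx,
                                   stdx::condition_variable& cv,
                                   stdx::unique_lock<stdx::mutex>& lk,
                                   const bool& ready,
                                   Date_t deadline) {
    while (!ready) {
        auto swCvStatus = opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, deadline);
        if (!swCvStatus.isOK()) {
            return swCvStatus.getStatus();  // interrupted, killed, or past the opCtx deadline
        }
        if (swCvStatus.getValue() == stdx::cv_status::timeout) {
            return false;  // 'deadline' passed before the condition became true
        }
    }
    return true;  // condition satisfied before the deadline and without interruption
}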