/**
 * Test that a manager whose cursors do not have sessions does not return them.
 */
TEST_F(CursorManagerTestCustomOpCtx, CursorsWithoutSessions) {
    // Register a single cursor that carries no logical session id.
    auto opCtx = _queryServiceContext->makeOperationContext();
    auto pinnedCursor = makeCursor(opCtx.get());
    ASSERT_EQUALS(pinnedCursor.getCursor()->getSessionId(), boost::none);

    // The manager should therefore report no active sessions at all.
    LogicalSessionIdSet activeSessions;
    useCursorManager()->appendActiveSessions(&activeSessions);
    ASSERT(activeSessions.empty());
}
/**
 * Converts each LogicalSessionFromClient in 'sessions' into a LogicalSessionId
 * (validating spoofing privileges via 'allowSpoof') and collects the results
 * into a set.
 */
LogicalSessionIdSet makeLogicalSessionIds(const std::vector<LogicalSessionFromClient>& sessions,
                                          OperationContext* opCtx,
                                          std::initializer_list<Privilege> allowSpoof) {
    LogicalSessionIdSet result;
    result.reserve(sessions.size());

    for (const auto& clientSession : sessions) {
        result.insert(makeLogicalSessionId(clientSession, opCtx, allowSpoof));
    }

    return result;
}
/**
 * Determines which of 'sessions' are NOT present in the sessions collection at 'ns'.
 *
 * Starts from the assumption that every requested session is gone, then issues batched
 * find commands (via 'send') projecting only _id, and erases from the result set every
 * session id that a batch reports as still present. The remainder — the ids never seen
 * in any batch — is returned.
 */
StatusWith<LogicalSessionIdSet> SessionsCollection::doFetch(const NamespaceString& ns,
                                                            const LogicalSessionIdSet& sessions,
                                                            FindBatchFn send) {
    // Factory for an empty batch, used by runBulkGeneric to start each batch.
    auto makeT = [] { return std::vector<LogicalSessionId>{}; };

    // Appends one session id to the batch being accumulated.
    auto add = [](std::vector<LogicalSessionId>& batch, const LogicalSessionId& record) {
        batch.push_back(record);
    };

    // Seed with ALL requested ids; every id found in the collection is erased below,
    // so what survives is the set of removed (absent) sessions.
    LogicalSessionIdSet removed = sessions;

    // Runs 'send' for one find command and erases every returned _id from 'removed'.
    auto wrappedSend = [&](BSONObj batch) {
        auto swBatchResult = send(batch);

        if (!swBatchResult.isOK()) {
            return swBatchResult.getStatus();
        } else {
            auto result = SessionsCollectionFetchResult::parse("SessionsCollectionFetchResult"_sd,
                                                               swBatchResult.getValue());

            // Any id echoed back in the first batch still exists in the collection.
            for (const auto& lsid : result.getCursor().getFirstBatch()) {
                removed.erase(lsid.get_id());
            }

            return Status::OK();
        }
    };

    // Builds a find request of the form {find: <coll>, filter: {_id: {$in: batch}},
    // projection: {_id: 1}} sized to fetch the whole batch in a single response.
    // NOTE: the set*({}) calls must precede the corresponding get*() mutations.
    auto sendLocal = [&](std::vector<LogicalSessionId>& batch) {
        SessionsCollectionFetchRequest request;

        request.setFind(ns.coll());
        request.setFilter({});
        request.getFilter().set_id({});
        request.getFilter().get_id().setIn(batch);

        request.setProjection({});
        request.getProjection().set_id(1);

        // batchSize/limit/singleBatch together guarantee all matches arrive in
        // the first (and only) batch, which is all wrappedSend inspects.
        request.setBatchSize(batch.size());
        request.setLimit(batch.size());
        request.setSingleBatch(true);

        return wrappedSend(request.toBSON());
    };

    auto status = runBulkGeneric(makeT, add, sendLocal, sessions);

    if (!status.isOK()) {
        return status;
    }

    return removed;
}
/**
 * Removes the specified set of session ids from the persistent sessions collection and returns the
 * number of sessions actually removed.
 *
 * Before issuing the (potentially slow) collection operations this function yields: it saves and
 * releases the operation's top-level locks and abandons the storage snapshot, restoring the locks
 * on every exit path via a scope guard. Throws (via uassertStatusOK) if either collection call
 * fails.
 */
int removeSessionsRecords(OperationContext* opCtx,
                          SessionsCollection& sessionsCollection,
                          const LogicalSessionIdSet& sessionIdsToRemove) {
    // Fast path: nothing to remove, so don't bother yielding locks.
    if (sessionIdsToRemove.empty()) {
        return 0;
    }

    Locker* locker = opCtx->lockState();

    // Save and release all currently-held top-level locks; yielding is only legal
    // if this succeeds, hence the invariant.
    Locker::LockSnapshot snapshot;
    invariant(locker->saveLockStateAndUnlock(&snapshot));

    // Reacquire the saved locks on every exit path (including exceptions). The
    // restore itself must not be interrupted, hence UninterruptibleLockGuard.
    const auto guard = MakeGuard([&] {
        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
        locker->restoreLockState(opCtx, snapshot);
    });

    // Top-level locks are freed, release any potential low-level (storage engine-specific
    // locks). If we are yielding, we are at a safe place to do so.
    opCtx->recoveryUnit()->abandonSnapshot();

    // Track the number of yields in CurOp.
    CurOp::get(opCtx)->yielded();

    // findRemovedSessions presumably yields the subset of sessionIdsToRemove that is no
    // longer present in the sessions collection; the transaction records for exactly that
    // subset are then purged. (See SessionsCollection::doFetch for the "removed" semantics.)
    auto removed =
        uassertStatusOK(sessionsCollection.findRemovedSessions(opCtx, sessionIdsToRemove));
    uassertStatusOK(sessionsCollection.removeTransactionRecords(opCtx, removed));

    // NOTE(review): size_t narrowed to int here — harmless for realistic set sizes.
    return removed.size();
}
/**
 * Test a manager with multiple cursors running inside of different sessions.
 */
TEST_F(CursorManagerTestCustomOpCtx, MultipleCursorsMultipleSessions) {
    auto firstLsid = makeLogicalSessionIdForTest();
    auto secondLsid = makeLogicalSessionIdForTest();

    CursorId firstCursorId;
    CursorId secondCursorId;

    // Open one cursor under the first session.
    {
        auto opCtxForFirst = _queryServiceContext->makeOperationContext(firstLsid, boost::none);
        firstCursorId = makeCursor(opCtxForFirst.get()).getCursor()->cursorid();
    }

    // Open one cursor under the second session.
    {
        auto opCtxForSecond = _queryServiceContext->makeOperationContext(secondLsid, boost::none);
        secondCursorId = makeCursor(opCtxForSecond.get()).getCursor()->cursorid();
    }

    // Open one cursor with no session attached at all.
    {
        auto opCtxNoSession = _queryServiceContext->makeOperationContext();
        makeCursor(opCtxNoSession.get()).getCursor();
    }

    // The manager should report exactly the two session ids, nothing else.
    LogicalSessionIdSet reportedLsids;
    useCursorManager()->appendActiveSessions(&reportedLsids);
    ASSERT_EQ(reportedLsids.size(), size_t(2));
    ASSERT(reportedLsids.find(firstLsid) != reportedLsids.end());
    ASSERT(reportedLsids.find(secondLsid) != reportedLsids.end());

    // Each session should map to exactly its own single cursor.
    auto cursorsForFirst = useCursorManager()->getCursorsForSession(firstLsid);
    ASSERT_EQ(cursorsForFirst.size(), size_t(1));
    ASSERT(cursorsForFirst.find(firstCursorId) != cursorsForFirst.end());

    auto cursorsForSecond = useCursorManager()->getCursorsForSession(secondLsid);
    ASSERT_EQ(cursorsForSecond.size(), size_t(1));
    ASSERT(cursorsForSecond.find(secondCursorId) != cursorsForSecond.end());
}
/**
 * Test a manager that has one cursor running inside of a session.
 */
TEST_F(CursorManagerTestCustomOpCtx, OneCursorWithASession) {
    // Register a single cursor under a fresh logical session.
    auto sessionId = makeLogicalSessionIdForTest();
    auto opCtx = _queryServiceContext->makeOperationContext(sessionId, boost::none);
    auto pinnedCursor = makeCursor(opCtx.get());

    // The manager should report exactly that one session.
    LogicalSessionIdSet activeLsids;
    useCursorManager()->appendActiveSessions(&activeLsids);
    ASSERT_EQ(activeLsids.size(), size_t(1));
    ASSERT(activeLsids.find(sessionId) != activeLsids.end());

    // Looking up cursors for that session should yield only ours.
    auto sessionCursors = useCursorManager()->getCursorsForSession(sessionId);
    ASSERT_EQ(sessionCursors.size(), size_t(1));
    auto cursorId = pinnedCursor.getCursor()->cursorid();
    ASSERT(sessionCursors.find(cursorId) != sessionCursors.end());

    // Kill the cursor, then verify that no per-session bookkeeping remains.
    pinnedCursor.release();
    ASSERT_OK(useCursorManager()->killCursor(opCtx.get(), cursorId, false));

    LogicalSessionIdSet lsidsAfterKill;
    useCursorManager()->appendActiveSessions(&lsidsAfterKill);
    ASSERT(lsidsAfterKill.empty());
    ASSERT(useCursorManager()->getCursorsForSession(sessionId).empty());
}
/**
 * Runs one refresh pass of the logical session cache against the sessions collection:
 *   1. resets and restarts the serverStatus refresh stats,
 *   2. upserts records for recently-active and currently-running sessions,
 *   3. removes records for explicitly-ended sessions,
 *   4. kills cursors belonging to sessions that no longer exist in the collection
 *      or were explicitly ended.
 *
 * On failure partway through, scope guards swap the in-flight session sets back into
 * the cache (merging any entries added concurrently) so no session is lost.
 */
void LogicalSessionCacheImpl::_refresh(Client* client) {
    // Stats for serverStatus: zero out the per-run counters and stamp the start of
    // this run under the cache mutex.
    {
        stdx::lock_guard<stdx::mutex> lk(_cacheMutex);

        // Clear the refresh-related stats with the beginning of our run.
        _stats.setLastSessionsCollectionJobDurationMillis(0);
        _stats.setLastSessionsCollectionJobEntriesRefreshed(0);
        _stats.setLastSessionsCollectionJobEntriesEnded(0);
        _stats.setLastSessionsCollectionJobCursorsClosed(0);

        // Start the new run.
        _stats.setLastSessionsCollectionJobTimestamp(now());
        _stats.setSessionsCollectionJobCount(_stats.getSessionsCollectionJobCount() + 1);
    }

    // This will finish timing _refresh for our stats no matter when we return.
    const auto timeRefreshJob = MakeGuard([this] {
        stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
        auto millis = now() - _stats.getLastSessionsCollectionJobTimestamp();
        _stats.setLastSessionsCollectionJobDurationMillis(millis.count());
    });

    // Get or make an opCtx: reuse the client's current operation context if one
    // exists, otherwise create one scoped to this refresh.
    boost::optional<ServiceContext::UniqueOperationContext> uniqueCtx;
    auto* const opCtx = [&client, &uniqueCtx] {
        if (client->getOperationContext()) {
            return client->getOperationContext();
        }

        uniqueCtx.emplace(client->makeOperationContext());
        return uniqueCtx->get();
    }();

    // Bail out (best-effort) if the sessions collection isn't available yet; the
    // next scheduled refresh will retry.
    auto res = _sessionsColl->setupSessionsCollection(opCtx);
    if (!res.isOK()) {
        log() << "Sessions collection is not set up; "
              << "waiting until next sessions refresh interval: " << res.reason();
        return;
    }

    // NOTE(review): staleSessions appears unused in this function — confirm before removing.
    LogicalSessionIdSet staleSessions;
    LogicalSessionIdSet explicitlyEndingSessions;
    LogicalSessionIdMap<LogicalSessionRecord> activeSessions;

    // backSwapper creates a guard that, on exception, swaps the ending or active
    // sessions back into the LogicalSessionCache and merges in any records that were
    // added to the cache after we swapped them out (emplace does not overwrite
    // entries restored by the swap).
    auto backSwapper = [this](auto& member, auto& temp) {
        return MakeGuard([this, &member, &temp] {
            stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
            using std::swap;
            swap(member, temp);
            for (const auto& it : temp) {
                member.emplace(it);
            }
        });
    };

    // Atomically take ownership of the current ending/active session sets, leaving
    // empty sets in the cache for new arrivals.
    {
        using std::swap;
        stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
        swap(explicitlyEndingSessions, _endingSessions);
        swap(activeSessions, _activeSessions);
    }

    // NOTE(review): "BackSwaper" below is a pre-existing spelling; kept as-is.
    auto activeSessionsBackSwapper = backSwapper(_activeSessions, activeSessions);
    auto explicitlyEndingBackSwaper = backSwapper(_endingSessions, explicitlyEndingSessions);

    // Remove all explicitlyEndingSessions from activeSessions: an ended session must
    // not be refreshed.
    for (const auto& lsid : explicitlyEndingSessions) {
        activeSessions.erase(lsid);
    }

    // Refresh all recently active sessions as well as sessions attached to running ops.
    LogicalSessionRecordSet activeSessionRecords{};

    auto runningOpSessions = _service->getActiveOpSessions();

    for (const auto& it : runningOpSessions) {
        // If a running op is the cause of an upsert, we won't have a user name for the
        // record; skip sessions that were explicitly ended.
        if (explicitlyEndingSessions.count(it) > 0) {
            continue;
        }
        activeSessionRecords.insert(makeLogicalSessionRecord(it, now()));
    }
    for (const auto& it : activeSessions) {
        activeSessionRecords.insert(it.second);
    }

    // Refresh the active sessions in the sessions collection. Throws on failure, in
    // which case the back-swapper guard restores the cache state.
    uassertStatusOK(_sessionsColl->refreshSessions(opCtx, activeSessionRecords));
    activeSessionsBackSwapper.Dismiss();
    {
        stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
        _stats.setLastSessionsCollectionJobEntriesRefreshed(activeSessionRecords.size());
    }

    // Remove the ending sessions from the sessions collection.
    uassertStatusOK(_sessionsColl->removeRecords(opCtx, explicitlyEndingSessions));
    explicitlyEndingBackSwaper.Dismiss();
    {
        stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
        _stats.setLastSessionsCollectionJobEntriesEnded(explicitlyEndingSessions.size());
    }

    // Find which running, but not recently active sessions, are expired, and add them
    // to the list of sessions to kill cursors for.
    KillAllSessionsByPatternSet patterns;

    auto openCursorSessions = _service->getOpenCursorSessions();
    // TODO: think about pruning ending and active out of openCursorSessions.
    auto statusAndRemovedSessions = _sessionsColl->findRemovedSessions(opCtx, openCursorSessions);

    if (statusAndRemovedSessions.isOK()) {
        auto removedSessions = statusAndRemovedSessions.getValue();
        for (const auto& lsid : removedSessions) {
            patterns.emplace(makeKillAllSessionsByPattern(opCtx, lsid));
        }
    } else {
        // Ignore errors: cursor cleanup is best-effort and will be retried next run.
    }

    // Add all of the explicitly ended sessions to the list of sessions to kill cursors for.
    for (const auto& lsid : explicitlyEndingSessions) {
        patterns.emplace(makeKillAllSessionsByPattern(opCtx, lsid));
    }

    SessionKiller::Matcher matcher(std::move(patterns));
    auto killRes = _service->killCursorsWithMatchingSessions(opCtx, std::move(matcher));
    {
        stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
        _stats.setLastSessionsCollectionJobCursorsClosed(killRes.second);
    }
}