// Lock conflict matrix tests
static void checkConflict(LockMode existingMode, LockMode newMode, bool hasConflict) {
    LockManager lockMgr;
    const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));

    MMAPV1LockerImpl lockerExisting;
    TrackingLockGrantNotification notifyExisting;
    LockRequest requestExisting;
    requestExisting.initNew(&lockerExisting, &notifyExisting);

    ASSERT(LOCK_OK == lockMgr.lock(resId, &requestExisting, existingMode));

    MMAPV1LockerImpl lockerNew;
    TrackingLockGrantNotification notifyNew;
    LockRequest requestNew;
    requestNew.initNew(&lockerNew, &notifyNew);

    LockResult result = lockMgr.lock(resId, &requestNew, newMode);
    if (hasConflict) {
        ASSERT_EQUALS(LOCK_WAITING, result);
    } else {
        ASSERT_EQUALS(LOCK_OK, result);
    }

    lockMgr.unlock(&requestNew);
    lockMgr.unlock(&requestExisting);
}
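A helper like this is typically driven from a table of mode pairs. The test below is an illustrative sketch rather than a copy of the original suite: it assumes only the checkConflict signature above and the standard multi-granularity compatibility rules for the intent modes (MODE_IS, MODE_IX) and the full shared/exclusive modes (MODE_S, MODE_X).

// Illustrative sketch: walking a few representative cells of the conflict matrix.
TEST(LockManager, ValidateConflictMatrixSketch) {
    // Intent modes are compatible with each other.
    checkConflict(MODE_IS, MODE_IS, false);
    checkConflict(MODE_IX, MODE_IX, false);
    checkConflict(MODE_IS, MODE_IX, false);

    // Shared is compatible with shared and intent-shared...
    checkConflict(MODE_S, MODE_S, false);
    checkConflict(MODE_S, MODE_IS, false);

    // ...but conflicts with intent-exclusive and exclusive.
    checkConflict(MODE_S, MODE_IX, true);
    checkConflict(MODE_S, MODE_X, true);

    // Exclusive conflicts with every other mode.
    checkConflict(MODE_X, MODE_IS, true);
    checkConflict(MODE_X, MODE_IX, true);
    checkConflict(MODE_X, MODE_S, true);
    checkConflict(MODE_X, MODE_X, true);
}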
TEST(LockManager, GrantRecursiveNonCompatibleConvertDown) {
    LockManager lockMgr;
    const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));

    LockState locker;
    TrackingLockGrantNotification notify;

    LockRequest request;
    request.initNew(&locker, &notify);

    ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_X));
    ASSERT(request.mode == MODE_X);
    ASSERT(request.recursiveCount == 1);
    ASSERT(notify.numNotifies == 0);

    // Acquire again, in *non-compatible*, but less strict mode
    ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
    ASSERT(request.mode == MODE_X);
    ASSERT(request.recursiveCount == 2);
    ASSERT(notify.numNotifies == 0);

    // Release first acquire
    lockMgr.unlock(&request);
    ASSERT(request.mode == MODE_X);
    ASSERT(request.recursiveCount == 1);

    // Release second acquire
    lockMgr.unlock(&request);
    ASSERT(request.recursiveCount == 0);
}
LockResult LockerImpl::lock(const ResourceId& resId, LockMode mode, unsigned timeoutMs) {
    _notify.clear();

    _lock.lock();
    LockRequest* request = _find(resId);

    if (request == NULL) {
        request = new LockRequest();
        request->initNew(resId, this, &_notify);

        _requests.insert(LockRequestsPair(resId, request));
    } else {
        invariant(request->recursiveCount > 0);
        request->notify = &_notify;
    }
    _lock.unlock();

    // Methods on the Locker class are always called from a single thread, so it is safe to
    // release the spin lock, which protects the Locker here. The only thing which could alter
    // the state of the request is deadlock detection, which however would synchronize on the
    // LockManager calls.

    LockResult result = globalLockManagerPtr->lock(resId, request, mode);
    if (result == LOCK_WAITING) {
        // Under the MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on
        // a DB lock while holding the flush lock, so the flush lock has to be released. This
        // is only correct to do if not in a write unit of work.
        bool unlockedFlushLock = false;

        if (!inAWriteUnitOfWork() &&
            (resId != resourceIdGlobal) &&
            (resId != resourceIdMMAPV1Flush) &&
            (resId != resourceIdLocalDB)) {
            invariant(unlock(resourceIdMMAPV1Flush));
            unlockedFlushLock = true;
        }

        // Do the blocking outside of the flush lock (if not in a write unit of work)
        result = _notify.wait(timeoutMs);

        if (unlockedFlushLock) {
            // We cannot obey the timeout here, because it is not correct to return from the
            // lock request with the flush lock released.
            invariant(LOCK_OK ==
                      lock(resourceIdMMAPV1Flush, getLockMode(resourceIdGlobal), UINT_MAX));
        }
    }

    if (result != LOCK_OK) {
        // Can only be LOCK_TIMEOUT, because the lock manager does not return any other errors
        // at this point. It could also be LOCK_DEADLOCK once deadlock detection is implemented.
        invariant(result == LOCK_TIMEOUT);
        invariant(_unlockAndUpdateRequestsList(resId, request));
    }

    return result;
}
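The `_notify` member that lock() clears and then waits on is only exercised here through clear() and wait(). As a rough illustration of what such a grant notification could look like, the sketch below implements one with a plain condition variable; the class name, the LockGrantNotification base, and the use of LOCK_INVALID as a "not yet granted" sentinel are assumptions made for the sketch, not details taken from this excerpt. It requires <mutex>, <condition_variable>, and <chrono>.

// Minimal sketch of a condition-variable based grant notification, assuming a
// LockGrantNotification interface with a virtual notify() callback that the lock
// manager invokes once the queued request is granted. Names are illustrative.
class CondVarLockGrantNotificationSketch : public LockGrantNotification {
public:
    // Reset before issuing a new request (mirrors _notify.clear() above).
    void clear() {
        std::lock_guard<std::mutex> lk(_mutex);
        _result = LOCK_INVALID;
    }

    // Block the requesting thread until notify() fires or the timeout expires
    // (mirrors _notify.wait(timeoutMs) above).
    LockResult wait(unsigned timeoutMs) {
        std::unique_lock<std::mutex> lk(_mutex);
        const bool granted = _cond.wait_for(lk,
                                            std::chrono::milliseconds(timeoutMs),
                                            [this] { return _result != LOCK_INVALID; });
        return granted ? _result : LOCK_TIMEOUT;
    }

    // Called by the lock manager when the blocked request is finally granted.
    virtual void notify(const ResourceId& resId, LockResult result) {
        std::lock_guard<std::mutex> lk(_mutex);
        _result = result;
        _cond.notify_all();
    }

private:
    std::mutex _mutex;
    std::condition_variable _cond;
    LockResult _result = LOCK_INVALID;
};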
TEST(LockManager, Grant) {
    LockManager lockMgr;
    const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));

    MMAPV1LockerImpl locker;
    TrackingLockGrantNotification notify;

    LockRequest request;
    request.initNew(&locker, &notify);

    ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
    ASSERT(request.mode == MODE_S);
    ASSERT(request.recursiveCount == 1);
    ASSERT(notify.numNotifies == 0);

    lockMgr.unlock(&request);
    ASSERT(request.recursiveCount == 0);
}
void LockManager::run() {
  deadlock_detector_->start();

  const std::uint64_t kMaxTryIncoming =
      static_cast<std::uint64_t>(FLAGS_max_try_incoming);
  const std::uint64_t kMaxTryInner =
      static_cast<std::uint64_t>(FLAGS_max_try_inner);

  while (true) {
    // Admit a bounded number of newly arrived requests.
    for (std::uint64_t tries = 0; tries < kMaxTryIncoming; ++tries) {
      if (!incoming_requests_.empty()) {
        const LockRequest request = incoming_requests_.popOne();
        if (request.getRequestType() == RequestType::kReleaseLocks) {
          CHECK(releaseAllLocks(request.getTransactionId()))
              << "Unexpected condition occurred.";
        } else if (acquireLock(request.getTransactionId(), request.getResourceId(),
                               request.getAccessMode())) {
          LOG(INFO) << "Transaction " << std::to_string(request.getTransactionId())
                    << " is waiting " + request.getResourceId().toString();
          inner_pending_requests_.push(request);
        } else {
          LOG(INFO) << "Transaction " << std::to_string(request.getTransactionId())
                    << " acquired " + request.getResourceId().toString();
          permitted_requests_.push(request);
        }
      }
    }

    // Retry a bounded number of previously blocked requests.
    for (std::uint64_t tries = 0; tries < kMaxTryInner; ++tries) {
      if (!inner_pending_requests_.empty()) {
        const LockRequest request = inner_pending_requests_.front();
        if (acquireLock(request.getTransactionId(), request.getResourceId(),
                        request.getAccessMode())) {
          inner_pending_requests_.pop();
          permitted_requests_.push(request);
        }
      }
    }

    // Resolve deadlocks.
    killVictims();
  }
}
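For readers who want the shape of run() without the surrounding types, the following is a self-contained, single-threaded sketch of one iteration of the same two-phase structure: drain a bounded number of incoming requests, then retry the ones that could not be granted from an inner pending queue. The Request type and the exclusive-only ToyLockTable are stand-ins invented for the sketch (tryAcquire is assumed to return true when the lock is granted), and deadlock resolution is omitted; none of this is the actual API of the code above.

#include <cstdint>
#include <queue>
#include <unordered_map>

// Stand-in request type for illustration only.
struct Request {
    int transaction_id;
    int resource_id;
};

// Trivial exclusive-only lock table: a resource is either free or owned by one transaction.
class ToyLockTable {
public:
    // Returns true when the lock is granted (newly, or already held by this transaction).
    bool tryAcquire(const Request& request) {
        auto it = owner_.find(request.resource_id);
        if (it == owner_.end()) {
            owner_[request.resource_id] = request.transaction_id;
            return true;
        }
        return it->second == request.transaction_id;
    }

private:
    std::unordered_map<int, int> owner_;
};

// One iteration of the two-phase processing loop sketched above.
void processOnce(ToyLockTable& locks,
                 std::queue<Request>& incoming,
                 std::queue<Request>& pending,
                 std::queue<Request>& permitted,
                 const std::uint64_t max_try_incoming,
                 const std::uint64_t max_try_inner) {
    // Phase 1: admit up to max_try_incoming newly arrived requests.
    for (std::uint64_t tries = 0; tries < max_try_incoming && !incoming.empty(); ++tries) {
        const Request request = incoming.front();
        incoming.pop();
        if (locks.tryAcquire(request)) {
            permitted.push(request);  // granted immediately
        } else {
            pending.push(request);    // blocked; retried in phase 2 of a later iteration
        }
    }

    // Phase 2: retry up to max_try_inner previously blocked requests, front first.
    for (std::uint64_t tries = 0; tries < max_try_inner && !pending.empty(); ++tries) {
        const Request request = pending.front();
        if (locks.tryAcquire(request)) {
            pending.pop();
            permitted.push(request);
        }
    }

    // Deadlock resolution (killVictims() in the original) is intentionally left out here.
}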