void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
    // Only meaningful for the MMAP V1 storage engine; a no-op otherwise.
    if (!IsForMMAPV1)
        return;

    // The flush lock always has a reference count of 1, because it is dropped at the end of
    // each write unit of work in order to allow the flush thread to run. See the comments in
    // the header for information on how the MMAP V1 journaling system works.
    LockRequest* const globalRequest = _requests.find(resourceIdGlobal).objAddr();

    // Acquire the flush lock only on the first (outermost) global lock acquisition.
    if (globalRequest->recursiveCount == 1) {
        invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
    }

    // Sanity check (debug builds): the flush lock must now be held in the expected mode.
    dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
}
void LockerImpl<IsForMMAPV1>::restoreLockState(OperationContext* opCtx,
                                               const Locker::LockSnapshot& state) {
    // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
    invariant(!inAWriteUnitOfWork());
    invariant(_modeForTicket == MODE_NONE);

    const std::vector<OneLock>& locks = state.locks;
    size_t pos = 0;

    // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
    if (!locks.empty() && locks[0].resourceId == resourceIdParallelBatchWriterMode) {
        invariant(LOCK_OK == lock(opCtx, locks[0].resourceId, locks[0].mode));
        pos = 1;
    }

    invariant(LOCK_OK == lockGlobal(opCtx, state.globalMode));

    // Reacquire the remaining locks in snapshot order.
    for (; pos < locks.size(); pos++) {
        const OneLock& entry = locks[pos];
        if (IsForMMAPV1 && (entry.resourceId == resourceIdMMAPV1Flush)) {
            // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
            // expected mode.
            invariant(entry.mode == _getModeForMMAPV1FlushLock());
        } else {
            invariant(LOCK_OK == lock(entry.resourceId, entry.mode));
        }
    }

    invariant(_modeForTicket != MODE_NONE);
}
// Ends one level of WriteUnitOfWork nesting; on leaving the outermost level, performs all
// unlock() calls that were deferred by two-phase locking during the unit of work.
void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
    invariant(_wuowNestingLevel > 0);

    if (--_wuowNestingLevel > 0) {
        // Don't do anything unless leaving outermost WUOW.
        return;
    }

    // Walk the request map until every deferred unlock has been accounted for.
    // _numResourcesToUnlockAtEndUnitOfWork counts RESOURCES with pending unlocks (decremented
    // once per resource here), while each request's unlockPending counts the individual
    // deferred unlock() calls on that resource.
    LockRequestsMap::Iterator it = _requests.begin();
    while (_numResourcesToUnlockAtEndUnitOfWork > 0) {
        if (it->unlockPending) {
            // If there are still resources left to process, the iterator must not be exhausted.
            invariant(!it.finished());
            _numResourcesToUnlockAtEndUnitOfWork--;
        }
        while (it->unlockPending > 0) {
            // If a lock is converted, unlock() may be called multiple times on a resource within
            // the same WriteUnitOfWork. All such unlock() requests must thus be fulfilled here.
            it->unlockPending--;
            unlock(it.key());
        }
        it.next();
    }

    // For MMAP V1, we need to yield the flush lock so that the flush thread can run
    if (IsForMMAPV1) {
        invariant(unlock(resourceIdMMAPV1Flush));
        invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
    }
}
// Begins acquisition of a lock on resId in the given mode: finds or creates the LockRequest,
// records stats, and sets queueing priority flags for global/flush resources.
// NOTE(review): this chunk appears truncated — the tail of lockBegin (the actual lock-manager
// enqueue and the returned LockResult) is not visible here; comments below cover only what is.
LockResult LockerImpl<IsForMMAPV1>::lockBegin(OperationContext* opCtx,
                                              ResourceId resId,
                                              LockMode mode) {
    // A locker must not start a new acquisition while already blocked waiting on a resource.
    dassert(!getWaitingResource().isValid());

    LockRequest* request;
    bool isNew = true;

    LockRequestsMap::Iterator it = _requests.find(resId);
    if (!it) {
        // First acquisition of this resource by this locker: create and initialize a request.
        // The spinlock guards the map against concurrent readers (e.g. reporting threads).
        scoped_spinlock scopedLock(_lock);
        LockRequestsMap::Iterator itNew = _requests.insert(resId);
        itNew->initNew(this, &_notify);

        request = itNew.objAddr();
    } else {
        request = it.objAddr();
        isNew = false;
    }

    // If unlockPending is nonzero, that means a LockRequest already exists for this resource but
    // is planned to be released at the end of this WUOW due to two-phase locking. Rather than
    // unlocking the existing request, we can reuse it if the existing mode matches the new mode.
    if (request->unlockPending && isModeCovered(mode, request->mode)) {
        request->unlockPending--;
        if (!request->unlockPending) {
            // No deferred unlocks remain on this resource; drop it from the end-of-WUOW count.
            _numResourcesToUnlockAtEndUnitOfWork--;
        }
        return LOCK_OK;
    }

    // Making this call here will record lock re-acquisitions and conversions as well.
    globalStats.recordAcquisition(_id, resId, mode);
    _stats.recordAcquisition(resId, mode);

    // Give priority to the full modes for global, parallel batch writer mode,
    // and flush lock so we don't stall global operations such as shutdown or flush.
    const ResourceType resType = resId.getType();
    if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
        if (mode == MODE_S || mode == MODE_X) {
            request->enqueueAtFront = true;
            request->compatibleFirst = true;
        }
    } else if (resType != RESOURCE_MUTEX) {
        // This is all sanity checks that the global and flush locks are always be acquired
        // before any other lock has been acquired and they must be in sync with the nesting.
        DEV {
            const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
            invariant(itGlobal->recursiveCount > 0);
            invariant(itGlobal->mode != MODE_NONE);

            // Check the MMAP V1 flush lock is held in the appropriate mode
            invariant(!IsForMMAPV1 ||
                      isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
        };
    }
// Overload of lockBegin without an OperationContext: finds or creates the LockRequest for
// resId, records stats, and sets queueing priority flags for global/flush resources.
// NOTE(review): this chunk appears truncated — the function body does not close here; the
// lock-manager enqueue and returned LockResult are outside this view.
LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
    // A locker must not start a new acquisition while already blocked waiting on a resource.
    dassert(!getWaitingResource().isValid());

    LockRequest* request;
    bool isNew = true;

    LockRequestsMap::Iterator it = _requests.find(resId);
    if (!it) {
        // First acquisition of this resource by this locker: create and initialize a request.
        // The spinlock guards the map against concurrent readers (e.g. reporting threads).
        scoped_spinlock scopedLock(_lock);
        LockRequestsMap::Iterator itNew = _requests.insert(resId);
        itNew->initNew(this, &_notify);

        request = itNew.objAddr();
    } else {
        request = it.objAddr();
        isNew = false;
    }

    // Making this call here will record lock re-acquisitions and conversions as well.
    globalStats.recordAcquisition(_id, resId, mode);
    _stats.recordAcquisition(resId, mode);

    // Give priority to the full modes for global, parallel batch writer mode,
    // and flush lock so we don't stall global operations such as shutdown or flush.
    const ResourceType resType = resId.getType();
    if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
        if (mode == MODE_S || mode == MODE_X) {
            request->enqueueAtFront = true;
            request->compatibleFirst = true;
        }
    } else if (resType != RESOURCE_MUTEX) {
        // This is all sanity checks that the global and flush locks are always be acquired
        // before any other lock has been acquired and they must be in sync with the nesting.
        DEV {
            const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
            invariant(itGlobal->recursiveCount > 0);
            invariant(itGlobal->mode != MODE_NONE);

            // Check the MMAP V1 flush lock is held in the appropriate mode
            invariant(!IsForMMAPV1 ||
                      isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
        };
    }
void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
    invariant(_wuowNestingLevel > 0);

    if (--_wuowNestingLevel > 0) {
        // Still inside a nested WUOW; deferred unlocks wait for the outermost one to end.
        return;
    }

    // Drain the queue of unlocks that two-phase locking deferred during the unit of work.
    for (; !_resourcesToUnlockAtEndOfUnitOfWork.empty(); _resourcesToUnlockAtEndOfUnitOfWork.pop()) {
        unlock(_resourcesToUnlockAtEndOfUnitOfWork.front());
    }

    // For MMAP V1, we need to yield the flush lock so that the flush thread can run
    if (IsForMMAPV1) {
        invariant(unlock(resourceIdMMAPV1Flush));
        invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
    }
}