Example 1
bool LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
    // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
    invariant(!inAWriteUnitOfWork());

    // Clear out whatever is in stateOut.
    stateOut->locks.clear();
    stateOut->globalMode = MODE_NONE;

    // First, we look at the global lock.  There is special handling for this (as the flush
    // lock goes along with it) so we store it separately from the more pedestrian locks.
    LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
    if (!globalRequest) {
        // If there's no global lock there isn't really anything to do; just check that
        // only RESOURCE_MUTEX locks remain.
        for (auto it = _requests.begin(); !it.finished(); it.next()) {
            invariant(it.key().getType() == RESOURCE_MUTEX);
        }
        return false;
    }

    // If the global lock or RSTL has been acquired more than once, we're probably somewhere in a
    // DBDirectClient call.  It's not safe to release and reacquire locks -- the context using
    // the DBDirectClient is probably not prepared for lock release.
    LockRequestsMap::Iterator rstlRequest =
        _requests.find(resourceIdReplicationStateTransitionLock);
    if (globalRequest->recursiveCount > 1 || (rstlRequest && rstlRequest->recursiveCount > 1)) {
        return false;
    }

    // The global lock must have been acquired just once.
    stateOut->globalMode = globalRequest->mode;
    invariant(unlock(resourceIdGlobal));

    // Next, the non-global locks.
    for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
        const ResourceId resId = it.key();
        const ResourceType resType = resId.getType();
        if (resType == RESOURCE_MUTEX)
            continue;

        // We should never have to save and restore metadata locks.
        invariant(RESOURCE_DATABASE == resType || RESOURCE_COLLECTION == resType ||
                  (RESOURCE_GLOBAL == resType && isSharedLockMode(it->mode)) ||
                  (resourceIdReplicationStateTransitionLock == resId && it->mode == MODE_IX));

        // And, stuff the info into the out parameter.
        OneLock info;
        info.resourceId = resId;
        info.mode = it->mode;

        stateOut->locks.push_back(info);

        invariant(unlock(resId));
    }
    invariant(!isLocked());

    // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
    std::sort(stateOut->locks.begin(), stateOut->locks.end());

    return true;
}
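Taken together with the restore paths in the next two examples, this function brackets a yield point: save and release everything, do some work with no locks held, then reacquire. A minimal usage sketch, assuming only the LockerImpl API shown in these examples; the helper name and flow are hypothetical:

// Hypothetical yield helper, not actual MongoDB code. Assumes the
// saveLockStateAndUnlock()/restoreLockState() API shown in Examples 1-3.
void yieldLocksAndDoWork(OperationContext* opCtx, LockerImpl* locker) {
    Locker::LockSnapshot snapshot;

    // saveLockStateAndUnlock() returns false when there is nothing to release,
    // e.g. no global lock is held, or the locks were taken recursively through
    // a DBDirectClient call.
    if (!locker->saveLockStateAndUnlock(&snapshot))
        return;

    // ... do work while holding no locks at all ...

    // Reacquire the global lock first, then the per-resource locks, which were
    // sorted into the canonical locking order when the snapshot was taken.
    locker->restoreLockState(opCtx, snapshot);
}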
Example 2
void LockerImpl<IsForMMAPV1>::restoreLockState(OperationContext* opCtx,
                                               const Locker::LockSnapshot& state) {
    // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
    invariant(!inAWriteUnitOfWork());
    invariant(_modeForTicket == MODE_NONE);

    std::vector<OneLock>::const_iterator it = state.locks.begin();
    // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
    if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
        invariant(LOCK_OK == lock(opCtx, it->resourceId, it->mode));
        it++;
    }

    invariant(LOCK_OK == lockGlobal(opCtx, state.globalMode));
    for (; it != state.locks.end(); it++) {
        // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
        // expected mode.
        if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
            invariant(it->mode == _getModeForMMAPV1FlushLock());
        } else {
            invariant(LOCK_OK == lock(it->resourceId, it->mode));
        }
    }
    invariant(_modeForTicket != MODE_NONE);
}
Example 3
void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSnapshot& state) {
    // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
    invariant(!inAWriteUnitOfWork());
    invariant(_modeForTicket == MODE_NONE);

    std::vector<OneLock>::const_iterator it = state.locks.begin();
    // If we locked the PBWM, it must be locked before the resourceIdGlobal and
    // resourceIdReplicationStateTransitionLock resources.
    if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
        invariant(LOCK_OK == lock(opCtx, it->resourceId, it->mode));
        it++;
    }

    // If we locked the RSTL, it must be locked before the resourceIdGlobal resource.
    if (it != state.locks.end() && it->resourceId == resourceIdReplicationStateTransitionLock) {
        invariant(LOCK_OK == lock(opCtx, it->resourceId, it->mode));
        it++;
    }

    invariant(LOCK_OK == lockGlobal(opCtx, state.globalMode));
    for (; it != state.locks.end(); it++) {
        invariant(LOCK_OK == lock(it->resourceId, it->mode));
    }
    invariant(_modeForTicket != MODE_NONE);
}
Example 4
void LockerImpl::_yieldFlushLockForMMAPV1() {
    if (!inAWriteUnitOfWork()) {
        // Temporarily release the flush lock, then immediately reacquire it in
        // the mode matching the currently held global lock.
        invariant(unlock(resourceIdMMAPV1Flush));
        invariant(LOCK_OK ==
                  lock(resourceIdMMAPV1Flush, getLockMode(resourceIdGlobal), UINT_MAX));
    }
}
Example 5
LockerImpl::~LockerImpl() {
    // Cannot delete the Locker while there are still outstanding requests, because the
    // LockManager may attempt to access deleted memory. Besides, it is probably incorrect
    // to delete with unaccounted locks anyway.
    invariant(!inAWriteUnitOfWork());
    invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
    invariant(_requests.empty());
}
Example 6
    LockResult LockerImpl::lock(const ResourceId& resId, LockMode mode, unsigned timeoutMs) {
        _notify.clear();

        _lock.lock();
        LockRequest* request = _find(resId);
        if (request == NULL) {
            request = new LockRequest();
            request->initNew(resId, this, &_notify);

            _requests.insert(LockRequestsPair(resId, request));
        }
        else {
            invariant(request->recursiveCount > 0);
            request->notify = &_notify;
        }
        _lock.unlock();

        // Methods on the Locker class are always called by a single thread, so it is safe to
        // release the spin lock, which protects the Locker, here. The only thing that could
        // alter the state of the request is deadlock detection, which however synchronizes on
        // the LockManager calls.

        LockResult result = globalLockManagerPtr->lock(resId, request, mode);
        if (result == LOCK_WAITING) {
            // Under the MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting
            // on a DB lock while holding the flush lock, so the flush lock has to be released.
            // This is only correct to do if not in a write unit of work.
            bool unlockedFlushLock = false;

            if (!inAWriteUnitOfWork() && 
                (resId != resourceIdGlobal) &&
                (resId != resourceIdMMAPV1Flush) &&
                (resId != resourceIdLocalDB)) {

                invariant(unlock(resourceIdMMAPV1Flush));
                unlockedFlushLock = true;
            }

            // Do the blocking outside of the flush lock (if not in a write unit of work)
            result = _notify.wait(timeoutMs);

            if (unlockedFlushLock) {
                // We cannot obey the timeout here, because it is not correct to return from the
                // lock request with the flush lock released.
                invariant(LOCK_OK ==
                    lock(resourceIdMMAPV1Flush, getLockMode(resourceIdGlobal), UINT_MAX));
            }
        }

        if (result != LOCK_OK) {
            // Can only be LOCK_TIMEOUT, because the lock manager does not return any other errors
            // at this point. Could be LOCK_DEADLOCK, when deadlock detection is implemented.
            invariant(result == LOCK_TIMEOUT);
            invariant(_unlockAndUpdateRequestsList(resId, request));
        }

        return result;
    }
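On the LOCK_TIMEOUT path above, lock() has already unwound the failed request, so a caller only decides whether to retry or give up. A hypothetical caller sketch; the helper name, the timeout constant, and the error message are assumptions (Status and ErrorCodes are the usual MongoDB types):

// Hypothetical caller, not actual MongoDB code.
Status lockDatabaseWithTimeout(LockerImpl* locker, const ResourceId& dbResId) {
    const unsigned kTimeoutMs = 500;  // Assumed bound for this sketch.

    const LockResult result = locker->lock(dbResId, MODE_IX, kTimeoutMs);
    if (result == LOCK_TIMEOUT) {
        // lock() already removed the failed request, so no cleanup is needed.
        return Status(ErrorCodes::LockTimeout, "timed out waiting for the DB lock");
    }

    invariant(result == LOCK_OK);
    return Status::OK();
}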
Example 7
bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
    LockRequestsMap::Iterator it = _requests.find(resId);
    // Inside a WriteUnitOfWork the release of two-phase locks is deferred until the
    // unit of work commits or aborts.
    if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
        _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
        return false;
    }

    return _unlockImpl(&it);
}
Example 8
LockerImpl<IsForMMAPV1>::~LockerImpl() {
    // Cannot delete the Locker while there are still outstanding requests, because the
    // LockManager may attempt to access deleted memory. Besides, it is probably incorrect
    // to delete with unaccounted locks anyway.
    invariant(!inAWriteUnitOfWork());
    invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
    invariant(_requests.empty());
    invariant(_modeForTicket == MODE_NONE);

    // Reset the locking statistics so the object can be reused
    _stats.reset();
}
Example 9
    bool LockerImpl::unlock(const ResourceId& resId) {
        LockRequest* request = _find(resId);
        invariant(request->mode != MODE_NONE);

        // Methods on the Locker class are always called by a single thread, so it is safe to
        // release the spin lock, which protects the Locker, here. The only thing that could
        // alter the state of the request is deadlock detection, which however synchronizes on
        // the LockManager calls.

        if (inAWriteUnitOfWork() && shouldDelayUnlock(resId, request->mode)) {
            _resourcesToUnlockAtEndOfUnitOfWork.push(resId);
            return false;
        }

        return _unlockAndUpdateRequestsList(resId, request);
    }
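Both unlock variants (Examples 7 and 9) defer releases inside a WriteUnitOfWork based on shouldDelayUnlock(). A sketch of such a predicate, inferred from how it is used here; the exact set of resource types and modes is an assumption:

// Hypothetical sketch of shouldDelayUnlock(), not the actual implementation:
// exclusive and intent-exclusive locks on two-phase-locked resources are held
// until the WriteUnitOfWork ends; everything else is released eagerly.
static bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
    switch (resId.getType()) {
        case RESOURCE_GLOBAL:
        case RESOURCE_DATABASE:
        case RESOURCE_COLLECTION:
        case RESOURCE_METADATA:
            break;  // These participate in two-phase locking.
        default:
            return false;  // e.g. RESOURCE_MUTEX is always released eagerly.
    }

    switch (mode) {
        case MODE_X:
        case MODE_IX:
            return true;
        default:
            return false;  // MODE_S / MODE_IS can be dropped immediately.
    }
}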
Example 10
    void LockerImpl::downgradeGlobalXtoSForMMAPV1() {
        invariant(!inAWriteUnitOfWork());

        // Only the global and flush locks may be held at this point.
        invariant(_requests.size() == 2);

        LockRequest* globalLockRequest = _find(resourceIdGlobal);
        LockRequest* flushLockRequest = _find(resourceIdMMAPV1Flush);

        invariant(globalLockRequest->mode == MODE_X);
        invariant(globalLockRequest->recursiveCount == 1);
        invariant(flushLockRequest->mode == MODE_X);
        invariant(flushLockRequest->recursiveCount == 1);

        globalLockManagerPtr->downgrade(globalLockRequest, MODE_S);
        globalLockManagerPtr->downgrade(flushLockRequest, MODE_S);
    }
Example 11
bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
    LockRequestsMap::Iterator it = _requests.find(resId);
    if (inAWriteUnitOfWork() && _shouldDelayUnlock(it.key(), (it->mode))) {
        if (!it->unlockPending) {
            _numResourcesToUnlockAtEndUnitOfWork++;
        }
        it->unlockPending++;
        // unlockPending will only be incremented if a lock is converted and unlock() is called
        // multiple times on one ResourceId.
        invariant(it->unlockPending < LockModesCount);

        return false;
    }

    // Don't attempt to unlock twice. This can happen when an interrupted global lock is destructed.
    if (it.finished())
        return false;
    return _unlockImpl(&it);
}
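The unlockPending counters recorded above must be drained when the outermost WriteUnitOfWork ends. A hypothetical sketch of that companion path, reusing the member names from Example 11; _wuowNestingLevel is an assumed counter of nested units of work:

// Hypothetical sketch, not actual MongoDB code: replay the deferred unlock()
// calls once the outermost unit of work is done.
void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
    invariant(_wuowNestingLevel > 0);
    if (--_wuowNestingLevel > 0) {
        // Don't do anything unless leaving the outermost WriteUnitOfWork.
        return;
    }

    LockRequestsMap::Iterator it = _requests.begin();
    while (_numResourcesToUnlockAtEndUnitOfWork > 0) {
        if (it->unlockPending) {
            invariant(!it.finished());
            _numResourcesToUnlockAtEndUnitOfWork--;
        }
        while (it->unlockPending > 0) {
            // A converted lock may have been unlock()-ed several times within
            // the unit of work, so all pending requests are fulfilled here.
            it->unlockPending--;
            unlock(it.key());
        }
        it.next();
    }
}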
Example 12
bool LockerImpl<IsForMMAPV1>::unlockGlobal() {
    if (!unlock(resourceIdGlobal)) {
        return false;
    }

    invariant(!inAWriteUnitOfWork());

    LockRequestsMap::Iterator it = _requests.begin();
    while (!it.finished()) {
        // If we're here we should only have one reference to any lock. It is a programming
        // error for any lock used with multi-granularity locking to have more references than
        // the global lock, because every scope starts by calling lockGlobal.
        if (it.key().getType() == RESOURCE_GLOBAL || it.key().getType() == RESOURCE_MUTEX) {
            it.next();
        } else {
            invariant(_unlockImpl(&it));
        }
    }

    return true;
}
Example 13
void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
    invariant(!inAWriteUnitOfWork());

    LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
    invariant(globalLockRequest->mode == MODE_X);
    invariant(globalLockRequest->recursiveCount == 1);
    invariant(_modeForTicket == MODE_X);
    // Note that this locker will not actually have a ticket (as MODE_X has no TicketHolder) or
    // acquire one now, but at most a single thread can be in this downgraded MODE_S situation,
    // so it's OK.

    // Making this call here will record lock downgrades as acquisitions, which is acceptable
    globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
    _stats.recordAcquisition(resourceIdGlobal, MODE_S);

    globalLockManager.downgrade(globalLockRequest, MODE_S);

    if (IsForMMAPV1) {
        invariant(unlock(resourceIdMMAPV1Flush));
    }
}
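For context, a hypothetical caller of this downgrade: MMAP V1's durability code takes the global lock exclusively, downgrades to shared mode so readers can resume, finishes its flush, and then releases. The function name and the commented steps are assumptions:

// Hypothetical flush sketch, not actual MongoDB code.
void flushDataFilesSketch(OperationContext* opCtx, LockerImpl<true>* locker) {
    invariant(LOCK_OK == locker->lockGlobal(opCtx, MODE_X));

    // ... work that genuinely needs exclusive access ...

    // Let readers back in while the remainder of the flush completes; as shown
    // above, this also releases the MMAP V1 flush lock.
    locker->downgradeGlobalXtoSForMMAPV1();

    // ... remainder of the flush, concurrent with readers ...

    locker->unlockGlobal();
}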