void LockHead::changeConflictModeCount(LockMode mode, ChangeModeCountAction action) {
    if (action == Increment) {
        invariant(conflictCounts[mode] >= 0);

        if (++conflictCounts[mode] == 1) {
            invariant((conflictModes & modeMask(mode)) == 0);
            conflictModes |= modeMask(mode);
        }
    } else {
        invariant(action == Decrement);
        invariant(conflictCounts[mode] >= 1);

        if (--conflictCounts[mode] == 0) {
            invariant((conflictModes & modeMask(mode)) == modeMask(mode));
            conflictModes &= ~modeMask(mode);
        }
    }
}
void LockHead::changeGrantedModeCount(LockMode mode, ChangeModeCountAction action) {
    if (action == Increment) {
        invariant(grantedCounts[mode] >= 0);

        if (++grantedCounts[mode] == 1) {
            invariant((grantedModes & modeMask(mode)) == 0);
            grantedModes |= modeMask(mode);
        }
    } else {
        invariant(action == Decrement);
        invariant(grantedCounts[mode] >= 1);

        if (--grantedCounts[mode] == 0) {
            invariant((grantedModes & modeMask(mode)) == modeMask(mode));
            grantedModes &= ~modeMask(mode);
        }
    }
}
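Both bookkeeping methods keep a per-mode counter array in sync with a bitmask of active modes, so the conflict checks further down can run as a single bitwise test instead of scanning the counters. A minimal sketch of the two helpers they rely on, assuming one bit per lock mode (the exact definitions are not shown in this excerpt and may differ):

// Hypothetical sketch: map a lock mode to a single bit, so that a set of
// modes fits in one uint32_t.
static uint32_t modeMask(LockMode mode) {
    return 1U << mode;
}

// A requested mode conflicts with a set of granted modes if its row in the
// conflicts table intersects the mask of those modes.
static bool conflicts(LockMode newMode, uint32_t existingModesMask) {
    return (LockConflictsTable[newMode] & existingModesMask) != 0;
}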
LockResult LockManager::lock(const ResourceId& resId, LockRequest* request, LockMode mode) {
    dassert(mode > MODE_NONE);

    // Fast path for acquiring the same lock multiple times in modes which are already covered
    // by the current mode. It is safe to do this without locking, because 1) all calls for the
    // same lock request must be done on the same thread and 2) if there are lock requests
    // hanging off a given LockHead, then this lock will never disappear.
    if ((LockConflictsTable[request->mode] | LockConflictsTable[mode]) ==
            LockConflictsTable[request->mode]) {
        request->recursiveCount++;
        return LOCK_OK;
    }

    // TODO: For the time being we do not need conversions between unrelated lock modes (i.e.,
    // modes which both add to and remove from the conflicts set), so these are not implemented
    // yet (e.g., S -> IX).
    invariant((LockConflictsTable[request->mode] | LockConflictsTable[mode]) ==
            LockConflictsTable[mode]);

    LockBucket* bucket = _getBucket(resId);
    SimpleMutex::scoped_lock scopedLock(bucket->mutex);

    LockHead* lock;

    LockHeadMap::iterator it = bucket->data.find(resId);
    if (it == bucket->data.end()) {
        // Lock is free (not on the map)
        invariant(request->status == LockRequest::STATUS_NEW);

        lock = new LockHead(resId);
        bucket->data.insert(LockHeadPair(resId, lock));
    } else {
        // Lock is not free
        lock = it->second;
    }

    // Sanity check that requests are not being reused across different resources
    invariant(request->lock == NULL || request->lock == lock);

    request->lock = lock;
    request->recursiveCount++;

    if (request->status == LockRequest::STATUS_NEW) {
        invariant(request->recursiveCount == 1);

        // New lock request
        if (conflicts(mode, lock->grantedModes)) {
            request->status = LockRequest::STATUS_WAITING;
            request->mode = mode;
            request->convertMode = MODE_NONE;

            // Put it on the conflict queue. This is the place where various scheduling
            // policies could be applied to decide where in the wait queue a request goes.
            lock->addToConflictQueue(request);
            lock->changeConflictModeCount(mode, LockHead::Increment);

            return LOCK_WAITING;
        } else {
            // No conflict, new request
            request->status = LockRequest::STATUS_GRANTED;
            request->mode = mode;
            request->convertMode = MODE_NONE;

            lock->addToGrantedQueue(request);
            lock->changeGrantedModeCount(mode, LockHead::Increment);

            return LOCK_OK;
        }
    } else {
        // If we are here, we already hold the lock in some mode. In order to keep it simple,
        // we do not allow requesting a conversion while the lock is already waiting or
        // pending conversion, hence the assertion below.
        invariant(request->status == LockRequest::STATUS_GRANTED);
        invariant(request->recursiveCount > 1);
        invariant(request->mode != mode);

        // Construct the granted mask without our current mode, so that it is not counted as
        // conflicting
        uint32_t grantedModesWithoutCurrentRequest = 0;

        // We start the counting at 1 below, because LockModesCount also includes MODE_NONE
        // at position 0, which can never be acquired/granted.
        for (uint32_t i = 1; i < LockModesCount; i++) {
            const uint32_t currentRequestHolds =
                (request->mode == static_cast<LockMode>(i) ? 1 : 0);

            if (lock->grantedCounts[i] > currentRequestHolds) {
                grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
            }
        }

        // This check favours conversion requests over pending requests. For example:
        //
        // T1 requests lock L in IS
        // T2 requests lock L in X
        // T1 then upgrades L from IS -> S
        //
        // Because the check does not look into the conflict modes bitmap, it will grant L
        // to T1 in S mode, instead of blocking, which would otherwise cause a deadlock.
        if (conflicts(mode, grantedModesWithoutCurrentRequest)) {
            request->status = LockRequest::STATUS_CONVERTING;
            request->convertMode = mode;

            lock->conversionsCount++;
            lock->changeGrantedModeCount(request->convertMode, LockHead::Increment);

            return LOCK_WAITING;
        } else {
            // No conflict, existing request
            lock->changeGrantedModeCount(mode, LockHead::Increment);
            lock->changeGrantedModeCount(request->mode, LockHead::Decrement);
            request->mode = mode;

            return LOCK_OK;
        }
    }
}
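The fast path at the top of lock() works because one mode "covers" another exactly when its conflict set is a superset: OR-ing the two rows of LockConflictsTable changes nothing if the requested mode's conflicts are already included in the held mode's. A hypothetical conflicts table for the intent-lock modes makes this concrete (values are illustrative; the real table may differ):

// Hypothetical sketch: bit i in row m is set if mode m conflicts with mode i.
// MODE_NONE conflicts with nothing and can never be requested.
static const int LockConflictsTable[] = {
    0,                                                                // MODE_NONE
    (1 << MODE_X),                                                    // MODE_IS
    (1 << MODE_S) | (1 << MODE_X),                                    // MODE_IX
    (1 << MODE_IX) | (1 << MODE_X),                                   // MODE_S
    (1 << MODE_IS) | (1 << MODE_IX) | (1 << MODE_S) | (1 << MODE_X),  // MODE_X
};

// With these values, requesting MODE_IS while already holding MODE_S takes
// the fast path: {X} | {IX, X} == {IX, X}, i.e. S already covers IS. The
// reverse (holding IS, requesting S) fails that test, but passes the
// conversion invariant ({X} | {IX, X} == {IX, X}) and becomes an upgrade.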
void LockManager::_onLockModeChanged(LockHead* lock, bool checkConflictQueue) {
    // Unblock any converting requests (because conversions are still counted as granted and
    // remain on the granted queue).
    for (LockRequest* iter = lock->grantedQueue;
         (iter != NULL) && (lock->conversionsCount > 0);
         iter = iter->next) {

        // Only requests which are waiting on a conversion are of interest here
        if (iter->status == LockRequest::STATUS_CONVERTING) {
            invariant(iter->convertMode != 0);

            // Construct the granted mask without our current mode, so that it is not
            // counted as conflicting
            uint32_t grantedModesWithoutCurrentRequest = 0;

            // We start the counting at 1 below, because LockModesCount also includes
            // MODE_NONE at position 0, which can never be acquired/granted.
            for (uint32_t i = 1; i < LockModesCount; i++) {
                const uint32_t currentRequestHolds =
                    (iter->mode == static_cast<LockMode>(i) ? 1 : 0);

                const uint32_t currentRequestWaits =
                    (iter->convertMode == static_cast<LockMode>(i) ? 1 : 0);

                // We cannot both hold and wait on the same lock mode
                invariant(currentRequestHolds + currentRequestWaits <= 1);

                if (lock->grantedCounts[i] > (currentRequestHolds + currentRequestWaits)) {
                    grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
                }
            }

            if (!conflicts(iter->convertMode, grantedModesWithoutCurrentRequest)) {
                lock->conversionsCount--;
                lock->changeGrantedModeCount(iter->mode, LockHead::Decrement);
                iter->status = LockRequest::STATUS_GRANTED;
                iter->mode = iter->convertMode;
                iter->convertMode = MODE_NONE;

                iter->notify->notify(lock->resourceId, LOCK_OK);
            }
        }
    }

    // Grant any conflicting requests, which might now be unblocked
    LockRequest* iterNext = NULL;

    for (LockRequest* iter = lock->conflictQueueBegin;
         (iter != NULL) && checkConflictQueue;
         iter = iterNext) {

        invariant(iter->status == LockRequest::STATUS_WAITING);

        // Store the actual next pointer, because we muck with the iter below and move it
        // to the granted queue.
        iterNext = iter->next;

        if (conflicts(iter->mode, lock->grantedModes)) continue;

        iter->status = LockRequest::STATUS_GRANTED;

        lock->removeFromConflictQueue(iter);
        lock->addToGrantedQueue(iter);

        lock->changeGrantedModeCount(iter->mode, LockHead::Increment);
        lock->changeConflictModeCount(iter->mode, LockHead::Decrement);

        iter->notify->notify(lock->resourceId, LOCK_OK);
    }

    // This is a convenient place to check that the state of the two request queues is in
    // sync with the bitmasks on the modes.
    invariant((lock->grantedModes == 0) ^ (lock->grantedQueue != NULL));
    invariant((lock->conflictModes == 0) ^ (lock->conflictQueueBegin != NULL));
}
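_onLockModeChanged is the single re-scheduling point: any operation that removes a granted mode is expected to call it so pending conversions and waiters can be re-evaluated. A simplified, hypothetical sketch of the unlock path that would drive it (removeFromGrantedQueue is assumed to mirror removeFromConflictQueue above; the real method must also handle releasing requests that are still waiting or converting):

// Simplified sketch, not the actual implementation: drop one reference to a
// granted request and re-run scheduling if the lock state changed.
void LockManager::unlock(LockRequest* request) {
    invariant(request->lock != NULL);

    LockBucket* bucket = _getBucket(request->lock->resourceId);
    SimpleMutex::scoped_lock scopedLock(bucket->mutex);

    // Recursively held locks only drop a reference count.
    if (--request->recursiveCount > 0) {
        return;
    }

    invariant(request->status == LockRequest::STATUS_GRANTED);

    LockHead* lock = request->lock;
    lock->removeFromGrantedQueue(request);  // assumed helper, not shown above
    lock->changeGrantedModeCount(request->mode, LockHead::Decrement);

    // Releasing a granted mode may unblock pending conversions and waiters.
    _onLockModeChanged(lock, true);
}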
void DeadlockDetector::_processNextNode(const UnprocessedNode& node) {
    // Locate the request
    LockManager::LockBucket* bucket = _lockMgr._getBucket(node.resId);
    SimpleMutex::scoped_lock scopedLock(bucket->mutex);

    LockManager::LockHeadMap::const_iterator iter = bucket->data.find(node.resId);
    if (iter == bucket->data.end()) {
        return;
    }

    const LockHead* lock = iter->second;

    LockRequest* request = lock->findRequest(node.lockerId);

    // It is possible that a request which was thought to be waiting suddenly became
    // granted, so check for that before proceeding
    if (!request || (request->status == LockRequest::STATUS_GRANTED)) {
        return;
    }

    std::pair<WaitForGraph::iterator, bool> val =
        _graph.insert(WaitForGraphPair(node.lockerId, Edges(node.resId)));
    if (!val.second) {
        // We already saw this locker id, which means we have a cycle.
        if (!_foundCycle) {
            _foundCycle = (node.lockerId == _initialLockerId);
        }

        return;
    }

    Edges& edges = val.first->second;

    bool seen = false;
    for (LockRequest* it = lock->grantedQueueEnd; it != NULL; it = it->prev) {
        // We cannot conflict with ourselves
        if (it == request) {
            seen = true;
            continue;
        }

        // If we are a regular conflicting request, both granted and conversion modes need
        // to be checked for conflict, since conversions will be granted first.
        if (request->status == LockRequest::STATUS_WAITING) {
            if (conflicts(request->mode, modeMask(it->mode)) ||
                conflicts(request->mode, modeMask(it->convertMode))) {

                const LockerId lockerId = it->locker->getId();
                const ResourceId waitResId = it->locker->getWaitingResource();

                if (waitResId.isValid()) {
                    _queue.push_front(UnprocessedNode(lockerId, waitResId));
                    edges.owners.push_back(lockerId);
                }
            }

            continue;
        }

        // If we are a conversion request, only requests which are ahead of us on the
        // granted queue need to be accounted for.
        invariant(request->status == LockRequest::STATUS_CONVERTING);

        if (conflicts(request->convertMode, modeMask(it->mode)) ||
            (seen && conflicts(request->convertMode, modeMask(it->convertMode)))) {

            const LockerId lockerId = it->locker->getId();
            const ResourceId waitResId = it->locker->getWaitingResource();

            if (waitResId.isValid()) {
                _queue.push_front(UnprocessedNode(lockerId, waitResId));
                edges.owners.push_back(lockerId);
            }
        }
    }

    // Check all conflicting waiting requests which would be granted before us
    for (LockRequest* it = request->prev;
         (request->status == LockRequest::STATUS_WAITING) && (it != NULL);
         it = it->prev) {

        // We started from the previous element, so we should never see ourselves
        invariant(it != request);

        if (conflicts(request->mode, modeMask(it->mode))) {
            const LockerId lockerId = it->locker->getId();
            const ResourceId waitResId = it->locker->getWaitingResource();

            if (waitResId.isValid()) {
                _queue.push_front(UnprocessedNode(lockerId, waitResId));
                edges.owners.push_back(lockerId);
            }
        }
    }
}
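The detector is driven by repeatedly popping nodes off _queue and calling _processNextNode until either the queue drains or a cycle back to the initial locker is found. A hedged sketch of the assumed driver loop and caller usage, built only from the fields visible above (the real interface and constructor may differ):

// Assumed driver: breadth-first expansion of the wait-for graph, seeded with
// the locker whose blocked wait is being checked.
DeadlockDetector& DeadlockDetector::check() {
    while (!_queue.empty() && !_foundCycle) {
        const UnprocessedNode node = _queue.front();
        _queue.pop_front();

        _processNextNode(node);
    }

    return *this;
}

bool DeadlockDetector::hasCycle() const {
    return _foundCycle;
}

// Hypothetical usage while a locker is blocked waiting on a resource
// (globalLockManager and myLocker are illustrative names):
//
//   DeadlockDetector wfg(globalLockManager, myLocker);
//   if (wfg.check().hasCycle()) {
//       // Abort the wait and report the deadlock to the caller
//   }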