Example #1
bool LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
    // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
    invariant(!inAWriteUnitOfWork());

    // Clear out whatever is in stateOut.
    stateOut->locks.clear();
    stateOut->globalMode = MODE_NONE;

    // First, we look at the global lock.  There is special handling for this (as the flush
    // lock goes along with it) so we store it separately from the more pedestrian locks.
    LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
    if (!globalRequest) {
        // If there's no global lock, there is nothing to save; just verify that only
        // resource mutexes (which are never saved and restored) remain held.
        for (auto it = _requests.begin(); !it.finished(); it.next()) {
            invariant(it.key().getType() == RESOURCE_MUTEX);
        }
        return false;
    }

    // If the global lock or RSTL has been acquired more than once, we're probably somewhere in a
    // DBDirectClient call.  It's not safe to release and reacquire locks -- the context using
    // the DBDirectClient is probably not prepared for lock release.
    LockRequestsMap::Iterator rstlRequest =
        _requests.find(resourceIdReplicationStateTransitionLock);
    if (globalRequest->recursiveCount > 1 || (rstlRequest && rstlRequest->recursiveCount > 1)) {
        return false;
    }

    // The global lock must have been acquired just once.
    stateOut->globalMode = globalRequest->mode;
    invariant(unlock(resourceIdGlobal));

    // Next, the non-global locks.
    for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
        const ResourceId resId = it.key();
        const ResourceType resType = resId.getType();
        if (resType == RESOURCE_MUTEX)
            continue;

        // We should never have to save and restore metadata locks.
        invariant(RESOURCE_DATABASE == resType || RESOURCE_COLLECTION == resType ||
                  (RESOURCE_GLOBAL == resType && isSharedLockMode(it->mode)) ||
                  (resourceIdReplicationStateTransitionLock == resId && it->mode == MODE_IX));

        // Stash this lock's information in the out parameter.
        OneLock info;
        info.resourceId = resId;
        info.mode = it->mode;

        stateOut->locks.push_back(info);

        invariant(unlock(resId));
    }
    invariant(!isLocked());

    // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
    std::sort(stateOut->locks.begin(), stateOut->locks.end());

    return true;
}
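
The snapshot produced here is meant to be consumed by a matching restore call. A minimal usage sketch, assuming the Locker interface's restoreLockState() counterpart re-acquires the saved locks; the yieldForBlockingWork() wrapper and performBlockingWork() are hypothetical:

// Hypothetical wrapper: release all locks around a blocking operation, then restore them.
void yieldForBlockingWork(LockerImpl* locker) {
    Locker::LockSnapshot snapshot;

    // saveLockStateAndUnlock() returns false when yielding is unsafe (e.g. a
    // recursively held global lock inside a DBDirectClient call); keep the locks then.
    if (!locker->saveLockStateAndUnlock(&snapshot))
        return;

    performBlockingWork();  // hypothetical: runs while no locks are held

    // Assumed counterpart: re-acquires the global lock first, then the remaining
    // locks in the canonical (sorted) order established above.
    locker->restoreLockState(snapshot);
}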
Example #2
template <bool IsForMMAPV1>
bool LockerImpl<IsForMMAPV1>::_shouldDelayUnlock(ResourceId resId, LockMode mode) const {
    switch (resId.getType()) {
        // The flush lock must not participate in two-phase locking because we need to temporarily
        // yield it while blocked waiting to acquire other locks.
        case RESOURCE_MMAPV1_FLUSH:
        case RESOURCE_MUTEX:
            return false;

        case RESOURCE_GLOBAL:
        case RESOURCE_DATABASE:
        case RESOURCE_COLLECTION:
        case RESOURCE_METADATA:
            break;

        default:
            MONGO_UNREACHABLE;
    }

    switch (mode) {
        case MODE_X:
        case MODE_IX:
            return true;

        case MODE_IS:
        case MODE_S:
            return _sharedLocksShouldTwoPhaseLock;

        default:
            MONGO_UNREACHABLE;
    }
}
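
The two switches form a small decision table: resource type first, then lock strength. A self-contained restatement for illustration (standalone enums, not the real MongoDB types), which makes the rules easy to unit-test:

#include <cassert>

enum ResType { RT_GLOBAL, RT_DATABASE, RT_COLLECTION, RT_METADATA, RT_MUTEX, RT_MMAPV1_FLUSH };
enum Mode { M_IS, M_IX, M_S, M_X };

bool shouldDelayUnlock(ResType type, Mode mode, bool sharedLocksTwoPhase) {
    // Mutexes and the flush lock never participate in two-phase locking.
    if (type == RT_MUTEX || type == RT_MMAPV1_FLUSH)
        return false;

    // Exclusive and intent-exclusive locks are always held until the end of the WUOW.
    if (mode == M_X || mode == M_IX)
        return true;

    // Shared locks are two-phase only when explicitly requested.
    return sharedLocksTwoPhase;
}

int main() {
    assert(!shouldDelayUnlock(RT_MMAPV1_FLUSH, M_X, true));  // flush lock: never delayed
    assert(shouldDelayUnlock(RT_COLLECTION, M_IX, false));   // IX: always delayed
    assert(!shouldDelayUnlock(RT_DATABASE, M_S, false));     // S: only if requested
    assert(shouldDelayUnlock(RT_DATABASE, M_S, true));
    return 0;
}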
Example #3
template <bool IsForMMAPV1>
LockResult LockerImpl<IsForMMAPV1>::lockBegin(OperationContext* opCtx,
                                              ResourceId resId,
                                              LockMode mode) {
    dassert(!getWaitingResource().isValid());

    LockRequest* request;
    bool isNew = true;

    LockRequestsMap::Iterator it = _requests.find(resId);
    if (!it) {
        scoped_spinlock scopedLock(_lock);
        LockRequestsMap::Iterator itNew = _requests.insert(resId);
        itNew->initNew(this, &_notify);

        request = itNew.objAddr();
    } else {
        request = it.objAddr();
        isNew = false;
    }

    // If unlockPending is nonzero, that means a LockRequest already exists for this resource but
    // is planned to be released at the end of this WUOW due to two-phase locking. Rather than
    // unlocking the existing request, we can reuse it if the existing mode matches the new mode.
    if (request->unlockPending && isModeCovered(mode, request->mode)) {
        request->unlockPending--;
        if (!request->unlockPending) {
            _numResourcesToUnlockAtEndUnitOfWork--;
        }
        return LOCK_OK;
    }

    // Making this call here will record lock re-acquisitions and conversions as well.
    globalStats.recordAcquisition(_id, resId, mode);
    _stats.recordAcquisition(resId, mode);

    // Give priority to the full modes for global, parallel batch writer mode,
    // and flush lock so we don't stall global operations such as shutdown or flush.
    const ResourceType resType = resId.getType();
    if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
        if (mode == MODE_S || mode == MODE_X) {
            request->enqueueAtFront = true;
            request->compatibleFirst = true;
        }
    } else if (resType != RESOURCE_MUTEX) {
        // These are sanity checks: the global and flush locks must always be acquired
        // before any other lock, and their acquisition must stay in sync with the nesting.
        DEV {
            const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
            invariant(itGlobal->recursiveCount > 0);
            invariant(itGlobal->mode != MODE_NONE);

            // Check that the MMAP V1 flush lock is held in the appropriate mode
            invariant(!IsForMMAPV1 ||
                      isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
        };
    }
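
The unlockPending fast path above hinges on isModeCovered(): a held mode covers a requested one when granting the request would add no new conflicts. A sketch of that check using the standard multi-granularity conflict matrix (toy standalone types, not MongoDB's actual tables):

#include <cassert>
#include <cstdint>

enum Mode { M_NONE, M_IS, M_IX, M_S, M_X, ModeCount };

// Bit i set => conflicts with mode i (standard IS/IX/S/X compatibility matrix).
const uint32_t kConflicts[ModeCount] = {
    /* NONE */ 0,
    /* IS   */ (1u << M_X),
    /* IX   */ (1u << M_S) | (1u << M_X),
    /* S    */ (1u << M_IX) | (1u << M_X),
    /* X    */ (1u << M_IS) | (1u << M_IX) | (1u << M_S) | (1u << M_X),
};

// A mode is covered when its conflict set is a subset of the held mode's.
bool isModeCovered(Mode requested, Mode held) {
    return (kConflicts[requested] | kConflicts[held]) == kConflicts[held];
}

int main() {
    assert(isModeCovered(M_IS, M_S));  // S already conflicts with everything IS does
    assert(!isModeCovered(M_X, M_S));  // upgrading S -> X would add new conflicts
    return 0;
}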
Example #4
Resource FileResourceLoader::get(const ResourceId &id)
{
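	// Presumably ResourceType overloads operator& to test whether this id
	// belongs to the "File" type family.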
	if (id.getType() & "File")
	{
		fs::path p((m_basePath / id.getName()).native_directory_string());

		if (fs::exists(p))
		{
			FileData *pData = new FileData(this);
			pData->setId(id);
			pData->setPath(p);
			return FileResource(pData);
		}
	}
	return Resource();
}
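
A hypothetical call site for the loader above; the loader constructor taking a base path, the ResourceId (type, name) constructor, and isNull() are assumptions about the surrounding framework:

FileResourceLoader loader("/data/assets");                         // hypothetical ctor
Resource res = loader.get(ResourceId("File", "maps/level1.dat"));  // hypothetical ctor
if (!res.isNull()) {  // assumed test for the empty Resource() returned on a miss
	// ... use the file-backed resource ...
}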
Example #5
reg_t kLock(EngineState *s, int argc, reg_t *argv) {
	int state = argc > 2 ? argv[2].toUint16() : 1;
	ResourceType type = g_sci->getResMan()->convertResType(argv[0].toUint16());
	ResourceId id = ResourceId(type, argv[1].toUint16());

	Resource *which;

	switch (state) {
	case 1:
		g_sci->getResMan()->findResource(id, 1);
		break;
	case 0:
		if (id.getNumber() == 0xFFFF) {
			// Unlock all resources of the requested type
			Common::List<ResourceId> *resources = g_sci->getResMan()->listResources(type);
			Common::List<ResourceId>::iterator itr = resources->begin();

			while (itr != resources->end()) {
				Resource *res = g_sci->getResMan()->testResource(*itr);
				if (res->isLocked())
					g_sci->getResMan()->unlockResource(res);
				++itr;
			}

			delete resources;
		} else {
			which = g_sci->getResMan()->findResource(id, 0);

			if (which)
				g_sci->getResMan()->unlockResource(which);
			else {
				if (id.getType() == kResourceTypeInvalid)
					warning("[resMan] Attempt to unlock resource %i of invalid type %i", id.getNumber(), argv[0].toUint16());
				else
					// Happens in CD games (e.g. LSL6CD) with the message
					// resource. It isn't fatal, and it's usually caused
					// by leftover scripts.
					debugC(kDebugLevelResMan, "[resMan] Attempt to unlock non-existent resource %s", id.toString().c_str());
			}
		}
		break;
	}
	return s->r_acc;
}
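
Call shapes for this hook as the engine would dispatch them; make_reg() is ScummVM's helper for building reg_t values, and the raw type number 0x80 is illustrative only:

reg_t lockArgs[3]      = { make_reg(0, 0x80), make_reg(0, 100),    make_reg(0, 1) };
reg_t unlockAllArgs[3] = { make_reg(0, 0x80), make_reg(0, 0xFFFF), make_reg(0, 0) };

kLock(s, 3, lockArgs);       // state 1: pin resource 100 of the given type
kLock(s, 3, unlockAllArgs);  // state 0, id 0xFFFF: unlock all locked resources of that type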
Example #6
template <bool IsForMMAPV1>
LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
    dassert(!getWaitingResource().isValid());

    LockRequest* request;
    bool isNew = true;

    LockRequestsMap::Iterator it = _requests.find(resId);
    if (!it) {
        scoped_spinlock scopedLock(_lock);
        LockRequestsMap::Iterator itNew = _requests.insert(resId);
        itNew->initNew(this, &_notify);

        request = itNew.objAddr();
    } else {
        request = it.objAddr();
        isNew = false;
    }

    // Making this call here will record lock re-acquisitions and conversions as well.
    globalStats.recordAcquisition(_id, resId, mode);
    _stats.recordAcquisition(resId, mode);

    // Give priority to the full modes for global, parallel batch writer mode,
    // and flush lock so we don't stall global operations such as shutdown or flush.
    const ResourceType resType = resId.getType();
    if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
        if (mode == MODE_S || mode == MODE_X) {
            request->enqueueAtFront = true;
            request->compatibleFirst = true;
        }
    } else if (resType != RESOURCE_MUTEX) {
        // These are sanity checks: the global and flush locks must always be acquired
        // before any other lock, and their acquisition must stay in sync with the nesting.
        DEV {
            const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
            invariant(itGlobal->recursiveCount > 0);
            invariant(itGlobal->mode != MODE_NONE);

            // Check that the MMAP V1 flush lock is held in the appropriate mode
            invariant(!IsForMMAPV1 ||
                      isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
        };
    }
Example #7
std::string Lock::ResourceMutex::getName(ResourceId resourceId) {
    invariant(resourceId.getType() == RESOURCE_MUTEX);
    return ResourceIdFactory::nameForId(resourceId);
}
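
A minimal round-trip sketch, assuming Lock::ResourceMutex is constructed with a label that ResourceIdFactory records; the rid() accessor here is an assumption for illustration:

Lock::ResourceMutex catalogMutex("CollectionCatalog");
// Recovers the label registered at construction time.
invariant(Lock::ResourceMutex::getName(catalogMutex.rid()) == "CollectionCatalog");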
Example #8
reg_t kLock(EngineState *s, int argc, reg_t *argv) {
	// NOTE: In SSCI, kLock uses a boolean lock flag, not a lock counter.
	// ScummVM's current counter-based implementation should be better than SSCI
	// at dealing with game scripts that unintentionally lock & unlock the same
	// resource multiple times (e.g. through recursion), but it will introduce
	// memory bugs (resource leaks lasting until the engine is restarted, or
	// destruction of kernel locks that lead to a use-after-free) that are
	// masked by ResourceManager's LRU cache if scripts rely on kLock being
	// idempotent like it was in SSCI.
	//
	// Like SSCI, resource locks are not persisted in save games in ScummVM
	// until GK2, so it is also possible that kLock bugs will appear only after
	// restoring a save game.
	//
	// See also kUnLoad.

	ResourceType type = g_sci->getResMan()->convertResType(argv[0].toUint16());
	if (type == kResourceTypeSound && getSciVersion() >= SCI_VERSION_1_1) {
		type = g_sci->_soundCmd->getSoundResourceType(argv[1].toUint16());
	}

	const ResourceId id(type, argv[1].toUint16());
	const bool lock = argc > 2 ? argv[2].toUint16() : true;

#ifdef ENABLE_SCI32
	// SSCI GK2+SCI3 also saves lock states for View, Pic, and Sync resources,
	// but so far it seems like audio resources are the only ones that actually
	// need to be handled
	if (g_sci->_features->hasSci3Audio() && type == kResourceTypeAudio) {
		g_sci->_audio32->lockResource(id, lock);
		return s->r_acc;
	}
#endif

	if (getSciVersion() == SCI_VERSION_1_1 &&
		(type == kResourceTypeAudio36 || type == kResourceTypeSync36)) {
		return s->r_acc;
	}

	if (lock) {
		g_sci->getResMan()->findResource(id, true);
	} else {
		if (getSciVersion() < SCI_VERSION_2 && id.getNumber() == 0xFFFF) {
			// Unlock all resources of the requested type
			Common::List<ResourceId> resources = g_sci->getResMan()->listResources(type);
			Common::List<ResourceId>::iterator itr;
			for (itr = resources.begin(); itr != resources.end(); ++itr) {
				Resource *res = g_sci->getResMan()->testResource(*itr);
				if (res->isLocked())
					g_sci->getResMan()->unlockResource(res);
			}
		} else {
			Resource *which = g_sci->getResMan()->findResource(id, false);

			if (which)
				g_sci->getResMan()->unlockResource(which);
			else {
				if (id.getType() == kResourceTypeInvalid)
					warning("[resMan] Attempt to unlock resource %i of invalid type %i", id.getNumber(), argv[0].toUint16());
				else
					// Happens in CD games (e.g. LSL6CD) with the message
					// resource. It isn't fatal, and it's usually caused
					// by leftover scripts.
					debugC(kDebugLevelResMan, "[resMan] Attempt to unlock non-existent resource %s", id.toString().c_str());
			}
		}
	}
	return s->r_acc;
}
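
The leak scenario from the NOTE above, traced with hypothetical helper calls (counter semantics versus SSCI's boolean flag):

lockResource(id);    // counter: 1        SSCI flag: locked
lockResource(id);    // counter: 2        SSCI flag: locked (no change)
unlockResource(id);  // counter: 1        SSCI flag: unlocked
// A script written against SSCI's idempotent flag now leaves the resource pinned
// (counter still 1) until the engine restarts -- the resource leak described above.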
Example #9
    LockResult LockManager::lock(const ResourceId& resId, LockRequest* request, LockMode mode) {
        dassert(mode > MODE_NONE);

        // Fast path for acquiring the same lock multiple times in modes which are already covered
        // by the current mode. It is safe to do this without locking, because 1) all calls for the
        // same lock request must be done on the same thread and 2) if there are lock requests
        // hanging off a given LockHead, then this lock will never disappear.
        if ((LockConflictsTable[request->mode] | LockConflictsTable[mode]) == 
                LockConflictsTable[request->mode]) {
            request->recursiveCount++;
            return LOCK_OK;
        }

        // TODO: For the time being we do not need conversions between unrelated lock modes (i.e.,
        // modes which both add to and remove from the conflicts set), so these are not implemented yet
        // (e.g., S -> IX).
        invariant((LockConflictsTable[request->mode] | LockConflictsTable[mode]) == 
                LockConflictsTable[mode]);

        LockBucket* bucket = _getBucket(resId);
        scoped_spinlock scopedLock(bucket->mutex);

        LockHead* lock;

        LockHeadMap::iterator it = bucket->data.find(resId);
        if (it == bucket->data.end()) {
            // Lock is free (not on the map)
            invariant(request->status == LockRequest::STATUS_NEW);

            lock = new LockHead(resId);
            bucket->data.insert(LockHeadPair(resId, lock));
        }
        else {
            // Lock is not free
            lock = it->second;
        }

        // Sanity check in case the request is being reused: it must refer to the same lock
        invariant(request->lock == NULL || request->lock == lock);

        request->lock = lock;
        request->recursiveCount++;

        if (request->status == LockRequest::STATUS_NEW) {
            invariant(request->recursiveCount == 1);

            // The flush lock must be FIFO ordered or else we will starve the dur threads and not
            // block new writers. This means that if anyone is blocking on a mustFIFO resource we
            // must get in line, even if we don't conflict with the grantedModes and could take the
            // lock immediately.
            const bool mustFIFO = (resId.getType() == RESOURCE_MMAPV1_FLUSH);

            // New lock request
            if (conflicts(mode, lock->grantedModes)
                    || (mustFIFO && conflicts(mode, lock->conflictModes))) {
                request->status = LockRequest::STATUS_WAITING;
                request->mode = mode;
                request->convertMode = MODE_NONE;

                // Put it on the conflict queue. This is the place where various policies could be
                // applied to decide where in the wait queue a request goes.
                lock->addToConflictQueue(request);
                lock->changeConflictModeCount(mode, LockHead::Increment);

                return LOCK_WAITING;
            }
            else {  // No conflict, new request
                request->status = LockRequest::STATUS_GRANTED;
                request->mode = mode;
                request->convertMode = MODE_NONE;

                lock->addToGrantedQueue(request);
                lock->changeGrantedModeCount(mode, LockHead::Increment);

                return LOCK_OK;
            }
        }
        else {
            // If we are here, we already hold the lock in some mode. In order to keep it simple,
            // we do not allow requesting a conversion while a lock is already waiting or pending
            // conversion, hence the assertion below.
            invariant(request->status == LockRequest::STATUS_GRANTED);
            invariant(request->recursiveCount > 1);
            invariant(request->mode != mode);

            // Construct granted mask without our current mode, so that it is not counted as
            // conflicting
            uint32_t grantedModesWithoutCurrentRequest = 0;

            // We start the counting at 1 below, because LockModesCount also includes MODE_NONE
            // at position 0, which can never be acquired/granted.
            for (uint32_t i = 1; i < LockModesCount; i++) {
                const uint32_t currentRequestHolds =
                                    (request->mode == static_cast<LockMode>(i) ? 1 : 0);

                if (lock->grantedCounts[i] > currentRequestHolds) {
                    grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
                }
            }

            // This check favours conversion requests over pending requests. For example:
            //
            // T1 requests lock L in IS
            // T2 requests lock L in X
            // T1 then upgrades L from IS -> S
            //
            // Because the check does not look into the conflict modes bitmap, it will grant L to
            // T1 in S mode, instead of block, which would otherwise cause deadlock.
            if (conflicts(mode, grantedModesWithoutCurrentRequest)) {
                request->status = LockRequest::STATUS_CONVERTING;
                request->convertMode = mode;

                lock->conversionsCount++;
                lock->changeGrantedModeCount(request->convertMode, LockHead::Increment);

                return LOCK_WAITING;
            }
            else {  // No conflict, existing request
                lock->changeGrantedModeCount(mode, LockHead::Increment);
                lock->changeGrantedModeCount(request->mode, LockHead::Decrement);
                request->mode = mode;

                return LOCK_OK;
            }
        }
    }
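
The deadlock-avoidance example in the comment above (T1 upgrades IS -> S while T2 waits for X) can be traced with a small standalone program using the same conflict-mask logic; toy table, not MongoDB's structures:

#include <cassert>
#include <cstdint>

enum Mode { M_NONE, M_IS, M_IX, M_S, M_X, ModeCount };

// Bit i set => conflicts with mode i (standard IS/IX/S/X compatibility matrix).
const uint32_t kConflicts[ModeCount] = {
    0,                                                        // NONE
    (1u << M_X),                                              // IS
    (1u << M_S) | (1u << M_X),                                // IX
    (1u << M_IX) | (1u << M_X),                               // S
    (1u << M_IS) | (1u << M_IX) | (1u << M_S) | (1u << M_X),  // X
};

bool conflicts(Mode requested, uint32_t grantedMask) {
    return (kConflicts[requested] & grantedMask) != 0;
}

int main() {
    // T1 is the only *granted* holder (IS); T2's X request sits on the conflict
    // queue, so it never enters the granted mask that a conversion checks.
    uint32_t grantedWithoutT1 = 0;  // granted modes minus T1's own IS

    // T1's IS -> S upgrade therefore sees no conflict and is granted immediately,
    // instead of blocking behind T2 and deadlocking.
    assert(!conflicts(M_S, grantedWithoutT1));
    return 0;
}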