Example #1
0
BSONObj LocksType::toBSON() const {
    // Serialize this lock document field-by-field.  Only fields that were
    // explicitly set (tracked by the _xxx optionals) are emitted; unset
    // fields are omitted from the resulting BSON object entirely.
    BSONObjBuilder bob;

    if (_name) {
        bob.append(name.name(), getName());
    }
    if (_state) {
        bob.append(state.name(), getState());
    }
    if (_process) {
        bob.append(process.name(), getProcess());
    }
    if (_lockID) {
        bob.append(lockID.name(), getLockID());
    }
    if (_who) {
        bob.append(who.name(), getWho());
    }
    if (_why) {
        bob.append(why.name(), getWhy());
    }

    return bob.obj();
}
/*
 * ldelete - deallocate the read/write lock named by descriptor ldes and
 * wake every process blocked waiting on it.
 * Returns SYSERR for a bad descriptor or an already-free lock, OK otherwise.
 */
int ldelete(int ldes)
{
	STATWORD ps;
	
	struct lentry *lptr;
	int pid, lockID;
	
	disable(ps);				/* critical section: interrupts off */
	lockID = getLockID(ldes);		/* map descriptor to lock-table index */
	
	/* Reject invalid descriptors and locks that are not in use. */
	if( isbadlock(lockID) || (lptr=&locktab[lockID])->lstate == LOCKFREE )
	{
		restore(ps);
		return(SYSERR);
	}
	
	//lptr = &locktab[ldes];
	/* Mark the lock free and reset its bookkeeping.
	 * NOTE(review): only the caller's acquiredby slot is cleared here; if
	 * other processes hold this lock for READ their slots remain set --
	 * confirm this is intended. */
	lptr->lstate = LOCKFREE;
	lptr->locked = 0;
	lptr->acquiredby[currpid]=-1;
	lptr->lockType = DELETED;
	lptr->lprio=-1;
	
	
	/* Drain the wait queue: clear each waiter's lock-related state, record
	 * DELETED as its wait result so it can tell the lock vanished, and make
	 * it ready.  Rescheduling is deferred until all waiters are readied. */
	if( nonempty(lptr->lhead) )
	{
		while( (pid=getlast(lptr->lhead)) != EMPTY )
		{
			proctab[pid].waitPriority = -1;
			proctab[pid].lockID = -1;
			proctab[pid].procLockType[lockID] = DELETED;
			proctab[pid].pwaitret = DELETED;	/* waiter's return status */
			
			ready(pid, RESCHNO);		/* no resched per waiter */
		}
		resched();
	}
	
	restore(ps);
	return(OK);
}
/*
 * lock - acquire the reader/writer lock named by descriptor ldes.
 *   type     : READ or WRITE (DELETED is rejected as an argument)
 *   priority : wait priority used to order this process in the lock's
 *              wait queue if it has to block
 * Returns SYSERR for a bad/free lock or bad type, OK otherwise.
 * NOTE(review): after a blocked process is resumed, OK is returned
 * unconditionally; a waiter woken by ldelete() presumably must inspect
 * its pwaitret (set to DELETED there) to detect that case -- confirm
 * with callers.
 */
int lock(int ldes, int type, int priority)		//priority here = process wait priority
{
	STATWORD ps;
	struct lentry *lptr;
	struct	pentry	*pptr;
	int lockNotGivenToRead = 0;
	int temp = -1;
	int lockID;
	
	disable(ps);
	pptr = &proctab[currpid];
	
	lockID = getLockID(ldes);	/* map descriptor to lock-table index */
	
	//kprintf("\n IN lock: %d...lock des..%d", lockID, ldes);
	/* Reject invalid descriptors, free (never created / deleted) locks,
	 * and the pseudo-type DELETED. */
	if( isbadlock(lockID) || (lptr=&locktab[lockID])->lstate == LOCKFREE || type == DELETED)
	{
		restore(ps);
		return(SYSERR);
	}
	
	
	/* Case 1: lock is currently free -- grant it to the caller at once. */
	if( lptr->locked == 0 )
	{
		lptr->lstate = LOCKUSED;
		lptr->locked = 1;					//indicates someone has acquired lockID
		lptr->acquiredby[currpid] = 1;			//stores the PID of process which has acquired lockID
		lptr->lockType = type;	
		lptr->lprio = pptr->pprio;			/* remember holder's prio; see rampUpPriority */
		
		
		pptr->procLockType[lockID] = type;	//indicates type of locking (WRITE or READ) for a process
		pptr->waitPriority = priority;		//sets wait priority of process
		pptr->lockID = -1;					//process not in wait queue so its lock id is -1;
		
		
		restore(ps);
		return(OK);
	}
	
	/* Case 2: lock is already held. */
	else if( lptr->locked != 0 )
	{
	
		/* Held by a writer: every newcomer (reader or writer) must block. */
		if(lptr->lockType == WRITE)
		{
			pptr->pstate = PRWAIT;
			pptr->lockID = ldes;
			pptr->procLockType[lockID] = type;	//indicates type of locking (WRITE or READ) for a process
			pptr->waitPriority = priority;		//sets wait priority of process
			pptr->timeInWaiting = ctr1000;		/* timestamp for wait-time accounting */
			rampUpPriority(lockID, pptr->pprio);	/* priority inheritance toward holder */
			
			insert(currpid, lptr->lhead, priority);	/* enqueue by wait priority */
			
			//pptr->pwaitret = OK;
			resched();				/* block; resumes when granted/deleted */
		
			restore(ps);
			return(OK);
		}
		
		/* Held by reader(s). */
		else if(lptr->lockType == READ )
		{
		
			if( type == READ )
			{
				//check if incoming process is reader and has wait priority larger than any waiting writer process wait priority
				/* Walk the wait queue; if any waiting WRITER has a strictly
				 * higher wait priority than this reader, the reader must not
				 * jump ahead of it. */
				temp = lptr->lhead;
				while( q[temp].qnext != lptr->ltail )
				{ 
					if( (proctab[q[temp].qnext].procLockType[lockID] == WRITE) && (priority < proctab[q[temp].qnext].waitPriority) )
					{
						lockNotGivenToRead = 1;		//1 value indicates the lock should not be given because a writer with larger wait prio than this read is waiting
					}
					
					temp = q[temp].qnext;
				}
				
				/* Reader must wait behind a higher-priority writer. */
				if(lockNotGivenToRead == 1)
				{
					pptr->pstate = PRWAIT;
					pptr->lockID = ldes;
					pptr->procLockType[lockID] = type;	//indicates type of locking (WRITE or READ) for a process
					pptr->waitPriority = priority;		//sets wait priority of process

					rampUpPriority(lockID, pptr->pprio);
					pptr->timeInWaiting = ctr1000;
					insert(currpid, lptr->lhead, priority);
					
					//pptr->pwaitret = OK;
					resched();
				
					restore(ps);
					return(OK);
				}
				else if(lockNotGivenToRead != 1)				//READER can be given lockID as no writer with wait prio greater than currpid wait prio is there
				{
					
					/* Keep lprio as the maximum priority among holders. */
					if( lptr->lprio < pptr->pprio )
					{
						lptr->lprio = pptr->pprio;
					}
					
					lptr->acquiredby[currpid] = 1;
					pptr->procLockType[lockID] = type;			//indicates type of locking (WRITE or READ) for a process
					pptr->waitPriority = priority;
					pptr->lockID = -1;
					
					restore(ps);
					return(OK);
				}
				
			}
			/* Writer arriving while readers hold the lock: always waits. */
			else if( type == WRITE )
			{
				pptr->pstate = PRWAIT;
				pptr->lockID = ldes;
				pptr->procLockType[lockID] = type;	//indicates type of locking (WRITE or READ) for a process
				pptr->waitPriority = priority;		//sets wait priority of process

				rampUpPriority(lockID, pptr->pprio);
				pptr->timeInWaiting = ctr1000;
				insert(currpid, lptr->lhead, priority);
				
				//pptr->pwaitret = OK;
				resched();
			
				restore(ps);
				return(OK);
			}
		}
	}
	
	
	
	restore(ps);
	return(OK);
}
// Acquires the distributed lock 'name'.  Retries every lockTryInterval until
// waitFor has elapsed:
//   waitFor == 0  -> exactly one acquisition attempt (the break below fires)
//   waitFor  < 0  -> retry indefinitely (loop condition is always true and
//                    the zero-check break never fires)
// Each attempt first tries to grab the lock outright; if the lock is held,
// it checks whether the current holder can be overtaken (lease expired) and,
// if so, forces the lock.  Returns ErrorCodes::LockBusy once the wait budget
// is exhausted.
StatusWith<DistLockManager::ScopedDistLock> ReplSetDistLockManager::lock(
    StringData name, StringData whyMessage, milliseconds waitFor, milliseconds lockTryInterval) {
    Timer timer(_serviceContext->getTickSource());
    Timer msgTimer(_serviceContext->getTickSource());  // paces the periodic "waited Ns" log

    while (waitFor <= milliseconds::zero() || milliseconds(timer.millis()) < waitFor) {
        // Fresh session id per attempt so a possibly-applied write from a
        // failed attempt can be identified and cleaned up independently.
        OID lockSessionID = OID::gen();
        string who = str::stream() << _processID << ":" << getThreadName();

        auto lockExpiration = _lockExpiration;
        // Test-only failpoint: allows tests to override the lock timeout.
        MONGO_FAIL_POINT_BLOCK(setDistLockTimeout, customTimeout) {
            const BSONObj& data = customTimeout.getData();
            lockExpiration = stdx::chrono::milliseconds(data["timeoutMs"].numberInt());
        }
        LOG(1) << "trying to acquire new distributed lock for " << name
               << " ( lock timeout : " << durationCount<Milliseconds>(lockExpiration)
               << " ms, ping interval : " << durationCount<Milliseconds>(_pingInterval)
               << " ms, process : " << _processID << " )"
               << " with lockSessionID: " << lockSessionID << ", why: " << whyMessage;

        auto lockResult =
            _catalog->grabLock(name, lockSessionID, who, _processID, Date_t::now(), whyMessage);

        auto status = lockResult.getStatus();

        if (status.isOK()) {
            // Lock is acquired since findAndModify was able to successfully modify
            // the lock document.
            LOG(0) << "distributed lock '" << name << "' acquired, ts : " << lockSessionID;
            return ScopedDistLock(lockSessionID, this);
        }

        if (status != ErrorCodes::LockStateChangeFailed) {
            // An error occurred but the write might have actually been applied on the
            // other side. Schedule an unlock to clean it up just in case.
            queueUnlock(lockSessionID);
            return status;
        }

        // Reaching here means grabLock failed with LockStateChangeFailed, i.e.
        // the lock is currently held by someone else.
        // Get info from current lock and check if we can overtake it.
        auto getLockStatusResult = _catalog->getLockByName(name);
        const auto& getLockStatus = getLockStatusResult.getStatus();

        if (!getLockStatusResult.isOK() && getLockStatus != ErrorCodes::LockNotFound) {
            return getLockStatus;
        }

        // Note: Only attempt to overtake locks that actually exists. If lock was not
        // found, use the normal grab lock path to acquire it.
        if (getLockStatusResult.isOK()) {
            auto currentLock = getLockStatusResult.getValue();
            auto canOvertakeResult = canOvertakeLock(currentLock, lockExpiration);

            if (!canOvertakeResult.isOK()) {
                return canOvertakeResult.getStatus();
            }

            if (canOvertakeResult.getValue()) {
                auto overtakeResult = _catalog->overtakeLock(name,
                                                             lockSessionID,
                                                             currentLock.getLockID(),
                                                             who,
                                                             _processID,
                                                             Date_t::now(),
                                                             whyMessage);

                const auto& overtakeStatus = overtakeResult.getStatus();

                if (overtakeResult.isOK()) {
                    // Lock is acquired since findAndModify was able to successfully modify
                    // the lock document.

                    LOG(0) << "lock '" << name << "' successfully forced";
                    LOG(0) << "distributed lock '" << name << "' acquired, ts : " << lockSessionID;
                    return ScopedDistLock(lockSessionID, this);
                }

                if (overtakeStatus != ErrorCodes::LockStateChangeFailed) {
                    // An error occurred but the write might have actually been applied on the
                    // other side. Schedule an unlock to clean it up just in case.
                    queueUnlock(lockSessionID);
                    return overtakeStatus;
                }
            }
        }

        LOG(1) << "distributed lock '" << name << "' was not acquired.";

        // A zero wait budget means single-shot acquisition: give up now.
        if (waitFor == milliseconds::zero()) {
            break;
        }

        // Periodically message for debugging reasons
        if (msgTimer.seconds() > 10) {
            LOG(0) << "waited " << timer.seconds() << "s for distributed lock " << name << " for "
                   << whyMessage;

            msgTimer.reset();
        }

        // Sleep until the next retry, but never past the remaining wait budget.
        milliseconds timeRemaining =
            std::max(milliseconds::zero(), waitFor - milliseconds(timer.millis()));
        sleepFor(std::min(lockTryInterval, timeRemaining));
    }

    return {ErrorCodes::LockBusy, str::stream() << "timed out waiting for " << name};
}
/*
 * kill - terminate process pid: close its open devices, release every
 * read/write lock it holds, notify its next-of-kin, free its stack, and
 * free its process-table slot.
 * Returns SYSERR for a bad or already-free pid, OK otherwise.
 */
SYSCALL kill(int pid)
{
	STATWORD ps;    
	struct	pentry	*pptr;		/* points to proc. table for pid*/
	int	dev, i, callResched, lockID;
	callResched = RESCHNO;
	
	disable(ps);
	if (isbadpid(pid) || (pptr= &proctab[pid])->pstate==PRFREE) {
		restore(ps);
		return(SYSERR);
	}
	if (--numproc == 0)
		xdone();

	/* Close the process's two open devices and its paging device. */
	dev = pptr->pdevs[0];
	if (! isbaddev(dev) )
		close(dev);
	dev = pptr->pdevs[1];
	if (! isbaddev(dev) )
		close(dev);
	dev = pptr->ppagedev;
	if (! isbaddev(dev) )
		close(dev);
	
	/* Release every lock this process currently holds so its waiters can
	 * proceed; remember to resched once all releases are done. */
	for( i=0; i<NLOCKS; i++)
	{
		if( locktab[i].acquiredby[pid] == 1)		//this process has acquired some lock and holding it, so release it and resched
		{
			//kprintf("\n Kill is holding lock %s", proctab[pid].pname);
			callResched = RESCHYES;
			relLock(pid, i, 0);
		}
	}
	
	send(pptr->pnxtkin, pid);	/* notify next-of-kin of the death */

	freestk(pptr->pbase, pptr->pstklen);
	switch (pptr->pstate) {

	case PRCURR:	pptr->pstate = PRFREE;	/* suicide */
			resched();		/* never returns: current proc is PRFREE */

	case PRWAIT:	semaph[pptr->psem].semcnt++;
					//if this proc is in wait queue of any lock i, remove it ... a proc can be in waiting q of any one lock only
					/* for( i=0; i<NLOCKS; i++)
					{
						
						if( (proctab[pid].procLockType[i]!=DELETED) && (locktab[i].acquiredby[pid]!=1) )
						{
							kprintf("Killed Process %d waiting on lock %d", pid, i);
							locktab[j].effectOfPriorityInheritance = 1;
							rampUpPriority(j, proctab[i].pprio);
						}
					} */
					
					
					lockID = getLockID(pptr->lockID);
					/* BUGFIX: this test used '||', so when the process was not
					 * waiting on any lock (bad lockID) the right-hand operand
					 * still indexed locktab[] out of bounds and could call
					 * relLock spuriously.  Only clean up the wait entry for a
					 * valid, in-use lock -- same predicate as ldelete()/lock(). */
					if( !isbadlock(lockID) && locktab[lockID].lstate != LOCKFREE )
					{
						//dequeue(pid);
						//kprintf("\nKill proc getting called in PRWAIT, releasing wait lock %d..%d", lockID, pid);
						proctab[pid].pprio = -1;
						proctab[pid].mainPrio = -1;
						relLock(pid, lockID, 1);	/* 1: releasing a waiter, not a holder */
						//dequeue(pid);
					}
					
					/* fall through: remove from its wait queue and free */

	case PRREADY:	dequeue(pid);
			pptr->pstate = PRFREE;
			break;

	case PRSLEEP:
	case PRTRECV:	unsleep(pid);
						/* fall through	*/
	default:	pptr->pstate = PRFREE;
	}
	
	if(callResched)
	{
		//kprintf("\n About to call resched");
		//indicates that this proc was holding lock and is deleted
		//in between so now it should release lock and call resched
		resched();
	}
	
	restore(ps);
	return(OK);
}
// Acquires the distributed lock 'name' on behalf of operation 'txn'.
// Retries every lockTryInterval until waitFor has elapsed:
//   waitFor == 0  -> exactly one acquisition attempt
//   waitFor  < 0  -> retry indefinitely
// Unlike the non-txn overload, transient network errors during grabLock are
// retried immediately (up to kMaxNumLockAcquireRetries per acquisition) after
// synchronously unlocking the possibly-applied stale entry.  Returns
// ErrorCodes::LockBusy once the wait budget is exhausted.
StatusWith<DistLockManager::ScopedDistLock> ReplSetDistLockManager::lock(
    OperationContext* txn,
    StringData name,
    StringData whyMessage,
    milliseconds waitFor,
    milliseconds lockTryInterval) {
    Timer timer(_serviceContext->getTickSource());
    Timer msgTimer(_serviceContext->getTickSource());  // paces the periodic "waited Ns" log

    // Counts how many attempts have been made to grab the lock, which have failed with network
    // error. This value is reset for each lock acquisition attempt because these are
    // independent write operations.
    int networkErrorRetries = 0;

    // Distributed lock acquisition works by tring to update the state of the lock to 'taken'. If
    // the lock is currently taken, we will back off and try the acquisition again, repeating this
    // until the lockTryInterval has been reached. If a network error occurs at each lock
    // acquisition attempt, the lock acquisition will be retried immediately.
    while (waitFor <= milliseconds::zero() || milliseconds(timer.millis()) < waitFor) {
        // Fresh session id per attempt so a possibly-applied write from a
        // failed attempt can be identified and cleaned up independently.
        const OID lockSessionID = OID::gen();
        const string who = str::stream() << _processID << ":" << getThreadName();

        auto lockExpiration = _lockExpiration;
        // Test-only failpoint: allows tests to override the lock timeout.
        MONGO_FAIL_POINT_BLOCK(setDistLockTimeout, customTimeout) {
            const BSONObj& data = customTimeout.getData();
            lockExpiration = stdx::chrono::milliseconds(data["timeoutMs"].numberInt());
        }

        LOG(1) << "trying to acquire new distributed lock for " << name
               << " ( lock timeout : " << durationCount<Milliseconds>(lockExpiration)
               << " ms, ping interval : " << durationCount<Milliseconds>(_pingInterval)
               << " ms, process : " << _processID << " )"
               << " with lockSessionID: " << lockSessionID << ", why: " << whyMessage;

        auto lockResult = _catalog->grabLock(
            txn, name, lockSessionID, who, _processID, Date_t::now(), whyMessage);

        auto status = lockResult.getStatus();

        if (status.isOK()) {
            // Lock is acquired since findAndModify was able to successfully modify
            // the lock document.
            log() << "distributed lock '" << name << "' acquired for '" << whyMessage
                  << "', ts : " << lockSessionID;
            return ScopedDistLock(txn, lockSessionID, this);
        }

        // If a network error occurred, unlock the lock synchronously and try again
        if (ShardRegistry::kAllRetriableErrors.count(status.code()) &&
            networkErrorRetries < kMaxNumLockAcquireRetries) {
            LOG(1) << "Failed to acquire distributed lock because of retriable error. Retrying "
                      "acquisition by first unlocking the stale entry, which possibly exists now"
                   << causedBy(status);

            networkErrorRetries++;

            status = _catalog->unlock(txn, lockSessionID);
            if (status.isOK()) {
                // We certainly do not own the lock, so we can retry
                continue;
            }

            // Fall-through to the error checking logic below
            invariant(status != ErrorCodes::LockStateChangeFailed);

            LOG(1)
                << "Failed to retry acqusition of distributed lock. No more attempts will be made"
                << causedBy(status);
        }

        if (status != ErrorCodes::LockStateChangeFailed) {
            // An error occurred but the write might have actually been applied on the
            // other side. Schedule an unlock to clean it up just in case.
            queueUnlock(lockSessionID);
            return status;
        }

        // Reaching here means grabLock failed with LockStateChangeFailed, i.e.
        // the lock is currently held by someone else.
        // Get info from current lock and check if we can overtake it.
        auto getLockStatusResult = _catalog->getLockByName(txn, name);
        const auto& getLockStatus = getLockStatusResult.getStatus();

        if (!getLockStatusResult.isOK() && getLockStatus != ErrorCodes::LockNotFound) {
            return getLockStatus;
        }

        // Note: Only attempt to overtake locks that actually exists. If lock was not
        // found, use the normal grab lock path to acquire it.
        if (getLockStatusResult.isOK()) {
            auto currentLock = getLockStatusResult.getValue();
            auto canOvertakeResult = canOvertakeLock(txn, currentLock, lockExpiration);

            if (!canOvertakeResult.isOK()) {
                return canOvertakeResult.getStatus();
            }

            if (canOvertakeResult.getValue()) {
                auto overtakeResult = _catalog->overtakeLock(txn,
                                                             name,
                                                             lockSessionID,
                                                             currentLock.getLockID(),
                                                             who,
                                                             _processID,
                                                             Date_t::now(),
                                                             whyMessage);

                const auto& overtakeStatus = overtakeResult.getStatus();

                if (overtakeResult.isOK()) {
                    // Lock is acquired since findAndModify was able to successfully modify
                    // the lock document.

                    LOG(0) << "lock '" << name << "' successfully forced";
                    LOG(0) << "distributed lock '" << name << "' acquired, ts : " << lockSessionID;
                    return ScopedDistLock(txn, lockSessionID, this);
                }

                if (overtakeStatus != ErrorCodes::LockStateChangeFailed) {
                    // An error occurred but the write might have actually been applied on the
                    // other side. Schedule an unlock to clean it up just in case.
                    queueUnlock(lockSessionID);
                    return overtakeStatus;
                }
            }
        }

        LOG(1) << "distributed lock '" << name << "' was not acquired.";

        // A zero wait budget means single-shot acquisition: give up now.
        if (waitFor == milliseconds::zero()) {
            break;
        }

        // Periodically message for debugging reasons
        if (msgTimer.seconds() > 10) {
            LOG(0) << "waited " << timer.seconds() << "s for distributed lock " << name << " for "
                   << whyMessage;

            msgTimer.reset();
        }

        // A new lock acquisition attempt will begin now (because the previous found the lock to be
        // busy, so reset the retries counter)
        networkErrorRetries = 0;

        // Sleep until the next retry, but never past the remaining wait budget.
        const milliseconds timeRemaining =
            std::max(milliseconds::zero(), waitFor - milliseconds(timer.millis()));
        sleepFor(std::min(lockTryInterval, timeRemaining));
    }

    return {ErrorCodes::LockBusy, str::stream() << "timed out waiting for " << name};
}