Example #1
File: lwlock.c  Project: fgp/lockbench
/*
 * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
 *
 * If the lock is not available, return FALSE with no side-effects.
 *
 * If successful, cancel/die interrupts are held off until lock release.
 */
bool
LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
{
	volatile LWLock *lock = &(LWLockArray[lockid].lock);
#if LWLOCK_LOCK_PARTS > 1
	volatile LWLockPart *part = LWLOCK_PART(lock, lockid, MyBackendId);
#endif
	bool		mustwait;

	PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	if (mode == LW_SHARED)
	{
#ifdef LWLOCK_PART_SHARED_OPS_ATOMIC
		/* Increment our shared counter partition.  If there's no
		 * contention, this alone is sufficient to take the lock.
		 */
		LWLOCK_PART_SHARED_POSTINC_ATOMIC(lock, lockid, part, MyBackendId);
		LWLOCK_PART_SHARED_FENCE();

		/* A concurrent exclusive locking attempt performs the following
		 * four steps:
		 *   1) Acquire the mutex
		 *   2) Check the shared counter partitions for readers
		 *   3a) If readers were found, add the proc to the wait queue,
		 *       block, and restart at (1)
		 *   3b) If none were found, set the exclusive flag and continue
		 *       with (4)
		 *   4) Enter the protected section
		 * The fence after the atomic add above ensures that no further
		 * such attempt can proceed to (3b) or beyond.  There may be
		 * pre-existing exclusive locking attempts at step (3b) or
		 * beyond, but we can recognize those by either the mutex being
		 * taken or the exclusive flag being set.  Conversely, if we see
		 * neither, we may proceed and enter the protected section.
		 *
		 * FIXME: This doesn't work if slock_t is a struct or doesn't
		 * use 0 for the "unlocked" state.
		 */

		if ((lock->mutex == 0) && (lock->exclusive == 0))
			goto lock_acquired;

		/* At this point, we don't know whether the concurrent exclusive
		 * locker has proceeded to (3b) or blocked.  We must take the
		 * mutex and re-check.
		 */
#endif /* LWLOCK_PART_SHARED_OPS_ATOMIC */

		/* Acquire mutex.  Time spent holding mutex should be short! */
		SpinLockAcquire(&lock->mutex);

		if (lock->exclusive == 0)
		{
#ifdef LWLOCK_PART_SHARED_OPS_ATOMIC
			/* Already incremented the shared counter partition above */
#else
			lock->shared++;
#endif
			mustwait = false;
		}
		else
		{
#ifdef LWLOCK_PART_SHARED_OPS_ATOMIC
			/* Must undo the shared counter partition increment.  Note
			 * that we *need* to do this while holding the mutex.
			 * Otherwise, the exclusive lock could be released and
			 * re-acquisition attempted before we undo the increment;
			 * that attempt would then block, even though no lock holder
			 * was left.
			 */
			LWLOCK_PART_SHARED_POSTDEC_ATOMIC(lock, lockid, part, MyBackendId);
#endif
			mustwait = true;
		}
	}
	else
	{
		/* Step (1).  Acquire the mutex.  Time spent holding the mutex
		 * should be short!
		 */
		SpinLockAcquire(&lock->mutex);

		if (lock->exclusive == 0)
		{
			/* Step (2).  Check for shared lockers.  This is guaranteed
			 * to happen after (1); otherwise SpinLockAcquire() would be
			 * broken.  Acquire semantics demand that no load be
			 * reordered from after the lock acquisition to before it,
			 * for obvious reasons.
			 */

			LWLOCK_IS_SHARED(mustwait, lock, lockid);

			if (!mustwait)
			{
				/* Step (3b).  Set the exclusive flag.  This is
				 * guaranteed to happen after (2) because it depends on
				 * the result of (2), no matter how much reordering is
				 * going on here.
				 */
				lock->exclusive++;
			}
		}
		else
			mustwait = true;
	}

	/* We are done updating shared state of the lock itself. */
	SpinLockRelease(&lock->mutex);

	if (mustwait)
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();
		LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
		TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(lockid, mode);
		
		return false;
	}

#ifdef LWLOCK_PART_SHARED_OPS_ATOMIC
lock_acquired:
#endif

	TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(lockid, mode);

	/* Add lock to list of locks held by this backend */
	held_lwlocks[num_held_lwlocks] = lockid;
	held_lwlocks_mode[num_held_lwlocks] = mode;
	++num_held_lwlocks;

	return true;
}
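
The long comment in Example #1 describes a fence-based fast path: a reader first announces itself by atomically incrementing its shared counter partition, issues a fence, and only then checks for exclusive lockers. The miniature below models that ordering with C11 atomics. It is an illustrative sketch, not code from the patch: the partitioned counter is flattened into a single counter, the slow path is elided, and all names (shared_lock_fastpath and the three variables) are invented.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int shared;		/* count of shared holders; one counter
								 * standing in for all partitions */
static atomic_int exclusive;	/* nonzero while exclusively held */
static atomic_int mutex;		/* 0 = free; stand-in for the slock_t */

static bool
shared_lock_fastpath(void)
{
	/* Announce ourselves as a reader first (the POSTINC above)... */
	atomic_fetch_add(&shared, 1);

	/* ...then fence, so any exclusive locker that checks the reader
	 * counters afterwards (its step 2) is guaranteed to see us. */
	atomic_thread_fence(memory_order_seq_cst);

	/* No exclusive locker is at step (3b) or beyond iff the mutex is
	 * free and the exclusive flag is clear; if so, we hold the lock. */
	if (atomic_load(&mutex) == 0 && atomic_load(&exclusive) == 0)
		return true;

	/* The real code now takes the mutex, re-checks, and undoes the
	 * increment while still holding the mutex; this sketch just backs
	 * out and fails conservatively. */
	atomic_fetch_sub(&shared, 1);
	return false;
}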
Example #2
/*
 * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
 *
 * If the lock is not available, return FALSE with no side-effects.
 *
 * If successful, cancel/die interrupts are held off until lock release.
 */
bool
LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
{
	volatile LWLock *lock = &(LWLockArray[lockid].lock);
	bool		mustwait;

	PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/* Acquire mutex.  Time spent holding mutex should be short! */
	SpinLockAcquire(&lock->mutex);

	/* If I can get the lock, do so quickly. */
	if (mode == LW_EXCLUSIVE)
	{
		if (lock->exclusive == 0 && lock->shared == 0)
		{
			lock->exclusive++;
			mustwait = false;
		}
		else
			mustwait = true;
	}
	else
	{
		if (lock->exclusive == 0)
		{
			lock->shared++;
			mustwait = false;
		}
		else
			mustwait = true;
	}

	/* We are done updating shared state of the lock itself. */
	SpinLockRelease(&lock->mutex);

	if (mustwait)
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();
		LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
		TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(lockid, mode);
	}
	else
	{
		/* Add lock to list of locks held by this backend */
		held_lwlocks[num_held_lwlocks++] = lockid;
		TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(lockid, mode);
	}

	return !mustwait;
}
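
Both versions share the same contract: on failure they return false with no side effects and drop the interrupt holdoff, while on success interrupts stay held off until the matching LWLockRelease(). A typical call site therefore follows the pattern sketched below; ExampleLock and do_fallback_work() are placeholders for illustration, not names from either snippet.

	if (LWLockConditionalAcquire(ExampleLock, LW_EXCLUSIVE))
	{
		/* ... manipulate the shared structure the lock protects ... */
		LWLockRelease(ExampleLock);	/* also resumes interrupts */
	}
	else
	{
		/* Lock was busy; nothing to undo, so do something else. */
		do_fallback_work();
	}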