Example #1
/*
 * LWLockUpdateVar - Update a variable and wake up waiters atomically
 *
 * Sets *valptr to 'val', and wakes up all processes waiting for us with
 * LWLockWaitForVar().  Setting the value and waking up the processes happen
 * atomically so that any process calling LWLockWaitForVar() on the same lock
 * is guaranteed to see the new value, and act accordingly.
 *
 * The caller must be holding the lock in exclusive mode.
 */
void
LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
{
	PGPROC	   *head;
	PGPROC	   *proc;
	PGPROC	   *next;

	/* Acquire mutex.  Time spent holding mutex should be short! */
	SpinLockAcquire(&lock->mutex);

	/* we should hold the lock */
	Assert(lock->exclusive == 1);

	/* Update the lock's value */
	*valptr = val;

	/*
	 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
	 * up. They are always in the front of the queue.
	 */
	head = lock->head;

	if (head != NULL && head->lwWaitMode == LW_WAIT_UNTIL_FREE)
	{
		proc = head;
		next = proc->lwWaitLink;
		while (next && next->lwWaitMode == LW_WAIT_UNTIL_FREE)
		{
			proc = next;
			next = next->lwWaitLink;
		}

		/* proc is now the last PGPROC to be released */
		lock->head = next;
		proc->lwWaitLink = NULL;
	}
	else
		head = NULL;

	/* We are done updating shared state of the lock itself. */
	SpinLockRelease(&lock->mutex);

	/*
	 * Awaken any waiters I removed from the queue.
	 */
	while (head != NULL)
	{
		proc = head;
		head = proc->lwWaitLink;
		proc->lwWaitLink = NULL;
		/* check comment in LWLockRelease() about this barrier */
		pg_write_barrier();
		proc->lwWaiting = false;
		PGSemaphoreUnlock(&proc->sem);
	}
}
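
As a usage illustration (not part of the snippet above): a minimal, hypothetical sketch of how LWLockUpdateVar() pairs with LWLockWaitForVar() on the reader side. The function and variable names are invented, and the LWLockWaitForVar() signature and return semantics are assumed to match the same-era lwlock.c (bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)).

#include "postgres.h"
#include "storage/lwlock.h"

/* Hypothetical writer: publish progress while holding the lock exclusively. */
static void
writer_advance(LWLock *progress_lock, uint64 *progress_ptr, uint64 new_pos)
{
	/* LWLockUpdateVar() requires the caller to hold the lock exclusively. */
	LWLockUpdateVar(progress_lock, progress_ptr, new_pos);
}

/*
 * Hypothetical reader: sleep until the writer releases the lock, re-checking
 * the published value whenever it advances.
 */
static uint64
reader_wait_for_progress(LWLock *progress_lock, uint64 *progress_ptr,
						 uint64 seen_pos)
{
	uint64		new_pos = seen_pos;

	/*
	 * Assumed semantics: LWLockWaitForVar() returns true once the lock is
	 * observed free, and false when it was woken because the variable no
	 * longer equals the passed-in value; the current value comes back
	 * through the last argument.
	 */
	while (!LWLockWaitForVar(progress_lock, progress_ptr, new_pos, &new_pos))
	{
		/* value advanced but lock still held; wait again with fresh value */
	}

	return new_pos;
}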
Example #2
/*
 * Insert a TOC entry.
 *
 * The idea here is that process setting up the shared memory segment will
 * register the addresses of data structures within the segment using this
 * function.  Each data structure will be identified using a 64-bit key, which
 * is assumed to be a well-known or discoverable integer.  Other processes
 * accessing the shared memory segment can pass the same key to
 * shm_toc_lookup() to discover the addresses of those data structures.
 *
 * Since the shared memory segment may be mapped at different addresses within
 * different backends, we store relative rather than absolute pointers.
 *
 * This won't scale well to a large number of keys.  Hopefully, that isn't
 * necessary; if it proves to be, we might need to provide a more sophisticated
 * data structure here.  But the real idea here is just to give someone mapping
 * a dynamic shared memory segment the ability to find the bare minimum number
 * of pointers that they need to bootstrap.  If you're storing a lot of stuff
 * in here, you're doing it wrong.
 */
void
shm_toc_insert(shm_toc *toc, uint64 key, void *address)
{
	volatile shm_toc *vtoc = toc;
	uint64		total_bytes;
	uint64		allocated_bytes;
	uint64		nentry;
	uint64		toc_bytes;
	uint64		offset;

	/* Relativize pointer. */
	Assert(address > (void *) toc);
	offset = ((char *) address) - (char *) toc;

	SpinLockAcquire(&toc->toc_mutex);

	total_bytes = vtoc->toc_total_bytes;
	allocated_bytes = vtoc->toc_allocated_bytes;
	nentry = vtoc->toc_nentry;
	toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry)
		+ allocated_bytes;

	elog(LOG, "shm_toc_insert 1");
	/* Check for memory exhaustion and overflow. */
	if (toc_bytes + sizeof(shm_toc_entry) > total_bytes ||
		toc_bytes + sizeof(shm_toc_entry) < toc_bytes)
	{
		elog(LOG, "insert out");
		SpinLockRelease(&toc->toc_mutex);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	}

	Assert(offset < total_bytes);
	vtoc->toc_entry[nentry].key = key;
	vtoc->toc_entry[nentry].offset = offset;

	/*
	 * By placing a write barrier after filling in the entry and before
	 * updating the number of entries, we make it safe to read the TOC
	 * unlocked.
	 */
	pg_write_barrier();

	vtoc->toc_nentry++;

	SpinLockRelease(&toc->toc_mutex);
}
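
For context, a small, hypothetical setup/attach sketch built around shm_toc_insert(). The magic number, key, and MySharedState struct are invented, and the shm_toc_create()/shm_toc_attach()/shm_toc_allocate()/shm_toc_lookup() signatures are assumed to match the same-era shm_toc.h.

#include "postgres.h"
#include "storage/shm_toc.h"

#define MY_SHM_MAGIC	0x5eed1234		/* arbitrary, made-up magic value */
#define MY_STATE_KEY	UINT64CONST(1)	/* well-known key for the state struct */

typedef struct MySharedState
{
	int			nworkers;
} MySharedState;

/* In the process that creates and populates the segment. */
static void
setup_segment(void *segment_base, Size segment_size)
{
	shm_toc    *toc;
	MySharedState *state;

	toc = shm_toc_create(MY_SHM_MAGIC, segment_base, segment_size);
	state = shm_toc_allocate(toc, sizeof(MySharedState));
	state->nworkers = 4;

	/* Publish the structure's address under the well-known key. */
	shm_toc_insert(toc, MY_STATE_KEY, state);
}

/* In a process that attaches to the existing segment. */
static MySharedState *
attach_segment(void *segment_base)
{
	shm_toc    *toc = shm_toc_attach(MY_SHM_MAGIC, segment_base);

	/* Same key, same structure, possibly a different mapping address. */
	return shm_toc_lookup(toc, MY_STATE_KEY);
}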
Example #3
/*
 * LWLockRelease - release a previously acquired lock
 */
void
LWLockRelease(LWLock *lock)
{
	PGPROC	   *head;
	PGPROC	   *proc;
	int			i;

	PRINT_LWDEBUG("LWLockRelease", lock);

	/*
	 * Remove lock from list of locks held.  Usually, but not always, it will
	 * be the latest-acquired lock; so search array backwards.
	 */
	for (i = num_held_lwlocks; --i >= 0;)
	{
		if (lock == held_lwlocks[i])
			break;
	}
	if (i < 0)
		elog(ERROR, "lock %s %d is not held", T_NAME(lock), T_ID(lock));
	num_held_lwlocks--;
	for (; i < num_held_lwlocks; i++)
		held_lwlocks[i] = held_lwlocks[i + 1];

	/* Acquire mutex.  Time spent holding mutex should be short! */
	SpinLockAcquire(&lock->mutex);

	/* Release my hold on lock */
	if (lock->exclusive > 0)
		lock->exclusive--;
	else
	{
		Assert(lock->shared > 0);
		lock->shared--;
	}

	/*
	 * See if I need to awaken any waiters.  If I released a non-last shared
	 * hold, there cannot be anything to do.  Also, do not awaken any waiters
	 * if someone has already awakened waiters that haven't yet acquired the
	 * lock.
	 */
	head = lock->head;
	if (head != NULL)
	{
		if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
		{
			/*
			 * Remove the to-be-awakened PGPROCs from the queue.
			 */
			bool		releaseOK = true;

			proc = head;

			/*
			 * First wake up any backends that want to be woken up without
			 * acquiring the lock.
			 */
			while (proc->lwWaitMode == LW_WAIT_UNTIL_FREE && proc->lwWaitLink)
				proc = proc->lwWaitLink;

			/*
			 * If the front waiter wants exclusive lock, awaken him only.
			 * Otherwise awaken as many waiters as want shared access.
			 */
			if (proc->lwWaitMode != LW_EXCLUSIVE)
			{
				while (proc->lwWaitLink != NULL &&
					   proc->lwWaitLink->lwWaitMode != LW_EXCLUSIVE)
				{
					if (proc->lwWaitMode != LW_WAIT_UNTIL_FREE)
						releaseOK = false;
					proc = proc->lwWaitLink;
				}
			}
			/* proc is now the last PGPROC to be released */
			lock->head = proc->lwWaitLink;
			proc->lwWaitLink = NULL;

			/*
			 * Prevent additional wakeups until retryer gets to run. Backends
			 * that are just waiting for the lock to become free don't retry
			 * automatically.
			 */
			if (proc->lwWaitMode != LW_WAIT_UNTIL_FREE)
				releaseOK = false;

			lock->releaseOK = releaseOK;
		}
		else
		{
			/* lock is still held, can't awaken anything */
			head = NULL;
		}
	}

	/* We are done updating shared state of the lock itself. */
	SpinLockRelease(&lock->mutex);

	TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock), T_ID(lock));

	/*
	 * Awaken any waiters I removed from the queue.
	 */
	while (head != NULL)
	{
		LOG_LWDEBUG("LWLockRelease", T_NAME(lock), T_ID(lock),
					"release waiter");
		proc = head;
		head = proc->lwWaitLink;
		proc->lwWaitLink = NULL;
		/*
		 * Guarantee that lwWaiting being unset only becomes visible once the
		 * unlink from the wait list has completed.  Otherwise the target
		 * backend could be woken up for some other reason and enqueue itself
		 * for a new lock - if that happens before the list unlink here
		 * completes, the list would end up being corrupted.
		 *
		 * The barrier pairs with the SpinLockAcquire() when enqueuing for
		 * another lock.
		 */
		pg_write_barrier();
		proc->lwWaiting = false;
		PGSemaphoreUnlock(&proc->sem);
	}

	/*
	 * Now okay to allow cancel/die interrupts.
	 */
	RESUME_INTERRUPTS();
}
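
To place the release path in context, here is a minimal, hypothetical sketch of the acquire/release pairing it completes. The lock pointer and the protected counter are invented; LWLockAcquire(lock, mode) with LW_SHARED/LW_EXCLUSIVE is assumed as in the same-era lwlock.h.

#include "postgres.h"
#include "storage/lwlock.h"

/* Hypothetical caller: read-modify-write a shared counter under a lock. */
static void
bump_shared_counter(LWLock *counter_lock, uint64 *counter)
{
	/*
	 * LWLockAcquire() holds off cancel/die interrupts; the matching
	 * RESUME_INTERRUPTS() happens at the end of LWLockRelease() above.
	 */
	LWLockAcquire(counter_lock, LW_EXCLUSIVE);
	(*counter)++;
	LWLockRelease(counter_lock);
}

static uint64
read_shared_counter(LWLock *counter_lock, uint64 *counter)
{
	uint64		value;

	LWLockAcquire(counter_lock, LW_SHARED);
	value = *counter;
	LWLockRelease(counter_lock);

	return value;
}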
Example #4
File: lwlock.c  Project: 50wu/gpdb
/*
 * LWLockRelease - release a previously acquired lock
 */
void
LWLockRelease(LWLockId lockid)
{
	volatile LWLock *lock = &(LWLockArray[lockid].lock);
	PGPROC	   *head;
	PGPROC	   *proc;
	int			i;
	bool		saveExclusive;

	PRINT_LWDEBUG("LWLockRelease", lockid, lock);

	/*
	 * Remove lock from list of locks held.  Usually, but not always, it will
	 * be the latest-acquired lock; so search array backwards.
	 */
	for (i = num_held_lwlocks; --i >= 0;)
	{
		if (lockid == held_lwlocks[i])
			break;
	}
	if (i < 0)
		elog(ERROR, "lock %d is not held", (int) lockid);

	saveExclusive = held_lwlocks_exclusive[i];
	if (InterruptHoldoffCount <= 0)
		elog(PANIC, "upon entering lock release, the interrupt holdoff count is bad (%d) for release of lock %d (%s)", 
			 InterruptHoldoffCount,
			 (int)lockid,
			 (saveExclusive ? "Exclusive" : "Shared"));

#ifdef LWLOCK_TRACE_MIRROREDLOCK
	if (lockid == MirroredLock)
		elog(LOG, 
			 "LWLockRelease: release for MirroredLock by PID %u in held_lwlocks[%d] %s", 
			 MyProcPid, 
			 i,
			 (held_lwlocks_exclusive[i] ? "Exclusive" : "Shared"));
#endif
	
	num_held_lwlocks--;
	for (; i < num_held_lwlocks; i++)
	{
		held_lwlocks_exclusive[i] = held_lwlocks_exclusive[i + 1];
		held_lwlocks[i] = held_lwlocks[i + 1];
#ifdef USE_TEST_UTILS_X86
		/* shift stack traces */
		held_lwlocks_depth[i] = held_lwlocks_depth[i + 1];
		memcpy(held_lwlocks_addresses[i],
			   held_lwlocks_addresses[i + 1],
			   held_lwlocks_depth[i] * sizeof(*held_lwlocks_depth));
#endif /* USE_TEST_UTILS_X86 */
	}

	/* Clear out old last entry. */
	held_lwlocks_exclusive[num_held_lwlocks] = false;
	held_lwlocks[num_held_lwlocks] = 0;
#ifdef USE_TEST_UTILS_X86
	held_lwlocks_depth[num_held_lwlocks] = 0;
#endif /* USE_TEST_UTILS_X86 */

	/* Acquire mutex.  Time spent holding mutex should be short! */
	SpinLockAcquire(&lock->mutex);

	/* Release my hold on lock */
	if (lock->exclusive > 0)
	{
		lock->exclusive--;
		lock->exclusivePid = 0;
	}
	else
	{
		Assert(lock->shared > 0);
		lock->shared--;
	}

	/*
	 * See if I need to awaken any waiters.  If I released a non-last shared
	 * hold, there cannot be anything to do.  Also, do not awaken any waiters
	 * if someone has already awakened waiters that haven't yet acquired the
	 * lock.
	 */
	head = lock->head;
	if (head != NULL)
	{
		if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
		{
			/*
			 * Remove the to-be-awakened PGPROCs from the queue.  If the front
			 * waiter wants exclusive lock, awaken him only. Otherwise awaken
			 * as many waiters as want shared access.
			 */
			proc = head;
			if (!proc->lwExclusive)
			{
				while (proc->lwWaitLink != NULL &&
					   !proc->lwWaitLink->lwExclusive)
				{
					proc = proc->lwWaitLink;
					if (proc->pid != 0)
					{
						lock->releaseOK = false;
					}					
				}
			}
			/* proc is now the last PGPROC to be released */
			lock->head = proc->lwWaitLink;
			proc->lwWaitLink = NULL;
			
			/* proc->pid can be 0 if process exited while waiting for lock */
			if (proc->pid != 0)
			{
				/* prevent additional wakeups until retryer gets to run */
				lock->releaseOK = false;
			}
		}
		else
		{
			/* lock is still held, can't awaken anything */
			head = NULL;
		}
	}

	/* We are done updating shared state of the lock itself. */
	SpinLockRelease(&lock->mutex);

	PG_TRACE1(lwlock__release, lockid);

	/*
	 * Awaken any waiters I removed from the queue.
	 */
	while (head != NULL)
	{
#ifdef LWLOCK_TRACE_MIRROREDLOCK
		if (lockid == MirroredLock)
			elog(LOG, "LWLockRelease: release waiter for MirroredLock (this PID %u", MyProcPid);
#endif
		LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
		proc = head;
		head = proc->lwWaitLink;
		proc->lwWaitLink = NULL;
		pg_write_barrier();
		proc->lwWaiting = false;
		PGSemaphoreUnlock(&proc->sem);
	}

	/*
	 * Now okay to allow cancel/die interrupts.
	 */
	if (InterruptHoldoffCount <= 0)
		elog(PANIC, "upon exiting lock release, the interrupt holdoff count is bad (%d) for release of lock %d (%s)", 
			 InterruptHoldoffCount,
			 (int)lockid,
			 (saveExclusive ? "Exclusive" : "Shared"));
	RESUME_INTERRUPTS();
}
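
The calling pattern for this older LWLockId-based variant is the same, just with a lock identifier instead of a pointer. A brief, hypothetical sketch (the lock id and the protected work are invented):

#include "postgres.h"
#include "storage/lwlock.h"

/* Hypothetical caller against the LWLockId-style API of this variant. */
static void
do_protected_work(LWLockId my_lockid)
{
	LWLockAcquire(my_lockid, LW_EXCLUSIVE);

	/* ... touch the shared state guarded by my_lockid ... */

	LWLockRelease(my_lockid);
}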