Esempio n. 1
0
/*
 * Compute shmem space needed for LWLocks.
 */
/*
 * LWLockShmemSize
 *
 * Compute the shared-memory footprint of the LWLock machinery: the padded
 * lock array, the dynamic-allocation counters, alignment slop, and (when
 * built with lock partitioning) the LWLockPart array.
 */
Size
LWLockShmemSize(void)
{
	int			nlocks = NumLWLocks();
	Size		sz;

	/* The padded LWLock array itself. */
	sz = mul_size(nlocks, sizeof(LWLockPadded));

	/*
	 * Two ints worth of dynamic-allocation counters, plus enough slop to
	 * realign the LWLock array to LWLOCK_PADDED_SIZE.
	 */
	sz = add_size(sz, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);

#if LWLOCK_PART_SIZE > 1
	/*
	 * NOTE(review): this guard tests LWLOCK_PART_SIZE > 1, but the matching
	 * allocation in CreateLWLocks is guarded by LWLOCK_LOCK_PARTS > 1 —
	 * confirm the two conditions are always equivalent.
	 */
	/* The shared-counter partition array... */
	sz = add_size(sz, mul_size(LWLOCK_PARTS(nlocks), sizeof(LWLockPart)));

	/* ...plus slop so LWLockPartArray can be aligned to its element size. */
	sz = add_size(sz, sizeof(LWLockPart));
#endif

	return sz;
}
Esempio n. 2
0
/*
 * Allocate shmem space for LWLocks and initialize the locks.
 */
void
CreateLWLocks(void)
{
	int			numLocks = NumLWLocks();
	uint32		spaceLocks = LWLockShmemSize();
	LWLock	   *lock;
	int			id;

	/* Allocate space */
	LWLockArray = (LWLock *) ShmemAlloc(spaceLocks);

	/*
	 * Initialize all LWLocks to "unlocked" state
	 */
	for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
	{
		SpinLockInit(&lock->mutex);
		lock->releaseOK = true;
		lock->exclusive = 0;
		lock->shared = 0;
		lock->head = NULL;
		lock->tail = NULL;
	}

	/*
	 * Initialize the dynamic-allocation counter at the end of the array
	 */
	LWLockCounter = (int *) lock;
	LWLockCounter[0] = (int) NumFixedLWLocks;
	LWLockCounter[1] = numLocks;
}
Esempio n. 3
0
/*
 * Allocate shmem space for the main LWLock array and initialize it.  We also
 * register the main tranch here.
 */
/*
 * Allocate shmem space for the main LWLock array and initialize it.  We also
 * register the main tranche here.
 *
 * Only the first branch runs in the process that creates shared memory
 * (!IsUnderPostmaster); presumably child processes inherit the array and
 * only need the per-process tranche bookkeeping below — verify against
 * the callers if this is changed.
 */
void
CreateLWLocks(void)
{
	if (!IsUnderPostmaster)
	{
		int			numLocks = NumLWLocks();
		Size		spaceLocks = LWLockShmemSize();
		LWLockPadded *lock;
		int		   *LWLockCounter;
		char	   *ptr;
		int			id;

		/* Allocate space */
		ptr = (char *) ShmemAlloc(spaceLocks);

		/* Leave room for dynamic allocation of locks and tranches */
		ptr += 3 * sizeof(int);

		/*
		 * Ensure desired alignment of LWLock array.  Note this always
		 * advances ptr, even if it is already aligned; LWLockShmemSize()
		 * is presumed to reserve this worst-case padding — confirm if
		 * the layout is modified.
		 */
		ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;

		MainLWLockArray = (LWLockPadded *) ptr;

		/* Initialize all LWLocks in main array (all in tranche 0) */
		for (id = 0, lock = MainLWLockArray; id < numLocks; id++, lock++)
			LWLockInitialize(&lock->lock, 0);

		/*
		 * Initialize the dynamic-allocation counters, which are stored just
		 * before the first LWLock.  LWLockCounter[0] is the allocation
		 * counter for lwlocks, LWLockCounter[1] is the maximum number that
		 * can be allocated from the main array, and LWLockCounter[2] is the
		 * allocation counter for tranches.
		 */
		LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int));
		LWLockCounter[0] = NUM_FIXED_LWLOCKS;
		LWLockCounter[1] = numLocks;
		LWLockCounter[2] = 1;	/* 0 is the main array */
	}

	/* First call in this process: allocate room for 16 tranche pointers. */
	if (LWLockTrancheArray == NULL)
	{
		LWLockTranchesAllocated = 16;
		LWLockTrancheArray = (LWLockTranche **)
			MemoryContextAlloc(TopMemoryContext,
						  LWLockTranchesAllocated * sizeof(LWLockTranche *));
	}

	/* Register tranche 0, which covers the main LWLock array. */
	MainLWLockTranche.name = "main";
	MainLWLockTranche.array_base = MainLWLockArray;
	MainLWLockTranche.array_stride = sizeof(LWLockPadded);
	LWLockRegisterTranche(0, &MainLWLockTranche);
}
Esempio n. 4
0
/*
 * Report number of semaphores needed to support spinlocks.
 */
/*
 * SpinlockSemas
 *
 * Report the number of semaphores needed to back spinlocks.
 */
int
SpinlockSemas(void)
{
	/*
	 * Ideally each affected module would report its own needs, the way
	 * shmem space estimation is distributed.  Until then: one semaphore
	 * per LWLock plus a small fixed reserve (10 should be plenty) for
	 * everything else.
	 */
	return 10 + NumLWLocks();
}
Esempio n. 5
0
/*
 * Compute shmem space needed for LWLocks.
 */
/*
 * LWLockShmemSize
 *
 * Compute shmem space needed for LWLocks: the lock array itself plus the
 * shared dynamic-allocation counter, rounded up to maximal alignment.
 */
int
LWLockShmemSize(void)
{
	int			nlocks = NumLWLocks();
	uint32		nbytes;

	/* Array of LWLocks followed by two ints of allocation counters. */
	nbytes = nlocks * sizeof(LWLock) + 2 * sizeof(int);

	/*
	 * NOTE(review): the uint32 multiply could overflow for an enormous
	 * lock count — presumably NumLWLocks() is bounded elsewhere; verify.
	 */
	return (int) MAXALIGN(nbytes);
}
Esempio n. 6
0
/*
 * Compute shmem space needed for LWLocks.
 */
/*
 * LWLockShmemSize
 *
 * Compute shmem space needed for LWLocks: the padded lock array, the
 * dynamic-allocation counters, and room to align the array.
 */
Size
LWLockShmemSize(void)
{
	int			nlocks = NumLWLocks();
	Size		sz;

	/* The padded LWLock array itself... */
	sz = mul_size(nlocks, sizeof(LWLockPadded));

	/* ...plus two counter ints and worst-case alignment slop. */
	sz = add_size(sz, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);

	return sz;
}
Esempio n. 7
0
/*
 * Allocate shmem space for LWLocks and initialize the locks.
 */
/*
 * CreateLWLocks
 *
 * Allocate shmem space for LWLocks and initialize the locks.
 *
 * Layout: two ints of dynamic-allocation counters, padding to align the
 * array, then the LWLockPadded array.  LWLockShmemSize() must account for
 * all of this.
 */
void
CreateLWLocks(void)
{
	int			numLocks = NumLWLocks();
	Size		spaceLocks = LWLockShmemSize();
	LWLockPadded *lock;
	int		   *LWLockCounter;
	char	   *ptr;
	int			id;

	/* Allocate space */
	ptr = (char *) ShmemAlloc(spaceLocks);

	/* Leave room for dynamic allocation counter */
	ptr += 2 * sizeof(int);

	/*
	 * Ensure desired alignment of LWLock array.
	 *
	 * Cast through uintptr_t, not unsigned long: on LLP64 platforms
	 * (64-bit Windows) unsigned long is only 32 bits wide, so the old
	 * cast computed the remainder on a truncated address and could
	 * misalign the array.  This also matches the other versions of this
	 * function in the file.
	 */
	ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;

	LWLockArray = (LWLockPadded *) ptr;

	/*
	 * Initialize all LWLocks to "unlocked" state
	 */
	for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
	{
		SpinLockInit(&lock->lock.mutex);
		lock->lock.releaseOK = true;
		lock->lock.exclusive = 0;
		lock->lock.shared = 0;
		lock->lock.head = NULL;
		lock->lock.tail = NULL;
	}

	/*
	 * Initialize the dynamic-allocation counter, which is stored just before
	 * the first LWLock.  [0] = first dynamically allocatable lock id,
	 * [1] = total number of locks in the array.
	 */
	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
	LWLockCounter[0] = (int) NumFixedLWLocks;
	LWLockCounter[1] = numLocks;
}
Esempio n. 8
0
/*
 * SpinlockSemas
 *
 * Report number of semaphores needed to support spinlocks.
 */
int
SpinlockSemas(void)
{
	/*
	 * It would be cleaner to distribute this logic into the affected modules,
	 * similar to the way shmem space estimation is handled.
	 *
	 * For now, though, there are few enough users of spinlocks that we just
	 * keep the knowledge here.
	 */
	return NumLWLocks()			/* one for each lwlock */
		+ NBuffers				/* one for each buffer header */
		+ max_wal_senders		/* one for each wal sender process */
		+ num_xloginsert_slots	/* one for each WAL insertion slot */
		+ 30;					/* plus a bunch for other small-scale use */
}
Esempio n. 9
0
/*
 * CreateLWLocks
 *
 * Allocate shmem space for LWLocks and initialize the locks.
 *
 * Layout: two ints of dynamic-allocation counters, padding to align the
 * LWLock array, the LWLockPadded array, and — when the shared-holder
 * counters are partitioned (LWLOCK_LOCK_PARTS > 1) — an aligned array of
 * LWLockPart structs.  LWLockShmemSize() must account for all of this.
 *
 * Fix: the declaration of "part" was guarded by LWLOCK_PART_SIZE > 1
 * while every use is guarded by LWLOCK_LOCK_PARTS > 1; if the two
 * conditions ever disagreed that produced either an undeclared-identifier
 * error or an unused-variable warning.  Both guards now test the same
 * condition as the uses.
 */
void
CreateLWLocks(void)
{
	int			numLocks = NumLWLocks();
	Size		spaceLocks = LWLockShmemSize();
	LWLockPadded *lock;
#if LWLOCK_LOCK_PARTS > 1
	LWLockPart *part;
#endif
	int		   *LWLockCounter;
	char	   *ptr;
	int			id;

	/* Ensure that we didn't mess up the computation of LWLOCK_PART_LOCKS */
	Assert(sizeof(LWLockPart) == LWLOCK_PART_SIZE);

	/* Allocate space */
	ptr = (char *) ShmemAlloc(spaceLocks);

	/* Leave room for dynamic allocation counter */
	ptr += 2 * sizeof(int);

	/* Ensure desired alignment of LWLock array */
	ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;

	LWLockArray = (LWLockPadded *) ptr;
	ptr += sizeof(LWLockPadded) * numLocks;

#if LWLOCK_LOCK_PARTS > 1
	/* Ensure desired alignment of LWLockPart array */
	ptr += LWLOCK_PART_SIZE - ((uintptr_t) ptr) % LWLOCK_PART_SIZE;

	LWLockPartArray = (LWLockPart *) ptr;
	ptr += sizeof(LWLockPart) * LWLOCK_PARTS(numLocks);
#endif

	/*
	 * Initialize all LWLocks to "unlocked" state
	 */
	for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
	{
		SpinLockInit(&lock->lock.mutex);
		lock->lock.releaseOK = true;
		lock->lock.exclusive = 0;
#if LWLOCK_LOCK_PARTS == 1
		/* Unpartitioned build: the shared count lives in the lock itself. */
		lock->lock.shared = 0;
#endif
		lock->lock.head = NULL;
		lock->lock.tail = NULL;
	}

#if LWLOCK_LOCK_PARTS > 1
	/* Zero the partitioned shared-holder counters. */
	for (id = 0, part = LWLockPartArray; id < LWLOCK_PARTS(numLocks); id++, part++)
	{
#ifndef LWLOCK_PART_SHARED_OPS_ATOMIC
		/* Non-atomic build protects each partition with a spinlock. */
		SpinLockInit(&part->mutex);
#endif
		memset((char *) part->shared, 0, sizeof(int) * LWLOCK_PART_LOCKS);
	}
#endif

	/*
	 * Initialize the dynamic-allocation counter, which is stored just before
	 * the first LWLock.  [0] = first dynamically allocatable lock id,
	 * [1] = total number of locks in the array.
	 */
	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
	LWLockCounter[0] = (int) NumFixedLWLocks;
	LWLockCounter[1] = numLocks;
}