Example 1
/*
 * InitProcGlobal -
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
 *
 *	  We also create all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxConnections or autovacuum_max_workers higher than his kernel will
 *	  support, he'll find out sooner rather than later.
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
 *
 * Note: this is NOT called by individual backends under a postmaster,
 * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
 * pointers must be propagated specially for EXEC_BACKEND operation.
 */
void
InitProcGlobal(int mppLocalProcessCounter)
{
	PGPROC	   *procs;
	int			i;
	bool		found;

	/* Create the ProcGlobal shared structure */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	Assert(!found);

	/*
	 * Create the PGPROC structures for auxiliary (bgwriter) processes, too.
	 * These do not get linked into the freeProcs list.
	 */
	AuxiliaryProcs = (PGPROC *)
		ShmemInitStruct("AuxiliaryProcs", NUM_AUXILIARY_PROCS * sizeof(PGPROC),
						&found);
	Assert(!found);

	/*
	 * Initialize the data structures.
	 */
	ProcGlobal->freeProcs = INVALID_OFFSET;

	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;

	ProcGlobal->mppLocalProcessCounter = mppLocalProcessCounter;

	/*
	 * Pre-create the PGPROC structures and create a semaphore for each.
	 */
	procs = (PGPROC *) ShmemAlloc(MaxBackends * sizeof(PGPROC));
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	MemSet(procs, 0, MaxBackends * sizeof(PGPROC));
	for (i = 0; i < MaxBackends; i++)
	{
		PGSemaphoreCreate(&(procs[i].sem));

		procs[i].links.next = ProcGlobal->freeProcs;
		ProcGlobal->freeProcs = MAKE_OFFSET(&procs[i]);
	}
	ProcGlobal->procs = procs;
	ProcGlobal->numFreeProcs = MaxBackends;

	MemSet(AuxiliaryProcs, 0, NUM_AUXILIARY_PROCS * sizeof(PGPROC));
	for (i = 0; i < NUM_AUXILIARY_PROCS; i++)
	{
		AuxiliaryProcs[i].pid = 0;		/* marks auxiliary proc as not in use */
		AuxiliaryProcs[i].postmasterResetRequired = true;
		PGSemaphoreCreate(&(AuxiliaryProcs[i].sem));
	}
}
Example 2
/*
 * Initialize semaphores.
 */
extern void
SpinlockSemaInit(PGSemaphore spinsemas)
{
	int			i;
	int			nsemas = SpinlockSemas();

	for (i = 0; i < nsemas; ++i)
		PGSemaphoreCreate(&spinsemas[i]);
	SpinlockSemaArray = spinsemas;
}
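
For context, a minimal caller-side sketch follows: the array handed to SpinlockSemaInit() must live in shared memory and hold SpinlockSemas() entries. The helper name CreateSpinlockSemaphores() is hypothetical, and the sizing assumes the older semaphore layout in which PGSemaphore points at a PGSemaphoreData stored in shared memory.

/*
 * Hypothetical caller-side sketch: allocate the spinlock semaphore array in
 * shared memory and let SpinlockSemaInit() create one semaphore per slot.
 * Assumes PGSemaphore is a pointer to PGSemaphoreData in shared memory.
 */
static void
CreateSpinlockSemaphores(void)
{
	PGSemaphore spinsemas;

	spinsemas = (PGSemaphore) ShmemAlloc(SpinlockSemas() * sizeof(PGSemaphoreData));
	if (!spinsemas)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	SpinlockSemaInit(spinsemas);
}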
Example 3
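/*
 * Initialize a BgwPool in shared memory: allocate the work queue, create and
 * reset the semaphores used for producer/worker signalling, and set up the
 * remaining bookkeeping fields.
 */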
void BgwPoolInit(BgwPool* pool, BgwPoolExecutor executor, char const* dbname, size_t queueSize, size_t nWorkers)
{
    pool->queue = (char*)ShmemAlloc(queueSize);
    pool->executor = executor;
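    /*
     * PGSemaphoreCreate() gives each semaphore an initial value of 1 (see
     * Example 4), so reset both to zero: the first lock attempt on either
     * should block until the counterpart signals it.
     */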
    PGSemaphoreCreate(&pool->available);
    PGSemaphoreCreate(&pool->overflow);
    PGSemaphoreReset(&pool->available);
    PGSemaphoreReset(&pool->overflow);
    SpinLockInit(&pool->lock);
    pool->producerBlocked = false;
    pool->head = 0;
    pool->tail = 0;
    pool->size = queueSize;
    pool->active = 0;
    pool->pending = 0;
    pool->nWorkers = nWorkers;
    pool->lastPeakTime = 0;
    strcpy(pool->dbname, dbname);
}
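
A hypothetical sketch of the consuming side follows, to show how the two semaphores created above are meant to be used: a worker sleeps on "available" until work has been queued, and wakes a producer that blocked on a full queue via "overflow". The function name BgwPoolWorkerLoop() and the dequeue details are assumptions, and PGSemaphoreLock() is shown with the two-argument form used in Example 4 (its signature differs across PostgreSQL versions).

/*
 * Hypothetical worker-side sketch (not part of the example above).
 * Sleeps on pool->available until a producer has queued work, pops an item
 * under pool->lock, and wakes a producer that blocked on a full queue.
 */
static void
BgwPoolWorkerLoop(BgwPool* pool)
{
    for (;;)
    {
        /* wait until the producer signals queued work */
        PGSemaphoreLock(&pool->available, false);

        SpinLockAcquire(&pool->lock);
        /* ... dequeue one message from pool->queue (head/tail bookkeeping) ... */
        if (pool->producerBlocked)
        {
            pool->producerBlocked = false;
            PGSemaphoreUnlock(&pool->overflow);
        }
        SpinLockRelease(&pool->lock);

        /* ... invoke pool->executor on the dequeued message ... */
    }
}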
Example 4
int
main(int argc, char **argv)
{
	MyStorage  *storage;
	int			cpid;

	printf("Creating shared memory ... ");
	fflush(stdout);

	storage = (MyStorage *) PGSharedMemoryCreate(8192, false, 5433);

	storage->flag = 1234;

	printf("OK\n");

	printf("Creating semaphores ... ");
	fflush(stdout);

	PGReserveSemaphores(2, 5433);

	PGSemaphoreCreate(&storage->sem);

	printf("OK\n");

	/* sema initial value is 1, so lock should work */

	printf("Testing Lock ... ");
	fflush(stdout);

	PGSemaphoreLock(&storage->sem, false);

	printf("OK\n");

	/* now sema value is 0, so trylock should fail */

	printf("Testing TryLock ... ");
	fflush(stdout);

	if (PGSemaphoreTryLock(&storage->sem))
		printf("unexpected result!\n");
	else
		printf("OK\n");

	/* unlocking twice and then locking twice should work... */

	printf("Testing Multiple Lock ... ");
	fflush(stdout);

	PGSemaphoreUnlock(&storage->sem);
	PGSemaphoreUnlock(&storage->sem);

	PGSemaphoreLock(&storage->sem, false);
	PGSemaphoreLock(&storage->sem, false);

	printf("OK\n");

	/* check Reset too */

	printf("Testing Reset ... ");
	fflush(stdout);

	PGSemaphoreUnlock(&storage->sem);

	PGSemaphoreReset(&storage->sem);

	if (PGSemaphoreTryLock(&storage->sem))
		printf("unexpected result!\n");
	else
		printf("OK\n");

	/* Fork a child process and see if it can communicate */

	printf("Forking child process ... ");
	fflush(stdout);

	cpid = fork();
	if (cpid == 0)
	{
		/* In child */
		on_exit_reset();
		sleep(3);
		storage->flag++;
		PGSemaphoreUnlock(&storage->sem);
		proc_exit(0);
	}
	if (cpid < 0)
	{
		/* Fork failed */
		printf("failed: %s\n", strerror(errno));
		proc_exit(1);
	}

	printf("forked child pid %d OK\n", cpid);

	if (storage->flag != 1234)
		printf("Wrong value found in shared memory!\n");

	printf("Waiting for child (should wait 3 sec here) ... ");
	fflush(stdout);

	PGSemaphoreLock(&storage->sem, false);

	printf("OK\n");

	if (storage->flag != 1235)
		printf("Wrong value found in shared memory!\n");

	/* Test shutdown */

	printf("Running shmem_exit processing ... ");
	fflush(stdout);

	shmem_exit(0);

	printf("OK\n");

	printf("Tests complete.\n");

	proc_exit(0);

	return 0;					/* not reached */
}
Example 5
/*
 * InitProcGlobal -
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
 *
 *	  We also create all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxConnections, max_worker_processes, or autovacuum_max_workers higher
 *	  than his kernel will support, he'll find out sooner rather than later.
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
 *
 * Note: this is NOT called by individual backends under a postmaster,
 * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
 * pointers must be propagated specially for EXEC_BACKEND operation.
 */
void
InitProcGlobal(void)
{
	PGPROC	   *procs;
	PGXACT	   *pgxacts;
	int			i,
				j;
	bool		found;
	uint32		TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;

	/* Create the ProcGlobal shared structure */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	Assert(!found);

	/*
	 * Initialize the data structures.
	 */
	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
	ProcGlobal->freeProcs = NULL;
	ProcGlobal->autovacFreeProcs = NULL;
	ProcGlobal->bgworkerFreeProcs = NULL;
	ProcGlobal->startupProc = NULL;
	ProcGlobal->startupProcPid = 0;
	ProcGlobal->startupBufferPinWaitBufId = -1;
	ProcGlobal->walwriterLatch = NULL;
	ProcGlobal->checkpointerLatch = NULL;

	/*
	 * Create and initialize all the PGPROC structures we'll need.  There are
	 * five separate consumers: (1) normal backends, (2) autovacuum workers
	 * and the autovacuum launcher, (3) background workers, (4) auxiliary
	 * processes, and (5) prepared transactions.  Each PGPROC structure is
	 * dedicated to exactly one of these purposes, and they do not move
	 * between groups.
	 */
	procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
	ProcGlobal->allProcs = procs;
	/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
	ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
	elog(DEBUG3, "InitProcGlobal of size %u :: TID : %d", TotalProcs, GetBackendThreadId());
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	MemSet(procs, 0, TotalProcs * sizeof(PGPROC));

	/*
	 * Also allocate a separate array of PGXACT structures.  This is separate
	 * from the main PGPROC array so that the most heavily accessed data is
	 * stored contiguously in memory in as few cache lines as possible. This
	 * provides significant performance benefits, especially on a
	 * multiprocessor system.  There is one PGXACT structure for every PGPROC
	 * structure.
	 */
	pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
	MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
	ProcGlobal->allPgXact = pgxacts;

	for (i = 0; i < TotalProcs; i++)
	{
		/* Common initialization for all PGPROCs, regardless of type. */

		/*
		 * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
		 * dummy PGPROCs don't need these though - they're never associated
		 * with a real process
		 */
		if (i < MaxBackends + NUM_AUXILIARY_PROCS)
		{
			PGSemaphoreCreate(&(procs[i].sem));
			InitSharedLatch(&(procs[i].procLatch));
			procs[i].backendLock = LWLockAssign();
		}
		procs[i].pgprocno = i;

		/*
		 * Newly created PGPROCs for normal backends, autovacuum and bgworkers
		 * must be queued up on the appropriate free list.  Because there can
		 * only ever be a small, fixed number of auxiliary processes, no free
		 * list is used in that case; InitAuxiliaryProcess() instead uses a
		 * linear search.   PGPROCs for prepared transactions are added to a
		 * free list by TwoPhaseShmemInit().
		 */
		if (i < MaxConnections)
		{
			/* PGPROC for normal backend, add to freeProcs list */
			procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
			ProcGlobal->freeProcs = &procs[i];
			//elog(DEBUG3, "freeProcs %d = %p", i, ProcGlobal->freeProcs);
			//Assert(ShmemAddrIsValid(ProcGlobal->freeProcs));
		}
		else if (i < MaxConnections + autovacuum_max_workers + 1)
		{
			/* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
			procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
			ProcGlobal->autovacFreeProcs = &procs[i];
		}
		else if (i < MaxBackends)
		{
			/* PGPROC for bgworker, add to bgworkerFreeProcs list */
			procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
			ProcGlobal->bgworkerFreeProcs = &procs[i];
		}

		/* Initialize myProcLocks[] shared memory queues. */
		for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
			SHMQueueInit(&(procs[i].myProcLocks[j]));
	}

	/*
	 * Save pointers to the blocks of PGPROC structures reserved for auxiliary
	 * processes and prepared transactions.
	 */
	AuxiliaryProcs = &procs[MaxBackends];
	PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];

	/* Create ProcStructLock spinlock, too */
	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
	SpinLockInit(ProcStructLock);
}
Example 6
void
s_init_lock_sema(volatile slock_t *lock)
{
	PGSemaphoreCreate((PGSemaphore) lock);
}
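
For completeness, here is a sketch of the companion operations that would pair with s_init_lock_sema() when spinlocks are emulated with semaphores. The names tas_sema() and s_unlock_sema() follow the usual spin.c naming, but treat the bodies as assumptions; only PGSemaphoreTryLock() and PGSemaphoreUnlock(), both used in Example 4, are taken from the examples above.

/*
 * Sketch of the operations paired with s_init_lock_sema() when the platform
 * has no native test-and-set and spinlocks fall back to semaphores.
 * Bodies are illustrative assumptions.
 */
int
tas_sema(volatile slock_t *lock)
{
	/* TAS convention: return 0 on success, nonzero if the lock was busy */
	return !PGSemaphoreTryLock((PGSemaphore) lock);
}

void
s_unlock_sema(volatile slock_t *lock)
{
	PGSemaphoreUnlock((PGSemaphore) lock);
}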