/* Allocate and initialize walsender-related shared memory */
void
WalSndShmemInit(void)
{
	bool		found;
	int			i;

	WalSndCtl = (WalSndCtlData *)
		ShmemInitStruct("Wal Sender Ctl", WalSndShmemSize(), &found);

	if (!found)
	{
		/* First time through, so initialize */
		MemSet(WalSndCtl, 0, WalSndShmemSize());

		SHMQueueInit(&(WalSndCtl->SyncRepQueue));

		for (i = 0; i < max_wal_senders; i++)
		{
			WalSnd	   *walsnd = &WalSndCtl->walsnds[i];

			SpinLockInit(&walsnd->mutex);
			InitSharedLatch(&walsnd->latch);
		}
	}
}
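/*
 * Illustrative sketch (not part of the original source): once
 * WalSndShmemInit() has zeroed the array, a starting walsender can claim a
 * free slot by scanning walsnds[] for an entry whose pid is 0, taking the
 * per-slot spinlock initialized above.  The helper name ClaimWalSndSlotSketch
 * is an assumption for illustration only; the real claiming logic may differ.
 */
static WalSnd *
ClaimWalSndSlotSketch(void)
{
	int			i;

	for (i = 0; i < max_wal_senders; i++)
	{
		WalSnd	   *walsnd = &WalSndCtl->walsnds[i];

		SpinLockAcquire(&walsnd->mutex);
		if (walsnd->pid != 0)
		{
			/* slot already owned by another walsender */
			SpinLockRelease(&walsnd->mutex);
			continue;
		}
		walsnd->pid = MyProcPid;	/* mark the slot as ours */
		SpinLockRelease(&walsnd->mutex);
		return walsnd;
	}
	return NULL;				/* no free slot available */
}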
/* Allocate and initialize walreceiver-related shared memory */
void
WalRcvShmemInit(void)
{
	bool		found;

	WalRcv = (WalRcvData *)
		ShmemInitStruct("Wal Receiver Ctl", WalRcvShmemSize(), &found);

	if (!found)
	{
		/* First time through, so initialize */
		MemSet(WalRcv, 0, WalRcvShmemSize());
		WalRcv->walRcvState = WALRCV_STOPPED;
		SpinLockInit(&WalRcv->mutex);
		InitSharedLatch(&WalRcv->latch);
	}
}
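/*
 * Illustrative sketch (not part of the original source): readers and writers
 * of the shared WalRcv struct are expected to take the spinlock initialized
 * above before touching walRcvState.  A caller asking "is a walreceiver
 * active?" might do the following; the helper name WalRcvActiveSketch is an
 * assumption for illustration only.
 */
static bool
WalRcvActiveSketch(void)
{
	WalRcvState state;

	SpinLockAcquire(&WalRcv->mutex);
	state = WalRcv->walRcvState;	/* copy out under the lock */
	SpinLockRelease(&WalRcv->mutex);

	return state != WALRCV_STOPPED;
}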
/*
 * InitProcGlobal -
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
 *
 *	  We also create all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxConnections, max_worker_processes, or autovacuum_max_workers higher
 *	  than his kernel will support, he'll find out sooner rather than later.
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
 *
 * Note: this is NOT called by individual backends under a postmaster,
 * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
 * pointers must be propagated specially for EXEC_BACKEND operation.
 */
void
InitProcGlobal(void)
{
	PGPROC	   *procs;
	PGXACT	   *pgxacts;
	int			i,
				j;
	bool		found;
	uint32		TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;

	/* Create the ProcGlobal shared structure */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	Assert(!found);

	/*
	 * Initialize the data structures.
	 */
	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
	ProcGlobal->freeProcs = NULL;
	ProcGlobal->autovacFreeProcs = NULL;
	ProcGlobal->bgworkerFreeProcs = NULL;
	ProcGlobal->startupProc = NULL;
	ProcGlobal->startupProcPid = 0;
	ProcGlobal->startupBufferPinWaitBufId = -1;
	ProcGlobal->walwriterLatch = NULL;
	ProcGlobal->checkpointerLatch = NULL;

	/*
	 * Create and initialize all the PGPROC structures we'll need.  There are
	 * five separate consumers: (1) normal backends, (2) autovacuum workers
	 * and the autovacuum launcher, (3) background workers, (4) auxiliary
	 * processes, and (5) prepared transactions.  Each PGPROC structure is
	 * dedicated to exactly one of these purposes, and they do not move
	 * between groups.
	 */
	procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
	ProcGlobal->allProcs = procs;
	/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
	ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
	elog(DEBUG3, "InitProcGlobal of size %d :: TID : %d",
		 TotalProcs, GetBackendThreadId());
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	MemSet(procs, 0, TotalProcs * sizeof(PGPROC));

	/*
	 * Also allocate a separate array of PGXACT structures.  This is separate
	 * from the main PGPROC array so that the most heavily accessed data is
	 * stored contiguously in memory in as few cache lines as possible.  This
	 * provides significant performance benefits, especially on a
	 * multiprocessor system.  There is one PGXACT structure for every PGPROC
	 * structure.
	 */
	pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
	MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
	ProcGlobal->allPgXact = pgxacts;

	for (i = 0; i < TotalProcs; i++)
	{
		/* Common initialization for all PGPROCs, regardless of type. */

		/*
		 * Set up per-PGPROC semaphore, latch, and backendLock.  Prepared xact
		 * dummy PGPROCs don't need these though - they're never associated
		 * with a real process.
		 */
		if (i < MaxBackends + NUM_AUXILIARY_PROCS)
		{
			PGSemaphoreCreate(&(procs[i].sem));
			InitSharedLatch(&(procs[i].procLatch));
			procs[i].backendLock = LWLockAssign();
		}
		procs[i].pgprocno = i;

		/*
		 * Newly created PGPROCs for normal backends, autovacuum and bgworkers
		 * must be queued up on the appropriate free list.  Because there can
		 * only ever be a small, fixed number of auxiliary processes, no free
		 * list is used in that case; InitAuxiliaryProcess() instead uses a
		 * linear search.  PGPROCs for prepared transactions are added to a
		 * free list by TwoPhaseShmemInit().
		 */
		if (i < MaxConnections)
		{
			/* PGPROC for normal backend, add to freeProcs list */
			procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
			ProcGlobal->freeProcs = &procs[i];
			//elog(DEBUG3, "freeProcs %d = %p", i, ProcGlobal->freeProcs);
			//Assert(ShmemAddrIsValid(ProcGlobal->freeProcs));
		}
		else if (i < MaxConnections + autovacuum_max_workers + 1)
		{
			/* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
			procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
			ProcGlobal->autovacFreeProcs = &procs[i];
		}
		else if (i < MaxBackends)
		{
			/* PGPROC for bgworker, add to bgworkerFreeProcs list */
			procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
			ProcGlobal->bgworkerFreeProcs = &procs[i];
		}

		/* Initialize myProcLocks[] shared memory queues. */
		for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
			SHMQueueInit(&(procs[i].myProcLocks[j]));
	}

	/*
	 * Save pointers to the blocks of PGPROC structures reserved for auxiliary
	 * processes and prepared transactions.
	 */
	AuxiliaryProcs = &procs[MaxBackends];
	PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];

	/* Create ProcStructLock spinlock, too */
	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
	SpinLockInit(ProcStructLock);
}
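/*
 * Illustrative sketch (not part of the original source): the free lists built
 * above are later consumed at backend startup, which pops the head of
 * freeProcs while holding ProcStructLock.  The helper name GetFreeProcSketch
 * is an assumption for illustration only; the real startup path does more
 * (resetting the PGPROC, handling the other free lists, etc.).
 */
static PGPROC *
GetFreeProcSketch(void)
{
	PGPROC	   *proc;

	SpinLockAcquire(ProcStructLock);
	proc = ProcGlobal->freeProcs;
	if (proc != NULL)
	{
		/* unlink the head; links.next was threaded in InitProcGlobal() */
		ProcGlobal->freeProcs = (PGPROC *) proc->links.next;
	}
	SpinLockRelease(ProcStructLock);

	return proc;				/* NULL means no free PGPROC is available */
}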