Example #1
/*
 * Allocate and initialize shared memory for replication slots.
 */
void
ReplicationSlotsShmemInit(void)
{
	bool		found;

	if (max_replication_slots == 0)
		return;

	ReplicationSlotCtl = (ReplicationSlotCtlData *)
		ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
						&found);

	LWLockRegisterTranche(LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
						  "replication_slot_io");

	if (!found)
	{
		int			i;

		/* First time through, so initialize */
		MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());

		for (i = 0; i < max_replication_slots; i++)
		{
			ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];

			/* everything else is zeroed by the memset above */
			SpinLockInit(&slot->mutex);
			LWLockInitialize(&slot->io_in_progress_lock,
							 LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS);
			ConditionVariableInit(&slot->active_cv);
		}
	}
}
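
Example #1 uses the PostgreSQL 10+ form of LWLockRegisterTranche(), which takes
just a tranche ID and a display name. The call sits in front of the !found
check on purpose: the name registry is backend-local, so every process that
attaches to the segment must register it. As a minimal usage sketch (the helper
below is hypothetical, not part of the example), a backend would then take one
of the initialized locks like this:

/* Hypothetical helper: serialize I/O on one replication slot.
 * Requires replication/slot.h and storage/lwlock.h. */
static void
do_slot_io(int slotno)
{
	ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[slotno];

	LWLockAcquire(&slot->io_in_progress_lock, LW_EXCLUSIVE);
	/* ... read or write the slot's on-disk state here ... */
	LWLockRelease(&slot->io_in_progress_lock);
}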
Example #2
/*
 * Allocate shmem space for the main LWLock array and initialize it.  We also
 * register the main tranche here.
 */
void
CreateLWLocks(void)
{
	if (!IsUnderPostmaster)
	{
		int			numLocks = NumLWLocks();
		Size		spaceLocks = LWLockShmemSize();
		LWLockPadded *lock;
		int		   *LWLockCounter;
		char	   *ptr;
		int			id;

		/* Allocate space */
		ptr = (char *) ShmemAlloc(spaceLocks);

		/* Leave room for dynamic allocation of locks and tranches */
		ptr += 3 * sizeof(int);

		/* Ensure desired alignment of LWLock array */
		ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;

		MainLWLockArray = (LWLockPadded *) ptr;

		/* Initialize all LWLocks in main array */
		for (id = 0, lock = MainLWLockArray; id < numLocks; id++, lock++)
			LWLockInitialize(&lock->lock, 0);

		/*
		 * Initialize the dynamic-allocation counters, which are stored just
		 * before the first LWLock.  LWLockCounter[0] is the allocation
		 * counter for lwlocks, LWLockCounter[1] is the maximum number that
		 * can be allocated from the main array, and LWLockCounter[2] is the
		 * allocation counter for tranches.
		 */
		LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int));
		LWLockCounter[0] = NUM_FIXED_LWLOCKS;
		LWLockCounter[1] = numLocks;
		LWLockCounter[2] = 1;	/* 0 is the main array */
	}

	if (LWLockTrancheArray == NULL)
	{
		LWLockTranchesAllocated = 16;
		LWLockTrancheArray = (LWLockTranche **)
			MemoryContextAlloc(TopMemoryContext,
						  LWLockTranchesAllocated * sizeof(LWLockTranche *));
	}

	MainLWLockTranche.name = "main";
	MainLWLockTranche.array_base = MainLWLockArray;
	MainLWLockTranche.array_stride = sizeof(LWLockPadded);
	LWLockRegisterTranche(0, &MainLWLockTranche);
}
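
The three-int header that Example #2 lays out in front of MainLWLockArray is
consumed when extensions grab extra locks at runtime. A rough sketch of that
consumer, modeled on the 9.5-era LWLockAssign() (details vary by release):

/*
 * Allocate a dynamically-assigned LWLock from the main array, bumping the
 * counter that CreateLWLocks() above initialized to NUM_FIXED_LWLOCKS.
 */
LWLock *
LWLockAssign(void)
{
	LWLock	   *result;
	int		   *LWLockCounter;

	LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int));
	SpinLockAcquire(ShmemLock);
	if (LWLockCounter[0] >= LWLockCounter[1])
	{
		SpinLockRelease(ShmemLock);
		elog(ERROR, "no more LWLocks available");
	}
	result = &MainLWLockArray[LWLockCounter[0]++].lock;
	SpinLockRelease(ShmemLock);
	return result;
}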
Example #3
/*
 * IPCMessageBrokerShmemInit
 *		Allocate and initialize the message broker's shared hash table,
 *		LWLocks, and tranche.
 */
void
IPCMessageBrokerShmemInit(void)
{
	bool found;
	Size size = IPCMessageBrokerShmemSize();

	LWLockAcquire(IPCMessageBrokerIndexLock, LW_EXCLUSIVE);

	broker_meta = ShmemInitStruct("BrokerMeta", size, &found);

	if (!found)
	{
		HASHCTL ctl;
		int i;

		MemSet(broker_meta, 0, size);

		pg_atomic_init_flag(&broker_meta->waiting);

		MemSet(&ctl, 0, sizeof(HASHCTL));

		ctl.keysize = sizeof(Oid);
		ctl.entrysize = broker_db_meta_size;
		ctl.hash = oid_hash;

		broker_meta->db_meta_hash = ShmemInitHash("BrokerDBMetaHash", 4, 16,
												  &ctl,
												  HASH_ELEM | HASH_FUNCTION);

		/* Initialize LWLocks that we'll use for all IPC queues */
		broker_meta->tranche_id = LWLockNewTrancheId();

		broker_meta->tranche.name = "BrokerMetaLWLocks";
		broker_meta->tranche.array_base = (void *) broker_meta->locks;
		broker_meta->tranche.array_stride = sizeof(lw_lock_slot);

		for (i = 0; i < max_worker_processes; i++)
		{
			lw_lock_slot *slot = &broker_meta->locks[i];
			LWLockInitialize(&slot->lock, broker_meta->tranche_id);
			slot->dbid = InvalidOid;
		}
	}

	LWLockRelease(IPCMessageBrokerIndexLock);

	LWLockRegisterTranche(broker_meta->tranche_id, &broker_meta->tranche);
}
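
Example #3 (from PipelineDB) shows the dynamic-ID pattern: on first
initialization LWLockNewTrancheId() hands out a fresh ID, which is stored in
shared memory; LWLockRegisterTranche() is deliberately left outside the !found
block so that backends which find the struct already set up still register the
tranche in their local registry. A hypothetical caller (names assumed, not
from the project) would then look up and hold a per-database lock slot:

/* Hypothetical helper: find the lock slot owned by a database and hold it
 * exclusively while that database's queues are manipulated. */
static void
lock_broker_db(Oid dbid)
{
	int			i;

	for (i = 0; i < max_worker_processes; i++)
	{
		lw_lock_slot *slot = &broker_meta->locks[i];

		if (slot->dbid == dbid)
		{
			LWLockAcquire(&slot->lock, LW_EXCLUSIVE);
			return;
		}
	}
	elog(ERROR, "no broker lock slot for database %u", dbid);
}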
Example #4
/*
 * Allocate and initialize shared memory for replication slots.
 */
void
ReplicationSlotsShmemInit(void)
{
	bool		found;

	if (max_replication_slots == 0)
		return;

	ReplicationSlotCtl = (ReplicationSlotCtlData *)
		ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
						&found);

	ReplSlotIOLWLockTranche.name = "Replication Slot IO Locks";
	ReplSlotIOLWLockTranche.array_base =
		((char *) ReplicationSlotCtl) +
		offsetof(ReplicationSlotCtlData, replication_slots) +
		offsetof(ReplicationSlot, io_in_progress_lock);
	ReplSlotIOLWLockTranche.array_stride = sizeof(ReplicationSlot);
	LWLockRegisterTranche(LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
						  &ReplSlotIOLWLockTranche);

	if (!found)
	{
		int			i;

		/* First time through, so initialize */
		MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());

		for (i = 0; i < max_replication_slots; i++)
		{
			ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];

			/* everything else is zeroed by the memset above */
			SpinLockInit(&slot->mutex);
			LWLockInitialize(&slot->io_in_progress_lock,
							 LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS);
		}
	}
}
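
This 9.6-era variant registers an LWLockTranche struct instead of a name;
array_base and array_stride let the lock machinery map a lock address back to
its index for reporting. The offsetof arithmetic points array_base at the
first slot's embedded io_in_progress_lock, so stepping by
sizeof(ReplicationSlot) lands on each subsequent one. A hypothetical sanity
check (not in the original) makes the invariant explicit:

/* For every slot i, the tranche bookkeeping expects
 * array_base + i * array_stride == &replication_slots[i].io_in_progress_lock */
static void
check_slot_tranche_layout(void)
{
	int			i;

	for (i = 0; i < max_replication_slots; i++)
	{
		char	   *expected = (char *) ReplSlotIOLWLockTranche.array_base +
			i * ReplSlotIOLWLockTranche.array_stride;

		Assert(expected == (char *)
			   &ReplicationSlotCtl->replication_slots[i].io_in_progress_lock);
	}
}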
Example #5
/*
 * Initialize shared buffer pool
 *
 * This is called once during shared-memory initialization (either in the
 * postmaster, or in a standalone backend).
 */
void
InitBufferPool(void)
{
	bool		foundBufs,
				foundDescs,
				foundIOLocks,
				foundBufCkpt;

	/* Align descriptors to a cacheline boundary. */
	BufferDescriptors = (BufferDescPadded *)
		ShmemInitStruct("Buffer Descriptors",
						NBuffers * sizeof(BufferDescPadded),
						&foundDescs);

	BufferBlocks = (char *)
		ShmemInitStruct("Buffer Blocks",
						NBuffers * (Size) BLCKSZ, &foundBufs);

	/* Align lwlocks to cacheline boundary */
	BufferIOLWLockArray = (LWLockMinimallyPadded *)
		ShmemInitStruct("Buffer IO Locks",
						NBuffers * (Size) sizeof(LWLockMinimallyPadded),
						&foundIOLocks);

	LWLockRegisterTranche(LWTRANCHE_BUFFER_IO_IN_PROGRESS, "buffer_io");
	LWLockRegisterTranche(LWTRANCHE_BUFFER_CONTENT, "buffer_content");

	/*
	 * The array used to sort to-be-checkpointed buffer ids is located in
	 * shared memory, to avoid having to allocate significant amounts of
	 * memory at runtime. As that'd be in the middle of a checkpoint, or when
	 * the checkpointer is restarted, memory allocation failures would be
	 * painful.
	 */
	CkptBufferIds = (CkptSortItem *)
		ShmemInitStruct("Checkpoint BufferIds",
						NBuffers * sizeof(CkptSortItem), &foundBufCkpt);

	if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
	{
		/* should find all of these, or none of them */
		Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
		/* note: this path is only taken in EXEC_BACKEND case */
	}
	else
	{
		int			i;

		/*
		 * Initialize all the buffer headers.
		 */
		for (i = 0; i < NBuffers; i++)
		{
			BufferDesc *buf = GetBufferDescriptor(i);

			CLEAR_BUFFERTAG(buf->tag);

			pg_atomic_init_u32(&buf->state, 0);
			buf->wait_backend_pid = 0;

			buf->buf_id = i;

			/*
			 * Initially link all the buffers together as unused. Subsequent
			 * management of this list is done by freelist.c.
			 */
			buf->freeNext = i + 1;

			LWLockInitialize(BufferDescriptorGetContentLock(buf),
							 LWTRANCHE_BUFFER_CONTENT);

			LWLockInitialize(BufferDescriptorGetIOLock(buf),
							 LWTRANCHE_BUFFER_IO_IN_PROGRESS);
		}

		/* Correct last entry of linked list */
		GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
	}

	/* Init other shared buffer-management stuff */
	StrategyInitialize(!foundDescs);

	/* Initialize per-backend file flush context */
	WritebackContextInit(&BackendWritebackContext,
						 &backend_flush_after);
}
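
Example #5 is the PostgreSQL 10 form of InitBufferPool(): I/O locks live in a
separate minimally-padded array, content locks are embedded in the buffer
descriptors, and both tranches are registered by name. The mapping from a
descriptor to its two locks is done by accessor macros, roughly as defined in
buf_internals.h of the same era:

/* I/O lock: indexed into the separate array by buffer id.
 * Content lock: embedded directly in the descriptor. */
#define BufferDescriptorGetIOLock(bdesc) \
	(&(BufferIOLWLockArray[(bdesc)->buf_id]).lock)
#define BufferDescriptorGetContentLock(bdesc) \
	((LWLock *) (&(bdesc)->content_lock))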
Example #6
/*
 * Initialize shared buffer pool
 *
 * This is called once during shared-memory initialization (either in the
 * postmaster, or in a standalone backend).
 */
void
InitBufferPool(void)
{
	bool		foundBufs,
				foundDescs,
				foundIOLocks;

	/* Align descriptors to a cacheline boundary. */
	BufferDescriptors = (BufferDescPadded *)
		CACHELINEALIGN(
					   ShmemInitStruct("Buffer Descriptors",
									   NBuffers * sizeof(BufferDescPadded)
									   + PG_CACHE_LINE_SIZE,
									   &foundDescs));

	BufferBlocks = (char *)
		ShmemInitStruct("Buffer Blocks",
						NBuffers * (Size) BLCKSZ, &foundBufs);

	/* Align lwlocks to cacheline boundary */
	BufferIOLWLockArray = (LWLockMinimallyPadded *)
		CACHELINEALIGN(ShmemInitStruct("Buffer IO Locks",
							  NBuffers * (Size) sizeof(LWLockMinimallyPadded)
									   + PG_CACHE_LINE_SIZE,
									   &foundIOLocks));

	BufferIOLWLockTranche.name = "buffer_io";
	BufferIOLWLockTranche.array_base = BufferIOLWLockArray;
	BufferIOLWLockTranche.array_stride = sizeof(LWLockMinimallyPadded);
	LWLockRegisterTranche(LWTRANCHE_BUFFER_IO_IN_PROGRESS,
						  &BufferIOLWLockTranche);

	BufferContentLWLockTranche.name = "buffer_content";
	BufferContentLWLockTranche.array_base =
		((char *) BufferDescriptors) + offsetof(BufferDesc, content_lock);
	BufferContentLWLockTranche.array_stride = sizeof(BufferDescPadded);
	LWLockRegisterTranche(LWTRANCHE_BUFFER_CONTENT,
						  &BufferContentLWLockTranche);

	if (foundDescs || foundBufs || foundIOLocks)
	{
		/* should find all of these, or none of them */
		Assert(foundDescs && foundBufs && foundIOLocks);
		/* note: this path is only taken in EXEC_BACKEND case */
	}
	else
	{
		int			i;

		/*
		 * Initialize all the buffer headers.
		 */
		for (i = 0; i < NBuffers; i++)
		{
			BufferDesc *buf = GetBufferDescriptor(i);

			CLEAR_BUFFERTAG(buf->tag);
			buf->flags = 0;
			buf->usage_count = 0;
			buf->refcount = 0;
			buf->wait_backend_pid = 0;

			SpinLockInit(&buf->buf_hdr_lock);

			buf->buf_id = i;

			/*
			 * Initially link all the buffers together as unused. Subsequent
			 * management of this list is done by freelist.c.
			 */
			buf->freeNext = i + 1;

			LWLockInitialize(BufferDescriptorGetContentLock(buf),
							 LWTRANCHE_BUFFER_CONTENT);

			LWLockInitialize(BufferDescriptorGetIOLock(buf),
							 LWTRANCHE_BUFFER_IO_IN_PROGRESS);
		}

		/* Correct last entry of linked list */
		GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
	}

	/* Init other shared buffer-management stuff */
	StrategyInitialize(!foundDescs);
}
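
Unlike Example #5, this 9.6 variant cannot assume that ShmemInitStruct()
returns cacheline-aligned memory, so it over-allocates by PG_CACHE_LINE_SIZE
and rounds the pointer up itself. The macros involved look roughly like this
(see c.h and pg_config_manual.h for the authoritative definitions):

/* PG_CACHE_LINE_SIZE is an upper bound on the cache-line size of common
 * hardware; over-allocating by that much guarantees the rounded-up pointer
 * still lies inside the allocation. */
#define PG_CACHE_LINE_SIZE	128
#define TYPEALIGN(ALIGNVAL, LEN) \
	(((uintptr_t) (LEN) + ((ALIGNVAL) - 1)) & ~((uintptr_t) ((ALIGNVAL) - 1)))
#define CACHELINEALIGN(LEN)	TYPEALIGN(PG_CACHE_LINE_SIZE, LEN)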
Example #7
void
SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
			  LWLock *ctllock, const char *subdir, int tranche_id)
{
	SlruShared	shared;
	bool		found;

	shared = (SlruShared) ShmemInitStruct(name,
										  SimpleLruShmemSize(nslots, nlsns),
										  &found);

	if (!IsUnderPostmaster)
	{
		/* Initialize locks and shared memory area */
		char	   *ptr;
		Size		offset;
		int			slotno;

		Assert(!found);

		memset(shared, 0, sizeof(SlruSharedData));

		shared->ControlLock = ctllock;

		shared->num_slots = nslots;
		shared->lsn_groups_per_page = nlsns;

		shared->cur_lru_count = 0;

		/* shared->latest_page_number will be set later */

		ptr = (char *) shared;
		offset = MAXALIGN(sizeof(SlruSharedData));
		shared->page_buffer = (char **) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(char *));
		shared->page_status = (SlruPageStatus *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(SlruPageStatus));
		shared->page_dirty = (bool *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(bool));
		shared->page_number = (int *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(int));
		shared->page_lru_count = (int *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(int));

		if (nlsns > 0)
		{
			shared->group_lsn = (XLogRecPtr *) (ptr + offset);
			offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
		}

		/* Initialize LWLocks */
		shared->buffer_locks = (LWLockPadded *) ShmemAlloc(sizeof(LWLockPadded) * nslots);

		Assert(strlen(name) + 1 < SLRU_MAX_NAME_LENGTH);
		strlcpy(shared->lwlock_tranche_name, name, SLRU_MAX_NAME_LENGTH);
		shared->lwlock_tranche_id = tranche_id;

		ptr += BUFFERALIGN(offset);
		for (slotno = 0; slotno < nslots; slotno++)
		{
			LWLockInitialize(&shared->buffer_locks[slotno].lock,
							 shared->lwlock_tranche_id);

			shared->page_buffer[slotno] = ptr;
			shared->page_status[slotno] = SLRU_PAGE_EMPTY;
			shared->page_dirty[slotno] = false;
			shared->page_lru_count[slotno] = 0;
			ptr += BLCKSZ;
		}
	}
	else
		Assert(found);

	/* Register SLRU tranche in the main tranches array */
	LWLockRegisterTranche(shared->lwlock_tranche_id,
						  shared->lwlock_tranche_name);

	/*
	 * Initialize the unshared control struct, including directory path. We
	 * assume caller set PagePrecedes.
	 */
	ctl->shared = shared;
	ctl->do_fsync = true;		/* default behavior */
	StrNCpy(ctl->Dir, subdir, sizeof(ctl->Dir));
}
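
Note that Example #7 copies the tranche name into the shared SlruSharedData
itself; that is what lets the LWLockRegisterTranche() call at the bottom run
in every process, including EXEC_BACKEND children that skipped the
initialization branch. A representative caller, based on clog.c from the same
era:

void
CLOGShmemInit(void)
{
	ClogCtl->PagePrecedes = CLOGPagePrecedes;
	SimpleLruInit(ClogCtl, "clog", CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE,
				  CLogControlLock, "pg_clog", LWTRANCHE_CLOG_BUFFERS);
}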
Example #8
/*
 * BackendManagementShmemInit is the callback registered as the shared-memory
 * startup hook.  It sets up the shared memory segment needed by the backend
 * manager.
 */
static void
BackendManagementShmemInit(void)
{
	bool alreadyInitialized = false;

	/* we may update the shmem, so acquire the lock exclusively */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	backendManagementShmemData =
		(BackendManagementShmemData *) ShmemInitStruct(
			"Backend Management Shmem",
			BackendManagementShmemSize(),
			&alreadyInitialized);

	if (!alreadyInitialized)
	{
		int backendIndex = 0;
		char *trancheName = "Backend Management Tranche";

#if (PG_VERSION_NUM >= 100000)
		NamedLWLockTranche *namedLockTranche =
			&backendManagementShmemData->namedLockTranche;

#else
		LWLockTranche *lockTranche = &backendManagementShmemData->lockTranche;
#endif

		/* start by zeroing out all the memory */
		memset(backendManagementShmemData, 0,
			   BackendManagementShmemSize());

#if (PG_VERSION_NUM >= 100000)
		namedLockTranche->trancheId = LWLockNewTrancheId();

		LWLockRegisterTranche(namedLockTranche->trancheId, trancheName);
		LWLockInitialize(&backendManagementShmemData->lock,
						 namedLockTranche->trancheId);
#else
		backendManagementShmemData->trancheId = LWLockNewTrancheId();

		/* we only need a single lock */
		lockTranche->array_base = &backendManagementShmemData->lock;
		lockTranche->array_stride = sizeof(LWLock);
		lockTranche->name = trancheName;

		LWLockRegisterTranche(backendManagementShmemData->trancheId, lockTranche);
		LWLockInitialize(&backendManagementShmemData->lock,
						 backendManagementShmemData->trancheId);
#endif

		/* start the distributed transaction ids from 1 */
		pg_atomic_init_u64(&backendManagementShmemData->nextTransactionNumber, 1);

		/*
		 * We need to initialize each backend's spinlock before any backend
		 * starts executing.
		 */
		for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
		{
			SpinLockInit(&backendManagementShmemData->backends[backendIndex].mutex);
		}
	}

	LWLockRelease(AddinShmemInitLock);

	if (prev_shmem_startup_hook != NULL)
	{
		prev_shmem_startup_hook();
	}
}
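
Example #8 (from Citus) holds AddinShmemInitLock for the whole initialization,
as extension code must, and chains to prev_shmem_startup_hook afterwards. A
minimal sketch of how such a hook is typically wired up from _PG_init() (this
wiring is assumed; it is not shown in the original):

static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

void
_PG_init(void)
{
	/* Reserve our shared memory before the postmaster sizes the segment. */
	RequestAddinShmemSpace(BackendManagementShmemSize());

	/* Chain our initializer after any previously installed hook. */
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = BackendManagementShmemInit;
}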
Example #9
File: pipe.c Project: orafce/orafce
bool
ora_lock_shmem(size_t size, int max_pipes, int max_events, int max_locks, bool reset)
{
	int i;
	bool found;

	sh_memory *sh_mem;

	if (pipes == NULL)
	{
		sh_mem = ShmemInitStruct("dbms_pipe", size, &found);
		if (sh_mem == NULL)
			ereport(FATAL,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory"),
					 errdetail("Failed while allocation block %lu bytes in shared memory.", (unsigned long) size)));

		if (!found)
		{

#if PG_VERSION_NUM >= 90600

			sh_mem->tranche_id = LWLockNewTrancheId();
			LWLockInitialize(&sh_mem->shmem_lock, sh_mem->tranche_id);

			{

#if PG_VERSION_NUM >= 100000

				LWLockRegisterTranche(sh_mem->tranche_id, "orafce");

#else

				static LWLockTranche tranche;

				tranche.name = "orafce";
				tranche.array_base = &sh_mem->shmem_lock;
				tranche.array_stride = sizeof(LWLock);
				LWLockRegisterTranche(sh_mem->tranche_id, &tranche);

#endif

				shmem_lockid = &sh_mem->shmem_lock;
			}

#else

			shmem_lockid = sh_mem->shmem_lockid = LWLockAssign();

#endif

			LWLockAcquire(shmem_lockid, LW_EXCLUSIVE);

			sh_mem->size = size - sh_memory_size;
			ora_sinit(sh_mem->data, size, true);
			pipes = sh_mem->pipes = ora_salloc(max_pipes*sizeof(pipe));
			sid = sh_mem->sid = 1;
			for (i = 0; i < max_pipes; i++)
				pipes[i].is_valid = false;

			events = sh_mem->events = ora_salloc(max_events*sizeof(alert_event));
			locks = sh_mem->locks = ora_salloc(max_locks*sizeof(alert_lock));

			for (i = 0; i < max_events; i++)
			{
				events[i].event_name = NULL;
				events[i].max_receivers = 0;
				events[i].receivers = NULL;
				events[i].messages = NULL;
			}
			for (i = 0; i < max_locks; i++)
			{
				locks[i].sid = -1;
				locks[i].echo = NULL;
			}

		}
		else if (pipes == NULL)	/* attach to an already-initialized segment */
		{

#if PG_VERSION_NUM >= 90600


#if PG_VERSION_NUM >= 100000

			LWLockRegisterTranche(sh_mem->tranche_id, "orafce");

#else

			static LWLockTranche tranche;

			tranche.name = "orafce";
			tranche.array_base = &sh_mem->shmem_lock;
			tranche.array_stride = sizeof(LWLock);
			LWLockRegisterTranche(sh_mem->tranche_id, &tranche);

#endif

			shmem_lockid = &sh_mem->shmem_lock;

#else

			shmem_lockid = sh_mem->shmem_lockid;

#endif

			pipes = sh_mem->pipes;
			LWLockAcquire(shmem_lockid, LW_EXCLUSIVE);

			ora_sinit(sh_mem->data, sh_mem->size, reset);
			sid = ++(sh_mem->sid);
			events = sh_mem->events;
			locks = sh_mem->locks;
		}
	}
	else
	{
		LWLockAcquire(shmem_lockid, LW_EXCLUSIVE);
	}

	return pipes != NULL;
}
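
Example #9 (orafce) spans three API generations in one function: LWLockAssign()
before 9.6, a static LWLockTranche struct on 9.6, and the name-string form on
10 and later. On success the function returns with shmem_lockid held, so a
hypothetical caller (the constant names here are assumed, not from the
project) must pair it with a release:

/* Hypothetical usage sketch: ora_lock_shmem() leaves shmem_lockid held on
 * success, so every call site must release it when done. */
static void
with_pipe_shmem(void)
{
	if (!ora_lock_shmem(SHMEM_SIZE, MAX_PIPES, MAX_EVENTS, MAX_LOCKS, false))
		ereport(ERROR,
				(errmsg("dbms_pipe: shared memory is not initialized")));

	/* ... work with pipes[], events[] and locks[] here ... */

	LWLockRelease(shmem_lockid);
}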