Example #1
0
/*
 * Allocate shmem space for LWLocks and initialize the locks.
 */
void
CreateLWLocks(void)
{
	int			numLocks = NumLWLocks();
	uint32		spaceLocks = LWLockShmemSize();
	LWLock	   *lock;
	int			id;

	/* Allocate space */
	LWLockArray = (LWLock *) ShmemAlloc(spaceLocks);

	/*
	 * Initialize all LWLocks to "unlocked" state
	 */
	for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
	{
		SpinLockInit(&lock->mutex);
		lock->releaseOK = true;
		lock->exclusive = 0;
		lock->shared = 0;
		lock->head = NULL;
		lock->tail = NULL;
	}

	/*
	 * Initialize the dynamic-allocation counter at the end of the array
	 */
	LWLockCounter = (int *) lock;
	LWLockCounter[0] = (int) NumFixedLWLocks;
	LWLockCounter[1] = numLocks;
}
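A minimal usage sketch (an addition, not quoted from this example): the dynamic-allocation counter laid out above is meant to be consumed by an allocator that hands out the locks between NumFixedLWLocks and numLocks. Serializing the counter with ShmemLock is an assumption here, not something this snippet shows.

LWLock *
LWLockAssign(void)
{
	LWLock	   *lock;

	/* assumed: ShmemLock serializes access to the shared counter */
	SpinLockAcquire(ShmemLock);
	if (LWLockCounter[0] >= LWLockCounter[1])
	{
		SpinLockRelease(ShmemLock);
		elog(ERROR, "no more LWLocks available");
	}
	lock = LWLockArray + LWLockCounter[0]++;
	SpinLockRelease(ShmemLock);
	return lock;
}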
Example #2
0
/*
 * ApplyLauncherShmemInit
 *		Allocate and initialize replication launcher shared memory
 */
void
ApplyLauncherShmemInit(void)
{
	bool		found;

	LogicalRepCtx = (LogicalRepCtxStruct *)
		ShmemInitStruct("Logical Replication Launcher Data",
						ApplyLauncherShmemSize(),
						&found);

	if (!found)
	{
		int			slot;

		memset(LogicalRepCtx, 0, ApplyLauncherShmemSize());

		/* Initialize memory and spin locks for each worker slot. */
		for (slot = 0; slot < max_logical_replication_workers; slot++)
		{
			LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];

			memset(worker, 0, sizeof(LogicalRepWorker));
			SpinLockInit(&worker->relmutex);
		}
	}
}
Example #3
0
/*
 * Allocate and initialize walsender-related shared memory.
 */
void
ReplicationSlotsShmemInit(void)
{
	bool		found;

	if (max_replication_slots == 0)
		return;

	ReplicationSlotCtl = (ReplicationSlotCtlData *)
		ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
						&found);

	LWLockRegisterTranche(LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
						  "replication_slot_io");

	if (!found)
	{
		int			i;

		/* First time through, so initialize */
		MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());

		for (i = 0; i < max_replication_slots; i++)
		{
			ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];

			/* everything else is zeroed by the memset above */
			SpinLockInit(&slot->mutex);
			LWLockInitialize(&slot->io_in_progress_lock, LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS);
			ConditionVariableInit(&slot->active_cv);
		}
	}
}
Example #4
0
/*
 * Allocate and initialize walsender-related shared memory.
 */
void
ReplicationSlotsShmemInit(void)
{
	bool		found;

	if (max_replication_slots == 0)
		return;

	ReplicationSlotCtl = (ReplicationSlotCtlData *)
		ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
						&found);

	if (!found)
	{
		int			i;

		/* First time through, so initialize */
		MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());

		for (i = 0; i < max_replication_slots; i++)
		{
			ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];

			/* everything else is zeroed by the memset above */
			SpinLockInit(&slot->mutex);
			slot->io_in_progress_lock = LWLockAssign();
		}
	}
}
Example #5
0
/*
 * LatchShmemInit
 *		Allocate and initialize shared memory needed for latches
 */
void
LatchShmemInit(void)
{
	Size		size = LatchShmemSize();
	bool		found;

	sharedHandles = ShmemInitStruct("SharedEventHandles", size, &found);

	/* If we're first, initialize the struct and allocate handles */
	if (!found)
	{
		int i;
		SECURITY_ATTRIBUTES sa;

		/*
		 * Set up security attributes to specify that the events are
		 * inherited.
		 */
		ZeroMemory(&sa, sizeof(sa));
		sa.nLength = sizeof(sa);
		sa.bInheritHandle = TRUE;

		SpinLockInit(&sharedHandles->mutex);
		sharedHandles->maxhandles = NumSharedLatches();
		sharedHandles->nfreehandles = sharedHandles->maxhandles;
		for (i = 0; i < sharedHandles->maxhandles; i++)
		{
			sharedHandles->handles[i] = CreateEvent(&sa, TRUE, FALSE, NULL);
			if (sharedHandles->handles[i] == NULL)
				elog(ERROR, "CreateEvent failed: error code %d", (int) GetLastError());
		}
	}
}
Example #6
0
/*
 * Place platform-specific startup hacks here.  This is the right
 * place to put code that must be executed early in the launch of any new
 * server process.  Note that this code will NOT be executed when a backend
 * or sub-bootstrap process is forked, unless we are in a fork/exec
 * environment (ie EXEC_BACKEND is defined).
 *
 * XXX The need for code here is proof that the platform in question
 * is too brain-dead to provide a standard C execution environment
 * without help.  Avoid adding more here, if you can.
 */
static void
startup_hacks(const char *progname)
{
	/*
	 * Windows-specific execution environment hacking.
	 */
#ifdef WIN32
	{
		WSADATA		wsaData;
		int			err;

		/* Make output streams unbuffered by default */
		setvbuf(stdout, NULL, _IONBF, 0);
		setvbuf(stderr, NULL, _IONBF, 0);

		/* Prepare Winsock */
		err = WSAStartup(MAKEWORD(2, 2), &wsaData);
		if (err != 0)
		{
			write_stderr("%s: WSAStartup failed: %d\n",
						 progname, err);
			exit(1);
		}

		/* In case of general protection fault, don't show GUI popup box */
		SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
	}
#endif   /* WIN32 */

	/*
	 * Initialize dummy_spinlock, in case we are on a platform where we have
	 * to use the fallback implementation of pg_memory_barrier().
	 */
	SpinLockInit(&dummy_spinlock);
}
Example #7
0
/*
 * CreateSharedInvalidationState
 *		Create and initialize the SI message buffer
 */
void
CreateSharedInvalidationState(void)
{
	int			i;
	bool		found;

	/* Allocate space in shared memory */
	shmInvalBuffer = (SISeg *)
		ShmemInitStruct("shmInvalBuffer", SInvalShmemSize(), &found);
	if (found)
		return;

	/* Clear message counters, save size of procState array, init spinlock */
	shmInvalBuffer->minMsgNum = 0;
	shmInvalBuffer->maxMsgNum = 0;
	shmInvalBuffer->nextThreshold = CLEANUP_MIN;
	shmInvalBuffer->lastBackend = 0;
	shmInvalBuffer->maxBackends = MaxBackends;
	SpinLockInit(&shmInvalBuffer->msgnumLock);

	/* The buffer[] array is initially all unused, so we need not fill it */

	/* Mark all backends inactive, and initialize nextLXID */
	for (i = 0; i < shmInvalBuffer->maxBackends; i++)
	{
		shmInvalBuffer->procState[i].procPid = 0;		/* inactive */
		shmInvalBuffer->procState[i].proc = NULL;
		shmInvalBuffer->procState[i].nextMsgNum = 0;	/* meaningless */
		shmInvalBuffer->procState[i].resetState = false;
		shmInvalBuffer->procState[i].signaled = false;
		shmInvalBuffer->procState[i].hasMessages = false;
		shmInvalBuffer->procState[i].nextLXID = InvalidLocalTransactionId;
	}
}
Example #8
0
/*
 * Initialize a space for temporary files that can be opened for read-only
 * access by other backends.  Other backends must attach to it before
 * accessing it.  Associate this SharedFileSet with 'seg'.  Any contained
 * files will be deleted when the last backend detaches.
 *
 * Files will be distributed over the tablespaces configured in
 * temp_tablespaces.
 *
 * Under the covers the set is one or more directories which will eventually
 * be deleted when there are no backends attached.
 */
void
SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
{
	static uint32 counter = 0;

	SpinLockInit(&fileset->mutex);
	fileset->refcnt = 1;
	fileset->creator_pid = MyProcPid;
	fileset->number = counter;
	counter = (counter + 1) % INT_MAX;

	/* Capture the tablespace OIDs so that all backends agree on them. */
	PrepareTempTablespaces();
	fileset->ntablespaces =
		GetTempTablespaces(&fileset->tablespaces[0],
						   lengthof(fileset->tablespaces));
	if (fileset->ntablespaces == 0)
	{
		fileset->tablespaces[0] = DEFAULTTABLESPACE_OID;
		fileset->ntablespaces = 1;
	}

	/* Register our cleanup callback. */
	on_dsm_detach(seg, SharedFileSetOnDetach, PointerGetDatum(fileset));
}
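A hedged sketch of the attach side that the comment above requires of other backends: bump the refcount under the spinlock, fail if the creator's cleanup has already run, and register the same detach callback. The error wording is illustrative.

void
SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
{
	bool		success;

	SpinLockAcquire(&fileset->mutex);
	if (fileset->refcnt == 0)
		success = false;		/* too late: the set is already being deleted */
	else
	{
		++fileset->refcnt;
		success = true;
	}
	SpinLockRelease(&fileset->mutex);

	if (!success)
		elog(ERROR, "could not attach to a SharedFileSet that is already destroyed");

	/* mirror the creator's cleanup registration */
	on_dsm_detach(seg, SharedFileSetOnDetach, PointerGetDatum(fileset));
}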
Example #9
0
/*
 * Allocate a new hashtable entry.
 * caller must hold an exclusive lock on pgss->lock
 *
 * Note: despite needing exclusive lock, it's not an error for the target
 * entry to already exist.	This is because pgss_store releases and
 * reacquires lock after failing to find a match; so someone else could
 * have made the entry while we waited to get exclusive lock.
 */
static pgssEntry *
entry_alloc(pgssHashKey *key)
{
	pgssEntry  *entry;
	bool		found;

	/* Caller must have clipped query properly */
	Assert(key->query_len < pgss->query_size);

	/* Make space if needed */
	while (hash_get_num_entries(pgss_hash) >= pgss_max)
		entry_dealloc();

	/* Find or create an entry with desired hash code */
	entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);

	if (!found)
	{
		/* New entry, initialize it */

		/* dynahash tried to copy the key for us, but must fix query_ptr */
		entry->key.query_ptr = entry->query;
		/* reset the statistics */
		memset(&entry->counters, 0, sizeof(Counters));
		entry->counters.usage = USAGE_INIT;
		/* re-initialize the mutex each time ... we assume no one is using it */
		SpinLockInit(&entry->mutex);
		/* ... and don't forget the query text */
		memcpy(entry->query, key->query_ptr, key->query_len);
		entry->query[key->query_len] = '\0';
	}

	return entry;
}
Example #10
0
StreamBatch *StreamBatchCreate(Bitmapset *readers, int num_tuples)
{
	char *ptr = ShmemDynAlloc0(sizeof(StreamBatch) + BITMAPSET_SIZE(readers->nwords) + (bms_num_members(readers) * sizeof(int)));
	StreamBatch *batch = (StreamBatch *) ptr;
	int cq_id;
	int i = 0;

	batch->id = rand() ^ (int) MyProcPid;
	batch->num_tups = num_tuples;
	batch->num_wtups = bms_num_members(readers) * num_tuples;
	SpinLockInit(&batch->mutex);

	ptr += sizeof(StreamBatch);
	batch->readers = (Bitmapset *) ptr;
	memcpy(batch->readers, readers, BITMAPSET_SIZE(readers->nwords));

	ptr += BITMAPSET_SIZE(readers->nwords);
	batch->proc_runs = (int *) ptr;

	readers = bms_copy(readers);
	while ((cq_id = bms_first_member(readers)) != -1)
	{
		CQProcEntry *pentry = GetCQProcEntry(cq_id);
		batch->proc_runs[i] = Max(pentry->proc_runs, pentry->pg_size);
		i++;
	}
	pfree(readers);

	return batch;
}
Example #11
0
/*
 *	InitShmemAllocation() --- set up shared-memory space allocation.
 *
 * This should be called only in the postmaster or a standalone backend.
 */
void
InitShmemAllocation(void)
{
	PGShmemHeader *shmhdr = ShmemSegHdr;

	Assert(shmhdr != NULL);

	/*
	 * Initialize the spinlock used by ShmemAlloc.	We have to do the space
	 * allocation the hard way, since obviously ShmemAlloc can't be called
	 * yet.
	 */
	ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
	shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
	Assert(shmhdr->freeoffset <= shmhdr->totalsize);

	SpinLockInit(ShmemLock);

	/* ShmemIndex can't be set up yet (need LWLocks first) */
	shmhdr->index = NULL;
	ShmemIndex = (HTAB *) NULL;

	/*
	 * Initialize ShmemVariableCache for transaction manager. (This doesn't
	 * really belong here, but not worth moving.)
	 */
	ShmemVariableCache = (VariableCache)
		ShmemAlloc(sizeof(*ShmemVariableCache));
	memset(ShmemVariableCache, 0, sizeof(*ShmemVariableCache));
}
Example #12
0
/* Allocate and initialize walsender-related shared memory */
void
WalSndShmemInit(void)
{
	bool		found;
	int			i;

	WalSndCtl = (WalSndCtlData *)
		ShmemInitStruct("Wal Sender Ctl", WalSndShmemSize(), &found);

	if (!found)
	{
		/* First time through, so initialize */
		MemSet(WalSndCtl, 0, WalSndShmemSize());

		SHMQueueInit(&(WalSndCtl->SyncRepQueue));

		for (i = 0; i < max_wal_senders; i++)
		{
			WalSnd	   *walsnd = &WalSndCtl->walsnds[i];

			SpinLockInit(&walsnd->mutex);
			InitSharedLatch(&walsnd->latch);
		}
	}
}
Example #13
0
FSTATUS
MulticastInitialize(void)
{
	FSTATUS Status;
	
	_DBG_ENTER_LVL(_DBG_LVL_FUNC_TRACE, InitializeMulticast);

	McSdHandle = NULL;

	QListInit(&MasterMcGroupList);
	QListInit(&MasterMcClientList);
	SpinLockInitState(&MulticastLock);
	SpinLockInit(&MulticastLock);
	
	TimerInitState(&MaintenanceTimer);
	TimerInit(&MaintenanceTimer, McMaintenance, NULL);
	
	MaintenanceTimerActivated = FALSE;
	
	Status = iba_sd_register(&McSdHandle, NULL);
	if (Status != FSUCCESS)
	{
		McSdHandle = NULL;
		_DBG_ERROR(("Multicast Module Not Able To Register With Subnet Driver "
		            "Status = %d.\n", Status));
	}
	
	_DBG_LEAVE_LVL( _DBG_LVL_FUNC_TRACE );
	
	return Status;
}
Example #14
0
/* ----------------------------------------------------------------
 *		ExecBitmapHeapInitializeDSM
 *
 *		Set up a parallel bitmap heap scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node,
							ParallelContext *pcxt)
{
	ParallelBitmapHeapState *pstate;
	EState	   *estate = node->ss.ps.state;
	dsa_area   *dsa = node->ss.ps.state->es_query_dsa;

	/* If there's no DSA, there are no workers; initialize nothing. */
	if (dsa == NULL)
		return;

	pstate = shm_toc_allocate(pcxt->toc, node->pscan_len);

	pstate->tbmiterator = 0;
	pstate->prefetch_iterator = 0;

	/* Initialize the mutex */
	SpinLockInit(&pstate->mutex);
	pstate->prefetch_pages = 0;
	pstate->prefetch_target = 0;
	pstate->state = BM_INITIAL;

	ConditionVariableInit(&pstate->cv);
	SerializeSnapshot(estate->es_snapshot, pstate->phs_snapshot_data);

	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pstate);
	node->pstate = pstate;
}
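For contrast, a sketch of the worker side that consumes this descriptor, assuming the PostgreSQL 10-era signature: the lookup key matches the shm_toc_insert() call above, and the snapshot restore mirrors the leader's SerializeSnapshot().

void
ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node, shm_toc *toc)
{
	ParallelBitmapHeapState *pstate;
	Snapshot	snapshot;

	/* look up the state the leader published under the plan node id */
	pstate = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
	node->pstate = pstate;

	/* adopt the leader's snapshot */
	snapshot = RestoreSnapshot(pstate->phs_snapshot_data);
	heap_update_snapshot(node->ss.ss_currentScanDesc, snapshot);
}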
Example #15
0
/*
 * Initialize shared buffer pool
 *
 * This is called once during shared-memory initialization (either in the
 * postmaster, or in a standalone backend).
 */
void
InitBufferPool(void)
{
	bool		foundBufs,
				foundDescs;

	BufferDescriptors = (BufferDesc *)
		ShmemInitStruct("Buffer Descriptors",
						NBuffers * sizeof(BufferDesc), &foundDescs);

	BufferBlocks = (char *)
		ShmemInitStruct("Buffer Blocks",
						NBuffers * (Size) BLCKSZ, &foundBufs);

	if (foundDescs || foundBufs)
	{
		/* both should be present or neither */
		Assert(foundDescs && foundBufs);
		/* note: this path is only taken in EXEC_BACKEND case */
	}
	else
	{
		BufferDesc *buf;
		int			i;

		buf = BufferDescriptors;

		/*
		 * Initialize all the buffer headers.
		 */
		for (i = 0; i < NBuffers; buf++, i++)
		{
			CLEAR_BUFFERTAG(buf->tag);
			buf->flags = 0;
			buf->usage_count = 0;
			buf->refcount = 0;
			buf->wait_backend_pid = 0;

			SpinLockInit(&buf->buf_hdr_lock);

			buf->buf_id = i;

			/*
			 * Initially link all the buffers together as unused. Subsequent
			 * management of this list is done by freelist.c.
			 */
			buf->freeNext = i + 1;

			buf->io_in_progress_lock = LWLockAssign();
			buf->content_lock = LWLockAssign();
		}

		/* Correct last entry of linked list */
		BufferDescriptors[NBuffers - 1].freeNext = FREENEXT_END_OF_LIST;
	}

	/* Init other shared buffer-management stuff */
	StrategyInitialize(!foundDescs);
}
Example #16
0
inline static void fence()
{
	slock_t		fence;

	/* For the lack of a better method ... */
	SpinLockInit(&fence);
	SpinLockAcquire(&fence);
	SpinLockRelease(&fence);
}
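This acquire/release pair is the same trick behind the fallback pg_memory_barrier() that Examples #6 and #23 initialize dummy_spinlock for; a sketch, assuming the S_LOCK/S_UNLOCK spinlock primitives:

extern slock_t dummy_spinlock;

/* fallback barrier: a spinlock round trip implies a full memory barrier */
#define pg_memory_barrier() \
	do { \
		S_LOCK(&dummy_spinlock); \
		S_UNLOCK(&dummy_spinlock); \
	} while (0)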
Example #17
0
//
// Allocate and initialize the memory tracker object.
//
static __inline boolean
CreateMemTracker( void )
{
	MEM_TRACKER *tmp;

	if( pMemTracker )
		return TRUE;

	// Allocate the memory tracker object. Don't update global until we're done
	tmp = (MEM_TRACKER*)MEMORY_ALLOCATE_PRIV( sizeof(MEM_TRACKER), IBA_MEM_FLAG_LEGACY, TRK_TAG );

	if( !tmp )
		return FALSE;

	// Pre-initialize all objects in the memory tracker object.
	QListInitState( &tmp->AllocList );
	SpinLockInitState( &tmp->Lock );
	QListInitState( &tmp->FreeHrdList );

	// Initialize the list.
	if( !QListInit( &tmp->AllocList ) )
	{
		/* global isn't initialized, so don't call the Destroy func; clean up manually */
		MEMORY_DEALLOCATE_PRIV( tmp );
		return FALSE;
	}

	// Initialize the spin lock to protect list operations.
	if( !SpinLockInit( &tmp->Lock ) )
	{
		/* global isn't initialized, so don't call the Destroy func; clean up manually */
		QListDestroy( &tmp->AllocList );
		SpinLockDestroy( &tmp->Lock );
		MEMORY_DEALLOCATE_PRIV( tmp );
		return FALSE;
	}

	// Initialize the free list.
	if( !QListInit( &tmp->FreeHrdList ) )
	{
		/* global isn't initialized, so don't call the Destroy func; clean up manually */
		QListDestroy( &tmp->AllocList );
		SpinLockDestroy( &tmp->Lock );
		MEMORY_DEALLOCATE_PRIV( tmp );
		return FALSE;
	}

//	MsgOut( "\n\n\n*** Memory tracker object address = %p ***\n\n\n", tmp );
	MsgOut( "\n*** Memory tracker enabled ***\n" );

	/* NOW update the global */
	pMemTracker = tmp;

	return TRUE;
}
Example #18
0
/*
 * Initialize this barrier.  To use a static party size, provide the number of
 * participants to wait for at each phase indicating that that number of
 * backends is implicitly attached.  To use a dynamic party size, specify zero
 * here and then use BarrierAttach() and
 * BarrierDetach()/BarrierArriveAndDetach() to register and deregister
 * participants explicitly.
 */
void
BarrierInit(Barrier *barrier, int participants)
{
	SpinLockInit(&barrier->mutex);
	barrier->participants = participants;
	barrier->arrived = 0;
	barrier->phase = 0;
	barrier->elected = 0;
	barrier->static_party = participants > 0;
	ConditionVariableInit(&barrier->condition_variable);
}
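A usage sketch for the dynamic-party case described in the comment (participants == 0 at init); BarrierAttach/BarrierArriveAndWait/BarrierDetach are the registration calls the comment names, and the two work functions are placeholders.

static void
worker_run(Barrier *barrier)
{
	/* register as a participant; returns the phase we joined in */
	int			phase = BarrierAttach(barrier);

	do_parallel_work(phase);	/* placeholder */

	/* wait for the whole party; exactly one backend gets "true" back */
	if (BarrierArriveAndWait(barrier, 0))
		do_serial_phase_work();	/* placeholder */

	BarrierDetach(barrier);
}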
Example #19
0
/*
 * StrategyInitialize -- initialize the buffer cache replacement
 *		strategy.
 *
 * Assumes: All of the buffers are already built into a linked list.
 *		Only called by postmaster and only during initialization.
 */
void
StrategyInitialize(bool init)
{
	bool		found;

	/*
	 * Initialize the shared buffer lookup hashtable.
	 *
	 * Since we can't tolerate running out of lookup table entries, we must be
	 * sure to specify an adequate table size here.  The maximum steady-state
	 * usage is of course NBuffers entries, but BufferAlloc() tries to insert
	 * a new entry before deleting the old.  In principle this could be
	 * happening in each partition concurrently, so we could need as many as
	 * NBuffers + NUM_BUFFER_PARTITIONS entries.
	 */
	InitBufTable(NBuffers + NUM_BUFFER_PARTITIONS);

	/*
	 * Get or create the shared strategy control block
	 */
	StrategyControl = (BufferStrategyControl *)
		ShmemInitStruct("Buffer Strategy Status",
						sizeof(BufferStrategyControl),
						&found);

	if (!found)
	{
		/*
		 * Only done once, usually in postmaster
		 */
		Assert(init);

		SpinLockInit(&StrategyControl->buffer_strategy_lock);

		/*
		 * Grab the whole linked list of free buffers for our strategy. We
		 * assume it was previously set up by InitBufferPool().
		 */
		StrategyControl->firstFreeBuffer = 0;
		StrategyControl->lastFreeBuffer = NBuffers - 1;

		/* Initialize the clock sweep pointer */
		pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0);

		/* Clear statistics */
		StrategyControl->completePasses = 0;
		pg_atomic_init_u32(&StrategyControl->numBufferAllocs, 0);

		/* No pending notification */
		StrategyControl->bgwprocno = -1;
	}
	else
		Assert(!init);
}
Example #20
0
/*
 * btinitparallelscan -- initialize BTParallelScanDesc for parallel btree scan
 */
void
btinitparallelscan(void *target)
{
	BTParallelScanDesc bt_target = (BTParallelScanDesc) target;

	SpinLockInit(&bt_target->btps_mutex);
	bt_target->btps_scanPage = InvalidBlockNumber;
	bt_target->btps_pageStatus = BTPARALLEL_NOT_INITIALIZED;
	bt_target->btps_arrayKeyCount = 0;
	ConditionVariableInit(&bt_target->btps_cv);
}
Example #21
0
/*
 * LWLockInitialize - initialize a new lwlock; it's initially unlocked
 */
void
LWLockInitialize(LWLock *lock, int tranche_id)
{
	SpinLockInit(&lock->mutex);
	lock->releaseOK = true;
	lock->exclusive = 0;
	lock->shared = 0;
	lock->tranche = tranche_id;
	lock->head = NULL;
	lock->tail = NULL;
}
Example #22
0
/*
 * Callback function to clear an anchor after inserting it into the hashtable,
 * before returning it to the client.
 *
 * This function is not synchronized. The caller must hold a lock on the anchor
 */
static void
Cache_InitAnchor(void *entry)
{
	Assert(NULL != entry);

	CacheAnchor *anchor = (CacheAnchor *) entry;

	SpinLockInit(&anchor->spinlock);
	anchor->firstEntry = NULL;
	anchor->lastEntry = NULL;
	anchor->pinCount = 0;
}
Example #23
0
/*
 * Place platform-specific startup hacks here.  This is the right
 * place to put code that must be executed early in the launch of any new
 * server process.  Note that this code will NOT be executed when a backend
 * or sub-bootstrap process is forked, unless we are in a fork/exec
 * environment (ie EXEC_BACKEND is defined).
 *
 * XXX The need for code here is proof that the platform in question
 * is too brain-dead to provide a standard C execution environment
 * without help.  Avoid adding more here, if you can.
 */
static void
startup_hacks(const char *progname)
{
	/*
	 * Windows-specific execution environment hacking.
	 */
#ifdef WIN32
	{
		WSADATA		wsaData;
		int			err;

		/* Make output streams unbuffered by default */
		setvbuf(stdout, NULL, _IONBF, 0);
		setvbuf(stderr, NULL, _IONBF, 0);

		/* Prepare Winsock */
		err = WSAStartup(MAKEWORD(2, 2), &wsaData);
		if (err != 0)
		{
			write_stderr("%s: WSAStartup failed: %d\n",
						 progname, err);
			exit(1);
		}

		/* In case of general protection fault, don't show GUI popup box */
		SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);

#if defined(_M_AMD64) && _MSC_VER == 1800

		/*----------
		 * Avoid crashing in certain floating-point operations if we were
		 * compiled for x64 with MS Visual Studio 2013 and are running on
		 * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU.
		 *
		 * Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions
		 *----------
		 */
		if (!IsWindows7SP1OrGreater())
		{
			_set_FMA3_enable(0);
		}
#endif   /* defined(_M_AMD64) && _MSC_VER == 1800 */

	}
#endif   /* WIN32 */

	/*
	 * Initialize dummy_spinlock, in case we are on a platform where we have
	 * to use the fallback implementation of pg_memory_barrier().
	 */
	SpinLockInit(&dummy_spinlock);
}
Example #24
0
static void
test_setup(WalSndCtlData *data, int pid, WalSndState state)
{
	max_wal_senders = 1;
	WalSndCtl = data;

	data->walsnds[0].pid = pid;
	data->walsnds[0].state = state;
	data->walsnds[0].is_for_gp_walreceiver = true;
	SpinLockInit(&data->walsnds[0].mutex);

	expect_lwlock(LW_SHARED);
}
Example #25
0
/*
 *	InitShmemAllocation() --- set up shared-memory space allocation.
 *
 * This should be called only in the postmaster or a standalone backend.
 */
void
InitShmemAllocation(void)
{
	PGShmemHeader *shmhdr = ShmemSegHdr;
	char	   *aligned;

	Assert(shmhdr != NULL);

	/*
	 * If spinlocks are disabled, initialize emulation layer.  We have to do
	 * the space allocation the hard way, since obviously ShmemAlloc can't be
	 * called yet.
	 */
#ifndef HAVE_SPINLOCKS
	{
		PGSemaphore spinsemas;

		spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
		shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
		SpinlockSemaInit(spinsemas);
		Assert(shmhdr->freeoffset <= shmhdr->totalsize);
	}
#endif

	/*
	 * Initialize the spinlock used by ShmemAlloc; we have to do this the hard
	 * way, too, for the same reasons as above.
	 */
	ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
	shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
	Assert(shmhdr->freeoffset <= shmhdr->totalsize);

	/* Make sure the first allocation begins on a cache line boundary. */
	aligned = (char *)
		(CACHELINEALIGN((((char *) shmhdr) + shmhdr->freeoffset)));
	shmhdr->freeoffset = aligned - (char *) shmhdr;

	SpinLockInit(ShmemLock);

	/* ShmemIndex can't be set up yet (need LWLocks first) */
	shmhdr->index = NULL;
	ShmemIndex = (HTAB *) NULL;

	/*
	 * Initialize ShmemVariableCache for transaction manager. (This doesn't
	 * really belong here, but not worth moving.)
	 */
	ShmemVariableCache = (VariableCache)
		ShmemAlloc(sizeof(*ShmemVariableCache));
	memset(ShmemVariableCache, 0, sizeof(*ShmemVariableCache));
}
Example #26
0
/*
 * Initialize a region of shared memory with a table of contents.
 */
shm_toc *
shm_toc_create(uint64 magic, void *address, Size nbytes)
{
	shm_toc    *toc = (shm_toc *) address;

	Assert(nbytes > offsetof(shm_toc, toc_entry));
	toc->toc_magic = magic;
	SpinLockInit(&toc->toc_mutex);
	toc->toc_total_bytes = nbytes;
	toc->toc_allocated_bytes = 0;
	toc->toc_nentry = 0;

	return toc;
}
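A hedged round-trip sketch for the table of contents: the creator allocates from the region and publishes entries by key; an attacher validates the magic and looks them up. MY_MAGIC, MyState, and the key are illustrative, and the three-argument shm_toc_lookup() form is assumed (older releases take only two).

/* creator side */
shm_toc    *toc = shm_toc_create(MY_MAGIC, address, nbytes);
MyState    *state = (MyState *) shm_toc_allocate(toc, sizeof(MyState));

shm_toc_insert(toc, 0, state);	/* 0 is an illustrative key */

/* attaching side: returns NULL if the magic doesn't match */
shm_toc    *toc2 = shm_toc_attach(MY_MAGIC, address);
MyState    *state2 = (MyState *) shm_toc_lookup(toc2, 0, false);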
Example #27
0
void
SubscribeInitialize()
{
	_DBG_ENTER_LVL(_DBG_LVL_FUNC_TRACE, SubscribeInitialize);

	iba_sd_register( &SdClientHandle, NULL ); /* TODO: Error Handling*/
	QListInit(&SubscribedTrapsList);
	SpinLockInitState(&SubscribedTrapsListLock);
	SpinLockInit(&SubscribedTrapsListLock);

	SubscribeInitialized = 1;

	_DBG_LEAVE_LVL( _DBG_LVL_FUNC_TRACE );
}
Example #28
0
/*
 * InitShmemDynAllocator
 */
void
ShmemDynAllocShmemInit(void)
{
	bool found;

	ShemDynAllocShmem = (ShemDynAllocShmemStruct *) ShmemInitStruct("ShemDynAllocState", sizeof(ShemDynAllocShmemStruct) , &found);

	if (!found)
	{
		ShemDynAllocShmem->head = NULL;
		ShemDynAllocShmem->tail = NULL;
		SpinLockInit(&ShemDynAllocShmem->mutex);
	}
}
Example #29
0
/*
 * Initialize a new CacheEntry structure to initial values
 */
static void
Cache_InitCacheEntry(Cache *cache, CacheEntry *entry)
{
	SpinLockInit(&entry->spinlock);
	entry->state = CACHE_ENTRY_FREE;
	entry->pinCount = 0;
	entry->size = 0L;
	entry->utility = 0;

#ifdef USE_ASSERT_CHECKING
	Cache_MemsetPayload(cache, entry);
	MemSet(&entry->hashvalue, CACHE_MEMSET_BYTE_PATTERN, sizeof(entry->hashvalue));
#endif
}
Example #30
0
/* Allocate and initialize walreceiver-related shared memory */
void
WalRcvShmemInit(void)
{
	bool		found;

	WalRcv = (WalRcvData *)
		ShmemInitStruct("Wal Receiver Ctl", WalRcvShmemSize(), &found);

	if (!found)
	{
		/* First time through, so initialize */
		MemSet(WalRcv, 0, WalRcvShmemSize());
		WalRcv->walRcvState = WALRCV_STOPPED;
		SpinLockInit(&WalRcv->mutex);
	}
}
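Once initialized this way, the spinlock guards all later access to the struct's mutable fields; a minimal reader-side sketch using the walRcvState field set above:

WalRcvState state;

SpinLockAcquire(&WalRcv->mutex);
state = WalRcv->walRcvState;	/* read shared fields only under the lock */
SpinLockRelease(&WalRcv->mutex);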