/*
 * Set up the shared-memory state for a block-oriented parallel scan of
 * 'rel', storing it into the ParallelTableScanDesc provided by the caller.
 *
 * Returns the number of bytes of shared memory occupied by the
 * block-based descriptor, so the caller knows how much space was used.
 */
Size
table_block_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
{
	ParallelBlockTableScanDesc desc = (ParallelBlockTableScanDesc) pscan;

	/* identify the relation and snapshot its current length in blocks */
	desc->base.phs_relid = RelationGetRelid(rel);
	desc->phs_nblocks = RelationGetNumberOfBlocks(rel);

	/* no start block chosen yet; workers will pick one under the mutex */
	SpinLockInit(&desc->phs_mutex);
	desc->phs_startblock = InvalidBlockNumber;
	pg_atomic_init_u64(&desc->phs_nallocated, 0);

	/* compare phs_syncscan initialization to similar logic in initscan */
	desc->base.phs_syncscan = synchronize_seqscans &&
		!RelationUsesLocalBuffers(rel) &&
		desc->phs_nblocks > NBuffers / 4;

	return sizeof(ParallelBlockTableScanDescData);
}
static void test_atomic_uint64(void) { pg_atomic_uint64 var; uint64 expected; int i; pg_atomic_init_u64(&var, 0); if (pg_atomic_read_u64(&var) != 0) elog(ERROR, "atomic_read_u64() #1 wrong"); pg_atomic_write_u64(&var, 3); if (pg_atomic_read_u64(&var) != 3) elog(ERROR, "atomic_read_u64() #2 wrong"); if (pg_atomic_fetch_add_u64(&var, 1) != 3) elog(ERROR, "atomic_fetch_add_u64() #1 wrong"); if (pg_atomic_fetch_sub_u64(&var, 1) != 4) elog(ERROR, "atomic_fetch_sub_u64() #1 wrong"); if (pg_atomic_sub_fetch_u64(&var, 3) != 0) elog(ERROR, "atomic_sub_fetch_u64() #1 wrong"); if (pg_atomic_add_fetch_u64(&var, 10) != 10) elog(ERROR, "atomic_add_fetch_u64() #1 wrong"); if (pg_atomic_exchange_u64(&var, 5) != 10) elog(ERROR, "pg_atomic_exchange_u64() #1 wrong"); if (pg_atomic_exchange_u64(&var, 0) != 5) elog(ERROR, "pg_atomic_exchange_u64() #0 wrong"); /* fail exchange because of old expected */ expected = 10; if (pg_atomic_compare_exchange_u64(&var, &expected, 1)) elog(ERROR, "atomic_compare_exchange_u64() changed value spuriously"); /* CAS is allowed to fail due to interrupts, try a couple of times */ for (i = 0; i < 100; i++) { expected = 0; if (!pg_atomic_compare_exchange_u64(&var, &expected, 1)) break; } if (i == 100) elog(ERROR, "atomic_compare_exchange_u64() never succeeded"); if (pg_atomic_read_u64(&var) != 1) elog(ERROR, "atomic_compare_exchange_u64() didn't set value properly"); pg_atomic_write_u64(&var, 0); /* try setting flagbits */ if (pg_atomic_fetch_or_u64(&var, 1) & 1) elog(ERROR, "pg_atomic_fetch_or_u64() #1 wrong"); if (!(pg_atomic_fetch_or_u64(&var, 2) & 1)) elog(ERROR, "pg_atomic_fetch_or_u64() #2 wrong"); if (pg_atomic_read_u64(&var) != 3) elog(ERROR, "invalid result after pg_atomic_fetch_or_u64()"); /* try clearing flagbits */ if ((pg_atomic_fetch_and_u64(&var, ~2) & 3) != 3) elog(ERROR, "pg_atomic_fetch_and_u64() #1 wrong"); if (pg_atomic_fetch_and_u64(&var, ~1) != 1) elog(ERROR, "pg_atomic_fetch_and_u64() #2 wrong: is " UINT64_FORMAT, 
pg_atomic_read_u64(&var)); /* no bits set anymore */ if (pg_atomic_fetch_and_u64(&var, ~0) != 0) elog(ERROR, "pg_atomic_fetch_and_u64() #3 wrong"); }
/*
 * BackendManagementShmemInit is the callback that is to be called on shared
 * memory startup hook. The function sets up the necessary shared memory
 * segment for the backend manager.
 *
 * On the first call (across all backends) it zeroes the segment, registers
 * and initializes the segment's LWLock (via the tranche API appropriate for
 * the PostgreSQL version), seeds the distributed transaction counter, and
 * initializes one spinlock per possible backend slot.  Subsequent callers
 * find the segment already initialized and skip that work.  In all cases it
 * finishes by chaining to any previously installed shmem_startup_hook.
 */
static void
BackendManagementShmemInit(void)
{
	bool alreadyInitialized = false;

	/* we may update the shmem, acquire lock exclusively */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	/* attach to (or create) the named segment; sets alreadyInitialized */
	backendManagementShmemData =
		(BackendManagementShmemData *) ShmemInitStruct(
			"Backend Management Shmem",
			BackendManagementShmemSize(),
			&alreadyInitialized);

	if (!alreadyInitialized)
	{
		int backendIndex = 0;
		char *trancheName = "Backend Management Tranche";

		/*
		 * PG 10+ uses NamedLWLockTranche; older versions use the legacy
		 * LWLockTranche struct with explicit array_base/array_stride.
		 */
#if (PG_VERSION_NUM >= 100000)
		NamedLWLockTranche *namedLockTranche =
			&backendManagementShmemData->namedLockTranche;
#else
		LWLockTranche *lockTranche = &backendManagementShmemData->lockTranche;
#endif

		/* start by zeroing out all the memory */
		memset(backendManagementShmemData, 0, BackendManagementShmemSize());

#if (PG_VERSION_NUM >= 100000)
		namedLockTranche->trancheId = LWLockNewTrancheId();
		LWLockRegisterTranche(namedLockTranche->trancheId, trancheName);
		LWLockInitialize(&backendManagementShmemData->lock,
						 namedLockTranche->trancheId);
#else
		backendManagementShmemData->trancheId = LWLockNewTrancheId();

		/* we only need a single lock */
		lockTranche->array_base = &backendManagementShmemData->lock;
		lockTranche->array_stride = sizeof(LWLock);
		lockTranche->name = trancheName;

		LWLockRegisterTranche(backendManagementShmemData->trancheId,
							  lockTranche);
		LWLockInitialize(&backendManagementShmemData->lock,
						 backendManagementShmemData->trancheId);
#endif

		/* start the distributed transaction ids from 1 */
		pg_atomic_init_u64(&backendManagementShmemData->nextTransactionNumber,
						   1);

		/*
		 * We need to init per backend's spinlock before any backend
		 * starts its execution.
		 */
		for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
		{
			SpinLockInit(&backendManagementShmemData->backends[backendIndex].mutex);
		}
	}

	LWLockRelease(AddinShmemInitLock);

	/* chain to whatever shmem_startup_hook was installed before ours */
	if (prev_shmem_startup_hook != NULL)
	{
		prev_shmem_startup_hook();
	}
}