Example #1
/*
 * get the next page to scan
 *
 * Get the next page to scan.  Even if there are no pages left to scan,
 * another backend could have grabbed a page to scan and not yet finished
 * looking at it, so it doesn't follow that the scan is done when the first
 * backend gets an InvalidBlockNumber return.
 */
BlockNumber
table_block_parallelscan_nextpage(Relation rel, ParallelBlockTableScanDesc pbscan)
{
	BlockNumber page;
	uint64		nallocated;

	/*
	 * phs_nallocated tracks how many pages have been allocated to workers
	 * already.  When phs_nallocated >= rs_nblocks, all blocks have been
	 * allocated.
	 *
	 * Because we use an atomic fetch-and-add to fetch the current value, the
	 * phs_nallocated counter will exceed rs_nblocks: workers keep
	 * incrementing the value when they try to allocate the next block even
	 * though all blocks have already been allocated.  For that reason the
	 * counter must be 64 bits wide, to avoid wrapping around when rs_nblocks
	 * is close to 2^32.
	 *
	 * The actual page to return is calculated by adding the counter to the
	 * starting block number, modulo nblocks.
	 */
	nallocated = pg_atomic_fetch_add_u64(&pbscan->phs_nallocated, 1);
	if (nallocated >= pbscan->phs_nblocks)
		page = InvalidBlockNumber;	/* all blocks have been allocated */
	else
		page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;

	/*
	 * Report scan location.  Normally, we report the current page number.
	 * When we reach the end of the scan, though, we report the starting page,
	 * not the ending page, just so the starting positions for later scans
	 * don't slew backwards.  We only report the position at the end of the
	 * scan once, though: subsequent callers will report nothing.
	 */
	if (pbscan->base.phs_syncscan)
	{
		if (page != InvalidBlockNumber)
			ss_report_location(rel, page);
		else if (nallocated == pbscan->phs_nblocks)
			ss_report_location(rel, pbscan->phs_startblock);
	}

	return page;
}
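
The heart of the allocator above is a single pg_atomic_fetch_add_u64 on a shared counter: every worker claims the next counter value, and any value at or beyond phs_nblocks means the scan is exhausted. Below is a minimal standalone sketch of that fetch-and-add page dispenser, assuming C11 atomics instead of PostgreSQL's pg_atomic API; the names page_dispenser, next_page and INVALID_BLOCK are illustrative only.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_BLOCK UINT32_MAX	/* stand-in for InvalidBlockNumber */

typedef struct
{
	_Atomic uint64_t nallocated;	/* counter shared by all workers */
	uint32_t	nblocks;			/* total number of blocks to scan */
	uint32_t	startblock;			/* where the synchronized scan started */
} page_dispenser;

/* Claim the next block, or INVALID_BLOCK once every block is handed out. */
static uint32_t
next_page(page_dispenser *pd)
{
	uint64_t	nallocated = atomic_fetch_add(&pd->nallocated, 1);

	if (nallocated >= pd->nblocks)
		return INVALID_BLOCK;
	return (uint32_t) ((nallocated + pd->startblock) % pd->nblocks);
}

int
main(void)
{
	page_dispenser pd = {.nallocated = 0, .nblocks = 5, .startblock = 3};
	uint32_t	page;

	/* A single "worker" draining the dispenser: prints 3 4 0 1 2. */
	while ((page = next_page(&pd)) != INVALID_BLOCK)
		printf("%" PRIu32 " ", page);
	printf("\n");
	return 0;
}

With nblocks = 5 and startblock = 3 the dispenser hands out 3 4 0 1 2 and then only INVALID_BLOCK; the counter keeps growing past nblocks on later calls, which is exactly why the original uses a 64-bit counter.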
Example #2
/*
 * AssignDistributedTransactionId generates a new distributed transaction id and
 * sets it for the current backend. It also sets the databaseId and
 * processId fields.
 *
 * This function should only be called from BeginCoordinatedTransaction(). Any
 * other caller is very likely to break distributed transaction management.
 */
void
AssignDistributedTransactionId(void)
{
	pg_atomic_uint64 *transactionNumberSequence =
		&backendManagementShmemData->nextTransactionNumber;

	uint64 nextTransactionNumber = pg_atomic_fetch_add_u64(transactionNumberSequence, 1);
	int localGroupId = GetLocalGroupId();
	TimestampTz currentTimestamp = GetCurrentTimestamp();

	SpinLockAcquire(&MyBackendData->mutex);

	MyBackendData->databaseId = MyDatabaseId;

	MyBackendData->transactionId.initiatorNodeIdentifier = localGroupId;
	MyBackendData->transactionId.transactionOriginator = true;
	MyBackendData->transactionId.transactionNumber =
		nextTransactionNumber;
	MyBackendData->transactionId.timestamp = currentTimestamp;

	SpinLockRelease(&MyBackendData->mutex);
}
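
In this example pg_atomic_fetch_add_u64 plays a different role: it hands out a globally unique transaction number without holding any lock, and the spinlock only serves to publish the multi-field transaction id to other backends as one unit. Here is a minimal standalone sketch of that two-step pattern, assuming C11 atomics and a pthread mutex in place of Citus' shared-memory counter and spinlock; the names backend_data and assign_transaction_id are illustrative only.

#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Shared sequence: the source of unique transaction numbers. */
static _Atomic uint64_t next_transaction_number = 1;

typedef struct
{
	pthread_mutex_t mutex;		/* stands in for the per-backend spinlock */
	int			initiator_node;
	uint64_t	transaction_number;
	time_t		timestamp;
} backend_data;

static backend_data my_backend = {.mutex = PTHREAD_MUTEX_INITIALIZER};

/* Analogous to AssignDistributedTransactionId(): claim a number, then publish. */
static void
assign_transaction_id(int local_group_id)
{
	/* Step 1: claim a unique number; no lock needed for the counter. */
	uint64_t	number = atomic_fetch_add(&next_transaction_number, 1);

	/* Step 2: publish all fields together under the lock. */
	pthread_mutex_lock(&my_backend.mutex);
	my_backend.initiator_node = local_group_id;
	my_backend.transaction_number = number;
	my_backend.timestamp = time(NULL);
	pthread_mutex_unlock(&my_backend.mutex);
}

int
main(void)
{
	assign_transaction_id(0);
	printf("assigned transaction number %" PRIu64 "\n",
		   my_backend.transaction_number);
	return 0;
}

Doing the fetch-and-add before taking the lock keeps the critical section short, which matches the ordering in AssignDistributedTransactionId above, where the counter is advanced before SpinLockAcquire.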
Example #3
static void
test_atomic_uint64(void)
{
	pg_atomic_uint64 var;
	uint64		expected;
	int			i;

	pg_atomic_init_u64(&var, 0);

	if (pg_atomic_read_u64(&var) != 0)
		elog(ERROR, "atomic_read_u64() #1 wrong");

	pg_atomic_write_u64(&var, 3);

	if (pg_atomic_read_u64(&var) != 3)
		elog(ERROR, "atomic_read_u64() #2 wrong");

	if (pg_atomic_fetch_add_u64(&var, 1) != 3)
		elog(ERROR, "atomic_fetch_add_u64() #1 wrong");

	if (pg_atomic_fetch_sub_u64(&var, 1) != 4)
		elog(ERROR, "atomic_fetch_sub_u64() #1 wrong");

	if (pg_atomic_sub_fetch_u64(&var, 3) != 0)
		elog(ERROR, "atomic_sub_fetch_u64() #1 wrong");

	if (pg_atomic_add_fetch_u64(&var, 10) != 10)
		elog(ERROR, "atomic_add_fetch_u64() #1 wrong");

	if (pg_atomic_exchange_u64(&var, 5) != 10)
		elog(ERROR, "pg_atomic_exchange_u64() #1 wrong");

	if (pg_atomic_exchange_u64(&var, 0) != 5)
		elog(ERROR, "pg_atomic_exchange_u64() #0 wrong");

	/* fail exchange because of old expected */
	expected = 10;
	if (pg_atomic_compare_exchange_u64(&var, &expected, 1))
		elog(ERROR, "atomic_compare_exchange_u64() changed value spuriously");

	/* CAS is allowed to fail due to interrupts, try a couple of times */
	for (i = 0; i < 100; i++)
	{
		expected = 0;
		if (pg_atomic_compare_exchange_u64(&var, &expected, 1))
			break;
	}
	if (i == 100)
		elog(ERROR, "atomic_compare_exchange_u64() never succeeded");
	if (pg_atomic_read_u64(&var) != 1)
		elog(ERROR, "atomic_compare_exchange_u64() didn't set value properly");

	pg_atomic_write_u64(&var, 0);

	/* try setting flagbits */
	if (pg_atomic_fetch_or_u64(&var, 1) & 1)
		elog(ERROR, "pg_atomic_fetch_or_u64() #1 wrong");

	if (!(pg_atomic_fetch_or_u64(&var, 2) & 1))
		elog(ERROR, "pg_atomic_fetch_or_u64() #2 wrong");

	if (pg_atomic_read_u64(&var) != 3)
		elog(ERROR, "invalid result after pg_atomic_fetch_or_u64()");

	/* try clearing flagbits */
	if ((pg_atomic_fetch_and_u64(&var, ~2) & 3) != 3)
		elog(ERROR, "pg_atomic_fetch_and_u64() #1 wrong");

	if (pg_atomic_fetch_and_u64(&var, ~1) != 1)
		elog(ERROR, "pg_atomic_fetch_and_u64() #2 wrong: is " UINT64_FORMAT,
			 pg_atomic_read_u64(&var));
	/* no bits set anymore */
	if (pg_atomic_fetch_and_u64(&var, ~0) != 0)
		elog(ERROR, "pg_atomic_fetch_and_u64() #3 wrong");
}
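
The compare-exchange portion of the test relies on pg_atomic_compare_exchange_u64 rewriting *expected with the current value whenever it fails, which is why the loop resets expected before each retry. The sketch below shows the same retry idiom in standard C11 (not PostgreSQL code): the hypothetical helper bounded_add adds to a counter without letting it exceed a limit, retrying until the compare-exchange takes effect.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Atomically add 'delta' to '*var', clamping the result at 'limit'.
 * On failure, atomic_compare_exchange_weak stores the current value into
 * 'expected', so the loop recomputes the desired value and tries again.
 */
static uint64_t
bounded_add(_Atomic uint64_t *var, uint64_t delta, uint64_t limit)
{
	uint64_t	expected = atomic_load(var);
	uint64_t	desired;

	do
	{
		desired = expected + delta;
		if (desired > limit)
			desired = limit;
		/* the weak form may fail spuriously, so always loop */
	} while (!atomic_compare_exchange_weak(var, &expected, desired));

	return desired;
}

int
main(void)
{
	_Atomic uint64_t counter = 7;

	printf("%" PRIu64 "\n", bounded_add(&counter, 10, 12));	/* prints 12 */
	printf("%" PRIu64 "\n", atomic_load(&counter));			/* prints 12 */
	return 0;
}

Like the interrupt-related failures the test's comment mentions, atomic_compare_exchange_weak is allowed to fail even when the values match, so a retry loop rather than a single attempt is the correct way to use it.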