Example #1
/*
 * register_dirty_segment() -- Mark a relation segment as needing fsync
 *
 * If there is a local pending-ops table, just make an entry in it for
 * mdsync to process later.  Otherwise, try to pass off the fsync request
 * to the background writer process.  If that fails, just do the fsync
 * locally before returning (we expect this will not happen often enough
 * to be a performance problem).
 *
 * A false result implies I/O failure during local fsync.  errno will be
 * valid for error reporting.
 */
static bool
register_dirty_segment(SMgrRelation reln, MdfdVec *seg)
{
	if (pendingOpsTable)
	{
		PendingOperationEntry entry;

		/* ensure any pad bytes in the struct are zeroed */
		MemSet(&entry, 0, sizeof(entry));
		entry.rnode = reln->smgr_rnode;
		entry.segno = seg->mdfd_segno;

		(void) hash_search(pendingOpsTable, &entry, HASH_ENTER, NULL);
		return true;
	}
	else
	{
		if (ForwardFsyncRequest(reln->smgr_rnode, seg->mdfd_segno))
			return true;
	}

	if (FileSync(seg->mdfd_vfd) < 0)
		return false;
	return true;
}
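
A hedged caller sketch for the function above: on a false return, the saved errno can be reported via %m. The write logic and the _mdfd_getseg() lookup (helper name and signature assumed here) are illustrative, not the original call site.

static void
mdwrite_sketch(SMgrRelation reln, BlockNumber blocknum, char *buffer)
{
	/* locate the segment holding this block (assumed helper/signature) */
	MdfdVec    *v = _mdfd_getseg(reln, blocknum, false);

	/* ... seek to the block and write the buffer here ... */

	/* register the dirty segment; on failure, errno describes the fsync error */
	if (!register_dirty_segment(reln, v))
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not fsync segment %u of relation %u/%u/%u: %m",
						v->mdfd_segno,
						reln->smgr_rnode.spcNode,
						reln->smgr_rnode.dbNode,
						reln->smgr_rnode.relNode)));
}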
Example #2
/*
 * ForgetRelationFsyncRequests -- ensure any fsyncs for a rel are forgotten
 */
void
ForgetRelationFsyncRequests(RelFileNode rnode)
{
	if (pendingOpsTable)
	{
		/* standalone backend or startup process: fsync state is local */
		RememberFsyncRequest(rnode, FORGET_RELATION_FSYNC);
	}
	else if (IsUnderPostmaster)
	{
		/*
		 * Notify the bgwriter about it.  If we fail to queue the revoke
		 * message, we have to sleep and try again ... ugly, but hopefully
		 * won't happen often.
		 *
		 * XXX should we CHECK_FOR_INTERRUPTS in this loop?  Escaping with
		 * an error would leave the no-longer-used file still present on
		 * disk, which would be bad, so I'm inclined to assume that the
		 * bgwriter will always empty the queue soon.
		 */
		while (!ForwardFsyncRequest(rnode, FORGET_RELATION_FSYNC))
			pg_usleep(10000L);	/* 10 msec seems a good number */
		/*
		 * Note we don't wait for the bgwriter to actually absorb the
		 * revoke message; see mdsync() for the implications.
		 */
	}
}
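
For context, a minimal sketch of what the local-table half of this could do: absorb FORGET_RELATION_FSYNC by scanning pendingOpsTable and dropping matching entries. It assumes the dynahash sequential-scan API and the PendingOperationEntry layout from Example #1; the real RememberFsyncRequest() may differ.

static void
forget_relation_fsync_sketch(RelFileNode rnode)
{
	HASH_SEQ_STATUS hstat;
	PendingOperationEntry *entry;

	hash_seq_init(&hstat, pendingOpsTable);
	while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
	{
		/* drop every pending fsync that belongs to this relation */
		if (RelFileNodeEquals(entry->rnode, rnode))
			(void) hash_search(pendingOpsTable, entry, HASH_REMOVE, NULL);
	}
}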
Example #3
/*
 * Basic enqueue tests, including compaction upon enqueuing into a
 * full queue.
 */
void
test__ForwardFsyncRequest_enqueue(void **state)
{
	bool		ret;
	int			i;
	RelFileNode dummy = {1, 1, 1};

	init_request_queue();
	ProcGlobal->checkpointerLatch = NULL;	/* no checkpointer latch to set from this test */
	/* expect the LWLock acquire/release done inside ForwardFsyncRequest */
	expect_value(LWLockAcquire, l, CheckpointerCommLock);
	expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
	will_return(LWLockAcquire, true);
	expect_value(LWLockRelease, l, CheckpointerCommLock);
	will_be_called(LWLockRelease);
	/* basic enqueue */
	ret = ForwardFsyncRequest(dummy, MAIN_FORKNUM, 1);
	assert_true(ret);
	assert_true(CheckpointerShmem->num_requests == 1);
	/* fill up the queue */
	for (i = 2; i <= MAX_BGW_REQUESTS; i++)
	{
		expect_value(LWLockAcquire, l, CheckpointerCommLock);
		expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
		will_return(LWLockAcquire, true);
		expect_value(LWLockRelease, l, CheckpointerCommLock);
		will_be_called(LWLockRelease);
		ret = ForwardFsyncRequest(dummy, MAIN_FORKNUM, i);
		assert_true(ret);
	}
	expect_value(LWLockAcquire, l, CheckpointerCommLock);
	expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
	will_return(LWLockAcquire, true);
	expect_value(LWLockRelease, l, CheckpointerCommLock);
	will_be_called(LWLockRelease);
#ifdef USE_ASSERT_CHECKING
	expect_value(LWLockHeldByMe, l, CheckpointerCommLock);
	will_return(LWLockHeldByMe, true);
#endif
	/*
	 * This enqueue request should trigger compaction, but since the queue
	 * contains no duplicates, compaction cannot free any space: the request
	 * is rejected and the queue remains full.
	 */
	ret = ForwardFsyncRequest(dummy, MAIN_FORKNUM, 0);
	assert_false(ret);
	assert_true(CheckpointerShmem->num_requests == CheckpointerShmem->max_requests);
	free(CheckpointerShmem);
}
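
The LWLock expectations around every ForwardFsyncRequest() call above are identical, so they could be factored into one helper (a hypothetical name, using the same cmockery mocks the test already relies on):

static void
expect_lwlock(void)
{
	expect_value(LWLockAcquire, l, CheckpointerCommLock);
	expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
	will_return(LWLockAcquire, true);
	expect_value(LWLockRelease, l, CheckpointerCommLock);
	will_be_called(LWLockRelease);
}

Each five-line expectation block in the test would then shrink to a single expect_lwlock() call.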
Example #4
/*
 * register_dirty_segment() -- Mark a relation segment as needing fsync
 *
 * If there is a local pending-ops table, just make an entry in it for
 * mdsync to process later.  Otherwise, try to pass off the fsync request
 * to the background writer process.  If that fails, just do the fsync
 * locally before returning (we expect this will not happen often enough
 * to be a performance problem).
 *
 * A false result implies I/O failure during local fsync.  errno will be
 * valid for error reporting.
 */
static bool
register_dirty_segment(SMgrRelation reln, MdfdVec *seg)
{
	if (pendingOpsTable)
	{
		/* push it into local pending-ops table */
		RememberFsyncRequest(reln->smgr_rnode, seg->mdfd_segno);
		return true;
	}
	else
	{
		if (ForwardFsyncRequest(reln->smgr_rnode, seg->mdfd_segno))
			return true;
	}

	if (FileSync(seg->mdfd_vfd) < 0)
		return false;
	return true;
}
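
Compared with Example #1, the hash-table insertion has moved behind RememberFsyncRequest(). A sketch of what its ordinary-segment path presumably does, assuming the same PendingOperationEntry layout (the actual function also has to recognize the FORGET_* sentinel values used in Examples #2 and #5):

static void
remember_fsync_request_sketch(RelFileNode rnode, BlockNumber segno)
{
	PendingOperationEntry entry;

	/* ensure any pad bytes in the struct are zeroed */
	MemSet(&entry, 0, sizeof(entry));
	entry.rnode = rnode;
	entry.segno = segno;

	(void) hash_search(pendingOpsTable, &entry, HASH_ENTER, NULL);
}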
Example #5
/*
 * ForgetDatabaseFsyncRequests -- ensure any fsyncs for a DB are forgotten
 */
void
ForgetDatabaseFsyncRequests(Oid dbid)
{
	RelFileNode rnode;

	rnode.dbNode = dbid;
	rnode.spcNode = 0;
	rnode.relNode = 0;

	if (pendingOpsTable)
	{
		/* standalone backend or startup process: fsync state is local */
		RememberFsyncRequest(rnode, FORGET_DATABASE_FSYNC);
	}
	else if (IsUnderPostmaster)
	{
		/* see notes in ForgetRelationFsyncRequests */
		while (!ForwardFsyncRequest(rnode, FORGET_DATABASE_FSYNC))
			pg_usleep(10000L);	/* 10 msec seems a good number */
	}
}
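
A hypothetical caller sketch: a DROP DATABASE path would discard queued fsyncs before unlinking the database's files, so nothing later tries to fsync a file that no longer exists. dropdb_sketch() and remove_database_files() are illustrative names, not the original code.

static void
dropdb_sketch(Oid db_id)
{
	/* forget any pending fsyncs for the doomed database first ... */
	ForgetDatabaseFsyncRequests(db_id);

	/* ... then it is safe to unlink its files on disk */
	remove_database_files(db_id);	/* hypothetical helper */
}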