Example #1
/*
 * Main worker routine. Accepts dsm_handle as an argument
 */
static void
bg_worker_main(Datum main_arg)
{
	PartitionArgs  *args;
	dsm_segment	   *segment;
	dsm_handle		handle = DatumGetInt32(main_arg);

	/* Create resource owner */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "CreatePartitionsWorker");

	/* Attach to dynamic shared memory */
	if (!handle)
		ereport(ERROR,
				(errmsg("pg_pathman worker: invalid dsm_handle")));
	segment = dsm_attach(handle);
	if (segment == NULL)
		ereport(ERROR,
				(errmsg("pg_pathman worker: unable to map dynamic shared memory segment")));
	args = dsm_segment_address(segment);

	/* Establish connection and start transaction */
	BackgroundWorkerInitializeConnectionByOid(args->dbid, InvalidOid);
	StartTransactionCommand();
	SPI_connect();
	PushActiveSnapshot(GetTransactionSnapshot());

	/* Create partitions */
	args->result = create_partitions(args->relid,
									 PATHMAN_GET_DATUM(args->value, args->by_val),
									 args->value_type,
									 &args->crashed);

	/* Cleanup */
	SPI_finish();
	PopActiveSnapshot();
	CommitTransactionCommand();

	dsm_detach(segment);
}
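
The PartitionArgs structure that this worker shares with its launcher (Example #10) is not shown on this page. A minimal sketch, inferred only from the fields the two examples touch; field order, exact types, and anything beyond those fields are assumptions:

/* Hypothetical argument block passed through the DSM segment; inferred
 * from the fields used in Examples #1 and #10. */
typedef struct PartitionArgs
{
	Oid			dbid;			/* database the worker should connect to */
	Oid			relid;			/* parent relation to partition */
	Datum		value;			/* partitioning key value (possibly inlined) */
	bool		by_val;			/* whether value_type is passed by value */
	Oid			value_type;		/* type OID of the key value */
	Oid			result;			/* OID of new partition, filled in by worker */
	bool		crashed;		/* set if partition creation failed */
} PartitionArgs;
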
Example #2
/* attach worker to the shared memory segment, read the job structure */
static void
initialize_worker(uint32 segment)
{
	dsm_segment   *seg;
	ResourceOwner old,
				  tmp;
	/*
	 * Connect to the dynamic shared memory segment.
	 *
	 * In order to attach a dynamic shared memory segment, we need a
	 * resource owner.  We cannot call StartTransactionCommand here, since
	 * we haven't yet attached to the database: to do this, we need to
	 * fetch information about connection properties from the shared
	 * memory segment.
	 */
	old = CurrentResourceOwner;
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Worker");
	seg = dsm_attach(segment);
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));
	dsm_pin_mapping(seg);
	tmp = CurrentResourceOwner;
	CurrentResourceOwner = old;
	ResourceOwnerDelete(tmp);

	job = palloc(sizeof(JobDesc));
	/* copy the arguments from the shared memory segment */
	memcpy(job, dsm_segment_address(seg), sizeof(JobDesc));

	/* and detach it right away */
	dsm_detach(seg);
	Assert(job->magic == JOB_MAGIC);

	job_run_function.schema = quote_identifier(job->schemaname);
	job_run_function.name = quote_identifier("run_job");
}
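
Neither JobDesc nor JOB_MAGIC appears on this page. A hypothetical sketch consistent with the reads above; the magic value and any fields beyond magic and schemaname are invented for illustration:

#define JOB_MAGIC	0x4a4f4221		/* hypothetical sentinel ("JOB!") */

/* Hypothetical job descriptor copied out of the segment; only the
 * fields this example actually reads are sketched. */
typedef struct JobDesc
{
	uint32		magic;						/* checked against JOB_MAGIC */
	char		schemaname[NAMEDATALEN];	/* schema that hosts run_job() */
	/* ... connection properties and job parameters would follow ... */
} JobDesc;
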
Example #3
static void
ResourceOwnerReleaseInternal(ResourceOwner owner,
							 ResourceReleasePhase phase,
							 bool isCommit,
							 bool isTopLevel)
{
	ResourceOwner child;
	ResourceOwner save;
	ResourceReleaseCallbackItem *item;
	Datum		foundres;

	/* Recurse to handle descendants */
	for (child = owner->firstchild; child != NULL; child = child->nextchild)
		ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);

	/*
	 * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc don't
	 * get confused.
	 */
	save = CurrentResourceOwner;
	CurrentResourceOwner = owner;

	if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
	{
		/*
		 * Release buffer pins.  Note that ReleaseBuffer will remove the
		 * buffer entry from our array, so we just have to iterate till there
		 * are none.
		 *
		 * During a commit, there shouldn't be any remaining pins --- that
		 * would indicate failure to clean up the executor correctly --- so
		 * issue warnings.  In the abort case, just clean up quietly.
		 */
		while (ResourceArrayGetAny(&(owner->bufferarr), &foundres))
		{
			Buffer		res = DatumGetBuffer(foundres);

			if (isCommit)
				PrintBufferLeakWarning(res);
			ReleaseBuffer(res);
		}

		/* Ditto for relcache references */
		while (ResourceArrayGetAny(&(owner->relrefarr), &foundres))
		{
			Relation	res = (Relation) DatumGetPointer(foundres);

			if (isCommit)
				PrintRelCacheLeakWarning(res);
			RelationClose(res);
		}

		/* Ditto for dynamic shared memory segments */
		while (ResourceArrayGetAny(&(owner->dsmarr), &foundres))
		{
			dsm_segment *res = (dsm_segment *) DatumGetPointer(foundres);

			if (isCommit)
				PrintDSMLeakWarning(res);
			dsm_detach(res);
		}

		/* Ditto for JIT contexts */
		while (ResourceArrayGetAny(&(owner->jitarr), &foundres))
		{
			JitContext *context = (JitContext *) DatumGetPointer(foundres);

			jit_release_context(context);
		}
	}
	else if (phase == RESOURCE_RELEASE_LOCKS)
	{
		if (isTopLevel)
		{
			/*
			 * For a top-level xact we are going to release all locks (or at
			 * least all non-session locks), so just do a single lmgr call at
			 * the top of the recursion.
			 */
			if (owner == TopTransactionResourceOwner)
			{
				ProcReleaseLocks(isCommit);
				ReleasePredicateLocks(isCommit);
			}
		}
		else
		{
			/*
			 * Release locks retail.  Note that if we are committing a
			 * subtransaction, we do NOT release its locks yet, but transfer
			 * them to the parent.
			 */
			LOCALLOCK **locks;
			int			nlocks;

			Assert(owner->parent != NULL);

			/*
			 * Pass the list of locks owned by this resource owner to the lock
			 * manager, unless it has overflowed.
			 */
			if (owner->nlocks > MAX_RESOWNER_LOCKS)
			{
				locks = NULL;
				nlocks = 0;
			}
			else
			{
				locks = owner->locks;
				nlocks = owner->nlocks;
			}

			if (isCommit)
				LockReassignCurrentOwner(locks, nlocks);
			else
				LockReleaseCurrentOwner(locks, nlocks);
		}
	}
	else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
	{
		/*
		 * Release catcache references.  Note that ReleaseCatCache will remove
		 * the catref entry from our array, so we just have to iterate till
		 * there are none.
		 *
		 * As with buffer pins, warn if any are left at commit time.
		 */
		while (ResourceArrayGetAny(&(owner->catrefarr), &foundres))
		{
			HeapTuple	res = (HeapTuple) DatumGetPointer(foundres);

			if (isCommit)
				PrintCatCacheLeakWarning(res);
			ReleaseCatCache(res);
		}

		/* Ditto for catcache lists */
		while (ResourceArrayGetAny(&(owner->catlistrefarr), &foundres))
		{
			CatCList   *res = (CatCList *) DatumGetPointer(foundres);

			if (isCommit)
				PrintCatCacheListLeakWarning(res);
			ReleaseCatCacheList(res);
		}

		/* Ditto for plancache references */
		while (ResourceArrayGetAny(&(owner->planrefarr), &foundres))
		{
			CachedPlan *res = (CachedPlan *) DatumGetPointer(foundres);

			if (isCommit)
				PrintPlanCacheLeakWarning(res);
			ReleaseCachedPlan(res, true);
		}

		/* Ditto for tupdesc references */
		while (ResourceArrayGetAny(&(owner->tupdescarr), &foundres))
		{
			TupleDesc	res = (TupleDesc) DatumGetPointer(foundres);

			if (isCommit)
				PrintTupleDescLeakWarning(res);
			DecrTupleDescRefCount(res);
		}

		/* Ditto for snapshot references */
		while (ResourceArrayGetAny(&(owner->snapshotarr), &foundres))
		{
			Snapshot	res = (Snapshot) DatumGetPointer(foundres);

			if (isCommit)
				PrintSnapshotLeakWarning(res);
			UnregisterSnapshot(res);
		}

		/* Ditto for temporary files */
		while (ResourceArrayGetAny(&(owner->filearr), &foundres))
		{
			File		res = DatumGetFile(foundres);

			if (isCommit)
				PrintFileLeakWarning(res);
			FileClose(res);
		}
	}

	/* Let add-on modules get a chance too */
	for (item = ResourceRelease_callbacks; item; item = item->next)
		item->callback(phase, isCommit, isTopLevel, item->arg);

	CurrentResourceOwner = save;
}
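
The loop over ResourceRelease_callbacks at the end is how add-on modules participate in this machinery: they register a hook with RegisterResourceReleaseCallback() from utils/resowner.h. A minimal sketch of such a hook; the callback body and module behavior are illustrative only:

#include "postgres.h"
#include "utils/resowner.h"

/* Illustrative extension callback, invoked once per release phase. */
static void
my_release_callback(ResourceReleasePhase phase, bool isCommit,
					bool isTopLevel, void *arg)
{
	/* Run after the built-in resources have been released. */
	if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
		elog(DEBUG1, "releasing module-private resources (commit: %d)",
			 (int) isCommit);
}

void
_PG_init(void)
{
	RegisterResourceReleaseCallback(my_release_callback, NULL);
}
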
Example #4
/*
 * Background worker entrypoint.
 *
 * This is intended to demonstrate how a background worker can be used to
 * facilitate a parallel computation.  Most of the logic here is fairly
 * boilerplate stuff, designed to attach to the shared memory segment,
 * notify the user backend that we're alive, and so on.  The
 * application-specific bits of logic that you'd replace for your own worker
 * are attach_to_queues() and copy_messages().
 */
void
test_shm_mq_main(Datum main_arg)
{
	dsm_segment *seg;
	shm_toc    *toc;
	shm_mq_handle *inqh;
	shm_mq_handle *outqh;
	volatile test_shm_mq_header *hdr;
	int			myworkernumber;
	PGPROC	   *registrant;

	/*
	 * Establish signal handlers.
	 *
	 * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
	 * it would a normal user backend.  To make that happen, we establish a
	 * signal handler that is a stripped-down version of die().  We don't have
	 * any equivalent of the backend's command-read loop, where interrupts can
	 * be processed immediately, so make sure ImmediateInterruptOK is turned
	 * off.
	 */
	pqsignal(SIGTERM, handle_sigterm);
	ImmediateInterruptOK = false;
	BackgroundWorkerUnblockSignals();

	/*
	 * Connect to the dynamic shared memory segment.
	 *
	 * The backend that registered this worker passed us the ID of a shared
	 * memory segment to which we must attach for further instructions.  In
	 * order to attach to dynamic shared memory, we need a resource owner.
	 * Once we've mapped the segment in our address space, attach to the table
	 * of contents so we can locate the various data structures we'll need to
	 * find within the segment.
	 */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker");
	seg = dsm_attach(DatumGetInt32(main_arg));
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));
	toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
	if (toc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
			   errmsg("bad magic number in dynamic shared memory segment")));

	/*
	 * Acquire a worker number.
	 *
	 * By convention, the process registering this background worker should
	 * have stored the control structure at key 0.  We look up that key to
	 * find it.  Our worker number gives our identity: there may be just one
	 * worker involved in this parallel operation, or there may be many.
	 */
	hdr = shm_toc_lookup(toc, 0);
	SpinLockAcquire(&hdr->mutex);
	myworkernumber = ++hdr->workers_attached;
	SpinLockRelease(&hdr->mutex);
	if (myworkernumber > hdr->workers_total)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("too many message queue testing workers already")));

	/*
	 * Attach to the appropriate message queues.
	 */
	attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);

	/*
	 * Indicate that we're fully initialized and ready to begin the main part
	 * of the parallel operation.
	 *
	 * Once we signal that we're ready, the user backend is entitled to assume
	 * that our on_dsm_detach callbacks will fire before we disconnect from
	 * the shared memory segment and exit.  Generally, that means we must have
	 * attached to all relevant dynamic shared memory data structures by now.
	 */
	SpinLockAcquire(&hdr->mutex);
	++hdr->workers_ready;
	SpinLockRelease(&hdr->mutex);
	registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
	if (registrant == NULL)
	{
		elog(DEBUG1, "registrant backend has exited prematurely");
		proc_exit(1);
	}
	SetLatch(&registrant->procLatch);

	/* Do the work. */
	copy_messages(inqh, outqh);

	/*
	 * We're done.  Explicitly detach the shared memory segment so that we
	 * don't get a resource leak warning at commit time.  This will fire any
	 * on_dsm_detach callbacks we've registered, as well.  Once that's done,
	 * we can go ahead and exit.
	 */
	dsm_detach(seg);
	proc_exit(1);
}
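
attach_to_queues() is one of the application-specific pieces the header comment mentions. A sketch close to the version shipped with PostgreSQL's test_shm_mq module, following the table-of-contents convention used above: worker N receives on TOC key N and sends on key N + 1 (shm_toc_lookup is shown in its pre-PostgreSQL-10 two-argument form to match this example):

static void
attach_to_queues(dsm_segment *seg, shm_toc *toc, int myworkernumber,
				 shm_mq_handle **inqhp, shm_mq_handle **outqhp)
{
	shm_mq	   *inq;
	shm_mq	   *outq;

	/* Attach as receiver to our inbound queue... */
	inq = shm_toc_lookup(toc, myworkernumber);
	shm_mq_set_receiver(inq, MyProc);
	*inqhp = shm_mq_attach(inq, seg, NULL);

	/* ...and as sender to the next queue in the ring. */
	outq = shm_toc_lookup(toc, myworkernumber + 1);
	shm_mq_set_sender(outq, MyProc);
	*outqhp = shm_mq_attach(outq, seg, NULL);
}
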
Example #5
static void
ResourceOwnerReleaseInternal(ResourceOwner owner,
							 ResourceReleasePhase phase,
							 bool isCommit,
							 bool isTopLevel)
{
	ResourceOwner child;
	ResourceOwner save;
	ResourceReleaseCallbackItem *item;

	/* Recurse to handle descendants */
	for (child = owner->firstchild; child != NULL; child = child->nextchild)
		ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);

	/*
	 * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc don't
	 * get confused.  We needn't PG_TRY here because the outermost level will
	 * fix it on error abort.
	 */
	save = CurrentResourceOwner;
	CurrentResourceOwner = owner;

	if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
	{
		/*
		 * Release buffer pins.  Note that ReleaseBuffer will remove the
		 * buffer entry from my list, so I just have to iterate till there are
		 * none.
		 *
		 * During a commit, there shouldn't be any remaining pins --- that
		 * would indicate failure to clean up the executor correctly --- so
		 * issue warnings.	In the abort case, just clean up quietly.
		 *
		 * We are careful to do the releasing back-to-front, so as to avoid
		 * O(N^2) behavior in ResourceOwnerForgetBuffer().
		 */
		while (owner->nbuffers > 0)
		{
			if (isCommit)
				PrintBufferLeakWarning(owner->buffers[owner->nbuffers - 1]);
			ReleaseBuffer(owner->buffers[owner->nbuffers - 1]);
		}

		/*
		 * Release relcache references.  Note that RelationClose will remove
		 * the relref entry from my list, so I just have to iterate till there
		 * are none.
		 *
		 * As with buffer pins, warn if any are left at commit time, and
		 * release back-to-front for speed.
		 */
		while (owner->nrelrefs > 0)
		{
			if (isCommit)
				PrintRelCacheLeakWarning(owner->relrefs[owner->nrelrefs - 1]);
			RelationClose(owner->relrefs[owner->nrelrefs - 1]);
		}

		/*
		 * Release dynamic shared memory segments.  Note that dsm_detach()
		 * will remove the segment from my list, so I just have to iterate
		 * until there are none.
		 *
		 * As in the preceding cases, warn if any are left over at commit
		 * time.
		 */
		while (owner->ndsms > 0)
		{
			if (isCommit)
				PrintDSMLeakWarning(owner->dsms[owner->ndsms - 1]);
			dsm_detach(owner->dsms[owner->ndsms - 1]);
		}
	}
	else if (phase == RESOURCE_RELEASE_LOCKS)
	{
		if (isTopLevel)
		{
			/*
			 * For a top-level xact we are going to release all locks (or at
			 * least all non-session locks), so just do a single lmgr call at
			 * the top of the recursion.
			 */
			if (owner == TopTransactionResourceOwner)
			{
				ProcReleaseLocks(isCommit);
				ReleasePredicateLocks(isCommit);
			}
		}
		else
		{
			/*
			 * Release locks retail.  Note that if we are committing a
			 * subtransaction, we do NOT release its locks yet, but transfer
			 * them to the parent.
			 */
			LOCALLOCK **locks;
			int			nlocks;

			Assert(owner->parent != NULL);

			/*
			 * Pass the list of locks owned by this resource owner to the lock
			 * manager, unless it has overflowed.
			 */
			if (owner->nlocks > MAX_RESOWNER_LOCKS)
			{
				locks = NULL;
				nlocks = 0;
			}
			else
			{
				locks = owner->locks;
				nlocks = owner->nlocks;
			}

			if (isCommit)
				LockReassignCurrentOwner(locks, nlocks);
			else
				LockReleaseCurrentOwner(locks, nlocks);
		}
	}
	else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
	{
		/*
		 * Release catcache references.  Note that ReleaseCatCache will remove
		 * the catref entry from my list, so I just have to iterate till there
		 * are none.
		 *
		 * As with buffer pins, warn if any are left at commit time, and
		 * release back-to-front for speed.
		 */
		while (owner->ncatrefs > 0)
		{
			if (isCommit)
				PrintCatCacheLeakWarning(owner->catrefs[owner->ncatrefs - 1]);
			ReleaseCatCache(owner->catrefs[owner->ncatrefs - 1]);
		}
		/* Ditto for catcache lists */
		while (owner->ncatlistrefs > 0)
		{
			if (isCommit)
				PrintCatCacheListLeakWarning(owner->catlistrefs[owner->ncatlistrefs - 1]);
			ReleaseCatCacheList(owner->catlistrefs[owner->ncatlistrefs - 1]);
		}
		/* Ditto for plancache references */
		while (owner->nplanrefs > 0)
		{
			if (isCommit)
				PrintPlanCacheLeakWarning(owner->planrefs[owner->nplanrefs - 1]);
			ReleaseCachedPlan(owner->planrefs[owner->nplanrefs - 1], true);
		}
		/* Ditto for tupdesc references */
		while (owner->ntupdescs > 0)
		{
			if (isCommit)
				PrintTupleDescLeakWarning(owner->tupdescs[owner->ntupdescs - 1]);
			DecrTupleDescRefCount(owner->tupdescs[owner->ntupdescs - 1]);
		}
		/* Ditto for snapshot references */
		while (owner->nsnapshots > 0)
		{
			if (isCommit)
				PrintSnapshotLeakWarning(owner->snapshots[owner->nsnapshots - 1]);
			UnregisterSnapshot(owner->snapshots[owner->nsnapshots - 1]);
		}

		/* Ditto for temporary files */
		while (owner->nfiles > 0)
		{
			if (isCommit)
				PrintFileLeakWarning(owner->files[owner->nfiles - 1]);
			FileClose(owner->files[owner->nfiles - 1]);
		}

		/* Clean up index scans too */
		ReleaseResources_hash();
	}

	/* Let add-on modules get a chance too */
	for (item = ResourceRelease_callbacks; item; item = item->next)
		(*item->callback) (phase, isCommit, isTopLevel, item->arg);

	CurrentResourceOwner = save;
}
Example #6
Datum
worker_test(PG_FUNCTION_ARGS)
{
	int i, nworkers;
	dsm_segment *seg;
	test_shm_mq_header *hdr;
	MemoryContext oldcontext;
	worker_state *wstate;

	nworkers = PG_GETARG_INT32(0);

#if PG_VERSION_NUM >= 90500
	seg = dsm_create(sizeof(test_shm_mq_header), 0);
#else
	seg = dsm_create(sizeof(test_shm_mq_header));
#endif

	hdr = dsm_segment_address(seg);

	printf("begin worker_test: %d, %p\n", dsm_segment_handle(seg), hdr);

	MemSet(hdr, 0, sizeof(test_shm_mq_header));

	SpinLockInit(&hdr->mutex);

	strncpy(hdr->dbname, get_database_name(MyDatabaseId), sizeof(hdr->dbname));
	hdr->dbname[sizeof(hdr->dbname) - 1] = '\0';	/* ensure NUL termination */

	oldcontext = MemoryContextSwitchTo(CurTransactionContext);

	wstate = MemoryContextAlloc(TopTransactionContext,
								offsetof(worker_state, handle) +
								sizeof(BackgroundWorkerHandle *) * nworkers);

	MemSet(wstate, 0, offsetof(worker_state, handle) + sizeof(BackgroundWorkerHandle *) * nworkers);

	on_dsm_detach(seg, cleanup_background_workers,
				  PointerGetDatum(wstate));

	for (i = 0; i < nworkers; i++)
	{
		BackgroundWorker worker;

		MemSet(&worker, 0, sizeof(worker));

		worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
		worker.bgw_start_time = BgWorkerStart_ConsistentState;
		worker.bgw_restart_time = BGW_NEVER_RESTART;
		worker.bgw_main = NULL;		/* new worker might not have library loaded */

		sprintf(worker.bgw_library_name, "worker_test");
		sprintf(worker.bgw_function_name, "worker_test_main");
		snprintf(worker.bgw_name, BGW_MAXLEN, "worker_test %d", i);

		worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
		worker.bgw_notify_pid = MyProcPid;

		if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
			ereport(ERROR,
					(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
					 errmsg("could not register background process"),
				 errhint("You may need to increase max_worker_processes.")));

		++wstate->nworkers;
	}

	for (i = 0; i < nworkers; i++)
	{
		BgwHandleStatus status;
		pid_t pid;

		status = WaitForBackgroundWorkerStartup(wstate->handle[i], &pid);

		if (status == BGWH_STOPPED)
			ereport(ERROR,
					(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
					 errmsg("could not start background process"),
					 errhint("More details may be available in the server log.")));
		if (status == BGWH_POSTMASTER_DIED)
			ereport(ERROR,
					(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
					 errmsg("cannot start background processes without postmaster"),
					 errhint("Kill all remaining database processes and restart the database.")));
		Assert(status == BGWH_STARTED);
	}

	wait_for_workers_to_become_ready(wstate, hdr);

	cancel_on_dsm_detach(seg, cleanup_background_workers,
						 PointerGetDatum(wstate));

	dsm_detach(seg);

	pfree(wstate);

	MemoryContextSwitchTo(oldcontext);

	PG_RETURN_VOID();
}
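
wait_for_workers_to_become_ready() is not shown on this page. A simplified sketch under the same conventions as the other examples here; a real version should also error out if a worker or the postmaster dies, which is elided:

static void
wait_for_workers_to_become_ready(worker_state *wstate,
								 volatile test_shm_mq_header *hdr)
{
	for (;;)
	{
		int			workers_ready;

		/* Re-read the counter under the spinlock. */
		SpinLockAcquire(&hdr->mutex);
		workers_ready = hdr->workers_ready;
		SpinLockRelease(&hdr->mutex);
		if (workers_ready >= wstate->nworkers)
			break;

		/* Sleep until a worker sets our latch, then recheck. */
		WaitLatch(MyLatch, WL_LATCH_SET, 0, PG_WAIT_EXTENSION);
		ResetLatch(MyLatch);
		CHECK_FOR_INTERRUPTS();
	}
}
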
Example #7
void
worker_test_main(Datum main_arg)
{
	dsm_segment *seg;
	volatile test_shm_mq_header *hdr;
	PGPROC *registrant;

	pqsignal(SIGHUP,  handle_sighup);
	pqsignal(SIGTERM, handle_sigterm);

	BackgroundWorkerUnblockSignals();

	printf("worker_test_main: %d\n", DatumGetInt32(main_arg));

	CurrentResourceOwner = ResourceOwnerCreate(NULL, "worker test");

	seg = dsm_attach(DatumGetInt32(main_arg));

	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));

	hdr = dsm_segment_address(seg);

	/* begin */
	SpinLockAcquire(&hdr->mutex);
	hdr->workers_ready++;
	hdr->workers_attached++;
	SpinLockRelease(&hdr->mutex);

	registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
	if (registrant == NULL)
	{
		elog(DEBUG1, "registrant backend has exited prematurely");
		proc_exit(1);
	}
	SetLatch(&registrant->procLatch);

	/* Do the work */

	BackgroundWorkerInitializeConnection(hdr->dbname, NULL);

	printf("DSM: %p\n", dsm_segment_address);

#if 0
	SetCurrentStatementStartTimestamp();
	StartTransactionCommand();
	SPI_connect();
	PushActiveSnapshot(GetTransactionSnapshot());
	pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");	

	SPI_finish();
	PopActiveSnapshot();
	CommitTransactionCommand();
	pgstat_report_activity(STATE_IDLE, NULL);
#endif

	dsm_detach(seg);

	proc_exit(0);
}
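
The test_shm_mq_header control structure itself is never shown. A hypothetical layout inferred from the fields Examples #4, #6, and #7 access; note the stock PostgreSQL test module has no dbname field, so that part is specific to this page's variant:

/* Hypothetical control structure stored at TOC key 0 in the segment. */
typedef struct
{
	slock_t		mutex;					/* protects the counters below */
	int			workers_total;			/* how many workers were launched */
	int			workers_attached;		/* bumped as each worker attaches */
	int			workers_ready;			/* bumped when fully initialized */
	char		dbname[NAMEDATALEN];	/* database the workers connect to */
} test_shm_mq_header;
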
Example #8
/*
 * Simple test of the shared memory message queue infrastructure.
 *
 * We set up a ring of message queues passing through 1 or more background
 * processes and eventually looping back to ourselves.  We then send a message
 * through the ring a number of times indicated by the loop count.  At the end,
 * we check whether the final message matches the one we started with.
 */
Datum
test_shm_mq(PG_FUNCTION_ARGS)
{
	int64		queue_size = PG_GETARG_INT64(0);
	text	   *message = PG_GETARG_TEXT_PP(1);
	char	   *message_contents = VARDATA_ANY(message);
	int			message_size = VARSIZE_ANY_EXHDR(message);
	int32		loop_count = PG_GETARG_INT32(2);
	int32		nworkers = PG_GETARG_INT32(3);
	dsm_segment *seg;
	shm_mq_handle *outqh;
	shm_mq_handle *inqh;
	shm_mq_result res;
	Size		len;
	void	   *data;

	/* A negative loopcount is nonsensical. */
	if (loop_count < 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("repeat count size must be a non-negative integer")));

	/*
	 * Since this test sends data using the blocking interfaces, it cannot
	 * send data to itself.  Therefore, a minimum of 1 worker is required. Of
	 * course, a negative worker count is nonsensical.
	 */
	if (nworkers < 1)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("number of workers must be a positive integer")));

	/* Set up dynamic shared memory segment and background workers. */
	test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);

	/* Send the initial message. */
	res = shm_mq_send(outqh, message_size, message_contents, false);
	if (res != SHM_MQ_SUCCESS)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("could not send message")));

	/*
	 * Receive a message and send it back out again.  Do this a number of
	 * times equal to the loop count.
	 */
	for (;;)
	{
		/* Receive a message. */
		res = shm_mq_receive(inqh, &len, &data, false);
		if (res != SHM_MQ_SUCCESS)
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("could not receive message")));

		/* If this is supposed to be the last iteration, stop here. */
		if (--loop_count <= 0)
			break;

		/* Send it back out. */
		res = shm_mq_send(outqh, len, data, false);
		if (res != SHM_MQ_SUCCESS)
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("could not send message")));
	}

	/*
	 * Finally, check that we got back the same message from the last
	 * iteration that we originally sent.
	 */
	verify_message(message_size, message_contents, len, data);

	/* Clean up. */
	dsm_detach(seg);

	PG_RETURN_VOID();
}
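
verify_message() performs the final byte-for-byte comparison; a sketch matching how it is called above:

static void
verify_message(Size origlen, char *origdata, Size newlen, char *newdata)
{
	Size		i;

	/* Lengths must match exactly. */
	if (origlen != newlen)
		ereport(ERROR,
				(errmsg("message corrupted"),
				 errdetail("The original message was %zu bytes but the final message is %zu bytes.",
						   origlen, newlen)));

	/* So must every byte of the payload. */
	for (i = 0; i < origlen; ++i)
		if (origdata[i] != newdata[i])
			ereport(ERROR,
					(errmsg("message corrupted"),
					 errdetail("The messages differ at byte %zu of %zu.",
							   i, origlen)));
}
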
Example #9
/*
 * Pipelined test of the shared memory message queue infrastructure.
 *
 * As in the basic test, we set up a ring of message queues passing through
 * 1 or more background processes and eventually looping back to ourselves.
 * Then, we send N copies of the user-specified message through the ring and
 * receive them all back.  Since this might fill up all message queues in the
 * ring and then stall, we must be prepared to begin receiving the messages
 * back before we've finished sending them.
 */
Datum
test_shm_mq_pipelined(PG_FUNCTION_ARGS)
{
	int64		queue_size = PG_GETARG_INT64(0);
	text	   *message = PG_GETARG_TEXT_PP(1);
	char	   *message_contents = VARDATA_ANY(message);
	int			message_size = VARSIZE_ANY_EXHDR(message);
	int32		loop_count = PG_GETARG_INT32(2);
	int32		nworkers = PG_GETARG_INT32(3);
	bool		verify = PG_GETARG_BOOL(4);
	int32		send_count = 0;
	int32		receive_count = 0;
	dsm_segment *seg;
	shm_mq_handle *outqh;
	shm_mq_handle *inqh;
	shm_mq_result res;
	Size		len;
	void	   *data;

	/* A negative loopcount is nonsensical. */
	if (loop_count < 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("repeat count size must be a non-negative integer")));

	/*
	 * Using the nonblocking interfaces, we can even send data to ourselves,
	 * so the minimum number of workers for this test is zero.
	 */
	if (nworkers < 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("number of workers must be a non-negative integer")));

	/* Set up dynamic shared memory segment and background workers. */
	test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);

	/* Main loop. */
	for (;;)
	{
		bool		wait = true;

		/*
		 * If we haven't yet sent the message the requisite number of times,
		 * try again to send it now.  Note that when shm_mq_send() returns
		 * SHM_MQ_WOULD_BLOCK, the next call to that function must pass the
		 * same message size and contents; that's not an issue here because
		 * we're sending the same message every time.
		 */
		if (send_count < loop_count)
		{
			res = shm_mq_send(outqh, message_size, message_contents, true);
			if (res == SHM_MQ_SUCCESS)
			{
				++send_count;
				wait = false;
			}
			else if (res == SHM_MQ_DETACHED)
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("could not send message")));
		}

		/*
		 * If we haven't yet received the message the requisite number of
		 * times, try to receive it again now.
		 */
		if (receive_count < loop_count)
		{
			res = shm_mq_receive(inqh, &len, &data, true);
			if (res == SHM_MQ_SUCCESS)
			{
				++receive_count;
				/* Verifying every time is slow, so it's optional. */
				if (verify)
					verify_message(message_size, message_contents, len, data);
				wait = false;
			}
			else if (res == SHM_MQ_DETACHED)
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("could not receive message")));
		}
		else
		{
			/*
			 * Otherwise, we've received the message enough times.  This
			 * shouldn't happen unless we've also sent it enough times.
			 */
			if (send_count != receive_count)
				ereport(ERROR,
						(errcode(ERRCODE_INTERNAL_ERROR),
					   errmsg("message sent %d times, but received %d times",
							  send_count, receive_count)));
			break;
		}

		if (wait)
		{
			/*
			 * If we made no progress, wait for one of the other processes to
			 * which we are connected to set our latch, indicating that they
			 * have read or written data and therefore there may now be work
			 * for us to do.
			 */
			WaitLatch(MyLatch, WL_LATCH_SET, 0, PG_WAIT_EXTENSION);
			ResetLatch(MyLatch);
			CHECK_FOR_INTERRUPTS();
		}
	}

	/* Clean up. */
	dsm_detach(seg);

	PG_RETURN_VOID();
}
Example #10
/*
 * Starts background worker that will create new partitions,
 * waits till it finishes the job and returns the result (new partition oid)
 */
Oid
create_partitions_bg_worker(Oid relid, Datum value, Oid value_type, bool *crashed)
{
	BackgroundWorker		worker;
	BackgroundWorkerHandle *worker_handle;
	BgwHandleStatus			status;
	dsm_segment	   *segment;
	dsm_handle		segment_handle;
	pid_t 			pid;
	PartitionArgs  *args;
	Oid 			child_oid;
	TypeCacheEntry *tce;

	/* Create a dsm segment for the worker to pass arguments */
	segment = dsm_create(sizeof(PartitionArgs), 0);
	segment_handle = dsm_segment_handle(segment);

	tce = lookup_type_cache(value_type, 0);

	/* Fill arguments structure */
	args = (PartitionArgs *) dsm_segment_address(segment);
	args->dbid = MyDatabaseId;
	args->relid = relid;
	if (tce->typbyval)
		args->value = value;
	else
		memcpy(&args->value, DatumGetPointer(value), sizeof(args->value));
	args->by_val = tce->typbyval;
	args->value_type = value_type;
	args->result = 0;

	/* Initialize worker struct */
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
		BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	worker.bgw_main = bg_worker_main;
	worker.bgw_main_arg = Int32GetDatum(segment_handle);
	worker.bgw_notify_pid = MyProcPid;

	/* Start dynamic worker; bail out rather than use an unset handle */
	if (!RegisterDynamicBackgroundWorker(&worker, &worker_handle))
		elog(ERROR, "unable to create background worker for pg_pathman");

	status = WaitForBackgroundWorkerStartup(worker_handle, &pid);
	if (status == BGWH_POSTMASTER_DIED)
		ereport(ERROR,
				(errmsg("postmaster died while the pg_pathman background worker was starting"),
				 errhint("More details may be available in the server log.")));

	/* Wait till the worker finishes its job */
	status = WaitForBackgroundWorkerShutdown(worker_handle);
	*crashed = args->crashed;
	child_oid = args->result;

	/* Free dsm segment */
	dsm_detach(segment);

	return child_oid;
}
Example #11
/*
 * purge_dropped_db_segments
 */
static void
purge_dropped_db_segments(bool force)
{
	static TimestampTz last_purge_time = 0;
	List *db_oids;
	List *dbs_to_remove = NIL;
	HASH_SEQ_STATUS status;
	broker_db_meta *db_meta;

	if (!force &&
		!TimestampDifferenceExceeds(last_purge_time, GetCurrentTimestamp(),
									10 * 1000))		/* 10s */
		return;

	db_oids = get_database_oids();

	LWLockAcquire(IPCMessageBrokerIndexLock, LW_SHARED);

	hash_seq_init(&status, broker_meta->db_meta_hash);
	while ((db_meta = (broker_db_meta *) hash_seq_search(&status)) != NULL)
	{
		bool found = false;
		ListCell *lc;

		foreach(lc, db_oids)
		{
			if (lfirst_oid(lc) == db_meta->dbid)
			{
				found = true;
				break;
			}
		}

		if (!found)
			dbs_to_remove = lappend_oid(dbs_to_remove, db_meta->dbid);
	}

	LWLockRelease(IPCMessageBrokerIndexLock);

	if (list_length(dbs_to_remove))
	{
		ListCell *lc;

		LWLockAcquire(IPCMessageBrokerIndexLock, LW_EXCLUSIVE);

		foreach(lc, dbs_to_remove)
		{
			Oid dbid = lfirst_oid(lc);
			bool found;

			db_meta = hash_search(broker_meta->db_meta_hash, &dbid, HASH_FIND, &found);
			Assert(found);

			Assert(db_meta->handle > 0);

			/* detach from main db segment */
			if (db_meta->segment)
				dsm_detach(db_meta->segment);

			if (db_meta->lqueues)
			{
				int i;

				for (i = 0; i < continuous_query_num_workers; i++)
				{
					local_queue *local_buf = &db_meta->lqueues[i];
					if (local_buf->slots)
						list_free_deep(local_buf->slots);
				}

				pfree(db_meta->lqueues);
			}

			hash_search(broker_meta->db_meta_hash, &dbid, HASH_REMOVE, &found);
			Assert(found);
		}

		mark_unused_locks_as_free(db_oids);

		LWLockRelease(IPCMessageBrokerIndexLock);
	}

	last_purge_time = GetCurrentTimestamp();
}