Example 1
/* ----------------------------------------------------------------
 *		ExecGather(node)
 *
 *		Scans the relation via multiple workers and returns
 *		the next qualifying tuple.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecGather(GatherState *node)
{
	TupleTableSlot *fslot = node->funnel_slot;
	int			i;
	TupleTableSlot *slot;
	TupleTableSlot *resultSlot;
	ExprDoneCond isDone;
	ExprContext *econtext;

	/*
	 * Initialize the parallel context and workers on first execution.  We do
	 * this on first execution rather than during node initialization because
	 * it needs to allocate a large dynamic shared memory segment, which is
	 * best done only if it is really needed.
	 */
	if (!node->initialized)
	{
		EState	   *estate = node->ps.state;
		Gather	   *gather = (Gather *) node->ps.plan;

		/*
		 * Sometimes we might have to run without parallelism; but if
		 * parallel mode is active then we can try to fire up some workers.
		 */
		if (gather->num_workers > 0 && IsInParallelMode())
		{
			ParallelContext *pcxt;
			bool	got_any_worker = false;

			/* Initialize the workers required to execute the Gather node. */
			if (!node->pei)
				node->pei = ExecInitParallelPlan(node->ps.lefttree,
												 estate,
												 gather->num_workers);

			/*
			 * Register backend workers. We might not get as many as we
			 * requested, or indeed any at all.
			 */
			pcxt = node->pei->pcxt;
			LaunchParallelWorkers(pcxt);

			/* Set up tuple queue readers to read the results. */
			if (pcxt->nworkers > 0)
			{
				node->nreaders = 0;
				node->reader =
					palloc(pcxt->nworkers * sizeof(TupleQueueReader *));

				for (i = 0; i < pcxt->nworkers; ++i)
				{
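					/* Worker slots that failed to launch have no bgwhandle; skip them. */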
					if (pcxt->worker[i].bgwhandle == NULL)
						continue;

					shm_mq_set_handle(node->pei->tqueue[i],
									  pcxt->worker[i].bgwhandle);
					node->reader[node->nreaders++] =
						CreateTupleQueueReader(node->pei->tqueue[i],
											   fslot->tts_tupleDescriptor);
					got_any_worker = true;
				}
			}

			/* No workers?  Then never mind. */
			if (!got_any_worker)
				ExecShutdownGatherWorkers(node);
		}

		/* Run plan locally if no workers or not single-copy. */
		node->need_to_scan_locally = (node->reader == NULL)
			|| !gather->single_copy;
		node->initialized = true;
	}

	/*
	 * Check to see if we're still projecting out tuples from a previous scan
	 * tuple (because there is a function-returning-set in the projection
	 * expressions).  If so, try to project another one.
	 */
	if (node->ps.ps_TupFromTlist)
	{
		resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);
		if (isDone == ExprMultipleResult)
			return resultSlot;
		/* Done with that source tuple... */
		node->ps.ps_TupFromTlist = false;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.  Note we can't do this
	 * until we're done projecting.
	 */
	econtext = node->ps.ps_ExprContext;
	ResetExprContext(econtext);

	/* Get and return the next tuple, projecting if necessary. */
	for (;;)
	{
		/*
		 * Get next tuple, either from one of our workers, or by running the
		 * plan ourselves.
		 */
		slot = gather_getnext(node);
		if (TupIsNull(slot))
			return NULL;

		/*
		 * form the result tuple using ExecProject(), and return it --- unless
		 * the projection produces an empty set, in which case we must loop
		 * back around for another tuple
		 */
		econtext->ecxt_outertuple = slot;
		resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);

		if (isDone != ExprEndResult)
		{
			node->ps.ps_TupFromTlist = (isDone == ExprMultipleResult);
			return resultSlot;
		}
	}
}
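
The loop above leans on gather_getnext() for the actual tuple supply: it
hides whether a tuple came from a worker's shared-memory queue or from the
leader running the plan itself. The following is a minimal sketch of that
dispatch logic, reconstructed from the fields used above (reader,
need_to_scan_locally, funnel_slot), not the actual implementation; the real
routine in nodeGather.c round-robins across the reader array, and its
queue-draining details are folded here into an assumed gather_readnext()
helper that returns NULL and clears node->reader once every queue is
exhausted.

static TupleTableSlot *
gather_getnext_sketch(GatherState *gatherstate)
{
	PlanState  *outerPlan = outerPlanState(gatherstate);
	TupleTableSlot *fslot = gatherstate->funnel_slot;

	while (gatherstate->reader != NULL || gatherstate->need_to_scan_locally)
	{
		/* First preference: a tuple forwarded by one of the workers. */
		if (gatherstate->reader != NULL)
		{
			/* Assumed helper: NULLs out gatherstate->reader at end-of-data. */
			HeapTuple	tup = gather_readnext(gatherstate);

			if (HeapTupleIsValid(tup))
			{
				/* Store the worker's tuple in the funnel slot and return it. */
				ExecStoreTuple(tup, fslot, InvalidBuffer, true);
				return fslot;
			}
		}

		/* No worker tuple available: run the plan in the leader instead. */
		if (gatherstate->need_to_scan_locally)
		{
			TupleTableSlot *outerTupleSlot = ExecProcNode(outerPlan);

			if (!TupIsNull(outerTupleSlot))
				return outerTupleSlot;
			gatherstate->need_to_scan_locally = false;
		}
	}

	/* Both sources exhausted; return an empty slot to signal end-of-data. */
	return ExecClearTuple(fslot);
}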
Example 2
/* ----------------------------------------------------------------
 *		ExecGatherMerge(node)
 *
 *		Scans the relation via multiple workers and returns
 *		the next qualifying tuple.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecGatherMerge(PlanState *pstate)
{
	GatherMergeState *node = castNode(GatherMergeState, pstate);
	TupleTableSlot *slot;
	ExprContext *econtext;
	int			i;

	CHECK_FOR_INTERRUPTS();

	/*
	 * As with Gather, we don't launch workers until this node is actually
	 * executed.
	 */
	if (!node->initialized)
	{
		EState	   *estate = node->ps.state;
		GatherMerge *gm = (GatherMerge *) node->ps.plan;

		/*
		 * Sometimes we might have to run without parallelism; but if parallel
		 * mode is active then we can try to fire up some workers.
		 */
		if (gm->num_workers > 0 && IsInParallelMode())
		{
			ParallelContext *pcxt;

			/* Initialize data structures for workers. */
			if (!node->pei)
				node->pei = ExecInitParallelPlan(node->ps.lefttree,
												 estate,
												 gm->num_workers);

			/* Try to launch workers. */
			pcxt = node->pei->pcxt;
			LaunchParallelWorkers(pcxt);
			node->nworkers_launched = pcxt->nworkers_launched;

			/* Set up tuple queue readers to read the results. */
			if (pcxt->nworkers_launched > 0)
			{
				node->nreaders = 0;
				node->reader = palloc(pcxt->nworkers_launched *
									  sizeof(TupleQueueReader *));

				Assert(gm->numCols);

				for (i = 0; i < pcxt->nworkers_launched; ++i)
				{
					shm_mq_set_handle(node->pei->tqueue[i],
									  pcxt->worker[i].bgwhandle);
					node->reader[node->nreaders++] =
						CreateTupleQueueReader(node->pei->tqueue[i],
											   node->tupDesc);
				}
			}
			else
			{
				/* No workers?  Then never mind. */
				ExecShutdownGatherMergeWorkers(node);
			}
		}

		/* always allow leader to participate */
		node->need_to_scan_locally = true;
		node->initialized = true;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.
	 */
	econtext = node->ps.ps_ExprContext;
	ResetExprContext(econtext);

	/*
	 * Get next tuple, either from one of our workers, or by running the plan
	 * ourselves.
	 */
	slot = gather_merge_getnext(node);
	if (TupIsNull(slot))
		return NULL;

	/*
	 * form the result tuple using ExecProject(), and return it --- unless the
	 * projection produces an empty set, in which case we must loop back
	 * around for another tuple
	 */
	econtext->ecxt_outertuple = slot;
	return ExecProject(node->ps.ps_ProjInfo);
}
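
Because each participant's stream arrives already sorted, ExecGatherMerge
only has to merge: gather_merge_getnext() keeps one "current" tuple per
participant (leader plus workers) and a binary heap of participant indexes,
ordered by comparing those current tuples on the node's sort keys. Below is
a sketch of that merge step, not the actual implementation; the helpers
gather_merge_init() (load one tuple per participant and build the heap) and
gather_merge_readnext() (advance one participant's current tuple, returning
false when that participant is exhausted) are assumed rather than shown.

static TupleTableSlot *
gather_merge_getnext_sketch(GatherMergeState *gm_state)
{
	int			i;

	if (!gm_state->gm_initialized)
	{
		/* First call: prime every participant and build the heap. */
		gather_merge_init(gm_state);	/* assumed helper */
	}
	else
	{
		/*
		 * The participant whose tuple we returned last time is on top of
		 * the heap.  Load its next tuple and sift it back into place, or
		 * drop it from the heap if it has nothing more to offer.
		 */
		i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));

		if (gather_merge_readnext(gm_state, i, false))	/* assumed helper */
			binaryheap_replace_first(gm_state->gm_heap, Int32GetDatum(i));
		else
			(void) binaryheap_remove_first(gm_state->gm_heap);
	}

	/* An empty heap means every participant is exhausted. */
	if (binaryheap_empty(gm_state->gm_heap))
		return NULL;

	/* The top of the heap names the participant holding the smallest tuple. */
	i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));
	return gm_state->gm_slots[i];
}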