Example #1
/* ----------------------------------------------------------------
 *		ExecEndGather
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndGather(GatherState *node)
{
	ExecShutdownGather(node);
	ExecFreeExprContext(&node->ps);
	ExecClearTuple(node->ps.ps_ResultTupleSlot);
	ExecEndNode(outerPlanState(node));
}
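For context, ExecEndGather follows the standard executor teardown order: shut down the parallel machinery, free the expression context, clear the result slot, then end the child plan. The sketch below shows roughly what the ExecShutdownGather call at the top does; the exact body differs across PostgreSQL versions, and the teardown order shown (readers first, then the parallel context) is an assumption based on the 9.6-era executor. It presumes the usual nodeGather.c includes.

/*
 * Sketch of ExecShutdownGather (approximate; see the note above).
 *
 * Destroy the tuple queue readers first, then finish and clean up the
 * parallel context so the dynamic shared memory segment is released.
 */
void
ExecShutdownGather(GatherState *node)
{
	int			i;

	/* Shut down the tuple queue readers, if any were created. */
	if (node->reader != NULL)
	{
		for (i = 0; i < node->nreaders; ++i)
			DestroyTupleQueueReader(node->reader[i]);
		node->reader = NULL;
	}

	/* Wait for the workers to finish, then release the parallel context. */
	if (node->pei != NULL)
	{
		ExecParallelFinish(node->pei);
		ExecParallelCleanup(node->pei);
		node->pei = NULL;
	}
}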
Example #2
/* ----------------------------------------------------------------
 *		ExecGather(node)
 *
 *		Scans the relation via multiple workers and returns
 *		the next qualifying tuple.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecGather(GatherState *node)
{
	TupleTableSlot *fslot = node->funnel_slot;
	int			i;
	TupleTableSlot *slot;
	TupleTableSlot *resultSlot;
	ExprDoneCond isDone;
	ExprContext *econtext;

	/*
	 * Initialize the parallel context and workers on first execution. We do
	 * this on first execution rather than during node initialization, as it
	 * needs to allocate a large dynamic shared memory segment, so it is
	 * better to do it only if it is really needed.
	 */
	if (!node->initialized)
	{
		EState	   *estate = node->ps.state;
		Gather	   *gather = (Gather *) node->ps.plan;

		/*
		 * Sometimes we might have to run without parallelism; but if
		 * parallel mode is active then we can try to fire up some workers.
		 */
		if (gather->num_workers > 0 && IsInParallelMode())
		{
			ParallelContext *pcxt;
			bool	got_any_worker = false;

			/* Initialize the workers required to execute the Gather node. */
			if (!node->pei)
				node->pei = ExecInitParallelPlan(node->ps.lefttree,
												 estate,
												 gather->num_workers);

			/*
			 * Register backend workers. We might not get as many as we
			 * requested, or indeed any at all.
			 */
			pcxt = node->pei->pcxt;
			LaunchParallelWorkers(pcxt);

			/* Set up tuple queue readers to read the results. */
			if (pcxt->nworkers > 0)
			{
				node->nreaders = 0;
				node->reader =
					palloc(pcxt->nworkers * sizeof(TupleQueueReader *));

				for (i = 0; i < pcxt->nworkers; ++i)
				{
					if (pcxt->worker[i].bgwhandle == NULL)
						continue;

					shm_mq_set_handle(node->pei->tqueue[i],
									  pcxt->worker[i].bgwhandle);
					node->reader[node->nreaders++] =
						CreateTupleQueueReader(node->pei->tqueue[i],
											   fslot->tts_tupleDescriptor);
					got_any_worker = true;
				}
			}

			/* No workers?  Then never mind. */
			if (!got_any_worker)
				ExecShutdownGather(node);
		}

		/* Run plan locally if no workers or not single-copy. */
		node->need_to_scan_locally = (node->reader == NULL)
			|| !gather->single_copy;
		node->initialized = true;
	}

	/*
	 * Check to see if we're still projecting out tuples from a previous scan
	 * tuple (because there is a function-returning-set in the projection
	 * expressions).  If so, try to project another one.
	 */
	if (node->ps.ps_TupFromTlist)
	{
		resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);
		if (isDone == ExprMultipleResult)
			return resultSlot;
		/* Done with that source tuple... */
		node->ps.ps_TupFromTlist = false;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.  Note we can't do this
	 * until we're done projecting.
	 */
	econtext = node->ps.ps_ExprContext;
	ResetExprContext(econtext);

	/* Get and return the next tuple, projecting if necessary. */
	for (;;)
	{
		/*
		 * Get next tuple, either from one of our workers, or by running the
		 * plan ourselves.
		 */
		slot = gather_getnext(node);
		if (TupIsNull(slot))
			return NULL;

		/*
		 * form the result tuple using ExecProject(), and return it --- unless
		 * the projection produces an empty set, in which case we must loop
		 * back around for another tuple
		 */
		econtext->ecxt_outertuple = slot;
		resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);

		if (isDone != ExprEndResult)
		{
			node->ps.ps_TupFromTlist = (isDone == ExprMultipleResult);
			return resultSlot;
		}
	}

	return slot;
}
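Example #2 leans on gather_getnext to produce the next raw tuple, pulling either from a worker's tuple queue or from the leader's own copy of the plan when need_to_scan_locally is set. The real helper round-robins across all of the readers and waits on the process latch when every queue is momentarily empty; the simplified sketch below uses a single blocking reader instead, and its details (including who frees the returned tuple) are assumptions, not the actual routine. It presumes the usual nodeGather.c includes.

/*
 * Simplified sketch of a gather_getnext-style helper (see the note above).
 */
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
	PlanState  *outerPlan = outerPlanState(gatherstate);
	TupleTableSlot *fslot = gatherstate->funnel_slot;

	while (gatherstate->reader != NULL || gatherstate->need_to_scan_locally)
	{
		/* Try to read a tuple from a worker's shared-memory queue. */
		if (gatherstate->reader != NULL)
		{
			bool		readerdone = false;
			HeapTuple	tup;

			/* nowait = false: block until a tuple arrives or the queue ends */
			tup = TupleQueueReaderNext(gatherstate->reader[0], false,
									   &readerdone);
			if (HeapTupleIsValid(tup))
			{
				/* Assumption: the reader hands us a copy we must free. */
				ExecStoreTuple(tup, fslot, InvalidBuffer, true);
				return fslot;
			}
			if (readerdone)
				gatherstate->reader = NULL;		/* queues are exhausted */
		}

		/* Also run the plan in the leader unless that has been turned off. */
		if (gatherstate->need_to_scan_locally)
		{
			TupleTableSlot *outerTupleSlot = ExecProcNode(outerPlan);

			if (!TupIsNull(outerTupleSlot))
				return outerTupleSlot;
			gatherstate->need_to_scan_locally = false;
		}
	}

	/* Nothing left anywhere: return an empty slot to signal end of scan. */
	return ExecClearTuple(fslot);
}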