Example no. 1
/* ----------------------------------------------------------------
 *		ExecUnique
 * ----------------------------------------------------------------
 */
TupleTableSlot *				/* return: a tuple or NULL */
ExecUnique(UniqueState *node)
{
	Unique	   *plannode = (Unique *) node->ps.plan;
	TupleTableSlot *resultTupleSlot;
	TupleTableSlot *slot;
	PlanState  *outerPlan;

	/*
	 * get information from the node
	 */
	outerPlan = outerPlanState(node);
	resultTupleSlot = node->ps.ps_ResultTupleSlot;

	/*
	 * now loop, returning only non-duplicate tuples. We assume that the
	 * tuples arrive in sorted order so we can detect duplicates easily. The
	 * first tuple of each group is returned.
	 */
	for (;;)
	{
		/*
		 * fetch a tuple from the outer subplan
		 */
		slot = ExecProcNode(outerPlan);
		if (TupIsNull(slot))
		{
			/* end of subplan, so we're done */
			ExecClearTuple(resultTupleSlot);
			return NULL;
		}

		/*
		 * Always return the first tuple from the subplan.
		 */
		if (TupIsNull(resultTupleSlot))
			break;

		/*
		 * Else test if the new tuple and the previously returned tuple match.
		 * If so then we loop back and fetch another new tuple from the
		 * subplan.
		 */
		if (!execTuplesMatch(slot, resultTupleSlot,
							 plannode->numCols, plannode->uniqColIdx,
							 node->eqfunctions,
							 node->tempContext))
			break;
	}

	/*
	 * We have a new tuple different from the previous saved tuple (if any).
	 * Save it and return it.  We must copy it because the source subplan
	 * won't guarantee that this source tuple is still accessible after
	 * fetching the next source tuple.
	 */
	return ExecCopySlot(resultTupleSlot, slot);
}
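As a standalone illustration of the same duplicate-skipping loop, here is a minimal sketch (not taken from the PostgreSQL tree; next_row() and unique_next() are hypothetical stand-ins for ExecProcNode and ExecUnique) that applies the pattern to a sorted array of ints, with the saved value playing the role of ps_ResultTupleSlot.

/*
 * Illustration only: "return the first tuple of each group of duplicates"
 * over a sorted int stream.
 */
#include <stdbool.h>
#include <stdio.h>

static const int input[] = {1, 1, 2, 3, 3, 3, 4};
static const int n_input = sizeof(input) / sizeof(input[0]);
static int pos = 0;

/* stand-in for ExecProcNode: returns false at end of the "subplan" */
static bool next_row(int *out)
{
	if (pos >= n_input)
		return false;
	*out = input[pos++];
	return true;
}

/* stand-in for ExecUnique: returns false when nothing is left */
static bool unique_next(int *result, bool *have_last)
{
	int		row;

	for (;;)
	{
		if (!next_row(&row))
			return false;		/* end of subplan */
		if (!*have_last || row != *result)
			break;				/* first of a new group */
		/* duplicate of the previously returned value: loop back */
	}
	*result = row;				/* "copy" the tuple into the result slot */
	*have_last = true;
	return true;
}

int main(void)
{
	int		last = 0;
	bool	have_last = false;

	while (unique_next(&last, &have_last))
		printf("%d\n", last);	/* prints 1 2 3 4 */
	return 0;
}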
Example no. 2
TupleTableSlot *
ExecTwice(TwiceState *node)
{
	TupleTableSlot *resultTupleSlot;
	TupleTableSlot *slot;
	PlanState *outerPlan;

	/*
	 * get information from the node
	 */
	outerPlan = outerPlanState(node);
	resultTupleSlot = node->ps.ps_ResultTupleSlot;

	/*
	 * Fetch a tuple from outer plan, and make it a result tuple.
	 */
	if (node->isFirst)
	{
		/*
		 * fetch a tuple from the outer subplan
		 */
		slot = ExecProcNode(outerPlan);
		if (TupIsNull(slot))
		{
			/* end of subplan, so we're done */
			ExecClearTuple(resultTupleSlot);
			return NULL;
		}
		node->isFirst = false;
		return ExecCopySlot(resultTupleSlot, slot);
	}

	/*
	 * Otherwise the saved tuple has already been returned once: return it a
	 * second time, and flag that the next call should advance the subplan.
	 */
	node->isFirst = true;
	return resultTupleSlot;
}
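The two-state logic of ExecTwice can be restated outside the executor. The sketch below is illustrative only (TwiceIter and twice_next are hypothetical names): it emits every element of an input stream twice, using the same isFirst toggle and a saved value in place of the result slot.

#include <stdbool.h>
#include <stdio.h>

typedef struct TwiceIter
{
	const int  *input;
	int			n;
	int			pos;
	int			saved;			/* plays the role of ps_ResultTupleSlot */
	bool		isFirst;
} TwiceIter;

static bool twice_next(TwiceIter *it, int *out)
{
	if (it->isFirst)
	{
		if (it->pos >= it->n)
			return false;		/* end of subplan */
		it->saved = it->input[it->pos++];
		it->isFirst = false;
	}
	else
	{
		/* second copy of the saved row; advance on the next call */
		it->isFirst = true;
	}
	*out = it->saved;
	return true;
}

int main(void)
{
	static const int data[] = {7, 8, 9};
	TwiceIter	it = {data, 3, 0, 0, true};
	int			v;

	while (twice_next(&it, &v))
		printf("%d\n", v);		/* prints 7 7 8 8 9 9 */
	return 0;
}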
Example no. 3
/* ----------------------------------------------------------------
 *		ExecMaterial
 *
 *		As long as we are at the end of the data collected in the tuplestore,
 *		we collect one new row from the subplan on each call, and stash it
 *		aside in the tuplestore before returning it.  The tuplestore is
 *		only read if we are asked to scan backwards, rescan, or mark/restore.
 *
 * ----------------------------------------------------------------
 */
TupleTableSlot *				/* result tuple from subplan */
ExecMaterial(MaterialState *node)
{
	EState	   *estate;
	ScanDirection dir;
	bool		forward;

	NTupleStore *ts;
	NTupleStoreAccessor *tsa;

	bool		eof_tuplestore;
	TupleTableSlot *slot;
	Material *ma;
	
	/*
	 * get state info from node
	 */
	estate = node->ss.ps.state;
	dir = estate->es_direction;
	forward = ScanDirectionIsForward(dir);

	ts = node->ts_state->matstore;
	tsa = (NTupleStoreAccessor *) node->ts_pos;

	ma = (Material *) node->ss.ps.plan;
	Assert(IsA(ma, Material));

	/*
	 * If first time through, and we need a tuplestore, initialize it.
	 */
	if (ts == NULL && (ma->share_type != SHARE_NOTSHARED || node->randomAccess))
	{
		/*
		 * For cross-slice material, we only run ExecMaterial on the driver slice.
		 */
		if(ma->share_type == SHARE_MATERIAL_XSLICE)
		{
			char rwfile_prefix[100];

			if(ma->driver_slice != currentSliceId)
			{
				elog(LOG, "Material Exec on CrossSlice, current slice %d", currentSliceId);
				return NULL;
			}
			
			shareinput_create_bufname_prefix(rwfile_prefix, sizeof(rwfile_prefix), ma->share_id); 
			elog(LOG, "Material node creates shareinput rwfile %s", rwfile_prefix);

			ts = ntuplestore_create_readerwriter(rwfile_prefix, PlanStateOperatorMemKB((PlanState *)node) * 1024, true);
			tsa = ntuplestore_create_accessor(ts, true);
		}
		else
		{
			/* Non-shared Materialize node */
			bool isWriter = true;
			workfile_set *work_set = NULL;

			if (gp_workfile_caching)
			{
				work_set = workfile_mgr_find_set( &node->ss.ps);

				if (NULL != work_set)
				{
					/* Reusing cached workfiles. Tell subplan we won't be needing any tuples */
					elog(gp_workfile_caching_loglevel, "Materialize reusing cached workfiles, initiating Squelch walker");

					isWriter = false;
					ExecSquelchNode(outerPlanState(node));
					node->eof_underlying = true;
					node->cached_workfiles_found = true;

					if (node->ss.ps.instrument)
					{
						node->ss.ps.instrument->workfileReused = true;
					}
				}
			}

			if (NULL == work_set)
			{
				/*
				 * No work_set was found, either because:
				 *  a. workfile caching is enabled but no reusable set was found, or
				 *  b. workfile caching is disabled.
				 * Create a new empty workset.
				 */
				Assert(!node->cached_workfiles_found);

				/* Don't try to cache when running under a ShareInputScan node */
				bool can_reuse = (ma->share_type == SHARE_NOTSHARED);

				work_set = workfile_mgr_create_set(BUFFILE, can_reuse, &node->ss.ps, NULL_SNAPSHOT);
				isWriter = true;
			}

			Assert(NULL != work_set);
			AssertEquivalent(node->cached_workfiles_found, !isWriter);

			ts = ntuplestore_create_workset(work_set, node->cached_workfiles_found,
					PlanStateOperatorMemKB((PlanState *) node) * 1024);
			tsa = ntuplestore_create_accessor(ts, isWriter);
		}
		
		Assert(ts && tsa);
		node->ts_state->matstore = ts;
		node->ts_pos = (void *) tsa;

		/* CDB: Offer extra info for EXPLAIN ANALYZE. */
		if (node->ss.ps.instrument)
		{
			/* Let the tuplestore share our Instrumentation object. */
			ntuplestore_setinstrument(ts, node->ss.ps.instrument);

			/* Request a callback at end of query. */
			node->ss.ps.cdbexplainfun = ExecMaterialExplainEnd;
		}

		/*
		 * MPP: If requested, fetch all rows from subplan and put them
		 * in the tuplestore.  This decouples a middle slice's receiving
		 * and sending Motion operators to neutralize a deadlock hazard.
		 * MPP TODO: Remove when a better solution is implemented.
		 *
		 * ShareInput: if the material node is used to share input, we need
		 * to fetch all rows and put them in the tuplestore.
		 */
		while (((Material *) node->ss.ps.plan)->cdb_strict
				|| ma->share_type != SHARE_NOTSHARED)
		{
			/*
			 * When reusing cached workfiles, we already have all the tuples,
			 * and we don't need to read anything from subplan.
			 */
			if (node->cached_workfiles_found)
			{
				break;
			}
			TupleTableSlot *outerslot = ExecProcNode(outerPlanState(node));

			if (TupIsNull(outerslot))
			{
				node->eof_underlying = true;

				if (ntuplestore_created_reusable_workfiles(ts))
				{
					ntuplestore_flush(ts);
					ntuplestore_mark_workset_complete(ts);
				}

				ntuplestore_acc_seek_bof(tsa);

				break;
			}
			Gpmon_M_Incr(GpmonPktFromMaterialState(node), GPMON_QEXEC_M_ROWSIN); 

			ntuplestore_acc_put_tupleslot(tsa, outerslot);
		}
	
		CheckSendPlanStateGpmonPkt(&node->ss.ps);

		if(forward)
			ntuplestore_acc_seek_bof(tsa);
		else
			ntuplestore_acc_seek_eof(tsa);

		/* For shared input, the Material node does not need to return any tuples. */
		if(ma->share_type != SHARE_NOTSHARED)
		{
			Assert(ma->share_type == SHARE_MATERIAL || ma->share_type == SHARE_MATERIAL_XSLICE);
			/*
			 * If the material is shared across slices, notify consumers that
			 * it is ready.
			 */
			if(ma->share_type == SHARE_MATERIAL_XSLICE) 
			{
				if (ma->driver_slice == currentSliceId)
				{
					ntuplestore_flush(ts);

					node->share_lk_ctxt = shareinput_writer_notifyready(ma->share_id, ma->nsharer_xslice,
							estate->es_plannedstmt->planGen);
				}
			}
			return NULL;
		}
	}

	if(ma->share_type != SHARE_NOTSHARED)
		return NULL;

	/*
	 * If we can fetch another tuple from the tuplestore, return it.
	 */
	slot = node->ss.ps.ps_ResultTupleSlot;

	if(forward)
		eof_tuplestore = (tsa == NULL) || !ntuplestore_acc_advance(tsa, 1);
	else
		eof_tuplestore = (tsa == NULL) || !ntuplestore_acc_advance(tsa, -1);

	if (tsa != NULL && ntuplestore_acc_tell(tsa, NULL))
	{
		ntuplestore_acc_current_tupleslot(tsa, slot);
		if (!TupIsNull(slot))
		{
			Gpmon_M_Incr_Rows_Out(GpmonPktFromMaterialState(node));
			CheckSendPlanStateGpmonPkt(&node->ss.ps);
		}
		return slot;
	}

	/*
	 * If necessary, try to fetch another row from the subplan.
	 *
	 * Note: the eof_underlying state variable exists to short-circuit further
	 * subplan calls.  It's not optional, unfortunately, because some plan
	 * node types are not robust about being called again when they've already
	 * returned NULL.
	 * If reusing cached workfiles, there is no need to execute the subplan at all.
	 */
	if (eof_tuplestore && !node->eof_underlying)
	{
		PlanState  *outerNode;
		TupleTableSlot *outerslot;

		Assert(!node->cached_workfiles_found && "we shouldn't get here when using cached workfiles");

		/*
		 * We can only get here with forward==true, so no need to worry about
		 * which direction the subplan will go.
		 */
		outerNode = outerPlanState(node);
		outerslot = ExecProcNode(outerNode);
		if (TupIsNull(outerslot))
		{
			node->eof_underlying = true;
			if (ntuplestore_created_reusable_workfiles(ts))
			{
				ntuplestore_flush(ts);
				ntuplestore_mark_workset_complete(ts);
			}

			if (!node->ss.ps.delayEagerFree)
			{
				ExecEagerFreeMaterial(node);
			}

			return NULL;
		}

		Gpmon_M_Incr(GpmonPktFromMaterialState(node), GPMON_QEXEC_M_ROWSIN); 

		if (tsa)
			ntuplestore_acc_put_tupleslot(tsa, outerslot);

		/*
		 * And return a copy of the tuple.	(XXX couldn't we just return the
		 * outerslot?)
		 */
		Gpmon_M_Incr_Rows_Out(GpmonPktFromMaterialState(node));
		CheckSendPlanStateGpmonPkt(&node->ss.ps);
		return ExecCopySlot(slot, outerslot);
	}


	if (!node->ss.ps.delayEagerFree)
	{
		ExecEagerFreeMaterial(node);
	}

	/*
	 * Nothing left ...
	 */
	return NULL;
}
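Stripped of the Greenplum workfile and share-input machinery, what this node relies on from the tuplestore is: fill it lazily from the subplan, then serve later reads and rescans from the store without re-running the subplan. The sketch below illustrates just that core under those simplifications; Mat, mat_next() and mat_rescan() are hypothetical stand-ins for the NTupleStore accessor calls.

#include <stdbool.h>
#include <stdio.h>

#define CACHE_MAX 16

typedef struct Mat
{
	/* the "subplan": a finite stream of ints */
	const int  *src;
	int			src_len;
	int			src_pos;
	bool		eof_underlying;

	/* the "tuplestore" */
	int			cache[CACHE_MAX];
	int			cached;			/* number of rows stashed so far */
	int			read_pos;		/* current read position in the cache */
} Mat;

static bool mat_next(Mat *m, int *out)
{
	if (m->read_pos < m->cached)
	{
		*out = m->cache[m->read_pos++];		/* serve from the store */
		return true;
	}
	if (m->eof_underlying || m->src_pos >= m->src_len)
	{
		m->eof_underlying = true;			/* short-circuit further calls */
		return false;
	}
	/* fetch one more row from the source and stash it before returning */
	*out = m->src[m->src_pos++];
	m->cache[m->cached++] = *out;
	m->read_pos = m->cached;
	return true;
}

static void mat_rescan(Mat *m)
{
	m->read_pos = 0;			/* like ntuplestore_acc_seek_bof() */
}

int main(void)
{
	static const int data[] = {10, 20, 30};
	Mat			m = {data, 3, 0, false, {0}, 0, 0};
	int			v;

	while (mat_next(&m, &v))
		printf("first pass: %d\n", v);
	mat_rescan(&m);
	while (mat_next(&m, &v))
		printf("rescan:     %d\n", v);		/* served from the cache */
	return 0;
}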
Example no. 4
/* ----------------------------------------------------------------
 *		CteScanNext
 *
 *		This is a workhorse for ExecCteScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
CteScanNext(CteScanState *node)
{
	EState	   *estate;
	ScanDirection dir;
	bool		forward;
	Tuplestorestate *tuplestorestate;
	bool		eof_tuplestore;
	TupleTableSlot *slot;

	/*
	 * get state info from node
	 */
	estate = node->ss.ps.state;
	dir = estate->es_direction;
	forward = ScanDirectionIsForward(dir);
	tuplestorestate = node->leader->cte_table;
	tuplestore_select_read_pointer(tuplestorestate, node->readptr);
	slot = node->ss.ss_ScanTupleSlot;

	/*
	 * If we are not at the end of the tuplestore, or are going backwards, try
	 * to fetch a tuple from tuplestore.
	 */
	eof_tuplestore = tuplestore_ateof(tuplestorestate);

	if (!forward && eof_tuplestore)
	{
		if (!node->leader->eof_cte)
		{
			/*
			 * When reversing direction at tuplestore EOF, the first
			 * gettupleslot call will fetch the last-added tuple; but we want
			 * to return the one before that, if possible. So do an extra
			 * fetch.
			 */
			if (!tuplestore_advance(tuplestorestate, forward))
				return NULL;	/* the tuplestore must be empty */
		}
		eof_tuplestore = false;
	}

	/*
	 * If we can fetch another tuple from the tuplestore, return it.
	 *
	 * Note: we have to use copy=true in the tuplestore_gettupleslot call,
	 * because we are sharing the tuplestore with other nodes that might write
	 * into the tuplestore before we get called again.
	 */
	if (!eof_tuplestore)
	{
		if (tuplestore_gettupleslot(tuplestorestate, forward, true, slot))
			return slot;
		if (forward)
			eof_tuplestore = true;
	}

	/*
	 * If necessary, try to fetch another row from the CTE query.
	 *
	 * Note: the eof_cte state variable exists to short-circuit further calls
	 * of the CTE plan.  It's not optional, unfortunately, because some plan
	 * node types are not robust about being called again when they've already
	 * returned NULL.
	 */
	if (eof_tuplestore && !node->leader->eof_cte)
	{
		TupleTableSlot *cteslot;

		/*
		 * We can only get here with forward==true, so no need to worry about
		 * which direction the subplan will go.
		 */
		cteslot = ExecProcNode(node->cteplanstate);
		if (TupIsNull(cteslot))
		{
			node->leader->eof_cte = true;
			return NULL;
		}

		/*
		 * There are corner cases where the subplan could change which
		 * tuplestore read pointer is active, so be sure to reselect ours
		 * before storing the tuple we got.
		 */
		tuplestore_select_read_pointer(tuplestorestate, node->readptr);

		/*
		 * Append a copy of the returned tuple to tuplestore.  NOTE: because
		 * our read pointer is certainly in EOF state, its read position will
		 * move forward over the added tuple.  This is what we want.  Also,
		 * any other readers will *not* move past the new tuple, which is what
		 * they want.
		 */
		tuplestore_puttupleslot(tuplestorestate, cteslot);

		/*
		 * We MUST copy the CTE query's output tuple into our own slot. This
		 * is because other CteScan nodes might advance the CTE query before
		 * we are called again, and our output tuple must stay stable over
		 * that.
		 */
		return ExecCopySlot(slot, cteslot);
	}

	/*
	 * Nothing left ...
	 */
	return ExecClearTuple(slot);
}
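The arrangement CteScanNext depends on, a single producer feeding an append-only store that several consumers read at their own positions, can be sketched as follows. Illustration only; SharedCte and CteReader are hypothetical stand-ins for the shared CTE state and the per-scan read pointers.

#include <stdbool.h>
#include <stdio.h>

#define BUF_MAX 16

typedef struct SharedCte
{
	const int  *src;			/* the CTE subplan */
	int			src_len;
	int			src_pos;
	bool		eof_cte;

	int			buf[BUF_MAX];	/* the shared tuplestore */
	int			stored;
} SharedCte;

typedef struct CteReader
{
	SharedCte  *leader;
	int			readptr;		/* this reader's position in the buffer */
} CteReader;

static bool cte_next(CteReader *r, int *out)
{
	SharedCte  *cte = r->leader;

	if (r->readptr < cte->stored)
	{
		*out = cte->buf[r->readptr++];		/* already materialized */
		return true;
	}
	if (cte->eof_cte || cte->src_pos >= cte->src_len)
	{
		cte->eof_cte = true;
		return false;
	}
	/* run the producer once and append the row for all readers */
	cte->buf[cte->stored++] = cte->src[cte->src_pos++];
	*out = cte->buf[r->readptr++];
	return true;
}

int main(void)
{
	static const int data[] = {1, 2, 3};
	SharedCte	cte = {data, 3, 0, false, {0}, 0};
	CteReader	a = {&cte, 0};
	CteReader	b = {&cte, 0};
	int			v;

	/* reader a drives the producer; reader b replays from the buffer */
	while (cte_next(&a, &v))
		printf("a: %d\n", v);
	while (cte_next(&b, &v))
		printf("b: %d\n", v);
	return 0;
}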
Example no. 5
/* ----------------------------------------------------------------
 *		ExecSetOp
 * ----------------------------------------------------------------
 */
TupleTableSlot *				/* return: a tuple or NULL */
ExecSetOp(SetOpState *node)
{
	SetOp	   *plannode = (SetOp *) node->ps.plan;
	TupleTableSlot *resultTupleSlot;
	PlanState  *outerPlan;

	/*
	 * get information from the node
	 */
	outerPlan = outerPlanState(node);
	resultTupleSlot = node->ps.ps_ResultTupleSlot;

	/*
	 * If the previously-returned tuple needs to be returned more than once,
	 * keep returning it.
	 */
	if (node->numOutput > 0)
	{
		node->numOutput--;
		return resultTupleSlot;
	}

	/* Flag that we have no current tuple */
	ExecClearTuple(resultTupleSlot);

	/*
	 * Absorb groups of duplicate tuples, counting them, and saving the first
	 * of each group as a possible return value. At the end of each group,
	 * decide whether to return anything.
	 *
	 * We assume that the tuples arrive in sorted order so we can detect
	 * duplicates easily.
	 */
	for (;;)
	{
		TupleTableSlot *inputTupleSlot;
		bool		endOfGroup;

		/*
		 * fetch a tuple from the outer subplan, unless we already did.
		 */
		if (node->ps.ps_OuterTupleSlot == NULL &&
			!node->subplan_done)
		{
			node->ps.ps_OuterTupleSlot =
				ExecProcNode(outerPlan);
			if (TupIsNull(node->ps.ps_OuterTupleSlot))
				node->subplan_done = true;
		}
		inputTupleSlot = node->ps.ps_OuterTupleSlot;

		if (TupIsNull(resultTupleSlot))
		{
			/*
			 * First of group: save a copy in result slot, and reset
			 * duplicate-counters for new group.
			 */
			if (node->subplan_done)
				return NULL;	/* no more tuples */
			ExecCopySlot(resultTupleSlot, inputTupleSlot);
			node->numLeft = 0;
			node->numRight = 0;
			endOfGroup = false;
		}
		else if (node->subplan_done)
		{
			/*
			 * Reached end of input, so finish processing final group
			 */
			endOfGroup = true;
		}
		else
		{
			/*
			 * Else test if the new tuple and the previously saved tuple
			 * match.
			 */
			if (execTuplesMatch(inputTupleSlot,
								resultTupleSlot,
								plannode->numCols, plannode->dupColIdx,
								node->eqfunctions,
								node->tempContext))
				endOfGroup = false;
			else
				endOfGroup = true;
		}

		if (endOfGroup)
		{
			/*
			 * We've reached the end of the group containing resultTuple.
			 * Decide how many copies (if any) to emit.  This logic is
			 * straight from the SQL92 specification.
			 */
			switch (plannode->cmd)
			{
				case SETOPCMD_INTERSECT:
					if (node->numLeft > 0 && node->numRight > 0)
						node->numOutput = 1;
					else
						node->numOutput = 0;
					break;
				case SETOPCMD_INTERSECT_ALL:
					node->numOutput =
						(node->numLeft < node->numRight) ?
						node->numLeft : node->numRight;
					break;
				case SETOPCMD_EXCEPT:
					if (node->numLeft > 0 && node->numRight == 0)
						node->numOutput = 1;
					else
						node->numOutput = 0;
					break;
				case SETOPCMD_EXCEPT_ALL:
					node->numOutput =
						(node->numLeft < node->numRight) ?
						0 : (node->numLeft - node->numRight);
					break;
				default:
					elog(ERROR, "unrecognized set op: %d",
						 (int) plannode->cmd);
					break;
			}
			/* Fall out of for-loop if we have tuples to emit */
			if (node->numOutput > 0)
				break;
			/* Else flag that we have no current tuple, and loop around */
			ExecClearTuple(resultTupleSlot);
		}
		else
		{
			/*
			 * Current tuple is member of same group as resultTuple. Count it
			 * in the appropriate counter.
			 */
			int			flag;
			bool		isNull;

			flag = DatumGetInt32(slot_getattr(inputTupleSlot,
											  plannode->flagColIdx,
											  &isNull));
			Assert(!isNull);
			if (flag)
				node->numRight++;
			else
				node->numLeft++;
			/* Set flag to fetch a new input tuple, and loop around */
			node->ps.ps_OuterTupleSlot = NULL;
		}
	}

	/*
	 * If we fall out of loop, then we need to emit at least one copy of
	 * resultTuple.
	 */
	Assert(node->numOutput > 0);
	node->numOutput--;
	return resultTupleSlot;
}
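The per-group decision in the switch above is self-contained enough to factor out. The sketch below restates it as a standalone function (setop_output_count is a hypothetical name, not part of the executor) and exercises it on a group seen three times in the left input and twice in the right.

#include <stdio.h>

typedef enum SetOpCmd
{
	SETOPCMD_INTERSECT,
	SETOPCMD_INTERSECT_ALL,
	SETOPCMD_EXCEPT,
	SETOPCMD_EXCEPT_ALL
} SetOpCmd;

/* SQL92 rules for how many copies of the group to emit */
static long setop_output_count(SetOpCmd cmd, long numLeft, long numRight)
{
	switch (cmd)
	{
		case SETOPCMD_INTERSECT:
			return (numLeft > 0 && numRight > 0) ? 1 : 0;
		case SETOPCMD_INTERSECT_ALL:
			return (numLeft < numRight) ? numLeft : numRight;
		case SETOPCMD_EXCEPT:
			return (numLeft > 0 && numRight == 0) ? 1 : 0;
		case SETOPCMD_EXCEPT_ALL:
			return (numLeft < numRight) ? 0 : numLeft - numRight;
	}
	return 0;
}

int main(void)
{
	/* a group seen 3 times on the left and twice on the right */
	printf("INTERSECT     -> %ld\n", setop_output_count(SETOPCMD_INTERSECT, 3, 2));		/* 1 */
	printf("INTERSECT ALL -> %ld\n", setop_output_count(SETOPCMD_INTERSECT_ALL, 3, 2));	/* 2 */
	printf("EXCEPT        -> %ld\n", setop_output_count(SETOPCMD_EXCEPT, 3, 2));		/* 0 */
	printf("EXCEPT ALL    -> %ld\n", setop_output_count(SETOPCMD_EXCEPT_ALL, 3, 2));	/* 1 */
	return 0;
}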
Example no. 6
/*
 *	 ExecGroup -
 *
 *		Return one tuple for each group of matching input tuples.
 */
TupleTableSlot *
ExecGroup(GroupState *node)
{
	ExprContext *econtext;
	int			numCols;
	AttrNumber *grpColIdx;
	TupleTableSlot *firsttupleslot;
	TupleTableSlot *outerslot;

	/*
	 * get state info from node
	 */
	if (node->grp_done)
		return NULL;
	econtext = node->ss.ps.ps_ExprContext;
	numCols = ((Group *) node->ss.ps.plan)->numCols;
	grpColIdx = ((Group *) node->ss.ps.plan)->grpColIdx;

	/*
	 * The ScanTupleSlot holds the (copied) first tuple of each group.
	 */
	firsttupleslot = node->ss.ss_ScanTupleSlot;

	/*
	 * We need not call ResetExprContext here because execTuplesMatch will
	 * reset the per-tuple memory context once per input tuple.
	 */

	/*
	 * If first time through, acquire first input tuple and determine whether
	 * to return it or not.
	 */
	if (TupIsNull(firsttupleslot))
	{
		outerslot = ExecProcNode(outerPlanState(node));
		if (TupIsNull(outerslot))
		{
			/* empty input, so return nothing */
			node->grp_done = TRUE;
			return NULL;
		}
		/* Copy tuple, set up as input for qual test and projection */
		ExecCopySlot(firsttupleslot, outerslot);
		econtext->ecxt_scantuple = firsttupleslot;

		/*
		 * Check the qual (HAVING clause); if the group does not match, ignore
		 * it and fall into scan loop.
		 */
		if (ExecQual(node->ss.ps.qual, econtext, false))
		{
			/*
			 * Form and return a projection tuple using the first input tuple.
			 */
			return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
		}
	}

	/*
	 * This loop iterates once per input tuple group.  At the head of the
	 * loop, we have finished processing the first tuple of the group and now
	 * need to scan over all the other group members.
	 */
	for (;;)
	{
		/*
		 * Scan over all remaining tuples that belong to this group
		 */
		for (;;)
		{
			outerslot = ExecProcNode(outerPlanState(node));
			if (TupIsNull(outerslot))
			{
				/* no more groups, so we're done */
				node->grp_done = TRUE;
				return NULL;
			}

			/*
			 * Compare with first tuple and see if this tuple is of the same
			 * group.  If so, ignore it and keep scanning.
			 */
			if (!execTuplesMatch(firsttupleslot, outerslot,
								 numCols, grpColIdx,
								 node->eqfunctions,
								 econtext->ecxt_per_tuple_memory))
				break;
		}

		/*
		 * We have the first tuple of the next input group.  See if we want to
		 * return it.
		 */
		/* Copy tuple, set up as input for qual test and projection */
		ExecCopySlot(firsttupleslot, outerslot);
		econtext->ecxt_scantuple = firsttupleslot;

		/*
		 * Check the qual (HAVING clause); if the group does not match, ignore
		 * it and loop back to scan the rest of the group.
		 */
		if (ExecQual(node->ss.ps.qual, econtext, false))
		{
			/*
			 * Form and return a projection tuple using the first input tuple.
			 */
			return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
		}
	}

	/* NOTREACHED */
	return NULL;
}
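The control flow here, skip the remaining members of the current group, keep the first row of the next group, apply the HAVING test, and only then return, can be illustrated outside the executor. In the sketch below (hypothetical names throughout), having() plays the role of ExecQual and next_row() the role of ExecProcNode over a sorted int array.

#include <stdbool.h>
#include <stdio.h>

static const int input[] = {2, 2, 3, 3, 3, 4, 5, 5};
static const int n_input = sizeof(input) / sizeof(input[0]);
static int pos = 0;

static bool next_row(int *out)
{
	if (pos >= n_input)
		return false;
	*out = input[pos++];
	return true;
}

/* the HAVING clause: keep only even group keys */
static bool having(int group_first)
{
	return group_first % 2 == 0;
}

static bool group_next(int *result, bool *started)
{
	int		row;

	for (;;)
	{
		if (!next_row(&row))
			return false;		/* grp_done */
		if (*started && row == *result)
			continue;			/* same group: keep scanning */
		*result = row;			/* first tuple of a new group */
		*started = true;
		if (having(row))
			return true;		/* project and return it */
		/* group rejected by the qual: fall through to the next group */
	}
}

int main(void)
{
	int		first = 0;
	bool	started = false;

	while (group_next(&first, &started))
		printf("%d\n", first);	/* prints 2 4 */
	return 0;
}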
Example no. 7
/* ----------------------------------------------------------------
 *		ExecMaterial
 *
 *		As long as we are at the end of the data collected in the tuplestore,
 *		we collect one new row from the subplan on each call, and stash it
 *		aside in the tuplestore before returning it.  The tuplestore is
 *		only read if we are asked to scan backwards, rescan, or mark/restore.
 *
 * ----------------------------------------------------------------
 */
static TupleTableSlot *			/* result tuple from subplan */
ExecMaterial(PlanState *pstate)
{
	MaterialState *node = castNode(MaterialState, pstate);
	EState	   *estate;
	ScanDirection dir;
	bool		forward;
	Tuplestorestate *tuplestorestate;
	bool		eof_tuplestore;
	TupleTableSlot *slot;

	CHECK_FOR_INTERRUPTS();

	/*
	 * get state info from node
	 */
	estate = node->ss.ps.state;
	dir = estate->es_direction;
	forward = ScanDirectionIsForward(dir);
	tuplestorestate = node->tuplestorestate;

	/*
	 * If first time through, and we need a tuplestore, initialize it.
	 */
	if (tuplestorestate == NULL && node->eflags != 0)
	{
		tuplestorestate = tuplestore_begin_heap(true, false, work_mem);
		tuplestore_set_eflags(tuplestorestate, node->eflags);
		if (node->eflags & EXEC_FLAG_MARK)
		{
			/*
			 * Allocate a second read pointer to serve as the mark. We know it
			 * must have index 1, so needn't store that.
			 */
			int			ptrno PG_USED_FOR_ASSERTS_ONLY;

			ptrno = tuplestore_alloc_read_pointer(tuplestorestate,
												  node->eflags);
			Assert(ptrno == 1);
		}
		node->tuplestorestate = tuplestorestate;
	}

	/*
	 * If we are not at the end of the tuplestore, or are going backwards, try
	 * to fetch a tuple from tuplestore.
	 */
	eof_tuplestore = (tuplestorestate == NULL) ||
		tuplestore_ateof(tuplestorestate);

	if (!forward && eof_tuplestore)
	{
		if (!node->eof_underlying)
		{
			/*
			 * When reversing direction at tuplestore EOF, the first
			 * gettupleslot call will fetch the last-added tuple; but we want
			 * to return the one before that, if possible. So do an extra
			 * fetch.
			 */
			if (!tuplestore_advance(tuplestorestate, forward))
				return NULL;	/* the tuplestore must be empty */
		}
		eof_tuplestore = false;
	}

	/*
	 * If we can fetch another tuple from the tuplestore, return it.
	 */
	slot = node->ss.ps.ps_ResultTupleSlot;
	if (!eof_tuplestore)
	{
		if (tuplestore_gettupleslot(tuplestorestate, forward, false, slot))
			return slot;
		if (forward)
			eof_tuplestore = true;
	}

	/*
	 * If necessary, try to fetch another row from the subplan.
	 *
	 * Note: the eof_underlying state variable exists to short-circuit further
	 * subplan calls.  It's not optional, unfortunately, because some plan
	 * node types are not robust about being called again when they've already
	 * returned NULL.
	 */
	if (eof_tuplestore && !node->eof_underlying)
	{
		PlanState  *outerNode;
		TupleTableSlot *outerslot;

		/*
		 * We can only get here with forward==true, so no need to worry about
		 * which direction the subplan will go.
		 */
		outerNode = outerPlanState(node);
		outerslot = ExecProcNode(outerNode);
		if (TupIsNull(outerslot))
		{
			node->eof_underlying = true;
			return NULL;
		}

		/*
		 * Append a copy of the returned tuple to tuplestore.  NOTE: because
		 * the tuplestore is certainly in EOF state, its read position will
		 * move forward over the added tuple.  This is what we want.
		 */
		if (tuplestorestate)
			tuplestore_puttupleslot(tuplestorestate, outerslot);

		ExecCopySlot(slot, outerslot);
		return slot;
	}

	/*
	 * Nothing left ...
	 */
	return ExecClearTuple(slot);
}
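Compared with the Greenplum version in Example no. 3, this upstream code only has to handle direction changes over the tuplestore. The sketch below (illustration only, with a plain array standing in for a fully materialized tuplestore) shows the essence: forward reads advance the position by +1, backward reads by -1, and EOF is one step past the last stored row.

#include <stdbool.h>
#include <stdio.h>

typedef struct Store
{
	const int  *rows;
	int			nrows;
	int			read_pos;		/* index of the next row to return, forward */
} Store;

/* advance by +1 (forward) or -1 (backward); false means no more rows */
static bool store_advance(Store *s, int dir, int *out)
{
	if (dir > 0)
	{
		if (s->read_pos >= s->nrows)
			return false;		/* eof_tuplestore */
		*out = s->rows[s->read_pos++];
	}
	else
	{
		if (s->read_pos <= 0)
			return false;		/* ran off the front */
		*out = s->rows[--s->read_pos];
	}
	return true;
}

int main(void)
{
	static const int data[] = {1, 2, 3};
	Store		s = {data, 3, 0};
	int			v;

	while (store_advance(&s, +1, &v))
		printf("forward:  %d\n", v);		/* 1 2 3 */
	while (store_advance(&s, -1, &v))
		printf("backward: %d\n", v);		/* 3 2 1 */
	return 0;
}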
Example no. 8
/*
 *	 ExecGroup -
 *
 *		Return one tuple for each group of matching input tuples.
 */
TupleTableSlot *
ExecGroup(GroupState *node)
{
    ExprContext *econtext;
    int			numCols;
    AttrNumber *grpColIdx;
    TupleTableSlot *firsttupleslot;
    TupleTableSlot *outerslot;

    /*
     * get state info from node
     */
    if (node->grp_done)
        return NULL;
    econtext = node->ss.ps.ps_ExprContext;
    numCols = ((Group *) node->ss.ps.plan)->numCols;
    grpColIdx = ((Group *) node->ss.ps.plan)->grpColIdx;

    /*
     * Check to see if we're still projecting out tuples from a previous group
     * tuple (because there is a function-returning-set in the projection
     * expressions).  If so, try to project another one.
     */
    if (node->ss.ps.ps_TupFromTlist)
    {
        TupleTableSlot *result;
        ExprDoneCond isDone;

        result = ExecProject(node->ss.ps.ps_ProjInfo, &isDone);
        if (isDone == ExprMultipleResult)
            return result;
        /* Done with that source tuple... */
        node->ss.ps.ps_TupFromTlist = false;
    }

    /*
     * The ScanTupleSlot holds the (copied) first tuple of each group.
     */
    firsttupleslot = node->ss.ss_ScanTupleSlot;

    /*
     * We need not call ResetExprContext here because execTuplesMatch will
     * reset the per-tuple memory context once per input tuple.
     */

    /*
     * If first time through, acquire first input tuple and determine whether
     * to return it or not.
     */
    if (TupIsNull(firsttupleslot))
    {
        outerslot = ExecProcNode(outerPlanState(node));
        if (TupIsNull(outerslot))
        {
            /* empty input, so return nothing */
            node->grp_done = TRUE;
            return NULL;
        }
        /* Copy tuple into firsttupleslot */
        ExecCopySlot(firsttupleslot, outerslot);

        /*
         * Set it up as input for qual test and projection.  The expressions
         * will access the input tuple as varno OUTER.
         */
        econtext->ecxt_outertuple = firsttupleslot;

        /*
         * Check the qual (HAVING clause); if the group does not match, ignore
         * it and fall into scan loop.
         */
        if (ExecQual(node->ss.ps.qual, econtext, false))
        {
            /*
             * Form and return a projection tuple using the first input tuple.
             */
            TupleTableSlot *result;
            ExprDoneCond isDone;

            result = ExecProject(node->ss.ps.ps_ProjInfo, &isDone);

            if (isDone != ExprEndResult)
            {
                node->ss.ps.ps_TupFromTlist = (isDone == ExprMultipleResult);
                return result;
            }
        }
        else
            InstrCountFiltered1(node, 1);
    }

    /*
     * This loop iterates once per input tuple group.  At the head of the
     * loop, we have finished processing the first tuple of the group and now
     * need to scan over all the other group members.
     */
    for (;;)
    {
        /*
         * Scan over all remaining tuples that belong to this group
         */
        for (;;)
        {
            outerslot = ExecProcNode(outerPlanState(node));
            if (TupIsNull(outerslot))
            {
                /* no more groups, so we're done */
                node->grp_done = TRUE;
                return NULL;
            }

            /*
             * Compare with first tuple and see if this tuple is of the same
             * group.  If so, ignore it and keep scanning.
             */
            if (!execTuplesMatch(firsttupleslot, outerslot,
                                 numCols, grpColIdx,
                                 node->eqfunctions,
                                 econtext->ecxt_per_tuple_memory))
                break;
        }

        /*
         * We have the first tuple of the next input group.  See if we want to
         * return it.
         */
        /* Copy tuple, set up as input for qual test and projection */
        ExecCopySlot(firsttupleslot, outerslot);
        econtext->ecxt_outertuple = firsttupleslot;

        /*
         * Check the qual (HAVING clause); if the group does not match, ignore
         * it and loop back to scan the rest of the group.
         */
        if (ExecQual(node->ss.ps.qual, econtext, false))
        {
            /*
             * Form and return a projection tuple using the first input tuple.
             */
            TupleTableSlot *result;
            ExprDoneCond isDone;

            result = ExecProject(node->ss.ps.ps_ProjInfo, &isDone);

            if (isDone != ExprEndResult)
            {
                node->ss.ps.ps_TupFromTlist = (isDone == ExprMultipleResult);
                return result;
            }
        }
        else
            InstrCountFiltered1(node, 1);
    }
}
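What distinguishes this version from Example no. 6 is the ps_TupFromTlist handling for set-returning projections: while the projection still has results pending for the current input row, the node keeps draining them before advancing the subplan. The sketch below reproduces that resume-or-advance loop; project() is a hypothetical set-returning projection that expands input i into i copies of i.

#include <stdbool.h>
#include <stdio.h>

typedef struct ProjState
{
	const int  *input;
	int			n;
	int			pos;
	int			cur;			/* current input row */
	int			emitted;		/* how many outputs produced for cur */
	bool		tupFromTlist;	/* projection still has more results */
} ProjState;

/* the "set-returning function": each input i expands to i copies of i */
static bool project(ProjState *st, int *out)
{
	if (st->emitted >= st->cur)
		return false;			/* ExprEndResult */
	st->emitted++;
	*out = st->cur;
	return true;				/* ExprMultipleResult while more remain */
}

static bool exec_next(ProjState *st, int *out)
{
	for (;;)
	{
		if (st->tupFromTlist)
		{
			if (project(st, out))
				return true;
			st->tupFromTlist = false;	/* done with that source tuple */
		}
		if (st->pos >= st->n)
			return false;
		st->cur = st->input[st->pos++];
		st->emitted = 0;
		st->tupFromTlist = true;
	}
}

int main(void)
{
	static const int data[] = {1, 2, 3};
	ProjState	st = {data, 3, 0, 0, 0, false};
	int			v;

	while (exec_next(&st, &v))
		printf("%d\n", v);		/* prints 1 2 2 3 3 3 */
	return 0;
}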