Example #1
/*
 * Build a per-output-tuple ExprContext for an EState.
 *
 * This is normally invoked via GetPerTupleExprContext() macro,
 * not directly.
 */
ExprContext *
MakePerTupleExprContext(EState *estate)
{
	if (estate->es_per_tuple_exprcontext == NULL)
		estate->es_per_tuple_exprcontext = CreateExprContext(estate);

	return estate->es_per_tuple_exprcontext;
}
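For orientation (not part of the example above): callers normally reach this function through the GetPerTupleExprContext() and ResetPerTupleExprContext() macros from executor.h. A minimal sketch of the usual per-row evaluation pattern, assuming the PostgreSQL 10+ signature of ExecEvalExprSwitchContext():

/*
 * Sketch only: evaluate an already-initialized expression once per row in
 * the shared per-tuple context, resetting that context between rows so
 * per-tuple allocations do not accumulate.
 */
static Datum
eval_expr_for_row(EState *estate, ExprState *exprstate, bool *isnull)
{
	ExprContext *econtext;

	/* Free whatever the previous row left behind in the per-tuple context. */
	ResetPerTupleExprContext(estate);

	/* Builds the context via MakePerTupleExprContext() on first use. */
	econtext = GetPerTupleExprContext(estate);

	return ExecEvalExprSwitchContext(exprstate, econtext, isnull);
}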
Example #2
extern TuplestoreScanState *
ExecInitTuplestoreScan(TuplestoreScan *node, EState *estate, int eflags)
{
	TuplestoreScanState *tss = makeNode(TuplestoreScanState);
	tss->ss.ps.plan = (Plan *) node;
	tss->ss.ps.state = estate;
	tss->ss.ps.ps_ExprContext = CreateExprContext(estate);

	ExecInitResultTupleSlot(estate, &tss->ss.ps);
	ExecInitScanTupleSlot(estate, &tss->ss);

	ExecSetSlotDescriptor(tss->ss.ss_ScanTupleSlot, node->desc);
	ExecSetSlotDescriptor(tss->ss.ps.ps_ResultTupleSlot, node->desc);

	tss->ss.ps.targetlist = node->scan.plan.targetlist;

	tuplestore_rescan(node->store);

	return tss;
}
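The matching scan routine is not shown on this page. A minimal sketch of how such a node might return tuples from the store, assuming the four-argument tuplestore_gettupleslot() of recent PostgreSQL releases:

/*
 * Sketch only: pull the next tuple out of the tuplestore into the scan
 * slot, returning an empty slot once the store is exhausted.
 */
static TupleTableSlot *
TuplestoreScanNext(TuplestoreScanState *tss)
{
	TuplestoreScan *node = (TuplestoreScan *) tss->ss.ps.plan;
	TupleTableSlot *slot = tss->ss.ss_ScanTupleSlot;

	if (!tuplestore_gettupleslot(node->store, true, false, slot))
		return ExecClearTuple(slot);

	return slot;
}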
Example #3
/* ----------------
 *		ExecAssignExprContext
 *
 *		This initializes the ps_ExprContext field.  It is only necessary
 *		to do this for nodes which use ExecQual or ExecProject
 *		because those routines require an econtext. Other nodes that
 *		don't have to evaluate expressions don't need to do this.
 * ----------------
 */
void
ExecAssignExprContext(EState *estate, PlanState *planstate)
{
	planstate->ps_ExprContext = CreateExprContext(estate);
}
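For comparison, the typical calling pattern in a scan node's ExecInit routine, abbreviated from PostgreSQL 10+'s ExecInitSeqScan() (only the lines relevant to the expression context are shown):

SeqScanState *
ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
{
	SeqScanState *scanstate = makeNode(SeqScanState);

	scanstate->ss.ps.plan = (Plan *) node;
	scanstate->ss.ps.state = estate;
	scanstate->ss.ps.ExecProcNode = ExecSeqScan;

	/* create expression context for node */
	ExecAssignExprContext(estate, &scanstate->ss.ps);

	/* ... remaining scan-slot and projection initialization elided ... */
	return scanstate;
}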
Example #4
/*
 * Function preprocess_initplans() is called from ExecutorRun when running a
 * parallel plan on the QD.  The call happens prior to dispatch of the
 * main plan, and only if there are some initplans.
 *
 * Argument queryDesc is the one passed in to ExecutorRun.
 *
 * The function loops through the estate->es_param_exec_vals array, which
 * has plan->nParamExec elements.  Each element is a ParamExecData struct,
 * and the index of the element in the array is the paramid of the Param
 * node in the Plan that corresponds to the result of the subquery.
 *
 * The execPlan member points to a SubPlanState struct for the
 * subquery.  The value and isnull members hold the result
 * of executing the SubPlan.
 *
 * The order of the elements in this array appears to guarantee that for a
 * subplan X within a subplan Y, X will come before Y in the array.  If a
 * subplan returns multiple columns (like a MULTIEXPR_SUBLINK), each will be
 * a separate entry in the es_param_exec_vals array, but they will all have
 * the same value for execPlan.
 *
 * To evaluate a subplan, we call ExecSetParamPlan.  This is a postgres
 * function, but it has been modified from its original form to parallelize
 * subplans.  Inside ExecSetParamPlan, the datum result(s) of the subplan are
 * stuffed into the value field of the ParamExecData struct(s); it finds the
 * proper one based on the setParam list in the SubPlan node.
 *
 * To handle SubPlans of SubPlans, we pass the values of
 * estate->es_param_exec_vals as ParamListInfo structs to the ExecSetParamPlan
 * call.  These are then serialized into the mppexec call as parameters, so
 * the result of a SubPlan of a SubPlan is available.
 */
void
preprocess_initplans(QueryDesc *queryDesc)
{
	ParamListInfo originalPli,
				augmentedPli;
	int			i;
	EState	   *estate = queryDesc->estate;
	int			originalSlice,
				rootIndex;

	if (queryDesc->plannedstmt->nCrossLevelParams == 0)
		return;

	originalPli = queryDesc->params;

	originalSlice = LocallyExecutingSliceIndex(queryDesc->estate);
	Assert(originalSlice == 0); /* Original slice being executed is slice 0 */

	/*
	 * Loop through the estate->es_param_exec_vals. This array has an element
	 * for each PARAM_EXEC (internal) param, and a pointer to the SubPlanState
	 * to execute to evaluate it. It seems that they are created in the proper
	 * order, i.e. if a subplan x has a subplan y, then y will come before x in
	 * the es_param_exec_vals array.
	 */
	for (i = 0; i < queryDesc->plannedstmt->nCrossLevelParams; i++)
	{
		ParamExecData *prm;
		SubPlanState *sps;

		prm = &estate->es_param_exec_vals[i];
		sps = (SubPlanState *) prm->execPlan;

		/*
		 * Append all the es_param_exec_vals datum values on to the external
		 * parameter list so they can be serialized in the mppexec call to the
		 * QEs.  Do this inside the loop since later initplans may depend on
		 * the results of earlier ones.
		 *
		 * TODO Some of the work of addRemoteExecParamsToParamList could be
		 *		factored out of the loop.
		 */
		augmentedPli = addRemoteExecParamsToParamList(queryDesc->plannedstmt,
													  originalPli,
													  estate->es_param_exec_vals);

		if (isParamExecutableNow(sps, estate->es_param_exec_vals))
		{
			SubPlan    *subplan = (SubPlan *) sps->xprstate.expr;

			Assert(IsA(subplan, SubPlan) &&
				   subplan->qDispSliceId > 0);

			sps->planstate->plan->nParamExec = queryDesc->plannedstmt->nCrossLevelParams;
			sps->planstate->plan->nMotionNodes = queryDesc->plannedstmt->nMotionNodes;
			sps->planstate->plan->dispatch = DISPATCH_PARALLEL;

			/*
			 * Adjust for the slice to execute on the QD.
			 */
			rootIndex = subplan->qDispSliceId;
			queryDesc->estate->es_sliceTable->localSlice = rootIndex;

			/* set our global sliceid variable for elog. */
			currentSliceId = rootIndex;

			/*
			 * This runs the SubPlan and puts the answer back into prm->value.
			 */
			queryDesc->params = augmentedPli;

			/*
			 * Use ExprContext to set the param. If ExprContext is not initialized,
			 * create a new one here. (see MPP-3511)
			 */
			if (sps->planstate->ps_ExprContext == NULL)
				sps->planstate->ps_ExprContext = CreateExprContext(estate);
			
			/* MPP-12048: Set the right slice index before execution. */
			Assert( (subplan->qDispSliceId > queryDesc->plannedstmt->nMotionNodes)  &&
					(subplan->qDispSliceId <=
							(queryDesc->plannedstmt->nMotionNodes
							+ queryDesc->plannedstmt->nInitPlans) )   );

			Assert(LocallyExecutingSliceIndex(sps->planstate->state) == subplan->qDispSliceId);
		    //sps->planstate->state->es_cur_slice_idx = subplan->qDispSliceId;

			ExecSetParamPlan(sps, sps->planstate->ps_ExprContext, queryDesc);

			/*
			 * We dispatched, and have returned. We may have used the
			 * interconnect; so let's bump the interconnect-id.
			 */
			queryDesc->estate->es_sliceTable->ic_instance_id = ++gp_interconnect_id;
		}

		queryDesc->params = originalPli;
		queryDesc->estate->es_sliceTable->localSlice = originalSlice;
		currentSliceId = originalSlice;

		pfree(augmentedPli);
	}
}
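For reference, the ParamExecData entries that the loop above iterates over are the plain PostgreSQL structure from nodes/params.h (a Greenplum fork may add fields on top of this):

typedef struct ParamExecData
{
	void	   *execPlan;		/* should be "SubPlanState *" */
	Datum		value;
	bool		isnull;
} ParamExecData;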
Example #5
/*
 * Construct an empty TupleHashTable
 *
 *	numCols, keyColIdx: identify the tuple fields to use as lookup key
 *	eqfunctions: equality comparison functions to use
 *	hashfunctions: datatype-specific hashing functions to use
 *	nbuckets: initial estimate of hashtable size
 *	additionalsize: size of data stored in ->additional
 *	tablecxt: memory context in which to store table and table entries
 *	tempcxt: short-lived context for evaluating hash and comparison functions
 *
 * The function arrays may be made with execTuplesHashPrepare().  Note they
 * are not cross-type functions, but expect to see the table datatype(s)
 * on both sides.
 *
 * Note that keyColIdx, eqfunctions, and hashfunctions must be allocated in
 * storage that will live as long as the hashtable does.
 */
TupleHashTable
BuildTupleHashTable(PlanState *parent,
					TupleDesc inputDesc,
					int numCols, AttrNumber *keyColIdx,
					Oid *eqfuncoids,
					FmgrInfo *hashfunctions,
					long nbuckets, Size additionalsize,
					MemoryContext tablecxt, MemoryContext tempcxt,
					bool use_variable_hash_iv)
{
	TupleHashTable hashtable;
	Size		entrysize = sizeof(TupleHashEntryData) + additionalsize;
	MemoryContext oldcontext;

	Assert(nbuckets > 0);

	/* Limit initial table size request to not more than work_mem */
	nbuckets = Min(nbuckets, (long) ((work_mem * 1024L) / entrysize));

	hashtable = (TupleHashTable)
		MemoryContextAlloc(tablecxt, sizeof(TupleHashTableData));

	hashtable->numCols = numCols;
	hashtable->keyColIdx = keyColIdx;
	hashtable->tab_hash_funcs = hashfunctions;
	hashtable->tablecxt = tablecxt;
	hashtable->tempcxt = tempcxt;
	hashtable->entrysize = entrysize;
	hashtable->tableslot = NULL;	/* will be made on first lookup */
	hashtable->inputslot = NULL;
	hashtable->in_hash_funcs = NULL;
	hashtable->cur_eq_func = NULL;

	/*
	 * If parallelism is in use, even if the master backend is performing the
	 * scan itself, we don't want to create the hashtable exactly the same way
	 * in all workers. As hashtables are iterated over in keyspace-order,
	 * doing so in all processes in the same way is likely to lead to
	 * "unbalanced" hashtables when the table size initially is
	 * underestimated.
	 */
	if (use_variable_hash_iv)
		hashtable->hash_iv = murmurhash32(ParallelWorkerNumber);
	else
		hashtable->hash_iv = 0;

	hashtable->hashtab = tuplehash_create(tablecxt, nbuckets, hashtable);

	oldcontext = MemoryContextSwitchTo(hashtable->tablecxt);

	/*
	 * We copy the input tuple descriptor just for safety --- we assume all
	 * input tuples will have equivalent descriptors.
	 */
	hashtable->tableslot = MakeSingleTupleTableSlot(CreateTupleDescCopy(inputDesc),
													&TTSOpsMinimalTuple);

	/* build comparator for all columns */
	/* XXX: should we support non-minimal tuples for the inputslot? */
	hashtable->tab_eq_func = ExecBuildGroupingEqual(inputDesc, inputDesc,
													&TTSOpsMinimalTuple, &TTSOpsMinimalTuple,
													numCols,
													keyColIdx, eqfuncoids,
													parent);

	MemoryContextSwitchTo(oldcontext);

	hashtable->exprcontext = CreateExprContext(parent->state);

	return hashtable;
}
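A minimal sketch of how callers typically use the resulting table, assuming the three-argument LookupTupleHashEntry() found up to PostgreSQL 12 (later releases add a hash-value argument):

/*
 * Sketch only: find or create the group entry keyed by the grouping columns
 * of inputslot.  For a brand-new entry, allocate the caller's per-group data
 * in the long-lived table context and hang it off entry->additional.
 */
static TupleHashEntry
lookup_group(TupleHashTable hashtable, TupleTableSlot *inputslot,
			 Size groupdatasize)
{
	bool		isnew;
	TupleHashEntry entry;

	entry = LookupTupleHashEntry(hashtable, inputslot, &isnew);

	if (isnew)
		entry->additional = MemoryContextAllocZero(hashtable->tablecxt,
												   groupdatasize);

	return entry;
}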