Example #1
/*
 * Init nodeDML, which initializes the insert TupleTableSlot.
 */
DMLState*
ExecInitDML(DML *node, EState *estate, int eflags)
{
	
	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK | EXEC_FLAG_REWIND)));
	
	DMLState *dmlstate = makeNode(DMLState);
	dmlstate->ps.plan = (Plan *)node;
	dmlstate->ps.state = estate;

	ExecInitResultTupleSlot(estate, &dmlstate->ps);

	dmlstate->ps.targetlist = (List *)
						ExecInitExpr((Expr *) node->plan.targetlist,
						(PlanState *) dmlstate);

	Plan *outerPlan  = outerPlan(node);
	outerPlanState(dmlstate) = ExecInitNode(outerPlan, estate, eflags);

	ExecAssignResultTypeFromTL(&dmlstate->ps);

	/* Create expression evaluation context. This will be used for projections */
	ExecAssignExprContext(estate, &dmlstate->ps);

	/*
	 * Create projection info from the child tuple descriptor and our target list
	 * Projection will be placed in the ResultSlot
	 */
	TupleTableSlot *childResultSlot = outerPlanState(dmlstate)->ps_ResultTupleSlot;
	ExecAssignProjectionInfo(&dmlstate->ps, childResultSlot->tts_tupleDescriptor);
	
	/*
	 * Initialize slot to insert/delete using output relation descriptor.
	 */
	dmlstate->cleanedUpSlot = ExecInitExtraTupleSlot(estate);

	/*
	 * Both the input and output of the junk filter include dropped
	 * attributes, so the junk filter doesn't need to do anything special
	 * about them.
	 */
	TupleDesc cleanTupType = CreateTupleDescCopy(dmlstate->ps.state->es_result_relation_info->ri_RelationDesc->rd_att); 
	dmlstate->junkfilter = ExecInitJunkFilter(node->plan.targetlist,
			cleanTupType,
			dmlstate->cleanedUpSlot);

	if (estate->es_instrument)
	{
		dmlstate->ps.cdbexplainbuf = makeStringInfo();

		/* Request a callback at end of query. */
		dmlstate->ps.cdbexplainfun = ExecDMLExplainEnd;
	}

	initGpmonPktForDML((Plan *)node, &dmlstate->ps.gpmon_pkt, estate);
	
	return dmlstate;
}
Example #2
void
CheckerInit(Checker *checker, Relation rel, TupleChecker *tchecker)
{
	TupleDesc	desc;

	checker->tchecker = tchecker;

	/*
	 * When ENCODING is specified, we check the input data encoding and
	 * convert it if the client and server encodings differ.
	 */
	checker->db_encoding = GetDatabaseEncoding();
	if (checker->encoding != -1 &&
		(checker->encoding != PG_SQL_ASCII ||
		checker->db_encoding != PG_SQL_ASCII))
		checker->check_encoding = true;

	if (!rel)
		return;

	/* When CHECK_CONSTRAINTS is specified, we check the constraints */
	desc = RelationGetDescr(rel);
	if (desc->constr &&
		(checker->check_constraints || desc->constr->has_not_null))
	{
		if (checker->check_constraints)
			checker->has_constraints = true;

		if (desc->constr->has_not_null)
			checker->has_not_null = true;

		checker->resultRelInfo = makeNode(ResultRelInfo);
		checker->resultRelInfo->ri_RangeTableIndex = 1;		/* dummy */
		checker->resultRelInfo->ri_RelationDesc = rel;
		checker->resultRelInfo->ri_TrigDesc = NULL; /* TRIGGER is not supported */
		checker->resultRelInfo->ri_TrigInstrument = NULL;
	}

	if (checker->has_constraints)
	{
		checker->estate = CreateExecutorState();
		checker->estate->es_result_relations = checker->resultRelInfo;
		checker->estate->es_num_result_relations = 1;
		checker->estate->es_result_relation_info = checker->resultRelInfo;

		/* Set up a tuple slot too */
		checker->slot = MakeSingleTupleTableSlot(desc);
	}

	if (!checker->has_constraints && checker->has_not_null)
	{
		int	i;

		checker->desc = CreateTupleDescCopy(desc);
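		/* CreateTupleDescCopy() resets attnotnull, so copy the flags back */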
		for (i = 0; i < desc->natts; i++)
			checker->desc->attrs[i]->attnotnull = desc->attrs[i]->attnotnull;
	}
}
Example #3
/*
 * deflist_to_tuplestore - Helper function to convert DefElem list to
 * tuplestore usable in SRF.
 */
static void
deflist_to_tuplestore(ReturnSetInfo *rsinfo, List *options)
{
	ListCell   *cell;
	TupleDesc	tupdesc;
	Tuplestorestate *tupstore;
	Datum		values[2];
	bool		nulls[2];
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize) ||
		rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not allowed in this context")));

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/*
	 * Now prepare the result set.
	 */
	tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);
	tupstore = tuplestore_begin_heap(true, false, work_mem);
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;

	foreach(cell, options)
	{
		DefElem    *def = lfirst(cell);

		values[0] = CStringGetTextDatum(def->defname);
		nulls[0] = false;
		if (def->arg)
		{
			values[1] = CStringGetTextDatum(((Value *) (def->arg))->val.str);
			nulls[1] = false;
		}
		else
		{
			values[1] = (Datum) 0;
			nulls[1] = true;
		}
		tuplestore_putvalues(tupstore, tupdesc, values, nulls);
	}

	MemoryContextSwitchTo(oldcontext);
}
Example #4
/*
 * RelationNameGetTupleDesc
 *
 * Given a (possibly qualified) relation name, build a TupleDesc.
 *
 * Note: while this works as advertised, it's seldom the best way to
 * build a tupdesc for a function's result type.  It's kept around
 * only for backwards compatibility with existing user-written code.
 */
TupleDesc
RelationNameGetTupleDesc(const char *relname)
{
	RangeVar   *relvar;
	Relation	rel;
	TupleDesc	tupdesc;
	List	   *relname_list;

	/* Open relation and copy the tuple description */
	relname_list = stringToQualifiedNameList(relname);
	relvar = makeRangeVarFromNameList(relname_list);
	rel = relation_openrv(relvar, AccessShareLock);
	tupdesc = CreateTupleDescCopy(RelationGetDescr(rel));
	relation_close(rel, AccessShareLock);

	return tupdesc;
}
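A minimal usage sketch (not taken from the example above): a hypothetical composite-returning C function, my_row_of, that builds its result descriptor with RelationNameGetTupleDesc and the standard funcapi helpers, returning an all-NULL row of the named relation's shape.

#include "postgres.h"
#include "funcapi.h"
#include "utils/builtins.h"

PG_FUNCTION_INFO_V1(my_row_of);

Datum
my_row_of(PG_FUNCTION_ARGS)
{
	char	   *relname = text_to_cstring(PG_GETARG_TEXT_PP(0));
	TupleDesc	tupdesc;
	AttInMetadata *attinmeta;
	char	  **values;
	HeapTuple	tuple;
	int			i;

	/* Build (and bless) a tuple descriptor matching the relation's row type */
	tupdesc = BlessTupleDesc(RelationNameGetTupleDesc(relname));
	attinmeta = TupleDescGetAttInMetadata(tupdesc);

	/* Return an all-NULL row of that shape, just to show the plumbing */
	values = (char **) palloc(tupdesc->natts * sizeof(char *));
	for (i = 0; i < tupdesc->natts; i++)
		values[i] = NULL;

	tuple = BuildTupleFromCStrings(attinmeta, values);
	PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}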
Example #5
Relation
copy_heap(Oid OIDOldHeap)
{
    char NewName[NAMEDATALEN];
    TupleDesc OldHeapDesc, tupdesc;
    Oid OIDNewHeap;
    Relation NewHeap, OldHeap;

    /*
     *  Create a new heap relation with a temporary name, which has the
     *  same tuple description as the old one.
     */
    sprintf(NewName, "temp_%x", OIDOldHeap);

    OldHeap = heap_open(OIDOldHeap);
    OldHeapDesc = RelationGetTupleDescriptor(OldHeap);

    /*
     * Need to make a copy of the tuple descriptor, heap_create modifies
     * it.
     */

    tupdesc = CreateTupleDescCopy(OldHeapDesc);
    
    OIDNewHeap=heap_create(NewName,
			   NULL,
			   OldHeap->rd_rel->relarch,
			   OldHeap->rd_rel->relsmgr,
			   tupdesc);

    if (!OidIsValid(OIDNewHeap))
	elog(WARN,"clusterheap: cannot create temporary heap relation\n");

    NewHeap = heap_open(OIDNewHeap);

    heap_close(NewHeap);
    heap_close(OldHeap);

    return NewHeap;
}
Example #6
static void
setup_firstcall(FunctionCallInfo fcinfo, FuncCallContext *funcctx, Oid prsid)
{
	TupleDesc	tupdesc;
	MemoryContext oldcontext;
	TypeStorage *st;
	WParserInfo *prs = findprs(prsid);

	oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

	st = (TypeStorage *) palloc(sizeof(TypeStorage));
	st->cur = 0;
	st->list = (LexDescr *) DatumGetPointer(
					OidFunctionCall1(prs->lextype, PointerGetDatum(prs->prs))
		);
	funcctx->user_fctx = (void *) st;
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	tupdesc = CreateTupleDescCopy(tupdesc);
	funcctx->attinmeta = TupleDescGetAttInMetadata(tupdesc);
	MemoryContextSwitchTo(oldcontext);
}
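A sketch of how such a first-call helper is typically driven by its set-returning function; the wrapper name my_token_list and the elided per-row logic are illustrative only.

Datum
my_token_list(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;

	if (SRF_IS_FIRSTCALL())
	{
		/* one-time work: parser lookup, tupdesc copy, attinmeta */
		funcctx = SRF_FIRSTCALL_INIT();
		setup_firstcall(fcinfo, funcctx, PG_GETARG_OID(0));
	}

	funcctx = SRF_PERCALL_SETUP();

	/*
	 * ... build one row per call from funcctx->user_fctx and hand it back
	 * with SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)) ...
	 */

	SRF_RETURN_DONE(funcctx);
}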
Example #7
void DMLUtils::PrepareAbstractJoinPlanState(AbstractJoinPlanState *j_plan_state,
                                            const JoinState &j_state) {
  // Copy join type
  j_plan_state->jointype = j_state.jointype;

  // Copy join qual expr states
  j_plan_state->joinqual = CopyExprStateList(j_state.joinqual);

  // Copy ps qual
  j_plan_state->qual = CopyExprStateList(j_state.ps.qual);

  // Copy target list
  j_plan_state->targetlist = CopyExprStateList(j_state.ps.targetlist);

  // Copy tuple desc
  auto tup_desc = j_state.ps.ps_ResultTupleSlot->tts_tupleDescriptor;
  j_plan_state->tts_tupleDescriptor = CreateTupleDescCopy(tup_desc);

  // Construct projection info
  j_plan_state->ps_ProjInfo =
      BuildProjectInfo(j_state.ps.ps_ProjInfo, tup_desc->natts);
}
Example #8
/*
 * Prepare to receive tuples from executor.
 */
static void
producerStartupReceiver(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	ProducerState *myState = (ProducerState *) self;

	if (ActivePortal)
	{
		/*
		 * Normally ExecutorContext is current here.  However, it is better
		 * to create local producer storage in the Portal's context: the
		 * producer may keep pushing records to consumers after the executor
		 * is destroyed.
		 */
		MemoryContext savecontext;
		savecontext = MemoryContextSwitchTo(PortalGetHeapMemory(ActivePortal));
		myState->typeinfo = CreateTupleDescCopy(typeinfo);
		MemoryContextSwitchTo(savecontext);
	}
	else
		myState->typeinfo = typeinfo;

	if (myState->consumer)
		(*myState->consumer->rStartup) (myState->consumer, operation, typeinfo);
}
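The "copy the descriptor into a longer-lived memory context" idiom above recurs throughout these examples; isolated, and with placeholder names (copy_tupdesc_into, target_cxt), it amounts to:

static TupleDesc
copy_tupdesc_into(MemoryContext target_cxt, TupleDesc desc)
{
	MemoryContext oldcxt = MemoryContextSwitchTo(target_cxt);
	TupleDesc	copy = CreateTupleDescCopy(desc);

	MemoryContextSwitchTo(oldcxt);
	return copy;
}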
Example #9
/*
 * spi_dest_startup
 *		Initialize to receive tuples from Executor into SPITupleTable
 *		of current SPI procedure
 */
void
spi_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	SPITupleTable *tuptable;
	MemoryContext oldcxt;
	MemoryContext tuptabcxt;

	/*
	 * When called by the Executor, _SPI_curid is expected to equal
	 * _SPI_connected.
	 */
	if (_SPI_curid != _SPI_connected || _SPI_connected < 0)
		elog(ERROR, "improper call to spi_dest_startup");
	if (_SPI_current != &(_SPI_stack[_SPI_curid]))
		elog(ERROR, "SPI stack corrupted");

	if (_SPI_current->tuptable != NULL)
		elog(ERROR, "improper call to spi_dest_startup");

	oldcxt = _SPI_procmem();	/* switch to procedure memory context */

	tuptabcxt = AllocSetContextCreate(CurrentMemoryContext,
									  "SPI TupTable",
									  ALLOCSET_DEFAULT_MINSIZE,
									  ALLOCSET_DEFAULT_INITSIZE,
									  ALLOCSET_DEFAULT_MAXSIZE);
	MemoryContextSwitchTo(tuptabcxt);

	_SPI_current->tuptable = tuptable = (SPITupleTable *)
		palloc(sizeof(SPITupleTable));
	tuptable->tuptabcxt = tuptabcxt;
	tuptable->alloced = tuptable->free = 128;
	tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple));
	tuptable->tupdesc = CreateTupleDescCopy(typeinfo);

	MemoryContextSwitchTo(oldcxt);
}
Example #10
/*
 * PersistHoldablePortal
 *
 * Prepare the specified Portal for access outside of the current
 * transaction. When this function returns, all future accesses to the
 * portal must be done via the Tuplestore (not by invoking the
 * executor).
 */
void
PersistHoldablePortal(Portal portal)
{
	QueryDesc  *queryDesc = PortalGetQueryDesc(portal);
	Portal		saveActivePortal;
	ResourceOwner saveResourceOwner;
	MemoryContext savePortalContext;
	MemoryContext oldcxt;

	/*
	 * If we're preserving a holdable portal, we had better be inside the
	 * transaction that originally created it.
	 */
	Assert(portal->createSubid != InvalidSubTransactionId);
	Assert(queryDesc != NULL);

	/*
	 * Caller must have created the tuplestore already.
	 */
	Assert(portal->holdContext != NULL);
	Assert(portal->holdStore != NULL);

	/*
	 * Before closing down the executor, we must copy the tupdesc into
	 * long-term memory, since it was created in executor memory.
	 */
	oldcxt = MemoryContextSwitchTo(portal->holdContext);

	portal->tupDesc = CreateTupleDescCopy(portal->tupDesc);

	MemoryContextSwitchTo(oldcxt);

	/*
	 * Check for improper portal use, and mark portal active.
	 */
	if (portal->status != PORTAL_READY)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("portal \"%s\" cannot be run", portal->name)));
	portal->status = PORTAL_ACTIVE;

	/*
	 * Set up global portal context pointers.
	 */
	saveActivePortal = ActivePortal;
	saveResourceOwner = CurrentResourceOwner;
	savePortalContext = PortalContext;
	PG_TRY();
	{
		ActivePortal = portal;
		CurrentResourceOwner = portal->resowner;
		PortalContext = PortalGetHeapMemory(portal);

		MemoryContextSwitchTo(PortalContext);

		PushActiveSnapshot(queryDesc->snapshot);

		/*
		 * Rewind the executor: we need to store the entire result set in the
		 * tuplestore, so that subsequent backward FETCHs can be processed.
		 */
		ExecutorRewind(queryDesc);

		/*
		 * Change the destination to output to the tuplestore.	Note we tell
		 * the tuplestore receiver to detoast all data passed through it.
		 */
		queryDesc->dest = CreateDestReceiver(DestTuplestore);
		SetTuplestoreDestReceiverParams(queryDesc->dest,
										portal->holdStore,
										portal->holdContext,
										true);

		/* Fetch the result set into the tuplestore */
		ExecutorRun(queryDesc, ForwardScanDirection, 0L);

		(*queryDesc->dest->rDestroy) (queryDesc->dest);
		queryDesc->dest = NULL;

		/*
		 * Now shut down the inner executor.
		 */
		portal->queryDesc = NULL;		/* prevent double shutdown */
		/* we do not need AfterTriggerEndQuery() here */
		ExecutorEnd(queryDesc);
		FreeQueryDesc(queryDesc);

		/*
		 * Set the position in the result set: ideally, this could be
		 * implemented by just skipping straight to the tuple # that we need
		 * to be at, but the tuplestore API doesn't support that. So we start
		 * at the beginning of the tuplestore and iterate through it until we
		 * reach where we need to be.  FIXME someday?  (Fortunately, the
		 * typical case is that we're supposed to be at or near the start of
		 * the result set, so this isn't as bad as it sounds.)
		 */
		MemoryContextSwitchTo(portal->holdContext);

		if (portal->atEnd)
		{
			/* we can handle this case even if posOverflow */
			while (tuplestore_advance(portal->holdStore, true))
				 /* continue */ ;
		}
		else
		{
			long		store_pos;

			if (portal->posOverflow)	/* oops, cannot trust portalPos */
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("could not reposition held cursor")));

			tuplestore_rescan(portal->holdStore);

			for (store_pos = 0; store_pos < portal->portalPos; store_pos++)
			{
				if (!tuplestore_advance(portal->holdStore, true))
					elog(ERROR, "unexpected end of tuple stream");
			}
		}
	}
	PG_CATCH();
	{
		/* Uncaught error while executing portal: mark it dead */
		portal->status = PORTAL_FAILED;

		/* Restore global vars and propagate error */
		ActivePortal = saveActivePortal;
		CurrentResourceOwner = saveResourceOwner;
		PortalContext = savePortalContext;

		PG_RE_THROW();
	}
	PG_END_TRY();

	MemoryContextSwitchTo(oldcxt);

	/* Mark portal not active */
	portal->status = PORTAL_READY;

	ActivePortal = saveActivePortal;
	CurrentResourceOwner = saveResourceOwner;
	PortalContext = savePortalContext;

	PopActiveSnapshot();

	/*
	 * We can now release any subsidiary memory of the portal's heap context;
	 * we'll never use it again.  The executor already dropped its context,
	 * but this will clean up anything that glommed onto the portal's heap via
	 * PortalContext.
	 */
	MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
Example #11
/*
 * pgstattuple_real
 *
 * The real work occurs here
 */
static Datum
pgstattuple_real(Relation rel, FunctionCallInfo fcinfo)
{
	HeapScanDesc scan;
	HeapTuple	tuple;
	BlockNumber nblocks;
	BlockNumber block = 0;		/* next block to count free space in */
	BlockNumber tupblock;
	Buffer		buffer;
	uint64		table_len;
	uint64		tuple_len = 0;
	uint64		dead_tuple_len = 0;
	uint64		tuple_count = 0;
	uint64		dead_tuple_count = 0;
	double		tuple_percent;
	double		dead_tuple_percent;
	uint64		free_space = 0; /* free/reusable space in bytes */
	double		free_percent;	/* free/reusable space in % */
	TupleDesc	tupdesc;
	AttInMetadata *attinmeta;
	char	  **values;
	int			i;
	Datum		result;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* make sure we have a persistent copy of the tupdesc */
	tupdesc = CreateTupleDescCopy(tupdesc);

	/*
	 * Generate attribute metadata needed later to produce tuples from raw C
	 * strings
	 */
	attinmeta = TupleDescGetAttInMetadata(tupdesc);

	scan = heap_beginscan(rel, SnapshotAny, 0, NULL);

	nblocks = scan->rs_nblocks; /* # blocks to be scanned */

	/* scan the relation */
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* must hold a buffer lock to call HeapTupleSatisfiesNow */
		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		if (HeapTupleSatisfiesNow(tuple->t_data, scan->rs_cbuf))
		{
			tuple_len += tuple->t_len;
			tuple_count++;
		}
		else
		{
			dead_tuple_len += tuple->t_len;
			dead_tuple_count++;
		}

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);

		/*
		 * To avoid physically reading the table twice, try to do the
		 * free-space scan in parallel with the heap scan.	However,
		 * heap_getnext may find no tuples on a given page, so we cannot
		 * simply examine the pages returned by the heap scan.
		 */
		tupblock = BlockIdGetBlockNumber(&tuple->t_self.ip_blkid);

		while (block <= tupblock)
		{
			buffer = ReadBuffer(rel, block);
			LockBuffer(buffer, BUFFER_LOCK_SHARE);
			free_space += PageGetFreeSpace((Page) BufferGetPage(buffer));
			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
			ReleaseBuffer(buffer);
			block++;
		}
	}
	heap_endscan(scan);

	while (block < nblocks)
	{
		buffer = ReadBuffer(rel, block);
		free_space += PageGetFreeSpace((Page) BufferGetPage(buffer));
		ReleaseBuffer(buffer);
		block++;
	}

	heap_close(rel, AccessShareLock);

	table_len = (uint64) nblocks * BLCKSZ;

	if (nblocks == 0)
	{
		tuple_percent = 0.0;
		dead_tuple_percent = 0.0;
		free_percent = 0.0;
	}
	else
	{
		tuple_percent = (double) tuple_len * 100.0 / table_len;
		dead_tuple_percent = (double) dead_tuple_len * 100.0 / table_len;
		free_percent = (double) free_space * 100.0 / table_len;
	}

	/*
	 * Prepare a values array for constructing the tuple. This should be an
	 * array of C strings which will be processed later by the appropriate
	 * "in" functions.
	 */
	values = (char **) palloc(NCOLUMNS * sizeof(char *));
	for (i = 0; i < NCOLUMNS; i++)
		values[i] = (char *) palloc(NCHARS * sizeof(char));
	i = 0;
	snprintf(values[i++], NCHARS, INT64_FORMAT, table_len);
	snprintf(values[i++], NCHARS, INT64_FORMAT, tuple_count);
	snprintf(values[i++], NCHARS, INT64_FORMAT, tuple_len);
	snprintf(values[i++], NCHARS, "%.2f", tuple_percent);
	snprintf(values[i++], NCHARS, INT64_FORMAT, dead_tuple_count);
	snprintf(values[i++], NCHARS, INT64_FORMAT, dead_tuple_len);
	snprintf(values[i++], NCHARS, "%.2f", dead_tuple_percent);
	snprintf(values[i++], NCHARS, INT64_FORMAT, free_space);
	snprintf(values[i++], NCHARS, "%.2f", free_percent);

	/* build a tuple */
	tuple = BuildTupleFromCStrings(attinmeta, values);

	/* make the tuple into a datum */
	result = HeapTupleGetDatum(tuple);

	/* Clean up */
	for (i = 0; i < NCOLUMNS; i++)
		pfree(values[i]);
	pfree(values);

	return (result);
}
Example #12
/*
 * Initialize a single motion node.  This is called by the executor when a
 * motion node in the plan tree is being initialized.
 *
 * This function is called from:  ExecInitMotion()
 */
void
UpdateMotionLayerNode(MotionLayerState *mlStates, int16 motNodeID, bool preserveOrder, TupleDesc tupDesc, uint64 operatorMemKB)
{
	MemoryContext oldCtxt;
	MotionNodeEntry *pEntry;

	AssertArg(tupDesc != NULL);

	/*
	 * Switch to the Motion Layer's memory-context, so that the motion node
	 * can be reset later.
	 */
	oldCtxt = MemoryContextSwitchTo(mlStates->motion_layer_mctx);
	
	if (motNodeID > mlStates->mneCount)
	{
		AddMotionLayerNode(mlStates, motNodeID);
	}

	pEntry = &mlStates->mnEntries[motNodeID - 1];

	if (!pEntry->valid)
	{
		/*
		 * We'll just set this to 0.  Later, ml_ipc will call
		 * setExpectedReceivers() to set this if we are a "Receiving"
		 * motion node.
		 */
		pEntry->num_senders = 0;
	}

	pEntry->motion_node_id = motNodeID;

	pEntry->valid = true;

	/* Finish up initialization of the motion node entry. */
	pEntry->preserve_order = preserveOrder;
	pEntry->tuple_desc = CreateTupleDescCopy(tupDesc);
	InitSerTupInfo(pEntry->tuple_desc, &pEntry->ser_tup_info);

	pEntry->memKB = operatorMemKB;

	if (!preserveOrder)
	{
		Assert(pEntry->memKB > 0);
		
		/* Create a tuple-store for the motion node's incoming tuples. */
		pEntry->ready_tuples = htfifo_create(pEntry->memKB);
	}
	else
		pEntry->ready_tuples = NULL;


	pEntry->num_stream_ends_recvd = 0;

	/* Initialize statistics counters. */
	pEntry->stat_total_chunks_sent = 0;
	pEntry->stat_total_bytes_sent = 0;
	pEntry->stat_tuple_bytes_sent = 0;
	pEntry->stat_total_sends = 0;
	pEntry->stat_total_recvs = 0;
	pEntry->stat_tuples_available = 0;
	pEntry->stat_tuples_available_hwm = 0;
	pEntry->stat_total_chunks_recvd = 0;
	pEntry->stat_total_bytes_recvd = 0;
	pEntry->stat_tuple_bytes_recvd = 0;
	pEntry->sel_rd_wait = 0;
	pEntry->sel_wr_wait = 0;

	pEntry->cleanedUp = false;
	pEntry->stopped = false;
	pEntry->moreNetWork = true;


	/* All done!  Go back to caller memory-context. */
	MemoryContextSwitchTo(oldCtxt);
}
Example #13
static inline Datum
each_worker(PG_FUNCTION_ARGS, bool as_text)
{
	text	   *json = PG_GETARG_TEXT_P(0);
	JsonLexContext *lex = makeJsonLexContext(json, true);
	JsonSemAction sem;
	ReturnSetInfo *rsi;
	MemoryContext old_cxt;
	TupleDesc	tupdesc;
	EachState	state;

	state = palloc0(sizeof(eachState));
	sem = palloc0(sizeof(jsonSemAction));

	rsi = (ReturnSetInfo *) fcinfo->resultinfo;

	if (!rsi || !IsA(rsi, ReturnSetInfo) ||
		(rsi->allowedModes & SFRM_Materialize) == 0 ||
		rsi->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that "
						"cannot accept a set")));


	rsi->returnMode = SFRM_Materialize;

	(void) get_call_result_type(fcinfo, NULL, &tupdesc);

	/* make these in a sufficiently long-lived memory context */
	old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory);

	state->ret_tdesc = CreateTupleDescCopy(tupdesc);
	BlessTupleDesc(state->ret_tdesc);
	state->tuple_store =
		tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize,
							  false, work_mem);

	MemoryContextSwitchTo(old_cxt);

	sem->semstate = (void *) state;
	sem->array_start = each_array_start;
	sem->scalar = each_scalar;
	sem->object_field_start = each_object_field_start;
	sem->object_field_end = each_object_field_end;

	state->normalize_results = as_text;
	state->next_scalar = false;

	state->lex = lex;
	state->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext,
										   "json_each temporary cxt",
										   ALLOCSET_DEFAULT_MINSIZE,
										   ALLOCSET_DEFAULT_INITSIZE,
										   ALLOCSET_DEFAULT_MAXSIZE);

	pg_parse_json(lex, sem);

	rsi->setResult = state->tuple_store;
	rsi->setDesc = state->ret_tdesc;

	PG_RETURN_NULL();
}
Example #14
/*
 * SQL function json_populate_recordset
 *
 * set fields in a set of records from the argument json,
 * which must be an array of objects.
 *
 * similar to json_populate_record, but the tuple-building code
 * is pushed down into the semantic action handlers so it's done
 * per object in the array.
 */
Datum
json_populate_recordset(PG_FUNCTION_ARGS)
{
	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	text	   *json = PG_GETARG_TEXT_P(1);
	bool		use_json_as_text = PG_GETARG_BOOL(2);
	ReturnSetInfo *rsi;
	MemoryContext old_cxt;
	Oid			tupType;
	int32		tupTypmod;
	HeapTupleHeader rec;
	TupleDesc	tupdesc;
	RecordIOData *my_extra;
	int			ncolumns;
	JsonLexContext *lex;
	JsonSemAction sem;
	PopulateRecordsetState state;

	if (!type_is_rowtype(argtype))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("first argument must be a rowtype")));

	rsi = (ReturnSetInfo *) fcinfo->resultinfo;

	if (!rsi || !IsA(rsi, ReturnSetInfo) ||
		(rsi->allowedModes & SFRM_Materialize) == 0 ||
		rsi->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that "
						"cannot accept a set")));


	rsi->returnMode = SFRM_Materialize;

	/*
	 * get the tupdesc from the result set info - it must be a record type
	 * because we already checked that arg1 is a record type.
	 */
	(void) get_call_result_type(fcinfo, NULL, &tupdesc);

	state = palloc0(sizeof(populateRecordsetState));
	sem = palloc0(sizeof(jsonSemAction));


	/* make these in a sufficiently long-lived memory context */
	old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory);

	state->ret_tdesc = CreateTupleDescCopy(tupdesc);
	BlessTupleDesc(state->ret_tdesc);
	state->tuple_store =
		tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize,
							  false, work_mem);

	MemoryContextSwitchTo(old_cxt);

	/* if the json is null send back an empty set */
	if (PG_ARGISNULL(1))
		PG_RETURN_NULL();

	if (PG_ARGISNULL(0))
		rec = NULL;
	else
		rec = PG_GETARG_HEAPTUPLEHEADER(0);

	tupType = tupdesc->tdtypeid;
	tupTypmod = tupdesc->tdtypmod;
	ncolumns = tupdesc->natts;

	lex = makeJsonLexContext(json, true);

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	sem->semstate = (void *) state;
	sem->array_start = populate_recordset_array_start;
	sem->array_element_start = populate_recordset_array_element_start;
	sem->scalar = populate_recordset_scalar;
	sem->object_field_start = populate_recordset_object_field_start;
	sem->object_field_end = populate_recordset_object_field_end;
	sem->object_start = populate_recordset_object_start;
	sem->object_end = populate_recordset_object_end;

	state->lex = lex;

	state->my_extra = my_extra;
	state->rec = rec;
	state->use_json_as_text = use_json_as_text;
	state->fn_mcxt = fcinfo->flinfo->fn_mcxt;

	pg_parse_json(lex, sem);

	rsi->setResult = state->tuple_store;
	rsi->setDesc = state->ret_tdesc;

	PG_RETURN_NULL();

}
Example #15
Datum
xpath_table(PG_FUNCTION_ARGS)
{
	/* Function parameters */
	char	   *pkeyfield = text_to_cstring(PG_GETARG_TEXT_PP(0));
	char	   *xmlfield = text_to_cstring(PG_GETARG_TEXT_PP(1));
	char	   *relname = text_to_cstring(PG_GETARG_TEXT_PP(2));
	char	   *xpathset = text_to_cstring(PG_GETARG_TEXT_PP(3));
	char	   *condition = text_to_cstring(PG_GETARG_TEXT_PP(4));

	/* SPI (input tuple) support */
	SPITupleTable *tuptable;
	HeapTuple	spi_tuple;
	TupleDesc	spi_tupdesc;

	/* Output tuple (tuplestore) support */
	Tuplestorestate *tupstore = NULL;
	TupleDesc	ret_tupdesc;
	HeapTuple	ret_tuple;

	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	AttInMetadata *attinmeta;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;

	char	  **values;
	xmlChar   **xpaths;
	char	   *pos;
	const char *pathsep = "|";

	int			numpaths;
	int			ret;
	int			proc;
	int			i;
	int			j;
	int			rownr;			/* For issuing multiple rows from one original
								 * document */
	bool		had_values;		/* To determine end of nodeset results */
	StringInfoData query_buf;
	PgXmlErrorContext *xmlerrcxt;
	volatile xmlDocPtr doctree = NULL;

	/* We only have a valid tuple description in table function mode */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table must be called as a table function")));

	/*
	 * We want to materialise because it means that we don't have to carry
	 * libxml2 parser state between invocations of this function
	 */
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
			   errmsg("xpath_table requires Materialize mode, but it is not "
					  "allowed in this context")));

	/*
	 * The tuplestore must exist in a higher context than this function call
	 * (per_query_ctx is used)
	 */
	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/*
	 * Create the tuplestore - work_mem is the max in-memory size before a
	 * file is created on disk to hold it.
	 */
	tupstore =
		tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
							  false, work_mem);

	MemoryContextSwitchTo(oldcontext);

	/* get the requested return tuple description */
	ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);

	/* must have at least one output column (for the pkey) */
	if (ret_tupdesc->natts < 1)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table must have at least one output column")));

	/*
	 * At the moment we assume that the returned attributes make sense for the
	 * XPath specified (i.e. we trust the caller). It's not fatal if they get
	 * it wrong - the input function for the column type will raise an error
	 * if the path result can't be converted into the correct binary
	 * representation.
	 */

	attinmeta = TupleDescGetAttInMetadata(ret_tupdesc);

	/* Set return mode and allocate value space. */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setDesc = ret_tupdesc;

	values = (char **) palloc(ret_tupdesc->natts * sizeof(char *));
	xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *));

	/*
	 * Split XPaths. xpathset is a writable CString.
	 *
	 * Note that we stop splitting once we've done all needed for tupdesc
	 */
	numpaths = 0;
	pos = xpathset;
	while (numpaths < (ret_tupdesc->natts - 1))
	{
		xpaths[numpaths++] = (xmlChar *) pos;
		pos = strstr(pos, pathsep);
		if (pos != NULL)
		{
			*pos = '\0';
			pos++;
		}
		else
			break;
	}

	/* Now build query */
	initStringInfo(&query_buf);

	/* Build initial sql statement */
	appendStringInfo(&query_buf, "SELECT %s, %s FROM %s WHERE %s",
					 pkeyfield,
					 xmlfield,
					 relname,
					 condition);

	if ((ret = SPI_connect()) < 0)
		elog(ERROR, "xpath_table: SPI_connect returned %d", ret);

	if ((ret = SPI_exec(query_buf.data, 0)) != SPI_OK_SELECT)
		elog(ERROR, "xpath_table: SPI execution failed for query %s",
			 query_buf.data);

	proc = SPI_processed;
	/* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */
	tuptable = SPI_tuptable;
	spi_tupdesc = tuptable->tupdesc;

	/* Switch out of SPI context */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Check that SPI returned correct result. If you put a comma into one of
	 * the function parameters, this will catch it when the SPI query returns
	 * e.g. 3 columns.
	 */
	if (spi_tupdesc->natts != 2)
	{
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						errmsg("expression returning multiple columns is not valid in parameter list"),
						errdetail("Expected two columns in SPI result, got %d.", spi_tupdesc->natts)));
	}

	/*
	 * Setup the parser.  This should happen after we are done evaluating the
	 * query, in case it calls functions that set up libxml differently.
	 */
	xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY);

	PG_TRY();
	{
		/* For each row i.e. document returned from SPI */
		for (i = 0; i < proc; i++)
		{
			char	   *pkey;
			char	   *xmldoc;
			xmlXPathContextPtr ctxt;
			xmlXPathObjectPtr res;
			xmlChar    *resstr;
			xmlXPathCompExprPtr comppath;

			/* Extract the row data as C Strings */
			spi_tuple = tuptable->vals[i];
			pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
			xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);

			/*
			 * Clear the values array, so that not-well-formed documents
			 * return NULL in all columns.	Note that this also means that
			 * spare columns will be NULL.
			 */
			for (j = 0; j < ret_tupdesc->natts; j++)
				values[j] = NULL;

			/* Insert primary key */
			values[0] = pkey;

			/* Parse the document */
			if (xmldoc)
				doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
			else	/* treat NULL as not well-formed */
				doctree = NULL;

			if (doctree == NULL)
			{
				/* not well-formed, so output all-NULL tuple */
				ret_tuple = BuildTupleFromCStrings(attinmeta, values);
				tuplestore_puttuple(tupstore, ret_tuple);
				heap_freetuple(ret_tuple);
			}
			else
			{
				/* New loop here - we have to deal with nodeset results */
				rownr = 0;

				do
				{
					/* Now evaluate the set of xpaths. */
					had_values = false;
					for (j = 0; j < numpaths; j++)
					{
						ctxt = xmlXPathNewContext(doctree);
						ctxt->node = xmlDocGetRootElement(doctree);

						/* compile the path */
						comppath = xmlXPathCompile(xpaths[j]);
						if (comppath == NULL)
							xml_ereport(xmlerrcxt, ERROR,
										ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
										"XPath Syntax Error");

						/* Now evaluate the path expression. */
						res = xmlXPathCompiledEval(comppath, ctxt);
						xmlXPathFreeCompExpr(comppath);

						if (res != NULL)
						{
							switch (res->type)
							{
								case XPATH_NODESET:
									/* We see if this nodeset has enough nodes */
									if (res->nodesetval != NULL &&
										rownr < res->nodesetval->nodeNr)
									{
										resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
										had_values = true;
									}
									else
										resstr = NULL;

									break;

								case XPATH_STRING:
									resstr = xmlStrdup(res->stringval);
									break;

								default:
									elog(NOTICE, "unsupported XQuery result: %d", res->type);
									resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
							}

							/*
							 * Insert this into the appropriate column in the
							 * result tuple.
							 */
							values[j + 1] = (char *) resstr;
						}
						xmlXPathFreeContext(ctxt);
					}

					/* Now add the tuple to the output, if there is one. */
					if (had_values)
					{
						ret_tuple = BuildTupleFromCStrings(attinmeta, values);
						tuplestore_puttuple(tupstore, ret_tuple);
						heap_freetuple(ret_tuple);
					}

					rownr++;
				} while (had_values);
			}

			if (doctree != NULL)
				xmlFreeDoc(doctree);
			doctree = NULL;

			if (pkey)
				pfree(pkey);
			if (xmldoc)
				pfree(xmldoc);
		}
	}
	PG_CATCH();
	{
		if (doctree != NULL)
			xmlFreeDoc(doctree);

		pg_xml_done(xmlerrcxt, true);

		PG_RE_THROW();
	}
	PG_END_TRY();

	if (doctree != NULL)
		xmlFreeDoc(doctree);

	pg_xml_done(xmlerrcxt, false);

	tuplestore_donestoring(tupstore);

	SPI_finish();

	rsinfo->setResult = tupstore;

	/*
	 * SFRM_Materialize mode expects us to return a NULL Datum. The actual
	 * tuples are in our tuplestore and passed back through rsinfo->setResult.
	 * rsinfo->setDesc is set to the tuple description that we actually used
	 * to build our tuples with, so the caller can verify we did what it was
	 * expecting.
	 */
	return (Datum) 0;
}
Example #16
Datum
crosstab_hash(PG_FUNCTION_ARGS)
{
	char	   *sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
	char	   *cats_sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	TupleDesc	tupdesc;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;
	HTAB	   *crosstab_hash;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize) ||
		rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/* get the requested return tuple description */
	tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);

	/*
	 * Check to make sure we have a reasonable tuple descriptor
	 *
	 * Note we will attempt to coerce the values into whatever the return
	 * attribute type is and depend on the "in" function to complain if
	 * needed.
	 */
	if (tupdesc->natts < 2)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("query-specified return tuple and " \
						"crosstab function are not compatible")));

	/* load up the categories hash table */
	crosstab_hash = load_categories_hash(cats_sql, per_query_ctx);

	/* let the caller know we're sending back a tuplestore */
	rsinfo->returnMode = SFRM_Materialize;

	/* now go build it */
	rsinfo->setResult = get_crosstab_tuplestore(sql,
												crosstab_hash,
												tupdesc,
												per_query_ctx,
							 rsinfo->allowedModes & SFRM_Materialize_Random);

	/*
	 * SFRM_Materialize mode expects us to return a NULL Datum. The actual
	 * tuples are in our tuplestore and passed back through rsinfo->setResult.
	 * rsinfo->setDesc is set to the tuple description that we actually used
	 * to build our tuples with, so the caller can verify we did what it was
	 * expecting.
	 */
	rsinfo->setDesc = tupdesc;
	MemoryContextSwitchTo(oldcontext);

	return (Datum) 0;
}
Example #17
/*
 * Note: plperl_return_next is called both in Postgres and Perl contexts.
 * We report any errors in Postgres fashion (via ereport).	If called in
 * Perl context, it is SPI.xs's responsibility to catch the error and
 * convert to a Perl error.  We assume (perhaps without adequate justification)
 * that we need not abort the current transaction if the Perl code traps the
 * error.
 */
void
plperl_return_next(SV *sv)
{
	plperl_proc_desc *prodesc = plperl_current_prodesc;
	FunctionCallInfo fcinfo = plperl_current_caller_info;
	ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
	MemoryContext cxt;
	HeapTuple	tuple;
	TupleDesc	tupdesc;

	if (!sv)
		return;

	if (!prodesc->fn_retisset)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("cannot use return_next in a non-SETOF function")));

	if (prodesc->fn_retistuple &&
		!(SvOK(sv) && SvTYPE(sv) == SVt_RV && SvTYPE(SvRV(sv)) == SVt_PVHV))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("setof-composite-returning Perl function "
						"must call return_next with reference to hash")));

	cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory);

	if (!plperl_current_tuple_store)
		plperl_current_tuple_store =
			tuplestore_begin_heap(true, false, work_mem);

	if (prodesc->fn_retistuple)
	{
		TypeFuncClass rettype;
		AttInMetadata *attinmeta;

		rettype = get_call_result_type(fcinfo, NULL, &tupdesc);
		tupdesc = CreateTupleDescCopy(tupdesc);
		attinmeta = TupleDescGetAttInMetadata(tupdesc);
		tuple = plperl_build_tuple_result((HV *) SvRV(sv), attinmeta);
	}
	else
	{
		Datum		ret;
		bool		isNull;

		tupdesc = CreateTupleDescCopy(rsi->expectedDesc);

		if (SvOK(sv) && SvTYPE(sv) != SVt_NULL)
		{
			char	   *val = SvPV(sv, PL_na);

			ret = FunctionCall3(&prodesc->result_in_func,
								PointerGetDatum(val),
								ObjectIdGetDatum(prodesc->result_typioparam),
								Int32GetDatum(-1));
			isNull = false;
		}
		else
		{
			ret = (Datum) 0;
			isNull = true;
		}

		tuple = heap_form_tuple(tupdesc, &ret, &isNull);
	}

	if (!plperl_current_tuple_desc)
		plperl_current_tuple_desc = tupdesc;

	tuplestore_puttuple(plperl_current_tuple_store, tuple);
	heap_freetuple(tuple);
	MemoryContextSwitchTo(cxt);
}
Example #18
/*
 * is_null is true when we assign a NULL expression, in which case the type
 * should not be checked.
 */
void
plpgsql_check_recval_assign_tupdesc(PLpgSQL_checkstate *cstate, PLpgSQL_rec *rec, TupleDesc tupdesc, bool is_null)
{

#if PG_VERSION_NUM >= 110000

	PLpgSQL_execstate	   *estate = cstate->estate;
	ExpandedRecordHeader   *newerh;
	MemoryContext			mcontext;
	TupleDesc	var_tupdesc;
	Datum	   *newvalues;
	bool	   *newnulls;
	char	   *chunk;
	int			vtd_natts;
	int			i;

	mcontext = get_eval_mcontext(estate);
	plpgsql_check_recval_release(rec);

	/*
	 * code is reduced version of make_expanded_record_for_rec
	 */
	if (rec->rectypeid != RECORDOID)
	{
		newerh = make_expanded_record_from_typeid(rec->rectypeid, -1,
													  mcontext);
	}
	else
	{
		if (!tupdesc)
			return;

		newerh = make_expanded_record_from_tupdesc(tupdesc,
													   mcontext);
	}

	/*
	 * code is reduced version of exec_move_row_from_field
	 */
	var_tupdesc = expanded_record_get_tupdesc(newerh);
	vtd_natts = var_tupdesc->natts;

	if (!is_null && tupdesc != NULL && !compatible_tupdescs(var_tupdesc, tupdesc))
	{
		int		i = 0;
		int		j = 0;
		int		target_nfields = 0;
		int		src_nfields = 0;
		bool	src_field_is_valid = false;
		bool	target_field_is_valid = false;
		Form_pg_attribute sattr = NULL;
		Form_pg_attribute tattr = NULL;

		while (i < var_tupdesc->natts || j < tupdesc->natts)
		{
			if (!target_field_is_valid && i < var_tupdesc->natts)
			{
				tattr = TupleDescAttr(var_tupdesc, i);
				if (tattr->attisdropped)
				{
					i += 1;
					continue;
				}
				target_field_is_valid = true;
				target_nfields += 1;
			}

			if (!src_field_is_valid && j < tupdesc->natts)
			{
				sattr = TupleDescAttr(tupdesc, j);
				if (sattr->attisdropped)
				{
					j += 1;
					continue;
				}
				src_field_is_valid = true;
				src_nfields += 1;
			}

			if (src_field_is_valid && target_field_is_valid)
			{
				plpgsql_check_assign_to_target_type(cstate,
												tattr->atttypid, tattr->atttypmod,
												sattr->atttypid,
												false);

				/* advance to the next pair of fields */
				src_field_is_valid = false;
				target_field_is_valid = false;
				i += 1;
				j += 1;
			}
			else
				break;
		}

		if (src_nfields < target_nfields)
			plpgsql_check_put_error(cstate,
						  0, 0,
						  "too few attributes for composite variable",
						  NULL,
						  NULL,
						  PLPGSQL_CHECK_WARNING_OTHERS,
						  0, NULL, NULL);
		else if (src_nfields > target_nfields)
			plpgsql_check_put_error(cstate,
						  0, 0,
						  "too many attributes for composite variable",
						  NULL,
						  NULL,
						  PLPGSQL_CHECK_WARNING_OTHERS,
						  0, NULL, NULL);
	}

	chunk = eval_mcontext_alloc(estate,
								vtd_natts * (sizeof(Datum) + sizeof(bool)));
	newvalues = (Datum *) chunk;
	newnulls = (bool *) (chunk + vtd_natts * sizeof(Datum));

	for (i = 0; i < vtd_natts; i++)
	{
		newvalues[i] = (Datum) 0;
		newnulls[i] = true;
	}

	expanded_record_set_fields(newerh, newvalues, newnulls, true);

	TransferExpandedRecord(newerh, estate->datum_context);
	rec->erh = newerh;

#else

	bool	   *nulls;
	HeapTuple	tup;

	plpgsql_check_recval_release(rec);

	if (!tupdesc)
		return;

	/* initialize rec by NULLs */
	nulls = (bool *) palloc(tupdesc->natts * sizeof(bool));
	memset(nulls, true, tupdesc->natts * sizeof(bool));

	rec->tupdesc = CreateTupleDescCopy(tupdesc);
	rec->freetupdesc = true;

	tup = heap_form_tuple(tupdesc, NULL, nulls);
	if (HeapTupleIsValid(tup))
	{
		rec->tup = tup;
		rec->freetup = true;
	}
	else
		elog(ERROR, "cannot to build valid composite value");

#endif

}
Example #19
/*
 * TypeGetTupleDesc
 *
 * Given a type Oid, build a TupleDesc.  (In most cases you should be
 * using get_call_result_type or one of its siblings instead of this
 * routine, so that you can handle OUT parameters, RECORD result type,
 * and polymorphic results.)
 *
 * If the type is composite, *and* a colaliases List is provided, *and*
 * the List is of natts length, use the aliases instead of the relation
 * attnames.  (NB: this usage is deprecated since it may result in
 * creation of unnecessary transient record types.)
 *
 * If the type is a base type, a single item alias List is required.
 */
TupleDesc
TypeGetTupleDesc(Oid typeoid, List *colaliases)
{
	TypeFuncClass functypclass = get_type_func_class(typeoid);
	TupleDesc	tupdesc = NULL;

	/*
	 * Build a suitable tupledesc representing the output rows
	 */
	if (functypclass == TYPEFUNC_COMPOSITE)
	{
		/* Composite data type, e.g. a table's row type */
		tupdesc = CreateTupleDescCopy(lookup_rowtype_tupdesc(typeoid, -1));

		if (colaliases != NIL)
		{
			int			natts = tupdesc->natts;
			int			varattno;

			/* does the list length match the number of attributes? */
			if (list_length(colaliases) != natts)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("number of aliases does not match number of columns")));

			/* OK, use the aliases instead */
			for (varattno = 0; varattno < natts; varattno++)
			{
				char	   *label = strVal(list_nth(colaliases, varattno));

				if (label != NULL)
					namestrcpy(&(tupdesc->attrs[varattno]->attname), label);
			}

			/* The tuple type is now an anonymous record type */
			tupdesc->tdtypeid = RECORDOID;
			tupdesc->tdtypmod = -1;
		}
	}
	else if (functypclass == TYPEFUNC_SCALAR)
	{
		/* Base data type, i.e. scalar */
		char	   *attname;

		/* the alias list is required for base types */
		if (colaliases == NIL)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("no column alias was provided")));

		/* the alias list length must be 1 */
		if (list_length(colaliases) != 1)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
			  errmsg("number of aliases does not match number of columns")));

		/* OK, get the column alias */
		attname = strVal(linitial(colaliases));

		tupdesc = CreateTemplateTupleDesc(1, false);
		TupleDescInitEntry(tupdesc,
						   (AttrNumber) 1,
						   attname,
						   typeoid,
						   -1,
						   0);
	}
	else if (functypclass == TYPEFUNC_RECORD)
	{
		/* XXX can't support this because typmod wasn't passed in ... */
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("could not determine row description for function returning record")));
	}
	else
	{
		/* crummy error message, but parser should have caught this */
		elog(ERROR, "function in FROM has unsupported return type");
	}

	return tupdesc;
}
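A small usage sketch for the base-type branch described above; INT4OID and the alias "val" are chosen arbitrarily, and exactly one alias must be supplied for a scalar type.

#include "postgres.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "nodes/pg_list.h"
#include "nodes/value.h"

static TupleDesc
int4_single_column_desc(void)
{
	/* For a base type the alias list must contain exactly one name */
	List	   *aliases = list_make1(makeString(pstrdup("val")));

	return TypeGetTupleDesc(INT4OID, aliases);
}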
Example #20
/* ----------------------------------------------------------------
 *		ExecInitFunctionScan
 * ----------------------------------------------------------------
 */
FunctionScanState *
ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags)
{
	FunctionScanState *scanstate;
	RangeTblEntry *rte;
	Oid			funcrettype;
	TypeFuncClass functypclass;
	TupleDesc	tupdesc = NULL;

	/*
	 * FunctionScan should not have any children.
	 */
	Assert(outerPlan(node) == NULL);
	Assert(innerPlan(node) == NULL);

	/*
	 * create new ScanState for node
	 */
	scanstate = makeNode(FunctionScanState);
	scanstate->ss.ps.plan = (Plan *) node;
	scanstate->ss.ps.state = estate;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &scanstate->ss.ps);

#define FUNCTIONSCAN_NSLOTS 2

	/*
	 * tuple table initialization
	 */
	ExecInitResultTupleSlot(estate, &scanstate->ss.ps);
	ExecInitScanTupleSlot(estate, &scanstate->ss);

	/*
	 * initialize child expressions
	 */
	scanstate->ss.ps.targetlist = (List *)
		ExecInitExpr((Expr *) node->scan.plan.targetlist,
					 (PlanState *) scanstate);
	scanstate->ss.ps.qual = (List *)
		ExecInitExpr((Expr *) node->scan.plan.qual,
					 (PlanState *) scanstate);

	/* Check if targetlist or qual contains a var node referencing the ctid column */
	scanstate->cdb_want_ctid = contain_ctid_var_reference(&node->scan);

	ItemPointerSet(&scanstate->cdb_fake_ctid, 0, 0);
	ItemPointerSet(&scanstate->cdb_mark_ctid, 0, 0);

	/*
	 * get info about function
	 */
	rte = rt_fetch(node->scan.scanrelid, estate->es_range_table);
	Assert(rte->rtekind == RTE_FUNCTION);

	/*
	 * Now determine if the function returns a simple or composite type, and
	 * build an appropriate tupdesc.
	 */
	functypclass = get_expr_result_type(rte->funcexpr,
										&funcrettype,
										&tupdesc);

	if (functypclass == TYPEFUNC_COMPOSITE)
	{
		/* Composite data type, e.g. a table's row type */
		Assert(tupdesc);
		/* Must copy it out of typcache for safety */
		tupdesc = CreateTupleDescCopy(tupdesc);
	}
	else if (functypclass == TYPEFUNC_SCALAR)
	{
		/* Base data type, i.e. scalar */
		char	   *attname = strVal(linitial(rte->eref->colnames));

		tupdesc = CreateTemplateTupleDesc(1, false);
		TupleDescInitEntry(tupdesc,
						   (AttrNumber) 1,
						   attname,
						   funcrettype,
						   -1,
						   0);
	}
	else if (functypclass == TYPEFUNC_RECORD)
	{
		tupdesc = BuildDescFromLists(rte->eref->colnames,
									 rte->funccoltypes,
									 rte->funccoltypmods);
	}
	else
	{
		/* crummy error message, but parser should have caught this */
		elog(ERROR, "function in FROM has unsupported return type");
	}

	/*
	 * For RECORD results, make sure a typmod has been assigned.  (The
	 * function should do this for itself, but let's cover things in case it
	 * doesn't.)
	 */
	BlessTupleDesc(tupdesc);

	scanstate->tupdesc = tupdesc;
	ExecAssignScanType(&scanstate->ss, tupdesc);

	/*
	 * Other node-specific setup
	 */
	scanstate->tuplestorestate = NULL;
	scanstate->funcexpr = ExecInitExpr((Expr *) rte->funcexpr,
									   (PlanState *) scanstate);

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&scanstate->ss.ps);
	ExecAssignScanProjectionInfo(&scanstate->ss);

	initGpmonPktForFunctionScan((Plan *)node, &scanstate->ss.ps.gpmon_pkt, estate);
	
	if (gp_resqueue_memory_policy != RESQUEUE_MEMORY_POLICY_NONE)
	{
		SPI_ReserveMemory(((Plan *)node)->operatorMemKB * 1024L);
	}

	return scanstate;
}
Example #21
/*
 * Find or create a hashtable entry for the tuple group containing the
 * given tuple.  The tuple must be the same type as the hashtable entries.
 *
 * If isnew is NULL, we do not create new entries; we return NULL if no
 * match is found.
 *
 * If isnew isn't NULL, then a new entry is created if no existing entry
 * matches.  On return, *isnew is true if the entry is newly created,
 * false if it existed already.  ->additional in the new entry has
 * been zeroed.
 */
TupleHashEntry
LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
					 bool *isnew)
{
	TupleHashEntryData *entry;
	MemoryContext oldContext;
	bool		found;
	MinimalTuple key;

	/* If first time through, clone the input slot to make table slot */
	if (hashtable->tableslot == NULL)
	{
		TupleDesc	tupdesc;

		oldContext = MemoryContextSwitchTo(hashtable->tablecxt);

		/*
		 * We copy the input tuple descriptor just for safety --- we assume
		 * all input tuples will have equivalent descriptors.
		 */
		tupdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
		hashtable->tableslot = MakeSingleTupleTableSlot(tupdesc);
		MemoryContextSwitchTo(oldContext);
	}

	/* Need to run the hash functions in short-lived context */
	oldContext = MemoryContextSwitchTo(hashtable->tempcxt);

	/* set up data needed by hash and match functions */
	hashtable->inputslot = slot;
	hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
	hashtable->cur_eq_funcs = hashtable->tab_eq_funcs;

	key = NULL; /* flag to reference inputslot */

	if (isnew)
	{
		entry = tuplehash_insert(hashtable->hashtab, key, &found);

		if (found)
		{
			/* found pre-existing entry */
			*isnew = false;
		}
		else
		{
			/* created new entry */
			*isnew = true;
			/* zero caller data */
			entry->additional = NULL;
			MemoryContextSwitchTo(hashtable->tablecxt);
			/* Copy the first tuple into the table context */
			entry->firstTuple = ExecCopySlotMinimalTuple(slot);
		}
	}
	else
	{
		entry = tuplehash_lookup(hashtable->hashtab, key);
	}

	MemoryContextSwitchTo(oldContext);

	return entry;
}
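A caller-side sketch of the contract described in the header comment; it assumes a TupleHashTable already built with BuildTupleHashTable and a slot holding the current input tuple, and the per-group payload is only a placeholder.

static void
remember_group(TupleHashTable hashtable, TupleTableSlot *slot)
{
	bool		isnew;
	TupleHashEntry entry;

	entry = LookupTupleHashEntry(hashtable, slot, &isnew);
	if (isnew)
	{
		/* first tuple of this group: attach per-group state */
		entry->additional = palloc0(sizeof(int64));
	}

	/* passing isnew = NULL instead would turn this into a pure lookup */
}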
Example #22
/*
 * ExecFilterRecommend
 *
 * This function just borrows a tuple descriptor from the RecView,
 * but we create the data ourselves through various means.
 */
static TupleTableSlot*
ExecFilterRecommend(RecScanState *recnode,
					 ExecScanAccessMtd accessMtd,
					 ExecScanRecheckMtd recheckMtd)
{
	ExprContext *econtext;
	List	   *qual;
	ProjectionInfo *projInfo;
	ExprDoneCond isDone;
	TupleTableSlot *resultSlot;
	ScanState *node;
	AttributeInfo *attributes;

	node = recnode->subscan;
	attributes = (AttributeInfo*) recnode->attributes;

	/*
	 * Fetch data from node
	 */
	qual = node->ps.qual;
	projInfo = node->ps.ps_ProjInfo;
	econtext = node->ps.ps_ExprContext;

	/*
	 * Check to see if we're still projecting out tuples from a previous scan
	 * tuple (because there is a function-returning-set in the projection
	 * expressions).  If so, try to project another one.
	 */
	if (node->ps.ps_TupFromTlist)
	{
		Assert(projInfo);		/* can't get here if not projecting */
		resultSlot = ExecProject(projInfo, &isDone);
		if (isDone == ExprMultipleResult)
			return resultSlot;
		/* Done with that source tuple... */
		node->ps.ps_TupFromTlist = false;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.  Note this can't happen
	 * until we're done projecting out tuples from a scan tuple.
	 */
	ResetExprContext(econtext);

	/*
	 * get a tuple from the access method.	Loop until we obtain a tuple that
	 * passes the qualification.
	 */
	for (;;)
	{
		TupleTableSlot *slot;
		int natts, i, userID, userindex, itemID, itemindex;

		CHECK_FOR_INTERRUPTS();

		slot = recnode->ss.ps.ps_ResultTupleSlot;

		/* The first thing we need to do is initialize our recommender
		 * model and other things, if we haven't done so already. */
		if (!recnode->initialized)
			InitializeRecommender(recnode);

		/*
		 * If we've exhausted our item list, then we're totally
		 * finished. We set a flag for this. It's possible that
		 * we'll be in the inner loop of a join, through poor
		 * planning, so we'll reset the appropriate data in case
		 * we have to do this again, though our JoinRecommend
		 * should assure this doesn't happen.
		 */
		if (recnode->finished) {
			recnode->finished = false;
			recnode->userNum = 0;
			recnode->itemNum = 0;
			return NULL;
		}

		/* We're only going to fetch one tuple and store its tuple
		 * descriptor. We can use this tuple descriptor to make as
		 * many new tuples as we want. */
		if (recnode->base_slot == NULL) {
			slot = ExecRecFetch(node, accessMtd, recheckMtd);
			recnode->base_slot = CreateTupleDescCopy(slot->tts_tupleDescriptor);
		}

		/* Create a new slot to operate on. */
		slot = MakeSingleTupleTableSlot(recnode->base_slot);
		slot->tts_isempty = false;

		/*
		 * place the current tuple into the expr context
		 */
		econtext->ecxt_scantuple = slot;

		/* Mark all columns of the slot as valid. */
		natts = slot->tts_tupleDescriptor->natts;
		for (i = 0; i < natts; i++) {
			/* Mark slot. */
			slot->tts_values[i] = Int32GetDatum(0);
			slot->tts_isnull[i] = false;
			slot->tts_nvalid++;
		}

		/* While we're here, record what tuple attributes
		 * correspond to our key columns. This will save
		 * us unnecessary strcmp functions. */
		if (recnode->useratt < 0) {
			for (i = 0; i < natts; i++) {
				char* col_name = slot->tts_tupleDescriptor->attrs[i]->attname.data;
				if (strcmp(col_name,attributes->userkey) == 0)
					recnode->useratt = i;
				else if (strcmp(col_name,attributes->itemkey) == 0)
					recnode->itematt = i;
				else if (strcmp(col_name,attributes->eventval) == 0)
					recnode->eventatt = i;
			}
		}

		/*
		 * We now have a problem: we need to create prediction structures
		 * for a user before we do filtering, so that we can have a proper
		 * item list. But we also need to filter before creating those
		 * structures, so we don't end up taking forever with it. The
		 * solution is to filter twice.
		 */
		userID = -1; itemID = -1;

		/* First, replace the user ID. */
		userindex = recnode->userNum;
		userID = recnode->userList[userindex];

		/*
		 * We now have a blank tuple slot that we need to fill with data.
		 * We have a working user ID, but not a valid item list. We'd like to
		 * use the filter to determine if this is a good user, but we can't
		 * do that without an item, in many cases. The solution is to add in
		 * dummy items, then compare it against the filter. If a given user ID
		 * doesn't make it past the filter with any item ID, then that user is
		 * being filtered out, and we'll move on to the next.
		 */
		if (recnode->newUser) {
			recnode->fullItemNum = 0;
			itemindex = recnode->fullItemNum;
			itemID = recnode->fullItemList[itemindex];

			slot->tts_values[recnode->useratt] = Int32GetDatum(userID);
			slot->tts_values[recnode->itematt] = Int32GetDatum(itemID);
			slot->tts_values[recnode->eventatt] = Int32GetDatum(-1);

			/* We have a preliminary slot - let's test it. */
			while (qual && !ExecQual(qual, econtext, false)) {
				/* We failed the test. Try the next item. */
				recnode->fullItemNum++;
				if (recnode->fullItemNum >= recnode->fullTotalItems) {
					/* If we've reached the last item, move onto the next user.
					 * If we've reached the last user, we're done. */
					InstrCountFiltered1(node, recnode->fullTotalItems);
					recnode->userNum++;
					recnode->newUser = true;
					recnode->fullItemNum = 0;
					if (recnode->userNum >= recnode->totalUsers) {
						recnode->userNum = 0;
						recnode->itemNum = 0;
						return NULL;
					}
					userindex = recnode->userNum;
					userID = recnode->userList[userindex];
				}

				itemindex = recnode->fullItemNum;
				itemID = recnode->fullItemList[itemindex];
				slot->tts_values[recnode->useratt] = Int32GetDatum(userID);
				slot->tts_values[recnode->itematt] = Int32GetDatum(itemID);
			}

			/* If we get here, we found a user who will actually be
			 * returned in the results. One quick reset here. */
			recnode->fullItemNum = 0;
		}

		/* Mark the user ID and index. */
		attributes->userID = userID;
		recnode->userindex = userindex;

		/* With the user ID determined, we need to investigate and see
		 * if this is a new user. If so, attempt to create prediction
		 * data structures, or report that this user is invalid. We have
		 * to do this here, so we can establish the item list. */
		if (recnode->newUser) {
			recnode->validUser = prepUserForRating(recnode,userID);
			recnode->newUser = false;
		}

		/* Now replace the item ID, if the user is valid. Otherwise,
		 * leave the item ID as is, as it doesn't matter what it is. */
		if (recnode->validUser)
			itemID = recnode->itemList[recnode->itemNum];
		while (recnode->fullItemList[recnode->fullItemNum] < itemID)
			recnode->fullItemNum++;
		itemindex = recnode->fullItemNum;
		if (recnode->fullItemList[itemindex] > itemID)
			elog(ERROR, "critical item mismatch in ExecRecommend");

		/* Plug in the data, marking those columns full. We also need to
		 * mark the rating column with something temporary. */
		slot->tts_values[recnode->useratt] = Int32GetDatum(userID);
		slot->tts_values[recnode->itematt] = Int32GetDatum(itemID);
		slot->tts_values[recnode->eventatt] = Int32GetDatum(-1);

		/* It's possible our filter criteria involves the RecScore somehow.
		 * If that's the case, we need to calculate it before we do the
		 * qual filtering. Also, if we're doing a JoinRecommend, we should
		 * not calculate the RecScore in this node. In the current version
		 * of RecDB, an OP_NOFILTER shouldn't be allowed. */
		if (attributes->opType == OP_NOFILTER)
			applyRecScore(recnode, slot, itemID, itemindex);

		/* Move onto the next item, for next time. If we're doing a RecJoin,
		 * though, we'll move onto the next user instead. */
		recnode->itemNum++;
		if (recnode->itemNum >= recnode->totalItems ||
			attributes->opType == OP_JOIN ||
			attributes->opType == OP_GENERATEJOIN) {
			/* If we've reached the last item, move onto the next user.
			 * If we've reached the last user, we're done. */
			recnode->userNum++;
			recnode->newUser = true;
			recnode->itemNum = 0;
			recnode->fullItemNum = 0;
			if (recnode->userNum >= recnode->totalUsers)
				recnode->finished = true;
		}

		/*
		 * check that the current tuple satisfies the qual-clause
		 *
		 * check for non-nil qual here to avoid a function call to ExecQual()
		 * when the qual is nil ... saves only a few cycles, but they add up
		 * ...
		 */
		if (!qual || ExecQual(qual, econtext, false))
		{
			/*
			 * If this is an invalid user, then we'll skip this tuple,
			 * adding one to the filter count.
			 */
			if (!recnode->validUser) {
				InstrCountFiltered1(node, 1);
				ResetExprContext(econtext);
				ExecDropSingleTupleTableSlot(slot);
				continue;
			}

			/*
			 * Found a satisfactory scan tuple. This is usually when
			 * we will calculate and apply the RecScore.
			 */
			if (attributes->opType == OP_FILTER || attributes->opType == OP_GENERATE)
				applyRecScore(recnode, slot, itemID, itemindex);

			if (projInfo)
			{
				/*
				 * Form a projection tuple, store it in the result tuple slot
				 * and return it --- unless we find we can project no tuples
				 * from this scan tuple, in which case continue scan.
				 */
				resultSlot = ExecProject(projInfo, &isDone);
				if (isDone != ExprEndResult)
				{
					node->ps.ps_TupFromTlist = (isDone == ExprMultipleResult);

					return resultSlot;
				}
			}
			else
			{
				/*
				 * Here, we aren't projecting, so just return scan tuple.
				 */

				return slot;
			}
		}
		else
			InstrCountFiltered1(node, 1);

		/*
		 * Tuple fails qual, so free per-tuple memory and try again.
		 */
		ResetExprContext(econtext);
		ExecDropSingleTupleTableSlot(slot);
	}

}
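/*
 * Minimal sketch, not part of the original RecDB source: how a parent node
 * would typically drain tuples from a scan node such as the recommender
 * above. ExecProcNode() and TupIsNull() are standard executor facilities;
 * the helper name is illustrative only.
 */
static void
drain_scan_node(PlanState *child)
{
	for (;;)
	{
		TupleTableSlot *outslot = ExecProcNode(child);

		if (TupIsNull(outslot))
			break;				/* scan is exhausted */

		/* ... consume the projected tuple here ... */
	}
}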
Example #23
0
/* ----------------------------------------------------------------
 *		ExecInitFunctionScan
 * ----------------------------------------------------------------
 */
FunctionScanState *
ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags)
{
	FunctionScanState *scanstate;
	Oid			funcrettype;
	TypeFuncClass functypclass;
	TupleDesc	tupdesc = NULL;

	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));

	/*
	 * FunctionScan should not have any children.
	 */
	Assert(outerPlan(node) == NULL);
	Assert(innerPlan(node) == NULL);

	/*
	 * create new ScanState for node
	 */
	scanstate = makeNode(FunctionScanState);
	scanstate->ss.ps.plan = (Plan *) node;
	scanstate->ss.ps.state = estate;
	scanstate->eflags = eflags;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &scanstate->ss.ps);

	/*
	 * tuple table initialization
	 */
	ExecInitResultTupleSlot(estate, &scanstate->ss.ps);
	ExecInitScanTupleSlot(estate, &scanstate->ss);

	/*
	 * initialize child expressions
	 */
	scanstate->ss.ps.targetlist = (List *)
		ExecInitExpr((Expr *) node->scan.plan.targetlist,
					 (PlanState *) scanstate);
	scanstate->ss.ps.qual = (List *)
		ExecInitExpr((Expr *) node->scan.plan.qual,
					 (PlanState *) scanstate);

	/*
	 * Now determine if the function returns a simple or composite type, and
	 * build an appropriate tupdesc.
	 */
	functypclass = get_expr_result_type(node->funcexpr,
										&funcrettype,
										&tupdesc);

	if (functypclass == TYPEFUNC_COMPOSITE)
	{
		/* Composite data type, e.g. a table's row type */
		Assert(tupdesc);
		/* Must copy it out of typcache for safety */
		tupdesc = CreateTupleDescCopy(tupdesc);
	}
	else if (functypclass == TYPEFUNC_SCALAR)
	{
		/* Base data type, i.e. scalar */
		char	   *attname = strVal(linitial(node->funccolnames));

		tupdesc = CreateTemplateTupleDesc(1, false);
		TupleDescInitEntry(tupdesc,
						   (AttrNumber) 1,
						   attname,
						   funcrettype,
						   -1,
						   0);
	}
	else if (functypclass == TYPEFUNC_RECORD)
	{
		tupdesc = BuildDescFromLists(node->funccolnames,
									 node->funccoltypes,
									 node->funccoltypmods);
	}
	else
	{
		/* crummy error message, but parser should have caught this */
		elog(ERROR, "function in FROM has unsupported return type");
	}

	/*
	 * For RECORD results, make sure a typmod has been assigned.  (The
	 * function should do this for itself, but let's cover things in case it
	 * doesn't.)
	 */
	BlessTupleDesc(tupdesc);

	scanstate->tupdesc = tupdesc;
	ExecAssignScanType(&scanstate->ss, tupdesc);

	/*
	 * Other node-specific setup
	 */
	scanstate->tuplestorestate = NULL;
	scanstate->funcexpr = ExecInitExpr((Expr *) node->funcexpr,
									   (PlanState *) scanstate);

	scanstate->ss.ps.ps_TupFromTlist = false;

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&scanstate->ss.ps);
	ExecAssignScanProjectionInfo(&scanstate->ss);

	return scanstate;
}
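/*
 * Minimal sketch, not from the original file: building a small tuple
 * descriptor by hand, the same way the TYPEFUNC_SCALAR branch above builds
 * a one-column descriptor. Column names are arbitrary; INT4OID and TEXTOID
 * come from catalog/pg_type.h. The two-argument CreateTemplateTupleDesc()
 * call matches the pre-v12 signature used above.
 */
static TupleDesc
make_example_tupdesc(void)
{
	TupleDesc	tupdesc = CreateTemplateTupleDesc(2, false);

	TupleDescInitEntry(tupdesc, (AttrNumber) 1, "id", INT4OID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 2, "label", TEXTOID, -1, 0);

	/* assign a typmod so the descriptor can also serve RECORD results */
	BlessTupleDesc(tupdesc);

	return tupdesc;
}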
Example #24
0
/*
 * Construct an empty TupleHashTable
 *
 *	numCols, keyColIdx: identify the tuple fields to use as lookup key
 *	eqfuncoids: OIDs of the equality comparison functions to use
 *	hashfunctions: datatype-specific hashing functions to use
 *	nbuckets: initial estimate of hashtable size
 *	additionalsize: size of data stored in ->additional
 *	tablecxt: memory context in which to store table and table entries
 *	tempcxt: short-lived context for evaluating hash and comparison functions
 *
 * The function arrays may be made with execTuplesHashPrepare().  Note they
 * are not cross-type functions, but expect to see the table datatype(s)
 * on both sides.
 *
 * Note that keyColIdx, eqfuncoids, and hashfunctions must be allocated in
 * storage that will live as long as the hashtable does.
 */
TupleHashTable
BuildTupleHashTable(PlanState *parent,
					TupleDesc inputDesc,
					int numCols, AttrNumber *keyColIdx,
					Oid *eqfuncoids,
					FmgrInfo *hashfunctions,
					long nbuckets, Size additionalsize,
					MemoryContext tablecxt, MemoryContext tempcxt,
					bool use_variable_hash_iv)
{
	TupleHashTable hashtable;
	Size		entrysize = sizeof(TupleHashEntryData) + additionalsize;
	MemoryContext oldcontext;

	Assert(nbuckets > 0);

	/* Limit initial table size request to not more than work_mem */
	nbuckets = Min(nbuckets, (long) ((work_mem * 1024L) / entrysize));

	hashtable = (TupleHashTable)
		MemoryContextAlloc(tablecxt, sizeof(TupleHashTableData));

	hashtable->numCols = numCols;
	hashtable->keyColIdx = keyColIdx;
	hashtable->tab_hash_funcs = hashfunctions;
	hashtable->tablecxt = tablecxt;
	hashtable->tempcxt = tempcxt;
	hashtable->entrysize = entrysize;
	hashtable->tableslot = NULL;	/* filled in just below */
	hashtable->inputslot = NULL;
	hashtable->in_hash_funcs = NULL;
	hashtable->cur_eq_func = NULL;

	/*
	 * If parallelism is in use, even if the master backend is performing the
	 * scan itself, we don't want to create the hashtable exactly the same way
	 * in all workers. As hashtables are iterated over in keyspace-order,
	 * doing so in all processes in the same way is likely to lead to
	 * "unbalanced" hashtables when the table size initially is
	 * underestimated.
	 */
	if (use_variable_hash_iv)
		hashtable->hash_iv = murmurhash32(ParallelWorkerNumber);
	else
		hashtable->hash_iv = 0;

	hashtable->hashtab = tuplehash_create(tablecxt, nbuckets, hashtable);

	oldcontext = MemoryContextSwitchTo(hashtable->tablecxt);

	/*
	 * We copy the input tuple descriptor just for safety --- we assume all
	 * input tuples will have equivalent descriptors.
	 */
	hashtable->tableslot = MakeSingleTupleTableSlot(CreateTupleDescCopy(inputDesc),
													&TTSOpsMinimalTuple);

	/* build comparator for all columns */
	/* XXX: should we support non-minimal tuples for the inputslot? */
	hashtable->tab_eq_func = ExecBuildGroupingEqual(inputDesc, inputDesc,
													&TTSOpsMinimalTuple, &TTSOpsMinimalTuple,
													numCols,
													keyColIdx, eqfuncoids,
													parent);

	MemoryContextSwitchTo(oldcontext);

	hashtable->exprcontext = CreateExprContext(parent->state);

	return hashtable;
}
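/*
 * Minimal sketch, assuming the argument list shown above (the exact
 * signature differs across PostgreSQL releases): building a grouping hash
 * table from key-column metadata the caller has already prepared, e.g.
 * with execTuplesHashPrepare(). All names below are illustrative.
 */
static TupleHashTable
example_build_group_table(PlanState *parent, TupleDesc inputDesc,
						  int numCols, AttrNumber *keyColIdx,
						  Oid *eqfuncoids, FmgrInfo *hashfunctions,
						  MemoryContext tablecxt, MemoryContext tempcxt)
{
	return BuildTupleHashTable(parent, inputDesc,
							   numCols, keyColIdx,
							   eqfuncoids, hashfunctions,
							   100,		/* rough nbuckets estimate */
							   0,		/* no per-entry "additional" data */
							   tablecxt, tempcxt,
							   false);	/* no variable hash IV */
}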
Example #25
0
void
TupleFormerInit(TupleFormer *former, Filter *filter, TupleDesc desc)
{
	AttrNumber			natts;
	AttrNumber			maxatts;
	int					i;
	Oid					in_func_oid;

	former->desc = CreateTupleDescCopy(desc);
	for (i = 0; i < desc->natts; i++)
		former->desc->attrs[i]->attnotnull = desc->attrs[i]->attnotnull;

	/*
	 * allocate buffer to store columns or function arguments
	 */
	if (filter->funcstr)
	{
		natts = filter->nargs;
		maxatts = Max(natts, desc->natts);
	}
	else
		natts = maxatts = desc->natts;

	former->values = palloc(sizeof(Datum) * maxatts);
	former->isnull = palloc(sizeof(bool) * maxatts);
	MemSet(former->isnull, true, sizeof(bool) * maxatts);

	/*
	 * get column information of the target relation
	 */
	former->typId = (Oid *) palloc(natts * sizeof(Oid));
	former->typIOParam = (Oid *) palloc(natts * sizeof(Oid));
	former->typInput = (FmgrInfo *) palloc(natts * sizeof(FmgrInfo));
	former->typMod = (Oid *) palloc(natts * sizeof(Oid));
	former->attnum = palloc(natts * sizeof(int));

	if (filter->funcstr)
	{
		former->maxfields = natts;
		former->minfields = former->maxfields - filter->fn_ndargs;

		for (i = 0; i < natts; i++)
		{
			/* get type information and input function */
			getTypeInputInfo(filter->argtypes[i],
						 &in_func_oid, &former->typIOParam[i]);
			fmgr_info(in_func_oid, &former->typInput[i]);

			former->typMod[i] = -1;
			former->attnum[i] = i;
			former->typId[i] = filter->argtypes[i];
		}
	}
	else
	{
		Form_pg_attribute  *attrs;

		attrs = desc->attrs;
		former->maxfields = 0;
		for (i = 0; i < natts; i++)
		{
			/* ignore dropped columns */
			if (attrs[i]->attisdropped)
				continue;

			/* get type information and input function */
			getTypeInputInfo(attrs[i]->atttypid,
							 &in_func_oid, &former->typIOParam[i]);
			fmgr_info(in_func_oid, &former->typInput[i]);

			former->typMod[i] = attrs[i]->atttypmod;
			former->typId[i] = attrs[i]->atttypid;

			/* update valid column information */
			former->attnum[former->maxfields] = i;
			former->maxfields++;
		}

		former->minfields = former->maxfields;
	}
}
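/*
 * Minimal sketch of a hypothetical helper, not part of pg_bulkload: turning
 * one C-string field into a Datum using the per-column conversion data that
 * TupleFormerInit() prepared above. InputFunctionCall() is the standard way
 * to invoke a type's input function.
 */
static Datum
former_read_field(TupleFormer *former, int col, char *value, bool *isnull)
{
	if (value == NULL)
	{
		*isnull = true;
		return (Datum) 0;
	}

	*isnull = false;
	return InputFunctionCall(&former->typInput[col],
							 value,
							 former->typIOParam[col],
							 (int32) former->typMod[col]);
}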
Example #26
0
static PyObject *
PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
{
	PLyResultObject *result;
	volatile MemoryContext oldcontext;

	result = (PLyResultObject *) PLy_result_new();
	Py_DECREF(result->status);
	result->status = PyInt_FromLong(status);

	if (status > 0 && tuptable == NULL)
	{
		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
	}
	else if (status > 0 && tuptable != NULL)
	{
		PLyTypeInfo args;
		int			i;

		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
		PLy_typeinfo_init(&args);

		oldcontext = CurrentMemoryContext;
		PG_TRY();
		{
			MemoryContext oldcontext2;

			/*
			 * Save tuple descriptor for later use by result set metadata
			 * functions.  Save it in TopMemoryContext so that it survives
			 * outside of an SPI context.  We trust that PLy_result_dealloc()
			 * will clean it up when the time is right.
			 */
			oldcontext2 = MemoryContextSwitchTo(TopMemoryContext);
			result->tupdesc = CreateTupleDescCopy(tuptable->tupdesc);
			MemoryContextSwitchTo(oldcontext2);

			if (rows)
			{
				Py_DECREF(result->rows);
				result->rows = PyList_New(rows);

				PLy_input_tuple_funcs(&args, tuptable->tupdesc);
				for (i = 0; i < rows; i++)
				{
					PyObject   *row = PLyDict_FromTuple(&args, tuptable->vals[i],
														tuptable->tupdesc);

					PyList_SetItem(result->rows, i, row);
				}
			}
		}
		PG_CATCH();
		{
			MemoryContextSwitchTo(oldcontext);
			if (!PyErr_Occurred())
				PLy_exception_set(PLy_exc_error,
					   "unrecognized error in PLy_spi_execute_fetch_result");
			PLy_typeinfo_dealloc(&args);
			SPI_freetuptable(tuptable);
			Py_DECREF(result);
			return NULL;
		}
		PG_END_TRY();

		PLy_typeinfo_dealloc(&args);
		SPI_freetuptable(tuptable);
	}

	return (PyObject *) result;
}
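/*
 * This is the code path behind plpy.execute() in PL/Python. Illustrative
 * usage inside a function written in LANGUAGE plpythonu (table and column
 * names are arbitrary):
 *
 *   rv = plpy.execute("SELECT relname FROM pg_class LIMIT 5")
 *   for row in rv:
 *       plpy.notice(row["relname"])
 *
 * rv.nrows() and rv.status() expose the nrows and status values populated
 * above, and rv behaves as a list of dictionaries keyed by column name.
 */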
Example #27
0
Datum
crosstab(PG_FUNCTION_ARGS)
{
	char	   *sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	Tuplestorestate *tupstore;
	TupleDesc	tupdesc;
	int			call_cntr;
	int			max_calls;
	AttInMetadata *attinmeta;
	SPITupleTable *spi_tuptable;
	TupleDesc	spi_tupdesc;
	bool		firstpass;
	char	   *lastrowid;
	int			i;
	int			num_categories;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;
	int			ret;
	int			proc;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		/* internal error */
		elog(ERROR, "crosstab: SPI_connect returned %d", ret);

	/* Retrieve the desired rows */
	ret = SPI_execute(sql, true, 0);
	proc = SPI_processed;

	/* If no qualifying tuples, fall out early */
	if (ret != SPI_OK_SELECT || proc <= 0)
	{
		SPI_finish();
		rsinfo->isDone = ExprEndResult;
		PG_RETURN_NULL();
	}

	spi_tuptable = SPI_tuptable;
	spi_tupdesc = spi_tuptable->tupdesc;

	/*----------
	 * The provided SQL query must always return three columns.
	 *
	 * 1. rowname
	 *	the label or identifier for each row in the final result
	 * 2. category
	 *	the label or identifier for each column in the final result
	 * 3. values
	 *	the value for each column in the final result
	 *----------
	 */
	if (spi_tupdesc->natts != 3)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid source data SQL statement"),
				 errdetail("The provided SQL must return 3 "
						   "columns: rowid, category, and values.")));

	/* get a tuple descriptor for our result type */
	switch (get_call_result_type(fcinfo, NULL, &tupdesc))
	{
		case TYPEFUNC_COMPOSITE:
			/* success */
			break;
		case TYPEFUNC_RECORD:
			/* failed to determine actual type of RECORD */
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("function returning record called in context "
							"that cannot accept type record")));
			break;
		default:
			/* result type isn't composite */
			elog(ERROR, "return type must be a row type");
			break;
	}

	/*
	 * Check that return tupdesc is compatible with the data we got from SPI,
	 * at least based on number and type of attributes
	 */
	if (!compatCrosstabTupleDescs(tupdesc, spi_tupdesc))
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("return and sql tuple descriptions are " \
						"incompatible")));

	/*
	 * switch to long-lived memory context
	 */
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/* make sure we have a persistent copy of the result tupdesc */
	tupdesc = CreateTupleDescCopy(tupdesc);

	/* initialize our tuplestore in long-lived context */
	tupstore =
		tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
							  false, work_mem);

	MemoryContextSwitchTo(oldcontext);

	/*
	 * Generate attribute metadata needed later to produce tuples from raw C
	 * strings
	 */
	attinmeta = TupleDescGetAttInMetadata(tupdesc);

	/* total number of tuples to be examined */
	max_calls = proc;

	/* the return tuple always must have 1 rowid + num_categories columns */
	num_categories = tupdesc->natts - 1;

	firstpass = true;
	lastrowid = NULL;

	for (call_cntr = 0; call_cntr < max_calls; call_cntr++)
	{
		bool		skip_tuple = false;
		char	  **values;

		/* allocate and zero space */
		values = (char **) palloc0((1 + num_categories) * sizeof(char *));

		/*
		 * now loop through the sql results and assign each value in sequence
		 * to the next category
		 */
		for (i = 0; i < num_categories; i++)
		{
			HeapTuple	spi_tuple;
			char	   *rowid;

			/* see if we've gone too far already */
			if (call_cntr >= max_calls)
				break;

			/* get the next sql result tuple */
			spi_tuple = spi_tuptable->vals[call_cntr];

			/* get the rowid from the current sql result tuple */
			rowid = SPI_getvalue(spi_tuple, spi_tupdesc, 1);

			/*
			 * If this is the first pass through the values for this rowid,
			 * set the first column to rowid
			 */
			if (i == 0)
			{
				xpstrdup(values[0], rowid);

				/*
				 * Check to see if the rowid is the same as that of the last
				 * tuple sent -- if so, skip this tuple entirely
				 */
				if (!firstpass && xstreq(lastrowid, rowid))
				{
					xpfree(rowid);
					skip_tuple = true;
					break;
				}
			}

			/*
			 * If rowid hasn't changed on us, continue building the output
			 * tuple.
			 */
			if (xstreq(rowid, values[0]))
			{
				/*
				 * Get the next category item value, which is always attribute
				 * number three.
				 *
				 * Be careful to assign the value to the array index based on
				 * which category we are presently processing.
				 */
				values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);

				/*
				 * increment the counter since we consume a row for each
				 * category, but not for last pass because the outer loop will
				 * do that for us
				 */
				if (i < (num_categories - 1))
					call_cntr++;
				xpfree(rowid);
			}
			else
			{
				/*
				 * We'll fill in NULLs for the missing values, but we need to
				 * decrement the counter since this sql result row doesn't
				 * belong to the current output tuple.
				 */
				call_cntr--;
				xpfree(rowid);
				break;
			}
		}

		if (!skip_tuple)
		{
			HeapTuple	tuple;

			/* build the tuple and store it */
			tuple = BuildTupleFromCStrings(attinmeta, values);
			tuplestore_puttuple(tupstore, tuple);
			heap_freetuple(tuple);
		}

		/* Remember current rowid */
		xpfree(lastrowid);
		xpstrdup(lastrowid, values[0]);
		firstpass = false;

		/* Clean up */
		for (i = 0; i < num_categories + 1; i++)
			if (values[i] != NULL)
				pfree(values[i]);
		pfree(values);
	}

	/* let the caller know we're sending back a tuplestore */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;

	/* release SPI related resources (and return to caller's context) */
	SPI_finish();

	return (Datum) 0;
}
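/*
 * Illustrative SQL usage of crosstab() (table and column names are
 * hypothetical). The source query must return exactly the three columns
 * checked above -- rowid, category, value -- and should be ordered so that
 * rows for the same rowid arrive together:
 *
 *   SELECT *
 *   FROM crosstab('SELECT rowid, attribute, value
 *                  FROM ct ORDER BY 1, 2')
 *        AS ct(row_name text, category_1 text, category_2 text,
 *              category_3 text);
 */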
Example #28
0
/*
 * SQL function json_array_elements
 *
 * get the elements from a json array
 *
 * a lot of this processing is similar to the json_each* functions
 */
Datum
json_array_elements(PG_FUNCTION_ARGS)
{
	text	   *json = PG_GETARG_TEXT_P(0);

	/* elements doesn't need any escaped strings, so use false here */
	JsonLexContext *lex = makeJsonLexContext(json, false);
	JsonSemAction *sem;
	ReturnSetInfo *rsi;
	MemoryContext old_cxt;
	TupleDesc	tupdesc;
	ElementsState *state;

	state = palloc0(sizeof(ElementsState));
	sem = palloc0(sizeof(JsonSemAction));

	rsi = (ReturnSetInfo *) fcinfo->resultinfo;

	if (!rsi || !IsA(rsi, ReturnSetInfo) ||
		(rsi->allowedModes & SFRM_Materialize) == 0 ||
		rsi->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that "
						"cannot accept a set")));


	rsi->returnMode = SFRM_Materialize;

	/* it's a simple type, so don't use get_call_result_type() */
	tupdesc = rsi->expectedDesc;

	/* make these in a sufficiently long-lived memory context */
	old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory);

	state->ret_tdesc = CreateTupleDescCopy(tupdesc);
	BlessTupleDesc(state->ret_tdesc);
	state->tuple_store =
		tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize_Random,
							  false, work_mem);

	MemoryContextSwitchTo(old_cxt);

	sem->semstate = (void *) state;
	sem->object_start = elements_object_start;
	sem->scalar = elements_scalar;
	sem->array_element_start = elements_array_element_start;
	sem->array_element_end = elements_array_element_end;

	state->lex = lex;
	state->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext,
										 "json_array_elements temporary cxt",
										   ALLOCSET_DEFAULT_MINSIZE,
										   ALLOCSET_DEFAULT_INITSIZE,
										   ALLOCSET_DEFAULT_MAXSIZE);

	pg_parse_json(lex, sem);

	rsi->setResult = state->tuple_store;
	rsi->setDesc = state->ret_tdesc;

	PG_RETURN_NULL();
}
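/*
 * Illustrative SQL usage (the array literal is arbitrary):
 *
 *   SELECT value FROM json_array_elements('[1, true, [2, false]]');
 *
 * returns one row per top-level array element -- 1, true, and [2,false] --
 * each as a value of type json.
 */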
Example #29
0
Datum
connectby_text(PG_FUNCTION_ARGS)
{
	char	   *relname = text_to_cstring(PG_GETARG_TEXT_PP(0));
	char	   *key_fld = text_to_cstring(PG_GETARG_TEXT_PP(1));
	char	   *parent_key_fld = text_to_cstring(PG_GETARG_TEXT_PP(2));
	char	   *start_with = text_to_cstring(PG_GETARG_TEXT_PP(3));
	int			max_depth = PG_GETARG_INT32(4);
	char	   *branch_delim = NULL;
	bool		show_branch = false;
	bool		show_serial = false;
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	TupleDesc	tupdesc;
	AttInMetadata *attinmeta;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize) ||
		rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	if (fcinfo->nargs == 6)
	{
		branch_delim = text_to_cstring(PG_GETARG_TEXT_PP(5));
		show_branch = true;
	}
	else
		/* default is no show, tilde for the delimiter */
		branch_delim = pstrdup("~");

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/* get the requested return tuple description */
	tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);

	/* does it meet our needs */
	validateConnectbyTupleDesc(tupdesc, show_branch, show_serial);

	/* OK, use it then */
	attinmeta = TupleDescGetAttInMetadata(tupdesc);

	/* OK, go to work */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = connectby(relname,
								  key_fld,
								  parent_key_fld,
								  NULL,
								  branch_delim,
								  start_with,
								  max_depth,
								  show_branch,
								  show_serial,
								  per_query_ctx,
							  rsinfo->allowedModes & SFRM_Materialize_Random,
								  attinmeta);
	rsinfo->setDesc = tupdesc;

	MemoryContextSwitchTo(oldcontext);

	/*
	 * SFRM_Materialize mode expects us to return a NULL Datum. The actual
	 * tuples are in our tuplestore and passed back through rsinfo->setResult.
	 * rsinfo->setDesc is set to the tuple description that we actually used
	 * to build our tuples with, so the caller can verify we did what it was
	 * expecting.
	 */
	return (Datum) 0;
}
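/*
 * Illustrative SQL usage, following the example in the tablefunc
 * documentation (table and column names are assumptions):
 *
 *   SELECT *
 *   FROM connectby('connectby_tree', 'keyid', 'parent_keyid', 'row2', 0, '~')
 *        AS t(keyid text, parent_keyid text, level int, branch text);
 *
 * The six-argument form supplies a branch delimiter, so show_branch is true
 * and the output includes the branch column.
 */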
Example #30
0
/*
 * Find or create a hashtable entry for the tuple group containing the
 * given tuple.
 *
 * If isnew is NULL, we do not create new entries; we return NULL if no
 * match is found.
 *
 * If isnew isn't NULL, then a new entry is created if no existing entry
 * matches.  On return, *isnew is true if the entry is newly created,
 * false if it existed already.  Any extra space in a new entry has been
 * zeroed.
 */
TupleHashEntry
LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
					 bool *isnew)
{
	TupleHashEntry entry;
	MemoryContext oldContext;
	TupleHashTable saveCurHT;
	TupleHashEntryData dummy;
	bool		found;

	/* If first time through, clone the input slot to make table slot */
	if (hashtable->tableslot == NULL)
	{
		TupleDesc	tupdesc;

		oldContext = MemoryContextSwitchTo(hashtable->tablecxt);

		/*
		 * We copy the input tuple descriptor just for safety --- we assume
		 * all input tuples will have equivalent descriptors.
		 */
		tupdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
		hashtable->tableslot = MakeSingleTupleTableSlot(tupdesc);
		MemoryContextSwitchTo(oldContext);
	}

	/* Need to run the hash functions in short-lived context */
	oldContext = MemoryContextSwitchTo(hashtable->tempcxt);

	/*
	 * Set up data needed by hash and match functions
	 *
	 * We save and restore CurTupleHashTable just in case someone manages to
	 * invoke this code re-entrantly.
	 */
	hashtable->inputslot = slot;
	saveCurHT = CurTupleHashTable;
	CurTupleHashTable = hashtable;

	/* Search the hash table */
	dummy.firstTuple = NULL;	/* flag to reference inputslot */
	entry = (TupleHashEntry) hash_search(hashtable->hashtab,
										 &dummy,
										 isnew ? HASH_ENTER : HASH_FIND,
										 &found);

	if (isnew)
	{
		if (found)
		{
			/* found pre-existing entry */
			*isnew = false;
		}
		else
		{
			/*
			 * created new entry
			 *
			 * Zero any caller-requested space in the entry.  (This zaps the
			 * "key data" dynahash.c copied into the new entry, but we don't
			 * care since we're about to overwrite it anyway.)
			 */
			MemSet(entry, 0, hashtable->entrysize);
			/* Copy the first tuple into the table context */
			MemoryContextSwitchTo(hashtable->tablecxt);
			entry->firstTuple = ExecCopySlotMinimalTuple(slot);

			*isnew = true;
		}
	}

	CurTupleHashTable = saveCurHT;

	MemoryContextSwitchTo(oldContext);

	return entry;
}
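/*
 * Minimal sketch of the usual calling pattern, assuming a hashtable built
 * as in Example #24 and a slot filled from the child plan; the helper name
 * is illustrative.
 */
static void
example_lookup(TupleHashTable hashtable, TupleTableSlot *slot)
{
	bool		isnew;
	TupleHashEntry entry;

	/* find-or-create the group this tuple belongs to */
	entry = LookupTupleHashEntry(hashtable, slot, &isnew);
	if (isnew)
	{
		/* first tuple of this group: initialize per-group state here */
	}

	/* pass NULL for isnew to probe without inserting */
	entry = LookupTupleHashEntry(hashtable, slot, NULL);
	if (entry == NULL)
	{
		/* no matching group exists */
	}
}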