Example #1
File: nodeSeqscan.c Project: AnLingm/gpdb
TupleTableSlot *
ExecSeqScan(SeqScanState *node)
{
	/*
	 * use SeqNext as access method
	 */
	TupleTableSlot *slot;

	if((node->scan_state & SCAN_SCAN) == 0)
		OpenScanRelation(node);

	slot = ExecScan((ScanState *) node, (ExecScanAccessMtd) SeqNext);
	if (!TupIsNull(slot))
	{
		Gpmon_M_Incr_Rows_Out(GpmonPktFromSeqScanState(node));
		CheckSendPlanStateGpmonPkt(&node->ps);
	}

	if(TupIsNull(slot) && !node->ps.delayEagerFree)
	{
		CloseScanRelation(node);
	}

	return slot;
}
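The ExecScan() call above drives the scan through an access-method callback. For reference, the callback type it expects is declared in PostgreSQL's executor.h roughly as the one-line typedef below; SeqNext is simply one function matching that shape.

/* Access-method callback type expected by ExecScan(); SeqNext matches it. */
typedef TupleTableSlot *(*ExecScanAccessMtd) (ScanState *node);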
Example #2
/*
 * DynamicScan_GetNextTuple
 *		Gets the next tuple. If it needs to open a new relation,
 *		it takes care of that by asking for the next relation
 *		using DynamicScan_GetNextRelation.
 *
 *		Returns the tuple fetched, or a NULL tuple
 *		if it exhausts all the relations/partitions.
 */
TupleTableSlot *
DynamicScan_GetNextTuple(ScanState *scanState, PartitionInitMethod *partitionInitMethod,
		PartitionEndMethod *partitionEndMethod, PartitionReScanMethod *partitionReScanMethod,
		PartitionScanTupleMethod *partitionScanTupleMethod)
{
	TupleTableSlot *slot = NULL;

	while (TupIsNull(slot) && (SCAN_SCAN == scanState->scan_state ||
			SCAN_SCAN == DynamicScan_Controller(scanState, SCAN_SCAN, partitionInitMethod,
					partitionEndMethod, partitionReScanMethod)))
	{
		slot = partitionScanTupleMethod(scanState);

		if (TupIsNull(slot))
		{
			/* The underlying scanner should not change the scan status */
			Assert(SCAN_SCAN == scanState->scan_state);

			if (SCAN_DONE == DynamicScan_Controller(scanState, SCAN_NEXT, partitionInitMethod, partitionEndMethod, partitionReScanMethod) ||
					SCAN_SCAN != DynamicScan_Controller(scanState, SCAN_SCAN, partitionInitMethod, partitionEndMethod, partitionReScanMethod))
			{
				break;
			}

			Assert(SCAN_SCAN == scanState->scan_state);
		}
	}

	return slot;
}
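The four callback parameters above are function-pointer types. Judging from how partitionScanTupleMethod is invoked here, and from BitmapTableScanFetchNext (which is passed for that role in Example #24), the tuple-fetching callback presumably has a shape like the sketch below; the name and exact declaration in the GPDB headers may differ.

/* Assumed shape of the per-partition tuple callback, inferred from the
 * call partitionScanTupleMethod(scanState) above. */
typedef TupleTableSlot *(*PartitionScanTupleMethod) (ScanState *scanState);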
Example #3
/*
 * Compare the tuples in the two given slots.
 */
static int32
heap_compare_slots(Datum a, Datum b, void *arg)
{
	GatherMergeState *node = (GatherMergeState *) arg;
	SlotNumber	slot1 = DatumGetInt32(a);
	SlotNumber	slot2 = DatumGetInt32(b);

	TupleTableSlot *s1 = node->gm_slots[slot1];
	TupleTableSlot *s2 = node->gm_slots[slot2];
	int			nkey;

	Assert(!TupIsNull(s1));
	Assert(!TupIsNull(s2));

	for (nkey = 0; nkey < node->gm_nkeys; nkey++)
	{
		SortSupport sortKey = node->gm_sortkeys + nkey;
		AttrNumber	attno = sortKey->ssup_attno;
		Datum		datum1,
					datum2;
		bool		isNull1,
					isNull2;
		int			compare;

		datum1 = slot_getattr(s1, attno, &isNull1);
		datum2 = slot_getattr(s2, attno, &isNull2);

		compare = ApplySortComparator(datum1, isNull1,
									  datum2, isNull2,
									  sortKey);
		if (compare != 0)
			return -compare;
	}
	return 0;
}
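A comparator of this shape is meant to be registered with PostgreSQL's binaryheap API; returning -compare inverts the sign so the max-heap behaves as a min-heap while merging. The sketch below shows the usual registration, assuming the GatherMergeState fields (gm_heap, nreaders, gm_slots) used in nodeGatherMerge.c.

/* Sketch: build a binary heap of reader indexes keyed by heap_compare_slots. */
static void
build_merge_heap_sketch(GatherMergeState *node)
{
	int			i;

	/* heap of reader indexes, ordered by heap_compare_slots() */
	node->gm_heap = binaryheap_allocate(node->nreaders + 1,
										heap_compare_slots,
										node);

	for (i = 0; i <= node->nreaders; i++)
	{
		/* assumes one tuple has already been loaded into node->gm_slots[i] */
		binaryheap_add_unordered(node->gm_heap, Int32GetDatum(i));
	}
	binaryheap_build(node->gm_heap);
}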
Example #4
/* ----------------------------------------------------------------
 *		ExecUnique
 * ----------------------------------------------------------------
 */
TupleTableSlot *				/* return: a tuple or NULL */
ExecUnique(UniqueState *node)
{
	Unique	   *plannode = (Unique *) node->ps.plan;
	TupleTableSlot *resultTupleSlot;
	TupleTableSlot *slot;
	PlanState  *outerPlan;

	/*
	 * get information from the node
	 */
	outerPlan = outerPlanState(node);
	resultTupleSlot = node->ps.ps_ResultTupleSlot;

	/*
	 * now loop, returning only non-duplicate tuples. We assume that the
	 * tuples arrive in sorted order so we can detect duplicates easily. The
	 * first tuple of each group is returned.
	 */
	for (;;)
	{
		/*
		 * fetch a tuple from the outer subplan
		 */
		slot = ExecProcNode(outerPlan);
		if (TupIsNull(slot))
		{
			/* end of subplan, so we're done */
			ExecClearTuple(resultTupleSlot);
			return NULL;
		}

		/*
		 * Always return the first tuple from the subplan.
		 */
		if (TupIsNull(resultTupleSlot))
			break;

		/*
		 * Else test if the new tuple and the previously returned tuple match.
		 * If so then we loop back and fetch another new tuple from the
		 * subplan.
		 */
		if (!execTuplesMatch(slot, resultTupleSlot,
							 plannode->numCols, plannode->uniqColIdx,
							 node->eqfunctions,
							 node->tempContext))
			break;
	}

	/*
	 * We have a new tuple different from the previous saved tuple (if any).
	 * Save it and return it.  We must copy it because the source subplan
	 * won't guarantee that this source tuple is still accessible after
	 * fetching the next source tuple.
	 */
	return ExecCopySlot(resultTupleSlot, slot);
}
Example #5
TupleTableSlot *
ExecDynamicTableScan(DynamicTableScanState *node)
{
	ScanState *scanState = (ScanState *)node;
	TupleTableSlot *slot = NULL;

	/*
	 * If this is called the first time, find the pid index that contains all unique
	 * partition pids for this node to scan.
	 */
	if (node->pidIndex == NULL)
	{
		setPidIndex(node);
		Assert(node->pidIndex != NULL);
		
		hash_seq_init(&node->pidStatus, node->pidIndex);
		node->shouldCallHashSeqTerm = true;
	}

	/*
	 * Scan the table to find next tuple to return. If the current table
	 * is finished, close it and open the next table for scan.
	 */
	while (TupIsNull(slot) &&
		   initNextTableToScan(node))
	{
		slot = ExecTableScanRelation(scanState);
		
#ifdef FAULT_INJECTOR
		FaultInjector_InjectFaultIfSet(
				FaultDuringExecDynamicTableScan,
				DDLNotSpecified,
				"",   // databaseName
				"");  // tableName
#endif

		if (!TupIsNull(slot))
		{
			Gpmon_M_Incr_Rows_Out(GpmonPktFromDynamicTableScanState(node));
			CheckSendPlanStateGpmonPkt(&scanState->ps);
		}
		else
		{
			CleanupOnePartition(scanState);
		}
	}

	return slot;
}
Example #6
/* --------------------------------
 *		ExecCopySlotHeapTupleTo
 *			Copy the slot's heap tuple to a preallocated buffer.  Code adapted
 *			from ExecCopySlotTuple.
 *
 *			Returns the copied heap tuple if the buffer is large enough.  Otherwise,
 *			if the memory context is not NULL, allocates enough space from that
 *			context and copies the tuple there; callers can tell whether the copy
 *			went into the buffer by testing ret == dest.
 *
 *			Returns NULL and sets *len to the space needed if the buffer is too
 *			small and the memory context is NULL.
 *			Returns NULL and sets *len = 0 if the heap tuple is not valid.
 * --------------------------------
 */
HeapTuple ExecCopySlotHeapTupleTo(TupleTableSlot *slot, MemoryContext pctxt, char* dest, unsigned int *len)
{
	uint32 dumlen;
	HeapTuple tup = NULL;

	Assert(!TupIsNull(slot));
	Assert(slot->tts_tupleDescriptor);

	if(!len)
		len = &dumlen;
	
	if (slot->PRIVATE_tts_heaptuple)
	{
		tup = heaptuple_copy_to(slot->PRIVATE_tts_heaptuple, (HeapTuple) dest, len);

		if(tup || !pctxt)
			return tup;

		tup = (HeapTuple) ctxt_alloc(pctxt, *len);
		tup = heaptuple_copy_to(slot->PRIVATE_tts_heaptuple, tup, len);
		Assert(tup);

		return tup;
	}

	slot_getallattrs(slot);
	tup = heaptuple_form_to(slot->tts_tupleDescriptor, slot_get_values(slot), slot_get_isnull(slot), (HeapTuple) dest, len);

	if(tup || !pctxt)
		return tup;
	tup = (HeapTuple) ctxt_alloc(pctxt, *len);
	tup = heaptuple_form_to(slot->tts_tupleDescriptor, slot_get_values(slot), slot_get_isnull(slot), tup, len);
	Assert(tup);
	return tup;
}
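A hypothetical caller sketch (the helper name and parameters are illustrative, not taken from the source above): pass both a fixed buffer and a fallback memory context in a single call, then test the returned pointer against the buffer to learn where the copy ended up.

static HeapTuple
copy_slot_tuple_buffered(TupleTableSlot *slot, MemoryContext cxt,
						 char *buf, unsigned int buflen)
{
	unsigned int len = buflen;	/* in: buffer size; out: bytes used/needed */
	HeapTuple	tup = ExecCopySlotHeapTupleTo(slot, cxt, buf, &len);

	/*
	 * tup == (HeapTuple) buf means the tuple fit in the preallocated buffer;
	 * otherwise it was allocated from cxt.
	 */
	Assert(tup != NULL);
	return tup;
}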
Example #7
/* ----------------------------------------------------------------
 *		SubqueryNext
 *
 *		This is a workhorse for ExecSubqueryScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
SubqueryNext(SubqueryScanState *node)
{
	TupleTableSlot *slot;

	/*
	 * Get the next tuple from the sub-query.
	 */
	slot = ExecProcNode(node->subplan);

	/*
	 * We just return the subplan's result slot, rather than expending extra
	 * cycles for ExecCopySlot().  (Our own ScanTupleSlot is used only for
	 * EvalPlanQual rechecks.)
	 *
	 * We do need to mark the slot contents read-only to prevent interference
	 * between different functions reading the same datum from the slot. It's
	 * a bit hokey to do this to the subplan's slot, but should be safe
	 * enough.
	 */
	if (!TupIsNull(slot))
		slot = ExecMakeSlotContentsReadOnly(slot);

	return slot;
}
Example #8
static void
fetch_next_tuple(CustomScanState *node)
{
	RuntimeAppendState	   *scan_state = (RuntimeAppendState *) node;

	while (scan_state->running_idx < scan_state->ncur_plans)
	{
		ChildScanCommon		child = scan_state->cur_plans[scan_state->running_idx];
		PlanState		   *state = child->content.plan_state;

		for (;;)
		{
			TupleTableSlot *slot = ExecProcNode(state);

			if (TupIsNull(slot))
				break;

			scan_state->slot = slot;
			return;
		}

		scan_state->running_idx++;
	}

	scan_state->slot = NULL;
}
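A hypothetical wrapper sketch (the function name is illustrative): the CustomScan node's exec method only needs to refresh scan_state->slot via fetch_next_tuple() and return it, with NULL signalling that every child plan is exhausted.

static TupleTableSlot *
runtime_append_exec_sketch(CustomScanState *node)
{
	RuntimeAppendState *scan_state = (RuntimeAppendState *) node;

	/* advances to the next tuple, or leaves scan_state->slot NULL at EOF */
	fetch_next_tuple(node);

	return scan_state->slot;
}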
Example #9
/* ----------------------------------------------------------------
 *		ForeignNext
 *
 *		This is a workhorse for ExecForeignScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ForeignNext(ForeignScanState *node)
{
	TupleTableSlot *slot;
	ForeignScan *plan = (ForeignScan *) node->ss.ps.plan;
	ExprContext *econtext = node->ss.ps.ps_ExprContext;
	MemoryContext oldcontext;

	/* Call the Iterate function in short-lived context */
	oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
	slot = node->fdwroutine->IterateForeignScan(node);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * If any system columns are requested, we have to force the tuple into
	 * physical-tuple form to avoid "cannot extract system attribute from
	 * virtual tuple" errors later.  We also insert a valid value for
	 * tableoid, which is the only actually-useful system column.
	 */
	if (plan->fsSystemCol && !TupIsNull(slot))
	{
		HeapTuple	tup = ExecMaterializeSlot(slot);

		tup->t_tableOid = RelationGetRelid(node->ss.ss_currentRelation);
	}

	return slot;
}
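For reference, the entry point that hands ForeignNext to ExecScan looks essentially like the sketch below in upstream PostgreSQL's nodeForeignscan.c (pre-v10 form; ForeignRecheck is the matching EvalPlanQual recheck callback).

TupleTableSlot *
ExecForeignScan(ForeignScanState *node)
{
	return ExecScan((ScanState *) node,
					(ExecScanAccessMtd) ForeignNext,
					(ExecScanRecheckMtd) ForeignRecheck);
}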
Example #10
/*
 * Executes underlying scan method to fetch the next matching tuple.
 */
TupleTableSlot *
BitmapTableScanFetchNext(ScanState *node)
{
	BitmapTableScanState *scanState = (BitmapTableScanState *) node;
	TupleTableSlot *slot = BitmapTableScanPlanQualTuple(scanState);

	while (TupIsNull(slot))
	{
		/* If we haven't already obtained the required bitmap, do so */
		readBitmap(scanState);

		/* If we have exhausted the current bitmap page, fetch the next one */
		if (!scanState->needNewBitmapPage || fetchNextBitmapPage(scanState))
		{
			slot = ExecScan(&scanState->ss, (ExecScanAccessMtd) getBitmapTableScanMethod(scanState->ss.tableType)->accessMethod);
		}
		else
		{
			/*
			 * Needed a new bitmap page, but couldn't fetch one. Therefore,
			 * try the next partition.
			 */
			break;
		}
	}

	return slot;
}
Example #11
/* ----------------------------------------------------------------
 *		ExternalNext
 *
 *		This is a workhorse for ExecExtScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExternalNext(ExternalScanState *node)
{
	HeapTuple	tuple;
	FileScanDesc scandesc;
	Index		scanrelid;
	EState	   *estate;
	ScanDirection direction;
	TupleTableSlot *slot;

	/*
	 * get information from the estate and scan state
	 */
	estate = node->ss.ps.state;
	scandesc = node->ess_ScanDesc;
	scanrelid = ((ExternalScan *) node->ss.ps.plan)->scan.scanrelid;
	direction = estate->es_direction;
	slot = node->ss.ss_ScanTupleSlot;

	/*
	 * get the next tuple from the file access methods
	 */
	tuple = external_getnext(scandesc, direction);

	/*
	 * Save the tuple returned to us by the access methods in our scan tuple
	 * slot and return the slot.  Note: we pass 'true' so the tuple is
	 * pfree()'d when the slot is cleared, since tuples returned by
	 * external_getnext() are palloc()'d copies rather than pointers onto
	 * disk pages, and there is no buffer refcount to manage here.
	 */
	if (tuple)
	{
		Gpmon_M_Incr_Rows_Out(GpmonPktFromExtScanState(node));
		CheckSendPlanStateGpmonPkt(&node->ss.ps);
		ExecStoreGenericTuple(tuple, slot, true);

		/*
		 * CDB: Label each row with a synthetic ctid if needed for subquery dedup.
		 */
		if (node->cdb_want_ctid &&
			!TupIsNull(slot))
		{
			slot_set_ctid_from_fake(slot, &node->cdb_fake_ctid);
		}
	}
	else
	{
		ExecClearTuple(slot);

		if (!node->ss.ps.delayEagerFree)
		{
			ExecEagerFreeExternalScan(node);
		}
	}

	return slot;
}
Example #12
File: nodeTableScan.c Project: 50wu/gpdb
TupleTableSlot *
ExecTableScan(TableScanState *node)
{
	ScanState *scanState = (ScanState *)node;

	if (scanState->scan_state == SCAN_INIT ||
		scanState->scan_state == SCAN_DONE)
	{
		BeginTableScanRelation(scanState);
	}

	TupleTableSlot *slot = ExecTableScanRelation(scanState);
	
	if (!TupIsNull(slot))
	{
		Gpmon_M_Incr_Rows_Out(GpmonPktFromTableScanState(node));
		CheckSendPlanStateGpmonPkt(&scanState->ps);
	}
	else if (!scanState->ps.delayEagerFree)
	{
		EndTableScanRelation(scanState);
	}

	return slot;
}
Example #13
MemTuple ExecCopySlotMemTupleTo(TupleTableSlot *slot, MemoryContext pctxt, char *dest, unsigned int *len)
{
	uint32 dumlen;
	MemTuple mtup = NULL;

	Assert(!TupIsNull(slot));
	Assert(slot->tts_mt_bind);

	if(!len)
		len = &dumlen;
	
	if (TupHasMemTuple(slot))
	{
		mtup = memtuple_copy_to(slot->PRIVATE_tts_memtuple, slot->tts_mt_bind, (MemTuple) dest, len);
		if(mtup || !pctxt)
			return mtup;

		mtup = (MemTuple) ctxt_alloc(pctxt, *len);
		mtup = memtuple_copy_to(slot->PRIVATE_tts_memtuple, slot->tts_mt_bind, mtup, len);
		Assert(mtup);

		return mtup;
	}

	slot_getallattrs(slot);
	mtup = memtuple_form_to(slot->tts_mt_bind, slot_get_values(slot), slot_get_isnull(slot), (MemTuple) dest, len, false);

	if(mtup || !pctxt)
		return mtup;
	mtup = (MemTuple) ctxt_alloc(pctxt, *len);
	mtup = memtuple_form_to(slot->tts_mt_bind, slot_get_values(slot), slot_get_isnull(slot), mtup, len, false);

	Assert(mtup);
	return mtup;
}
Example #14
/*
 * Execution of DynamicIndexScan
 */
TupleTableSlot *
ExecDynamicIndexScan(DynamicIndexScanState *node)
{
	Assert(node);

	IndexScanState *indexState = &(node->indexScanState);

	TupleTableSlot *slot = NULL;
	
	/*
	 * If this is called the first time, find the pid index that contains all unique
	 * partition pids for this node to scan.
	 */
	if (node->pidxIndex == NULL)
	{
		setPidIndex(node);
		Assert(node->pidxIndex != NULL);
		
		hash_seq_init(&node->pidxStatus, node->pidxIndex);
		node->shouldCallHashSeqTerm = true;
	}

	/*
	 * Scan index to find next tuple to return. If the current index
	 * is exhausted, close it and open the next index for scan.
	 */
	while (TupIsNull(slot) &&
		   initNextIndexToScan(node))
	{
		slot = ExecScan(&indexState->ss, (ExecScanAccessMtd) IndexNext);

		if (!TupIsNull(slot))
		{
			/* Report output rows to Gpmon */
			Gpmon_M_Incr_Rows_Out(GpmonPktFromDynamicIndexScanState(node));
			CheckSendPlanStateGpmonPkt(&indexState->ss.ps);
		}
		else
		{
			CleanupOnePartition(indexState);
		}
	}
	return slot;
}
Example #15
/* ----------------------------------------------------------------
 *		ExecHash
 *
 *		build hash table for hashjoin, doing partitioning if more
 *		than one batch is required.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecHash(HashState *node)
{
	EState	   *estate;
	PlanState  *outerNode;
	List	   *hashkeys;
	HashJoinTable hashtable;
	TupleTableSlot *slot;
	ExprContext *econtext;
	int			nbatch;
	int			i;

	/*
	 * get state info from node
	 */
	estate = node->ps.state;
	outerNode = outerPlanState(node);

	hashtable = node->hashtable;
	nbatch = hashtable->nbatch;

	if (nbatch > 0)
	{
		/*
		 * Open temp files for inner batches, if needed. Note that file
		 * buffers are palloc'd in regular executor context.
		 */
		for (i = 0; i < nbatch; i++)
			hashtable->innerBatchFile[i] = BufFileCreateTemp(false);
	}

	/*
	 * set expression context
	 */
	hashkeys = node->hashkeys;
	econtext = node->ps.ps_ExprContext;

	/*
	 * get all inner tuples and insert into the hash table (or temp files)
	 */
	for (;;)
	{
		slot = ExecProcNode(outerNode);
		if (TupIsNull(slot))
			break;
		hashtable->hashNonEmpty = true;
		econtext->ecxt_innertuple = slot;
		ExecHashTableInsert(hashtable, econtext, hashkeys);
		ExecClearTuple(slot);
	}

	/*
	 * Return the slot so that we have the tuple descriptor when we need
	 * to save/restore them.  -Jeff 11 July 1991
	 */
	return slot;
}
Example #16
File: nodeHash.c Project: merlintang/sgb
/* ----------------------------------------------------------------
 *		MultiExecHash
 *
 *		build hash table for hashjoin, doing partitioning if more
 *		than one batch is required.
 * ----------------------------------------------------------------
 */
Node *
MultiExecHash(HashState *node)
{
	PlanState  *outerNode;
	List	   *hashkeys;
	HashJoinTable hashtable;
	TupleTableSlot *slot;
	ExprContext *econtext;
	uint32		hashvalue;

	/* must provide our own instrumentation support */
	if (node->ps.instrument)
		InstrStartNode(node->ps.instrument);

	/*
	 * get state info from node
	 */
	outerNode = outerPlanState(node);
	hashtable = node->hashtable;

	/*
	 * set expression context
	 */
	hashkeys = node->hashkeys;
	econtext = node->ps.ps_ExprContext;

	/*
	 * get all inner tuples and insert into the hash table (or temp files)
	 */
	for (;;)
	{
		slot = ExecProcNode(outerNode);
		if (TupIsNull(slot))
			break;
		hashtable->totalTuples += 1;
		/* We have to compute the hash value */
		econtext->ecxt_innertuple = slot;
		hashvalue = ExecHashGetHashValue(hashtable, econtext, hashkeys);
		ExecHashTableInsert(hashtable, slot, hashvalue);
	}

	/* must provide our own instrumentation support */
	if (node->ps.instrument)
		InstrStopNode(node->ps.instrument, hashtable->totalTuples);

	/*
	 * We do not return the hash table directly because it's not a subtype of
	 * Node, and so would violate the MultiExecProcNode API.  Instead, our
	 * parent Hashjoin node is expected to know how to fish it out of our node
	 * state.  Ugly but not really worth cleaning up, since Hashjoin knows
	 * quite a bit more about Hash besides that.
	 */
	return NULL;
}
Example #17
/* ----------------------------------------------------------------
 *		SubqueryNext
 *
 *		This is a workhorse for ExecSubqueryScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
SubqueryNext(SubqueryScanState *node)
{
	TupleTableSlot *slot;

	/*
	 * We need not support EvalPlanQual here, since we are not scanning a real
	 * relation.
	 */

	/*
	 * Get the next tuple from the sub-query.
	 */
	slot = ExecProcNode(node->subplan);

	/*
	 * We just overwrite our ScanTupleSlot with the subplan's result slot,
	 * rather than expending the cycles for ExecCopySlot().
	 */
	node->ss.ss_ScanTupleSlot = slot;

	/*
	 * CDB: Label each row with a synthetic ctid if needed for subquery dedup.
	 */
	if (node->cdb_want_ctid &&
		!TupIsNull(slot))
	{
		slot_set_ctid_from_fake(slot, &node->cdb_fake_ctid);
	}

	if (!TupIsNull(slot))
	{
		Gpmon_M_Incr_Rows_Out(GpmonPktFromSubqueryScanState(node));
		CheckSendPlanStateGpmonPkt(&node->ss.ps);
	}

	return slot;
}
Example #18
/* --------------------------------
 *		ExecCopySlot
 *			Copy the source slot's contents into the destination slot.
 *
 *		The destination acquires a private copy that will not go away
 *		if the source is cleared.
 *
 *		The caller must ensure the slots have compatible tupdescs.
 * --------------------------------
 */
TupleTableSlot *
ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
{
	Assert(!TupIsNull(srcslot));

	ExecClearTuple(dstslot);
	TupClearIsEmpty(dstslot);

	/* heap tuple stuff */
	if(srcslot->PRIVATE_tts_heaptuple && !srcslot->PRIVATE_tts_memtuple) {

		uint32 tuplen = dstslot->PRIVATE_tts_htup_buf_len;
		HeapTuple htup = heaptuple_copy_to(srcslot->PRIVATE_tts_heaptuple, dstslot->PRIVATE_tts_htup_buf, &tuplen);

		if(!htup)
		{
			dstslot->PRIVATE_tts_htup_buf = MemoryContextAlloc(dstslot->tts_mcxt, tuplen);
			dstslot->PRIVATE_tts_htup_buf_len = tuplen;

			htup = heaptuple_copy_to(srcslot->PRIVATE_tts_heaptuple, dstslot->PRIVATE_tts_htup_buf, &tuplen);
		}

		Assert(htup);
		dstslot->PRIVATE_tts_heaptuple = htup;
		dstslot->PRIVATE_tts_nvalid = 0;
	}
	else
	{
		uint32 tuplen = dstslot->PRIVATE_tts_mtup_buf_len;
		MemTuple mtup;

		Assert(srcslot->tts_mt_bind != NULL && dstslot->tts_mt_bind != NULL);

		mtup = ExecCopySlotMemTupleTo(srcslot, NULL, dstslot->PRIVATE_tts_mtup_buf, &tuplen);
		if(!mtup)
		{
			dstslot->PRIVATE_tts_mtup_buf = MemoryContextAlloc(dstslot->tts_mcxt, tuplen);
			dstslot->PRIVATE_tts_mtup_buf_len = tuplen;

			mtup = ExecCopySlotMemTupleTo(srcslot, NULL, dstslot->PRIVATE_tts_mtup_buf, &tuplen);
		}

		Assert(mtup);
		dstslot->PRIVATE_tts_memtuple = mtup;
		dstslot->PRIVATE_tts_nvalid = 0;
	}

	return dstslot;
}
Example #19
TupleTableSlot *
ExecAppendOnlyScan(AppendOnlyScanState *node)
{
	/*
	 * use AppendOnlyNext as access method
	 */
	TupleTableSlot *slot;
	
	if((node->ss.scan_state & SCAN_SCAN) == 0)
		OpenAOScanRelation(node);
			
	slot = ExecScan((ScanState *) node, (ExecScanAccessMtd) AppendOnlyNext);
	if (!TupIsNull(slot))
	{
		Gpmon_M_Incr_Rows_Out(GpmonPktFromAppOnlyScanState(node));
		CheckSendPlanStateGpmonPkt(&node->ss.ps);
	}

	Assert((node->ss.scan_state & SCAN_MARKPOS) == 0);
	if(TupIsNull(slot))
		CloseAOScanRelation(node);

	return slot;
}
Example #20
/* ----------------------------------------------------------------
 *		ExecCteScan(node)
 *
 *		Scans the CTE sequentially and returns the next qualifying tuple.
 *		We call the ExecScan() routine and pass it the appropriate
 *		access method functions.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecCteScan(CteScanState *node)
{
	TupleTableSlot *slot;
	CteScan *plan;

	slot = ExecScan(&node->ss,
					(ExecScanAccessMtd) CteScanNext,
					(ExecScanRecheckMtd) CteScanRecheck);

	plan = (CteScan *) node->ss.ps.plan;
	if (!TupIsNull(slot) && plan->cteStop)
		((RecursiveUnionState *) node->cteplanstate)->end = true;

	return slot;
}
Example #21
/*
 * Read the next tuple.  We might fetch a tuple from one of the tuple queues
 * using gather_readnext, or if no tuple queue contains a tuple and the
 * single_copy flag is not set, we might generate one locally instead.
 */
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
	PlanState  *outerPlan = outerPlanState(gatherstate);
	TupleTableSlot *outerTupleSlot;
	TupleTableSlot *fslot = gatherstate->funnel_slot;
	HeapTuple	tup;

	while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
	{
		CHECK_FOR_INTERRUPTS();

		if (gatherstate->nreaders > 0)
		{
			tup = gather_readnext(gatherstate);

			if (HeapTupleIsValid(tup))
			{
				ExecStoreTuple(tup, /* tuple to store */
							   fslot,	/* slot in which to store the tuple */
							   InvalidBuffer,	/* buffer associated with this
												 * tuple */
							   true);	/* pfree tuple when done with it */
				return fslot;
			}
		}

		if (gatherstate->need_to_scan_locally)
		{
			EState *estate = gatherstate->ps.state;

			/* Install our DSA area while executing the plan. */
			estate->es_query_dsa =
				gatherstate->pei ? gatherstate->pei->area : NULL;
			outerTupleSlot = ExecProcNode(outerPlan);
			estate->es_query_dsa = NULL;

			if (!TupIsNull(outerTupleSlot))
				return outerTupleSlot;

			gatherstate->need_to_scan_locally = false;
		}
	}

	return ExecClearTuple(fslot);
}
Example #22
/* --------------------------------
 *		ExecFetchSlotMemTuple
 *			Fetch the slot's memory tuple.
 *
 *		If the slot contains a virtual tuple, we convert it to memory-tuple
 *		physical form.  The slot retains ownership of the physical tuple.
 *		Likewise, if it contains a regular heap tuple we convert to
 *		memory-tuple form.
 *
 * As above, the result must be treated as read-only.
 * --------------------------------
 */
MemTuple ExecFetchSlotMemTuple(TupleTableSlot *slot, bool inline_toast)
{
	MemTuple newTuple;
	MemTuple oldTuple = NULL;
	uint32 tuplen;

	Assert(!TupIsNull(slot));
	Assert(slot->tts_mt_bind);

	if(slot->PRIVATE_tts_memtuple)
	{
		if(!inline_toast || !memtuple_get_hasext(slot->PRIVATE_tts_memtuple, slot->tts_mt_bind))
			return slot->PRIVATE_tts_memtuple;

		oldTuple = slot->PRIVATE_tts_mtup_buf;
		slot->PRIVATE_tts_mtup_buf = NULL;
		slot->PRIVATE_tts_mtup_buf_len = 0;
	}

	slot_getallattrs(slot);

	tuplen = slot->PRIVATE_tts_mtup_buf_len;
	newTuple = memtuple_form_to(slot->tts_mt_bind, slot_get_values(slot), slot_get_isnull(slot),
			(MemTuple) slot->PRIVATE_tts_mtup_buf, &tuplen, inline_toast);

	if(!newTuple)
	{
		if(slot->PRIVATE_tts_mtup_buf)
			pfree(slot->PRIVATE_tts_mtup_buf);

		slot->PRIVATE_tts_mtup_buf = MemoryContextAlloc(slot->tts_mcxt, tuplen);
		slot->PRIVATE_tts_mtup_buf_len = tuplen;

		newTuple = memtuple_form_to(slot->tts_mt_bind, slot_get_values(slot), slot_get_isnull(slot),
			(MemTuple) slot->PRIVATE_tts_mtup_buf, &tuplen, inline_toast);
	}

	Assert(newTuple);
	slot->PRIVATE_tts_memtuple = newTuple;

	if(oldTuple)
		pfree(oldTuple);

	return newTuple;
}
Example #23
/* ----------------------------------------------------------------
 *	   ExecAppend
 *
 *		Handles iteration over multiple subplans.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecAppend(AppendState *node)
{
	for (;;)
	{
		PlanState  *subnode;
		TupleTableSlot *result;

		/*
		 * figure out which subplan we are currently processing
		 */
		subnode = node->appendplans[node->as_whichplan];

		/*
		 * get a tuple from the subplan
		 */
		result = ExecProcNode(subnode);

		if (!TupIsNull(result))
		{
			/*
			 * If the subplan gave us something then return it as-is. We do
			 * NOT make use of the result slot that was set up in
			 * ExecInitAppend, first because there's no reason to and second
			 * because it may have the wrong tuple descriptor in
			 * inherited-UPDATE cases.
			 */
			return result;
		}

		/*
		 * Go on to the "next" subplan in the appropriate direction. If no
		 * more subplans, return the empty slot set up for us by
		 * ExecInitAppend.
		 */
		if (ScanDirectionIsForward(node->ps.state->es_direction))
			node->as_whichplan++;
		else
			node->as_whichplan--;
		if (!exec_append_initialize_next(node))
			return ExecClearTuple(node->ps.ps_ResultTupleSlot);

		/* Else loop back and try to get a tuple from the new subplan */
	}
}
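For context, exec_append_initialize_next() in this era of the executor amounts to the bounds check sketched below (a simplification, assuming only the as_whichplan/as_nplans handling described in the comments above; the real function may do more).

static bool
exec_append_initialize_next_sketch(AppendState *appendstate)
{
	int			whichplan = appendstate->as_whichplan;

	if (whichplan < 0)
	{
		/* ran off the beginning: park at the first plan and report EOF */
		appendstate->as_whichplan = 0;
		return false;
	}
	if (whichplan >= appendstate->as_nplans)
	{
		/* ran off the end: park at the last plan and report EOF */
		appendstate->as_whichplan = appendstate->as_nplans - 1;
		return false;
	}
	return true;
}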
Example #24
/*
 * Retrieves the next tuple from the BitmapTableScan's underlying relation.
 */
TupleTableSlot *
ExecBitmapTableScan(BitmapTableScanState *node)
{
	ScanState *scanState = (ScanState *)node;

	TupleTableSlot *slot = DynamicScan_GetNextTuple(scanState, BitmapTableScanBeginPartition,
			BitmapTableScanEndPartition, BitmapTableScanReScanPartition, BitmapTableScanFetchNext);

	if (!TupIsNull(slot))
	{
		Gpmon_Incr_Rows_Out(GpmonPktFromBitmapTableScanState(node));
		CheckSendPlanStateGpmonPkt(&scanState->ps);
	}
	else if (!scanState->ps.delayEagerFree)
	{
		ExecEagerFreeBitmapTableScan(node);
	}

	return slot;
}
Example #25
/* --------------------------------
 *		ExecFetchSlotTuple
 *			Fetch the slot's regular physical tuple.
 *
 *		If the slot contains a virtual tuple, we convert it to physical
 *		form.  The slot retains ownership of the physical tuple.
 *		Likewise, if it contains a minimal tuple we convert to regular form.
 *
 * The difference between this and ExecMaterializeSlot() is that this
 * does not guarantee that the contained tuple is local storage.
 * Hence, the result must be treated as read-only.
 * --------------------------------
 */
HeapTuple
ExecFetchSlotHeapTuple(TupleTableSlot *slot)
{
	uint32 tuplen;
	HeapTuple htup;

	/*
	 * sanity checks
	 */
	Assert(!TupIsNull(slot));

	/*
	 * If we have a regular physical tuple then just return it.
	 */
	if(slot->PRIVATE_tts_heaptuple)
		return slot->PRIVATE_tts_heaptuple;

	slot_getallattrs(slot);

	Assert(TupHasVirtualTuple(slot));
	Assert(slot->PRIVATE_tts_nvalid == slot->tts_tupleDescriptor->natts);

	tuplen = slot->PRIVATE_tts_htup_buf_len;
	htup = heaptuple_form_to(slot->tts_tupleDescriptor, slot_get_values(slot), slot_get_isnull(slot),
			slot->PRIVATE_tts_htup_buf, &tuplen);
	
	if(!htup)
	{
		if(slot->PRIVATE_tts_htup_buf)
			pfree(slot->PRIVATE_tts_htup_buf);
		slot->PRIVATE_tts_htup_buf = (HeapTuple) MemoryContextAlloc(slot->tts_mcxt, tuplen);
		slot->PRIVATE_tts_htup_buf_len = tuplen;

		htup = heaptuple_form_to(slot->tts_tupleDescriptor, slot_get_values(slot), slot_get_isnull(slot),
			slot->PRIVATE_tts_htup_buf, &tuplen);
		Assert(htup);
	}

	slot->PRIVATE_tts_heaptuple = htup;
	return htup;
}
Example #26
/* --------------------------------
 *		ExecCopySlotHeapTuple
 *			Obtain a copy of a slot's regular physical tuple.  The copy is
 *			palloc'd in the current memory context.
 *
 *		This works even if the slot contains a virtual or minimal tuple;
 *		however the "system columns" of the result will not be meaningful.
 * --------------------------------
 */
HeapTuple
ExecCopySlotHeapTuple(TupleTableSlot *slot)
{
	/*
	 * sanity checks
	 */
	Assert(!TupIsNull(slot));

	if(slot->PRIVATE_tts_heaptuple)
		return heap_copytuple(slot->PRIVATE_tts_heaptuple);

	slot_getallattrs(slot);

	/*
	 * Otherwise we need to build a tuple from the Datum array.
	 */
	return heap_form_tuple(slot->tts_tupleDescriptor,
						   slot_get_values(slot),
						   slot_get_isnull(slot));
}
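A hypothetical usage sketch (helper name illustrative): because the copy is palloc'd in the current memory context, a caller that needs the tuple to outlive a short-lived per-tuple context can switch contexts around the call.

static HeapTuple
copy_current_tuple(TupleTableSlot *slot, MemoryContext target_cxt)
{
	MemoryContext oldcxt = MemoryContextSwitchTo(target_cxt);
	HeapTuple	copy = ExecCopySlotHeapTuple(slot);

	MemoryContextSwitchTo(oldcxt);
	return copy;
}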
Example #27
/* --------------------------------
 *		ExecCopySlotMemTuple
 *			Obtain a copy of a slot's memory tuple.  The copy is
 *			palloc'd in the current memory context.
 * --------------------------------
 */
MemTuple ExecCopySlotMemTuple(TupleTableSlot *slot)
{
	/*
	 * sanity checks
	 */
	Assert(!TupIsNull(slot));
	Assert(slot->tts_mt_bind);

	/*
	 * If we have a physical tuple then just copy it.
	 */
	if (slot->PRIVATE_tts_memtuple)
		return memtuple_copy_to(slot->PRIVATE_tts_memtuple, slot->tts_mt_bind, NULL, NULL);
	
	slot_getallattrs(slot);

	/*
	 * Otherwise we need to build a tuple from the Datum array.
	 */
	return memtuple_form_to(slot->tts_mt_bind, slot_get_values(slot), slot_get_isnull(slot), NULL, 0, false);
}
Example #28
TupleTableSlot *
ExecTwice(TwiceState *node)
{
	TupleTableSlot *resultTupleSlot;
	TupleTableSlot *slot;
	PlanState *outerPlan;

	/*
	 * get information from the node
	 */
	outerPlan = outerPlanState(node);
	resultTupleSlot = node->ps.ps_ResultTupleSlot;

	/*
	 * Fetch a tuple from outer plan, and make it a result tuple.
	 */
	if(node->isFirst)
	{
		/*
		 * fetch a tuple from the outer subplan
		 */
		slot = ExecProcNode(outerPlan);
		if (TupIsNull(slot))
		{
			/* end of subplan, so we're done */
			ExecClearTuple(resultTupleSlot);
			return NULL;
		}
		node->isFirst = false;
		return ExecCopySlot(resultTupleSlot, slot);
	}

	/*
	 * If we used the current tuple already, copy it a second time. Do not
	 * proceed to the next tuple.
	 */
	node->isFirst = true;
	return ExecCopySlot(resultTupleSlot, resultTupleSlot);
}
Example #29
/*
 * Read the next tuple.  We might fetch a tuple from one of the tuple queues
 * using gather_readnext, or if no tuple queue contains a tuple and the
 * single_copy flag is not set, we might generate one locally instead.
 */
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
	PlanState  *outerPlan = outerPlanState(gatherstate);
	TupleTableSlot *outerTupleSlot;
	TupleTableSlot *fslot = gatherstate->funnel_slot;
	HeapTuple	tup;

	while (gatherstate->reader != NULL || gatherstate->need_to_scan_locally)
	{
		if (gatherstate->reader != NULL)
		{
			tup = gather_readnext(gatherstate);

			if (HeapTupleIsValid(tup))
			{
				ExecStoreTuple(tup,		/* tuple to store */
							   fslot,	/* slot in which to store the tuple */
							   InvalidBuffer,	/* buffer associated with this
												 * tuple */
							   true);	/* pfree this pointer if not from heap */

				return fslot;
			}
		}

		if (gatherstate->need_to_scan_locally)
		{
			outerTupleSlot = ExecProcNode(outerPlan);

			if (!TupIsNull(outerTupleSlot))
				return outerTupleSlot;

			gatherstate->need_to_scan_locally = false;
		}
	}

	return ExecClearTuple(fslot);
}
Example #30
/* ----------------------------------------------------------------
 *		ExecMaterial
 *
 *		As long as we are at the end of the data collected in the tuplestore,
 *		we collect one new row from the subplan on each call, and stash it
 *		aside in the tuplestore before returning it.  The tuplestore is
 *		only read if we are asked to scan backwards, rescan, or mark/restore.
 *
 * ----------------------------------------------------------------
 */
TupleTableSlot *				/* result tuple from subplan */
ExecMaterial(MaterialState *node)
{
	EState	   *estate;
	ScanDirection dir;
	bool		forward;

	NTupleStore *ts;
	NTupleStoreAccessor *tsa;

	bool		eof_tuplestore;
	TupleTableSlot *slot;
	Material *ma;
	
	/*
	 * get state info from node
	 */
	estate = node->ss.ps.state;
	dir = estate->es_direction;
	forward = ScanDirectionIsForward(dir);

	ts = node->ts_state->matstore;
	tsa = (NTupleStoreAccessor *) node->ts_pos;

	ma = (Material *) node->ss.ps.plan;
	Assert(IsA(ma, Material));

	/*
	 * If first time through, and we need a tuplestore, initialize it.
	 */
	if (ts == NULL && (ma->share_type != SHARE_NOTSHARED || node->randomAccess))
	{
		/* 
		 * For cross slice material, we only run ExecMaterial on DriverSlice 
		 */
		if(ma->share_type == SHARE_MATERIAL_XSLICE)
		{
			char rwfile_prefix[100];

			if(ma->driver_slice != currentSliceId)
			{
				elog(LOG, "Material Exec on CrossSlice, current slice %d", currentSliceId);
				return NULL;
			}
			
			shareinput_create_bufname_prefix(rwfile_prefix, sizeof(rwfile_prefix), ma->share_id); 
			elog(LOG, "Material node creates shareinput rwfile %s", rwfile_prefix);

			ts = ntuplestore_create_readerwriter(rwfile_prefix, PlanStateOperatorMemKB((PlanState *)node) * 1024, true);
			tsa = ntuplestore_create_accessor(ts, true);
		}
		else
		{
			/* Non-shared Materialize node */
			bool isWriter = true;
			workfile_set *work_set = NULL;

			if (gp_workfile_caching)
			{
				work_set = workfile_mgr_find_set( &node->ss.ps);

				if (NULL != work_set)
				{
					/* Reusing cached workfiles. Tell subplan we won't be needing any tuples */
					elog(gp_workfile_caching_loglevel, "Materialize reusing cached workfiles, initiating Squelch walker");

					isWriter = false;
					ExecSquelchNode(outerPlanState(node));
					node->eof_underlying = true;
					node->cached_workfiles_found = true;

					if (node->ss.ps.instrument)
					{
						node->ss.ps.instrument->workfileReused = true;
					}
				}
			}

			if (NULL == work_set)
			{
				/*
				 * No work_set found, this is because:
				 *  a. workfile caching is enabled but we didn't find any reusable set
				 *  b. workfile caching is disabled
				 * Creating new empty workset
				 */
				Assert(!node->cached_workfiles_found);

				/* Don't try to cache when running under a ShareInputScan node */
				bool can_reuse = (ma->share_type == SHARE_NOTSHARED);

				work_set = workfile_mgr_create_set(BUFFILE, can_reuse, &node->ss.ps, NULL_SNAPSHOT);
				isWriter = true;
			}

			Assert(NULL != work_set);
			AssertEquivalent(node->cached_workfiles_found, !isWriter);

			ts = ntuplestore_create_workset(work_set, node->cached_workfiles_found,
					PlanStateOperatorMemKB((PlanState *) node) * 1024);
			tsa = ntuplestore_create_accessor(ts, isWriter);
		}
		
		Assert(ts && tsa);
		node->ts_state->matstore = ts;
		node->ts_pos = (void *) tsa;

        /* CDB: Offer extra info for EXPLAIN ANALYZE. */
        if (node->ss.ps.instrument)
        {
            /* Let the tuplestore share our Instrumentation object. */
			ntuplestore_setinstrument(ts, node->ss.ps.instrument);

            /* Request a callback at end of query. */
            node->ss.ps.cdbexplainfun = ExecMaterialExplainEnd;
        }

		/*
		 * MPP: If requested, fetch all rows from subplan and put them
		 * in the tuplestore.  This decouples a middle slice's receiving
		 * and sending Motion operators to neutralize a deadlock hazard.
		 * MPP TODO: Remove when a better solution is implemented.
		 *
		 * ShareInput: if the material node
		 * is used to share input, we will need to fetch all rows and put
		 * them in tuple store
		 */
		while (((Material *) node->ss.ps.plan)->cdb_strict
				|| ma->share_type != SHARE_NOTSHARED)
		{
			/*
			 * When reusing cached workfiles, we already have all the tuples,
			 * and we don't need to read anything from subplan.
			 */
			if (node->cached_workfiles_found)
			{
				break;
			}
			TupleTableSlot *outerslot = ExecProcNode(outerPlanState(node));

			if (TupIsNull(outerslot))
			{
				node->eof_underlying = true;

				if (ntuplestore_created_reusable_workfiles(ts))
				{
					ntuplestore_flush(ts);
					ntuplestore_mark_workset_complete(ts);
				}

				ntuplestore_acc_seek_bof(tsa);

				break;
			}
			Gpmon_M_Incr(GpmonPktFromMaterialState(node), GPMON_QEXEC_M_ROWSIN); 

			ntuplestore_acc_put_tupleslot(tsa, outerslot);
		}
	
		CheckSendPlanStateGpmonPkt(&node->ss.ps);

		if(forward)
			ntuplestore_acc_seek_bof(tsa);
		else
			ntuplestore_acc_seek_eof(tsa);

		/* for share input, material do not need to return any tuple */
		if(ma->share_type != SHARE_NOTSHARED)
		{
			Assert(ma->share_type == SHARE_MATERIAL || ma->share_type == SHARE_MATERIAL_XSLICE);
			/* 
			 * if the material is shared across slice, notify consumers that
			 * it is ready.
			 */
			if(ma->share_type == SHARE_MATERIAL_XSLICE) 
			{
				if (ma->driver_slice == currentSliceId)
				{
					ntuplestore_flush(ts);

					node->share_lk_ctxt = shareinput_writer_notifyready(ma->share_id, ma->nsharer_xslice,
							estate->es_plannedstmt->planGen);
				}
			}
			return NULL;
		}
	}

	if(ma->share_type != SHARE_NOTSHARED)
		return NULL;

	/*
	 * If we can fetch another tuple from the tuplestore, return it.
	 */
	slot = node->ss.ps.ps_ResultTupleSlot;

	if(forward)
		eof_tuplestore = (tsa == NULL) || !ntuplestore_acc_advance(tsa, 1);
	else
		eof_tuplestore = (tsa == NULL) || !ntuplestore_acc_advance(tsa, -1);

	if(tsa!=NULL && ntuplestore_acc_tell(tsa, NULL))
	{
		ntuplestore_acc_current_tupleslot(tsa, slot);
		if (!TupIsNull(slot))
		{
			Gpmon_M_Incr_Rows_Out(GpmonPktFromMaterialState(node));
			CheckSendPlanStateGpmonPkt(&node->ss.ps);
		}
		return slot;
	}

	/*
	 * If necessary, try to fetch another row from the subplan.
	 *
	 * Note: the eof_underlying state variable exists to short-circuit further
	 * subplan calls.  It's not optional, unfortunately, because some plan
	 * node types are not robust about being called again when they've already
	 * returned NULL.
	 * If reusing cached workfiles, there is no need to execute subplan at all.
	 */
	if (eof_tuplestore && !node->eof_underlying)
	{
		PlanState  *outerNode;
		TupleTableSlot *outerslot;

		Assert(!node->cached_workfiles_found && "we shouldn't get here when using cached workfiles");

		/*
		 * We can only get here with forward==true, so no need to worry about
		 * which direction the subplan will go.
		 */
		outerNode = outerPlanState(node);
		outerslot = ExecProcNode(outerNode);
		if (TupIsNull(outerslot))
		{
			node->eof_underlying = true;
			if (ntuplestore_created_reusable_workfiles(ts))
			{
				ntuplestore_flush(ts);
				ntuplestore_mark_workset_complete(ts);
			}

			if (!node->ss.ps.delayEagerFree)
			{
				ExecEagerFreeMaterial(node);
			}

			return NULL;
		}

		Gpmon_M_Incr(GpmonPktFromMaterialState(node), GPMON_QEXEC_M_ROWSIN); 

		if (tsa)
			ntuplestore_acc_put_tupleslot(tsa, outerslot);

		/*
		 * And return a copy of the tuple.	(XXX couldn't we just return the
		 * outerslot?)
		 */
		Gpmon_M_Incr_Rows_Out(GpmonPktFromMaterialState(node));
		CheckSendPlanStateGpmonPkt(&node->ss.ps);
		return ExecCopySlot(slot, outerslot);
	}

	if (!node->ss.ps.delayEagerFree)
	{
		ExecEagerFreeMaterial(node);
	}

	/*
	 * Nothing left ...
	 */
	return NULL;
}