Code example #1
/*
 * Here we use the absolute path name as the lock name.  See fd.c
 * for how the name is created (GP_TEMP_FILE_DIR and make_database_relative).
 */
static void sisc_lockname(char* p, int size, int share_id, const char* name)
{
	if (snprintf(p, size,
			"%s/%s/%s_gpcdb2.sisc_%d_%d_%d_%d_%s",
			getCurrentTempFilePath, PG_TEMP_FILES_DIR, PG_TEMP_FILE_PREFIX, 
			GetQEIndex(), gp_session_id, gp_command_count, share_id, name
			) >= size)	/* snprintf returning size or more means the name was truncated */
	{
		ereport(ERROR, (errmsg("cannot generate path %s/%s/%s_gpcdb2.sisc_%d_%d_%d_%d_%s",
                        getCurrentTempFilePath, PG_TEMP_FILES_DIR, PG_TEMP_FILE_PREFIX,
                        GetQEIndex(), gp_session_id, gp_command_count, share_id, name)));
	}
}
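
A minimal caller sketch, assuming a MAXPGPATH-sized buffer; the wrapper function and the DEBUG1 logging are illustrative, not part of the HAWQ API:

/* Hypothetical caller: build and log the lock name for one shared scan. */
static void
sisc_lockname_example(int share_id, const char *name)
{
	char	lockname[MAXPGPATH];	/* MAXPGPATH is PostgreSQL's usual path bound */

	sisc_lockname(lockname, sizeof(lockname), share_id, name);
	elog(DEBUG1, "share input scan lock name: %s", lockname);
}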
Code example #2
File: execGpmon.c Project: BALDELab/incubator-hawq
/*
 * InitPlanNodeGpmonPkt -- initialize the gpmon packet for a plan node, and send it off.
 */
void InitPlanNodeGpmonPkt(Plan *plan, gpmon_packet_t *gpmon_pkt, EState *estate,
						  PerfmonNodeType type,
						  int64 rowsout_est,
						  char* relname)
{
	int rowsout_adjustment_factor = 0;

	if(!plan)
		return;

	/* The estimates are now global so we need to adjust by
	 * the number of segments in the array.
	 */
	rowsout_adjustment_factor = GetQEGangNum();

	/* Make sure we don't div by zero below */
	if (rowsout_adjustment_factor < 1)
		rowsout_adjustment_factor = 1;

	Assert(rowsout_adjustment_factor >= 1);

	memset(gpmon_pkt, 0, sizeof(gpmon_packet_t));

	gpmon_pkt->magic = GPMON_MAGIC;
	gpmon_pkt->version = GPMON_PACKET_VERSION;
	gpmon_pkt->pkttype = GPMON_PKTTYPE_QEXEC;

	gpmon_gettmid(&gpmon_pkt->u.qexec.key.tmid);
	gpmon_pkt->u.qexec.key.ssid = gp_session_id;
	gpmon_pkt->u.qexec.key.ccnt = gp_command_count;
	gpmon_pkt->u.qexec.key.hash_key.segid = GetQEIndex();
	gpmon_pkt->u.qexec.key.hash_key.pid = MyProcPid;
	gpmon_pkt->u.qexec.key.hash_key.nid = plan->plan_node_id;

	gpmon_pkt->u.qexec.pnid = plan->plan_parent_node_id;

	gpmon_pkt->u.qexec.nodeType = (apr_uint16_t)type;

	gpmon_pkt->u.qexec.rowsout = 0;
	gpmon_pkt->u.qexec.rowsout_est = rowsout_est / rowsout_adjustment_factor;

	if (relname)
	{
		snprintf(gpmon_pkt->u.qexec.relation_name, sizeof(gpmon_pkt->u.qexec.relation_name), "%s", relname);
	}

	gpmon_pkt->u.qexec.status = (uint8)PMNS_Initialize;

	if(gp_enable_gpperfmon && estate)
	{
		gpmon_send(gpmon_pkt);
	}

	gpmon_pkt->u.qexec.status = (uint8)PMNS_Executing;
}
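
A sketch of a typical call site, modeled on initGpmonPktForExternalScan in code example #10; the wrapper name and the PMNT_TableScan node type are assumptions:

/* Hypothetical per-node wrapper around InitPlanNodeGpmonPkt. */
void
initGpmonPktForExampleScan(Plan *plan, gpmon_packet_t *gpmon_pkt, EState *estate)
{
	InitPlanNodeGpmonPkt(plan, gpmon_pkt, estate,
						 PMNT_TableScan,			/* assumed node type */
						 (int64) plan->plan_rows,	/* global row estimate */
						 NULL /* relname */);
}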
Code example #3
/*
 * Clears entire contents of workfile cache
 *
 *  If seg_id == UNDEF_SEGMENT run on all segments, otherwise run only
 *  on segment seg_id.
 *
 *  Returns the number of entries removed
 */
int32
workfile_mgr_clear_cache(int seg_id)
{
	int no_cleared = 0;
	if (seg_id == UNDEF_SEGMENT || GetQEIndex() == seg_id)
	{
		Cache *cache = workfile_mgr_get_cache();
		no_cleared = Cache_Clear(cache);
	}

	return no_cleared;
}
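
A usage sketch, using the UNDEF_SEGMENT broadcast value described in the header comment:

/* UNDEF_SEGMENT matches every segment, so the local cache is always cleared. */
int32	removed = workfile_mgr_clear_cache(UNDEF_SEGMENT);

elog(LOG, "workfile cache cleared, %d entries removed", removed);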
Code example #4
File: cdbutil.c Project: BALDELab/incubator-hawq
/*
 * performs all necessary cleanup required when leaving Greenplum
 * Database mode.  This is also called when the process exits.
 *
 * NOTE: the arguments to this function are here only so that we can
 *		 register it with on_proc_exit().  These parameters should not
 *		 be used since there are some callers to this that pass them
 *		 as NULL.
 *
 */
void
cdb_cleanup(int code __attribute__((unused)) , Datum arg __attribute__((unused)) )
{
	elog(DEBUG1, "Cleaning up Greenplum components...");

	executormgr_cleanup_env();

	if (Gp_role != GP_ROLE_UTILITY)
	{
		/* shutdown our listener socket */
		CleanUpMotionLayerIPC();

		/* Move self out from CGroup. */
		if (GetQEIndex() != -1)
		{
			OnMoveOutCGroupForQE();
		}
	}
}
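
Per the NOTE in the header comment, the otherwise-unused (code, arg) parameters exist only so the function matches the on_proc_exit() callback signature. A registration sketch:

/* Install cdb_cleanup as a process-exit hook; the Datum argument is ignored. */
on_proc_exit(cdb_cleanup, (Datum) 0);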
Code example #5
File: execScan.c Project: BALDELab/incubator-hawq
/*
 * InitScanStateRelationDetails
 *   Opens a relation and sets various relation specific ScanState fields.
 */
void
InitScanStateRelationDetails(ScanState *scanState, Plan *plan, EState *estate)
{
	Assert(NULL != scanState);
	PlanState *planState = &scanState->ps;

	/* Initialize child expressions */
	planState->targetlist = (List *)ExecInitExpr((Expr *)plan->targetlist, planState);
	planState->qual = (List *)ExecInitExpr((Expr *)plan->qual, planState);

	Relation currentRelation = ExecOpenScanRelation(estate, ((Scan *)plan)->scanrelid);
	scanState->ss_currentRelation = currentRelation;

	if (RelationIsAoRows(currentRelation) || RelationIsParquet(currentRelation))
	{
		scanState->splits = GetFileSplitsOfSegment(estate->es_plannedstmt->scantable_splits,
												   currentRelation->rd_id, GetQEIndex());
	}

	ExecAssignScanType(scanState, RelationGetDescr(currentRelation));
	ExecAssignScanProjectionInfo(scanState);

	scanState->tableType = getTableType(scanState->ss_currentRelation);
}
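
A compact sketch of a scan-node initializer driving this helper; the function name is hypothetical and the node setup is trimmed to the essentials:

/* Hypothetical scan-node ExecInit built on InitScanStateRelationDetails. */
ScanState *
ExecInitExampleScan(Scan *node, EState *estate, int eflags)
{
	ScanState *state = makeNode(ScanState);

	state->ps.plan = (Plan *) node;
	state->ps.state = estate;
	ExecAssignExprContext(estate, &state->ps);
	InitScanStateRelationDetails(state, (Plan *) node, estate);
	return state;
}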
Code example #6
File: execDML.c Project: BALDELab/incubator-hawq
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs have to add the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 *		Insert can be part of an update operation when
 *		there is a preceding SplitUpdate node. 
 * ----------------------------------------------------------------
 */
void
ExecInsert(TupleTableSlot *slot,
		   DestReceiver *dest,
		   EState *estate,
		   PlanGenerator planGen,
		   bool isUpdate)
{
	void		*tuple = NULL;
	ResultRelInfo *resultRelInfo = NULL;
	Relation	resultRelationDesc = NULL;
	Oid			newId = InvalidOid;
	TupleTableSlot *partslot = NULL;

	AOTupleId	aoTupleId = AOTUPLEID_INIT;

	bool		rel_is_heap = false;
	bool 		rel_is_aorows = false;
	bool		rel_is_external = false;
	bool		rel_is_parquet = false;

	/*
	 * get information on the (current) result relation
	 */
	if (estate->es_result_partitions)
	{
		resultRelInfo = slot_get_partition(slot, estate);
		estate->es_result_relation_info = resultRelInfo;

		if (NULL != resultRelInfo->ri_parquetSendBack)
		{
			/*
			 * The Parquet part we are about to insert into
			 * has sendBack information. This means we're inserting into the
			 * part twice, which is not supported. Error out (GPSQL-2291)
			 */
			Assert(gp_parquet_insert_sort);
			ereport(ERROR, (errcode(ERRCODE_CDB_FEATURE_NOT_YET),
					errmsg("Cannot insert out-of-order tuples in parquet partitions"),
					errhint("Sort the data on the partitioning key(s) before inserting"),
					errOmitLocation(true)));
		}

		/*
		 * Check if we need to close the last parquet partition we
		 * inserted into (GPSQL-2291).
		 */
		Oid new_part_oid = resultRelInfo->ri_RelationDesc->rd_id;
		if (gp_parquet_insert_sort &&
				PLANGEN_OPTIMIZER == planGen &&
				InvalidOid != estate->es_last_parq_part &&
				new_part_oid != estate->es_last_parq_part)
		{
			Assert(NULL != estate->es_partition_state->result_partition_hash);

			ResultPartHashEntry *entry = hash_search(estate->es_partition_state->result_partition_hash,
									&estate->es_last_parq_part,
									HASH_FIND,
									NULL /* found */);

			Assert(NULL != entry);
			Assert(entry->offset < estate->es_num_result_relations);

			ResultRelInfo *oldResultRelInfo = & estate->es_result_relations[entry->offset];

			elog(DEBUG1, "Switching from old part oid=%d name=[%s] to new part oid=%d name=[%s]",
					estate->es_last_parq_part,
					oldResultRelInfo->ri_RelationDesc->rd_rel->relname.data,
					new_part_oid,
					resultRelInfo->ri_RelationDesc->rd_rel->relname.data);

			/*
			 * We are opening a new partition, and the last partition we
			 * inserted into was a Parquet part. Let's close the old
			 * parquet insert descriptor to free the memory before
			 * opening the new one.
			 */
			ParquetInsertDescData *oldInsertDesc = oldResultRelInfo->ri_parquetInsertDesc;

			/*
			 * We need to preserve the "sendback" information that needs to be
			 * sent back to the QD process from this part.
			 * Compute it here, and store it for later use.
			 */
			QueryContextDispatchingSendBack sendback =
					CreateQueryContextDispatchingSendBack(1);
			sendback->relid = RelationGetRelid(oldResultRelInfo->ri_RelationDesc);
			oldInsertDesc->sendback = sendback;
			parquet_insert_finish(oldInsertDesc);

			/* Store the sendback information in the resultRelInfo for this part */
			oldResultRelInfo->ri_parquetSendBack = sendback;

			/* Record in the resultRelInfo that we closed the parquet insert descriptor */
			oldResultRelInfo->ri_parquetInsertDesc = NULL;

			/* Reset the last parquet part Oid, it's now closed */
			estate->es_last_parq_part = InvalidOid;
		}
	}
	else
	{
		resultRelInfo = estate->es_result_relation_info;
	}

	Assert (!resultRelInfo->ri_projectReturning);

	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	rel_is_heap = RelationIsHeap(resultRelationDesc);
	rel_is_aorows = RelationIsAoRows(resultRelationDesc);
	rel_is_external = RelationIsExternal(resultRelationDesc);
	rel_is_parquet = RelationIsParquet(resultRelationDesc);

	/* Validate that this insert is not part of a disallowed update operation. */
	if (isUpdate && (rel_is_aorows || rel_is_parquet))
	{
		ereport(ERROR,
			(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				errmsg("Append-only tables are not updatable. Operation not permitted."),
				errOmitLocation(true)));
	}

	partslot = reconstructMatchingTupleSlot(slot, resultRelInfo);
	if (rel_is_heap || rel_is_external)
	{
		tuple = ExecFetchSlotHeapTuple(partslot);
	}
	else if (rel_is_aorows)
	{
		tuple = ExecFetchSlotMemTuple(partslot, false);
	}
	else if (rel_is_parquet)
	{
		tuple = NULL;
	}

	Assert( partslot != NULL );
	Assert( rel_is_parquet || (tuple != NULL));

	/* Execute triggers in Planner-generated plans */
	if (planGen == PLANGEN_PLANNER)
	{
		/* BEFORE ROW INSERT Triggers */
		if (resultRelInfo->ri_TrigDesc &&
			resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
		{
			HeapTuple	newtuple;

			/* NYI */
			if(rel_is_parquet)
				elog(ERROR, "triggers are not supported on tables that use column-oriented storage");

			newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

			if (newtuple == NULL)	/* "do nothing" */
			{
				return;
			}

			if (newtuple != tuple)	/* modified by Trigger(s) */
			{
				/*
				 * Put the modified tuple into a slot for convenience of routines
				 * below.  We assume the tuple was allocated in per-tuple memory
				 * context, and therefore will go away by itself. The tuple table
				 * slot should not try to clear it.
				 */
				TupleTableSlot *newslot = estate->es_trig_tuple_slot;

				if (newslot->tts_tupleDescriptor != partslot->tts_tupleDescriptor)
					ExecSetSlotDescriptor(newslot, partslot->tts_tupleDescriptor);
				ExecStoreGenericTuple(newtuple, newslot, false);
				newslot->tts_tableOid = partslot->tts_tableOid; /* for constraints */
				tuple = newtuple;
				partslot = newslot;
			}
		}
	}
	/*
	 * Check the constraints of the tuple
	 */
	if (resultRelationDesc->rd_att->constr &&
			planGen == PLANGEN_PLANNER)
	{
		ExecConstraints(resultRelInfo, partslot, estate);
	}
	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * NOTE: for append-only relations we use the append-only access methods.
	 */
	if (rel_is_aorows)
	{
		if (resultRelInfo->ri_aoInsertDesc == NULL)
		{
			ResultRelSegFileInfo *segfileinfo = NULL;
			/* Set the pre-assigned fileseg number to insert into */
			ResultRelInfoSetSegFileInfo(resultRelInfo, estate->es_result_segfileinfos);
			segfileinfo = (ResultRelSegFileInfo *)list_nth(resultRelInfo->ri_aosegfileinfos, GetQEIndex());
			resultRelInfo->ri_aoInsertDesc =
				appendonly_insert_init(resultRelationDesc,
									   segfileinfo);
		}

		appendonly_insert(resultRelInfo->ri_aoInsertDesc, tuple, &newId, &aoTupleId);
	}
	else if (rel_is_external)
	{
		/* Writable external table */
		if (resultRelInfo->ri_extInsertDesc == NULL)
			resultRelInfo->ri_extInsertDesc = external_insert_init(
					resultRelationDesc, 0);

		newId = external_insert(resultRelInfo->ri_extInsertDesc, tuple);
	}
	else if (rel_is_parquet)
	{
		/* If there is no parquet insert descriptor, create it now. */
		if (resultRelInfo->ri_parquetInsertDesc == NULL)
		{
			ResultRelSegFileInfo *segfileinfo = NULL;
			ResultRelInfoSetSegFileInfo(resultRelInfo, estate->es_result_segfileinfos);
			segfileinfo = (ResultRelSegFileInfo *)list_nth(resultRelInfo->ri_aosegfileinfos, GetQEIndex());
			resultRelInfo->ri_parquetInsertDesc = parquet_insert_init(resultRelationDesc, segfileinfo);

			/*
			 * Just opened a new parquet partition for insert. Save the Oid
			 * in estate, so that we can close it when switching to a
			 * new partition (GPSQL-2291)
			 */
			elog(DEBUG1, "Saving es_last_parq_part. Old=%d, new=%d", estate->es_last_parq_part, resultRelationDesc->rd_id);
			estate->es_last_parq_part = resultRelationDesc->rd_id;
		}

		newId = parquet_insert(resultRelInfo->ri_parquetInsertDesc, partslot);
	}
	else
	{
		Insist(rel_is_heap);

		newId = heap_insert(resultRelationDesc,
							tuple,
							estate->es_snapshot->curcid,
							true, true, GetCurrentTransactionId());
	}

	IncrAppended();
	(estate->es_processed)++;
	(resultRelInfo->ri_aoprocessed)++;
	estate->es_lastoid = newId;

	partslot->tts_tableOid = RelationGetRelid(resultRelationDesc);

	if (rel_is_aorows || rel_is_parquet)
	{
		/* NOTE: Current version does not support indexes on parquet tables. */
		/*
		 * insert index entries for AO Row-Store tuple
		 */
		if (resultRelInfo->ri_NumIndices > 0 && !rel_is_parquet)
			ExecInsertIndexTuples(partslot, (ItemPointer)&aoTupleId, estate, false);
	}
	else
	{
		/* Use the partition slot and tuple for the index update, in case this is an indexed heap table. */
		TupleTableSlot *xslot = partslot;
		void *xtuple = tuple;

		setLastTid(&(((HeapTuple) xtuple)->t_self));

		/*
		 * insert index entries for tuple
		 */
		if (resultRelInfo->ri_NumIndices > 0)
			ExecInsertIndexTuples(xslot, &(((HeapTuple) xtuple)->t_self), estate, false);
	}

	if (planGen == PLANGEN_PLANNER)
	{
		/* AFTER ROW INSERT Triggers */
		ExecARInsertTriggers(estate, resultRelInfo, tuple);
	}
}
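
A minimal call-site sketch for the planner-generated path; slot and estate are assumed to be prepared by the caller:

/* Insert one tuple through the DML path: no DestReceiver, not part of an update. */
ExecInsert(slot, NULL /* dest */, estate, PLANGEN_PLANNER, false /* isUpdate */);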
Code example #7
void
EndMotionLayerNode(MotionLayerState *mlStates, int16 motNodeID, bool flushCommLayer)
{
    MotionNodeEntry *pMNEntry;
    ChunkSorterEntry *pCSEntry;
    int i;

    pMNEntry = getMotionNodeEntry(mlStates, motNodeID, "EndMotionLayerNode");

#ifdef AMS_VERBOSE_LOGGING
    elog(DEBUG5, "Cleaning up Motion Layer details for motion node %d.",
         motNodeID);
#endif

    /*
     * Iterate through all entries in the motion layer's chunk-sort map, to
     * see if we have gotten end-of-stream from all senders.
     */
    if (pMNEntry->preserve_order && pMNEntry->ready_tuple_lists != NULL)
    {
        for (i=0; i < GetQEGangNum(); i++)
        {
            pCSEntry = &pMNEntry->ready_tuple_lists[i];

            if (pMNEntry->preserve_order &&
                    gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
            {
                /* Print chunk-sorter entry statistics. */
                elog(DEBUG4, "Chunk-sorter entry [route=%d,node=%d] statistics:\n"
                     "\tAvailable Tuples High-Watermark: " UINT64_FORMAT,
                     i, pMNEntry->motion_node_id,
                     pMNEntry->stat_tuples_available_hwm);
            }

            if (!pMNEntry->stopped && !pCSEntry->end_of_stream)
            {
                if (flushCommLayer)
                {
                    elog(FATAL, "Motion layer node %d cleanup - did not receive"
                         " end-of-stream from sender %d.", motNodeID, i);

                    /*** TODO - get chunks until end-of-stream comes in. ***/
                }
                else
                {
                    elog(LOG, "Motion layer node %d cleanup - did not receive"
                         " end-of-stream from sender %d.", motNodeID, i);
                }
            }
            else
            {
                /* End-of-stream is marked for this entry. */

                /*** TODO - do more than just complain! ***/

                if (pCSEntry->chunk_list.num_chunks > 0)
                {
                    elog(LOG, "Motion layer node %d cleanup - there are still"
                         " %d chunks enqueued from sender %d.", motNodeID,
                         pCSEntry->chunk_list.num_chunks, i );
                }

                /***
                	TODO - Make sure there are no outstanding tuples in the
                	tuple-store.
                ***/
            }

            /*
             * Clean up the chunk-sorter entry, then remove it from the hash
             * table.
             */
            clearTCList(&pMNEntry->ser_tup_info.chunkCache, &pCSEntry->chunk_list);
            if (pMNEntry->preserve_order)	/* Clean up the tuple-store. */
                htfifo_destroy(pCSEntry->ready_tuples);
        }
    }
    pMNEntry->cleanedUp = true;

    /* Clean up the motion-node entry, then remove it from the hash table. */
    if (gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE)
    {
        if (pMNEntry->stat_total_bytes_sent > 0 ||
                pMNEntry->sel_wr_wait > 0)
        {
            elog(LOG, "Interconnect seg%d slice%d sent " UINT64_FORMAT " tuples, "
                 UINT64_FORMAT " total bytes, " UINT64_FORMAT " tuple bytes, "
                 UINT64_FORMAT " chunks; waited " UINT64_FORMAT " usec.",
                 GetQEIndex(),
                 currentSliceId,
                 pMNEntry->stat_total_sends,
                 pMNEntry->stat_total_bytes_sent,
                 pMNEntry->stat_tuple_bytes_sent,
                 pMNEntry->stat_total_chunks_sent,
                 pMNEntry->sel_wr_wait
                );
        }
        if (pMNEntry->stat_total_bytes_recvd > 0 ||
                pMNEntry->sel_rd_wait > 0)
        {
            elog(LOG, "Interconnect seg%d slice%d received from slice%d: " UINT64_FORMAT " tuples, "
                 UINT64_FORMAT " total bytes, " UINT64_FORMAT " tuple bytes, "
                 UINT64_FORMAT " chunks; waited " UINT64_FORMAT " usec.",
                 GetQEIndex(),
                 currentSliceId,
                 motNodeID,
                 pMNEntry->stat_total_recvs,
                 pMNEntry->stat_total_bytes_recvd,
                 pMNEntry->stat_tuple_bytes_recvd,
                 pMNEntry->stat_total_chunks_recvd,
                 pMNEntry->sel_rd_wait
                );
        }
    }

    CleanupSerTupInfo(&pMNEntry->ser_tup_info);
    FreeTupleDesc(pMNEntry->tuple_desc);
    if (!pMNEntry->preserve_order)
        htfifo_destroy(pMNEntry->ready_tuples);

    pMNEntry->valid = false;
}
Code example #8
void shareinput_create_bufname_prefix(char* p, int size, int share_id)
{
	snprintf(p, size, "%s_SIRW_%d_%d_%d_%d", 
            PG_TEMP_FILE_PREFIX, 
            GetQEIndex(), gp_session_id, gp_command_count, share_id);
}
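
A usage sketch; the MAXPGPATH buffer size is an assumption:

/* Build the shared-input buffer name prefix for share_id 3. */
char	prefix[MAXPGPATH];

shareinput_create_bufname_prefix(prefix, sizeof(prefix), 3);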
Code example #9
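This snippet is the tail of get_pxf_server() (the function named in its elog calls): each segment selects its REST server from the list by segment index, modulo the number of servers.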
			if (pxf_service_singlecluster)
			{
				if (server_ip == NULL)
				{
					server_ip = rest_server->host;
				}
				else if (are_ips_equal(server_ip, rest_server->host))
				{
					port++;
				}
			}
			rest_server->port = port;
		}

		/* choose server by segment id */
		server_index = GetQEIndex() % list_length(rest_servers);
		elog(DEBUG3, "get_pxf_server: server index %d, segment id %d, rest servers number %d",
			server_index, GetQEIndex(), list_length(rest_servers));

		found_server = (PxfServer*)list_nth(rest_servers, server_index);
		ret_server->host = pstrdup(found_server->host);
		ret_server->port = found_server->port;

		free_datanode_rest_servers(rest_servers);
	}
	else /* Isilon */
	{
		ret_server->host = pstrdup("localhost"); /* TODO: should it always be localhost? */
		ret_server->port = port;
		elog(DEBUG2, "get_pxf_server: writing data to an Isilon target storage system");
	}
Code example #10
/* ----------------------------------------------------------------
*		ExecInitExternalScan
* ----------------------------------------------------------------
*/
ExternalScanState *
ExecInitExternalScan(ExternalScan *node, EState *estate, int eflags)
{
	ResultRelSegFileInfo *segfileinfo = NULL;
	ExternalScanState *externalstate;
	Relation	currentRelation;
	FileScanDesc currentScanDesc;

	Assert(outerPlan(node) == NULL);
	Assert(innerPlan(node) == NULL);

	/*
	 * create state structure
	 */
	externalstate = makeNode(ExternalScanState);
	externalstate->ss.ps.plan = (Plan *) node;
	externalstate->ss.ps.state = estate;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &externalstate->ss.ps);

	/*
	 * initialize child expressions
	 */
	externalstate->ss.ps.targetlist = (List *)
		ExecInitExpr((Expr *) node->scan.plan.targetlist,
					 (PlanState *) externalstate);
	externalstate->ss.ps.qual = (List *)
		ExecInitExpr((Expr *) node->scan.plan.qual,
					 (PlanState *) externalstate);

	/* Check if targetlist or qual contains a var node referencing the ctid column */
	externalstate->cdb_want_ctid = contain_ctid_var_reference(&node->scan);
	ItemPointerSetInvalid(&externalstate->cdb_fake_ctid);

#define EXTSCAN_NSLOTS 2

	/*
	 * tuple table initialization
	 */
	ExecInitResultTupleSlot(estate, &externalstate->ss.ps);
	ExecInitScanTupleSlot(estate, &externalstate->ss);

	/*
	 * get the relation object id from the relid'th entry in the range table
	 * and open that relation.
	 */
	currentRelation = ExecOpenScanExternalRelation(estate, node->scan.scanrelid);

	if (Gp_role == GP_ROLE_EXECUTE && node->err_aosegfileinfos)
	{
		segfileinfo = (ResultRelSegFileInfo *)list_nth(node->err_aosegfileinfos, GetQEIndex());
	}
	else
	{
		segfileinfo = NULL;
	}
	currentScanDesc = external_beginscan(currentRelation,
									 node->scan.scanrelid,
									 node->scancounter,
									 node->uriList,
									 node->fmtOpts,
									 node->fmtType,
									 node->isMasterOnly,
									 node->rejLimit,
									 node->rejLimitInRows,
									 node->fmterrtbl,
									 segfileinfo,
									 node->encoding,
									 node->scan.plan.qual);

	externalstate->ss.ss_currentRelation = currentRelation;
	externalstate->ess_ScanDesc = currentScanDesc;

	ExecAssignScanType(&externalstate->ss, RelationGetDescr(currentRelation));

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&externalstate->ss.ps);
	ExecAssignScanProjectionInfo(&externalstate->ss);

	/*
	 * If eflag contains EXEC_FLAG_REWIND or EXEC_FLAG_BACKWARD or EXEC_FLAG_MARK,
	 * then this node is not eager free safe.
	 */
	externalstate->ss.ps.delayEagerFree =
		((eflags & (EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)) != 0);

	initGpmonPktForExternalScan((Plan *)node, &externalstate->ss.ps.gpmon_pkt, estate);

	return externalstate;
}
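
Code examples #5, #6, and #10 all share the same per-segment pattern; as a sketch (the list name is hypothetical):

/* Each QE picks its own element from a per-segment list by its segment index. */
ResultRelSegFileInfo *mine =
	(ResultRelSegFileInfo *) list_nth(per_segment_list, GetQEIndex());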