/*
 * Finish the open by positioning the next read and saving information.
 *
 * storageRead	- data structure to fill in with the open file's information.
 * file			- The open file.
 * filePathName - name of the segment file to open.
 * version		- AO table format version the file is in.
 * logicalEof	- snapshot version of the EOF value to use as the read end
 *				  of the segment file.
 */
static void
AppendOnlyStorageRead_FinishOpenFile(AppendOnlyStorageRead *storageRead,
									 File file,
									 char *filePathName,
									 int version,
									 int64 logicalEof)
{
	int64		seekResult;
	MemoryContext oldMemoryContext;
	int			segmentFileNameLen;

	AORelationVersion_CheckValid(version);

	/*
	 * Seek to the beginning of the file.
	 */
	seekResult = FileSeek(file, 0, SEEK_SET);
	if (seekResult != 0)
	{
		/* Save errno before FileClose(), which may overwrite it. */
		int			saved_errno = errno;

		FileClose(file);
		ereport(ERROR,
				(errcode(ERRCODE_IO_ERROR),
				 errmsg("Append-only Storage Read error on segment file '%s' for relation '%s'.  FileSeek offset = 0.  Error code = %d (%s)",
						filePathName,
						storageRead->relationName,
						saved_errno,
						strerror(saved_errno))));
	}

	storageRead->file = file;
	storageRead->formatVersion = version;

	/*
	 * When reading multiple segment files, we throw away the old segment file
	 * name strings.
	 */
	oldMemoryContext = MemoryContextSwitchTo(storageRead->memoryContext);

	if (storageRead->segmentFileName != NULL)
		pfree(storageRead->segmentFileName);

	segmentFileNameLen = strlen(filePathName);
	storageRead->segmentFileName = (char *) palloc(segmentFileNameLen + 1);
	memcpy(storageRead->segmentFileName, filePathName, segmentFileNameLen + 1);

	/* Allocation is done.  Go back to caller memory-context. */
	MemoryContextSwitchTo(oldMemoryContext);

	storageRead->logicalEof = logicalEof;

	BufferedReadSetFile(
						&storageRead->bufferedRead,
						storageRead->file,
						storageRead->segmentFileName,
						logicalEof);
}
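
/*
 * Illustrative calling pattern for the helper above (not part of the original
 * listing): open the segment file descriptor, then let
 * AppendOnlyStorageRead_FinishOpenFile() record the state and reset the
 * buffered read.  This assumes the three-argument PathNameOpenFile() from the
 * PostgreSQL fd.c of this code lineage; the function name and the minimal
 * error handling below are hypothetical.
 */
static void
example_OpenAndFinish(AppendOnlyStorageRead *storageRead,
					  char *filePathName,
					  int version,
					  int64 logicalEof)
{
	File		file;

	/* Read-only open; the read path never writes to the segment file. */
	file = PathNameOpenFile(filePathName, O_RDONLY | PG_BINARY, 0);
	if (file < 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not open segment file \"%s\": %m",
						filePathName)));

	AppendOnlyStorageRead_FinishOpenFile(storageRead, file, filePathName,
										 version, logicalEof);
}
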
/*
 * Begin scanning of a parquet relation.
 */
ParquetScanDesc
parquet_beginscan(
		Relation relation,
		Snapshot parquetMetaDataSnapshot,
		TupleDesc relationTupleDesc,
		bool *proj)
{
	ParquetScanDesc 			scan;
	AppendOnlyEntry				*aoEntry;

	AppendOnlyStorageAttributes	*attr;

	/*
	 * increment relation ref count while scanning relation
	 *
	 * This is just to make really sure the relcache entry won't go away while
	 * the scan has a pointer to it.  Caller should be holding the rel open
	 * anyway, so this is redundant in all normal scenarios...
	 */
	RelationIncrementReferenceCount(relation);

	/* allocate scan descriptor */
	scan = (ParquetScanDescData *)palloc0(sizeof(ParquetScanDescData));

	/*
	 * Get the pg_appendonly information for this table
	 */
	aoEntry = GetAppendOnlyEntry(RelationGetRelid(relation), parquetMetaDataSnapshot);
	scan->aoEntry = aoEntry;
	Assert(aoEntry->majorversion == 1 && aoEntry->minorversion == 0);

#ifdef FAULT_INJECTOR
	FaultInjector_InjectFaultIfSet(
								   FailQeWhenBeginParquetScan,
								   DDLNotSpecified,
								   "",	/* databaseName */
								   ""); /* tableName */
#endif

	/*
	 * initialize the scan descriptor
	 */
	scan->pqs_filenamepath_maxlen = AOSegmentFilePathNameLen(relation) + 1;
	scan->pqs_filenamepath = (char*)palloc0(scan->pqs_filenamepath_maxlen);
	scan->pqs_rd = relation;
	scan->parquetScanInitContext = CurrentMemoryContext;

	/*
	 * Fill in Parquet Storage layer attributes.
	 */
	attr = &scan->storageAttributes;

	/*
	 * These attributes describe the AppendOnly format to be scanned.
	 */
	if (aoEntry->compresstype == NULL || pg_strcasecmp(aoEntry->compresstype, "none") == 0)
		attr->compress = false;
	else
		attr->compress = true;
	if (aoEntry->compresstype != NULL)
		attr->compressType = aoEntry->compresstype;
	else
		attr->compressType = "none";
	attr->compressLevel		= aoEntry->compresslevel;
	attr->checksum			= aoEntry->checksum;
	attr->safeFSWriteSize	= aoEntry->safefswritesize;
	attr->splitsize			= aoEntry->splitsize;
	attr->version			= aoEntry->version;

	AORelationVersion_CheckValid(attr->version);

	scan->proj = proj;

	scan->pqs_tupDesc = (relationTupleDesc == NULL) ? RelationGetDescr(relation) : relationTupleDesc;

	scan->hawqAttrToParquetColChunks = (int*)palloc0(scan->pqs_tupDesc->natts * sizeof(int));

	initscan(scan);

	return scan;
}
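
/*
 * Illustrative scan loop (not part of the original listing): drives
 * parquet_beginscan() the way a sequential scan normally would.  Assumes the
 * companion parquet_getnext()/parquet_endscan() entry points follow the usual
 * append-only scan pattern and that SnapshotNow is acceptable for the
 * metadata snapshot in this code lineage; the function name and exact
 * signatures here are assumptions, not taken from the listing above.
 */
static void
example_ScanParquetRelation(Relation relation, TupleTableSlot *slot)
{
	ParquetScanDesc scan;
	bool	   *proj;
	int			natts = RelationGetDescr(relation)->natts;
	int			i;

	/* Project every column; 'slot' is assumed to be set up by the caller. */
	proj = (bool *) palloc0(natts * sizeof(bool));
	for (i = 0; i < natts; i++)
		proj[i] = true;

	/* NULL tuple descriptor: use the relation's own descriptor. */
	scan = parquet_beginscan(relation, SnapshotNow, NULL, proj);

	for (;;)
	{
		parquet_getnext(scan, ForwardScanDirection, slot);
		if (TupIsNull(slot))
			break;

		/* ... consume the tuple stored in 'slot' ... */
	}

	parquet_endscan(scan);

	pfree(proj);
}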