Example 1
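datumstreamread_block_content reads the content of the current block into a DatumStreamRead accessor, handling plain, compressed, and large-object (BLOB) blocks.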
void
datumstreamread_block_content(DatumStreamRead * acc)
{
	Assert(acc);

	/*
	 * Clear out state from previous block.
	 */
	DatumStreamBlockRead_Reset(&acc->blockRead);

	acc->largeObjectState = DatumStreamLargeObjectState_None;

	/*
	 * Read in data.
	 */
	if (acc->getBlockInfo.execBlockKind == AOCSBK_BLOCK)
	{
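		/*
		 * Regular block: either decompress into our own buffer, or point
		 * directly at the storage layer's read buffer.
		 */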
		Assert(!acc->getBlockInfo.isLarge);

		if (acc->getBlockInfo.isCompressed)
		{
			/* Compressed: decompress into our own buffer. */
			if (acc->large_object_buffer_size < acc->getBlockInfo.contentLen)
			{
				MemoryContext oldCtxt;

				oldCtxt = MemoryContextSwitchTo(acc->memctxt);

				if (acc->large_object_buffer)
				{
					pfree(acc->large_object_buffer);
					acc->large_object_buffer = NULL;

					SIMPLE_FAULT_INJECTOR(MallocFailure);
				}

				acc->large_object_buffer_size = acc->getBlockInfo.contentLen;
				acc->large_object_buffer = palloc(acc->getBlockInfo.contentLen);
				MemoryContextSwitchTo(oldCtxt);
			}

			AppendOnlyStorageRead_Content(
										  &acc->ao_read,
										  (uint8 *) acc->large_object_buffer,
										  acc->getBlockInfo.contentLen);

			acc->buffer_beginp = acc->large_object_buffer;
		}
		else
		{
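			/* Not compressed: use the storage layer's read buffer directly. */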
			acc->buffer_beginp = AppendOnlyStorageRead_GetBuffer(&acc->ao_read);
		}

		if (Debug_appendonly_print_datumstream)
			elog(LOG,
				 "datumstream_read_block_content filePathName %s firstRowNum " INT64_FORMAT " rowCnt %u "
				 "ndatum %u contentLen %d datump %p",
				 acc->ao_read.bufferedRead.filePathName,
				 acc->getBlockInfo.firstRow,
				 acc->getBlockInfo.rowCnt,
				 acc->blockRead.logical_row_count,
				 acc->getBlockInfo.contentLen, acc->blockRead.datump);

	}
	else if (acc->getBlockInfo.execBlockKind == AOCSBK_BLOB)
	{
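		/*
		 * BLOB block: a single large varlena datum whose content may span
		 * multiple underlying AO storage blocks.
		 */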
		Assert(acc->getBlockInfo.rowCnt == 1);

		if (acc->typeInfo.datumlen >= 0)
		{
			elog(ERROR, "Large object must be variable length objects (varlena)");
		}

		/*
		 * NOTE: Do not assert the content is large.  What appears to be
		 * large content can compress into one AO storage block.
		 */

		if (acc->large_object_buffer_size < acc->getBlockInfo.contentLen)
		{
			MemoryContext oldCtxt;

			oldCtxt = MemoryContextSwitchTo(acc->memctxt);

			if (acc->large_object_buffer)
			{
				pfree(acc->large_object_buffer);

				/*
				 * Clear the pointer so it cannot dangle if the palloc below
				 * errors out, mirroring the AOCSBK_BLOCK branch above.
				 */
				acc->large_object_buffer = NULL;
			}

			acc->large_object_buffer_size = acc->getBlockInfo.contentLen;
			acc->large_object_buffer = palloc(acc->getBlockInfo.contentLen);
			MemoryContextSwitchTo(oldCtxt);
		}

		AppendOnlyStorageRead_Content(
									  &acc->ao_read,
									  acc->large_object_buffer,
									  acc->getBlockInfo.contentLen);

		acc->buffer_beginp = acc->large_object_buffer;
		acc->largeObjectState = DatumStreamLargeObjectState_HaveAoContent;

		if (Debug_datumstream_read_check_large_varlena_integrity)
		{
			datumstreamread_check_large_varlena_integrity(
														  acc,
														  acc->buffer_beginp,
														  acc->getBlockInfo.contentLen);
		}
	}
	else
	{
		elog(ERROR,
			 "Unexpected Append-Only Column Store executor kind %d",
			 acc->getBlockInfo.execBlockKind);
	}

	/*
	 * Unpack the information from the block headers and get ready to read the first datum.
	 */
	datumstreamread_block_get_ready(acc);
}
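Both allocation sites above follow the same grow-only buffer pattern: free the old buffer, clear the pointer so an allocation failure cannot leave it dangling, then allocate in the stream's long-lived memory context. A minimal standalone sketch of that pattern (the GrowBuf struct and growbuf_ensure name are illustrative, not from the source):

/* Sketch of the grow-only buffer pattern, assuming PostgreSQL's memory
 * context API (palloc reports failure via elog rather than returning NULL). */
typedef struct GrowBuf
{
	MemoryContext memctxt;		/* long-lived context owning the buffer */
	uint8	   *buf;
	int32		size;
} GrowBuf;

static void
growbuf_ensure(GrowBuf *gb, int32 needed)
{
	MemoryContext oldCtxt;

	if (gb->size >= needed)
		return;					/* current buffer is already large enough */

	oldCtxt = MemoryContextSwitchTo(gb->memctxt);
	if (gb->buf)
	{
		pfree(gb->buf);
		gb->buf = NULL;			/* no dangling pointer if palloc errors out */
	}
	gb->buf = palloc(needed);
	gb->size = needed;			/* record size only after palloc succeeds */
	MemoryContextSwitchTo(oldCtxt);
}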
Example 2
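AppendOnlyStorageRead_Content copies one complete content item, small or large, compressed or not, out of an append-only segment file into a caller-supplied contiguous buffer.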
/*
 * Copy the large and/or decompressed content out.
 *
 * The contentOutLen parameter value must match the contentLen from the
 * AppendOnlyStorageRead_GetBlockInfo call.
 *
 * Note this routine will work for small non-compressed content, too.
 *
 * contentOut	- memory to receive the contiguous content.
 * contentOutLen - byte length of the contentOut buffer.
 */
void
AppendOnlyStorageRead_Content(AppendOnlyStorageRead *storageRead,
							  uint8 *contentOut,
							  int32 contentOutLen)
{
	Assert(storageRead != NULL);
	Assert(storageRead->isActive);
	Assert(contentOutLen == storageRead->current.uncompressedLen);

	if (storageRead->current.isLarge)
	{
		int64		largeContentPosition;		/* Position of the large
												 * content metadata block. */
		int32		largeContentLen;	/* Total length of the large content. */
		int32		remainingLargeContentLen;	/* The remaining number of
												 * bytes to read for the large
												 * content. */
		uint8	   *contentNext;/* Pointer inside the contentOut buffer to put
								 * the next byte. */
		int32		regularBlockReadCount;		/* Number of regular blocks
												 * read after the metadata
												 * block. */
		int32		regularContentLen;	/* Length of the current regular
										 * block's content. */

		/*
		 * Large content.
		 *
		 * We have the LargeContent "metadata" AO block with the total length
		 * (already read) followed by N SmallContent blocks with the fragments
		 * of the large content.
		 */
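
		/*
		 * On-disk layout:
		 *
		 *   [LargeContent metadata block: total length]
		 *   [SmallContent block 1] ... [SmallContent block N]
		 */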


		/*
		 * Save any values needed from the current* members since they will
		 * be modified as we read the regular blocks.
		 */
		largeContentPosition = storageRead->current.headerOffsetInFile;
		largeContentLen = storageRead->current.uncompressedLen;

		/*
		 * Loop to read regular blocks.
		 */
		contentNext = contentOut;
		remainingLargeContentLen = largeContentLen;
		regularBlockReadCount = 0;
		while (true)
		{
			/*
			 * Read next regular block.
			 */
			regularBlockReadCount++;
			if (!AppendOnlyStorageRead_ReadNextBlock(storageRead))
			{
				/*
				 * Unexpected end of file.
				 */
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERNAL_ERROR),
						 errmsg("Unexpected end of file trying to read block %d of large content in segment file '%s' of table '%s'.  "
								"Large content metadata block is at position " INT64_FORMAT "  "
								"Large content length %d",
								regularBlockReadCount,
								storageRead->segmentFileName,
								storageRead->relationName,
								largeContentPosition,
								largeContentLen)));
			}
			if (storageRead->current.headerKind != AoHeaderKind_SmallContent)
			{
				/*
				 * Unexpected headerKind.
				 */
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERNAL_ERROR),
						 errmsg("Expected header kind 'Block' for block %d of large content in segment file '%s' of table '%s'.  "
								"Large content metadata block is at position " INT64_FORMAT "  "
								"Large content length %d",
								regularBlockReadCount,
								storageRead->segmentFileName,
								storageRead->relationName,
								largeContentPosition,
								largeContentLen)));
			}
			Assert(!storageRead->current.isLarge);

			regularContentLen = storageRead->current.uncompressedLen;
			remainingLargeContentLen -= regularContentLen;
			if (remainingLargeContentLen < 0)
			{
				/*
				 * More data than the large content metadata block declared.
				 */
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERNAL_ERROR),
						 errmsg("Too much data found after reading %d blocks for large content in segment file '%s' of table '%s'.  "
								"Large content metadata block is at position " INT64_FORMAT "  "
							 "Large content length %d; extra data length %d",
								regularBlockReadCount,
								storageRead->segmentFileName,
								storageRead->relationName,
								largeContentPosition,
								largeContentLen,
								-remainingLargeContentLen)));
			}

			/*
			 * We can safely recurse one level here: the block just read is a
			 * SmallContent block (verified above), so the recursive call
			 * takes the non-large branch and cannot recurse further.
			 */
			AppendOnlyStorageRead_Content(storageRead,
										  contentNext,
										  regularContentLen);

			if (remainingLargeContentLen == 0)
				break;

			/*
			 * Advance our pointer inside the contentOut buffer to where the
			 * next fragment's bytes should be copied.
			 */
			contentNext += regularContentLen;
		}
	}
	else
	{
		uint8	   *header;
		uint8	   *content;

		/*
		 * "Small" content in one regular block.
		 */

		/*
		 * Fetch pointers to content.
		 */
		AppendOnlyStorageRead_InternalGetBuffer(storageRead,
												&header,
												&content);

		if (!storageRead->current.isCompressed)
		{
			/*
			 * Not compressed.
			 */
			memcpy(contentOut,
				   content,
				   storageRead->current.uncompressedLen);

			if (Debug_appendonly_print_scan)
				elog(LOG,
					 "Append-only Storage Read non-compressed block for table '%s' "
				  "(length = %d, segment file '%s', header offset in file = "
					 INT64_FORMAT ", block count " INT64_FORMAT ")",
					 storageRead->relationName,
					 storageRead->current.uncompressedLen,
					 storageRead->segmentFileName,
					 storageRead->current.headerOffsetInFile,
					 storageRead->bufferCount);
		}
		else
		{
			/*
			 * Compressed.
			 */
			PGFunction	decompressor;
			PGFunction *cfns = storageRead->compression_functions;

			/*
			 * It is unclear how a NULL decompressor can be valid here:
			 * gp_decompress_new will crash if decompressor is NULL.
			 */
			if (cfns == NULL)
				decompressor = NULL;
			else
				decompressor = cfns[COMPRESSION_DECOMPRESS];

			gp_decompress_new(content,	/* Compressed data in block. */
							  storageRead->current.compressedLen,
							  contentOut,
							  storageRead->current.uncompressedLen,
							  decompressor,
							  storageRead->compressionState,
							  storageRead->bufferCount);

			if (Debug_appendonly_print_scan)
				elog(LOG,
				"Append-only Storage Read decompressed block for table '%s' "
					 "(compressed length %d, uncompressed length = %d, segment file '%s', "
					 "header offset in file = " INT64_FORMAT ", block count " INT64_FORMAT ")",
					 storageRead->relationName,
					 AppendOnlyStorageFormat_GetCompressedLen(header),
					 storageRead->current.uncompressedLen,
					 storageRead->segmentFileName,
					 storageRead->current.headerOffsetInFile,
					 storageRead->bufferCount);
		}
	}
}
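
A hypothetical caller sketch, built only from the calls and fields visible above (AppendOnlyStorageRead_ReadNextBlock, current.uncompressedLen, and this routine); real callers also dispatch on header kind and handle errors:

/* Hypothetical caller: copy every content item in the segment file. */
static void
scan_all_content(AppendOnlyStorageRead *storageRead)
{
	while (AppendOnlyStorageRead_ReadNextBlock(storageRead))
	{
		int32		len = storageRead->current.uncompressedLen;
		uint8	   *buf = palloc(len);

		/* Works uniformly for small, compressed, and large content. */
		AppendOnlyStorageRead_Content(storageRead, buf, len);

		/* ... consume buf ... */
		pfree(buf);
	}
}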