Example #1
/* ----------------
 *		heaptuple_copy_to
 *
 *		returns a copy of an entire tuple
 *
 * If dest is NULL, the HeapTuple struct, tuple header, and tuple data
 * are all allocated as a single palloc() block.  Otherwise the copy is
 * built in the caller-supplied dest buffer: *destlen is set to the
 * space required, and NULL is returned if the buffer is too small.
 * ----------------
 */
HeapTuple
heaptuple_copy_to(HeapTuple tuple, HeapTuple dest, uint32 *destlen)
{
	HeapTuple	newTuple;
	uint32 len;

	if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL)
		return NULL;

	Assert(!is_heaptuple_memtuple(tuple));

	len = HEAPTUPLESIZE + tuple->t_len;
	if(destlen && *destlen < len)
	{
		*destlen = len;
		return NULL;
	}

	if(destlen)
	{
		*destlen = len;
		newTuple = dest;
	}
	else
		newTuple = (HeapTuple) palloc(HEAPTUPLESIZE + tuple->t_len);

	newTuple->t_len = tuple->t_len;
	newTuple->t_self = tuple->t_self;
	newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
	memcpy((char *) newTuple->t_data, (char *) tuple->t_data, tuple->t_len);
	return newTuple;
}
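
Usage note (not from the original source): the destlen parameter gives heaptuple_copy_to() a probe-then-copy calling convention. The sketch below is hypothetical caller code — copy_into_buffer, buf, and buflen are illustrative names — showing how a caller might grow a reusable buffer to the size the function reports.

/*
 * Hypothetical caller (sketch): copy `tuple` into a reusable buffer,
 * growing it when heaptuple_copy_to() reports (via *destlen) that the
 * buffer is too small.  Assumes `tuple` is valid.
 */
static HeapTuple
copy_into_buffer(HeapTuple tuple, char **buf, uint32 *buflen)
{
	uint32		len = *buflen;
	HeapTuple	result;

	result = heaptuple_copy_to(tuple, (HeapTuple) *buf, &len);
	if (result == NULL)
	{
		/* too small: len now holds the required size */
		*buf = *buf ? repalloc(*buf, len) : palloc(len);
		*buflen = len;
		result = heaptuple_copy_to(tuple, (HeapTuple) *buf, &len);
	}
	return result;
}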
Example #2
static void
writetup_heap(Tuplestorestate *state, TuplestorePos *pos, void *tup)
{
	uint32 tuplen = 0; 
	Size         memsize = 0;

	if(is_heaptuple_memtuple((HeapTuple) tup))
		tuplen = memtuple_get_size((MemTuple) tup, NULL);
	else
	{
		Assert(!is_heaptuple_splitter((HeapTuple) tup));
		tuplen = heaptuple_get_size((HeapTuple) tup);
	}

	if (BufFileWrite(state->myfile, (void *) tup, tuplen) != (size_t) tuplen)
		elog(ERROR, "write failed");
	if (state->eflags & EXEC_FLAG_BACKWARD)		/* need trailing length word? */
		if (BufFileWrite(state->myfile, (void *) &tuplen,
						 sizeof(tuplen)) != sizeof(tuplen))
			elog(ERROR, "write failed");

	memsize = GetMemoryChunkSpace(tup);

	state->spilledBytes += memsize;
	FREEMEM(state, memsize);

	pfree(tup);
}
Example #3
/* ----------------
 *		heap_attisnull	- returns TRUE iff tuple attribute is null
 * ----------------
 */
bool
heap_attisnull(HeapTuple tup, int attnum)
{
	Assert(!is_heaptuple_memtuple(tup));

	if (attnum > (int) HeapTupleHeaderGetNatts(tup->t_data))
		return true;

	if (attnum > 0)
	{
		if (HeapTupleNoNulls(tup))
			return false;
		return att_isnull(attnum - 1, tup->t_data->t_bits);
	}

	switch (attnum)
	{
		case TableOidAttributeNumber:
		case SelfItemPointerAttributeNumber:
		case ObjectIdAttributeNumber:
		case MinTransactionIdAttributeNumber:
		case MinCommandIdAttributeNumber:
		case MaxTransactionIdAttributeNumber:
		case MaxCommandIdAttributeNumber:
        case GpSegmentIdAttributeNumber:       /*CDB*/
			/* these are never null */
			break;

		default:
			elog(ERROR, "invalid attnum: %d", attnum);
	}

	return false;
}
Example #4
/*
 * heap_modify_tuple
 *		form a new tuple from an old tuple and a set of replacement values.
 *
 * The replValues, replIsnull, and doReplace arrays must be of the length
 * indicated by tupleDesc->natts.  The new tuple is constructed using the data
 * from replValues/replIsnull at columns where doReplace is true, and using
 * the data from the old tuple at columns where doReplace is false.
 *
 * The result is allocated in the current memory context.
 */
HeapTuple
heap_modify_tuple(HeapTuple tuple,
				  TupleDesc tupleDesc,
				  Datum *replValues,
				  bool *replIsnull,
				  bool *doReplace)
{
	int			numberOfAttributes = tupleDesc->natts;
	int			attoff;
	Datum	   *values;
	bool	   *isnull;
	HeapTuple	newTuple;

	Assert(!is_heaptuple_memtuple(tuple));

	/*
	 * allocate and fill values and isnull arrays from either the tuple or the
	 * repl information, as appropriate.
	 *
	 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
	 * heap_getattr() only the non-replaced columns.  The latter could win if
	 * there are many replaced columns and few non-replaced ones. However,
	 * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
	 * O(N^2) if there are many non-replaced columns, so it seems better to
	 * err on the side of linear cost.
	 */
	values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
	isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));

	heap_deform_tuple(tuple, tupleDesc, values, isnull);

	for (attoff = 0; attoff < numberOfAttributes; attoff++)
	{
		if (doReplace[attoff])
		{
			values[attoff] = replValues[attoff];
			isnull[attoff] = replIsnull[attoff];
		}
	}

	/*
	 * create a new tuple from the values and isnull arrays
	 */
	newTuple = heap_form_tuple(tupleDesc, values, isnull);

	pfree(values);
	pfree(isnull);

	/*
	 * copy the identification info of the old tuple: t_ctid, t_self, and OID
	 * (if any)
	 */
	newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
	newTuple->t_self = tuple->t_self;
	if (tupleDesc->tdhasoid)
		HeapTupleSetOid(newTuple, HeapTupleGetOid(tuple));

	return newTuple;
}
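
Usage note (hypothetical, not part of the source): a common pattern is to replace a single column, sizing the three parallel arrays from the descriptor and flagging only the target position in doReplace.

/*
 * Hypothetical caller (sketch): replace 1-based column `attnum` of
 * `tuple` with `newval`, leaving every other column untouched.
 */
static HeapTuple
replace_one_column(HeapTuple tuple, TupleDesc tupleDesc,
				   int attnum, Datum newval)
{
	int			natts = tupleDesc->natts;
	Datum	   *repl_val = (Datum *) palloc0(natts * sizeof(Datum));
	bool	   *repl_null = (bool *) palloc0(natts * sizeof(bool));
	bool	   *repl_repl = (bool *) palloc0(natts * sizeof(bool));
	HeapTuple	newtup;

	repl_val[attnum - 1] = newval;
	repl_null[attnum - 1] = false;		/* new value is not null */
	repl_repl[attnum - 1] = true;		/* only this column changes */

	newtup = heap_modify_tuple(tuple, tupleDesc,
							   repl_val, repl_null, repl_repl);

	pfree(repl_val);
	pfree(repl_null);
	pfree(repl_repl);
	return newtup;
}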
Example #5
static void *
copytup_heap(Tuplestorestate *state, TuplestorePos *pos, void *tup)
{
	if(!is_heaptuple_memtuple((HeapTuple) tup))
		return heaptuple_copy_to((HeapTuple) tup, NULL, NULL);

	return memtuple_copy_to((MemTuple) tup, NULL, NULL, NULL);
}
Example #6
/* ----------
 * toast_delete -
 *
 *	Cascaded delete toast-entries on DELETE
 * ----------
 */
void
toast_delete(Relation rel, HeapTuple oldtup, MemTupleBinding *pbind)
{
	TupleDesc	tupleDesc;
	Form_pg_attribute *att;
	int			numAttrs;
	int			i;
	Datum		toast_values[MaxHeapAttributeNumber];
	bool		toast_isnull[MaxHeapAttributeNumber];
	bool 		ismemtuple = is_heaptuple_memtuple(oldtup);
	
	AssertImply(ismemtuple, pbind);
	AssertImply(!ismemtuple, !pbind);

	/*
	 * We should only ever be called for tuples of plain relations ---
	 * recursing on a toast rel is bad news.
	 */
	Assert(rel->rd_rel->relkind == RELKIND_RELATION);

	/*
	 * Get the tuple descriptor and break down the tuple into fields.
	 *
	 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
	 * heap_getattr() only the varlena columns.  The latter could win if there
	 * are few varlena columns and many non-varlena ones. However,
	 * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
	 * O(N^2) if there are many varlena columns, so it seems better to err on
	 * the side of linear cost.  (We won't even be here unless there's at
	 * least one varlena column, by the way.)
	 */
	tupleDesc = rel->rd_att;
	att = tupleDesc->attrs;
	numAttrs = tupleDesc->natts;

	Assert(numAttrs <= MaxHeapAttributeNumber);

	if(ismemtuple)
		memtuple_deform((MemTuple) oldtup, pbind, toast_values, toast_isnull);
	else
		heap_deform_tuple(oldtup, tupleDesc, toast_values, toast_isnull);

	/*
	 * Check for external stored attributes and delete them from the secondary
	 * relation.
	 */
	for (i = 0; i < numAttrs; i++)
	{
		if (att[i]->attlen == -1)
		{
			Datum		value = toast_values[i];

			if (!toast_isnull[i] && VARATT_IS_EXTERNAL_D(value))
				toast_delete_datum(rel, value);
		}
	}
}
Example #7
/* ----------------
 *		heap_getsysattr
 *
 *		Fetch the value of a system attribute for a tuple.
 *
 * This is a support routine for the heap_getattr macro.  The macro
 * has already determined that the attnum refers to a system attribute.
 * ----------------
 */
Datum
heap_getsysattr(HeapTuple tup, int attnum, bool *isnull)
{
	Datum		result;

	Assert(tup);
	Assert(!is_heaptuple_memtuple(tup));

	/* Currently, no sys attribute ever reads as NULL. */
	if (isnull)
		*isnull = false;

	switch (attnum)
	{
		case SelfItemPointerAttributeNumber:
			/* pass-by-reference datatype */
			result = PointerGetDatum(&(tup->t_self));
			break;
		case ObjectIdAttributeNumber:
			result = ObjectIdGetDatum(HeapTupleGetOid(tup));
			break;
		case MinTransactionIdAttributeNumber:
			result = TransactionIdGetDatum(HeapTupleHeaderGetXmin(tup->t_data));
			break;
		case MaxTransactionIdAttributeNumber:
			result = TransactionIdGetDatum(HeapTupleHeaderGetXmax(tup->t_data));
			break;
		case MinCommandIdAttributeNumber:
		case MaxCommandIdAttributeNumber:

			/*
			 * cmin and cmax are now both aliases for the same field, which
			 * can in fact also be a combo command id.	XXX perhaps we should
			 * return the "real" cmin or cmax if possible, that is if we are
			 * inside the originating transaction?
			 */
			result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
			break;
		case TableOidAttributeNumber:
            /* CDB: Must now use a TupleTableSlot to access the 'tableoid'. */
			result = ObjectIdGetDatum(InvalidOid);
			elog(ERROR, "Invalid reference to \"tableoid\" system attribute");
			break;
		case GpSegmentIdAttributeNumber:                       /*CDB*/
			result = Int32GetDatum(Gp_segment);
			break;
		default:
			elog(ERROR, "invalid attnum: %d", attnum);
			result = 0;			/* keep compiler quiet */
			break;
	}
	return result;
}
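
Usage note (hypothetical sketch): callers normally reach this routine through the heap_getattr macro with a negative attnum; calling it directly looks like the following, where tuple_xmin is an illustrative name.

/* Hypothetical caller (sketch): fetch xmin via the system-attribute path. */
static TransactionId
tuple_xmin(HeapTuple tup)
{
	bool		isnull;
	Datum		d;

	d = heap_getsysattr(tup, MinTransactionIdAttributeNumber, &isnull);
	Assert(!isnull);			/* sys attributes never read as NULL */
	return DatumGetTransactionId(d);
}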
Example #8
/* ----------------
 *		heap_copytuple_with_tuple
 *
 *		copy a tuple into a caller-supplied HeapTuple management struct
 *
 * Note that after calling this function, the "dest" HeapTuple will not be
 * allocated as a single palloc() block (unlike with heap_copytuple()).
 * ----------------
 */
void
heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
{
	if (!HeapTupleIsValid(src) || src->t_data == NULL)
	{
		dest->t_data = NULL;
		return;
	}

	Assert(!is_heaptuple_memtuple(src));

	dest->t_len = src->t_len;
	dest->t_self = src->t_self;
	dest->t_data = (HeapTupleHeader) palloc(src->t_len);
	memcpy((char *) dest->t_data, (char *) src->t_data, src->t_len);
}
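
Usage note (hypothetical sketch): because dest is caller-supplied, the management struct can live on the stack; only t_data is palloc'd and must be freed separately.

/* Hypothetical caller (sketch): copy into a stack-resident HeapTupleData. */
static void
with_local_copy(HeapTuple src)
{
	HeapTupleData copy;

	heap_copytuple_with_tuple(src, &copy);
	if (copy.t_data == NULL)
		return;					/* src was invalid */

	/* ... inspect `copy` here ... */

	pfree(copy.t_data);			/* only t_data was allocated */
}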
Example #9
/* --------------------------------
 *		ExecStoreMinimalTuple
 *
 *		Like ExecStoreTuple, but insert a "minimal" tuple into the slot.
 *
 * No 'buffer' parameter since minimal tuples are never stored in relations.
 * --------------------------------
 */
TupleTableSlot *
ExecStoreMinimalTuple(MemTuple mtup,
					  TupleTableSlot *slot,
					  bool shouldFree)
{
	/*
	 * sanity checks
	 */
	Assert(mtup != NULL);
	Assert(slot != NULL);
	Assert(slot->tts_tupleDescriptor != NULL);
	Assert(slot->tts_mt_bind != NULL);

	/* Actually we are storing a HeapTuple! */
	if(!is_heaptuple_memtuple((HeapTuple) mtup))
		return ExecStoreHeapTuple((HeapTuple) mtup, slot, InvalidBuffer, shouldFree);

	/*
	 * Free any old physical tuple belonging to the slot.
	 */
	free_heaptuple_memtuple(slot);
	
	/*
	 * Store the new tuple into the specified slot.
	 */

	/* Clear tts_flags, here isempty set to false */
	if(shouldFree)
		TupSetShouldFree(slot);
	else
		TupClearShouldFree(slot);

	slot->PRIVATE_tts_memtuple = mtup;

	TupClearIsEmpty(slot);
	slot->PRIVATE_tts_nvalid = 0;

	/*
	 * Drop the pin on the referenced buffer, if there is one.
	 */
	if (BufferIsValid(slot->tts_buffer))
		ReleaseBuffer(slot->tts_buffer);

	slot->tts_buffer = InvalidBuffer;
	return slot;
}
Example #10
HeapTuple
toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, 
					   MemTupleBinding *pbind, int toast_tuple_target,
					   bool isFrozen)
{
	HeapTuple	result_tuple;
	TupleDesc	tupleDesc;
	Form_pg_attribute *att;
	int			numAttrs;
	int			i;

	bool		need_change = false;
	bool		need_free = false;
	bool		need_delold = false;
	bool		has_nulls = false;

	Size		maxDataLen;

	char		toast_action[MaxHeapAttributeNumber];
	bool		toast_isnull[MaxHeapAttributeNumber];
	bool		toast_oldisnull[MaxHeapAttributeNumber];
	Datum		toast_values[MaxHeapAttributeNumber];
	Datum		toast_oldvalues[MaxHeapAttributeNumber];
	int32		toast_sizes[MaxHeapAttributeNumber];
	bool		toast_free[MaxHeapAttributeNumber];
	bool		toast_delold[MaxHeapAttributeNumber];

	bool 		ismemtuple = is_heaptuple_memtuple(newtup);

	AssertImply(ismemtuple, pbind);
	AssertImply(!ismemtuple, !pbind);
	AssertImply(ismemtuple && oldtup, is_heaptuple_memtuple(oldtup));
	Assert(toast_tuple_target > 0);
	
	/*
	 * We should only ever be called for tuples of plain relations ---
	 * recursing on a toast rel is bad news.
	 */
	/* Assert(rel->rd_rel->relkind == RELKIND_RELATION); */
	if (rel->rd_rel->relkind != RELKIND_RELATION)
		elog(LOG, "toasting a non-relation of relkind %c", rel->rd_rel->relkind);

	/*
	 * Get the tuple descriptor and break down the tuple(s) into fields.
	 */
	tupleDesc = rel->rd_att;
	att = tupleDesc->attrs;
	numAttrs = tupleDesc->natts;

	Assert(numAttrs <= MaxHeapAttributeNumber);

	if(ismemtuple)
		memtuple_deform((MemTuple) newtup, pbind, toast_values, toast_isnull);
	else
		heap_deform_tuple(newtup, tupleDesc, toast_values, toast_isnull);

	if (oldtup != NULL)
	{
		if(ismemtuple)
			memtuple_deform((MemTuple) oldtup, pbind, toast_oldvalues, toast_oldisnull);
		else
			heap_deform_tuple(oldtup, tupleDesc, toast_oldvalues, toast_oldisnull);
	}
	/* ----------
	 * Then collect information about the values given
	 *
	 * NOTE: toast_action[i] can have these values:
	 *		' '		default handling
	 *		'p'		already processed --- don't touch it
	 *		'x'		incompressible, but OK to move off
	 *
	 * NOTE: toast_sizes[i] is only made valid for varlena attributes with
	 *		toast_action[i] different from 'p'.
	 * ----------
	 */
	memset(toast_action, ' ', numAttrs * sizeof(char));
	memset(toast_free, 0, numAttrs * sizeof(bool));
	memset(toast_delold, 0, numAttrs * sizeof(bool));

	for (i = 0; i < numAttrs; i++)
	{
		varattrib *old_value;
		varattrib *new_value;

		if (oldtup != NULL)
		{
			/*
			 * For UPDATE get the old and new values of this attribute
			 */
			old_value = (varattrib *) DatumGetPointer(toast_oldvalues[i]);
			new_value = (varattrib *) DatumGetPointer(toast_values[i]);

			/*
			 * If the old value is an external stored one, check if it has
			 * changed so we have to delete it later.
			 */
			if (att[i]->attlen == -1 && !toast_oldisnull[i] &&
				VARATT_IS_EXTERNAL(old_value))
			{
				if (toast_isnull[i] || !VARATT_IS_EXTERNAL(new_value) ||
					memcmp((char *) old_value, (char *) new_value,
						   VARSIZE_EXTERNAL(old_value)) != 0)
				{
					/*
					 * The old external stored value isn't needed any more
					 * after the update
					 */
					toast_delold[i] = true;
					need_delold = true;
				}
				else
				{
					/*
					 * This attribute isn't changed by this update so we reuse
					 * the original reference to the old value in the new
					 * tuple.
					 */
					toast_action[i] = 'p';
					continue;
				}
			}
		}
		else
		{
			/*
			 * For INSERT simply get the new value
			 */
			new_value = (varattrib *) DatumGetPointer(toast_values[i]);
		}

		/*
		 * Handle NULL attributes
		 */
		if (toast_isnull[i])
		{
			toast_action[i] = 'p';
			has_nulls = true;
			continue;
		}

		/*
		 * Now look at varlena attributes
		 */
		if (att[i]->attlen == -1)
		{
			/*
			 * If the table's attribute says PLAIN always, force it so.
			 */
			if (att[i]->attstorage == 'p')
				toast_action[i] = 'p';

			/*
			 * We took care of UPDATE above, so any external value we find
			 * still in the tuple must be someone else's we cannot reuse.
			 * Fetch it back (without decompression, unless we are forcing
			 * PLAIN storage).	If necessary, we'll push it out as a new
			 * external value below.
			 */
			if (VARATT_IS_EXTERNAL(new_value))
			{
				if (att[i]->attstorage == 'p')
					new_value = (varattrib *)heap_tuple_untoast_attr((struct varlena *)new_value);
				else
					new_value = (varattrib *)heap_tuple_fetch_attr((struct varlena *)new_value);
				toast_values[i] = PointerGetDatum(new_value);
				toast_free[i] = true;
				need_change = true;
				need_free = true;
			}

			/*
			 * Remember the size of this attribute
			 */
			toast_sizes[i] = VARSIZE_ANY(new_value);
		}
		else
		{
			/*
			 * Not a varlena attribute, plain storage always
			 */
			toast_action[i] = 'p';
		}
	}

	/* ----------
	 * Compress and/or save external until data fits into target length
	 *
	 *	1: Inline compress attributes with attstorage 'x', and store very
	 *	   large attributes with attstorage 'x' or 'e' external immediately
	 *	2: Store attributes with attstorage 'x' or 'e' external
	 *	3: Inline compress attributes with attstorage 'm'
	 *	4: Store attributes with attstorage 'm' external
	 * ----------
	 */

	if(!ismemtuple)
	{
		/* compute header overhead --- this should match heap_form_tuple() */
		maxDataLen = offsetof(HeapTupleHeaderData, t_bits);
		if (has_nulls)
			maxDataLen += BITMAPLEN(numAttrs);
		if (newtup->t_data->t_infomask & HEAP_HASOID)
			maxDataLen += sizeof(Oid);
		maxDataLen = MAXALIGN(maxDataLen);
		Assert(maxDataLen == newtup->t_data->t_hoff);
		/* now convert to a limit on the tuple data size */
		maxDataLen = toast_tuple_target - maxDataLen;
	}
	else
		maxDataLen = toast_tuple_target;

	/*
	 * Look for attributes with attstorage 'x' to compress.  Also find large
	 * attributes with attstorage 'x' or 'e', and store them external.
	 */
	while (compute_dest_tuplen(tupleDesc, pbind, has_nulls, toast_values, toast_isnull) > maxDataLen)
	{
		int			biggest_attno = -1;
		int32		biggest_size = MAXALIGN(sizeof(varattrib));
		Datum		old_value;
		Datum		new_value;

		/*
		 * Search for the biggest yet unprocessed internal attribute
		 */
		for (i = 0; i < numAttrs; i++)
		{
			if (toast_action[i] != ' ')
				continue;
			if (VARATT_IS_EXTERNAL_D(toast_values[i]))
				continue;
			if (VARATT_IS_COMPRESSED_D(toast_values[i]))
				continue;
			if (att[i]->attstorage != 'x')
				continue;
			if (toast_sizes[i] > biggest_size)
			{
				biggest_attno = i;
				biggest_size = toast_sizes[i];
			}
		}

		if (biggest_attno < 0)
			break;

		/*
		 * Attempt to compress it inline, if it has attstorage 'x'
		 */
		i = biggest_attno;
		old_value = toast_values[i];
		new_value = toast_compress_datum(old_value);

		if (DatumGetPointer(new_value) != NULL)
		{
			/* successful compression */
			if (toast_free[i])
				pfree(DatumGetPointer(old_value));
			toast_values[i] = new_value;
			toast_free[i] = true;
			toast_sizes[i] = VARSIZE_D(toast_values[i]);
			need_change = true;
			need_free = true;
		}
		else
		{
			/*
			 * incompressible data, ignore on subsequent compression passes
			 */
			toast_action[i] = 'x';
		}
	}

	/*
	 * Second we look for attributes of attstorage 'x' or 'e' that are still
	 * inline.
	 */
	while (compute_dest_tuplen(tupleDesc, pbind, has_nulls, toast_values, toast_isnull) > maxDataLen &&
		   rel->rd_rel->reltoastrelid != InvalidOid)
	{
		int			biggest_attno = -1;
		int32		biggest_size = MAXALIGN(sizeof(varattrib));
		Datum		old_value;

		/*------
		 * Search for the biggest yet inlined attribute with
		 * attstorage equals 'x' or 'e'
		 *------
		 */
		for (i = 0; i < numAttrs; i++)
		{
			if (toast_action[i] == 'p')
				continue;
			if (VARATT_IS_EXTERNAL_D(toast_values[i]))
				continue;
			if (att[i]->attstorage != 'x' && att[i]->attstorage != 'e')
				continue;
			if (toast_sizes[i] > biggest_size)
			{
				biggest_attno = i;
				biggest_size = toast_sizes[i];
			}
		}

		if (biggest_attno < 0)
			break;

		/*
		 * Store this external
		 */
		i = biggest_attno;
		old_value = toast_values[i];
		toast_action[i] = 'p';
		toast_values[i] = toast_save_datum(rel, toast_values[i], isFrozen);
		if (toast_free[i])
			pfree(DatumGetPointer(old_value));
		toast_free[i] = true;

		need_change = true;
		need_free = true;
	}

	/*
	 * Round 3 - this time we take attributes with storage 'm' into
	 * compression
	 */
	while (compute_dest_tuplen(tupleDesc, pbind, has_nulls, toast_values, toast_isnull) > maxDataLen)
	{
		int			biggest_attno = -1;
		int32		biggest_size = MAXALIGN(sizeof(varattrib));
		Datum		old_value;
		Datum		new_value;

		/*
		 * Search for the biggest yet uncompressed internal attribute
		 */
		for (i = 0; i < numAttrs; i++)
		{
			if (toast_action[i] != ' ')
				continue;
			if (VARATT_IS_EXTERNAL_D(toast_values[i]))
				continue;		/* can't happen, toast_action would be 'p' */
			if (VARATT_IS_COMPRESSED_D(toast_values[i]))
				continue;
			if (att[i]->attstorage != 'm')
				continue;
			if (toast_sizes[i] > biggest_size)
			{
				biggest_attno = i;
				biggest_size = toast_sizes[i];
			}
		}

		if (biggest_attno < 0)
			break;

		/*
		 * Attempt to compress it inline
		 */
		i = biggest_attno;
		old_value = toast_values[i];
		new_value = toast_compress_datum(old_value);

		if (DatumGetPointer(new_value) != NULL)
		{
			/* successful compression */
			if (toast_free[i])
				pfree(DatumGetPointer(old_value));
			toast_values[i] = new_value;
			toast_free[i] = true;
			toast_sizes[i] = VARSIZE_D(toast_values[i]);
			need_change = true;
			need_free = true;
		}
		else
		{
			/* incompressible, ignore on subsequent compression passes */
			toast_action[i] = 'x';
		}
	}

	/*
	 * Finally we store attributes of type 'm' external, if possible.
	 */
	while (compute_dest_tuplen(tupleDesc, pbind, has_nulls, toast_values, toast_isnull) > maxDataLen &&
		   rel->rd_rel->reltoastrelid != InvalidOid)
	{
		int			biggest_attno = -1;
		int32		biggest_size = MAXALIGN(sizeof(varattrib));
		Datum		old_value;

		/*--------
		 * Search for the biggest yet inlined attribute with
		 * attstorage = 'm'
		 *--------
		 */
		for (i = 0; i < numAttrs; i++)
		{
			if (toast_action[i] == 'p')
				continue;
			if (VARATT_IS_EXTERNAL_D(toast_values[i]))
				continue;		/* can't happen, toast_action would be 'p' */
			if (att[i]->attstorage != 'm')
				continue;
			if (toast_sizes[i] > biggest_size)
			{
				biggest_attno = i;
				biggest_size = toast_sizes[i];
			}
		}

		if (biggest_attno < 0)
			break;

		/*
		 * Store this external
		 */
		i = biggest_attno;
		old_value = toast_values[i];
		toast_action[i] = 'p';
		toast_values[i] = toast_save_datum(rel, toast_values[i], isFrozen);
		if (toast_free[i])
			pfree(DatumGetPointer(old_value));
		toast_free[i] = true;

		need_change = true;
		need_free = true;
	}

	/* XXX Maybe we should check here for any compressed inline attributes that
	 * didn't save enough to warrant keeping. In particular attributes whose
	 * rawsize is < 128 bytes and didn't save at least 3 bytes... or even maybe
	 * more given alignment issues 
	 */

	/*
	 * In the case we toasted any values, we need to build a new heap tuple
	 * with the changed values.
	 */
	if (need_change)
	{
		if(ismemtuple)
			result_tuple = (HeapTuple) memtuple_form_to(pbind, toast_values, toast_isnull, NULL, NULL, false);
		else
		{
			HeapTupleHeader olddata = newtup->t_data;
			HeapTupleHeader new_data;
			int32		new_len;

			/*
			 * Calculate the new size of the tuple.  Header size should not
			 * change, but data size might.
			 */
			new_len = offsetof(HeapTupleHeaderData, t_bits);
			if (has_nulls)
				new_len += BITMAPLEN(numAttrs);
			if (olddata->t_infomask & HEAP_HASOID)
				new_len += sizeof(Oid);
			new_len = MAXALIGN(new_len);
			Assert(new_len == olddata->t_hoff);
			new_len += heap_compute_data_size(tupleDesc,
					toast_values, toast_isnull);

			/*
			 * Allocate and zero the space needed, and fill HeapTupleData fields.
			 */
			result_tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + new_len);
			result_tuple->t_len = new_len;
			result_tuple->t_self = newtup->t_self;
			new_data = (HeapTupleHeader) ((char *) result_tuple + HEAPTUPLESIZE);
			result_tuple->t_data = new_data;

			/*
			 * Put the existing tuple header and the changed values into place
			 */
			memcpy(new_data, olddata, olddata->t_hoff);

			heap_fill_tuple(tupleDesc,
					toast_values,
					toast_isnull,
					(char *) new_data + olddata->t_hoff,
					&(new_data->t_infomask),
					has_nulls ? new_data->t_bits : NULL);
		}
	}
	else
		result_tuple = newtup;

	/*
	 * Free allocated temp values
	 */
	if (need_free)
		for (i = 0; i < numAttrs; i++)
			if (toast_free[i])
				pfree(DatumGetPointer(toast_values[i]));

	/*
	 * Delete external values from the old tuple
	 */
	if (need_delold)
		for (i = 0; i < numAttrs; i++)
			if (toast_delold[i])
				toast_delete_datum(rel, toast_oldvalues[i]);

	return result_tuple;
}
Example #11
/*
 * heaptuple_form_to
 *		construct a tuple from the given values[] and isnull[] arrays,
 *		which are of the length indicated by tupleDescriptor->natts
 *
 * If dst is NULL, the result is allocated in the current memory context.
 * Otherwise the tuple is built in the caller-supplied dst buffer: *dstlen
 * is set to the space required, and NULL is returned if that buffer is
 * too small.
 */
HeapTuple
heaptuple_form_to(TupleDesc tupleDescriptor, Datum *values, bool *isnull, HeapTuple dst, uint32 *dstlen)
{
	HeapTuple	tuple;			/* return tuple */
	HeapTupleHeader td;			/* tuple data */
	Size		actual_len;
	Size		len,
				data_len;
	int			hoff;
	bool		hasnull = false;
	Form_pg_attribute *att = tupleDescriptor->attrs;
	int			numberOfAttributes = tupleDescriptor->natts;
	int			i;

	if (numberOfAttributes > MaxTupleAttributeNumber)
		ereport(ERROR,
				(errcode(ERRCODE_TOO_MANY_COLUMNS),
				 errmsg("number of columns (%d) exceeds limit (%d)",
						numberOfAttributes, MaxTupleAttributeNumber)));

	/*
	 * Check for nulls and embedded tuples; expand any toasted attributes in
	 * embedded tuples.  This preserves the invariant that toasting can only
	 * go one level deep.
	 *
	 * We can skip calling toast_flatten_tuple_attribute() if the attribute
	 * couldn't possibly be of composite type.  All composite datums are
	 * varlena and have alignment 'd'; furthermore they aren't arrays. Also,
	 * if an attribute is already toasted, it must have been sent to disk
	 * already and so cannot contain toasted attributes.
	 */
	for (i = 0; i < numberOfAttributes; i++)
	{
		if (isnull[i])
			hasnull = true;
		else if (att[i]->attlen == -1 &&
				 att[i]->attalign == 'd' &&
				 att[i]->attndims == 0 &&
				 !VARATT_IS_EXTENDED(DatumGetPointer(values[i])))
		{
			values[i] = toast_flatten_tuple_attribute(values[i],
													  att[i]->atttypid,
													  att[i]->atttypmod);
		}
	}

	/*
	 * Determine total space needed
	 */
	len = offsetof(HeapTupleHeaderData, t_bits);

	if (hasnull)
		len += BITMAPLEN(numberOfAttributes);

	if (tupleDescriptor->tdhasoid)
		len += sizeof(Oid);

	hoff = len = MAXALIGN(len); /* align user data safely */

	data_len = heap_compute_data_size(tupleDescriptor, values, isnull);

	len += data_len;

	if (dstlen && (*dstlen) < (HEAPTUPLESIZE + len))
	{
		*dstlen = HEAPTUPLESIZE + len;
		return NULL;
	}

	if (dstlen)
	{
		*dstlen = HEAPTUPLESIZE + len;
		tuple = dst;
		memset(tuple, 0, HEAPTUPLESIZE + len);
	}
	else
		tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);

	/*
	 * Set up the tuple header pointer.  Note that the tuple body directly
	 * follows the HeapTupleData management structure in the same chunk
	 * (whether palloc'd above or caller-supplied).
	 */
	tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);

	/*
	 * And fill in the information.  Note we fill the Datum fields even though
	 * this tuple may never become a Datum.
	 */
	tuple->t_len = len;
	ItemPointerSetInvalid(&(tuple->t_self));

	HeapTupleHeaderSetDatumLength(td, len);
	HeapTupleHeaderSetTypeId(td, tupleDescriptor->tdtypeid);
	HeapTupleHeaderSetTypMod(td, tupleDescriptor->tdtypmod);

	HeapTupleHeaderSetNatts(td, numberOfAttributes);
	td->t_hoff = hoff;

	if (tupleDescriptor->tdhasoid)		/* else leave infomask = 0 */
		td->t_infomask = HEAP_HASOID;

	actual_len = heap_fill_tuple(tupleDescriptor,
								 values,
								 isnull,
								 (char *) td + hoff,
								 data_len,
								 &td->t_infomask,
								 (hasnull ? td->t_bits : NULL));

	Assert(data_len == actual_len);
	Assert(!is_heaptuple_memtuple(tuple));

	return tuple;
}
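
Usage note (hypothetical sketch, mirroring the copy example above): the dst/dstlen pair supports the same probe-then-build convention; form_into_buffer, buf, and buflen are illustrative names.

/*
 * Hypothetical caller (sketch): form a tuple into a reusable buffer,
 * growing it when heaptuple_form_to() reports (via *dstlen) that the
 * buffer is too small.
 */
static HeapTuple
form_into_buffer(TupleDesc tupdesc, Datum *values, bool *isnull,
				 HeapTuple *buf, uint32 *buflen)
{
	uint32		len = *buflen;
	HeapTuple	tup;

	tup = heaptuple_form_to(tupdesc, values, isnull, *buf, &len);
	if (tup == NULL)
	{
		/* too small: len now holds the required size */
		*buf = *buf ? (HeapTuple) repalloc(*buf, len)
					: (HeapTuple) palloc(len);
		*buflen = len;
		tup = heaptuple_form_to(tupdesc, values, isnull, *buf, &len);
	}
	return tup;
}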
Example #12
/*
 * heap_deform_tuple
 *		Given a tuple, extract data into values/isnull arrays; this is
 *		the inverse of heap_form_tuple.
 *
 *		Storage for the values/isnull arrays is provided by the caller;
 *		it should be sized according to tupleDesc->natts not tuple->t_natts.
 *
 *		Note that for pass-by-reference datatypes, the pointer placed
 *		in the Datum will point into the given tuple.
 *
 *		When all or most of a tuple's fields need to be extracted,
 *		this routine will be significantly quicker than a loop around
 *		heap_getattr; the loop will become O(N^2) as soon as any
 *		noncacheable attribute offsets are involved.
 */
void
heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
				  Datum *values, bool *isnull)
{
	HeapTupleHeader tup = tuple->t_data;
	bool		hasnulls = HeapTupleHasNulls(tuple);
	Form_pg_attribute *att = tupleDesc->attrs;
	int			tdesc_natts = tupleDesc->natts;
	int			natts;			/* number of atts to extract */
	int			attnum;
	char	   *tp;				/* ptr to tuple data */
	long		off;			/* offset in tuple data */
	bits8	   *bp = tup->t_bits;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* can we use/set attcacheoff? */

	Assert(!is_heaptuple_memtuple(tuple));
	natts = HeapTupleHeaderGetNatts(tup);

	/*
	 * In inheritance situations, it is possible that the given tuple actually
	 * has more fields than the caller is expecting.  Don't run off the end of
	 * the caller's arrays.
	 */
	natts = Min(natts, tdesc_natts);

	tp = (char *) tup + tup->t_hoff;

	off = 0;

	for (attnum = 0; attnum < natts; attnum++)
	{
		Form_pg_attribute thisatt = att[attnum];

		if (hasnulls && att_isnull(attnum, bp))
		{
			values[attnum] = (Datum) 0;
			isnull[attnum] = true;
			slow = true;		/* can't use attcacheoff anymore */
			continue;
		}

		isnull[attnum] = false;

		if (!slow && thisatt->attcacheoff >= 0)
			off = thisatt->attcacheoff;
		else
		{
			/*
			 * If it's a varlena it may or may not be aligned, so check for
			 * something that looks like a padding byte before aligning.  If
			 * we're already aligned it may be the leading byte of a 4-byte
			 * header, but then the att_align is harmless.  Don't bother
			 * looking if it's not a varlena though.
			 */
			if (thisatt->attlen != -1 || !tp[off])
				off = att_align(off, thisatt->attalign);

			if (!slow && thisatt->attlen != -1)
				thisatt->attcacheoff = off;

		}
		if (!slow && thisatt->attlen < 0)
			slow = true;

		values[attnum] = fetchatt(thisatt, tp + off);

#ifdef USE_ASSERT_CHECKING
		/* Ignore attributes with dropped types */
		if (thisatt->attlen == -1 && !thisatt->attisdropped)
		{
			Assert(VARATT_IS_SHORT_D(values[attnum]) || 
				   !VARATT_COULD_SHORT_D(values[attnum]) ||
				   thisatt->atttypid == OIDVECTOROID ||
				   thisatt->atttypid == INT2VECTOROID ||
				   thisatt->atttypid >= FirstNormalObjectId);	
		}
#endif

		off = att_addlength(off, thisatt->attlen, PointerGetDatum(tp + off));
	}

	/*
	 * If tuple doesn't have all the atts indicated by tupleDesc, read the
	 * rest as null
	 */
	for (; attnum < tdesc_natts; attnum++)
	{
		values[attnum] = (Datum) 0;
		isnull[attnum] = true;
	}
}
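
Usage note (hypothetical sketch): the caller provides both output arrays, sized from the descriptor as the header comment requires.

/* Hypothetical caller (sketch): extract every column of a tuple. */
static void
deform_all(HeapTuple tuple, TupleDesc tupleDesc)
{
	int			natts = tupleDesc->natts;
	Datum	   *values = (Datum *) palloc(natts * sizeof(Datum));
	bool	   *isnull = (bool *) palloc(natts * sizeof(bool));

	heap_deform_tuple(tuple, tupleDesc, values, isnull);

	/* ... use values[i] / isnull[i] for 0 <= i < natts ... */

	pfree(values);
	pfree(isnull);
}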
Example #13
/*
 * heap_deform_tuple
 *		Given a tuple, extract data into values/isnull arrays; this is
 *		the inverse of heap_form_tuple.
 *
 *		Storage for the values/isnull arrays is provided by the caller;
 *		it should be sized according to tupleDesc->natts not tuple->t_natts.
 *
 *		Note that for pass-by-reference datatypes, the pointer placed
 *		in the Datum will point into the given tuple.
 *
 *		When all or most of a tuple's fields need to be extracted,
 *		this routine will be significantly quicker than a loop around
 *		heap_getattr; the loop will become O(N^2) as soon as any
 *		noncacheable attribute offsets are involved.
 */
void
heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
				  Datum *values, bool *isnull)
{
	HeapTupleHeader tup = tuple->t_data;
	bool		hasnulls = HeapTupleHasNulls(tuple);
	Form_pg_attribute *att = tupleDesc->attrs;
	int			tdesc_natts = tupleDesc->natts;
	int			natts;			/* number of atts to extract */
	int			attnum;
	char	   *tp;				/* ptr to tuple data */
	long		off;			/* offset in tuple data */
	bits8	   *bp = tup->t_bits;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* can we use/set attcacheoff? */

	Assert(!is_heaptuple_memtuple(tuple));
	natts = HeapTupleHeaderGetNatts(tup);

	/*
	 * In inheritance situations, it is possible that the given tuple actually
	 * has more fields than the caller is expecting.  Don't run off the end of
	 * the caller's arrays.
	 */
	natts = Min(natts, tdesc_natts);

	tp = (char *) tup + tup->t_hoff;

	off = 0;

	for (attnum = 0; attnum < natts; attnum++)
	{
		Form_pg_attribute thisatt = att[attnum];

		if (hasnulls && att_isnull(attnum, bp))
		{
			values[attnum] = (Datum) 0;
			isnull[attnum] = true;
			slow = true;		/* can't use attcacheoff anymore */
			continue;
		}

		isnull[attnum] = false;

		if (!slow && thisatt->attcacheoff >= 0)
			off = thisatt->attcacheoff;
		else if (thisatt->attlen == -1)
		{
			/*
			 * We can only cache the offset for a varlena attribute if the
			 * offset is already suitably aligned, so that there would be no
			 * pad bytes in any case: then the offset will be valid for either
			 * an aligned or unaligned value.
			 */
			if (!slow &&
				off == att_align_nominal(off, thisatt->attalign))
				thisatt->attcacheoff = off;
			else
			{
				off = att_align_pointer(off, thisatt->attalign, -1,
										tp + off);
				slow = true;
			}
		}
		else
		{
			/* not varlena, so safe to use att_align_nominal */
			off = att_align_nominal(off, thisatt->attalign);

			if (!slow)
				thisatt->attcacheoff = off;

		}
		if (!slow && thisatt->attlen < 0)
			slow = true;

		values[attnum] = fetchatt(thisatt, tp + off);

#ifdef USE_ASSERT_CHECKING
		/* Ignore attributes with dropped types */
		if (thisatt->attlen == -1 && !thisatt->attisdropped)
		{
			Assert(VARATT_IS_SHORT(DatumGetPointer(values[attnum])) ||
				   !VARATT_CAN_MAKE_SHORT(DatumGetPointer(values[attnum])) ||
				   thisatt->atttypid == OIDVECTOROID ||
				   thisatt->atttypid == INT2VECTOROID ||
				   thisatt->atttypid >= FirstNormalObjectId);	
		}
#endif

		off = att_addlength_pointer(off, thisatt->attlen, tp + off);
	}

	/*
	 * If tuple doesn't have all the atts indicated by tupleDesc, read the
	 * rest as null
	 */
	for (; attnum < tdesc_natts; attnum++)
	{
		values[attnum] = (Datum) 0;
		isnull[attnum] = true;
	}
}
Example #14
/* --------------------------------
 *		ExecStoreHeapTuple
 *
 *		This function is used to store a physical tuple into a specified
 *		slot in the tuple table.
 *
 *		tuple:	tuple to store
 *		slot:	slot to store it in
 *		buffer: disk buffer if tuple is in a disk page, else InvalidBuffer
 *		shouldFree: true if ExecClearTuple should pfree() the tuple
 *					when done with it
 *
 * If 'buffer' is not InvalidBuffer, the tuple table code acquires a pin
 * on the buffer which is held until the slot is cleared, so that the tuple
 * won't go away on us.
 *
 * shouldFree is normally set 'true' for tuples constructed on-the-fly.
 * It must always be 'false' for tuples that are stored in disk pages,
 * since we don't want to try to pfree those.
 *
 * Another case where it is 'false' is when the referenced tuple is held
 * in a tuple table slot belonging to a lower-level executor Proc node.
 * In this case the lower-level slot retains ownership and responsibility
 * for eventually releasing the tuple.	When this method is used, we must
 * be certain that the upper-level Proc node will lose interest in the tuple
 * sooner than the lower-level one does!  If you're not certain, copy the
 * lower-level tuple with heap_copytuple and let the upper-level table
 * slot assume ownership of the copy!
 *
 * Return value is just the passed-in slot pointer.
 *
 * NOTE: before PostgreSQL 8.1, this function would accept a NULL tuple
 * pointer and effectively behave like ExecClearTuple (though you could
 * still specify a buffer to pin, which would be an odd combination).
 * This saved a couple lines of code in a few places, but seemed more likely
 * to mask logic errors than to be really useful, so it's now disallowed.
 * --------------------------------
 */
TupleTableSlot *
ExecStoreHeapTuple(HeapTuple tuple,
			   TupleTableSlot *slot,
			   Buffer buffer,
			   bool shouldFree)
{
	/*
	 * sanity checks
	 */
	Assert(tuple != NULL);
	Assert(slot != NULL);
	Assert(slot->tts_tupleDescriptor != NULL);
	/* passing shouldFree=true for a tuple on a disk page is not sane */
	Assert(BufferIsValid(buffer) ? (!shouldFree) : true);

	/*
	 * Actually we are storing a memtuple!
	 */
	if(is_heaptuple_memtuple(tuple))
	{
		Assert(buffer == InvalidBuffer);
		return ExecStoreMinimalTuple((MemTuple) tuple, slot, shouldFree);
	}

	/*
	 * Free any old physical tuple belonging to the slot.
	 */
	free_heaptuple_memtuple(slot);

	/*
	 * Store the new tuple into the specified slot.
	 */

	/* Clear tts_flags, here isempty set to false */
	slot->PRIVATE_tts_flags = shouldFree ? TTS_SHOULDFREE : 0;

	/* store the tuple */
	slot->PRIVATE_tts_heaptuple = (void *) tuple;

	/* Mark extracted state invalid */
	slot->PRIVATE_tts_nvalid = 0;

	/*
	 * If tuple is on a disk page, keep the page pinned as long as we hold a
	 * pointer into it.  We assume the caller already has such a pin.
	 *
	 * This is coded to optimize the case where the slot previously held a
	 * tuple on the same disk page: in that case releasing and re-acquiring
	 * the pin is a waste of cycles.  This is a common situation during
	 * seqscans, so it's worth troubling over.
	 */
	if (slot->tts_buffer != buffer)
	{
		if (BufferIsValid(slot->tts_buffer))
			ReleaseBuffer(slot->tts_buffer);
		slot->tts_buffer = buffer;
		if (BufferIsValid(buffer))
			IncrBufferRefCount(buffer);
	}

	return slot;
}
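
Usage note (hypothetical sketch): the shouldFree and buffer arguments encode ownership — a palloc'd tuple is handed to the slot with shouldFree=true and no buffer, while a tuple on a pinned disk page keeps shouldFree=false and passes its buffer.

/* Hypothetical caller (sketch): the two common ownership patterns. */
static void
store_two_ways(TupleTableSlot *slot, TupleDesc tupdesc,
			   Datum *values, bool *isnull,
			   HeapTuple page_tuple, Buffer buf)
{
	/* palloc'd tuple: the slot will pfree it when cleared */
	HeapTuple	built = heap_form_tuple(tupdesc, values, isnull);

	ExecStoreHeapTuple(built, slot, InvalidBuffer, true);

	/* tuple on a pinned disk page: slot keeps the pin, never frees */
	ExecStoreHeapTuple(page_tuple, slot, buf, false);
}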
Example #15
/*
 * Convert a HeapTuple into a byte-sequence, and store it directly
 * into a chunklist for transmission.
 *
 * This code is based on the printtup_internal_20() function in printtup.c.
 */
void
SerializeTupleIntoChunks(HeapTuple tuple, SerTupInfo * pSerInfo, TupleChunkList tcList)
{
	TupleChunkListItem tcItem = NULL;
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	int			i,
		natts;
	bool		fHandled;

	AssertArg(tcList != NULL);
	AssertArg(tuple != NULL);
	AssertArg(pSerInfo != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/* get ready to go */
	tcList->p_first = NULL;
	tcList->p_last = NULL;
	tcList->num_chunks = 0;
	tcList->serialized_data_length = 0;
	tcList->max_chunk_length = Gp_max_tuple_chunk_size;

	if (natts == 0)
	{
		tcItem = getChunkFromCache(&pSerInfo->chunkCache);
		if (tcItem == NULL)
		{
			ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
							errmsg("Could not allocate space for first chunk item in new chunk list.")));
		}

		/* TC_EMPTY is just one chunk */
		SetChunkType(tcItem->chunk_data, TC_EMPTY);
		tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
		appendChunkToTCList(tcList, tcItem);

		return;
	}

	tcItem = getChunkFromCache(&pSerInfo->chunkCache);
	if (tcItem == NULL)
	{
		ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
						errmsg("Could not allocate space for first chunk item in new chunk list.")));
	}

	/* assume that we'll take a single chunk */
	SetChunkType(tcItem->chunk_data, TC_WHOLE);
	tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
	appendChunkToTCList(tcList, tcItem);

	AssertState(s_tupSerMemCtxt != NULL);

	if (is_heaptuple_memtuple(tuple))
	{
		addByteStringToChunkList(tcList, (char *)tuple, memtuple_get_size((MemTuple)tuple, NULL), &pSerInfo->chunkCache);
		addPadding(tcList, &pSerInfo->chunkCache, memtuple_get_size((MemTuple)tuple, NULL));
	}
	else
	{
		TupSerHeader tsh;

		unsigned int	datalen;
		unsigned int	nullslen;

		HeapTupleHeader t_data = tuple->t_data;

		datalen = tuple->t_len - t_data->t_hoff;
		if (HeapTupleHasNulls(tuple))
			nullslen = BITMAPLEN(HeapTupleHeaderGetNatts(t_data));
		else
			nullslen = 0;

		tsh.tuplen = sizeof(TupSerHeader) + TYPEALIGN(TUPLE_CHUNK_ALIGN,nullslen) + datalen;
		tsh.natts = HeapTupleHeaderGetNatts(t_data);
		tsh.infomask = t_data->t_infomask;

		addByteStringToChunkList(tcList, (char *)&tsh, sizeof(TupSerHeader), &pSerInfo->chunkCache);
		/* If we don't have any attributes which have been toasted, we
		 * can be very simple: just send the raw data. */
		if ((tsh.infomask & HEAP_HASEXTERNAL) == 0)
		{
			if (nullslen)
			{
				addByteStringToChunkList(tcList, (char *)t_data->t_bits, nullslen, &pSerInfo->chunkCache);
				addPadding(tcList,&pSerInfo->chunkCache,nullslen);
			}

			addByteStringToChunkList(tcList, (char *)t_data + t_data->t_hoff, datalen, &pSerInfo->chunkCache);
			addPadding(tcList,&pSerInfo->chunkCache,datalen);
		}
		else
		{
			/* We have to be more careful when we have tuples that
			 * have been toasted. Ideally we'd like to send the
			 * untoasted attributes in as "raw" a format as possible,
			 * but that makes rebuilding the tuple harder.
			 */
			oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

			/* deconstruct the tuple (faster than a heap_getattr loop) */
			heap_deform_tuple(tuple, tupdesc, pSerInfo->values, pSerInfo->nulls);

			MemoryContextSwitchTo(oldCtxt);

			/* Send the nulls character-array. */
			addByteStringToChunkList(tcList, pSerInfo->nulls, natts, &pSerInfo->chunkCache);
			addPadding(tcList,&pSerInfo->chunkCache,natts);

			/*
			 * send the attributes of this tuple: NOTE anything which allocates
			 * temporary space (e.g. could result in a PG_DETOAST_DATUM) should be
			 * executed with the memory context set to s_tupSerMemCtxt
			 */
			for (i = 0; i < natts; ++i)
			{
				SerAttrInfo *attrInfo = pSerInfo->myinfo + i;
				Datum		origattr = pSerInfo->values[i],
					attr;
				bytea	   *outputbytes=0;

				/* skip null attributes (already taken care of above) */
				if (pSerInfo->nulls[i])
					continue;

				/*
				 * If we have a toasted datum, forcibly detoast it here to avoid
				 * memory leakage: we want to force the detoast allocation(s) to
				 * happen in our reset-able serialization context.
				 */
				if (attrInfo->typisvarlena)
				{
					oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);
					/* we want to detoast but leave compressed, if
					 * possible, but we have to handle varlena
					 * attributes (and others ?) differently than we
					 * currently do (first step is to use
					 * heap_tuple_fetch_attr() instead of
					 * PG_DETOAST_DATUM()). */
					attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
					MemoryContextSwitchTo(oldCtxt);
				}
				else
					attr = origattr;

				/*
				 * Assume that the data's output will be handled by the special IO
				 * code, and if not then we can handle it the slow way.
				 */
				fHandled = true;
				switch (attrInfo->atttypid)
				{
					case INT4OID:
						addInt32ToChunkList(tcList, DatumGetInt32(attr), &pSerInfo->chunkCache);
						break;
					case CHAROID:
						addCharToChunkList(tcList, DatumGetChar(attr), &pSerInfo->chunkCache);
						addPadding(tcList,&pSerInfo->chunkCache,1);
						break;
					case BPCHAROID:
					case VARCHAROID:
					case INT2VECTOROID: /* postgres serialization logic broken, use our own */
					case OIDVECTOROID: /* postgres serialization logic broken, use our own */
					case ANYARRAYOID:
					{
						text	   *pText = DatumGetTextP(attr);
						int32		textSize = VARSIZE(pText) - VARHDRSZ;

						addInt32ToChunkList(tcList, textSize, &pSerInfo->chunkCache);
						addByteStringToChunkList(tcList, (char *) VARDATA(pText), textSize, &pSerInfo->chunkCache);
						addPadding(tcList,&pSerInfo->chunkCache,textSize);
						break;
					}
					case DATEOID:
					{
						DateADT date = DatumGetDateADT(attr);

						addByteStringToChunkList(tcList, (char *) &date, sizeof(DateADT), &pSerInfo->chunkCache);
						break;
					}
					case NUMERICOID:
					{
						/*
						 * Treat the numeric as a varlena variable, and just push
						 * the whole shebang to the output-buffer.	We don't care
						 * about the guts of the numeric.
						 */
						Numeric		num = DatumGetNumeric(attr);
						int32		numSize = VARSIZE(num) - VARHDRSZ;

						addInt32ToChunkList(tcList, numSize, &pSerInfo->chunkCache);
						addByteStringToChunkList(tcList, (char *) VARDATA(num), numSize, &pSerInfo->chunkCache);
						addPadding(tcList,&pSerInfo->chunkCache,numSize);
						break;
					}

					case ACLITEMOID:
					{
						AclItem		*aip = DatumGetAclItemP(attr);
						char		*outputstring;
						int32		aclSize ;

						outputstring = DatumGetCString(DirectFunctionCall1(aclitemout,
																		   PointerGetDatum(aip)));

						aclSize = strlen(outputstring);
						addInt32ToChunkList(tcList, aclSize, &pSerInfo->chunkCache);
						addByteStringToChunkList(tcList, outputstring,aclSize, &pSerInfo->chunkCache);
						addPadding(tcList,&pSerInfo->chunkCache,aclSize);
						break;
					}	

					case 210: /* storage manager */
					{
						char		*smgrstr;
						int32		strsize;

						smgrstr = DatumGetCString(DirectFunctionCall1(smgrout, 0));
						strsize = strlen(smgrstr);
						addInt32ToChunkList(tcList, strsize, &pSerInfo->chunkCache);
						addByteStringToChunkList(tcList, smgrstr, strsize, &pSerInfo->chunkCache);
						addPadding(tcList,&pSerInfo->chunkCache,strsize);
						break;
					}

					default:
						fHandled = false;
				}

				if (fHandled)
					continue;

				/*
				 * the FunctionCall2 call into the send function may result in some
				 * allocations which we'd like to have contained by our reset-able
				 * context
				 */
				oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);						  
							  
				/* Call the attribute type's binary input converter. */
				if (attrInfo->send_finfo.fn_nargs == 1)
					outputbytes =
						DatumGetByteaP(FunctionCall1(&attrInfo->send_finfo,
													 attr));
				else if (attrInfo->send_finfo.fn_nargs == 2)
					outputbytes =
						DatumGetByteaP(FunctionCall2(&attrInfo->send_finfo,
													 attr,
													 ObjectIdGetDatum(attrInfo->send_typio_param)));
				else if (attrInfo->send_finfo.fn_nargs == 3)
					outputbytes =
						DatumGetByteaP(FunctionCall3(&attrInfo->send_finfo,
													 attr,
													 ObjectIdGetDatum(attrInfo->send_typio_param),
													 Int32GetDatum(tupdesc->attrs[i]->atttypmod)));
				else
				{
					ereport(ERROR,
							(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
							 errmsg("Conversion function takes %d args", attrInfo->send_finfo.fn_nargs)));
				}
		
				MemoryContextSwitchTo(oldCtxt);

				/* We assume the result will not have been toasted */
				addInt32ToChunkList(tcList, VARSIZE(outputbytes) - VARHDRSZ, &pSerInfo->chunkCache);
				addByteStringToChunkList(tcList, VARDATA(outputbytes),
										 VARSIZE(outputbytes) - VARHDRSZ, &pSerInfo->chunkCache);
				addPadding(tcList,&pSerInfo->chunkCache,VARSIZE(outputbytes) - VARHDRSZ);

				/*
				 * this was allocated in our reset-able context, but we *are* done
				 * with it; and for tuples with several large columns it'd be nice to
				 * free the memory back to the context
				 */
				pfree(outputbytes);

			}

			MemoryContextReset(s_tupSerMemCtxt);
		}
	}

	/*
	 * if we have more than 1 chunk we have to set the chunk types on our
	 * first chunk and last chunk
	 */
	if (tcList->num_chunks > 1)
	{
		TupleChunkListItem first,
			last;

		first = tcList->p_first;
		last = tcList->p_last;

		Assert(first != NULL);
		Assert(first != last);
		Assert(last != NULL);

		SetChunkType(first->chunk_data, TC_PARTIAL_START);
		SetChunkType(last->chunk_data, TC_PARTIAL_END);

		/*
		 * any intervening chunks are already set to TC_PARTIAL_MID when
		 * allocated
		 */
	}

	return;
}
Example #16
/* ----------------
 *		nocachegetattr
 *
 *		This only gets called from fastgetattr() macro, in cases where
 *		we can't use a cacheoffset and the value is not null.
 *
 *		This caches attribute offsets in the attribute descriptor.
 *
 *		An alternative way to speed things up would be to cache offsets
 *		with the tuple, but that seems more difficult unless you take
 *		the storage hit of actually putting those offsets into the
 *		tuple you send to disk.  Yuck.
 *
 *		This scheme will be slightly slower than that, but should
 *		perform well for queries which hit large #'s of tuples.  After
 *		you cache the offsets once, examining all the other tuples using
 *		the same attribute descriptor will go much quicker. -cim 5/4/91
 *
 *		NOTE: if you need to change this code, see also heap_deform_tuple.
 *		Also see nocache_index_getattr, which is the same code for index
 *		tuples.
 * ----------------
 */
Datum
nocachegetattr(HeapTuple tuple,
			   int attnum,
			   TupleDesc tupleDesc)
{
	HeapTupleHeader tup = tuple->t_data;
	Form_pg_attribute *att = tupleDesc->attrs;
	char	   *tp;				/* ptr to data part of tuple */
	bits8	   *bp = tup->t_bits;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* do we have to walk attrs? */
	int			off;			/* current offset within data */

	Assert(!is_heaptuple_memtuple(tuple));

	/* ----------------
	 *	 Three cases:
	 *
	 *	 1: No nulls and no variable-width attributes.
	 *	 2: Has a null or a var-width AFTER att.
	 *	 3: Has nulls or var-widths BEFORE att.
	 * ----------------
	 */

#ifdef IN_MACRO
/* This is handled in the macro */
	Assert(attnum > 0);

	if (isnull)
		*isnull = false;
#endif

	attnum--;

	if (HeapTupleNoNulls(tuple))
	{
#ifdef IN_MACRO
/* This is handled in the macro */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							(char *) tup + tup->t_hoff +
							att[attnum]->attcacheoff);
		}
#endif
	}
	else
	{
		/*
		 * there's a null somewhere in the tuple
		 *
		 * check to see if desired att is null
		 */

#ifdef IN_MACRO
/* This is handled in the macro */
		if (att_isnull(attnum, bp))
		{
			if (isnull)
				*isnull = true;
			return (Datum) NULL;
		}
#endif

		/*
		 * Now check to see if any preceding bits are null...
		 */
		{
			int			byte = attnum >> 3;
			int			finalbit = attnum & 0x07;

			/* check for nulls "before" final bit of last byte */
			if ((~bp[byte]) & ((1 << finalbit) - 1))
				slow = true;
			else
			{
				/* check for nulls in any "earlier" bytes */
				int			i;

				for (i = 0; i < byte; i++)
				{
					if (bp[i] != 0xFF)
					{
						slow = true;
						break;
					}
				}
			}
		}
	}

	tp = (char *) tup + tup->t_hoff;

	if (!slow)
	{
		/*
		 * If we get here, there are no nulls up to and including the target
		 * attribute.  If we have a cached offset, we can use it.
		 */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							tp + att[attnum]->attcacheoff);
		}

		/*
		 * Otherwise, check for non-fixed-length attrs up to and including
		 * target.	If there aren't any, it's safe to cheaply initialize the
		 * cached offsets for these attrs.
		 */
		if (HeapTupleHasVarWidth(tuple))
		{
			int			j;

			for (j = 0; j <= attnum; j++)
			{
				if (att[j]->attlen <= 0)
				{
					slow = true;
					break;
				}
			}
		}
	}

	if (!slow)
	{
		int			natts = tupleDesc->natts;
		int			j = 1;

		/*
		 * If we get here, we have a tuple with no nulls or var-widths up to
		 * and including the target attribute, so we can use the cached offset
		 * ... only we don't have it yet, or we'd not have got here.  Since
		 * it's cheap to compute offsets for fixed-width columns, we take the
		 * opportunity to initialize the cached offsets for *all* the leading
		 * fixed-width columns, in hope of avoiding future visits to this
		 * routine.
		 */
		att[0]->attcacheoff = 0;

		/* we might have set some offsets in the slow path previously */
		while (j < natts && att[j]->attcacheoff > 0)
			j++;

		off = att[j - 1]->attcacheoff + att[j - 1]->attlen;

		for (; j < natts; j++)
		{
			if (att[j]->attlen <= 0)
				break;

			off = att_align_nominal(off, att[j]->attalign);

			att[j]->attcacheoff = off;

			off += att[j]->attlen;
		}

		Assert(j > attnum);

		off = att[attnum]->attcacheoff;
	}
	else
	{
		bool		usecache = true;
		int			i;

		/* this is always true */
		att[0]->attcacheoff = 0;

		/*
		 * Now we know that we have to walk the tuple CAREFULLY.  But we still
		 * might be able to cache some offsets for next time.
		 *
		 * Note - This loop is a little tricky.  For each non-null attribute,
		 * we have to first account for alignment padding before the attr,
		 * then advance over the attr based on its length.	Nulls have no
		 * storage and no alignment padding either.  We can use/set
		 * attcacheoff until we reach either a null or a var-width attribute.
		 */
		off = 0;
		for (i = 0;; i++)		/* loop exit is at "break" */
		{
			if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
			{
				usecache = false;
				continue;		/* this cannot be the target att */
			}

			/* If we know the next offset, we can skip the rest */
			if (usecache && att[i]->attcacheoff >= 0)
				off = att[i]->attcacheoff;
			else if (att[i]->attlen == -1)
			{
				/*
				 * We can only cache the offset for a varlena attribute if the
				 * offset is already suitably aligned, so that there would be
				 * no pad bytes in any case: then the offset will be valid for
				 * either an aligned or unaligned value.
				 */
				if (usecache &&
					off == att_align_nominal(off, att[i]->attalign))
					att[i]->attcacheoff = off;
				else
				{
					off = att_align_pointer(off, att[i]->attalign, -1,
											tp + off);
					usecache = false;
				}
			}
			else
			{
				/* not varlena, so safe to use att_align_nominal */
				off = att_align_nominal(off, att[i]->attalign);

				if (usecache)
					att[i]->attcacheoff = off;
			}

			if (i == attnum)
				break;

			off = att_addlength_pointer(off, att[i]->attlen, tp + off);

			if (usecache && att[i]->attlen <= 0)
				usecache = false;
		}
	}

	return fetchatt(att[attnum], tp + off);
}
Example #17
/*
 * Serialize a tuple directly into a buffer.
 *
 * We're called with at least enough space for a tuple-chunk-header.
 */
int
SerializeTupleDirect(HeapTuple tuple, SerTupInfo * pSerInfo, struct directTransportBuffer *b)
{
	int natts;
	int dataSize = TUPLE_CHUNK_HEADER_SIZE;
	TupleDesc	tupdesc;

	AssertArg(tuple != NULL);
	AssertArg(pSerInfo != NULL);
	AssertArg(b != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	do
	{
		if (natts == 0)
		{
			/* TC_EMPTY is just one chunk */
			SetChunkType(b->pri, TC_EMPTY);
			SetChunkDataSize(b->pri, 0);

			break;
		}

		/* easy case */
		if (is_heaptuple_memtuple(tuple))
		{
			int tupleSize;
			int paddedSize;

			tupleSize = memtuple_get_size((MemTuple)tuple, NULL);
			paddedSize = TYPEALIGN(TUPLE_CHUNK_ALIGN, tupleSize);

			if (paddedSize + TUPLE_CHUNK_HEADER_SIZE > b->prilen)
				return 0;

			/* will fit. */
			memcpy(b->pri + TUPLE_CHUNK_HEADER_SIZE, tuple, tupleSize);
			memset(b->pri + TUPLE_CHUNK_HEADER_SIZE + tupleSize, 0, paddedSize - tupleSize);

			dataSize += paddedSize;

			SetChunkType(b->pri, TC_WHOLE);
			SetChunkDataSize(b->pri, dataSize - TUPLE_CHUNK_HEADER_SIZE);
			break;
		}
		else
		{
			TupSerHeader tsh;

			unsigned int	datalen;
			unsigned int	nullslen;

			HeapTupleHeader t_data = tuple->t_data;

			unsigned char *pos;

			datalen = tuple->t_len - t_data->t_hoff;
			if (HeapTupleHasNulls(tuple))
				nullslen = BITMAPLEN(HeapTupleHeaderGetNatts(t_data));
			else
				nullslen = 0;

			tsh.tuplen = sizeof(TupSerHeader) + TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) + TYPEALIGN(TUPLE_CHUNK_ALIGN, datalen);
			tsh.natts = HeapTupleHeaderGetNatts(t_data);
			tsh.infomask = t_data->t_infomask;

			if (dataSize + tsh.tuplen > b->prilen ||
				(tsh.infomask & HEAP_HASEXTERNAL) != 0)
				return 0;

			pos = b->pri + TUPLE_CHUNK_HEADER_SIZE;

			memcpy(pos, (char *)&tsh, sizeof(TupSerHeader));
			pos += sizeof(TupSerHeader);

			if (nullslen)
			{
				memcpy(pos, (char *)t_data->t_bits, nullslen);
				pos += nullslen;
				memset(pos, 0, TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) - nullslen);
				pos += TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) - nullslen;
			}

			memcpy(pos,  (char *)t_data + t_data->t_hoff, datalen);
			pos += datalen;
			memset(pos, 0, TYPEALIGN(TUPLE_CHUNK_ALIGN, datalen) - datalen);
			pos += TYPEALIGN(TUPLE_CHUNK_ALIGN, datalen) - datalen;

			dataSize += tsh.tuplen;

			SetChunkType(b->pri, TC_WHOLE);
			SetChunkDataSize(b->pri, dataSize - TUPLE_CHUNK_HEADER_SIZE);

			break;
		}

		/* Not reached: every branch above either breaks out of the loop
		 * or returns 0 to request the older "out-of-line" serialization. */
		return 0;
	}
	while (0);

	return dataSize;   
}
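
Usage note (hypothetical sketch): a return of 0 tells the caller the tuple did not fit (or has external attributes) and that the chunked path of SerializeTupleIntoChunks() (Example #15) should be used instead; send_tuple and its parameters are illustrative names.

/* Hypothetical caller (sketch): try the direct path first, fall back
 * to the chunked serializer when it reports 0. */
static void
send_tuple(HeapTuple tuple, SerTupInfo *pSerInfo,
		   struct directTransportBuffer *b, TupleChunkList tcList)
{
	int			sent = SerializeTupleDirect(tuple, pSerInfo, b);

	if (sent == 0)
	{
		/* didn't fit, or has toasted attributes: use Example #15 */
		SerializeTupleIntoChunks(tuple, pSerInfo, tcList);
	}
	/* else: `sent` bytes were placed directly in b->pri */
}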
Example #18
/* ----------------
 *		nocachegetattr
 *
 *		This only gets called from fastgetattr() macro, in cases where
 *		we can't use a cacheoffset and the value is not null.
 *
 *		This caches attribute offsets in the attribute descriptor.
 *
 *		An alternative way to speed things up would be to cache offsets
 *		with the tuple, but that seems more difficult unless you take
 *		the storage hit of actually putting those offsets into the
 *		tuple you send to disk.  Yuck.
 *
 *		This scheme will be slightly slower than that, but should
 *		perform well for queries which hit large #'s of tuples.  After
 *		you cache the offsets once, examining all the other tuples using
 *		the same attribute descriptor will go much quicker. -cim 5/4/91
 *
 *		NOTE: if you need to change this code, see also heap_deform_tuple.
 *		Also see nocache_index_getattr, which is the same code for index
 *		tuples.
 * ----------------
 */
Datum
nocachegetattr(HeapTuple tuple,
			   int attnum,
			   TupleDesc tupleDesc)
{
	HeapTupleHeader tup = tuple->t_data;
	Form_pg_attribute *att = tupleDesc->attrs;
	char	   *tp;				/* ptr to att in tuple */
	bits8	   *bp = tup->t_bits;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* do we have to walk nulls? */

	Assert(!is_heaptuple_memtuple(tuple));
	/* Cached offsets may or may not be set yet; the code below checks
	 * attcacheoff before relying on it. */

#ifdef IN_MACRO
/* This is handled in the macro */
	Assert(attnum > 0);

	if (isnull)
		*isnull = false;
#endif

	attnum--;

	/* ----------------
	 *	 Three cases:
	 *
	 *	 1: No nulls and no variable-width attributes.
	 *	 2: Has a null or a var-width AFTER att.
	 *	 3: Has nulls or var-widths BEFORE att.
	 * ----------------
	 */

	if (HeapTupleNoNulls(tuple))
	{
#ifdef IN_MACRO
/* This is handled in the macro */
		if (att[attnum]->attcacheoff != -1)
		{
			return fetchatt(att[attnum],
							(char *) tup + tup->t_hoff +
							att[attnum]->attcacheoff);
		}
#endif
	}
	else
	{
		/*
		 * there's a null somewhere in the tuple
		 *
		 * check to see if desired att is null
		 */

#ifdef IN_MACRO
/* This is handled in the macro */
		if (att_isnull(attnum, bp))
		{
			if (isnull)
				*isnull = true;
			return (Datum) NULL;
		}
#endif

		/*
		 * Now check to see if any preceding bits are null...
		 */
		{
			int			byte = attnum >> 3;
			int			finalbit = attnum & 0x07;

			/* check for nulls "before" final bit of last byte */
			if ((~bp[byte]) & ((1 << finalbit) - 1))
				slow = true;
			else
			{
				/* check for nulls in any "earlier" bytes */
				int			i;

				for (i = 0; i < byte; i++)
				{
					if (bp[i] != 0xFF)
					{
						slow = true;
						break;
					}
				}
			}
		}
	}

	tp = (char *) tup + tup->t_hoff;

	/*
	 * now check for any non-fixed length attrs before our attribute
	 */
	if (!slow)
	{
		/*
		 * If we get here, there are no nulls up to and including the target
		 * attribute.  If we have a cached offset, we can use it.
		 */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							tp + att[attnum]->attcacheoff);
		}

		/*
		 * Otherwise, check for non-fixed-length attrs up to and including
		 * target.	If there aren't any, it's safe to cheaply initialize the
		 * cached offsets for these attrs.
		 */
		if (HeapTupleHasVarWidth(tuple))
		{
			int			j;

			/*
			 * In for(), we test <= and not < because we want to see if we can
			 * go past it in initializing offsets.
			 */
			for (j = 0; j <= attnum; j++)
			{
				if (att[j]->attlen <= 0)
				{
					slow = true;
					break;
				}
			}
		}
	}

	/*
	 * If slow is false, and we got here, we know that we have a tuple with no
	 * nulls or var-widths before the target attribute. If possible, we also
	 * want to initialize the remainder of the attribute cached offset values.
	 */
	if (!slow)
	{
		int			j = 1;
		long		off;
		int			natts = HeapTupleHeaderGetNatts(tup);

		/*
		 * If we get here, we have a tuple with no nulls or var-widths up to
		 * and including the target attribute, so we can use the cached offset
		 * ... only we don't have it yet, or we'd not have got here.  Since
		 * it's cheap to compute offsets for fixed-width columns, we take the
		 * opportunity to initialize the cached offsets for *all* the leading
		 * fixed-width columns, in hope of avoiding future visits to this
		 * routine.
		 */

		/* this is always true */
		att[0]->attcacheoff = 0;

		while (j < attnum && att[j]->attcacheoff > 0)
			j++;

		off = att[j - 1]->attcacheoff + att[j - 1]->attlen;

		for (; j <= attnum ||
		/* Can we compute more?  We will probably need them */
			 (j < natts &&
			  att[j]->attcacheoff == -1 &&
			  (HeapTupleNoNulls(tuple) || !att_isnull(j, bp)) &&
			  (HeapTupleAllFixed(tuple) || att[j]->attlen > 0)); j++)
		{
			/* don't need to worry about shortvarlenas here since we're only
			 * looking at non-varlenas. Note that it's important that we check
			 * that the target attribute itself is a nonvarlena too since we
			 * can't use cached offsets for even the first varlena any more. */
			off = att_align(off, att[j]->attalign);

			att[j]->attcacheoff = off;

			off = att_addlength(off, att[j]->attlen, PointerGetDatum(tp + off));
		}

		return fetchatt(att[attnum], tp + att[attnum]->attcacheoff);
	}
	else
	{
		bool		usecache = true;
		int			off = 0;
		int			i;

		/* this is always true */
		att[0]->attcacheoff = 0;

		/*
		 * Now we know that we have to walk the tuple CAREFULLY.
		 *
		 * Note - This loop is a little tricky.  For each non-null attribute,
		 * we have to first account for alignment padding before the attr,
		 * then advance over the attr based on its length.	Nulls have no
		 * storage and no alignment padding either.  We can use/set
		 * attcacheoff until we reach either a null or a var-width attribute.
		 */

		for (i = 0; i < attnum; i++)
		{
			if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
			{
				usecache = false;
				continue;
			}

			/* If we know the next offset, we can skip the alignment calc */
			if (usecache && att[i]->attcacheoff != -1)
				off = att[i]->attcacheoff;
			else
			{
				/*
				 * If it's a varlena it may or may not be aligned, so check
				 * for something that looks like a padding byte before
				 * aligning.  If we're already aligned it may be the leading
				 * byte of a 4-byte header, but then the att_align is
				 * harmless.  Don't bother looking if it's not a varlena
				 * though.
				 */
				if (att[i]->attlen != -1 || !tp[off])
					off = att_align(off, att[i]->attalign);
				if (usecache && att[i]->attlen != -1)
					att[i]->attcacheoff = off;
			}

			if (att[i]->attlen < 0)
				usecache = false;

			off = att_addlength(off, att[i]->attlen, PointerGetDatum(tp + off));
		}

		if (att[attnum]->attlen != -1 || !tp[off])
			off = att_align(off, att[attnum]->attalign);

		return fetchatt(att[attnum], tp + off);
	}
}