Example #1
/* ----------------
 *		heap_attisnull	- returns TRUE iff tuple attribute is null
 *						  (or not physically present in the tuple)
 * ----------------
 */
bool
heap_attisnull(HeapTuple tup, int attnum)
{
	Assert(!is_memtuple((GenericTuple) tup));

	if (attnum > (int) HeapTupleHeaderGetNatts(tup->t_data))
		return true;

	if (attnum > 0)
	{
		if (HeapTupleNoNulls(tup))
			return false;
		return att_isnull(attnum - 1, tup->t_data->t_bits);
	}

	switch (attnum)
	{
		case TableOidAttributeNumber:
		case SelfItemPointerAttributeNumber:
		case ObjectIdAttributeNumber:
		case MinTransactionIdAttributeNumber:
		case MinCommandIdAttributeNumber:
		case MaxTransactionIdAttributeNumber:
		case MaxCommandIdAttributeNumber:
		case GpSegmentIdAttributeNumber:	/* CDB */
			/* these are never null */
			break;

		default:
			elog(ERROR, "invalid attnum: %d", attnum);
	}

	return false;
}
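
A minimal caller-side sketch, assuming the usual backend environment (the scan API shown follows the PostgreSQL 8.x-era signatures this code is based on; count_nulls_in_first_attr is a hypothetical helper):

/* Count NULLs in the first column of a relation by probing each tuple. */
static int
count_nulls_in_first_attr(Relation rel, Snapshot snapshot)
{
	HeapScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL);
	HeapTuple	tup;
	int			nulls = 0;

	while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		if (heap_attisnull(tup, 1))		/* attnum is 1-based */
			nulls++;
	}
	heap_endscan(scan);
	return nulls;
}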
Example #2
/* ----------------
 *		heaptuple_copy_to
 *
 *		returns a copy of an entire tuple, optionally into a
 *		caller-supplied buffer
 *
 * If dest is NULL, the HeapTuple struct, tuple header, and tuple data are
 * all allocated as a single palloc() block.  Otherwise the copy is placed
 * in dest; if *destlen is smaller than the required size, *destlen is set
 * to that size and NULL is returned.
 * ----------------
 */
HeapTuple
heaptuple_copy_to(HeapTuple tuple, HeapTuple dest, uint32 *destlen)
{
	HeapTuple	newTuple;
	uint32		len;

	if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL)
		return NULL;

	Assert(!is_memtuple((GenericTuple) tuple));

	len = HEAPTUPLESIZE + tuple->t_len;
	if (destlen && *destlen < len)
	{
		*destlen = len;
		return NULL;
	}

	if (destlen)
	{
		*destlen = len;
		newTuple = dest;
	}
	else
		newTuple = (HeapTuple) palloc(len);

	newTuple->t_len = tuple->t_len;
	newTuple->t_self = tuple->t_self;
	newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
	memcpy((char *) newTuple->t_data, (char *) tuple->t_data, tuple->t_len);
	return newTuple;
}
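
The dest/destlen protocol is built for reusable buffers: probe with the current buffer, and on a NULL return *destlen holds the required size. A sketch of the retry pattern (buf and buf_size are hypothetical caller state):

	uint32		len = buf_size;
	HeapTuple	copy = heaptuple_copy_to(tuple, (HeapTuple) buf, &len);

	if (copy == NULL)
	{
		/* buffer too small: len was bumped to the required size */
		buf = repalloc(buf, len);
		buf_size = len;
		copy = heaptuple_copy_to(tuple, (HeapTuple) buf, &len);
	}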
Example #3
static void
writetup_heap(Tuplestorestate *state, void *tup)
{
	uint32		tuplen = 0;
	Size		memsize = 0;

	if (is_memtuple((GenericTuple) tup))
		tuplen = memtuple_get_size((MemTuple) tup);
	else
	{
		Assert(!is_heaptuple_splitter((HeapTuple) tup));
		tuplen = heaptuple_get_size((HeapTuple) tup);
	}

	if (BufFileWrite(state->myfile, (void *) tup, tuplen) != (size_t) tuplen)
		elog(ERROR, "write failed");
	if (state->backward)		/* need trailing length word? */
		if (BufFileWrite(state->myfile, (void *) &tuplen,
						 sizeof(tuplen)) != sizeof(tuplen))
			elog(ERROR, "write failed");

	memsize = GetMemoryChunkSpace(tup);

	state->spilledBytes += memsize;
	FREEMEM(state, memsize);

	pfree(tup);
}
Example #4
/*
 * tuplestore_gettupleslot - exported function to fetch a tuple into a slot
 *
 * If successful, put tuple in slot and return TRUE; else, clear the slot
 * and return FALSE.
 *
 * If copy is TRUE, the slot receives a copied tuple (allocated in current
 * memory context) that will stay valid regardless of future manipulations of
 * the tuplestore's state.  If copy is FALSE, the slot may just receive a
 * pointer to a tuple held within the tuplestore.  The latter is more
 * efficient but the slot contents may be corrupted if additional writes to
 * the tuplestore occur.  (If using tuplestore_trim, see comments therein.)
 */
bool
tuplestore_gettupleslot(Tuplestorestate *state, bool forward,
						bool copy, TupleTableSlot *slot)
{
	GenericTuple tuple;
	bool		should_free;

	tuple = tuplestore_gettuple(state, forward, &should_free);

	if (tuple)
	{
		if (copy && !should_free)
		{
			if (is_memtuple(tuple))
				tuple = (GenericTuple) memtuple_copy_to((MemTuple) tuple, NULL, NULL);
			else
				tuple = (GenericTuple) heap_copytuple((HeapTuple) tuple);
			should_free = true;
		}
		ExecStoreGenericTuple(tuple, slot, should_free);
		return true;
	}
	else
	{
		ExecClearTuple(slot);
		return false;
	}
}
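
A drain loop illustrating the copy-flag trade-off from the header comment, as a sketch (state and slot are assumed to be set up elsewhere):

	/* copy=false skips a per-tuple palloc+memcpy, but the slot contents
	 * stay valid only while nothing else writes to or trims the store. */
	while (tuplestore_gettupleslot(state, true /* forward */, false /* copy */, slot))
	{
		/* ... process the slot here ... */
	}
	/* the slot has already been cleared when the loop falls through */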
Example #5
/*
 * heap_modify_tuple
 *		form a new tuple from an old tuple and a set of replacement values.
 *
 * The replValues, replIsnull, and doReplace arrays must be of the length
 * indicated by tupleDesc->natts.  The new tuple is constructed using the data
 * from replValues/replIsnull at columns where doReplace is true, and using
 * the data from the old tuple at columns where doReplace is false.
 *
 * The result is allocated in the current memory context.
 */
HeapTuple
heap_modify_tuple(HeapTuple tuple,
				  TupleDesc tupleDesc,
				  Datum *replValues,
				  bool *replIsnull,
				  bool *doReplace)
{
	int			numberOfAttributes = tupleDesc->natts;
	int			attoff;
	Datum	   *values;
	bool	   *isnull;
	HeapTuple	newTuple;

	Assert(!is_memtuple((GenericTuple) tuple));

	/*
	 * allocate and fill values and isnull arrays from either the tuple or the
	 * repl information, as appropriate.
	 *
	 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
	 * heap_getattr() on the non-replaced columns.  The latter could win if
	 * there are many replaced columns and few non-replaced ones. However,
	 * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
	 * O(N^2) if there are many non-replaced columns, so it seems better to
	 * err on the side of linear cost.
	 */
	values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
	isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));

	heap_deform_tuple(tuple, tupleDesc, values, isnull);

	for (attoff = 0; attoff < numberOfAttributes; attoff++)
	{
		if (doReplace[attoff])
		{
			values[attoff] = replValues[attoff];
			isnull[attoff] = replIsnull[attoff];
		}
	}

	/*
	 * create a new tuple from the values and isnull arrays
	 */
	newTuple = heap_form_tuple(tupleDesc, values, isnull);

	pfree(values);
	pfree(isnull);

	/*
	 * copy the identification info of the old tuple: t_ctid, t_self, and OID
	 * (if any)
	 */
	newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
	newTuple->t_self = tuple->t_self;
	if (tupleDesc->tdhasoid)
		HeapTupleSetOid(newTuple, HeapTupleGetOid(tuple));

	return newTuple;
}
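
A sketch of replacing a single column: all three arrays span every attribute, with doReplace false except at the changed column (array names are hypothetical; palloc0 leaves the bool arrays all-false):

	int		natts = tupleDesc->natts;
	Datum  *repl_values = (Datum *) palloc0(natts * sizeof(Datum));
	bool   *repl_isnull = (bool *) palloc0(natts * sizeof(bool));
	bool   *do_replace = (bool *) palloc0(natts * sizeof(bool));

	/* overwrite attribute 3 (arrays are 0-based, attnums 1-based) */
	repl_values[2] = Int32GetDatum(42);
	repl_isnull[2] = false;
	do_replace[2] = true;

	newTuple = heap_modify_tuple(oldTuple, tupleDesc, repl_values, repl_isnull, do_replace);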
Example #6
static void *
copytup_heap(Tuplestorestate *state, void *tup)
{
	if (!is_memtuple((GenericTuple) tup))
		return heaptuple_copy_to((HeapTuple) tup, NULL, NULL);
	else
		return memtuple_copy_to((MemTuple) tup, NULL, NULL);
}
Example #7
/* ----------------
 *		heap_getsysattr
 *
 *		Fetch the value of a system attribute for a tuple.
 *
 * This is a support routine for the heap_getattr macro.  The macro
 * has already determined that the attnum refers to a system attribute.
 * ----------------
 */
Datum
heap_getsysattr(HeapTuple tup, int attnum, bool *isnull)
{
	Datum		result;

	Assert(tup);
	Assert(!is_memtuple((GenericTuple) tup));

	/* Currently, no sys attribute ever reads as NULL. */
	if (isnull)
		*isnull = false;

	switch (attnum)
	{
		case SelfItemPointerAttributeNumber:
			/* pass-by-reference datatype */
			result = PointerGetDatum(&(tup->t_self));
			break;
		case ObjectIdAttributeNumber:
			result = ObjectIdGetDatum(HeapTupleGetOid(tup));
			break;
		case MinTransactionIdAttributeNumber:
			result = TransactionIdGetDatum(HeapTupleHeaderGetXmin(tup->t_data));
			break;
		case MaxTransactionIdAttributeNumber:
			result = TransactionIdGetDatum(HeapTupleHeaderGetXmax(tup->t_data));
			break;
		case MinCommandIdAttributeNumber:
		case MaxCommandIdAttributeNumber:

			/*
			 * cmin and cmax are now both aliases for the same field, which
			 * can in fact also be a combo command id.	XXX perhaps we should
			 * return the "real" cmin or cmax if possible, that is if we are
			 * inside the originating transaction?
			 */
			result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
			break;
		case TableOidAttributeNumber:
			/* CDB: must now use a TupleTableSlot to access 'tableoid' */
			result = ObjectIdGetDatum(InvalidOid);
			elog(ERROR, "Invalid reference to \"tableoid\" system attribute");
			break;
		case GpSegmentIdAttributeNumber:	/* CDB */
			result = Int32GetDatum(Gp_segment);
			break;
		default:
			elog(ERROR, "invalid attnum: %d", attnum);
			result = 0;			/* keep compiler quiet */
			break;
	}
	return result;
}
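
For example, a tuple's xmin is fetched through this path when heap_getattr sees a negative attnum; a sketch, assuming a valid heap tuple:

	bool			isnull;
	Datum			d = heap_getsysattr(tup, MinTransactionIdAttributeNumber, &isnull);
	TransactionId	xmin = DatumGetTransactionId(d);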
Example #8
/* ----------------
 *		heap_copytuple_with_tuple
 *
 *		copy a tuple into a caller-supplied HeapTuple management struct
 *
 * Note that after calling this function, the "dest" HeapTuple will not be
 * allocated as a single palloc() block (unlike with heap_copytuple()).
 * ----------------
 */
void
heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
{
	if (!HeapTupleIsValid(src) || src->t_data == NULL)
	{
		dest->t_data = NULL;
		return;
	}

	Assert(!is_memtuple((GenericTuple) src));

	dest->t_len = src->t_len;
	dest->t_self = src->t_self;
	dest->t_data = (HeapTupleHeader) palloc(src->t_len);
	memcpy((char *) dest->t_data, (char *) src->t_data, src->t_len);
}
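
Because only the body is palloc'd here, the management struct can live on the stack, and freeing the copy means freeing just t_data. A sketch:

	HeapTupleData	copy;		/* caller-supplied management struct */

	heap_copytuple_with_tuple(src, &copy);
	if (copy.t_data != NULL)
	{
		/* ... use &copy like any other HeapTuple ... */
		pfree(copy.t_data);		/* only the body was allocated */
	}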
Example #9
/*
 * Serialize a tuple directly into a buffer.
 *
 * We're called with at least enough space for a tuple-chunk-header.
 */
int
SerializeTupleDirect(GenericTuple gtuple, SerTupInfo *pSerInfo, struct directTransportBuffer *b)
{
	int natts;
	int dataSize = TUPLE_CHUNK_HEADER_SIZE;
	TupleDesc	tupdesc;

	AssertArg(gtuple != NULL);
	AssertArg(pSerInfo != NULL);
	AssertArg(b != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	do
	{
		if (natts == 0)
		{
			/* TC_EMPTY is just one chunk */
			SetChunkType(b->pri, TC_EMPTY);
			SetChunkDataSize(b->pri, 0);

			break;
		}

		/* easy case */
		if (is_memtuple(gtuple))
		{
			MemTuple	tuple = (MemTuple) gtuple;
			int			tupleSize;
			int			paddedSize;

			tupleSize = memtuple_get_size(tuple);
			paddedSize = TYPEALIGN(TUPLE_CHUNK_ALIGN, tupleSize);

			if (paddedSize + TUPLE_CHUNK_HEADER_SIZE > b->prilen)
				return 0;

			/* will fit. */
			memcpy(b->pri + TUPLE_CHUNK_HEADER_SIZE, tuple, tupleSize);
			memset(b->pri + TUPLE_CHUNK_HEADER_SIZE + tupleSize, 0, paddedSize - tupleSize);

			dataSize += paddedSize;

			SetChunkType(b->pri, TC_WHOLE);
			SetChunkDataSize(b->pri, dataSize - TUPLE_CHUNK_HEADER_SIZE);
			break;
		}
		else
		{
			HeapTuple	tuple = (HeapTuple) gtuple;
			TupSerHeader tsh;

			unsigned int	datalen;
			unsigned int	nullslen;

			HeapTupleHeader t_data = tuple->t_data;

			unsigned char *pos;

			datalen = tuple->t_len - t_data->t_hoff;
			if (HeapTupleHasNulls(tuple))
				nullslen = BITMAPLEN(HeapTupleHeaderGetNatts(t_data));
			else
				nullslen = 0;

			tsh.tuplen = sizeof(TupSerHeader) + TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) + TYPEALIGN(TUPLE_CHUNK_ALIGN, datalen);
			tsh.natts = HeapTupleHeaderGetNatts(t_data);
			tsh.infomask = t_data->t_infomask;

			if (dataSize + tsh.tuplen > b->prilen ||
				(tsh.infomask & HEAP_HASEXTERNAL) != 0)
				return 0;

			pos = b->pri + TUPLE_CHUNK_HEADER_SIZE;

			memcpy(pos, (char *)&tsh, sizeof(TupSerHeader));
			pos += sizeof(TupSerHeader);

			if (nullslen)
			{
				memcpy(pos, (char *)t_data->t_bits, nullslen);
				pos += nullslen;
				memset(pos, 0, TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) - nullslen);
				pos += TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) - nullslen;
			}

			memcpy(pos,  (char *)t_data + t_data->t_hoff, datalen);
			pos += datalen;
			memset(pos, 0, TYPEALIGN(TUPLE_CHUNK_ALIGN, datalen) - datalen);
			pos += TYPEALIGN(TUPLE_CHUNK_ALIGN, datalen) - datalen;

			dataSize += tsh.tuplen;

			SetChunkType(b->pri, TC_WHOLE);
			SetChunkDataSize(b->pri, dataSize - TUPLE_CHUNK_HEADER_SIZE);

			break;
		}

		/* tuple that we can't handle here (big ?) -- do the older "out-of-line" serialization */
		return 0;
	}
	while (0);

	return dataSize;
}
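
The return convention matters to callers: 0 means the tuple either didn't fit in the direct buffer or carries external toast pointers, so the sender must fall back to chunked serialization. A sketch of that contract (b is a transport buffer obtained elsewhere; the chunk-list value type is assumed from the same module):

	int		sent = SerializeTupleDirect(gtuple, pSerInfo, b);

	if (sent == 0)
	{
		TupleChunkListData	tcList;

		SerializeTupleIntoChunks(gtuple, pSerInfo, &tcList);
		/* ... transmit the chunks in tcList, then release them ... */
	}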
Example #10
/*
 * Convert a HeapTuple into a byte-sequence, and store it directly
 * into a chunklist for transmission.
 *
 * This code is based on the printtup_internal_20() function in printtup.c.
 */
void
SerializeTupleIntoChunks(GenericTuple gtuple, SerTupInfo *pSerInfo, TupleChunkList tcList)
{
	TupleChunkListItem tcItem = NULL;
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	int			i,
		natts;

	AssertArg(tcList != NULL);
	AssertArg(gtuple != NULL);
	AssertArg(pSerInfo != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/* get ready to go */
	tcList->p_first = NULL;
	tcList->p_last = NULL;
	tcList->num_chunks = 0;
	tcList->serialized_data_length = 0;
	tcList->max_chunk_length = Gp_max_tuple_chunk_size;

	if (natts == 0)
	{
		tcItem = getChunkFromCache(&pSerInfo->chunkCache);
		if (tcItem == NULL)
		{
			ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
							errmsg("Could not allocate space for first chunk item in new chunk list.")));
		}

		/* TC_EMPTY is just one chunk */
		SetChunkType(tcItem->chunk_data, TC_EMPTY);
		tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
		appendChunkToTCList(tcList, tcItem);

		return;
	}

	tcItem = getChunkFromCache(&pSerInfo->chunkCache);
	if (tcItem == NULL)
	{
		ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
						errmsg("Could not allocate space for first chunk item in new chunk list.")));
	}

	/* assume that we'll take a single chunk */
	SetChunkType(tcItem->chunk_data, TC_WHOLE);
	tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
	appendChunkToTCList(tcList, tcItem);

	AssertState(s_tupSerMemCtxt != NULL);

	if (is_memtuple(gtuple))
	{
		MemTuple mtuple = (MemTuple) gtuple;
		addByteStringToChunkList(tcList, (char *) mtuple, memtuple_get_size(mtuple), &pSerInfo->chunkCache);
		addPadding(tcList, &pSerInfo->chunkCache, memtuple_get_size(mtuple));
	}
	else
	{
		HeapTuple tuple = (HeapTuple) gtuple;
		HeapTupleHeader t_data = tuple->t_data;
		TupSerHeader tsh;

		unsigned int	datalen;
		unsigned int	nullslen;

		datalen = tuple->t_len - t_data->t_hoff;
		if (HeapTupleHasNulls(tuple))
			nullslen = BITMAPLEN(HeapTupleHeaderGetNatts(t_data));
		else
			nullslen = 0;

		tsh.tuplen = sizeof(TupSerHeader) + TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) + datalen;
		tsh.natts = HeapTupleHeaderGetNatts(t_data);
		tsh.infomask = t_data->t_infomask;

		addByteStringToChunkList(tcList, (char *)&tsh, sizeof(TupSerHeader), &pSerInfo->chunkCache);
		/*
		 * If we don't have any attributes which have been toasted, we can
		 * be very simple: just send the raw data.
		 */
		if ((tsh.infomask & HEAP_HASEXTERNAL) == 0)
		{
			if (nullslen)
			{
				addByteStringToChunkList(tcList, (char *) t_data->t_bits, nullslen, &pSerInfo->chunkCache);
				addPadding(tcList, &pSerInfo->chunkCache, nullslen);
			}

			addByteStringToChunkList(tcList, (char *) t_data + t_data->t_hoff, datalen, &pSerInfo->chunkCache);
			addPadding(tcList, &pSerInfo->chunkCache, datalen);
		}
		else
		{
			/*
			 * We have to be more careful when we have tuples that have been
			 * toasted.  Ideally we'd like to send the untoasted attributes
			 * in as "raw" a format as possible, but that makes rebuilding
			 * the tuple harder.
			 */
			oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

			/* deconstruct the tuple (faster than a heap_getattr loop) */
			heap_deform_tuple(tuple, tupdesc, pSerInfo->values, pSerInfo->nulls);

			MemoryContextSwitchTo(oldCtxt);

			/* Send the nulls character-array. */
			addByteStringToChunkList(tcList, pSerInfo->nulls, natts, &pSerInfo->chunkCache);
			addPadding(tcList, &pSerInfo->chunkCache, natts);

			/*
			 * send the attributes of this tuple: NOTE anything which allocates
			 * temporary space (e.g. could result in a PG_DETOAST_DATUM) should be
			 * executed with the memory context set to s_tupSerMemCtxt
			 */
			for (i = 0; i < natts; ++i)
			{
				SerAttrInfo *attrInfo = pSerInfo->myinfo + i;
				Datum		origattr = pSerInfo->values[i],
					attr;

				/* skip null attributes (already taken care of above) */
				if (pSerInfo->nulls[i])
					continue;

				if (attrInfo->typlen == -1)
				{
					int32		sz;
					char	   *data;

					/*
					 * If we have a toasted datum, forcibly detoast it here to avoid
					 * memory leakage: we want to force the detoast allocation(s) to
					 * happen in our reset-able serialization context.
					 */
					oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);
					attr = PointerGetDatum(PG_DETOAST_DATUM_PACKED(origattr));
					MemoryContextSwitchTo(oldCtxt);

					sz = VARSIZE_ANY_EXHDR(attr);
					data = VARDATA_ANY(attr);

					/* Send length first, then data */
					addInt32ToChunkList(tcList, sz, &pSerInfo->chunkCache);
					addByteStringToChunkList(tcList, data, sz, &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, sz);
				}
				else if (attrInfo->typlen == -2)
				{
					int32		sz;
					char	   *data;

					/* CString, we would send the string with the terminating '\0' */
					data = DatumGetCString(origattr);
					sz = strlen(data) + 1;

					/* Send length first, then data */
					addInt32ToChunkList(tcList, sz, &pSerInfo->chunkCache);
					addByteStringToChunkList(tcList, data, sz, &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, sz);
				}
				else if (attrInfo->typbyval)
				{
					/*
					 * We send a full-width Datum for all pass-by-value types, regardless of
					 * the actual size.
					 */
					addByteStringToChunkList(tcList, (char *) &origattr, sizeof(Datum), &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, sizeof(Datum));
				}
				else
				{
					addByteStringToChunkList(tcList, DatumGetPointer(origattr), attrInfo->typlen, &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, attrInfo->typlen);
				}
			}

			MemoryContextReset(s_tupSerMemCtxt);
		}
	}

	/*
	 * if we have more than 1 chunk we have to set the chunk types on our
	 * first chunk and last chunk
	 */
	if (tcList->num_chunks > 1)
	{
		TupleChunkListItem first,
			last;

		first = tcList->p_first;
		last = tcList->p_last;

		Assert(first != NULL);
		Assert(first != last);
		Assert(last != NULL);

		SetChunkType(first->chunk_data, TC_PARTIAL_START);
		SetChunkType(last->chunk_data, TC_PARTIAL_END);

		/*
		 * any intervening chunks are already set to TC_PARTIAL_MID when
		 * allocated
		 */
	}

	return;
}
Example #11
/*
 * heap_deform_tuple
 *		Given a tuple, extract data into values/isnull arrays; this is
 *		the inverse of heap_form_tuple.
 *
 *		Storage for the values/isnull arrays is provided by the caller;
 *		it should be sized according to tupleDesc->natts not tuple->t_natts.
 *
 *		Note that for pass-by-reference datatypes, the pointer placed
 *		in the Datum will point into the given tuple.
 *
 *		When all or most of a tuple's fields need to be extracted,
 *		this routine will be significantly quicker than a loop around
 *		heap_getattr; the loop will become O(N^2) as soon as any
 *		noncacheable attribute offsets are involved.
 */
void
heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
				  Datum *values, bool *isnull)
{
	HeapTupleHeader tup = tuple->t_data;
	bool		hasnulls = HeapTupleHasNulls(tuple);
	Form_pg_attribute *att = tupleDesc->attrs;
	int			tdesc_natts = tupleDesc->natts;
	int			natts;			/* number of atts to extract */
	int			attnum;
	char	   *tp;				/* ptr to tuple data */
	long		off;			/* offset in tuple data */
	bits8	   *bp = tup->t_bits;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* can we use/set attcacheoff? */

	Assert(!is_memtuple((GenericTuple) tuple));
	natts = HeapTupleHeaderGetNatts(tup);

	/*
	 * In inheritance situations, it is possible that the given tuple actually
	 * has more fields than the caller is expecting.  Don't run off the end of
	 * the caller's arrays.
	 */
	natts = Min(natts, tdesc_natts);

	tp = (char *) tup + tup->t_hoff;

	off = 0;

	for (attnum = 0; attnum < natts; attnum++)
	{
		Form_pg_attribute thisatt = att[attnum];

		if (hasnulls && att_isnull(attnum, bp))
		{
			values[attnum] = (Datum) 0;
			isnull[attnum] = true;
			slow = true;		/* can't use attcacheoff anymore */
			continue;
		}

		isnull[attnum] = false;

		if (!slow && thisatt->attcacheoff >= 0)
			off = thisatt->attcacheoff;
		else if (thisatt->attlen == -1)
		{
			/*
			 * We can only cache the offset for a varlena attribute if the
			 * offset is already suitably aligned, so that there would be no
			 * pad bytes in any case: then the offset will be valid for either
			 * an aligned or unaligned value.
			 */
			if (!slow &&
				off == att_align_nominal(off, thisatt->attalign))
				thisatt->attcacheoff = off;
			else
			{
				off = att_align_pointer(off, thisatt->attalign, -1,
										tp + off);
				slow = true;
			}
		}
		else
		{
			/* not varlena, so safe to use att_align_nominal */
			off = att_align_nominal(off, thisatt->attalign);

			if (!slow)
				thisatt->attcacheoff = off;
		}

		if (!slow && thisatt->attlen < 0)
			slow = true;

		values[attnum] = fetchatt(thisatt, tp + off);

		off = att_addlength_pointer(off, thisatt->attlen, tp + off);
	}

	/*
	 * If tuple doesn't have all the atts indicated by tupleDesc, read the
	 * rest as null
	 */
	for (; attnum < tdesc_natts; attnum++)
	{
		values[attnum] = (Datum) 0;
		isnull[attnum] = true;
	}
}
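
Typical call pattern: size both output arrays from the descriptor, not from the tuple, so columns added after the tuple was stored come back as NULLs. A sketch (process_column is a hypothetical consumer):

	int		natts = tupleDesc->natts;
	Datum  *values = (Datum *) palloc(natts * sizeof(Datum));
	bool   *isnull = (bool *) palloc(natts * sizeof(bool));
	int		i;

	heap_deform_tuple(tuple, tupleDesc, values, isnull);

	for (i = 0; i < natts; i++)
	{
		if (!isnull[i])
			process_column(i, values[i]);	/* by-ref Datums point into the tuple */
	}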
Example #12
/*
 * heaptuple_form_to
 *		construct a tuple from the given values[] and isnull[] arrays,
 *		which are of the length indicated by tupleDescriptor->natts,
 *		optionally into a caller-supplied buffer
 *
 * If dst is NULL, the result is allocated in the current memory context.
 * Otherwise the tuple is built in dst; if *dstlen is smaller than the
 * required size, *dstlen is set to that size and NULL is returned.
 */
HeapTuple
heaptuple_form_to(TupleDesc tupleDescriptor, Datum *values, bool *isnull, HeapTuple dst, uint32 *dstlen)
{
	HeapTuple	tuple;			/* return tuple */
	HeapTupleHeader td;			/* tuple data */
	Size		actual_len;
	Size		len,
				data_len;
	int			hoff;
	bool		hasnull = false;
	Form_pg_attribute *att = tupleDescriptor->attrs;
	int			numberOfAttributes = tupleDescriptor->natts;
	int			i;

	if (numberOfAttributes > MaxTupleAttributeNumber)
		ereport(ERROR,
				(errcode(ERRCODE_TOO_MANY_COLUMNS),
				 errmsg("number of columns (%d) exceeds limit (%d)",
						numberOfAttributes, MaxTupleAttributeNumber)));

	/*
	 * Check for nulls and embedded tuples; expand any toasted attributes in
	 * embedded tuples.  This preserves the invariant that toasting can only
	 * go one level deep.
	 *
	 * We can skip calling toast_flatten_tuple_attribute() if the attribute
	 * couldn't possibly be of composite type.  All composite datums are
	 * varlena and have alignment 'd'; furthermore they aren't arrays. Also,
	 * if an attribute is already toasted, it must have been sent to disk
	 * already and so cannot contain toasted attributes.
	 */
	for (i = 0; i < numberOfAttributes; i++)
	{
		if (isnull[i])
			hasnull = true;
		else if (att[i]->attlen == -1 &&
				 att[i]->attalign == 'd' &&
				 att[i]->attndims == 0 &&
				 !VARATT_IS_EXTENDED(DatumGetPointer(values[i])))
		{
			values[i] = toast_flatten_tuple_attribute(values[i],
													  att[i]->atttypid,
													  att[i]->atttypmod);
		}
	}

	/*
	 * Determine total space needed
	 */
	len = offsetof(HeapTupleHeaderData, t_bits);

	if (hasnull)
		len += BITMAPLEN(numberOfAttributes);

	if (tupleDescriptor->tdhasoid)
		len += sizeof(Oid);

	hoff = len = MAXALIGN(len); /* align user data safely */

	data_len = heap_compute_data_size(tupleDescriptor, values, isnull);

	len += data_len;

	if (dstlen && (*dstlen) < (HEAPTUPLESIZE + len))
	{
		*dstlen = HEAPTUPLESIZE + len;
		return NULL;
	}

	if (dstlen)
	{
		*dstlen = HEAPTUPLESIZE + len;
		tuple = dst;
		memset(tuple, 0, HEAPTUPLESIZE + len);
	}
	else
		tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);

	/*
	 * Point t_data at the tuple body.  Note that the tuple body and the
	 * HeapTupleData management structure occupy a single zeroed chunk.
	 */
	tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);

	/*
	 * And fill in the information.  Note we fill the Datum fields even though
	 * this tuple may never become a Datum.
	 */
	tuple->t_len = len;
	ItemPointerSetInvalid(&(tuple->t_self));

	HeapTupleHeaderSetDatumLength(td, len);
	HeapTupleHeaderSetTypeId(td, tupleDescriptor->tdtypeid);
	HeapTupleHeaderSetTypMod(td, tupleDescriptor->tdtypmod);

	HeapTupleHeaderSetNatts(td, numberOfAttributes);
	td->t_hoff = hoff;

	if (tupleDescriptor->tdhasoid)		/* else leave infomask = 0 */
		td->t_infomask = HEAP_HASOID;

	actual_len = heap_fill_tuple(tupleDescriptor,
								 values,
								 isnull,
								 (char *) td + hoff,
								 data_len,
								 &td->t_infomask,
								 (hasnull ? td->t_bits : NULL));

	Assert(data_len == actual_len);
	Assert(!is_memtuple((GenericTuple) tuple));

	return tuple;
}
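
Passing dst = NULL and dstlen = NULL yields the plain heap_form_tuple behavior; with a preallocated buffer the same probe-then-retry pattern as heaptuple_copy_to applies. A sketch (buf and buf_size are hypothetical caller state):

	/* simple case: let the function palloc the result */
	HeapTuple	tup = heaptuple_form_to(tupleDescriptor, values, isnull, NULL, NULL);

	/* buffer case: a too-small *dstlen is bumped to the required size */
	uint32		len = buf_size;
	if (heaptuple_form_to(tupleDescriptor, values, isnull, (HeapTuple) buf, &len) == NULL)
	{
		buf = repalloc(buf, len);
		(void) heaptuple_form_to(tupleDescriptor, values, isnull, (HeapTuple) buf, &len);
	}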
Example #13
/* ----------------
 *		nocachegetattr
 *
 *		This only gets called from the fastgetattr() macro, in cases where
 *		we can't use a cached offset and the value is not null.
 *
 *		This caches attribute offsets in the attribute descriptor.
 *
 *		An alternative way to speed things up would be to cache offsets
 *		with the tuple, but that seems more difficult unless you take
 *		the storage hit of actually putting those offsets into the
 *		tuple you send to disk.  Yuck.
 *
 *		This scheme will be slightly slower than that, but should
 *		perform well for queries which hit large #'s of tuples.  After
 *		you cache the offsets once, examining all the other tuples using
 *		the same attribute descriptor will go much quicker. -cim 5/4/91
 *
 *		NOTE: if you need to change this code, see also heap_deform_tuple.
 *		Also see nocache_index_getattr, which is the same code for index
 *		tuples.
 * ----------------
 */
Datum
nocachegetattr(HeapTuple tuple,
			   int attnum,
			   TupleDesc tupleDesc)
{
	HeapTupleHeader tup = tuple->t_data;
	Form_pg_attribute *att = tupleDesc->attrs;
	char	   *tp;				/* ptr to data part of tuple */
	bits8	   *bp = tup->t_bits;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* do we have to walk attrs? */
	int			off;			/* current offset within data */

	Assert(!is_memtuple((GenericTuple) tuple));

	/* ----------------
	 *	 Three cases:
	 *
	 *	 1: No nulls and no variable-width attributes.
	 *	 2: Has a null or a var-width AFTER att.
	 *	 3: Has nulls or var-widths BEFORE att.
	 * ----------------
	 */

#ifdef IN_MACRO
/* This is handled in the macro */
	Assert(attnum > 0);

	if (isnull)
		*isnull = false;
#endif

	attnum--;

	if (HeapTupleNoNulls(tuple))
	{
#ifdef IN_MACRO
/* This is handled in the macro */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							(char *) tup + tup->t_hoff +
							att[attnum]->attcacheoff);
		}
#endif
	}
	else
	{
		/*
		 * there's a null somewhere in the tuple
		 *
		 * check to see if desired att is null
		 */

#ifdef IN_MACRO
/* This is handled in the macro */
		if (att_isnull(attnum, bp))
		{
			if (isnull)
				*isnull = true;
			return (Datum) NULL;
		}
#endif

		/*
		 * Now check to see if any preceding bits are null...
		 */
		{
			int			byte = attnum >> 3;
			int			finalbit = attnum & 0x07;

			/* check for nulls "before" final bit of last byte */
			if ((~bp[byte]) & ((1 << finalbit) - 1))
				slow = true;
			else
			{
				/* check for nulls in any "earlier" bytes */
				int			i;

				for (i = 0; i < byte; i++)
				{
					if (bp[i] != 0xFF)
					{
						slow = true;
						break;
					}
				}
			}
		}
	}

	tp = (char *) tup + tup->t_hoff;

	if (!slow)
	{
		/*
		 * If we get here, there are no nulls up to and including the target
		 * attribute.  If we have a cached offset, we can use it.
		 */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							tp + att[attnum]->attcacheoff);
		}

		/*
		 * Otherwise, check for non-fixed-length attrs up to and including
		 * target.	If there aren't any, it's safe to cheaply initialize the
		 * cached offsets for these attrs.
		 */
		if (HeapTupleHasVarWidth(tuple))
		{
			int			j;

			for (j = 0; j <= attnum; j++)
			{
				if (att[j]->attlen <= 0)
				{
					slow = true;
					break;
				}
			}
		}
	}

	if (!slow)
	{
		int			natts = tupleDesc->natts;
		int			j = 1;

		/*
		 * If we get here, we have a tuple with no nulls or var-widths up to
		 * and including the target attribute, so we can use the cached offset
		 * ... only we don't have it yet, or we'd not have got here.  Since
		 * it's cheap to compute offsets for fixed-width columns, we take the
		 * opportunity to initialize the cached offsets for *all* the leading
		 * fixed-width columns, in hope of avoiding future visits to this
		 * routine.
		 */
		att[0]->attcacheoff = 0;

		/* we might have set some offsets in the slow path previously */
		while (j < natts && att[j]->attcacheoff > 0)
			j++;

		off = att[j - 1]->attcacheoff + att[j - 1]->attlen;

		for (; j < natts; j++)
		{
			if (att[j]->attlen <= 0)
				break;

			off = att_align_nominal(off, att[j]->attalign);

			att[j]->attcacheoff = off;

			off += att[j]->attlen;
		}

		Assert(j > attnum);

		off = att[attnum]->attcacheoff;
	}
	else
	{
		bool		usecache = true;
		int			i;

		/* this is always true */
		att[0]->attcacheoff = 0;

		/*
		 * Now we know that we have to walk the tuple CAREFULLY.  But we still
		 * might be able to cache some offsets for next time.
		 *
		 * Note - This loop is a little tricky.  For each non-null attribute,
		 * we have to first account for alignment padding before the attr,
		 * then advance over the attr based on its length.	Nulls have no
		 * storage and no alignment padding either.  We can use/set
		 * attcacheoff until we reach either a null or a var-width attribute.
		 */
		off = 0;
		for (i = 0;; i++)		/* loop exit is at "break" */
		{
			if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
			{
				usecache = false;
				continue;		/* this cannot be the target att */
			}

			/* If we know the next offset, we can skip the rest */
			if (usecache && att[i]->attcacheoff >= 0)
				off = att[i]->attcacheoff;
			else if (att[i]->attlen == -1)
			{
				/*
				 * We can only cache the offset for a varlena attribute if the
				 * offset is already suitably aligned, so that there would be
				 * no pad bytes in any case: then the offset will be valid for
				 * either an aligned or unaligned value.
				 */
				if (usecache &&
					off == att_align_nominal(off, att[i]->attalign))
					att[i]->attcacheoff = off;
				else
				{
					off = att_align_pointer(off, att[i]->attalign, -1,
											tp + off);
					usecache = false;
				}
			}
			else
			{
				/* not varlena, so safe to use att_align_nominal */
				off = att_align_nominal(off, att[i]->attalign);

				if (usecache)
					att[i]->attcacheoff = off;
			}

			if (i == attnum)
				break;

			off = att_addlength_pointer(off, att[i]->attlen, tp + off);

			if (usecache && att[i]->attlen <= 0)
				usecache = false;
		}
	}

	return fetchatt(att[attnum], tp + off);
}
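
Callers don't normally invoke this directly: the heap_getattr/fastgetattr macros handle the cached-offset and null fast paths and only fall through to nocachegetattr on the slow path. A sketch of the usual entry point:

	bool		isnull;
	Datum		d = heap_getattr(tup, attnum, tupleDesc, &isnull);

	if (!isnull)
	{
		/* ... use d; nocachegetattr ran only if no cached offset applied ... */
	}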