Code example #1
File: cube.c Project: adam8157/gpdb
/*
** Allows the construction of a zero-volume cube from a float[]
*/
Datum
cube_a_f8(PG_FUNCTION_ARGS)
{
	ArrayType  *ur = PG_GETARG_ARRAYTYPE_P(0);
	NDBOX	   *result;
	int			i;
	int			dim;
	int			size;
	double	   *dur;

	if (array_contains_nulls(ur))
		ereport(ERROR,
				(errcode(ERRCODE_ARRAY_ELEMENT_ERROR),
				 errmsg("cannot work with arrays containing NULLs")));

	dim = ARRNELEMS(ur);
	if (dim > CUBE_MAX_DIM)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("array is too long"),
				 errdetail("A cube cannot have more than %d dimensions.",
							   CUBE_MAX_DIM)));

	dur = ARRPTR(ur);

	size = POINT_SIZE(dim);
	result = (NDBOX *) palloc0(size);
	SET_VARSIZE(result, size);
	SET_DIM(result, dim);
	SET_POINT_BIT(result);

	for (i = 0; i < dim; i++)
		result->x[i] = dur[i];

	PG_RETURN_NDBOX(result);
}
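
All of the examples on this page share one construction pattern: compute the total size including the VARHDRSZ-byte varlena header, palloc() that much memory, stamp the length with SET_VARSIZE, fill in the payload, and return the pointer as a Datum. A minimal sketch of that pattern for a plain bytea (the helper name make_bytea is illustrative and not taken from any of the projects above):

#include "postgres.h"

/*
 * Illustrative helper: wrap an arbitrary buffer in a freshly palloc'd
 * bytea.  This is the palloc + SET_VARSIZE pattern shared by the
 * examples on this page, not code from any of the listed projects.
 */
static bytea *
make_bytea(const char *data, int len)
{
	bytea	   *result = (bytea *) palloc(VARHDRSZ + len);

	SET_VARSIZE(result, VARHDRSZ + len);	/* total size, header included */
	memcpy(VARDATA(result), data, len);		/* payload starts after the header */

	return result;
}
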
Code example #2
File: rdkit_gist.c Project: Acpharis/rdkit
/*
 * Compress/decompress
 */
static GISTENTRY*
compressAllTrue(GISTENTRY *entry) 
{
  GISTENTRY  *retval = entry;

  bytea   *b = (bytea*)DatumGetPointer(entry->key);
  unsigned char *sign = (unsigned char*)VARDATA(b);
  int i;
                

  for(i=0; i<SIGLEN(b); i++)
    if ( sign[i] != 0xff )
      return retval;

  retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
  b = palloc(VARHDRSZ);
  SET_VARSIZE(b, VARHDRSZ);

  gistentryinit(*retval, PointerGetDatum(b),
                entry->rel, entry->page,
                entry->offset, FALSE);

  return retval;
}
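
The header-only bytea built here (VARHDRSZ bytes, no payload) acts as an "all bits set" sentinel key. A consumer can recognize it purely by its size; a short sketch of that check, assuming the sentinel is identified by VARSIZE alone as the code above implies:

#include "postgres.h"

/*
 * Sketch: true when a signature key was collapsed to the header-only
 * bytea emitted by compressAllTrue() above.
 */
static bool
signature_is_all_true(bytea *key)
{
	return VARSIZE(key) == VARHDRSZ;	/* no payload bytes at all */
}
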
Code example #3
File: cube.c Project: AXLEproject/postgres
/* Add a dimension to an existing cube with the same values for the new
   coordinate */
Datum
cube_c_f8(PG_FUNCTION_ARGS)
{
	NDBOX	   *c = PG_GETARG_NDBOX(0);
	double		x = PG_GETARG_FLOAT8(1);
	NDBOX	   *result;
	int			size;
	int			i;

	size = offsetof(NDBOX, x[0]) + sizeof(double) * (c->dim + 1) * 2;
	result = (NDBOX *) palloc0(size);
	SET_VARSIZE(result, size);
	result->dim = c->dim + 1;
	for (i = 0; i < c->dim; i++)
	{
		result->x[i] = c->x[i];
		result->x[result->dim + i] = c->x[c->dim + i];
	}
	result->x[result->dim - 1] = x;
	result->x[2 * result->dim - 1] = x;

	PG_FREE_IF_COPY(c, 0);
	PG_RETURN_NDBOX(result);
}
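
The size arithmetic reflects the NDBOX layout this version of cube uses: a varlena header and dimension count followed by 2 * dim doubles (the lower-left corner, then the upper-right corner), so extending the cube needs room for 2 * (dim + 1) coordinates. A worked sketch of that computation (the struct below is a simplified stand-in so the offsetof() is self-contained; the real definition lives in contrib/cube/cubedata.h):

#include "postgres.h"

/* Simplified stand-in for the pre-9.2 NDBOX layout used above. */
typedef struct sketch_NDBOX
{
	int32		vl_len_;		/* varlena header */
	unsigned int dim;			/* number of dimensions */
	double		x[1];			/* 2 * dim coordinates follow */
} sketch_NDBOX;

/*
 * Size needed by cube_c_f8() when extending a cube of 'dim' dimensions
 * by one dimension: header plus 2 * (dim + 1) doubles.  For dim = 2
 * this reserves room for 6 coordinates.
 */
static Size
cube_c_f8_size(int dim)
{
	return offsetof(sketch_NDBOX, x[0]) + sizeof(double) * (dim + 1) * 2;
}
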
Code example #4
Datum
pg_random_bytes(PG_FUNCTION_ARGS)
{
	int			err;
	int			len = PG_GETARG_INT32(0);
	bytea	   *res;

	if (len < 1 || len > 1024)
		ereport(ERROR,
				(errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
				 errmsg("Length not in range")));

	res = palloc(VARHDRSZ + len);
	SET_VARSIZE(res, VARHDRSZ + len);

	/* generate result */
	err = px_get_random_bytes((uint8 *) VARDATA(res), len);
	if (err < 0)
		ereport(ERROR,
				(errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
				 errmsg("Random generator error: %s", px_strerror(err))));

	PG_RETURN_BYTEA_P(res);
}
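
To be callable from SQL, a function like this also needs the version-1 calling-convention boilerplate plus a CREATE FUNCTION declaration. A sketch of the usual wiring (the SQL-level name gen_random_bytes and the use of MODULE_PATHNAME are assumptions for illustration, not part of the snippet above):

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;						/* required once per loadable module */

PG_FUNCTION_INFO_V1(pg_random_bytes);	/* export with the V1 calling convention */

/*
 * Matching SQL declaration, typically placed in the extension's SQL script:
 *
 *   CREATE FUNCTION gen_random_bytes(int)
 *   RETURNS bytea
 *   AS 'MODULE_PATHNAME', 'pg_random_bytes'
 *   LANGUAGE C VOLATILE STRICT;
 */
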
Code example #5
Datum
musicbrainz_collate (PG_FUNCTION_ARGS)
{
    UChar *unicode;
    uint8_t *sortkey = NULL;
    int32_t sortkeylen;
    bytea *output;

    if (PG_ARGISNULL (0)) 
    {
        PG_RETURN_NULL();
    }

    unicode = unicode_from_pg_text (PG_GETARG_TEXT_P(0));
    if (!unicode)
    {
        PG_RETURN_NULL();
    }

    sortkeylen = sortkey_from_unicode (unicode, &sortkey);
    if (!sortkeylen)
    {
        PG_RETURN_NULL();
    }

    output = (bytea *)palloc (sortkeylen + VARHDRSZ);

    SET_VARSIZE (output, sortkeylen + VARHDRSZ);

    memcpy (VARDATA (output), sortkey, sortkeylen);

    pfree (unicode);
    pfree (sortkey);

    PG_RETURN_BYTEA_P( output );
}
Code example #6
File: pgcrypto.c Project: dreamsxin/postgresql-1
Datum
pg_random_bytes(PG_FUNCTION_ARGS)
{
#ifdef HAVE_STRONG_RANDOM
	int			len = PG_GETARG_INT32(0);
	bytea	   *res;

	if (len < 1 || len > 1024)
		ereport(ERROR,
				(errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
				 errmsg("Length not in range")));

	res = palloc(VARHDRSZ + len);
	SET_VARSIZE(res, VARHDRSZ + len);

	/* generate result */
	if (!pg_strong_random(VARDATA(res), len))
		px_THROW_ERROR(PXE_NO_RANDOM);

	PG_RETURN_BYTEA_P(res);
#else
	px_THROW_ERROR(PXE_NO_RANDOM);
#endif
}
Code example #7
File: tsvector_op.c Project: bocap/postgres
Datum
tsvector_concat(PG_FUNCTION_ARGS)
{
	TSVector	in1 = PG_GETARG_TSVECTOR(0);
	TSVector	in2 = PG_GETARG_TSVECTOR(1);
	TSVector	out;
	WordEntry  *ptr;
	WordEntry  *ptr1,
			   *ptr2;
	WordEntryPos *p;
	int			maxpos = 0,
				i,
				j,
				i1,
				i2,
				dataoff,
				output_bytes,
				output_size;
	char	   *data,
			   *data1,
			   *data2;

	/* Get max position in in1; we'll need this to offset in2's positions */
	ptr = ARRPTR(in1);
	i = in1->size;
	while (i--)
	{
		if ((j = POSDATALEN(in1, ptr)) != 0)
		{
			p = POSDATAPTR(in1, ptr);
			while (j--)
			{
				if (WEP_GETPOS(*p) > maxpos)
					maxpos = WEP_GETPOS(*p);
				p++;
			}
		}
		ptr++;
	}

	ptr1 = ARRPTR(in1);
	ptr2 = ARRPTR(in2);
	data1 = STRPTR(in1);
	data2 = STRPTR(in2);
	i1 = in1->size;
	i2 = in2->size;

	/*
	 * Conservative estimate of space needed.  We might need all the data in
	 * both inputs, and conceivably add a pad byte before position data for
	 * each item where there was none before.
	 */
	output_bytes = VARSIZE(in1) + VARSIZE(in2) + i1 + i2;

	out = (TSVector) palloc0(output_bytes);
	SET_VARSIZE(out, output_bytes);

	/*
	 * We must make out->size valid so that STRPTR(out) is sensible.  We'll
	 * collapse out any unused space at the end.
	 */
	out->size = in1->size + in2->size;

	ptr = ARRPTR(out);
	data = STRPTR(out);
	dataoff = 0;
	while (i1 && i2)
	{
		int			cmp = compareEntry(data1, ptr1, data2, ptr2);

		if (cmp < 0)
		{						/* in1 first */
			ptr->haspos = ptr1->haspos;
			ptr->len = ptr1->len;
			memcpy(data + dataoff, data1 + ptr1->pos, ptr1->len);
			ptr->pos = dataoff;
			dataoff += ptr1->len;
			if (ptr->haspos)
			{
				dataoff = SHORTALIGN(dataoff);
				memcpy(data + dataoff, _POSVECPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
				dataoff += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
			}

			ptr++;
			ptr1++;
			i1--;
		}
		else if (cmp > 0)
		{						/* in2 first */
			ptr->haspos = ptr2->haspos;
			ptr->len = ptr2->len;
			memcpy(data + dataoff, data2 + ptr2->pos, ptr2->len);
			ptr->pos = dataoff;
			dataoff += ptr2->len;
			if (ptr->haspos)
			{
				int			addlen = add_pos(in2, ptr2, out, ptr, maxpos);

				if (addlen == 0)
					ptr->haspos = 0;
				else
				{
					dataoff = SHORTALIGN(dataoff);
					dataoff += addlen * sizeof(WordEntryPos) + sizeof(uint16);
				}
			}

			ptr++;
			ptr2++;
			i2--;
		}
		else
		{
			ptr->haspos = ptr1->haspos | ptr2->haspos;
			ptr->len = ptr1->len;
			memcpy(data + dataoff, data1 + ptr1->pos, ptr1->len);
			ptr->pos = dataoff;
			dataoff += ptr1->len;
			if (ptr->haspos)
			{
				if (ptr1->haspos)
				{
					dataoff = SHORTALIGN(dataoff);
					memcpy(data + dataoff, _POSVECPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
					dataoff += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
					if (ptr2->haspos)
						dataoff += add_pos(in2, ptr2, out, ptr, maxpos) * sizeof(WordEntryPos);
				}
				else	/* must have ptr2->haspos */
				{
					int			addlen = add_pos(in2, ptr2, out, ptr, maxpos);

					if (addlen == 0)
						ptr->haspos = 0;
					else
					{
						dataoff = SHORTALIGN(dataoff);
						dataoff += addlen * sizeof(WordEntryPos) + sizeof(uint16);
					}
				}
			}

			ptr++;
			ptr1++;
			ptr2++;
			i1--;
			i2--;
		}
	}

	while (i1)
	{
		ptr->haspos = ptr1->haspos;
		ptr->len = ptr1->len;
		memcpy(data + dataoff, data1 + ptr1->pos, ptr1->len);
		ptr->pos = dataoff;
		dataoff += ptr1->len;
		if (ptr->haspos)
		{
			dataoff = SHORTALIGN(dataoff);
			memcpy(data + dataoff, _POSVECPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
			dataoff += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
		}

		ptr++;
		ptr1++;
		i1--;
	}

	while (i2)
	{
		ptr->haspos = ptr2->haspos;
		ptr->len = ptr2->len;
		memcpy(data + dataoff, data2 + ptr2->pos, ptr2->len);
		ptr->pos = dataoff;
		dataoff += ptr2->len;
		if (ptr->haspos)
		{
			int			addlen = add_pos(in2, ptr2, out, ptr, maxpos);

			if (addlen == 0)
				ptr->haspos = 0;
			else
			{
				dataoff = SHORTALIGN(dataoff);
				dataoff += addlen * sizeof(WordEntryPos) + sizeof(uint16);
			}
		}

		ptr++;
		ptr2++;
		i2--;
	}

	/*
	 * Instead of checking each offset individually, we check for overflow of
	 * pos fields once at the end.
	 */
	if (dataoff > MAXSTRPOS)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("string is too long for tsvector (%d bytes, max %d bytes)", dataoff, MAXSTRPOS)));

	/*
	 * Adjust sizes (asserting that we didn't overrun the original estimates)
	 * and collapse out any unused array entries.
	 */
	output_size = ptr - ARRPTR(out);
	Assert(output_size <= out->size);
	out->size = output_size;
	if (data != STRPTR(out))
		memmove(STRPTR(out), data, dataoff);
	output_bytes = CALCDATASIZE(out->size, dataoff);
	Assert(output_bytes <= VARSIZE(out));
	SET_VARSIZE(out, output_bytes);

	PG_FREE_IF_COPY(in1, 0);
	PG_FREE_IF_COPY(in2, 1);
	PG_RETURN_POINTER(out);
}
Code example #8
File: tuptoaster.c Project: LittleForker/postgres
/* ----------
 * heap_tuple_untoast_attr_slice -
 *
 *		Public entry point to get back part of a toasted value
 *		from compression or external storage.
 * ----------
 */
struct varlena *
heap_tuple_untoast_attr_slice(struct varlena * attr,
							  int32 sliceoffset, int32 slicelength)
{
	struct varlena *preslice;
	struct varlena *result;
	char	   *attrdata;
	int32		attrsize;

	if (VARATT_IS_EXTERNAL(attr))
	{
		struct varatt_external toast_pointer;

		VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);

		/* fast path for non-compressed external datums */
		if (!VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer))
			return toast_fetch_datum_slice(attr, sliceoffset, slicelength);

		/* fetch it back (compressed marker will get set automatically) */
		preslice = toast_fetch_datum(attr);
	}
	else
		preslice = attr;

	if (VARATT_IS_COMPRESSED(preslice))
	{
		PGLZ_Header *tmp = (PGLZ_Header *) preslice;
		Size		size = PGLZ_RAW_SIZE(tmp) + VARHDRSZ;

		preslice = (struct varlena *) palloc(size);
		SET_VARSIZE(preslice, size);
		pglz_decompress(tmp, VARDATA(preslice));

		if (tmp != (PGLZ_Header *) attr)
			pfree(tmp);
	}

	if (VARATT_IS_SHORT(preslice))
	{
		attrdata = VARDATA_SHORT(preslice);
		attrsize = VARSIZE_SHORT(preslice) - VARHDRSZ_SHORT;
	}
	else
	{
		attrdata = VARDATA(preslice);
		attrsize = VARSIZE(preslice) - VARHDRSZ;
	}

	/* slicing of datum for compressed cases and plain value */

	if (sliceoffset >= attrsize)
	{
		sliceoffset = 0;
		slicelength = 0;
	}

	if (((sliceoffset + slicelength) > attrsize) || slicelength < 0)
		slicelength = attrsize - sliceoffset;

	result = (struct varlena *) palloc(slicelength + VARHDRSZ);
	SET_VARSIZE(result, slicelength + VARHDRSZ);

	memcpy(VARDATA(result), attrdata + sliceoffset, slicelength);

	if (preslice != attr)
		pfree(preslice);

	return result;
}
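
Extension code rarely calls this routine directly; fmgr exposes slice macros that route through it, so only the requested part of a large external value is fetched and, if necessary, decompressed. A sketch of a caller using the slice interface (the function name text_head16 is illustrative, the result is byte-oriented and ignores multibyte boundaries, and the registration boilerplate shown after code example #4 is omitted):

#include "postgres.h"
#include "fmgr.h"

/*
 * Illustrative only: fetch at most the first 16 bytes of a possibly
 * toasted text value.  For external or compressed values the slice
 * macro ends up in heap_tuple_untoast_attr_slice().
 */
Datum
text_head16(PG_FUNCTION_ARGS)
{
	text	   *t = PG_GETARG_TEXT_P_SLICE(0, 0, 16);

	PG_RETURN_TEXT_P(t);
}
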
Code example #9
File: tuptoaster.c Project: LittleForker/postgres
/* ----------
 * toast_fetch_datum -
 *
 *	Reconstruct an in memory Datum from the chunks saved
 *	in the toast relation
 * ----------
 */
static struct varlena *
toast_fetch_datum(struct varlena * attr)
{
	Relation	toastrel;
	Relation	toastidx;
	ScanKeyData toastkey;
	SysScanDesc toastscan;
	HeapTuple	ttup;
	TupleDesc	toasttupDesc;
	struct varlena *result;
	struct varatt_external toast_pointer;
	int32		ressize;
	int32		residx,
				nextidx;
	int32		numchunks;
	Pointer		chunk;
	bool		isnull;
	char	   *chunkdata;
	int32		chunksize;

	/* Must copy to access aligned fields */
	VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);

	ressize = toast_pointer.va_extsize;
	numchunks = ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;

	result = (struct varlena *) palloc(ressize + VARHDRSZ);

	if (VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer))
		SET_VARSIZE_COMPRESSED(result, ressize + VARHDRSZ);
	else
		SET_VARSIZE(result, ressize + VARHDRSZ);

	/*
	 * Open the toast relation and its index
	 */
	toastrel = heap_open(toast_pointer.va_toastrelid, AccessShareLock);
	toasttupDesc = toastrel->rd_att;
	toastidx = index_open(toastrel->rd_rel->reltoastidxid, AccessShareLock);

	/*
	 * Setup a scan key to fetch from the index by va_valueid
	 */
	ScanKeyInit(&toastkey,
				(AttrNumber) 1,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(toast_pointer.va_valueid));

	/*
	 * Read the chunks by index
	 *
	 * Note that because the index is actually on (valueid, chunkidx) we will
	 * see the chunks in chunkidx order, even though we didn't explicitly ask
	 * for it.
	 */
	nextidx = 0;

	toastscan = systable_beginscan_ordered(toastrel, toastidx,
										   SnapshotToast, 1, &toastkey);
	while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL)
	{
		/*
		 * Have a chunk, extract the sequence number and the data
		 */
		residx = DatumGetInt32(fastgetattr(ttup, 2, toasttupDesc, &isnull));
		Assert(!isnull);
		chunk = DatumGetPointer(fastgetattr(ttup, 3, toasttupDesc, &isnull));
		Assert(!isnull);
		if (!VARATT_IS_EXTENDED(chunk))
		{
			chunksize = VARSIZE(chunk) - VARHDRSZ;
			chunkdata = VARDATA(chunk);
		}
		else if (VARATT_IS_SHORT(chunk))
		{
			/* could happen due to heap_form_tuple doing its thing */
			chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT;
			chunkdata = VARDATA_SHORT(chunk);
		}
		else
		{
			/* should never happen */
			elog(ERROR, "found toasted toast chunk for toast value %u in %s",
				 toast_pointer.va_valueid,
				 RelationGetRelationName(toastrel));
			chunksize = 0;		/* keep compiler quiet */
			chunkdata = NULL;
		}

		/*
		 * Some checks on the data we've found
		 */
		if (residx != nextidx)
			elog(ERROR, "unexpected chunk number %d (expected %d) for toast value %u in %s",
				 residx, nextidx,
				 toast_pointer.va_valueid,
				 RelationGetRelationName(toastrel));
		if (residx < numchunks - 1)
		{
			if (chunksize != TOAST_MAX_CHUNK_SIZE)
				elog(ERROR, "unexpected chunk size %d (expected %d) in chunk %d of %d for toast value %u in %s",
					 chunksize, (int) TOAST_MAX_CHUNK_SIZE,
					 residx, numchunks,
					 toast_pointer.va_valueid,
					 RelationGetRelationName(toastrel));
		}
		else if (residx == numchunks - 1)
		{
			if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != ressize)
				elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u in %s",
					 chunksize,
					 (int) (ressize - residx * TOAST_MAX_CHUNK_SIZE),
					 residx,
					 toast_pointer.va_valueid,
					 RelationGetRelationName(toastrel));
		}
		else
			elog(ERROR, "unexpected chunk number %d (out of range %d..%d) for toast value %u in %s",
				 residx,
				 0, numchunks - 1,
				 toast_pointer.va_valueid,
				 RelationGetRelationName(toastrel));

		/*
		 * Copy the data into proper place in our result
		 */
		memcpy(VARDATA(result) + residx * TOAST_MAX_CHUNK_SIZE,
			   chunkdata,
			   chunksize);

		nextidx++;
	}

	/*
	 * Final checks that we successfully fetched the datum
	 */
	if (nextidx != numchunks)
		elog(ERROR, "missing chunk number %d for toast value %u in %s",
			 nextidx,
			 toast_pointer.va_valueid,
			 RelationGetRelationName(toastrel));

	/*
	 * End scan and close relations
	 */
	systable_endscan_ordered(toastscan);
	index_close(toastidx, AccessShareLock);
	heap_close(toastrel, AccessShareLock);

	return result;
}
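
A worked instance of the chunk arithmetic above, under the assumption that TOAST_MAX_CHUNK_SIZE is 1996 bytes (the usual value on default 8 kB-page builds): a 5000-byte external value comes back as three chunks, two full ones plus a final 1008-byte chunk.

#include "postgres.h"

/*
 * Worked example of the numchunks computation in toast_fetch_datum().
 * The 1996-byte chunk size is an assumption standing in for
 * TOAST_MAX_CHUNK_SIZE on a default 8 kB-page build.
 */
static void
toast_chunk_arithmetic_example(void)
{
	int32		ressize = 5000;
	int32		chunk_size = 1996;
	int32		numchunks = ((ressize - 1) / chunk_size) + 1;

	Assert(numchunks == 3);
	Assert(ressize - (numchunks - 1) * chunk_size == 1008);
}
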
Code example #10
File: compression_test.c Project: jiaoyk/pg_plugins
/*
 * get_raw_page
 *
 * Returns a copy of a page from shared buffers as a bytea, with hole
 * filled with zeros or simply without hole, with the length of the page
 * offset to be able to reconstitute the page entirely using the data
 * returned by this function.
 */
Datum
get_raw_page(PG_FUNCTION_ARGS)
{
	Oid			relid = PG_GETARG_OID(0);
	uint32		blkno = PG_GETARG_UINT32(1);
	bool		with_hole = PG_GETARG_BOOL(2);
	bytea	   *raw_page;
	Relation	rel;
	char	    raw_page_data[BLCKSZ];
	Buffer		buf;
	TupleDesc	tupdesc;
	Datum       result;
	Datum		values[2];
	bool		nulls[2];
	HeapTuple	tuple;
	PageHeader	page_header;
	int16		hole_offset, hole_length;

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use raw functions"))));

	rel = relation_open(relid, AccessShareLock);

	/* Check that this relation has storage */
	if (rel->rd_rel->relkind == RELKIND_VIEW)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("cannot get raw page from view \"%s\"",
						RelationGetRelationName(rel))));
	if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("cannot get raw page from composite type \"%s\"",
						RelationGetRelationName(rel))));
	if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("cannot get raw page from foreign table \"%s\"",
						RelationGetRelationName(rel))));

	/*
	 * Reject attempts to read non-local temporary relations; we would be
	 * likely to get wrong data since we have no visibility into the owning
	 * session's local buffers.
	 */
	if (RELATION_IS_OTHER_TEMP(rel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary tables of other sessions")));

	if (blkno >= RelationGetNumberOfBlocksInFork(rel, MAIN_FORKNUM))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("block number %u is out of range for relation \"%s\"",
						blkno, RelationGetRelationName(rel))));

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* Take a copy of the page to work on */
	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);
	LockBuffer(buf, BUFFER_LOCK_SHARE);
	memcpy(raw_page_data, BufferGetPage(buf), BLCKSZ);
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	ReleaseBuffer(buf);
	relation_close(rel, AccessShareLock);

	page_header = (PageHeader) raw_page_data;
	hole_length = page_header->pd_upper - page_header->pd_lower;
	hole_offset = page_header->pd_lower;

	/*
	 * If hole is wanted in the page returned, fill it with zeros.
	 * If not, copy to the return buffer the page without the hole.
	 */
	if (with_hole)
	{
		raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ);
		SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ);
		memcpy(VARDATA(raw_page), raw_page_data, BLCKSZ);
		MemSet(raw_page_data + hole_offset, 0, hole_length);
	}
	else
	{
		raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ - hole_length);
		SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ - hole_length);
		memcpy(VARDATA(raw_page), raw_page_data, hole_offset);
		memcpy(VARDATA(raw_page) + hole_offset,
			   raw_page_data + hole_offset + hole_length,
			   BLCKSZ - (hole_offset + hole_length));
	}

	/* Build and return the tuple. */
	values[0] = PointerGetDatum(raw_page);
	if (with_hole)
		values[1] = UInt16GetDatum(0);
	else
		values[1] = UInt16GetDatum(hole_offset);

	memset(nulls, 0, sizeof(nulls));

	tuple = heap_form_tuple(tupdesc, values, nulls);
	result = HeapTupleGetDatum(tuple);
	PG_RETURN_DATUM(result);
}
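
The header comment promises that the page can be reconstituted from the hole-free output plus the returned hole offset. A sketch of that inverse step (assumption: the hole length is re-derived as BLCKSZ minus the bytea payload size, which matches how the function trims the page above):

#include "postgres.h"

/*
 * Sketch: rebuild a full BLCKSZ page image from the hole-free bytea
 * returned by get_raw_page(..., with_hole = false) and the reported
 * hole offset.  page_out must point to BLCKSZ bytes; the hole is
 * re-filled with zeros.
 */
static void
reconstitute_page(bytea *raw_page, int16 hole_offset, char *page_out)
{
	int32		data_len = VARSIZE(raw_page) - VARHDRSZ;
	int32		hole_length = BLCKSZ - data_len;

	memset(page_out, 0, BLCKSZ);
	memcpy(page_out, VARDATA(raw_page), hole_offset);
	memcpy(page_out + hole_offset + hole_length,
		   VARDATA(raw_page) + hole_offset,
		   data_len - hole_offset);
}
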
Code example #11
Datum pgq_finish_varbuf(StringInfo buf)
{
    SET_VARSIZE(buf->data, buf->len);
    return PointerGetDatum(buf->data);
}
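
This only produces a valid varlena if the StringInfo was primed with VARHDRSZ placeholder bytes before any payload was appended, so that buf->len already counts the header. A sketch of the companion initialization step (the helper name init_varbuf is illustrative, not PgQ's actual code):

#include "postgres.h"
#include "lib/stringinfo.h"

/*
 * Start a StringInfo whose first VARHDRSZ bytes are reserved for the
 * varlena header, so buf->len is already the total size that
 * SET_VARSIZE expects in pgq_finish_varbuf() above.
 */
static StringInfo
init_varbuf(void)
{
	StringInfo	buf = makeStringInfo();

	appendStringInfoSpaces(buf, VARHDRSZ);	/* placeholder for the header */

	return buf;
}

Payload appended afterwards with appendStringInfo() or appendBinaryStringInfo() lands just past the reserved header, and pgq_finish_varbuf() stamps the final length.
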
Code example #12
File: tupser.c Project: 50wu/gpdb
/*
 * Deserialize a HeapTuple's data from a byte-array.
 *
 * This code is based on the binary input handling functions in copy.c.
 */
HeapTuple
DeserializeTuple(SerTupInfo * pSerInfo, StringInfo serialTup)
{
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	HeapTuple	htup;
	int			natts;
	SerAttrInfo *attrInfo;
	uint32		attr_size;

	int			i;
	StringInfoData attr_data;
	bool		fHandled;

	AssertArg(pSerInfo != NULL);
	AssertArg(serialTup != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/*
	 * Flip to our tuple-serialization memory-context, to speed up memory
	 * reclamation operations.
	 */
	AssertState(s_tupSerMemCtxt != NULL);
	oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

	/* Receive nulls character-array. */
	pq_copymsgbytes(serialTup, pSerInfo->nulls, natts);
	skipPadding(serialTup);

	/* Deserialize the non-NULL attributes of this tuple */
	initStringInfo(&attr_data);
	for (i = 0; i < natts; ++i)
	{
		attrInfo = pSerInfo->myinfo + i;

		if (pSerInfo->nulls[i])	/* NULL field. */
		{
			pSerInfo->values[i] = (Datum) 0;
			continue;
		}

		/*
		 * Assume that the data's output will be handled by the special IO
		 * code, and if not then we can handle it the slow way.
		 */
		fHandled = true;
		switch (attrInfo->atttypid)
		{
			case INT4OID:
				pSerInfo->values[i] = Int32GetDatum(stringInfoGetInt32(serialTup));
				break;

			case CHAROID:
				pSerInfo->values[i] = CharGetDatum(pq_getmsgbyte(serialTup));
				skipPadding(serialTup);
				break;

			case BPCHAROID:
			case VARCHAROID:
			case INT2VECTOROID: /* postgres serialization logic broken, use our own */
			case OIDVECTOROID: /* postgres serialization logic broken, use our own */
			case ANYARRAYOID:
			{
				text	   *pText;
				int			textSize;

				textSize = stringInfoGetInt32(serialTup);

#ifdef TUPSER_SCRATCH_SPACE
				if (textSize + VARHDRSZ <= attrInfo->varlen_scratch_size)
					pText = (text *) attrInfo->pv_varlen_scratch;
				else
					pText = (text *) palloc(textSize + VARHDRSZ);
#else
				pText = (text *) palloc(textSize + VARHDRSZ);
#endif

				SET_VARSIZE(pText, textSize + VARHDRSZ);
				pq_copymsgbytes(serialTup, VARDATA(pText), textSize);
				skipPadding(serialTup);
				pSerInfo->values[i] = PointerGetDatum(pText);
				break;
			}

			case DATEOID:
			{
				/*
				 * TODO:  I would LIKE to do something more efficient, but
				 * DateADT is not strictly limited to 4 bytes by its
				 * definition.
				 */
				DateADT date;

				pq_copymsgbytes(serialTup, (char *) &date, sizeof(DateADT));
				skipPadding(serialTup);
				pSerInfo->values[i] = DateADTGetDatum(date);
				break;
			}

			case NUMERICOID:
			{
				/*
				 * Treat the numeric as a varlena variable, and just push
				 * the whole shebang to the output-buffer.	We don't care
				 * about the guts of the numeric.
				 */
				Numeric		num;
				int			numSize;

				numSize = stringInfoGetInt32(serialTup);

#ifdef TUPSER_SCRATCH_SPACE
				if (numSize + VARHDRSZ <= attrInfo->varlen_scratch_size)
					num = (Numeric) attrInfo->pv_varlen_scratch;
				else
					num = (Numeric) palloc(numSize + VARHDRSZ);
#else
				num = (Numeric) palloc(numSize + VARHDRSZ);
#endif

				SET_VARSIZE(num, numSize + VARHDRSZ);
				pq_copymsgbytes(serialTup, VARDATA(num), numSize);
				skipPadding(serialTup);
				pSerInfo->values[i] = NumericGetDatum(num);
				break;
			}

			case ACLITEMOID:
			{
				int		aclSize, k, cnt;
				char		*inputstring, *starsfree;

				aclSize = stringInfoGetInt32(serialTup);
				inputstring = (char*) palloc(aclSize  + 1);
				starsfree = (char*) palloc(aclSize  + 1);
				cnt = 0;
	

				pq_copymsgbytes(serialTup, inputstring, aclSize);
				skipPadding(serialTup);
				inputstring[aclSize] = '\0';
				for(k=0; k<aclSize; k++)
				{					
					if( inputstring[k] != '*')
					{
						starsfree[cnt] = inputstring[k];
						cnt++;
					}
				}
				starsfree[cnt] = '\0';

				pSerInfo->values[i] = DirectFunctionCall1(aclitemin, CStringGetDatum(starsfree));
				pfree(inputstring);
				break;
			}

			case 210:
			{
				int 		strsize;
				char		*smgrstr;

				strsize = stringInfoGetInt32(serialTup);
				smgrstr = (char*) palloc(strsize + 1);
				pq_copymsgbytes(serialTup, smgrstr, strsize);
				skipPadding(serialTup);
				smgrstr[strsize] = '\0';

				pSerInfo->values[i] = DirectFunctionCall1(smgrin, CStringGetDatum(smgrstr));
				break;
			}
			default:
				fHandled = false;
		}

		if (fHandled)
			continue;

		attr_size = stringInfoGetInt32(serialTup);

		/* reset attr_data to empty, and load raw data into it */

		attr_data.len = 0;
		attr_data.data[0] = '\0';
		attr_data.cursor = 0;

		appendBinaryStringInfo(&attr_data,
							   pq_getmsgbytes(serialTup, attr_size), attr_size);
		skipPadding(serialTup);

		/* Call the attribute type's binary input converter. */
		if (attrInfo->recv_finfo.fn_nargs == 1)
			pSerInfo->values[i] = FunctionCall1(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data));
		else if (attrInfo->recv_finfo.fn_nargs == 2)
			pSerInfo->values[i] = FunctionCall2(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data),
												ObjectIdGetDatum(attrInfo->recv_typio_param));
		else if (attrInfo->recv_finfo.fn_nargs == 3)
			pSerInfo->values[i] = FunctionCall3(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data),
												ObjectIdGetDatum(attrInfo->recv_typio_param),
												Int32GetDatum(tupdesc->attrs[i]->atttypmod) );  
		else
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
					 errmsg("Conversion function takes %d args",attrInfo->recv_finfo.fn_nargs)));
		}

		/* Trouble if it didn't eat the whole buffer */
		if (attr_data.cursor != attr_data.len)
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
					 errmsg("incorrect binary data format")));
		}
	}

	/*
	 * Construct the tuple from the Datums and nulls values.  NOTE:  Switch
	 * out of our temporary context before we form the tuple!
	 */
	MemoryContextSwitchTo(oldCtxt);

	htup = heap_form_tuple(tupdesc, pSerInfo->values, pSerInfo->nulls);

	MemoryContextReset(s_tupSerMemCtxt);

	/* All done.  Return the result. */
	return htup;
}
Code example #13
File: pg_kc.c Project: cloudflare/SortaSQL
Datum kc_expand(PG_FUNCTION_ARGS) {

    KC_ENTRY                        *search;
    FuncCallContext                 *funcctx;
    int                             call_cntr;
    char                            *kbuf;
    size_t                          ksiz, vsiz;
    const char                      *cvbuf;
    char                            *kv_kbuf = NULL; 
    size_t                          kv_ksiz;
    int                             done;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL()) {
        MemoryContext   oldcontext;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        // Make sure that there are enough args.
        if (PG_NARGS() < MIN_ARGS) {
            ereport(ERROR,
                    (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
                     errmsg("Must run expand with at least %d args!", MIN_ARGS)));
        }

        /* Make the kcdb here. */
        search = (KC_ENTRY *)palloc(sizeof(KC_ENTRY)); 
        search->db = kcdbnew();
        if (open_db (search->db, text_to_cstring(PG_GETARG_TEXT_PP(0)), text_to_cstring(PG_GETARG_TEXT_PP(1)))) {

            // Set the key to jump into:
            // Call with -- map_name, result_id, class, doctype, pop, psource
            // Here, map_name describes a db to open.
            // Otherwise, result_id:class:doctype:pop:psource
            (search->jump_key) = (char *) palloc(MAX_JUMP_KEY_LEN * sizeof(char));

            int index_point;
            search->jump_key = text_to_cstring(PG_GETARG_TEXT_PP(2));
            int size_left = MAX_JUMP_KEY_LEN;
            for (index_point = START_VARIABLE_INDEX; index_point < END_VARIABLE_INDEX; index_point++) {
                if (PG_NARGS() > index_point) {
                    char *next_idx = text_to_cstring(PG_GETARG_TEXT_PP(index_point));
                    if (next_idx != NULL) {
                        size_left = size_left - (2 + strlen(next_idx));
                        strncat (search->jump_key, CF_LABEL_SEP, size_left);
                        strncat (search->jump_key, next_idx, size_left);
                    }
                }
            }
            
#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("Setting jump buffer -- [%s]", search->jump_key)));
#endif
            
            // Create a cursor, and set it to the base point looking for entries.
            search->cur = kcdbcursor(search->db);
            kccurjumpkey(search->cur, search->jump_key, MAX_JUMP_KEY_LEN);
        } else {
            search->db = NULL;
        }

        search->next_map = 0;
        search->msg = NULL;
        
        // Save the search struct for the subsequent calls.
        funcctx->user_fctx = search;

        MemoryContextSwitchTo(oldcontext);
    }

    /* stuff done on every call of the function */
    funcctx = SRF_PERCALL_SETUP();

    call_cntr = funcctx->call_cntr;
    search = (KC_ENTRY *) funcctx->user_fctx;
    
    // If no current msg, try to get the next one.
    done = 1;

#ifdef CF_DUBUG
    ereport(NOTICE,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("beginning run")));
#endif

    if (search->msg) {

#ifdef CF_DUBUG  
        ereport(NOTICE,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("Incrementing next from map %d -- %zu", search->next_map, search->msg->n_map_entry)));
#endif

        // Case if we are using the external cursor running over kv map.
        // Ready the next 
        if (search->msg->kv_map_file) {
            
            if ((kv_kbuf = kccurgetkey(search->kv_cur, &kv_ksiz, 1)) == NULL) {
                done = 1;
                kccurdel(search->kv_cur);
                kcdbendtran (search->kv_db, 1);            
                if (!kcdbclose(search->kv_db)) {
                    ereport(ERROR,
                            (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                             errmsg("Error Closeing db: \"%s\"", kcecodename(kcdbecode(search->kv_db)))));
                }

                // Also need to free this.
                cloudflare__zone_time_bucket__free_unpacked(search->msg, NULL);
                search->msg = NULL;

            } else {
                done = 0;
            }


        } else {
            if (search->next_map >= search->msg->n_map_entry) {
                // Done with this msg -- move on to the next one.
                cloudflare__zone_time_bucket__free_unpacked(search->msg, NULL);
                search->msg = NULL;
            } else {
                done = 0;
            }
        }
    }

    if (search->db && !search->msg) {
      
#ifdef CF_DUBUG  
        ereport(NOTICE,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("Getting new buf -- %s", search->jump_key)));
#endif        

        if ((kbuf = kccurget(search->cur, &ksiz, &cvbuf, &vsiz, 1)) != NULL) {
            // Pull up the PB and expand it.
            search->msg = cloudflare__zone_time_bucket__unpack(NULL, vsiz, (const uint8_t *)cvbuf);
            if (search->msg == NULL) {   // Something failed
                ereport(ERROR,
                        (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                         errmsg("error unpacking incoming message")));
                done = 1;
            } else {
                // Does the buffer match the searched for string?
                // @TODO -- bound this?
                if (strstr(search->msg->db_key, search->jump_key)) {
                    done = 0;
                    search->next_map = 0;

                    // And load the kvkc if needed.
                    if (search->msg->kv_map_file) {
                        
#ifdef CF_DUBUG  
                        ereport(NOTICE,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("Switching to kvs %s", search->msg->kv_map_file)));
#endif

                        search->kv_db = kcdbnew();
                        
                        if (!kcdbopen(search->kv_db, search->msg->kv_map_file, KCOWRITER)) {
#ifdef CF_NO_DB_IS_ERR
                            ereport(ERROR,
                                    (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                                     errmsg("Error opening db: \"%s\", \"%s\". Make sure that the map_name is valid.", 
                                            search->msg->kv_map_file, kcecodename(kcdbecode(search->kv_db)))));
#endif
#ifdef CF_DUBUG
                            ereport(NOTICE,
                                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                     errmsg("Error opening db: \"%s\", \"%s\". Make sure that the map_name is valid.", 
                                            search->msg->kv_map_file, kcecodename(kcdbecode(search->kv_db)))));
#endif
                            done = 1;
                        } else {
                            kcdbbegintran (search->kv_db, 0);
                            search->kv_cur = kcdbcursor(search->kv_db);
                            kccurjump(search->kv_cur);   

                            if ((kv_kbuf = kccurgetkey(search->kv_cur, &kv_ksiz, 1)) == NULL) {
                                done = 1;
                                kccurdel(search->kv_cur);
                                kcdbendtran (search->kv_db, 1);
                                if (!kcdbclose(search->kv_db)) {
                                    ereport(ERROR,
                                            (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                                             errmsg("Error Closeing db: \"%s\"", kcecodename(kcdbecode(search->kv_db)))));
                                }
                            } else {
                                done = 0;
                            }
                        }
                    }
                } else {
                    done = 1;
                }
            }
            kcfree(kbuf);
        } else {
#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("no msg to find")));
#endif
            done = 1;
        }
    }

#ifdef CF_DUBUG
    ereport(NOTICE,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("Done? %d -- next buf -- %d", done, search->next_map)));   
#endif

    // Take the next iteration over the cursor. If the next key is NULL, or does not match the resultid passed in,
    // end. Otherwise, parse the value, populating the next row of the returning tuple.
    if (!done) {
        KC_ROW                          *out;
        Datum                           result;

        size_t size = sizeof(KC_ROW);
        out = (KC_ROW *)palloc(size);
        memset(out, '0', size);
        SET_VARSIZE(out, size);

        out->classification = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->doctype = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->pop = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->psource = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->key = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));

        strncpy(out->classification, search->msg->classification, MAX_KC_ROW_ENTRY);
        strncpy(out->doctype, search->msg->doctype, MAX_KC_ROW_ENTRY);
        strncpy(out->pop, search->msg->pop, MAX_KC_ROW_ENTRY);
        strncpy(out->psource, search->msg->psource, MAX_KC_ROW_ENTRY);

        if (search->msg->kv_map_file) {

#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("getting val from -- [%s]", search->msg->kv_map_file)));
#endif

            snprintf(out->key, MAX_KC_ROW_ENTRY, "%s", kv_kbuf);
            out->value = kcdbincrint (search->kv_db, kv_kbuf, kv_ksiz, 0);

            if (out->value == INT64_MIN) {
                ereport(NOTICE,
                        (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                         errmsg("ERROR Getting val from key -- [%s], %s", kv_kbuf, kcecodename(kcdbecode(search->kv_db)))));
            }

            kcfree(kv_kbuf);
        } else {

#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("Loading %s %ld", search->msg->map_entry[search->next_map]->key, 
                            search->msg->map_entry[search->next_map]->value)));
#endif

            snprintf(out->key, MAX_KC_ROW_ENTRY, "%s", search->msg->map_entry[search->next_map]->key);        
            out->value = search->msg->map_entry[search->next_map]->value;
        }

        result = PointerGetDatum(out);

        /* clean up (this is not really necessary) */
        pfree(out->classification);
        pfree(out->doctype);
        pfree(out->pop);
        pfree(out->psource);
        pfree(out->key);
        pfree(out);

        // Remember that we are going to the next step.
        search->next_map++;

        SRF_RETURN_NEXT(funcctx, result);
    } else {    /* do when there is no more left */
        if (search->db) {
            kccurdel(search->cur);
            if (!kcdbclose(search->db)) {
                ereport(ERROR,
                        (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                         errmsg("Error Closeing db: \"%s\"", kcecodename(kcdbecode(search->db)))));
            }
            
            if (search->msg != NULL) {
                cloudflare__zone_time_bucket__free_unpacked(search->msg, NULL);
            }
            
            pfree(search->jump_key);
        }
        pfree(search);

#ifdef CF_DUBUG
        ereport(NOTICE,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("Done with run")));
#endif

        // Don't delete db, this leads to segfaults.
        SRF_RETURN_DONE(funcctx);
    }
}
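
The per-call bookkeeping above follows PostgreSQL's standard set-returning-function protocol: build long-lived state once under SRF_IS_FIRSTCALL(), then emit one row per call until SRF_RETURN_DONE(). A stripped-down skeleton of that protocol (the function name count_to_ten is purely illustrative):

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"

/*
 * Minimal set-returning function: returns the integers 0..9, one per
 * call, using the same SRF macros as kc_expand() above.
 */
Datum
count_to_ten(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;

	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
		/* long-lived state (open cursors, handles, ...) would be built here */
		MemoryContextSwitchTo(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();

	if (funcctx->call_cntr < 10)
		SRF_RETURN_NEXT(funcctx, Int32GetDatum((int32) funcctx->call_cntr));

	SRF_RETURN_DONE(funcctx);
}
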
Code example #14
File: pgp-pgsql.c Project: adunstan/pg-cvs-mirror
static bytea *
decrypt_internal(int is_pubenc, int need_text, text *data,
				 text *key, text *keypsw, text *args)
{
	int			err;
	MBuf	   *src = NULL,
			   *dst = NULL;
	uint8		tmp[VARHDRSZ];
	uint8	   *restmp;
	bytea	   *res;
	int			res_len;
	PGP_Context *ctx = NULL;
	struct debug_expect ex;
	int			got_unicode = 0;


	init_work(&ctx, need_text, args, &ex);

	src = mbuf_create_from_data((uint8 *) VARDATA(data),
								VARSIZE(data) - VARHDRSZ);
	dst = mbuf_create(VARSIZE(data) + 2048);

	/*
	 * reserve room for header
	 */
	mbuf_append(dst, tmp, VARHDRSZ);

	/*
	 * set key
	 */
	if (is_pubenc)
	{
		uint8	   *psw = NULL;
		int			psw_len = 0;
		MBuf	   *kbuf;

		if (keypsw)
		{
			psw = (uint8 *) VARDATA(keypsw);
			psw_len = VARSIZE(keypsw) - VARHDRSZ;
		}
		kbuf = create_mbuf_from_vardata(key);
		err = pgp_set_pubkey(ctx, kbuf, psw, psw_len, 1);
		mbuf_free(kbuf);
	}
	else
		err = pgp_set_symkey(ctx, (uint8 *) VARDATA(key),
							 VARSIZE(key) - VARHDRSZ);

	/*
	 * decrypt
	 */
	if (err >= 0)
		err = pgp_decrypt(ctx, src, dst);

	/*
	 * failed?
	 */
	if (err < 0)
		goto out;

	if (ex.expect)
		check_expect(ctx, &ex);

	/* remember the setting */
	got_unicode = pgp_get_unicode_mode(ctx);

out:
	if (src)
		mbuf_free(src);
	if (ctx)
		pgp_free(ctx);

	if (err)
	{
		px_set_debug_handler(NULL);
		if (dst)
			mbuf_free(dst);
		ereport(ERROR,
				(errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
				 errmsg("%s", px_strerror(err))));
	}

	res_len = mbuf_steal_data(dst, &restmp);
	mbuf_free(dst);

	/* res_len includes VARHDRSZ */
	res = (bytea *) restmp;
	SET_VARSIZE(res, res_len);

	if (need_text && got_unicode)
	{
		text	   *utf = convert_from_utf8(res);

		if (utf != res)
		{
			clear_and_pfree(res);
			res = utf;
		}
	}
	px_set_debug_handler(NULL);

	/*
	 * add successful decryptions into the RNG as well
	 */
	add_entropy(res, key, keypsw);

	return res;
}
Code example #15
File: pgp-pgsql.c Project: adunstan/pg-cvs-mirror
static bytea *
encrypt_internal(int is_pubenc, int is_text,
				 text *data, text *key, text *args)
{
	MBuf	   *src,
			   *dst;
	uint8		tmp[VARHDRSZ];
	uint8	   *restmp;
	bytea	   *res;
	int			res_len;
	PGP_Context *ctx;
	int			err;
	struct debug_expect ex;
	text	   *tmp_data = NULL;

	/*
	 * Add data and key info RNG.
	 */
	add_entropy(data, key, NULL);

	init_work(&ctx, is_text, args, &ex);

	if (is_text && pgp_get_unicode_mode(ctx))
	{
		tmp_data = convert_to_utf8(data);
		if (tmp_data == data)
			tmp_data = NULL;
		else
			data = tmp_data;
	}

	src = create_mbuf_from_vardata(data);
	dst = mbuf_create(VARSIZE(data) + 128);

	/*
	 * reserve room for header
	 */
	mbuf_append(dst, tmp, VARHDRSZ);

	/*
	 * set key
	 */
	if (is_pubenc)
	{
		MBuf	   *kbuf = create_mbuf_from_vardata(key);

		err = pgp_set_pubkey(ctx, kbuf,
							 NULL, 0, 0);
		mbuf_free(kbuf);
	}
	else
		err = pgp_set_symkey(ctx, (uint8 *) VARDATA(key),
							 VARSIZE(key) - VARHDRSZ);

	/*
	 * encrypt
	 */
	if (err >= 0)
		err = pgp_encrypt(ctx, src, dst);

	/*
	 * check for error
	 */
	if (err)
	{
		if (ex.debug)
			px_set_debug_handler(NULL);
		if (tmp_data)
			clear_and_pfree(tmp_data);
		pgp_free(ctx);
		mbuf_free(src);
		mbuf_free(dst);
		ereport(ERROR,
				(errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
				 errmsg("%s", px_strerror(err))));
	}

	/* res_len includes VARHDRSZ */
	res_len = mbuf_steal_data(dst, &restmp);
	res = (bytea *) restmp;
	SET_VARSIZE(res, res_len);

	if (tmp_data)
		clear_and_pfree(tmp_data);
	pgp_free(ctx);
	mbuf_free(src);
	mbuf_free(dst);

	px_set_debug_handler(NULL);

	return res;
}
Code example #16
File: trgm_op.c Project: dchichkov/postgres
TRGM *
generate_trgm(char *str, int slen)
{
	TRGM	   *trg;
	char	   *buf;
	trgm	   *tptr;
	int			len,
				charlen,
				bytelen;
	char	   *bword,
			   *eword;

	trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) * 3);
	trg->flag = ARRKEY;
	SET_VARSIZE(trg, TRGMHDRSIZE);

	if (slen + LPADDING + RPADDING < 3 || slen == 0)
		return trg;

	tptr = GETARR(trg);

	buf = palloc(sizeof(char) * (slen + 4));

	if (LPADDING > 0)
	{
		*buf = ' ';
		if (LPADDING > 1)
			*(buf + 1) = ' ';
	}

	eword = str;
	while ((bword = find_word(eword, slen - (eword - str), &eword, &charlen)) != NULL)
	{
#ifdef IGNORECASE
		bword = lowerstr_with_len(bword, eword - bword);
		bytelen = strlen(bword);
#else
		bytelen = eword - bword;
#endif

		memcpy(buf + LPADDING, bword, bytelen);

#ifdef IGNORECASE
		pfree(bword);
#endif
		buf[LPADDING + bytelen] = ' ';
		buf[LPADDING + bytelen + 1] = ' ';

		/*
		 * count trigrams
		 */
		tptr = make_trigrams(tptr, buf, bytelen + LPADDING + RPADDING,
							 charlen + LPADDING + RPADDING);
	}

	pfree(buf);

	if ((len = tptr - GETARR(trg)) == 0)
		return trg;

	if (len > 0)
	{
		qsort((void *) GETARR(trg), len, sizeof(trgm), comp_trgm);
		len = unique_array(GETARR(trg), len);
	}

	SET_VARSIZE(trg, CALCGTSIZE(ARRKEY, len));

	return trg;
}
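
The palloc() at the top sizes the trigram array for the worst case: a string made of one-character words, of which there can be at most slen / 2 + 1, each contributing at most three padded trigrams. A small sketch of that capacity bound (this is an assumed reading of the formula, with the stock LPADDING = 2 / RPADDING = 1 build):

#include "postgres.h"

/*
 * Capacity estimate used by generate_trgm() above.  Example: "a b c"
 * (slen = 5) holds at most 5 / 2 + 1 = 3 one-character words, and a
 * padded one-character word produces at most 3 trigrams, so 9 trgm
 * slots are reserved.
 */
static int
trgm_capacity(int slen)
{
	return (slen / 2 + 1) * 3;
}
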
Code example #17
File: trgm_op.c Project: dchichkov/postgres
/*
 * Generates trigrams for wildcard search string.
 *
 * Returns array of trigrams that must occur in any string that matches the
 * wildcard string.  For example, given pattern "a%bcd%" the trigrams
 * " a", "bcd" would be extracted.
 */
TRGM *
generate_wildcard_trgm(const char *str, int slen)
{
	TRGM	   *trg;
	char	   *buf,
			   *buf2;
	trgm	   *tptr;
	int			len,
				charlen,
				bytelen;
	const char *eword;

	trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) * 3);
	trg->flag = ARRKEY;
	SET_VARSIZE(trg, TRGMHDRSIZE);

	if (slen + LPADDING + RPADDING < 3 || slen == 0)
		return trg;

	tptr = GETARR(trg);

	buf = palloc(sizeof(char) * (slen + 4));

	/*
	 * Extract trigrams from each substring extracted by get_wildcard_part.
	 */
	eword = str;
	while ((eword = get_wildcard_part(eword, slen - (eword - str),
									  buf, &bytelen, &charlen)) != NULL)
	{
#ifdef IGNORECASE
		buf2 = lowerstr_with_len(buf, bytelen);
		bytelen = strlen(buf2);
#else
		buf2 = buf;
#endif

		/*
		 * count trigrams
		 */
		tptr = make_trigrams(tptr, buf2, bytelen, charlen);
#ifdef IGNORECASE
		pfree(buf2);
#endif
	}

	pfree(buf);

	if ((len = tptr - GETARR(trg)) == 0)
		return trg;

	/*
	 * Make trigrams unique.
	 */
	if (len > 0)
	{
		qsort((void *) GETARR(trg), len, sizeof(trgm), comp_trgm);
		len = unique_array(GETARR(trg), len);
	}

	SET_VARSIZE(trg, CALCGTSIZE(ARRKEY, len));

	return trg;
}
Code example #18
File: xmlnode.c Project: andreypopp/pg_xnode
Datum
xmlnode_to_xmldoc(PG_FUNCTION_ARGS)
{
	XMLCompNodeHdr rootNode,
				rootDoc;
	unsigned int sizeNew,
				dataSizeNew;
	xmlnode		node = (xmlnode) PG_GETARG_VARLENA_P(0);
	xmldoc		document = NULL;
	char	   *docData;
	unsigned int sizeOrig = VARSIZE(node);
	unsigned int dataSizeOrig = sizeOrig - VARHDRSZ;
	char	   *nodeData = (char *) VARDATA(node);

	/*
	 * The new root will start where last value of the array (i.e. offset of
	 * the current root) was so far
	 */
	XMLNodeOffset rootOffsetNew = dataSizeOrig - sizeof(XMLNodeOffset);

	/* Find that 'old last (root offset) value' ... */
	XMLNodeOffset *rootOffPtrOrig = (XMLNodeOffset *) (nodeData + rootOffsetNew);

	/* ... and read it */
	XMLNodeOffset rootOffsetOrig = *rootOffPtrOrig;

	/*
	 * Compute 'relative reference' of the 'old root' that the document ('new
	 * root') will remember
	 */
	XMLNodeOffset dist = rootOffsetNew - rootOffsetOrig;
	XMLNodeOffset *rootOffPtrNew;

	char		bwidth = getXMLNodeOffsetByteWidth(dist);

	rootNode = (XMLCompNodeHdr) (nodeData + rootOffsetOrig);
	if (rootNode->common.kind == XMLNODE_ELEMENT)
	{
		/*
		 * If document should contain only one node, it must be element. See
		 * http://www.w3.org/TR/2008/REC-xml-20081126/#NT-document
		 */
		char	   *refTargPtr;

		sizeNew = sizeOrig + sizeof(XMLCompNodeHdrData) + bwidth;
		dataSizeNew = sizeNew - VARHDRSZ;
		document = (xmldoc) palloc(sizeNew);
		docData = (char *) VARDATA(document);
		memcpy(docData, nodeData, rootOffsetNew);
		rootDoc = (XMLCompNodeHdr) (docData + rootOffsetNew);
		rootDoc->common.kind = XMLNODE_DOC;
		rootDoc->common.flags = 0;
		XNODE_SET_REF_BWIDTH(rootDoc, bwidth);
		rootDoc->children = 1;
		refTargPtr = (char *) rootDoc + sizeof(XMLCompNodeHdrData);
		writeXMLNodeOffset(dist, &refTargPtr, bwidth, false);
		rootOffPtrNew = (XMLNodeOffset *) (docData + dataSizeNew - sizeof(XMLNodeOffset));
		*rootOffPtrNew = rootOffsetNew;
		SET_VARSIZE(document, sizeNew);
	}
	else if (rootNode->common.kind == XMLNODE_DOC_FRAGMENT)
	{
		checkXMLWellFormedness(rootNode);
		document = (xmldoc) palloc(sizeOrig);
		docData = (char *) VARDATA(document);
		memcpy(document, node, sizeOrig);
		rootDoc = (XMLCompNodeHdr) (docData + rootOffsetOrig);
		rootDoc->common.kind = XMLNODE_DOC;
		SET_VARSIZE(document, sizeOrig);
	}
	else
	{
		elog(ERROR, "%s can't be cast to XML document", getXMLNodeKindStr(rootNode->common.kind));
	}
	PG_RETURN_POINTER(document);
}
Code example #19
File: ltree_op.c Project: hqinnus/postgres-9.0.1-fyp
ltree *
lca_inner(ltree **a, int len)
{
    int			tmp,
                num = ((*a)->numlevel) ? (*a)->numlevel - 1 : 0;
    ltree	  **ptr = a + 1;
    int			i,
                reslen = LTREE_HDRSIZE;
    ltree_level *l1,
                *l2;
    ltree	   *res;


    if ((*a)->numlevel == 0)
        return NULL;

    while (ptr - a < len)
    {
        if ((*ptr)->numlevel == 0)
            return NULL;
        else if ((*ptr)->numlevel == 1)
            num = 0;
        else
        {
            l1 = LTREE_FIRST(*a);
            l2 = LTREE_FIRST(*ptr);
            tmp = num;
            num = 0;
            for (i = 0; i < Min(tmp, (*ptr)->numlevel - 1); i++)
            {
                if (l1->len == l2->len && strncmp(l1->name, l2->name, l1->len) == 0)
                    num = i + 1;
                else
                    break;
                l1 = LEVEL_NEXT(l1);
                l2 = LEVEL_NEXT(l2);
            }
        }
        ptr++;
    }

    l1 = LTREE_FIRST(*a);
    for (i = 0; i < num; i++)
    {
        reslen += MAXALIGN(l1->len + LEVEL_HDRSIZE);
        l1 = LEVEL_NEXT(l1);
    }

    res = (ltree *) palloc(reslen);
    SET_VARSIZE(res, reslen);
    res->numlevel = num;

    l1 = LTREE_FIRST(*a);
    l2 = LTREE_FIRST(res);

    for (i = 0; i < num; i++)
    {
        memcpy(l2, l1, MAXALIGN(l1->len + LEVEL_HDRSIZE));
        l1 = LEVEL_NEXT(l1);
        l2 = LEVEL_NEXT(l2);
    }

    return res;
}
Code example #20
File: xmlnode.c Project: andreypopp/pg_xnode
Datum
xmlelement(PG_FUNCTION_ARGS)
{
	Datum		nameText;
	ArrayType  *attrs = NULL;
	char	   *elName;
	unsigned int nameLen,
				resSizeMax;
	unsigned int childSize = 0;
	char	   *c,
			   *result,
			   *resData,
			   *resCursor,
			   *nameDst;
	XMLCompNodeHdr element;
	XMLNodeOffset *rootOffPtr;
	bool		nameFirstChar = true;
	char	  **attrNames = NULL;
	char	  **attrValues = NULL;
	char	   *attrValFlags = NULL;
	XMLNodeHdr *attrNodes = NULL;
	XMLNodeHdr	child = NULL;
	char	  **newNds = NULL;
	char	   *newNd = NULL;
	unsigned int attrCount = 0;
	unsigned int attrsSizeTotal = 0;
	unsigned short childCount = 0;

	if (PG_ARGISNULL(0))
	{
		elog(ERROR, "invalid element name");
	}
	nameText = PG_GETARG_DATUM(0);
	elName = TextDatumGetCString(nameText);

	nameLen = strlen(elName);
	if (nameLen == 0)
	{
		elog(ERROR, "invalid element name");
	}

	if (!PG_ARGISNULL(1))
	{
		int		   *dims;
		Oid			elType,
					arrType;
		int16		arrLen,
					elLen;
		bool		elByVal,
					elIsNull;
		char		elAlign;
		unsigned int i;

		attrs = PG_GETARG_ARRAYTYPE_P(1);
		if (ARR_NDIM(attrs) != 2)
		{
			elog(ERROR, "attributes must be passed in 2 dimensional array");
		}
		dims = ARR_DIMS(attrs);
		if (dims[1] != 2)
		{
			elog(ERROR, "the second dimension of attribute array must be 2");
		}

		attrCount = dims[0];
		Assert(attrCount > 0);

		elType = attrs->elemtype;
		arrType = get_array_type(elType);
		arrLen = get_typlen(arrType);
		Assert(arrType != InvalidOid);
		get_typlenbyvalalign(elType, &elLen, &elByVal, &elAlign);
		attrNames = (char **) palloc(attrCount * sizeof(char *));
		attrValues = (char **) palloc(attrCount * sizeof(char *));
		attrValFlags = (bool *) palloc(attrCount * sizeof(char));

		for (i = 1; i <= attrCount; i++)
		{
			int			subscrName[] = {i, 1};
			int			subscrValue[] = {i, 2};
			Datum		elDatum;
			char	   *nameStr,
					   *valueStr;
			bool		valueHasRefs = false;

			elDatum = array_ref(attrs, 2, subscrName, arrLen, elLen, elByVal, elAlign, &elIsNull);
			if (elIsNull)
			{
				elog(ERROR, "attribute name must not be null");
			}
			nameStr = text_to_cstring(DatumGetTextP(elDatum));
			if (strlen(nameStr) == 0)
			{
				elog(ERROR, "attribute name must be a string of non-zero length");
			}
			else
			{					/* Check validity of characters. */
				char	   *c = nameStr;
				int			cWidth = pg_utf_mblen((unsigned char *) c);

				if (!XNODE_VALID_NAME_START(c))
				{
					elog(ERROR, "attribute name starts with invalid character");
				}
				do
				{
					c += cWidth;
					cWidth = pg_utf_mblen((unsigned char *) c);
				} while (XNODE_VALID_NAME_CHAR(c));
				if (*c != '\0')
				{
					elog(ERROR, "invalid character in attribute name");
				}
			}

			/* Check uniqueness of the attribute name. */
			if (i > 1)
			{
				unsigned short j;

				for (j = 0; j < (i - 1); j++)
				{
					if (strcmp(nameStr, attrNames[j]) == 0)
					{
						elog(ERROR, "attribute name '%s' is not unique", nameStr);
					}
				}
			}

			elDatum = array_ref(attrs, 2, subscrValue, arrLen, elLen, elByVal, elAlign, &elIsNull);
			if (elIsNull)
			{
				elog(ERROR, "attribute value must not be null");
			}
			valueStr = text_to_cstring(DatumGetTextP(elDatum));

			attrValFlags[i - 1] = 0;

			if (strlen(valueStr) > 0)
			{
				XMLNodeParserStateData state;
				char	   *valueStrOrig = valueStr;

				/* Parse the value and check validity. */
				initXMLParserState(&state, valueStr, true);
				valueStr = readXMLAttValue(&state, true, &valueHasRefs);

				/*
				 * If the value contains quotation mark, then apostrophe is
				 * the delimiter.
				 */
				if (strchr(valueStr, XNODE_CHAR_QUOTMARK) != NULL)
				{
					attrValFlags[i - 1] |= XNODE_ATTR_APOSTROPHE;
				}
				finalizeXMLParserState(&state);
				pfree(valueStrOrig);
			}

			attrNames[i - 1] = nameStr;
			attrValues[i - 1] = valueStr;
			if (valueHasRefs)
			{
				attrValFlags[i - 1] |= XNODE_ATTR_CONTAINS_REF;
			}
			attrsSizeTotal += sizeof(XMLNodeHdrData) + strlen(nameStr) + strlen(valueStr) + 2;
		}
	}

	if (!PG_ARGISNULL(2))
	{
		Datum		childNodeDatum = PG_GETARG_DATUM(2);
		xmlnode		childRaw = (xmlnode) PG_DETOAST_DATUM(childNodeDatum);

		child = XNODE_ROOT(childRaw);
		if (child->kind == XMLNODE_DOC_FRAGMENT)
		{
			childSize = getXMLNodeSize(child, true) - getXMLNodeSize(child, false);
		}
		else
		{
			childSize = getXMLNodeSize(child, true);
		}
	}

	/* Make sure the element name is valid. */
	c = elName;
	while (*c != '\0')
	{
		if ((nameFirstChar && !XNODE_VALID_NAME_START(c)) || (!nameFirstChar && !XNODE_VALID_NAME_CHAR(c)))
		{
			elog(ERROR, "unrecognized character '%c' in element name", *c);
		}
		if (nameFirstChar)
		{
			nameFirstChar = false;
		}
		c += pg_utf_mblen((unsigned char *) c);
	};

	if (child != NULL)
	{
		if (child->kind == XMLNODE_DOC_FRAGMENT)
		{
			childCount = ((XMLCompNodeHdr) child)->children;
		}
		else
		{
			childCount = 1;
		}
	}

	/*
	 * It's hard to determine the byte width of references until the copying
	 * has finished. Therefore we assume the worst case: 4 bytes per
	 * reference.
	 */
	resSizeMax = VARHDRSZ + attrsSizeTotal + childSize + (attrCount + childCount) * 4 +
		sizeof(XMLCompNodeHdrData) + nameLen + 1 + sizeof(XMLNodeOffset);
	result = (char *) palloc(resSizeMax);
	resCursor = resData = VARDATA(result);

	if (attrCount > 0)
	{							/* Copy attributes. */
		unsigned short i;

		Assert(attrNames != NULL && attrValues != NULL && attrValFlags != NULL);

		attrNodes = (XMLNodeHdr *) palloc(attrCount * sizeof(XMLNodeHdr));
		for (i = 0; i < attrCount; i++)
		{
			XMLNodeHdr	attrNode = (XMLNodeHdr) resCursor;
			char	   *name = attrNames[i];
			unsigned int nameLen = strlen(name);
			char	   *value = attrValues[i];
			unsigned int valueLen = strlen(value);

			attrNodes[i] = attrNode;
			attrNode->kind = XMLNODE_ATTRIBUTE;
			attrNode->flags = attrValFlags[i];

			if (xmlAttrValueIsNumber(value))
			{
				attrNode->flags |= XNODE_ATTR_NUMBER;
			}

			resCursor = XNODE_CONTENT(attrNode);
			memcpy(resCursor, name, nameLen);
			resCursor += nameLen;
			*(resCursor++) = '\0';
			pfree(name);

			memcpy(resCursor, value, valueLen);
			resCursor += valueLen;
			*(resCursor++) = '\0';
			pfree(value);
		}
		pfree(attrNames);
		pfree(attrValues);
		pfree(attrValFlags);
	}

	if (child != NULL)
	{
		XMLNodeKind k = child->kind;

		/*
		 * Check if the node to be inserted is of a valid kind. If the node is
		 * document fragment, it's assumed that invalid node kinds are never
		 * added. Otherwise we'd have to check the node fragment (recursively)
		 * not only here.
		 */
		if (k != XMLNODE_DOC_FRAGMENT)
		{
			if (k == XMLNODE_DOC || k == XMLNODE_DTD || k == XMLNODE_ATTRIBUTE)
			{
				elog(ERROR, "the nested node must not be %s", getXMLNodeKindStr(k));
			}
		}
		copyXMLNodeOrDocFragment(child, childSize, &resCursor, &newNd, &newNds);
	}

	element = (XMLCompNodeHdr) resCursor;
	element->common.kind = XMLNODE_ELEMENT;
	element->common.flags = (child == NULL) ? XNODE_EMPTY : 0;
	element->children = attrCount + childCount;

	if (childCount > 0 || attrCount > 0)
	{
		XMLNodeOffset childOff,
					childOffMax;
		char		bwidth;
		char	   *refPtr;

		/* Save relative offset(s) of the child node(s). */

		if (attrCount > 0)
		{
			childOffMax = (char *) element - resData;
		}
		else if (childCount > 0)
		{
			if (child->kind == XMLNODE_DOC_FRAGMENT)
			{
				Assert(newNds != NULL);
				childOffMax = (char *) element - newNds[0];
			}
			else
			{
				childOffMax = (char *) element - newNd;
			}
		}
		else
		{
			childOffMax = 0;
		}
		bwidth = getXMLNodeOffsetByteWidth(childOffMax);
		XNODE_SET_REF_BWIDTH(element, bwidth);

		refPtr = XNODE_FIRST_REF(element);

		if (attrCount > 0)
		{
			unsigned short i;

			/* The attribute references first... */
			for (i = 0; i < attrCount; i++)
			{
				XMLNodeHdr	node = attrNodes[i];

				childOff = (char *) element - (char *) node;
				writeXMLNodeOffset(childOff, &refPtr, bwidth, true);
			}
			pfree(attrNodes);
		}


		if (childCount > 0)
		{
			/* ...followed by those of the other children. */
			if (child->kind == XMLNODE_DOC_FRAGMENT)
			{
				unsigned short i;

				for (i = 0; i < childCount; i++)
				{
					childOff = (char *) element - newNds[i];
					writeXMLNodeOffset(childOff, &refPtr, bwidth, true);
				}
				pfree(newNds);
			}
			else
			{
				childOff = (char *) element - newNd;
				writeXMLNodeOffset(childOff, &refPtr, bwidth, true);
			}
		}
	}

	/* And finally set the element name. */
	nameDst = XNODE_ELEMENT_NAME(element);
	memcpy(nameDst, elName, nameLen);
	nameDst[nameLen] = '\0';
	resCursor = nameDst + strlen(elName) + 1;

	SET_VARSIZE(result, (char *) resCursor - result + sizeof(XMLNodeOffset));
	rootOffPtr = XNODE_ROOT_OFFSET_PTR(result);
	*rootOffPtr = (char *) element - resData;
	PG_RETURN_POINTER(result);
}
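The reference byte width above is supplied by getXMLNodeOffsetByteWidth(), whose body is not shown here. As a minimal sketch, assuming the width is simply the smallest number of bytes able to hold the largest relative offset (the helper name and the 1..4-byte tiers are illustrative assumptions, not the project's actual implementation):

#include <stdint.h>

/* Hypothetical helper: smallest byte width that can represent maxOffset. */
static char
offset_byte_width_sketch(uint32_t maxOffset)
{
	if (maxOffset <= 0xFF)
		return 1;				/* one byte is enough */
	if (maxOffset <= 0xFFFF)
		return 2;				/* two bytes */
	if (maxOffset <= 0xFFFFFF)
		return 3;				/* three bytes */
	return 4;					/* full four-byte offset */
}

With no attributes and a single small child, childOffMax is just the distance from the child's start to the element header, so one-byte references are the common case.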
Code example #21
0
File: mvdistinct.c Project: adityavs/postgres
/*
 * statext_ndistinct_serialize
 *		serialize ndistinct to the on-disk bytea format
 */
bytea *
statext_ndistinct_serialize(MVNDistinct *ndistinct)
{
	int			i;
	bytea	   *output;
	char	   *tmp;
	Size		len;

	Assert(ndistinct->magic == STATS_NDISTINCT_MAGIC);
	Assert(ndistinct->type == STATS_NDISTINCT_TYPE_BASIC);

	/*
	 * Base size is size of scalar fields in the struct, plus one base struct
	 * for each item, including number of items for each.
	 */
	len = VARHDRSZ + SizeOfMVNDistinct +
		ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) + sizeof(int));

	/* and also include space for the actual attribute numbers */
	for (i = 0; i < ndistinct->nitems; i++)
	{
		int			nmembers;

		nmembers = bms_num_members(ndistinct->items[i].attrs);
		Assert(nmembers >= 2);
		len += sizeof(AttrNumber) * nmembers;
	}

	output = (bytea *) palloc(len);
	SET_VARSIZE(output, len);

	tmp = VARDATA(output);

	/* Store the base struct values (magic, type, nitems) */
	memcpy(tmp, &ndistinct->magic, sizeof(uint32));
	tmp += sizeof(uint32);
	memcpy(tmp, &ndistinct->type, sizeof(uint32));
	tmp += sizeof(uint32);
	memcpy(tmp, &ndistinct->nitems, sizeof(uint32));
	tmp += sizeof(uint32);

	/*
	 * store number of attributes and attribute numbers for each ndistinct
	 * entry
	 */
	for (i = 0; i < ndistinct->nitems; i++)
	{
		MVNDistinctItem item = ndistinct->items[i];
		int			nmembers = bms_num_members(item.attrs);
		int			x;

		memcpy(tmp, &item.ndistinct, sizeof(double));
		tmp += sizeof(double);
		memcpy(tmp, &nmembers, sizeof(int));
		tmp += sizeof(int);

		x = -1;
		while ((x = bms_next_member(item.attrs, x)) >= 0)
		{
			AttrNumber	value = (AttrNumber) x;

			memcpy(tmp, &value, sizeof(AttrNumber));
			tmp += sizeof(AttrNumber);
		}

		Assert(tmp <= ((char *) output + len));
	}

	return output;
}
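For orientation, a minimal sketch of walking the payload laid out above (this is not PostgreSQL's statext_ndistinct_deserialize; it merely mirrors the memcpy sequence of the serializer and assumes AttrNumber is a 16-bit integer):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/*
 * Walk the bytes written after the varlena header: magic, type, nitems,
 * then (ndistinct, nmembers, attnums[]) for each item.  'buf' points at
 * what the serializer filled in starting at VARDATA(output).
 */
static void
print_ndistinct_payload(const char *buf)
{
	uint32_t	magic, type, nitems, i;

	memcpy(&magic, buf, sizeof(uint32_t)); buf += sizeof(uint32_t);
	memcpy(&type, buf, sizeof(uint32_t)); buf += sizeof(uint32_t);
	memcpy(&nitems, buf, sizeof(uint32_t)); buf += sizeof(uint32_t);

	printf("magic=%08x type=%u nitems=%u\n", magic, type, nitems);

	for (i = 0; i < nitems; i++)
	{
		double		ndistinct;
		int			nmembers, j;

		memcpy(&ndistinct, buf, sizeof(double)); buf += sizeof(double);
		memcpy(&nmembers, buf, sizeof(int)); buf += sizeof(int);

		printf("  item %u: ndistinct=%g attnums:", i, ndistinct);
		for (j = 0; j < nmembers; j++)
		{
			int16_t		attnum;	/* AttrNumber is a 16-bit type */

			memcpy(&attnum, buf, sizeof(int16_t)); buf += sizeof(int16_t);
			printf(" %d", attnum);
		}
		printf("\n");
	}
}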
Code example #22
0
File: _int_bool.c Project: markwkm/postgres
/*
 * input
 */
Datum
bqarr_in(PG_FUNCTION_ARGS)
{
	char	   *buf = (char *) PG_GETARG_POINTER(0);
	WORKSTATE	state;
	int4		i;
	QUERYTYPE  *query;
	int4		commonlen;
	ITEM	   *ptr;
	NODE	   *tmp;
	int4		pos = 0;

#ifdef BS_DEBUG
	StringInfoData pbuf;
#endif

	state.buf = buf;
	state.state = WAITOPERAND;
	state.count = 0;
	state.num = 0;
	state.str = NULL;

	/* make polish notation (postfix, but in reverse order) */
	makepol(&state);
	if (!state.num)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("empty query")));

	commonlen = COMPUTESIZE(state.num);
	query = (QUERYTYPE *) palloc(commonlen);
	SET_VARSIZE(query, commonlen);
	query->size = state.num;
	ptr = GETQUERY(query);

	for (i = state.num - 1; i >= 0; i--)
	{
		ptr[i].type = state.str->type;
		ptr[i].val = state.str->val;
		tmp = state.str->next;
		pfree(state.str);
		state.str = tmp;
	}

	pos = query->size - 1;
	findoprnd(ptr, &pos);
#ifdef BS_DEBUG
	initStringInfo(&pbuf);
	for (i = 0; i < query->size; i++)
	{
		if (ptr[i].type == OPR)
			appendStringInfo(&pbuf, "%c(%d) ", ptr[i].val, ptr[i].left);
		else
			appendStringInfo(&pbuf, "%d ", ptr[i].val);
	}
	elog(DEBUG3, "POR: %s", pbuf.data);
	pfree(pbuf.data);
#endif

	PG_RETURN_POINTER(query);
}
Code example #23
0
File: tuptoaster.c Project: LittleForker/postgres
/* ----------
 * toast_save_datum -
 *
 *	Save one single datum into the secondary relation and return
 *	a Datum reference for it.
 * ----------
 */
static Datum
toast_save_datum(Relation rel, Datum value, int options)
{
	Relation	toastrel;
	Relation	toastidx;
	HeapTuple	toasttup;
	TupleDesc	toasttupDesc;
	Datum		t_values[3];
	bool		t_isnull[3];
	CommandId	mycid = GetCurrentCommandId(true);
	struct varlena *result;
	struct varatt_external toast_pointer;
	struct
	{
		struct varlena hdr;
		char		data[TOAST_MAX_CHUNK_SIZE]; /* make struct big enough */
		int32		align_it;	/* ensure struct is aligned well enough */
	}			chunk_data;
	int32		chunk_size;
	int32		chunk_seq = 0;
	char	   *data_p;
	int32		data_todo;
	Pointer		dval = DatumGetPointer(value);

	/*
	 * Open the toast relation and its index.  We can use the index to check
	 * uniqueness of the OID we assign to the toasted item, even though it has
	 * additional columns besides OID.
	 */
	toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock);
	toasttupDesc = toastrel->rd_att;
	toastidx = index_open(toastrel->rd_rel->reltoastidxid, RowExclusiveLock);

	/*
	 * Get the data pointer and length, and compute va_rawsize and va_extsize.
	 *
	 * va_rawsize is the size of the equivalent fully uncompressed datum, so
	 * we have to adjust for short headers.
	 *
	 * va_extsize is the actual size of the data payload in the toast records.
	 */
	if (VARATT_IS_SHORT(dval))
	{
		data_p = VARDATA_SHORT(dval);
		data_todo = VARSIZE_SHORT(dval) - VARHDRSZ_SHORT;
		toast_pointer.va_rawsize = data_todo + VARHDRSZ;		/* as if not short */
		toast_pointer.va_extsize = data_todo;
	}
	else if (VARATT_IS_COMPRESSED(dval))
	{
		data_p = VARDATA(dval);
		data_todo = VARSIZE(dval) - VARHDRSZ;
		/* rawsize in a compressed datum is just the size of the payload */
		toast_pointer.va_rawsize = VARRAWSIZE_4B_C(dval) + VARHDRSZ;
		toast_pointer.va_extsize = data_todo;
		/* Assert that the numbers look like it's compressed */
		Assert(VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer));
	}
	else
	{
		data_p = VARDATA(dval);
		data_todo = VARSIZE(dval) - VARHDRSZ;
		toast_pointer.va_rawsize = VARSIZE(dval);
		toast_pointer.va_extsize = data_todo;
	}

	/*
	 * Insert the correct table OID into the result TOAST pointer.
	 *
	 * Normally this is the actual OID of the target toast table, but during
	 * table-rewriting operations such as CLUSTER, we have to insert the OID
	 * of the table's real permanent toast table instead.  rd_toastoid is set
	 * if we have to substitute such an OID.
	 */
	if (OidIsValid(rel->rd_toastoid))
		toast_pointer.va_toastrelid = rel->rd_toastoid;
	else
		toast_pointer.va_toastrelid = RelationGetRelid(toastrel);

	/*
	 * Choose an unused OID within the toast table for this toast value.
	 */
	toast_pointer.va_valueid = GetNewOidWithIndex(toastrel,
												  RelationGetRelid(toastidx),
												  (AttrNumber) 1);

	/*
	 * Initialize constant parts of the tuple data
	 */
	t_values[0] = ObjectIdGetDatum(toast_pointer.va_valueid);
	t_values[2] = PointerGetDatum(&chunk_data);
	t_isnull[0] = false;
	t_isnull[1] = false;
	t_isnull[2] = false;

	/*
	 * Split up the item into chunks
	 */
	while (data_todo > 0)
	{
		/*
		 * Calculate the size of this chunk
		 */
		chunk_size = Min(TOAST_MAX_CHUNK_SIZE, data_todo);

		/*
		 * Build a tuple and store it
		 */
		t_values[1] = Int32GetDatum(chunk_seq++);
		SET_VARSIZE(&chunk_data, chunk_size + VARHDRSZ);
		memcpy(VARDATA(&chunk_data), data_p, chunk_size);
		toasttup = heap_form_tuple(toasttupDesc, t_values, t_isnull);

		heap_insert(toastrel, toasttup, mycid, options, NULL);

		/*
		 * Create the index entry.	We cheat a little here by not using
		 * FormIndexDatum: this relies on the knowledge that the index columns
		 * are the same as the initial columns of the table.
		 *
		 * Note also that there had better not be any user-created index on
		 * the TOAST table, since we don't bother to update anything else.
		 */
		index_insert(toastidx, t_values, t_isnull,
					 &(toasttup->t_self),
					 toastrel,
					 toastidx->rd_index->indisunique ?
					 UNIQUE_CHECK_YES : UNIQUE_CHECK_NO);

		/*
		 * Free memory
		 */
		heap_freetuple(toasttup);

		/*
		 * Move on to next chunk
		 */
		data_todo -= chunk_size;
		data_p += chunk_size;
	}

	/*
	 * Done - close toast relation
	 */
	index_close(toastidx, RowExclusiveLock);
	heap_close(toastrel, RowExclusiveLock);

	/*
	 * Create the TOAST pointer value that we'll return
	 */
	result = (struct varlena *) palloc(TOAST_POINTER_SIZE);
	SET_VARSIZE_EXTERNAL(result, TOAST_POINTER_SIZE);
	memcpy(VARDATA_EXTERNAL(result), &toast_pointer, sizeof(toast_pointer));

	return PointerGetDatum(result);
}
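The loop above just slices the payload into TOAST_MAX_CHUNK_SIZE pieces. A small stand-alone sketch of the resulting chunk count and the size of the final (possibly short) chunk, using plain arithmetic with max_chunk standing in for TOAST_MAX_CHUNK_SIZE:

#include <stdint.h>

/* How many chunk rows a payload of 'payload' bytes produces, and how large
 * the last chunk is.  Matches the while-loop above: full chunks first, then
 * whatever remains. */
static void
toast_chunking_sketch(int32_t payload, int32_t max_chunk,
					  int32_t *nchunks, int32_t *last_chunk_size)
{
	if (payload <= 0)
	{
		*nchunks = 0;
		*last_chunk_size = 0;
		return;
	}
	*nchunks = (payload + max_chunk - 1) / max_chunk;	/* round up */
	*last_chunk_size = payload - (*nchunks - 1) * max_chunk;
}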
Code example #24
0
File: nseq_io.c Project: ergo70/nseq
static NSEQ *make_nseq(const char* sequence, const size_t seqlen, const bool isRNA)
{
    NSEQ *retval = NULL;
    char *buffer = NULL;
    char tmp;
    uint32 i, offset = 0;
    uint32 bufsize = seqlen / BLOCKSIZE;
    //COMPRESSED_DATA *compressed_data;
    int32 histogram[HISTSZ] = {0,0,0,0};
    bool run = true;

    if((seqlen % BLOCKSIZE) != 0)
    {
        bufsize += 1;
    }

    buffer = (char*) palloc0(bufsize*sizeof(char));

    while (run && (offset < bufsize))
    {
        /* Stop at the terminating NUL so we never read past the end of the input. */
        for(i = 0; run && (i < BLOCKSIZE); i++)
        {
            tmp = toupper(sequence[(offset*BLOCKSIZE)+i]);

            switch(tmp)
            {
            case 'A':
                buffer[offset] |= (0x0 << (i*2));
                histogram[0]++;
                break;
            case 'C':
                buffer[offset] |= (0x1 << (i*2));
                histogram[1]++;
                break;
            case 'G':
                buffer[offset] |= (0x2 << (i*2));
                histogram[2]++;
                break;
            case 'U':
                if(isRNA)
                {
                    buffer[offset] |= (0x3 << (i*2));
                    histogram[3]++;
                }
                else
                {
                    elog(ERROR, "Unknown nucleotide for DNA: %c\n", tmp);
                    run = false;
                }
                break;
            case 'T':
                if(isRNA)
                {
                    elog(ERROR, "Unknown nucleotide for RNA: %c\n", tmp);
                    run = false;
                }
                else
                {
                    buffer[offset] |= (0x3 << (i*2));
                    histogram[3]++;
                }
                break;
            case '\0':
                run = false;
                break;
            }
        }
        offset++;
    }

    //compressed_data = compress_data(buffer, bufsize);

    retval = palloc(CALCDATASZ(bufsize));

    retval->rna = isRNA;
    retval->size = seqlen;
    retval->compressed_size = bufsize;
    memcpy(retval->histogram, &histogram, sizeof(histogram));

    memcpy(DATAPTR(retval), buffer, bufsize);

    SET_VARSIZE (retval,CALCDATASZ(bufsize));

    //elog(INFO,"make %d %d", seqlen, bufsize);

    return retval;
}
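A complementary sketch of reading the 2-bit packing back out (this is not the extension's own output routine; it assumes BLOCKSIZE packs four bases per byte, which is what the i*2 shifts above imply, and uses the same code assignment 0=A, 1=C, 2=G, 3=T/U):

#include <stddef.h>

/* Decode 'seqlen' bases from the packed buffer produced by the encoder above.
 * 'dst' must have room for seqlen + 1 bytes. */
static void
decode_nseq_sketch(const char *packed, size_t seqlen, int is_rna, char *dst)
{
    static const char dna[4] = {'A', 'C', 'G', 'T'};
    static const char rna[4] = {'A', 'C', 'G', 'U'};
    const char *alphabet = is_rna ? rna : dna;
    size_t i;

    for (i = 0; i < seqlen; i++)
    {
        unsigned char b = (unsigned char) packed[i / 4];

        dst[i] = alphabet[(b >> ((i % 4) * 2)) & 0x3];
    }
    dst[seqlen] = '\0';
}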
Code example #25
0
File: tuptoaster.c Project: LittleForker/postgres
/* ----------
 * toast_fetch_datum_slice -
 *
 *	Reconstruct a segment of a Datum from the chunks saved
 *	in the toast relation
 * ----------
 */
static struct varlena *
toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length)
{
	Relation	toastrel;
	Relation	toastidx;
	ScanKeyData toastkey[3];
	int			nscankeys;
	SysScanDesc toastscan;
	HeapTuple	ttup;
	TupleDesc	toasttupDesc;
	struct varlena *result;
	struct varatt_external toast_pointer;
	int32		attrsize;
	int32		residx;
	int32		nextidx;
	int			numchunks;
	int			startchunk;
	int			endchunk;
	int32		startoffset;
	int32		endoffset;
	int			totalchunks;
	Pointer		chunk;
	bool		isnull;
	char	   *chunkdata;
	int32		chunksize;
	int32		chcpystrt;
	int32		chcpyend;

	Assert(VARATT_IS_EXTERNAL(attr));

	/* Must copy to access aligned fields */
	VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);

	/*
	 * It's nonsense to fetch slices of a compressed datum -- this isn't lo_*;
	 * we can't return a compressed datum that is meaningful to toast later.
	 */
	Assert(!VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer));

	attrsize = toast_pointer.va_extsize;
	totalchunks = ((attrsize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;

	if (sliceoffset >= attrsize)
	{
		sliceoffset = 0;
		length = 0;
	}

	if (((sliceoffset + length) > attrsize) || length < 0)
		length = attrsize - sliceoffset;

	result = (struct varlena *) palloc(length + VARHDRSZ);

	if (VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer))
		SET_VARSIZE_COMPRESSED(result, length + VARHDRSZ);
	else
		SET_VARSIZE(result, length + VARHDRSZ);

	if (length == 0)
		return result;			/* Can save a lot of work at this point! */

	startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
	endchunk = (sliceoffset + length - 1) / TOAST_MAX_CHUNK_SIZE;
	numchunks = (endchunk - startchunk) + 1;

	startoffset = sliceoffset % TOAST_MAX_CHUNK_SIZE;
	endoffset = (sliceoffset + length - 1) % TOAST_MAX_CHUNK_SIZE;

	/*
	 * Open the toast relation and its index
	 */
	toastrel = heap_open(toast_pointer.va_toastrelid, AccessShareLock);
	toasttupDesc = toastrel->rd_att;
	toastidx = index_open(toastrel->rd_rel->reltoastidxid, AccessShareLock);

	/*
	 * Setup a scan key to fetch from the index. This is either two keys or
	 * three depending on the number of chunks.
	 */
	ScanKeyInit(&toastkey[0],
				(AttrNumber) 1,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(toast_pointer.va_valueid));

	/*
	 * Use equality condition for one chunk, a range condition otherwise:
	 */
	if (numchunks == 1)
	{
		ScanKeyInit(&toastkey[1],
					(AttrNumber) 2,
					BTEqualStrategyNumber, F_INT4EQ,
					Int32GetDatum(startchunk));
		nscankeys = 2;
	}
	else
	{
		ScanKeyInit(&toastkey[1],
					(AttrNumber) 2,
					BTGreaterEqualStrategyNumber, F_INT4GE,
					Int32GetDatum(startchunk));
		ScanKeyInit(&toastkey[2],
					(AttrNumber) 2,
					BTLessEqualStrategyNumber, F_INT4LE,
					Int32GetDatum(endchunk));
		nscankeys = 3;
	}

	/*
	 * Read the chunks by index
	 *
	 * The index is on (valueid, chunkidx) so they will come in order
	 */
	nextidx = startchunk;
	toastscan = systable_beginscan_ordered(toastrel, toastidx,
										 SnapshotToast, nscankeys, toastkey);
	while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL)
	{
		/*
		 * Have a chunk, extract the sequence number and the data
		 */
		residx = DatumGetInt32(fastgetattr(ttup, 2, toasttupDesc, &isnull));
		Assert(!isnull);
		chunk = DatumGetPointer(fastgetattr(ttup, 3, toasttupDesc, &isnull));
		Assert(!isnull);
		if (!VARATT_IS_EXTENDED(chunk))
		{
			chunksize = VARSIZE(chunk) - VARHDRSZ;
			chunkdata = VARDATA(chunk);
		}
		else if (VARATT_IS_SHORT(chunk))
		{
			/* could happen due to heap_form_tuple doing its thing */
			chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT;
			chunkdata = VARDATA_SHORT(chunk);
		}
		else
		{
			/* should never happen */
			elog(ERROR, "found toasted toast chunk for toast value %u in %s",
				 toast_pointer.va_valueid,
				 RelationGetRelationName(toastrel));
			chunksize = 0;		/* keep compiler quiet */
			chunkdata = NULL;
		}

		/*
		 * Some checks on the data we've found
		 */
		if ((residx != nextidx) || (residx > endchunk) || (residx < startchunk))
			elog(ERROR, "unexpected chunk number %d (expected %d) for toast value %u in %s",
				 residx, nextidx,
				 toast_pointer.va_valueid,
				 RelationGetRelationName(toastrel));
		if (residx < totalchunks - 1)
		{
			if (chunksize != TOAST_MAX_CHUNK_SIZE)
				elog(ERROR, "unexpected chunk size %d (expected %d) in chunk %d of %d for toast value %u in %s when fetching slice",
					 chunksize, (int) TOAST_MAX_CHUNK_SIZE,
					 residx, totalchunks,
					 toast_pointer.va_valueid,
					 RelationGetRelationName(toastrel));
		}
		else if (residx == totalchunks - 1)
		{
			if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != attrsize)
				elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u in %s when fetching slice",
					 chunksize,
					 (int) (attrsize - residx * TOAST_MAX_CHUNK_SIZE),
					 residx,
					 toast_pointer.va_valueid,
					 RelationGetRelationName(toastrel));
		}
		else
			elog(ERROR, "unexpected chunk number %d (out of range %d..%d) for toast value %u in %s",
				 residx,
				 0, totalchunks - 1,
				 toast_pointer.va_valueid,
				 RelationGetRelationName(toastrel));

		/*
		 * Copy the data into proper place in our result
		 */
		chcpystrt = 0;
		chcpyend = chunksize - 1;
		if (residx == startchunk)
			chcpystrt = startoffset;
		if (residx == endchunk)
			chcpyend = endoffset;

		memcpy(VARDATA(result) +
			   (residx * TOAST_MAX_CHUNK_SIZE - sliceoffset) + chcpystrt,
			   chunkdata + chcpystrt,
			   (chcpyend - chcpystrt) + 1);

		nextidx++;
	}

	/*
	 * Final checks that we successfully fetched the datum
	 */
	if (nextidx != (endchunk + 1))
		elog(ERROR, "missing chunk number %d for toast value %u in %s",
			 nextidx,
			 toast_pointer.va_valueid,
			 RelationGetRelationName(toastrel));

	/*
	 * End scan and close relations
	 */
	systable_endscan_ordered(toastscan);
	index_close(toastidx, AccessShareLock);
	heap_close(toastrel, AccessShareLock);

	return result;
}
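The slice arithmetic at the top of the function is the interesting part. Isolated as a stand-alone sketch (length is assumed to be positive, since the function returns early for a zero-length slice; max_chunk stands in for TOAST_MAX_CHUNK_SIZE):

#include <stdint.h>

/* Which chunks cover a (sliceoffset, length) request, and the byte offsets
 * inside the first and last of those chunks -- the same divisions and
 * modulos used by toast_fetch_datum_slice. */
static void
slice_chunk_bounds(int32_t sliceoffset, int32_t length, int32_t max_chunk,
				   int32_t *startchunk, int32_t *endchunk,
				   int32_t *startoffset, int32_t *endoffset)
{
	*startchunk = sliceoffset / max_chunk;
	*endchunk = (sliceoffset + length - 1) / max_chunk;
	*startoffset = sliceoffset % max_chunk;
	*endoffset = (sliceoffset + length - 1) % max_chunk;
}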
Code example #26
0
File: regexp.c Project: BioBD/Hypothetical_Indexes
/*
 * similar_escape()
 * Convert a SQL:2008 regexp pattern to POSIX style, so it can be used by
 * our regexp engine.
 */
Datum
similar_escape(PG_FUNCTION_ARGS)
{
	text	   *pat_text;
	text	   *esc_text;
	text	   *result;
	char	   *p,
			   *e,
			   *r;
	int			plen,
				elen;
	bool		afterescape = false;
	bool		incharclass = false;
	int			nquotes = 0;

	/* This function is not strict, so must test explicitly */
	if (PG_ARGISNULL(0))
		PG_RETURN_NULL();
	pat_text = PG_GETARG_TEXT_PP(0);
	p = VARDATA_ANY(pat_text);
	plen = VARSIZE_ANY_EXHDR(pat_text);
	if (PG_ARGISNULL(1))
	{
		/* No ESCAPE clause provided; default to backslash as escape */
		e = "\\";
		elen = 1;
	}
	else
	{
		esc_text = PG_GETARG_TEXT_PP(1);
		e = VARDATA_ANY(esc_text);
		elen = VARSIZE_ANY_EXHDR(esc_text);
		if (elen == 0)
			e = NULL;			/* no escape character */
		else if (elen != 1)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
					 errmsg("invalid escape string"),
				  errhint("Escape string must be empty or one character.")));
	}

	/*----------
	 * We surround the transformed input string with
	 *			^(?: ... )$
	 * which requires some explanation.  We need "^" and "$" to force
	 * the pattern to match the entire input string as per SQL99 spec.
	 * The "(?:" and ")" are a non-capturing set of parens; we have to have
	 * parens in case the string contains "|", else the "^" and "$" will
	 * be bound into the first and last alternatives which is not what we
	 * want, and the parens must be non capturing because we don't want them
	 * to count when selecting output for SUBSTRING.
	 *----------
	 */

	/*
	 * We need room for the prefix/postfix plus as many as 3 output bytes per
	 * input byte; since the input is at most 1GB this can't overflow
	 */
	result = (text *) palloc(VARHDRSZ + 6 + 3 * plen);
	r = VARDATA(result);

	*r++ = '^';
	*r++ = '(';
	*r++ = '?';
	*r++ = ':';

	while (plen > 0)
	{
		char		pchar = *p;

		if (afterescape)
		{
			if (pchar == '"' && !incharclass)	/* for SUBSTRING patterns */
				*r++ = ((nquotes++ % 2) == 0) ? '(' : ')';
			else
			{
				*r++ = '\\';
				*r++ = pchar;
			}
			afterescape = false;
		}
		else if (e && pchar == *e)
		{
			/* SQL99 escape character; do not send to output */
			afterescape = true;
		}
		else if (incharclass)
		{
			if (pchar == '\\')
				*r++ = '\\';
			*r++ = pchar;
			if (pchar == ']')
				incharclass = false;
		}
		else if (pchar == '[')
		{
			*r++ = pchar;
			incharclass = true;
		}
		else if (pchar == '%')
		{
			*r++ = '.';
			*r++ = '*';
		}
		else if (pchar == '_')
			*r++ = '.';
		else if (pchar == '(')
		{
			/* convert to non-capturing parenthesis */
			*r++ = '(';
			*r++ = '?';
			*r++ = ':';
		}
		else if (pchar == '\\' || pchar == '.' ||
				 pchar == '^' || pchar == '$')
		{
			*r++ = '\\';
			*r++ = pchar;
		}
		else
			*r++ = pchar;
		p++, plen--;
	}

	*r++ = ')';
	*r++ = '$';

	SET_VARSIZE(result, r - ((char *) result));

	PG_RETURN_TEXT_P(result);
}
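A few concrete conversions implied by the rewrite rules above, written out by hand for the default backslash escape (illustrative expectations, not output captured from the server):

/* SIMILAR TO pattern  ->  POSIX regex produced by similar_escape() */
static const char *similar_escape_examples[][2] = {
	{"abc",  "^(?:abc)$"},			/* ordinary characters pass through */
	{"50%",  "^(?:50.*)$"},			/* '%' becomes '.*' */
	{"a_c",  "^(?:a.c)$"},			/* '_' becomes '.' */
	{"a.b",  "^(?:a\\.b)$"},		/* regex metacharacters are escaped */
	{"(ab)", "^(?:(?:ab))$"},		/* '(' becomes a non-capturing group */
};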
Code example #27
0
File: tsvector_op.c Project: bocap/postgres
static Datum
tsvector_update_trigger(PG_FUNCTION_ARGS, bool config_column)
{
	TriggerData *trigdata;
	Trigger    *trigger;
	Relation	rel;
	HeapTuple	rettuple = NULL;
	int			tsvector_attr_num,
				i;
	ParsedText	prs;
	Datum		datum;
	bool		isnull;
	text	   *txt;
	Oid			cfgId;

	/* Check call context */
	if (!CALLED_AS_TRIGGER(fcinfo))		/* internal error */
		elog(ERROR, "tsvector_update_trigger: not fired by trigger manager");

	trigdata = (TriggerData *) fcinfo->context;
	if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		elog(ERROR, "tsvector_update_trigger: must be fired for row");
	if (!TRIGGER_FIRED_BEFORE(trigdata->tg_event))
		elog(ERROR, "tsvector_update_trigger: must be fired BEFORE event");

	if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		rettuple = trigdata->tg_trigtuple;
	else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		rettuple = trigdata->tg_newtuple;
	else
		elog(ERROR, "tsvector_update_trigger: must be fired for INSERT or UPDATE");

	trigger = trigdata->tg_trigger;
	rel = trigdata->tg_relation;

	if (trigger->tgnargs < 3)
		elog(ERROR, "tsvector_update_trigger: arguments must be tsvector_field, ts_config, text_field1, ...)");

	/* Find the target tsvector column */
	tsvector_attr_num = SPI_fnumber(rel->rd_att, trigger->tgargs[0]);
	if (tsvector_attr_num == SPI_ERROR_NOATTRIBUTE)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_COLUMN),
				 errmsg("tsvector column \"%s\" does not exist",
						trigger->tgargs[0])));
	if (!IsBinaryCoercible(SPI_gettypeid(rel->rd_att, tsvector_attr_num),
						  TSVECTOROID))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("column \"%s\" is not of tsvector type",
						trigger->tgargs[0])));

	/* Find the configuration to use */
	if (config_column)
	{
		int			config_attr_num;

		config_attr_num = SPI_fnumber(rel->rd_att, trigger->tgargs[1]);
		if (config_attr_num == SPI_ERROR_NOATTRIBUTE)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_COLUMN),
					 errmsg("configuration column \"%s\" does not exist",
							trigger->tgargs[1])));
		if (!IsBinaryCoercible(SPI_gettypeid(rel->rd_att, config_attr_num),
							  REGCONFIGOID))
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("column \"%s\" is not of regconfig type",
							trigger->tgargs[1])));

		datum = SPI_getbinval(rettuple, rel->rd_att, config_attr_num, &isnull);
		if (isnull)
			ereport(ERROR,
					(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
					 errmsg("configuration column \"%s\" must not be null",
							trigger->tgargs[1])));
		cfgId = DatumGetObjectId(datum);
	}
	else
	{
		List	   *names;

		names = stringToQualifiedNameList(trigger->tgargs[1]);
		/* require a schema so that results are not search path dependent */
		if (list_length(names) < 2)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("text search configuration name \"%s\" must be schema-qualified",
							trigger->tgargs[1])));
		cfgId = get_ts_config_oid(names, false);
	}

	/* initialize parse state */
	prs.lenwords = 32;
	prs.curwords = 0;
	prs.pos = 0;
	prs.words = (ParsedWord *) palloc(sizeof(ParsedWord) * prs.lenwords);

	/* find all words in indexable column(s) */
	for (i = 2; i < trigger->tgnargs; i++)
	{
		int			numattr;

		numattr = SPI_fnumber(rel->rd_att, trigger->tgargs[i]);
		if (numattr == SPI_ERROR_NOATTRIBUTE)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_COLUMN),
					 errmsg("column \"%s\" does not exist",
							trigger->tgargs[i])));
		if (!IsBinaryCoercible(SPI_gettypeid(rel->rd_att, numattr), TEXTOID))
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("column \"%s\" is not of a character type",
							trigger->tgargs[i])));

		datum = SPI_getbinval(rettuple, rel->rd_att, numattr, &isnull);
		if (isnull)
			continue;

		txt = DatumGetTextP(datum);

		parsetext(cfgId, &prs, VARDATA(txt), VARSIZE(txt) - VARHDRSZ);

		if (txt != (text *) DatumGetPointer(datum))
			pfree(txt);
	}

	/* make tsvector value */
	if (prs.curwords)
	{
		datum = PointerGetDatum(make_tsvector(&prs));
		rettuple = SPI_modifytuple(rel, rettuple, 1, &tsvector_attr_num,
								   &datum, NULL);
		pfree(DatumGetPointer(datum));
	}
	else
	{
		TSVector	out = palloc(CALCDATASIZE(0, 0));

		SET_VARSIZE(out, CALCDATASIZE(0, 0));
		out->size = 0;
		datum = PointerGetDatum(out);
		rettuple = SPI_modifytuple(rel, rettuple, 1, &tsvector_attr_num,
								   &datum, NULL);
		pfree(prs.words);
	}

	if (rettuple == NULL)		/* internal error */
		elog(ERROR, "tsvector_update_trigger: %d returned by SPI_modifytuple",
			 SPI_result);

	return PointerGetDatum(rettuple);
}
Code example #28
0
File: reloptions.c Project: Distrotech/postgresql
	/*
	 * If CREATE/SET, add new options to array; if RESET, just check that the
	 * user didn't say RESET (option=val).  (Must do this because the grammar
	 * doesn't enforce it.)
	 */
	foreach(cell, defList)
	{
		DefElem    *def = (DefElem *) lfirst(cell);

		if (isReset)
		{
			if (def->arg != NULL)
				ereport(ERROR,
						(errcode(ERRCODE_SYNTAX_ERROR),
					errmsg("RESET must not include values for parameters")));
		}
		else
		{
			text	   *t;
			const char *value;
			Size		len;

			/*
			 * Error out if the namespace is not valid.  A NULL namespace is
			 * always valid.
			 */
			if (def->defnamespace != NULL)
			{
				bool		valid = false;
				int			i;

				if (validnsps)
				{
					for (i = 0; validnsps[i]; i++)
					{
						if (pg_strcasecmp(def->defnamespace,
										  validnsps[i]) == 0)
						{
							valid = true;
							break;
						}
					}
				}

				if (!valid)
					ereport(ERROR,
							(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
							 errmsg("unrecognized parameter namespace \"%s\"",
									def->defnamespace)));
			}

			if (ignoreOids && pg_strcasecmp(def->defname, "oids") == 0)
				continue;

			/* ignore if not in the same namespace */
			if (namspace == NULL)
			{
				if (def->defnamespace != NULL)
					continue;
			}
			else if (def->defnamespace == NULL)
				continue;
			else if (pg_strcasecmp(def->defnamespace, namspace) != 0)
				continue;

			/*
			 * Flatten the DefElem into a text string like "name=arg". If we
			 * have just "name", assume "name=true" is meant.  Note: the
			 * namespace is not output.
			 */
			if (def->arg != NULL)
				value = defGetString(def);
			else
				value = "true";
			len = VARHDRSZ + strlen(def->defname) + 1 + strlen(value);
			/* +1 leaves room for sprintf's trailing null */
			t = (text *) palloc(len + 1);
			SET_VARSIZE(t, len);
			sprintf(VARDATA(t), "%s=%s", def->defname, value);

			astate = accumArrayResult(astate, PointerGetDatum(t),
									  false, TEXTOID,
									  CurrentMemoryContext);
		}
	}
Code example #29
0
File: array_userfuncs.c Project: HBPSP8Repo/NoDB
/*-----------------------------------------------------------------------------
 * array_cat :
 *		concatenate two nD arrays to form an nD array, or
 *		push an (n-1)D array onto the end of an nD array
 *----------------------------------------------------------------------------
 */
Datum
array_cat(PG_FUNCTION_ARGS)
{
	ArrayType  *v1,
			   *v2;
	ArrayType  *result;
	int		   *dims,
			   *lbs,
				ndims,
				nitems,
				ndatabytes,
				nbytes;
	int		   *dims1,
			   *lbs1,
				ndims1,
				nitems1,
				ndatabytes1;
	int		   *dims2,
			   *lbs2,
				ndims2,
				nitems2,
				ndatabytes2;
	int			i;
	char	   *dat1,
			   *dat2;
	bits8	   *bitmap1,
			   *bitmap2;
	Oid			element_type;
	Oid			element_type1;
	Oid			element_type2;
	int32		dataoffset;

	/* Concatenating a null array is a no-op, just return the other input */
	if (PG_ARGISNULL(0))
	{
		if (PG_ARGISNULL(1))
			PG_RETURN_NULL();
		result = PG_GETARG_ARRAYTYPE_P(1);
		PG_RETURN_ARRAYTYPE_P(result);
	}
	if (PG_ARGISNULL(1))
	{
		result = PG_GETARG_ARRAYTYPE_P(0);
		PG_RETURN_ARRAYTYPE_P(result);
	}

	v1 = PG_GETARG_ARRAYTYPE_P(0);
	v2 = PG_GETARG_ARRAYTYPE_P(1);

	element_type1 = ARR_ELEMTYPE(v1);
	element_type2 = ARR_ELEMTYPE(v2);

	/* Check we have matching element types */
	if (element_type1 != element_type2)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("cannot concatenate incompatible arrays"),
				 errdetail("Arrays with element types %s and %s are not "
						   "compatible for concatenation.",
						   format_type_be(element_type1),
						   format_type_be(element_type2))));

	/* OK, use it */
	element_type = element_type1;

	/*----------
	 * We must have one of the following combinations of inputs:
	 * 1) one empty array, and one non-empty array
	 * 2) both arrays empty
	 * 3) two arrays with ndims1 == ndims2
	 * 4) ndims1 == ndims2 - 1
	 * 5) ndims1 == ndims2 + 1
	 *----------
	 */
	ndims1 = ARR_NDIM(v1);
	ndims2 = ARR_NDIM(v2);

	/*
	 * short circuit - if one input array is empty, and the other is not, we
	 * return the non-empty one as the result
	 *
	 * if both are empty, return the first one
	 */
	if (ndims1 == 0 && ndims2 > 0)
		PG_RETURN_ARRAYTYPE_P(v2);

	if (ndims2 == 0)
		PG_RETURN_ARRAYTYPE_P(v1);

	/* the rest fall under rule 3, 4, or 5 */
	if (ndims1 != ndims2 &&
		ndims1 != ndims2 - 1 &&
		ndims1 != ndims2 + 1)
		ereport(ERROR,
				(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
				 errmsg("cannot concatenate incompatible arrays"),
				 errdetail("Arrays of %d and %d dimensions are not "
						   "compatible for concatenation.",
						   ndims1, ndims2)));

	/* get argument array details */
	lbs1 = ARR_LBOUND(v1);
	lbs2 = ARR_LBOUND(v2);
	dims1 = ARR_DIMS(v1);
	dims2 = ARR_DIMS(v2);
	dat1 = ARR_DATA_PTR(v1);
	dat2 = ARR_DATA_PTR(v2);
	bitmap1 = ARR_NULLBITMAP(v1);
	bitmap2 = ARR_NULLBITMAP(v2);
	nitems1 = ArrayGetNItems(ndims1, dims1);
	nitems2 = ArrayGetNItems(ndims2, dims2);
	ndatabytes1 = ARR_SIZE(v1) - ARR_DATA_OFFSET(v1);
	ndatabytes2 = ARR_SIZE(v2) - ARR_DATA_OFFSET(v2);

	if (ndims1 == ndims2)
	{
		/*
		 * resulting array is made up of the elements (possibly arrays
		 * themselves) of the input argument arrays
		 */
		ndims = ndims1;
		dims = (int *) palloc(ndims * sizeof(int));
		lbs = (int *) palloc(ndims * sizeof(int));

		dims[0] = dims1[0] + dims2[0];
		lbs[0] = lbs1[0];

		for (i = 1; i < ndims; i++)
		{
			if (dims1[i] != dims2[i] || lbs1[i] != lbs2[i])
				ereport(ERROR,
						(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
						 errmsg("cannot concatenate incompatible arrays"),
					errdetail("Arrays with differing element dimensions are "
							  "not compatible for concatenation.")));

			dims[i] = dims1[i];
			lbs[i] = lbs1[i];
		}
	}
	else if (ndims1 == ndims2 - 1)
	{
		/*
		 * resulting array has the second argument as the outer array, with
		 * the first argument inserted at the front of the outer dimension
		 */
		ndims = ndims2;
		dims = (int *) palloc(ndims * sizeof(int));
		lbs = (int *) palloc(ndims * sizeof(int));
		memcpy(dims, dims2, ndims * sizeof(int));
		memcpy(lbs, lbs2, ndims * sizeof(int));

		/* increment number of elements in outer array */
		dims[0] += 1;

		/* make sure the added element matches our existing elements */
		for (i = 0; i < ndims1; i++)
		{
			if (dims1[i] != dims[i + 1] || lbs1[i] != lbs[i + 1])
				ereport(ERROR,
						(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
						 errmsg("cannot concatenate incompatible arrays"),
						 errdetail("Arrays with differing dimensions are not "
								   "compatible for concatenation.")));
		}
	}
	else
	{
		/*
		 * (ndims1 == ndims2 + 1)
		 *
		 * resulting array has the first argument as the outer array, with the
		 * second argument appended to the end of the outer dimension
		 */
		ndims = ndims1;
		dims = (int *) palloc(ndims * sizeof(int));
		lbs = (int *) palloc(ndims * sizeof(int));
		memcpy(dims, dims1, ndims * sizeof(int));
		memcpy(lbs, lbs1, ndims * sizeof(int));

		/* increment number of elements in outer array */
		dims[0] += 1;

		/* make sure the added element matches our existing elements */
		for (i = 0; i < ndims2; i++)
		{
			if (dims2[i] != dims[i + 1] || lbs2[i] != lbs[i + 1])
				ereport(ERROR,
						(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
						 errmsg("cannot concatenate incompatible arrays"),
						 errdetail("Arrays with differing dimensions are not "
								   "compatible for concatenation.")));
		}
	}

	/* Do this mainly for overflow checking */
	nitems = ArrayGetNItems(ndims, dims);

	/* build the result array */
	ndatabytes = ndatabytes1 + ndatabytes2;
	if (ARR_HASNULL(v1) || ARR_HASNULL(v2))
	{
		dataoffset = ARR_OVERHEAD_WITHNULLS(ndims, nitems);
		nbytes = ndatabytes + dataoffset;
	}
	else
	{
		dataoffset = 0;			/* marker for no null bitmap */
		nbytes = ndatabytes + ARR_OVERHEAD_NONULLS(ndims);
	}
	result = (ArrayType *) palloc(nbytes);
	SET_VARSIZE(result, nbytes);
	result->ndim = ndims;
	result->dataoffset = dataoffset;
	result->elemtype = element_type;
	memcpy(ARR_DIMS(result), dims, ndims * sizeof(int));
	memcpy(ARR_LBOUND(result), lbs, ndims * sizeof(int));
	/* data area is arg1 then arg2 */
	memcpy(ARR_DATA_PTR(result), dat1, ndatabytes1);
	memcpy(ARR_DATA_PTR(result) + ndatabytes1, dat2, ndatabytes2);
	/* handle the null bitmap if needed */
	if (ARR_HASNULL(result))
	{
		array_bitmap_copy(ARR_NULLBITMAP(result), 0,
						  bitmap1, 0,
						  nitems1);
		array_bitmap_copy(ARR_NULLBITMAP(result), nitems1,
						  bitmap2, 0,
						  nitems2);
	}

	PG_RETURN_ARRAYTYPE_P(result);
}
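The dimension handling above reduces to three cases. A compact stand-alone restatement, ignoring the error checks (a sketch of the rule, not a replacement for the validation the real function performs):

/* Combine array dimensions the way array_cat does: equal dimensionality adds
 * the outer dimensions; an (n-1)-D input contributes one extra element to the
 * n-D side's outer dimension.  Fills dims_out and returns the result ndims. */
static int
combine_dims_sketch(int ndims1, const int *dims1,
					int ndims2, const int *dims2,
					int *dims_out)
{
	int			i;

	if (ndims1 == ndims2)
	{
		dims_out[0] = dims1[0] + dims2[0];
		for (i = 1; i < ndims1; i++)
			dims_out[i] = dims1[i];	/* inner dims must already match */
		return ndims1;
	}
	if (ndims1 == ndims2 - 1)
	{
		for (i = 0; i < ndims2; i++)
			dims_out[i] = dims2[i];
		dims_out[0] += 1;			/* v1 becomes one new element at the front */
		return ndims2;
	}
	/* ndims1 == ndims2 + 1: v2 is appended as one new element at the end */
	for (i = 0; i < ndims1; i++)
		dims_out[i] = dims1[i];
	dims_out[0] += 1;
	return ndims1;
}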
Code example #30
0
Datum
tsvector_concat(PG_FUNCTION_ARGS)
{
	TSVector	in1 = PG_GETARG_TSVECTOR(0);
	TSVector	in2 = PG_GETARG_TSVECTOR(1);
	TSVector	out;
	WordEntry  *ptr;
	WordEntry  *ptr1,
			   *ptr2;
	WordEntryPos *p;
	int			maxpos = 0,
				i,
				j,
				i1,
				i2,
				dataoff;
	char	   *data,
			   *data1,
			   *data2;

	ptr = ARRPTR(in1);
	i = in1->size;
	while (i--)
	{
		if ((j = POSDATALEN(in1, ptr)) != 0)
		{
			p = POSDATAPTR(in1, ptr);
			while (j--)
			{
				if (WEP_GETPOS(*p) > maxpos)
					maxpos = WEP_GETPOS(*p);
				p++;
			}
		}
		ptr++;
	}

	ptr1 = ARRPTR(in1);
	ptr2 = ARRPTR(in2);
	data1 = STRPTR(in1);
	data2 = STRPTR(in2);
	i1 = in1->size;
	i2 = in2->size;
	/* conservative estimate of space needed */
	out = (TSVector) palloc0(VARSIZE(in1) + VARSIZE(in2));
	SET_VARSIZE(out, VARSIZE(in1) + VARSIZE(in2));
	out->size = in1->size + in2->size;
	ptr = ARRPTR(out);
	data = STRPTR(out);
	dataoff = 0;
	while (i1 && i2)
	{
		int			cmp = compareEntry(data1, ptr1, data2, ptr2);

		if (cmp < 0)
		{						/* in1 first */
			ptr->haspos = ptr1->haspos;
			ptr->len = ptr1->len;
			memcpy(data + dataoff, data1 + ptr1->pos, ptr1->len);
			ptr->pos = dataoff;
			dataoff += ptr1->len;
			if (ptr->haspos)
			{
				dataoff = SHORTALIGN(dataoff);
				memcpy(data + dataoff, _POSVECPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
				dataoff += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
			}

			ptr++;
			ptr1++;
			i1--;
		}
		else if (cmp > 0)
		{						/* in2 first */
			ptr->haspos = ptr2->haspos;
			ptr->len = ptr2->len;
			memcpy(data + dataoff, data2 + ptr2->pos, ptr2->len);
			ptr->pos = dataoff;
			dataoff += ptr2->len;
			if (ptr->haspos)
			{
				int			addlen = add_pos(in2, ptr2, out, ptr, maxpos);

				if (addlen == 0)
					ptr->haspos = 0;
				else
				{
					dataoff = SHORTALIGN(dataoff);
					dataoff += addlen * sizeof(WordEntryPos) + sizeof(uint16);
				}
			}

			ptr++;
			ptr2++;
			i2--;
		}
		else
		{
			ptr->haspos = ptr1->haspos | ptr2->haspos;
			ptr->len = ptr1->len;
			memcpy(data + dataoff, data1 + ptr1->pos, ptr1->len);
			ptr->pos = dataoff;
			dataoff += ptr1->len;
			if (ptr->haspos)
			{
				if (ptr1->haspos)
				{
					dataoff = SHORTALIGN(dataoff);
					memcpy(data + dataoff, _POSVECPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
					dataoff += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
					if (ptr2->haspos)
						dataoff += add_pos(in2, ptr2, out, ptr, maxpos) * sizeof(WordEntryPos);
				}
				else	/* must have ptr2->haspos */
				{
					int			addlen = add_pos(in2, ptr2, out, ptr, maxpos);

					if (addlen == 0)
						ptr->haspos = 0;
					else
					{
						dataoff = SHORTALIGN(dataoff);
						dataoff += addlen * sizeof(WordEntryPos) + sizeof(uint16);
					}
				}
			}

			ptr++;
			ptr1++;
			ptr2++;
			i1--;
			i2--;
		}
	}

	while (i1)
	{
		ptr->haspos = ptr1->haspos;
		ptr->len = ptr1->len;
		memcpy(data + dataoff, data1 + ptr1->pos, ptr1->len);
		ptr->pos = dataoff;
		dataoff += ptr1->len;
		if (ptr->haspos)
		{
			dataoff = SHORTALIGN(dataoff);
			memcpy(data + dataoff, _POSVECPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
			dataoff += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
		}

		ptr++;
		ptr1++;
		i1--;
	}

	while (i2)
	{
		ptr->haspos = ptr2->haspos;
		ptr->len = ptr2->len;
		memcpy(data + dataoff, data2 + ptr2->pos, ptr2->len);
		ptr->pos = dataoff;
		dataoff += ptr2->len;
		if (ptr->haspos)
		{
			int			addlen = add_pos(in2, ptr2, out, ptr, maxpos);

			if (addlen == 0)
				ptr->haspos = 0;
			else
			{
				dataoff = SHORTALIGN(dataoff);
				dataoff += addlen * sizeof(WordEntryPos) + sizeof(uint16);
			}
		}

		ptr++;
		ptr2++;
		i2--;
	}

	/*
	 * Instead of checking each offset individually, we check for overflow of
	 * pos fields once at the end.
	 */
	if (dataoff > MAXSTRPOS)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("string is too long for tsvector (%d bytes, max %d bytes)", dataoff, MAXSTRPOS)));

	out->size = ptr - ARRPTR(out);
	SET_VARSIZE(out, CALCDATASIZE(out->size, dataoff));
	if (data != STRPTR(out))
		memmove(STRPTR(out), data, dataoff);

	PG_FREE_IF_COPY(in1, 0);
	PG_FREE_IF_COPY(in2, 1);
	PG_RETURN_POINTER(out);
}