Example #1
Datum 
nb_classify_combine(PG_FUNCTION_ARGS)
{
	HeapTupleHeader    tup;
	TupleDesc          resultDesc;
	HeapTuple          result;
	Datum              resultDatum[3];
	bool               resultNull[3];
	nb_classify_state  state[2];
	float8            *prior_data1;
	float8            *prior_data2;
	int                nclasses;
	int                i;

	/* Need to be called with two arguments */
	if (PG_NARGS() != 2)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify_final called with %d arguments", 
						PG_NARGS())));
	}
	if (PG_ARGISNULL(0) && PG_ARGISNULL(1))
		PG_RETURN_NULL();
	if (PG_ARGISNULL(1))
		PG_RETURN_DATUM(PG_GETARG_DATUM(0));
	if (PG_ARGISNULL(0))
		PG_RETURN_DATUM(PG_GETARG_DATUM(1));


	tup = (fcinfo->context && IsA(fcinfo->context, AggState))
		? PG_GETARG_HEAPTUPLEHEADER(0)
		: PG_GETARG_HEAPTUPLEHEADER_COPY(0);
	get_nb_state(tup, &state[0], 0);
	nclasses = ARR_DIMS(state[0].classes)[0];

	get_nb_state(PG_GETARG_HEAPTUPLEHEADER(1), &state[1], nclasses);

	/* Merge the two accumulator arrays by summing the per-class values */
	prior_data1 = (float8*) ARR_DATA_PTR(state[0].accum);
	prior_data2 = (float8*) ARR_DATA_PTR(state[1].accum);
	nclasses = ARR_DIMS(state[0].classes)[0];
	for (i = 0; i < nclasses; i++)
		prior_data1[i] += prior_data2[i];

	/* Construct the return tuple */
	if (get_call_result_type(fcinfo, NULL, &resultDesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	BlessTupleDesc(resultDesc);
	
	resultDatum[0] = PointerGetDatum(state[0].classes);
	resultDatum[1] = PointerGetDatum(state[0].accum);
	resultDatum[2] = PointerGetDatum(state[0].total);
	resultNull[0]  = false;
	resultNull[1]  = false;
	resultNull[2]  = false;

	result = heap_form_tuple(resultDesc, resultDatum, resultNull);
	PG_RETURN_DATUM(HeapTupleGetDatum(result));
}
Example #2
Datum
pgx_complex_divide(PG_FUNCTION_ARGS) {
    TupleDesc tupdesc;
    HeapTuple tuple;
    double re[2];
    double im[2];
    int i;
    double q;
    Datum datum[2];
    bool isnull[2];
 
    // build a tuple descriptor for our result type 
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("function returning record called in context "
                        "that cannot accept type record")));

    // unwrap values.    
    for (i = 0; i < 2; i++) {
        HeapTupleHeader t = PG_GETARG_HEAPTUPLEHEADER(i);
        bool isnull[2];
        Datum dr, di;

        dr = GetAttributeByName(t, "re", &isnull[0]);
        di = GetAttributeByName(t, "im", &isnull[1]);

        // STRICT prevents the 'complex' value from being null but does
        // not prevent its components from being null.        
        if (isnull[0] || isnull[1]) {
            PG_RETURN_NULL();
        }
        
        re[i] = DatumGetFloat8(dr);
        im[i] = DatumGetFloat8(di);
    }

    // the denominator is the square of the magnitude of the divisor.
    q = re[1] * re[1] + im[1] * im[1];
    
    // should I throw error instead of returning null?
    if (q == 0.0) {
        PG_RETURN_NULL();
    }

    datum[0] = Float8GetDatum((re[0] * re[1] + im[0] * im[1]) / q);
    datum[1] = Float8GetDatum((im[0] * re[1] - im[1] * re[0]) / q);

    // both result components are non-null; set the outer isnull[] explicitly,
    // since the isnull[] declared inside the loop above shadows it and the
    // outer array would otherwise be used uninitialized.
    isnull[0] = false;
    isnull[1] = false;

    BlessTupleDesc(tupdesc);
    tuple = heap_form_tuple(tupdesc, datum, isnull);
 
    PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
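The pgx_complex_* functions in this listing assume a composite SQL type with float8 fields named re and im (that is what GetAttributeByName reads) and a STRICT C-language function declaration. A minimal sketch of the assumed module boilerplate and SQL declarations follows; the SQL-level names and the MODULE_PATHNAME reference are illustrative assumptions, not taken from the pgx source.

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"   /* GetAttributeByName() */

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(pgx_complex_divide);

/*
 * Assumed SQL-level declarations (illustrative only):
 *
 *   CREATE TYPE complex AS (re float8, im float8);
 *
 *   CREATE FUNCTION complex_divide(complex, complex) RETURNS complex
 *       AS 'MODULE_PATHNAME', 'pgx_complex_divide'
 *       LANGUAGE C IMMUTABLE STRICT;
 */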
Example #3
Datum
overpaid(PG_FUNCTION_ARGS)
{
	HeapTupleHeader tuple = PG_GETARG_HEAPTUPLEHEADER(0);
	bool		isnull;
	int32		salary;

	salary = DatumGetInt32(GetAttributeByName(tuple, "salary", &isnull));
	if (isnull)
		PG_RETURN_NULL();
	PG_RETURN_BOOL(salary > 699);
}
Example #4
Datum
pgx_complex_norm(PG_FUNCTION_ARGS) {
    HeapTupleHeader t = PG_GETARG_HEAPTUPLEHEADER(0);
    TupleDesc tupdesc;
    HeapTuple tuple;
    double re;
    double im;
    bool isnull[2];
    Datum datum[2];
    double m;
 
    // build a tuple descriptor for our result type 
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("function returning record called in context "
                        "that cannot accept type record")));
        
    // unwrap values.    
    datum[0] = GetAttributeByName(t, "re", &isnull[0]);
    datum[1] = GetAttributeByName(t, "im", &isnull[1]);

    // STRICT prevents the 'complex' value from being null but does
    // not prevent its components from being null.        
    if (isnull[0] || isnull[1]) {
        PG_RETURN_NULL();
    }
        
    re = DatumGetFloat8(datum[0]);
    im = DatumGetFloat8(datum[1]);

    m = hypot(re, im);
   
    // should I throw error instead of returning null?
    if (m == 0.0) {
        PG_RETURN_NULL();
    } 

    datum[0] = Float8GetDatum(re / m);
    datum[1] = Float8GetDatum(im / m);

    BlessTupleDesc(tupdesc);
    tuple = heap_form_tuple(tupdesc, datum, isnull);
 
    PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
Example #5
Datum
c_overpaid(PG_FUNCTION_ARGS)
{
	HeapTupleHeader t = PG_GETARG_HEAPTUPLEHEADER(0);
	int32		limit = PG_GETARG_INT32(1);
	bool		isnull;
	int32		salary;

	salary = DatumGetInt32(GetAttributeByName(t, "salary", &isnull));
	if (isnull)
		PG_RETURN_BOOL(false);

	/*
	 * Alternatively, we might prefer to do PG_RETURN_NULL() for null salary
	 */

	PG_RETURN_BOOL(salary > limit);
}
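Examples #3 and #5 correspond to the composite-type argument examples in the PostgreSQL documentation. To load either one as a version-1 C function, the module needs the usual fmgr boilerplate plus a CREATE FUNCTION whose first argument is the row type; a sketch, with the shared-library path left as a placeholder:

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"   /* GetAttributeByName() */

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(c_overpaid);

/*
 * Assumed registration, following the documentation example:
 *
 *   CREATE FUNCTION c_overpaid(emp, integer) RETURNS boolean
 *       AS 'DIRECTORY/funcs', 'c_overpaid'
 *       LANGUAGE C STRICT;
 */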
Example #6
File: pipe.c Project: orafce/orafce
Datum
dbms_pipe_pack_message_record(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
	Oid tupType;
	bytea *data;

#if PG_VERSION_NUM >= 120000

	LOCAL_FCINFO(info, 3);

#else

	FunctionCallInfoData info_data;
	FunctionCallInfo info = &info_data;

#endif


	tupType = HeapTupleHeaderGetTypeId(rec);

	/*
	 * Normally one would call record_send() using DirectFunctionCall3,
	 * but that does not work since record_send wants to cache some data
	 * using fcinfo->flinfo->fn_extra.  So we need to pass it our own
	 * flinfo parameter.
	 */
	InitFunctionCallInfoData(*info, fcinfo->flinfo, 3, InvalidOid, NULL, NULL);
	init_args_3(info, PointerGetDatum(rec), ObjectIdGetDatum(tupType), Int32GetDatum(-1));

	data = (bytea*) DatumGetPointer(record_send(info));

	output_buffer = check_buffer(output_buffer, LOCALMSGSZ);
	pack_field(output_buffer, IT_RECORD,
			   VARSIZE(data), VARDATA(data), tupType);

	PG_RETURN_VOID();
}
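init_args_3(), check_buffer() and pack_field() are orafce helpers not shown in this snippet. What init_args_3() has to do follows from the FunctionCallInfo layout change in PostgreSQL 12, where a NullableDatum args[] array replaced the parallel arg[]/argnull[] arrays. A plausible sketch under that assumption, not the actual orafce implementation:

static void
init_args_3(FunctionCallInfo info, Datum arg0, Datum arg1, Datum arg2)
{
#if PG_VERSION_NUM >= 120000
	/* PG 12+: arguments live in NullableDatum pairs */
	info->args[0].value = arg0;
	info->args[0].isnull = false;
	info->args[1].value = arg1;
	info->args[1].isnull = false;
	info->args[2].value = arg2;
	info->args[2].isnull = false;
#else
	/* pre-12: parallel Datum / bool arrays */
	info->arg[0] = arg0;
	info->arg[1] = arg1;
	info->arg[2] = arg2;
	info->argnull[0] = false;
	info->argnull[1] = false;
	info->argnull[2] = false;
#endif
}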
Example #7
Datum
pgx_complex_near(PG_FUNCTION_ARGS) {
    double re[2];
    double im[2];
    double p, q;
    int i;

    // unwrap values.    
    for (i = 0; i < 2; i++) {
        HeapTupleHeader t = PG_GETARG_HEAPTUPLEHEADER(i);
        bool isnull[2];

        Datum dr = GetAttributeByName(t, "re", &isnull[0]);
        Datum di = GetAttributeByName(t, "im", &isnull[1]);

        // STRICT prevents the 'complex' value from being null but does
        // not prevent its components from being null.        
        if (isnull[0] || isnull[1]) {
            PG_RETURN_NULL();
        }
        
        re[i] = DatumGetFloat8(dr);
        im[i] = DatumGetFloat8(di);
    }

    // compute distance between points, distance of points from origin.
    p = hypot(re[0] - re[1], im[0] - im[1]);
    q = hypot(re[0], im[0]) + hypot(re[1], im[1]);
    
    if (q == 0) {
        PG_RETURN_BOOL(1);
    }
    
    // we consider the points 'near' each other if the distance between them is small
    // relative to the size of them. 
    PG_RETURN_BOOL(p / q < 1e-8); 
}
Example #8
/*
 * record_out		- output routine for any composite type.
 */
Datum
record_out(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tuple;
	RecordIOData *my_extra;
	bool		needComma = false;
	int			ncolumns;
	int			i;
	Datum	   *values;
	bool	   *nulls;
	StringInfoData buf;

	/* Extract type info from the tuple itself */
	tupType = HeapTupleHeaderGetTypeId(rec);
	tupTypmod = HeapTupleHeaderGetTypMod(rec);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/* Build a temporary HeapTuple control structure */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = rec;

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	/* Break down the tuple into fields */
	heap_deform_tuple(&tuple, tupdesc, values, nulls);

	/* And build the result string */
	initStringInfo(&buf);

	appendStringInfoChar(&buf, '(');

	for (i = 0; i < ncolumns; i++)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *value;
		char	   *tmp;
		bool		nq;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
			continue;

		if (needComma)
			appendStringInfoChar(&buf, ',');
		needComma = true;

		if (nulls[i])
		{
			/* emit nothing... */
			continue;
		}

		/*
		 * Convert the column value to text
		 */
		if (column_info->column_type != column_type)
		{
			bool		typIsVarlena;

			getTypeOutputInfo(column_type,
							  &column_info->typiofunc,
							  &typIsVarlena);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		value = OutputFunctionCall(&column_info->proc, values[i]);

		/* Detect whether we need double quotes for this value */
		nq = (value[0] == '\0');	/* force quotes for empty string */
		for (tmp = value; *tmp; tmp++)
		{
			char		ch = *tmp;

			if (ch == '"' || ch == '\\' ||
				ch == '(' || ch == ')' || ch == ',' ||
				isspace((unsigned char) ch))
			{
				nq = true;
				break;
			}
		}

		/* And emit the string */
		if (nq)
			appendStringInfoChar(&buf, '"');
		for (tmp = value; *tmp; tmp++)
		{
			char		ch = *tmp;

			if (ch == '"' || ch == '\\')
				appendStringInfoChar(&buf, ch);
			appendStringInfoChar(&buf, ch);
		}
		if (nq)
			appendStringInfoChar(&buf, '"');
	}

	appendStringInfoChar(&buf, ')');

	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	PG_RETURN_CSTRING(buf.data);
}
Example #9
File: hstore_io.c Project: d/gpdb
Datum
hstore_populate_record(PG_FUNCTION_ARGS)
{
	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	HStore	   *hs;
	HEntry	   *entries;
	char	   *ptr;
	HeapTupleHeader rec;
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tuple;
	HeapTuple	rettuple;
	RecordIOData *my_extra;
	int			ncolumns;
	int			i;
	Datum	   *values;
	bool	   *nulls;

	if (!type_is_rowtype(argtype))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("first argument must be a rowtype")));

	if (PG_ARGISNULL(0))
	{
		if (PG_ARGISNULL(1))
			PG_RETURN_NULL();

		rec = NULL;

		/*
		 * have no tuple to look at, so the only source of type info is the
		 * argtype. The lookup_rowtype_tupdesc call below will error out if we
		 * don't have a known composite type oid here.
		 */
		tupType = argtype;
		tupTypmod = -1;
	}
	else
	{
		rec = PG_GETARG_HEAPTUPLEHEADER(0);

		if (PG_ARGISNULL(1))
			PG_RETURN_POINTER(rec);

		/* Extract type info from the tuple itself */
		tupType = HeapTupleHeaderGetTypeId(rec);
		tupTypmod = HeapTupleHeaderGetTypMod(rec);
	}

	hs = PG_GETARG_HS(1);
	entries = ARRPTR(hs);
	ptr = STRPTR(hs);

	/*
	 * if the input hstore is empty, we can only skip the rest if we were
	 * passed in a non-null record, since otherwise there may be issues with
	 * domain nulls.
	 */

	if (HS_COUNT(hs) == 0 && rec)
		PG_RETURN_POINTER(rec);

	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	if (rec)
	{
		/* Build a temporary HeapTuple control structure */
		tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
		ItemPointerSetInvalid(&(tuple.t_self));
		//tuple.t_tableOid = InvalidOid;
		tuple.t_data = rec;
	}

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	if (rec)
	{
		/* Break down the tuple into fields */
		heap_deform_tuple(&tuple, tupdesc, values, nulls);
	}
	else
	{
		for (i = 0; i < ncolumns; ++i)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
		}
	}

	for (i = 0; i < ncolumns; ++i)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *value;
		int			idx;
		int			vallen;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
		{
			nulls[i] = true;
			continue;
		}

		idx = hstoreFindKey(hs, 0,
							NameStr(tupdesc->attrs[i]->attname),
							strlen(NameStr(tupdesc->attrs[i]->attname)));

		/*
		 * we can't just skip here if the key wasn't found since we might have
		 * a domain to deal with. If we were passed in a non-null record
		 * datum, we assume that the existing values are valid (if they're
		 * not, then it's not our fault), but if we were passed in a null,
		 * then every field which we don't populate needs to be run through
		 * the input function just in case it's a domain type.
		 */
		if (idx < 0 && rec)
			continue;

		/*
		 * Prepare to convert the column value from text
		 */
		if (column_info->column_type != column_type)
		{
			getTypeInputInfo(column_type,
							 &column_info->typiofunc,
							 &column_info->typioparam);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		if (idx < 0 || HS_VALISNULL(entries, idx))
		{
			/*
			 * need InputFunctionCall to happen even for nulls, so that domain
			 * checks are done
			 */
			values[i] = InputFunctionCall(&column_info->proc, NULL,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = true;
		}
		else
		{
			vallen = HS_VALLEN(entries, idx);
			value = palloc(1 + vallen);
			memcpy(value, HS_VAL(entries, ptr, idx), vallen);
			value[vallen] = 0;

			values[i] = InputFunctionCall(&column_info->proc, value,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = false;
		}
	}

	rettuple = heap_form_tuple(tupdesc, values, nulls);

	ReleaseTupleDesc(tupdesc);

	PG_RETURN_DATUM(HeapTupleGetDatum(rettuple));
}
Example #10
File: hstore_io.c Project: d/gpdb
Datum
hstore_from_record(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec;
	int4		buflen;
	HStore	   *out;
	Pairs	   *pairs;
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tuple;
	RecordIOData *my_extra;
	int			ncolumns;
	int			i,
				j;
	Datum	   *values;
	bool	   *nulls;

	if (PG_ARGISNULL(0))
	{
		Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);

		/*
		 * have no tuple to look at, so the only source of type info is the
		 * argtype. The lookup_rowtype_tupdesc call below will error out if we
		 * don't have a known composite type oid here.
		 */
		tupType = argtype;
		tupTypmod = -1;

		rec = NULL;
	}
	else
	{
		rec = PG_GETARG_HEAPTUPLEHEADER(0);

		/* Extract type info from the tuple itself */
		tupType = HeapTupleHeaderGetTypeId(rec);
		tupTypmod = HeapTupleHeaderGetTypMod(rec);
	}

	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	pairs = palloc(ncolumns * sizeof(Pairs));

	if (rec)
	{
		/* Build a temporary HeapTuple control structure */
		tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
		ItemPointerSetInvalid(&(tuple.t_self));
		//tuple.t_tableOid = InvalidOid;
		tuple.t_data = rec;

		values = (Datum *) palloc(ncolumns * sizeof(Datum));
		nulls = (bool *) palloc(ncolumns * sizeof(bool));

		/* Break down the tuple into fields */
		heap_deform_tuple(&tuple, tupdesc, values, nulls);
	}
	else
	{
		values = NULL;
		nulls = NULL;
	}

	for (i = 0, j = 0; i < ncolumns; ++i)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *value;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
			continue;

		pairs[j].key = NameStr(tupdesc->attrs[i]->attname);
		pairs[j].keylen = hstoreCheckKeyLen(strlen(NameStr(tupdesc->attrs[i]->attname)));

		if (!nulls || nulls[i])
		{
			pairs[j].val = NULL;
			pairs[j].vallen = 4;
			pairs[j].isnull = true;
			pairs[j].needfree = false;
			++j;
			continue;
		}

		/*
		 * Convert the column value to text
		 */
		if (column_info->column_type != column_type)
		{
			bool		typIsVarlena;

			getTypeOutputInfo(column_type,
							  &column_info->typiofunc,
							  &typIsVarlena);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		value = OutputFunctionCall(&column_info->proc, values[i]);

		pairs[j].val = value;
		pairs[j].vallen = hstoreCheckValLen(strlen(value));
		pairs[j].isnull = false;
		pairs[j].needfree = false;
		++j;
	}

	ncolumns = hstoreUniquePairs(pairs, j, &buflen);

	out = hstorePairs(pairs, ncolumns, buflen);

	ReleaseTupleDesc(tupdesc);

	PG_RETURN_POINTER(out);
}
Example #11
/*
 * SQL function json_populate_recordset
 *
 * set fields in a set of records from the argument json,
 * which must be an array of objects.
 *
 * similar to json_populate_record, but the tuple-building code
 * is pushed down into the semantic action handlers so it's done
 * per object in the array.
 */
Datum
json_populate_recordset(PG_FUNCTION_ARGS)
{
	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	text	   *json = PG_GETARG_TEXT_P(1);
	bool		use_json_as_text = PG_GETARG_BOOL(2);
	ReturnSetInfo *rsi;
	MemoryContext old_cxt;
	Oid			tupType;
	int32		tupTypmod;
	HeapTupleHeader rec;
	TupleDesc	tupdesc;
	RecordIOData *my_extra;
	int			ncolumns;
	JsonLexContext *lex;
	JsonSemAction sem;
	PopulateRecordsetState state;

	if (!type_is_rowtype(argtype))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("first argument must be a rowtype")));

	rsi = (ReturnSetInfo *) fcinfo->resultinfo;

	if (!rsi || !IsA(rsi, ReturnSetInfo) ||
		(rsi->allowedModes & SFRM_Materialize) == 0 ||
		rsi->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that "
						"cannot accept a set")));


	rsi->returnMode = SFRM_Materialize;

	/*
	 * get the tupdesc from the result set info - it must be a record type
	 * because we already checked that arg1 is a record type.
	 */
	(void) get_call_result_type(fcinfo, NULL, &tupdesc);

	state = palloc0(sizeof(populateRecordsetState));
	sem = palloc0(sizeof(jsonSemAction));


	/* make these in a sufficiently long-lived memory context */
	old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory);

	state->ret_tdesc = CreateTupleDescCopy(tupdesc);
	BlessTupleDesc(state->ret_tdesc);
	state->tuple_store =
		tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize,
							  false, work_mem);

	MemoryContextSwitchTo(old_cxt);

	/* if the json is null send back an empty set */
	if (PG_ARGISNULL(1))
		PG_RETURN_NULL();

	if (PG_ARGISNULL(0))
		rec = NULL;
	else
		rec = PG_GETARG_HEAPTUPLEHEADER(0);

	tupType = tupdesc->tdtypeid;
	tupTypmod = tupdesc->tdtypmod;
	ncolumns = tupdesc->natts;

	lex = makeJsonLexContext(json, true);

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	sem->semstate = (void *) state;
	sem->array_start = populate_recordset_array_start;
	sem->array_element_start = populate_recordset_array_element_start;
	sem->scalar = populate_recordset_scalar;
	sem->object_field_start = populate_recordset_object_field_start;
	sem->object_field_end = populate_recordset_object_field_end;
	sem->object_start = populate_recordset_object_start;
	sem->object_end = populate_recordset_object_end;

	state->lex = lex;

	state->my_extra = my_extra;
	state->rec = rec;
	state->use_json_as_text = use_json_as_text;
	state->fn_mcxt = fcinfo->flinfo->fn_mcxt;

	pg_parse_json(lex, sem);

	rsi->setResult = state->tuple_store;
	rsi->setDesc = state->ret_tdesc;

	PG_RETURN_NULL();

}
Example #12
/*
 * SQL function json_populate_record
 *
 * set fields in a record from the argument json
 *
 * Code adapted shamelessly from hstore's populate_record
 * which is in turn partly adapted from record_out.
 *
 * The json is decomposed into a hash table, in which each
 * field in the record is then looked up by name.
 */
Datum
json_populate_record(PG_FUNCTION_ARGS)
{
	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	text	   *json = PG_GETARG_TEXT_P(1);
	bool		use_json_as_text = PG_GETARG_BOOL(2);
	HTAB	   *json_hash;
	HeapTupleHeader rec;
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tuple;
	HeapTuple	rettuple;
	RecordIOData *my_extra;
	int			ncolumns;
	int			i;
	Datum	   *values;
	bool	   *nulls;
	char		fname[NAMEDATALEN];
	JsonHashEntry hashentry;


	if (!type_is_rowtype(argtype))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("first argument must be a rowtype")));

	if (PG_ARGISNULL(0))
	{
		if (PG_ARGISNULL(1))
			PG_RETURN_NULL();

		rec = NULL;

		/*
		 * have no tuple to look at, so the only source of type info is the
		 * argtype. The lookup_rowtype_tupdesc call below will error out if we
		 * don't have a known composite type oid here.
		 */
		tupType = argtype;
		tupTypmod = -1;
	}
	else
	{
		rec = PG_GETARG_HEAPTUPLEHEADER(0);

		if (PG_ARGISNULL(1))
			PG_RETURN_POINTER(rec);

		/* Extract type info from the tuple itself */
		tupType = HeapTupleHeaderGetTypeId(rec);
		tupTypmod = HeapTupleHeaderGetTypMod(rec);
	}

	json_hash = get_json_object_as_hash(json, "json_populate_record", use_json_as_text);

	/*
	 * if the input json is empty, we can only skip the rest if we were passed
	 * in a non-null record, since otherwise there may be issues with domain
	 * nulls.
	 */
	if (hash_get_num_entries(json_hash) == 0 && rec)
		PG_RETURN_POINTER(rec);


	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	if (rec)
	{
		/* Build a temporary HeapTuple control structure */
		tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
		ItemPointerSetInvalid(&(tuple.t_self));
		tuple.t_data = rec;
	}

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	if (rec)
	{
		/* Break down the tuple into fields */
		heap_deform_tuple(&tuple, tupdesc, values, nulls);
	}
	else
	{
		for (i = 0; i < ncolumns; ++i)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
		}
	}

	for (i = 0; i < ncolumns; ++i)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *value;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
		{
			nulls[i] = true;
			continue;
		}

		memset(fname, 0, NAMEDATALEN);
		strncpy(fname, NameStr(tupdesc->attrs[i]->attname), NAMEDATALEN);
		hashentry = hash_search(json_hash, fname, HASH_FIND, NULL);

		/*
		 * we can't just skip here if the key wasn't found since we might have
		 * a domain to deal with. If we were passed in a non-null record
		 * datum, we assume that the existing values are valid (if they're
		 * not, then it's not our fault), but if we were passed in a null,
		 * then every field which we don't populate needs to be run through
		 * the input function just in case it's a domain type.
		 */
		if (hashentry == NULL && rec)
			continue;

		/*
		 * Prepare to convert the column value from text
		 */
		if (column_info->column_type != column_type)
		{
			getTypeInputInfo(column_type,
							 &column_info->typiofunc,
							 &column_info->typioparam);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}
		if (hashentry == NULL || hashentry->isnull)
		{
			/*
			 * need InputFunctionCall to happen even for nulls, so that domain
			 * checks are done
			 */
			values[i] = InputFunctionCall(&column_info->proc, NULL,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = true;
		}
		else
		{
			value = hashentry->val;

			values[i] = InputFunctionCall(&column_info->proc, value,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = false;
		}
	}

	rettuple = heap_form_tuple(tupdesc, values, nulls);

	ReleaseTupleDesc(tupdesc);

	PG_RETURN_DATUM(HeapTupleGetDatum(rettuple));
}
Example #13
Datum
make_tuple_indirect(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
	HeapTupleData tuple;
	int			ncolumns;
	Datum	   *values;
	bool	   *nulls;

	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;

	HeapTuple	newtup;

	int			i;

	MemoryContext old_context;

	/* Extract type info from the tuple itself */
	tupType = HeapTupleHeaderGetTypeId(rec);
	tupTypmod = HeapTupleHeaderGetTypMod(rec);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/* Build a temporary HeapTuple control structure */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = rec;

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	heap_deform_tuple(&tuple, tupdesc, values, nulls);

	old_context = MemoryContextSwitchTo(TopTransactionContext);

	for (i = 0; i < ncolumns; i++)
	{
		struct varlena *attr;
		struct varlena *new_attr;
		struct varatt_indirect redirect_pointer;

		/* only work on existing, not-null varlenas */
		if (TupleDescAttr(tupdesc, i)->attisdropped ||
			nulls[i] ||
			TupleDescAttr(tupdesc, i)->attlen != -1)
			continue;

		attr = (struct varlena *) DatumGetPointer(values[i]);

		/* don't recursively indirect */
		if (VARATT_IS_EXTERNAL_INDIRECT(attr))
			continue;

		/* copy datum, so it still lives later */
		if (VARATT_IS_EXTERNAL_ONDISK(attr))
			attr = heap_tuple_fetch_attr(attr);
		else
		{
			struct varlena *oldattr = attr;

			attr = palloc0(VARSIZE_ANY(oldattr));
			memcpy(attr, oldattr, VARSIZE_ANY(oldattr));
		}

		/* build indirection Datum */
		new_attr = (struct varlena *) palloc0(INDIRECT_POINTER_SIZE);
		redirect_pointer.pointer = attr;
		SET_VARTAG_EXTERNAL(new_attr, VARTAG_INDIRECT);
		memcpy(VARDATA_EXTERNAL(new_attr), &redirect_pointer,
			   sizeof(redirect_pointer));

		values[i] = PointerGetDatum(new_attr);
	}

	newtup = heap_form_tuple(tupdesc, values, nulls);
	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	MemoryContextSwitchTo(old_context);

	/*
	 * We intentionally don't use PG_RETURN_HEAPTUPLEHEADER here, because that
	 * would cause the indirect toast pointers to be flattened out of the
	 * tuple immediately, rendering subsequent testing irrelevant.  So just
	 * return the HeapTupleHeader pointer as-is.  This violates the general
	 * rule that composite Datums shouldn't contain toast pointers, but so
	 * long as the regression test scripts don't insert the result of this
	 * function into a container type (record, array, etc) it should be OK.
	 */
	PG_RETURN_POINTER(newtup->t_data);
}
Example #14
Datum uri_out(PG_FUNCTION_ARGS)
{
	HeapTupleHeader ud = PG_GETARG_HEAPTUPLEHEADER(0);
	PG_RETURN_CSTRING(uri_char(ud, 0, 1));
}
Example #15
Datum uri_show(PG_FUNCTION_ARGS)
{
	HeapTupleHeader ud = PG_GETARG_HEAPTUPLEHEADER(0);
	PG_RETURN_TEXT_P(uri_char(ud, 1, 0));
}
Example #16
Datum
make_tuple_indirect(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
	HeapTupleData tuple;
	int			ncolumns;
	Datum	   *values;
	bool	   *nulls;

	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;

	HeapTuple	newtup;

	int			i;

	MemoryContext old_context;

	/* Extract type info from the tuple itself */
	tupType = HeapTupleHeaderGetTypeId(rec);
	tupTypmod = HeapTupleHeaderGetTypMod(rec);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/* Build a temporary HeapTuple control structure */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = rec;

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	heap_deform_tuple(&tuple, tupdesc, values, nulls);

	old_context = MemoryContextSwitchTo(TopTransactionContext);

	for (i = 0; i < ncolumns; i++)
	{
		struct varlena *attr;
		struct varlena *new_attr;
		struct varatt_indirect redirect_pointer;

		/* only work on existing, not-null varlenas */
		if (tupdesc->attrs[i]->attisdropped ||
			nulls[i] ||
			tupdesc->attrs[i]->attlen != -1)
			continue;

		attr = (struct varlena *) DatumGetPointer(values[i]);

		/* don't recursively indirect */
		if (VARATT_IS_EXTERNAL_INDIRECT(attr))
			continue;

		/* copy datum, so it still lives later */
		if (VARATT_IS_EXTERNAL_ONDISK(attr))
			attr = heap_tuple_fetch_attr(attr);
		else
		{
			struct varlena *oldattr = attr;
			attr = palloc0(VARSIZE_ANY(oldattr));
			memcpy(attr, oldattr, VARSIZE_ANY(oldattr));
		}

		/* build indirection Datum */
		new_attr = (struct varlena *) palloc0(INDIRECT_POINTER_SIZE);
		redirect_pointer.pointer = attr;
		SET_VARTAG_EXTERNAL(new_attr, VARTAG_INDIRECT);
		memcpy(VARDATA_EXTERNAL(new_attr), &redirect_pointer,
			   sizeof(redirect_pointer));

		values[i] = PointerGetDatum(new_attr);
	}

	newtup = heap_form_tuple(tupdesc, values, nulls);
	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	MemoryContextSwitchTo(old_context);

	PG_RETURN_HEAPTUPLEHEADER(newtup->t_data);
}
Example #17
Datum new_order(PG_FUNCTION_ARGS)
{
	/* Input variables. */
	int32 w_id = PG_GETARG_INT32(0);
	int32 d_id = PG_GETARG_INT32(1);
	int32 c_id = PG_GETARG_INT32(2);
	int32 o_all_local = PG_GETARG_INT32(3);
	int32 o_ol_cnt = PG_GETARG_INT32(4);

	TupleDesc tupdesc;
	SPITupleTable *tuptable;
	HeapTuple tuple;

	int32 ol_i_id[15];
	int32 ol_supply_w_id[15];
	int32 ol_quantity[15];

	int i, j;

	int ret;
	char query[1024];

	char *w_tax = NULL;

	char *d_tax = NULL;
	char *d_next_o_id = NULL;

	char *c_discount = NULL;
	char *c_last = NULL;
	char *c_credit = NULL;

	float order_amount = 0.0;

	char *i_price[15];
	char *i_name[15];
	char *i_data[15];

	float ol_amount[15];
	char *s_quantity[15];
	char *my_s_dist[15];
	char *s_data[15];

	/* Loop through the last set of parameters. */
	for (i = 0, j = 5; i < 15; i++) {
		bool isnull;
		HeapTupleHeader  t = PG_GETARG_HEAPTUPLEHEADER(j++);
		ol_i_id[i] = DatumGetInt32(GetAttributeByName(t, "ol_i_id", &isnull));
		ol_supply_w_id[i] = DatumGetInt32(GetAttributeByName(t,
				"ol_supply_w_id", &isnull));
		ol_quantity[i] = DatumGetInt32(GetAttributeByName(t, "ol_quantity",
				&isnull));
	}

	elog(DEBUG1, "%d w_id = %d", (int) getpid(), w_id);
	elog(DEBUG1, "%d d_id = %d", (int) getpid(), d_id);
	elog(DEBUG1, "%d c_id = %d", (int) getpid(), c_id);
	elog(DEBUG1, "%d o_all_local = %d", (int) getpid(), o_all_local);
	elog(DEBUG1, "%d o_ol_cnt = %d", (int) getpid(), o_ol_cnt);

	elog(DEBUG1, "%d ##  ol_i_id  ol_supply_w_id  ol_quantity",
		(int) getpid());
	elog(DEBUG1, "%d --  -------  --------------  -----------",
		(int) getpid());
	for (i = 0; i < o_ol_cnt; i++) {
		elog(DEBUG1, "%d %2d  %7d  %14d  %11d", (int) getpid(),
				i + 1, ol_i_id[i], ol_supply_w_id[i], ol_quantity[i]);
	}

	SPI_connect();

	sprintf(query, NEW_ORDER_1, w_id);

	elog(DEBUG1, "%d %s", (int) getpid(), query);

	ret = SPI_exec(query, 0);
	if (ret == SPI_OK_SELECT && SPI_processed > 0) {
		tupdesc = SPI_tuptable->tupdesc;
		tuptable = SPI_tuptable;
		tuple = tuptable->vals[0];

		w_tax = SPI_getvalue(tuple, tupdesc, 1);
		elog(DEBUG1, "%d w_tax = %s", (int) getpid(), w_tax);
	} else {
		elog(WARNING, "NEW_ORDER_1 failed");
		SPI_finish();
		PG_RETURN_INT32(10);
	}

	sprintf(query, NEW_ORDER_2, w_id, d_id);
	elog(DEBUG1, "%s", query);
	ret = SPI_exec(query, 0);
	if (ret == SPI_OK_SELECT && SPI_processed > 0) {
		tupdesc = SPI_tuptable->tupdesc;
		tuptable = SPI_tuptable;
		tuple = tuptable->vals[0];

		d_tax = SPI_getvalue(tuple, tupdesc, 1);
		d_next_o_id = SPI_getvalue(tuple, tupdesc, 2);
		elog(DEBUG1, "%d d_tax = %s", (int) getpid(), d_tax);
		elog(DEBUG1, "%d d_next_o_id = %s", (int) getpid(),
			d_next_o_id);
	} else {
		elog(WARNING, "NEW_ORDER_2 failed");
		SPI_finish();
		PG_RETURN_INT32(11);
	}

	sprintf(query, NEW_ORDER_3, w_id, d_id);
	elog(DEBUG1, "%d %s", (int) getpid(), query);
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_UPDATE) {
		elog(WARNING, "NEW_ORDER_3 failed");
		SPI_finish();
		PG_RETURN_INT32(12);
	}

	sprintf(query, NEW_ORDER_4, w_id, d_id, c_id);
	elog(DEBUG1, "%d %s", (int) getpid(), query);
	ret = SPI_exec(query, 0);
	if (ret == SPI_OK_SELECT && SPI_processed > 0) {
		tupdesc = SPI_tuptable->tupdesc;
		tuptable = SPI_tuptable;
		tuple = tuptable->vals[0];

		c_discount = SPI_getvalue(tuple, tupdesc, 1);
		c_last = SPI_getvalue(tuple, tupdesc, 2);
		c_credit = SPI_getvalue(tuple, tupdesc, 3);
		elog(DEBUG1, "%d c_discount = %s", (int) getpid(), c_discount);
		elog(DEBUG1, "%d c_last = %s", (int) getpid(), c_last);
		elog(DEBUG1, "%d c_credit = %s", (int) getpid(), c_credit);
	} else {
		elog(WARNING, "NEW_ORDER_4 failed");
		SPI_finish();
		PG_RETURN_INT32(13);
	}

	sprintf(query, NEW_ORDER_5, d_next_o_id, w_id, d_id);
	elog(DEBUG1, "%d %s", (int) getpid(), query);
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_INSERT) {
		elog(WARNING, "NEW_ORDER_5 failed %d", ret);
		SPI_finish();
		PG_RETURN_INT32(14);
	}

	sprintf(query, NEW_ORDER_6, d_next_o_id, d_id, w_id, c_id, o_ol_cnt,
		o_all_local);
	elog(DEBUG1, "%d %s", (int) getpid(), query);
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_INSERT) {
		elog(WARNING, "NEW_ORDER_6 failed");
		SPI_finish();
		PG_RETURN_INT32(15);
	}

	for (i = 0; i < o_ol_cnt; i++) {
		sprintf(query, NEW_ORDER_7, ol_i_id[i]);
		elog(DEBUG1, "%d %s", (int) getpid(), query);
		ret = SPI_exec(query, 0);
		/*
		 * Shouldn't have to check if ol_i_id is 0, but if the row
		 * doesn't exist, the query still passes.
		 */
		if (ol_i_id[i] != 0 && ret == SPI_OK_SELECT && SPI_processed > 0) {
			tupdesc = SPI_tuptable->tupdesc;
			tuptable = SPI_tuptable;
			tuple = tuptable->vals[0];

			i_price[i] = SPI_getvalue(tuple, tupdesc, 1);
			i_name[i] = SPI_getvalue(tuple, tupdesc, 2);
			i_data[i] = SPI_getvalue(tuple, tupdesc, 3);
			elog(DEBUG1, "%d i_price[%d] = %s", (int) getpid(), i,
				i_price[i]);
			elog(DEBUG1, "%d i_name[%d] = %s", (int) getpid(), i,
				i_name[i]);
			elog(DEBUG1, "%d i_data[%d] = %s", (int) getpid(), i,
				i_data[i]);
		} else {
			/* Item doesn't exist, rollback transaction. */
			SPI_finish();
			PG_RETURN_INT32(2);
		}

		ol_amount[i] = atof(i_price[i]) * (float) ol_quantity[i];
		sprintf(query, NEW_ORDER_8, s_dist[d_id - 1], ol_i_id[i], w_id);
		elog(DEBUG1, "%d %s", (int) getpid(), query);
		ret = SPI_exec(query, 0);
		if (ret == SPI_OK_SELECT && SPI_processed > 0) {
			tupdesc = SPI_tuptable->tupdesc;
			tuptable = SPI_tuptable;
			tuple = tuptable->vals[0];

			s_quantity[i] = SPI_getvalue(tuple, tupdesc, 1);
			my_s_dist[i] = SPI_getvalue(tuple, tupdesc, 2);
			s_data[i] = SPI_getvalue(tuple, tupdesc, 3);
			elog(DEBUG1, "%d s_quantity[%d] = %s", (int) getpid(),
				i, s_quantity[i]);
			elog(DEBUG1, "%d my_s_dist[%d] = %s", (int) getpid(),
				i, my_s_dist[i]);
			elog(DEBUG1, "%d s_data[%d] = %s", (int) getpid(), i,
				s_data[i]);
		} else {
			elog(WARNING, "NEW_ORDER_8 failed");
			SPI_finish();
			PG_RETURN_INT32(16);
		}
		order_amount += ol_amount[i];

		if (atoi(s_quantity[i]) > ol_quantity[i] + 10) {
			sprintf(query, NEW_ORDER_9, ol_quantity[i],
				ol_i_id[i], w_id);
		} else {
			sprintf(query, NEW_ORDER_9, ol_quantity[i] - 91,
				ol_i_id[i], w_id);
		}
		elog(DEBUG1, "%d %s", (int) getpid(), query);
		ret = SPI_exec(query, 0);
		if (ret != SPI_OK_UPDATE) {
			elog(WARNING, "NEW_ORDER_9 failed");
			SPI_finish();
			PG_RETURN_INT32(17);
		}

		sprintf(query, NEW_ORDER_10, d_next_o_id, d_id, w_id, i + 1,
			ol_i_id[i], ol_supply_w_id[i], ol_quantity[i],
			ol_amount[i], my_s_dist[i]);
		elog(DEBUG1, "%d %s", getpid(), query);
		ret = SPI_exec(query, 0);
		if (ret != SPI_OK_INSERT) {
			elog(WARNING, "NEW_ORDER_10 failed");
			SPI_finish();
			PG_RETURN_INT32(18);
		}
	}

	SPI_finish();
	PG_RETURN_INT32(0);
}
Example #18
/*
 * record_send		- binary output routine for any composite type.
 */
Datum
record_send(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tuple;
	RecordIOData *my_extra;
	int			ncolumns;
	int			validcols;
	int			i;
	Datum	   *values;
	bool	   *nulls;
	StringInfoData buf;

	/* Extract type info from the tuple itself */
	tupType = HeapTupleHeaderGetTypeId(rec);
	tupTypmod = HeapTupleHeaderGetTypMod(rec);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/* Build a temporary HeapTuple control structure */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = rec;

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	/* Break down the tuple into fields */
	heap_deform_tuple(&tuple, tupdesc, values, nulls);

	/* And build the result string */
	pq_begintypsend(&buf);

	/* Need to scan to count nondeleted columns */
	validcols = 0;
	for (i = 0; i < ncolumns; i++)
	{
		if (!tupdesc->attrs[i]->attisdropped)
			validcols++;
	}
	pq_sendint(&buf, validcols, 4);

	for (i = 0; i < ncolumns; i++)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		bytea	   *outputbytes;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
			continue;

		pq_sendint(&buf, column_type, sizeof(Oid));

		if (nulls[i])
		{
			/* emit -1 data length to signify a NULL */
			pq_sendint(&buf, -1, 4);
			continue;
		}

		/*
		 * Convert the column value to binary
		 */
		if (column_info->column_type != column_type)
		{
			bool		typIsVarlena;

			getTypeBinaryOutputInfo(column_type,
									&column_info->typiofunc,
									&typIsVarlena);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		outputbytes = SendFunctionCall(&column_info->proc, values[i]);

		/* We assume the result will not have been toasted */
		pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
		pq_sendbytes(&buf, VARDATA(outputbytes),
					 VARSIZE(outputbytes) - VARHDRSZ);
		pfree(outputbytes);
	}

	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
Example #19
/*
 * record_image_eq :
 *		  compares two records for identical contents, based on byte images
 * result :
 *		  returns true if the records are identical, false otherwise.
 *
 * Note: we do not use record_image_cmp here, since we can avoid
 * de-toasting for unequal lengths this way.
 */
Datum
record_image_eq(PG_FUNCTION_ARGS)
{
	HeapTupleHeader record1 = PG_GETARG_HEAPTUPLEHEADER(0);
	HeapTupleHeader record2 = PG_GETARG_HEAPTUPLEHEADER(1);
	bool		result = true;
	Oid			tupType1;
	Oid			tupType2;
	int32		tupTypmod1;
	int32		tupTypmod2;
	TupleDesc	tupdesc1;
	TupleDesc	tupdesc2;
	HeapTupleData tuple1;
	HeapTupleData tuple2;
	int			ncolumns1;
	int			ncolumns2;
	RecordCompareData *my_extra;
	int			ncols;
	Datum	   *values1;
	Datum	   *values2;
	bool	   *nulls1;
	bool	   *nulls2;
	int			i1;
	int			i2;
	int			j;

	/* Extract type info from the tuples */
	tupType1 = HeapTupleHeaderGetTypeId(record1);
	tupTypmod1 = HeapTupleHeaderGetTypMod(record1);
	tupdesc1 = lookup_rowtype_tupdesc(tupType1, tupTypmod1);
	ncolumns1 = tupdesc1->natts;
	tupType2 = HeapTupleHeaderGetTypeId(record2);
	tupTypmod2 = HeapTupleHeaderGetTypMod(record2);
	tupdesc2 = lookup_rowtype_tupdesc(tupType2, tupTypmod2);
	ncolumns2 = tupdesc2->natts;

	/* Build temporary HeapTuple control structures */
	tuple1.t_len = HeapTupleHeaderGetDatumLength(record1);
	ItemPointerSetInvalid(&(tuple1.t_self));
	tuple1.t_tableOid = InvalidOid;
	tuple1.t_data = record1;
	tuple2.t_len = HeapTupleHeaderGetDatumLength(record2);
	ItemPointerSetInvalid(&(tuple2.t_self));
	tuple2.t_tableOid = InvalidOid;
	tuple2.t_data = record2;

	/*
	 * We arrange to look up the needed comparison info just once per series
	 * of calls, assuming the record types don't change underneath us.
	 */
	ncols = Max(ncolumns1, ncolumns2);
	my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns < ncols)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   offsetof(RecordCompareData, columns) +
							   ncols * sizeof(ColumnCompareData));
		my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
		my_extra->ncolumns = ncols;
		my_extra->record1_type = InvalidOid;
		my_extra->record1_typmod = 0;
		my_extra->record2_type = InvalidOid;
		my_extra->record2_typmod = 0;
	}

	if (my_extra->record1_type != tupType1 ||
		my_extra->record1_typmod != tupTypmod1 ||
		my_extra->record2_type != tupType2 ||
		my_extra->record2_typmod != tupTypmod2)
	{
		MemSet(my_extra->columns, 0, ncols * sizeof(ColumnCompareData));
		my_extra->record1_type = tupType1;
		my_extra->record1_typmod = tupTypmod1;
		my_extra->record2_type = tupType2;
		my_extra->record2_typmod = tupTypmod2;
	}

	/* Break down the tuples into fields */
	values1 = (Datum *) palloc(ncolumns1 * sizeof(Datum));
	nulls1 = (bool *) palloc(ncolumns1 * sizeof(bool));
	heap_deform_tuple(&tuple1, tupdesc1, values1, nulls1);
	values2 = (Datum *) palloc(ncolumns2 * sizeof(Datum));
	nulls2 = (bool *) palloc(ncolumns2 * sizeof(bool));
	heap_deform_tuple(&tuple2, tupdesc2, values2, nulls2);

	/*
	 * Scan corresponding columns, allowing for dropped columns in different
	 * places in the two rows.  i1 and i2 are physical column indexes, j is
	 * the logical column index.
	 */
	i1 = i2 = j = 0;
	while (i1 < ncolumns1 || i2 < ncolumns2)
	{
		/*
		 * Skip dropped columns
		 */
		if (i1 < ncolumns1 && tupdesc1->attrs[i1]->attisdropped)
		{
			i1++;
			continue;
		}
		if (i2 < ncolumns2 && tupdesc2->attrs[i2]->attisdropped)
		{
			i2++;
			continue;
		}
		if (i1 >= ncolumns1 || i2 >= ncolumns2)
			break;				/* we'll deal with mismatch below loop */

		/*
		 * Have two matching columns, they must be same type
		 */
		if (tupdesc1->attrs[i1]->atttypid !=
			tupdesc2->attrs[i2]->atttypid)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("cannot compare dissimilar column types %s and %s at record column %d",
							format_type_be(tupdesc1->attrs[i1]->atttypid),
							format_type_be(tupdesc2->attrs[i2]->atttypid),
							j + 1)));

		/*
		 * We consider two NULLs equal; NULL > not-NULL.
		 */
		if (!nulls1[i1] || !nulls2[i2])
		{
			if (nulls1[i1] || nulls2[i2])
			{
				result = false;
				break;
			}

			/* Compare the pair of elements */
			if (tupdesc1->attrs[i1]->attlen == -1)
			{
				Size		len1,
							len2;

				len1 = toast_raw_datum_size(values1[i1]);
				len2 = toast_raw_datum_size(values2[i2]);
				/* No need to de-toast if lengths don't match. */
				if (len1 != len2)
					result = false;
				else
				{
					struct varlena *arg1val;
					struct varlena *arg2val;

					arg1val = PG_DETOAST_DATUM_PACKED(values1[i1]);
					arg2val = PG_DETOAST_DATUM_PACKED(values2[i2]);

					result = (memcmp(VARDATA_ANY(arg1val),
									 VARDATA_ANY(arg2val),
									 len1 - VARHDRSZ) == 0);

					/* Only free memory if it's a copy made here. */
					if ((Pointer) arg1val != (Pointer) values1[i1])
						pfree(arg1val);
					if ((Pointer) arg2val != (Pointer) values2[i2])
						pfree(arg2val);
				}
			}
			else if (tupdesc1->attrs[i1]->attbyval)
			{
				switch (tupdesc1->attrs[i1]->attlen)
				{
					case 1:
						result = (GET_1_BYTE(values1[i1]) ==
								  GET_1_BYTE(values2[i2]));
						break;
					case 2:
						result = (GET_2_BYTES(values1[i1]) ==
								  GET_2_BYTES(values2[i2]));
						break;
					case 4:
						result = (GET_4_BYTES(values1[i1]) ==
								  GET_4_BYTES(values2[i2]));
						break;
#if SIZEOF_DATUM == 8
					case 8:
						result = (GET_8_BYTES(values1[i1]) ==
								  GET_8_BYTES(values2[i2]));
						break;
#endif
					default:
						Assert(false);	/* cannot happen */
				}
			}
			else
			{
				result = (memcmp(DatumGetPointer(values1[i1]),
								 DatumGetPointer(values2[i2]),
								 tupdesc1->attrs[i1]->attlen) == 0);
			}
			if (!result)
				break;
		}

		/* equal, so continue to next column */
		i1++, i2++, j++;
	}

	/*
	 * If we didn't break out of the loop early, check for column count
	 * mismatch.  (We do not report such mismatch if we found unequal column
	 * values; is that a feature or a bug?)
	 */
	if (result)
	{
		if (i1 != ncolumns1 || i2 != ncolumns2)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("cannot compare record types with different numbers of columns")));
	}

	pfree(values1);
	pfree(nulls1);
	pfree(values2);
	pfree(nulls2);
	ReleaseTupleDesc(tupdesc1);
	ReleaseTupleDesc(tupdesc2);

	/* Avoid leaking memory when handed toasted input. */
	PG_FREE_IF_COPY(record1, 0);
	PG_FREE_IF_COPY(record2, 1);

	PG_RETURN_BOOL(result);
}
Example #20
Datum serialize_record( PG_FUNCTION_ARGS )
{
//	FILE* log;

//	log = fopen("/var/lib/postgresql/serializer.log", "a");
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);

	HeapTupleData tuple;
	bool		needComma = false;
	int		 i;
	Datum	  *values;
	bool	   *nulls;
	StringInfoData buf;
	char *conversion_buf;

	/* Extract type info from the tuple itself */
	Oid tupType = HeapTupleHeaderGetTypeId(rec);
	int32 tupTypmod = HeapTupleHeaderGetTypMod(rec);
	TupleDesc tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	int ncolumns = tupdesc->natts;

	/* Build a temporary HeapTuple control structure */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = rec;

//	fprintf(log, "Doing serialize_record\n");
//	fflush(log);

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	/* Break down the tuple into fields */
	heap_deform_tuple(&tuple, tupdesc, values, nulls);

	/* And build the result string */
	initStringInfo(&buf);

	appendStringInfoChar(&buf, '{');

	for (i = 0; i < ncolumns; i++)
	{
		Oid		 column_type = tupdesc->attrs[ i ]->atttypid;
		char	   *value;
		char	   *column_name;
		char 		type_category;
		HeapTuple 	type_tuple;
		FmgrInfo flinfo;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
			continue;

		if (nulls[i])
		{
			/* emit nothing... */
			continue;
		}

		if (needComma)
			appendStringInfoChar(&buf, ',');

		needComma = true;


		/* obtain column name */
		column_name = SPI_fname( tupdesc, i + 1 );

		/* obtain type information from pg_catalog */
		type_tuple = SearchSysCache1( TYPEOID, ObjectIdGetDatum(column_type) );
		if (!HeapTupleIsValid( type_tuple ))
			elog(ERROR, "cache lookup failed for relation %u", column_type);

		type_category = ((Form_pg_type) GETSTRUCT( type_tuple ))->typcategory;

		ReleaseSysCache( type_tuple );

		/* append column name */
		appendStringInfoChar(&buf, '"');
		appendStringInfoString(&buf, column_name);
		appendStringInfoString(&buf, "\":");

		switch( type_category )
		{
			// http://www.postgresql.org/docs/current/static/catalog-pg-type.html#CATALOG-TYPCATEGORY-TABLE

			case 'A': //array
				//call to serialize_array( ... )

				MemSet( &flinfo, 0, sizeof( flinfo ) );
				flinfo.fn_addr = serialize_array;
				flinfo.fn_nargs = 1;
				flinfo.fn_mcxt = fcinfo->flinfo->fn_mcxt;

				value = PG_TEXT_DATUM_GET_CSTR( FunctionCall1( &flinfo, values[ i ] ) );

				appendStringInfoString(&buf, value);
			break;

			case 'C': //composite
				//recursive call to serialize_record( ... )
				MemSet( &flinfo, 0, sizeof( flinfo ) );
				flinfo.fn_addr = serialize_record;
				flinfo.fn_nargs = 1;
				flinfo.fn_mcxt = fcinfo->flinfo->fn_mcxt;

				value = PG_TEXT_DATUM_GET_CSTR( FunctionCall1( &flinfo, values[ i ] ) );

				appendStringInfoString(&buf, value);
			break;

			case 'N': //numeric

				conversion_buf = NULL;
				// get column text value
//				fprintf(log, "Calling ConvertToText\n");
//				fflush(log);
				value = ConvertToText( values[ i ], column_type, fcinfo->flinfo->fn_mcxt, &conversion_buf );
//				fprintf(log, "ConvertToText succeded\n");
//				fflush(log);

				appendStringInfoString(&buf, value);
//				fprintf(log, "append.... succeded\n");
//				fflush(log);

				if(conversion_buf != NULL) {
					pfree(conversion_buf);
					conversion_buf = NULL;
				}

			break;

			case 'B': //boolean
				appendStringInfoString(&buf,
					// get column boolean value
					DatumGetBool( values[ i ] ) ? "true" : "false"
				);
			break;

			default: //another

				conversion_buf = NULL;
				// get column text value
//				fprintf(log, "Calling ConvertToText\n");
//				fflush(log);
				value = ConvertToText( values[ i ], column_type, fcinfo->flinfo->fn_mcxt, &conversion_buf );
//				fprintf(log, "ConvertToText succeded\n");
//				fflush(log);

				appendStringInfoQuotedString(&buf, value);
//				fprintf(log, "append.... succeded\n");
//				fflush(log);

				if(conversion_buf != NULL) {
					pfree(conversion_buf);
					conversion_buf = NULL;
				}
		}
	}

	appendStringInfoChar(&buf, '}');

	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

//	fclose(log);

	PG_RETURN_TEXT_P( PG_CSTR_GET_TEXT( buf.data ) );
}
Example #21
Datum 
formatter_export(PG_FUNCTION_ARGS)
{
	HeapTupleHeader		rec	= PG_GETARG_HEAPTUPLEHEADER(0);
	TupleDesc           tupdesc;
	HeapTupleData		tuple;
	int                 ncolumns = 0;
	format_t           *myData;
	char               *data;
	int                 datlen;
	int                 i;

	/* Must be called via the external table format manager */
	if (!CALLED_AS_FORMATTER(fcinfo))
		elog(ERROR, "formatter_export: not called by format manager");
	
	tupdesc = FORMATTER_GET_TUPDESC(fcinfo);
	
	/* Get our internal description of the formatter */
	ncolumns = tupdesc->natts;
	myData = (format_t *) FORMATTER_GET_USER_CTX(fcinfo);
	if (myData == NULL)
	{
		myData          = palloc(sizeof(format_t));
		myData->ncols   = ncolumns;
		myData->values  = palloc(sizeof(Datum) * ncolumns);
		myData->nulls   = palloc(sizeof(bool) * ncolumns);
		
		/* Determine required buffer size */
		myData->buflen = 0;
		for (i = 0; i < ncolumns; i++)
		{
			Oid   type   = tupdesc->attrs[i]->atttypid;
			int32 typmod = tupdesc->attrs[i]->atttypmod;

			/* Don't know how to format dropped columns, error for now */
			if (tupdesc->attrs[i]->attisdropped)
				elog(ERROR, "formatter_export: dropped columns");

			switch (type)
			{
				case FLOAT8OID:
				{
					myData->buflen += sizeof(double);
					break;
				}
	
				case VARCHAROID:
				case BPCHAROID:
				case TEXTOID:
				{
					myData->buflen += (typmod > 0) ? typmod : MAX_FORMAT_STRING;
					break;
				}
					
				default:
				{
					elog(ERROR, "formatter_export error: unsupported data type");
					break;
				}
			}
		}

		myData->buflen = Max(128, myData->buflen);  /* allocate at least 128 bytes */
		myData->buffer = palloc(myData->buflen + VARHDRSZ);

		FORMATTER_SET_USER_CTX(fcinfo, myData);
	}
	if (myData->ncols != ncolumns)
		elog(ERROR, "formatter_export: unexpected change of output record type");

	/* break the input tuple into fields */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_data = rec;
	heap_deform_tuple(&tuple, tupdesc, myData->values, myData->nulls);

	datlen = 0;
	data = VARDATA(myData->buffer);

	
	/* =======================================================================
	 *                            MAIN FORMATTING CODE
	 *
	 * Currently this code assumes:
	 *  - Homogeneous hardware => no need to convert data to network byte order
	 *  - Support for TEXT/VARCHAR/BPCHAR/FLOAT8 only
	 *  - Length-prefixed strings
	 *  - No end-of-record tags, checksums, or optimizations for alignment.
	 *  - NULL values are cast to some sensible default value (NaN, "")
	 *
	 * ======================================================================= */
	for (i = 0; i < ncolumns; i++)
	{
		Oid	  type    = tupdesc->attrs[i]->atttypid;
		int   typmod  = tupdesc->attrs[i]->atttypmod;
		Datum val     = myData->values[i];
		bool  nul     = myData->nulls[i];
		
		switch (type)
		{
			case FLOAT8OID:
			{
				float8 value;

				if (datlen + sizeof(value) >= myData->buflen)
					elog(ERROR, "formatter_export: buffer too small");
				
				if (nul)
					value = NULL_FLOAT8_VALUE;
				else
					value = DatumGetFloat8(val);

				memcpy(&data[datlen], &value, sizeof(value));
				datlen += sizeof(value);
				break;
			}

			case TEXTOID:
			case VARCHAROID:
			case BPCHAROID:
			{
				text  *str;
				int32  len;
			   
				if (nul)
				{
					str = NULL;
					len = 0;
				}
				else
				{
					str = DatumGetTextP(val);
					len = VARSIZE(str) - VARHDRSZ;
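					/* unconstrained strings: clamp to the space reserved in the buffer */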
					if (typmod < 0)
						len  = Min(len, MAX_FORMAT_STRING);
				}

				if (datlen + sizeof(len) + len >= myData->buflen)
					elog(ERROR, "formatter_export: buffer too small");
				memcpy(&data[datlen], &len, sizeof(len));
				datlen += sizeof(len);

				if (len > 0)
				{
					memcpy(&data[datlen], VARDATA(str), len);
					datlen += len;
				}
				break;
			}

			default:
				elog(ERROR, "formatter_export: unsupported datatype");
				break;
		}	
	}
	/* ======================================================================= */
	
	SET_VARSIZE(myData->buffer, datlen + VARHDRSZ);
	PG_RETURN_BYTEA_P(myData->buffer);
}
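Because the layout written above is simply a raw float8 followed by an int32 length and the string bytes, with no alignment padding or byte-order conversion, a consumer can walk the buffer sequentially. The decoder below is a worked illustration only, not part of the original example; decode_row and its parameters are made-up names, and it assumes a two-column (float8, text) row.

#include <stdint.h>
#include <string.h>

/* Hypothetical decoder for one exported (float8, text) row, matching the
 * layout produced by formatter_export above: 8 bytes of raw double, then
 * an int32 length, then that many bytes of text. */
static int
decode_row(const char *buf, size_t buflen, double *num, char *str, size_t strmax)
{
	size_t  off = 0;
	int32_t len;

	if (buflen < sizeof(double) + sizeof(int32_t))
		return -1;				/* truncated record */

	memcpy(num, buf + off, sizeof(double));
	off += sizeof(double);

	memcpy(&len, buf + off, sizeof(int32_t));
	off += sizeof(int32_t);

	if (len < 0 || (size_t) len > buflen - off || (size_t) len >= strmax)
		return -1;				/* corrupt length or string too large */

	memcpy(str, buf + off, len);
	str[len] = '\0';
	return 0;
}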
Example #22
0
/*
 * record_cmp()
 * Internal comparison function for records.
 *
 * Returns -1, 0 or 1
 *
 * Do not assume that the two inputs are exactly the same record type;
 * for instance we might be comparing an anonymous ROW() construct against a
 * named composite type.  We will compare as long as they have the same number
 * of non-dropped columns of the same types.
 */
static int
record_cmp(FunctionCallInfo fcinfo)
{
	HeapTupleHeader record1 = PG_GETARG_HEAPTUPLEHEADER(0);
	HeapTupleHeader record2 = PG_GETARG_HEAPTUPLEHEADER(1);
	int			result = 0;
	Oid			tupType1;
	Oid			tupType2;
	int32		tupTypmod1;
	int32		tupTypmod2;
	TupleDesc	tupdesc1;
	TupleDesc	tupdesc2;
	HeapTupleData tuple1;
	HeapTupleData tuple2;
	int			ncolumns1;
	int			ncolumns2;
	RecordCompareData *my_extra;
	int			ncols;
	Datum	   *values1;
	Datum	   *values2;
	bool	   *nulls1;
	bool	   *nulls2;
	int			i1;
	int			i2;
	int			j;

	/* Extract type info from the tuples */
	tupType1 = HeapTupleHeaderGetTypeId(record1);
	tupTypmod1 = HeapTupleHeaderGetTypMod(record1);
	tupdesc1 = lookup_rowtype_tupdesc(tupType1, tupTypmod1);
	ncolumns1 = tupdesc1->natts;
	tupType2 = HeapTupleHeaderGetTypeId(record2);
	tupTypmod2 = HeapTupleHeaderGetTypMod(record2);
	tupdesc2 = lookup_rowtype_tupdesc(tupType2, tupTypmod2);
	ncolumns2 = tupdesc2->natts;

	/* Build temporary HeapTuple control structures */
	tuple1.t_len = HeapTupleHeaderGetDatumLength(record1);
	ItemPointerSetInvalid(&(tuple1.t_self));
	tuple1.t_tableOid = InvalidOid;
	tuple1.t_data = record1;
	tuple2.t_len = HeapTupleHeaderGetDatumLength(record2);
	ItemPointerSetInvalid(&(tuple2.t_self));
	tuple2.t_tableOid = InvalidOid;
	tuple2.t_data = record2;

	/*
	 * We arrange to look up the needed comparison info just once per series
	 * of calls, assuming the record types don't change underneath us.
	 */
	ncols = Max(ncolumns1, ncolumns2);
	my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns < ncols)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
						sizeof(RecordCompareData) - sizeof(ColumnCompareData)
							   + ncols * sizeof(ColumnCompareData));
		my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
		my_extra->ncolumns = ncols;
		my_extra->record1_type = InvalidOid;
		my_extra->record1_typmod = 0;
		my_extra->record2_type = InvalidOid;
		my_extra->record2_typmod = 0;
	}

	if (my_extra->record1_type != tupType1 ||
		my_extra->record1_typmod != tupTypmod1 ||
		my_extra->record2_type != tupType2 ||
		my_extra->record2_typmod != tupTypmod2)
	{
		MemSet(my_extra->columns, 0, ncols * sizeof(ColumnCompareData));
		my_extra->record1_type = tupType1;
		my_extra->record1_typmod = tupTypmod1;
		my_extra->record2_type = tupType2;
		my_extra->record2_typmod = tupTypmod2;
	}

	/* Break down the tuples into fields */
	values1 = (Datum *) palloc(ncolumns1 * sizeof(Datum));
	nulls1 = (bool *) palloc(ncolumns1 * sizeof(bool));
	heap_deform_tuple(&tuple1, tupdesc1, values1, nulls1);
	values2 = (Datum *) palloc(ncolumns2 * sizeof(Datum));
	nulls2 = (bool *) palloc(ncolumns2 * sizeof(bool));
	heap_deform_tuple(&tuple2, tupdesc2, values2, nulls2);

	/*
	 * Scan corresponding columns, allowing for dropped columns in different
	 * places in the two rows.	i1 and i2 are physical column indexes, j is
	 * the logical column index.
	 */
	i1 = i2 = j = 0;
	while (i1 < ncolumns1 || i2 < ncolumns2)
	{
		TypeCacheEntry *typentry;
		Oid			collation;
		FunctionCallInfoData locfcinfo;
		int32		cmpresult;

		/*
		 * Skip dropped columns
		 */
		if (i1 < ncolumns1 && tupdesc1->attrs[i1]->attisdropped)
		{
			i1++;
			continue;
		}
		if (i2 < ncolumns2 && tupdesc2->attrs[i2]->attisdropped)
		{
			i2++;
			continue;
		}
		if (i1 >= ncolumns1 || i2 >= ncolumns2)
			break;				/* we'll deal with mismatch below loop */

		/*
		 * Have two matching columns, they must be same type
		 */
		if (tupdesc1->attrs[i1]->atttypid !=
			tupdesc2->attrs[i2]->atttypid)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("cannot compare dissimilar column types %s and %s at record column %d",
							format_type_be(tupdesc1->attrs[i1]->atttypid),
							format_type_be(tupdesc2->attrs[i2]->atttypid),
							j + 1)));

		/*
		 * If they're not same collation, we don't complain here, but the
		 * comparison function might.
		 */
		collation = tupdesc1->attrs[i1]->attcollation;
		if (collation != tupdesc2->attrs[i2]->attcollation)
			collation = InvalidOid;

		/*
		 * Lookup the comparison function if not done already
		 */
		typentry = my_extra->columns[j].typentry;
		if (typentry == NULL ||
			typentry->type_id != tupdesc1->attrs[i1]->atttypid)
		{
			typentry = lookup_type_cache(tupdesc1->attrs[i1]->atttypid,
										 TYPECACHE_CMP_PROC_FINFO);
			if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_FUNCTION),
				errmsg("could not identify a comparison function for type %s",
					   format_type_be(typentry->type_id))));
			my_extra->columns[j].typentry = typentry;
		}

		/*
		 * We consider two NULLs equal; NULL > not-NULL.
		 */
		if (!nulls1[i1] || !nulls2[i2])
		{
			if (nulls1[i1])
			{
				/* arg1 is greater than arg2 */
				result = 1;
				break;
			}
			if (nulls2[i2])
			{
				/* arg1 is less than arg2 */
				result = -1;
				break;
			}

			/* Compare the pair of elements */
			InitFunctionCallInfoData(locfcinfo, &typentry->cmp_proc_finfo, 2,
									 collation, NULL, NULL);
			locfcinfo.arg[0] = values1[i1];
			locfcinfo.arg[1] = values2[i2];
			locfcinfo.argnull[0] = false;
			locfcinfo.argnull[1] = false;
			locfcinfo.isnull = false;
			cmpresult = DatumGetInt32(FunctionCallInvoke(&locfcinfo));

			if (cmpresult < 0)
			{
				/* arg1 is less than arg2 */
				result = -1;
				break;
			}
			else if (cmpresult > 0)
			{
				/* arg1 is greater than arg2 */
				result = 1;
				break;
			}
		}

		/* equal, so continue to next column */
		i1++, i2++, j++;
	}

	/*
	 * If we didn't break out of the loop early, check for column count
	 * mismatch.  (We do not report such mismatch if we found unequal column
	 * values; is that a feature or a bug?)
	 */
	if (result == 0)
	{
		if (i1 != ncolumns1 || i2 != ncolumns2)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("cannot compare record types with different numbers of columns")));
	}

	pfree(values1);
	pfree(nulls1);
	pfree(values2);
	pfree(nulls2);
	ReleaseTupleDesc(tupdesc1);
	ReleaseTupleDesc(tupdesc2);

	/* Avoid leaking memory when handed toasted input. */
	PG_FREE_IF_COPY(record1, 0);
	PG_FREE_IF_COPY(record2, 1);

	return result;
}
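For context, in PostgreSQL itself record_cmp is an internal workhorse: the SQL-callable comparison operators and the btree support function are thin wrappers around it. A minimal sketch of that layering (two of the wrappers) looks like this:

Datum
record_lt(PG_FUNCTION_ARGS)
{
	/* true when the first record sorts before the second */
	PG_RETURN_BOOL(record_cmp(fcinfo) < 0);
}

Datum
btrecordcmp(PG_FUNCTION_ARGS)
{
	/* btree support function: expose the -1/0/1 result directly */
	PG_RETURN_INT32(record_cmp(fcinfo));
}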
Example #23
0
/*
 * nb_classify_accum - Naive Bayesian Classification Accumulator
 */
Datum 
nb_classify_accum(PG_FUNCTION_ARGS)
{
	nb_classify_state  state;
	TupleDesc          resultDesc;
	int                i;
	int                nclasses;
	ArrayType         *classes;          /* arg[1] */
	int64              attr_count;       /* arg[2] */
	ArrayType         *class_count;      /* arg[3] */
	ArrayType         *class_total;      /* arg[4] */
	HeapTuple          result;
	Datum              resultDatum[3];
	bool               resultNull[3];
	bool               skip;
	int64             *class_data;
	int64             *total_data;
	float8            *prior_data;
	int64             *stotal_data;

	/* Check input parameters */
	if (PG_NARGS() != 5)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify_accum called with %d arguments", 
						PG_NARGS())));
	}

	/* Skip rows with NULLs */
	if (PG_ARGISNULL(1) || PG_ARGISNULL(3) || PG_ARGISNULL(4))
	{
		if (PG_ARGISNULL(0))
			PG_RETURN_NULL();
		PG_RETURN_DATUM(PG_GETARG_DATUM(0));
	}
	classes     = PG_GETARG_ARRAYTYPE_P(1);
	attr_count  = PG_ARGISNULL(2) ? 0 : PG_GETARG_INT64(2);
	class_count = PG_GETARG_ARRAYTYPE_P(3);
	class_total = PG_GETARG_ARRAYTYPE_P(4);

	if (ARR_NDIM(classes) != 1 || 
		ARR_NDIM(class_count) != 1 || 
		ARR_NDIM(class_total) != 1)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify cannot accumulate multidimensional arrays")));
	}

	/* All three arrays must be equal cardinality */
	nclasses = ARR_DIMS(classes)[0];
	if (ARR_DIMS(class_count)[0] != nclasses ||
		ARR_DIMS(class_total)[0] != nclasses)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify: non-conformable arrays")));
	}
	
	class_data = (int64*) ARR_DATA_PTR(class_count);
	total_data = (int64*) ARR_DATA_PTR(class_total);

	/* It is an error for class_data, total_data, or attr_count to be a 
	   negative number */
	if (attr_count < 0) 
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify: attr_count value must be >= 0")));

	}
	skip = false;
	for (i = 0; i < nclasses; i++) 
	{
		if (class_data[i] < 0) 
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("nb_classify: class_data values must be >= 0")));
		}
		if (total_data[i] < 0) 
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("nb_classify: total_data values must be >= 0")));
		}
		if (class_data[i] > total_data[i]) 
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("nb_classify: class_data values must be <= total_data")));
		}

		/* 
		 * If we do not have an adjustment value and any class has a zero value
		 * then we must skip this row, otherwise we will try to calculate ln(0)
		 */
		if (attr_count == 0 && class_data[i] == 0) 
		{
			skip = true;
		}
	}
	if (skip)
	{
		PG_RETURN_DATUM(PG_GETARG_DATUM(0));			
	}

	/* Get/create the accumulation state */
	if (!PG_ARGISNULL(0))
	{
		HeapTupleHeader tup;
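		/*
		 * Inside an aggregate we may scribble on the transition value in
		 * place; in any other calling context take a copy so the caller's
		 * datum is left untouched.
		 */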
		tup = (fcinfo->context && IsA(fcinfo->context, AggState))
			? PG_GETARG_HEAPTUPLEHEADER(0)
			: PG_GETARG_HEAPTUPLEHEADER_COPY(0);

		get_nb_state(tup, &state, nclasses);
	}
	else
	{
		/* Construct the state arrays */
		int     size;

		/* allocate memory and copy the classes array */
		size = VARSIZE(classes);
		state.classes = (ArrayType *) palloc(size);
		SET_VARSIZE(state.classes, size);
		memcpy(state.classes, classes, size);

		/* allocate memory and construct the accumulator array */
		size = ARR_OVERHEAD_NONULLS(1) + nclasses * sizeof(float8);
		state.accum = (ArrayType *) palloc(size);
		SET_VARSIZE(state.accum, size);
		state.accum->ndim          = 1;
		state.accum->dataoffset    = 0;
		ARR_ELEMTYPE(state.accum)  = FLOAT8OID;
		ARR_DIMS(state.accum)[0]   = nclasses;
		ARR_LBOUND(state.accum)[0] = 1;
		prior_data = (float8*) ARR_DATA_PTR(state.accum);
		for (i = 0; i < nclasses; i++)
			prior_data[i] = 0;

		/* allocate memory and construct the total array */
		size = ARR_OVERHEAD_NONULLS(1) + nclasses * sizeof(int64);
		state.total = (ArrayType *) palloc(size);
		SET_VARSIZE(state.total, size);
		state.total->ndim          = 1;
		state.total->dataoffset    = 0;
		ARR_ELEMTYPE(state.total)  = INT8OID;
		ARR_DIMS(state.total)[0]   = nclasses;
		ARR_LBOUND(state.total)[0] = 1;
		stotal_data = (int64*) ARR_DATA_PTR(state.total);
		for (i = 0; i < nclasses; i++)
			stotal_data[i] = 0;
	}

	/* Adjust the prior based on the current input row */
	prior_data = (float8*) ARR_DATA_PTR(state.accum);
	stotal_data = (int64*) ARR_DATA_PTR(state.total);
	for (i = 0; i < nclasses; i++)
	{
		/*
		 * Calculate the accumulation value for the classifier
		 *
		 * The logical calculation is: 
		 *     product((class[i]+1)/(total_data[i]+attr_count))
		 *
		 * Instead of this calculation we calculate:
		 *     sum(ln((class[i]+1)/(total_data[i]+attr_count)))
		 *
		 * The reason for this is to increase the numerical stability of
		 * the algorithm.
		 *
		 * Since ln(0) is undefined we want to increment the count
		 * for all classes.
		 *
		 * This gets a bit more complicated for the denominator, which
		 * needs to know how many values there are for this attribute
		 * so that we keep the total probability for the attribute = 1.
		 * To handle this the aggregation function should be passed the
		 * number of distinct values for the aggregate it is computing.
		 *
		 * If for some reason this value is not present, or < 1, then we
		 * just switch to non-adjusted calculations.  In this case we
		 * will simply skip over any row that has a 0 count on any
		 * class_data index (handled above).
		 */
		if (attr_count > 1)
			prior_data[i] += log((class_data[i]+1)/(float8)(total_data[i] + attr_count));
		else
			prior_data[i] += log(class_data[i]/(float8)total_data[i]);

		/* 
		 * The total array should be constant throughout, but if not it should
		 * reflect the maximum values encountered 
		 */
		if (total_data[i] > stotal_data[i])
			stotal_data[i] = total_data[i];
	}


	/* Construct the return tuple */
	if (get_call_result_type(fcinfo, NULL, &resultDesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	BlessTupleDesc(resultDesc); 
	
	resultDatum[0] = PointerGetDatum(state.classes);
	resultDatum[1] = PointerGetDatum(state.accum);
	resultDatum[2] = PointerGetDatum(state.total);
	resultNull[0]  = false;
	resultNull[1]  = false;
	resultNull[2]  = false;

	result = heap_form_tuple(resultDesc, resultDatum, resultNull);
	PG_RETURN_DATUM(HeapTupleGetDatum(result));
}
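Restating the per-row update from the block comment above as a formula (same content as the code, no new behavior): for each class $k$,

	a_k \mathrel{+}= \ln\!\left(\frac{c_k + 1}{t_k + m}\right) \quad\text{if } m > 1,
	\qquad
	a_k \mathrel{+}= \ln\!\left(\frac{c_k}{t_k}\right) \quad\text{otherwise,}

where $c_k$ is class_data[k], $t_k$ is total_data[k], $m$ is attr_count, and $a_k$ is the running accumulator prior_data[k]. Accumulating logarithms rather than multiplying the smoothed probabilities avoids floating-point underflow, and the $+1$ in the numerator is the usual add-one (Laplace) adjustment.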