Example #1
Datum pcpoint_in(PG_FUNCTION_ARGS)
{
	char *str = PG_GETARG_CSTRING(0);
	/* Datum pc_oid = PG_GETARG_OID(1); Not needed. */
    int32 typmod = 0;
    uint32 pcid = 0;
	PCPOINT *pt;
	SERIALIZED_POINT *serpt;
	
	if ( (PG_NARGS()>2) && (!PG_ARGISNULL(2)) ) 
	{
		typmod = PG_GETARG_INT32(2);
        pcid = pcid_from_typmod(typmod);
	}

	/* Empty string. */
	if ( str[0] == '\0' )
	{
		ereport(ERROR,(errmsg("pcpoint parse error - empty string")));
	}

	/* Binary or text form? Let's find out. */
	if ( str[0] == '0' )
	{		
		/* Hex-encoded binary */
		pt = pc_point_from_hexwkb(str, strlen(str), fcinfo);
        pcid_consistent(pt->schema->pcid, pcid);
		serpt = pc_point_serialize(pt);
		pc_point_free(pt);
	}
	else
	{
		ereport(ERROR,(errmsg("parse error - support for text format not yet implemented")));
	}

	PG_RETURN_POINTER(serpt);
}
Example #2
Datum LWGEOMFromWKB(PG_FUNCTION_ARGS)
{
	bytea *bytea_wkb = (bytea*)PG_GETARG_BYTEA_P(0);
	int32 srid = 0;
	GSERIALIZED *geom;
	LWGEOM *lwgeom;
	uint8_t *wkb = (uint8_t*)VARDATA(bytea_wkb);
	
	lwgeom = lwgeom_from_wkb(wkb, VARSIZE(bytea_wkb)-VARHDRSZ, LW_PARSER_CHECK_ALL);
	
	if (  ( PG_NARGS()>1) && ( ! PG_ARGISNULL(1) ))
	{
		srid = PG_GETARG_INT32(1);
		lwgeom_set_srid(lwgeom, srid);
	}

	if ( lwgeom_needs_bbox(lwgeom) )
		lwgeom_add_bbox(lwgeom);

	geom = geometry_serialize(lwgeom);
	lwgeom_free(lwgeom);
	PG_FREE_IF_COPY(bytea_wkb, 0);
	PG_RETURN_POINTER(geom);
}
Example #3
File: pinv.c Project: qiuyesuifeng/gpdb
Datum
pseudoinverse(PG_FUNCTION_ARGS)
{
    /*
     * A note on types: PostgreSQL array dimensions are of type int. See, e.g.,
     * the macro definition ARR_DIMS
     */
    int         rows, columns;
    float8     *A, *Aplus;
    ArrayType  *A_PG, *Aplus_PG;
    int lbs[2], dims[2];

    /*
     * Perform all the error checking needed to ensure that no one is
     * trying to call this in some sort of crazy way.
     */
    if (PG_NARGS() != 1)
    {
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("pseudoinverse called with %d arguments",
                        PG_NARGS())));
    }
    if (PG_ARGISNULL(0))
        PG_RETURN_NULL();

    A_PG = PG_GETARG_ARRAYTYPE_P(0);

    if (ARR_ELEMTYPE(A_PG) != FLOAT8OID)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("pseudoinverse only defined over float8[]")));
    if (ARR_NDIM(A_PG) != 2)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("pseudoinverse only defined over 2 dimensional arrays"))
               );
    if (ARR_NULLBITMAP(A_PG))
        ereport(ERROR,
                (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
                 errmsg("null array element not allowed in this context")));

    /* Extract rows, columns, and data */
    rows = ARR_DIMS(A_PG)[0];
    columns = ARR_DIMS(A_PG)[1];
    A = (float8 *) ARR_DATA_PTR(A_PG);


    /*  Allocate a PG array for the result, "Aplus" is A+, the pseudo inverse of A */
    lbs[0] = 1;
    lbs[1] = 1;
    dims[0] = columns;
    dims[1] = rows;
    Aplus_PG = construct_md_array(NULL, NULL, 2, dims, lbs, FLOAT8OID,
                                  sizeof(float8), true, 'd');

    Aplus = (float8 *) ARR_DATA_PTR(Aplus_PG);

    pinv(rows,columns,A,Aplus);

    PG_RETURN_ARRAYTYPE_P(Aplus_PG);
}
Example #4
Datum
generate_series_step_int4(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	generate_series_fctx *fctx;
	int32		result;
	MemoryContext oldcontext;

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		int32		start = PG_GETARG_INT32(0);
		int32		finish = PG_GETARG_INT32(1);
		int32		step = 1;

		/* see if we were given an explicit step size */
		if (PG_NARGS() == 3)
			step = PG_GETARG_INT32(2);
		if (step == 0)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("step size may not equal zero")));

		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();

		/*
		 * switch to memory context appropriate for multiple function calls
		 */
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		/* allocate memory for user context */
		fctx = (generate_series_fctx *) palloc(sizeof(generate_series_fctx));

		/*
		 * Use fctx to keep state from call to call. Seed current with the
		 * original start value
		 */
		fctx->current = start;
		fctx->finish = finish;
		fctx->step = step;

		funcctx->user_fctx = fctx;
		MemoryContextSwitchTo(oldcontext);
	}

	/* stuff done on every call of the function */
	funcctx = SRF_PERCALL_SETUP();

	/*
	 * get the saved state and use current as the result for this iteration
	 */
	fctx = funcctx->user_fctx;
	result = fctx->current;

	if ((fctx->step > 0 && fctx->current <= fctx->finish) ||
		(fctx->step < 0 && fctx->current >= fctx->finish))
	{
		/* increment current in preparation for next iteration */
		fctx->current += fctx->step;

		/* if next-value computation overflows, this is the final result */
		if (SAMESIGN(result, fctx->step) && !SAMESIGN(result, fctx->current))
			fctx->step = 0;

		/* do when there is more left to send */
		SRF_RETURN_NEXT(funcctx, Int32GetDatum(result));
	}
	else
		/* do when there is no more left */
		SRF_RETURN_DONE(funcctx);
}
Example #5
Datum
tuple_data_split(PG_FUNCTION_ARGS)
{
	Oid			relid;
	bytea	   *raw_data;
	uint16		t_infomask;
	uint16		t_infomask2;
	char	   *t_bits_str;
	bool		do_detoast = false;
	bits8	   *t_bits = NULL;
	Datum		res;

	relid = PG_GETARG_OID(0);
	raw_data = PG_ARGISNULL(1) ? NULL : PG_GETARG_BYTEA_P(1);
	t_infomask = PG_GETARG_INT16(2);
	t_infomask2 = PG_GETARG_INT16(3);
	t_bits_str = PG_ARGISNULL(4) ? NULL :
		text_to_cstring(PG_GETARG_TEXT_PP(4));

	if (PG_NARGS() >= 6)
		do_detoast = PG_GETARG_BOOL(5);

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use raw page functions")));

	if (!raw_data)
		PG_RETURN_NULL();

	/*
	 * Convert t_bits string back to the bits8 array as represented in the
	 * tuple header.
	 */
	if (t_infomask & HEAP_HASNULL)
	{
		int			bits_str_len;
		int			bits_len;

		bits_len = BITMAPLEN(t_infomask2 & HEAP_NATTS_MASK) * BITS_PER_BYTE;
		if (!t_bits_str)
			ereport(ERROR,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg("argument of t_bits is null, but it is expected to be null and %d character long",
							bits_len)));

		bits_str_len = strlen(t_bits_str);
		if (bits_len != bits_str_len)
			ereport(ERROR,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg("unexpected length of t_bits %u, expected %d",
							bits_str_len, bits_len)));

		/* do the conversion */
		t_bits = text_to_bits(t_bits_str, bits_str_len);
	}
	else
	{
		if (t_bits_str)
			ereport(ERROR,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg("t_bits string is expected to be NULL, but instead it is %zu bytes length",
							strlen(t_bits_str))));
	}

	/* Split tuple data */
	res = tuple_data_split_internal(relid, (char *) raw_data + VARHDRSZ,
									VARSIZE(raw_data) - VARHDRSZ,
									t_infomask, t_infomask2, t_bits,
									do_detoast);

	if (t_bits)
		pfree(t_bits);

	PG_RETURN_ARRAYTYPE_P(res);
}
Example #6
/*
 * Common subroutine for num_nulls() and num_nonnulls().
 * Returns TRUE if successful, FALSE if function should return NULL.
 * If successful, total argument count and number of nulls are
 * returned into *nargs and *nulls.
 */
static bool
count_nulls(FunctionCallInfo fcinfo,
			int32 *nargs, int32 *nulls)
{
	int32		count = 0;
	int			i;

	/* Did we get a VARIADIC array argument, or separate arguments? */
	if (get_fn_expr_variadic(fcinfo->flinfo))
	{
		ArrayType  *arr;
		int			ndims,
					nitems,
				   *dims;
		bits8	   *bitmap;

		Assert(PG_NARGS() == 1);

		/*
		 * If we get a null as VARIADIC array argument, we can't say anything
		 * useful about the number of elements, so return NULL.  This behavior
		 * is consistent with other variadic functions - see concat_internal.
		 */
		if (PG_ARGISNULL(0))
			return false;

		/*
		 * Non-null argument had better be an array.  We assume that any call
		 * context that could let get_fn_expr_variadic return true will have
		 * checked that a VARIADIC-labeled parameter actually is an array.  So
		 * it should be okay to just Assert that it's an array rather than
		 * doing a full-fledged error check.
		 */
		Assert(OidIsValid(get_base_element_type(get_fn_expr_argtype(fcinfo->flinfo, 0))));

		/* OK, safe to fetch the array value */
		arr = PG_GETARG_ARRAYTYPE_P(0);

		/* Count the array elements */
		ndims = ARR_NDIM(arr);
		dims = ARR_DIMS(arr);
		nitems = ArrayGetNItems(ndims, dims);

		/* Count those that are NULL */
		bitmap = ARR_NULLBITMAP(arr);
		if (bitmap)
		{
			int			bitmask = 1;

			for (i = 0; i < nitems; i++)
			{
				if ((*bitmap & bitmask) == 0)
					count++;

				bitmask <<= 1;
				if (bitmask == 0x100)
				{
					bitmap++;
					bitmask = 1;
				}
			}
		}

		*nargs = nitems;
		*nulls = count;
	}
	else
	{
		/* Separate arguments, so just count 'em */
		for (i = 0; i < PG_NARGS(); i++)
		{
			if (PG_ARGISNULL(i))
				count++;
		}

		*nargs = PG_NARGS();
		*nulls = count;
	}

	return true;
}
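
For context: the comment above defines the helper's contract (return TRUE on success and hand the counts back through *nargs and *nulls, return FALSE to signal that the SQL function should return NULL). A wrapper built on that contract needs only a few lines; the sketch below shows a num_nulls()-style wrapper and is an illustration of the stated contract, not part of the excerpted source file.

/* Sketch of a num_nulls()-style wrapper over count_nulls() -- illustrative only */
Datum
pg_num_nulls(PG_FUNCTION_ARGS)
{
	int32		nargs,
				nulls;

	/* FALSE from the helper means "function should return NULL" */
	if (!count_nulls(fcinfo, &nargs, &nulls))
		PG_RETURN_NULL();

	/* report how many of the arguments were NULL */
	PG_RETURN_INT32(nulls);
}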
Example #7
/*
 * pivot_accum() - Pivot and accumulate
 */
static Datum oid_pivot_accum(FunctionCallInfo fcinfo, Oid type)
{
	ArrayType  *data;
	ArrayType  *labels;
	text       *attr;
	int         i;
	
	/* Simple argument validation */
	if (PG_NARGS() != 4)
		ereport(ERROR, 
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("pivot_accum called with %d input arguments",
						PG_NARGS())));
	if (PG_ARGISNULL(1) || PG_ARGISNULL(2) || PG_ARGISNULL(3))
	{
		if (PG_ARGISNULL(0))
			PG_RETURN_NULL();
		PG_RETURN_ARRAYTYPE_P(PG_GETARG_ARRAYTYPE_P(0));
	}
    labels = PG_GETARG_ARRAYTYPE_P(1);
	attr   = PG_GETARG_TEXT_P(2);
	
	/* Do nothing if the attr isn't in the labels array. */
	if ((i = pivot_find(labels, attr)) < 0)
	{
		if (PG_ARGISNULL(0))
			PG_RETURN_NULL();
		PG_RETURN_ARRAYTYPE_P(PG_GETARG_ARRAYTYPE_P(0));
	}
	
	/* Get the data array, or make it if null */
	if (!PG_ARGISNULL(0))
	{
		data = PG_GETARG_ARRAYTYPE_P(0);
		Assert(ARR_DIMS(labels)[0] == ARR_DIMS(data)[0]);
	}
	else
	{
		int elsize, size, nelem;

		switch (type) {
			case INT4OID:
				elsize = 4;
				break;
			case INT8OID:
			case FLOAT8OID:
				elsize = 8; 
				break;
			default:
				elsize = 0;  /* Fixes compiler warnings */
				Assert(false);
		}
		nelem = ARR_DIMS(labels)[0];
		size = nelem * elsize + ARR_OVERHEAD_NONULLS(1);
		data = (ArrayType *) palloc(size);
		SET_VARSIZE(data, size);
		data->ndim = 1;
		data->dataoffset = 0;
		data->elemtype = type;
		ARR_DIMS(data)[0] = nelem;
		ARR_LBOUND(data)[0] = 1;
		memset(ARR_DATA_PTR(data), 0, nelem * elsize);
	}
	
	
	/*   
	 * Should we think about upconverting the arrays? Or is the assumption that 
	 * the pivot isn't usually doing much aggregation?
	 */
	switch (type) {
		case INT4OID:
		{
			int32 *datap = (int32*) ARR_DATA_PTR(data);
			int32  value = PG_GETARG_INT32(3);
			datap[i] += value;
			break;
		}
		case INT8OID:
		{
			int64 *datap = (int64*) ARR_DATA_PTR(data);
			int64  value = PG_GETARG_INT64(3);
			datap[i] += value;
			break;
		}
		case FLOAT8OID:
		{
			float8 *datap = (float8*) ARR_DATA_PTR(data);
			float8  value = PG_GETARG_FLOAT8(3);
			datap[i] += value;
			break;
		}
		default:
			Assert(false);
	}
	PG_RETURN_ARRAYTYPE_P(data);
}
Example #8
/*
 * decode(lhs, [rhs, ret], ..., [default])
 */
Datum
ora_decode(PG_FUNCTION_ARGS)
{
	int		nargs;
	int		i;
	int		retarg;

	/* default value is last arg or NULL. */
	nargs = PG_NARGS();
	if (nargs % 2 == 0)
	{
		retarg = nargs - 1;
		nargs -= 1;		/* ignore the last argument */
	}
	else
		retarg = -1;	/* NULL */

	if (PG_ARGISNULL(0))
	{
		for (i = 1; i < nargs; i += 2)
		{
			if (PG_ARGISNULL(i))
			{
				retarg = i + 1;
				break;
			}
		}
	}
	else
	{
		FmgrInfo   *eq;
#if PG_VERSION_NUM >= 90100
		Oid		collation = PG_GET_COLLATION();
#endif

		/*
		 * On first call, get the input type's operator '=' and save at
		 * fn_extra.
		 */
		if (fcinfo->flinfo->fn_extra == NULL)
		{
			MemoryContext	oldctx;
			Oid				typid = get_fn_expr_argtype(fcinfo->flinfo, 0);
			Oid				eqoid = equality_oper_funcid(typid);

			oldctx = MemoryContextSwitchTo(fcinfo->flinfo->fn_mcxt);
			eq = palloc(sizeof(FmgrInfo));
			fmgr_info(eqoid, eq);
			MemoryContextSwitchTo(oldctx);

			fcinfo->flinfo->fn_extra = eq;
		}
		else
			eq = fcinfo->flinfo->fn_extra;

		for (i = 1; i < nargs; i += 2)
		{
			FunctionCallInfoData	func;
			Datum					result;

			if (PG_ARGISNULL(i))
				continue;

#if PG_VERSION_NUM >= 90100
			InitFunctionCallInfoData(func, eq, 2, collation, NULL, NULL);
#else
			InitFunctionCallInfoData(func, eq, 2, NULL, NULL);
#endif
			func.arg[0] = PG_GETARG_DATUM(0);
			func.arg[1] = PG_GETARG_DATUM(i);
			func.argnull[0] = false;
			func.argnull[1] = false;
			result = FunctionCallInvoke(&func);

			if (!func.isnull && DatumGetBool(result))
			{
				retarg = i + 1;
				break;
			}
		}
	}

	if (retarg < 0 || PG_ARGISNULL(retarg))
		PG_RETURN_NULL();
	else
		PG_RETURN_DATUM(PG_GETARG_DATUM(retarg));
}
Example #9
Datum geography_as_geojson(PG_FUNCTION_ARGS)
{
	LWGEOM *lwgeom = NULL;
	GSERIALIZED *g = NULL;
	char *geojson;
	text *result;
	int version;
	int option = 0;
	int has_bbox = 0;
	int precision = OUT_MAX_DOUBLE_PRECISION;
	char * srs = NULL;

	/* Get the version */
	version = PG_GETARG_INT32(0);
	if ( version != 1)
	{
		elog(ERROR, "Only GeoJSON 1 is supported");
		PG_RETURN_NULL();
	}

	/* Get the geography */
	if (PG_ARGISNULL(1) ) PG_RETURN_NULL();
	g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1));

	/* Convert to lwgeom so we can run the old functions */
	lwgeom = lwgeom_from_gserialized(g);

	/* Retrieve precision if any (default is max) */
	if (PG_NARGS() >2 && !PG_ARGISNULL(2))
	{
		precision = PG_GETARG_INT32(2);
		if ( precision > OUT_MAX_DOUBLE_PRECISION )
			precision = OUT_MAX_DOUBLE_PRECISION;
		else if ( precision < 0 ) precision = 0;
	}

	/* Retrieve output option
	 * 0 = without option (default)
	 * 1 = bbox
	 * 2 = short crs
	 * 4 = long crs
	 */
	if (PG_NARGS() >3 && !PG_ARGISNULL(3))
		option = PG_GETARG_INT32(3);

	if (option & 2 || option & 4)
	{
		/* Geography only handle srid SRID_DEFAULT */
		if (option & 2) srs = getSRSbySRID(SRID_DEFAULT, true);
		if (option & 4) srs = getSRSbySRID(SRID_DEFAULT, false);

		if (!srs)
		{
			elog(ERROR, "SRID SRID_DEFAULT unknown in spatial_ref_sys table");
			PG_RETURN_NULL();
		}
	}

	if (option & 1) has_bbox = 1;

	geojson = lwgeom_to_geojson(lwgeom, srs, precision, has_bbox);
    lwgeom_free(lwgeom);
	PG_FREE_IF_COPY(g, 1);
	if (srs) pfree(srs);

	result = cstring2text(geojson);
	lwfree(geojson);

	PG_RETURN_TEXT_P(result);
}
Example #10
File: genfile.c Project: RingsC/postgres
/*
 * stat a file
 */
Datum
pg_stat_file(PG_FUNCTION_ARGS)
{
	text	   *filename_t = PG_GETARG_TEXT_PP(0);
	char	   *filename;
	struct stat fst;
	Datum		values[6];
	bool		isnull[6];
	HeapTuple	tuple;
	TupleDesc	tupdesc;
	bool		missing_ok = false;

	/* check the optional argument */
	if (PG_NARGS() == 2)
		missing_ok = PG_GETARG_BOOL(1);

	filename = convert_and_check_filename(filename_t);

	if (stat(filename, &fst) < 0)
	{
		if (missing_ok && errno == ENOENT)
			PG_RETURN_NULL();
		else
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not stat file \"%s\": %m", filename)));
	}

	/*
	 * This record type had better match the output parameters declared for me
	 * in pg_proc.h.
	 */
	tupdesc = CreateTemplateTupleDesc(6, false);
	TupleDescInitEntry(tupdesc, (AttrNumber) 1,
					   "size", INT8OID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 2,
					   "access", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 3,
					   "modification", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 4,
					   "change", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 5,
					   "creation", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 6,
					   "isdir", BOOLOID, -1, 0);
	BlessTupleDesc(tupdesc);

	memset(isnull, false, sizeof(isnull));

	values[0] = Int64GetDatum((int64) fst.st_size);
	values[1] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_atime));
	values[2] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_mtime));
	/* Unix has file status change time, while Win32 has creation time */
#if !defined(WIN32) && !defined(__CYGWIN__)
	values[3] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_ctime));
	isnull[4] = true;
#else
	isnull[3] = true;
	values[4] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_ctime));
#endif
	values[5] = BoolGetDatum(S_ISDIR(fst.st_mode));

	tuple = heap_form_tuple(tupdesc, values, isnull);

	pfree(filename);

	PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
Example #11
/*
table_log_restore_table()

restore a complete table based on the logging table

parameter:
  - original table name
  - name of primary key in original table
  - logging table
  - name of primary key in logging table
  - restore table name
  - timestamp for restoring data
  - primary key to restore (only this key will be restored) (optional)
  - restore mode
    0: restore from blank table (default)
       needs a complete logging table
    1: restore from actual table backwards
  - don't create table temporarily
    0: create restore table temporarily (default)
    1: do not create restore table temporarily
  return:
    not yet defined
*/
Datum table_log_restore_table(PG_FUNCTION_ARGS) {
  /* the original table name */
  char  *table_orig;
  /* the primary key in the original table */
  char  *table_orig_pkey;
  /* number columns in original table */
  int  table_orig_columns;
  /* the log table name */
  char  *table_log;
  /* the primary key in the log table (usually trigger_id) */
  /* cannot be the same as the pkey in the original table */
  char  *table_log_pkey;
  /* number columns in log table */
  int  table_log_columns;
  /* the restore table name */
  char  *table_restore;
  /* the timestamp in past */
  Datum      timestamp = PG_GETARG_DATUM(5);
  /* the single pkey, can be null (then all keys will be restored) */
  char  *search_pkey = "";
  /* the restore method
    - 0: restore from blank table (default)
         needs a complete log table!
    - 1: restore from actual table backwards
  */
  int            method = 0;
  /* don't create restore table temporarily
    - 0: create restore table temporarily (default)
    - 1: don't create restore table temporarily
  */
  int            not_temporarly = 0;
  int            ret, results, i, number_columns;
  char           query[250 + NAMEDATALEN];	/* for getting table infos (250 chars (+ one times the length of all names) should be enough) */
  int            need_search_pkey = 0;          /* do we have a single key to restore? */
  char           *tmp, *timestamp_string, *old_pkey_string = "";
  char           *trigger_mode, *trigger_tuple, *trigger_changed;
  SPITupleTable  *spi_tuptable = NULL;          /* for saving query results */
  VarChar        *return_name;

  /* memory for dynamic query */
  int      d_query_size = 250;                  /* start with 250 bytes */
  char     *d_query;
  char     *d_query_start;

  /* memory for column names */
  int      col_query_size = 0;
  char     *col_query;
  char     *col_query_start;

  int      col_pkey = 0;

  /*
   * Some checks first...
   */

#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "start table_log_restore_table()");
#endif /* TABLE_LOG_DEBUG */

  /* do we have all arguments? */
  if (PG_ARGISNULL(0)) {
    elog(ERROR, "table_log_restore_table: missing original table name");
  }
  if (PG_ARGISNULL(1)) {
    elog(ERROR, "table_log_restore_table: missing primary key name for original table");
  }
  if (PG_ARGISNULL(2)) {
    elog(ERROR, "table_log_restore_table: missing log table name");
  }
  if (PG_ARGISNULL(3)) {
    elog(ERROR, "table_log_restore_table: missing primary key name for log table");
  }
  if (PG_ARGISNULL(4)) {
    elog(ERROR, "table_log_restore_table: missing copy table name");
  }
  if (PG_ARGISNULL(5)) {
    elog(ERROR, "table_log_restore_table: missing timestamp");
  }

  /* first check the number of arguments to avoid a segfault */
  if (PG_NARGS() >= 7) {
    /* if argument is given, check if not null */
    if (!PG_ARGISNULL(6)) {
      /* yes, fetch it */
      search_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(6));
      /* and check, if we have an argument */
      if (strlen(search_pkey) > 0) {
        need_search_pkey = 1;
#ifdef TABLE_LOG_DEBUG
        elog(NOTICE, "table_log_restore_table: will restore a single key");
#endif /* TABLE_LOG_DEBUG */
      }
    }
  }

  /* same procedure here */
  if (PG_NARGS() >= 8) {
    if (!PG_ARGISNULL(7)) {
      method = PG_GETARG_INT32(7);
      if (method > 0) {
        method = 1;
      } else {
        method = 0;
      }
    }
  }
#ifdef TABLE_LOG_DEBUG
  if (method == 1) {
    elog(NOTICE, "table_log_restore_table: will restore from actual state backwards");
  } else {
    elog(NOTICE, "table_log_restore_table: will restore from begin forward");
  }
#endif /* TABLE_LOG_DEBUG */
  if (PG_NARGS() >= 9) {
    if (!PG_ARGISNULL(8)) {
      not_temporarly = PG_GETARG_INT32(8);
      if (not_temporarly > 0) {
        not_temporarly = 1;
      } else {
        not_temporarly = 0;
      }
    }
  }
#ifdef TABLE_LOG_DEBUG
  if (not_temporarly == 1) {
    elog(NOTICE, "table_log_restore_table: dont create restore table temporarly");
  }
#endif /* TABLE_LOG_DEBUG */
  /* get parameter */
  table_orig = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(0));
  table_orig_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(1));
  table_log = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(2));
  table_log_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(3));
  table_restore = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(4));

  /* pkey of original table cannot be the same as of log table */
  if (strcmp((const char *)table_orig_pkey, (const char *)table_log_pkey) == 0) {
    elog(ERROR, "pkey of logging table cannot be the pkey of the original table: %s <-> %s", table_orig_pkey, table_log_pkey);
  }

  /* Connect to SPI manager */
  ret = SPI_connect();
  if (ret != SPI_OK_CONNECT) {
    elog(ERROR, "table_log_restore_table: SPI_connect returned %d", ret);
  }

  /* check original table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_orig));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
  if (SPI_processed > 0) {
    table_orig_columns = SPI_processed;
  } else {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
  /* check pkey in original table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname=%s AND c.relkind='r' AND a.attname=%s AND a.attnum > 0 AND a.attrelid = c.oid", do_quote_literal(table_orig), do_quote_literal(table_orig_pkey));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
  if (SPI_processed == 0) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "original table: OK (%i columns)", table_orig_columns);
#endif /* TABLE_LOG_DEBUG */

  /* check log table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_log));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation [1]: %s", table_log);
  }
  if (SPI_processed > 0) {
    table_log_columns = SPI_processed;
  } else {
    elog(ERROR, "could not check relation [2]: %s", table_log);
  }
  /* check pkey in log table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname=%s AND c.relkind='r' AND a.attname=%s AND a.attnum > 0 AND a.attrelid = c.oid", do_quote_literal(table_log), do_quote_literal(table_log_pkey));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation [3]: %s", table_log);
  }
  if (SPI_processed == 0) {
    elog(ERROR, "could not check relation [4]: %s", table_log);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "log table: OK (%i columns)", table_log_columns);
#endif /* TABLE_LOG_DEBUG */

  /* check restore table */
  snprintf(query, 249, "SELECT pg_attribute.attname AS a FROM pg_class, pg_attribute WHERE pg_class.relname=%s AND pg_attribute.attnum > 0 AND pg_attribute.attrelid=pg_class.oid", do_quote_literal(table_restore));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation: %s", table_restore);
  }
  if (SPI_processed > 0) {
    elog(ERROR, "restore table already exists: %s", table_restore);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "restore table: OK (doesnt exists)");
#endif /* TABLE_LOG_DEBUG */

  /* now get all columns from original table */
  snprintf(query, 249, "SELECT a.attname, format_type(a.atttypid, a.atttypmod), a.attnum FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_orig));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not get columns from relation: %s", table_orig);
  }
  if (SPI_processed == 0) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
  results = SPI_processed;
  /* store number columns for later */
  number_columns = SPI_processed;
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "number columns: %i", results);
#endif /* TABLE_LOG_DEBUG */
  for (i = 0; i < results; i++) {
    /* the column name */
    tmp = SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1);
    col_query_size += strlen(do_quote_ident(tmp)) + 2;
    /* now check, if this is the pkey */
    if (strcmp((const char *)tmp, (const char *)table_orig_pkey) == 0) {
      /* remember the (real) number */
      col_pkey = i + 1;
    }
  }
  /* check if we have found the pkey */
  if (col_pkey == 0) {
    elog(ERROR, "cannot find pkey (%s) in table %s", table_orig_pkey, table_orig);
  }
  /* allocate memory for string */
  col_query_size += 10;
  col_query_start = (char *) palloc((col_query_size + 1) * sizeof(char));
  col_query = col_query_start;
  for (i = 0; i < results; i++) {
    if (i > 0) {
      sprintf(col_query, ", ");
      col_query = col_query_start + strlen(col_query_start);
    }
    sprintf(col_query, "%s", do_quote_ident(SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1)));
    col_query = col_query_start + strlen(col_query_start);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "string for columns: %s", col_query_start);
#endif /* TABLE_LOG_DEBUG */

  /* create restore table */
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "create restore table: %s", table_restore);
#endif /* TABLE_LOG_DEBUG */

  snprintf(query, 249, "SELECT * INTO ");
  /* per default create a temporary table */
  if (not_temporarly == 0) {
    strcat(query, "TEMPORARY ");
  }
  strcat(query, "TABLE ");
  strncat(query, table_restore, 249);
  /* from which table? */
  strncat(query, " FROM ", 249);
  strncat(query, table_orig, 249);
  if (need_search_pkey == 1) {
    /* only extract a specific key */
    strncat(query, " WHERE ", 249);
    strncat(query, do_quote_ident(table_orig_pkey), 249);
    strncat(query, "=", 249);
    strncat(query, do_quote_literal(search_pkey), 249);
  }
  if (method == 0) {
    /* restore from begin (blank table) */
    strncat(query, " LIMIT 0", 249);
  }
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELINTO) {
    elog(ERROR, "could not check relation: %s", table_restore);
  }
  if (method == 1) {
#ifdef TABLE_LOG_DEBUG
    elog(NOTICE, "%i rows copied", SPI_processed);
#endif /* TABLE_LOG_DEBUG */
  }

  /* get timestamp as string */
  timestamp_string = DatumGetCString(DirectFunctionCall1(timestamptz_out, timestamp));

#ifdef TABLE_LOG_DEBUG
  if (method == 0) {
    elog(NOTICE, "need logs from start to timestamp: %s", timestamp_string);
  } else {
    elog(NOTICE, "need logs from end to timestamp: %s", timestamp_string);
  }
#endif /* TABLE_LOG_DEBUG */

  /* now build query for getting logs */
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "build query for getting logs");
#endif /* TABLE_LOG_DEBUG */

  d_query_size += d_query_size + strlen(col_query_start);
  if (need_search_pkey == 1) {
    /* add size of pkey and size of value */
    d_query_size += strlen(do_quote_ident(table_orig_pkey)) * 2 + strlen(do_quote_literal(search_pkey)) + 3;
  }

  /* allocate memory for string */
  d_query_size += 10;
  d_query_start = (char *) palloc((d_query_size + 1) * sizeof(char));
  d_query = d_query_start;

  snprintf(d_query, d_query_size, "SELECT %s, trigger_mode, trigger_tuple, trigger_changed FROM %s WHERE ", col_query_start, do_quote_ident(table_log));
  d_query = d_query_start + strlen(d_query_start);
  if (method == 0) {
    /* from start to timestamp */
    snprintf(d_query, d_query_size, "trigger_changed <= %s ", do_quote_literal(timestamp_string));
  } else {
    /* from now() backwards to timestamp */
    snprintf(d_query, d_query_size, "trigger_changed >= %s ", do_quote_literal(timestamp_string));
  }
  d_query = d_query_start + strlen(d_query_start);
  if (need_search_pkey == 1) {
    snprintf(d_query, d_query_size, "AND %s = %s ", do_quote_ident(table_orig_pkey), do_quote_literal(search_pkey));
    d_query = d_query_start + strlen(d_query_start);
  }
  if (method == 0) {
    snprintf(d_query, d_query_size, "ORDER BY %s ASC", do_quote_ident(table_log_pkey));
  } else {
    snprintf(d_query, d_query_size, "ORDER BY %s DESC", do_quote_ident(table_log_pkey));
  }
  d_query = d_query_start + strlen(d_query_start);
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", d_query_start);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(d_query_start, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not get log data from table: %s", table_log);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "number log entries to restore: %i", SPI_processed);
#endif /* TABLE_LOG_DEBUG */
  results = SPI_processed;
  /* save results */
  spi_tuptable = SPI_tuptable;

  /* go through all results */
  for (i = 0; i < results; i++) {
    /* get tuple data */
    trigger_mode = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 1);
    trigger_tuple = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 2);
    trigger_changed = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 3);
    /* check for update tuples we don't need */
    if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
      if (method == 0 && strcmp((const char *)trigger_tuple, (const char *)"old") == 0) {
        /* we need the old value of the pkey for the update */
        old_pkey_string = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, col_pkey);
#ifdef TABLE_LOG_DEBUG
        elog(NOTICE, "tuple old pkey: %s", old_pkey_string);
#endif /* TABLE_LOG_DEBUG */
        /* then skip this tuple */
        continue;
      }
      if (method == 1 && strcmp((const char *)trigger_tuple, (const char *)"new") == 0) {
        /* we need the old value of the pkey for the update */
        old_pkey_string = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, col_pkey);
#ifdef TABLE_LOG_DEBUG
        elog(NOTICE, "tuple: old pkey: %s", old_pkey_string);
#endif /* TABLE_LOG_DEBUG */
        /* then skip this tuple */
        continue;
      }
    }

    if (method == 0) {
      /* roll forward */
#ifdef TABLE_LOG_DEBUG
      elog(NOTICE, "tuple: %s  %s  %s", trigger_mode, trigger_tuple, trigger_changed);
#endif /* TABLE_LOG_DEBUG */
      if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
        __table_log_restore_table_insert(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      } else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
        __table_log_restore_table_update(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i, old_pkey_string);
      } else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
        __table_log_restore_table_delete(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      } else {
        elog(ERROR, "unknown trigger_mode: %s", trigger_mode);
      }
    } else {
      /* roll back */
      char rb_mode[10]; /* reverse the method */
      if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
        sprintf(rb_mode, "DELETE");
      } else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
        sprintf(rb_mode, "UPDATE");
      } else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
        sprintf(rb_mode, "INSERT");
      } else {
        elog(ERROR, "unknown trigger_mode: %s", trigger_mode);
      }
#ifdef TABLE_LOG_DEBUG
      elog(NOTICE, "tuple: %s  %s  %s", rb_mode, trigger_tuple, trigger_changed);
#endif /* TABLE_LOG_DEBUG */
      if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
        __table_log_restore_table_delete(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      } else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
        __table_log_restore_table_update(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i, old_pkey_string);
      } else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
        __table_log_restore_table_insert(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      }
    }
  }

#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "table_log_restore_table() done, results in: %s", table_restore);
#endif /* TABLE_LOG_DEBUG */

  /* convert string to VarChar for result */
  return_name = DatumGetVarCharP(DirectFunctionCall2(varcharin, CStringGetDatum(table_restore), Int32GetDatum(strlen(table_restore) + VARHDRSZ)));

  /* close SPI connection */
  SPI_finish();
  /* and return the name of the restore table */
  PG_RETURN_VARCHAR_P(return_name);
}
Example #12
static Datum
argm_transfn_universal(PG_FUNCTION_ARGS, int32 compareFunctionResultToAdvance)
{
	Oid           type;
	ArgmState    *state;
	MemoryContext aggcontext,
	              oldcontext;
	int           i;
	int32         comparison_result;
	
	if (!AggCheckCallContext(fcinfo, &aggcontext))
	{
		/* cannot be called directly because of internal-type argument */
		elog(ERROR, "argm**_transfn called in non-aggregate context");
	}
	
	if (PG_ARGISNULL(0))
	{
		/* First time through --- initialize */
		
		/* Make a temporary context to hold all the junk */
		oldcontext = MemoryContextSwitchTo(aggcontext);
		
		/* Initialize the state variable*/
		state = palloc(sizeof(ArgmState));
		
		state->key_count = PG_NARGS() - 1;
		state->keys = palloc(sizeof(ArgmDatumWithType) * (state->key_count));

		for (i = 0; i < state->key_count; i++)
		{
			type = get_fn_expr_argtype(fcinfo->flinfo, i + 1);
			if (type == InvalidOid)
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("could not determine input data type")));
			state->keys[i].type = type;
			get_typlenbyvalalign(type,
								&state->keys[i].typlen,
								&state->keys[i].typbyval,
								&state->keys[i].typalign);
			/* We do not need a sorting proc for payload */
			if (i != 0)
				state->keys[i].cmp_proc = 
					lookup_type_cache(state->keys[i].type,
					                  TYPECACHE_CMP_PROC)->cmp_proc;

			/* Copy initial values */
			argm_copy_datum(PG_ARGISNULL(i + 1),
			                  PG_GETARG_DATUM(i + 1),
			                 &(state->keys[i]), false);
		}
	}
	else
	{
		state = (ArgmState *) PG_GETARG_POINTER(0);
		
		oldcontext = MemoryContextSwitchTo(aggcontext);
		
		/* compare keys (but not payload) lexicographically */
		for (i = 1; i < state->key_count; i++)
		{
			/* nulls last */
			if (PG_ARGISNULL(i + 1) && !state->keys[i].is_null)
				break;
			
			if (!PG_ARGISNULL(i + 1) && state->keys[i].is_null) {
				/* nulls last */
				comparison_result = 1;
			} else {
				comparison_result = DatumGetInt32(OidFunctionCall2Coll(
					state->keys[i].cmp_proc,
					PG_GET_COLLATION(),
					PG_GETARG_DATUM(i + 1),
					state->keys[i].value
				)) * compareFunctionResultToAdvance;
				
				if (comparison_result < 0)
					break;
			}
			
			if (comparison_result > 0)
			{
				for (i = 0; i < state->key_count; i++)
					argm_copy_datum(PG_ARGISNULL(i + 1),
					                PG_GETARG_DATUM(i + 1),
					               &(state->keys[i]), true);
				break;
			}
		}
	}
	
	MemoryContextSwitchTo(oldcontext);
	
	PG_RETURN_POINTER(state);
}
Example #13
/*
 * extract_variadic_args
 *
 * Extract a set of argument values, types and NULL markers for a given
 * input function which makes use of a VARIADIC input whose argument list
 * depends on the caller context. When doing a VARIADIC call, the caller
 * has provided one argument made of an array of values, so deconstruct the
 * array data before using it for the next processing. If no VARIADIC call
 * is used, just fill in the status data based on all the arguments given
 * by the caller.
 *
 * This function returns the number of arguments generated, or -1 in the
 * case of "VARIADIC NULL".
 */
int
extract_variadic_args(FunctionCallInfo fcinfo, int variadic_start,
					  bool convert_unknown, Datum **args, Oid **types,
					  bool **nulls)
{
	bool		variadic = get_fn_expr_variadic(fcinfo->flinfo);
	Datum	   *args_res;
	bool	   *nulls_res;
	Oid		   *types_res;
	int			nargs,
				i;

	*args = NULL;
	*types = NULL;
	*nulls = NULL;

	if (variadic)
	{
		ArrayType  *array_in;
		Oid			element_type;
		bool		typbyval;
		char		typalign;
		int16		typlen;

		Assert(PG_NARGS() == variadic_start + 1);

		if (PG_ARGISNULL(variadic_start))
			return -1;

		array_in = PG_GETARG_ARRAYTYPE_P(variadic_start);
		element_type = ARR_ELEMTYPE(array_in);

		get_typlenbyvalalign(element_type,
							 &typlen, &typbyval, &typalign);
		deconstruct_array(array_in, element_type, typlen, typbyval,
						  typalign, &args_res, &nulls_res,
						  &nargs);

		/* All the elements of the array have the same type */
		types_res = (Oid *) palloc0(nargs * sizeof(Oid));
		for (i = 0; i < nargs; i++)
			types_res[i] = element_type;
	}
	else
	{
		nargs = PG_NARGS() - variadic_start;
		Assert(nargs > 0);
		nulls_res = (bool *) palloc0(nargs * sizeof(bool));
		args_res = (Datum *) palloc0(nargs * sizeof(Datum));
		types_res = (Oid *) palloc0(nargs * sizeof(Oid));

		for (i = 0; i < nargs; i++)
		{
			nulls_res[i] = PG_ARGISNULL(i + variadic_start);
			types_res[i] = get_fn_expr_argtype(fcinfo->flinfo,
											   i + variadic_start);

			/*
			 * Turn a constant (more or less literal) value that's of unknown
			 * type into text if required. Unknowns come in as a cstring
			 * pointer. Note: for functions declared as taking type "any", the
			 * parser will not do any type conversion on unknown-type literals
			 * (that is, undecorated strings or NULLs).
			 */
			if (convert_unknown &&
				types_res[i] == UNKNOWNOID &&
				get_fn_expr_arg_stable(fcinfo->flinfo, i + variadic_start))
			{
				types_res[i] = TEXTOID;

				if (PG_ARGISNULL(i + variadic_start))
					args_res[i] = (Datum) 0;
				else
					args_res[i] =
						CStringGetTextDatum(PG_GETARG_POINTER(i + variadic_start));
			}
			else
			{
				/* no conversion needed, just take the datum as given */
				args_res[i] = PG_GETARG_DATUM(i + variadic_start);
			}

			if (!OidIsValid(types_res[i]) ||
				(convert_unknown && types_res[i] == UNKNOWNOID))
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("could not determine data type for argument %d",
								i + 1)));
		}
	}

	/* Fill in results */
	*args = args_res;
	*nulls = nulls_res;
	*types = types_res;

	return nargs;
}
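
As the header comment notes, a return value of -1 means "VARIADIC NULL", so a caller should test the count before touching the output arrays. The fragment below is a minimal sketch of such a caller; the function name count_nonnull_args_sketch is hypothetical and is included only to illustrate the calling convention described above, not code from the source file.

/* Hypothetical caller: counts the non-NULL arguments of a VARIADIC "any" function */
Datum
count_nonnull_args_sketch(PG_FUNCTION_ARGS)
{
	Datum	   *args;
	Oid		   *types;
	bool	   *nulls;
	int			nargs;
	int			i;
	int32		nonnulls = 0;

	/* Handles both VARIADIC array calls and plain argument lists */
	nargs = extract_variadic_args(fcinfo, 0, true, &args, &types, &nulls);

	/* -1 signals "VARIADIC NULL": nothing sensible to count */
	if (nargs < 0)
		PG_RETURN_NULL();

	for (i = 0; i < nargs; i++)
	{
		if (!nulls[i])
			nonnulls++;
	}

	PG_RETURN_INT32(nonnulls);
}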
Example #14
Datum
_Slony_I_createEvent(PG_FUNCTION_ARGS)
{
	TransactionId newXid = GetTopTransactionId();
	Slony_I_ClusterStatus *cs;
	char	   *ev_type_c;
	Datum		argv[9];
	char		nulls[10];
	char	   *buf;
	size_t		buf_size;
	int			rc;
	int			i;
	int64		retval;
	bool		isnull;

#ifdef HAVE_GETACTIVESNAPSHOT
	if (GetActiveSnapshot() == NULL)
		elog(ERROR, "Slony-I: ActiveSnapshot is NULL in createEvent()");
#else
	if (SerializableSnapshot == NULL)
		elog(ERROR, "Slony-I: SerializableSnapshot is NULL in createEvent()");
#endif

	if ((rc = SPI_connect()) < 0)
		elog(ERROR, "Slony-I: SPI_connect() failed in createEvent()");

	/*
	 * Get or create the cluster status information and make sure it has the
	 * SPI plans that we need here.
	 */
	cs = getClusterStatus(PG_GETARG_NAME(0),
						  PLAN_INSERT_EVENT);

	buf_size = 8192;
	buf = palloc(buf_size);

	/*
	 * Do the following only once per transaction.
	 */
	if (!TransactionIdEquals(cs->currentXid, newXid))
	{
		cs->currentXid = newXid;
	}

	/*
	 * Call the saved INSERT plan
	 */
	for (i = 1; i < 10; i++)
	{
		if (i >= PG_NARGS() || PG_ARGISNULL(i))
		{
			argv[i - 1] = (Datum) 0;
			nulls[i - 1] = 'n';
		}
		else
		{
			argv[i - 1] = PG_GETARG_DATUM(i);
			nulls[i - 1] = ' ';
		}
	}
	nulls[9] = '\0';

	if ((rc = SPI_execp(cs->plan_insert_event, argv, nulls, 0)) < 0)
		elog(ERROR, "Slony-I: SPI_execp() failed for \"INSERT INTO sl_event ...\"");

	/*
	 * The INSERT plan also contains a SELECT currval('sl_event_seq'), use the
	 * new sequence number as return value.
	 */
	if (SPI_processed != 1)
		elog(ERROR, "Slony-I: INSERT plan did not return 1 result row");
	retval = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0],
										 SPI_tuptable->tupdesc, 1, &isnull));

	/*
	 * For SYNC and ENABLE_SUBSCRIPTION events, we also remember all current
	 * sequence values.
	 */
	if (PG_NARGS() > 1 && !PG_ARGISNULL(1))
	{
		ev_type_c = DatumGetPointer(DirectFunctionCall1(
											   textout, PG_GETARG_DATUM(1)));
		if (strcmp(ev_type_c, "SYNC") == 0 ||
			strcmp(ev_type_c, "ENABLE_SUBSCRIPTION") == 0)
		{
/*@-nullpass@*/
			if ((rc = SPI_execp(cs->plan_record_sequences, NULL, NULL, 0)) < 0)
				elog(ERROR, "Slony-I: SPI_execp() failed for \"INSERT INTO sl_seqlog ...\"");
/*@+nullpass@*/
		}
	}

	(void) SPI_finish();
/*@-mustfreefresh@*/
	PG_RETURN_INT64(retval);
}
Example #15
Datum
gp_add_persistent_relation_node_entry(PG_FUNCTION_ARGS)
{
	Datum				values[Natts_gp_persistent_relation_node];
	ItemPointerData		persistentTid;
	int64				persistentSerialNum;
	int					i;
	
	/* Must be super user */
	if (!superuser())
		elog(ERROR, "permission denied");

	/* Check input arguments */

	/*
	 * First parameter is the tid, remaining parameters should match the column
	 * types in gp_persistent_relation_node.
	 */
	if (PG_NARGS() != Natts_gp_persistent_relation_node + 1)
	{
		Oid		 procOid  = fcinfo->flinfo->fn_oid;
		char	*procName = format_procedure(procOid);

		elog(ERROR, "function '%s' received unexpected number of arguments",
			 procName);
	}

	/* 
	 * For the moment we don't support inserting at particular tids, 
	 * initial argument MUST be null.
	 */
	if (!PG_ARGISNULL(0))
		elog(ERROR, "direct tid assignment to %s is not yet supported",
			 "gp_persistent_relation_node");
	
	/* 
	 * validate that datatypes match expected, e.g. no one went and changed
	 * the catalog without updating this function.
	 */

	/* Build up the tuple we want to add */
	memset(&persistentTid, 0, sizeof(persistentTid));
	for (i = 0; i < Natts_gp_persistent_relation_node; i++)
	{
		if (PG_ARGISNULL(i+1))
			elog(ERROR, "null arguments not supported");
		values[i] = PG_GETARG_DATUM(i+1);
	}
	
	/*
	 * TODO: Validate the tuple
	 *   - Specified database exists
	 *   - Specified tablespace exists
	 *   - Specified relfile is in the filesystem
	 *   - etc.
	 */

	/* Add it to the table */
	PersistentFileSysObj_AddTuple(PersistentFsObjType_RelationFile,
								  values,
								  true, /* flushToXlog */
								  &persistentTid,
								  &persistentSerialNum);

	/* explain how we re-wrote that tuple */
	elog(NOTICE,
		 "inserted 1 row (TID %s, persistent_serial_num " INT64_FORMAT ")",
		 ItemPointerToString(&persistentTid),
		 persistentSerialNum);

	PG_RETURN_BOOL(true);
}
Example #16
Datum geography_as_gml(PG_FUNCTION_ARGS)
{
	LWGEOM *lwgeom = NULL;
	GSERIALIZED *g = NULL;
	char *gml;
	text *result;
	int version;
	char *srs;
	int srid = SRID_DEFAULT;
	int precision = OUT_MAX_DOUBLE_PRECISION;
	int option=0;
	int lwopts = LW_GML_IS_DIMS;
	static const char *default_prefix = "gml:";
	const char *prefix = default_prefix;
	char *prefix_buf = "";
	text *prefix_text, *id_text = NULL;
	const char *id=NULL;
	char *id_buf;


	/* Get the version */
	version = PG_GETARG_INT32(0);
	if ( version != 2 && version != 3 )
	{
		elog(ERROR, "Only GML 2 and GML 3 are supported");
		PG_RETURN_NULL();
	}

	/* Get the geography */
	if ( PG_ARGISNULL(1) ) PG_RETURN_NULL();
	g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1));

	/* Convert to lwgeom so we can run the old functions */
	lwgeom = lwgeom_from_gserialized(g);

	/* Retrieve precision if any (default is max) */
	if (PG_NARGS() >2 && !PG_ARGISNULL(2))
	{
		precision = PG_GETARG_INT32(2);
		if ( precision > OUT_MAX_DOUBLE_PRECISION )
			precision = OUT_MAX_DOUBLE_PRECISION;
		else if ( precision < 0 ) precision = 0;
	}

	/* retrieve option */
	if (PG_NARGS() >3 && !PG_ARGISNULL(3))
		option = PG_GETARG_INT32(3);


	/* retrieve prefix */
	if (PG_NARGS() >4 && !PG_ARGISNULL(4))
	{
		prefix_text = PG_GETARG_TEXT_P(4);
		if ( VARSIZE(prefix_text)-VARHDRSZ == 0 )
		{
			prefix = "";
		}
		else
		{
			/* +2 is one for the ':' and one for term null */
			prefix_buf = palloc(VARSIZE(prefix_text)-VARHDRSZ+2);
			memcpy(prefix_buf, VARDATA(prefix_text),
			       VARSIZE(prefix_text)-VARHDRSZ);
			/* add colon and null terminate */
			prefix_buf[VARSIZE(prefix_text)-VARHDRSZ] = ':';
			prefix_buf[VARSIZE(prefix_text)-VARHDRSZ+1] = '\0';
			prefix = prefix_buf;
		}
	}

	/* retrieve id */
	if (PG_NARGS() >5 && !PG_ARGISNULL(5))
	{
		id_text = PG_GETARG_TEXT_P(5);
		if ( VARSIZE(id_text)-VARHDRSZ == 0 )
		{
			id = "";
		}
		else
		{
			id_buf = palloc(VARSIZE(id_text)-VARHDRSZ+1);
			memcpy(id_buf, VARDATA(id_text), VARSIZE(id_text)-VARHDRSZ);
			id_buf[VARSIZE(id_text)-VARHDRSZ] = '\0';
			id = id_buf;
		}
	}

	if (option & 1) srs = getSRSbySRID(srid, false);
	else srs = getSRSbySRID(srid, true);
	if (!srs)
	{
		elog(ERROR, "SRID %d unknown in spatial_ref_sys table", SRID_DEFAULT);
		PG_RETURN_NULL();
	}

	/* Revert lat/lon only with long SRS */
	if (option & 1) lwopts |= LW_GML_IS_DEGREE;
	if (option & 2) lwopts &= ~LW_GML_IS_DIMS; 

	if (version == 2)
		gml = lwgeom_to_gml2(lwgeom, srs, precision, prefix);
	else
		gml = lwgeom_to_gml3(lwgeom, srs, precision, lwopts, prefix, id);

    lwgeom_free(lwgeom);
	PG_FREE_IF_COPY(g, 1);

	/* Return null on null */
	if ( ! gml ) 
		PG_RETURN_NULL();

	/* Turn string result into text for return */
	result = cstring2text(gml);
	lwfree(gml);

	PG_RETURN_TEXT_P(result);
}
Example #17
Datum geography_as_kml(PG_FUNCTION_ARGS)
{
	GSERIALIZED *g = NULL;
	LWGEOM *lwgeom = NULL;
	char *kml;
	text *result;
	int version;
	int precision = OUT_MAX_DOUBLE_PRECISION;
	static const char *default_prefix = "";
	char *prefixbuf;
	const char* prefix = default_prefix;
	text *prefix_text;


	/* Get the version */
	version = PG_GETARG_INT32(0);
	if ( version != 2)
	{
		elog(ERROR, "Only KML 2 is supported");
		PG_RETURN_NULL();
	}

	/* Get the geometry */
	if ( PG_ARGISNULL(1) ) PG_RETURN_NULL();
	g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1));

	/* Convert to lwgeom so we can run the old functions */
	lwgeom = lwgeom_from_gserialized(g);

	/* Retrieve precision if any (default is max) */
	if (PG_NARGS() >2 && !PG_ARGISNULL(2))
	{
		precision = PG_GETARG_INT32(2);
		if ( precision > OUT_MAX_DOUBLE_PRECISION )
			precision = OUT_MAX_DOUBLE_PRECISION;
		else if ( precision < 0 ) precision = 0;
	}

	/* retrieve prefix */
	if (PG_NARGS() >3 && !PG_ARGISNULL(3))
	{
		prefix_text = PG_GETARG_TEXT_P(3);
		if ( VARSIZE(prefix_text)-VARHDRSZ == 0 )
		{
			prefix = "";
		}
		else
		{
			/* +2 is one for the ':' and one for term null */
			prefixbuf = palloc(VARSIZE(prefix_text)-VARHDRSZ+2);
			memcpy(prefixbuf, VARDATA(prefix_text),
			       VARSIZE(prefix_text)-VARHDRSZ);
			/* add colon and null terminate */
			prefixbuf[VARSIZE(prefix_text)-VARHDRSZ] = ':';
			prefixbuf[VARSIZE(prefix_text)-VARHDRSZ+1] = '\0';
			prefix = prefixbuf;
		}
	}

	kml = lwgeom_to_kml2(lwgeom, precision, prefix);

    lwgeom_free(lwgeom);
	PG_FREE_IF_COPY(g, 1);

	if ( ! kml )
		PG_RETURN_NULL();

	result = cstring2text(kml);
	lwfree(kml);

	PG_RETURN_TEXT_P(result);
}
Example #18
File: genfile.c Project: RingsC/postgres
/*
 * List a directory (returns the filenames only)
 */
Datum
pg_ls_dir(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	struct dirent *de;
	directory_fctx *fctx;
	MemoryContext oldcontext;

	if (SRF_IS_FIRSTCALL())
	{
		bool		missing_ok = false;
		bool		include_dot_dirs = false;

		/* check the optional arguments */
		if (PG_NARGS() == 3)
		{
			if (!PG_ARGISNULL(1))
				missing_ok = PG_GETARG_BOOL(1);
			if (!PG_ARGISNULL(2))
				include_dot_dirs = PG_GETARG_BOOL(2);
		}

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		fctx = palloc(sizeof(directory_fctx));
		fctx->location = convert_and_check_filename(PG_GETARG_TEXT_PP(0));

		fctx->include_dot_dirs = include_dot_dirs;
		fctx->dirdesc = AllocateDir(fctx->location);

		if (!fctx->dirdesc)
		{
			if (missing_ok && errno == ENOENT)
			{
				MemoryContextSwitchTo(oldcontext);
				SRF_RETURN_DONE(funcctx);
			}
			else
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not open directory \"%s\": %m",
								fctx->location)));
		}
		funcctx->user_fctx = fctx;
		MemoryContextSwitchTo(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();
	fctx = (directory_fctx *) funcctx->user_fctx;

	while ((de = ReadDir(fctx->dirdesc, fctx->location)) != NULL)
	{
		if (!fctx->include_dot_dirs &&
			(strcmp(de->d_name, ".") == 0 ||
			 strcmp(de->d_name, "..") == 0))
			continue;

		SRF_RETURN_NEXT(funcctx, CStringGetTextDatum(de->d_name));
	}

	FreeDir(fctx->dirdesc);

	SRF_RETURN_DONE(funcctx);
}
Example #19
Datum generate_sparse_vector(PG_FUNCTION_ARGS)
{
    SvecType    *output_sfv;
    int16_t     typlen;
	bool        typbyval;
	char        typalign;
	bool        *nulls;

	if (PG_NARGS() != 3)
		elog(ERROR, "Invalid number of arguments.");

	ArrayType   *term_index = PG_GETARG_ARRAYTYPE_P(0);
	ArrayType   *term_count = PG_GETARG_ARRAYTYPE_P(1);

	int64_t dict_size = PG_GETARG_INT64(2);

	/* Check if arrays have null entries */
	if (ARR_HASNULL(term_index) || ARR_HASNULL(term_count))
		elog(ERROR, "One or both of the argument arrays has one or more null entries.");

	if (dict_size <= 0)
		elog(ERROR, "Dictionary size cannot be zero or negative.");

	/* Check if any of the argument arrays is empty */
	if ((ARR_NDIM(term_index) == 0) || (ARR_NDIM(term_count) == 0))
		elog(ERROR, "One or more argument arrays is empty.");

	int term_index_nelems = ARR_DIMS(term_index)[0];
	int term_count_nelems = ARR_DIMS(term_count)[0];


	/* If no. of elements in the arrays are not equal, throw an error */
	if (term_index_nelems != term_count_nelems)
		elog(ERROR, "No. of elements in the argument arrays are not equal.");

	Datum *term_index_data;
	Datum *term_count_data;

	/* Deconstruct the arrays */
	get_typlenbyvalalign(INT8OID, &typlen, &typbyval, &typalign);
	deconstruct_array(term_index, INT8OID, typlen, typbyval, typalign,
	                  &term_index_data, &nulls, &term_index_nelems);

	get_typlenbyvalalign(FLOAT8OID, &typlen, &typbyval, &typalign);
	deconstruct_array(term_count, FLOAT8OID, typlen, typbyval, typalign,
	                  &term_count_data, &nulls, &term_count_nelems);

	/* Check if term index array has indexes in proper order or not */
	for(int i = 0; i < term_index_nelems; i++)
	{
		if (DatumGetInt64(term_index_data[i]) < 0 ||
                DatumGetInt64(term_index_data[i]) >= dict_size)
			elog(ERROR, "Term indexes must range from 0 to total number of elements in the dictonary - 1.");
	}
        

    float8 *histogram = (float8 *)palloc0(sizeof(float8) * dict_size);
    for (int k = 0; k < dict_size; k++)
    {
        histogram[k] = 0;
    }
    for (int i = 0; i < term_index_nelems; i++)
    {
        uint64_t idx = DatumGetInt64(term_index_data[i]);
        histogram[idx] += DatumGetFloat8(term_count_data[i]);
    }

    output_sfv = svec_from_float8arr(histogram, dict_size);
    pfree(histogram);

    PG_RETURN_POINTER(output_sfv);
}
Example #20
Datum LWGEOM_in(PG_FUNCTION_ARGS)
{
	char *input = PG_GETARG_CSTRING(0);
	int32 geom_typmod = -1;
	char *str = input;
	LWGEOM_PARSER_RESULT lwg_parser_result;
	LWGEOM *lwgeom;
	GSERIALIZED *ret;
	int srid = 0;

	if ( (PG_NARGS()>2) && (!PG_ARGISNULL(2)) ) {
		geom_typmod = PG_GETARG_INT32(2);
	}

	lwgeom_parser_result_init(&lwg_parser_result);

	/* Empty string. */
	if ( str[0] == '\0' )
		ereport(ERROR,(errmsg("parse error - invalid geometry")));

	/* Starts with "SRID=" */
	if( strncasecmp(str,"SRID=",5) == 0 )
	{
		/* Roll forward to semi-colon */
		char *tmp = str;
		while ( tmp && *tmp != ';' )
			tmp++;
		
		/* Check next character to see if we have WKB  */
		if ( tmp && *(tmp+1) == '0' )
		{
			/* Null terminate the SRID= string */
			*tmp = '\0';
			/* Set str to the start of the real WKB */
			str = tmp + 1;
			/* Move tmp to the start of the numeric part */
			tmp = input + 5;
			/* Parse out the SRID number */
			srid = atoi(tmp);
		}
	}
	
	/* WKB? Let's find out. */
	if ( str[0] == '0' )
	{
		size_t hexsize = strlen(str);
		unsigned char *wkb = bytes_from_hexbytes(str, hexsize);
		/* TODO: 20101206: No parser checks! This is inline with current 1.5 behavior, but needs discussion */
		lwgeom = lwgeom_from_wkb(wkb, hexsize/2, LW_PARSER_CHECK_NONE);
		/* If we picked up an SRID at the head of the WKB set it manually */
		if ( srid ) lwgeom_set_srid(lwgeom, srid);
		/* Add a bbox if necessary */
		if ( lwgeom_needs_bbox(lwgeom) ) lwgeom_add_bbox(lwgeom);
		pfree(wkb);
		ret = geometry_serialize(lwgeom);
		lwgeom_free(lwgeom);
	}
	/* WKT then. */
	else
	{
		if ( lwgeom_parse_wkt(&lwg_parser_result, str, LW_PARSER_CHECK_ALL) == LW_FAILURE )
		{
			PG_PARSER_ERROR(lwg_parser_result);
		}
		lwgeom = lwg_parser_result.geom;
		if ( lwgeom_needs_bbox(lwgeom) )
			lwgeom_add_bbox(lwgeom);		
		ret = geometry_serialize(lwgeom);
		lwgeom_parser_result_free(&lwg_parser_result);
	}

	if ( geom_typmod >= 0 )
	{
		postgis_valid_typmod(ret, geom_typmod);
		POSTGIS_DEBUG(3, "typmod and geometry were consistent");
	}
	else
	{
		POSTGIS_DEBUG(3, "typmod was -1");
	}

	/* Don't free the parser result (and hence lwgeom) until we have done */
	/* the typmod check with lwgeom */
	
	PG_RETURN_POINTER(ret);

}
Example #21
File: xlogviewer.c Project: 50wu/gpdb
Datum
xlogviewer(PG_FUNCTION_ARGS)
{
	FuncCallContext		*funcctx;
	XLogViewerContext	*context;
	int					call_cntr;
	int					max_calls;
	TupleDesc			tupdesc;
	AttInMetadata		*attinmeta;
	int					fd;

	int					logRecOff;
	int32				logPageOff;
	XLogRecord 			*record;
	XLogRecPtr			curRecPtr; 
	uint32				logId;
	uint32				logSeg;
	TimeLineID			logTLI;
	char				*fnamebase;
	bool				ignore_errors = (PG_NARGS() == 2?PG_GETARG_BOOL(1):false);

	const char * const RM_names[RM_MAX_ID+1] = {
		"XLOG ",					/* 0 */
		"XACT ",					/* 1 */
		"SMGR ",					/* 2 */
		"CLOG ",					/* 3 */
		"DBASE",					/* 4 */
		"TBSPC",					/* 5 */
		"MXACT",					/* 6 */
		"RM  7",					/* 7 */
		"RM  8",					/* 8 */
		"RM  9",					/* 9 */
		"HEAP ",					/* 10 */
		"BTREE",					/* 11 */
		"HASH ",					/* 12 */
		"RTREE",					/* 13 */
		"GIST ",					/* 14 */
		"SEQ  "						/* 15 */
	};

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext	oldcontext;
		char			*xlog_file = GET_STR(PG_GETARG_TEXT_P(0));

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
		funcctx->max_calls = 1;

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("function returning record called in context "
							"that cannot accept type record")));

		/* generate attribute metadata needed later to produce tuples from raw C strings */
		attinmeta = TupleDescGetAttInMetadata(tupdesc);
		funcctx->attinmeta = attinmeta;

		/* Try to open XLOG file */ 
		if ((fd = open(xlog_file, O_RDONLY | PG_BINARY, 0)) < 0)
		{
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("Couldn't open xlog-file: %s", xlog_file)));
			SRF_RETURN_DONE(funcctx);
		}

		/*
		 * Extract logfile id and segment from file name
		 */
		fnamebase = strrchr(xlog_file, '/');
		if (fnamebase)
			fnamebase++;
		else
			fnamebase = xlog_file;
		if (sscanf(fnamebase, "%8x%8x%8x", &logTLI, &logId, &logSeg) != 3)
		{
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("Can't recognize logfile name '%s'", fnamebase)));
			SRF_RETURN_DONE(funcctx);
		}

		context = palloc(sizeof(XLogViewerContext));

		context->logFd = fd;
		context->logId = logId;
		context->logSeg = logSeg;
		context->logPageOff = -BLCKSZ;
		context->logRecOff = 0;

		funcctx->user_fctx = (void *) context;

		MemoryContextSwitchTo(oldcontext);
	}

	/* stuff done on every call of the function */
	funcctx = SRF_PERCALL_SETUP();

	call_cntr = funcctx->call_cntr;
	max_calls = funcctx->max_calls;
	attinmeta = funcctx->attinmeta;

	context = (XLogViewerContext *) funcctx->user_fctx;
	fd = context->logFd;
	logId = context->logId;
	logSeg = context->logSeg;
	logRecOff = context->logRecOff;
	logPageOff = context->logPageOff;
	curRecPtr = context->curRecPtr;

	if (call_cntr < max_calls)    /* do when there is more left to send */
	{
		char		**values;
		HeapTuple	tuple;
		Datum		result;

		if((record = readRecord(&fd, &logRecOff, &logPageOff, &curRecPtr, logId, logSeg, ignore_errors)))
		{
			funcctx->max_calls += 1;

			context->logRecOff = logRecOff;
			context->logPageOff = logPageOff;
			context->curRecPtr = curRecPtr;

			/* build a tuple */
			values = (char **) palloc(7 * sizeof(char *));
			values[0] = (char *) palloc(16 * sizeof(char));
			values[1] = (char *) palloc(16 * sizeof(char));
			values[2] = (char *) palloc(6 * sizeof(char));	/* 5 chars + NUL for RM_names entries */
			values[3] = (char *) palloc(16 * sizeof(char));
			values[4] = (char *) palloc(16 * sizeof(char));
			values[5] = (char *) palloc(16 * sizeof(char));
			values[6] = (char *) palloc(16 * sizeof(char));

			snprintf(values[0], 16, "%d", record->xl_rmid);
			snprintf(values[1], 16, "%d", record->xl_xid);
			snprintf(values[2], 6, "%s", RM_names[record->xl_rmid]);
			snprintf(values[3], 16, "%2X", record->xl_info);
			snprintf(values[4], 16, "%d", record->xl_len);
			snprintf(values[5], 16, "%d", record->xl_tot_len);
			snprintf(values[6], 16, "%s", getXactName(record->xl_rmid, record->xl_info));

			tuple = BuildTupleFromCStrings(attinmeta, values);

			/* make the tuple into a datum */
			result = HeapTupleGetDatum(tuple);
			SRF_RETURN_NEXT(funcctx, result);
			
		}
		else
		{
			close(fd);
			SRF_RETURN_DONE(funcctx);
		}
		
	}
	else    /* do when there is no more left */
	{
		close(fd);
		SRF_RETURN_DONE(funcctx);
	}
}
Example #22
0
/*
 * array_position_common
 *		Common code for array_position and array_position_start
 *
 * These are separate wrappers for the sake of opr_sanity regression test.
 * They are not strict so we have to test for null inputs explicitly.
 */
static Datum
array_position_common(FunctionCallInfo fcinfo)
{
	ArrayType  *array;
	Oid			collation = PG_GET_COLLATION();
	Oid			element_type;
	Datum		searched_element,
				value;
	bool		isnull;
	int			position,
				position_min;
	bool		found = false;
	TypeCacheEntry *typentry;
	ArrayMetaState *my_extra;
	bool		null_search;
	ArrayIterator array_iterator;

	if (PG_ARGISNULL(0))
		PG_RETURN_NULL();

	array = PG_GETARG_ARRAYTYPE_P(0);
	element_type = ARR_ELEMTYPE(array);

	/*
	 * We refuse to search for elements in multi-dimensional arrays, since we
	 * have no good way to report the element's location in the array.
	 */
	if (ARR_NDIM(array) > 1)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("searching for elements in multidimensional arrays is not supported")));

	if (PG_ARGISNULL(1))
	{
		/* fast return when the array doesn't have nulls */
		if (!array_contains_nulls(array))
			PG_RETURN_NULL();
		searched_element = (Datum) 0;
		null_search = true;
	}
	else
	{
		searched_element = PG_GETARG_DATUM(1);
		null_search = false;
	}

	position = (ARR_LBOUND(array))[0] - 1;

	/* figure out where to start */
	if (PG_NARGS() == 3)
	{
		if (PG_ARGISNULL(2))
			ereport(ERROR,
					(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
					 errmsg("initial position must not be null")));

		position_min = PG_GETARG_INT32(2);
	}
	else
		position_min = (ARR_LBOUND(array))[0];

	/*
	 * We arrange to look up type info for array_create_iterator only once per
	 * series of calls, assuming the element type doesn't change underneath
	 * us.
	 */
	my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL)
	{
		fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
													  sizeof(ArrayMetaState));
		my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
		my_extra->element_type = ~element_type;
	}

	if (my_extra->element_type != element_type)
	{
		get_typlenbyvalalign(element_type,
							 &my_extra->typlen,
							 &my_extra->typbyval,
							 &my_extra->typalign);

		typentry = lookup_type_cache(element_type, TYPECACHE_EQ_OPR_FINFO);

		if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_FUNCTION),
				errmsg("could not identify an equality operator for type %s",
					   format_type_be(element_type))));

		my_extra->element_type = element_type;
		fmgr_info_cxt(typentry->eq_opr_finfo.fn_oid, &my_extra->proc,
					  fcinfo->flinfo->fn_mcxt);
	}

	/* Examine each array element until we find a match. */
	array_iterator = array_create_iterator(array, 0, my_extra);
	while (array_iterate(array_iterator, &value, &isnull))
	{
		position++;

		/* skip initial elements if caller requested so */
		if (position < position_min)
			continue;

		/*
		 * Can't look at the array element's value if it's null; but if we
		 * search for null, we have a hit and are done.
		 */
		if (isnull || null_search)
		{
			if (isnull && null_search)
			{
				found = true;
				break;
			}
			else
				continue;
		}

		/* not nulls, so run the operator */
		if (DatumGetBool(FunctionCall2Coll(&my_extra->proc, collation,
										   searched_element, value)))
		{
			found = true;
			break;
		}
	}

	array_free_iterator(array_iterator);

	/* Avoid leaking memory when handed toasted input */
	PG_FREE_IF_COPY(array, 0);

	if (!found)
		PG_RETURN_NULL();

	PG_RETURN_INT32(position);
}
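The comment at the top of this example refers to two SQL-callable wrappers. They are trivially thin; a sketch of what they look like (inferred from the comment, not reproduced from the example):

Datum
array_position(PG_FUNCTION_ARGS)
{
	return array_position_common(fcinfo);
}

Datum
array_position_start(PG_FUNCTION_ARGS)
{
	return array_position_common(fcinfo);
}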
Example #23
0
Datum 
text_unpivot(PG_FUNCTION_ARGS)
{
	FuncCallContext      *funcctx;
	unpivot_fctx         *fctx;
	MemoryContext         oldcontext;
	Datum                 d[2];
	bool                  isna[2];

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		TupleDesc  tupdesc;
		ArrayType *labels;
		ArrayType *data;
		Oid        eltype1;
		Oid        eltype2;

		/* see if we were given an explicit step size */
		if (PG_NARGS() != 2)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("number of parameters != 2")));

		/* 
		 * Type check inputs:
		 *    0: label text[]
		 *    1: value anyarray
		 */
		eltype1 = get_fn_expr_argtype(fcinfo->flinfo, 0);
		eltype2 = get_fn_expr_argtype(fcinfo->flinfo, 1);

		if (!OidIsValid(eltype1))
			elog(ERROR, "could not determine data type of input 'label'");
		if (!OidIsValid(eltype2))
			elog(ERROR, "could not determine data type of input 'value'");

		/* Strict function, return null on null input */
		if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
			PG_RETURN_NULL();
		
		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();

		/* switch to memory context appropriate for multiple function calls */
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        /* Build a tuple descriptor for our result type */
        if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("function returning record called in context "
                            "that cannot accept type record")));
		funcctx->tuple_desc = BlessTupleDesc(tupdesc);

		/* allocate memory for user context */
		fctx = (unpivot_fctx *) palloc(sizeof(unpivot_fctx));

		/* Use fctx to keep state from call to call */
		labels = PG_GETARG_ARRAYTYPE_P(0);
		data   = PG_GETARG_ARRAYTYPE_P(1);
		array_loop(labels, ARR_LBOUND(labels)[0], &fctx->label_iter);
		array_loop(data,   ARR_LBOUND(labels)[0], &fctx->data_iter);		

		funcctx->user_fctx = fctx;
		MemoryContextSwitchTo(oldcontext);
	}

	/* stuff done on every call of the function */
	funcctx = SRF_PERCALL_SETUP();

	/* get the saved state and use current as the result for this iteration */
	fctx = (unpivot_fctx*) funcctx->user_fctx;

	if (array_next(&fctx->label_iter, &d[0], &isna[0]))
	{
		HeapTuple tuple;

		array_next(&fctx->data_iter, &d[1], &isna[1]);
		tuple = heap_form_tuple(funcctx->tuple_desc, d, isna);
		SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));		
	}
	else
	{
		SRF_RETURN_DONE(funcctx);
	}
}
Example #24
0
/*
 * SQL function jsonb_build_object(variadic "any")
 */
Datum
jsonb_build_object(PG_FUNCTION_ARGS)
{
	int			nargs = PG_NARGS();
	int			i;
	Datum		arg;
	Oid			val_type;
	JsonbInState result;

	if (nargs % 2 != 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid number of arguments: object must be matched key value pairs")));

	memset(&result, 0, sizeof(JsonbInState));

	result.res = pushJsonbValue(&result.parseState, WJB_BEGIN_OBJECT, NULL);

	for (i = 0; i < nargs; i += 2)
	{
		/* process key */

		if (PG_ARGISNULL(i))
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("argument %d: key must not be null", i + 1)));
		val_type = get_fn_expr_argtype(fcinfo->flinfo, i);

		/*
		 * turn a constant (more or less literal) value that's of unknown type
		 * into text. Unknowns come in as a cstring pointer.
		 */
		if (val_type == UNKNOWNOID && get_fn_expr_arg_stable(fcinfo->flinfo, i))
		{
			val_type = TEXTOID;
			arg = CStringGetTextDatum(PG_GETARG_POINTER(i));
		}
		else
		{
			arg = PG_GETARG_DATUM(i);
		}
		if (val_type == InvalidOid || val_type == UNKNOWNOID)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			   errmsg("argument %d: could not determine data type", i + 1)));

		add_jsonb(arg, false, &result, val_type, true);

		/* process value */

		val_type = get_fn_expr_argtype(fcinfo->flinfo, i + 1);
		/* see comments above */
		if (val_type == UNKNOWNOID && get_fn_expr_arg_stable(fcinfo->flinfo, i + 1))
		{
			val_type = TEXTOID;
			if (PG_ARGISNULL(i + 1))
				arg = (Datum) 0;
			else
				arg = CStringGetTextDatum(PG_GETARG_POINTER(i + 1));
		}
		else
		{
			arg = PG_GETARG_DATUM(i + 1);
		}
		if (val_type == InvalidOid || val_type == UNKNOWNOID)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			   errmsg("argument %d: could not determine data type", i + 2)));
		add_jsonb(arg, PG_ARGISNULL(i + 1), &result, val_type, false);
	}

	result.res = pushJsonbValue(&result.parseState, WJB_END_OBJECT, NULL);

	PG_RETURN_POINTER(JsonbValueToJsonb(result.res));
}
Example #25
0
/**
 * @brief Return the n-th element from a composite value
 *
 * To the user, AnyType is a fully recursive type: Each AnyType object can be a
 * composite object and be composed of a number of other AnyType objects.
 * Function written using the C++ abstraction layer have a single logical
 * argument of type AnyType.
 */
inline
AnyType
AnyType::operator[](uint16_t inID) const {
    consistencyCheck();

    if (isNull()) {
        // Handle case mContent == NULL
        throw std::invalid_argument("Invalid type conversion. "
                                    "Null where not expected.");
    }
    if (!isComposite()) {
        // Handle case mContent == Scalar
        throw std::invalid_argument("Invalid type conversion. "
                                    "Composite type where not expected.");
    }

    if (mContent == ReturnComposite)
        return mChildren[inID];

    // It holds now that mContent is either FunctionComposite or NativeComposite
    // In this case, it is guaranteed that fcinfo != NULL
    Oid typeID = 0;
    bool isMutable = false;
    Datum datum = 0;

    if (mContent == FunctionComposite) {
        // This AnyType object represents the composite value consisting of all
        // function arguments

        if (inID >= size_t(PG_NARGS()))
            throw std::out_of_range("Invalid type conversion. Access behind "
                                    "end of argument list.");

        if (PG_ARGISNULL(inID))
            return AnyType();

        typeID = mSysInfo->functionInformation(fcinfo->flinfo->fn_oid)
                 ->getArgumentType(inID, fcinfo->flinfo);
        if (inID == 0) {
            // If we are called as an aggregate function, the first argument is
            // the transition state. In that case, we are free to modify the
            // data. In fact, for performance reasons, we *should* even do all
            // modifications in-place. In all other cases, directly modifying
            // memory is dangerous.
            // See warning at:
            // http://www.postgresql.org/docs/current/static/xfunc-c.html#XFUNC-C-BASETYPE

            // BACKEND: AggCheckCallContext currently will never raise an
            // exception
            isMutable = AggCheckCallContext(fcinfo, NULL);
        }
        datum = PG_GETARG_DATUM(inID);
    } else { /* if (mContent == NativeComposite) */
        // This AnyType object represents a tuple that was passed from the
        // backend

        TupleDesc tupdesc = mSysInfo
                            ->typeInformation(HeapTupleHeaderGetTypeId(mTupleHeader))
                            ->getTupleDesc(HeapTupleHeaderGetTypMod(mTupleHeader));

        if (inID >= tupdesc->natts)
            throw std::out_of_range("Invalid type conversion. Access behind "
                                    "end of composite object.");

        typeID = tupdesc->attrs[inID]->atttypid;
        bool isNull = false;
        datum = madlib_GetAttributeByNum(mTupleHeader, inID, &isNull);
        if (isNull)
            return AnyType();
    }

    if (typeID == InvalidOid)
        throw std::invalid_argument("Backend returned invalid type ID.");

    return mSysInfo->typeInformation(typeID)->isCompositeType()
           ? AnyType(mSysInfo, madlib_DatumGetHeapTupleHeader(datum), datum,
                     typeID)
           : AnyType(mSysInfo, datum, typeID, isMutable);
}
Example #26
0
/*
 * Arguments:
 * 0: queue_name  text          NOT NULL
 * 1: ev_id       int8			if NULL take from SEQ
 * 2: ev_time     timestamptz   if NULL use now()
 * 3: ev_owner    int4
 * 4: ev_retry    int4
 * 5: ev_type     text
 * 6: ev_data     text
 * 7: ev_extra1   text
 * 8: ev_extra2   text
 * 9: ev_extra3   text
 * 10:ev_extra4   text
 */
Datum
pgq_insert_event_raw(PG_FUNCTION_ARGS)
{
	Datum values[11];
	char nulls[11];
	struct QueueState state;
	int64 ret_id;
	void *ins_plan;
	Datum ev_id, ev_time;
	int i, res;

	if (PG_NARGS() < 6)
		elog(ERROR, "Need at least 6 arguments");
	if (PG_ARGISNULL(0))
		elog(ERROR, "Queue name must not be NULL");

	if (SPI_connect() < 0)
		elog(ERROR, "SPI_connect() failed");
	
	init_cache();

	load_queue_info(PG_GETARG_DATUM(0), &state);

	if (PG_ARGISNULL(1))
		ev_id = state.next_event_id;
	else
		ev_id = PG_GETARG_DATUM(1);

	if (PG_ARGISNULL(2))
		ev_time = DirectFunctionCall1(now, 0);
	else
		ev_time = PG_GETARG_DATUM(2);

	/*
	 * Prepare arguments for INSERT
	 */
	values[0] = ev_id;
	nulls[0] = ' ';
	values[1] = ev_time;
	nulls[1] = ' ';
	for (i = 3; i < 11; i++) {
		int dst = i - 1;
		if (i >= PG_NARGS() || PG_ARGISNULL(i)) {
			values[dst] = (Datum)NULL;
			nulls[dst] = 'n';
		} else {
			values[dst] = PG_GETARG_DATUM(i);
			nulls[dst] = ' ';
		}
	}

	/*
	 * Perform INSERT into queue table.
	 */
	ins_plan = load_insert_plan(&state);
	res = SPI_execute_plan(ins_plan, values, nulls, false, 0);
	if (res != SPI_OK_INSERT)
		elog(ERROR, "Queue insert failed");

	/*
	 * ev_id cannot pass SPI_finish()
	 */
	ret_id = DatumGetInt64(ev_id);

	if (SPI_finish() < 0)
		elog(ERROR, "SPI_finish failed");

	PG_RETURN_INT64(ret_id);
}
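load_insert_plan() is not shown above. A minimal sketch of how such a cached SPI insert plan is typically prepared (the statement text, argument types, and function name are assumptions for illustration, not the pgq source):

static void *
load_insert_plan_sketch(void)
{
	/* Hypothetical: ev_id, ev_time plus the eight remaining event columns. */
	Oid			argtypes[10] = {INT8OID, TIMESTAMPTZOID, INT4OID, INT4OID,
								TEXTOID, TEXTOID, TEXTOID, TEXTOID, TEXTOID, TEXTOID};
	SPIPlanPtr	plan;

	plan = SPI_prepare("insert into my_queue_table"
					   " values ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)",
					   10, argtypes);
	if (plan == NULL)
		elog(ERROR, "SPI_prepare failed");

	/* Keep the plan valid beyond SPI_finish() so it can be cached and reused. */
	if (SPI_keepplan(plan) != 0)
		elog(ERROR, "SPI_keepplan failed");

	return (void *) plan;
}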
Example #27
0
Datum kc_expand(PG_FUNCTION_ARGS) {

    KC_ENTRY                        *search;
    FuncCallContext                 *funcctx;
    int                             call_cntr;
    char                            *kbuf;
    size_t                          ksiz, vsiz;
    const char                      *cvbuf;
    char                            *kv_kbuf = NULL; 
    size_t                          kv_ksiz;
    int                             done;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL()) {
        MemoryContext   oldcontext;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        // Make sure that there are enough args.
        if (PG_NARGS() < MIN_ARGS) {
            ereport(ERROR,
                    (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
                     errmsg("Must run expand with at least %d args!", MIN_ARGS)));
        }

        /* Make the kcdb here. */
        search = (KC_ENTRY *)palloc(sizeof(KC_ENTRY)); 
        search->db = kcdbnew();
        if (open_db (search->db, text_to_cstring(PG_GETARG_TEXT_PP(0)), text_to_cstring(PG_GETARG_TEXT_PP(1)))) {

            // Set the key to jump into:
            // Call with -- map_name, result_id, class, doctype, pop, psource
            // Here, map_name describes a db to open.
            // Otherwise, result_id:class:doctype:pop:psource
            (search->jump_key) = (char *) palloc(MAX_JUMP_KEY_LEN * sizeof(char));

            int index_point;
            snprintf(search->jump_key, MAX_JUMP_KEY_LEN, "%s",
                     text_to_cstring(PG_GETARG_TEXT_PP(2)));	/* copy into the palloc'd buffer instead of leaking it */
            int size_left = MAX_JUMP_KEY_LEN;
            for (index_point = START_VARIABLE_INDEX; index_point < END_VARIABLE_INDEX; index_point++) {
                if (PG_NARGS() > index_point) {
                    char *next_idx = text_to_cstring(PG_GETARG_TEXT_PP(index_point));
                    if (next_idx != NULL) {
                        size_left = size_left - (2 + strlen(next_idx));
                        strncat (search->jump_key, CF_LABEL_SEP, size_left);
                        strncat (search->jump_key, next_idx, size_left);
                    }
                }
            }
            
#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("Setting jump buffer -- [%s]", search->jump_key)));
#endif
            
            // Create a cursor, and set it to the base point looking for entries.
            search->cur = kcdbcursor(search->db);
            kccurjumpkey(search->cur, search->jump_key, MAX_JUMP_KEY_LEN);
        } else {
            search->db = NULL;
        }

        search->next_map = 0;
        search->msg = NULL;
        
        // Save the search struct for the subsequent calls.
        funcctx->user_fctx = search;

        MemoryContextSwitchTo(oldcontext);
    }

    /* stuff done on every call of the function */
    funcctx = SRF_PERCALL_SETUP();

    call_cntr = funcctx->call_cntr;
    search = (KC_ENTRY *) funcctx->user_fctx;
    
    // If no current msg, try to get the next one.
    done = 1;

#ifdef CF_DUBUG
    ereport(NOTICE,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("beginning run")));
#endif

    if (search->msg) {

#ifdef CF_DUBUG  
        ereport(NOTICE,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("Incrementing next from map %d -- %zu", search->next_map, search->msg->n_map_entry)));
#endif

        // Case where we are using the external cursor running over the kv map.
        // Ready the next key from that cursor.
        if (search->msg->kv_map_file) {
            
            if ((kv_kbuf = kccurgetkey(search->kv_cur, &kv_ksiz, 1)) == NULL) {
                done = 1;
                kccurdel(search->kv_cur);
                kcdbendtran (search->kv_db, 1);            
                if (!kcdbclose(search->kv_db)) {
                    ereport(ERROR,
                            (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                             errmsg("Error Closeing db: \"%s\"", kcecodename(kcdbecode(search->kv_db)))));
                }

                // Also need to free this.
                cloudflare__zone_time_bucket__free_unpacked(search->msg, NULL);
                search->msg = NULL;

            } else {
                done = 0;
            }


        } else {
            if (search->next_map >= search->msg->n_map_entry) {
                // Done with this msg -- move on to the next one.
                cloudflare__zone_time_bucket__free_unpacked(search->msg, NULL);
                search->msg = NULL;
            } else {
                done = 0;
            }
        }
    }

    if (search->db && !search->msg) {
      
#ifdef CF_DUBUG  
        ereport(NOTICE,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("Getting new buf -- %s", search->jump_key)));
#endif        

        if ((kbuf = kccurget(search->cur, &ksiz, &cvbuf, &vsiz, 1)) != NULL) {
            // Pull up the PB and expand it.
            search->msg = cloudflare__zone_time_bucket__unpack(NULL, vsiz, (const uint8_t *)cvbuf);
            if (search->msg == NULL) {   // Something failed
                ereport(ERROR,
                        (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                         errmsg("error unpacking incoming message")));
                done = 1;
            } else {
                // Does the buffer match the searched for string?
                // @TODO -- bound this?
                if (strstr(search->msg->db_key, search->jump_key)) {
                    done = 0;
                    search->next_map = 0;

                    // And load the kvkc if needed.
                    if (search->msg->kv_map_file) {
                        
#ifdef CF_DUBUG  
                        ereport(NOTICE,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("Switching to kvs %s", search->msg->kv_map_file)));
#endif

                        search->kv_db = kcdbnew();
                        
                        if (!kcdbopen(search->kv_db, search->msg->kv_map_file, KCOWRITER)) {
#ifdef CF_NO_DB_IS_ERR
                            ereport(ERROR,
                                    (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                                     errmsg("Error opening db: \"%s\", \"%s\". Make sure that the map_name is valid.", 
                                            search->msg->kv_map_file, kcecodename(kcdbecode(search->kv_db)))));
#endif
#ifdef CF_DUBUG
                            ereport(NOTICE,
                                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                     errmsg("Error opening db: \"%s\", \"%s\". Make sure that the map_name is valid.", 
                                            search->msg->kv_map_file, kcecodename(kcdbecode(search->kv_db)))));
#endif
                            done = 1;
                        } else {
                            kcdbbegintran (search->kv_db, 0);
                            search->kv_cur = kcdbcursor(search->kv_db);
                            kccurjump(search->kv_cur);   

                            if ((kv_kbuf = kccurgetkey(search->kv_cur, &kv_ksiz, 1)) == NULL) {
                                done = 1;
                                kccurdel(search->kv_cur);
                                kcdbendtran (search->kv_db, 1);
                                if (!kcdbclose(search->kv_db)) {
                                    ereport(ERROR,
                                            (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                                             errmsg("Error Closeing db: \"%s\"", kcecodename(kcdbecode(search->kv_db)))));
                                }
                            } else {
                                done = 0;
                            }
                        }
                    }
                } else {
                    done = 1;
                }
            }
            kcfree(kbuf);
        } else {
#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("no msg to find")));
#endif
            done = 1;
        }
    }

#ifdef CF_DUBUG
    ereport(NOTICE,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("Done? %d -- next buf -- %d", done, search->next_map)));   
#endif

    // Take the next iteration over the cursor. If the next entry is NULL or does not match the result_id passed in,
    // end. Otherwise, parse the value, populating the next row of the returning tuple.
    if (!done) {
        KC_ROW                          *out;
        Datum                           result;

        size_t size = sizeof(KC_ROW);
        out = (KC_ROW *)palloc(size);
        memset(out, '0', size);
        SET_VARSIZE(out, size);

        out->classification = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->doctype = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->pop = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->psource = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));
        out->key = (char *)palloc(MAX_KC_ROW_ENTRY * sizeof(char));

        strncpy(out->classification, search->msg->classification, MAX_KC_ROW_ENTRY);
        strncpy(out->doctype, search->msg->doctype, MAX_KC_ROW_ENTRY);
        strncpy(out->pop, search->msg->pop, MAX_KC_ROW_ENTRY);
        strncpy(out->psource, search->msg->psource, MAX_KC_ROW_ENTRY);

        if (search->msg->kv_map_file) {

#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("getting val from -- [%s]", search->msg->kv_map_file)));
#endif

            snprintf(out->key, MAX_KC_ROW_ENTRY, "%s", kv_kbuf);
            out->value = kcdbincrint (search->kv_db, kv_kbuf, kv_ksiz, 0);

            if (out->value == INT64_MIN) {
                ereport(NOTICE,
                        (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                         errmsg("ERROR Getting val from key -- [%s], %s", kv_kbuf, kcecodename(kcdbecode(search->kv_db)))));
            }

            kcfree(kv_kbuf);
        } else {

#ifdef CF_DUBUG
            ereport(NOTICE,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("Loading %s %ld", search->msg->map_entry[search->next_map]->key, 
                            search->msg->map_entry[search->next_map]->value)));
#endif

            snprintf(out->key, MAX_KC_ROW_ENTRY, "%s", search->msg->map_entry[search->next_map]->key);        
            out->value = search->msg->map_entry[search->next_map]->value;
        }

        result = PointerGetDatum(out);

        /* clean up (this is not really necessary) */
        pfree(out->classification);
        pfree(out->doctype);
        pfree(out->pop);
        pfree(out->psource);
        pfree(out->key);
        pfree(out);

        // Remember that we are going to the next step.
        search->next_map++;

        SRF_RETURN_NEXT(funcctx, result);
    } else {    /* do when there is no more left */
        if (search->db) {
            kccurdel(search->cur);
            if (!kcdbclose(search->db)) {
                ereport(ERROR,
                        (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                         errmsg("Error Closeing db: \"%s\"", kcecodename(kcdbecode(search->db)))));
            }
            
            if (search->msg != NULL) {
                cloudflare__zone_time_bucket__free_unpacked(search->msg, NULL);
            }
            
            pfree(search->jump_key);
        }
        pfree(search);

#ifdef CF_DUBUG
        ereport(NOTICE,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("Done with run")));
#endif

        // Don't delete db, this leads to segfaults.
        SRF_RETURN_DONE(funcctx);
    }
}
Example #28
0
File: bayes.c Project: BenjaminYu/gpdb
/*
 * nb_classify_accum - Naive Bayesian Classification Accumulator
 */
Datum 
nb_classify_accum(PG_FUNCTION_ARGS)
{
	nb_classify_state  state;
	TupleDesc          resultDesc;
	int                i;
	int                nclasses;
	ArrayType         *classes;          /* arg[1] */
	int64              attr_count;       /* arg[2] */
	ArrayType         *class_count;      /* arg[3] */
	ArrayType         *class_total;      /* arg[4] */
	HeapTuple          result;
	Datum              resultDatum[3];
	bool               resultNull[3];
	bool               skip;
	int64             *class_data;
	int64             *total_data;
	float8            *prior_data;
	int64             *stotal_data;

	/* Check input parameters */
	if (PG_NARGS() != 5)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify_accum called with %d arguments", 
						PG_NARGS())));
	}

	/* Skip rows with NULLs */
	if (PG_ARGISNULL(1) || PG_ARGISNULL(3) || PG_ARGISNULL(4))
	{
		if (PG_ARGISNULL(0))
			PG_RETURN_NULL();
		PG_RETURN_DATUM(PG_GETARG_DATUM(0));
	}
	classes     = PG_GETARG_ARRAYTYPE_P(1);
	attr_count  = PG_ARGISNULL(2) ? 0 : PG_GETARG_INT64(2);
	class_count = PG_GETARG_ARRAYTYPE_P(3);
	class_total = PG_GETARG_ARRAYTYPE_P(4);

	if (ARR_NDIM(classes) != 1 || 
		ARR_NDIM(class_count) != 1 || 
		ARR_NDIM(class_total) != 1)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify cannot accumulate multidimensional arrays")));
	}

	/* All three arrays must be equal cardinality */
	nclasses = ARR_DIMS(classes)[0];
	if (ARR_DIMS(class_count)[0] != nclasses ||
		ARR_DIMS(class_total)[0] != nclasses)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify: non-conformable arrays")));
	}
	
	class_data = (int64*) ARR_DATA_PTR(class_count);
	total_data = (int64*) ARR_DATA_PTR(class_total);

	/* It is an error for class_data, total_data, or attr_count to be a 
	   negative number */
	if (attr_count < 0) 
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("nb_classify: attr_count value must be >= 0")));

	}
	skip = false;
	for (i = 0; i < nclasses; i++) 
	{
		if (class_data[i] < 0) 
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("nb_classify: class_data values must be >= 0")));
		}
		if (total_data[i] < 0) 
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("nb_classify: total_data values must be >= 0")));
		}
		if (class_data[i] > total_data[i]) 
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("nb_classify: class_data values must be <= total_data")));
		}

		/* 
		 * If we do not have an adjustment value and any class has a zero value
		 * then we must skip this row, otherwise we will try to calculate ln(0)
		 */
		if (attr_count == 0 && class_data[i] == 0) 
		{
			skip = true;
		}
	}
	if (skip)
	{
		PG_RETURN_DATUM(PG_GETARG_DATUM(0));			
	}



	/* Get/create the accumulation state */
	if (!PG_ARGISNULL(0))
	{
		HeapTupleHeader tup;
		tup = (fcinfo->context && IsA(fcinfo->context, AggState))
			? PG_GETARG_HEAPTUPLEHEADER(0)
			: PG_GETARG_HEAPTUPLEHEADER_COPY(0);

		get_nb_state(tup, &state, nclasses);
	}
	else
	{
		/* Construct the state arrays */
		int     size;

		/* allocate memory and copy the classes array */
		size = VARSIZE(classes);
		state.classes = (ArrayType *) palloc(size);
		SET_VARSIZE(state.classes, size);
		memcpy(state.classes, classes, size);

		/* allocate memory and construct the accumulator array */
		size = ARR_OVERHEAD_NONULLS(1) + nclasses * sizeof(float8);
		state.accum = (ArrayType *) palloc(size);
		SET_VARSIZE(state.accum, size);
		state.accum->ndim          = 1;
		state.accum->dataoffset    = 0;
		ARR_ELEMTYPE(state.accum)  = FLOAT8OID;
		ARR_DIMS(state.accum)[0]   = nclasses;
		ARR_LBOUND(state.accum)[0] = 1;
		prior_data = (float8*) ARR_DATA_PTR(state.accum);
		for (i = 0; i < nclasses; i++)
			prior_data[i] = 0;

		/* allocate memory and construct the total array */
		size = ARR_OVERHEAD_NONULLS(1) + nclasses * sizeof(int64);
		state.total = (ArrayType *) palloc(size);
		SET_VARSIZE(state.total, size);
		state.total->ndim          = 1;
		state.total->dataoffset    = 0;
		ARR_ELEMTYPE(state.total)  = INT8OID;
		ARR_DIMS(state.total)[0]   = nclasses;
		ARR_LBOUND(state.total)[0] = 1;
		stotal_data = (int64*) ARR_DATA_PTR(state.total);
		for (i = 0; i < nclasses; i++)
			stotal_data[i] = 0;
	}

	/* Adjust the prior based on the current input row */
	prior_data = (float8*) ARR_DATA_PTR(state.accum);
	stotal_data = (int64*) ARR_DATA_PTR(state.total);
	for (i = 0; i < nclasses; i++)
	{
		/*
		 * Calculate the accumulation value for the classifier
		 *
		 * The logical calculation is: 
		 *     product((class[i]+1)/(total_data[i]+attr_count))
		 *
		 * Instead of this calculation we calculate:
		 *     sum(ln((class[i]+1)/(total_data[i]+attr_count)))
		 *
		 * The reason for this is to increase the numerical stability of
		 * the algorithm.
		 *
		 * Since the ln(0) is undefined we want to increment the count 
		 * for all classes. 
		 *
		 * This gets a bit more complicated for the denominator which
		 * needs to know how many values there are for this attribute
		 * so that we keep the total probability for the attribute = 1.
		 * To handle this the aggregation function should be passed the
		 * number of distinct values for the aggregate it is computing.
		 *
		 * If for some reason this value is not present, or < 1 then we
		 * just switch to non-adjusted calculations.  In this case we 
		 * will simply skip over any row that has a 0 count on any 
		 * class_data index. (handled above)
		 */
		if (attr_count > 1)
			prior_data[i] += log((class_data[i]+1)/(float8)(total_data[i] + attr_count));
		else
			prior_data[i] += log(class_data[i]/(float8)total_data[i]);

		/* 
		 * The total array should be constant throughout, but if not it should
		 * reflect the maximum values encountered 
		 */
		if (total_data[i] > stotal_data[i])
			stotal_data[i] = total_data[i];
	}


	/* Construct the return tuple */
	if (get_call_result_type(fcinfo, NULL, &resultDesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	BlessTupleDesc(resultDesc); 
	
	resultDatum[0] = PointerGetDatum(state.classes);
	resultDatum[1] = PointerGetDatum(state.accum);
	resultDatum[2] = PointerGetDatum(state.total);
	resultNull[0]  = false;
	resultNull[1]  = false;
	resultNull[2]  = false;

	result = heap_form_tuple(resultDesc, resultDatum, resultNull);
	PG_RETURN_DATUM(HeapTupleGetDatum(result));
}
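The long comment in the loop above explains why the accumulator stores sum(ln(p)) rather than product(p). A small standalone illustration of that numerical argument (not part of the example):

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		product = 1.0;
	double		logsum = 0.0;
	int			i;

	for (i = 0; i < 5000; i++)
	{
		double		p = 1e-3;	/* a typical small per-attribute likelihood */

		product *= p;			/* underflows to 0 after a few hundred factors */
		logsum += log(p);		/* stays finite and remains comparable */
	}
	printf("product = %g, log-sum = %g\n", product, logsum);
	return 0;
}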
Example #29
0
/*
 * pgmpc_playlist
 * List all songs in the given playlist. If no playlist is specified, list
 * songs of the current playlist.
 */
Datum
pgmpc_playlist(PG_FUNCTION_ARGS)
{
	TupleDesc   tupdesc;
	Tuplestorestate *tupstore;
	char *playlist = NULL;
	bool ret;

	if (PG_NARGS() == 1 && !PG_ARGISNULL(0))
		playlist = text_to_cstring(PG_GETARG_TEXT_PP(0));

	/* Initialize function context */
	pgmpc_init_setof_single(fcinfo, TEXTOID, "playlist", &tupdesc, &tupstore);

	/*
	 * Run the command to get all the songs.
	 */
	pgmpc_init();
	ret = playlist ?
		mpd_send_list_playlist_meta(mpd_conn, playlist) :
		mpd_send_list_queue_meta(mpd_conn);
	if (!ret)
		pgmpc_print_error();

	/* Now get all the songs and send them back to caller */
	while (true)
	{
		Datum       values[1];
		bool		nulls[1];
		struct mpd_song *song = mpd_recv_song(mpd_conn);

		/* Leave if done */
		if (song == NULL)
			break;

		/* Assign song name */
		nulls[0] = false;
		values[0] = CStringGetTextDatum(mpd_song_get_uri(song));

		/* Save values */
		tuplestore_putvalues(tupstore, tupdesc, values, nulls);

		/* Clean up for the next one */
		mpd_song_free(song);
	}

	/* We may be in error state, so check for it */
	if (mpd_connection_get_error(mpd_conn) != MPD_ERROR_SUCCESS)
	{
		const char *message = mpd_connection_get_error_message(mpd_conn);
		pgmpc_reset();
		ereport(ERROR,
				(errcode(ERRCODE_SYSTEM_ERROR),
				 errmsg("mpd command failed: %s",
						message)));
	}

	/* Clean up */
	pgmpc_reset();

	/* clean up and return the tuplestore */
	tuplestore_donestoring(tupstore);

	return (Datum) 0;
}
Example #30
0
File: file.c Project: 50wu/gpdb
/*
 * FUNCTION UTL_FILE.FOPEN(location text,
 *			   filename text,
 *			   open_mode text,
 *			   max_linesize integer)
 *          RETURNS UTL_FILE.FILE_TYPE;
 *
 * The FOPEN function opens specified file and returns file handle.
 *  open_mode: ['R', 'W', 'A']
 *  max_linesize: [1 .. 32767]
 *
 * Exceptions:
 *  INVALID_MODE, INVALID_OPERATION, INVALID_PATH, INVALID_MAXLINESIZE
 */
Datum
utl_file_fopen(PG_FUNCTION_ARGS)
{
	text	   *open_mode;
	int			max_linesize;
	int			encoding;
	const char *mode = NULL;
	FILE	   *file;
	char	   *fullname;
	int			d;

	NOT_NULL_ARG(0);
	NOT_NULL_ARG(1);
	NOT_NULL_ARG(2);
	NOT_NULL_ARG(3);

	open_mode = PG_GETARG_TEXT_P(2);

	NON_EMPTY_TEXT(open_mode);

	max_linesize = PG_GETARG_INT32(3);
	CHECK_LINESIZE(max_linesize);

	if (PG_NARGS() > 4 && !PG_ARGISNULL(4))
	{
		const char *encname = NameStr(*PG_GETARG_NAME(4));
		encoding = pg_char_to_encoding(encname);
		if (encoding < 0)
			ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid encoding name \"%s\"", encname)));
	}
	else
		encoding = GetDatabaseEncoding();

	if (VARSIZE(open_mode) - VARHDRSZ != 1)
		CUSTOM_EXCEPTION(INVALID_MODE, "open mode is different than [R,W,A]");

	switch (*((char*)VARDATA(open_mode)))
	{
		case 'a':
		case 'A':
			mode = "a";
			break;

		case 'r':
		case 'R':
			mode = "r";
			break;

		case 'w':
		case 'W':
			mode = "w";
			break;

		default:
			CUSTOM_EXCEPTION(INVALID_MODE, "open mode is different than [R,W,A]");
	}

	/* open file */
	fullname = get_safe_path(PG_GETARG_TEXT_P(0), PG_GETARG_TEXT_P(1));

	/*
	 * We cannot use AllocateFile here because those files are automatically
	 * closed at the end of (sub)transactions, but we want to keep them open
	 * for oracle compatibility.
	 */
#if NOT_USED
	fullname = convert_encoding_server_to_platform(fullname);
#endif
	file = fopen(fullname, mode);
	if (!file)
		IO_EXCEPTION();

	d = get_descriptor(file, max_linesize, encoding);
	if (d == INVALID_SLOTID)
	{
		fclose(file);
		ereport(ERROR,
		    (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
		     errmsg("program limit exceeded"),
		     errdetail("Too much concurent opened files"),
		     errhint("You can only open a maximum of ten files for each session")));
	}

	PG_RETURN_INT32(d);
}
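get_descriptor() and INVALID_SLOTID are not shown above. A minimal sketch of how such a slot table can hand out integer file handles and signal exhaustion (an assumption for illustration, not the orafce implementation; the ten-slot limit mirrors the errhint in the example):

#include <stdio.h>

#define MAX_SLOTS		10
#define INVALID_SLOTID	(-1)

typedef struct
{
	FILE	   *file;
	int			max_linesize;
	int			encoding;
} FileSlot;

static FileSlot slots[MAX_SLOTS];

static int
get_descriptor_sketch(FILE *file, int max_linesize, int encoding)
{
	int			d;

	for (d = 0; d < MAX_SLOTS; d++)
	{
		if (slots[d].file == NULL)
		{
			slots[d].file = file;
			slots[d].max_linesize = max_linesize;
			slots[d].encoding = encoding;
			return d;			/* handle returned to the caller */
		}
	}
	return INVALID_SLOTID;		/* all slots in use */
}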