Datum x509_get_version(PG_FUNCTION_ARGS) { bytea *raw; X509 *cert; int version; // check for null value. raw = PG_GETARG_BYTEA_P(0); if (raw == NULL || VARSIZE(raw) == VARHDRSZ) { PG_RETURN_NULL(); } cert = x509_from_bytea(raw); if (cert == NULL) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg( "unable to decode X509 record"))); } version = X509_get_version(cert); X509_free(cert); PG_RETURN_INT32(version); }
Datum pg_stat_get_backend_wait_event_type(PG_FUNCTION_ARGS) { int32 beid = PG_GETARG_INT32(0); PgBackendStatus *beentry; PGPROC *proc; const char *wait_event_type; if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL) wait_event_type = "<backend information not available>"; else if (!has_privs_of_role(GetUserId(), beentry->st_userid)) wait_event_type = "<insufficient privilege>"; else { proc = BackendPidGetProc(beentry->st_procpid); wait_event_type = pgstat_get_wait_event_type(proc->wait_event_info); } if (!wait_event_type) PG_RETURN_NULL(); PG_RETURN_TEXT_P(cstring_to_text(wait_event_type)); }
Datum fetchval(PG_FUNCTION_ARGS) { HStore *hs = PG_GETARG_HS(0); text *key = PG_GETARG_TEXT_P(1); HEntry *entry; text *out; if ((entry = findkey(hs, VARDATA(key), VARSIZE(key) - VARHDRSZ)) == NULL || entry->valisnull) { PG_FREE_IF_COPY(hs, 0); PG_FREE_IF_COPY(key, 1); PG_RETURN_NULL(); } out = palloc(VARHDRSZ + entry->vallen); memcpy(VARDATA(out), STRPTR(hs) + entry->pos + entry->keylen, entry->vallen); SET_VARSIZE(out, VARHDRSZ + entry->vallen); PG_FREE_IF_COPY(hs, 0); PG_FREE_IF_COPY(key, 1); PG_RETURN_POINTER(out); }
Datum lexize_bycurrent(PG_FUNCTION_ARGS) { Datum res; SET_FUNCOID(); if (currect_dictionary_id == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("no currect dictionary"), errhint("Execute select set_curdict()."))); res = DirectFunctionCall3( lexize, ObjectIdGetDatum(currect_dictionary_id), PG_GETARG_DATUM(0), (Datum) 0 ); if (res) PG_RETURN_DATUM(res); else PG_RETURN_NULL(); }
/* * SQL function: format_type(type_oid, typemod) * * `type_oid' is from pg_type.oid, `typemod' is from * pg_attribute.atttypmod. This function will get the type name and * format it and the modifier to canonical SQL format, if the type is * a standard type. Otherwise you just get pg_type.typname back, * double quoted if it contains funny characters or matches a keyword. * * If typemod is NULL then we are formatting a type name in a context where * no typemod is available, eg a function argument or result type. This * yields a slightly different result from specifying typemod = -1 in some * cases. Given typemod = -1 we feel compelled to produce an output that * the parser will interpret as having typemod -1, so that pg_dump will * produce CREATE TABLE commands that recreate the original state. But * given NULL typemod, we assume that the parser's interpretation of * typemod doesn't matter, and so we are willing to output a slightly * "prettier" representation of the same type. For example, type = bpchar * and typemod = NULL gets you "character", whereas typemod = -1 gets you * "bpchar" --- the former will be interpreted as character(1) by the * parser, which does not yield typemod -1. * * XXX encoding a meaning in typemod = NULL is ugly; it'd have been * cleaner to make two functions of one and two arguments respectively. * Not worth changing it now, however. */ Datum format_type(PG_FUNCTION_ARGS) { Oid type_oid; int32 typemod; char *result; /* Since this function is not strict, we must test for null args */ if (PG_ARGISNULL(0)) PG_RETURN_NULL(); type_oid = PG_GETARG_OID(0); if (PG_ARGISNULL(1)) result = format_type_internal(type_oid, -1, false, true); else { typemod = PG_GETARG_INT32(1); result = format_type_internal(type_oid, typemod, true, true); } PG_RETURN_TEXT_P(cstring_to_text(result)); }
Datum LWGEOM_x_point(PG_FUNCTION_ARGS) { GSERIALIZED *geom; LWGEOM *lwgeom; LWPOINT *point = NULL; POINT2D p; geom = (GSERIALIZED *)PG_DETOAST_DATUM(PG_GETARG_DATUM(0)); if ( gserialized_get_type(geom) != POINTTYPE ) lwerror("Argument to X() must be a point"); lwgeom = lwgeom_from_gserialized(geom); point = lwgeom_as_lwpoint(lwgeom); if ( lwgeom_is_empty(lwgeom) ) PG_RETURN_NULL(); getPoint2d_p(point->point, 0, &p); PG_FREE_IF_COPY(geom, 0); PG_RETURN_FLOAT8(p.x); }
/*
 * ST_LineToCurve support: convert linear approximations back into their
 * curved equivalents.  Returns NULL when the conversion produces nothing.
 */
Datum LWGEOM_line_desegmentize(PG_FUNCTION_ARGS) {
    GSERIALIZED *geom = PG_GETARG_GSERIALIZED_P(0);
    GSERIALIZED *ret;
    LWGEOM *input_geom;
    LWGEOM *curved_geom;

    POSTGIS_DEBUG(2, "LWGEOM_line_desegmentize.");

    input_geom = lwgeom_from_gserialized(geom);
    curved_geom = lwgeom_unstroke(input_geom);
    lwgeom_free(input_geom);

    if (!curved_geom) {
        PG_FREE_IF_COPY(geom, 0);
        PG_RETURN_NULL();
    }

    ret = geometry_serialize(curved_geom);
    lwgeom_free(curved_geom);
    PG_FREE_IF_COPY(geom, 0);

    PG_RETURN_POINTER(ret);
}
/*
 * hello(text) -> text
 *
 * Prepend "Hello, " to the argument.  Returns NULL for NULL input (the
 * explicit check makes the function safe even if not declared STRICT).
 */
PGDLLEXPORT Datum hello(PG_FUNCTION_ARGS) {
    static const char greet[] = "Hello, ";
    text *towhom;
    text *greeting;
    int greetlen;
    int towhomlen;
    int greeting_size;

    /* superfluous if declared STRICT, but harmless */
    if (PG_ARGISNULL(0)) {
        PG_RETURN_NULL();
    }
    towhom = PG_GETARG_TEXT_P(0);

    greetlen = (int) strlen(greet);
    towhomlen = VARSIZE(towhom) - VARHDRSZ;

    /*
     * BUG FIX: the old code allocated only greetlen + towhomlen bytes but
     * then SET_VARSIZE'd the datum to greetlen + towhomlen + VARHDRSZ,
     * writing the varlena header and payload past the end of the
     * allocation.  Allocate the header too.
     */
    greeting_size = VARHDRSZ + greetlen + towhomlen;
    greeting = (text *) palloc(greeting_size);
    SET_VARSIZE(greeting, greeting_size);

    /* memcpy: lengths are known exactly, no NUL terminator wanted */
    memcpy(VARDATA(greeting), greet, greetlen);
    memcpy(VARDATA(greeting) + greetlen, VARDATA(towhom), towhomlen);

    PG_RETURN_TEXT_P(greeting);
}
/* * Read a section of a file, returning it as text */ Datum pg_read_file(PG_FUNCTION_ARGS) { text *filename_t = PG_GETARG_TEXT_PP(0); int64 seek_offset = 0; int64 bytes_to_read = -1; bool missing_ok = false; char *filename; text *result; if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to read files")))); /* handle optional arguments */ if (PG_NARGS() >= 3) { seek_offset = PG_GETARG_INT64(1); bytes_to_read = PG_GETARG_INT64(2); if (bytes_to_read < 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("requested length cannot be negative"))); } if (PG_NARGS() >= 4) missing_ok = PG_GETARG_BOOL(3); filename = convert_and_check_filename(filename_t); result = read_text_file(filename, seek_offset, bytes_to_read, missing_ok); if (result) PG_RETURN_TEXT_P(result); else PG_RETURN_NULL(); }
Datum pcpatch_in(PG_FUNCTION_ARGS) { char *str = PG_GETARG_CSTRING(0); /* Datum geog_oid = PG_GETARG_OID(1); Not needed. */ uint32 typmod = 0, pcid = 0; PCPATCH *patch; SERIALIZED_PATCH *serpatch = NULL; if ( (PG_NARGS()>2) && (!PG_ARGISNULL(2)) ) { typmod = PG_GETARG_INT32(2); pcid = pcid_from_typmod(typmod); } /* Empty string. */ if ( str[0] == '\0' ) { ereport(ERROR,(errmsg("pcpatch parse error - empty string"))); } /* Binary or text form? Let's find out. */ if ( str[0] == '0' ) { /* Hex-encoded binary */ patch = pc_patch_from_hexwkb(str, strlen(str), fcinfo); pcid_consistent(patch->schema->pcid, pcid); serpatch = pc_patch_serialize(patch, NULL); pc_patch_free(patch); } else { ereport(ERROR,(errmsg("parse error - support for text format not yet implemented"))); } if ( serpatch ) PG_RETURN_POINTER(serpatch); else PG_RETURN_NULL(); }
Datum linterp_int32(PG_FUNCTION_ARGS) { float8 y0; float8 y1; float8 p; float8 r; int32 result; bool eq_bounds = false; bool eq_abscissas = false; /* Common */ p = linterp_abscissa(fcinfo, &eq_bounds, &eq_abscissas); /* Ordinate type specific code*/ y0 = (float8)PG_GETARG_INT32(2); y1 = (float8)PG_GETARG_INT32(4); if ( eq_bounds ) { if ( eq_abscissas && y0 == y1 ) r = y0; else PG_RETURN_NULL(); } else { r = round(y0+p*(y1-y0)); if ( r < INT_MIN || r > INT_MAX ) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("value \"%f\" is out of range for type integer", r))); } result = (int32)r; PG_RETURN_INT32(result); }
Datum xpath_string(PG_FUNCTION_ARGS) { text *document = PG_GETARG_TEXT_P(0); text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ xmlChar *xpath; int32 pathsize; text *xpres; xmlXPathObjectPtr res; xpath_workspace workspace; pathsize = VARSIZE(xpathsupp) - VARHDRSZ; /* * We encapsulate the supplied path with "string()" = 8 chars + 1 for NUL * at end */ /* We could try casting to string using the libxml function? */ xpath = (xmlChar *) palloc(pathsize + 9); strncpy((char *) xpath, "string(", 7); memcpy((char *) (xpath + 7), VARDATA(xpathsupp), pathsize); xpath[pathsize + 7] = ')'; xpath[pathsize + 8] = '\0'; res = pgxml_xpath(document, xpath, &workspace); xpres = pgxml_result_to_text(res, NULL, NULL, NULL); cleanup_workspace(&workspace); pfree(xpath); if (xpres == NULL) PG_RETURN_NULL(); PG_RETURN_TEXT_P(xpres); }
Datum xpath_string(PG_FUNCTION_ARGS) { xmlChar *xpath; int32 pathsize; text *xpathsupp, *xpres; /* PG_GETARG_TEXT_P(0) is document buffer */ xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ pathsize = VARSIZE(xpathsupp) - VARHDRSZ; /* * We encapsulate the supplied path with "string()" = 8 chars + 1 for NUL * at end */ /* We could try casting to string using the libxml function? */ xpath = (xmlChar *) palloc(pathsize + 9); memcpy((char *) (xpath + 7), VARDATA(xpathsupp), pathsize); strncpy((char *) xpath, "string(", 7); xpath[pathsize + 7] = ')'; xpath[pathsize + 8] = '\0'; xpres = pgxml_result_to_text( pgxml_xpath(PG_GETARG_TEXT_P(0), xpath), NULL, NULL, NULL); xmlCleanupParser(); pfree(xpath); if (xpres == NULL) PG_RETURN_NULL(); PG_RETURN_TEXT_P(xpres); }
Datum pgx_complex_near(PG_FUNCTION_ARGS) { double re[2]; double im[2]; double p, q; int i; // unwrap values. for (i = 0; i < 2; i++) { HeapTupleHeader t = PG_GETARG_HEAPTUPLEHEADER(i); bool isnull[2]; Datum dr = GetAttributeByName(t, "re", &isnull[0]); Datum di = GetAttributeByName(t, "im", &isnull[1]); // STRICT prevents the 'complex' value from being null but does // not prevent its components from being null. if (isnull[0] || isnull[1]) { PG_RETURN_NULL(); } re[i] = DatumGetFloat8(dr); im[i] = DatumGetFloat8(di); } // compute distance between points, distance of points from origin. p = hypot(re[0] - re[1], im[0] - im[1]); q = hypot(re[0], im[0]) + hypot(re[1], im[1]); if (q == 0) { PG_RETURN_BOOL(1); } // we consider the points 'near' each other if the distance between them is small // relative to the size of them. PG_RETURN_BOOL(p / q < 1e-8); }
Datum quantile_numeric_array(PG_FUNCTION_ARGS) { int i, idx = 0; struct_numeric * data; Numeric * result; CHECK_AGG_CONTEXT("quantile_numeric_array", fcinfo); if (PG_ARGISNULL(0)) { PG_RETURN_NULL(); } data = (struct_numeric*)PG_GETARG_POINTER(0); result = palloc(data->nquantiles * sizeof(Numeric)); qsort(data->elements, data->next, sizeof(Numeric), &numeric_comparator); for (i = 0; i < data->nquantiles; i++) { if ((data->quantiles[i] > 0) && (data->quantiles[i] < 1)) { idx = (int)ceil(data->next * data->quantiles[i]) - 1; } else if (data->quantiles[i] <= 0) { idx = 0; } else if (data->quantiles[i] >= 1) { idx = data->next - 1; } result[i] = data->elements[idx]; } return numeric_to_array(fcinfo, result, data->nquantiles); }
Datum regress_dist_ptpath(PG_FUNCTION_ARGS) { Point *pt = PG_GETARG_POINT_P(0); PATH *path = PG_GETARG_PATH_P(1); float8 result = 0.0; /* keep compiler quiet */ float8 tmp; int i; LSEG lseg; switch (path->npts) { case 0: PG_RETURN_NULL(); case 1: result = point_dt(pt, &path->p[0]); break; default: /* * the distance from a point to a path is the smallest distance * from the point to any of its constituent segments. */ Assert(path->npts > 1); for (i = 0; i < path->npts - 1; ++i) { regress_lseg_construct(&lseg, &path->p[i], &path->p[i + 1]); tmp = DatumGetFloat8(DirectFunctionCall2(dist_ps, PointPGetDatum(pt), LsegPGetDatum(&lseg))); if (i == 0 || tmp < result) result = tmp; } break; } PG_RETURN_FLOAT8(result); }
Datum pg_tablespace_size_name(PG_FUNCTION_ARGS) { Name tblspcName = PG_GETARG_NAME(0); Oid tblspcOid = get_tablespace_oid(NameStr(*tblspcName), false); int64 size; size = calculate_tablespace_size(tblspcOid); if (Gp_role == GP_ROLE_DISPATCH) { char *sql; sql = psprintf("select pg_catalog.pg_tablespace_size(%s)", quote_literal_cstr(NameStr(*tblspcName))); size += get_size_from_segDBs(sql); } if (size < 0) PG_RETURN_NULL(); PG_RETURN_INT64(size); }
Datum ssl_client_serial(PG_FUNCTION_ARGS) { Datum result; Port *port = MyProcPort; X509 *peer = port->peer; ASN1_INTEGER *serial = NULL; BIGNUM *b; char *decimal; if (!peer) PG_RETURN_NULL(); serial = X509_get_serialNumber(peer); b = ASN1_INTEGER_to_BN(serial, NULL); decimal = BN_bn2dec(b); BN_free(b); result = DirectFunctionCall3(numeric_in, CStringGetDatum(decimal), ObjectIdGetDatum(0), Int32GetDatum(-1)); OPENSSL_free(decimal); return result; }
Datum pg_relation_size_name(PG_FUNCTION_ARGS) { text *relname = PG_GETARG_TEXT_P(0); RangeVar *relrv; Relation rel; int64 size; if (GP_ROLE_EXECUTE == Gp_role) { ereport(ERROR, (errcode(ERRCODE_GP_COMMAND_ERROR), errmsg("pg_relation_size: cannot be executed in segment"))); } relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname)); rel = try_relation_openrv(relrv, AccessShareLock, false); /* * While we scan pg_class with an MVCC snapshot, * someone else might drop the table. It's better to return NULL for * already-dropped tables than throw an error and abort the whole query. */ if (!RelationIsValid(rel)) PG_RETURN_NULL(); if (rel->rd_node.relNode == 0) size = 0; else size = calculate_relation_size(rel); relation_close(rel, AccessShareLock); PG_RETURN_INT64(size); }
Datum pcpatch_bytea_envelope(PG_FUNCTION_ARGS) { SERIALIZED_PATCH *serpatch = PG_GETARG_SERPATCH_P(0); uint8 *bytes; size_t bytes_size; bytea *wkb; size_t wkb_size; PCSCHEMA *schema = pc_schema_from_pcid(serpatch->pcid, fcinfo); PCPATCH *pa = pc_patch_deserialize(serpatch, schema); if ( ! pa ) PG_RETURN_NULL(); bytes = pc_patch_to_geometry_wkb_envelope(pa, &bytes_size); wkb_size = VARHDRSZ + bytes_size; wkb = palloc(wkb_size); memcpy(VARDATA(wkb), bytes, bytes_size); SET_VARSIZE(wkb, wkb_size); pc_patch_free(pa); pfree(bytes); PG_RETURN_BYTEA_P(wkb); }
Datum regress_path_dist(PG_FUNCTION_ARGS) { PATH *p1 = PG_GETARG_PATH_P(0); PATH *p2 = PG_GETARG_PATH_P(1); bool have_min = false; float8 min = 0.0; /* initialize to keep compiler quiet */ float8 tmp; int i, j; LSEG seg1, seg2; for (i = 0; i < p1->npts - 1; i++) { for (j = 0; j < p2->npts - 1; j++) { regress_lseg_construct(&seg1, &p1->p[i], &p1->p[i + 1]); regress_lseg_construct(&seg2, &p2->p[j], &p2->p[j + 1]); tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance, LsegPGetDatum(&seg1), LsegPGetDatum(&seg2))); if (!have_min || tmp < min) { min = tmp; have_min = true; } } } if (!have_min) PG_RETURN_NULL(); PG_RETURN_FLOAT8(min); }
Datum array_agg_finalfn(PG_FUNCTION_ARGS) { Datum result; ArrayBuildState *state; int dims[1]; int lbs[1]; /* * Test for null before Asserting we are in right context. This is to * avoid possible Assert failure in 8.4beta installations, where it is * possible for users to create NULL constants of type internal. */ if (PG_ARGISNULL(0)) PG_RETURN_NULL(); /* returns null iff no input values */ /* cannot be called directly because of internal-type argument */ Assert(AggCheckCallContext(fcinfo, NULL)); state = (ArrayBuildState *) PG_GETARG_POINTER(0); dims[0] = state->nelems; lbs[0] = 1; /* * Make the result. We cannot release the ArrayBuildState because * sometimes aggregate final functions are re-executed. Rather, it is * nodeAgg.c's responsibility to reset the aggcontext when it's safe to do * so. */ result = makeMdArrayResult(state, 1, dims, lbs, CurrentMemoryContext, false); PG_RETURN_DATUM(result); }
Datum lwgeom_eq(PG_FUNCTION_ARGS) { GSERIALIZED *geom1 = (GSERIALIZED *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0)); GSERIALIZED *geom2 = (GSERIALIZED *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1)); GBOX box1; GBOX box2; bool result; POSTGIS_DEBUG(2, "lwgeom_eq called"); if (gserialized_get_srid(geom1) != gserialized_get_srid(geom2)) { elog(BTREE_SRID_MISMATCH_SEVERITY, "Operation on two GEOMETRIES with different SRIDs\n"); PG_FREE_IF_COPY(geom1, 0); PG_FREE_IF_COPY(geom2, 1); PG_RETURN_NULL(); } gserialized_get_gbox_p(geom1, &box1); gserialized_get_gbox_p(geom2, &box2); PG_FREE_IF_COPY(geom1, 0); PG_FREE_IF_COPY(geom2, 1); if ( ! (FPeq(box1.xmin, box2.xmin) && FPeq(box1.ymin, box2.ymin) && FPeq(box1.xmax, box2.xmax) && FPeq(box1.ymax, box2.ymax)) ) { result = FALSE; } else { result = TRUE; } PG_RETURN_BOOL(result); }
Datum pcpoint_as_bytea(PG_FUNCTION_ARGS) { SERIALIZED_POINT *serpt = PG_GETARG_SERPOINT_P(0); uint8 *bytes; size_t bytes_size; bytea *wkb; size_t wkb_size; PCSCHEMA *schema = pc_schema_from_pcid(serpt->pcid, fcinfo); PCPOINT *pt = pc_point_deserialize(serpt, schema); if ( ! pt ) PG_RETURN_NULL(); bytes = pc_point_to_geometry_wkb(pt, &bytes_size); wkb_size = VARHDRSZ + bytes_size; wkb = palloc(wkb_size); memcpy(VARDATA(wkb), bytes, bytes_size); SET_VARSIZE(wkb, wkb_size); pc_point_free(pt); pfree(bytes); PG_RETURN_BYTEA_P(wkb); }
/*
 * getmass(mol) -> float4
 *
 * Compute the molecular mass of the given molecule via mangoMass().
 * On a Bingo-level failure (mangoMass returns < 1) a warning is emitted
 * and NULL is returned.  PG_BINGO_BEGIN/END bracket the C++ session
 * handling; control-flow semantics of those macros are defined elsewhere.
 */
Datum getmass(PG_FUNCTION_ARGS){
    Datum mol_datum = PG_GETARG_DATUM(0);
    float result = 0;
    PG_BINGO_BEGIN
    {
        /* bind this call to a Bingo session keyed by the function oid */
        BingoPgCommon::BingoSessionHandler bingo_handler(fcinfo->flinfo->fn_oid);
        bingo_handler.setFunctionName("getmass");

        /* raw molecule text buffer (not necessarily NUL-terminated) */
        BingoPgText mol_text(mol_datum);
        int buf_len, bingo_res;
        const char* buf = mol_text.getText(buf_len);

        /* mangoMass writes the mass into `result`; < 1 signals failure */
        bingo_res = mangoMass(buf, buf_len, 0, &result);
        if(bingo_res < 1) {
            CORE_HANDLE_WARNING(0, 1, "bingo.getmass", bingoGetError());
            PG_RETURN_NULL();
        }
    }
    PG_BINGO_END
    PG_RETURN_FLOAT4(result);
}
Datum array_agg_array_finalfn(PG_FUNCTION_ARGS) { Datum result; ArrayBuildStateArr *state; /* cannot be called directly because of internal-type argument */ Assert(AggCheckCallContext(fcinfo, NULL)); state = PG_ARGISNULL(0) ? NULL : (ArrayBuildStateArr *) PG_GETARG_POINTER(0); if (state == NULL) PG_RETURN_NULL(); /* returns null iff no input values */ /* * Make the result. We cannot release the ArrayBuildStateArr because * sometimes aggregate final functions are re-executed. Rather, it is * nodeAgg.c's responsibility to reset the aggcontext when it's safe to do * so. */ result = makeArrayResultArr(state, CurrentMemoryContext, false); PG_RETURN_DATUM(result); }
/*
 * box2d_from_geohash(text [, int4]) -> box2d
 *
 * Decode a geohash string into its bounding box; the optional second
 * argument limits the decoding precision (-1 = full precision).
 */
Datum box2d_from_geohash(PG_FUNCTION_ARGS) {
    text *hash_text;
    char *hash_cstr;
    GBOX *bbox;
    int precision = -1;

    if (PG_ARGISNULL(0)) {
        PG_RETURN_NULL();
    }

    /* optional precision override */
    if (!PG_ARGISNULL(1)) {
        precision = PG_GETARG_INT32(1);
    }

    hash_text = PG_GETARG_TEXT_P(0);
    hash_cstr = text2cstring(hash_text);

    bbox = parse_geohash(hash_cstr, precision);

    PG_RETURN_POINTER(bbox);
}
Datum LWGEOM_startpoint_linestring(PG_FUNCTION_ARGS) { GSERIALIZED *geom = PG_GETARG_GSERIALIZED_P(0); LWGEOM *lwgeom = lwgeom_from_gserialized(geom); LWPOINT *lwpoint = NULL; int type = lwgeom->type; if ( type == LINETYPE || type == CIRCSTRINGTYPE ) { lwpoint = lwline_get_lwpoint((LWLINE*)lwgeom, 0); } else if ( type == COMPOUNDTYPE ) { lwpoint = lwcompound_get_startpoint((LWCOMPOUND*)lwgeom); } lwgeom_free(lwgeom); PG_FREE_IF_COPY(geom, 0); if ( ! lwpoint ) PG_RETURN_NULL(); PG_RETURN_POINTER(geometry_serialize(lwpoint_as_lwgeom(lwpoint))); }
Datum pg_file_rename(PG_FUNCTION_ARGS) { char *fn1, *fn2, *fn3; int rc; requireSuperuser(); if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) PG_RETURN_NULL(); fn1 = convert_and_check_filename(PG_GETARG_TEXT_PP(0), false); fn2 = convert_and_check_filename(PG_GETARG_TEXT_PP(1), false); if (PG_ARGISNULL(2)) fn3 = 0; else fn3 = convert_and_check_filename(PG_GETARG_TEXT_PP(2), false); if (access(fn1, W_OK) < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("file \"%s\" is not accessible: %m", fn1))); PG_RETURN_BOOL(false); } if (fn3 && access(fn2, W_OK) < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("file \"%s\" is not accessible: %m", fn2))); PG_RETURN_BOOL(false); } rc = access(fn3 ? fn3 : fn2, 2); if (rc >= 0 || errno != ENOENT) { ereport(ERROR, (ERRCODE_DUPLICATE_FILE, errmsg("cannot rename to target file \"%s\"", fn3 ? fn3 : fn2))); } if (fn3) { if (rename(fn2, fn3) != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not rename \"%s\" to \"%s\": %m", fn2, fn3))); } if (rename(fn1, fn2) != 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not rename \"%s\" to \"%s\": %m", fn1, fn2))); if (rename(fn3, fn2) != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not rename \"%s\" back to \"%s\": %m", fn3, fn2))); } else { ereport(ERROR, (ERRCODE_UNDEFINED_FILE, errmsg("renaming \"%s\" to \"%s\" was reverted", fn2, fn3))); } } } else if (rename(fn1, fn2) != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not rename \"%s\" to \"%s\": %m", fn1, fn2))); } PG_RETURN_BOOL(true); }
/*
 * crosstab(sql text) -> setof record
 *
 * Pivot the rows produced by the given query into a crosstab.  The query
 * must return exactly three columns (rowid, category, value); consecutive
 * rows sharing a rowid are folded into one output tuple whose columns are
 * filled left-to-right from the value column.  The result is materialized
 * into a tuplestore and handed back via the ReturnSetInfo.
 *
 * NOTE(review): rows are consumed in the order the query returns them;
 * presumably callers ORDER BY rowid so categories group together —
 * unsorted input silently produces tuples padded with NULLs.
 */
Datum crosstab(PG_FUNCTION_ARGS)
{
    char *sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    Tuplestorestate *tupstore;
    TupleDesc tupdesc;
    int call_cntr;          /* index into the SPI result rows */
    int max_calls;          /* total SPI rows to consume */
    AttInMetadata *attinmeta;
    SPITupleTable *spi_tuptable;
    TupleDesc spi_tupdesc;
    bool firstpass;
    char *lastrowid;        /* rowid of the previously emitted tuple */
    int i;
    int num_categories;     /* output columns beyond the rowid column */
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    int ret;
    int proc;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not " \
                        "allowed in this context")));

    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;

    /* Connect to SPI manager */
    if ((ret = SPI_connect()) < 0)
        /* internal error */
        elog(ERROR, "crosstab: SPI_connect returned %d", ret);

    /* Retrieve the desired rows */
    ret = SPI_execute(sql, true, 0);
    proc = SPI_processed;

    /* If no qualifying tuples, fall out early */
    if (ret != SPI_OK_SELECT || proc <= 0)
    {
        SPI_finish();
        rsinfo->isDone = ExprEndResult;
        PG_RETURN_NULL();
    }

    spi_tuptable = SPI_tuptable;
    spi_tupdesc = spi_tuptable->tupdesc;

    /*----------
     * The provided SQL query must always return three columns.
     *
     * 1. rowname
     *    the label or identifier for each row in the final result
     * 2. category
     *    the label or identifier for each column in the final result
     * 3. values
     *    the value for each column in the final result
     *----------
     */
    if (spi_tupdesc->natts != 3)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("invalid source data SQL statement"),
                 errdetail("The provided SQL must return 3 "
                           "columns: rowid, category, and values.")));

    /* get a tuple descriptor for our result type */
    switch (get_call_result_type(fcinfo, NULL, &tupdesc))
    {
        case TYPEFUNC_COMPOSITE:
            /* success */
            break;
        case TYPEFUNC_RECORD:
            /* failed to determine actual type of RECORD */
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("function returning record called in context "
                            "that cannot accept type record")));
            break;
        default:
            /* result type isn't composite */
            elog(ERROR, "return type must be a row type");
            break;
    }

    /*
     * Check that return tupdesc is compatible with the data we got from SPI,
     * at least based on number and type of attributes
     */
    if (!compatCrosstabTupleDescs(tupdesc, spi_tupdesc))
        ereport(ERROR,
                (errcode(ERRCODE_SYNTAX_ERROR),
                 errmsg("return and sql tuple descriptions are " \
                        "incompatible")));

    /*
     * switch to long-lived memory context
     */
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /* make sure we have a persistent copy of the result tupdesc */
    tupdesc = CreateTupleDescCopy(tupdesc);

    /* initialize our tuplestore in long-lived context */
    tupstore =
        tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
                              false, work_mem);

    MemoryContextSwitchTo(oldcontext);

    /*
     * Generate attribute metadata needed later to produce tuples from raw C
     * strings
     */
    attinmeta = TupleDescGetAttInMetadata(tupdesc);

    /* total number of tuples to be examined */
    max_calls = proc;

    /* the return tuple always must have 1 rowid + num_categories columns */
    num_categories = tupdesc->natts - 1;

    firstpass = true;
    lastrowid = NULL;

    for (call_cntr = 0; call_cntr < max_calls; call_cntr++)
    {
        bool skip_tuple = false;
        char **values;

        /* allocate and zero space */
        values = (char **) palloc0((1 + num_categories) * sizeof(char *));

        /*
         * now loop through the sql results and assign each value in sequence
         * to the next category
         */
        for (i = 0; i < num_categories; i++)
        {
            HeapTuple spi_tuple;
            char *rowid;

            /* see if we've gone too far already */
            if (call_cntr >= max_calls)
                break;

            /* get the next sql result tuple */
            spi_tuple = spi_tuptable->vals[call_cntr];

            /* get the rowid from the current sql result tuple */
            rowid = SPI_getvalue(spi_tuple, spi_tupdesc, 1);

            /*
             * If this is the first pass through the values for this rowid,
             * set the first column to rowid
             */
            if (i == 0)
            {
                xpstrdup(values[0], rowid);

                /*
                 * Check to see if the rowid is the same as that of the last
                 * tuple sent -- if so, skip this tuple entirely
                 */
                if (!firstpass && xstreq(lastrowid, rowid))
                {
                    xpfree(rowid);
                    skip_tuple = true;
                    break;
                }
            }

            /*
             * If rowid hasn't changed on us, continue building the output
             * tuple.
             */
            if (xstreq(rowid, values[0]))
            {
                /*
                 * Get the next category item value, which is always attribute
                 * number three.
                 *
                 * Be careful to assign the value to the array index based on
                 * which category we are presently processing.
                 */
                values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);

                /*
                 * increment the counter since we consume a row for each
                 * category, but not for last pass because the outer loop will
                 * do that for us
                 */
                if (i < (num_categories - 1))
                    call_cntr++;
                xpfree(rowid);
            }
            else
            {
                /*
                 * We'll fill in NULLs for the missing values, but we need to
                 * decrement the counter since this sql result row doesn't
                 * belong to the current output tuple.
                 */
                call_cntr--;
                xpfree(rowid);
                break;
            }
        }

        if (!skip_tuple)
        {
            HeapTuple tuple;

            /* build the tuple and store it */
            tuple = BuildTupleFromCStrings(attinmeta, values);
            tuplestore_puttuple(tupstore, tuple);
            heap_freetuple(tuple);
        }

        /* Remember current rowid */
        xpfree(lastrowid);
        xpstrdup(lastrowid, values[0]);
        firstpass = false;

        /* Clean up */
        for (i = 0; i < num_categories + 1; i++)
            if (values[i] != NULL)
                pfree(values[i]);
        pfree(values);
    }

    /* let the caller know we're sending back a tuplestore */
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    /* release SPI related resources (and return to caller's context) */
    SPI_finish();

    return (Datum) 0;
}