/*
 * decode(lhs, [rhs, ret], ..., [default])
 *
 * Oracle-compatible DECODE: compares lhs against each rhs in turn and
 * returns the ret paired with the first match.  With an even argument
 * count the last argument is the default result; otherwise the default
 * is NULL.
 */
Datum
ora_decode(PG_FUNCTION_ARGS)
{
	int		nargs;
	int		i;
	int		retarg;			/* argument index of the result; -1 means NULL */

	/* default value is last arg or NULL. */
	nargs = PG_NARGS();
	if (nargs % 2 == 0)
	{
		retarg = nargs - 1;
		nargs -= 1;		/* ignore the last argument */
	}
	else
		retarg = -1;	/* NULL */

	if (PG_ARGISNULL(0))
	{
		/*
		 * Oracle semantics: a NULL lhs matches only a NULL rhs, so scan the
		 * rhs arguments (odd positions) for the first NULL.
		 */
		for (i = 1; i < nargs; i += 2)
		{
			if (PG_ARGISNULL(i))
			{
				retarg = i + 1;
				break;
			}
		}
	}
	else
	{
		FmgrInfo   *eq;
		Oid			collation = PG_GET_COLLATION();

		/*
		 * On first call, get the input type's operator '=' and save at
		 * fn_extra.
		 */
		if (fcinfo->flinfo->fn_extra == NULL)
		{
			MemoryContext oldctx;
			Oid			typid = get_fn_expr_argtype(fcinfo->flinfo, 0);
			Oid			eqoid = equality_oper_funcid(typid);

			/*
			 * Allocate the cached FmgrInfo in fn_mcxt so it survives across
			 * calls of this expression node.
			 */
			oldctx = MemoryContextSwitchTo(fcinfo->flinfo->fn_mcxt);
			eq = palloc(sizeof(FmgrInfo));
			fmgr_info(eqoid, eq);
			MemoryContextSwitchTo(oldctx);

			fcinfo->flinfo->fn_extra = eq;
		}
		else
			eq = fcinfo->flinfo->fn_extra;

		for (i = 1; i < nargs; i += 2)
		{
			FunctionCallInfoData func;
			Datum		result;

			/* a NULL rhs cannot equal the non-NULL lhs; skip it */
			if (PG_ARGISNULL(i))
				continue;

			/* invoke the cached '=' function: lhs = rhs[i] ? */
			InitFunctionCallInfoData(func, eq, 2, collation, NULL, NULL);
			func.arg[0] = PG_GETARG_DATUM(0);
			func.arg[1] = PG_GETARG_DATUM(i);
			func.argnull[0] = false;
			func.argnull[1] = false;
			result = FunctionCallInvoke(&func);

			/* treat a NULL comparison result as "no match" */
			if (!func.isnull && DatumGetBool(result))
			{
				retarg = i + 1;
				break;
			}
		}
	}

	if (retarg < 0 || PG_ARGISNULL(retarg))
		PG_RETURN_NULL();
	else
		PG_RETURN_DATUM(PG_GETARG_DATUM(retarg));
}
/* * SQL function jsonb_object(text[], text[]) * * take separate name and value arrays of text to construct a jsonb object * pairwise. */ Datum jsonb_object_two_arg(PG_FUNCTION_ARGS) { ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(0); ArrayType *val_array = PG_GETARG_ARRAYTYPE_P(1); int nkdims = ARR_NDIM(key_array); int nvdims = ARR_NDIM(val_array); Datum *key_datums, *val_datums; bool *key_nulls, *val_nulls; int key_count, val_count, i; JsonbInState result; memset(&result, 0, sizeof(JsonbInState)); (void) pushJsonbValue(&result.parseState, WJB_BEGIN_OBJECT, NULL); if (nkdims > 1 || nkdims != nvdims) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("wrong number of array subscripts"))); if (nkdims == 0) PG_RETURN_DATUM(CStringGetTextDatum("{}")); deconstruct_array(key_array, TEXTOID, -1, false, 'i', &key_datums, &key_nulls, &key_count); deconstruct_array(val_array, TEXTOID, -1, false, 'i', &val_datums, &val_nulls, &val_count); if (key_count != val_count) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("mismatched array dimensions"))); for (i = 0; i < key_count; ++i) { JsonbValue v; char *str; int len; if (key_nulls[i]) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("null value not allowed for object key"))); str = TextDatumGetCString(key_datums[i]); len = strlen(str); v.type = jbvString; v.val.string.len = len; v.val.string.val = str; (void) pushJsonbValue(&result.parseState, WJB_KEY, &v); if (val_nulls[i]) { v.type = jbvNull; } else { str = TextDatumGetCString(val_datums[i]); len = strlen(str); v.type = jbvString; v.val.string.len = len; v.val.string.val = str; } (void) pushJsonbValue(&result.parseState, WJB_VALUE, &v); } result.res = pushJsonbValue(&result.parseState, WJB_END_OBJECT, NULL); pfree(key_datums); pfree(key_nulls); pfree(val_datums); pfree(val_nulls); PG_RETURN_POINTER(JsonbValueToJsonb(result.res)); }
/*!
 * UDA transition function for the fmsketch (distinct-count sketch) aggregate.
 *
 * The transition state (arg 0) is a bytea "transblob" whose payload is an
 * fmtransval.  While status == SMALL, incoming values are stored exactly in
 * an embedded sortasort; once MINVALS distinct values have been collected,
 * the state is converted to BIG mode and values are folded into FM bitmaps
 * via __fmsketch_trans_c() instead.
 */
Datum
__fmsketch_trans(PG_FUNCTION_ARGS)
{
	bytea	   *transblob = (bytea *) PG_GETARG_BYTEA_P(0);
	fmtransval *transval;
	Oid			element_type = get_fn_expr_argtype(fcinfo->flinfo, 1);
	Oid			funcOid;
	bool		typIsVarlena;
	Datum		retval;
	Datum		inval;

	/*
	 * This is Postgres boilerplate for UDFs that modify the data in their own
	 * context.  Such UDFs can only be correctly called in an agg context
	 * since regular scalar UDFs are essentially stateless across invocations.
	 */
	if (!(fcinfo->context &&
		  (IsA(fcinfo->context, AggState)
#ifdef NOTGP
		   || IsA(fcinfo->context, WindowAggState)
#endif
		   )))
		elog(ERROR,
			 "UDF call to a function that only works for aggs (destructive pass by reference)");

	/* get the provided element, being careful in case it's NULL */
	if (!PG_ARGISNULL(1))
	{
		if (!OidIsValid(element_type))
			elog(ERROR, "could not determine data type of input");
		inval = PG_GETARG_DATUM(1);

		/*
		 * if this is the first call, initialize transval to hold a sortasort
		 * on the first call, we should have the empty string (if the agg was
		 * declared properly!)
		 */
		if (VARSIZE(transblob) <= VARHDRSZ)
		{
			size_t		blobsz = VARHDRSZ + sizeof(fmtransval) +
				SORTASORT_INITIAL_STORAGE;

			transblob = (bytea *) palloc0(blobsz);
			SET_VARSIZE(transblob, blobsz);
			transval = (fmtransval *) VARDATA(transblob);
			transval->typOid = element_type;

			/* figure out the outfunc for this type */
			getTypeOutputInfo(element_type, &funcOid, &typIsVarlena);
			get_typlenbyval(element_type, &(transval->typLen),
							&(transval->typByVal));
			transval->status = SMALL;
			sortasort_init((sortasort *) transval->storage,
						   MINVALS,
						   SORTASORT_INITIAL_STORAGE,
						   transval->typLen,
						   transval->typByVal);
		}
		else
		{
			/* check_fmtransval(transblob); */
			/* extract the existing transval from the transblob */
			transval = (fmtransval *) VARDATA(transblob);

			/*
			 * NOTE(review): a cross-check that transval->typOid matches
			 * element_type was disabled in the original source; mixed-type
			 * inputs would go undetected here.
			 */
		}

		/*
		 * if we've seen < MINVALS distinct values, place datum into the
		 * sortasort.  XXXX Would be cleaner to try the sortasort insert and
		 * if it fails, then continue.
		 */
		if (transval->status == SMALL &&
			((sortasort *) (transval->storage))->num_vals < MINVALS)
		{
			int			len = ExtractDatumLen(inval, transval->typLen,
											  transval->typByVal, -1);

			retval = PointerGetDatum(fmsketch_sortasort_insert(transblob,
															   inval,
															   len));
			PG_RETURN_DATUM(retval);
		}

		/*
		 * if we've seen exactly MINVALS distinct values, create FM bitmaps
		 * and load the contents of the sortasort into the FM sketch
		 */
		else if (transval->status == SMALL &&
				 ((sortasort *) (transval->storage))->num_vals == MINVALS)
		{
			int			i;
			sortasort  *s = (sortasort *) (transval->storage);
			bytea	   *newblob = fm_new(transval);

			transval = (fmtransval *) VARDATA(newblob);

			/*
			 * "catch up" on the past as if we were doing FM from the
			 * beginning: apply the FM sketching algorithm to each value
			 * previously stored in the sortasort
			 */
			for (i = 0; i < MINVALS; i++)
				__fmsketch_trans_c(newblob,
								   PointerExtractDatum(sortasort_getval(s, i),
													   s->typByVal));

			/*
			 * XXXX would like to pfree the old transblob, but the memory
			 * allocator doesn't like it.  XXXX Meanwhile we know that this
			 * memory "leak" is of fixed size and will get XXXX deallocated
			 * "soon" when the memory context is destroyed.
			 */
			/* drop through to insert the current datum in "BIG" mode */
			transblob = newblob;
		}

		/*
		 * if we're here we've seen >=MINVALS distinct values and are in BIG
		 * mode.  Just for sanity, let's check.
		 */
		if (transval->status != BIG)
			elog(ERROR, "FM sketch failed internal sanity check");

		/* Apply FM algorithm to this datum */
		retval = __fmsketch_trans_c(transblob, inval);
		PG_RETURN_DATUM(retval);
	}
	else
		PG_RETURN_NULL();	/* NULL input leaves the state unchanged (NULL) */
}
/* ----------------------------------------------- * bt_page_stats() * * Usage: SELECT * FROM bt_page_stats('t1_pkey', 1); * ----------------------------------------------- */ Datum bt_page_stats(PG_FUNCTION_ARGS) { text *relname = PG_GETARG_TEXT_PP(0); uint32 blkno = PG_GETARG_UINT32(1); Buffer buffer; Relation rel; RangeVar *relrv; Datum result; HeapTuple tuple; TupleDesc tupleDesc; int j; char *values[11]; BTPageStat stat; if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to use pageinspect functions")))); relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname)); rel = relation_openrv(relrv, AccessShareLock); if (!IS_INDEX(rel) || !IS_BTREE(rel)) elog(ERROR, "relation \"%s\" is not a btree index", RelationGetRelationName(rel)); /* * Reject attempts to read non-local temporary relations; we would be * likely to get wrong data since we have no visibility into the owning * session's local buffers. */ if (RELATION_IS_OTHER_TEMP(rel)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot access temporary tables of other sessions"))); if (blkno == 0) elog(ERROR, "block 0 is a meta page"); CHECK_RELATION_BLOCK_RANGE(rel, blkno); buffer = ReadBuffer(rel, blkno); LockBuffer(buffer, BUFFER_LOCK_SHARE); /* keep compiler quiet */ stat.btpo_prev = stat.btpo_next = InvalidBlockNumber; stat.btpo_flags = stat.free_size = stat.avg_item_size = 0; GetBTPageStatistics(blkno, buffer, &stat); UnlockReleaseBuffer(buffer); relation_close(rel, AccessShareLock); /* Build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) elog(ERROR, "return type must be a row type"); j = 0; values[j++] = psprintf("%d", stat.blkno); values[j++] = psprintf("%c", stat.type); values[j++] = psprintf("%d", stat.live_items); values[j++] = psprintf("%d", stat.dead_items); values[j++] = psprintf("%d", stat.avg_item_size); values[j++] = psprintf("%d", stat.page_size); 
values[j++] = psprintf("%d", stat.free_size); values[j++] = psprintf("%d", stat.btpo_prev); values[j++] = psprintf("%d", stat.btpo_next); values[j++] = psprintf("%d", (stat.type == 'd') ? stat.btpo.xact : stat.btpo.level); values[j++] = psprintf("%d", stat.btpo_flags); tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc), values); result = HeapTupleGetDatum(tuple); PG_RETURN_DATUM(result); }
/*
 * SQL function json_populate_record
 *
 * set fields in a record from the argument json
 *
 * Code adapted shamelessly from hstore's populate_record
 * which is in turn partly adapted from record_out.
 *
 * The json is decomposed into a hash table, in which each
 * field in the record is then looked up by name.
 */
Datum
json_populate_record(PG_FUNCTION_ARGS)
{
	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	text	   *json;
	bool		use_json_as_text;
	HTAB	   *json_hash;
	HeapTupleHeader rec;
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tuple;
	HeapTuple	rettuple;
	RecordIOData *my_extra;		/* per-call cached column I/O info */
	int			ncolumns;
	int			i;
	Datum	   *values;
	bool	   *nulls;
	char		fname[NAMEDATALEN];
	JsonHashEntry hashentry;

	/* optional third argument; default false when NULL */
	use_json_as_text = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2);

	if (!type_is_rowtype(argtype))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("first argument must be a rowtype")));

	if (PG_ARGISNULL(0))
	{
		if (PG_ARGISNULL(1))
			PG_RETURN_NULL();

		rec = NULL;

		/*
		 * have no tuple to look at, so the only source of type info is the
		 * argtype.  The lookup_rowtype_tupdesc call below will error out if
		 * we don't have a known composite type oid here.
		 */
		tupType = argtype;
		tupTypmod = -1;
	}
	else
	{
		rec = PG_GETARG_HEAPTUPLEHEADER(0);

		/* NULL json leaves the input record unchanged */
		if (PG_ARGISNULL(1))
			PG_RETURN_POINTER(rec);

		/* Extract type info from the tuple itself */
		tupType = HeapTupleHeaderGetTypeId(rec);
		tupTypmod = HeapTupleHeaderGetTypMod(rec);
	}

	json = PG_GETARG_TEXT_P(1);

	/* decompose the top-level json object into a name -> value hash */
	json_hash = get_json_object_as_hash(json, "json_populate_record",
										use_json_as_text);

	/*
	 * if the input json is empty, we can only skip the rest if we were passed
	 * in a non-null record, since otherwise there may be issues with domain
	 * nulls.
	 */
	if (hash_get_num_entries(json_hash) == 0 && rec)
		PG_RETURN_POINTER(rec);

	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	if (rec)
	{
		/* Build a temporary HeapTuple control structure */
		tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
		ItemPointerSetInvalid(&(tuple.t_self));
		tuple.t_tableOid = InvalidOid;
		tuple.t_data = rec;
	}

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		/* new row type: reset the cache and remember the new identity */
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	if (rec)
	{
		/* Break down the tuple into fields */
		heap_deform_tuple(&tuple, tupdesc, values, nulls);
	}
	else
	{
		/* no input record: start from an all-NULL row */
		for (i = 0; i < ncolumns; ++i)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
		}
	}

	for (i = 0; i < ncolumns; ++i)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *value;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
		{
			nulls[i] = true;
			continue;
		}

		memset(fname, 0, NAMEDATALEN);
		strncpy(fname, NameStr(tupdesc->attrs[i]->attname), NAMEDATALEN);
		hashentry = hash_search(json_hash, fname, HASH_FIND, NULL);

		/*
		 * we can't just skip here if the key wasn't found since we might have
		 * a domain to deal with.  If we were passed in a non-null record
		 * datum, we assume that the existing values are valid (if they're
		 * not, then it's not our fault), but if we were passed in a null,
		 * then every field which we don't populate needs to be run through
		 * the input function just in case it's a domain type.
		 */
		if (hashentry == NULL && rec)
			continue;

		/*
		 * Prepare to convert the column value from text
		 */
		if (column_info->column_type != column_type)
		{
			getTypeInputInfo(column_type,
							 &column_info->typiofunc,
							 &column_info->typioparam);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		if (hashentry == NULL || hashentry->isnull)
		{
			/*
			 * need InputFunctionCall to happen even for nulls, so that domain
			 * checks are done
			 */
			values[i] = InputFunctionCall(&column_info->proc, NULL,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = true;
		}
		else
		{
			value = hashentry->val;

			values[i] = InputFunctionCall(&column_info->proc, value,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = false;
		}
	}

	rettuple = heap_form_tuple(tupdesc, values, nulls);

	ReleaseTupleDesc(tupdesc);

	PG_RETURN_DATUM(HeapTupleGetDatum(rettuple));
}
/*
 * Aggregate transition function: folds every non-NULL element of the array
 * argument (arg 1) into the distinct-element hash set carried as aggregate
 * state (arg 0).  Only fixed-length by-value element types are supported.
 */
Datum
count_distinct_elements_append(PG_FUNCTION_ARGS)
{
	int			i;
	element_set_t *eset = NULL;

	/* info for anyarray */
	Oid			input_type;
	Oid			element_type;

	/* array data */
	ArrayType  *input;
	int			ndims;
	int		   *dims;
	int			nitems;
	bits8	   *null_bitmap;
	char	   *arr_ptr;
	Datum		element;

	/* memory contexts */
	MemoryContext oldcontext;
	MemoryContext aggcontext;

	/*
	 * If the new value is NULL, we simply return the current aggregate state
	 * (it might be NULL, so check it). In this case we don't really care
	 * about the types etc.
	 *
	 * We may still get NULL elements in the array, but to check that we would
	 * have to walk the array, which does not qualify as cheap check. Also we
	 * assume that there's at least one non-NULL element, and we'll walk the
	 * array just once. It's possible we'll get empty set this way.
	 */
	if (PG_ARGISNULL(1) && PG_ARGISNULL(0))
		PG_RETURN_NULL();
	else if (PG_ARGISNULL(1))
		PG_RETURN_DATUM(PG_GETARG_DATUM(0));

	/* from now on we know the new value is not NULL */

	/* get the type of array elements */
	input_type = get_fn_expr_argtype(fcinfo->flinfo, 1);
	element_type = get_element_type(input_type);

	/*
	 * parse the array contents (we know we got non-NULL value)
	 */
	input = PG_GETARG_ARRAYTYPE_P(1);

	ndims = ARR_NDIM(input);
	dims = ARR_DIMS(input);
	nitems = ArrayGetNItems(ndims, dims);
	null_bitmap = ARR_NULLBITMAP(input);
	arr_ptr = ARR_DATA_PTR(input);

	/* make sure we're running as part of aggregate function */
	GET_AGG_CONTEXT("count_distinct_elements_append", fcinfo, aggcontext);

	/* the set must live in the aggregate's memory context */
	oldcontext = MemoryContextSwitchTo(aggcontext);

	/* add all array elements to the set */
	for (i = 0; i < nitems; i++)
	{
		/*
		 * ignore nulls -- a clear bit in the null bitmap marks a NULL
		 * element, for which no data bytes are stored, so we must NOT
		 * advance arr_ptr here
		 */
		if (null_bitmap && !(null_bitmap[i / 8] & (1 << (i % 8))))
			continue;

		/*
		 * init the hash table lazily, on the first non-NULL element, so
		 * that we know the element type details by then
		 */
		if (eset == NULL)
		{
			if (PG_ARGISNULL(0))
			{
				int16		typlen;
				bool		typbyval;
				char		typalign;

				/* get type information for the second parameter (anyelement item) */
				get_typlenbyvalalign(element_type, &typlen, &typbyval,
									 &typalign);

				/* we can't handle varlena types yet or values passed by reference */
				if ((typlen < 0) || (!typbyval))
					elog(ERROR,
						 "count_distinct_elements handles only arrays of fixed-length types passed by value");

				eset = init_set(typlen, typalign, aggcontext);
			}
			else
				eset = (element_set_t *) PG_GETARG_POINTER(0);
		}

		element = fetch_att(arr_ptr, true, eset->item_size);

		add_element(eset, (char *) &element);

		/* advance array pointer */
		arr_ptr = att_addlength_pointer(arr_ptr, eset->item_size, arr_ptr);
		arr_ptr = (char *) att_align_nominal(arr_ptr, eset->typalign);
	}

	MemoryContextSwitchTo(oldcontext);

	/* an all-NULL input array may leave the state uninitialized */
	if (eset == NULL)
		PG_RETURN_NULL();
	else
		PG_RETURN_POINTER(eset);
}
/*
 * dbms_assert.enquote_literal -- wrap a string in single quotes, doubling
 * any embedded quote characters, by delegating to the built-in
 * quote_literal() function.
 */
Datum
dbms_assert_enquote_literal(PG_FUNCTION_ARGS)
{
	Datum		str = PG_GETARG_DATUM(0);

	PG_RETURN_DATUM(DirectFunctionCall1(quote_literal, str));
}
/*
 * Given an index tuple corresponding to a certain page range and a scan key,
 * return whether the scan key is consistent with the index tuple's min/max
 * values.  Return true if so, false otherwise.
 *
 * bv_values[0] holds the range's minimum, bv_values[1] its maximum; the
 * comparison operators are looked up through minmax_get_strategy_procinfo.
 */
Datum
brin_minmax_consistent(PG_FUNCTION_ARGS)
{
	BrinDesc   *bdesc = (BrinDesc *) PG_GETARG_POINTER(0);
	BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1);
	ScanKey		key = (ScanKey) PG_GETARG_POINTER(2);
	Oid			colloid = PG_GET_COLLATION(),
				subtype;
	AttrNumber	attno;
	Datum		value;
	Datum		matches;
	FmgrInfo   *finfo;

	Assert(key->sk_attno == column->bv_attno);

	/* handle IS NULL/IS NOT NULL tests */
	if (key->sk_flags & SK_ISNULL)
	{
		if (key->sk_flags & SK_SEARCHNULL)
		{
			/* the range might contain a NULL if it has (or is all) nulls */
			if (column->bv_allnulls || column->bv_hasnulls)
				PG_RETURN_BOOL(true);
			PG_RETURN_BOOL(false);
		}

		/*
		 * For IS NOT NULL, we can only skip ranges that are known to have
		 * only nulls.
		 */
		if (key->sk_flags & SK_SEARCHNOTNULL)
			PG_RETURN_BOOL(!column->bv_allnulls);

		/*
		 * Neither IS NULL nor IS NOT NULL was used; assume all indexable
		 * operators are strict and return false.
		 */
		PG_RETURN_BOOL(false);
	}

	/* if the range is all empty, it cannot possibly be consistent */
	if (column->bv_allnulls)
		PG_RETURN_BOOL(false);

	attno = key->sk_attno;
	subtype = key->sk_subtype;
	value = key->sk_argument;
	switch (key->sk_strategy)
	{
		case BTLessStrategyNumber:
		case BTLessEqualStrategyNumber:
			/* compare the scan key against the range's minimum */
			finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
												 key->sk_strategy);
			matches = FunctionCall2Coll(finfo, colloid, column->bv_values[0],
										value);
			break;
		case BTEqualStrategyNumber:

			/*
			 * In the equality case (WHERE col = someval), we want to return
			 * the current page range if the minimum value in the range <=
			 * scan key, and the maximum value >= scan key.
			 */
			finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
												 BTLessEqualStrategyNumber);
			matches = FunctionCall2Coll(finfo, colloid, column->bv_values[0],
										value);
			if (!DatumGetBool(matches))
				break;
			/* max() >= scankey */
			finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
												 BTGreaterEqualStrategyNumber);
			matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
										value);
			break;
		case BTGreaterEqualStrategyNumber:
		case BTGreaterStrategyNumber:
			/* compare the scan key against the range's maximum */
			finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
												 key->sk_strategy);
			matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
										value);
			break;
		default:
			/* shouldn't happen */
			elog(ERROR, "invalid strategy number %d", key->sk_strategy);
			matches = 0;
			break;
	}

	PG_RETURN_DATUM(matches);
}
/*
 * GIN extractQuery support for hstore: build the array of GIN keys to search
 * for, depending on the operator strategy.
 *
 * For containment the keys come from the query hstore itself (via
 * gin_extract_hstore); for the exists operators each queried key name is
 * turned into an item whose first flag byte is overwritten with KEYFLAG so
 * it matches only stored *keys*, not values.
 */
Datum
gin_extract_hstore_query(PG_FUNCTION_ARGS)
{
	StrategyNumber strategy = PG_GETARG_UINT16(2);

	if (strategy == HStoreContainsStrategyNumber)
	{
		/* hstore @> hstore: reuse the normal key extraction */
		PG_RETURN_DATUM(DirectFunctionCall2(gin_extract_hstore,
											PG_GETARG_DATUM(0),
											PG_GETARG_DATUM(1)));
	}
	else if (strategy == HStoreExistsStrategyNumber)
	{
		/* hstore ? text: exactly one key item to search for */
		text	   *item,
				   *query = PG_GETARG_TEXT_PP(0);
		int32	   *nentries = (int32 *) PG_GETARG_POINTER(1);
		Datum	   *entries = NULL;

		*nentries = 1;
		entries = (Datum *) palloc(sizeof(Datum));

		item = makeitem(VARDATA_ANY(query), VARSIZE_ANY_EXHDR(query));
		/* mark the item as a key (not a value) */
		*VARDATA(item) = KEYFLAG;
		entries[0] = PointerGetDatum(item);
		PG_RETURN_POINTER(entries);
	}
	else if (strategy == HStoreExistsAnyStrategyNumber ||
			 strategy == HStoreExistsAllStrategyNumber)
	{
		/* hstore ?| / ?& text[]: one key item per non-NULL array element */
		ArrayType  *query = PG_GETARG_ARRAYTYPE_P(0);
		Datum	   *key_datums;
		bool	   *key_nulls;
		int			key_count;
		int			i,
					j;
		int32	   *nentries = (int32 *) PG_GETARG_POINTER(1);
		Datum	   *entries = NULL;
		text	   *item;

		deconstruct_array(query,
						  TEXTOID, -1, false, 'i',
						  &key_datums, &key_nulls, &key_count);

		entries = (Datum *) palloc(sizeof(Datum) * key_count);

		for (i = 0, j = 0; i < key_count; ++i)
		{
			/* NULL array elements cannot match anything; drop them */
			if (key_nulls[i])
				continue;
			item = makeitem(VARDATA(key_datums[i]),
							VARSIZE(key_datums[i]) - VARHDRSZ);
			*VARDATA(item) = KEYFLAG;
			entries[j++] = PointerGetDatum(item);
		}

		/* -1 tells GIN there is nothing that could match */
		*nentries = j ? j : -1;
		PG_RETURN_POINTER(entries);
	}
	else
		elog(ERROR, "Unsupported strategy number: %d", strategy);

	/* not reached */
	PG_RETURN_POINTER(NULL);
}
/*
 * Internal handler function
 *
 * Shared implementation for the XSLT procedural-language handler: looks up
 * the function's source (an XSLT stylesheet), validates its output method
 * against the declared return type, and -- when 'execute' is true -- applies
 * the stylesheet to the first (XML) argument, passing the remaining
 * arguments as named stylesheet parameters.
 */
static Datum
handler_internal(Oid function_oid, FunctionCallInfo fcinfo, bool execute)
{
	HeapTuple	proctuple;
	Form_pg_proc pg_proc_entry;
	char	   *sourcecode;
	Datum		prosrcdatum;
	bool		isnull;
	const char **xslt_params;
	int			i;
	Oid		   *argtypes;
	char	  **argnames;
	char	   *argmodes;
	int			numargs;
	xmlDocPtr	ssdoc;		/* parsed stylesheet document */
	xmlDocPtr	xmldoc;		/* parsed input document */
	xmlDocPtr	resdoc;		/* transformation result */
	xsltStylesheetPtr stylesheet;
	int			reslen;
	xmlChar    *resstr;
	Datum		resdatum;

	if (CALLED_AS_TRIGGER(fcinfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("trigger functions not supported")));

	proctuple = SearchSysCache(PROCOID,
							   ObjectIdGetDatum(function_oid),
							   0, 0, 0);
	if (!HeapTupleIsValid(proctuple))
		elog(ERROR, "cache lookup failed for function %u", function_oid);

	prosrcdatum = SysCacheGetAttr(PROCOID, proctuple,
								  Anum_pg_proc_prosrc, &isnull);
	if (isnull)
		elog(ERROR, "null prosrc");

	sourcecode = pstrdup(DatumGetCString(DirectFunctionCall1(textout,
															 prosrcdatum)));

	/* allow one blank line at the start */
	if (sourcecode[0] == '\n')
		sourcecode++;

	numargs = get_func_arg_info(proctuple, &argtypes, &argnames, &argmodes);

	if (numargs < 1)
		ereport(ERROR,
				(errmsg("XSLT function must have at least one argument")));

	if (argtypes[0] != XMLOID)
		ereport(ERROR,
				(errmsg("first argument of XSLT function must have type XML")));

#if 0
	xsltSetGenericErrorFunc(NULL, xmlGenericError);
#endif

	ssdoc = xmlParseDoc((xmlChar *) sourcecode);	/* XXX use backend's
													 * xml_parse here() */
	stylesheet = xsltParseStylesheetDoc(ssdoc); /* XXX check error handling */

	if (!stylesheet)
		ereport(ERROR,
				(errmsg("could not parse stylesheet")));

	pg_proc_entry = (Form_pg_proc) GETSTRUCT(proctuple);

	/* cross-check the stylesheet's output method against the return type */
	{
		char	   *method;

		method = (char *) stylesheet->method;

		/*
		 * TODO: This is strictly speaking not correct because the default
		 * output method may be "html", but that can only detected at run
		 * time, so punt for now.
		 */
		if (!method)
			method = "xml";

		if (strcmp(method, "xml") == 0 &&
			pg_proc_entry->prorettype != XMLOID)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("XSLT stylesheet has output method \"xml\" but return type of function is not xml")));
		else if ((strcmp(method, "html") == 0 || strcmp(method, "text") == 0)
				 && pg_proc_entry->prorettype != TEXTOID
				 && pg_proc_entry->prorettype != VARCHAROID)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("XSLT stylesheet has output method \"%s\" but return type of function is not text or varchar",
							method)));
	}

	/* validation stops here */
	if (!execute)
	{
		ReleaseSysCache(proctuple);
		PG_RETURN_VOID();
	}

	/*
	 * execution begins here: build the NULL-terminated name/value parameter
	 * array expected by libxslt (two slots per parameter plus terminator)
	 */
	xslt_params = palloc(((numargs - 1) * 2 + 1) * sizeof(*xslt_params));

	for (i = 0; i < numargs - 1; i++)
	{
		xslt_params[i * 2] = argnames[i + 1];
		xslt_params[i * 2 + 1] = type_to_cstring(PG_GETARG_DATUM(i + 1),
												 argtypes[i + 1]);
	}
	xslt_params[i * 2] = NULL;

	{
		xmltype    *arg0 = PG_GETARG_XML_P(0);

		/* XXX this ought to use xml_parse() */
		xmldoc = xmlParseMemory((char *) VARDATA(arg0),
								VARSIZE(arg0) - VARHDRSZ);
		/*
		 * NOTE(review): a failed parse (xmldoc == NULL) is not checked here
		 * before xsltApplyStylesheet -- presumably libxslt tolerates it, but
		 * worth confirming.
		 */
	}

	resdoc = xsltApplyStylesheet(stylesheet, xmldoc, xslt_params);
	if (!resdoc)
		elog(ERROR, "xsltApplyStylesheet() failed");

	xmlFreeDoc(xmldoc);

	if (xsltSaveResultToString(&resstr, &reslen, resdoc, stylesheet) != 0)
		elog(ERROR, "result serialization failed");

	xsltFreeStylesheet(stylesheet);
	xmlFreeDoc(resdoc);
	xsltCleanupGlobals();
	xmlCleanupParser();

	/* convert the serialized result into the function's return type */
	resdatum = cstring_to_type(resstr ? (char *) resstr : "",
							   pg_proc_entry->prorettype);

	ReleaseSysCache(proctuple);
	PG_RETURN_DATUM(resdatum);
}
/*
 * session_user -- return the session user's name, converted to the name
 * type via its input function.
 */
Datum
session_user(PG_FUNCTION_ARGS)
{
	char	   *username = GetUserNameFromId(GetSessionUserId());

	PG_RETURN_DATUM(DirectFunctionCall1(namein,
										CStringGetDatum(username)));
}
/*
 * master_get_table_metadata takes in a relation name, and returns partition
 * related metadata for the relation. These metadata are grouped and returned
 * in a tuple, and are used by the caller when creating new shards. The
 * function errors if given relation does not exist, or is not partitioned.
 */
Datum
master_get_table_metadata(PG_FUNCTION_ARGS)
{
	text	   *relationName = PG_GETARG_TEXT_P(0);
	Oid			relationId = ResolveRelationId(relationName);

	DistTableCacheEntry *partitionEntry = NULL;
	char	   *partitionKeyString = NULL;
	TypeFuncClass resultTypeClass = 0;
	Datum		partitionKeyExpr = 0;
	Datum		partitionKey = 0;
	Datum		metadataDatum = 0;
	HeapTuple	metadataTuple = NULL;
	TupleDesc	metadataDescriptor = NULL;
	uint64		shardMaxSizeInBytes = 0;
	char		shardStorageType = 0;
	Datum		values[TABLE_METADATA_FIELDS];
	bool		isNulls[TABLE_METADATA_FIELDS];

	CheckCitusVersion(ERROR);

	/* find partition tuple for partitioned relation */
	partitionEntry = DistributedTableCacheEntry(relationId);

	/* create tuple descriptor for return value */
	resultTypeClass = get_call_result_type(fcinfo, NULL, &metadataDescriptor);
	if (resultTypeClass != TYPEFUNC_COMPOSITE)
	{
		ereport(ERROR, (errmsg("return type must be a row type")));
	}

	/* form heap tuple for table metadata */
	memset(values, 0, sizeof(values));
	memset(isNulls, false, sizeof(isNulls));

	partitionKeyString = partitionEntry->partitionKeyString;

	/* reference tables do not have partition key */
	if (partitionKeyString == NULL)
	{
		partitionKey = PointerGetDatum(NULL);
		isNulls[3] = true;
	}
	else
	{
		/* get decompiled expression tree for partition key */
		partitionKeyExpr =
			PointerGetDatum(cstring_to_text(partitionEntry->partitionKeyString));
		partitionKey = DirectFunctionCall2(pg_get_expr, partitionKeyExpr,
										   ObjectIdGetDatum(relationId));
	}

	/* ShardMaxSize is configured in kB; convert to bytes */
	shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L;

	/* get storage type */
	shardStorageType = ShardStorageType(relationId);

	/* field order must match the SQL-declared composite return type */
	values[0] = ObjectIdGetDatum(relationId);
	values[1] = shardStorageType;
	values[2] = partitionEntry->partitionMethod;
	values[3] = partitionKey;
	values[4] = Int32GetDatum(ShardReplicationFactor);
	values[5] = Int64GetDatum(shardMaxSizeInBytes);
	values[6] = Int32GetDatum(ShardPlacementPolicy);

	metadataTuple = heap_form_tuple(metadataDescriptor, values, isNulls);
	metadataDatum = HeapTupleGetDatum(metadataTuple);

	PG_RETURN_DATUM(metadataDatum);
}
/*
 * LWGEOM_distance_ellipsoid -- distance between two geometries on an
 * ellipsoid, delegating to geometry_distance_spheroid with the
 * use-spheroid flag forced to TRUE.
 */
Datum
LWGEOM_distance_ellipsoid(PG_FUNCTION_ARGS)
{
	Datum		geom1 = PG_GETARG_DATUM(0);
	Datum		geom2 = PG_GETARG_DATUM(1);
	Datum		spheroid = PG_GETARG_DATUM(2);

	PG_RETURN_DATUM(DirectFunctionCall4(geometry_distance_spheroid,
										geom1,
										geom2,
										spheroid,
										BoolGetDatum(TRUE)));
}
/*
 * Unpack the next item from the module-global input_buffer, requiring that
 * its type match dtype; error out on a type mismatch. Returns the item as a
 * Datum whose representation depends on the item type (see switch below).
 * Returns SQL NULL when the buffer is empty or exhausted. Frees and resets
 * input_buffer once its item count reaches zero.
 */
static Datum
dbms_pipe_unpack_message(PG_FUNCTION_ARGS, message_data_type dtype)
{
	Oid tupType;
	void *ptr;
	message_data_type type;
	int32 size;
	Datum result;
	message_data_type next_type;

	/* nothing buffered, or no more items to read: return SQL NULL */
	if (input_buffer == NULL ||
		input_buffer->items_count <= 0 ||
		input_buffer->next == NULL ||
		input_buffer->next->type == IT_NO_MORE_ITEMS)
		PG_RETURN_NULL();

	/* caller asked for a specific type; refuse anything else */
	next_type = input_buffer->next->type;
	if (next_type != dtype)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("datatype mismatch"),
				 errdetail("unpack unexpected type: %d", next_type)));

	ptr = unpack_field(input_buffer, &type, &size, &tupType);
	Assert(ptr != NULL);

	switch (type)
	{
		case IT_TIMESTAMPTZ:
			/* payload is a raw TimestampTz value */
			result = TimestampTzGetDatum(*(TimestampTz *) ptr);
			break;

		case IT_DATE:
			/* payload is a raw DateADT value */
			result = DateADTGetDatum(*(DateADT *) ptr);
			break;

		case IT_VARCHAR:
		case IT_NUMBER:
		case IT_BYTEA:
			/* payload is 'size' bytes; wrap them in a text/varlena datum */
			result = PointerGetDatum(cstring_to_text_with_len(ptr, size));
			break;

		case IT_RECORD:
			{
#if PG_VERSION_NUM >= 120000
				LOCAL_FCINFO(info, 3);
#else
				FunctionCallInfoData info_data;
				FunctionCallInfo info = &info_data;
#endif
				StringInfoData buf;
				text *data = cstring_to_text_with_len(ptr, size);

				/* present the record's wire form as a StringInfo for record_recv */
				buf.data = VARDATA(data);
				buf.len = VARSIZE(data) - VARHDRSZ;
				buf.maxlen = buf.len;
				buf.cursor = 0;

				/*
				 * Normally one would call record_recv() using DirectFunctionCall3,
				 * but that does not work since record_recv wants to cache some data
				 * using fcinfo->flinfo->fn_extra. So we need to pass it our own
				 * flinfo parameter.
				 */
				InitFunctionCallInfoData(*info, fcinfo->flinfo, 3, InvalidOid, NULL, NULL);

				init_args_3(info, PointerGetDatum(&buf),
							ObjectIdGetDatum(tupType),
							Int32GetDatum(-1));

				result = record_recv(info);
				break;
			}

		default:
			elog(ERROR, "unexpected type: %d", type);
			result = (Datum) 0;	/* keep compiler quiet */
	}

	/* buffer fully consumed: release it and clear the global pointer */
	if (input_buffer->items_count == 0)
	{
		pfree(input_buffer);
		input_buffer = NULL;
	}

	PG_RETURN_DATUM(result);
}
/* ------------------------------------------------
 * hash_bitmap_info()
 *
 * Get bitmap information for a particular overflow page
 *
 * Usage: SELECT * FROM hash_bitmap_info('con_hash_index'::regclass, 5);
 *
 * Returns a row (bitmapblkno, bitmapbit, bitstatus): the block number of the
 * bitmap page that tracks the given overflow page, the bit position within
 * that bitmap, and whether the bit is currently set. Superuser-only; errors
 * on non-hash indexes, other sessions' temp relations, out-of-range block
 * numbers, and blocks that are not overflow pages.
 * ------------------------------------------------
 */
Datum
hash_bitmap_info(PG_FUNCTION_ARGS)
{
	Oid			indexRelid = PG_GETARG_OID(0);
	uint64		ovflblkno = PG_GETARG_INT64(1);
	HashMetaPage metap;
	Buffer		metabuf,
				mapbuf;
	BlockNumber bitmapblkno;
	Page		mappage;
	bool		bit = false;
	TupleDesc	tupleDesc;
	Relation	indexRel;
	uint32		ovflbitno;
	int32		bitmappage,
				bitmapbit;
	HeapTuple	tuple;
	int			i,
				j;
	Datum		values[3];
	bool		nulls[3];
	uint32	   *freep;

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use raw page functions"))));

	indexRel = index_open(indexRelid, AccessShareLock);

	if (!IS_HASH(indexRel))
		elog(ERROR, "relation \"%s\" is not a hash index",
			 RelationGetRelationName(indexRel));

	/* other sessions' temp relations are not visible in our buffers */
	if (RELATION_IS_OTHER_TEMP(indexRel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary tables of other sessions")));

	if (ovflblkno >= RelationGetNumberOfBlocks(indexRel))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("block number " UINT64_FORMAT " is out of range for relation \"%s\"",
						ovflblkno, RelationGetRelationName(indexRel))));

	/* Read the metapage so we can determine which bitmap page to use */
	metabuf = _hash_getbuf(indexRel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
	metap = HashPageGetMeta(BufferGetPage(metabuf));

	/*
	 * Reject attempt to read the bit for a metapage or bitmap page; this is
	 * only meaningful for overflow pages.
	 */
	if (ovflblkno == 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid overflow block number %u",
						(BlockNumber) ovflblkno)));
	for (i = 0; i < metap->hashm_nmaps; i++)
		if (metap->hashm_mapp[i] == ovflblkno)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("invalid overflow block number %u",
							(BlockNumber) ovflblkno)));

	/*
	 * Identify overflow bit number.  This will error out for primary bucket
	 * pages, and we've already rejected the metapage and bitmap pages above.
	 */
	ovflbitno = _hash_ovflblkno_to_bitno(metap, (BlockNumber) ovflblkno);

	/* split the overall bit number into (bitmap page, bit within page) */
	bitmappage = ovflbitno >> BMPG_SHIFT(metap);
	bitmapbit = ovflbitno & BMPG_MASK(metap);

	if (bitmappage >= metap->hashm_nmaps)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid overflow block number %u",
						(BlockNumber) ovflblkno)));

	bitmapblkno = metap->hashm_mapp[bitmappage];

	/* done with the metapage; release before reading the bitmap page */
	_hash_relbuf(indexRel, metabuf);

	/* Check the status of bitmap bit for overflow page */
	mapbuf = _hash_getbuf(indexRel, bitmapblkno, HASH_READ, LH_BITMAP_PAGE);
	mappage = BufferGetPage(mapbuf);
	freep = HashPageGetBitmap(mappage);

	bit = ISSET(freep, bitmapbit) != 0;

	_hash_relbuf(indexRel, mapbuf);
	index_close(indexRel, AccessShareLock);

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	tupleDesc = BlessTupleDesc(tupleDesc);

	MemSet(nulls, 0, sizeof(nulls));

	j = 0;
	values[j++] = Int64GetDatum((int64) bitmapblkno);
	values[j++] = Int32GetDatum(bitmapbit);
	values[j++] = BoolGetDatum(bit);

	tuple = heap_form_tuple(tupleDesc, values, nulls);

	PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
/*
 * Compute an xlog file name and decimal byte offset given a WAL location,
 * such as is returned by pg_stop_backup() or pg_xlog_switch().
 *
 * Note that a location exactly at a segment boundary is taken to be in
 * the previous segment.  This is usually the right thing, since the
 * expected usage is to determine which xlog file(s) are ready to archive.
 *
 * Returns a two-column row (file_name text, file_offset int4).  Errors
 * if called during recovery or if the input does not parse as an
 * "XXXXXXXX/XXXXXXXX" location.
 */
Datum
pg_xlogfile_name_offset(PG_FUNCTION_ARGS)
{
	text	   *location = PG_GETARG_TEXT_P(0);
	char	   *locationstr;
	unsigned int uxlogid;
	unsigned int uxrecoff;
	uint32		xlogid;
	uint32		xlogseg;
	uint32		xrecoff;
	XLogRecPtr	locationpoint;
	char		xlogfilename[MAXFNAMELEN];
	Datum		values[2];
	bool		isnull[2];
	TupleDesc	resultTupleDesc;
	HeapTuple	resultHeapTuple;
	Datum		result;

	if (RecoveryInProgress())
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("recovery is in progress"),
				 errhint("pg_xlogfile_name_offset() cannot be executed during recovery.")));

	/*
	 * Read input and parse
	 */
	locationstr = text_to_cstring(location);

	validate_xlog_location(locationstr);

	if (sscanf(locationstr, "%X/%X", &uxlogid, &uxrecoff) != 2)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("could not parse transaction log location \"%s\"",
						locationstr)));

	locationpoint.xlogid = uxlogid;
	locationpoint.xrecoff = uxrecoff;

	/*
	 * Construct a tuple descriptor for the result row.  This must match this
	 * function's pg_proc entry!
	 */
	resultTupleDesc = CreateTemplateTupleDesc(2, false);
	TupleDescInitEntry(resultTupleDesc, (AttrNumber) 1, "file_name",
					   TEXTOID, -1, 0);
	TupleDescInitEntry(resultTupleDesc, (AttrNumber) 2, "file_offset",
					   INT4OID, -1, 0);

	resultTupleDesc = BlessTupleDesc(resultTupleDesc);

	/*
	 * xlogfilename
	 */
	/* boundary locations map to the previous segment (see header comment) */
	XLByteToPrevSeg(locationpoint, xlogid, xlogseg);
	XLogFileName(xlogfilename, ThisTimeLineID, xlogid, xlogseg);

	values[0] = CStringGetTextDatum(xlogfilename);
	isnull[0] = false;

	/*
	 * offset
	 */
	/* byte offset within the segment computed above */
	xrecoff = locationpoint.xrecoff - xlogseg * XLogSegSize;

	values[1] = UInt32GetDatum(xrecoff);
	isnull[1] = false;

	/*
	 * Tuple jam: Having first prepared your Datums, then squash together
	 */
	resultHeapTuple = heap_form_tuple(resultTupleDesc, values, isnull);

	result = HeapTupleGetDatum(resultHeapTuple);

	PG_RETURN_DATUM(result);
}
/* ------------------------------------------------
 * hash_metapage_info()
 *
 * Get the meta-page information for a hash index
 *
 * Usage: SELECT * FROM hash_metapage_info(get_raw_page('con_hash_index', 0))
 *
 * Takes a raw metapage image (bytea) and returns one row of the metapage
 * fields, with the spares and mapp arrays returned as int8 arrays.
 * Superuser-only.
 * ------------------------------------------------
 */
Datum
hash_metapage_info(PG_FUNCTION_ARGS)
{
	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
	Page		page;
	HashMetaPageData *metad;
	TupleDesc	tupleDesc;
	HeapTuple	tuple;
	int			i,
				j;
	Datum		values[16];
	bool		nulls[16];
	Datum		spares[HASH_MAX_SPLITPOINTS];
	Datum		mapp[HASH_MAX_BITMAPS];

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use raw page functions"))));

	/* validates the page image and errors if it is not a metapage */
	page = verify_hash_page(raw_page, LH_META_PAGE);

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	tupleDesc = BlessTupleDesc(tupleDesc);

	metad = HashPageGetMeta(page);

	MemSet(nulls, 0, sizeof(nulls));

	/*
	 * Copy the scalar metapage fields, widening each to a type that can
	 * represent its full unsigned range.
	 */
	j = 0;
	values[j++] = Int64GetDatum((int64) metad->hashm_magic);
	values[j++] = Int64GetDatum((int64) metad->hashm_version);
	values[j++] = Float8GetDatum(metad->hashm_ntuples);
	values[j++] = Int32GetDatum((int32) metad->hashm_ffactor);
	values[j++] = Int32GetDatum((int32) metad->hashm_bsize);
	values[j++] = Int32GetDatum((int32) metad->hashm_bmsize);
	values[j++] = Int32GetDatum((int32) metad->hashm_bmshift);
	values[j++] = Int64GetDatum((int64) metad->hashm_maxbucket);
	values[j++] = Int64GetDatum((int64) metad->hashm_highmask);
	values[j++] = Int64GetDatum((int64) metad->hashm_lowmask);
	values[j++] = Int64GetDatum((int64) metad->hashm_ovflpoint);
	values[j++] = Int64GetDatum((int64) metad->hashm_firstfree);
	values[j++] = Int64GetDatum((int64) metad->hashm_nmaps);
	values[j++] = ObjectIdGetDatum((Oid) metad->hashm_procid);

	/* spares array as int8[] */
	for (i = 0; i < HASH_MAX_SPLITPOINTS; i++)
		spares[i] = Int64GetDatum((int64) metad->hashm_spares[i]);
	values[j++] = PointerGetDatum(construct_array(spares,
												  HASH_MAX_SPLITPOINTS,
												  INT8OID,
												  8, FLOAT8PASSBYVAL, 'd'));

	/* bitmap-page block numbers as int8[] */
	for (i = 0; i < HASH_MAX_BITMAPS; i++)
		mapp[i] = Int64GetDatum((int64) metad->hashm_mapp[i]);
	values[j++] = PointerGetDatum(construct_array(mapp,
												  HASH_MAX_BITMAPS,
												  INT8OID,
												  8, FLOAT8PASSBYVAL, 'd'));

	tuple = heap_form_tuple(tupleDesc, values, nulls);

	PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
/*
 * stat a file
 *
 * Returns a row (size, access, modification, change, creation, isdir) for
 * the named file.  Exactly one of "change" (Unix) or "creation" (Windows)
 * is non-NULL, since st_ctime means a different thing on each platform.
 * Superuser-only; errors if the file cannot be stat'd.
 */
Datum
pg_stat_file(PG_FUNCTION_ARGS)
{
	text	   *filename_t = PG_GETARG_TEXT_P(0);
	char	   *filename;
	struct stat fst;
	Datum		values[6];
	bool		isnull[6];
	HeapTuple	tuple;
	TupleDesc	tupdesc;

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to get file information"))));

	/* also validates the path against server policy */
	filename = convert_and_check_filename(filename_t);

	if (stat(filename, &fst) < 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not stat file \"%s\": %m", filename)));

	/*
	 * This record type had better match the output parameters declared for me
	 * in pg_proc.h.
	 */
	tupdesc = CreateTemplateTupleDesc(6, false);
	TupleDescInitEntry(tupdesc, (AttrNumber) 1,
					   "size", INT8OID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 2,
					   "access", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 3,
					   "modification", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 4,
					   "change", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 5,
					   "creation", TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 6,
					   "isdir", BOOLOID, -1, 0);
	BlessTupleDesc(tupdesc);

	memset(isnull, false, sizeof(isnull));

	values[0] = Int64GetDatum((int64) fst.st_size);
	values[1] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_atime));
	values[2] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_mtime));

	/* Unix has file status change time, while Win32 has creation time */
#if !defined(WIN32) && !defined(__CYGWIN__)
	values[3] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_ctime));
	isnull[4] = true;
#else
	isnull[3] = true;
	values[4] = TimestampTzGetDatum(time_t_to_timestamptz(fst.st_ctime));
#endif
	values[5] = BoolGetDatum(S_ISDIR(fst.st_mode));

	tuple = heap_form_tuple(tupdesc, values, isnull);

	pfree(filename);

	PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
/*
 * spheretrans
 *
 * Identity function: hands the first argument datum back unchanged.
 */
Datum
spheretrans(PG_FUNCTION_ARGS)
{
	PG_RETURN_DATUM(PG_GETARG_DATUM(0));
}
/*
 * pg_stat_get_archiver
 *
 * Return one row of WAL-archiver statistics: archived count, last archived
 * WAL file and time, failed count, last failed WAL file and time, and the
 * stats reset time.  Empty strings and zero timestamps in the collected
 * stats are reported as SQL NULLs.
 */
Datum
pg_stat_get_archiver(PG_FUNCTION_ARGS)
{
	TupleDesc	tupdesc;
	Datum		values[7];
	bool		nulls[7];
	PgStat_ArchiverStats *archiver_stats;

	/* Initialise values and NULL flags arrays */
	MemSet(values, 0, sizeof(values));
	MemSet(nulls, 0, sizeof(nulls));

	/* Initialise attributes information in the tuple descriptor */
	tupdesc = CreateTemplateTupleDesc(7, false);
	TupleDescInitEntry(tupdesc, (AttrNumber) 1, "archived_count",
					   INT8OID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 2, "last_archived_wal",
					   TEXTOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 3, "last_archived_time",
					   TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 4, "failed_count",
					   INT8OID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 5, "last_failed_wal",
					   TEXTOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 6, "last_failed_time",
					   TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 7, "stats_reset",
					   TIMESTAMPTZOID, -1, 0);

	BlessTupleDesc(tupdesc);

	/* Get statistics about the archiver process */
	archiver_stats = pgstat_fetch_stat_archiver();

	/* Fill values and NULLs */
	values[0] = Int64GetDatum(archiver_stats->archived_count);
	if (*(archiver_stats->last_archived_wal) == '\0')
		nulls[1] = true;
	else
		values[1] = CStringGetTextDatum(archiver_stats->last_archived_wal);

	if (archiver_stats->last_archived_timestamp == 0)
		nulls[2] = true;
	else
		values[2] = TimestampTzGetDatum(archiver_stats->last_archived_timestamp);

	values[3] = Int64GetDatum(archiver_stats->failed_count);
	if (*(archiver_stats->last_failed_wal) == '\0')
		nulls[4] = true;
	else
		values[4] = CStringGetTextDatum(archiver_stats->last_failed_wal);

	if (archiver_stats->last_failed_timestamp == 0)
		nulls[5] = true;
	else
		values[5] = TimestampTzGetDatum(archiver_stats->last_failed_timestamp);

	if (archiver_stats->stat_reset_timestamp == 0)
		nulls[6] = true;
	else
		values[6] = TimestampTzGetDatum(archiver_stats->stat_reset_timestamp);

	/* Returns the record as Datum */
	PG_RETURN_DATUM(HeapTupleGetDatum(
									  heap_form_tuple(tupdesc, values, nulls)));
}
Datum gtrgm_out(PG_FUNCTION_ARGS) { elog(ERROR, "not implemented"); PG_RETURN_DATUM(0); }
/*
 * hstore_populate_record(record, hstore) -> record
 *
 * Fill the fields of a composite value from an hstore: each column whose
 * name appears as a key in the hstore is replaced by the corresponding
 * value, run through the column type's input function.  When the first
 * argument is NULL its declared rowtype supplies the column layout and
 * every missing key is still pushed through the input function so that
 * domain NOT NULL / check constraints are enforced.  Per-column input
 * function info is cached in fn_extra across calls.
 */
Datum
hstore_populate_record(PG_FUNCTION_ARGS)
{
	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	HStore	   *hs;
	HEntry	   *entries;
	char	   *ptr;
	HeapTupleHeader rec;
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tuple;
	HeapTuple	rettuple;
	RecordIOData *my_extra;
	int			ncolumns;
	int			i;
	Datum	   *values;
	bool	   *nulls;

	if (!type_is_rowtype(argtype))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("first argument must be a rowtype")));

	if (PG_ARGISNULL(0))
	{
		if (PG_ARGISNULL(1))
			PG_RETURN_NULL();

		rec = NULL;

		/*
		 * have no tuple to look at, so the only source of type info is the
		 * argtype. The lookup_rowtype_tupdesc call below will error out if we
		 * don't have a known composite type oid here.
		 */
		tupType = argtype;
		tupTypmod = -1;
	}
	else
	{
		rec = PG_GETARG_HEAPTUPLEHEADER(0);

		/* NULL hstore: nothing to overwrite, return the record as-is */
		if (PG_ARGISNULL(1))
			PG_RETURN_POINTER(rec);

		/* Extract type info from the tuple itself */
		tupType = HeapTupleHeaderGetTypeId(rec);
		tupTypmod = HeapTupleHeaderGetTypMod(rec);
	}

	hs = PG_GETARG_HS(1);
	entries = ARRPTR(hs);
	ptr = STRPTR(hs);

	/*
	 * if the input hstore is empty, we can only skip the rest if we were
	 * passed in a non-null record, since otherwise there may be issues with
	 * domain nulls.
	 */
	if (HS_COUNT(hs) == 0 && rec)
		PG_RETURN_POINTER(rec);

	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	if (rec)
	{
		/* Build a temporary HeapTuple control structure */
		tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
		ItemPointerSetInvalid(&(tuple.t_self));
		tuple.t_tableOid = InvalidOid;
		tuple.t_data = rec;
	}

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		/* first call, or column count changed: (re)allocate the cache */
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		/* record type changed: reset the per-column cache entries */
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	if (rec)
	{
		/* Break down the tuple into fields */
		heap_deform_tuple(&tuple, tupdesc, values, nulls);
	}
	else
	{
		/* no input record: start from an all-NULL row */
		for (i = 0; i < ncolumns; ++i)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
		}
	}

	for (i = 0; i < ncolumns; ++i)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *value;
		int			idx;
		int			vallen;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
		{
			nulls[i] = true;
			continue;
		}

		/* look the column name up as a key in the hstore */
		idx = hstoreFindKey(hs, 0,
							NameStr(tupdesc->attrs[i]->attname),
							strlen(NameStr(tupdesc->attrs[i]->attname)));

		/*
		 * we can't just skip here if the key wasn't found since we might have
		 * a domain to deal with. If we were passed in a non-null record
		 * datum, we assume that the existing values are valid (if they're
		 * not, then it's not our fault), but if we were passed in a null,
		 * then every field which we don't populate needs to be run through
		 * the input function just in case it's a domain type.
		 */
		if (idx < 0 && rec)
			continue;

		/*
		 * Prepare to convert the column value from text
		 */
		if (column_info->column_type != column_type)
		{
			getTypeInputInfo(column_type,
							 &column_info->typiofunc,
							 &column_info->typioparam);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		if (idx < 0 || HS_VALISNULL(entries, idx))
		{
			/*
			 * need InputFunctionCall to happen even for nulls, so that domain
			 * checks are done
			 */
			values[i] = InputFunctionCall(&column_info->proc, NULL,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = true;
		}
		else
		{
			/* copy the value out as a NUL-terminated C string for input fn */
			vallen = HS_VALLEN(entries, idx);
			value = palloc(1 + vallen);
			memcpy(value, HS_VAL(entries, ptr, idx), vallen);
			value[vallen] = 0;

			values[i] = InputFunctionCall(&column_info->proc, value,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = false;
		}
	}

	rettuple = heap_form_tuple(tupdesc, values, nulls);

	ReleaseTupleDesc(tupdesc);

	PG_RETURN_DATUM(HeapTupleGetDatum(rettuple));
}
/* ------------------------------------------------
 * bt_metap()
 *
 * Get a btree's meta-page information
 *
 * Usage: SELECT * FROM bt_metap('t1_pkey')
 *
 * Reads block 0 of the named btree index and returns its metapage fields
 * as one row.  The last two columns come from extended metadata that only
 * exists from BTREE_NOVAC_VERSION on; older metapages report the defaults
 * "0" and "-1".  Superuser-only.
 * ------------------------------------------------
 */
Datum
bt_metap(PG_FUNCTION_ARGS)
{
	text	   *relname = PG_GETARG_TEXT_PP(0);
	Datum		result;
	Relation	rel;
	RangeVar   *relrv;
	BTMetaPageData *metad;
	TupleDesc	tupleDesc;
	int			j;
	char	   *values[8];
	Buffer		buffer;
	Page		page;
	HeapTuple	tuple;

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use pageinspect functions"))));

	relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
	rel = relation_openrv(relrv, AccessShareLock);

	if (!IS_INDEX(rel) || !IS_BTREE(rel))
		elog(ERROR, "relation \"%s\" is not a btree index",
			 RelationGetRelationName(rel));

	/*
	 * Reject attempts to read non-local temporary relations; we would be
	 * likely to get wrong data since we have no visibility into the owning
	 * session's local buffers.
	 */
	if (RELATION_IS_OTHER_TEMP(rel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary tables of other sessions")));

	/* metapage is always block 0 of a btree */
	buffer = ReadBuffer(rel, 0);
	LockBuffer(buffer, BUFFER_LOCK_SHARE);

	page = BufferGetPage(buffer);
	metad = BTPageGetMeta(page);

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	j = 0;
	values[j++] = psprintf("%d", metad->btm_magic);
	values[j++] = psprintf("%d", metad->btm_version);
	values[j++] = psprintf("%d", metad->btm_root);
	values[j++] = psprintf("%d", metad->btm_level);
	values[j++] = psprintf("%d", metad->btm_fastroot);
	values[j++] = psprintf("%d", metad->btm_fastlevel);

	/*
	 * Get values of extended metadata if available, use default values
	 * otherwise.
	 */
	if (metad->btm_version >= BTREE_NOVAC_VERSION)
	{
		values[j++] = psprintf("%u", metad->btm_oldest_btpo_xact);
		values[j++] = psprintf("%f", metad->btm_last_cleanup_num_heap_tuples);
	}
	else
	{
		values[j++] = "0";
		values[j++] = "-1";
	}

	tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc),
								   values);

	result = HeapTupleGetDatum(tuple);

	UnlockReleaseBuffer(buffer);
	relation_close(rel, AccessShareLock);

	PG_RETURN_DATUM(result);
}
/* ------------------------------------------------------
 * pgstatginindex()
 *
 * Usage: SELECT * FROM pgstatginindex('ginindex');
 *
 * Reads the GIN metapage and returns (version, pending_pages,
 * pending_tuples) for the index.  Superuser-only; errors on non-GIN
 * relations and other sessions' temp indexes.
 * ------------------------------------------------------
 */
Datum
pgstatginindex(PG_FUNCTION_ARGS)
{
	Oid			relid = PG_GETARG_OID(0);
	Relation	rel;
	Buffer		buffer;
	Page		page;
	GinMetaPageData *metadata;
	GinIndexStat stats;
	HeapTuple	tuple;
	TupleDesc	tupleDesc;
	Datum		values[3];
	bool		nulls[3] = {false, false, false};
	Datum		result;

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use pgstattuple functions"))));

	rel = relation_open(relid, AccessShareLock);

	if (!IS_INDEX(rel) || !IS_GIN(rel))
		elog(ERROR, "relation \"%s\" is not a GIN index",
			 RelationGetRelationName(rel));

	/*
	 * Reject attempts to read non-local temporary relations; we would be
	 * likely to get wrong data since we have no visibility into the owning
	 * session's local buffers.
	 */
	if (RELATION_IS_OTHER_TEMP(rel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary indexes of other sessions")));

	/*
	 * Read metapage
	 */
	buffer = ReadBuffer(rel, GIN_METAPAGE_BLKNO);
	LockBuffer(buffer, GIN_SHARE);
	page = BufferGetPage(buffer);
	metadata = GinPageGetMeta(page);

	/* copy the fields we need before releasing the buffer */
	stats.version = metadata->ginVersion;
	stats.pending_pages = metadata->nPendingPages;
	stats.pending_tuples = metadata->nPendingHeapTuples;

	UnlockReleaseBuffer(buffer);
	relation_close(rel, AccessShareLock);

	/*
	 * Build a tuple descriptor for our result type
	 */
	if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	values[0] = Int32GetDatum(stats.version);
	values[1] = UInt32GetDatum(stats.pending_pages);
	values[2] = Int64GetDatum(stats.pending_tuples);

	/*
	 * Build and return the tuple
	 */
	tuple = heap_form_tuple(tupleDesc, values, nulls);
	result = HeapTupleGetDatum(tuple);

	PG_RETURN_DATUM(result);
}
/*
 * g_cube_compress
 *
 * GiST compress method for cube: entries are stored as-is, so the input
 * datum is simply passed through unchanged.
 */
Datum
g_cube_compress(PG_FUNCTION_ARGS)
{
	Datum entry = PG_GETARG_DATUM(0);

	PG_RETURN_DATUM(entry);
}
Datum ghstore_out(PG_FUNCTION_ARGS) { elog(ERROR, "Not implemented"); PG_RETURN_DATUM(0); }
/* ------------------------------------------------------
 * pgstatindex()
 *
 * Usage: SELECT * FROM pgstatindex('t1_pkey');
 *
 * Scans every page of a btree index (except the metapage) and returns one
 * row of summary statistics: version, tree level, total index size, root
 * block, page counts by kind, average leaf density, and leaf fragmentation.
 * Superuser-only.
 * ------------------------------------------------------
 */
Datum
pgstatindex(PG_FUNCTION_ARGS)
{
	text	   *relname = PG_GETARG_TEXT_P(0);
	Relation	rel;
	RangeVar   *relrv;
	Datum		result;
	BlockNumber nblocks;
	BlockNumber blkno;
	BTIndexStat indexStat;

	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use pgstattuple functions"))));

	relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
	rel = relation_openrv(relrv, AccessShareLock);

	if (!IS_INDEX(rel) || !IS_BTREE(rel))
		elog(ERROR, "relation \"%s\" is not a btree index",
			 RelationGetRelationName(rel));

	/*
	 * Reject attempts to read non-local temporary relations; we would be
	 * likely to get wrong data since we have no visibility into the owning
	 * session's local buffers.
	 */
	if (RELATION_IS_OTHER_TEMP(rel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary tables of other sessions")));

	/*
	 * Read metapage
	 */
	{
		Buffer		buffer = ReadBuffer(rel, 0);
		Page		page = BufferGetPage(buffer);
		BTMetaPageData *metad = BTPageGetMeta(page);

		indexStat.version = metad->btm_version;
		indexStat.level = metad->btm_level;
		indexStat.root_blkno = metad->btm_root;

		ReleaseBuffer(buffer);
	}

	/* -- init counters -- */
	indexStat.root_pages = 0;
	indexStat.internal_pages = 0;
	indexStat.leaf_pages = 0;
	indexStat.empty_pages = 0;
	indexStat.deleted_pages = 0;

	indexStat.max_avail = 0;
	indexStat.free_space = 0;

	indexStat.fragments = 0;

	/*
	 * Scan all blocks except the metapage
	 */
	nblocks = RelationGetNumberOfBlocks(rel);

	for (blkno = 1; blkno < nblocks; blkno++)
	{
		Buffer		buffer;
		Page		page;
		BTPageOpaque opaque;

		/* Read and lock buffer */
		buffer = ReadBuffer(rel, blkno);
		LockBuffer(buffer, BUFFER_LOCK_SHARE);

		page = BufferGetPage(buffer);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);

		/* Determine page type, and update totals */

		if (P_ISLEAF(opaque))
		{
			int			max_avail;

			/* usable space on the page, ignoring the special area */
			max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData);
			indexStat.max_avail += max_avail;
			indexStat.free_space += PageGetFreeSpace(page);

			indexStat.leaf_pages++;

			/*
			 * If the next leaf is on an earlier block, it means a
			 * fragmentation.
			 */
			if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno)
				indexStat.fragments++;
		}
		else if (P_ISDELETED(opaque))
			indexStat.deleted_pages++;
		else if (P_IGNORE(opaque))
			indexStat.empty_pages++;
		else if (P_ISROOT(opaque))
			indexStat.root_pages++;
		else
			indexStat.internal_pages++;

		/* Unlock and release buffer */
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		ReleaseBuffer(buffer);
	}

	relation_close(rel, AccessShareLock);

	/*----------------------------
	 * Build a result tuple
	 *----------------------------
	 */
	{
		TupleDesc	tupleDesc;
		int			j;
		char	   *values[10];
		HeapTuple	tuple;

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
			elog(ERROR, "return type must be a row type");

		j = 0;
		/* version */
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", indexStat.version);
		/* tree level */
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", indexStat.level);
		/* total index size in bytes (all counted pages) */
		values[j] = palloc(32);
		snprintf(values[j++], 32, INT64_FORMAT,
				 (indexStat.root_pages +
				  indexStat.leaf_pages +
				  indexStat.internal_pages +
				  indexStat.deleted_pages +
				  indexStat.empty_pages) * BLCKSZ);
		/* root block number */
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%u", indexStat.root_blkno);
		/* page counts by kind */
		values[j] = palloc(32);
		snprintf(values[j++], 32, INT64_FORMAT, indexStat.internal_pages);
		values[j] = palloc(32);
		snprintf(values[j++], 32, INT64_FORMAT, indexStat.leaf_pages);
		values[j] = palloc(32);
		snprintf(values[j++], 32, INT64_FORMAT, indexStat.empty_pages);
		values[j] = palloc(32);
		snprintf(values[j++], 32, INT64_FORMAT, indexStat.deleted_pages);
		/* average leaf density (percent of usable space occupied) */
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%.2f",
				 100.0 - (double) indexStat.free_space / (double) indexStat.max_avail * 100.0);
		/* leaf fragmentation (percent of fragmented leaf pages) */
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%.2f",
				 (double) indexStat.fragments / (double) indexStat.leaf_pages * 100.0);

		tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc),
									   values);

		result = HeapTupleGetDatum(tuple);
	}

	PG_RETURN_DATUM(result);
}
/**
 * @brief Entry point of the user-defined function for pg_bulkload.
 *
 * Parses the option datum into a Reader and a Writer, then runs the load in
 * three steps: (1) initialize reader/writer/checker/parser, (2) loop reading
 * tuples and inserting them until the reader is exhausted or rd->limit is
 * reached, (3) close writer then reader (in that order, because the
 * shmem_exit callbacks are a simple stack) and report statistics.  On error
 * both PG_TRY blocks close whatever was opened before re-throwing.
 * Superuser-only.
 *
 * @return Returns number of loaded tuples.  If the case of errors, -1 will be
 * returned.
 */
Datum
pg_bulkload(PG_FUNCTION_ARGS)
{
	Reader	   *rd = NULL;
	Writer	   *wt = NULL;
	Datum		options;
	MemoryContext ctx;
	MemoryContext ccxt;
	PGRUsage	ru0;
	PGRUsage	ru1;
	int64		count;
	int64		parse_errors;
	int64		skip;
	WriterResult ret;
	char	   *start;
	char	   *end;
	float8		system;
	float8		user;
	float8		duration;
	TupleDesc	tupdesc;
	Datum		values[PG_BULKLOAD_COLS];
	bool		nulls[PG_BULKLOAD_COLS];
	HeapTuple	result;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	BULKLOAD_PROFILE_PUSH();

	pg_rusage_init(&ru0);

	/* must be the super user */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use pg_bulkload")));

	options = PG_GETARG_DATUM(0);

	/* remember the caller's context for error handling below */
	ccxt = CurrentMemoryContext;

	/*
	 * STEP 1: Initialization
	 */

	/* parse options and create reader and writer */
	ParseOptions(options, &rd, &wt, ru0.tv.tv_sec);

	/* initialize reader */
	ReaderInit(rd);

	/*
	 * We need to split PG_TRY block because gcc optimizes if-branches with
	 * longjmp codes too much. Local variables initialized in either branch
	 * cannot be handled another branch.
	 */
	PG_TRY();
	{
		/* truncate heap */
		if (wt->truncate)
			TruncateTable(wt->relid);

		/* initialize writer */
		WriterInit(wt);

		/* initialize checker */
		CheckerInit(&rd->checker, wt->rel, wt->tchecker);

		/* initialize parser */
		ParserInit(rd->parser, &rd->checker, rd->infile, wt->desc,
				   wt->multi_process, PG_GET_COLLATION());
	}
	PG_CATCH();
	{
		/* tear down whatever got opened before re-throwing */
		if (rd)
			ReaderClose(rd, true);
		if (wt)
			WriterClose(wt, true);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* No throwable codes here! */

	PG_TRY();
	{
		/* create logger */
		CreateLogger(rd->logfile, wt->verbose, rd->infile[0] == ':');

		start = timeval_to_cstring(ru0.tv);
		LoggerLog(INFO, "\npg_bulkload %s on %s\n\n",
				  PG_BULKLOAD_VERSION, start);

		ReaderDumpParams(rd);
		WriterDumpParams(wt);
		LoggerLog(INFO, "\n");

		BULKLOAD_PROFILE(&prof_init);

		/*
		 * STEP 2: Build heap
		 */

		/* Switch into its memory context */
		Assert(wt->context);
		ctx = MemoryContextSwitchTo(wt->context);

		/* Loop for each input file record. */
		while (wt->count < rd->limit)
		{
			HeapTuple	tuple;

			CHECK_FOR_INTERRUPTS();

			/* read tuple */
			BULKLOAD_PROFILE_PUSH();
			tuple = ReaderNext(rd);
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_reader);
			if (tuple == NULL)
				break;

			/* write tuple */
			BULKLOAD_PROFILE_PUSH();
			WriterInsert(wt, tuple);
			wt->count += 1;
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_writer);

			/* per-tuple context: reset after each insert */
			MemoryContextReset(wt->context);
			BULKLOAD_PROFILE(&prof_reset);
		}

		MemoryContextSwitchTo(ctx);

		/*
		 * STEP 3: Finalize heap and merge indexes
		 */

		count = wt->count;
		parse_errors = rd->parse_errors;

		/*
		 * close writer first and reader second because shmem_exit callback
		 * is managed by a simple stack.
		 */
		ret = WriterClose(wt, false);
		wt = NULL;
		skip = ReaderClose(rd, false);
		rd = NULL;
	}
	PG_CATCH();
	{
		ErrorData  *errdata;
		MemoryContext ecxt;

		/* log the error from the caller's context, then clean up */
		ecxt = MemoryContextSwitchTo(ccxt);
		errdata = CopyErrorData();
		LoggerLog(INFO, "%s\n", errdata->message);
		FreeErrorData(errdata);

		/* close writer first, and reader second */
		if (wt)
			WriterClose(wt, true);
		if (rd)
			ReaderClose(rd, true);

		MemoryContextSwitchTo(ecxt);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* rows replaced by duplicates are not counted as loaded */
	count -= ret.num_dup_new;

	LoggerLog(INFO, "\n"
			  "  " int64_FMT " Rows skipped.\n"
			  "  " int64_FMT " Rows successfully loaded.\n"
			  "  " int64_FMT " Rows not loaded due to parse errors.\n"
			  "  " int64_FMT " Rows not loaded due to duplicate errors.\n"
			  "  " int64_FMT " Rows replaced with new rows.\n\n",
			  skip, count, parse_errors, ret.num_dup_new, ret.num_dup_old);

	pg_rusage_init(&ru1);
	system = diffTime(ru1.ru.ru_stime, ru0.ru.ru_stime);
	user = diffTime(ru1.ru.ru_utime, ru0.ru.ru_utime);
	duration = diffTime(ru1.tv, ru0.tv);
	end = timeval_to_cstring(ru1.tv);

	memset(nulls, 0, sizeof(nulls));
	values[0] = Int64GetDatum(skip);
	values[1] = Int64GetDatum(count);
	values[2] = Int64GetDatum(parse_errors);
	values[3] = Int64GetDatum(ret.num_dup_new);
	values[4] = Int64GetDatum(ret.num_dup_old);
	values[5] = Float8GetDatumFast(system);
	values[6] = Float8GetDatumFast(user);
	values[7] = Float8GetDatumFast(duration);

	LoggerLog(INFO,
			  "Run began on %s\n"
			  "Run ended on %s\n\n"
			  "CPU %.2fs/%.2fu sec elapsed %.2f sec\n",
			  start, end, system, user, duration);

	LoggerClose();

	result = heap_form_tuple(tupdesc, values, nulls);

	BULKLOAD_PROFILE(&prof_fini);
	BULKLOAD_PROFILE_POP();
	BULKLOAD_PROFILE_PRINT();

	PG_RETURN_DATUM(HeapTupleGetDatum(result));
}
/*
 * SQL-functions CURRENT_USER, SESSION_USER
 *
 * Look up the name of the current user id and hand it back as a Name
 * datum via the namein input function.
 */
Datum
current_user(PG_FUNCTION_ARGS)
{
	char	   *username = GetUserNameFromId(GetUserId(), false);

	PG_RETURN_DATUM(DirectFunctionCall1(namein, CStringGetDatum(username)));
}
/*
 * gtsquery_decompress
 *
 * GiST decompress method for tsquery keys: nothing was transformed on the
 * way in, so the stored datum is returned untouched.
 */
Datum
gtsquery_decompress(PG_FUNCTION_ARGS)
{
	Datum key = PG_GETARG_DATUM(0);

	PG_RETURN_DATUM(key);
}