/*
 * btint28cmp
 *		Btree comparison support: smallint vs. bigint.
 *
 * Returns -1 / 0 / +1 per the usual comparator contract; the int16 side
 * is promoted to int64 by the usual arithmetic conversions.
 */
Datum
btint28cmp(PG_FUNCTION_ARGS)
{
	int16		lhs = PG_GETARG_INT16(0);
	int64		rhs = PG_GETARG_INT64(1);

	if (lhs < rhs)
		PG_RETURN_INT32(-1);
	if (lhs > rhs)
		PG_RETURN_INT32(1);
	PG_RETURN_INT32(0);
}
/*
 * btint42cmp
 *		Btree comparison support: integer vs. smallint.
 *
 * Returns -1 / 0 / +1 per the usual comparator contract.
 */
Datum
btint42cmp(PG_FUNCTION_ARGS)
{
	int32		lhs = PG_GETARG_INT32(0);
	int16		rhs = PG_GETARG_INT16(1);

	if (lhs < rhs)
		PG_RETURN_INT32(-1);
	if (lhs > rhs)
		PG_RETURN_INT32(1);
	PG_RETURN_INT32(0);
}
Datum int2div(PG_FUNCTION_ARGS) { int16 arg1 = PG_GETARG_INT16(0); int16 arg2 = PG_GETARG_INT16(1); int16 result; if (arg2 == 0) { ereport(ERROR, (errcode(ERRCODE_DIVISION_BY_ZERO), errmsg("division by zero"))); /* ensure compiler realizes we mustn't reach the division (gcc bug) */ PG_RETURN_NULL(); } /* * SHRT_MIN / -1 is problematic, since the result can't be represented on * a two's-complement machine. Some machines produce SHRT_MIN, some * produce zero, some throw an exception. We can dodge the problem by * recognizing that division by -1 is the same as negation. */ if (arg2 == -1) { result = -arg1; /* overflow check (needed for SHRT_MIN) */ if (arg1 != 0 && SAMESIGN(result, arg1)) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("smallint out of range"))); PG_RETURN_INT16(result); } /* No overflow is possible */ result = arg1 / arg2; PG_RETURN_INT16(result); }
Datum int2abs(PG_FUNCTION_ARGS) { int16 arg1 = PG_GETARG_INT16(0); int16 result; result = (arg1 < 0) ? -arg1 : arg1; /* overflow check (needed for SHRT_MIN) */ if (result < 0) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("smallint out of range"))); PG_RETURN_INT16(result); }
Datum int2um(PG_FUNCTION_ARGS) { int16 arg = PG_GETARG_INT16(0); int16 result; result = -arg; /* overflow check (needed for SHRT_MIN) */ if (arg != 0 && SAMESIGN(result, arg)) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("smallint out of range"))); PG_RETURN_INT16(result); }
/*
 * cube_ur_coord
 *		Return the n'th normalized upper-right coordinate of a cube.
 *
 * Coordinates are 1-based; an out-of-range index yields 0.  The UR value
 * is the larger of the two stored corner coordinates for that dimension
 * (corner 2 is stored at offset dim in the x[] array).
 */
Datum
cube_ur_coord(PG_FUNCTION_ARGS)
{
	NDBOX	   *box = PG_GETARG_NDBOX(0);
	int			coord = PG_GETARG_INT16(1);
	double		value = 0;

	if (coord > 0 && box->dim >= coord)
		value = Max(box->x[coord - 1], box->x[box->dim + coord - 1]);

	PG_FREE_IF_COPY(box, 0);
	PG_RETURN_FLOAT8(value);
}
/*
 * set_sphere_output_precision
 *		Set the global output precision for sphere types.
 *
 * Clamps the requested digit count to [1, DBL_DIG]; any out-of-range
 * request (including <= 0) falls back to DBL_DIG.  Returns a palloc'd
 * "SET n" message as a cstring.
 */
Datum
set_sphere_output_precision(PG_FUNCTION_ARGS)
{
	short int	c = PG_GETARG_INT16(0);
	char	   *buf = (char *) palloc(20);

	if (c > DBL_DIG)
		c = DBL_DIG;
	if (c < 1)
		c = DBL_DIG;
	sphere_output_precision = c;

	/* snprintf bounds the write, unlike the original sprintf */
	snprintf(buf, 20, "SET %d", c);
	PG_RETURN_CSTRING(buf);
}
/* cash_div_int2() * Divide cash by int2. * * XXX Don't know if rounding or truncating is correct behavior. * Round for now. - tgl 97/04/15 */ Datum cash_div_int2(PG_FUNCTION_ARGS) { Cash c = PG_GETARG_CASH(0); int16 s = PG_GETARG_INT16(1); Cash result; if (s == 0) ereport(ERROR, (errcode(ERRCODE_DIVISION_BY_ZERO), errmsg("division by zero"))); result = rint(c / s); PG_RETURN_CASH(result); }
Datum linterp_int16(PG_FUNCTION_ARGS) { float8 y0; float8 y1; float8 p; float8 r; int16 result; bool eq_bounds = false; bool eq_abscissas = false; /* Common */ p = linterp_abscissa(fcinfo, &eq_bounds, &eq_abscissas); /* Ordinate type specific code*/ y0 = (float8)PG_GETARG_INT16(2); y1 = (float8)PG_GETARG_INT16(4); if ( eq_bounds ) { if ( eq_abscissas && y0 == y1 ) r = y0; else PG_RETURN_NULL(); } else { r = round(y0+p*(y1-y0)); if ( r < SHRT_MIN || r > SHRT_MAX ) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("value \"%f\" is out of range for type smallint", r))); } result = (int16)r; PG_RETURN_INT16(result); }
/*
 * cube_ur_coord
 *		Return the n'th normalized upper-right coordinate of a cube.
 *
 * Coordinates are 1-based; an out-of-range index yields 0.  The UR value
 * is the larger of the LL and UR corner coordinates for that dimension.
 */
Datum
cube_ur_coord(PG_FUNCTION_ARGS)
{
	NDBOX	   *box = PG_GETARG_NDBOX(0);
	int			coord = PG_GETARG_INT16(1);
	double		value = 0;

	if (coord > 0 && DIM(box) >= coord)
		value = Max(LL_COORD(box, coord - 1), UR_COORD(box, coord - 1));

	PG_FREE_IF_COPY(box, 0);
	PG_RETURN_FLOAT8(value);
}
/*
 * gbt_int2_distance
 *		GiST distance support function for int2 (used by KNN ordering).
 *
 * Builds a GBT_NUMKEY_R view over the stored int16 range key and
 * delegates to the generic numeric-key distance helper, using the
 * file-level "tinfo" type descriptor.
 */
Datum
gbt_int2_distance(PG_FUNCTION_ARGS)
{
	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
	int16		query = PG_GETARG_INT16(1);

	/* Oid subtype = PG_GETARG_OID(3); */
	int16KEY   *kkk = (int16KEY *) DatumGetPointer(entry->key);
	GBT_NUMKEY_R key;

	/* Expose the stored lower/upper bounds to the generic helper */
	key.lower = (GBT_NUMKEY *) &kkk->lower;
	key.upper = (GBT_NUMKEY *) &kkk->upper;

	PG_RETURN_FLOAT8(
		gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
	);
}
Datum int24div(PG_FUNCTION_ARGS) { int16 arg1 = PG_GETARG_INT16(0); int32 arg2 = PG_GETARG_INT32(1); if (arg2 == 0) { ereport(ERROR, (errcode(ERRCODE_DIVISION_BY_ZERO), errmsg("division by zero"))); /* ensure compiler realizes we mustn't reach the division (gcc bug) */ PG_RETURN_NULL(); } /* No overflow is possible */ PG_RETURN_INT32((int32) arg1 / arg2); }
Datum HASHAPI_Hash_2_Text_Text(PG_FUNCTION_ARGS) { int32 num_segs; /* number of segments */ text *val1; /* text value1 */ text *val2; /* text value2 */ unsigned int targetbucket; /* 0-based */ int16 algorithm; /* hashing algorithm */ Datum d1,d2; Oid oid; /* Get number of segments */ num_segs = PG_GETARG_INT32(0); /* Get hashing algoriithm */ algorithm = PG_GETARG_INT16(1); /* Get the value to hash */ val1 = PG_GETARG_TEXT_P(2); val2 = PG_GETARG_TEXT_P(3); d1 = PointerGetDatum(val1); d2 = PointerGetDatum(val2); /* create a CdbHash for this hash test. */ h = makeCdbHash(num_segs, algorithm); /* init cdb hash */ cdbhashinit(h); oid = TEXTOID; cdbhash(h, d1, oid); cdbhash(h, d2, oid); /* reduce the result hash value */ targetbucket = cdbhashreduce(h); /* Avoid leaking memory for toasted inputs */ PG_FREE_IF_COPY(val1, 1); PG_FREE_IF_COPY(val2, 2); PG_RETURN_INT32(targetbucket); /* return target bucket (segID) */ }
/*
 * Remove knowledge of a segment from the master.
 *
 * gp_remove_segment(order)
 *
 * Args:
 *   order - order of registration
 *
 * Returns:
 *   true on success, otherwise error.
 */
Datum
gp_remove_segment(PG_FUNCTION_ARGS)
{
	int16		order;

	/* A NULL registration id identifies no segment; reject it up front */
	if (PG_ARGISNULL(0))
		elog(ERROR, "Registration id cannot be NULL");

	order = PG_GETARG_INT16(0);

	/* Must be a superuser on the master, running in utility mode */
	mirroring_sanity_check(MASTER_ONLY | SUPERUSER | UTILITY_MODE,
						   "gp_remove_segment");

	/* The master and standby registrations are never removable this way */
	if (order == MASTER_ORDER_ID || order == STANDBY_ORDER_ID)
		elog(ERROR, "Cannot remove master or standby");

	remove_segment(order);

	PG_RETURN_BOOL(true);
}
Datum int24pl(PG_FUNCTION_ARGS) { int16 arg1 = PG_GETARG_INT16(0); int32 arg2 = PG_GETARG_INT32(1); int32 result; result = arg1 + arg2; /* * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("integer out of range"))); PG_RETURN_INT32(result); }
Datum int24mi(PG_FUNCTION_ARGS) { int16 arg1 = PG_GETARG_INT16(0); int32 arg2 = PG_GETARG_INT32(1); int32 result; result = arg1 - arg2; /* * Overflow check. If the inputs are of the same sign then their * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("integer out of range"))); PG_RETURN_INT32(result); }
/*
 * pg_column_is_updatable	- determine whether a column is updatable
 *
 * This function encapsulates the decision about just what
 * information_schema.columns.is_updatable actually means.  It's not clear
 * whether deletability of the column's relation should be required, so
 * we want that decision in C code where we could change it without initdb.
 */
Datum
pg_column_is_updatable(PG_FUNCTION_ARGS)
{
	Oid			reloid = PG_GETARG_OID(0);
	AttrNumber	attnum = PG_GETARG_INT16(1);
	/* offset so system attnums become valid non-negative Bitmapset members */
	AttrNumber	col = attnum - FirstLowInvalidHeapAttributeNumber;
	bool		include_triggers = PG_GETARG_BOOL(2);
	int			events;

	/* System columns are never updatable */
	if (attnum <= 0)
		PG_RETURN_BOOL(false);

	events = relation_is_updatable(reloid, include_triggers,
								   bms_make_singleton(col));

	/* We require both updatability and deletability of the relation */
#define REQ_EVENTS ((1 << CMD_UPDATE) | (1 << CMD_DELETE))

	PG_RETURN_BOOL((events & REQ_EVENTS) == REQ_EVENTS);
}
/* * decompress_data * * Decompress the bytea buffer and return result as bytea, this may be a page * with its hole filled with zeros or a page without a hole. */ Datum decompress_data(PG_FUNCTION_ARGS) { bytea *compress_data = PG_GETARG_BYTEA_P(0); int16 raw_len = PG_GETARG_INT16(1); bytea *res; char *uncompress_buffer; uncompress_buffer = palloc(raw_len); if (pglz_decompress(VARDATA(compress_data), VARSIZE(compress_data) - VARHDRSZ, uncompress_buffer, raw_len) < 0) ereport(ERROR, (errmsg("Decompression failed..."))); /* Build result */ res = (bytea *) palloc(raw_len + VARHDRSZ); SET_VARSIZE(res, raw_len + VARHDRSZ); memcpy(VARDATA(res), uncompress_buffer, raw_len); pfree(uncompress_buffer); PG_RETURN_BYTEA_P(res); }
/*
 * gbt_int2_consistent
 *		GiST consistent support function for int2.
 *
 * Builds a GBT_NUMKEY_R view over the stored int16 range key and
 * delegates to the generic numeric-key consistency helper using the
 * file-level "tinfo" type descriptor.
 */
Datum
gbt_int2_consistent(PG_FUNCTION_ARGS)
{
	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
	int16		query = PG_GETARG_INT16(1);
	StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);

	/* Oid subtype = PG_GETARG_OID(3); */
	bool	   *recheck = (bool *) PG_GETARG_POINTER(4);
	int16KEY   *kkk = (int16KEY *) DatumGetPointer(entry->key);
	GBT_NUMKEY_R key;

	/* All cases served by this function are exact */
	*recheck = false;

	/* Expose the stored lower/upper bounds to the generic helper */
	key.lower = (GBT_NUMKEY *) &kkk->lower;
	key.upper = (GBT_NUMKEY *) &kkk->upper;

	PG_RETURN_BOOL(
		gbt_num_consistent(&key, (void *) &query, &strategy,
						   GIST_LEAF(entry), &tinfo)
	);
}
Datum HASHAPI_Hash_1_SmallInt(PG_FUNCTION_ARGS) { int32 num_segs; /* number of segments */ int16 algorithm;/* hashing algorithm */ int32 value; /* int input value will be cast to int16 */ int16 val1; unsigned int targetbucket; /* 0-based */ Datum d1; Oid oid; /* Get number of segments */ num_segs = PG_GETARG_INT32(0); /* Get hashing algoriithm */ algorithm = PG_GETARG_INT16(1); /* Get the value to hash */ value = PG_GETARG_INT32(2); val1 = (int16)value; d1 = Int16GetDatum(val1); /* create a CdbHash for this hash test. */ h = makeCdbHash(num_segs, algorithm); /* init cdb hash */ cdbhashinit(h); oid = INT2OID; cdbhash(h, d1, oid); /* reduce the result hash value */ targetbucket = cdbhashreduce(h); PG_RETURN_INT32(targetbucket); /* return target bucket (segID) */ }
Datum HASHAPI_Hash_2_Int_Int(PG_FUNCTION_ARGS) { int32 num_segs; /* number of segments */ int32 val1; /* int input value */ int32 val2; /* bigint input value */ unsigned int targetbucket; /* 0-based */ int16 algorithm; /* hashing algorithm */ Datum d1,d2; Oid oid; /* Get number of segments */ num_segs = PG_GETARG_INT32(0); /* Get hashing algoriithm */ algorithm = PG_GETARG_INT16(1); /* Get the values to hash */ val1 = PG_GETARG_INT32(2); val2 = PG_GETARG_INT32(3); d1 = Int32GetDatum(val1); d2 = Int32GetDatum(val2); /* create a CdbHash for this hash test. */ h = makeCdbHash(num_segs, algorithm); /* init cdb hash */ cdbhashinit(h); oid = INT4OID; cdbhash(h, d1, oid); cdbhash(h, d2, oid); /* reduce the result hash value */ targetbucket = cdbhashreduce(h); PG_RETURN_INT32(targetbucket); /* return target bucket (segID) */ }
Datum int42div(PG_FUNCTION_ARGS) { int32 arg1 = PG_GETARG_INT32(0); int16 arg2 = PG_GETARG_INT16(1); int32 result; if (unlikely(arg2 == 0)) { ereport(ERROR, (errcode(ERRCODE_DIVISION_BY_ZERO), errmsg("division by zero"))); /* ensure compiler realizes we mustn't reach the division (gcc bug) */ PG_RETURN_NULL(); } /* * INT_MIN / -1 is problematic, since the result can't be represented on a * two's-complement machine. Some machines produce INT_MIN, some produce * zero, some throw an exception. We can dodge the problem by recognizing * that division by -1 is the same as negation. */ if (arg2 == -1) { if (unlikely(arg1 == PG_INT32_MIN)) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("integer out of range"))); result = -arg1; PG_RETURN_INT32(result); } /* No overflow is possible */ result = arg1 / arg2; PG_RETURN_INT32(result); }
Datum HASHAPI_Hash_1_Bool(PG_FUNCTION_ARGS) { int32 num_segs; /* number of segments */ Datum d1; Oid oid; int16 algorithm; /* hashing algorithm */ bool val1; /* boolean input value */ unsigned int targetbucket; /* 0-based */ /* Get number of segments */ num_segs = PG_GETARG_INT32(0); /* Get hashing algoriithm */ algorithm = PG_GETARG_INT16(1); /* Get the value to hash */ val1 = PG_GETARG_BOOL(2); d1 = BoolGetDatum(val1); /* create a CdbHash for this hash test. */ h = makeCdbHash(num_segs, algorithm); /* init cdb hash */ cdbhashinit(h); oid = BOOLOID; cdbhash(h, d1, oid); /* reduce the result hash value */ targetbucket = cdbhashreduce(h); PG_RETURN_INT32(targetbucket); /* return target bucket (segID) */ }
Datum geography_gist_join_selectivity(PG_FUNCTION_ARGS) { PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0); /* Oid operator = PG_GETARG_OID(1); */ List *args = (List *) PG_GETARG_POINTER(2); JoinType jointype = (JoinType) PG_GETARG_INT16(3); Node *arg1, *arg2; Var *var1, *var2; Oid relid1, relid2; HeapTuple stats1_tuple, stats2_tuple; GEOG_STATS *geogstats1, *geogstats2; /* * These are to avoid casting the corresponding * "type-punned" pointers, which would break * "strict-aliasing rules". */ GEOG_STATS **gs1ptr=&geogstats1, **gs2ptr=&geogstats2; int geogstats1_nvalues = 0, geogstats2_nvalues = 0; float8 selectivity1 = 0.0, selectivity2 = 0.0; float4 num1_tuples = 0.0, num2_tuples = 0.0; float4 total_tuples = 0.0, rows_returned = 0.0; GBOX search_box; /** * Join selectivity algorithm. To calculation the selectivity we * calculate the intersection of the two column sample extents, * sum the results, and then multiply by two since for each * geometry in col 1 that intersects a geometry in col 2, the same * will also be true. 
*/ POSTGIS_DEBUGF(3, "geography_gist_join_selectivity called with jointype %d", jointype); /* * We'll only respond to an inner join/unknown context join */ if (jointype != JOIN_INNER) { elog(NOTICE, "geography_gist_join_selectivity called with incorrect join type"); PG_RETURN_FLOAT8(DEFAULT_GEOGRAPHY_SEL); } /* * Determine the oids of the geometry columns we are working with */ arg1 = (Node *) linitial(args); arg2 = (Node *) lsecond(args); if (!IsA(arg1, Var) || !IsA(arg2, Var)) { elog(DEBUG1, "geography_gist_join_selectivity called with arguments that are not column references"); PG_RETURN_FLOAT8(DEFAULT_GEOGRAPHY_SEL); } var1 = (Var *)arg1; var2 = (Var *)arg2; relid1 = getrelid(var1->varno, root->parse->rtable); relid2 = getrelid(var2->varno, root->parse->rtable); POSTGIS_DEBUGF(3, "Working with relations oids: %d %d", relid1, relid2); /* Read the stats tuple from the first column */ stats1_tuple = SearchSysCache(STATRELATT, ObjectIdGetDatum(relid1), Int16GetDatum(var1->varattno), 0, 0); if ( ! stats1_tuple ) { POSTGIS_DEBUG(3, " No statistics, returning default geometry join selectivity"); PG_RETURN_FLOAT8(DEFAULT_GEOGRAPHY_SEL); } if ( ! get_attstatsslot(stats1_tuple, 0, 0, STATISTIC_KIND_GEOGRAPHY, InvalidOid, NULL, NULL, #if POSTGIS_PGSQL_VERSION >= 85 NULL, #endif (float4 **)gs1ptr, &geogstats1_nvalues) ) { POSTGIS_DEBUG(3, " STATISTIC_KIND_GEOGRAPHY stats not found - returning default geometry join selectivity"); ReleaseSysCache(stats1_tuple); PG_RETURN_FLOAT8(DEFAULT_GEOGRAPHY_SEL); } /* Read the stats tuple from the second column */ stats2_tuple = SearchSysCache(STATRELATT, ObjectIdGetDatum(relid2), Int16GetDatum(var2->varattno), 0, 0); if ( ! stats2_tuple ) { POSTGIS_DEBUG(3, " No statistics, returning default geometry join selectivity"); free_attstatsslot(0, NULL, 0, (float *)geogstats1, geogstats1_nvalues); ReleaseSysCache(stats1_tuple); PG_RETURN_FLOAT8(DEFAULT_GEOGRAPHY_SEL); } if ( ! 
get_attstatsslot(stats2_tuple, 0, 0, STATISTIC_KIND_GEOGRAPHY, InvalidOid, NULL, NULL, #if POSTGIS_PGSQL_VERSION >= 85 NULL, #endif (float4 **)gs2ptr, &geogstats2_nvalues) ) { POSTGIS_DEBUG(3, " STATISTIC_KIND_GEOGRAPHY stats not found - returning default geometry join selectivity"); free_attstatsslot(0, NULL, 0, (float *)geogstats1, geogstats1_nvalues); ReleaseSysCache(stats2_tuple); ReleaseSysCache(stats1_tuple); PG_RETURN_FLOAT8(DEFAULT_GEOGRAPHY_SEL); } /** * Setup the search box - this is the intersection of the two column * extents. */ search_box.xmin = Max(geogstats1->xmin, geogstats2->xmin); search_box.ymin = Max(geogstats1->ymin, geogstats2->ymin); search_box.zmin = Max(geogstats1->zmin, geogstats2->zmin); search_box.xmax = Min(geogstats1->xmax, geogstats2->xmax); search_box.ymax = Min(geogstats1->ymax, geogstats2->ymax); search_box.zmax = Min(geogstats1->zmax, geogstats2->zmax); /* If the extents of the two columns don't intersect, return zero */ if (search_box.xmin > search_box.xmax || search_box.ymin > search_box.ymax || search_box.zmin > search_box.zmax) PG_RETURN_FLOAT8(0.0); POSTGIS_DEBUGF(3, " -- geomstats1 box: %.15g %.15g %.15g, %.15g %.15g %.15g", geogstats1->xmin, geogstats1->ymin, geogstats1->zmin, geogstats1->xmax, geogstats1->ymax, geogstats1->zmax); POSTGIS_DEBUGF(3, " -- geomstats2 box: %.15g %.15g %.15g, %.15g %.15g %.15g", geogstats2->xmin, geogstats2->ymin, geogstats2->zmin, geogstats2->xmax, geogstats2->ymax, geogstats2->zmax); POSTGIS_DEBUGF(3, " -- calculated intersection box is : %.15g %.15g %.15g, %.15g %.15g %.15g", search_box.xmin, search_box.ymin, search_box.zmin, search_box.xmax, search_box.ymax, search_box.zmax); /* Do the selectivity */ selectivity1 = estimate_selectivity(&search_box, geogstats1); selectivity2 = estimate_selectivity(&search_box, geogstats2); POSTGIS_DEBUGF(3, "selectivity1: %.15g selectivity2: %.15g", selectivity1, selectivity2); /* * OK, so before we calculate the join selectivity we also need to * know the 
number of tuples in each of the columns since * estimate_selectivity returns the number of estimated tuples * divided by the total number of tuples. */ num1_tuples = geogstats1->totalrows; num2_tuples = geogstats2->totalrows; /* Free the statistic tuples */ free_attstatsslot(0, NULL, 0, (float *)geogstats1, geogstats1_nvalues); ReleaseSysCache(stats1_tuple); free_attstatsslot(0, NULL, 0, (float *)geogstats2, geogstats2_nvalues); ReleaseSysCache(stats2_tuple); /* * Finally calculate the estimate of the number of rows returned * * = 2 * (nrows from col1 + nrows from col2) / * total nrows in col1 x total nrows in col2 * * The factor of 2 accounts for the fact that for each tuple in * col 1 matching col 2, * there will be another match in col 2 matching col 1 */ total_tuples = num1_tuples * num2_tuples; rows_returned = 2 * ((num1_tuples * selectivity1) + (num2_tuples * selectivity2)); POSTGIS_DEBUGF(3, "Rows from rel1: %f", num1_tuples * selectivity1); POSTGIS_DEBUGF(3, "Rows from rel2: %f", num2_tuples * selectivity2); POSTGIS_DEBUGF(3, "Estimated rows returned: %f", rows_returned); /* * One (or both) tuple count is zero... * We return default selectivity estimate. * We could probably attempt at an estimate * w/out looking at tables tuple count, with * a function of selectivity1, selectivity2. */ if ( ! total_tuples ) { POSTGIS_DEBUG(3, "Total tuples == 0, returning default join selectivity"); PG_RETURN_FLOAT8(DEFAULT_GEOGRAPHY_SEL); } if ( rows_returned > total_tuples ) PG_RETURN_FLOAT8(1.0); PG_RETURN_FLOAT8(rows_returned / total_tuples); }
Datum hashint2(PG_FUNCTION_ARGS) { PG_RETURN_UINT32(~((uint32) PG_GETARG_INT16(0))); }
/*
 * svec_cast_int2
 *		Cast a smallint to a one-element sparse vector.
 */
Datum
svec_cast_int2(PG_FUNCTION_ARGS)
{
	float8		scalar = (float8) PG_GETARG_INT16(0);

	PG_RETURN_SVECTYPE_P(svec_make_scalar(scalar, 1));
}
/** * Returns a histogram from an array of numbers. * by Paul A. Jungwirth */ Datum array_to_hist(PG_FUNCTION_ARGS) { // Our arguments: ArrayType *vals; pgnum bucketsStart; pgnum bucketsSize; int32 bucketsCount; // The array element type: Oid valsType; // The array element type widths for our input and output arrays: int16 valsTypeWidth; int16 histTypeWidth; // The array element type "is passed by value" flags (not really used): bool valsTypeByValue; bool histTypeByValue; // The array element type alignment codes (not really used): char valsTypeAlignmentCode; char histTypeAlignmentCode; // The array contents, as PostgreSQL "Datum" objects: Datum *valsContent; Datum *histContent; // List of "is null" flags for the array contents (not used): bool *valsNullFlags; // The size of the input array: int valsLength; // The output array: ArrayType* histArray; pgnum histMax; pgnum v; int i; if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(2) || PG_ARGISNULL(3)) { ereport(ERROR, (errmsg("Null arguments not accepted"))); } vals = PG_GETARG_ARRAYTYPE_P(0); if (ARR_NDIM(vals) > 1) { ereport(ERROR, (errmsg("One-dimesional arrays are required"))); } if (array_contains_nulls(vals)) { ereport(ERROR, (errmsg("Array contains null elements"))); } // Determine the array element types. 
valsType = ARR_ELEMTYPE(vals); if (valsType != INT2OID && valsType != INT4OID && valsType != INT8OID && valsType != FLOAT4OID && valsType != FLOAT8OID) { ereport(ERROR, (errmsg("Histogram subject must be SMALLINT, INTEGER, BIGINT, REAL, or DOUBLE PRECISION values"))); } valsLength = (ARR_DIMS(vals))[0]; switch (valsType) { case INT2OID: bucketsStart.i16 = PG_GETARG_INT16(1); bucketsSize.i16 = PG_GETARG_INT16(2); break; case INT4OID: bucketsStart.i32 = PG_GETARG_INT32(1); bucketsSize.i32 = PG_GETARG_INT32(2); break; case INT8OID: bucketsStart.i64 = PG_GETARG_INT64(1); bucketsSize.i64 = PG_GETARG_INT64(2); break; case FLOAT4OID: bucketsStart.f4 = PG_GETARG_FLOAT4(1); bucketsSize.f4 = PG_GETARG_FLOAT4(2); break; case FLOAT8OID: bucketsStart.f8 = PG_GETARG_FLOAT8(1); bucketsSize.f8 = PG_GETARG_FLOAT8(2); break; default: break; } bucketsCount = PG_GETARG_INT32(3); get_typlenbyvalalign(valsType, &valsTypeWidth, &valsTypeByValue, &valsTypeAlignmentCode); // Extract the array contents (as Datum objects). deconstruct_array(vals, valsType, valsTypeWidth, valsTypeByValue, valsTypeAlignmentCode, &valsContent, &valsNullFlags, &valsLength); // Create a new array of histogram bins (as Datum objects). // Memory we palloc is freed automatically at the end of the transaction. 
histContent = palloc0(sizeof(Datum) * bucketsCount); // Generate the histogram switch (valsType) { case INT2OID: histMax.i16 = bucketsStart.i16 + (bucketsSize.i16 * bucketsCount); for (i = 0; i < valsLength; i++) { v.i16 = DatumGetInt16(valsContent[i]); if (v.i16 >= bucketsStart.i16 && v.i16 <= histMax.i16) { int b = (v.i16 - bucketsStart.i16) / bucketsSize.i16; if (b >= 0 && b < bucketsCount) { histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1); } } } break; case INT4OID: histMax.i32 = bucketsStart.i32 + (bucketsSize.i32 * bucketsCount); for (i = 0; i < valsLength; i++) { v.i32 = DatumGetInt32(valsContent[i]); if (v.i32 >= bucketsStart.i32 && v.i32 <= histMax.i32) { int b = (v.i32 - bucketsStart.i32) / bucketsSize.i32; if (b >= 0 && b < bucketsCount) { histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1); } } } break; case INT8OID: histMax.i64 = bucketsStart.i64 + (bucketsSize.i64 * bucketsCount); for (i = 0; i < valsLength; i++) { v.i64 = DatumGetInt64(valsContent[i]); if (v.i64 >= bucketsStart.i64 && v.i64 <= histMax.i64) { int b = (v.i64 - bucketsStart.i64) / bucketsSize.i64; if (b >= 0 && b < bucketsCount) { histContent[b] = Int64GetDatum(DatumGetInt64(histContent[b]) + 1); } } } break; case FLOAT4OID: histMax.f4 = bucketsStart.f4 + (bucketsSize.f4 * bucketsCount); for (i = 0; i < valsLength; i++) { v.f4 = DatumGetFloat4(valsContent[i]); if (v.f4 >= bucketsStart.f4 && v.f4 <= histMax.f4) { int b = (v.f4 - bucketsStart.f4) / bucketsSize.f4; if (b >= 0 && b < bucketsCount) { histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1); } } } break; case FLOAT8OID: histMax.f8 = bucketsStart.f8 + (bucketsSize.f8 * bucketsCount); for (i = 0; i < valsLength; i++) { v.f8 = DatumGetFloat8(valsContent[i]); if (v.f8 >= bucketsStart.f8 && v.f8 <= histMax.f8) { int b = (v.f8 - bucketsStart.f8) / bucketsSize.f8; if (b >= 0 && b < bucketsCount) { histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1); } } } break; default: break; 
} // Wrap the buckets in a new PostgreSQL array object. get_typlenbyvalalign(INT4OID, &histTypeWidth, &histTypeByValue, &histTypeAlignmentCode); histArray = construct_array(histContent, bucketsCount, INT4OID, histTypeWidth, histTypeByValue, histTypeAlignmentCode); // Return the final PostgreSQL array object. PG_RETURN_ARRAYTYPE_P(histArray); }
/*
 * float8arr_cast_int2
 *		Cast a smallint to a one-element float8 array, via a scalar
 *		sparse vector.
 */
Datum
float8arr_cast_int2(PG_FUNCTION_ARGS)
{
	float8		scalar = (float8) PG_GETARG_INT16(0);

	PG_RETURN_ARRAYTYPE_P(svec_return_array_internal(svec_make_scalar(scalar, 1)));
}
Datum tuple_data_split(PG_FUNCTION_ARGS) { Oid relid; bytea *raw_data; uint16 t_infomask; uint16 t_infomask2; char *t_bits_str; bool do_detoast = false; bits8 *t_bits = NULL; Datum res; relid = PG_GETARG_OID(0); raw_data = PG_ARGISNULL(1) ? NULL : PG_GETARG_BYTEA_P(1); t_infomask = PG_GETARG_INT16(2); t_infomask2 = PG_GETARG_INT16(3); t_bits_str = PG_ARGISNULL(4) ? NULL : text_to_cstring(PG_GETARG_TEXT_PP(4)); if (PG_NARGS() >= 6) do_detoast = PG_GETARG_BOOL(5); if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to use raw page functions"))); if (!raw_data) PG_RETURN_NULL(); /* * Convert t_bits string back to the bits8 array as represented in the * tuple header. */ if (t_infomask & HEAP_HASNULL) { int bits_str_len; int bits_len; bits_len = BITMAPLEN(t_infomask2 & HEAP_NATTS_MASK) * BITS_PER_BYTE; if (!t_bits_str) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("argument of t_bits is null, but it is expected to be null and %d character long", bits_len))); bits_str_len = strlen(t_bits_str); if (bits_len != bits_str_len) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("unexpected length of t_bits %u, expected %d", bits_str_len, bits_len))); /* do the conversion */ t_bits = text_to_bits(t_bits_str, bits_str_len); } else { if (t_bits_str) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("t_bits string is expected to be NULL, but instead it is %zu bytes length", strlen(t_bits_str)))); } /* Split tuple data */ res = tuple_data_split_internal(relid, (char *) raw_data + VARHDRSZ, VARSIZE(raw_data) - VARHDRSZ, t_infomask, t_infomask2, t_bits, do_detoast); if (t_bits) pfree(t_bits); PG_RETURN_ARRAYTYPE_P(res); }
/*
 * linterp_abscissa
 *
 * Common code that checks arguments.  The result is a floating point value
 * representing what fraction of the distance x lies along the interval from
 * x0 to x1.  It can be negative or greater than one (extrapolation) though
 * this isn't the intended use.  If x0 == x1, then the fraction is not
 * determined and the function returns 0 and sets *p_eq_bounds true.  In all
 * other cases (except error exits) *p_eq_bounds is set false.  An additional
 * flag indicates whether the abscissa value is equal to the lower boundary
 * value.
 *
 * Argument layout (shared with all linterp_* callers):
 *   0 = x, 1 = x0, 2 = y0, 3 = x1, 4 = y1.
 */
static float8
linterp_abscissa(PG_FUNCTION_ARGS,
				 bool *p_eq_bounds,
				 bool *p_eq_abscissas)
{
	Oid			x_type;
	Oid			x0_type;
	Oid			x1_type;
	Oid			y0_type;
	Oid			y1_type;
	float8		p = 0;
	bool		eq_bounds = false;
	bool		eq_abscissas = false;

	/* The abscissa (x) arguments are nominally declared anyelement.
	 * All the type checking is up to us.  We insist that the types
	 * are exactly alike.  Explicit casts may be needed.
	 */
	x_type = get_fn_expr_argtype(fcinfo->flinfo, 0);
	x0_type = get_fn_expr_argtype(fcinfo->flinfo, 1);
	x1_type = get_fn_expr_argtype(fcinfo->flinfo, 3);

	if (!OidIsValid(x_type) || !OidIsValid(x0_type) || !OidIsValid(x1_type))
	{
		elog(ERROR, "could not determine argument data types");
	}

	if (x_type != x0_type || x_type != x1_type)
	{
		elog(ERROR, "abscissa types unequal");
	}

	/* The ordinate (y) arguments are specifically declared in the SQL
	 * function declaration.  Here we just check and insist they are
	 * identical.
	 */
	y0_type = get_fn_expr_argtype(fcinfo->flinfo, 2);
	y1_type = get_fn_expr_argtype(fcinfo->flinfo, 4);

	if (y0_type != y1_type)
	{
		elog(ERROR, "mismatched ordinate types");
	}

	/*
	 * Dispatch on the abscissa type.  The numeric cases widen to float8
	 * and compute (x - x0) / (x1 - x0) inline; the temporal and numeric
	 * types delegate to their own *_li_fraction helpers.
	 */
	switch (x_type)
	{
	case INT8OID:
		{
			float8		x = (float8) PG_GETARG_INT64(0);
			float8		x0 = (float8) PG_GETARG_INT64(1);
			float8		x1 = (float8) PG_GETARG_INT64(3);

			if (x1 == x0)
			{
				eq_bounds = true;
				eq_abscissas = (x == x0);
			}
			else
				p = (x - x0) / (x1 - x0);
		}
		break;
	case INT4OID:
		{
			float8		x = (float8) PG_GETARG_INT32(0);
			float8		x0 = (float8) PG_GETARG_INT32(1);
			float8		x1 = (float8) PG_GETARG_INT32(3);

			if (x1 == x0)
			{
				eq_bounds = true;
				eq_abscissas = (x == x0);
			}
			else
				p = (x - x0) / (x1 - x0);
		}
		break;
	case INT2OID:
		{
			float8		x = (float8) PG_GETARG_INT16(0);
			float8		x0 = (float8) PG_GETARG_INT16(1);
			float8		x1 = (float8) PG_GETARG_INT16(3);

			if (x1 == x0)
			{
				eq_bounds = true;
				eq_abscissas = (x == x0);
			}
			else
				p = (x - x0) / (x1 - x0);
		}
		break;
	case FLOAT4OID:
		{
			float8		x = (float8) PG_GETARG_FLOAT4(0);
			float8		x0 = (float8) PG_GETARG_FLOAT4(1);
			float8		x1 = (float8) PG_GETARG_FLOAT4(3);

			if (x1 == x0)
			{
				eq_bounds = true;
				eq_abscissas = (x == x0);
			}
			else
				p = (x - x0) / (x1 - x0);
		}
		break;
	case FLOAT8OID:
		{
			float8		x = PG_GETARG_FLOAT8(0);
			float8		x0 = PG_GETARG_FLOAT8(1);
			float8		x1 = PG_GETARG_FLOAT8(3);

			if (x1 == x0)
			{
				eq_bounds = true;
				eq_abscissas = (x == x0);
			}
			else
				p = (x - x0) / (x1 - x0);
		}
		break;
	case DATEOID:
		{
			DateADT		x = PG_GETARG_DATEADT(0);
			DateADT		x0 = PG_GETARG_DATEADT(1);
			DateADT		x1 = PG_GETARG_DATEADT(3);
			/* dates interpolate via day-count differences */
			int32		x_x0 = date_diff(x, x0);
			int32		x1_x0 = date_diff(x1, x0);

			if (x1 == x0)
			{
				eq_bounds = true;
				eq_abscissas = (x_x0 == 0);
			}
			else
				p = ((float8) x_x0) / ((float8) x1_x0);
		}
		break;
	case TIMEOID:
		{
			TimeADT		x = PG_GETARG_TIMEADT(0);
			TimeADT		x0 = PG_GETARG_TIMEADT(1);
			TimeADT		x1 = PG_GETARG_TIMEADT(3);

			p = time_li_fraction(x, x0, x1, &eq_bounds, &eq_abscissas);
		}
		break;
	case TIMESTAMPOID:
		{
			Timestamp	x = PG_GETARG_TIMESTAMP(0);
			Timestamp	x0 = PG_GETARG_TIMESTAMP(1);
			Timestamp	x1 = PG_GETARG_TIMESTAMP(3);

			p = timestamp_li_fraction(x, x0, x1, &eq_bounds, &eq_abscissas);
		}
		break;
	case TIMESTAMPTZOID:
		{
			TimestampTz x = PG_GETARG_TIMESTAMPTZ(0);
			TimestampTz x0 = PG_GETARG_TIMESTAMPTZ(1);
			TimestampTz x1 = PG_GETARG_TIMESTAMPTZ(3);

			p = timestamptz_li_fraction(x, x0, x1, &eq_bounds, &eq_abscissas);
		}
		break;
	case INTERVALOID:
		{
			Interval   *x = PG_GETARG_INTERVAL_P(0);
			Interval   *x0 = PG_GETARG_INTERVAL_P(1);
			Interval   *x1 = PG_GETARG_INTERVAL_P(3);

			p = interval_li_fraction(x, x0, x1, &eq_bounds, &eq_abscissas);
		}
		break;
	case NUMERICOID:
		{
			Numeric		x = PG_GETARG_NUMERIC(0);
			Numeric		x0 = PG_GETARG_NUMERIC(1);
			Numeric		x1 = PG_GETARG_NUMERIC(3);

			p = numeric_li_fraction(x, x0, x1, &eq_bounds, &eq_abscissas);
		}
		break;
	default:
		elog(ERROR, "abscissa type not supported");
	}

	/* Report the degenerate-interval flags to callers that want them */
	if (p_eq_bounds)
		*p_eq_bounds = eq_bounds;

	if (p_eq_abscissas)
		*p_eq_abscissas = eq_abscissas;

	return p;
}