struct PlaceSpecification *
getPlaceSpecificationFromDatabase(long long placeid)
{
    const char *query = build_placeSpecQuery(placeid);
    bool        isnull;

    if (SPI_execute(query, true, 1) != SPI_OK_SELECT)
        ereport(ERROR,
                (errcode(ERRCODE_DATA_EXCEPTION),
                 errmsg("Error when performing placeid query")));
    if (SPI_processed < 1)
        ereport(ERROR,
                (errcode(ERRCODE_DATA_EXCEPTION),
                 errmsg("unable to find placespecification")));
    else if (SPI_processed > 1)
        ereport(ERROR,
                (errcode(ERRCODE_DATA_EXCEPTION),
                 errmsg("too many placespecifications returned!")));

    HeapTuple placeRow = *SPI_tuptable->vals;

    /* malloc, not palloc: the result must outlive the SPI memory context. */
    struct PlaceSpecification *ret = (struct PlaceSpecification *)
        malloc(sizeof(struct PlaceSpecification));

    /*
     * SPI_getbinval requires a valid bool pointer for its isnull output
     * parameter; the original passed NULL here, which crashes.  The columns
     * are expected NOT NULL, so the flag is not otherwise inspected.
     */
    ret->startX_ = DatumGetFloat4(SPI_getbinval(placeRow, SPI_tuptable->tupdesc, 1, &isnull));
    ret->startY_ = DatumGetFloat4(SPI_getbinval(placeRow, SPI_tuptable->tupdesc, 2, &isnull));
    ret->xNumber_ = DatumGetInt32(SPI_getbinval(placeRow, SPI_tuptable->tupdesc, 3, &isnull));
    ret->yNumber_ = DatumGetInt32(SPI_getbinval(placeRow, SPI_tuptable->tupdesc, 4, &isnull));
    ret->xIncrement_ = DatumGetFloat4(SPI_getbinval(placeRow, SPI_tuptable->tupdesc, 5, &isnull));
    ret->yIncrement_ = DatumGetFloat4(SPI_getbinval(placeRow, SPI_tuptable->tupdesc, 6, &isnull));
    ret->srid_ = DatumGetInt32(SPI_getbinval(placeRow, SPI_tuptable->tupdesc, 7, &isnull));

    char *projDef = SPI_getvalue(placeRow, SPI_tuptable->tupdesc, 8);
    ret->projDefinition_ = strdup(projDef);
    pfree(projDef);

    return ret;
}
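/*
 * Because the struct above and its projDefinition_ string are allocated with
 * malloc/strdup rather than palloc, the caller owns them and must release
 * both explicitly.  A minimal sketch of the matching cleanup; the helper
 * name is hypothetical, not part of the original code:
 */
void
freePlaceSpecification(struct PlaceSpecification *ps)
{
    if (ps == NULL)
        return;
    free(ps->projDefinition_);  /* strdup'd copy of the SPI value */
    free(ps);
}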
double
pgr_SPI_getFloat8(HeapTuple *tuple, TupleDesc *tupdesc, Column_info_t info)
{
    Datum       binval;
    bool        isnull;
    double      value = 0.0;

    binval = SPI_getbinval(*tuple, *tupdesc, info.colNumber, &isnull);
    if (isnull)
        elog(ERROR, "Unexpected Null value in column %s", info.name);

    switch (info.type)
    {
        case INT2OID:
            value = (double) DatumGetInt16(binval);
            break;
        case INT4OID:
            value = (double) DatumGetInt32(binval);
            break;
        case INT8OID:
            value = (double) DatumGetInt64(binval);
            break;
        case FLOAT4OID:
            value = (double) DatumGetFloat4(binval);
            break;
        case FLOAT8OID:
            value = DatumGetFloat8(binval);
            break;
        default:
            elog(ERROR,
                 "Unexpected Column type of %s. Expected ANY-NUMERICAL",
                 info.name);
    }
    PGR_DBG("Variable: %s Value: %lf", info.name, value);
    return value;
}
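/*
 * Column_info_t is pgRouting's column descriptor; its full definition is not
 * shown in this listing.  A hypothetical reconstruction of just the members
 * the helper above touches (the real struct has more fields):
 */
typedef struct
{
    int   colNumber;    /* 1-based column index passed to SPI_getbinval */
    Oid   type;         /* resolved column type OID, e.g. FLOAT8OID */
    char *name;         /* column name, used only in error messages */
} Column_info_t;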
/*
 * Examine parameters and prepare for a sample scan.
 */
static void
system_beginsamplescan(SampleScanState *node,
                       Datum *params,
                       int nparams,
                       uint32 seed)
{
    SystemSamplerData *sampler = (SystemSamplerData *) node->tsm_state;
    double      percent = DatumGetFloat4(params[0]);
    double      dcutoff;

    if (percent < 0 || percent > 100 || isnan(percent))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT),
                 errmsg("sample percentage must be between 0 and 100")));

    /*
     * The cutoff is sample probability times (PG_UINT32_MAX + 1); we have to
     * store that as a uint64, of course.  Note that this gives strictly
     * correct behavior at the limits of zero or one probability.
     */
    dcutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100);
    sampler->cutoff = (uint64) dcutoff;
    sampler->seed = seed;
    sampler->nextblock = 0;
    sampler->lt = InvalidOffsetNumber;

    /*
     * Bulkread buffer access strategy probably makes sense unless we're
     * scanning a very small fraction of the table.  The 1% cutoff here is a
     * guess.  We should use pagemode visibility checking, since we scan all
     * tuples on each selected page.
     */
    node->use_bulkread = (percent >= 1);
    node->use_pagemode = true;
}
/*
 * Examine parameters and prepare for a sample scan.
 */
static void
bernoulli_beginsamplescan(SampleScanState *node,
                          Datum *params,
                          int nparams,
                          uint32 seed)
{
    BernoulliSamplerData *sampler = (BernoulliSamplerData *) node->tsm_state;
    double      percent = DatumGetFloat4(params[0]);
    double      dcutoff;

    if (percent < 0 || percent > 100 || isnan(percent))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT),
                 errmsg("sample percentage must be between 0 and 100")));

    /*
     * The cutoff is sample probability times (PG_UINT32_MAX + 1); we have to
     * store that as a uint64, of course.  Note that this gives strictly
     * correct behavior at the limits of zero or one probability.
     */
    dcutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100);
    sampler->cutoff = (uint64) dcutoff;
    sampler->seed = seed;
    sampler->lt = InvalidOffsetNumber;

    /*
     * Use bulkread, since we're scanning all pages.  But pagemode visibility
     * checking is a win only at larger sampling fractions.  The 25% cutoff
     * here is based on very limited experimentation.
     */
    node->use_bulkread = true;
    node->use_pagemode = (percent >= 25);
}
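/*
 * In both samplescan initializers above, the percentage is turned into a
 * uint64 cutoff scaled by PG_UINT32_MAX + 1.  The sketch below only
 * illustrates how such a cutoff is typically consumed; it is not the actual
 * nextsampletuple callback.  Drawing a uniform 32-bit value and accepting
 * strictly below the threshold means percent = 0 never accepts and
 * percent = 100 (cutoff = 2^32) always does.
 */
static inline bool
sample_accept(uint64 cutoff, uint32 random32)
{
    /* 'random32' stands in for the sampler's PRNG output */
    return (uint64) random32 < cutoff;
}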
static float
seg_atof(char *value)
{
    Datum       datum;

    datum = DirectFunctionCall1(float4in, CStringGetDatum(value));
    return DatumGetFloat4(datum);
}
Datum
array_quantile(PG_FUNCTION_ARGS)
{
    // The formal PostgreSQL array object
    ArrayType  *array;

    // The array element type
    Oid         arrayElementType;

    // The array element type width
    int16       arrayElementTypeWidth;

    // The array element type "is passed by value" flag (not used, should always be true)
    bool        arrayElementTypeByValue;

    // The array element type alignment code (not used)
    char        arrayElementTypeAlignmentCode;

    // The array contents, as PostgreSQL "datum" objects
    Datum      *arrayContent;

    // List of "is null" flags for the array contents
    bool       *arrayNullFlags;

    // The size of the input array
    int         arrayLength;

    int         i;
    double      f, quantile;
    double     *inarray;

    if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
        ereport(ERROR, (errmsg("Null arrays not accepted")));

    // Get array and quantile from input
    array = PG_GETARG_ARRAYTYPE_P(0);
    f = PG_GETARG_FLOAT8(1);

    if (ARR_NDIM(array) != 1)
        ereport(ERROR, (errmsg("One-dimensional arrays are required")));

    if (array_contains_nulls(array))
        ereport(ERROR, (errmsg("Array contains null elements")));

    arrayLength = (ARR_DIMS(array))[0];
    arrayElementType = ARR_ELEMTYPE(array);
    get_typlenbyvalalign(arrayElementType,
                         &arrayElementTypeWidth,
                         &arrayElementTypeByValue,
                         &arrayElementTypeAlignmentCode);
    deconstruct_array(array, arrayElementType,
                      arrayElementTypeWidth, arrayElementTypeByValue,
                      arrayElementTypeAlignmentCode,
                      &arrayContent, &arrayNullFlags, &arrayLength);

    // Note: DatumGetFloat4 assumes the elements really are float4 (REAL);
    // a float8[] input would be decoded incorrectly here.
    inarray = (double *) malloc(arrayLength * sizeof(double));
    for (i = 0; i < arrayLength; i++)
        inarray[i] = DatumGetFloat4(arrayContent[i]);

    gsl_sort(inarray, 1, arrayLength);
    quantile = gsl_stats_quantile_from_sorted_data(inarray, 1, arrayLength, f);

    // The original leaked this malloc'd buffer.
    free(inarray);

    PG_RETURN_FLOAT8(quantile);
}
Datum
similarity_op(PG_FUNCTION_ARGS)
{
    float4      res = DatumGetFloat4(DirectFunctionCall2(similarity,
                                                         PG_GETARG_DATUM(0),
                                                         PG_GETARG_DATUM(1)));

    PG_RETURN_BOOL(res >= trgm_limit);
}
Datum
similarity_dist(PG_FUNCTION_ARGS)
{
    float4      res = DatumGetFloat4(DirectFunctionCall2(similarity,
                                                         PG_GETARG_DATUM(0),
                                                         PG_GETARG_DATUM(1)));

    PG_RETURN_FLOAT4(1.0 - res);
}
/**
 * Convert postgres Datum into a ConcreteValue object.
 */
AbstractValueSPtr
AbstractPGValue::DatumToValue(bool inMemoryIsWritable, Oid inTypeID,
                              Datum inDatum) const
{
    // First check if datum is rowtype
    if (type_is_rowtype(inTypeID)) {
        HeapTupleHeader pgTuple = DatumGetHeapTupleHeader(inDatum);
        return AbstractValueSPtr(new PGValue<HeapTupleHeader>(pgTuple));
    } else if (type_is_array(inTypeID)) {
        ArrayType *pgArray = DatumGetArrayTypeP(inDatum);

        if (ARR_NDIM(pgArray) != 1)
            throw std::invalid_argument(
                "Multidimensional arrays not yet supported");
        if (ARR_HASNULL(pgArray))
            throw std::invalid_argument("Arrays with NULLs not yet supported");

        switch (ARR_ELEMTYPE(pgArray)) {
            case FLOAT8OID: {
                MemHandleSPtr memoryHandle(new PGArrayHandle(pgArray));

                if (inMemoryIsWritable) {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array<double> >(
                            Array<double>(memoryHandle,
                                boost::extents[ ARR_DIMS(pgArray)[0] ])));
                } else {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array_const<double> >(
                            Array_const<double>(memoryHandle,
                                boost::extents[ ARR_DIMS(pgArray)[0] ])));
                }
            }
        }
    }

    switch (inTypeID) {
        case BOOLOID:
            return AbstractValueSPtr(
                new ConcreteValue<bool>(DatumGetBool(inDatum)));
        case INT2OID:
            return AbstractValueSPtr(
                new ConcreteValue<int16_t>(DatumGetInt16(inDatum)));
        case INT4OID:
            return AbstractValueSPtr(
                new ConcreteValue<int32_t>(DatumGetInt32(inDatum)));
        case INT8OID:
            return AbstractValueSPtr(
                new ConcreteValue<int64_t>(DatumGetInt64(inDatum)));
        case FLOAT4OID:
            return AbstractValueSPtr(
                new ConcreteValue<float>(DatumGetFloat4(inDatum)));
        case FLOAT8OID:
            return AbstractValueSPtr(
                new ConcreteValue<double>(DatumGetFloat8(inDatum)));
    }

    return AbstractValueSPtr();
}
Datum
cosine_op(PG_FUNCTION_ARGS)
{
    float4      res;

    /*
     * Save the *_is_normalized flag temporarily, because the threshold we
     * compare against is normalized.
     */
    bool        tmp = pgs_cosine_is_normalized;

    pgs_cosine_is_normalized = true;

    res = DatumGetFloat4(DirectFunctionCall2(cosine,
                                             PG_GETARG_DATUM(0),
                                             PG_GETARG_DATUM(1)));

    /* we're done; restore the previous value */
    pgs_cosine_is_normalized = tmp;

    PG_RETURN_BOOL(res >= pgs_cosine_threshold);
}
/*
 * Sample size estimation.
 */
static void
system_samplescangetsamplesize(PlannerInfo *root,
                               RelOptInfo *baserel,
                               List *paramexprs,
                               BlockNumber *pages,
                               double *tuples)
{
    Node       *pctnode;
    float4      samplefract;

    /* Try to extract an estimate for the sample percentage */
    pctnode = (Node *) linitial(paramexprs);
    pctnode = estimate_expression_value(root, pctnode);

    if (IsA(pctnode, Const) &&
        !((Const *) pctnode)->constisnull)
    {
        samplefract = DatumGetFloat4(((Const *) pctnode)->constvalue);
        if (samplefract >= 0 && samplefract <= 100 && !isnan(samplefract))
            samplefract /= 100.0f;
        else
        {
            /* Default samplefract if the value is bogus */
            samplefract = 0.1f;
        }
    }
    else
    {
        /* Default samplefract if we didn't obtain a non-null Const */
        samplefract = 0.1f;
    }

    /* We'll visit a sample of the pages ... */
    *pages = clamp_row_est(baserel->pages * samplefract);

    /* ... and hopefully get a representative number of tuples from them */
    *tuples = clamp_row_est(baserel->tuples * samplefract);
}
/*
 * Costing function.
 */
Datum
tsm_bernoulli_cost(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Path       *path = (Path *) PG_GETARG_POINTER(1);
    RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
    List       *args = (List *) PG_GETARG_POINTER(3);
    BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
    double     *tuples = (double *) PG_GETARG_POINTER(5);
    Node       *pctnode;
    float4      samplesize;

    *pages = baserel->pages;

    pctnode = linitial(args);
    pctnode = estimate_expression_value(root, pctnode);

    if (IsA(pctnode, RelabelType))
        pctnode = (Node *) ((RelabelType *) pctnode)->arg;

    if (IsA(pctnode, Const))
    {
        samplesize = DatumGetFloat4(((Const *) pctnode)->constvalue);
        samplesize /= 100.0;
    }
    else
    {
        /* Default samplesize if the estimation didn't return a Const. */
        samplesize = 0.1f;
    }

    *tuples = path->rows * samplesize;
    path->rows = *tuples;

    PG_RETURN_VOID();
}
/**
 * Returns a histogram from an array of numbers.
 * by Paul A. Jungwirth
 */
Datum
array_to_hist(PG_FUNCTION_ARGS)
{
    // Our arguments:
    ArrayType  *vals;
    pgnum       bucketsStart;
    pgnum       bucketsSize;
    int32       bucketsCount;

    // The array element type:
    Oid         valsType;

    // The array element type widths for our input and output arrays:
    int16       valsTypeWidth;
    int16       histTypeWidth;

    // The array element type "is passed by value" flags (not really used):
    bool        valsTypeByValue;
    bool        histTypeByValue;

    // The array element type alignment codes (not really used):
    char        valsTypeAlignmentCode;
    char        histTypeAlignmentCode;

    // The array contents, as PostgreSQL "Datum" objects:
    Datum      *valsContent;
    Datum      *histContent;

    // List of "is null" flags for the array contents (not used):
    bool       *valsNullFlags;

    // The size of the input array:
    int         valsLength;

    // The output array:
    ArrayType  *histArray;

    pgnum       histMax;
    pgnum       v;
    int         i;

    if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(2) || PG_ARGISNULL(3)) {
        ereport(ERROR, (errmsg("Null arguments not accepted")));
    }

    vals = PG_GETARG_ARRAYTYPE_P(0);

    if (ARR_NDIM(vals) > 1) {
        ereport(ERROR, (errmsg("One-dimensional arrays are required")));
    }
    if (array_contains_nulls(vals)) {
        ereport(ERROR, (errmsg("Array contains null elements")));
    }

    // Determine the array element types.
    valsType = ARR_ELEMTYPE(vals);
    if (valsType != INT2OID &&
        valsType != INT4OID &&
        valsType != INT8OID &&
        valsType != FLOAT4OID &&
        valsType != FLOAT8OID) {
        ereport(ERROR, (errmsg("Histogram subject must be SMALLINT, INTEGER, BIGINT, REAL, or DOUBLE PRECISION values")));
    }

    valsLength = (ARR_DIMS(vals))[0];

    // Read the bucket start and width with the same type as the input array.
    switch (valsType) {
        case INT2OID:
            bucketsStart.i16 = PG_GETARG_INT16(1);
            bucketsSize.i16 = PG_GETARG_INT16(2);
            break;
        case INT4OID:
            bucketsStart.i32 = PG_GETARG_INT32(1);
            bucketsSize.i32 = PG_GETARG_INT32(2);
            break;
        case INT8OID:
            bucketsStart.i64 = PG_GETARG_INT64(1);
            bucketsSize.i64 = PG_GETARG_INT64(2);
            break;
        case FLOAT4OID:
            bucketsStart.f4 = PG_GETARG_FLOAT4(1);
            bucketsSize.f4 = PG_GETARG_FLOAT4(2);
            break;
        case FLOAT8OID:
            bucketsStart.f8 = PG_GETARG_FLOAT8(1);
            bucketsSize.f8 = PG_GETARG_FLOAT8(2);
            break;
        default:
            break;
    }

    bucketsCount = PG_GETARG_INT32(3);

    get_typlenbyvalalign(valsType, &valsTypeWidth, &valsTypeByValue,
                         &valsTypeAlignmentCode);

    // Extract the array contents (as Datum objects).
    deconstruct_array(vals, valsType, valsTypeWidth, valsTypeByValue,
                      valsTypeAlignmentCode,
                      &valsContent, &valsNullFlags, &valsLength);

    // Create a new array of histogram bins (as Datum objects).
    // Memory we palloc is freed automatically at the end of the transaction.
    histContent = palloc0(sizeof(Datum) * bucketsCount);

    // Generate the histogram.
    switch (valsType) {
        case INT2OID:
            histMax.i16 = bucketsStart.i16 + (bucketsSize.i16 * bucketsCount);
            for (i = 0; i < valsLength; i++) {
                v.i16 = DatumGetInt16(valsContent[i]);
                if (v.i16 >= bucketsStart.i16 && v.i16 <= histMax.i16) {
                    int b = (v.i16 - bucketsStart.i16) / bucketsSize.i16;
                    if (b >= 0 && b < bucketsCount) {
                        histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
                    }
                }
            }
            break;
        case INT4OID:
            histMax.i32 = bucketsStart.i32 + (bucketsSize.i32 * bucketsCount);
            for (i = 0; i < valsLength; i++) {
                v.i32 = DatumGetInt32(valsContent[i]);
                if (v.i32 >= bucketsStart.i32 && v.i32 <= histMax.i32) {
                    int b = (v.i32 - bucketsStart.i32) / bucketsSize.i32;
                    if (b >= 0 && b < bucketsCount) {
                        histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
                    }
                }
            }
            break;
        case INT8OID:
            histMax.i64 = bucketsStart.i64 + (bucketsSize.i64 * bucketsCount);
            for (i = 0; i < valsLength; i++) {
                v.i64 = DatumGetInt64(valsContent[i]);
                if (v.i64 >= bucketsStart.i64 && v.i64 <= histMax.i64) {
                    int b = (v.i64 - bucketsStart.i64) / bucketsSize.i64;
                    if (b >= 0 && b < bucketsCount) {
                        // The bins are returned as an int4[], so count with
                        // int32 datums here as well (the original used
                        // Int64GetDatum, which doesn't match the output type).
                        histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
                    }
                }
            }
            break;
        case FLOAT4OID:
            histMax.f4 = bucketsStart.f4 + (bucketsSize.f4 * bucketsCount);
            for (i = 0; i < valsLength; i++) {
                v.f4 = DatumGetFloat4(valsContent[i]);
                if (v.f4 >= bucketsStart.f4 && v.f4 <= histMax.f4) {
                    int b = (v.f4 - bucketsStart.f4) / bucketsSize.f4;
                    if (b >= 0 && b < bucketsCount) {
                        histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
                    }
                }
            }
            break;
        case FLOAT8OID:
            histMax.f8 = bucketsStart.f8 + (bucketsSize.f8 * bucketsCount);
            for (i = 0; i < valsLength; i++) {
                v.f8 = DatumGetFloat8(valsContent[i]);
                if (v.f8 >= bucketsStart.f8 && v.f8 <= histMax.f8) {
                    int b = (v.f8 - bucketsStart.f8) / bucketsSize.f8;
                    if (b >= 0 && b < bucketsCount) {
                        histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
                    }
                }
            }
            break;
        default:
            break;
    }

    // Wrap the buckets in a new PostgreSQL array object.
    get_typlenbyvalalign(INT4OID, &histTypeWidth, &histTypeByValue,
                         &histTypeAlignmentCode);
    histArray = construct_array(histContent, bucketsCount, INT4OID,
                                histTypeWidth, histTypeByValue,
                                histTypeAlignmentCode);

    // Return the final PostgreSQL array object.
    PG_RETURN_ARRAYTYPE_P(histArray);
}
GISTENTRY *
gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, const gbtree_ninfo *tinfo)
{
    if (entry->leafkey)
    {
        union
        {
            int16       i2;
            int32       i4;
            int64       i8;
            float4      f4;
            float8      f8;
            DateADT     dt;
            TimeADT     tm;
            Timestamp   ts;
            Cash        ch;
        }           v;

        GBT_NUMKEY *r = (GBT_NUMKEY *) palloc0(2 * tinfo->size);
        void       *leaf = NULL;

        switch (tinfo->t)
        {
            case gbt_t_int2:
                v.i2 = DatumGetInt16(entry->key);
                leaf = &v.i2;
                break;
            case gbt_t_int4:
                v.i4 = DatumGetInt32(entry->key);
                leaf = &v.i4;
                break;
            case gbt_t_int8:
                v.i8 = DatumGetInt64(entry->key);
                leaf = &v.i8;
                break;
            case gbt_t_oid:
                v.i4 = DatumGetObjectId(entry->key);
                leaf = &v.i4;
                break;
            case gbt_t_float4:
                v.f4 = DatumGetFloat4(entry->key);
                leaf = &v.f4;
                break;
            case gbt_t_float8:
                v.f8 = DatumGetFloat8(entry->key);
                leaf = &v.f8;
                break;
            case gbt_t_date:
                v.dt = DatumGetDateADT(entry->key);
                leaf = &v.dt;
                break;
            case gbt_t_time:
                v.tm = DatumGetTimeADT(entry->key);
                leaf = &v.tm;
                break;
            case gbt_t_ts:
                v.ts = DatumGetTimestamp(entry->key);
                leaf = &v.ts;
                break;
            case gbt_t_cash:
                v.ch = DatumGetCash(entry->key);
                leaf = &v.ch;
                break;
            default:
                leaf = DatumGetPointer(entry->key);
        }

        /* initialize the leaf key range as [leaf, leaf] */
        memcpy((void *) &r[0], leaf, tinfo->size);
        memcpy((void *) &r[tinfo->size], leaf, tinfo->size);
        retval = palloc(sizeof(GISTENTRY));
        gistentryinit(*retval, PointerGetDatum(r),
                      entry->rel, entry->page, entry->offset, FALSE);
    }
    else
        retval = entry;

    return retval;
}
void datum_to_bson(const char* field_name, mongo::BSONObjBuilder& builder,
    Datum val, bool is_null, Oid typid)
{
    PGBSON_LOG << "BEGIN datum_to_bson, field_name=" << field_name
        << ", typeid=" << typid << PGBSON_ENDL;

    if (field_name == NULL)
    {
        field_name = "";
    }

    if (is_null)
    {
        builder.appendNull(field_name);
    }
    else
    {
        switch(typid)
        {
            case BOOLOID:
                builder.append(field_name, DatumGetBool(val));
                break;
            case CHAROID:
            {
                char c = DatumGetChar(val);
                builder.append(field_name, &c, 1);
                break;
            }
            case INT8OID:
                builder.append(field_name, (long long)DatumGetInt64(val));
                break;
            case INT2OID:
                builder.append(field_name, DatumGetInt16(val));
                break;
            case INT4OID:
                builder.append(field_name, DatumGetInt32(val));
                break;
            case TEXTOID:
            case JSONOID:
            case XMLOID:
            {
                text* t = DatumGetTextP(val);
                builder.append(field_name, VARDATA(t), VARSIZE(t)-VARHDRSZ+1);
                break;
            }
            case FLOAT4OID:
                builder.append(field_name, DatumGetFloat4(val));
                break;
            case FLOAT8OID:
                builder.append(field_name, DatumGetFloat8(val));
                break;
            case RECORDOID:
            {
                mongo::BSONObjBuilder sub(builder.subobjStart(field_name));
                composite_to_bson(sub, val);
                sub.done();
                break;
            }
            case TIMESTAMPOID:
            {
                Timestamp ts = DatumGetTimestamp(val);
#ifdef HAVE_INT64_TIMESTAMP
                mongo::Date_t date(ts);
#else
                mongo::Date_t date(ts * 1000);
#endif
                builder.append(field_name, date);
                break;
            }
            default:
            {
                PGBSON_LOG << "datum_to_bson - unknown type, using text output." << PGBSON_ENDL;
                PGBSON_LOG << "datum_to_bson - type=" << get_typename(typid) << PGBSON_ENDL;
                if (get_typename(typid) == "bson")
                {
                    bytea* data = DatumGetBson(val);
                    mongo::BSONObj obj(VARDATA_ANY(data));
                    builder.append(field_name, obj);
                }
                else
                {
                    // use text output for the type
                    bool typisvarlena = false;
                    Oid typoutput;
                    getTypeOutputInfo(typid, &typoutput, &typisvarlena);
                    PGBSON_LOG << "datum_to_bson - typisvarlena=" << std::boolalpha << typisvarlena << PGBSON_ENDL;

                    Datum out_val = val;
                    /*
                     * If we have a toasted datum, forcibly detoast it here to
                     * avoid memory leakage inside the type's output routine.
                     */
                    if (typisvarlena)
                    {
                        out_val = PointerGetDatum(PG_DETOAST_DATUM(val));
                        PGBSON_LOG << "datum_to_bson - var len value detoasted" << PGBSON_ENDL;
                    }

                    char* outstr = OidOutputFunctionCall(typoutput, out_val);
                    builder.append(field_name, outstr);

                    /* Clean up detoasted copy, if any */
                    if (val != out_val)
                        pfree(DatumGetPointer(out_val));
                }
            }
        } // switch
    } // if not null

    PGBSON_LOG << "END datum_to_bson, field_name=" << field_name << PGBSON_ENDL;
}
/**
 * Returns a mean from an array of numbers.
 * by Paul A. Jungwirth
 */
Datum
array_to_mean(PG_FUNCTION_ARGS)
{
    // Our arguments:
    ArrayType  *vals;

    // The array element type:
    Oid         valsType;

    // The array element type width for our input array:
    int16       valsTypeWidth;

    // The array element type "is passed by value" flag (not really used):
    bool        valsTypeByValue;

    // The array element type alignment code (not really used):
    char        valsTypeAlignmentCode;

    // The array contents, as PostgreSQL "Datum" objects:
    Datum      *valsContent;

    // List of "is null" flags for the array contents (not used):
    bool       *valsNullFlags;

    // The size of the input array:
    int         valsLength;

    float8      v = 0;
    int         i;

    if (PG_ARGISNULL(0)) {
        ereport(ERROR, (errmsg("Null arrays not accepted")));
    }

    vals = PG_GETARG_ARRAYTYPE_P(0);

    if (ARR_NDIM(vals) == 0) {
        PG_RETURN_NULL();
    }
    if (ARR_NDIM(vals) > 1) {
        ereport(ERROR, (errmsg("One-dimensional arrays are required")));
    }
    if (array_contains_nulls(vals)) {
        ereport(ERROR, (errmsg("Array contains null elements")));
    }

    // Determine the array element types.
    valsType = ARR_ELEMTYPE(vals);
    if (valsType != INT2OID &&
        valsType != INT4OID &&
        valsType != INT8OID &&
        valsType != FLOAT4OID &&
        valsType != FLOAT8OID) {
        ereport(ERROR, (errmsg("Mean subject must be SMALLINT, INTEGER, BIGINT, REAL, or DOUBLE PRECISION values")));
    }

    valsLength = (ARR_DIMS(vals))[0];
    if (valsLength == 0)
        PG_RETURN_NULL();

    get_typlenbyvalalign(valsType, &valsTypeWidth, &valsTypeByValue,
                         &valsTypeAlignmentCode);

    // Extract the array contents (as Datum objects).
    deconstruct_array(vals, valsType, valsTypeWidth, valsTypeByValue,
                      valsTypeAlignmentCode,
                      &valsContent, &valsNullFlags, &valsLength);

    // Iterate through the contents, maintaining a running mean
    // (v += (x - v) / (i + 1)) rather than a running sum, to avoid overflow:
    // http://stackoverflow.com/questions/1930454/what-is-a-good-solution-for-calculating-an-average-where-the-sum-of-all-values-e/1934266#1934266
    switch (valsType) {
        case INT2OID:
            for (i = 0; i < valsLength; i++) {
                v += (DatumGetInt16(valsContent[i]) - v) / (i + 1);
            }
            break;
        case INT4OID:
            for (i = 0; i < valsLength; i++) {
                v += (DatumGetInt32(valsContent[i]) - v) / (i + 1);
            }
            break;
        case INT8OID:
            for (i = 0; i < valsLength; i++) {
                v += (DatumGetInt64(valsContent[i]) - v) / (i + 1);
            }
            break;
        case FLOAT4OID:
            for (i = 0; i < valsLength; i++) {
                v += (DatumGetFloat4(valsContent[i]) - v) / (i + 1);
            }
            break;
        case FLOAT8OID:
            for (i = 0; i < valsLength; i++) {
                v += (DatumGetFloat8(valsContent[i]) - v) / (i + 1);
            }
            break;
        default:
            ereport(ERROR, (errmsg("Mean subject must be SMALLINT, INTEGER, BIGINT, REAL, or DOUBLE PRECISION values")));
            break;
    }

    PG_RETURN_FLOAT8(v);
}
/**
 * @brief Convert postgres Datum into a ConcreteValue object.
 */
AbstractValueSPtr
PGAbstractValue::DatumToValue(bool inMemoryIsWritable, Oid inTypeID,
                              Datum inDatum) const
{
    bool isTuple;
    bool isArray;
    HeapTupleHeader pgTuple;
    ArrayType *pgArray;

    bool errorOccurred = false;

    PG_TRY();
    {
        isTuple = type_is_rowtype(inTypeID);
        isArray = type_is_array(inTypeID);

        if (isTuple)
            pgTuple = DatumGetHeapTupleHeader(inDatum);
        else if (isArray)
            pgArray = DatumGetArrayTypeP(inDatum);
    }
    PG_CATCH();
    {
        errorOccurred = true;
    }
    PG_END_TRY();

    BOOST_ASSERT_MSG(errorOccurred == false,
        "An exception occurred while converting a PostgreSQL datum to a "
        "DBAL object.");

    // First check if datum is rowtype
    if (isTuple) {
        return AbstractValueSPtr(new PGValue<HeapTupleHeader>(pgTuple));
    } else if (isArray) {
        if (ARR_NDIM(pgArray) != 1)
            throw std::invalid_argument(
                "Multidimensional arrays not yet supported");
        if (ARR_HASNULL(pgArray))
            throw std::invalid_argument("Arrays with NULLs not yet supported");

        switch (ARR_ELEMTYPE(pgArray)) {
            case FLOAT8OID: {
                MemHandleSPtr memoryHandle(new PGArrayHandle(pgArray));

                if (inMemoryIsWritable) {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array<double> >(
                            Array<double>(memoryHandle,
                                boost::extents[ ARR_DIMS(pgArray)[0] ])));
                } else {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array_const<double> >(
                            Array_const<double>(memoryHandle,
                                boost::extents[ ARR_DIMS(pgArray)[0] ])));
                }
            }
            // FIXME: Default case
        }
    }

    switch (inTypeID) {
        case BOOLOID:
            return AbstractValueSPtr(
                new ConcreteValue<bool>(DatumGetBool(inDatum)));
        case INT2OID:
            return AbstractValueSPtr(
                new ConcreteValue<int16_t>(DatumGetInt16(inDatum)));
        case INT4OID:
            return AbstractValueSPtr(
                new ConcreteValue<int32_t>(DatumGetInt32(inDatum)));
        case INT8OID:
            return AbstractValueSPtr(
                new ConcreteValue<int64_t>(DatumGetInt64(inDatum)));
        case FLOAT4OID:
            return AbstractValueSPtr(
                new ConcreteValue<float>(DatumGetFloat4(inDatum)));
        case FLOAT8OID:
            return AbstractValueSPtr(
                new ConcreteValue<double>(DatumGetFloat8(inDatum)));
    }

    return AbstractValueSPtr();
}
/*
 * GetStreamScanPlan
 */
ForeignScan *
GetStreamScanPlan(PlannerInfo *root, RelOptInfo *baserel,
                  Oid relid, ForeignPath *best_path,
                  List *tlist, List *scan_clauses, Plan *outer_plan)
{
    StreamFdwInfo *sinfo = (StreamFdwInfo *) baserel->fdw_private;
    List       *physical_tlist = build_physical_tlist(root, baserel);
    RangeTblEntry *rte = NULL;
    int         i;
    TableSampleClause *sample;
    Value      *sample_cutoff = NULL;

    /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
    scan_clauses = extract_actual_clauses(scan_clauses, false);

    for (i = 1; i <= root->simple_rel_array_size; i++)
    {
        rte = root->simple_rte_array[i];
        if (rte && rte->relid == relid)
            break;
    }

    if (!rte || rte->relid != relid)
        elog(ERROR, "stream RTE missing");

    sample = rte->tablesample;
    if (sample)
    {
        double      dcutoff;
        Datum       d;
        ExprContext *econtext;
        bool        isnull;
        Node       *node;
        Expr       *expr;
        ExprState  *estate;
        ParseState *ps;
        float4      percent;

        if (sample->tsmhandler != BERNOULLI_OID)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("tablesample method %s is not supported by streams",
                            get_func_name(sample->tsmhandler)),
                     errhint("Only bernoulli tablesample method can be used with streams.")));

        if (sample->repeatable)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("streams don't support the REPEATABLE clause for tablesample")));

        econtext = CreateStandaloneExprContext();

        /* The original called make_parsestate() twice, leaking one state. */
        ps = make_parsestate(NULL);

        node = (Node *) linitial(sample->args);
        node = transformExpr(ps, node, EXPR_KIND_OTHER);
        expr = expression_planner((Expr *) node);

        estate = ExecInitExpr(expr, NULL);
        d = ExecEvalExpr(estate, econtext, &isnull, NULL);

        free_parsestate(ps);
        FreeExprContext(econtext, false);

        percent = DatumGetFloat4(d);

        if (percent < 0 || percent > 100 || isnan(percent))
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT),
                     errmsg("sample percentage must be between 0 and 100")));

        dcutoff = rint(((double) RAND_MAX + 1) * percent / 100);
        sample_cutoff = makeInteger((int) dcutoff);
    }

    return make_foreignscan(tlist, scan_clauses, baserel->relid,
                            NIL,
                            list_make3(sinfo->colnames, physical_tlist, sample_cutoff),
                            NIL, NIL, outer_plan);
}
/*
 * Add an attribute to the hash calculation.
 * **IMPORTANT: any new hard coded support for a data type in here
 * must be added to isGreenplumDbHashable() below!
 *
 * Note that the caller should provide the base type if the datum is
 * of a domain type. It is quite expensive to call get_typtype() and
 * getBaseType() here since this function gets called a lot for the
 * same set of Datums.
 *
 * @param hashFn called to update the hash value.
 * @param clientData passed to hashFn.
 */
void
hashDatum(Datum datum, Oid type, datumHashFunction hashFn, void *clientData)
{
    void       *buf = NULL;     /* pointer to the data */
    size_t      len = 0;        /* length for the data buffer */

    int64       intbuf;         /* an 8 byte buffer for all integer sizes */

    float4      buf_f4;
    float8      buf_f8;
    Timestamp   tsbuf;          /* timestamp data type is either a double or
                                 * int8 (determined at compile time) */
    TimestampTz tstzbuf;
    DateADT     datebuf;
    TimeADT     timebuf;
    TimeTzADT  *timetzptr;
    Interval   *intervalptr;
    AbsoluteTime abstime_buf;
    RelativeTime reltime_buf;
    TimeInterval tinterval;
    AbsoluteTime tinterval_len;

    Numeric     num;
    bool        bool_buf;
    char        char_buf;
    Name        namebuf;

    ArrayType  *arrbuf;
    inet       *inetptr;        /* inet/cidr */
    unsigned char inet_hkey[sizeof(inet_struct)];
    macaddr    *macptr;         /* MAC address */

    VarBit     *vbitptr;

    int2vector *i2vec_buf;
    oidvector  *oidvec_buf;
    Cash        cash_buf;
    AclItem    *aclitem_ptr;
    uint32      aclitem_buf;

    /*
     * special case buffers
     */
    uint32      nanbuf;
    uint32      invalidbuf;

    void       *tofree = NULL;

    /*
     * Select the hash to be performed according to the field type we are
     * adding to the hash.
     */
    switch (type)
    {
            /*
             * ======= NUMERIC TYPES ========
             */
        case INT2OID:           /* -32 thousand to 32 thousand, 2-byte storage */
            intbuf = (int64) DatumGetInt16(datum);  /* cast to 8 byte before
                                                     * hashing */
            buf = &intbuf;
            len = sizeof(intbuf);
            break;

        case INT4OID:           /* -2 billion to 2 billion integer, 4-byte
                                 * storage */
            intbuf = (int64) DatumGetInt32(datum);  /* cast to 8 byte before
                                                     * hashing */
            buf = &intbuf;
            len = sizeof(intbuf);
            break;

        case INT8OID:           /* ~18 digit integer, 8-byte storage */
            intbuf = DatumGetInt64(datum);  /* cast to 8 byte before hashing */
            buf = &intbuf;
            len = sizeof(intbuf);
            break;

        case FLOAT4OID:         /* single-precision floating point number,
                                 * 4-byte storage */
            buf_f4 = DatumGetFloat4(datum);

            /*
             * On IEEE-float machines, minus zero and zero have different bit
             * patterns but should compare as equal.  We must ensure that they
             * have the same hash value, which is most easily done this way:
             */
            if (buf_f4 == (float4) 0)
                buf_f4 = 0.0;

            buf = &buf_f4;
            len = sizeof(buf_f4);
            break;

        case FLOAT8OID:         /* double-precision floating point number,
                                 * 8-byte storage */
            buf_f8 = DatumGetFloat8(datum);

            /*
             * On IEEE-float machines, minus zero and zero have different bit
             * patterns but should compare as equal.  We must ensure that they
             * have the same hash value, which is most easily done this way:
             */
            if (buf_f8 == (float8) 0)
                buf_f8 = 0.0;

            buf = &buf_f8;
            len = sizeof(buf_f8);
            break;

        case NUMERICOID:
            num = DatumGetNumeric(datum);

            if (NUMERIC_IS_NAN(num))
            {
                nanbuf = NAN_VAL;
                buf = &nanbuf;
                len = sizeof(nanbuf);
            }
            else                /* not a nan */
            {
                buf = num->n_data;
                len = (VARSIZE(num) - NUMERIC_HDRSZ);
            }

            /*
             * If we did a pg_detoast_datum, we need to remember to pfree, or
             * we will leak memory, because of the 1-byte varlena header
             * stuff.
             */
            if (num != DatumGetPointer(datum))
                tofree = num;

            break;

            /*
             * ====== CHARACTER TYPES =======
             */
        case CHAROID:           /* char(1), single character */
            char_buf = DatumGetChar(datum);
            buf = &char_buf;
            len = 1;
            break;

        case BPCHAROID:         /* char(n), blank-padded string, fixed storage */
        case TEXTOID:           /* text */
        case VARCHAROID:        /* varchar */
        case BYTEAOID:          /* bytea */
            {
                int         tmplen;

                varattrib_untoast_ptr_len(datum, (char **) &buf, &tmplen, &tofree);

                /* adjust length to not include trailing blanks */
                if (type != BYTEAOID && tmplen > 1)
                    tmplen = ignoreblanks((char *) buf, tmplen);

                len = tmplen;
                break;
            }

        case NAMEOID:
            namebuf = DatumGetName(datum);
            len = NAMEDATALEN;
            buf = NameStr(*namebuf);

            /* adjust length to not include trailing blanks */
            if (len > 1)
                len = ignoreblanks((char *) buf, len);
            break;

            /*
             * ====== OBJECT IDENTIFIER TYPES ======
             */
        case OIDOID:            /* object identifier(oid), maximum 4 billion */
        case REGPROCOID:        /* function name */
        case REGPROCEDUREOID:   /* function name with argument types */
        case REGOPEROID:        /* operator name */
        case REGOPERATOROID:    /* operator with argument types */
        case REGCLASSOID:       /* relation name */
        case REGTYPEOID:        /* data type name */
            intbuf = (int64) DatumGetUInt32(datum); /* cast to 8 byte before
                                                     * hashing */
            buf = &intbuf;
            len = sizeof(intbuf);
            break;

        case TIDOID:            /* tuple id (6 bytes) */
            buf = DatumGetPointer(datum);
            len = SizeOfIptrData;
            break;

            /*
             * ====== DATE/TIME TYPES ======
             */
        case TIMESTAMPOID:      /* date and time */
            tsbuf = DatumGetTimestamp(datum);
            buf = &tsbuf;
            len = sizeof(tsbuf);
            break;

        case TIMESTAMPTZOID:    /* date and time with time zone */
            tstzbuf = DatumGetTimestampTz(datum);
            buf = &tstzbuf;
            len = sizeof(tstzbuf);
            break;

        case DATEOID:           /* ANSI SQL date */
            datebuf = DatumGetDateADT(datum);
            buf = &datebuf;
            len = sizeof(datebuf);
            break;

        case TIMEOID:           /* hh:mm:ss, ANSI SQL time */
            timebuf = DatumGetTimeADT(datum);
            buf = &timebuf;
            len = sizeof(timebuf);
            break;

        case TIMETZOID:         /* time with time zone */

            /*
             * will not compare equal to TIMEOID on equal values; Postgres
             * never attempts to compare the two either.
             */
            timetzptr = DatumGetTimeTzADTP(datum);
            buf = (unsigned char *) timetzptr;

            /*
             * Specify hash length as sizeof(double) + sizeof(int4), not as
             * sizeof(TimeTzADT), so that any garbage pad bytes in the
             * structure won't be included in the hash!
             */
            len = sizeof(timetzptr->time) + sizeof(timetzptr->zone);
            break;

        case INTERVALOID:       /* @ <number> <units>, time interval */
            intervalptr = DatumGetIntervalP(datum);
            buf = (unsigned char *) intervalptr;

            /*
             * Specify hash length as sizeof(double) + sizeof(int4), not as
             * sizeof(Interval), so that any garbage pad bytes in the
             * structure won't be included in the hash!
             */
            len = sizeof(intervalptr->time) + sizeof(intervalptr->month);
            break;

        case ABSTIMEOID:
            abstime_buf = DatumGetAbsoluteTime(datum);

            if (abstime_buf == INVALID_ABSTIME)
            {
                /* hash to a constant value */
                invalidbuf = INVALID_VAL;
                len = sizeof(invalidbuf);
                buf = &invalidbuf;
            }
            else
            {
                len = sizeof(abstime_buf);
                buf = &abstime_buf;
            }
            break;

        case RELTIMEOID:
            reltime_buf = DatumGetRelativeTime(datum);

            if (reltime_buf == INVALID_RELTIME)
            {
                /* hash to a constant value */
                invalidbuf = INVALID_VAL;
                len = sizeof(invalidbuf);
                buf = &invalidbuf;
            }
            else
            {
                len = sizeof(reltime_buf);
                buf = &reltime_buf;
            }
            break;

        case TINTERVALOID:
            tinterval = DatumGetTimeInterval(datum);

            /*
             * check if a valid interval. the '0' status code stands for
             * T_INTERVAL_INVAL, which is defined in nabstime.c.  We use the
             * actual value instead of defining it again here.
             */
            if (tinterval->status == 0 ||
                tinterval->data[0] == INVALID_ABSTIME ||
                tinterval->data[1] == INVALID_ABSTIME)
            {
                /* hash to a constant value */
                invalidbuf = INVALID_VAL;
                len = sizeof(invalidbuf);
                buf = &invalidbuf;
            }
            else
            {
                /* normalize on length of the time interval */
                tinterval_len = tinterval->data[1] - tinterval->data[0];
                len = sizeof(tinterval_len);
                buf = &tinterval_len;
            }
            break;

            /*
             * ======= NETWORK TYPES ========
             */
        case INETOID:
        case CIDROID:
            inetptr = DatumGetInetP(datum);
            /* fill in inet_hkey and get len */
            len = inet_getkey(inetptr, inet_hkey, sizeof(inet_hkey));
            buf = inet_hkey;
            break;

        case MACADDROID:
            macptr = DatumGetMacaddrP(datum);
            len = sizeof(macaddr);
            buf = (unsigned char *) macptr;
            break;

            /*
             * ======== BIT STRINGS ========
             */
        case BITOID:
        case VARBITOID:

            /*
             * Note that these are essentially strings.  We don't need to
             * worry about '10' and '010' comparing equal, because they will
             * not, by design (see SQL standard, and varbit.c).
             */
            vbitptr = DatumGetVarBitP(datum);
            len = VARBITBYTES(vbitptr);
            buf = (char *) VARBITS(vbitptr);
            break;

            /*
             * ======= other types =======
             */
        case BOOLOID:           /* boolean, 'true'/'false' */
            bool_buf = DatumGetBool(datum);
            buf = &bool_buf;
            len = sizeof(bool_buf);
            break;

            /*
             * We prepare the hash key for aclitems just like postgresql does
             * (see code and comment in acl.c: hash_aclitem()).
             */
        case ACLITEMOID:
            aclitem_ptr = DatumGetAclItemP(datum);
            aclitem_buf = (uint32) (aclitem_ptr->ai_privs +
                                    aclitem_ptr->ai_grantee +
                                    aclitem_ptr->ai_grantor);
            buf = &aclitem_buf;
            len = sizeof(aclitem_buf);
            break;

            /*
             * ANYARRAY is a pseudo-type.  We use it to include any of the
             * array types (OIDs 1007-1033 in pg_type.h).  The caller needs
             * to be sure the type is ANYARRAYOID before calling cdbhash on
             * an array (INSERT and COPY do so).
             */
        case ANYARRAYOID:
            arrbuf = DatumGetArrayTypeP(datum);
            len = VARSIZE(arrbuf) - VARHDRSZ;
            buf = VARDATA(arrbuf);
            break;

        case INT2VECTOROID:
            i2vec_buf = (int2vector *) DatumGetPointer(datum);
            len = i2vec_buf->dim1 * sizeof(int2);
            buf = (void *) i2vec_buf->values;
            break;

        case OIDVECTOROID:
            oidvec_buf = (oidvector *) DatumGetPointer(datum);
            len = oidvec_buf->dim1 * sizeof(Oid);
            buf = oidvec_buf->values;
            break;

        case CASHOID:           /* cash is stored in int32 internally */
            cash_buf = (*(Cash *) DatumGetPointer(datum));
            len = sizeof(Cash);
            buf = &cash_buf;
            break;

        default:
            ereport(ERROR,
                    (errcode(ERRCODE_CDB_FEATURE_NOT_YET),
                     errmsg("Type %u is not hashable.", type)));
    }                           /* switch (type) */

    /* do the hash using the selected algorithm */
    hashFn(clientData, buf, len);

    if (tofree)
        pfree(tofree);
}
extern int
output_bin_dfile(text *tbl_name, text *out_file,
                 int x_min, int y_min, int x_max, int y_max, int flip_signs)
{
    FILE       *fpout;
    char       *tablename;
    char       *outfilename;
    bool        flipSigns = (flip_signs == 0 ? false : true);
    char        query[512];
    unsigned int i, rows;
    int         result;
    bool        nullDepth = false;
    Datum       datumDepth;
    float4      floatDepth;
    float       bigendDepth;
    byte       *lEnd = ((byte *) &floatDepth);
    byte       *bEnd = ((byte *) &bigendDepth);
    byte        cnt = 0;

    // Prepare tablename and outfilename as C strings
    tablename = DatumGetCString(DirectFunctionCall1(textout,
                                                    PointerGetDatum(tbl_name)));
    outfilename = DatumGetCString(DirectFunctionCall1(textout,
                                                      PointerGetDatum(out_file)));

    // Build the query statement
    sprintf(query,
            "SELECT depth::float4 FROM %s "
            "WHERE x >= %i AND x <= %i AND y >= %i AND y <= %i "
            "ORDER BY y ASC, x ASC;",
            tablename, x_min, x_max, y_min, y_max);

    // Open the output file for binary write access
    fpout = fopen(outfilename, "wb");
    if (fpout == NULL)
    {
        elog(ERROR, "Unable to open output file: '%s'", outfilename);
    }

    // Output file is open and ready, query is ready
    SPI_connect();

    // Execute the query
    result = SPI_exec(query, 0);
    rows = SPI_processed;

    // If the SELECT statement worked and returned more than zero rows
    if (result == SPI_OK_SELECT && rows > 0)
    {
        // Get the tuple (row) description for the rows
        TupleDesc tupdesc = SPI_tuptable->tupdesc;

        // Get pointer to the tuple table containing the result tuples
        SPITupleTable *tuptable = SPI_tuptable;

        // Loop over each row in the result set (tuple set)
        for (i = 0; i < rows; i++)
        {
            // Get tuple (row) number i
            HeapTuple tuple = tuptable->vals[i];

            // Fetch the depth value Datum on this row
            datumDepth = SPI_getbinval(tuple, tupdesc, 1, &nullDepth);

            // Check the null flag before converting; the original converted
            // the Datum first and checked afterwards.
            if (nullDepth)
                elog(ERROR, "NULL depth value on row %i", i);

            floatDepth = DatumGetFloat4(datumDepth);

            if (flipSigns)
                floatDepth *= -1.0;

            // Copy the little-endian floatDepth into bigendDepth, reversed
            bEnd += 3;
            for (cnt = 0; cnt < 4; cnt++)
            {
                *bEnd = *lEnd;
                if (cnt < 3)
                {
                    lEnd++;
                    bEnd--;
                }
            }
            lEnd -= 3;

            // Write the floating point depth value out to the file
            fwrite(&bigendDepth, sizeof(float), 1, fpout);
        }
    }

    // Done using the result set
    SPI_finish();

    // Close the output file
    fclose(fpout);

    // CHMOD the file for access by group tportal
    int mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
    chmod(outfilename, mode);

    // Free up memory
    pfree(tablename);
    pfree(outfilename);

    return 0;
}
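/*
 * The byte-reversal loop above walks two pointers in opposite directions,
 * which is easy to get wrong.  A simpler, equivalent sketch of writing a
 * big-endian float from a little-endian host, as the function above assumes;
 * the helper name is made up for illustration:
 */
#include <stdio.h>
#include <string.h>

static void
write_float_bigendian(FILE *fp, float f)
{
    unsigned char in[4], out[4];

    memcpy(in, &f, sizeof in);      /* host (little-endian) bytes */
    out[0] = in[3];
    out[1] = in[2];
    out[2] = in[1];
    out[3] = in[0];
    fwrite(out, sizeof out, 1, fp); /* big-endian on disk */
}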
static DTYPE *
get_pgarray(int *num, ArrayType *input)
{
    int         ndims, *dims, *lbs;
    bool       *nulls;
    Oid         i_eltype;
    int16       i_typlen;
    bool        i_typbyval;
    char        i_typalign;
    Datum      *i_data;
    int         i, n;
    DTYPE      *data;

    /* get input array element type */
    i_eltype = ARR_ELEMTYPE(input);
    get_typlenbyvalalign(i_eltype, &i_typlen, &i_typbyval, &i_typalign);

    /* validate input data type */
    switch (i_eltype)
    {
        case INT2OID:
        case INT4OID:
        case FLOAT4OID:
        case FLOAT8OID:
            break;
        default:
            elog(ERROR, "Invalid input data type");
            break;
    }

    /* get various pieces of data from the input array */
    ndims = ARR_NDIM(input);
    dims = ARR_DIMS(input);
    lbs = ARR_LBOUND(input);

    if (ndims != 2 || dims[0] != dims[1])
    {
        elog(ERROR, "The input must be a square matrix[num][num]");
    }

    /* get src data */
    deconstruct_array(input, i_eltype, i_typlen, i_typbyval, i_typalign,
                      &i_data, &nulls, &n);

    DBG("get_pgarray: ndims=%d, n=%d", ndims, n);

#ifdef DEBUG
    for (i = 0; i < ndims; i++)
    {
        DBG("    dims[%d]=%d, lbs[%d]=%d", i, dims[i], i, lbs[i]);
    }
#endif

    /* construct a C array */
    data = (DTYPE *) palloc(n * sizeof(DTYPE));
    if (!data)
    {
        elog(ERROR, "Error: Out of memory!");
    }

    for (i = 0; i < n; i++)
    {
        if (nulls[i])
        {
            data[i] = INFINITY;
        }
        else
        {
            switch (i_eltype)
            {
                case INT2OID:
                    data[i] = (DTYPE) DatumGetInt16(i_data[i]);
                    break;
                case INT4OID:
                    data[i] = (DTYPE) DatumGetInt32(i_data[i]);
                    break;
                case FLOAT4OID:
                    data[i] = (DTYPE) DatumGetFloat4(i_data[i]);
                    break;
                case FLOAT8OID:
                    data[i] = (DTYPE) DatumGetFloat8(i_data[i]);
                    break;
            }

            /* we treat negative values as INFINITY */
            /********************************************************
              TODO: based on trying to add an endpt it is likely
              that this will not work and you will get an error in
              findEulerianPath
             ********************************************************/
            if (data[i] < 0)
                data[i] = INFINITY;
        }
        DBG("    data[%d]=%.4f", i, data[i]);
    }

    pfree(nulls);
    pfree(i_data);

    *num = dims[0];

    return data;
}
static PyObject *
PLyFloat_FromFloat4(PLyDatumToOb *arg, Datum d)
{
    return PyFloat_FromDouble(DatumGetFloat4(d));
}
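/*
 * For comparison, the float8 conversion in the same PL/Python type-I/O
 * table presumably mirrors this one using DatumGetFloat8 (a sketch based on
 * the pattern above, not copied from any specific PostgreSQL version):
 */
static PyObject *
PLyFloat_FromFloat8(PLyDatumToOb *arg, Datum d)
{
    /* float8 maps onto a Python float with no precision change */
    return PyFloat_FromDouble(DatumGetFloat8(d));
}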
int
write2sqlite(char *sqlitedb_name, char *dataset_name, char *sql_string,
             char *twkb_name, char *id_name, char *idx_geom, char *idx_tbl,
             char *idx_id, int create)
{
    char       *err_msg;
    int         spi_conn;
    int         proc, rc;

    /* Sqlite */
    sqlite3    *db;

    TupleDesc   tupdesc;
    SPITupleTable *tuptable;
    HeapTuple   tuple;
    int         i, j;
    SPIPlanPtr  plan;
    char        insert_str[SQLSTRLEN];
    Portal      cur;
    void       *val_p;
    int         val_int;
    int64       val_int64;
    float8      val_float;
    bool        null_check;
    char       *pg_type;
    int         tot_rows = 0;
    sqlite3_stmt *prepared_statement;

    spi_conn = SPI_connect();
    if (spi_conn != SPI_OK_CONNECT)
        ereport(ERROR, (errmsg("Failed to open SPI Connection")));

    /* Open the sqlite db to write to */
    rc = sqlite3_open(sqlitedb_name, &db);
    if (rc != SQLITE_OK)
    {
        sqlite3_close(db);
        ereport(ERROR, (errmsg("Cannot open SQLite database")));
    }

    plan = SPI_prepare(sql_string, 0, NULL);
    cur = SPI_cursor_open("our_cursor", plan, NULL, NULL, true);

    elog(INFO, "build sql-strings and create table if requested (create = %d)", create);
    create_sqlite_table(&cur, db, insert_str, dataset_name, twkb_name,
                        id_name, create);
    elog(INFO, "back from creating table");
    elog(INFO, "insert sql = %s", insert_str);

    /* TODO add error handling */
    sqlite3_prepare_v2(db, insert_str, strlen(insert_str),
                       &prepared_statement, NULL);

    do
    {
        sqlite3_exec(db, "BEGIN TRANSACTION", NULL, NULL, &err_msg);
        SPI_cursor_fetch(cur, true, 10000);
        proc = SPI_processed;
        tot_rows += proc;

        tupdesc = SPI_tuptable->tupdesc;
        tuptable = SPI_tuptable;

        for (j = 0; j < proc; j++)
        {
            tuple = tuptable->vals[j];

            for (i = 1; i <= tupdesc->natts; i++)
            {
                pg_type = SPI_gettype(tupdesc, i);

                /*
                 * Note: in the original the "bool" test was a standalone if,
                 * so boolean columns also fell into the final text branch
                 * and were bound twice; it belongs in the else-if chain.
                 */
                if (strcmp(pg_type, "bool") == 0)
                {
                    val_int = (bool) (DatumGetBool(SPI_getbinval(tuple, tupdesc, i, &null_check)) ? 1 : 0);
                    if (null_check)
                        sqlite3_bind_null(prepared_statement, i);
                    else
                        sqlite3_bind_int(prepared_statement, i, (int) val_int);
                }
                else if (strcmp(pg_type, "int2") == 0)
                {
                    val_int = (int) DatumGetInt16(SPI_getbinval(tuple, tupdesc, i, &null_check));
                    /* TODO add error handling */
                    if (null_check)
                        sqlite3_bind_null(prepared_statement, i);
                    else
                        sqlite3_bind_int(prepared_statement, i, val_int);
                }
                else if (strcmp(pg_type, "int4") == 0)
                {
                    val_int = (int) DatumGetInt32(SPI_getbinval(tuple, tupdesc, i, &null_check));
                    /* TODO add error handling */
                    if (null_check)
                        sqlite3_bind_null(prepared_statement, i);
                    else
                        sqlite3_bind_int(prepared_statement, i, val_int);
                }
                else if (strcmp(pg_type, "int8") == 0)
                {
                    val_int64 = (int64) DatumGetInt64(SPI_getbinval(tuple, tupdesc, i, &null_check));
                    /* TODO add error handling */
                    if (null_check)
                        sqlite3_bind_null(prepared_statement, i);
                    else
                        sqlite3_bind_int64(prepared_statement, i, val_int64);
                }
                else if (strcmp(pg_type, "float4") == 0)
                {
                    val_float = (float8) DatumGetFloat4(SPI_getbinval(tuple, tupdesc, i, &null_check));
                    /* TODO add error handling */
                    if (null_check)
                        sqlite3_bind_null(prepared_statement, i);
                    else
                        sqlite3_bind_double(prepared_statement, i, val_float);
                }
                else if (strcmp(pg_type, "float8") == 0)
                {
                    val_float = (float8) DatumGetFloat8(SPI_getbinval(tuple, tupdesc, i, &null_check));
                    /* TODO add error handling */
                    if (null_check)
                        sqlite3_bind_null(prepared_statement, i);
                    else
                        sqlite3_bind_double(prepared_statement, i, val_float);
                }
                else if (strcmp(pg_type, "bytea") == 0)
                {
                    val_p = (void *) PG_DETOAST_DATUM(SPI_getbinval(tuple, tupdesc, i, &null_check));
                    /* TODO add error handling */
                    if (null_check)
                        sqlite3_bind_null(prepared_statement, i);
                    else
                        sqlite3_bind_blob(prepared_statement, i,
                                          (const void *) VARDATA_ANY(val_p),
                                          /*
                                           * EXHDR matches VARDATA_ANY; the
                                           * original subtracted VARHDRSZ,
                                           * which is wrong for short varlena
                                           * headers.
                                           */
                                          VARSIZE_ANY_EXHDR(val_p),
                                          SQLITE_TRANSIENT);
                }
                else
                {
                    /* Fall back to the text representation of the value. */
                    /* TODO add error handling */
                    sqlite3_bind_text(prepared_statement, i,
                                      SPI_getvalue(tuple, tupdesc, i),
                                      -1, NULL);
                }
            }

            sqlite3_step(prepared_statement);
            sqlite3_clear_bindings(prepared_statement);
            sqlite3_reset(prepared_statement);
        }

        sqlite3_exec(db, "END TRANSACTION", NULL, NULL, &err_msg);
        elog(INFO, "inserted %d rows in table", tot_rows);
    } while (proc > 0);

    if (dataset_name && idx_geom && idx_id)
        create_spatial_index(db, dataset_name, idx_tbl, idx_geom, idx_id,
                             sql_string, create);
    else
        elog(INFO, "Finishing without spatial index");

    SPI_finish();
    sqlite3_close(db);

    return 0;
}
Datum
geometry_estimated_extent(PG_FUNCTION_ARGS)
{
    text       *txnsp = NULL;
    text       *txtbl = NULL;
    text       *txcol = NULL;
    char       *nsp = NULL;
    char       *tbl = NULL;
    char       *col = NULL;
    char       *query;
    ArrayType  *array = NULL;
    int         SPIcode;
    SPITupleTable *tuptable;
    TupleDesc   tupdesc;
    HeapTuple   tuple;
    bool        isnull;
    GBOX       *box;
    size_t      querysize;
    GEOM_STATS  geomstats;
    float       reltuples;
    Datum       binval;

    if (PG_NARGS() == 3)
    {
        txnsp = PG_GETARG_TEXT_P(0);
        txtbl = PG_GETARG_TEXT_P(1);
        txcol = PG_GETARG_TEXT_P(2);
    }
    else if (PG_NARGS() == 2)
    {
        txtbl = PG_GETARG_TEXT_P(0);
        txcol = PG_GETARG_TEXT_P(1);
    }
    else
    {
        elog(ERROR, "estimated_extent() called with wrong number of arguments");
        PG_RETURN_NULL();
    }

    POSTGIS_DEBUG(2, "geometry_estimated_extent called");

    /* Connect to SPI manager */
    SPIcode = SPI_connect();
    if (SPIcode != SPI_OK_CONNECT)
    {
        elog(ERROR, "geometry_estimated_extent: couldn't open a connection to SPI");
        PG_RETURN_NULL();
    }

    querysize = VARSIZE(txtbl) + VARSIZE(txcol) + 516;

    if (txnsp)
    {
        nsp = text2cstring(txnsp);
        querysize += VARSIZE(txnsp);
    }
    else
    {
        querysize += 32;        /* current_schema() */
    }

    tbl = text2cstring(txtbl);
    col = text2cstring(txcol);

#if POSTGIS_DEBUG_LEVEL > 0
    if (txnsp)
    {
        POSTGIS_DEBUGF(3, " schema:%s table:%s column:%s", nsp, tbl, col);
    }
    else
    {
        POSTGIS_DEBUGF(3, " schema:current_schema() table:%s column:%s",
                       tbl, col);
    }
#endif

    query = palloc(querysize);

    /*
     * Security check: because we access information in the pg_statistic
     * table, we must run as the database superuser (by marking the function
     * as SECURITY DEFINER) and check permissions ourselves.
     */
    if (txnsp)
    {
        sprintf(query,
                "SELECT has_table_privilege((SELECT usesysid FROM pg_user WHERE usename = session_user), '\"%s\".\"%s\"', 'select')",
                nsp, tbl);
    }
    else
    {
        sprintf(query,
                "SELECT has_table_privilege((SELECT usesysid FROM pg_user WHERE usename = session_user), '\"%s\"', 'select')",
                tbl);
    }

    POSTGIS_DEBUGF(4, "permission check sql query is: %s", query);

    SPIcode = SPI_exec(query, 1);
    if (SPIcode != SPI_OK_SELECT)
    {
        elog(ERROR, "geometry_estimated_extent: couldn't execute permission check sql via SPI");
        SPI_finish();
        PG_RETURN_NULL();
    }

    tuptable = SPI_tuptable;
    tupdesc = SPI_tuptable->tupdesc;
    tuple = tuptable->vals[0];

    if (!DatumGetBool(SPI_getbinval(tuple, tupdesc, 1, &isnull)))
    {
        elog(ERROR, "geometry_estimated_extent: permission denied for relation %s", tbl);
        SPI_finish();
        PG_RETURN_NULL();
    }

    /* Return the stats data */
    if (txnsp)
    {
        sprintf(query,
                "SELECT s.stanumbers1[5:8], c.reltuples FROM pg_class c"
                " LEFT OUTER JOIN pg_namespace n ON (n.oid = c.relnamespace)"
                " LEFT OUTER JOIN pg_attribute a ON (a.attrelid = c.oid )"
                " LEFT OUTER JOIN pg_statistic s ON (s.starelid = c.oid AND "
                "s.staattnum = a.attnum )"
                " WHERE c.relname = '%s' AND a.attname = '%s' "
                " AND n.nspname = '%s';",
                tbl, col, nsp);
    }
    else
    {
        sprintf(query,
                "SELECT s.stanumbers1[5:8], c.reltuples FROM pg_class c"
                " LEFT OUTER JOIN pg_namespace n ON (n.oid = c.relnamespace)"
                " LEFT OUTER JOIN pg_attribute a ON (a.attrelid = c.oid )"
                " LEFT OUTER JOIN pg_statistic s ON (s.starelid = c.oid AND "
                "s.staattnum = a.attnum )"
                " WHERE c.relname = '%s' AND a.attname = '%s' "
                " AND n.nspname = current_schema();",
                tbl, col);
    }

    POSTGIS_DEBUGF(4, " query: %s", query);

    SPIcode = SPI_exec(query, 1);
    if (SPIcode != SPI_OK_SELECT)
    {
        elog(ERROR, "geometry_estimated_extent: couldn't execute sql via SPI");
        SPI_finish();
        PG_RETURN_NULL();
    }

    if (SPI_processed != 1)
    {
        POSTGIS_DEBUGF(3, " %d stat rows", SPI_processed);
        elog(ERROR, "Nonexistent field \"%s\".\"%s\".\"%s\"",
             (nsp ? nsp : "<current>"), tbl, col);
        SPI_finish();
        PG_RETURN_NULL();
    }

    tuptable = SPI_tuptable;
    tupdesc = SPI_tuptable->tupdesc;
    tuple = tuptable->vals[0];

    /* Check if the table has zero rows first */
    binval = SPI_getbinval(tuple, tupdesc, 2, &isnull);
    if (isnull)
    {
        POSTGIS_DEBUG(3, " reltuples is NULL");
        elog(ERROR, "geometry_estimated_extent: null reltuples for table");
        SPI_finish();
        PG_RETURN_NULL();
    }

    reltuples = DatumGetFloat4(binval);
    if (!reltuples)
    {
        POSTGIS_DEBUG(3, "table has estimated zero rows");

        /*
         * TODO: distinguish between empty and not analyzed?
         */
        elog(NOTICE, "\"%s\".\"%s\".\"%s\" is empty or not analyzed",
             (nsp ? nsp : "<current>"), tbl, col);
        SPI_finish();
        PG_RETURN_NULL();
    }

    binval = SPI_getbinval(tuple, tupdesc, 1, &isnull);
    if (isnull)
    {
        POSTGIS_DEBUG(3, " stats are NULL");
        elog(ERROR, "geometry_estimated_extent: null statistics for table");
        SPI_finish();
        PG_RETURN_NULL();
    }

    array = DatumGetArrayTypeP(binval);
    if (ArrayGetNItems(ARR_NDIM(array), ARR_DIMS(array)) != 4)
    {
        elog(ERROR, " corrupted histogram");
        PG_RETURN_NULL();
    }

    POSTGIS_DEBUGF(3, " stats array has %d elems",
                   ArrayGetNItems(ARR_NDIM(array), ARR_DIMS(array)));

    /*
     * Construct the GBOX.  Must allocate this in the upper executor context
     * to keep it alive after SPI_finish().
     */
    box = SPI_palloc(sizeof(GBOX));

    FLAGS_SET_GEODETIC(box->flags, 0);
    FLAGS_SET_Z(box->flags, 0);
    FLAGS_SET_M(box->flags, 0);

    /* Construct the box */
    memcpy(&(geomstats.xmin), ARR_DATA_PTR(array), sizeof(float) * 4);
    box->xmin = geomstats.xmin;
    box->xmax = geomstats.xmax;
    box->ymin = geomstats.ymin;
    box->ymax = geomstats.ymax;

    POSTGIS_DEBUGF(3, " histogram extent = %g %g, %g %g",
                   box->xmin, box->ymin, box->xmax, box->ymax);

    SPIcode = SPI_finish();
    if (SPIcode != SPI_OK_FINISH)
    {
        elog(ERROR, "geometry_estimated_extent: couldn't disconnect from SPI");
    }

    /* TODO: enlarge the box by some factor */

    PG_RETURN_POINTER(box);
}
Datum
fft_agg_finalfn(PG_FUNCTION_ARGS)
{
    ArrayType  *input, *result = NULL;
    Oid         eltype;
    int16       typlen;
    bool        typbyval;
    char        typalign;
    Datum      *data;
    int         i, n;
    int         ndims, *dims;
    kiss_fft_cfg cfg;
    kiss_fft_cpx *cx_in = NULL, *cx_out = NULL;

    if (!AggCheckCallContext(fcinfo, NULL))
        elog(ERROR, "fft_agg_finalfn() Not aggregate context");
    if (PG_ARGISNULL(0))
        elog(ERROR, "fft_agg_finalfn() args cannot be null");

    /* state array */
    input = PG_GETARG_ARRAYTYPE_P(0);

    /* get various pieces of data from the input array */
    ndims = ARR_NDIM(input);
    dims = ARR_DIMS(input);
    eltype = ARR_ELEMTYPE(input);

    Assert(ndims == 1);
    Assert(eltype == FLOAT4OID);

    /* get input array element type */
    get_typlenbyvalalign(eltype, &typlen, &typbyval, &typalign);

    /* get src data */
    deconstruct_array(input, eltype, typlen, typbyval, typalign,
                      &data, NULL, &n);
    Assert(*dims == n);

    cx_in = palloc0(*dims * sizeof(kiss_fft_cpx));
    cx_out = palloc0(*dims * sizeof(kiss_fft_cpx));

    /* load the real input samples; imaginary parts stay zero */
    for (i = 0; i < *dims; i++)
        cx_in[i].r = DatumGetFloat4(data[i]);

    if ((cfg = kiss_fft_alloc(*dims, 0, 0, 0)) == NULL)
        elog(ERROR, "kiss_fft_alloc() failed");

    kiss_fft(cfg, cx_in, cx_out);

    /*
     * Store the scaled power spectrum back as proper float4 datums.  The
     * original wrote raw floats into the Datum buffer
     * (((float *) data)[i] = ...), which doesn't match the one-Datum-per-
     * element layout construct_array expects; Float4GetDatum is the correct
     * conversion.
     */
    for (i = 0; i < *dims; i++)
        data[i] = Float4GetDatum((float4)
            ((cx_out[i].r * cx_out[i].r + cx_out[i].i * cx_out[i].i) /
             ((double) *dims)));

    result = construct_array(data, *dims, eltype, typlen, typbyval, typalign);

    free(cfg);                  /* kiss_fft_alloc uses malloc */
    pfree(data);
    pfree(cx_in);
    pfree(cx_out);

    PG_RETURN_ARRAYTYPE_P(result);
}