/*
 * compute_shortest_path_shooting_star
 *
 * Runs the user-supplied SQL (expected to return shooting-star edge rows),
 * accumulates all edges into a palloc'd array via an SPI cursor, validates
 * that both the source and target edge ids appear in the edge set, then
 * delegates the actual path computation to boost_shooting_star().
 *
 * sql              - query text producing the edge set
 * source_edge_id   - id of the edge the path starts on
 * target_edge_id   - id of the edge the path ends on
 * directed         - treat the graph as directed
 * has_reverse_cost - edge rows carry a reverse_cost column
 * path/path_count  - out: computed path (allocated by boost_shooting_star)
 *
 * Returns the boost_shooting_star() result code via finish(), or -1 on
 * early failure.  NOTE(review): the `return -1` statements after
 * elog(ERROR, ...) are unreachable — elog(ERROR) longjmps out — they only
 * silence compiler warnings.
 */
static int
compute_shortest_path_shooting_star(char* sql, int source_edge_id,
                                    int target_edge_id, bool directed,
                                    bool has_reverse_cost,
                                    path_element_t **path, int *path_count)
{
    int SPIcode;
    void *SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
    int ntuples;
    edge_shooting_star_t *edges = NULL;
    int total_tuples = 0;
    // int v_max_id=0;
    // int v_min_id=INT_MAX;
    /* running min/max of the edge ids seen; e_max_id is passed to boost */
    int e_max_id=0;
    int e_min_id=INT_MAX;
    /* -1 marks "column numbers not resolved yet" (done on first fetch) */
    edge_shooting_star_columns_t edge_columns = {id: -1, source: -1, target: -1, cost: -1, reverse_cost: -1, s_x: -1, s_y: -1, t_x: -1, t_y: -1, to_cost: -1, rule: -1};
    char *err_msg;
    int ret = -1;
    register int z, t;
    int s_count=0;          /* occurrences of source_edge_id in the edge set */
    int t_count=0;          /* occurrences of target_edge_id in the edge set */

    DBG("start shortest_path_shooting_star\n");

    SPIcode = SPI_connect();
    if (SPIcode != SPI_OK_CONNECT)
    {
        elog(ERROR, "shortest_path_shooting_star: couldn't open a connection to SPI");
        return -1;
    }

    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL)
    {
        elog(ERROR, "shortest_path_shooting_star: couldn't create query plan via SPI");
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL)
    {
        elog(ERROR, "shortest_path_shooting_star: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    /* Fetch TUPLIMIT rows at a time until the cursor is exhausted. */
    while (moredata == TRUE)
    {
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

        /* Resolve result-column numbers once, from the first batch. */
        if (edge_columns.id == -1)
        {
            if (fetch_edge_shooting_star_columns(SPI_tuptable, &edge_columns, has_reverse_cost) == -1)
                return finish(SPIcode, ret);
        }
        //DBG("***%i***", ret);

        ntuples = SPI_processed;
        total_tuples += ntuples;

        /* Grow the edge array to hold this batch. */
        if (!edges)
            edges = palloc(total_tuples * sizeof(edge_shooting_star_t));
        else
            edges = repalloc(edges, total_tuples * sizeof(edge_shooting_star_t));

        /* NOTE(review): palloc/repalloc elog(ERROR) on OOM and never return
         * NULL, so this check is effectively dead code. */
        if (edges == NULL)
        {
            elog(ERROR, "Out of memory");
            return finish(SPIcode, ret);
        }

        if (ntuples > 0)
        {
            int t;      /* shadows the outer `t`; batch-local row index */
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++)
            {
                HeapTuple tuple = tuptable->vals[t];
                /* Decode into the slot for this row within the full array. */
                fetch_edge_shooting_star(&tuple, &tupdesc, &edge_columns,
                                         &edges[total_tuples - ntuples + t]);
            }
            SPI_freetuptable(tuptable);
        }
        else
        {
            /* empty fetch => cursor exhausted */
            moredata = FALSE;
        }
    }

    DBG("Total %i tuples", total_tuples);

    /* Determine the edge id range (e_max_id sizes boost's internal arrays). */
    for(z=0; z<total_tuples; z++)
    {
        if(edges[z].id<e_min_id)
            e_min_id=edges[z].id;
        if(edges[z].id>e_max_id)
            e_max_id=edges[z].id;
    }
    DBG("E : %i <-> %i", e_min_id, e_max_id);

    for(z=0; z<total_tuples; ++z)
    {
        //check if edges[] contains source and target
        if(edges[z].id == source_edge_id)
            ++s_count;
        if(edges[z].id == target_edge_id)
            ++t_count;
        //edges[z].source-=v_min_id;
        //edges[z].target-=v_min_id;
    }

    DBG("Total %i tuples", total_tuples);

    if(s_count == 0)
    {
        elog(ERROR, "Start edge was not found.");
        return -1;
    }
    if(t_count == 0)
    {
        elog(ERROR, "Target edge was not found.");
        return -1;
    }

    DBG("Total %i tuples", total_tuples);
    DBG("Calling boost_shooting_star <%i>\n", total_tuples);

    //time_t stime = time(NULL);
    ret = boost_shooting_star(edges, total_tuples, source_edge_id,
                              target_edge_id, directed, has_reverse_cost,
                              path, path_count, &err_msg, e_max_id);
    //time_t etime = time(NULL);
    //DBG("Path was calculated in %f seconds. \n", difftime(etime, stime));

    DBG("SIZE %i\n",*path_count);
    DBG("ret = %i\n",ret);

    if (ret < 0)
    {
        ereport(ERROR,
                (errcode(ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED),
                 errmsg("Error computing path: %s", err_msg)));
    }

    /* finish() presumably runs SPI_finish() and passes `ret` through —
     * TODO confirm against its definition elsewhere in this file. */
    return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(shortest_path_shooting_star);

/*
 * shortest_path_shooting_star
 *
 * Set-returning SQL wrapper: args are (sql text, source_edge_id int,
 * target_edge_id int, directed bool, has_reverse_cost bool).  Computes the
 * path once on the first call and then returns one pgr_costResult row
 * (seq, vertex_id, edge_id, cost) per call.
 */
Datum
shortest_path_shooting_star(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    int call_cntr;
    int max_calls;
    TupleDesc tuple_desc;
    path_element_t *path = 0;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;
        int path_count = 0;
        int ret;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        ret = compute_shortest_path_shooting_star(text2char(PG_GETARG_TEXT_P(0)),
                                                  PG_GETARG_INT32(1),
                                                  PG_GETARG_INT32(2),
                                                  PG_GETARG_BOOL(3),
                                                  PG_GETARG_BOOL(4),
                                                  &path, &path_count);

#ifdef DEBUG
        DBG("Ret is %i", ret);
        if (ret >= 0)
        {
            int i;
            for (i = 0; i < path_count; i++)
            {
                DBG("Step # %i vertex_id %i ", i, path[i].vertex_id);
                DBG(" edge_id %i ", path[i].edge_id);
                DBG(" cost %f ", path[i].cost);
            }
        }
#endif

        /* total number of tuples to be returned */
        DBG("Conting tuples number\n");
        funcctx->max_calls = path_count;
        funcctx->user_fctx = path;

        DBG("Path count %i", path_count);

        /* NOTE(review): heap_formtuple (used below) and
         * RelationNameGetTupleDesc are deprecated in newer PostgreSQL. */
        funcctx->tuple_desc = BlessTupleDesc(RelationNameGetTupleDesc("pgr_costResult"));

        MemoryContextSwitchTo(oldcontext);
    }

    /* stuff done on every call of the function */
    funcctx = SRF_PERCALL_SETUP();

    call_cntr = funcctx->call_cntr;
    max_calls = funcctx->max_calls;
    tuple_desc = funcctx->tuple_desc;
    path = (path_element_t*) funcctx->user_fctx;

    DBG("Trying to allocate some memory\n");

    if (call_cntr < max_calls)      /* do when there is more left to send */
    {
        HeapTuple tuple;
        Datum result;
        Datum *values;
        char* nulls;

        values = palloc(4 * sizeof(Datum));
        nulls = palloc(4 * sizeof(char));

        /* ' ' means "not null" in the old heap_formtuple convention */
        values[0] = Int32GetDatum(call_cntr);
        nulls[0] = ' ';
        values[1] = Int32GetDatum(path[call_cntr].vertex_id);
        nulls[1] = ' ';
        values[2] = Int32GetDatum(path[call_cntr].edge_id);
        nulls[2] = ' ';
        values[3] = Float8GetDatum(path[call_cntr].cost);
        nulls[3] = ' ';

        tuple = heap_formtuple(tuple_desc, values, nulls);

        /* make the tuple into a datum */
        result = HeapTupleGetDatum(tuple);

        /* clean up (this is not really necessary) */
        pfree(values);
        pfree(nulls);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else    /* do when there is no more left */
    {
        /* free(), not pfree(): path is presumably malloc'd inside
         * boost_shooting_star — TODO confirm against its implementation. */
        if (path) free(path);
        SRF_RETURN_DONE(funcctx);
    }
}
Datum geography_as_geojson(PG_FUNCTION_ARGS) { LWGEOM *lwgeom = NULL; GSERIALIZED *g = NULL; char *geojson; text *result; int version; int option = 0; int has_bbox = 0; int precision = OUT_MAX_DOUBLE_PRECISION; char * srs = NULL; /* Get the version */ version = PG_GETARG_INT32(0); if ( version != 1) { elog(ERROR, "Only GeoJSON 1 is supported"); PG_RETURN_NULL(); } /* Get the geography */ if (PG_ARGISNULL(1) ) PG_RETURN_NULL(); g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1)); /* Convert to lwgeom so we can run the old functions */ lwgeom = lwgeom_from_gserialized(g); /* Retrieve precision if any (default is max) */ if (PG_NARGS() >2 && !PG_ARGISNULL(2)) { precision = PG_GETARG_INT32(2); if ( precision > OUT_MAX_DOUBLE_PRECISION ) precision = OUT_MAX_DOUBLE_PRECISION; else if ( precision < 0 ) precision = 0; } /* Retrieve output option * 0 = without option (default) * 1 = bbox * 2 = short crs * 4 = long crs */ if (PG_NARGS() >3 && !PG_ARGISNULL(3)) option = PG_GETARG_INT32(3); if (option & 2 || option & 4) { /* Geography only handle srid SRID_DEFAULT */ if (option & 2) srs = getSRSbySRID(SRID_DEFAULT, true); if (option & 4) srs = getSRSbySRID(SRID_DEFAULT, false); if (!srs) { elog(ERROR, "SRID SRID_DEFAULT unknown in spatial_ref_sys table"); PG_RETURN_NULL(); } } if (option & 1) has_bbox = 1; geojson = lwgeom_to_geojson(lwgeom, srs, precision, has_bbox); lwgeom_free(lwgeom); PG_FREE_IF_COPY(g, 1); if (srs) pfree(srs); result = cstring2text(geojson); lwfree(geojson); PG_RETURN_TEXT_P(result); }
Datum gin_consistent_jsonb(PG_FUNCTION_ARGS) { bool *check = (bool *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); /* Jsonb *query = PG_GETARG_JSONB(2); */ int32 nkeys = PG_GETARG_INT32(3); /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); bool res = true; int32 i; if (strategy == JsonbContainsStrategyNumber) { /* * We must always recheck, since we can't tell from the index whether * the positions of the matched items match the structure of the query * object. (Even if we could, we'd also have to worry about hashed * keys and the index's failure to distinguish keys from string array * elements.) However, the tuple certainly doesn't match unless it * contains all the query keys. */ *recheck = true; for (i = 0; i < nkeys; i++) { if (!check[i]) { res = false; break; } } } else if (strategy == JsonbExistsStrategyNumber) { /* * Although the key is certainly present in the index, we must recheck * because (1) the key might be hashed, and (2) the index match might * be for a key that's not at top level of the JSON object. For (1), * we could look at the query key to see if it's hashed and not * recheck if not, but the index lacks enough info to tell about (2). */ *recheck = true; res = true; } else if (strategy == JsonbExistsAnyStrategyNumber) { /* As for plain exists, we must recheck */ *recheck = true; res = true; } else if (strategy == JsonbExistsAllStrategyNumber) { /* As for plain exists, we must recheck */ *recheck = true; /* ... but unless all the keys are present, we can say "false" */ for (i = 0; i < nkeys; i++) { if (!check[i]) { res = false; break; } } } else elog(ERROR, "unrecognized strategy number: %d", strategy); PG_RETURN_BOOL(res); }
/*
 * record_recv - binary input routine for any composite type.
 *
 * Wire format: int32 column count, then per column an Oid type id,
 * an int32 data length (-1 => NULL), and the column's binary value.
 * Per-column receive-function lookups are cached in fn_extra across
 * calls, keyed on (record_type, record_typmod, ncolumns).
 */
Datum
record_recv(PG_FUNCTION_ARGS)
{
	StringInfo	buf = (StringInfo) PG_GETARG_POINTER(0);
	Oid			tupType = PG_GETARG_OID(1);

#ifdef NOT_USED
	int32		typmod = PG_GETARG_INT32(2);
#endif
	HeapTupleHeader result;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTuple	tuple;
	RecordIOData *my_extra;
	int			ncolumns;
	int			usercols;
	int			validcols;
	int			i;
	Datum	   *values;
	bool	   *nulls;

	/*
	 * Use the passed type unless it's RECORD; we can't support input of
	 * anonymous types, mainly because there's no good way to figure out which
	 * anonymous type is wanted.  Note that for RECORD, what we'll probably
	 * actually get is RECORD's typelem, ie, zero.
	 */
	if (tupType == InvalidOid || tupType == RECORDOID)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("input of anonymous composite types is not implemented")));
	tupTypmod = -1;				/* for all non-anonymous types */
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		/* first call (or column count changed): allocate the cache */
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		/* record type changed: reset the per-column I/O cache */
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	/* Fetch number of columns user thinks it has */
	usercols = pq_getmsgint(buf, 4);

	/* Need to scan to count nondeleted columns */
	validcols = 0;
	for (i = 0; i < ncolumns; i++)
	{
		if (!tupdesc->attrs[i]->attisdropped)
			validcols++;
	}
	if (usercols != validcols)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("wrong number of columns: %d, expected %d",
						usercols, validcols)));

	/* Process each column */
	for (i = 0; i < ncolumns; i++)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		Oid			coltypoid;
		int			itemlen;
		StringInfoData item_buf;
		StringInfo	bufptr;
		char		csave;

		/* Ignore dropped columns in datatype, but fill with nulls */
		if (tupdesc->attrs[i]->attisdropped)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
			continue;
		}

		/* Verify column datatype */
		coltypoid = pq_getmsgint(buf, sizeof(Oid));
		if (coltypoid != column_type)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("wrong data type: %u, expected %u",
							coltypoid, column_type)));

		/* Get and check the item length */
		itemlen = pq_getmsgint(buf, 4);
		if (itemlen < -1 || itemlen > (buf->len - buf->cursor))
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
					 errmsg("insufficient data left in message")));

		if (itemlen == -1)
		{
			/* -1 length means NULL */
			bufptr = NULL;
			nulls[i] = true;
			csave = 0;			/* keep compiler quiet */
		}
		else
		{
			/*
			 * Rather than copying data around, we just set up a phony
			 * StringInfo pointing to the correct portion of the input buffer.
			 * We assume we can scribble on the input buffer so as to maintain
			 * the convention that StringInfos have a trailing null.
			 */
			item_buf.data = &buf->data[buf->cursor];
			item_buf.maxlen = itemlen + 1;
			item_buf.len = itemlen;
			item_buf.cursor = 0;

			buf->cursor += itemlen;

			/* temporarily NUL-terminate; byte restored after the call */
			csave = buf->data[buf->cursor];
			buf->data[buf->cursor] = '\0';

			bufptr = &item_buf;
			nulls[i] = false;
		}

		/* Now call the column's receiveproc (cache the lookup once) */
		if (column_info->column_type != column_type)
		{
			getTypeBinaryInputInfo(column_type,
								   &column_info->typiofunc,
								   &column_info->typioparam);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		values[i] = ReceiveFunctionCall(&column_info->proc,
										bufptr,
										column_info->typioparam,
										tupdesc->attrs[i]->atttypmod);

		if (bufptr)
		{
			/* Trouble if it didn't eat the whole buffer */
			if (item_buf.cursor != itemlen)
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
						 errmsg("improper binary format in record column %d",
								i + 1)));

			/* restore the byte we overwrote with '\0' above */
			buf->data[buf->cursor] = csave;
		}
	}

	tuple = heap_form_tuple(tupdesc, values, nulls);

	/*
	 * We cannot return tuple->t_data because heap_form_tuple allocates it as
	 * part of a larger chunk, and our caller may expect to be able to pfree
	 * our result.  So must copy the info into a new palloc chunk.
	 */
	result = (HeapTupleHeader) palloc(tuple->t_len);
	memcpy(result, tuple->t_data, tuple->t_len);

	heap_freetuple(tuple);
	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	PG_RETURN_HEAPTUPLEHEADER(result);
}
/*
 * geography_as_gml
 *
 * ST_AsGML for geography: args are
 *   (version int, geography, [precision int], [options int], [prefix text]).
 * Only GML 2 and 3 are accepted.  Option bits: 1 = long CRS (and degree
 * axis order for GML3), 2 = drop the dimension attribute.  Geography is
 * always in SRID_DEFAULT.  Returns the GML string as text.
 */
Datum geography_as_gml(PG_FUNCTION_ARGS)
{
	LWGEOM *lwgeom = NULL;
	GSERIALIZED *g = NULL;
	char *gml;
	text *result;
	int version;
	char *srs;
	int srid = SRID_DEFAULT;
	int precision = OUT_MAX_DOUBLE_PRECISION;
	int option=0;
	int lwopts = LW_GML_IS_DIMS;
	static const char *default_prefix = "gml:";
	char *prefixbuf;
	const char* prefix = default_prefix;
	text *prefix_text;

	/* Get the version */
	version = PG_GETARG_INT32(0);
	if ( version != 2 && version != 3 )
	{
		elog(ERROR, "Only GML 2 and GML 3 are supported");
		PG_RETURN_NULL();
	}

	/* Get the geography */
	if ( PG_ARGISNULL(1) ) PG_RETURN_NULL();
	g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1));

	/* Convert to lwgeom so we can run the old functions */
	lwgeom = lwgeom_from_gserialized(g);

	/* Retrieve precision if any (default is max), clamped to [0, max] */
	if (PG_NARGS() >2 && !PG_ARGISNULL(2))
	{
		precision = PG_GETARG_INT32(2);
		if ( precision > OUT_MAX_DOUBLE_PRECISION )
			precision = OUT_MAX_DOUBLE_PRECISION;
		else if ( precision < 0 ) precision = 0;
	}

	/* retrieve option */
	if (PG_NARGS() >3 && !PG_ARGISNULL(3))
		option = PG_GETARG_INT32(3);

	/* retrieve prefix; empty text means "no namespace prefix" */
	if (PG_NARGS() >4 && !PG_ARGISNULL(4))
	{
		prefix_text = PG_GETARG_TEXT_P(4);
		if ( VARSIZE(prefix_text)-VARHDRSZ == 0 )
		{
			prefix = "";
		}
		else
		{
			/* +2 is one for the ':' and one for term null */
			prefixbuf = palloc(VARSIZE(prefix_text)-VARHDRSZ+2);
			memcpy(prefixbuf, VARDATA(prefix_text),
			       VARSIZE(prefix_text)-VARHDRSZ);
			/* add colon and null terminate */
			prefixbuf[VARSIZE(prefix_text)-VARHDRSZ] = ':';
			prefixbuf[VARSIZE(prefix_text)-VARHDRSZ+1] = '\0';
			prefix = prefixbuf;
		}
	}

	/* option bit 1 selects the long-form SRS string */
	if (option & 1) srs = getSRSbySRID(srid, false);
	else srs = getSRSbySRID(srid, true);
	if (!srs)
	{
		elog(ERROR, "SRID %d unknown in spatial_ref_sys table", SRID_DEFAULT);
		PG_RETURN_NULL();
	}

	/* Revert lat/lon only with long SRS */
	if (option & 1) lwopts |= LW_GML_IS_DEGREE;
	if (option & 2) lwopts &= ~LW_GML_IS_DIMS;

	if (version == 2)
		gml = lwgeom_to_gml2(lwgeom, srs, precision, prefix);
	else
		gml = lwgeom_to_gml3(lwgeom, srs, precision, lwopts, prefix);

	lwgeom_free(lwgeom);
	PG_FREE_IF_COPY(g, 1);

	/* Return null on null */
	if ( ! gml )
		PG_RETURN_NULL();

	/* Turn string result into text for return */
	result = cstring2text(gml);
	lwfree(gml);

	PG_RETURN_TEXT_P(result);
}
/*
 * pgfadvise
 *
 * Set-returning function: args are (relation oid, fork name text,
 * advice int).  Opens the relation (AccessShareLock, held across the
 * whole SRF scan), then on each call applies the posix_fadvise advice
 * to the next segment file of the relation and returns one row
 * (filename, os page size, pages in segment, free page cache pages).
 * Finishes when pgfadvise_file() reports no more segments.
 */
Datum
pgfadvise(PG_FUNCTION_ARGS)
{
	/* SRF Stuff */
	FuncCallContext *funcctx;
	pgfadvise_fctx *fctx;

	/* our structure use to return values */
	pgfadviseStruct *pgfdv;

	/* our return value, 0 for success */
	int 		result;

	/* The file we are working on */
	char		filename[MAXPGPATH];

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;
		Oid			relOid = PG_GETARG_OID(0);
		text		*forkName = PG_GETARG_TEXT_P(1);
		int			advice = PG_GETARG_INT32(2);

		/*
		 * Postgresql stuff to return a tuple
		 */
		TupleDesc	tupdesc;

		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();

		/*
		 * switch to memory context appropriate for multiple function calls
		 */
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		/* allocate memory for user context */
		fctx = (pgfadvise_fctx *) palloc(sizeof(pgfadvise_fctx));

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
			elog(ERROR, "pgfadvise: return type must be a row type");

		/* provide the tuple descriptor to the fonction structure */
		fctx->tupd = tupdesc;

		/* open the current relation, accessShareLock */
		// TODO use try_relation_open instead ?
		fctx->rel = relation_open(relOid, AccessShareLock);

		/* we get the common part of the filename of each segment of a relation */
		fctx->relationpath = relpathpg(fctx->rel, forkName);

		/* Here we keep track of current action in all calls */
		fctx->advice = advice;

		/* segcount is used to get the next segment of the current relation */
		fctx->segcount = 0;

		/* And finally we keep track of our initialization */
		elog(DEBUG1, "pgfadvise: init done for %s, in fork %s",
			 fctx->relationpath, text_to_cstring(forkName));
		funcctx->user_fctx = fctx;
		MemoryContextSwitchTo(oldcontext);
	}

	/* After the first call, we recover our context */
	funcctx = SRF_PERCALL_SETUP();
	fctx = funcctx->user_fctx;

	/*
	 * If we are still looking the first segment
	 * relationpath should not be suffixed
	 */
	if (fctx->segcount == 0)
		snprintf(filename, MAXPGPATH, "%s", fctx->relationpath);
	else
		snprintf(filename, MAXPGPATH, "%s.%u",
				 fctx->relationpath, fctx->segcount);

	elog(DEBUG1, "pgfadvise: about to work with %s, current advice : %d",
		 filename, fctx->advice);

	/*
	 * Call posix_fadvise with the advice, returning the structure
	 */
	pgfdv = (pgfadviseStruct *) palloc(sizeof(pgfadviseStruct));
	result = pgfadvise_file(filename, fctx->advice, pgfdv);

	/*
	 * When we have work with all segments of the current relation
	 * We exit from the SRF
	 * Else we build and return the tuple for this segment
	 */
	if (result)
	{
		elog(DEBUG1, "pgfadvise: closing %s", fctx->relationpath);
		relation_close(fctx->rel, AccessShareLock);
		pfree(fctx);
		SRF_RETURN_DONE(funcctx);
	}
	else
	{
		/*
		 * Postgresql stuff to return a tuple
		 */
		HeapTuple	tuple;
		Datum		values[PGFADVISE_COLS];
		bool		nulls[PGFADVISE_COLS];

		/* initialize nulls array to build the tuple */
		memset(nulls, 0, sizeof(nulls));

		/* prepare the number of the next segment */
		fctx->segcount++;

		/* Filename */
		values[0] = CStringGetTextDatum( filename );
		/* os page size */
		values[1] = Int64GetDatum( (int64) pgfdv->pageSize );
		/* number of pages used by segment (rounded up) */
		values[2] = Int64GetDatum( (int64) ((pgfdv->filesize+pgfdv->pageSize-1)/pgfdv->pageSize) );
		/* free page cache */
		values[3] = Int64GetDatum( (int64) pgfdv->pagesFree );

		/* Build the result tuple. */
		tuple = heap_form_tuple(fctx->tupd, values, nulls);

		/* Ok, return results, and go for next call */
		SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
	}
}
/*
table_log_restore_table()

restore a complete table based on the logging table

parameter:
  - original table name
  - name of primary key in original table
  - logging table
  - name of primary key in logging table
  - restore table name
  - timestamp for restoring data
  - primary key to restore (only this key will be restored) (optional)
  - restore mode
    0: restore from blank table (default)
       needs a complete logging table
    1: restore from actual table backwards
  - dont create table temporarly
    0: create restore table temporarly (default)
    1: create restore table not temporarly
  return:
    not yet defined
*/
Datum table_log_restore_table(PG_FUNCTION_ARGS) {
  /* the original table name */
  char  *table_orig;
  /* the primary key in the original table */
  char  *table_orig_pkey;
  /* number columns in original table */
  int  table_orig_columns;
  /* the log table name */
  char  *table_log;
  /* the primary key in the log table (usually trigger_id) */
  /* cannot be the same then then the pkey in the original table */
  char  *table_log_pkey;
  /* number columns in log table */
  int  table_log_columns;
  /* the restore table name */
  char  *table_restore;
  /* the timestamp in past */
  Datum      timestamp = PG_GETARG_DATUM(5);
  /* the single pkey, can be null (then all keys will be restored) */
  char  *search_pkey = "";
  /* the restore method
     - 0: restore from blank table (default)
          needs a complete log table!
     - 1: restore from actual table backwards
  */
  int            method = 0;
  /* dont create restore table temporarly
     - 0: create restore table temporarly (default)
     - 1: dont create restore table temporarly
  */
  int            not_temporarly = 0;
  int            ret, results, i, number_columns;
  /* for getting table infos (250 chars (+ one times the length of all names)
     should be enough) */
  char           query[250 + NAMEDATALEN];
  /* does we have a single key to restore? */
  int            need_search_pkey = 0;
  char           *tmp, *timestamp_string, *old_pkey_string = "";
  char           *trigger_mode, *trigger_tuple, *trigger_changed;
  SPITupleTable  *spi_tuptable = NULL;      /* for saving query results */
  VarChar        *return_name;

  /* memory for dynamic query */
  int      d_query_size = 250;              /* start with 250 bytes */
  char     *d_query;
  char     *d_query_start;

  /* memory for column names */
  int      col_query_size = 0;
  char     *col_query;
  char     *col_query_start;

  int      col_pkey = 0;                    /* 1-based column number of the pkey */

  /*
   * Some checks first...
   */

#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "start table_log_restore_table()");
#endif /* TABLE_LOG_DEBUG */

  /* does we have all arguments? */
  if (PG_ARGISNULL(0)) {
    elog(ERROR, "table_log_restore_table: missing original table name");
  }
  if (PG_ARGISNULL(1)) {
    elog(ERROR, "table_log_restore_table: missing primary key name for original table");
  }
  if (PG_ARGISNULL(2)) {
    elog(ERROR, "table_log_restore_table: missing log table name");
  }
  if (PG_ARGISNULL(3)) {
    elog(ERROR, "table_log_restore_table: missing primary key name for log table");
  }
  if (PG_ARGISNULL(4)) {
    elog(ERROR, "table_log_restore_table: missing copy table name");
  }
  if (PG_ARGISNULL(5)) {
    elog(ERROR, "table_log_restore_table: missing timestamp");
  }

  /* first check number arguments to avoid an segfault */
  if (PG_NARGS() >= 7) {
    /* if argument is given, check if not null */
    if (!PG_ARGISNULL(6)) {
      /* yes, fetch it */
      search_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(6));
      /* and check, if we have an argument */
      if (strlen(search_pkey) > 0) {
        need_search_pkey = 1;
#ifdef TABLE_LOG_DEBUG
        elog(NOTICE, "table_log_restore_table: will restore a single key");
#endif /* TABLE_LOG_DEBUG */
      }
    }
  }

  /* same procedere here */
  if (PG_NARGS() >= 8) {
    if (!PG_ARGISNULL(7)) {
      method = PG_GETARG_INT32(7);
      /* normalize to 0/1 */
      if (method > 0) {
        method = 1;
      } else {
        method = 0;
      }
    }
  }
#ifdef TABLE_LOG_DEBUG
  if (method == 1) {
    elog(NOTICE, "table_log_restore_table: will restore from actual state backwards");
  } else {
    elog(NOTICE, "table_log_restore_table: will restore from begin forward");
  }
#endif /* TABLE_LOG_DEBUG */

  if (PG_NARGS() >= 9) {
    if (!PG_ARGISNULL(8)) {
      not_temporarly = PG_GETARG_INT32(8);
      /* normalize to 0/1 */
      if (not_temporarly > 0) {
        not_temporarly = 1;
      } else {
        not_temporarly = 0;
      }
    }
  }
#ifdef TABLE_LOG_DEBUG
  if (not_temporarly == 1) {
    elog(NOTICE, "table_log_restore_table: dont create restore table temporarly");
  }
#endif /* TABLE_LOG_DEBUG */

  /* get parameter */
  table_orig = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(0));
  table_orig_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(1));
  table_log = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(2));
  table_log_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(3));
  table_restore = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(4));

  /* pkey of original table cannot be the same as of log table */
  if (strcmp((const char *)table_orig_pkey, (const char *)table_log_pkey) == 0) {
    elog(ERROR, "pkey of logging table cannot be the pkey of the original table: %s <-> %s", table_orig_pkey, table_log_pkey);
  }

  /* Connect to SPI manager */
  ret = SPI_connect();
  if (ret != SPI_OK_CONNECT) {
    elog(ERROR, "table_log_restore_table: SPI_connect returned %d", ret);
  }

  /* check original table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_orig));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
  if (SPI_processed > 0) {
    table_orig_columns = SPI_processed;
  } else {
    elog(ERROR, "could not check relation: %s", table_orig);
  }

  /* check pkey in original table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname=%s AND c.relkind='r' AND a.attname=%s AND a.attnum > 0 AND a.attrelid = c.oid", do_quote_literal(table_orig), do_quote_literal(table_orig_pkey));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
  if (SPI_processed == 0) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
#ifdef TABLE_LOG_DEBUG
  /* NOTE(review): %i with SPI_processed-derived ints assumes int width;
     on newer PostgreSQL SPI_processed is uint64 — verify target version. */
  elog(NOTICE, "original table: OK (%i columns)", table_orig_columns);
#endif /* TABLE_LOG_DEBUG */

  /* check log table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_log));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation [1]: %s", table_log);
  }
  if (SPI_processed > 0) {
    table_log_columns = SPI_processed;
  } else {
    elog(ERROR, "could not check relation [2]: %s", table_log);
  }

  /* check pkey in log table */
  snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname=%s AND c.relkind='r' AND a.attname=%s AND a.attnum > 0 AND a.attrelid = c.oid", do_quote_literal(table_log), do_quote_literal(table_log_pkey));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation [3]: %s", table_log);
  }
  if (SPI_processed == 0) {
    elog(ERROR, "could not check relation [4]: %s", table_log);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "log table: OK (%i columns)", table_log_columns);
#endif /* TABLE_LOG_DEBUG */

  /* check restore table (must not exist yet) */
  snprintf(query, 249, "SELECT pg_attribute.attname AS a FROM pg_class, pg_attribute WHERE pg_class.relname=%s AND pg_attribute.attnum > 0 AND pg_attribute.attrelid=pg_class.oid", do_quote_literal(table_restore));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not check relation: %s", table_restore);
  }
  if (SPI_processed > 0) {
    elog(ERROR, "restore table already exists: %s", table_restore);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "restore table: OK (doesnt exists)");
#endif /* TABLE_LOG_DEBUG */

  /* now get all columns from original table */
  snprintf(query, 249, "SELECT a.attname, format_type(a.atttypid, a.atttypmod), a.attnum FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_orig));
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not get columns from relation: %s", table_orig);
  }
  if (SPI_processed == 0) {
    elog(ERROR, "could not check relation: %s", table_orig);
  }
  results = SPI_processed;
  /* store number columns for later */
  number_columns = SPI_processed;
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "number columns: %i", results);
#endif /* TABLE_LOG_DEBUG */

  /* first pass: size the column list and locate the pkey column */
  for (i = 0; i < results; i++) {
    /* the column name */
    tmp = SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1);
    col_query_size += strlen(do_quote_ident(tmp)) + 2;
    /* now check, if this is the pkey */
    if (strcmp((const char *)tmp, (const char *)table_orig_pkey) == 0) {
      /* remember the (real) number */
      col_pkey = i + 1;
    }
  }
  /* check if we have found the pkey */
  if (col_pkey == 0) {
    elog(ERROR, "cannot find pkey (%s) in table %s", table_orig_pkey, table_orig);
  }

  /* allocate memory for string */
  col_query_size += 10;
  col_query_start = (char *) palloc((col_query_size + 1) * sizeof(char));
  col_query = col_query_start;

  /* second pass: build the comma-separated, quoted column list */
  for (i = 0; i < results; i++) {
    if (i > 0) {
      sprintf(col_query, ", ");
      col_query = col_query_start + strlen(col_query_start);
    }
    sprintf(col_query, "%s", do_quote_ident(SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1)));
    col_query = col_query_start + strlen(col_query_start);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "string for columns: %s", col_query_start);
#endif /* TABLE_LOG_DEBUG */

  /* create restore table */
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "create restore table: %s", table_restore);
#endif /* TABLE_LOG_DEBUG */
  snprintf(query, 249, "SELECT * INTO ");
  /* per default create a temporary table */
  if (not_temporarly == 0) {
    strcat(query, "TEMPORARY ");
  }
  strcat(query, "TABLE ");
  /* NOTE(review): strncat's third argument is the max number of chars to
     APPEND, not the total buffer size — using 249 here does not bound the
     query[] buffer (size 250 + NAMEDATALEN) correctly; verify overflow is
     impossible for long table names. */
  strncat(query, table_restore, 249);
  /* from which table? */
  strncat(query, " FROM ", 249);
  strncat(query, table_orig, 249);
  if (need_search_pkey == 1) {
    /* only extract a specific key */
    strncat(query, " WHERE ", 249);
    strncat(query, do_quote_ident(table_orig_pkey), 249);
    strncat(query, "=", 249);
    strncat(query, do_quote_literal(search_pkey), 249);
  }
  if (method == 0) {
    /* restore from begin (blank table) */
    strncat(query, " LIMIT 0", 249);
  }
#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(query, 0);
  if (ret != SPI_OK_SELINTO) {
    elog(ERROR, "could not check relation: %s", table_restore);
  }
  if (method == 1) {
#ifdef TABLE_LOG_DEBUG
    elog(NOTICE, "%i rows copied", SPI_processed);
#endif /* TABLE_LOG_DEBUG */
  }

  /* get timestamp as string */
  timestamp_string = DatumGetCString(DirectFunctionCall1(timestamptz_out, timestamp));

#ifdef TABLE_LOG_DEBUG
  if (method == 0) {
    elog(NOTICE, "need logs from start to timestamp: %s", timestamp_string);
  } else {
    elog(NOTICE, "need logs from end to timestamp: %s", timestamp_string);
  }
#endif /* TABLE_LOG_DEBUG */

  /* now build query for getting logs */
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "build query for getting logs");
#endif /* TABLE_LOG_DEBUG */
  /* size the dynamic query buffer */
  d_query_size += d_query_size + strlen(col_query_start);
  if (need_search_pkey == 1) {
    /* add size of pkey and size of value */
    d_query_size += strlen(do_quote_ident(table_orig_pkey)) * 2 + strlen(do_quote_literal(search_pkey)) + 3;
  }
  /* allocate memory for string */
  d_query_size += 10;
  d_query_start = (char *) palloc((d_query_size + 1) * sizeof(char));
  d_query = d_query_start;

  /* select all logged columns plus the trigger bookkeeping columns */
  snprintf(d_query, d_query_size, "SELECT %s, trigger_mode, trigger_tuple, trigger_changed FROM %s WHERE ", col_query_start, do_quote_ident(table_log));
  d_query = d_query_start + strlen(d_query_start);

  if (method == 0) {
    /* from start to timestamp */
    snprintf(d_query, d_query_size, "trigger_changed <= %s ", do_quote_literal(timestamp_string));
  } else {
    /* from now() backwards to timestamp */
    snprintf(d_query, d_query_size, "trigger_changed >= %s ", do_quote_literal(timestamp_string));
  }
  d_query = d_query_start + strlen(d_query_start);

  if (need_search_pkey == 1) {
    snprintf(d_query, d_query_size, "AND %s = %s ", do_quote_ident(table_orig_pkey), do_quote_literal(search_pkey));
    d_query = d_query_start + strlen(d_query_start);
  }

  /* replay order: forward => ascending log pkey, backward => descending */
  if (method == 0) {
    snprintf(d_query, d_query_size, "ORDER BY %s ASC", do_quote_ident(table_log_pkey));
  } else {
    snprintf(d_query, d_query_size, "ORDER BY %s DESC", do_quote_ident(table_log_pkey));
  }
  d_query = d_query_start + strlen(d_query_start);

#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", d_query_start);
#endif /* TABLE_LOG_DEBUG_QUERY */
  ret = SPI_exec(d_query_start, 0);
  if (ret != SPI_OK_SELECT) {
    elog(ERROR, "could not get log data from table: %s", table_log);
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "number log entries to restore: %i", SPI_processed);
#endif /* TABLE_LOG_DEBUG */
  results = SPI_processed;
  /* save results */
  spi_tuptable = SPI_tuptable;

  /* go through all results and replay (or reverse-replay) each log entry */
  for (i = 0; i < results; i++) {
    /* get tuple data */
    trigger_mode = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 1);
    trigger_tuple = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 2);
    trigger_changed = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 3);
    /* check for update tuples we doesnt need */
    if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
      if (method == 0 && strcmp((const char *)trigger_tuple, (const char *)"old") == 0) {
        /* we need the old value of the pkey for the update */
        old_pkey_string = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, col_pkey);
#ifdef TABLE_LOG_DEBUG
        elog(NOTICE, "tuple old pkey: %s", old_pkey_string);
#endif /* TABLE_LOG_DEBUG */
        /* then skip this tuple */
        continue;
      }
      if (method == 1 && strcmp((const char *)trigger_tuple, (const char *)"new") == 0) {
        /* we need the old value of the pkey for the update */
        old_pkey_string = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, col_pkey);
#ifdef TABLE_LOG_DEBUG
        elog(NOTICE, "tuple: old pkey: %s", old_pkey_string);
#endif /* TABLE_LOG_DEBUG */
        /* then skip this tuple */
        continue;
      }
    }

    if (method == 0) {
      /* roll forward: apply each log entry as-is */
#ifdef TABLE_LOG_DEBUG
      elog(NOTICE, "tuple: %s %s %s", trigger_mode, trigger_tuple, trigger_changed);
#endif /* TABLE_LOG_DEBUG */
      if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
        __table_log_restore_table_insert(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      } else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
        __table_log_restore_table_update(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i, old_pkey_string);
      } else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
        __table_log_restore_table_delete(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      } else {
        elog(ERROR, "unknown trigger_mode: %s", trigger_mode);
      }
    } else {
      /* roll back: apply the inverse operation of each log entry */
      char rb_mode[10];       /* reversed mode, for debug output only */
      /* reverse the method */
      if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
        sprintf(rb_mode, "DELETE");
      } else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
        sprintf(rb_mode, "UPDATE");
      } else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
        sprintf(rb_mode, "INSERT");
      } else {
        elog(ERROR, "unknown trigger_mode: %s", trigger_mode);
      }
#ifdef TABLE_LOG_DEBUG
      elog(NOTICE, "tuple: %s %s %s", rb_mode, trigger_tuple, trigger_changed);
#endif /* TABLE_LOG_DEBUG */
      if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
        __table_log_restore_table_delete(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      } else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
        __table_log_restore_table_update(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i, old_pkey_string);
      } else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
        __table_log_restore_table_insert(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
      }
    }
  }
#ifdef TABLE_LOG_DEBUG
  elog(NOTICE, "table_log_restore_table() done, results in: %s", table_restore);
#endif /* TABLE_LOG_DEBUG */

  /* convert string to VarChar for result */
  return_name = DatumGetVarCharP(DirectFunctionCall2(varcharin, CStringGetDatum(table_restore), Int32GetDatum(strlen(table_restore) + VARHDRSZ)));

  /* close SPI connection */
  SPI_finish();

  /* and return the name of the restore table */
  PG_RETURN_VARCHAR_P(return_name);
}
/* * In all cases, GIN_TRUE is at least as favorable to inclusion as * GIN_MAYBE. If no better option is available, simply treat * GIN_MAYBE as if it were GIN_TRUE and apply the same test as the binary * consistent function. */ Datum gin_trgm_triconsistent(PG_FUNCTION_ARGS) { GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); /* text *query = PG_GETARG_TEXT_P(2); */ int32 nkeys = PG_GETARG_INT32(3); Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); GinTernaryValue res = GIN_MAYBE; int32 i, ntrue; bool *boolcheck; switch (strategy) { case SimilarityStrategyNumber: /* Count the matches */ ntrue = 0; for (i = 0; i < nkeys; i++) { if (check[i] != GIN_FALSE) ntrue++; } /* * See comment in gin_trgm_consistent() about * upper bound formula */ res = (nkeys == 0) ? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= trgm_limit) ? GIN_MAYBE : GIN_FALSE); break; case ILikeStrategyNumber: #ifndef IGNORECASE elog(ERROR, "cannot handle ~~* with case-sensitive trigrams"); #endif /* FALL THRU */ case LikeStrategyNumber: /* Check if all extracted trigrams are presented. */ res = GIN_MAYBE; for (i = 0; i < nkeys; i++) { if (check[i] == GIN_FALSE) { res = GIN_FALSE; break; } } break; case RegExpICaseStrategyNumber: #ifndef IGNORECASE elog(ERROR, "cannot handle ~* with case-sensitive trigrams"); #endif /* FALL THRU */ case RegExpStrategyNumber: if (nkeys < 1) { /* Regex processing gave no result: do full index scan */ res = GIN_MAYBE; } else { /* * As trigramsMatchGraph implements a montonic boolean function, * promoting all GIN_MAYBE keys to GIN_TRUE will give a * conservative result. 
*/ boolcheck = (bool *) palloc(sizeof(bool) * nkeys); for (i = 0; i < nkeys; i++) boolcheck[i] = (check[i] != GIN_FALSE); if (!trigramsMatchGraph((TrgmPackedGraph *) extra_data[0], boolcheck)) res = GIN_FALSE; pfree(boolcheck); } break; default: elog(ERROR, "unrecognized strategy number: %d", strategy); res = GIN_FALSE; /* keep compiler quiet */ break; } /* All cases served by this function are inexact */ Assert(res != GIN_TRUE); PG_RETURN_GIN_TERNARY_VALUE(res); }
Datum generate_series_step_int4(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; generate_series_fctx *fctx; int32 result; MemoryContext oldcontext; /* stuff done only on the first call of the function */ if (SRF_IS_FIRSTCALL()) { int32 start = PG_GETARG_INT32(0); int32 finish = PG_GETARG_INT32(1); int32 step = 1; /* see if we were given an explicit step size */ if (PG_NARGS() == 3) step = PG_GETARG_INT32(2); if (step == 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("step size cannot equal zero"))); /* create a function context for cross-call persistence */ funcctx = SRF_FIRSTCALL_INIT(); /* * switch to memory context appropriate for multiple function calls */ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); /* allocate memory for user context */ fctx = (generate_series_fctx *) palloc(sizeof(generate_series_fctx)); /* * Use fctx to keep state from call to call. Seed current with the * original start value */ fctx->current = start; fctx->finish = finish; fctx->step = step; funcctx->user_fctx = fctx; MemoryContextSwitchTo(oldcontext); } /* stuff done on every call of the function */ funcctx = SRF_PERCALL_SETUP(); /* * get the saved state and use current as the result for this iteration */ fctx = funcctx->user_fctx; result = fctx->current; if ((fctx->step > 0 && fctx->current <= fctx->finish) || (fctx->step < 0 && fctx->current >= fctx->finish)) { /* increment current in preparation for next iteration */ fctx->current += fctx->step; /* if next-value computation overflows, this is the final result */ if (SAMESIGN(result, fctx->step) && !SAMESIGN(result, fctx->current)) fctx->step = 0; /* do when there is more left to send */ SRF_RETURN_NEXT(funcctx, Int32GetDatum(result)); } else /* do when there is no more left */ SRF_RETURN_DONE(funcctx); }
/*
 * compute_shortest_path_astar
 *		Runs the given SQL to load edges (id, source, target, cost, optional
 *		reverse_cost plus endpoint coordinates), renumbers vertex ids to a
 *		zero-based range, and calls the C++ Boost A* wrapper.
 *
 * sql              - query returning the edge set
 * source_vertex_id - start vertex (original numbering)
 * target_vertex_id - goal vertex (original numbering)
 * directed         - treat edges as directed
 * has_reverse_cost - edge rows carry a reverse_cost column
 * path/path_count  - out: computed path and its length
 *
 * Returns the boost_astar result code (>= 0 on success, < 0 on error);
 * raises ERROR on SPI failures or when start/target are absent.
 */
static int compute_shortest_path_astar(char* sql, int source_vertex_id,
                       int target_vertex_id, bool directed,
                       bool has_reverse_cost,
                       path_element_t **path, int *path_count)
{
  int SPIcode;
  void *SPIplan;
  Portal SPIportal;
  bool moredata = TRUE;
  int ntuples;
  edge_astar_t *edges = NULL;
  int total_tuples = 0;
  /* running min/max over all source/target vertex ids seen */
  int v_max_id=0;
  int v_min_id=INT_MAX;
  edge_astar_columns_t edge_columns = {.id= -1, .source= -1, .target= -1,
                                       .cost= -1, .reverse_cost= -1,
                                       .s_x= -1, .s_y= -1, .t_x= -1, .t_y= -1};
  char *err_msg;
  int ret = -1;
  register int z;
  int s_count=0;
  int t_count=0;
  /* NOTE(review): vItem is declared but never used in this function */
  struct vItem
  {
    int id;
    int key;
  };

  DBG("start shortest_path_astar\n");

  SPIcode = SPI_connect();
  if (SPIcode != SPI_OK_CONNECT)
  {
    elog(ERROR, "shortest_path_astar: couldn't open a connection to SPI");
    return -1;
  }

  SPIplan = SPI_prepare(sql, 0, NULL);
  if (SPIplan == NULL)
  {
    elog(ERROR, "shortest_path_astar: couldn't create query plan via SPI");
    return -1;
  }

  if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL)
  {
    elog(ERROR, "shortest_path_astar: SPI_cursor_open('%s') returns NULL",
         sql);
    return -1;
  }

  /* fetch the edge rows in TUPLIMIT-sized batches, growing edges[] as we go */
  while (moredata == TRUE)
  {
    SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

    /* resolve column numbers once, from the first batch's descriptor */
    if (edge_columns.id == -1)
    {
      if (fetch_edge_astar_columns(SPI_tuptable, &edge_columns,
                                   has_reverse_cost) == -1)
        return finish(SPIcode, ret);
    }

    ntuples = SPI_processed;
    total_tuples += ntuples;
    if (!edges)
      edges = palloc(total_tuples * sizeof(edge_astar_t));
    else
      edges = repalloc(edges, total_tuples * sizeof(edge_astar_t));

    if (edges == NULL)
    {
      elog(ERROR, "Out of memory");
      return finish(SPIcode, ret);
    }

    if (ntuples > 0)
    {
      int t;
      SPITupleTable *tuptable = SPI_tuptable;
      TupleDesc tupdesc = SPI_tuptable->tupdesc;

      for (t = 0; t < ntuples; t++)
      {
        HeapTuple tuple = tuptable->vals[t];
        /* append this batch after the rows already collected */
        fetch_edge_astar(&tuple, &tupdesc, &edge_columns,
                         &edges[total_tuples - ntuples + t]);
      }
      SPI_freetuptable(tuptable);
    }
    else
    {
      /* empty fetch means the cursor is exhausted */
      moredata = FALSE;
    }
  }

  /* defining min and max vertex id */
  DBG("Total %i tuples", total_tuples);

  for(z=0; z<total_tuples; z++)
  {
    if(edges[z].source<v_min_id)
      v_min_id=edges[z].source;

    if(edges[z].source>v_max_id)
      v_max_id=edges[z].source;

    if(edges[z].target<v_min_id)
      v_min_id=edges[z].target;

    if(edges[z].target>v_max_id)
      v_max_id=edges[z].target;

    DBG("%i <-> %i", v_min_id, v_max_id);
  }

  /*
   * Reducing vertex ids (renumbering): shift everything down by v_min_id so
   * the Boost graph can use a dense zero-based vertex range.  While looping,
   * also verify that the requested start/target vertices actually occur.
   */
  for(z=0; z<total_tuples; z++)
  {
    /* check if edges[] contains source and target */
    if(edges[z].source == source_vertex_id ||
       edges[z].target == source_vertex_id)
      ++s_count;
    if(edges[z].source == target_vertex_id ||
       edges[z].target == target_vertex_id)
      ++t_count;

    edges[z].source-=v_min_id;
    edges[z].target-=v_min_id;
    DBG("%i - %i", edges[z].source, edges[z].target);
  }

  DBG("Total %i tuples", total_tuples);

  if(s_count == 0)
  {
    elog(ERROR, "Start vertex was not found.");
    return -1;
  }

  if(t_count == 0)
  {
    elog(ERROR, "Target vertex was not found.");
    return -1;
  }

  DBG("Total %i tuples", total_tuples);

  profstop("extract", prof_extract);
  profstart(prof_astar);

  DBG("Calling boost_astar <%i>\n", total_tuples);

  /* calling C++ A* function; source/target are shifted the same way */
  ret = boost_astar(edges, total_tuples, source_vertex_id-v_min_id,
                    target_vertex_id-v_min_id, directed, has_reverse_cost,
                    path, path_count, &err_msg);

  if (ret < 0)
  {
    /* err_msg is only set by boost_astar on failure */
    elog(ERROR, "Error computing path: %s", err_msg);
  }

  DBG("SIZE %i\n",*path_count);
  DBG("ret = %i\n",ret);

  /* restoring original vertex ids in the returned path */
  for(z=0; z<*path_count; z++)
  {
    //DBG("vetex %i\n",(*path)[z].vertex_id);
    (*path)[z].vertex_id += v_min_id;
  }

  profstop("astar", prof_astar);
  profstart(prof_store);

  return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(shortest_path_astar);

/*
 * shortest_path_astar
 *		SQL-callable SRF: shortest_path_astar(sql text, source int,
 *		target int, directed bool, has_reverse_cost bool).
 *		Computes the path once on the first call, then returns one
 *		(seq, vertex_id, edge_id, cost) row per subsequent call.
 */
Datum
shortest_path_astar(PG_FUNCTION_ARGS)
{
  FuncCallContext     *funcctx;
  int                  call_cntr;
  int                  max_calls;
  TupleDesc            tuple_desc;
  path_element_t      *path = 0;

  /* stuff done only on the first call of the function */
  if (SRF_IS_FIRSTCALL())
  {
    MemoryContext   oldcontext;
    int path_count = 0;
    int ret;

    // XXX profiling messages are not thread safe
    profstart(prof_total);
    profstart(prof_extract);

    /* create a function context for cross-call persistence */
    funcctx = SRF_FIRSTCALL_INIT();

    /* switch to memory context appropriate for multiple function calls */
    oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

    ret = compute_shortest_path_astar(text2char(PG_GETARG_TEXT_P(0)),
                                      PG_GETARG_INT32(1),
                                      PG_GETARG_INT32(2),
                                      PG_GETARG_BOOL(3),
                                      PG_GETARG_BOOL(4),
                                      &path, &path_count);

#ifdef DEBUG
    DBG("Ret is %i", ret);
    int i;
    for (i = 0; i < path_count; i++)
    {
      DBG("Step # %i vertex_id  %i ", i, path[i].vertex_id);
      DBG("        edge_id    %i ", path[i].edge_id);
      DBG("        cost       %f ", path[i].cost);
    }
#endif

    /* total number of tuples to be returned */
    DBG("Conting tuples number\n");
    funcctx->max_calls = path_count;
    funcctx->user_fctx = path;

    DBG("Path count %i", path_count);

    funcctx->tuple_desc =
      BlessTupleDesc(RelationNameGetTupleDesc("pgr_costResult"));

    MemoryContextSwitchTo(oldcontext);
  }

  /* stuff done on every call of the function */
  DBG("Strange stuff doing\n");
  funcctx = SRF_PERCALL_SETUP();

  call_cntr = funcctx->call_cntr;
  max_calls = funcctx->max_calls;
  tuple_desc = funcctx->tuple_desc;
  path = (path_element_t*) funcctx->user_fctx;

  DBG("Trying to allocate some memory\n");

  if (call_cntr < max_calls)
  {
    /* do when there is more left to send */
    HeapTuple    tuple;
    Datum        result;
    Datum *values;
    char* nulls;

    values = palloc(4 * sizeof(Datum));
    nulls = palloc(4 * sizeof(char));

    /* columns: seq, vertex_id, edge_id, cost; ' ' marks non-null */
    values[0] = Int32GetDatum(call_cntr);
    nulls[0] = ' ';
    values[1] = Int32GetDatum(path[call_cntr].vertex_id);
    nulls[1] = ' ';
    values[2] = Int32GetDatum(path[call_cntr].edge_id);
    nulls[2] = ' ';
    values[3] = Float8GetDatum(path[call_cntr].cost);
    nulls[3] = ' ';

    DBG("Heap making\n");
    tuple = heap_formtuple(tuple_desc, values, nulls);

    DBG("Datum making\n");
    /* make the tuple into a datum */
    result = HeapTupleGetDatum(tuple);

    DBG("Trying to free some memory\n");
    /* clean up (this is not really necessary) */
    pfree(values);
    pfree(nulls);

    SRF_RETURN_NEXT(funcctx, result);
  }
  else    /* do when there is no more left */
  {
    /* path was malloc'd by the C++ wrapper, hence free(), not pfree() —
     * TODO confirm against boost_astar's allocation */
    if (path) free(path);
    profstop("store", prof_store);
    profstop("total", prof_total);
#ifdef PROFILE
    elog(NOTICE, "_________");
#endif
    SRF_RETURN_DONE(funcctx);
  }
}
/*
 * gin_trgm_consistent
 *		Binary GIN consistent function for trigram indexes: decides whether a
 *		heap row could match, given which query trigrams were found.  Always
 *		sets *recheck because trigram matching is lossy.
 */
Datum
gin_trgm_consistent(PG_FUNCTION_ARGS)
{
	bool	   *check = (bool *) PG_GETARG_POINTER(0);
	StrategyNumber strategy = PG_GETARG_UINT16(1);

	/* text    *query = PG_GETARG_TEXT_P(2); */
	int32		nkeys = PG_GETARG_INT32(3);
	Pointer    *extra_data = (Pointer *) PG_GETARG_POINTER(4);
	bool	   *recheck = (bool *) PG_GETARG_POINTER(5);
	bool		res;
	int32		i,
				ntrue;

	/* All cases served by this function are inexact */
	*recheck = true;

	switch (strategy)
	{
		case SimilarityStrategyNumber:
			/* Count the matches */
			ntrue = 0;
			for (i = 0; i < nkeys; i++)
			{
				if (check[i])
					ntrue++;
			}

			/*--------------------
			 * If DIVUNION is defined then similarity formula is:
			 * c / (len1 + len2 - c)
			 * where c is number of common trigrams and it stands as ntrue in
			 * this code.  Here we don't know value of len2 but we can assume
			 * that c (ntrue) is a lower bound of len2, so upper bound of
			 * similarity is:
			 * c / (len1 + c - c)  =>  c / len1
			 * If DIVUNION is not defined then similarity formula is:
			 * c / max(len1, len2)
			 * And again, c (ntrue) is a lower bound of len2, but c <= len1
			 * just by definition and, consequently, upper bound of
			 * similarity is just c / len1.
			 * So, independently of DIVUNION the upper bound formula is the same.
			 */
			res = (nkeys == 0) ? false : ((((((float4) ntrue) / ((float4) nkeys))) >= trgm_limit) ? true : false);
			break;
		case ILikeStrategyNumber:
#ifndef IGNORECASE
			elog(ERROR, "cannot handle ~~* with case-sensitive trigrams");
#endif
			/* FALL THRU */
		case LikeStrategyNumber:
			/* Check if all extracted trigrams are present. */
			res = true;
			for (i = 0; i < nkeys; i++)
			{
				if (!check[i])
				{
					res = false;
					break;
				}
			}
			break;
		case RegExpICaseStrategyNumber:
#ifndef IGNORECASE
			elog(ERROR, "cannot handle ~* with case-sensitive trigrams");
#endif
			/* FALL THRU */
		case RegExpStrategyNumber:
			if (nkeys < 1)
			{
				/* Regex processing gave no result: do full index scan */
				res = true;
			}
			else
				/* extra_data[0] holds the packed trigram graph built at
				 * extractQuery time */
				res = trigramsMatchGraph((TrgmPackedGraph *) extra_data[0],
										 check);
			break;
		default:
			elog(ERROR, "unrecognized strategy number: %d", strategy);
			res = false;		/* keep compiler quiet */
			break;
	}

	PG_RETURN_BOOL(res);
}
/*
 * rangesel -- restriction selectivity for range operators
 *
 * Estimates what fraction of rows satisfy "var OP const" for a range
 * operator, falling back to a per-operator default whenever the expression
 * shape or the constant's type prevents a statistics-based estimate.
 */
Datum
rangesel(PG_FUNCTION_ARGS)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator___ = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);
	VariableStatData vardata;
	Node	   *other;
	bool		varonleft;
	Selectivity selec;
	TypeCacheEntry *typcache = NULL;
	RangeType  *constrange = NULL;

	/*
	 * If expression is not (variable op something) or (something op
	 * variable), then punt and return a default estimate.
	 */
	if (!get_restriction_variable(root, args, varRelid,
								  &vardata, &other, &varonleft))
		PG_RETURN_FLOAT8(default_range_selectivity(operator___));

	/*
	 * Can't do anything useful if the something is not a constant, either.
	 */
	if (!IsA(other, Const))
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(default_range_selectivity(operator___));
	}

	/*
	 * All the range operators are strict, so we can cope with a NULL constant
	 * right away.
	 */
	if (((Const *) other)->constisnull)
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(0.0);
	}

	/*
	 * If var is on the right, commute the operator___, so that we can assume the
	 * var is on the left in what follows.
	 */
	if (!varonleft)
	{
		/* we have other Op var, commute to make var Op other */
		operator___ = get_commutator(operator___);
		if (!operator___)
		{
			/* Use default selectivity (should we raise an error instead?) */
			/* NOTE(review): operator___ is InvalidOid here, so this yields
			 * the generic default — presumably intentional; confirm. */
			ReleaseVariableStats(vardata);
			PG_RETURN_FLOAT8(default_range_selectivity(operator___));
		}
	}

	/*
	 * OK, there's a Var and a Const we're dealing with here.  We need the
	 * Const to be of same range type as the column, else we can't do anything
	 * useful.  (Such cases will likely fail at runtime, but here we'd rather
	 * just return a default estimate.)
	 *
	 * If the operator___ is "range @> element", the constant should be of the
	 * element type of the range column.  Convert it to a range that includes
	 * only that single point, so that we don't need special handling for that
	 * in what follows.
	 */
	if (operator___ == OID_RANGE_CONTAINS_ELEM_OP)
	{
		typcache = range_get_typcache(fcinfo, vardata.vartype);

		if (((Const *) other)->consttype == typcache->rngelemtype->type_id)
		{
			RangeBound	lower,
						upper;

			/* build the singleton range [elem, elem] */
			lower.inclusive = true;
			lower.val = ((Const *) other)->constvalue;
			lower.infinite = false;
			lower.lower = true;
			upper.inclusive = true;
			upper.val = ((Const *) other)->constvalue;
			upper.infinite = false;
			upper.lower = false;
			constrange = range_serialize(typcache, &lower, &upper, false);
		}
	}
	else if (operator___ == OID_RANGE_ELEM_CONTAINED_OP)
	{
		/*
		 * Here, the Var is the elem, not the range.  For now we just punt and
		 * return the default estimate.  In future we could disassemble the
		 * range constant and apply scalarineqsel ...
		 */
	}
	else if (((Const *) other)->consttype == vardata.vartype)
	{
		/* Both sides are the same range type */
		typcache = range_get_typcache(fcinfo, vardata.vartype);

		constrange = DatumGetRangeType(((Const *) other)->constvalue);
	}

	/*
	 * If we got a valid constant on one side of the operator___, proceed to
	 * estimate using statistics. Otherwise punt and return a default constant
	 * estimate.  Note that calc_rangesel need not handle
	 * OID_RANGE_ELEM_CONTAINED_OP.
	 */
	if (constrange)
		selec = calc_rangesel(typcache, &vardata, constrange, operator___);
	else
		selec = default_range_selectivity(operator___);

	ReleaseVariableStats(vardata);

	CLAMP_PROBABILITY(selec);

	PG_RETURN_FLOAT8((float8) selec);
}
Datum LWGEOM_interiorringn_polygon(PG_FUNCTION_ARGS) { GSERIALIZED *geom; int32 wanted_index; LWCURVEPOLY *curvepoly = NULL; LWPOLY *poly = NULL; POINTARRAY *ring; LWLINE *line; LWGEOM *lwgeom; GSERIALIZED *result; GBOX *bbox = NULL; int type; POSTGIS_DEBUG(2, "LWGEOM_interierringn_polygon called."); wanted_index = PG_GETARG_INT32(1); if ( wanted_index < 1 ) { /* elog(ERROR, "InteriorRingN: ring number is 1-based"); */ PG_RETURN_NULL(); /* index out of range */ } geom = (GSERIALIZED *)PG_DETOAST_DATUM(PG_GETARG_DATUM(0)); type = gserialized_get_type(geom); if ( (type != POLYGONTYPE) && (type != CURVEPOLYTYPE) ) { elog(ERROR, "InteriorRingN: geom is not a polygon"); PG_FREE_IF_COPY(geom, 0); PG_RETURN_NULL(); } lwgeom = lwgeom_from_gserialized(geom); if( lwgeom_is_empty(lwgeom) ) { lwpoly_free(poly); PG_FREE_IF_COPY(geom, 0); PG_RETURN_NULL(); } if ( type == POLYGONTYPE) { poly = lwgeom_as_lwpoly(lwgeom_from_gserialized(geom)); /* Ok, now we have a polygon. Let's see if it has enough holes */ if ( wanted_index >= poly->nrings ) { lwpoly_free(poly); PG_FREE_IF_COPY(geom, 0); PG_RETURN_NULL(); } ring = poly->rings[wanted_index]; /* COMPUTE_BBOX==TAINTING */ if ( poly->bbox ) { bbox = lwalloc(sizeof(GBOX)); ptarray_calculate_gbox_cartesian(ring, bbox); } /* This is a LWLINE constructed by interior ring POINTARRAY */ line = lwline_construct(poly->srid, bbox, ring); result = geometry_serialize((LWGEOM *)line); lwline_release(line); lwpoly_free(poly); } else { curvepoly = lwgeom_as_lwcurvepoly(lwgeom_from_gserialized(geom)); if (wanted_index >= curvepoly->nrings) { PG_FREE_IF_COPY(geom, 0); lwgeom_release((LWGEOM *)curvepoly); PG_RETURN_NULL(); } result = geometry_serialize(curvepoly->rings[wanted_index]); lwgeom_free((LWGEOM*)curvepoly); } PG_FREE_IF_COPY(geom, 0); PG_RETURN_POINTER(result); }
/*
 * array_position_common
 *		Common code for array_position and array_position_start
 *
 * These are separate wrappers for the sake of opr_sanity regression test.
 * They are not strict so we have to test for null inputs explicitly.
 *
 * Returns the 1-based (well, lower-bound-based) index of the first element
 * equal to the search value at or after the optional start position, or
 * SQL NULL when no match is found.  Only one-dimensional arrays are
 * supported.
 */
static Datum
array_position_common(FunctionCallInfo fcinfo)
{
	ArrayType  *array;
	Oid			collation = PG_GET_COLLATION();
	Oid			element_type;
	Datum		searched_element,
				value;
	bool		isnull;
	int			position,
				position_min;
	bool		found = false;
	TypeCacheEntry *typentry;
	ArrayMetaState *my_extra;
	bool		null_search;
	ArrayIterator array_iterator;

	if (PG_ARGISNULL(0))
		PG_RETURN_NULL();

	array = PG_GETARG_ARRAYTYPE_P(0);
	element_type = ARR_ELEMTYPE(array);

	/*
	 * We refuse to search for elements in multi-dimensional arrays, since we
	 * have no good way to report the element's location in the array.
	 */
	if (ARR_NDIM(array) > 1)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("searching for elements in multidimensional arrays is not supported")));

	if (PG_ARGISNULL(1))
	{
		/* fast return when the array doesn't have nulls */
		if (!array_contains_nulls(array))
			PG_RETURN_NULL();
		searched_element = (Datum) 0;
		null_search = true;
	}
	else
	{
		searched_element = PG_GETARG_DATUM(1);
		null_search = false;
	}

	/* position is pre-incremented in the loop, hence the "- 1" */
	position = (ARR_LBOUND(array))[0] - 1;

	/* figure out where to start */
	if (PG_NARGS() == 3)
	{
		if (PG_ARGISNULL(2))
			ereport(ERROR,
					(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
					 errmsg("initial position should not be NULL")));

		position_min = PG_GETARG_INT32(2);
	}
	else
		position_min = (ARR_LBOUND(array))[0];

	/*
	 * We arrange to look up type info for array_create_iterator only once per
	 * series of calls, assuming the element type doesn't change underneath us.
	 */
	my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL)
	{
		fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
													  sizeof(ArrayMetaState));
		my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
		/* force the lookup below on first use */
		my_extra->element_type = ~element_type;
	}

	if (my_extra->element_type != element_type)
	{
		get_typlenbyvalalign(element_type,
							 &my_extra->typlen,
							 &my_extra->typbyval,
							 &my_extra->typalign);

		typentry = lookup_type_cache(element_type, TYPECACHE_EQ_OPR_FINFO);

		if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_FUNCTION),
					 errmsg("could not identify an equality operator for type %s",
							format_type_be(element_type))));

		my_extra->element_type = element_type;
		fmgr_info(typentry->eq_opr_finfo.fn_oid, &my_extra->proc);
	}

	/* Examine each array element until we find a match. */
	array_iterator = array_create_iterator(array, 0, my_extra);
	while (array_iterate(array_iterator, &value, &isnull))
	{
		position++;

		/* skip initial elements if caller requested so */
		if (position < position_min)
			continue;

		/*
		 * Can't look at the array element's value if it's null; but if we
		 * search for null, we have a hit and are done.
		 */
		if (isnull || null_search)
		{
			if (isnull && null_search)
			{
				found = true;
				break;
			}
			else
				continue;
		}

		/* not nulls, so run the operator */
		if (DatumGetBool(FunctionCall2Coll(&my_extra->proc, collation,
										   searched_element, value)))
		{
			found = true;
			break;
		}
	}

	array_free_iterator(array_iterator);

	/* Avoid leaking memory when handed toasted input */
	PG_FREE_IF_COPY(array, 0);

	if (!found)
		PG_RETURN_NULL();

	PG_RETURN_INT32(position);
}
Datum intersectsPrepared(PG_FUNCTION_ARGS) { #ifndef PREPARED_GEOM elog(ERROR,"Not implemented in this version!"); PG_RETURN_NULL(); /* never get here */ #else PG_LWGEOM * geom1; PG_LWGEOM * geom2; bool result; BOX2DFLOAT4 box1, box2; PrepGeomCache * prep_cache; int32 key1, key2; geom1 = (PG_LWGEOM *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0)); geom2 = (PG_LWGEOM *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1)); key1 = PG_GETARG_INT32(2); key2 = PG_GETARG_INT32(3); errorIfGeometryCollection(geom1,geom2); errorIfSRIDMismatch(pglwgeom_getSRID(geom1), pglwgeom_getSRID(geom2)); /* * short-circuit 1: if geom2 bounding box does not overlap * geom1 bounding box we can prematurely return FALSE. * Do the test IFF BOUNDING BOX AVAILABLE. */ if ( getbox2d_p(SERIALIZED_FORM(geom1), &box1) && getbox2d_p(SERIALIZED_FORM(geom2), &box2) ) { if (( box2.xmax < box1.xmin ) || ( box2.xmin > box1.xmax ) || ( box2.ymax < box1.ymin ) || ( box2.ymin > box1.ymax )) PG_RETURN_BOOL(FALSE); } prep_cache = GetPrepGeomCache( fcinfo, geom1, geom2 ); initGEOS(lwnotice, lwnotice); if ( prep_cache && prep_cache->prepared_geom ) { if ( prep_cache->argnum == 1 ) { GEOSGeom g = POSTGIS2GEOS(geom2); result = GEOSPreparedIntersects( prep_cache->prepared_geom, g); GEOSGeom_destroy(g); } else { GEOSGeom g = POSTGIS2GEOS(geom1); result = GEOSPreparedIntersects( prep_cache->prepared_geom, g); GEOSGeom_destroy(g); } } else { GEOSGeom g1 = POSTGIS2GEOS(geom1); GEOSGeom g2 = POSTGIS2GEOS(geom2); result = GEOSIntersects( g1, g2); GEOSGeom_destroy(g1); GEOSGeom_destroy(g2); } if (result == 2) { elog(ERROR,"GEOS contains() threw an error!"); PG_RETURN_NULL(); /* never get here */ } PG_FREE_IF_COPY(geom1, 0); PG_FREE_IF_COPY(geom2, 1); PG_RETURN_BOOL(result); #endif /* PREPARED_GEOM */ }
/*
 * cdb_heap_test
 *		Self-test for the CdbHeap k-way merge: generates nToGenerate sorted
 *		random ints, scatters them across nSlots "streams" (order preserved),
 *		then merges them back through the heap and verifies the output is
 *		non-decreasing.  Returns true on success, false (with NOTICEs) on
 *		failure.
 *
 * Args: nSlots (>=1), nToGenerate (>=1), seed for srand().
 */
Datum
cdb_heap_test(PG_FUNCTION_ARGS)
{
	int nSlots = PG_GETARG_INT32(0);
	int nToGenerate = PG_GETARG_INT32(1);
	int seed = PG_GETARG_INT32(2);
	bool result = true;
	CdbHeap *cdbheap = NULL;
	int i;
	int *intvals;
	HeapValue* values;
	int *slotcounts;
	int *slotpositions;
	HeapValue *phv;

	Assert(nSlots >= 1 && nToGenerate >= 1 );

	cdbheap = CreateCdbHeap( nSlots, cmpIntP, createIntP, copyIntP, switchIntP, NULL);

	/*
	 * Generate nToGenerate random integers
	 */
	intvals = (int *)palloc0( nToGenerate * sizeof(int) );
	srand( seed );
	for ( i=0; i<nToGenerate; i++ )
	{
		intvals[i] = rand();
	}

	// Now sort these
	qsort( intvals, nToGenerate, sizeof(int), cmpint );

	// Now allocate nSlots arrays for the sorted ints, and an array for the counts
	values = (HeapValue *)palloc0( nToGenerate * sizeof(HeapValue));
	slotcounts = (int *)palloc0( nSlots * sizeof(int) );
	slotpositions = (int *)palloc0( nSlots * sizeof(int) );

	// Now randomly assign the values to nSlots, preserving order
	for ( i=0; i<nToGenerate; i++ )
	{
		int slot = floor( ((double)rand() / ((double)RAND_MAX + 1 )) * nSlots);
		Assert( slot >= 0 && slot < nSlots);
		values[i].source = slot;
		values[i].value = intvals + i;
		slotcounts[slot]++;
	}

	elog( DEBUG4, "Slot counts follow" );
	int sum = 0;
	for ( i=0; i<nSlots; i++ )
	{
		/* every slot must receive at least one value or the test can't run */
		if ( slotcounts[i] == 0 )
		{
			ereport(NOTICE,
					(errcode(ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH),
					 errmsg("Function cdb_heap_test() cannot proceed, because not all slots got values.")) );
			result = false;
			goto end;
		}
		sum += slotcounts[i];
		elog( DEBUG4, "slotcount[%d] = %d" , i, slotcounts[i]);
	}
	elog( DEBUG4, "slotcount total = %d" , sum);

	// Now add the first element from each slot to the heap.
	for ( i=0; i<nSlots; i++ )
	{
		int index = GetNextValueForSlot( values, i, slotpositions[i], nToGenerate );
		SetCdbHeapInitialValue( cdbheap, &values[index] );
		/* resume the next search for this slot just past the element used */
		slotpositions[i] = index+1;
	}

	// Now grab lowest element from heap, and ask for the next element from the
	// same slot
	int lastval = INT_MIN;
	int lastslot;
	for ( i=0; i<nToGenerate; i++ )
	{
		/* NOTE: this phv intentionally shadows the outer declaration */
		HeapValue *phv = (HeapValue *)GetLowestValueFromCdbHeap( cdbheap );
		Assert( phv != NULL );
		if ( phv->value == NULL )
		{
			ereport(NOTICE,
					(errcode(ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH),
					 errmsg("Function cdb_heap_test() failed. At index %d, value was NULL", i )) );
			result = false;
			goto end;
		}
		/* the merged stream must be non-decreasing */
		if ( lastval > *phv->value )
		{
			ereport(NOTICE,
					(errcode(ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH),
					 errmsg("Function cdb_heap_test() failed. At index %d, value %d was smaller than previous value %d", i, *phv->value, lastval )) );
			result = false;
			goto end;
		}
		lastval = *phv->value;
		lastslot = phv->source;

		/* feed the heap the next value from the slot we just consumed;
		 * a NULL value marks that slot as exhausted */
		int index = GetNextValueForSlot( values, lastslot, slotpositions[lastslot], nToGenerate );
		if ( index == -1 )
			phv->value = NULL;
		else
		{
			phv = &values[index];
			slotpositions[lastslot] = index + 1;
		}
		MergeNewValueIntoCdbHeap( cdbheap, phv );
	}

	/* after draining everything, the heap top must be the NULL sentinel */
	phv = GetLowestValueFromCdbHeap( cdbheap );
	Assert( phv != NULL && phv->value == NULL );

end:
	if ( cdbheap != NULL )
		DestroyCdbHeap( cdbheap );

	PG_RETURN_BOOL(result);
}
/* * gistgettuple() -- Get the next tuple in the scan */ Datum gistgettuple(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1); GISTScanOpaque so = (GISTScanOpaque) scan->opaque; if (dir != ForwardScanDirection) elog(ERROR, "GiST only supports forward scan direction"); if (!so->qual_ok) PG_RETURN_BOOL(false); if (so->firstCall) { /* Begin the scan by processing the root page */ GISTSearchItem fakeItem; pgstat_count_index_scan(scan->indexRelation); so->firstCall = false; so->curTreeItem = NULL; so->curPageData = so->nPageData = 0; fakeItem.blkno = GIST_ROOT_BLKNO; memset(&fakeItem.data.parentlsn, 0, sizeof(GistNSN)); gistScanPage(scan, &fakeItem, NULL, NULL, NULL); } if (scan->numberOfOrderBys > 0) { /* Must fetch tuples in strict distance order */ PG_RETURN_BOOL(getNextNearest(scan)); } else { /* Fetch tuples index-page-at-a-time */ for (;;) { if (so->curPageData < so->nPageData) { /* continuing to return tuples from a leaf page */ scan->xs_ctup.t_self = so->pageData[so->curPageData].heapPtr; scan->xs_recheck = so->pageData[so->curPageData].recheck; so->curPageData++; PG_RETURN_BOOL(true); } /* find and process the next index page */ do { GISTSearchItem *item = getNextGISTSearchItem(so); if (!item) PG_RETURN_BOOL(false); CHECK_FOR_INTERRUPTS(); /* * While scanning a leaf page, ItemPointers of matching heap * tuples are stored in so->pageData. If there are any on * this page, we fall out of the inner "do" and loop around to * return them. */ gistScanPage(scan, item, so->curTreeItem->distances, NULL, NULL); pfree(item); } while (so->nPageData == 0); } } PG_RETURN_BOOL(false); /* keep compiler quiet */ }
/*
 * hashgettuple() -- Get the next tuple in the scan.
 *
 * Re-acquires the read lock on the current bucket buffer, re-finds the
 * previous position (the page may have received insertions while unlocked),
 * optionally marks the prior tuple dead, then advances via _hash_next /
 * _hash_first.  Hash scans are always lossy (only the hash code is stored),
 * so xs_recheck is unconditionally true.
 */
Datum
hashgettuple(PG_FUNCTION_ARGS)
{
	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
	HashScanOpaque so = (HashScanOpaque) scan->opaque;
	Relation	rel = scan->indexRelation;
	Buffer		buf;
	Page		page;
	OffsetNumber offnum;
	ItemPointer current;
	bool		res;

	/* Hash indexes are always lossy since we store only the hash code */
	scan->xs_recheck = true;

	/*
	 * We hold pin but not lock on current buffer while outside the hash AM.
	 * Reacquire the read lock here.
	 */
	if (BufferIsValid(so->hashso_curbuf))
		_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);

	/*
	 * If we've already initialized this scan, we can just advance it in the
	 * appropriate direction.  If we haven't done so yet, we call a routine to
	 * get the first item in the scan.
	 */
	current = &(so->hashso_curpos);
	if (ItemPointerIsValid(current))
	{
		/*
		 * An insertion into the current index page could have happened while
		 * we didn't have read lock on it.  Re-find our position by looking
		 * for the TID we previously returned.  (Because we hold share lock on
		 * the bucket, no deletions or splits could have occurred; therefore
		 * we can expect that the TID still exists in the current index page,
		 * at an offset >= where we were.)
		 */
		OffsetNumber maxoffnum;

		buf = so->hashso_curbuf;
		Assert(BufferIsValid(buf));
		page = BufferGetPage(buf);
		maxoffnum = PageGetMaxOffsetNumber(page);
		for (offnum = ItemPointerGetOffsetNumber(current);
			 offnum <= maxoffnum;
			 offnum = OffsetNumberNext(offnum))
		{
			IndexTuple	itup;

			itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
			if (ItemPointerEquals(&(so->hashso_heappos), &(itup->t_tid)))
				break;
		}
		if (offnum > maxoffnum)
			elog(ERROR, "failed to re-find scan position within index \"%s\"",
				 RelationGetRelationName(rel));
		ItemPointerSetOffsetNumber(current, offnum);

		/*
		 * Check to see if we should kill the previously-fetched tuple.
		 */
		if (scan->kill_prior_tuple)
		{
			/*
			 * Yes, so mark it by setting the LP_DEAD state in the item flags.
			 */
			ItemIdMarkDead(PageGetItemId(page, offnum));

			/*
			 * Since this can be redone later if needed, it's treated the same
			 * as a commit-hint-bit status update for heap tuples: we mark the
			 * buffer dirty but don't make a WAL log entry.
			 */
			SetBufferCommitInfoNeedsSave(buf);
		}

		/*
		 * Now continue the scan.
		 */
		res = _hash_next(scan, dir);
	}
	else
		res = _hash_first(scan, dir);

	/*
	 * Skip killed tuples if asked to.
	 */
	if (scan->ignore_killed_tuples)
	{
		while (res)
		{
			offnum = ItemPointerGetOffsetNumber(current);
			page = BufferGetPage(so->hashso_curbuf);
			if (!ItemIdIsDead(PageGetItemId(page, offnum)))
				break;
			res = _hash_next(scan, dir);
		}
	}

	/* Release read lock on current buffer, but keep it pinned */
	if (BufferIsValid(so->hashso_curbuf))
		_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK);

	/* Return current heap TID on success */
	scan->xs_ctup.t_self = so->hashso_heappos;

	PG_RETURN_BOOL(res);
}
/*
 * pgfadvise_loader - apply a page-cache bitmap to one segment of a relation.
 *
 * Resolves the on-disk filename of segment `segmentNumber` of relation
 * `relOid` (for the given fork), then hands the willneed/dontneed flags and
 * the VarBit page map to pgfadvise_loader_file.  Returns one composite row:
 * (filename, os page size, free pages, pages loaded, pages unloaded).
 *
 * NOTE(review): the exact posix_fadvise semantics applied per bit live in
 * pgfadvise_loader_file, which is not visible here -- confirm there.
 */
Datum
pgfadvise_loader(PG_FUNCTION_ARGS)
{
	Oid			relOid = PG_GETARG_OID(0);
	text	   *forkName = PG_GETARG_TEXT_P(1);
	int			segmentNumber = PG_GETARG_INT32(2);
	bool		willneed = PG_GETARG_BOOL(3);
	bool		dontneed = PG_GETARG_BOOL(4);
	VarBit	   *databit;

	/* our structure use to return values */
	pgfloaderStruct *pgfloader;

	Relation	rel;
	char	   *relationpath;
	char		filename[MAXPGPATH];

	/* our return value, 0 for success */
	int			result;

	/*
	 * Postgresql stuff to return a tuple
	 */
	HeapTuple	tuple;
	TupleDesc	tupdesc;
	Datum		values[PGFADVISE_LOADER_COLS];
	bool		nulls[PGFADVISE_LOADER_COLS];

	/* the bitmap argument is mandatory; reject NULL explicitly */
	if (PG_ARGISNULL(5))
		elog(ERROR, "pgfadvise_loader: databit argument shouldn't be NULL");
	databit = PG_GETARG_VARBIT_P(5);

	/* initialize nulls array to build the tuple */
	memset(nulls, 0, sizeof(nulls));

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* open the current relation in accessShareLock */
	rel = relation_open(relOid, AccessShareLock);

	/* we get the common part of the filename of each segment of a relation */
	relationpath = relpathpg(rel, forkName);

	/*
	 * If we are looking the first segment,
	 * relationpath should not be suffixed
	 */
	if (segmentNumber == 0)
		snprintf(filename, MAXPGPATH, "%s", relationpath);
	else
		snprintf(filename, MAXPGPATH, "%s.%u",
				 relationpath, (int) segmentNumber);

	/*
	 * We don't need the relation anymore
	 * the only purpose was to get a consistent filename
	 * (if file disappear, an error is logged)
	 */
	relation_close(rel, AccessShareLock);

	/*
	 * Call pgfadvise_loader with the varbit
	 */
	pgfloader = (pgfloaderStruct *) palloc(sizeof(pgfloaderStruct));
	result = pgfadvise_loader_file(filename, willneed, dontneed, databit,
								   pgfloader);
	if (result != 0)
		elog(ERROR, "Can't read file %s, fork(%s)",
			 filename, text_to_cstring(forkName));

	/* Filename */
	values[0] = CStringGetTextDatum( filename );
	/* os page size */
	values[1] = Int64GetDatum( pgfloader->pageSize );
	/* free page cache */
	values[2] = Int64GetDatum( pgfloader->pagesFree );
	/* pages loaded */
	values[3] = Int64GetDatum( pgfloader->pagesLoaded );
	/* pages unloaded */
	values[4] = Int64GetDatum( pgfloader->pagesUnloaded );

	/* Build and return the result tuple. */
	tuple = heap_form_tuple(tupdesc, values, nulls);
	PG_RETURN_DATUM( HeapTupleGetDatum(tuple) );
}
/* * arraycontsel -- restriction selectivity for array @>, &&, <@ operators */ Datum arraycontsel(PG_FUNCTION_ARGS) { PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0); Oid operator = PG_GETARG_OID(1); List *args = (List *) PG_GETARG_POINTER(2); int varRelid = PG_GETARG_INT32(3); VariableStatData vardata; Node *other; bool varonleft; Selectivity selec; Oid element_typeid; /* * If expression is not (variable op something) or (something op * variable), then punt and return a default estimate. */ if (!get_restriction_variable(root, args, varRelid, &vardata, &other, &varonleft)) PG_RETURN_FLOAT8(DEFAULT_SEL(operator)); /* * Can't do anything useful if the something is not a constant, either. */ if (!IsA(other, Const)) { ReleaseVariableStats(vardata); PG_RETURN_FLOAT8(DEFAULT_SEL(operator)); } /* * The "&&", "@>" and "<@" operators are strict, so we can cope with a * NULL constant right away. */ if (((Const *) other)->constisnull) { ReleaseVariableStats(vardata); PG_RETURN_FLOAT8(0.0); } /* * If var is on the right, commute the operator, so that we can assume the * var is on the left in what follows. */ if (!varonleft) { if (operator == OID_ARRAY_CONTAINS_OP) operator = OID_ARRAY_CONTAINED_OP; else if (operator == OID_ARRAY_CONTAINED_OP) operator = OID_ARRAY_CONTAINS_OP; } /* * OK, there's a Var and a Const we're dealing with here. We need the * Const to be a array with same element type as column, else we can't do * anything useful. (Such cases will likely fail at runtime, but here * we'd rather just return a default estimate.) */ element_typeid = get_base_element_type(((Const *) other)->consttype); if (element_typeid != InvalidOid && element_typeid == get_base_element_type(vardata.vartype)) { selec = calc_arraycontsel(&vardata, ((Const *) other)->constvalue, element_typeid, operator); } else { selec = DEFAULT_SEL(operator); } ReleaseVariableStats(vardata); CLAMP_PROBABILITY(selec); PG_RETURN_FLOAT8((float8) selec); }
/* * consistent support function */ Datum ginarrayconsistent(PG_FUNCTION_ARGS) { bool *check = (bool *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); /* ArrayType *query = PG_GETARG_ARRAYTYPE_P(2); */ int32 nkeys = PG_GETARG_INT32(3); /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); /* Datum *queryKeys = (Datum *) PG_GETARG_POINTER(6); */ bool *nullFlags = (bool *) PG_GETARG_POINTER(7); bool res; int32 i; switch (strategy) { case GinOverlapStrategy: /* result is not lossy */ *recheck = false; /* must have a match for at least one non-null element */ res = false; for (i = 0; i < nkeys; i++) { if (check[i] && !nullFlags[i]) { res = true; break; } } break; case GinContainsStrategy: /* result is not lossy */ *recheck = false; /* must have all elements in check[] true, and no nulls */ res = true; for (i = 0; i < nkeys; i++) { if (!check[i] || nullFlags[i]) { res = false; break; } } break; case GinContainedStrategy: /* we will need recheck */ *recheck = true; /* can't do anything else useful here */ res = true; break; case GinEqualStrategy: /* we will need recheck */ *recheck = true; /* * Must have all elements in check[] true; no discrimination * against nulls here. This is because array_contain_compare and * array_eq handle nulls differently ... */ res = true; for (i = 0; i < nkeys; i++) { if (!check[i]) { res = false; break; } } break; default: elog(ERROR, "ginarrayconsistent: unknown strategy number: %d", strategy); res = false; } PG_RETURN_BOOL(res); }
/* * tsmatchsel -- Selectivity of "@@" * * restriction selectivity function for tsvector @@ tsquery and * tsquery @@ tsvector */ Datum tsmatchsel(PG_FUNCTION_ARGS) { PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0); #ifdef NOT_USED Oid operator = PG_GETARG_OID(1); #endif List *args = (List *) PG_GETARG_POINTER(2); int varRelid = PG_GETARG_INT32(3); VariableStatData vardata; Node *other; bool varonleft; Selectivity selec; /* * If expression is not variable = something or something = variable, then * punt and return a default estimate. */ if (!get_restriction_variable(root, args, varRelid, &vardata, &other, &varonleft)) PG_RETURN_FLOAT8(DEFAULT_TS_MATCH_SEL); /* * Can't do anything useful if the something is not a constant, either. */ if (!IsA(other, Const)) { ReleaseVariableStats(vardata); PG_RETURN_FLOAT8(DEFAULT_TS_MATCH_SEL); } /* * The "@@" operator is strict, so we can cope with NULL right away */ if (((Const *) other)->constisnull) { ReleaseVariableStats(vardata); PG_RETURN_FLOAT8(0.0); } /* * OK, there's a Var and a Const we're dealing with here. We need the * Const to be a TSQuery, else we can't do anything useful. We have to * check this because the Var might be the TSQuery not the TSVector. */ if (((Const *) other)->consttype == TSQUERYOID) { /* tsvector @@ tsquery or the other way around */ Assert(vardata.vartype == TSVECTOROID); selec = tsquerysel(&vardata, ((Const *) other)->constvalue); } else { /* If we can't see the query structure, must punt */ selec = DEFAULT_TS_MATCH_SEL; } ReleaseVariableStats(vardata); CLAMP_PROBABILITY(selec); PG_RETURN_FLOAT8((float8) selec); }
/*
 * record_in - input routine for any composite type.
 *
 * Parses a row literal of the form "(val,val,...)" for the composite type
 * identified by tupType, returning a freshly palloc'd HeapTupleHeader.
 * Column values may be double-quoted (with "" for an embedded quote) or
 * backslash-escaped; a completely empty field means NULL.
 */
Datum
record_in(PG_FUNCTION_ARGS)
{
	char	   *string = PG_GETARG_CSTRING(0);
	Oid			tupType = PG_GETARG_OID(1);
#ifdef NOT_USED
	int32		typmod = PG_GETARG_INT32(2);
#endif
	HeapTupleHeader result;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTuple	tuple;
	RecordIOData *my_extra;
	bool		needComma = false;
	int			ncolumns;
	int			i;
	char	   *ptr;
	Datum	   *values;
	bool	   *nulls;
	StringInfoData buf;

	/*
	 * Use the passed type unless it's RECORD; we can't support input of
	 * anonymous types, mainly because there's no good way to figure out which
	 * anonymous type is wanted.  Note that for RECORD, what we'll probably
	 * actually get is RECORD's typelem, ie, zero.
	 */
	if (tupType == InvalidOid || tupType == RECORDOID)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("input of anonymous composite types is not implemented")));
	tupTypmod = -1;				/* for all non-anonymous types */
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/*
	 * We arrange to look up the needed I/O info just once per series of
	 * calls, assuming the record type doesn't change underneath us.
	 */
	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns != ncolumns)
	{
		/* first call, or column count changed: (re)allocate the cache */
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(RecordIOData) - sizeof(ColumnIOData)
							   + ncolumns * sizeof(ColumnIOData));
		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
		my_extra->record_type = InvalidOid;
		my_extra->record_typmod = 0;
	}

	if (my_extra->record_type != tupType ||
		my_extra->record_typmod != tupTypmod)
	{
		/* record type changed since the cache was built: reset it */
		MemSet(my_extra, 0,
			   sizeof(RecordIOData) - sizeof(ColumnIOData)
			   + ncolumns * sizeof(ColumnIOData));
		my_extra->record_type = tupType;
		my_extra->record_typmod = tupTypmod;
		my_extra->ncolumns = ncolumns;
	}

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	/*
	 * Scan the string.  We use "buf" to accumulate the de-quoted data for
	 * each column, which is then fed to the appropriate input converter.
	 */
	ptr = string;
	/* Allow leading whitespace */
	while (*ptr && isspace((unsigned char) *ptr))
		ptr++;
	if (*ptr++ != '(')
	{
		ReleaseTupleDesc(tupdesc);
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
				 errmsg("malformed record literal: \"%s\"", string),
				 errdetail("Missing left parenthesis.")));
	}

	initStringInfo(&buf);

	for (i = 0; i < ncolumns; i++)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *column_data;

		/* Ignore dropped columns in datatype, but fill with nulls */
		if (tupdesc->attrs[i]->attisdropped)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
			continue;
		}

		if (needComma)
		{
			/* Skip comma that separates prior field from this one */
			if (*ptr == ',')
				ptr++;
			else
			{
				ReleaseTupleDesc(tupdesc);
				/* *ptr must be ')' */
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
						 errmsg("malformed record literal: \"%s\"", string),
						 errdetail("Too few columns.")));
			}
		}

		/* Check for null: completely empty input means null */
		if (*ptr == ',' || *ptr == ')')
		{
			column_data = NULL;
			nulls[i] = true;
		}
		else
		{
			/* Extract string for this column */
			bool		inquote = false;

			/* reuse buf across columns; reset rather than reallocating */
			buf.len = 0;
			buf.data[0] = '\0';
			while (inquote || !(*ptr == ',' || *ptr == ')'))
			{
				char		ch = *ptr++;

				/* hitting end-of-string inside a field is malformed input */
				if (ch == '\0')
				{
					ReleaseTupleDesc(tupdesc);
					ereport(ERROR,
							(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
							 errmsg("malformed record literal: \"%s\"", string),
							 errdetail("Unexpected end of input.")));
				}
				if (ch == '\\')
				{
					/* backslash escapes the next character, which must exist */
					if (*ptr == '\0')
					{
						ReleaseTupleDesc(tupdesc);
						ereport(ERROR,
								(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
								 errmsg("malformed record literal: \"%s\"", string),
								 errdetail("Unexpected end of input.")));
					}
					appendStringInfoChar(&buf, *ptr++);
				}
				else if (ch == '\"')
				{
					if (!inquote)
						inquote = true;
					else if (*ptr == '\"')
					{
						/* doubled quote within quote sequence */
						appendStringInfoChar(&buf, *ptr++);
					}
					else
						inquote = false;
				}
				else
					appendStringInfoChar(&buf, ch);
			}

			column_data = buf.data;
			nulls[i] = false;
		}

		/*
		 * Convert the column value
		 */
		if (column_info->column_type != column_type)
		{
			/* cache the input function lookup per column type */
			getTypeInputInfo(column_type,
							 &column_info->typiofunc,
							 &column_info->typioparam);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  fcinfo->flinfo->fn_mcxt);
			column_info->column_type = column_type;
		}

		values[i] = InputFunctionCall(&column_info->proc,
									  column_data,
									  column_info->typioparam,
									  tupdesc->attrs[i]->atttypmod);

		/*
		 * Prep for next column
		 */
		needComma = true;
	}

	if (*ptr++ != ')')
	{
		ReleaseTupleDesc(tupdesc);
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
				 errmsg("malformed record literal: \"%s\"", string),
				 errdetail("Too many columns.")));
	}
	/* Allow trailing whitespace */
	while (*ptr && isspace((unsigned char) *ptr))
		ptr++;
	if (*ptr)
	{
		ReleaseTupleDesc(tupdesc);
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
				 errmsg("malformed record literal: \"%s\"", string),
				 errdetail("Junk after right parenthesis.")));
	}

	tuple = heap_form_tuple(tupdesc, values, nulls);

	/*
	 * We cannot return tuple->t_data because heap_form_tuple allocates it as
	 * part of a larger chunk, and our caller may expect to be able to pfree
	 * our result.  So must copy the info into a new palloc chunk.
	 */
	result = (HeapTupleHeader) palloc(tuple->t_len);
	memcpy(result, tuple->t_data, tuple->t_len);

	heap_freetuple(tuple);
	pfree(buf.data);
	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	PG_RETURN_HEAPTUPLEHEADER(result);
}
/*
 * master_copy_shard_placement implements a user-facing UDF to copy data from
 * a healthy (source) node to an inactive (target) node. To accomplish this it
 * entirely recreates the table structure before copying all data. During this
 * time all modifications are paused to the shard. After successful repair, the
 * inactive placement is marked healthy and modifications may continue. If the
 * repair fails at any point, this function throws an error, leaving the node
 * in an unhealthy state.
 */
Datum
master_copy_shard_placement(PG_FUNCTION_ARGS)
{
	int64		shardId = PG_GETARG_INT64(0);
	text	   *sourceNodeName = PG_GETARG_TEXT_P(1);
	int32		sourceNodePort = PG_GETARG_INT32(2);
	text	   *targetNodeName = PG_GETARG_TEXT_P(3);
	int32		targetNodePort = PG_GETARG_INT32(4);
	ShardInterval *shardInterval = LoadShardInterval(shardId);
	Oid			distributedTableId = shardInterval->relationId;

	List	   *shardPlacementList = NIL;
	ShardPlacement *sourcePlacement = NULL;
	ShardPlacement *targetPlacement = NULL;
	List	   *ddlCommandList = NIL;
	bool		recreated = false;
	bool		dataCopied = false;

	/*
	 * By taking an exclusive lock on the shard, we both stop all modifications
	 * (INSERT, UPDATE, or DELETE) and prevent concurrent repair operations from
	 * being able to operate on this shard.
	 */
	LockShard(shardId, ExclusiveLock);

	shardPlacementList = LoadShardPlacementList(shardId);

	/* the source must hold a complete, healthy copy of the shard */
	sourcePlacement = SearchShardPlacementInList(shardPlacementList,
												 sourceNodeName,
												 sourceNodePort);
	if (sourcePlacement->shardState != STATE_FINALIZED)
	{
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						errmsg("source placement must be in finalized state")));
	}

	/* only an inactive placement may be repaired */
	targetPlacement = SearchShardPlacementInList(shardPlacementList,
												 targetNodeName,
												 targetNodePort);
	if (targetPlacement->shardState != STATE_INACTIVE)
	{
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						errmsg("target placement must be in inactive state")));
	}

	/* retrieve the DDL commands for the table and run them */
	ddlCommandList = RecreateTableDDLCommandList(distributedTableId, shardId);

	recreated = ExecuteRemoteCommandList(targetPlacement->nodeName,
										 targetPlacement->nodePort,
										 ddlCommandList);
	if (!recreated)
	{
		ereport(ERROR, (errmsg("could not recreate shard table"),
						errhint("Consult recent messages in the server logs for "
								"details.")));
	}

	/*
	 * NOTE(review): interrupts are held across the copy and the metadata
	 * update, presumably so a cancel cannot separate the data copy from the
	 * state change -- confirm against the project's conventions.
	 */
	HOLD_INTERRUPTS();

	dataCopied = CopyDataFromFinalizedPlacement(distributedTableId, shardId,
												sourcePlacement,
												targetPlacement);
	if (!dataCopied)
	{
		ereport(ERROR, (errmsg("could not copy shard data"),
						errhint("Consult recent messages in the server logs for "
								"details.")));
	}

	/* the placement is repaired, so return to finalized state */
	DeleteShardPlacementRow(targetPlacement->id);
	InsertShardPlacementRow(targetPlacement->id, targetPlacement->shardId,
							STATE_FINALIZED, targetPlacement->nodeName,
							targetPlacement->nodePort);

	RESUME_INTERRUPTS();

	PG_RETURN_VOID();
}
/*
 * geography_as_kml - render a geography value as a KML 2 fragment.
 *
 * Arguments: (int4 version, geography geom [, int4 precision [, text prefix]]).
 * Only version 2 is accepted.  Precision is clamped to
 * [0, OUT_MAX_DOUBLE_PRECISION]; a non-empty prefix gets a ':' appended and
 * is used as the XML namespace prefix.  Returns NULL for NULL geometry or
 * when the KML writer produces no output.
 */
Datum
geography_as_kml(PG_FUNCTION_ARGS)
{
	GSERIALIZED *g = NULL;
	LWGEOM *lwgeom = NULL;
	char *kml;
	text *result;
	int version;
	int precision = OUT_MAX_DOUBLE_PRECISION;
	static const char *default_prefix = ""; /* default is no prefix */
	char *prefixbuf;
	const char* prefix = default_prefix;
	text *prefix_text;

	/* Get the version */
	version = PG_GETARG_INT32(0);
	if ( version != 2)
	{
		elog(ERROR, "Only KML 2 is supported");
		PG_RETURN_NULL();
	}

	/* Get the geometry */
	if ( PG_ARGISNULL(1) ) PG_RETURN_NULL();
	g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1));

	/* Convert to lwgeom so we can run the old functions */
	lwgeom = lwgeom_from_gserialized(g);

	/* Retrieve precision if any (default is max) */
	if (PG_NARGS() >2 && !PG_ARGISNULL(2))
	{
		precision = PG_GETARG_INT32(2);
		/* clamp to the supported range rather than erroring */
		if ( precision > OUT_MAX_DOUBLE_PRECISION )
			precision = OUT_MAX_DOUBLE_PRECISION;
		else if ( precision < 0 ) precision = 0;
	}

	/* retrieve prefix */
	if (PG_NARGS() >3 && !PG_ARGISNULL(3))
	{
		prefix_text = PG_GETARG_TEXT_P(3);
		if ( VARSIZE(prefix_text)-VARHDRSZ == 0 )
		{
			prefix = "";
		}
		else
		{
			/* +2 is one for the ':' and one for term null */
			prefixbuf = palloc(VARSIZE(prefix_text)-VARHDRSZ+2);
			memcpy(prefixbuf, VARDATA(prefix_text),
			       VARSIZE(prefix_text)-VARHDRSZ);
			/* add colon and null terminate */
			prefixbuf[VARSIZE(prefix_text)-VARHDRSZ] = ':';
			prefixbuf[VARSIZE(prefix_text)-VARHDRSZ+1] = '\0';
			prefix = prefixbuf;
		}
	}

	kml = lwgeom_to_kml2(lwgeom, precision, prefix);

	/* release intermediate objects before building the result */
	lwgeom_free(lwgeom);
	PG_FREE_IF_COPY(g, 1);

	if ( ! kml )
		PG_RETURN_NULL();

	result = cstring2text(kml);
	lwfree(kml);

	PG_RETURN_TEXT_P(result);
}
Datum svec_cast_int4(PG_FUNCTION_ARGS) { float8 value=(float8 )PG_GETARG_INT32(0); PG_RETURN_SVECTYPE_P(svec_make_scalar(value,1)); }
Datum LWGEOM_in(PG_FUNCTION_ARGS) { char *input = PG_GETARG_CSTRING(0); int32 geom_typmod = -1; char *str = input; LWGEOM_PARSER_RESULT lwg_parser_result; LWGEOM *lwgeom; GSERIALIZED *ret; int srid = 0; if ( (PG_NARGS()>2) && (!PG_ARGISNULL(2)) ) { geom_typmod = PG_GETARG_INT32(2); } lwgeom_parser_result_init(&lwg_parser_result); /* Empty string. */ if ( str[0] == '\0' ) ereport(ERROR,(errmsg("parse error - invalid geometry"))); /* Starts with "SRID=" */ if( strncasecmp(str,"SRID=",5) == 0 ) { /* Roll forward to semi-colon */ char *tmp = str; while ( tmp && *tmp != ';' ) tmp++; /* Check next character to see if we have WKB */ if ( tmp && *(tmp+1) == '0' ) { /* Null terminate the SRID= string */ *tmp = '\0'; /* Set str to the start of the real WKB */ str = tmp + 1; /* Move tmp to the start of the numeric part */ tmp = input + 5; /* Parse out the SRID number */ srid = atoi(tmp); } } /* WKB? Let's find out. */ if ( str[0] == '0' ) { size_t hexsize = strlen(str); unsigned char *wkb = bytes_from_hexbytes(str, hexsize); /* TODO: 20101206: No parser checks! This is inline with current 1.5 behavior, but needs discussion */ lwgeom = lwgeom_from_wkb(wkb, hexsize/2, LW_PARSER_CHECK_NONE); /* If we picked up an SRID at the head of the WKB set it manually */ if ( srid ) lwgeom_set_srid(lwgeom, srid); /* Add a bbox if necessary */ if ( lwgeom_needs_bbox(lwgeom) ) lwgeom_add_bbox(lwgeom); pfree(wkb); ret = geometry_serialize(lwgeom); lwgeom_free(lwgeom); } /* WKT then. 
*/ else { if ( lwgeom_parse_wkt(&lwg_parser_result, str, LW_PARSER_CHECK_ALL) == LW_FAILURE ) { PG_PARSER_ERROR(lwg_parser_result); } lwgeom = lwg_parser_result.geom; if ( lwgeom_needs_bbox(lwgeom) ) lwgeom_add_bbox(lwgeom); ret = geometry_serialize(lwgeom); lwgeom_parser_result_free(&lwg_parser_result); } if ( geom_typmod >= 0 ) { postgis_valid_typmod(ret, geom_typmod); POSTGIS_DEBUG(3, "typmod and geometry were consistent"); } else { POSTGIS_DEBUG(3, "typmod was -1"); } /* Don't free the parser result (and hence lwgeom) until we have done */ /* the typemod check with lwgeom */ PG_RETURN_POINTER(ret); }
Datum float8arr_cast_int4(PG_FUNCTION_ARGS) { float8 value=(float8 )PG_GETARG_INT32(0); PG_RETURN_ARRAYTYPE_P(svec_return_array_internal(svec_make_scalar(value,1))); }
/*
 * ltreeparentsel - Selectivity of parent relationship for ltree data types.
 *
 * Restriction selectivity estimator for the "<@" operator: combines the
 * fraction of MCV entries satisfying the clause with a histogram-based (or
 * default) estimate for the non-MCV population, then adjusts for NULLs.
 */
Datum
ltreeparentsel(PG_FUNCTION_ARGS)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);
	VariableStatData vardata;
	Node	   *other;
	bool		varonleft;
	double		selec;

	/*
	 * If expression is not variable <@ something or something <@ variable,
	 * then punt and return a default estimate.
	 */
	if (!get_restriction_variable(root, args, varRelid,
								  &vardata, &other, &varonleft))
		PG_RETURN_FLOAT8(DEFAULT_PARENT_SEL);

	/*
	 * If the something is a NULL constant, assume operator is strict and
	 * return zero, ie, operator will never return TRUE.
	 */
	if (IsA(other, Const) &&
		((Const *) other)->constisnull)
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(0.0);
	}

	if (IsA(other, Const))
	{
		/* Variable is being compared to a known non-null constant */
		Datum		constval = ((Const *) other)->constvalue;
		FmgrInfo	contproc;
		double		mcvsum;
		double		mcvsel;
		double		nullfrac;
		int			hist_size;

		fmgr_info(get_opcode(operator), &contproc);

		/*
		 * Is the constant "<@" to any of the column's most common values?
		 */
		mcvsel = mcv_selectivity(&vardata, &contproc, constval, varonleft,
								 &mcvsum);

		/*
		 * If the histogram is large enough, see what fraction of it the
		 * constant is "<@" to, and assume that's representative of the
		 * non-MCV population.  Otherwise use the default selectivity for the
		 * non-MCV population.
		 */
		selec = histogram_selectivity(&vardata, &contproc,
									  constval, varonleft,
									  10, 1, &hist_size);
		if (selec < 0)
		{
			/* Nope, fall back on default */
			selec = DEFAULT_PARENT_SEL;
		}
		else if (hist_size < 100)
		{
			/*
			 * For histogram sizes from 10 to 100, we combine the histogram
			 * and default selectivities, putting increasingly more trust in
			 * the histogram for larger sizes.
			 */
			double		hist_weight = hist_size / 100.0;

			selec = selec * hist_weight +
				DEFAULT_PARENT_SEL * (1.0 - hist_weight);
		}

		/* In any case, don't believe extremely small or large estimates. */
		if (selec < 0.0001)
			selec = 0.0001;
		else if (selec > 0.9999)
			selec = 0.9999;

		/* stanullfrac is only available when a stats tuple exists */
		if (HeapTupleIsValid(vardata.statsTuple))
			nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
		else
			nullfrac = 0.0;

		/*
		 * Now merge the results from the MCV and histogram calculations,
		 * realizing that the histogram covers only the non-null values that
		 * are not listed in MCV.
		 */
		selec *= 1.0 - nullfrac - mcvsum;
		selec += mcvsel;
	}
	else
		selec = DEFAULT_PARENT_SEL;

	ReleaseVariableStats(vardata);

	/* result should be in range, but make sure... */
	CLAMP_PROBABILITY(selec);

	PG_RETURN_FLOAT8((float8) selec);
}
/*
 * Selectivity estimation for the subnet inclusion/overlap operators
 *
 * Restriction selectivity: sums the fractions of MCV entries satisfying the
 * clause, estimates the non-MCV population from the histogram (or a default
 * when none is available), and combines both after discounting NULLs.
 */
Datum
networksel(PG_FUNCTION_ARGS)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);
	VariableStatData vardata;
	Node	   *other;
	bool		varonleft;
	Selectivity selec,
				mcv_selec,
				non_mcv_selec;
	Datum		constvalue,
			   *hist_values;
	int			hist_nvalues;
	Form_pg_statistic stats;
	double		sumcommon,
				nullfrac;
	FmgrInfo	proc;

	/*
	 * If expression is not (variable op something) or (something op
	 * variable), then punt and return a default estimate.
	 */
	if (!get_restriction_variable(root, args, varRelid,
								  &vardata, &other, &varonleft))
		PG_RETURN_FLOAT8(DEFAULT_SEL(operator));

	/*
	 * Can't do anything useful if the something is not a constant, either.
	 */
	if (!IsA(other, Const))
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(DEFAULT_SEL(operator));
	}

	/* All of the operators handled here are strict. */
	if (((Const *) other)->constisnull)
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(0.0);
	}
	constvalue = ((Const *) other)->constvalue;

	/* Otherwise, we need stats in order to produce a non-default estimate. */
	if (!HeapTupleIsValid(vardata.statsTuple))
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(DEFAULT_SEL(operator));
	}

	stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
	nullfrac = stats->stanullfrac;

	/*
	 * If we have most-common-values info, add up the fractions of the MCV
	 * entries that satisfy MCV OP CONST.  These fractions contribute directly
	 * to the result selectivity.  Also add up the total fraction represented
	 * by MCV entries.
	 */
	fmgr_info(get_opcode(operator), &proc);
	mcv_selec = mcv_selectivity(&vardata, &proc, constvalue, varonleft,
								&sumcommon);

	/*
	 * If we have a histogram, use it to estimate the proportion of the
	 * non-MCV population that satisfies the clause.  If we don't, apply the
	 * default selectivity to that population.
	 */
	if (get_attstatsslot(vardata.statsTuple,
						 vardata.atttype, vardata.atttypmod,
						 STATISTIC_KIND_HISTOGRAM, InvalidOid,
						 NULL,
						 &hist_values, &hist_nvalues,
						 NULL, NULL))
	{
		int			opr_codenum = inet_opr_codenum(operator);

		/* Commute if needed, so we can consider histogram to be on the left */
		if (!varonleft)
			opr_codenum = -opr_codenum;
		non_mcv_selec = inet_hist_value_sel(hist_values, hist_nvalues,
											constvalue, opr_codenum);

		free_attstatsslot(vardata.atttype, hist_values, hist_nvalues, NULL, 0);
	}
	else
		non_mcv_selec = DEFAULT_SEL(operator);

	/* Combine selectivities for MCV and non-MCV populations */
	selec = mcv_selec + (1.0 - nullfrac - sumcommon) * non_mcv_selec;

	/* Result should be in range, but make sure... */
	CLAMP_PROBABILITY(selec);

	ReleaseVariableStats(vardata);

	PG_RETURN_FLOAT8(selec);
}