/*
 * Package an SPI execution result as a Perl hash.
 *
 * The returned HV carries a "status" string, the "processed" row count
 * and, for SELECT results, a "rows" array holding one hash per tuple.
 * The SPI tuple table is always released before returning.
 */
static HV *
plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
								int status)
{
	HV		   *ret_hv = newHV();

	hv_store(ret_hv, "status", strlen("status"),
			 newSVpv((char *) SPI_result_code_string(status), 0), 0);
	hv_store(ret_hv, "processed", strlen("processed"),
			 newSViv(processed), 0);

	if (status == SPI_OK_SELECT)
	{
		AV		   *row_av = newAV();
		int			rowno;

		/* convert each tuple into a Perl hash and collect them */
		for (rowno = 0; rowno < processed; rowno++)
			av_push(row_av,
					plperl_hash_from_tuple(tuptable->vals[rowno],
										   tuptable->tupdesc));

		/* newRV_noinc: the result hash takes over the array's refcount */
		hv_store(ret_hv, "rows", strlen("rows"),
				 newRV_noinc((SV *) row_av), 0);
	}

	SPI_freetuptable(tuptable);

	return ret_hv;
}
/*
 * Python iterator protocol for a PL/Python cursor: fetch the next row and
 * return it as a Python dict, or raise StopIteration when exhausted.
 */
static PyObject *
PLy_cursor_iternext(PyObject *self)
{
	PLyCursorObject *cursor;
	PyObject   *ret;
	/* volatile: these must survive the longjmp taken on PG_CATCH() */
	volatile MemoryContext oldcontext;
	volatile ResourceOwner oldowner;
	Portal		portal;

	cursor = (PLyCursorObject *) self;

	/* reject use after an explicit close() */
	if (cursor->closed)
	{
		PLy_exception_set(PyExc_ValueError, "iterating a closed cursor");
		return NULL;
	}

	/* the portal may have been destroyed by an aborted subtransaction */
	portal = GetPortalByName(cursor->portalname);
	if (!PortalIsValid(portal))
	{
		PLy_exception_set(PyExc_ValueError,
						  "iterating a cursor in an aborted subtransaction");
		return NULL;
	}

	oldcontext = CurrentMemoryContext;
	oldowner = CurrentResourceOwner;

	/* run the fetch inside a subtransaction so SPI errors can be trapped */
	PLy_spi_subtransaction_begin(oldcontext, oldowner);

	PG_TRY();
	{
		/* pull exactly one row forward from the portal */
		SPI_cursor_fetch(portal, true, 1);
		if (SPI_processed == 0)
		{
			/* no more rows: signal end of iteration to Python */
			PyErr_SetNone(PyExc_StopIteration);
			ret = NULL;
		}
		else
		{
			/* initialize datum-to-Python conversion info on first use */
			if (cursor->result.is_rowtype != 1)
				PLy_input_tuple_funcs(&cursor->result, SPI_tuptable->tupdesc);

			ret = PLyDict_FromTuple(&cursor->result, SPI_tuptable->vals[0],
									SPI_tuptable->tupdesc);
		}

		SPI_freetuptable(SPI_tuptable);

		PLy_spi_subtransaction_commit(oldcontext, oldowner);
	}
	PG_CATCH();
	{
		/* aborts the subtransaction and converts the error for Python */
		PLy_spi_subtransaction_abort(oldcontext, oldowner);
		return NULL;
	}
	PG_END_TRY();

	return ret;
}
/*
 * pg.spi.cursor_fetch() support: fetch up to 'rows' tuples from an open
 * SPI cursor in the given direction and hand them back to R.
 *
 * Returns the R object built by rpgsql_get_results() when tuples were
 * fetched, or R NULL when the cursor is exhausted.
 */
SEXP
plr_SPI_cursor_fetch(SEXP cursor_in, SEXP forward_in, SEXP rows_in)
{
	Portal		portal = NULL;
	int			ntuples;
	SEXP		result = NULL;
	MemoryContext oldcontext;
	int			forward;
	int			rows;

	PREPARE_PG_TRY;
	PUSH_PLERRCONTEXT(rsupport_error_callback, "pg.spi.cursor_fetch");

	/* the cursor handle is an R external pointer wrapping the Portal */
	portal = R_ExternalPtrAddr(cursor_in);

	/*
	 * Validate the R-side arguments.  Note that R's error() performs a
	 * longjmp back into R and does not return, so the "return result"
	 * statements below are unreachable; they only quiet compilers.
	 */
	if (!IS_LOGICAL(forward_in))
	{
		error("pg.spi.cursor_fetch arg2 must be boolean");
		return result;
	}
	if (!IS_INTEGER(rows_in))
	{
		error("pg.spi.cursor_fetch arg3 must be an integer");
		return result;
	}
	forward = LOGICAL_DATA(forward_in)[0];
	rows = INTEGER_DATA(rows_in)[0];

	/* switch to SPI memory context */
	oldcontext = MemoryContextSwitchTo(plr_SPI_context);

	PG_TRY();
	{
		/* fetch the requested number of tuples from the cursor */
		SPI_cursor_fetch(portal, forward, rows);
	}
	PLR_PG_CATCH();
	PLR_PG_END_TRY();

	/* back to caller's memory context */
	MemoryContextSwitchTo(oldcontext);

	/* check the result */
	ntuples = SPI_processed;
	if (ntuples > 0)
	{
		result = rpgsql_get_results(ntuples, SPI_tuptable);
		SPI_freetuptable(SPI_tuptable);
	}
	else
		result = R_NilValue;

	POP_PLERRCONTEXT;
	return result;
}
/*
 * Lua __gc metamethod for a tuptable userdata: release the underlying
 * SPI tuple table when Lua collects the wrapper object.
 */
static int
luaP_tuptablegc(lua_State *L)
{
	luaP_Tuptable *ud = (luaP_Tuptable *) lua_touserdata(L, 1);

	SPI_freetuptable(ud->tuptable);
	return 0;	/* a __gc handler pushes no results */
}
/*
 * Read the edge set produced by 'sql' through an SPI cursor, renumber
 * vertex ids into a dense range starting at 0, and run
 * boost_dijkstra_dist() to find everything reachable from
 * source_vertex_id within 'distance'.
 *
 * On success *path / *path_count receive the solver's result (allocated
 * by the boost wrapper) and the SPI connection is closed via finish().
 * Note: elog(ERROR, ...) does not return, so the "return -1" statements
 * that follow it are defensive/unreachable.
 */
static int
compute_driving_distance(char* sql, int source_vertex_id, float8 distance,
						 bool directed, bool has_reverse_cost,
						 path_element_t **path, int *path_count)
{
	int			SPIcode;
	void	   *SPIplan;
	Portal		SPIportal;
	bool		moredata = TRUE;
	int			ntuples;
	edge_t	   *edges = NULL;
	int			total_tuples = 0;
	edge_columns_t edge_columns = {.id= -1, .source= -1, .target= -1,
								   .cost= -1, .reverse_cost= -1};
	int			v_max_id = 0;
	int			v_min_id = INT_MAX;
	char	   *err_msg;
	int			ret = -1;
	int			s_count = 0;
	register int z;

	DBG("start driving_distance\n");

	/* connect to SPI and open a cursor over the caller-supplied edge query */
	SPIcode = SPI_connect();
	if (SPIcode != SPI_OK_CONNECT)
	{
		elog(ERROR, "driving_distance: couldn't open a connection to SPI");
		return -1;
	}

	SPIplan = SPI_prepare(sql, 0, NULL);
	if (SPIplan == NULL)
	{
		elog(ERROR, "driving_distance: couldn't create query plan via SPI");
		return -1;
	}

	if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL)
	{
		elog(ERROR, "driving_distance: SPI_cursor_open('%s') returns NULL", sql);
		return -1;
	}

	/* pull the edges in batches of TUPLIMIT rows */
	while (moredata == TRUE)
	{
		SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

		/* resolve the result's column layout from the first batch only */
		if (edge_columns.id == -1)
		{
			if (fetch_edge_columns(SPI_tuptable, &edge_columns,
								   has_reverse_cost) == -1)
				return finish(SPIcode, ret);
		}

		ntuples = SPI_processed;
		total_tuples += ntuples;

		/* grow the edge array to hold the new batch */
		if (!edges)
			edges = palloc(total_tuples * sizeof(edge_t));
		else
			edges = repalloc(edges, total_tuples * sizeof(edge_t));

		/* NOTE(review): palloc/repalloc raise ERROR on failure, so this
		 * NULL check looks like dead code kept for safety. */
		if (edges == NULL)
		{
			elog(ERROR, "Out of memory");
			return finish(SPIcode, ret);
		}

		if (ntuples > 0)
		{
			int			t;
			SPITupleTable *tuptable = SPI_tuptable;
			TupleDesc	tupdesc = SPI_tuptable->tupdesc;

			for (t = 0; t < ntuples; t++)
			{
				HeapTuple	tuple = tuptable->vals[t];

				fetch_edge(&tuple, &tupdesc, &edge_columns,
						   &edges[total_tuples - ntuples + t]);
			}
			SPI_freetuptable(tuptable);
		}
		else
		{
			moredata = FALSE;
		}
	}

	//defining min and max vertex id
	DBG("Total %i tuples", total_tuples);

	for (z = 0; z < total_tuples; z++)
	{
		if (edges[z].source < v_min_id)
			v_min_id = edges[z].source;
		if (edges[z].source > v_max_id)
			v_max_id = edges[z].source;
		if (edges[z].target < v_min_id)
			v_min_id = edges[z].target;
		if (edges[z].target > v_max_id)
			v_max_id = edges[z].target;
		DBG("%i <-> %i", v_min_id, v_max_id);
	}

	//::::::::::::::::::::::::::::::::::::
	//:: reducing vertex id (renumbering)
	//::::::::::::::::::::::::::::::::::::
	for (z = 0; z < total_tuples; z++)
	{
		//check if edges[] contains source
		if (edges[z].source == source_vertex_id ||
			edges[z].target == source_vertex_id)
			++s_count;

		edges[z].source -= v_min_id;
		edges[z].target -= v_min_id;
		DBG("%i - %i", edges[z].source, edges[z].target);
	}

	if (s_count == 0)
	{
		elog(ERROR, "Start vertex was not found.");
		return -1;
	}

	/* shift the start vertex into the renumbered id space as well */
	source_vertex_id -= v_min_id;

	profstop("extract", prof_extract);
	profstart(prof_dijkstra);

	DBG("Calling boost_dijkstra\n");
	ret = boost_dijkstra_dist(edges, total_tuples, source_vertex_id,
							  distance, directed, has_reverse_cost,
							  path, path_count, &err_msg);
	DBG("Back from boost_dijkstra\n");

	if (ret < 0)
	{
		elog(ERROR, "Error computing path: %s", err_msg);
	}

	profstop("dijkstra", prof_dijkstra);
	profstart(prof_store);

	//::::::::::::::::::::::::::::::::
	//:: restoring original vertex id
	//::::::::::::::::::::::::::::::::
	for (z = 0; z < *path_count; z++)
	{
		//DBG("vetex %i\n",(*path)[z].vertex_id);
		(*path)[z].vertex_id += v_min_id;
	}

	return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(driving_distance);

/*
 * driving_distance(sql text, source int4, distance float8,
 *                  directed bool, has_reverse_cost bool)
 *
 * Set-returning function: the whole result is computed on the first call,
 * then one (seq, vertex_id, edge_id, cost) tuple of composite type
 * "pgr_costResult" is emitted per call.
 */
Datum
driving_distance(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	int			call_cntr;
	int			max_calls;
	TupleDesc	tuple_desc;
	path_element_t *path = 0;

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;
		int			path_count = 0;
		int			ret;

		// XXX profiling messages are not thread safe
		profstart(prof_total);
		profstart(prof_extract);

		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();

		/* switch to memory context appropriate for multiple function calls */
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		ret = compute_driving_distance(text2char(PG_GETARG_TEXT_P(0)),	// sql
									   PG_GETARG_INT32(1),	// source vertex
									   PG_GETARG_FLOAT8(2),	// distance or time
									   PG_GETARG_BOOL(3),
									   PG_GETARG_BOOL(4),
									   &path, &path_count);

		if (ret < 0)
		{
			elog(ERROR, "Error computing path");
		}

#ifdef DEBUG
		DBG("Ret is %i", ret);
		int			i;
		for (i = 0; i < path_count; i++)
		{
			DBG("Step %i vertex_id %i ", i, path[i].vertex_id);
			DBG(" edge_id %i ", path[i].edge_id);
			DBG(" cost %f ", path[i].cost);
		}
#endif

		/* total number of tuples to be returned */
		funcctx->max_calls = path_count;
		funcctx->user_fctx = path;
		funcctx->tuple_desc = BlessTupleDesc(
							RelationNameGetTupleDesc("pgr_costResult"));

		MemoryContextSwitchTo(oldcontext);
	}

	/* stuff done on every call of the function */
	funcctx = SRF_PERCALL_SETUP();

	call_cntr = funcctx->call_cntr;
	max_calls = funcctx->max_calls;
	tuple_desc = funcctx->tuple_desc;
	path = (path_element_t*) funcctx->user_fctx;

	if (call_cntr < max_calls)
	{
		/* do when there is more left to send */
		HeapTuple	tuple;
		Datum		result;
		Datum	   *values;
		char	   *nulls;

		values = palloc(4 * sizeof(Datum));
		/* heap_formtuple takes ' ' for not-null, 'n' for null */
		nulls = palloc(4 * sizeof(char));

		values[0] = Int32GetDatum(call_cntr);
		nulls[0] = ' ';
		values[1] = Int32GetDatum(path[call_cntr].vertex_id);
		nulls[1] = ' ';
		values[2] = Int32GetDatum(path[call_cntr].edge_id);
		nulls[2] = ' ';
		values[3] = Float8GetDatum(path[call_cntr].cost);
		nulls[3] = ' ';

		tuple = heap_formtuple(tuple_desc, values, nulls);

		/* make the tuple into a datum */
		result = HeapTupleGetDatum(tuple);

		/* clean up (this is not really necessary) */
		pfree(values);
		pfree(nulls);

		SRF_RETURN_NEXT(funcctx, result);
	}
	else
	{
		/* do when there is no more left */
		/* 'path' was allocated with malloc inside the boost wrapper */
		if (path)
			free(path);

		profstop("store", prof_store);
		profstop("total", prof_total);
#ifdef PROFILE
		elog(NOTICE, "_________");
#endif
		DBG("Returning value");
		SRF_RETURN_DONE(funcctx);
	}
}
/*
 * Execute the given query and accumulate tsvector statistics over its
 * result.
 *
 * The query must return exactly one column whose type is binary-coercible
 * to tsvector.  'ws', if given, restricts accumulation to lexemes with
 * one of the weights A/B/C/D (stored as a bitmask in stat->weight).  The
 * TSVectorStat is allocated in persistentContext so it outlives SPI.
 */
static TSVectorStat *
ts_stat_sql(MemoryContext persistentContext, text *txt, text *ws)
{
	char	   *query = text_to_cstring(txt);
	int			i;
	TSVectorStat *stat;
	bool		isnull;
	Portal		portal;
	SPIPlanPtr	plan;

	if ((plan = SPI_prepare(query, 0, NULL)) == NULL)
		/* internal error */
		elog(ERROR, "SPI_prepare(\"%s\") failed", query);

	if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL, true)) == NULL)
		/* internal error */
		elog(ERROR, "SPI_cursor_open(\"%s\") failed", query);

	SPI_cursor_fetch(portal, true, 100);

	/* validate the query's result shape before accumulating anything */
	if (SPI_tuptable == NULL ||
		SPI_tuptable->tupdesc->natts != 1 ||
		!IsBinaryCoercible(SPI_gettypeid(SPI_tuptable->tupdesc, 1),
						   TSVECTOROID))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("ts_stat query must return one tsvector column")));

	stat = MemoryContextAllocZero(persistentContext, sizeof(TSVectorStat));
	stat->maxdepth = 1;

	/* decode the weight-filter string: one flag bit per weight class */
	if (ws)
	{
		char	   *buf;

		buf = VARDATA(ws);
		while (buf - VARDATA(ws) < VARSIZE(ws) - VARHDRSZ)
		{
			/* only single-byte characters can be valid weight letters */
			if (pg_mblen(buf) == 1)
			{
				switch (*buf)
				{
					case 'A':
					case 'a':
						stat->weight |= 1 << 3;
						break;
					case 'B':
					case 'b':
						stat->weight |= 1 << 2;
						break;
					case 'C':
					case 'c':
						stat->weight |= 1 << 1;
						break;
					case 'D':
					case 'd':
						stat->weight |= 1;
						break;
					default:
						/* unrecognized characters are silently ignored */
						stat->weight |= 0;
				}
			}
			/* advance by a whole (possibly multibyte) character */
			buf += pg_mblen(buf);
		}
	}

	/* drain the cursor in batches of 100 rows */
	while (SPI_processed > 0)
	{
		for (i = 0; i < SPI_processed; i++)
		{
			Datum		data = SPI_getbinval(SPI_tuptable->vals[i],
											 SPI_tuptable->tupdesc, 1, &isnull);

			if (!isnull)
				stat = ts_accum(persistentContext, stat, data);
		}

		SPI_freetuptable(SPI_tuptable);
		SPI_cursor_fetch(portal, true, 100);
	}

	SPI_freetuptable(SPI_tuptable);
	SPI_cursor_close(portal);
	SPI_freeplan(plan);
	pfree(query);

	return stat;
}
/*
 * Load the edge set produced by 'sql' through an SPI cursor and run the
 * K-shortest-paths solver (doKpaths) between start_vertex and
 * end_vertex, asking for no_paths alternatives.
 *
 * *ksp_path / *path_count receive the solver's result.  Returns the
 * solver's return code via ksp_finish(), which also tears down the SPI
 * connection.  Note: elog(ERROR, ...) does not return, so the
 * "return -1" statements following it are defensive/unreachable.
 */
int
compute_kshortest_path(char* sql, int start_vertex, int end_vertex,
					   int no_paths, bool has_reverse_cost,
					   ksp_path_element_t **ksp_path, int *path_count)
{
	int			SPIcode;
	void	   *SPIplan;
	Portal		SPIportal;
	bool		moredata = TRUE;
	int			ntuples;
	ksp_edge_t *edges = NULL;
	int			total_tuples = 0;
#ifndef _MSC_VER
	ksp_edge_columns_t edge_columns = {.id= -1, .source= -1, .target= -1,
									   .cost= -1, .reverse_cost= -1};
#else							// _MSC_VER: no designated initializers
	ksp_edge_columns_t edge_columns = {-1, -1, -1, -1, -1};
#endif							// _MSC_VER
	int			v_max_id = 0;
	int			v_min_id = INT_MAX;
	int			s_count = 0;
	int			t_count = 0;
	char	   *err_msg = (char *) "";
	int			ret = -1;
	register int z;

	DBG("start kshortest_path %s\n", sql);

	/* connect to SPI and open a cursor over the caller-supplied edge query */
	SPIcode = SPI_connect();
	if (SPIcode != SPI_OK_CONNECT)
	{
		elog(ERROR, "kshortest_path: couldn't open a connection to SPI");
		return -1;
	}

	SPIplan = SPI_prepare(sql, 0, NULL);
	if (SPIplan == NULL)
	{
		elog(ERROR, "kshortest_path: couldn't create query plan via SPI");
		return -1;
	}

	if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL)
	{
		elog(ERROR, "shortest_path: SPI_cursor_open('%s') returns NULL", sql);
		return -1;
	}

	/* pull the edges in batches of TUPLIMIT rows */
	while (moredata == TRUE)
	{
		SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

		/* resolve the result's column layout from the first batch only */
		if (edge_columns.id == -1)
		{
			if (ksp_fetch_edge_columns(SPI_tuptable, &edge_columns,
									   has_reverse_cost) == -1)
				return ksp_finish(SPIcode, ret);
		}

		ntuples = SPI_processed;
		total_tuples += ntuples;

		/* grow the edge array; palloc/repalloc raise ERROR on failure */
		if (!edges)
			edges = (ksp_edge_t *) palloc(total_tuples * sizeof(ksp_edge_t));
		else
			edges = (ksp_edge_t *) repalloc(edges,
											total_tuples * sizeof(ksp_edge_t));
		if (edges == NULL)
		{
			elog(ERROR, "Out of memory");
			return ksp_finish(SPIcode, ret);
		}

		if (ntuples > 0)
		{
			int			t;
			SPITupleTable *tuptable = SPI_tuptable;
			TupleDesc	tupdesc = SPI_tuptable->tupdesc;

			for (t = 0; t < ntuples; t++)
			{
				HeapTuple	tuple = tuptable->vals[t];

				ksp_fetch_edge(&tuple, &tupdesc, &edge_columns,
							   &edges[total_tuples - ntuples + t]);
			}
			SPI_freetuptable(tuptable);
		}
		else
		{
			moredata = FALSE;
		}
	}

	DBG("Total %i tuples", total_tuples);

	/* verify both endpoints actually appear in the edge set */
	for (z = 0; z < total_tuples; z++)
	{
		//check if edges[] contains source and target
		if (edges[z].source == start_vertex || edges[z].target == start_vertex)
			++s_count;
		if (edges[z].source == end_vertex || edges[z].target == end_vertex)
			++t_count;
		DBG("%i - %i", edges[z].source, edges[z].target);
	}

	DBG("Total %i tuples", total_tuples);

	if (s_count == 0)
	{
		elog(ERROR, "Start vertex was not found.");
		return -1;
	}
	if (t_count == 0)
	{
		elog(ERROR, "Target vertex was not found.");
		return -1;
	}

	DBG("Calling doKpaths\n");
	DBG("SIZE %i\n", total_tuples);

	ret = doKpaths(edges, total_tuples, start_vertex, end_vertex,
				   no_paths, has_reverse_cost,
				   ksp_path, path_count, &err_msg);

	DBG("SIZE %i\n", *path_count);
	DBG("ret = %i\n", ret);

	if (ret < 0)
	{
		//elog(ERROR, "Error computing path: %s", err_msg);
		ereport(ERROR,
				(errcode(ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED),
				 errmsg("Error computing path: %s", err_msg)));
	}

	return ksp_finish(SPIcode, ret);
}

/*
 * (Re)allocate the caller's path buffer to hold 'size' elements.
 *
 * Returns the (possibly moved) buffer, or NULL on allocation failure.
 * Fixed: the old code did "path = realloc(path, ...)", which loses the
 * only pointer to the original block when realloc fails and thus leaks
 * it.  The result is now kept in a temporary; on failure the old buffer
 * is freed and NULL is returned (same NULL result callers saw before,
 * minus the leak).
 */
ksp_path_element_t *
get_ksp_memory(int size, ksp_path_element_t *path)
{
	ksp_path_element_t *resized;

	if (path == 0)
		return malloc(size * sizeof(ksp_path_element_t));

	resized = realloc(path, size * sizeof(ksp_path_element_t));
	if (resized == NULL)
	{
		/* realloc leaves 'path' valid on failure; release it here */
		free(path);
		return NULL;
	}
	return resized;
}
/*
 * Build a PLyResult object from an SPI execution.
 *
 * 'rows' is the processed-row count; 'tuptable' is NULL for commands
 * that produce no result set.  The tuple table is freed before
 * returning, on both the success and the error path.
 */
static PyObject *
PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
{
	PLyResultObject *result;
	/* volatile: must survive the longjmp taken on PG_CATCH() */
	volatile MemoryContext oldcontext;

	result = (PLyResultObject *) PLy_result_new();
	Py_DECREF(result->status);
	result->status = PyInt_FromLong(status);

	if (status > 0 && tuptable == NULL)
	{
		/* command without a result set: only the row count is available */
		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
	}
	else if (status > 0 && tuptable != NULL)
	{
		PLyTypeInfo args;
		int			i;

		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
		PLy_typeinfo_init(&args);

		oldcontext = CurrentMemoryContext;
		PG_TRY();
		{
			MemoryContext oldcontext2;

			/*
			 * Save tuple descriptor for later use by result set metadata
			 * functions.  Save it in TopMemoryContext so that it survives
			 * outside of an SPI context.  We trust that PLy_result_dealloc()
			 * will clean it up when the time is right.
			 */
			oldcontext2 = MemoryContextSwitchTo(TopMemoryContext);
			result->tupdesc = CreateTupleDescCopy(tuptable->tupdesc);
			MemoryContextSwitchTo(oldcontext2);

			if (rows)
			{
				Py_DECREF(result->rows);
				result->rows = PyList_New(rows);

				PLy_input_tuple_funcs(&args, tuptable->tupdesc);
				for (i = 0; i < rows; i++)
				{
					PyObject   *row = PLyDict_FromTuple(&args,
														tuptable->vals[i],
														tuptable->tupdesc);

					/* PyList_SetItem steals the reference to 'row' */
					PyList_SetItem(result->rows, i, row);
				}
			}
		}
		PG_CATCH();
		{
			MemoryContextSwitchTo(oldcontext);
			/* keep an already-set Python error, otherwise report generically */
			if (!PyErr_Occurred())
				PLy_exception_set(PLy_exc_error,
								  "unrecognized error in PLy_spi_execute_fetch_result");
			PLy_typeinfo_dealloc(&args);
			SPI_freetuptable(tuptable);
			Py_DECREF(result);
			return NULL;
		}
		PG_END_TRY();

		PLy_typeinfo_dealloc(&args);
		SPI_freetuptable(tuptable);
	}

	return (PyObject *) result;
}
/*
 * tsquery_rewrite(query, sql_text)
 *
 * Rewrite 'query' using substitution pairs returned by the given SQL.
 * The SQL must return two tsquery columns: column 1 is the pattern to
 * search for, column 2 its replacement.  Rows are consumed through an
 * SPI cursor in batches of 100 until the query runs out of rows or the
 * tree has been rewritten away entirely.
 */
Datum
tsquery_rewrite(PG_FUNCTION_ARGS)
{
	QUERYTYPE  *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
	text	   *in = PG_GETARG_TEXT_P(1);
	QUERYTYPE  *rewrited = query;
	QTNode	   *tree;
	char	   *buf;
	void	   *plan;
	Portal		portal;
	bool		isnull;
	int			i;

	/* empty input query: nothing to rewrite */
	if (query->size == 0)
	{
		PG_FREE_IF_COPY(in, 1);
		PG_RETURN_POINTER(rewrited);
	}

	/* convert the query to tree form and normalize it for matching */
	tree = QT2QTN(GETQUERY(query), GETOPERAND(query));
	QTNTernary(tree);
	QTNSort(tree);

	/* NUL-terminate a copy of the SQL text */
	buf = (char *) palloc(VARSIZE(in));
	memcpy(buf, VARDATA(in), VARSIZE(in) - VARHDRSZ);
	buf[VARSIZE(in) - VARHDRSZ] = '\0';

	SPI_connect();

	/* lazily look up the tsquery type OID */
	if (tsqOid == InvalidOid)
		get_tsq_Oid();

	if ((plan = SPI_prepare(buf, 0, NULL)) == NULL)
		elog(ERROR, "SPI_prepare('%s') returns NULL", buf);

	if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL, false)) == NULL)
		elog(ERROR, "SPI_cursor_open('%s') returns NULL", buf);

	SPI_cursor_fetch(portal, true, 100);

	/* the rewrite query must return exactly (tsquery, tsquery) */
	if (SPI_tuptable->tupdesc->natts != 2)
		elog(ERROR, "number of fields doesn't equal to 2");

	if (SPI_gettypeid(SPI_tuptable->tupdesc, 1) != tsqOid)
		elog(ERROR, "column #1 isn't of tsquery type");

	if (SPI_gettypeid(SPI_tuptable->tupdesc, 2) != tsqOid)
		elog(ERROR, "column #2 isn't of tsquery type");

	/* loop stops early if the tree collapses to NULL during rewriting */
	while (SPI_processed > 0 && tree)
	{
		for (i = 0; i < SPI_processed && tree; i++)
		{
			Datum		qdata = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull);
			Datum		sdata;

			if (isnull)
				continue;

			sdata = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 2, &isnull);

			if (!isnull)
			{
				QUERYTYPE  *qtex = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(qdata));
				QUERYTYPE  *qtsubs = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(sdata));
				QTNode	   *qex,
						   *qsubs = NULL;

				/* empty pattern matches nothing: just free detoasted copies */
				if (qtex->size == 0)
				{
					/* pfree only if detoasting actually made a copy */
					if (qtex != (QUERYTYPE *) DatumGetPointer(qdata))
						pfree(qtex);
					if (qtsubs != (QUERYTYPE *) DatumGetPointer(sdata))
						pfree(qtsubs);
					continue;
				}

				/* normalize the pattern the same way as the input tree */
				qex = QT2QTN(GETQUERY(qtex), GETOPERAND(qtex));
				QTNTernary(qex);
				QTNSort(qex);

				/* an empty substitution means "delete the matched part" */
				if (qtsubs->size)
					qsubs = QT2QTN(GETQUERY(qtsubs), GETOPERAND(qtsubs));

				tree = findsubquery(tree, qex, SPIMemory, qsubs, NULL);
				QTNFree(qex);
				if (qtex != (QUERYTYPE *) DatumGetPointer(qdata))
					pfree(qtex);
				QTNFree(qsubs);
				if (qtsubs != (QUERYTYPE *) DatumGetPointer(sdata))
					pfree(qtsubs);
			}
		}

		SPI_freetuptable(SPI_tuptable);
		SPI_cursor_fetch(portal, true, 100);
	}

	SPI_freetuptable(SPI_tuptable);
	SPI_cursor_close(portal);
	SPI_freeplan(plan);
	SPI_finish();

	if (tree)
	{
		/* convert the surviving tree back into flat tsquery form */
		QTNBinary(tree);
		rewrited = QTN2QT(tree, PlainMemory);
		QTNFree(tree);
		PG_FREE_IF_COPY(query, 0);
	}
	else
	{
		/* everything was rewritten away: return an empty tsquery */
		rewrited->len = HDRSIZEQT;
		rewrited->size = 0;
	}

	pfree(buf);
	PG_FREE_IF_COPY(in, 1);
	PG_RETURN_POINTER(rewrited);
}
/*
 * Run 'restrictions_sql' through an SPI cursor and load all rows into
 * *restrictions (palloc'd), setting *total_restrictions to the number
 * of rows read.  On an empty result, *total_restrictions is set to 0
 * and *restrictions is left untouched (still NULL).
 *
 * Expected columns: id (integer), cost (numerical),
 * restricted_edges (integer array).
 */
void
pgr_get_restriction_data(
		char *restrictions_sql,
		Restrict_t **restrictions,
		size_t *total_restrictions)
{
	const int	tuple_limit = 1000000;

	clock_t		start_t = clock();
	PGR_DBG("pgr_get_restriction_data");
	PGR_DBG("%s", restrictions_sql);

	/* describe the three columns we expect the query to return */
	Column_info_t info[3];

	int			i;
	for (i = 0; i < 3; ++i)
	{
		info[i].colNumber = -1;
		info[i].type = 0;
		info[i].strict = true;
		info[i].eType = ANY_INTEGER;
	}
	info[0].name = strdup("id");
	info[1].name = strdup("cost");
	info[2].name = strdup("restricted_edges");
	info[1].eType = ANY_NUMERICAL;
	info[2].eType = ANY_INTEGER_ARRAY;

#if 0
	/* disabled hand-built sample data, kept for experimentation only */
	// experiment starts
	size_t		total_tuples = (*total_restrictions);
	(*restrictions) = (Restrict_t *) palloc0(sizeof(Restrict_t));
	(*restrictions)[0].id = 1;
	(*restrictions)[0].cost = -1;
	(*restrictions)[0].restricted_edges[0] = 4;
	(*restrictions)[0].restricted_edges[1] = 7;
	// experiment ends
#endif
#if 1
	size_t		ntuples;
	size_t		total_tuples;

	void	   *SPIplan;
	SPIplan = pgr_SPI_prepare(restrictions_sql);

	Portal		SPIportal;
	SPIportal = pgr_SPI_cursor_open(SPIplan);

	bool		moredata = TRUE;
	(*total_restrictions) = total_tuples = 0;

	/* pull the rows in batches of tuple_limit */
	while (moredata == TRUE)
	{
		SPI_cursor_fetch(SPIportal, TRUE, tuple_limit);

		/* resolve column numbers/types from the first batch only */
		if (total_tuples == 0)
		{
			pgr_fetch_column_info(info, 3);
		}
		ntuples = SPI_processed;
		total_tuples += ntuples;
		PGR_DBG("SPI_processed %ld", ntuples);
		if (ntuples > 0)
		{
			/* grow the output array to hold the new batch */
			if ((*restrictions) == NULL)
				(*restrictions) = (Restrict_t *) palloc0(
						total_tuples * sizeof(Restrict_t));
			else
				(*restrictions) = (Restrict_t *) repalloc(
						(*restrictions),
						total_tuples * sizeof(Restrict_t));

			/* NOTE(review): palloc0/repalloc elog on failure, so this
			 * check looks like dead code kept for safety */
			if ((*restrictions) == NULL)
			{
				elog(ERROR, "Out of memory");
			}

			size_t		t;
			SPITupleTable *tuptable = SPI_tuptable;
			TupleDesc	tupdesc = SPI_tuptable->tupdesc;
			PGR_DBG("processing %ld", ntuples);
			for (t = 0; t < ntuples; t++)
			{
				HeapTuple	tuple = tuptable->vals[t];

				fetch_restriction(&tuple, &tupdesc, info,
						&(*restrictions)[total_tuples - ntuples + t]);
			}
			SPI_freetuptable(tuptable);
		}
		else
		{
			moredata = FALSE;
		}
	}

	SPI_cursor_close(SPIportal);

	if (total_tuples == 0)
	{
		(*total_restrictions) = 0;
		PGR_DBG("NO restrictions");
		return;
	}

	(*total_restrictions) = total_tuples;
#endif
	PGR_DBG("Finish reading %ld data, %ld", total_tuples, (*total_restrictions));
	clock_t		end_t = clock();
	time_msg(" reading Restrictions", start_t, end_t);
}
/**
 * @fn Datum reorg_apply(PG_FUNCTION_ARGS)
 * @brief Apply operations in log table into temp table.
 *
 * reorg_apply(sql_peek, sql_insert, sql_delete, sql_update, sql_pop, count)
 *
 * Each peeked log row has three columns (id, pk, row); a NULL pk marks an
 * INSERT, a NULL row marks a DELETE, otherwise the row is an UPDATE.
 *
 * @param sql_peek   SQL to pop tuple from log table.
 * @param sql_insert SQL to insert into temp table.
 * @param sql_delete SQL to delete from temp table.
 * @param sql_update SQL to update temp table.
 * @param sql_pop    SQL to delete tuple from log table.
 * @param count      Max number of operations, or no count iff <=0.
 * @retval Number of performed operations.
 */
Datum
reorg_apply(PG_FUNCTION_ARGS)
{
#define DEFAULT_PEEK_COUNT	1000

	const char *sql_peek = PG_GETARG_CSTRING(0);
	const char *sql_insert = PG_GETARG_CSTRING(1);
	const char *sql_delete = PG_GETARG_CSTRING(2);
	const char *sql_update = PG_GETARG_CSTRING(3);
	const char *sql_pop = PG_GETARG_CSTRING(4);
	int32		count = PG_GETARG_INT32(5);

	/* per-statement plans are prepared lazily on first use and cached */
	SPIPlanPtr	plan_peek = NULL;
	SPIPlanPtr	plan_insert = NULL;
	SPIPlanPtr	plan_delete = NULL;
	SPIPlanPtr	plan_update = NULL;
	SPIPlanPtr	plan_pop = NULL;
	uint32		n,
				i;
	Oid			argtypes_peek[1] = { INT4OID };
	Datum		values_peek[1];
	bool		nulls_peek[1] = { 0 };

	/* authority check */
	must_be_superuser("reorg_apply");

	/* connect to SPI manager */
	reorg_init();

	/* peek tuple in log */
	plan_peek = reorg_prepare(sql_peek, 1, argtypes_peek);

	/* n counts performed operations across all batches */
	for (n = 0;;)
	{
		int			ntuples;
		SPITupleTable *tuptable;
		TupleDesc	desc;
		Oid			argtypes[3];	/* id, pk, row */
		Datum		values[3];		/* id, pk, row */
		bool		nulls[3];		/* id, pk, row */

		/* peek tuple in log; cap the batch by the remaining count */
		if (count == 0)
			values_peek[0] = Int32GetDatum(DEFAULT_PEEK_COUNT);
		else
			values_peek[0] = Int32GetDatum(Min(count - n, DEFAULT_PEEK_COUNT));

		execute_plan(SPI_OK_SELECT, plan_peek, values_peek, nulls_peek);
		if (SPI_processed <= 0)
			break;

		/*
		 * Save the result pointer locally because the SPI calls below will
		 * overwrite the global SPI_tuptable.
		 */
		ntuples = SPI_processed;
		tuptable = SPI_tuptable;
		desc = tuptable->tupdesc;
		argtypes[0] = SPI_gettypeid(desc, 1);	/* id */
		argtypes[1] = SPI_gettypeid(desc, 2);	/* pk */
		argtypes[2] = SPI_gettypeid(desc, 3);	/* row */

		for (i = 0; i < ntuples; i++, n++)
		{
			HeapTuple	tuple;

			tuple = tuptable->vals[i];
			values[0] = SPI_getbinval(tuple, desc, 1, &nulls[0]);
			values[1] = SPI_getbinval(tuple, desc, 2, &nulls[1]);
			values[2] = SPI_getbinval(tuple, desc, 3, &nulls[2]);

			if (nulls[1])
			{
				/* INSERT: pk is NULL, pass only the row value */
				if (plan_insert == NULL)
					plan_insert = reorg_prepare(sql_insert, 1, &argtypes[2]);
				execute_plan(SPI_OK_INSERT, plan_insert, &values[2], &nulls[2]);
			}
			else if (nulls[2])
			{
				/* DELETE: row is NULL, pass only the pk value */
				if (plan_delete == NULL)
					plan_delete = reorg_prepare(sql_delete, 1, &argtypes[1]);
				execute_plan(SPI_OK_DELETE, plan_delete, &values[1], &nulls[1]);
			}
			else
			{
				/* UPDATE: pass pk and row */
				if (plan_update == NULL)
					plan_update = reorg_prepare(sql_update, 2, &argtypes[1]);
				execute_plan(SPI_OK_UPDATE, plan_update, &values[1], &nulls[1]);
			}
		}

		/* delete tuple in log */
		if (plan_pop == NULL)
			plan_pop = reorg_prepare(sql_pop, 1, argtypes);
		execute_plan(SPI_OK_DELETE, plan_pop, values, nulls);

		SPI_freetuptable(tuptable);
	}

	SPI_finish();

	PG_RETURN_INT32(n);
}
/*
 * Find (or create) the per-cluster status block for 'cluster_name' and
 * make sure all saved SPI plans requested by need_plan_mask are
 * prepared.  Status blocks are malloc'd (they must outlive any memory
 * context) and kept on the global clusterStatusList.
 */
static Slony_I_ClusterStatus *
getClusterStatus(Name cluster_name, int need_plan_mask)
{
	Slony_I_ClusterStatus *cs;
	int			rc;
	char		query[1024];
	bool		isnull;
	Oid			plan_types[9];
	Oid			txid_snapshot_typid;
	TypeName   *txid_snapshot_typname;

	/*
	 * Find an existing cs row for this cluster
	 */
	for (cs = clusterStatusList; cs; cs = cs->next)
	{
		if ((bool) DirectFunctionCall2(nameeq,
									   NameGetDatum(&(cs->clustername)),
									   NameGetDatum(cluster_name)) == true)
		{
			/*
			 * Return it if all the requested SPI plans are prepared already.
			 */
			if ((cs->have_plan & need_plan_mask) == need_plan_mask)
				return cs;

			/*
			 * Create more SPI plans below.
			 */
			break;
		}
	}

	if (cs == NULL)
	{
		/*
		 * No existing cs found ... create a new one
		 */
		cs = (Slony_I_ClusterStatus *) malloc(sizeof(Slony_I_ClusterStatus));
		memset(cs, 0, sizeof(Slony_I_ClusterStatus));

		/*
		 * We remember the plain cluster name for fast lookup
		 */
		strncpy(NameStr(cs->clustername), NameStr(*cluster_name), NAMEDATALEN);

		/*
		 * ... and the quoted identifier of it for building queries
		 */
		cs->clusterident = strdup(DatumGetCString(DirectFunctionCall1(textout,
				DirectFunctionCall1(quote_ident,
					DirectFunctionCall1(textin, CStringGetDatum(NameStr(*cluster_name)))))));

		/*
		 * Get our local node ID
		 */
		snprintf(query, 1024, "select last_value::int4 from %s.sl_local_node_id",
				 cs->clusterident);
		rc = SPI_exec(query, 0);
		if (rc < 0 || SPI_processed != 1)
			elog(ERROR, "Slony-I: failed to read sl_local_node_id");
		cs->localNodeId = DatumGetInt32(
			SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
		SPI_freetuptable(SPI_tuptable);
		if (cs->localNodeId < 0)
			elog(ERROR, "Slony-I: Node is uninitialized - cluster %s",
				 DatumGetCString(cluster_name));

		/*
		 * Initialize the currentXid to invalid
		 */
		cs->currentXid = InvalidTransactionId;

		/*
		 * Insert the new control block into the list
		 */
		cs->next = clusterStatusList;
		clusterStatusList = cs;
	}

	/*
	 * Prepare and save the PLAN_INSERT_EVENT
	 */
	if ((need_plan_mask & PLAN_INSERT_EVENT) != 0 &&
		(cs->have_plan & PLAN_INSERT_EVENT) == 0)
	{
		/*
		 * Lookup the oid of the txid_snapshot type
		 */
		txid_snapshot_typname = makeNode(TypeName);
		txid_snapshot_typname->names =
			lappend(lappend(NIL, makeString("pg_catalog")),
					makeString("txid_snapshot"));

		/* typenameTypeId()'s signature varies across PostgreSQL versions */
#ifdef HAVE_TYPENAMETYPEID_3
		txid_snapshot_typid = typenameTypeId(NULL, txid_snapshot_typname, NULL);
#elif HAVE_TYPENAMETYPEID_2
		txid_snapshot_typid = typenameTypeId(NULL, txid_snapshot_typname);
#elif HAVE_TYPENAMETYPEID_1
		txid_snapshot_typid = typenameTypeId(txid_snapshot_typname);
#endif

		/*
		 * Create the saved plan.  We lock the sl_event table in exclusive
		 * mode in order to ensure that all events are really assigned
		 * sequence numbers in the order they get committed.
		 */
		sprintf(query,
				"LOCK TABLE %s.sl_event IN EXCLUSIVE MODE; "
				"INSERT INTO %s.sl_event "
				"(ev_origin, ev_seqno, "
				"ev_timestamp, ev_snapshot, "
				"ev_type, ev_data1, ev_data2, ev_data3, ev_data4, "
				"ev_data5, ev_data6, ev_data7, ev_data8) "
				"VALUES ('%d', nextval('%s.sl_event_seq'), "
				"now(), \"pg_catalog\".txid_current_snapshot(), $1, $2, "
				"$3, $4, $5, $6, $7, $8, $9); "
				"SELECT currval('%s.sl_event_seq');",
				cs->clusterident, cs->clusterident, cs->localNodeId,
				cs->clusterident, cs->clusterident);
		plan_types[0] = TEXTOID;
		plan_types[1] = TEXTOID;
		plan_types[2] = TEXTOID;
		plan_types[3] = TEXTOID;
		plan_types[4] = TEXTOID;
		plan_types[5] = TEXTOID;
		plan_types[6] = TEXTOID;
		plan_types[7] = TEXTOID;
		plan_types[8] = TEXTOID;

		cs->plan_insert_event = SPI_saveplan(SPI_prepare(query, 9, plan_types));
		if (cs->plan_insert_event == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		/*
		 * Also prepare the plan to remember sequence numbers on certain
		 * events.
		 */
		sprintf(query,
				"insert into %s.sl_seqlog "
				"(seql_seqid, seql_origin, seql_ev_seqno, seql_last_value) "
				"select * from ("
				"select seq_id, %d, currval('%s.sl_event_seq'), seq_last_value "
				"from %s.sl_seqlastvalue "
				"where seq_origin = '%d') as FOO "
				"where NOT %s.seqtrack(seq_id, seq_last_value) IS NULL; ",
				cs->clusterident, cs->localNodeId, cs->clusterident,
				cs->clusterident, cs->localNodeId, cs->clusterident);

		cs->plan_record_sequences = SPI_saveplan(SPI_prepare(query, 0, NULL));
		if (cs->plan_record_sequences == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		cs->have_plan |= PLAN_INSERT_EVENT;
	}

	/*
	 * Prepare and save the PLAN_INSERT_LOG
	 */
	if ((need_plan_mask & PLAN_INSERT_LOG) != 0 &&
		(cs->have_plan & PLAN_INSERT_LOG) == 0)
	{
		/*
		 * Create the saved plan's (one per log table; which one is active
		 * is decided at call time from sl_log_status)
		 */
		sprintf(query,
				"INSERT INTO %s.sl_log_1 "
				"(log_origin, log_txid, log_tableid, log_actionseq,"
				" log_cmdtype, log_cmddata) "
				"VALUES (%d, \"pg_catalog\".txid_current(), $1, "
				"nextval('%s.sl_action_seq'), $2, $3); ",
				cs->clusterident, cs->localNodeId, cs->clusterident);
		plan_types[0] = INT4OID;
		plan_types[1] = TEXTOID;
		plan_types[2] = TEXTOID;

		cs->plan_insert_log_1 = SPI_saveplan(SPI_prepare(query, 3, plan_types));
		if (cs->plan_insert_log_1 == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		sprintf(query,
				"INSERT INTO %s.sl_log_2 "
				"(log_origin, log_txid, log_tableid, log_actionseq,"
				" log_cmdtype, log_cmddata) "
				"VALUES (%d, \"pg_catalog\".txid_current(), $1, "
				"nextval('%s.sl_action_seq'), $2, $3); ",
				cs->clusterident, cs->localNodeId, cs->clusterident);
		plan_types[0] = INT4OID;
		plan_types[1] = TEXTOID;
		plan_types[2] = TEXTOID;

		cs->plan_insert_log_2 = SPI_saveplan(SPI_prepare(query, 3, plan_types));
		if (cs->plan_insert_log_2 == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		/* @-nullderef@ */

		/*
		 * Also create the 3 rather static text values for the log_cmdtype
		 * parameter and initialize the cmddata_buf.
		 */
		cs->cmdtype_I = malloc(VARHDRSZ + 1);
		SET_VARSIZE(cs->cmdtype_I, VARHDRSZ + 1);
		*VARDATA(cs->cmdtype_I) = 'I';
		cs->cmdtype_U = malloc(VARHDRSZ + 1);
		SET_VARSIZE(cs->cmdtype_U, VARHDRSZ + 1);
		*VARDATA(cs->cmdtype_U) = 'U';
		cs->cmdtype_D = malloc(VARHDRSZ + 1);
		SET_VARSIZE(cs->cmdtype_D, VARHDRSZ + 1);
		*VARDATA(cs->cmdtype_D) = 'D';

		/*
		 * And the plan to read the current log_status.
		 */
		sprintf(query, "SELECT last_value::int4 FROM %s.sl_log_status",
				cs->clusterident);
		cs->plan_get_logstatus = SPI_saveplan(SPI_prepare(query, 0, NULL));

		/* cmddata_buf grows on demand later; 8 KB is just the start size */
		cs->cmddata_size = 8192;
		cs->cmddata_buf = (text *) malloc(8192);

		cs->have_plan |= PLAN_INSERT_LOG;
	}

	return cs;
	/* @+nullderef@ */
}
Datum _Slony_I_logTrigger(PG_FUNCTION_ARGS) { TransactionId newXid = GetTopTransactionId(); Slony_I_ClusterStatus *cs; TriggerData *tg; Datum argv[4]; text *cmdtype = NULL; int rc; Name cluster_name; int32 tab_id; char *attkind; int attkind_idx; int cmddata_need; /* * Don't do any logging if the current session role isn't Origin. */ if (SessionReplicationRole != SESSION_REPLICATION_ROLE_ORIGIN) return PointerGetDatum(NULL); /* * Get the trigger call context */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "Slony-I: logTrigger() not called as trigger"); tg = (TriggerData *) (fcinfo->context); /* * Check all logTrigger() calling conventions */ if (!TRIGGER_FIRED_AFTER(tg->tg_event)) elog(ERROR, "Slony-I: logTrigger() must be fired AFTER"); if (!TRIGGER_FIRED_FOR_ROW(tg->tg_event)) elog(ERROR, "Slony-I: logTrigger() must be fired FOR EACH ROW"); if (tg->tg_trigger->tgnargs != 3) elog(ERROR, "Slony-I: logTrigger() must be defined with 3 args"); /* * Connect to the SPI manager */ if ((rc = SPI_connect()) < 0) elog(ERROR, "Slony-I: SPI_connect() failed in createEvent()"); /* * Get all the trigger arguments */ cluster_name = DatumGetName(DirectFunctionCall1(namein, CStringGetDatum(tg->tg_trigger->tgargs[0]))); tab_id = strtol(tg->tg_trigger->tgargs[1], NULL, 10); attkind = tg->tg_trigger->tgargs[2]; /* * Get or create the cluster status information and make sure it has the * SPI plans that we need here. */ cs = getClusterStatus(cluster_name, PLAN_INSERT_LOG); /* * Do the following only once per transaction. 
*/ if (!TransactionIdEquals(cs->currentXid, newXid)) { int32 log_status; bool isnull; /* * Determine the currently active log table */ if (SPI_execp(cs->plan_get_logstatus, NULL, NULL, 0) < 0) elog(ERROR, "Slony-I: cannot determine log status"); if (SPI_processed != 1) elog(ERROR, "Slony-I: cannot determine log status"); log_status = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)); SPI_freetuptable(SPI_tuptable); switch (log_status) { case 0: case 2: cs->plan_active_log = cs->plan_insert_log_1; break; case 1: case 3: cs->plan_active_log = cs->plan_insert_log_2; break; default: elog(ERROR, "Slony-I: illegal log status %d", log_status); break; } cs->currentXid = newXid; } /* * Determine cmdtype and cmddata depending on the command type */ if (TRIGGER_FIRED_BY_INSERT(tg->tg_event)) { HeapTuple new_row = tg->tg_trigtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; char *col_ident; char *col_value; int len_ident; int len_value; int i; int need_comma = false; char *OldDateStyle; char *cp = VARDATA(cs->cmddata_buf); /* * INSERT * * cmdtype = 'I' cmddata = ("col" [, ...]) values ('value' [, ...]) */ cmdtype = cs->cmdtype_I; /* * Specify all the columns */ *cp++ = '('; for (i = 0; i < tg->tg_relation->rd_att->natts; i++) { /* * Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; col_ident = (char *) slon_quote_identifier(SPI_fname(tupdesc, i + 1)); cmddata_need = (cp - (char *) (cs->cmddata_buf)) + 16 + (len_ident = strlen(col_ident)); if (cs->cmddata_size < cmddata_need) { int have = (cp - (char *) (cs->cmddata_buf)); while (cs->cmddata_size < cmddata_need) cs->cmddata_size *= 2; cs->cmddata_buf = realloc(cs->cmddata_buf, cs->cmddata_size); cp = (char *) (cs->cmddata_buf) + have; } if (need_comma) *cp++ = ','; else need_comma = true; memcpy(cp, col_ident, len_ident); cp += len_ident; } /* * Append the string ") values (" */ *cp++ = ')'; *cp++ = ' '; *cp++ = 'v'; *cp++ = 'a'; *cp++ = 'l'; *cp++ = 'u'; *cp++ = 'e'; 
*cp++ = 's'; *cp++ = ' '; *cp++ = '('; /* * Append the values */ need_comma = false; OldDateStyle = GetConfigOptionByName("DateStyle", NULL); if (!strstr(OldDateStyle, "ISO")) set_config_option("DateStyle", "ISO", PGC_USERSET, PGC_S_SESSION, true, true); for (i = 0; i < tg->tg_relation->rd_att->natts; i++) { /* * Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; if ((col_value = SPI_getvalue(new_row, tupdesc, i + 1)) == NULL) { col_value = "NULL"; } else { col_value = slon_quote_literal(col_value); } cmddata_need = (cp - (char *) (cs->cmddata_buf)) + 16 + (len_value = strlen(col_value)); if (cs->cmddata_size < cmddata_need) { int have = (cp - (char *) (cs->cmddata_buf)); while (cs->cmddata_size < cmddata_need) cs->cmddata_size *= 2; cs->cmddata_buf = realloc(cs->cmddata_buf, cs->cmddata_size); cp = (char *) (cs->cmddata_buf) + have; } if (need_comma) *cp++ = ','; else need_comma = true; memcpy(cp, col_value, len_value); cp += len_value; } if (!strstr(OldDateStyle, "ISO")) set_config_option("DateStyle", OldDateStyle, PGC_USERSET, PGC_S_SESSION, true, true); /* * Terminate and done */ *cp++ = ')'; *cp = '\0'; SET_VARSIZE(cs->cmddata_buf, VARHDRSZ + (cp - VARDATA(cs->cmddata_buf))); } else if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) { HeapTuple old_row = tg->tg_trigtuple; HeapTuple new_row = tg->tg_newtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; Datum old_value; Datum new_value; bool old_isnull; bool new_isnull; char *col_ident; char *col_value; int len_ident; int len_value; int i; int need_comma = false; int need_and = false; char *OldDateStyle; char *cp = VARDATA(cs->cmddata_buf); /* * UPDATE * * cmdtype = 'U' cmddata = "col_ident"='value' [, ...] where * "pk_ident" = 'value' [ and ...] 
*/ cmdtype = cs->cmdtype_U; for (i = 0; i < tg->tg_relation->rd_att->natts; i++) { /* * Ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; old_value = SPI_getbinval(old_row, tupdesc, i + 1, &old_isnull); new_value = SPI_getbinval(new_row, tupdesc, i + 1, &new_isnull); /* * If old and new value are NULL, the column is unchanged */ if (old_isnull && new_isnull) continue; /* * If both are NOT NULL, we need to compare the values and skip * setting the column if equal */ if (!old_isnull && !new_isnull) { Oid opr_oid; FmgrInfo *opr_finfo_p; /* * Lookup the equal operators function call info using the * typecache if available */ #ifdef HAVE_TYPCACHE TypeCacheEntry *type_cache; type_cache = lookup_type_cache( SPI_gettypeid(tupdesc, i + 1), TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO); opr_oid = type_cache->eq_opr; if (opr_oid == ARRAY_EQ_OP) opr_oid = InvalidOid; else opr_finfo_p = &(type_cache->eq_opr_finfo); #else FmgrInfo opr_finfo; opr_oid = compatible_oper_funcid(makeList1(makeString("=")), SPI_gettypeid(tupdesc, i + 1), SPI_gettypeid(tupdesc, i + 1), true); if (OidIsValid(opr_oid)) { fmgr_info(opr_oid, &opr_finfo); opr_finfo_p = &opr_finfo; } #endif /* * If we have an equal operator, use that to do binary * comparision. Else get the string representation of both * attributes and do string comparision. 
*/ if (OidIsValid(opr_oid)) { if (DatumGetBool(FunctionCall2(opr_finfo_p, old_value, new_value))) continue; } else { char *old_strval = SPI_getvalue(old_row, tupdesc, i + 1); char *new_strval = SPI_getvalue(new_row, tupdesc, i + 1); if (strcmp(old_strval, new_strval) == 0) continue; } } if (need_comma) *cp++ = ','; else need_comma = true; col_ident = (char *) slon_quote_identifier(SPI_fname(tupdesc, i + 1)); if (new_isnull) col_value = "NULL"; else { OldDateStyle = GetConfigOptionByName("DateStyle", NULL); if (!strstr(OldDateStyle, "ISO")) set_config_option("DateStyle", "ISO", PGC_USERSET, PGC_S_SESSION, true, true); col_value = slon_quote_literal(SPI_getvalue(new_row, tupdesc, i + 1)); if (!strstr(OldDateStyle, "ISO")) set_config_option("DateStyle", OldDateStyle, PGC_USERSET, PGC_S_SESSION, true, true); } cmddata_need = (cp - (char *) (cs->cmddata_buf)) + 16 + (len_ident = strlen(col_ident)) + (len_value = strlen(col_value)); if (cs->cmddata_size < cmddata_need) { int have = (cp - (char *) (cs->cmddata_buf)); while (cs->cmddata_size < cmddata_need) cs->cmddata_size *= 2; cs->cmddata_buf = realloc(cs->cmddata_buf, cs->cmddata_size); cp = (char *) (cs->cmddata_buf) + have; } memcpy(cp, col_ident, len_ident); cp += len_ident; *cp++ = '='; memcpy(cp, col_value, len_value); cp += len_value; } /* * It can happen that the only UPDATE an application does is to set a * column to the same value again. In that case, we'd end up here with * no columns in the SET clause yet. We add the first key column here * with it's old value to simulate the same for the replication * engine. 
*/ if (!need_comma) { for (i = 0, attkind_idx = -1; i < tg->tg_relation->rd_att->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (!attkind[attkind_idx]) elog(ERROR, "Slony-I: no key columns found in logTrigger() attkind parameter"); if (attkind[attkind_idx] == 'k') break; } col_ident = (char *) slon_quote_identifier(SPI_fname(tupdesc, i + 1)); col_value = slon_quote_literal(SPI_getvalue(old_row, tupdesc, i + 1)); cmddata_need = (cp - (char *) (cs->cmddata_buf)) + 16 + (len_ident = strlen(col_ident)) + (len_value = strlen(col_value)); if (cs->cmddata_size < cmddata_need) { int have = (cp - (char *) (cs->cmddata_buf)); while (cs->cmddata_size < cmddata_need) cs->cmddata_size *= 2; cs->cmddata_buf = realloc(cs->cmddata_buf, cs->cmddata_size); cp = (char *) (cs->cmddata_buf) + have; } memcpy(cp, col_ident, len_ident); cp += len_ident; *cp++ = '='; memcpy(cp, col_value, len_value); cp += len_value; } *cp++ = ' '; *cp++ = 'w'; *cp++ = 'h'; *cp++ = 'e'; *cp++ = 'r'; *cp++ = 'e'; *cp++ = ' '; for (i = 0, attkind_idx = -1; i < tg->tg_relation->rd_att->natts; i++) { /* * Ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (!attkind[attkind_idx]) break; if (attkind[attkind_idx] != 'k') continue; col_ident = (char *) slon_quote_identifier(SPI_fname(tupdesc, i + 1)); col_value = slon_quote_literal(SPI_getvalue(old_row, tupdesc, i + 1)); if (col_value == NULL) elog(ERROR, "Slony-I: old key column %s.%s IS NULL on UPDATE", NameStr(tg->tg_relation->rd_rel->relname), col_ident); cmddata_need = (cp - (char *) (cs->cmddata_buf)) + 16 + (len_ident = strlen(col_ident)) + (len_value = strlen(col_value)); if (cs->cmddata_size < cmddata_need) { int have = (cp - (char *) (cs->cmddata_buf)); while (cs->cmddata_size < cmddata_need) cs->cmddata_size *= 2; cs->cmddata_buf = realloc(cs->cmddata_buf, cs->cmddata_size); cp = (char *) (cs->cmddata_buf) + have; } if (need_and) { *cp++ = ' '; *cp++ = 'a'; *cp++ = 'n'; *cp++ = 'd'; 
*cp++ = ' '; } else need_and = true; memcpy(cp, col_ident, len_ident); cp += len_ident; *cp++ = '='; memcpy(cp, col_value, len_value); cp += len_value; } *cp = '\0'; SET_VARSIZE(cs->cmddata_buf, VARHDRSZ + (cp - VARDATA(cs->cmddata_buf))); } else if (TRIGGER_FIRED_BY_DELETE(tg->tg_event)) { HeapTuple old_row = tg->tg_trigtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; char *col_ident; char *col_value; int len_ident; int len_value; int i; int need_and = false; char *cp = VARDATA(cs->cmddata_buf); /* * DELETE * * cmdtype = 'D' cmddata = "pk_ident"='value' [and ...] */ cmdtype = cs->cmdtype_D; for (i = 0, attkind_idx = -1; i < tg->tg_relation->rd_att->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (!attkind[attkind_idx]) break; if (attkind[attkind_idx] != 'k') continue; col_ident = (char *) slon_quote_identifier(SPI_fname(tupdesc, i + 1)); col_value = slon_quote_literal(SPI_getvalue(old_row, tupdesc, i + 1)); if (col_value == NULL) elog(ERROR, "Slony-I: old key column %s.%s IS NULL on DELETE", NameStr(tg->tg_relation->rd_rel->relname), col_ident); cmddata_need = (cp - (char *) (cs->cmddata_buf)) + 16 + (len_ident = strlen(col_ident)) + (len_value = strlen(col_value)); if (cs->cmddata_size < cmddata_need) { int have = (cp - (char *) (cs->cmddata_buf)); while (cs->cmddata_size < cmddata_need) cs->cmddata_size *= 2; cs->cmddata_buf = realloc(cs->cmddata_buf, cs->cmddata_size); cp = (char *) (cs->cmddata_buf) + have; } if (need_and) { *cp++ = ' '; *cp++ = 'a'; *cp++ = 'n'; *cp++ = 'd'; *cp++ = ' '; } else need_and = true; memcpy(cp, col_ident, len_ident); cp += len_ident; *cp++ = '='; memcpy(cp, col_value, len_value); cp += len_value; } *cp = '\0'; SET_VARSIZE(cs->cmddata_buf, VARHDRSZ + (cp - VARDATA(cs->cmddata_buf))); } else elog(ERROR, "Slony-I: logTrigger() fired for unhandled event"); /* * Construct the parameter array and insert the log row. 
*/ argv[0] = Int32GetDatum(tab_id); argv[1] = PointerGetDatum(cmdtype); argv[2] = PointerGetDatum(cs->cmddata_buf); SPI_execp(cs->plan_active_log, argv, NULL, 0); SPI_finish(); return PointerGetDatum(NULL); }
/*
 * Build a PL/Python result object from the outcome of an SPI execution.
 *
 * tuptable: SPI result tuples, or NULL for a command that returned no
 *           result set; freed here (when non-NULL) once fully converted.
 * rows:     number of rows processed/returned.
 * status:   SPI status code; stored on the result object as-is.
 *
 * Returns a new PLyResultObject reference.  On error inside the PG_TRY
 * block the partially-built result is released and the error re-thrown.
 */
static PyObject *
PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status)
{
    PLyResultObject *result;
    /* volatile: survives the longjmp taken by PG_CATCH() */
    volatile MemoryContext oldcontext;

    result = (PLyResultObject *) PLy_result_new();
    Py_DECREF(result->status);
    result->status = PyInt_FromLong(status);

    if (status > 0 && tuptable == NULL)
    {
        /* Utility/DML command: only a row count to report.  Row counts
         * beyond LONG_MAX are approximated as a Python float. */
        Py_DECREF(result->nrows);
        result->nrows = (rows > (uint64) LONG_MAX) ?
            PyFloat_FromDouble((double) rows) :
            PyInt_FromLong((long) rows);
    }
    else if (status > 0 && tuptable != NULL)
    {
        PLyTypeInfo args;
        MemoryContext cxt;

        Py_DECREF(result->nrows);
        result->nrows = (rows > (uint64) LONG_MAX) ?
            PyFloat_FromDouble((double) rows) :
            PyInt_FromLong((long) rows);

        /* Throwaway context for per-row conversion allocations. */
        cxt = AllocSetContextCreate(CurrentMemoryContext,
                                    "PL/Python temp context",
                                    ALLOCSET_DEFAULT_MINSIZE,
                                    ALLOCSET_DEFAULT_INITSIZE,
                                    ALLOCSET_DEFAULT_MAXSIZE);
        PLy_typeinfo_init(&args, cxt);

        oldcontext = CurrentMemoryContext;
        PG_TRY();
        {
            MemoryContext oldcontext2;

            if (rows)
            {
                uint64 i;

                /*
                 * PyList_New() and PyList_SetItem() use Py_ssize_t for list
                 * size and list indices; so we cannot support a result larger
                 * than PY_SSIZE_T_MAX.
                 */
                if (rows > (uint64) PY_SSIZE_T_MAX)
                    ereport(ERROR,
                            (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                             errmsg("query result has too many rows to fit in a Python list")));

                Py_DECREF(result->rows);
                result->rows = PyList_New(rows);

                PLy_input_tuple_funcs(&args, tuptable->tupdesc);
                for (i = 0; i < rows; i++)
                {
                    PyObject *row = PLyDict_FromTuple(&args,
                                                      tuptable->vals[i],
                                                      tuptable->tupdesc);

                    /* PyList_SetItem steals the reference to row */
                    PyList_SetItem(result->rows, i, row);
                }
            }

            /*
             * Save tuple descriptor for later use by result set metadata
             * functions.  Save it in TopMemoryContext so that it survives
             * outside of an SPI context.  We trust that PLy_result_dealloc()
             * will clean it up when the time is right.  (Do this as late as
             * possible, to minimize the number of ways the tupdesc could get
             * leaked due to errors.)
             */
            oldcontext2 = MemoryContextSwitchTo(TopMemoryContext);
            result->tupdesc = CreateTupleDescCopy(tuptable->tupdesc);
            MemoryContextSwitchTo(oldcontext2);
        }
        PG_CATCH();
        {
            /* Restore context, drop temp allocations and the half-built
             * Python result, then propagate the error. */
            MemoryContextSwitchTo(oldcontext);
            MemoryContextDelete(cxt);
            Py_DECREF(result);
            PG_RE_THROW();
        }
        PG_END_TRY();

        MemoryContextDelete(cxt);
        SPI_freetuptable(tuptable);
    }

    return (PyObject *) result;
}
/*
 * Fetch the edge set with an SPI cursor and run the boost shooting-star
 * shortest-path solver from source_edge_id to target_edge_id.
 *
 * sql:              query returning the edge columns (id, source, target,
 *                   cost, optionally reverse_cost, endpoint coords, rules).
 * path/path_count:  output — the computed path and its length.
 *
 * Returns the solver's return code via finish() (which closes SPI), or -1
 * on早SPI/setup failure.  NOTE(review): several early error paths return -1
 * without SPI_finish(); elog(ERROR) aborts the transaction anyway, so the
 * explicit returns after elog(ERROR) appear to be dead code — confirm.
 */
static int
compute_shortest_path_shooting_star(char* sql, int source_edge_id,
                                    int target_edge_id, bool directed,
                                    bool has_reverse_cost,
                                    path_element_t **path, int *path_count)
{
    int SPIcode;
    void *SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
    int ntuples;
    edge_shooting_star_t *edges = NULL;
    int total_tuples = 0;
    // int v_max_id=0;
    // int v_min_id=INT_MAX;
    int e_max_id=0;
    int e_min_id=INT_MAX;
    /* -1 sentinels mean "column numbers not yet resolved" */
    edge_shooting_star_columns_t edge_columns = {id: -1, source: -1,
        target: -1, cost: -1, reverse_cost: -1, s_x: -1, s_y: -1,
        t_x: -1, t_y: -1, to_cost: -1, rule: -1};
    char *err_msg;
    int ret = -1;
    register int z, t;
    int s_count=0;
    int t_count=0;

    DBG("start shortest_path_shooting_star\n");

    SPIcode = SPI_connect();
    if (SPIcode != SPI_OK_CONNECT)
    {
        elog(ERROR, "shortest_path_shooting_star: couldn't open a connection to SPI");
        return -1;
    }

    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL)
    {
        elog(ERROR, "shortest_path_shooting_star: couldn't create query plan via SPI");
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL)
    {
        elog(ERROR, "shortest_path_shooting_star: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    /* Pull edges in batches of TUPLIMIT until the cursor is exhausted. */
    while (moredata == TRUE)
    {
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

        /* Resolve result-column numbers from the first batch only. */
        if (edge_columns.id == -1)
        {
            if (fetch_edge_shooting_star_columns(SPI_tuptable, &edge_columns,
                                                 has_reverse_cost) == -1)
                return finish(SPIcode, ret);
        }
        //DBG("***%i***", ret);

        ntuples = SPI_processed;
        total_tuples += ntuples;

        /* Grow the edge array to hold this batch.  NOTE(review): when the
         * very first fetch returns 0 rows this is palloc(0) — presumably
         * harmless since the array is then never dereferenced; the NULL
         * check below is dead code because palloc ereports on failure. */
        if (!edges)
            edges = palloc(total_tuples * sizeof(edge_shooting_star_t));
        else
            edges = repalloc(edges, total_tuples * sizeof(edge_shooting_star_t));

        if (edges == NULL)
        {
            elog(ERROR, "Out of memory");
            return finish(SPIcode, ret);
        }

        if (ntuples > 0)
        {
            int t;          /* shadows the outer register t */
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++)
            {
                HeapTuple tuple = tuptable->vals[t];
                fetch_edge_shooting_star(&tuple, &tupdesc, &edge_columns,
                                         &edges[total_tuples - ntuples + t]);
            }
            SPI_freetuptable(tuptable);
        }
        else
        {
            moredata = FALSE;
        }
    }

    DBG("Total %i tuples", total_tuples);

    /* Track the edge-id range; e_max_id sizes structures in the solver. */
    for(z=0; z<total_tuples; z++)
    {
        if(edges[z].id<e_min_id)
            e_min_id=edges[z].id;
        if(edges[z].id>e_max_id)
            e_max_id=edges[z].id;
    }

    DBG("E : %i <-> %i", e_min_id, e_max_id);

    for(z=0; z<total_tuples; ++z)
    {
        //check if edges[] contains source and target
        if(edges[z].id == source_edge_id)
            ++s_count;
        if(edges[z].id == target_edge_id)
            ++t_count;
        //edges[z].source-=v_min_id;
        //edges[z].target-=v_min_id;
    }

    DBG("Total %i tuples", total_tuples);

    if(s_count == 0)
    {
        elog(ERROR, "Start edge was not found.");
        return -1;
    }

    if(t_count == 0)
    {
        elog(ERROR, "Target edge was not found.");
        return -1;
    }

    DBG("Total %i tuples", total_tuples);
    DBG("Calling boost_shooting_star <%i>\n", total_tuples);

    //time_t stime = time(NULL);

    ret = boost_shooting_star(edges, total_tuples, source_edge_id,
                              target_edge_id, directed, has_reverse_cost,
                              path, path_count, &err_msg, e_max_id);

    //time_t etime = time(NULL);
    //DBG("Path was calculated in %f seconds. \n", difftime(etime, stime));

    DBG("SIZE %i\n",*path_count);
    DBG("ret = %i\n",ret);

    if (ret < 0)
    {
        ereport(ERROR,
                (errcode(ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED),
                 errmsg("Error computing path: %s", err_msg)));
    }

    return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(shortest_path_shooting_star);

/*
 * SQL-callable set-returning function.  Args: (sql text, source_edge int4,
 * target_edge int4, directed bool, has_reverse_cost bool).  Computes the
 * path on the first call, then streams one (seq, vertex_id, edge_id, cost)
 * tuple per call using the pgr_costResult row type.
 */
Datum
shortest_path_shooting_star(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    int call_cntr;
    int max_calls;
    TupleDesc tuple_desc;
    path_element_t *path = 0;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;
        int path_count = 0;
        int ret;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        ret = compute_shortest_path_shooting_star(text2char(PG_GETARG_TEXT_P(0)),
                                                  PG_GETARG_INT32(1),
                                                  PG_GETARG_INT32(2),
                                                  PG_GETARG_BOOL(3),
                                                  PG_GETARG_BOOL(4),
                                                  &path, &path_count);

#ifdef DEBUG
        DBG("Ret is %i", ret);
        if (ret >= 0)
        {
            int i;
            for (i = 0; i < path_count; i++)
            {
                DBG("Step # %i vertex_id  %i ", i, path[i].vertex_id);
                DBG("        edge_id    %i ", path[i].edge_id);
                DBG("        cost       %f ", path[i].cost);
            }
        }
#endif

        /* total number of tuples to be returned */
        DBG("Conting tuples number\n");
        funcctx->max_calls = path_count;
        funcctx->user_fctx = path;

        DBG("Path count %i", path_count);

        funcctx->tuple_desc =
            BlessTupleDesc(RelationNameGetTupleDesc("pgr_costResult"));

        MemoryContextSwitchTo(oldcontext);
    }

    /* stuff done on every call of the function */
    funcctx = SRF_PERCALL_SETUP();

    call_cntr = funcctx->call_cntr;
    max_calls = funcctx->max_calls;
    tuple_desc = funcctx->tuple_desc;
    path = (path_element_t*) funcctx->user_fctx;

    DBG("Trying to allocate some memory\n");

    if (call_cntr < max_calls)  /* do when there is more left to send */
    {
        HeapTuple tuple;
        Datum result;
        Datum *values;
        char* nulls;

        values = palloc(4 * sizeof(Datum));
        nulls = palloc(4 * sizeof(char));

        values[0] = Int32GetDatum(call_cntr);
        nulls[0] = ' ';
        values[1] = Int32GetDatum(path[call_cntr].vertex_id);
        nulls[1] = ' ';
        values[2] = Int32GetDatum(path[call_cntr].edge_id);
        nulls[2] = ' ';
        values[3] = Float8GetDatum(path[call_cntr].cost);
        nulls[3] = ' ';

        tuple = heap_formtuple(tuple_desc, values, nulls);

        /* make the tuple into a datum */
        result = HeapTupleGetDatum(tuple);

        /* clean up (this is not really necessary) */
        pfree(values);
        pfree(nulls);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else    /* do when there is no more left */
    {
        /* NOTE(review): path is released with free(), so it is presumably
         * malloc'd inside boost_shooting_star — confirm against that code. */
        if (path)
            free(path);
        SRF_RETURN_DONE(funcctx);
    }
}
/**
 * @fn Datum repack_apply(PG_FUNCTION_ARGS)
 * @brief Apply operations in log table into temp table.
 *
 * repack_apply(sql_peek, sql_insert, sql_delete, sql_update, sql_pop, count)
 *
 * @param	sql_peek	SQL to pop tuple from log table.
 * @param	sql_insert	SQL to insert into temp table.
 * @param	sql_delete	SQL to delete from temp table.
 * @param	sql_update	SQL to update temp table.
 * @param	sql_pop	SQL to bulk-delete tuples from log table.
 * @param	count	Max number of operations, or no count iff <=0.
 * @retval	Number of performed operations.
 */
Datum
repack_apply(PG_FUNCTION_ARGS)
{
#define DEFAULT_PEEK_COUNT	1000

    const char *sql_peek = PG_GETARG_CSTRING(0);
    const char *sql_insert = PG_GETARG_CSTRING(1);
    const char *sql_delete = PG_GETARG_CSTRING(2);
    const char *sql_update = PG_GETARG_CSTRING(3);
    /* sql_pop, the fourth arg, will be used in the loop below */
    int32 count = PG_GETARG_INT32(5);

    /* Plans are prepared lazily on first use and cached for the loop. */
    SPIPlanPtr plan_peek = NULL;
    SPIPlanPtr plan_insert = NULL;
    SPIPlanPtr plan_delete = NULL;
    SPIPlanPtr plan_update = NULL;
    uint32 n, i;
    Oid argtypes_peek[1] = { INT4OID };
    Datum values_peek[1];
    const char nulls_peek[1] = { 0 };
    StringInfoData sql_pop;

    initStringInfo(&sql_pop);

    /* authority check */
    must_be_superuser("repack_apply");

    /* connect to SPI manager */
    repack_init();

    /* peek tuple in log */
    plan_peek = repack_prepare(sql_peek, 1, argtypes_peek);

    /* n counts applied log rows across all batches; loop exits when a
     * peek returns no rows. */
    for (n = 0;;)
    {
        int ntuples;
        SPITupleTable *tuptable;
        TupleDesc desc;
        Oid argtypes[3];    /* id, pk, row */
        Datum values[3];    /* id, pk, row */
        bool nulls[3];      /* id, pk, row */

        /* peek tuple in log */
        if (count <= 0)
            values_peek[0] = Int32GetDatum(DEFAULT_PEEK_COUNT);
        else
            values_peek[0] = Int32GetDatum(Min(count - n, DEFAULT_PEEK_COUNT));

        execute_plan(SPI_OK_SELECT, plan_peek, values_peek, nulls_peek);
        if (SPI_processed <= 0)
            break;

        /* copy tuptable because we will call other sqls. */
        ntuples = SPI_processed;
        tuptable = SPI_tuptable;
        desc = tuptable->tupdesc;
        argtypes[0] = SPI_gettypeid(desc, 1);   /* id */
        argtypes[1] = SPI_gettypeid(desc, 2);   /* pk */
        argtypes[2] = SPI_gettypeid(desc, 3);   /* row */

        /* Start a fresh "DELETE ... IN (" statement (arg 4) per batch. */
        resetStringInfo(&sql_pop);
        appendStringInfoString(&sql_pop, PG_GETARG_CSTRING(4));

        for (i = 0; i < ntuples; i++, n++)
        {
            HeapTuple tuple;
            char *pkid;

            tuple = tuptable->vals[i];
            values[0] = SPI_getbinval(tuple, desc, 1, &nulls[0]);
            values[1] = SPI_getbinval(tuple, desc, 2, &nulls[1]);
            values[2] = SPI_getbinval(tuple, desc, 3, &nulls[2]);

            pkid = SPI_getvalue(tuple, desc, 1);
            Assert(pkid != NULL);

            /* Dispatch on which of pk/row is NULL: NULL pk means the log
             * row describes an INSERT, NULL row means a DELETE, otherwise
             * an UPDATE. */
            if (nulls[1])
            {
                /* INSERT */
                if (plan_insert == NULL)
                    plan_insert = repack_prepare(sql_insert, 1, &argtypes[2]);
                execute_plan(SPI_OK_INSERT, plan_insert, &values[2],
                             (nulls[2] ? "n" : " "));
            }
            else if (nulls[2])
            {
                /* DELETE */
                if (plan_delete == NULL)
                    plan_delete = repack_prepare(sql_delete, 1, &argtypes[1]);
                execute_plan(SPI_OK_DELETE, plan_delete, &values[1],
                             (nulls[1] ? "n" : " "));
            }
            else
            {
                /* UPDATE */
                if (plan_update == NULL)
                    plan_update = repack_prepare(sql_update, 2, &argtypes[1]);
                execute_plan(SPI_OK_UPDATE, plan_update, &values[1],
                             (nulls[1] ? "n" : " "));
            }

            /* Add the primary key ID of each row from the log
             * table we have processed so far to this
             * DELETE ... IN (...) query string, so we
             * can delete all the rows we have processed at-once.
             */
            if (i == 0)
                appendStringInfoString(&sql_pop, pkid);
            else
                appendStringInfo(&sql_pop, ",%s", pkid);
            pfree(pkid);
        }

        /* i must be > 0 (and hence we must have some rows to delete)
         * since SPI_processed > 0 */
        Assert(i > 0);
        appendStringInfoString(&sql_pop, ");");

        /* Bulk delete of processed rows from the log table */
        execute(SPI_OK_DELETE, sql_pop.data);

        SPI_freetuptable(tuptable);
    }

    SPI_finish();

    PG_RETURN_INT32(n);
}
/*
 * Fetch the edge set with an SPI cursor and run the boost all-pairs
 * shortest-path (Floyd-Warshall) solver.
 *
 * sql:              query returning the edge columns (id, source, target,
 *                   cost, optionally reverse_cost).
 * pair/pair_count:  output — (src, dest, cost) pairs and their number.
 *
 * Returns the solver's return code via finish() (which closes SPI), or -1
 * on SPI/setup failure.  NOTE(review): v_max_id, v_min_id, s_count,
 * t_count and z are declared but unused here — left over from the
 * single-pair variant this was copied from.
 */
static int
compute_apsp_warshall(char* sql, bool directed, bool has_reverse_cost,
                      apsp_element_t **pair, int *pair_count)
{
    int i;
    int SPIcode;
    void *SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
    int ntuples;
    edge_t *edges = NULL;
    int total_tuples = 0;
    /* -1 sentinels mean "column numbers not yet resolved" */
    edge_columns_t edge_columns = {.id= -1, .source= -1, .target= -1,
                                   .cost= -1, .reverse_cost= -1};
    int v_max_id=0;
    int v_min_id=INT_MAX;
    int s_count = 0;
    int t_count = 0;
    char *err_msg;
    int ret = -1;
    register int z;
    // set<int> vertices;

    DBG("start compute_apsp_warshall\n");

    SPIcode = SPI_connect();
    if (SPIcode != SPI_OK_CONNECT)
    {
        elog(ERROR, "compute_apsp_warshall: couldn't open a connection to SPI");
        return -1;
    }

    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL)
    {
        elog(ERROR, "compute_apsp_warshall: couldn't create query plan via SPI");
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL)
    {
        elog(ERROR, "compute_apsp_warshall: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    /* Pull edges in batches of TUPLIMIT until the cursor is exhausted. */
    while (moredata == TRUE)
    {
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

        /* Resolve result-column numbers from the first batch only. */
        if (edge_columns.id == -1)
        {
            if (fetch_edge_columns(SPI_tuptable, &edge_columns,
                                   has_reverse_cost) == -1)
                return finish(SPIcode, ret);
        }

        ntuples = SPI_processed;
        total_tuples += ntuples;

        /* Grow the edge array to hold this batch; the NULL check is dead
         * code since palloc/repalloc ereport on failure. */
        if (!edges)
            edges = palloc(total_tuples * sizeof(edge_t));
        else
            edges = repalloc(edges, total_tuples * sizeof(edge_t));

        if (edges == NULL)
        {
            elog(ERROR, "Out of memory");
            return finish(SPIcode, ret);
        }

        DBG("Number of tuples fetched: %i",ntuples);

        if (ntuples > 0)
        {
            int t;
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++)
            {
                HeapTuple tuple = tuptable->vals[t];
                fetch_edge(&tuple, &tupdesc, &edge_columns,
                           &edges[total_tuples - ntuples + t]);
                // vertices.insert(edges[total_tuples - ntuples + t].source);
                // vertices.insert(edges[total_tuples - ntuples + t].target);
            }
            SPI_freetuptable(tuptable);
        }
        else
        {
            moredata = FALSE;
        }
    }

#ifdef DEBUG
    for (i = 0; i < total_tuples; i++)
    {
        DBG("Step %i src_vertex_id  %i ", i, edges[i].source);
        DBG("        dest_vertex_id    %i ", edges[i].target);
        DBG("        cost       %f ", edges[i].cost);
    }
#endif

    DBG("Calling boost_apsp\n");

    //start_vertex -= v_min_id;
    //end_vertex   -= v_min_id;

    ret = boost_apsp(edges, total_tuples, 0, //vertices.size()
                     directed, has_reverse_cost,
                     pair, pair_count, &err_msg);

    DBG("Boost message: \n%s",err_msg);
    DBG("SIZE %i\n",*pair_count);

    /*
    //::::::::::::::::::::::::::::::::
    //:: restoring original vertex id
    //::::::::::::::::::::::::::::::::
    for(z=0;z<*path_count;z++)
    {
        //DBG("vetex %i\n",(*path)[z].vertex_id);
        (*path)[z].vertex_id+=v_min_id;
    }

    DBG("ret = %i\n", ret);
    DBG("*path_count = %i\n", *path_count);
    DBG("ret = %i\n", ret);
    */

    if (ret < 0)
    {
        //elog(ERROR, "Error computing path: %s", err_msg);
        ereport(ERROR,
                (errcode(ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED),
                 errmsg("Error computing path: %s", err_msg)));
    }

    return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(apsp_warshall);

/*
 * SQL-callable set-returning function.  Args: (sql text, directed bool,
 * has_reverse_cost bool).  Computes all-pairs shortest paths on the first
 * call, then streams one (seq, src_vertex_id, dest_vertex_id, cost) tuple
 * per call using the pgr_costResult row type.
 */
Datum
apsp_warshall(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    int call_cntr;
    int max_calls;
    TupleDesc tuple_desc;
    apsp_element_t *pair;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;
        int pair_count = 0;
        int ret;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        ret = compute_apsp_warshall(text2char(PG_GETARG_TEXT_P(0)),
                                    PG_GETARG_BOOL(1),
                                    PG_GETARG_BOOL(2),
                                    &pair, &pair_count);

#ifdef DEBUG
        DBG("Ret is %i", ret);
        if (ret >= 0)
        {
            int i;
            for (i = 0; i < pair_count; i++)
            {
                DBG("Step: %i, source_id: %i, target_id: %i, cost: %f ",
                    i, pair[i].src_vertex_id, pair[i].dest_vertex_id,
                    pair[i].cost);
            }
        }
#endif

        /* total number of tuples to be returned */
        funcctx->max_calls = pair_count;
        funcctx->user_fctx = pair;

        funcctx->tuple_desc =
            BlessTupleDesc(RelationNameGetTupleDesc("pgr_costResult"));

        MemoryContextSwitchTo(oldcontext);
    }

    /* stuff done on every call of the function */
    funcctx = SRF_PERCALL_SETUP();

    call_cntr = funcctx->call_cntr;
    max_calls = funcctx->max_calls;
    tuple_desc = funcctx->tuple_desc;
    pair = (apsp_element_t*) funcctx->user_fctx;

    if (call_cntr < max_calls)  /* do when there is more left to send */
    {
        HeapTuple tuple;
        Datum result;
        Datum *values;
        char* nulls;

        /* This will work for some compilers. If it crashes with segfault, try to change the following block with this one

        values = palloc(4 * sizeof(Datum));
        nulls = palloc(4 * sizeof(char));

        values[0] = call_cntr;
        nulls[0] = ' ';
        values[1] = Int32GetDatum(path[call_cntr].vertex_id);
        nulls[1] = ' ';
        values[2] = Int32GetDatum(path[call_cntr].edge_id);
        nulls[2] = ' ';
        values[3] = Float8GetDatum(path[call_cntr].cost);
        nulls[3] = ' ';
        */

        values = palloc(4 * sizeof(Datum));
        nulls = palloc(4 * sizeof(char));

        values[0] = Int32GetDatum(call_cntr);
        nulls[0] = ' ';
        values[1] = Int32GetDatum(pair[call_cntr].src_vertex_id);
        nulls[1] = ' ';
        values[2] = Int32GetDatum(pair[call_cntr].dest_vertex_id);
        nulls[2] = ' ';
        values[3] = Float8GetDatum(pair[call_cntr].cost);
        nulls[3] = ' ';

        tuple = heap_formtuple(tuple_desc, values, nulls);

        /* make the tuple into a datum */
        result = HeapTupleGetDatum(tuple);

        /* clean up (this is not really necessary) */
        pfree(values);
        pfree(nulls);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else    /* do when there is no more left */
    {
        SRF_RETURN_DONE(funcctx);
    }
}
static int load_lex(LEXICON *lex, char *tab) { int ret; SPIPlanPtr SPIplan; Portal SPIportal; bool moredata = TRUE; #ifdef DEBUG struct timeval t1, t2; double elapsed; #endif char *sql; int ntuples; int total_tuples = 0; lex_columns_t lex_columns = {seq: -1, word: -1, stdword: -1, token: -1}; int seq; char *word; char *stdword; int token; DBG("start load_lex\n"); SET_TIME(t1); if (!tab || !strlen(tab)) { elog(NOTICE, "load_lex: rules table is not usable"); return -1; } if (!tableNameOk(tab)) { elog(NOTICE, "load_lex: lex and gaz table names may only be alphanum and '.\"_' characters (%s)", tab); return -1; } sql = SPI_palloc(strlen(tab)+65); strcpy(sql, "select seq, word, stdword, token from "); strcat(sql, tab); strcat(sql, " order by id "); /* get the sql for the lexicon records and prepare the query */ SPIplan = SPI_prepare(sql, 0, NULL); if (SPIplan == NULL) { elog(NOTICE, "load_lex: couldn't create query plan for the lex data via SPI (%s)", sql); return -1; } /* get the sql for the lexicon records and prepare the query */ SPIplan = SPI_prepare(sql, 0, NULL); if (SPIplan == NULL) { elog(NOTICE, "load_lex: couldn't create query plan for the lexicon data via SPI"); return -1; } if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL) { elog(NOTICE, "load_lex: SPI_cursor_open('%s') returns NULL", sql); return -1; } while (moredata == TRUE) { //DBG("calling SPI_cursor_fetch"); SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT); if (SPI_tuptable == NULL) { elog(NOTICE, "load_lex: SPI_tuptable is NULL"); return -1; } if (lex_columns.seq == -1) { ret = fetch_lex_columns(SPI_tuptable, &lex_columns); if (ret) return ret; } ntuples = SPI_processed; //DBG("Reading edges: %i - %i", total_tuples, total_tuples+ntuples); total_tuples += ntuples; if (ntuples > 0) { int t; Datum binval; bool isnull; SPITupleTable *tuptable = SPI_tuptable; TupleDesc tupdesc = SPI_tuptable->tupdesc; for (t = 0; t < ntuples; t++) { //if (t%100 == 0) { DBG(" t: %i", t); } HeapTuple 
tuple = tuptable->vals[t]; GET_INT_FROM_TUPLE(seq,lex_columns.seq,"load_lex: seq contains a null value"); GET_TEXT_FROM_TUPLE(word,lex_columns.word); GET_TEXT_FROM_TUPLE(stdword,lex_columns.stdword); GET_INT_FROM_TUPLE(token,lex_columns.token,"load_lex: token contains a null value"); lex_add_entry(lex, seq, word, stdword, token); } //DBG("calling SPI_freetuptable"); SPI_freetuptable(tuptable); //DBG("back from SPI_freetuptable"); } else moredata = FALSE; } SET_TIME(t2); ELAPSED_T(t1, t2); DBG("Time to read %i lexicon records: %.1f ms.", total_tuples, elapsed); return 0; } static int fetch_rules_columns(SPITupleTable *tuptable, rules_columns_t *rules_cols) { int err = 0; FETCH_COL(rules_cols,rule,"rule"); if (err) { elog(NOTICE, "rules queries must return column 'rule'"); return -1; } CHECK_TYP(rules_cols,rule,TEXTOID); if (err) { elog(NOTICE, "rules column type must be: 'rule' text"); return -1; } return 0; }
/*
 * ts_rewrite(tsquery, text): rewrite the given tsquery using every
 * (target, substitute) tsquery pair returned by the SELECT passed as text.
 *
 * The query tree is normalized (QTNTernary/QTNSort), then each result row
 * is applied via findsubquery() until either the tree is consumed (becomes
 * NULL) or the cursor is exhausted.  Fetching happens in batches of 100.
 */
Datum
tsquery_rewrite_query(PG_FUNCTION_ARGS)
{
    TSQuery query = PG_GETARG_TSQUERY_COPY(0);
    text *in = PG_GETARG_TEXT_P(1);
    TSQuery rewrited = query;
    MemoryContext outercontext = CurrentMemoryContext;
    MemoryContext oldcontext;
    QTNode *tree;
    char *buf;
    SPIPlanPtr plan;
    Portal portal;
    bool isnull;

    /* Empty input query rewrites to itself. */
    if (query->size == 0)
    {
        PG_FREE_IF_COPY(in, 1);
        PG_RETURN_POINTER(rewrited);
    }

    tree = QT2QTN(GETQUERY(query), GETOPERAND(query));
    QTNTernary(tree);
    QTNSort(tree);

    buf = text_to_cstring(in);

    SPI_connect();

    if ((plan = SPI_prepare(buf, 0, NULL)) == NULL)
        elog(ERROR, "SPI_prepare(\"%s\") failed", buf);

    if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL, true)) == NULL)
        elog(ERROR, "SPI_cursor_open(\"%s\") failed", buf);

    SPI_cursor_fetch(portal, true, 100);

    /* The user's SELECT must return exactly two tsquery columns. */
    if (SPI_tuptable == NULL ||
        SPI_tuptable->tupdesc->natts != 2 ||
        SPI_gettypeid(SPI_tuptable->tupdesc, 1) != TSQUERYOID ||
        SPI_gettypeid(SPI_tuptable->tupdesc, 2) != TSQUERYOID)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("ts_rewrite query must return two tsquery columns")));

    while (SPI_processed > 0 && tree)
    {
        uint64 i;

        for (i = 0; i < SPI_processed && tree; i++)
        {
            Datum qdata = SPI_getbinval(SPI_tuptable->vals[i],
                                        SPI_tuptable->tupdesc, 1, &isnull);
            Datum sdata;

            if (isnull)
                continue;

            sdata = SPI_getbinval(SPI_tuptable->vals[i],
                                  SPI_tuptable->tupdesc, 2, &isnull);

            if (!isnull)
            {
                TSQuery qtex = DatumGetTSQuery(qdata);
                TSQuery qtsubs = DatumGetTSQuery(sdata);
                QTNode *qex,
                       *qsubs = NULL;

                /* Empty target query matches nothing: free any detoasted
                 * copies and skip this row. */
                if (qtex->size == 0)
                {
                    if (qtex != (TSQuery) DatumGetPointer(qdata))
                        pfree(qtex);
                    if (qtsubs != (TSQuery) DatumGetPointer(sdata))
                        pfree(qtsubs);
                    continue;
                }

                qex = QT2QTN(GETQUERY(qtex), GETOPERAND(qtex));

                QTNTernary(qex);
                QTNSort(qex);

                if (qtsubs->size)
                    qsubs = QT2QTN(GETQUERY(qtsubs), GETOPERAND(qtsubs));

                /* Rewrite in the caller's context so the resulting tree
                 * survives SPI_finish(). */
                oldcontext = MemoryContextSwitchTo(outercontext);
                tree = findsubquery(tree, qex, qsubs, NULL);
                MemoryContextSwitchTo(oldcontext);

                QTNFree(qex);
                /* pfree only applies to a detoasted copy, not the
                 * original datum */
                if (qtex != (TSQuery) DatumGetPointer(qdata))
                    pfree(qtex);
                QTNFree(qsubs);
                if (qtsubs != (TSQuery) DatumGetPointer(sdata))
                    pfree(qtsubs);

                if (tree)
                {
                    /* ready the tree for another pass */
                    QTNClearFlags(tree, QTN_NOCHANGE);
                    QTNSort(tree);
                }
            }
        }

        SPI_freetuptable(SPI_tuptable);
        SPI_cursor_fetch(portal, true, 100);
    }

    SPI_freetuptable(SPI_tuptable);
    SPI_cursor_close(portal);
    SPI_freeplan(plan);
    SPI_finish();

    if (tree)
    {
        QTNBinary(tree);
        rewrited = QTN2QT(tree);
        QTNFree(tree);
        PG_FREE_IF_COPY(query, 0);
    }
    else
    {
        /* Whole tree was rewritten away: return an empty tsquery by
         * shrinking the copy in place. */
        SET_VARSIZE(rewrited, HDRSIZETQ);
        rewrited->size = 0;
    }

    pfree(buf);
    PG_FREE_IF_COPY(in, 1);
    PG_RETURN_POINTER(rewrited);
}
/*
 * load_rules - read the standardizer rules from table 'tab' into *rules.
 *
 * The table must have a text 'rule' column and an 'id' column; rows are
 * read in id order through an SPI cursor in TUPLIMIT batches, parsed with
 * parse_rule() and accumulated via rules_add_rule(), then finalized with
 * rules_ready().  The caller must already be connected to SPI.
 *
 * Returns 0 on success; -1 (after an elog(NOTICE) describing the problem)
 * or a failing helper's error code otherwise.
 */
static int load_rules(RULES *rules, char *tab)
{
    int ret;
    SPIPlanPtr SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
#ifdef DEBUG
    struct timeval t1, t2;
    double elapsed;
#endif
    char *sql;
    int rule_arr[MAX_RULE_LENGTH];
    int ntuples;
    int total_tuples = 0;
    rules_columns_t rules_columns = {rule: -1};
    char *rule;

    DBG("start load_rules\n");
    SET_TIME(t1);

    if (!tab || !strlen(tab)) {
        elog(NOTICE, "load_rules: rules table is not usable");
        return -1;
    }
    if (!tableNameOk(tab)) {
        elog(NOTICE, "load_rules: rules table name may only be alphanum and '.\"_' characters (%s)", tab);
        return -1;
    }

    /* "select rule from " (17) + tab + " order by id " (13) + NUL fits in +35 */
    sql = SPI_palloc(strlen(tab)+35);
    strcpy(sql, "select rule from ");
    strcat(sql, tab);
    strcat(sql, " order by id ");

    /* get the sql for the lexicon records and prepare the query */
    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL) {
        elog(NOTICE, "load_rules: couldn't create query plan for the rule data via SPI (%s)", sql);
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL) {
        elog(NOTICE, "load_rules: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    while (moredata == TRUE) {
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);
        if (SPI_tuptable == NULL) {
            elog(NOTICE, "load_rules: SPI_tuptable is NULL");
            return -1;
        }
        /* resolve the 'rule' column number from the first batch only */
        if (rules_columns.rule == -1) {
            ret = fetch_rules_columns(SPI_tuptable, &rules_columns);
            if (ret)
                return ret;
        }
        ntuples = SPI_processed;

        if (ntuples > 0) {
            int t;
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++) {
                int nr;
                HeapTuple tuple = tuptable->vals[t];
                GET_TEXT_FROM_TUPLE(rule,rules_columns.rule);
                nr = parse_rule(rule, rule_arr);
                if (nr == -1) {
                    /* BUGFIX: these messages previously said "load_roles" */
                    elog(NOTICE, "load_rules: rule exceeds 128 terms");
                    return -1;
                }
                ret = rules_add_rule(rules, nr, rule_arr);
                if (ret != 0) {
                    elog(NOTICE, "load_rules: failed to add rule %d (%d): %s",
                         total_tuples+t+1, ret, rule);
                    return -1;
                }
            }
            SPI_freetuptable(tuptable);
        }
        else
            moredata = FALSE;

        total_tuples += ntuples;
    }

    /* BUGFIX: the cursor was never closed; release it now that all rows
     * have been consumed. */
    SPI_cursor_close(SPIportal);

    ret = rules_ready(rules);
    if (ret != 0) {
        elog(NOTICE, "load_rules: failed to ready the rules: err: %d", ret);
        return -1;
    }

    SET_TIME(t2);
    ELAPSED_T(t1, t2);
    DBG("Time to read %i rule records: %.1f ms.", total_tuples, elapsed);

    return 0;
}
/*
 * pgr_get_matrixRows - read cost-matrix rows from 'sql' via an SPI cursor.
 *
 * Expected result columns:
 *   bigint start_vid, bigint end_vid, float agg_cost
 *
 * Rows are fetched in batches of up to 1,000,000 tuples and accumulated
 * into *rows (palloc0'd on first batch, repalloc'd afterwards).  On return
 * *total_rows holds the number of rows read; when the query yields nothing
 * it is set to 0 and *rows is left untouched.
 */
void
pgr_get_matrixRows(
        char *sql,
        Matrix_cell_t **rows,
        size_t *total_rows) {
    clock_t start_t = clock();
    const int tuple_limit = 1000000;

    size_t ntuples;
    size_t total_tuples = 0;

    /* column descriptors: vids are integral, agg_cost is numerical */
    Column_info_t info[3];

    int i;
    for (i = 0; i < 3; ++i) {
        info[i].colNumber = -1;
        info[i].type = 0;
        info[i].strict = true;
        info[i].eType = ANY_INTEGER;
    }
    /* NOTE(review): these strdup'd names are never freed here — presumably
     * accepted for the duration of the call; confirm against callers. */
    info[0].name = strdup("start_vid");
    info[1].name = strdup("end_vid");
    info[2].name = strdup("agg_cost");
    info[2].eType = ANY_NUMERICAL;

    void *SPIplan;
    SPIplan = pgr_SPI_prepare(sql);
    Portal SPIportal;
    SPIportal = pgr_SPI_cursor_open(SPIplan);

    bool moredata = TRUE;
    (*total_rows) = total_tuples;

    while (moredata == TRUE) {
        SPI_cursor_fetch(SPIportal, TRUE, tuple_limit);
        /* resolve column numbers/types from the first batch only */
        if (total_tuples == 0)
            pgr_fetch_column_info(info, 3);

        ntuples = SPI_processed;
        total_tuples += ntuples;

        if (ntuples > 0) {
            if ((*rows) == NULL)
                (*rows) = (Matrix_cell_t *)palloc0(
                        total_tuples * sizeof(Matrix_cell_t));
            else
                (*rows) = (Matrix_cell_t *)repalloc(
                        (*rows), total_tuples * sizeof(Matrix_cell_t));

            if ((*rows) == NULL) {
                elog(ERROR, "Out of memory");
            }

            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;
            /* BUGFIX: message contained mojibake ("tupĺes") and printed a
             * size_t with %ld; use ASCII text and %zu. */
            PGR_DBG("processing %zu edge tuples", ntuples);

            size_t t;
            for (t = 0; t < ntuples; t++) {
                HeapTuple tuple = tuptable->vals[t];
                pgr_fetch_row(&tuple, &tupdesc, info,
                        &(*rows)[total_tuples - ntuples + t]);
            }
            SPI_freetuptable(tuptable);
        } else {
            moredata = FALSE;
        }
    }

    SPI_cursor_close(SPIportal);

    if (total_tuples == 0) {
        (*total_rows) = 0;
        PGR_DBG("NO rows");
        return;
    }

    (*total_rows) = total_tuples;
    time_msg(" reading Edges", start_t, clock());
}
/*
 * plr_SPI_exec - The builtin SPI_exec command for the R interpreter
 *
 * Executes the SQL text held in the R character vector 'rsql' via
 * SPI_exec (count = 0, i.e. no row limit).  Returns, as an R object:
 * a one-element character vector holding the processed-row count for
 * utility/DML statements, the converted result set for SELECT (or
 * R_NilValue when it is empty), and raises an R error for SPI failures.
 */
SEXP
plr_SPI_exec(SEXP rsql)
{
	int				spi_rc = 0;
	char			buf[64];
	const char	   *sql;
	int				count = 0;
	int				ntuples;
	SEXP			result = NULL;
	MemoryContext	oldcontext;

	PREPARE_PG_TRY;

	/* set up error context */
	PUSH_PLERRCONTEXT(rsupport_error_callback, "pg.spi.exec");

	/* pull the SQL text out of the R character vector */
	PROTECT(rsql = AS_CHARACTER(rsql));
	sql = CHAR(STRING_ELT(rsql, 0));
	UNPROTECT(1);

	if (sql == NULL)
		error("%s", "cannot exec empty query");

	/* switch to SPI memory context */
	oldcontext = MemoryContextSwitchTo(plr_SPI_context);

	/*
	 * trap elog/ereport so we can let R finish up gracefully
	 * and generate the error once we exit the interpreter
	 */
	PG_TRY();
	{
		/* Execute the query and handle return codes */
		spi_rc = SPI_exec(sql, count);
	}
	PLR_PG_CATCH();
	PLR_PG_END_TRY();

	/* back to caller's memory context */
	MemoryContextSwitchTo(oldcontext);

	switch (spi_rc)
	{
		case SPI_OK_UTILITY:
			/* utility statements report a row count of 0 */
			snprintf(buf, sizeof(buf), "%d", 0);
			SPI_freetuptable(SPI_tuptable);

			PROTECT(result = NEW_CHARACTER(1));
			SET_STRING_ELT(result, 0, COPY_TO_USER_STRING(buf));
			UNPROTECT(1);
			break;
		case SPI_OK_SELINTO:
		case SPI_OK_INSERT:
		case SPI_OK_DELETE:
		case SPI_OK_UPDATE:
			/* DML: hand back the processed-row count as a string */
			snprintf(buf, sizeof(buf), "%d", SPI_processed);
			SPI_freetuptable(SPI_tuptable);

			PROTECT(result = NEW_CHARACTER(1));
			SET_STRING_ELT(result, 0, COPY_TO_USER_STRING(buf));
			UNPROTECT(1);
			break;
		case SPI_OK_SELECT:
			/* SELECT: convert the tuple table into an R result object */
			ntuples = SPI_processed;
			if (ntuples > 0)
			{
				result = rpgsql_get_results(ntuples, SPI_tuptable);
				SPI_freetuptable(SPI_tuptable);
			}
			else
				result = R_NilValue;
			break;
		case SPI_ERROR_ARGUMENT:
			error("SPI_exec() failed: SPI_ERROR_ARGUMENT");
			break;
		case SPI_ERROR_UNCONNECTED:
			error("SPI_exec() failed: SPI_ERROR_UNCONNECTED");
			break;
		case SPI_ERROR_COPY:
			error("SPI_exec() failed: SPI_ERROR_COPY");
			break;
		case SPI_ERROR_CURSOR:
			error("SPI_exec() failed: SPI_ERROR_CURSOR");
			break;
		case SPI_ERROR_TRANSACTION:
			error("SPI_exec() failed: SPI_ERROR_TRANSACTION");
			break;
		case SPI_ERROR_OPUNKNOWN:
			error("SPI_exec() failed: SPI_ERROR_OPUNKNOWN");
			break;
		default:
			error("SPI_exec() failed: %d", spi_rc);
			break;
	}

	POP_PLERRCONTEXT;
	return result;
}
/*
 * PLy_spi_execute_fetch_result - build a PL/Python result object from an
 * SPI execution.
 *
 * 'rows' is the processed-row count and 'status' the SPI return code.
 * When a tuple table is present its tuples are converted to Python dicts
 * and stored in result->rows; the tuple table is always freed before
 * returning.  Returns a new PLyResultObject, or NULL (with a Python
 * exception set) if tuple conversion raised a PostgreSQL error.
 */
static PyObject *
PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
{
	PLyResultObject *result;
	volatile MemoryContext oldcontext;

	result = (PLyResultObject *) PLy_result_new();
	Py_DECREF(result->status);
	result->status = PyInt_FromLong(status);

	if (status > 0 && tuptable == NULL)
	{
		/* command succeeded but returned no tuples (e.g. plain DML) */
		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
	}
	else if (status > 0 && tuptable != NULL)
	{
		PLyTypeInfo args;
		int			i;

		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
		PLy_typeinfo_init(&args);

		oldcontext = CurrentMemoryContext;
		PG_TRY();
		{
			if (rows)
			{
				/* convert each tuple to a Python dict keyed by column name */
				Py_DECREF(result->rows);
				result->rows = PyList_New(rows);
				PLy_input_tuple_funcs(&args, tuptable->tupdesc);
				for (i = 0; i < rows; i++)
				{
					PyObject   *row = PLyDict_FromTuple(&args,
														tuptable->vals[i],
														tuptable->tupdesc);

					/* PyList_SetItem steals the reference to 'row' */
					PyList_SetItem(result->rows, i, row);
				}
			}
		}
		PG_CATCH();
		{
			/* clean up and convert the PG error to a Python exception */
			MemoryContextSwitchTo(oldcontext);
			if (!PyErr_Occurred())
				PLy_exception_set(PLy_exc_error,
								  "unrecognized error in PLy_spi_execute_fetch_result");
			PLy_typeinfo_dealloc(&args);
			SPI_freetuptable(tuptable);
			Py_DECREF(result);
			return NULL;
		}
		PG_END_TRY();

		PLy_typeinfo_dealloc(&args);
		SPI_freetuptable(tuptable);
	}

	return (PyObject *) result;
}
/*
 * plr_SPI_execp - The builtin SPI_execp command for the R interpreter
 *
 * Executes a previously prepared/saved plan (wrapped in the external
 * pointer 'rsaved_plan') with argument values taken from the R list
 * 'rargvalues'.  Argument datums are converted with the input functions
 * captured at prepare time; NAs are passed as SQL NULLs.  Result handling
 * mirrors plr_SPI_exec: a character row count for utility/DML, the
 * converted result set for SELECT, an R error for SPI failures.
 */
SEXP
plr_SPI_execp(SEXP rsaved_plan, SEXP rargvalues)
{
	saved_plan_desc	   *plan_desc = (saved_plan_desc *) R_ExternalPtrAddr(rsaved_plan);
	void			   *saved_plan = plan_desc->saved_plan;
	int					nargs = plan_desc->nargs;
	Oid				   *typeids = plan_desc->typeids;
	FmgrInfo		   *typinfuncs = plan_desc->typinfuncs;
	int					i;
	Datum			   *argvalues = NULL;
	char			   *nulls = NULL;
	bool				isnull = false;
	SEXP				obj;
	int					spi_rc = 0;
	char				buf[64];
	int					count = 0;
	int					ntuples;
	SEXP				result = NULL;
	MemoryContext		oldcontext;

	PREPARE_PG_TRY;

	/* set up error context */
	PUSH_PLERRCONTEXT(rsupport_error_callback, "pg.spi.execp");

	/* validate the argument list against the plan's parameter count */
	if (nargs > 0)
	{
		if (!Rf_isVectorList(rargvalues))
			error("%s", "second parameter must be a list of arguments "
						"to the prepared plan");

		if (length(rargvalues) != nargs)
			error("list of arguments (%d) is not the same length "
				  "as that of the prepared plan (%d)",
				  length(rargvalues), nargs);

		argvalues = (Datum *) palloc(nargs * sizeof(Datum));
		nulls = (char *) palloc(nargs * sizeof(char));
	}

	/* convert each R argument to a datum; 'n' marks a SQL NULL */
	for (i = 0; i < nargs; i++)
	{
		PROTECT(obj = VECTOR_ELT(rargvalues, i));

		argvalues[i] = get_scalar_datum(obj, typeids[i], typinfuncs[i], &isnull);
		if (!isnull)
			nulls[i] = ' ';
		else
			nulls[i] = 'n';

		UNPROTECT(1);
	}

	/* switch to SPI memory context */
	oldcontext = MemoryContextSwitchTo(plr_SPI_context);

	/*
	 * trap elog/ereport so we can let R finish up gracefully
	 * and generate the error once we exit the interpreter
	 */
	PG_TRY();
	{
		/* Execute the plan */
		spi_rc = SPI_execp(saved_plan, argvalues, nulls, count);
	}
	PLR_PG_CATCH();
	PLR_PG_END_TRY();

	/* back to caller's memory context */
	MemoryContextSwitchTo(oldcontext);

	/* check the result */
	switch (spi_rc)
	{
		case SPI_OK_UTILITY:
			/* utility statements report a row count of 0 */
			snprintf(buf, sizeof(buf), "%d", 0);
			SPI_freetuptable(SPI_tuptable);

			PROTECT(result = NEW_CHARACTER(1));
			SET_STRING_ELT(result, 0, COPY_TO_USER_STRING(buf));
			UNPROTECT(1);
			break;
		case SPI_OK_SELINTO:
		case SPI_OK_INSERT:
		case SPI_OK_DELETE:
		case SPI_OK_UPDATE:
			/* DML: hand back the processed-row count as a string */
			snprintf(buf, sizeof(buf), "%d", SPI_processed);
			SPI_freetuptable(SPI_tuptable);

			PROTECT(result = NEW_CHARACTER(1));
			SET_STRING_ELT(result, 0, COPY_TO_USER_STRING(buf));
			UNPROTECT(1);
			break;
		case SPI_OK_SELECT:
			/* SELECT: convert the tuple table into an R result object */
			ntuples = SPI_processed;
			if (ntuples > 0)
			{
				result = rpgsql_get_results(ntuples, SPI_tuptable);
				SPI_freetuptable(SPI_tuptable);
			}
			else
				result = R_NilValue;
			break;
		case SPI_ERROR_ARGUMENT:
			error("SPI_execp() failed: SPI_ERROR_ARGUMENT");
			break;
		case SPI_ERROR_UNCONNECTED:
			error("SPI_execp() failed: SPI_ERROR_UNCONNECTED");
			break;
		case SPI_ERROR_COPY:
			error("SPI_execp() failed: SPI_ERROR_COPY");
			break;
		case SPI_ERROR_CURSOR:
			error("SPI_execp() failed: SPI_ERROR_CURSOR");
			break;
		case SPI_ERROR_TRANSACTION:
			error("SPI_execp() failed: SPI_ERROR_TRANSACTION");
			break;
		case SPI_ERROR_OPUNKNOWN:
			error("SPI_execp() failed: SPI_ERROR_OPUNKNOWN");
			break;
		default:
			error("SPI_execp() failed: %d", spi_rc);
			break;
	}

	POP_PLERRCONTEXT;
	return result;
}
/*
 * compute_sql_asm_tsp - load edges via SPI and run the ATSP solver.
 *
 * 'sql' must return edge rows (id, source, target, cost[, reverse_cost]);
 * they are fetched in TUPLIMIT batches and handed to processATSPData()
 * together with the starting vertex.  On success *path/*pathCount receive
 * the computed tour.  Returns finish(SPIcode, ret); raises elog(ERROR) on
 * SPI or solver failure.
 */
static int compute_sql_asm_tsp(char* sql, int sourceVertexId, bool reverseCost,
                               tspPathElementType **path, int *pathCount)
{
    int SPIcode;
    void *SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
    int ntuples;
    tspEdgeType *edges = NULL;
    int totalTuples = 0;

    DBG("Sql %s source %d reverse %s",sql,sourceVertexId,reverseCost==true?"true":"false");

    /* column numbers are unresolved (-1) until the first batch arrives */
    tspEdgeType edgeColumns = {.id= -1, .source= -1, .target= -1, .cost= -1 };
    char *errMesg;
    int ret = -1;

    /* buffer for a solver-produced error message */
    errMesg=palloc(sizeof(char) * 300);

    DBG("start compute_sql_asm_tsp %i",*pathCount);

    SPIcode = SPI_connect();
    if (SPIcode != SPI_OK_CONNECT) {
        elog(ERROR, "compute_sql_asm_tsp: couldn't open a connection to SPI");
        return -1;
    }

    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL) {
        elog(ERROR, "compute_sql_asm_tsp: couldn't create query plan via SPI");
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL) {
        elog(ERROR, "compute_sql_asm_tsp: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    while (moredata == TRUE) {
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

        /* resolve the edge column layout from the first batch only */
        if (edgeColumns.id == -1) {
            if (!fetchEdgeTspColumns(SPI_tuptable, &edgeColumns,reverseCost))
                return finish(SPIcode, ret);
        }

        ntuples = SPI_processed;
        totalTuples += ntuples;

        /* grow the edge array to hold the new batch */
        if (!edges){
            edges = palloc(totalTuples * sizeof(tspEdgeType));
        } else {
            edges = repalloc(edges, totalTuples * sizeof(tspEdgeType));
        }
        if (edges == NULL) {
            elog(ERROR, "Out of memory");
            return finish(SPIcode, ret);
        }

        if (ntuples > 0) {
            int t;
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++) {
                HeapTuple tuple = tuptable->vals[t];
                fetchEdgeTsp(&tuple, &tupdesc, &edgeColumns,
                             &edges[totalTuples - ntuples + t],reverseCost);
            }
            SPI_freetuptable(tuptable);
        } else {
            moredata = FALSE;
        }
    }

    DBG("Total %i tuples", totalTuples);
    DBG("Calling tsp functions total tuples <%i> initial path count <%i>",
        totalTuples,*pathCount);

    /* hand the edge list to the asymmetric-TSP solver */
    ret=processATSPData(edges,totalTuples,sourceVertexId,reverseCost, path,
                        pathCount,errMesg);

    DBG("SIZE %i elements to process",*pathCount);

    if (!ret ) {
        elog(ERROR, "Error computing path: %s", errMesg);
    }

    return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(sql_asm_tsp);

/*
 * sql_asm_tsp - SRF wrapper around compute_sql_asm_tsp.
 *
 * Args: edge SQL (text), source vertex id (int), reverse-cost flag (bool).
 * Returns one pgr_costResult row (seq, id, cost-placeholder, cost) per
 * tour step.
 */
Datum sql_asm_tsp(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    int callCntr;
    int maxCalls;
    TupleDesc tupleDesc;
    tspPathElementType *path;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL()) {
        MemoryContext oldcontext;
        int pathCount = 0;
        int ret;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        ret = compute_sql_asm_tsp(text2char(PG_GETARG_TEXT_P(0)),
                                  PG_GETARG_INT32(1),
                                  PG_GETARG_BOOL(2),
                                  &path, &pathCount);

#ifdef DEBUG
        if (ret >= 0) {
            int i;
            for (i = 0; i < pathCount; i++) {
                DBG("Step # %i vertexId  %i cost %.4f", i,
                    path[i].vertexId,path[i].cost);
            }
        }
#endif

        /* total number of tuples to be returned */
        funcctx->max_calls = pathCount;
        funcctx->user_fctx = path;
        DBG("Path count %i", pathCount);

        funcctx->tuple_desc =
            BlessTupleDesc(RelationNameGetTupleDesc("pgr_costResult"));

        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();

    callCntr = funcctx->call_cntr;
    maxCalls = funcctx->max_calls;
    tupleDesc = funcctx->tuple_desc;
    path = (tspPathElementType*) funcctx->user_fctx;

    if (callCntr < maxCalls) {
        /* do when there is more left to send */
        HeapTuple tuple;
        Datum result;
        Datum *values;
        char* nulls;

        values = palloc(4 * sizeof(Datum));
        nulls = palloc(4 * sizeof(char));

        values[0] = Int32GetDatum(callCntr);
        nulls[0] = ' ';
        values[1] = Int32GetDatum(path[callCntr].vertexId);
        nulls[1] = ' ';
        values[2] = Float8GetDatum(0); // edge id not supplied by this method
        nulls[2] = ' ';
        values[3] = Float8GetDatum(path[callCntr].cost);
        nulls[3] = ' ';

        tuple = heap_formtuple(tupleDesc, values, nulls);

        /* make the tuple into a datum */
        result = HeapTupleGetDatum(tuple);

        /* clean up (this is not really necessary) */
        pfree(values);
        pfree(nulls);

        SRF_RETURN_NEXT(funcctx, result);
    } else {
        /* do when there is no more left */
        SRF_RETURN_DONE(funcctx);
    }
}
/*
 * compute_shortest_path - load edges via SPI and run boost_dijkstra.
 *
 * 'sql' must return edge rows (id, source, target, cost[, reverse_cost]).
 * Vertex ids are compacted by subtracting the minimum id before calling
 * the solver and restored afterwards.  On success *path/*path_count get
 * the resulting step list.  Returns finish(SPIcode, ret); raises
 * elog/ereport(ERROR) on SPI, input-validation, or solver failure.
 */
static int compute_shortest_path(char* sql, int start_vertex,
                                 int end_vertex, bool directed,
                                 bool has_reverse_cost,
                                 path_element_t **path, int *path_count)
{
    int SPIcode;
    void *SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
    int ntuples;
    edge_t *edges = NULL;
    int total_tuples = 0;

    /* column numbers are unresolved (-1) until the first batch arrives */
    edge_columns_t edge_columns = {id: -1, source: -1, target: -1,
                                   cost: -1, reverse_cost: -1};
    int v_max_id=0;
    int v_min_id=INT_MAX;

    /* counts of edges touching the start/end vertices, for validation */
    int s_count = 0;
    int t_count = 0;

    char *err_msg;
    int ret = -1;
    register int z;

    DBG("start shortest_path\n");

    SPIcode = SPI_connect();
    if (SPIcode != SPI_OK_CONNECT) {
        elog(ERROR, "shortest_path: couldn't open a connection to SPI");
        return -1;
    }

    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL) {
        elog(ERROR, "shortest_path: couldn't create query plan via SPI");
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL) {
        elog(ERROR, "shortest_path: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    while (moredata == TRUE) {
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

        /* resolve the edge column layout from the first batch only */
        if (edge_columns.id == -1) {
            if (fetch_edge_columns(SPI_tuptable, &edge_columns,
                                   has_reverse_cost) == -1)
                return finish(SPIcode, ret);
        }

        ntuples = SPI_processed;
        total_tuples += ntuples;

        /* grow the edge array to hold the new batch */
        if (!edges)
            edges = palloc(total_tuples * sizeof(edge_t));
        else
            edges = repalloc(edges, total_tuples * sizeof(edge_t));

        if (edges == NULL) {
            elog(ERROR, "Out of memory");
            return finish(SPIcode, ret);
        }

        if (ntuples > 0) {
            int t;
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++) {
                HeapTuple tuple = tuptable->vals[t];
                fetch_edge(&tuple, &tupdesc, &edge_columns,
                           &edges[total_tuples - ntuples + t]);
            }
            SPI_freetuptable(tuptable);
        } else {
            moredata = FALSE;
        }
    }

    //defining min and max vertex id
    DBG("Total %i tuples", total_tuples);

    for(z=0; z<total_tuples; z++) {
        if(edges[z].source<v_min_id)
            v_min_id=edges[z].source;
        if(edges[z].source>v_max_id)
            v_max_id=edges[z].source;
        if(edges[z].target<v_min_id)
            v_min_id=edges[z].target;
        if(edges[z].target>v_max_id)
            v_max_id=edges[z].target;
        DBG("%i <-> %i", v_min_id, v_max_id);
    }

    //::::::::::::::::::::::::::::::::::::
    //:: reducing vertex id (renumbering)
    //::::::::::::::::::::::::::::::::::::
    for(z=0; z<total_tuples; z++) {
        //check if edges[] contains source and target
        if(edges[z].source == start_vertex || edges[z].target == start_vertex)
            ++s_count;
        if(edges[z].source == end_vertex || edges[z].target == end_vertex)
            ++t_count;

        edges[z].source-=v_min_id;
        edges[z].target-=v_min_id;
        DBG("%i - %i", edges[z].source, edges[z].target);
    }

    DBG("Total %i tuples", total_tuples);

    if(s_count == 0) {
        elog(ERROR, "Source vertex: %d was not found as vertex of any of the input edges.", start_vertex);
        return -1;
    }

    if(t_count == 0) {
        elog(ERROR, "Target vertex: %d was not found as vertex of any of the input edges.", end_vertex);
        return -1;
    }

    DBG("Calling boost_dijkstra\n");

    /* solver works in the compacted id space */
    start_vertex -= v_min_id;
    end_vertex   -= v_min_id;

    ret = boost_dijkstra(edges, total_tuples, start_vertex, end_vertex,
                         directed, has_reverse_cost,
                         path, path_count, &err_msg);

    DBG("SIZE %i\n",*path_count);

    //::::::::::::::::::::::::::::::::
    //:: restoring original vertex id
    //::::::::::::::::::::::::::::::::
    for(z=0;z<*path_count;z++) {
        //DBG("vetex %i\n",(*path)[z].vertex_id);
        (*path)[z].vertex_id+=v_min_id;
    }

    DBG("ret = %i\n", ret);
    DBG("*path_count = %i\n", *path_count);
    DBG("ret = %i\n", ret);

    if (ret < 0) {
        //elog(ERROR, "Error computing path: %s", err_msg);
        ereport(ERROR, (errcode(ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED),
                        errmsg("Error computing path: %s", err_msg)));
    }

    if (edges) {
        /* clean up input egdes */
        pfree (edges);
    }

    return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(shortest_path);

/*
 * shortest_path - SRF wrapper around compute_shortest_path.
 *
 * Args: edge SQL (text), source id, target id, directed flag,
 * has-reverse-cost flag.  Returns one path_result row
 * (vertex_id, edge_id, cost) per path step.
 */
Datum shortest_path(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    int call_cntr;
    int max_calls;
    TupleDesc tuple_desc;
    path_element_t *path = NULL;
    char *sql = NULL;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL()) {
        MemoryContext oldcontext;
        int path_count = 0;
        int ret;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        /* edge sql query */
        sql = text2char(PG_GETARG_TEXT_P(0));

        ret = compute_shortest_path(sql,
                                    PG_GETARG_INT32(1),
                                    PG_GETARG_INT32(2),
                                    PG_GETARG_BOOL(3),
                                    PG_GETARG_BOOL(4),
                                    &path, &path_count);

        /* clean up sql query string */
        if (sql) {
            pfree (sql);
        }

#ifdef DEBUG
        DBG("Ret is %i", ret);
        if (ret >= 0) {
            int i;
            for (i = 0; i < path_count; i++) {
                DBG("Step %i vertex_id  %i ", i, path[i].vertex_id);
                DBG("        edge_id    %i ", path[i].edge_id);
                DBG("        cost       %f ", path[i].cost);
            }
        }
#endif

        /* total number of tuples to be returned */
        funcctx->max_calls = path_count;
        funcctx->user_fctx = path;

        funcctx->tuple_desc =
            BlessTupleDesc(RelationNameGetTupleDesc("path_result"));

        MemoryContextSwitchTo(oldcontext);
    }

    /* stuff done on every call of the function */
    funcctx = SRF_PERCALL_SETUP();

    call_cntr = funcctx->call_cntr;
    max_calls = funcctx->max_calls;
    tuple_desc = funcctx->tuple_desc;
    path = (path_element_t*) funcctx->user_fctx;

    if (call_cntr < max_calls)    /* do when there is more left to send */
    {
        HeapTuple tuple;
        Datum result;
        Datum *values;
        char* nulls;

        /* This will work for some compilers. If it crashes with segfault,
           try to change the following block with this one

        values = palloc(4 * sizeof(Datum));
        nulls = palloc(4 * sizeof(char));

        values[0] = call_cntr;
        nulls[0] = ' ';
        values[1] = Int32GetDatum(path[call_cntr].vertex_id);
        nulls[1] = ' ';
        values[2] = Int32GetDatum(path[call_cntr].edge_id);
        nulls[2] = ' ';
        values[3] = Float8GetDatum(path[call_cntr].cost);
        nulls[3] = ' ';
        */

        values = palloc(3 * sizeof(Datum));
        nulls = palloc(3 * sizeof(char));

        values[0] = Int32GetDatum(path[call_cntr].vertex_id);
        nulls[0] = ' ';
        values[1] = Int32GetDatum(path[call_cntr].edge_id);
        nulls[1] = ' ';
        values[2] = Float8GetDatum(path[call_cntr].cost);
        nulls[2] = ' ';

        tuple = heap_formtuple(tuple_desc, values, nulls);

        /* make the tuple into a datum */
        result = HeapTupleGetDatum(tuple);

        /* clean up (this is not really necessary) */
        pfree(values);
        pfree(nulls);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else    /* do when there is no more left */
    {
        if (path) {
            /* clean up returned edge paths
               must be a free because it's malloc'd */
            free (path);
            path = NULL;
        }
        SRF_RETURN_DONE(funcctx);
    }
}
/*
 * Clean up SPI state at subtransaction commit or abort.
 *
 * During commit, there shouldn't be any unclosed entries remaining from
 * the current subtransaction; we emit a warning if any are found.
 */
void
AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
{
	bool		found = false;

	/* Pop every SPI connection that was opened within this subxact. */
	while (_SPI_connected >= 0)
	{
		_SPI_connection *connection = &(_SPI_stack[_SPI_connected]);

		if (connection->connectSubid != mySubid)
			break;				/* couldn't be any underneath it either */

		found = true;

		/*
		 * Release procedure memory explicitly (see note in SPI_connect)
		 */
		if (connection->execCxt)
		{
			MemoryContextDelete(connection->execCxt);
			connection->execCxt = NULL;
		}
		if (connection->procCxt)
		{
			MemoryContextDelete(connection->procCxt);
			connection->procCxt = NULL;
		}

		/*
		 * Pop the stack entry and reset global variables.  Unlike
		 * SPI_finish(), we don't risk switching to memory contexts that might
		 * be already gone.
		 */
		_SPI_connected--;
		_SPI_curid = _SPI_connected;
		if (_SPI_connected == -1)
			_SPI_current = NULL;
		else
			_SPI_current = &(_SPI_stack[_SPI_connected]);
		SPI_processed = 0;
		SPI_lastoid = InvalidOid;
		SPI_tuptable = NULL;
	}

	if (found && isCommit)
		ereport(WARNING,
				(errcode(ERRCODE_WARNING),
				 errmsg("subtransaction left non-empty SPI stack"),
				 errhint("Check for missing \"SPI_finish\" calls.")));

	/*
	 * If we are aborting a subtransaction and there is an open SPI context
	 * surrounding the subxact, clean up to prevent memory leakage.
	 */
	if (_SPI_current && !isCommit)
	{
		/* free Executor memory the same as _SPI_end_call would do */
		MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
		/* throw away any partially created tuple-table */
		SPI_freetuptable(_SPI_current->tuptable);
		_SPI_current->tuptable = NULL;
	}
}
/*
 * pgr_get_customers_data - read VRP customer rows from 'customers_sql'.
 *
 * Expected columns:
 *   id (bigint), x, y, demand, opentime, closetime, servicetime (numeric),
 *   pindex, dindex (bigint)
 *
 * Rows are fetched through an SPI cursor in batches of up to 1,000,000
 * tuples and accumulated into *customers (palloc0'd on the first batch,
 * repalloc'd afterwards).  On return *total_customers holds the number of
 * rows read; 0 when the query yields nothing.
 */
void
pgr_get_customers_data(
        char *customers_sql,
        Customer_t **customers,
        size_t *total_customers) {
    const int tuple_limit = 1000000;
    PGR_DBG("pgr_get_customers_data");
    PGR_DBG("%s", customers_sql);

    Column_info_t info[9];

    int i;
    for (i = 0; i < 9; ++i) {
        info[i].colNumber = -1;
        info[i].type = 0;
        info[i].strict = true;
        info[i].eType = ANY_NUMERICAL;
    }

    /*!
      int64_t id;
      double x;
      double y;
      double demand;
      double Etime;
      double Ltime;
      double Stime;
      int64_t Pindex;
      int64_t Dindex;
      double Ddist;
      */
    info[0].name = strdup("id");
    info[1].name = strdup("x");
    info[2].name = strdup("y");
    info[3].name = strdup("demand");
    info[4].name = strdup("opentime");
    info[5].name = strdup("closetime");
    info[6].name = strdup("servicetime");
    info[7].name = strdup("pindex");
    info[8].name = strdup("dindex");

    info[0].eType = ANY_INTEGER;
    info[7].eType = ANY_INTEGER;
    info[8].eType = ANY_INTEGER;

    size_t ntuples;
    size_t total_tuples;

    void *SPIplan;
    SPIplan = pgr_SPI_prepare(customers_sql);
    Portal SPIportal;
    SPIportal = pgr_SPI_cursor_open(SPIplan);

    bool moredata = TRUE;
    (*total_customers) = total_tuples = 0;

    /* on the first tuple get the column numbers */
    while (moredata == TRUE) {
        SPI_cursor_fetch(SPIportal, TRUE, tuple_limit);
        if (total_tuples == 0) {
            pgr_fetch_column_info(info, 9);
        }
        ntuples = SPI_processed;
        total_tuples += ntuples;
        /* BUGFIX: size_t values were printed with %ld; use %zu */
        PGR_DBG("SPI_processed %zu", ntuples);
        if (ntuples > 0) {
            if ((*customers) == NULL)
                (*customers) = (Customer_t *)palloc0(
                        total_tuples * sizeof(Customer_t));
            else
                (*customers) = (Customer_t *)repalloc(
                        (*customers), total_tuples * sizeof(Customer_t));

            if ((*customers) == NULL) {
                elog(ERROR, "Out of memory");
            }

            size_t t;
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;
            PGR_DBG("processing %zu", ntuples);
            for (t = 0; t < ntuples; t++) {
                HeapTuple tuple = tuptable->vals[t];
                fetch_customer(&tuple, &tupdesc, info,
                        &(*customers)[total_tuples - ntuples + t]);
            }
            SPI_freetuptable(tuptable);
        } else {
            moredata = FALSE;
        }
    }

    SPI_cursor_close(SPIportal);

    if (total_tuples == 0) {
        (*total_customers) = 0;
        PGR_DBG("NO customers");
        return;
    }

    (*total_customers) = total_tuples;
    PGR_DBG("Finish reading %zu data, %zu", total_tuples, (*total_customers));
}
/*
 * plperl_spi_fetchrow - fetch one row from a named SPI cursor for PL/Perl.
 *
 * Returns a Perl hash for the next row, or undef (newSV(0)) when the
 * cursor is unknown or exhausted; an exhausted cursor is closed here.
 * The FETCH runs inside an internal subtransaction so a PostgreSQL error
 * can be turned into a Perl croak() instead of aborting the outer xact.
 */
SV *
plperl_spi_fetchrow(char *cursor)
{
	SV		   *row;

	/*
	 * Execute the FETCH inside a sub-transaction, so we can cope with errors
	 * sanely
	 */
	MemoryContext oldcontext = CurrentMemoryContext;
	ResourceOwner oldowner = CurrentResourceOwner;

	BeginInternalSubTransaction(NULL);
	/* Want to run inside function's memory context */
	MemoryContextSwitchTo(oldcontext);

	PG_TRY();
	{
		Portal		p = SPI_cursor_find(cursor);

		if (!p)
			row = newSV(0);		/* unknown cursor name: return undef */
		else
		{
			SPI_cursor_fetch(p, true, 1);
			if (SPI_processed == 0)
			{
				/* exhausted: close the cursor and return undef */
				SPI_cursor_close(p);
				row = newSV(0);
			}
			else
			{
				row = plperl_hash_from_tuple(SPI_tuptable->vals[0],
											 SPI_tuptable->tupdesc);
			}
			SPI_freetuptable(SPI_tuptable);
		}

		/* Commit the inner transaction, return to outer xact context */
		ReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(oldcontext);
		CurrentResourceOwner = oldowner;

		/*
		 * AtEOSubXact_SPI() should not have popped any SPI context, but just
		 * in case it did, make sure we remain connected.
		 */
		SPI_restore_connection();
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		/* Save error info */
		MemoryContextSwitchTo(oldcontext);
		edata = CopyErrorData();
		FlushErrorState();

		/* Abort the inner transaction */
		RollbackAndReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(oldcontext);
		CurrentResourceOwner = oldowner;

		/*
		 * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will
		 * have left us in a disconnected state.  We need this hack to return
		 * to connected state.
		 */
		SPI_restore_connection();

		/* Punt the error to Perl */
		croak("%s", edata->message);

		/* Can't get here, but keep compiler quiet */
		return NULL;
	}
	PG_END_TRY();

	return row;
}
/*
 * compute_alpha_shape - load vertices via SPI and run the CGAL alpha-shape.
 *
 * 'sql' must return vertex rows (id, x, y); they are fetched in TUPLIMIT
 * batches and handed to alpha_shape(), which needs at least 3 points.
 * On success *res/*res_count receive the boundary vertices.  Returns
 * finish(SPIcode, ret); raises elog/ereport(ERROR) on SPI failure, too
 * few vertices, or a solver error.
 */
static int compute_alpha_shape(char* sql, vertex_t **res, int *res_count)
{
    int SPIcode;
    void *SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
    int ntuples;
    vertex_t *vertices = NULL;
    int total_tuples = 0;

    /* column numbers are unresolved (-1) until the first batch arrives */
    vertex_columns_t vertex_columns = {.id= -1, .x= -1, .y= -1};
    char *err_msg;
    int ret = -1;

    DBG("start alpha_shape\n");

    SPIcode = SPI_connect();
    if (SPIcode != SPI_OK_CONNECT) {
        elog(ERROR, "alpha_shape: couldn't open a connection to SPI");
        return -1;
    }

    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL) {
        elog(ERROR, "alpha_shape: couldn't create query plan via SPI");
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL) {
        elog(ERROR, "alpha_shape: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    while (moredata == TRUE) {
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

        /* resolve the vertex column layout from the first batch only */
        if (vertex_columns.id == -1) {
            if (fetch_vertices_columns(SPI_tuptable, &vertex_columns) == -1)
                return finish(SPIcode, ret);
        }

        ntuples = SPI_processed;
        total_tuples += ntuples;

        /* grow the vertex array to hold the new batch */
        if (!vertices)
            vertices = palloc(total_tuples * sizeof(vertex_t));
        else
            vertices = repalloc(vertices, total_tuples * sizeof(vertex_t));

        if (vertices == NULL) {
            elog(ERROR, "Out of memory");
            return finish(SPIcode, ret);
        }

        if (ntuples > 0) {
            int t;
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++) {
                HeapTuple tuple = tuptable->vals[t];
                fetch_vertex(&tuple, &tupdesc, &vertex_columns,
                             &vertices[total_tuples - ntuples + t]);
            }
            SPI_freetuptable(tuptable);
        } else {
            moredata = FALSE;
        }
    }

    // if (total_tuples < 2) //this was the buggy code of the pgrouting project.
    // TODO: report this as a bug to the pgrouting project
    // the CGAL alpha-shape function crashes if called with less than three points!!!
    if (total_tuples == 0) {
        elog(ERROR, "Distance is too short. no vertex for alpha shape calculation. alpha shape calculation needs at least 3 vertices.");
    }
    if (total_tuples == 1) {
        elog(ERROR, "Distance is too short. only 1 vertex for alpha shape calculation. alpha shape calculation needs at least 3 vertices.");
    }
    if (total_tuples == 2) {
        elog(ERROR, "Distance is too short. only 2 vertices for alpha shape calculation. alpha shape calculation needs at least 3 vertices.");
    }
    if (total_tuples < 3) {
        // elog(ERROR, "Distance is too short ....");
        return finish(SPIcode, ret);
    }

    DBG("Calling CGAL alpha-shape\n");

    profstop("extract", prof_extract);
    profstart(prof_alpha);

    ret = alpha_shape(vertices, total_tuples, res, res_count, &err_msg);

    profstop("alpha", prof_alpha);
    profstart(prof_store);

    if (ret < 0) {
        //elog(ERROR, "Error computing shape: %s", err_msg);
        ereport(ERROR, (errcode(ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED),
                        errmsg("Error computing shape: %s", err_msg)));
    }

    return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(alphashape);

/*
 * alphashape - SRF wrapper around compute_alpha_shape.
 *
 * Arg: vertex SQL (text).  Returns one (x float8, y float8) record per
 * boundary vertex; the composite type comes from the call context.
 */
Datum alphashape(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    int call_cntr;
    int max_calls;
    TupleDesc tuple_desc;
    vertex_t *res = 0;

    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL()) {
        MemoryContext oldcontext;
        int res_count;
        int ret;

        // XXX profiling messages are not thread safe
        profstart(prof_total);
        profstart(prof_extract);

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        ret = compute_alpha_shape(text2char(PG_GETARG_TEXT_P(0)),
                                  &res, &res_count);

        /* total number of tuples to be returned */
        DBG("Conting tuples number\n");
        funcctx->max_calls = res_count;
        funcctx->user_fctx = res;
        DBG("Total count %i", res_count);

        if (get_call_result_type(fcinfo, NULL, &tuple_desc) != TYPEFUNC_COMPOSITE)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("function returning record called in context "
                            "that cannot accept type record")));

        funcctx->tuple_desc = BlessTupleDesc(tuple_desc);

        MemoryContextSwitchTo(oldcontext);
    }

    /* stuff done on every call of the function */
    DBG("Strange stuff doing\n");
    funcctx = SRF_PERCALL_SETUP();

    call_cntr = funcctx->call_cntr;
    max_calls = funcctx->max_calls;
    tuple_desc = funcctx->tuple_desc;
    res = (vertex_t*) funcctx->user_fctx;

    DBG("Trying to allocate some memory\n");

    if (call_cntr < max_calls)    /* do when there is more left to send */
    {
        HeapTuple tuple;
        Datum result;
        Datum *values;
        char* nulls;

        /* This will work for some compilers. If it crashes with segfault,
           try to change the following block with this one

        values = palloc(3 * sizeof(Datum));
        nulls = palloc(3 * sizeof(char));

        values[0] = call_cntr;
        nulls[0] = ' ';
        values[1] = Float8GetDatum(res[call_cntr].x);
        nulls[1] = ' ';
        values[2] = Float8GetDatum(res[call_cntr].y);
        nulls[2] = ' ';
        */

        values = palloc(2 * sizeof(Datum));
        nulls = palloc(2 * sizeof(char));

        values[0] = Float8GetDatum(res[call_cntr].x);
        nulls[0] = ' ';
        values[1] = Float8GetDatum(res[call_cntr].y);
        nulls[1] = ' ';

        DBG("Heap making\n");
        tuple = heap_formtuple(tuple_desc, values, nulls);

        DBG("Datum making\n");
        /* make the tuple into a datum */
        result = HeapTupleGetDatum(tuple);

        DBG("Trying to free some memory\n");
        /* clean up (this is not really necessary) */
        pfree(values);
        pfree(nulls);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else    /* do when there is no more left */
    {
        if (res) free(res);
        profstop("store", prof_store);
        profstop("total", prof_total);
#ifdef PROFILE
        elog(NOTICE, "_________");
#endif
        SRF_RETURN_DONE(funcctx);
    }
}