char *
lookup_primary_key(char *schemaName, char *tableName, bool failOnMissing)
{
    StringInfo  sql = makeStringInfo();
    char       *keyname;

    SPI_connect();

    appendStringInfo(sql, "SELECT column_name FROM information_schema.key_column_usage WHERE table_schema = '%s' AND table_name = '%s'",
                     schemaName, tableName);
    SPI_execute(sql->data, true, 1);

    if (SPI_processed == 0)
    {
        if (failOnMissing)
            elog(ERROR, "Cannot find primary key column for: %s.%s", schemaName, tableName);
        else
        {
            SPI_finish();
            return NULL;
        }
    }

    keyname = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
    if (keyname == NULL)
        elog(ERROR, "Primary Key field is null for: %s.%s", schemaName, tableName);

    keyname = MemoryContextStrdup(TopTransactionContext, keyname);

    SPI_finish();

    return keyname;
}
/*
 * Ensure that the environment is sane.
 * This involves checking the PostgreSQL version, and if in network mode
 * also establishing a connection to a receiver.
 */
int
ensure_valid_environment(void)
{
    StringInfoData buf;
    int         retval;
    char       *pgversion;
    SPITupleTable *coltuptable;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    /* Ensure compatible version */
    pgstat_report_activity(STATE_RUNNING, "verifying compatible postgres version");

    initStringInfo(&buf);
    appendStringInfo(&buf, "select version();");
    retval = SPI_execute(buf.data, false, 0);
    if (retval != SPI_OK_SELECT)
    {
        elog(FATAL, "Unable to query postgres version %d", retval);
        SPI_finish();
        PopActiveSnapshot();
        CommitTransactionCommand();
        return 1;
    }

    coltuptable = SPI_tuptable;
    pgversion = SPI_getvalue(coltuptable->vals[0], coltuptable->tupdesc, 1);

    if (strstr(pgversion, "PostgreSQL 9.3") == NULL)
    {
        elog(FATAL, "Unsupported PostgreSQL version");
        SPI_finish();
        PopActiveSnapshot();
        CommitTransactionCommand();
        return 1;
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    /*
     * Attempt to establish a connection if the output mode is network.
     */
    if (strcmp(output_mode, "network") == 0)
    {
        retval = establish_connection();
        if (retval == 2)
        {
            elog(LOG, "Error: failed to connect to antenna; please check the domain is available from this host.");
        }
    }

    /* TODO: verify that the logging directory is accessible in csv mode. */

    elog(LOG, "Pgsampler Initialized");
    return 0;
}
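Several of the examples in this collection (this one, get_database_count, bg_worker_main, initialize_worker_spi, execute_pg_settings_logger) repeat the same transaction-wrapped SPI skeleton used by background workers. The following is a minimal sketch of that skeleton, not code from any of the projects shown; the query and activity strings are placeholders and error handling is reduced to elog() for brevity.

/* Minimal sketch of the background-worker SPI pattern: wrap the SPI
 * session in a transaction and an active snapshot, and unwind in the
 * reverse order of setup.  The query here is a placeholder. */
static void
run_simple_query(void)
{
    int     ret;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, "running maintenance query");

    ret = SPI_execute("SELECT 1", true, 0);
    if (ret != SPI_OK_SELECT)
        elog(ERROR, "SPI_execute failed: error code %d", ret);

    /* tear down in reverse order of setup */
    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);
}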
char *
lookup_field_mapping(MemoryContext cxt, Oid tableRelId, char *fieldname)
{
    char       *definition = NULL;
    StringInfo  query;

    SPI_connect();

    query = makeStringInfo();
    appendStringInfo(query, "select definition from zdb_mappings where table_name = %d::regclass and field_name = %s;",
                     tableRelId,
                     TextDatumGetCString(DirectFunctionCall1(quote_literal, CStringGetTextDatum(fieldname))));

    if (SPI_execute(query->data, true, 2) != SPI_OK_SELECT)
        elog(ERROR, "Problem looking up analysis thing with query: %s", query->data);

    if (SPI_processed > 1)
    {
        elog(ERROR, "Too many mappings found");
    }
    else if (SPI_processed == 1)
    {
        char   *json = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
        Size    len = strlen(json);

        definition = (char *) MemoryContextAllocZero(cxt, (Size) len + 1);
        memcpy(definition, json, len);
    }

    SPI_finish();

    return definition;
}
char *
lookup_analysis_thing(MemoryContext cxt, char *thing)
{
    char       *definition = "";
    StringInfo  query;

    SPI_connect();

    query = makeStringInfo();
    appendStringInfo(query, "select (to_json(name) || ':' || definition) from %s;",
                     TextDatumGetCString(DirectFunctionCall1(quote_ident, CStringGetTextDatum(thing))));

    if (SPI_execute(query->data, true, 0) != SPI_OK_SELECT)
        elog(ERROR, "Problem looking up analysis thing with query: %s", query->data);

    if (SPI_processed > 0)
    {
        StringInfo  json = makeStringInfo();
        int         i;

        for (i = 0; i < SPI_processed; i++)
        {
            if (i > 0)
                appendStringInfoCharMacro(json, ',');
            appendStringInfo(json, "%s", SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1));
        }

        definition = (char *) MemoryContextAllocZero(cxt, (Size) json->len + 1);
        memcpy(definition, json->data, json->len);
    }

    SPI_finish();

    return definition;
}
bool
type_is_domain(char *type_name, Oid *base_type)
{
    bool        rc;
    StringInfo  query;

    SPI_connect();

    query = makeStringInfo();
    appendStringInfo(query, "SELECT typtype = 'd', typbasetype FROM pg_type WHERE typname = %s",
                     TextDatumGetCString(DirectFunctionCall1(quote_literal, CStringGetTextDatum(type_name))));

    if (SPI_execute(query->data, true, 1) != SPI_OK_SELECT)
        elog(ERROR, "Problem determining if %s is a domain with query: %s", type_name, query->data);

    if (SPI_processed == 0)
    {
        rc = false;
    }
    else
    {
        bool    isnull;
        Datum   d;

        d = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull);
        rc = isnull || DatumGetBool(d);

        d = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull);
        *base_type = isnull ? InvalidOid : DatumGetObjectId(d);
    }

    SPI_finish();

    return rc;
}
long long *
all_referenced_files(int *countOut)
{
    char    query[128];

    snprintf(query, 128, "SELECT file_id FROM " WDB_SCHEMA ".file_blob");

    SPI_connect();
    int result = SPI_execute(query, true, 0);
    if (SPI_OK_SELECT != result)
        ereport(ERROR,
                (errcode(ERRCODE_RAISE_EXCEPTION),
                 errmsg("Error when reading from file_blob")));

    *countOut = SPI_processed;
    long long *ret = (long long *) SPI_palloc(sizeof(long long) * (*countOut));
    int i;
    for (i = 0; i < *countOut; ++i)
    {
        bool    isNull;     /* unused */
        Datum   d = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isNull);

        ret[i] = DatumGetInt64(d);
    }
    SPI_finish();

    return ret;
}
Datum
ts_stat2(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    Datum       result;

    if (SRF_IS_FIRSTCALL())
    {
        TSVectorStat *stat;
        text       *txt = PG_GETARG_TEXT_P(0);
        text       *ws = PG_GETARG_TEXT_P(1);

        funcctx = SRF_FIRSTCALL_INIT();
        SPI_connect();
        stat = ts_stat_sql(funcctx->multi_call_memory_ctx, txt, ws);
        PG_FREE_IF_COPY(txt, 0);
        PG_FREE_IF_COPY(ws, 1);
        ts_setup_firstcall(fcinfo, funcctx, stat);
        SPI_finish();
    }

    funcctx = SRF_PERCALL_SETUP();
    if ((result = ts_process_call(funcctx)) != (Datum) 0)
        SRF_RETURN_NEXT(funcctx, result);
    SRF_RETURN_DONE(funcctx);
}
/*
 * table_log_show_column()
 *
 * show a single column on a date in the past
 *
 * parameter: not yet defined
 * return:    not yet defined
 */
Datum
table_log_show_column(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;
    int         ret;

    /*
     * Some checks first...
     */
#ifdef TABLE_LOG_DEBUG
    elog(NOTICE, "start table_log_show_column()");
#endif /* TABLE_LOG_DEBUG */

    /* Connect to SPI manager */
    ret = SPI_connect();
    if (ret != SPI_OK_CONNECT)
    {
        elog(ERROR, "table_log_show_column: SPI_connect returned %d", ret);
    }

#ifdef TABLE_LOG_DEBUG
    elog(NOTICE, "this function isn't available yet");
#endif /* TABLE_LOG_DEBUG */

    /* close SPI connection */
    SPI_finish();

    PG_RETURN_NULL();
}
/*
 * Returns a count of the number of non-template databases from the catalog.
 */
int
get_database_count(void)
{
    int         retval,
                processed;
    StringInfoData buf;
    SPITupleTable *coltuptable;
    int         database_count = 0;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    initStringInfo(&buf);
    appendStringInfo(&buf, "SELECT count(*) FROM pg_database WHERE datname NOT IN ('template0', 'template1') AND datallowconn IS TRUE;");
    retval = SPI_execute(buf.data, false, 0);
    if (retval != SPI_OK_SELECT)
    {
        elog(FATAL, "Database information collection failed");
        // FAIL RETURN 1
    }

    processed = SPI_processed;
    if (processed > 0)
    {
        coltuptable = SPI_tuptable;
        database_count = atoi(SPI_getvalue(coltuptable->vals[0], coltuptable->tupdesc, 1));
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    return database_count;
}
/*
 * Create tables as storage for shredded data
 * @param none
 * @return true/false
 */
Datum
create_xmlindex_tables(PG_FUNCTION_ARGS)
{
    StringInfoData query;

    initStringInfo(&query);
    appendStringInfo(&query,
                     "CREATE TABLE xml_documents_table "
                     "(did serial not null, "
                     "name text, "
                     "value xml,"
                     "xdb_sequence int default 0); "
                     "CREATE TABLE attribute_table "
                     "(name text, "
                     "did int not null, "
                     "pre_order int not null, "
                     "size int not null, "
                     "depth int, "
                     "parent_id int, "
                     "prev_id int, "
                     "value text,"
                     "PRIMARY KEY (did,pre_order)); "
                     "CREATE TABLE element_table "
                     "(name text, "
                     "did int not null, "
                     "pre_order int not null, "
                     "size int not null, "
                     "depth int, "
                     "parent_id int, "
                     "prev_id int, "
                     "child_id int, "
                     "attr_id int, "
                     "PRIMARY KEY (did,pre_order,size));"
                     "CREATE TABLE text_table "
                     "(did int not null, "
                     "pre_order int not null, "
                     "depth int not null, "
                     "parent_id int, "
                     "prev_id int, "
                     "value text, "
                     "PRIMARY KEY (pre_order, did));");

    SPI_connect();

    if (SPI_execute(query.data, false, 0) == SPI_ERROR_ARGUMENT)
    {
        ereport(ERROR,
                (errcode(ERRCODE_DATA_EXCEPTION),
                 errmsg("invalid query")));
    }

    /* TODO: add foreign key to xml_documents_table */

    SPI_finish();

    create_indexes_on_tables();

    PG_RETURN_BOOL(true);
}
/**
 * @fn Datum repack_index_swap(PG_FUNCTION_ARGS)
 * @brief Swap out an original index on a table with the newly-created one.
 *
 * repack_index_swap(index)
 *
 * @param index Oid of the *original* index.
 * @retval void
 */
Datum
repack_index_swap(PG_FUNCTION_ARGS)
{
    Oid         orig_idx_oid = PG_GETARG_OID(0);
    Oid         repacked_idx_oid;
    StringInfoData str;
    SPITupleTable *tuptable;
    TupleDesc   desc;
    HeapTuple   tuple;

    /* authority check */
    must_be_superuser("repack_index_swap");

    /* connect to SPI manager */
    repack_init();

    initStringInfo(&str);

    /* Find the OID of our new index. */
    appendStringInfo(&str, "SELECT oid FROM pg_class "
                     "WHERE relname = 'index_%u' AND relkind = 'i'",
                     orig_idx_oid);
    execute(SPI_OK_SELECT, str.data);
    if (SPI_processed != 1)
        elog(ERROR, "Could not find index 'index_%u', found " UINT64_FORMAT " matches",
             orig_idx_oid, (uint64) SPI_processed);

    tuptable = SPI_tuptable;
    desc = tuptable->tupdesc;
    tuple = tuptable->vals[0];
    repacked_idx_oid = getoid(tuple, desc, 1);

    swap_heap_or_index_files(orig_idx_oid, repacked_idx_oid);

    SPI_finish();

    PG_RETURN_VOID();
}
/*
 * Create indexes on shredded data
 * @return true if it succeeds
 */
bool
create_indexes_on_tables(void)
{
    bool        result = false;

    SPI_connect();

    if (SPI_execute("CREATE INDEX attr_tab_all_index ON attribute_table (name, did, pre_order); "
                    "CREATE INDEX attr_tab_range_index ON element_table USING gist (range_i(pre_order, (pre_order+size)));"
                    "CREATE INDEX did_tab_name_index ON xml_documents_table (name); "
                    "CREATE INDEX elem_tab_all_index ON element_table (name, did, pre_order, size); "
                    "CREATE INDEX elem_tab_range_index ON element_table USING gist (range(pre_order, (pre_order+size)));"
                    "CREATE INDEX text_tab_index ON text_table (parent_id,did);",
                    false, 0) == SPI_ERROR_PARAM)
    {
        ereport(ERROR,
                (errcode(ERRCODE_DATA_EXCEPTION),
                 errmsg("Can not get ID of lastly inserted XML document")));
    }

    result = true;

    SPI_finish();

    return result;
}
static void
getExtensionLoadPath()
{
    MemoryContext curr;
    Datum       dtm;
    bool        isnull;

    /*
     * Check whether sqlj.loadpath exists before querying it. I would more
     * happily just PG_CATCH() the error and compare to ERRCODE_UNDEFINED_TABLE
     * but what's required to make that work right is "not terribly well
     * documented, but the exception-block handling in plpgsql provides a
     * working model" and that code is a lot more fiddly than you would guess.
     */
    if ( InvalidOid == get_relname_relid("loadpath",
        GetSysCacheOid1(NAMESPACENAME, CStringGetDatum("sqlj"))) )
        return;

    SPI_connect();
    curr = CurrentMemoryContext;
    if ( SPI_OK_SELECT == SPI_execute(
            "SELECT path, exnihilo FROM sqlj.loadpath", true, 1)
        && 1 == SPI_processed )
    {
        MemoryContextSwitchTo(TopMemoryContext);
        pljavaLoadPath = (char const *) SPI_getvalue(
            SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
        MemoryContextSwitchTo(curr);
        dtm = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2,
                            &isnull);
        if ( isnull )
            elog(ERROR, "defect in CREATE EXTENSION script");
        extensionExNihilo = DatumGetBool(dtm);
    }
    SPI_finish();
}
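A common thread in the lookup examples above (lookup_primary_key, lookup_field_mapping, lookup_analysis_thing, getExtensionLoadPath) is copying a result out of SPI's memory before calling SPI_finish(), since SPI_finish() releases everything allocated during the SPI session. Below is a minimal sketch of that copy-out pattern; the function name, query, and table name are placeholders invented for illustration, not taken from any of the extensions shown.

/* Hypothetical helper: fetch one text column and return a copy that
 * survives SPI_finish().  "my_table" and the target context are
 * assumptions made for this sketch. */
static char *
fetch_label_copy(MemoryContext cxt)
{
    char       *result = NULL;

    if (SPI_connect() != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed");

    if (SPI_execute("SELECT label FROM my_table LIMIT 1", true, 1) != SPI_OK_SELECT)
        elog(ERROR, "query failed");

    if (SPI_processed == 1)
    {
        char   *val = SPI_getvalue(SPI_tuptable->vals[0],
                                   SPI_tuptable->tupdesc, 1);

        if (val != NULL)
            result = MemoryContextStrdup(cxt, val);   /* copy out of SPI memory */
    }

    SPI_finish();               /* frees everything palloc'd under SPI */
    return result;
}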
/*
 * Main worker routine. Accepts dsm_handle as an argument
 */
static void
bg_worker_main(Datum main_arg)
{
    PartitionArgs *args;
    dsm_handle  handle = DatumGetInt32(main_arg);

    /* Create resource owner */
    CurrentResourceOwner = ResourceOwnerCreate(NULL, "CreatePartitionsWorker");

    /* Attach to dynamic shared memory */
    if (!handle)
    {
        ereport(WARNING,
                (errmsg("pg_pathman worker: invalid dsm_handle")));
    }
    segment = dsm_attach(handle);
    args = dsm_segment_address(segment);

    /* Establish connection and start transaction */
    BackgroundWorkerInitializeConnectionByOid(args->dbid, InvalidOid);
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    /* Create partitions */
    args->result = create_partitions(args->relid,
                                     PATHMAN_GET_DATUM(args->value, args->by_val),
                                     args->value_type,
                                     &args->crashed);

    /* Cleanup */
    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    dsm_detach(segment);
}
/*
 * PgQ log trigger, takes 2 arguments:
 * 1. queue name to be inserted to.
 *
 * Queue events will be in format:
 *    ev_type   - operation type, I/U/D
 *    ev_data   - urlencoded column values
 *    ev_extra1 - table name
 *    ev_extra2 - optional urlencoded backup
 */
Datum
pgq_logutriga(PG_FUNCTION_ARGS)
{
    TriggerData *tg;
    struct PgqTriggerEvent ev;
    HeapTuple   row;

    /*
     * Get the trigger call context
     */
    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "pgq.logutriga not called as trigger");

    tg = (TriggerData *) (fcinfo->context);
    if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event))
        row = tg->tg_newtuple;
    else
        row = tg->tg_trigtuple;

    if (pgq_is_logging_disabled())
        goto skip_it;

    /*
     * Connect to the SPI manager
     */
    if (SPI_connect() < 0)
        elog(ERROR, "logutriga: SPI_connect() failed");

    pgq_prepare_event(&ev, tg, true);

    appendStringInfoChar(ev.field[EV_TYPE], ev.op_type);
    appendStringInfoChar(ev.field[EV_TYPE], ':');
    appendStringInfoString(ev.field[EV_TYPE], ev.pkey_list);
    appendStringInfoString(ev.field[EV_EXTRA1], ev.info->table_name);

    if (is_interesting_change(&ev, tg))
    {
        /*
         * create type, data
         */
        pgq_urlenc_row(&ev, row, ev.field[EV_DATA]);

        /*
         * Construct the parameter array and insert the log row.
         */
        pgq_insert_tg_event(&ev);
    }

    if (SPI_finish() < 0)
        elog(ERROR, "SPI_finish failed");

    /*
     * After trigger ignores result,
     * before trigger skips event if NULL.
     */
skip_it:
    if (TRIGGER_FIRED_AFTER(tg->tg_event) || ev.tgargs->skip)
        return PointerGetDatum(NULL);
    else
        return PointerGetDatum(row);
}
void
Invocation_assertDisconnect(void)
{
    if (currentInvocation->hasConnected)
    {
        SPI_finish();
        currentInvocation->hasConnected = false;
    }
}
/*
 * Initialize workspace for a worker process: create the schema if it doesn't
 * already exist.
 */
static void
initialize_worker_spi(worktable *table)
{
    int         ret;
    int         ntup;
    bool        isnull;
    StringInfoData buf;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");

    /* XXX could we use CREATE SCHEMA IF NOT EXISTS? */
    initStringInfo(&buf);
    appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'",
                     table->schema);

    ret = SPI_execute(buf.data, true, 0);
    if (ret != SPI_OK_SELECT)
        elog(FATAL, "SPI_execute failed: error code %d", ret);

    if (SPI_processed != 1)
        elog(FATAL, "not a singleton result");

    ntup = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0],
                                       SPI_tuptable->tupdesc,
                                       1, &isnull));
    if (isnull)
        elog(FATAL, "null result");

    if (ntup == 0)
    {
        resetStringInfo(&buf);
        appendStringInfo(&buf,
                         "CREATE SCHEMA \"%s\" "
                         "CREATE TABLE \"%s\" ("
                         " type text CHECK (type IN ('total', 'delta')), "
                         " value integer)"
                         "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
                         "WHERE type = 'total'",
                         table->schema, table->name, table->name, table->name);

        /* set statement start time */
        SetCurrentStatementStartTimestamp();

        ret = SPI_execute(buf.data, false, 0);

        if (ret != SPI_OK_UTILITY)
            elog(FATAL, "failed to create my schema");
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);
}
/**
 * @fn Datum reorg_trigger(PG_FUNCTION_ARGS)
 * @brief Insert an operation log into the log table.
 *
 * reorg_trigger(sql)
 *
 * @param sql SQL to insert an operation log into the log table.
 */
Datum
reorg_trigger(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;
    TupleDesc   desc;
    HeapTuple   tuple;
    Datum       values[2];
    bool        nulls[2] = { 0, 0 };
    Oid         argtypes[2];
    const char *sql;

    /* authority check */
    must_be_superuser("reorg_trigger");

    /* make sure it's called as a trigger at all */
    if (!CALLED_AS_TRIGGER(fcinfo) ||
        !TRIGGER_FIRED_BEFORE(trigdata->tg_event) ||
        !TRIGGER_FIRED_FOR_ROW(trigdata->tg_event) ||
        trigdata->tg_trigger->tgnargs != 1)
        elog(ERROR, "reorg_trigger: invalid trigger call");

    /* retrieve parameters */
    sql = trigdata->tg_trigger->tgargs[0];
    desc = RelationGetDescr(trigdata->tg_relation);
    argtypes[0] = argtypes[1] = trigdata->tg_relation->rd_rel->reltype;

    /* connect to SPI manager */
    reorg_init();

    if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
    {
        /* INSERT: (NULL, newtup) */
        tuple = trigdata->tg_trigtuple;
        nulls[0] = true;
        values[1] = copy_tuple(tuple, desc);
    }
    else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
    {
        /* DELETE: (oldtup, NULL) */
        tuple = trigdata->tg_trigtuple;
        values[0] = copy_tuple(tuple, desc);
        nulls[1] = true;
    }
    else
    {
        /* UPDATE: (oldtup, newtup) */
        tuple = trigdata->tg_newtuple;
        values[0] = copy_tuple(trigdata->tg_trigtuple, desc);
        values[1] = copy_tuple(tuple, desc);
    }

    /* INSERT INTO reorg.log VALUES ($1, $2) */
    execute_with_args(SPI_OK_INSERT, sql, 2, argtypes, values, nulls);

    SPI_finish();

    PG_RETURN_POINTER(tuple);
}
/**
 * @fn Datum reorg_drop(PG_FUNCTION_ARGS)
 * @brief Delete temporary objects.
 *
 * reorg_drop(oid, relname)
 *
 * @param oid Oid of target table.
 * @retval None.
 */
Datum
reorg_drop(PG_FUNCTION_ARGS)
{
    Oid         oid = PG_GETARG_OID(0);
    const char *relname = get_quoted_relname(oid);
    const char *nspname = get_quoted_nspname(oid);

    /* authority check */
    must_be_superuser("reorg_drop");

    /* connect to SPI manager */
    reorg_init();

    /*
     * drop reorg trigger: We have already dropped the trigger in normal
     * cases, but it can be left on error.
     */
    execute_with_format(
        SPI_OK_UTILITY,
        "DROP TRIGGER IF EXISTS z_reorg_trigger ON %s.%s CASCADE",
        nspname, relname);

#if PG_VERSION_NUM < 80400
    /* delete autovacuum settings */
    execute_with_format(
        SPI_OK_DELETE,
        "DELETE FROM pg_catalog.pg_autovacuum v"
        " USING pg_class c, pg_namespace n"
        " WHERE relname IN ('log_%u', 'table_%u')"
        " AND n.nspname = 'reorg'"
        " AND c.relnamespace = n.oid"
        " AND v.vacrelid = c.oid",
        oid, oid);
#endif

    /* drop log table */
    execute_with_format(
        SPI_OK_UTILITY,
        "DROP TABLE IF EXISTS reorg.log_%u CASCADE",
        oid);

    /* drop temp table */
    execute_with_format(
        SPI_OK_UTILITY,
        "DROP TABLE IF EXISTS reorg.table_%u CASCADE",
        oid);

    /* drop type for log table */
    execute_with_format(
        SPI_OK_UTILITY,
        "DROP TYPE IF EXISTS reorg.pk_%u CASCADE",
        oid);

    SPI_finish();

    PG_RETURN_VOID();
}
// http://www.postgresql.org/docs/9.4/static/spi-spi-finish.html
void
pgr_SPI_finish(void)
{
    PGR_DBG("Disconnecting SPI");
    int code = SPI_OK_FINISH;
    code = SPI_finish();
    if (code != SPI_OK_FINISH)
    {
        // SPI_ERROR_UNCONNECTED
        elog(ERROR, "There was no connection to SPI");
    }
}
static int
finish(int code, int ret)
{
    code = SPI_finish();
    if (code != SPI_OK_FINISH)
    {
        elog(ERROR, "couldn't disconnect from SPI");
        return -1;
    }
    return ret;
}
static void
update_gp_master_mirroring(char *str)
{
    volatile bool connected = false;
    volatile bool resetModsDML = false;

    PG_TRY();
    {
        StringInfoData sql;

        initStringInfo(&sql);
        appendStringInfo(&sql, "update gp_master_mirroring set "
                         "summary_state = '%s', detail_state = null,"
                         "log_time = current_timestamp, error_message = null",
                         str);

        if (SPI_OK_CONNECT != SPI_connect())
            elog(ERROR, "cannot connect via SPI");
        connected = true;

        if (!allowSystemTableModsDML)
        {
            allowSystemTableModsDML = true;
            resetModsDML = true;
        }

        if (SPI_execute(sql.data, false, 0) < 0)
            elog(ERROR, "cannot update gp_master_mirroring");

        if (resetModsDML)
            allowSystemTableModsDML = false;
    }
    PG_CATCH();
    {
        if (connected)
            SPI_finish();

        if (resetModsDML)
            allowSystemTableModsDML = false;

        PG_RE_THROW();
    }
    PG_END_TRY();

    SPI_finish();
}
static void
_SPI_disc(bool pop)
{
    int     ret;

    if ((ret = SPI_finish()) != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish returned %s", SPI_result_code_string(ret));
    if (pop)
        SPI_pop();
}
Datum
dbms_alert_signal(PG_FUNCTION_ARGS)
{
    void       *plan;
    Oid         argtypes[] = {TEXTOID, TEXTOID};
    Datum       values[2];
    char        nulls[2] = {' ', ' '};

    if (PG_ARGISNULL(0))
        ereport(ERROR,
                (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
                 errmsg("event name is NULL"),
                 errdetail("Eventname may not be NULL.")));

    if (PG_ARGISNULL(1))
        nulls[1] = 'n';

    values[0] = PG_GETARG_DATUM(0);
    values[1] = PG_GETARG_DATUM(1);

    if (SPI_connect() < 0)
        ereport(ERROR,
                (errcode(ERRCODE_INTERNAL_ERROR),
                 errmsg("SPI_connect failed")));

    SPI_EXEC("SELECT 1 FROM pg_catalog.pg_class c "
             "WHERE pg_catalog.pg_table_is_visible(c.oid) "
             "AND c.relkind='r' AND c.relname = 'ora_alerts'", SELECT);

    if (0 == SPI_processed)
    {
        SPI_EXEC("CREATE TEMP TABLE ora_alerts(event text, message text)", UTILITY);
        SPI_EXEC("REVOKE ALL ON TABLE ora_alerts FROM PUBLIC", UTILITY);
        SPI_EXEC("CREATE CONSTRAINT TRIGGER ora_alert_signal AFTER INSERT ON ora_alerts "
                 "INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE dbms_alert.defered_signal()", UTILITY);
    }

    if (!(plan = SPI_prepare("INSERT INTO ora_alerts(event,message) VALUES($1, $2)", 2, argtypes)))
        ereport(ERROR,
                (errcode(ERRCODE_INTERNAL_ERROR),
                 errmsg("SPI_prepare failed")));

    if (SPI_OK_INSERT != SPI_execute_plan(plan, values, nulls, false, 1))
        ereport(ERROR,
                (errcode(ERRCODE_INTERNAL_ERROR),
                 errmsg("can't execute sql")));

    SPI_finish();
    PG_RETURN_VOID();
}
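The example above runs a parameterized insert with SPI_prepare() followed by SPI_execute_plan(). When a plan is not reused, the one-shot SPI_execute_with_args() call covers the same ground. The following is a rough sketch of that alternative, reusing the table and column names from the example; the helper itself is invented for illustration and is not part of the orafce source.

/* Sketch: one-shot parameterized insert via SPI_execute_with_args()
 * instead of SPI_prepare()/SPI_execute_plan().  Assumes the caller has
 * already resolved the two text Datums. */
static void
insert_alert(Datum event, Datum message, bool message_is_null)
{
    Oid     argtypes[2] = {TEXTOID, TEXTOID};
    Datum   values[2];
    char    nulls[2] = {' ', ' '};
    int     ret;

    values[0] = event;
    values[1] = message;
    if (message_is_null)
        nulls[1] = 'n';

    if (SPI_connect() < 0)
        elog(ERROR, "SPI_connect failed");

    ret = SPI_execute_with_args("INSERT INTO ora_alerts(event, message) VALUES ($1, $2)",
                                2, argtypes, values, nulls,
                                false,      /* read_only */
                                1);         /* tcount */
    if (ret != SPI_OK_INSERT)
        elog(ERROR, "could not insert alert row");

    SPI_finish();
}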
/*
 * connectby - does the real work for connectby_text()
 */
static Tuplestorestate *
connectby(char *relname,
          char *key_fld,
          char *parent_key_fld,
          char *orderby_fld,
          char *branch_delim,
          char *start_with,
          int max_depth,
          bool show_branch,
          bool show_serial,
          MemoryContext per_query_ctx,
          bool randomAccess,
          AttInMetadata *attinmeta)
{
    Tuplestorestate *tupstore = NULL;
    int         ret;
    MemoryContext oldcontext;
    int         serial = 1;

    /* Connect to SPI manager */
    if ((ret = SPI_connect()) < 0)
        /* internal error */
        elog(ERROR, "connectby: SPI_connect returned %d", ret);

    /* switch to longer term context to create the tuple store */
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /* initialize our tuplestore */
    tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);

    MemoryContextSwitchTo(oldcontext);

    /* now go get the whole tree */
    tupstore = build_tuplestore_recursively(key_fld, parent_key_fld,
                                            relname,
                                            orderby_fld,
                                            branch_delim,
                                            start_with,
                                            start_with, /* current_branch */
                                            0,          /* initial level is 0 */
                                            &serial,    /* initial serial is 1 */
                                            max_depth,
                                            show_branch,
                                            show_serial,
                                            per_query_ctx,
                                            attinmeta,
                                            tupstore);

    SPI_finish();

    return tupstore;
}
static int
finish(int code, int ret)
{
    PGR_DBG("In finish, trying to disconnect from spi %d", ret);
    code = SPI_finish();
    if (code != SPI_OK_FINISH)
    {
        elog(ERROR, "couldn't disconnect from SPI");
        return -1;
    }
    return ret;
}
Datum
cdb_get_oid(PG_FUNCTION_ARGS)
{
    int         result;

    if (SPI_OK_CONNECT != SPI_connect())
    {
        ereport(ERROR,
                (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                 errmsg("SPI_connect failed in cdb_get_oid")));
    }

    if (SPI_OK_UTILITY != SPI_execute("CREATE TEMPORARY TABLE pgdump_oid (dummy integer) WITH OIDS", false, 0))
    {
        ereport(ERROR,
                (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                 errmsg("SPI_execute failed in cdb_get_oid")));
    }

    if (SPI_OK_INSERT != SPI_execute("insert into pgdump_oid values(0)", false, 0))
    {
        ereport(ERROR,
                (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                 errmsg("SPI_execute failed to insert a row into pgdump_oid in cdb_get_oid")));
    }

    if (SPI_OK_SELECT != SPI_execute("select oid from pgdump_oid", false, 0))
    {
        ereport(ERROR,
                (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                 errmsg("SPI_execute failed in cdb_get_oid")));
    }

    if (SPI_processed == 0)
        ereport(ERROR,
                (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                 errmsg("No rows in pgdump_oid in cdb_get_oid")));

    TupleDesc   tupdesc = SPI_tuptable->tupdesc;

    result = atoi(SPI_getvalue(SPI_tuptable->vals[0], tupdesc, 1));

    if (SPI_OK_UTILITY != SPI_execute("DROP TABLE pgdump_oid", false, 0))
    {
        ereport(ERROR,
                (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                 errmsg("SPI_execute failed in cdb_get_oid")));
    }

    SPI_finish();
    PG_RETURN_INT32(result);
}
void
Invocation_popInvocation(bool wasException)
{
    CallLocal  *cl;
    Invocation *ctx = currentInvocation->previous;

    if (currentInvocation->invocation != 0)
    {
        if (!wasException)
            JNI_callVoidMethod(currentInvocation->invocation, s_Invocation_onExit);
        JNI_deleteGlobalRef(currentInvocation->invocation);
    }

    /*
     * Check for any DualState objects that became unreachable and can be freed.
     */
    pljava_DualState_cleanEnqueuedInstances();

    if (currentInvocation->hasConnected)
        SPI_finish();

    JNI_popLocalFrame(0);

    if (ctx != 0)
    {
        PG_TRY();
        {
            Backend_setJavaSecurity(ctx->trusted);
        }
        PG_CATCH();
        {
            elog(FATAL, "Failed to reinstate untrusted security after a trusted call or vice versa");
        }
        PG_END_TRY();
        MemoryContextSwitchTo(ctx->upperContext);
    }

    /*
     * Reset all local wrappers that have been allocated during this call.
     * Yank them from the doubly linked list but do *not* remove them.
     */
    cl = currentInvocation->callLocals;
    if (cl != 0)
    {
        CallLocal  *first = cl;

        do
        {
            cl->pointer = 0;
            cl->invocation = 0;
            cl = cl->next;
        } while (cl != first);
    }

    currentInvocation = ctx;
    --s_callLevel;
}
static void
execute_pg_settings_logger(config_log_objects *objects)
{
    int         ret;
    bool        isnull;
    StringInfoData buf;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, "executing configuration logger function");

    initStringInfo(&buf);

    appendStringInfo(&buf, "SELECT %s.%s()",
                     config_log_schema,
                     objects->function_name);

    ret = SPI_execute(buf.data, false, 0);
    if (ret != SPI_OK_SELECT)
    {
        elog(FATAL, "SPI_execute failed: error code %d", ret);
    }

    if (SPI_processed != 1)
    {
        elog(FATAL, "not a singleton result");
    }

    log_info("pg_settings_logger() executed");

    if (DatumGetBool(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)))
    {
        log_info("Configuration changes recorded");
    }
    else
    {
        log_info("No configuration changes detected");
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);
}
Datum
plpgsql_inline_handler(PG_FUNCTION_ARGS)
{
    InlineCodeBlock *codeblock = (InlineCodeBlock *) DatumGetPointer(PG_GETARG_DATUM(0));
    PLpgSQL_function *func;
    FunctionCallInfoData fake_fcinfo;
    FmgrInfo    flinfo;
    Datum       retval;
    int         rc;

    Assert(IsA(codeblock, InlineCodeBlock));

    /*
     * Connect to SPI manager
     */
    if ((rc = SPI_connect()) != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc));

    /* Compile the anonymous code block */
    func = plpgsql_compile_inline(fcinfo, codeblock->source_text);

    /* Mark the function as busy, just pro forma */
    func->use_count++;

    /*
     * Set up a fake fcinfo with just enough info to satisfy
     * plpgsql_exec_function().  In particular note that this sets things up
     * with no arguments passed.
     */
    MemSet(&fake_fcinfo, 0, sizeof(fake_fcinfo));
    MemSet(&flinfo, 0, sizeof(flinfo));
    fake_fcinfo.flinfo = &flinfo;
    flinfo.fn_oid = InvalidOid;
    flinfo.fn_mcxt = CurrentMemoryContext;

    retval = plpgsql_exec_function(func, &fake_fcinfo);

    /* Function should now have no remaining use-counts ... */
    func->use_count--;
    Assert(func->use_count == 0);

    /* ... so we can free subsidiary storage */
    plpgsql_free_function_memory(func);

    /*
     * Disconnect from SPI manager
     */
    if ((rc = SPI_finish()) != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(rc));

    return retval;
}