/*
 * Restore a single logged DELETE: remove the row from the restore table
 * that matches the original primary key value recorded in log row i.
 * Raises ERROR if the pkey is NULL or the DELETE fails.
 */
void __table_log_restore_table_delete(SPITupleTable *spi_tuptable, char *table_restore, char *table_orig_pkey, char *col_query_start, int col_pkey, int number_columns, int i) {
  int   exec_ret;
  char *pkey_value;
  /* memory for the dynamically built statement */
  int   query_size;
  char *query;

  /* fetch the primary key value of this log row */
  pkey_value = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, col_pkey);
  if (pkey_value == NULL) {
    elog(ERROR, "pkey cannot be NULL");
  }

  /* reserve memory: fixed SQL text slack plus the quoted identifiers/literal */
  query_size = 250 + strlen(do_quote_ident(table_restore))
                   + strlen(do_quote_ident(table_orig_pkey))
                   + strlen(do_quote_literal(pkey_value));
  query = (char *) palloc((query_size + 1) * sizeof(char));

  /* build the DELETE statement */
  sprintf(query, "DELETE FROM %s WHERE %s=%s",
          do_quote_ident(table_restore),
          do_quote_ident(table_orig_pkey),
          do_quote_literal(pkey_value));

#ifdef TABLE_LOG_DEBUG_QUERY
  elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */

  exec_ret = SPI_exec(query, 0);
  if (exec_ret != SPI_OK_DELETE) {
    elog(ERROR, "could not delete data from: %s", table_restore);
  }
  /* done */
}
/*
 * getPrimaryKey
 *    Look up the primary-key column numbers (pg_index.indkey) of the
 *    given table.  Returns an int2vector copied into SPI upper-executor
 *    memory so it survives SPI_finish(), or NULL if the table has no
 *    primary key (or the lookup fails).
 */
int2vector *
getPrimaryKey(Oid tblOid)
{
	char	   *queryBase;
	char	   *query;
	bool		isNull;
	int2vector *resultKey;
	int2vector *tpResultKey;
	HeapTuple	resTuple;
	Datum		resDatum;
	int			ret;

	queryBase = "SELECT indkey FROM pg_index WHERE indisprimary='t' AND indrelid=";
	query = SPI_palloc(strlen(queryBase) + MAX_OID_LEN + 1);
	/* Oid is unsigned: use %u, not %d, so OIDs above INT_MAX are not mangled */
	sprintf(query, "%s%u", queryBase, tblOid);

	ret = SPI_exec(query, 1);
	SPI_pfree(query);
	if (ret != SPI_OK_SELECT || SPI_processed != 1)
		return NULL;

	resTuple = SPI_tuptable->vals[0];
	resDatum = SPI_getbinval(resTuple, SPI_tuptable->tupdesc, 1, &isNull);
	/* guard against a NULL indkey: dereferencing it in VARSIZE would crash */
	if (isNull)
		return NULL;

	tpResultKey = (int2vector *) DatumGetPointer(resDatum);
	/* copy out of the SPI result area so the caller keeps a valid pointer */
	resultKey = SPI_palloc(VARSIZE(tpResultKey));
	memcpy(resultKey, tpResultKey, VARSIZE(tpResultKey));

	return resultKey;
}
char * plj_get_configvalue_string(const char *paramName) { char *sql; int proc, ret; /* * no SPI_connect, we are already connected. */ sql = SPI_palloc(strlen(paramName) + strlen(get_sql)); sprintf(sql, get_sql, paramName); ret = SPI_exec(sql, 1); proc = SPI_processed; if (ret == SPI_OK_SELECT && proc > 0) { TupleDesc tupdesc = SPI_tuptable->tupdesc; SPITupleTable *tuptable = SPI_tuptable; return SPI_getvalue(tuptable->vals[0], tupdesc, 1); } elog(WARNING, "[config db] config value not set: %s", paramName); return ""; }
/*
 * Class:     org_postgresql_pljava_internal_SPI
 * Method:    _exec
 * Signature: (JLjava/lang/String;I)I
 *
 * JNI entry point: runs the given SQL command through SPI_exec on behalf
 * of the Java caller and returns the SPI result code as a jint.
 * NOTE(review): the function continues past this view (the END_NATIVE /
 * return statement presumably follow) — only the visible part is shown.
 */
JNIEXPORT jint JNICALL
Java_org_postgresql_pljava_internal_SPI__1exec(JNIEnv* env, jclass cls, jlong threadId, jstring cmd, jint count)
{
	jint result = 0;
	BEGIN_NATIVE
	/* convert the Java string into a NUL-terminated C string */
	char* command = String_createNTS(cmd);
	if(command != 0)
	{
		STACK_BASE_VARS
		STACK_BASE_PUSH(threadId)
		PG_TRY();
		{
			Invocation_assertConnect();
			result = (jint)SPI_exec(command, (int)count);
			/* negative SPI codes are errors: surface them as a Java exception */
			if(result < 0)
				Exception_throwSPI("exec", result);
		}
		PG_CATCH();
		{
			/* translate the PostgreSQL ERROR into a Java exception */
			Exception_throw_ERROR("SPI_exec");
		}
		PG_END_TRY();
		pfree(command);
		STACK_BASE_POP()
	}
/*
 * Restore a single logged UPDATE: rewrite the row in the restore table,
 * setting every column to the values recorded in log row i and matching
 * on the original primary key value.  Raises ERROR if the UPDATE fails.
 */
void
__table_log_restore_table_update(SPITupleTable *spi_tuptable, char *table_restore, char *table_orig_pkey, char *col_query_start, int col_pkey, int number_columns, int i, char *old_pkey_string)
{
	int		size_of_values, j, ret;
	char   *tmp, *tmp2;
	/* memory for dynamic query */
	int		d_query_size;		/* payload capacity, excluding the NUL */
	char   *d_query_start;		/* start of the statement buffer */
	size_t	used;				/* bytes already written into the buffer */

	/* compute an upper bound for the SET list: name=value pairs */
	size_of_values = 0;
	for (j = 1; j <= number_columns; j++)
	{
		/* get value */
		tmp = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, j);
		/* and get name of column */
		tmp2 = SPI_fname(spi_tuptable->tupdesc, j);
		if (tmp == NULL)
		{
			/* "<ident>=NULL" plus separator */
			size_of_values += 6 + strlen(do_quote_ident(tmp2)) + 2;
		}
		else
		{
			size_of_values += strlen(do_quote_literal(tmp)) + strlen(do_quote_ident(tmp2)) + 3;
		}
	}

	/* reserve memory (250 bytes of slack for the fixed SQL text) */
	d_query_size = 250 + size_of_values + NAMEDATALEN + strlen(do_quote_literal(old_pkey_string));
	d_query_start = (char *) palloc((d_query_size + 1) * sizeof(char));

	/*
	 * Build the query.  Every append is bounded by the *remaining* space.
	 * The old code passed the total buffer size to snprintf()/strncat()
	 * while writing at an offset, which could overflow the allocation.
	 */
	snprintf(d_query_start, d_query_size + 1, "UPDATE %s SET ", do_quote_ident(table_restore));
	used = strlen(d_query_start);

	for (j = 1; j <= number_columns; j++)
	{
		if (j > 1)
		{
			snprintf(d_query_start + used, d_query_size + 1 - used, ", ");
			used = strlen(d_query_start);
		}
		tmp = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, j);
		tmp2 = SPI_fname(spi_tuptable->tupdesc, j);
		if (tmp == NULL)
		{
			snprintf(d_query_start + used, d_query_size + 1 - used, "%s=NULL", do_quote_ident(tmp2));
		}
		else
		{
			snprintf(d_query_start + used, d_query_size + 1 - used, "%s=%s",
					 do_quote_ident(tmp2), do_quote_literal(tmp));
		}
		used = strlen(d_query_start);
	}

	snprintf(d_query_start + used, d_query_size + 1 - used, " WHERE %s=%s",
			 do_quote_ident(table_orig_pkey), do_quote_literal(old_pkey_string));

#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", d_query_start);
#endif /* TABLE_LOG_DEBUG_QUERY */

	ret = SPI_exec(d_query_start, 0);
	if (ret != SPI_OK_UPDATE)
	{
		elog(ERROR, "could not update data in: %s", table_restore);
	}
	/* done */
}
/* * spi_exec_utility * Execute given utility command via SPI. */ static void spi_exec_utility(const char *query) { int ret; ret = SPI_exec(query, 0); if (ret != SPI_OK_UTILITY) elog(ERROR, "pg_dbms_stats: SPI_exec => %d", ret); }
/*
 * Restore a single logged INSERT: re-insert the row recorded in log row i
 * into the restore table.  Raises ERROR if the INSERT fails.
 */
void
__table_log_restore_table_insert(SPITupleTable *spi_tuptable, char *table_restore, char *table_orig_pkey, char *col_query_start, int col_pkey, int number_columns, int i)
{
	int		size_of_values, j, ret;
	char   *tmp;
	/* memory for dynamic query */
	int		d_query_size;		/* payload capacity, excluding the NUL */
	char   *d_query_start;		/* start of the statement buffer */
	size_t	used;				/* bytes already written into the buffer */

	/* compute an upper bound for the VALUES list */
	size_of_values = 0;
	for (j = 1; j <= number_columns; j++)
	{
		tmp = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, j);
		if (tmp == NULL)
		{
			size_of_values += 6;	/* "NULL" plus separator */
		}
		else
		{
			size_of_values += strlen(do_quote_literal(tmp)) + 3;
		}
	}

	/* reserve memory (250 bytes of slack for the fixed SQL text) */
	d_query_size = 250 + strlen(col_query_start) + size_of_values;
	d_query_start = (char *) palloc((d_query_size + 1) * sizeof(char));

	/*
	 * Build the query.  Appends are bounded by the remaining space.  The
	 * old code passed the total buffer size as strncat()'s length argument,
	 * which limits the bytes *appended*, not the destination size, and so
	 * provided no overflow protection at all.
	 */
	snprintf(d_query_start, d_query_size + 1, "INSERT INTO %s (%s) VALUES (",
			 do_quote_ident(table_restore), col_query_start);
	used = strlen(d_query_start);

	for (j = 1; j <= number_columns; j++)
	{
		if (j > 1)
		{
			snprintf(d_query_start + used, d_query_size + 1 - used, ", ");
			used = strlen(d_query_start);
		}
		tmp = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, j);
		if (tmp == NULL)
		{
			snprintf(d_query_start + used, d_query_size + 1 - used, "NULL");
		}
		else
		{
			snprintf(d_query_start + used, d_query_size + 1 - used, "%s", do_quote_literal(tmp));
		}
		used = strlen(d_query_start);
	}
	snprintf(d_query_start + used, d_query_size + 1 - used, ")");

#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", d_query_start);
#endif /* TABLE_LOG_DEBUG_QUERY */

	ret = SPI_exec(d_query_start, 0);
	if (ret != SPI_OK_INSERT)
	{
		elog(ERROR, "could not insert data into: %s", table_restore);
	}
	/* done */
}
/** * The given query must return exactly one row with exactly one value * * Returns a value allocated with malloc, so the returned char* must bee free'd */ static char * performSimpleQuery_(const char * query) { char * ret = NULL; SPI_connect(); int result = SPI_exec(query, 1); if ( result >= 0 ) ret = strdup( SPI_getvalue( * SPI_tuptable->vals, SPI_tuptable->tupdesc, 1 ) ); SPI_finish(); if ( result < 0 ) return NULL; return ret; }
int32
_rserv_sync_(int32 server)
#endif
{
#ifdef PG_FUNCTION_INFO_V1
	int32		server = PG_GETARG_INT32(0);
#endif
	char		sql[8192];
	char		buf[8192];
	char	   *active = buf;	/* comma-separated list of in-progress XIDs */
	uint32		xcnt;
	int			ret;

	/*
	 * NOTE(review): the matching #ifdef for the signature above is outside
	 * this view; with PG_FUNCTION_INFO_V1 the "server" argument is fetched
	 * from fcinfo instead of the C parameter list.
	 */
	if (SerializableSnapshot == NULL)	/* internal error */
		elog(ERROR, "_rserv_sync_: SerializableSnapshot is NULL");

	buf[0] = 0;
	/* build the list of transactions active in the serializable snapshot */
	for (xcnt = 0; xcnt < SerializableSnapshot->xcnt; xcnt++)
	{
		snprintf(buf + strlen(buf), 8192 - strlen(buf),
				 "%s%u", (xcnt) ? ", " : "",
				 SerializableSnapshot->xip[xcnt]);
	}

	if ((ret = SPI_connect()) < 0)		/* internal error */
		elog(ERROR, "_rserv_sync_: SPI_connect returned %d", ret);

	/* record the snapshot bounds and the active-XID list for this sync point */
	snprintf(sql, 8192, "insert into _RSERV_SYNC_ "
			 "(server, syncid, synctime, status, minid, maxid, active) "
			 "values (%u, currval('_rserv_sync_seq_'), now(), 0, %d, %d, '%s')",
			 server, SerializableSnapshot->xmin, SerializableSnapshot->xmax, active);

	ret = SPI_exec(sql, 0);
	if (ret < 0)
		ereport(ERROR,
				(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
				 errmsg("SPI_exec returned %d", ret)));

	SPI_finish();

	return (0);
}
static void get_tsq_Oid(void) { int ret; bool isnull; if ((ret = SPI_exec("select oid from pg_type where typname='tsquery'", 1)) < 0) /* internal error */ elog(ERROR, "SPI_exec to get tsquery oid returns %d", ret); if (SPI_processed < 1) /* internal error */ elog(ERROR, "there is no tsvector type"); tsqOid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)); if (tsqOid == InvalidOid) /* internal error */ elog(ERROR, "tsquery type has InvalidOid"); }
char* GetProj4StringSPI(int srid) { static int maxproj4len = 512; int spi_result; char *proj_str = palloc(maxproj4len); char proj4_spi_buffer[256]; /* Connect */ spi_result = SPI_connect(); if (spi_result != SPI_OK_CONNECT) { elog(ERROR, "GetProj4StringSPI: Could not connect to database using SPI"); } /* Execute the lookup query */ snprintf(proj4_spi_buffer, 255, "SELECT proj4text FROM spatial_ref_sys WHERE srid = %d LIMIT 1", srid); spi_result = SPI_exec(proj4_spi_buffer, 1); /* Read back the PROJ4 text */ if (spi_result == SPI_OK_SELECT && SPI_processed > 0) { /* Select the first (and only tuple) */ TupleDesc tupdesc = SPI_tuptable->tupdesc; SPITupleTable *tuptable = SPI_tuptable; HeapTuple tuple = tuptable->vals[0]; /* Make a projection object out of it */ strncpy(proj_str, SPI_getvalue(tuple, tupdesc, 1), maxproj4len - 1); } else { elog(ERROR, "GetProj4StringSPI: Cannot find SRID (%d) in spatial_ref_sys", srid); } spi_result = SPI_finish(); if (spi_result != SPI_OK_FINISH) { elog(ERROR, "GetProj4StringSPI: Could not disconnect from database using SPI"); } return proj_str; }
/* execute sql with format */ void execute_with_format(int expected, const char *format, ...) { va_list ap; StringInfoData sql; int ret; initStringInfo(&sql); va_start(ap, format); appendStringInfoVA_s(&sql, format, ap); va_end(ap); if (sql.len == 0) elog(WARNING, "execute_with_format(%s)", format); ret = SPI_exec(sql.data, 0); if EXEC_FAILED(ret, expected) elog(ERROR, "query failed: (sql=%s, code=%d, expected=%d)", sql.data, ret, expected); termStringInfo(&sql); }
Datum get_group_name(PG_FUNCTION_ARGS) { int group_id = PG_GETARG_INT32(0); int ret = SPI_connect(); if (ret < 0) elog(ERROR, "get_group_name: SPI_connect returned %d", ret); char buf[1024]; sprintf(buf, "SELECT get_group_name_by_id(%d)", group_id); elog (INFO, "get_group_name: %s", buf); ret = SPI_exec(buf, 10); if (ret < 0) elog(ERROR, "get_group_name: SPI_exec returned %d", ret); else elog(INFO, "get_group_name: SPI_exec succeeded"); char *group_name = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1); SPI_finish(); elog (INFO, "get_group_name: %s", group_name); text *result = 0; if (0 == group_name) { elog(ERROR, "get_group_name: SPI_getvalue returned null"); result = (text *)palloc(VARHDRSZ); SET_VARSIZE(result, VARHDRSZ); } else { int len = strlen(group_name); result = (text *)palloc(VARHDRSZ + len); SET_VARSIZE(result, VARHDRSZ + len); memcpy(VARDATA(result), group_name, len); } PG_RETURN_TEXT_P(result); }
/*
 * worker_merge_files_and_run_query creates a merge task table within the job's
 * schema, which should have already been created by the task tracker protocol.
 * It copies files in its task directory into this table. Then it runs final
 * query to create result table of the job.
 *
 * Note that here we followed a different approach to create a task table for merge
 * files than worker_merge_files_into_table(). In future we should unify these
 * two approaches. For this purpose creating a directory_fdw extension and using
 * it would make sense. Then we can merge files with a query or without query
 * through directory_fdw.
 */
Datum
worker_merge_files_and_run_query(PG_FUNCTION_ARGS)
{
	uint64 jobId = PG_GETARG_INT64(0);
	uint32 taskId = PG_GETARG_UINT32(1);
	text *createMergeTableQueryText = PG_GETARG_TEXT_P(2);
	text *createIntermediateTableQueryText = PG_GETARG_TEXT_P(3);
	const char *createMergeTableQuery = text_to_cstring(createMergeTableQueryText);
	const char *createIntermediateTableQuery =
		text_to_cstring(createIntermediateTableQueryText);

	StringInfo taskDirectoryName = TaskDirectoryName(jobId, taskId);
	StringInfo jobSchemaName = JobSchemaName(jobId);
	StringInfo intermediateTableName = TaskTableName(taskId);
	StringInfo mergeTableName = makeStringInfo();
	StringInfo setSearchPathString = makeStringInfo();

	bool schemaExists = false;
	int connected = 0;
	int setSearchPathResult = 0;
	int createMergeTableResult = 0;
	int createIntermediateTableResult = 0;
	int finished = 0;

	/*
	 * If the schema for the job isn't already created by the task tracker
	 * protocol, we fall to using the default 'public' schema.
	 */
	schemaExists = JobSchemaExists(jobSchemaName);
	if (!schemaExists)
	{
		resetStringInfo(jobSchemaName);
		appendStringInfoString(jobSchemaName, "public");
	}

	/* point search_path at the job schema so the queries below resolve there */
	appendStringInfo(setSearchPathString, SET_SEARCH_PATH_COMMAND, jobSchemaName->data);

	/* Add "public" to search path to access UDFs in public schema */
	appendStringInfo(setSearchPathString, ",public");

	connected = SPI_connect();
	if (connected != SPI_OK_CONNECT)
	{
		ereport(ERROR, (errmsg("could not connect to SPI manager")));
	}

	setSearchPathResult = SPI_exec(setSearchPathString->data, 0);
	if (setSearchPathResult < 0)
	{
		ereport(ERROR, (errmsg("execution was not successful \"%s\"",
							   setSearchPathString->data)));
	}

	/* create the merge table that the task files will be copied into */
	createMergeTableResult = SPI_exec(createMergeTableQuery, 0);
	if (createMergeTableResult < 0)
	{
		ereport(ERROR, (errmsg("execution was not successful \"%s\"",
							   createMergeTableQuery)));
	}

	/* load every file in the task directory into the merge table */
	appendStringInfo(mergeTableName, "%s%s", intermediateTableName->data,
					 MERGE_TABLE_SUFFIX);
	CopyTaskFilesFromDirectory(jobSchemaName, mergeTableName, taskDirectoryName);

	/* run the final query to build the job's result table */
	createIntermediateTableResult = SPI_exec(createIntermediateTableQuery, 0);
	if (createIntermediateTableResult < 0)
	{
		ereport(ERROR, (errmsg("execution was not successful \"%s\"",
							   createIntermediateTableQuery)));
	}

	finished = SPI_finish();
	if (finished != SPI_OK_FINISH)
	{
		ereport(ERROR, (errmsg("could not disconnect from SPI manager")));
	}

	PG_RETURN_VOID();
}
/* Clause 3.3.9.3 */
/*
 * TradeStatusFrame1: set-returning function that fetches the most recent
 * trades for a customer account (query TSF1_1) together with the customer
 * and broker names (query TSF1_2), and returns them as a single tuple
 * whose columns are PostgreSQL array literals built as C strings.
 */
Datum
TradeStatusFrame1(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	AttInMetadata *attinmeta;
	int call_cntr;
	int max_calls;
	int i;

	char **values = NULL;

	/* Stuff done only on the first call of the function. */
	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;

		/* indexes into the output values array */
		enum tsf1
		{
			i_broker_name=0, i_charge, i_cust_f_name, i_cust_l_name,
			i_ex_name, i_exec_name, i_num_found, i_s_name, i_status_name,
			i_symbol, i_trade_dts, i_trade_id, i_trade_qty, i_type_name
		};

		long acct_id = PG_GETARG_INT64(0);

		int ret;
		TupleDesc tupdesc;
		SPITupleTable *tuptable = NULL;
		HeapTuple tuple = NULL;
		char sql[2048];

		/*
		 * Prepare a values array for building the returned tuple.
		 * This should be an array of C strings, which will
		 * be processed later by the type input functions.
		 */
		values = (char **) palloc(sizeof(char *) * 14);
		/* array-valued columns are sized for up to 50 elements each */
		values[i_charge] = (char *) palloc((VALUE_T_LEN + 1) * sizeof(char) * 50);
		values[i_ex_name] = (char *) palloc((EX_NAME_LEN + 3) * sizeof(char) * 50);
		values[i_exec_name] = (char *) palloc((T_EXEC_NAME_LEN + 3) * sizeof(char) * 50);
		values[i_num_found] = (char *) palloc((BIGINT_LEN + 1) * sizeof(char));
		values[i_s_name] = (char *) palloc((S_NAME_LEN + 3) * sizeof(char) * 50);
		values[i_status_name] = (char *) palloc((ST_NAME_LEN + 3) * sizeof(char) * 50);
		values[i_symbol] = (char *) palloc((S_SYMB_LEN + 3) * sizeof(char) * 50);
		values[i_trade_dts] = (char *) palloc((MAXDATELEN + 1) * sizeof(char) * 50);
		values[i_trade_id] = (char *) palloc((BIGINT_LEN + 1) * sizeof(char) * 50);
		values[i_trade_qty] = (char *) palloc((INTEGER_LEN + 1) * sizeof(char) * 50);
		values[i_type_name] = (char *) palloc((TT_NAME_LEN + 3) * sizeof(char) * 50);
		values[i_cust_l_name] = NULL;
		values[i_cust_f_name] = NULL;
		values[i_broker_name] = NULL;

#ifdef DEBUG
		dump_tsf1_inputs(acct_id);
#endif

		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();
		funcctx->max_calls = 1;

		/* switch to memory context appropriate for multiple function calls */
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		SPI_connect();

		/* first query: the trade list for the account */
		sprintf(sql, TSF1_1, acct_id);
#ifdef DEBUG
		elog(NOTICE, "SQL\n%s", sql);
#endif /* DEBUG */
		ret = SPI_exec(sql, 0);
		if (ret == SPI_OK_SELECT)
		{
			tupdesc = SPI_tuptable->tupdesc;
			tuptable = SPI_tuptable;
		}
		else
		{
			FAIL_FRAME_SET(&funcctx->max_calls, sql);
			dump_tsf1_inputs(acct_id);
		}

		sprintf(values[i_num_found], "%d", SPI_processed);

		/* build each array-valued column as a "{...}" literal */
		strcpy(values[i_trade_id], "{");
		strcpy(values[i_trade_dts], "{");
		strcpy(values[i_status_name], "{");
		strcpy(values[i_type_name], "{");
		strcpy(values[i_symbol], "{");
		strcpy(values[i_trade_qty], "{");
		strcpy(values[i_exec_name], "{");
		strcpy(values[i_charge], "{");
		strcpy(values[i_s_name], "{");
		strcpy(values[i_ex_name], "{");
		for (i = 0; i < SPI_processed; i++)
		{
			tuple = tuptable->vals[i];
			if (i > 0)
			{
				strcat(values[i_trade_id], ",");
				strcat(values[i_trade_dts], ",");
				strcat(values[i_status_name], ",");
				strcat(values[i_type_name], ",");
				strcat(values[i_symbol], ",");
				strcat(values[i_trade_qty], ",");
				strcat(values[i_exec_name], ",");
				strcat(values[i_charge], ",");
				strcat(values[i_s_name], ",");
				strcat(values[i_ex_name], ",");
			}
			strcat(values[i_trade_id], SPI_getvalue(tuple, tupdesc, 1));
			strcat(values[i_trade_dts], SPI_getvalue(tuple, tupdesc, 2));
			/* string-typed elements are wrapped in double quotes */
			strcat(values[i_status_name], "\"");
			strcat(values[i_status_name], SPI_getvalue(tuple, tupdesc, 3));
			strcat(values[i_status_name], "\"");
			strcat(values[i_type_name], "\"");
			strcat(values[i_type_name], SPI_getvalue(tuple, tupdesc, 4));
			strcat(values[i_type_name], "\"");
			strcat(values[i_symbol], "\"");
			strcat(values[i_symbol], SPI_getvalue(tuple, tupdesc, 5));
			strcat(values[i_symbol], "\"");
			strcat(values[i_trade_qty], SPI_getvalue(tuple, tupdesc, 6));
			strcat(values[i_exec_name], "\"");
			strcat(values[i_exec_name], SPI_getvalue(tuple, tupdesc, 7));
			strcat(values[i_exec_name], "\"");
			strcat(values[i_charge], SPI_getvalue(tuple, tupdesc, 8));
			strcat(values[i_s_name], "\"");
			strcat(values[i_s_name], SPI_getvalue(tuple, tupdesc, 9));
			strcat(values[i_s_name], "\"");
			strcat(values[i_ex_name], "\"");
			strcat(values[i_ex_name], SPI_getvalue(tuple, tupdesc, 10));
			strcat(values[i_ex_name], "\"");
		}
		strcat(values[i_trade_id], "}");
		strcat(values[i_trade_dts], "}");
		strcat(values[i_status_name], "}");
		strcat(values[i_type_name], "}");
		strcat(values[i_symbol], "}");
		strcat(values[i_trade_qty], "}");
		strcat(values[i_exec_name], "}");
		strcat(values[i_charge], "}");
		strcat(values[i_s_name], "}");
		strcat(values[i_ex_name], "}");

		/* second query: customer and broker names for the account */
		sprintf(sql, TSF1_2, acct_id);
#ifdef DEBUG
		elog(NOTICE, "SQL\n%s", sql);
#endif /* DEBUG */
		ret = SPI_exec(sql, 0);
		if (ret == SPI_OK_SELECT)
		{
			tupdesc = SPI_tuptable->tupdesc;
			tuptable = SPI_tuptable;
			if (SPI_processed > 0)
			{
				tuple = tuptable->vals[0];
				values[i_cust_l_name] = SPI_getvalue(tuple, tupdesc, 1);
				values[i_cust_f_name] = SPI_getvalue(tuple, tupdesc, 2);
				values[i_broker_name] = SPI_getvalue(tuple, tupdesc, 3);
			}
		}
		else
		{
			FAIL_FRAME_SET(&funcctx->max_calls, sql);
			dump_tsf1_inputs(acct_id);
		}

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		{
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("function returning record called in context "
							"that cannot accept type record")));
		}

		/*
		 * generate attribute metadata needed later to produce tuples from raw
		 * C strings
		 */
		attinmeta = TupleDescGetAttInMetadata(tupdesc);
		funcctx->attinmeta = attinmeta;

		MemoryContextSwitchTo(oldcontext);
	}

	/* stuff done on every call of the function */
	funcctx = SRF_PERCALL_SETUP();

	call_cntr = funcctx->call_cntr;
	max_calls = funcctx->max_calls;
	attinmeta = funcctx->attinmeta;

	if (call_cntr < max_calls)
	{
		/* do when there is more left to send */
		HeapTuple tuple;
		Datum result;

#ifdef DEBUG
		for (i = 0; i < 14; i++)
		{
			elog(NOTICE, "TSF1 OUT: %d %s", i, values[i]);
		}
#endif /* DEBUG */

		/* Build a tuple. */
		tuple = BuildTupleFromCStrings(attinmeta, values);

		/* Make the tuple into a datum. */
		result = HeapTupleGetDatum(tuple);

		SRF_RETURN_NEXT(funcctx, result);
	}
	else
	{
		/* Do when there is no more left. */
		SPI_finish();
		SRF_RETURN_DONE(funcctx);
	}
}
Datum trigf(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; TupleDesc tupdesc; HeapTuple rettuple; char *when; bool checknull = false; bool isnull; int ret, i; /* make sure it's called as a trigger at all */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "trigf: not called by trigger manager"); /* tuple to return to executor */ if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) rettuple = trigdata->tg_newtuple; else rettuple = trigdata->tg_trigtuple; /* check for null values */ if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event) && TRIGGER_FIRED_BEFORE(trigdata->tg_event)) checknull = true; if (TRIGGER_FIRED_BEFORE(trigdata->tg_event)) when = "before"; else when = "after "; tupdesc = trigdata->tg_relation->rd_att; /* connect to SPI manager */ if ((ret = SPI_connect()) < 0) elog(ERROR, "trigf (fired %s): SPI_connect returned %d", when, ret); /* get number of rows in table */ ret = SPI_exec("SELECT count(*) FROM ttest", 0); if (ret < 0) elog(ERROR, "trigf (fired %s): SPI_exec returned %d", when, ret); /* count(*) returns int8, so be careful to convert */ i = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)); elog (INFO, "trigf (fired %s): there are %d rows in ttest", when, i); SPI_finish(); if (checknull) { SPI_getbinval(rettuple, tupdesc, 1, &isnull); if (isnull) rettuple = NULL; } return PointerGetDatum(rettuple); }
Datum delivery(PG_FUNCTION_ARGS) { /* Input variables. */ int32 w_id = PG_GETARG_INT32(0); int32 o_carrier_id = PG_GETARG_INT32(1); TupleDesc tupdesc; SPITupleTable *tuptable; HeapTuple tuple; char query[256]; int d_id; int ret; char *no_o_id = NULL; char *o_c_id = NULL; char *ol_amount = NULL; SPI_connect(); for (d_id = 1; d_id <= 10; d_id++) { sprintf(query, DELIVERY_1, w_id, d_id); elog(DEBUG1, "%s", query); ret = SPI_exec(query, 0); if (ret == SPI_OK_SELECT && SPI_processed > 0) { tupdesc = SPI_tuptable->tupdesc; tuptable = SPI_tuptable; tuple = tuptable->vals[0]; no_o_id = SPI_getvalue(tuple, tupdesc, 1); elog(DEBUG1, "no_o_id = %s", no_o_id); } else { /* Nothing to deliver for this district, try next district. */ continue; } sprintf(query, DELIVERY_2, no_o_id, w_id, d_id); elog(DEBUG1, "%s", query); ret = SPI_exec(query, 0); if (ret != SPI_OK_DELETE) { SPI_finish(); PG_RETURN_INT32(-1); } sprintf(query, DELIVERY_3, no_o_id, w_id, d_id); elog(DEBUG1, "%s", query); ret = SPI_exec(query, 0); if (ret == SPI_OK_SELECT && SPI_processed > 0) { tupdesc = SPI_tuptable->tupdesc; tuptable = SPI_tuptable; tuple = tuptable->vals[0]; o_c_id = SPI_getvalue(tuple, tupdesc, 1); elog(DEBUG1, "o_c_id = %s", no_o_id); } else { SPI_finish(); PG_RETURN_INT32(-1); } sprintf(query, DELIVERY_4, o_carrier_id, no_o_id, w_id, d_id); elog(DEBUG1, "%s", query); ret = SPI_exec(query, 0); if (ret != SPI_OK_UPDATE) { SPI_finish(); PG_RETURN_INT32(-1); } sprintf(query, DELIVERY_5, no_o_id, w_id, d_id); elog(DEBUG1, "%s", query); ret = SPI_exec(query, 0); if (ret != SPI_OK_UPDATE) { SPI_finish(); PG_RETURN_INT32(-1); } sprintf(query, DELIVERY_6, no_o_id, w_id, d_id); elog(DEBUG1, "%s", query); ret = SPI_exec(query, 0); if (ret == SPI_OK_SELECT && SPI_processed > 0) { tupdesc = SPI_tuptable->tupdesc; tuptable = SPI_tuptable; tuple = tuptable->vals[0]; ol_amount = SPI_getvalue(tuple, tupdesc, 1); elog(DEBUG1, "ol_amount = %s", no_o_id); } else { SPI_finish(); PG_RETURN_INT32(-1); } 
sprintf(query, DELIVERY_7, ol_amount, o_c_id, w_id, d_id); elog(DEBUG1, "%s", query); ret = SPI_exec(query, 0); if (ret != SPI_OK_UPDATE) { SPI_finish(); PG_RETURN_INT32(-1); } } SPI_finish(); PG_RETURN_INT32(1); }
/*
 * pc_schema_from_pcid_uncached
 *    Build a PCSCHEMA object from the pointcloud_formats row for the
 *    given pcid, bypassing the schema cache.  Raises ERROR when the pcid
 *    is unknown or its XML cannot be parsed.
 */
PCSCHEMA *
pc_schema_from_pcid_uncached(uint32 pcid)
{
	char sql[256];
	char *xml, *xml_spi, *srid_spi;
	int err, srid;
	size_t size;
	PCSCHEMA *schema;

	if (SPI_OK_CONNECT != SPI_connect ())
	{
		SPI_finish();
		elog(ERROR, "pc_schema_from_pcid: could not connect to SPI manager");
		return NULL;
	}

	/*
	 * pcid is unsigned: use %u (the old %d mangled values above INT_MAX),
	 * and bound the write with snprintf in case the table-name macros are
	 * ever longer than expected.
	 */
	snprintf(sql, sizeof(sql), "select %s, %s from %s where pcid = %u",
			 POINTCLOUD_FORMATS_XML, POINTCLOUD_FORMATS_SRID, POINTCLOUD_FORMATS, pcid);
	err = SPI_exec(sql, 1);

	if ( err < 0 )
	{
		SPI_finish();
		elog(ERROR, "pc_schema_from_pcid: error (%d) executing query: %s", err, sql);
		return NULL;
	}

	/* No entry in POINTCLOUD_FORMATS */
	if (SPI_processed <= 0)
	{
		SPI_finish();
		elog(ERROR, "no entry in \"%s\" for pcid = %d", POINTCLOUD_FORMATS, pcid);
		return NULL;
	}

	/* Result */
	xml_spi = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
	srid_spi = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2);

	/* NULL result */
	if ( ! ( xml_spi && srid_spi ) )
	{
		SPI_finish();
		elog(ERROR, "unable to read row from \"%s\" for pcid = %d", POINTCLOUD_FORMATS, pcid);
		return NULL;
	}

	/* Copy result to upper executor context */
	size = strlen(xml_spi) + 1;
	xml = SPI_palloc(size);
	memcpy(xml, xml_spi, size);

	/* Parse the SRID string into the function stack */
	srid = atoi(srid_spi);

	/* Disconnect from SPI, losing all our SPI-allocated memory now... */
	SPI_finish();

	/* Build the schema object */
	err = pc_schema_from_xml(xml, &schema);

	if ( ! err )
	{
		ereport(ERROR,
				(errcode(ERRCODE_NOT_AN_XML_DOCUMENT),
				 errmsg("unable to parse XML for pcid = %d in \"%s\"", pcid, POINTCLOUD_FORMATS)));
	}

	schema->pcid = pcid;
	schema->srid = srid;

	return schema;
}
/*
 * xpath_table: table function that evaluates a '|'-separated set of XPath
 * expressions against XML documents stored in a table column, returning
 * one output row per document (or per nodeset member) consisting of the
 * primary key plus the XPath results as text columns.
 */
Datum
xpath_table(PG_FUNCTION_ARGS)
{
	/* Function parameters */
	char	   *pkeyfield = text_to_cstring(PG_GETARG_TEXT_PP(0));
	char	   *xmlfield = text_to_cstring(PG_GETARG_TEXT_PP(1));
	char	   *relname = text_to_cstring(PG_GETARG_TEXT_PP(2));
	char	   *xpathset = text_to_cstring(PG_GETARG_TEXT_PP(3));
	char	   *condition = text_to_cstring(PG_GETARG_TEXT_PP(4));

	/* SPI (input tuple) support */
	SPITupleTable *tuptable;
	HeapTuple	spi_tuple;
	TupleDesc	spi_tupdesc;

	/* Output tuple (tuplestore) support */
	Tuplestorestate *tupstore = NULL;
	TupleDesc	ret_tupdesc;
	HeapTuple	ret_tuple;

	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	AttInMetadata *attinmeta;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;

	char	  **values;
	xmlChar   **xpaths;
	char	   *pos;
	const char *pathsep = "|";

	int			numpaths;
	int			ret;
	int			proc;
	int			i;
	int			j;
	int			rownr;			/* For issuing multiple rows from one original
								 * document */
	bool		had_values;		/* To determine end of nodeset results */
	StringInfoData query_buf;
	PgXmlErrorContext *xmlerrcxt;
	volatile xmlDocPtr doctree = NULL;

	/* We only have a valid tuple description in table function mode */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table must be called as a table function")));

	/*
	 * We want to materialise because it means that we don't have to carry
	 * libxml2 parser state between invocations of this function
	 */
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table requires Materialize mode, but it is not "
						"allowed in this context")));

	/*
	 * The tuplestore must exist in a higher context than this function call
	 * (per_query_ctx is used)
	 */
	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/*
	 * Create the tuplestore - work_mem is the max in-memory size before a
	 * file is created on disk to hold it.
	 */
	tupstore =
		tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
							  false, work_mem);

	MemoryContextSwitchTo(oldcontext);

	/* get the requested return tuple description */
	ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);

	/* must have at least one output column (for the pkey) */
	if (ret_tupdesc->natts < 1)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table must have at least one output column")));

	/*
	 * At the moment we assume that the returned attributes make sense for the
	 * XPath specififed (i.e. we trust the caller). It's not fatal if they get
	 * it wrong - the input function for the column type will raise an error
	 * if the path result can't be converted into the correct binary
	 * representation.
	 */

	attinmeta = TupleDescGetAttInMetadata(ret_tupdesc);

	/* Set return mode and allocate value space. */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setDesc = ret_tupdesc;

	values = (char **) palloc(ret_tupdesc->natts * sizeof(char *));
	xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *));

	/*
	 * Split XPaths. xpathset is a writable CString.
	 *
	 * Note that we stop splitting once we've done all needed for tupdesc
	 */
	numpaths = 0;
	pos = xpathset;
	while (numpaths < (ret_tupdesc->natts - 1))
	{
		xpaths[numpaths++] = (xmlChar *) pos;
		pos = strstr(pos, pathsep);
		if (pos != NULL)
		{
			*pos = '\0';
			pos++;
		}
		else
			break;
	}

	/* Now build query */
	initStringInfo(&query_buf);

	/* Build initial sql statement */
	appendStringInfo(&query_buf, "SELECT %s, %s FROM %s WHERE %s",
					 pkeyfield,
					 xmlfield,
					 relname,
					 condition);

	if ((ret = SPI_connect()) < 0)
		elog(ERROR, "xpath_table: SPI_connect returned %d", ret);

	if ((ret = SPI_exec(query_buf.data, 0)) != SPI_OK_SELECT)
		elog(ERROR, "xpath_table: SPI execution failed for query %s",
			 query_buf.data);

	proc = SPI_processed;
	/* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */
	tuptable = SPI_tuptable;
	spi_tupdesc = tuptable->tupdesc;

	/* Switch out of SPI context */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Check that SPI returned correct result. If you put a comma into one of
	 * the function parameters, this will catch it when the SPI query returns
	 * e.g. 3 columns.
	 */
	if (spi_tupdesc->natts != 2)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("expression returning multiple columns is not valid in parameter list"),
				 errdetail("Expected two columns in SPI result, got %d.", spi_tupdesc->natts)));
	}

	/*
	 * Setup the parser. This should happen after we are done evaluating the
	 * query, in case it calls functions that set up libxml differently.
	 */
	xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY);

	PG_TRY();
	{
		/* For each row i.e. document returned from SPI */
		for (i = 0; i < proc; i++)
		{
			char	   *pkey;
			char	   *xmldoc;
			xmlXPathContextPtr ctxt;
			xmlXPathObjectPtr res;
			xmlChar    *resstr;
			xmlXPathCompExprPtr comppath;

			/* Extract the row data as C Strings */
			spi_tuple = tuptable->vals[i];
			pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
			xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);

			/*
			 * Clear the values array, so that not-well-formed documents
			 * return NULL in all columns.  Note that this also means that
			 * spare columns will be NULL.
			 */
			for (j = 0; j < ret_tupdesc->natts; j++)
				values[j] = NULL;

			/* Insert primary key */
			values[0] = pkey;

			/* Parse the document */
			if (xmldoc)
				doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
			else				/* treat NULL as not well-formed */
				doctree = NULL;

			if (doctree == NULL)
			{
				/* not well-formed, so output all-NULL tuple */
				ret_tuple = BuildTupleFromCStrings(attinmeta, values);
				tuplestore_puttuple(tupstore, ret_tuple);
				heap_freetuple(ret_tuple);
			}
			else
			{
				/* New loop here - we have to deal with nodeset results */
				rownr = 0;

				do
				{
					/* Now evaluate the set of xpaths. */
					had_values = false;
					for (j = 0; j < numpaths; j++)
					{
						ctxt = xmlXPathNewContext(doctree);
						ctxt->node = xmlDocGetRootElement(doctree);

						/* compile the path */
						comppath = xmlXPathCompile(xpaths[j]);
						if (comppath == NULL)
							xml_ereport(xmlerrcxt, ERROR,
										ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
										"XPath Syntax Error");

						/* Now evaluate the path expression. */
						res = xmlXPathCompiledEval(comppath, ctxt);
						xmlXPathFreeCompExpr(comppath);

						if (res != NULL)
						{
							switch (res->type)
							{
								case XPATH_NODESET:
									/* We see if this nodeset has enough nodes */
									if (res->nodesetval != NULL &&
										rownr < res->nodesetval->nodeNr)
									{
										resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
										had_values = true;
									}
									else
										resstr = NULL;

									break;

								case XPATH_STRING:
									resstr = xmlStrdup(res->stringval);
									break;

								default:
									elog(NOTICE, "unsupported XQuery result: %d", res->type);
									resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
							}

							/*
							 * Insert this into the appropriate column in the
							 * result tuple.
							 */
							values[j + 1] = (char *) resstr;
						}
						xmlXPathFreeContext(ctxt);
					}

					/* Now add the tuple to the output, if there is one. */
					if (had_values)
					{
						ret_tuple = BuildTupleFromCStrings(attinmeta, values);
						tuplestore_puttuple(tupstore, ret_tuple);
						heap_freetuple(ret_tuple);
					}

					rownr++;
				} while (had_values);
			}

			if (doctree != NULL)
				xmlFreeDoc(doctree);
			doctree = NULL;

			if (pkey)
				pfree(pkey);
			if (xmldoc)
				pfree(xmldoc);
		}
	}
	PG_CATCH();
	{
		if (doctree != NULL)
			xmlFreeDoc(doctree);

		pg_xml_done(xmlerrcxt, true);

		PG_RE_THROW();
	}
	PG_END_TRY();

	if (doctree != NULL)
		xmlFreeDoc(doctree);

	pg_xml_done(xmlerrcxt, false);

	tuplestore_donestoring(tupstore);

	SPI_finish();

	rsinfo->setResult = tupstore;

	/*
	 * SFRM_Materialize mode expects us to return a NULL Datum. The actual
	 * tuples are in our tuplestore and passed back through rsinfo->setResult.
	 * rsinfo->setDesc is set to the tuple description that we actually used
	 * to build our tuples with, so the caller can verify we did what it was
	 * expecting.
	 */
	return (Datum) 0;
}
/* * refresh_by_match_merge * * Refresh a materialized view with transactional semantics, while allowing * concurrent reads. * * This is called after a new version of the data has been created in a * temporary table. It performs a full outer join against the old version of * the data, producing "diff" results. This join cannot work if there are any * duplicated rows in either the old or new versions, in the sense that every * column would compare as equal between the two rows. It does work correctly * in the face of rows which have at least one NULL value, with all non-NULL * columns equal. The behavior of NULLs on equality tests and on UNIQUE * indexes turns out to be quite convenient here; the tests we need to make * are consistent with default behavior. If there is at least one UNIQUE * index on the materialized view, we have exactly the guarantee we need. * * The temporary table used to hold the diff results contains just the TID of * the old record (if matched) and the ROW from the new table as a single * column of complex record type (if matched). * * Once we have the diff table, we perform set-based DELETE and INSERT * operations against the materialized view, and discard both temporary * tables. * * Everything from the generation of the new data to applying the differences * takes place under cover of an ExclusiveLock, since it seems as though we * would want to prohibit not only concurrent REFRESH operations, but also * incremental maintenance. It also doesn't seem reasonable or safe to allow * SELECT FOR UPDATE or SELECT FOR SHARE on rows being updated or deleted by * this command. 
*/ static void refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, int save_sec_context) { StringInfoData querybuf; Relation matviewRel; Relation tempRel; char *matviewname; char *tempname; char *diffname; TupleDesc tupdesc; bool foundUniqueIndex; List *indexoidlist; ListCell *indexoidscan; int16 relnatts; bool *usedForQual; initStringInfo(&querybuf); matviewRel = heap_open(matviewOid, NoLock); matviewname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)), RelationGetRelationName(matviewRel)); tempRel = heap_open(tempOid, NoLock); tempname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(tempRel)), RelationGetRelationName(tempRel)); diffname = make_temptable_name_n(tempname, 2); relnatts = matviewRel->rd_rel->relnatts; usedForQual = (bool *) palloc0(sizeof(bool) * relnatts); /* Open SPI context. */ if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed"); /* Analyze the temp table with the new contents. */ appendStringInfo(&querybuf, "ANALYZE %s", tempname); if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY) elog(ERROR, "SPI_exec failed: %s", querybuf.data); /* * We need to ensure that there are not duplicate rows without NULLs in * the new data set before we can count on the "diff" results. Check for * that in a way that allows showing the first duplicated row found. Even * after we pass this test, a unique index on the materialized view may * find a duplicate key problem. 
*/ resetStringInfo(&querybuf); appendStringInfo(&querybuf, "SELECT newdata FROM %s newdata " "WHERE newdata IS NOT NULL AND EXISTS " "(SELECT * FROM %s newdata2 WHERE newdata2 IS NOT NULL " "AND newdata2 OPERATOR(pg_catalog.*=) newdata " "AND newdata2.ctid OPERATOR(pg_catalog.<>) " "newdata.ctid) LIMIT 1", tempname, tempname); if (SPI_execute(querybuf.data, false, 1) != SPI_OK_SELECT) elog(ERROR, "SPI_exec failed: %s", querybuf.data); if (SPI_processed > 0) { ereport(ERROR, (errcode(ERRCODE_CARDINALITY_VIOLATION), errmsg("new data for \"%s\" contains duplicate rows without any null columns", RelationGetRelationName(matviewRel)), errdetail("Row: %s", SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1)))); } SetUserIdAndSecContext(relowner, save_sec_context | SECURITY_LOCAL_USERID_CHANGE); /* Start building the query for creating the diff table. */ resetStringInfo(&querybuf); appendStringInfo(&querybuf, "CREATE TEMP TABLE %s AS " "SELECT mv.ctid AS tid, newdata " "FROM %s mv FULL JOIN %s newdata ON (", diffname, matviewname, tempname); /* * Get the list of index OIDs for the table from the relcache, and look up * each one in the pg_index syscache. We will test for equality on all * columns present in all unique indexes which only reference columns and * include all rows. */ tupdesc = matviewRel->rd_att; foundUniqueIndex = false; indexoidlist = RelationGetIndexList(matviewRel); foreach(indexoidscan, indexoidlist) { Oid indexoid = lfirst_oid(indexoidscan); Relation indexRel; Form_pg_index indexStruct; indexRel = index_open(indexoid, RowExclusiveLock); indexStruct = indexRel->rd_index; /* * We're only interested if it is unique, valid, contains no * expressions, and is not partial. */ if (indexStruct->indisunique && IndexIsValid(indexStruct) && RelationGetIndexExpressions(indexRel) == NIL && RelationGetIndexPredicate(indexRel) == NIL) { int numatts = indexStruct->indnatts; int i; /* Add quals for all columns from this index. 
*/ for (i = 0; i < numatts; i++) { int attnum = indexStruct->indkey.values[i]; Oid type; Oid op; const char *colname; /* * Only include the column once regardless of how many times * it shows up in how many indexes. */ if (usedForQual[attnum - 1]) continue; usedForQual[attnum - 1] = true; /* * Actually add the qual, ANDed with any others. */ if (foundUniqueIndex) appendStringInfoString(&querybuf, " AND "); colname = quote_identifier(NameStr((tupdesc->attrs[attnum - 1])->attname)); appendStringInfo(&querybuf, "newdata.%s ", colname); type = attnumTypeId(matviewRel, attnum); op = lookup_type_cache(type, TYPECACHE_EQ_OPR)->eq_opr; mv_GenerateOper(&querybuf, op); appendStringInfo(&querybuf, " mv.%s", colname); foundUniqueIndex = true; } } /* Keep the locks, since we're about to run DML which needs them. */ index_close(indexRel, NoLock); }
/* Clause 3.3.1.3 */
/*
 * MarketWatchFrame1 - compute the percentage change in market capitalization
 * for a watch list of securities (TPC-E Market-Watch transaction, frame 1).
 *
 * Exactly one of three selectors chooses the symbol list, checked in this
 * order: cust_id (query MWF1_1), industry_name plus the
 * starting_co_id..ending_co_id range (MWF1_2), or acct_id (MWF1_3).  For
 * each symbol, the current price (MWF1_4), outstanding shares (MWF1_5) and
 * the closing price on start_date (MWF1_6) are fetched and the old/new
 * market caps accumulated.
 *
 * Returns pct_change = 100 * (new_mkt_cap / old_mkt_cap - 1.0) as NUMERIC.
 */
Datum
MarketWatchFrame1(PG_FUNCTION_ARGS)
{
	int i;
	int status = 0;
	double old_mkt_cap = 0.0;
	double new_mkt_cap = 0.0;
	double pct_change = 0.0;
	struct pg_tm tt, *tm = &tt;

	/* Frame inputs. */
	int64 acct_id = PG_GETARG_INT64(0);
	int64 cust_id = PG_GETARG_INT64(1);
	int64 ending_co_id = PG_GETARG_INT64(2);
	char *industry_name_p = (char *) PG_GETARG_TEXT_P(3);
	DateADT start_date_p = PG_GETARG_DATEADT(4);
	int64 starting_co_id = PG_GETARG_INT64(5);

	int ret;
	TupleDesc tupdesc;
	SPITupleTable *tuptable = NULL;
	HeapTuple tuple = NULL;
	Datum result;
	char buf[MAXDATELEN + 1];
	char industry_name[IN_NAME_LEN + 1];
	char sql[2048] = "";

	/* Render start_date as a date-only string into buf. */
	j2date(start_date_p + POSTGRES_EPOCH_JDATE, &(tm->tm_year),
			&(tm->tm_mon), &(tm->tm_mday));
	EncodeDateOnly(tm, DateStyle, buf);

	/* Convert the industry-name text argument into a C string. */
	strcpy(industry_name, DatumGetCString(DirectFunctionCall1(textout,
			PointerGetDatum(industry_name_p))));

#ifdef DEBUG
	dump_mwf1_inputs(acct_id, cust_id, ending_co_id, industry_name, buf,
			starting_co_id);
#endif

	SPI_connect();

	/* Pick the symbol-list query based on which selector was supplied. */
	if (cust_id != 0) {
		sprintf(sql, MWF1_1, cust_id);
	} else if (industry_name[0] != '\0') {
		sprintf(sql, MWF1_2, industry_name, starting_co_id, ending_co_id);
	} else if (acct_id != 0) {
		sprintf(sql, MWF1_3, acct_id);
	} else {
		status = BAD_INPUT_DATA;
	}
#ifdef DEBUG
	elog(NOTICE, "SQL\n%s", sql);
#endif /* DEBUG */
	/*
	 * NOTE(review): when status == BAD_INPUT_DATA, sql is still the empty
	 * string here yet is executed anyway -- confirm FAIL_FRAME makes this
	 * path safe.
	 */
	ret = SPI_exec(sql, 0);
	if (ret != SPI_OK_SELECT) {
		dump_mwf1_inputs(acct_id, cust_id, ending_co_id, industry_name, buf,
				starting_co_id);
		FAIL_FRAME(sql);
	}
	if (status != BAD_INPUT_DATA) {
		int count = SPI_processed;

		TupleDesc tupdesc4;
		SPITupleTable *tuptable4 = NULL;
		HeapTuple tuple4 = NULL;

		char *symbol;
		char *new_price;
		char *old_price;
		char *s_num_out;

		tupdesc = SPI_tuptable->tupdesc;
		tuptable = SPI_tuptable;

		/* Accumulate old/new market caps over every symbol in the list. */
		for (i = 0; i < count; i++) {
			tuple = tuptable->vals[i];
			symbol = SPI_getvalue(tuple, tupdesc, 1);
#ifdef DEBUG
			elog(NOTICE, " symbol = '%s'", symbol);
#endif /* DEBUG */

			/* Current (new) price for this symbol. */
			sprintf(sql, MWF1_4, symbol);
#ifdef DEBUG
			elog(NOTICE, "SQL\n%s", sql);
#endif /* DEBUG */
			ret = SPI_exec(sql, 0);
			if (ret != SPI_OK_SELECT || SPI_processed == 0) {
				dump_mwf1_inputs(acct_id, cust_id, ending_co_id,
						industry_name, buf, starting_co_id);
				FAIL_FRAME(sql);
				continue;
			}
			tupdesc4 = SPI_tuptable->tupdesc;
			tuptable4 = SPI_tuptable;
			tuple4 = tuptable4->vals[0];
			new_price = SPI_getvalue(tuple4, tupdesc4, 1);
#ifdef DEBUG
			elog(NOTICE, " new_price = %s", new_price);
			elog(NOTICE, " new_price = %f", atof(new_price));
#endif /* DEBUG */

			/* Number of outstanding shares for this symbol. */
			sprintf(sql, MWF1_5, symbol);
#ifdef DEBUG
			elog(NOTICE, "SQL\n%s", sql);
#endif /* DEBUG */
			ret = SPI_exec(sql, 0);
			if (ret != SPI_OK_SELECT) {
				dump_mwf1_inputs(acct_id, cust_id, ending_co_id,
						industry_name, buf, starting_co_id);
				elog(NOTICE, "ERROR: sql not ok = %d", ret);
			}
			/*
			 * NOTE(review): vals[0] is read here without checking
			 * SPI_processed > 0 -- confirm MWF1_5 can never return zero
			 * rows for a known symbol.
			 */
			tupdesc4 = SPI_tuptable->tupdesc;
			tuptable4 = SPI_tuptable;
			tuple4 = tuptable4->vals[0];
			s_num_out = SPI_getvalue(tuple4, tupdesc4, 1);
#ifdef DEBUG
			elog(NOTICE, " s_num_out = %s", s_num_out);
#endif /* DEBUG */

			/* Closing (old) price as of start_date. */
			sprintf(sql, MWF1_6, symbol, pstrdup(buf));
#ifdef DEBUG
			elog(NOTICE, "SQL\n%s", sql);
#endif /* DEBUG */
			ret = SPI_exec(sql, 0);
			if (ret != SPI_OK_SELECT) {
				dump_mwf1_inputs(acct_id, cust_id, ending_co_id,
						industry_name, buf, starting_co_id);
				FAIL_FRAME(sql);
			}
			if (SPI_processed == 0) {
				/* No historical price: symbol counts only toward new cap. */
				elog(NOTICE, "No rows returned for getting old_price.");
			} else {
				tupdesc4 = SPI_tuptable->tupdesc;
				tuptable4 = SPI_tuptable;
				tuple4 = tuptable4->vals[0];
				old_price = SPI_getvalue(tuple4, tupdesc4, 1);
#ifdef DEBUG
				elog(NOTICE, " old_price = %s", old_price);
				elog(NOTICE, " old_price = %f", atof(old_price));
#endif /* DEBUG */
				old_mkt_cap += atof(s_num_out) * atof(old_price);
			}
			new_mkt_cap += atof(s_num_out) * atof(new_price);
#ifdef DEBUG
			elog(NOTICE, "old_mkt_cap = %f", old_mkt_cap);
			elog(NOTICE, "new_mkt_cap = %f", new_mkt_cap);
#endif /* DEBUG */
		}
		/*
		 * NOTE(review): divides by old_mkt_cap, which stays 0.0 when no
		 * old prices were found -- confirm callers tolerate the resulting
		 * inf/nan.
		 */
		pct_change = 100.0 * (new_mkt_cap / old_mkt_cap - 1.0);
#ifdef DEBUG
		elog(NOTICE, "pct_change = %f", pct_change);
#endif /* DEBUG */
	}

#ifdef DEBUG
	elog(NOTICE, "MWF1 OUT: 1 %f", pct_change);
#endif /* DEBUG */

	SPI_finish();
	result = DirectFunctionCall1(float8_numeric, Float8GetDatum(pct_change));
	PG_RETURN_NUMERIC(result);
}
/*
 * plr_SPI_exec - The builtin SPI_exec command for the R interpreter.
 *
 * rsql is coerced to an R character vector; element 0 is executed through
 * SPI with no row limit (count == 0).  The return value depends on the
 * statement kind:
 *   - utility statements        -> the string "0"
 *   - INSERT/UPDATE/DELETE/SELINTO -> the affected row count as a string
 *   - SELECT with rows          -> an R object built by rpgsql_get_results()
 *   - SELECT with no rows       -> R_NilValue
 * Any SPI error code raises an R error().  PostgreSQL elog/ereport is
 * trapped around SPI_exec so the error is re-raised only after control
 * leaves the R interpreter.
 */
SEXP
plr_SPI_exec(SEXP rsql)
{
	int spi_rc = 0;
	char buf[64];
	const char *sql;
	int count = 0;				/* 0 = no row-count limit for SPI_exec */
	int ntuples;
	SEXP result = NULL;
	MemoryContext oldcontext;
	PREPARE_PG_TRY;

	/* set up error context */
	PUSH_PLERRCONTEXT(rsupport_error_callback, "pg.spi.exec");

	PROTECT(rsql = AS_CHARACTER(rsql));
	sql = CHAR(STRING_ELT(rsql, 0));
	/*
	 * NOTE(review): sql points into rsql's storage but is used after
	 * UNPROTECT(1) -- presumably safe because R cannot run a GC between
	 * here and SPI_exec; confirm.
	 */
	UNPROTECT(1);
	if (sql == NULL)
		error("%s", "cannot exec empty query");

	/* switch to SPI memory context */
	oldcontext = MemoryContextSwitchTo(plr_SPI_context);

	/*
	 * trap elog/ereport so we can let R finish up gracefully
	 * and generate the error once we exit the interpreter
	 */
	PG_TRY();
	{
		/* Execute the query and handle return codes */
		spi_rc = SPI_exec(sql, count);
	}
	PLR_PG_CATCH();
	PLR_PG_END_TRY();

	/* back to caller's memory context */
	MemoryContextSwitchTo(oldcontext);

	switch (spi_rc)
	{
		case SPI_OK_UTILITY:
			/* utility statements have no row count; report "0" */
			snprintf(buf, sizeof(buf), "%d", 0);
			SPI_freetuptable(SPI_tuptable);
			PROTECT(result = NEW_CHARACTER(1));
			SET_STRING_ELT(result, 0, COPY_TO_USER_STRING(buf));
			UNPROTECT(1);
			break;
		case SPI_OK_SELINTO:
		case SPI_OK_INSERT:
		case SPI_OK_DELETE:
		case SPI_OK_UPDATE:
			/* DML: return the affected-row count as a string */
			snprintf(buf, sizeof(buf), "%d", SPI_processed);
			SPI_freetuptable(SPI_tuptable);
			PROTECT(result = NEW_CHARACTER(1));
			SET_STRING_ELT(result, 0, COPY_TO_USER_STRING(buf));
			UNPROTECT(1);
			break;
		case SPI_OK_SELECT:
			ntuples = SPI_processed;
			if (ntuples > 0)
			{
				/* convert the SPI result set into an R object */
				result = rpgsql_get_results(ntuples, SPI_tuptable);
				SPI_freetuptable(SPI_tuptable);
			}
			else
				result = R_NilValue;
			break;
		case SPI_ERROR_ARGUMENT:
			error("SPI_exec() failed: SPI_ERROR_ARGUMENT");
			break;
		case SPI_ERROR_UNCONNECTED:
			error("SPI_exec() failed: SPI_ERROR_UNCONNECTED");
			break;
		case SPI_ERROR_COPY:
			error("SPI_exec() failed: SPI_ERROR_COPY");
			break;
		case SPI_ERROR_CURSOR:
			error("SPI_exec() failed: SPI_ERROR_CURSOR");
			break;
		case SPI_ERROR_TRANSACTION:
			error("SPI_exec() failed: SPI_ERROR_TRANSACTION");
			break;
		case SPI_ERROR_OPUNKNOWN:
			error("SPI_exec() failed: SPI_ERROR_OPUNKNOWN");
			break;
		default:
			error("SPI_exec() failed: %d", spi_rc);
			break;
	}

	POP_PLERRCONTEXT;
	return result;
}
/*
 * _rserv_log_ - row-level trigger recording changes into _RSERV_LOG_.
 *
 * Expects exactly one trigger argument: the attribute number of the
 * replicated table's key column (or ObjectIdAttributeNumber to key on the
 * row OID).  For each fired row it UPDATEs the existing _RSERV_LOG_ entry
 * for (reloid, key); if no entry was updated, it INSERTs one.  On UPDATEs
 * that change the key value, the old key is logged as deleted and a fresh
 * entry is inserted for the new key.  Returns the unmodified trigger tuple.
 *
 * The V0/V1 signature split is handled by preprocessor branches; the
 * matching #if for the #endif below is above this excerpt.
 */
HeapTuple
_rserv_log_()
#endif
{
	Trigger *trigger;	/* to get trigger name */
	int nargs;			/* # of args specified in CREATE TRIGGER */
	char **args;		/* argument: argnum */
	Relation rel;		/* triggered relation */
	HeapTuple tuple;	/* tuple to return */
	HeapTuple newtuple = NULL;	/* tuple to return */
	TupleDesc tupdesc;	/* tuple description */
	int keynum;
	char *key;
	char *okey;
	char *newkey = NULL;
	int deleted;
	char sql[8192];
	char outbuf[8192];
	char oidbuf[64];
	int ret;

	/* Called by trigger manager ? */
	if (!CurrentTriggerData)
		/* internal error */
		elog(ERROR, "_rserv_log_: triggers are not initialized");

	/* Should be called for ROW trigger */
	if (TRIGGER_FIRED_FOR_STATEMENT(CurrentTriggerData->tg_event))
		/* internal error */
		elog(ERROR, "_rserv_log_: can't process STATEMENT events");

	tuple = CurrentTriggerData->tg_trigtuple;

	trigger = CurrentTriggerData->tg_trigger;
	nargs = trigger->tgnargs;
	args = trigger->tgargs;

	if (nargs != 1)	/* odd number of arguments! */
		/* internal error */
		elog(ERROR, "_rserv_log_: need in *one* argument");

	keynum = atoi(args[0]);
	if (keynum < 0 && keynum != ObjectIdAttributeNumber)
		/* internal error */
		elog(ERROR, "_rserv_log_: invalid keynum %d", keynum);

	rel = CurrentTriggerData->tg_relation;
	tupdesc = rel->rd_att;

	/* DELETE events are logged with deleted = 1. */
	deleted = (TRIGGER_FIRED_BY_DELETE(CurrentTriggerData->tg_event)) ? 1 : 0;

	if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
		newtuple = CurrentTriggerData->tg_newtuple;

#ifndef PG_FUNCTION_INFO_V1
	/*
	 * Setting CurrentTriggerData to NULL prevents direct calls to trigger
	 * functions in queries. Normally, trigger functions have to be called
	 * by trigger manager code only.
	 */
	CurrentTriggerData = NULL;
#endif

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		/* internal error */
		elog(ERROR, "_rserv_log_: SPI_connect returned %d", ret);

	/* Fetch the key value: either the row OID or the keyed column. */
	if (keynum == ObjectIdAttributeNumber)
	{
		snprintf(oidbuf, sizeof(oidbuf), "%u", HeapTupleGetOid(tuple));
		key = oidbuf;
	}
	else
		key = SPI_getvalue(tuple, tupdesc, keynum);

	if (key == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_NOT_NULL_VIOLATION),
				 errmsg("key must be not null")));

	/* On UPDATE, see whether the key column itself changed. */
	if (newtuple && keynum != ObjectIdAttributeNumber)
	{
		newkey = SPI_getvalue(newtuple, tupdesc, keynum);
		if (newkey == NULL)
			ereport(ERROR,
					(errcode(ERRCODE_NOT_NULL_VIOLATION),
					 errmsg("key must be not null")));
		if (strcmp(newkey, key) == 0)
			newkey = NULL;		/* key unchanged: single log entry suffices */
		else
			deleted = 1;		/* old key was deleted */
	}

	/* Escape the key if it contains characters needing quoting. */
	if (strpbrk(key, "\\ \n'"))
		okey = OutputValue(key, outbuf, sizeof(outbuf));
	else
		okey = key;

	snprintf(sql, 8192, "update _RSERV_LOG_ set logid = %d, logtime = now(), "
			 "deleted = %d where reloid = %u and key = '%s'",
			 GetCurrentTransactionId(), deleted, rel->rd_id, okey);
	if (debug)
		elog(DEBUG4, "sql: %s", sql);
	ret = SPI_exec(sql, 0);
	if (ret < 0)
		ereport(ERROR,
				(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
				 errmsg("SPI_exec(update) returned %d", ret)));

	/*
	 * If no tuple was UPDATEd then do INSERT...
	 */
	if (SPI_processed > 1)
		ereport(ERROR,
				(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
				 errmsg("duplicate tuples")));
	else if (SPI_processed == 0)
	{
		snprintf(sql, 8192, "insert into _RSERV_LOG_ "
				 "(reloid, logid, logtime, deleted, key) "
				 "values (%u, %d, now(), %d, '%s')",
				 rel->rd_id, GetCurrentTransactionId(), deleted, okey);
		if (debug)
			elog(DEBUG4, "sql: %s", sql);
		ret = SPI_exec(sql, 0);
		if (ret < 0)
			ereport(ERROR,
					(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
					 errmsg("SPI_exec(insert) returned %d", ret)));
	}

	/* Free the escaped copy only if OutputValue palloc'd a new buffer. */
	if (okey != key && okey != outbuf)
		pfree(okey);

	/* Key changed during UPDATE: insert a live entry for the new key. */
	if (newkey)
	{
		if (strpbrk(newkey, "\\ \n'"))
			okey = OutputValue(newkey, outbuf, sizeof(outbuf));
		else
			okey = newkey;
		snprintf(sql, 8192, "insert into _RSERV_LOG_ "
				 "(reloid, logid, logtime, deleted, key) "
				 "values (%u, %d, now(), 0, '%s')",
				 rel->rd_id, GetCurrentTransactionId(), okey);
		if (debug)
			elog(DEBUG4, "sql: %s", sql);
		ret = SPI_exec(sql, 0);
		if (ret < 0)
			ereport(ERROR,
					(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
					 errmsg("SPI_exec returned %d", ret)));
		if (okey != newkey && okey != outbuf)
			pfree(okey);
	}

	SPI_finish();

#ifdef PG_FUNCTION_INFO_V1
	return (PointerGetDatum(tuple));
#else
	return (tuple);
#endif
}
/*
 * table_log_restore_table() - restore a complete table based on the
 * logging table.
 *
 * Parameters:
 *   0: original table name
 *   1: name of primary key in original table
 *   2: logging table
 *   3: name of primary key in logging table
 *   4: restore table name
 *   5: timestamp for restoring data
 *   6: primary key to restore (only this key will be restored) (optional)
 *   7: restore mode
 *      0: restore from blank table (default); needs a complete logging table
 *      1: restore from actual table backwards
 *   8: dont create table temporarly
 *      0: create restore table temporarly (default)
 *      1: create restore table not temporarly
 *
 * Return:
 *   name of the restore table (return contract otherwise not yet defined)
 */
Datum table_log_restore_table(PG_FUNCTION_ARGS) {
	/* the original table name */
	char *table_orig;
	/* the primary key in the original table */
	char *table_orig_pkey;
	/* number columns in original table */
	int table_orig_columns;
	/* the log table name */
	char *table_log;
	/* the primary key in the log table (usually trigger_id) */
	/* cannot be the same then then the pkey in the original table */
	char *table_log_pkey;
	/* number columns in log table */
	int table_log_columns;
	/* the restore table name */
	char *table_restore;
	/* the timestamp in past */
	Datum timestamp = PG_GETARG_DATUM(5);
	/* the single pkey, can be null (then all keys will be restored) */
	char *search_pkey = "";
	/* the restore method
	   - 0: restore from blank table (default)
	        needs a complete log table!
	   - 1: restore from actual table backwards */
	int method = 0;
	/* dont create restore table temporarly
	   - 0: create restore table temporarly (default)
	   - 1: dont create restore table temporarly */
	int not_temporarly = 0;
	int ret, results, i, number_columns;
	/* for getting table infos (250 chars (+ one times the length of all names) should be enough) */
	char query[250 + NAMEDATALEN];
	/* does we have a single key to restore? */
	int need_search_pkey = 0;
	char *tmp, *timestamp_string, *old_pkey_string = "";
	char *trigger_mode, *trigger_tuple, *trigger_changed;
	/* for saving query results */
	SPITupleTable *spi_tuptable = NULL;
	VarChar *return_name;
	/* memory for dynamic query */
	int d_query_size = 250;		/* start with 250 bytes */
	char *d_query;
	char *d_query_start;
	/* memory for column names */
	int col_query_size = 0;
	char *col_query;
	char *col_query_start;
	int col_pkey = 0;

	/*
	 * Some checks first...
	 */
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "start table_log_restore_table()");
#endif /* TABLE_LOG_DEBUG */
	/* does we have all arguments? */
	if (PG_ARGISNULL(0)) {
		elog(ERROR, "table_log_restore_table: missing original table name");
	}
	if (PG_ARGISNULL(1)) {
		elog(ERROR, "table_log_restore_table: missing primary key name for original table");
	}
	if (PG_ARGISNULL(2)) {
		elog(ERROR, "table_log_restore_table: missing log table name");
	}
	if (PG_ARGISNULL(3)) {
		elog(ERROR, "table_log_restore_table: missing primary key name for log table");
	}
	if (PG_ARGISNULL(4)) {
		elog(ERROR, "table_log_restore_table: missing copy table name");
	}
	if (PG_ARGISNULL(5)) {
		elog(ERROR, "table_log_restore_table: missing timestamp");
	}
	/* first check number arguments to avoid an segfault */
	if (PG_NARGS() >= 7) {
		/* if argument is given, check if not null */
		if (!PG_ARGISNULL(6)) {
			/* yes, fetch it */
			search_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(6));
			/* and check, if we have an argument */
			if (strlen(search_pkey) > 0) {
				need_search_pkey = 1;
#ifdef TABLE_LOG_DEBUG
				elog(NOTICE, "table_log_restore_table: will restore a single key");
#endif /* TABLE_LOG_DEBUG */
			}
		}
	}
	/* same procedere here */
	if (PG_NARGS() >= 8) {
		if (!PG_ARGISNULL(7)) {
			method = PG_GETARG_INT32(7);
			/* any positive value selects backward restore (mode 1) */
			if (method > 0) {
				method = 1;
			} else {
				method = 0;
			}
		}
	}
#ifdef TABLE_LOG_DEBUG
	if (method == 1) {
		elog(NOTICE, "table_log_restore_table: will restore from actual state backwards");
	} else {
		elog(NOTICE, "table_log_restore_table: will restore from begin forward");
	}
#endif /* TABLE_LOG_DEBUG */
	if (PG_NARGS() >= 9) {
		if (!PG_ARGISNULL(8)) {
			not_temporarly = PG_GETARG_INT32(8);
			if (not_temporarly > 0) {
				not_temporarly = 1;
			} else {
				not_temporarly = 0;
			}
		}
	}
#ifdef TABLE_LOG_DEBUG
	if (not_temporarly == 1) {
		elog(NOTICE, "table_log_restore_table: dont create restore table temporarly");
	}
#endif /* TABLE_LOG_DEBUG */
	/* get parameter */
	table_orig = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(0));
	table_orig_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(1));
	table_log = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(2));
	table_log_pkey = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(3));
	table_restore = __table_log_varcharout((VarChar *)PG_GETARG_VARCHAR_P(4));
	/* pkey of original table cannot be the same as of log table */
	if (strcmp((const char *)table_orig_pkey, (const char *)table_log_pkey) == 0) {
		elog(ERROR, "pkey of logging table cannot be the pkey of the original table: %s <-> %s", table_orig_pkey, table_log_pkey);
	}
	/* Connect to SPI manager */
	ret = SPI_connect();
	if (ret != SPI_OK_CONNECT) {
		elog(ERROR, "table_log_restore_table: SPI_connect returned %d", ret);
	}
	/*
	 * check original table
	 *
	 * NOTE(review): all the catalog probes below truncate at 249 bytes
	 * (snprintf size 249 on a 250+NAMEDATALEN buffer) -- presumably fine
	 * for NAMEDATALEN-sized names, but confirm for long quoted names.
	 */
	snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_orig));
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_SELECT) {
		elog(ERROR, "could not check relation: %s", table_orig);
	}
	if (SPI_processed > 0) {
		table_orig_columns = SPI_processed;
	} else {
		elog(ERROR, "could not check relation: %s", table_orig);
	}
	/* check pkey in original table */
	snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname=%s AND c.relkind='r' AND a.attname=%s AND a.attnum > 0 AND a.attrelid = c.oid", do_quote_literal(table_orig), do_quote_literal(table_orig_pkey));
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_SELECT) {
		elog(ERROR, "could not check relation: %s", table_orig);
	}
	if (SPI_processed == 0) {
		elog(ERROR, "could not check relation: %s", table_orig);
	}
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "original table: OK (%i columns)", table_orig_columns);
#endif /* TABLE_LOG_DEBUG */
	/* check log table */
	snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_log));
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_SELECT) {
		elog(ERROR, "could not check relation [1]: %s", table_log);
	}
	if (SPI_processed > 0) {
		table_log_columns = SPI_processed;
	} else {
		elog(ERROR, "could not check relation [2]: %s", table_log);
	}
	/* check pkey in log table */
	snprintf(query, 249, "SELECT a.attname FROM pg_class c, pg_attribute a WHERE c.relname=%s AND c.relkind='r' AND a.attname=%s AND a.attnum > 0 AND a.attrelid = c.oid", do_quote_literal(table_log), do_quote_literal(table_log_pkey));
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_SELECT) {
		elog(ERROR, "could not check relation [3]: %s", table_log);
	}
	if (SPI_processed == 0) {
		elog(ERROR, "could not check relation [4]: %s", table_log);
	}
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "log table: OK (%i columns)", table_log_columns);
#endif /* TABLE_LOG_DEBUG */
	/* check restore table: it must not exist yet */
	snprintf(query, 249, "SELECT pg_attribute.attname AS a FROM pg_class, pg_attribute WHERE pg_class.relname=%s AND pg_attribute.attnum > 0 AND pg_attribute.attrelid=pg_class.oid", do_quote_literal(table_restore));
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_SELECT) {
		elog(ERROR, "could not check relation: %s", table_restore);
	}
	if (SPI_processed > 0) {
		elog(ERROR, "restore table already exists: %s", table_restore);
	}
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "restore table: OK (doesnt exists)");
#endif /* TABLE_LOG_DEBUG */
	/* now get all columns from original table */
	snprintf(query, 249, "SELECT a.attname, format_type(a.atttypid, a.atttypmod), a.attnum FROM pg_class c, pg_attribute a WHERE c.relname = %s AND a.attnum > 0 AND a.attrelid = c.oid ORDER BY a.attnum", do_quote_literal(table_orig));
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_SELECT) {
		elog(ERROR, "could not get columns from relation: %s", table_orig);
	}
	if (SPI_processed == 0) {
		elog(ERROR, "could not check relation: %s", table_orig);
	}
	results = SPI_processed;
	/* store number columns for later */
	number_columns = SPI_processed;
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "number columns: %i", results);
#endif /* TABLE_LOG_DEBUG */
	/* First pass: size the quoted column list and locate the pkey column. */
	for (i = 0; i < results; i++) {
		/* the column name */
		tmp = SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1);
		col_query_size += strlen(do_quote_ident(tmp)) + 2;
		/* now check, if this is the pkey */
		if (strcmp((const char *)tmp, (const char *)table_orig_pkey) == 0) {
			/* remember the (real) number */
			col_pkey = i + 1;
		}
	}
	/* check if we have found the pkey */
	if (col_pkey == 0) {
		elog(ERROR, "cannot find pkey (%s) in table %s", table_orig_pkey, table_orig);
	}
	/* allocate memory for string */
	col_query_size += 10;
	col_query_start = (char *) palloc((col_query_size + 1) * sizeof(char));
	col_query = col_query_start;
	/* Second pass: build the comma-separated, quoted column list. */
	for (i = 0; i < results; i++) {
		if (i > 0) {
			sprintf(col_query, ", ");
			col_query = col_query_start + strlen(col_query_start);
		}
		sprintf(col_query, "%s", do_quote_ident(SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1)));
		col_query = col_query_start + strlen(col_query_start);
	}
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "string for columns: %s", col_query_start);
#endif /* TABLE_LOG_DEBUG */
	/* create restore table */
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "create restore table: %s", table_restore);
#endif /* TABLE_LOG_DEBUG */
	snprintf(query, 249, "SELECT * INTO ");
	/* per default create a temporary table */
	if (not_temporarly == 0) {
		strcat(query, "TEMPORARY ");
	}
	strcat(query, "TABLE ");
	/*
	 * NOTE(review): strncat's third argument bounds the appended data, not
	 * the total buffer -- these 249-byte appends can overrun query for long
	 * names; confirm upstream name-length limits make this unreachable.
	 */
	strncat(query, table_restore, 249);
	/* from which table? */
	strncat(query, " FROM ", 249);
	strncat(query, table_orig, 249);
	if (need_search_pkey == 1) {
		/* only extract a specific key */
		strncat(query, " WHERE ", 249);
		strncat(query, do_quote_ident(table_orig_pkey), 249);
		strncat(query, "=", 249);
		strncat(query, do_quote_literal(search_pkey), 249);
	}
	if (method == 0) {
		/* restore from begin (blank table): copy structure only */
		strncat(query, " LIMIT 0", 249);
	}
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(query, 0);
	if (ret != SPI_OK_SELINTO) {
		elog(ERROR, "could not check relation: %s", table_restore);
	}
	if (method == 1) {
#ifdef TABLE_LOG_DEBUG
		elog(NOTICE, "%i rows copied", SPI_processed);
#endif /* TABLE_LOG_DEBUG */
	}
	/* get timestamp as string */
	timestamp_string = DatumGetCString(DirectFunctionCall1(timestamptz_out, timestamp));
#ifdef TABLE_LOG_DEBUG
	if (method == 0) {
		elog(NOTICE, "need logs from start to timestamp: %s", timestamp_string);
	} else {
		elog(NOTICE, "need logs from end to timestamp: %s", timestamp_string);
	}
#endif /* TABLE_LOG_DEBUG */
	/* now build query for getting logs */
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "build query for getting logs");
#endif /* TABLE_LOG_DEBUG */
	d_query_size += d_query_size + strlen(col_query_start);
	if (need_search_pkey == 1) {
		/* add size of pkey and size of value */
		d_query_size += strlen(do_quote_ident(table_orig_pkey)) * 2 + strlen(do_quote_literal(search_pkey)) + 3;
	}
	/* allocate memory for string */
	d_query_size += 10;
	d_query_start = (char *) palloc((d_query_size + 1) * sizeof(char));
	d_query = d_query_start;
	/* Select all logged columns plus the trigger bookkeeping columns. */
	snprintf(d_query, d_query_size, "SELECT %s, trigger_mode, trigger_tuple, trigger_changed FROM %s WHERE ", col_query_start, do_quote_ident(table_log));
	d_query = d_query_start + strlen(d_query_start);
	if (method == 0) {
		/* from start to timestamp */
		snprintf(d_query, d_query_size, "trigger_changed <= %s ", do_quote_literal(timestamp_string));
	} else {
		/* from now() backwards to timestamp */
		snprintf(d_query, d_query_size, "trigger_changed >= %s ", do_quote_literal(timestamp_string));
	}
	d_query = d_query_start + strlen(d_query_start);
	if (need_search_pkey == 1) {
		snprintf(d_query, d_query_size, "AND %s = %s ", do_quote_ident(table_orig_pkey), do_quote_literal(search_pkey));
		d_query = d_query_start + strlen(d_query_start);
	}
	/* Replay order: forward = ASC by log pkey, backward = DESC. */
	if (method == 0) {
		snprintf(d_query, d_query_size, "ORDER BY %s ASC", do_quote_ident(table_log_pkey));
	} else {
		snprintf(d_query, d_query_size, "ORDER BY %s DESC", do_quote_ident(table_log_pkey));
	}
	d_query = d_query_start + strlen(d_query_start);
#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", d_query_start);
#endif /* TABLE_LOG_DEBUG_QUERY */
	ret = SPI_exec(d_query_start, 0);
	if (ret != SPI_OK_SELECT) {
		elog(ERROR, "could not get log data from table: %s", table_log);
	}
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "number log entries to restore: %i", SPI_processed);
#endif /* TABLE_LOG_DEBUG */
	results = SPI_processed;
	/* save results */
	spi_tuptable = SPI_tuptable;
	/* go through all results */
	for (i = 0; i < results; i++) {
		/* get tuple data: bookkeeping columns follow the data columns */
		trigger_mode = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 1);
		trigger_tuple = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 2);
		trigger_changed = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, number_columns + 3);
		/* check for update tuples we doesnt need */
		if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
			if (method == 0 && strcmp((const char *)trigger_tuple, (const char *)"old") == 0) {
				/* we need the old value of the pkey for the update */
				old_pkey_string = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, col_pkey);
#ifdef TABLE_LOG_DEBUG
				elog(NOTICE, "tuple old pkey: %s", old_pkey_string);
#endif /* TABLE_LOG_DEBUG */
				/* then skip this tuple */
				continue;
			}
			if (method == 1 && strcmp((const char *)trigger_tuple, (const char *)"new") == 0) {
				/* we need the old value of the pkey for the update */
				old_pkey_string = SPI_getvalue(spi_tuptable->vals[i], spi_tuptable->tupdesc, col_pkey);
#ifdef TABLE_LOG_DEBUG
				elog(NOTICE, "tuple: old pkey: %s", old_pkey_string);
#endif /* TABLE_LOG_DEBUG */
				/* then skip this tuple */
				continue;
			}
		}
		if (method == 0) {
			/* roll forward: replay each log entry as-is */
#ifdef TABLE_LOG_DEBUG
			elog(NOTICE, "tuple: %s %s %s", trigger_mode, trigger_tuple, trigger_changed);
#endif /* TABLE_LOG_DEBUG */
			if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
				__table_log_restore_table_insert(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
			} else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
				__table_log_restore_table_update(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i, old_pkey_string);
			} else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
				__table_log_restore_table_delete(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
			} else {
				elog(ERROR, "unknown trigger_mode: %s", trigger_mode);
			}
		} else {
			/* roll back: apply the inverse operation of each log entry */
			char rb_mode[10];
			/* reverse the method */
			if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
				sprintf(rb_mode, "DELETE");
			} else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
				sprintf(rb_mode, "UPDATE");
			} else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
				sprintf(rb_mode, "INSERT");
			} else {
				elog(ERROR, "unknown trigger_mode: %s", trigger_mode);
			}
#ifdef TABLE_LOG_DEBUG
			elog(NOTICE, "tuple: %s %s %s", rb_mode, trigger_tuple, trigger_changed);
#endif /* TABLE_LOG_DEBUG */
			if (strcmp((const char *)trigger_mode, (const char *)"INSERT") == 0) {
				__table_log_restore_table_delete(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
			} else if (strcmp((const char *)trigger_mode, (const char *)"UPDATE") == 0) {
				__table_log_restore_table_update(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i, old_pkey_string);
			} else if (strcmp((const char *)trigger_mode, (const char *)"DELETE") == 0) {
				__table_log_restore_table_insert(spi_tuptable, table_restore, table_orig_pkey, col_query_start, col_pkey, number_columns, i);
			}
		}
	}
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "table_log_restore_table() done, results in: %s", table_restore);
#endif /* TABLE_LOG_DEBUG */
	/* convert string to VarChar for result */
	return_name = DatumGetVarCharP(DirectFunctionCall2(varcharin, CStringGetDatum(table_restore), Int32GetDatum(strlen(table_restore) + VARHDRSZ)));
	/* close SPI connection */
	SPI_finish();
	/* and return the name of the restore table */
	PG_RETURN_VARCHAR_P(return_name);
}
/*
 * __table_log() -- helper function for table_log()
 *
 * Builds and executes one INSERT into the log table, recording every
 * live (non-dropped) column of the given tuple plus the trigger metadata
 * columns (trigger_mode, trigger_tuple, trigger_changed and, optionally,
 * trigger_user).
 *
 * parameter:
 *   trigdata         - trigger data
 *   changed_mode     - change mode (INSERT, UPDATE, DELETE)
 *   changed_tuple    - tuple to log (old, new)
 *   tuple            - pointer to tuple
 *   number_columns   - number of live columns in the table
 *   log_table        - logging table
 *   use_session_user - flag for writing session user
 *   log_schema       - schema of the logging table
 * return: none
 */

/*
 * Skip past any dropped columns starting at col_nr; returns the first
 * live (not dropped) column number.  Shared by the three column scans
 * in __table_log() below, which previously each had their own copy of
 * this loop.
 */
static int
__table_log_next_live_column(TriggerData *trigdata, int col_nr)
{
	while (trigdata->tg_relation->rd_att->attrs[col_nr - 1]->attisdropped)
		col_nr++;
	return col_nr;
}

static void
__table_log(TriggerData *trigdata, char *changed_mode, char *changed_tuple,
			HeapTuple tuple, int number_columns, char *log_table,
			int use_session_user, char *log_schema)
{
	char	   *before_char;
	int			i,
				col_nr;
	/* start with 100 bytes for the constant parts of the query */
	int			size_query = 100;
	char	   *query;
	char	   *query_start;
	int			ret;

#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "calculate query size");
#endif /* TABLE_LOG_DEBUG */
	/* add all sizes we need and know at this point */
	size_query += strlen(changed_mode) + strlen(changed_tuple)
		+ strlen(log_table) + strlen(log_schema);

	/* calculate the size of the column names and values */
	col_nr = 0;
	for (i = 1; i <= number_columns; i++)
	{
		col_nr = __table_log_next_live_column(trigdata, col_nr + 1);

		/* the column name (+3 covers the ", " separator with slack) */
		size_query += strlen(do_quote_ident(SPI_fname(trigdata->tg_relation->rd_att, col_nr))) + 3;

		/* the value; NULL becomes the 6 bytes of "NULL, " */
		before_char = SPI_getvalue(tuple, trigdata->tg_relation->rd_att, col_nr);
		if (before_char == NULL)
			size_query += 6;
		else
			size_query += strlen(do_quote_literal(before_char)) + 3;
	}

	if (use_session_user == 1)
	{
		/* add memory for the session user */
		size_query += NAMEDATALEN + 20;
	}

#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "build query");
#endif /* TABLE_LOG_DEBUG */
	/* allocate memory */
	query_start = (char *) palloc(size_query * sizeof(char));
	query = query_start;

	/* build query */
	sprintf(query, "INSERT INTO %s.%s (",
			do_quote_ident(log_schema), do_quote_ident(log_table));
	query = query_start + strlen(query_start);

	/* add column names */
	col_nr = 0;
	for (i = 1; i <= number_columns; i++)
	{
		col_nr = __table_log_next_live_column(trigdata, col_nr + 1);
		sprintf(query, "%s, ",
				do_quote_ident(SPI_fname(trigdata->tg_relation->rd_att, col_nr)));
		query = query_start + strlen(query_start);
	}

	/* add session user */
	if (use_session_user == 1)
	{
		sprintf(query, "trigger_user, ");
		query = query_start + strlen(query_start);
	}

	/* add the 3 extra column names */
	sprintf(query, "trigger_mode, trigger_tuple, trigger_changed) VALUES (");
	query = query_start + strlen(query_start);

	/* add values */
	col_nr = 0;
	for (i = 1; i <= number_columns; i++)
	{
		col_nr = __table_log_next_live_column(trigdata, col_nr + 1);
		before_char = SPI_getvalue(tuple, trigdata->tg_relation->rd_att, col_nr);
		if (before_char == NULL)
			sprintf(query, "NULL, ");
		else
			sprintf(query, "%s, ", do_quote_literal(before_char));
		query = query_start + strlen(query_start);
	}

	/* add session user */
	if (use_session_user == 1)
	{
		sprintf(query, "SESSION_USER, ");
		query = query_start + strlen(query_start);
	}

	/* add the 3 extra values */
	sprintf(query, "%s, %s, NOW());",
			do_quote_literal(changed_mode), do_quote_literal(changed_tuple));
	query = query_start + strlen(query_start);

#ifdef TABLE_LOG_DEBUG_QUERY
	elog(NOTICE, "query: %s", query_start);
#else
#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "execute query");
#endif /* TABLE_LOG_DEBUG */
#endif /* TABLE_LOG_DEBUG_QUERY */
	/* execute insert */
	ret = SPI_exec(query_start, 0);
	if (ret != SPI_OK_INSERT)
	{
		elog(ERROR, "could not insert log information into relation %s (error: %d)",
			 log_table, ret);
	}

#ifdef TABLE_LOG_DEBUG
	elog(NOTICE, "done");
#endif /* TABLE_LOG_DEBUG */

	/* clean up */
	pfree(query_start);
}
/* * refresh_by_match_merge * * Refresh a materialized view with transactional semantics, while allowing * concurrent reads. * * This is called after a new version of the data has been created in a * temporary table. It performs a full outer join against the old version of * the data, producing "diff" results. This join cannot work if there are any * duplicated rows in either the old or new versions, in the sense that every * column would compare as equal between the two rows. It does work correctly * in the face of rows which have at least one NULL value, with all non-NULL * columns equal. The behavior of NULLs on equality tests and on UNIQUE * indexes turns out to be quite convenient here; the tests we need to make * are consistent with default behavior. If there is at least one UNIQUE * index on the materialized view, we have exactly the guarantee we need. By * joining based on equality on all columns which are part of any unique * index, we identify the rows on which we can use UPDATE without any problem. * If any column is NULL in either the old or new version of a row (or both), * we must use DELETE and INSERT, since there could be multiple rows which are * NOT DISTINCT FROM each other, and we could otherwise end up with the wrong * number of occurrences in the updated relation. The temporary table used to * hold the diff results contains just the TID of the old record (if matched) * and the ROW from the new table as a single column of complex record type * (if matched). * * Once we have the diff table, we perform set-based DELETE, UPDATE, and * INSERT operations against the materialized view, and discard both temporary * tables. * * Everything from the generation of the new data to applying the differences * takes place under cover of an ExclusiveLock, since it seems as though we * would want to prohibit not only concurrent REFRESH operations, but also * incremental maintenance. 
It also doesn't seem reasonable or safe to allow * SELECT FOR UPDATE or SELECT FOR SHARE on rows being updated or deleted by * this command. */ static void refresh_by_match_merge(Oid matviewOid, Oid tempOid) { StringInfoData querybuf; Relation matviewRel; Relation tempRel; char *matviewname; char *tempname; char *diffname; TupleDesc tupdesc; bool foundUniqueIndex; List *indexoidlist; ListCell *indexoidscan; int16 relnatts; bool *usedForQual; Oid save_userid; int save_sec_context; int save_nestlevel; initStringInfo(&querybuf); matviewRel = heap_open(matviewOid, NoLock); matviewname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)), RelationGetRelationName(matviewRel)); tempRel = heap_open(tempOid, NoLock); tempname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(tempRel)), RelationGetRelationName(tempRel)); diffname = make_temptable_name_n(tempname, 2); relnatts = matviewRel->rd_rel->relnatts; usedForQual = (bool *) palloc0(sizeof(bool) * relnatts); /* Open SPI context. */ if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed"); /* Analyze the temp table with the new contents. */ appendStringInfo(&querybuf, "ANALYZE %s", tempname); if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY) elog(ERROR, "SPI_exec failed: %s", querybuf.data); /* * We need to ensure that there are not duplicate rows without NULLs in * the new data set before we can count on the "diff" results. Check for * that in a way that allows showing the first duplicated row found. Even * after we pass this test, a unique index on the materialized view may * find a duplicate key problem. 
*/ resetStringInfo(&querybuf); appendStringInfo(&querybuf, "SELECT x FROM %s x WHERE x IS NOT NULL AND EXISTS " "(SELECT * FROM %s y WHERE y IS NOT NULL " "AND (y.*) = (x.*) AND y.ctid <> x.ctid) LIMIT 1", tempname, tempname); if (SPI_execute(querybuf.data, false, 1) != SPI_OK_SELECT) elog(ERROR, "SPI_exec failed: %s", querybuf.data); if (SPI_processed > 0) { ereport(ERROR, (errcode(ERRCODE_CARDINALITY_VIOLATION), errmsg("new data for \"%s\" contains duplicate rows without any NULL columns", RelationGetRelationName(matviewRel)), errdetail("Row: %s", SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1)))); } /* Start building the query for creating the diff table. */ resetStringInfo(&querybuf); appendStringInfo(&querybuf, "CREATE TEMP TABLE %s AS " "SELECT x.ctid AS tid, y FROM %s x FULL JOIN %s y ON (", diffname, matviewname, tempname); /* * Get the list of index OIDs for the table from the relcache, and look up * each one in the pg_index syscache. We will test for equality on all * columns present in all unique indexes which only reference columns and * include all rows. */ tupdesc = matviewRel->rd_att; foundUniqueIndex = false; indexoidlist = RelationGetIndexList(matviewRel); foreach(indexoidscan, indexoidlist) { Oid indexoid = lfirst_oid(indexoidscan); HeapTuple indexTuple; Form_pg_index index; indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid)); if (!HeapTupleIsValid(indexTuple)) /* should not happen */ elog(ERROR, "cache lookup failed for index %u", indexoid); index = (Form_pg_index) GETSTRUCT(indexTuple); /* We're only interested if it is unique and valid. */ if (index->indisunique && IndexIsValid(index)) { int numatts = index->indnatts; int i; bool expr = false; Relation indexRel; /* Skip any index on an expression. */ for (i = 0; i < numatts; i++) { if (index->indkey.values[i] == 0) { expr = true; break; } } if (expr) { ReleaseSysCache(indexTuple); continue; } /* Skip partial indexes. 
*/ indexRel = index_open(index->indexrelid, RowExclusiveLock); if (RelationGetIndexPredicate(indexRel) != NIL) { index_close(indexRel, NoLock); ReleaseSysCache(indexTuple); continue; } /* Hold the locks, since we're about to run DML which needs them. */ index_close(indexRel, NoLock); /* Add quals for all columns from this index. */ for (i = 0; i < numatts; i++) { int attnum = index->indkey.values[i]; Oid type; Oid op; const char *colname; /* * Only include the column once regardless of how many times * it shows up in how many indexes. * * This is also useful later to omit columns which can not * have changed from the SET clause of the UPDATE statement. */ if (usedForQual[attnum - 1]) continue; usedForQual[attnum - 1] = true; /* * Actually add the qual, ANDed with any others. */ if (foundUniqueIndex) appendStringInfoString(&querybuf, " AND "); colname = quote_identifier(NameStr((tupdesc->attrs[attnum - 1])->attname)); appendStringInfo(&querybuf, "y.%s ", colname); type = attnumTypeId(matviewRel, attnum); op = lookup_type_cache(type, TYPECACHE_EQ_OPR)->eq_opr; mv_GenerateOper(&querybuf, op); appendStringInfo(&querybuf, " x.%s", colname); foundUniqueIndex = true; } } ReleaseSysCache(indexTuple); }
Datum funny_dup17(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; TransactionId *xid; int *level; bool *recursion; Relation rel; TupleDesc tupdesc; HeapTuple tuple; char *query, *fieldval, *fieldtype; char *when; int inserted; int selected = 0; int ret; if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "funny_dup17: not fired by trigger manager"); tuple = trigdata->tg_trigtuple; rel = trigdata->tg_relation; tupdesc = rel->rd_att; if (TRIGGER_FIRED_BEFORE(trigdata->tg_event)) { xid = &fd17b_xid; level = &fd17b_level; recursion = &fd17b_recursion; when = "BEFORE"; } else { xid = &fd17a_xid; level = &fd17a_level; recursion = &fd17a_recursion; when = "AFTER "; } if (!TransactionIdIsCurrentTransactionId(*xid)) { *xid = GetCurrentTransactionId(); *level = 0; *recursion = true; } if (*level == 17) { *recursion = false; return PointerGetDatum(tuple); } if (!(*recursion)) return PointerGetDatum(tuple); (*level)++; SPI_connect(); fieldval = SPI_getvalue(tuple, tupdesc, 1); fieldtype = SPI_gettype(tupdesc, 1); query = (char *) palloc(100 + NAMEDATALEN * 3 + strlen(fieldval) + strlen(fieldtype)); sprintf(query, "insert into %s select * from %s where %s = '%s'::%s", SPI_getrelname(rel), SPI_getrelname(rel), SPI_fname(tupdesc, 1), fieldval, fieldtype); if ((ret = SPI_exec(query, 0)) < 0) elog(ERROR, "funny_dup17 (fired %s) on level %3d: SPI_exec (insert ...) returned %d", when, *level, ret); inserted = SPI_processed; sprintf(query, "select count (*) from %s where %s = '%s'::%s", SPI_getrelname(rel), SPI_fname(tupdesc, 1), fieldval, fieldtype); if ((ret = SPI_exec(query, 0)) < 0) elog(ERROR, "funny_dup17 (fired %s) on level %3d: SPI_exec (select ...) 
returned %d", when, *level, ret); if (SPI_processed > 0) { selected = DatumGetInt32(DirectFunctionCall1(int4in, CStringGetDatum(SPI_getvalue( SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1 )))); } elog(DEBUG4, "funny_dup17 (fired %s) on level %3d: %d/%d tuples inserted/selected", when, *level, inserted, selected); SPI_finish(); (*level)--; if (*level == 0) *xid = InvalidTransactionId; return PointerGetDatum(tuple); }
/*
 * getClusterStatus -- find or create the per-cluster status block and make
 * sure all SPI plans requested via need_plan_mask are prepared and saved.
 *
 * The block is malloc'd (not palloc'd) because it must survive across
 * transactions; it is kept on the global clusterStatusList.
 *
 * Returns the cluster status block; raises ERROR on any failure.
 */
static Slony_I_ClusterStatus *
getClusterStatus(Name cluster_name, int need_plan_mask)
{
	Slony_I_ClusterStatus *cs;
	int			rc;
	char		query[1024];
	bool		isnull;
	Oid			plan_types[9];
	Oid			txid_snapshot_typid;
	TypeName   *txid_snapshot_typname;

	/*
	 * Find an existing cs row for this cluster
	 */
	for (cs = clusterStatusList; cs; cs = cs->next)
	{
		if ((bool) DirectFunctionCall2(nameeq,
									   NameGetDatum(&(cs->clustername)),
									   NameGetDatum(cluster_name)) == true)
		{
			/*
			 * Return it if all the requested SPI plans are prepared already.
			 */
			if ((cs->have_plan & need_plan_mask) == need_plan_mask)
				return cs;

			/*
			 * Create more SPI plans below.
			 */
			break;
		}
	}

	if (cs == NULL)
	{
		/*
		 * No existing cs found ... create a new one
		 */
		cs = (Slony_I_ClusterStatus *) malloc(sizeof(Slony_I_ClusterStatus));
		if (cs == NULL)
			elog(ERROR, "Slony-I: out of memory in getClusterStatus()");
		memset(cs, 0, sizeof(Slony_I_ClusterStatus));

		/*
		 * We remember the plain cluster name for fast lookup
		 */
		strncpy(NameStr(cs->clustername), NameStr(*cluster_name), NAMEDATALEN);

		/*
		 * ... and the quoted identifier of it for building queries
		 */
		cs->clusterident = strdup(DatumGetCString(DirectFunctionCall1(textout,
				DirectFunctionCall1(quote_ident,
					DirectFunctionCall1(textin,
						CStringGetDatum(NameStr(*cluster_name)))))));
		if (cs->clusterident == NULL)
			elog(ERROR, "Slony-I: out of memory in getClusterStatus()");

		/*
		 * Get our local node ID
		 */
		snprintf(query, 1024, "select last_value::int4 from %s.sl_local_node_id",
				 cs->clusterident);
		rc = SPI_exec(query, 0);
		if (rc < 0 || SPI_processed != 1)
			elog(ERROR, "Slony-I: failed to read sl_local_node_id");
		cs->localNodeId = DatumGetInt32(
			SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
		SPI_freetuptable(SPI_tuptable);
		if (cs->localNodeId < 0)
			elog(ERROR, "Slony-I: Node is uninitialized - cluster %s",
				 DatumGetCString(cluster_name));

		/*
		 * Initialize the currentXid to invalid
		 */
		cs->currentXid = InvalidTransactionId;

		/*
		 * Insert the new control block into the list
		 */
		cs->next = clusterStatusList;
		clusterStatusList = cs;
	}

	/*
	 * Prepare and save the PLAN_INSERT_EVENT
	 */
	if ((need_plan_mask & PLAN_INSERT_EVENT) != 0 &&
		(cs->have_plan & PLAN_INSERT_EVENT) == 0)
	{
		/*
		 * Lookup the oid of the txid_snapshot type
		 */
		txid_snapshot_typname = makeNode(TypeName);
		txid_snapshot_typname->names =
			lappend(lappend(NIL, makeString("pg_catalog")),
					makeString("txid_snapshot"));

#ifdef HAVE_TYPENAMETYPEID_3
		txid_snapshot_typid = typenameTypeId(NULL, txid_snapshot_typname, NULL);
#elif HAVE_TYPENAMETYPEID_2
		txid_snapshot_typid = typenameTypeId(NULL, txid_snapshot_typname);
#elif HAVE_TYPENAMETYPEID_1
		txid_snapshot_typid = typenameTypeId(txid_snapshot_typname);
#endif

		/*
		 * Create the saved plan. We lock the sl_event table in exclusive mode
		 * in order to ensure that all events are really assigned sequence
		 * numbers in the order they get committed.
		 */
		sprintf(query,
				"LOCK TABLE %s.sl_event IN EXCLUSIVE MODE; "
				"INSERT INTO %s.sl_event "
				"(ev_origin, ev_seqno, "
				"ev_timestamp, ev_snapshot, "
				"ev_type, ev_data1, ev_data2, ev_data3, ev_data4, "
				"ev_data5, ev_data6, ev_data7, ev_data8) "
				"VALUES ('%d', nextval('%s.sl_event_seq'), "
				"now(), \"pg_catalog\".txid_current_snapshot(), $1, $2, "
				"$3, $4, $5, $6, $7, $8, $9); "
				"SELECT currval('%s.sl_event_seq');",
				cs->clusterident, cs->clusterident, cs->localNodeId,
				cs->clusterident, cs->clusterident);
		plan_types[0] = TEXTOID;
		plan_types[1] = TEXTOID;
		plan_types[2] = TEXTOID;
		plan_types[3] = TEXTOID;
		plan_types[4] = TEXTOID;
		plan_types[5] = TEXTOID;
		plan_types[6] = TEXTOID;
		plan_types[7] = TEXTOID;
		plan_types[8] = TEXTOID;

		cs->plan_insert_event = SPI_saveplan(SPI_prepare(query, 9, plan_types));
		if (cs->plan_insert_event == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		/*
		 * Also prepare the plan to remember sequence numbers on certain
		 * events.
		 */
		sprintf(query,
				"insert into %s.sl_seqlog "
				"(seql_seqid, seql_origin, seql_ev_seqno, seql_last_value) "
				"select * from ("
				"select seq_id, %d, currval('%s.sl_event_seq'), seq_last_value "
				"from %s.sl_seqlastvalue "
				"where seq_origin = '%d') as FOO "
				"where NOT %s.seqtrack(seq_id, seq_last_value) IS NULL; ",
				cs->clusterident, cs->localNodeId, cs->clusterident,
				cs->clusterident, cs->localNodeId, cs->clusterident);

		cs->plan_record_sequences = SPI_saveplan(SPI_prepare(query, 0, NULL));
		if (cs->plan_record_sequences == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		cs->have_plan |= PLAN_INSERT_EVENT;
	}

	/*
	 * Prepare and save the PLAN_INSERT_LOG
	 */
	if ((need_plan_mask & PLAN_INSERT_LOG) != 0 &&
		(cs->have_plan & PLAN_INSERT_LOG) == 0)
	{
		/*
		 * Create the saved plan's
		 */
		sprintf(query,
				"INSERT INTO %s.sl_log_1 "
				"(log_origin, log_txid, log_tableid, log_actionseq,"
				" log_cmdtype, log_cmddata) "
				"VALUES (%d, \"pg_catalog\".txid_current(), $1, "
				"nextval('%s.sl_action_seq'), $2, $3); ",
				cs->clusterident, cs->localNodeId, cs->clusterident);
		plan_types[0] = INT4OID;
		plan_types[1] = TEXTOID;
		plan_types[2] = TEXTOID;

		cs->plan_insert_log_1 = SPI_saveplan(SPI_prepare(query, 3, plan_types));
		if (cs->plan_insert_log_1 == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		sprintf(query,
				"INSERT INTO %s.sl_log_2 "
				"(log_origin, log_txid, log_tableid, log_actionseq,"
				" log_cmdtype, log_cmddata) "
				"VALUES (%d, \"pg_catalog\".txid_current(), $1, "
				"nextval('%s.sl_action_seq'), $2, $3); ",
				cs->clusterident, cs->localNodeId, cs->clusterident);
		plan_types[0] = INT4OID;
		plan_types[1] = TEXTOID;
		plan_types[2] = TEXTOID;

		cs->plan_insert_log_2 = SPI_saveplan(SPI_prepare(query, 3, plan_types));
		if (cs->plan_insert_log_2 == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		/* @-nullderef@ */

		/*
		 * Also create the 3 rather static text values for the log_cmdtype
		 * parameter and initialize the cmddata_buf.  These are malloc'd
		 * because they must live beyond the current memory context.
		 */
		cs->cmdtype_I = malloc(VARHDRSZ + 1);
		if (cs->cmdtype_I == NULL)
			elog(ERROR, "Slony-I: out of memory in getClusterStatus()");
		SET_VARSIZE(cs->cmdtype_I, VARHDRSZ + 1);
		*VARDATA(cs->cmdtype_I) = 'I';
		cs->cmdtype_U = malloc(VARHDRSZ + 1);
		if (cs->cmdtype_U == NULL)
			elog(ERROR, "Slony-I: out of memory in getClusterStatus()");
		SET_VARSIZE(cs->cmdtype_U, VARHDRSZ + 1);
		*VARDATA(cs->cmdtype_U) = 'U';
		cs->cmdtype_D = malloc(VARHDRSZ + 1);
		if (cs->cmdtype_D == NULL)
			elog(ERROR, "Slony-I: out of memory in getClusterStatus()");
		SET_VARSIZE(cs->cmdtype_D, VARHDRSZ + 1);
		*VARDATA(cs->cmdtype_D) = 'D';

		/*
		 * And the plan to read the current log_status.  (This one's result
		 * was previously never checked, unlike the four plans above.)
		 */
		sprintf(query, "SELECT last_value::int4 FROM %s.sl_log_status",
				cs->clusterident);
		cs->plan_get_logstatus = SPI_saveplan(SPI_prepare(query, 0, NULL));
		if (cs->plan_get_logstatus == NULL)
			elog(ERROR, "Slony-I: SPI_prepare() failed");

		cs->cmddata_size = 8192;
		cs->cmddata_buf = (text *) malloc(8192);
		if (cs->cmddata_buf == NULL)
			elog(ERROR, "Slony-I: out of memory in getClusterStatus()");

		cs->have_plan |= PLAN_INSERT_LOG;
	}

	return cs;
	/* @+nullderef@ */
}
Datum check_authorization(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; char *colname; HeapTuple rettuple_ok; HeapTuple rettuple_fail; TupleDesc tupdesc; int SPIcode; char query[1024]; const char *pk_id = NULL; SPITupleTable *tuptable; HeapTuple tuple; char *lockcode; char *authtable = "authorization_table"; const char *op; #define ERRMSGLEN 256 char err_msg[ERRMSGLEN]; /* Make sure trigdata is pointing at what I expect */ if ( ! CALLED_AS_TRIGGER(fcinfo) ) { elog(ERROR,"check_authorization: not fired by trigger manager"); } if ( ! TRIGGER_FIRED_BEFORE(trigdata->tg_event) ) { elog(ERROR,"check_authorization: not fired *before* event"); } if ( TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ) { rettuple_ok = trigdata->tg_newtuple; rettuple_fail = NULL; op = "UPDATE"; } else if ( TRIGGER_FIRED_BY_DELETE(trigdata->tg_event) ) { rettuple_ok = trigdata->tg_trigtuple; rettuple_fail = NULL; op = "DELETE"; } else { elog(ERROR,"check_authorization: not fired by update or delete"); PG_RETURN_NULL(); } tupdesc = trigdata->tg_relation->rd_att; /* Connect to SPI manager */ SPIcode = SPI_connect(); if (SPIcode != SPI_OK_CONNECT) { elog(ERROR,"check_authorization: could not connect to SPI"); PG_RETURN_NULL() ; } colname = trigdata->tg_trigger->tgargs[0]; pk_id = SPI_getvalue(trigdata->tg_trigtuple, tupdesc, SPI_fnumber(tupdesc, colname)); POSTGIS_DEBUG(3, "check_authorization called"); sprintf(query,"SELECT authid FROM \"%s\" WHERE expires >= now() AND toid = '%d' AND rid = '%s'", authtable, trigdata->tg_relation->rd_id, pk_id); POSTGIS_DEBUGF(3 ,"about to execute :%s", query); SPIcode = SPI_exec(query,0); if (SPIcode !=SPI_OK_SELECT ) elog(ERROR,"couldnt execute to test for lock :%s",query); if (!SPI_processed ) { POSTGIS_DEBUGF(3, "there is NO lock on row '%s'", pk_id); SPI_finish(); return PointerGetDatum(rettuple_ok); } /* there is a lock - check to see if I have rights to it! 
*/ tuptable = SPI_tuptable; tupdesc = tuptable->tupdesc; tuple = tuptable->vals[0]; lockcode = SPI_getvalue(tuple, tupdesc, 1); POSTGIS_DEBUGF(3, "there is a lock on row '%s' (auth: '%s').", pk_id, lockcode); /* * check to see if temp_lock_have_table table exists * (it might not exist if they own no locks) */ sprintf(query,"SELECT * FROM pg_class WHERE relname = 'temp_lock_have_table'"); SPIcode = SPI_exec(query,0); if (SPIcode != SPI_OK_SELECT ) elog(ERROR,"couldnt execute to test for lockkey temp table :%s",query); if (SPI_processed==0) { goto fail; } sprintf(query, "SELECT * FROM temp_lock_have_table WHERE xideq( transid, getTransactionID() ) AND lockcode ='%s'", lockcode); POSTGIS_DEBUGF(3, "about to execute :%s", query); SPIcode = SPI_exec(query,0); if (SPIcode != SPI_OK_SELECT ) elog(ERROR, "couldnt execute to test for lock aquire: %s", query); if (SPI_processed >0) { POSTGIS_DEBUG(3, "I own the lock - I can modify the row"); SPI_finish(); return PointerGetDatum(rettuple_ok); } fail: snprintf(err_msg, ERRMSGLEN, "%s where \"%s\" = '%s' requires authorization '%s'", op, colname, pk_id, lockcode); err_msg[ERRMSGLEN-1] = '\0'; #ifdef ABORT_ON_AUTH_FAILURE elog(ERROR, "%s", err_msg); #else elog(NOTICE, "%s", err_msg); #endif SPI_finish(); return PointerGetDatum(rettuple_fail); }
Datum qhsrvaccount_monitor(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; TupleDesc tupdesc; HeapTuple rettuple; char opcode = 'X'; // U for update I for insert D for delete X for unknow /* make sure it's called as a trigger at all */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "qhsrvaccount_monitor: not called by trigger manager"); /* tuple to return to executor */ if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)){ opcode = 'U'; rettuple = trigdata->tg_newtuple; }else{ if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) { opcode = 'I'; }else if(TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)){ opcode = 'D'; } rettuple = trigdata->tg_trigtuple; } tupdesc = trigdata->tg_relation->rd_att; char notify[1024] = {0}; unsigned int notifylen = 0; unsigned int natts = trigdata->tg_relation->rd_att->natts; char* value ; unsigned int valuelen = 0; notify[0] = opcode; notify[1] = '^'; notifylen = 2; int i = 0; for(; i < natts; ++i){ value = SPI_getvalue(rettuple, tupdesc, i+1); if (value == NULL) { value = "$"; } valuelen = strlen(value); memcpy(notify+notifylen, value,valuelen); *(notify+notifylen+valuelen) = '^'; notifylen = notifylen+valuelen+1; } notify[--notifylen] = 0; elog(INFO, "%s", notify); int ret; char *tablename = " qhsrvaccount,"; int tablenamelen = strlen(tablename); if ((ret = SPI_connect()) < 0) elog(ERROR, "SPI connect return %d", ret); /* get number of rows in table */ char notifycmd[1024] = {0}; unsigned int notifycmdlen = sizeof("NOTIFY")-1; memcpy(notifycmd, "NOTIFY", notifycmdlen); notifycmd[notifycmdlen++] = ' '; memcpy(¬ifycmd[notifycmdlen], tablename, tablenamelen); notifycmdlen += tablenamelen; notifycmd[notifycmdlen++] = '\''; memcpy(¬ifycmd[notifycmdlen], notify, notifylen); notifycmdlen += notifylen; notifycmd[notifycmdlen++] = '\''; elog(INFO, "%s", notifycmd); ret = SPI_exec(notifycmd,0); if (ret < 0) elog(ERROR, " SPI_exec returned %d", ret); SPI_finish(); return PointerGetDatum(rettuple); }