/*
 * PLy_spi_subtransaction_abort
 *
 * Clean up after an error raised inside a SPI subtransaction: capture the
 * error, roll the subtransaction back, restore the caller's memory context
 * and resource owner, and re-raise the error into Python.
 *
 * oldcontext/oldowner are the memory context and resource owner that were
 * current before the subtransaction was started; both are restored here.
 * Does not return an error indication -- the error is delivered to the
 * Python layer via PLy_spi_exception_set().
 */
void
PLy_spi_subtransaction_abort(MemoryContext oldcontext, ResourceOwner oldowner)
{
	ErrorData  *edata;
	PLyExceptionEntry *entry;
	PyObject   *exc;

	/* Save error info */
	MemoryContextSwitchTo(oldcontext);
	edata = CopyErrorData();
	FlushErrorState();

	/* Abort the inner transaction */
	RollbackAndReleaseCurrentSubTransaction();
	MemoryContextSwitchTo(oldcontext);
	CurrentResourceOwner = oldowner;

	/*
	 * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will have
	 * left us in a disconnected state. We need this hack to return to
	 * connected state.
	 */
	SPI_restore_connection();

	/* Look up the correct exception by SQLSTATE */
	entry = hash_search(PLy_spi_exceptions, &(edata->sqlerrcode),
						HASH_FIND, NULL);

	/* We really should find it, but just in case have a fallback */
	Assert(entry != NULL);
	exc = entry ? entry->exc : PLy_exc_spi_error;

	/* Make Python raise the exception */
	PLy_spi_exception_set(exc, edata);
	FreeErrorData(edata);
}
/*
 * run__const_to_str__negative
 *
 * Negative test driver for const_to_str(): the given Const carries an
 * unsupported data type, so const_to_str() must ereport(ERROR) with an
 * internal-error message naming the type and value.  Falls through to
 * assert_true(false) if no error is raised.
 */
void
run__const_to_str__negative(Const *input, StringInfo result, char *value)
{
	StringInfo	err_msg = makeStringInfo();
	/* remember the pre-error context so the catch block can restore it */
	MemoryContext old_context = CurrentMemoryContext;

	appendStringInfo(err_msg,
					 "internal error in pxffilters.c:const_to_str. "
					 "Using unsupported data type (%d) (value %s)",
					 input->consttype, value);

	/* Setting the test -- code omitted -- */
	PG_TRY();
	{
		/* This will throw a ereport(ERROR).*/
		const_to_str(input, result);
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		/*
		 * Restore the saved memory context before CopyErrorData().  This
		 * replaces the previous "CurrentMemoryContext = 1" hack, which
		 * stored a bogus integer in the context pointer.
		 */
		MemoryContextSwitchTo(old_context);
		edata = CopyErrorData();
		/* clear the error state so it cannot leak into the next test */
		FlushErrorState();

		/* Validate the type of expected error */
		assert_true(edata->sqlerrcode == ERRCODE_INTERNAL_ERROR);
		assert_true(edata->elevel == ERROR);
		assert_string_equal(edata->message, err_msg->data);

		FreeErrorData(edata);
		pfree(err_msg->data);
		pfree(err_msg);
		return;
	}
	PG_END_TRY();

	assert_true(false);
}
/*
 * test_get_format_name
 *
 * Verifies get_format_name()'s mapping from one-character format codes to
 * format-name strings, and that an unknown code raises ERROR with the
 * expected message.
 */
void
test_get_format_name(void **state)
{
	char	   *formatName = get_format_name('t');

	assert_string_equal(formatName, TextFormatName);

	/*
	 * NOTE(review): 'c' is asserted to map to TextFormatName as well; if 'c'
	 * is meant to denote a distinct CSV format name, this assertion may be
	 * masking a gap -- confirm against get_format_name()'s implementation.
	 */
	formatName = get_format_name('c');
	assert_string_equal(formatName, TextFormatName);

	formatName = get_format_name('b');
	assert_string_equal(formatName, GpdbWritableFormatName);

	/* save the current context so the catch block can restore it */
	MemoryContext old_context = CurrentMemoryContext;

	PG_TRY();
	{
		/* unknown code must ereport(ERROR) and never reach the assert */
		formatName = get_format_name('x');
		assert_false("Expected Exception");
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		/* recover the pre-error memory context before copying the error */
		MemoryContextSwitchTo(old_context);
		edata = CopyErrorData();
		FlushErrorState();
		assert_string_equal(edata->message,
							"Unable to get format name for format code: x");
	}
	PG_END_TRY();
}
/*
 * geterrcode - fetch the SQLSTATE of the error currently being handled.
 *
 * CopyErrorData() asserts that CurrentMemoryContext is not ErrorContext, so
 * we temporarily hop into TopMemoryContext for the copy and hop back before
 * returning.
 */
static int
geterrcode(void)
{
	int			sqlstate;
	ErrorData  *errdata;
	MemoryContext oldcontext;

	/* switch context to work around Assert() in CopyErrorData() */
	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
	errdata = CopyErrorData();
	sqlstate = errdata->sqlerrcode;
	FreeErrorData(errdata);
	MemoryContextSwitchTo(oldcontext);

	return sqlstate;
}
void test_build_http_headers_empty_user_error(void **state) { /* setup mock data and expectations */ PxfInputData *input = (PxfInputData *) palloc0(sizeof(PxfInputData)); CHURL_HEADERS headers = (CHURL_HEADERS) palloc0(sizeof(CHURL_HEADERS)); GPHDUri *gphd_uri = (GPHDUri *) palloc0(sizeof(GPHDUri)); Relation rel = (Relation) palloc0(sizeof(RelationData)); ExtTableEntry ext_tbl; struct tupleDesc tuple; input->headers = headers; input->gphduri = gphd_uri; input->rel = NULL; gphd_uri->uri = "testuri"; expect_any(external_set_env_vars, extvar); expect_string(external_set_env_vars, uri, gphd_uri->uri); expect_value(external_set_env_vars, csv, false); expect_value(external_set_env_vars, escape, NULL); expect_value(external_set_env_vars, quote, NULL); expect_value(external_set_env_vars, header, false); expect_value(external_set_env_vars, scancounter, 0); struct extvar_t mock_extvar; mock_extvar.GP_USER = ""; snprintf(mock_extvar.GP_SEGMENT_ID, sizeof(mock_extvar.GP_SEGMENT_ID), "SegId"); snprintf(mock_extvar.GP_SEGMENT_COUNT, sizeof(mock_extvar.GP_SEGMENT_COUNT), "10"); snprintf(mock_extvar.GP_XID, sizeof(mock_extvar.GP_XID), "20"); will_assign_memory(external_set_env_vars, extvar, &mock_extvar, sizeof(extvar_t)); will_be_called(external_set_env_vars); MemoryContext old_context = CurrentMemoryContext; PG_TRY(); { build_http_headers(input); assert_false("Expected Exception"); } PG_CATCH(); { MemoryContextSwitchTo(old_context); ErrorData *edata = CopyErrorData(); assert_true(edata->elevel == ERROR); char *expected_message = pstrdup("User identity is unknown"); assert_string_equal(edata->message, expected_message); pfree(expected_message); } PG_END_TRY(); }
PgQueryNormalizeResult pg_query_normalize(const char* input) { MemoryContext ctx = NULL; PgQueryNormalizeResult result = {0}; ctx = pg_query_enter_memory_context("pg_query_normalize"); PG_TRY(); { List *tree; pgssConstLocations jstate; int query_len; /* Parse query */ tree = raw_parser(input); /* Set up workspace for constant recording */ jstate.clocations_buf_size = 32; jstate.clocations = (pgssLocationLen *) palloc(jstate.clocations_buf_size * sizeof(pgssLocationLen)); jstate.clocations_count = 0; jstate.highest_extern_param_id = 0; /* Walk tree and record const locations */ const_record_walker((Node *) tree, &jstate); /* Normalize query */ query_len = (int) strlen(input); result.normalized_query = strdup(generate_normalized_query(&jstate, input, 0, &query_len, PG_UTF8)); } PG_CATCH(); { ErrorData* error_data; PgQueryError* error; MemoryContextSwitchTo(ctx); error_data = CopyErrorData(); error = malloc(sizeof(PgQueryError)); error->message = strdup(error_data->message); error->filename = strdup(error_data->filename); error->lineno = error_data->lineno; error->cursorpos = error_data->cursorpos; result.error = error; FlushErrorState(); } PG_END_TRY(); pg_query_exit_memory_context(ctx); return result; }
/* * Tests that cdbdisp_dispatchPlan handles a plan size overflow * when splan_len_uncompressed * num_slices. */ void test__cdbdisp_dispatchPlan__Overflow_plan_size_in_kb(void **state) { bool success = false; struct CdbDispatcherState *ds = (struct CdbDispatcherState *) palloc0(sizeof(struct CdbDispatcherState)); struct QueryDesc *queryDesc = (struct QueryDesc *) palloc0(sizeof(QueryDesc)); _init_cdbdisp_dispatchPlan(queryDesc); /* Set max plan to a value that will require handling INT32 * overflow of the current plan size */ gp_max_plan_size = INT_MAX; queryDesc->plannedstmt->planTree = (struct Plan *)palloc0(sizeof(struct Plan)); /* Set num_slices and uncompressed_size to be INT_MAX-1 to force overflow */ queryDesc->plannedstmt->planTree->nMotionNodes = INT_MAX-1; expect_any(serializeNode, node); expect_any(serializeNode, size); expect_any(serializeNode, uncompressed_size_out); will_assign_value(serializeNode, uncompressed_size_out, INT_MAX-1); will_return(serializeNode, NULL); PG_TRY(); { cdbdisp_dispatchPlan(queryDesc, true, true, ds); } PG_CATCH(); { /* Verify that we get the correct error (limit exceeded) */ ErrorData *edata = CopyErrorData(); StringInfo message = makeStringInfo(); appendStringInfo(message, "Query plan size limit exceeded, current size: " UINT64_FORMAT "KB, max allowed size: %dKB", ((INT_MAX-1)*(INT_MAX-1)/(uint64)1024), INT_MAX); if (edata->elevel == ERROR && strncmp(edata->message, message->data, message->len)) { success = true; } } PG_END_TRY(); assert_true(success); }
/*
 * ErrorData_getCurrentError - wrap the error currently being propagated in
 * a Java ErrorData object.
 *
 * The ErrorData copy is made in JavaMemoryContext so it stays valid for as
 * long as the Java side holds on to it; the pointer is smuggled into Java
 * as a long via the Ptr2Long union.
 */
jobject
ErrorData_getCurrentError(void)
{
	ErrorData  *copy;
	MemoryContext prev;
	Ptr2Long	ptrRepr;

	/* copy the error into JavaMemoryContext so it outlives the handler */
	prev = MemoryContextSwitchTo(JavaMemoryContext);
	copy = CopyErrorData();
	MemoryContextSwitchTo(prev);

	ptrRepr.longVal = 0L;		/* ensure that the rest is zeroed out */
	ptrRepr.ptrVal = copy;

	return JNI_newObject(s_ErrorData_class, s_ErrorData_init, ptrRepr.longVal);
}
void test__BufferedReadUseBeforeBuffer__IsNextReadLenZero(void **state) { BufferedRead *bufferedRead = palloc(sizeof(BufferedRead)); int32 memoryLen = 512; /* maxBufferLen + largeReadLen */ uint8 *memory = malloc(sizeof(memoryLen)); char *relname = "test"; int32 maxBufferLen = 128; int32 maxLargeReadLen = 128; int32 nextBufferLen; int32 maxReadAheadLen = 64; memset(bufferedRead, 0 , sizeof(BufferedRead)); /* * Initialize the buffer */ BufferedReadInit(bufferedRead, memory, memoryLen, maxBufferLen, maxLargeReadLen, relname); /* * filling up the bufferedRead struct */ bufferedRead->largeReadLen=100; bufferedRead->bufferOffset=0; bufferedRead->fileLen=200; bufferedRead->temporaryLimitFileLen=200; bufferedRead->largeReadPosition=50; bufferedRead->maxLargeReadLen = 0; /* this will get assigned to nextReadLen(=0) */ PG_TRY(); { /* * This will throw a ereport(ERROR). */ BufferedReadUseBeforeBuffer(bufferedRead, maxReadAheadLen, &nextBufferLen); } PG_CATCH(); { CurrentMemoryContext = 1; //To be fixed ErrorData *edata = CopyErrorData(); /* * Validate the expected error */ assert_true(edata->sqlerrcode == ERRCODE_INTERNAL_ERROR); assert_true(edata->elevel == ERROR); } PG_END_TRY(); }
/*
 * run_pg_parse_json:
 *
 * Wrap pg_parse_json in order to restore InterruptHoldoffCount when parse
 * error occured.
 *
 * Returns true when parse completed. False for unexpected end of string.
 */
bool
run_pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
{
	MemoryContext ccxt = CurrentMemoryContext;
	uint32		saved_IntrHoldoffCount;

	/*
	 * "ereport(ERROR.." occurs on error in pg_parse_json resets
	 * InterruptHoldoffCount to zero, so we must save the value before calling
	 * json parser to restore it on parse error. See errfinish().
	 */
	saved_IntrHoldoffCount = InterruptHoldoffCount;

	PG_TRY();
	{
		pg_parse_json(lex, sem);
	}
	PG_CATCH();
	{
		ErrorData  *errdata;
		MemoryContext ecxt;

		/* undo errfinish()'s reset of the holdoff count */
		InterruptHoldoffCount = saved_IntrHoldoffCount;

		ecxt = MemoryContextSwitchTo(ccxt);
		errdata = CopyErrorData();
		if (errdata->sqlerrcode == ERRCODE_INVALID_TEXT_REPRESENTATION)
		{
			/* invalid JSON text: swallow the error and report failure */
			FlushErrorState();
			return false;
		}
		else
		{
			/* anything else is not ours to absorb; re-throw */
			MemoryContextSwitchTo(ecxt);
			PG_RE_THROW();
		}
	}
	PG_END_TRY();

	return true;
}
/* * SUT: rest_request * call_rest throws an error while not in HA mode */ void test__rest_request__callRestThrowsNoHA(void **state) { GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri)); hadoop_uri->host = pstrdup("host1"); hadoop_uri->port = pstrdup("port1"); ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext)); char *restMsg = "empty message"; expect_any(call_rest, hadoop_uri); expect_any(call_rest, client_context); expect_any(call_rest, rest_msg); will_be_called_with_sideeffect(call_rest, &FirstException, NULL); /* test */ PG_TRY(); { rest_request(hadoop_uri, client_context, restMsg); } PG_CATCH(); { pfree(hadoop_uri->host); pfree(hadoop_uri->port); pfree(hadoop_uri); pfree(client_context); CurrentMemoryContext = 1; ErrorData *edata = CopyErrorData(); /*Validate the type of expected error */ assert_string_equal(edata->message, "first exception"); return; } PG_END_TRY(); assert_true(false); }
/* * SUT: rest_request * call_rest throws an error while in HA mode * and the failover method finds an active IP so the second * call to call_rest does no throw an exception */ void test__rest_request__callRestThrowsHAFirstTime(void **state) { GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri)); hadoop_uri->host = pstrdup("host1"); hadoop_uri->port = pstrdup("port1"); NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf)); hadoop_uri->ha_nodes = ha_nodes; ha_nodes->nodes = (char *[]){"host1", "host2"}; ha_nodes->restports = (char *[]){"port1", "port2"}; ha_nodes->numn = 2; ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext)); char *restMsg = "empty message"; expect_any(call_rest, hadoop_uri); expect_any(call_rest, client_context); expect_any(call_rest, rest_msg); will_be_called_with_sideeffect(call_rest, &FirstException, NULL); /* the second call from ha_failover */ expect_any(call_rest, hadoop_uri); expect_any(call_rest, client_context); expect_any(call_rest, rest_msg); will_be_called(call_rest); /* test */ rest_request(hadoop_uri, client_context, restMsg); pfree(hadoop_uri); pfree(client_context); } /* * SUT: rest_request * call_rest throws an error while in HA mode * and the failover method finds an an active IP so the second * call to call_rest is issued on the second IP. This call also throws * an exception - but this time the exception is not caught. 
*/ void test__rest_request__callRestThrowsHASecondTime(void **state) { GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri)); hadoop_uri->host = pstrdup("host1"); hadoop_uri->port = pstrdup("port1"); NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf)); hadoop_uri->ha_nodes = ha_nodes; ha_nodes->nodes = (char *[]){"host1", "host2"}; ha_nodes->restports = (char *[]){"port1", "port2"}; ha_nodes->numn = 2; ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext)); char *restMsg = "empty message"; expect_any(call_rest, hadoop_uri); expect_any(call_rest, client_context); expect_any(call_rest, rest_msg); will_be_called_with_sideeffect(call_rest, &FirstException, NULL); /* the second call from ha_failover */ expect_any(call_rest, hadoop_uri); expect_any(call_rest, client_context); expect_any(call_rest, rest_msg); will_be_called_with_sideeffect(call_rest, &SecondException, NULL); /* test */ PG_TRY(); { rest_request(hadoop_uri, client_context, restMsg); } PG_CATCH(); { pfree(hadoop_uri->host); pfree(hadoop_uri->port); pfree(hadoop_uri); pfree(client_context); CurrentMemoryContext = 1; ErrorData *edata = CopyErrorData(); /*Validate the type of expected error */ assert_string_equal(edata->message, "second exception"); /* the first exception was caught by rest_request() */ return; } PG_END_TRY(); assert_true(false); } /* * SUT: rest_request * the first time call_rest is called we succeed, since the first IP is valid * No exceptions are thrown */ void test__rest_request__callRestHASuccessFromTheFirstCall(void **state) { GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri)); hadoop_uri->host = pstrdup("host1"); hadoop_uri->port = pstrdup("port1"); NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf)); hadoop_uri->ha_nodes = ha_nodes; ha_nodes->nodes = (char *[]){"host1", "host2"}; ha_nodes->restports = (char *[]){"port1", "port2"}; ha_nodes->numn = 2; ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext)); char 
*restMsg = "empty message"; expect_any(call_rest, hadoop_uri); expect_any(call_rest, client_context); expect_any(call_rest, rest_msg); will_be_called(call_rest); /* test */ rest_request(hadoop_uri, client_context, restMsg); pfree(hadoop_uri->host); pfree(hadoop_uri->port); pfree(hadoop_uri); pfree(client_context); } void test__normalize_size(void **state) { float4 result = normalize_size(10000000, "B"); assert_int_equal(result, 10000000); result = normalize_size(10000000, "KB"); assert_int_equal(result, 10240000000); result = normalize_size(500, "MB"); assert_int_equal(result, 524288000); result = normalize_size(10, "GB"); assert_int_equal(result, 10737418240); result = normalize_size(10000, "TB"); assert_int_equal(result, 10995116277760000); } int main(int argc, char *argv[]) { cmockery_parse_arguments(argc, argv); const UnitTest tests[] = { unit_test(test__rest_request__callRestThrowsNoHA), unit_test(test__rest_request__callRestThrowsHAFirstTime), unit_test(test__rest_request__callRestThrowsHASecondTime), unit_test(test__rest_request__callRestHASuccessFromTheFirstCall), unit_test(test__normalize_size) }; return run_tests(tests); }
/*
 * initTrie - create trie from file.
 *
 * Function converts UTF8-encoded file into current encoding.
 *
 * Each line of the "rules" file contributes one src -> trg mapping to the
 * trie.  Lines that cannot be converted to the database encoding are
 * skipped (the conversion error is absorbed and the outer do-loop retries).
 */
static TrieChar *
initTrie(char *filename)
{
	TrieChar   *volatile rootTrie = NULL;
	MemoryContext ccxt = CurrentMemoryContext;
	tsearch_readline_state trst;
	volatile bool skip;

	filename = get_tsearch_config_filename(filename, "rules");
	if (!tsearch_readline_begin(&trst, filename))
		ereport(ERROR,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("could not open unaccent file \"%s\": %m",
						filename)));

	do
	{
		/*
		 * pg_do_encoding_conversion() (called by tsearch_readline()) will
		 * emit exception if it finds untranslatable characters in current
		 * locale. We just skip such lines, continuing with the next.
		 */
		skip = true;

		PG_TRY();
		{
			char	   *line;

			while ((line = tsearch_readline(&trst)) != NULL)
			{
				/*----------
				 * The format of each line must be "src" or "src trg", where
				 * src and trg are sequences of one or more non-whitespace
				 * characters, separated by whitespace. Whitespace at start
				 * or end of line is ignored. If trg is omitted, an empty
				 * string is used as the replacement.
				 *
				 * We use a simple state machine, with states
				 * 0	initial (before src)
				 * 1	in src
				 * 2	in whitespace after src
				 * 3	in trg
				 * 4	in whitespace after trg
				 * -1	syntax error detected
				 *----------
				 */
				int			state;
				char	   *ptr;
				char	   *src = NULL;
				char	   *trg = NULL;
				int			ptrlen;
				int			srclen = 0;
				int			trglen = 0;

				state = 0;
				for (ptr = line; *ptr; ptr += ptrlen)
				{
					ptrlen = pg_mblen(ptr);
					/* ignore whitespace, but end src or trg */
					if (t_isspace(ptr))
					{
						if (state == 1)
							state = 2;
						else if (state == 3)
							state = 4;
						continue;
					}
					switch (state)
					{
						case 0:
							/* start of src */
							src = ptr;
							srclen = ptrlen;
							state = 1;
							break;
						case 1:
							/* continue src */
							srclen += ptrlen;
							break;
						case 2:
							/* start of trg */
							trg = ptr;
							trglen = ptrlen;
							state = 3;
							break;
						case 3:
							/* continue trg */
							trglen += ptrlen;
							break;
						default:
							/* bogus line format */
							state = -1;
							break;
					}
				}

				if (state == 1 || state == 2)
				{
					/* trg was omitted, so use "" */
					trg = "";
					trglen = 0;
				}

				if (state > 0)
					rootTrie = placeChar(rootTrie,
										 (unsigned char *) src, srclen,
										 trg, trglen);
				else if (state < 0)
					ereport(WARNING,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("invalid syntax: more than two strings in unaccent rule")));

				pfree(line);
			}
			skip = false;
		}
		PG_CATCH();
		{
			ErrorData  *errdata;
			MemoryContext ecxt;

			/* copy the error in the caller's context, then inspect it */
			ecxt = MemoryContextSwitchTo(ccxt);
			errdata = CopyErrorData();
			if (errdata->sqlerrcode == ERRCODE_UNTRANSLATABLE_CHARACTER)
			{
				/* skip this line and retry the loop ("skip" stays true) */
				FlushErrorState();
			}
			else
			{
				/* not an encoding problem: propagate it */
				MemoryContextSwitchTo(ecxt);
				PG_RE_THROW();
			}
		}
		PG_END_TRY();
	}
	while (skip);

	tsearch_readline_end(&trst);

	return rootTrie;
}
/**
 * @brief Entry point of the user-defined function for pg_bulkload.
 * @return Returns number of loaded tuples. If the case of errors, -1 will be
 * returned.
 *
 * Three phases: (1) parse options and build Reader/Writer, (2) pump tuples
 * from reader to writer inside the writer's memory context, (3) close both
 * and report statistics as a composite tuple.  Cleanup on error is handled
 * by two separate PG_TRY blocks (see the comment between them for why).
 */
Datum
pg_bulkload(PG_FUNCTION_ARGS)
{
	Reader	   *rd = NULL;
	Writer	   *wt = NULL;
	Datum		options;
	MemoryContext ctx;
	MemoryContext ccxt;
	PGRUsage	ru0;
	PGRUsage	ru1;
	int64		count;
	int64		parse_errors;
	int64		skip;
	WriterResult ret;
	char	   *start;
	char	   *end;
	float8		system;
	float8		user;
	float8		duration;
	TupleDesc	tupdesc;
	Datum		values[PG_BULKLOAD_COLS];
	bool		nulls[PG_BULKLOAD_COLS];
	HeapTuple	result;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	BULKLOAD_PROFILE_PUSH();

	pg_rusage_init(&ru0);

	/* must be the super user */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use pg_bulkload")));

	options = PG_GETARG_DATUM(0);

	/* remember the caller's context for error handling below */
	ccxt = CurrentMemoryContext;

	/*
	 * STEP 1: Initialization
	 */

	/* parse options and create reader and writer */
	ParseOptions(options, &rd, &wt, ru0.tv.tv_sec);

	/* initialize reader */
	ReaderInit(rd);

	/*
	 * We need to split PG_TRY block because gcc optimizes if-branches with
	 * longjmp codes too much. Local variables initialized in either branch
	 * cannot be handled another branch.
	 */
	PG_TRY();
	{
		/* truncate heap */
		if (wt->truncate)
			TruncateTable(wt->relid);

		/* initialize writer */
		WriterInit(wt);

		/* initialize checker */
		CheckerInit(&rd->checker, wt->rel, wt->tchecker);

		/* initialize parser */
		ParserInit(rd->parser, &rd->checker, rd->infile, wt->desc,
				   wt->multi_process, PG_GET_COLLATION());
	}
	PG_CATCH();
	{
		/* close whichever halves were opened, then propagate */
		if (rd)
			ReaderClose(rd, true);
		if (wt)
			WriterClose(wt, true);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* No throwable codes here! */

	PG_TRY();
	{
		/* create logger */
		CreateLogger(rd->logfile, wt->verbose, rd->infile[0] == ':');

		start = timeval_to_cstring(ru0.tv);
		LoggerLog(INFO, "\npg_bulkload %s on %s\n\n",
				  PG_BULKLOAD_VERSION, start);

		ReaderDumpParams(rd);
		WriterDumpParams(wt);
		LoggerLog(INFO, "\n");

		BULKLOAD_PROFILE(&prof_init);

		/*
		 * STEP 2: Build heap
		 */

		/* Switch into its memory context */
		Assert(wt->context);
		ctx = MemoryContextSwitchTo(wt->context);

		/* Loop for each input file record. */
		while (wt->count < rd->limit)
		{
			HeapTuple	tuple;

			CHECK_FOR_INTERRUPTS();

			/* read tuple */
			BULKLOAD_PROFILE_PUSH();
			tuple = ReaderNext(rd);
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_reader);
			if (tuple == NULL)
				break;

			/* write tuple */
			BULKLOAD_PROFILE_PUSH();
			WriterInsert(wt, tuple);
			wt->count += 1;
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_writer);

			/* per-tuple allocations live only for one iteration */
			MemoryContextReset(wt->context);
			BULKLOAD_PROFILE(&prof_reset);
		}
		MemoryContextSwitchTo(ctx);

		/*
		 * STEP 3: Finalize heap and merge indexes
		 */

		count = wt->count;
		parse_errors = rd->parse_errors;

		/*
		 * close writer first and reader second because shmem_exit callback
		 * is managed by a simple stack.
		 */
		ret = WriterClose(wt, false);
		wt = NULL;
		skip = ReaderClose(rd, false);
		rd = NULL;
	}
	PG_CATCH();
	{
		ErrorData  *errdata;
		MemoryContext ecxt;

		/* log the error message before cleaning up */
		ecxt = MemoryContextSwitchTo(ccxt);
		errdata = CopyErrorData();
		LoggerLog(INFO, "%s\n", errdata->message);
		FreeErrorData(errdata);

		/* close writer first, and reader second */
		if (wt)
			WriterClose(wt, true);
		if (rd)
			ReaderClose(rd, true);

		MemoryContextSwitchTo(ecxt);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* rows replaced by ON_DUPLICATE handling are not "loaded" rows */
	count -= ret.num_dup_new;

	LoggerLog(INFO, "\n"
			  " " int64_FMT " Rows skipped.\n"
			  " " int64_FMT " Rows successfully loaded.\n"
			  " " int64_FMT " Rows not loaded due to parse errors.\n"
			  " " int64_FMT " Rows not loaded due to duplicate errors.\n"
			  " " int64_FMT " Rows replaced with new rows.\n\n",
			  skip, count, parse_errors, ret.num_dup_new, ret.num_dup_old);

	pg_rusage_init(&ru1);
	system = diffTime(ru1.ru.ru_stime, ru0.ru.ru_stime);
	user = diffTime(ru1.ru.ru_utime, ru0.ru.ru_utime);
	duration = diffTime(ru1.tv, ru0.tv);
	end = timeval_to_cstring(ru1.tv);

	/* assemble the result row: counts, then CPU/elapsed timings */
	memset(nulls, 0, sizeof(nulls));
	values[0] = Int64GetDatum(skip);
	values[1] = Int64GetDatum(count);
	values[2] = Int64GetDatum(parse_errors);
	values[3] = Int64GetDatum(ret.num_dup_new);
	values[4] = Int64GetDatum(ret.num_dup_old);
	values[5] = Float8GetDatumFast(system);
	values[6] = Float8GetDatumFast(user);
	values[7] = Float8GetDatumFast(duration);

	LoggerLog(INFO,
			  "Run began on %s\n"
			  "Run ended on %s\n\n"
			  "CPU %.2fs/%.2fu sec elapsed %.2f sec\n",
			  start, end, system, user, duration);

	LoggerClose();

	result = heap_form_tuple(tupdesc, values, nulls);

	BULKLOAD_PROFILE(&prof_fini);
	BULKLOAD_PROFILE_POP();
	BULKLOAD_PROFILE_PRINT();

	PG_RETURN_DATUM(HeapTupleGetDatum(result));
}
/*
 * A function having everything to do with logging, which ought to be factored
 * out one day to make a start on the Thoughts-on-logging wiki ideas.
 *
 * Re-emits the error currently being handled at a different severity
 * `level`, adjusting the SQLSTATE so it remains category-consistent with
 * the new severity.  On PG >= 9.5 this uses ThrowErrorData(); older
 * versions rebuild the report field by field via errstart()/errfinish().
 */
static void
reLogWithChangedLevel(int level)
{
	ErrorData  *edata = CopyErrorData();
	int			sqlstate = edata->sqlerrcode;
	int			category = ERRCODE_TO_CATEGORY(sqlstate);

	FlushErrorState();

	/*
	 * Make the SQLSTATE match the new level: below WARNING force
	 * "successful completion"; at WARNING force a warning-class code;
	 * above WARNING a warning/no-data/success code becomes internal_error.
	 */
	if (WARNING > level)
	{
		if (ERRCODE_SUCCESSFUL_COMPLETION != category)
			sqlstate = ERRCODE_SUCCESSFUL_COMPLETION;
	}
	else if (WARNING == level)
	{
		if (ERRCODE_WARNING != category && ERRCODE_NO_DATA != category)
			sqlstate = ERRCODE_WARNING;
	}
	else if (ERRCODE_WARNING == category || ERRCODE_NO_DATA == category ||
			 ERRCODE_SUCCESSFUL_COMPLETION == category)
		sqlstate = ERRCODE_INTERNAL_ERROR;

#if PG_VERSION_NUM >= 90500
	edata->elevel = level;
	edata->sqlerrcode = sqlstate;
	PG_TRY();
	{
		ThrowErrorData(edata);
	}
	PG_CATCH();
	{
		FreeErrorData(edata);	/* otherwise this wouldn't happen in ERROR case */
		PG_RE_THROW();
	}
	PG_END_TRY();
	FreeErrorData(edata);
#else
	/* pre-9.5: rebuild the report manually, field by field */
	if (!errstart(level, edata->filename, edata->lineno,
				  edata->funcname, NULL))
	{
		/* errstart says this level won't be reported; nothing to emit */
		FreeErrorData(edata);
		return;
	}
	errcode(sqlstate);
	if (edata->message)
		errmsg("%s", edata->message);
	if (edata->detail)
		errdetail("%s", edata->detail);
	if (edata->detail_log)
		errdetail_log("%s", edata->detail_log);
	if (edata->hint)
		errhint("%s", edata->hint);
	if (edata->context)
		errcontext("%s", edata->context);	/* this may need to be trimmed */
#if PG_VERSION_NUM >= 90300
	if (edata->schema_name)
		err_generic_string(PG_DIAG_SCHEMA_NAME, edata->schema_name);
	if (edata->table_name)
		err_generic_string(PG_DIAG_TABLE_NAME, edata->table_name);
	if (edata->column_name)
		err_generic_string(PG_DIAG_COLUMN_NAME, edata->column_name);
	if (edata->datatype_name)
		err_generic_string(PG_DIAG_DATATYPE_NAME, edata->datatype_name);
	if (edata->constraint_name)
		err_generic_string(PG_DIAG_CONSTRAINT_NAME, edata->constraint_name);
#endif
	if (edata->internalquery)
		internalerrquery(edata->internalquery);
	FreeErrorData(edata);
	errfinish(0);
#endif
}
PgQueryInternalParsetreeAndError pg_query_raw_parse(const char* input) { PgQueryInternalParsetreeAndError result = {0}; MemoryContext parse_context = CurrentMemoryContext; char stderr_buffer[STDERR_BUFFER_LEN + 1] = {0}; #ifndef DEBUG int stderr_global; int stderr_pipe[2]; #endif #ifndef DEBUG // Setup pipe for stderr redirection if (pipe(stderr_pipe) != 0) { PgQueryError* error = malloc(sizeof(PgQueryError)); error->message = strdup("Failed to open pipe, too many open file descriptors") result.error = error; return result; } fcntl(stderr_pipe[0], F_SETFL, fcntl(stderr_pipe[0], F_GETFL) | O_NONBLOCK); // Redirect stderr to the pipe stderr_global = dup(STDERR_FILENO); dup2(stderr_pipe[1], STDERR_FILENO); close(stderr_pipe[1]); #endif PG_TRY(); { result.tree = raw_parser(input); #ifndef DEBUG // Save stderr for result read(stderr_pipe[0], stderr_buffer, STDERR_BUFFER_LEN); #endif result.stderr_buffer = strdup(stderr_buffer); } PG_CATCH(); { ErrorData* error_data; PgQueryError* error; MemoryContextSwitchTo(parse_context); error_data = CopyErrorData(); // Note: This is intentionally malloc so exiting the memory context doesn't free this error = (PgQueryError *)malloc(sizeof(PgQueryError)); error->message = strdup(error_data->message); error->filename = strdup(error_data->filename); error->lineno = error_data->lineno; error->cursorpos = error_data->cursorpos; result.error = error; FlushErrorState(); } PG_END_TRY(); #ifndef DEBUG // Restore stderr, close pipe dup2(stderr_global, STDERR_FILENO); close(stderr_pipe[0]); close(stderr_global); #endif return result; }
/*
 * pgsynck - set-returning function that syntax-checks each statement in the
 * input text.  For every statement it emits one row: the statement itself,
 * then either zeros/empty strings (parsed OK) or the cursor position,
 * SQLSTATE, message, and hint captured from the parse error.
 */
Datum
pgsynck(PG_FUNCTION_ARGS)
{
	/* List *raw_parsetree_list = NULL; */
	char	   *query_string = NULL;
	char	   *oneq = NULL;
	char	   *q;
	ErrorData  *edata = NULL;
	MemoryContext oldcontext = CurrentMemoryContext;
	MemoryContext per_query_ctx;
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	TupleDesc	tupdesc;
	Tuplestorestate *tupstore;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* the tuplestore must live in the per-query context */
	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	tupstore = tuplestore_begin_heap(true, false, work_mem);
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;

	MemoryContextSwitchTo(oldcontext);

	q = query_string = text_to_cstring(PG_GETARG_TEXT_PP(0));
	oneq = get_one_query(&q);

	/* one output row per statement in the input text */
	while (oneq != NULL)
	{
		int			j = 0;
		Datum		values[PGSYNCK_COLS];
		bool		nulls[PGSYNCK_COLS];

		memset(values, 0, sizeof(values));
		memset(nulls, 0, sizeof(nulls));

		/* sql */
		values[j++] = CStringGetTextDatum(oneq);

		PG_TRY();
		{
			raw_parser(oneq);

			/* cursorpos */
			values[j++] = Int32GetDatum(0);

			/* sqlerrcode */
			values[j++] = Int32GetDatum(0);

			/* message - primary error message */
			values[j++] = CStringGetTextDatum("");

			/* hint - hint message */
			values[j++] = CStringGetTextDatum("");

			tuplestore_putvalues(tupstore, tupdesc, values, nulls);
		}
		PG_CATCH();
		{
			/* Save error info */
			MemoryContextSwitchTo(oldcontext);
			edata = CopyErrorData();
			FlushErrorState();

			/* cursorpos - cursor index into query string */
			values[j++] = Int32GetDatum(edata->cursorpos);

			/* sqlerrcode - encoded ERRSTATE */
			values[j++] = Int32GetDatum(edata->sqlerrcode);

			/* message - primary error message */
			values[j++] = CStringGetTextDatum(edata->message ? edata->message : "");

			/* hint - hint message */
			values[j++] = CStringGetTextDatum(edata->hint ? edata->hint : "");

			tuplestore_putvalues(tupstore, tupdesc, values, nulls);
			FreeErrorData(edata);
		}
		PG_END_TRY();

		oneq = get_one_query(&q);
	}

	/* clean up and return the tuplestore */
	tuplestore_donestoring(tupstore);

	return (Datum) 0;
}
/**
 * @brief Read the next tuple from parser.
 * @param rd [in/out] reader
 * @return type
 *
 * Parse errors in individual records are absorbed (logged, counted, and the
 * bad record dumped to the parse bad file) as long as parsing_field has been
 * set; the loop then retries with the next record.  Query aborts and errors
 * raised outside field parsing are always re-thrown.
 */
HeapTuple
ReaderNext(Reader *rd)
{
	HeapTuple	tuple;
	MemoryContext ccxt;
	bool		eof;
	Parser	   *parser = rd->parser;

	ccxt = CurrentMemoryContext;

	eof = false;
	do
	{
		tuple = NULL;
		parser->parsing_field = -1;

		PG_TRY();
		{
			tuple = ParserRead(parser, &rd->checker);
			if (tuple == NULL)
				eof = true;
			else
			{
				tuple = CheckerTuple(&rd->checker, tuple, &parser->parsing_field);
				CheckerConstraints(&rd->checker, tuple, &parser->parsing_field);
			}
		}
		PG_CATCH();
		{
			ErrorData  *errdata;
			MemoryContext ecxt;
			char	   *message;
			StringInfoData buf;

			if (parser->parsing_field < 0)
				PG_RE_THROW();	/* should not ignore */

			ecxt = MemoryContextSwitchTo(ccxt);
			errdata = CopyErrorData();

			/* We cannot ignore query aborts. */
			switch (errdata->sqlerrcode)
			{
				case ERRCODE_ADMIN_SHUTDOWN:
				case ERRCODE_QUERY_CANCELED:
					MemoryContextSwitchTo(ecxt);
					PG_RE_THROW();
					break;
			}

			/* Absorb parse errors. */
			rd->parse_errors++;
			if (errdata->message)
				message = pstrdup(errdata->message);
			else
				message = "<no error message>";
			FlushErrorState();
			FreeErrorData(errdata);

			initStringInfo(&buf);
			appendStringInfo(&buf, "Parse error Record " int64_FMT
							 ": Input Record " int64_FMT ": Rejected",
							 rd->parse_errors, parser->count);
			if (parser->parsing_field > 0)
				appendStringInfo(&buf, " - column %d", parser->parsing_field);
			appendStringInfo(&buf, ". %s\n", message);

			LoggerLog(WARNING, buf.data);

			/* Terminate if PARSE_ERRORS has been reached. */
			if (rd->parse_errors > rd->max_parse_errors)
			{
				eof = true;
				LoggerLog(WARNING,
						  "Maximum parse error count exceeded - " int64_FMT
						  " error(s) found in input file\n",
						  rd->parse_errors);
			}

			/* output parse bad file. (opened lazily on first bad record) */
			if (rd->parse_fp == NULL)
				if ((rd->parse_fp = AllocateFile(rd->parse_badfile, "w")) == NULL)
					ereport(ERROR,
							(errcode_for_file_access(),
							 errmsg("could not open parse bad file \"%s\": %m",
									rd->parse_badfile)));

			ParserDumpRecord(parser, rd->parse_fp, rd->parse_badfile);

			MemoryContextReset(ccxt);
			// Without the below line, the regression tests shows the different result on debug-build mode.
			tuple = NULL;
		}
		PG_END_TRY();
	} while (!eof && !tuple);

	BULKLOAD_PROFILE(&prof_reader_parser);

	return tuple;
}
/*
 * dbms_utility_format_call_stack - build a textual PL/pgSQL call stack.
 *
 * Forces error-data collection (errstart plus running the error context
 * callbacks) so that edata->context holds the PL/pgSQL traceback, then
 * re-parses that text into a more traditional call-stack layout.
 *
 * mode 'o': columns with a header, function oid printed in hex
 * mode 'p': columns without a header, function oid printed in decimal
 * mode 's': comma-separated "oid,line,name"
 */
static char* dbms_utility_format_call_stack(char mode)
{
	MemoryContext oldcontext = CurrentMemoryContext;
	ErrorData  *edata;
	ErrorContextCallback *econtext;
	StringInfo	sinfo;

#if PG_VERSION_NUM >= 80400
	errstart(ERROR, __FILE__, __LINE__, PG_FUNCNAME_MACRO, TEXTDOMAIN);
#else
	errstart(ERROR, __FILE__, __LINE__, PG_FUNCNAME_MACRO);
#endif

	MemoryContextSwitchTo(oldcontext);

	/* Invoke the callbacks so they append the PL/pgSQL context text. */
	for (econtext = error_context_stack;
		 econtext != NULL;
		 econtext = econtext->previous)
		(*econtext->callback) (econtext->arg);

	edata = CopyErrorData();
	FlushErrorState();

	/* Now we want to parse edata->context into a more traditional format. */
	sinfo = makeStringInfo();

	switch (mode)
	{
		case 'o':
			appendStringInfoString(sinfo, "----- PL/pgSQL Call Stack -----\n");
			appendStringInfoString(sinfo, " object line object\n");
			appendStringInfoString(sinfo, " handle number name\n");
			break;
	}

	if (edata->context)
	{
		char	   *start = edata->context;

		while (*start)
		{
			char	   *oname = "anonymous object";
			char	   *line = "";
			char	   *eol = strchr(start, '\n');
			Oid			fnoid = InvalidOid;

			/* first, split off one line of the multiline context */
			if (eol)
				*eol = '\0';

			/* recognize the line format before extracting pieces */
			if (strncmp(start, "PL/pgSQL function ", 18) == 0)
			{
				char	   *p1,
						   *p2;

				/* quoted form: function "name" */
				if ((p1 = strstr(start, "function \"")))
				{
					p1 += strlen("function \"");
					if ((p2 = strchr(p1, '"')))
					{
						*p2++ = '\0';
						oname = p1;
						start = p2;
					}
				}
				/* unquoted form: function name(args) — resolve to an OID */
				else if ((p1 = strstr(start, "function ")))
				{
					p1 += strlen("function ");
					if ((p2 = strchr(p1, ')')))
					{
						char		c = *++p2;

						/* temporarily terminate to feed regprocedurein */
						*p2 = '\0';
						oname = pstrdup(p1);
						fnoid = DatumGetObjectId(DirectFunctionCall1(regprocedurein,
												 CStringGetDatum(oname)));
						*p2 = c;
						start = p2;
					}
				}

				if ((p1 = strstr(start, "line ")))
				{
					int			p2i;
					char		c;

					p1 += strlen("line ");
					p2i = strspn(p1, "0123456789");

					/* safe separator */
					c = p1[p2i];
					p1[p2i] = '\0';
					line = pstrdup(p1);
					p1[p2i] = c;
					start = p1 + p2i;
				}
			}

			switch (mode)
			{
				case 'o':
					appendStringInfo(sinfo, "%8x %5s function %s", (int) fnoid, line, oname);
					break;
				case 'p':
					appendStringInfo(sinfo, "%8d %5s function %s",
									 (int) fnoid, line, oname);
					break;
				case 's':
					appendStringInfo(sinfo, "%d,%s,%s", (int) fnoid, line, oname);
					break;
			}

			if (eol)
			{
				start = eol + 1;
				appendStringInfoChar(sinfo, '\n');
			}
			else
				break;
		}
	}

	return sinfo->data;
}
/*
 * PLy_output - shared implementation of plpy.debug/log/info/notice/
 * warning/error/fatal.
 *
 * Builds the message from the positional arguments (a single argument is
 * stringified directly to avoid the undesirable ('tuple',) decoration),
 * accepts the standard error-report fields as keyword arguments, validates
 * their encoding, and reports via ereport() at the given level.  An error
 * thrown by ereport (always, for level >= ERROR) is caught and converted
 * into a Python exception.
 *
 * Returns Py_None on success, or NULL with a Python exception set.
 */
static PyObject *
PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw)
{
	int			sqlstate = 0;
	/* volatile: these survive a longjmp out of the PG_TRY block below */
	char	   *volatile sqlstatestr = NULL;
	char	   *volatile message = NULL;
	char	   *volatile detail = NULL;
	char	   *volatile hint = NULL;
	char	   *volatile column_name = NULL;
	char	   *volatile constraint_name = NULL;
	char	   *volatile datatype_name = NULL;
	char	   *volatile table_name = NULL;
	char	   *volatile schema_name = NULL;
	volatile MemoryContext oldcontext;
	PyObject   *key,
			   *value;
	PyObject   *volatile so;
	Py_ssize_t	pos = 0;

	if (PyTuple_Size(args) == 1)
	{
		/*
		 * Treat single argument specially to avoid undesirable ('tuple',)
		 * decoration.
		 */
		PyObject   *o;

		if (!PyArg_UnpackTuple(args, "plpy.elog", 1, 1, &o))
			PLy_elog(ERROR, "could not unpack arguments in plpy.elog");
		so = PyObject_Str(o);
	}
	else
		so = PyObject_Str(args);

	if (so == NULL || ((message = PyString_AsString(so)) == NULL))
	{
		level = ERROR;
		message = dgettext(TEXTDOMAIN, "could not parse error message in plpy.elog");
	}
	/* copy into palloc'd storage before dropping the Python string */
	message = pstrdup(message);

	Py_XDECREF(so);

	if (kw != NULL)
	{
		while (PyDict_Next(kw, &pos, &key, &value))
		{
			char	   *keyword = PyString_AsString(key);

			if (strcmp(keyword, "message") == 0)
			{
				/* the message should not be overwritten */
				if (PyTuple_Size(args) != 0)
				{
					PLy_exception_set(PyExc_TypeError, "Argument 'message' given by name and position");
					return NULL;
				}

				if (message)
					pfree(message);
				message = object_to_string(value);
			}
			else if (strcmp(keyword, "detail") == 0)
				detail = object_to_string(value);
			else if (strcmp(keyword, "hint") == 0)
				hint = object_to_string(value);
			else if (strcmp(keyword, "sqlstate") == 0)
				sqlstatestr = object_to_string(value);
			else if (strcmp(keyword, "schema_name") == 0)
				schema_name = object_to_string(value);
			else if (strcmp(keyword, "table_name") == 0)
				table_name = object_to_string(value);
			else if (strcmp(keyword, "column_name") == 0)
				column_name = object_to_string(value);
			else if (strcmp(keyword, "datatype_name") == 0)
				datatype_name = object_to_string(value);
			else if (strcmp(keyword, "constraint_name") == 0)
				constraint_name = object_to_string(value);
			else
			{
				PLy_exception_set(PyExc_TypeError,
								  "'%s' is an invalid keyword argument for this function",
								  keyword);
				return NULL;
			}
		}
	}

	if (sqlstatestr != NULL)
	{
		/* a SQLSTATE is exactly five chars from [0-9A-Z] */
		if (strlen(sqlstatestr) != 5)
		{
			PLy_exception_set(PyExc_ValueError, "invalid SQLSTATE code");
			return NULL;
		}

		if (strspn(sqlstatestr, "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ") != 5)
		{
			PLy_exception_set(PyExc_ValueError, "invalid SQLSTATE code");
			return NULL;
		}

		sqlstate = MAKE_SQLSTATE(sqlstatestr[0],
								 sqlstatestr[1],
								 sqlstatestr[2],
								 sqlstatestr[3],
								 sqlstatestr[4]);
	}

	oldcontext = CurrentMemoryContext;
	PG_TRY();
	{
		/* reject strings that are invalid in the server encoding */
		if (message != NULL)
			pg_verifymbstr(message, strlen(message), false);
		if (detail != NULL)
			pg_verifymbstr(detail, strlen(detail), false);
		if (hint != NULL)
			pg_verifymbstr(hint, strlen(hint), false);
		if (schema_name != NULL)
			pg_verifymbstr(schema_name, strlen(schema_name), false);
		if (table_name != NULL)
			pg_verifymbstr(table_name, strlen(table_name), false);
		if (column_name != NULL)
			pg_verifymbstr(column_name, strlen(column_name), false);
		if (datatype_name != NULL)
			pg_verifymbstr(datatype_name, strlen(datatype_name), false);
		if (constraint_name != NULL)
			pg_verifymbstr(constraint_name, strlen(constraint_name), false);

		ereport(level,
				((sqlstate != 0) ? errcode(sqlstate) : 0,
				 (message != NULL) ? errmsg_internal("%s", message) : 0,
				 (detail != NULL) ? errdetail_internal("%s", detail) : 0,
				 (hint != NULL) ? errhint("%s", hint) : 0,
				 (column_name != NULL) ?
				 err_generic_string(PG_DIAG_COLUMN_NAME, column_name) : 0,
				 (constraint_name != NULL) ?
				 err_generic_string(PG_DIAG_CONSTRAINT_NAME, constraint_name) : 0,
				 (datatype_name != NULL) ?
				 err_generic_string(PG_DIAG_DATATYPE_NAME, datatype_name) : 0,
				 (table_name != NULL) ?
				 err_generic_string(PG_DIAG_TABLE_NAME, table_name) : 0,
				 (schema_name != NULL) ?
				 err_generic_string(PG_DIAG_SCHEMA_NAME, schema_name) : 0));
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		MemoryContextSwitchTo(oldcontext);
		edata = CopyErrorData();
		FlushErrorState();

		/* turn the caught elog/ereport error into a Python exception */
		PLy_exception_set_with_details(PLy_exc_error, edata);
		FreeErrorData(edata);

		return NULL;
	}
	PG_END_TRY();

	/*
	 * return a legal object so the interpreter will continue on its merry way
	 */
	Py_INCREF(Py_None);
	return Py_None;
}
SV * plperl_spi_query(char *query) { SV *cursor; /* * Execute the query inside a sub-transaction, so we can cope with errors * sanely */ MemoryContext oldcontext = CurrentMemoryContext; ResourceOwner oldowner = CurrentResourceOwner; BeginInternalSubTransaction(NULL); /* Want to run inside function's memory context */ MemoryContextSwitchTo(oldcontext); PG_TRY(); { void *plan; Portal portal = NULL; /* Create a cursor for the query */ plan = SPI_prepare(query, 0, NULL); if (plan) portal = SPI_cursor_open(NULL, plan, NULL, NULL, false); if (portal) cursor = newSVpv(portal->name, 0); else cursor = newSV(0); /* Commit the inner transaction, return to outer xact context */ ReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; /* * AtEOSubXact_SPI() should not have popped any SPI context, but just * in case it did, make sure we remain connected. */ SPI_restore_connection(); } PG_CATCH(); { ErrorData *edata; /* Save error info */ MemoryContextSwitchTo(oldcontext); edata = CopyErrorData(); FlushErrorState(); /* Abort the inner transaction */ RollbackAndReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; /* * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will * have left us in a disconnected state. We need this hack to return * to connected state. */ SPI_restore_connection(); /* Punt the error to Perl */ croak("%s", edata->message); /* Can't get here, but keep compiler quiet */ return NULL; } PG_END_TRY(); return cursor; }
/* * initSuffixTree - create suffix tree from file. Function converts * UTF8-encoded file into current encoding. */ static SuffixChar * initSuffixTree(char *filename) { SuffixChar *volatile rootSuffixTree = NULL; MemoryContext ccxt = CurrentMemoryContext; tsearch_readline_state trst; volatile bool skip; filename = get_tsearch_config_filename(filename, "rules"); if (!tsearch_readline_begin(&trst, filename)) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not open unaccent file \"%s\": %m", filename))); do { char src[4096]; char trg[4096]; int srclen; int trglen; char *line = NULL; skip = true; PG_TRY(); { /* * pg_do_encoding_conversion() (called by tsearch_readline()) will * emit exception if it finds untranslatable characters in current * locale. We just skip such characters. */ while ((line = tsearch_readline(&trst)) != NULL) { if (sscanf(line, "%s\t%s\n", src, trg) != 2) continue; srclen = strlen(src); trglen = strlen(trg); rootSuffixTree = placeChar(rootSuffixTree, (unsigned char *) src, srclen, trg, trglen); skip = false; pfree(line); } } PG_CATCH(); { ErrorData *errdata; MemoryContext ecxt; ecxt = MemoryContextSwitchTo(ccxt); errdata = CopyErrorData(); if (errdata->sqlerrcode == ERRCODE_UNTRANSLATABLE_CHARACTER) { FlushErrorState(); } else { MemoryContextSwitchTo(ecxt); PG_RE_THROW(); } } PG_END_TRY(); } while (skip); tsearch_readline_end(&trst); return rootSuffixTree; }
HV * plperl_spi_exec(char *query, int limit) { HV *ret_hv; /* * Execute the query inside a sub-transaction, so we can cope with errors * sanely */ MemoryContext oldcontext = CurrentMemoryContext; ResourceOwner oldowner = CurrentResourceOwner; BeginInternalSubTransaction(NULL); /* Want to run inside function's memory context */ MemoryContextSwitchTo(oldcontext); PG_TRY(); { int spi_rv; spi_rv = SPI_execute(query, plperl_current_prodesc->fn_readonly, limit); ret_hv = plperl_spi_execute_fetch_result(SPI_tuptable, SPI_processed, spi_rv); /* Commit the inner transaction, return to outer xact context */ ReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; /* * AtEOSubXact_SPI() should not have popped any SPI context, but just * in case it did, make sure we remain connected. */ SPI_restore_connection(); } PG_CATCH(); { ErrorData *edata; /* Save error info */ MemoryContextSwitchTo(oldcontext); edata = CopyErrorData(); FlushErrorState(); /* Abort the inner transaction */ RollbackAndReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; /* * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will * have left us in a disconnected state. We need this hack to return * to connected state. */ SPI_restore_connection(); /* Punt the error to Perl */ croak("%s", edata->message); /* Can't get here, but keep compiler quiet */ return NULL; } PG_END_TRY(); return ret_hv; }
/*
 * lint_func_beg - run the lint checker over a function at execution start.
 *
 * The lint walk (lint_stmt) modifies record and scalar datums in the
 * execution state, so their contents are saved beforehand and restored
 * afterwards.  Any error raised during the walk is re-thrown with its
 * sqlerrcode forced to QUERY_CANCELED so an outer exception handler
 * cannot trap it.
 */
static void
lint_func_beg( PLpgSQL_execstate * estate, PLpgSQL_function * func )
{
	const char *err_text = estate->err_text;

	if (plpgsql_lint_enable)
	{
		int			i;
		PLpgSQL_rec *saved_records;
		PLpgSQL_var *saved_vars;
		MemoryContext oldcontext;
		ResourceOwner oldowner;

		/*
		 * The rec and var datums are modified during the lint walk, so we
		 * must save their contents first.  (Entries of other dtypes are
		 * left uninitialized and never read back.)
		 */
		saved_records = palloc(sizeof(PLpgSQL_rec) * estate->ndatums);
		saved_vars = palloc(sizeof(PLpgSQL_var) * estate->ndatums);

		for (i = 0; i < estate->ndatums; i++)
		{
			if (estate->datums[i]->dtype == PLPGSQL_DTYPE_REC)
			{
				PLpgSQL_rec *rec = (PLpgSQL_rec *) estate->datums[i];

				saved_records[i].tup = rec->tup;
				saved_records[i].tupdesc = rec->tupdesc;
				saved_records[i].freetup = rec->freetup;
				saved_records[i].freetupdesc = rec->freetupdesc;

				/* don't release the original tupdesc and original tup */
				rec->freetup = false;
				rec->freetupdesc = false;
			}
			else if (estate->datums[i]->dtype == PLPGSQL_DTYPE_VAR)
			{
				PLpgSQL_var *var = (PLpgSQL_var *) estate->datums[i];

				saved_vars[i].value = var->value;
				saved_vars[i].isnull = var->isnull;
				saved_vars[i].freeval = var->freeval;

				var->freeval = false;
			}
		}

		estate->err_text = NULL;

		/*
		 * A raised exception would normally be trappable in an outer
		 * function; converting it to QUERY_CANCELED protects against
		 * such an outer trap.
		 */
		oldcontext = CurrentMemoryContext;
		oldowner = CurrentResourceOwner;

		PG_TRY();
		{
			lint_stmt(estate, func, (PLpgSQL_stmt *) func->action);
		}
		PG_CATCH();
		{
			ErrorData  *edata;

			/* Save error info */
			MemoryContextSwitchTo(oldcontext);
			edata = CopyErrorData();
			FlushErrorState();

			CurrentResourceOwner = oldowner;

			/* force an untrappable error code and re-throw */
			edata->sqlerrcode = ERRCODE_QUERY_CANCELED;
			ReThrowError(edata);
		}
		PG_END_TRY();

		estate->err_text = err_text;
		estate->err_stmt = NULL;

		/* restore the original rec and var variables */
		for (i = 0; i < estate->ndatums; i++)
		{
			if (estate->datums[i]->dtype == PLPGSQL_DTYPE_REC)
			{
				PLpgSQL_rec *rec = (PLpgSQL_rec *) estate->datums[i];

				/* drop any tupdesc the lint walk left behind */
				if (rec->freetupdesc)
					FreeTupleDesc(rec->tupdesc);

				rec->tup = saved_records[i].tup;
				rec->tupdesc = saved_records[i].tupdesc;
				rec->freetup = saved_records[i].freetup;
				rec->freetupdesc = saved_records[i].freetupdesc;
			}
			else if (estate->datums[i]->dtype == PLPGSQL_DTYPE_VAR)
			{
				PLpgSQL_var *var = (PLpgSQL_var *) estate->datums[i];

				var->value = saved_vars[i].value;
				var->isnull = saved_vars[i].isnull;
				var->freeval = saved_vars[i].freeval;
			}
		}

		pfree(saved_records);
		pfree(saved_vars);
	}
}
SV * plperl_spi_fetchrow(char *cursor) { SV *row; /* * Execute the FETCH inside a sub-transaction, so we can cope with errors * sanely */ MemoryContext oldcontext = CurrentMemoryContext; ResourceOwner oldowner = CurrentResourceOwner; BeginInternalSubTransaction(NULL); /* Want to run inside function's memory context */ MemoryContextSwitchTo(oldcontext); PG_TRY(); { Portal p = SPI_cursor_find(cursor); if (!p) row = newSV(0); else { SPI_cursor_fetch(p, true, 1); if (SPI_processed == 0) { SPI_cursor_close(p); row = newSV(0); } else { row = plperl_hash_from_tuple(SPI_tuptable->vals[0], SPI_tuptable->tupdesc); } SPI_freetuptable(SPI_tuptable); } /* Commit the inner transaction, return to outer xact context */ ReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; /* * AtEOSubXact_SPI() should not have popped any SPI context, but just * in case it did, make sure we remain connected. */ SPI_restore_connection(); } PG_CATCH(); { ErrorData *edata; /* Save error info */ MemoryContextSwitchTo(oldcontext); edata = CopyErrorData(); FlushErrorState(); /* Abort the inner transaction */ RollbackAndReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; /* * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will * have left us in a disconnected state. We need this hack to return * to connected state. */ SPI_restore_connection(); /* Punt the error to Perl */ croak("%s", edata->message); /* Can't get here, but keep compiler quiet */ return NULL; } PG_END_TRY(); return row; }
/*
 * initSuffixTree - create suffix tree from file. Function converts
 * UTF8-encoded file into current encoding.
 *
 * Lines are parsed with a small state machine so that any run of
 * whitespace separates the "src" and "trg" tokens; lines with fewer or
 * more than two tokens are silently ignored.
 */
static SuffixChar *
initSuffixTree(char *filename)
{
	SuffixChar *volatile rootSuffixTree = NULL;
	MemoryContext ccxt = CurrentMemoryContext;
	tsearch_readline_state trst;
	volatile bool skip;

	filename = get_tsearch_config_filename(filename, "rules");
	if (!tsearch_readline_begin(&trst, filename))
		ereport(ERROR,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("could not open unaccent file \"%s\": %m",
						filename)));

	do
	{
		/*
		 * pg_do_encoding_conversion() (called by tsearch_readline()) will
		 * emit exception if it finds untranslatable characters in current
		 * locale. We just skip such lines, continuing with the next.
		 */
		skip = true;

		PG_TRY();
		{
			char	   *line;

			while ((line = tsearch_readline(&trst)) != NULL)
			{
				/*
				 * The format of each line must be "src trg" where src and
				 * trg are sequences of one or more non-whitespace
				 * characters, separated by whitespace.  Whitespace at start
				 * or end of line is ignored.
				 */

				/*
				 * state: 0 = before src, 1 = in src, 2 = between src and
				 * trg, 3 = in trg, 4 = after trg, -1 = bogus (extra token)
				 */
				int			state;
				char	   *ptr;
				char	   *src = NULL;
				char	   *trg = NULL;
				int			ptrlen;
				int			srclen = 0;
				int			trglen = 0;

				state = 0;
				for (ptr = line; *ptr; ptr += ptrlen)
				{
					/* advance one (possibly multibyte) character at a time */
					ptrlen = pg_mblen(ptr);
					/* ignore whitespace, but end src or trg */
					if (t_isspace(ptr))
					{
						if (state == 1)
							state = 2;
						else if (state == 3)
							state = 4;
						continue;
					}
					switch (state)
					{
						case 0:
							/* start of src */
							src = ptr;
							srclen = ptrlen;
							state = 1;
							break;
						case 1:
							/* continue src */
							srclen += ptrlen;
							break;
						case 2:
							/* start of trg */
							trg = ptr;
							trglen = ptrlen;
							state = 3;
							break;
						case 3:
							/* continue trg */
							trglen += ptrlen;
							break;
						default:
							/* bogus line format */
							state = -1;
							break;
					}
				}

				/* state >= 3 means both src and trg were seen */
				if (state >= 3)
					rootSuffixTree = placeChar(rootSuffixTree,
											   (unsigned char *) src, srclen,
											   trg, trglen);

				pfree(line);
			}
			/* reached EOF without an exception: done */
			skip = false;
		}
		PG_CATCH();
		{
			ErrorData  *errdata;
			MemoryContext ecxt;

			ecxt = MemoryContextSwitchTo(ccxt);
			errdata = CopyErrorData();
			if (errdata->sqlerrcode == ERRCODE_UNTRANSLATABLE_CHARACTER)
			{
				/* absorb the error and retry the loop from the next line */
				FlushErrorState();
			}
			else
			{
				MemoryContextSwitchTo(ecxt);
				PG_RE_THROW();
			}
		}
		PG_END_TRY();
	} while (skip);

	tsearch_readline_end(&trst);

	return rootSuffixTree;
}