static pgutErrorData *
pgut_errinit(int elevel)
{
    int         save_errno = errno;
    pgutErrorData *edata = getErrorData();

    edata->elevel = elevel;
    edata->save_errno = save_errno;
    edata->ecode = (elevel >= ERROR ? 1 : 0);

    if (edata->msg.data)
        resetStringInfo(&edata->msg);
    else
        initStringInfo(&edata->msg);

    if (edata->detail.data)
        resetStringInfo(&edata->detail);
    else
        initStringInfo(&edata->detail);

    if (edata->hint.data)
        resetStringInfo(&edata->hint);
    else
        initStringInfo(&edata->hint);

    return edata;
}
static void
add_projection_desc_httpheader(CHURL_HEADERS headers,
                               ProjectionInfo *projInfo,
                               List *qualsAttributes)
{
    int         i;
    char        long_number[sizeof(int32) * 8];
    int        *varNumbers = projInfo->pi_varNumbers;
    StringInfoData formatter;

    initStringInfo(&formatter);

    /* Convert the number of projection columns to a string */
    pg_ltoa(list_length(projInfo->pi_targetlist) + list_length(qualsAttributes),
            long_number);
    churl_headers_append(headers, "X-GP-ATTRS-PROJ", long_number);

    for (i = 0; i < list_length(projInfo->pi_targetlist); i++)
    {
        int         number = varNumbers[i] - 1;

        pg_ltoa(number, long_number);
        resetStringInfo(&formatter);
        appendStringInfo(&formatter, "X-GP-ATTRS-PROJ-IDX");
        churl_headers_append(headers, formatter.data, long_number);
    }

    ListCell   *attribute = NULL;

    foreach(attribute, qualsAttributes)
    {
        AttrNumber  attrNumber = lfirst_int(attribute);

        pg_ltoa(attrNumber, long_number);
        resetStringInfo(&formatter);
        appendStringInfo(&formatter, "X-GP-ATTRS-PROJ-IDX");
        churl_headers_append(headers, formatter.data, long_number);
    }
}
/*
 * GetUniqueMatRelName
 *
 * Returns a unique name for the given CV's underlying materialization table
 */
char *
GetUniqueMatRelName(char *cvname, char *nspname)
{
    char       *relname = palloc0(NAMEDATALEN);
    int         i = 0;
    StringInfoData suffix;
    Oid         nspoid;

    if (nspname != NULL)
        nspoid = GetSysCacheOid1(NAMESPACENAME, CStringGetDatum(nspname));
    else
        nspoid = InvalidOid;

    initStringInfo(&suffix);
    strcpy(relname, cvname);

    while (true)
    {
        appendStringInfo(&suffix, "%s%d", CQ_TABLE_SUFFIX, i);
        append_suffix(relname, suffix.data, NAMEDATALEN);

        resetStringInfo(&suffix);

        if (!OidIsValid(get_relname_relid(relname, nspoid)))
            break;
    }

    return relname;
}
static rc
testStringInfo(void)
{
    StringInfo  str = makeStringInfo();

    appendStringInfoChar(str, 'a');
    ASSERT_EQUALS_STRING("a", str->data, "data is a");

    appendStringInfoString(str, "hello");
    ASSERT_EQUALS_STRING("ahello", str->data, "data is ahello");
    ASSERT_EQUALS_INT(6, str->len, "length is 6");

    for (int i = 0; i < 256; i++)
        appendStringInfoChar(str, 'b');
    ASSERT_EQUALS_INT(6 + 256, str->len, "length is 6 + 256");

    for (int i = 255; i < 256 + 6; i++)
        ASSERT_EQUALS_INT('b', str->data[i], "chars are all b");

    resetStringInfo(str);
    ASSERT_EQUALS_INT(0, str->len, "after reset length is 0");

    appendStringInfo(str, "%s", "test");
    ASSERT_EQUALS_STRING("test", str->data, "data is test");

    return PASS;
}
char *
getElementNodeStr(XMLCompNodeHdr element)
{
    XMLScanData textScan;
    XMLNodeHdr  textNode;
    StringInfoData si;

    initScanForTextNodes(&textScan, element);

    /*
     * Set the size to something smaller than what 'initStringInfo()' does
     */
    si.maxlen = 32;
    si.data = (char *) palloc(si.maxlen);
    resetStringInfo(&si);

    while ((textNode = getNextXMLNode(&textScan, false)) != NULL)
    {
        char       *cntPart = XNODE_CONTENT(textNode);

        appendStringInfoString(&si, cntPart);
    }
    finalizeScanForTextNodes(&textScan);
    return si.data;
}
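/*
 * Side note on the manual initialization above: resetStringInfo() only sets
 * len = 0 (and cursor = 0) and writes the terminating '\0', so a caller that
 * has already supplied data/maxlen ends up with a valid empty StringInfo
 * without initStringInfo()'s default 1024-byte allocation. A minimal
 * standalone sketch of the same pattern in plain C (buf_t and buf_reset()
 * are hypothetical stand-ins for StringInfoData and resetStringInfo()):
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    char       *data;
    int         len;
    int         maxlen;
} buf_t;

/* analogous to resetStringInfo(): assumes data/maxlen are already valid */
static void
buf_reset(buf_t *b)
{
    b->data[0] = '\0';
    b->len = 0;
}

int
main(void)
{
    buf_t       b;

    /* manual init with a smaller-than-default buffer, as in the snippet */
    b.maxlen = 32;
    b.data = malloc(b.maxlen);
    buf_reset(&b);              /* now a valid empty string */

    printf("len=%d data=\"%s\"\n", b.len, b.data);
    free(b.data);
    return 0;
}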
/* --------------------------------
 *      pq_getstring    - get a null terminated string from connection
 *
 *      The return value is placed in an expansible StringInfo, which has
 *      already been initialized by the caller.
 *
 *      This is used only for dealing with old-protocol clients. The idea
 *      is to produce a StringInfo that looks the same as we would get from
 *      pq_getmessage() with a newer client; we will then process it with
 *      pq_getmsgstring. Therefore, no character set conversion is done here,
 *      even though this is presumably useful only for text.
 *
 *      returns 0 if OK, EOF if trouble
 * --------------------------------
 */
int
pq_getstring(StringInfo s)
{
    int         i;

    resetStringInfo(s);

    /* Read until we get the terminating '\0' */
    for (;;)
    {
        while (PqRecvPointer >= PqRecvLength)
        {
            if (pq_recvbuf())   /* If nothing in buffer, then recv some */
                return EOF;     /* Failed to recv data */
        }

        for (i = PqRecvPointer; i < PqRecvLength; i++)
        {
            if (PqRecvBuffer[i] == '\0')
            {
                /* include the '\0' in the copy */
                appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer,
                                       i - PqRecvPointer + 1);
                PqRecvPointer = i + 1;  /* advance past \0 */
                return 0;
            }
        }

        /* If we're here we haven't got the \0 in the buffer yet. */
        appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer,
                               PqRecvLength - PqRecvPointer);
        PqRecvPointer = PqRecvLength;
    }
}
/* Discard accumulated COPY line */
static void
CopyClear(void)
{
    /* Make sure init is done */
    CopyAppend(NULL);

    resetStringInfo(&copyString);
}
/*
 * Initialize workspace for a worker process: create the schema if it doesn't
 * already exist.
 */
static void
initialize_worker_spi(worktable *table)
{
    int         ret;
    int         ntup;
    bool        isnull;
    StringInfoData buf;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");

    /* XXX could we use CREATE SCHEMA IF NOT EXISTS? */
    initStringInfo(&buf);
    appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'",
                     table->schema);

    ret = SPI_execute(buf.data, true, 0);
    if (ret != SPI_OK_SELECT)
        elog(FATAL, "SPI_execute failed: error code %d", ret);

    if (SPI_processed != 1)
        elog(FATAL, "not a singleton result");

    ntup = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0],
                                       SPI_tuptable->tupdesc,
                                       1, &isnull));
    if (isnull)
        elog(FATAL, "null result");

    if (ntup == 0)
    {
        resetStringInfo(&buf);
        appendStringInfo(&buf,
                         "CREATE SCHEMA \"%s\" "
                         "CREATE TABLE \"%s\" ("
                         " type text CHECK (type IN ('total', 'delta')), "
                         " value integer)"
                         "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
                         "WHERE type = 'total'",
                         table->schema, table->name, table->name, table->name);

        /* set statement start time */
        SetCurrentStatementStartTimestamp();

        ret = SPI_execute(buf.data, false, 0);

        if (ret != SPI_OK_UTILITY)
            elog(FATAL, "failed to create my schema");
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);
}
/*
 * pg_split_opts -- split a string of options and append it to an argv array
 *
 * The caller is responsible for ensuring the argv array is large enough. The
 * maximum possible number of arguments added by this routine is
 * (strlen(optstr) + 1) / 2.
 *
 * Because some option values can contain spaces we allow escaping using
 * backslashes, with \\ representing a literal backslash.
 */
void
pg_split_opts(char **argv, int *argcp, char *optstr)
{
    StringInfoData s;

    initStringInfo(&s);

    while (*optstr)
    {
        bool        last_was_escape = false;

        resetStringInfo(&s);

        /* skip over leading space */
        while (isspace((unsigned char) *optstr))
            optstr++;
        if (*optstr == '\0')
            break;

        /*
         * Parse a single option + value, stopping at the first space, unless
         * it's escaped.
         */
        while (*optstr)
        {
            if (isspace((unsigned char) *optstr) && !last_was_escape)
                break;

            if (!last_was_escape && *optstr == '\\')
                last_was_escape = true;
            else
            {
                last_was_escape = false;
                appendStringInfoChar(&s, *optstr);
            }

            optstr++;
        }

        /* now store the option */
        argv[(*argcp)++] = pstrdup(s.data);
    }
    resetStringInfo(&s);
}
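/*
 * Worked example for the splitter above (hand-traced from the loop, not
 * output of the real function): spaces separate arguments unless escaped
 * with a backslash, and "\\" yields a literal backslash. The option string
 *
 *     -c geqo=off -c statement_timeout=5\ min
 *
 * is therefore split into four argv entries:
 *
 *     "-c", "geqo=off", "-c", "statement_timeout=5 min"
 *
 * where the escaped space survives inside the last argument.
 */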
/*
 * initStringInfo
 *
 * Initialize a StringInfoData struct (with previously undefined contents)
 * to describe an empty string.
 */
void
initStringInfo(StringInfo str)
{
    int         size = 1024;    /* initial default buffer size */

    str->data = (char *) palloc(size);
    str->maxlen = size;
    resetStringInfo(str);
}
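/*
 * Usage sketch for the lifecycle implied above (assumes the usual backend
 * environment from postgres.h; stringinfo_lifecycle_sketch() is a
 * hypothetical caller, not part of any snippet here). The point several of
 * the surrounding snippets illustrate, sometimes by getting it wrong: since
 * initStringInfo() always palloc's a fresh buffer, calling it on an
 * already-initialized StringInfo leaks the old buffer; resetStringInfo()
 * alone is enough to empty a buffer for reuse.
 */
#include "postgres.h"
#include "lib/stringinfo.h"

static void
stringinfo_lifecycle_sketch(void)
{
    StringInfoData buf;

    initStringInfo(&buf);               /* allocate once */
    appendStringInfo(&buf, "row %d", 1);

    resetStringInfo(&buf);              /* empty it, keep the allocation */
    appendStringInfoString(&buf, "reused");

    pfree(buf.data);                    /* release when truly done */
}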
/*
 * Decode an INSERT entry
 */
static void
decoder_raw_insert(StringInfo s, Relation relation, HeapTuple tuple)
{
    TupleDesc   tupdesc = RelationGetDescr(relation);
    int         natt;
    bool        first_column = true;

    /*
     * makeStringInfo() already initializes the buffer; a second
     * initStringInfo() here would only leak the first allocation.
     */
    StringInfo  values = makeStringInfo();

    /* Query header */
    appendStringInfo(s, "INSERT INTO ");
    print_relname(s, relation);
    appendStringInfo(s, " (");

    /* Build column names and values */
    for (natt = 0; natt < tupdesc->natts; natt++)
    {
        Form_pg_attribute attr;
        Datum       origval;
        bool        isnull;

        attr = tupdesc->attrs[natt];

        /* Skip dropped columns and system columns */
        if (attr->attisdropped || attr->attnum < 0)
            continue;

        /* Skip comma for the first column */
        if (!first_column)
        {
            appendStringInfoString(s, ", ");
            appendStringInfoString(values, ", ");
        }
        else
            first_column = false;

        /* Print attribute name */
        appendStringInfo(s, "%s", quote_identifier(NameStr(attr->attname)));

        /* Get Datum from tuple */
        origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);

        /* Get output function */
        print_value(values, origval, attr->atttypid, isnull);
    }

    /* Append values */
    appendStringInfo(s, ") VALUES (%s);", values->data);

    /* Clean up */
    resetStringInfo(values);
}
Datum
hstore_to_json(PG_FUNCTION_ARGS)
{
    HStore     *in = PG_GETARG_HS(0);
    int         i;
    int         count = HS_COUNT(in);
    char       *base = STRPTR(in);
    HEntry     *entries = ARRPTR(in);
    StringInfoData tmp,
                dst;

    if (count == 0)
        PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));

    initStringInfo(&tmp);
    initStringInfo(&dst);

    appendStringInfoChar(&dst, '{');

    for (i = 0; i < count; i++)
    {
        resetStringInfo(&tmp);
        appendBinaryStringInfo(&tmp, HS_KEY(entries, base, i),
                               HS_KEYLEN(entries, i));
        escape_json(&dst, tmp.data);
        appendStringInfoString(&dst, ": ");
        if (HS_VALISNULL(entries, i))
            appendStringInfoString(&dst, "null");
        else
        {
            resetStringInfo(&tmp);
            appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i),
                                   HS_VALLEN(entries, i));
            escape_json(&dst, tmp.data);
        }

        if (i + 1 != count)
            appendStringInfoString(&dst, ", ");
    }
    appendStringInfoChar(&dst, '}');

    PG_RETURN_TEXT_P(cstring_to_text(dst.data));
}
/*
 * This function sets a string in shared memory naming the database to connect
 * to the next time the background worker restarts. Because a bgworker can
 * only connect to one database at a time, and some catalogs and stats are
 * scoped to the current database, the bgworker periodically restarts to
 * collect the latest stats from another database.
 */
int
set_next_db_target(void)
{
    int         retval,
                processed;
    StringInfoData buf;
    SPITupleTable *coltuptable;
    char       *next_db_target;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    /* get sorted list of databases, find one after target_db */
    initStringInfo(&buf);
    appendStringInfo(&buf,
                     "SELECT datname FROM pg_database "
                     "WHERE datname NOT IN ('template0', 'template1') "
                     "AND datallowconn IS TRUE AND datname > '%s' "
                     "ORDER BY datname ASC LIMIT 1;", target_db);

    retval = SPI_execute(buf.data, false, 0);
    if (retval != SPI_OK_SELECT)
    {
        elog(FATAL, "Database information collection failed");
        // FAIL RETURN 1
    }

    processed = SPI_processed;

    if (processed == 0)
    {
        // No matching records so pick the first database.
        resetStringInfo(&buf);
        appendStringInfoString(&buf,
                               "SELECT datname FROM pg_database "
                               "WHERE datname NOT IN ('template0', 'template1') "
                               "AND datallowconn IS TRUE "
                               "ORDER BY datname ASC LIMIT 1;");

        retval = SPI_execute(buf.data, false, 0);
        if (retval != SPI_OK_SELECT)
        {
            elog(FATAL, "Database information collection failed");
            // FAIL RETURN 1
        }
    }

    coltuptable = SPI_tuptable;
    next_db_target = SPI_getvalue(coltuptable->vals[0], coltuptable->tupdesc, 1);
    // elog(LOG, "NEXTDB TARGET: %s", next_db_target);  // print next target db

    strcpy(pgsampler_state->next_db, next_db_target);

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    return 0;
}
char *
pgsp_json_xmlize(char *json)
{
    pgspParserContext ctx;
    JsonSemAction sem;
    JsonLexContext lex;
    int         start_len;
    char        buf[32];

    init_json_lex_context(&lex, json);
    init_parser_context(&ctx, PGSP_JSON_XMLIZE, json, buf, sizeof(buf));

    sem.semstate = (void *) &ctx;
    sem.object_start = xml_objstart;
    sem.object_end = xml_objend;
    sem.array_start = NULL;
    sem.array_end = xml_arrend;
    sem.object_field_start = xml_ofstart;
    sem.object_field_end = xml_ofend;
    sem.array_element_start = xml_aestart;
    sem.array_element_end = xml_aeend;
    sem.scalar = xml_scalar;

    appendStringInfo(ctx.dest,
                     "<explain xmlns=\"http://www.postgresql.org/2009/explain\">\n <Query>");
    start_len = ctx.dest->len;

    if (!run_pg_parse_json(&lex, &sem))
    {
        if (ctx.dest->len > start_len &&
            ctx.dest->data[ctx.dest->len - 1] != '\n')
            appendStringInfoChar(ctx.dest, '\n');

        if (ctx.dest->len == start_len)
        {
            resetStringInfo(ctx.dest);
            appendStringInfoString(ctx.dest, "<Input was not JSON>");
        }
        else
            appendStringInfoString(ctx.dest, "<truncated>");
    }
    else
        appendStringInfo(ctx.dest, "</Query>\n</explain>\n");

    return ctx.dest->data;
}
/*
 * worker_merge_files_into_table creates a task table within the job's schema,
 * which should have already been created by the task tracker protocol, and
 * copies files in its task directory into this table. If the schema doesn't
 * exist, the function defaults to the 'public' schema. Note that, unlike
 * partitioning functions, this function is not always idempotent. On success,
 * the function creates the table and loads data, and subsequent calls to the
 * function error out because the table already exists. On failure, the task
 * table creation commands are rolled back, and the function can be called
 * again.
 */
Datum
worker_merge_files_into_table(PG_FUNCTION_ARGS)
{
    uint64      jobId = PG_GETARG_INT64(0);
    uint32      taskId = PG_GETARG_UINT32(1);
    ArrayType  *columnNameObject = PG_GETARG_ARRAYTYPE_P(2);
    ArrayType  *columnTypeObject = PG_GETARG_ARRAYTYPE_P(3);

    StringInfo  jobSchemaName = JobSchemaName(jobId);
    StringInfo  taskTableName = TaskTableName(taskId);
    StringInfo  taskDirectoryName = TaskDirectoryName(jobId, taskId);
    bool        schemaExists = false;
    List       *columnNameList = NIL;
    List       *columnTypeList = NIL;

    /* we should have the same number of column names and types */
    int32       columnNameCount = ArrayObjectCount(columnNameObject);
    int32       columnTypeCount = ArrayObjectCount(columnTypeObject);

    if (columnNameCount != columnTypeCount)
    {
        ereport(ERROR, (errmsg("column name array size: %d and type array size: %d"
                               " do not match",
                               columnNameCount, columnTypeCount)));
    }

    /*
     * If the schema for the job isn't already created by the task tracker
     * protocol, we fall back to using the default 'public' schema.
     */
    schemaExists = JobSchemaExists(jobSchemaName);
    if (!schemaExists)
    {
        resetStringInfo(jobSchemaName);
        appendStringInfoString(jobSchemaName, "public");
    }

    /* create the task table and copy files into the table */
    columnNameList = ArrayObjectToCStringList(columnNameObject);
    columnTypeList = ArrayObjectToCStringList(columnTypeObject);

    CreateTaskTable(jobSchemaName, taskTableName, columnNameList, columnTypeList);

    CopyTaskFilesFromDirectory(jobSchemaName, taskTableName, taskDirectoryName);

    PG_RETURN_VOID();
}
char *
getXMLNodeKindStr(XMLNodeKind k)
{
    StringInfoData result;

    result.maxlen = 32;
    result.data = (char *) palloc(result.maxlen);
    resetStringInfo(&result);

    switch (k)
    {
        case XMLNODE_DOC:
            appendStringInfoString(&result, "XML document");
            break;
        case XMLNODE_DTD:
            appendStringInfoString(&result, "DTD");
            break;
        case XMLNODE_ELEMENT:
            appendStringInfoString(&result, "XML element");
            break;
        case XMLNODE_ATTRIBUTE:
            appendStringInfoString(&result, "XML element attribute");
            break;
        case XMLNODE_COMMENT:
            appendStringInfoString(&result, "XML comment");
            break;
        case XMLNODE_CDATA:
            appendStringInfoString(&result, "CDATA section");
            break;
        case XMLNODE_PI:
            appendStringInfoString(&result, "processing instruction");
            break;
        case XMLNODE_TEXT:
            appendStringInfoString(&result, "text node");
            break;
        case XMLNODE_DOC_FRAGMENT:
            appendStringInfoString(&result, "document fragment");
            break;
        default:
            elog(ERROR, "unknown node kind: %u", k);
            return NULL;
    }
    return result.data;
}
/*
 * Report a detected deadlock, with available details.
 */
void
DeadLockReport(void)
{
    StringInfoData buf;
    StringInfoData buf2;
    int         i;

    initStringInfo(&buf);
    initStringInfo(&buf2);

    for (i = 0; i < nDeadlockDetails; i++)
    {
        DEADLOCK_INFO *info = &deadlockDetails[i];
        int         nextpid;

        /* The last proc waits for the first one... */
        if (i < nDeadlockDetails - 1)
            nextpid = info[1].pid;
        else
            nextpid = deadlockDetails[0].pid;

        if (i > 0)
            appendStringInfoChar(&buf, '\n');

        /* reset buf2 to hold next object description */
        resetStringInfo(&buf2);

        DescribeLockTag(&buf2, &info->locktag);

        appendStringInfo(&buf,
                         _("Process %d waits for %s on %s; blocked by process %d."),
                         info->pid,
                         GetLockmodeName(info->locktag.locktag_lockmethodid,
                                         info->lockmode),
                         buf2.data,
                         nextpid);
    }
    ereport(ERROR,
            (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
             errmsg("deadlock detected"),
             errdetail("%s", buf.data)));
}
/*
 * Process a status update message received from standby.
 */
static void
ProcessStandbyMessage(void)
{
    char        msgtype;

    resetStringInfo(&reply_message);

    /*
     * Read the message contents.
     */
    if (pq_getmessage(&reply_message, 0))
    {
        ereport(COMMERROR,
                (errcode(ERRCODE_PROTOCOL_VIOLATION),
                 errmsg("unexpected EOF on standby connection")));
        proc_exit(0);
    }

    /*
     * Check message type from the first byte.
     */
    msgtype = pq_getmsgbyte(&reply_message);
    switch (msgtype)
    {
        case 'r':
            ProcessStandbyReplyMessage();
            break;

        case 'h':
            ProcessStandbyHSFeedbackMessage();
            break;

        default:
            ereport(COMMERROR,
                    (errcode(ERRCODE_PROTOCOL_VIOLATION),
                     errmsg("unexpected message type \"%c\"", msgtype)));
            proc_exit(0);
    }
}
/*
 * Wrapper for libchurl
 */
static void
process_request(ClientContext *client_context, char *uri)
{
    size_t      n = 0;
    char        buffer[RAW_BUF_SIZE];

    print_http_headers(client_context->http_headers);
    client_context->handle = churl_init_download(uri, client_context->http_headers);
    memset(buffer, 0, RAW_BUF_SIZE);
    resetStringInfo(&(client_context->the_rest_buf));

    /*
     * This try-catch ensures that in case of an exception during the
     * "communication with PXF and the accumulation of PXF data in
     * client_context->the_rest_buf", we still get to terminate the libcurl
     * connection nicely and avoid leaving the PXF server connection hung.
     */
    PG_TRY();
    {
        /* read some bytes to make sure the connection is established */
        churl_read_check_connectivity(client_context->handle);
        while ((n = churl_read(client_context->handle, buffer, sizeof(buffer))) != 0)
        {
            appendBinaryStringInfo(&(client_context->the_rest_buf), buffer, n);
            memset(buffer, 0, RAW_BUF_SIZE);
        }
        churl_cleanup(client_context->handle, false);
    }
    PG_CATCH();
    {
        if (client_context->handle)
            churl_cleanup(client_context->handle, true);
        PG_RE_THROW();
    }
    PG_END_TRY();
}
/* --------------------------------
 *      pq_getmessage   - get a message with length word from connection
 *
 *      The return value is placed in an expansible StringInfo, which has
 *      already been initialized by the caller.
 *      Only the message body is placed in the StringInfo; the length word
 *      is removed. Also, s->cursor is initialized to zero for convenience
 *      in scanning the message contents.
 *
 *      If maxlen is not zero, it is an upper limit on the length of the
 *      message we are willing to accept. We abort the connection (by
 *      returning EOF) if client tries to send more than that.
 *
 *      returns 0 if OK, EOF if trouble
 * --------------------------------
 */
int
pq_getmessage(StringInfo s, int maxlen)
{
    int32       len;

    resetStringInfo(s);

    /* Read message length word */
    if (pq_getbytes((char *) &len, 4) == EOF)
    {
        ereport(COMMERROR,
                (errcode(ERRCODE_PROTOCOL_VIOLATION),
                 errmsg("unexpected EOF within message length word")));
        return EOF;
    }

    len = ntohl(len);

    if (len < 4 ||
        (maxlen > 0 && len > maxlen))
    {
        ereport(COMMERROR,
                (errcode(ERRCODE_PROTOCOL_VIOLATION),
                 errmsg("invalid message length")));
        return EOF;
    }

    len -= 4;                   /* discount length itself */

    if (len > 0)
    {
        /*
         * Allocate space for message. If we run out of room (ridiculously
         * large message), we will elog(ERROR), but we want to discard the
         * message body so as not to lose communication sync.
         */
        PG_TRY();
        {
            enlargeStringInfo(s, len);
        }
        PG_CATCH();
        {
            if (pq_discardbytes(len) == EOF)
                ereport(COMMERROR,
                        (errcode(ERRCODE_PROTOCOL_VIOLATION),
                         errmsg("incomplete message from client")));
            PG_RE_THROW();
        }
        PG_END_TRY();

        /* And grab the message */
        if (pq_getbytes(s->data, len) == EOF)
        {
            ereport(COMMERROR,
                    (errcode(ERRCODE_PROTOCOL_VIOLATION),
                     errmsg("incomplete message from client")));
            return EOF;
        }
        s->len = len;
        /* Place a trailing null per StringInfo convention */
        s->data[len] = '\0';
    }

    return 0;
}
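/*
 * Standalone sketch of the framing that pq_getmessage() expects: a 4-byte
 * big-endian length word that counts itself, followed by (len - 4) bytes of
 * body. The checks mirror the ones above: len < 4 is invalid, and callers
 * may impose an upper bound. Plain C, reading from an in-memory buffer
 * rather than a socket; parse_frame() is a hypothetical helper.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* returns body length on success, -1 on a bad length word */
static int
parse_frame(const unsigned char *wire, int avail, int maxlen)
{
    uint32_t    len;

    if (avail < 4)
        return -1;
    memcpy(&len, wire, 4);
    len = ntohl(len);
    if (len < 4 || (maxlen > 0 && (int) len > maxlen))
        return -1;
    return (int) len - 4;       /* discount the length word itself */
}

int
main(void)
{
    /* length word 0x00000009 followed by a 5-byte body */
    const unsigned char wire[] = {0, 0, 0, 9, 'h', 'e', 'l', 'l', 'o'};

    printf("body bytes: %d\n", parse_frame(wire, (int) sizeof(wire), 0)); /* 5 */
    return 0;
}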
/*
 * worker_merge_files_and_run_query creates a merge task table within the job's
 * schema, which should have already been created by the task tracker protocol.
 * It copies files in its task directory into this table. It then runs the
 * final query to create the result table of the job.
 *
 * Note that here we followed a different approach to create a task table for
 * merge files than worker_merge_files_into_table(). In the future we should
 * unify these two approaches. For this purpose creating a directory_fdw
 * extension and using it would make sense. Then we can merge files with a
 * query or without query through directory_fdw.
 */
Datum
worker_merge_files_and_run_query(PG_FUNCTION_ARGS)
{
    uint64      jobId = PG_GETARG_INT64(0);
    uint32      taskId = PG_GETARG_UINT32(1);
    text       *createMergeTableQueryText = PG_GETARG_TEXT_P(2);
    text       *createIntermediateTableQueryText = PG_GETARG_TEXT_P(3);

    const char *createMergeTableQuery = text_to_cstring(createMergeTableQueryText);
    const char *createIntermediateTableQuery =
        text_to_cstring(createIntermediateTableQueryText);

    StringInfo  taskDirectoryName = TaskDirectoryName(jobId, taskId);
    StringInfo  jobSchemaName = JobSchemaName(jobId);
    StringInfo  intermediateTableName = TaskTableName(taskId);
    StringInfo  mergeTableName = makeStringInfo();
    StringInfo  setSearchPathString = makeStringInfo();
    bool        schemaExists = false;
    int         connected = 0;
    int         setSearchPathResult = 0;
    int         createMergeTableResult = 0;
    int         createIntermediateTableResult = 0;
    int         finished = 0;

    /*
     * If the schema for the job isn't already created by the task tracker
     * protocol, we fall back to using the default 'public' schema.
     */
    schemaExists = JobSchemaExists(jobSchemaName);
    if (!schemaExists)
    {
        resetStringInfo(jobSchemaName);
        appendStringInfoString(jobSchemaName, "public");
    }

    appendStringInfo(setSearchPathString, SET_SEARCH_PATH_COMMAND,
                     jobSchemaName->data);

    /* Add "public" to search path to access UDFs in public schema */
    appendStringInfo(setSearchPathString, ",public");

    connected = SPI_connect();
    if (connected != SPI_OK_CONNECT)
    {
        ereport(ERROR, (errmsg("could not connect to SPI manager")));
    }

    setSearchPathResult = SPI_exec(setSearchPathString->data, 0);
    if (setSearchPathResult < 0)
    {
        ereport(ERROR, (errmsg("execution was not successful \"%s\"",
                               setSearchPathString->data)));
    }

    createMergeTableResult = SPI_exec(createMergeTableQuery, 0);
    if (createMergeTableResult < 0)
    {
        ereport(ERROR, (errmsg("execution was not successful \"%s\"",
                               createMergeTableQuery)));
    }

    appendStringInfo(mergeTableName, "%s%s", intermediateTableName->data,
                     MERGE_TABLE_SUFFIX);
    CopyTaskFilesFromDirectory(jobSchemaName, mergeTableName, taskDirectoryName);

    createIntermediateTableResult = SPI_exec(createIntermediateTableQuery, 0);
    if (createIntermediateTableResult < 0)
    {
        ereport(ERROR, (errmsg("execution was not successful \"%s\"",
                               createIntermediateTableQuery)));
    }

    finished = SPI_finish();
    if (finished != SPI_OK_FINISH)
    {
        ereport(ERROR, (errmsg("could not disconnect from SPI manager")));
    }

    PG_RETURN_VOID();
}
Datum
hstore_to_json_loose(PG_FUNCTION_ARGS)
{
    HStore     *in = PG_GETARG_HS(0);
    int         i;
    int         count = HS_COUNT(in);
    char       *base = STRPTR(in);
    HEntry     *entries = ARRPTR(in);
    bool        is_number;
    StringInfoData tmp,
                dst;

    if (count == 0)
        PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));

    initStringInfo(&tmp);
    initStringInfo(&dst);

    appendStringInfoChar(&dst, '{');

    for (i = 0; i < count; i++)
    {
        resetStringInfo(&tmp);
        appendBinaryStringInfo(&tmp, HS_KEY(entries, base, i),
                               HS_KEYLEN(entries, i));
        escape_json(&dst, tmp.data);
        appendStringInfoString(&dst, ": ");
        if (HS_VALISNULL(entries, i))
            appendStringInfoString(&dst, "null");
        /* guess that values of 't' or 'f' are booleans */
        else if (HS_VALLEN(entries, i) == 1 &&
                 *(HS_VAL(entries, base, i)) == 't')
            appendStringInfoString(&dst, "true");
        else if (HS_VALLEN(entries, i) == 1 &&
                 *(HS_VAL(entries, base, i)) == 'f')
            appendStringInfoString(&dst, "false");
        else
        {
            is_number = false;
            resetStringInfo(&tmp);
            appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i),
                                   HS_VALLEN(entries, i));

            /*
             * don't treat something with a leading zero followed by another
             * digit as numeric - could be a zip code or similar
             */
            if (tmp.len > 0 &&
                !(tmp.data[0] == '0' &&
                  isdigit((unsigned char) tmp.data[1])) &&
                strspn(tmp.data, "+-0123456789Ee.") == tmp.len)
            {
                /*
                 * might be a number. See if we can input it as a numeric
                 * value. Ignore any actual parsed value.
                 */
                char       *endptr = "junk";
                long        lval;

                lval = strtol(tmp.data, &endptr, 10);
                (void) lval;
                if (*endptr == '\0')
                {
                    /*
                     * the strtol man page says this means the whole string
                     * is valid
                     */
                    is_number = true;
                }
                else
                {
                    /* not an int - try a double */
                    double      dval;

                    dval = strtod(tmp.data, &endptr);
                    (void) dval;
                    if (*endptr == '\0')
                        is_number = true;
                }
            }
            if (is_number)
                appendBinaryStringInfo(&dst, tmp.data, tmp.len);
            else
                escape_json(&dst, tmp.data);
        }

        if (i + 1 != count)
            appendStringInfoString(&dst, ", ");
    }
    appendStringInfoChar(&dst, '}');

    PG_RETURN_TEXT_P(cstring_to_text(dst.data));
}
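/*
 * Standalone sketch of the numeric sniff used above: accept the text as a
 * number only if strtol()/strtod() consume the entire string, i.e. endptr
 * lands on the terminating '\0'. Plain C, independent of hstore; it omits
 * the leading-zero and strspn() guards the real function applies first.
 * looks_numeric() is a hypothetical helper.
 */
#include <stdio.h>
#include <stdlib.h>

static int
looks_numeric(const char *s)
{
    char       *endptr;

    (void) strtol(s, &endptr, 10);
    if (*endptr == '\0')
        return 1;               /* whole string was a valid integer */

    (void) strtod(s, &endptr);
    return *endptr == '\0';     /* or a valid double */
}

int
main(void)
{
    printf("%d %d %d\n",
           looks_numeric("42"),         /* 1 */
           looks_numeric("-1.5e3"),     /* 1 */
           looks_numeric("90210x"));    /* 0 */
    return 0;
}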
static void
kill_idle_main(Datum main_arg)
{
    StringInfoData buf;

    /* Register functions for SIGTERM/SIGHUP management */
    pqsignal(SIGHUP, kill_idle_sighup);
    pqsignal(SIGTERM, kill_idle_sigterm);

    /* We're now ready to receive signals */
    BackgroundWorkerUnblockSignals();

    /* Connect to a database */
    BackgroundWorkerInitializeConnection("postgres", NULL);

    /* Build query for process */
    initStringInfo(&buf);
    kill_idle_build_query(&buf);

    while (!got_sigterm)
    {
        int         rc,
                    ret,
                    i;

        /* Wait necessary amount of time */
        rc = WaitLatch(&MyProc->procLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                       kill_max_idle_time * 1000L,
                       PG_WAIT_EXTENSION);
        ResetLatch(&MyProc->procLatch);

        /* Emergency bailout if postmaster has died */
        if (rc & WL_POSTMASTER_DEATH)
            proc_exit(1);

        /* Process signals */
        if (got_sighup)
        {
            int         old_interval;

            /* Save old value of kill interval */
            old_interval = kill_max_idle_time;

            /* Process config file */
            ProcessConfigFile(PGC_SIGHUP);
            got_sighup = false;
            ereport(LOG, (errmsg("bgworker kill_idle signal: processed SIGHUP")));

            /* Rebuild query if necessary */
            if (old_interval != kill_max_idle_time)
            {
                /*
                 * resetStringInfo() empties the buffer in place; re-running
                 * initStringInfo() here would only leak the old allocation.
                 */
                resetStringInfo(&buf);
                kill_idle_build_query(&buf);
            }
        }

        if (got_sigterm)
        {
            /* Simply exit */
            ereport(LOG, (errmsg("bgworker kill_idle signal: processed SIGTERM")));
            proc_exit(0);
        }

        /* Process idle connection kill */
        SetCurrentStatementStartTimestamp();
        StartTransactionCommand();
        SPI_connect();
        PushActiveSnapshot(GetTransactionSnapshot());
        pgstat_report_activity(STATE_RUNNING, buf.data);

        /* Statement start time */
        SetCurrentStatementStartTimestamp();

        /* Execute query */
        ret = SPI_execute(buf.data, false, 0);

        /* Some error handling */
        if (ret != SPI_OK_SELECT)
            elog(FATAL, "Error when trying to kill idle connections");

        /* Do some processing and log stuff disconnected */
        for (i = 0; i < SPI_processed; i++)
        {
            int32       pidValue;
            bool        isnull;
            char       *datname = NULL;
            char       *usename = NULL;
            char       *client_addr = NULL;

            /* Fetch values */
            pidValue = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[i],
                                                   SPI_tuptable->tupdesc,
                                                   1, &isnull));
            usename = DatumGetCString(SPI_getbinval(SPI_tuptable->vals[i],
                                                    SPI_tuptable->tupdesc,
                                                    3, &isnull));
            datname = DatumGetCString(SPI_getbinval(SPI_tuptable->vals[i],
                                                    SPI_tuptable->tupdesc,
                                                    4, &isnull));
            client_addr = DatumGetCString(SPI_getbinval(SPI_tuptable->vals[i],
                                                        SPI_tuptable->tupdesc,
                                                        5, &isnull));

            /* Log what has been disconnected */
            elog(LOG, "Disconnected idle connection: PID %d %s/%s/%s",
                 pidValue, datname ? datname : "none",
                 usename ? usename : "none",
                 client_addr ? client_addr : "none");
        }

        SPI_finish();
        PopActiveSnapshot();
        CommitTransactionCommand();
        pgstat_report_activity(STATE_IDLE, NULL);
    }

    /* No problems, so clean exit */
    proc_exit(0);
}
/*
 * Call reorg_one_table for the target table or each table in a database.
 */
static bool
reorg_one_database(const char *orderby, const char *table)
{
    bool        ret = true;
    PGresult   *res;
    int         i;
    int         num;
    StringInfoData sql;

    initStringInfo(&sql);

    reconnect(ERROR);

    /* Disable statement timeout. */
    command("SET statement_timeout = 0", 0, NULL);

    /* Restrict search_path to system catalog. */
    command("SET search_path = pg_catalog, pg_temp, public", 0, NULL);

    /* To avoid annoying "create implicit ..." messages. */
    command("SET client_min_messages = warning", 0, NULL);

    /* acquire target tables */
    appendStringInfoString(&sql, "SELECT * FROM reorg.tables WHERE ");
    if (table)
    {
        appendStringInfoString(&sql, "relid = $1::regclass");
        res = execute_elevel(sql.data, 1, &table, DEBUG2);
    }
    else
    {
        appendStringInfoString(&sql, "pkid IS NOT NULL");
        if (!orderby)
            appendStringInfoString(&sql, " AND ckid IS NOT NULL");
        res = execute_elevel(sql.data, 0, NULL, DEBUG2);
    }

    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        if (sqlstate_equals(res, SQLSTATE_INVALID_SCHEMA_NAME))
        {
            /* Schema reorg does not exist. Skip the database. */
            ret = false;
            goto cleanup;
        }
        else
        {
            /* exit otherwise */
            printf("%s", PQerrorMessage(connection));
            PQclear(res);
            exit(1);
        }
    }

    num = PQntuples(res);

    for (i = 0; i < num; i++)
    {
        reorg_table table;
        const char *create_table;
        const char *ckey;
        int         c = 0;

        table.target_name = getstr(res, i, c++);
        table.target_oid = getoid(res, i, c++);
        table.target_toast = getoid(res, i, c++);
        table.target_tidx = getoid(res, i, c++);
        table.pkid = getoid(res, i, c++);
        table.ckid = getoid(res, i, c++);

        if (table.pkid == 0)
            ereport(ERROR,
                    (errcode(E_PG_COMMAND),
                     errmsg("relation \"%s\" must have a primary key or not-null unique keys",
                            table.target_name)));

        table.create_pktype = getstr(res, i, c++);
        table.create_log = getstr(res, i, c++);
        table.create_trigger = getstr(res, i, c++);
        table.alter_table = getstr(res, i, c++);
        create_table = getstr(res, i, c++);
        table.drop_columns = getstr(res, i, c++);
        table.delete_log = getstr(res, i, c++);
        table.lock_table = getstr(res, i, c++);
        ckey = getstr(res, i, c++);

        resetStringInfo(&sql);
        if (!orderby)
        {
            /* CLUSTER mode */
            if (ckey == NULL)
                ereport(ERROR,
                        (errcode(E_PG_COMMAND),
                         errmsg("relation \"%s\" has no cluster key",
                                table.target_name)));
            appendStringInfo(&sql, "%s ORDER BY %s", create_table, ckey);
            table.create_table = sql.data;
        }
        else if (!orderby[0])
        {
            /* VACUUM FULL mode */
            table.create_table = create_table;
        }
        else
        {
            /* User specified ORDER BY */
            appendStringInfo(&sql, "%s ORDER BY %s", create_table, orderby);
            table.create_table = sql.data;
        }

        table.sql_peek = getstr(res, i, c++);
        table.sql_insert = getstr(res, i, c++);
        table.sql_delete = getstr(res, i, c++);
        table.sql_update = getstr(res, i, c++);
        table.sql_pop = getstr(res, i, c++);

        reorg_one_table(&table, orderby);
    }

cleanup:
    PQclear(res);
    disconnect();
    termStringInfo(&sql);
    return ret;
}
/*
 * Report a detected deadlock, with available details.
 */
void
DeadLockReport(void)
{
    StringInfoData clientbuf;   /* errdetail for client */
    StringInfoData logbuf;      /* errdetail for server log */
    StringInfoData locktagbuf;
    int         i;

    initStringInfo(&clientbuf);
    initStringInfo(&logbuf);
    initStringInfo(&locktagbuf);

    /* Generate the "waits for" lines sent to the client */
    for (i = 0; i < nDeadlockDetails; i++)
    {
        DEADLOCK_INFO *info = &deadlockDetails[i];
        int         nextpid;

        /* The last proc waits for the first one... */
        if (i < nDeadlockDetails - 1)
            nextpid = info[1].pid;
        else
            nextpid = deadlockDetails[0].pid;

        /* reset locktagbuf to hold next object description */
        resetStringInfo(&locktagbuf);

        DescribeLockTag(&locktagbuf, &info->locktag);

        if (i > 0)
            appendStringInfoChar(&clientbuf, '\n');

        appendStringInfo(&clientbuf,
                         _("Process %d waits for %s on %s; blocked by process %d."),
                         info->pid,
                         GetLockmodeName(info->locktag.locktag_lockmethodid,
                                         info->lockmode),
                         locktagbuf.data,
                         nextpid);
    }

    /* Duplicate all the above for the server ... */
    appendStringInfoString(&logbuf, clientbuf.data);

    /* ... and add info about query strings */
    for (i = 0; i < nDeadlockDetails; i++)
    {
        DEADLOCK_INFO *info = &deadlockDetails[i];

        appendStringInfoChar(&logbuf, '\n');

        appendStringInfo(&logbuf,
                         _("Process %d: %s"),
                         info->pid,
                         pgstat_get_backend_current_activity(info->pid, false));
    }

    ereport(ERROR,
            (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
             errmsg("deadlock detected"),
             errdetail("%s", clientbuf.data),
             errdetail_log("%s", logbuf.data),
             errhint("See server log for query details.")));
}
/*
 * sepgsql_relation_post_create
 *
 * The post creation hook of relation/attribute
 */
void
sepgsql_relation_post_create(Oid relOid)
{
    Relation    rel;
    ScanKeyData skey;
    SysScanDesc sscan;
    HeapTuple   tuple;
    Form_pg_class classForm;
    ObjectAddress object;
    uint16      tclass;
    char       *scontext;       /* subject */
    char       *tcontext;       /* schema */
    char       *rcontext;       /* relation */
    char       *ccontext;       /* column */
    char       *nsp_name;
    StringInfoData audit_name;

    /*
     * Fetch catalog record of the new relation. Because pg_class entry is not
     * visible right now, we need to scan the catalog using SnapshotSelf.
     */
    rel = heap_open(RelationRelationId, AccessShareLock);

    ScanKeyInit(&skey,
                ObjectIdAttributeNumber,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(relOid));

    sscan = systable_beginscan(rel, ClassOidIndexId, true,
                               SnapshotSelf, 1, &skey);

    tuple = systable_getnext(sscan);
    if (!HeapTupleIsValid(tuple))
        elog(ERROR, "catalog lookup failed for relation %u", relOid);

    classForm = (Form_pg_class) GETSTRUCT(tuple);

    /* ignore indexes on toast tables */
    if (classForm->relkind == RELKIND_INDEX &&
        classForm->relnamespace == PG_TOAST_NAMESPACE)
        goto out;

    /*
     * check db_schema:{add_name} permission of the namespace
     */
    object.classId = NamespaceRelationId;
    object.objectId = classForm->relnamespace;
    object.objectSubId = 0;
    sepgsql_avc_check_perms(&object,
                            SEPG_CLASS_DB_SCHEMA,
                            SEPG_DB_SCHEMA__ADD_NAME,
                            getObjectIdentity(&object),
                            true);

    switch (classForm->relkind)
    {
        case RELKIND_RELATION:
            tclass = SEPG_CLASS_DB_TABLE;
            break;
        case RELKIND_SEQUENCE:
            tclass = SEPG_CLASS_DB_SEQUENCE;
            break;
        case RELKIND_VIEW:
            tclass = SEPG_CLASS_DB_VIEW;
            break;
        case RELKIND_INDEX:
            /* deal with indexes specially; no need for tclass */
            sepgsql_index_modify(relOid);
            goto out;
        default:
            /* ignore other relkinds */
            goto out;
    }

    /*
     * Compute a default security label when we create a new relation object
     * under the specified namespace.
     */
    scontext = sepgsql_get_client_label();
    tcontext = sepgsql_get_label(NamespaceRelationId,
                                 classForm->relnamespace, 0);
    rcontext = sepgsql_compute_create(scontext, tcontext, tclass,
                                      NameStr(classForm->relname));

    /*
     * check db_xxx:{create} permission
     */
    nsp_name = get_namespace_name(classForm->relnamespace);
    initStringInfo(&audit_name);
    appendStringInfo(&audit_name, "%s.%s",
                     quote_identifier(nsp_name),
                     quote_identifier(NameStr(classForm->relname)));
    sepgsql_avc_check_perms_label(rcontext,
                                  tclass,
                                  SEPG_DB_DATABASE__CREATE,
                                  audit_name.data,
                                  true);

    /*
     * Assign the default security label on the new relation
     */
    object.classId = RelationRelationId;
    object.objectId = relOid;
    object.objectSubId = 0;
    SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, rcontext);

    /*
     * We also assign a default security label on the columns of new regular
     * tables.
     */
    if (classForm->relkind == RELKIND_RELATION)
    {
        Relation    arel;
        ScanKeyData akey;
        SysScanDesc ascan;
        HeapTuple   atup;
        Form_pg_attribute attForm;

        arel = heap_open(AttributeRelationId, AccessShareLock);

        ScanKeyInit(&akey,
                    Anum_pg_attribute_attrelid,
                    BTEqualStrategyNumber, F_OIDEQ,
                    ObjectIdGetDatum(relOid));

        ascan = systable_beginscan(arel, AttributeRelidNumIndexId, true,
                                   SnapshotSelf, 1, &akey);

        while (HeapTupleIsValid(atup = systable_getnext(ascan)))
        {
            attForm = (Form_pg_attribute) GETSTRUCT(atup);

            resetStringInfo(&audit_name);
            appendStringInfo(&audit_name, "%s.%s.%s",
                             quote_identifier(nsp_name),
                             quote_identifier(NameStr(classForm->relname)),
                             quote_identifier(NameStr(attForm->attname)));

            ccontext = sepgsql_compute_create(scontext,
                                              rcontext,
                                              SEPG_CLASS_DB_COLUMN,
                                              NameStr(attForm->attname));

            /*
             * check db_column:{create} permission
             */
            sepgsql_avc_check_perms_label(ccontext,
                                          SEPG_CLASS_DB_COLUMN,
                                          SEPG_DB_COLUMN__CREATE,
                                          audit_name.data,
                                          true);

            object.classId = RelationRelationId;
            object.objectId = relOid;
            object.objectSubId = attForm->attnum;
            SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, ccontext);

            pfree(ccontext);
        }
        systable_endscan(ascan);
        heap_close(arel, AccessShareLock);
    }
    pfree(rcontext);

out:
    systable_endscan(sscan);
    heap_close(rel, AccessShareLock);
}
/*
 * record_in - input routine for any composite type.
 */
Datum
record_in(PG_FUNCTION_ARGS)
{
    char       *string = PG_GETARG_CSTRING(0);
    Oid         tupType = PG_GETARG_OID(1);
#ifdef NOT_USED
    int32       typmod = PG_GETARG_INT32(2);
#endif
    HeapTupleHeader result;
    int32       tupTypmod;
    TupleDesc   tupdesc;
    HeapTuple   tuple;
    RecordIOData *my_extra;
    bool        needComma = false;
    int         ncolumns;
    int         i;
    char       *ptr;
    Datum      *values;
    bool       *nulls;
    StringInfoData buf;

    /*
     * Use the passed type unless it's RECORD; we can't support input of
     * anonymous types, mainly because there's no good way to figure out which
     * anonymous type is wanted. Note that for RECORD, what we'll probably
     * actually get is RECORD's typelem, ie, zero.
     */
    if (tupType == InvalidOid || tupType == RECORDOID)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("input of anonymous composite types is not implemented")));
    tupTypmod = -1;             /* for all non-anonymous types */

    /*
     * This comes from the composite type's pg_type.oid and stores system oids
     * in user tables, specifically DatumTupleFields. This oid must be
     * preserved by binary upgrades.
     */
    tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
    ncolumns = tupdesc->natts;

    /*
     * We arrange to look up the needed I/O info just once per series of
     * calls, assuming the record type doesn't change underneath us.
     */
    my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
    if (my_extra == NULL ||
        my_extra->ncolumns != ncolumns)
    {
        fcinfo->flinfo->fn_extra =
            MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
                               sizeof(RecordIOData) - sizeof(ColumnIOData)
                               + ncolumns * sizeof(ColumnIOData));
        my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
        my_extra->record_type = InvalidOid;
        my_extra->record_typmod = 0;
    }

    if (my_extra->record_type != tupType ||
        my_extra->record_typmod != tupTypmod)
    {
        MemSet(my_extra, 0,
               sizeof(RecordIOData) - sizeof(ColumnIOData)
               + ncolumns * sizeof(ColumnIOData));
        my_extra->record_type = tupType;
        my_extra->record_typmod = tupTypmod;
        my_extra->ncolumns = ncolumns;
    }

    values = (Datum *) palloc(ncolumns * sizeof(Datum));
    nulls = (bool *) palloc(ncolumns * sizeof(bool));

    /*
     * Scan the string. We use "buf" to accumulate the de-quoted data for
     * each column, which is then fed to the appropriate input converter.
     */
    ptr = string;
    /* Allow leading whitespace */
    while (*ptr && isspace((unsigned char) *ptr))
        ptr++;
    if (*ptr++ != '(')
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                 errmsg("malformed record literal: \"%s\"", string),
                 errdetail("Missing left parenthesis.")));

    initStringInfo(&buf);

    for (i = 0; i < ncolumns; i++)
    {
        ColumnIOData *column_info = &my_extra->columns[i];
        Oid         column_type = tupdesc->attrs[i]->atttypid;
        char       *column_data;

        /* Ignore dropped columns in datatype, but fill with nulls */
        if (tupdesc->attrs[i]->attisdropped)
        {
            values[i] = (Datum) 0;
            nulls[i] = true;
            continue;
        }

        if (needComma)
        {
            /* Skip comma that separates prior field from this one */
            if (*ptr == ',')
                ptr++;
            else
                /* *ptr must be ')' */
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                         errmsg("malformed record literal: \"%s\"", string),
                         errdetail("Too few columns.")));
        }

        /* Check for null: completely empty input means null */
        if (*ptr == ',' || *ptr == ')')
        {
            column_data = NULL;
            nulls[i] = true;
        }
        else
        {
            /* Extract string for this column */
            bool        inquote = false;

            resetStringInfo(&buf);
            while (inquote || !(*ptr == ',' || *ptr == ')'))
            {
                char        ch = *ptr++;

                if (ch == '\0')
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                             errmsg("malformed record literal: \"%s\"",
                                    string),
                             errdetail("Unexpected end of input.")));
                if (ch == '\\')
                {
                    if (*ptr == '\0')
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                                 errmsg("malformed record literal: \"%s\"",
                                        string),
                                 errdetail("Unexpected end of input.")));
                    appendStringInfoChar(&buf, *ptr++);
                }
                else if (ch == '\"')
                {
                    if (!inquote)
                        inquote = true;
                    else if (*ptr == '\"')
                    {
                        /* doubled quote within quote sequence */
                        appendStringInfoChar(&buf, *ptr++);
                    }
                    else
                        inquote = false;
                }
                else
                    appendStringInfoChar(&buf, ch);
            }

            column_data = buf.data;
            nulls[i] = false;
        }

        /*
         * Convert the column value
         */
        if (column_info->column_type != column_type)
        {
            getTypeInputInfo(column_type,
                             &column_info->typiofunc,
                             &column_info->typioparam);
            fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
                          fcinfo->flinfo->fn_mcxt);
            column_info->column_type = column_type;
        }

        values[i] = InputFunctionCall(&column_info->proc,
                                      column_data,
                                      column_info->typioparam,
                                      tupdesc->attrs[i]->atttypmod);

        /*
         * Prep for next column
         */
        needComma = true;
    }

    if (*ptr++ != ')')
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                 errmsg("malformed record literal: \"%s\"", string),
                 errdetail("Too many columns.")));
    /* Allow trailing whitespace */
    while (*ptr && isspace((unsigned char) *ptr))
        ptr++;
    if (*ptr)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                 errmsg("malformed record literal: \"%s\"", string),
                 errdetail("Junk after right parenthesis.")));

    tuple = heap_form_tuple(tupdesc, values, nulls);

    /*
     * We cannot return tuple->t_data because heap_form_tuple allocates it as
     * part of a larger chunk, and our caller may expect to be able to pfree
     * our result. So must copy the info into a new palloc chunk.
     */
    result = (HeapTupleHeader) palloc(tuple->t_len);
    memcpy(result, tuple->t_data, tuple->t_len);

    heap_freetuple(tuple);
    pfree(buf.data);
    pfree(values);
    pfree(nulls);
    ReleaseTupleDesc(tupdesc);

    PG_RETURN_HEAPTUPLEHEADER(result);
}
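/*
 * Worked example for the de-quoting loop above, hand-traced against the
 * code rather than produced by running it. Given the record literal
 *
 *     ("a b","she said ""hi""",)
 *
 * the three extracted column strings are:
 *
 *     a b               -- quotes stripped
 *     she said "hi"     -- a doubled quote inside quotes becomes one quote
 *     NULL              -- completely empty input means null
 *
 * A backslash escapes the next character, so \" and "" both yield a quote.
 */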
/*
 * refresh_by_match_merge
 *
 * Refresh a materialized view with transactional semantics, while allowing
 * concurrent reads.
 *
 * This is called after a new version of the data has been created in a
 * temporary table. It performs a full outer join against the old version of
 * the data, producing "diff" results. This join cannot work if there are any
 * duplicated rows in either the old or new versions, in the sense that every
 * column would compare as equal between the two rows. It does work correctly
 * in the face of rows which have at least one NULL value, with all non-NULL
 * columns equal. The behavior of NULLs on equality tests and on UNIQUE
 * indexes turns out to be quite convenient here; the tests we need to make
 * are consistent with default behavior. If there is at least one UNIQUE
 * index on the materialized view, we have exactly the guarantee we need.
 *
 * The temporary table used to hold the diff results contains just the TID of
 * the old record (if matched) and the ROW from the new table as a single
 * column of complex record type (if matched).
 *
 * Once we have the diff table, we perform set-based DELETE and INSERT
 * operations against the materialized view, and discard both temporary
 * tables.
 *
 * Everything from the generation of the new data to applying the differences
 * takes place under cover of an ExclusiveLock, since it seems as though we
 * would want to prohibit not only concurrent REFRESH operations, but also
 * incremental maintenance. It also doesn't seem reasonable or safe to allow
 * SELECT FOR UPDATE or SELECT FOR SHARE on rows being updated or deleted by
 * this command.
 */
static void
refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
                       int save_sec_context)
{
    StringInfoData querybuf;
    Relation    matviewRel;
    Relation    tempRel;
    char       *matviewname;
    char       *tempname;
    char       *diffname;
    TupleDesc   tupdesc;
    bool        foundUniqueIndex;
    List       *indexoidlist;
    ListCell   *indexoidscan;
    int16       relnatts;
    bool       *usedForQual;

    initStringInfo(&querybuf);
    matviewRel = heap_open(matviewOid, NoLock);
    matviewname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
                                             RelationGetRelationName(matviewRel));
    tempRel = heap_open(tempOid, NoLock);
    tempname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(tempRel)),
                                          RelationGetRelationName(tempRel));
    diffname = make_temptable_name_n(tempname, 2);

    relnatts = matviewRel->rd_rel->relnatts;
    usedForQual = (bool *) palloc0(sizeof(bool) * relnatts);

    /* Open SPI context. */
    if (SPI_connect() != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed");

    /* Analyze the temp table with the new contents. */
    appendStringInfo(&querybuf, "ANALYZE %s", tempname);
    if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
        elog(ERROR, "SPI_exec failed: %s", querybuf.data);

    /*
     * We need to ensure that there are not duplicate rows without NULLs in
     * the new data set before we can count on the "diff" results. Check for
     * that in a way that allows showing the first duplicated row found. Even
     * after we pass this test, a unique index on the materialized view may
     * find a duplicate key problem.
     */
    resetStringInfo(&querybuf);
    appendStringInfo(&querybuf,
                     "SELECT newdata FROM %s newdata "
                     "WHERE newdata IS NOT NULL AND EXISTS "
                     "(SELECT * FROM %s newdata2 WHERE newdata2 IS NOT NULL "
                     "AND newdata2 OPERATOR(pg_catalog.*=) newdata "
                     "AND newdata2.ctid OPERATOR(pg_catalog.<>) "
                     "newdata.ctid) LIMIT 1",
                     tempname, tempname);
    if (SPI_execute(querybuf.data, false, 1) != SPI_OK_SELECT)
        elog(ERROR, "SPI_exec failed: %s", querybuf.data);
    if (SPI_processed > 0)
    {
        ereport(ERROR,
                (errcode(ERRCODE_CARDINALITY_VIOLATION),
                 errmsg("new data for \"%s\" contains duplicate rows without any null columns",
                        RelationGetRelationName(matviewRel)),
                 errdetail("Row: %s",
                           SPI_getvalue(SPI_tuptable->vals[0],
                                        SPI_tuptable->tupdesc, 1))));
    }

    SetUserIdAndSecContext(relowner,
                           save_sec_context | SECURITY_LOCAL_USERID_CHANGE);

    /* Start building the query for creating the diff table. */
    resetStringInfo(&querybuf);
    appendStringInfo(&querybuf,
                     "CREATE TEMP TABLE %s AS "
                     "SELECT mv.ctid AS tid, newdata "
                     "FROM %s mv FULL JOIN %s newdata ON (",
                     diffname, matviewname, tempname);

    /*
     * Get the list of index OIDs for the table from the relcache, and look up
     * each one in the pg_index syscache. We will test for equality on all
     * columns present in all unique indexes which only reference columns and
     * include all rows.
     */
    tupdesc = matviewRel->rd_att;
    foundUniqueIndex = false;
    indexoidlist = RelationGetIndexList(matviewRel);

    foreach(indexoidscan, indexoidlist)
    {
        Oid         indexoid = lfirst_oid(indexoidscan);
        Relation    indexRel;
        Form_pg_index indexStruct;

        indexRel = index_open(indexoid, RowExclusiveLock);
        indexStruct = indexRel->rd_index;

        /*
         * We're only interested if it is unique, valid, contains no
         * expressions, and is not partial.
         */
        if (indexStruct->indisunique &&
            IndexIsValid(indexStruct) &&
            RelationGetIndexExpressions(indexRel) == NIL &&
            RelationGetIndexPredicate(indexRel) == NIL)
        {
            int         numatts = indexStruct->indnatts;
            int         i;

            /* Add quals for all columns from this index. */
            for (i = 0; i < numatts; i++)
            {
                int         attnum = indexStruct->indkey.values[i];
                Oid         type;
                Oid         op;
                const char *colname;

                /*
                 * Only include the column once regardless of how many times
                 * it shows up in how many indexes.
                 */
                if (usedForQual[attnum - 1])
                    continue;
                usedForQual[attnum - 1] = true;

                /*
                 * Actually add the qual, ANDed with any others.
                 */
                if (foundUniqueIndex)
                    appendStringInfoString(&querybuf, " AND ");

                colname = quote_identifier(NameStr((tupdesc->attrs[attnum - 1])->attname));
                appendStringInfo(&querybuf, "newdata.%s ", colname);
                type = attnumTypeId(matviewRel, attnum);
                op = lookup_type_cache(type, TYPECACHE_EQ_OPR)->eq_opr;
                mv_GenerateOper(&querybuf, op);
                appendStringInfo(&querybuf, " mv.%s", colname);

                foundUniqueIndex = true;
            }
        }

        /* Keep the locks, since we're about to run DML which needs them. */
        index_close(indexRel, NoLock);
    }
static Tuplestorestate *
build_tuplestore_recursively(char *key_fld,
                             char *parent_key_fld,
                             char *relname,
                             char *orderby_fld,
                             char *branch_delim,
                             char *start_with,
                             char *branch,
                             int level,
                             int *serial,
                             int max_depth,
                             bool show_branch,
                             bool show_serial,
                             MemoryContext per_query_ctx,
                             AttInMetadata *attinmeta,
                             Tuplestorestate *tupstore)
{
    TupleDesc   tupdesc = attinmeta->tupdesc;
    int         ret;
    int         proc;
    int         serial_column;
    StringInfoData sql;
    char      **values;
    char       *current_key;
    char       *current_key_parent;
    char        current_level[INT32_STRLEN];
    char        serial_str[INT32_STRLEN];
    char       *current_branch;
    HeapTuple   tuple;

    if (max_depth > 0 && level > max_depth)
        return tupstore;

    initStringInfo(&sql);

    /* Build initial sql statement */
    if (!show_serial)
    {
        appendStringInfo(&sql, "SELECT %s, %s FROM %s WHERE %s = %s AND %s IS NOT NULL AND %s <> %s",
                         key_fld,
                         parent_key_fld,
                         relname,
                         parent_key_fld,
                         quote_literal_cstr(start_with),
                         key_fld, key_fld, parent_key_fld);
        serial_column = 0;
    }
    else
    {
        appendStringInfo(&sql, "SELECT %s, %s FROM %s WHERE %s = %s AND %s IS NOT NULL AND %s <> %s ORDER BY %s",
                         key_fld,
                         parent_key_fld,
                         relname,
                         parent_key_fld,
                         quote_literal_cstr(start_with),
                         key_fld, key_fld,
                         parent_key_fld, orderby_fld);
        serial_column = 1;
    }

    if (show_branch)
        values = (char **) palloc((CONNECTBY_NCOLS + serial_column) * sizeof(char *));
    else
        values = (char **) palloc((CONNECTBY_NCOLS_NOBRANCH + serial_column) * sizeof(char *));

    /* First time through, do a little setup */
    if (level == 0)
    {
        /* root value is the one we initially start with */
        values[0] = start_with;

        /* root value has no parent */
        values[1] = NULL;

        /* root level is 0 */
        sprintf(current_level, "%d", level);
        values[2] = current_level;

        /* root branch is just starting root value */
        if (show_branch)
            values[3] = start_with;

        /* root starts the serial with 1 */
        if (show_serial)
        {
            sprintf(serial_str, "%d", (*serial)++);
            if (show_branch)
                values[4] = serial_str;
            else
                values[3] = serial_str;
        }

        /* construct the tuple */
        tuple = BuildTupleFromCStrings(attinmeta, values);

        /* now store it */
        tuplestore_puttuple(tupstore, tuple);

        /* increment level */
        level++;
    }

    /* Retrieve the desired rows */
    ret = SPI_execute(sql.data, true, 0);
    proc = SPI_processed;

    /* Check for qualifying tuples */
    if ((ret == SPI_OK_SELECT) && (proc > 0))
    {
        HeapTuple   spi_tuple;
        SPITupleTable *tuptable = SPI_tuptable;
        TupleDesc   spi_tupdesc = tuptable->tupdesc;
        int         i;
        StringInfoData branchstr;
        StringInfoData chk_branchstr;
        StringInfoData chk_current_key;

        /* First time through, do a little more setup */
        if (level == 0)
        {
            /*
             * Check that return tupdesc is compatible with the one we got
             * from the query, but only at level 0 -- no need to check more
             * than once
             */
            if (!compatConnectbyTupleDescs(tupdesc, spi_tupdesc))
                ereport(ERROR,
                        (errcode(ERRCODE_SYNTAX_ERROR),
                         errmsg("invalid return type"),
                         errdetail("Return and SQL tuple descriptions are "
                                   "incompatible.")));
        }

        initStringInfo(&branchstr);
        initStringInfo(&chk_branchstr);
        initStringInfo(&chk_current_key);

        for (i = 0; i < proc; i++)
        {
            /* initialize branch for this pass */
            appendStringInfo(&branchstr, "%s", branch);
            appendStringInfo(&chk_branchstr, "%s%s%s", branch_delim, branch, branch_delim);

            /* get the next sql result tuple */
            spi_tuple = tuptable->vals[i];

            /* get the current key and parent */
            current_key = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
            appendStringInfo(&chk_current_key, "%s%s%s", branch_delim, current_key, branch_delim);
            current_key_parent = pstrdup(SPI_getvalue(spi_tuple, spi_tupdesc, 2));

            /* get the current level */
            sprintf(current_level, "%d", level);

            /* check to see if this key is also an ancestor */
            if (strstr(chk_branchstr.data, chk_current_key.data))
                elog(ERROR, "infinite recursion detected");

            /* OK, extend the branch */
            appendStringInfo(&branchstr, "%s%s", branch_delim, current_key);
            current_branch = branchstr.data;

            /* build a tuple */
            values[0] = pstrdup(current_key);
            values[1] = current_key_parent;
            values[2] = current_level;
            if (show_branch)
                values[3] = current_branch;
            if (show_serial)
            {
                sprintf(serial_str, "%d", (*serial)++);
                if (show_branch)
                    values[4] = serial_str;
                else
                    values[3] = serial_str;
            }

            tuple = BuildTupleFromCStrings(attinmeta, values);

            xpfree(current_key);
            xpfree(current_key_parent);

            /* store the tuple for later use */
            tuplestore_puttuple(tupstore, tuple);

            heap_freetuple(tuple);

            /* recurse using the current key (values[0]) as the new start_with */
            tupstore = build_tuplestore_recursively(key_fld,
                                                    parent_key_fld,
                                                    relname,
                                                    orderby_fld,
                                                    branch_delim,
                                                    values[0],
                                                    current_branch,
                                                    level + 1,
                                                    serial,
                                                    max_depth,
                                                    show_branch,
                                                    show_serial,
                                                    per_query_ctx,
                                                    attinmeta,
                                                    tupstore);

            /* reset branch for next pass */
            resetStringInfo(&branchstr);
            resetStringInfo(&chk_branchstr);
            resetStringInfo(&chk_current_key);
        }

        xpfree(branchstr.data);
        xpfree(chk_branchstr.data);
        xpfree(chk_current_key.data);
    }

    return tupstore;
}
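/*
 * Standalone sketch of the cycle check above: the current branch is kept as
 * a delimiter-separated path, and a key counts as an ancestor iff the key
 * wrapped in delimiters is a substring of the branch wrapped in delimiters.
 * The wrapping is what prevents "2" from matching inside "12". Plain C;
 * key_in_branch() is a hypothetical helper, not part of tablefunc.
 */
#include <stdio.h>
#include <string.h>

static int
key_in_branch(const char *branch, const char *key, const char *delim)
{
    char        chk_branch[256];
    char        chk_key[64];

    /* wrap both in delimiters, as the chk_* buffers do above */
    snprintf(chk_branch, sizeof(chk_branch), "%s%s%s", delim, branch, delim);
    snprintf(chk_key, sizeof(chk_key), "%s%s%s", delim, key, delim);
    return strstr(chk_branch, chk_key) != NULL;
}

int
main(void)
{
    printf("%d\n", key_in_branch("1~12~123", "2", "~"));    /* 0: no cycle */
    printf("%d\n", key_in_branch("1~12~123", "12", "~"));   /* 1: ancestor */
    return 0;
}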
/*
 * The next token in the input stream is known to be a string; lex it.
 */
static inline void
json_lex_string(JsonLexContext *lex)
{
    char       *s;
    int         len;
    int         hi_surrogate = -1;

    if (lex->strval != NULL)
        resetStringInfo(lex->strval);

    Assert(lex->input_length > 0);
    s = lex->token_start;
    len = lex->token_start - lex->input;
    for (;;)
    {
        s++;
        len++;

        /* Premature end of the string. */
        if (len >= lex->input_length)
        {
            lex->token_terminator = s;
            report_invalid_token(lex);
        }
        else if (*s == '"')
            break;
        else if ((unsigned char) *s < 32)
        {
            /* Per RFC4627, these characters MUST be escaped. */
            /* Since *s isn't printable, exclude it from the context string */
            lex->token_terminator = s;
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                     errmsg("invalid input syntax for type json"),
                     errdetail("Character with value 0x%02x must be escaped.",
                               (unsigned char) *s),
                     report_json_context(lex)));
        }
        else if (*s == '\\')
        {
            /* OK, we have an escape character. */
            s++;
            len++;
            if (len >= lex->input_length)
            {
                lex->token_terminator = s;
                report_invalid_token(lex);
            }
            else if (*s == 'u')
            {
                int         i;
                int         ch = 0;

                for (i = 1; i <= 4; i++)
                {
                    s++;
                    len++;
                    if (len >= lex->input_length)
                    {
                        lex->token_terminator = s;
                        report_invalid_token(lex);
                    }
                    else if (*s >= '0' && *s <= '9')
                        ch = (ch * 16) + (*s - '0');
                    else if (*s >= 'a' && *s <= 'f')
                        ch = (ch * 16) + (*s - 'a') + 10;
                    else if (*s >= 'A' && *s <= 'F')
                        ch = (ch * 16) + (*s - 'A') + 10;
                    else
                    {
                        lex->token_terminator = s + pg_mblen(s);
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                                 errmsg("invalid input syntax for type json"),
                                 errdetail("\"\\u\" must be followed by four hexadecimal digits."),
                                 report_json_context(lex)));
                    }
                }
                if (lex->strval != NULL)
                {
                    char        utf8str[5];
                    int         utf8len;

                    if (ch >= 0xd800 && ch <= 0xdbff)
                    {
                        if (hi_surrogate != -1)
                            ereport(ERROR,
                                    (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                                     errmsg("invalid input syntax for type json"),
                                     errdetail("Unicode high surrogate must not follow a high surrogate."),
                                     report_json_context(lex)));
                        hi_surrogate = (ch & 0x3ff) << 10;
                        continue;
                    }
                    else if (ch >= 0xdc00 && ch <= 0xdfff)
                    {
                        if (hi_surrogate == -1)
                            ereport(ERROR,
                                    (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                                     errmsg("invalid input syntax for type json"),
                                     errdetail("Unicode low surrogate must follow a high surrogate."),
                                     report_json_context(lex)));
                        ch = 0x10000 + hi_surrogate + (ch & 0x3ff);
                        hi_surrogate = -1;
                    }

                    if (hi_surrogate != -1)
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                                 errmsg("invalid input syntax for type json"),
                                 errdetail("Unicode low surrogate must follow a high surrogate."),
                                 report_json_context(lex)));

                    /*
                     * For UTF8, replace the escape sequence by the actual
                     * utf8 character in lex->strval. Do this also for other
                     * encodings if the escape designates an ASCII character,
                     * otherwise raise an error. We don't ever unescape a
                     * \u0000, since that would result in an impermissible
                     * nul byte.
                     */
                    if (ch == 0)
                    {
                        appendStringInfoString(lex->strval, "\\u0000");
                    }
                    else if (GetDatabaseEncoding() == PG_UTF8)
                    {
                        unicode_to_utf8(ch, (unsigned char *) utf8str);
                        utf8len = pg_utf_mblen((unsigned char *) utf8str);
                        appendBinaryStringInfo(lex->strval, utf8str, utf8len);
                    }
                    else if (ch <= 0x007f)
                    {
                        /*
                         * This is the only way to designate things like a
                         * form feed character in JSON, so it's useful in all
                         * encodings.
                         */
                        appendStringInfoChar(lex->strval, (char) ch);
                    }
                    else
                    {
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                                 errmsg("invalid input syntax for type json"),
                                 errdetail("Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8."),
                                 report_json_context(lex)));
                    }
                }
            }
            else if (lex->strval != NULL)
            {
                if (hi_surrogate != -1)
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                             errmsg("invalid input syntax for type json"),
                             errdetail("Unicode low surrogate must follow a high surrogate."),
                             report_json_context(lex)));

                switch (*s)
                {
                    case '"':
                    case '\\':
                    case '/':
                        appendStringInfoChar(lex->strval, *s);
                        break;
                    case 'b':
                        appendStringInfoChar(lex->strval, '\b');
                        break;
                    case 'f':
                        appendStringInfoChar(lex->strval, '\f');
                        break;
                    case 'n':
                        appendStringInfoChar(lex->strval, '\n');
                        break;
                    case 'r':
                        appendStringInfoChar(lex->strval, '\r');
                        break;
                    case 't':
                        appendStringInfoChar(lex->strval, '\t');
                        break;
                    default:
                        /* Not a valid string escape, so error out. */
                        lex->token_terminator = s + pg_mblen(s);
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                                 errmsg("invalid input syntax for type json"),
                                 errdetail("Escape sequence \"\\%s\" is invalid.",
                                           extract_mb_char(s)),
                                 report_json_context(lex)));
                }
            }
            else if (strchr("\"\\/bfnrt", *s) == NULL)
            {
                /*
                 * Simpler processing if we're not bothered about de-escaping
                 *
                 * It's very tempting to remove the strchr() call here and
                 * replace it with a switch statement, but testing so far has
                 * shown it's not a performance win.
                 */
                lex->token_terminator = s + pg_mblen(s);
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                         errmsg("invalid input syntax for type json"),
                         errdetail("Escape sequence \"\\%s\" is invalid.",
                                   extract_mb_char(s)),
                         report_json_context(lex)));
            }
        }
        else if (lex->strval != NULL)
        {
            if (hi_surrogate != -1)
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                         errmsg("invalid input syntax for type json"),
                         errdetail("Unicode low surrogate must follow a high surrogate."),
                         report_json_context(lex)));

            appendStringInfoChar(lex->strval, *s);
        }
    }

    if (hi_surrogate != -1)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                 errmsg("invalid input syntax for type json"),
                 errdetail("Unicode low surrogate must follow a high surrogate."),
                 report_json_context(lex)));

    /* Hooray, we found the end of the string! */
    lex->prev_token_terminator = lex->token_terminator;
    lex->token_terminator = s + 1;
}
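/*
 * The surrogate-pair arithmetic above, extracted as a standalone check. A
 * high surrogate in D800..DBFF contributes its low 10 bits shifted left by
 * 10 (that is what the lexer stashes in hi_surrogate), a low surrogate in
 * DC00..DFFF contributes its low 10 bits, and the pair maps to 0x10000 plus
 * the 20-bit combination. Plain C; combine_surrogates() is a hypothetical
 * helper mirroring the lexer's computation.
 */
#include <stdio.h>

static unsigned int
combine_surrogates(unsigned int hi, unsigned int lo)
{
    unsigned int hi_part = (hi & 0x3ff) << 10;  /* as stored in hi_surrogate */

    return 0x10000 + hi_part + (lo & 0x3ff);
}

int
main(void)
{
    /* \ud83d\ude00 is U+1F600 (grinning face) */
    printf("U+%X\n", combine_surrogates(0xd83d, 0xde00));
    return 0;
}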