/*
 * pg_get_serverdef_string finds the foreign server that corresponds to the
 * given foreign tableId, and returns this server's definition.
 */
char *
pg_get_serverdef_string(Oid tableRelationId)
{
	ForeignTable *table = GetForeignTable(tableRelationId);
	ForeignServer *foreignServer = GetForeignServer(table->serverid);
	ForeignDataWrapper *wrapper = GetForeignDataWrapper(foreignServer->fdwid);
	StringInfoData definition = { NULL, 0, 0, 0 };

	initStringInfo(&definition);

	/* server name is an identifier; type and version are string literals */
	appendStringInfo(&definition, "CREATE SERVER %s",
					 quote_identifier(foreignServer->servername));

	if (foreignServer->servertype != NULL)
	{
		appendStringInfo(&definition, " TYPE %s",
						 quote_literal_cstr(foreignServer->servertype));
	}

	if (foreignServer->serverversion != NULL)
	{
		appendStringInfo(&definition, " VERSION %s",
						 quote_literal_cstr(foreignServer->serverversion));
	}

	appendStringInfo(&definition, " FOREIGN DATA WRAPPER %s",
					 quote_identifier(wrapper->fdwname));

	/* append server options, if any */
	AppendOptionListToString(&definition, foreignServer->options);

	return definition.data;
}
/*
 * pg_get_extensiondef_string finds the foreign data wrapper that corresponds to
 * the given foreign tableId, and checks if an extension owns this foreign data
 * wrapper. If it does, the function returns the extension's definition. If not,
 * the function returns null.
 */
char *
pg_get_extensiondef_string(Oid tableRelationId)
{
	ForeignTable *foreignTable = GetForeignTable(tableRelationId);
	ForeignServer *server = GetForeignServer(foreignTable->serverid);
	ForeignDataWrapper *foreignDataWrapper = GetForeignDataWrapper(server->fdwid);

	/* buffer.data stays NULL unless an owning extension is found */
	StringInfoData buffer = { NULL, 0, 0, 0 };

	Oid classId = ForeignDataWrapperRelationId;
	Oid objectId = server->fdwid;

	Oid extensionId = getExtensionOfObject(classId, objectId);
	if (OidIsValid(extensionId))
	{
		char *extensionName = get_extension_name(extensionId);
		Oid extensionSchemaId = get_extension_schema(extensionId);
		char *extensionSchema = get_namespace_name(extensionSchemaId);

		initStringInfo(&buffer);
		appendStringInfo(&buffer, "CREATE EXTENSION IF NOT EXISTS %s WITH SCHEMA %s",
						 quote_identifier(extensionName),
						 quote_identifier(extensionSchema));
	}
	else
	{
		/* no owning extension: warn and fall through to the NULL return */
		ereport(NOTICE, (errmsg("foreign-data wrapper \"%s\" does not have an "
								"extension defined", foreignDataWrapper->fdwname)));
	}

	return (buffer.data);
}
/*
 * Copied from src/backend/utils/adt/ruleutils.c, not exported.
 * NOTE: keep this in sync with the upstream copy.
 *
 * get_opclass_name			- fetch name of an index operator class
 *
 * The opclass name is appended (after a space) to buf.
 *
 * Output is suppressed if the opclass is the default for the given
 * actual_datatype.  (If you don't want this behavior, just pass
 * InvalidOid for actual_datatype.)
 */
void
get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf)
{
	HeapTuple ht_opc;
	Form_pg_opclass opcrec;
	char *opcname;
	char *nspname;

	ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
	if (!HeapTupleIsValid(ht_opc))
		elog(ERROR, "cache lookup failed for opclass %u", opclass);
	opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc);

	if (!OidIsValid(actual_datatype) ||
		GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
	{
		/* Okay, we need the opclass name.  Do we need to qualify it? */
		opcname = NameStr(opcrec->opcname);
		if (OpclassIsVisible(opclass))
			appendStringInfo(buf, " %s", quote_identifier(opcname));
		else
		{
			/* not visible in search_path: schema-qualify the name */
			nspname = get_namespace_name(opcrec->opcnamespace);
			appendStringInfo(buf, " %s.%s",
							 quote_identifier(nspname),
							 quote_identifier(opcname));
		}
	}
	ReleaseSysCache(ht_opc);
}
/*
 * get_dymanic_record_fields [sic; presumably "dynamic"]
 *
 * Builds a comma-separated "colname typename, ..." list describing the
 * record type that this call is expected to return, by walking the tuple
 * descriptor obtained from the call site.  Result is a palloc'd StringInfo.
 *
 * NOTE(review): the return value of get_call_result_type() is not checked;
 * presumably callers only reach here when a composite result is expected —
 * TODO confirm, otherwise tuple_desc could be unset.
 */
static StringInfo
get_dymanic_record_fields(PlxFn *plx_fn, FunctionCallInfo fcinfo)
{
	StringInfo buf;
	Oid oid;
	TupleDesc tuple_desc;
	int i;

	get_call_result_type(fcinfo, &oid, &tuple_desc);
	buf = makeStringInfo();
	for (i = 0; i < tuple_desc->natts; i++)
	{
		Form_pg_attribute a;
		HeapTuple type_tuple;
		Form_pg_type type_struct;

		a = tuple_desc->attrs[i];
		/* look up the attribute's type to get its name */
		type_tuple = SearchSysCache(TYPEOID,
									ObjectIdGetDatum(a->atttypid),
									0, 0, 0);
		if (!HeapTupleIsValid(type_tuple))
			plx_error(plx_fn, "cache lookup failed for type %u", a->atttypid);
		type_struct = (Form_pg_type) GETSTRUCT(type_tuple);
		{
			/* "name type" pairs, comma-separated after the first */
			appendStringInfo(
				buf, "%s%s %s",
				((i > 0) ? ", " : ""),
				quote_identifier(NameStr(a->attname)),
				quote_identifier(NameStr(type_struct->typname)));
		}
		ReleaseSysCache(type_tuple);
	}
	return buf;
}
/*
 * DropErrorTable
 *
 * Drop the error table from the database. This function will be called from
 * destroyCdbSreh when an autogenerated error table was not used in the COPY
 * operation granted KEEP wasn't specified.
 */
static void
DropErrorTable(CdbSreh *cdbsreh)
{
	StringInfoData dropCommand;
	RangeVar *errorTableRv;

	Insist(Gp_role == GP_ROLE_DISPATCH);

	ereport(NOTICE,
			(errcode(ERRCODE_SUCCESSFUL_COMPLETION),
			 errmsg("Dropping the auto-generated unused error table"),
			 errhint("Use KEEP in LOG INTO clause to force keeping the error table alive")));

	/* build the DROP statement text that will be dispatched to the QEs */
	initStringInfo(&dropCommand);
	appendStringInfo(&dropCommand, "DROP TABLE %s.%s",
					 quote_identifier(get_namespace_name(RelationGetNamespace(cdbsreh->errtbl))),
					 quote_identifier(RelationGetRelationName(cdbsreh->errtbl)));

	errorTableRv = makeRangeVar(get_namespace_name(RelationGetNamespace(cdbsreh->errtbl)),
								RelationGetRelationName(cdbsreh->errtbl), -1);

	/* DROP the relation on the QD */
	RemoveRelation(errorTableRv, DROP_RESTRICT, NULL, RELKIND_RELATION);

	/* dispatch the DROP to the QEs */
	CdbDoCommand(dropCommand.data, false, /*no txn */ false);

	pfree(dropCommand.data);
}
/*
 * Creates and populates a sample table for a PXF table.
 * The actual queried table is not the original PXF table but a copy of it
 * with additional attributes to enable sampling.
 *
 * The results are stored in sampleTableRelTuples.
 */
static void
buildSampleFromPxf(const char* sampleSchemaName,
				   const char* sampleTableName,
				   const char* pxfSampleTable,
				   List *lAttributeNames,
				   float4 *sampleTableRelTuples)
{
	int nAttributes = -1;
	int i = 0;
	ListCell *le = NULL;
	StringInfoData str;

	initStringInfo(&str);

	/* CTAS header: create the sample table in the sample schema */
	appendStringInfo(&str, "create table %s.%s as (select ",
					 quote_identifier(sampleSchemaName),
					 quote_identifier(sampleTableName));

	nAttributes = list_length(lAttributeNames);

	/* project each source attribute ("Ta" aliases the sampled table),
	 * comma-separated; a trailing space follows the last one */
	foreach_with_count(le, lAttributeNames, i)
	{
		appendStringInfo(&str, "Ta.%s",
						 quote_identifier((const char *) lfirst(le)));
		if (i < nAttributes - 1)
		{
			appendStringInfo(&str, ", ");
		}
		else
		{
			appendStringInfo(&str, " ");
		}
	}
	/* NOTE(review): function body continues beyond this chunk boundary */
/*
 * Get the max size of the relation across segments
 *
 * Dispatches a pg_relation_size() query to every QE and returns the
 * largest reported size, in bytes.
 */
int64
cdbRelMaxSegSize(Relation rel)
{
	int64 size = 0;
	int i;
	CdbPgResults cdb_pgresults = {NULL, 0};
	StringInfoData buffer;
	char *schemaName;
	char *relName;

	/*
	 * Let's ask the QEs for the size of the relation
	 */
	initStringInfo(&buffer);

	schemaName = get_namespace_name(RelationGetNamespace(rel));
	if (schemaName == NULL)
		/* Oid is unsigned, so use %u (the original %d was a format mismatch) */
		elog(ERROR, "cache lookup failed for namespace %u",
			 RelationGetNamespace(rel));

	relName = RelationGetRelationName(rel);

	/*
	 * Safer to pass names than oids, just in case they get out of sync between QD and QE,
	 * which might happen with a toast table or index, I think (but maybe not)
	 */
	appendStringInfo(&buffer, "select pg_relation_size('%s.%s')",
					 quote_identifier(schemaName), quote_identifier(relName));

	CdbDispatchCommand(buffer.data, DF_WITH_SNAPSHOT, &cdb_pgresults);

	for (i = 0; i < cdb_pgresults.numResults; i++)
	{
		struct pg_result * pgresult = cdb_pgresults.pg_results[i];

		if (PQresultStatus(pgresult) != PGRES_TUPLES_OK)
		{
			/* free the result set before erroring out */
			cdbdisp_clearCdbPgResults(&cdb_pgresults);
			elog(ERROR,"cdbRelMaxSegSize: resultStatus not tuples_Ok: %s %s",
				 PQresStatus(PQresultStatus(pgresult)),
				 PQresultErrorMessage(pgresult));
		}
		else
		{
			Assert(PQntuples(pgresult) == 1);
			int64 tempsize = 0;
			(void) scanint8(PQgetvalue(pgresult, 0, 0), false, &tempsize);
			/* keep the maximum across all segments */
			if (tempsize > size)
				size = tempsize;
		}
	}

	pfree(buffer.data);
	cdbdisp_clearCdbPgResults(&cdb_pgresults);
	return size;
}
/*
 * Append a psql meta-command that connects to the given database with the
 * then-current connection's user, host and port.
 */
void
appendPsqlMetaConnect(PQExpBuffer buf, const char *dbname)
{
	const char *cp;
	bool		needsGeneralCase = false;

	/*
	 * If the name is plain ASCII characters, emit a trivial "\connect "foo"".
	 * For other names, even many not technically requiring it, skip to the
	 * general case.  No database has a zero-length name.
	 */
	for (cp = dbname; *cp != '\0'; cp++)
	{
		char		c = *cp;

		/* newlines cannot be represented in a psql meta-command at all */
		if (c == '\n' || c == '\r')
		{
			fprintf(stderr,
					_("database name contains a newline or carriage return: \"%s\"\n"),
					dbname);
			exit(EXIT_FAILURE);
		}

		if (!((c >= 'a' && c <= 'z') ||
			  (c >= 'A' && c <= 'Z') ||
			  (c >= '0' && c <= '9') ||
			  c == '_' || c == '.'))
			needsGeneralCase = true;
	}

	appendPQExpBufferStr(buf, "\\connect ");
	if (needsGeneralCase)
	{
		PQExpBufferData connstr;

		/* build a full connection string and quote it as an identifier */
		initPQExpBuffer(&connstr);
		appendPQExpBuffer(&connstr, "dbname=");
		appendConnStrVal(&connstr, dbname);

		appendPQExpBuffer(buf, "-reuse-previous=on ");

		/*
		 * As long as the name does not contain a newline, SQL identifier
		 * quoting satisfies the psql meta-command parser.  Prefer not to
		 * involve psql-interpreted single quotes, which behaved differently
		 * before PostgreSQL 9.2.
		 */
		appendPQExpBufferStr(buf, quote_identifier(connstr.data));
		termPQExpBuffer(&connstr);
	}
	else
		appendPQExpBufferStr(buf, quote_identifier(dbname));
	appendPQExpBufferChar(buf, '\n');
}
/*
 * It is possible for there to be a mismatch in the need for TOAST tables
 * between the old and new servers, e.g. some pre-9.1 tables didn't need
 * TOAST tables but will need them in 9.1+.  (There are also opposite cases,
 * but these are handled by setting binary_upgrade_next_toast_pg_class_oid.)
 *
 * We can't allow the TOAST table to be created by pg_dump with a
 * pg_dump-assigned oid because it might conflict with a later table that
 * uses that oid, causing a "file exists" error for pg_class conflicts, and
 * a "duplicate oid" error for pg_type conflicts.  (TOAST tables need pg_type
 * entries.)
 *
 * Therefore, a backend in binary-upgrade mode will not create a TOAST
 * table unless an OID as passed in via pg_upgrade_support functions.
 * This function is called after the restore and uses ALTER TABLE to
 * auto-create any needed TOAST tables which will not conflict with
 * restored oids.
 */
void
optionally_create_toast_tables(void)
{
	int dbnum;

	prep_status("Creating newly-required TOAST tables");

	/* visit every database in the new cluster */
	for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
	{
		PGresult *res;
		int ntups;
		int rowno;
		int i_nspname, i_relname;
		DbInfo *active_db = &new_cluster.dbarr.dbs[dbnum];
		PGconn *conn = connectToServer(&new_cluster, active_db->db_name);

		/* find user heap/matview relations that have no TOAST table */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_namespace n "
								"WHERE c.relnamespace = n.oid AND "
								" n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
								"c.relkind IN ('r', 'm') AND "
								"c.reltoastrelid = 0");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			/* enable auto-oid-numbered TOAST creation if needed */
			PQclear(executeQueryOrDie(conn,
									  "SELECT binary_upgrade.set_next_toast_pg_class_oid('%d'::pg_catalog.oid);",
									  OPTIONALLY_CREATE_TOAST_OID));

			/* dummy command that also triggers check for required TOAST table */
			PQclear(executeQueryOrDie(conn,
									  "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);",
									  quote_identifier(PQgetvalue(res, rowno, i_nspname)),
									  quote_identifier(PQgetvalue(res, rowno, i_relname))));
		}

		PQclear(res);
		PQfinish(conn);
	}

	check_ok();
}
/*
 * Print a WHERE clause item
 */
static void
print_where_clause_item(StringInfo s, Relation relation, HeapTuple tuple,
						int natt, bool *first_column)
{
	TupleDesc	descriptor = RelationGetDescr(relation);
	Form_pg_attribute attribute = descriptor->attrs[natt - 1];
	Datum		value;
	bool		is_null;

	/* Skip dropped columns and system columns */
	if (attribute->attisdropped || attribute->attnum < 0)
		return;

	/* Join items with " AND "; the first item gets no separator */
	if (*first_column)
		*first_column = false;
	else
		appendStringInfoString(s, " AND ");

	/* Print attribute name */
	appendStringInfo(s, "%s = ", quote_identifier(NameStr(attribute->attname)));

	/* Get Datum from tuple */
	value = heap_getattr(tuple, natt, descriptor, &is_null);

	/* Print the value via the type's output function */
	print_value(s, value, attribute->atttypid, is_null);
}
/*
 * InstallHelper_groundwork
 *
 * Push a PL/Java invocation frame, hand the load path and the (raw and
 * quoted) loadpath table name to the Java-side groundwork method, then pop
 * the frame.  The frame is popped on both the normal and the error path.
 */
void InstallHelper_groundwork()
{
	Invocation ctx;
	Invocation_pushInvocation(&ctx, false);
	ctx.function = Function_INIT_WRITER;
	PG_TRY();
	{
		char const *lpt = LOADPATH_TBL_NAME;
		char const *lptq = quote_identifier(lpt);
		jstring pljlp = String_createJavaStringFromNTS(pljavaLoadPath);
		jstring jlpt = String_createJavaStringFromNTS(lpt);
		jstring jlptq = String_createJavaStringFromNTS(lptq);
		/*
		 * quote_identifier returns its argument unchanged when no quoting is
		 * needed; only pfree the result when it was newly allocated.
		 */
		if ( lptq != lpt )
			pfree((void *)lptq);
		JNI_callStaticVoidMethod(
			s_InstallHelper_class, s_InstallHelper_groundwork, pljlp, jlpt,
			jlptq,
			pljavaLoadingAsExtension ? JNI_TRUE : JNI_FALSE,
			extensionExNihilo ? JNI_TRUE : JNI_FALSE);
		/* drop local JNI references now that the call has returned */
		JNI_deleteLocalRef(pljlp);
		JNI_deleteLocalRef(jlpt);
		JNI_deleteLocalRef(jlptq);
		Invocation_popInvocation(false);
	}
	PG_CATCH();
	{
		/* pop with error=true, then propagate the error */
		Invocation_popInvocation(true);
		PG_RE_THROW();
	}
	PG_END_TRY();
}
/*
 * AppendOptionListToString converts the option list to its textual format, and
 * appends this text to the given string buffer.
 */
void
AppendOptionListToString(StringInfo stringBuffer, List *optionList)
{
	if (optionList != NIL)
	{
		ListCell *optionCell = NULL;
		bool firstOptionPrinted = false;

		appendStringInfo(stringBuffer, " OPTIONS (");

		foreach(optionCell, optionList)
		{
			DefElem *option = (DefElem *) lfirst(optionCell);
			char *optionName = option->defname;
			char *optionValue = defGetString(option);

			/* separate options after the first with a comma */
			if (firstOptionPrinted)
			{
				appendStringInfo(stringBuffer, ", ");
			}
			firstOptionPrinted = true;

			/* option name is an identifier, its value a string literal */
			appendStringInfo(stringBuffer, "%s ", quote_identifier(optionName));
			appendStringInfo(stringBuffer, "%s", quote_literal_cstr(optionValue));
		}
		/* NOTE(review): the closing ")" is appended past this chunk boundary */
/*
 * format_operator		- converts operator OID to "opr_name(args)"
 *
 * This exports the useful functionality of regoperatorout for use
 * in other backend modules.  The result is a palloc'd string.
 */
char *
format_operator(Oid operator_oid)
{
	char *result;
	HeapTuple opertup;

	opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operator_oid));

	if (HeapTupleIsValid(opertup))
	{
		Form_pg_operator operform = (Form_pg_operator) GETSTRUCT(opertup);
		char *oprname = NameStr(operform->oprname);
		char *nspname;
		StringInfoData buf;

		/* XXX no support here for bootstrap mode */

		initStringInfo(&buf);

		/*
		 * Would this oper be found (given the right args) by regoperatorin?
		 * If not, we need to qualify it.
		 */
		if (!OperatorIsVisible(operator_oid))
		{
			nspname = get_namespace_name(operform->oprnamespace);
			appendStringInfo(&buf, "%s.", quote_identifier(nspname));
		}

		appendStringInfo(&buf, "%s(", oprname);

		/* left operand type, or NONE for a prefix operator */
		if (operform->oprleft)
			appendStringInfo(&buf, "%s,", format_type_be(operform->oprleft));
		else
			appendStringInfo(&buf, "NONE,");

		/* right operand type, or NONE for a postfix operator */
		if (operform->oprright)
			appendStringInfo(&buf, "%s)", format_type_be(operform->oprright));
		else
			appendStringInfo(&buf, "NONE)");

		result = buf.data;

		ReleaseSysCache(opertup);
	}
	else
	{
		/*
		 * If OID doesn't match any pg_operator entry, return it numerically
		 */
		result = (char *) palloc(NAMEDATALEN);
		snprintf(result, NAMEDATALEN, "%u", operator_oid);
	}

	return result;
}
/*
 * split_old_dump
 *
 * This function splits pg_dumpall output into global values and
 * database creation, and per-db schemas.  This allows us to create
 * the support functions between restoring these two parts of the
 * dump.  We split on the first "\connect " after a CREATE ROLE
 * username match; this is where the per-db restore starts.
 *
 * We suppress recreation of our own username so we don't generate
 * an error during restore
 */
void
split_old_dump(void)
{
	FILE *all_dump, *globals_dump, *db_dump;
	FILE *current_output;
	char line[LINE_ALLOC];
	bool start_of_line = true;
	char create_role_str[MAX_STRING];
	char create_role_str_quote[MAX_STRING];
	char filename[MAXPGPATH];
	bool suppressed_username = false;

	/* open the combined dump for reading and the two split outputs */
	snprintf(filename, sizeof(filename), "%s", ALL_DUMP_FILE);
	if ((all_dump = fopen(filename, "r")) == NULL)
		pg_log(PG_FATAL, "Could not open dump file \"%s\": %s\n",
			   filename, getErrorText(errno));
	snprintf(filename, sizeof(filename), "%s", GLOBALS_DUMP_FILE);
	if ((globals_dump = fopen_priv(filename, "w")) == NULL)
		pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n",
			   filename, getErrorText(errno));
	snprintf(filename, sizeof(filename), "%s", DB_DUMP_FILE);
	if ((db_dump = fopen_priv(filename, "w")) == NULL)
		pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n",
			   filename, getErrorText(errno));
	current_output = globals_dump;

	/* patterns used to prevent our own username from being recreated */
	snprintf(create_role_str, sizeof(create_role_str),
			 "CREATE ROLE %s;", os_info.user);
	snprintf(create_role_str_quote, sizeof(create_role_str_quote),
			 "CREATE ROLE %s;", quote_identifier(os_info.user));

	while (fgets(line, sizeof(line), all_dump) != NULL)
	{
		/* switch to db_dump file output? */
		if (current_output == globals_dump && start_of_line &&
			suppressed_username &&
			strncmp(line, "\\connect ", strlen("\\connect ")) == 0)
			current_output = db_dump;

		/* output unless we are recreating our own username */
		if (current_output != globals_dump || !start_of_line ||
			(strncmp(line, create_role_str, strlen(create_role_str)) != 0 &&
			 strncmp(line, create_role_str_quote, strlen(create_role_str_quote)) != 0))
			fputs(line, current_output);
		else
			suppressed_username = true;

		/*
		 * An over-long input line arrives as several fgets chunks; only the
		 * chunk ending in '\n' marks the start of the next logical line.
		 */
		if (strlen(line) > 0 && line[strlen(line) - 1] == '\n')
			start_of_line = true;
		else
			start_of_line = false;
	}

	fclose(all_dump);
	fclose(globals_dump);
	fclose(db_dump);
}
/*
 * Decode an INSERT entry
 *
 * Emits "INSERT INTO rel (col, ...) VALUES (val, ...);" into s, skipping
 * dropped columns and system columns.
 */
static void
decoder_raw_insert(StringInfo s, Relation relation, HeapTuple tuple)
{
	TupleDesc tupdesc = RelationGetDescr(relation);
	int natt;
	bool first_column = true;

	/*
	 * makeStringInfo() already palloc's and initializes the buffer; the
	 * previous extra initStringInfo() call leaked the first allocation.
	 */
	StringInfo values = makeStringInfo();

	/* Query header */
	appendStringInfo(s, "INSERT INTO ");
	print_relname(s, relation);
	appendStringInfo(s, " (");

	/* Build column names and values */
	for (natt = 0; natt < tupdesc->natts; natt++)
	{
		Form_pg_attribute attr;
		Datum origval;
		bool isnull;

		attr = tupdesc->attrs[natt];

		/* Skip dropped columns and system columns */
		if (attr->attisdropped || attr->attnum < 0)
			continue;

		/* Skip comma for first columns */
		if (!first_column)
		{
			appendStringInfoString(s, ", ");
			appendStringInfoString(values, ", ");
		}
		else
			first_column = false;

		/* Print attribute name */
		appendStringInfo(s, "%s", quote_identifier(NameStr(attr->attname)));

		/* Get Datum from tuple (attribute numbers are 1-based) */
		origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);

		/* Print the value via the type's output function */
		print_value(values, origval, attr->atttypid, isnull);
	}

	/* Append values */
	appendStringInfo(s, ") VALUES (%s);", values->data);

	/* Clean up */
	resetStringInfo(values);
}
/*
 * quote_ident -
 *	  returns a properly quoted identifier
 */
Datum
quote_ident(PG_FUNCTION_ARGS)
{
	text	   *input = PG_GETARG_TEXT_PP(0);
	char	   *raw = text_to_cstring(input);
	const char *quoted = quote_identifier(raw);

	PG_RETURN_TEXT_P(cstring_to_text(quoted));
}
/*
 * Decode an UPDATE entry
 *
 * Emits "UPDATE rel SET col = val, ... WHERE ...;" into s, skipping dropped
 * and system columns.  The WHERE clause is derived from oldtuple/newtuple.
 */
static void
decoder_raw_update(StringInfo s, Relation relation, HeapTuple oldtuple,
				   HeapTuple newtuple)
{
	TupleDesc tupdesc = RelationGetDescr(relation);
	int natt;
	bool first_column = true;

	/* If there are no new values, simply leave as there is nothing to do */
	if (newtuple == NULL)
		return;

	appendStringInfo(s, "UPDATE ");
	print_relname(s, relation);

	/* Build the SET clause with the new values */
	appendStringInfo(s, " SET ");
	for (natt = 0; natt < tupdesc->natts; natt++)
	{
		Form_pg_attribute attr;
		Datum origval;
		bool isnull;

		attr = tupdesc->attrs[natt];

		/* Skip dropped columns and system columns */
		if (attr->attisdropped || attr->attnum < 0)
			continue;

		/* Skip comma for first columns */
		if (!first_column)
		{
			appendStringInfoString(s, ", ");
		}
		else
			first_column = false;

		/* Print attribute name */
		appendStringInfo(s, "%s = ", quote_identifier(NameStr(attr->attname)));

		/* Get Datum from tuple (attribute numbers are 1-based) */
		origval = heap_getattr(newtuple, natt + 1, tupdesc, &isnull);

		/* Print the value via the type's output function */
		print_value(s, origval, attr->atttypid, isnull);
	}

	/* Print WHERE clause */
	print_where_clause(s, relation, oldtuple, newtuple);

	appendStringInfoString(s, ";");
}
/* * quote_object_names * * It tries to quote the supplied identifiers */ static char * quote_object_name(const char *src1, const char *src2, const char *src3, const char *src4) { StringInfoData result; const char *temp; initStringInfo(&result); if (src1) { temp = quote_identifier(src1); appendStringInfo(&result, "%s", temp); if (src1 != temp) pfree((void *)temp); } if (src2) { temp = quote_identifier(src2); appendStringInfo(&result, ".%s", temp); if (src2 != temp) pfree((void *)temp); } if (src3) { temp = quote_identifier(src3); appendStringInfo(&result, ".%s", temp); if (src3 != temp) pfree((void *)temp); } if (src4) { temp = quote_identifier(src4); appendStringInfo(&result, ".%s", temp); if (src4 != temp) pfree((void *)temp); } return result.data; }
/* attach worker to the shared memory segment, read the job structure */
static void
initialize_worker(uint32 segment)
{
	dsm_segment *seg;
	ResourceOwner old,
				tmp;

	/* Connect to dynamic shared memory segment.
	 *
	 * In order to attach a dynamic shared memory segment, we need a
	 * resource owner. We cannot do StartTransactionCommand here, since
	 * we haven't yet attached to the database: to do this, we need to
	 * fetch information about connection properties from the shared
	 * memory segment.
	 */
	old = CurrentResourceOwner;
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Worker");
	seg = dsm_attach(segment);
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));
	/* keep the mapping alive past the temporary resource owner */
	dsm_pin_mapping(seg);
	/* restore the previous owner and discard the temporary one */
	tmp = CurrentResourceOwner;
	CurrentResourceOwner = old;
	ResourceOwnerDelete(tmp);
	job = palloc(sizeof(JobDesc));
	/* copy the arguments from shared memory segment */
	memcpy(job, dsm_segment_address(seg), sizeof(JobDesc));
	/* and detach it right away */
	dsm_detach(seg);
	Assert(job->magic == JOB_MAGIC);

	job_run_function.schema = quote_identifier(job->schemaname);
	job_run_function.name = quote_identifier("run_job");
}
/*
 * plpy.quote_ident -- quote a string as a SQL identifier.
 */
static PyObject *
PLy_quote_ident(PyObject *self, PyObject *args)
{
	const char *input;
	const char *quoted;

	if (!PyArg_ParseTuple(args, "s:quote_ident", &input))
		return NULL;

	quoted = quote_identifier(input);
	return PyString_FromString(quoted);
}
/*
 * Creates an external PXF table, with the same properties
 * as the given PXF table to be sampled, other than additional
 * 2 attributes in the location clause -
 * pxf_stats_sample_ratio and pxf_stats_max_fragments,
 * and a segment reject limit of 25 percent.
 */
static void
buildPxfTableCopy(Oid relationOid,
				  float4 samplingRatio,
				  int pxfStatMaxFragments,
				  const char* schemaName, const char* tableName,
				  const char* sampleSchemaName, const char* pxfSampleTable)
{
	/* build the CREATE statement text for the sample external table */
	char *createStmt = createPxfSampleStmt(relationOid,
										   schemaName, tableName,
										   sampleSchemaName, pxfSampleTable,
										   samplingRatio, pxfStatMaxFragments);

	/* execute it through SPI; read-write, unlimited row count */
	spiExecuteWithCallback(createStmt, false /*readonly*/, 0 /*tcount */,
						   NULL, NULL);

	pfree(createStmt);

	elog(DEBUG2, "Created PXF table %s.%s for sampling PXF table %s.%s",
		 quote_identifier(sampleSchemaName), quote_identifier(pxfSampleTable),
		 quote_identifier(schemaName), quote_identifier(tableName));
}
/*
 * worker_copy_shard_placement implements an internal UDF to copy a table's data
 * from a healthy placement into a receiving table on an unhealthy placement.
 * This function returns a boolean reflecting success or failure.
 */
Datum
worker_copy_shard_placement(PG_FUNCTION_ARGS)
{
	text *shardRelationNameText = PG_GETARG_TEXT_P(0);
	text *nodeNameText = PG_GETARG_TEXT_P(1);
	int32 nodePort = PG_GETARG_INT32(2);
	char *shardRelationName = text_to_cstring(shardRelationNameText);
	char *nodeName = text_to_cstring(nodeNameText);
	bool fetchSuccessful = false;

	Oid shardRelationId = ResolveRelationId(shardRelationNameText);
	Relation shardTable = heap_open(shardRelationId, RowExclusiveLock);
	TupleDesc tupleDescriptor = RelationGetDescr(shardTable);

	/* in-memory tuple store to stage rows fetched from the healthy node */
	Tuplestorestate *tupleStore = tuplestore_begin_heap(false, false, work_mem);

	StringInfo selectAllQuery = NULL;
	ShardPlacement *placement = NULL;
	Task *task = NULL;

	selectAllQuery = makeStringInfo();
	appendStringInfo(selectAllQuery, SELECT_ALL_QUERY,
					 quote_identifier(shardRelationName));

	/* describe where the healthy placement lives */
	placement = (ShardPlacement *) palloc0(sizeof(ShardPlacement));
	placement->nodeName = nodeName;
	placement->nodePort = nodePort;

	/* wrap the SELECT in a task targeting that placement */
	task = (Task *) palloc0(sizeof(Task));
	task->queryString = selectAllQuery;
	task->taskPlacementList = list_make1(placement);

	fetchSuccessful = ExecuteTaskAndStoreResults(task, tupleDescriptor,
												 tupleStore);
	if (!fetchSuccessful)
	{
		ereport(ERROR, (errmsg("could not store shard rows from healthy "
							   "placement"),
						errhint("Consult recent messages in the server logs for "
								"details.")));
	}

	/* replay the staged rows into the local shard table */
	CopyDataFromTupleStoreToRelation(tupleStore, shardTable);

	tuplestore_end(tupleStore);

	heap_close(shardTable, RowExclusiveLock);

	PG_RETURN_VOID();
}
/*
 * Append "OPERATOR(schema.oprname)" for the given operator OID to buf.
 * Only binary operators are expected here.
 */
static void
mv_GenerateOper(StringInfo buf, Oid opoid)
{
	HeapTuple	tuple;
	Form_pg_operator form;

	tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(opoid));
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "cache lookup failed for operator %u", opoid);
	form = (Form_pg_operator) GETSTRUCT(tuple);

	Assert(form->oprkind == 'b');

	/* always schema-qualify, so search_path cannot change the meaning */
	appendStringInfo(buf, "OPERATOR(%s.%s)",
					 quote_identifier(get_namespace_name(form->oprnamespace)),
					 NameStr(form->oprname));

	ReleaseSysCache(tuple);
}
/*
 * getExtensionLoadPath
 *
 * Read sqlj.loadpath (if that table exists) and record the load path and
 * the "exnihilo" flag in the globals pljavaLoadPath / extensionExNihilo.
 * Returns silently when the table does not exist.
 */
static void getExtensionLoadPath()
{
	MemoryContext curr;
	Datum dtm;
	bool isnull;
	StringInfoData buf;

	/*
	 * Check whether sqlj.loadpath exists before querying it. I would more
	 * happily just PG_CATCH() the error and compare to ERRCODE_UNDEFINED_TABLE
	 * but what's required to make that work right is "not terribly well
	 * documented, but the exception-block handling in plpgsql provides a
	 * working model" and that code is a lot more fiddly than you would guess.
	 */
	if ( InvalidOid == get_relname_relid(LOADPATH_TBL_NAME,
		GetSysCacheOid1(NAMESPACENAME, CStringGetDatum("sqlj"))) )
		return;

	SPI_connect();
	curr = CurrentMemoryContext;
	initStringInfo(&buf);
	appendStringInfo(&buf, "SELECT path, exnihilo FROM sqlj.%s",
		quote_identifier(LOADPATH_TBL_NAME));
	if ( SPI_OK_SELECT == SPI_execute(buf.data, true, 1)
		&& 1 == SPI_processed )
	{
		/*
		 * Copy the path into TopMemoryContext so it survives SPI_finish and
		 * the end of the current transaction.
		 */
		MemoryContextSwitchTo(TopMemoryContext);
		pljavaLoadPath = (char const *)SPI_getvalue(
			SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
		MemoryContextSwitchTo(curr);
		dtm = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2,
			&isnull);
		/* exnihilo is declared NOT NULL by the extension script */
		if ( isnull )
			elog(ERROR, "defect in CREATE EXTENSION script");
		extensionExNihilo = DatumGetBool(dtm);
	}
	SPI_finish();
}
/*
 * regoperout		- converts operator OID to "opr_name"
 */
Datum
regoperout(PG_FUNCTION_ARGS)
{
	Oid oprid = PG_GETARG_OID(0);
	char *result;
	HeapTuple opertup;

	/* InvalidOid prints as "0" */
	if (oprid == InvalidOid)
	{
		result = pstrdup("0");
		PG_RETURN_CSTRING(result);
	}

	opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(oprid));

	if (HeapTupleIsValid(opertup))
	{
		Form_pg_operator operform = (Form_pg_operator) GETSTRUCT(opertup);
		char *oprname = NameStr(operform->oprname);

		/*
		 * In bootstrap mode, skip the fancy namespace stuff and just return
		 * the oper name.  (This path is only needed for debugging output
		 * anyway.)
		 */
		if (IsBootstrapProcessingMode())
			result = pstrdup(oprname);
		else
		{
			FuncCandidateList clist;

			/*
			 * Would this oper be found (uniquely!) by regoperin? If not,
			 * qualify it.
			 */
			clist = OpernameGetCandidates(list_make1(makeString(oprname)), '\0');
			if (clist != NULL && clist->next == NULL && clist->oid == oprid)
				result = pstrdup(oprname);
			else
			{
				const char *nspname;

				/* schema-qualify: "nspname.oprname" */
				nspname = get_namespace_name(operform->oprnamespace);
				nspname = quote_identifier(nspname);
				result = (char *) palloc(strlen(nspname) + strlen(oprname) + 2);
				sprintf(result, "%s.%s", nspname, oprname);
			}
		}

		ReleaseSysCache(opertup);
	}
	else
	{
		/*
		 * If OID doesn't match any pg_operator entry, return it numerically
		 */
		result = (char *) palloc(NAMEDATALEN);
		snprintf(result, NAMEDATALEN, "%u", oprid);
	}

	PG_RETURN_CSTRING(result);
}
/*
 * set_frozenxids()
 *
 * This is called on the new cluster before we restore anything, with
 * minmxid_only = false.  Its purpose is to ensure that all initdb-created
 * vacuumable tables have relfrozenxid/relminmxid matching the old cluster's
 * xid/mxid counters.  We also initialize the datfrozenxid/datminmxid of the
 * built-in databases to match.
 *
 * As we create user tables later, their relfrozenxid/relminmxid fields will
 * be restored properly by the binary-upgrade restore script.  Likewise for
 * user-database datfrozenxid/datminmxid.  However, if we're upgrading from a
 * pre-9.3 database, which does not store per-table or per-DB minmxid, then
 * the relminmxid/datminmxid values filled in by the restore script will just
 * be zeroes.
 *
 * Hence, with a pre-9.3 source database, a second call occurs after
 * everything is restored, with minmxid_only = true.  This pass will
 * initialize all tables and databases, both those made by initdb and user
 * objects, with the desired minmxid value.  frozenxid values are left alone.
 */
static void
set_frozenxids(bool minmxid_only)
{
	int dbnum;
	PGconn *conn,
		   *conn_template1;
	PGresult *dbres;
	int ntups;
	int i_datname;
	int i_datallowconn;

	if (!minmxid_only)
		prep_status("Setting frozenxid and minmxid counters in new cluster");
	else
		prep_status("Setting minmxid counter in new cluster");

	conn_template1 = connectToServer(&new_cluster, "template1");

	if (!minmxid_only)
		/* set pg_database.datfrozenxid */
		PQclear(executeQueryOrDie(conn_template1,
								  "UPDATE pg_catalog.pg_database "
								  "SET datfrozenxid = '%u'",
								  old_cluster.controldata.chkpnt_nxtxid));

	/* set pg_database.datminmxid */
	PQclear(executeQueryOrDie(conn_template1,
							  "UPDATE pg_catalog.pg_database "
							  "SET datminmxid = '%u'",
							  old_cluster.controldata.chkpnt_nxtmulti));

	/* get database names */
	dbres = executeQueryOrDie(conn_template1,
							  "SELECT datname, datallowconn "
							  "FROM pg_catalog.pg_database");

	i_datname = PQfnumber(dbres, "datname");
	i_datallowconn = PQfnumber(dbres, "datallowconn");

	ntups = PQntuples(dbres);
	for (dbnum = 0; dbnum < ntups; dbnum++)
	{
		char *datname = PQgetvalue(dbres, dbnum, i_datname);
		char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

		/*
		 * We must update databases where datallowconn = false, e.g.
		 * template0, because autovacuum increments their datfrozenxids,
		 * relfrozenxids, and relminmxid even if autovacuum is turned off, and
		 * even though all the data rows are already frozen.  To enable this,
		 * we temporarily change datallowconn.
		 */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
									  "ALTER DATABASE %s ALLOW_CONNECTIONS = true",
									  quote_identifier(datname)));

		conn = connectToServer(&new_cluster, datname);

		if (!minmxid_only)
			/* set pg_class.relfrozenxid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE pg_catalog.pg_class "
									  "SET relfrozenxid = '%u' "
			/* only heap, materialized view, and TOAST are vacuumed */
									  "WHERE relkind IN ("
									  CppAsString2(RELKIND_RELATION) ", "
									  CppAsString2(RELKIND_MATVIEW) ", "
									  CppAsString2(RELKIND_TOASTVALUE) ")",
									  old_cluster.controldata.chkpnt_nxtxid));

		/* set pg_class.relminmxid */
		PQclear(executeQueryOrDie(conn,
								  "UPDATE pg_catalog.pg_class "
								  "SET relminmxid = '%u' "
		/* only heap, materialized view, and TOAST are vacuumed */
								  "WHERE relkind IN ("
								  CppAsString2(RELKIND_RELATION) ", "
								  CppAsString2(RELKIND_MATVIEW) ", "
								  CppAsString2(RELKIND_TOASTVALUE) ")",
								  old_cluster.controldata.chkpnt_nxtmulti));
		PQfinish(conn);

		/* Reset datallowconn flag */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
									  "ALTER DATABASE %s ALLOW_CONNECTIONS = false",
									  quote_identifier(datname)));
	}

	PQclear(dbres);

	PQfinish(conn_template1);

	check_ok();
}
/*
 * format_operator		- converts operator OID to "opr_name(args)"
 *
 * This exports the useful functionality of regoperatorout for use
 * in other backend modules.  The result is a palloc'd string.
 *
 * (caql-based variant: the lookup goes through a caql scan instead of the
 * syscache.)
 */
char *
format_operator(Oid operator_oid)
{
	char *result;
	HeapTuple opertup;
	cqContext *pcqCtx;

	pcqCtx = caql_beginscan(
			NULL,
			cql("SELECT * FROM pg_operator "
				" WHERE oid = :1 ",
				ObjectIdGetDatum(operator_oid)));

	opertup = caql_getnext(pcqCtx);

	/* XXX XXX select oprname, oprnamespace from pg_operator */

	if (HeapTupleIsValid(opertup))
	{
		Form_pg_operator operform = (Form_pg_operator) GETSTRUCT(opertup);
		char *oprname = NameStr(operform->oprname);
		char *nspname;
		StringInfoData buf;

		/* XXX no support here for bootstrap mode */

		initStringInfo(&buf);

		/*
		 * Would this oper be found (given the right args) by regoperatorin?
		 * If not, we need to qualify it.
		 */
		if (!OperatorIsVisible(operator_oid))
		{
			nspname = get_namespace_name(operform->oprnamespace);
			appendStringInfo(&buf, "%s.", quote_identifier(nspname));
		}

		appendStringInfo(&buf, "%s(", oprname);

		/* left operand type, or NONE for a prefix operator */
		if (operform->oprleft)
			appendStringInfo(&buf, "%s,", format_type_be(operform->oprleft));
		else
			appendStringInfo(&buf, "NONE,");

		/* right operand type, or NONE for a postfix operator */
		if (operform->oprright)
			appendStringInfo(&buf, "%s)", format_type_be(operform->oprright));
		else
			appendStringInfo(&buf, "NONE)");

		result = buf.data;
	}
	else
	{
		/*
		 * If OID doesn't match any pg_operator entry, return it numerically
		 */
		result = (char *) palloc(NAMEDATALEN);
		snprintf(result, NAMEDATALEN, "%u", operator_oid);
	}
	caql_endscan(pcqCtx);

	return result;
}
/*
 * old_8_3_create_sequence_script()
 *	8.3 -> 8.4
 *	8.4 added the column "start_value" to all sequences. For this reason,
 *	we don't transfer sequence files but instead use the CREATE SEQUENCE
 *	command from the schema dump, and use setval() to restore the sequence
 *	value and 'is_called' from the old database. This is safe to run
 *	by pg_upgrade because sequence files are not transferred from the old
 *	server, even in link mode.
 *
 * Returns the path of the generated SQL script, or NULL if no sequences
 * were found (in which case no script file is created). The returned
 * path string is pg_malloc'd; the caller owns it.
 */
char *
old_8_3_create_sequence_script(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;	/* opened lazily, on the first sequence found */
	bool		found = false;
	char	   *output_path = pg_malloc(MAX_PG_PATH);

	snprintf(output_path, MAX_PG_PATH, "%s/adjust_sequences.sql", os_info.cwd);

	prep_status("Creating script to adjust sequences");

	/* Scan every database in the cluster for user-visible sequences. */
	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;	/* \connect emitted for this db yet? */
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any sequences */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_namespace n "
								"WHERE c.relkind = 'S' AND "
								" c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
								" n.nspname !~ '^pg_temp_' AND "
								" n.nspname !~ '^pg_toast_temp_' AND "
								" n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			PGresult   *seq_res;
			int			i_last_value,
						i_is_called;
			const char *nspname = PQgetvalue(res, rowno, i_nspname);
			const char *relname = PQgetvalue(res, rowno, i_relname);

			found = true;

			/* Create the output file only once we know it is needed. */
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "could not create necessary file: %s\n",
					   output_path);
			if (!db_used)
			{
				fprintf(script, "\\connect %s\n\n",
						quote_identifier(active_db->db_name));
				db_used = true;
			}

			/* Find the desired sequence */
			seq_res = executeQueryOrDie(conn,
										"SELECT s.last_value, s.is_called "
										"FROM %s.%s s",
										quote_identifier(nspname),
										quote_identifier(relname));
			assert(PQntuples(seq_res) == 1);
			i_last_value = PQfnumber(seq_res, "last_value");
			i_is_called = PQfnumber(seq_res, "is_called");

			/* Emit a setval() call restoring last_value and is_called. */
			fprintf(script, "SELECT setval('%s.%s', %s, '%s');\n",
					quote_identifier(nspname), quote_identifier(relname),
					PQgetvalue(seq_res, 0, i_last_value),
					PQgetvalue(seq_res, 0, i_is_called));
			PQclear(seq_res);
		}
		if (db_used)
			fprintf(script, "\n");

		PQclear(res);
		PQfinish(conn);
	}
	if (script)
		fclose(script);

	check_ok();

	/* Hand the script path back only if we actually wrote something. */
	if (found)
		return output_path;
	else
	{
		pg_free(output_path);
		return NULL;
	}
}
/*
 * pg_shard_get_tableschemadef_string returns the definition of a given table.
 * This definition includes table's schema, default column values, not null and
 * check constraints. The definition does not include constraints that trigger
 * index creations; specifically, unique and primary key constraints are
 * excluded.
 */
static char *
pg_shard_get_tableschemadef_string(Oid tableRelationId)
{
	Relation relation = NULL;
	char *relationName = NULL;
	char relationKind = 0;
	TupleDesc tupleDescriptor = NULL;
	TupleConstr *tupleConstraints = NULL;
	int attributeIndex = 0;
	bool firstAttributePrinted = false;
	AttrNumber defaultValueIndex = 0;	/* running index into constr->defval */
	AttrNumber constraintIndex = 0;
	AttrNumber constraintCount = 0;
	StringInfoData buffer = { NULL, 0, 0, 0 };

	/*
	 * Instead of retrieving values from system catalogs as other functions in
	 * ruleutils.c do, we follow an unusual approach here: we open the relation,
	 * and fetch the relation's tuple descriptor. We do this because the tuple
	 * descriptor already contains information harnessed from pg_attrdef,
	 * pg_attribute, pg_constraint, and pg_class; and therefore using the
	 * descriptor saves us from a lot of additional work.
	 */
	relation = relation_open(tableRelationId, AccessShareLock);
	relationName = generate_relation_name(tableRelationId);

	/* only regular and foreign tables can be deparsed here */
	relationKind = relation->rd_rel->relkind;
	if (relationKind != RELKIND_RELATION && relationKind != RELKIND_FOREIGN_TABLE)
	{
		ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
						errmsg("%s is not a regular or foreign table", relationName)));
	}

	initStringInfo(&buffer);
	if (relationKind == RELKIND_RELATION)
	{
		appendStringInfo(&buffer, "CREATE TABLE %s (", relationName);
	}
	else
	{
		appendStringInfo(&buffer, "CREATE FOREIGN TABLE %s (", relationName);
	}

	/*
	 * Iterate over the table's columns. If a particular column is not dropped
	 * and is not inherited from another table, print the column's name and its
	 * formatted type.
	 */
	tupleDescriptor = RelationGetDescr(relation);
	tupleConstraints = tupleDescriptor->constr;

	for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
		 attributeIndex++)
	{
		Form_pg_attribute attributeForm = tupleDescriptor->attrs[attributeIndex];

		if (!attributeForm->attisdropped && attributeForm->attinhcount == 0)
		{
			const char *attributeName = NULL;
			const char *attributeTypeName = NULL;

			/* comma-separate all columns after the first one printed */
			if (firstAttributePrinted)
			{
				appendStringInfoString(&buffer, ", ");
			}
			firstAttributePrinted = true;

			attributeName = NameStr(attributeForm->attname);
			appendStringInfo(&buffer, "%s ", quote_identifier(attributeName));

			attributeTypeName = format_type_with_typemod(attributeForm->atttypid,
														 attributeForm->atttypmod);
			appendStringInfoString(&buffer, attributeTypeName);

			/* if this column has a default value, append the default value */
			if (attributeForm->atthasdef)
			{
				AttrDefault *defaultValueList = NULL;
				AttrDefault *defaultValue = NULL;
				Node *defaultNode = NULL;
				List *defaultContext = NULL;
				char *defaultString = NULL;

				Assert(tupleConstraints != NULL);

				defaultValueList = tupleConstraints->defval;
				Assert(defaultValueList != NULL);

				/*
				 * defval entries are walked in step with the columns that
				 * carry defaults; the Assert below checks that the entry we
				 * pick up really belongs to the current attribute number.
				 */
				defaultValue = &(defaultValueList[defaultValueIndex]);
				defaultValueIndex++;

				Assert(defaultValue->adnum == (attributeIndex + 1));
				Assert(defaultValueIndex <= tupleConstraints->num_defval);

				/* convert expression to node tree, and prepare deparse context */
				defaultNode = (Node *) stringToNode(defaultValue->adbin);
				defaultContext = deparse_context_for(relationName, tableRelationId);

				/* deparse default value string */
				defaultString = deparse_expression(defaultNode, defaultContext,
												   false, false);

				appendStringInfo(&buffer, " DEFAULT %s", defaultString);
			}

			/* if this column has a not null constraint, append the constraint */
			if (attributeForm->attnotnull)
			{
				appendStringInfoString(&buffer, " NOT NULL");
			}
		}
	}

	/*
	 * Now check if the table has any constraints. If it does, set the number of
	 * check constraints here. Then iterate over all check constraints and print
	 * them.
	 */
	if (tupleConstraints != NULL)
	{
		constraintCount = tupleConstraints->num_check;
	}

	for (constraintIndex = 0; constraintIndex < constraintCount; constraintIndex++)
	{
		ConstrCheck *checkConstraintList = tupleConstraints->check;
		ConstrCheck *checkConstraint = &(checkConstraintList[constraintIndex]);
		Node *checkNode = NULL;
		List *checkContext = NULL;
		char *checkString = NULL;

		/* if an attribute or constraint has been printed, format properly */
		if (firstAttributePrinted || constraintIndex > 0)
		{
			appendStringInfoString(&buffer, ", ");
		}

		appendStringInfo(&buffer, "CONSTRAINT %s CHECK ",
						 quote_identifier(checkConstraint->ccname));

		/* convert expression to node tree, and prepare deparse context */
		checkNode = (Node *) stringToNode(checkConstraint->ccbin);
		checkContext = deparse_context_for(relationName, tableRelationId);

		/* deparse check constraint string */
		checkString = deparse_expression(checkNode, checkContext, false, false);

		appendStringInfoString(&buffer, checkString);
	}

	/* close create table's outer parentheses */
	appendStringInfoString(&buffer, ")");

	/*
	 * If the relation is a foreign table, append the server name and options to
	 * the create table statement.
	 */
	if (relationKind == RELKIND_FOREIGN_TABLE)
	{
		ForeignTable *foreignTable = GetForeignTable(tableRelationId);
		ForeignServer *foreignServer = GetForeignServer(foreignTable->serverid);
		char *serverName = foreignServer->servername;

		appendStringInfo(&buffer, " SERVER %s", quote_identifier(serverName));
		AppendOptionListToString(&buffer, foreignTable->options);
	}

	relation_close(relation, AccessShareLock);

	return (buffer.data);
}
/*
 * worker_spi_main
 *
 * Entry point for a worker_spi background worker.  The worker connects to
 * the "postgres" database, makes sure its per-worker schema and "counted"
 * table exist (initialize_worker_spi), and then loops until SIGTERM: each
 * pass waits on the process latch for worker_spi_naptime seconds, then in
 * one transaction deletes all pending 'delta' rows and folds their sum into
 * the single 'total' row via SPI.
 *
 * main_arg carries the worker's index as an int32 Datum; it selects the
 * schema name ("schema<index>") so multiple workers don't collide.
 */
void
worker_spi_main(Datum main_arg)
{
	int			index = DatumGetInt32(main_arg);
	worktable  *table;
	StringInfoData buf;
	char		name[20];

	table = palloc(sizeof(worktable));
	/* bounded write: "schema" + int32 text + NUL fits, but don't rely on it */
	snprintf(name, sizeof(name), "schema%d", index);
	table->schema = pstrdup(name);
	table->name = pstrdup("counted");

	/* Establish signal handlers before unblocking signals. */
	pqsignal(SIGHUP, worker_spi_sighup);
	pqsignal(SIGTERM, worker_spi_sigterm);

	/* We're now ready to receive signals */
	BackgroundWorkerUnblockSignals();

	/* Connect to our database */
	BackgroundWorkerInitializeConnection("postgres", NULL);

	elog(LOG, "%s initialized with %s.%s",
		 MyBgworkerEntry->bgw_name, table->schema, table->name);
	initialize_worker_spi(table);

	/*
	 * Quote identifiers passed to us.  Note that this must be done after
	 * initialize_worker_spi, because that routine assumes the names are not
	 * quoted.
	 *
	 * Note some memory might be leaked here.
	 */
	table->schema = quote_identifier(table->schema);
	table->name = quote_identifier(table->name);

	/* Build the recurring UPDATE once; it is reused on every iteration. */
	initStringInfo(&buf);
	appendStringInfo(&buf,
					 "WITH deleted AS (DELETE "
					 "FROM %s.%s "
					 "WHERE type = 'delta' RETURNING value), "
					 "total AS (SELECT coalesce(sum(value), 0) as sum "
					 "FROM deleted) "
					 "UPDATE %s.%s "
					 "SET value = %s.value + total.sum "
					 "FROM total WHERE type = 'total' "
					 "RETURNING %s.value",
					 table->schema, table->name,
					 table->schema, table->name,
					 table->name,
					 table->name);

	/*
	 * Main loop: do this until the SIGTERM handler tells us to terminate
	 */
	while (!got_sigterm)
	{
		int			ret;
		int			rc;

		/*
		 * Background workers mustn't call usleep() or any direct equivalent:
		 * instead, they may wait on their process latch, which sleeps as
		 * necessary, but is awakened if postmaster dies.  That way the
		 * background process goes away immediately in an emergency.
		 */
		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   worker_spi_naptime * 1000L);
		ResetLatch(&MyProc->procLatch);

		/* emergency bailout if postmaster has died */
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);

		/*
		 * In case of a SIGHUP, just reload the configuration.
		 */
		if (got_sighup)
		{
			got_sighup = false;
			ProcessConfigFile(PGC_SIGHUP);
		}

		/*
		 * Start a transaction on which we can run queries.  Note that each
		 * StartTransactionCommand() call should be preceded by a
		 * SetCurrentStatementStartTimestamp() call, which sets both the time
		 * for the statement we're about the run, and also the transaction
		 * start time.  Also, each other query sent to SPI should probably be
		 * preceded by SetCurrentStatementStartTimestamp(), so that statement
		 * start time is always up to date.
		 *
		 * The SPI_connect() call lets us run queries through the SPI manager,
		 * and the PushActiveSnapshot() call creates an "active" snapshot
		 * which is necessary for queries to have MVCC data to work on.
		 *
		 * The pgstat_report_activity() call makes our activity visible
		 * through the pgstat views.
		 */
		SetCurrentStatementStartTimestamp();
		StartTransactionCommand();
		SPI_connect();
		PushActiveSnapshot(GetTransactionSnapshot());
		pgstat_report_activity(STATE_RUNNING, buf.data);

		/* We can now execute queries via SPI */
		ret = SPI_execute(buf.data, false, 0);

		if (ret != SPI_OK_UPDATE_RETURNING)
			elog(FATAL, "cannot select from table %s.%s: error code %d",
				 table->schema, table->name, ret);

		if (SPI_processed > 0)
		{
			bool		isnull;
			int32		val;

			/* column 1 of the RETURNING clause is the new total */
			val = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0],
											  SPI_tuptable->tupdesc,
											  1, &isnull));
			if (!isnull)
				elog(LOG, "%s: count in %s.%s is now %d",
					 MyBgworkerEntry->bgw_name,
					 table->schema, table->name, val);
		}

		/*
		 * And finish our transaction.
		 */
		SPI_finish();
		PopActiveSnapshot();
		CommitTransactionCommand();
		pgstat_report_activity(STATE_IDLE, NULL);
	}

	proc_exit(1);
}