/*
 * autostats_issue_analyze
 *		Issue an ANALYZE on a specific relation on behalf of auto-stats.
 *
 * The ANALYZE is skipped (with a DEBUG3 message) unless the current user
 * owns the table, or owns the database and the relation is not a shared
 * catalog.
 */
static void
autostats_issue_analyze(Oid relationOid)
{
	VacuumStmt *analyzeStmt = NULL;
	RangeVar   *relation = NULL;

	/*
	 * If this user does not own the table, then auto-stats will not issue
	 * the analyze.
	 */
	if (!(pg_class_ownercheck(relationOid, GetUserId()) ||
		  (pg_database_ownercheck(MyDatabaseId, GetUserId()) &&
		   !IsSharedRelation(relationOid))))
	{
		/*
		 * Oid is an unsigned type: use %u, not %d, so OIDs above INT_MAX
		 * do not print as negative numbers.
		 */
		elog(DEBUG3,
			 "Auto-stats did not issue ANALYZE on tableoid %u since the user does not have table-owner level permissions.",
			 relationOid);
		return;
	}

	relation = makeRangeVar(get_namespace_name(get_rel_namespace(relationOid)),
							get_rel_name(relationOid), -1);
	analyzeStmt = makeNode(VacuumStmt);

	/* Set up command parameters: ANALYZE only, no VACUUM */
	analyzeStmt->vacuum = false;
	analyzeStmt->full = false;
	analyzeStmt->analyze = true;
	analyzeStmt->freeze_min_age = -1;
	analyzeStmt->verbose = false;
	analyzeStmt->rootonly = false;
	analyzeStmt->relation = relation;	/* not used since we pass relids list */
	analyzeStmt->va_cols = NIL;			/* all columns */

	vacuum(analyzeStmt, NIL, NULL, false, false);

	pfree(analyzeStmt);
}
/*
 * DropErrorTable
 *
 * Drop the error table from the database.  Called from destroyCdbSreh when
 * an auto-generated error table went unused by the COPY operation and KEEP
 * was not specified.  The table is dropped locally on the QD and the DROP
 * is then dispatched to the QEs.
 */
static void
DropErrorTable(CdbSreh *cdbsreh)
{
	StringInfoData sql;
	RangeVar   *errorTableRv;
	char	   *namespaceName;
	char	   *relationName;

	Insist(Gp_role == GP_ROLE_DISPATCH);

	ereport(NOTICE,
			(errcode(ERRCODE_SUCCESSFUL_COMPLETION),
			 errmsg("Dropping the auto-generated unused error table"),
			 errhint("Use KEEP in LOG INTO clause to force keeping the error table alive")));

	namespaceName = get_namespace_name(RelationGetNamespace(cdbsreh->errtbl));
	relationName = RelationGetRelationName(cdbsreh->errtbl);

	/* Build the DROP statement text to be dispatched */
	initStringInfo(&sql);
	appendStringInfo(&sql, "DROP TABLE %s.%s",
					 quote_identifier(namespaceName),
					 quote_identifier(relationName));

	errorTableRv = makeRangeVar(namespaceName, relationName, -1);

	/* DROP the relation on the QD */
	RemoveRelation(errorTableRv, DROP_RESTRICT, NULL, RELKIND_RELATION);

	/* dispatch the DROP to the QEs */
	CdbDoCommand(sql.data, false /* no txn */ , false);

	pfree(sql.data);
}
/*
 * CreateTaskTable creates a simple table, defining only columns, in the
 * given schema.
 */
static void
CreateTaskTable(StringInfo schemaName, StringInfo relationName,
				List *columnNameList, List *columnTypeList)
{
	RangeVar   *taskTableRv = NULL;
	List	   *columnDefList = NIL;
	CreateStmt *createStmt = NULL;
	Oid			createdRelationId = InvalidOid;
#if (PG_VERSION_NUM >= 90500)
	ObjectAddress createdRelationObject;
#endif

	Assert(schemaName != NULL);
	Assert(relationName != NULL);

	/*
	 * This new relation doesn't log to WAL, as the table creation and data
	 * copy statements occur in the same transaction. Still, we want to make
	 * the relation unlogged once we upgrade to PostgreSQL 9.1.
	 */
	taskTableRv = makeRangeVar(schemaName->data, relationName->data, -1);
	columnDefList = ColumnDefinitionList(columnNameList, columnTypeList);
	createStmt = CreateStatement(taskTableRv, columnDefList);

	/* DefineRelation's signature changed in PostgreSQL 9.5 */
#if (PG_VERSION_NUM >= 90500)
	createdRelationObject = DefineRelation(createStmt, RELKIND_RELATION,
										   InvalidOid, NULL);
	createdRelationId = createdRelationObject.objectId;
#else
	createdRelationId = DefineRelation(createStmt, RELKIND_RELATION, InvalidOid);
#endif

	Assert(createdRelationId != InvalidOid);
	CommandCounterIncrement();
}
/*
 * makeRangeVarFromNameList
 *	  Utility routine to convert a qualified-name list into RangeVar form.
 *
 * Copied from backend/catalog/namespace.c
 *
 * NOTE(review): on more than three name parts this variant only emits a
 * WARNING and returns a RangeVar whose relname is still NULL — callers are
 * expected to tolerate that; confirm before relying on the result.
 */
RangeVar *
makeRangeVarFromNameList(List *names)
{
	RangeVar   *rel = makeRangeVar(NULL, NULL, -1);
	int			numParts = list_length(names);

	if (numParts == 1)
	{
		rel->relname = strVal(linitial(names));
	}
	else if (numParts == 2)
	{
		rel->schemaname = strVal(linitial(names));
		rel->relname = strVal(lsecond(names));
	}
	else if (numParts == 3)
	{
		rel->catalogname = strVal(linitial(names));
		rel->schemaname = strVal(lsecond(names));
		rel->relname = strVal(lthird(names));
	}
	else
	{
		ereport(WARNING,
				(errmsg("invalid relation name, too many indirections, while converting from table name to RangeVar")));
	}

	return rel;
}
/*
 * makeRangeVarFromNameList
 *	  Utility routine to convert a qualified-name list into RangeVar form.
 *
 * Copied from backend/catalog/namespace.c
 */
RangeVar *
makeRangeVarFromNameList(List *names)
{
	RangeVar   *rel = makeRangeVar(NULL, NULL, -1);
	int			numParts = list_length(names);

	if (numParts == 1)
	{
		rel->relname = strVal(linitial(names));
	}
	else if (numParts == 2)
	{
		rel->schemaname = strVal(linitial(names));
		rel->relname = strVal(lsecond(names));
	}
	else if (numParts == 3)
	{
		rel->catalogname = strVal(linitial(names));
		rel->schemaname = strVal(lsecond(names));
		rel->relname = strVal(lthird(names));
	}
	else
	{
		/* too many dotted names: warn and return the partially-filled RangeVar */
		pool_error("improper relation name (too many dotted names)");
	}

	return rel;
}
/*
 * CopyTaskFilesFromDirectory finds all files in the given directory, except
 * for those having an attempt suffix, and copies them into the database
 * table identified by the given schema and table name.
 */
static void
CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName,
						   StringInfo sourceDirectoryName)
{
	const char *directoryPath = sourceDirectoryName->data;
	struct dirent *dirEntry = NULL;
	uint64		totalRowsCopied = 0;
	DIR		   *directory = AllocateDir(directoryPath);

	if (directory == NULL)
	{
		ereport(ERROR, (errcode_for_file_access(),
						errmsg("could not open directory \"%s\": %m",
							   directoryPath)));
	}

	while ((dirEntry = ReadDir(directory, directoryPath)) != NULL)
	{
		const char *fileName = dirEntry->d_name;
		const char *queryString = NULL;
		StringInfo	filePath = NULL;
		RangeVar   *relation = NULL;
		CopyStmt   *copyStatement = NULL;
		uint64		rowsCopied = 0;

		/* skip "." / ".." and lingering task attempt files */
		bool		systemEntry = (strncmp(fileName, ".", MAXPGPATH) == 0 ||
								   strncmp(fileName, "..", MAXPGPATH) == 0);
		bool		attemptFile = (strstr(fileName, ATTEMPT_FILE_SUFFIX) != NULL);

		if (systemEntry || attemptFile)
		{
			continue;
		}

		filePath = makeStringInfo();
		appendStringInfo(filePath, "%s/%s", directoryPath, fileName);

		/* build relation object and copy statement */
		relation = makeRangeVar(schemaName->data, relationName->data, -1);
		copyStatement = CopyStatement(relation, filePath->data);
		if (BinaryWorkerCopyFormat)
		{
			DefElem    *copyOption = makeDefElem("format",
												 (Node *) makeString("binary"));

			copyStatement->options = list_make1(copyOption);
		}

		DoCopy(copyStatement, queryString, &rowsCopied);
		totalRowsCopied += rowsCopied;

		CommandCounterIncrement();
	}

	ereport(DEBUG2, (errmsg("copied " UINT64_FORMAT " rows into table: \"%s.%s\"",
							totalRowsCopied, schemaName->data,
							relationName->data)));

	FreeDir(directory);
}
/*
 * TruncateTable
 *		Truncate the given relation, as if by TRUNCATE ... RESTRICT.
 */
void
TruncateTable(Oid relid)
{
	TruncateStmt truncStmt;
	RangeVar   *targetRel;

	Assert(OidIsValid(relid));

	/* resolve the relation's qualified name for the statement node */
	targetRel = makeRangeVar(get_namespace_name(get_rel_namespace(relid)),
							 get_rel_name(relid), -1);

	memset(&truncStmt, 0, sizeof(truncStmt));
	truncStmt.type = T_TruncateStmt;
	truncStmt.relations = list_make1(targetRel);
	truncStmt.behavior = DROP_RESTRICT;

	ExecuteTruncate(&truncStmt);
	CommandCounterIncrement();
}
/*
 * Create a toast table during bootstrap.
 *
 * Here we need to prespecify the OIDs of the toast table and its index.
 */
void
BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid)
{
	Relation	targetRel;
	char		relkind;

	targetRel = heap_openrv(makeRangeVar(NULL, relName, -1),
							AccessExclusiveLock);

	relkind = targetRel->rd_rel->relkind;
	if (!(relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW))
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is not a table or materialized view",
						relName)));

	/* create_toast_table does all the work */
	if (!create_toast_table(targetRel, toastOid, toastIndexOid, (Datum) 0))
		elog(ERROR, "\"%s\" does not require a toast table", relName);

	heap_close(targetRel, NoLock);
}
/*
 * Create a toast table during bootstrap.
 *
 * Here we need to prespecify the OIDs of the toast table and its index.
 */
void
BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid)
{
	Relation	targetRel;
	char		relkind;

	targetRel = heap_openrv(makeRangeVar(NULL, relName, -1),
							AccessExclusiveLock);

	/* Note: during bootstrap may see uncataloged relation */
	relkind = targetRel->rd_rel->relkind;
	if (!(relkind == RELKIND_RELATION || relkind == RELKIND_UNCATALOGED))
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is not a table", relName)));

	/* create_toast_table does all the work */
	if (!create_toast_table(targetRel, toastOid, toastIndexOid, false))
		elog(ERROR, "\"%s\" does not require a toast table", relName);

	heap_close(targetRel, NoLock);
}
/*
 * open_pipeline_kafka_offsets
 *
 * Open and return the pipeline_kafka_offsets relation,
 * locked in RowExclusiveLock mode.
 */
static Relation
open_pipeline_kafka_offsets(void)
{
	return heap_openrv(makeRangeVar(NULL, OFFSETS_RELATION, -1),
					   RowExclusiveLock);
}
/*
 * open_pipeline_kafka_brokers
 *
 * Open and return the pipeline_kafka_brokers relation,
 * locked in AccessExclusiveLock mode.
 */
static Relation
open_pipeline_kafka_brokers(void)
{
	return heap_openrv(makeRangeVar(NULL, BROKER_RELATION, -1),
					   AccessExclusiveLock);
}
/*
 * open_pipeline_kafka_consumers
 *
 * Open and return the pipeline_kafka_consumers relation,
 * locked in AccessExclusiveLock mode.
 */
static Relation
open_pipeline_kafka_consumers(void)
{
	return heap_openrv(makeRangeVar(NULL, CONSUMER_RELATION, -1),
					   AccessExclusiveLock);
}
/*
 * Creates a sample table with data from a PXF table.
 * We need to create a copy of the PXF table, in order to pass the sampling
 * parameters pxf_sample_ratio and pxf_max_fragments as attributes,
 * and to create a segment reject limit of 25 percent.
 *
 * The new PXF table is sampled and the results are saved in the returned sample table.
 * Note that ANALYZE can be executed only by the database owner.
 * It is safe to assume that the database owner has permissions to create temp tables.
 * The sampling is done by uniformly sampling pxf_sample_ratio records of each fragments,
 * up to pxf_max_fragments.
 *
 * Input:
 * 	relationOid 		- relation to be sampled
 * 	sampleTableName 	- sample table name, moderately unique
 * 	lAttributeNames 	- attributes to be included in the sample
 * 	relTuples			- estimated size of relation
 * 	relFrags			- estimated number of fragments in relation
 * 	requestedSampleSize - as determined by attribute statistics requirements.
 * 	sampleTableRelTuples	- limit on size of the sample.
 * Output:
 * 	sampleTableRelTuples	- number of tuples in the sample table created.
 */
Oid
buildPxfSampleTable(Oid relationOid, char *sampleTableName, List *lAttributeNames,
					float4 relTuples, float4 relFrags, float4 requestedSampleSize,
					float4 *sampleTableRelTuples)
{
	const char *schemaName = get_namespace_name(get_rel_namespace(relationOid));	/* must be pfreed */
	const char *tableName = get_rel_name(relationOid);	/* must be pfreed */
	char	   *sampleSchemaName = pstrdup("pg_temp");
	char	   *pxfSampleTable = temporarySampleTableName(relationOid, "pg_analyze_pxf");	/* must be pfreed */
	Oid			sampleTableOid = InvalidOid;
	Oid			pxfSampleTableOid = InvalidOid;
	RangeVar   *rangeVar = NULL;
	float4		pxfSamplingRatio = 0.0;

	Assert(requestedSampleSize > 0.0);
	Assert(relTuples > 0.0);
	Assert(relFrags > 0.0);

	/* calculate pxf_sample_ratio */
	pxfSamplingRatio = calculateSamplingRatio(relTuples, relFrags, requestedSampleSize);

	/* build copy of original pxf table */
	buildPxfTableCopy(relationOid, pxfSamplingRatio, pxf_stat_max_fragments,
					  schemaName, tableName, sampleSchemaName, pxfSampleTable);

	/* look up the OID of the temporary PXF table copy so it can be dropped later */
	rangeVar = makeRangeVar(NULL /*catalogname*/, sampleSchemaName, pxfSampleTable, -1);
	pxfSampleTableOid = RangeVarGetRelid(rangeVar, true /* failOK */, false /*allowHcatalog*/);

	/* sample the PXF table copy into the sample table; updates *sampleTableRelTuples */
	buildSampleFromPxf(sampleSchemaName, sampleTableName, pxfSampleTable,
					   lAttributeNames, sampleTableRelTuples);

	/* NOTE(review): rangeVar is reused here; the first RangeVar is not pfreed */
	rangeVar = makeRangeVar(NULL /*catalogname*/, sampleSchemaName, sampleTableName, -1);
	sampleTableOid = RangeVarGetRelid(rangeVar, true /* failOK */, false /*allowHcatalog*/);

	Assert(sampleTableOid != InvalidOid);

	/**
	 * MPP-10723: Very rarely, we may be unlucky and generate an empty sample table. We error out in this case rather than
	 * generate bad statistics.
	 */
	if (*sampleTableRelTuples < 1.0)
	{
		elog(ERROR, "ANALYZE unable to generate accurate statistics on table %s.%s. Try lowering gp_analyze_relative_error",
			 quote_identifier(schemaName),
			 quote_identifier(tableName));
	}

	/* drop the temporary PXF table copy, if it was found above */
	if (pxfSampleTableOid != InvalidOid)
	{
		elog(DEBUG2, "ANALYZE dropping PXF sample table");
		dropSampleTable(pxfSampleTableOid, true);
	}

	/* release locally-allocated names; caller owns the returned OID only */
	pfree((void *) rangeVar);
	pfree((void *) pxfSampleTable);
	pfree((void *) tableName);
	pfree((void *) schemaName);
	pfree((void *) sampleSchemaName);

	return sampleTableOid;
}
/*
 * Open the local relation associated with the remote one.
 *
 * Optionally rebuilds the Relcache mapping if it was invalidated
 * by local DDL.
 *
 * Returns the (possibly refreshed) map entry with entry->localrel opened;
 * errors out if no map entry exists or the local table is missing columns.
 */
LogicalRepRelMapEntry *
logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
{
	LogicalRepRelMapEntry *entry;
	bool		found;

	if (LogicalRepRelMap == NULL)
		logicalrep_relmap_init();

	/* Search for existing entry. */
	entry = hash_search(LogicalRepRelMap, (void *) &remoteid,
						HASH_FIND, &found);

	if (!found)
		elog(ERROR, "no relation map entry for remote relation ID %u",
			 remoteid);

	/* Need to update the local cache? */
	if (!OidIsValid(entry->localreloid))
	{
		Oid			relid;
		int			i;
		int			found;	/* NOTE(review): shadows the outer bool "found" */
		Bitmapset  *idkey;
		TupleDesc	desc;
		LogicalRepRelation *remoterel;
		MemoryContext oldctx;

		remoterel = &entry->remoterel;

		/* Try to find and lock the relation by name. */
		relid = RangeVarGetRelid(makeRangeVar(remoterel->nspname,
											  remoterel->relname, -1),
								 lockmode, true);
		if (!OidIsValid(relid))
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("logical replication target relation \"%s.%s\" does not exist",
							remoterel->nspname, remoterel->relname)));
		/* already locked above, so no further lock is taken here */
		entry->localrel = heap_open(relid, NoLock);

		/*
		 * We currently only support writing to regular and partitioned
		 * tables.
		 */
		if (entry->localrel->rd_rel->relkind != RELKIND_RELATION)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("logical replication target relation \"%s.%s\" is not a table",
							remoterel->nspname, remoterel->relname)));

		/*
		 * Build the mapping of local attribute numbers to remote attribute
		 * numbers and validate that we don't miss any replicated columns
		 * as that would result in potentially unwanted data loss.
		 */
		desc = RelationGetDescr(entry->localrel);
		/* attrmap must outlive this call: allocate it in the map's context */
		oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
		entry->attrmap = palloc(desc->natts * sizeof(int));
		MemoryContextSwitchTo(oldctx);

		found = 0;
		for (i = 0; i < desc->natts; i++)
		{
			/* attnum is -1 when the local column has no remote counterpart */
			int			attnum = logicalrep_rel_att_by_name(remoterel,
											NameStr(desc->attrs[i]->attname));

			entry->attrmap[i] = attnum;
			if (attnum >= 0)
				found++;
		}

		/* TODO, detail message with names of missing columns */
		if (found < remoterel->natts)
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("logical replication target relation \"%s.%s\" is missing "
							"some replicated columns",
							remoterel->nspname, remoterel->relname)));

		/*
		 * Check that replica identity matches. We allow for stricter replica
		 * identity (fewer columns) on subscriber as that will not stop us
		 * from finding unique tuple. IE, if publisher has identity
		 * (id,timestamp) and subscriber just (id) this will not be a problem,
		 * but in the opposite scenario it will.
		 *
		 * Don't throw any error here just mark the relation entry as not
		 * updatable, as replica identity is only for updates and deletes
		 * but inserts can be replicated even without it.
		 */
		entry->updatable = true;
		idkey = RelationGetIndexAttrBitmap(entry->localrel,
										   INDEX_ATTR_BITMAP_IDENTITY_KEY);
		/* fallback to PK if no replica identity */
		if (idkey == NULL)
		{
			idkey = RelationGetIndexAttrBitmap(entry->localrel,
											   INDEX_ATTR_BITMAP_PRIMARY_KEY);

			/*
			 * If no replica identity index and no PK, the published table
			 * must have replica identity FULL.
			 */
			if (idkey == NULL && remoterel->replident != REPLICA_IDENTITY_FULL)
				entry->updatable = false;
		}

		/* every identity-key column must also be a remote key column */
		i = -1;
		while ((i = bms_next_member(idkey, i)) >= 0)
		{
			/* bitmap members are offset by FirstLowInvalidHeapAttributeNumber */
			int			attnum = i + FirstLowInvalidHeapAttributeNumber;

			if (!AttrNumberIsForUserDefinedAttr(attnum))
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("logical replication target relation \"%s.%s\" uses "
								"system columns in REPLICA IDENTITY index",
								remoterel->nspname, remoterel->relname)));

			attnum = AttrNumberGetAttrOffset(attnum);

			if (!bms_is_member(entry->attrmap[attnum], remoterel->attkeys))
			{
				entry->updatable = false;
				break;
			}
		}

		/* mark the cache entry valid only after everything above succeeded */
		entry->localreloid = relid;
	}
	else
		entry->localrel = heap_open(entry->localreloid, lockmode);

	return entry;
}
/*
 * grab_ExecutorEnd
 *		ExecutorEnd hook: record the finished query (transaction info, timing,
 *		command type, text, and parameters) as a row in the extension's log
 *		table, then chain to the previous/standard ExecutorEnd.
 *
 * Logging is silently skipped when the extension schema or log table does
 * not exist in the current database.
 */
static void
grab_ExecutorEnd(QueryDesc *queryDesc)
{
	Datum		values[10];
	bool		nulls[10] = {false, false, false, false, false,
							 false, false, false, false, false};
	Relation	dump_heap;
	RangeVar   *dump_table_rv;
	HeapTuple	tuple;
	Oid			namespaceId;

	/* lookup schema */
	namespaceId = GetSysCacheOid1(NAMESPACENAME,
								  CStringGetDatum(EXTENSION_SCHEMA));
	if (OidIsValid(namespaceId))
	{
		/* lookup table */
		if (OidIsValid(get_relname_relid(EXTENSION_LOG_TABLE, namespaceId)))
		{
			/* get table heap */
			dump_table_rv = makeRangeVar(EXTENSION_SCHEMA,
										 EXTENSION_LOG_TABLE, -1);
			dump_heap = heap_openrv(dump_table_rv, RowExclusiveLock);

			/*
			 * transaction info
			 *
			 * NOTE(review): TransactionId/CommandId/Oid are unsigned 32-bit
			 * values packed via Int32GetDatum — the log columns presumably
			 * store them as int4; confirm against the table definition.
			 */
			values[0] = Int32GetDatum(GetCurrentTransactionId());
			values[1] = Int32GetDatum(GetCurrentCommandId(false));
			values[2] = Int32GetDatum(MyProcPid);
			values[3] = Int32GetDatum(GetUserId());

			/* query timing: start time derived by subtracting the elapsed total */
			if (queryDesc->totaltime != NULL)
			{
				InstrEndLoop(queryDesc->totaltime);
				values[4] = TimestampGetDatum(
					TimestampTzPlusMilliseconds(GetCurrentTimestamp(),
							(queryDesc->totaltime->total * -1000.0)));
				values[5] = Float8GetDatum(queryDesc->totaltime->total);
			}
			else
			{
				nulls[4] = true;
				nulls[5] = true;
			}

			/* query command type */
			values[6] = Int32GetDatum(queryDesc->operation);

			/* query text (text * passed through CStringGetDatum, a pointer cast) */
			values[7] = CStringGetDatum(
					cstring_to_text(queryDesc->sourceText));

			/* query params: two parallel arrays — values as text[], types as regtype[] */
			if (queryDesc->params != NULL)
			{
				int			numParams = queryDesc->params->numParams;
				Oid			out_func_oid,
							ptype;
				Datum		pvalue;
				bool		isvarlena;
				FmgrInfo   *out_functions;
				bool		arr_nulls[numParams];	/* VLA sized by param count */
				size_t		arr_nelems = (size_t) numParams;
				Datum	   *arr_val_elems = palloc(sizeof(Datum) * arr_nelems);
				Datum	   *arr_typ_elems = palloc(sizeof(Datum) * arr_nelems);
				char		elem_val_byval,
							elem_val_align,
							elem_typ_byval,
							elem_typ_align;
				int16		elem_val_len,
							elem_typ_len;
				int			elem_dims[1],
							elem_lbs[1];
				int			paramno;

				/* init: element storage properties for text and regtype arrays */
				out_functions = (FmgrInfo *) palloc(
						(numParams) * sizeof(FmgrInfo));
				get_typlenbyvalalign(TEXTOID, &elem_val_len,
									 &elem_val_byval, &elem_val_align);
				get_typlenbyvalalign(REGTYPEOID, &elem_typ_len,
									 &elem_typ_byval, &elem_typ_align);
				elem_dims[0] = arr_nelems;
				elem_lbs[0] = 1;	/* SQL arrays are 1-based */

				for (paramno = 0; paramno < numParams; paramno++)
				{
					pvalue = queryDesc->params->params[paramno].value;
					ptype = queryDesc->params->params[paramno].ptype;
					/* render each parameter with its type's output function */
					getTypeOutputInfo(ptype, &out_func_oid, &isvarlena);
					fmgr_info(out_func_oid, &out_functions[paramno]);
					arr_typ_elems[paramno] = ptype;
					arr_nulls[paramno] = true;
					if (!queryDesc->params->params[paramno].isnull)
					{
						arr_nulls[paramno] = false;
						arr_val_elems[paramno] = PointerGetDatum(
							cstring_to_text(
								OutputFunctionCall(&out_functions[paramno],
												   pvalue)));
					}
				}

				/* param values: 1-D text[] that may contain NULL elements */
				values[8] = PointerGetDatum(
					construct_md_array(
						arr_val_elems, arr_nulls, 1, elem_dims, elem_lbs,
						TEXTOID, elem_val_len, elem_val_byval,
						elem_val_align));
				/* param types: regtype[] with no NULLs */
				values[9] = PointerGetDatum(
					construct_array(
						arr_typ_elems, arr_nelems, REGTYPEOID,
						elem_typ_len, elem_typ_byval, elem_typ_align));
				pfree(out_functions);
				pfree(arr_val_elems);
			}
			else
			{
				nulls[8] = true;
				nulls[9] = true;
			}

			/* insert */
			tuple = heap_form_tuple(dump_heap->rd_att, values, nulls);
			simple_heap_insert(dump_heap, tuple);
			heap_close(dump_heap, RowExclusiveLock);
		}
	}

	/* chain to the previous hook, or the standard implementation */
	if (prev_ExecutorEnd)
		prev_ExecutorEnd(queryDesc);
	else
		standard_ExecutorEnd(queryDesc);
}