/*
 * Record usage of a PL/pgSQL variable: reads go to used_variables,
 * writes to modif_variables.
 */
void
plpgsql_check_record_variable_usage(PLpgSQL_checkstate *cstate, int dno, bool write)
{
	/* a negative dno means "no variable here" - nothing to record */
	if (dno < 0)
		return;

	if (write)
		cstate->modif_variables = bms_add_member(cstate->modif_variables, dno);
	else
		cstate->used_variables = bms_add_member(cstate->used_variables, dno);
}
/*
 * Determine whether a relation can be proven functionally dependent on
 * a set of grouping columns.  If so, return true and add the pg_constraint
 * OIDs of the constraints needed for the proof to the *constraintDeps list.
 *
 * grouping_columns is a list of grouping expressions, in which columns of
 * the rel of interest are Vars with the indicated varno/varlevelsup.
 *
 * Currently we only check to see if the rel has a primary key that is a
 * subset of the grouping_columns.  We could also use plain unique constraints
 * if all their columns are known not null, but there's a problem: we need
 * to be able to represent the not-null-ness as part of the constraints added
 * to *constraintDeps.  FIXME whenever not-null constraints get represented
 * in pg_constraint.
 */
bool
check_functional_grouping(Oid relid,
						  Index varno, Index varlevelsup,
						  List *grouping_columns,
						  List **constraintDeps)
{
	Bitmapset  *pkattnos;
	Bitmapset  *groupbyattnos;
	Oid			constraintOid;
	ListCell   *gl;

	/* If the rel has no PK, then we can't prove functional dependency */
	pkattnos = get_primary_key_attnos(relid, false, &constraintOid);
	if (pkattnos == NULL)
		return false;

	/* Identify all the rel's columns that appear in grouping_columns */
	groupbyattnos = NULL;
	foreach(gl, grouping_columns)
	{
		Var		   *gvar = (Var *) lfirst(gl);

		/*
		 * Only plain Vars of this rel at the right query level count;
		 * attnos are offset by FirstLowInvalidHeapAttributeNumber so that
		 * system columns can be represented in the bitmapset.
		 */
		if (IsA(gvar, Var) &&
			gvar->varno == varno &&
			gvar->varlevelsup == varlevelsup)
			groupbyattnos = bms_add_member(groupbyattnos,
										   gvar->varattno - FirstLowInvalidHeapAttributeNumber);
	}
	/*
	 * NOTE(review): this chunk appears truncated here -- the PK-subset test
	 * and the append to *constraintDeps are not visible in this excerpt.
	 */
/* ----------------------------------------------------------------
 * ExecReScanRecursiveUnion
 *
 * Reset the recursive-union node so the relation can be scanned again.
 * ----------------------------------------------------------------
 */
void
ExecReScanRecursiveUnion(RecursiveUnionState *node)
{
	PlanState  *nonRecursiveTerm = outerPlanState(node);
	PlanState  *recursiveTerm = innerPlanState(node);
	RecursiveUnion *plan = (RecursiveUnion *) node->ps.plan;

	/*
	 * The recursive term must always rescan: we are about to rebuild the
	 * working table it reads, so flag its wtParam as changed.
	 */
	recursiveTerm->chgParam = bms_add_member(recursiveTerm->chgParam,
											 plan->wtParam);

	/*
	 * The non-recursive term only needs an explicit ExecReScan here when it
	 * has no pending chgParam; otherwise its first ExecProcNode will rescan
	 * it automatically.
	 */
	if (nonRecursiveTerm->chgParam == NULL)
		ExecReScan(nonRecursiveTerm);

	/* Throw away any hashtable storage left over from the previous scan */
	if (node->tableContext)
		MemoryContextResetAndDeleteChildren(node->tableContext);

	/* Recreate an empty hashtable when duplicate elimination is in use */
	if (plan->numCols > 0)
		build_hash_table(node);

	/* Return to the initial processing state */
	node->recursing = false;
	node->intermediate_empty = true;
	tuplestore_clear(node->working_table);
	tuplestore_clear(node->intermediate_table);
}
/*
 * fixup_inherited_columns
 *
 * Translate a parent relation's column bitmapset into the equivalent
 * bitmapset for one of its children.  A query on an inheritance parent
 * implicitly touches the children too, but attribute numbers need not
 * match between parent and child, so each column is mapped by name.
 * Bit numbers are attnos offset by FirstLowInvalidHeapAttributeNumber.
 */
static Bitmapset *
fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns)
{
	Bitmapset  *result = NULL;
	Bitmapset  *workset;
	int			bit;

	/* Trivial case: same relation, the bitmap applies as-is */
	if (parentId == childId)
		return columns;

	/* bms_first_member() is destructive, so consume a scratch copy */
	workset = bms_copy(columns);
	while ((bit = bms_first_member(workset)) > 0)
	{
		AttrNumber	attno = bit + FirstLowInvalidHeapAttributeNumber;
		char	   *attname;

		/* whole-row-reference shall be fixed-up later */
		if (attno == InvalidAttrNumber)
		{
			result = bms_add_member(result, bit);
			continue;
		}

		/* map parent attno -> column name -> child attno */
		attname = get_attname(parentId, attno);
		if (!attname)
			elog(ERROR, "cache lookup failed for attribute %d of relation %u",
				 attno, parentId);
		attno = get_attnum(childId, attname);
		if (attno == InvalidAttrNumber)
			elog(ERROR, "cache lookup failed for attribute %s of relation %u",
				 attname, childId);

		result = bms_add_member(result,
								attno - FirstLowInvalidHeapAttributeNumber);
		pfree(attname);
	}
	bms_free(workset);

	return result;
}
/* XML: object-start callback */
static void
xml_objstart(void *state)
{
	pgspParserContext *ctx = (pgspParserContext *) state;

	/* push one nesting level and flag it as awaiting its first member */
	ctx->first = bms_add_member(ctx->first, ++ctx->level);
}
/*
 * fixup_inherited_columns
 *
 * Translate a parent relation's column bitmapset into the equivalent
 * bitmapset for one of its children.  A query on an inheritance parent
 * implicitly touches the children too, but attribute numbers need not
 * match between parent and child, so each column is mapped by name.
 */
static Bitmapset *
fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns)
{
	Bitmapset  *result = NULL;
	int			member = -1;

	/* Trivial case: same relation, the bitmap applies as-is */
	if (parentId == childId)
		return columns;

	while ((member = bms_next_member(columns, member)) >= 0)
	{
		/* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
		AttrNumber	attno = member + FirstLowInvalidHeapAttributeNumber;
		char	   *attname;

		/* whole-row-reference shall be fixed-up later */
		if (attno == InvalidAttrNumber)
		{
			result = bms_add_member(result, member);
			continue;
		}

		/* map parent attno -> column name -> child attno */
		attname = get_attname(parentId, attno, false);
		attno = get_attnum(childId, attname);
		if (attno == InvalidAttrNumber)
			elog(ERROR, "cache lookup failed for attribute %s of relation %u",
				 attname, childId);

		result = bms_add_member(result,
								attno - FirstLowInvalidHeapAttributeNumber);
		pfree(attname);
	}

	return result;
}
/* JSON: array-start callback */
static void
json_arrstart(void *state)
{
	pgspParserContext *ctx = (pgspParserContext *) state;

	appendStringInfoChar(ctx->dest, '[');

	/* entering a new nesting level; nothing emitted there yet */
	ctx->level++;
	ctx->first = bms_add_member(ctx->first, ctx->level);

	/* the pending field name has been consumed by this array */
	ctx->fname = NULL;
	ctx->last_elem_is_object = true;
}
/*
 * dependencies_clauselist_selectivity
 *		Return the estimated selectivity of (a subset of) the given clauses
 *		using functional dependency statistics, or 1.0 if no useful functional
 *		dependency statistic exists.
 *
 * 'estimatedclauses' is an output argument that gets a bit set corresponding
 * to the (zero-based) list index of each clause that is included in the
 * estimated selectivity.
 *
 * Given equality clauses on attributes (a,b) we find the strongest dependency
 * between them, i.e. either (a=>b) or (b=>a). Assuming (a=>b) is the selected
 * dependency, we then combine the per-clause selectivities using the formula
 *
 *	   P(a,b) = P(a) * [f + (1-f)*P(b)]
 *
 * where 'f' is the degree of the dependency.
 *
 * With clauses on more than two attributes, the dependencies are applied
 * recursively, starting with the widest/strongest dependencies. For example
 * P(a,b,c) is first split like this:
 *
 *	   P(a,b,c) = P(a,b) * [f + (1-f)*P(c)]
 *
 * assuming (a,b=>c) is the strongest dependency.
 */
Selectivity
dependencies_clauselist_selectivity(PlannerInfo *root,
									List *clauses,
									int varRelid,
									JoinType jointype,
									SpecialJoinInfo *sjinfo,
									RelOptInfo *rel,
									Bitmapset **estimatedclauses)
{
	Selectivity s1 = 1.0;
	ListCell   *l;
	Bitmapset  *clauses_attnums = NULL;
	StatisticExtInfo *stat;
	MVDependencies *dependencies;
	AttrNumber *list_attnums;
	int			listidx;

	/* initialize output argument */
	*estimatedclauses = NULL;

	/* check if there's any stats that might be useful for us. */
	if (!has_stats_of_kind(rel->statlist, STATS_EXT_DEPENDENCIES))
		return 1.0;

	/* one attnum slot per input clause */
	list_attnums = (AttrNumber *) palloc(sizeof(AttrNumber) *
										 list_length(clauses));

	/*
	 * Pre-process the clauses list to extract the attnums seen in each item.
	 * We need to determine if there's any clauses which will be useful for
	 * dependency selectivity estimations. Along the way we'll record all of
	 * the attnums for each clause in a list which we'll reference later so we
	 * don't need to repeat the same work again. We'll also keep track of all
	 * attnums seen.
	 */
	listidx = 0;
	foreach(l, clauses)
	{
		Node	   *clause = (Node *) lfirst(l);
		AttrNumber	attnum;

		/* incompatible clauses are marked with InvalidAttrNumber */
		if (dependency_is_compatible_clause(clause, rel->relid, &attnum))
		{
			list_attnums[listidx] = attnum;
			clauses_attnums = bms_add_member(clauses_attnums, attnum);
		}
		else
			list_attnums[listidx] = InvalidAttrNumber;

		listidx++;
	}
	/*
	 * NOTE(review): this chunk appears truncated here -- the dependency
	 * lookup and selectivity combination steps are not visible in this
	 * excerpt.
	 */
static void json_arrstart(void *state) { pgspParserContext *ctx = (pgspParserContext *)state; if (IS_INDENTED_ARRAY(ctx->current_list)) ctx->wlist_level++; appendStringInfoChar(ctx->dest, '['); ctx->fname = NULL; ctx->level++; ctx->last_elem_is_object = true; ctx->first = bms_add_member(ctx->first, ctx->level); }
/*
 * statext_ndistinct_build
 *	Compute ndistinct coefficient for the combination of attributes.
 *
 * This computes the ndistinct estimate using the same estimator used
 * in analyze.c and then computes the coefficient.  One item is produced
 * for every combination of 2..N of the N attributes.
 */
MVNDistinct *
statext_ndistinct_build(double totalrows, int numrows, HeapTuple *rows,
						Bitmapset *attrs, VacAttrStats **stats)
{
	MVNDistinct *result;
	int			ndims = bms_num_members(attrs);
	int			ncombs = num_combinations(ndims);
	int			nfilled = 0;
	int			k;

	/* allocate the header plus one slot per attribute combination */
	result = palloc(offsetof(MVNDistinct, items) +
					ncombs * sizeof(MVNDistinctItem));
	result->magic = STATS_NDISTINCT_MAGIC;
	result->type = STATS_NDISTINCT_TYPE_BASIC;
	result->nitems = ncombs;

	/* estimate ndistinct for every group of K = 2..N attributes */
	for (k = 2; k <= ndims; k++)
	{
		CombinationGenerator *generator;
		int		   *combination;

		/* generate combinations of K out of N elements */
		generator = generator_init(ndims, k);
		while ((combination = generator_next(generator)))
		{
			MVNDistinctItem *item = &result->items[nfilled];
			int			j;

			/* translate combination indexes into attribute numbers */
			item->attrs = NULL;
			for (j = 0; j < k; j++)
				item->attrs = bms_add_member(item->attrs,
											 stats[combination[j]]->attr->attnum);

			item->ndistinct = ndistinct_for_combination(totalrows, numrows,
														rows, stats,
														k, combination);

			nfilled++;
			Assert(nfilled <= result->nitems);
		}
		generator_free(generator);
	}

	/* must consume exactly the whole output array */
	Assert(nfilled == result->nitems);

	return result;
}
/* YAML: array-start callback */
static void
yaml_arrstart(void *state)
{
	pgspParserContext *ctx = (pgspParserContext *) state;

	/* an array under a named field prints the field name as a YAML key */
	if (ctx->fname)
	{
		appendStringInfoString(ctx->dest, ctx->fname);
		appendStringInfoString(ctx->dest, ":");
	}

	ctx->fname = NULL;

	/* entering a new nesting level; nothing emitted there yet */
	ctx->level++;
	ctx->first = bms_add_member(ctx->first, ctx->level);
}
/*
 * GetAnyDataNode
 *	Pick any data node from given set, but try a preferred node first.
 */
int
GetAnyDataNode(Bitmapset *nodes)
{
	Bitmapset  *candidates = NULL;
	int			members[NumDataNodes];
	int			count = 0;
	int			nodeid;
	int			i;

	/* Collect the preferred nodes that actually belong to the given set */
	for (i = 0; i < num_preferred_data_nodes; i++)
	{
		char		ntype = PGXC_NODE_DATANODE;

		nodeid = PGXCNodeGetNodeId(preferred_data_node[i], &ntype);
		if (bms_is_member(nodeid, nodes))
			candidates = bms_add_member(candidates, nodeid);
	}

	/* No usable preferred node: fall back to the whole input set */
	if (bms_is_empty(candidates))
		candidates = bms_copy(nodes);

	/* Flatten the set into an array; bms_first_member() is destructive */
	while ((nodeid = bms_first_member(candidates)) >= 0)
		members[count++] = nodeid;
	bms_free(candidates);

	/* A single member needs no load balancing */
	if (count == 1)
		return members[0];

	/*
	 * Saving a previously returned index would skew the distribution: small
	 * sets would keep resetting it and low indexes would win more often.
	 * So simply pick a random slot in 0..count-1.
	 */
	return members[((unsigned int) random()) % count];
}
/*
 * fixup_whole_row_references
 *
 * When user reference a whole of row, it is equivalent to reference to
 * all the user columns (not system columns). So, we need to fix up the
 * given bitmapset, if it contains a whole of the row reference.
 *
 * Bit numbers are attnos offset by FirstLowInvalidHeapAttributeNumber.
 * Returns the input set unchanged when it has no whole-row bit; otherwise
 * returns a new copy with the whole-row bit replaced by one bit per live
 * (non-dropped) user column.
 */
static Bitmapset *
fixup_whole_row_references(Oid relOid, Bitmapset *columns)
{
	Bitmapset  *result;
	HeapTuple	tuple;
	AttrNumber	natts;
	AttrNumber	attno;
	int			index;

	/* if no whole of row references, do not anything */
	index = InvalidAttrNumber - FirstLowInvalidHeapAttributeNumber;
	if (!bms_is_member(index, columns))
		return columns;

	/* obtain number of attributes */
	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid));
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "cache lookup failed for relation %u", relOid);
	natts = ((Form_pg_class) GETSTRUCT(tuple))->relnatts;
	ReleaseSysCache(tuple);

	/* fix up the given columns */
	result = bms_copy(columns);
	result = bms_del_member(result, index);

	for (attno = 1; attno <= natts; attno++)
	{
		tuple = SearchSysCache2(ATTNUM,
								ObjectIdGetDatum(relOid),
								Int16GetDatum(attno));
		if (!HeapTupleIsValid(tuple))
			continue;

		if (((Form_pg_attribute) GETSTRUCT(tuple))->attisdropped)
		{
			/*
			 * BUGFIX: the original code skipped ReleaseSysCache here,
			 * leaking a syscache reference for every dropped column.
			 */
			ReleaseSysCache(tuple);
			continue;
		}

		index = attno - FirstLowInvalidHeapAttributeNumber;
		result = bms_add_member(result, index);

		ReleaseSysCache(tuple);
	}
	return result;
}
/*
 * XML: object-field-start callback.
 *
 * Looks the short field name up in the word table, emits the opening XML
 * tag for its long name, and records whether the children of this level
 * are tagged by something other than "Item".
 */
static void
xml_ofstart(void *state, char *fname, bool isnull)
{
	word_table *p;
	pgspParserContext *ctx = (pgspParserContext *) state;
	char	   *s;

	p = search_word_table(propfields, fname, ctx->mode);
	if (!p)
	{
		/* BUGFIX: message previously misspelled "encoutered" */
		ereport(DEBUG1,
				(errmsg("Short JSON parser encountered unknown field name: \"%s\".", fname),
				 errdetail_log("INPUT: \"%s\"", ctx->org_string)));
	}
	s = (p ? p->longname : fname);

	/*
	 * save current process context
	 * There's no problem if P_Plan appears recursively.
	 */
	if (p && (p->tag == P_Plan || p->tag == P_Triggers))
		ctx->processing = p->tag;

	appendStringInfoChar(ctx->dest, '\n');
	appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP);

	ctx->valconverter = NULL;

	appendStringInfoChar(ctx->dest, '<');
	appendStringInfoString(ctx->dest, escape_xml(hyphenate_words(ctx, s)));
	appendStringInfoChar(ctx->dest, '>');
	ctx->valconverter = (p ? p->converter : NULL);

	/*
	 * If the object field name is Plan or Triggers, the value should be an
	 * array and the items are tagged by other than "Item". "Item"s appear
	 * only in Output field.
	 */
	if (p && (p->tag == P_Plans || p->tag == P_Triggers))
		ctx->not_item = bms_add_member(ctx->not_item, ctx->level + 1);
	else
		ctx->not_item = bms_del_member(ctx->not_item, ctx->level + 1);
}
/* YAML */ static void yaml_objstart(void *state) { pgspParserContext *ctx = (pgspParserContext *)state; if (ctx->fname) { if (ctx->dest->len > 0) appendStringInfoChar(ctx->dest, '\n'); appendStringInfoSpaces(ctx->dest, (ctx->level - 1) * INDENT_STEP); appendStringInfoString(ctx->dest, "- "); appendStringInfoString(ctx->dest, ctx->fname); appendStringInfoString(ctx->dest, ":\n"); appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP); ctx->fname = NULL; } ctx->level++; ctx->first = bms_add_member(ctx->first, ctx->level); }
/* ----------------------------------------------------------------
 * ExecReScanGatherMerge
 *
 * Prepare to re-scan the result of a GatherMerge.
 * ----------------------------------------------------------------
 */
void
ExecReScanGatherMerge(GatherMergeState *node)
{
	GatherMerge *plan = (GatherMerge *) node->ps.plan;
	PlanState  *childPlan = outerPlanState(node);

	/* Gracefully shut down any workers left over from the previous scan */
	ExecShutdownGatherMergeWorkers(node);

	/* Drop buffered tuples so nothing leaks across rescans */
	gather_merge_clear_tuples(node);

	/* Force shared state to be rebuilt on the next call */
	node->initialized = false;
	node->gm_initialized = false;

	/*
	 * Set child node's chgParam to tell it that the next scan might deliver a
	 * different set of rows within the leader process.  (The overall rowset
	 * shouldn't change, but the leader process's subset might; hence nodes
	 * between here and the parallel table scan node mustn't optimize on the
	 * assumption of an unchanging rowset.)
	 */
	if (plan->rescan_param >= 0)
		childPlan->chgParam = bms_add_member(childPlan->chgParam,
											 plan->rescan_param);

	/*
	 * If chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.  Note: because this does nothing if we have a
	 * rescan_param, it's currently guaranteed that parallel-aware child nodes
	 * will not see a ReScan call until after they get a ReInitializeDSM call.
	 * That ordering might not be something to rely on, though.  A good rule
	 * of thumb is that ReInitializeDSM should reset only shared state, ReScan
	 * should reset only local state, and anything that depends on both of
	 * those steps being finished must wait until the first ExecProcNode call.
	 */
	if (childPlan->chgParam == NULL)
		ExecReScan(childPlan);
}
/* JSON */ static void json_objstart(void *state) { pgspParserContext *ctx = (pgspParserContext *)state; if (ctx->mode == PGSP_JSON_INFLATE) { if (!ctx->fname && ctx->dest->len > 0) { appendStringInfoChar(ctx->dest, '\n'); appendStringInfoSpaces(ctx->dest, (ctx->level) * INDENT_STEP); } ctx->fname = NULL; } appendStringInfoChar(ctx->dest, '{'); ctx->level++; ctx->first = bms_add_member(ctx->first, ctx->level); if (ctx->mode == PGSP_JSON_INFLATE) appendStringInfoChar(ctx->dest, '\n'); }
/*
 * Read relation attribute names from the stream.
 *
 * Fills rel->attnames, rel->atttyps, rel->attkeys and rel->natts from the
 * wire representation: a 2-byte count followed by, per attribute, a flags
 * byte, a name string, a 4-byte type OID and a 4-byte (ignored) type mode.
 */
static void
logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel)
{
	int			natts;
	char	  **attnames;
	Oid		   *atttyps;
	Bitmapset  *attkeys = NULL;
	int			i;

	natts = pq_getmsgint(in, 2);
	attnames = palloc(natts * sizeof(char *));
	atttyps = palloc(natts * sizeof(Oid));

	/* read the per-attribute entries */
	for (i = 0; i < natts; i++)
	{
		uint8		flags = pq_getmsgbyte(in);

		/* remember attributes that belong to the replica identity */
		if (flags & LOGICALREP_IS_REPLICA_IDENTITY)
			attkeys = bms_add_member(attkeys, i);

		/* attribute name */
		attnames[i] = pstrdup(pq_getmsgstring(in));

		/* attribute type id */
		atttyps[i] = (Oid) pq_getmsgint(in, 4);

		/* attribute mode is transmitted but currently unused */
		(void) pq_getmsgint(in, 4);
	}

	rel->attnames = attnames;
	rel->atttyps = atttyps;
	rel->attkeys = attkeys;
	rel->natts = natts;
}
/* * _readBitmapset */ static Bitmapset * _readBitmapset(void) { Bitmapset *result = NULL; READ_TEMP_LOCALS(); token = pg_strtok(&length); if (token == NULL) elog(ERROR, "incomplete Bitmapset structure"); if (length != 1 || token[0] != '(') elog(ERROR, "unrecognized token: \"%.*s\"", length, token); token = pg_strtok(&length); if (token == NULL) elog(ERROR, "incomplete Bitmapset structure"); if (length != 1 || token[0] != 'b') elog(ERROR, "unrecognized token: \"%.*s\"", length, token); for (;;) { int val; char *endptr; token = pg_strtok(&length); if (token == NULL) elog(ERROR, "unterminated Bitmapset structure"); if (length == 1 && token[0] == ')') break; val = (int) strtol(token, &endptr, 10); if (endptr != token + length) elog(ERROR, "unrecognized integer: \"%.*s\"", length, token); result = bms_add_member(result, val); } return result; }
/*
 * statext_ndistinct_deserialize
 *		Read an on-disk bytea format MVNDistinct to in-memory format
 */
MVNDistinct *
statext_ndistinct_deserialize(bytea *data)
{
	int			i;
	Size		minimum_size;
	MVNDistinct ndist;			/* stack copy of the header fields */
	MVNDistinct *ndistinct;
	char	   *tmp;			/* read cursor into the bytea payload */

	if (data == NULL)
		return NULL;

	/* we expect at least the basic fields of MVNDistinct struct */
	if (VARSIZE_ANY_EXHDR(data) < SizeOfMVNDistinct)
		elog(ERROR, "invalid MVNDistinct size %zd (expected at least %zd)",
			 VARSIZE_ANY_EXHDR(data), SizeOfMVNDistinct);

	/* initialize pointer to the data part (skip the varlena header) */
	tmp = VARDATA_ANY(data);

	/* read the header fields and perform basic sanity checks */
	memcpy(&ndist.magic, tmp, sizeof(uint32));
	tmp += sizeof(uint32);
	memcpy(&ndist.type, tmp, sizeof(uint32));
	tmp += sizeof(uint32);
	memcpy(&ndist.nitems, tmp, sizeof(uint32));
	tmp += sizeof(uint32);

	if (ndist.magic != STATS_NDISTINCT_MAGIC)
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("invalid ndistinct magic %08x (expected %08x)",
						ndist.magic, STATS_NDISTINCT_MAGIC)));
	if (ndist.type != STATS_NDISTINCT_TYPE_BASIC)
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("invalid ndistinct type %d (expected %d)",
						ndist.type, STATS_NDISTINCT_TYPE_BASIC)));
	if (ndist.nitems == 0)
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("invalid zero-length item array in MVNDistinct")));

	/*
	 * what minimum bytea size do we expect for those parameters: each item
	 * carries at least a double, an int count, and two attnums
	 */
	minimum_size = (SizeOfMVNDistinct +
					ndist.nitems * (SizeOfMVNDistinctItem +
									sizeof(AttrNumber) * 2));
	if (VARSIZE_ANY_EXHDR(data) < minimum_size)
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("invalid MVNDistinct size %zd (expected at least %zd)",
						VARSIZE_ANY_EXHDR(data), minimum_size)));

	/*
	 * Allocate space for the ndistinct items (no space for each item's
	 * attnos: those live in bitmapsets allocated separately)
	 */
	ndistinct = palloc0(MAXALIGN(SizeOfMVNDistinct) +
						(ndist.nitems * sizeof(MVNDistinctItem)));
	ndistinct->magic = ndist.magic;
	ndistinct->type = ndist.type;
	ndistinct->nitems = ndist.nitems;

	/* deserialize the items, advancing the read cursor as we go */
	for (i = 0; i < ndistinct->nitems; i++)
	{
		MVNDistinctItem *item = &ndistinct->items[i];
		int			nelems;

		item->attrs = NULL;

		/* ndistinct value */
		memcpy(&item->ndistinct, tmp, sizeof(double));
		tmp += sizeof(double);

		/* number of attributes */
		memcpy(&nelems, tmp, sizeof(int));
		tmp += sizeof(int);
		Assert((nelems >= 2) && (nelems <= STATS_MAX_DIMENSIONS));

		/* rebuild the attribute bitmapset from the serialized attnums */
		while (nelems-- > 0)
		{
			AttrNumber	attno;

			memcpy(&attno, tmp, sizeof(AttrNumber));
			tmp += sizeof(AttrNumber);
			item->attrs = bms_add_member(item->attrs, attno);
		}

		/* still within the bytea */
		Assert(tmp <= ((char *) data + VARSIZE_ANY(data)));
	}

	/* we should have consumed the whole bytea exactly */
	Assert(tmp == ((char *) data + VARSIZE_ANY(data)));

	return ndistinct;
}
/*
 * intorel_startup --- executor startup
 *
 * Creates the target relation for CREATE TABLE AS / CREATE MATERIALIZED
 * VIEW (plus its TOAST table and, for matviews, the view rule), checks
 * INSERT permission on it, and fills the DR_intorel receiver state used
 * by the later receive/shutdown routines.
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	DR_intorel *myState = (DR_intorel *) self;
	IntoClause *into = myState->into;
	bool		is_matview;
	char		relkind;
	CreateStmt *create;
	Oid			intoRelationId;
	Relation	intoRelationDesc;
	RangeTblEntry *rte;
	Datum		toast_options;
	ListCell   *lc;
	int			attnum;
	static char *validnsps[] = HEAP_RELOPT_NAMESPACES;

	Assert(into != NULL);		/* else somebody forgot to set it */

	/* This code supports both CREATE TABLE AS and CREATE MATERIALIZED VIEW */
	is_matview = (into->viewQuery != NULL);
	relkind = is_matview ? RELKIND_MATVIEW : RELKIND_RELATION;

	/*
	 * Create the target relation by faking up a CREATE TABLE parsetree and
	 * passing it to DefineRelation.
	 */
	create = makeNode(CreateStmt);
	create->relation = into->rel;
	create->tableElts = NIL;	/* will fill below */
	create->inhRelations = NIL;
	create->ofTypename = NULL;
	create->constraints = NIL;
	create->options = into->options;
	create->oncommit = into->onCommit;
	create->tablespacename = into->tableSpaceName;
	create->if_not_exists = false;

	/*
	 * Build column definitions using "pre-cooked" type and collation info. If
	 * a column name list was specified in CREATE TABLE AS, override the
	 * column names derived from the query. (Too few column names are OK, too
	 * many are not.)
	 */
	lc = list_head(into->colNames);
	for (attnum = 0; attnum < typeinfo->natts; attnum++)
	{
		Form_pg_attribute attribute = typeinfo->attrs[attnum];
		ColumnDef  *col = makeNode(ColumnDef);
		TypeName   *coltype = makeNode(TypeName);

		/* take the user-supplied column name if any remain, else the query's */
		if (lc)
		{
			col->colname = strVal(lfirst(lc));
			lc = lnext(lc);
		}
		else
			col->colname = NameStr(attribute->attname);
		col->typeName = coltype;
		col->inhcount = 0;
		col->is_local = true;
		col->is_not_null = false;
		col->is_from_type = false;
		col->storage = 0;
		col->raw_default = NULL;
		col->cooked_default = NULL;
		col->collClause = NULL;
		col->collOid = attribute->attcollation;
		col->constraints = NIL;
		col->fdwoptions = NIL;

		/* "pre-cooked" type: OID/typmod only, no names to look up */
		coltype->names = NIL;
		coltype->typeOid = attribute->atttypid;
		coltype->setof = false;
		coltype->pct_type = false;
		coltype->typmods = NIL;
		coltype->typemod = attribute->atttypmod;
		coltype->arrayBounds = NIL;
		coltype->location = -1;

		/*
		 * It's possible that the column is of a collatable type but the
		 * collation could not be resolved, so double-check. (We must check
		 * this here because DefineRelation would adopt the type's default
		 * collation rather than complaining.)
		 */
		if (!OidIsValid(col->collOid) &&
			type_is_collatable(coltype->typeOid))
			ereport(ERROR,
					(errcode(ERRCODE_INDETERMINATE_COLLATION),
					 errmsg("no collation was derived for column \"%s\" with collatable type %s",
							col->colname, format_type_be(coltype->typeOid)),
					 errhint("Use the COLLATE clause to set the collation explicitly.")));

		create->tableElts = lappend(create->tableElts, col);
	}

	/* leftover names mean the user listed more columns than the query yields */
	if (lc != NULL)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("too many column names were specified")));

	/*
	 * Actually create the target table
	 */
	intoRelationId = DefineRelation(create, relkind, InvalidOid);

	/*
	 * If necessary, create a TOAST table for the target table. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	CommandCounterIncrement();

	/* parse and validate reloptions for the toast table */
	toast_options = transformRelOptions((Datum) 0, create->options, "toast",
										validnsps, true, false);

	(void) heap_reloptions(RELKIND_TOASTVALUE, toast_options, true);

	AlterTableCreateToastTable(intoRelationId, toast_options);

	/* Create the "view" part of a materialized view. */
	if (is_matview)
	{
		/* StoreViewQuery scribbles on tree, so make a copy */
		Query	   *query = (Query *) copyObject(into->viewQuery);

		StoreViewQuery(intoRelationId, query, false);
		CommandCounterIncrement();
	}

	/*
	 * Finally we can open the target table
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/*
	 * Check INSERT permission on the constructed table.
	 *
	 * XXX: It would arguably make sense to skip this check if into->skipData
	 * is true.
	 */
	rte = makeNode(RangeTblEntry);
	rte->rtekind = RTE_RELATION;
	rte->relid = intoRelationId;
	rte->relkind = relkind;
	rte->requiredPerms = ACL_INSERT;

	/* mark every column as modified (attnos offset for system columns) */
	for (attnum = 1; attnum <= intoRelationDesc->rd_att->natts; attnum++)
		rte->modifiedCols = bms_add_member(rte->modifiedCols,
										   attnum - FirstLowInvalidHeapAttributeNumber);

	ExecCheckRTPerms(list_make1(rte), true);

	/*
	 * Tentatively mark the target as populated, if it's a matview and we're
	 * going to fill it; otherwise, no change needed.
	 */
	if (is_matview && !into->skipData)
		SetMatViewPopulatedState(intoRelationDesc, true);

	/*
	 * Fill private fields of myState for use by later routines
	 */
	myState->rel = intoRelationDesc;
	myState->output_cid = GetCurrentCommandId(true);

	/*
	 * We can skip WAL-logging the insertions, unless PITR or streaming
	 * replication is in use. We can skip the FSM in any case.
	 */
	myState->hi_options = HEAP_INSERT_SKIP_FSM |
		(XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
	myState->bistate = GetBulkInsertState();

	/* Not using WAL requires smgr_targblock be initially invalid */
	Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
}
/* ----------------------------------------------------------------
 *		ExecRecursiveUnion(node)
 *
 *		Scans the recursive query sequentially and returns the next
 *		qualifying tuple.
 *
 * 1. evaluate non recursive term and assign the result to RT
 *
 * 2. execute recursive terms
 *
 * 2.1 WT := RT
 * 2.2 while WT is not empty repeat 2.3 to 2.6. if WT is empty returns RT
 * 2.3 replace the name of recursive term with WT
 * 2.4 evaluate the recursive term and store into WT
 * 2.5 append WT to RT
 * 2.6 go back to 2.2
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecRecursiveUnion(RecursiveUnionState *node)
{
	PlanState  *outerPlan = outerPlanState(node);
	PlanState  *innerPlan = innerPlanState(node);
	RecursiveUnion *plan = (RecursiveUnion *) node->ps.plan;
	TupleTableSlot *slot;
	RUHashEntry entry;
	bool		isnew;

	/* 1. Evaluate non-recursive term */
	if (!node->recursing)
	{
		for (;;)
		{
			slot = ExecProcNode(outerPlan);
			if (TupIsNull(slot))
				break;
			/* numCols > 0 means UNION (not UNION ALL): dedupe via hashtable */
			if (plan->numCols > 0)
			{
				/* Find or build hashtable entry for this tuple's group */
				entry = (RUHashEntry)
					LookupTupleHashEntry(node->hashtable, slot, &isnew);
				/* Must reset temp context after each hashtable lookup */
				MemoryContextReset(node->tempContext);
				/* Ignore tuple if already seen */
				if (!isnew)
					continue;
			}
			/* Each non-duplicate tuple goes to the working table ... */
			tuplestore_puttupleslot(node->working_table, slot);
			/* ... and to the caller */
			return slot;
		}
		node->recursing = true;
	}

	/* 2. Execute recursive term */
	for (;;)
	{
		slot = ExecProcNode(innerPlan);
		if (TupIsNull(slot))
		{
			/* Done if there's nothing in the intermediate table */
			if (node->intermediate_empty)
				break;

			/* done with old working table ... */
			tuplestore_end(node->working_table);

			/* intermediate table becomes working table */
			node->working_table = node->intermediate_table;

			/* create new empty intermediate table */
			node->intermediate_table = tuplestore_begin_heap(false, false,
															 work_mem);
			node->intermediate_empty = true;

			/* reset the recursive term: flag wtParam changed so it rescans */
			innerPlan->chgParam = bms_add_member(innerPlan->chgParam,
												 plan->wtParam);

			/* and continue fetching from recursive term */
			continue;
		}

		if (plan->numCols > 0)
		{
			/* Find or build hashtable entry for this tuple's group */
			entry = (RUHashEntry)
				LookupTupleHashEntry(node->hashtable, slot, &isnew);
			/* Must reset temp context after each hashtable lookup */
			MemoryContextReset(node->tempContext);
			/* Ignore tuple if already seen */
			if (!isnew)
				continue;
		}

		/* Else, tuple is good; stash it in intermediate table ... */
		node->intermediate_empty = false;
		tuplestore_puttupleslot(node->intermediate_table, slot);
		/* ... and return it */
		return slot;
	}

	/* exhausted both terms: recursion has reached a fixed point */
	return NULL;
}
/*
 * get_primary_key_attnos
 *		Identify the columns in a relation's primary key, if any.
 *
 * Returns a Bitmapset of the column attnos of the primary key's columns,
 * with attnos being offset by FirstLowInvalidHeapAttributeNumber so that
 * system columns can be represented.
 *
 * If there is no primary key, return NULL.  We also return NULL if the pkey
 * constraint is deferrable and deferrableOk is false.
 *
 * *constraintOid is set to the OID of the pkey constraint, or InvalidOid
 * on failure.
 */
Bitmapset *
get_primary_key_attnos(Oid relid, bool deferrableOk, Oid *constraintOid)
{
	Bitmapset  *pkattnos = NULL;
	Relation	pg_constraint;
	HeapTuple	tuple;
	SysScanDesc scan;
	ScanKeyData skey[1];

	/* Set *constraintOid, to avoid complaints about uninitialized vars */
	*constraintOid = InvalidOid;

	/* Scan pg_constraint for constraints of the target rel */
	pg_constraint = heap_open(ConstraintRelationId, AccessShareLock);

	ScanKeyInit(&skey[0],
				Anum_pg_constraint_conrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relid));

	scan = systable_beginscan(pg_constraint, ConstraintRelidIndexId, true,
							  NULL, 1, skey);

	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
	{
		Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
		Datum		adatum;
		bool		isNull;
		ArrayType  *arr;
		int16	   *attnums;
		int			numkeys;
		int			i;

		/* Skip constraints that are not PRIMARY KEYs */
		if (con->contype != CONSTRAINT_PRIMARY)
			continue;

		/*
		 * If the primary key is deferrable, but we've been instructed to
		 * ignore deferrable constraints, then we might as well give up
		 * searching, since there can only be a single primary key on a
		 * table.
		 */
		if (con->condeferrable && !deferrableOk)
			break;

		/* Extract the conkey array, ie, attnums of PK's columns */
		adatum = heap_getattr(tuple, Anum_pg_constraint_conkey,
							  RelationGetDescr(pg_constraint), &isNull);
		if (isNull)
			elog(ERROR, "null conkey for constraint %u",
				 HeapTupleGetOid(tuple));
		arr = DatumGetArrayTypeP(adatum);	/* ensure not toasted */
		numkeys = ARR_DIMS(arr)[0];
		/* sanity-check the array shape before trusting its contents */
		if (ARR_NDIM(arr) != 1 ||
			numkeys < 0 ||
			ARR_HASNULL(arr) ||
			ARR_ELEMTYPE(arr) != INT2OID)
			elog(ERROR, "conkey is not a 1-D smallint array");
		attnums = (int16 *) ARR_DATA_PTR(arr);

		/* Construct the result value */
		for (i = 0; i < numkeys; i++)
		{
			pkattnos = bms_add_member(pkattnos,
									  attnums[i] - FirstLowInvalidHeapAttributeNumber);
		}
		*constraintOid = HeapTupleGetOid(tuple);

		/* No need to search further */
		break;
	}

	systable_endscan(scan);

	heap_close(pg_constraint, AccessShareLock);

	return pkattnos;
}
/*
 * get_relation_constraint_attnos
 *		Find a constraint on the specified relation with the specified name
 *		and return the constrained columns.
 *
 * Returns a Bitmapset of the column attnos of the constrained columns, with
 * attnos being offset by FirstLowInvalidHeapAttributeNumber so that system
 * columns can be represented.
 *
 * *constraintOid is set to the OID of the constraint, or InvalidOid on
 * failure.
 */
Bitmapset *
get_relation_constraint_attnos(Oid relid, const char *conname,
							   bool missing_ok, Oid *constraintOid)
{
	Bitmapset  *conattnos = NULL;
	Relation	pg_constraint;
	HeapTuple	tuple;
	SysScanDesc scan;
	ScanKeyData skey[1];

	/* Set *constraintOid, to avoid complaints about uninitialized vars */
	*constraintOid = InvalidOid;

	/*
	 * Fetch the constraint tuple from pg_constraint.  There may be more than
	 * one match, because constraints are not required to have unique names;
	 * if so, error out.
	 */
	pg_constraint = heap_open(ConstraintRelationId, AccessShareLock);

	ScanKeyInit(&skey[0],
				Anum_pg_constraint_conrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relid));

	scan = systable_beginscan(pg_constraint, ConstraintRelidIndexId, true,
							  NULL, 1, skey);

	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
	{
		Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
		Datum		adatum;
		bool		isNull;
		ArrayType  *arr;
		int16	   *attnums;
		int			numcols;
		int			i;

		/* Check the constraint name */
		if (strcmp(NameStr(con->conname), conname) != 0)
			continue;

		/* a second name match means the name is ambiguous on this rel */
		if (OidIsValid(*constraintOid))
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_OBJECT),
					 errmsg("table \"%s\" has multiple constraints named \"%s\"",
							get_rel_name(relid), conname)));

		*constraintOid = HeapTupleGetOid(tuple);

		/* Extract the conkey array, ie, attnums of constrained columns */
		adatum = heap_getattr(tuple, Anum_pg_constraint_conkey,
							  RelationGetDescr(pg_constraint), &isNull);
		if (isNull)
			continue;			/* no constrained columns */

		arr = DatumGetArrayTypeP(adatum);	/* ensure not toasted */
		numcols = ARR_DIMS(arr)[0];
		/* sanity-check the array shape before trusting its contents */
		if (ARR_NDIM(arr) != 1 ||
			numcols < 0 ||
			ARR_HASNULL(arr) ||
			ARR_ELEMTYPE(arr) != INT2OID)
			elog(ERROR, "conkey is not a 1-D smallint array");
		attnums = (int16 *) ARR_DATA_PTR(arr);

		/* Construct the result value */
		for (i = 0; i < numcols; i++)
		{
			conattnos = bms_add_member(conattnos,
									   attnums[i] - FirstLowInvalidHeapAttributeNumber);
		}
	}

	systable_endscan(scan);

	/* If no such constraint exists, complain */
	if (!OidIsValid(*constraintOid) && !missing_ok)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("constraint \"%s\" for table \"%s\" does not exist",
						conname, get_rel_name(relid))));

	heap_close(pg_constraint, AccessShareLock);

	return conattnos;
}
/*
 * Does the supplied GpPolicy support unique indexing on the specified
 * attributes?
 *
 * If the table is distributed randomly, no unique indexing is supported.
 * Otherwise, the set of columns being indexed should be a superset of the
 * policy.
 *
 * If the proposed index does not match the distribution policy but the relation
 * is empty and does not have a primary key or unique index, update the
 * distribution policy to match the index definition (MPP-101), as long as it
 * doesn't contain expressions.
 */
void
checkPolicyForUniqueIndex(Relation rel, AttrNumber *indattr, int nidxatts,
						  bool isprimary, bool has_exprs, bool has_pkey,
						  bool has_ukey)
{
	GpPolicy   *distpolicy = rel->rd_cdbpolicy;
	Bitmapset  *policycols = NULL;
	Bitmapset  *indexcols = NULL;
	int			i;

	/*
	 * Random distribution cannot enforce uniqueness at all, so reject
	 * unique/primary-key indexes up front.
	 */
	if (GpPolicyIsRandomly(distpolicy))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("%s and DISTRIBUTED RANDOMLY are incompatible",
						isprimary ? "PRIMARY KEY" : "UNIQUE")));

	/*
	 * Build bitmaps of both column sets; column order is irrelevant to the
	 * subset test, so sets are the natural representation.
	 */
	for (i = 0; i < distpolicy->nattrs; i++)
		policycols = bms_add_member(policycols, distpolicy->attrs[i]);

	for (i = 0; i < nidxatts; i++)
	{
		/* System columns (negative attnums) can never be index keys here */
		if (indattr[i] < 0)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
					 errmsg("cannot create %s on system column",
							isprimary ? "primary key" : "unique index")));

		indexcols = bms_add_member(indexcols, indattr[i]);
	}

	Assert(bms_membership(policycols) != BMS_EMPTY_SET);
	Assert(bms_membership(indexcols) != BMS_EMPTY_SET);

	/*
	 * If the existing policy is not a subset, we must either error out or
	 * update the distribution policy. It might be tempting to say that even
	 * when the policy is a subset, we should update it to match the index
	 * definition. The problem then is that if the user actually wants to
	 * distribution on (a, b) but then creates an index on (a, b, c) we'll
	 * change the policy underneath them.
	 *
	 * What is really needed is a new field in gp_distribution_policy telling us
	 * if the policy has been explicitly set.
	 */
	if (bms_is_subset(policycols, indexcols))
		return;					/* index covers the distribution key: OK */

	if (cdbRelSize(rel) != 0 || has_pkey || has_ukey || has_exprs)
	{
		/* Populated table (or conflicting keys/exprs): cannot adjust policy */
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("%s must contain all columns in the "
						"distribution key of relation \"%s\"",
						isprimary ? "PRIMARY KEY" : "UNIQUE index",
						RelationGetRelationName(rel))));
	}
	else
	{
		/* update policy since table is not populated yet. See MPP-101 */
		GpPolicy   *newpolicy;

		newpolicy = palloc(sizeof(GpPolicy) +
						   sizeof(AttrNumber) * nidxatts);
		newpolicy->ptype = POLICYTYPE_PARTITIONED;
		newpolicy->nattrs = nidxatts;
		for (i = 0; i < nidxatts; i++)
			newpolicy->attrs[i] = indattr[i];

		GpPolicyReplace(rel->rd_id, newpolicy);

		if (isprimary)
			elog(NOTICE, "updating distribution policy to match new primary key");
		else
			elog(NOTICE, "updating distribution policy to match new unique index");
	}
}