/*
 * Function dumping dependent relation & function oids for a given SQL text
 */
Datum
gp_dump_query_oids(PG_FUNCTION_ARGS)
{
    char       *sqlText = text_to_cstring(PG_GETARG_TEXT_P(0));
    List       *queryList = pg_parse_and_rewrite(sqlText, NULL, 0);
    ListCell   *plc;
    StringInfoData relbuf,
                funcbuf;

    initStringInfo(&relbuf);
    initStringInfo(&funcbuf);

    typedef struct OidHashEntry
    {
        Oid         key;
        bool        value;
    } OidHashEntry;
    HASHCTL     ctl;

    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(OidHashEntry);
    ctl.hash = oid_hash;
    HTAB       *relhtab = hash_create("relid hash table", 100, &ctl, HASH_ELEM | HASH_FUNCTION);
    HTAB       *funchtab = hash_create("funcid hash table", 100, &ctl, HASH_ELEM | HASH_FUNCTION);

    foreach(plc, queryList)
    {
        Query      *query = (Query *) lfirst(plc);

        if (CMD_UTILITY == query->commandType && T_ExplainStmt == query->utilityStmt->type)
        {
            Node       *queryExplain = ((ExplainStmt *) query->utilityStmt)->query;
            List       *queryTree = pg_analyze_and_rewrite(queryExplain, sqlText, NULL, 0);

            Assert(1 == list_length(queryTree));
            query = (Query *) lfirst(list_head(queryTree));
        }

        traverseQueryOids(query, relhtab, &relbuf, funchtab, &funcbuf);
    }
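For context, a minimal sketch of how this UDF might be invoked from SQL. The query text and table name here are hypothetical, and the exact shape of the returned relid/funcid text depends on the Greenplum build:

SELECT gp_dump_query_oids('SELECT * FROM my_table');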
/*
 * Parse and plan a querystring.
 *
 * At entry, plan->argtypes and plan->nargs must be valid.
 *
 * Query and plan lists are stored into *plan.
 */
static void
_SPI_prepare_plan(const char *src, _SPI_plan *plan)
{
    List       *raw_parsetree_list;
    List       *query_list_list;
    List       *plan_list;
    ListCell   *list_item;
    ErrorContextCallback spierrcontext;
    Oid        *argtypes = plan->argtypes;
    int         nargs = plan->nargs;

    /*
     * Increment CommandCounter to see changes made by now.  We must do this
     * to be sure of seeing any schema changes made by a just-preceding SPI
     * command.  (But we don't bother advancing the snapshot, since the
     * planner generally operates under SnapshotNow rules anyway.)
     */
    CommandCounterIncrement();

    /*
     * Setup error traceback support for ereport()
     */
    spierrcontext.callback = _SPI_error_callback;
    spierrcontext.arg = (void *) src;
    spierrcontext.previous = error_context_stack;
    error_context_stack = &spierrcontext;

    /*
     * Parse the request string into a list of raw parse trees.
     */
    raw_parsetree_list = pg_parse_query(src);

    /*
     * Do parse analysis and rule rewrite for each raw parsetree.
     *
     * We save the querytrees from each raw parsetree as a separate sublist.
     * This allows _SPI_execute_plan() to know where the boundaries between
     * original queries fall.
     */
    query_list_list = NIL;
    plan_list = NIL;

    foreach(list_item, raw_parsetree_list)
    {
        Node       *parsetree = (Node *) lfirst(list_item);
        List       *query_list;

        query_list = pg_analyze_and_rewrite(parsetree, argtypes, nargs);

        query_list_list = lappend(query_list_list, query_list);
        plan_list = list_concat(plan_list,
                                pg_plan_queries(query_list, NULL, false));
    }
static PlannedStmt *
get_plan_from_stmt(Oid id, Node *node, const char *sql, bool is_combine)
{
    Query      *query;
    PlannedStmt *plan;

    query = linitial(pg_analyze_and_rewrite(node, sql, NULL, 0));

    query->isContinuous = true;
    query->isCombine = is_combine;
    query->cq_id = id;

    plan = pg_plan_query(query, 0, NULL);

    plan->is_continuous = true;
    plan->is_combine = is_combine;
    plan->cq_id = id;

    /*
     * Unique plans get transformed into ContinuousUnique plans for
     * continuous query processes.
     */
    if (IsA(plan->planTree, Unique))
    {
        ContinuousUnique *cunique = makeNode(ContinuousUnique);
        Unique     *unique = (Unique *) plan->planTree;

        memcpy((char *) &cunique->unique, (char *) unique, sizeof(Unique));

        cunique->cq_id = id;
        cunique->unique.plan.type = T_ContinuousUnique;

        plan->planTree = (Plan *) cunique;

        Assert(IsA(plan->planTree->lefttree, Sort));

        /* Strip out the sort since it's not needed */
        plan->planTree->lefttree = plan->planTree->lefttree->lefttree;
    }

    return plan;
}
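For illustration, a hypothetical PipelineDB-style continuous query of the kind whose analyzed statement would be planned by this function (the stream and view names are made up, and the exact DDL syntax varies across PipelineDB versions):

CREATE STREAM sensor_stream (temperature float8);
CREATE CONTINUOUS VIEW avg_temperature AS
    SELECT avg(temperature) FROM sensor_stream;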
/*
 * master_apply_delete_command takes in a delete command, finds shards that
 * match the criteria defined in the delete command, drops the found shards from
 * the worker nodes, and updates the corresponding metadata on the master node.
 * This function drops a shard if and only if all rows in the shard satisfy
 * the conditions in the delete command. Note that this function only accepts
 * conditions on the partition key and if no condition is provided then all
 * shards are deleted.
 *
 * We mark shard placements that we couldn't drop as to be deleted later. If a
 * shard satisfies the given conditions, we delete it from the shard metadata
 * table even though related shard placements are not deleted.
 */
Datum
master_apply_delete_command(PG_FUNCTION_ARGS)
{
    text *queryText = PG_GETARG_TEXT_P(0);
    char *queryString = text_to_cstring(queryText);
    char *relationName = NULL;
    char *schemaName = NULL;
    Oid relationId = InvalidOid;
    List *shardIntervalList = NIL;
    List *deletableShardIntervalList = NIL;
    List *queryTreeList = NIL;
    Query *deleteQuery = NULL;
    Node *whereClause = NULL;
    Node *deleteCriteria = NULL;
    Node *queryTreeNode = NULL;
    DeleteStmt *deleteStatement = NULL;
    int droppedShardCount = 0;
    LOCKMODE lockMode = 0;
    char partitionMethod = 0;
    bool failOK = false;

#if (PG_VERSION_NUM >= 100000)
    RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
    queryTreeNode = rawStmt->stmt;
#else
    queryTreeNode = ParseTreeNode(queryString);
#endif

    EnsureCoordinator();
    CheckCitusVersion(ERROR);

    if (!IsA(queryTreeNode, DeleteStmt))
    {
        ereport(ERROR, (errmsg("query \"%s\" is not a delete statement",
                               queryString)));
    }

    deleteStatement = (DeleteStmt *) queryTreeNode;

    schemaName = deleteStatement->relation->schemaname;
    relationName = deleteStatement->relation->relname;

    /*
     * We take an exclusive lock while dropping shards to prevent concurrent
     * writes. We don't want to block SELECTs, which means queries might fail
     * if they access a shard that has just been dropped.
     */
    lockMode = ExclusiveLock;

    relationId = RangeVarGetRelid(deleteStatement->relation, lockMode, failOK);

    /* schema-prefix if it is not specified already */
    if (schemaName == NULL)
    {
        Oid schemaId = get_rel_namespace(relationId);
        schemaName = get_namespace_name(schemaId);
    }

    CheckDistributedTable(relationId);
    EnsureTablePermissions(relationId, ACL_DELETE);

#if (PG_VERSION_NUM >= 100000)
    queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
#else
    queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0);
#endif

    deleteQuery = (Query *) linitial(queryTreeList);
    CheckTableCount(deleteQuery);

    /* get where clause and flatten it */
    whereClause = (Node *) deleteQuery->jointree->quals;
    deleteCriteria = eval_const_expressions(NULL, whereClause);

    partitionMethod = PartitionMethod(relationId);
    if (partitionMethod == DISTRIBUTE_BY_HASH)
    {
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("cannot delete from hash distributed table with this "
                               "command"),
                        errdetail("Delete statements on hash-partitioned tables "
                                  "are not supported with master_apply_delete_command."),
                        errhint("Use master_modify_multiple_shards command instead.")));
    }
    else if (partitionMethod == DISTRIBUTE_BY_NONE)
    {
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("cannot delete from distributed table"),
                        errdetail("Delete statements on reference tables "
                                  "are not supported.")));
    }

    CheckDeleteCriteria(deleteCriteria);
    CheckPartitionColumn(relationId, deleteCriteria);

    shardIntervalList = LoadShardIntervalList(relationId);

    /* drop all shards if where clause is not present */
    if (deleteCriteria == NULL)
    {
        deletableShardIntervalList = shardIntervalList;
        ereport(DEBUG2, (errmsg("dropping all shards for \"%s\"", relationName)));
    }
    else
    {
        deletableShardIntervalList = ShardsMatchingDeleteCriteria(relationId,
                                                                  shardIntervalList,
                                                                  deleteCriteria);
    }

    droppedShardCount = DropShards(relationId, schemaName, relationName,
                                   deletableShardIntervalList);

    PG_RETURN_INT32(droppedShardCount);
}
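For reference, a hypothetical invocation of this UDF. The github_events table and its created_at partition column are made up; as the comment above notes, the delete condition must be on the partition key (or omitted to drop all shards):

SELECT master_apply_delete_command(
    'DELETE FROM github_events WHERE created_at >= ''2015-01-01 00:00:00''');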