/*
 * get_proposed_default_constraint
 *
 * This function returns the negation of new_part_constraints, which
 * would be an integral part of the default partition constraints after
 * addition of the partition to which the new_part_constraints belongs.
 */
List *
get_proposed_default_constraint(List *new_part_constraints)
{
	Expr	   *negatedConstraint;

	/* Collapse the incoming constraint list into one explicit AND tree. */
	negatedConstraint = make_ands_explicit(new_part_constraints);

	/*
	 * Derive the partition constraint of the default partition by negating
	 * the given partition constraints.  The partition constraint never
	 * evaluates to NULL, so wrapping it in a plain NOT is safe.
	 */
	negatedConstraint = makeBoolExpr(NOT_EXPR,
									 list_make1(negatedConstraint),
									 -1);

	/* Simplify, to put the negated expression into canonical form. */
	negatedConstraint = (Expr *) eval_const_expressions(NULL,
														(Node *) negatedConstraint);
	negatedConstraint = canonicalize_qual(negatedConstraint, true);

	/* Hand back the canonical form as an implicit-AND list. */
	return make_ands_implicit(negatedConstraint);
}
/*
 * gp_get_physical_index_relid
 *
 * SQL-callable wrapper around getPhysicalIndexRelid(): rebuilds a
 * LogicalIndexInfo from the call arguments, maps its attribute numbers
 * from the root partition to the given child partition, and returns the
 * OID of the matching physical index on that child.
 *
 * Arguments (by position):
 *   0: rootOid     - OID of the partition root relation
 *   1: partOid     - OID of the child partition relation
 *   2: indexKeys   - int2vector of logical index key attnos (nullable)
 *   3: indPred     - serialized partial-index predicate node (nullable)
 *   4: indExprs    - serialized index expression list (nullable)
 *   5: indIsUnique - whether the logical index is unique
 *
 * Returns the physical index OID as a Datum.
 */
Datum gp_get_physical_index_relid(PG_FUNCTION_ARGS)
{
	Oid rootOid = PG_GETARG_OID(0);
	Oid partOid = PG_GETARG_OID(1);
	LogicalIndexInfo logicalIndexInfo;
	Oid resultOid;
	int2vector *indexKeys;
	text *inText;
	Relation rel;

	/* Start from an empty logical-index description; fill in below. */
	logicalIndexInfo.nColumns = 0;
	logicalIndexInfo.indexKeys = NULL;
	logicalIndexInfo.indPred = NIL;
	logicalIndexInfo.indExprs = NIL;

	/* Copy the key column numbers out of the int2vector, if supplied. */
	if (!PG_ARGISNULL(2))
	{
		indexKeys = (int2vector *)PG_GETARG_POINTER(2);
		logicalIndexInfo.nColumns = indexKeys->dim1;
		logicalIndexInfo.indexKeys = (AttrNumber *)palloc0(indexKeys->dim1 * sizeof(AttrNumber));
		for (int i = 0; i < indexKeys->dim1; i++)
			logicalIndexInfo.indexKeys[i] = indexKeys->values[i];
	}

	/* Deserialize and normalize the partial-index predicate, if supplied. */
	if (!PG_ARGISNULL(3))
	{
		Node *indPred;

		inText = PG_GETARG_TEXT_P(3);
		indPred = stringToNode(text_to_cstring(inText));

		/* Perform the same normalization as relcache.c does. */
		indPred = eval_const_expressions(NULL, indPred);
		indPred = (Node *) canonicalize_qual((Expr *) indPred);
		set_coercionform_dontcare(indPred);
		indPred = (Node *) make_ands_implicit((Expr *) indPred);
		fix_opfuncids(indPred);
		logicalIndexInfo.indPred = (List *) indPred;
	}

	/* Deserialize and normalize the index expressions, if supplied. */
	if (!PG_ARGISNULL(4))
	{
		Node *indExprs;

		inText = PG_GETARG_TEXT_P(4);
		indExprs = stringToNode(text_to_cstring(inText));

		/* Perform the same normalization as relcache.c does. */
		indExprs = eval_const_expressions(NULL, indExprs);
		set_coercionform_dontcare(indExprs);
		fix_opfuncids(indExprs);
		logicalIndexInfo.indExprs = (List *) indExprs;
	}

	logicalIndexInfo.indIsUnique = PG_GETARG_BOOL(5);

	/* Build the root-to-child attribute number mapping. */
	AttrNumber *attMap = IndexScan_GetColumnMapping(rootOid, partOid);

	/* Lock the child partition while we probe its indexes. */
	rel = heap_open(partOid, AccessShareLock);

	/*
	 * The varno is hard-coded to 1 as the original getPhysicalIndexRelid was
	 * using a hard-coded 1 for varattno mapping of logicalIndexInfo.
	 */
	IndexScan_MapLogicalIndexInfo(&logicalIndexInfo, attMap, 1);

	/* do the actual work */
	resultOid = getPhysicalIndexRelid(rel, &logicalIndexInfo);

	/* attMap may be NULL when no remapping was needed. */
	if (attMap)
	{
		pfree(attMap);
	}

	heap_close(rel, AccessShareLock);

	return ObjectIdGetDatum(resultOid);
}
/*
 * master_apply_delete_command takes in a delete command, finds shards that
 * match the criteria defined in the delete command, drops the found shards from
 * the worker nodes, and updates the corresponding metadata on the master node.
 * This function drops a shard if and only if all rows in the shard satisfy
 * the conditions in the delete command. Note that this function only accepts
 * conditions on the partition key and if no condition is provided then all
 * shards are deleted.
 *
 * We mark shard placements that we couldn't drop as to be deleted later. If a
 * shard satisfies the given conditions, we delete it from shard metadata table
 * even though related shard placements are not deleted.
 */
Datum
master_apply_delete_command(PG_FUNCTION_ARGS)
{
	text *queryText = PG_GETARG_TEXT_P(0);
	char *queryString = text_to_cstring(queryText);
	char *relationName = NULL;
	char *schemaName = NULL;
	Oid relationId = InvalidOid;
	List *shardIntervalList = NIL;
	List *deletableShardIntervalList = NIL;
	List *queryTreeList = NIL;
	Query *deleteQuery = NULL;
	Node *whereClause = NULL;
	Node *deleteCriteria = NULL;
	Node *queryTreeNode = NULL;
	DeleteStmt *deleteStatement = NULL;
	int droppedShardCount = 0;
	LOCKMODE lockMode = 0;
	char partitionMethod = 0;
	bool failOK = false;

	/* PG 10 changed the raw-parse-tree representation to RawStmt. */
#if (PG_VERSION_NUM >= 100000)
	RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
	queryTreeNode = rawStmt->stmt;
#else
	queryTreeNode = ParseTreeNode(queryString);
#endif

	EnsureCoordinator();
	CheckCitusVersion(ERROR);

	/* only DELETE statements are accepted; reject anything else up front */
	if (!IsA(queryTreeNode, DeleteStmt))
	{
		ereport(ERROR, (errmsg("query \"%s\" is not a delete statement",
							   queryString)));
	}

	deleteStatement = (DeleteStmt *) queryTreeNode;

	schemaName = deleteStatement->relation->schemaname;
	relationName = deleteStatement->relation->relname;

	/*
	 * We take an exclusive lock while dropping shards to prevent concurrent
	 * writes. We don't want to block SELECTs, which means queries might fail
	 * if they access a shard that has just been dropped.
	 */
	lockMode = ExclusiveLock;

	/* failOK is false, so a missing relation raises an error here */
	relationId = RangeVarGetRelid(deleteStatement->relation, lockMode, failOK);

	/* schema-prefix if it is not specified already */
	if (schemaName == NULL)
	{
		Oid schemaId = get_rel_namespace(relationId);
		schemaName = get_namespace_name(schemaId);
	}

	CheckDistributedTable(relationId);
	EnsureTablePermissions(relationId, ACL_DELETE);

	/* analyze/rewrite the statement; the signature changed in PG 10 */
#if (PG_VERSION_NUM >= 100000)
	queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
#else
	queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0);
#endif
	deleteQuery = (Query *) linitial(queryTreeList);
	CheckTableCount(deleteQuery);

	/* get where clause and flatten it */
	whereClause = (Node *) deleteQuery->jointree->quals;
	deleteCriteria = eval_const_expressions(NULL, whereClause);

	/* hash-distributed and reference tables are not supported here */
	partitionMethod = PartitionMethod(relationId);
	if (partitionMethod == DISTRIBUTE_BY_HASH)
	{
		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						errmsg("cannot delete from hash distributed table with this "
							   "command"),
						errdetail("Delete statements on hash-partitioned tables "
								  "are not supported with master_apply_delete_command."),
						errhint("Use master_modify_multiple_shards command instead.")));
	}
	else if (partitionMethod == DISTRIBUTE_BY_NONE)
	{
		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						errmsg("cannot delete from distributed table"),
						errdetail("Delete statements on reference tables "
								  "are not supported.")));
	}

	/* validate that the criteria only touch the partition column */
	CheckDeleteCriteria(deleteCriteria);
	CheckPartitionColumn(relationId, deleteCriteria);

	shardIntervalList = LoadShardIntervalList(relationId);

	/* drop all shards if where clause is not present */
	if (deleteCriteria == NULL)
	{
		deletableShardIntervalList = shardIntervalList;
		ereport(DEBUG2, (errmsg("dropping all shards for \"%s\"", relationName)));
	}
	else
	{
		deletableShardIntervalList = ShardsMatchingDeleteCriteria(relationId,
																  shardIntervalList,
																  deleteCriteria);
	}

	/* drop the matched shards and update metadata; returns the drop count */
	droppedShardCount = DropShards(relationId, schemaName, relationName,
								   deletableShardIntervalList);

	PG_RETURN_INT32(droppedShardCount);
}