Example #1
/*
 * CreateJobSchema creates a job schema with the given schema name. Note that
 * this function ensures that our pg_ prefixed schema names can be created.
 * Further note that the created schema does not become visible to other
 * processes until the transaction commits.
 */
static void
CreateJobSchema(StringInfo schemaName)
{
	const char *queryString = NULL;
	bool oldAllowSystemTableMods = false;

	Oid savedUserId = InvalidOid;
	int savedSecurityContext = 0;

	/* build a CREATE SCHEMA statement */
	CreateSchemaStmt *createSchemaStmt = makeNode(CreateSchemaStmt);
	createSchemaStmt->schemaname = schemaName->data;
	createSchemaStmt->authrole = NULL;
	createSchemaStmt->schemaElts = NIL;

	/* allow schema names that start with pg_ */
	oldAllowSystemTableMods = allowSystemTableMods;
	allowSystemTableMods = true;

	/* ensure we're allowed to create this schema */
	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);

	/* actually create schema, and make it visible */
	CreateSchemaCommand(createSchemaStmt, queryString);
	CommandCounterIncrement();

	/* and reset environment */
	SetUserIdAndSecContext(savedUserId, savedSecurityContext);
	allowSystemTableMods = oldAllowSystemTableMods;
}
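The same save-and-restore pattern around GetUserIdAndSecContext()/SetUserIdAndSecContext() recurs throughout the examples below. A minimal caller sketch for CreateJobSchema follows; the JOB_SCHEMA_PREFIX value and the schema-name format are assumptions for illustration, not taken from the excerpt above.

#include "postgres.h"
#include "lib/stringinfo.h"

/* hypothetical prefix; pg_-prefixed names normally require allowSystemTableMods */
#define JOB_SCHEMA_PREFIX "pg_merge_job_"

static void
CreateSchemaForJob(uint64 jobId)
{
	StringInfo jobSchemaName = makeStringInfo();

	appendStringInfo(jobSchemaName, "%s" UINT64_FORMAT, JOB_SCHEMA_PREFIX, jobId);

	/* creates the pg_-prefixed schema as the extension owner */
	CreateJobSchema(jobSchemaName);
}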
Example #2
/*
 * GetNextPlacementId allocates and returns a unique placementId for
 * the placement to be created. This allocation occurs both in shared memory
 * and in write ahead logs; writing to logs avoids the risk of having
 * placementId collisions.
 *
 * NB: This can be called by any user; for now we have decided that that is
 * acceptable. We might want to restrict this to members of a specific role
 * at some later point.
 */
uint64
GetNextPlacementId(void)
{
	text *sequenceName = NULL;
	Oid sequenceId = InvalidOid;
	Datum sequenceIdDatum = 0;
	Oid savedUserId = InvalidOid;
	int savedSecurityContext = 0;
	Datum placementIdDatum = 0;
	uint64 placementId = 0;

	/*
	 * In regression tests, we would like to generate placement IDs consistently
	 * even if the tests run in parallel. Instead of the sequence, we can use
	 * the next_placement_id GUC to specify which placement ID the current
	 * session should generate next. The GUC is automatically increased by 1
	 * every time a new placement ID is generated.
	 */
	if (NextPlacementId > 0)
	{
		placementId = NextPlacementId;
		NextPlacementId += 1;

		return placementId;
	}

	sequenceName = cstring_to_text(PLACEMENTID_SEQUENCE_NAME);
	sequenceId = ResolveRelationId(sequenceName);
	sequenceIdDatum = ObjectIdGetDatum(sequenceId);

	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);

	/* generate new and unique placement id from sequence */
	placementIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);

	SetUserIdAndSecContext(savedUserId, savedSecurityContext);

	placementId = DatumGetInt64(placementIdDatum);

	return placementId;
}
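For context, the NextPlacementId override described in the comment would be backed by a custom GUC. A sketch of how such a GUC might be registered is below; the GUC name, flags, and function name are assumptions, not taken from the excerpt above.

#include "postgres.h"

#include <limits.h>

#include "utils/guc.h"

/* session-level override consulted by GetNextPlacementId(); 0 disables it */
int NextPlacementId = 0;

static void
RegisterNextPlacementIdGuc(void)
{
	DefineCustomIntVariable("citus.next_placement_id",
							"Set the next placement ID to use in placement creation.",
							"Placement IDs are normally generated from a sequence; a "
							"non-zero value makes the session use this counter instead.",
							&NextPlacementId,
							0, 0, INT_MAX,
							PGC_USERSET,
							GUC_NO_SHOW_ALL,
							NULL, NULL, NULL);
}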
Example #3
/*
 * master_get_new_shardid allocates and returns a unique shardId for the shard
 * to be created. This allocation occurs both in shared memory and in write
 * ahead logs; writing to logs avoids the risk of having shardId collisions.
 *
 * Please note that the caller is still responsible for finalizing shard data
 * and the shardId with the master node. Further note that this function relies
 * on an internal sequence created in initdb to generate unique identifiers.
 *
 * NB: This can be called by any user; for now we have decided that that is
 * acceptable. We might want to restrict this to members of a specific role
 * at some later point.
 */
Datum
master_get_new_shardid(PG_FUNCTION_ARGS)
{
	text *sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME);
	Oid sequenceId = ResolveRelationId(sequenceName);
	Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId);
	Oid savedUserId = InvalidOid;
	int savedSecurityContext = 0;
	Datum shardIdDatum = 0;

	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);

	/* generate new and unique shardId from sequence */
	shardIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);

	SetUserIdAndSecContext(savedUserId, savedSecurityContext);

	PG_RETURN_DATUM(shardIdDatum);
}
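Since master_get_new_shardid uses the fmgr calling convention (PG_FUNCTION_ARGS / PG_RETURN_DATUM), it is meant to be invoked from SQL. A minimal exposure sketch follows; the SQL signature shown in the comment is an assumption, not part of the excerpt.

#include "postgres.h"
#include "fmgr.h"

/*
 * Declare the V1 call convention so the function can be bound from SQL, e.g.:
 *   CREATE FUNCTION master_get_new_shardid() RETURNS bigint
 *   AS 'MODULE_PATHNAME', 'master_get_new_shardid' LANGUAGE C STRICT;
 */
PG_FUNCTION_INFO_V1(master_get_new_shardid);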
Example #4
/*
 * CREATE SCHEMA
 */
Oid
CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
{
	const char *schemaName = stmt->schemaname;
	Oid			namespaceId;
	OverrideSearchPath *overridePath;
	List	   *parsetree_list;
	ListCell   *parsetree_item;
	Oid			owner_uid;
	Oid			saved_uid;
	int			save_sec_context;
	AclResult	aclresult;
	ObjectAddress address;

	GetUserIdAndSecContext(&saved_uid, &save_sec_context);

	/*
	 * Who is supposed to own the new schema?
	 */
	if (stmt->authrole)
		owner_uid = get_rolespec_oid(stmt->authrole, false);
	else
		owner_uid = saved_uid;

	/* fill schema name with the user name if not specified */
	if (!schemaName)
	{
		HeapTuple	tuple;

		tuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(owner_uid));
		if (!HeapTupleIsValid(tuple))
			elog(ERROR, "cache lookup failed for role %u", owner_uid);
		schemaName =
			pstrdup(NameStr(((Form_pg_authid) GETSTRUCT(tuple))->rolname));
		ReleaseSysCache(tuple);
	}

	/*
	 * To create a schema, must have schema-create privilege on the current
	 * database and must be able to become the target role (this does not
	 * imply that the target role itself must have create-schema privilege).
	 * The latter provision guards against "giveaway" attacks.  Note that a
	 * superuser will always have both of these privileges a fortiori.
	 */
	aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_DATABASE,
					   get_database_name(MyDatabaseId));

	check_is_member_of_role(saved_uid, owner_uid);

	/* Additional check to protect reserved schema names */
	if (!allowSystemTableMods && IsReservedName(schemaName))
		ereport(ERROR,
				(errcode(ERRCODE_RESERVED_NAME),
				 errmsg("unacceptable schema name \"%s\"", schemaName),
		   errdetail("The prefix \"pg_\" is reserved for system schemas.")));

	/*
	 * If if_not_exists was given and the schema already exists, bail out.
	 * (Note: we needn't check this when not if_not_exists, because
	 * NamespaceCreate will complain anyway.)  We could do this before making
	 * the permissions checks, but since CREATE TABLE IF NOT EXISTS makes its
	 * creation-permission check first, we do likewise.
	 */
	if (stmt->if_not_exists &&
		SearchSysCacheExists1(NAMESPACENAME, PointerGetDatum(schemaName)))
	{
		ereport(NOTICE,
				(errcode(ERRCODE_DUPLICATE_SCHEMA),
				 errmsg("schema \"%s\" already exists, skipping",
						schemaName)));
		return InvalidOid;
	}

	/*
	 * If the requested authorization is different from the current user,
	 * temporarily set the current user so that the object(s) will be created
	 * with the correct ownership.
	 *
	 * (The setting will be restored at the end of this routine, or in case of
	 * error, transaction abort will clean things up.)
	 */
	if (saved_uid != owner_uid)
		SetUserIdAndSecContext(owner_uid,
							save_sec_context | SECURITY_LOCAL_USERID_CHANGE);

	/* Create the schema's namespace */
	namespaceId = NamespaceCreate(schemaName, owner_uid, false);

	/* Advance cmd counter to make the namespace visible */
	CommandCounterIncrement();

	/*
	 * Temporarily make the new namespace be the front of the search path, as
	 * well as the default creation target namespace.  This will be undone at
	 * the end of this routine, or upon error.
	 */
	overridePath = GetOverrideSearchPath(CurrentMemoryContext);
	overridePath->schemas = lcons_oid(namespaceId, overridePath->schemas);
	/* XXX should we clear overridePath->useTemp? */
	PushOverrideSearchPath(overridePath);

	/*
	 * Report the new schema to possibly interested event triggers.  Note we
	 * must do this here and not in ProcessUtilitySlow because otherwise the
	 * objects created below are reported before the schema, which would be
	 * wrong.
	 */
	ObjectAddressSet(address, NamespaceRelationId, namespaceId);
	EventTriggerCollectSimpleCommand(address, InvalidObjectAddress,
									 (Node *) stmt);

	/*
	 * Examine the list of commands embedded in the CREATE SCHEMA command, and
	 * reorganize them into a sequentially executable order with no forward
	 * references.  Note that the result is still a list of raw parsetrees ---
	 * we cannot, in general, run parse analysis on one statement until we
	 * have actually executed the prior ones.
	 */
	parsetree_list = transformCreateSchemaStmt(stmt);

	/*
	 * Execute each command contained in the CREATE SCHEMA.  Since the grammar
	 * allows only utility commands in CREATE SCHEMA, there is no need to pass
	 * them through parse_analyze() or the rewriter; we can just hand them
	 * straight to ProcessUtility.
	 */
	foreach(parsetree_item, parsetree_list)
	{
		Node	   *stmt = (Node *) lfirst(parsetree_item);

		/* do this step */
		ProcessUtility(stmt,
					   queryString,
					   PROCESS_UTILITY_SUBCOMMAND,
					   NULL,
					   None_Receiver,
					   NULL);
		/* make sure later steps can see the object created here */
		CommandCounterIncrement();
	}
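The excerpt above ends inside the subcommand loop. The upstream routine typically finishes by popping the temporary search path and restoring the caller's identity, roughly as sketched below; this tail is not part of the excerpt.

	/* reset search_path to its former state */
	PopOverrideSearchPath();

	/* restore the saved user id and security context */
	SetUserIdAndSecContext(saved_uid, save_sec_context);

	return namespaceId;
}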
Example #5
#ifdef PGXC
void
CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
					bool sentToRemote)
#else
void
CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
#endif
{
	const char *schemaName = stmt->schemaname;
	const char *authId = stmt->authid;
	Oid			namespaceId;
	OverrideSearchPath *overridePath;
	List	   *parsetree_list;
	ListCell   *parsetree_item;
	Oid			owner_uid;
	Oid			saved_uid;
	int			save_sec_context;
	AclResult	aclresult;

	GetUserIdAndSecContext(&saved_uid, &save_sec_context);

	/*
	 * Who is supposed to own the new schema?
	 */
	if (authId)
		owner_uid = get_role_oid(authId, false);
	else
		owner_uid = saved_uid;

	/*
	 * To create a schema, must have schema-create privilege on the current
	 * database and must be able to become the target role (this does not
	 * imply that the target role itself must have create-schema privilege).
	 * The latter provision guards against "giveaway" attacks.	Note that a
	 * superuser will always have both of these privileges a fortiori.
	 */
	aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_DATABASE,
					   get_database_name(MyDatabaseId));

	check_is_member_of_role(saved_uid, owner_uid);

	/* Additional check to protect reserved schema names */
	if (!allowSystemTableMods && IsReservedName(schemaName))
		ereport(ERROR,
				(errcode(ERRCODE_RESERVED_NAME),
				 errmsg("unacceptable schema name \"%s\"", schemaName),
		   errdetail("The prefix \"pg_\" is reserved for system schemas.")));

	/*
	 * If the requested authorization is different from the current user,
	 * temporarily set the current user so that the object(s) will be created
	 * with the correct ownership.
	 *
	 * (The setting will be restored at the end of this routine, or in case of
	 * error, transaction abort will clean things up.)
	 */
	if (saved_uid != owner_uid)
		SetUserIdAndSecContext(owner_uid,
							save_sec_context | SECURITY_LOCAL_USERID_CHANGE);

	/* Create the schema's namespace */
	namespaceId = NamespaceCreate(schemaName, owner_uid, false);

	/* Advance cmd counter to make the namespace visible */
	CommandCounterIncrement();

	/*
	 * Temporarily make the new namespace be the front of the search path, as
	 * well as the default creation target namespace.  This will be undone at
	 * the end of this routine, or upon error.
	 */
	overridePath = GetOverrideSearchPath(CurrentMemoryContext);
	overridePath->schemas = lcons_oid(namespaceId, overridePath->schemas);
	/* XXX should we clear overridePath->useTemp? */
	PushOverrideSearchPath(overridePath);

	/*
	 * Examine the list of commands embedded in the CREATE SCHEMA command, and
	 * reorganize them into a sequentially executable order with no forward
	 * references.	Note that the result is still a list of raw parsetrees ---
	 * we cannot, in general, run parse analysis on one statement until we
	 * have actually executed the prior ones.
	 */
	parsetree_list = transformCreateSchemaStmt(stmt);

#ifdef PGXC
	/*
	 * Add a RemoteQuery node for a query at top level on a remote Coordinator,
	 * if not done already.
	 */
	if (!sentToRemote)
		parsetree_list = AddRemoteQueryNode(parsetree_list, queryString,
											EXEC_ON_ALL_NODES, false);
#endif

	/*
	 * Execute each command contained in the CREATE SCHEMA.  Since the grammar
	 * allows only utility commands in CREATE SCHEMA, there is no need to pass
	 * them through parse_analyze() or the rewriter; we can just hand them
	 * straight to ProcessUtility.
	 */
	foreach(parsetree_item, parsetree_list)
	{
		Node	   *stmt = (Node *) lfirst(parsetree_item);

		/* do this step */
		ProcessUtility(stmt,
					   queryString,
					   NULL,
					   false,	/* not top level */
					   None_Receiver,
#ifdef PGXC
					   true,
#endif /* PGXC */
					   NULL);
		/* make sure later steps can see the object created here */
		CommandCounterIncrement();
	}
Example #6
File: schemacmds.c  Project: royc1/gpdb
/*
 * CREATE SCHEMA
 */
void
CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
{
    const char *schemaName = stmt->schemaname;
    const char *authId = stmt->authid;
    Oid			namespaceId;
    OverrideSearchPath *overridePath;
    List	   *parsetree_list;
    ListCell   *parsetree_item;
    Oid			owner_uid;
    Oid			saved_uid;
    int			save_sec_context;
    AclResult	aclresult;
    bool		shouldDispatch = (Gp_role == GP_ROLE_DISPATCH &&
                                  !IsBootstrapProcessingMode());

    /*
     * GPDB: Creation of temporary namespaces is a special case. This statement
     * is dispatched by the dispatcher node the first time a temporary table is
     * created. It bypasses all the normal checks and logic of schema creation,
     * and is routed to the internal routine for creating temporary namespaces,
     * instead.
     */
    if (stmt->istemp)
    {
        Assert(Gp_role == GP_ROLE_EXECUTE);

        Assert(stmt->schemaname == NULL);
        Assert(stmt->authid == NULL);
        Assert(stmt->schemaElts == NIL);
        Assert(stmt->schemaOid != InvalidOid);
        Assert(stmt->toastSchemaOid != InvalidOid);

        InitTempTableNamespaceWithOids(stmt->schemaOid,
                                       stmt->toastSchemaOid);
        return;
    }

    GetUserIdAndSecContext(&saved_uid, &save_sec_context);

    /*
     * Who is supposed to own the new schema?
     */
    if (authId)
        owner_uid = get_roleid_checked(authId);
    else
        owner_uid = saved_uid;

    /*
     * To create a schema, must have schema-create privilege on the current
     * database and must be able to become the target role (this does not
     * imply that the target role itself must have create-schema privilege).
     * The latter provision guards against "giveaway" attacks.	Note that a
     * superuser will always have both of these privileges a fortiori.
     */
    aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
    if (aclresult != ACLCHECK_OK)
        aclcheck_error(aclresult, ACL_KIND_DATABASE,
                       get_database_name(MyDatabaseId));

    check_is_member_of_role(saved_uid, owner_uid);

    /* Additional check to protect reserved schema names */
    if (!allowSystemTableModsDDL && IsReservedName(schemaName))
    {
        ereport(ERROR,
                (errcode(ERRCODE_RESERVED_NAME),
                 errmsg("unacceptable schema name \"%s\"", schemaName),
                 errdetail("The prefix \"%s\" is reserved for system schemas.",
                           GetReservedPrefix(schemaName))));
    }

    /*
     * If the requested authorization is different from the current user,
     * temporarily set the current user so that the object(s) will be created
     * with the correct ownership.
     *
     * (The setting will be restored at the end of this routine, or in case
     * of error, transaction abort will clean things up.)
     */
    if (saved_uid != owner_uid)
        SetUserIdAndSecContext(owner_uid,
                               save_sec_context | SECURITY_LOCAL_USERID_CHANGE);

    /* Create the schema's namespace */
    if (shouldDispatch || Gp_role != GP_ROLE_EXECUTE)
    {
        namespaceId = NamespaceCreate(schemaName, owner_uid, 0);

        if (shouldDispatch)
        {
            elog(DEBUG5, "shouldDispatch = true, namespaceOid = %d", namespaceId);

            Assert(stmt->schemaOid == 0);
            stmt->schemaOid = namespaceId;

            /*
             * Dispatch the command to all primary and mirror segment dbs.
             * Starts a global transaction and reconfigures cluster if needed.
             * Waits for QEs to finish.  Exits via ereport(ERROR,...) if error.
             */
            CdbDispatchUtilityStatement((Node *) stmt,
                                        DF_CANCEL_ON_ERROR |
                                        DF_WITH_SNAPSHOT |
                                        DF_NEED_TWO_PHASE,
                                        NULL);
        }

        /* MPP-6929: metadata tracking */
        if (Gp_role == GP_ROLE_DISPATCH)
            MetaTrackAddObject(NamespaceRelationId,
                               namespaceId,
                               saved_uid,
                               "CREATE", "SCHEMA"
                              );
    }
    else
    {
        namespaceId = NamespaceCreate(schemaName, owner_uid, stmt->schemaOid);
    }

    /* Advance cmd counter to make the namespace visible */
    CommandCounterIncrement();

    /*
     * Temporarily make the new namespace be the front of the search path, as
     * well as the default creation target namespace.  This will be undone at
     * the end of this routine, or upon error.
     */
    overridePath = GetOverrideSearchPath(CurrentMemoryContext);
    overridePath->schemas = lcons_oid(namespaceId, overridePath->schemas);
    /* XXX should we clear overridePath->useTemp? */
    PushOverrideSearchPath(overridePath);

    /*
     * Examine the list of commands embedded in the CREATE SCHEMA command, and
     * reorganize them into a sequentially executable order with no forward
     * references.	Note that the result is still a list of raw parsetrees ---
     * we cannot, in general, run parse analysis on one statement until we
     * have actually executed the prior ones.
     */
    parsetree_list = transformCreateSchemaStmt(stmt);

    /*
     * Execute each command contained in the CREATE SCHEMA.  Since the grammar
     * allows only utility commands in CREATE SCHEMA, there is no need to pass
     * them through parse_analyze() or the rewriter; we can just hand them
     * straight to ProcessUtility.
     */
    foreach(parsetree_item, parsetree_list)
    {
        Node	   *stmt = (Node *) lfirst(parsetree_item);

        /* do this step */
        ProcessUtility(stmt,
                       queryString,
                       NULL,
                       false,	/* not top level */
                       None_Receiver,
                       NULL);
        /* make sure later steps can see the object created here */
        CommandCounterIncrement();
    }
Example #7
/*
 * ExecCreateTableAs -- execute a CREATE TABLE AS command
 */
void
ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
				  ParamListInfo params, char *completionTag)
{
	Query	   *query = (Query *) stmt->query;
	IntoClause *into = stmt->into;
	bool		is_matview = (into->viewQuery != NULL);
	DestReceiver *dest;
	Oid			save_userid = InvalidOid;
	int			save_sec_context = 0;
	int			save_nestlevel = 0;
	List	   *rewritten;
	PlannedStmt *plan;
	QueryDesc  *queryDesc;
	ScanDirection dir;

	/*
	 * Create the tuple receiver object and insert info it will need
	 */
	dest = CreateIntoRelDestReceiver(into);

	/*
	 * The contained Query could be a SELECT, or an EXECUTE utility command.
	 * If the latter, we just pass it off to ExecuteQuery.
	 */
	Assert(IsA(query, Query));
	if (query->commandType == CMD_UTILITY &&
		IsA(query->utilityStmt, ExecuteStmt))
	{
		ExecuteStmt *estmt = (ExecuteStmt *) query->utilityStmt;

		Assert(!is_matview);	/* excluded by syntax */
		ExecuteQuery(estmt, into, queryString, params, dest, completionTag);

		return;
	}
	Assert(query->commandType == CMD_SELECT);

	/*
	 * For materialized views, lock down security-restricted operations and
	 * arrange to make GUC variable changes local to this command.  This is
	 * not necessary for security, but this keeps the behavior similar to
	 * REFRESH MATERIALIZED VIEW.  Otherwise, one could create a materialized
	 * view not possible to refresh.
	 */
	if (is_matview)
	{
		GetUserIdAndSecContext(&save_userid, &save_sec_context);
		SetUserIdAndSecContext(save_userid,
						   save_sec_context | SECURITY_RESTRICTED_OPERATION);
		save_nestlevel = NewGUCNestLevel();
	}

	/*
	 * Parse analysis was done already, but we still have to run the rule
	 * rewriter.  We do not do AcquireRewriteLocks: we assume the query either
	 * came straight from the parser, or suitable locks were acquired by
	 * plancache.c.
	 *
	 * Because the rewriter and planner tend to scribble on the input, we make
	 * a preliminary copy of the source querytree.  This prevents problems in
	 * the case that CTAS is in a portal or plpgsql function and is executed
	 * repeatedly.  (See also the same hack in EXPLAIN and PREPARE.)
	 */
	rewritten = QueryRewrite((Query *) copyObject(query));

	/* SELECT should never rewrite to more or less than one SELECT query */
	if (list_length(rewritten) != 1)
		elog(ERROR, "unexpected rewrite result for CREATE TABLE AS SELECT");
	query = (Query *) linitial(rewritten);
	Assert(query->commandType == CMD_SELECT);

	/* plan the query */
	plan = pg_plan_query(query, 0, params);

	/*
	 * Use a snapshot with an updated command ID to ensure this query sees
	 * results of any previously executed queries.  (This could only matter if
	 * the planner executed an allegedly-stable function that changed the
	 * database contents, but let's do it anyway to be parallel to the EXPLAIN
	 * code path.)
	 */
	PushCopiedSnapshot(GetActiveSnapshot());
	UpdateActiveSnapshotCommandId();

	/* Create a QueryDesc, redirecting output to our tuple receiver */
	queryDesc = CreateQueryDesc(plan, queryString,
								GetActiveSnapshot(), InvalidSnapshot,
								dest, params, 0);

	/* call ExecutorStart to prepare the plan for execution */
	ExecutorStart(queryDesc, GetIntoRelEFlags(into));

	/*
	 * Normally, we run the plan to completion; but if skipData is specified,
	 * just do tuple receiver startup and shutdown.
	 */
	if (into->skipData)
		dir = NoMovementScanDirection;
	else
		dir = ForwardScanDirection;

	/* run the plan */
	ExecutorRun(queryDesc, dir, 0L);

	/* save the rowcount if we're given a completionTag to fill */
	if (completionTag)
		snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
				 "SELECT %u", queryDesc->estate->es_processed);

	/* and clean up */
	ExecutorFinish(queryDesc);
	ExecutorEnd(queryDesc);

	FreeQueryDesc(queryDesc);

	PopActiveSnapshot();

	if (is_matview)
	{
		/* Roll back any GUC changes */
		AtEOXact_GUC(false, save_nestlevel);

		/* Restore userid and security context */
		SetUserIdAndSecContext(save_userid, save_sec_context);
	}
}
Example #8
File: matview.c  Project: qowldi/pg
/*
 * ExecRefreshMatView -- execute a REFRESH MATERIALIZED VIEW command
 *
 * This refreshes the materialized view by creating a new table and swapping
 * the relfilenodes of the new table and the old materialized view, so the OID
 * of the original materialized view is preserved. Thus we do not lose GRANT
 * nor references to this materialized view.
 *
 * If WITH NO DATA was specified, this is effectively like a TRUNCATE;
 * otherwise it is like a TRUNCATE followed by an INSERT using the SELECT
 * statement associated with the materialized view.  The statement node's
 * skipData field shows whether the clause was used.
 *
 * Indexes are rebuilt too, via REINDEX. Since we are effectively bulk-loading
 * the new heap, it's better to create the indexes afterwards than to fill them
 * incrementally while we load.
 *
 * The matview's "populated" state is changed based on whether the contents
 * reflect the result set of the materialized view's query.
 */
void
ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
				   ParamListInfo params, char *completionTag)
{
	Oid			matviewOid;
	Relation	matviewRel;
	RewriteRule *rule;
	List	   *actions;
	Query	   *dataQuery;
	Oid			tableSpace;
	Oid			relowner;
	Oid			OIDNewHeap;
	DestReceiver *dest;
	bool		concurrent;
	LOCKMODE	lockmode;
	Oid			save_userid;
	int			save_sec_context;
	int			save_nestlevel;

	/* Determine strength of lock needed. */
	concurrent = stmt->concurrent;
	lockmode = concurrent ? ExclusiveLock : AccessExclusiveLock;

	/*
	 * Get a lock until end of transaction.
	 */
	matviewOid = RangeVarGetRelidExtended(stmt->relation,
										  lockmode, false, false,
										  RangeVarCallbackOwnsTable, NULL);
	matviewRel = heap_open(matviewOid, NoLock);

	/* Make sure it is a materialized view. */
	if (matviewRel->rd_rel->relkind != RELKIND_MATVIEW)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("\"%s\" is not a materialized view",
						RelationGetRelationName(matviewRel))));

	/* Check that CONCURRENTLY is not specified if not populated. */
	if (concurrent && !RelationIsPopulated(matviewRel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("CONCURRENTLY cannot be used when the materialized view is not populated")));

	/* Check that conflicting options have not been specified. */
	if (concurrent && stmt->skipData)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("CONCURRENTLY and WITH NO DATA options cannot be used together")));

	/* We don't allow an oid column for a materialized view. */
	Assert(!matviewRel->rd_rel->relhasoids);

	/*
	 * Check that everything is correct for a refresh. Problems at this point
	 * are internal errors, so elog is sufficient.
	 */
	if (matviewRel->rd_rel->relhasrules == false ||
		matviewRel->rd_rules->numLocks < 1)
		elog(ERROR,
			 "materialized view \"%s\" is missing rewrite information",
			 RelationGetRelationName(matviewRel));

	if (matviewRel->rd_rules->numLocks > 1)
		elog(ERROR,
			 "materialized view \"%s\" has too many rules",
			 RelationGetRelationName(matviewRel));

	rule = matviewRel->rd_rules->rules[0];
	if (rule->event != CMD_SELECT || !(rule->isInstead))
		elog(ERROR,
			 "the rule for materialized view \"%s\" is not a SELECT INSTEAD OF rule",
			 RelationGetRelationName(matviewRel));

	actions = rule->actions;
	if (list_length(actions) != 1)
		elog(ERROR,
			 "the rule for materialized view \"%s\" is not a single action",
			 RelationGetRelationName(matviewRel));

	/*
	 * The stored query was rewritten at the time of the MV definition, but
	 * has not been scribbled on by the planner.
	 */
	dataQuery = (Query *) linitial(actions);
	Assert(IsA(dataQuery, Query));

	/*
	 * Check for active uses of the relation in the current transaction, such
	 * as open scans.
	 *
	 * NB: We count on this to protect us against problems with refreshing the
	 * data using HEAP_INSERT_FROZEN.
	 */
	CheckTableNotInUse(matviewRel, "REFRESH MATERIALIZED VIEW");

	/*
	 * Tentatively mark the matview as populated or not (this will roll back
	 * if we fail later).
	 */
	SetMatViewPopulatedState(matviewRel, !stmt->skipData);

	relowner = matviewRel->rd_rel->relowner;

	/*
	 * Switch to the owner's userid, so that any functions are run as that
	 * user.  Also arrange to make GUC variable changes local to this command.
	 * Don't lock it down too tight to create a temporary table just yet.  We
	 * will switch modes when we are about to execute user code.
	 */
	GetUserIdAndSecContext(&save_userid, &save_sec_context);
	SetUserIdAndSecContext(relowner,
						   save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
	save_nestlevel = NewGUCNestLevel();

	/* Concurrent refresh builds new data in temp tablespace, and does diff. */
	if (concurrent)
		tableSpace = GetDefaultTablespace(RELPERSISTENCE_TEMP);
	else
		tableSpace = matviewRel->rd_rel->reltablespace;

	/*
	 * Create the transient table that will receive the regenerated data. Lock
	 * it against access by any other process until commit (by which time it
	 * will be gone).
	 */
	OIDNewHeap = make_new_heap(matviewOid, tableSpace, concurrent,
							   ExclusiveLock);
	LockRelationOid(OIDNewHeap, AccessExclusiveLock);
	dest = CreateTransientRelDestReceiver(OIDNewHeap);

	/*
	 * Now lock down security-restricted operations.
	 */
	SetUserIdAndSecContext(relowner,
						   save_sec_context | SECURITY_RESTRICTED_OPERATION);

	/* Generate the data, if wanted. */
	if (!stmt->skipData)
		refresh_matview_datafill(dest, dataQuery, queryString);

	heap_close(matviewRel, NoLock);

	/* Make the matview match the newly generated data. */
	if (concurrent)
	{
		int			old_depth = matview_maintenance_depth;

		PG_TRY();
		{
			refresh_by_match_merge(matviewOid, OIDNewHeap, relowner,
								   save_sec_context);
		}
		PG_CATCH();
		{
			matview_maintenance_depth = old_depth;
			PG_RE_THROW();
		}
		PG_END_TRY();
		Assert(matview_maintenance_depth == old_depth);
	}
	else
		refresh_by_heap_swap(matviewOid, OIDNewHeap);

	/* Roll back any GUC changes */
	AtEOXact_GUC(false, save_nestlevel);

	/* Restore userid and security context */
	SetUserIdAndSecContext(save_userid, save_sec_context);
}
Example #9
/*
 * refresh_matview_datafill
 *
 * Execute the given query as the materialized view's owner and send the
 * result rows to "dest", which inserts them into the target heap.
 */
static void
refresh_matview_datafill(DestReceiver *dest, Query *query,
						 const char *queryString, Oid relowner)
{
	List	   *rewritten;
	PlannedStmt *plan;
	QueryDesc  *queryDesc;
	Oid			save_userid;
	int			save_sec_context;
	int			save_nestlevel;
	Query	   *copied_query;

	/*
	 * Switch to the owner's userid, so that any functions are run as that
	 * user.  Also lock down security-restricted operations and arrange to
	 * make GUC variable changes local to this command.
	 */
	GetUserIdAndSecContext(&save_userid, &save_sec_context);
	SetUserIdAndSecContext(relowner,
						   save_sec_context | SECURITY_RESTRICTED_OPERATION);
	save_nestlevel = NewGUCNestLevel();

	/* Lock and rewrite, using a copy to preserve the original query. */
	copied_query = copyObject(query);
	AcquireRewriteLocks(copied_query, true, false);
	rewritten = QueryRewrite(copied_query);

	/* SELECT should never rewrite to more or less than one SELECT query */
	if (list_length(rewritten) != 1)
		elog(ERROR, "unexpected rewrite result for REFRESH MATERIALIZED VIEW");
	query = (Query *) linitial(rewritten);

	/* Check for user-requested abort. */
	CHECK_FOR_INTERRUPTS();

	/* Plan the query which will generate data for the refresh. */
	plan = pg_plan_query(query, 0, NULL);

	/*
	 * Use a snapshot with an updated command ID to ensure this query sees
	 * results of any previously executed queries.  (This could only matter if
	 * the planner executed an allegedly-stable function that changed the
	 * database contents, but let's do it anyway to be safe.)
	 */
	PushCopiedSnapshot(GetActiveSnapshot());
	UpdateActiveSnapshotCommandId();

	/* Create a QueryDesc, redirecting output to our tuple receiver */
	queryDesc = CreateQueryDesc(plan, queryString,
								GetActiveSnapshot(), InvalidSnapshot,
								dest, NULL, 0);

	/* call ExecutorStart to prepare the plan for execution */
	ExecutorStart(queryDesc, EXEC_FLAG_WITHOUT_OIDS);

	/* run the plan */
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);

	/* and clean up */
	ExecutorFinish(queryDesc);
	ExecutorEnd(queryDesc);

	FreeQueryDesc(queryDesc);

	PopActiveSnapshot();

	/* Roll back any GUC changes */
	AtEOXact_GUC(false, save_nestlevel);

	/* Restore userid and security context */
	SetUserIdAndSecContext(save_userid, save_sec_context);
}
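With the extra relowner parameter, the owner switch happens inside the helper, so a caller would pass the view's owner explicitly; roughly as below, with variable names assumed from the surrounding examples.

	/* run the data-generating query as the materialized view's owner */
	refresh_matview_datafill(dest, dataQuery, queryString,
							 matviewRel->rd_rel->relowner);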
Example #10
/*
 * Get any row security quals and check quals that should be applied to the
 * specified RTE.
 *
 * In addition, hasRowSecurity is set to true if row level security is enabled
 * (even if this RTE doesn't have any row security quals), and hasSubLinks is
 * set to true if any of the quals returned contain sublinks.
 */
void
get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
						  int rt_index, List **securityQuals,
						  List **withCheckOptions, bool *hasRowSecurity,
						  bool *hasSubLinks)
{
	Expr			   *rowsec_expr = NULL;
	Expr			   *rowsec_with_check_expr = NULL;
	Expr			   *hook_expr_restrictive = NULL;
	Expr			   *hook_with_check_expr_restrictive = NULL;
	Expr			   *hook_expr_permissive = NULL;
	Expr			   *hook_with_check_expr_permissive = NULL;

	List			   *rowsec_policies;
	List			   *hook_policies_restrictive = NIL;
	List			   *hook_policies_permissive = NIL;

	Relation 			rel;
	Oid					user_id;
	int					sec_context;
	int					rls_status;
	bool				defaultDeny = false;

	/* Defaults for the return values */
	*securityQuals = NIL;
	*withCheckOptions = NIL;
	*hasRowSecurity = false;
	*hasSubLinks = false;

	/* This is just to get the security context */
	GetUserIdAndSecContext(&user_id, &sec_context);

	/* Switch to checkAsUser if it's set */
	user_id = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * If this is not a normal relation, or we have been told
	 * to explicitly skip RLS (perhaps because this is an FK check)
	 * then just return immediately.
	 */
	if (rte->relid < FirstNormalObjectId
		|| rte->relkind != RELKIND_RELATION
		|| (sec_context & SECURITY_ROW_LEVEL_DISABLED))
		return;

	/* Determine the state of RLS for this, pass checkAsUser explicitly */
	rls_status = check_enable_rls(rte->relid, rte->checkAsUser, false);

	/* If there is no RLS on this table at all, nothing to do */
	if (rls_status == RLS_NONE)
		return;

	/*
	 * RLS_NONE_ENV means we are not doing any RLS now, but that may change
	 * with changes to the environment, so we mark it as hasRowSecurity to
	 * force a re-plan when the environment changes.
	 */
	if (rls_status == RLS_NONE_ENV)
	{
		/*
		 * Indicate that this query may involve RLS and must therefore
		 * be replanned if the environment changes (GUCs, role), but we
		 * are not adding anything here.
		 */
		*hasRowSecurity = true;

		return;
	}

	/* Grab the built-in policies which should be applied to this relation. */
	rel = heap_open(rte->relid, NoLock);

	rowsec_policies = pull_row_security_policies(commandType, rel,
												 user_id);

	/*
	 * Check if this is only the default-deny policy.
	 *
	 * Normally, if the table has row security enabled but there are
	 * no policies, we use a default-deny policy and not allow anything.
	 * However, when an extension uses the hook to add their own
	 * policies, we don't want to include the default deny policy or
	 * there won't be any way for a user to use an extension exclusively
	 * for the policies to be used.
	 */
	if (((RowSecurityPolicy *) linitial(rowsec_policies))->policy_id
			== InvalidOid)
		defaultDeny = true;

	/* Now that we have our policies, build the expressions from them. */
	process_policies(root, rowsec_policies, rt_index, &rowsec_expr,
					 &rowsec_with_check_expr, hasSubLinks, OR_EXPR);

	/*
	 * Also, allow extensions to add their own policies.
	 *
	 * Extensions can add either permissive or restrictive policies.
	 *
	 * Note that, as with the internal policies, if multiple policies are
	 * returned then they will be combined into a single expression with
	 * all of them OR'd (for permissive) or AND'd (for restrictive) together.
	 *
	 * If only a USING policy is returned by the extension then it will be
	 * used for WITH CHECK as well, similar to how internal policies are
	 * handled.
	 *
	 * The only caveat to this is that if there are NO internal policies
	 * defined, there ARE policies returned by the extension, and RLS is
	 * enabled on the table, then we will ignore the internally-generated
	 * default-deny policy and use only the policies returned by the
	 * extension.
	 */
	if (row_security_policy_hook_restrictive)
	{
		hook_policies_restrictive = (*row_security_policy_hook_restrictive)(commandType, rel);

		/* Build the expression from any policies returned. */
		if (hook_policies_restrictive != NIL)
			process_policies(root, hook_policies_restrictive, rt_index,
							 &hook_expr_restrictive,
							 &hook_with_check_expr_restrictive,
							 hasSubLinks,
							 AND_EXPR);
	}

	if (row_security_policy_hook_permissive)
	{
		hook_policies_permissive = (*row_security_policy_hook_permissive)(commandType, rel);

		/* Build the expression from any policies returned. */
		if (hook_policies_permissive != NIL)
			process_policies(root, hook_policies_permissive, rt_index,
							 &hook_expr_permissive,
							 &hook_with_check_expr_permissive, hasSubLinks,
							 OR_EXPR);
	}

	/*
	 * If the only built-in policy is the default-deny one, and hook
	 * policies exist, then use the hook policies only and do not apply
	 * the default-deny policy.  Otherwise, we will apply both sets below.
	 */
	if (defaultDeny &&
		(hook_policies_restrictive != NIL || hook_policies_permissive != NIL))
	{
		rowsec_expr = NULL;
		rowsec_with_check_expr = NULL;
	}

	/*
	 * For INSERT or UPDATE, we need to add the WITH CHECK quals to
	 * Query's withCheckOptions to verify that any new records pass the
	 * WITH CHECK policy (this will be a copy of the USING policy, if no
	 * explicit WITH CHECK policy exists).
	 */
	if (commandType == CMD_INSERT || commandType == CMD_UPDATE)
	{
		/*
		 * WITH CHECK OPTIONS wants a WCO node which wraps each Expr, so
		 * create them as necessary.
		 */

		/*
		 * Handle any restrictive policies first.
		 *
		 * They can simply be added.
		 */
		if (hook_with_check_expr_restrictive)
		{
			WithCheckOption	   *wco;

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
														  WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) hook_with_check_expr_restrictive;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}

		/*
		 * Handle built-in policies, if there are no permissive
		 * policies from the hook.
		 */
		if (rowsec_with_check_expr && !hook_with_check_expr_permissive)
		{
			WithCheckOption	   *wco;

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
														  WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) rowsec_with_check_expr;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}
		/* Handle the hook policies, if there are no built-in ones. */
		else if (!rowsec_with_check_expr && hook_with_check_expr_permissive)
		{
			WithCheckOption	   *wco;

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
														  WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) hook_with_check_expr_permissive;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}
		/* Handle the case where there are both. */
		else if (rowsec_with_check_expr && hook_with_check_expr_permissive)
		{
			WithCheckOption	   *wco;
			List			   *combined_quals = NIL;
			Expr			   *combined_qual_eval;

			combined_quals = lcons(copyObject(rowsec_with_check_expr),
								   combined_quals);

			combined_quals = lcons(copyObject(hook_with_check_expr_permissive),
								   combined_quals);

			combined_qual_eval = makeBoolExpr(OR_EXPR, combined_quals, -1);

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
														  WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) combined_qual_eval;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}

		/*
		 * ON CONFLICT DO UPDATE has an RTE that is subject to both INSERT and
		 * UPDATE RLS enforcement.  Those are enforced (as a special, distinct
		 * kind of WCO) on the target tuple.
		 *
		 * Make a second, recursive pass over the RTE for this, gathering
		 * UPDATE-applicable RLS checks/WCOs, and gathering and converting
		 * UPDATE-applicable security quals into WCO_RLS_CONFLICT_CHECK RLS
		 * checks/WCOs.  Finally, these distinct kinds of RLS checks/WCOs are
		 * concatenated with our own INSERT-applicable list.
		 */
		if (root->onConflict && root->onConflict->action == ONCONFLICT_UPDATE &&
			commandType == CMD_INSERT)
		{
			List	   *conflictSecurityQuals = NIL;
			List	   *conflictWCOs = NIL;
			ListCell   *item;
			bool		conflictHasRowSecurity = false;
			bool		conflictHasSublinks = false;

			/* Assume that RTE is target resultRelation */
			get_row_security_policies(root, CMD_UPDATE, rte, rt_index,
									  &conflictSecurityQuals, &conflictWCOs,
									  &conflictHasRowSecurity,
									  &conflictHasSublinks);

			if (conflictHasRowSecurity)
				*hasRowSecurity = true;
			if (conflictHasSublinks)
				*hasSubLinks = true;

			/*
			 * Append WITH CHECK OPTIONs/RLS checks, which should not conflict
			 * between this INSERT and the auxiliary UPDATE
			 */
			*withCheckOptions = list_concat(*withCheckOptions,
											conflictWCOs);

			foreach(item, conflictSecurityQuals)
			{
				Expr			   *conflict_rowsec_expr = (Expr *) lfirst(item);
				WithCheckOption	   *wco;

				wco = (WithCheckOption *) makeNode(WithCheckOption);

				wco->kind = WCO_RLS_CONFLICT_CHECK;
				wco->relname = pstrdup(RelationGetRelationName(rel));
				wco->qual = (Node *) copyObject(conflict_rowsec_expr);
				wco->cascaded = false;
				*withCheckOptions = lappend(*withCheckOptions, wco);
			}
		}
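The permissive/restrictive combining described in the comments above reduces to OR'ing versus AND'ing the per-policy quals. A simplified sketch of that rule follows; the helper name is hypothetical, and process_policies internals are not shown in the excerpt.

#include "postgres.h"
#include "nodes/makefuncs.h"
#include "nodes/pg_list.h"

/* OR together permissive policy quals, AND together restrictive ones */
static Expr *
combine_policy_quals(List *quals, bool restrictive)
{
	if (quals == NIL)
		return NULL;

	if (list_length(quals) == 1)
		return (Expr *) linitial(quals);

	return makeBoolExpr(restrictive ? AND_EXPR : OR_EXPR, quals, -1);
}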
Example #11
/*
 * FetchRegularTable fetches the given table's data using the copy out command.
 * The function then fetches the DDL commands necessary to create this table's
 * replica, and locally applies these DDL commands. Last, the function copies
 * the fetched table data into the created table; and on success, returns true.
 * On failure due to connectivity issues with remote node, the function returns
 * false. On other types of failures, the function errors out.
 */
static bool
FetchRegularTable(const char *nodeName, uint32 nodePort, const char *tableName)
{
	StringInfo localFilePath = NULL;
	StringInfo remoteCopyCommand = NULL;
	List *ddlCommandList = NIL;
	ListCell *ddlCommandCell = NULL;
	CopyStmt *localCopyCommand = NULL;
	RangeVar *localTable = NULL;
	uint64 shardId = 0;
	bool received = false;
	StringInfo queryString = NULL;
	const char *tableOwner = NULL;
	Oid tableOwnerId = InvalidOid;
	Oid savedUserId = InvalidOid;
	int savedSecurityContext = 0;
	List *tableNameList = NIL;

	/* copy remote table's data to this node in an idempotent manner */
	shardId = ExtractShardId(tableName);
	localFilePath = makeStringInfo();
	appendStringInfo(localFilePath, "base/%s/%s" UINT64_FORMAT,
					 PG_JOB_CACHE_DIR, TABLE_FILE_PREFIX, shardId);

	remoteCopyCommand = makeStringInfo();
	appendStringInfo(remoteCopyCommand, COPY_OUT_COMMAND, tableName);

	received = ReceiveRegularFile(nodeName, nodePort, remoteCopyCommand, localFilePath);
	if (!received)
	{
		return false;
	}

	/* fetch the table's owner, so the local table can be created as that user */
	tableOwner = RemoteTableOwner(nodeName, nodePort, tableName);
	if (tableOwner == NULL)
	{
		return false;
	}
	tableOwnerId = get_role_oid(tableOwner, false);

	/* fetch the ddl commands needed to create the table */
	ddlCommandList = TableDDLCommandList(nodeName, nodePort, tableName);
	if (ddlCommandList == NIL)
	{
		return false;
	}

	/*
	 * Apply DDL commands against the database. Note that on failure from here
	 * on, we immediately error out instead of returning false.  Have to do
	 * this as the table's owner to ensure the local table is created with
	 * compatible permissions.
	 */
	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
	SetUserIdAndSecContext(tableOwnerId, SECURITY_LOCAL_USERID_CHANGE);

	foreach(ddlCommandCell, ddlCommandList)
	{
		StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell);
		Node *ddlCommandNode = ParseTreeNode(ddlCommand->data);

		ProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL,
					   NULL, None_Receiver, NULL);
		CommandCounterIncrement();
	}