Code Example #1
File: gpoptutils.c (Project: fei199255/gpdb)
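/*
 * traverseQueryOids()
 *
 * Walks the nodeToString() text of a Query, collecting every OID that
 * follows a ":relid" or ":funcid" token.  Each OID is entered into the
 * corresponding hash table for de-duplication and, if not seen before,
 * appended to the matching comma-separated StringInfo buffer.
 */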
static void
traverseQueryOids
	(
	Query          *pquery,
	HTAB           *relhtab,
	StringInfoData *relbuf,
	HTAB           *funchtab,
	StringInfoData *funcbuf
	)
{
	bool	   found;
	const char *whitespace = " \t\n\r";
	char	   *query = nodeToString(pquery);
	char	   *token = strtok(query, whitespace);

	while (token)
	{
		if (pg_strcasecmp(token, ":relid") == 0)
		{
			token = strtok(NULL, whitespace);
			if (token)
			{
				Oid relid = atooid(token);
				hash_search(relhtab, (void *)&relid, HASH_ENTER, &found);
				if (!found)
				{
					if (relbuf->len != 0)
						appendStringInfo(relbuf, "%s", ",");
					appendStringInfo(relbuf, "%u", relid);
				}
			}
		}
		else if (pg_strcasecmp(token, ":funcid") == 0)
		{
			token = strtok(NULL, whitespace);
			if (token)
			{
				Oid funcid = atooid(token);
				hash_search(funchtab, (void *)&funcid, HASH_ENTER, &found);
				if (!found)
				{
					if (funcbuf->len != 0)
						appendStringInfo(funcbuf, "%s", ",");
					appendStringInfo(funcbuf, "%u", funcid);
				}
			}
		}

		token = strtok(NULL, whitespace);
	}
}
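Every example in this listing converts a textual OID (from a libpq result or
a nodeToString() dump) into an Oid with atooid().  In the PostgreSQL sources
atooid() is a one-line wrapper around strtoul(); the sketch below is only a
reference, and the header it normally lives in (postgres_fe.h for frontend
code) is stated from memory rather than taken from these examples.

#include <stdlib.h>

typedef unsigned int Oid;		/* as declared in postgres_ext.h */

/* atooid(): parse a decimal string into an unsigned 32-bit OID */
#define atooid(x)  ((Oid) strtoul((x), NULL, 10))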
Code Example #2
File: check.c (Project: canoon/postgres)
/*
 *	check_is_install_user()
 *
 *	Check we are the install user, and that the new cluster
 *	has no other users.
 */
static void
check_is_install_user(ClusterInfo *cluster)
{
	PGresult   *res;
	PGconn	   *conn = connectToServer(cluster, "template1");

	prep_status("Checking database user is the install user");

	/* Can't use pg_authid because only superusers can view it. */
	res = executeQueryOrDie(conn,
							"SELECT rolsuper, oid "
							"FROM pg_catalog.pg_roles "
							"WHERE rolname = current_user "
							"AND rolname !~ '^pg_'");

	/*
	 * We only allow the install user in the new cluster (see comment below)
	 * and we preserve pg_authid.oid, so this must be the install user in the
	 * old cluster too.
	 */
	if (PQntuples(res) != 1 ||
		atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
		pg_fatal("database user \"%s\" is not the install user\n",
				 os_info.user);

	PQclear(res);

	res = executeQueryOrDie(conn,
							"SELECT COUNT(*) "
							"FROM pg_catalog.pg_roles "
							"WHERE rolname !~ '^pg_'");

	if (PQntuples(res) != 1)
		pg_fatal("could not determine the number of users\n");

	/*
	 * We only allow the install user in the new cluster because other defined
	 * users might match users defined in the old cluster and generate an
	 * error during pg_dump restore.
	 */
	if (cluster == &new_cluster && atooid(PQgetvalue(res, 0, 0)) != 1)
		pg_fatal("Only the install user can be defined in the new cluster.\n");

	PQclear(res);

	PQfinish(conn);

	check_ok();
}
Code Example #3
/*
 * do_lo_unlink()
 *
 * removes a large object from the database
 */
bool
do_lo_unlink(const char *loid_arg)
{
	int			status;
	Oid			loid = atooid(loid_arg);
	bool		own_transaction;

	if (!start_lo_xact("\\lo_unlink", &own_transaction))
		return false;

	SetCancelConn();
	status = lo_unlink(pset.db, loid);
	ResetCancelConn();

	if (status == -1)
	{
		fputs(PQerrorMessage(pset.db), stderr);
		return fail_lo_xact("\\lo_unlink", own_transaction);
	}

	if (!finish_lo_xact("\\lo_unlink", own_transaction))
		return false;

	fprintf(pset.queryFout, "lo_unlink %u\n", loid);

	return true;
}
Code Example #4
File: command.c (Project: 50wu/gpdb)
/*
 * This function takes a function description, e.g. "x" or "x(int)", and
 * issues a query on the given connection to retrieve the function's OID
 * using a cast to regproc or regprocedure (as appropriate). The result,
 * if there is one, is returned at *foid.  Note that we'll fail if the
 * function doesn't exist OR if there are multiple matching candidates
 * OR if there's something syntactically wrong with the function description;
 * unfortunately it can be hard to tell the difference.
 */
static bool
lookup_function_oid(PGconn *conn, const char *desc, Oid *foid)
{
	bool		result = true;
	PQExpBuffer query;
	PGresult   *res;

	query = createPQExpBuffer();
	printfPQExpBuffer(query, "SELECT ");
	appendStringLiteralConn(query, desc, conn);
	appendPQExpBuffer(query, "::pg_catalog.%s::pg_catalog.oid",
					  strchr(desc, '(') ? "regprocedure" : "regproc");

	res = PQexec(conn, query->data);
	if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1)
		*foid = atooid(PQgetvalue(res, 0, 0));
	else
	{
		minimal_error_message(res);
		result = false;
	}

	PQclear(res);
	destroyPQExpBuffer(query);

	return result;
}
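A hypothetical call site for lookup_function_oid(), assuming an already
established PGconn named conn; the function description string here is purely
illustrative and does not come from the original file.

	Oid			foid = InvalidOid;

	if (lookup_function_oid(conn, "lower(text)", &foid))
		printf("resolved to OID %u\n", foid);
	else
		fprintf(stderr, "could not look up the function\n");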
Code Example #5
/*
 * do_lo_export()
 *
 * Write a large object to a file
 */
bool
do_lo_export(const char *loid_arg, const char *filename_arg)
{
	int			status;
	bool		own_transaction;

	if (!start_lo_xact("\\lo_export", &own_transaction))
		return false;

	SetCancelConn();
	status = lo_export(pset.db, atooid(loid_arg), filename_arg);
	ResetCancelConn();

	/* of course this status is documented nowhere :( */
	if (status != 1)
	{
		psql_error("%s", PQerrorMessage(pset.db));
		return fail_lo_xact("\\lo_export", own_transaction);
	}

	if (!finish_lo_xact("\\lo_export", own_transaction))
		return false;

	print_lo_result("lo_export");

	return true;
}
Code Example #6
/*
 * do_lo_unlink()
 *
 * removes a large object from the database
 */
bool
do_lo_unlink(const char *loid_arg)
{
	int			status;
	Oid			loid = atooid(loid_arg);
	bool		own_transaction;

	if (!start_lo_xact("\\lo_unlink", &own_transaction))
		return false;

	SetCancelConn();
	status = lo_unlink(pset.db, loid);
	ResetCancelConn();

	if (status == -1)
	{
		psql_error("%s", PQerrorMessage(pset.db));
		return fail_lo_xact("\\lo_unlink", own_transaction);
	}

	if (!finish_lo_xact("\\lo_unlink", own_transaction))
		return false;

	print_lo_result("lo_unlink %u", loid);

	return true;
}
Code Example #7
File: pg_backup_files.c (Project: sunyangkobe/cscd43)
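/*
 * _getBlobTocEntry()
 *
 * Reads the next line of the blob TOC, which has the form "<oid> <filename>",
 * and returns the parsed OID plus the file name with any trailing newline
 * stripped.  At end of file, *oid is set to 0 and fname to an empty string.
 */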
static void
_getBlobTocEntry(ArchiveHandle *AH, Oid *oid, char fname[K_STD_BUF_SIZE])
{
	lclContext *ctx = (lclContext *) AH->formatData;
	char		blobTe[K_STD_BUF_SIZE];
	size_t		fpos;
	size_t		eos;

	if (fgets(&blobTe[0], K_STD_BUF_SIZE - 1, ctx->blobToc) != NULL)
	{
		*oid = atooid(blobTe);

		fpos = strcspn(blobTe, " ");

		strncpy(fname, &blobTe[fpos + 1], K_STD_BUF_SIZE - 1);

		eos = strlen(fname) - 1;

		if (fname[eos] == '\n')
			fname[eos] = '\0';

	}
	else
	{

		*oid = 0;
		fname[0] = '\0';
	}
}
Code Example #8
/*
 * do_lo_export()
 *
 * Write a large object to a file
 */
bool
do_lo_export(const char *loid_arg, const char *filename_arg)
{
	int			status;
	bool		own_transaction;

	if (!start_lo_xact("\\lo_export", &own_transaction))
		return false;

	SetCancelConn();
	status = lo_export(pset.db, atooid(loid_arg), filename_arg);
	ResetCancelConn();

	/* of course this status is documented nowhere :( */
	if (status != 1)
	{
		fputs(PQerrorMessage(pset.db), stderr);
		return fail_lo_xact("\\lo_export", own_transaction);
	}

	if (!finish_lo_xact("\\lo_export", own_transaction))
		return false;

	fprintf(pset.queryFout, "lo_export\n");

	return true;
}
Code Example #9
File: info.c (Project: no0p/postgres)
/*
 * get_db_infos()
 *
 * Scans the pg_database system catalog and fills the cluster's database
 * array with all user databases.
 */
static void
get_db_infos(ClusterInfo *cluster)
{
    PGconn	   *conn = connectToServer(cluster, "template1");
    PGresult   *res;
    int			ntups;
    int			tupnum;
    DbInfo	   *dbinfos;
    int			i_datname,
                i_oid,
                i_encoding,
                i_datcollate,
                i_datctype,
                i_spclocation;
    char		query[QUERY_ALLOC];

    snprintf(query, sizeof(query),
             "SELECT d.oid, d.datname, d.encoding, d.datcollate, d.datctype, "
             "%s AS spclocation "
             "FROM pg_catalog.pg_database d "
             " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
             " ON d.dattablespace = t.oid "
             "WHERE d.datallowconn = true "
             /* we don't preserve pg_database.oid so we sort by name */
             "ORDER BY 2",
             /* 9.2 removed the spclocation column */
             (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
             "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid)");

    res = executeQueryOrDie(conn, "%s", query);

    i_oid = PQfnumber(res, "oid");
    i_datname = PQfnumber(res, "datname");
    i_encoding = PQfnumber(res, "encoding");
    i_datcollate = PQfnumber(res, "datcollate");
    i_datctype = PQfnumber(res, "datctype");
    i_spclocation = PQfnumber(res, "spclocation");

    ntups = PQntuples(res);
    dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups);

    for (tupnum = 0; tupnum < ntups; tupnum++)
    {
        dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
        dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname));
        dbinfos[tupnum].db_encoding = atoi(PQgetvalue(res, tupnum, i_encoding));
        dbinfos[tupnum].db_collate = pg_strdup(PQgetvalue(res, tupnum, i_datcollate));
        dbinfos[tupnum].db_ctype = pg_strdup(PQgetvalue(res, tupnum, i_datctype));
        snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s",
                 PQgetvalue(res, tupnum, i_spclocation));
    }
    PQclear(res);

    PQfinish(conn);

    cluster->dbarr.dbs = dbinfos;
    cluster->dbarr.ndbs = ntups;
}
Code Example #10
File: info.c (Project: aKhadiemik/postgres)
/*
 * get_db_infos()
 *
 * Scans the pg_database system catalog and fills the cluster's database
 * array with all user databases.
 */
static void
get_db_infos(ClusterInfo *cluster)
{
	PGconn	   *conn = connectToServer(cluster, "template1");
	PGresult   *res;
	int			ntups;
	int			tupnum;
	DbInfo	   *dbinfos;
	int			i_datname,
				i_oid,
				i_spclocation;

	res = executeQueryOrDie(conn,
							"SELECT d.oid, d.datname, t.spclocation "
							"FROM pg_catalog.pg_database d "
							" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
							" ON d.dattablespace = t.oid "
							"WHERE d.datallowconn = true "
	/* we don't preserve pg_database.oid so we sort by name */
							"ORDER BY 2");

	i_oid = PQfnumber(res, "oid");
	i_datname = PQfnumber(res, "datname");
	i_spclocation = PQfnumber(res, "spclocation");

	ntups = PQntuples(res);
	dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups);

	for (tupnum = 0; tupnum < ntups; tupnum++)
	{
		dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
		snprintf(dbinfos[tupnum].db_name, sizeof(dbinfos[tupnum].db_name), "%s",
				 PQgetvalue(res, tupnum, i_datname));
		snprintf(dbinfos[tupnum].db_tblspace, sizeof(dbinfos[tupnum].db_tblspace), "%s",
				 PQgetvalue(res, tupnum, i_spclocation));
	}
	PQclear(res);

	PQfinish(conn);

	cluster->dbarr.dbs = dbinfos;
	cluster->dbarr.ndbs = ntups;
}
Code Example #11
File: relfilenode.c (Project: GisKook/Gis)
/*
 * get_pg_database_relfilenode()
 *
 *	Retrieves the relfilenode of pg_database; we need it later in the
 *	upgrade process.
 */
void
get_pg_database_relfilenode(ClusterInfo *cluster)
{
	PGconn	   *conn = connectToServer(cluster, "template1");
	PGresult   *res;
	int			i_relfile;

	res = executeQueryOrDie(conn,
							"SELECT c.relname, c.relfilenode "
							"FROM 	pg_catalog.pg_class c, "
							"		pg_catalog.pg_namespace n "
							"WHERE 	c.relnamespace = n.oid AND "
							"		n.nspname = 'pg_catalog' AND "
							"		c.relname = 'pg_database' "
							"ORDER BY c.relname");

	i_relfile = PQfnumber(res, "relfilenode");
	cluster->pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile));

	PQclear(res);
	PQfinish(conn);
}
Code Example #12
File: check.c (Project: CadillacBupt/recdb-postgresql)
/*
 *	check_is_super_user()
 *
 *	Check that we are a superuser, and record our user id and the user count
 */
static void
check_is_super_user(ClusterInfo *cluster)
{
	PGresult   *res;
	PGconn	   *conn = connectToServer(cluster, "template1");

	prep_status("Checking database user is a superuser");

	/* Can't use pg_authid because only superusers can view it. */
	res = executeQueryOrDie(conn,
							"SELECT rolsuper, oid "
							"FROM pg_catalog.pg_roles "
							"WHERE rolname = current_user");

	if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
		pg_log(PG_FATAL, "database user \"%s\" is not a superuser\n",
			   os_info.user);

	cluster->install_role_oid = atooid(PQgetvalue(res, 0, 1));

	PQclear(res);

	res = executeQueryOrDie(conn,
							"SELECT COUNT(*) "
							"FROM pg_catalog.pg_roles ");

	if (PQntuples(res) != 1)
		pg_log(PG_FATAL, "could not determine the number of users\n");

	cluster->role_count = atoi(PQgetvalue(res, 0, 0));

	PQclear(res);

	PQfinish(conn);

	check_ok();
}
Code Example #13
File: vacuumlo.c (Project: Brar/postgres)
/*
 * This vacuums LOs of one database. It returns 0 on success, -1 on failure.
 */
static int
vacuumlo(const char *database, const struct _param *param)
{
	PGconn	   *conn;
	PGresult   *res,
			   *res2;
	char		buf[BUFSIZE];
	long		matched;
	long		deleted;
	int			i;
	bool		new_pass;
	bool		success = true;
	static bool have_password = false;
	static char password[100];

	/* Note: password can be carried over from a previous call */
	if (param->pg_prompt == TRI_YES && !have_password)
	{
		simple_prompt("Password: "******"host";
		values[0] = param->pg_host;
		keywords[1] = "port";
		values[1] = param->pg_port;
		keywords[2] = "user";
		values[2] = param->pg_user;
		keywords[3] = "password";
		values[3] = have_password ? password : NULL;
		keywords[4] = "dbname";
		values[4] = database;
		keywords[5] = "fallback_application_name";
		values[5] = param->progname;
		keywords[6] = NULL;
		values[6] = NULL;

		new_pass = false;
		conn = PQconnectdbParams(keywords, values, true);
		if (!conn)
		{
			fprintf(stderr, "Connection to database \"%s\" failed\n",
					database);
			return -1;
		}

		if (PQstatus(conn) == CONNECTION_BAD &&
			PQconnectionNeedsPassword(conn) &&
			!have_password &&
			param->pg_prompt != TRI_NO)
		{
			PQfinish(conn);
			simple_prompt("Password: "******"Connection to database \"%s\" failed:\n%s",
				database, PQerrorMessage(conn));
		PQfinish(conn);
		return -1;
	}

	if (param->verbose)
	{
		fprintf(stdout, "Connected to database \"%s\"\n", database);
		if (param->dry_run)
			fprintf(stdout, "Test run: no large objects will be removed!\n");
	}

	res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "Failed to set search_path:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	/*
	 * First we create and populate the LO temp table
	 */
	buf[0] = '\0';
	strcat(buf, "CREATE TEMP TABLE vacuum_l AS ");
	if (PQserverVersion(conn) >= 90000)
		strcat(buf, "SELECT oid AS lo FROM pg_largeobject_metadata");
	else
		strcat(buf, "SELECT DISTINCT loid AS lo FROM pg_largeobject");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "Failed to create temp table:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	/*
	 * Analyze the temp table so that planner will generate decent plans for
	 * the DELETEs below.
	 */
	buf[0] = '\0';
	strcat(buf, "ANALYZE vacuum_l");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "Failed to vacuum temp table:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	/*
	 * Now find any candidate tables that have columns of type oid.
	 *
	 * NOTE: we ignore system tables and temp tables by the expedient of
	 * rejecting tables in schemas named 'pg_*'.  In particular, the temp
	 * table formed above is ignored, and pg_largeobject will be too. If
	 * either of these were scanned, obviously we'd end up with nothing to
	 * delete...
	 *
	 * NOTE: the system oid column is ignored, as it has attnum < 1. This
	 * shouldn't matter for correctness, but it saves time.
	 */
	buf[0] = '\0';
	strcat(buf, "SELECT s.nspname, c.relname, a.attname ");
	strcat(buf, "FROM pg_class c, pg_attribute a, pg_namespace s, pg_type t ");
	strcat(buf, "WHERE a.attnum > 0 AND NOT a.attisdropped ");
	strcat(buf, "      AND a.attrelid = c.oid ");
	strcat(buf, "      AND a.atttypid = t.oid ");
	strcat(buf, "      AND c.relnamespace = s.oid ");
	strcat(buf, "      AND t.typname in ('oid', 'lo') ");
	strcat(buf, "      AND c.relkind in (" CppAsString2(RELKIND_RELATION) ", " CppAsString2(RELKIND_MATVIEW) ")");
	strcat(buf, "      AND s.nspname !~ '^pg_'");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "Failed to find OID columns:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}

	for (i = 0; i < PQntuples(res); i++)
	{
		char	   *schema,
				   *table,
				   *field;

		schema = PQgetvalue(res, i, 0);
		table = PQgetvalue(res, i, 1);
		field = PQgetvalue(res, i, 2);

		if (param->verbose)
			fprintf(stdout, "Checking %s in %s.%s\n", field, schema, table);

		schema = PQescapeIdentifier(conn, schema, strlen(schema));
		table = PQescapeIdentifier(conn, table, strlen(table));
		field = PQescapeIdentifier(conn, field, strlen(field));

		if (!schema || !table || !field)
		{
			fprintf(stderr, "%s", PQerrorMessage(conn));
			PQclear(res);
			PQfinish(conn);
			if (schema != NULL)
				PQfreemem(schema);
			if (schema != NULL)
				PQfreemem(table);
			if (schema != NULL)
				PQfreemem(field);
			return -1;
		}

		snprintf(buf, BUFSIZE,
				 "DELETE FROM vacuum_l "
				 "WHERE lo IN (SELECT %s FROM %s.%s)",
				 field, schema, table);
		res2 = PQexec(conn, buf);
		if (PQresultStatus(res2) != PGRES_COMMAND_OK)
		{
			fprintf(stderr, "Failed to check %s in table %s.%s:\n",
					field, schema, table);
			fprintf(stderr, "%s", PQerrorMessage(conn));
			PQclear(res2);
			PQclear(res);
			PQfinish(conn);
			PQfreemem(schema);
			PQfreemem(table);
			PQfreemem(field);
			return -1;
		}
		PQclear(res2);

		PQfreemem(schema);
		PQfreemem(table);
		PQfreemem(field);
	}
	PQclear(res);

	/*
	 * Now, those entries remaining in vacuum_l are orphans.  Delete 'em.
	 *
	 * We don't want to run each delete as an individual transaction, because
	 * the commit overhead would be high.  However, since 9.0 the backend will
	 * acquire a lock per deleted LO, so deleting too many LOs per transaction
	 * risks running out of room in the shared-memory lock table. Accordingly,
	 * we delete up to transaction_limit LOs per transaction.
	 */
	res = PQexec(conn, "begin");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "Failed to start transaction:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	buf[0] = '\0';
	strcat(buf,
		   "DECLARE myportal CURSOR WITH HOLD FOR SELECT lo FROM vacuum_l");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "DECLARE CURSOR failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	snprintf(buf, BUFSIZE, "FETCH FORWARD %ld IN myportal",
			 param->transaction_limit > 0 ? param->transaction_limit : 1000L);

	deleted = 0;

	while (1)
	{
		res = PQexec(conn, buf);
		if (PQresultStatus(res) != PGRES_TUPLES_OK)
		{
			fprintf(stderr, "FETCH FORWARD failed: %s", PQerrorMessage(conn));
			PQclear(res);
			PQfinish(conn);
			return -1;
		}

		matched = PQntuples(res);
		if (matched <= 0)
		{
			/* at end of resultset */
			PQclear(res);
			break;
		}

		for (i = 0; i < matched; i++)
		{
			Oid			lo = atooid(PQgetvalue(res, i, 0));

			if (param->verbose)
			{
				fprintf(stdout, "\rRemoving lo %6u   ", lo);
				fflush(stdout);
			}

			if (param->dry_run == 0)
			{
				if (lo_unlink(conn, lo) < 0)
				{
					fprintf(stderr, "\nFailed to remove lo %u: ", lo);
					fprintf(stderr, "%s", PQerrorMessage(conn));
					if (PQtransactionStatus(conn) == PQTRANS_INERROR)
					{
						success = false;
						PQclear(res);
						break;
					}
				}
				else
					deleted++;
			}
			else
				deleted++;

			if (param->transaction_limit > 0 &&
				(deleted % param->transaction_limit) == 0)
			{
				res2 = PQexec(conn, "commit");
				if (PQresultStatus(res2) != PGRES_COMMAND_OK)
				{
					fprintf(stderr, "Failed to commit transaction:\n");
					fprintf(stderr, "%s", PQerrorMessage(conn));
					PQclear(res2);
					PQclear(res);
					PQfinish(conn);
					return -1;
				}
				PQclear(res2);
				res2 = PQexec(conn, "begin");
				if (PQresultStatus(res2) != PGRES_COMMAND_OK)
				{
					fprintf(stderr, "Failed to start transaction:\n");
					fprintf(stderr, "%s", PQerrorMessage(conn));
					PQclear(res2);
					PQclear(res);
					PQfinish(conn);
					return -1;
				}
				PQclear(res2);
			}
		}

		PQclear(res);
	}

	/*
	 * That's all folks!
	 */
	res = PQexec(conn, "commit");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "Failed to commit transaction:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	PQfinish(conn);

	if (param->verbose)
	{
		if (param->dry_run)
			fprintf(stdout, "\rWould remove %ld large objects from database \"%s\".\n",
					deleted, database);
		else if (success)
			fprintf(stdout,
					"\rSuccessfully removed %ld large objects from database \"%s\".\n",
					deleted, database);
		else
			fprintf(stdout, "\rRemoval from database \"%s\" failed at object %ld of %ld.\n",
					database, deleted, matched);
	}

	return ((param->dry_run || success) ? 0 : -1);
}
Code Example #14
File: droplang.c (Project: markwkm/postgres)
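/*
 * droplang: drop a procedural language (and its now-unused handler, inline
 * handler, and validator functions) from a database, or list the installed
 * procedural languages when called with -l/--list.
 */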
int
main(int argc, char *argv[])
{
	static struct option long_options[] = {
		{"list", no_argument, NULL, 'l'},
		{"host", required_argument, NULL, 'h'},
		{"port", required_argument, NULL, 'p'},
		{"username", required_argument, NULL, 'U'},
		{"no-password", no_argument, NULL, 'w'},
		{"password", no_argument, NULL, 'W'},
		{"dbname", required_argument, NULL, 'd'},
		{"echo", no_argument, NULL, 'e'},
		{NULL, 0, NULL, 0}
	};

	const char *progname;
	int			optindex;
	int			c;
	bool		listlangs = false;
	const char *dbname = NULL;
	char	   *host = NULL;
	char	   *port = NULL;
	char	   *username = NULL;
	enum trivalue prompt_password = TRI_DEFAULT;
	bool		echo = false;
	char	   *langname = NULL;
	char	   *p;
	Oid			lanplcallfoid;
	Oid			laninline;
	Oid			lanvalidator;
	char	   *handler;
	char	   *inline_handler;
	char	   *validator;
	char	   *handler_ns;
	char	   *inline_ns;
	char	   *validator_ns;
	bool		keephandler;
	bool		keepinline;
	bool		keepvalidator;
	PQExpBufferData sql;
	PGconn	   *conn;
	PGresult   *result;

	progname = get_progname(argv[0]);
	set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pgscripts"));

	handle_help_version_opts(argc, argv, "droplang", help);

	while ((c = getopt_long(argc, argv, "lh:p:U:wWd:e", long_options, &optindex)) != -1)
	{
		switch (c)
		{
			case 'l':
				listlangs = true;
				break;
			case 'h':
				host = optarg;
				break;
			case 'p':
				port = optarg;
				break;
			case 'U':
				username = optarg;
				break;
			case 'w':
				prompt_password = TRI_NO;
				break;
			case 'W':
				prompt_password = TRI_YES;
				break;
			case 'd':
				dbname = optarg;
				break;
			case 'e':
				echo = true;
				break;
			default:
				fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
				exit(1);
		}
	}

	if (argc - optind > 0)
	{
		if (listlangs)
			dbname = argv[optind++];
		else
		{
			langname = argv[optind++];
			if (argc - optind > 0)
				dbname = argv[optind++];
		}
	}

	if (argc - optind > 0)
	{
		fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
				progname, argv[optind]);
		fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
		exit(1);
	}

	if (dbname == NULL)
	{
		if (getenv("PGDATABASE"))
			dbname = getenv("PGDATABASE");
		else if (getenv("PGUSER"))
			dbname = getenv("PGUSER");
		else
			dbname = get_user_name(progname);
	}

	initPQExpBuffer(&sql);

	/*
	 * List option
	 */
	if (listlangs)
	{
		printQueryOpt popt;
		static const bool translate_columns[] = {false, true};

		conn = connectDatabase(dbname, host, port, username, prompt_password,
							   progname);

		printfPQExpBuffer(&sql, "SELECT lanname as \"%s\", "
				"(CASE WHEN lanpltrusted THEN '%s' ELSE '%s' END) as \"%s\" "
						  "FROM pg_catalog.pg_language WHERE lanispl;",
						  gettext_noop("Name"),
						  gettext_noop("yes"), gettext_noop("no"),
						  gettext_noop("Trusted?"));
		result = executeQuery(conn, sql.data, progname, echo);

		memset(&popt, 0, sizeof(popt));
		popt.topt.format = PRINT_ALIGNED;
		popt.topt.border = 1;
		popt.topt.start_table = true;
		popt.topt.stop_table = true;
		popt.topt.encoding = PQclientEncoding(conn);
		popt.title = _("Procedural Languages");
		popt.translate_header = true;
		popt.translate_columns = translate_columns;
		printQuery(result, &popt, stdout, NULL);

		PQfinish(conn);
		exit(0);
	}

	if (langname == NULL)
	{
		fprintf(stderr, _("%s: missing required argument language name\n"),
				progname);
		fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
				progname);
		exit(1);
	}

	for (p = langname; *p; p++)
		if (*p >= 'A' && *p <= 'Z')
			*p += ('a' - 'A');

	conn = connectDatabase(dbname, host, port, username, prompt_password, progname);

	/*
	 * Force schema search path to be just pg_catalog, so that we don't have
	 * to be paranoid about search paths below.
	 */
	executeCommand(conn, "SET search_path = pg_catalog;", progname, echo);

	/*
	 * Make sure the language is installed and find the OIDs of the language
	 * support functions
	 */
	printfPQExpBuffer(&sql, "SELECT lanplcallfoid, laninline, lanvalidator "
					  "FROM pg_language WHERE lanname = '%s' AND lanispl;",
					  langname);
	result = executeQuery(conn, sql.data, progname, echo);
	if (PQntuples(result) == 0)
	{
		PQfinish(conn);
		fprintf(stderr, _("%s: language \"%s\" is not installed in "
						  "database \"%s\"\n"),
				progname, langname, dbname);
		exit(1);
	}
	lanplcallfoid = atooid(PQgetvalue(result, 0, 0));
	laninline = atooid(PQgetvalue(result, 0, 1));
	lanvalidator = atooid(PQgetvalue(result, 0, 2));
	PQclear(result);

	/*
	 * Check that there are no functions left defined in that language
	 */
	printfPQExpBuffer(&sql, "SELECT count(proname) FROM pg_proc P, "
					  "pg_language L WHERE P.prolang = L.oid "
					  "AND L.lanname = '%s';", langname);
	result = executeQuery(conn, sql.data, progname, echo);
	if (strcmp(PQgetvalue(result, 0, 0), "0") != 0)
	{
		PQfinish(conn);
		fprintf(stderr,
				_("%s: still %s functions declared in language \"%s\"; "
				  "language not removed\n"),
				progname, PQgetvalue(result, 0, 0), langname);
		exit(1);
	}
	PQclear(result);

	/*
	 * Check that the handler function isn't used by some other language
	 */
	printfPQExpBuffer(&sql, "SELECT count(*) FROM pg_language "
					  "WHERE lanplcallfoid = %u AND lanname <> '%s';",
					  lanplcallfoid, langname);
	result = executeQuery(conn, sql.data, progname, echo);
	if (strcmp(PQgetvalue(result, 0, 0), "0") == 0)
		keephandler = false;
	else
		keephandler = true;
	PQclear(result);

	/*
	 * Find the handler name
	 */
	if (!keephandler)
	{
		printfPQExpBuffer(&sql, "SELECT proname, (SELECT nspname "
						  "FROM pg_namespace ns WHERE ns.oid = pronamespace) "
						  "AS prons FROM pg_proc WHERE oid = %u;",
						  lanplcallfoid);
		result = executeQuery(conn, sql.data, progname, echo);
		handler = strdup(PQgetvalue(result, 0, 0));
		handler_ns = strdup(PQgetvalue(result, 0, 1));
		PQclear(result);
	}
	else
	{
		handler = NULL;
		handler_ns = NULL;
	}

	/*
	 * Check that the inline function isn't used by some other language
	 */
	if (OidIsValid(laninline))
	{
		printfPQExpBuffer(&sql, "SELECT count(*) FROM pg_language "
						  "WHERE laninline = %u AND lanname <> '%s';",
						  laninline, langname);
		result = executeQuery(conn, sql.data, progname, echo);
		if (strcmp(PQgetvalue(result, 0, 0), "0") == 0)
			keepinline = false;
		else
			keepinline = true;
		PQclear(result);
	}
	else
		keepinline = true;		/* don't try to delete it */

	/*
	 * Find the inline handler name
	 */
	if (!keepinline)
	{
		printfPQExpBuffer(&sql, "SELECT proname, (SELECT nspname "
						  "FROM pg_namespace ns WHERE ns.oid = pronamespace) "
						  "AS prons FROM pg_proc WHERE oid = %u;",
						  laninline);
		result = executeQuery(conn, sql.data, progname, echo);
		inline_handler = strdup(PQgetvalue(result, 0, 0));
		inline_ns = strdup(PQgetvalue(result, 0, 1));
		PQclear(result);
	}
	else
	{
		inline_handler = NULL;
		inline_ns = NULL;
	}

	/*
	 * Check that the validator function isn't used by some other language
	 */
	if (OidIsValid(lanvalidator))
	{
		printfPQExpBuffer(&sql, "SELECT count(*) FROM pg_language "
						  "WHERE lanvalidator = %u AND lanname <> '%s';",
						  lanvalidator, langname);
		result = executeQuery(conn, sql.data, progname, echo);
		if (strcmp(PQgetvalue(result, 0, 0), "0") == 0)
			keepvalidator = false;
		else
			keepvalidator = true;
		PQclear(result);
	}
	else
		keepvalidator = true;	/* don't try to delete it */

	/*
	 * Find the validator name
	 */
	if (!keepvalidator)
	{
		printfPQExpBuffer(&sql, "SELECT proname, (SELECT nspname "
						  "FROM pg_namespace ns WHERE ns.oid = pronamespace) "
						  "AS prons FROM pg_proc WHERE oid = %u;",
						  lanvalidator);
		result = executeQuery(conn, sql.data, progname, echo);
		validator = strdup(PQgetvalue(result, 0, 0));
		validator_ns = strdup(PQgetvalue(result, 0, 1));
		PQclear(result);
	}
	else
	{
		validator = NULL;
		validator_ns = NULL;
	}

	/*
	 * Drop the language and the functions
	 */
	printfPQExpBuffer(&sql, "DROP LANGUAGE \"%s\";\n", langname);
	if (!keephandler)
		appendPQExpBuffer(&sql, "DROP FUNCTION \"%s\".\"%s\" ();\n",
						  handler_ns, handler);
	if (!keepinline)
		appendPQExpBuffer(&sql, "DROP FUNCTION \"%s\".\"%s\" (internal);\n",
						  inline_ns, inline_handler);
	if (!keepvalidator)
		appendPQExpBuffer(&sql, "DROP FUNCTION \"%s\".\"%s\" (oid);\n",
						  validator_ns, validator);
	if (echo)
		printf("%s", sql.data);
	result = PQexec(conn, sql.data);
	if (PQresultStatus(result) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, _("%s: language removal failed: %s"),
				progname, PQerrorMessage(conn));
		PQfinish(conn);
		exit(1);
	}

	PQclear(result);
	PQfinish(conn);
	exit(0);
}
Code Example #15
File: misc.c (Project: Joe-xXx/postgres)
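/*
 * pg_tablespace_databases()
 *
 * Set-returning function: given a tablespace OID, scans that tablespace's
 * directory and returns the OID of every database that has a non-empty
 * subdirectory there.  Warns (and returns nothing) for the global
 * tablespace or for an OID that is not a tablespace.
 */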
Datum
pg_tablespace_databases(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	struct dirent *de;
	ts_db_fctx *fctx;

	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;
		Oid			tablespaceOid = PG_GETARG_OID(0);

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		fctx = palloc(sizeof(ts_db_fctx));

		/*
		 * size = tablespace dirname length + dir sep char + oid + terminator
		 */
		fctx->location = (char *) palloc(9 + 1 + OIDCHARS + 1 +
								   strlen(TABLESPACE_VERSION_DIRECTORY) + 1);
		if (tablespaceOid == GLOBALTABLESPACE_OID)
		{
			fctx->dirdesc = NULL;
			ereport(WARNING,
					(errmsg("global tablespace never has databases")));
		}
		else
		{
			if (tablespaceOid == DEFAULTTABLESPACE_OID)
				sprintf(fctx->location, "base");
			else
				sprintf(fctx->location, "pg_tblspc/%u/%s", tablespaceOid,
						TABLESPACE_VERSION_DIRECTORY);

			fctx->dirdesc = AllocateDir(fctx->location);

			if (!fctx->dirdesc)
			{
				/* the only expected error is ENOENT */
				if (errno != ENOENT)
					ereport(ERROR,
							(errcode_for_file_access(),
							 errmsg("could not open directory \"%s\": %m",
									fctx->location)));
				ereport(WARNING,
					  (errmsg("%u is not a tablespace OID", tablespaceOid)));
			}
		}
		funcctx->user_fctx = fctx;
		MemoryContextSwitchTo(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();
	fctx = (ts_db_fctx *) funcctx->user_fctx;

	if (!fctx->dirdesc)			/* not a tablespace */
		SRF_RETURN_DONE(funcctx);

	while ((de = ReadDir(fctx->dirdesc, fctx->location)) != NULL)
	{
		char	   *subdir;
		DIR		   *dirdesc;
		Oid			datOid = atooid(de->d_name);

		/* this test skips . and .., but is awfully weak */
		if (!datOid)
			continue;

		/* if database subdir is empty, don't report tablespace as used */

		/* size = path length + dir sep char + file name + terminator */
		subdir = palloc(strlen(fctx->location) + 1 + strlen(de->d_name) + 1);
		sprintf(subdir, "%s/%s", fctx->location, de->d_name);
		dirdesc = AllocateDir(subdir);
		while ((de = ReadDir(dirdesc, subdir)) != NULL)
		{
			if (strcmp(de->d_name, ".") != 0 && strcmp(de->d_name, "..") != 0)
				break;
		}
		FreeDir(dirdesc);
		pfree(subdir);

		if (!de)
			continue;			/* indeed, nothing in it */

		SRF_RETURN_NEXT(funcctx, ObjectIdGetDatum(datOid));
	}

	FreeDir(fctx->dirdesc);
	SRF_RETURN_DONE(funcctx);
}
Code Example #16
File: pg_dumpall.c (Project: joshuawingfield/pgsql)
/*
 * Dump commands to create each database.
 *
 * To minimize the number of reconnections (and possibly ensuing
 * password prompts) required by the output script, we emit all CREATE
 * DATABASE commands during the initial phase of the script, and then
 * run pg_dump for each database to dump the contents of that
 * database.  We skip databases marked not datallowconn, since we'd be
 * unable to connect to them anyway (and besides, we don't want to
 * dump template0).
 */
static void
dumpCreateDB(PGconn *conn)
{
	PQExpBuffer buf = createPQExpBuffer();
	char	   *default_encoding = NULL;
	char	   *default_collate = NULL;
	char	   *default_ctype = NULL;
	PGresult   *res;
	int			i;

	fprintf(OPF, "--\n-- Database creation\n--\n\n");

	/*
	 * First, get the installation's default encoding and locale information.
	 * We will dump encoding and locale specifications in the CREATE DATABASE
	 * commands for just those databases with values different from defaults.
	 *
	 * We consider template0's encoding and locale (or, pre-7.1, template1's)
	 * to define the installation default.	Pre-8.4 installations do not have
	 * per-database locale settings; for them, every database must necessarily
	 * be using the installation default, so there's no need to do anything
	 * (which is good, since in very old versions there is no good way to find
	 * out what the installation locale is anyway...)
	 */
	if (server_version >= 80400)
		res = executeQuery(conn,
						   "SELECT pg_encoding_to_char(encoding), "
						   "datcollate, datctype "
						   "FROM pg_database "
						   "WHERE datname = 'template0'");
	else if (server_version >= 70100)
		res = executeQuery(conn,
						   "SELECT pg_encoding_to_char(encoding), "
						   "null::text AS datcollate, null::text AS datctype "
						   "FROM pg_database "
						   "WHERE datname = 'template0'");
	else
		res = executeQuery(conn,
						   "SELECT pg_encoding_to_char(encoding), "
						   "null::text AS datcollate, null::text AS datctype "
						   "FROM pg_database "
						   "WHERE datname = 'template1'");

	/* If for some reason the template DB isn't there, treat as unknown */
	if (PQntuples(res) > 0)
	{
		if (!PQgetisnull(res, 0, 0))
			default_encoding = strdup(PQgetvalue(res, 0, 0));
		if (!PQgetisnull(res, 0, 1))
			default_collate = strdup(PQgetvalue(res, 0, 1));
		if (!PQgetisnull(res, 0, 2))
			default_ctype = strdup(PQgetvalue(res, 0, 2));
	}

	PQclear(res);

	/* Now collect all the information about databases to dump */
	if (server_version >= 80400)
		res = executeQuery(conn,
						   "SELECT datname, "
						   "coalesce(rolname, (select rolname from pg_authid where oid=(select datdba from pg_database where datname='template0'))), "
						   "pg_encoding_to_char(d.encoding), "
						   "datcollate, datctype, datfrozenxid, "
						   "datistemplate, datacl, datconnlimit, "
						   "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
			  "FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
						   "WHERE datallowconn ORDER BY 1");
	else if (server_version >= 80100)
		res = executeQuery(conn,
						   "SELECT datname, "
						   "coalesce(rolname, (select rolname from pg_authid where oid=(select datdba from pg_database where datname='template0'))), "
						   "pg_encoding_to_char(d.encoding), "
		   "null::text AS datcollate, null::text AS datctype, datfrozenxid, "
						   "datistemplate, datacl, datconnlimit, "
						   "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
			  "FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
						   "WHERE datallowconn ORDER BY 1");
	else if (server_version >= 80000)
		res = executeQuery(conn,
						   "SELECT datname, "
						   "coalesce(usename, (select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
						   "pg_encoding_to_char(d.encoding), "
		   "null::text AS datcollate, null::text AS datctype, datfrozenxid, "
						   "datistemplate, datacl, -1 as datconnlimit, "
						   "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
		   "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
						   "WHERE datallowconn ORDER BY 1");
	else if (server_version >= 70300)
		res = executeQuery(conn,
						   "SELECT datname, "
						   "coalesce(usename, (select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
						   "pg_encoding_to_char(d.encoding), "
		   "null::text AS datcollate, null::text AS datctype, datfrozenxid, "
						   "datistemplate, datacl, -1 as datconnlimit, "
						   "'pg_default' AS dattablespace "
		   "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
						   "WHERE datallowconn ORDER BY 1");
	else if (server_version >= 70100)
		res = executeQuery(conn,
						   "SELECT datname, "
						   "coalesce("
					"(select usename from pg_shadow where usesysid=datdba), "
						   "(select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
						   "pg_encoding_to_char(d.encoding), "
						   "null::text AS datcollate, null::text AS datctype, 0 AS datfrozenxid, "
						   "datistemplate, '' as datacl, -1 as datconnlimit, "
						   "'pg_default' AS dattablespace "
						   "FROM pg_database d "
						   "WHERE datallowconn ORDER BY 1");
	else
	{
		/*
		 * Note: 7.0 fails to cope with sub-select in COALESCE, so just deal
		 * with getting a NULL by not printing any OWNER clause.
		 */
		res = executeQuery(conn,
						   "SELECT datname, "
					"(select usename from pg_shadow where usesysid=datdba), "
						   "pg_encoding_to_char(d.encoding), "
						   "null::text AS datcollate, null::text AS datctype, 0 AS datfrozenxid, "
						   "'f' as datistemplate, "
						   "'' as datacl, -1 as datconnlimit, "
						   "'pg_default' AS dattablespace "
						   "FROM pg_database d "
						   "ORDER BY 1");
	}

	for (i = 0; i < PQntuples(res); i++)
	{
		char	   *dbname = PQgetvalue(res, i, 0);
		char	   *dbowner = PQgetvalue(res, i, 1);
		char	   *dbencoding = PQgetvalue(res, i, 2);
		char	   *dbcollate = PQgetvalue(res, i, 3);
		char	   *dbctype = PQgetvalue(res, i, 4);
		uint32		dbfrozenxid = atooid(PQgetvalue(res, i, 5));
		char	   *dbistemplate = PQgetvalue(res, i, 6);
		char	   *dbacl = PQgetvalue(res, i, 7);
		char	   *dbconnlimit = PQgetvalue(res, i, 8);
		char	   *dbtablespace = PQgetvalue(res, i, 9);
		char	   *fdbname;

		fdbname = strdup(fmtId(dbname));

		resetPQExpBuffer(buf);

		/*
		 * Skip the CREATE DATABASE commands for "template1" and "postgres",
		 * since they are presumably already there in the destination cluster.
		 * We do want to emit their ACLs and config options if any, however.
		 */
		if (strcmp(dbname, "template1") != 0 &&
			strcmp(dbname, "postgres") != 0)
		{
			appendPQExpBuffer(buf, "CREATE DATABASE %s", fdbname);

			appendPQExpBuffer(buf, " WITH TEMPLATE = template0");

			if (strlen(dbowner) != 0)
				appendPQExpBuffer(buf, " OWNER = %s", fmtId(dbowner));

			if (default_encoding && strcmp(dbencoding, default_encoding) != 0)
			{
				appendPQExpBuffer(buf, " ENCODING = ");
				appendStringLiteralConn(buf, dbencoding, conn);
			}

			if (default_collate && strcmp(dbcollate, default_collate) != 0)
			{
				appendPQExpBuffer(buf, " LC_COLLATE = ");
				appendStringLiteralConn(buf, dbcollate, conn);
			}

			if (default_ctype && strcmp(dbctype, default_ctype) != 0)
			{
				appendPQExpBuffer(buf, " LC_CTYPE = ");
				appendStringLiteralConn(buf, dbctype, conn);
			}

			/*
			 * Output tablespace if it isn't the default.  For default, it
			 * uses the default from the template database.  If tablespace is
			 * specified and tablespace creation failed earlier, (e.g. no such
			 * directory), the database creation will fail too.  One solution
			 * would be to use 'SET default_tablespace' like we do in pg_dump
			 * for setting non-default database locations.
			 */
			if (strcmp(dbtablespace, "pg_default") != 0 && !no_tablespaces)
				appendPQExpBuffer(buf, " TABLESPACE = %s",
								  fmtId(dbtablespace));

			if (strcmp(dbconnlimit, "-1") != 0)
				appendPQExpBuffer(buf, " CONNECTION LIMIT = %s",
								  dbconnlimit);

			appendPQExpBuffer(buf, ";\n");

			if (strcmp(dbistemplate, "t") == 0)
			{
				appendPQExpBuffer(buf, "UPDATE pg_catalog.pg_database SET datistemplate = 't' WHERE datname = ");
				appendStringLiteralConn(buf, dbname, conn);
				appendPQExpBuffer(buf, ";\n");
			}

			if (binary_upgrade)
			{
				appendPQExpBuffer(buf, "-- For binary upgrade, set datfrozenxid.\n");
				appendPQExpBuffer(buf, "UPDATE pg_catalog.pg_database "
								  "SET datfrozenxid = '%u' "
								  "WHERE datname = ",
								  dbfrozenxid);
				appendStringLiteralConn(buf, dbname, conn);
				appendPQExpBuffer(buf, ";\n");
			}
		}

		if (!skip_acls &&
			!buildACLCommands(fdbname, NULL, "DATABASE", dbacl, dbowner,
							  "", server_version, buf))
		{
			fprintf(stderr, _("%s: could not parse ACL list (%s) for database \"%s\"\n"),
					progname, dbacl, fdbname);
			PQfinish(conn);
			exit(1);
		}

		fprintf(OPF, "%s", buf->data);

		if (server_version >= 70300)
			dumpDatabaseConfig(conn, dbname);

		free(fdbname);
	}

	PQclear(res);
	destroyPQExpBuffer(buf);

	fprintf(OPF, "\n\n");
}
Code Example #17
File: info.c (Project: EMARQUIS/postgres)
/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables of the database referred
 * to by "db".
 *
 * NOTE: we assume that relations/entities with oids greater than
 * FirstNormalObjectId belong to the user
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
	PGconn	   *conn = connectToServer(cluster,
									   dbinfo->db_name);
	PGresult   *res;
	RelInfo    *relinfos;
	int			ntups;
	int			relnum;
	int			num_rels = 0;
	char	   *nspname = NULL;
	char	   *relname = NULL;
	int			i_spclocation,
				i_nspname,
				i_relname,
				i_oid,
				i_relfilenode,
				i_reltablespace;
	char		query[QUERY_ALLOC];

	/*
	 * pg_largeobject contains user data that does not appear in pg_dumpall
	 * --schema-only output, so we have to copy that system table heap and
	 * index.  We could grab the pg_largeobject oids from template1, but it is
	 * easy to treat it as a normal table. Order by oid so we can join old/new
	 * structures efficiently.
	 */

	snprintf(query, sizeof(query),
			 "CREATE TEMPORARY TABLE info_rels (reloid) AS SELECT c.oid "
			 "FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
			 "	   ON c.relnamespace = n.oid "
			 "LEFT OUTER JOIN pg_catalog.pg_index i "
			 "	   ON c.oid = i.indexrelid "
			 "WHERE relkind IN ('r', 'm', 'i'%s) AND "

	/*
	 * pg_dump only dumps valid indexes;  testing indisready is necessary in
	 * 9.2, and harmless in earlier/later versions.
	 */
			 " i.indisvalid IS DISTINCT FROM false AND "
			 " i.indisready IS DISTINCT FROM false AND "
	/* exclude possible orphaned temp tables */
			 "  ((n.nspname !~ '^pg_temp_' AND "
			 "    n.nspname !~ '^pg_toast_temp_' AND "
	/* skip pg_toast because toast index have relkind == 'i', not 't' */
			 "    n.nspname NOT IN ('pg_catalog', 'information_schema', "
			 "						'binary_upgrade', 'pg_toast') AND "
			 "	  c.oid >= %u) "
			 "  OR (n.nspname = 'pg_catalog' AND "
	"    relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ));",
	/* see the comment at the top of old_8_3_create_sequence_script() */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ?
			 "" : ", 'S'",
			 FirstNormalObjectId,
	/* does pg_largeobject_metadata need to be migrated? */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
	"" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");

	PQclear(executeQueryOrDie(conn, "%s", query));

	/*
	 * Get TOAST tables and indexes;  we have to gather the TOAST tables in
	 * later steps because we can't schema-qualify TOAST tables.
	 */
	PQclear(executeQueryOrDie(conn,
							  "INSERT INTO info_rels "
							  "SELECT reltoastrelid "
							  "FROM info_rels i JOIN pg_catalog.pg_class c "
							  "		ON i.reloid = c.oid "
							  "		AND c.reltoastrelid != %u", InvalidOid));
	PQclear(executeQueryOrDie(conn,
							  "INSERT INTO info_rels "
							  "SELECT indexrelid "
							  "FROM pg_index "
							  "WHERE indisvalid "
							  "    AND indrelid IN (SELECT reltoastrelid "
							  "        FROM info_rels i "
							  "            JOIN pg_catalog.pg_class c "
							  "            ON i.reloid = c.oid "
							  "            AND c.reltoastrelid != %u)",
							  InvalidOid));

	snprintf(query, sizeof(query),
			 "SELECT c.oid, n.nspname, c.relname, "
			 "	c.relfilenode, c.reltablespace, %s "
			 "FROM info_rels i JOIN pg_catalog.pg_class c "
			 "		ON i.reloid = c.oid "
			 "  JOIN pg_catalog.pg_namespace n "
			 "	   ON c.relnamespace = n.oid "
			 "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
			 "	   ON c.reltablespace = t.oid "
	/* we preserve pg_class.oid so we sort by it to match old/new */
			 "ORDER BY 1;",
	/* 9.2 removed the spclocation column */
			 (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
			 "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");

	res = executeQueryOrDie(conn, "%s", query);

	ntups = PQntuples(res);

	relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

	i_oid = PQfnumber(res, "oid");
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	i_relfilenode = PQfnumber(res, "relfilenode");
	i_reltablespace = PQfnumber(res, "reltablespace");
	i_spclocation = PQfnumber(res, "spclocation");

	for (relnum = 0; relnum < ntups; relnum++)
	{
		RelInfo    *curr = &relinfos[num_rels++];
		const char *tblspace;

		curr->reloid = atooid(PQgetvalue(res, relnum, i_oid));

		nspname = PQgetvalue(res, relnum, i_nspname);
		curr->nspname = pg_strdup(nspname);

		relname = PQgetvalue(res, relnum, i_relname);
		curr->relname = pg_strdup(relname);

		curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));

		if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
			/* Might be "", meaning the cluster default location. */
			tblspace = PQgetvalue(res, relnum, i_spclocation);
		else
			/* A zero reltablespace indicates the database tablespace. */
			tblspace = dbinfo->db_tblspace;

		strlcpy(curr->tablespace, tblspace, sizeof(curr->tablespace));
	}
	PQclear(res);

	PQfinish(conn);

	dbinfo->rel_arr.rels = relinfos;
	dbinfo->rel_arr.nrels = num_rels;
}
Code Example #18
File: vacuumlo.c (Project: GisKook/Gis)
/*
 * This vacuums LOs of one database. It returns 0 on success, -1 on failure.
 */
int
vacuumlo(char *database, struct _param * param)
{
	PGconn	   *conn;
	PGresult   *res,
			   *res2;
	char		buf[BUFSIZE];
	int			matched;
	int			deleted;
	int			i;
	static char *password = NULL;
	bool		new_pass;

	if (param->pg_prompt == TRI_YES && password == NULL)
		password = simple_prompt("Password: ", 100, false);

	/*
	 * Start the connection.  Loop until we have a password if requested by
	 * backend.
	 */
	do
	{
		new_pass = false;

		conn = PQsetdbLogin(param->pg_host,
							param->pg_port,
							NULL,
							NULL,
							database,
							param->pg_user,
							password);
		if (conn == NULL)
		{
			fprintf(stderr, "Connection to database \"%s\" failed\n",
					database);
			return -1;
		}

		if (PQstatus(conn) == CONNECTION_BAD &&
			PQconnectionNeedsPassword(conn) &&
			password == NULL &&
			param->pg_prompt != TRI_NO)
		{
			PQfinish(conn);
			password = simple_prompt("Password: ", 100, false);
			new_pass = true;
		}
	} while (new_pass);

	/* check to see that the backend connection was successfully made */
	if (PQstatus(conn) == CONNECTION_BAD)
	{
		fprintf(stderr, "Connection to database \"%s\" failed:\n%s",
				database, PQerrorMessage(conn));
		PQfinish(conn);
		return -1;
	}

	if (param->verbose)
	{
		fprintf(stdout, "Connected to %s\n", database);
		if (param->dry_run)
			fprintf(stdout, "Test run: no large objects will be removed!\n");
	}

	/*
	 * Don't get fooled by any non-system catalogs
	 */
	res = PQexec(conn, "SET search_path = pg_catalog");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "Failed to set search_path:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	/*
	 * First we create and populate the LO temp table
	 */
	buf[0] = '\0';
	strcat(buf, "CREATE TEMP TABLE vacuum_l AS ");
	if (PQserverVersion(conn) >= 90000)
		strcat(buf, "SELECT oid AS lo FROM pg_largeobject_metadata");
	else
		strcat(buf, "SELECT DISTINCT loid AS lo FROM pg_largeobject");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "Failed to create temp table:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	/*
	 * Analyze the temp table so that planner will generate decent plans for
	 * the DELETEs below.
	 */
	buf[0] = '\0';
	strcat(buf, "ANALYZE vacuum_l");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "Failed to vacuum temp table:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}
	PQclear(res);

	/*
	 * Now find any candidate tables that have columns of type oid.
	 *
	 * NOTE: we ignore system tables and temp tables by the expedient of
	 * rejecting tables in schemas named 'pg_*'.  In particular, the temp
	 * table formed above is ignored, and pg_largeobject will be too. If
	 * either of these were scanned, obviously we'd end up with nothing to
	 * delete...
	 *
	 * NOTE: the system oid column is ignored, as it has attnum < 1. This
	 * shouldn't matter for correctness, but it saves time.
	 */
	buf[0] = '\0';
	strcat(buf, "SELECT s.nspname, c.relname, a.attname ");
	strcat(buf, "FROM pg_class c, pg_attribute a, pg_namespace s, pg_type t ");
	strcat(buf, "WHERE a.attnum > 0 AND NOT a.attisdropped ");
	strcat(buf, "      AND a.attrelid = c.oid ");
	strcat(buf, "      AND a.atttypid = t.oid ");
	strcat(buf, "      AND c.relnamespace = s.oid ");
	strcat(buf, "      AND t.typname in ('oid', 'lo') ");
	strcat(buf, "      AND c.relkind = 'r'");
	strcat(buf, "      AND s.nspname !~ '^pg_'");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "Failed to find OID columns:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}

	for (i = 0; i < PQntuples(res); i++)
	{
		char	   *schema,
				   *table,
				   *field;

		schema = PQgetvalue(res, i, 0);
		table = PQgetvalue(res, i, 1);
		field = PQgetvalue(res, i, 2);

		if (param->verbose)
			fprintf(stdout, "Checking %s in %s.%s\n", field, schema, table);

		snprintf(buf, BUFSIZE,
				 "DELETE FROM vacuum_l "
				 "WHERE lo IN (SELECT \"%s\" FROM \"%s\".\"%s\")",
				 field, schema, table);
		res2 = PQexec(conn, buf);
		if (PQresultStatus(res2) != PGRES_COMMAND_OK)
		{
			fprintf(stderr, "Failed to check %s in table %s.%s:\n",
					field, schema, table);
			fprintf(stderr, "%s", PQerrorMessage(conn));
			PQclear(res2);
			PQclear(res);
			PQfinish(conn);
			return -1;
		}
		PQclear(res2);
	}
	PQclear(res);

	/*
	 * Run the actual deletes in a single transaction.	Note that this would
	 * be a bad idea in pre-7.1 Postgres releases (since rolling back a table
	 * delete used to cause problems), but it should be safe now.
	 */
	res = PQexec(conn, "begin");
	PQclear(res);

	/*
	 * Finally, those entries remaining in vacuum_l are orphans.
	 */
	buf[0] = '\0';
	strcat(buf, "SELECT lo ");
	strcat(buf, "FROM vacuum_l");
	res = PQexec(conn, buf);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "Failed to read temp table:\n");
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}

	matched = PQntuples(res);
	deleted = 0;
	for (i = 0; i < matched; i++)
	{
		Oid			lo = atooid(PQgetvalue(res, i, 0));

		if (param->verbose)
		{
			fprintf(stdout, "\rRemoving lo %6u   ", lo);
			fflush(stdout);
		}

		if (param->dry_run == 0)
		{
			if (lo_unlink(conn, lo) < 0)
			{
				fprintf(stderr, "\nFailed to remove lo %u: ", lo);
				fprintf(stderr, "%s", PQerrorMessage(conn));
			}
			else
				deleted++;
		}
		else
			deleted++;
	}
	PQclear(res);

	/*
	 * That's all folks!
	 */
	res = PQexec(conn, "end");
	PQclear(res);

	PQfinish(conn);

	if (param->verbose)
		fprintf(stdout, "\r%s %d large objects from %s.\n",
		   (param->dry_run ? "Would remove" : "Removed"), deleted, database);

	return 0;
}
Code Example #19
File: info.c (Project: aKhadiemik/postgres)
/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables of the database referred
 * to by "db".
 *
 * NOTE: we assume that relations/entities with oids greater than
 * FirstNormalObjectId belong to the user
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
	PGconn	   *conn = connectToServer(cluster,
									   dbinfo->db_name);
	PGresult   *res;
	RelInfo    *relinfos;
	int			ntups;
	int			relnum;
	int			num_rels = 0;
	char	   *nspname = NULL;
	char	   *relname = NULL;
	int			i_spclocation,
				i_nspname,
				i_relname,
				i_oid,
				i_relfilenode;
	char		query[QUERY_ALLOC];

	/*
	 * pg_largeobject contains user data that does not appear in pg_dumpall
	 * --schema-only output, so we have to copy that system table heap and
	 * index.  We could grab the pg_largeobject oids from template1, but it is
	 * easy to treat it as a normal table. Order by oid so we can join old/new
	 * structures efficiently.
	 */

	snprintf(query, sizeof(query),
			 "SELECT c.oid, n.nspname, c.relname, "
			 "	c.relfilenode, t.spclocation "
			 "FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
			 "	   ON c.relnamespace = n.oid "
			 "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
			 "	   ON c.reltablespace = t.oid "
			 "WHERE relkind IN ('r','t', 'i'%s) AND "
			 "  ((n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "
			 "	  c.oid >= %u) "
			 "  OR (n.nspname = 'pg_catalog' AND "
	"    relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) "
	/* we preserve pg_class.oid so we sort by it to match old/new */
			 "ORDER BY 1;",
	/* see the comment at the top of old_8_3_create_sequence_script() */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ?
			 "" : ", 'S'",
	/* this oid allows us to skip system toast tables */
			 FirstNormalObjectId,
	/* does pg_largeobject_metadata need to be migrated? */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
	"" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");

	res = executeQueryOrDie(conn, query);

	ntups = PQntuples(res);

	relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

	i_oid = PQfnumber(res, "oid");
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	i_relfilenode = PQfnumber(res, "relfilenode");
	i_spclocation = PQfnumber(res, "spclocation");

	for (relnum = 0; relnum < ntups; relnum++)
	{
		RelInfo    *curr = &relinfos[num_rels++];
		const char *tblspace;

		curr->reloid = atooid(PQgetvalue(res, relnum, i_oid));

		nspname = PQgetvalue(res, relnum, i_nspname);
		strlcpy(curr->nspname, nspname, sizeof(curr->nspname));

		relname = PQgetvalue(res, relnum, i_relname);
		strlcpy(curr->relname, relname, sizeof(curr->relname));

		curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));

		tblspace = PQgetvalue(res, relnum, i_spclocation);
		/* if no table tablespace, use the database tablespace */
		if (strlen(tblspace) == 0)
			tblspace = dbinfo->db_tblspace;
		strlcpy(curr->tablespace, tblspace, sizeof(curr->tablespace));
	}
	PQclear(res);

	PQfinish(conn);

	dbinfo->rel_arr.rels = relinfos;
	dbinfo->rel_arr.nrels = num_rels;
}
Code Example #20
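/*
 * pglo_export()
 *
 * For every schema.table.attribute registered in pgLO->lolist, selects the
 * large-object OIDs referenced by that column.  In show mode the OIDs are
 * only printed; otherwise each object is exported with lo_export() to
 * <space>/<db>/<schema>/<table>/<attr>/<oid> and recorded in the index file.
 */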
void
pglo_export(LODumpMaster * pgLO)
{
	LOlist	   *ll;
	int			tuples;
	char		path[BUFSIZ],
				Qbuff[QUERY_BUFSIZ];

	if (pgLO->action != ACTION_SHOW)
	{
		time_t		t;

		time(&t);
		fprintf(pgLO->index, "#\n# This is the PostgreSQL large object dump index\n#\n");
		fprintf(pgLO->index, "#\tDate:     %s", ctime(&t));
		fprintf(pgLO->index, "#\tHost:     %s\n", pgLO->host);
		fprintf(pgLO->index, "#\tDatabase: %s\n", pgLO->db);
		fprintf(pgLO->index, "#\tUser:     %s\n", pgLO->user);
		fprintf(pgLO->index, "#\n# oid\ttable\tattribut\tinfile\tschema\n#\n");
	}

	pgLO->counter = 0;

	for (ll = pgLO->lolist; ll->lo_table != NULL; ll++)
	{
		/*
		 * Query: find the LOs referenced by this column
		 */
		snprintf(Qbuff, QUERY_BUFSIZ,
				 "SELECT DISTINCT l.loid FROM \"%s\".\"%s\" x, pg_catalog.pg_largeobject l "
				 "WHERE x.\"%s\" = l.loid",
				 ll->lo_schema, ll->lo_table, ll->lo_attr);

		/* puts(Qbuff); */

		pgLO->res = PQexec(pgLO->conn, Qbuff);

		if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
		{
			fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
					PQerrorMessage(pgLO->conn));
		}
		else if ((tuples = PQntuples(pgLO->res)) == 0)
		{
			if (!pgLO->quiet && pgLO->action == ACTION_EXPORT_ATTR)
				printf("%s: no large objects in \"%s\".\"%s\".\"%s\"\n",
					   progname, ll->lo_schema, ll->lo_table, ll->lo_attr);
		}
		else
		{

			int			t;
			char	   *val;

			/*
			 * Create DIR/FILE
			 */
			if (pgLO->action != ACTION_SHOW)
			{

				snprintf(path, BUFSIZ, "%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}
				
				snprintf(path, BUFSIZ, "%s/%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema, ll->lo_table);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				snprintf(path, BUFSIZ, "%s/%s/%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema, ll->lo_table, ll->lo_attr);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				if (!pgLO->quiet)
					printf("dump %s.%s.%s (%d large obj)\n",
						   ll->lo_schema, ll->lo_table, ll->lo_attr, tuples);
			}

			pgLO->counter += tuples;

			for (t = 0; t < tuples; t++)
			{
				Oid			lo;

				val = PQgetvalue(pgLO->res, t, 0);

				lo = atooid(val);

				if (pgLO->action == ACTION_SHOW)
				{
					printf("%s.%s.%s: %u\n", ll->lo_schema, ll->lo_table, ll->lo_attr, lo);
					continue;
				}

				snprintf(path, BUFSIZ, "%s/%s/%s/%s/%s/%s", pgLO->space,
						 pgLO->db, ll->lo_schema, ll->lo_table, ll->lo_attr, val);

				if (lo_export(pgLO->conn, lo, path) < 0)
					fprintf(stderr, "%s: lo_export failed:\n%s", progname,
							PQerrorMessage(pgLO->conn));

				else
					fprintf(pgLO->index, "%s\t%s\t%s\t%s/%s/%s/%s/%s\t%s\n", 
							val, ll->lo_table, ll->lo_attr, pgLO->db, 
							ll->lo_schema, ll->lo_table, ll->lo_attr, 
							val, ll->lo_schema);
			}
		}

		PQclear(pgLO->res);
	}
}
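The index file written above records one tab-separated line per exported large object, matching the "# oid\ttable\tattribut\tinfile\tschema" header emitted at the top. The following sketch is not part of pg_dumplo; the function name and buffer sizes are illustrative assumptions. It shows how a companion import pass could parse one such line back into its fields.

#include <stdio.h>

typedef unsigned int Oid;

/*
 * Parse one index line of the form "oid\ttable\tattr\tinfile\tschema".
 * Returns 1 on success, 0 for comment lines, -1 on malformed input.
 * Caller is assumed to pass buffers of at least 64/64/256/64 bytes.
 */
static int
parse_index_line(const char *line, Oid *oid, char *table, char *attr,
				 char *infile, char *schema)
{
	/* skip the comment header lines beginning with '#' */
	if (line[0] == '#')
		return 0;

	/* five tab-separated fields, as written by pglo_export() */
	if (sscanf(line, "%u\t%63[^\t]\t%63[^\t]\t%255[^\t]\t%63[^\t\n]",
			   oid, table, attr, infile, schema) != 5)
		return -1;

	return 1;
}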
コード例 #21
0
ファイル: info.c プロジェクト: no0p/postgres
/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables of the database referred
 * to by "dbinfo".
 *
 * NOTE: we assume that relations/entities with oids greater than
 * FirstNormalObjectId belong to the user
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
    PGconn	   *conn = connectToServer(cluster,
                                       dbinfo->db_name);
    PGresult   *res;
    RelInfo    *relinfos;
    int			ntups;
    int			relnum;
    int			num_rels = 0;
    char	   *nspname = NULL;
    char	   *relname = NULL;
    char	   *tablespace = NULL;
    int			i_spclocation,
                i_nspname,
                i_relname,
                i_oid,
                i_relfilenode,
                i_reltablespace;
    char		query[QUERY_ALLOC];
    char	   *last_namespace = NULL,
                *last_tablespace = NULL;

    /*
     * pg_largeobject contains user data that does not appear in pg_dump
     * --schema-only output, so we have to copy that system table heap and
     * index.  We could grab the pg_largeobject oids from template1, but it is
     * easy to treat it as a normal table. Order by oid so we can join old/new
     * structures efficiently.
     */

    snprintf(query, sizeof(query),
             /* get regular heap */
             "WITH regular_heap (reloid) AS ( "
             "	SELECT c.oid "
             "	FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
             "		   ON c.relnamespace = n.oid "
             "	LEFT OUTER JOIN pg_catalog.pg_index i "
             "		   ON c.oid = i.indexrelid "
             "	WHERE relkind IN ('r', 'm', 'i', 'S') AND "
             /*
              * pg_dump only dumps valid indexes;  testing indisready is necessary in
              * 9.2, and harmless in earlier/later versions.
              */
             "		i.indisvalid IS DISTINCT FROM false AND "
             "		i.indisready IS DISTINCT FROM false AND "
             /* exclude possible orphaned temp tables */
             "	  ((n.nspname !~ '^pg_temp_' AND "
             "	    n.nspname !~ '^pg_toast_temp_' AND "
             /* skip pg_toast because toast indexes have relkind == 'i', not 't' */
             "	    n.nspname NOT IN ('pg_catalog', 'information_schema', "
             "							'binary_upgrade', 'pg_toast') AND "
             "		  c.oid >= %u) OR "
             "	  (n.nspname = 'pg_catalog' AND "
             "    relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "
             /*
              * We have to gather the TOAST tables in later steps because we
              * can't schema-qualify TOAST tables.
              */
             /* get TOAST heap */
             "	toast_heap (reloid) AS ( "
             "	SELECT reltoastrelid "
             "	FROM regular_heap JOIN pg_catalog.pg_class c "
             "		ON regular_heap.reloid = c.oid "
             "		AND c.reltoastrelid != %u), "
             /* get indexes on regular and TOAST heap */
             "	all_index (reloid) AS ( "
             "	SELECT indexrelid "
             "	FROM pg_index "
             "	WHERE indisvalid "
             "    AND indrelid IN (SELECT reltoastrelid "
             "        FROM (SELECT reloid FROM regular_heap "
             "			   UNION ALL "
             "			   SELECT reloid FROM toast_heap) all_heap "
             "            JOIN pg_catalog.pg_class c "
             "            ON all_heap.reloid = c.oid "
             "            AND c.reltoastrelid != %u)) "
             /* get all rels */
             "SELECT c.oid, n.nspname, c.relname, "
             "	c.relfilenode, c.reltablespace, %s "
             "FROM (SELECT reloid FROM regular_heap "
             "	   UNION ALL "
             "	   SELECT reloid FROM toast_heap  "
             "	   UNION ALL "
             "	   SELECT reloid FROM all_index) all_rels "
             "  JOIN pg_catalog.pg_class c "
             "		ON all_rels.reloid = c.oid "
             "  JOIN pg_catalog.pg_namespace n "
             "	   ON c.relnamespace = n.oid "
             "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
             "	   ON c.reltablespace = t.oid "
             /* we preserve pg_class.oid so we sort by it to match old/new */
             "ORDER BY 1;",
             FirstNormalObjectId,
             /* does pg_largeobject_metadata need to be migrated? */
             (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
             "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
             InvalidOid, InvalidOid,
             /* 9.2 removed the spclocation column */
             (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
             "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");

    res = executeQueryOrDie(conn, "%s", query);

    ntups = PQntuples(res);

    relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

    i_oid = PQfnumber(res, "oid");
    i_nspname = PQfnumber(res, "nspname");
    i_relname = PQfnumber(res, "relname");
    i_relfilenode = PQfnumber(res, "relfilenode");
    i_reltablespace = PQfnumber(res, "reltablespace");
    i_spclocation = PQfnumber(res, "spclocation");

    for (relnum = 0; relnum < ntups; relnum++)
    {
        RelInfo    *curr = &relinfos[num_rels++];

        curr->reloid = atooid(PQgetvalue(res, relnum, i_oid));

        nspname = PQgetvalue(res, relnum, i_nspname);
        curr->nsp_alloc = false;

        /*
         * Many of the namespace and tablespace strings are identical, so we
         * try to reuse the allocated string pointers where possible to reduce
         * memory consumption.
         */
        /* Can we reuse the previous string allocation? */
        if (last_namespace && strcmp(nspname, last_namespace) == 0)
            curr->nspname = last_namespace;
        else
        {
            last_namespace = curr->nspname = pg_strdup(nspname);
            curr->nsp_alloc = true;
        }

        relname = PQgetvalue(res, relnum, i_relname);
        curr->relname = pg_strdup(relname);

        curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
        curr->tblsp_alloc = false;

        /* Is the tablespace oid non-zero? */
        if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
        {
            /*
             * The tablespace location might be "", meaning the cluster
             * default location, i.e. pg_default or pg_global.
             */
            tablespace = PQgetvalue(res, relnum, i_spclocation);

            /* Can we reuse the previous string allocation? */
            if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
                curr->tablespace = last_tablespace;
            else
            {
                last_tablespace = curr->tablespace = pg_strdup(tablespace);
                curr->tblsp_alloc = true;
            }
        }
        else
            /* A zero reltablespace oid indicates the database tablespace. */
            curr->tablespace = dbinfo->db_tablespace;
    }
    PQclear(res);

    PQfinish(conn);

    dbinfo->rel_arr.rels = relinfos;
    dbinfo->rel_arr.nrels = num_rels;
}
コード例 #22
0
ファイル: info.c プロジェクト: adityavs/postgres
/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables and indexes of the database
 * referred to by "dbinfo".
 *
 * Note: the resulting RelInfo array is assumed to be sorted by OID.
 * This allows later processing to match up old and new databases efficiently.
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
	PGconn	   *conn = connectToServer(cluster,
									   dbinfo->db_name);
	PGresult   *res;
	RelInfo    *relinfos;
	int			ntups;
	int			relnum;
	int			num_rels = 0;
	char	   *nspname = NULL;
	char	   *relname = NULL;
	char	   *tablespace = NULL;
	int			i_spclocation,
				i_nspname,
				i_relname,
				i_reloid,
				i_indtable,
				i_toastheap,
				i_relfilenode,
				i_reltablespace;
	char		query[QUERY_ALLOC];
	char	   *last_namespace = NULL,
			   *last_tablespace = NULL;

	query[0] = '\0';			/* initialize query string to empty */

	/*
	 * Create a CTE that collects OIDs of regular user tables, including
	 * matviews and sequences, but excluding toast tables and indexes.  We
	 * assume that relations with OIDs >= FirstNormalObjectId belong to the
	 * user.  (That's probably redundant with the namespace-name exclusions,
	 * but let's be safe.)
	 *
	 * pg_largeobject contains user data that does not appear in pg_dump
	 * output, so we have to copy that system table.  It's easiest to do that
	 * by treating it as a user table.
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "WITH regular_heap (reloid, indtable, toastheap) AS ( "
			 "  SELECT c.oid, 0::oid, 0::oid "
			 "  FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
			 "         ON c.relnamespace = n.oid "
			 "  WHERE relkind IN (" CppAsString2(RELKIND_RELATION) ", "
			 CppAsString2(RELKIND_MATVIEW) ") AND "
	/* exclude possible orphaned temp tables */
			 "    ((n.nspname !~ '^pg_temp_' AND "
			 "      n.nspname !~ '^pg_toast_temp_' AND "
			 "      n.nspname NOT IN ('pg_catalog', 'information_schema', "
			 "                        'binary_upgrade', 'pg_toast') AND "
			 "      c.oid >= %u::pg_catalog.oid) OR "
			 "     (n.nspname = 'pg_catalog' AND "
			 "      relname IN ('pg_largeobject') ))), ",
			 FirstNormalObjectId);

	/*
	 * Add a CTE that collects OIDs of toast tables belonging to the tables
	 * selected by the regular_heap CTE.  (We have to do this separately
	 * because the namespace-name rules above don't work for toast tables.)
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "  toast_heap (reloid, indtable, toastheap) AS ( "
			 "  SELECT c.reltoastrelid, 0::oid, c.oid "
			 "  FROM regular_heap JOIN pg_catalog.pg_class c "
			 "      ON regular_heap.reloid = c.oid "
			 "  WHERE c.reltoastrelid != 0), ");

	/*
	 * Add a CTE that collects OIDs of all valid indexes on the previously
	 * selected tables.  We can ignore invalid indexes since pg_dump does.
	 * Testing indisready is necessary in 9.2, and harmless in earlier/later
	 * versions.
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "  all_index (reloid, indtable, toastheap) AS ( "
			 "  SELECT indexrelid, indrelid, 0::oid "
			 "  FROM pg_catalog.pg_index "
			 "  WHERE indisvalid AND indisready "
			 "    AND indrelid IN "
			 "        (SELECT reloid FROM regular_heap "
			 "         UNION ALL "
			 "         SELECT reloid FROM toast_heap)) ");

	/*
	 * And now we can write the query that retrieves the data we want for each
	 * heap and index relation.  Make sure result is sorted by OID.
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "SELECT all_rels.*, n.nspname, c.relname, "
			 "  c.relfilenode, c.reltablespace, %s "
			 "FROM (SELECT * FROM regular_heap "
			 "      UNION ALL "
			 "      SELECT * FROM toast_heap "
			 "      UNION ALL "
			 "      SELECT * FROM all_index) all_rels "
			 "  JOIN pg_catalog.pg_class c "
			 "      ON all_rels.reloid = c.oid "
			 "  JOIN pg_catalog.pg_namespace n "
			 "     ON c.relnamespace = n.oid "
			 "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
			 "     ON c.reltablespace = t.oid "
			 "ORDER BY 1;",
	/* 9.2 removed the pg_tablespace.spclocation column */
			 (GET_MAJOR_VERSION(cluster->major_version) >= 902) ?
			 "pg_catalog.pg_tablespace_location(t.oid) AS spclocation" :
			 "t.spclocation");

	res = executeQueryOrDie(conn, "%s", query);

	ntups = PQntuples(res);

	relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

	i_reloid = PQfnumber(res, "reloid");
	i_indtable = PQfnumber(res, "indtable");
	i_toastheap = PQfnumber(res, "toastheap");
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	i_relfilenode = PQfnumber(res, "relfilenode");
	i_reltablespace = PQfnumber(res, "reltablespace");
	i_spclocation = PQfnumber(res, "spclocation");

	for (relnum = 0; relnum < ntups; relnum++)
	{
		RelInfo    *curr = &relinfos[num_rels++];

		curr->reloid = atooid(PQgetvalue(res, relnum, i_reloid));
		curr->indtable = atooid(PQgetvalue(res, relnum, i_indtable));
		curr->toastheap = atooid(PQgetvalue(res, relnum, i_toastheap));

		nspname = PQgetvalue(res, relnum, i_nspname);
		curr->nsp_alloc = false;

		/*
		 * Many of the namespace and tablespace strings are identical, so we
		 * try to reuse the allocated string pointers where possible to reduce
		 * memory consumption.
		 */
		/* Can we reuse the previous string allocation? */
		if (last_namespace && strcmp(nspname, last_namespace) == 0)
			curr->nspname = last_namespace;
		else
		{
			last_namespace = curr->nspname = pg_strdup(nspname);
			curr->nsp_alloc = true;
		}

		relname = PQgetvalue(res, relnum, i_relname);
		curr->relname = pg_strdup(relname);

		curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
		curr->tblsp_alloc = false;

		/* Is the tablespace oid non-default? */
		if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
		{
			/*
			 * The tablespace location might be "", meaning the cluster
			 * default location, i.e. pg_default or pg_global.
			 */
			tablespace = PQgetvalue(res, relnum, i_spclocation);

			/* Can we reuse the previous string allocation? */
			if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
				curr->tablespace = last_tablespace;
			else
			{
				last_tablespace = curr->tablespace = pg_strdup(tablespace);
				curr->tblsp_alloc = true;
			}
		}
		else
			/* A zero reltablespace oid indicates the database tablespace. */
			curr->tablespace = dbinfo->db_tablespace;
	}
	PQclear(res);

	PQfinish(conn);

	dbinfo->rel_arr.rels = relinfos;
	dbinfo->rel_arr.nrels = num_rels;
}
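The comment above notes that the result is sorted by OID so that later processing can match up old and new databases efficiently. The sketch below is not pg_upgrade's actual mapping code; the struct and function names are invented and only the reloid field is assumed. It illustrates the linear merge-style walk that the shared ordering makes possible.

typedef unsigned int Oid;

typedef struct
{
	Oid			reloid;
	/* ... other per-relation fields ... */
} MiniRelInfo;

/*
 * Walk two OID-sorted relation arrays in parallel.  Because both sides
 * come back "ORDER BY 1" (the preserved pg_class.oid), pairing old and
 * new relations is a single linear pass rather than a nested search.
 */
static void
match_rel_arrays(const MiniRelInfo *old_rels, int n_old,
				 const MiniRelInfo *new_rels, int n_new)
{
	int			i = 0,
				j = 0;

	while (i < n_old && j < n_new)
	{
		if (old_rels[i].reloid == new_rels[j].reloid)
		{
			/* same preserved OID: pair the old and new relation here */
			i++;
			j++;
		}
		else if (old_rels[i].reloid < new_rels[j].reloid)
			i++;				/* relation only present in the old cluster */
		else
			j++;				/* relation only present in the new cluster */
	}
}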
コード例 #23
0
ファイル: lo.c プロジェクト: adunstan/pg-cvs-mirror
Datum
lo_manage(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	int			attnum;			/* attribute number to monitor	*/
	char	  **args;			/* Args containing attr name	*/
	TupleDesc	tupdesc;		/* Tuple Descriptor				*/
	HeapTuple	rettuple;		/* Tuple to be returned			*/
	bool		isdelete;		/* are we deleting?				*/
	HeapTuple	newtuple;		/* The new value for tuple		*/
	HeapTuple	trigtuple;		/* The original value of tuple	*/

	if (!CALLED_AS_TRIGGER(fcinfo))		/* internal error */
		elog(ERROR, "not fired by trigger manager");

	/*
	 * Fetch some values from trigdata
	 */
	newtuple = trigdata->tg_newtuple;
	trigtuple = trigdata->tg_trigtuple;
	tupdesc = trigdata->tg_relation->rd_att;
	args = trigdata->tg_trigger->tgargs;

	/* tuple to return to Executor */
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		rettuple = newtuple;
	else
		rettuple = trigtuple;

	/* Are we deleting the row? */
	isdelete = TRIGGER_FIRED_BY_DELETE(trigdata->tg_event);

	/* Get the column we're interested in */
	attnum = SPI_fnumber(tupdesc, args[0]);

	if (attnum <= 0)
		elog(ERROR, "column \"%s\" does not exist", args[0]);

	/*
	 * Handle updates
	 *
	 * Here, if the value of the monitored attribute changes, then the large
	 * object associated with the original value is unlinked.
	 */
	if (newtuple != NULL)
	{
		char	   *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
		char	   *newv = SPI_getvalue(newtuple, tupdesc, attnum);

		if (orig != NULL && (newv == NULL || strcmp(orig, newv) != 0))
			DirectFunctionCall1(lo_unlink,
								ObjectIdGetDatum(atooid(orig)));

		if (newv)
			pfree(newv);
		if (orig)
			pfree(orig);
	}

	/*
	 * Handle deleting of rows
	 *
	 * Here, we unlink the large object associated with the managed attribute
	 */
	if (isdelete)
	{
		char	   *orig = SPI_getvalue(trigtuple, tupdesc, attnum);

		if (orig != NULL)
		{
			DirectFunctionCall1(lo_unlink,
								ObjectIdGetDatum(atooid(orig)));

			pfree(orig);
		}
	}

	return PointerGetDatum(rettuple);
}