Example #1
/*
 * set_locale_and_encoding()
 *
 * query the database to get the template0 locale and encoding
 */
static void
set_locale_and_encoding(ClusterInfo *cluster)
{
	ControlData *ctrl = &cluster->controldata;
	PGconn	   *conn;
	PGresult   *res;
	int			i_encoding;
	int			cluster_version = cluster->major_version;

	conn = connectToServer(cluster, "template1");

	/* for pg < 80400, we got the values from pg_controldata */
	if (cluster_version >= 80400)
	{
		int			i_datcollate;
		int			i_datctype;

		res = executeQueryOrDie(conn,
								"SELECT datcollate, datctype "
								"FROM	pg_catalog.pg_database "
								"WHERE	datname = 'template0' ");
		assert(PQntuples(res) == 1);

		i_datcollate = PQfnumber(res, "datcollate");
		i_datctype = PQfnumber(res, "datctype");

		if (GET_MAJOR_VERSION(cluster->major_version) < 902)
		{
			/*
			 * Pre-9.2 did not canonicalize the supplied locale names to match
			 * what the system returns, while 9.2+ does, so convert pre-9.2 to
			 * match.
			 */
			ctrl->lc_collate = get_canonical_locale_name(LC_COLLATE,
								pg_strdup(PQgetvalue(res, 0, i_datcollate)));
			ctrl->lc_ctype = get_canonical_locale_name(LC_CTYPE,
								  pg_strdup(PQgetvalue(res, 0, i_datctype)));
		}
		else
		{
			ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
			ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));
		}

		PQclear(res);
	}

	res = executeQueryOrDie(conn,
							"SELECT pg_catalog.pg_encoding_to_char(encoding) "
							"FROM	pg_catalog.pg_database "
							"WHERE	datname = 'template0' ");
	assert(PQntuples(res) == 1);

	i_encoding = PQfnumber(res, "pg_encoding_to_char");
	ctrl->encoding = pg_strdup(PQgetvalue(res, 0, i_encoding));

	PQclear(res);

	PQfinish(conn);
}
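
The canonicalization branch above relies on get_canonical_locale_name(). As a rough, hypothetical sketch of the idea only (the real pg_upgrade helper also saves and restores the previous locale and reports failures), it round-trips the supplied name through setlocale() so the spelling matches what the system itself reports:

#include <locale.h>

/* Hypothetical sketch: canonicalize a locale name by asking the C library
 * for its preferred spelling; fall back to the input if it is not valid. */
static char *
canonical_locale_sketch(int category, const char *locale_name)
{
	char	   *canonical = setlocale(category, locale_name);

	return pg_strdup(canonical ? canonical : locale_name);
}
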
Example #2
/*
 * Issue a query on a catalog table, and produce calls to a preassign support
 * function from the result set.
 *
 * The output is a string, containing SQL calls like:
 *
 * SELECT binary_upgrade.preassign_*_oid(<oid>, <other args>);
 *
 * 'funcname' is the "preassign_*_oid" function to use.
 * 'sql' is the query to issue. The columns of the result set are passed as
 * arguments to the preassign-support function.
 *
 */
static void
dump_rows(migratorContext *ctx, PQExpBuffer buf, FILE *file, PGconn *conn,
		  const char *sql, const char *funcname)
{
	int			ntups;
	int			ncols;
	int			row;
	int			col;
	PGresult   *res;

	if (file != NULL)
		buf = createPQExpBuffer();

	/*
	 * Add a WHERE or AND clause to filter out built-in objects.
	 *
	 * If the query contains "UNION ALL", then it's the caller's
	 * responsibility to do the filtering.  This special case exists for the
	 * one more complicated query in the get_old_oids() function; all the
	 * other queries are very simple ones.
	 */
	if (strstr(sql, "WHERE ") == NULL)
		res = executeQueryOrDie(ctx, conn, "%s WHERE oid >= %u", sql, FirstNormalObjectId);
	else if (strstr(sql, "UNION ALL") == NULL)
		res = executeQueryOrDie(ctx, conn, "%s AND oid >= %u", sql, FirstNormalObjectId);
	else
		res = executeQueryOrDie(ctx, conn, "%s", sql);

	ntups = PQntuples(res);
	ncols = PQnfields(res);

	for (row = 0; row < ntups; row++)
	{
		appendPQExpBuffer(buf, "SELECT binary_upgrade.%s('%s'",
						  funcname,
						  simple_escape_literal(ctx, conn, PQgetvalue(res, row, 0)));

		for (col = 1; col < ncols; col++)
			appendPQExpBuffer(buf, ", '%s'",
							  simple_escape_literal(ctx, conn, PQgetvalue(res, row, col)));
		appendPQExpBuffer(buf, ");\n");

		if (file)
		{
			fwrite(buf->data, buf->len, 1, file);
			resetPQExpBuffer(buf);
		}
	}
	PQclear(res);

	if (file != NULL)
		destroyPQExpBuffer(buf);
}
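
A minimal usage sketch (hypothetical, assuming an open connection and a migratorContext as in get_old_oids() below): when a FILE pointer is supplied, dump_rows() allocates its own buffer and writes each generated preassign call directly to that file.

/* Hypothetical usage sketch: emit preassign calls for user namespaces
 * straight to a file.  buf may be NULL because dump_rows() creates its
 * own buffer when file != NULL.  (Error handling omitted.) */
FILE	   *f = fopen("preassign_namespaces.sql", "w");

dump_rows(ctx, NULL, f, conn,
		  "SELECT oid, nspname FROM pg_namespace",
		  "preassign_namespace_oid");
fclose(f);
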
Example #3
/*
 *	check_is_install_user()
 *
 *	Check we are the install user, and that the new cluster
 *	has no other users.
 */
static void
check_is_install_user(ClusterInfo *cluster)
{
	PGresult   *res;
	PGconn	   *conn = connectToServer(cluster, "template1");

	prep_status("Checking database user is the install user");

	/* Can't use pg_authid because only superusers can view it. */
	res = executeQueryOrDie(conn,
							"SELECT rolsuper, oid "
							"FROM pg_catalog.pg_roles "
							"WHERE rolname = current_user "
							"AND rolname !~ '^pg_'");

	/*
	 * We only allow the install user in the new cluster (see comment below)
	 * and we preserve pg_authid.oid, so this must be the install user in the
	 * old cluster too.
	 */
	if (PQntuples(res) != 1 ||
		atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
		pg_fatal("database user \"%s\" is not the install user\n",
				 os_info.user);

	PQclear(res);

	res = executeQueryOrDie(conn,
							"SELECT COUNT(*) "
							"FROM pg_catalog.pg_roles "
							"WHERE rolname !~ '^pg_'");

	if (PQntuples(res) != 1)
		pg_fatal("could not determine the number of users\n");

	/*
	 * We only allow the install user in the new cluster because other defined
	 * users might match users defined in the old cluster and generate an
	 * error during pg_dump restore.
	 */
	if (cluster == &new_cluster && atooid(PQgetvalue(res, 0, 0)) != 1)
		pg_fatal("Only the install user can be defined in the new cluster.\n");

	PQclear(res);

	PQfinish(conn);

	check_ok();
}
Example #4
/*
 * It is possible for there to be a mismatch in the need for TOAST tables
 * between the old and new servers, e.g. some pre-9.1 tables didn't need
 * TOAST tables but will need them in 9.1+.  (There are also opposite cases,
 * but these are handled by setting binary_upgrade_next_toast_pg_class_oid.)
 *
 * We can't allow the TOAST table to be created by pg_dump with a
 * pg_dump-assigned oid because it might conflict with a later table that
 * uses that oid, causing a "file exists" error for pg_class conflicts, and
 * a "duplicate oid" error for pg_type conflicts.  (TOAST tables need pg_type
 * entries.)
 *
 * Therefore, a backend in binary-upgrade mode will not create a TOAST
 * table unless an OID is passed in via the pg_upgrade_support functions.
 * This function is called after the restore and uses ALTER TABLE to
 * auto-create any needed TOAST tables which will not conflict with
 * restored oids.
 */
void
optionally_create_toast_tables(void)
{
	int			dbnum;

	prep_status("Creating newly-required TOAST tables");

	for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &new_cluster.dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(&new_cluster, active_db->db_name);

		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n "
								"WHERE	c.relnamespace = n.oid AND "
							  "		n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
								"c.relkind IN ('r', 'm') AND "
								"c.reltoastrelid = 0");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			/* enable auto-oid-numbered TOAST creation if needed */
			PQclear(executeQueryOrDie(conn, "SELECT binary_upgrade.set_next_toast_pg_class_oid('%d'::pg_catalog.oid);",
					OPTIONALLY_CREATE_TOAST_OID));

			/* dummy command that also triggers check for required TOAST table */
			PQclear(executeQueryOrDie(conn, "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);",
					quote_identifier(PQgetvalue(res, rowno, i_nspname)),
					quote_identifier(PQgetvalue(res, rowno, i_relname))));
		}

		PQclear(res);

		PQfinish(conn);
	}

	check_ok();
}
Example #5
/*
 * check_for_pg_role_prefix()
 *
 *	Versions older than 9.6 should not have any pg_* roles
 */
static void
check_for_pg_role_prefix(ClusterInfo *cluster)
{
	PGresult   *res;
	PGconn	   *conn = connectToServer(cluster, "template1");

	prep_status("Checking for roles starting with \"pg_\"");

	res = executeQueryOrDie(conn,
							"SELECT * "
							"FROM pg_catalog.pg_roles "
							"WHERE rolname ~ '^pg_'");

	if (PQntuples(res) != 0)
	{
		if (cluster == &old_cluster)
			pg_fatal("The source cluster contains roles starting with \"pg_\"\n");
		else
			pg_fatal("The target cluster contains roles starting with \"pg_\"\n");
	}

	PQclear(res);

	PQfinish(conn);

	check_ok();
}
Example #6
/*
 *	check_for_prepared_transactions()
 *
 *	Make sure there are no prepared transactions because the storage format
 *	might have changed.
 */
static void
check_for_prepared_transactions(ClusterInfo *cluster)
{
	PGresult   *res;
	PGconn	   *conn = connectToServer(cluster, "template1");

	prep_status("Checking for prepared transactions");

	res = executeQueryOrDie(conn,
							"SELECT * "
							"FROM pg_catalog.pg_prepared_xacts");

	if (PQntuples(res) != 0)
	{
		if (cluster == &old_cluster)
			pg_fatal("The source cluster contains prepared transactions\n");
		else
			pg_fatal("The target cluster contains prepared transactions\n");
	}

	PQclear(res);

	PQfinish(conn);

	check_ok();
}
Example #7
File: info.c  Project: no0p/postgres
/*
 * get_db_infos()
 *
 * Scans pg_database system catalog and populates all user
 * databases.
 */
static void
get_db_infos(ClusterInfo *cluster)
{
    PGconn	   *conn = connectToServer(cluster, "template1");
    PGresult   *res;
    int			ntups;
    int			tupnum;
    DbInfo	   *dbinfos;
    int			i_datname,
                i_oid,
                i_encoding,
                i_datcollate,
                i_datctype,
                i_spclocation;
    char		query[QUERY_ALLOC];

    snprintf(query, sizeof(query),
             "SELECT d.oid, d.datname, d.encoding, d.datcollate, d.datctype, "
             "%s AS spclocation "
             "FROM pg_catalog.pg_database d "
             " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
             " ON d.dattablespace = t.oid "
             "WHERE d.datallowconn = true "
             /* we don't preserve pg_database.oid so we sort by name */
             "ORDER BY 2",
             /* 9.2 removed the spclocation column */
             (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
             "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid)");

    res = executeQueryOrDie(conn, "%s", query);

    i_oid = PQfnumber(res, "oid");
    i_datname = PQfnumber(res, "datname");
    i_encoding = PQfnumber(res, "encoding");
    i_datcollate = PQfnumber(res, "datcollate");
    i_datctype = PQfnumber(res, "datctype");
    i_spclocation = PQfnumber(res, "spclocation");

    ntups = PQntuples(res);
    dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups);

    for (tupnum = 0; tupnum < ntups; tupnum++)
    {
        dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
        dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname));
        dbinfos[tupnum].db_encoding = atoi(PQgetvalue(res, tupnum, i_encoding));
        dbinfos[tupnum].db_collate = pg_strdup(PQgetvalue(res, tupnum, i_datcollate));
        dbinfos[tupnum].db_ctype = pg_strdup(PQgetvalue(res, tupnum, i_datctype));
        snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s",
                 PQgetvalue(res, tupnum, i_spclocation));
    }
    PQclear(res);

    PQfinish(conn);

    cluster->dbarr.dbs = dbinfos;
    cluster->dbarr.ndbs = ntups;
}
Example #8
/*
 * set_locale_and_encoding()
 *
 * query the database to get the template0 locale and encoding
 */
static void
set_locale_and_encoding(ClusterInfo *cluster)
{
	ControlData *ctrl = &cluster->controldata;
	PGconn	   *conn;
	PGresult   *res;
	int			i_encoding;
	int			cluster_version = cluster->major_version;

	conn = connectToServer(cluster, "template1");

	/* for pg < 80400, we got the values from pg_controldata */
	if (cluster_version >= 80400)
	{
		int			i_datcollate;
		int			i_datctype;

		res = executeQueryOrDie(conn,
								"SELECT datcollate, datctype "
								"FROM 	pg_catalog.pg_database "
								"WHERE	datname = 'template0' ");
		assert(PQntuples(res) == 1);

		i_datcollate = PQfnumber(res, "datcollate");
		i_datctype = PQfnumber(res, "datctype");

		ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
		ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));

		PQclear(res);
	}

	res = executeQueryOrDie(conn,
							"SELECT pg_catalog.pg_encoding_to_char(encoding) "
							"FROM 	pg_catalog.pg_database "
							"WHERE	datname = 'template0' ");
	assert(PQntuples(res) == 1);

	i_encoding = PQfnumber(res, "pg_encoding_to_char");
	ctrl->encoding = pg_strdup(PQgetvalue(res, 0, i_encoding));

	PQclear(res);

	PQfinish(conn);
}
Example #9
static void
check_proper_datallowconn(ClusterInfo *cluster)
{
	int			dbnum;
	PGconn	   *conn_template1;
	PGresult   *dbres;
	int			ntups;
	int			i_datname;
	int			i_datallowconn;

	prep_status("Checking database connection settings");

	conn_template1 = connectToServer(cluster, "template1");

	/* get database names */
	dbres = executeQueryOrDie(conn_template1,
							  "SELECT	datname, datallowconn "
							  "FROM	pg_catalog.pg_database");

	i_datname = PQfnumber(dbres, "datname");
	i_datallowconn = PQfnumber(dbres, "datallowconn");

	ntups = PQntuples(dbres);
	for (dbnum = 0; dbnum < ntups; dbnum++)
	{
		char	   *datname = PQgetvalue(dbres, dbnum, i_datname);
		char	   *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

		if (strcmp(datname, "template0") == 0)
		{
			/* avoid restore failure when pg_dumpall tries to create template0 */
			if (strcmp(datallowconn, "t") == 0)
				pg_fatal("template0 must not allow connections, "
						 "i.e. its pg_database.datallowconn must be false\n");
		}
		else
		{
			/*
			 * prevent datallowconn == false databases from being skipped on
			 * restore
			 */
			if (strcmp(datallowconn, "f") == 0)
				pg_fatal("All non-template0 databases must allow connections, "
						 "i.e. their pg_database.datallowconn must be true\n");
		}
	}

	PQclear(dbres);

	PQfinish(conn_template1);

	check_ok();
}
Example #10
void
uninstall_support_functions_from_new_cluster(void)
{
	int			dbnum;

	prep_status("Removing support functions from new cluster");

	for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *new_db = &new_cluster.dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(&new_cluster, new_db->db_name);

		/* suppress NOTICE of dropped objects */
		PQclear(executeQueryOrDie(conn,
								  "SET client_min_messages = warning;"));
		PQclear(executeQueryOrDie(conn,
								  "DROP SCHEMA binary_upgrade CASCADE;"));
		PQclear(executeQueryOrDie(conn,
								  "RESET client_min_messages;"));
		PQfinish(conn);
	}
	check_ok();
}
Example #11
/*
 * Read all pg_type and pg_class OIDs from old cluster, into the DbInfo structs.
 *
 * The OIDs of types and relations need to be preserved from the old cluster.
 * In the upstream, we use a different mechanism, integrated into pg_dump, for
 * this, but things are a bit more complicated in GPDB, because of partitions,
 * auxiliary AO segment tables, and bitmap index LOV tables. So we use a
 * different strategy.
 */
void
get_old_oids(migratorContext *ctx)
{
	int			dbnum;

	prep_status(ctx, "Exporting object OIDs from the old cluster");

	for (dbnum = 0; dbnum < ctx->old.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *olddb = &ctx->old.dbarr.dbs[dbnum];
		PGconn	   *conn;
		PQExpBuffer	buf = createPQExpBuffer();

		conn = connectToServer(ctx, olddb->db_name, CLUSTER_OLD);
		PQclear(executeQueryOrDie(ctx, conn, "set search_path='pg_catalog';"));

		dump_rows(ctx, buf, NULL, conn,
				  "SELECT oid, nspname FROM pg_namespace",
				  "preassign_namespace_oid");
		dump_rows(ctx, buf, NULL, conn,
				  "SELECT oid, typname, typnamespace FROM pg_type",
				  "preassign_type_oid");

		/*
		 * A table's TOAST table might not be named the usual way in the old
		 * cluster, so dump it according to the way it will be named when created
		 * in the new cluster, instead of the current name.
		 *
		 * For example, "alter_distpol_g_char_11_false_false", in the regression
		 * database. FIXME: I don't quite understand how that happens.
		 *
		 * We don't preserve the OIDs of AO segment tables.
		 */
		dump_rows(ctx, buf, NULL, conn,
				  "SELECT oid, relname, relnamespace FROM pg_class "
				  " WHERE relnamespace NOT IN "
				  "   ( " CppAsString2(PG_TOAST_NAMESPACE) ", "
				  "" CppAsString2(PG_AOSEGMENT_NAMESPACE) ") "
				  " AND oid >= " CppAsString2(FirstNormalObjectId) " "
				  "UNION ALL "
				  "SELECT reltoastrelid, 'pg_toast_' || oid, " CppAsString2(PG_TOAST_NAMESPACE) " FROM pg_class WHERE reltoastrelid <> 0 AND oid >= " CppAsString2(FirstNormalObjectId) " ",
				  "preassign_relation_oid");

		PQfinish(conn);

		olddb->reserved_oids = buf->data;
	}

	check_ok(ctx);
}
Example #12
/*
 *	check_is_super_user()
 *
 *	Check that we are a superuser, and record our user id and the user count
 */
static void
check_is_super_user(ClusterInfo *cluster)
{
	PGresult   *res;
	PGconn	   *conn = connectToServer(cluster, "template1");

	prep_status("Checking database user is a superuser");

	/* Can't use pg_authid because only superusers can view it. */
	res = executeQueryOrDie(conn,
							"SELECT rolsuper, oid "
							"FROM pg_catalog.pg_roles "
							"WHERE rolname = current_user");

	if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
		pg_log(PG_FATAL, "database user \"%s\" is not a superuser\n",
			   os_info.user);

	cluster->install_role_oid = atooid(PQgetvalue(res, 0, 1));

	PQclear(res);

	res = executeQueryOrDie(conn,
							"SELECT COUNT(*) "
							"FROM pg_catalog.pg_roles ");

	if (PQntuples(res) != 1)
		pg_log(PG_FATAL, "could not determine the number of users\n");

	cluster->role_count = atoi(PQgetvalue(res, 0, 0));

	PQclear(res);

	PQfinish(conn);

	check_ok();
}
Example #13
/*
 * get_db_infos()
 *
 * Scans pg_database system catalog and populates all user
 * databases.
 */
static void
get_db_infos(ClusterInfo *cluster)
{
	PGconn	   *conn = connectToServer(cluster, "template1");
	PGresult   *res;
	int			ntups;
	int			tupnum;
	DbInfo	   *dbinfos;
	int			i_datname,
				i_oid,
				i_spclocation;

	res = executeQueryOrDie(conn,
							"SELECT d.oid, d.datname, t.spclocation "
							"FROM pg_catalog.pg_database d "
							" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
							" ON d.dattablespace = t.oid "
							"WHERE d.datallowconn = true "
	/* we don't preserve pg_database.oid so we sort by name */
							"ORDER BY 2");

	i_oid = PQfnumber(res, "oid");
	i_datname = PQfnumber(res, "datname");
	i_spclocation = PQfnumber(res, "spclocation");

	ntups = PQntuples(res);
	dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups);

	for (tupnum = 0; tupnum < ntups; tupnum++)
	{
		dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
		snprintf(dbinfos[tupnum].db_name, sizeof(dbinfos[tupnum].db_name), "%s",
				 PQgetvalue(res, tupnum, i_datname));
		snprintf(dbinfos[tupnum].db_tblspace, sizeof(dbinfos[tupnum].db_tblspace), "%s",
				 PQgetvalue(res, tupnum, i_spclocation));
	}
	PQclear(res);

	PQfinish(conn);

	cluster->dbarr.dbs = dbinfos;
	cluster->dbarr.ndbs = ntups;
}
Example #14
/*
 * connectToServer()
 *
 *	Connects to the desired database on the designated server.
 *	If the connection attempt fails, this function logs an error
 *	message and calls exit() to kill the program.
 */
PGconn *
connectToServer(ClusterInfo *cluster, const char *db_name)
{
	PGconn	   *conn = get_db_conn(cluster, db_name);

	if (conn == NULL || PQstatus(conn) != CONNECTION_OK)
	{
		pg_log(PG_REPORT, "connection to database failed: %s",
			   PQerrorMessage(conn));

		if (conn)
			PQfinish(conn);

		printf(_("Failure, exiting\n"));
		exit(1);
	}

	PQclear(executeQueryOrDie(conn, ALWAYS_SECURE_SEARCH_PATH_SQL));

	return conn;
}
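
Because connectToServer() exits the program when the connection cannot be established, callers never need to test the returned connection; a minimal, hypothetical usage sketch:

/* Sketch: connect, run a query, clean up.  No NULL check on conn is
 * needed because connectToServer() already exited on failure. */
PGconn	   *conn = connectToServer(&old_cluster, "template1");
PGresult   *res = executeQueryOrDie(conn, "SELECT current_database()");

pg_log(PG_REPORT, "connected to %s\n", PQgetvalue(res, 0, 0));
PQclear(res);
PQfinish(conn);
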
Example #15
/*
 * get_pg_database_relfilenode()
 *
 *	Retrieves the relfilenode for a few system-catalog tables.	We need these
 *	relfilenodes later in the upgrade process.
 */
void
get_pg_database_relfilenode(ClusterInfo *cluster)
{
	PGconn	   *conn = connectToServer(cluster, "template1");
	PGresult   *res;
	int			i_relfile;

	res = executeQueryOrDie(conn,
							"SELECT c.relname, c.relfilenode "
							"FROM 	pg_catalog.pg_class c, "
							"		pg_catalog.pg_namespace n "
							"WHERE 	c.relnamespace = n.oid AND "
							"		n.nspname = 'pg_catalog' AND "
							"		c.relname = 'pg_database' "
							"ORDER BY c.relname");

	i_relfile = PQfnumber(res, "relfilenode");
	cluster->pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile));

	PQclear(res);
	PQfinish(conn);
}
Example #16
/*
 *	check_for_prepared_transactions()
 *
 *	Make sure there are no prepared transactions because the storage format
 *	might have changed.
 */
static void
check_for_prepared_transactions(ClusterInfo *cluster)
{
	PGresult   *res;
	PGconn	   *conn = connectToServer(cluster, "template1");

	prep_status("Checking for prepared transactions");

	res = executeQueryOrDie(conn,
							"SELECT * "
							"FROM pg_catalog.pg_prepared_xacts");

	if (PQntuples(res) != 0)
		pg_log(PG_FATAL, "The %s cluster contains prepared transactions\n",
			   CLUSTER_NAME(cluster));

	PQclear(res);

	PQfinish(conn);

	check_ok();
}
Example #17
/*
 * get_tablespace_paths()
 *
 * Scans pg_tablespace and stores a malloc'ed array of all tablespace
 * paths in os_info.  It's the caller's responsibility to free the array.
 */
static void
get_tablespace_paths(void)
{
	PGconn	   *conn = connectToServer(&old_cluster, "template1");
	PGresult   *res;
	int			tblnum;
	int			i_spclocation;
	char		query[QUERY_ALLOC];

	snprintf(query, sizeof(query),
			 "SELECT	%s "
			 "FROM	pg_catalog.pg_tablespace "
			 "WHERE	spcname != 'pg_default' AND "
			 "		spcname != 'pg_global'",
	/* 9.2 removed the spclocation column */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
	"spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");

	res = executeQueryOrDie(conn, "%s", query);

	if ((os_info.num_tablespaces = PQntuples(res)) != 0)
		os_info.tablespaces = (char **) pg_malloc(
								   os_info.num_tablespaces * sizeof(char *));
	else
		os_info.tablespaces = NULL;

	i_spclocation = PQfnumber(res, "spclocation");

	for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
		os_info.tablespaces[tblnum] = pg_strdup(
									 PQgetvalue(res, tblnum, i_spclocation));

	PQclear(res);

	PQfinish(conn);

	return;
}
Example #18
/*
 * check_for_jsonb_9_4_usage()
 *
 *	JSONB changed its storage format during 9.4 beta, so check for it.
 */
static void
check_for_jsonb_9_4_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for JSONB user data types");

	snprintf(output_path, sizeof(output_path), "tables_using_jsonb.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/*
		 * While several relkinds don't store any data, e.g. views, they can
		 * be used to define data types of other columns, so we check all
		 * relkinds.
		 */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.jsonb'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
								"  		n.nspname !~ '^pg_temp_' AND "
							  "		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("Could not open file \"%s\": %s\n",
						 output_path, getErrorText(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains one of the JSONB data types in user tables.\n"
		 "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n"
				 "be upgraded.  You can remove the problem tables and restart the upgrade.  A list\n"
				 "of the problem columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Example #19
/*
 *	set_frozenxids()
 *
 * This is called on the new cluster before we restore anything, with
 * minmxid_only = false.  Its purpose is to ensure that all initdb-created
 * vacuumable tables have relfrozenxid/relminmxid matching the old cluster's
 * xid/mxid counters.  We also initialize the datfrozenxid/datminmxid of the
 * built-in databases to match.
 *
 * As we create user tables later, their relfrozenxid/relminmxid fields will
 * be restored properly by the binary-upgrade restore script.  Likewise for
 * user-database datfrozenxid/datminmxid.  However, if we're upgrading from a
 * pre-9.3 database, which does not store per-table or per-DB minmxid, then
 * the relminmxid/datminmxid values filled in by the restore script will just
 * be zeroes.
 *
 * Hence, with a pre-9.3 source database, a second call occurs after
 * everything is restored, with minmxid_only = true.  This pass will
 * initialize all tables and databases, both those made by initdb and user
 * objects, with the desired minmxid value.  frozenxid values are left alone.
 */
static void
set_frozenxids(bool minmxid_only)
{
	int			dbnum;
	PGconn	   *conn,
			   *conn_template1;
	PGresult   *dbres;
	int			ntups;
	int			i_datname;
	int			i_datallowconn;

	if (!minmxid_only)
		prep_status("Setting frozenxid and minmxid counters in new cluster");
	else
		prep_status("Setting minmxid counter in new cluster");

	conn_template1 = connectToServer(&new_cluster, "template1");

	if (!minmxid_only)
		/* set pg_database.datfrozenxid */
		PQclear(executeQueryOrDie(conn_template1,
								  "UPDATE pg_catalog.pg_database "
								  "SET	datfrozenxid = '%u'",
								  old_cluster.controldata.chkpnt_nxtxid));

	/* set pg_database.datminmxid */
	PQclear(executeQueryOrDie(conn_template1,
							  "UPDATE pg_catalog.pg_database "
							  "SET	datminmxid = '%u'",
							  old_cluster.controldata.chkpnt_nxtmulti));

	/* get database names */
	dbres = executeQueryOrDie(conn_template1,
							  "SELECT	datname, datallowconn "
							  "FROM	pg_catalog.pg_database");

	i_datname = PQfnumber(dbres, "datname");
	i_datallowconn = PQfnumber(dbres, "datallowconn");

	ntups = PQntuples(dbres);
	for (dbnum = 0; dbnum < ntups; dbnum++)
	{
		char	   *datname = PQgetvalue(dbres, dbnum, i_datname);
		char	   *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

		/*
		 * We must update databases where datallowconn = false, e.g.
		 * template0, because autovacuum increments their datfrozenxids,
		 * relfrozenxids, and relminmxid even if autovacuum is turned off, and
		 * even though all the data rows are already frozen.  To enable this,
		 * we temporarily change datallowconn.
		 */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
									  "ALTER DATABASE %s ALLOW_CONNECTIONS = true",
									  quote_identifier(datname)));

		conn = connectToServer(&new_cluster, datname);

		if (!minmxid_only)
			/* set pg_class.relfrozenxid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE	pg_catalog.pg_class "
									  "SET	relfrozenxid = '%u' "
			/* only heap, materialized view, and TOAST are vacuumed */
									  "WHERE	relkind IN ("
									  CppAsString2(RELKIND_RELATION) ", "
									  CppAsString2(RELKIND_MATVIEW) ", "
									  CppAsString2(RELKIND_TOASTVALUE) ")",
									  old_cluster.controldata.chkpnt_nxtxid));

		/* set pg_class.relminmxid */
		PQclear(executeQueryOrDie(conn,
								  "UPDATE	pg_catalog.pg_class "
								  "SET	relminmxid = '%u' "
		/* only heap, materialized view, and TOAST are vacuumed */
								  "WHERE	relkind IN ("
								  CppAsString2(RELKIND_RELATION) ", "
								  CppAsString2(RELKIND_MATVIEW) ", "
								  CppAsString2(RELKIND_TOASTVALUE) ")",
								  old_cluster.controldata.chkpnt_nxtmulti));
		PQfinish(conn);

		/* Reset datallowconn flag */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
									  "ALTER DATABASE %s ALLOW_CONNECTIONS = false",
									  quote_identifier(datname)));
	}

	PQclear(dbres);

	PQfinish(conn_template1);

	check_ok();
}
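
A hypothetical call sequence matching the header comment above: the full pass runs before anything is restored, and the minmxid-only pass is repeated after the restore only when the old cluster predates 9.3 and therefore lacks per-table and per-database minmxid values.

/* Sketch of the two-pass usage described in the header comment above. */
set_frozenxids(false);				/* before restoring any objects */

/* ... schema and data are restored here ... */

if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
	set_frozenxids(true);			/* fix up only the minmxid values */
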
Example #20
/*
 *	check_for_isn_and_int8_passing_mismatch()
 *
 *	contrib/isn relies on data type int8, and in 8.4 int8 can now be passed
 *	by value.  The schema dumps the CREATE TYPE PASSEDBYVALUE setting so
 *	it must match for the old and new servers.
 */
static void
check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for contrib/isn with bigint-passing mismatch");

	if (old_cluster.controldata.float8_pass_by_value ==
		new_cluster.controldata.float8_pass_by_value)
	{
		/* no mismatch */
		check_ok();
		return;
	}

	snprintf(output_path, sizeof(output_path),
			 "contrib_isn_and_int8_pass_by_value.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_proname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any functions coming from contrib/isn */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, p.proname "
								"FROM	pg_catalog.pg_proc p, "
								"		pg_catalog.pg_namespace n "
								"WHERE	p.pronamespace = n.oid AND "
								"		p.probin = '$libdir/isn'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_proname = PQfnumber(res, "proname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("Could not open file \"%s\": %s\n",
						 output_path, getErrorText(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_proname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
		  "bigint data type.  Your old and new clusters pass bigint values\n"
		"differently so this cluster cannot currently be upgraded.  You can\n"
				 "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
				 "\"contrib/isn\" from the old cluster and restart the upgrade.  A list of\n"
				 "the problem functions is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Example #21
/*
 * check_for_reg_data_type_usage()
 *	pg_upgrade only preserves these system values:
 *		pg_class.oid
 *		pg_type.oid
 *		pg_enum.oid
 *
 *	Many of the reg* data types reference system catalog info that is
 *	not preserved, and hence these data types cannot be used in user
 *	tables upgraded by pg_upgrade.
 */
static void
check_for_reg_data_type_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for reg* system OID user data types");

	snprintf(output_path, sizeof(output_path), "tables_using_reg.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/*
		 * While several relkinds don't store any data, e.g. views, they can
		 * be used to define data types of other columns, so we check all
		 * relkinds.
		 */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid IN ( "
		  "			'pg_catalog.regproc'::pg_catalog.regtype, "
								"			'pg_catalog.regprocedure'::pg_catalog.regtype, "
		  "			'pg_catalog.regoper'::pg_catalog.regtype, "
								"			'pg_catalog.regoperator'::pg_catalog.regtype, "
		/* regclass.oid is preserved, so 'regclass' is OK */
		/* regtype.oid is preserved, so 'regtype' is OK */
		"			'pg_catalog.regconfig'::pg_catalog.regtype, "
								"			'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
								"		c.relnamespace = n.oid AND "
							  "		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("Could not open file \"%s\": %s\n",
						 output_path, getErrorText(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains one of the reg* data types in user tables.\n"
		 "These data types reference system OIDs that are not preserved by\n"
		"pg_upgrade, so this cluster cannot currently be upgraded.  You can\n"
				 "remove the problem tables and restart the upgrade.  A list of the problem\n"
				 "columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Example #22
/*
 * old_8_3_invalidate_bpchar_pattern_ops_indexes()
 *	8.3 -> 8.4
 *	8.4 bpchar_pattern_ops no longer sorts based on trailing spaces
 */
void
old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
											  bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAX_PG_PATH];

	prep_status("Checking for bpchar_pattern_ops indexes");

	snprintf(output_path, sizeof(output_path), "%s/reindex_bpchar_ops.sql",
			 os_info.cwd);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find bpchar_pattern_ops indexes */

		/*
		 * Only process non-hash, non-GIN indexes;  we already invalidated those
		 * above, so there is no need to reindex them twice.
		 */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_index i, "
								"		pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n "
								"WHERE	indexrelid = c.oid AND "
								"		c.relnamespace = n.oid AND "
								"		( "
								"			SELECT	o.oid "
				   "			FROM	pg_catalog.pg_opclass o, "
				  "					pg_catalog.pg_am a"
		"			WHERE	a.amname NOT IN ('hash', 'gin') AND "
			"					a.oid = o.opcmethod AND "
								"					o.opcname = 'bpchar_pattern_ops') "
								"		= ANY (i.indclass) AND "
								SKIP_TSVECTOR_TABLES);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen(output_path, "w")) == NULL)
					pg_log(PG_FATAL, "could not create necessary file:  %s\n", output_path);
				if (!db_used)
				{
					fprintf(script, "\\connect %s\n",
							quote_identifier(active_db->db_name));
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && found)
			/* mark bpchar_pattern_ops indexes as invalid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET	indisvalid = false "
									  "FROM	pg_catalog.pg_class c, "
									  "		pg_catalog.pg_namespace n "
									  "WHERE	indexrelid = c.oid AND "
									  "		c.relnamespace = n.oid AND "
									  "		( "
									  "			SELECT	o.oid "
						 "			FROM	pg_catalog.pg_opclass o, "
						"					pg_catalog.pg_am a"
			  "			WHERE	a.amname NOT IN ('hash', 'gin') AND "
				  "					a.oid = o.opcmethod AND "
									  "					o.opcname = 'bpchar_pattern_ops') "
									  "		= ANY (i.indclass)"));

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "| Your installation contains indexes using\n"
				   "| \"bpchar_pattern_ops\".  These indexes have\n"
				   "| different internal formats between your old and\n"
				   "| new clusters so they must be reindexed with the\n"
				   "| REINDEX command.  After upgrading, you will be\n"
				   "| given REINDEX instructions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "| Your installation contains indexes using\n"
				   "| \"bpchar_pattern_ops\".  These indexes have\n"
				   "| different internal formats between your old and\n"
				   "| new clusters so they must be reindexed with the\n"
				   "| REINDEX command.  The file:\n"
				   "| \t%s\n"
				   "| when executed by psql by the database super-user\n"
				   "| will recreate all invalid indexes; until then,\n"
				   "| none of these indexes will be used.\n\n",
				   output_path);
	}
	else
		check_ok();
}
Example #23
/*
 * old_8_3_create_sequence_script()
 *	8.3 -> 8.4
 *	8.4 added the column "start_value" to all sequences.  For this reason,
 *	we don't transfer sequence files but instead use the CREATE SEQUENCE
 *	command from the schema dump, and use setval() to restore the sequence
 *	value and 'is_called' from the old database.  This is safe to run
 *	by pg_upgrade because sequence files are not transferred from the old
 *	server, even in link mode.
 */
char *
old_8_3_create_sequence_script(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path = pg_malloc(MAX_PG_PATH);

	snprintf(output_path, MAX_PG_PATH, "%s/adjust_sequences.sql", os_info.cwd);

	prep_status("Creating script to adjust sequences");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any sequences */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n "
								"WHERE	c.relkind = 'S' AND "
								"		c.relnamespace = n.oid AND "
								 /* exclude possible orphaned temp tables */
								"  		n.nspname !~ '^pg_temp_' AND "
								"		n.nspname !~ '^pg_toast_temp_' AND "
						 		"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			PGresult   *seq_res;
			int			i_last_value,
						i_is_called;
			const char *nspname = PQgetvalue(res, rowno, i_nspname);
			const char *relname = PQgetvalue(res, rowno, i_relname);

			found = true;

			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "could not create necessary file:  %s\n", output_path);
			if (!db_used)
			{
				fprintf(script, "\\connect %s\n\n",
						quote_identifier(active_db->db_name));
				db_used = true;
			}

			/* Find the desired sequence */
			seq_res = executeQueryOrDie(conn,
										"SELECT s.last_value, s.is_called "
										"FROM	%s.%s s",
										quote_identifier(nspname),
										quote_identifier(relname));

			assert(PQntuples(seq_res) == 1);
			i_last_value = PQfnumber(seq_res, "last_value");
			i_is_called = PQfnumber(seq_res, "is_called");

			fprintf(script, "SELECT setval('%s.%s', %s, '%s');\n",
					quote_identifier(nspname), quote_identifier(relname),
					PQgetvalue(seq_res, 0, i_last_value), PQgetvalue(seq_res, 0, i_is_called));
			PQclear(seq_res);
		}
		if (db_used)
			fprintf(script, "\n");

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	check_ok();

	if (found)
		return output_path;
	else
	{
		pg_free(output_path);
		return NULL;
	}
}
Example #24
/*
 *	old_8_3_check_ltree_usage()
 *	8.3 -> 8.4
 *	The internal ltree structure was changed in 8.4 so upgrading is impossible.
 */
void
old_8_3_check_ltree_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAX_PG_PATH];

	prep_status("Checking for contrib/ltree");

	snprintf(output_path, sizeof(output_path), "%s/contrib_ltree.txt",
			 os_info.cwd);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_proname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any functions coming from contrib/ltree */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, p.proname "
								"FROM	pg_catalog.pg_proc p, "
								"		pg_catalog.pg_namespace n "
								"WHERE	p.pronamespace = n.oid AND "
								"		p.probin = '$libdir/ltree'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_proname = PQfnumber(res, "proname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
					   output_path, getErrorText(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_proname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "Your installation contains the \"ltree\" data type.  This data type\n"
			   "changed its internal storage format between your old and new clusters so this\n"
			   "cluster cannot currently be upgraded.  You can manually upgrade databases\n"
			   "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
			   "cluster and restart the upgrade.  A list of the problem functions is in the\n"
			   "file:\n"
			   "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Example #25
/*
 * old_8_3_rebuild_tsvector_tables()
 *	8.3 -> 8.4
 * 8.3 sorts lexemes by their length, and if the lengths are the same it
 * uses alphabetic order;  8.4 sorts lexemes in lexicographical order, e.g.
 *
 * => SELECT 'c bb aaa'::tsvector;
 *	   tsvector
 * ----------------
 *	'aaa' 'bb' 'c'		   -- 8.4
 *	'c' 'bb' 'aaa'		   -- 8.3
 */
void
old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAX_PG_PATH];

	prep_status("Checking for tsvector user columns");

	snprintf(output_path, sizeof(output_path), "%s/rebuild_tsvector_tables.sql",
			 os_info.cwd);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		char		nspname[NAMEDATALEN] = "",
					relname[NAMEDATALEN] = "";
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any user-defined tsvector columns */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.relkind = 'r' AND "
								"		c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
								 /* exclude possible orphaned temp tables */
								"  		n.nspname !~ '^pg_temp_' AND "
								"		n.nspname !~ '^pg_toast_temp_' AND "
						 		"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

/*
 *	This macro is used below to avoid reindexing indexes already rebuilt
 *	because of tsvector columns.
 */
#define SKIP_TSVECTOR_TABLES \
								"i.indrelid NOT IN ( "					\
								"SELECT DISTINCT c.oid "				\
								"FROM	pg_catalog.pg_class c, "		\
								"		pg_catalog.pg_namespace n, "	\
								"		pg_catalog.pg_attribute a "		\
								"WHERE	c.relkind = 'r' AND "			\
								"		c.oid = a.attrelid AND "		\
								"		NOT a.attisdropped AND "		\
								"		a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND " \
								"		c.relnamespace = n.oid AND "	\
								"       n.nspname !~ '^pg_' AND "		\
								"		n.nspname != 'information_schema') "

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen(output_path, "w")) == NULL)
					pg_log(PG_FATAL, "could not create necessary file:  %s\n", output_path);
				if (!db_used)
				{
					fprintf(script, "\\connect %s\n\n",
							quote_identifier(active_db->db_name));
					db_used = true;
				}

				/* Rebuild all tsvector columns with one ALTER TABLE command */
				if (strcmp(PQgetvalue(res, rowno, i_nspname), nspname) != 0 ||
					strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0)
				{
					if (strlen(nspname) != 0 || strlen(relname) != 0)
						fprintf(script, ";\n\n");
					fprintf(script, "ALTER TABLE %s.%s\n",
						 quote_identifier(PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(PQgetvalue(res, rowno, i_relname)));
				}
				else
					fprintf(script, ",\n");
				strlcpy(nspname, PQgetvalue(res, rowno, i_nspname), sizeof(nspname));
				strlcpy(relname, PQgetvalue(res, rowno, i_relname), sizeof(relname));

				fprintf(script, "ALTER COLUMN %s "
				/* This could have been a custom conversion function call. */
						"TYPE pg_catalog.tsvector USING %s::pg_catalog.text::pg_catalog.tsvector",
						quote_identifier(PQgetvalue(res, rowno, i_attname)),
						quote_identifier(PQgetvalue(res, rowno, i_attname)));
			}
		}
		if (strlen(nspname) != 0 || strlen(relname) != 0)
			fprintf(script, ";\n\n");

		PQclear(res);

		/* XXX Mark tables as not accessible somehow */

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "| Your installation contains tsvector columns.\n"
				   "| The tsvector internal storage format changed\n"
				   "| between your old and new clusters so the tables\n"
				   "| must be rebuilt.  After upgrading, you will be\n"
				   "| given instructions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "| Your installation contains tsvector columns.\n"
				   "| The tsvector internal storage format changed\n"
				   "| between your old and new clusters so the tables\n"
				   "| must be rebuilt.  The file:\n"
				   "| \t%s\n"
				   "| when executed by psql by the database super-user\n"
				   "| will rebuild all tables with tsvector columns.\n\n",
				   output_path);
	}
	else
		check_ok();
}
Example #26
/*
 * old_8_3_check_for_name_data_type_usage()
 *	8.3 -> 8.4
 *	Alignment for the 'name' data type changed to 'char' in 8.4;
 *	checks tables and indexes.
 */
void
old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAX_PG_PATH];

	prep_status("Checking for invalid 'name' user columns");

	snprintf(output_path, sizeof(output_path), "%s/tables_using_name.txt",
			 os_info.cwd);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/*
		 * With a smaller alignment in 8.4, 'name' cannot be used in a
		 * non-pg_catalog table, except as the first column. (We could tighten
		 * that condition with enough analysis, but it seems not worth the
		 * trouble.)
		 */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.oid = a.attrelid AND "
								"		a.attnum > 1 AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
								 /* exclude possible orphaned temp tables */
								"  		n.nspname !~ '^pg_temp_' AND "
								"		n.nspname !~ '^pg_toast_temp_' AND "
						 		"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "could not create necessary file:  %s\n", output_path);
			if (!db_used)
			{
				fprintf(script, "Database:  %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "| Your installation contains the \"name\" data type in\n"
			   "| user tables.  This data type changed its internal\n"
			   "| alignment between your old and new clusters so this\n"
			   "| cluster cannot currently be upgraded.  You can\n"
			   "| remove the problem tables and restart the upgrade.\n"
			   "| A list of the problem columns is in the file:\n"
			   "| \t%s\n\n", output_path);
	}
	else
		check_ok();
}
Example #27
/*
 * old_8_3_check_for_tsquery_usage()
 *	8.3 -> 8.4
 *	A new 'prefix' field was added to the 'tsquery' data type in 8.4
 *	so upgrading of such fields is impossible.
 */
void
old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAX_PG_PATH];

	prep_status("Checking for tsquery user columns");

	snprintf(output_path, sizeof(output_path), "%s/tables_using_tsquery.txt",
			 os_info.cwd);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any user-defined tsquery columns */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.relkind = 'r' AND "
								"		c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
								 /* exclude possible orphaned temp tables */
								"  		n.nspname !~ '^pg_temp_' AND "
								"		n.nspname !~ '^pg_toast_temp_' AND "
						 		"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "could not create necessary file:  %s\n", output_path);
			if (!db_used)
			{
				fprintf(script, "Database:  %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "| Your installation contains the \"tsquery\" data type.\n"
			   "| This data type added a new internal field between\n"
			   "| your old and new clusters so this cluster cannot\n"
			   "| currently be upgraded.  You can remove the problem\n"
			   "| columns and restart the upgrade.  A list of the\n"
			   "| problem columns is in the file:\n"
			   "| \t%s\n\n", output_path);
	}
	else
		check_ok();
}
Example #28
/*
 * old_9_3_check_for_line_data_type_usage()
 *	9.3 -> 9.4
 *	9.4 fully implements the 'line' data type, which previously returned
 *	"not enabled" by default and was only functionally enabled with a
 *	compile-time switch;  9.4 "line" also has different binary and text
 *	representation formats;  checks tables and indexes.
 */
void
old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid \"line\" user columns");

	snprintf(output_path, sizeof(output_path), "tables_using_line.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.line'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
								"		n.nspname !~ '^pg_temp_' AND "
						 "		n.nspname !~ '^pg_toast_temp_' AND "
								"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"line\" data type in user tables.  This\n"
				 "data type changed its internal and input/output format between your old\n"
				 "and new clusters so this cluster cannot currently be upgraded.  You can\n"
				 "remove the problem tables and restart the upgrade.  A list of the problem\n"
				 "columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Example #29
/*
 * new_9_0_populate_pg_largeobject_metadata()
 *	new >= 9.0, old <= 8.4
 *	9.0 has a new pg_largeobject permission table
 */
void
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for large objects");

	snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		int			i_count;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find if there are any large objects */
		res = executeQueryOrDie(conn,
								"SELECT count(*) "
								"FROM	pg_catalog.pg_largeobject ");

		i_count = PQfnumber(res, "count");
		if (atoi(PQgetvalue(res, 0, i_count)) != 0)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
				fprintf(script, "\\connect %s\n",
						quote_identifier(active_db->db_name));
				fprintf(script,
						"SELECT pg_catalog.lo_create(t.loid)\n"
						"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
			}
		}

		PQclear(res);
		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects.  The new database has an\n"
				   "additional large object permission table.  After upgrading, you will be\n"
				   "given a command to populate the pg_largeobject permission table with\n"
				   "default permissions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects.  The new database has an\n"
				   "additional large object permission table, so default permissions must be\n"
				   "defined for all large objects.  The file\n"
				   "    %s\n"
				   "when executed by psql by the database superuser will set the default\n"
				   "permissions.\n\n",
				   output_path);
	}
	else
		check_ok();
}
Example #30
/*
 * old_8_3_invalidate_hash_gin_indexes()
 *	8.3 -> 8.4
 *	The hash, GIN, and GiST index binary formats changed between 8.3 and 8.4
 */
void
old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for hash and GIN indexes");

	snprintf(output_path, sizeof(output_path), "reindex_hash_and_gin.sql");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find hash and gin indexes */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_index i, "
								"		pg_catalog.pg_am a, "
								"		pg_catalog.pg_namespace n "
								"WHERE	i.indexrelid = c.oid AND "
								"		c.relam = a.oid AND "
								"		c.relnamespace = n.oid AND "
							"		a.amname IN ('hash', 'gin') AND "
								SKIP_TSVECTOR_TABLES);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
				if (!db_used)
				{
					fprintf(script, "\\connect %s\n",
							quote_identifier(active_db->db_name));
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && found)
			/* mark hash and gin indexes as invalid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET	indisvalid = false "
									  "FROM	pg_catalog.pg_class c, "
									  "		pg_catalog.pg_am a, "
									  "		pg_catalog.pg_namespace n "
									  "WHERE	i.indexrelid = c.oid AND "
									  "		c.relam = a.oid AND "
									  "		c.relnamespace = n.oid AND "
									"		a.amname IN ('hash', 'gin')"));

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash and/or GIN indexes.  These indexes have\n"
				   "different internal formats between your old and new clusters, so they\n"
				   "must be reindexed with the REINDEX command.  After upgrading, you will\n"
				   "be given REINDEX instructions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash and/or GIN indexes.  These indexes have\n"
				   "different internal formats between your old and new clusters, so they\n"
				   "must be reindexed with the REINDEX command.  The file:\n"
				   "    %s\n"
				   "when executed by psql by the database superuser will recreate all invalid\n"
				   "indexes; until then, none of these indexes will be used.\n\n",
				   output_path);
	}
	else
		check_ok();
}