Example #1
static void
prepare_new_cluster(void)
{
	/*
	 * It would make more sense to freeze after loading the schema, but that
	 * would cause us to lose the frozenxids restored by the load. We use
	 * --analyze so autovacuum doesn't update statistics later
	 */
	prep_status("Analyzing all rows in the new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/vacuumdb\" %s --all --analyze %s",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  log_opts.verbose ? "--verbose" : "");
	check_ok();

	/*
	 * We do freeze after analyze so pg_statistic is also frozen. template0 is
	 * not frozen here, but data rows were frozen by initdb, and we set its
	 * datfrozenxid, relfrozenxids, and relminmxid later to match the new xid
	 * counter.
	 */
	prep_status("Freezing all rows in the new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/vacuumdb\" %s --all --freeze %s",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  log_opts.verbose ? "--verbose" : "");
	check_ok();
}
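
Both exec_prog() calls above differ only in the vacuumdb switch that gets spliced into the same printf-style template. As a standalone illustration, here is a minimal sketch of how such a template expands into the command string handed to the shell; the bindir and connection options are hypothetical placeholders, not values taken from pg_upgrade:

#include <stdio.h>

/* Sketch only: expand a vacuumdb command template the way exec_prog() would.
 * The bindir and connection options are made-up placeholders. */
int
main(void)
{
	char		cmd[1024];
	const char *bindir = "/usr/local/pgsql/bin";	/* hypothetical */
	const char *conn_opts = "--port 50432 --username postgres";	/* hypothetical */
	int			verbose = 0;

	snprintf(cmd, sizeof(cmd),
			 "\"%s/vacuumdb\" %s --all --freeze %s",
			 bindir, conn_opts, verbose ? "--verbose" : "");
	printf("%s\n", cmd);		/* the string exec_prog() would run */
	return 0;
}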
Example #2
void
generate_old_dump(void)
{
	int			dbnum;
	mode_t		old_umask;

	prep_status("Creating dump of global objects");

	/* run new pg_dumpall binary for globals */
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers "
			  "--binary-upgrade %s -f %s",
			  new_cluster.bindir, cluster_conn_opts(&old_cluster),
			  log_opts.verbose ? "--verbose" : "",
			  GLOBALS_DUMP_FILE);
	check_ok();

	prep_status("Creating dump of database schemas\n");

	/*
	 * Set umask for this function, all functions it calls, and all
	 * subprocesses/threads it creates.  We can't use fopen_priv() as Windows
	 * uses threads and umask is process-global.
	 */
	old_umask = umask(S_IRWXG | S_IRWXO);

	/* create per-db dump files */
	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		parallel_exec_prog(log_file_name, NULL,
						   "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
						   "--binary-upgrade --format=custom %s --file=\"%s\" \"%s\"",
						   new_cluster.bindir, cluster_conn_opts(&old_cluster),
						   log_opts.verbose ? "--verbose" : "",
						   sql_file_name, old_db->db_name);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	umask(old_umask);

	end_progress_output();
	check_ok();
}
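
The umask save/set/restore bracket is the noteworthy detail here: on Windows the per-database jobs run as threads and umask() is process-global, so the restrictive mask has to wrap the whole parallel phase rather than individual fopen() calls. A minimal POSIX sketch of the same pattern, with a made-up file name:

#include <stdio.h>
#include <sys/stat.h>

int
main(void)
{
	mode_t		old_umask;
	FILE	   *fp;

	/* Files created from here on are readable by the owner only. */
	old_umask = umask(S_IRWXG | S_IRWXO);

	fp = fopen("dump_example.log", "w");	/* hypothetical file name */
	if (fp)
		fclose(fp);				/* created with mode 0600 (0666 & ~0077) */

	/* Restore whatever mask the caller had. */
	umask(old_umask);
	return 0;
}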
Example #3
static void
prepare_new_databases(void)
{
	/*
	 * We set autovacuum_freeze_max_age to its maximum value so autovacuum
	 * does not launch here and delete clog files before the frozen xids are
	 * set.
	 */

	set_frozenxids(false);

	prep_status("Restoring global objects in the new cluster");

	/*
	 * We have to create the databases first so we can install support
	 * functions in all the other databases.  Ideally we could create the
	 * support functions in template1, but pg_dumpall creates databases using
	 * the template0 template.
	 */
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  GLOBALS_DUMP_FILE);
	check_ok();

	/* we load this to get a current list of databases */
	get_db_and_rel_infos(&new_cluster);
}
Example #4
void
issue_warnings(char *sequence_script_file_name)
{
	/* Does the old cluster need PG 8.3-era fixes? */
	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803)
	{
		start_postmaster(&new_cluster);

		/* restore proper sequence values using file created from old server */
		if (sequence_script_file_name)
		{
			prep_status("Adjusting sequences");
			exec_prog(UTILITY_LOG_FILE, NULL, true,
					  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
					  new_cluster.bindir, cluster_conn_opts(&new_cluster),
					  sequence_script_file_name);
			unlink(sequence_script_file_name);
			check_ok();
		}

		old_8_3_rebuild_tsvector_tables(&new_cluster, false);
		old_8_3_invalidate_hash_gin_indexes(&new_cluster, false);
		old_8_3_invalidate_bpchar_pattern_ops_indexes(&new_cluster, false);
		stop_postmaster(false);
	}

	/* Create dummy large object permissions for old < PG 9.0? */
	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
	{
		start_postmaster(&new_cluster);
		new_9_0_populate_pg_largeobject_metadata(&new_cluster, false);
		stop_postmaster(false);
	}
}
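
Both version gates compare GET_MAJOR_VERSION() of the old cluster against encoded majors such as 803 and 804. A quick sketch of that encoding follows; the macro body is my assumption of the convention (version number divided by 100) rather than something shown in this example:

#include <stdio.h>

/* Assumed convention: major version = version number / 100,
 * so 80300 -> 803 (8.3) and 90000 -> 900 (9.0). */
#define GET_MAJOR_VERSION(v)	((v) / 100)

int
main(void)
{
	int			old_version = 80400;	/* hypothetical old cluster: 8.4 */

	if (GET_MAJOR_VERSION(old_version) <= 803)
		printf("8.3-era fixups needed\n");
	if (GET_MAJOR_VERSION(old_version) <= 804)
		printf("pre-9.0 large object metadata needed\n");	/* prints for 8.4 */
	return 0;
}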
Example #5
static void
create_new_objects(void)
{
	int			dbnum;

	prep_status("Adding support functions to new cluster");

	/*
	 * Technically, we only need to install these support functions in new
	 * databases that also exist in the old cluster, but for completeness we
	 * process all new databases.
	 */
	for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *new_db = &new_cluster.dbarr.dbs[dbnum];

		/* skip template1, where the support functions were already installed */
		if (strcmp(new_db->db_name, "template1") != 0)
			install_support_functions_in_new_db(new_db->db_name);
	}
	check_ok();

	prep_status("Restoring database schemas in the new cluster\n");

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * pg_dump only produces its output at the end, so there is little
		 * parallelism if using the pipe.
		 */
		parallel_exec_prog(log_file_name,
						   NULL,
						   "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
						   new_cluster.bindir,
						   cluster_conn_opts(&new_cluster),
						   old_db->db_name,
						   sql_file_name);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	end_progress_output();
	check_ok();

	/* regenerate now that we have objects in the databases */
	get_db_and_rel_infos(&new_cluster);

	uninstall_support_functions_from_new_cluster();
}
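
Every parallel phase has the same shape: dispatch one child per database, then loop on reap_child(true) until it reports that no children remain. The sketch below reduces that control flow to plain fork()/waitpid(); reap_one() is a hypothetical stand-in, not the real reap_child():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

/* Stand-in for reap_child(): wait for any child, return false when none remain. */
static bool
reap_one(void)
{
	int			status;

	if (waitpid(-1, &status, 0) <= 0)
		return false;
	return true;
}

int
main(void)
{
	int			i;

	for (i = 0; i < 3; i++)
	{
		pid_t		pid = fork();

		if (pid == 0)			/* child: stand-in for one dump/restore job */
			_exit(0);
		if (pid < 0)
		{
			perror("fork");
			exit(1);
		}
	}

	/* reap all children, exactly like the loops above */
	while (reap_one() == true)
		;
	return 0;
}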
Example #6
static void
create_new_objects(void)
{
	int			dbnum;

	prep_status("Restoring database schemas in the new cluster\n");

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];
		PQExpBufferData connstr,
					escaped_connstr;

		initPQExpBuffer(&connstr);
		appendPQExpBuffer(&connstr, "dbname=");
		appendConnStrVal(&connstr, old_db->db_name);
		initPQExpBuffer(&escaped_connstr);
		appendShellString(&escaped_connstr, connstr.data);
		termPQExpBuffer(&connstr);

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * pg_dump only produces its output at the end, so there is little
		 * parallelism if using the pipe.
		 */
		parallel_exec_prog(log_file_name,
						   NULL,
						   "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname %s \"%s\"",
						   new_cluster.bindir,
						   cluster_conn_opts(&new_cluster),
						   escaped_connstr.data,
						   sql_file_name);

		termPQExpBuffer(&escaped_connstr);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	end_progress_output();
	check_ok();

	/*
	 * We don't have minmxids for databases or relations in pre-9.3 clusters,
	 * so set those after we have restored the schema.
	 */
	if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
		set_frozenxids(true);

	/* regenerate now that we have objects in the databases */
	get_db_and_rel_infos(&new_cluster);
}
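
The point of this revision is that the database name is wrapped twice before reaching the command line: appendConnStrVal() quotes it for a libpq connection string, and appendShellString() quotes that result for the shell, so names containing spaces or quotes survive. The sketch below approximates only the second layer, under the assumption that single-quote wrapping with '\'' escapes is what appendShellString() produces on non-Windows platforms; shell_quote() is a hypothetical helper:

#include <stdio.h>
#include <string.h>

/* Minimal POSIX shell quoting: wrap in single quotes and escape embedded
 * single quotes as '\''.  Assumed to approximate appendShellString() on
 * non-Windows platforms; the real function also rejects unsafe characters. */
static void
shell_quote(const char *in, char *out, size_t outlen)
{
	size_t		n = 0;
	const char *p;

	out[n++] = '\'';
	for (p = in; *p && n + 5 < outlen; p++)
	{
		if (*p == '\'')
		{
			memcpy(out + n, "'\\''", 4);
			n += 4;
		}
		else
			out[n++] = *p;
	}
	out[n++] = '\'';
	out[n] = '\0';
}

int
main(void)
{
	char		quoted[256];

	shell_quote("dbname='my db'", quoted, sizeof(quoted));	/* hypothetical connstr */
	printf("pg_restore --dbname %s dumpfile\n", quoted);
	return 0;
}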
Example #7
void
generate_old_dump(void)
{
    int			dbnum;

    prep_status("Creating dump of global objects");

    /* run new pg_dumpall binary for globals */
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/pg_dumpall\" %s --schema-only --globals-only --binary-upgrade %s -f %s",
              new_cluster.bindir, cluster_conn_opts(&old_cluster),
              log_opts.verbose ? "--verbose" : "",
              GLOBALS_DUMP_FILE);
    check_ok();

    prep_status("Creating dump of database schemas\n");

    /* create per-db dump files */
    for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
        char 		sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
        DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];

        pg_log(PG_STATUS, "%s", old_db->db_name);
        snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
        snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

        parallel_exec_prog(log_file_name, NULL,
                           "\"%s/pg_dump\" %s --schema-only --binary-upgrade --format=custom %s --file=\"%s\" \"%s\"",
                           new_cluster.bindir, cluster_conn_opts(&old_cluster),
                           log_opts.verbose ? "--verbose" : "", sql_file_name, old_db->db_name);
    }

    /* reap all children */
    while (reap_child(true) == true)
        ;

    end_progress_output();
    check_ok();
}
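
DB_DUMP_FILE_MASK and DB_DUMP_LOG_FILE_MASK are printf-style patterns keyed by the database OID, which is how every database gets its own dump and log file. Here is a sketch of the naming scheme; the mask strings and the OID are invented for illustration, not copied from pg_upgrade's headers:

#include <stdio.h>

/* Hypothetical masks; the real ones are defined in pg_upgrade's headers. */
#define DB_DUMP_FILE_MASK		"pg_upgrade_dump_%u.custom"
#define DB_DUMP_LOG_FILE_MASK	"pg_upgrade_dump_%u.log"

int
main(void)
{
	unsigned int db_oid = 16384;	/* hypothetical database OID */
	char		sql_file_name[256];
	char		log_file_name[256];

	snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, db_oid);
	snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, db_oid);
	printf("%s\n%s\n", sql_file_name, log_file_name);
	return 0;
}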
Example #8
static void
prepare_new_globals(void)
{
	/*
	 * Before we restore anything, set frozenxids of initdb-created tables.
	 */
	set_frozenxids(false);

	/*
	 * Now restore global objects (roles and tablespaces).
	 */
	prep_status("Restoring global objects in the new cluster");

	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  GLOBALS_DUMP_FILE);
	check_ok();
}
Example #9
static void
prepare_new_databases(void)
{
	/*
	 * We set autovacuum_freeze_max_age to its maximum value so autovacuum
	 * does not launch here and delete clog files before the frozen xids are
	 * set.
	 */

	set_frozenxids();

	prep_status("Restoring global objects in the new cluster");

	/*
	 * Install support functions in the global-object restore database to
	 * preserve pg_authid.oid.  pg_dumpall uses 'template0' as its template
	 * database, so objects we add into 'template1' are not propagated.  They
	 * are removed on pg_upgrade exit.
	 */
	install_support_functions_in_new_db("template1");

	/*
	 * We have to create the databases first so we can install support
	 * functions in all the other databases.  Ideally we could create the
	 * support functions in template1, but pg_dumpall creates databases using
	 * the template0 template.
	 */
	exec_prog(RESTORE_LOG_FILE, NULL, true,
			  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  GLOBALS_DUMP_FILE);
	check_ok();

	/* we load this to get a current list of databases */
	get_db_and_rel_infos(&new_cluster);
}
Example #10
static void
create_new_objects(void)
{
	int			dbnum;

	prep_status("Restoring database schemas in the new cluster\n");

	/*
	 * We cannot process the template1 database concurrently with others,
	 * because when it's transiently dropped, connection attempts would fail.
	 * So handle it in a separate non-parallelized pass.
	 */
	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];
		const char *create_opts;

		/* Process only template1 in this pass */
		if (strcmp(old_db->db_name, "template1") != 0)
			continue;

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * template1 will already exist in the target installation, so tell
		 * pg_restore to drop and recreate it; otherwise we would fail to
		 * propagate its database-level properties.
		 */
		create_opts = "--clean --create";

		exec_prog(log_file_name,
				  NULL,
				  true,
				  true,
				  "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
				  "--dbname postgres \"%s\"",
				  new_cluster.bindir,
				  cluster_conn_opts(&new_cluster),
				  create_opts,
				  sql_file_name);

		break;					/* done once we've processed template1 */
	}

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];
		const char *create_opts;

		/* Skip template1 in this pass */
		if (strcmp(old_db->db_name, "template1") == 0)
			continue;

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * postgres will also already exist in the target installation, so
		 * tell pg_restore to drop and recreate it; all other databases only
		 * need to be created.
		 */
		if (strcmp(old_db->db_name, "postgres") == 0)
			create_opts = "--clean --create";
		else
			create_opts = "--create";

		parallel_exec_prog(log_file_name,
						   NULL,
						   "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
						   "--dbname template1 \"%s\"",
						   new_cluster.bindir,
						   cluster_conn_opts(&new_cluster),
						   create_opts,
						   sql_file_name);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	end_progress_output();
	check_ok();

	/*
	 * We don't have minmxids for databases or relations in pre-9.3 clusters,
	 * so set those after we have restored the schema.
	 */
	if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
		set_frozenxids(true);

	/* update new_cluster info now that we have objects in the databases */
	get_db_and_rel_infos(&new_cluster);
}
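
The only per-database variation in the second pass is create_opts: databases that initdb already created must be dropped and recreated, while everything else is simply created. A small sketch isolating that decision; choose_create_opts() and app_db are hypothetical names:

#include <stdio.h>
#include <string.h>

/* Pick pg_restore creation flags the way create_new_objects() does:
 * template1 and postgres already exist in the new cluster, so they are
 * dropped and recreated; all other databases are simply created. */
static const char *
choose_create_opts(const char *db_name)
{
	if (strcmp(db_name, "template1") == 0 ||
		strcmp(db_name, "postgres") == 0)
		return "--clean --create";
	return "--create";
}

int
main(void)
{
	const char *dbs[] = {"template1", "postgres", "app_db"};
	int			i;

	for (i = 0; i < 3; i++)
		printf("%-10s -> %s\n", dbs[i], choose_create_opts(dbs[i]));
	return 0;
}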