static void
prepare_new_databases(void)
{
    /*
     * We set autovacuum_freeze_max_age to its maximum value so autovacuum
     * does not launch here and delete clog files, before the frozen xids are
     * set.
     */
    set_frozenxids();

    prep_status("Creating databases in the new cluster");

    /*
     * Install support functions in the global-restore database to preserve
     * pg_authid.oid.
     */
    install_support_functions_in_new_db(GLOBAL_DUMP_DB);

    /*
     * We have to create the databases first so we can install support
     * functions in all the other databases.  Ideally we could create the
     * support functions in template1 but pg_dumpall creates databases using
     * the template0 template.
     */
    exec_prog(true,
              SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
              /* --no-psqlrc prevents AUTOCOMMIT=off */
              "--no-psqlrc --port %d --username \"%s\" "
              "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
              new_cluster.bindir, new_cluster.port, os_info.user,
              os_info.cwd, GLOBALS_DUMP_FILE, log_opts.filename2);

    check_ok();

    /* we load this to get a current list of databases */
    get_db_and_rel_infos(&new_cluster);
}
void
check_new_cluster(void)
{
    set_locale_and_encoding(&new_cluster);

    check_locale_and_encoding(&old_cluster.controldata, &new_cluster.controldata);

    get_db_and_rel_infos(&new_cluster);

    check_new_cluster_is_empty();
    check_loadable_libraries();

    if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
        check_hard_link();

    check_is_super_user(&new_cluster);

    /*
     * We don't restore our own user, so both clusters must have matching
     * install-user oids.
     */
    if (old_cluster.install_role_oid != new_cluster.install_role_oid)
        pg_log(PG_FATAL,
               "Old and new cluster install users have different values for pg_authid.oid.\n");

    /*
     * We only allow the install user in the new cluster because other
     * defined users might match users defined in the old cluster and
     * generate an error during pg_dump restore.
     */
    if (new_cluster.role_count != 1)
        pg_log(PG_FATAL, "Only the install user can be defined in the new cluster.\n");

    check_for_prepared_transactions(&new_cluster);
}
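/*
 * Illustrative sketch, not pg_upgrade code: one way the install-user OID
 * compared above could be fetched from a running cluster.  The helper name
 * get_install_role_oid() and the conninfo argument are hypothetical; only
 * the libpq calls and the pg_authid catalog are real.
 */
#include <stdlib.h>
#include "libpq-fe.h"

static Oid
get_install_role_oid(const char *conninfo)
{
    PGconn     *conn = PQconnectdb(conninfo);
    PGresult   *res;
    Oid         roleoid;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        PQfinish(conn);
        return InvalidOid;
    }

    /* the connecting user is the install user for this cluster */
    res = PQexec(conn, "SELECT oid FROM pg_catalog.pg_authid "
                       "WHERE rolname = current_user");
    if (PQresultStatus(res) != PGRES_TUPLES_OK || PQntuples(res) != 1)
        roleoid = InvalidOid;
    else
        roleoid = (Oid) strtoul(PQgetvalue(res, 0, 0), NULL, 10);

    PQclear(res);
    PQfinish(conn);
    return roleoid;
}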
static void
prepare_new_databases(void)
{
    /*
     * We set autovacuum_freeze_max_age to its maximum value so autovacuum
     * does not launch here and delete clog files, before the frozen xids are
     * set.
     */
    set_frozenxids();

    prep_status("Restoring global objects in the new cluster");

    /*
     * Install support functions in the global-object restore database to
     * preserve pg_authid.oid.  pg_dumpall uses 'template0' as its template
     * database, so objects we add to 'template1' are not propagated.  They
     * are removed on pg_upgrade exit.
     */
    install_support_functions_in_new_db("template1");

    /*
     * We have to create the databases first so we can install support
     * functions in all the other databases.  Ideally we could create the
     * support functions in template1 but pg_dumpall creates databases using
     * the template0 template.
     */
    exec_prog(RESTORE_LOG_FILE, NULL, true,
              "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              GLOBALS_DUMP_FILE);
    check_ok();

    /* we load this to get a current list of databases */
    get_db_and_rel_infos(&new_cluster);
}
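/*
 * Illustrative sketch, not part of pg_upgrade: roughly the kind of DDL that
 * install_support_functions_in_new_db() has to issue so the OID-preserving
 * calls are available while the dump is restored.  The schema name, library
 * name, and function name below are assumptions for the example and may not
 * match any particular pg_upgrade release exactly.
 */
static const char *support_function_ddl[] = {
    "CREATE SCHEMA binary_upgrade",
    "CREATE OR REPLACE FUNCTION binary_upgrade.set_next_pg_authid_oid(OID) "
    "RETURNS VOID AS '$libdir/pg_upgrade_support' LANGUAGE C STRICT",
    NULL                        /* terminator */
};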
static void
create_new_objects(void)
{
    int         dbnum;

    prep_status("Restoring database schemas in the new cluster\n");

    /*
     * We cannot process the template1 database concurrently with others,
     * because when it's transiently dropped, connection attempts would fail.
     * So handle it in a separate non-parallelized pass.
     */
    for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
        char        sql_file_name[MAXPGPATH],
                    log_file_name[MAXPGPATH];
        DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
        const char *create_opts;

        /* Process only template1 in this pass */
        if (strcmp(old_db->db_name, "template1") != 0)
            continue;

        pg_log(PG_STATUS, "%s", old_db->db_name);
        snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
        snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

        /*
         * template1 and postgres databases will already exist in the target
         * installation, so tell pg_restore to drop and recreate them;
         * otherwise we would fail to propagate their database-level
         * properties.
         */
        create_opts = "--clean --create";

        exec_prog(log_file_name,
                  NULL,
                  true,
                  true,
                  "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
                  "--dbname postgres \"%s\"",
                  new_cluster.bindir,
                  cluster_conn_opts(&new_cluster),
                  create_opts,
                  sql_file_name);

        break;                  /* done once we've processed template1 */
    }

    for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
        char        sql_file_name[MAXPGPATH],
                    log_file_name[MAXPGPATH];
        DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
        const char *create_opts;

        /* Skip template1 in this pass */
        if (strcmp(old_db->db_name, "template1") == 0)
            continue;

        pg_log(PG_STATUS, "%s", old_db->db_name);
        snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
        snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

        /*
         * template1 and postgres databases will already exist in the target
         * installation, so tell pg_restore to drop and recreate them;
         * otherwise we would fail to propagate their database-level
         * properties.
         */
        if (strcmp(old_db->db_name, "postgres") == 0)
            create_opts = "--clean --create";
        else
            create_opts = "--create";

        parallel_exec_prog(log_file_name,
                           NULL,
                           "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
                           "--dbname template1 \"%s\"",
                           new_cluster.bindir,
                           cluster_conn_opts(&new_cluster),
                           create_opts,
                           sql_file_name);
    }

    /* reap all children */
    while (reap_child(true) == true)
        ;

    end_progress_output();
    check_ok();

    /*
     * We don't have minmxids for databases or relations in pre-9.3 clusters,
     * so set those after we have restored the schema.
     */
    if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
        set_frozenxids(true);

    /* update new_cluster info now that we have objects in the databases */
    get_db_and_rel_infos(&new_cluster);
}
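/*
 * Illustrative sketch, not pg_upgrade code: the dispatch-and-reap pattern
 * that parallel_exec_prog() and reap_child() implement in the loop above.
 * Each restore command runs in a child process; the parent blocks when the
 * job table is full and reaps every child before continuing.  The helper
 * names and the fixed job limit are assumptions for the example.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define SKETCH_MAX_JOBS 4

static int  n_running = 0;

/* wait for one child (blocking if requested); bail out on child failure */
static bool
sketch_reap_child(bool wait_for_child)
{
    int         status;
    pid_t       pid;

    if (n_running == 0)
        return false;

    pid = waitpid(-1, &status, wait_for_child ? 0 : WNOHANG);
    if (pid <= 0)
        return false;

    n_running--;
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
    {
        fprintf(stderr, "child process %d failed\n", (int) pid);
        exit(1);
    }
    return true;
}

/* launch one shell command in a child process, throttled by the job limit */
static void
sketch_parallel_exec(const char *cmd)
{
    pid_t       pid;

    while (n_running >= SKETCH_MAX_JOBS)
        sketch_reap_child(true);

    pid = fork();
    if (pid < 0)
    {
        fprintf(stderr, "fork failed\n");
        exit(1);
    }
    if (pid == 0)               /* child: run the command and report status */
        exit(system(cmd) == 0 ? 0 : 1);
    n_running++;                /* parent */
}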
void
check_old_cluster(bool live_check, char **sequence_script_file_name)
{
    /* -- OLD -- */

    if (!live_check)
        start_postmaster(&old_cluster);

    set_locale_and_encoding(&old_cluster);

    get_pg_database_relfilenode(&old_cluster);

    /* Extract a list of databases and tables from the old cluster */
    get_db_and_rel_infos(&old_cluster);

    init_tablespaces();

    get_loadable_libraries();

    /*
     * Check for various failure cases
     */
    check_is_super_user(&old_cluster);
    check_for_prepared_transactions(&old_cluster);
    check_for_reg_data_type_usage(&old_cluster);
    check_for_isn_and_int8_passing_mismatch(&old_cluster);

    /* old = PG 8.3 checks? */
    if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803)
    {
        old_8_3_check_for_name_data_type_usage(&old_cluster);
        old_8_3_check_for_tsquery_usage(&old_cluster);
        old_8_3_check_ltree_usage(&old_cluster);
        if (user_opts.check)
        {
            old_8_3_rebuild_tsvector_tables(&old_cluster, true);
            old_8_3_invalidate_hash_gin_indexes(&old_cluster, true);
            old_8_3_invalidate_bpchar_pattern_ops_indexes(&old_cluster, true);
        }
        else

            /*
             * While we have the old server running, create the script to
             * properly restore its sequence values; we report this at the
             * end.
             */
            *sequence_script_file_name =
                old_8_3_create_sequence_script(&old_cluster);
    }

    /* Pre-PG 9.0 had no large object permissions */
    if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
        new_9_0_populate_pg_largeobject_metadata(&old_cluster, true);

    /*
     * While not a check option, we do this now because this is the only time
     * the old server is running.
     */
    if (!user_opts.check)
    {
        generate_old_dump();
        split_old_dump();
    }

    if (!live_check)
        stop_postmaster(false);
}
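/*
 * Illustrative sketch, not pg_upgrade code: the general shape of a check
 * such as check_for_prepared_transactions().  A prepared transaction would
 * be lost across the upgrade, so the check fails if pg_prepared_xacts is
 * non-empty.  The connection handling and error reporting here are
 * simplified stand-ins for pg_upgrade's own helpers.
 */
#include <stdio.h>
#include <stdlib.h>
#include "libpq-fe.h"

static void
sketch_check_for_prepared_transactions(const char *conninfo)
{
    PGconn     *conn = PQconnectdb(conninfo);
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        exit(1);
    }

    res = PQexec(conn, "SELECT * FROM pg_catalog.pg_prepared_xacts");
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
        exit(1);
    }
    if (PQntuples(res) != 0)
    {
        fprintf(stderr, "The cluster contains prepared transactions\n");
        exit(1);
    }

    PQclear(res);
    PQfinish(conn);
}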
static void
create_new_objects(void)
{
    int         dbnum;

    prep_status("Adding support functions to new cluster");

    /*
     * Technically, we only need to install these support functions in new
     * databases that also exist in the old cluster, but for completeness we
     * process all new databases.
     */
    for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
    {
        DbInfo     *new_db = &new_cluster.dbarr.dbs[dbnum];

        /* skip db we already installed */
        if (strcmp(new_db->db_name, "template1") != 0)
            install_support_functions_in_new_db(new_db->db_name);
    }
    check_ok();

    prep_status("Restoring database schemas in the new cluster\n");

    for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
        char        sql_file_name[MAXPGPATH],
                    log_file_name[MAXPGPATH];
        DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];

        pg_log(PG_STATUS, "%s", old_db->db_name);
        snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
        snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

        /*
         * pg_dump only produces its output at the end, so there is little
         * parallelism if using the pipe.
         */
        parallel_exec_prog(log_file_name,
                           NULL,
                           "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
                           new_cluster.bindir,
                           cluster_conn_opts(&new_cluster),
                           old_db->db_name,
                           sql_file_name);
    }

    /* reap all children */
    while (reap_child(true) == true)
        ;

    end_progress_output();
    check_ok();

    /*
     * We don't have minmxids for databases or relations in pre-9.3 clusters,
     * so set those after we have restored the schemas.
     */
    if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
        set_frozenxids(true);

    optionally_create_toast_tables();

    /* regenerate now that we have objects in the databases */
    get_db_and_rel_infos(&new_cluster);

    uninstall_support_functions_from_new_cluster();
}
void
check_and_dump_old_cluster(bool live_check)
{
    /* -- OLD -- */

    if (!live_check)
        start_postmaster(&old_cluster, true);

    /* Extract a list of databases and tables from the old cluster */
    get_db_and_rel_infos(&old_cluster);

    init_tablespaces();

    get_loadable_libraries();

    /*
     * Check for various failure cases
     */
    check_is_install_user(&old_cluster);
    check_proper_datallowconn(&old_cluster);
    check_for_prepared_transactions(&old_cluster);
    check_for_reg_data_type_usage(&old_cluster);
    check_for_isn_and_int8_passing_mismatch(&old_cluster);

    /*
     * Pre-PG 10 allowed tables with 'unknown' type columns and
     * non-WAL-logged hash indexes
     */
    if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
    {
        old_9_6_check_for_unknown_data_type_usage(&old_cluster);
        if (user_opts.check)
            old_9_6_invalidate_hash_indexes(&old_cluster, true);
    }

    /* 9.5 and below should not have roles starting with pg_ */
    if (GET_MAJOR_VERSION(old_cluster.major_version) <= 905)
        check_for_pg_role_prefix(&old_cluster);

    if (GET_MAJOR_VERSION(old_cluster.major_version) == 904 &&
        old_cluster.controldata.cat_ver < JSONB_FORMAT_CHANGE_CAT_VER)
        check_for_jsonb_9_4_usage(&old_cluster);

    /* Pre-PG 9.4 had a different 'line' data type internal format */
    if (GET_MAJOR_VERSION(old_cluster.major_version) <= 903)
        old_9_3_check_for_line_data_type_usage(&old_cluster);

    /* Pre-PG 9.0 had no large object permissions */
    if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
        new_9_0_populate_pg_largeobject_metadata(&old_cluster, true);

    /*
     * While not a check option, we do this now because this is the only time
     * the old server is running.
     */
    if (!user_opts.check)
        generate_old_dump();

    if (!live_check)
        stop_postmaster(false);
}
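/*
 * Illustrative sketch, not pg_upgrade code: the kind of per-database catalog
 * query a check like old_9_6_check_for_unknown_data_type_usage() has to run
 * to find columns of type 'unknown'.  The relkind filter and the schema
 * exclusions shown here are assumptions for the example; pg_upgrade's exact
 * query may differ.
 */
static const char *unknown_type_query =
    "SELECT n.nspname, c.relname, a.attname "
    "FROM pg_catalog.pg_class c, "
    "     pg_catalog.pg_namespace n, "
    "     pg_catalog.pg_attribute a "
    "WHERE c.oid = a.attrelid AND "
    "      NOT a.attisdropped AND "
    "      a.atttypid = 'pg_catalog.unknown'::pg_catalog.regtype AND "
    "      c.relkind IN ('r', 'c', 'm') AND "
    "      n.oid = c.relnamespace AND "
    "      n.nspname NOT IN ('pg_catalog', 'information_schema')";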