static void create_new_objects(void) { int dbnum; prep_status("Restoring database schemas in the new cluster\n"); for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) { char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH]; DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; PQExpBufferData connstr, escaped_connstr; initPQExpBuffer(&connstr); appendPQExpBuffer(&connstr, "dbname="); appendConnStrVal(&connstr, old_db->db_name); initPQExpBuffer(&escaped_connstr); appendShellString(&escaped_connstr, connstr.data); termPQExpBuffer(&connstr); pg_log(PG_STATUS, "%s", old_db->db_name); snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); /* * pg_dump only produces its output at the end, so there is little * parallelism if using the pipe. */ parallel_exec_prog(log_file_name, NULL, "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname %s \"%s\"", new_cluster.bindir, cluster_conn_opts(&new_cluster), escaped_connstr.data, sql_file_name); termPQExpBuffer(&escaped_connstr); } /* reap all children */ while (reap_child(true) == true) ; end_progress_output(); check_ok(); /* * We don't have minmxids for databases or relations in pre-9.3 clusters, * so set those after we have restored the schema. */ if (GET_MAJOR_VERSION(old_cluster.major_version) < 903) set_frozenxids(true); /* regenerate now that we have objects in the databases */ get_db_and_rel_infos(&new_cluster); }
/*
 * Append a psql meta-command that connects to the given database with the
 * then-current connection's user, host and port.
 */
void
appendPsqlMetaConnect(PQExpBuffer buf, const char *dbname)
{
	bool		simple = true;
	const char *p;

	/*
	 * If the name is plain ASCII characters, emit a trivial "\connect "foo"".
	 * For other names, even many not technically requiring it, take the
	 * general connection-string path.  No database has a zero-length name.
	 */
	for (p = dbname; *p != '\0'; p++)
	{
		char		c = *p;

		/* Newlines would break the psql meta-command parser outright. */
		if (c == '\n' || c == '\r')
		{
			fprintf(stderr,
					_("database name contains a newline or carriage return: \"%s\"\n"),
					dbname);
			exit(EXIT_FAILURE);
		}

		if (!((c >= 'a' && c <= 'z') ||
			  (c >= 'A' && c <= 'Z') ||
			  (c >= '0' && c <= '9') ||
			  c == '_' || c == '.'))
			simple = false;
	}

	appendPQExpBufferStr(buf, "\\connect ");

	if (simple)
		appendPQExpBufferStr(buf, fmtId(dbname));
	else
	{
		PQExpBufferData connstr;

		initPQExpBuffer(&connstr);
		appendPQExpBuffer(&connstr, "dbname=");
		appendConnStrVal(&connstr, dbname);

		appendPQExpBuffer(buf, "-reuse-previous=on ");

		/*
		 * As long as the name does not contain a newline, SQL identifier
		 * quoting satisfies the psql meta-command parser.  Prefer not to
		 * involve psql-interpreted single quotes, which behaved differently
		 * before PostgreSQL 9.2.
		 */
		appendPQExpBufferStr(buf, fmtId(connstr.data));
		termPQExpBuffer(&connstr);
	}

	appendPQExpBufferChar(buf, '\n');
}
/*
 * get_db_conn()
 *
 * get database connection, using named database + standard params for cluster
 *
 * Builds a keyword/value connection string with every value routed through
 * appendConnStrVal() so names containing spaces or quotes are handled
 * correctly.  Returns the PGconn from PQconnectdb(); the caller must check
 * the connection status and eventually PQfinish() it.
 */
static PGconn *
get_db_conn(ClusterInfo *cluster, const char *db_name)
{
	PQExpBufferData conn_opts;
	PGconn	   *conn;

	/* Build connection string with proper quoting */
	initPQExpBuffer(&conn_opts);
	appendPQExpBufferStr(&conn_opts, "dbname=");
	appendConnStrVal(&conn_opts, db_name);
	/*
	 * The original text here had a redacted "******" literal as the user and
	 * passed a printf-style argument to appendPQExpBufferStr(), which takes
	 * no format arguments.  Emit the user as a quoted value instead.
	 * NOTE(review): assumes os_info.user holds the connection user name for
	 * this upgrade run -- confirm against the surrounding file.
	 */
	appendPQExpBufferStr(&conn_opts, " user=");
	appendConnStrVal(&conn_opts, os_info.user);
	appendPQExpBuffer(&conn_opts, " port=%d", cluster->port);
	if (cluster->sockdir)
	{
		/* Connect via the cluster's Unix socket directory when given. */
		appendPQExpBufferStr(&conn_opts, " host=");
		appendConnStrVal(&conn_opts, cluster->sockdir);
	}

	conn = PQconnectdb(conn_opts.data);
	termPQExpBuffer(&conn_opts);
	return conn;
}
static void cluster_all_databases(bool verbose, const char *maintenance_db, const char *host, const char *port, const char *username, enum trivalue prompt_password, const char *progname, bool echo, bool quiet) { PGconn *conn; PGresult *result; PQExpBufferData connstr; int i; conn = connectMaintenanceDatabase(maintenance_db, host, port, username, prompt_password, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", progname, echo); PQfinish(conn); initPQExpBuffer(&connstr); for (i = 0; i < PQntuples(result); i++) { char *dbname = PQgetvalue(result, i, 0); if (!quiet) { printf(_("%s: clustering database \"%s\"\n"), progname, dbname); fflush(stdout); } resetPQExpBuffer(&connstr); appendPQExpBuffer(&connstr, "dbname="); appendConnStrVal(&connstr, dbname); cluster_one_database(connstr.data, verbose, NULL, host, port, username, prompt_password, progname, echo); } termPQExpBuffer(&connstr); PQclear(result); }
/* * Vacuum/analyze all connectable databases. * * In analyze-in-stages mode, we process all databases in one stage before * moving on to the next stage. That ensure minimal stats are available * quickly everywhere before generating more detailed ones. */ static void vacuum_all_databases(vacuumingOptions *vacopts, bool analyze_in_stages, const char *maintenance_db, const char *host, const char *port, const char *username, enum trivalue prompt_password, int concurrentCons, const char *progname, bool echo, bool quiet) { PGconn *conn; PGresult *result; PQExpBufferData connstr; int stage; int i; conn = connectMaintenanceDatabase(maintenance_db, host, port, username, prompt_password, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", progname, echo); PQfinish(conn); initPQExpBuffer(&connstr); if (analyze_in_stages) { /* * When analyzing all databases in stages, we analyze them all in the * fastest stage first, so that initial statistics become available * for all of them as soon as possible. * * This means we establish several times as many connections, but * that's a secondary consideration. */ for (stage = 0; stage < ANALYZE_NUM_STAGES; stage++) { for (i = 0; i < PQntuples(result); i++) { resetPQExpBuffer(&connstr); appendPQExpBuffer(&connstr, "dbname="); appendConnStrVal(&connstr, PQgetvalue(result, i, 0)); vacuum_one_database(connstr.data, vacopts, stage, NULL, host, port, username, prompt_password, concurrentCons, progname, echo, quiet); } } } else { for (i = 0; i < PQntuples(result); i++) { resetPQExpBuffer(&connstr); appendPQExpBuffer(&connstr, "dbname="); appendConnStrVal(&connstr, PQgetvalue(result, i, 0)); vacuum_one_database(connstr.data, vacopts, ANALYZE_NO_STAGE, NULL, host, port, username, prompt_password, concurrentCons, progname, echo, quiet); } } termPQExpBuffer(&connstr); PQclear(result); }
/*
 * generate_old_dump()
 *
 * Dump the old cluster: first the global objects via pg_dumpall
 * --globals-only, then each database's schema via parallel pg_dump runs.
 * The per-database dump files are created under a restrictive umask so
 * group/other users cannot read them.
 */
void
generate_old_dump(void)
{
	int			dbnum;
	mode_t		old_umask;

	prep_status("Creating dump of global objects");

	/* run new pg_dumpall binary for globals */
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers "
			  "--binary-upgrade %s -f %s",
			  new_cluster.bindir, cluster_conn_opts(&old_cluster),
			  log_opts.verbose ? "--verbose" : "",
			  GLOBALS_DUMP_FILE);
	check_ok();

	prep_status("Creating dump of database schemas\n");

	/*
	 * Set umask for this function, all functions it calls, and all
	 * subprocesses/threads it creates.  We can't use fopen_priv() as Windows
	 * uses threads and umask is process-global.
	 */
	old_umask = umask(S_IRWXG | S_IRWXO);

	/* create per-db dump files */
	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];
		PQExpBufferData connstr,
					escaped_connstr;

		/* Build "dbname=<name>" with value quoting, then shell-quote it. */
		initPQExpBuffer(&connstr);
		appendPQExpBuffer(&connstr, "dbname=");
		appendConnStrVal(&connstr, old_db->db_name);
		initPQExpBuffer(&escaped_connstr);
		appendShellString(&escaped_connstr, connstr.data);
		termPQExpBuffer(&connstr);

		pg_log(PG_STATUS, "%s", old_db->db_name);

		/* Per-database dump/log file names are keyed by database OID. */
		snprintf(sql_file_name, sizeof(sql_file_name),
				 DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name),
				 DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/* Launch pg_dump for this database; runs may proceed in parallel. */
		parallel_exec_prog(log_file_name, NULL,
						   "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
						   "--binary-upgrade --format=custom %s --file=\"%s\" %s",
						   new_cluster.bindir, cluster_conn_opts(&old_cluster),
						   log_opts.verbose ? "--verbose" : "",
						   sql_file_name, escaped_connstr.data);

		termPQExpBuffer(&escaped_connstr);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	/* restore the process-global umask before returning */
	umask(old_umask);

	end_progress_output();
	check_ok();
}