/*
 * sql_exec
 *
 * Run the given query and print the result set as an aligned, padded
 * table on stdout.
 *
 * conn  - open libpq connection
 * todo  - SQL text to execute (expected to return rows)
 * quiet - if true, suppress the column-name header and separator line
 *
 * On query failure the error and the query text are reported, the
 * connection is closed, and the program exits.  Always returns 0.
 */
int
sql_exec(PGconn *conn, const char *todo, bool quiet)
{
	PGresult   *res;
	int			nfields;
	int			nrows;
	int			i,
				j,
				l;
	int		   *length;
	char	   *pad;

	/* make the call */
	res = PQexec(conn, todo);

	/*
	 * Check and deal with errors.  Use the ExecStatusType enum rather than
	 * the former magic-number comparison ("> 2"), which silently accepted
	 * PGRES_COMMAND_OK and PGRES_EMPTY_QUERY results that carry no tuples.
	 */
	if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "oid2name: query failed: %s\n", PQerrorMessage(conn));
		fprintf(stderr, "oid2name: query was: %s\n", todo);
		PQclear(res);
		PQfinish(conn);
		exit(-1);
	}

	/* get the number of fields */
	nrows = PQntuples(res);
	nfields = PQnfields(res);

	/* for each field, get the needed width */
	length = (int *) pg_malloc(sizeof(int) * nfields);
	for (j = 0; j < nfields; j++)
		length[j] = strlen(PQfname(res, j));

	for (i = 0; i < nrows; i++)
	{
		for (j = 0; j < nfields; j++)
		{
			l = strlen(PQgetvalue(res, i, j));
			if (l > length[j])
				length[j] = l;	/* reuse l; no need to re-run strlen */
		}
	}

	/* print a header */
	if (!quiet)
	{
		for (j = 0, l = 0; j < nfields; j++)
		{
			fprintf(stdout, "%*s", length[j] + 2, PQfname(res, j));
			l += length[j] + 2;
		}
		fprintf(stdout, "\n");

		/* underline the header with a row of dashes of total width l */
		pad = (char *) pg_malloc(l + 1);
		MemSet(pad, '-', l);
		pad[l] = '\0';
		fprintf(stdout, "%s\n", pad);
		free(pad);
	}

	/* for each row, dump the information */
	for (i = 0; i < nrows; i++)
	{
		for (j = 0; j < nfields; j++)
			fprintf(stdout, "%*s", length[j] + 2, PQgetvalue(res, i, j));
		fprintf(stdout, "\n");
	}

	/* cleanup */
	PQclear(res);
	free(length);

	return 0;
}
static int copy_file(const char *srcfile, const char *dstfile, bool force) { #define COPY_BUF_SIZE (50 * BLCKSZ) int src_fd; int dest_fd; char *buffer; int ret = 0; int save_errno = 0; if ((srcfile == NULL) || (dstfile == NULL)) { errno = EINVAL; return -1; } if ((src_fd = open(srcfile, O_RDONLY, 0)) < 0) return -1; if ((dest_fd = open(dstfile, O_RDWR | O_CREAT | (force ? 0 : O_EXCL), S_IRUSR | S_IWUSR)) < 0) { save_errno = errno; if (src_fd != 0) close(src_fd); errno = save_errno; return -1; } buffer = (char *) pg_malloc(COPY_BUF_SIZE); /* perform data copying i.e read src source, write to destination */ while (true) { ssize_t nbytes = read(src_fd, buffer, COPY_BUF_SIZE); if (nbytes < 0) { save_errno = errno; ret = -1; break; } if (nbytes == 0) break; errno = 0; if (write(dest_fd, buffer, nbytes) != nbytes) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) errno = ENOSPC; save_errno = errno; ret = -1; break; } } pg_free(buffer); if (src_fd != 0) close(src_fd); if (dest_fd != 0) close(dest_fd); if (save_errno != 0) errno = save_errno; return ret; }
/*
 * get_loadable_libraries()
 *
 * Fetch the names of all old libraries containing C-language functions.
 * We will later check that they all exist in the new installation.
 *
 * Results are accumulated into os_info.libraries / os_info.num_libraries,
 * with duplicates removed and one slot reserved for pg_upgrade_support.
 */
void
get_loadable_libraries(void)
{
	PGresult  **ress;
	int			totaltups;
	int			dbnum;
	bool		found_public_plpython_handler = false;
	char	   *pg83_str;

	/* one query result per database in the old cluster */
	ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *));
	totaltups = 0;

	/*
	 * gpoptutils was removed during the 5.0 development cycle and the
	 * functionality is now in backend, skip when checking for loadable
	 * libraries in 4.3-> upgrades.
	 */
	if (GET_MAJOR_VERSION(old_cluster.major_version) == 802)
		pg83_str = "probin NOT IN ('$libdir/gpoptutils') AND ";
	else
		pg83_str = "";

	/* Fetch all library names, removing duplicates within each DB */
	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &old_cluster.dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(&old_cluster, active_db->db_name);

		/*
		 * Fetch all libraries referenced in this DB.  We can't exclude the
		 * "pg_catalog" schema because, while such functions are not
		 * explicitly dumped by pg_dump, they do reference implicit objects
		 * that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
		 */
		ress[dbnum] = executeQueryOrDie(conn,
										"SELECT DISTINCT probin "
										"FROM pg_catalog.pg_proc "
										"WHERE prolang = 13 /* C */ AND "
										"probin IS NOT NULL AND "
										" %s "
										"oid >= %u;",
										pg83_str,
										FirstNormalObjectId);
		totaltups += PQntuples(ress[dbnum]);

		/*
		 * Systems that install plpython before 8.1 have
		 * plpython_call_handler() defined in the "public" schema, causing
		 * pg_dumpall to dump it.  However that function still references
		 * "plpython" (no "2"), so it throws an error on restore.  This code
		 * checks for the problem function, reports affected databases to the
		 * user and explains how to remove them.
		 * 8.1 git commit: e0dedd0559f005d60c69c9772163e69c204bac69
		 * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
		 * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
		 */
		if (GET_MAJOR_VERSION(old_cluster.major_version) < 901)
		{
			PGresult   *res;

			res = executeQueryOrDie(conn,
									"SELECT 1 "
									"FROM pg_catalog.pg_proc JOIN pg_namespace "
									" ON pronamespace = pg_namespace.oid "
									"WHERE proname = 'plpython_call_handler' AND "
									"nspname = 'public' AND "
									"prolang = 13 /* C */ AND "
									"probin = '$libdir/plpython' AND "
									"pg_proc.oid >= %u;",
									FirstNormalObjectId);
			if (PQntuples(res) > 0)
			{
				/* emit the long explanatory banner only once */
				if (!found_public_plpython_handler)
				{
					pg_log(PG_WARNING,
						   "\nThe old cluster has a \"plpython_call_handler\" function defined\n"
						   "in the \"public\" schema which is a duplicate of the one defined\n"
						   "in the \"pg_catalog\" schema. You can confirm this by executing\n"
						   "in psql:\n"
						   "\n"
						   " \\df *.plpython_call_handler\n"
						   "\n"
						   "The \"public\" schema version of this function was created by a\n"
						   "pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
						   "to complete because it references a now-obsolete \"plpython\"\n"
						   "shared object file. You can remove the \"public\" schema version\n"
						   "of this function by running the following command:\n"
						   "\n"
						   " DROP FUNCTION public.plpython_call_handler()\n"
						   "\n"
						   "in each affected database:\n"
						   "\n");
				}
				pg_log(PG_WARNING, " %s\n", active_db->db_name);
				found_public_plpython_handler = true;
			}
			PQclear(res);
		}

		PQfinish(conn);
	}

	if (found_public_plpython_handler)
		pg_log(PG_FATAL,
			   "Remove the problem functions from the old cluster to continue.\n");

	totaltups++;				/* reserve for pg_upgrade_support */

	/* Allocate what's certainly enough space */
	os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *));

	/*
	 * Now remove duplicates across DBs.  This is pretty inefficient code, but
	 * there probably aren't enough entries to matter.
	 */
	totaltups = 0;
	os_info.libraries[totaltups++] = pg_strdup(PG_UPGRADE_SUPPORT);

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		PGresult   *res = ress[dbnum];
		int			ntups;
		int			rowno;

		ntups = PQntuples(res);
		for (rowno = 0; rowno < ntups; rowno++)
		{
			char	   *lib = PQgetvalue(res, rowno, 0);
			bool		dup = false;
			int			n;

			/* linear scan of entries kept so far (O(n^2), small n) */
			for (n = 0; n < totaltups; n++)
			{
				if (strcmp(lib, os_info.libraries[n]) == 0)
				{
					dup = true;
					break;
				}
			}
			if (!dup)
				os_info.libraries[totaltups++] = pg_strdup(lib);
		}

		PQclear(res);
	}

	os_info.num_libraries = totaltups;

	pg_free(ress);
}
/*
 * parallel_transfer_all_new_dbs
 *
 * This has the same API as transfer_all_new_dbs, except it does parallel execution
 * by transferring multiple tablespaces in parallel
 *
 * With jobs <= 1 this simply calls transfer_all_new_dbs synchronously.
 * Otherwise it launches one child per call (fork on Unix, a thread on
 * Windows), throttled to user_opts.jobs concurrent workers via reap_child().
 */
void
parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
							  char *old_pgdata, char *new_pgdata,
							  char *old_tablespace)
{
#ifndef WIN32
	pid_t		child;
#else
	HANDLE		child;
	transfer_thread_arg *new_arg;
#endif

	if (user_opts.jobs <= 1)
		/* throw_error must be true to allow jobs */
		transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata,
							 NULL);
	else
	{
		/* parallel */
#ifdef WIN32
		/* lazily allocate the per-thread handle and argument arrays */
		if (thread_handles == NULL)
			thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));

		if (transfer_thread_args == NULL)
		{
			int			i;

			transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *));

			/*
			 * For safety and performance, we keep the args allocated during
			 * the entire life of the process, and we don't free the args in a
			 * thread different from the one that allocated it.
			 */
			for (i = 0; i < user_opts.jobs; i++)
				transfer_thread_args[i] = pg_malloc0(sizeof(transfer_thread_arg));
		}

		cur_thread_args = (void **) transfer_thread_args;
#endif
		/* harvest any dead children */
		while (reap_child(false) == true)
			;

		/* must we wait for a dead child? */
		if (parallel_jobs >= user_opts.jobs)
			reap_child(true);

		/* set this before we start the job */
		parallel_jobs++;

		/* Ensure stdio state is quiesced before forking */
		fflush(NULL);

#ifndef WIN32
		child = fork();
		if (child == 0)
		{
			transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata,
								 old_tablespace);
			/* if we take another exit path, it will be non-zero */
			/* use _exit to skip atexit() functions */
			_exit(0);
		}
		else if (child < 0)
			/* fork failed */
			pg_fatal("could not create worker process: %s\n", strerror(errno));
#else
		/* empty array element are always at the end */
		new_arg = transfer_thread_args[parallel_jobs - 1];

		/* Can only pass one pointer into the function, so use a struct */
		new_arg->old_db_arr = old_db_arr;
		new_arg->new_db_arr = new_db_arr;
		/* free any strings left over from a previous use of this slot */
		if (new_arg->old_pgdata)
			pg_free(new_arg->old_pgdata);
		new_arg->old_pgdata = pg_strdup(old_pgdata);
		if (new_arg->new_pgdata)
			pg_free(new_arg->new_pgdata);
		new_arg->new_pgdata = pg_strdup(new_pgdata);
		if (new_arg->old_tablespace)
			pg_free(new_arg->old_tablespace);
		new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL;

		child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs,
										new_arg, 0, NULL);
		if (child == 0)
			pg_fatal("could not create worker thread: %s\n", strerror(errno));

		thread_handles[parallel_jobs - 1] = child;
#endif
	}

	return;
}
/* * Try to read the existing pg_control file. * * This routine is also responsible for updating old pg_control versions * to the current format. (Currently we don't do anything of the sort.) */ static bool ReadControlFile(void) { int fd; int len; char *buffer; pg_crc32 crc; if ((fd = open(XLOG_CONTROL_FILE, O_RDONLY | PG_BINARY, 0)) < 0) { /* * If pg_control is not there at all, or we can't read it, the odds * are we've been handed a bad DataDir path, so give up. User can do * "touch pg_control" to force us to proceed. */ fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"), progname, XLOG_CONTROL_FILE, strerror(errno)); if (errno == ENOENT) fprintf(stderr, _("If you are sure the data directory path is correct, execute\n" " touch %s\n" "and try again.\n"), XLOG_CONTROL_FILE); exit(1); } /* Use malloc to ensure we have a maxaligned buffer */ buffer = (char *) pg_malloc(PG_CONTROL_SIZE); len = read(fd, buffer, PG_CONTROL_SIZE); if (len < 0) { fprintf(stderr, _("%s: could not read file \"%s\": %s\n"), progname, XLOG_CONTROL_FILE, strerror(errno)); exit(1); } close(fd); if (len >= sizeof(ControlFileData) && ((ControlFileData *) buffer)->pg_control_version == PG_CONTROL_VERSION) { /* Check the CRC. */ INIT_CRC32(crc); COMP_CRC32(crc, buffer, offsetof(ControlFileData, crc)); FIN_CRC32(crc); if (EQ_CRC32(crc, ((ControlFileData *) buffer)->crc)) { /* Valid data... */ memcpy(&ControlFile, buffer, sizeof(ControlFile)); return true; } fprintf(stderr, _("%s: pg_control exists but has invalid CRC; proceed with caution\n"), progname); /* We will use the data anyway, but treat it as guessed. */ memcpy(&ControlFile, buffer, sizeof(ControlFile)); guessed = true; return true; } /* Looks like it's a mess. */ fprintf(stderr, _("%s: pg_control exists but is broken or unknown version; ignoring it\n"), progname); return false; }
/*
 * Replacement for strtok() (a.k.a. poor man's flex)
 *
 * Splits a string into tokens, returning one token per call, then NULL
 * when no more tokens exist in the given string.
 *
 * The calling convention is similar to that of strtok, but with more
 * frammishes.
 *
 * s -			string to parse, if NULL continue parsing the last string
 * whitespace - set of whitespace characters that separate tokens
 * delim -		set of non-whitespace separator characters (or NULL)
 * quote -		set of characters that can quote a token (NULL if none)
 * escape -		character that can quote quotes (0 if none)
 * e_strings -	if TRUE, treat E'...' syntax as a valid token
 * del_quotes - if TRUE, strip quotes from the returned token, else return
 *				it exactly as found in the string
 * encoding -	the active character-set encoding
 *
 * Characters in 'delim', if any, will be returned as single-character
 * tokens unless part of a quoted token.
 *
 * Double occurrences of the quoting character are always taken to represent
 * a single quote character in the data.  If escape isn't 0, then escape
 * followed by anything (except \0) is a data character too.
 *
 * The combination of e_strings and del_quotes both TRUE is not currently
 * handled.  This could be fixed but it's not needed anywhere at the moment.
 *
 * Note that the string s is _not_ overwritten in this implementation.
 *
 * NB: it's okay to vary delim, quote, and escape from one call to the
 * next on a single source string, but changing whitespace is a bad idea
 * since you might lose data.
 */
char *
strtokx(const char *s,
		const char *whitespace,
		const char *delim,
		const char *quote,
		char escape,
		bool e_strings,
		bool del_quotes,
		int encoding)
{
	static char *storage = NULL;	/* store the local copy of the users
									 * string here */
	static char *string = NULL; /* pointer into storage where to continue on
								 * next call */

	/* variously abused variables: */
	unsigned int offset;
	char	   *start;
	char	   *p;

	if (s)
	{
		/* starting a new source string: replace the saved copy */
		free(storage);

		/*
		 * We may need extra space to insert delimiter nulls for adjacent
		 * tokens.  2X the space is a gross overestimate, but it's unlikely
		 * that this code will be used on huge strings anyway.
		 */
		storage = pg_malloc(2 * strlen(s) + 1);
		strcpy(storage, s);
		string = storage;
	}

	if (!storage)
		return NULL;

	/* skip leading whitespace */
	offset = strspn(string, whitespace);
	start = &string[offset];

	/* end of string reached? */
	if (*start == '\0')
	{
		/* technically we don't need to free here, but we're nice */
		free(storage);
		storage = NULL;
		string = NULL;
		return NULL;
	}

	/* test if delimiter character */
	if (delim && strchr(delim, *start))
	{
		/*
		 * If not at end of string, we need to insert a null to terminate the
		 * returned token.  We can just overwrite the next character if it
		 * happens to be in the whitespace set ... otherwise move over the
		 * rest of the string to make room.  (This is why we allocated extra
		 * space above).
		 */
		p = start + 1;
		if (*p != '\0')
		{
			if (!strchr(whitespace, *p))
				memmove(p + 1, p, strlen(p) + 1);
			*p = '\0';
			string = p + 1;
		}
		else
		{
			/* at end of string, so no extra work */
			string = p;
		}

		return start;
	}

	/* check for E string */
	p = start;
	if (e_strings &&
		(*p == 'E' || *p == 'e') &&
		p[1] == '\'')
	{
		quote = "'";
		escape = '\\';			/* if std strings before, not any more */
		p++;
	}

	/* test if quoting character */
	if (quote && strchr(quote, *p))
	{
		/* okay, we have a quoted token, now scan for the closer */
		char		thisquote = *p++;

		/* multibyte-aware scan for the matching close quote */
		for (; *p; p += PQmblen(p, encoding))
		{
			if (*p == escape && p[1] != '\0')
				p++;			/* process escaped anything */
			else if (*p == thisquote && p[1] == thisquote)
				p++;			/* process doubled quote */
			else if (*p == thisquote)
			{
				p++;			/* skip trailing quote */
				break;
			}
		}

		/*
		 * If not at end of string, we need to insert a null to terminate the
		 * returned token.  See notes above.
		 */
		if (*p != '\0')
		{
			if (!strchr(whitespace, *p))
				memmove(p + 1, p, strlen(p) + 1);
			*p = '\0';
			string = p + 1;
		}
		else
		{
			/* at end of string, so no extra work */
			string = p;
		}

		/* Clean up the token if caller wants that */
		if (del_quotes)
			strip_quotes(start, thisquote, escape, encoding);

		return start;
	}

	/*
	 * Otherwise no quoting character.  Scan till next whitespace, delimiter
	 * or quote.  NB: at this point, *start is known not to be '\0',
	 * whitespace, delim, or quote, so we will consume at least one character.
	 */
	offset = strcspn(start, whitespace);

	if (delim)
	{
		unsigned int offset2 = strcspn(start, delim);

		if (offset > offset2)
			offset = offset2;
	}

	if (quote)
	{
		unsigned int offset2 = strcspn(start, quote);

		if (offset > offset2)
			offset = offset2;
	}

	p = start + offset;

	/*
	 * If not at end of string, we need to insert a null to terminate the
	 * returned token.  See notes above.
	 */
	if (*p != '\0')
	{
		if (!strchr(whitespace, *p))
			memmove(p + 1, p, strlen(p) + 1);
		*p = '\0';
		string = p + 1;
	}
	else
	{
		/* at end of string, so no extra work */
		string = p;
	}

	return start;
}
/* * * main * */ int main(int argc, char *argv[]) { struct adhoc_opts options; int successResult; char *password = NULL; char *password_prompt = NULL; bool new_pass; set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("psql")); if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { usage(); exit(EXIT_SUCCESS); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { showVersion(); exit(EXIT_SUCCESS); } } #ifdef WIN32 setvbuf(stderr, NULL, _IONBF, 0); #endif setup_cancel_handler(); pset.progname = get_progname(argv[0]); pset.db = NULL; setDecimalLocale(); pset.encoding = PQenv2encoding(); pset.queryFout = stdout; pset.queryFoutPipe = false; pset.cur_cmd_source = stdin; pset.cur_cmd_interactive = false; /* We rely on unmentioned fields of pset.popt to start out 0/false/NULL */ pset.popt.topt.format = PRINT_ALIGNED; pset.popt.topt.border = 1; pset.popt.topt.pager = 1; pset.popt.topt.start_table = true; pset.popt.topt.stop_table = true; pset.popt.default_footer = true; /* We must get COLUMNS here before readline() sets it */ pset.popt.topt.env_columns = getenv("COLUMNS") ? 
atoi(getenv("COLUMNS")) : 0; pset.notty = (!isatty(fileno(stdin)) || !isatty(fileno(stdout))); pset.getPassword = TRI_DEFAULT; EstablishVariableSpace(); SetVariable(pset.vars, "VERSION", PG_VERSION_STR); /* Default values for variables */ SetVariableBool(pset.vars, "AUTOCOMMIT"); SetVariable(pset.vars, "VERBOSITY", "default"); SetVariable(pset.vars, "PROMPT1", DEFAULT_PROMPT1); SetVariable(pset.vars, "PROMPT2", DEFAULT_PROMPT2); SetVariable(pset.vars, "PROMPT3", DEFAULT_PROMPT3); parse_psql_options(argc, argv, &options); if (!pset.popt.topt.fieldSep) pset.popt.topt.fieldSep = pg_strdup(DEFAULT_FIELD_SEP); if (!pset.popt.topt.recordSep) pset.popt.topt.recordSep = pg_strdup(DEFAULT_RECORD_SEP); if (options.username == NULL) password_prompt = pg_strdup(_("Password: "******"Password for user %s: ")) - 2 + strlen(options.username) + 1); sprintf(password_prompt, _("Password for user %s: "), options.username); } if (pset.getPassword == TRI_YES) password = simple_prompt(password_prompt, 100, false); /* loop until we have a password if requested by backend */ do { #define PARAMS_ARRAY_SIZE 7 const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); keywords[0] = "host"; values[0] = options.host; keywords[1] = "port"; values[1] = options.port; keywords[2] = "user"; values[2] = options.username; keywords[3] = "password"; values[3] = password; keywords[4] = "dbname"; values[4] = (options.action == ACT_LIST_DB && options.dbname == NULL) ? 
"postgres" : options.dbname; keywords[5] = "fallback_application_name"; values[5] = pset.progname; keywords[6] = NULL; values[6] = NULL; new_pass = false; pset.db = PQconnectdbParams(keywords, values, true); free(keywords); free(values); if (PQstatus(pset.db) == CONNECTION_BAD && PQconnectionNeedsPassword(pset.db) && password == NULL && pset.getPassword != TRI_NO) { PQfinish(pset.db); password = simple_prompt(password_prompt, 100, false); new_pass = true; } } while (new_pass); free(password); free(password_prompt); if (PQstatus(pset.db) == CONNECTION_BAD) { fprintf(stderr, "%s: %s", pset.progname, PQerrorMessage(pset.db)); PQfinish(pset.db); exit(EXIT_BADCONN); } PQsetNoticeProcessor(pset.db, NoticeProcessor, NULL); SyncVariables(); if (options.action == ACT_LIST_DB) { int success = listAllDbs(false); PQfinish(pset.db); exit(success ? EXIT_SUCCESS : EXIT_FAILURE); } if (options.logfilename) { pset.logfile = fopen(options.logfilename, "a"); if (!pset.logfile) fprintf(stderr, _("%s: could not open log file \"%s\": %s\n"), pset.progname, options.logfilename, strerror(errno)); } /* * Now find something to do */ /* * process file given by -f */ if (options.action == ACT_FILE) { if (!options.no_psqlrc) process_psqlrc(argv[0]); successResult = process_file(options.action_string, options.single_txn); } /* * process slash command if one was given to -c */ else if (options.action == ACT_SINGLE_SLASH) { PsqlScanState scan_state; if (pset.echo == PSQL_ECHO_ALL) puts(options.action_string); scan_state = psql_scan_create(); psql_scan_setup(scan_state, options.action_string, strlen(options.action_string)); successResult = HandleSlashCmds(scan_state, NULL) != PSQL_CMD_ERROR ? EXIT_SUCCESS : EXIT_FAILURE; psql_scan_destroy(scan_state); } /* * If the query given to -c was a normal one, send it */ else if (options.action == ACT_SINGLE_QUERY) { if (pset.echo == PSQL_ECHO_ALL) puts(options.action_string); successResult = SendQuery(options.action_string) ? 
EXIT_SUCCESS : EXIT_FAILURE; } /* * or otherwise enter interactive main loop */ else { if (!options.no_psqlrc) process_psqlrc(argv[0]); connection_warnings(true); if (!pset.quiet && !pset.notty) printf(_("Type \"help\" for help.\n\n")); if (!pset.notty) initializeInput(options.no_readline ? 0 : 1); if (options.action_string) /* -f - was used */ pset.inputfile = "<stdin>"; successResult = MainLoop(stdin); } /* clean up */ if (pset.logfile) fclose(pset.logfile); PQfinish(pset.db); setQFout(NULL); return successResult; }
/*
 * vacuum_one_database
 *
 * Process tables in the given database.  If the 'tables' list is empty,
 * process all tables in the database.
 *
 * Note that this function is only concerned with running exactly one stage
 * when in analyze-in-stages mode; caller must iterate on us if necessary.
 *
 * If concurrentCons is > 1, multiple connections are used to vacuum tables
 * in parallel.  In this case and if the table list is empty, we first obtain
 * a list of tables from the database.
 */
static void
vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
					int stage,
					SimpleStringList *tables,
					const char *host, const char *port,
					const char *username, enum trivalue prompt_password,
					int concurrentCons,
					const char *progname, bool echo, bool quiet)
{
	PQExpBufferData sql;
	PQExpBufferData buf;
	PQExpBufferData catalog_query;
	PGresult   *res;
	PGconn	   *conn;
	SimpleStringListCell *cell;
	ParallelSlot *slots;
	SimpleStringList dbtables = {NULL, NULL};
	int			i;
	int			ntups;
	bool		failed = false;
	bool		parallel = concurrentCons > 1;
	bool		tables_listed = false;
	bool		has_where = false;
	const char *stage_commands[] = {
		"SET default_statistics_target=1; SET vacuum_cost_delay=0;",
		"SET default_statistics_target=10; RESET vacuum_cost_delay;",
		"RESET default_statistics_target;"
	};
	const char *stage_messages[] = {
		gettext_noop("Generating minimal optimizer statistics (1 target)"),
		gettext_noop("Generating medium optimizer statistics (10 targets)"),
		gettext_noop("Generating default (full) optimizer statistics")
	};

	Assert(stage == ANALYZE_NO_STAGE ||
		   (stage >= 0 && stage < ANALYZE_NUM_STAGES));

	conn = connectDatabase(dbname, host, port, username, prompt_password,
						   progname, echo, false, true);

	/* reject options the connected server version cannot support */
	if (vacopts->disable_page_skipping && PQserverVersion(conn) < 90600)
	{
		PQfinish(conn);
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "disable-page-skipping");
		exit(1);
	}

	if (vacopts->skip_locked && PQserverVersion(conn) < 120000)
	{
		PQfinish(conn);
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 12\n"),
				progname, "skip-locked");
		exit(1);
	}

	if (vacopts->min_xid_age != 0 && PQserverVersion(conn) < 90600)
	{
		/* close the connection first, consistent with the checks above */
		PQfinish(conn);
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "--min-xid-age");
		exit(1);
	}

	if (vacopts->min_mxid_age != 0 && PQserverVersion(conn) < 90600)
	{
		/* close the connection first, consistent with the checks above */
		PQfinish(conn);
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "--min-mxid-age");
		exit(1);
	}

	if (!quiet)
	{
		if (stage != ANALYZE_NO_STAGE)
			printf(_("%s: processing database \"%s\": %s\n"),
				   progname, PQdb(conn), _(stage_messages[stage]));
		else
			printf(_("%s: vacuuming database \"%s\"\n"),
				   progname, PQdb(conn));
		fflush(stdout);
	}

	/*
	 * Prepare the list of tables to process by querying the catalogs.
	 *
	 * Since we execute the constructed query with the default search_path
	 * (which could be unsafe), everything in this query MUST be fully
	 * qualified.
	 *
	 * First, build a WITH clause for the catalog query if any tables were
	 * specified, with a set of values made of relation names and their
	 * optional set of columns.  This is used to match any provided column
	 * lists with the generated qualified identifiers and to filter for the
	 * tables provided via --table.  If a listed table does not exist, the
	 * catalog query will fail.
	 */
	initPQExpBuffer(&catalog_query);
	for (cell = tables ? tables->head : NULL; cell; cell = cell->next)
	{
		char	   *just_table;
		const char *just_columns;

		/*
		 * Split relation and column names given by the user, this is used to
		 * feed the CTE with values on which are performed pre-run validity
		 * checks as well.  For now these happen only on the relation name.
		 */
		splitTableColumnsSpec(cell->val, PQclientEncoding(conn),
							  &just_table, &just_columns);

		if (!tables_listed)
		{
			appendPQExpBuffer(&catalog_query,
							  "WITH listed_tables (table_oid, column_list) "
							  "AS (\n  VALUES (");
			tables_listed = true;
		}
		else
			appendPQExpBuffer(&catalog_query, ",\n  (");

		appendStringLiteralConn(&catalog_query, just_table, conn);
		appendPQExpBuffer(&catalog_query, "::pg_catalog.regclass, ");

		if (just_columns && just_columns[0] != '\0')
			appendStringLiteralConn(&catalog_query, just_columns, conn);
		else
			appendPQExpBufferStr(&catalog_query, "NULL");

		appendPQExpBufferStr(&catalog_query, "::pg_catalog.text)");

		pg_free(just_table);
	}

	/* Finish formatting the CTE */
	if (tables_listed)
		appendPQExpBuffer(&catalog_query, "\n)\n");

	appendPQExpBuffer(&catalog_query, "SELECT c.relname, ns.nspname");

	if (tables_listed)
		appendPQExpBuffer(&catalog_query, ", listed_tables.column_list");

	appendPQExpBuffer(&catalog_query,
					  " FROM pg_catalog.pg_class c\n"
					  " JOIN pg_catalog.pg_namespace ns"
					  " ON c.relnamespace OPERATOR(pg_catalog.=) ns.oid\n"
					  " LEFT JOIN pg_catalog.pg_class t"
					  " ON c.reltoastrelid OPERATOR(pg_catalog.=) t.oid\n");

	/* Used to match the tables listed by the user */
	if (tables_listed)
		appendPQExpBuffer(&catalog_query, " JOIN listed_tables"
						  " ON listed_tables.table_oid OPERATOR(pg_catalog.=) c.oid\n");

	/*
	 * If no tables were listed, filter for the relevant relation types.  If
	 * tables were given via --table, don't bother filtering by relation type.
	 * Instead, let the server decide whether a given relation can be
	 * processed in which case the user will know about it.
	 */
	if (!tables_listed)
	{
		appendPQExpBuffer(&catalog_query, " WHERE c.relkind OPERATOR(pg_catalog.=) ANY (array["
						  CppAsString2(RELKIND_RELATION) ", "
						  CppAsString2(RELKIND_MATVIEW) "])\n");
		has_where = true;
	}

	/*
	 * For --min-xid-age and --min-mxid-age, the age of the relation is the
	 * greatest of the ages of the main relation and its associated TOAST
	 * table.  The commands generated by vacuumdb will also process the TOAST
	 * table for the relation if necessary, so it does not need to be
	 * considered separately.
	 */
	if (vacopts->min_xid_age != 0)
	{
		appendPQExpBuffer(&catalog_query,
						  " %s GREATEST(pg_catalog.age(c.relfrozenxid),"
						  " pg_catalog.age(t.relfrozenxid)) "
						  " OPERATOR(pg_catalog.>=) '%d'::pg_catalog.int4\n"
						  " AND c.relfrozenxid OPERATOR(pg_catalog.!=)"
						  " '0'::pg_catalog.xid\n",
						  has_where ? "AND" : "WHERE", vacopts->min_xid_age);
		has_where = true;
	}

	if (vacopts->min_mxid_age != 0)
	{
		appendPQExpBuffer(&catalog_query,
						  " %s GREATEST(pg_catalog.mxid_age(c.relminmxid),"
						  " pg_catalog.mxid_age(t.relminmxid)) OPERATOR(pg_catalog.>=)"
						  " '%d'::pg_catalog.int4\n"
						  " AND c.relminmxid OPERATOR(pg_catalog.!=)"
						  " '0'::pg_catalog.xid\n",
						  has_where ? "AND" : "WHERE", vacopts->min_mxid_age);
		has_where = true;
	}

	/*
	 * Execute the catalog query.  We use the default search_path for this
	 * query for consistency with table lookups done elsewhere by the user.
	 */
	appendPQExpBuffer(&catalog_query, " ORDER BY c.relpages DESC;");
	executeCommand(conn, "RESET search_path;", progname, echo);
	res = executeQuery(conn, catalog_query.data, progname, echo);
	termPQExpBuffer(&catalog_query);
	PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL,
						 progname, echo));

	/*
	 * If no rows are returned, there are no matching tables, so we are done.
	 */
	ntups = PQntuples(res);
	if (ntups == 0)
	{
		PQclear(res);
		PQfinish(conn);
		return;
	}

	/*
	 * Build qualified identifiers for each table, including the column list
	 * if given.
	 */
	initPQExpBuffer(&buf);
	for (i = 0; i < ntups; i++)
	{
		appendPQExpBufferStr(&buf,
							 fmtQualifiedId(PQgetvalue(res, i, 1),
											PQgetvalue(res, i, 0)));

		if (tables_listed && !PQgetisnull(res, i, 2))
			appendPQExpBufferStr(&buf, PQgetvalue(res, i, 2));

		simple_string_list_append(&dbtables, buf.data);
		resetPQExpBuffer(&buf);
	}
	termPQExpBuffer(&buf);
	PQclear(res);

	/*
	 * If there are more connections than vacuumable relations, we don't need
	 * to use them all.
	 */
	if (parallel)
	{
		if (concurrentCons > ntups)
			concurrentCons = ntups;
		if (concurrentCons <= 1)
			parallel = false;
	}

	/*
	 * Setup the database connections.  We reuse the connection we already
	 * have for the first slot.  If not in parallel mode, the first slot in
	 * the array contains the connection.
	 */
	if (concurrentCons <= 0)
		concurrentCons = 1;
	slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons);
	init_slot(slots, conn);
	if (parallel)
	{
		for (i = 1; i < concurrentCons; i++)
		{
			conn = connectDatabase(dbname, host, port, username, prompt_password,
								   progname, echo, false, true);
			init_slot(slots + i, conn);
		}
	}

	/*
	 * Prepare all the connections to run the appropriate analyze stage, if
	 * caller requested that mode.
	 */
	if (stage != ANALYZE_NO_STAGE)
	{
		int			j;

		/* We already emitted the message above */

		for (j = 0; j < concurrentCons; j++)
			executeCommand((slots + j)->connection,
						   stage_commands[stage], progname, echo);
	}

	initPQExpBuffer(&sql);

	cell = dbtables.head;
	do
	{
		const char *tabname = cell->val;
		ParallelSlot *free_slot;

		if (CancelRequested)
		{
			failed = true;
			goto finish;
		}

		/*
		 * Get the connection slot to use.  If in parallel mode, here we wait
		 * for one connection to become available if none already is.  In
		 * non-parallel mode we simply use the only slot we have, which we
		 * know to be free.
		 */
		if (parallel)
		{
			/*
			 * Get a free slot, waiting until one becomes free if none
			 * currently is.
			 */
			free_slot = GetIdleSlot(slots, concurrentCons, progname);
			if (!free_slot)
			{
				failed = true;
				goto finish;
			}

			free_slot->isFree = false;
		}
		else
			free_slot = slots;

		prepare_vacuum_command(&sql, PQserverVersion(free_slot->connection),
							   vacopts, tabname);

		/*
		 * Execute the vacuum.  If not in parallel mode, this terminates the
		 * program in case of an error.  (The parallel case handles query
		 * errors in ProcessQueryResult through GetIdleSlot.)
		 */
		run_vacuum_command(free_slot->connection, sql.data,
						   echo, tabname, progname, parallel);

		cell = cell->next;
	} while (cell != NULL);

	if (parallel)
	{
		int			j;

		/* wait for all connections to finish */
		for (j = 0; j < concurrentCons; j++)
		{
			/*
			 * NOTE(review): a failure here jumps to finish without setting
			 * 'failed', so the process still exits 0 — preserved as-is;
			 * confirm whether that is intended.
			 */
			if (!GetQueryResult((slots + j)->connection, progname))
				goto finish;
		}
	}

finish:
	for (i = 0; i < concurrentCons; i++)
		DisconnectDatabase(slots + i);
	pfree(slots);

	termPQExpBuffer(&sql);

	if (failed)
		exit(1);
}
/* * Try to set the wal segment size from the WAL file specified by WALFilePath. * * Return true if size could be determined, false otherwise. */ static bool SetWALSegSize(void) { bool ret_val = false; int fd; /* malloc this buffer to ensure sufficient alignment: */ char *buf = (char *) pg_malloc(XLOG_BLCKSZ); Assert(WalSegSz == -1); if ((fd = open(WALFilePath, O_RDWR, 0)) < 0) { fprintf(stderr, "%s: could not open WAL file \"%s\": %s\n", progname, WALFilePath, strerror(errno)); pg_free(buf); return false; } errno = 0; if (read(fd, buf, XLOG_BLCKSZ) == XLOG_BLCKSZ) { XLogLongPageHeader longhdr = (XLogLongPageHeader) buf; WalSegSz = longhdr->xlp_seg_size; if (IsValidWalSegSize(WalSegSz)) { /* successfully retrieved WAL segment size */ ret_val = true; } else fprintf(stderr, "%s: WAL segment size must be a power of two between 1MB and 1GB, but the WAL file header specifies %d bytes\n", progname, WalSegSz); } else { /* * Don't complain loudly, this is to be expected for segments being * created. */ if (errno != 0) { if (debug) fprintf(stderr, "could not read file \"%s\": %s\n", WALFilePath, strerror(errno)); } else { if (debug) fprintf(stderr, "not enough data in file \"%s\"\n", WALFilePath); } } fflush(stderr); close(fd); pg_free(buf); return ret_val; }
/*
 * gen_db_file_maps()
 *
 * generates database mappings for "old_db" and "new_db". Returns a malloc'ed
 * array of mappings. nmaps is a return parameter which refers to the number
 * mappings.
 *
 * NOTE: Its the Caller's responsibility to free the returned array.
 */
FileNameMap *
gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
				 int *nmaps, const char *old_pgdata, const char *new_pgdata)
{
	FileNameMap *maps;
	int			relnum;
	int			num_maps = 0;

	/*
	 * One slot per new-cluster relation.  Each non-toast relation emits up
	 * to three maps (itself, its toast table, its toast index); this is
	 * assumed to stay within nrels because the toast table and index are
	 * themselves entries of rel_arr that are skipped below — TODO confirm.
	 */
	maps = (FileNameMap *) pg_malloc(ctx, sizeof(FileNameMap) *
									 new_db->rel_arr.nrels);

	for (relnum = 0; relnum < new_db->rel_arr.nrels; relnum++)
	{
		RelInfo    *newrel = &new_db->rel_arr.rels[relnum];
		RelInfo    *oldrel;

		/* toast tables are handled by their parent */
		if (strcmp(newrel->nspname, "pg_toast") == 0)
			continue;

		oldrel = relarr_lookup_rel(ctx, &(old_db->rel_arr), newrel->nspname,
								   newrel->relname, CLUSTER_OLD);

		map_rel(ctx, oldrel, newrel, old_db, new_db, old_pgdata, new_pgdata,
				maps + num_maps);
		num_maps++;

		/*
		 * so much for the mapping of this relation. Now we need a mapping for
		 * its corresponding toast relation if any.
		 */
		if (oldrel->toastrelid > 0)
		{
			RelInfo    *new_toast;
			RelInfo    *old_toast;
			char		new_name[MAXPGPATH];
			char		old_name[MAXPGPATH];

			/* construct the new and old relnames for the toast relation */
			snprintf(old_name, sizeof(old_name), "pg_toast_%u",
					 oldrel->reloid);
			snprintf(new_name, sizeof(new_name), "pg_toast_%u",
					 newrel->reloid);

			/* look them up in their respective arrays */
			old_toast = relarr_lookup_reloid(ctx, &old_db->rel_arr,
											 oldrel->toastrelid, CLUSTER_OLD);
			new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
										  "pg_toast", new_name, CLUSTER_NEW);

			/* finally create a mapping for them */
			map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata,
					new_pgdata, maps + num_maps);
			num_maps++;

			/*
			 * also need to provide a mapping for the index of this toast
			 * relation.  The procedure is similar to what we did above for
			 * toast relation itself, the only difference being that the
			 * relnames need to be appended with _index.
			 */

			/*
			 * construct the new and old relnames for the toast index
			 * relations
			 */
			snprintf(old_name, sizeof(old_name), "%s_index",
					 old_toast->relname);
			snprintf(new_name, sizeof(new_name), "pg_toast_%u_index",
					 newrel->reloid);

			/* look them up in their respective arrays */
			old_toast = relarr_lookup_rel(ctx, &old_db->rel_arr,
										  "pg_toast", old_name, CLUSTER_OLD);
			new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
										  "pg_toast", new_name, CLUSTER_NEW);

			/* finally create a mapping for them */
			map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata,
					new_pgdata, maps + num_maps);
			num_maps++;
		}
	}

	*nmaps = num_maps;
	return maps;
}
static void ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF) { z_streamp zp; char *out; int res = Z_OK; size_t cnt; char *buf; size_t buflen; zp = (z_streamp) pg_malloc(sizeof(z_stream)); zp->zalloc = Z_NULL; zp->zfree = Z_NULL; zp->opaque = Z_NULL; buf = pg_malloc(ZLIB_IN_SIZE); buflen = ZLIB_IN_SIZE; out = pg_malloc(ZLIB_OUT_SIZE + 1); if (inflateInit(zp) != Z_OK) exit_horribly(modulename, "could not initialize compression library: %s\n", zp->msg); /* no minimal chunk size for zlib */ while ((cnt = readF(AH, &buf, &buflen))) { zp->next_in = (void *) buf; zp->avail_in = cnt; while (zp->avail_in > 0) { zp->next_out = (void *) out; zp->avail_out = ZLIB_OUT_SIZE; res = inflate(zp, 0); if (res != Z_OK && res != Z_STREAM_END) exit_horribly(modulename, "could not uncompress data: %s\n", zp->msg); out[ZLIB_OUT_SIZE - zp->avail_out] = '\0'; ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH); } } zp->next_in = NULL; zp->avail_in = 0; while (res != Z_STREAM_END) { zp->next_out = (void *) out; zp->avail_out = ZLIB_OUT_SIZE; res = inflate(zp, 0); if (res != Z_OK && res != Z_STREAM_END) exit_horribly(modulename, "could not uncompress data: %s\n", zp->msg); out[ZLIB_OUT_SIZE - zp->avail_out] = '\0'; ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH); } if (inflateEnd(zp) != Z_OK) exit_horribly(modulename, "could not close compression library: %s\n", zp->msg); free(buf); free(out); free(zp); }
/*---- * Runs a query, which returns pieces of files from the remote source data * directory, and overwrites the corresponding parts of target files with * the received parts. The result set is expected to be of format: * * path text -- path in the data directory, e.g "base/1/123" * begin int8 -- offset within the file * chunk bytea -- file content *---- */ static void receiveFileChunks(const char *sql) { PGresult *res; if (PQsendQueryParams(conn, sql, 0, NULL, NULL, NULL, NULL, 1) != 1) pg_fatal("could not send query: %s", PQerrorMessage(conn)); pg_log(PG_DEBUG, "getting file chunks\n"); if (PQsetSingleRowMode(conn) != 1) pg_fatal("could not set libpq connection to single row mode\n"); while ((res = PQgetResult(conn)) != NULL) { char *filename; int filenamelen; int64 chunkoff; char chunkoff_str[32]; int chunksize; char *chunk; switch (PQresultStatus(res)) { case PGRES_SINGLE_TUPLE: break; case PGRES_TUPLES_OK: PQclear(res); continue; /* final zero-row result */ default: pg_fatal("unexpected result while fetching remote files: %s", PQresultErrorMessage(res)); } /* sanity check the result set */ if (PQnfields(res) != 3 || PQntuples(res) != 1) pg_fatal("unexpected result set size while fetching remote files\n"); if (PQftype(res, 0) != TEXTOID || PQftype(res, 1) != INT8OID || PQftype(res, 2) != BYTEAOID) { pg_fatal("unexpected data types in result set while fetching remote files: %u %u %u\n", PQftype(res, 0), PQftype(res, 1), PQftype(res, 2)); } if (PQfformat(res, 0) != 1 && PQfformat(res, 1) != 1 && PQfformat(res, 2) != 1) { pg_fatal("unexpected result format while fetching remote files\n"); } if (PQgetisnull(res, 0, 0) || PQgetisnull(res, 0, 1)) { pg_fatal("unexpected null values in result while fetching remote files\n"); } if (PQgetlength(res, 0, 1) != sizeof(int64)) pg_fatal("unexpected result length while fetching remote files\n"); /* Read result set to local variables */ memcpy(&chunkoff, PQgetvalue(res, 0, 1), sizeof(int64)); chunkoff = pg_recvint64(chunkoff); 
chunksize = PQgetlength(res, 0, 2); filenamelen = PQgetlength(res, 0, 0); filename = pg_malloc(filenamelen + 1); memcpy(filename, PQgetvalue(res, 0, 0), filenamelen); filename[filenamelen] = '\0'; chunk = PQgetvalue(res, 0, 2); /* * If a file has been deleted on the source, remove it on the target * as well. Note that multiple unlink() calls may happen on the same * file if multiple data chunks are associated with it, hence ignore * unconditionally anything missing. If this file is not a relation * data file, then it has been already truncated when creating the * file chunk list at the previous execution of the filemap. */ if (PQgetisnull(res, 0, 2)) { pg_log(PG_DEBUG, "received null value for chunk for file \"%s\", file has been deleted\n", filename); remove_target_file(filename, true); pg_free(filename); PQclear(res); continue; } /* * Separate step to keep platform-dependent format code out of * translatable strings. */ snprintf(chunkoff_str, sizeof(chunkoff_str), INT64_FORMAT, chunkoff); pg_log(PG_DEBUG, "received chunk for file \"%s\", offset %s, size %d\n", filename, chunkoff_str, chunksize); open_target_file(filename, false); write_target_range(chunk, chunkoff, chunksize); pg_free(filename); PQclear(res); } }
int main(int argc, char **argv) { struct options *my_opts; PGconn *pgconn; my_opts = (struct options *) pg_malloc(sizeof(struct options)); my_opts->oids = (eary *) pg_malloc(sizeof(eary)); my_opts->tables = (eary *) pg_malloc(sizeof(eary)); my_opts->filenodes = (eary *) pg_malloc(sizeof(eary)); my_opts->oids->num = my_opts->oids->alloc = 0; my_opts->tables->num = my_opts->tables->alloc = 0; my_opts->filenodes->num = my_opts->filenodes->alloc = 0; /* parse the opts */ get_opts(argc, argv, my_opts); if (my_opts->dbname == NULL) { my_opts->dbname = "postgres"; my_opts->nodb = true; } pgconn = sql_conn(my_opts); /* display only tablespaces */ if (my_opts->tablespaces) { if (!my_opts->quiet) printf("All tablespaces:\n"); sql_exec_dumpalltbspc(pgconn, my_opts); PQfinish(pgconn); exit(0); } /* display the given elements in the database */ if (my_opts->oids->num > 0 || my_opts->tables->num > 0 || my_opts->filenodes->num > 0) { if (!my_opts->quiet) printf("From database \"%s\":\n", my_opts->dbname); sql_exec_searchtables(pgconn, my_opts); PQfinish(pgconn); exit(0); } /* no elements given; dump the given database */ if (my_opts->dbname && !my_opts->nodb) { if (!my_opts->quiet) printf("From database \"%s\":\n", my_opts->dbname); sql_exec_dumpalltables(pgconn, my_opts); PQfinish(pgconn); exit(0); } /* no database either; dump all databases */ if (!my_opts->quiet) printf("All databases:\n"); sql_exec_dumpalldbs(pgconn, my_opts); PQfinish(pgconn); return 0; }
/* * Show oid, filenode, name, schema and tablespace for each of the * given objects in the current database. */ void sql_exec_searchtables(PGconn *conn, struct options * opts) { char *todo; char *qualifiers, *ptr; char *comma_oids, *comma_filenodes, *comma_tables; bool written = false; char *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" "; /* get tables qualifiers, whether names, filenodes, or OIDs */ comma_oids = get_comma_elts(opts->oids); comma_tables = get_comma_elts(opts->tables); comma_filenodes = get_comma_elts(opts->filenodes); /* 80 extra chars for SQL expression */ qualifiers = (char *) pg_malloc(strlen(comma_oids) + strlen(comma_tables) + strlen(comma_filenodes) + 80); ptr = qualifiers; if (opts->oids->num > 0) { ptr += sprintf(ptr, "c.oid IN (%s)", comma_oids); written = true; } if (opts->filenodes->num > 0) { if (written) ptr += sprintf(ptr, " OR "); ptr += sprintf(ptr, "pg_catalog.pg_relation_filenode(c.oid) IN (%s)", comma_filenodes); written = true; } if (opts->tables->num > 0) { if (written) ptr += sprintf(ptr, " OR "); sprintf(ptr, "c.relname ~~ ANY (ARRAY[%s])", comma_tables); } free(comma_oids); free(comma_tables); free(comma_filenodes); /* now build the query */ todo = psprintf( "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n" "FROM pg_catalog.pg_class c \n" " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n" " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n" " pg_catalog.pg_tablespace t \n" "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n" " t.oid = CASE\n" " WHEN reltablespace <> 0 THEN reltablespace\n" " ELSE dattablespace\n" " END AND \n" " (%s) \n" "ORDER BY relname\n", opts->extended ? addfields : "", qualifiers); free(qualifiers); sql_exec(conn, todo, opts->quiet); }
/*
 * Callback for processing target file list.
 *
 * All source files must be already processed before calling this. This only
 * marks target data directory's files that didn't exist in the source for
 * deletion.
 */
void
process_target_file(const char *path, file_type_t type, size_t oldsize,
					const char *link_target)
{
	bool		exists;
	char		localpath[MAXPGPATH];
	struct stat statbuf;
	file_entry_t key;
	file_entry_t *key_ptr;
	filemap_t  *map = filemap;
	file_entry_t *entry;

	snprintf(localpath, sizeof(localpath), "%s/%s", datadir_target, path);
	if (lstat(localpath, &statbuf) < 0)
	{
		if (errno != ENOENT)
			pg_fatal("could not stat file \"%s\": %s\n",
					 localpath, strerror(errno));

		/*
		 * NOTE(review): this assignment is a dead store — "exists" is
		 * unconditionally overwritten by the bsearch() result below, and is
		 * never set to true on the success path of lstat().  The lstat()
		 * here effectively only validates that any failure is ENOENT.
		 */
		exists = false;
	}

	if (map->array == NULL)
	{
		/* on first call, initialize lookup array */
		if (map->nlist == 0)
		{
			/* should not happen */
			pg_fatal("source file list is empty\n");
		}

		filemap_list_to_array(map);

		Assert(map->array != NULL);

		qsort(map->array, map->narray, sizeof(file_entry_t *), path_cmp);
	}

	/*
	 * Completely ignore some special files
	 */
	if (strcmp(path, "postmaster.pid") == 0 ||
		strcmp(path, "postmaster.opts") == 0)
		return;

	/*
	 * Like in process_source_file, pretend that xlog is always a directory.
	 */
	if (strcmp(path, "pg_wal") == 0 && type == FILE_TYPE_SYMLINK)
		type = FILE_TYPE_DIRECTORY;

	/* "exists" here means: present in the (sorted) source file list */
	key.path = (char *) path;
	key_ptr = &key;
	exists = (bsearch(&key_ptr, map->array, map->narray, sizeof(file_entry_t *),
					  path_cmp) != NULL);

	/* Remove any file or folder that doesn't exist in the source system. */
	if (!exists)
	{
		entry = pg_malloc(sizeof(file_entry_t));
		entry->path = pg_strdup(path);
		entry->type = type;
		entry->action = FILE_ACTION_REMOVE;
		entry->oldsize = oldsize;
		entry->newsize = 0;		/* removal: nothing to copy */
		entry->link_target = link_target ? pg_strdup(link_target) : NULL;
		entry->next = NULL;
		entry->pagemap.bitmap = NULL;
		entry->pagemap.bitmapsize = 0;
		entry->isrelfile = isRelDataFile(path);

		/* append to the filemap's linked list */
		if (map->last == NULL)
			map->first = entry;
		else
			map->last->next = entry;
		map->last = entry;
		map->nlist++;
	}
	else
	{
		/*
		 * We already handled all files that exist in the source system in
		 * process_source_file().
		 */
	}
}
/*
 * create_script_for_cluster_analyze()
 *
 * This incrementally generates better optimizer statistics.
 *
 * Writes a shell script (or .bat on Windows) named
 * "analyze_new_cluster.<ext>" that runs vacuumdb --analyze three times with
 * increasing default_statistics_target (1, 10, then the default), so the
 * new cluster becomes usable quickly.  The malloc'ed script path is
 * returned through *analyze_script_file_name; the caller frees it.
 */
void
create_script_for_cluster_analyze(char **analyze_script_file_name)
{
	FILE	   *script = NULL;

	*analyze_script_file_name = pg_malloc(MAXPGPATH);

	prep_status("Creating script to analyze new cluster");

	snprintf(*analyze_script_file_name, MAXPGPATH, "analyze_new_cluster.%s",
			 SCRIPT_EXT);

	if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL)
		pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
			   *analyze_script_file_name, getErrorText(errno));

#ifndef WIN32
	/* add shebang header */
	fprintf(script, "#!/bin/sh\n\n");
#endif

	/* explanatory banner, emitted by the generated script itself */
	fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %shave the default level of optimizer statistics.%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo\n\n");
	fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo\n\n");
	fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sthis script and run:%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %s vacuumdb --all %s%s\n", ECHO_QUOTE,
	/* Did we copy the free space files? */
			(GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
			"--analyze-only" : "--analyze", ECHO_QUOTE);
	fprintf(script, "echo\n\n");

	/* pass 1: minimal statistics (target = 1) */
#ifndef WIN32
	fprintf(script, "sleep 2\n");
	fprintf(script, "PGOPTIONS='-c default_statistics_target=1 -c vacuum_cost_delay=0'\n");
	/* only need to export once */
	fprintf(script, "export PGOPTIONS\n");
#else
	fprintf(script, "REM simulate sleep 2\n");
	fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
	fprintf(script, "SET PGOPTIONS=-c default_statistics_target=1 -c vacuum_cost_delay=0\n");
#endif

	fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %s--------------------------------------------------%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "vacuumdb --all --analyze-only\n");
	fprintf(script, "echo\n");
	fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo\n\n");

	/* pass 2: medium statistics (target = 10) */
#ifndef WIN32
	fprintf(script, "sleep 2\n");
	fprintf(script, "PGOPTIONS='-c default_statistics_target=10'\n");
#else
	fprintf(script, "REM simulate sleep\n");
	fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
	fprintf(script, "SET PGOPTIONS=-c default_statistics_target=10\n");
#endif

	fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %s---------------------------------------------------%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "vacuumdb --all --analyze-only\n");
	fprintf(script, "echo\n\n");

	/* pass 3: default statistics (clear the override) */
#ifndef WIN32
	fprintf(script, "unset PGOPTIONS\n");
#else
	fprintf(script, "SET PGOPTIONS\n");
#endif

	fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %s-------------------------------------------------------------%s\n", ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "vacuumdb --all %s\n",
	/* Did we copy the free space files? */
			(GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
			"--analyze-only" : "--analyze");

	fprintf(script, "echo\n\n");
	fprintf(script, "echo %sDone%s\n",
			ECHO_QUOTE, ECHO_QUOTE);

	fclose(script);

#ifndef WIN32
	if (chmod(*analyze_script_file_name, S_IRWXU) != 0)
		pg_log(PG_FATAL, "Could not add execute permission to file \"%s\": %s\n",
			   *analyze_script_file_name, getErrorText(errno));
#endif

	check_ok();
}
/*
 * Callback for processing source file list.
 *
 * This is called once for every file in the source server. We decide what
 * action needs to be taken for the file, depending on whether the file
 * exists in the target and whether the size matches.
 */
void
process_source_file(const char *path, file_type_t type, size_t newsize,
					const char *link_target)
{
	bool		exists;
	char		localpath[MAXPGPATH];
	struct stat statbuf;
	filemap_t  *map = filemap;
	file_action_t action = FILE_ACTION_NONE;
	size_t		oldsize = 0;
	file_entry_t *entry;

	/* source files must all be seen before the target list is processed */
	Assert(map->array == NULL);

	/*
	 * Completely ignore some special files in source and destination.
	 */
	if (strcmp(path, "postmaster.pid") == 0 ||
		strcmp(path, "postmaster.opts") == 0)
		return;

	/*
	 * Pretend that pg_wal is a directory, even if it's really a symlink. We
	 * don't want to mess with the symlink itself, nor complain if it's a
	 * symlink in source but not in target or vice versa.
	 */
	if (strcmp(path, "pg_wal") == 0 && type == FILE_TYPE_SYMLINK)
		type = FILE_TYPE_DIRECTORY;

	/*
	 * Skip temporary files, .../pgsql_tmp/... and .../pgsql_tmp.* in source.
	 * This has the effect that all temporary files in the destination will
	 * be removed.
	 */
	if (strstr(path, "/" PG_TEMP_FILE_PREFIX) != NULL)
		return;
	if (strstr(path, "/" PG_TEMP_FILES_DIR "/") != NULL)
		return;

	/*
	 * sanity check: a filename that looks like a data file better be a
	 * regular file
	 */
	if (type != FILE_TYPE_REGULAR && isRelDataFile(path))
		pg_fatal("data file \"%s\" in source is not a regular file\n", path);

	snprintf(localpath, sizeof(localpath), "%s/%s", datadir_target, path);

	/* Does the corresponding file exist in the target data dir? */
	if (lstat(localpath, &statbuf) < 0)
	{
		if (errno != ENOENT)
			pg_fatal("could not stat file \"%s\": %s\n",
					 localpath, strerror(errno));

		exists = false;
	}
	else
		exists = true;

	/* decide the action based on file type and target state */
	switch (type)
	{
		case FILE_TYPE_DIRECTORY:
			if (exists && !S_ISDIR(statbuf.st_mode) && strcmp(path, "pg_wal") != 0)
			{
				/* it's a directory in source, but not in target. Strange.. */
				pg_fatal("\"%s\" is not a directory\n", localpath);
			}

			if (!exists)
				action = FILE_ACTION_CREATE;
			else
				action = FILE_ACTION_NONE;
			oldsize = 0;
			break;

		case FILE_TYPE_SYMLINK:
			if (exists &&
#ifndef WIN32
				!S_ISLNK(statbuf.st_mode)
#else
				!pgwin32_is_junction(localpath)
#endif
				)
			{
				/*
				 * It's a symbolic link in source, but not in target.
				 * Strange..
				 */
				pg_fatal("\"%s\" is not a symbolic link\n", localpath);
			}

			if (!exists)
				action = FILE_ACTION_CREATE;
			else
				action = FILE_ACTION_NONE;
			oldsize = 0;
			break;

		case FILE_TYPE_REGULAR:
			if (exists && !S_ISREG(statbuf.st_mode))
				pg_fatal("\"%s\" is not a regular file\n", localpath);

			if (!exists || !isRelDataFile(path))
			{
				/*
				 * File exists in source, but not in target. Or it's a
				 * non-data file that we have no special processing for. Copy
				 * it in toto.
				 *
				 * An exception: PG_VERSIONs should be identical, but avoid
				 * overwriting it for paranoia.
				 */
				if (pg_str_endswith(path, "PG_VERSION"))
				{
					action = FILE_ACTION_NONE;
					oldsize = statbuf.st_size;
				}
				else
				{
					action = FILE_ACTION_COPY;
					oldsize = 0;
				}
			}
			else
			{
				/*
				 * It's a data file that exists in both.
				 *
				 * If it's larger in target, we can truncate it. There will
				 * also be a WAL record of the truncation in the source
				 * system, so WAL replay would eventually truncate the target
				 * too, but we might as well do it now.
				 *
				 * If it's smaller in the target, it means that it has been
				 * truncated in the target, or enlarged in the source, or
				 * both. If it was truncated in the target, we need to copy
				 * the missing tail from the source system. If it was
				 * enlarged in the source system, there will be WAL records
				 * in the source system for the new blocks, so we wouldn't
				 * need to copy them here. But we don't know which scenario
				 * we're dealing with, and there's no harm in copying the
				 * missing blocks now, so do it now.
				 *
				 * If it's the same size, do nothing here. Any blocks
				 * modified in the target will be copied based on parsing the
				 * target system's WAL, and any blocks modified in the source
				 * will be updated after rewinding, when the source system's
				 * WAL is replayed.
				 */
				oldsize = statbuf.st_size;
				if (oldsize < newsize)
					action = FILE_ACTION_COPY_TAIL;
				else if (oldsize > newsize)
					action = FILE_ACTION_TRUNCATE;
				else
					action = FILE_ACTION_NONE;
			}
			break;
	}

	/* Create a new entry for this file */
	entry = pg_malloc(sizeof(file_entry_t));
	entry->path = pg_strdup(path);
	entry->type = type;
	entry->action = action;
	entry->oldsize = oldsize;
	entry->newsize = newsize;
	entry->link_target = link_target ? pg_strdup(link_target) : NULL;
	entry->next = NULL;
	entry->pagemap.bitmap = NULL;
	entry->pagemap.bitmapsize = 0;
	entry->isrelfile = isRelDataFile(path);

	/* append to the filemap's linked list */
	if (map->last)
	{
		map->last->next = entry;
		map->last = entry;
	}
	else
		map->first = map->last = entry;
	map->nlist++;
}
/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables of the database referred
 * by "db".
 *
 * NOTE: we assume that relations/entities with oids greater than
 * FirstNormalObjectId belongs to the user
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
	PGconn	   *conn = connectToServer(cluster, dbinfo->db_name);
	PGresult   *res;
	RelInfo    *relinfos;
	int			ntups;
	int			relnum;
	int			num_rels = 0;
	char	   *nspname = NULL;
	char	   *relname = NULL;
	char	   *tablespace = NULL;
	int			i_spclocation,
				i_nspname,
				i_relname,
				i_oid,
				i_relfilenode,
				i_reltablespace;
	char		query[QUERY_ALLOC];
	char	   *last_namespace = NULL,
			   *last_tablespace = NULL;

	/*
	 * pg_largeobject contains user data that does not appear in pg_dump
	 * --schema-only output, so we have to copy that system table heap and
	 * index. We could grab the pg_largeobject oids from template1, but it is
	 * easy to treat it as a normal table. Order by oid so we can join
	 * old/new structures efficiently.
	 */
	snprintf(query, sizeof(query),
	/* get regular heap */
			 "WITH regular_heap (reloid) AS ( "
			 " SELECT c.oid "
			 " FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
			 " ON c.relnamespace = n.oid "
			 " LEFT OUTER JOIN pg_catalog.pg_index i "
			 " ON c.oid = i.indexrelid "
			 " WHERE relkind IN ('r', 'm', 'i', 'S') AND "

	/*
	 * pg_dump only dumps valid indexes; testing indisready is necessary in
	 * 9.2, and harmless in earlier/later versions.
	 */
			 " i.indisvalid IS DISTINCT FROM false AND "
			 " i.indisready IS DISTINCT FROM false AND "
	/* exclude possible orphaned temp tables */
			 " ((n.nspname !~ '^pg_temp_' AND "
			 " n.nspname !~ '^pg_toast_temp_' AND "
	/* skip pg_toast because toast index have relkind == 'i', not 't' */
			 " n.nspname NOT IN ('pg_catalog', 'information_schema', "
			 " 'binary_upgrade', 'pg_toast') AND "
			 " c.oid >= %u) OR "
			 " (n.nspname = 'pg_catalog' AND "
			 " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "

	/*
	 * We have to gather the TOAST tables in later steps because we can't
	 * schema-qualify TOAST tables.
	 */
	/* get TOAST heap */
			 " toast_heap (reloid) AS ( "
			 " SELECT reltoastrelid "
			 " FROM regular_heap JOIN pg_catalog.pg_class c "
			 " ON regular_heap.reloid = c.oid "
			 " AND c.reltoastrelid != %u), "
	/* get indexes on regular and TOAST heap */
			 " all_index (reloid) AS ( "
			 " SELECT indexrelid "
			 " FROM pg_index "
			 " WHERE indisvalid "
			 " AND indrelid IN (SELECT reltoastrelid "
			 " FROM (SELECT reloid FROM regular_heap "
			 " UNION ALL "
			 " SELECT reloid FROM toast_heap) all_heap "
			 " JOIN pg_catalog.pg_class c "
			 " ON all_heap.reloid = c.oid "
			 " AND c.reltoastrelid != %u)) "
	/* get all rels */
			 "SELECT c.oid, n.nspname, c.relname, "
			 " c.relfilenode, c.reltablespace, %s "
			 "FROM (SELECT reloid FROM regular_heap "
			 " UNION ALL "
			 " SELECT reloid FROM toast_heap "
			 " UNION ALL "
			 " SELECT reloid FROM all_index) all_rels "
			 " JOIN pg_catalog.pg_class c "
			 " ON all_rels.reloid = c.oid "
			 " JOIN pg_catalog.pg_namespace n "
			 " ON c.relnamespace = n.oid "
			 " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
			 " ON c.reltablespace = t.oid "
	/* we preserve pg_class.oid so we sort by it to match old/new */
			 "ORDER BY 1;",
			 FirstNormalObjectId,
	/* does pg_largeobject_metadata need to be migrated? */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
			 "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
			 InvalidOid, InvalidOid,
	/* 9.2 removed the spclocation column */
			 (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
			 "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");

	res = executeQueryOrDie(conn, "%s", query);

	ntups = PQntuples(res);

	relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

	i_oid = PQfnumber(res, "oid");
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	i_relfilenode = PQfnumber(res, "relfilenode");
	i_reltablespace = PQfnumber(res, "reltablespace");
	i_spclocation = PQfnumber(res, "spclocation");

	for (relnum = 0; relnum < ntups; relnum++)
	{
		RelInfo    *curr = &relinfos[num_rels++];

		curr->reloid = atooid(PQgetvalue(res, relnum, i_oid));

		nspname = PQgetvalue(res, relnum, i_nspname);
		curr->nsp_alloc = false;

		/*
		 * Many of the namespace and tablespace strings are identical, so we
		 * try to reuse the allocated string pointers where possible to
		 * reduce memory consumption.
		 */
		/* Can we reuse the previous string allocation? */
		if (last_namespace && strcmp(nspname, last_namespace) == 0)
			curr->nspname = last_namespace;
		else
		{
			last_namespace = curr->nspname = pg_strdup(nspname);
			curr->nsp_alloc = true;
		}

		relname = PQgetvalue(res, relnum, i_relname);
		curr->relname = pg_strdup(relname);

		curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
		curr->tblsp_alloc = false;

		/* Is the tablespace oid non-zero? */
		if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
		{
			/*
			 * The tablespace location might be "", meaning the cluster
			 * default location, i.e. pg_default or pg_global.
			 */
			tablespace = PQgetvalue(res, relnum, i_spclocation);

			/* Can we reuse the previous string allocation? */
			if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
				curr->tablespace = last_tablespace;
			else
			{
				last_tablespace = curr->tablespace = pg_strdup(tablespace);
				curr->tblsp_alloc = true;
			}
		}
		else
			/* A zero reltablespace oid indicates the database tablespace. */
			curr->tablespace = dbinfo->db_tablespace;
	}
	PQclear(res);

	PQfinish(conn);

	dbinfo->rel_arr.rels = relinfos;
	dbinfo->rel_arr.nrels = num_rels;
}
/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables of the database referred
 * by "db".
 *
 * NOTE: we assume that relations/entities with oids greater than
 * FirstNormalObjectId belongs to the user
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
	PGconn	   *conn = connectToServer(cluster, dbinfo->db_name);
	PGresult   *res;
	RelInfo    *relinfos;
	int			ntups;
	int			relnum;
	int			num_rels = 0;
	char	   *nspname = NULL;
	char	   *relname = NULL;
	int			i_spclocation,
				i_nspname,
				i_relname,
				i_oid,
				i_relfilenode;
	char		query[QUERY_ALLOC];

	/*
	 * pg_largeobject contains user data that does not appear in pg_dumpall
	 * --schema-only output, so we have to copy that system table heap and
	 * index. We could grab the pg_largeobject oids from template1, but it is
	 * easy to treat it as a normal table. Order by oid so we can join
	 * old/new structures efficiently.
	 */
	snprintf(query, sizeof(query),
			 "SELECT c.oid, n.nspname, c.relname, "
			 " c.relfilenode, t.spclocation "
			 "FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
			 " ON c.relnamespace = n.oid "
			 " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
			 " ON c.reltablespace = t.oid "
			 "WHERE relkind IN ('r','t', 'i'%s) AND "
			 " ((n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "
			 " c.oid >= %u) "
			 " OR (n.nspname = 'pg_catalog' AND "
			 " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) "
	/* we preserve pg_class.oid so we sort by it to match old/new */
			 "ORDER BY 1;",
	/* see the comment at the top of old_8_3_create_sequence_script() */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ?
			 "" : ", 'S'",
	/* this oid allows us to skip system toast tables */
			 FirstNormalObjectId,
	/* does pg_largeobject_metadata need to be migrated? */
			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
			 "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");

	/*
	 * Fixed: pass the assembled query as data via a "%s" format, not as the
	 * format string itself.  This matches the sibling version of this
	 * function and guards against any '%' in the text being interpreted as
	 * a conversion specification.
	 */
	res = executeQueryOrDie(conn, "%s", query);

	ntups = PQntuples(res);

	relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

	i_oid = PQfnumber(res, "oid");
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	i_relfilenode = PQfnumber(res, "relfilenode");
	i_spclocation = PQfnumber(res, "spclocation");

	for (relnum = 0; relnum < ntups; relnum++)
	{
		RelInfo    *curr = &relinfos[num_rels++];
		const char *tblspace;

		curr->reloid = atooid(PQgetvalue(res, relnum, i_oid));

		nspname = PQgetvalue(res, relnum, i_nspname);
		strlcpy(curr->nspname, nspname, sizeof(curr->nspname));

		relname = PQgetvalue(res, relnum, i_relname);
		strlcpy(curr->relname, relname, sizeof(curr->relname));

		curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));

		tblspace = PQgetvalue(res, relnum, i_spclocation);
		/* if no table tablespace, use the database tablespace */
		if (strlen(tblspace) == 0)
			tblspace = dbinfo->db_tblspace;
		strlcpy(curr->tablespace, tblspace, sizeof(curr->tablespace));
	}
	PQclear(res);

	PQfinish(conn);

	dbinfo->rel_arr.rels = relinfos;
	dbinfo->rel_arr.nrels = num_rels;
}
/*
 * gen_db_file_maps()
 *
 * generates database mappings for "old_db" and "new_db". Returns a malloc'ed
 * array of mappings.  nmaps is a return parameter which refers to the number
 * of mappings.
 *
 * Both relation arrays are sorted by pg_class.oid (see get_rel_infos), so a
 * single paired scan matches old and new relations; new-only TOAST tables
 * are allowed and skipped, any other mismatch is fatal.
 */
FileNameMap *
gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
				 int *nmaps, const char *old_pgdata, const char *new_pgdata)
{
	FileNameMap *maps;
	int			old_relnum,
				new_relnum;
	int			num_maps = 0;

	/* at most one map per old relation */
	maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
									 old_db->rel_arr.nrels);

	/*
	 * The old database shouldn't have more relations than the new one. We
	 * force the new cluster to have a TOAST table if the old table had one.
	 */
	if (old_db->rel_arr.nrels > new_db->rel_arr.nrels)
		pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n",
				 old_db->db_name);

	/* Drive the loop using new_relnum, which might be higher. */
	for (old_relnum = new_relnum = 0; new_relnum < new_db->rel_arr.nrels;
		 new_relnum++)
	{
		RelInfo    *old_rel;
		RelInfo    *new_rel = &new_db->rel_arr.rels[new_relnum];

		/*
		 * It is possible that the new cluster has a TOAST table for a table
		 * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed
		 * the NUMERIC length computation.  Therefore, if we have a TOAST
		 * table in the new cluster that doesn't match, skip over it and
		 * continue processing.  It is possible this TOAST table used an OID
		 * that was reserved in the old cluster, but we have no way of
		 * testing that, and we would have already gotten an error at the
		 * new cluster schema creation stage.  Fortunately, since we only
		 * restore the OID counter after schema restore, and restore in OID
		 * order via pg_dump, a conflict would only happen if the new TOAST
		 * table had a very low OID.  However, TOAST tables created long
		 * after initial table creation can have any OID, particularly after
		 * OID wraparound.
		 */
		if (old_relnum == old_db->rel_arr.nrels)
		{
			/* old array exhausted; only new-only TOAST tables may remain */
			if (strcmp(new_rel->nspname, "pg_toast") == 0)
				continue;
			else
				pg_fatal("Extra non-TOAST relation found in database \"%s\": new OID %d\n",
						 old_db->db_name, new_rel->reloid);
		}

		old_rel = &old_db->rel_arr.rels[old_relnum];

		if (old_rel->reloid != new_rel->reloid)
		{
			/* OID mismatch is tolerable only for a new-only TOAST table */
			if (strcmp(new_rel->nspname, "pg_toast") == 0)
				continue;
			else
				pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n",
						 old_db->db_name, old_rel->reloid, new_rel->reloid);
		}

		/*
		 * TOAST table names initially match the heap pg_class oid. In
		 * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0,
		 * TOAST table names change during ALTER TABLE ALTER COLUMN SET
		 * TYPE. In >= 9.0, TOAST relation names always use heap table oids,
		 * hence we cannot check relation names when upgrading from pre-9.0.
		 * Clusters upgraded to 9.0 will get matching TOAST names. If index
		 * names don't match primary key constraint names, this will fail
		 * because pg_dump dumps constraint names and pg_upgrade checks
		 * index names.
		 */
		if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
			((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
			  strcmp(old_rel->nspname, "pg_toast") != 0) &&
			 strcmp(old_rel->relname, new_rel->relname) != 0))
			pg_fatal("Mismatch of relation names in database \"%s\": "
					 "old name \"%s.%s\", new name \"%s.%s\"\n",
					 old_db->db_name, old_rel->nspname, old_rel->relname,
					 new_rel->nspname, new_rel->relname);

		create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
								old_rel, new_rel, maps + num_maps);
		num_maps++;
		old_relnum++;
	}

	/* Did we fail to exhaust the old array? */
	if (old_relnum != old_db->rel_arr.nrels)
		pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n",
				 old_db->db_name);

	*nmaps = num_maps;
	return maps;
}
/*
 * Try to read a timeline's history file.
 *
 * If successful, return the list of component TLIs (the given TLI followed
 * by its ancestor TLIs). If we can't find the history file, assume that the
 * timeline has no parents, and return a list of just the specified timeline
 * ID.
 *
 * Parses "buffer" (the history file contents) line by line; each data line
 * is "<tli> TAB <hi>/<lo>" giving the WAL switchpoint where that timeline
 * ended.  One extra entry is appended for the tip timeline (targetTLI),
 * whose end is InvalidXLogRecPtr.  *nentries receives the entry count; the
 * returned array is pg_malloc'ed and owned by the caller.
 */
TimeLineHistoryEntry *
rewind_parseTimeLineHistory(char *buffer, TimeLineID targetTLI, int *nentries)
{
	char	   *fline;
	TimeLineHistoryEntry *entry;
	TimeLineHistoryEntry *entries = NULL;
	int			nlines = 0;
	TimeLineID	lasttli = 0;
	XLogRecPtr	prevend;
	char	   *bufptr;
	bool		lastline = false;

	/*
	 * Parse the file...
	 */
	prevend = InvalidXLogRecPtr;
	bufptr = buffer;
	while (!lastline)
	{
		char	   *ptr;
		TimeLineID	tli;
		uint32		switchpoint_hi;
		uint32		switchpoint_lo;
		int			nfields;

		/* isolate the next line (terminate it in place) */
		fline = bufptr;
		while (*bufptr && *bufptr != '\n')
			bufptr++;

		if (!(*bufptr))
			lastline = true;
		else
			*bufptr++ = '\0';

		/* skip leading whitespace and check for # comment */
		for (ptr = fline; *ptr; ptr++)
		{
			if (!isspace((unsigned char) *ptr))
				break;
		}
		if (*ptr == '\0' || *ptr == '#')
			continue;

		nfields = sscanf(fline, "%u\t%X/%X", &tli, &switchpoint_hi, &switchpoint_lo);

		if (nfields < 1)
		{
			/* expect a numeric timeline ID as first field of line */
			fprintf(stderr, _("syntax error in history file: %s\n"), fline);
			fprintf(stderr, _("Expected a numeric timeline ID.\n"));
			exit(1);
		}
		if (nfields != 3)
		{
			fprintf(stderr, _("syntax error in history file: %s\n"), fline);
			fprintf(stderr, _("Expected a write-ahead log switchpoint location.\n"));
			exit(1);
		}
		if (entries && tli <= lasttli)
		{
			fprintf(stderr, _("invalid data in history file: %s\n"), fline);
			fprintf(stderr, _("Timeline IDs must be in increasing sequence.\n"));
			exit(1);
		}

		lasttli = tli;

		nlines++;
		entries = pg_realloc(entries, nlines * sizeof(TimeLineHistoryEntry));

		/* each timeline begins where its parent ended */
		entry = &entries[nlines - 1];
		entry->tli = tli;
		entry->begin = prevend;
		entry->end = ((uint64) (switchpoint_hi)) << 32 | (uint64) switchpoint_lo;
		prevend = entry->end;

		/* we ignore the remainder of each line */
	}

	if (entries && targetTLI <= lasttli)
	{
		fprintf(stderr, _("invalid data in history file\n"));
		fprintf(stderr, _("Timeline IDs must be less than child timeline's ID.\n"));
		exit(1);
	}

	/*
	 * Create one more entry for the "tip" of the timeline, which has no
	 * entry in the history file.
	 */
	nlines++;
	if (entries)
		entries = pg_realloc(entries, nlines * sizeof(TimeLineHistoryEntry));
	else
		entries = pg_malloc(1 * sizeof(TimeLineHistoryEntry));

	entry = &entries[nlines - 1];
	entry->tli = targetTLI;
	entry->begin = prevend;
	entry->end = InvalidXLogRecPtr;		/* tip timeline is still open */

	*nentries = nlines;
	return entries;
}
/* * Make a database connection with the given parameters. An * interactive password prompt is automatically issued if required. */ PGconn * connectDatabase(const char *dbname, const char *pghost, const char *pgport, const char *pguser, enum trivalue prompt_password, const char *progname, bool fail_ok) { PGconn *conn; char *password = NULL; bool new_pass; if (prompt_password == TRI_YES) password = simple_prompt("Password: "******"host"; values[0] = pghost; keywords[1] = "port"; values[1] = pgport; keywords[2] = "user"; values[2] = pguser; keywords[3] = "password"; values[3] = password; keywords[4] = "dbname"; values[4] = dbname; keywords[5] = "fallback_application_name"; values[5] = progname; keywords[6] = NULL; values[6] = NULL; new_pass = false; conn = PQconnectdbParams(keywords, values, true); free(keywords); free(values); if (!conn) { fprintf(stderr, _("%s: could not connect to database %s\n"), progname, dbname); exit(1); } if (PQstatus(conn) == CONNECTION_BAD && PQconnectionNeedsPassword(conn) && password == NULL && prompt_password != TRI_NO) { PQfinish(conn); password = simple_prompt("Password: "******"%s: could not connect to database %s: %s"), progname, dbname, PQerrorMessage(conn)); exit(1); } return conn; }
/*
 * parallel_exec_prog
 *
 * This has the same API as exec_prog, except it does parallel execution,
 * and therefore must throw errors and doesn't return an error status.
 *
 * With jobs <= 1 the command runs synchronously via exec_prog().  Otherwise
 * a worker (fork on Unix, thread on Windows) is started; if all job slots
 * are busy we first block in reap_child(true) to free one.  Relies on the
 * file-scope state parallel_jobs, thread_handles, exec_thread_args and
 * cur_thread_args.
 */
void
parallel_exec_prog(const char *log_file, const char *opt_log_file,
				   const char *fmt,...)
{
	va_list		args;
	char		cmd[MAX_STRING];
#ifndef WIN32
	pid_t		child;
#else
	HANDLE		child;
	exec_thread_arg *new_arg;
#endif

	/* Format the command once, before any fork/thread is created. */
	va_start(args, fmt);
	vsnprintf(cmd, sizeof(cmd), fmt, args);
	va_end(args);

	if (user_opts.jobs <= 1)
		/* throw_error must be true to allow jobs */
		exec_prog(log_file, opt_log_file, true, "%s", cmd);
	else
	{
		/* parallel */
#ifdef WIN32
		/* lazily allocate the per-job bookkeeping arrays, once per process */
		if (thread_handles == NULL)
			thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));

		if (exec_thread_args == NULL)
		{
			int			i;

			exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *));

			/*
			 * For safety and performance, we keep the args allocated during
			 * the entire life of the process, and we don't free the args in a
			 * thread different from the one that allocated it.
			 */
			for (i = 0; i < user_opts.jobs; i++)
				exec_thread_args[i] = pg_malloc0(sizeof(exec_thread_arg));
		}

		cur_thread_args = (void **) exec_thread_args;
#endif
		/* harvest any dead children */
		while (reap_child(false) == true)
			;

		/* must we wait for a dead child? */
		if (parallel_jobs >= user_opts.jobs)
			reap_child(true);

		/* set this before we start the job */
		parallel_jobs++;

		/* Ensure stdio state is quiesced before forking */
		fflush(NULL);

#ifndef WIN32
		child = fork();
		if (child == 0)
			/* use _exit to skip atexit() functions */
			_exit(!exec_prog(log_file, opt_log_file, true, "%s", cmd));
		else if (child < 0)
			/* fork failed */
			pg_fatal("could not create worker process: %s\n", strerror(errno));
#else
		/* empty array element are always at the end */
		new_arg = exec_thread_args[parallel_jobs - 1];

		/* Can only pass one pointer into the function, so use a struct */
		if (new_arg->log_file)
			pg_free(new_arg->log_file);
		new_arg->log_file = pg_strdup(log_file);
		if (new_arg->opt_log_file)
			pg_free(new_arg->opt_log_file);
		new_arg->opt_log_file = opt_log_file ? pg_strdup(opt_log_file) : NULL;
		if (new_arg->cmd)
			pg_free(new_arg->cmd);
		new_arg->cmd = pg_strdup(cmd);

		child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
										new_arg, 0, NULL);
		if (child == 0)
			pg_fatal("could not create worker thread: %s\n", strerror(errno));

		thread_handles[parallel_jobs - 1] = child;
#endif
	}

	return;
}
/*
 * get_sock_dir
 *
 * Identify the socket directory to use for this cluster.  If we're doing
 * a live check (old cluster only), we need to find out where the postmaster
 * is listening.  Otherwise, we're going to put the socket into the current
 * directory.
 *
 * Side effect: in the live-check path, the port read from postmaster.pid
 * overwrites any user-supplied old_cluster.port (a warning is printed when
 * that correction happens).
 */
void
get_sock_dir(ClusterInfo *cluster, bool live_check)
{
#ifdef HAVE_UNIX_SOCKETS
	/*
	 * sockdir and port were added to postmaster.pid in PG 9.1. Pre-9.1 cannot
	 * process pg_ctl -w for sockets in non-default locations.
	 */
	if (GET_MAJOR_VERSION(cluster->major_version) >= 901)
	{
		if (!live_check)
		{
			/* Use the current directory for the socket */
			cluster->sockdir = pg_malloc(MAXPGPATH);
			if (!getcwd(cluster->sockdir, MAXPGPATH))
				pg_log(PG_FATAL, "cannot find current directory\n");
		}
		else
		{
			/*
			 * If we are doing a live check, we will use the old cluster's
			 * Unix domain socket directory so we can connect to the live
			 * server.
			 */
			unsigned short orig_port = cluster->port;
			char		filename[MAXPGPATH],
						line[MAXPGPATH];
			FILE	   *fp;
			int			lineno;

			snprintf(filename, sizeof(filename), "%s/postmaster.pid",
					 cluster->pgdata);
			if ((fp = fopen(filename, "r")) == NULL)
				pg_log(PG_FATAL, "Cannot open file %s: %m\n", filename);

			/*
			 * Read postmaster.pid line by line; the port and socket-dir
			 * lines are identified by their fixed line numbers.
			 */
			for (lineno = 1;
				 lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
				 lineno++)
			{
				if (fgets(line, sizeof(line), fp) == NULL)
					pg_log(PG_FATAL, "Cannot read line %d from %s: %m\n",
						   lineno, filename);

				/* potentially overwrite user-supplied value */
				if (lineno == LOCK_FILE_LINE_PORT)
					sscanf(line, "%hu", &old_cluster.port);
				if (lineno == LOCK_FILE_LINE_SOCKET_DIR)
				{
					cluster->sockdir = pg_malloc(MAXPGPATH);
					/* strip off newline */
					sscanf(line, "%s\n", cluster->sockdir);
				}
			}
			fclose(fp);

			/* warn of port number correction */
			if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port)
				pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n",
					   orig_port, cluster->port);
		}
	}
	else

		/*
		 * Can't get sockdir and pg_ctl -w can't use a non-default, use
		 * default
		 */
		cluster->sockdir = NULL;
#else							/* !HAVE_UNIX_SOCKETS */
	/* no Unix sockets on this platform; TCP will be used instead */
	cluster->sockdir = NULL;
#endif
}
/*
 * Write an empty XLOG file, containing only the checkpoint record
 * already set up in ControlFile.
 *
 * The segment (at the path derived from the control file's timeline and
 * newXlogSegNo) is created from scratch: a long page header, one shutdown
 * checkpoint record with a freshly computed CRC, then zero pages up to
 * XLogSegSize.  Any error is fatal (exit(1)).
 */
static void
WriteEmptyXLOG(void)
{
	char	   *buffer;
	XLogPageHeader page;
	XLogLongPageHeader longpage;
	XLogRecord *record;
	pg_crc32	crc;
	char		path[MAXPGPATH];
	int			fd;
	int			nbytes;

	/* Use malloc() to ensure buffer is MAXALIGNED */
	buffer = (char *) pg_malloc(XLOG_BLCKSZ);
	page = (XLogPageHeader) buffer;
	memset(buffer, 0, XLOG_BLCKSZ);

	/* Set up the XLOG page header */
	page->xlp_magic = XLOG_PAGE_MAGIC;
	page->xlp_info = XLP_LONG_HEADER;
	page->xlp_tli = ControlFile.checkPointCopy.ThisTimeLineID;
	page->xlp_pageaddr = ControlFile.checkPointCopy.redo - SizeOfXLogLongPHD;
	longpage = (XLogLongPageHeader) page;
	longpage->xlp_sysid = ControlFile.system_identifier;
	longpage->xlp_seg_size = XLogSegSize;
	longpage->xlp_xlog_blcksz = XLOG_BLCKSZ;

	/* Insert the initial checkpoint record */
	record = (XLogRecord *) ((char *) page + SizeOfXLogLongPHD);
	record->xl_prev = 0;
	record->xl_xid = InvalidTransactionId;
	record->xl_tot_len = SizeOfXLogRecord + sizeof(CheckPoint);
	record->xl_len = sizeof(CheckPoint);
	record->xl_info = XLOG_CHECKPOINT_SHUTDOWN;
	record->xl_rmid = RM_XLOG_ID;
	memcpy(XLogRecGetData(record), &ControlFile.checkPointCopy,
		   sizeof(CheckPoint));

	/* CRC covers the record payload first, then the header up to xl_crc */
	INIT_CRC32(crc);
	COMP_CRC32(crc, &ControlFile.checkPointCopy, sizeof(CheckPoint));
	COMP_CRC32(crc, (char *) record, offsetof(XLogRecord, xl_crc));
	FIN_CRC32(crc);
	record->xl_crc = crc;

	/* Write the first page */
	XLogFilePath(path, ControlFile.checkPointCopy.ThisTimeLineID, newXlogSegNo);

	/* remove any stale segment first, so O_EXCL create succeeds */
	unlink(path);

	fd = open(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
			  S_IRUSR | S_IWUSR);
	if (fd < 0)
	{
		fprintf(stderr, _("%s: could not open file \"%s\": %s\n"),
				progname, path, strerror(errno));
		exit(1);
	}

	errno = 0;
	if (write(fd, buffer, XLOG_BLCKSZ) != XLOG_BLCKSZ)
	{
		/* if write didn't set errno, assume problem is no disk space */
		if (errno == 0)
			errno = ENOSPC;
		fprintf(stderr, _("%s: could not write file \"%s\": %s\n"),
				progname, path, strerror(errno));
		exit(1);
	}

	/* Fill the rest of the file with zeroes */
	memset(buffer, 0, XLOG_BLCKSZ);
	for (nbytes = XLOG_BLCKSZ; nbytes < XLogSegSize; nbytes += XLOG_BLCKSZ)
	{
		errno = 0;
		if (write(fd, buffer, XLOG_BLCKSZ) != XLOG_BLCKSZ)
		{
			if (errno == 0)
				errno = ENOSPC;
			fprintf(stderr, _("%s: could not write file \"%s\": %s\n"),
					progname, path, strerror(errno));
			exit(1);
		}
	}

	if (fsync(fd) != 0)
	{
		fprintf(stderr, _("%s: fsync error: %s\n"), progname, strerror(errno));
		exit(1);
	}

	/* NOTE(review): buffer is never freed; presumably acceptable for this
	 * one-shot utility, but confirm if this is ever called in a loop. */
	close(fd);
}
/* * parse the given command line options and try to connect to the db. * * On success, the db conn is returned inside options->db */ int handle_options(int argc, char** argv, struct adhoc_opts * options) { char *password = NULL; char *password_prompt = NULL; bool new_pass = true; parse_psql_options(argc, argv, options); if (!options->action_string) { fprintf(stderr, "Error: Must specify an sql command\n\n"); usage(); exit(1); } if (options->username == NULL) password_prompt = pg_strdup(_("Password: "******"Password for user %s: "), options->username); if (pset.getPassword == TRI_YES) password = simple_prompt(password_prompt, 100, false); do { #define PARAMS_ARRAY_SIZE 8 const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); keywords[0] = "host"; values[0] = options->host; keywords[1] = "port"; values[1] = options->port; keywords[2] = "user"; values[2] = options->username; keywords[3] = "password"; values[3] = password; keywords[4] = "dbname"; values[4] = options->dbname; keywords[5] = "fallback_application_name"; values[5] = pset.progname; keywords[6] = "client_encoding"; values[6] = (pset.notty || getenv("PGCLIENTENCODING")) ? NULL : "auto"; keywords[7] = NULL; values[7] = NULL; new_pass = false; pset.db = PQconnectdbParams(keywords, values, true); free(keywords); free(values); if (PQstatus(pset.db) == CONNECTION_BAD && PQconnectionNeedsPassword(pset.db) && password == NULL && pset.getPassword != TRI_NO) { PQfinish(pset.db); password = simple_prompt(password_prompt, 100, false); new_pass = true; } } while (new_pass); options->db = pset.db; return 0; }
/* * create_script_for_old_cluster_deletion() * * This is particularly useful for tablespace deletion. */ void create_script_for_old_cluster_deletion( char **deletion_script_file_name) { FILE *script = NULL; int tblnum; *deletion_script_file_name = pg_malloc(MAX_PG_PATH); prep_status("Creating script to delete old cluster"); snprintf(*deletion_script_file_name, MAX_PG_PATH, "%s/delete_old_cluster.%s", os_info.cwd, SCRIPT_EXT); if ((script = fopen(*deletion_script_file_name, "w")) == NULL) pg_log(PG_FATAL, "Could not create necessary file: %s\n", *deletion_script_file_name); #ifndef WIN32 /* add shebang header */ fprintf(script, "#!/bin/sh\n\n"); #endif /* delete old cluster's default tablespace */ fprintf(script, RMDIR_CMD " %s\n", old_cluster.pgdata); /* delete old cluster's alternate tablespaces */ for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++) { /* * Do the old cluster's per-database directories share a directory * with a new version-specific tablespace? */ if (strlen(old_cluster.tablespace_suffix) == 0) { /* delete per-database directories */ int dbnum; fprintf(script, "\n"); /* remove PG_VERSION? */ if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) fprintf(script, RM_CMD " %s%s/PG_VERSION\n", os_info.tablespaces[tblnum], old_cluster.tablespace_suffix); for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) { fprintf(script, RMDIR_CMD " %s%s/%d\n", os_info.tablespaces[tblnum], old_cluster.tablespace_suffix, old_cluster.dbarr.dbs[dbnum].db_oid); } } else /* * Simply delete the tablespace directory, which might be ".old" * or a version-specific subdirectory. */ fprintf(script, RMDIR_CMD " %s%s\n", os_info.tablespaces[tblnum], old_cluster.tablespace_suffix); } fclose(script); #ifndef WIN32 if (chmod(*deletion_script_file_name, S_IRWXU) != 0) pg_log(PG_FATAL, "Could not add execute permission to file: %s\n", *deletion_script_file_name); #endif check_ok(); }
/* * get_loadable_libraries() * * Fetch the names of all old libraries containing C-language functions. * We will later check that they all exist in the new installation. */ void get_loadable_libraries(void) { PGresult **ress; int totaltups; int dbnum; ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *)); totaltups = 0; /* Fetch all library names, removing duplicates within each DB */ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) { DbInfo *active_db = &old_cluster.dbarr.dbs[dbnum]; PGconn *conn = connectToServer(&old_cluster, active_db->db_name); /* Fetch all libraries referenced in this DB */ ress[dbnum] = executeQueryOrDie(conn, "SELECT DISTINCT probin " "FROM pg_catalog.pg_proc " "WHERE prolang = 13 /* C */ AND " "probin IS NOT NULL AND " "oid >= %u;", FirstNormalObjectId); totaltups += PQntuples(ress[dbnum]); PQfinish(conn); } /* Allocate what's certainly enough space */ if (totaltups > 0) os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *)); else os_info.libraries = NULL; /* * Now remove duplicates across DBs. This is pretty inefficient code, but * there probably aren't enough entries to matter. */ totaltups = 0; for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) { PGresult *res = ress[dbnum]; int ntups; int rowno; ntups = PQntuples(res); for (rowno = 0; rowno < ntups; rowno++) { char *lib = PQgetvalue(res, rowno, 0); bool dup = false; int n; for (n = 0; n < totaltups; n++) { if (strcmp(lib, os_info.libraries[n]) == 0) { dup = true; break; } } if (!dup) os_info.libraries[totaltups++] = pg_strdup(lib); } PQclear(res); } os_info.num_libraries = totaltups; pg_free(ress); }
/*
 * Set the variable named "name" to value "value",
 * or delete it if "value" is NULL.
 *
 * Returns true if successful, false if not; in the latter case a suitable
 * error message has been printed, except for the unexpected case of
 * space or name being NULL.
 *
 * The variable list hangs off "space" and is kept sorted by name, which is
 * why the scan can stop early at the first entry comparing greater.
 */
bool
SetVariable(VariableSpace space, const char *name, const char *value)
{
	struct _variable *current,
			   *previous;

	if (!space || !name)
		return false;

	if (!valid_variable_name(name))
	{
		/* Deletion of non-existent variable is not an error */
		if (!value)
			return true;
		psql_error("invalid variable name: \"%s\"\n", name);
		return false;
	}

	/* walk the sorted list, keeping "previous" for possible unlinking */
	for (previous = space, current = space->next;
		 current;
		 previous = current, current = current->next)
	{
		int			cmp = strcmp(current->name, name);

		if (cmp == 0)
		{
			/*
			 * Found entry, so update, unless assign hook returns false.
			 *
			 * We must duplicate the passed value to start with. This
			 * simplifies the API for substitute hooks. Moreover, some assign
			 * hooks assume that the passed value has the same lifespan as the
			 * variable. Having to free the string again on failure is a
			 * small price to pay for keeping these APIs simple.
			 */
			char	   *new_value = value ? pg_strdup(value) : NULL;
			bool		confirmed;

			if (current->substitute_hook)
				new_value = (*current->substitute_hook) (new_value);

			if (current->assign_hook)
				confirmed = (*current->assign_hook) (new_value);
			else
				confirmed = true;

			if (confirmed)
			{
				if (current->value)
					pg_free(current->value);
				current->value = new_value;

				/*
				 * If we deleted the value, and there are no hooks to
				 * remember, we can discard the variable altogether.
				 */
				if (new_value == NULL &&
					current->substitute_hook == NULL &&
					current->assign_hook == NULL)
				{
					previous->next = current->next;
					free(current->name);
					free(current);
				}
			}
			else if (new_value)
				pg_free(new_value); /* current->value is left unchanged */

			return confirmed;
		}

		if (cmp > 0)
			break;				/* it's not there */
	}

	/* not present, make new entry ... unless we were asked to delete */
	if (value)
	{
		/* insert between "previous" and "previous->next" to keep order */
		current = pg_malloc(sizeof *current);
		current->name = pg_strdup(name);
		current->value = pg_strdup(value);
		current->substitute_hook = NULL;
		current->assign_hook = NULL;
		current->next = previous->next;
		previous->next = current;
	}
	return true;
}
/*---- * Runs a query, which returns pieces of files from the remote source data * directory, and overwrites the corresponding parts of target files with * the received parts. The result set is expected to be of format: * * path text -- path in the data directory, e.g "base/1/123" * begin int4 -- offset within the file * chunk bytea -- file content *---- */ static void receiveFileChunks(const char *sql) { PGresult *res; if (PQsendQueryParams(conn, sql, 0, NULL, NULL, NULL, NULL, 1) != 1) pg_fatal("could not send query: %s", PQerrorMessage(conn)); pg_log(PG_DEBUG, "getting file chunks\n"); if (PQsetSingleRowMode(conn) != 1) pg_fatal("could not set libpq connection to single row mode\n"); while ((res = PQgetResult(conn)) != NULL) { char *filename; int filenamelen; int chunkoff; int chunksize; char *chunk; switch (PQresultStatus(res)) { case PGRES_SINGLE_TUPLE: break; case PGRES_TUPLES_OK: PQclear(res); continue; /* final zero-row result */ default: pg_fatal("unexpected result while fetching remote files: %s", PQresultErrorMessage(res)); } /* sanity check the result set */ if (PQnfields(res) != 3 || PQntuples(res) != 1) pg_fatal("unexpected result set size while fetching remote files\n"); if (PQftype(res, 0) != TEXTOID && PQftype(res, 1) != INT4OID && PQftype(res, 2) != BYTEAOID) { pg_fatal("unexpected data types in result set while fetching remote files: %u %u %u\n", PQftype(res, 0), PQftype(res, 1), PQftype(res, 2)); } if (PQfformat(res, 0) != 1 && PQfformat(res, 1) != 1 && PQfformat(res, 2) != 1) { pg_fatal("unexpected result format while fetching remote files\n"); } if (PQgetisnull(res, 0, 0) || PQgetisnull(res, 0, 1)) { pg_fatal("unexpected null values in result while fetching remote files\n"); } if (PQgetlength(res, 0, 1) != sizeof(int32)) pg_fatal("unexpected result length while fetching remote files\n"); /* Read result set to local variables */ memcpy(&chunkoff, PQgetvalue(res, 0, 1), sizeof(int32)); chunkoff = ntohl(chunkoff); chunksize = PQgetlength(res, 0, 
2); filenamelen = PQgetlength(res, 0, 0); filename = pg_malloc(filenamelen + 1); memcpy(filename, PQgetvalue(res, 0, 0), filenamelen); filename[filenamelen] = '\0'; chunk = PQgetvalue(res, 0, 2); /* * It's possible that the file was deleted on remote side after we * created the file map. In this case simply ignore it, as if it was * not there in the first place, and move on. */ if (PQgetisnull(res, 0, 2)) { pg_log(PG_DEBUG, "received null value for chunk for file \"%s\", file has been deleted\n", filename); pg_free(filename); PQclear(res); continue; } pg_log(PG_DEBUG, "received chunk for file \"%s\", offset %d, size %d\n", filename, chunkoff, chunksize); open_target_file(filename, false); write_target_range(chunk, chunkoff, chunksize); pg_free(filename); PQclear(res); } }