/*
 * vacuum_one_database
 *
 * Process tables in the given database.  If the 'tables' list is empty,
 * process all tables in the database.
 *
 * Note that this function is only concerned with running exactly one stage
 * when in analyze-in-stages mode; caller must iterate on us if necessary.
 *
 * If concurrentCons is > 1, multiple connections are used to vacuum tables
 * in parallel.  In this case and if the table list is empty, we first obtain
 * a list of tables from the database.
 */
static void
vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
					int stage,
					SimpleStringList *tables,
					const char *host, const char *port,
					const char *username, enum trivalue prompt_password,
					int concurrentCons,
					const char *progname, bool echo, bool quiet)
{
	PQExpBufferData sql;
	PGconn	   *conn;
	SimpleStringListCell *cell;
	ParallelSlot *slots = NULL;
	SimpleStringList dbtables = {NULL, NULL};
	int			i;
	bool		failed = false;
	bool		parallel = concurrentCons > 1;
	const char *stage_commands[] = {
		"SET default_statistics_target=1; SET vacuum_cost_delay=0;",
		"SET default_statistics_target=10; RESET vacuum_cost_delay;",
		"RESET default_statistics_target;"
	};
	const char *stage_messages[] = {
		gettext_noop("Generating minimal optimizer statistics (1 target)"),
		gettext_noop("Generating medium optimizer statistics (10 targets)"),
		gettext_noop("Generating default (full) optimizer statistics")
	};

	Assert(stage == ANALYZE_NO_STAGE ||
		   (stage >= 0 && stage < ANALYZE_NUM_STAGES));

	if (!quiet)
	{
		if (stage != ANALYZE_NO_STAGE)
			printf(_("%s: processing database \"%s\": %s\n"),
				   progname, dbname, stage_messages[stage]);
		else
			printf(_("%s: vacuuming database \"%s\"\n"),
				   progname, dbname);
		fflush(stdout);
	}

	conn = connectDatabase(dbname, host, port, username, prompt_password,
						   progname, false);

	initPQExpBuffer(&sql);

	/*
	 * If a table list is not provided and we're using multiple connections,
	 * prepare the list of tables by querying the catalogs.
	 */
	if (parallel && (!tables || !tables->head))
	{
		PQExpBufferData buf;
		PGresult   *res;
		int			ntups;
		int			i;

		initPQExpBuffer(&buf);

		res = executeQuery(conn,
						   "SELECT c.relname, ns.nspname FROM pg_class c, pg_namespace ns\n"
						   " WHERE relkind IN ('r', 'm') AND c.relnamespace = ns.oid\n"
						   " ORDER BY c.relpages DESC;",
						   progname, echo);

		ntups = PQntuples(res);
		for (i = 0; i < ntups; i++)
		{
			appendPQExpBuffer(&buf, "%s",
							  fmtQualifiedId(PQserverVersion(conn),
											 PQgetvalue(res, i, 1),
											 PQgetvalue(res, i, 0)));

			simple_string_list_append(&dbtables, buf.data);
			resetPQExpBuffer(&buf);
		}

		termPQExpBuffer(&buf);
		tables = &dbtables;

		/*
		 * If there are more connections than vacuumable relations, we don't
		 * need to use them all.
		 */
		if (concurrentCons > ntups)
			concurrentCons = ntups;
		if (concurrentCons <= 1)
			parallel = false;
	}

	/*
	 * Set up the database connections.  We reuse the connection we already
	 * have for the first slot.  If not in parallel mode, the first slot in
	 * the array contains the connection.
	 */
	slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons);
	init_slot(slots, conn);
	if (parallel)
	{
		for (i = 1; i < concurrentCons; i++)
		{
			conn = connectDatabase(dbname, host, port, username,
								   prompt_password, progname, false);
			init_slot(slots + i, conn);
		}
	}

	/*
	 * Prepare all the connections to run the appropriate analyze stage, if
	 * caller requested that mode.
	 */
	if (stage != ANALYZE_NO_STAGE)
	{
		int			j;

		/* We already emitted the message above */

		for (j = 0; j < concurrentCons; j++)
			executeCommand((slots + j)->connection,
						   stage_commands[stage], progname, echo);
	}

	cell = tables ? tables->head : NULL;
	do
	{
		ParallelSlot *free_slot;
		const char *tabname = cell ? cell->val : NULL;

		prepare_vacuum_command(&sql, conn, vacopts, tabname);

		if (CancelRequested)
		{
			failed = true;
			goto finish;
		}

		/*
		 * Get the connection slot to use.  If in parallel mode, here we wait
		 * for one connection to become available if none already is.  In
		 * non-parallel mode we simply use the only slot we have, which we
		 * know to be free.
		 */
		if (parallel)
		{
			/*
			 * Get a free slot, waiting until one becomes free if none
			 * currently is.
			 */
			free_slot = GetIdleSlot(slots, concurrentCons, dbname, progname);
			if (!free_slot)
			{
				failed = true;
				goto finish;
			}

			free_slot->isFree = false;
		}
		else
			free_slot = slots;

		run_vacuum_command(free_slot->connection, sql.data,
						   echo, dbname, tabname, progname, parallel);

		if (cell)
			cell = cell->next;
	} while (cell != NULL);

	if (parallel)
	{
		int			j;

		for (j = 0; j < concurrentCons; j++)
		{
			/* wait for all connections to return their results */
			if (!GetQueryResult((slots + j)->connection, dbname, progname))
				goto finish;

			(slots + j)->isFree = true;
		}
	}

finish:
	for (i = 0; i < concurrentCons; i++)
		DisconnectDatabase(slots + i);

	pfree(slots);

	termPQExpBuffer(&sql);

	if (failed)
		exit(1);
}
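/*
 * Illustrative sketch (not part of the original source): one way a caller
 * could drive analyze-in-stages mode.  As the header comment above notes,
 * vacuum_one_database() runs exactly one stage per call, so the caller has
 * to iterate over the stages itself.  The helper name below is made up for
 * illustration only; the parameters are assumed to have been filled in by
 * the usual option parsing.
 */
static void
analyze_in_stages_example(const char *dbname, vacuumingOptions *vacopts,
						  SimpleStringList *tables,
						  const char *host, const char *port,
						  const char *username,
						  enum trivalue prompt_password,
						  int concurrentCons,
						  const char *progname, bool echo, bool quiet)
{
	int			stage;

	/* Run each statistics stage in order; each call processes one stage. */
	for (stage = 0; stage < ANALYZE_NUM_STAGES; stage++)
		vacuum_one_database(dbname, vacopts, stage, tables,
							host, port, username, prompt_password,
							concurrentCons, progname, echo, quiet);
}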
/*
 * vacuum_one_database
 *
 * Process tables in the given database.  If the 'tables' list is empty,
 * process all tables in the database.
 *
 * Note that this function is only concerned with running exactly one stage
 * when in analyze-in-stages mode; caller must iterate on us if necessary.
 *
 * If concurrentCons is > 1, multiple connections are used to vacuum tables
 * in parallel.  In this case and if the table list is empty, we first obtain
 * a list of tables from the database.
 */
static void
vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
					int stage,
					SimpleStringList *tables,
					const char *host, const char *port,
					const char *username, enum trivalue prompt_password,
					int concurrentCons,
					const char *progname, bool echo, bool quiet)
{
	PQExpBufferData sql;
	PQExpBufferData buf;
	PQExpBufferData catalog_query;
	PGresult   *res;
	PGconn	   *conn;
	SimpleStringListCell *cell;
	ParallelSlot *slots;
	SimpleStringList dbtables = {NULL, NULL};
	int			i;
	int			ntups;
	bool		failed = false;
	bool		parallel = concurrentCons > 1;
	bool		tables_listed = false;
	bool		has_where = false;
	const char *stage_commands[] = {
		"SET default_statistics_target=1; SET vacuum_cost_delay=0;",
		"SET default_statistics_target=10; RESET vacuum_cost_delay;",
		"RESET default_statistics_target;"
	};
	const char *stage_messages[] = {
		gettext_noop("Generating minimal optimizer statistics (1 target)"),
		gettext_noop("Generating medium optimizer statistics (10 targets)"),
		gettext_noop("Generating default (full) optimizer statistics")
	};

	Assert(stage == ANALYZE_NO_STAGE ||
		   (stage >= 0 && stage < ANALYZE_NUM_STAGES));

	conn = connectDatabase(dbname, host, port, username, prompt_password,
						   progname, echo, false, true);

	if (vacopts->disable_page_skipping && PQserverVersion(conn) < 90600)
	{
		PQfinish(conn);
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "disable-page-skipping");
		exit(1);
	}

	if (vacopts->skip_locked && PQserverVersion(conn) < 120000)
	{
		PQfinish(conn);
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 12\n"),
				progname, "skip-locked");
		exit(1);
	}

	if (vacopts->min_xid_age != 0 && PQserverVersion(conn) < 90600)
	{
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "--min-xid-age");
		exit(1);
	}

	if (vacopts->min_mxid_age != 0 && PQserverVersion(conn) < 90600)
	{
		fprintf(stderr, _("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "--min-mxid-age");
		exit(1);
	}

	if (!quiet)
	{
		if (stage != ANALYZE_NO_STAGE)
			printf(_("%s: processing database \"%s\": %s\n"),
				   progname, PQdb(conn), _(stage_messages[stage]));
		else
			printf(_("%s: vacuuming database \"%s\"\n"),
				   progname, PQdb(conn));
		fflush(stdout);
	}

	/*
	 * Prepare the list of tables to process by querying the catalogs.
	 *
	 * Since we execute the constructed query with the default search_path
	 * (which could be unsafe), everything in this query MUST be fully
	 * qualified.
	 *
	 * First, build a WITH clause for the catalog query if any tables were
	 * specified, with a set of values made of relation names and their
	 * optional set of columns.  This is used to match any provided column
	 * lists with the generated qualified identifiers and to filter for the
	 * tables provided via --table.  If a listed table does not exist, the
	 * catalog query will fail.
	 */
	initPQExpBuffer(&catalog_query);
	for (cell = tables ? tables->head : NULL; cell; cell = cell->next)
	{
		char	   *just_table;
		const char *just_columns;

		/*
		 * Split the relation and column names given by the user; these are
		 * used to feed the CTE with values on which pre-run validity checks
		 * are also performed.  For now these checks happen only on the
		 * relation name.
		 */
		splitTableColumnsSpec(cell->val, PQclientEncoding(conn),
							  &just_table, &just_columns);

		if (!tables_listed)
		{
			appendPQExpBuffer(&catalog_query,
							  "WITH listed_tables (table_oid, column_list) "
							  "AS (\n VALUES (");
			tables_listed = true;
		}
		else
			appendPQExpBuffer(&catalog_query, ",\n (");

		appendStringLiteralConn(&catalog_query, just_table, conn);
		appendPQExpBuffer(&catalog_query, "::pg_catalog.regclass, ");

		if (just_columns && just_columns[0] != '\0')
			appendStringLiteralConn(&catalog_query, just_columns, conn);
		else
			appendPQExpBufferStr(&catalog_query, "NULL");

		appendPQExpBufferStr(&catalog_query, "::pg_catalog.text)");

		pg_free(just_table);
	}

	/* Finish formatting the CTE */
	if (tables_listed)
		appendPQExpBuffer(&catalog_query, "\n)\n");

	appendPQExpBuffer(&catalog_query, "SELECT c.relname, ns.nspname");

	if (tables_listed)
		appendPQExpBuffer(&catalog_query, ", listed_tables.column_list");

	appendPQExpBuffer(&catalog_query,
					  " FROM pg_catalog.pg_class c\n"
					  " JOIN pg_catalog.pg_namespace ns"
					  " ON c.relnamespace OPERATOR(pg_catalog.=) ns.oid\n"
					  " LEFT JOIN pg_catalog.pg_class t"
					  " ON c.reltoastrelid OPERATOR(pg_catalog.=) t.oid\n");

	/* Used to match the tables listed by the user */
	if (tables_listed)
		appendPQExpBuffer(&catalog_query,
						  " JOIN listed_tables"
						  " ON listed_tables.table_oid OPERATOR(pg_catalog.=) c.oid\n");

	/*
	 * If no tables were listed, filter for the relevant relation types.  If
	 * tables were given via --table, don't bother filtering by relation
	 * type.  Instead, let the server decide whether a given relation can be
	 * processed, in which case the user will know about it.
	 */
	if (!tables_listed)
	{
		appendPQExpBuffer(&catalog_query,
						  " WHERE c.relkind OPERATOR(pg_catalog.=) ANY (array["
						  CppAsString2(RELKIND_RELATION) ", "
						  CppAsString2(RELKIND_MATVIEW) "])\n");
		has_where = true;
	}

	/*
	 * For --min-xid-age and --min-mxid-age, the age of the relation is the
	 * greatest of the ages of the main relation and its associated TOAST
	 * table.  The commands generated by vacuumdb will also process the TOAST
	 * table for the relation if necessary, so it does not need to be
	 * considered separately.
	 */
	if (vacopts->min_xid_age != 0)
	{
		appendPQExpBuffer(&catalog_query,
						  " %s GREATEST(pg_catalog.age(c.relfrozenxid),"
						  " pg_catalog.age(t.relfrozenxid)) "
						  " OPERATOR(pg_catalog.>=) '%d'::pg_catalog.int4\n"
						  " AND c.relfrozenxid OPERATOR(pg_catalog.!=)"
						  " '0'::pg_catalog.xid\n",
						  has_where ? "AND" : "WHERE", vacopts->min_xid_age);
		has_where = true;
	}

	if (vacopts->min_mxid_age != 0)
	{
		appendPQExpBuffer(&catalog_query,
						  " %s GREATEST(pg_catalog.mxid_age(c.relminmxid),"
						  " pg_catalog.mxid_age(t.relminmxid)) OPERATOR(pg_catalog.>=)"
						  " '%d'::pg_catalog.int4\n"
						  " AND c.relminmxid OPERATOR(pg_catalog.!=)"
						  " '0'::pg_catalog.xid\n",
						  has_where ? "AND" : "WHERE", vacopts->min_mxid_age);
		has_where = true;
	}

	/*
	 * Execute the catalog query.  We use the default search_path for this
	 * query for consistency with table lookups done elsewhere by the user.
	 */
	appendPQExpBuffer(&catalog_query, " ORDER BY c.relpages DESC;");
	executeCommand(conn, "RESET search_path;", progname, echo);
	res = executeQuery(conn, catalog_query.data, progname, echo);
	termPQExpBuffer(&catalog_query);
	PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL,
						 progname, echo));

	/*
	 * If no rows are returned, there are no matching tables, so we are done.
	 */
	ntups = PQntuples(res);
	if (ntups == 0)
	{
		PQclear(res);
		PQfinish(conn);
		return;
	}

	/*
	 * Build qualified identifiers for each table, including the column list
	 * if given.
	 */
	initPQExpBuffer(&buf);
	for (i = 0; i < ntups; i++)
	{
		appendPQExpBufferStr(&buf,
							 fmtQualifiedId(PQgetvalue(res, i, 1),
											PQgetvalue(res, i, 0)));

		if (tables_listed && !PQgetisnull(res, i, 2))
			appendPQExpBufferStr(&buf, PQgetvalue(res, i, 2));

		simple_string_list_append(&dbtables, buf.data);
		resetPQExpBuffer(&buf);
	}
	termPQExpBuffer(&buf);
	PQclear(res);

	/*
	 * If there are more connections than vacuumable relations, we don't need
	 * to use them all.
	 */
	if (parallel)
	{
		if (concurrentCons > ntups)
			concurrentCons = ntups;
		if (concurrentCons <= 1)
			parallel = false;
	}

	/*
	 * Set up the database connections.  We reuse the connection we already
	 * have for the first slot.  If not in parallel mode, the first slot in
	 * the array contains the connection.
	 */
	if (concurrentCons <= 0)
		concurrentCons = 1;
	slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons);
	init_slot(slots, conn);
	if (parallel)
	{
		for (i = 1; i < concurrentCons; i++)
		{
			conn = connectDatabase(dbname, host, port, username,
								   prompt_password, progname, echo, false, true);
			init_slot(slots + i, conn);
		}
	}

	/*
	 * Prepare all the connections to run the appropriate analyze stage, if
	 * caller requested that mode.
	 */
	if (stage != ANALYZE_NO_STAGE)
	{
		int			j;

		/* We already emitted the message above */

		for (j = 0; j < concurrentCons; j++)
			executeCommand((slots + j)->connection,
						   stage_commands[stage], progname, echo);
	}

	initPQExpBuffer(&sql);

	cell = dbtables.head;
	do
	{
		const char *tabname = cell->val;
		ParallelSlot *free_slot;

		if (CancelRequested)
		{
			failed = true;
			goto finish;
		}

		/*
		 * Get the connection slot to use.  If in parallel mode, here we wait
		 * for one connection to become available if none already is.  In
		 * non-parallel mode we simply use the only slot we have, which we
		 * know to be free.
		 */
		if (parallel)
		{
			/*
			 * Get a free slot, waiting until one becomes free if none
			 * currently is.
			 */
			free_slot = GetIdleSlot(slots, concurrentCons, progname);
			if (!free_slot)
			{
				failed = true;
				goto finish;
			}

			free_slot->isFree = false;
		}
		else
			free_slot = slots;

		prepare_vacuum_command(&sql, PQserverVersion(free_slot->connection),
							   vacopts, tabname);

		/*
		 * Execute the vacuum.  If not in parallel mode, this terminates the
		 * program in case of an error.  (The parallel case handles query
		 * errors in ProcessQueryResult through GetIdleSlot.)
		 */
		run_vacuum_command(free_slot->connection, sql.data,
						   echo, tabname, progname, parallel);

		cell = cell->next;
	} while (cell != NULL);

	if (parallel)
	{
		int			j;

		/* wait for all connections to finish */
		for (j = 0; j < concurrentCons; j++)
		{
			if (!GetQueryResult((slots + j)->connection, progname))
				goto finish;
		}
	}

finish:
	for (i = 0; i < concurrentCons; i++)
		DisconnectDatabase(slots + i);
	pfree(slots);

	termPQExpBuffer(&sql);

	if (failed)
		exit(1);
}
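/*
 * Illustrative note (not part of the original source): the catalog query
 * assembled above is easier to follow with a concrete, hand-reconstructed
 * example.  Assuming a hypothetical invocation with --table "public.foo(a,b)"
 * and --min-xid-age 1000000, the constructed query should look roughly like:
 *
 *   WITH listed_tables (table_oid, column_list) AS (
 *     VALUES ('public.foo'::pg_catalog.regclass, '(a,b)'::pg_catalog.text)
 *   )
 *   SELECT c.relname, ns.nspname, listed_tables.column_list
 *     FROM pg_catalog.pg_class c
 *     JOIN pg_catalog.pg_namespace ns
 *       ON c.relnamespace OPERATOR(pg_catalog.=) ns.oid
 *     LEFT JOIN pg_catalog.pg_class t
 *       ON c.reltoastrelid OPERATOR(pg_catalog.=) t.oid
 *     JOIN listed_tables
 *       ON listed_tables.table_oid OPERATOR(pg_catalog.=) c.oid
 *    WHERE GREATEST(pg_catalog.age(c.relfrozenxid),
 *                   pg_catalog.age(t.relfrozenxid))
 *          OPERATOR(pg_catalog.>=) '1000000'::pg_catalog.int4
 *      AND c.relfrozenxid OPERATOR(pg_catalog.!=) '0'::pg_catalog.xid
 *    ORDER BY c.relpages DESC;
 *
 * Since --table was given, no relkind filter is added, and the column list
 * "(a,b)" returned by the CTE is appended verbatim to the qualified name
 * when the per-table commands are built.
 */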