static void
finish_sync_worker(void)
{
    /*
     * Commit any outstanding transaction.  This is the usual case, unless
     * there was nothing to do for the table.
     */
    if (IsTransactionState())
    {
        CommitTransactionCommand();
        pgstat_report_stat(false);
    }

    /* And flush all writes. */
    XLogFlush(GetXLogWriteRecPtr());

    StartTransactionCommand();
    ereport(LOG,
            (errmsg("logical replication table synchronization worker for subscription \"%s\", table \"%s\" has finished",
                    MySubscription->name,
                    get_rel_name(MyLogicalRepWorker->relid))));
    CommitTransactionCommand();

    /* Find the main apply worker and signal it. */
    logicalrep_worker_wakeup(MyLogicalRepWorker->subid, InvalidOid);

    /* Stop gracefully */
    proc_exit(0);
}
/*
 * Ensure that the environment is sane.
 *
 * This involves checking the PostgreSQL version and, if in network mode,
 * also establishing a connection to a receiver.
 */
int
ensure_valid_environment(void)
{
    StringInfoData buf;
    int         retval;
    char       *pgversion;
    SPITupleTable *coltuptable;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    /* Ensure compatible version */
    pgstat_report_activity(STATE_RUNNING, "verifying compatible postgres version");

    initStringInfo(&buf);
    appendStringInfo(&buf, "select version();");
    retval = SPI_execute(buf.data, false, 0);
    if (retval != SPI_OK_SELECT)
    {
        /* WARNING, not FATAL, so the cleanup and error return below run */
        elog(WARNING, "unable to query PostgreSQL version: error code %d", retval);
        SPI_finish();
        PopActiveSnapshot();
        CommitTransactionCommand();
        return 1;
    }

    coltuptable = SPI_tuptable;
    pgversion = SPI_getvalue(coltuptable->vals[0], coltuptable->tupdesc, 1);

    if (strstr(pgversion, "PostgreSQL 9.3") == NULL)
    {
        elog(WARNING, "unsupported PostgreSQL version: %s", pgversion);
        SPI_finish();
        PopActiveSnapshot();
        CommitTransactionCommand();
        return 1;
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    /*
     * Attempt to establish a connection if the output mode is network.
     */
    if (strcmp(output_mode, "network") == 0)
    {
        retval = establish_connection();
        if (retval == 2)
            elog(LOG, "error: failed to connect to antenna; check that the domain is reachable from this host");
    }

    /* TODO: verify that the logging directory is accessible in csv mode. */

    elog(LOG, "pgsampler initialized");

    return 0;
}
/*
 * Returns the number of non-template databases that accept connections,
 * according to the catalog.
 */
int
get_database_count(void)
{
    int         retval;
    int         processed;
    StringInfoData buf;
    SPITupleTable *coltuptable;
    int         database_count = 0;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    initStringInfo(&buf);
    appendStringInfo(&buf,
                     "SELECT count(*) FROM pg_database "
                     "WHERE datname NOT IN ('template0', 'template1') "
                     "AND datallowconn IS TRUE;");
    retval = SPI_execute(buf.data, false, 0);
    if (retval != SPI_OK_SELECT)
        elog(FATAL, "Database information collection failed");

    processed = SPI_processed;
    if (processed > 0)
    {
        coltuptable = SPI_tuptable;
        database_count = atoi(SPI_getvalue(coltuptable->vals[0],
                                           coltuptable->tupdesc, 1));
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    return database_count;
}
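/*
 * Sketch (not from the original sources): the same count can be read without
 * the text round-trip through atoi(SPI_getvalue(...)), using the binary
 * accessor that other workers in this collection already use.  count(*)
 * returns int8, hence DatumGetInt64.  This assumes the query from
 * get_database_count() above has just been executed, i.e. it must be called
 * between SPI_execute() and SPI_finish().
 */
static int
read_count_binary(void)
{
    bool        isnull;
    int64       count;

    count = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0],
                                        SPI_tuptable->tupdesc,
                                        1, &isnull));
    if (isnull)
        elog(FATAL, "null result from count(*)");

    return (int) count;
}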
/*
 * ProcessCatchupInterrupt
 *
 * The portion of catchup interrupt handling that runs outside of the signal
 * handler, which allows it to actually process pending invalidations.
 */
void
ProcessCatchupInterrupt(void)
{
    while (catchupInterruptPending)
    {
        /*
         * What we need to do here is cause ReceiveSharedInvalidMessages() to
         * run, which will do the necessary work and also reset the
         * catchupInterruptPending flag.  If we are inside a transaction we
         * can just call AcceptInvalidationMessages() to do this.  If we
         * aren't, we start and immediately end a transaction; the call to
         * AcceptInvalidationMessages() happens down inside transaction start.
         *
         * It is awfully tempting to just call AcceptInvalidationMessages()
         * without the rest of the xact start/stop overhead, and I think that
         * would actually work in the normal case; but I am not sure that
         * things would clean up nicely if we got an error partway through.
         */
        if (IsTransactionOrTransactionBlock())
        {
            elog(DEBUG4, "ProcessCatchupEvent inside transaction");
            AcceptInvalidationMessages();
        }
        else
        {
            elog(DEBUG4, "ProcessCatchupEvent outside transaction");
            StartTransactionCommand();
            CommitTransactionCommand();
        }
    }
}
/*
 * Main worker routine.  Accepts a dsm_handle as its argument.
 */
static void
bg_worker_main(Datum main_arg)
{
    PartitionArgs *args;
    dsm_handle  handle = DatumGetUInt32(main_arg);

    /* Create resource owner */
    CurrentResourceOwner = ResourceOwnerCreate(NULL, "CreatePartitionsWorker");

    /* Attach to dynamic shared memory */
    if (!handle)
    {
        ereport(WARNING,
                (errmsg("pg_pathman worker: invalid dsm_handle")));
        return;                 /* nothing to attach to */
    }
    segment = dsm_attach(handle);
    args = dsm_segment_address(segment);

    /* Establish connection and start transaction */
    BackgroundWorkerInitializeConnectionByOid(args->dbid, InvalidOid);
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    /* Create partitions */
    args->result = create_partitions(args->relid,
                                     PATHMAN_GET_DATUM(args->value, args->by_val),
                                     args->value_type,
                                     &args->crashed);

    /* Cleanup */
    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    dsm_detach(segment);
}
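/*
 * Sketch (not from the original sources): the launcher side that would pass
 * the dsm_handle above.  The function name is illustrative, only a subset of
 * PartitionArgs fields is shown, and the bgw_main function pointer is the
 * pre-PostgreSQL-10 registration style; dsm_create() takes the flags
 * argument only on 9.6 and later.
 */
static void
start_bg_worker_sketch(Oid dbid, Oid relid)
{
    BackgroundWorker worker;
    BackgroundWorkerHandle *bgw_handle;
    dsm_segment *seg;
    PartitionArgs *args;

    /* Put the arguments where the worker can see them. */
    seg = dsm_create(sizeof(PartitionArgs), 0);
    args = (PartitionArgs *) dsm_segment_address(seg);
    args->dbid = dbid;
    args->relid = relid;

    /* Register a dynamic worker, passing the segment handle as its Datum. */
    MemSet(&worker, 0, sizeof(worker));
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    worker.bgw_main = bg_worker_main;
    worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
    worker.bgw_notify_pid = MyProcPid;
    snprintf(worker.bgw_name, BGW_MAXLEN, "pg_pathman partitions worker");

    if (!RegisterDynamicBackgroundWorker(&worker, &bgw_handle))
        elog(WARNING, "could not start background worker");
}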
/*
 * We keep some resources across transactions, so we attach everything to a
 * long-lived ResourceOwner, which prevents the commit below from thinking
 * that there are reference leaks.
 */
static void
start_executor(QueryDesc *queryDesc, MemoryContext context, ResourceOwner owner)
{
    MemoryContext old;
    ResourceOwner save;

    StartTransactionCommand();

    old = MemoryContextSwitchTo(context);
    save = CurrentResourceOwner;
    CurrentResourceOwner = owner;

    queryDesc->snapshot = GetTransactionSnapshot();
    queryDesc->snapshot->copied = true;

    RegisterSnapshotOnOwner(queryDesc->snapshot, owner);

    ExecutorStart(queryDesc, 0);

    queryDesc->snapshot->active_count++;
    UnregisterSnapshotFromOwner(queryDesc->snapshot, owner);
    UnregisterSnapshotFromOwner(queryDesc->estate->es_snapshot, owner);

    CurrentResourceOwner = TopTransactionResourceOwner;

    MemoryContextSwitchTo(old);

    CommitTransactionCommand();

    CurrentResourceOwner = save;
}
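/*
 * Sketch (not from the original sources): how the long-lived context and
 * owner that start_executor() expects might be created once at process
 * startup.  The names here are illustrative; ALLOCSET_DEFAULT_SIZES needs
 * PostgreSQL 9.6 or later (older releases spell out the three size
 * parameters explicitly).
 */
static MemoryContext long_lived_context = NULL;
static ResourceOwner long_lived_owner = NULL;

static void
setup_long_lived_resources(void)
{
    /* An owner that outlives any single transaction. */
    long_lived_owner = ResourceOwnerCreate(NULL, "LongLivedQueryOwner");

    /* A context parented to TopMemoryContext so it survives commits. */
    long_lived_context = AllocSetContextCreate(TopMemoryContext,
                                               "LongLivedQueryContext",
                                               ALLOCSET_DEFAULT_SIZES);
}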
/*
 * get_database_list
 *
 * Returns a list of entries for all databases found in pg_database.
 */
static List *
get_database_list(void)
{
    List       *dbs = NIL;
    Relation    rel;
    HeapScanDesc scan;
    HeapTuple   tup;
    MemoryContext resultcxt;

    /* This is the context that we will allocate our output data in */
    resultcxt = CurrentMemoryContext;

    /*
     * Start a transaction so we can access pg_database, and get a snapshot.
     * We don't have a use for the snapshot itself, but we're interested in
     * the secondary effect that it sets RecentGlobalXmin.  (This is critical
     * for anything that reads heap pages, because HOT may decide to prune
     * them even if the process doesn't attempt to modify any tuples.)
     */
    StartTransactionCommand();
    (void) GetTransactionSnapshot();

    /*
     * We take an AccessExclusiveLock so we don't conflict with any DATABASE
     * commands.
     */
    rel = heap_open(DatabaseRelationId, AccessExclusiveLock);
    scan = heap_beginscan_catalog(rel, 0, NULL);

    while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
    {
        MemoryContext oldcxt;
        Form_pg_database pgdatabase = (Form_pg_database) GETSTRUCT(tup);
        DatabaseEntry *db_entry;

        /* Ignore template databases or ones that don't allow connections. */
        if (pgdatabase->datistemplate || !pgdatabase->datallowconn)
            continue;

        /*
         * Allocate our results in the caller's context, not the
         * transaction's.  We do this inside the loop, and restore the
         * original context at the end, so that leaky things like
         * heap_getnext() are not called in a potentially long-lived context.
         */
        oldcxt = MemoryContextSwitchTo(resultcxt);

        db_entry = palloc0(sizeof(DatabaseEntry));
        db_entry->oid = HeapTupleGetOid(tup);
        StrNCpy(NameStr(db_entry->name), NameStr(pgdatabase->datname),
                NAMEDATALEN);
        dbs = lappend(dbs, db_entry);

        MemoryContextSwitchTo(oldcxt);
    }

    heap_endscan(scan);
    heap_close(rel, AccessExclusiveLock);

    CommitTransactionCommand();

    return dbs;
}
/*
 * Load the list of subscriptions.
 *
 * Only the fields interesting for worker start/stop functions are filled for
 * each subscription.
 */
static List *
get_subscription_list(void)
{
    List       *res = NIL;
    Relation    rel;
    TableScanDesc scan;
    HeapTuple   tup;
    MemoryContext resultcxt;

    /* This is the context that we will allocate our output data in */
    resultcxt = CurrentMemoryContext;

    /*
     * Start a transaction so we can access pg_subscription, and get a
     * snapshot.  We don't have a use for the snapshot itself, but we're
     * interested in the secondary effect that it sets RecentGlobalXmin.
     * (This is critical for anything that reads heap pages, because HOT may
     * decide to prune them even if the process doesn't attempt to modify any
     * tuples.)
     */
    StartTransactionCommand();
    (void) GetTransactionSnapshot();

    rel = table_open(SubscriptionRelationId, AccessShareLock);
    scan = table_beginscan_catalog(rel, 0, NULL);

    while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
    {
        Form_pg_subscription subform = (Form_pg_subscription) GETSTRUCT(tup);
        Subscription *sub;
        MemoryContext oldcxt;

        /*
         * Allocate our results in the caller's context, not the
         * transaction's.  We do this inside the loop, and restore the
         * original context at the end, so that leaky things like
         * heap_getnext() are not called in a potentially long-lived context.
         */
        oldcxt = MemoryContextSwitchTo(resultcxt);

        sub = (Subscription *) palloc0(sizeof(Subscription));
        sub->oid = subform->oid;
        sub->dbid = subform->subdbid;
        sub->owner = subform->subowner;
        sub->enabled = subform->subenabled;
        sub->name = pstrdup(NameStr(subform->subname));
        /* We don't fill fields we are not interested in. */

        res = lappend(res, sub);
        MemoryContextSwitchTo(oldcxt);
    }

    table_endscan(scan);
    table_close(rel, AccessShareLock);

    CommitTransactionCommand();

    return res;
}
/*
 * Initialize workspace for a worker process: create the schema if it doesn't
 * already exist.
 */
static void
initialize_worker_spi(worktable *table)
{
    int         ret;
    int64       ntup;
    bool        isnull;
    StringInfoData buf;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");

    /* XXX could we use CREATE SCHEMA IF NOT EXISTS? */
    initStringInfo(&buf);
    appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'",
                     table->schema);

    ret = SPI_execute(buf.data, true, 0);
    if (ret != SPI_OK_SELECT)
        elog(FATAL, "SPI_execute failed: error code %d", ret);

    if (SPI_processed != 1)
        elog(FATAL, "not a singleton result");

    ntup = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0],
                                       SPI_tuptable->tupdesc,
                                       1, &isnull));
    if (isnull)
        elog(FATAL, "null result");

    if (ntup == 0)
    {
        resetStringInfo(&buf);
        appendStringInfo(&buf,
                         "CREATE SCHEMA \"%s\" "
                         "CREATE TABLE \"%s\" ("
                         "      type text CHECK (type IN ('total', 'delta')), "
                         "      value integer)"
                         "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
                         "WHERE type = 'total'",
                         table->schema, table->name, table->name, table->name);

        /* set statement start time */
        SetCurrentStatementStartTimestamp();

        ret = SPI_execute(buf.data, false, 0);

        if (ret != SPI_OK_UTILITY)
            elog(FATAL, "failed to create my schema");
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);
}
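/*
 * Sketch (not from the original sources): the SetCurrentStatementStartTimestamp
 * / StartTransactionCommand / SPI_connect / PushActiveSnapshot preamble and
 * its mirror-image cleanup recur in most of the workers in this collection.
 * A helper along these lines factors the pattern out for statements whose
 * results are not needed; execute_spi_command is an illustrative name.
 */
static void
execute_spi_command(const char *sql, bool read_only, int expected_code)
{
    int         ret;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, sql);

    ret = SPI_execute(sql, read_only, 0);
    if (ret != expected_code)
        elog(FATAL, "SPI_execute failed: error code %d", ret);

    /* Any result tuples are discarded by SPI_finish(). */
    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);
}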
static void
do_end(void)
{
    CommitTransactionCommand();
    elog(DEBUG4, "commit transaction");
    CHECK_FOR_INTERRUPTS();     /* allow SIGINT to kill bootstrap run */
    if (isatty(0))
    {
        printf("bootstrap> ");
        fflush(stdout);
    }
}
static void
execute_pg_settings_logger(config_log_objects *objects)
{
    int         ret;
    bool        isnull;
    StringInfoData buf;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, "executing configuration logger function");

    initStringInfo(&buf);

    appendStringInfo(&buf, "SELECT %s.%s()",
                     config_log_schema, objects->function_name);

    ret = SPI_execute(buf.data, false, 0);
    if (ret != SPI_OK_SELECT)
    {
        elog(FATAL, "SPI_execute failed: error code %d", ret);
    }

    if (SPI_processed != 1)
    {
        elog(FATAL, "not a singleton result");
    }

    log_info("pg_settings_logger() executed");

    if (DatumGetBool(SPI_getbinval(SPI_tuptable->vals[0],
                                   SPI_tuptable->tupdesc,
                                   1, &isnull)))
    {
        log_info("Configuration changes recorded");
    }
    else
    {
        log_info("No configuration changes detected");
    }

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);
}
void
worker_main(Datum arg)
{
    int         ret;
    StringInfoData buf;
    uint32      segment = DatumGetUInt32(arg);

    /* Setup signal handlers */
    pqsignal(SIGHUP, worker_sighup);
    pqsignal(SIGTERM, worker_sigterm);

    /* Allow signals */
    BackgroundWorkerUnblockSignals();

    initialize_worker(segment);

    /* Connect to the database */
    BackgroundWorkerInitializeConnection(job->datname, job->rolname);

    elog(LOG, "%s initialized running job id %d",
         MyBgworkerEntry->bgw_name, job->job_id);
    pgstat_report_appname(MyBgworkerEntry->bgw_name);

    /* Initialize the query text */
    initStringInfo(&buf);
    appendStringInfo(&buf,
                     "SELECT * FROM %s.%s(%d, NULL)",
                     job_run_function.schema,
                     job_run_function.name,
                     job->job_id);

    /* Initialize the SPI subsystem */
    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());
    pgstat_report_activity(STATE_RUNNING, buf.data);

    SetCurrentStatementStartTimestamp();

    /* And run the query */
    ret = SPI_execute(buf.data, true, 0);
    if (ret < 0)
        elog(FATAL, "errors while executing %s", buf.data);

    /* Commit the transaction */
    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();
    pgstat_report_activity(STATE_IDLE, NULL);

    proc_exit(0);
}
/*
 * Set a string in shared memory naming the database to connect to the next
 * time the background worker restarts.  Because a bgworker can only connect
 * to one database at a time, and some catalogs and stats are scoped to the
 * current database, the worker periodically restarts to collect the latest
 * stats from another database.
 */
int
set_next_db_target(void)
{
    int         retval;
    int         processed;
    StringInfoData buf;
    SPITupleTable *coltuptable;
    char       *next_db_target;

    SetCurrentStatementStartTimestamp();
    StartTransactionCommand();
    SPI_connect();
    PushActiveSnapshot(GetTransactionSnapshot());

    /* Get a sorted list of databases and find the one after target_db. */
    initStringInfo(&buf);
    appendStringInfo(&buf,
                     "SELECT datname FROM pg_database "
                     "WHERE datname NOT IN ('template0', 'template1') "
                     "AND datallowconn IS TRUE AND datname > '%s' "
                     "ORDER BY datname ASC LIMIT 1;", target_db);

    retval = SPI_execute(buf.data, false, 0);
    if (retval != SPI_OK_SELECT)
        elog(FATAL, "Database information collection failed");

    processed = SPI_processed;

    if (processed == 0)
    {
        /* No matching records, so pick the first database. */
        resetStringInfo(&buf);
        appendStringInfoString(&buf,
                               "SELECT datname FROM pg_database "
                               "WHERE datname NOT IN ('template0', 'template1') "
                               "AND datallowconn IS TRUE "
                               "ORDER BY datname ASC LIMIT 1;");

        retval = SPI_execute(buf.data, false, 0);
        if (retval != SPI_OK_SELECT)
            elog(FATAL, "Database information collection failed");
    }

    coltuptable = SPI_tuptable;
    next_db_target = SPI_getvalue(coltuptable->vals[0], coltuptable->tupdesc, 1);

    strcpy(pgsampler_state->next_db, next_db_target);

    SPI_finish();
    PopActiveSnapshot();
    CommitTransactionCommand();

    return 0;
}
/*
 *--------------------------------------------------------------
 * Async_UnlistenOnExit
 *
 *      Clean up the pg_listener table at backend exit.
 *
 *      This is executed if we have done any LISTENs in this backend.
 *      It might not be necessary anymore, if the user UNLISTENed
 *      everything, but we don't try to detect that case.
 *
 * Side effects:
 *      pg_listener is updated if necessary.
 *--------------------------------------------------------------
 */
static void
Async_UnlistenOnExit(int code, Datum arg)
{
    /*
     * We need to start/commit a transaction for the unlisten, but if there
     * is already an active transaction we had better abort that one first.
     * Otherwise we'd end up committing changes that probably ought to be
     * discarded.
     */
    AbortOutOfAnyTransaction();
    /* Now we can do the unlisten */
    StartTransactionCommand();
    Async_UnlistenAll();
    CommitTransactionCommand();
}
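/*
 * Sketch (not from the original sources): the cleanup above only helps if it
 * is registered to run at backend exit.  A backend would typically arrange
 * that the first time it executes LISTEN, along these lines; the callback
 * matches the standard (int code, Datum arg) exit-callback signature that
 * on_shmem_exit() expects.
 */
static bool unlistenExitRegistered = false;

static void
register_unlisten_on_exit(void)
{
    if (!unlistenExitRegistered)
    {
        on_shmem_exit(Async_UnlistenOnExit, 0);
        unlistenExitRegistered = true;
    }
}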
/*
 * Handle COMMIT message.
 *
 * TODO: support tracking of multiple origins
 */
static void
apply_handle_commit(StringInfo s)
{
    LogicalRepCommitData commit_data;

    logicalrep_read_commit(s, &commit_data);

    Assert(commit_data.commit_lsn == remote_final_lsn);

    /* The synchronization worker runs in a single transaction. */
    if (IsTransactionState() && !am_tablesync_worker())
    {
        /*
         * Update origin state so we can restart streaming from the correct
         * position in case of a crash.
         */
        replorigin_session_origin_lsn = commit_data.end_lsn;
        replorigin_session_origin_timestamp = commit_data.committime;

        CommitTransactionCommand();
        pgstat_report_stat(false);

        store_flush_position(commit_data.end_lsn);
    }
    else
    {
        /* Process any invalidation messages that might have accumulated. */
        AcceptInvalidationMessages();
        maybe_reread_subscription();
    }

    in_remote_transaction = false;

    /* Process any tables that are being synchronized in parallel. */
    process_syncing_tables(commit_data.end_lsn);

    pgstat_report_activity(STATE_IDLE, NULL);
}
/*
 * ProcessCatchupEvent
 *
 * Respond to a catchup event (SIGUSR1) from another backend.
 *
 * This is called either directly from the SIGUSR1 signal handler, or the
 * next time control reaches the outer idle loop (assuming there's still
 * anything to do by then).
 */
static void
ProcessCatchupEvent(void)
{
    bool        notify_enabled;

    /* Must prevent SIGUSR2 interrupt while I am running */
    notify_enabled = DisableNotifyInterrupt();

    /*
     * What we need to do here is cause ReceiveSharedInvalidMessages() to
     * run, which will do the necessary work and also reset the
     * catchupInterruptOccurred flag.  If we are inside a transaction we can
     * just call AcceptInvalidationMessages() to do this.  If we aren't, we
     * start and immediately end a transaction; the call to
     * AcceptInvalidationMessages() happens down inside transaction start.
     *
     * It is awfully tempting to just call AcceptInvalidationMessages()
     * without the rest of the xact start/stop overhead, and I think that
     * would actually work in the normal case; but I am not sure that things
     * would clean up nicely if we got an error partway through.
     */
    if (IsTransactionOrTransactionBlock())
    {
        elog(DEBUG4, "ProcessCatchupEvent inside transaction");
        AcceptInvalidationMessages();
    }
    else
    {
        elog(DEBUG4, "ProcessCatchupEvent outside transaction");
        StartTransactionCommand();
        CommitTransactionCommand();
    }

    if (notify_enabled)
        EnableNotifyInterrupt();
}
/* --------------------------------
 * InitPostgres
 *      Initialize POSTGRES.
 *
 * The database can be specified by name, using the in_dbname parameter, or by
 * OID, using the dboid parameter.  In the latter case, the actual database
 * name can be returned to the caller in out_dbname.  If out_dbname isn't
 * NULL, it must point to a buffer of size NAMEDATALEN.
 *
 * In bootstrap mode no parameters are used.  The autovacuum launcher process
 * doesn't use any parameters either, because it only goes far enough to be
 * able to read pg_database; it doesn't connect to any particular database.
 * In walsender mode only username is used.
 *
 * As of PostgreSQL 8.2, we expect InitProcess() was already called, so we
 * already have a PGPROC struct ... but it's not completely filled in yet.
 *
 * Note:
 *      Be very careful with the order of calls in the InitPostgres function.
 * --------------------------------
 */
void
InitPostgres(const char *in_dbname, Oid dboid, const char *username,
             char *out_dbname)
{
    bool        bootstrap = IsBootstrapProcessingMode();
    bool        am_superuser;
    GucContext  gucctx;
    char       *fullpath;
    char        dbname[NAMEDATALEN];

    elog(DEBUG3, "InitPostgres");

    /*
     * Add my PGPROC struct to the ProcArray.
     *
     * Once I have done this, I am visible to other backends!
     */
    InitProcessPhase2();

    /*
     * Initialize my entry in the shared-invalidation manager's array of
     * per-backend data.
     *
     * Sets up MyBackendId, a unique backend identifier.
     */
    MyBackendId = InvalidBackendId;

    SharedInvalBackendInit(false);

    if (MyBackendId > MaxBackends || MyBackendId <= 0)
        elog(FATAL, "bad backend id: %d", MyBackendId);

    /* Now that we have a BackendId, we can participate in ProcSignal */
    ProcSignalInit(MyBackendId);

    /*
     * bufmgr needs another initialization call too
     */
    InitBufferPoolBackend();

    /*
     * Initialize local process's access to XLOG, if appropriate.  In
     * bootstrap case we skip this since StartupXLOG() was run instead.
     */
    if (!bootstrap)
        (void) RecoveryInProgress();

    /*
     * Initialize the relation cache and the system catalog caches.  Note
     * that no catalog access happens here; we only set up the hashtable
     * structure.  We must do this before starting a transaction because
     * transaction abort would try to touch these hashtables.
     */
    RelationCacheInitialize();
    InitCatalogCache();
    InitPlanCache();

    /* Initialize portal manager */
    EnablePortalManager();

    /* Initialize stats collection --- must happen before first xact */
    if (!bootstrap)
        pgstat_initialize();

    /*
     * Load relcache entries for the shared system catalogs.  This must
     * create at least an entry for pg_database.
     */
    RelationCacheInitializePhase2();

    /*
     * Set up process-exit callback to do pre-shutdown cleanup.  This has to
     * be after we've initialized all the low-level modules like the buffer
     * manager, because during shutdown this has to run before the low-level
     * modules start to close down.  On the other hand, we want it in place
     * before we begin our first transaction --- if we fail during the
     * initialization transaction, as is entirely possible, we need the
     * AbortTransaction call to clean up.
     */
    on_shmem_exit(ShutdownPostgres, 0);

    /* The autovacuum launcher is done here */
    if (IsAutoVacuumLauncherProcess())
        return;

    /*
     * Start a new transaction here before first access to db, and get a
     * snapshot.  We don't have a use for the snapshot itself, but we're
     * interested in the secondary effect that it sets RecentGlobalXmin.
     * (This is critical for anything that reads heap pages, because HOT may
     * decide to prune them even if the process doesn't attempt to modify any
     * tuples.)
     */
    if (!bootstrap)
    {
        StartTransactionCommand();
        (void) GetTransactionSnapshot();
    }

    /*
     * Set up the global variables holding database id and default
     * tablespace.  But note we won't actually try to touch the database
     * just yet.
     *
     * We take a shortcut in the bootstrap and walsender case, otherwise we
     * have to look up the db's entry in pg_database.
     */
    if (bootstrap || am_walsender)
    {
        MyDatabaseId = TemplateDbOid;
        MyDatabaseTableSpace = DEFAULTTABLESPACE_OID;
    }
    else if (in_dbname != NULL)
    {
        HeapTuple   tuple;
        Form_pg_database dbform;

        tuple = GetDatabaseTuple(in_dbname);
        if (!HeapTupleIsValid(tuple))
            ereport(FATAL,
                    (errcode(ERRCODE_UNDEFINED_DATABASE),
                     errmsg("database \"%s\" does not exist", in_dbname)));
        dbform = (Form_pg_database) GETSTRUCT(tuple);
        MyDatabaseId = HeapTupleGetOid(tuple);
        MyDatabaseTableSpace = dbform->dattablespace;
        /* take database name from the caller, just for paranoia */
        strlcpy(dbname, in_dbname, sizeof(dbname));
    }
    else
    {
        /* caller specified database by OID */
        HeapTuple   tuple;
        Form_pg_database dbform;

        tuple = GetDatabaseTupleByOid(dboid);
        if (!HeapTupleIsValid(tuple))
            ereport(FATAL,
                    (errcode(ERRCODE_UNDEFINED_DATABASE),
                     errmsg("database %u does not exist", dboid)));
        dbform = (Form_pg_database) GETSTRUCT(tuple);
        MyDatabaseId = HeapTupleGetOid(tuple);
        MyDatabaseTableSpace = dbform->dattablespace;
        Assert(MyDatabaseId == dboid);
        strlcpy(dbname, NameStr(dbform->datname), sizeof(dbname));
        /* pass the database name back to the caller */
        if (out_dbname)
            strcpy(out_dbname, dbname);
    }

    /* Now we can mark our PGPROC entry with the database ID */
    /* (We assume this is an atomic store so no lock is needed) */
    MyProc->databaseId = MyDatabaseId;

    /*
     * Now, take a writer's lock on the database we are trying to connect to.
     * If there is a concurrently running DROP DATABASE on that database,
     * this will block us until it finishes (and has committed its update of
     * pg_database).
     *
     * Note that the lock is not held long, only until the end of this
     * startup transaction.  This is OK since we are already advertising our
     * use of the database in the PGPROC array; anyone trying a DROP DATABASE
     * after this point will see us there.
     *
     * Note: use of RowExclusiveLock here is reasonable because we envision
     * our session as being a concurrent writer of the database.  If we had a
     * way of declaring a session as being guaranteed-read-only, we could use
     * AccessShareLock for such sessions and thereby not conflict against
     * CREATE DATABASE.
     */
    if (!bootstrap && !am_walsender)
        LockSharedObject(DatabaseRelationId, MyDatabaseId, 0,
                         RowExclusiveLock);

    /*
     * Recheck pg_database to make sure the target database hasn't gone away.
     * If there was a concurrent DROP DATABASE, this ensures we will die
     * cleanly without creating a mess.
     */
    if (!bootstrap && !am_walsender)
    {
        HeapTuple   tuple;

        tuple = GetDatabaseTuple(dbname);
        if (!HeapTupleIsValid(tuple) ||
            MyDatabaseId != HeapTupleGetOid(tuple) ||
            MyDatabaseTableSpace != ((Form_pg_database) GETSTRUCT(tuple))->dattablespace)
            ereport(FATAL,
                    (errcode(ERRCODE_UNDEFINED_DATABASE),
                     errmsg("database \"%s\" does not exist", dbname),
                     errdetail("It seems to have just been dropped or renamed.")));
    }

    /*
     * Now we should be able to access the database directory safely.  Verify
     * it's there and looks reasonable.
     */
    fullpath = GetDatabasePath(MyDatabaseId, MyDatabaseTableSpace);

    if (!bootstrap && !am_walsender)
    {
        if (access(fullpath, F_OK) == -1)
        {
            if (errno == ENOENT)
                ereport(FATAL,
                        (errcode(ERRCODE_UNDEFINED_DATABASE),
                         errmsg("database \"%s\" does not exist", dbname),
                         errdetail("The database subdirectory \"%s\" is missing.",
                                   fullpath)));
            else
                ereport(FATAL,
                        (errcode_for_file_access(),
                         errmsg("could not access directory \"%s\": %m",
                                fullpath)));
        }

        ValidatePgVersion(fullpath);
    }

    SetDatabasePath(fullpath);

    /*
     * It's now possible to do real access to the system catalogs.
     *
     * Load relcache entries for the system catalogs.  This must create at
     * least the minimum set of "nailed-in" cache entries.
     */
    RelationCacheInitializePhase3();

    /*
     * Perform client authentication if necessary, then figure out our
     * postgres user ID, and see if we are a superuser.
     *
     * In standalone mode and in autovacuum worker processes, we use a fixed
     * ID, otherwise we figure it out from the authenticated user name.
     */
    if (bootstrap || IsAutoVacuumWorkerProcess())
    {
        InitializeSessionUserIdStandalone();
        am_superuser = true;
    }
    else if (!IsUnderPostmaster)
    {
        InitializeSessionUserIdStandalone();
        am_superuser = true;
        if (!ThereIsAtLeastOneRole())
            ereport(WARNING,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("no roles are defined in this database system"),
                     errhint("You should immediately run CREATE USER \"%s\" SUPERUSER;.",
                             username)));
    }
    else
    {
        /* normal multiuser case */
        Assert(MyProcPort != NULL);
        PerformAuthentication(MyProcPort);
        InitializeSessionUserId(username);
        am_superuser = superuser();
    }

    /* set up ACL framework (so CheckMyDatabase can check permissions) */
    initialize_acl();

    /* Process pg_db_role_setting options */
    process_settings(MyDatabaseId, GetSessionUserId());

    /*
     * Re-read the pg_database row for our database, check permissions and
     * set up database-specific GUC settings.  We can't do this until all the
     * database-access infrastructure is up.  (Also, it wants to know if the
     * user is a superuser, so the above stuff has to happen first.)
     */
    if (!bootstrap && !am_walsender)
        CheckMyDatabase(dbname, am_superuser);

    /*
     * If we're trying to shut down, only superusers can connect.
     */
    if (!am_superuser &&
        MyProcPort != NULL &&
        MyProcPort->canAcceptConnections == CAC_WAITBACKUP)
        ereport(FATAL,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("must be superuser to connect during database shutdown")));

    /*
     * Check a normal user hasn't connected to a superuser reserved slot.
     */
    if (!am_superuser &&
        ReservedBackends > 0 &&
        !HaveNFreeProcs(ReservedBackends))
        ereport(FATAL,
                (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
                 errmsg("connection limit exceeded for non-superusers")));

    /*
     * Now process any command-line switches that were included in the
     * startup packet, if we are in a regular backend.  We couldn't do this
     * before because we didn't know if client is a superuser.
     */
    gucctx = am_superuser ? PGC_SUSET : PGC_BACKEND;

    if (MyProcPort != NULL &&
        MyProcPort->cmdline_options != NULL)
    {
        /*
         * The maximum possible number of commandline arguments that could
         * come from MyProcPort->cmdline_options is (strlen + 1) / 2; see
         * pg_split_opts().
         */
        char      **av;
        int         maxac;
        int         ac;

        maxac = 2 + (strlen(MyProcPort->cmdline_options) + 1) / 2;

        av = (char **) palloc(maxac * sizeof(char *));
        ac = 0;

        av[ac++] = "postgres";

        /* Note this mangles MyProcPort->cmdline_options */
        pg_split_opts(av, &ac, MyProcPort->cmdline_options);

        av[ac] = NULL;

        Assert(ac < maxac);

        (void) process_postgres_switches(ac, av, gucctx);
    }

    /*
     * Process any additional GUC variable settings passed in startup packet.
     * These are handled exactly like command-line variables.
     */
    if (MyProcPort != NULL)
    {
        ListCell   *gucopts = list_head(MyProcPort->guc_options);

        while (gucopts)
        {
            char       *name;
            char       *value;

            name = lfirst(gucopts);
            gucopts = lnext(gucopts);

            value = lfirst(gucopts);
            gucopts = lnext(gucopts);

            SetConfigOption(name, value, gucctx, PGC_S_CLIENT);
        }
    }

    /* Apply PostAuthDelay as soon as we've read all options */
    if (PostAuthDelay > 0)
        pg_usleep(PostAuthDelay * 1000000L);

    /*
     * Initialize various default states that can't be set up until we've
     * selected the active user and gotten the right GUC settings.
     */

    /* set default namespace search path */
    InitializeSearchPath();

    /* initialize client encoding */
    InitializeClientEncoding();

    /* reset the database for walsender */
    if (am_walsender)
        MyProc->databaseId = MyDatabaseId = InvalidOid;

    /* report this backend in the PgBackendStatus array */
    if (!bootstrap)
        pgstat_bestart();

    /* close the transaction we started above */
    if (!bootstrap)
        CommitTransactionCommand();
}
/*
 * kafka_consume_main
 *
 * Main function for Kafka consumers running as background workers
 */
void
kafka_consume_main(Datum arg)
{
    char        err_msg[512];
    rd_kafka_topic_conf_t *topic_conf;
    rd_kafka_t *kafka;
    rd_kafka_topic_t *topic;
    rd_kafka_message_t **messages;
    const struct rd_kafka_metadata *meta;
    struct rd_kafka_metadata_topic topic_meta;
    rd_kafka_resp_err_t err;
    bool        found;
    Oid         id = DatumGetObjectId(arg);
    ListCell   *lc;
    KafkaConsumerProc *proc = hash_search(consumer_procs, &id, HASH_FIND, &found);
    KafkaConsumer consumer;
    CopyStmt   *copy;
    int         valid_brokers = 0;
    int         i;
    int         my_partitions = 0;

    if (!found)
        elog(ERROR, "kafka consumer %u not found", id);

    pqsignal(SIGTERM, kafka_consume_main_sigterm);
#define BACKTRACE_SEGFAULTS
#ifdef BACKTRACE_SEGFAULTS
    pqsignal(SIGSEGV, debug_segfault);
#endif

    /* we're now ready to receive signals */
    BackgroundWorkerUnblockSignals();

    /* give this proc access to the database */
    BackgroundWorkerInitializeConnection(NameStr(proc->dbname), NULL);

    /* load saved consumer state */
    StartTransactionCommand();
    load_consumer_state(proc->consumer_id, &consumer);
    copy = get_copy_statement(&consumer);

    topic_conf = rd_kafka_topic_conf_new();
    kafka = rd_kafka_new(RD_KAFKA_CONSUMER, NULL, err_msg, sizeof(err_msg));
    rd_kafka_set_logger(kafka, logger);

    /*
     * Add all brokers currently in pipeline_kafka_brokers
     */
    if (consumer.brokers == NIL)
        elog(ERROR, "no valid brokers were found");

    foreach(lc, consumer.brokers)
        valid_brokers += rd_kafka_brokers_add(kafka, lfirst(lc));

    if (!valid_brokers)
        elog(ERROR, "no valid brokers were found");

    /*
     * Set up our topic to read from
     */
    topic = rd_kafka_topic_new(kafka, consumer.topic, topic_conf);
    err = rd_kafka_metadata(kafka, false, topic, &meta, CONSUMER_TIMEOUT);

    if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
        elog(ERROR, "failed to acquire metadata: %s", rd_kafka_err2str(err));

    Assert(meta->topic_cnt == 1);
    topic_meta = meta->topics[0];

    load_consumer_offsets(&consumer, &topic_meta, proc->offset);
    CommitTransactionCommand();

    /*
     * Begin consuming all partitions that this process is responsible for
     */
    for (i = 0; i < topic_meta.partition_cnt; i++)
    {
        int         partition = topic_meta.partitions[i].id;

        Assert(partition <= consumer.num_partitions);
        if (partition % consumer.parallelism != proc->partition_group)
            continue;

        elog(LOG, "[kafka consumer] %s <- %s consuming partition %d from offset %ld",
             consumer.rel->relname, consumer.topic, partition,
             consumer.offsets[partition]);

        if (rd_kafka_consume_start(topic, partition,
                                   consumer.offsets[partition]) == -1)
            elog(ERROR, "failed to start consuming: %s",
                 rd_kafka_err2str(rd_kafka_errno2err(errno)));

        my_partitions++;
    }

    /*
     * No point doing anything if we don't have any partitions assigned to us
     */
    if (my_partitions == 0)
    {
        elog(LOG, "[kafka consumer] %s <- %s consumer %d doesn't have any partitions to read from",
             consumer.rel->relname, consumer.topic, MyProcPid);
        goto done;
    }

    /* array of message pointers filled by each consume_batch call */
    messages = palloc0(sizeof(rd_kafka_message_t *) * consumer.batch_size);

    /*
     * Consume messages until we are terminated
     */
    while (!got_sigterm)
    {
        ssize_t     num_consumed;
        int         i;
        int         messages_buffered = 0;
        int         partition;
        StringInfoData buf;
        bool        xact = false;

        for (partition = 0; partition < consumer.num_partitions; partition++)
        {
            if (partition % consumer.parallelism != proc->partition_group)
                continue;

            num_consumed = rd_kafka_consume_batch(topic, partition,
                                                  CONSUMER_TIMEOUT, messages,
                                                  consumer.batch_size);
            if (num_consumed <= 0)
                continue;

            if (!xact)
            {
                StartTransactionCommand();
                xact = true;
            }

            initStringInfo(&buf);
            for (i = 0; i < num_consumed; i++)
            {
                if (messages[i]->payload != NULL)
                {
                    appendBinaryStringInfo(&buf, messages[i]->payload,
                                           messages[i]->len);
                    if (buf.len > 0 && buf.data[buf.len - 1] != '\n')
                        appendStringInfoChar(&buf, '\n');
                    messages_buffered++;
                }
                consumer.offsets[partition] = messages[i]->offset;
                rd_kafka_message_destroy(messages[i]);
            }
        }

        if (!xact)
        {
            pg_usleep(1 * 1000);
            continue;
        }

        /* we don't want to die in the event of any errors */
        PG_TRY();
        {
            if (messages_buffered)
                execute_copy(copy, &buf);
        }
        PG_CATCH();
        {
            elog(LOG, "[kafka consumer] %s <- %s failed to process batch, dropped %d message%s:",
                 consumer.rel->relname, consumer.topic,
                 (int) num_consumed, (num_consumed == 1 ? "" : "s"));
            EmitErrorReport();
            FlushErrorState();

            AbortCurrentTransaction();
            xact = false;
        }
        PG_END_TRY();

        if (!xact)
            StartTransactionCommand();

        if (messages_buffered)
            save_consumer_state(&consumer, proc->partition_group);

        CommitTransactionCommand();
    }

done:
    hash_search(consumer_procs, &id, HASH_REMOVE, NULL);

    rd_kafka_topic_destroy(topic);
    rd_kafka_destroy(kafka);
    rd_kafka_wait_destroyed(CONSUMER_TIMEOUT);
}
/* --------------------------------
 * InitPostgres
 *      Initialize POSTGRES.
 *
 * The database can be specified by name, using the in_dbname parameter, or by
 * OID, using the dboid parameter.  In the latter case, the actual database
 * name can be returned to the caller in out_dbname.  If out_dbname isn't
 * NULL, it must point to a buffer of size NAMEDATALEN.
 *
 * In bootstrap mode no parameters are used.
 *
 * As of PostgreSQL 8.2, we expect InitProcess() was already called, so we
 * already have a PGPROC struct ... but it's not filled in yet.
 *
 * Note:
 *      Be very careful with the order of calls in the InitPostgres function.
 * --------------------------------
 */
void
InitPostgres(const char *in_dbname, Oid dboid, const char *username,
             char *out_dbname)
{
    bool        bootstrap = IsBootstrapProcessingMode();
    bool        autovacuum = IsAutoVacuumProcess();
    bool        am_superuser;
    char       *fullpath;
    char        dbname[NAMEDATALEN];

    /*
     * Add my PGPROC struct to the ProcArray.
     *
     * Once I have done this, I am visible to other backends!
     */
    InitProcessPhase2();

    /* Initialize SessionState entry */
    SessionState_Init();
    /* Initialize memory protection */
    GPMemoryProtect_Init();

    /*
     * Initialize my entry in the shared-invalidation manager's array of
     * per-backend data.
     *
     * Sets up MyBackendId, a unique backend identifier.
     */
    MyBackendId = InvalidBackendId;

    SharedInvalBackendInit(false);

    if (MyBackendId > MaxBackends || MyBackendId <= 0)
        elog(FATAL, "bad backend id: %d", MyBackendId);

    /* Now that we have a BackendId, we can participate in ProcSignal */
    ProcSignalInit(MyBackendId);

    /*
     * bufmgr needs another initialization call too
     */
    InitBufferPoolBackend();

    /*
     * Initialize local process's access to XLOG.  In bootstrap case we may
     * skip this since StartupXLOG() was run instead.
     */
    if (!bootstrap)
        InitXLOGAccess();

    /*
     * Initialize the relation cache and the system catalog caches.  Note
     * that no catalog access happens here; we only set up the hashtable
     * structure.  We must do this before starting a transaction because
     * transaction abort would try to touch these hashtables.
     */
    RelationCacheInitialize();
    InitCatalogCache();

    /* Initialize portal manager */
    EnablePortalManager();

    /* Initialize stats collection --- must happen before first xact */
    if (!bootstrap)
        pgstat_initialize();

    /*
     * Load relcache entries for the shared system catalogs.  This must
     * create at least entries for pg_database and catalogs used for
     * authentication.
     */
    RelationCacheInitializePhase2();

    /*
     * Set up process-exit callback to do pre-shutdown cleanup.  This has to
     * be after we've initialized all the low-level modules like the buffer
     * manager, because during shutdown this has to run before the low-level
     * modules start to close down.  On the other hand, we want it in place
     * before we begin our first transaction --- if we fail during the
     * initialization transaction, as is entirely possible, we need the
     * AbortTransaction call to clean up.
     */
    on_shmem_exit(ShutdownPostgres, 0);

    /* TODO: autovacuum launcher should be done here? */

    /*
     * Start a new transaction here before first access to db, and get a
     * snapshot.  We don't have a use for the snapshot itself, but we're
     * interested in the secondary effect that it sets RecentGlobalXmin.
     */
    if (!bootstrap)
    {
        StartTransactionCommand();
        (void) GetTransactionSnapshot();
    }

    /*
     * Figure out our postgres user id, and see if we are a superuser.
     *
     * In standalone mode and in the autovacuum process, we use a fixed id,
     * otherwise we figure it out from the authenticated user name.
     */
    if (bootstrap || autovacuum)
    {
        InitializeSessionUserIdStandalone();
        am_superuser = true;
    }
    else if (!IsUnderPostmaster)
    {
        InitializeSessionUserIdStandalone();
        am_superuser = true;
        if (!ThereIsAtLeastOneRole())
            ereport(WARNING,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("no roles are defined in this database system"),
                     errhint("You should immediately run CREATE USER \"%s\" CREATEUSER;.",
                             username)));
    }
    else
    {
        /* normal multiuser case */
        Assert(MyProcPort != NULL);
        PerformAuthentication(MyProcPort);
        InitializeSessionUserId(username);
        am_superuser = superuser();
    }

    /*
     * Check a normal user hasn't connected to a superuser reserved slot.
     */
    if (!am_superuser &&
        ReservedBackends > 0 &&
        !HaveNFreeProcs(ReservedBackends))
        ereport(FATAL,
                (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
                 errmsg("connection limit exceeded for non-superusers"),
                 errSendAlert(true)));

    /*
     * If walsender, we don't want to connect to any particular database.
     * Just finish the backend startup by processing any options from the
     * startup packet, and we're done.
     */
    if (am_walsender)
    {
        Assert(!bootstrap);

        /*
         * We don't have replication role, which existed in postgres.
         */
        if (!superuser())
            ereport(FATAL,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("must be superuser role to start walsender")));

        /* process any options passed in the startup packet */
        if (MyProcPort != NULL)
            process_startup_options(MyProcPort, am_superuser);

        /* Apply PostAuthDelay as soon as we've read all options */
        if (PostAuthDelay > 0)
            pg_usleep(PostAuthDelay * 1000000L);

        /* initialize client encoding */
        InitializeClientEncoding();

        /* report this backend in the PgBackendStatus array */
        pgstat_bestart();

        /* close the transaction we started above */
        CommitTransactionCommand();

        return;
    }

    /*
     * Set up the global variables holding database id and path.  But note we
     * won't actually try to touch the database just yet.
     *
     * We take a shortcut in the bootstrap case, otherwise we have to look up
     * the db name in pg_database.
     */
    if (bootstrap)
    {
        MyDatabaseId = TemplateDbOid;
        MyDatabaseTableSpace = DEFAULTTABLESPACE_OID;
    }
    else if (in_dbname != NULL)
    {
        HeapTuple   tuple;
        Form_pg_database dbform;

        tuple = GetDatabaseTuple(in_dbname);
        if (!HeapTupleIsValid(tuple))
            ereport(FATAL,
                    (errcode(ERRCODE_UNDEFINED_DATABASE),
                     errmsg("database \"%s\" does not exist", in_dbname)));
        dbform = (Form_pg_database) GETSTRUCT(tuple);
        MyDatabaseId = HeapTupleGetOid(tuple);
        MyDatabaseTableSpace = dbform->dattablespace;
        /* take database name from the caller, just for paranoia */
        strlcpy(dbname, in_dbname, sizeof(dbname));
        pfree(tuple);
    }
    else
    {
        /* caller specified database by OID */
        HeapTuple   tuple;
        Form_pg_database dbform;

        tuple = GetDatabaseTupleByOid(dboid);
        if (!HeapTupleIsValid(tuple))
            ereport(FATAL,
                    (errcode(ERRCODE_UNDEFINED_DATABASE),
                     errmsg("database %u does not exist", dboid)));
        dbform = (Form_pg_database) GETSTRUCT(tuple);
        MyDatabaseId = HeapTupleGetOid(tuple);
        MyDatabaseTableSpace = dbform->dattablespace;
        Assert(MyDatabaseId == dboid);
        strlcpy(dbname, NameStr(dbform->datname), sizeof(dbname));
        /* pass the database name back to the caller */
        if (out_dbname)
            strcpy(out_dbname, dbname);
        pfree(tuple);
    }

    /* Now we can mark our PGPROC entry with the database ID */
    /* (We assume this is an atomic store so no lock is needed) */
    MyProc->databaseId = MyDatabaseId;

    /*
     * Now, take a writer's lock on the database we are trying to connect to.
     * If there is a concurrently running DROP DATABASE on that database,
     * this will block us until it finishes (and has committed its update of
     * pg_database).
     *
     * Note that the lock is not held long, only until the end of this
     * startup transaction.  This is OK since we are already advertising our
     * use of the database in the PGPROC array; anyone trying a DROP DATABASE
     * after this point will see us there.
     *
     * Note: use of RowExclusiveLock here is reasonable because we envision
     * our session as being a concurrent writer of the database.  If we had a
     * way of declaring a session as being guaranteed-read-only, we could use
     * AccessShareLock for such sessions and thereby not conflict against
     * CREATE DATABASE.
     */
    if (!bootstrap)
        LockSharedObject(DatabaseRelationId, MyDatabaseId, 0,
                         RowExclusiveLock);

    /*
     * Recheck pg_database to make sure the target database hasn't gone away.
     * If there was a concurrent DROP DATABASE, this ensures we will die
     * cleanly without creating a mess.
     */
    if (!bootstrap)
    {
        HeapTuple   tuple;

        tuple = GetDatabaseTuple(dbname);
        if (!HeapTupleIsValid(tuple) ||
            MyDatabaseId != HeapTupleGetOid(tuple) ||
            MyDatabaseTableSpace != ((Form_pg_database) GETSTRUCT(tuple))->dattablespace)
            ereport(FATAL,
                    (errcode(ERRCODE_UNDEFINED_DATABASE),
                     errmsg("database \"%s\" does not exist", dbname),
                     errdetail("It seems to have just been dropped or renamed.")));
    }

    fullpath = GetDatabasePath(MyDatabaseId, MyDatabaseTableSpace);

    if (!bootstrap)
    {
        if (access(fullpath, F_OK) == -1)
        {
            if (errno == ENOENT)
                ereport(FATAL,
                        (errcode(ERRCODE_UNDEFINED_DATABASE),
                         errmsg("database \"%s\" does not exist", dbname),
                         errdetail("The database subdirectory \"%s\" is missing.",
                                   fullpath)));
            else
                ereport(FATAL,
                        (errcode_for_file_access(),
                         errmsg("could not access directory \"%s\": %m",
                                fullpath)));
        }

        ValidatePgVersion(fullpath);
    }

    SetDatabasePath(fullpath);

    /*
     * It's now possible to do real access to the system catalogs.
     *
     * Load relcache entries for the system catalogs.  This must create at
     * least the minimum set of "nailed-in" cache entries.
     */
    RelationCacheInitializePhase3();

    /*
     * Now we have full access to catalog including toast tables, we can
     * process pg_authid.rolconfig.  This ought to come before processing
     * startup options so that it can override the settings.
     */
    if (!bootstrap)
        ProcessRoleGUC();

    /* set up ACL framework (so CheckMyDatabase can check permissions) */
    initialize_acl();

    /*
     * Re-read the pg_database row for our database, check permissions and
     * set up database-specific GUC settings.  We can't do this until all the
     * database-access infrastructure is up.  (Also, it wants to know if the
     * user is a superuser, so the above stuff has to happen first.)
     */
    if (!bootstrap)
        CheckMyDatabase(dbname, am_superuser);

    /*
     * Now process any command-line switches and any additional GUC variable
     * settings passed in the startup packet.  We couldn't do this before
     * because we didn't know if client is a superuser.
     */
    if (MyProcPort != NULL)
        process_startup_options(MyProcPort, am_superuser);

    /*
     * Maintenance Mode: allow superuser to connect when the
     * gp_maintenance_conn GUC is set.  We cannot check it until
     * process_startup_options parses the GUC.
     */
    if (gp_maintenance_mode && Gp_role == GP_ROLE_DISPATCH &&
        !(superuser() && gp_maintenance_conn))
        ereport(FATAL,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("maintenance mode: connected by superuser only"),
                 errSendAlert(false)));

    /*
     * MPP: If we were started in utility mode then we only want to allow
     * incoming sessions that specify gp_session_role=utility as well.  This
     * lets the bash scripts start the QD in utility mode and connect in, but
     * protects us from normal clients who might be trying to connect to the
     * system while we start up.
     */
    if ((Gp_role == GP_ROLE_UTILITY) && (Gp_session_role != GP_ROLE_UTILITY))
    {
        ereport(FATAL,
                (errcode(ERRCODE_CANNOT_CONNECT_NOW),
                 errmsg("System was started in master-only utility mode - only utility mode connections are allowed")));
    }

    /* Apply PostAuthDelay as soon as we've read all options */
    if (PostAuthDelay > 0)
        pg_usleep(PostAuthDelay * 1000000L);

    /* set default namespace search path */
    InitializeSearchPath();

    /* initialize client encoding */
    InitializeClientEncoding();

    /* report this backend in the PgBackendStatus array */
    if (!bootstrap)
        pgstat_bestart();

    /*
     * MPP package setup
     *
     * The primary function is to establish connections to the qExecs.  This
     * is SKIPPED when the database is in bootstrap mode or is not under the
     * postmaster.
     */
    if (!bootstrap && IsUnderPostmaster)
    {
        cdb_setup();
        on_proc_exit(cdb_cleanup, 0);
    }

    /*
     * MPP SharedSnapshot setup
     */
    if (Gp_role == GP_ROLE_DISPATCH)
    {
        addSharedSnapshot("Query Dispatcher", gp_session_id);
    }
    else if (Gp_role == GP_ROLE_DISPATCHAGENT)
    {
        SharedLocalSnapshotSlot = NULL;
    }
    else if (Gp_segment == -1 && Gp_role == GP_ROLE_EXECUTE && !Gp_is_writer)
    {
        /*
         * Entry db singleton QE is a user of the shared snapshot -- not a
         * creator.  The lookup will occur once the distributed snapshot has
         * been received.
         */
        lookupSharedSnapshot("Entry DB Singleton", "Query Dispatcher",
                             gp_session_id);
    }
    else if (Gp_role == GP_ROLE_EXECUTE)
    {
        if (Gp_is_writer)
        {
            addSharedSnapshot("Writer qExec", gp_session_id);
        }
        else
        {
            /*
             * NOTE: This assumes that the slot has already been allocated by
             * the writer.  Need to make sure we always allocate the writer
             * qExec first.
             */
            lookupSharedSnapshot("Reader qExec", "Writer qExec",
                                 gp_session_id);
        }
    }

    /* close the transaction we started above */
    if (!bootstrap)
        CommitTransactionCommand();

    return;
}
/* --------------------------------
 * InitPostgres
 *      Initialize POSTGRES.
 *
 * The database can be specified by name, using the in_dbname parameter, or by
 * OID, using the dboid parameter.  In the latter case, the computed database
 * name is passed out to the caller as a palloc'ed string in out_dbname.
 *
 * In bootstrap mode no parameters are used.
 *
 * The return value indicates whether the userID is a superuser.  (That
 * can only be tested inside a transaction, so we want to do it during
 * the startup transaction rather than doing a separate one in postgres.c.)
 *
 * As of PostgreSQL 8.2, we expect InitProcess() was already called, so we
 * already have a PGPROC struct ... but it's not filled in yet.
 *
 * Note:
 *      Be very careful with the order of calls in the InitPostgres function.
 * --------------------------------
 */
bool
InitPostgres(const char *in_dbname, Oid dboid, const char *username,
             char **out_dbname)
{
    bool        bootstrap = IsBootstrapProcessingMode();
    bool        autovacuum = IsAutoVacuumWorkerProcess();
    bool        am_superuser;
    char       *fullpath;
    char        dbname[NAMEDATALEN];

    /*
     * Set up the global variables holding database id and path.  But note we
     * won't actually try to touch the database just yet.
     *
     * We take a shortcut in the bootstrap case, otherwise we have to look up
     * the db name in pg_database.
     */
    if (bootstrap)
    {
        MyDatabaseId = TemplateDbOid;
        MyDatabaseTableSpace = DEFAULTTABLESPACE_OID;
    }
    else
    {
        /*
         * Find tablespace of the database we're about to open.  Since we're
         * not yet up and running we have to use one of the hackish
         * FindMyDatabase variants, which look in the flat-file copy of
         * pg_database.
         *
         * If the in_dbname param is NULL, lookup database by OID.
         */
        if (in_dbname == NULL)
        {
            if (!FindMyDatabaseByOid(dboid, dbname, &MyDatabaseTableSpace))
                ereport(FATAL,
                        (errcode(ERRCODE_UNDEFINED_DATABASE),
                         errmsg("database %u does not exist", dboid)));
            MyDatabaseId = dboid;
            /* pass the database name to the caller */
            *out_dbname = pstrdup(dbname);
        }
        else
        {
            if (!FindMyDatabase(in_dbname, &MyDatabaseId,
                                &MyDatabaseTableSpace))
                ereport(FATAL,
                        (errcode(ERRCODE_UNDEFINED_DATABASE),
                         errmsg("database \"%s\" does not exist",
                                in_dbname)));
            /* our database name is gotten from the caller */
            strlcpy(dbname, in_dbname, NAMEDATALEN);
        }
    }

    fullpath = GetDatabasePath(MyDatabaseId, MyDatabaseTableSpace);

    SetDatabasePath(fullpath);

    /*
     * Finish filling in the PGPROC struct, and add it to the ProcArray.  (We
     * need to know MyDatabaseId before we can do this, since it's entered
     * into the PGPROC struct.)
     *
     * Once I have done this, I am visible to other backends!
     */
    InitProcessPhase2();

    /*
     * Initialize my entry in the shared-invalidation manager's array of
     * per-backend data.
     *
     * Sets up MyBackendId, a unique backend identifier.
     */
    MyBackendId = InvalidBackendId;

    SharedInvalBackendInit();

    if (MyBackendId > MaxBackends || MyBackendId <= 0)
        elog(FATAL, "bad backend id: %d", MyBackendId);

    /*
     * bufmgr needs another initialization call too
     */
    InitBufferPoolBackend();

    /*
     * Initialize local process's access to XLOG.  In bootstrap case we may
     * skip this since StartupXLOG() was run instead.
     */
    if (!bootstrap)
        InitXLOGAccess();

    /*
     * Initialize the relation cache and the system catalog caches.  Note
     * that no catalog access happens here; we only set up the hashtable
     * structure.  We must do this before starting a transaction because
     * transaction abort would try to touch these hashtables.
     */
    RelationCacheInitialize();
    InitCatalogCache();
    InitPlanCache();

    /* Initialize portal manager */
    EnablePortalManager();

    /* Initialize stats collection --- must happen before first xact */
    if (!bootstrap)
        pgstat_initialize();

    /*
     * Set up process-exit callback to do pre-shutdown cleanup.  This has to
     * be after we've initialized all the low-level modules like the buffer
     * manager, because during shutdown this has to run before the low-level
     * modules start to close down.  On the other hand, we want it in place
     * before we begin our first transaction --- if we fail during the
     * initialization transaction, as is entirely possible, we need the
     * AbortTransaction call to clean up.
     */
    on_shmem_exit(ShutdownPostgres, 0);

    /*
     * Start a new transaction here before first access to db, and get a
     * snapshot.  We don't have a use for the snapshot itself, but we're
     * interested in the secondary effect that it sets RecentGlobalXmin.
     */
    if (!bootstrap)
    {
        StartTransactionCommand();
        (void) GetTransactionSnapshot();
    }

    /*
     * Now that we have a transaction, we can take locks.  Take a writer's
     * lock on the database we are trying to connect to.  If there is a
     * concurrently running DROP DATABASE on that database, this will block
     * us until it finishes (and has updated the flat file copy of
     * pg_database).
     *
     * Note that the lock is not held long, only until the end of this
     * startup transaction.  This is OK since we are already advertising our
     * use of the database in the PGPROC array; anyone trying a DROP DATABASE
     * after this point will see us there.
     *
     * Note: use of RowExclusiveLock here is reasonable because we envision
     * our session as being a concurrent writer of the database.  If we had a
     * way of declaring a session as being guaranteed-read-only, we could use
     * AccessShareLock for such sessions and thereby not conflict against
     * CREATE DATABASE.
     */
    if (!bootstrap)
        LockSharedObject(DatabaseRelationId, MyDatabaseId, 0,
                         RowExclusiveLock);

    /*
     * Recheck the flat file copy of pg_database to make sure the target
     * database hasn't gone away.  If there was a concurrent DROP DATABASE,
     * this ensures we will die cleanly without creating a mess.
     */
    if (!bootstrap)
    {
        Oid         dbid2;
        Oid         tsid2;

        if (!FindMyDatabase(dbname, &dbid2, &tsid2) ||
            dbid2 != MyDatabaseId ||
            tsid2 != MyDatabaseTableSpace)
            ereport(FATAL,
                    (errcode(ERRCODE_UNDEFINED_DATABASE),
                     errmsg("database \"%s\" does not exist", dbname),
                     errdetail("It seems to have just been dropped or renamed.")));
    }

    /*
     * Now we should be able to access the database directory safely.  Verify
     * it's there and looks reasonable.
     */
    if (!bootstrap)
    {
        if (access(fullpath, F_OK) == -1)
        {
            if (errno == ENOENT)
                ereport(FATAL,
                        (errcode(ERRCODE_UNDEFINED_DATABASE),
                         errmsg("database \"%s\" does not exist", dbname),
                         errdetail("The database subdirectory \"%s\" is missing.",
                                   fullpath)));
            else
                ereport(FATAL,
                        (errcode_for_file_access(),
                         errmsg("could not access directory \"%s\": %m",
                                fullpath)));
        }

        ValidatePgVersion(fullpath);
    }

    /*
     * It's now possible to do real access to the system catalogs.
     *
     * Load relcache entries for the system catalogs.  This must create at
     * least the minimum set of "nailed-in" cache entries.
     */
    RelationCacheInitializePhase2();

    /*
     * Figure out our postgres user id, and see if we are a superuser.
     *
     * In standalone mode and in the autovacuum process, we use a fixed id,
     * otherwise we figure it out from the authenticated user name.
     */
    if (bootstrap || autovacuum)
    {
        InitializeSessionUserIdStandalone();
        am_superuser = true;
    }
    else if (!IsUnderPostmaster)
    {
        InitializeSessionUserIdStandalone();
        am_superuser = true;
        if (!ThereIsAtLeastOneRole())
            ereport(WARNING,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("no roles are defined in this database system"),
                     errhint("You should immediately run CREATE USER \"%s\" CREATEUSER;.",
                             username)));
    }
    else
    {
        /* normal multiuser case */
        InitializeSessionUserId(username);
        am_superuser = superuser();
    }

    /* set up ACL framework (so CheckMyDatabase can check permissions) */
    initialize_acl();

    /*
     * Read the real pg_database row for our database, check permissions and
     * set up database-specific GUC settings.  We can't do this until all the
     * database-access infrastructure is up.  (Also, it wants to know if the
     * user is a superuser, so the above stuff has to happen first.)
     */
    if (!bootstrap)
        CheckMyDatabase(dbname, am_superuser);

    /*
     * If we're trying to shut down, only superusers can connect.
     */
    if (!am_superuser &&
        MyProcPort != NULL &&
        MyProcPort->canAcceptConnections == CAC_WAITBACKUP)
        ereport(FATAL,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("must be superuser to connect during database shutdown")));

    /*
     * Check a normal user hasn't connected to a superuser reserved slot.
     */
    if (!am_superuser &&
        ReservedBackends > 0 &&
        !HaveNFreeProcs(ReservedBackends))
        ereport(FATAL,
                (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
                 errmsg("connection limit exceeded for non-superusers")));

    /*
     * Initialize various default states that can't be set up until we've
     * selected the active user and gotten the right GUC settings.
     */

    /* set default namespace search path */
    InitializeSearchPath();

    /* initialize client encoding */
    InitializeClientEncoding();

    /* report this backend in the PgBackendStatus array */
    if (!bootstrap)
        pgstat_bestart();

    /* close the transaction we started above */
    if (!bootstrap)
        CommitTransactionCommand();

    return am_superuser;
}
/*
 * Worker logic.
 */
void
wed_worker_main(Datum main_arg)
{
    StringInfoData buf;

    /* Establish signal handlers before unblocking signals. */
    pqsignal(SIGHUP, wed_worker_sighup);
    pqsignal(SIGTERM, wed_worker_sigterm);

    /* We're now ready to receive signals */
    BackgroundWorkerUnblockSignals();

    /* Connect to our database */
    BackgroundWorkerInitializeConnection(wed_worker_db_name, NULL);

    elog(LOG, "%s initialized in: %s",
         MyBgworkerEntry->bgw_name, wed_worker_db_name);

    initStringInfo(&buf);
    appendStringInfo(&buf, "SELECT trcheck()");

    /*
     * Main loop: do this until the SIGTERM handler tells us to terminate
     */
    while (!got_sigterm)
    {
        int         ret;
        int         rc;

        /*
         * Background workers mustn't call usleep() or any direct equivalent:
         * instead, they may wait on their process latch, which sleeps as
         * necessary, but is awakened if postmaster dies.  That way the
         * background process goes away immediately in an emergency.
         */
        rc = WaitLatch(&MyProc->procLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                       wed_worker_naptime * 1000L);
        ResetLatch(&MyProc->procLatch);

        /* emergency bailout if postmaster has died */
        if (rc & WL_POSTMASTER_DEATH)
            proc_exit(1);

        /*
         * In case of a SIGHUP, just reload the configuration.
         */
        if (got_sighup)
        {
            got_sighup = false;
            ProcessConfigFile(PGC_SIGHUP);
        }

        /*
         * Start a transaction on which we can run queries.  Note that each
         * StartTransactionCommand() call should be preceded by a
         * SetCurrentStatementStartTimestamp() call, which sets both the time
         * for the statement we're about to run, and also the transaction
         * start time.  Also, each other query sent to SPI should probably be
         * preceded by SetCurrentStatementStartTimestamp(), so that statement
         * start time is always up to date.
         *
         * The SPI_connect() call lets us run queries through the SPI
         * manager, and the PushActiveSnapshot() call creates an "active"
         * snapshot, which is necessary for queries to have MVCC data to
         * work on.
         *
         * The pgstat_report_activity() call makes our activity visible
         * through the pgstat views.
         */
        SetCurrentStatementStartTimestamp();
        StartTransactionCommand();
        SPI_connect();
        PushActiveSnapshot(GetTransactionSnapshot());
        pgstat_report_activity(STATE_RUNNING, buf.data);

        /* We can now execute queries via SPI */
        ret = SPI_execute(buf.data, false, 0);

        if (ret != SPI_OK_SELECT)
            elog(FATAL, "trcheck() call failed: error code %d", ret);

        elog(LOG, "%s : trcheck() done !", MyBgworkerEntry->bgw_name);

        /*
         * And finish our transaction.
         */
        SPI_finish();
        PopActiveSnapshot();
        CommitTransactionCommand();
        pgstat_report_activity(STATE_IDLE, NULL);
    }

    proc_exit(1);
}
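/*
 * Sketch (not from the original sources): the handlers installed above
 * typically just set a flag and poke the process latch so the WaitLatch()
 * call in the main loop wakes up promptly.  got_sighup and got_sigterm are
 * assumed to be file-scope volatile sig_atomic_t flags, as in the standard
 * worker_spi pattern.
 */
static void
wed_worker_sighup(SIGNAL_ARGS)
{
    int         save_errno = errno;

    got_sighup = true;
    if (MyProc)
        SetLatch(&MyProc->procLatch);

    errno = save_errno;
}

static void
wed_worker_sigterm(SIGNAL_ARGS)
{
    int         save_errno = errno;

    got_sigterm = true;
    if (MyProc)
        SetLatch(&MyProc->procLatch);

    errno = save_errno;
}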
/* * -------------------------------------------------------------- * ProcessIncomingNotify * * Deal with arriving NOTIFYs from other backends. * This is called either directly from the SIGUSR2 signal handler, * or the next time control reaches the outer idle loop. * Scan pg_listener for arriving notifies, report them to my front end, * and clear the notification field in pg_listener until next time. * * NOTE: since we are outside any transaction, we must create our own. * -------------------------------------------------------------- */ static void ProcessIncomingNotify(void) { Relation lRel; TupleDesc tdesc; ScanKeyData key[1]; HeapScanDesc scan; HeapTuple lTuple, rTuple; Datum value[Natts_pg_listener]; char repl[Natts_pg_listener], nulls[Natts_pg_listener]; bool catchup_enabled; /* Must prevent SIGUSR1 interrupt while I am running */ catchup_enabled = DisableCatchupInterrupt(); if (Trace_notify) elog(DEBUG1, "ProcessIncomingNotify"); set_ps_display("notify interrupt", false); notifyInterruptOccurred = 0; StartTransactionCommand(); lRel = heap_open(ListenerRelationId, ExclusiveLock); tdesc = RelationGetDescr(lRel); /* Scan only entries with my listenerPID */ ScanKeyInit(&key[0], Anum_pg_listener_pid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(MyProcPid)); scan = heap_beginscan(lRel, SnapshotNow, 1, key); /* Prepare data for rewriting 0 into notification field */ nulls[0] = nulls[1] = nulls[2] = ' '; repl[0] = repl[1] = repl[2] = ' '; repl[Anum_pg_listener_notify - 1] = 'r'; value[0] = value[1] = value[2] = (Datum) 0; value[Anum_pg_listener_notify - 1] = Int32GetDatum(0); while ((lTuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(lTuple); char *relname = NameStr(listener->relname); int32 sourcePID = listener->notification; if (sourcePID != 0) { /* Notify the frontend */ if (Trace_notify) elog(DEBUG1, "ProcessIncomingNotify: received %s from %d", relname, (int) sourcePID); NotifyMyFrontEnd(relname, sourcePID); /* * Rewrite the tuple with 0 in notification column. * * simple_heap_update is safe here because no one else would have * tried to UNLISTEN us, so there can be no uncommitted changes. */ rTuple = heap_modifytuple(lTuple, tdesc, value, nulls, repl); simple_heap_update(lRel, &lTuple->t_self, rTuple); #ifdef NOT_USED /* currently there are no indexes */ CatalogUpdateIndexes(lRel, rTuple); #endif } } heap_endscan(scan); /* * We do NOT release the lock on pg_listener here; we need to hold it * until end of transaction (which is about to happen, anyway) to ensure * that other backends see our tuple updates when they look. Otherwise, a * transaction started after this one might mistakenly think it doesn't * need to send this backend a new NOTIFY. */ heap_close(lRel, NoLock); CommitTransactionCommand(); /* * Must flush the notify messages to ensure frontend gets them promptly. */ pq_flush(); set_ps_display("idle", false); if (Trace_notify) elog(DEBUG1, "ProcessIncomingNotify: done"); if (catchup_enabled) EnableCatchupInterrupt(); }
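/*
 * For reference: the scan above walks the (long-gone) pg_listener catalog.
 * A row layout consistent with the Natts_/Anum_ constants used in that code
 * is sketched below.  The field order and the listenerpid name are
 * reconstructions from this code, not copied from a catalog header.
 */
typedef struct FormData_pg_listener
{
	NameData	relname;		/* condition name used in LISTEN/NOTIFY */
	int32		listenerpid;	/* PID of the listening backend */
	int32		notification;	/* PID of a notifying backend, or 0 if none */
} FormData_pg_listener;

typedef FormData_pg_listener *Form_pg_listener;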
static void worker_spi_main(Datum main_arg) { /* Register functions for SIGTERM/SIGHUP management */ pqsignal(SIGHUP, worker_spi_sighup); pqsignal(SIGTERM, worker_spi_sigterm); /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); /* Connect to our database */ BackgroundWorkerInitializeConnection("postgres", NULL); while (!got_sigterm) { int ret; int rc; StringInfoData buf; /* * Background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as * necessary, but is awakened if postmaster dies. That way the * background process goes away immediately in an emergency. */ rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 1000L); ResetLatch(&MyProc->procLatch); /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); StartTransactionCommand(); SPI_connect(); PushActiveSnapshot(GetTransactionSnapshot()); initStringInfo(&buf); /* Build the query string */ appendStringInfo(&buf, "SELECT count(*) FROM pg_class;"); ret = SPI_execute(buf.data, true, 0); /* Report an error if the query did not run successfully */ if (ret != SPI_OK_SELECT) elog(FATAL, "SPI_execute failed: error code %d", ret); if (SPI_processed > 0) { int32 count; bool isnull; count = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)); elog(LOG, "Currently %d relations in database", count); } SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); } proc_exit(0); }
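/*
 * Sketch of how a worker like the one above is registered at load time,
 * assuming the 9.3-era bgworker API in which bgw_main is a direct function
 * pointer (from PostgreSQL 10 on, bgw_library_name/bgw_function_name are
 * used instead).
 */
void
_PG_init(void)
{
	BackgroundWorker worker;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
		BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	worker.bgw_main = worker_spi_main;
	worker.bgw_main_arg = (Datum) 0;
	snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi");

	RegisterBackgroundWorker(&worker);
}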
/* * cluster * * Check that the relation is a relation in the appropriate user * ACL. I will use the same security that limits users on the * renamerel() function. * * Check that the index specified is appropriate for the task * (i.e. it's an index over this relation). This is trickier. * * Create a list of all the other indices on this relation. Because * the cluster will wreck all the tids, I'll need to destroy bogus * indices. The user will have to re-create them. Not nice, but * I'm not a nice guy. The alternative is to try some kind of post * destroy re-build. This may be possible. I'll check out what the * index create functions want in the way of parameters. On the other * hand, re-creating n indices may blow out the space. * * Create new (temporary) relations for the base heap and the new * index. * * Exclusively lock the relations. * * Create new clustered index and base heap relation. * */ void cluster(char oldrelname[], char oldindexname[]) { Oid OIDOldHeap, OIDOldIndex, OIDNewHeap; Relation OldHeap, OldIndex; Relation NewHeap; char *NewIndexName; char *szNewHeapName; /* * * I'm going to force all checking back into the commands.c function. * * Get the list of indices for this relation. If the index we want * is among them, do not add it to the 'kill' list, as it will be * handled by the 'clean up' code which commits this transaction. * * I'm not using the SysCache, because this will happen but * once, and the slow way is the sure way in this case. * */ /* * Like vacuum, cluster spans transactions, so I'm going to handle it in * the same way. */ /* matches the StartTransaction in PostgresMain() */ OldHeap = heap_openr(oldrelname); if (!RelationIsValid(OldHeap)) { elog(WARN, "cluster: unknown relation: \"%-.*s\"", NAMEDATALEN, oldrelname); } OIDOldHeap = OldHeap->rd_id; /* Get OID for the index scan */ OldIndex = index_openr(oldindexname); /* Open old index relation */ if (!RelationIsValid(OldIndex)) { elog(WARN, "cluster: unknown index: \"%-.*s\"", NAMEDATALEN, oldindexname); } OIDOldIndex = OldIndex->rd_id; /* OID for the index scan */ heap_close(OldHeap); index_close(OldIndex); /* * I need to build the copies of the heap and the index. The Commit() * between here is *very* bogus. If someone is appending stuff, they will * get the lock after being blocked and add rows which won't be present in * the new table. Bleagh! It'd be best to try and ensure that no-one's * in the tables for the entire duration of this process with a pg_vlock. */ NewHeap = copy_heap(OIDOldHeap); OIDNewHeap = NewHeap->rd_id; szNewHeapName = pstrdup(NewHeap->rd_rel->relname.data); /* Need to do this to make the new heap visible. */ CommandCounterIncrement(); rebuildheap(OIDNewHeap, OIDOldHeap, OIDOldIndex); /* Need to do this to make the new heap visible. */ CommandCounterIncrement(); /* can't be found in the SysCache. */ copy_index(OIDOldIndex, OIDNewHeap); /* No contention with the old */ /* * make this really happen. Flush all the buffers. */ CommitTransactionCommand(); StartTransactionCommand(); /* * Questionable bit here. Because the renamerel destroys all trace of the * pre-existing relation, I'm going to Destroy old, and then rename new * to old. If this fails, it fails, and you lose your old. Tough - say * I. Have good backups! */ /* Here lies the bogosity. The RelationNameGetRelation returns a bad list of TupleDescriptors. Damn. Can't work out why this is. */ heap_destroy(oldrelname); /* AAAAAAAAGH!! 
*/ CommandCounterIncrement(); /* * The Commit flushes all palloced memory, so I have to grab the * New stuff again. This is annoying, but oh heck! */ /* renamerel(szNewHeapName.data, oldrelname); TypeRename(&szNewHeapName, &szOldRelName); sprintf(NewIndexName.data, "temp_%x", OIDOldIndex); renamerel(NewIndexName.data, szOldIndexName.data); */ NewIndexName = palloc(NAMEDATALEN+1); /* XXX */ sprintf(NewIndexName, "temp_%x", OIDOldIndex); renamerel(NewIndexName, oldindexname); }
static void kill_idle_main(Datum main_arg) { StringInfoData buf; /* Register functions for SIGTERM/SIGHUP management */ pqsignal(SIGHUP, kill_idle_sighup); pqsignal(SIGTERM, kill_idle_sigterm); /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); /* Connect to a database */ BackgroundWorkerInitializeConnection("postgres", NULL); /* Build the query to run */ initStringInfo(&buf); kill_idle_build_query(&buf); while (!got_sigterm) { int rc, ret, i; /* Wait the necessary amount of time */ rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, kill_max_idle_time * 1000L, PG_WAIT_EXTENSION); ResetLatch(&MyProc->procLatch); /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); /* Process signals */ if (got_sighup) { int old_interval; /* Save old value of kill interval */ old_interval = kill_max_idle_time; /* Process config file */ ProcessConfigFile(PGC_SIGHUP); got_sighup = false; ereport(LOG, (errmsg("bgworker kill_idle signal: processed SIGHUP"))); /* Rebuild query if necessary */ if (old_interval != kill_max_idle_time) { resetStringInfo(&buf); kill_idle_build_query(&buf); } } if (got_sigterm) { /* Simply exit */ ereport(LOG, (errmsg("bgworker kill_idle signal: processed SIGTERM"))); proc_exit(0); } /* Process idle connection kill */ SetCurrentStatementStartTimestamp(); StartTransactionCommand(); SPI_connect(); PushActiveSnapshot(GetTransactionSnapshot()); pgstat_report_activity(STATE_RUNNING, buf.data); /* Statement start time */ SetCurrentStatementStartTimestamp(); /* Execute query */ ret = SPI_execute(buf.data, false, 0); /* Some error handling */ if (ret != SPI_OK_SELECT) elog(FATAL, "Error when trying to kill idle connections"); /* Walk the results and log each disconnected session */ for (i = 0; i < SPI_processed; i++) { int32 pidValue; bool isnull; char *datname = NULL; char *usename = NULL; char *client_addr = NULL; /* Fetch values */ pidValue = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull)); usename = DatumGetCString(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 3, &isnull)); datname = DatumGetCString(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 4, &isnull)); client_addr = DatumGetCString(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 5, &isnull)); /* Log what has been disconnected */ elog(LOG, "Disconnected idle connection: PID %d %s/%s/%s", pidValue, datname ? datname : "none", usename ? usename : "none", client_addr ? client_addr : "none"); } SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); pgstat_report_activity(STATE_IDLE, NULL); } /* No problems, so clean exit */ proc_exit(0); }
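/*
 * kill_idle_build_query() is referenced above but not shown.  A plausible
 * sketch, under the assumption that it terminates sessions idle for longer
 * than kill_max_idle_time seconds via pg_terminate_backend() and returns
 * the columns the loop above fetches (1 = pid, 3 = usename, 4 = datname,
 * 5 = client_addr; column 2 would be the pg_terminate_backend result):
 */
static void
kill_idle_build_query(StringInfoData *buf)
{
	appendStringInfo(buf,
					 "SELECT pid, pg_terminate_backend(pid), "
					 "usename, datname, coalesce(host(client_addr), 'local') "
					 "FROM pg_stat_activity "
					 "WHERE state = 'idle' "
					 "AND extract(epoch from now() - state_change) > %d "
					 "AND pid != pg_backend_pid();",
					 kill_max_idle_time);
}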
/* * ContinuousQueryWorkerRun * * Runs a CQ worker, which continuously generates partial query results to send * back to the combiner process. */ void ContinuousQueryWorkerRun(Portal portal, ContinuousViewState *state, QueryDesc *queryDesc, ResourceOwner owner) { EState *estate = NULL; DestReceiver *dest; CmdType operation; MemoryContext oldcontext; int timeoutms = state->maxwaitms; MemoryContext runcontext; CQProcEntry *entry = GetCQProcEntry(MyCQId); ResourceOwner cqowner = ResourceOwnerCreate(NULL, "CQResourceOwner"); bool savereadonly = XactReadOnly; cq_stat_initialize(state->viewid, MyProcPid); dest = CreateDestReceiver(DestCombiner); SetCombinerDestReceiverParams(dest, MyCQId); /* workers only need read-only transactions */ XactReadOnly = true; runcontext = AllocSetContextCreate(TopMemoryContext, "CQRunContext", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); elog(LOG, "\"%s\" worker %d running", queryDesc->plannedstmt->cq_target->relname, MyProcPid); MarkWorkerAsRunning(MyCQId, MyWorkerId); pgstat_report_activity(STATE_RUNNING, queryDesc->sourceText); TupleBufferInitLatch(WorkerTupleBuffer, MyCQId, MyWorkerId, &MyProc->procLatch); oldcontext = MemoryContextSwitchTo(runcontext); retry: PG_TRY(); { bool xact_commit = true; TimestampTz last_process = GetCurrentTimestamp(); TimestampTz last_commit = GetCurrentTimestamp(); start_executor(queryDesc, runcontext, cqowner); CurrentResourceOwner = cqowner; estate = queryDesc->estate; operation = queryDesc->operation; /* * Initialize context that lives for the duration of a single iteration * of the main worker loop */ CQExecutionContext = AllocSetContextCreate(estate->es_query_cxt, "CQExecutionContext", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); estate->es_lastoid = InvalidOid; /* * Startup combiner receiver */ (*dest->rStartup) (dest, operation, queryDesc->tupDesc); for (;;) { if (!TupleBufferHasUnreadSlots()) { if (TimestampDifferenceExceeds(last_process, GetCurrentTimestamp(), state->emptysleepms)) { /* force stats flush */ cq_stat_report(true); pgstat_report_activity(STATE_IDLE, queryDesc->sourceText); TupleBufferWait(WorkerTupleBuffer, MyCQId, MyWorkerId); pgstat_report_activity(STATE_RUNNING, queryDesc->sourceText); } else pg_usleep(Min(WAIT_SLEEP_MS, state->emptysleepms) * 1000); } TupleBufferResetNotify(WorkerTupleBuffer, MyCQId, MyWorkerId); if (xact_commit) StartTransactionCommand(); set_snapshot(estate, cqowner); CurrentResourceOwner = cqowner; MemoryContextSwitchTo(estate->es_query_cxt); estate->es_processed = 0; estate->es_filtered = 0; /* * Run plan on a microbatch */ ExecutePlan(estate, queryDesc->planstate, operation, true, 0, timeoutms, ForwardScanDirection, dest); IncrementCQExecutions(1); TupleBufferClearPinnedSlots(); if (state->long_xact) { if (TimestampDifferenceExceeds(last_commit, GetCurrentTimestamp(), LONG_RUNNING_XACT_DURATION)) xact_commit = true; else xact_commit = false; } unset_snapshot(estate, cqowner); if (xact_commit) { CommitTransactionCommand(); last_commit = GetCurrentTimestamp(); } MemoryContextResetAndDeleteChildren(CQExecutionContext); MemoryContextSwitchTo(runcontext); CurrentResourceOwner = cqowner; if (estate->es_processed || estate->es_filtered) { /* * If the CV query is such that the SELECT does not return any tuples * (e.g. SELECT id WHERE id = 99 when no row with id = 99 exists), then this reset * will fail. 
What happens is that the worker blocks at the latch for every * allocated slot until a CV returns a non-zero tuple, at which point * the worker resumes a simple sleep for the threshold time. */ last_process = GetCurrentTimestamp(); /* * Send stats to the collector */ cq_stat_report(false); } /* Has the CQ been deactivated? */ if (!entry->active) { if (ActiveSnapshotSet()) unset_snapshot(estate, cqowner); if (IsTransactionState()) CommitTransactionCommand(); break; } } CurrentResourceOwner = cqowner; /* * The cleanup functions below expect these things to be registered */ RegisterSnapshotOnOwner(estate->es_snapshot, cqowner); RegisterSnapshotOnOwner(queryDesc->snapshot, cqowner); RegisterSnapshotOnOwner(queryDesc->crosscheck_snapshot, cqowner); /* cleanup */ ExecutorFinish(queryDesc); ExecutorEnd(queryDesc); FreeQueryDesc(queryDesc); } PG_CATCH(); { EmitErrorReport(); FlushErrorState(); /* Since the worker is read-only, we can simply commit the transaction. */ if (ActiveSnapshotSet()) unset_snapshot(estate, cqowner); if (IsTransactionState()) CommitTransactionCommand(); TupleBufferUnpinAllPinnedSlots(); TupleBufferClearReaders(); /* This resets the es_query_ctx and in turn the CQExecutionContext */ MemoryContextResetAndDeleteChildren(runcontext); IncrementCQErrors(1); if (continuous_query_crash_recovery) goto retry; } PG_END_TRY(); (*dest->rShutdown) (dest); MemoryContextSwitchTo(oldcontext); MemoryContextDelete(runcontext); XactReadOnly = savereadonly; /* * Remove proc-level stats */ cq_stat_report(true); cq_stat_send_purge(state->viewid, MyProcPid, CQ_STAT_WORKER); CurrentResourceOwner = owner; }
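/*
 * set_snapshot()/unset_snapshot() are helpers not shown in this excerpt.
 * A minimal sketch consistent with how they are called above (push a fresh
 * transaction snapshot tracked by the CQ resource owner, then pop it again);
 * the real helpers may well do more:
 */
static void
set_snapshot(EState *estate, ResourceOwner owner)
{
	/* register the snapshot with the CQ owner so cleanup can find it */
	estate->es_snapshot = RegisterSnapshotOnOwner(GetTransactionSnapshot(),
												  owner);
	PushActiveSnapshot(estate->es_snapshot);
}

static void
unset_snapshot(EState *estate, ResourceOwner owner)
{
	PopActiveSnapshot();
	UnregisterSnapshotFromOwner(estate->es_snapshot, owner);
	estate->es_snapshot = NULL;
}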
void worker_spi_main(Datum main_arg) { int index = DatumGetInt32(main_arg); worktable *table; StringInfoData buf; char name[20]; table = palloc(sizeof(worktable)); sprintf(name, "schema%d", index); table->schema = pstrdup(name); table->name = pstrdup("counted"); /* Establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, worker_spi_sighup); pqsignal(SIGTERM, worker_spi_sigterm); /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); /* Connect to our database */ BackgroundWorkerInitializeConnection("postgres", NULL); elog(LOG, "%s initialized with %s.%s", MyBgworkerEntry->bgw_name, table->schema, table->name); initialize_worker_spi(table); /* * Quote identifiers passed to us. Note that this must be done after * initialize_worker_spi, because that routine assumes the names are not * quoted. * * Note some memory might be leaked here. */ table->schema = quote_identifier(table->schema); table->name = quote_identifier(table->name); initStringInfo(&buf); appendStringInfo(&buf, "WITH deleted AS (DELETE " "FROM %s.%s " "WHERE type = 'delta' RETURNING value), " "total AS (SELECT coalesce(sum(value), 0) as sum " "FROM deleted) " "UPDATE %s.%s " "SET value = %s.value + total.sum " "FROM total WHERE type = 'total' " "RETURNING %s.value", table->schema, table->name, table->schema, table->name, table->name, table->name); /* * Main loop: do this until the SIGTERM handler tells us to terminate */ while (!got_sigterm) { int ret; int rc; /* * Background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as * necessary, but is awakened if postmaster dies. That way the * background process goes away immediately in an emergency. */ rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, worker_spi_naptime * 1000L); ResetLatch(&MyProc->procLatch); /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); /* * In case of a SIGHUP, just reload the configuration. */ if (got_sighup) { got_sighup = false; ProcessConfigFile(PGC_SIGHUP); } /* * Start a transaction on which we can run queries. Note that each * StartTransactionCommand() call should be preceded by a * SetCurrentStatementStartTimestamp() call, which sets both the time * for the statement we're about to run, and also the transaction * start time. Also, each other query sent to SPI should probably be * preceded by SetCurrentStatementStartTimestamp(), so that statement * start time is always up to date. * * The SPI_connect() call lets us run queries through the SPI manager, * and the PushActiveSnapshot() call creates an "active" snapshot * which is necessary for queries to have MVCC data to work on. * * The pgstat_report_activity() call makes our activity visible * through the pgstat views. */ SetCurrentStatementStartTimestamp(); StartTransactionCommand(); SPI_connect(); PushActiveSnapshot(GetTransactionSnapshot()); pgstat_report_activity(STATE_RUNNING, buf.data); /* We can now execute queries via SPI */ ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_UPDATE_RETURNING) elog(FATAL, "cannot select from table %s.%s: error code %d", table->schema, table->name, ret); if (SPI_processed > 0) { bool isnull; int32 val; val = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)); if (!isnull) elog(LOG, "%s: count in %s.%s is now %d", MyBgworkerEntry->bgw_name, table->schema, table->name, val); } /* * And finish our transaction. 
*/ SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); pgstat_report_activity(STATE_IDLE, NULL); } proc_exit(1); }
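/*
 * initialize_worker_spi() is referenced above but not shown.  A condensed
 * sketch of the work it has to do: create, inside its own transaction, the
 * schema and "counted" table that the main loop's UPDATE ... RETURNING
 * query expects.  The full routine also checks pg_namespace first so the
 * DDL runs only once; that check is omitted here.
 */
static void
initialize_worker_spi(worktable *table)
{
	int			ret;
	StringInfoData buf;

	SetCurrentStatementStartTimestamp();
	StartTransactionCommand();
	SPI_connect();
	PushActiveSnapshot(GetTransactionSnapshot());
	pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");

	initStringInfo(&buf);
	appendStringInfo(&buf,
					 "CREATE SCHEMA \"%s\" "
					 "CREATE TABLE \"%s\" ("
					 "  type text CHECK (type IN ('total', 'delta')), "
					 "  value integer)",
					 table->schema, table->name);

	ret = SPI_execute(buf.data, false, 0);
	if (ret != SPI_OK_UTILITY)
		elog(FATAL, "failed to create my schema");

	SPI_finish();
	PopActiveSnapshot();
	CommitTransactionCommand();
	pgstat_report_activity(STATE_IDLE, NULL);
}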
/* * Primary entry point for VACUUM and ANALYZE commands. * * relid is normally InvalidOid; if it is not, then it provides the relation * OID to be processed, and vacstmt->relation is ignored. (The non-invalid * case is currently only used by autovacuum.) * * do_toast is passed as FALSE by autovacuum, because it processes TOAST * tables separately. * * for_wraparound is used by autovacuum to let us know when it's forcing * a vacuum for wraparound, which should not be auto-cancelled. * * bstrategy is normally given as NULL, but in autovacuum it can be passed * in to use the same buffer strategy object across multiple vacuum() calls. * * isTopLevel should be passed down from ProcessUtility. * * It is the caller's responsibility that vacstmt and bstrategy * (if given) be allocated in a memory context that won't disappear * at transaction commit. */ void vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast, BufferAccessStrategy bstrategy, bool for_wraparound, bool isTopLevel) { const char *stmttype; volatile bool all_rels, in_outer_xact, use_own_xacts; List *relations; /* sanity checks on options */ Assert(vacstmt->options & (VACOPT_VACUUM | VACOPT_ANALYZE)); Assert((vacstmt->options & VACOPT_VACUUM) || !(vacstmt->options & (VACOPT_FULL | VACOPT_FREEZE))); Assert((vacstmt->options & VACOPT_ANALYZE) || vacstmt->va_cols == NIL); stmttype = (vacstmt->options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE"; /* * We cannot run VACUUM inside a user transaction block; if we were inside * a transaction, then our commit- and start-transaction-command calls * would not have the intended effect! There are numerous other subtle * dependencies on this, too. * * ANALYZE (without VACUUM) can run either way. */ if (vacstmt->options & VACOPT_VACUUM) { PreventTransactionChain(isTopLevel, stmttype); in_outer_xact = false; } else in_outer_xact = IsInTransactionChain(isTopLevel); /* * Send info about dead objects to the statistics collector, unless we are * in autovacuum --- autovacuum.c does this for itself. */ if ((vacstmt->options & VACOPT_VACUUM) && !IsAutoVacuumWorkerProcess()) pgstat_vacuum_stat(); /* * Create special memory context for cross-transaction storage. * * Since it is a child of PortalContext, it will go away eventually even * if we suffer an error; there's no need for special abort cleanup logic. */ vac_context = AllocSetContextCreate(PortalContext, "Vacuum", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); /* * If caller didn't give us a buffer strategy object, make one in the * cross-transaction memory context. */ if (bstrategy == NULL) { MemoryContext old_context = MemoryContextSwitchTo(vac_context); bstrategy = GetAccessStrategy(BAS_VACUUM); MemoryContextSwitchTo(old_context); } vac_strategy = bstrategy; /* Remember whether we are processing everything in the DB */ all_rels = (!OidIsValid(relid) && vacstmt->relation == NULL); /* * Build list of relations to process, unless caller gave us one. (If we * build one, we put it in vac_context for safekeeping.) */ relations = get_rel_oids(relid, vacstmt->relation); /* * Decide whether we need to start/commit our own transactions. * * For VACUUM (with or without ANALYZE): always do so, so that we can * release locks as soon as possible. (We could possibly use the outer * transaction for a one-table VACUUM, but handling TOAST tables would be * problematic.) * * For ANALYZE (no VACUUM): if inside a transaction block, we cannot * start/commit our own transactions. Also, there's no need to do so if * only processing one relation. 
For multiple relations when not within a * transaction block, and also in an autovacuum worker, use own * transactions so we can release locks sooner. */ if (vacstmt->options & VACOPT_VACUUM) use_own_xacts = true; else { Assert(vacstmt->options & VACOPT_ANALYZE); if (IsAutoVacuumWorkerProcess()) use_own_xacts = true; else if (in_outer_xact) use_own_xacts = false; else if (list_length(relations) > 1) use_own_xacts = true; else use_own_xacts = false; } /* * vacuum_rel expects to be entered with no transaction active; it will * start and commit its own transaction. But we are called by an SQL * command, and so we are executing inside a transaction already. We * commit the transaction started in PostgresMain() here, and start * another one before exiting to match the commit waiting for us back in * PostgresMain(). */ if (use_own_xacts) { /* ActiveSnapshot is not set by autovacuum */ if (ActiveSnapshotSet()) PopActiveSnapshot(); /* matches the StartTransaction in PostgresMain() */ CommitTransactionCommand(); } /* Turn vacuum cost accounting on or off */ PG_TRY(); { ListCell *cur; VacuumCostActive = (VacuumCostDelay > 0); VacuumCostBalance = 0; /* * Loop to process each selected relation. */ foreach(cur, relations) { Oid relid = lfirst_oid(cur); bool scanned_all = false; if (vacstmt->options & VACOPT_VACUUM) vacuum_rel(relid, vacstmt, do_toast, for_wraparound, &scanned_all); if (vacstmt->options & VACOPT_ANALYZE) { /* * If using separate xacts, start one for analyze. Otherwise, * we can use the outer transaction. */ if (use_own_xacts) { StartTransactionCommand(); /* functions in indexes may want a snapshot set */ PushActiveSnapshot(GetTransactionSnapshot()); } analyze_rel(relid, vacstmt, vac_strategy, !scanned_all); if (use_own_xacts) { PopActiveSnapshot(); CommitTransactionCommand(); } } } }
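/*
 * A condensed outline of the transaction discipline vacuum_rel() is
 * documented to follow above: it is entered with no transaction active,
 * starts its own, and commits before returning, so locks are released per
 * relation.  This is a sketch only; lock acquisition, sanity checks, and
 * TOAST-table recursion are omitted, and the name vacuum_rel_outline is
 * not part of the original code.
 */
static void
vacuum_rel_outline(Oid relid, VacuumStmt *vacstmt)
{
	StartTransactionCommand();

	/* functions in index expressions may want a snapshot set */
	PushActiveSnapshot(GetTransactionSnapshot());

	/* ... open and lock the relation identified by relid, vacuum it ... */

	PopActiveSnapshot();
	CommitTransactionCommand();
}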
/* -------------------------------- * InitPostgres * Initialize POSTGRES. * * The database can be specified by name, using the in_dbname parameter, or by * OID, using the dboid parameter. In the latter case, the actual database * name can be returned to the caller in out_dbname. If out_dbname isn't * NULL, it must point to a buffer of size NAMEDATALEN. * * In bootstrap mode no parameters are used. The autovacuum launcher process * doesn't use any parameters either, because it only goes far enough to be * able to read pg_database; it doesn't connect to any particular database. * In walsender mode only username is used. * * As of PostgreSQL 8.2, we expect InitProcess() was already called, so we * already have a PGPROC struct ... but it's not completely filled in yet. * * Note: * Be very careful with the order of calls in the InitPostgres function. * -------------------------------- */ void InitPostgres(const char *in_dbname, Oid dboid, const char *username, char *out_dbname) { bool bootstrap = IsBootstrapProcessingMode(); bool am_superuser; char *fullpath; char dbname[NAMEDATALEN]; elog(DEBUG3, "InitPostgres"); /* * Add my PGPROC struct to the ProcArray. * * Once I have done this, I am visible to other backends! */ InitProcessPhase2(); /* * Initialize my entry in the shared-invalidation manager's array of * per-backend data. * * Sets up MyBackendId, a unique backend identifier. */ MyBackendId = InvalidBackendId; SharedInvalBackendInit(false); if (MyBackendId > MaxBackends || MyBackendId <= 0) elog(FATAL, "bad backend ID: %d", MyBackendId); /* Now that we have a BackendId, we can participate in ProcSignal */ ProcSignalInit(MyBackendId); /* * Also set up timeout handlers needed for backend operation. We need * these in every case except bootstrap. */ if (!bootstrap) { RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLock); RegisterTimeout(STATEMENT_TIMEOUT, StatementTimeoutHandler); RegisterTimeout(LOCK_TIMEOUT, LockTimeoutHandler); } /* * bufmgr needs another initialization call too */ InitBufferPoolBackend(); /* * Initialize local process's access to XLOG. */ if (IsUnderPostmaster) { /* * The postmaster already started the XLOG machinery, but we need to * call InitXLOGAccess(), if the system isn't in hot-standby mode. * This is handled by calling RecoveryInProgress and ignoring the * result. */ (void) RecoveryInProgress(); } else { /* * We are either a bootstrap process or a standalone backend. Either * way, start up the XLOG machinery, and register to have it closed * down at exit. */ StartupXLOG(); on_shmem_exit(ShutdownXLOG, 0); } /* * Initialize the relation cache and the system catalog caches. Note that * no catalog access happens here; we only set up the hashtable structure. * We must do this before starting a transaction because transaction abort * would try to touch these hashtables. */ RelationCacheInitialize(); InitCatalogCache(); InitPlanCache(); /* Initialize portal manager */ EnablePortalManager(); /* Initialize stats collection --- must happen before first xact */ if (!bootstrap) pgstat_initialize(); /* * Load relcache entries for the shared system catalogs. This must create * at least entries for pg_database and catalogs used for authentication. */ RelationCacheInitializePhase2(); /* * Set up process-exit callback to do pre-shutdown cleanup. This is the * first before_shmem_exit callback we register; thus, this will be the * last thing we do before low-level modules like the buffer manager begin * to close down. 
We need to have this in place before we begin our first * transaction --- if we fail during the initialization transaction, as is * entirely possible, we need the AbortTransaction call to clean up. */ before_shmem_exit(ShutdownPostgres, 0); /* The autovacuum launcher is done here */ if (IsAutoVacuumLauncherProcess()) return; /* * Start a new transaction here before first access to db, and get a * snapshot. We don't have a use for the snapshot itself, but we're * interested in the secondary effect that it sets RecentGlobalXmin. (This * is critical for anything that reads heap pages, because HOT may decide * to prune them even if the process doesn't attempt to modify any * tuples.) */ if (!bootstrap) { /* statement_timestamp must be set for timeouts to work correctly */ SetCurrentStatementStartTimestamp(); StartTransactionCommand(); /* * transaction_isolation will have been set to the default by the * above. If the default is "serializable", and we are in hot * standby, we will fail if we don't change it to something lower. * Fortunately, "read committed" is plenty good enough. */ XactIsoLevel = XACT_READ_COMMITTED; (void) GetTransactionSnapshot(); } /* * Perform client authentication if necessary, then figure out our * postgres user ID, and see if we are a superuser. * * In standalone mode and in autovacuum worker processes, we use a fixed * ID, otherwise we figure it out from the authenticated user name. */ if (bootstrap || IsAutoVacuumWorkerProcess()) { InitializeSessionUserIdStandalone(); am_superuser = true; } else if (!IsUnderPostmaster) { InitializeSessionUserIdStandalone(); am_superuser = true; if (!ThereIsAtLeastOneRole()) ereport(WARNING, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("no roles are defined in this database system"), errhint("You should immediately run CREATE USER \"%s\" SUPERUSER;.", username))); } else if (IsBackgroundWorker) { if (username == NULL) { InitializeSessionUserIdStandalone(); am_superuser = true; } else { InitializeSessionUserId(username); am_superuser = superuser(); } } else { /* normal multiuser case */ Assert(MyProcPort != NULL); PerformAuthentication(MyProcPort); InitializeSessionUserId(username); am_superuser = superuser(); } /* * If we're trying to shut down, only superusers can connect, and new * replication connections are not allowed. */ if ((!am_superuser || am_walsender) && MyProcPort != NULL && MyProcPort->canAcceptConnections == CAC_WAITBACKUP) { if (am_walsender) ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("new replication connections are not allowed during database shutdown"))); else ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to connect during database shutdown"))); } /* * Binary upgrades only allowed super-user connections */ if (IsBinaryUpgrade && !am_superuser) { ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to connect in binary upgrade mode"))); } /* * The last few connections slots are reserved for superusers. Although * replication connections currently require superuser privileges, we * don't allow them to consume the reserved slots, which are intended for * interactive use. */ if ((!am_superuser || am_walsender) && ReservedBackends > 0 && !HaveNFreeProcs(ReservedBackends)) ereport(FATAL, (errcode(ERRCODE_TOO_MANY_CONNECTIONS), errmsg("remaining connection slots are reserved for non-replication superuser connections"))); /* Check replication permissions needed for walsender processes. 
*/ if (am_walsender) { Assert(!bootstrap); if (!superuser() && !has_rolreplication(GetUserId())) ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser or replication role to start walsender"))); } /* * If this is a plain walsender only supporting physical replication, we * don't want to connect to any particular database. Just finish the * backend startup by processing any options from the startup packet, and * we're done. */ if (am_walsender && !am_db_walsender) { /* process any options passed in the startup packet */ if (MyProcPort != NULL) process_startup_options(MyProcPort, am_superuser); /* Apply PostAuthDelay as soon as we've read all options */ if (PostAuthDelay > 0) pg_usleep(PostAuthDelay * 1000000L); /* initialize client encoding */ InitializeClientEncoding(); /* report this backend in the PgBackendStatus array */ pgstat_bestart(); /* close the transaction we started above */ CommitTransactionCommand(); return; } /* * Set up the global variables holding database id and default tablespace. * But note we won't actually try to touch the database just yet. * * We take a shortcut in the bootstrap case, otherwise we have to look up * the db's entry in pg_database. */ if (bootstrap) { MyDatabaseId = TemplateDbOid; MyDatabaseTableSpace = DEFAULTTABLESPACE_OID; } else if (in_dbname != NULL) { HeapTuple tuple; Form_pg_database dbform; tuple = GetDatabaseTuple(in_dbname); if (!HeapTupleIsValid(tuple)) ereport(FATAL, (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("database \"%s\" does not exist", in_dbname))); dbform = (Form_pg_database) GETSTRUCT(tuple); MyDatabaseId = HeapTupleGetOid(tuple); MyDatabaseTableSpace = dbform->dattablespace; /* take database name from the caller, just for paranoia */ strlcpy(dbname, in_dbname, sizeof(dbname)); } else if (OidIsValid(dboid)) { /* caller specified database by OID */ HeapTuple tuple; Form_pg_database dbform; tuple = GetDatabaseTupleByOid(dboid); if (!HeapTupleIsValid(tuple)) ereport(FATAL, (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("database %u does not exist", dboid))); dbform = (Form_pg_database) GETSTRUCT(tuple); MyDatabaseId = HeapTupleGetOid(tuple); MyDatabaseTableSpace = dbform->dattablespace; Assert(MyDatabaseId == dboid); strlcpy(dbname, NameStr(dbform->datname), sizeof(dbname)); /* pass the database name back to the caller */ if (out_dbname) strcpy(out_dbname, dbname); } else { /* * If this is a background worker not bound to any particular * database, we're done now. Everything that follows only makes * sense if we are bound to a specific database. We do need to * close the transaction we started before returning. */ if (!bootstrap) CommitTransactionCommand(); return; } /* * Now, take a writer's lock on the database we are trying to connect to. * If there is a concurrently running DROP DATABASE on that database, this * will block us until it finishes (and has committed its update of * pg_database). * * Note that the lock is not held long, only until the end of this startup * transaction. This is OK since we will advertise our use of the * database in the ProcArray before dropping the lock (in fact, that's the * next thing to do). Anyone trying a DROP DATABASE after this point will * see us in the array once they have the lock. Ordering is important for * this because we don't want to advertise ourselves as being in this * database until we have the lock; otherwise we create what amounts to a * deadlock with CountOtherDBBackends(). 
* * Note: use of RowExclusiveLock here is reasonable because we envision * our session as being a concurrent writer of the database. If we had a * way of declaring a session as being guaranteed-read-only, we could use * AccessShareLock for such sessions and thereby not conflict against * CREATE DATABASE. */ if (!bootstrap) LockSharedObject(DatabaseRelationId, MyDatabaseId, 0, RowExclusiveLock); /* * Now we can mark our PGPROC entry with the database ID. * * We assume this is an atomic store so no lock is needed; though actually * things would work fine even if it weren't atomic. Anyone searching the * ProcArray for this database's ID should hold the database lock, so they * would not be executing concurrently with this store. A process looking * for another database's ID could in theory see a chance match if it read * a partially-updated databaseId value; but as long as all such searches * wait and retry, as in CountOtherDBBackends(), they will certainly see * the correct value on their next try. */ MyProc->databaseId = MyDatabaseId; /* * We established a catalog snapshot while reading pg_authid and/or * pg_database; but until we have set up MyDatabaseId, we won't react to * incoming sinval messages for unshared catalogs, so we won't realize it * if the snapshot has been invalidated. Assume it's no good anymore. */ InvalidateCatalogSnapshot(); /* * Recheck pg_database to make sure the target database hasn't gone away. * If there was a concurrent DROP DATABASE, this ensures we will die * cleanly without creating a mess. */ if (!bootstrap) { HeapTuple tuple; tuple = GetDatabaseTuple(dbname); if (!HeapTupleIsValid(tuple) || MyDatabaseId != HeapTupleGetOid(tuple) || MyDatabaseTableSpace != ((Form_pg_database) GETSTRUCT(tuple))->dattablespace) ereport(FATAL, (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("database \"%s\" does not exist", dbname), errdetail("It seems to have just been dropped or renamed."))); } /* * Now we should be able to access the database directory safely. Verify * it's there and looks reasonable. */ fullpath = GetDatabasePath(MyDatabaseId, MyDatabaseTableSpace); if (!bootstrap) { if (access(fullpath, F_OK) == -1) { if (errno == ENOENT) ereport(FATAL, (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("database \"%s\" does not exist", dbname), errdetail("The database subdirectory \"%s\" is missing.", fullpath))); else ereport(FATAL, (errcode_for_file_access(), errmsg("could not access directory \"%s\": %m", fullpath))); } ValidatePgVersion(fullpath); } SetDatabasePath(fullpath); /* * It's now possible to do real access to the system catalogs. * * Load relcache entries for the system catalogs. This must create at * least the minimum set of "nailed-in" cache entries. */ RelationCacheInitializePhase3(); /* set up ACL framework (so CheckMyDatabase can check permissions) */ initialize_acl(); /* * Re-read the pg_database row for our database, check permissions and set * up database-specific GUC settings. We can't do this until all the * database-access infrastructure is up. (Also, it wants to know if the * user is a superuser, so the above stuff has to happen first.) */ if (!bootstrap) CheckMyDatabase(dbname, am_superuser); /* * Now process any command-line switches and any additional GUC variable * settings passed in the startup packet. We couldn't do this before * because we didn't know if client is a superuser. 
*/ if (MyProcPort != NULL) process_startup_options(MyProcPort, am_superuser); /* Process pg_db_role_setting options */ process_settings(MyDatabaseId, GetSessionUserId()); /* Apply PostAuthDelay as soon as we've read all options */ if (PostAuthDelay > 0) pg_usleep(PostAuthDelay * 1000000L); /* * Initialize various default states that can't be set up until we've * selected the active user and gotten the right GUC settings. */ /* set default namespace search path */ InitializeSearchPath(); /* initialize client encoding */ InitializeClientEncoding(); /* report this backend in the PgBackendStatus array */ if (!bootstrap) pgstat_bestart(); /* close the transaction we started above */ if (!bootstrap) CommitTransactionCommand(); }
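/*
 * For context: the background worker examples above reach InitPostgres()
 * through BackgroundWorkerInitializeConnection().  A condensed sketch of
 * that wrapper (error checks trimmed; the exact signature and checks vary
 * by release):
 */
void
BackgroundWorkerInitializeConnection(char *dbname, char *username)
{
	BackgroundWorker *worker = MyBgworkerEntry;

	/* the worker must have asked for database access when registering */
	if (!(worker->bgw_flags & BGWORKER_BACKEND_DATABASE_CONNECTION))
		ereport(FATAL,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("database connection requirement not indicated during registration")));

	InitPostgres(dbname, InvalidOid, username, NULL);

	/* we should still be in init mode; switch to normal processing */
	SetProcessingMode(NormalProcessing);
}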
void worker_test_main(Datum main_arg) { dsm_segment *seg; volatile test_shm_mq_header *hdr; PGPROC *registrant; pqsignal(SIGHUP, handle_sighup); pqsignal(SIGTERM, handle_sigterm); BackgroundWorkerUnblockSignals(); printf("worker_test_main: %d\n", DatumGetInt32(main_arg)); CurrentResourceOwner = ResourceOwnerCreate(NULL, "worker test"); seg = dsm_attach(DatumGetInt32(main_arg)); if (seg == NULL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("unable to map dynamic shared memory segment"))); hdr = dsm_segment_address(seg); /* Start: record our attachment in the shared header */ SpinLockAcquire(&hdr->mutex); hdr->workers_ready++; hdr->workers_attached++; SpinLockRelease(&hdr->mutex); registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid); if (registrant == NULL) { elog(DEBUG1, "registrant backend has exited prematurely"); proc_exit(1); } SetLatch(&registrant->procLatch); /* Do the work */ BackgroundWorkerInitializeConnection(hdr->dbname, NULL); printf("DSM: %p\n", dsm_segment_address(seg)); #if 0 SetCurrentStatementStartTimestamp(); StartTransactionCommand(); SPI_connect(); PushActiveSnapshot(GetTransactionSnapshot()); pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema"); SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); pgstat_report_activity(STATE_IDLE, NULL); #endif dsm_detach(seg); proc_exit(0); }
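/*
 * test_shm_mq_header is not defined in this excerpt.  A minimal layout
 * consistent with the accesses above (mutex, workers_ready,
 * workers_attached, dbname) is sketched below; the field order and the
 * dbname length are assumptions.
 */
typedef struct
{
	slock_t		mutex;				/* protects the two counters below */
	int			workers_ready;		/* workers done with their setup */
	int			workers_attached;	/* workers attached to the segment */
	char		dbname[NAMEDATALEN];	/* database for workers to connect to */
} test_shm_mq_header;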