/* * Entrypoint of this module. * * We register more than one worker process here, to demonstrate how that can * be done. */ void _PG_init(void) { BackgroundWorker worker; unsigned int i; /* get the configuration */ DefineCustomIntVariable("worker_spi.naptime", "Duration between each check (in seconds).", NULL, &worker_spi_naptime, 10, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); if (!process_shared_preload_libraries_in_progress) return; DefineCustomIntVariable("worker_spi.total_workers", "Number of workers.", NULL, &worker_spi_total_workers, 2, 1, 100, PGC_POSTMASTER, 0, NULL, NULL, NULL); /* set up common data for all our workers */ memset(&worker, 0, sizeof(worker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; sprintf(worker.bgw_library_name, "worker_spi"); sprintf(worker.bgw_function_name, "worker_spi_main"); worker.bgw_notify_pid = 0; /* * Now fill in worker-specific data, and do the actual registrations. */ for (i = 1; i <= worker_spi_total_workers; i++) { snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi worker %d", i); snprintf(worker.bgw_type, BGW_MAXLEN, "worker_spi"); worker.bgw_main_arg = Int32GetDatum(i); RegisterBackgroundWorker(&worker); } }
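For orientation, the entry point named above (worker_spi_main) is not part of this excerpt. The sketch below shows, under stated assumptions, what such a bgw_main function typically looks like on 9.4/9.5-era APIs; the got_sigterm flag and the hard-coded "postgres" database are illustrative, not taken from the snippet.

/* Minimal sketch of a background worker entry point. Assumes PostgreSQL
 * 9.4/9.5-era APIs; got_sigterm and the target database are illustrative. */
#include "postgres.h"
#include "miscadmin.h"
#include "postmaster/bgworker.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/proc.h"

static volatile sig_atomic_t got_sigterm = false;
static int worker_spi_naptime = 10;    /* seconds, set by the GUC above */

/* SIGTERM handler: remember the request and wake up the main loop */
static void
worker_sigterm(SIGNAL_ARGS)
{
    int save_errno = errno;

    got_sigterm = true;
    SetLatch(&MyProc->procLatch);
    errno = save_errno;
}

void
worker_spi_main(Datum main_arg)
{
    pqsignal(SIGTERM, worker_sigterm);
    BackgroundWorkerUnblockSignals();

    /* needed because the worker was registered with
     * BGWORKER_BACKEND_DATABASE_CONNECTION */
    BackgroundWorkerInitializeConnection("postgres", NULL);

    while (!got_sigterm)
    {
        int rc;

        /* sleep until naptime elapses, the latch is set, or postmaster dies */
        rc = WaitLatch(&MyProc->procLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                       worker_spi_naptime * 1000L);
        ResetLatch(&MyProc->procLatch);

        if (rc & WL_POSTMASTER_DEATH)
            proc_exit(1);

        /* ... the periodic work (e.g. SPI queries) would go here ... */
    }

    proc_exit(0);
}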
void _PG_init(void) { BackgroundWorker worker; DefineCustomIntVariable("powa.frequency", "Defines the frequency in seconds of the snapshots", NULL, &powa_frequency, 300000, -1, INT_MAX / 1000, PGC_SUSET, GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomIntVariable("powa.coalesce", "Defines the amount of records to group together in the table (more compact)", NULL, &powa_coalesce, 100, 5, INT_MAX, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomIntVariable("powa.retention", "Automatically purge data older than N minutes", NULL, &powa_retention, HOURS_PER_DAY * MINS_PER_HOUR, 0, INT_MAX / SECS_PER_MINUTE, PGC_SUSET, GUC_UNIT_MIN, NULL, NULL, NULL); DefineCustomStringVariable("powa.database", "Defines the database of the workload repository", NULL, &powa_database, "powa", PGC_POSTMASTER, 0, NULL, NULL, NULL); /* Register the worker processes */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; /* Must write to the database */ worker.bgw_main = powa_main; snprintf(worker.bgw_name, BGW_MAXLEN, "powa"); worker.bgw_restart_time = 10; worker.bgw_main_arg = (Datum) 0; #if (PG_VERSION_NUM >= 90400) worker.bgw_notify_pid = 0; #endif RegisterBackgroundWorker(&worker); }
void pgstrom_init_opencl_server(void) { BackgroundWorker worker; /* number of opencl server threads */ DefineCustomIntVariable("pg_strom.opencl_num_threads", "number of opencl server threads", NULL, &opencl_num_threads, 0, /* auto selection */ 0, INT_MAX, PGC_POSTMASTER, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); /* launch a background worker process */ memset(&worker, 0, sizeof(BackgroundWorker)); strcpy(worker.bgw_name, "PG-Strom OpenCL Server"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS; worker.bgw_start_time = BgWorkerStart_PostmasterStart; worker.bgw_restart_time = BGW_NEVER_RESTART; worker.bgw_main = pgstrom_opencl_main; worker.bgw_main_arg = 0; RegisterBackgroundWorker(&worker); /* acquire shared memory */ RequestAddinShmemSpace(MAXALIGN(sizeof(*opencl_serv_shm_values))); shmem_startup_hook_next = shmem_startup_hook; shmem_startup_hook = pgstrom_startup_opencl_server; }
static void pgstrom_init_misc_guc(void) { /* GUC variables according to the device information */ DefineCustomBoolVariable("pg_strom.enabled", "Enables the planner's use of PG-Strom", NULL, &pgstrom_enabled, true, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomBoolVariable("pg_strom.perfmon", "Enables the performance monitor of PG-Strom", NULL, &pgstrom_perfmon_enabled, false, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomIntVariable("pg_strom.min_async_chunks", "least number of chunks to be run asynchronously", NULL, &pgstrom_min_async_chunks, 2, 2, INT_MAX, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomIntVariable("pg_strom.max_async_chunks", "max number of chunk to be run asynchronously", NULL, &pgstrom_max_async_chunks, 32, pgstrom_min_async_chunks + 1, INT_MAX, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); if (pgstrom_max_async_chunks <= pgstrom_min_async_chunks) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"pg_strom.max_async_chunks\" must be larger than \"pg_strom.min_async_chunks\""))); }
/* Define custom GUC */ static void load_parameters() { DefineCustomBoolVariable("saio", "Use SA for query planning.", NULL, &enable_saio, true, PGC_USERSET, 0, SAIO_GUC_HOOK_VALUES); DefineCustomIntVariable("saio_threshold", "Sets the threshold of FROM items beyond which SAIO is used.", NULL, &saio_threshold, 14, 1, INT_MAX, PGC_USERSET, 0, SAIO_GUC_HOOK_VALUES); DefineCustomRealVariable("saio_seed", "SA random seed.", NULL, &saio_seed, 0.0, 0.0, 1.0, PGC_USERSET, 0, SAIO_GUC_HOOK_VALUES); DefineCustomIntVariable("saio_equilibrium_factor", "SA scaling factor for reaching equilibrium.", NULL, &saio_equilibrium_factor, 16, 1, INT_MAX, PGC_USERSET, 0, SAIO_GUC_HOOK_VALUES); DefineCustomRealVariable("saio_initial_temperature_factor", "SA scaling factor for initial temperature.", NULL, &saio_initial_temperature_factor, 2.0, 0.0, 10.0, PGC_USERSET, 0, SAIO_GUC_HOOK_VALUES); DefineCustomRealVariable("saio_temperature_reduction_factor", "SA temperature reduction factor.", NULL, &saio_temperature_reduction_factor, 0.9, 0.0, 1.0, PGC_USERSET, 0, SAIO_GUC_HOOK_VALUES); DefineCustomIntVariable("saio_moves_before_frozen", "SA moves before considering system frozen.", NULL, &saio_moves_before_frozen, 4, 1, INT_MAX, PGC_USERSET, 0, SAIO_GUC_HOOK_VALUES); }
/* * Entrypoint of this module. * * We register more than one worker process here, to demonstrate how that can * be done. */ void _PG_init(void) { struct BackgroundWorker worker = { "restgres master" }; /* get the configuration */ DefineCustomStringVariable("http.listen_addresses", "Addresses to listen on; see PostgreSQL listen_addresses", NULL, &restgres_listen_addresses, "*", PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("http.port", "Port to listen on (default: any available port)", NULL, &restgres_listen_port, restgres_listen_port, 0, 32768, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("http.max_connections", "Maximum number of connections to serve at once (additional connections will have to wait or be rejected)", NULL, &restgres_max_connections, 100, 0, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("http.max_concurrency", "Maximum number of connections to serve at once (additional connections will have to wait or be rejected)", NULL, &restgres_max_concurrency, 100, 0, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); /* set up common data for all our workers */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_restart_time = BGW_DEFAULT_RESTART_INTERVAL; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_main = restgres_main; worker.bgw_notify_pid = 0; RegisterBackgroundWorker(&worker); }
void _PG_init(void) { coreIntHandler = pqsignal(SIGINT, handleInterrupt); #ifdef WIN32 #if POSTGIS_GEOS_VERSION >= 34 GEOS_interruptRegisterCallback(interruptCallback); #endif lwgeom_register_interrupt_callback(interruptCallback); #endif #if 0 /* Define custom GUC variables. */ DefineCustomIntVariable( "postgis.debug.level", /* name */ "Sets the debugging level of PostGIS.", /* short_desc */ "This is an experimental configuration.", /* long_desc */ &postgis_debug_level, /* valueAddr */ 0, 8, /* min-max */ 0, /* bootValue */ PGC_SUSET, /* GucContext context */ GUC_UNIT_MS, /* int flags */ #if POSTGIS_PGSQL_VERSION >= 91 NULL, /* GucStringCheckHook check_hook */ #endif NULL, /* GucStringAssignHook assign_hook */ NULL /* GucShowHook show_hook */ ); #endif #if 0 /* Define custom GUC variables. */ DefineCustomStringVariable( "postgis.greeting.string", /* name */ "Sets the greeting string used on postgis module load.", /* short_desc */ "This is an experimental configuration.", /* long_desc */ &greeting, /* valueAddr */ "Welcome to PostGIS " POSTGIS_VERSION, /* bootValue */ PGC_SUSET, /* GucContext context */ GUC_UNIT_MS, /* int flags */ #if POSTGIS_PGSQL_VERSION >= 91 NULL, /* GucStringCheckHook check_hook */ #endif NULL, /* GucStringAssignHook assign_hook */ NULL /* GucShowHook show_hook */ ); #endif /* install PostgreSQL handlers */ pg_install_lwgeom_handlers(); /* initialize geometry backend */ lwgeom_init_backend(); }
/* * Entrypoint of this module. * * We register more than one worker process here, to demonstrate how that can * be done. */ void _PG_init(void) { BackgroundWorker worker; if (!process_shared_preload_libraries_in_progress) { return; } DefineCustomIntVariable("pg_octopus.health_check_period", "Duration between each check (in milliseconds).", NULL, &HealthCheckPeriod, 10000, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("pg_octopus.health_check_timeout", "Connect timeout (in milliseconds).", NULL, &HealthCheckTimeout, 2000, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("pg_octopus.health_check_max_retries", "Maximum number of re-tries before marking a node as failed.", NULL, &HealthCheckMaxRetries, 2, 1, 100, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("pg_octopus.health_check_retry_delay", "Delay between consecutive retries.", NULL, &HealthCheckRetryDelay, 1000, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); /* set up common data for all our workers */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; worker.bgw_main = PgOctopusWorkerMain; worker.bgw_main_arg = Int32GetDatum(0); worker.bgw_notify_pid = 0; sprintf(worker.bgw_library_name, "pg_octopus"); snprintf(worker.bgw_name, BGW_MAXLEN, "pg_octopus_monitor"); RegisterBackgroundWorker(&worker); }
/* * pgmpc_load_params * Load GUC parameters */ static void pgmpc_load_params(void) { /* Connection host */ DefineCustomStringVariable("pgmpc.mpd_host", "Sets the IP to connect to mpd server.", "Default value is \"localhost\".", &mpd_host, "localhost", PGC_USERSET, 0, NULL, NULL, NULL); /* Connection password */ DefineCustomStringVariable("pgmpc.mpd_password", "Sets the password to connect to mpd server.", "Default value is \"\".", &mpd_password, "", PGC_USERSET, 0, NULL, NULL, NULL); /* Connection port */ DefineCustomIntVariable("pgmpc.mpd_port", "Sets the port to connect to mpd server.", "Default value set to 6600.", &mpd_port, 6600, 1, 65536, PGC_USERSET, 0, NULL, NULL, NULL); /* Connection timeout */ DefineCustomIntVariable("pgmpc.mpd_timeout", "Sets the timeout for obtaining a connection, in seconds.", "Default value set to 10s. Max is 300s. 0 means infinite wait.", &mpd_timeout, 10, 0, 300, PGC_USERSET, 0, NULL, NULL, NULL); }
/* * Module Load Callback */ void _PG_init(void) { /* Define custom GUC variables */ DefineCustomStringVariable( "pg_query_statsd.hostname", gettext_noop("Host to send statsd messages to"), NULL, &statsd_host, "", PGC_SIGHUP, 0, NULL, &assign_statsd_host, NULL); DefineCustomIntVariable( "pg_query_statsd.port", /* name */ gettext_noop("Port to send statsd messages to"), /* short desc */ NULL, /* long desc */ &statsd_port, /* var addr */ 8125, /* boot val */ 0, /* min val */ 16384, /* max val */ PGC_SIGHUP, /* context */ 0, /* flags */ NULL, /* check_hook */ &assign_statsd_port, /* assign_hook */ NULL); /* show_hook */ EmitWarningsOnPlaceholders("pg_query_statsd"); initialize_socket(statsd_host, statsd_port); /* Install Hooks */ if (ExecutorEnd_hook) prev_ExecutorEnd = ExecutorEnd_hook; else prev_ExecutorEnd = standard_ExecutorEnd; ExecutorEnd_hook = new_executor_end; if (ExecutorStart_hook) prev_ExecutorStart = ExecutorStart_hook; else prev_ExecutorStart = standard_ExecutorStart; ExecutorStart_hook = new_executor_start; }
/* * Module Load Callback */ void _PG_init(void) { /* Define custom GUC variables */ DefineCustomIntVariable("auth_delay.milliseconds", "Milliseconds to delay before reporting authentication failure", NULL, &auth_delay_milliseconds, 0, 0, INT_MAX / 1000, PGC_SIGHUP, GUC_UNIT_MS, NULL, NULL, NULL); /* Install Hooks */ original_client_auth_hook = ClientAuthentication_hook; ClientAuthentication_hook = auth_delay_checks; }
/* * Module load callback */ void _PG_init(void) { /* */ if (! process_shared_preload_libraries_in_progress) { elog(ERROR, "shared_ispell has to be loaded using shared_preload_libraries"); return; } /* Define custom GUC variables. */ /* How much memory should we preallocate for the dictionaries (limits how many * dictionaries you can load into the shared segment). */ DefineCustomIntVariable("shared_ispell.max_size", "amount of memory to pre-allocate for ispell dictionaries", NULL, &max_ispell_mem_size, (32*1024*1024), (1024*1024), INT_MAX, PGC_POSTMASTER, GUC_UNIT_BLOCKS, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); EmitWarningsOnPlaceholders("shared_ispell"); /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in ispell_shmem_startup(). */ RequestAddinShmemSpace(max_ispell_mem_size); RequestAddinLWLocks(1); /* Install hooks. */ prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = ispell_shmem_startup; }
static void kill_idle_load_params(void) { /* * Kill backends with idle time more than this interval; possible * candidates are scanned at the same time interval. */ DefineCustomIntVariable("kill_idle.max_idle_time", "Maximum time allowed for backends to be idle (s).", "Default of 5s, max of 3600s", &kill_max_idle_time, 5, 1, 3600, PGC_SIGHUP, 0, NULL, NULL, NULL); }
void _PG_init(void) { if (!process_shared_preload_libraries_in_progress) return; RequestAddinShmemSpace(getMemstatSize()); RequestNamedLWLockTranche("memstat", PROCARRAY_MAXPROCS); prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = collectLocalMemoryStats; prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = allocShmem; DefineCustomIntVariable("memstat.period", "Sets period to collect memory statistics", "zero means collecting after each query", &ticPeriod, 10, 0, 60*60*24*31, PGC_SUSET, GUC_UNIT_S, NULL, NULL, NULL); }
/* * Module load callback */ void _PG_init(void) { /* * In order to create our shared memory area, we have to be loaded via * shared_preload_libraries. If not, fall out without hooking into any of * the main system. (We don't throw error here because it seems useful to * allow the pg_stat_statements functions to be created even when the * module isn't active. The functions must protect themselves against * being called then, however.) */ if (!process_shared_preload_libraries_in_progress) return; /* * Define (or redefine) custom GUC variables. */ DefineCustomIntVariable("pg_stat_statements.max", "Sets the maximum number of statements tracked by pg_stat_statements.", NULL, &pgss_max, 1000, 100, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL); DefineCustomEnumVariable("pg_stat_statements.track", "Selects which statements are tracked by pg_stat_statements.", NULL, &pgss_track, PGSS_TRACK_TOP, track_options, PGC_SUSET, 0, NULL, NULL); DefineCustomBoolVariable("pg_stat_statements.track_utility", "Selects whether utility commands are tracked by pg_stat_statements.", NULL, &pgss_track_utility, true, PGC_SUSET, 0, NULL, NULL); DefineCustomBoolVariable("pg_stat_statements.save", "Save pg_stat_statements statistics across server shutdowns.", NULL, &pgss_save, true, PGC_SIGHUP, 0, NULL, NULL); EmitWarningsOnPlaceholders("pg_stat_statements"); /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in pgss_shmem_startup(). */ RequestAddinShmemSpace(pgss_memsize()); RequestAddinLWLocks(1); /* * Install hooks. */ prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = pgss_shmem_startup; prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = pgss_ExecutorStart; prev_ExecutorRun = ExecutorRun_hook; ExecutorRun_hook = pgss_ExecutorRun; prev_ExecutorEnd = ExecutorEnd_hook; ExecutorEnd_hook = pgss_ExecutorEnd; prev_ProcessUtility = ProcessUtility_hook; ProcessUtility_hook = pgss_ProcessUtility; }
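The shmem_startup_hook wired up here is where the shared state is actually created or attached. The following is a generic sketch of that pattern, assuming a pre-9.6 server (RequestAddinLWLocks/LWLockAssign, matching the snippet); the MySharedState name and fields are placeholders, not the real pg_stat_statements structures.

/* Generic sketch of a shmem_startup_hook: create or attach to the
 * extension's shared state under AddinShmemInitLock. Placeholder names;
 * assumes pre-9.6 LWLockAssign() paired with RequestAddinLWLocks(1). */
#include "postgres.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

typedef struct MySharedState
{
    LWLockId    lock;       /* protects the fields below */
    int         counter;
} MySharedState;

static MySharedState *my_state = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

static void
my_shmem_startup(void)
{
    bool found;

    if (prev_shmem_startup_hook)
        prev_shmem_startup_hook();

    LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

    my_state = ShmemInitStruct("my_extension", sizeof(MySharedState), &found);
    if (!found)
    {
        /* first process to attach initializes the struct */
        my_state->lock = LWLockAssign();
        my_state->counter = 0;
    }

    LWLockRelease(AddinShmemInitLock);
}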
/* * Module load callback */ void _PG_init(void) { /* Define custom GUC variables. */ DefineCustomIntVariable("auto_explain.log_min_duration", "Sets the minimum execution time above which plans will be logged.", "Zero prints all plans. -1 turns this feature off.", &auto_explain_log_min_duration, -1, -1, INT_MAX / 1000, PGC_SUSET, GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_analyze", "Use EXPLAIN ANALYZE for plan logging.", NULL, &auto_explain_log_analyze, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_verbose", "Use EXPLAIN VERBOSE for plan logging.", NULL, &auto_explain_log_verbose, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_buffers", "Log buffers usage.", NULL, &auto_explain_log_buffers, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_triggers", "Include trigger statistics in plans.", "This has no effect unless log_analyze is also set.", &auto_explain_log_triggers, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable("auto_explain.log_format", "EXPLAIN format to be used for plan logging.", NULL, &auto_explain_log_format, EXPLAIN_FORMAT_TEXT, format_options, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_nested_statements", "Log nested statements.", NULL, &auto_explain_log_nested_statements, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_timing", "Collect timing data, not just row counts.", NULL, &auto_explain_log_timing, true, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomRealVariable("auto_explain.sample_rate", "Fraction of queries to process.", NULL, &auto_explain_sample_rate, 1.0, 0.0, 1.0, PGC_SUSET, 0, NULL, NULL, NULL); EmitWarningsOnPlaceholders("auto_explain"); /* Install hooks. */ prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = explain_ExecutorStart; prev_ExecutorRun = ExecutorRun_hook; ExecutorRun_hook = explain_ExecutorRun; prev_ExecutorFinish = ExecutorFinish_hook; ExecutorFinish_hook = explain_ExecutorFinish; prev_ExecutorEnd = ExecutorEnd_hook; ExecutorEnd_hook = explain_ExecutorEnd; }
static void registerGUCOptions(void) { char pathbuf[MAXPGPATH]; DefineCustomStringVariable( "pljava.libjvm_location", "Path to the libjvm (.so, .dll, etc.) file in Java's jre/lib area", NULL, /* extended description */ &libjvmlocation, #if PG_VERSION_NUM >= 80400 "libjvm", #endif PGC_SUSET, #if PG_VERSION_NUM >= 80400 0, /* flags */ #endif #if PG_VERSION_NUM >= 90100 check_libjvm_location, #endif assign_libjvm_location, NULL); /* show hook */ DefineCustomStringVariable( "pljava.vmoptions", "Options sent to the JVM when it is created", NULL, /* extended description */ &vmoptions, #if PG_VERSION_NUM >= 80400 NULL, /* boot value */ #endif PGC_SUSET, #if PG_VERSION_NUM >= 80400 0, /* flags */ #endif #if PG_VERSION_NUM >= 90100 check_vmoptions, #endif assign_vmoptions, NULL); /* show hook */ DefineCustomStringVariable( "pljava.classpath", "Classpath used by the JVM", NULL, /* extended description */ &classpath, #if PG_VERSION_NUM >= 80400 InstallHelper_defaultClassPath(pathbuf), /* boot value */ #endif PGC_SUSET, #if PG_VERSION_NUM >= 80400 0, /* flags */ #endif #if PG_VERSION_NUM >= 90100 check_classpath, #endif assign_classpath, NULL); /* show hook */ DefineCustomBoolVariable( "pljava.debug", "Stop the backend to attach a debugger", NULL, /* extended description */ &pljavaDebug, #if PG_VERSION_NUM >= 80400 false, /* boot value */ #endif PGC_USERSET, #if PG_VERSION_NUM >= 80400 0, /* flags */ #endif #if PG_VERSION_NUM >= 90100 NULL, /* check hook */ #endif NULL, NULL); /* assign hook, show hook */ DefineCustomIntVariable( "pljava.statement_cache_size", "Size of the prepared statement MRU cache", NULL, /* extended description */ &statementCacheSize, #if PG_VERSION_NUM >= 80400 11, /* boot value */ #endif 0, 512, /* min, max values */ PGC_USERSET, #if PG_VERSION_NUM >= 80400 0, /* flags */ #endif #if PG_VERSION_NUM >= 90100 NULL, /* check hook */ #endif NULL, NULL); /* assign hook, show hook */ DefineCustomBoolVariable( "pljava.release_lingering_savepoints", "If true, lingering savepoints will be released on function exit. " "If false, they will be rolled back", NULL, /* extended description */ &pljavaReleaseLingeringSavepoints, #if PG_VERSION_NUM >= 80400 false, /* boot value */ #endif PGC_USERSET, #if PG_VERSION_NUM >= 80400 0, /* flags */ #endif #if PG_VERSION_NUM >= 90100 NULL, /* check hook */ #endif NULL, NULL); /* assign hook, show hook */ DefineCustomBoolVariable( "pljava.enable", "If off, the Java virtual machine will not be started until set on.", "This is mostly of use on PostgreSQL versions < 9.2, where option " "settings changed before LOADing PL/Java may be rejected, so they must " "be made after LOAD, but before the virtual machine is started.", &pljavaEnabled, #if PG_VERSION_NUM >= 90200 true, /* boot value */ #elif PG_VERSION_NUM >= 80400 false, /* boot value */ #endif PGC_USERSET, #if PG_VERSION_NUM >= 80400 0, /* flags */ #endif #if PG_VERSION_NUM >= 90100 check_enabled, /* check hook */ #endif assign_enabled, NULL); /* show hook */ DefineCustomStringVariable( "pljava.implementors", "Implementor names recognized in deployment descriptors", NULL, /* extended description */ &implementors, #if PG_VERSION_NUM >= 80400 "postgresql", /* boot value */ #endif PGC_USERSET, #if PG_VERSION_NUM >= 80400 GUC_LIST_INPUT | GUC_LIST_QUOTE, #endif #if PG_VERSION_NUM >= 90100 NULL, /* check hook */ #endif NULL, NULL); /* assign hook, show hook */ EmitWarningsOnPlaceholders("pljava"); }
/* * Module load callback */ void _PG_init(void) { /* can be preloaded only from postgresql.conf */ if (!process_shared_preload_libraries_in_progress) return; /* Define custom GUC variables. */ DefineCustomStringVariable("query_recorder.filename", "Base filename to write the recorded queries.", NULL, &base_filename, false, PGC_BACKEND, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); DefineCustomIntVariable("query_recorder.max_files", "How many files will be rotated.", NULL, &max_files, 100, 1, 1000, PGC_BACKEND, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); DefineCustomIntVariable("query_recorder.size_limit", "File size limit (will create multiple files .000 - .999)", NULL, &max_file_size, 1024*1024*1024/BLCKSZ, /* 1GB */ 1024*1024/BLCKSZ, 1024*1024*1024/BLCKSZ, /* 1MB - 1GB */ PGC_BACKEND, GUC_UNIT_BLOCKS, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); DefineCustomIntVariable("query_recorder.buffer_size", "Size of the buffer used to collect queries.", NULL, &buffer_size, 1024*1024/BLCKSZ, /* 1MB */ 1, 16*1024*1024/BLCKSZ, /* 1 block - 16MB */ PGC_BACKEND, GUC_UNIT_BLOCKS, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); /* Define custom GUC variables. */ DefineCustomBoolVariable("query_recorder.normalize", "Replace line breaks and carriage returns with spaces.", NULL, &normalize, false, PGC_BACKEND, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif &set_enabled, NULL); /* Define custom GUC variables. */ DefineCustomBoolVariable("query_recorder.enabled", "Enable or disable recording of queries.", NULL, &enabled, false, PGC_SUSET, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif &set_enabled, (GucShowHook)(&show_enabled)); EmitWarningsOnPlaceholders("query_recorder"); /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in pg_record_shmem_startup(). */ RequestAddinShmemSpace(SEGMENT_SIZE); RequestAddinLWLocks(1); /* Install hooks. */ prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = pg_record_shmem_startup; prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = explain_ExecutorStart; prev_ExecutorRun = ExecutorRun_hook; ExecutorRun_hook = explain_ExecutorRun; #if (PG_VERSION_NUM >= 90100) prev_ExecutorFinish = ExecutorFinish_hook; ExecutorFinish_hook = explain_ExecutorFinish; #endif prev_ExecutorEnd = ExecutorEnd_hook; ExecutorEnd_hook = explain_ExecutorEnd; prev_ProcessUtility = ProcessUtility_hook; ProcessUtility_hook = pg_record_ProcessUtility; }
/* * _PG_init * Entry point loading hooks */ void _PG_init(void) { /* Set up GUCs */ DefineCustomStringVariable("redislog.host", "Redis server host name or IP address.", NULL, &Redislog_host, "127.0.0.1", PGC_SIGHUP, GUC_NOT_IN_SAMPLE | GUC_SUPERUSER_ONLY, NULL, &guc_on_assign_reopen_string, NULL); DefineCustomIntVariable("redislog.port", "Redis server port number.", NULL, &Redislog_port, 6379, 0, 65535, PGC_SIGHUP, GUC_NOT_IN_SAMPLE | GUC_SUPERUSER_ONLY, NULL, &guc_on_assign_reopen_int, NULL); DefineCustomIntVariable("redislog.connection_timeout", "Redis server connection timeout.", NULL, &Redislog_timeout, 1000, 1, INT_MAX, PGC_SIGHUP, GUC_NOT_IN_SAMPLE | GUC_SUPERUSER_ONLY | GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomStringVariable("redislog.key", "Redis server key name.", NULL, &Redislog_key, "postgres", PGC_SIGHUP, GUC_NOT_IN_SAMPLE | GUC_SUPERUSER_ONLY, NULL, NULL, NULL); DefineCustomEnumVariable("redislog.min_error_statement", "Controls which SQL statements that cause an error condition are " "recorded in the server log.", "Each level includes all the levels that follow it. The later " "the level, the fewer messages are sent.", &Redislog_min_error_statement, log_min_error_statement, server_message_level_options, PGC_SUSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomEnumVariable("redislog.min_messages", "Set the message levels that are logged.", "Each level includes all the levels that follow it. The higher " "the level, the fewer messages are sent.", &Redislog_min_messages, WARNING, server_message_level_options, PGC_SUSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomBoolVariable("redislog.ship_to_redis_only", "Send log messages to Redis only.", "If set to true, send log messages to Redis only and skip " "journaling them into the main PostgreSQL log. Use the " "PostgreSQL main logger facility for fallback purposes only, " "in case no Redis service is available. " "By default it is set to false.", &Redislog_ship_to_redis_only, FALSE, PGC_SUSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); prev_log_hook = emit_log_hook; emit_log_hook = redis_log_hook; EmitWarningsOnPlaceholders("redislog"); }
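The emit_log_hook chained in at the end is the piece that ships each log record; a minimal sketch of such a hook is shown below. The shipping step is left as a comment because the Redis transport itself is not part of this excerpt.

/* Sketch of an emit_log_hook: ship the record, then chain to any previously
 * installed hook so other logging extensions keep working. */
#include "postgres.h"
#include "utils/elog.h"

static emit_log_hook_type prev_log_hook = NULL;

static void
redis_log_hook(ErrorData *edata)
{
    /* ... filter against redislog.min_messages and ship edata to Redis here;
     * for ship_to_redis_only, edata->output_to_server could be cleared ... */

    if (prev_log_hook)
        prev_log_hook(edata);
}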
void _PG_init(void) { /* * In order to create our shared memory area, we have to be loaded via * shared_preload_libraries. If not, fall out without hooking into any of * the main system. (We don't throw error here because it seems useful to * allow the cs_* functions to be created even when the * module isn't active. The functions must protect themselves against * being called then, however.) */ if (!process_shared_preload_libraries_in_progress) return; DefineCustomIntVariable( "multimaster.workers", "Number of multimaster executor workers per node", NULL, &MMWorkers, 8, 1, INT_MAX, PGC_BACKEND, 0, NULL, NULL, NULL ); DefineCustomIntVariable( "multimaster.queue_size", "Multimaster queue size", NULL, &MMQueueSize, 1024*1024, 1024, INT_MAX, PGC_BACKEND, 0, NULL, NULL, NULL ); DefineCustomIntVariable( "multimaster.local_xid_reserve", "Number of XIDs reserved by node for local transactions", NULL, &DtmLocalXidReserve, 100, 1, INT_MAX, PGC_BACKEND, 0, NULL, NULL, NULL ); DefineCustomIntVariable( "multimaster.buffer_size", "Size of sockhub buffer for connection to DTM daemon, if 0, then direct connection will be used", NULL, &DtmBufferSize, 0, 0, INT_MAX, PGC_BACKEND, 0, NULL, NULL, NULL ); DefineCustomStringVariable( "multimaster.arbiters", "The comma separated host:port pairs where arbiters reside", NULL, &Arbiters, "127.0.0.1:5431", PGC_BACKEND, /* context */ 0, /* flags */ NULL, /* GucStringCheckHook check_hook */ NULL, /* GucStringAssignHook assign_hook */ NULL /* GucShowHook show_hook */ ); DefineCustomStringVariable( "multimaster.conn_strings", "Multimaster node connection strings separated by commas, i.e. 'replication=database dbname=postgres host=localhost port=5001,replication=database dbname=postgres host=localhost port=5002'", NULL, &MMConnStrs, "", PGC_BACKEND, /* context */ 0, /* flags */ NULL, /* GucStringCheckHook check_hook */ NULL, /* GucStringAssignHook assign_hook */ NULL /* GucShowHook show_hook */ ); DefineCustomIntVariable( "multimaster.node_id", "Multimaster node ID", NULL, &MMNodeId, 1, 1, INT_MAX, PGC_BACKEND, 0, NULL, NULL, NULL ); /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in dtm_shmem_startup(). */ RequestAddinShmemSpace(DTM_SHMEM_SIZE + MMQueueSize); RequestAddinLWLocks(2); MMNodes = MMStartReceivers(MMConnStrs, MMNodeId); if (MMNodes < 2) { elog(ERROR, "Multimaster should have at least two nodes"); } BgwPoolStart(MMWorkers, MMPoolConstructor); ArbitersCopy = strdup(Arbiters); if (DtmBufferSize != 0) { ArbiterConfig(Arbiters, Unix_socket_directories); RegisterBackgroundWorker(&DtmWorker); } else ArbiterConfig(Arbiters, NULL); /* * Install hooks. */ PreviousShmemStartupHook = shmem_startup_hook; shmem_startup_hook = DtmShmemStartup; PreviousExecutorFinishHook = ExecutorFinish_hook; ExecutorFinish_hook = MMExecutorFinish; PreviousProcessUtilityHook = ProcessUtility_hook; ProcessUtility_hook = MMProcessUtility; }
/* * Entrypoint of this module. * * We register more than one worker process here, to demonstrate how that can * be done. */ void _PG_init(void) { BackgroundWorker worker; if (!process_shared_preload_libraries_in_progress) return; /* get the configuration */ DefineCustomIntVariable("pg_keeper.keepalives_time", "Specific time between polling to primary server", NULL, &keeper_keepalives_time, 5, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("pg_keeper.keepalives_count", "Specific retry count until promoting standby server", NULL, &keeper_keepalives_count, 1, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomStringVariable("pg_keeper.node1_conninfo", "Connection information for node1 server (first master server)", NULL, &keeper_node1_conninfo, NULL, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomStringVariable("pg_keeper.node2_conninfo", "Connection information for node2 server (first standby server)", NULL, &keeper_node2_conninfo, NULL, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomStringVariable("pg_keeper.after_command", "Shell command that will be called after promoted", NULL, &keeper_after_command, NULL, PGC_SIGHUP, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); /* set up common data for all our workers */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_ConsistentState; worker.bgw_restart_time = BGW_NEVER_RESTART; worker.bgw_main = KeeperMain; worker.bgw_notify_pid = 0; /* * Now fill in worker-specific data, and do the actual registrations. */ snprintf(worker.bgw_name, BGW_MAXLEN, "pg_keeper"); worker.bgw_main_arg = Int32GetDatum(1); RegisterBackgroundWorker(&worker); }
/* * Module load callback */ void _PG_init(void) { /* Define custom GUC variables. */ DefineCustomIntVariable("auto_explain.log_min_duration", "Sets the minimum execution time above which plans will be logged.", "Zero prints all plans. -1 turns this feature off.", &auto_explain_log_min_duration, -1, -1, INT_MAX / 1000, PGC_SUSET, GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_analyze", "Use EXPLAIN ANALYZE for plan logging.", NULL, &auto_explain_log_analyze, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_verbose", "Use EXPLAIN VERBOSE for plan logging.", NULL, &auto_explain_log_verbose, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_buffers", "Log buffers usage.", NULL, &auto_explain_log_buffers, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable("auto_explain.log_format", "EXPLAIN format to be used for plan logging.", NULL, &auto_explain_log_format, EXPLAIN_FORMAT_TEXT, format_options, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("auto_explain.log_nested_statements", "Log nested statements.", NULL, &auto_explain_log_nested_statements, false, PGC_SUSET, 0, NULL, NULL, NULL); EmitWarningsOnPlaceholders("auto_explain"); /* Install hooks. */ prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = explain_ExecutorStart; prev_ExecutorRun = ExecutorRun_hook; ExecutorRun_hook = explain_ExecutorRun; prev_ExecutorFinish = ExecutorFinish_hook; ExecutorFinish_hook = explain_ExecutorFinish; prev_ExecutorEnd = ExecutorEnd_hook; ExecutorEnd_hook = explain_ExecutorEnd; }
/* * Module load callback */ void _PG_init(void) { /* */ if (!process_shared_preload_libraries_in_progress) return; /* Define custom GUC variables. */ DefineCustomBoolVariable("query_histogram.dynamic", "Dynamic histogram may be modified on the fly.", NULL, &default_histogram_dynamic, false, PGC_BACKEND, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); /* Define custom GUC variables. */ DefineCustomBoolVariable("query_histogram.track_utility", "Selects whether utility commands are tracked.", NULL, &default_histogram_utility, true, PGC_SUSET, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif &set_histogram_track_utility, NULL); DefineCustomIntVariable("query_histogram.bin_count", "Sets the number of bins of the histogram.", "Zero disables collecting the histogram.", &default_histogram_bins, 100, 0, 1000, PGC_SUSET, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif &set_histogram_bins_count_hook, NULL); DefineCustomIntVariable("query_histogram.bin_width", "Sets the width of the histogram bin.", NULL, &default_histogram_step, 100, 1, 1000, PGC_SUSET, GUC_UNIT_MS, #if (PG_VERSION_NUM >= 90100) NULL, #endif &set_histogram_bins_width_hook, NULL); DefineCustomIntVariable("query_histogram.sample_pct", "What portion of the queries should be sampled (in percent).", NULL, &default_histogram_sample_pct, 5, 1, 100, PGC_SUSET, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif &set_histogram_sample_hook, NULL); DefineCustomEnumVariable("query_histogram.histogram_type", "Type of the histogram (how the bin width is computed).", NULL, &default_histogram_type, HISTOGRAM_LINEAR, histogram_type_options, PGC_SUSET, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif &set_histogram_type_hook, NULL); EmitWarningsOnPlaceholders("query_histogram"); /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in histogram_shmem_startup(). */ RequestAddinShmemSpace(get_histogram_size()); RequestAddinLWLocks(1); /* Install hooks. */ prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = histogram_shmem_startup; prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = explain_ExecutorStart; prev_ExecutorRun = ExecutorRun_hook; ExecutorRun_hook = explain_ExecutorRun; #if (PG_VERSION_NUM >= 90100) prev_ExecutorFinish = ExecutorFinish_hook; ExecutorFinish_hook = explain_ExecutorFinish; #endif prev_ExecutorEnd = ExecutorEnd_hook; ExecutorEnd_hook = explain_ExecutorEnd; prev_ProcessUtility = ProcessUtility_hook; ProcessUtility_hook = queryhist_ProcessUtility; }
static void pgstrom_init_misc_guc(void) { /* GUC variables according to the device information */ DefineCustomBoolVariable("pg_strom.enabled", "Enables the planner's use of PG-Strom", NULL, &pgstrom_enabled, true, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomBoolVariable("pg_strom.perfmon", "Enables the performance monitor of PG-Strom", NULL, &pgstrom_perfmon_enabled, false, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomBoolVariable("pg_strom.show_device_kernel", "Enables to show device kernel on EXPLAIN", NULL, &pgstrom_show_device_kernel, false, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomIntVariable("pg_strom.chunk_size", "default size of pgstrom_data_store in MB", NULL, &pgstrom_chunk_size, 15, 4, 128, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomIntVariable("pg_strom.min_async_chunks", "least number of chunks to be run asynchronously", NULL, &pgstrom_min_async_chunks, 2, 2, INT_MAX, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomIntVariable("pg_strom.max_async_chunks", "max number of chunk to be run asynchronously", NULL, &pgstrom_max_async_chunks, 3, pgstrom_min_async_chunks + 1, INT_MAX, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); if (pgstrom_max_async_chunks <= pgstrom_min_async_chunks) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"pg_strom.max_async_chunks\" must be larger than \"pg_strom.min_async_chunks\""))); DefineCustomRealVariable("gpu_setup_cost", "Cost to setup GPU device to run", NULL, &pgstrom_gpu_setup_cost, 500 * DEFAULT_SEQ_PAGE_COST, 0, DBL_MAX, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomRealVariable("gpu_operator_cost", "Cost of processing each operators by GPU", NULL, &pgstrom_gpu_operator_cost, DEFAULT_CPU_OPERATOR_COST / 100.0, 0, DBL_MAX, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); DefineCustomRealVariable("gpu_tuple_cost", "Cost of processing each tuple for GPU", NULL, &pgstrom_gpu_tuple_cost, DEFAULT_CPU_TUPLE_COST / 32, 0, DBL_MAX, PGC_USERSET, GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); }
/* Register Citus configuration variables. */ static void RegisterCitusConfigVariables(void) { DefineCustomStringVariable( "citus.worker_list_file", gettext_noop("Sets the server's \"worker_list\" configuration file."), NULL, &WorkerListFileName, NULL, PGC_POSTMASTER, GUC_SUPERUSER_ONLY, NULL, NULL, NULL); NormalizeWorkerListPath(); DefineCustomBoolVariable( "citus.binary_master_copy_format", gettext_noop("Use the binary master copy format."), gettext_noop("When enabled, data is copied from workers to the master " "in PostgreSQL's binary serialization format."), &BinaryMasterCopyFormat, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.binary_worker_copy_format", gettext_noop("Use the binary worker copy format."), gettext_noop("When enabled, data is copied from workers to workers " "in PostgreSQL's binary serialization format when " "joining large tables."), &BinaryWorkerCopyFormat, false, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.expire_cached_shards", gettext_noop("Enables shard cache expiration if a shard's size on disk has " "changed."), gettext_noop("When appending to an existing shard, old data may still be cached " "on other workers. This configuration entry activates automatic " "expiration, but should not be used with manual updates to shards."), &ExpireCachedShards, false, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.subquery_pushdown", gettext_noop("Enables supported subquery pushdown to workers."), NULL, &SubqueryPushdown, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.log_multi_join_order", gettext_noop("Logs the distributed join order to the server log."), gettext_noop("We use this private configuration entry as a debugging aid. " "If enabled, we print the distributed join order."), &LogMultiJoinOrder, false, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.explain_multi_logical_plan", gettext_noop("Enables Explain to print out distributed logical plans."), gettext_noop("We use this private configuration entry as a debugging aid. " "If enabled, the Explain command prints out the optimized " "logical plan for distributed queries."), &ExplainMultiLogicalPlan, false, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.explain_multi_physical_plan", gettext_noop("Enables Explain to print out distributed physical plans."), gettext_noop("We use this private configuration entry as a debugging aid. " "If enabled, the Explain command prints out the physical " "plan for distributed queries."), &ExplainMultiPhysicalPlan, false, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.explain_distributed_queries", gettext_noop("Enables Explain for distributed queries."), gettext_noop("When enabled, the Explain command shows remote and local " "plans when used with a distributed query. It is enabled " "by default, but can be disabled for regression tests."), &ExplainDistributedQueries, true, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.explain_all_tasks", gettext_noop("Enables showing output for all tasks in Explain."), gettext_noop("The Explain command for distributed queries shows " "the remote plan for a single task by default. When " "this configuration entry is enabled, the plan for " "all tasks is shown, but the Explain takes longer."), &ExplainAllTasks, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.all_modifications_commutative", gettext_noop("Bypasses commutativity checks when enabled"), NULL, &AllModificationsCommutative, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.enable_ddl_propagation", gettext_noop("Enables propagating DDL statements to worker shards"), NULL, &EnableDDLPropagation, true, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.shard_replication_factor", gettext_noop("Sets the replication factor for shards."), gettext_noop("Shards are replicated across nodes according to this " "replication factor. Note that shards read this " "configuration value at sharded table creation time, " "and later reuse the initially read value."), &ShardReplicationFactor, 2, 1, 100, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.shard_max_size", gettext_noop("Sets the maximum size a shard will grow before it gets split."), gettext_noop("Shards store table and file data. When the source " "file's size for one shard exceeds this configuration " "value, the database ensures that either a new shard " "gets created, or the current one gets split. Note that " "shards read this configuration value at sharded table " "creation time, and later reuse the initially read value."), &ShardMaxSize, 1048576, 256, INT_MAX, /* max allowed size not set to MAX_KILOBYTES on purpose */ PGC_USERSET, GUC_UNIT_KB, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_worker_nodes_tracked", gettext_noop("Sets the maximum number of worker nodes that are tracked."), gettext_noop("Worker nodes' network locations, their membership and " "health status are tracked in a shared hash table on " "the master node. This configuration value limits the " "size of the hash table, and consequently the maximum " "number of worker nodes that can be tracked."), &MaxWorkerNodesTracked, 2048, 8, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.remote_task_check_interval", gettext_noop("Sets the frequency at which we check job statuses."), gettext_noop("The master node assigns tasks to worker nodes, and " "then regularly checks with them about each task's " "progress. This configuration value sets the time " "interval between two consequent checks."), &RemoteTaskCheckInterval, 10, 1, REMOTE_NODE_CONNECT_TIMEOUT, PGC_USERSET, GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomIntVariable( "citus.task_tracker_delay", gettext_noop("Task tracker sleep time between task management rounds."), gettext_noop("The task tracker process wakes up regularly, walks over " "all tasks assigned to it, and schedules and executes these " "tasks. Then, the task tracker sleeps for a time period " "before walking over these tasks again. This configuration " "value determines the length of that sleeping period."), &TaskTrackerDelay, 200, 10, 100000, PGC_SIGHUP, GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_assign_task_batch_size", gettext_noop("Sets the maximum number of tasks to assign per round."), gettext_noop("The master node synchronously assigns tasks to workers in " "batches. Bigger batches allow for faster task assignment, " "but it may take longer for all workers to get tasks " "if the number of workers is large. This configuration " "value controls the maximum batch size."), &MaxAssignTaskBatchSize, 64, 1, INT_MAX, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_tracked_tasks_per_node", gettext_noop("Sets the maximum number of tracked tasks per node."), gettext_noop("The task tracker process keeps all assigned tasks in " "a shared hash table, and schedules and executes these " "tasks as appropriate. This configuration value limits " "the size of the hash table, and therefore the maximum " "number of tasks that can be tracked at any given time."), &MaxTrackedTasksPerNode, 1024, 8, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_running_tasks_per_node", gettext_noop("Sets the maximum number of tasks to run concurrently per node."), gettext_noop("The task tracker process schedules and executes the tasks " "assigned to it as appropriate. This configuration value " "sets the maximum number of tasks to execute concurrently " "on one node at any given time."), &MaxRunningTasksPerNode, 8, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.partition_buffer_size", gettext_noop("Sets the buffer size to use for partition operations."), gettext_noop("Worker nodes allow for table data to be repartitioned " "into multiple text files, much like Hadoop's Map " "command. This configuration value sets the buffer size " "to use per partition operation. After the buffer fills " "up, we flush the repartitioned data into text files."), &PartitionBufferSize, 8192, 0, (INT_MAX / 1024), /* result stored in int variable */ PGC_USERSET, GUC_UNIT_KB, NULL, NULL, NULL); DefineCustomIntVariable( "citus.large_table_shard_count", gettext_noop("The shard count threshold over which a table is considered large."), gettext_noop("A distributed table is considered to be large if it has " "more shards than the value specified here. This largeness " "criteria is then used in picking a table join order during " "distributed query planning."), &LargeTableShardCount, 4, 1, 10000, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.limit_clause_row_fetch_count", gettext_noop("Number of rows to fetch per task for limit clause optimization."), gettext_noop("Select queries get partitioned and executed as smaller " "tasks. In some cases, select queries with limit clauses " "may need to fetch all rows from each task to generate " "results. In those cases, and where an approximation would " "produce meaningful results, this configuration value sets " "the number of rows to fetch from each task."), &LimitClauseRowFetchCount, -1, -1, INT_MAX, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomRealVariable( "citus.count_distinct_error_rate", gettext_noop("Desired error rate when calculating count(distinct) " "approximates using the postgresql-hll extension. " "0.0 disables approximations for count(distinct); 1.0 " "provides no guarantees about the accuracy of results."), NULL, &CountDistinctErrorRate, 0.0, 0.0, 1.0, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.multi_shard_commit_protocol", gettext_noop("Sets the commit protocol for commands modifying multiple shards."), gettext_noop("When a failure occurs during commands that modify multiple " "shards (currently, only COPY on distributed tables modifies more " "than one shard), two-phase commit is required to ensure data is " "never lost. Change this setting to '2pc' from its default '1pc' to " "enable 2 PC. You must also set max_prepared_transactions on the " "worker nodes. Recovery from failed 2PCs is currently manual."), &MultiShardCommitProtocol, COMMIT_PROTOCOL_1PC, multi_shard_commit_protocol_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.task_assignment_policy", gettext_noop("Sets the policy to use when assigning tasks to worker nodes."), gettext_noop("The master node assigns tasks to worker nodes based on shard " "locations. This configuration value specifies the policy to " "use when making these assignments. The greedy policy aims to " "evenly distribute tasks across worker nodes, first-replica just " "assigns tasks in the order shard placements were created, " "and the round-robin policy assigns tasks to worker nodes in " "a round-robin fashion."), &TaskAssignmentPolicy, TASK_ASSIGNMENT_GREEDY, task_assignment_policy_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.task_executor_type", gettext_noop("Sets the executor type to be used for distributed queries."), gettext_noop("The master node chooses between two different executor types " "when executing a distributed query. The real-time executor is " "optimal for simple key-value lookup queries and queries that " "involve aggregations and/or co-located joins on multiple shards. " "The task-tracker executor is optimal for long-running, complex " "queries that touch thousands of shards and/or that involve table " "repartitioning."), &TaskExecutorType, MULTI_EXECUTOR_REAL_TIME, task_executor_type_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.shard_placement_policy", gettext_noop("Sets the policy to use when choosing nodes for shard placement."), gettext_noop("The master node chooses which worker nodes to place new shards " "on. This configuration value specifies the policy to use when " "selecting these nodes. The local-node-first policy places the " "first replica on the client node and chooses others randomly. " "The round-robin policy aims to distribute shards evenly across " "the cluster by selecting nodes in a round-robin fashion. " "The random policy picks all workers randomly."), &ShardPlacementPolicy, SHARD_PLACEMENT_ROUND_ROBIN, shard_placement_policy_options, PGC_USERSET, 0, NULL, NULL, NULL); /* warn about config items in the citus namespace that are not registered above */ EmitWarningsOnPlaceholders("citus"); }
/* * Entrypoint of this module. * * We register more than one worker process here, to demonstrate how that can * be done. */ void _PG_init(void) { BackgroundWorker worker; unsigned int i; /* Define which database to attach */ DefineCustomStringVariable("wed_worker.db_name", "WED-flow database to attach", NULL, &wed_worker_db_name, __DB_NAME__, PGC_SIGHUP, 0, NULL, NULL, NULL); /* get the configuration */ DefineCustomIntVariable("wed_worker.naptime", "Duration between each check (in seconds).", NULL, &wed_worker_naptime, 10, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); if (!process_shared_preload_libraries_in_progress) return; DefineCustomIntVariable("wed_worker.total_workers", "Number of workers.", NULL, &wed_worker_total_workers, 1, 1, 100, PGC_POSTMASTER, 0, NULL, NULL, NULL); /* set up common data for all our workers */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; worker.bgw_main = wed_worker_main; worker.bgw_notify_pid = 0; /* * Now fill in worker-specific data, and do the actual registrations. */ for (i = 1; i <= wed_worker_total_workers; i++) { snprintf(worker.bgw_name, BGW_MAXLEN, "ww[%s] %d", wed_worker_db_name, i); worker.bgw_main_arg = Int32GetDatum(i); RegisterBackgroundWorker(&worker); } }
/* * Module load callback */ void _PG_init(void) { /* can be preloaded only from postgresql.conf */ if (! process_shared_preload_libraries_in_progress) elog(ERROR, "connection_limits_shared has to be loaded using " "shared_preload_libraries"); DefineCustomIntVariable("connection_limits.per_database", "Default number of connections per database.", "Zero disables this check.", &default_per_database, 0, 0, MaxBackends, PGC_POSTMASTER, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); DefineCustomIntVariable("connection_limits.per_user", "Default number of connections per user.", "Zero disables this check.", &default_per_role, 0, 0, MaxBackends, PGC_POSTMASTER, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); DefineCustomIntVariable("connection_limits.per_ip", "Default number of connections per IP.", "Zero disables this check.", &default_per_ip, 0, 0, MaxBackends, PGC_POSTMASTER, 0, #if (PG_VERSION_NUM >= 90100) NULL, #endif NULL, NULL); EmitWarningsOnPlaceholders("connection_limits"); /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in pg_limits_shmem_startup(). */ RequestAddinShmemSpace(SEGMENT_SIZE); RequestAddinLWLocks(1); /* single lock guarding the rules state */ /* Install shared memory startup hook. */ prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = pg_limits_shmem_startup; /* Install client authentication hook. */ prev_client_auth_hook = ClientAuthentication_hook; ClientAuthentication_hook = check_rules; }
/* * Module load callback */ void _PG_init(void) { /* * In order to create our shared memory area, we have to be loaded via * shared_preload_libraries. If not, fall out without hooking into any of * the main system. (We don't throw error here because it seems useful to * allow the query_histogram functions to be created even when the * module isn't active. The functions must protect themselves against * being called then, however.) */ if (!process_shared_preload_libraries_in_progress) return; /* Define custom GUC variables. */ DefineCustomBoolVariable("query_histogram.dynamic", "Dynamic histogram may be modified on the fly.", NULL, &default_histogram_dynamic, false, PGC_BACKEND, 0, NULL, NULL, NULL); /* Define custom GUC variables. */ DefineCustomBoolVariable("query_histogram.track_utility", "Selects whether utility commands are tracked.", NULL, &default_histogram_utility, true, PGC_SUSET, 0, NULL, &set_histogram_track_utility, &show_histogram_track_utility); DefineCustomIntVariable("query_histogram.bin_count", "Sets the number of bins of the histogram.", "Zero disables collecting the histogram.", &default_histogram_bins, 100, 0, 1000, PGC_SUSET, 0, NULL, &set_histogram_bins_count_hook, &show_histogram_bins_count_hook); DefineCustomIntVariable("query_histogram.bin_width", "Sets the width of the histogram bin.", NULL, &default_histogram_step, 100, 1, 1000, PGC_SUSET, GUC_UNIT_MS, NULL, &set_histogram_bins_width_hook, &show_histogram_bins_width_hook); DefineCustomIntVariable("query_histogram.sample_pct", "What portion of the queries should be sampled (in percent).", NULL, &default_histogram_sample_pct, 5, 1, 100, PGC_SUSET, 0, NULL, &set_histogram_sample_hook, &show_histogram_sample_hook); DefineCustomEnumVariable("query_histogram.histogram_type", "Type of the histogram (how the bin width is computed).", NULL, &default_histogram_type, HISTOGRAM_LINEAR, histogram_type_options, PGC_SUSET, 0, NULL, &set_histogram_type_hook, &show_histogram_type_hook); EmitWarningsOnPlaceholders("query_histogram"); /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in histogram_shmem_startup(). */ RequestAddinShmemSpace(get_histogram_size()); RequestNamedLWLockTranche("query_histogram", 1); /* Install hooks. */ prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = histogram_shmem_startup; prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = histogram_ExecutorStart; prev_ExecutorRun = ExecutorRun_hook; ExecutorRun_hook = histogram_ExecutorRun; prev_ExecutorFinish = ExecutorFinish_hook; ExecutorFinish_hook = histogram_ExecutorFinish; prev_ExecutorEnd = ExecutorEnd_hook; ExecutorEnd_hook = histogram_ExecutorEnd; prev_ProcessUtility = ProcessUtility_hook; ProcessUtility_hook = queryhist_ProcessUtility; }