/* * ProcSendSignal - send a signal to a backend identified by PID */ void ProcSendSignal(int pid) { PGPROC *proc = NULL; if (RecoveryInProgress()) { /* use volatile pointer to prevent code rearrangement */ volatile PROC_HDR *procglobal = ProcGlobal; SpinLockAcquire(ProcStructLock); /* * Check to see whether it is the Startup process we wish to signal. * This call is made by the buffer manager when it wishes to wake up a * process that has been waiting for a pin in so it can obtain a * cleanup lock using LockBufferForCleanup(). Startup is not a normal * backend, so BackendPidGetProc() will not return any pid at all. So * we remember the information for this special case. */ if (pid == procglobal->startupProcPid) proc = procglobal->startupProc; SpinLockRelease(ProcStructLock); } if (proc == NULL) proc = BackendPidGetProc(pid); if (proc != NULL) { SetLatch(&proc->procLatch); } }
/*
 * ProcSendSignal - send a signal to a backend identified by PID
 */
void
ProcSendSignal(int pid)
{
	PGPROC	   *backend = BackendPidGetProc(pid);

	/* Silently ignore pids that do not belong to a live backend. */
	if (backend == NULL)
		return;

	PGSemaphoreUnlock(&backend->sem);
}
static int pg_signal_backend(int pid, int sig) { PGPROC *proc; if (!superuser()) { /* * Since the user is not superuser, check for matching roles. Trust * that BackendPidGetProc will return NULL if the pid isn't valid, * even though the check for whether it's a backend process is below. * The IsBackendPid check can't be relied on as definitive even if it * was first. The process might end between successive checks * regardless of their order. There's no way to acquire a lock on an * arbitrary process to prevent that. But since so far all the callers * of this mechanism involve some request for ending the process * anyway, that it might end on its own first is not a problem. */ proc = BackendPidGetProc(pid); if (proc == NULL || proc->roleId != GetUserId()) return SIGNAL_BACKEND_NOPERMISSION; } if (!IsBackendPid(pid)) { /* * This is just a warning so a loop-through-resultset will not abort * if one backend terminated on it's own during the run */ ereport(WARNING, (errmsg("PID %d is not a PostgreSQL server process", pid))); return SIGNAL_BACKEND_ERROR; } /* * Can the process we just validated above end, followed by the pid being * recycled for a new process, before reaching here? Then we'd be trying * to kill the wrong thing. Seems near impossible when sequential pid * assignment and wraparound is used. Perhaps it could happen on a system * where pid re-use is randomized. That race condition possibility seems * too unlikely to worry about. */ /* If we have setsid(), signal the backend's whole process group */ #ifdef HAVE_SETSID if (kill(-pid, sig)) #else if (kill(pid, sig)) #endif { /* Again, just a warning to allow loops */ ereport(WARNING, (errmsg("could not send signal to process %d: %m", pid))); return SIGNAL_BACKEND_ERROR; } return SIGNAL_BACKEND_SUCCESS; }
static int pg_signal_backend(int pid, int sig) { PGPROC *proc = BackendPidGetProc(pid); /* * BackendPidGetProc returns NULL if the pid isn't valid; but by the time * we reach kill(), a process for which we get a valid proc here might * have terminated on its own. There's no way to acquire a lock on an * arbitrary process to prevent that. But since so far all the callers of * this mechanism involve some request for ending the process anyway, that * it might end on its own first is not a problem. */ if (proc == NULL) { /* * This is just a warning so a loop-through-resultset will not abort * if one backend terminated on its own during the run. */ ereport(WARNING, (errmsg("PID %d is not a PostgreSQL server process", pid))); return SIGNAL_BACKEND_ERROR; } /* Only allow superusers to signal superuser-owned backends. */ if (superuser_arg(proc->roleId) && !superuser()) return SIGNAL_BACKEND_NOSUPERUSER; /* Users can signal backends they have role membership in. */ if (!has_privs_of_role(GetUserId(), proc->roleId) && !has_privs_of_role(GetUserId(), DEFAULT_ROLE_SIGNAL_BACKENDID)) return SIGNAL_BACKEND_NOPERMISSION; /* * Can the process we just validated above end, followed by the pid being * recycled for a new process, before reaching here? Then we'd be trying * to kill the wrong thing. Seems near impossible when sequential pid * assignment and wraparound is used. Perhaps it could happen on a system * where pid re-use is randomized. That race condition possibility seems * too unlikely to worry about. */ /* If we have setsid(), signal the backend's whole process group */ #ifdef HAVE_SETSID if (kill(-pid, sig)) #else if (kill(pid, sig)) #endif { /* Again, just a warning to allow loops */ ereport(WARNING, (errmsg("could not send signal to process %d: %m", pid))); return SIGNAL_BACKEND_ERROR; } return SIGNAL_BACKEND_SUCCESS; }
Datum pg_stat_get_backend_wait_event_type(PG_FUNCTION_ARGS) { int32 beid = PG_GETARG_INT32(0); PgBackendStatus *beentry; PGPROC *proc; const char *wait_event_type; if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL) wait_event_type = "<backend information not available>"; else if (!has_privs_of_role(GetUserId(), beentry->st_userid)) wait_event_type = "<insufficient privilege>"; else { proc = BackendPidGetProc(beentry->st_procpid); wait_event_type = pgstat_get_wait_event_type(proc->wait_event_info); } if (!wait_event_type) PG_RETURN_NULL(); PG_RETURN_TEXT_P(cstring_to_text(wait_event_type)); }
/*
 * Background worker entrypoint.
 *
 * This demonstrates how a background worker can be used to facilitate a
 * parallel computation.  Most of the logic here is boilerplate: attach to
 * the shared memory segment, notify the user backend that we're alive, and
 * so on.  The application-specific bits you'd replace for your own worker
 * are attach_to_queues() and copy_messages().
 */
void
test_shm_mq_main(Datum main_arg)
{
	dsm_segment *segment;
	shm_toc    *toc;
	shm_mq_handle *in_handle;
	shm_mq_handle *out_handle;
	volatile test_shm_mq_header *header;
	int			worker_number;
	PGPROC	   *registrant_proc;

	/*
	 * Establish signal handlers.
	 *
	 * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just
	 * as it would a normal user backend, so install a stripped-down version
	 * of die().  There is no equivalent of the backend's command-read loop
	 * here where interrupts could be serviced immediately, so make sure
	 * ImmediateInterruptOK stays off.
	 */
	pqsignal(SIGTERM, handle_sigterm);
	ImmediateInterruptOK = false;
	BackgroundWorkerUnblockSignals();

	/*
	 * Connect to the dynamic shared memory segment.
	 *
	 * The registering backend passed us the ID of the segment carrying our
	 * instructions.  Attaching to dynamic shared memory requires a resource
	 * owner, so create one first; then map the segment and attach to its
	 * table of contents so we can locate the data structures within it.
	 */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker");
	segment = dsm_attach(DatumGetInt32(main_arg));
	if (segment == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));
	toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(segment));
	if (toc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("bad magic number in dynamic shared memory segment")));

	/*
	 * Acquire a worker number.
	 *
	 * By convention, the registering process stored the control structure
	 * at key 0.  Our worker number gives our identity: there may be one
	 * worker in this parallel operation, or many.
	 */
	header = shm_toc_lookup(toc, 0);
	SpinLockAcquire(&header->mutex);
	worker_number = ++header->workers_attached;
	SpinLockRelease(&header->mutex);
	if (worker_number > header->workers_total)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("too many message queue testing workers already")));

	/* Attach to the appropriate message queues. */
	attach_to_queues(segment, toc, worker_number, &in_handle, &out_handle);

	/*
	 * Indicate that we're fully initialized and ready to begin the main
	 * part of the parallel operation.
	 *
	 * Once we signal readiness, the user backend is entitled to assume that
	 * our on_dsm_detach callbacks will fire before we disconnect from the
	 * segment and exit; so all relevant shared data structures must be
	 * attached by this point.
	 */
	SpinLockAcquire(&header->mutex);
	++header->workers_ready;
	SpinLockRelease(&header->mutex);
	registrant_proc = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
	if (registrant_proc == NULL)
	{
		elog(DEBUG1, "registrant backend has exited prematurely");
		proc_exit(1);
	}
	SetLatch(&registrant_proc->procLatch);

	/* Do the work. */
	copy_messages(in_handle, out_handle);

	/*
	 * We're done.  Explicitly detach the shared memory segment so that we
	 * don't get a resource leak warning at commit time; this also fires any
	 * on_dsm_detach callbacks we've registered.  Then exit.
	 */
	dsm_detach(segment);
	proc_exit(1);
}
static int pg_signal_backend(int pid, int sig, char *msg) { PGPROC *proc = BackendPidGetProc(pid); /* * BackendPidGetProc returns NULL if the pid isn't valid; but by the time * we reach kill(), a process for which we get a valid proc here might * have terminated on its own. There's no way to acquire a lock on an * arbitrary process to prevent that. But since so far all the callers of * this mechanism involve some request for ending the process anyway, that * it might end on its own first is not a problem. */ if (proc == NULL) { /* * This is just a warning so a loop-through-resultset will not abort * if one backend terminated on its own during the run. */ ereport(WARNING, (errmsg("PID %d is not a PostgreSQL server process", pid))); return SIGNAL_BACKEND_ERROR; } if (!(superuser() || proc->roleId == GetUserId())) return SIGNAL_BACKEND_NOPERMISSION; /* If the user supplied a message to the signalled backend */ if (msg != NULL) { int r; r = SetBackendCancelMessage(pid, msg); if (r != -1 && r != strlen(msg)) ereport(NOTICE, (errmsg("message is too long and has been truncated"))); } /* * Can the process we just validated above end, followed by the pid being * recycled for a new process, before reaching here? Then we'd be trying * to kill the wrong thing. Seems near impossible when sequential pid * assignment and wraparound is used. Perhaps it could happen on a system * where pid re-use is randomized. That race condition possibility seems * too unlikely to worry about. */ /* If we have setsid(), signal the backend's whole process group */ #ifdef HAVE_SETSID if (kill(-pid, sig)) #else if (kill(pid, sig)) #endif { /* Again, just a warning to allow loops */ ereport(WARNING, (errmsg("could not send signal to process %d: %m", pid))); return SIGNAL_BACKEND_ERROR; } return SIGNAL_BACKEND_SUCCESS; }
/*
 * Test background worker entry point: attach to the DSM segment whose
 * handle is passed in main_arg, bump the ready/attached counters, wake the
 * registrant backend, and connect to the configured database.
 */
void
worker_test_main(Datum main_arg)
{
	dsm_segment *seg;
	volatile test_shm_mq_header *hdr;
	PGPROC	   *registrant;

	pqsignal(SIGHUP, handle_sighup);
	pqsignal(SIGTERM, handle_sigterm);
	BackgroundWorkerUnblockSignals();

	printf("worker_test_main: %d\n", DatumGetInt32(main_arg));

	/* A resource owner is required before we can attach to DSM. */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "worker test");

	seg = dsm_attach(DatumGetInt32(main_arg));
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));

	hdr = dsm_segment_address(seg);

	/* Startup: mark ourselves attached and ready under the mutex. */
	SpinLockAcquire(&hdr->mutex);
	hdr->workers_ready++;
	hdr->workers_attached++;
	SpinLockRelease(&hdr->mutex);

	registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
	if (registrant == NULL)
	{
		elog(DEBUG1, "registrant backend has exited prematurely");
		proc_exit(1);
	}
	SetLatch(&registrant->procLatch);

	/* Do the work */
	BackgroundWorkerInitializeConnection(hdr->dbname, NULL);

	/*
	 * BUG FIX: this previously printed dsm_segment_address itself (a
	 * function pointer), not the mapped address of our segment.
	 */
	printf("DSM: %p\n", dsm_segment_address(seg));

#if 0
	SetCurrentStatementStartTimestamp();
	StartTransactionCommand();
	SPI_connect();
	PushActiveSnapshot(GetTransactionSnapshot());
	pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");

	SPI_finish();
	PopActiveSnapshot();
	CommitTransactionCommand();
	pgstat_report_activity(STATE_IDLE, NULL);
#endif

	dsm_detach(seg);

	proc_exit(0);
}
/* * Returns activity of PG backends. */ Datum pg_stat_get_activity(PG_FUNCTION_ARGS) { #define PG_STAT_GET_ACTIVITY_COLS 23 int num_backends = pgstat_fetch_stat_numbackends(); int curr_backend; int pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; TupleDesc tupdesc; Tuplestorestate *tupstore; MemoryContext per_query_ctx; MemoryContext oldcontext; /* check to see if caller supports us returning a tuplestore */ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"))); if (!(rsinfo->allowedModes & SFRM_Materialize)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " \ "allowed in this context"))); /* Build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) elog(ERROR, "return type must be a row type"); per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); tupstore = tuplestore_begin_heap(true, false, work_mem); rsinfo->returnMode = SFRM_Materialize; rsinfo->setResult = tupstore; rsinfo->setDesc = tupdesc; MemoryContextSwitchTo(oldcontext); /* 1-based index */ for (curr_backend = 1; curr_backend <= num_backends; curr_backend++) { /* for each row */ Datum values[PG_STAT_GET_ACTIVITY_COLS]; bool nulls[PG_STAT_GET_ACTIVITY_COLS]; LocalPgBackendStatus *local_beentry; PgBackendStatus *beentry; PGPROC *proc; const char *wait_event_type; const char *wait_event; MemSet(values, 0, sizeof(values)); MemSet(nulls, 0, sizeof(nulls)); if (pid != -1) { /* Skip any which are not the one we're looking for. 
*/ PgBackendStatus *be = pgstat_fetch_stat_beentry(curr_backend); if (!be || be->st_procpid != pid) continue; } /* Get the next one in the list */ local_beentry = pgstat_fetch_stat_local_beentry(curr_backend); if (!local_beentry) continue; beentry = &local_beentry->backendStatus; if (!beentry) { int i; for (i = 0; i < sizeof(nulls) / sizeof(nulls[0]); i++) nulls[i] = true; nulls[5] = false; values[5] = CStringGetTextDatum("<backend information not available>"); tuplestore_putvalues(tupstore, tupdesc, values, nulls); continue; } /* Values available to all callers */ values[0] = ObjectIdGetDatum(beentry->st_databaseid); values[1] = Int32GetDatum(beentry->st_procpid); values[2] = ObjectIdGetDatum(beentry->st_userid); if (beentry->st_appname) values[3] = CStringGetTextDatum(beentry->st_appname); else nulls[3] = true; if (TransactionIdIsValid(local_beentry->backend_xid)) values[15] = TransactionIdGetDatum(local_beentry->backend_xid); else nulls[15] = true; if (TransactionIdIsValid(local_beentry->backend_xmin)) values[16] = TransactionIdGetDatum(local_beentry->backend_xmin); else nulls[16] = true; if (beentry->st_ssl) { values[17] = BoolGetDatum(true); /* ssl */ values[18] = CStringGetTextDatum(beentry->st_sslstatus->ssl_version); values[19] = CStringGetTextDatum(beentry->st_sslstatus->ssl_cipher); values[20] = Int32GetDatum(beentry->st_sslstatus->ssl_bits); values[21] = BoolGetDatum(beentry->st_sslstatus->ssl_compression); values[22] = CStringGetTextDatum(beentry->st_sslstatus->ssl_clientdn); } else { values[17] = BoolGetDatum(false); /* ssl */ nulls[18] = nulls[19] = nulls[20] = nulls[21] = nulls[22] = true; } /* Values only available to role member */ if (has_privs_of_role(GetUserId(), beentry->st_userid)) { SockAddr zero_clientaddr; switch (beentry->st_state) { case STATE_IDLE: values[4] = CStringGetTextDatum("idle"); break; case STATE_RUNNING: values[4] = CStringGetTextDatum("active"); break; case STATE_IDLEINTRANSACTION: values[4] = CStringGetTextDatum("idle in 
transaction"); break; case STATE_FASTPATH: values[4] = CStringGetTextDatum("fastpath function call"); break; case STATE_IDLEINTRANSACTION_ABORTED: values[4] = CStringGetTextDatum("idle in transaction (aborted)"); break; case STATE_DISABLED: values[4] = CStringGetTextDatum("disabled"); break; case STATE_UNDEFINED: nulls[4] = true; break; } values[5] = CStringGetTextDatum(beentry->st_activity); proc = BackendPidGetProc(beentry->st_procpid); wait_event_type = pgstat_get_wait_event_type(proc->wait_event_info); if (wait_event_type) values[6] = CStringGetTextDatum(wait_event_type); else nulls[6] = true; wait_event = pgstat_get_wait_event(proc->wait_event_info); if (wait_event) values[7] = CStringGetTextDatum(wait_event); else nulls[7] = true; if (beentry->st_xact_start_timestamp != 0) values[8] = TimestampTzGetDatum(beentry->st_xact_start_timestamp); else nulls[8] = true; if (beentry->st_activity_start_timestamp != 0) values[9] = TimestampTzGetDatum(beentry->st_activity_start_timestamp); else nulls[9] = true; if (beentry->st_proc_start_timestamp != 0) values[10] = TimestampTzGetDatum(beentry->st_proc_start_timestamp); else nulls[10] = true; if (beentry->st_state_start_timestamp != 0) values[11] = TimestampTzGetDatum(beentry->st_state_start_timestamp); else nulls[11] = true; /* A zeroed client addr means we don't know */ memset(&zero_clientaddr, 0, sizeof(zero_clientaddr)); if (memcmp(&(beentry->st_clientaddr), &zero_clientaddr, sizeof(zero_clientaddr)) == 0) { nulls[12] = true; nulls[13] = true; nulls[14] = true; } else { if (beentry->st_clientaddr.addr.ss_family == AF_INET #ifdef HAVE_IPV6 || beentry->st_clientaddr.addr.ss_family == AF_INET6 #endif ) { char remote_host[NI_MAXHOST]; char remote_port[NI_MAXSERV]; int ret; remote_host[0] = '\0'; remote_port[0] = '\0'; ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr, beentry->st_clientaddr.salen, remote_host, sizeof(remote_host), remote_port, sizeof(remote_port), NI_NUMERICHOST | NI_NUMERICSERV); if (ret == 0) { 
clean_ipv6_addr(beentry->st_clientaddr.addr.ss_family, remote_host); values[12] = DirectFunctionCall1(inet_in, CStringGetDatum(remote_host)); if (beentry->st_clienthostname && beentry->st_clienthostname[0]) values[13] = CStringGetTextDatum(beentry->st_clienthostname); else nulls[13] = true; values[14] = Int32GetDatum(atoi(remote_port)); } else { nulls[12] = true; nulls[13] = true; nulls[14] = true; } } else if (beentry->st_clientaddr.addr.ss_family == AF_UNIX) { /* * Unix sockets always reports NULL for host and -1 for * port, so it's possible to tell the difference to * connections we have no permissions to view, or with * errors. */ nulls[12] = true; nulls[13] = true; values[14] = DatumGetInt32(-1); } else { /* Unknown address type, should never happen */ nulls[12] = true; nulls[13] = true; nulls[14] = true; } } } else { /* No permissions to view data about this session */ values[5] = CStringGetTextDatum("<insufficient privilege>"); nulls[4] = true; nulls[6] = true; nulls[7] = true; nulls[8] = true; nulls[9] = true; nulls[10] = true; nulls[11] = true; nulls[12] = true; nulls[13] = true; nulls[14] = true; } tuplestore_putvalues(tupstore, tupdesc, values, nulls); /* If only a single backend was requested, and we found it, break. */ if (pid != -1) break; } /* clean up and return the tuplestore */ tuplestore_donestoring(tupstore); return (Datum) 0; }