/*
 * Signal handler for SIGHUP
 *
 * Used only in BufferSaver.  Set a flag to notify the main loop of the
 * signal received, and set our latch to wake it up.
 */
static void
sighupHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	got_sighup = true;

	/* SetLatch() can clobber errno, so preserve the interrupted value */
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	errno = save_errno;
}
/*
 * Wake up the given logical replication worker by setting its process latch.
 *
 * The caller is required to hold LogicalRepWorkerLock; otherwise
 * worker->proc could change underneath us.
 */
void
logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker)
{
	PGPROC	   *workerproc;

	Assert(LWLockHeldByMe(LogicalRepWorkerLock));

	workerproc = worker->proc;
	SetLatch(&workerproc->procLatch);
}
/*
 * ProcSendSignal - send a signal to a backend identified by PID
 *
 * Despite the name, delivery is by setting the target process's latch,
 * not by kill().  Returns nothing; if no matching PGPROC is found the
 * call is silently a no-op.
 */
void
ProcSendSignal(int pid)
{
	PGPROC	   *proc = NULL;

	if (RecoveryInProgress())
	{
		/* use volatile pointer to prevent code rearrangement */
		volatile PROC_HDR *procglobal = ProcGlobal;

		/* startupProcPid/startupProc are protected by ProcStructLock */
		SpinLockAcquire(ProcStructLock);

		/*
		 * Check to see whether it is the Startup process we wish to signal.
		 * This call is made by the buffer manager when it wishes to wake up a
		 * process that has been waiting for a pin in so it can obtain a
		 * cleanup lock using LockBufferForCleanup().  Startup is not a normal
		 * backend, so BackendPidGetProc() will not return any pid at all.  So
		 * we remember the information for this special case.
		 */
		if (pid == procglobal->startupProcPid)
			proc = procglobal->startupProc;

		SpinLockRelease(ProcStructLock);
	}

	/* Normal case: look the PID up in the shared ProcArray */
	if (proc == NULL)
		proc = BackendPidGetProc(pid);

	if (proc != NULL)
	{
		SetLatch(&proc->procLatch);
	}
}
/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 * (In the current code the actual wakeup mechanism is the process latch,
 * set at the bottom of this function.)
 *
 * Also remove the process from the wait queue and set its links invalid.
 * RETURN: the next process in the wait queue.
 *
 * The appropriate lock partition lock must be held by caller.
 *
 * XXX: presently, this code is only used for the "success" case, and only
 * works correctly for that case.  To clean up in failure case, would need
 * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
 * Hence, in practice the waitStatus parameter must be STATUS_OK.
 */
PGPROC *
ProcWakeup(PGPROC *proc, int waitStatus)
{
	PGPROC	   *retProc;

	/* Proc should be sleeping ... */
	if (proc->links.prev == NULL ||
		proc->links.next == NULL)
		return NULL;
	Assert(proc->waitStatus == STATUS_WAITING);

	/* Save next process before we zap the list link */
	retProc = (PGPROC *) proc->links.next;

	/* Remove process from wait queue */
	SHMQueueDelete(&(proc->links));
	(proc->waitLock->waitProcs.size)--;

	/* Clean up process' state and pass it the ok/fail signal */
	proc->waitLock = NULL;
	proc->waitProcLock = NULL;
	proc->waitStatus = waitStatus;

	/* And awaken it */
	SetLatch(&proc->procLatch);

	return retProc;
}
/* SIGUSR2: set flag to do a last cycle and shut down afterwards */
static void
WalSndLastCycleHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	ready_to_stop = true;
	if (MyWalSnd)
		SetLatch(&MyWalSnd->latch);

	/* restore errno clobbered by SetLatch() */
	errno = save_errno;
}
/* SIGHUP handler: log receipt and wake the main loop via our latch. */
static void
worker_spi_sighup(SIGNAL_ARGS)
{
	int			save_errno = errno;

	/*
	 * XXX(review): elog() is not async-signal-safe; calling it from a signal
	 * handler risks recursion/deadlock.  Left in place to preserve behavior,
	 * but consider setting a flag and logging from the main loop instead.
	 */
	elog(LOG, "got sighup");
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	/* restore errno clobbered by elog()/SetLatch() */
	errno = save_errno;
}
/* SIGHUP: set flag to re-read config file at next convenient time */
static void
WalSndSigHupHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	got_SIGHUP = true;
	if (MyWalSnd)
		SetLatch(&MyWalSnd->latch);

	/* restore errno clobbered by SetLatch() */
	errno = save_errno;
}
/* SIGTERM: set flag to shut down */
static void
WalSndShutdownHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	shutdown_requested = true;
	if (MyWalSnd)
		SetLatch(&MyWalSnd->latch);

	/* restore errno clobbered by SetLatch() */
	errno = save_errno;
}
/*
 * Signal handler for SIGHUP
 *		Set a flag to let the main loop reread the config file, and set
 *		our latch to wake it up.
 */
static void
pg_keeper_sighup(SIGNAL_ARGS)
{
	int			save_errno = errno;

	got_sighup = true;
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	/* restore errno clobbered by SetLatch() */
	errno = save_errno;
}
/*
 * Timeout callback for the idle-in-transaction session timeout.
 *
 * Runs from timeout-interrupt context: just set the pending flags so
 * ProcessInterrupts() acts on them, and set our latch to wake any wait.
 * NOTE(review): no errno save here — presumably the invoking SIGALRM
 * dispatcher preserves errno; confirm against the timeout machinery.
 */
static void
IdleInTransactionSessionTimeoutHandler(void)
{
	IdleInTransactionSessionTimeoutPending = true;
	InterruptPending = true;
	SetLatch(MyLatch);
}
/*
 * Tear down every continuous-query process in the group: signal them,
 * give them a grace period, then force-terminate stragglers and drop
 * the group's bookkeeping.
 */
static void
terminate_group(ContQueryProcGroup *grp)
{
	bool		removed;
	int			slot;

	grp->active = true;

	for (slot = 0; slot < TOTAL_SLOTS; slot++)
	{
		ContQueryProc *cqproc = &grp->procs[slot];

		/* Wake up processes, so they can see the terminate flag. */
		SetLatch(cqproc->latch);

		/* Let workers crash now as well in case we force terminate them. */
		ChangeBackgroundWorkerRestartState(&cqproc->handle, true, 0);
	}

	/* Grace period, then force terminate anything still alive. */
	pg_usleep(Max(ContQuerySchedulerShmem->params.max_wait, MIN_WAIT_TERMINATE_MS) * 1000);

	for (slot = 0; slot < TOTAL_SLOTS; slot++)
		TerminateBackgroundWorker(&grp->procs[slot].handle);

	/* Drop the group from the scheduler's hash table. */
	hash_search(ContQuerySchedulerShmem->proc_table, &grp->db_oid, HASH_REMOVE, &removed);
	Assert(removed);

	/* Flush any tuples still queued for this database. */
	TupleBufferDrain(WorkerTupleBuffer, grp->db_oid);
	TupleBufferDrain(CombinerTupleBuffer, grp->db_oid);
}
/* SIGHUP signal handler for archiver process */
static void
ArchSigHupHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	/* set flag to re-read config file at next convenient time */
	got_SIGHUP = true;
	/* let the waiting loop iterate */
	SetLatch(&mainloop_latch);

	/* restore errno clobbered by SetLatch() */
	errno = save_errno;
}
/* SIGHUP handler: record the reload request and wake the main loop. */
static void
hello_sighup(SIGNAL_ARGS)
{
	int			saved_errno = errno;

	got_sighup = true;
	SetLatch(MyLatch);

	errno = saved_errno;
}
/* SIGUSR1 signal handler for archiver process */
static void
pgarch_waken(SIGNAL_ARGS)
{
	int			save_errno = errno;

	/* set flag that there is work to be done */
	wakened = true;
	/* let the waiting loop iterate */
	SetLatch(&mainloop_latch);

	/* restore errno clobbered by SetLatch() */
	errno = save_errno;
}
/* Wake every walsender slot by setting its latch. */
void
WalSndWakeup(void)
{
	int			idx;

	for (idx = 0; idx < max_wal_senders; idx++)
	{
		SetLatch(&WalSndCtl->walsnds[idx].latch);
	}
}
/* SIGUSR2 signal handler for archiver process */
static void
pgarch_waken_stop(SIGNAL_ARGS)
{
	int			save_errno = errno;

	/* set flag to do a final cycle and shut down afterwards */
	ready_to_stop = true;
	/* let the waiting loop iterate */
	SetLatch(&mainloop_latch);

	/* restore errno clobbered by SetLatch() */
	errno = save_errno;
}
/* * hello_sigterm * * SIGTERM handler. */ static void hello_sigterm(SIGNAL_ARGS) { int save_errno = errno; got_sigterm = true; if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; }
static void kill_idle_sighup(SIGNAL_ARGS) { int save_errno = errno; got_sighup = true; if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; }
/*
 * SIGHUP handler: note that a config reload was requested and wake the
 * main loop via our latch.
 */
static void
pg_octopus_sighup(SIGNAL_ARGS)
{
	int			saved_errno = errno;

	got_sighup = true;
	SetLatch(MyLatch);

	errno = saved_errno;
}
/*
 * SIGHUP handler: flag that the config file should be reread, then set
 * our latch so the main loop wakes up and acts on it.
 */
static void
worker_spi_sighup(SIGNAL_ARGS)
{
	int			saved_errno = errno;

	got_sighup = true;
	SetLatch(MyLatch);

	errno = saved_errno;
}
/* SIGUSR2 handler: request a shutdown checkpoint followed by exit. */
static void
ReqShutdownHandler(SIGNAL_ARGS)
{
	int			saved_errno = errno;

	shutdown_requested = true;
	SetLatch(MyLatch);

	errno = saved_errno;
}
/* SIGINT handler: request an immediate normal checkpoint. */
static void
ReqCheckpointHandler(SIGNAL_ARGS)
{
	int			saved_errno = errno;

	checkpoint_requested = true;
	SetLatch(MyLatch);

	errno = saved_errno;
}
/* SIGHUP handler: schedule a config-file reread at the next opportunity. */
static void
ChkptSigHupHandler(SIGNAL_ARGS)
{
	int			saved_errno = errno;

	got_SIGHUP = true;
	SetLatch(MyLatch);

	errno = saved_errno;
}
/*
 * ForwardFsyncRequest
 *		Forward a file-fsync request from a backend to the checkpointer
 *
 * Whenever a backend is compelled to write directly to a relation
 * (which should be seldom, if the background writer is getting its job done),
 * the backend calls this routine to pass over knowledge that the relation
 * is dirty and must be fsync'd before next checkpoint.  We also use this
 * opportunity to count such writes for statistical purposes.
 *
 * This functionality is only supported for regular (not backend-local)
 * relations, so the rnode argument is intentionally RelFileNode not
 * RelFileNodeBackend.
 *
 * segno specifies which segment (not block!) of the relation needs to be
 * fsync'd.  (Since the valid range is much less than BlockNumber, we can
 * use high values for special flags; that's all internal to md.c, which
 * see for details.)
 *
 * To avoid holding the lock for longer than necessary, we normally write
 * to the requests[] queue without checking for duplicates.  The checkpointer
 * will have to eliminate dups internally anyway.  However, if we discover
 * that the queue is full, we make a pass over the entire queue to compact
 * it.  This is somewhat expensive, but the alternative is for the backend
 * to perform its own fsync, which is far more expensive in practice.  It
 * is theoretically possible a backend fsync might still be necessary, if
 * the queue is full and contains no duplicate entries.  In that case, we
 * let the backend know by returning false.
 */
bool
ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
	CheckpointerRequest *request;
	bool		too_full;

	if (!IsUnderPostmaster)
		return false;			/* probably shouldn't even get here */

	if (AmCheckpointerProcess())
		elog(ERROR, "ForwardFsyncRequest must not be called in checkpointer");

	LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

	/* Count all backend writes regardless of if they fit in the queue */
	if (!AmBackgroundWriterProcess())
		CheckpointerShmem->num_backend_writes++;

	/*
	 * If the checkpointer isn't running or the request queue is full, the
	 * backend will have to perform its own fsync request.  But before forcing
	 * that to happen, we can try to compact the request queue.
	 */
	if (CheckpointerShmem->checkpointer_pid == 0 ||
		(CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
		 !CompactCheckpointerRequestQueue()))
	{
		/*
		 * Count the subset of writes where backends have to do their own
		 * fsync
		 */
		if (!AmBackgroundWriterProcess())
			CheckpointerShmem->num_backend_fsync++;
		LWLockRelease(CheckpointerCommLock);
		return false;
	}

	/* OK, insert request */
	request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
	request->rnode = rnode;
	request->forknum = forknum;
	request->segno = segno;

	/* If queue is more than half full, nudge the checkpointer to empty it */
	too_full = (CheckpointerShmem->num_requests >=
				CheckpointerShmem->max_requests / 2);

	LWLockRelease(CheckpointerCommLock);

	/* ... but not till after we release the lock */
	if (too_full && ProcGlobal->checkpointerLatch)
		SetLatch(ProcGlobal->checkpointerLatch);

	return true;
}
/* SIGUSR2: set flag to do a last cycle and shut down afterwards */ static void WalSndLastCycleHandler(SIGNAL_ARGS) { int save_errno = errno; walsender_ready_to_stop = true; if (MyWalSnd) SetLatch(&MyWalSnd->latch); errno = save_errno; }
/* SIGTERM: set flag to shut down */ static void WalSndShutdownHandler(SIGNAL_ARGS) { int save_errno = errno; walsender_shutdown_requested = true; if (MyWalSnd) SetLatch(&MyWalSnd->latch); errno = save_errno; }
/* SIGHUP: set flag to re-read config file at next convenient time */ static void WalSndSigHupHandler(SIGNAL_ARGS) { int save_errno = errno; got_SIGHUP = true; if (MyWalSnd) SetLatch(&MyWalSnd->latch); errno = save_errno; }
/* * Signal handler for SIGHUP * Set a flag to tell the main loop to reread the config file, and set * our latch to wake it up. */ static void worker_spi_sighup(SIGNAL_ARGS) { int save_errno = errno; got_sighup = true; if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; }
/* * Signal handler for SIGTERM * Set a flag to let the main loop to terminate, and set our latch to wake * it up. */ static void kafka_consume_main_sigterm(SIGNAL_ARGS) { int save_errno = errno; got_sigterm = true; if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; }
/* SIGUSR2: set flag to run a shutdown checkpoint and exit */ static void ReqShutdownHandler(SIGNAL_ARGS) { int save_errno = errno; shutdown_requested = true; if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; }