/*
 * DtmSerializeLock -- append one proc's waits-for information to a buffer.
 *
 * Callback invoked per PROCLOCK (presumably by an iterator over the shared
 * lock table -- the caller is not visible in this chunk; confirm against the
 * XTM/multimaster code).  When the PROCLOCK's proc is actually waiting on the
 * PROCLOCK's lock and has a valid transaction id, this emits into 'arg'
 * (a ByteBuffer):
 *
 *   1. the waiting transaction's xid,
 *   2. the xid of every OTHER transaction holding this lock in a mode that
 *      conflicts with the waited-for mode (one xid per holder, found via the
 *      first conflicting lock mode in its holdMask),
 *   3. a 0 terminator closing the holder list.
 *
 * The resulting stream therefore encodes outgoing waits-for edges, evidently
 * for consumption by an external (distributed) deadlock detector.
 *
 * NOTE(review): assumes the caller already holds the lock-table partition
 * locks so the PROCLOCK queues are stable -- TODO confirm at call site.
 */
static void DtmSerializeLock(PROCLOCK* proclock, void* arg)
{
    ByteBuffer* buf = (ByteBuffer*)arg;
    LOCK* lock = proclock->tag.myLock;
    PGPROC* proc = proclock->tag.myProc;

    if (lock != NULL)
    {
        PGXACT* srcPgXact = &ProcGlobal->allPgXact[proc->pgprocno];

        /* Only serialize procs that are really blocked on this very lock. */
        if (TransactionIdIsValid(srcPgXact->xid) && proc->waitLock == lock)
        {
            LockMethod lockMethodTable = GetLocksMethodTable(lock);
            int numLockModes = lockMethodTable->numLockModes;
            /* Bitmask of lock modes conflicting with the mode proc waits for */
            int conflictMask = lockMethodTable->conflictTab[proc->waitLockMode];
            SHM_QUEUE *procLocks = &(lock->procLocks);
            int lm;

            ByteBufferAppendInt32(buf, srcPgXact->xid); /* waiting transaction */

            /*
             * Walk every PROCLOCK attached to this lock; note this reuses
             * the 'proclock' parameter as the iteration cursor.
             */
            proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
            while (proclock)
            {
                /* A proc never blocks itself. */
                if (proc != proclock->tag.myProc)
                {
                    PGXACT* dstPgXact = &ProcGlobal->allPgXact[proclock->tag.myProc->pgprocno];

                    if (TransactionIdIsValid(dstPgXact->xid))
                    {
                        Assert(srcPgXact->xid != dstPgXact->xid);

                        /*
                         * Emit this holder once if it holds the lock in any
                         * mode conflicting with the waited-for mode.
                         */
                        for (lm = 1; lm <= numLockModes; lm++)
                        {
                            if ((proclock->holdMask & LOCKBIT_ON(lm)) && (conflictMask & LOCKBIT_ON(lm)))
                            {
                                XTM_INFO("%d: %u(%u) waits for %u(%u)\n", getpid(), srcPgXact->xid, proc->pid, dstPgXact->xid, proclock->tag.myProc->pid);
                                ByteBufferAppendInt32(buf, dstPgXact->xid); /* transaction holding lock */
                                break;
                            }
                        }
                    }
                }
                proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink, offsetof(PROCLOCK, lockLink));
            }
            ByteBufferAppendInt32(buf, 0); /* end of lock owners list */
        }
    }
}
/*
 * FindLockCycleRecurse -- depth-first search for a waits-for cycle.
 *
 * Recursively follows waits-for edges out of checkProc.  Returns true if
 * the walk reaches the original starting proc (visitedProcs[0]) again,
 * i.e. a deadlock cycle exists; on the way back out of the recursion each
 * level fills in deadlockDetails[depth] with its own lock/mode/pid, so the
 * whole cycle is recorded.  Returns false if checkProc is not waiting, or
 * if the walk only rejoins some previously-visited proc other than the
 * start (a cycle not involving the start point).
 *
 * Two kinds of outgoing edges are scanned, in this order:
 *   1. "hard" edges: procs already HOLDING the lock in a conflicting mode;
 *   2. "soft" edges: procs AHEAD of checkProc in the lock's wait queue
 *      with conflicting requests (reorderable, hence "soft").
 * Soft edges on a detected cycle are appended to softEdges[] and counted
 * in *nSoftEdges so the caller can try queue rearrangements.
 *
 * NOTE(review): relies on file-level state (visitedProcs, nVisitedProcs,
 * nDeadlockDetails, waitOrders, nWaitOrders, blocking_autovacuum_proc)
 * maintained by the surrounding deadlock-check machinery, which is not
 * fully visible in this chunk.
 */
static bool
FindLockCycleRecurse(PGPROC *checkProc,
                     int depth,
                     EDGE *softEdges,   /* output argument */
                     int *nSoftEdges)   /* output argument */
{
    PGPROC     *proc;
    LOCK       *lock;
    PROCLOCK   *proclock;
    SHM_QUEUE  *procLocks;
    LockMethod  lockMethodTable;
    PROC_QUEUE *waitQueue;
    int         queue_size;
    int         conflictMask;
    int         i;
    int         numLockModes,
                lm;

    /*
     * Have we already seen this proc?
     */
    for (i = 0; i < nVisitedProcs; i++)
    {
        if (visitedProcs[i] == checkProc)
        {
            /* If we return to starting point, we have a deadlock cycle */
            if (i == 0)
            {
                /*
                 * record total length of cycle --- outer levels will now fill
                 * deadlockDetails[]
                 */
                Assert(depth <= MaxBackends);
                nDeadlockDetails = depth;

                return true;
            }

            /*
             * Otherwise, we have a cycle but it does not include the start
             * point, so say "no deadlock".
             */
            return false;
        }
    }
    /* Mark proc as seen */
    Assert(nVisitedProcs < MaxBackends);
    visitedProcs[nVisitedProcs++] = checkProc;

    /*
     * If the proc is not waiting, we have no outgoing waits-for edges.
     */
    if (checkProc->links.next == NULL)
        return false;
    lock = checkProc->waitLock;
    if (lock == NULL)
        return false;
    lockMethodTable = GetLocksMethodTable(lock);
    numLockModes = lockMethodTable->numLockModes;
    /* Bitmask of lock modes that conflict with the mode checkProc wants */
    conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];

    /*
     * Scan for procs that already hold conflicting locks.  These are "hard"
     * edges in the waits-for graph.
     */
    procLocks = &(lock->procLocks);
    proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
                                         offsetof(PROCLOCK, lockLink));
    while (proclock)
    {
        proc = proclock->tag.myProc;

        /* A proc never blocks itself */
        if (proc != checkProc)
        {
            for (lm = 1; lm <= numLockModes; lm++)
            {
                if ((proclock->holdMask & LOCKBIT_ON(lm)) &&
                    (conflictMask & LOCKBIT_ON(lm)))
                {
                    /*
                     * Look for a blocking autovacuum. There can be more than
                     * one in the deadlock cycle, in which case we just pick a
                     * random one.  We stash the autovacuum worker's PGPROC so
                     * that the caller can send a cancel signal to it, if
                     * appropriate.
                     *
                     * Note we read vacuumFlags without any locking.  This is
                     * OK only for checking the PROC_IS_AUTOVACUUM flag,
                     * because that flag is set at process start and never
                     * reset; there is logic elsewhere to avoid cancelling an
                     * autovacuum that is working for preventing Xid
                     * wraparound problems (which needs to read a different
                     * vacuumFlag bit), but we don't do that here to avoid
                     * grabbing ProcArrayLock.
                     */
                    if (proc->vacuumFlags & PROC_IS_AUTOVACUUM)
                        blocking_autovacuum_proc = proc;

                    /* This proc hard-blocks checkProc */
                    if (FindLockCycleRecurse(proc, depth + 1,
                                             softEdges, nSoftEdges))
                    {
                        /* fill deadlockDetails[] */
                        DEADLOCK_INFO *info = &deadlockDetails[depth];

                        info->locktag = lock->tag;
                        info->lockmode = checkProc->waitLockMode;
                        info->pid = checkProc->pid;

                        return true;
                    }
                    /* If no deadlock, we're done looking at this proclock */
                    break;
                }
            }
        }

        proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
                                             offsetof(PROCLOCK, lockLink));
    }

    /*
     * Scan for procs that are ahead of this one in the lock's wait queue.
     * Those that have conflicting requests soft-block this one.  This must be
     * done after the hard-block search, since if another proc both hard- and
     * soft-blocks this one, we want to call it a hard edge.
     *
     * If there is a proposed re-ordering of the lock's wait order, use that
     * rather than the current wait order.
     */
    for (i = 0; i < nWaitOrders; i++)
    {
        if (waitOrders[i].lock == lock)
            break;
    }
    if (i < nWaitOrders)
    {
        /* Use the given hypothetical wait queue order */
        PGPROC    **procs = waitOrders[i].procs;

        queue_size = waitOrders[i].nProcs;
        /* Note: 'i' is reused here as the queue position index. */
        for (i = 0; i < queue_size; i++)
        {
            proc = procs[i];

            /* Done when we reach the target proc */
            if (proc == checkProc)
                break;

            /* Is there a conflict with this guy's request? */
            if (((1 << proc->waitLockMode) & conflictMask) != 0)
            {
                /* This proc soft-blocks checkProc */
                if (FindLockCycleRecurse(proc, depth + 1,
                                         softEdges, nSoftEdges))
                {
                    /* fill deadlockDetails[] */
                    DEADLOCK_INFO *info = &deadlockDetails[depth];

                    info->locktag = lock->tag;
                    info->lockmode = checkProc->waitLockMode;
                    info->pid = checkProc->pid;

                    /*
                     * Add this edge to the list of soft edges in the cycle
                     */
                    Assert(*nSoftEdges < MaxBackends);
                    softEdges[*nSoftEdges].waiter = checkProc;
                    softEdges[*nSoftEdges].blocker = proc;
                    (*nSoftEdges)++;

                    return true;
                }
            }
        }
    }
    else
    {
        /* Use the true lock wait queue order */
        waitQueue = &(lock->waitProcs);
        queue_size = waitQueue->size;
        proc = (PGPROC *) waitQueue->links.next;
        while (queue_size-- > 0)
        {
            /* Done when we reach the target proc */
            if (proc == checkProc)
                break;

            /* Is there a conflict with this guy's request? */
            if (((1 << proc->waitLockMode) & conflictMask) != 0)
            {
                /* This proc soft-blocks checkProc */
                if (FindLockCycleRecurse(proc, depth + 1,
                                         softEdges, nSoftEdges))
                {
                    /* fill deadlockDetails[] */
                    DEADLOCK_INFO *info = &deadlockDetails[depth];

                    info->locktag = lock->tag;
                    info->lockmode = checkProc->waitLockMode;
                    info->pid = checkProc->pid;

                    /*
                     * Add this edge to the list of soft edges in the cycle
                     */
                    Assert(*nSoftEdges < MaxBackends);
                    softEdges[*nSoftEdges].waiter = checkProc;
                    softEdges[*nSoftEdges].blocker = proc;
                    (*nSoftEdges)++;

                    return true;
                }
            }
            proc = (PGPROC *) proc->links.next;
        }
    }

    /*
     * No conflict detected here.
     */
    return false;
}
/*
 * DeadLockCheck -- search for deadlocks involving the given process.
 *
 * Walks the waits-for graph rooted at 'proc'.  If a deadlock cycle is
 * found, it tries to break it by re-ordering lock wait queues; when no
 * re-ordering can resolve the cycle, DS_HARD_DEADLOCK is returned and
 * the caller is expected to abort proc's transaction.
 *
 * The caller must already hold every partition of the lock tables.
 *
 * On a hard deadlock the cycle is left behind in deadlockDetails[] so
 * that DeadLockReport() can print it later -- deliberately deferred,
 * because (a) we don't want to print while holding all those LWLocks,
 * and (b) we are typically running inside a signal handler.
 */
DeadLockState
DeadLockCheck(PGPROC *proc)
{
	int			orderIdx;
	int			pos;

	/* Start from a clean slate: no constraints, no queue rearrangements. */
	nCurConstraints = 0;
	nPossibleConstraints = 0;
	nWaitOrders = 0;

	/* No autovacuum worker has been identified as a blocker yet. */
	blocking_autovacuum_proc = NULL;

	/* Search for deadlocks and possible fixes. */
	if (DeadLockCheckRecurse(proc))
	{
		/*
		 * Unresolvable deadlock.  Run FindLockCycle once more against the
		 * unmodified wait queues, so deadlockDetails[] describes the basic
		 * state rather than some hypothetical rearrangement.
		 */
		int			nSoftEdges;

		TRACE_POSTGRESQL_DEADLOCK_FOUND();

		nWaitOrders = 0;
		if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
			elog(FATAL, "deadlock seems to have disappeared");

		return DS_HARD_DEADLOCK;	/* cannot find a non-deadlocked state */
	}

	/* Install whatever wait-queue re-orderings the search decided on. */
	for (orderIdx = 0; orderIdx < nWaitOrders; orderIdx++)
	{
		LOCK	   *lock = waitOrders[orderIdx].lock;
		PGPROC	  **procs = waitOrders[orderIdx].procs;
		int			nProcs = waitOrders[orderIdx].nProcs;
		PROC_QUEUE *waitQueue = &(lock->waitProcs);

		Assert(nProcs == waitQueue->size);

#ifdef DEBUG_DEADLOCK
		PrintLockQueue(lock, "DeadLockCheck:");
#endif

		/* Empty the queue, then re-insert the procs in the chosen order. */
		ProcQueueInit(waitQueue);
		for (pos = 0; pos < nProcs; pos++)
		{
			SHMQueueInsertBefore(&(waitQueue->links), &(procs[pos]->links));
			waitQueue->size++;
		}

#ifdef DEBUG_DEADLOCK
		PrintLockQueue(lock, "rearranged to:");
#endif

		/* The new ordering may let some waiters proceed immediately. */
		ProcLockWakeup(GetLocksMethodTable(lock), lock);
	}

	/* Tell the caller whether we had to escape a deadlock to get here. */
	if (nWaitOrders > 0)
		return DS_SOFT_DEADLOCK;
	if (blocking_autovacuum_proc != NULL)
		return DS_BLOCKED_BY_AUTOVACUUM;
	return DS_NO_DEADLOCK;
}