/*
 * ReceiveSharedInvalidMessages
 *		Process shared-cache-invalidation messages waiting for this backend
 *
 * NOTE: it is entirely possible for this routine to be invoked recursively
 * as a consequence of processing inside the invalFunction or resetFunction.
 * Hence, we must be holding no SI resources when we call them.  The only
 * bad side-effect is that SIDelExpiredDataEntries might be called extra
 * times on the way out of a nested call.
 */
void
ReceiveSharedInvalidMessages(
    void (*invalFunction) (SharedInvalidationMessage *msg),
    void (*resetFunction) (void))
{
    SharedInvalidationMessage data;
    int         getResult;
    bool        gotMessage = false;

    for (;;)
    {
        /*
         * We can run SIGetDataEntry in parallel with other backends
         * running SIGetDataEntry for themselves, since each instance will
         * modify only fields of its own backend's ProcState, and no
         * instance will look at fields of other backends' ProcStates.  We
         * express this by grabbing SInvalLock in shared mode.  Note that
         * this is not exactly the normal (read-only) interpretation of a
         * shared lock!  Look closely at the interactions before allowing
         * SInvalLock to be grabbed in shared mode for any other reason!
         *
         * The routines later in this file that use shared mode are okay
         * with this, because they aren't looking at the ProcState fields
         * associated with SI message transfer; they only use the ProcState
         * array as an easy way to find all the PGPROC structures.
         */
        LWLockAcquire(SInvalLock, LW_SHARED);
        getResult = SIGetDataEntry(shmInvalBuffer, MyBackendId, &data);
        LWLockRelease(SInvalLock);

        if (getResult == 0)
            break;              /* nothing more to do */
        if (getResult < 0)
        {
            /* got a reset message */
            elog(DEBUG4, "cache state reset");
            resetFunction();
        }
        else
        {
            /* got a normal data message */
            invalFunction(&data);
        }
        gotMessage = true;
    }

    /* If we got any messages, try to release dead messages */
    if (gotMessage)
    {
        LWLockAcquire(SInvalLock, LW_EXCLUSIVE);
        SIDelExpiredDataEntries(shmInvalBuffer);
        LWLockRelease(SInvalLock);
    }
}
/*
 * ReceiveSharedInvalidMessages
 *		Process shared-cache-invalidation messages waiting for this backend
 *
 * NOTE: it is entirely possible for this routine to be invoked recursively
 * as a consequence of processing inside the invalFunction or resetFunction.
 * Hence, we must be holding no SI resources when we call them.  The only
 * bad side-effect is that SIDelExpiredDataEntries might be called extra
 * times on the way out of a nested call.
 */
void
ReceiveSharedInvalidMessages(
    void (*invalFunction) (SharedInvalidationMessage *msg),
    void (*resetFunction) (void))
{
    SharedInvalidationMessage data;
    int         getResult;
    bool        gotMessage = false;

    for (;;)
    {
        /*
         * We can discard any pending catchup event, since we will not exit
         * this loop until we're fully caught up.
         */
        catchupInterruptOccurred = 0;

        /*
         * We can run SIGetDataEntry in parallel with other backends running
         * SIGetDataEntry for themselves, since each instance will modify
         * only fields of its own backend's ProcState, and no instance will
         * look at fields of other backends' ProcStates.  We express this by
         * grabbing SInvalLock in shared mode.  Note that this is not
         * exactly the normal (read-only) interpretation of a shared lock!
         * Look closely at the interactions before allowing SInvalLock to be
         * grabbed in shared mode for any other reason!
         */
        LWLockAcquire(SInvalLock, LW_SHARED);
        getResult = SIGetDataEntry(shmInvalBuffer, MyBackendId, &data);
        LWLockRelease(SInvalLock);

        if (getResult == 0)
            break;              /* nothing more to do */
        if (getResult < 0)
        {
            /* got a reset message */
            elog(DEBUG4, "cache state reset");
            resetFunction();
        }
        else
        {
            /* got a normal data message */
            invalFunction(&data);
        }
        gotMessage = true;
    }

    /* If we got any messages, try to release dead messages */
    if (gotMessage)
    {
        LWLockAcquire(SInvalLock, LW_EXCLUSIVE);
        SIDelExpiredDataEntries(shmInvalBuffer);
        LWLockRelease(SInvalLock);
    }
}
/*
 * ReceiveSharedInvalidMessages
 *		Process shared-cache-invalidation messages waiting for this backend
 *
 * We guarantee to process all messages that had been queued before the
 * routine was entered.  It is of course possible for more messages to get
 * queued right after our last SIGetDataEntries call.
 *
 * NOTE: it is entirely possible for this routine to be invoked recursively
 * as a consequence of processing inside the invalFunction or resetFunction.
 * Furthermore, such a recursive call must guarantee that all outstanding
 * inval messages have been processed before it exits.  This is the reason
 * for the strange-looking choice to use a statically allocated buffer array
 * and counters; it's so that a recursive call can process messages already
 * sucked out of sinvaladt.c.
 */
void
ReceiveSharedInvalidMessages(
    void (*invalFunction) (SharedInvalidationMessage *msg),
    void (*resetFunction) (void))
{
#define MAXINVALMSGS 32
    static SharedInvalidationMessage messages[MAXINVALMSGS];

    /*
     * We use volatile here to prevent bugs if a compiler doesn't realize
     * that recursion is a possibility ...
     */
    static volatile int nextmsg = 0;
    static volatile int nummsgs = 0;

    /* Deal with any messages still pending from an outer recursion */
    while (nextmsg < nummsgs)
    {
        SharedInvalidationMessage *msg = &messages[nextmsg++];

        invalFunction(msg);
    }

    do
    {
        int         getResult;

        nextmsg = nummsgs = 0;

        /* Try to get some more messages */
        getResult = SIGetDataEntries(messages, MAXINVALMSGS);

        if (getResult < 0)
        {
            /* got a reset message */
            elog(DEBUG4, "cache state reset");
            resetFunction();
            break;              /* nothing more to do */
        }

        /* Process them, being wary that a recursive call might eat some */
        nextmsg = 0;
        nummsgs = getResult;

        while (nextmsg < nummsgs)
        {
            SharedInvalidationMessage *msg = &messages[nextmsg++];

            invalFunction(msg);
        }

        /*
         * We only need to loop if the last SIGetDataEntries call (which
         * might have been within a recursive call) returned a full buffer.
         */
    } while (nummsgs == MAXINVALMSGS);

    /*
     * We are now caught up.  If we received a catchup signal, reset that
     * flag, and call SICleanupQueue().  This is not so much because we need
     * to flush dead messages right now, as that we want to pass on the
     * catchup signal to the next slowest backend.  "Daisy chaining" the
     * catchup signal this way avoids creating spikes in system load for
     * what should be just a background maintenance activity.
     */
    if (catchupInterruptOccurred)
    {
        catchupInterruptOccurred = 0;
        elog(DEBUG4, "sinval catchup complete, cleaning queue");
        SICleanupQueue(false, 0);
    }
}
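/*
 * Illustrative sketch only (not part of sinval.c): how a caller might plug
 * its own callbacks into ReceiveSharedInvalidMessages.  The names used here
 * (my_process_message, my_reset_caches, my_accept_invalidation_messages)
 * are hypothetical; in PostgreSQL the real callbacks are supplied by the
 * inval.c layer.  The contract shown is the one all three versions above
 * share: invalFunction is invoked once per queued message, while
 * resetFunction is invoked when the backend has fallen so far behind that
 * it must assume every locally cached item is stale.
 */
static void
my_process_message(SharedInvalidationMessage *msg)
{
    /* inspect msg->id to decide which local cache entry to flush */
}

static void
my_reset_caches(void)
{
    /* discard all locally cached state; some messages may have been lost */
}

static void
my_accept_invalidation_messages(void)
{
    /* drain everything queued for this backend before relying on caches */
    ReceiveSharedInvalidMessages(my_process_message, my_reset_caches);
}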