/*
 * Attempt to read a tuple from given reader.
 *
 * Returns the next tuple from worker queue nreader, or NULL if none is
 * available (or, with nowait false, none will ever be).  *done, if
 * supplied, is set true when the reader is exhausted.  The read runs in
 * the per-tuple memory context so that anything the reader allocates is
 * released along with the rest of the per-tuple data.
 */
static HeapTuple
gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
				  bool *done)
{
	HeapTuple	tuple;
	MemoryContext prevContext;
	MemoryContext perTupleCxt;

	perTupleCxt = gm_state->ps.ps_ExprContext->ecxt_per_tuple_memory;

	/* Assume the reader is not exhausted until proven otherwise. */
	if (done != NULL)
		*done = false;

	/* Check for async events, particularly messages from workers. */
	CHECK_FOR_INTERRUPTS();

	/* Run the TupleQueueReader in the per-tuple context. */
	prevContext = MemoryContextSwitchTo(perTupleCxt);
	tuple = TupleQueueReaderNext(gm_state->reader[nreader], nowait, done);
	MemoryContextSwitchTo(prevContext);

	return tuple;
}
/*
 * Attempt to read a tuple from given worker.
 *
 * Returns the next tuple from the queue of worker nreader (1-based), or
 * NULL if none is available.  *done is set true when the worker's queue
 * is exhausted.
 */
static HeapTuple
gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
				  bool *done)
{
	/* Check for async events, particularly messages from workers. */
	CHECK_FOR_INTERRUPTS();

	/*
	 * Attempt to read a tuple.
	 *
	 * Note that TupleQueueReaderNext will just return NULL for a worker
	 * which fails to initialize.  We'll treat that worker as having
	 * produced no tuples; WaitForParallelWorkersToFinish will error out
	 * when we get there.
	 *
	 * NB: nreader is 1-based here, hence the "- 1" when indexing the
	 * reader array.
	 */
	return TupleQueueReaderNext(gm_state->reader[nreader - 1], nowait, done);
}
/*
 * Attempt to read a tuple from one of our parallel workers.
 *
 * Polls the workers' tuple queues round-robin without blocking.  Returns a
 * tuple as soon as one is available; returns NULL either when every reader
 * has been exhausted and destroyed, or when all queues are momentarily
 * empty and the caller may generate a tuple from its local copy of the
 * plan.  Otherwise blocks on the process latch until something changes.
 */
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
	/*
	 * Reader index at which the current polling sweep started; when
	 * nextreader wraps back around to it without yielding a tuple, every
	 * queue has been tried once.
	 */
	int			waitpos = gatherstate->nextreader;

	for (;;)
	{
		TupleQueueReader *reader;
		HeapTuple	tup;
		bool		readerdone;

		/* Make sure we've read all messages from workers. */
		HandleParallelMessages();

		/* Attempt to read a tuple, but don't block if none is available. */
		reader = gatherstate->reader[gatherstate->nextreader];
		tup = TupleQueueReaderNext(reader, true, &readerdone);

		/*
		 * If this reader is done, remove it.  If all readers are done,
		 * clean up remaining worker state.
		 */
		if (readerdone)
		{
			DestroyTupleQueueReader(reader);
			--gatherstate->nreaders;
			if (gatherstate->nreaders == 0)
			{
				ExecShutdownGatherWorkers(gatherstate);
				return NULL;
			}
			else
			{
				/*
				 * Compact the array over the removed slot; every reader
				 * after nextreader shifts down by one.
				 */
				memmove(&gatherstate->reader[gatherstate->nextreader],
						&gatherstate->reader[gatherstate->nextreader + 1],
						sizeof(TupleQueueReader *)
						* (gatherstate->nreaders - gatherstate->nextreader));
				if (gatherstate->nextreader >= gatherstate->nreaders)
					gatherstate->nextreader = 0;
				/*
				 * If the sweep's starting reader sat after the removed
				 * slot, it too shifted down one; adjust waitpos so it
				 * still names the same reader.
				 */
				if (gatherstate->nextreader < waitpos)
					--waitpos;
			}
			continue;
		}

		/* If we got a tuple, return it. */
		if (tup)
			return tup;

		/*
		 * Advance nextreader pointer in round-robin fashion.  Note that we
		 * only reach this code if we weren't able to get a tuple from the
		 * current worker.  We used to advance the nextreader pointer after
		 * every tuple, but it turns out to be much more efficient to keep
		 * reading from the same queue until that would require blocking.
		 */
		gatherstate->nextreader =
			(gatherstate->nextreader + 1) % gatherstate->nreaders;

		/* Have we visited every TupleQueueReader? */
		if (gatherstate->nextreader == waitpos)
		{
			/*
			 * If (still) running plan locally, return NULL so caller can
			 * generate another tuple from the local copy of the plan.
			 */
			if (gatherstate->need_to_scan_locally)
				return NULL;

			/* Nothing to do except wait for developments. */
			WaitLatch(MyLatch, WL_LATCH_SET, 0);
			CHECK_FOR_INTERRUPTS();
			ResetLatch(MyLatch);
		}
	}
}
/*
 * Attempt to read a tuple from one of our parallel workers.
 *
 * Polls the workers' tuple queues round-robin without blocking.  Returns a
 * tuple as soon as one is available; returns NULL either when every reader
 * is exhausted, or when all queues are momentarily empty and the caller
 * may generate a tuple from the local copy of the plan.  Otherwise blocks
 * on the process latch until something changes.
 */
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
	int			nchecked = 0;	/* queues polled since last progress */

	for (;;)
	{
		TupleQueueReader *reader;
		HeapTuple	tup;
		bool		readerdone;

		/* Check for async events, particularly messages from workers. */
		CHECK_FOR_INTERRUPTS();

		/*
		 * Attempt to read a tuple, but don't block if none is available.
		 *
		 * Note that TupleQueueReaderNext will just return NULL for a worker
		 * which fails to initialize.  We'll treat that worker as having
		 * produced no tuples; WaitForParallelWorkersToFinish will error out
		 * when we get there.
		 */
		Assert(gatherstate->nextreader < gatherstate->nreaders);
		reader = gatherstate->reader[gatherstate->nextreader];
		tup = TupleQueueReaderNext(reader, true, &readerdone);

		/*
		 * An exhausted reader gets dropped from our working array of
		 * active readers.  Once the array is empty, we're outta here.
		 */
		if (readerdone)
		{
			Assert(!tup);
			if (--gatherstate->nreaders == 0)
				return NULL;

			/* Close the gap left by the removed reader. */
			memmove(&gatherstate->reader[gatherstate->nextreader],
					&gatherstate->reader[gatherstate->nextreader + 1],
					sizeof(TupleQueueReader *)
					* (gatherstate->nreaders - gatherstate->nextreader));
			if (gatherstate->nextreader >= gatherstate->nreaders)
				gatherstate->nextreader = 0;
			continue;
		}

		/* If we got a tuple, return it. */
		if (tup)
			return tup;

		/*
		 * Advance nextreader pointer in round-robin fashion.  Note that we
		 * only reach this code if we weren't able to get a tuple from the
		 * current worker.  We used to advance the nextreader pointer after
		 * every tuple, but it turns out to be much more efficient to keep
		 * reading from the same queue until that would require blocking.
		 */
		gatherstate->nextreader =
			(gatherstate->nextreader + 1) % gatherstate->nreaders;

		/* Have we visited every (surviving) TupleQueueReader? */
		if (++nchecked >= gatherstate->nreaders)
		{
			/*
			 * If (still) running plan locally, return NULL so caller can
			 * generate another tuple from the local copy of the plan.
			 */
			if (gatherstate->need_to_scan_locally)
				return NULL;

			/* Nothing to do except wait for developments. */
			WaitLatch(MyLatch, WL_LATCH_SET, 0,
					  WAIT_EVENT_EXECUTE_GATHER);
			ResetLatch(MyLatch);
			nchecked = 0;
		}
	}
}