/* ----------------------------------------------------------------
 *      ExecShutdownGather
 *
 *      Destroy the setup for parallel workers including parallel context.
 *      Collect all the stats after workers are stopped, else some work
 *      done by workers won't be accounted for.
 * ----------------------------------------------------------------
 */
void
ExecShutdownGather(GatherState *node)
{
    ExecShutdownGatherWorkers(node);

    /* Now destroy the parallel context. */
    if (node->pei != NULL)
    {
        ExecParallelCleanup(node->pei);
        node->pei = NULL;
    }
}
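/*
 * ExecShutdownGatherWorkers() is called throughout this file but its
 * definition is not part of this excerpt.  The sketch below is only an
 * illustration of the contract the callers rely on, assuming the GatherState
 * fields used elsewhere here (reader, nreaders, pei): release the per-worker
 * tuple queue readers, then let the parallel machinery wait for the workers
 * to exit so that their statistics are accumulated.  It is not the
 * authoritative implementation.
 */
static void
ExecShutdownGatherWorkers(GatherState *node)
{
    /* Destroy the tuple queue readers before waiting for the workers. */
    if (node->reader != NULL)
    {
        int     i;

        for (i = 0; i < node->nreaders; ++i)
            DestroyTupleQueueReader(node->reader[i]);

        pfree(node->reader);
        node->reader = NULL;
    }

    /* Wait for workers to finish so their stats can be collected. */
    if (node->pei != NULL)
        ExecParallelFinish(node->pei);
}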
/* ----------------------------------------------------------------
 *      ExecReScanGather
 *
 *      Prepare to re-scan the result of a Gather.
 * ----------------------------------------------------------------
 */
void
ExecReScanGather(GatherState *node)
{
    Gather     *gather = (Gather *) node->ps.plan;
    PlanState  *outerPlan = outerPlanState(node);

    /* Make sure any existing workers are gracefully shut down */
    ExecShutdownGatherWorkers(node);

    /* Mark node so that shared state will be rebuilt at next call */
    node->initialized = false;

    /*
     * Set child node's chgParam to tell it that the next scan might deliver a
     * different set of rows within the leader process.  (The overall rowset
     * shouldn't change, but the leader process's subset might; hence nodes
     * between here and the parallel table scan node mustn't optimize on the
     * assumption of an unchanging rowset.)
     */
    if (gather->rescan_param >= 0)
        outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
                                             gather->rescan_param);

    /*
     * If chgParam of subnode is not null then plan will be re-scanned by
     * first ExecProcNode.  Note: because this does nothing if we have a
     * rescan_param, it's currently guaranteed that parallel-aware child nodes
     * will not see a ReScan call until after they get a ReInitializeDSM call.
     * That ordering might not be something to rely on, though.  A good rule
     * of thumb is that ReInitializeDSM should reset only shared state, ReScan
     * should reset only local state, and anything that depends on both of
     * those steps being finished must wait until the first ExecProcNode call.
     */
    if (outerPlan->chgParam == NULL)
        ExecReScan(outerPlan);
}
/*
 * Attempt to read a tuple from one of our parallel workers.
 */
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
    int     waitpos = gatherstate->nextreader;

    for (;;)
    {
        TupleQueueReader *reader;
        HeapTuple   tup;
        bool        readerdone;

        /* Make sure we've read all messages from workers. */
        HandleParallelMessages();

        /* Attempt to read a tuple, but don't block if none is available. */
        reader = gatherstate->reader[gatherstate->nextreader];
        tup = TupleQueueReaderNext(reader, true, &readerdone);

        /*
         * If this reader is done, remove it.  If all readers are done,
         * clean up remaining worker state.
         */
        if (readerdone)
        {
            DestroyTupleQueueReader(reader);
            --gatherstate->nreaders;
            if (gatherstate->nreaders == 0)
            {
                ExecShutdownGatherWorkers(gatherstate);
                return NULL;
            }
            else
            {
                memmove(&gatherstate->reader[gatherstate->nextreader],
                        &gatherstate->reader[gatherstate->nextreader + 1],
                        sizeof(TupleQueueReader *)
                        * (gatherstate->nreaders - gatherstate->nextreader));
                if (gatherstate->nextreader >= gatherstate->nreaders)
                    gatherstate->nextreader = 0;
                if (gatherstate->nextreader < waitpos)
                    --waitpos;
            }
            continue;
        }

        /* If we got a tuple, return it. */
        if (tup)
            return tup;

        /*
         * Advance nextreader pointer in round-robin fashion.  Note that we
         * only reach this code if we weren't able to get a tuple from the
         * current worker.  We used to advance the nextreader pointer after
         * every tuple, but it turns out to be much more efficient to keep
         * reading from the same queue until that would require blocking.
         */
        gatherstate->nextreader =
            (gatherstate->nextreader + 1) % gatherstate->nreaders;

        /* Have we visited every TupleQueueReader? */
        if (gatherstate->nextreader == waitpos)
        {
            /*
             * If (still) running plan locally, return NULL so caller can
             * generate another tuple from the local copy of the plan.
             */
            if (gatherstate->need_to_scan_locally)
                return NULL;

            /* Nothing to do except wait for developments. */
            WaitLatch(MyLatch, WL_LATCH_SET, 0);
            CHECK_FOR_INTERRUPTS();
            ResetLatch(MyLatch);
        }
    }
}
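/*
 * ExecGather() below fetches tuples through gather_getnext(), whose
 * definition is not part of this excerpt.  The sketch that follows only
 * illustrates the expected behavior, assuming the fields used elsewhere in
 * this file (reader, need_to_scan_locally, funnel_slot): prefer tuples
 * arriving from the workers via gather_readnext(), fall back to running the
 * plan locally in the leader while need_to_scan_locally is set, and return
 * an empty slot once both sources are exhausted.  It is not the
 * authoritative implementation.
 */
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
    PlanState  *outerPlan = outerPlanState(gatherstate);
    TupleTableSlot *outerTupleSlot;
    TupleTableSlot *fslot = gatherstate->funnel_slot;
    HeapTuple   tup;

    while (gatherstate->reader != NULL || gatherstate->need_to_scan_locally)
    {
        if (gatherstate->reader != NULL)
        {
            /* Try to get a tuple from one of the worker queues. */
            tup = gather_readnext(gatherstate);

            if (HeapTupleIsValid(tup))
            {
                ExecStoreTuple(tup,             /* tuple to store */
                               fslot,           /* slot to store it in */
                               InvalidBuffer,   /* no associated buffer */
                               true);           /* pfree tuple when done */
                return fslot;
            }
        }

        if (gatherstate->need_to_scan_locally)
        {
            /* Run the plan ourselves, in the leader, for one tuple. */
            outerTupleSlot = ExecProcNode(outerPlan);

            if (!TupIsNull(outerTupleSlot))
                return outerTupleSlot;

            gatherstate->need_to_scan_locally = false;
        }
    }

    return ExecClearTuple(fslot);
}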
/* ----------------------------------------------------------------
 *      ExecGather(node)
 *
 *      Scans the relation via multiple workers and returns
 *      the next qualifying tuple.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecGather(GatherState *node)
{
    TupleTableSlot *fslot = node->funnel_slot;
    int         i;
    TupleTableSlot *slot;
    TupleTableSlot *resultSlot;
    ExprDoneCond isDone;
    ExprContext *econtext;

    /*
     * Initialize the parallel context and workers on first execution.  We do
     * this on first execution rather than during node initialization, as it
     * needs to allocate a large dynamic segment, so it is better to do it
     * only if it is really needed.
     */
    if (!node->initialized)
    {
        EState     *estate = node->ps.state;
        Gather     *gather = (Gather *) node->ps.plan;

        /*
         * Sometimes we might have to run without parallelism; but if
         * parallel mode is active then we can try to fire up some workers.
         */
        if (gather->num_workers > 0 && IsInParallelMode())
        {
            ParallelContext *pcxt;
            bool        got_any_worker = false;

            /* Initialize the workers required to execute Gather node. */
            if (!node->pei)
                node->pei = ExecInitParallelPlan(node->ps.lefttree,
                                                 estate,
                                                 gather->num_workers);

            /*
             * Register backend workers.  We might not get as many as we
             * requested, or indeed any at all.
             */
            pcxt = node->pei->pcxt;
            LaunchParallelWorkers(pcxt);

            /* Set up tuple queue readers to read the results. */
            if (pcxt->nworkers > 0)
            {
                node->nreaders = 0;
                node->reader =
                    palloc(pcxt->nworkers * sizeof(TupleQueueReader *));

                for (i = 0; i < pcxt->nworkers; ++i)
                {
                    if (pcxt->worker[i].bgwhandle == NULL)
                        continue;

                    shm_mq_set_handle(node->pei->tqueue[i],
                                      pcxt->worker[i].bgwhandle);
                    node->reader[node->nreaders++] =
                        CreateTupleQueueReader(node->pei->tqueue[i],
                                               fslot->tts_tupleDescriptor);
                    got_any_worker = true;
                }
            }

            /* No workers?  Then never mind. */
            if (!got_any_worker)
                ExecShutdownGatherWorkers(node);
        }

        /* Run plan locally if no workers or not single-copy. */
        node->need_to_scan_locally = (node->reader == NULL)
            || !gather->single_copy;
        node->initialized = true;
    }

    /*
     * Check to see if we're still projecting out tuples from a previous scan
     * tuple (because there is a function-returning-set in the projection
     * expressions).  If so, try to project another one.
     */
    if (node->ps.ps_TupFromTlist)
    {
        resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);
        if (isDone == ExprMultipleResult)
            return resultSlot;
        /* Done with that source tuple... */
        node->ps.ps_TupFromTlist = false;
    }

    /*
     * Reset per-tuple memory context to free any expression evaluation
     * storage allocated in the previous tuple cycle.  Note we can't do this
     * until we're done projecting.
     */
    econtext = node->ps.ps_ExprContext;
    ResetExprContext(econtext);

    /* Get and return the next tuple, projecting if necessary. */
    for (;;)
    {
        /*
         * Get next tuple, either from one of our workers, or by running the
         * plan ourselves.
         */
        slot = gather_getnext(node);
        if (TupIsNull(slot))
            return NULL;

        /*
         * form the result tuple using ExecProject(), and return it --- unless
         * the projection produces an empty set, in which case we must loop
         * back around for another tuple
         */
        econtext->ecxt_outertuple = slot;
        resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);

        if (isDone != ExprEndResult)
        {
            node->ps.ps_TupFromTlist = (isDone == ExprMultipleResult);
            return resultSlot;
        }
    }

    return slot;
}