/* ----------------------------------------------------------------
 *		ExecBitmapHeapInitializeDSM
 *
 *		Set up a parallel bitmap heap scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node,
							ParallelContext *pcxt)
{
	EState	   *estate = node->ss.ps.state;
	dsa_area   *dsa = estate->es_query_dsa;
	ParallelBitmapHeapState *pstate;

	/* Without a DSA there can be no workers, so set up nothing at all. */
	if (dsa == NULL)
		return;

	pstate = shm_toc_allocate(pcxt->toc, node->pscan_len);

	/* Shared iterators start out unset; workers create them on demand. */
	pstate->tbmiterator = 0;
	pstate->prefetch_iterator = 0;

	/* Initialize the mutex */
	SpinLockInit(&pstate->mutex);

	/* No prefetching has happened yet; scan begins in the initial state. */
	pstate->prefetch_pages = 0;
	pstate->prefetch_target = 0;
	pstate->state = BM_INITIAL;

	ConditionVariableInit(&pstate->cv);
	SerializeSnapshot(estate->es_snapshot, pstate->phs_snapshot_data);

	/* Publish the shared state and remember it locally. */
	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pstate);
	node->pstate = pstate;
}
/* ----------------------------------------------------------------
 *		ExecIndexOnlyScanInitializeDSM
 *
 *		Set up a parallel index-only scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
							   ParallelContext *pcxt)
{
	EState	   *estate = node->ss.ps.state;
	ParallelIndexScanDesc pscan;

	/* Carve out shared-memory space and let the index AM initialize it. */
	pscan = shm_toc_allocate(pcxt->toc, node->ioss_PscanLen);
	index_parallelscan_initialize(node->ss.ss_currentRelation,
								  node->ioss_RelationDesc,
								  estate->es_snapshot,
								  pscan);
	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);

	/* Begin our own participation in the parallel scan. */
	node->ioss_ScanDesc =
		index_beginscan_parallel(node->ss.ss_currentRelation,
								 node->ioss_RelationDesc,
								 node->ioss_NumScanKeys,
								 node->ioss_NumOrderByKeys,
								 pscan);

	/* Index-only scans want the index tuple back, and use the VM. */
	node->ioss_ScanDesc->xs_want_itup = true;
	node->ioss_VMBuffer = InvalidBuffer;

	/*
	 * If no run-time keys to calculate or they are ready, go ahead and pass
	 * the scankeys to the index AM.
	 */
	if (node->ioss_NumRuntimeKeys == 0 || node->ioss_RuntimeKeysReady)
		index_rescan(node->ioss_ScanDesc,
					 node->ioss_ScanKeys, node->ioss_NumScanKeys,
					 node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
}
/* ----------------------------------------------------------------
 *		ExecForeignScanInitializeDSM
 *
 *		Initialize the parallel coordination information
 * ----------------------------------------------------------------
 */
void
ExecForeignScanInitializeDSM(ForeignScanState *node, ParallelContext *pcxt)
{
	FdwRoutine *fdwroutine = node->fdwroutine;
	void	   *coordinate;

	/* FDWs that don't support parallel execution omit this callback. */
	if (fdwroutine->InitializeDSMForeignScan == NULL)
		return;

	/* Allocate shared space, let the FDW fill it in, then publish it. */
	coordinate = shm_toc_allocate(pcxt->toc, node->pscan_len);
	fdwroutine->InitializeDSMForeignScan(node, pcxt, coordinate);
	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, coordinate);
}
/* ----------------------------------------------------------------
 *		ExecCustomScanInitializeDSM
 *
 *		Initialize the parallel coordination information for a
 *		custom scan, if the provider supports it.
 * ----------------------------------------------------------------
 */
void
ExecCustomScanInitializeDSM(CustomScanState *node, ParallelContext *pcxt)
{
	const CustomExecMethods *methods = node->methods;
	void	   *coordinate;

	/* Providers without parallel support simply omit this callback. */
	if (methods->InitializeDSMCustomScan == NULL)
		return;

	/* Allocate shared space, let the provider fill it in, then publish it. */
	coordinate = shm_toc_allocate(pcxt->toc, node->pscan_len);
	methods->InitializeDSMCustomScan(node, pcxt, coordinate);
	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, coordinate);
}
/* ----------------------------------------------------------------
 *		ExecSeqScanInitializeDSM
 *
 *		Set up a parallel heap scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecSeqScanInitializeDSM(SeqScanState *node, ParallelContext *pcxt)
{
	EState	   *estate = node->ss.ps.state;
	ParallelHeapScanDesc pdesc;

	/* Carve out shared space and initialize the parallel scan descriptor. */
	pdesc = shm_toc_allocate(pcxt->toc, node->pscan_len);
	heap_parallelscan_initialize(pdesc,
								 node->ss.ss_currentRelation,
								 estate->es_snapshot);
	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pdesc);

	/* Join the parallel scan as a participant ourselves. */
	node->ss.ss_currentScanDesc =
		heap_beginscan_parallel(node->ss.ss_currentRelation, pdesc);
}
/* ----------------------------------------------------------------
 *		ExecSortInitializeDSM
 *
 *		Initialize DSM space for sort statistics.
 * ----------------------------------------------------------------
 */
void
ExecSortInitializeDSM(SortState *node, ParallelContext *pcxt)
{
	Size		nbytes;

	/* don't need this if not instrumenting or no workers */
	if (!node->ss.ps.instrument || pcxt->nworkers == 0)
		return;

	/* Header plus one instrumentation slot per possible worker. */
	nbytes = offsetof(SharedSortInfo, sinstrument)
		+ pcxt->nworkers * sizeof(TuplesortInstrumentation);
	node->shared_info = shm_toc_allocate(pcxt->toc, nbytes);

	/* ensure any unfilled slots will contain zeroes */
	memset(node->shared_info, 0, nbytes);
	node->shared_info->num_workers = pcxt->nworkers;

	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
				   node->shared_info);
}
/* ----------------------------------------------------------------
 *		ExecBitmapHeapInitializeDSM
 *
 *		Set up a parallel bitmap heap scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node,
							ParallelContext *pcxt)
{
	ParallelBitmapHeapState *pstate;
	EState	   *estate = node->ss.ps.state;
	dsa_area   *dsa = estate->es_query_dsa;

	/*
	 * If there's no DSA, there are no workers; initialize nothing.  Without
	 * this guard we would set up (and later try to use) shared iterator
	 * state that can never be backed by DSA memory.
	 */
	if (dsa == NULL)
		return;

	pstate = shm_toc_allocate(pcxt->toc, node->pscan_len);

	/* Shared iterators start out unset. */
	pstate->tbmiterator = 0;
	pstate->prefetch_iterator = 0;

	/* Initialize the mutex */
	SpinLockInit(&pstate->mutex);
	pstate->prefetch_pages = 0;
	pstate->prefetch_target = 0;
	pstate->state = BM_INITIAL;

	ConditionVariableInit(&pstate->cv);
	SerializeSnapshot(estate->es_snapshot, pstate->phs_snapshot_data);

	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pstate);
	node->pstate = pstate;
}
/*
 * Set up a dynamic shared memory segment.
 *
 * We set up a small control region that contains only a test_shm_mq_header,
 * plus one region per message queue.  There are as many message queues as
 * the number of workers, plus one.
 */
static void
setup_dynamic_shared_memory(int64 queue_size, int nworkers,
							dsm_segment **segp, test_shm_mq_header **hdrp,
							shm_mq **outp, shm_mq **inp)
{
	shm_toc_estimator estimator;
	int			idx;
	Size		segsize;
	dsm_segment *seg;
	shm_toc    *toc;
	test_shm_mq_header *hdr;

	/* Ensure a valid queue size. */
	if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("queue size must be at least %zu bytes",
						shm_mq_minimum_size)));
	if (queue_size != ((Size) queue_size))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("queue size overflows size_t")));

	/*
	 * Estimate how much shared memory we need.
	 *
	 * Because the TOC machinery may choose to insert padding of oddly-sized
	 * requests, we must estimate each chunk separately.
	 *
	 * We need one key to register the location of the header, and we need
	 * nworkers + 1 keys to track the locations of the message queues.
	 */
	shm_toc_initialize_estimator(&estimator);
	shm_toc_estimate_chunk(&estimator, sizeof(test_shm_mq_header));
	for (idx = 0; idx <= nworkers; ++idx)
		shm_toc_estimate_chunk(&estimator, (Size) queue_size);
	shm_toc_estimate_keys(&estimator, 2 + nworkers);
	segsize = shm_toc_estimate(&estimator);

	/* Create the shared memory segment and establish a table of contents. */
	seg = dsm_create(segsize, 0);
	toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
						 segsize);

	/* Set up the header region. */
	hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
	SpinLockInit(&hdr->mutex);
	hdr->workers_total = nworkers;
	hdr->workers_attached = 0;
	hdr->workers_ready = 0;
	shm_toc_insert(toc, 0, hdr);

	/* Set up one message queue per worker, plus one. */
	for (idx = 0; idx <= nworkers; ++idx)
	{
		shm_mq	   *mq;

		mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
						   (Size) queue_size);
		shm_toc_insert(toc, idx + 1, mq);

		/*
		 * Note: when nworkers == 0 the single queue is both the first and
		 * the last, so both branches below apply to it.
		 */
		if (idx == 0)
		{
			/* We send messages to the first queue. */
			shm_mq_set_sender(mq, MyProc);
			*outp = mq;
		}
		if (idx == nworkers)
		{
			/* We receive messages from the last queue. */
			shm_mq_set_receiver(mq, MyProc);
			*inp = mq;
		}
	}

	/* Return results to caller. */
	*segp = seg;
	*hdrp = hdr;
}