/*
 * This routine checks whether the specified utility statement should be
 * involved in resource queue management; if yes, take the slot from the
 * resource queue.  If we want to track additional utility statements, add
 * them to the condition check.
 */
void
ResHandleUtilityStmt(Portal portal, Node *stmt)
{
    if (!IsA(stmt, CopyStmt))
    {
        return;
    }

    if (Gp_role == GP_ROLE_DISPATCH
        && IsResQueueEnabled()
        && (!ResourceSelectOnly)
        && !superuser())
    {
        Assert(!LWLockHeldExclusiveByMe(ResQueueLock));
        LWLockAcquire(ResQueueLock, LW_EXCLUSIVE);
        ResQueue    resQueue = ResQueueHashFind(portal->queueId);
        LWLockRelease(ResQueueLock);

        Assert(resQueue);
        int         numSlots = (int) ceil(resQueue->limits[RES_COUNT_LIMIT].threshold_value);

        if (numSlots >= 1)      /* statement limit exists */
        {
            portal->status = PORTAL_QUEUE;

            portal->releaseResLock = ResLockUtilityPortal(portal, resQueue->ignorecostlimit);
        }
        portal->status = PORTAL_ACTIVE;
    }
}
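/*
 * A minimal sketch (not taken from this file) of how a dispatcher-side
 * caller could feed a utility statement through the routine above.  The
 * wrapper name and its arguments are illustrative assumptions; only
 * ResHandleUtilityStmt() itself and the Gp_role check come from the code
 * above.
 */
static void
example_handle_utility(Portal portal, Node *utilityStmt)
{
    /* Only the dispatcher decides whether a COPY takes a queue slot. */
    if (Gp_role == GP_ROLE_DISPATCH && utilityStmt != NULL)
        ResHandleUtilityStmt(portal, utilityStmt);
}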
/*
 * CHECK_FOR_INTERRUPTS() increments a counter, 'backoffTickCounter', on
 * every call, which we use as a loose measure of progress.  Whenever the
 * counter reaches 'gp_resqueue_priority_local_interval',
 * CHECK_FOR_INTERRUPTS() calls this function, to perform a backoff action
 * (see BackoffBackend()).
 */
void
BackoffBackendTickExpired(void)
{
    BackoffBackendLocalEntry *le;
    BackoffBackendSharedEntry *se;
    StatementId currentStatementId = {gp_session_id, gp_command_count};

    backoffTickCounter = 0;

    if (!(Gp_role == GP_ROLE_DISPATCH || Gp_role == GP_ROLE_EXECUTE)
        || !IsResQueueEnabled()
        || !gp_enable_resqueue_priority
        || !IsUnderPostmaster
        || (MyBackendId == InvalidBackendId)
        || proc_exit_inprogress
        || ProcDiePending          /* Proc is dying */
        || QueryCancelPending      /* Statement cancellation */
        || QueryFinishPending      /* Statement finish requested */
        || InterruptHoldoffCount != 0   /* We're holding off on handling
                                         * interrupts */
        || CritSectionCount != 0   /* In critical section */
        )
    {
        /* Do nothing under these circumstances */
        return;
    }

    if (!backoffSingleton)
    {
        /* Not initialized yet. Do nothing */
        return;
    }

    Assert(backoffSingleton);
    le = myBackoffLocalEntry();
    se = myBackoffSharedEntry();

    if (!equalStatementId(&se->statementId, &currentStatementId))
    {
        /* This backend's entry has not yet been initialized. Do nothing yet. */
        return;
    }

    if (le->inTick)
    {
        /* No nested calls allowed. This may happen during elog calls :( */
        return;
    }

    le->inTick = true;

    /* Perform backoff. */
    BackoffBackend();
    se->earlyBackoffExit = false;

    le->inTick = false;
}
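/*
 * Illustrative sketch of the tick counting described in the header comment
 * above; this is NOT the actual CHECK_FOR_INTERRUPTS() definition, just its
 * shape.  It assumes backoffTickCounter and
 * gp_resqueue_priority_local_interval are the module-level variables
 * referenced by the function above.
 */
static inline void
example_backoff_tick(void)
{
    backoffTickCounter++;
    if (backoffTickCounter >= gp_resqueue_priority_local_interval)
        BackoffBackendTickExpired();    /* resets backoffTickCounter to 0 */
}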
static void
ResourceOwnerReleaseInternal(ResourceOwner owner,
                             ResourceReleasePhase phase,
                             bool isCommit,
                             bool isTopLevel)
{
    ResourceOwner child;
    ResourceOwner save;
    ResourceReleaseCallbackItem *item,
               *next;

    /* Recurse to handle descendants */
    for (child = owner->firstchild; child != NULL; child = child->nextchild)
        ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);

    /*
     * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc don't
     * get confused.  We needn't PG_TRY here because the outermost level will
     * fix it on error abort.
     */
    save = CurrentResourceOwner;
    CurrentResourceOwner = owner;

    if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
    {
        /*
         * Release buffer pins.  Note that ReleaseBuffer will remove the
         * buffer entry from my list, so I just have to iterate till there
         * are none.
         *
         * During a commit, there shouldn't be any remaining pins --- that
         * would indicate failure to clean up the executor correctly --- so
         * issue warnings.  In the abort case, just clean up quietly.
         *
         * We are careful to do the releasing back-to-front, so as to avoid
         * O(N^2) behavior in ResourceOwnerForgetBuffer().
         */
        while (owner->nbuffers > 0)
        {
            if (isCommit)
                PrintBufferLeakWarning(owner->buffers[owner->nbuffers - 1]);
            ReleaseBuffer(owner->buffers[owner->nbuffers - 1]);
        }

        /*
         * Release relcache references.  Note that RelationClose will remove
         * the relref entry from my list, so I just have to iterate till there
         * are none.
         *
         * As with buffer pins, warn if any are left at commit time, and
         * release back-to-front for speed.
         */
        while (owner->nrelrefs > 0)
        {
            if (isCommit)
                PrintRelCacheLeakWarning(owner->relrefs[owner->nrelrefs - 1]);
            RelationClose(owner->relrefs[owner->nrelrefs - 1]);
        }

        /*
         * Release dynamic shared memory segments.  Note that dsm_detach()
         * will remove the segment from my list, so I just have to iterate
         * until there are none.
         *
         * As in the preceding cases, warn if there are leftovers at commit
         * time.
         */
        while (owner->ndsms > 0)
        {
            if (isCommit)
                PrintDSMLeakWarning(owner->dsms[owner->ndsms - 1]);
            dsm_detach(owner->dsms[owner->ndsms - 1]);
        }
    }
    else if (phase == RESOURCE_RELEASE_LOCKS)
    {
        if (isTopLevel)
        {
            /*
             * For a top-level xact we are going to release all locks (or at
             * least all non-session locks), so just do a single lmgr call at
             * the top of the recursion.
             */
            if (owner == TopTransactionResourceOwner)
            {
                ProcReleaseLocks(isCommit);
                ReleasePredicateLocks(isCommit);

                if (Gp_role == GP_ROLE_DISPATCH && IsResQueueEnabled())
                    ResLockWaitCancel();
            }
        }
        else
        {
            /*
             * Release locks retail.  Note that if we are committing a
             * subtransaction, we do NOT release its locks yet, but transfer
             * them to the parent.
             */
            LOCALLOCK **locks;
            int         nlocks;

            Assert(owner->parent != NULL);

            /*
             * Pass the list of locks owned by this resource owner to the
             * lock manager, unless it has overflowed.
             */
            if (owner->nlocks > MAX_RESOWNER_LOCKS)
            {
                locks = NULL;
                nlocks = 0;
            }
            else
            {
                locks = owner->locks;
                nlocks = owner->nlocks;
            }

            if (isCommit)
                LockReassignCurrentOwner(locks, nlocks);
            else
                LockReleaseCurrentOwner(locks, nlocks);
        }
    }
    else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
    {
        /*
         * Release catcache references.  Note that ReleaseCatCache will
         * remove the catref entry from my list, so I just have to iterate
         * till there are none.
         *
         * As with buffer pins, warn if any are left at commit time, and
         * release back-to-front for speed.
         */
        while (owner->ncatrefs > 0)
        {
            if (isCommit)
                PrintCatCacheLeakWarning(owner->catrefs[owner->ncatrefs - 1],
                                         owner->name);
            ReleaseCatCache(owner->catrefs[owner->ncatrefs - 1]);
        }

        /* Ditto for catcache lists */
        while (owner->ncatlistrefs > 0)
        {
            if (isCommit)
                PrintCatCacheListLeakWarning(owner->catlistrefs[owner->ncatlistrefs - 1],
                                             owner->name);
            ReleaseCatCacheList(owner->catlistrefs[owner->ncatlistrefs - 1]);
        }

        /* Ditto for plancache references */
        while (owner->nplanrefs > 0)
        {
            if (isCommit)
                PrintPlanCacheLeakWarning(owner->planrefs[owner->nplanrefs - 1]);
            ReleaseCachedPlan(owner->planrefs[owner->nplanrefs - 1], true);
        }

        /* Ditto for tupdesc references */
        while (owner->ntupdescs > 0)
        {
            if (isCommit)
                PrintTupleDescLeakWarning(owner->tupdescs[owner->ntupdescs - 1]);
            DecrTupleDescRefCount(owner->tupdescs[owner->ntupdescs - 1]);
        }

        /* Ditto for snapshot references */
        while (owner->nsnapshots > 0)
        {
            if (isCommit)
                PrintSnapshotLeakWarning(owner->snapshots[owner->nsnapshots - 1]);
            UnregisterSnapshot(owner->snapshots[owner->nsnapshots - 1]);
        }

        /* Ditto for temporary files */
        while (owner->nfiles > 0)
        {
            if (isCommit)
                PrintFileLeakWarning(owner->files[owner->nfiles - 1]);
            FileClose(owner->files[owner->nfiles - 1]);
        }

        /* Clean up index scans too */
        ReleaseResources_hash();
    }

    /* Let add-on modules get a chance too */
    for (item = ResourceRelease_callbacks; item; item = next)
    {
        next = item->next;
        (*item->callback) (phase, isCommit, isTopLevel, item->arg);
    }

    CurrentResourceOwner = save;
}
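/*
 * Sketch of how an add-on module can hook into the callback loop at the end
 * of ResourceOwnerReleaseInternal().  The callback name and its body are
 * hypothetical; RegisterResourceReleaseCallback() (declared in
 * utils/resowner.h) is the real registration point, typically called from
 * the module's _PG_init().
 */
static void
example_release_callback(ResourceReleasePhase phase,
                         bool isCommit, bool isTopLevel, void *arg)
{
    /* Run the hypothetical cleanup only after locks have been released. */
    if (phase != RESOURCE_RELEASE_AFTER_LOCKS)
        return;

    /* ... release module-private resources tracked for this owner ... */
}

static void
example_register_callback(void)
{
    RegisterResourceReleaseCallback(example_release_callback, NULL);
}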
/**
 * Set-returning function to inspect the current state of query prioritization.
 * Input:
 *  none
 * Output:
 *  Set of (session_id, command_count, priority, weight) for all backends
 *  (on the current segment).
 * This function is used by the jetpack view gp_statement_priorities.
 */
Datum
gp_list_backend_priorities(PG_FUNCTION_ARGS)
{
    typedef struct Context
    {
        int         currentIndex;
    } Context;

    FuncCallContext *funcctx = NULL;
    Context    *context = NULL;

    if (SRF_IS_FIRSTCALL())
    {
        TupleDesc   tupdesc;
        MemoryContext oldcontext;

        /* create a function context for cross-call persistence */
        funcctx = SRF_FIRSTCALL_INIT();

        /*
         * switch to memory context appropriate for multiple function calls
         */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        /* build tupdesc for result tuples */
        /* this had better match the gp_statement_priorities view */
        tupdesc = CreateTemplateTupleDesc(4, false);
        TupleDescInitEntry(tupdesc, (AttrNumber) 1, "session_id",
                           INT4OID, -1, 0);
        TupleDescInitEntry(tupdesc, (AttrNumber) 2, "command_count",
                           INT4OID, -1, 0);
        TupleDescInitEntry(tupdesc, (AttrNumber) 3, "priority",
                           TEXTOID, -1, 0);
        TupleDescInitEntry(tupdesc, (AttrNumber) 4, "weight",
                           INT4OID, -1, 0);

        funcctx->tuple_desc = BlessTupleDesc(tupdesc);

        /*
         * Collect the prioritization information that we will format and
         * send out as a result set.
         */
        context = (Context *) palloc(sizeof(Context));
        funcctx->user_fctx = (void *) context;
        context->currentIndex = 0;

        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();
    Assert(funcctx);
    context = (Context *) funcctx->user_fctx;
    Assert(context);

    if (!IsResQueueEnabled() || !gp_enable_resqueue_priority)
        SRF_RETURN_DONE(funcctx);

    while (context->currentIndex < backoffSingleton->numEntries)
    {
        Datum       values[4];
        bool        nulls[4];
        HeapTuple   tuple = NULL;
        Datum       result;
        char       *priorityVal = NULL;

        const BackoffBackendSharedEntry *se = NULL;

        se = getBackoffEntryRO(context->currentIndex);
        Assert(se);

        if (!isValid(&se->statementId))
        {
            context->currentIndex++;
            continue;
        }

        /*
         * Form tuple with appropriate data.
         */
        MemSet(values, 0, sizeof(values));
        MemSet(nulls, false, sizeof(nulls));

        values[0] = Int32GetDatum((int32) se->statementId.sessionId);
        values[1] = Int32GetDatum((int32) se->statementId.commandCount);

        priorityVal = BackoffPriorityIntToValue(se->weight);
        Assert(priorityVal);
        values[2] = CStringGetTextDatum(priorityVal);

        Assert(se->weight > 0);
        values[3] = Int32GetDatum((int32) se->weight);

        tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
        Assert(tuple);
        result = HeapTupleGetDatum(tuple);

        context->currentIndex++;

        SRF_RETURN_NEXT(funcctx, result);
    }

    SRF_RETURN_DONE(funcctx);
}
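/*
 * Usage note (illustrative, not the actual view text): a SQL wrapper such as
 * the gp_statement_priorities view mentioned above would select from this
 * SRF roughly as
 *
 *     SELECT * FROM gp_list_backend_priorities()
 *         AS t (session_id int4, command_count int4, priority text, weight int4);
 *
 * Whatever the real view definition looks like, its column names and types
 * must stay in sync with the tuple descriptor built in the function above.
 */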