Example 1
/*
 * ResLockPortal -- get a resource lock for Portal execution.
 *
 * Returns:
 *	true if the lock has been taken
 *	false if the lock has been skipped.
 */
bool
ResLockPortal(Portal portal, QueryDesc *qDesc)
{
	bool		returnReleaseOk = false;	/* Release resource lock? */
	bool		takeLock;					/* Take resource lock? */
	LOCKTAG		tag;
	Oid			queueid;
	int32		lockResult = 0;
	ResPortalIncrement	incData;
	Plan *plan = NULL;

	Assert(qDesc);
	Assert(qDesc->plannedstmt);

	plan = qDesc->plannedstmt->planTree;

	queueid = portal->queueId;

	/* 
	 * Check we have a valid queue before going any further.
	 */
	if (queueid != InvalidOid)
	{

		/*
		 * Check the source tag to see if the original statement is suitable for
		 * locking. 
		 */
		switch (portal->sourceTag)
		{

			/*
			 * For INSERT/UPDATE/DELETE, skip taking the lock if resource
			 * locking applies to SELECT statements only; otherwise fall
			 * through and handle like a SELECT.
			 */
			case T_InsertStmt:
			case T_DeleteStmt:
			case T_UpdateStmt:
			{
				if (ResourceSelectOnly)
				{
					takeLock = false;
					returnReleaseOk = false;
					break;
				}
			}
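			/* fall through */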


			case T_SelectStmt:
			{
				/*
				 * Set up the resource portal increments, ready to be added.
				 */
				incData.pid = MyProc->pid;
				incData.portalId = portal->portalId;
				incData.increments[RES_COUNT_LIMIT] = 1;
				incData.increments[RES_COST_LIMIT] = ceil(plan->total_cost);
				incData.isHold = false;		/* not a holdable cursor */

				if (gp_resqueue_memory_policy != RESQUEUE_MEMORY_POLICY_NONE)
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_AUTO ||
						   gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_EAGER_FREE);
					
					uint64 queryMemory = qDesc->plannedstmt->query_mem;
					Assert(queryMemory > 0);
					if (gp_log_resqueue_memory)
					{
						elog(gp_resqueue_memory_log_level, "query requested %.0fKB", (double) queryMemory / 1024.0);
					}					
					
					incData.increments[RES_MEMORY_LIMIT] = (Cost) queryMemory;
				}
				else 
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_NONE);
					incData.increments[RES_MEMORY_LIMIT] = (Cost) 0.0;				
				}
				takeLock = true;
				returnReleaseOk = true;
			}
			break;
	
			/*
			 * We are declaring a cursor - do the same as T_SelectStmt, but
			 * need to additionally consider setting the isHold option.
			 */
			case T_DeclareCursorStmt:
			{
				/*
				 * Set up the resource portal increments, ready to be added.
				 */
				incData.pid = MyProc->pid;
				incData.portalId = portal->portalId;
				incData.increments[RES_COUNT_LIMIT] = 1;
				incData.increments[RES_COST_LIMIT] = ceil(plan->total_cost);
				incData.isHold = portal->cursorOptions & CURSOR_OPT_HOLD;

				if (gp_resqueue_memory_policy != RESQUEUE_MEMORY_POLICY_NONE)
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_AUTO ||
						   gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_EAGER_FREE);
					
					uint64 queryMemory = qDesc->plannedstmt->query_mem;
					Assert(queryMemory > 0);
					if (gp_log_resqueue_memory)
					{
						elog(gp_resqueue_memory_log_level, "query requested %.0fKB", (double) queryMemory / 1024.0);
					}

					incData.increments[RES_MEMORY_LIMIT] = (Cost) queryMemory;
				}
				else 
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_NONE);
					incData.increments[RES_MEMORY_LIMIT] = (Cost) 0.0;				
				}

				takeLock = true;
				returnReleaseOk = true;
			}
			break;
	
			/*
			 * We do not want to lock any of these query types.
			 */
			default:
			{
	
				takeLock = false;
				returnReleaseOk = false;
			}
			break;
	
		}
	
	
		/*
		 * Get the resource lock.
		 */
		if (takeLock)
		{
#ifdef RESLOCK_DEBUG
			elog(DEBUG1, "acquire resource lock for queue %u (portal %u)", 
					queueid, portal->portalId);
#endif
			SET_LOCKTAG_RESOURCE_QUEUE(tag, queueid);

			PG_TRY();
			{
				lockResult = ResLockAcquire(&tag, &incData);
			}
			PG_CATCH();
			{
				/* 
				 * We might have been waiting for a resource queue lock when we get 
				 * here. Calling ResLockRelease without calling ResLockWaitCancel will 
				 * cause the locallock to be cleaned up, but will leave the global
				 * variable lockAwaited still pointing to the locallock hash 
				 * entry.
				 */
				ResLockWaitCancel();
		
				/* Change status to no longer waiting for lock */
				pgstat_report_waiting(PGBE_WAITING_NONE);

				/* If we had acquired the resource queue lock, release it and clean up */	
				ResLockRelease(&tag, portal->portalId);

				/*
				 * Perfmon related stuff: clean up if we got cancelled
				 * while waiting.
				 */
				if (gp_enable_gpperfmon && qDesc->gpmon_pkt)
				{			
					gpmon_qlog_query_error(qDesc->gpmon_pkt);
					pfree(qDesc->gpmon_pkt);
					qDesc->gpmon_pkt = NULL;
				}

				portal->queueId = InvalidOid;
				portal->portalId = INVALID_PORTALID;

				PG_RE_THROW();
			}
			PG_END_TRY();

			/*
			 * See if the query was too small to bother locking at all,
			 * i.e. its cost was below the queue's ignore-cost threshold.
			 */
			if (lockResult == LOCKACQUIRE_NOT_AVAIL)
			{
#ifdef RESLOCK_DEBUG
				elog(DEBUG1, "cancel resource lock for queue %u (portal %u)", 
						queueid, portal->portalId);
#endif
				/* 
				 * Reset portalId and queueid for this portal so the queue
				 * and increment accounting tests continue to work properly.
				 */
				portal->queueId = InvalidOid;
				portal->portalId = INVALID_PORTALID;
				returnReleaseOk = false;
			}

			/* Count holdable cursors (if we are locking this one). */
			if (portal->cursorOptions & CURSOR_OPT_HOLD && returnReleaseOk)
				numHoldPortals++;

		}

	}
	return returnReleaseOk;

}
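
The boolean return value is the caller's contract: true means a resource lock was actually acquired and must later be released; false means locking was skipped. A minimal caller sketch follows, assuming a hypothetical RunPortalWithResQueue wrapper; only ResLockPortal, ResLockRelease and SET_LOCKTAG_RESOURCE_QUEUE are taken from the code above, and the execution step is elided.

/*
 * Minimal caller sketch -- RunPortalWithResQueue is a hypothetical
 * wrapper, not the actual PortalStart code path.
 */
static void
RunPortalWithResQueue(Portal portal, QueryDesc *qDesc)
{
	/* Remember whether a lock was actually taken; it may be skipped. */
	bool		releaseResLock = ResLockPortal(portal, qDesc);

	/* ... execute the portal's query here ... */

	/* Release only what was acquired; a skipped lock must not be released. */
	if (releaseResLock)
	{
		LOCKTAG		tag;

		SET_LOCKTAG_RESOURCE_QUEUE(tag, portal->queueId);
		ResLockRelease(&tag, portal->portalId);
	}
}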
Example 2
/*
 * ResLockUtilityPortal -- simplified version of ResLockPortal, used for
 * utility statements. The main logic is the same as ResLockPortal, with
 * some unnecessary steps removed and minor adjustments for utility
 * statements.
 */
bool
ResLockUtilityPortal(Portal portal, float4 ignoreCostLimit)
{
	bool returnReleaseOk = false;
	LOCKTAG		tag;
	Oid			queueid;
	int32		lockResult = 0;
	ResPortalIncrement	incData;

	queueid = portal->queueId;

	/*
	 * Check we have a valid queue before going any further.
	 */
	if (queueid != InvalidOid)
	{
		/*
		 * Set up the resource portal increments, ready to be added.
		 */
		incData.pid = MyProc->pid;
		incData.portalId = portal->portalId;
		incData.increments[RES_COUNT_LIMIT] = 1;
		incData.increments[RES_COST_LIMIT] = ignoreCostLimit;
		incData.increments[RES_MEMORY_LIMIT] = (Cost) 0.0;
		incData.isHold = false;		/* utility statements are never holdable */
		returnReleaseOk = true;

		/*
		 * Get the resource lock.
		 */
#ifdef RESLOCK_DEBUG
		elog(DEBUG1, "acquire resource lock for queue %u (portal %u)",
				queueid, portal->portalId);
#endif
		SET_LOCKTAG_RESOURCE_QUEUE(tag, queueid);

		PG_TRY();
		{
			lockResult = ResLockAcquire(&tag, &incData);
		}
		PG_CATCH();
		{
			/*
			 * We might have been waiting for a resource queue lock when we get
			 * here. Calling ResLockRelease without calling ResLockWaitCancel will
			 * cause the locallock to be cleaned up, but will leave the global
			 * variable lockAwaited still pointing to the locallock hash
			 * entry.
			 */
			ResLockWaitCancel();

			/* Change status to no longer waiting for lock */
			pgstat_report_waiting(PGBE_WAITING_NONE);

			/* If we had acquired the resource queue lock, release it and clean up */
			ResLockRelease(&tag, portal->portalId);

			/*
			 * Unlike ResLockPortal, there is no perfmon packet to clean
			 * up here.
			 */

			portal->queueId = InvalidOid;
			portal->portalId = INVALID_PORTALID;

			PG_RE_THROW();
		}
		PG_END_TRY();
	}
	return returnReleaseOk;
}
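
Note the key difference from ResLockPortal: the cost increment is the caller-supplied ignoreCostLimit rather than the plan cost, and no memory is accounted, so utility statements are counted against the queue without competing on cost. A hedged call-site sketch follows; GetResQueueIgnoreCostLimit and the releaseResLock field are assumptions for illustration, only ResLockUtilityPortal is taken from the code above.

/*
 * Hypothetical call site -- GetResQueueIgnoreCostLimit is an assumed
 * helper and releaseResLock an assumed portal field.
 */
if (portal->queueId != InvalidOid)
{
	float4		ignoreCost = GetResQueueIgnoreCostLimit(portal->queueId);

	portal->releaseResLock = ResLockUtilityPortal(portal, ignoreCost);
}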
Example 3
static void
ResourceOwnerReleaseInternal(ResourceOwner owner,
							 ResourceReleasePhase phase,
							 bool isCommit,
							 bool isTopLevel)
{
	ResourceOwner child;
	ResourceOwner save;
	ResourceReleaseCallbackItem *item, *next;

	/* Recurse to handle descendants */
	for (child = owner->firstchild; child != NULL; child = child->nextchild)
		ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);

	/*
	 * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc don't
	 * get confused.  We needn't PG_TRY here because the outermost level will
	 * fix it on error abort.
	 */
	save = CurrentResourceOwner;
	CurrentResourceOwner = owner;

	if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
	{
		/*
		 * Release buffer pins.  Note that ReleaseBuffer will remove the
		 * buffer entry from my list, so I just have to iterate till there are
		 * none.
		 *
		 * During a commit, there shouldn't be any remaining pins --- that
		 * would indicate failure to clean up the executor correctly --- so
		 * issue warnings.  In the abort case, just clean up quietly.
		 *
		 * We are careful to do the releasing back-to-front, so as to avoid
		 * O(N^2) behavior in ResourceOwnerForgetBuffer().
		 */
		while (owner->nbuffers > 0)
		{
			if (isCommit)
				PrintBufferLeakWarning(owner->buffers[owner->nbuffers - 1]);
			ReleaseBuffer(owner->buffers[owner->nbuffers - 1]);
		}

		/*
		 * Release relcache references.  Note that RelationClose will remove
		 * the relref entry from my list, so I just have to iterate till there
		 * are none.
		 *
		 * As with buffer pins, warn if any are left at commit time, and
		 * release back-to-front for speed.
		 */
		while (owner->nrelrefs > 0)
		{
			if (isCommit)
				PrintRelCacheLeakWarning(owner->relrefs[owner->nrelrefs - 1]);
			RelationClose(owner->relrefs[owner->nrelrefs - 1]);
		}

		/*
		 * Release dynamic shared memory segments.  Note that dsm_detach()
		 * will remove the segment from my list, so I just have to iterate
		 * until there are none.
		 *
		 * As in the preceding cases, warn if any are left over at commit
		 * time.
		 */
		while (owner->ndsms > 0)
		{
			if (isCommit)
				PrintDSMLeakWarning(owner->dsms[owner->ndsms - 1]);
			dsm_detach(owner->dsms[owner->ndsms - 1]);
		}
	}
	else if (phase == RESOURCE_RELEASE_LOCKS)
	{
		if (isTopLevel)
		{
			/*
			 * For a top-level xact we are going to release all locks (or at
			 * least all non-session locks), so just do a single lmgr call at
			 * the top of the recursion.
			 */
			if (owner == TopTransactionResourceOwner)
			{
				ProcReleaseLocks(isCommit);
				ReleasePredicateLocks(isCommit);
				
				if (Gp_role == GP_ROLE_DISPATCH && IsResQueueEnabled())
					ResLockWaitCancel();
			}
		}
		else
		{
			/*
			 * Release locks retail.  Note that if we are committing a
			 * subtransaction, we do NOT release its locks yet, but transfer
			 * them to the parent.
			 */
			LOCALLOCK **locks;
			int			nlocks;

			Assert(owner->parent != NULL);

			/*
			 * Pass the list of locks owned by this resource owner to the lock
			 * manager, unless it has overflowed.
			 */
			if (owner->nlocks > MAX_RESOWNER_LOCKS)
			{
				locks = NULL;
				nlocks = 0;
			}
			else
			{
				locks = owner->locks;
				nlocks = owner->nlocks;
			}

			if (isCommit)
				LockReassignCurrentOwner(locks, nlocks);
			else
				LockReleaseCurrentOwner(locks, nlocks);
		}
	}
	else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
	{
		/*
		 * Release catcache references.  Note that ReleaseCatCache will remove
		 * the catref entry from my list, so I just have to iterate till there
		 * are none.
		 *
		 * As with buffer pins, warn if any are left at commit time, and
		 * release back-to-front for speed.
		 */
		while (owner->ncatrefs > 0)
		{
			if (isCommit)
				PrintCatCacheLeakWarning(owner->catrefs[owner->ncatrefs - 1],
										 owner->name);
			ReleaseCatCache(owner->catrefs[owner->ncatrefs - 1]);
		}
		/* Ditto for catcache lists */
		while (owner->ncatlistrefs > 0)
		{
			if (isCommit)
				PrintCatCacheListLeakWarning(owner->catlistrefs[owner->ncatlistrefs - 1],
											 owner->name);
			ReleaseCatCacheList(owner->catlistrefs[owner->ncatlistrefs - 1]);
		}
		/* Ditto for plancache references */
		while (owner->nplanrefs > 0)
		{
			if (isCommit)
				PrintPlanCacheLeakWarning(owner->planrefs[owner->nplanrefs - 1]);
			ReleaseCachedPlan(owner->planrefs[owner->nplanrefs - 1], true);
		}
		/* Ditto for tupdesc references */
		while (owner->ntupdescs > 0)
		{
			if (isCommit)
				PrintTupleDescLeakWarning(owner->tupdescs[owner->ntupdescs - 1]);
			DecrTupleDescRefCount(owner->tupdescs[owner->ntupdescs - 1]);
		}
		/* Ditto for snapshot references */
		while (owner->nsnapshots > 0)
		{
			if (isCommit)
				PrintSnapshotLeakWarning(owner->snapshots[owner->nsnapshots - 1]);
			UnregisterSnapshot(owner->snapshots[owner->nsnapshots - 1]);
		}

		/* Ditto for temporary files */
		while (owner->nfiles > 0)
		{
			if (isCommit)
				PrintFileLeakWarning(owner->files[owner->nfiles - 1]);
			FileClose(owner->files[owner->nfiles - 1]);
		}

		/* Clean up index scans too */
		ReleaseResources_hash();
	}

	/* Let add-on modules get a chance too */
	for (item = ResourceRelease_callbacks; item; item = next)
	{
		next = item->next;
		(*item->callback) (phase, isCommit, isTopLevel, item->arg);
	}

	CurrentResourceOwner = save;
}
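
The final loop over ResourceRelease_callbacks is the extension hook: an add-on module registers once and is then invoked for every phase of every owner's release. RegisterResourceReleaseCallback and the callback signature are the standard resowner API; the module below is a hypothetical sketch of how an extension would use it.

#include "postgres.h"
#include "fmgr.h"
#include "utils/resowner.h"

PG_MODULE_MAGIC;

/* Hypothetical extension callback, invoked by the loop shown above. */
static void
my_release_callback(ResourceReleasePhase phase,
					bool isCommit, bool isTopLevel, void *arg)
{
	/* React only at the outermost level, once locks are gone. */
	if (phase == RESOURCE_RELEASE_AFTER_LOCKS && isTopLevel)
		elog(DEBUG1, "resource release complete (commit=%d)", isCommit);
}

void
_PG_init(void)
{
	RegisterResourceReleaseCallback(my_release_callback, NULL);
}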