Example #1
/*
 * ResLockPortal -- get a resource lock for Portal execution.
 *
 * Returns:
 *	true if the lock has been taken
 *	false if the lock has been skipped.
 */
bool
ResLockPortal(Portal portal, QueryDesc *qDesc)
{
	bool		returnReleaseOk = false;	/* Release resource lock? */
	bool		takeLock;					/* Take resource lock? */
	LOCKTAG		tag;
	Oid			queueid;
	int32		lockResult = 0;
	ResPortalIncrement	incData;
	Plan *plan = NULL;

	Assert(qDesc);
	Assert(qDesc->plannedstmt);

	plan = qDesc->plannedstmt->planTree;

	queueid = portal->queueId;

	/* 
	 * Check we have a valid queue before going any further.
	 */
	if (queueid != InvalidOid)
	{
		/*
		 * Check the source tag to see if the original statement is suitable for
		 * locking. 
		 */
		switch (portal->sourceTag)
		{
			/*
			 * For INSERT/UPDATE/DELETE Skip if we have specified only SELECT,
			 * otherwise drop through to handle like a SELECT.
			 */
			case T_InsertStmt:
			case T_DeleteStmt:
			case T_UpdateStmt:
			{
				if (ResourceSelectOnly)
				{
					takeLock = false;
					returnReleaseOk = false;
					break;
				}
			}
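			/* fall through */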


			case T_SelectStmt:
			{
				/*
				 * Set up the resource portal increments, ready to be added.
				 */
				incData.pid = MyProc->pid;
				incData.portalId = portal->portalId;
				incData.increments[RES_COUNT_LIMIT] = 1;
				incData.increments[RES_COST_LIMIT] = ceil(plan->total_cost);

				if (gp_resqueue_memory_policy != RESQUEUE_MEMORY_POLICY_NONE)
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_AUTO ||
						   gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_EAGER_FREE);
					
					uint64 queryMemory = qDesc->plannedstmt->query_mem;
					Assert(queryMemory > 0);
					if (gp_log_resqueue_memory)
					{
						elog(gp_resqueue_memory_log_level, "query requested %.0fKB", (double) queryMemory / 1024.0);
					}					
					
					incData.increments[RES_MEMORY_LIMIT] = (Cost) queryMemory;
				}
				else 
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_NONE);
					incData.increments[RES_MEMORY_LIMIT] = (Cost) 0.0;				
				}
				takeLock = true;
				returnReleaseOk = true;
			}
			break;
	
			/*
			 * We are declaring a cursor - do the same as T_SelectStmt, but
			 * need to additionally consider setting the isHold option.
			 */
			case T_DeclareCursorStmt:
			{
				/*
				 * Set up the resource portal increments, ready to be added.
				 */
				incData.pid = MyProc->pid;
				incData.portalId = portal->portalId;
				incData.increments[RES_COUNT_LIMIT] = 1;
				incData.increments[RES_COST_LIMIT] = ceil(plan->total_cost);
				incData.isHold = portal->cursorOptions & CURSOR_OPT_HOLD;

				if (gp_resqueue_memory_policy != RESQUEUE_MEMORY_POLICY_NONE)
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_AUTO ||
						   gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_EAGER_FREE);
					
					uint64 queryMemory = qDesc->plannedstmt->query_mem;
					Assert(queryMemory > 0);
					if (gp_log_resqueue_memory)
					{
						elog(NOTICE, "query requested %.0fKB", (double) queryMemory / 1024.0);
					}

					incData.increments[RES_MEMORY_LIMIT] = (Cost) queryMemory;
				}
				else 
				{
					Assert(gp_resqueue_memory_policy == RESQUEUE_MEMORY_POLICY_NONE);
					incData.increments[RES_MEMORY_LIMIT] = (Cost) 0.0;				
				}

				takeLock = true;
				returnReleaseOk = true;
			}
			break;
	
			/*
			 * We do not want to lock any of these query types.
			 */
			default:
			{
				takeLock = false;
				returnReleaseOk = false;
			}
			break;
	
		}
	
	
		/*
		 * Get the resource lock.
		 */
		if (takeLock)
		{
#ifdef RESLOCK_DEBUG
			elog(DEBUG1, "acquire resource lock for queue %u (portal %u)", 
					queueid, portal->portalId);
#endif
			SET_LOCKTAG_RESOURCE_QUEUE(tag, queueid);

			PG_TRY();
			{
				lockResult = ResLockAcquire(&tag, &incData);
			}
			PG_CATCH();
			{
				/* 
				 * We might have been waiting for a resource queue lock when we get 
				 * here. Calling ResLockRelease without calling ResLockWaitCancel will 
				 * cause the locallock to be cleaned up, but will leave the global
				 * variable lockAwaited still pointing to the locallock hash 
				 * entry.
				 */
				ResLockWaitCancel();
		
				/* Change status to no longer waiting for lock */
				pgstat_report_waiting(PGBE_WAITING_NONE);

				/* If we had acquired the resource queue lock, release it and clean up */	
				ResLockRelease(&tag, portal->portalId);

				/*
				 * Perfmon related stuff: clean up if we got cancelled
				 * while waiting.
				 */
				if (gp_enable_gpperfmon && qDesc->gpmon_pkt)
				{			
					gpmon_qlog_query_error(qDesc->gpmon_pkt);
					pfree(qDesc->gpmon_pkt);
					qDesc->gpmon_pkt = NULL;
				}

				portal->queueId = InvalidOid;
				portal->portalId = INVALID_PORTALID;

				PG_RE_THROW();
			}
			PG_END_TRY();

			/*
			 * See if the query was too small to bother locking at all, i.e.
			 * it had a cost smaller than the ignore cost threshold for the
			 * queue.
			 */
			if (lockResult == LOCKACQUIRE_NOT_AVAIL)
			{
#ifdef RESLOCK_DEBUG
				elog(DEBUG1, "cancel resource lock for queue %u (portal %u)", 
						queueid, portal->portalId);
#endif
				/* 
				 * Reset portalId and queueid for this portal so the queue
				 * and increment accounting tests continue to work properly.
				 */
				portal->queueId = InvalidOid;
				portal->portalId = INVALID_PORTALID;
				returnReleaseOk = false;
			}

			/* Count holdable cursors (if we are locking this one). */
			if (portal->cursorOptions & CURSOR_OPT_HOLD && returnReleaseOk)
				numHoldPortals++;

		}

	}
	return returnReleaseOk;

}
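
A minimal sketch of how a caller might pair this function with the release path, using only the functions visible above (ResLockPortal, ResLockRelease, SET_LOCKTAG_RESOURCE_QUEUE); the caller name and surrounding structure are hypothetical, not taken from the sources:

/*
 * Hypothetical caller sketch (illustrative only): take the resource lock
 * before executing the portal, and release it afterwards exactly when
 * ResLockPortal reported that a lock was actually taken.
 */
static void
ExecutePortalWithResourceLock(Portal portal, QueryDesc *qDesc)
{
	LOCKTAG		tag;
	bool		mustRelease;

	/* false means the lock was skipped (wrong stmt type or tiny query) */
	mustRelease = ResLockPortal(portal, qDesc);

	/* ... run the portal to completion ... */

	if (mustRelease)
	{
		/* Release against the same queue the lock was tagged with. */
		SET_LOCKTAG_RESOURCE_QUEUE(tag, portal->queueId);
		ResLockRelease(&tag, portal->portalId);
	}
}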
Example #2
/*
 * ResLockUtilityPortal -- a simplified version of ResLockPortal, used
 * specifically for utility statements. The main logic is the same as in
 * ResLockPortal, but unnecessary code has been removed and some minor
 * adjustments made for utility statements.
 */
bool
ResLockUtilityPortal(Portal portal, float4 ignoreCostLimit)
{
	bool returnReleaseOk = false;
	LOCKTAG		tag;
	Oid			queueid;
	int32		lockResult = 0;
	ResPortalIncrement	incData;

	queueid = portal->queueId;

	/*
	 * Check we have a valid queue before going any further.
	 */
	if (queueid != InvalidOid)
	{
		/*
		 * Set up the resource portal increments, ready to be added.
		 */
		incData.pid = MyProc->pid;
		incData.portalId = portal->portalId;
		incData.increments[RES_COUNT_LIMIT] = 1;
		incData.increments[RES_COST_LIMIT] = ignoreCostLimit;
		incData.increments[RES_MEMORY_LIMIT] = (Cost) 0.0;
		returnReleaseOk = true;

		/*
		 * Get the resource lock.
		 */
#ifdef RESLOCK_DEBUG
		elog(DEBUG1, "acquire resource lock for queue %u (portal %u)",
				queueid, portal->portalId);
#endif
		SET_LOCKTAG_RESOURCE_QUEUE(tag, queueid);

		PG_TRY();
		{
			lockResult = ResLockAcquire(&tag, &incData);
		}
		PG_CATCH();
		{
			/*
			 * We might have been waiting for a resource queue lock when we get
			 * here. Calling ResLockRelease without calling ResLockWaitCancel will
			 * cause the locallock to be cleaned up, but will leave the global
			 * variable lockAwaited still pointing to the locallock hash
			 * entry.
			 */
			ResLockWaitCancel();

			/* Change status to no longer waiting for lock */
			pgstat_report_waiting(PGBE_WAITING_NONE);

			/* If we had acquired the resource queue lock, release it and clean up */
			ResLockRelease(&tag, portal->portalId);

			portal->queueId = InvalidOid;
			portal->portalId = INVALID_PORTALID;

			PG_RE_THROW();
		}
		PG_END_TRY();
	}
	return returnReleaseOk;
}
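
Unlike ResLockPortal, this variant always takes the lock when the portal has a valid queue, and it charges the queue's ignore-cost threshold instead of a plan cost (memory is charged as zero). A hedged sketch of a call site follows; the wrapper name is hypothetical:

/*
 * Hypothetical utility-statement path (illustrative only): lock the
 * portal against its queue, charging the ignore-cost threshold as the
 * statement's cost, then release after execution.
 */
static void
RunUtilityWithResourceLock(Portal portal, float4 ignoreCostLimit)
{
	LOCKTAG		tag;
	bool		mustRelease;

	mustRelease = ResLockUtilityPortal(portal, ignoreCostLimit);

	/* ... execute the utility statement ... */

	if (mustRelease)
	{
		SET_LOCKTAG_RESOURCE_QUEUE(tag, portal->queueId);
		ResLockRelease(&tag, portal->portalId);
	}
}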
Example #3
/*
 * This is the main executioner for any query backend that conflicts with
 * recovery processing. Judgement has already been passed on it within
 * a specific rmgr. Here we just issue the orders to the procs. The procs
 * then throw the required error as instructed.
 */
static void
ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
									   ProcSignalReason reason)
{
	while (VirtualTransactionIdIsValid(*waitlist))
	{
		TimestampTz waitStart;
		char	   *new_status;

		pgstat_report_waiting(true);

		waitStart = GetCurrentTimestamp();
		new_status = NULL;		/* we haven't changed the ps display */

		/* reset standbyWait_us for each xact we wait for */
		standbyWait_us = STANDBY_INITIAL_WAIT_US;

		/* wait until the virtual xid is gone */
		while (!ConditionalVirtualXactLockTableWait(*waitlist))
		{
			/*
			 * Report via ps if we have been waiting for more than 500 msec
			 * (should that be configurable?)
			 */
			if (update_process_title && new_status == NULL &&
				TimestampDifferenceExceeds(waitStart, GetCurrentTimestamp(),
										   500))
			{
				const char *old_status;
				int			len;

				old_status = get_ps_display(&len);
				new_status = (char *) palloc(len + 50);
				memcpy(new_status, old_status, len);
				snprintf(new_status + len, 50,
						 " waiting for max_standby_delay (%d ms)",
						 MaxStandbyDelay);
				set_ps_display(new_status, false);
				new_status[len] = '\0'; /* truncate off " waiting ..." */
			}

			/* Is it time to kill it? */
			if (WaitExceedsMaxStandbyDelay())
			{
				pid_t		pid;

				/*
				 * Now find out who to throw out of the balloon.
				 */
				Assert(VirtualTransactionIdIsValid(*waitlist));
				pid = CancelVirtualTransaction(*waitlist, reason);

				/*
				 * Wait awhile for it to die so that we avoid flooding an
				 * unresponsive backend when system is heavily loaded.
				 */
				if (pid != 0)
					pg_usleep(5000L);
			}
		}

		/* Reset ps display if we changed it */
		if (new_status)
		{
			set_ps_display(new_status, false);
			pfree(new_status);
		}
		pgstat_report_waiting(false);

		/* The virtual transaction is gone now, wait for the next one */
		waitlist++;
	}
}
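
For context, the waitlist handed to this function is an array of virtual transaction ids terminated by an invalid entry, which is what the outer while loop tests for. In PostgreSQL sources of this era the array is typically produced by a helper such as GetConflictingVirtualXIDs; the sketch below assumes that helper's 9.0-style signature and the PROCSIG_RECOVERY_CONFLICT_SNAPSHOT reason, so treat those details as assumptions rather than a definitive caller:

/*
 * Sketch of a typical caller, modelled on the snapshot-conflict path:
 * collect the vxids of backends whose snapshots conflict with recovery,
 * then order each one to abort. GetConflictingVirtualXIDs and
 * PROCSIG_RECOVERY_CONFLICT_SNAPSHOT follow the PostgreSQL 9.0-era
 * sources; their exact signatures are assumptions here.
 */
static void
ResolveRecoveryConflictWithSnapshotSketch(TransactionId latestRemovedXid,
										  Oid dbOid)
{
	VirtualTransactionId *backends;

	/* Array terminated by an invalid vxid, as the wait loop above expects. */
	backends = GetConflictingVirtualXIDs(latestRemovedXid, dbOid);

	ResolveRecoveryConflictWithVirtualXIDs(backends,
										   PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
}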