Example #1
/*
 * Prevent any new threads from entering the wait queue and make all threads
 * currently on the wait queue runnable. After waitq_block() completes, no
 * threads should ever appear on the wait queue until it is unblocked.
 */
void
waitq_block(waitq_t *wq)
{
	ASSERT(!wq->wq_blocked);
	disp_lock_enter(&wq->wq_lock);
	wq->wq_blocked = B_TRUE;
	disp_lock_exit(&wq->wq_lock);
	waitq_runall(wq);
	ASSERT(waitq_isempty(wq));
}
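
To make the protocol concrete, here is a minimal userland sketch of the same pattern using POSIX threads in place of dispatcher locks. The toy_waitq_t type and every function below are invented for illustration and are not the illumos waitq API; the point is the ordering, which mirrors waitq_block(): the blocked flag is raised under the lock before the queue is drained, so no thread can slip onto the queue between the two steps.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct toy_waitq {
	pthread_mutex_t	tw_lock;	/* stands in for wq_lock */
	pthread_cond_t	tw_cv;		/* waiters sleep here */
	pthread_cond_t	tw_drained;	/* signaled when the queue empties */
	uint64_t	tw_gen;		/* bumped on every wakeup broadcast */
	int		tw_count;	/* threads currently on the queue */
	bool		tw_blocked;	/* stands in for wq_blocked */
} toy_waitq_t;

/*
 * Try to wait on the queue. Returns false without waiting when the
 * queue is blocked, so a blocked queue can never gain new waiters.
 */
bool
toy_waitq_enter(toy_waitq_t *wq)
{
	pthread_mutex_lock(&wq->tw_lock);
	if (wq->tw_blocked) {
		pthread_mutex_unlock(&wq->tw_lock);
		return (false);
	}
	uint64_t gen = wq->tw_gen;
	wq->tw_count++;
	while (wq->tw_gen == gen)	/* guard against spurious wakeups */
		pthread_cond_wait(&wq->tw_cv, &wq->tw_lock);
	if (--wq->tw_count == 0)
		pthread_cond_signal(&wq->tw_drained);
	pthread_mutex_unlock(&wq->tw_lock);
	return (true);
}

/*
 * Same shape as waitq_block(): raise the flag first (no new waiters),
 * then make every current waiter runnable and wait for the drain.
 */
void
toy_waitq_block(toy_waitq_t *wq)
{
	pthread_mutex_lock(&wq->tw_lock);
	assert(!wq->tw_blocked);
	wq->tw_blocked = true;
	wq->tw_gen++;				/* the waitq_runall() step */
	pthread_cond_broadcast(&wq->tw_cv);
	while (wq->tw_count > 0)
		pthread_cond_wait(&wq->tw_drained, &wq->tw_lock);
	pthread_mutex_unlock(&wq->tw_lock);	/* queue is now empty */
}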
Example #2
/*
 * Allow threads to be placed on the wait queue.
 */
void
waitq_unblock(waitq_t *wq)
{
	disp_lock_enter(&wq->wq_lock);

	ASSERT(waitq_isempty(wq));
	ASSERT(wq->wq_blocked);

	wq->wq_blocked = B_FALSE;

	disp_lock_exit(&wq->wq_lock);
}
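
Continuing the toy_waitq sketch above (again, invented names rather than the real API), the unblock side is just the flag flip back, with the same invariants waitq_unblock() asserts:

/*
 * Counterpart of waitq_unblock(): the queue must still be empty and
 * still blocked, exactly the checks the kernel version makes.
 */
void
toy_waitq_unblock(toy_waitq_t *wq)
{
	pthread_mutex_lock(&wq->tw_lock);
	assert(wq->tw_count == 0);
	assert(wq->tw_blocked);
	wq->tw_blocked = false;
	pthread_mutex_unlock(&wq->tw_lock);
}

A caller would pair the two around whatever state change invalidates the queue: toy_waitq_block(&wq), mutate, toy_waitq_unblock(&wq). Between the calls the queue is guaranteed empty and closed to new waiters.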
Example #3
/* ARGSUSED */
static void
cap_poke_waitq(cpucap_t *cap, int64_t gen)
{
	ASSERT(MUTEX_HELD(&caps_lock));

	if (cap->cap_base != 0) {
		/*
		 * Because of the way usage is calculated and decayed, it's
		 * possible for the zone to be slightly over its cap, but we
		 * don't want to count that after we have reduced the effective
		 * cap to the baseline.  That way the zone will be able to
		 * burst again after the burst_limit has expired.
		 */
		if (cap->cap_usage > cap->cap_base &&
		    cap->cap_chk_value == cap->cap_value) {
			cap->cap_above_base++;

			/*
			 * If bursting is limited and we've been bursting
			 * longer than we're supposed to, then set the
			 * effective cap to the baseline.
			 */
			if (cap->cap_burst_limit != 0) {
				cap->cap_bursting++;
				if (cap->cap_bursting >= cap->cap_burst_limit)
					cap->cap_chk_value = cap->cap_base;
			}
		} else if (cap->cap_bursting > 0) {
			/*
			 * We're not bursting now, but we were; decay the
			 * bursting timer.
			 */
			cap->cap_bursting--;
			/*
			 * Reset the effective cap once we decay to 0 so we
			 * can burst again.
			 */
			if (cap->cap_bursting == 0 &&
			    cap->cap_chk_value != cap->cap_value)
				cap->cap_chk_value = cap->cap_value;
		}
	}

	if (cap->cap_usage >= cap->cap_chk_value) {
		cap->cap_above++;
	} else {
		waitq_t *wq = &cap->cap_waitq;

		cap->cap_below++;

		if (!waitq_isempty(wq)) {
			int i, ndequeue, p;

			/*
			 * Since this function is only called once per tick,
			 * we can hit a situation where we have artificially
			 * limited the project/zone below its cap.  This would
			 * happen if we had multiple threads queued up but
			 * only dequeued one thread per tick. To avoid this we
			 * dequeue multiple threads, calculated based on the
			 * usage percentage of the cap. It is possible that we
			 * could dequeue too many threads and some of them
			 * might be put back on the wait queue quickly, but
			 * since we know that threads are on the wait queue
			 * because we're capping, we know that there are
			 * unused CPU cycles anyway, so this extra work does
			 * not hurt. Also, the ndequeue number is only an
			 * upper bound and we might dequeue fewer, depending
			 * on how many threads are actually on the wait queue.
			 * The ndequeue values are empirically derived and
			 * could be adjusted or calculated in another way if
			 * necessary.
			 */
			p = (int)((100 * cap->cap_usage) / cap->cap_chk_value);
			if (p >= 98)
				ndequeue = 10;
			else if (p >= 95)
				ndequeue = 20;
			else if (p >= 90)
				ndequeue = 40;
			else if (p >= 85)
				ndequeue = 80;
			else
				ndequeue = 160;

			for (i = 0; i < ndequeue; i++) {
				waitq_runone(wq);
				if (waitq_isempty(wq))
					break;
			}
			DTRACE_PROBE2(cpucaps__pokeq, int, p, int, i);
		}
	}
}
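
The threshold ladder in the middle of cap_poke_waitq() reads cleanly in isolation: the closer usage sits to the effective cap, the smaller the per-tick upper bound on wakeups, because near the cap most woken threads would just be re-queued. Here is a standalone restatement; the helper and main() are ours for illustration, and only the thresholds and counts come from the function above.

#include <stdio.h>
#include <stdint.h>

/*
 * Restatement of the ndequeue ladder from cap_poke_waitq(). The
 * thresholds and counts are the empirically derived values above.
 */
static int
ndequeue_for_usage(uint64_t usage, uint64_t chk_value)
{
	int p = (int)((100 * usage) / chk_value);

	if (p >= 98)
		return (10);
	if (p >= 95)
		return (20);
	if (p >= 90)
		return (40);
	if (p >= 85)
		return (80);
	return (160);
}

int
main(void)
{
	int pct;

	/* For a cap of 100 units, show the per-tick dequeue upper bound. */
	for (pct = 80; pct <= 100; pct += 5)
		printf("usage %3d%% -> up to %3d threads dequeued\n",
		    pct, ndequeue_for_usage(pct, 100));
	return (0);
}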