Example #1
File: shuttle.c Project: andreiw/polaris
/*
 * Place the thread in question on the run q.
 */
static void
shuttle_unsleep(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));

	/* Waiting on a shuttle */
	ASSERT(t->t_wchan0 == (caddr_t)1 && t->t_wchan == NULL);
	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;
	THREAD_TRANSITION(t);
	CL_SETRUN(t);
}
Example #2
/*
 * Take thread off its wait queue and make it runnable.
 * Returns with thread lock held.
 */
void
waitq_setrun(kthread_t *t)
{
	waitq_t *wq = t->t_waitq;

	ASSERT(THREAD_LOCK_HELD(t));

	ASSERT(ISWAITING(t));
	if (wq == NULL)
		panic("waitq_setrun: thread %p is not on waitq", t);
	waitq_dequeue(wq, t);
	CL_SETRUN(t);
}
Example #3
/*
 * Change thread's priority while on the wait queue.
 * Dequeue and enqueue it again so that it gets placed in the right place.
 */
void
waitq_change_pri(kthread_t *t, pri_t new_pri)
{
	waitq_t *wq = t->t_waitq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(ISWAITING(t));
	ASSERT(wq != NULL);

	waitq_unlink(wq, t);
	t->t_pri = new_pri;
	waitq_link(wq, t);
}
Example #4
/*
 * Charge the thread's project and return B_TRUE if the thread should be
 * penalized because its project or zone is exceeding its cap. Also sets
 * TS_PROJWAITQ or TS_ZONEWAITQ in this case.
 *
 * It is possible that the project cap is being disabled while this routine
 * executes. This should not cause any issues since the association between
 * the thread and its project is protected by the thread lock. It will still
 * set TS_PROJWAITQ/TS_ZONEWAITQ in this case, but cpucaps_enforce will not
 * place anything on the blocked wait queue.
 */
boolean_t
cpucaps_charge(kthread_id_t t, caps_sc_t *csc, cpucaps_charge_t charge_type)
{
	kproject_t	*kpj = ttoproj(t);
	klwp_t		*lwp = t->t_lwp;
	zone_t		*zone;
	cpucap_t	*project_cap;
	boolean_t	rc = B_FALSE;

	ASSERT(THREAD_LOCK_HELD(t));

	/* Nothing to do for projects that are not capped. */
	if (lwp == NULL || !PROJECT_IS_CAPPED(kpj))
		return (B_FALSE);

	caps_charge_adjust(t, csc);

	/*
	 * The caller only requested that we charge the project's usage;
	 * skip the enforcement part.
	 */
	if (charge_type == CPUCAPS_CHARGE_ONLY)
		return (B_FALSE);

	project_cap = kpj->kpj_cpucap;

	if (project_cap->cap_usage >= project_cap->cap_chk_value) {
		t->t_schedflag |= TS_PROJWAITQ;
		rc = B_TRUE;
	} else if (t->t_schedflag & TS_PROJWAITQ) {
		t->t_schedflag &= ~TS_PROJWAITQ;
	}

	zone = ttozone(t);
	if (!ZONE_IS_CAPPED(zone)) {
		if (t->t_schedflag & TS_ZONEWAITQ)
			t->t_schedflag &= ~TS_ZONEWAITQ;
	} else {
		cpucap_t *zone_cap = zone->zone_cpucap;

		if (zone_cap->cap_usage >= zone_cap->cap_chk_value) {
			t->t_schedflag |= TS_ZONEWAITQ;
			rc = B_TRUE;
		} else if (t->t_schedflag & TS_ZONEWAITQ) {
			t->t_schedflag &= ~TS_ZONEWAITQ;
		}
	}

	return (rc);
}
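
A scheduling class would typically drive this from its tick processing. The
following is a minimal sketch, not code from the examples: the class name xx,
its xxproc_t class data, and the ts_caps field are assumptions made for
illustration.

/*
 * Hypothetical scheduling-class tick fragment: charge the thread's
 * project/zone under the thread lock; a B_TRUE result means a cap is
 * exceeded and the class should make the thread surrender the CPU so
 * that cpucaps_enforce() can park it on the blocked wait queue.
 */
static void
xx_tick(kthread_t *t)
{
	xxproc_t *xxpp = (xxproc_t *)t->t_cldata;	/* assumed class data */

	thread_lock(t);
	if (cpucaps_charge(t, &xxpp->ts_caps, CPUCAPS_CHARGE_ENFORCE)) {
		/* over cap: arrange for the thread to yield the CPU */
	}
	thread_unlock(t);
}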
Example #5
File: condvar.c Project: BjoKaSH/mac-zfs
/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}
Example #6
/*
 * Charge the project of thread t for the CPU time t has spent on CPU since
 * the last adjustment.
 *
 * Record the current on-CPU time in the csc structure.
 *
 * Do not adjust for more than one tick's worth of time.
 *
 * It is possible that the project cap is being disabled while this routine
 * executes. This should not cause any issues since the association between
 * the thread and its project is protected by the thread lock.
 */
static void
caps_charge_adjust(kthread_id_t t, caps_sc_t *csc)
{
	kproject_t	*kpj = ttoproj(t);
	hrtime_t	new_usage;
	hrtime_t	usage_delta;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(kpj->kpj_cpucap != NULL);

	/* Get on-CPU time since birth of a thread */
	new_usage = mstate_thread_onproc_time(t);

	/* Time spent on CPU since last checked */
	usage_delta = new_usage - csc->csc_cputime;

	/* Save the accumulated on-CPU time */
	csc->csc_cputime = new_usage;

	/* Charge at most one tick worth of on-CPU time */
	if (usage_delta > cap_tick_cost)
		usage_delta = cap_tick_cost;

	/* Add usage_delta to the project usage value. */
	if (usage_delta > 0) {
		cpucap_t *cap = kpj->kpj_cpucap;

		DTRACE_PROBE2(cpucaps__project__charge,
		    kthread_id_t, t, hrtime_t, usage_delta);

		disp_lock_enter_high(&cap->cap_usagelock);
		cap->cap_usage += usage_delta;

		/* Check for overflows */
		if (cap->cap_usage < 0)
			cap->cap_usage = MAX_USAGE - 1;

		disp_lock_exit_high(&cap->cap_usagelock);

		/*
		 * cap_maxusage is only kept for observability. Move it outside
		 * the lock to reduce the time spent while holding the lock.
		 */
		if (cap->cap_usage > cap->cap_maxusage)
			cap->cap_maxusage = cap->cap_usage;
	}
}
Example #7
static void
waitq_dequeue(waitq_t *wq, kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_waitq == wq);
	ASSERT(ISWAITING(t));

	waitq_unlink(wq, t);
	DTRACE_SCHED1(cpucaps__wakeup, kthread_t *, t);

	/*
	 * Change thread to transition state and drop the wait queue lock. The
	 * thread will remain locked since its t_lockp points to the
	 * transition_lock.
	 */
	THREAD_TRANSITION(t);
}
Example #8
File: condvar.c Project: BjoKaSH/mac-zfs
/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p", t, sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}
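
For context, cv_unsleep() and cv_change_pri() (example #5) are not called
directly; they are installed in the condition variable's synch-object
operations vector, which cv_block() (example #14) attaches to the thread via
t_sobj_ops. A sketch of that wiring, modeled on the real cv_sobj_ops; the
cv_owner() routine is assumed here and is not among the examples:

/*
 * Synch-object operations for condition variables.  The dispatcher
 * invokes these through t->t_sobj_ops when it must wake the thread or
 * change its priority while it sleeps.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};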
Example #9
File: msacct.c Project: bahamas10/openzfs
/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racy interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * they should be.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
	struct mstate	*const	ms = &ttolwp(t)->lwp_mstate;

	int		mstate;
	hrtime_t	now;
	hrtime_t	state_start;
	hrtime_t	waitrq;
	hrtime_t	aggr_onp;
	hrtime_t	aggr_run;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_procp->p_flag & SSYS);
	ASSERT(ttolwp(t) != NULL);

	/* shouldn't be any non-SYSTEM on-CPU time */
	ASSERT(ms->ms_acct[LMS_USER] == 0);
	ASSERT(ms->ms_acct[LMS_TRAP] == 0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	state_start = ms->ms_state_start;

	aggr_onp = ms->ms_acct[LMS_SYSTEM];
	aggr_run = ms->ms_acct[LMS_WAIT_CPU];

	now = gethrtime_unscaled();

	/* if waitrq == 0, then there is no time to account to TS_RUN */
	if (waitrq == 0)
		waitrq = now;

	/* If there is system time to accumulate, do so */
	if (mstate == LMS_SYSTEM && state_start < waitrq)
		aggr_onp += waitrq - state_start;

	if (waitrq < now)
		aggr_run += now - waitrq;

	scalehrtime(&aggr_onp);
	scalehrtime(&aggr_run);

	*onproc = aggr_onp;
	*runnable = aggr_run;
}
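
Since the function asserts THREAD_LOCK_HELD(t), a caller samples both values
under the thread lock. A minimal illustrative sketch, assuming t points at a
system (SSYS) thread with an lwp:

	hrtime_t onproc, runnable;

	thread_lock(t);
	mstate_systhread_times(t, &onproc, &runnable);
	thread_unlock(t);
	/* both values are scaled nanoseconds and may be non-monotonic */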
Example #10
/*
 * Take the first thread off the wait queue and make it runnable.
 * Return a pointer to the thread, or NULL if the wait queue is empty.
 */
static kthread_t *
waitq_runfirst(waitq_t *wq)
{
	kthread_t *t;

	t = waitq_takeone(wq);
	if (t != NULL) {
		/*
		 * t should have transition lock held.
		 * CL_SETRUN() will replace it with dispq lock and keep it held.
		 * thread_unlock() will drop dispq lock and restore PIL.
		 */
		ASSERT(THREAD_LOCK_HELD(t));
		CL_SETRUN(t);
		thread_unlock(t);
	}
	return (t);
}
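
Draining a whole queue is then just a loop around waitq_runfirst(). A minimal
sketch of such a wrapper (illumos provides one much like this, waitq_runall):

/*
 * Make every thread on the wait queue runnable, highest priority first.
 */
static void
waitq_runall(waitq_t *wq)
{
	while (waitq_runfirst(wq) != NULL)
		continue;
}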
Example #11
File: msacct.c Project: bahamas10/openzfs
/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t waitrq;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int	mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
		/* if waitrq is zero, count all of the time. */
		if (waitrq == 0) {
			waitrq = now;
		}

		if (waitrq > state_start) {
			aggr_time += waitrq - state_start;
		}
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}
Example #12
static void
waitq_unlink(waitq_t *wq, kthread_t *t)
{
	kthread_t *nt;
	kthread_t **ptl;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(DISP_LOCK_HELD(&wq->wq_lock));
	ASSERT(t->t_waitq == wq);

	ptl = &t->t_priback->t_link;
	/*
	 * Is it the head of a priority sublist?  If so, we need to walk
	 * the priorities to find the t_link pointer that points to it.
	 */
	if (*ptl != t) {
		/*
		 * Find the right priority level.
		 */
		ptl = &t->t_waitq->wq_first;
		while ((nt = *ptl) != t)
			ptl = &nt->t_priback->t_link;
	}
	/*
	 * Remove thread from the t_link list.
	 */
	*ptl = t->t_link;

	/*
	 * Take it off the priority sublist if there's more than one
	 * thread there.
	 */
	if (t->t_priforw != t) {
		t->t_priback->t_priforw = t->t_priforw;
		t->t_priforw->t_priback = t->t_priback;
	}
	t->t_link = NULL;

	wq->wq_count--;
	t->t_waitq = NULL;
	t->t_priforw = NULL;
	t->t_priback = NULL;
}
Example #13
File: semaphore.c Project: andreiw/polaris
/*
 * Remove a thread from the sleep queue for this
 * semaphore.
 */
static void
sema_dequeue(ksema_t *sp, kthread_t *t)
{
	kthread_t	**tpp;
	kthread_t	*tp;
	sema_impl_t	*s;

	ASSERT(THREAD_LOCK_HELD(t));
	s = (sema_impl_t *)sp;
	tpp = &s->s_slpq;
	while ((tp = *tpp) != NULL) {
		if (tp == t) {
			*tpp = t->t_link;
			t->t_link = NULL;
			return;
		}
		tpp = &tp->t_link;
	}
}
Example #14
File: condvar.c Project: BjoKaSH/mac-zfs
/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid charging the accounting
	 * for an interrupt thread to the still-pinned lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock. This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}
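
For context, cv_block() is driven by the cv_wait() family: the caller locks
the thread, blocks it on the CV, drops the thread lock without preempting,
releases its own mutex, and switches away. A simplified sketch modeled on
illumos cv_wait(); panic handling and timing statistics are omitted:

void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;

	ASSERT(mutex_owned(mp));

	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);	/* drop sqh->sq_lock, no preemption */
	mutex_exit(mp);
	swtch();			/* sleep until cv_signal/cv_broadcast */
	mutex_enter(mp);
}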
Example #15
File: semaphore.c Project: andreiw/polaris
/*
 * Put a thread on the sleep queue for this semaphore.
 */
static void
sema_queue(ksema_t *sp, kthread_t *t)
{
	kthread_t	**tpp;
	kthread_t	*tp;
	pri_t		cpri;
	sema_impl_t	*s;

	ASSERT(THREAD_LOCK_HELD(t));
	s = (sema_impl_t *)sp;
	tpp = &s->s_slpq;
	cpri = DISP_PRIO(t);
	while ((tp = *tpp) != NULL) {
		if (cpri > DISP_PRIO(tp))
			break;
		tpp = &tp->t_link;
	}
	*tpp = t;
	t->t_link = tp;
}
Example #16
/*
 * Check if thread can be moved to a new cpu partition.  Called by
 * cpupart_move_thread() and pset_bind_start().
 */
int
cpupart_movable_thread(kthread_id_t tp, cpupart_t *cp, int ignore)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
	ASSERT(cp != NULL);
	ASSERT(THREAD_LOCK_HELD(tp));

	/*
	 * CPU-bound threads can't be moved.
	 */
	if (!ignore) {
		cpu_t *boundcpu = tp->t_bound_cpu ? tp->t_bound_cpu :
		    tp->t_weakbound_cpu;
		if (boundcpu != NULL && boundcpu->cpu_part != cp)
			return (EBUSY);
	}

	if (tp->t_cid == sysdccid) {
		return (EINVAL);	/* For now, sysdc threads can't move */
	}

	return (0);
}
Example #17
File: shuttle.c Project: andreiw/polaris
/*ARGSUSED*/
static void
shuttle_change_pri(kthread_t *t, pri_t p, pri_t *t_prip)
{
	ASSERT(THREAD_LOCK_HELD(t));
	*t_prip = p;
}
Example #18
/*
 * Sets the value of the yield field for the specified thread.
 * Called by ts_preempt() and ts_tick() to set the field, and
 * ts_yield() to clear it.
 * The kernel never looks at this field so we don't need a
 * schedctl_get_yield() function.
 */
void
schedctl_set_yield(kthread_t *t, short val)
{
	ASSERT(THREAD_LOCK_HELD(t));
	t->t_schedctl->sc_preemptctl.sc_yield = val;
}
Example #19
File: semaphore.c Project: andreiw/polaris
/*
 * Similar to sema_p(), except that it blocks at an interruptible
 * priority. Returns 1 if the wait was interrupted by a signal,
 * otherwise 0.
 */
int
sema_p_sig(ksema_t *sp)
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(t);
	sema_impl_t	*s;
	disp_lock_t	*sqlp;

	if (lwp == NULL) {
		sema_p(sp);
		return (0);
	}

	s = (sema_impl_t *)sp;
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	ASSERT(s->s_count >= 0);
	while (s->s_count == 0) {
		proc_t *p = ttoproc(t);
		thread_lock_high(t);
		t->t_flag |= T_WAKEABLE;
		SEMA_BLOCK(s, sqlp);
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		thread_unlock_nopreempt(t);
		if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
			setrun(t);
		swtch();
		t->t_flag &= ~T_WAKEABLE;
		if (ISSIG(t, FORREAL) ||
		    lwp->lwp_sysabort || MUSTRETURN(p, t)) {
			kthread_t *sq, *tp;
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			disp_lock_enter(sqlp);
			sq = s->s_slpq;
			/*
			 * In case sema_v() and the interrupt happen at
			 * the same time, we need to pass the wakeup from
			 * sema_v() on to the next waiting thread.
			 */
			if ((sq != NULL) && (s->s_count > 0)) {
				tp = sq;
				ASSERT(THREAD_LOCK_HELD(tp));
				sq = sq->t_link;
				tp->t_link = NULL;
				DTRACE_SCHED1(wakeup, kthread_t *, tp);
				tp->t_sobj_ops = NULL;
				tp->t_wchan = NULL;
				ASSERT(tp->t_state == TS_SLEEP);
				CL_WAKEUP(tp);
				s->s_slpq = sq;
				disp_lock_exit_high(sqlp);
				thread_unlock(tp);
			} else {
				disp_lock_exit(sqlp);
			}
			return (1);
		}
		lwp->lwp_asleep = 0;
		disp_lock_enter(sqlp);
	}
	s->s_count--;
	disp_lock_exit(sqlp);
	return (0);
}
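
The return value maps directly onto an errno-style caller. An illustrative
usage pattern, not taken from the examples:

	/* interruptible wait: back out with EINTR on a signal */
	if (sema_p_sig(sp) != 0)
		return (EINTR);
	/* ... semaphore held; do the work, then sema_v(sp) ... */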
Example #20
/*
 * Returns non-zero if the specified thread shouldn't be preempted at this time.
 * Called by ts_preempt(), ts_tick(), and ts_update().
 */
int
schedctl_get_nopreempt(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	return (t->t_schedctl->sc_preemptctl.sc_nopreempt);
}
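
Examples #18 and #20 meet in a class's preemption path: when the lwp has
requested a no-preempt window, the class can defer the preemption briefly and
set sc_yield so the thread yields on its own. A hedged sketch of that
interplay, loosely modeled on ts_preempt(); the time-limit bookkeeping of the
real routine is omitted:

	/*
	 * Illustrative fragment of a preempt routine, called with the
	 * thread lock held: honor a requested no-preempt window once,
	 * asking the thread to yield voluntarily via its schedctl page.
	 */
	if (t->t_schedctl != NULL && schedctl_get_nopreempt(t)) {
		schedctl_set_yield(t, 1);	/* ask the thread to yield */
		setfrontdq(t);			/* resume it quickly */
		return;
	}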