Example #1
/*
 * Put the specified thread on the specified wait queue without
 * dropping the thread's lock.  Returns 1 if the thread was
 * successfully placed on the project's wait queue, or 0 if the wait
 * queue is blocked.
 */
int
waitq_enqueue(waitq_t *wq, kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_sleepq == NULL);
	ASSERT(t->t_waitq == NULL);
	ASSERT(t->t_link == NULL);

	disp_lock_enter_high(&wq->wq_lock);

	/*
	 * Can't enqueue anything on a blocked wait queue
	 */
	if (wq->wq_blocked) {
		disp_lock_exit_high(&wq->wq_lock);
		return (0);
	}

	/*
	 * Record the time at which the thread is placed on the wait
	 * queue.  The microstate accounting code uses this timestamp
	 * to determine wait times.
	 */
	t->t_waitrq = gethrtime_unscaled();

	/*
	 * Mark the thread as not swappable.  If necessary, it will get
	 * swapped out when it returns to userland.
	 */
	t->t_schedflag |= TS_DONT_SWAP;
	DTRACE_SCHED1(cpucaps__sleep, kthread_t *, t);
	waitq_link(wq, t);

	THREAD_WAIT(t, &wq->wq_lock);
	return (1);
}
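A hedged caller sketch (not from the examples above): waitq_enqueue() must be entered with the thread's lock held, and after a successful enqueue THREAD_WAIT() has retargeted that lock to wq->wq_lock, so a plain thread_unlock() releases the wait-queue lock. The function name is hypothetical.

/*
 * Hypothetical caller sketch.  On success, t's t_lockp now points at
 * wq->wq_lock (retargeted by THREAD_WAIT() above), so thread_unlock()
 * drops the wait-queue lock; on failure the thread is untouched.
 */
static int
try_park_thread(waitq_t *wq, kthread_t *t)
{
	int parked;

	thread_lock(t);			/* required by the ASSERTs */
	parked = waitq_enqueue(wq, t);
	thread_unlock(t);
	return (parked);
}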
Example #2
/*
 * Make 'inheritor' inherit priority from this turnstile.
 */
static void
turnstile_pi_inherit(turnstile_t *ts, kthread_t *inheritor, pri_t epri)
{
	ASSERT(THREAD_LOCK_HELD(inheritor));
	ASSERT(DISP_LOCK_HELD(&TURNSTILE_CHAIN(ts->ts_sobj).tc_lock));

	if (epri <= inheritor->t_pri)
		return;

	if (ts->ts_inheritor == NULL) {
		ts->ts_inheritor = inheritor;
		ts->ts_epri = epri;
		disp_lock_enter_high(&inheritor->t_pi_lock);
		ts->ts_prioinv = inheritor->t_prioinv;
		inheritor->t_prioinv = ts;
		disp_lock_exit_high(&inheritor->t_pi_lock);
	} else {
		/*
		 * 'inheritor' is already inheriting from this turnstile,
		 * so just adjust its priority.
		 */
		ASSERT(ts->ts_inheritor == inheritor);
		if (ts->ts_epri < epri)
			ts->ts_epri = epri;
	}

	if (epri > DISP_PRIO(inheritor))
		thread_change_epri(inheritor, epri);
}
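The two priority checks are asymmetric on purpose: the early return compares epri against the base priority t_pri, while the final test uses DISP_PRIO(), which also folds in any priority the inheritor is already inheriting. A simplified model of that macro, assuming the usual illumos definition (verify against sys/disp.h):

/*
 * Simplified model: a thread's effective dispatch priority is the
 * larger of its inherited priority (t_epri) and its assigned
 * priority (t_pri).
 */
#define	DISP_PRIO(t)	((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)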
Example #3
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * switch to a new thread.
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_swtch(kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	lwp->lwp_asleep = 1;			/* /proc */
	lwp->lwp_sysabort = 0;			/* /proc */
	lwp->lwp_ru.nvcsw++;
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	(void) new_mstate(curthread, LMS_SLEEP);
	disp_lock_exit_high(&shuttle_lock);
	mutex_exit(l);
	if (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread))
		setrun(curthread);
	swtch();
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
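A hedged sketch of the caller-side contract stated in the trailing comment: enter with only the mutex held, and on return decode the wakeup reason and clear the /proc bookkeeping fields. The function name and the EINTR mapping are assumptions, loosely in the door-call style.

/*
 * Hypothetical caller sketch: sleep on the shuttle, then perform the
 * ISSIG/lwp_sysabort check and cleanup that shuttle_swtch() leaves
 * to its caller.
 */
static int
wait_on_shuttle(kmutex_t *lp)
{
	klwp_t	*lwp = ttolwp(curthread);
	int	error = 0;

	mutex_enter(lp);
	shuttle_swtch(lp);		/* returns with lp dropped */
	if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort)
		error = EINTR;		/* woken by a signal or abort */
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	return (error);
}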
Example #4
/*
 * Charge the project of thread t for the time t has spent on CPU
 * since the last adjustment.
 *
 * Record the current on-CPU time in the csc structure.
 *
 * Do not adjust for more than one tick's worth of time.
 *
 * It is possible that the project cap is being disabled while this
 * routine executes.  This should not cause any issues, since the
 * association between the thread and its project is protected by the
 * thread lock.
 */
static void
caps_charge_adjust(kthread_id_t t, caps_sc_t *csc)
{
	kproject_t	*kpj = ttoproj(t);
	hrtime_t	new_usage;
	hrtime_t	usage_delta;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(kpj->kpj_cpucap != NULL);

	/* Get on-CPU time since the thread's creation */
	new_usage = mstate_thread_onproc_time(t);

	/* Time spent on CPU since last checked */
	usage_delta = new_usage - csc->csc_cputime;

	/* Save the accumulated on-CPU time */
	csc->csc_cputime = new_usage;

	/* Charge at most one tick worth of on-CPU time */
	if (usage_delta > cap_tick_cost)
		usage_delta = cap_tick_cost;

	/* Add usage_delta to the project usage value. */
	if (usage_delta > 0) {
		cpucap_t *cap = kpj->kpj_cpucap;

		DTRACE_PROBE2(cpucaps__project__charge,
		    kthread_id_t, t, hrtime_t, usage_delta);

		disp_lock_enter_high(&cap->cap_usagelock);
		cap->cap_usage += usage_delta;

		/* Check for overflows */
		if (cap->cap_usage < 0)
			cap->cap_usage = MAX_USAGE - 1;

		disp_lock_exit_high(&cap->cap_usagelock);

		/*
		 * cap_maxusage is kept only for observability, so it is
		 * updated outside the lock to reduce the time spent
		 * holding the lock.
		 */
		if (cap->cap_usage > cap->cap_maxusage)
			cap->cap_maxusage = cap->cap_usage;
	}
}
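The overflow check relies on cap_usage being a signed 64-bit hrtime_t: adding a positive delta that wraps past the maximum shows up as a negative value. A standalone model of the clamp, with MAX_USAGE standing in for the kernel's constant:

/*
 * Standalone model of the clamp above (MAX_USAGE is a stand-in for
 * the kernel constant, and model_hrtime_t for hrtime_t).
 */
#include <limits.h>

typedef long long model_hrtime_t;
#define	MAX_USAGE	LLONG_MAX

static model_hrtime_t
charge_usage(model_hrtime_t usage, model_hrtime_t delta)
{
	usage += delta;
	if (usage < 0)			/* wrapped past the maximum */
		usage = MAX_USAGE - 1;
	return (usage);
}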
Example #5
/*
 * If the turnstile is non-NULL, remove it from the inheritor's
 * t_prioinv list.  Compute the new inherited priority and return it.
 */
static pri_t
turnstile_pi_tsdelete(turnstile_t *ts, kthread_t *inheritor)
{
	turnstile_t **tspp, *tsp;
	pri_t new_epri = 0;

	disp_lock_enter_high(&inheritor->t_pi_lock);
	tspp = &inheritor->t_prioinv;
	while ((tsp = *tspp) != NULL) {
		if (tsp == ts)
			*tspp = tsp->ts_prioinv;
		else
			new_epri = MAX(new_epri, tsp->ts_epri);
		tspp = &tsp->ts_prioinv;
	}
	disp_lock_exit_high(&inheritor->t_pi_lock);
	return (new_epri);
}
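The loop is the classic pointer-to-pointer unlink idiom: by walking a turnstile_t ** rather than a turnstile_t *, the matching entry is spliced out with a single assignment and no special case for the list head. Note also that the walk continues after the splice, through the removed entry's still-intact ts_prioinv link, so the maximum remaining ts_epri is computed in the same pass. A generic standalone sketch of the idiom:

/* Generic sketch of the pointer-to-pointer unlink used above. */
struct node {
	struct node *next;
};

static void
unlink_node(struct node **headp, struct node *victim)
{
	struct node **npp, *np;

	for (npp = headp; (np = *npp) != NULL; npp = &np->next) {
		if (np == victim) {
			*npp = np->next; /* head and middle alike */
			break;
		}
	}
}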
Example #6
/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr avoids charging an interrupt thread's
	 * accounting against the still-pinned lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock. This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}
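SQHASH() maps the condition variable's address onto one of a fixed array of sleep-queue heads, each guarded by its own sq_lock, so unrelated CVs rarely contend on the same dispatcher lock. A simplified model of the hashing (illustrative only; the real definitions live in sys/sleepq.h and mix the address bits differently):

/*
 * Simplified model of the hashed sleep queues, not the actual
 * sys/sleepq.h definitions: the wait-channel address selects one of
 * NSLEEPQ_MODEL buckets.
 */
#define	NSLEEPQ_MODEL	2048

#define	SQHASH_MODEL(chan) \
	(&sleepq_head[((uintptr_t)(chan) >> 2) & (NSLEEPQ_MODEL - 1)])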
Example #7
/*
 * Mark the specified thread as once again sleeping on a shuttle object.  This
 * routine is called to put a server thread -- one that was dequeued but for
 * which shuttle_resume() was _not_ called -- back to sleep on a shuttle
 * object.  Because we don't hit the sched:::wakeup DTrace probe until
 * shuttle_resume(), we do _not_ have a sched:::sleep probe here.
 */
void
shuttle_sleep(kthread_t *t)
{
	klwp_t	*lwp = ttolwp(t);
	proc_t	*p = ttoproc(t);

	thread_lock(t);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	t->t_flag |= T_WAKEABLE;
	t->t_sobj_ops = &shuttle_sobj_ops;
	t->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(t);
	ASSERT(t->t_mstate == LMS_SLEEP);
	THREAD_SLEEP(t, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);
	if (lwp && (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t)))
		setrun(t);
}
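A hedged sketch of the situation the header comment describes: code that has dequeued a server thread either hands it the CPU with shuttle_resume() or, if the thread turns out not to be needed, parks it again with shuttle_sleep(). Both helper names below are invented for illustration, and 'lp' must be held on entry.

/*
 * Hypothetical dispatch sketch; dequeue_server() and work_available()
 * are invented helpers.  'lp' is held on entry.
 */
static void
dispatch_or_repark(kmutex_t *lp)
{
	kthread_t *server = dequeue_server();	/* hypothetical */

	if (work_available()) {			/* hypothetical */
		shuttle_resume(server, lp);	/* hand off the CPU */
	} else {
		shuttle_sleep(server);		/* park it again */
		mutex_exit(lp);
	}
}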
Example #8
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * resume the specified thread. The 't' thread must be marked as ONPROC.
 *
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_resume(kthread_t *t, kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);
	cpu_t	*cp;
	disp_lock_t *oldtlp;

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	/*
	 * Set cpu_dispthread before changing the thread state so that
	 * kernel preemption is deferred until after swtch_to().
	 */
	cp = CPU;
	cp->cpu_dispthread = t;
	cp->cpu_dispatch_pri = DISP_PRIO(t);
	/*
	 * Set the wchan0 field so that /proc won't just do a setrun
	 * on this thread when trying to stop a process. Instead,
	 * /proc will mark the thread as VSTOPPED similar to threads
	 * that are blocked on user level condition variables.
	 */
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);

	/*
	 * Update the microstate records (there is no t_waitrq time to
	 * account for here).
	 */
	(void) new_mstate(curthread, LMS_SLEEP);

	thread_lock_high(t);
	oldtlp = t->t_lockp;

	restore_mstate(t);
	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;

	/*
	 * Make sure we end up on the right CPU if we are dealing with
	 * bound CPUs or processor partitions.
	 */
	if (t->t_bound_cpu != NULL || t->t_cpupart != cp->cpu_part) {
		aston(t);
		cp->cpu_runrun = 1;
	}

	/*
	 * We re-assign t_disp_queue and t_lockp of 't' here because
	 * 't' could have been preempted.
	 */
	if (t->t_disp_queue != cp->cpu_disp) {
		t->t_disp_queue = cp->cpu_disp;
		thread_onproc(t, cp);
	}

	/*
	 * We can't call thread_unlock_high() here because t's thread
	 * lock could have been changed by the thread_onproc() call
	 * above to point to CPU->cpu_thread_lock.
	 */
	disp_lock_exit_high(oldtlp);

	mutex_exit(l);
	/*
	 * Make sure we didn't receive any important events while
	 * we weren't looking
	 */
	if (lwp &&
	    (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread)))
		setrun(curthread);

	swtch_to(t);
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
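The trailing comment puts the same post-wakeup burden on the caller as shuttle_swtch() does. A hedged client-side sketch of the full round trip, loosely in the door_call() style; the rendezvous mutex and the request-publishing step are assumptions:

/*
 * Hypothetical client sketch: hand the CPU directly to a ready server
 * thread, then decode the wakeup reason once something resumes us.
 */
static int
call_server(kthread_t *server, kmutex_t *lp)
{
	klwp_t	*lwp = ttolwp(curthread);
	int	error = 0;

	mutex_enter(lp);
	/* ... publish the request for 'server' to consume ... */
	shuttle_resume(server, lp);	/* sleeps us, runs 'server' */

	/* We run again only after something resumes us. */
	if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort)
		error = EINTR;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	return (error);
}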