/* Example 1 */
/*
 * Put specified thread to specified wait queue without dropping thread's lock.
 * Returns 1 if thread was successfully placed on project's wait queue, or
 * 0 if wait queue is blocked.
 */
int
waitq_enqueue(waitq_t *wq, kthread_t *t)
{
	/*
	 * Caller must hold t's thread lock, and t must not already be
	 * linked on any sleep queue or wait queue.
	 */
	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_sleepq == NULL);
	ASSERT(t->t_waitq == NULL);
	ASSERT(t->t_link == NULL);

	/* High-PIL entry: a dispatcher lock (the thread lock) is already held */
	disp_lock_enter_high(&wq->wq_lock);

	/*
	 * Can't enqueue anything on a blocked wait queue
	 */
	if (wq->wq_blocked) {
		disp_lock_exit_high(&wq->wq_lock);
		return (0);
	}

	/*
	 * Mark the time when thread is placed on wait queue. The microstate
	 * accounting code uses this timestamp to determine wait times.
	 */
	t->t_waitrq = gethrtime_unscaled();

	/*
	 * Mark thread as not swappable.  If necessary, it will get
	 * swapped out when it returns to the userland.
	 */
	t->t_schedflag |= TS_DONT_SWAP;
	DTRACE_SCHED1(cpucaps__sleep, kthread_t *, t);
	waitq_link(wq, t);

	/*
	 * Note: there is deliberately no disp_lock_exit_high(&wq->wq_lock)
	 * on this path — THREAD_WAIT() takes over wq_lock as the thread's
	 * lock, which is how we return "without dropping thread's lock".
	 */
	THREAD_WAIT(t, &wq->wq_lock);
	return (1);
}
/* Example 2 */
/*
 * Make 'inheritor' inherit priority from this turnstile.
 */
static void
turnstile_pi_inherit(turnstile_t *ts, kthread_t *inheritor, pri_t epri)
{
	/* Caller holds the inheritor's thread lock and the turnstile chain lock */
	ASSERT(THREAD_LOCK_HELD(inheritor));
	ASSERT(DISP_LOCK_HELD(&TURNSTILE_CHAIN(ts->ts_sobj).tc_lock));

	/* No boost needed if the inheritor is already at or above epri */
	if (epri <= inheritor->t_pri)
		return;

	if (ts->ts_inheritor == NULL) {
		/*
		 * First time this turnstile donates to 'inheritor': record
		 * the inheritance and push the turnstile onto the head of
		 * the inheritor's t_prioinv list (protected by t_pi_lock).
		 */
		ts->ts_inheritor = inheritor;
		ts->ts_epri = epri;
		disp_lock_enter_high(&inheritor->t_pi_lock);
		ts->ts_prioinv = inheritor->t_prioinv;
		inheritor->t_prioinv = ts;
		disp_lock_exit_high(&inheritor->t_pi_lock);
	} else {
		/*
		 * 'inheritor' is already inheriting from this turnstile,
		 * so just adjust its priority.
		 */
		ASSERT(ts->ts_inheritor == inheritor);
		if (ts->ts_epri < epri)
			ts->ts_epri = epri;
	}

	/* Apply the new effective priority if it exceeds the current one */
	if (epri > DISP_PRIO(inheritor))
		thread_change_epri(inheritor, epri);
}
/* Example 3 */
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * switch to a new thread.
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_swtch(kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	lwp->lwp_asleep = 1;			/* /proc */
	lwp->lwp_sysabort = 0;			/* /proc */
	lwp->lwp_ru.nvcsw++;
	/* Sleep must be interruptible by signals and /proc stops */
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	/* Dummy non-NULL wchan0 so /proc treats us like a user-cv waiter */
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED(sleep);
	/* Transition to sleep; thread lock is handed off to shuttle_lock */
	THREAD_SLEEP(curthread, &shuttle_lock);
	(void) new_mstate(curthread, LMS_SLEEP);
	disp_lock_exit_high(&shuttle_lock);
	/* Drop the caller's mutex only after we are fully marked asleep */
	mutex_exit(l);
	/*
	 * If something important arrived while we were setting up the
	 * sleep, put ourselves back on a run queue before switching.
	 */
	if (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread))
		setrun(curthread);
	swtch();
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
/* Example 4 */
/*
 * the semaphore's count is incremented by one. a blocked thread
 * is awakened and re-tries to acquire the semaphore.
 */
void
sema_v(ksema_t *sp)
{
	sema_impl_t 	*s;
	kthread_t 	*sq, *tp;
	disp_lock_t	*sqlp;

	s = (sema_impl_t *)sp;
	/* Hashed sleep-queue lock also serializes the semaphore itself */
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	/* During panic, just back out; nothing can be safely awakened */
	if (panicstr) {
		disp_lock_exit(sqlp);
		return;
	}
	s->s_count++;
	sq = s->s_slpq;
	if (sq != NULL) {
		/* Wake the head of the sleep queue */
		tp = sq;
		/* sqlp is tp's thread lock while tp sleeps here */
		ASSERT(THREAD_LOCK_HELD(tp));
		sq = sq->t_link;
		tp->t_link = NULL;
		DTRACE_SCHED1(wakeup, kthread_t *, tp);
		tp->t_sobj_ops = NULL;
		tp->t_wchan = NULL;
		ASSERT(tp->t_state == TS_SLEEP);
		/* CL_WAKEUP() may retarget tp's thread lock to a run queue */
		CL_WAKEUP(tp);
		s->s_slpq = sq;
		/*
		 * Drop the sleep-queue lock at high PIL, then release
		 * tp's (possibly new) thread lock via thread_unlock().
		 */
		disp_lock_exit_high(sqlp);
		thread_unlock(tp);
	} else {
		disp_lock_exit(sqlp);
	}
}
/* Example 5 */
/*
 * Charge project of thread t the time thread t spent on CPU since previously
 * adjusted.
 *
 * Record the current on-CPU time in the csc structure.
 *
 * Do not adjust for more than one tick worth of time.
 *
 * It is possible that the project cap is being disabled while this routine is
 * executed. This should not cause any issues since the association between the
 * thread and its project is protected by thread lock.
 */
static void
caps_charge_adjust(kthread_id_t t, caps_sc_t *csc)
{
	kproject_t	*kpj = ttoproj(t);
	hrtime_t	new_usage;
	hrtime_t	usage_delta;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(kpj->kpj_cpucap != NULL);

	/* Get on-CPU time since birth of a thread */
	new_usage = mstate_thread_onproc_time(t);

	/* Time spent on CPU since last checked */
	usage_delta = new_usage - csc->csc_cputime;

	/* Save the accumulated on-CPU time */
	csc->csc_cputime = new_usage;

	/* Charge at most one tick worth of on-CPU time */
	if (usage_delta > cap_tick_cost)
		usage_delta = cap_tick_cost;

	/* Add usage_delta to the project usage value. */
	if (usage_delta > 0) {
		cpucap_t *cap = kpj->kpj_cpucap;

		DTRACE_PROBE2(cpucaps__project__charge,
		    kthread_id_t, t, hrtime_t, usage_delta);

		disp_lock_enter_high(&cap->cap_usagelock);
		cap->cap_usage += usage_delta;

		/*
		 * Check for overflows: a signed wrap past the maximum shows
		 * up as a negative usage, so clamp to just under MAX_USAGE.
		 */
		if (cap->cap_usage < 0)
			cap->cap_usage = MAX_USAGE - 1;

		disp_lock_exit_high(&cap->cap_usagelock);

		/*
		 * cap_maxusage is only kept for observability. Move it outside
		 * the lock to reduce the time spent while holding the lock.
		 * (The unlocked read/update race is acceptable for a purely
		 * observational high-water mark.)
		 */
		if (cap->cap_usage > cap->cap_maxusage)
			cap->cap_maxusage = cap->cap_usage;
	}
}
/* Example 6 */
/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	/* Caller holds t's thread lock (the sleep-queue lock while asleep) */
	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p", t, sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	/*
	 * CV_MAX_WAITERS is a saturation sentinel: once the waiter count
	 * has pegged there it is no longer exact, so don't decrement it.
	 */
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	/* Make the thread runnable via its scheduling class */
	CL_SETRUN(t);
}
/* Example 7 */
/*
 * If turnstile is non-NULL, remove it from inheritor's t_prioinv list.
 * Compute new inherited priority, and return it.
 */
static pri_t
turnstile_pi_tsdelete(turnstile_t *ts, kthread_t *inheritor)
{
	turnstile_t **linkp;
	turnstile_t *walk;
	pri_t epri_max = 0;

	/* t_pi_lock protects the inheritor's priority-inversion list */
	disp_lock_enter_high(&inheritor->t_pi_lock);
	for (linkp = &inheritor->t_prioinv; (walk = *linkp) != NULL;
	    linkp = &walk->ts_prioinv) {
		if (walk == ts) {
			/* Unlink the target turnstile from the list */
			*linkp = walk->ts_prioinv;
		} else {
			/* Track the highest epri among remaining donors */
			epri_max = MAX(epri_max, walk->ts_epri);
		}
	}
	disp_lock_exit_high(&inheritor->t_pi_lock);
	return (epri_max);
}
/* Example 8 */
/*
 * Mark the specified thread as once again sleeping on a shuttle object.  This
 * routine is called to put a server thread -- one that was dequeued but for
 * which shuttle_resume() was _not_ called -- back to sleep on a shuttle
 * object.  Because we don't hit the sched:::wakeup DTrace probe until
 * shuttle_resume(), we do _not_ have a sched:::sleep probe here.
 */
void
shuttle_sleep(kthread_t *t)
{
	klwp_t	*lwp = ttolwp(t);
	proc_t	*p = ttoproc(t);

	thread_lock(t);
	disp_lock_enter_high(&shuttle_lock);
	/* lwp is NULL for kernel-only threads; skip the /proc bookkeeping */
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	/* Sleep must remain interruptible by signals and /proc stops */
	t->t_flag |= T_WAKEABLE;
	t->t_sobj_ops = &shuttle_sobj_ops;
	/* Dummy non-NULL wchan0 so /proc treats t like a user-cv waiter */
	t->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(t);
	/* t was already in LMS_SLEEP; no new_mstate() call is needed here */
	ASSERT(t->t_mstate == LMS_SLEEP);
	/* Transition to sleep; t's thread lock is handed off to shuttle_lock */
	THREAD_SLEEP(t, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);
	/* Undo the sleep immediately if a signal or forced return is pending */
	if (lwp && (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t)))
		setrun(t);
}
/* Example 9 */
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * resume the specified thread. The 't' thread must be marked as ONPROC.
 *
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_resume(kthread_t *t, kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);
	cpu_t	*cp;
	disp_lock_t *oldtlp;

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	/* lwp is NULL for kernel-only threads; skip the /proc bookkeeping */
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	/*
	 * setting cpu_dispthread before changing thread state
	 * so that kernel preemption will be deferred to after swtch_to()
	 */
	cp = CPU;
	cp->cpu_dispthread = t;
	cp->cpu_dispatch_pri = DISP_PRIO(t);
	/*
	 * Set the wchan0 field so that /proc won't just do a setrun
	 * on this thread when trying to stop a process. Instead,
	 * /proc will mark the thread as VSTOPPED similar to threads
	 * that are blocked on user level condition variables.
	 */
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	DTRACE_SCHED(sleep);
	/* Put ourselves to sleep; our thread lock is handed to shuttle_lock */
	THREAD_SLEEP(curthread, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);

	/*
	 * Update ustate records (there is no waitrq obviously)
	 */
	(void) new_mstate(curthread, LMS_SLEEP);

	/* Capture t's current lock before anything below can retarget it */
	thread_lock_high(t);
	oldtlp = t->t_lockp;

	restore_mstate(t);
	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;

	/*
	 * Make sure we end up on the right CPU if we are dealing with bound
	 * CPU's or processor partitions.
	 */
	if (t->t_bound_cpu != NULL || t->t_cpupart != cp->cpu_part) {
		aston(t);
		cp->cpu_runrun = 1;
	}

	/*
	 * We re-assign t_disp_queue and t_lockp of 't' here because
	 * 't' could have been preempted.
	 */
	if (t->t_disp_queue != cp->cpu_disp) {
		t->t_disp_queue = cp->cpu_disp;
		thread_onproc(t, cp);
	}

	/*
	 * We can't call thread_unlock_high() here because t's thread lock
	 * could have changed by thread_onproc() call above to point to
	 * CPU->cpu_thread_lock.
	 */
	disp_lock_exit_high(oldtlp);

	mutex_exit(l);
	/*
	 * Make sure we didn't receive any important events while
	 * we weren't looking
	 */
	if (lwp &&
	    (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread)))
		setrun(curthread);

	/* Hand the CPU directly to 't' without going through the dispatcher */
	swtch_to(t);
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
/* Example 10 */
/*
 * similiar to sema_p except that it blocks at an interruptible
 * priority. if a signal is present then return 1 otherwise 0.
 */
int
sema_p_sig(ksema_t *sp)
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(t);
	sema_impl_t	*s;
	disp_lock_t	*sqlp;

	/*
	 * Kernel-only threads (no lwp) can't take signals, so an
	 * uninterruptible sema_p() is equivalent.
	 */
	if (lwp == NULL) {
		sema_p(sp);
		return (0);
	}

	s = (sema_impl_t *)sp;
	/* Hashed sleep-queue lock also serializes the semaphore itself */
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	ASSERT(s->s_count >= 0);
	while (s->s_count == 0) {
		proc_t *p = ttoproc(t);
		thread_lock_high(t);
		/* Mark the sleep interruptible before blocking */
		t->t_flag |= T_WAKEABLE;
		SEMA_BLOCK(s, sqlp);
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		thread_unlock_nopreempt(t);
		/* Bail back onto a run queue if a signal is already pending */
		if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
			setrun(t);
		swtch();
		t->t_flag &= ~T_WAKEABLE;
		if (ISSIG(t, FORREAL) ||
		    lwp->lwp_sysabort || MUSTRETURN(p, t)) {
			/* Interrupted: give up on the semaphore */
			kthread_t *sq, *tp;
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			disp_lock_enter(sqlp);
			sq = s->s_slpq;
			/*
			 * in case sema_v and interrupt happen
			 * at the same time, we need to pass the
			 * sema_v to the next thread.
			 */
			if ((sq != NULL) && (s->s_count > 0)) {
				/* Same wakeup sequence as sema_v() */
				tp = sq;
				ASSERT(THREAD_LOCK_HELD(tp));
				sq = sq->t_link;
				tp->t_link = NULL;
				DTRACE_SCHED1(wakeup, kthread_t *, tp);
				tp->t_sobj_ops = NULL;
				tp->t_wchan = NULL;
				ASSERT(tp->t_state == TS_SLEEP);
				CL_WAKEUP(tp);
				s->s_slpq = sq;
				disp_lock_exit_high(sqlp);
				thread_unlock(tp);
			} else {
				disp_lock_exit(sqlp);
			}
			return (1);
		}
		lwp->lwp_asleep = 0;
		/* Woken normally: retake the lock and re-test the count */
		disp_lock_enter(sqlp);
	}
	s->s_count--;
	disp_lock_exit(sqlp);
	return (0);
}