Example #1
static void
waitq_link(waitq_t *wq, kthread_t *t)
{
	kthread_t *next_tp;
	kthread_t *last_tp;
	kthread_t **tpp;
	pri_t tpri, next_pri, last_pri = -1;

	ASSERT(DISP_LOCK_HELD(&wq->wq_lock));

	tpri = DISP_PRIO(t);
	tpp = &wq->wq_first;
	while ((next_tp = *tpp) != NULL) {
		next_pri = DISP_PRIO(next_tp);
		if (tpri > next_pri)
			break;
		last_tp = next_tp->t_priback;
		last_pri = next_pri;
		tpp = &last_tp->t_link;
	}
	*tpp = t;
	t->t_link = next_tp;
	if (last_pri == tpri) {
		/* last_tp points to the last thread of this priority */
		t->t_priback = last_tp;
		t->t_priforw = last_tp->t_priforw;
		last_tp->t_priforw->t_priback = t;
		last_tp->t_priforw = t;
	} else {
		t->t_priback = t->t_priforw = t;
	}
	wq->wq_count++;
	t->t_waitq = wq;
}
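To make the per-priority bookkeeping easier to see, here is a minimal user-space sketch of the same insertion logic; the mthread_t type and its field names are hypothetical stand-ins for the kernel's kthread_t fields, and the whole thing is a model rather than kernel code.

/*
 * Model of waitq_link()'s queue discipline: threads sit on a singly
 * linked list in descending priority order, and all threads of equal
 * priority also form a circular sublist through priforw/priback, so the
 * tail of a priority group is reachable in O(1).
 */
#include <stddef.h>

typedef struct mthread {
	struct mthread	*link;		/* next thread on the queue */
	struct mthread	*priforw;	/* circular list, same priority */
	struct mthread	*priback;
	int		pri;
} mthread_t;

static void
model_waitq_link(mthread_t **first, mthread_t *t)
{
	mthread_t *next_tp, *last_tp = NULL;
	mthread_t **tpp = first;
	int last_pri = -1;

	while ((next_tp = *tpp) != NULL) {
		if (t->pri > next_tp->pri)
			break;
		last_tp = next_tp->priback;	/* tail of this group */
		last_pri = next_tp->pri;
		tpp = &last_tp->link;		/* skip the whole group */
	}
	*tpp = t;
	t->link = next_tp;
	if (last_pri == t->pri) {
		/* join the existing circular sublist for this priority */
		t->priback = last_tp;
		t->priforw = last_tp->priforw;
		last_tp->priforw->priback = t;
		last_tp->priforw = t;
	} else {
		/* first thread at this priority: a sublist of one */
		t->priback = t->priforw = t;
	}
}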
Example #2
/*
 * System call interface to scheduler activations.
 * This always operates on the current lwp.
 */
caddr_t
schedctl(void)
{
	kthread_t	*t = curthread;
	sc_shared_t	*ssp;
	uintptr_t	uaddr;
	int		error;

	if (t->t_schedctl == NULL) {
		/*
		 * Allocate and initialize the shared structure.
		 */
		if ((error = schedctl_shared_alloc(&ssp, &uaddr)) != 0)
			return ((caddr_t)(uintptr_t)set_errno(error));
		bzero(ssp, sizeof (*ssp));

		installctx(t, ssp, schedctl_save, schedctl_restore,
		    schedctl_fork, NULL, NULL, NULL);

		thread_lock(t);	/* protect against ts_tick and ts_update */
		t->t_schedctl = ssp;
		t->t_sc_uaddr = uaddr;
		ssp->sc_cid = t->t_cid;
		ssp->sc_cpri = t->t_cpri;
		ssp->sc_priority = DISP_PRIO(t);
		thread_unlock(t);
	}

	return ((caddr_t)t->t_sc_uaddr);
}
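For context, user-level consumers of this page go through the schedctl(3C) wrappers in libc rather than calling the system call directly. The sketch below assumes the documented schedctl_init()/schedctl_start()/schedctl_stop() interface from <schedctl.h>; it is a usage illustration, not part of the kernel code above.

#include <schedctl.h>

/*
 * Sketch only: schedctl_init() obtains the sc_shared_t page set up by
 * the schedctl() syscall above; schedctl_start()/schedctl_stop() bracket
 * a short critical section to hint that this lwp should not be
 * preempted.  A real program would keep one schedctl_t pointer per
 * thread rather than a single static.
 */
void
bump_counter(volatile int *counter)
{
	static schedctl_t *sc;

	if (sc == NULL)
		sc = schedctl_init();

	schedctl_start(sc);	/* request preemption control */
	(*counter)++;		/* keep this region short */
	schedctl_stop(sc);	/* drop the preemption hint */
}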
Example #3
/*
 * Make 'inheritor' inherit priority from this turnstile.
 */
static void
turnstile_pi_inherit(turnstile_t *ts, kthread_t *inheritor, pri_t epri)
{
	ASSERT(THREAD_LOCK_HELD(inheritor));
	ASSERT(DISP_LOCK_HELD(&TURNSTILE_CHAIN(ts->ts_sobj).tc_lock));

	if (epri <= inheritor->t_pri)
		return;

	if (ts->ts_inheritor == NULL) {
		ts->ts_inheritor = inheritor;
		ts->ts_epri = epri;
		disp_lock_enter_high(&inheritor->t_pi_lock);
		ts->ts_prioinv = inheritor->t_prioinv;
		inheritor->t_prioinv = ts;
		disp_lock_exit_high(&inheritor->t_pi_lock);
	} else {
		/*
		 * 'inheritor' is already inheriting from this turnstile,
		 * so just adjust its priority.
		 */
		ASSERT(ts->ts_inheritor == inheritor);
		if (ts->ts_epri < epri)
			ts->ts_epri = epri;
	}

	if (epri > DISP_PRIO(inheritor))
		thread_change_epri(inheritor, epri);
}
Example #4
/*
 * common slave cpu initialization code
 */
void
common_startup_init(cpu_t *cp, int cpuid)
{
	kthread_id_t tp;
	sfmmu_t *sfmmup;
	caddr_t	sp;

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 */
	tp = thread_create(NULL, 0, slave_startup, NULL, 0, &p0,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	sfmmup = astosfmmu(&kas);
	CPUSET_ADD(sfmmup->sfmmu_cpusran, cpuid);

	/*
	 * Setup thread to start in slave_startup.
	 */
	sp = tp->t_stk;
	tp->t_pc = (uintptr_t)slave_startup - 8;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);
	cp->cpu_startup_thread = tp;

	/*
	 * The dispatcher may discover the CPU before it is in cpu_ready_set
	 * and attempt to poke it. Before the CPU is in cpu_ready_set, any
	 * cross calls to it will be dropped. We initialize
	 * poke_cpu_outstanding to true so that poke_cpu will ignore any poke
	 * requests for this CPU. Pokes that come in before the CPU is in
	 * cpu_ready_set can be ignored because the CPU is about to come
	 * online.
	 */
	cp->cpu_m.poke_cpu_outstanding = B_TRUE;
}
Example #5
/*
 * Sets the values of the cid and priority fields for the specified thread.
 * Called from thread_change_pri(), thread_change_epri(), THREAD_CHANGE_PRI().
 * Called following calls to CL_FORKRET() and CL_ENTERCLASS().
 */
void
schedctl_set_cidpri(kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	if (tdp != NULL) {
		tdp->sc_cid = t->t_cid;
		tdp->sc_cpri = t->t_cpri;
		tdp->sc_priority = DISP_PRIO(t);
	}
}
Example #6
/*
 * Put a thread on the sleep queue for this semaphore.
 */
static void
sema_queue(ksema_t *sp, kthread_t *t)
{
	kthread_t	**tpp;
	kthread_t	*tp;
	pri_t		cpri;
	sema_impl_t	*s;

	ASSERT(THREAD_LOCK_HELD(t));
	s = (sema_impl_t *)sp;
	tpp = &s->s_slpq;
	cpri = DISP_PRIO(t);
	while ((tp = *tpp) != NULL) {
		if (cpri > DISP_PRIO(tp))
			break;
		tpp = &tp->t_link;
	}
	*tpp = t;
	t->t_link = tp;
}
Example #7
/*
 * Wake threads that are blocked in a turnstile.
 */
void
turnstile_wakeup(turnstile_t *ts, int qnum, int nthreads, kthread_t *owner)
{
	turnstile_chain_t *tc = &TURNSTILE_CHAIN(ts->ts_sobj);
	sleepq_t *sqp = &ts->ts_sleepq[qnum];

	ASSERT(DISP_LOCK_HELD(&tc->tc_lock));

	/*
	 * Waive any priority we may have inherited from this turnstile.
	 */
	if (ts->ts_inheritor != NULL) {
		turnstile_pi_waive(ts);
	}
	while (nthreads-- > 0) {
		kthread_t *t = sqp->sq_first;
		ASSERT(t->t_ts == ts);
		ASSERT(ts->ts_waiters > 1 || ts->ts_inheritor == NULL);
		DTRACE_SCHED1(wakeup, kthread_t *, t);
		turnstile_dequeue(t);
		CL_WAKEUP(t); /* previous thread lock, tc_lock, not dropped */
		/*
		 * If the caller did direct handoff of ownership,
		 * make the new owner inherit from this turnstile.
		 */
		if (t == owner) {
			kthread_t *wp = ts->ts_sleepq[TS_WRITER_Q].sq_first;
			kthread_t *rp = ts->ts_sleepq[TS_READER_Q].sq_first;
			pri_t wpri = wp ? DISP_PRIO(wp) : 0;
			pri_t rpri = rp ? DISP_PRIO(rp) : 0;
			turnstile_pi_inherit(ts, t, MAX(wpri, rpri));
			owner = NULL;
		}
		thread_unlock_high(t);		/* drop run queue lock */
	}
	if (owner != NULL)
		panic("turnstile_wakeup: owner %p not woken", (void *)owner);
	disp_lock_exit(&tc->tc_lock);
}
Example #8
/*
 * Compute caller's new inherited priority, and change its effective
 * priority if necessary. Necessary only for SOBJ_USER_PI, because of
 * its interruptibility characteristic.
 */
void
turnstile_pi_recalc(void)
{
	kthread_t *inheritor = curthread;
	pri_t new_epri;

	thread_lock(inheritor);
	new_epri = turnstile_pi_tsdelete(NULL, inheritor);
	if (new_epri != DISP_PRIO(inheritor))
		thread_change_epri(inheritor, new_epri);
	if (DISP_MUST_SURRENDER(inheritor))
		cpu_surrender(inheritor);
	thread_unlock(inheritor);
}
Example #9
/*
 * Remove turnstile from inheritor's t_prioinv list, compute
 * new priority, and change the inheritor's effective priority if
 * necessary. Keep in synch with turnstile_pi_recalc().
 */
static void
turnstile_pi_waive(turnstile_t *ts)
{
	kthread_t *inheritor = ts->ts_inheritor;
	pri_t new_epri;

	ASSERT(inheritor == curthread);

	thread_lock_high(inheritor);
	new_epri = turnstile_pi_tsdelete(ts, inheritor);
	if (new_epri != DISP_PRIO(inheritor))
		thread_change_epri(inheritor, new_epri);
	ts->ts_inheritor = NULL;
	if (DISP_MUST_SURRENDER(inheritor))
		cpu_surrender(inheritor);
	thread_unlock_high(inheritor);
}
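Both turnstile_pi_recalc() and turnstile_pi_waive() lean on turnstile_pi_tsdelete() to recompute the inherited priority once a turnstile is unlinked from the inheritor's t_prioinv list. A rough model of that recomputation, with hypothetical types rather than the kernel's, is just a maximum over whatever the thread still inherits:

#include <stddef.h>

/*
 * Model only: the new inherited priority is the highest epri willed by
 * the turnstiles remaining on the thread's priority-inversion list
 * (zero if the list is now empty); the thread's dispatch priority is
 * then the larger of this value and its own priority.
 */
typedef struct model_ts {
	struct model_ts	*prioinv_next;	/* next turnstile inherited from */
	int		epri;		/* priority willed by its waiters */
} model_ts_t;

static int
model_new_inherited_pri(const model_ts_t *remaining)
{
	int epri = 0;
	const model_ts_t *ts;

	for (ts = remaining; ts != NULL; ts = ts->prioinv_next) {
		if (ts->epri > epri)
			epri = ts->epri;
	}
	return (epri);
}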
Example #10
int
turnstile_block(turnstile_t *ts, int qnum, void *sobj, sobj_ops_t *sobj_ops,
    kmutex_t *mp, lwp_timer_t *lwptp)
{
	kthread_t *owner;
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	turnstile_chain_t *tc = &TURNSTILE_CHAIN(sobj);
	int error = 0;
	int loser = 0;

	ASSERT(DISP_LOCK_HELD(&tc->tc_lock));
	ASSERT(mp == NULL || IS_UPI(mp));
	ASSERT((SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) ^ (mp == NULL));

	thread_lock_high(t);

	if (ts == NULL) {
		/*
		 * This is the first thread to block on this sobj.
		 * Take its attached turnstile and add it to the hash chain.
		 */
		ts = t->t_ts;
		ts->ts_sobj = sobj;
		ts->ts_next = tc->tc_first;
		tc->tc_first = ts;
		ASSERT(ts->ts_waiters == 0);
	} else {
		/*
		 * Another thread has already donated its turnstile
		 * to block on this sobj, so ours isn't needed.
		 * Stash it on the active turnstile's freelist.
		 */
		turnstile_t *myts = t->t_ts;
		myts->ts_free = ts->ts_free;
		ts->ts_free = myts;
		t->t_ts = ts;
		ASSERT(ts->ts_sobj == sobj);
		ASSERT(ts->ts_waiters > 0);
	}

	/*
	 * Put the thread to sleep.
	 */
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		curthread->t_flag |= T_WAKEABLE;
	}
	CL_SLEEP(t);		/* assign kernel priority */
	THREAD_SLEEP(t, &tc->tc_lock);
	t->t_wchan = sobj;
	t->t_sobj_ops = sobj_ops;
	DTRACE_SCHED(sleep);

	if (lwp != NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
		if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
			lwp->lwp_asleep = 1;
			lwp->lwp_sysabort = 0;
			/*
			 * make wchan0 non-zero to conform to the rule that
			 * threads blocking for user-level objects have a
			 * non-zero wchan0: this prevents spurious wake-ups
			 * by, for example, /proc.
			 */
			t->t_wchan0 = (caddr_t)1;
		}
	}
	ts->ts_waiters++;
	sleepq_insert(&ts->ts_sleepq[qnum], t);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_MUTEX &&
	    SOBJ_OWNER(sobj_ops, sobj) == NULL)
		panic("turnstile_block(%p): unowned mutex", (void *)ts);

	/*
	 * Follow the blocking chain to its end, willing our priority to
	 * everyone who's in our way.
	 */
	while (t->t_sobj_ops != NULL &&
	    (owner = SOBJ_OWNER(t->t_sobj_ops, t->t_wchan)) != NULL) {
		if (owner == curthread) {
			if (SOBJ_TYPE(sobj_ops) != SOBJ_USER_PI) {
				panic("Deadlock: cycle in blocking chain");
			}
			/*
			 * If the cycle we've encountered ends in mp,
			 * then we know it isn't a 'real' cycle because
			 * we're going to drop mp before we go to sleep.
			 * Moreover, since we've come full circle we know
			 * that we must have willed priority to everyone
			 * in our way.  Therefore, we can break out now.
			 */
			if (t->t_wchan == (void *)mp)
				break;

			if (loser)
				lock_clear(&turnstile_loser_lock);
			/*
			 * For SOBJ_USER_PI, a cycle is an application
			 * deadlock which needs to be communicated
			 * back to the application.
			 */
			thread_unlock_nopreempt(t);
			mutex_exit(mp);
			setrun(curthread);
			swtch(); /* necessary to transition state */
			curthread->t_flag &= ~T_WAKEABLE;
			if (lwptp->lwpt_id != 0)
				(void) lwp_timer_dequeue(lwptp);
			setallwatch();
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			return (EDEADLK);
		}
		if (!turnstile_interlock(t->t_lockp, &owner->t_lockp)) {
			/*
			 * If we failed to grab the owner's thread lock,
			 * turnstile_interlock() will have dropped t's
			 * thread lock, so at this point we don't even know
			 * that 't' exists anymore.  The simplest solution
			 * is to restart the entire priority inheritance dance
			 * from the beginning of the blocking chain, since
			 * we *do* know that 'curthread' still exists.
			 * Application of priority inheritance is idempotent,
			 * so it's OK that we're doing it more than once.
			 * Note also that since we've dropped our thread lock,
			 * we may already have been woken up; if so, our
			 * t_sobj_ops will be NULL, the loop will terminate,
			 * and the call to swtch() will be a no-op.  Phew.
			 *
			 * There is one further complication: if two (or more)
			 * threads keep trying to grab the turnstile locks out
			 * of order and keep losing the race to another thread,
			 * these "dueling losers" can livelock the system.
			 * Therefore, once we get into this rare situation,
			 * we serialize all the losers.
			 */
			if (loser == 0) {
				loser = 1;
				lock_set(&turnstile_loser_lock);
			}
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		/*
		 * We now have the owner's thread lock.  If we are traversing
		 * from non-SOBJ_USER_PI ops to SOBJ_USER_PI ops, then we know
		 * that we have caught the thread while in the TS_SLEEP state,
		 * but holding mp.  We know that this situation is transient
		 * (mp will be dropped before the holder actually sleeps on
		 * the SOBJ_USER_PI sobj), so we will spin waiting for mp to
		 * be dropped.  Then, as in the turnstile_interlock() failure
		 * case, we will restart the priority inheritance dance.
		 */
		if (SOBJ_TYPE(t->t_sobj_ops) != SOBJ_USER_PI &&
		    owner->t_sobj_ops != NULL &&
		    SOBJ_TYPE(owner->t_sobj_ops) == SOBJ_USER_PI) {
			kmutex_t *upi_lock = (kmutex_t *)t->t_wchan;

			ASSERT(IS_UPI(upi_lock));
			ASSERT(SOBJ_TYPE(t->t_sobj_ops) == SOBJ_MUTEX);

			if (t->t_lockp != owner->t_lockp)
				thread_unlock_high(owner);
			thread_unlock_high(t);
			if (loser)
				lock_clear(&turnstile_loser_lock);

			while (mutex_owner(upi_lock) == owner) {
				SMT_PAUSE();
				continue;
			}

			if (loser)
				lock_set(&turnstile_loser_lock);
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		turnstile_pi_inherit(t->t_ts, owner, DISP_PRIO(t));
		if (t->t_lockp != owner->t_lockp)
			thread_unlock_high(t);
		t = owner;
	}

	if (loser)
		lock_clear(&turnstile_loser_lock);

	/*
	 * Note: 't' and 'curthread' were synonymous before the loop above,
	 * but now they may be different.  ('t' is now the last thread in
	 * the blocking chain.)
	 */
	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		ushort_t s = curthread->t_oldspl;
		int timedwait = 0;
		uint_t imm_timeout = 0;
		clock_t tim = -1;

		thread_unlock_high(t);
		if (lwptp->lwpt_id != 0) {
			/*
			 * We enqueued a timeout.  If it has already fired,
			 * lwptp->lwpt_imm_timeout has been set with cas,
			 * so fetch it with cas.
			 */
			timedwait = 1;
			imm_timeout =
			    atomic_cas_uint(&lwptp->lwpt_imm_timeout, 0, 0);
		}
		mutex_exit(mp);
		splx(s);

		if (ISSIG(curthread, JUSTLOOKING) ||
		    MUSTRETURN(p, curthread) || imm_timeout)
			setrun(curthread);
		swtch();
		curthread->t_flag &= ~T_WAKEABLE;
		if (timedwait)
			tim = lwp_timer_dequeue(lwptp);
		setallwatch();
		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(p, curthread))
			error = EINTR;
		else if (imm_timeout || (timedwait && tim == -1))
			error = ETIME;
		lwp->lwp_sysabort = 0;
		lwp->lwp_asleep = 0;
	} else {
		thread_unlock_nopreempt(t);
		swtch();
	}

	return (error);
}
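The core of turnstile_block() is the loop that wills the caller's priority down the blocking chain. Stripped of the locking, restart, and livelock handling, the walk reduces to the minimal model below (hypothetical types, not the kernel's); since applying inheritance is idempotent, restarting the walk from the top, as the kernel does after dropping locks, is always safe.

#include <stddef.h>

/*
 * Model only: follow the chain from the blocked thread, letting each
 * owner inherit the priority of the thread blocked on it.  A cycle back
 * to the starting thread corresponds to the deadlock case handled above.
 */
typedef struct model_thread {
	struct model_thread	*blocked_on_owner;	/* NULL if unblocked */
	int			pri;
} model_thread_t;

static int
model_will_priority(model_thread_t *waiter)
{
	model_thread_t *t = waiter;
	model_thread_t *owner;

	while ((owner = t->blocked_on_owner) != NULL) {
		if (owner == waiter)
			return (-1);		/* cycle: deadlock */
		if (t->pri > owner->pri)
			owner->pri = t->pri;	/* owner inherits from t */
		t = owner;
	}
	return (0);
}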
Example #11
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * resume the specified thread. The 't' thread must be marked as ONPROC.
 *
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_resume(kthread_t *t, kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);
	cpu_t	*cp;
	disp_lock_t *oldtlp;

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	/*
	 * setting cpu_dispthread before changing thread state
	 * so that kernel preemption will be deferred to after swtch_to()
	 */
	cp = CPU;
	cp->cpu_dispthread = t;
	cp->cpu_dispatch_pri = DISP_PRIO(t);
	/*
	 * Set the wchan0 field so that /proc won't just do a setrun
	 * on this thread when trying to stop a process. Instead,
	 * /proc will mark the thread as VSTOPPED similar to threads
	 * that are blocked on user level condition variables.
	 */
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);

	/*
	 * Update ustate records (there is no waitrq obviously)
	 */
	(void) new_mstate(curthread, LMS_SLEEP);

	thread_lock_high(t);
	oldtlp = t->t_lockp;

	restore_mstate(t);
	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;

	/*
	 * Make sure we end up on the right CPU if we are dealing with bound
	 * CPU's or processor partitions.
	 */
	if (t->t_bound_cpu != NULL || t->t_cpupart != cp->cpu_part) {
		aston(t);
		cp->cpu_runrun = 1;
	}

	/*
	 * We re-assign t_disp_queue and t_lockp of 't' here because
	 * 't' could have been preempted.
	 */
	if (t->t_disp_queue != cp->cpu_disp) {
		t->t_disp_queue = cp->cpu_disp;
		thread_onproc(t, cp);
	}

	/*
	 * We can't call thread_unlock_high() here because t's thread lock
	 * could have changed by thread_onproc() call above to point to
	 * CPU->cpu_thread_lock.
	 */
	disp_lock_exit_high(oldtlp);

	mutex_exit(l);
	/*
	 * Make sure we didn't receive any important events while
	 * we weren't looking
	 */
	if (lwp &&
	    (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread)))
		setrun(curthread);

	swtch_to(t);
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}