Code Example #1
File: schedctl.c  Project: apprisi/illumos-gate
/*
 * Clean up scheduler activations state associated with an exiting
 * (or execing) lwp.  t is always the current thread.
 */
void
schedctl_lwp_cleanup(kthread_t *t)
{
	sc_shared_t	*ssp = t->t_schedctl;
	proc_t		*p = ttoproc(t);
	sc_page_ctl_t	*pagep;
	index_t		index;

	ASSERT(MUTEX_NOT_HELD(&p->p_lock));

	thread_lock(t);		/* protect against ts_tick and ts_update */
	t->t_schedctl = NULL;
	t->t_sc_uaddr = 0;
	thread_unlock(t);

	/*
	 * Remove the context op to avoid the final call to
	 * schedctl_save when switching away from this lwp.
	 */
	(void) removectx(t, ssp, schedctl_save, schedctl_restore,
	    schedctl_fork, NULL, NULL, NULL);

	/*
	 * Do not unmap the shared page until the process exits.
	 * User-level library code relies on this for adaptive mutex locking.
	 */
	mutex_enter(&p->p_sc_lock);
	ssp->sc_state = SC_FREE;
	pagep = schedctl_page_lookup(ssp);
	index = (index_t)(ssp - pagep->spc_base);
	BT_CLEAR(pagep->spc_map, index);
	pagep->spc_space += sizeof (sc_shared_t);
	mutex_exit(&p->p_sc_lock);
}
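
The handlers removed above must have been registered earlier with a matching installctx() call using the same argument and same eight-slot argument order seen in Examples #3 and #5 below (thread, arg, save, restore, fork, lwp_create, exit, free). A hedged sketch of what that registration presumably looks like elsewhere in schedctl.c:

	/*
	 * Sketch only: assumed registration of the schedctl context ops when
	 * the shared page is set up, mirroring the removectx() arguments in
	 * schedctl_lwp_cleanup() above.
	 */
	installctx(t, ssp, schedctl_save, schedctl_restore, schedctl_fork,
	    NULL, NULL, NULL);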
Code Example #2
File: sundep.c  Project: pcd1193182/openzfs
/*
 * If this is a process in a branded zone, then we want it to disable the
 * brand syscall entry points.  This routine must be called when the last
 * lwp in a process is exiting in proc_exit().
 */
void
lwp_detach_brand_hdlrs(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);

	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));
	if (t == curthread)
		kpreempt_disable();

	/* Remove the original context handlers */
	VERIFY(removectx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL) != 0);

	if (t == curthread) {
		/* Cleanup our MSR and IDT entries. */
		brand_interpositioning_disable();
		kpreempt_enable();
	}
}
Code Example #3
File: sundep.c  Project: pcd1193182/openzfs
/*
 * If this is a process in a branded zone, then we want it to use the brand
 * syscall entry points instead of the standard Solaris entry points.  This
 * routine must be called when a new lwp is created within a branded zone
 * or when an existing lwp moves into a branded zone via a zone_enter()
 * operation.
 */
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);

	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));

	ASSERT(removectx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL) == 0);
	installctx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL);

	if (t == curthread) {
		kpreempt_disable();
		brand_interpositioning_enable();
		kpreempt_enable();
	}
}
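
Examples #2 and #3 are mirror images: the ASSERT in lwp_attach_brand_hdlrs() checks that the brand handlers are not already installed before installctx() adds them, and the VERIFY in lwp_detach_brand_hdlrs() checks that removectx() actually found and removed them. The eight installctx()/removectx() arguments map onto a per-thread list of context ops; a rough sketch of that mapping, with field names assumed from the call sites above rather than taken from this page, is:

	/*
	 * Assumed layout of one per-thread context op, inferred from the
	 * argument order (thread, arg, save, restore, fork, lwp_create,
	 * exit, free) used by installctx()/removectx() in these examples.
	 */
	struct ctxop_sketch {
		void	(*save_op)(void *);	/* run when switching away */
		void	(*restore_op)(void *);	/* run when switching back */
		void	(*fork_op)(void *, void *);
		void	(*lwp_create_op)(void *, void *);
		void	(*exit_op)(void *);
		void	(*free_op)(void *, int);	/* isexec flag */
		void	*arg;		/* e.g. ssp, lwp, or a kcpc context */
		struct ctxop_sketch *next;
	};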
Code Example #4
int
kcpc_unbind(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	kthread_t	*t;

	if (ctx == NULL)
		return (EINVAL);

	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);

	if (ctx->kc_cpuid == -1) {
		t = ctx->kc_thread;
		/*
		 * The context is thread-bound and therefore has a device
		 * context.  It will be freed via removectx() calling
		 * freectx() calling kcpc_free().
		 */
		if (t == curthread &&
		    (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) {
			kpreempt_disable();
			pcbe_ops->pcbe_allstop();
			atomic_or_uint(&ctx->kc_flags,
			    KCPC_CTX_INVALID_STOPPED);
			kpreempt_enable();
		}
#ifdef DEBUG
		if (removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free) == 0)
			panic("kcpc_unbind: context %p not preset on thread %p",
			    ctx, t);
#else
		(void) removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free);
#endif /* DEBUG */
		t->t_cpc_set = NULL;
		t->t_cpc_ctx = NULL;
	} else {
		/*
		 * If we are unbinding a CPU-bound set from a remote CPU, the
		 * native CPU's idle thread could be in the midst of programming
		 * this context onto the CPU. We grab the context's lock here to
		 * ensure that the idle thread is done with it. When we release
		 * the lock, the CPU no longer has a context and the idle thread
		 * will move on.
		 *
		 * cpu_lock must be held to prevent the CPU from being DR'd out
		 * while we disassociate the context from the cpu_t.
		 */
		cpu_t *cp;
		mutex_enter(&cpu_lock);
		cp = cpu_get(ctx->kc_cpuid);
		if (cp != NULL) {
			/*
			 * The CPU may have been DR'd out of the system.
			 */
			mutex_enter(&cp->cpu_cpc_ctxlock);
			if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0)
				kcpc_stop_hw(ctx);
			ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED);
			cp->cpu_cpc_ctx = NULL;
			mutex_exit(&cp->cpu_cpc_ctxlock);
		}
		mutex_exit(&cpu_lock);
		if (ctx->kc_thread == curthread) {
			kcpc_free(ctx, 0);
			curthread->t_cpc_set = NULL;
		}
	}

	return (0);
}
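
In the thread-bound branch, kcpc_free is registered as the context's free handler, which is why the comment above says the context "will be freed via removectx()": tearing down the context op is what ultimately releases the device context. The bind-side registration is not shown on this page, but it presumably mirrors the removectx() arguments; a hedged sketch:

	/*
	 * Sketch only: assumed registration in the thread-bind path of
	 * kcpc.c, using the same handler slots as the removectx() call in
	 * kcpc_unbind() above.
	 */
	installctx(t, ctx, kcpc_save, kcpc_restore, NULL,
	    kcpc_lwp_create, NULL, kcpc_free);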
Code Example #5
File: sundep.c  Project: pcd1193182/openzfs
/*
 * Add any lwp-associated context handlers to the lwp at the beginning
 * of the lwp's useful life.
 *
 * All paths which create lwp's invoke lwp_create(); lwp_create()
 * invokes lwp_stk_init() which initializes the stack, sets up
 * lwp_regs, and invokes this routine.
 *
 * All paths which destroy lwp's invoke lwp_exit() to rip the lwp
 * apart and put it on 'lwp_deathrow'; if the lwp is destroyed it
 * ends up in thread_free() which invokes freectx(t, 0) before
 * invoking lwp_stk_fini().  When the lwp is recycled from death
 * row, lwp_stk_fini() is invoked, then thread_free(), and thus
 * freectx(t, 0) as before.
 *
 * In the case of exec, the surviving lwp is thoroughly scrubbed
 * clean; exec invokes freectx(t, 1) to destroy associated contexts.
 * On the way back to the new image, it invokes setregs() which
 * in turn invokes this routine.
 */
void
lwp_installctx(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);
	int thisthread = t == curthread;
#ifdef _SYSCALL32_IMPL
	void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
	    lwp_segregs_restore : lwp_segregs_restore32;
#else
	void (*restop)(klwp_t *) = lwp_segregs_restore;
#endif

	/*
	 * Install the basic lwp context handlers on each lwp.
	 *
	 * On the amd64 kernel, the context handlers are responsible for
	 * virtualizing %ds, %es, %fs, and %gs to the lwp.  The register
	 * values are only ever changed via sys_rtt when the
	 * pcb->pcb_rupdate == 1.  Only sys_rtt gets to clear the bit.
	 *
	 * On the i386 kernel, the context handlers are responsible for
	 * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs.
	 */
	ASSERT(removectx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL) == 0);
	if (thisthread)
		kpreempt_disable();
	installctx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL);
	if (thisthread) {
		/*
		 * Since we're the right thread, set the values in the GDT
		 */
		restop(lwp);
		kpreempt_enable();
	}

	/*
	 * If we have sysenter/sysexit instructions enabled, we need
	 * to ensure that the hardware mechanism is kept up-to-date with the
	 * lwp's kernel stack pointer across context switches.
	 *
	 * sep_save zeros the sysenter stack pointer msr; sep_restore sets
	 * it to the lwp's kernel stack pointer (kstktop).
	 */
	if (is_x86_feature(x86_featureset, X86FSET_SEP)) {
#if defined(__amd64)
		caddr_t kstktop = (caddr_t)lwp->lwp_regs;
#elif defined(__i386)
		caddr_t kstktop = ((caddr_t)lwp->lwp_regs - MINFRAME) +
		    SA(sizeof (struct regs) + MINFRAME);
#endif
		ASSERT(removectx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0);

		if (thisthread)
			kpreempt_disable();
		installctx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL);
		if (thisthread) {
			/*
			 * We're the right thread, so set the stack pointer
			 * for the first sysenter instruction to use
			 */
			sep_restore(kstktop);
			kpreempt_enable();
		}
	}

	if (PROC_IS_BRANDED(ttoproc(t)))
		lwp_attach_brand_hdlrs(lwp);
}
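
All five examples follow the same pattern: register per-thread save/restore handlers with installctx(), tear them down with removectx(), and, when operating on curthread, disable preemption and apply the new state by hand. The following is a minimal, self-contained user-space model of that pattern; it is not illumos code, only the save/restore slots are modeled, and every name in it is invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Simplified model of a per-thread context-op list. */
typedef struct ctx_op {
	void (*save)(void *);     /* run when switching away from the thread */
	void (*restore)(void *);  /* run when switching back to the thread */
	void *arg;
	struct ctx_op *next;
} ctx_op_t;

typedef struct fake_thread {
	ctx_op_t *ops;
} fake_thread_t;

/* Register a handler pair, newest first (analogous to installctx()). */
static void
install_op(fake_thread_t *t, void *arg, void (*save)(void *),
    void (*restore)(void *))
{
	ctx_op_t *op = malloc(sizeof (*op));

	if (op == NULL)
		abort();
	op->save = save;
	op->restore = restore;
	op->arg = arg;
	op->next = t->ops;
	t->ops = op;
}

/* Remove a matching handler pair (analogous to removectx()); 1 if found. */
static int
remove_op(fake_thread_t *t, void *arg, void (*save)(void *),
    void (*restore)(void *))
{
	ctx_op_t **pp, *op;

	for (pp = &t->ops; (op = *pp) != NULL; pp = &op->next) {
		if (op->arg == arg && op->save == save &&
		    op->restore == restore) {
			*pp = op->next;
			free(op);
			return (1);
		}
	}
	return (0);
}

/* Invoked by the "scheduler" when switching away from the thread. */
static void
switch_away(fake_thread_t *t)
{
	ctx_op_t *op;

	for (op = t->ops; op != NULL; op = op->next) {
		if (op->save != NULL)
			op->save(op->arg);
	}
}

static void
segregs_save(void *arg)
{
	printf("save state for %s\n", (char *)arg);
}

static void
segregs_restore(void *arg)
{
	printf("restore state for %s\n", (char *)arg);
}

int
main(void)
{
	fake_thread_t t = { NULL };

	install_op(&t, "lwp0", segregs_save, segregs_restore);
	switch_away(&t);	/* the save handler fires */
	remove_op(&t, "lwp0", segregs_save, segregs_restore);
	switch_away(&t);	/* nothing left to run */
	return (0);
}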