Example #1
/*
 * Get the arguments to the current system call.
 *	lwp->lwp_ap normally points to the out regs in the reg structure.
 *	If the user is going to change the out registers and might want to
 *	get the args (for /proc tracing), it must copy the args elsewhere
 *	via save_syscall_args().
 */
uint_t
get_syscall_args(klwp_t *lwp, long *argp, int *nargsp)
{
	kthread_t	*t = lwptot(lwp);
	uint_t	code = t->t_sysnum;
	long	mask;
	long	*ap;
	int	nargs;

	if (lwptoproc(lwp)->p_model == DATAMODEL_ILP32)
		mask = (uint32_t)0xffffffffU;
	else
		mask = 0xffffffffffffffff;

	if (code != 0 && code < NSYSCALL) {

		nargs = LWP_GETSYSENT(lwp)[code].sy_narg;

		ASSERT(nargs <= MAXSYSARGS);

		*nargsp = nargs;
		ap = lwp->lwp_ap;
		while (nargs-- > 0)
			*argp++ = *ap++ & mask;
	} else {
		*nargsp = 0;
	}
	return (code);
}
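A minimal, hypothetical sketch of a consumer of get_syscall_args() (for example, /proc tracing code), shown here to make the calling convention explicit: the caller supplies a MAXSYSARGS-sized buffer, receives the argument count through nargsp, and gets the system call number (or 0) as the return value. trace_syscall_entry() and its cmn_err() output are illustrative only and do not appear in the original source.

static void
trace_syscall_entry(klwp_t *lwp)
{
	long args[MAXSYSARGS];
	int nargs;
	uint_t code;

	/* Fetch the in-progress syscall number and its (masked) arguments. */
	code = get_syscall_args(lwp, args, &nargs);
	if (code != 0)
		cmn_err(CE_CONT, "syscall %u, %d args\n", code, nargs);
}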
Example #2
/*ARGSUSED2*/
void
lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
{
	struct regs *rp = lwptoregs(lwp);

	setgregs(lwp, grp);
	rp->r_ps = PSL_USER;

	/*
	 * For 64-bit lwps, we allow one magic %fs selector value, and one
	 * magic %gs selector to point anywhere in the address space using
	 * %fsbase and %gsbase behind the scenes.  libc uses %fs to point
	 * at the ulwp_t structure.
	 *
	 * For 32-bit lwps, libc wedges its lwp thread pointer into the
	 * ucontext ESP slot (which is otherwise irrelevant to setting a
	 * ucontext) and LWPGS_SEL value into gregs[REG_GS].  This is so
	 * syslwp_create() can atomically set up %gs.
	 *
	 * See setup_context() in libc.
	 */
#ifdef _SYSCALL32_IMPL
	if (lwp_getdatamodel(lwp) == DATAMODEL_ILP32) {
		if (grp[REG_GS] == LWPGS_SEL)
			(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
	} else {
		/*
		 * See lwp_setprivate in kernel and setup_context in libc.
		 *
		 * Currently libc constructs a ucontext from whole cloth for
		 * every new (not main) lwp created.  For 64 bit processes
		 * %fsbase is set directly to point to the current thread pointer.
		 * In the past (Solaris 10) %fs was also set to LWPFS_SEL to
		 * indicate %fsbase.  Now we use the null GDT selector for
		 * this purpose. LWP[FS|GS]_SEL are only intended for 32 bit
		 * processes. To ease transition we support older libcs in
		 * the newer kernel by forcing %fs or %gs selector to null
		 * by calling lwp_setprivate if LWP[FS|GS]_SEL is passed in
		 * the ucontext.  This should be ripped out at some future
		 * date.  Another fix would be for libc to do a getcontext
		 * and inherit the null %fs/%gs from the current context but
		 * that means an extra system call and could hurt performance.
		 */
		if (grp[REG_FS] == 0x1bb) /* hard code legacy LWPFS_SEL */
			(void) lwp_setprivate(lwp, _LWP_FSBASE,
			    (uintptr_t)grp[REG_FSBASE]);

		if (grp[REG_GS] == 0x1c3) /* hard code legacy LWPGS_SEL */
			(void) lwp_setprivate(lwp, _LWP_GSBASE,
			    (uintptr_t)grp[REG_GSBASE]);
	}
#else
	if (grp[GS] == LWPGS_SEL)
		(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
#endif

	lwp->lwp_eosys = JUSTRETURN;
	lwptot(lwp)->t_post_sys = 1;
}
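For reference, a hedged sketch of the 32-bit libc side of the handshake described above: setup_context() stashes the ulwp_t pointer in the otherwise unused kernel ESP greg and puts LWPGS_SEL into the GS greg so that syslwp_create()/lwp_load() can establish %gsbase atomically. setup_context_sketch() is an illustrative name; the real routine is setup_context() in libc.

/* Illustrative only -- assumes an ILP32 ucontext_t built from whole cloth. */
static void
setup_context_sketch(ucontext_t *ucp, void *ulwp_thrptr)
{
	/* Stash the thread pointer in the unused kernel %esp slot. */
	ucp->uc_mcontext.gregs[ESP] = (greg_t)(uintptr_t)ulwp_thrptr;

	/* The magic selector tells lwp_load() to point %gsbase at the stash. */
	ucp->uc_mcontext.gregs[GS] = LWPGS_SEL;
}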
Example #3
/*
 * Free lwp fpu regs.
 */
void
lwp_freeregs(klwp_t *lwp, int isexec)
{
	kfpu_t *fp = lwptofpu(lwp);

	if (lwptot(lwp) == curthread)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF))
		fp_free(fp, isexec);
}
Example #4
/*ARGSUSED*/
static void
s10_amd64_correct_fsreg(klwp_t *l)
{
	if (lwp_getdatamodel(l) == DATAMODEL_NATIVE) {
		kpreempt_disable();
		l->lwp_pcb.pcb_fs = LWPFS_SEL;
		l->lwp_pcb.pcb_rupdate = 1;
		lwptot(l)->t_post_sys = 1;	/* Guarantee update_sregs() */
		kpreempt_enable();
	}
}
Example #5
/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
#if defined(__amd64)
	struct pcb *pcb = &clwp->lwp_pcb;
	struct regs *rp = lwptoregs(lwp);

	if (pcb->pcb_rupdate == 0) {
		pcb->pcb_ds = rp->r_ds;
		pcb->pcb_es = rp->r_es;
		pcb->pcb_fs = rp->r_fs;
		pcb->pcb_gs = rp->r_gs;
		pcb->pcb_rupdate = 1;
		lwptot(clwp)->t_post_sys = 1;
	}
	ASSERT(lwptot(clwp)->t_post_sys);
#endif

	fp_lwp_dup(clwp);

	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct regs));
}
Example #6
uint_t
get_syscall_args(klwp_t *lwp, long *argp, int *nargsp)
{
	kthread_t	*t = lwptot(lwp);
	ulong_t	mask = 0xfffffffful;
	uint_t	code;
	long	*ap;
	int	nargs;

#if defined(_LP64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
		mask = 0xfffffffffffffffful;
#endif

	/*
	 * The thread lock must be held while looking at the arguments to ensure
	 * they don't go away via post_syscall().
	 * get_syscall_args() is the only routine to read them which is callable
	 * outside the LWP in question and hence the only one that must be
	 * synchronized in this manner.
	 */
	thread_lock(t);

	code = t->t_sysnum;
	ap = lwp->lwp_ap;

	thread_unlock(t);

	if (code != 0 && code < NSYSCALL) {
		nargs = LWP_GETSYSENT(lwp)[code].sy_narg;

		ASSERT(nargs <= MAXSYSARGS);

		*nargsp = nargs;
		while (nargs-- > 0)
			*argp++ = *ap++ & mask;
	} else {
		*nargsp = 0;
	}

	return (code);
}
Example #7
/*
 * If this is a process in a branded zone, then we want it to disable the
 * brand syscall entry points.  This routine must be called when the last
 * lwp in a process is exiting in proc_exit().
 */
void
lwp_detach_brand_hdlrs(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);

	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));
	if (t == curthread)
		kpreempt_disable();

	/* Remove the original context handlers */
	VERIFY(removectx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL) != 0);

	if (t == curthread) {
		/* Cleanup our MSR and IDT entries. */
		brand_interpositioning_disable();
		kpreempt_enable();
	}
}
Example #8
/*
 * If this is a process in a branded zone, then we want it to use the brand
 * syscall entry points instead of the standard Solaris entry points.  This
 * routine must be called when a new lwp is created within a branded zone
 * or when an existing lwp moves into a branded zone via a zone_enter()
 * operation.
 */
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);

	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));

	ASSERT(removectx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL) == 0);
	installctx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL);

	if (t == curthread) {
		kpreempt_disable();
		brand_interpositioning_enable();
		kpreempt_enable();
	}
}
Example #9
/*
 * System call to create an lwp.
 *
 * Notes on the LWP_DETACHED and LWP_DAEMON flags:
 *
 * A detached lwp (LWP_DETACHED) cannot be the specific target of
 * lwp_wait() (it is not joinable), but lwp_wait(0, ...) is required
 * to sleep until all non-daemon detached lwps have terminated before
 * returning EDEADLK because a detached lwp might create a non-detached lwp
 * that could then be returned by lwp_wait(0, ...).  See also lwp_detach().
 *
 * A daemon lwp (LWP_DAEMON) is a detached lwp that has the additional
 * property that it does not affect the termination condition of the
 * process:  The last non-daemon lwp to call lwp_exit() causes the process
 * to exit and lwp_wait(0, ...) does not sleep waiting for daemon lwps
 * to terminate.  See the block comment before lwp_wait().
 */
int
syslwp_create(ucontext_t *ucp, int flags, id_t *new_lwp)
{
	klwp_t *lwp;
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	ucontext_t uc;
#ifdef _SYSCALL32_IMPL
	ucontext32_t uc32;
#endif /* _SYSCALL32_IMPL */
	k_sigset_t sigmask;
	int	tid;
	model_t model = get_udatamodel();
	uintptr_t thrptr = 0;

	if (flags & ~(LWP_DAEMON|LWP_DETACHED|LWP_SUSPENDED))
		return (set_errno(EINVAL));

	/*
	 * lwp_create() is disallowed for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (set_errno(ENOTSUP));

	if (model == DATAMODEL_NATIVE) {
		if (copyin(ucp, &uc, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		sigutok(&uc.uc_sigmask, &sigmask);
#if defined(__i386)
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc.uc_mcontext.gregs[ESP];
#endif
	}
#ifdef _SYSCALL32_IMPL
	else {
		if (copyin(ucp, &uc32, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		sigutok(&uc32.uc_sigmask, &sigmask);
#if defined(__sparc)
		ucontext_32ton(&uc32, &uc, NULL, NULL);
#else	/* __amd64 */
		ucontext_32ton(&uc32, &uc);
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc32.uc_mcontext.gregs[ESP];
#endif
	}
#endif /* _SYSCALL32_IMPL */

	(void) save_syscall_args();	/* save args for tracing first */

	mutex_enter(&curproc->p_lock);
	pool_barrier_enter();
	mutex_exit(&curproc->p_lock);
	lwp = lwp_create(lwp_rtt, NULL, NULL, curproc, TS_STOPPED,
		curthread->t_pri, &sigmask, curthread->t_cid, 0);
	mutex_enter(&curproc->p_lock);
	pool_barrier_exit();
	mutex_exit(&curproc->p_lock);
	if (lwp == NULL)
		return (set_errno(EAGAIN));

	lwp_load(lwp, uc.uc_mcontext.gregs, thrptr);

	t = lwptot(lwp);
	/*
	 * Copy the new lwp's lwpid into the caller's specified buffer.
	 */
	if (new_lwp && copyout(&t->t_tid, new_lwp, sizeof (id_t))) {
		/*
		 * caller's buffer is not writable, return
		 * EFAULT, and terminate new lwp.
		 */
		mutex_enter(&p->p_lock);
		t->t_proc_flag |= TP_EXITLWP;
		t->t_sig_check = 1;
		t->t_sysnum = 0;
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);
		mutex_exit(&p->p_lock);
		return (set_errno(EFAULT));
	}

	/*
	 * clone caller's context, if any.  Must be invoked
	 * while -not- holding p_lock.
	 */
	if (curthread->t_ctx)
		lwp_createctx(curthread, t);

	/*
	 * copy current contract templates
	 */
	lwp_ctmpl_copy(lwp, ttolwp(curthread));

	mutex_enter(&p->p_lock);
	/*
	 * Copy the syscall arguments to the new lwp's arg area
	 * for the benefit of debuggers.
	 */
	t->t_sysnum = SYS_lwp_create;
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_arg[0] = (long)ucp;
	lwp->lwp_arg[1] = (long)flags;
	lwp->lwp_arg[2] = (long)new_lwp;
	lwp->lwp_argsaved = 1;

	if (!(flags & (LWP_DETACHED|LWP_DAEMON)))
		t->t_proc_flag |= TP_TWAIT;
	if (flags & LWP_DAEMON) {
		t->t_proc_flag |= TP_DAEMON;
		p->p_lwpdaemon++;
	}

	tid = (int)t->t_tid;	/* for /proc debuggers */

	/*
	 * We now set the newly-created lwp running.
	 * If it is being created as LWP_SUSPENDED, we leave its
	 * TP_HOLDLWP flag set so it will stop in system call exit.
	 */
	if (!(flags & LWP_SUSPENDED))
		t->t_proc_flag &= ~TP_HOLDLWP;
	lwp_create_done(t);
	mutex_exit(&p->p_lock);

	return (tid);
}
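To make the flag handling above easier to follow, here is a hedged helper that mirrors the t_proc_flag bookkeeping in syslwp_create(); lwp_flags_to_procflags() is illustrative and does not exist in the kernel.

static uint_t
lwp_flags_to_procflags(int flags)
{
	/* TP_HOLDLWP is assumed to have been set when the lwp was created. */
	uint_t pf = TP_HOLDLWP;

	if (!(flags & (LWP_DETACHED | LWP_DAEMON)))
		pf |= TP_TWAIT;		/* joinable: lwp_wait() may target it */
	if (flags & LWP_DAEMON)
		pf |= TP_DAEMON;	/* does not hold up process exit */
	if (!(flags & LWP_SUSPENDED))
		pf &= ~TP_HOLDLWP;	/* let it run past system call exit */

	return (pf);
}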
Example #10
/*ARGSUSED*/
kcpc_ctx_t *
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t = curthread;
	int		i;

	/*
	 * On both x86 and UltraSPARC, we may deliver the high-level
	 * interrupt in kernel mode, just after we've started to run an
	 * interrupt thread.  (That's because the hardware helpfully
	 * delivers the overflow interrupt some random number of cycles
	 * after the instruction that caused the overflow by which time
	 * we're in some part of the kernel, not necessarily running on
	 * the right thread).
	 *
	 * Check for this case here -- find the pinned thread
	 * that was running when the interrupt went off.
	 */
	if (t->t_flag & T_INTR_THREAD) {
		klwp_t *lwp;

		atomic_add_32(&kcpc_intrctx_count, 1);

		/*
		 * Note that t_lwp is always set to point at the underlying
		 * thread, thus this will work in the presence of nested
		 * interrupts.
		 */
		ctx = NULL;
		if ((lwp = t->t_lwp) != NULL) {
			t = lwptot(lwp);
			ctx = t->t_cpc_ctx;
		}
	} else
		ctx = t->t_cpc_ctx;

	if (ctx == NULL) {
		/*
		 * This can easily happen if we're using the counters in
		 * "shared" mode, for example, and an overflow interrupt
		 * occurs while we are running cpustat.  In that case, the
		 * bound thread that has the context that belongs to this
		 * CPU is almost certainly sleeping (if it was running on
		 * the CPU we'd have found it above), and the actual
		 * interrupted thread has no knowledge of performance counters!
		 */
		ctx = curthread->t_cpu->cpu_cpc_ctx;
		if (ctx != NULL) {
			/*
			 * Return the bound context for this CPU to
			 * the interrupt handler so that it can synchronously
			 * sample the hardware counters and restart them.
			 */
			return (ctx);
		}

		/*
		 * As long as the overflow interrupt really is delivered early
		 * enough after trapping into the kernel to avoid switching
		 * threads, we must always be able to find the cpc context,
		 * or something went terribly wrong, i.e., we ended up
		 * running a passivated interrupt thread, a kernel
		 * thread or we interrupted idle, all of which are Very Bad.
		 */
		if (kcpc_nullctx_panic)
			panic("null cpc context, thread %p", (void *)t);
		atomic_add_32(&kcpc_nullctx_count, 1);
	} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
		/*
		 * Schedule an ast to sample the counters, which will
		 * propagate any overflow into the virtualized performance
		 * counter(s), and may deliver a signal.
		 */
		ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		/*
		 * If a counter has overflowed which was counting on behalf of
		 * a request which specified CPC_OVF_NOTIFY_EMT, send the
		 * process a signal.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			if (ctx->kc_pics[i].kp_req != NULL &&
			    bitmap & (1 << i) &&
			    ctx->kc_pics[i].kp_req->kr_flags &
			    CPC_OVF_NOTIFY_EMT) {
				/*
				 * A signal has been requested for this PIC, so
				 * freeze the context.  The interrupt handler
				 * has already stopped the counter hardware.
				 */
				atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
				atomic_or_uint(&ctx->kc_pics[i].kp_flags,
				    KCPC_PIC_OVERFLOWED);
			}
		}
		aston(t);
	}
	return (NULL);
}
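The bitmap argument encodes which counters overflowed, one bit per PIC index, which is what the (1 << i) test in the loop above relies on. The following helper is a hedged illustration of that encoding; kcpc_print_overflows() is not a real kernel routine.

static void
kcpc_print_overflows(uint64_t bitmap)
{
	int i;

	/* Bit i set in the bitmap means PIC i overflowed. */
	for (i = 0; i < cpc_ncounters; i++) {
		if (bitmap & (1ULL << i))
			cmn_err(CE_CONT, "pic %d overflowed\n", i);
	}
}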
Example #11
/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
	kthread_t *t, *pt = lwptot(lwp);
	struct machpcb *mpcb = lwptompcb(clwp);
	struct machpcb *pmpcb = lwptompcb(lwp);
	kfpu_t *fp, *pfp = lwptofpu(lwp);
	caddr_t wbuf;
	uint_t wstate;

	t = mpcb->mpcb_thread;
	/*
	 * remember child's fp and wbuf since they will get erased during
	 * the bcopy.
	 */
	fp = mpcb->mpcb_fpu;
	wbuf = mpcb->mpcb_wbuf;
	wstate = mpcb->mpcb_wstate;
	/*
	 * Don't copy mpcb_frame since we hand-crafted it
	 * in thread_load().
	 */
	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct machpcb) - REGOFF);
	mpcb->mpcb_thread = t;
	mpcb->mpcb_fpu = fp;
	fp->fpu_q = mpcb->mpcb_fpu_q;

	/*
	 * It is theoretically possible for the lwp's wstate to
	 * be different from its value assigned in lwp_stk_init,
	 * since lwp_stk_init assumed the data model of the process.
	 * Here, we took on the data model of the cloned lwp.
	 */
	if (mpcb->mpcb_wstate != wstate) {
		if (wstate == WSTATE_USER32) {
			kmem_cache_free(wbuf32_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
			wstate = WSTATE_USER64;
		} else {
			kmem_cache_free(wbuf64_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
			wstate = WSTATE_USER32;
		}
	}

	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf = wbuf;
	mpcb->mpcb_wbuf_pa = va_to_pa(wbuf);

	ASSERT(mpcb->mpcb_wstate == wstate);

	if (mpcb->mpcb_wbcnt != 0) {
		bcopy(pmpcb->mpcb_wbuf, mpcb->mpcb_wbuf,
		    mpcb->mpcb_wbcnt * ((mpcb->mpcb_wstate == WSTATE_USER32) ?
		    sizeof (struct rwindow32) : sizeof (struct rwindow64)));
	}

	if (pt == curthread)
		pfp->fpu_fprs = _fp_read_fprs();
	if ((pfp->fpu_en) || (pfp->fpu_fprs & FPRS_FEF)) {
		if (pt == curthread && fpu_exists) {
			save_gsr(clwp->lwp_fpu);
		} else {
			uint64_t gsr;
			gsr = get_gsr(lwp->lwp_fpu);
			set_gsr(gsr, clwp->lwp_fpu);
		}
		fp_fork(lwp, clwp);
	}
}
Example #12
int
lwp_setprivate(klwp_t *lwp, int which, uintptr_t base)
{
	pcb_t *pcb = &lwp->lwp_pcb;
	struct regs *rp = lwptoregs(lwp);
	kthread_t *t = lwptot(lwp);
	int thisthread = t == curthread;
	int rval;

	if (thisthread)
		kpreempt_disable();

#if defined(__amd64)

	/*
	 * 32-bit compatibility processes point to the per-cpu GDT segment
	 * descriptors that are virtualized to the lwp.  That allows 32-bit
	 * programs to mess with %fs and %gs; in particular it allows
	 * things like this:
	 *
	 *	movw	%gs, %ax
	 *	...
	 *	movw	%ax, %gs
	 *
	 * to work, which is needed by emulators for legacy application
	 * environments ..
	 *
	 * 64-bit processes may also point to a per-cpu GDT segment descriptor
	 * virtualized to the lwp.  However the descriptor base is forced
	 * to zero (because we can't express the full 64-bit address range
	 * in a long mode descriptor), so don't reload segment registers
	 * in a 64-bit program! 64-bit processes must have selector values
	 * of zero for %fs and %gs to use the 64-bit fs_base and gs_base
	 * respectively.
	 */
	if (pcb->pcb_rupdate == 0) {
		pcb->pcb_ds = rp->r_ds;
		pcb->pcb_es = rp->r_es;
		pcb->pcb_fs = rp->r_fs;
		pcb->pcb_gs = rp->r_gs;
		pcb->pcb_rupdate = 1;
		t->t_post_sys = 1;
	}
	ASSERT(t->t_post_sys);

	switch (which) {
	case _LWP_FSBASE:
		if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
			set_usegd(&pcb->pcb_fsdesc, SDP_LONG, 0, 0,
			    SDT_MEMRWA, SEL_UPL, SDP_BYTES, SDP_OP32);
			rval = pcb->pcb_fs = 0;	/* null gdt descriptor */
		} else {
			set_usegd(&pcb->pcb_fsdesc, SDP_SHORT, (void *)base, -1,
			    SDT_MEMRWA, SEL_UPL, SDP_PAGES, SDP_OP32);
			rval = pcb->pcb_fs = LWPFS_SEL;
		}
		if (thisthread)
			gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);

		pcb->pcb_fsbase = base;
		break;
	case _LWP_GSBASE:
		if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
			set_usegd(&pcb->pcb_gsdesc, SDP_LONG, 0, 0,
			    SDT_MEMRWA, SEL_UPL, SDP_BYTES, SDP_OP32);
			rval = pcb->pcb_gs = 0;	/* null gdt descriptor */
		} else {
			set_usegd(&pcb->pcb_gsdesc, SDP_SHORT, (void *)base, -1,
			    SDT_MEMRWA, SEL_UPL, SDP_PAGES, SDP_OP32);
			rval = pcb->pcb_gs = LWPGS_SEL;
		}
		if (thisthread)
			gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);

		pcb->pcb_gsbase = base;
		break;
	default:
		rval = -1;
		break;
	}

#elif defined(__i386)

	/*
	 * 32-bit processes point to the per-cpu GDT segment
	 * descriptors that are virtualized to the lwp.
	 */

	switch	(which) {
	case _LWP_FSBASE:
		set_usegd(&pcb->pcb_fsdesc, (void *)base, -1,
		    SDT_MEMRWA, SEL_UPL, SDP_PAGES, SDP_OP32);
		if (thisthread)
			gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);

		rval = rp->r_fs = LWPFS_SEL;
		break;
	case _LWP_GSBASE:
		set_usegd(&pcb->pcb_gsdesc, (void *)base, -1,
		    SDT_MEMRWA, SEL_UPL, SDP_PAGES, SDP_OP32);
		if (thisthread)
			gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);

		rval = rp->r_gs = LWPGS_SEL;
		break;
	default:
		rval = -1;
		break;
	}

#endif	/* __i386 */

	if (thisthread)
		kpreempt_enable();
	return (rval);
}
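A hedged usage sketch of lwp_setprivate() on the calling lwp: the return value is the selector user code should load, which per the comments above is 0 for a 64-bit lwp (the address lives entirely in %gsbase) and LWPGS_SEL for a 32-bit lwp (backed by the per-cpu GDT descriptor). set_thread_gs_sketch() and tls_block are illustrative names.

static int
set_thread_gs_sketch(uintptr_t tls_block)
{
	klwp_t *lwp = ttolwp(curthread);

	/* Point %gs (or %gsbase) of the calling lwp at tls_block. */
	return (lwp_setprivate(lwp, _LWP_GSBASE, tls_block));
}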
Example #13
static int
lwp_getprivate(klwp_t *lwp, int which, uintptr_t base)
{
	pcb_t *pcb = &lwp->lwp_pcb;
	struct regs *rp = lwptoregs(lwp);
	uintptr_t sbase;
	int error = 0;

	ASSERT(lwptot(lwp) == curthread);

	kpreempt_disable();
	switch (which) {
#if defined(__amd64)

	case _LWP_FSBASE:
		if ((sbase = pcb->pcb_fsbase) != 0) {
			if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
				if (pcb->pcb_rupdate == 1) {
					if (pcb->pcb_fs == 0)
						break;
				} else {
					if (rp->r_fs == 0)
						break;
				}
			} else {
				if (pcb->pcb_rupdate == 1) {
					if (pcb->pcb_fs == LWPFS_SEL)
						break;
				} else {
					if (rp->r_fs == LWPFS_SEL)
						break;
				}
			}
		}
		error = EINVAL;
		break;
	case _LWP_GSBASE:
		if ((sbase = pcb->pcb_gsbase) != 0) {
			if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
				if (pcb->pcb_rupdate == 1) {
					if (pcb->pcb_gs == 0)
						break;
				} else {
					if (rp->r_gs == 0)
						break;
				}
			} else {
				if (pcb->pcb_rupdate == 1) {
					if (pcb->pcb_gs == LWPGS_SEL)
						break;
				} else {
					if (rp->r_gs == LWPGS_SEL)
						break;
				}
			}
		}
		error = EINVAL;
		break;

#elif defined(__i386)

	case _LWP_FSBASE:
		if (rp->r_fs == LWPFS_SEL) {
			sbase = USEGD_GETBASE(&pcb->pcb_fsdesc);
			break;
		}
		error = EINVAL;
		break;
	case _LWP_GSBASE:
		if (rp->r_gs == LWPGS_SEL) {
			sbase = USEGD_GETBASE(&pcb->pcb_gsdesc);
			break;
		}
		error = EINVAL;
		break;

#endif	/* __i386 */

	default:
		error = ENOTSUP;
		break;
	}
	kpreempt_enable();

	if (error != 0)
		return (error);

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
		if (sulword((void *)base, sbase) == -1)
			error = EFAULT;
#if defined(_SYSCALL32_IMPL)
	} else {
		if (suword32((void *)base, (uint32_t)sbase) == -1)
			error = EFAULT;
#endif
	}
	return (error);
}
Example #14
/*
 * Add any lwp-associated context handlers to the lwp at the beginning
 * of the lwp's useful life.
 *
 * All paths which create lwp's invoke lwp_create(); lwp_create()
 * invokes lwp_stk_init() which initializes the stack, sets up
 * lwp_regs, and invokes this routine.
 *
 * All paths which destroy lwp's invoke lwp_exit() to rip the lwp
 * apart and put it on 'lwp_deathrow'; if the lwp is destroyed it
 * ends up in thread_free() which invokes freectx(t, 0) before
 * invoking lwp_stk_fini().  When the lwp is recycled from death
 * row, lwp_stk_fini() is invoked, then thread_free(), and thus
 * freectx(t, 0) as before.
 *
 * In the case of exec, the surviving lwp is thoroughly scrubbed
 * clean; exec invokes freectx(t, 1) to destroy associated contexts.
 * On the way back to the new image, it invokes setregs() which
 * in turn invokes this routine.
 */
void
lwp_installctx(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);
	int thisthread = t == curthread;
#ifdef _SYSCALL32_IMPL
	void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
	    lwp_segregs_restore : lwp_segregs_restore32;
#else
	void (*restop)(klwp_t *) = lwp_segregs_restore;
#endif

	/*
	 * Install the basic lwp context handlers on each lwp.
	 *
	 * On the amd64 kernel, the context handlers are responsible for
	 * virtualizing %ds, %es, %fs, and %gs to the lwp.  The register
	 * values are only ever changed via sys_rtt when the
	 * pcb->pcb_rupdate == 1.  Only sys_rtt gets to clear the bit.
	 *
	 * On the i386 kernel, the context handlers are responsible for
	 * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs
	 */
	ASSERT(removectx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL) == 0);
	if (thisthread)
		kpreempt_disable();
	installctx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL);
	if (thisthread) {
		/*
		 * Since we're the right thread, set the values in the GDT
		 */
		restop(lwp);
		kpreempt_enable();
	}

	/*
	 * If we have sysenter/sysexit instructions enabled, we need
	 * to ensure that the hardware mechanism is kept up-to-date with the
	 * lwp's kernel stack pointer across context switches.
	 *
	 * sep_save zeros the sysenter stack pointer msr; sep_restore sets
	 * it to the lwp's kernel stack pointer (kstktop).
	 */
	if (is_x86_feature(x86_featureset, X86FSET_SEP)) {
#if defined(__amd64)
		caddr_t kstktop = (caddr_t)lwp->lwp_regs;
#elif defined(__i386)
		caddr_t kstktop = ((caddr_t)lwp->lwp_regs - MINFRAME) +
		    SA(sizeof (struct regs) + MINFRAME);
#endif
		ASSERT(removectx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0);

		if (thisthread)
			kpreempt_disable();
		installctx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL);
		if (thisthread) {
			/*
			 * We're the right thread, so set the stack pointer
			 * for the first sysenter instruction to use
			 */
			sep_restore(kstktop);
			kpreempt_enable();
		}
	}

	if (PROC_IS_BRANDED(ttoproc(t)))
		lwp_attach_brand_hdlrs(lwp);
}
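The sep_save/sep_restore pair referenced above is implemented in assembly in the real kernel; the C sketch below only illustrates the intent, under the assumption that MSR_INTC_SEP_ESP names the SYSENTER stack-pointer MSR and that wrmsr() writes it. Treat both bodies as illustrative, not as the actual implementation.

/*ARGSUSED*/
static void
sep_save_sketch(void *ksp)
{
	/* lwp going off-cpu: zero the MSR so sysenter cannot use a stale stack. */
	wrmsr(MSR_INTC_SEP_ESP, 0);
}

static void
sep_restore_sketch(void *ksp)
{
	/* lwp coming on-cpu: point sysenter at its kernel stack top. */
	wrmsr(MSR_INTC_SEP_ESP, (uint64_t)(uintptr_t)ksp);
}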