Example #1
void
need_resched(struct cpu_info *ci)
{
	ci->ci_want_resched = 1;
	if ((ci)->ci_curproc != NULL)
		aston((ci)->ci_curproc);
}
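
Every example on this page follows the same shape: record why the thread (or the CPU's current process) needs attention, then call aston() so the condition is noticed on the next return to user level. The sketch below illustrates only that pattern; the type and member names (struct thread, ast_pending, userret, ...) are invented for this page and are not taken from any of the kernels quoted here.

#include <stdbool.h>

/* Hypothetical, simplified types -- for illustration only. */
struct thread {
	volatile bool	ast_pending;	/* set by aston(), cleared on kernel exit */
	bool		want_resched;	/* preemption was requested */
	bool		sig_pending;	/* a signal is waiting to be delivered */
};

/* aston(): raise the per-thread "AST pending" flag. */
static inline void
aston(struct thread *t)
{
	t->ast_pending = true;
}

/*
 * Return-to-user path: while the flag is set, clear it and handle whatever
 * asynchronous work was queued (signal delivery, preemption, /proc stops).
 */
static void
userret(struct thread *t)
{
	while (t->ast_pending) {
		t->ast_pending = false;		/* astoff() */
		if (t->sig_pending) {
			/* deliver pending signals here */
		}
		if (t->want_resched) {
			/* enter the scheduler / preempt here */
		}
	}
}
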
Example #2
static void
fasttrap_sigsegv(proc_t *p, kthread_t *t, uintptr_t addr)
{
#if defined(sun)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGSEGV;
	sqp->sq_info.si_code = SEGV_MAPERR;
	sqp->sq_info.si_addr = (caddr_t)addr;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#else
	ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);

	ksiginfo_init(ksi);
	ksi->ksi_signo = SIGSEGV;
	ksi->ksi_code = SEGV_MAPERR;
	ksi->ksi_addr = (caddr_t)addr;
	(void) tdksignal(t, SIGSEGV, ksi);
#endif
}
Example #3
/*
 * Notify the current process (p) that it has a signal pending,
 * to be processed as soon as possible.
 */
void
signotify(struct proc *p)
{
	aston(p);
#ifdef MULTIPROCESSOR
	if (p->p_cpu != curcpu() && p->p_cpu != NULL)
		x86_send_ipi(p->p_cpu, X86_IPI_NOP);
#endif
}
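
For context, a caller of signotify() (hypothetical and heavily simplified; this is not the actual OpenBSD signal-posting code) would first mark the signal pending on the process and then let signotify() raise the AST and, when the target is running on another CPU, send the IPI:

/* Hypothetical caller sketch -- the pending-set update is simplified. */
void
post_signal(struct proc *p, int signum)
{
	p->p_siglist |= sigmask(signum);	/* the real code does this atomically */
	signotify(p);				/* raise the AST, IPI if remote */
}
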
Example #4
/*ARGSUSED*/
static void
kcpc_lwp_create(kthread_t *t, kthread_t *ct)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx, *cctx;
	int		i;

	if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0)
		return;

	rw_enter(&kcpc_cpuctx_lock, RW_READER);
	if (ctx->kc_flags & KCPC_CTX_INVALID) {
		rw_exit(&kcpc_cpuctx_lock);
		return;
	}
	cctx = kcpc_ctx_alloc();
	kcpc_ctx_clone(ctx, cctx);
	rw_exit(&kcpc_cpuctx_lock);

	/*
	 * Copy the parent context's kc_flags field, but don't overwrite
	 * the child's in case it was modified during kcpc_ctx_clone.
	 */
	cctx->kc_flags |= ctx->kc_flags;
	cctx->kc_thread = ct;
	cctx->kc_cpuid = -1;
	ct->t_cpc_set = cctx->kc_set;
	ct->t_cpc_ctx = cctx;

	if (cctx->kc_flags & KCPC_CTX_SIGOVF) {
		kcpc_set_t *ks = cctx->kc_set;
		/*
		 * Our contract with the user requires us to immediately send an
		 * overflow signal to all children if we have the LWPINHERIT
		 * and SIGOVF flags set. In addition, all counters should be
		 * set to UINT64_MAX, and their pic's overflow flag turned on
		 * so that our trap() processing knows to send a signal.
		 */
		atomic_or_uint(&cctx->kc_flags, KCPC_CTX_FREEZE);
		for (i = 0; i < ks->ks_nreqs; i++) {
			kcpc_request_t *kr = &ks->ks_req[i];

			if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) {
				*(kr->kr_data) = UINT64_MAX;
				kr->kr_picp->kp_flags |= KCPC_PIC_OVERFLOWED;
			}
		}
		ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		aston(ct);
	}

	/*
	 * Register the cloned context with the child LWP's context ops so
	 * its counters are saved and restored across context switches and
	 * so the inherit and free callbacks run for it later.
	 */
	installctx(ct, cctx, kcpc_save, kcpc_restore,
	    NULL, kcpc_lwp_create, NULL, kcpc_free);
}
Example #5
void
dtrace_return_probe(struct regs *rp)
{
	krwlock_t *rwp;
	uintptr_t npc = curthread->t_dtrace_npc;
	uint8_t step = curthread->t_dtrace_step;
	uint8_t ret = curthread->t_dtrace_ret;

	/*
	 * fasttrap deferred an AST while the traced instruction was being
	 * single-stepped; re-arm it now (and force a signal check) so the
	 * deferred work is handled on the way back to user level.
	 */
	if (curthread->t_dtrace_ast) {
		aston(curthread);
		curthread->t_sig_check = 1;
	}

	/*
	 * Clear all user tracing flags.
	 */
	curthread->t_dtrace_ft = 0;

	/*
	 * If we weren't expecting to take a return probe trap, kill the
	 * process as though it had just executed an unassigned trap
	 * instruction.
	 */
	if (step == 0) {
		tsignal(curthread, SIGILL);
		return;
	}

	ASSERT(rp->r_npc == rp->r_pc + 4);

	/*
	 * If we hit this trap unrelated to a return probe, we're just here
	 * to reset the AST flag since we deferred a signal until after we
	 * logically single-stepped the instruction we copied out.
	 */
	if (ret == 0) {
		rp->r_pc = npc;
		rp->r_npc = npc + 4;
		return;
	}

	/*
	 * We need to wait until after we've called the dtrace_return_probe_ptr
	 * function pointer to set %pc and %npc.
	 */
	rwp = &CPU->cpu_ft_lock;
	rw_enter(rwp, RW_READER);
	if (dtrace_return_probe_ptr != NULL)
		(void) (*dtrace_return_probe_ptr)(rp);
	rw_exit(rwp);
	rp->r_pc = npc;
	rp->r_npc = npc + 4;
}
Example #6
File: syscall.c  Project: andreiw/polaris
/*
 * set_proc_ast - Set asynchronous service trap (AST) flag for all
 * threads in process.
 */
void
set_proc_ast(proc_t *p)
{
	kthread_t	*t;
	kthread_t	*first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		aston(t);
	} while ((t = t->t_forw) != first);
}
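
A hypothetical caller of set_proc_ast() (illustration only, not from the polaris tree) would establish some per-process condition while holding p_lock and then mark every LWP so each one re-checks the condition on its next trip out of the kernel:

#define	SOMEFLAG	0x1	/* stand-in bit, not a real p_flag value */

/* Hypothetical illustration of using set_proc_ast(). */
void
flag_all_lwps(proc_t *p)
{
	mutex_enter(&p->p_lock);
	p->p_flag |= SOMEFLAG;		/* the condition the LWPs will check */
	set_proc_ast(p);		/* every thread notices at its next AST */
	mutex_exit(&p->p_lock);
}
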
Example #7
void
dtrace_pid_probe(struct regs *rp)
{
	krwlock_t *rwp = &CPU->cpu_ft_lock;
	uint32_t instr;

	/*
	 * This trap should only be invoked if there's a corresponding
	 * enabled dtrace probe. If there isn't, send SIGILL as though
	 * the process had executed an invalid trap instruction.
	 */
	rw_enter(rwp, RW_READER);
	if (dtrace_pid_probe_ptr != NULL && (*dtrace_pid_probe_ptr)(rp) == 0) {
		rw_exit(rwp);
		return;
	}
	rw_exit(rwp);

	/*
	 * It is possible that we were preempted after entering the kernel,
	 * and the tracepoint was removed. If it appears that the process hit
	 * our reserved trap instruction, we send SIGILL just as though
	 * the user had executed an unused trap instruction.
	 */
	if (fuword32((void *)rp->r_pc, &instr) != 0 ||
	    instr == FASTTRAP_INSTR) {
		sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
		proc_t *p = curproc;

		sqp->sq_info.si_signo = SIGILL;
		sqp->sq_info.si_code = ILL_ILLTRP;
		sqp->sq_info.si_addr = (caddr_t)rp->r_pc;
		sqp->sq_info.si_trapno = 0x38;

		mutex_enter(&p->p_lock);
		sigaddqa(p, curthread, sqp);
		mutex_exit(&p->p_lock);
		aston(curthread);
	}
}
Example #8
File: syscall.c  Project: andreiw/polaris
/*
 * Post-syscall processing.  Perform abnormal system call completion
 * actions such as /proc tracing, profiling, signals, preemption, etc.
 *
 * This routine is called only if t_post_sys, t_sig_check, or t_astflag is set.
 * Any condition requiring post-syscall handling must set one of these.
 * If the condition is persistent, this routine will repost t_post_sys.
 */
void
post_syscall(long rval1, long rval2)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	struct regs *rp = lwptoregs(lwp);
	uint_t	error;
	uint_t	code = t->t_sysnum;
	int	repost = 0;
	int	proc_stop = 0;		/* non-zero if stopping */
	int	sigprof = 0;		/* non-zero if sending SIGPROF */

	t->t_post_sys = 0;

	error = lwp->lwp_errno;

	/*
	 * Code can be zero if this is a new LWP returning after a forkall(),
	 * other than the LWP that corresponds to the one in the parent which
	 * called forkall().  In these LWPs, skip most of the post-syscall
	 * activity.
	 */
	if (code == 0)
		goto sig_check;
	/*
	 * If the trace flag is set, mark the lwp to take a single-step trap
	 * on return to user level (below). The x86 lcall interface and
	 * sysenter have already done this, and turned off the flag, but the
	 * amd64 syscall interface has not.
	 */
	if (rp->r_ps & PS_T) {
		lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		rp->r_ps &= ~PS_T;
		aston(curthread);
	}
#ifdef C2_AUDIT
	if (audit_active) {	/* put out audit record for this syscall */
		rval_t	rval;

		/* XX64 -- truncation of 64-bit return values? */
		rval.r_val1 = (int)rval1;
		rval.r_val2 = (int)rval2;
		audit_finish(T_SYSCALL, code, error, &rval);
		repost = 1;
	}
#endif /* C2_AUDIT */

	if (curthread->t_pdmsg != NULL) {
		char *m = curthread->t_pdmsg;

		uprintf("%s", m);
		kmem_free(m, strlen(m) + 1);
		curthread->t_pdmsg = NULL;
	}

	/*
	 * If we're going to stop for /proc tracing, set the flag and
	 * save the arguments so that the return values don't smash them.
	 */
	if (PTOU(p)->u_systrap) {
		if (prismember(&PTOU(p)->u_exitmask, code)) {
			if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
				(void) save_syscall_args();
			proc_stop = 1;
		}
		repost = 1;
	}

	/*
	 * Similarly check to see if SIGPROF might be sent.
	 */
	if (curthread->t_rprof != NULL &&
	    curthread->t_rprof->rp_anystate != 0) {
		if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
			(void) save_syscall_args();
		sigprof = 1;
	}

	if (lwp->lwp_eosys == NORMALRETURN) {
		if (error == 0) {
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf(
				    "%d: r_val1=0x%lx, r_val2=0x%lx, id 0x%p\n",
				    p->p_pid, rval1, rval2, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			rp->r_ps &= ~PS_C;
			rp->r_r0 = rval1;
			rp->r_r1 = rval2;
		} else {
			int sig;
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf("%d: error=%d, id 0x%p\n",
				    p->p_pid, error, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
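			/*
			 * Translate EINTR: a stale file descriptor turns the
			 * failure into EBADF, and an interrupted call whose
			 * handler was installed with SA_RESTART (and is not
			 * SIG_DFL/SIG_IGN) becomes ERESTART so the call is
			 * restarted.
			 */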
			if (error == EINTR && t->t_activefd.a_stale)
				error = EBADF;
			if (error == EINTR &&
			    (sig = lwp->lwp_cursig) != 0 &&
			    sigismember(&PTOU(p)->u_sigrestart, sig) &&
			    PTOU(p)->u_signal[sig - 1] != SIG_DFL &&
			    PTOU(p)->u_signal[sig - 1] != SIG_IGN)
				error = ERESTART;
			rp->r_r0 = error;
			rp->r_ps |= PS_C;
		}
	}

	/*
	 * From the proc(4) manual page:
	 * When exit from a system call is being traced, the traced process
	 * stops on completion of the system call just prior to checking for
	 * signals and returning to user level.  At this point all return
	 * values have been stored into the traced process's saved registers.
	 */
	if (proc_stop) {
		mutex_enter(&p->p_lock);
		if (PTOU(p)->u_systrap &&
		    prismember(&PTOU(p)->u_exitmask, code))
			stop(PR_SYSEXIT, code);
		mutex_exit(&p->p_lock);
	}

	/*
	 * If we are the parent returning from a successful
	 * vfork, wait for the child to exec or exit.
	 * This code must be here and not in the bowels of the system
	 * so that /proc can intercept exit from vfork in a timely way.
	 */
	if (code == SYS_vfork && rp->r_r1 == 0 && error == 0)
		vfwait((pid_t)rval1);

	/*
	 * If profiling is active, bill the current PC in user-land
	 * and keep reposting until profiling is disabled.
	 */
	if (p->p_prof.pr_scale) {
		if (lwp->lwp_oweupc)
			profil_tick(rp->r_pc);
		repost = 1;
	}

sig_check:
	/*
	 * Reset flag for next time.
	 * We must do this after stopping on PR_SYSEXIT
	 * because /proc uses the information in lwp_eosys.
	 */
	lwp->lwp_eosys = NORMALRETURN;
	clear_stale_fd();
	t->t_flag &= ~T_FORKALL;

	if (t->t_astflag | t->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(t);
		/*
		 * If a single-step trap occurred on a syscall (see trap())
		 * recognize it now.  Do this before checking for signals
		 * because deferred_singlestep_trap() may generate a SIGTRAP to
		 * the LWP or may otherwise mark the LWP to call issig(FORREAL).
		 */
		if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
			deferred_singlestep_trap((caddr_t)rp->r_pc);

		t->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 * 	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * For kaio requests on the special kaio poll queue,
		 * copy out their results to user memory.
		 */
		if (p->p_aio)
			aio_cleanup(0);
		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p) || (t->t_proc_flag & TP_EXITLWP))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG_PENDING
		 * evaluate true must set t_sig_check afterwards.
		 */
		if (ISSIG_PENDING(t, lwp, p)) {
			if (issig(FORREAL))
				psig();
			t->t_sig_check = 1;	/* recheck next time */
		}

		if (sigprof) {
			realsigprof(code, error);
			t->t_sig_check = 1;	/* recheck next time */
		}

		/*
		 * If a performance counter overflow interrupt was
		 * delivered *during* the syscall, then re-enable the
		 * AST so that we take a trip through trap() to cause
		 * the SIGEMT to be delivered.
		 */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW)
			aston(t);

		/*
		 * /proc can't enable/disable the trace bit itself
		 * because that could race with the call gate used by
		 * system calls via "lcall". If that happened, an
		 * invalid EFLAGS would result. prstep()/prnostep()
		 * therefore schedule an AST for the purpose.
		 */
		if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
			rp->r_ps |= PS_T;
		}
		if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
			rp->r_ps &= ~PS_T;
		}
	}

	lwp->lwp_errno = 0;		/* clear error for next time */

#ifndef NPROBE
	/* Kernel probe */
	if (tnf_tracing_active) {
		TNF_PROBE_3(syscall_end, "syscall thread", /* CSTYLED */,
			tnf_long,	rval1,		rval1,
			tnf_long,	rval2,		rval2,
			tnf_long,	errno,		(long)error);
		repost = 1;
	}
#endif /* NPROBE */

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 *
	 * XXX Sampled times past this point are charged to the user.
	 */
	lwp->lwp_state = LWP_USER;

	if (t->t_trapret) {
		t->t_trapret = 0;
		thread_lock(t);
		CL_TRAPRET(t);
		thread_unlock(t);
	}
	if (CPU->cpu_runrun)
		preempt();

	lwp->lwp_errno = 0;		/* clear error for next time */

	/*
	 * The thread lock must be held in order to clear sysnum and reset
	 * lwp_ap atomically with respect to other threads in the system that
	 * may be looking at the args via lwp_ap from get_syscall_args().
	 */

	thread_lock(t);
	t->t_sysnum = 0;		/* no longer in a system call */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		/*
		 * In case the args were copied to the lwp, reset the
		 * pointer so the next syscall will have the right
		 * lwp_ap pointer.
		 */
		lwp->lwp_ap = (long *)&rp->r_rdi;
	} else {
#endif
		lwp->lwp_ap = NULL;	/* reset on every syscall entry */
	}
	thread_unlock(t);

	lwp->lwp_argsaved = 0;

	/*
	 * If there was a continuing reason for post-syscall processing,
	 * set the t_post_sys flag for the next system call.
	 */
	if (repost)
		t->t_post_sys = 1;

	/*
	 * If there is a ustack registered for this lwp, and the stack rlimit
	 * has been altered, read in the ustack. If the saved stack rlimit
	 * matches the bounds of the ustack, update the ustack to reflect
	 * the new rlimit. If the new stack rlimit is RLIM_INFINITY, disable
	 * stack checking by setting the size to 0.
	 */
	if (lwp->lwp_ustack != 0 && lwp->lwp_old_stk_ctl != 0) {
		rlim64_t new_size;
		caddr_t top;
		stack_t stk;
		struct rlimit64 rl;

		mutex_enter(&p->p_lock);
		new_size = p->p_stk_ctl;
		top = p->p_usrstack;
		(void) rctl_rlimit_get(rctlproc_legacy[RLIMIT_STACK], p, &rl);
		mutex_exit(&p->p_lock);

		if (rl.rlim_cur == RLIM64_INFINITY)
			new_size = 0;

		if (copyin((stack_t *)lwp->lwp_ustack, &stk,
		    sizeof (stack_t)) == 0 &&
		    (stk.ss_size == lwp->lwp_old_stk_ctl ||
			stk.ss_size == 0) &&
		    stk.ss_sp == top - stk.ss_size) {
			stk.ss_sp = (void *)((uintptr_t)stk.ss_sp +
			    stk.ss_size - (uintptr_t)new_size);
			stk.ss_size = new_size;

			(void) copyout(&stk, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		lwp->lwp_old_stk_ctl = 0;
	}
}
Example #9
/*
 * This routine tries to stop all user threads before we get rid of all
 * their pages.  It goes through the allthreads list, sets the TP_CHKPT
 * flag for all user threads, and makes them runnable.  If all of the
 * threads can be stopped within the max wait time, CPR will proceed.
 * Otherwise CPR is aborted after a few similar retries.
 */
static void
cpr_stop_user(int wait)
{
	kthread_id_t tp;
	proc_t *p;

	/* The whole loop below needs to be atomic */
	mutex_enter(&pidlock);

	/* faster this way */
	tp = curthread->t_next;
	do {
		/* kernel threads will be handled later */
		p = ttoproc(tp);
		if (p->p_as == &kas || p->p_stat == SZOMB)
			continue;

		/*
		 * If the thread is stopped (by CPR) already, do nothing;
		 * if running, mark TP_CHKPT;
		 * if sleeping normally, mark TP_CHKPT and setrun;
		 * if sleeping non-interruptibly, mark TP_CHKPT only for now;
		 * if sleeping with t_wchan0 != 0, etc. (virtually stopped),
		 * do nothing.
		 */

		/* p_lock is needed for modifying t_proc_flag */
		mutex_enter(&p->p_lock);
		thread_lock(tp); /* needed to check CPR_ISTOPPED */

		if (tp->t_state == TS_STOPPED) {
			/*
			 * if already stopped by other reasons, add this new
			 * reason to it.
			 */
			if (tp->t_schedflag & TS_RESUME)
				tp->t_schedflag &= ~TS_RESUME;
		} else {

			tp->t_proc_flag |= TP_CHKPT;

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
			add_one_utstop();
			mutex_enter(&p->p_lock);
			thread_lock(tp);

			aston(tp);

			if (tp->t_state == TS_SLEEP &&
			    (tp->t_flag & T_WAKEABLE)) {
				setrun_locked(tp);
			}
		}
		/*
		 * force the thread into the kernel if it is not already there.
		 */
		if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
			poke_cpu(tp->t_cpu->cpu_id);
		thread_unlock(tp);
		mutex_exit(&p->p_lock);

	} while ((tp = tp->t_next) != curthread);
	mutex_exit(&pidlock);

	utstop_timedwait(wait);
}
Example #10
/* ARGSUSED */
static int
dr_stop_user_threads(dr_sr_handle_t *srh)
{
	int		count;
	int		bailout;
	dr_handle_t	*handle = srh->sr_dr_handlep;
	static fn_t	f = "dr_stop_user_threads";
	kthread_id_t 	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	DR_UTSTOP_RETRY	4
#define	DR_UTSTOP_WAIT	hz

	if (dr_skip_user_threads)
		return (DDI_SUCCESS);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	srh->sr_err_idx = 0;
	for (count = 0; count < DR_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (tp->t_state == TS_SLEEP &&
				    (tp->t_flag & T_WAKEABLE)) {
					setrun_locked(tp);
				}

			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);


			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);


		/* let everything catch up */
		utstop_timedwait(count * count * DR_UTSTOP_WAIT);


		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(dr_allow_blocked_threads &&
			    DR_VSTOPPED(tp))) {
				bailout = 1;
				if (count == DR_UTSTOP_RETRY - 1) {
					/*
					 * save the pid for later reporting
					 */
					srh->sr_err_idx =
					    dr_add_int(srh->sr_err_ints,
					    srh->sr_err_idx, DR_MAX_ERR_INT,
					    (uint64_t)p->p_pid);

					cmn_err(CE_WARN, "%s: "
					    "failed to stop thread: "
					    "process=%s, pid=%d",
					    f, p->p_user.u_psargs, p->p_pid);

					PR_QR("%s: failed to stop thread: "
					    "process=%s, pid=%d, t_id=0x%p, "
					    "t_state=0x%x, t_proc_flag=0x%x, "
					    "t_schedflag=0x%x\n",
					    f, p->p_user.u_psargs, p->p_pid,
					    tp, tp->t_state, tp->t_proc_flag,
					    tp->t_schedflag);
				}

			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		handle->h_err = drerr_int(ESBD_UTHREAD, srh->sr_err_ints,
			srh->sr_err_idx, 0);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
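
Examples #9, #10, and #13 all lean on the same utstop_init()/add_one_utstop()/utstop_timedwait() helpers, whose bodies are not shown on this page. The sketch below is only a guess at the minimal bookkeeping they imply -- a count of threads that were asked to stop, plus a timed wait for that count to drain -- and is not the actual illumos implementation; even the idea that the stop path calls something like one_utstop() is an assumption here.

/* Hypothetical sketch of the utstop bookkeeping; lock/cv init omitted. */
static kmutex_t		utstop_lock;
static kcondvar_t	utstop_cv;
static int		utstop_count;	/* asked to stop, not yet stopped */

void
utstop_init(void)
{
	mutex_enter(&utstop_lock);
	utstop_count = 0;
	mutex_exit(&utstop_lock);
}

void
add_one_utstop(void)
{
	mutex_enter(&utstop_lock);
	utstop_count++;
	mutex_exit(&utstop_lock);
}

/* Assumed to be called from the stop path once a marked thread stops. */
void
one_utstop(void)
{
	mutex_enter(&utstop_lock);
	if (--utstop_count <= 0)
		cv_signal(&utstop_cv);
	mutex_exit(&utstop_lock);
}

/* Wait until every counted thread has stopped, or until 'ticks' elapse. */
void
utstop_timedwait(clock_t ticks)
{
	clock_t deadline = ddi_get_lbolt() + ticks;

	mutex_enter(&utstop_lock);
	while (utstop_count > 0 &&
	    cv_timedwait(&utstop_cv, &utstop_lock, deadline) != -1)
		;
	mutex_exit(&utstop_lock);
}
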
Example #11
/*ARGSUSED*/
kcpc_ctx_t *
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t = curthread;
	int		i;

	/*
	 * On both x86 and UltraSPARC, we may deliver the high-level
	 * interrupt in kernel mode, just after we've started to run an
	 * interrupt thread.  (That's because the hardware helpfully
	 * delivers the overflow interrupt some random number of cycles
	 * after the instruction that caused the overflow, by which time
	 * we're in some part of the kernel, not necessarily running on
	 * the right thread).
	 *
	 * Check for this case here -- find the pinned thread
	 * that was running when the interrupt went off.
	 */
	if (t->t_flag & T_INTR_THREAD) {
		klwp_t *lwp;

		atomic_add_32(&kcpc_intrctx_count, 1);

		/*
		 * Note that t_lwp is always set to point at the underlying
		 * thread, thus this will work in the presence of nested
		 * interrupts.
		 */
		ctx = NULL;
		if ((lwp = t->t_lwp) != NULL) {
			t = lwptot(lwp);
			ctx = t->t_cpc_ctx;
		}
	} else
		ctx = t->t_cpc_ctx;

	if (ctx == NULL) {
		/*
		 * This can easily happen if we're using the counters in
		 * "shared" mode, for example, and an overflow interrupt
		 * occurs while we are running cpustat.  In that case, the
		 * bound thread that has the context that belongs to this
		 * CPU is almost certainly sleeping (if it was running on
		 * the CPU we'd have found it above), and the actual
		 * interrupted thread has no knowledge of performance counters!
		 */
		ctx = curthread->t_cpu->cpu_cpc_ctx;
		if (ctx != NULL) {
			/*
			 * Return the bound context for this CPU to
			 * the interrupt handler so that it can synchronously
			 * sample the hardware counters and restart them.
			 */
			return (ctx);
		}

		/*
		 * As long as the overflow interrupt really is delivered early
		 * enough after trapping into the kernel to avoid switching
		 * threads, we must always be able to find the cpc context,
		 * or something went terribly wrong, i.e. we ended up
		 * running a passivated interrupt thread or a kernel
		 * thread, or we interrupted idle, all of which are Very Bad.
		 */
		if (kcpc_nullctx_panic)
			panic("null cpc context, thread %p", (void *)t);
		atomic_add_32(&kcpc_nullctx_count, 1);
	} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
		/*
		 * Schedule an ast to sample the counters, which will
		 * propagate any overflow into the virtualized performance
		 * counter(s), and may deliver a signal.
		 */
		ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		/*
		 * If a counter has overflowed which was counting on behalf of
		 * a request which specified CPC_OVF_NOTIFY_EMT, send the
		 * process a signal.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			if (ctx->kc_pics[i].kp_req != NULL &&
			    bitmap & (1 << i) &&
			    ctx->kc_pics[i].kp_req->kr_flags &
			    CPC_OVF_NOTIFY_EMT) {
				/*
				 * A signal has been requested for this PIC,
				 * so freeze the context. The interrupt handler
				 * has already stopped the counter hardware.
				 */
				atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
				atomic_or_uint(&ctx->kc_pics[i].kp_flags,
				    KCPC_PIC_OVERFLOWED);
			}
		}
		aston(t);
	}
	return (NULL);
}
Example #12
/* ARGSUSED */
static int64_t
cfork(int isvfork, int isfork1, int flags)
{
	proc_t *p = ttoproc(curthread);
	struct as *as;
	proc_t *cp, **orphpp;
	klwp_t *clone;
	kthread_t *t;
	task_t *tk;
	rval_t	r;
	int error;
	int i;
	rctl_set_t *dup_set;
	rctl_alloc_gp_t *dup_gp;
	rctl_entity_p_t e;
	lwpdir_t *ldp;
	lwpent_t *lep;
	lwpent_t *clep;

	/*
	 * Allow only these two flags.
	 */
	if ((flags & ~(FORK_NOSIGCHLD | FORK_WAITPID)) != 0) {
		error = EINVAL;
		goto forkerr;
	}

	/*
	 * fork is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp) {
		error = ENOTSUP;
		goto forkerr;
	}

	if ((error = secpolicy_basic_fork(CRED())) != 0)
		goto forkerr;

	/*
	 * If the calling lwp is doing a fork1() then the
	 * other lwps in this process are not duplicated and
	 * don't need to be held where their kernel stacks can be
	 * cloned.  If doing forkall(), the process is held with
	 * SHOLDFORK, so that the lwps are at a point where their
	 * stacks can be copied which is on entry or exit from
	 * the kernel.
	 */
	if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) {
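		/*
		 * holdlwps() did not complete -- presumably a signal or stop
		 * is pending -- so set the AST flag to have the condition
		 * handled on return to user level and fail with EINTR.
		 */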
		aston(curthread);
		error = EINTR;
		goto forkerr;
	}

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully constructed
	 * before creating the child process structure.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	mutex_enter(&p->p_lock);
	/*
	 * If this is vfork(), cancel any suspend request we might
	 * have gotten from some other thread via lwp_suspend().
	 * Otherwise we could end up with a deadlock on return
	 * from the vfork() in both the parent and the child.
	 */
	if (isvfork)
		curthread->t_proc_flag &= ~TP_HOLDLWP;
	/*
	 * Prevent our resource set associations from being changed during fork.
	 */
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/*
	 * Create a child proc struct. Place a VN_HOLD on appropriate vnodes.
	 */
	if (getproc(&cp, 0) < 0) {
		mutex_enter(&p->p_lock);
		pool_barrier_exit();
		continuelwps(p);
		mutex_exit(&p->p_lock);
		error = EAGAIN;
		goto forkerr;
	}

	TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p);

	/*
	 * Assign an address space to child
	 */
	if (isvfork) {
		/*
		 * Clear any watched areas and remember the
		 * watched pages for restoring in vfwait().
		 */
		as = p->p_as;
		if (avl_numnodes(&as->a_wpage) != 0) {
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
			as_clearwatch(as);
			p->p_wpage = as->a_wpage;
			avl_create(&as->a_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			AS_LOCK_EXIT(as, &as->a_lock);
		}
		cp->p_as = as;
		cp->p_flag |= SVFORK;
	} else {
Example #13
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t	*dip, *next, *last = NULL;
	char		*bn;
	sbd_error_t	*sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
				SR_FAILED_DIP(srh) == NULL) {

			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
								d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
							"%s@%s (aka %s)\n",
							d_name, d_info,
							d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
							"%s@%s\n",
							d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
						bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
							DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */

					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
						"%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}

/*
 * True if thread is virtually stopped.  Similar to CPR_VSTOPPED
 * but from the DR point of view.  These user threads are waiting in
 * the kernel.  Once they return from kernel, they will process
 * the stop signal and stop.
 */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&		\
	((t)->t_proc_flag & TP_CHKPT))


static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t 	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);


			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);


		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);


		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
			tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {

				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
					sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
			cache_psargs, cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
Example #14
void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	ci->ci_want_resched |= flags;
	aston(ci);
}
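
Note that in this port aston() is applied to a struct cpu_info rather than to a process or thread: the pending-AST marker lives on the CPU and is picked up by whatever is returning to user mode there. A plausible definition (an assumption for illustration; the port's own cpu.h is authoritative) would be:

/* Hypothetical per-CPU variant of aston(). */
#define	aston(ci)	((ci)->ci_astpending = 1)
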
Example #15
File: shuttle.c  Project: andreiw/polaris
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * resume the specified thread. The 't' thread must be marked as ONPROC.
 *
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_resume(kthread_t *t, kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);
	cpu_t	*cp;
	disp_lock_t *oldtlp;

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	/*
	 * setting cpu_dispthread before changing thread state
	 * so that kernel preemption will be deferred to after swtch_to()
	 */
	cp = CPU;
	cp->cpu_dispthread = t;
	cp->cpu_dispatch_pri = DISP_PRIO(t);
	/*
	 * Set the wchan0 field so that /proc won't just do a setrun
	 * on this thread when trying to stop a process. Instead,
	 * /proc will mark the thread as VSTOPPED similar to threads
	 * that are blocked on user level condition variables.
	 */
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);

	/*
	 * Update ustate records (there is no waitrq obviously)
	 */
	(void) new_mstate(curthread, LMS_SLEEP);

	thread_lock_high(t);
	oldtlp = t->t_lockp;

	restore_mstate(t);
	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;

	/*
	 * Make sure we end up on the right CPU if we are dealing with bound
	 * CPUs or processor partitions.
	 */
	if (t->t_bound_cpu != NULL || t->t_cpupart != cp->cpu_part) {
		aston(t);
		cp->cpu_runrun = 1;
	}

	/*
	 * We re-assign t_disp_queue and t_lockp of 't' here because
	 * 't' could have been preempted.
	 */
	if (t->t_disp_queue != cp->cpu_disp) {
		t->t_disp_queue = cp->cpu_disp;
		thread_onproc(t, cp);
	}

	/*
	 * We can't call thread_unlock_high() here because t's thread lock
	 * could have changed by thread_onproc() call above to point to
	 * CPU->cpu_thread_lock.
	 */
	disp_lock_exit_high(oldtlp);

	mutex_exit(l);
	/*
	 * Make sure we didn't receive any important events while
	 * we weren't looking
	 */
	if (lwp &&
	    (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread)))
		setrun(curthread);

	swtch_to(t);
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
Example #16
/*
 * Move thread to new partition.  If ignore is non-zero, then CPU
 * bindings should be ignored (this is used when destroying a
 * partition).
 */
static int
cpupart_move_thread(kthread_id_t tp, cpupart_t *newpp, int ignore,
    void *projbuf, void *zonebuf)
{
	cpupart_t *oldpp = tp->t_cpupart;
	int ret;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
	ASSERT(newpp != NULL);

	if (newpp->cp_cpulist == NULL)
		return (EINVAL);

	/*
	 * Check for errors first.
	 */
	thread_lock(tp);
	if ((ret = cpupart_movable_thread(tp, newpp, ignore)) != 0) {
		thread_unlock(tp);
		return (ret);
	}

	/* move the thread */
	if (oldpp != newpp) {
		/*
		 * Make the thread switch to the new partition.
		 */
		tp->t_cpupart = newpp;
		ASSERT(tp->t_lpl != NULL);
		/*
		 * Leave the thread on the same lgroup if possible; otherwise
		 * choose a new lgroup for it.  In either case, update its
		 * t_lpl.
		 */
		if (LGRP_CPUS_IN_PART(tp->t_lpl->lpl_lgrpid, newpp) &&
		    tp->t_lgrp_affinity == NULL) {
			/*
			 * The thread's lgroup has CPUs in the thread's new
			 * partition, so the thread can stay assigned to the
			 * same lgroup.  Update its t_lpl to point to the
			 * lpl_t for its lgroup in its new partition.
			 */
			lgrp_move_thread(tp, &tp->t_cpupart->\
			    cp_lgrploads[tp->t_lpl->lpl_lgrpid], 1);
		} else {
			/*
			 * The thread's lgroup has no cpus in its new
			 * partition or it has specified lgroup affinities,
			 * so choose the best lgroup for the thread and
			 * assign it to that lgroup.
			 */
			lgrp_move_thread(tp, lgrp_choose(tp, tp->t_cpupart),
			    1);
		}
		/*
		 * make sure lpl points to our own partition
		 */
		ASSERT((tp->t_lpl >= tp->t_cpupart->cp_lgrploads) &&
		    (tp->t_lpl < tp->t_cpupart->cp_lgrploads +
		    tp->t_cpupart->cp_nlgrploads));

		ASSERT(tp->t_lpl->lpl_ncpu > 0);

		if (tp->t_state == TS_ONPROC) {
			cpu_surrender(tp);
		} else if (tp->t_state == TS_RUN) {
			(void) dispdeq(tp);
			setbackdq(tp);
		}
	}

	/*
	 * Our binding has changed; set TP_CHANGEBIND.
	 */
	tp->t_proc_flag |= TP_CHANGEBIND;
	aston(tp);

	thread_unlock(tp);
	fss_changepset(tp, newpp, projbuf, zonebuf);

	return (0);		/* success */
}