Example #1
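/*
 * Bring all other CPUs to a safe phase and hold them there.  After
 * pausing them, poke each CPU that has not yet reported in, re-poking
 * at most once per POKE_TIMEOUT, and spin until every CPU has reached
 * CPU_PHASE_SAFE or CPU_PHASE_POWERED_OFF.
 */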
void
mp_enter_barrier(void)
{
	hrtime_t last_poke_time = 0;
	int poke_allowed = 0;
	int done = 0;
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	pause_cpus(NULL);

	while (!done) {
		done = 1;
		poke_allowed = 0;

		if (xpv_gethrtime() - last_poke_time > POKE_TIMEOUT) {
			last_poke_time = xpv_gethrtime();
			poke_allowed = 1;
		}

		for (i = 0; i < NCPU; i++) {
			cpu_t *cp = cpu_get(i);

			if (cp == NULL || cp == CPU)
				continue;

			switch (cpu_phase[i]) {
			case CPU_PHASE_NONE:
				cpu_phase[i] = CPU_PHASE_WAIT_SAFE;
				poke_cpu(i);
				done = 0;
				break;

			case CPU_PHASE_WAIT_SAFE:
				if (poke_allowed)
					poke_cpu(i);
				done = 0;
				break;

			case CPU_PHASE_SAFE:
			case CPU_PHASE_POWERED_OFF:
				break;
			}
		}

		SMT_PAUSE();
	}
}
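
The function above shows only the initiator's side of the handshake. For context, here is a minimal sketch of what the poked CPU's side might look like; mp_pause_self and the release protocol are assumptions, not the actual illumos handler.

/*
 * Hypothetical responder, run on a poked CPU (not the actual illumos
 * handler): acknowledge the barrier by advancing our phase, then spin
 * until the initiator releases us by changing it again.
 */
static void
mp_pause_self(void)
{
	int me = CPU->cpu_id;

	/* The initiator set CPU_PHASE_WAIT_SAFE for us and poked us. */
	cpu_phase[me] = CPU_PHASE_SAFE;

	/* cpu_phase[] is assumed volatile so this spin is not optimized. */
	while (cpu_phase[me] == CPU_PHASE_SAFE)
		SMT_PAUSE();
}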
Example #2
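/*
 * Poke a CPU so it re-evaluates its state: on Solaris this maps
 * straight onto the kernel's poke_cpu().  An out-of-range CPU id is
 * silently ignored and the call still returns VINF_SUCCESS.
 */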
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    RT_ASSERT_INTS_ON();
    if (idCpu < ncpus)
        poke_cpu(idCpu);
    return VINF_SUCCESS;
}
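
A hedged usage sketch: poke every online CPU other than the caller's own, e.g. to kick them out of a halted state. pokeAllOtherCpus is a hypothetical helper and assumes dense CPU IDs, which holds on Solaris where RTMpPokeCpu() maps onto poke_cpu(); RTMpCpuId(), RTMpGetMaxCpuId(), and RTMpIsCpuOnline() are standard IPRT calls.

#include <iprt/mp.h>

/*
 * Hypothetical helper (not part of IPRT): poke every online CPU
 * except the caller's own, forcing each out of any halted state.
 */
static void pokeAllOtherCpus(void)
{
    RTCPUID const idSelf = RTMpCpuId();
    for (RTCPUID idCpu = 0; idCpu <= RTMpGetMaxCpuId(); idCpu++)
        if (idCpu != idSelf && RTMpIsCpuOnline(idCpu))
            (void)RTMpPokeCpu(idCpu);
}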
Example #3
/*
 * This routine tries to stop all user threads before we get rid of
 * all their pages.  It walks the allthreads list, sets the TP_CHKPT
 * flag for each user thread, and makes it runnable.  If all of the
 * threads can be stopped within the max wait time, CPR will proceed.
 * Otherwise CPR is aborted after a few similar retries.
 */
static void
cpr_stop_user(int wait)
{
	kthread_id_t tp;
	proc_t *p;

	/* The whole loop below needs to be atomic */
	mutex_enter(&pidlock);

	/* faster this way */
	tp = curthread->t_next;
	do {
		/* kernel threads will be handled later */
		p = ttoproc(tp);
		if (p->p_as == &kas || p->p_stat == SZOMB)
			continue;

		/*
		 * If the thread is stopped (by CPR) already, do nothing;
		 * if running, mark TP_CHKPT;
		 * if sleeping normally, mark TP_CHKPT and setrun;
		 * if sleeping non-interruptibly, mark TP_CHKPT only for now;
		 * if sleeping with t_wchan0 != 0 etc., it is virtually
		 * stopped, so do nothing.
		 */

		/* p_lock is needed for modifying t_proc_flag */
		mutex_enter(&p->p_lock);
		thread_lock(tp); /* needed to check CPR_ISTOPPED */

		if (tp->t_state == TS_STOPPED) {
			/*
			 * if already stopped for other reasons, add this
			 * new reason to them.
			 */
			if (tp->t_schedflag & TS_RESUME)
				tp->t_schedflag &= ~TS_RESUME;
		} else {

			tp->t_proc_flag |= TP_CHKPT;

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
			add_one_utstop();
			mutex_enter(&p->p_lock);
			thread_lock(tp);

			aston(tp);

			if (tp->t_state == TS_SLEEP &&
			    (tp->t_flag & T_WAKEABLE)) {
				setrun_locked(tp);
			}
		}
		/*
		 * force the thread into the kernel if it is not already there.
		 */
		if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
			poke_cpu(tp->t_cpu->cpu_id);
		thread_unlock(tp);
		mutex_exit(&p->p_lock);

	} while ((tp = tp->t_next) != curthread);
	mutex_exit(&pidlock);

	utstop_timedwait(wait);
}
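
This routine, like the DR variants in the following examples, relies on the utstop helpers (utstop_init, add_one_utstop, utstop_timedwait) defined elsewhere in the kernel. Below is a minimal sketch of the counting scheme their names suggest; it is an assumption, not the illumos implementation, and one_utstop is a hypothetical name for the decrement a stopping thread would perform.

static volatile int	ut_stop_count;	/* threads asked to stop, not yet stopped */
static kmutex_t		ut_stop_lock;
static kcondvar_t	ut_stop_cv;

void
utstop_init(void)
{
	mutex_enter(&ut_stop_lock);
	ut_stop_count = 0;
	mutex_exit(&ut_stop_lock);
}

void
add_one_utstop(void)
{
	mutex_enter(&ut_stop_lock);
	ut_stop_count++;
	mutex_exit(&ut_stop_lock);
}

/* Hypothetical: a stopping thread would call this from its stop path. */
void
one_utstop(void)
{
	mutex_enter(&ut_stop_lock);
	if (--ut_stop_count <= 0)
		cv_broadcast(&ut_stop_cv);
	mutex_exit(&ut_stop_lock);
}

void
utstop_timedwait(clock_t wait)
{
	clock_t deadline = ddi_get_lbolt() + wait;

	mutex_enter(&ut_stop_lock);
	while (ut_stop_count > 0 &&
	    cv_timedwait(&ut_stop_cv, &ut_stop_lock, deadline) > 0)
		continue;
	mutex_exit(&ut_stop_lock);
}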
Example #4
/* ARGSUSED */
static int
dr_stop_user_threads(dr_sr_handle_t *srh)
{
	int		count;
	int		bailout;
	dr_handle_t	*handle = srh->sr_dr_handlep;
	static fn_t	f = "dr_stop_user_threads";
	kthread_id_t 	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	DR_UTSTOP_RETRY	4
#define	DR_UTSTOP_WAIT	hz

	if (dr_skip_user_threads)
		return (DDI_SUCCESS);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	srh->sr_err_idx = 0;
	for (count = 0; count < DR_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (tp->t_state == TS_SLEEP &&
				    (tp->t_flag & T_WAKEABLE)) {
					setrun_locked(tp);
				}

			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);


			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);


		/* let everything catch up */
		utstop_timedwait(count * count * DR_UTSTOP_WAIT);


		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(dr_allow_blocked_threads &&
			    DR_VSTOPPED(tp))) {
				bailout = 1;
				if (count == DR_UTSTOP_RETRY - 1) {
					/*
					 * save the pid for later reporting
					 */
					srh->sr_err_idx =
					    dr_add_int(srh->sr_err_ints,
					    srh->sr_err_idx, DR_MAX_ERR_INT,
					    (uint64_t)p->p_pid);

					cmn_err(CE_WARN, "%s: "
					    "failed to stop thread: "
					    "process=%s, pid=%d",
					    f, p->p_user.u_psargs, p->p_pid);

					PR_QR("%s: failed to stop thread: "
					    "process=%s, pid=%d, t_id=0x%p, "
					    "t_state=0x%x, t_proc_flag=0x%x, "
					    "t_schedflag=0x%x\n",
					    f, p->p_user.u_psargs, p->p_pid,
					    tp, tp->t_state, tp->t_proc_flag,
					    tp->t_schedflag);
				}

			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		handle->h_err = drerr_int(ESBD_UTHREAD, srh->sr_err_ints,
			srh->sr_err_idx, 0);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
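
The stop test above turns on two predicates. Here is a sketch of plausible definitions, assuming DR_VSTOPPED mirrors the SBDP_VSTOPPED macro shown later in this section; consult <sys/cpr.h> and the DR headers for the authoritative versions.

/*
 * Plausible definitions of the two stop predicates (a sketch, not the
 * authoritative headers).  A thread is stopped once it sits in
 * TS_STOPPED with TS_RESUME cleared, and "virtually stopped" if it
 * sleeps in the kernel with an AST pending and TP_CHKPT set.
 */
#define	CPR_ISTOPPED(t)	((t)->t_state == TS_STOPPED && \
	!((t)->t_schedflag & TS_RESUME))

#define	DR_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&	\
	(t)->t_wchan != NULL &&		\
	(t)->t_astflag &&		\
	((t)->t_proc_flag & TP_CHKPT))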
Example #5
/*
 * c-state wakeup function.
 * Similar to cpu_wakeup and cpu_wakeup_mwait except this function deals
 * with CPUs asleep in MWAIT, HLT, or ACPI Deep C-State.
 */
void
cstate_wakeup(cpu_t *cp, int bound)
{
	struct machcpu	*mcpu = &(cp->cpu_m);
	volatile uint32_t *mcpu_mwait = mcpu->mcpu_mwait;
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	processorid_t	cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;
	/*
	 * If the CPU is in the halted set, it needs an explicit wakeup;
	 * otherwise it is merely idle or context-switching and can be
	 * left alone.
	 */
	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);

		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cp != CPU) {
			/*
			 * Use correct wakeup mechanism
			 */
			if ((mcpu_mwait != NULL) &&
			    (*mcpu_mwait == MWAIT_HALTED))
				MWAIT_WAKEUP(cp);
			else
				poke_cpu(cp->cpu_id);
		}
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there are any other halted CPUs.  If there are,
	 * select one and awaken it.  It's possible that after we find
	 * a CPU, somebody else will awaken it before we get the
	 * chance.  In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;

	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Must use correct wakeup mechanism to avoid lost wakeup of
	 * alternate cpu.
	 */
	if (cpu_found != CPU->cpu_seqid) {
		mcpu_mwait = cpu[cpu_found]->cpu_m.mcpu_mwait;
		if ((mcpu_mwait != NULL) && (*mcpu_mwait == MWAIT_HALTED))
			MWAIT_WAKEUP(cpu_seq[cpu_found]);
		else
			poke_cpu(cpu_seq[cpu_found]->cpu_id);
	}
}
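
MWAIT_WAKEUP() above works without an IPI because the sleeping CPU has armed MONITOR on its mcpu_mwait word: any store to that word breaks it out of MWAIT. Below is a minimal sketch of that idle-side handshake, assuming i86_monitor()/i86_mwait() style primitives and that 0 means running; it is illustrative, not the illumos idle loop.

/*
 * Illustrative idle-side handshake (not the actual illumos idle
 * loop): arm MONITOR on our mcpu_mwait word, re-check it, and sleep
 * in MWAIT.  Any store to the word, e.g. MWAIT_WAKEUP() from
 * cstate_wakeup() above, breaks the CPU out without an IPI.
 */
static void
mwait_halt_sketch(volatile uint32_t *mcpu_mwait)
{
	*mcpu_mwait = MWAIT_HALTED;	/* advertise that we are asleep */
	i86_monitor(mcpu_mwait, 0, 0);	/* arm the monitor on the word */
	if (*mcpu_mwait == MWAIT_HALTED) /* nobody woke us in the window */
		i86_mwait(0, 0);	/* sleep until the word is written */
	*mcpu_mwait = 0;		/* running again (MWAIT_RUNNING) */
}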
Example #6
/*
 * Walk the device tree in reverse sibling order, re-attaching
 * (DDI_RESUME) each real device that was suspended and recursing
 * into children; the dip that failed during suspend is released
 * instead of resumed.
 */
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t	*dip, *next, *last = NULL;
	char		*bn;
	sbd_error_t	*sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
				SR_FAILED_DIP(srh) == NULL) {

			if (DEVI(dip)->devi_binding_name != NULL)
				bn = ddi_binding_name(dip);
			else
				bn = "<null>";	/* keep bn from being used uninitialized below */
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
								d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
							"%s@%s (aka %s)\n",
							d_name, d_info,
							d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
							"%s@%s\n",
							d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
						bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
							DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */

					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
						"%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}

/*
 * True if the thread is virtually stopped.  Similar to CPR_VSTOPPED
 * but from the DR point of view.  These user threads are waiting in
 * the kernel.  Once they return from the kernel, they will process
 * the stop signal and stop.
 */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&			\
	((t)->t_proc_flag & TP_CHKPT))


static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t 	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);


			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);


		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);


		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
			tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {

				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
					sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
			cache_psargs, cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}