Example #1
/*
 * pserialize_switchpoint:
 *
 *	Monitor system context switch activity.  Called from machine
 *	independent code after mi_switch() returns.
 */
void
pserialize_switchpoint(void)
{
	pserialize_t psz, next;
	cpuid_t cid;

	/*
	 * If no updates pending, bail out.  No need to lock in order to
	 * test psz_work_todo; the only ill effect of missing an update
	 * would be to delay LWPs waiting in pserialize_perform().  That
	 * will not happen because updates are on the queue before an
	 * xcall is generated (serialization) to tickle every CPU.
	 */
	if (__predict_true(psz_work_todo == 0)) {
		return;
	}
	mutex_spin_enter(&psz_lock);
	cid = cpu_index(curcpu());

	/*
	 * First, scan through the second queue and update each request;
	 * once one has passed all processors, transfer it to the third
	 * queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue1); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue1, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue2, psz, psz_chain);
	}
	/*
	 * Next, scan through the first queue and update each request;
	 * once one has passed all processors, move it to the second
	 * queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue0); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue0, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue1, psz, psz_chain);
	}
	/*
	 * Process the third queue: its entries have been seen twice on
	 * every processor, so remove them from the queue and notify the
	 * updating thread.
	 */
	while ((psz = TAILQ_FIRST(&psz_queue2)) != NULL) {
		TAILQ_REMOVE(&psz_queue2, psz, psz_chain);
		kcpuset_zero(psz->psz_target);
		psz_work_todo--;
	}
	mutex_spin_exit(&psz_lock);
}
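
The switchpoint above is the scanner that retires pending pserialize(9) updates; readers never touch these queues and only need to block preemption around their lookups. Below is a minimal read-side sketch under stated assumptions: the list head things, struct thing and thing_lookup() are hypothetical, used only to illustrate the pattern, and do not appear in the code above.

#include <sys/param.h>
#include <sys/pserialize.h>
#include <sys/queue.h>

/* Hypothetical pserialize-protected list; illustrative only. */
struct thing {
	LIST_ENTRY(thing)	t_entry;
	int			t_key;
	int			t_value;
};
static LIST_HEAD(, thing)	things = LIST_HEAD_INITIALIZER(things);

static bool
thing_lookup(int key, int *valp)
{
	struct thing *t;
	bool found = false;
	int s;

	s = pserialize_read_enter();	/* begin read section: no preemption */
	LIST_FOREACH(t, &things, t_entry) {
		if (t->t_key == key) {
			*valp = t->t_value;	/* use the object inside the section */
			found = true;
			break;
		}
	}
	pserialize_read_exit(s);	/* end read section */
	return found;
}

Because a read section never blocks or context switches, two switchpoint passes on every CPU (the queue0 -> queue1 -> queue2 march above) guarantee that any reader which could still see a just-unpublished object has finished.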
Example #2
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX: stub reports only the primary CPU */
}
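
A caller of interrupt_get_assigned() would typically allocate and free the target set itself with the kcpuset(9) API. A short sketch under stated assumptions; report_assignment() and the interrupt id string "ioapic0 pin 10" are made up for illustration.

#include <sys/kcpuset.h>
#include <sys/systm.h>

static void
report_assignment(void)
{
	kcpuset_t *cpuset;

	kcpuset_create(&cpuset, true);		/* allocate a zeroed set */
	interrupt_get_assigned("ioapic0 pin 10", cpuset);
	if (kcpuset_isset(cpuset, 0))
		printf("interrupt runs on the primary CPU\n");
	kcpuset_destroy(cpuset);
}

Since the stub above reports only CPU 0 whenever the id resolves, the answer is a placeholder until per-CPU interrupt routing is implemented for this port.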
Example #3
void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}
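
The filled-in set can be walked with the same kcpuset(9) primitives the function uses internally. A small sketch, assuming the kernel's global ncpu CPU count; print_available_cpus() is a made-up name.

#include <sys/cpu.h>
#include <sys/kcpuset.h>
#include <sys/systm.h>

static void
print_available_cpus(void)
{
	kcpuset_t *cpuset;
	cpuid_t i;

	kcpuset_create(&cpuset, true);
	interrupt_get_available(cpuset);
	for (i = 0; i < (cpuid_t)ncpu; i++) {
		if (kcpuset_isset(cpuset, i))
			printf("cpu%lu accepts interrupts\n", (u_long)i);
	}
	kcpuset_destroy(cpuset);
}

CPUs flagged SPCF_NOINTR (administratively shielded from device interrupts) are exactly the ones the function filters out.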
Example #4
/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause briefly so we do
		 * not flood the system with cross calls.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}
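
Tying Examples #1 and #4 together: the canonical write side unpublishes an object under the caller's update lock, runs pserialize_perform() to drain readers, and only then frees the memory. A hedged sketch reusing the hypothetical things list from the first sketch; writer_lock, things_psz and thing_remove() are likewise illustrative.

#include <sys/kmem.h>
#include <sys/mutex.h>

static kmutex_t		writer_lock;	/* hypothetical update lock */
static pserialize_t	things_psz;	/* from pserialize_create() at attach time */

static void
thing_remove(struct thing *t)
{
	mutex_enter(&writer_lock);
	LIST_REMOVE(t, t_entry);	/* unpublish: new readers cannot find it */
	pserialize_perform(things_psz);	/* wait out readers already in a section */
	mutex_exit(&writer_lock);
	kmem_free(t, sizeof(*t));	/* safe: no read section can reach it now */
}

Note that pserialize_perform() can sleep in kpause(), so writer_lock must be an adaptive mutex (IPL_NONE), never a spin mutex.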