Example #1
/*
 * pserialize_switchpoint:
 *
 *	Monitor system context switch activity.  Called from machine
 *	independent code after mi_switch() returns.
 */ 
void
pserialize_switchpoint(void)
{
	pserialize_t psz, next;
	cpuid_t cid;

	/*
	 * If no updates pending, bail out.  No need to lock in order to
	 * test psz_work_todo; the only ill effect of missing an update
	 * would be to delay LWPs waiting in pserialize_perform().  That
	 * will not happen because updates are on the queue before an
	 * xcall is generated (serialization) to tickle every CPU.
	 */
	if (__predict_true(psz_work_todo == 0)) {
		return;
	}
	mutex_spin_enter(&psz_lock);
	cid = cpu_index(curcpu());

	/*
	 * At first, scan the second queue and update each request; if it
	 * has passed all processors, transfer it to the third queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue1); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue1, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue2, psz, psz_chain);
	}
	/*
	 * Scan the first queue and update each request; if it has passed
	 * all processors, move it to the second queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue0); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue0, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue1, psz, psz_chain);
	}
	/*
	 * Process the third queue: its entries have been seen twice on
	 * every processor, so remove them from the queue and notify the
	 * updating thread.
	 */
	while ((psz = TAILQ_FIRST(&psz_queue2)) != NULL) {
		TAILQ_REMOVE(&psz_queue2, psz, psz_chain);
		kcpuset_zero(psz->psz_target);
		psz_work_todo--;
	}
	mutex_spin_exit(&psz_lock);
}
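
For context, here is a minimal sketch of the updater side that this switchpoint services, following the usual pserialize(9) pattern: pserialize_perform() blocks until every CPU has passed through pserialize_switchpoint() twice. The struct frob, frob_lock and frob_psz names are hypothetical and used only for illustration.

struct frob {
	LIST_ENTRY(frob) f_entry;
	/* ... payload ... */
};

static LIST_HEAD(, frob) frob_list = LIST_HEAD_INITIALIZER(frob_list);
static kmutex_t frob_lock;		/* adaptive mutex serializing updaters */
static pserialize_t frob_psz;		/* obtained from pserialize_create() */

static void
frob_remove(struct frob *f)
{
	mutex_enter(&frob_lock);
	LIST_REMOVE(f, f_entry);	/* unpublish the entry */
	pserialize_perform(frob_psz);	/* wait for two passes on every CPU */
	mutex_exit(&frob_lock);
	kmem_free(f, sizeof(*f));	/* no lockless reader can still see it */
}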
Example #2
/*
 * Halt all running cpus, excluding current cpu.
 */
void
cpu_halt_others(void)
{
	kcpuset_t *kcp;

	// If we are the only CPU running, there's nothing to do.
	if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset))
		return;

	// Get all running CPUs
	kcpuset_clone(&kcp, cpus_running);
	// Remove ourselves
	kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset);
	// Remove any halted CPUs
	kcpuset_remove(kcp, cpus_halted);
	// If there are CPUs left, send the IPIs
	if (!kcpuset_iszero(kcp)) {
		cpu_multicast_ipi(kcp, IPI_HALT);
		cpu_ipi_wait("halt", cpus_halted, kcp);
	}
	kcpuset_destroy(kcp);

	/*
	 * TBD
	 * Depending on available firmware methods, other CPUs will
	 * either shut themselves down or spin and wait for us to
	 * stop them.
	 */
}
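
The acknowledgement that cpu_ipi_wait() polls for comes from the halted CPUs themselves. The handler is machine dependent; a minimal sketch of what it might look like, with cpu_ipi_halt as a hypothetical name:

static void
cpu_ipi_halt(struct cpu_info *ci)		/* hypothetical handler name */
{
	const cpuid_t cid = cpu_index(ci);

	(void)splhigh();			/* block further interrupts */
	kcpuset_atomic_set(cpus_halted, cid);	/* report ourselves as halted */
	for (;;) {
		/* spin (or call into firmware) until powered off or reset */
	}
}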
Example #3
static void
cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
{
	bool done = false;
	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp = ci->ci_watchcpus;

	/* Poll for some finite amount of time (~ci_cpu_freq/10 iterations). */

	for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) {
		kcpuset_copy(kcp, watchset);
		kcpuset_intersect(kcp, wanted);
		done = kcpuset_match(kcp, wanted);
	}

	if (!done) {
		cpuid_t cii;
		kcpuset_copy(kcp, wanted);
		kcpuset_remove(kcp, watchset);
		if ((cii = kcpuset_ffs(kcp)) != 0) {
			printf("Failed to %s:", s);
			do {
				kcpuset_clear(kcp, --cii);
				printf(" cpu%lu", cii);
			} while ((cii = kcpuset_ffs(kcp)) != 0);
			printf("\n");
		}
	}
}
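
Note that kcpuset_ffs() returns the 1-based index of the first set bit, or 0 when the set is empty, which is why the loop above decrements cii before clearing and printing it. A generic walk over a kcpuset follows the same pattern (kcpuset_print_members is a hypothetical helper):

static void
kcpuset_print_members(kcpuset_t *kcp)
{
	cpuid_t cii;

	while ((cii = kcpuset_ffs(kcp)) != 0) {
		cii--;				/* convert to a 0-based cpu index */
		kcpuset_clear(kcp, cii);	/* consume the bit; the walk empties the set */
		printf("cpu%lu is in the set\n", cii);
	}
}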
Example #4
/*
 * Pause all running cpus, excluding current cpu.
 */
void
cpu_pause_others(void)
{
	struct cpu_info * const ci = curcpu();
	if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
		return;

	kcpuset_t *kcp = ci->ci_ddbcpus;

	kcpuset_copy(kcp, cpus_running);
	kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
	kcpuset_remove(kcp, cpus_paused);

	cpu_broadcast_ipi(IPI_SUSPEND);
	cpu_ipi_wait("pause", cpus_paused, kcp);
}
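
On the receiving side, the IPI_SUSPEND handler is expected to mark itself in cpus_paused (which cpu_ipi_wait() polls) and spin until the debugger releases it. A rough sketch, assuming a cpus_resumed set exists alongside cpus_paused and using cpu_pause_self as a hypothetical name:

static void
cpu_pause_self(struct cpu_info *ci)
{
	const cpuid_t cid = cpu_index(ci);
	const int s = splhigh();

	kcpuset_atomic_set(cpus_paused, cid);	/* acknowledge the pause */
	do {
		/* spin until the debugger clears our bit */
	} while (kcpuset_isset(cpus_paused, cid));
	kcpuset_atomic_set(cpus_resumed, cid);	/* assumed resume bookkeeping */

	splx(s);
}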
Example #5
void
cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
{
	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp2 = ci->ci_multicastcpus;

	if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
		return;

	kcpuset_copy(kcp2, kcp);
	kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
		kcpuset_clear(kcp2, --cii);
		(void)cpu_send_ipi(cpu_lookup(cii), tag);
	}
}
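
Usage sketch: to target a single CPU, build a one-bit set with kcpuset_create(), whose second argument asks for the allocated set to be zeroed. The IPI_NOP tag and cpu_send_nop_ipi name are assumed here purely for illustration:

static void
cpu_send_nop_ipi(cpuid_t target)
{
	kcpuset_t *kcp;

	kcpuset_create(&kcp, true);		/* allocate a zeroed set */
	kcpuset_set(kcp, target);
	cpu_multicast_ipi(kcp, IPI_NOP);	/* hypothetical IPI tag */
	kcpuset_destroy(kcp);
}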
Example #6
static inline void
pmap_tlb_processpacket(pmap_tlb_packet_t *tp, kcpuset_t *target)
{
	int err = 0;

	if (!kcpuset_match(target, kcpuset_attached)) {
		const struct cpu_info * const self = curcpu();
		CPU_INFO_ITERATOR cii;
		struct cpu_info *lci;

		for (CPU_INFO_FOREACH(cii, lci)) {
			const cpuid_t lcid = cpu_index(lci);

			if (__predict_false(lci == self) ||
			    !kcpuset_isset(target, lcid)) {
				continue;
			}
			err |= x86_ipi(LAPIC_TLB_VECTOR,
			    lci->ci_cpuid, LAPIC_DLMODE_FIXED);
		}
	} else {