Code example #1
File: cpu_subr.c  Project: ryo/netbsd-src
static void
cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
{
	bool done = false;
	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp = ci->ci_watchcpus;

	/* Spin for some finite amount of time, scaled to the CPU frequency. */

	for (u_long limit = ci->ci_cpu_freq / 10; !done && limit--; ) {
		kcpuset_copy(kcp, watchset);
		kcpuset_intersect(kcp, wanted);
		done = kcpuset_match(kcp, wanted);
	}

	if (!done) {
		cpuid_t cii;
		kcpuset_copy(kcp, wanted);
		kcpuset_remove(kcp, watchset);
		/* kcpuset_ffs() is 1-based; 0 means no CPU is missing. */
		if ((cii = kcpuset_ffs(kcp)) != 0) {
			printf("Failed to %s:", s);
			do {
				kcpuset_clear(kcp, --cii);
				printf(" cpu%lu", cii);
			} while ((cii = kcpuset_ffs(kcp)) != 0);
			printf("\n");
		}
	}
}
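The waiting idiom above is worth isolating: copy the watch set, intersect it with the wanted set, and declare success on an exact match, giving up after a bounded number of spins so a wedged CPU cannot hang the caller. Below is a minimal user-space sketch of the same pattern, using a plain 64-bit mask and C11 atomics in place of kcpuset_t; all names in it are hypothetical stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the shared watch set: one bit per CPU, set by remote
 * CPUs as they acknowledge the IPI. */
static _Atomic uint64_t acked;

static void
ipi_wait_sketch(const char *what, uint64_t wanted, unsigned long limit)
{
	uint64_t seen = 0;

	while (limit-- > 0) {
		seen = atomic_load(&acked) & wanted;	/* copy + intersect */
		if (seen == wanted)			/* exact match: done */
			return;
	}

	/* Timed out: report the stragglers, lowest CPU index first. */
	printf("Failed to %s:", what);
	for (uint64_t miss = wanted & ~seen; miss != 0; miss &= miss - 1)
		printf(" cpu%d", __builtin_ctzll(miss));
	printf("\n");
}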
Code example #2
File: pmap_tlb.c  Project: goroutines/rumprun
/*
 * pmap_tlb_shootdown: invalidate a page on all CPUs using pmap 'pm'.
 */
void
pmap_tlb_shootdown(struct pmap *pm, vaddr_t va, pt_entry_t pte, tlbwhy_t why)
{
	pmap_tlb_packet_t *tp;
	int s;

#ifndef XEN
	KASSERT((pte & PG_G) == 0 || pm == pmap_kernel());
#endif

	/*
	 * If tearing down the pmap, do nothing.  We will flush later
	 * when we are ready to recycle/destroy it.
	 */
	if (__predict_false(curlwp->l_md.md_gc_pmap == pm)) {
		return;
	}

	if ((pte & PG_PS) != 0) {
		va &= PG_LGFRAME;
	}

	/*
	 * Add the shootdown operation to our pending set.
	 */
	s = splvm();
	tp = (pmap_tlb_packet_t *)curcpu()->ci_pmap_data;

	/* Whole address flush will be needed if PG_G is set. */
	CTASSERT(PG_G == (uint16_t)PG_G);
	tp->tp_pte |= (uint16_t)pte;

	if (tp->tp_count == (uint16_t)-1) {
		/*
		 * Already flushing everything.
		 */
	} else if (tp->tp_count < TP_MAXVA && va != (vaddr_t)-1LL) {
		/* Flush a single page. */
		tp->tp_va[tp->tp_count++] = va;
		KASSERT(tp->tp_count > 0);
	} else {
		/* Flush everything. */
		tp->tp_count = (uint16_t)-1;
	}

	if (pm != pmap_kernel()) {
		kcpuset_merge(tp->tp_cpumask, pm->pm_cpus);
		if (va >= VM_MAXUSER_ADDRESS) {
			kcpuset_merge(tp->tp_cpumask, pm->pm_kernel_cpus);
		}
		tp->tp_userpmap = 1;
	} else {
		kcpuset_copy(tp->tp_cpumask, kcpuset_running);
	}
	pmap_tlbstat_count(pm, va, why);
	splx(s);
}
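The batching policy in the middle of pmap_tlb_shootdown() is the interesting part: accumulate per-page invalidations into a small per-CPU packet, and degrade to a full flush once the packet overflows or a full flush is already pending. Here is a standalone sketch of just that policy; MAXVA and the struct are made-up stand-ins for the real TP_MAXVA and pmap_tlb_packet_t.

#include <stdint.h>

#define MAXVA		16		/* assumed batch size; TP_MAXVA in the original */
#define FLUSH_ALL	((uint16_t)-1)	/* sentinel: flush the whole TLB */

struct tlb_packet {
	uintptr_t va[MAXVA];
	uint16_t  count;		/* 0..MAXVA, or FLUSH_ALL */
};

/* Queue one address, mirroring the tp_count logic above. */
static void
packet_add(struct tlb_packet *tp, uintptr_t va)
{
	if (tp->count == FLUSH_ALL) {
		/* Already flushing everything; nothing more to record. */
	} else if (tp->count < MAXVA) {
		tp->va[tp->count++] = va;	/* room left: batch the page */
	} else {
		tp->count = FLUSH_ALL;		/* overflow: flush everything */
	}
}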
Code example #3
File: cpu_subr.c  Project: ryo/netbsd-src
/*
 * Resume all paused cpus.
 */
void
cpu_resume_others(void)
{
	if (__predict_false(cold))
		return;

	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp = ci->ci_ddbcpus;

	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
	kcpuset_copy(kcp, cpus_paused);
	kcpuset_atomicly_remove(cpus_paused, cpus_paused);

	/* Paused CPUs wake once their bit in cpus_paused is cleared. */
	cpu_ipi_wait("resume", cpus_resumed, kcp);
}
Code example #4
File: cpu_subr.c  Project: ryo/netbsd-src
/*
 * Pause all running cpus, excluding current cpu.
 */
void
cpu_pause_others(void)
{
	struct cpu_info * const ci = curcpu();
	if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
		return;

	kcpuset_t *kcp = ci->ci_ddbcpus;

	kcpuset_copy(kcp, cpus_running);
	kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
	kcpuset_remove(kcp, cpus_paused);

	cpu_broadcast_ipi(IPI_SUSPEND);
	cpu_ipi_wait("pause", cpus_paused, kcp);
}
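Taken together with cpu_resume_others() in example #3, this gives a stop-the-world bracket; the ci_ddbcpus name suggests the kernel debugger (ddb) is the expected caller. A hedged sketch of how a caller might bracket a critical operation; with_all_cpus_paused() is hypothetical, not part of the source.

/* Hypothetical stop-the-world bracket built on the two routines above. */
static void
with_all_cpus_paused(void (*fn)(void *), void *arg)
{
	cpu_pause_others();	/* IPI the other CPUs and wait for the acks */
	(*fn)(arg);		/* only the current CPU is executing here */
	cpu_resume_others();	/* release them and wait for the acks */
}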
Code example #5
File: cpu_subr.c  Project: ryo/netbsd-src
void
cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
{
	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp2 = ci->ci_multicastcpus;

	/* If this CPU is the only one running, there is nobody to IPI. */
	if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
		return;

	kcpuset_copy(kcp2, kcp);
	kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
		kcpuset_clear(kcp2, --cii);
		(void)cpu_send_ipi(cpu_lookup(cii), tag);
	}
}
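The loop shape here also appears in example #1: kcpuset_ffs() returns a 1-based index of the lowest set bit, with 0 meaning the set is empty, so each hit is decremented, cleared, and acted on. A self-contained sketch of the same walk over a plain mask; for_each_cpu() and the mask type are stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Walk every set bit of a mask, lowest first, clearing as we go.
 * __builtin_ffsll() is 1-based with 0 meaning "empty", matching the
 * kcpuset_ffs() contract, hence the --idx before use. */
static void
for_each_cpu(uint64_t mask)
{
	for (int idx; (idx = __builtin_ffsll((long long)mask)) != 0; ) {
		--idx;				/* to a 0-based CPU index */
		mask &= ~(1ULL << idx);		/* like kcpuset_clear() */
		printf("send IPI to cpu%d\n", idx);
	}
}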
Code example #6
/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause to not flood.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}