Example #1
static int
xendebug_attach(device_t dev)
{
	int i, error;

	mtx_init(&lock, "xen-dbg", NULL, MTX_SPIN);
	buf = sbuf_new(NULL, NULL, 1024, SBUF_FIXEDLEN);
	if (buf == NULL)
		panic("Unable to create sbuf for stack dump");
	sbuf_set_drain(buf, xendebug_drain, NULL);

	/* Bind an event channel to a VIRQ on each VCPU. */
	CPU_FOREACH(i) {
		error = xen_intr_bind_virq(dev, VIRQ_DEBUG, i, xendebug_filter,
		    NULL, NULL, INTR_TYPE_TTY,
		    DPCPU_ID_PTR(i, xendebug_handler));
		if (error != 0) {
			printf("Failed to bind VIRQ_DEBUG to vCPU %d: %d",
			    i, error);
			continue;
		}
		xen_intr_describe(DPCPU_ID_GET(i, xendebug_handler), "d%d", i);
	}

	return (0);
}
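
The per-vCPU handler slots that DPCPU_ID_PTR() and DPCPU_ID_GET() touch above live in a dynamic per-CPU (DPCPU) variable. A minimal sketch of the declaration this example assumes, using the FreeBSD DPCPU API (the variable name matches the code above; the header paths are assumptions):

#include <sys/param.h>
#include <sys/pcpu.h>
#include <xen/xen_intr.h>

/* One interrupt handle per CPU: DPCPU_ID_PTR(i, xendebug_handler) yields
 * the address of CPU i's slot, DPCPU_ID_GET(i, ...) its current value. */
DPCPU_DEFINE(xen_intr_handle_t, xendebug_handler);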
Example #2
/*
 * Reconfigure specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int i)
{
#ifdef SMP
	tc *conf;
	int cpu;

	critical_enter();
#endif
	/* Start/stop global timer or per-CPU timer of this CPU. */
	if (i == 0 ? timer1hz : timer2hz)
		et_start(timer[i], NULL, &timerperiod[i]);
	else
		et_stop(timer[i]);
#ifdef SMP
	if ((timer[i]->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		conf = DPCPU_ID_PTR(cpu, configtimer);
		atomic_store_rel_int(*conf + i, (cpu == curcpu) ? 0 : 1);
	}
	/* Send reconfigure IPI. */
	ipi_all_but_self(i == 0 ? IPI_HARDCLOCK : IPI_STATCLOCK);
	/* Wait for reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		conf = DPCPU_ID_PTR(cpu, configtimer);
		if (atomic_load_acq_int(*conf + i))
			goto restart;
	}
	critical_exit();
#endif
}
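
Note that tc above is not a timecounter: in the kern_clocksource.c this example appears to come from, it is a small per-CPU flag array, which is why *conf + i yields a pointer to the flag for timer i. A sketch of the assumed declarations:

typedef u_int tc[2];		/* one reconfigure flag per timer */
DPCPU_DEFINE(tc, configtimer);	/* DPCPU_ID_PTR() thus returns u_int (*)[2] */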
Example #3
/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;	/* WSIDs currently equal CPU IDs. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}
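
A handler of this shape takes SYSCTL_HANDLER_ARGS and is wired up with SYSCTL_PROC(9). A hedged sketch of the registration (the parent OID, flags, and description string are assumptions consistent with the net.isr tree):

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_netisr_work, "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");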
Example #4
static void
pefs_aesni_uninit(struct pefs_alg *pa)
{
	struct fpu_kern_ctx *fpu_ctx;
	u_int cpuid;

	CPU_FOREACH(cpuid) {
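		/* Atomically detach the context so it is freed at most once. */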
		fpu_ctx = (void *)atomic_swap_ptr(
		    (volatile void *)DPCPU_ID_PTR(cpuid, pefs_aesni_fpu),
		    (uintptr_t)NULL);
		if (fpu_ctx != NULL)
			fpu_kern_free_ctx(fpu_ctx);
	}
}
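
For context, this teardown mirrors an allocation side that populates the same DPCPU slots. A minimal sketch under the assumption of an eager init routine (hypothetical; the real driver may allocate the contexts lazily on first use):

static void
pefs_aesni_init(struct pefs_alg *pa)
{
	u_int cpuid;

	CPU_FOREACH(cpuid) {
		/* One FPU context per CPU; fpu_kern_alloc_ctx() may sleep. */
		DPCPU_ID_SET(cpuid, pefs_aesni_fpu, fpu_kern_alloc_ctx(0));
	}
}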
Example #5
/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}
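
From userspace, records exported this way can be read with sysctlbyname(3), as netstat -Q does. A sketch, assuming the OID is named net.isr.workstream and that net/netisr.h exposes struct sysctl_netisr_workstream to userland:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <net/netisr.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	void *buf;
	size_t len = 0;

	/* First call sizes the buffer, second call fills it. */
	if (sysctlbyname("net.isr.workstream", NULL, &len, NULL, 0) == -1)
		return (1);
	if ((buf = malloc(len)) == NULL)
		return (1);
	if (sysctlbyname("net.isr.workstream", buf, &len, NULL, 0) == -1)
		return (1);
	printf("%zu workstream records\n",
	    len / sizeof(struct sysctl_netisr_workstream));
	free(buf);
	return (0);
}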