Example No. 1
/*
 * Wait for the specified idle threads to switch once.  This ensures that even
 * preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error)
				goto out;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
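A minimal caller sketch of the pattern described in the comment above: publish a new global pointer, then call quiesce_cpus() so every CPU's idle thread has switched before the old object is freed. The example_* names, the "exqsce" wait message, and the M_TEMP malloc type are illustrative assumptions, not part of the original code.

/* Hypothetical sketch: retire a globally visible pointer safely. */
static void *example_global_ptr;

static int
example_replace_ptr(void *newp)
{
	void *oldp;
	int error;

	oldp = example_global_ptr;
	example_global_ptr = newp;	/* new readers pick up the new object */
	/* Wait until every CPU's idle thread has switched once. */
	error = quiesce_cpus(all_cpus, "exqsce", PWAIT);
	if (error == 0)
		free(oldp, M_TEMP);	/* no CPU can still be using it */
	return (error);
}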
Example No. 2
/*
 * Find the nth present CPU and return its pc_cpuid as well as set the
 * pc_acpi_id from the most reliable source.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu	*pcpu_data;
    uint32_t	 i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    for (i = 0; i <= mp_maxid; i++) {
	if (CPU_ABSENT(i))
	    continue;
	pcpu_data = pcpu_find(i);
	KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
	if (idx-- == 0) {
	    /*
	     * If pc_acpi_id was not initialized (e.g., a non-APIC UP box)
	     * override it with the value from the ASL.  Otherwise, if the
	     * two don't match, prefer the MADT-derived value.  Finally,
	     * return the pc_cpuid to reference this processor.
	     */
	    if (pcpu_data->pc_acpi_id == 0xffffffff)
		pcpu_data->pc_acpi_id = *acpi_id;
	    else if (pcpu_data->pc_acpi_id != *acpi_id)
		*acpi_id = pcpu_data->pc_acpi_id;
	    *cpu_id = pcpu_data->pc_cpuid;
	    return (0);
	}
    }

    return (ESRCH);
}
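A hedged sketch of how a driver might call this helper: pass the ASL-derived ACPI id in, let the function correct it from the MADT if needed, and keep the returned pc_cpuid. Using device_get_unit() as the "nth present CPU" index and the example_* wrapper are assumptions for illustration.

/* Hypothetical caller sketch: resolve the Nth present CPU. */
static int
example_resolve_cpu(device_t dev, uint32_t asl_acpi_id, u_int *cpup)
{
	uint32_t acpi_id, cpu_id;

	acpi_id = asl_acpi_id;		/* value parsed from the ASL */
	if (acpi_pcpu_get_id(device_get_unit(dev), &acpi_id, &cpu_id) != 0)
		return (ENXIO);		/* no such processor */
	*cpup = cpu_id;			/* pc_cpuid for this processor */
	return (0);
}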
Example No. 3
/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
static inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

#ifdef	RSS
	if (per_cpu_timers) {
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
	}
#else
	/* Legacy, pre-RSS behaviour */
	if (per_cpu_timers) {
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (! CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	}
#endif
	/* Default for RSS and non-RSS - cpuid 0 */
	return (0);
}
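A sketch of the intended use: arm a per-connection timer on the CPU the inpcb maps to, so timer work stays on the flow's CPU. The tcpcb, tt_rexmt callout, tcp_timer_rexmt handler, and tick count stand in for the surrounding TCP timer plumbing and are not part of this snippet.

/* Hypothetical sketch: arm a connection's retransmit timer on its CPU. */
static void
example_arm_rexmt(struct tcpcb *tp, struct inpcb *inp, int timeout_ticks)
{
	int cpu;

	cpu = inp_to_cpuid(inp);	/* flowid -> CPU, or curcpu/0 fallback */
	callout_reset_on(&tp->t_timers->tt_rexmt, timeout_ticks,
	    tcp_timer_rexmt, tp, cpu);
}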
Example No. 4
/*
 * Setup per-CPU domain IDs.
 */
static void
srat_set_cpus(void *dummy)
{
	struct cpu_info *cpu;
	struct pcpu *pc;
	u_int i;

	if (srat_physaddr == 0)
		return;
	for (i = 0; i < MAXCPU; i++) {
		if (CPU_ABSENT(i))
			continue;
		pc = pcpu_find(i);
		KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
		cpu = &cpus[pc->pc_apic_id];
		if (!cpu->enabled)
			panic("SRAT: CPU with APIC ID %u is not known",
			    pc->pc_apic_id);
		pc->pc_domain = cpu->domain;
		CPU_SET(i, &cpuset_domain[cpu->domain]);
		if (bootverbose)
			printf("SRAT: CPU %u has memory domain %d\n", i,
			    cpu->domain);
	}
}
Example No. 5
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &softclock_ih))
		panic("died while creating standard software ithreads");
	cc->cc_cookie = softclock_ih;
#ifdef SMP
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (cpu == timeout_cpu)
			continue;
		if (CPU_ABSENT(cpu))
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}
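For context, an initializer like this runs once the software-interrupt subsystem is up; a sketch of how it is typically registered via SYSINIT (the subsystem and ordering constants are my assumption of the usual choice, not quoted from this snippet):

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock,
    NULL);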
Example No. 6
int
pmc_cpu_is_present(int cpu)
{
#ifdef	SMP
    return (!CPU_ABSENT(cpu));
#else
    return (1);
#endif
}
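A small sketch of how a predicate like this is typically used: iterate over the possible CPU range and skip holes in a sparse numbering. pmc_cpu_max() is assumed as the upper-bound helper, and the loop body is illustrative only.

/* Hypothetical sketch: visit only the CPUs that are actually present. */
static void
example_foreach_present_cpu(void)
{
	int cpu;

	for (cpu = 0; cpu < pmc_cpu_max(); cpu++) {
		if (!pmc_cpu_is_present(cpu))
			continue;	/* hole in a sparse CPU numbering */
		/* ... per-CPU PMC setup would go here ... */
	}
}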
Example No. 7
void
smp_rendezvous_cpus(cpumask_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	for (i = 0; i <= mp_maxid; i++)
		if (((1 << i) & map) != 0 && !CPU_ABSENT(i))
			ncpus++;
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
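A hedged usage sketch: run a function on every CPU in a mask with interrupts disabled, which is the usual reason to use the rendezvous mechanism. The per-CPU counter is invented for illustration; note that this older interface takes a cpumask_t bitmask (as all_cpus was in that era), not a cpuset_t.

/* Hypothetical sketch: clear an invented per-CPU counter everywhere. */
static u_long example_counter[MAXCPU];

static void
example_clear_action(void *arg __unused)
{
	/* Runs on each target CPU with interrupts disabled. */
	example_counter[curcpu] = 0;
}

/* ... later, from a thread context: ... */
	smp_rendezvous_cpus(all_cpus, NULL, example_clear_action, NULL, NULL);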
Example No. 8
/*
 * Setup per-CPU domain IDs from information saved in 'cpus'.
 */
void
acpi_pxm_set_cpu_locality(void)
{
	struct cpu_info *cpu;
	struct pcpu *pc;
	u_int i;

	if (srat_physaddr == 0)
		return;
	for (i = 0; i < MAXCPU; i++) {
		if (CPU_ABSENT(i))
			continue;
		pc = pcpu_find(i);
		KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
		cpu = cpu_get_info(pc);
		pc->pc_domain = vm_ndomains > 1 ? cpu->domain : 0;
		CPU_SET(i, &cpuset_domain[pc->pc_domain]);
		if (bootverbose)
			printf("SRAT: CPU %u has memory domain %d\n", i,
			    pc->pc_domain);
	}
}
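Once pc_domain is populated, NUMA-aware code can steer allocations toward the calling CPU's domain. A sketch under the assumption that the kernel provides the domainset allocation API (malloc_domainset() and DOMAINSET_PREF()); the M_DEVBUF type and PAGE_SIZE-style sizing are illustrative.

/* Hypothetical sketch: prefer memory from the calling CPU's domain. */
static void *
example_alloc_local(size_t size)
{
	return (malloc_domainset(size, M_DEVBUF,
	    DOMAINSET_PREF(PCPU_GET(domain)), M_WAITOK | M_ZERO));
}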
Example No. 9
static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
	}
	return error;
}
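From userland, the exported array can be read back with sysctlbyname(); a minimal sketch (error handling trimmed) that derives the per-CPU count from the returned length, mirroring the sizing logic above:

/* Hypothetical userland sketch: read per-CPU CPUSTATES counters. */
#include <sys/types.h>
#include <sys/resource.h>	/* CPUSTATES */
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t len = 0;
	long *cp_times;
	u_int ncpu, i;

	sysctlbyname("kern.cp_times", NULL, &len, NULL, 0);
	cp_times = malloc(len);
	sysctlbyname("kern.cp_times", cp_times, &len, NULL, 0);
	ncpu = len / (sizeof(long) * CPUSTATES);
	for (i = 0; i < ncpu; i++)
		printf("cpu%u user ticks: %ld\n", i, cp_times[i * CPUSTATES]);
	free(cp_times);
	return (0);
}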
Example No. 10
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
    return idCpu <= mp_maxid
        && !CPU_ABSENT(idCpu);
}
Example No. 11
static void
rss_init(__unused void *arg)
{
	u_int i;
	u_int cpuid;

	/*
	 * Validate tunables, coerce to sensible values.
	 */
	switch (rss_hashalgo) {
	case RSS_HASH_TOEPLITZ:
	case RSS_HASH_NAIVE:
		break;

	default:
		RSS_DEBUG("invalid RSS hashalgo %u, coercing to %u\n",
		    rss_hashalgo, RSS_HASH_TOEPLITZ);
		rss_hashalgo = RSS_HASH_TOEPLITZ;
	}

	/*
	 * Count available CPUs.
	 *
	 * XXXRW: Note incorrect assumptions regarding contiguity of this set
	 * elsewhere.
	 */
	rss_ncpus = 0;
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		rss_ncpus++;
	}
	if (rss_ncpus > RSS_MAXCPUS)
		rss_ncpus = RSS_MAXCPUS;

	/*
	 * Tune RSS table entries to be no less than 2x the number of CPUs
	 * -- unless we're running uniprocessor, in which case there's not
	 * much point in having buckets to rearrange for load-balancing!
	 */
	if (rss_ncpus > 1) {
		if (rss_bits == 0)
			rss_bits = fls(rss_ncpus - 1) + 1;

		/*
		 * Microsoft limits RSS table entries to 128, so apply that
		 * limit to both auto-detected CPU counts and user-configured
		 * ones.
		 */
		if (rss_bits == 0 || rss_bits > RSS_MAXBITS) {
			RSS_DEBUG("RSS bits %u not valid, coercing to %u\n",
			    rss_bits, RSS_MAXBITS);
			rss_bits = RSS_MAXBITS;
		}

		/*
		 * Figure out how many buckets to use; warn if less than the
		 * number of configured CPUs, although this is not a fatal
		 * problem.
		 */
		rss_buckets = (1 << rss_bits);
		if (rss_buckets < rss_ncpus)
			RSS_DEBUG("WARNING: rss_buckets (%u) less than "
			    "rss_ncpus (%u)\n", rss_buckets, rss_ncpus);
		rss_mask = rss_buckets - 1;
	} else {
		rss_bits = 0;
		rss_buckets = 1;
		rss_mask = 0;
	}

	/*
	 * Set up initial CPU assignments: round-robin by default.
	 */
	cpuid = CPU_FIRST();
	for (i = 0; i < rss_buckets; i++) {
		rss_table[i].rte_cpu = cpuid;
		cpuid = CPU_NEXT(cpuid);
	}

	/*
	 * Randomize rss_key.
	 *
	 * XXXRW: Not yet.  If nothing else, will require an rss_isbadkey()
	 * loop to check for "bad" RSS keys.
	 */
}
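After this initialization, a flow hash is mapped to a CPU by masking into the indirection table; a sketch of the lookup (equivalent in spirit to helpers elsewhere in the file, which are assumed rather than quoted here):

/* Hypothetical sketch: map a 32-bit flow hash to an RSS bucket and CPU. */
static u_int
example_hash_to_cpu(uint32_t hash)
{
	u_int bucket;

	bucket = hash & rss_mask;		/* rss_buckets is a power of two */
	return (rss_table[bucket].rte_cpu);	/* CPU assigned at init time */
}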