Example #1
static int
tegra_cpufreq_freq_helper(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int fq, oldfq = 0, error;
	uint64_t xc;

	node = *rnode;
	node.sysctl_data = &fq;

	fq = cpufreq_get_rate();
	if (rnode->sysctl_num == cpufreq_node_target)
		oldfq = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != cpufreq_node_target)
		return 0;

	if (atomic_cas_uint(&cpufreq_busy, 0, 1) != 0)
		return EBUSY;

	error = cpufreq_set_rate(fq);
	if (error == 0) {
		xc = xc_broadcast(0, tegra_cpufreq_post, NULL, NULL);
		xc_wait(xc);
		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	atomic_dec_uint(&cpufreq_busy);

	return error;
}
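Every example on this page uses the same two calls from <sys/xcall.h>: xc_broadcast() queues a function to run once on each CPU and returns a ticket, and xc_wait() sleeps until the last CPU has run it. A minimal sketch of that pattern; the per-CPU hook my_percpu_hook and the caller run_on_all_cpus are hypothetical names for illustration, not part of the example above.

#include <sys/param.h>
#include <sys/xcall.h>

/* Hypothetical per-CPU hook: runs once on every CPU, in thread context. */
static void
my_percpu_hook(void *arg1, void *arg2)
{

	/* arg1 and arg2 are the two pointers given to xc_broadcast(). */
}

/* Run the hook everywhere and wait until the last CPU has finished it. */
static void
run_on_all_cpus(void *cookie)
{
	uint64_t xc;

	xc = xc_broadcast(0, my_percpu_hook, cookie, NULL);
	xc_wait(xc);
}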
Example #2
/*
 * Force all CPUs through cpu_switchto(), waiting until complete.
 * Context switching will drain the write buffer on the calling
 * CPU.
 */
static void
ras_sync(void)
{

	/* No need to sync if exiting or single threaded. */
	if (curproc->p_nlwps > 1 && ncpu > 1) {
#ifdef NO_SOFTWARE_PATENTS
		uint64_t where;
		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(where);
#else
		/*
		 * Assumptions:
		 *
		 * o preemption is disabled by the thread in
		 *   ras_lookup().
		 * o proc::p_raslist is only inspected with
		 *   preemption disabled.
		 * o ras_lookup() plus loads reordered in advance
		 *   will take no longer than 1/8s to complete.
		 */
		const int delta = hz >> 3;
		int target = hardclock_ticks + delta;
		do {
			kpause("ras", false, delta, NULL);
		} while (hardclock_ticks < target);
#endif
	}
}
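The #else branch above is only safe because of the stated assumptions: the lookup side keeps preemption disabled while it inspects proc::p_raslist, so a bounded pause (or, in the #ifdef branch, an empty cross-call) is enough to know that every in-flight lookup has finished. A hedged sketch of that reader-side discipline; ras_list_contains() and the elided list walk are illustrative, not the actual ras_lookup():

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

/*
 * Illustrative reader: inspect p_raslist only with preemption disabled,
 * so ras_sync()'s pause/cross-call is a valid completion barrier.
 */
static bool
ras_list_contains(struct proc *p, void *addr)
{
	bool found = false;

	kpreempt_disable();
	/* ... walk p->p_raslist and compare entries against addr ... */
	kpreempt_enable();

	return found;
}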
Example #3
int
acpicpu_md_cstate_stop(void)
{
	static char text[16];
	void (*func)(void);
	uint64_t xc;
	bool ipi;

	x86_cpu_idle_get(&func, text, sizeof(text));

	if (func == native_idle)
		return EALREADY;

	ipi = (native_idle != x86_cpu_idle_halt) ? false : true;
	x86_cpu_idle_set(native_idle, native_idle_text, ipi);

	/*
	 * Run a cross-call to ensure that all CPUs are
	 * out from the ACPI idle-loop before detachment.
	 */
	xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(xc);

	return 0;
}
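The (xcfunc_t)nullop cast above is just a way to get an empty cross-call: once every CPU has executed the no-op in thread context, none of them can still be inside the old idle routine. A sketch of the same barrier with a properly typed no-op handler instead of the cast (xc_nop and xc_barrier_all are made-up names for illustration); newer versions of the xcall(9) API also provide xc_barrier() for exactly this purpose.

#include <sys/param.h>
#include <sys/xcall.h>

/* Correctly typed no-op cross-call handler. */
static void
xc_nop(void *arg1, void *arg2)
{

	/* Nothing to do: merely running on each CPU is the point. */
}

/* Wait until every CPU has passed through the cross-call mechanism. */
static void
xc_barrier_all(void)
{
	uint64_t xc;

	xc = xc_broadcast(0, xc_nop, NULL, NULL);
	xc_wait(xc);
}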
Example #4
static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}
Example #5
static void
tprof_amdpmi_stop(tprof_backend_cookie_t *cookie)
{
	uint64_t xc;

	xc = xc_broadcast(0, tprof_amdpmi_stop_cpu, NULL, NULL);
	xc_wait(xc);

	KASSERT(tprof_amdpmi_nmi_handle != NULL);
	KASSERT(tprof_cookie == cookie);
	nmi_disestablish(tprof_amdpmi_nmi_handle);
	tprof_amdpmi_nmi_handle = NULL;
	tprof_cookie = NULL;
}
Example #6
int
kobj_machdep(kobj_t ko, void *base, size_t size, bool load)
{
	uint64_t where;

	if (load) {
		if (cold) {
			wbinvd();
		} else {
			where = xc_broadcast(0, (xcfunc_t)wbinvd, NULL, NULL);
			xc_wait(where);
		}
	}

	return 0;
}
Example #7
/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause to not flood.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}
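The write side above only pairs with readers that bracket their lookups with pserialize_read_enter()/pserialize_read_exit() and never block in between. A hedged sketch of a matching reader and updater; the list, its adaptive lock, and the item type (my_list, my_lock, struct item) are hypothetical, and a real lockless list would use pserialize-safe list primitives or explicit memory ordering rather than the bare queue(3) macros shown here.

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/pserialize.h>
#include <sys/queue.h>

struct item {
	LIST_ENTRY(item) i_entry;
	int i_key;
};

static LIST_HEAD(, item) my_list;	/* hypothetical shared list */
static kmutex_t my_lock;		/* adaptive lock (MUTEX_DEFAULT, IPL_NONE) */
static pserialize_t my_psz;		/* from pserialize_create() */

/* Reader: no locks, must not sleep, must not touch the object after exit. */
static bool
item_present(int key)
{
	struct item *it;
	bool found = false;
	int s;

	s = pserialize_read_enter();
	LIST_FOREACH(it, &my_list, i_entry) {
		if (it->i_key == key) {
			found = true;
			break;
		}
	}
	pserialize_read_exit(s);

	return found;
}

/* Updater: unlink under the lock, drain readers, then free. */
static void
item_remove(struct item *it)
{

	mutex_enter(&my_lock);
	LIST_REMOVE(it, i_entry);
	pserialize_perform(my_psz);	/* every CPU has context-switched */
	mutex_exit(&my_lock);

	kmem_free(it, sizeof(*it));
}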
Example #8
int
acpicpu_md_pstate_start(struct acpicpu_softc *sc)
{
	uint64_t xc, val;

	switch (cpu_vendor) {

	case CPUVENDOR_IDT:
	case CPUVENDOR_INTEL:

		/*
		 * Make sure EST is enabled.
		 */
		if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) != 0) {

			val = rdmsr(MSR_MISC_ENABLE);

			if ((val & MSR_MISC_ENABLE_EST) == 0) {

				val |= MSR_MISC_ENABLE_EST;
				wrmsr(MSR_MISC_ENABLE, val);
				val = rdmsr(MSR_MISC_ENABLE);

				if ((val & MSR_MISC_ENABLE_EST) == 0)
					return ENOTTY;
			}
		}
	}

	/*
	 * Reset the APERF and MPERF counters.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0) {
		xc = xc_broadcast(0, acpicpu_md_pstate_hwf_reset, NULL, NULL);
		xc_wait(xc);
	}

	return acpicpu_md_pstate_sysctl_init();
}
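acpicpu_md_pstate_hwf_reset() itself is not shown here; it has to be broadcast because APERF and MPERF are per-core MSRs, so each CPU must clear its own copy. A hedged sketch of such a per-CPU reset handler, using the architectural MSR numbers (IA32_MPERF = 0xE7, IA32_APERF = 0xE8) as stand-ins since NetBSD's own specialreg.h macros are not reproduced in this example:

#include <sys/param.h>
#include <machine/cpufunc.h>		/* wrmsr() */

#define MY_MSR_MPERF	0x0e7		/* assumed architectural numbers; */
#define MY_MSR_APERF	0x0e8		/* real code uses the specialreg.h macros */

/* Cross-call handler: zero the hardware-feedback counters on this CPU. */
static void
pstate_hwf_reset_cpu(void *arg1, void *arg2)
{

	wrmsr(MY_MSR_MPERF, 0);
	wrmsr(MY_MSR_APERF, 0);
}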
Example #9
static int
tprof_amdpmi_start(tprof_backend_cookie_t *cookie)
{
	struct cpu_info * const ci = curcpu();
	uint64_t xc;

	if (!(cpu_vendor == CPUVENDOR_AMD) ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 0xf) { /* XXX */
		return ENOTSUP;
	}

	KASSERT(tprof_amdpmi_nmi_handle == NULL);
	tprof_amdpmi_nmi_handle = nmi_establish(tprof_amdpmi_nmi, NULL);

	counter_reset_val = - counter_val + 1;
	xc = xc_broadcast(0, tprof_amdpmi_start_cpu, NULL, NULL);
	xc_wait(xc);

	KASSERT(tprof_cookie == NULL);
	tprof_cookie = cookie;

	return 0;
}