Example no. 1
static int
tegra_cpufreq_freq_helper(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int fq, oldfq = 0, error;
	uint64_t xc;

	node = *rnode;
	node.sysctl_data = &fq;

	fq = cpufreq_get_rate();
	if (rnode->sysctl_num == cpufreq_node_target)
		oldfq = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != cpufreq_node_target)
		return 0;

	if (atomic_cas_uint(&cpufreq_busy, 0, 1) != 0)
		return EBUSY;

	error = cpufreq_set_rate(fq);
	if (error == 0) {
		xc = xc_broadcast(0, tegra_cpufreq_post, NULL, NULL);
		xc_wait(xc);
		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	atomic_dec_uint(&cpufreq_busy);

	return error;
}
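
The per-CPU hook tegra_cpufreq_post() broadcast above is not shown in this example.  Any function handed to xc_broadcast() or xc_unicast() must have the xcfunc_t shape, i.e. take two void * arguments.  A minimal sketch of such a handler follows; the name and body are placeholders for illustration, not the real Tegra code:

static void
example_post_change(void *arg1, void *arg2)
{

	/*
	 * Per-CPU work to do after the frequency change, e.g. refreshing
	 * any cached clock-derived state on the CPU this runs on.
	 */
}
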
/*
 * Force all CPUs through cpu_switchto(), waiting until complete.
 * Context switching will drain the write buffer on the calling
 * CPU.
 */
static void
ras_sync(void)
{

	/* No need to sync if exiting or single threaded. */
	if (curproc->p_nlwps > 1 && ncpu > 1) {
#ifdef NO_SOFTWARE_PATENTS
		uint64_t where;
		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(where);
#else
		/*
		 * Assumptions:
		 *
		 * o preemption is disabled by the thread in
		 *   ras_lookup().
		 * o proc::p_raslist is only inspected with
		 *   preemption disabled.
		 * o ras_lookup() plus loads reordered in advance
		 *   will take no longer than 1/8s to complete.
		 */
		const int delta = hz >> 3;
		int target = hardclock_ticks + delta;
		do {
			kpause("ras", false, delta, NULL);
		} while (hardclock_ticks < target);
#endif
	}
}
Example no. 3
static void
percpu_cpu_enlarge(size_t size)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_cpu_t pcc;

		pcc.pcc_data = kmem_alloc(size, KM_SLEEP); /* XXX cacheline */
		pcc.pcc_size = size;
		if (!mp_online) {
			percpu_cpu_swap(ci, &pcc);
		} else {
			uint64_t where;

			uvm_lwp_hold(curlwp); /* don't swap out pcc */
			where = xc_unicast(0, percpu_cpu_swap, ci, &pcc, ci);
			xc_wait(where);
			uvm_lwp_rele(curlwp);
		}
		KASSERT(pcc.pcc_size < size);
		if (pcc.pcc_data != NULL) {
			kmem_free(pcc.pcc_data, pcc.pcc_size);
		}
	}
}
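
percpu_cpu_swap() above is called directly while the system is still single-threaded (!mp_online) and via xc_unicast() afterwards, so that it always runs on the CPU whose per-CPU area is being replaced.  Its implementation is not part of this example; a hedged sketch of what such a swap routine could look like, assuming a cpu_percpu() accessor for the per-CPU descriptor (that accessor and the exact field layout are assumptions):

static void
percpu_cpu_swap_sketch(void *vci, void *vnew)
{
	struct cpu_info * const ci = vci;
	percpu_cpu_t * const newpcc = vnew;
	percpu_cpu_t * const oldpcc = cpu_percpu(ci);	/* assumed accessor */
	percpu_cpu_t tmp;

	/* Running on ci itself, so nothing else touches its area now. */
	KASSERT(newpcc->pcc_size >= oldpcc->pcc_size);
	if (oldpcc->pcc_data != NULL)
		memcpy(newpcc->pcc_data, oldpcc->pcc_data, oldpcc->pcc_size);

	/* Publish the larger area; hand the old one back to be freed. */
	tmp = *oldpcc;
	*oldpcc = *newpcc;
	*newpcc = tmp;
}

Because xc_wait() is reached before pcc goes out of scope, passing the address of the caller's on-stack pcc to the remote CPU is safe.
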
Example no. 4
int
acpicpu_md_cstate_stop(void)
{
	static char text[16];
	void (*func)(void);
	uint64_t xc;
	bool ipi;

	x86_cpu_idle_get(&func, text, sizeof(text));

	if (func == native_idle)
		return EALREADY;

	ipi = (native_idle == x86_cpu_idle_halt);
	x86_cpu_idle_set(native_idle, native_idle_text, ipi);

	/*
	 * Run a cross-call to ensure that all CPUs are
	 * out of the ACPI idle loop before detachment.
	 */
	xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(xc);

	return 0;
}
static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}
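
Note that &freq points into the caller's stack frame; this is safe only because xc_wait() blocks until every CPU has finished running cf_set_freq, so the argument outlives all remote uses.  A hedged sketch of a caller, mainly to show the locking protocol that the KASSERTs above demand (the function name cpufreq_set_all is made up):

static void
cpufreq_set_all(uint32_t freq)
{

	mutex_enter(&cpufreq_lock);
	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}
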
static void
tprof_amdpmi_stop(tprof_backend_cookie_t *cookie)
{
	uint64_t xc;

	xc = xc_broadcast(0, tprof_amdpmi_stop_cpu, NULL, NULL);
	xc_wait(xc);

	KASSERT(tprof_amdpmi_nmi_handle != NULL);
	KASSERT(tprof_cookie == cookie);
	nmi_disestablish(tprof_amdpmi_nmi_handle);
	tprof_amdpmi_nmi_handle = NULL;
	tprof_cookie = NULL;
}
/*
 * pcu_lwp_op: perform PCU state save, release or both operations on LWP.
 */
static void
pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *l, const int flags)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci;
	uint64_t where;
	int s;

	/*
	 * Caller should have re-checked if there is any state to manage.
	 * Block the interrupts and inspect again, since cross-call sent
	 * by remote CPU could have changed the state.
	 */
	s = splsoftclock();
	ci = l->l_pcu_cpu[id];
	if (ci == curcpu()) {
		/*
		 * State is on the current CPU - just perform the operations.
		 */
		KASSERT((flags & PCU_CLAIM) == 0);
		KASSERTMSG(ci->ci_pcu_curlwp[id] == l,
		    "%s: cpu%u: pcu_curlwp[%u] (%p) != l (%p)",
		     __func__, cpu_index(ci), id, ci->ci_pcu_curlwp[id], l);
		pcu_do_op(pcu, l, flags);
		splx(s);
		return;
	}

	if (__predict_false(ci == NULL)) {
		if (flags & PCU_CLAIM) {
			pcu_do_op(pcu, l, flags);
		}
		/* Cross-call has won the race - no state to manage. */
		splx(s);
		return;
	}

	splx(s);

	/*
	 * State is on the remote CPU - perform the operations there.
	 * Note: there is a race condition; see description in the top.
	 */
	where = xc_unicast(XC_HIGHPRI, (xcfunc_t)pcu_cpu_op,
	    __UNCONST(pcu), (void *)(uintptr_t)flags, ci);
	xc_wait(where);

	KASSERT((flags & PCU_RELEASE) == 0 || l->l_pcu_cpu[id] == NULL);
}
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}
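
Here the second cross-call argument acts as a result pointer: the handler runs on the target CPU and stores its answer through it, and xc_wait() guarantees that store has completed before freq is returned.  A hedged sketch of what a backend's cf_get_freq handler might look like (the handler name and md_read_cpu_frequency() are hypothetical placeholders):

static void
example_get_freq(void *cookie, void *vfreq)
{
	uint32_t * const freq = vfreq;

	/* Runs on the CPU selected by xc_unicast(); report its clock. */
	*freq = md_read_cpu_frequency(cookie);	/* hypothetical MD helper */
}
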
/*
 * pcu_load: load/initialize the PCU state of current LWP on current CPU.
 */
void
pcu_load(const pcu_ops_t *pcu)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci, *curci;
	lwp_t * const l = curlwp;
	uint64_t where;
	int s;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	s = splsoftclock();
	curci = curcpu();
	ci = l->l_pcu_cpu[id];

	/* Does this CPU already have our PCU state loaded? */
	if (ci == curci) {
		KASSERT(curci->ci_pcu_curlwp[id] == l);
		pcu->pcu_state_load(l, PCU_ENABLE);	/* Re-enable */
		splx(s);
		return;
	}

	/* If PCU state of this LWP is on the remote CPU - save it there. */
	if (ci) {
		splx(s);
		/* Note: there is a race; see description in the top. */
		where = xc_unicast(XC_HIGHPRI, (xcfunc_t)pcu_cpu_op,
		    __UNCONST(pcu), (void *)(PCU_SAVE | PCU_RELEASE), ci);
		xc_wait(where);

		/* Enter IPL_SOFTCLOCK and re-fetch the current CPU. */
		s = splsoftclock();
		curci = curcpu();
	}
	KASSERT(l->l_pcu_cpu[id] == NULL);

	/* Save the PCU state on the current CPU, if there is any. */
	pcu_cpu_op(pcu, PCU_SAVE | PCU_RELEASE);
	KASSERT(curci->ci_pcu_curlwp[id] == NULL);

	/*
	 * Finally, load the state for this LWP on this CPU.  Indicate to
	 * load function whether PCU was used before.  Note the usage.
	 */
	pcu_do_op(pcu, l, PCU_CLAIM | PCU_ENABLE | PCU_RELOAD);
	splx(s);
}
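
A typical caller of pcu_load() is the machine-dependent trap handler that fires when an LWP touches a disabled per-CPU unit (FPU, SIMD unit, and so on): the handler passes that unit's pcu_ops_t, and pcu_load() saves the state off whatever CPU last held it, then loads it locally.  A hedged sketch; both names below are hypothetical, as the real ops structure is machine-dependent:

extern const pcu_ops_t fpu_ops;	/* hypothetical MD ops descriptor */

void
fpu_trap_handler(void)
{

	/* Pull this LWP's FPU state to the current CPU and enable it. */
	pcu_load(&fpu_ops);
}
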
int
kobj_machdep(kobj_t ko, void *base, size_t size, bool load)
{
	uint64_t where;

	if (load) {
		if (cold) {
			wbinvd();
		} else {
			where = xc_broadcast(0, (xcfunc_t)wbinvd, NULL, NULL);
			xc_wait(where);
		}
	}

	return 0;
}
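
wbinvd() takes no arguments, so it is cast to xcfunc_t above, the same casting trick used with nullop elsewhere in these examples.  An alternative that avoids the function-pointer cast is a two-argument wrapper with the proper xcfunc_t signature (the wrapper name is made up):

static void
wbinvd_xcall(void *arg1, void *arg2)
{

	/* Write back and invalidate caches on the CPU this runs on. */
	wbinvd();
}

The broadcast then becomes xc_broadcast(0, wbinvd_xcall, NULL, NULL), with no cast needed.
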
Example no. 11
/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause to not flood.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}
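
The matching read side is not shown above.  Readers bracket their lockless access with pserialize_read_enter()/pserialize_read_exit() and must neither sleep inside the section nor hold on to objects past it without taking a reference.  A hedged sketch of a reader over a made-up list (struct frob and frob_list are invented for illustration; the pserialize calls are the standard NetBSD reader interface):

struct frob {
	LIST_ENTRY(frob)	f_entry;
	int			f_key;
};
static LIST_HEAD(, frob) frob_list = LIST_HEAD_INITIALIZER(frob_list);

static bool
frob_key_exists(int key)
{
	struct frob *f;
	bool found = false;
	int s;

	s = pserialize_read_enter();
	LIST_FOREACH(f, &frob_list, f_entry) {
		if (f->f_key == key) {
			found = true;
			break;
		}
	}
	pserialize_read_exit(s);

	return found;
}

The writer unlinks an entry, calls pserialize_perform() to wait out all such read sections, and only then frees the entry.
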
Example no. 12
int
acpicpu_md_pstate_start(struct acpicpu_softc *sc)
{
	uint64_t xc, val;

	switch (cpu_vendor) {

	case CPUVENDOR_IDT:
	case CPUVENDOR_INTEL:

		/*
		 * Make sure EST is enabled.
		 */
		if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) != 0) {

			val = rdmsr(MSR_MISC_ENABLE);

			if ((val & MSR_MISC_ENABLE_EST) == 0) {

				val |= MSR_MISC_ENABLE_EST;
				wrmsr(MSR_MISC_ENABLE, val);
				val = rdmsr(MSR_MISC_ENABLE);

				if ((val & MSR_MISC_ENABLE_EST) == 0)
					return ENOTTY;
			}
		}
	}

	/*
	 * Reset the APERF and MPERF counters.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0) {
		xc = xc_broadcast(0, acpicpu_md_pstate_hwf_reset, NULL, NULL);
		xc_wait(xc);
	}

	return acpicpu_md_pstate_sysctl_init();
}
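
acpicpu_md_pstate_hwf_reset() is not shown here; since the hardware-feedback counters on this hardware are the APERF/MPERF MSR pair, the reset most plausibly zeroes them on every CPU.  A hedged sketch only; the MSR macro names and the exact behaviour are assumptions, not the actual NetBSD implementation:

static void
example_hwf_reset(void *arg1, void *arg2)
{

	/* Zero the hardware-feedback counters on this CPU (assumed MSRs). */
	wrmsr(MSR_MPERF, 0);
	wrmsr(MSR_APERF, 0);
}
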
Example no. 13
static int
tprof_amdpmi_start(tprof_backend_cookie_t *cookie)
{
	struct cpu_info * const ci = curcpu();
	uint64_t xc;

	if (cpu_vendor != CPUVENDOR_AMD ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 0xf) { /* XXX */
		return ENOTSUP;
	}

	KASSERT(tprof_amdpmi_nmi_handle == NULL);
	tprof_amdpmi_nmi_handle = nmi_establish(tprof_amdpmi_nmi, NULL);

	counter_reset_val = -counter_val + 1;
	xc = xc_broadcast(0, tprof_amdpmi_start_cpu, NULL, NULL);
	xc_wait(xc);

	KASSERT(tprof_cookie == NULL);
	tprof_cookie = cookie;

	return 0;
}