Example #1
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	ACPI_STATUS rv;

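	/* Let the MD layer map this device to its cpu_info. */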
	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;

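	/* Find the ACPI handle that corresponds to this CPU. */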
	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

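	/* Query capabilities, the CPU count, and the MD feature flags (Example #2). */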
	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}
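
Once attached, the softc is recorded in the global acpicpu_sc array, indexed by the CPU's ACPI id, which lets the rest of the driver map a struct cpu_info back to its softc. A minimal lookup sketch along those lines, reusing the names from the listing above (acpicpu_sc_lookup itself is a hypothetical helper, not part of the driver):

static struct acpicpu_softc *
acpicpu_sc_lookup(struct cpu_info *ci)
{

	/* Hypothetical helper: map a cpu_info to the softc registered above. */
	if (acpicpu_sc == NULL || ci == NULL)
		return NULL;

	return acpicpu_sc[ci->ci_acpiid];
}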
Example #2
uint32_t
acpicpu_md_flags(void)
{
	struct cpu_info *ci = curcpu();
	struct pci_attach_args pa;
	uint32_t family, val = 0;
	uint32_t regs[4];
	uint64_t msr;

	if (acpi_md_ncpus() == 1)
		val |= ACPICPU_FLAG_C_BM;

	if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0)
		val |= ACPICPU_FLAG_C_FFH;

	/*
	 * By default, assume that both the local APIC timer
	 * and the TSC stall during C3 sleep.
	 */
	val |= ACPICPU_FLAG_C_APIC | ACPICPU_FLAG_C_TSC;

	/*
	 * Detect whether the TSC is invariant. If it is not, keep the flag
	 * to note that the TSC will not run at a constant rate. Depending
	 * on the CPU, this may affect P- and T-state changes, but it is
	 * especially relevant for C-states; with a variant TSC, states
	 * deeper than C1 may completely stop the counter.
	 */
	if (tsc_is_invariant())
		val &= ~ACPICPU_FLAG_C_TSC;

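	/* Vendor-specific feature detection follows. */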
	switch (cpu_vendor) {

	case CPUVENDOR_IDT:

		if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
			val |= ACPICPU_FLAG_P_FFH;

		if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
			val |= ACPICPU_FLAG_T_FFH;

		break;

	case CPUVENDOR_INTEL:

		/*
		 * Bus master control and arbitration should be
		 * available on all supported Intel CPUs (to be
		 * sure, this is double-checked later from the
		 * firmware data). These flags imply that it is
		 * not necessary to flush caches before entering
		 * the C3 state.
		 */
		val |= ACPICPU_FLAG_C_BM | ACPICPU_FLAG_C_ARB;

		/*
		 * Check if we can use "native", MSR-based access.
		 * If not, we have to resort to I/O.
		 */
		if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
			val |= ACPICPU_FLAG_P_FFH;

		if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
			val |= ACPICPU_FLAG_T_FFH;

		/*
		 * Check whether MSR_APERF, MSR_MPERF, and Turbo
		 * Boost are available. Also see if we might have
		 * an invariant local APIC timer ("ARAT").
		 */
		if (cpuid_level >= 0x06) {

			x86_cpuid(0x00000006, regs);

			if ((regs[2] & CPUID_DSPM_HWF) != 0)
				val |= ACPICPU_FLAG_P_HWF;

			if ((regs[0] & CPUID_DSPM_IDA) != 0)
				val |= ACPICPU_FLAG_P_TURBO;

			if ((regs[0] & CPUID_DSPM_ARAT) != 0)
				val &= ~ACPICPU_FLAG_C_APIC;
		}

		break;

	case CPUVENDOR_AMD:

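		/* Make sure the extended APM leaf (0x80000007) is available. */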
		x86_cpuid(0x80000000, regs);

		if (regs[0] < 0x80000007)
			break;

		x86_cpuid(0x80000007, regs);

		family = CPUID_TO_FAMILY(ci->ci_signature);

		switch (family) {

		case 0x0f:

			/*
			 * Flag C1E ("enhanced halt") if present,
			 * so that it can be disabled later.
			 */
			if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT)
				val |= ACPICPU_FLAG_C_C1E;

			/*
			 * Evaluate support for the "FID/VID
			 * algorithm" also used by powernow(4).
			 */
			if ((regs[3] & CPUID_APM_FID) == 0)
				break;

			if ((regs[3] & CPUID_APM_VID) == 0)
				break;

			val |= ACPICPU_FLAG_P_FFH | ACPICPU_FLAG_P_FIDVID;
			break;

		case 0x10:
		case 0x11:

			/*
			 * Flag C1E ("enhanced halt") if present,
			 * so that it can be disabled later.
			 */
			if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT)
				val |= ACPICPU_FLAG_C_C1E;

			/* FALLTHROUGH */

		case 0x12:
		case 0x14: /* AMD Fusion */
		case 0x15: /* AMD Bulldozer */

			/*
			 * As with Intel, detect MSR-based P-states
			 * and AMD's "turbo" (Core Performance Boost).
			 */
			if ((regs[3] & CPUID_APM_HWP) != 0)
				val |= ACPICPU_FLAG_P_FFH;

			if ((regs[3] & CPUID_APM_CPB) != 0)
				val |= ACPICPU_FLAG_P_TURBO;

			/*
			 * Also check for APERF and MPERF,
			 * first available in family 10h.
			 */
			if (cpuid_level >= 0x06) {

				x86_cpuid(0x00000006, regs);

				if ((regs[2] & CPUID_DSPM_HWF) != 0)
					val |= ACPICPU_FLAG_P_HWF;
			}

			break;
		}

		break;
	}

	/*
	 * There are several errata for PIIX4.
	 */
	if (pci_find_device(&pa, acpicpu_md_quirk_piix4) != 0)
		val |= ACPICPU_FLAG_PIIX4;

	return val;
}
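
For reference, the invariant-TSC property consulted via tsc_is_invariant() above is advertised architecturally in CPUID leaf 0x80000007, EDX bit 8, on both Intel and AMD. A stand-alone check of just that bit could look like the sketch below; the real tsc_is_invariant() may apply further vendor- and family-specific checks, and tsc_invariant_cpuid_bit is a hypothetical name used only for illustration:

static bool
tsc_invariant_cpuid_bit(void)
{
	uint32_t regs[4];

	/* Make sure the extended APM leaf exists before reading it. */
	x86_cpuid(0x80000000, regs);

	if (regs[0] < 0x80000007)
		return false;

	/* EDX bit 8 is the architectural "invariant TSC" bit. */
	x86_cpuid(0x80000007, regs);

	return (regs[3] & __BIT(8)) != 0;
}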