Code example #1
void
kcpc_hw_init(cpu_t *cp)
{
	kthread_t *t = cp->cpu_idle_thread;
	uint32_t versionid;
	struct cpuid_regs cpuid;

	strands_perfmon_shared = 0;
	if (x86_feature & X86_HTT) {
		if (cpuid_getvendor(cpu[0]) == X86_VENDOR_Intel) {
			/*
			 * Intel processors that support Architectural
			 * Performance Monitoring Version 3 have per strand
			 * performance monitoring hardware.
			 * Hence we can allow use of performance counters on
			 * multiple strands on the same core simultaneously.
			 */
			cpuid.cp_eax = 0x0;
			(void) __cpuid_insn(&cpuid);
			if (cpuid.cp_eax < 0xa) {
				strands_perfmon_shared = 1;
			} else {
				cpuid.cp_eax = 0xa;
				(void) __cpuid_insn(&cpuid);

				versionid = cpuid.cp_eax & 0xFF;
				if (versionid < 3) {
					strands_perfmon_shared = 1;
				}
			}
		} else {
			strands_perfmon_shared = 1;
		}
	}

	if (strands_perfmon_shared) {
		mutex_enter(&cpu_setup_lock);
		if (setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(kcpc_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			setup_registered = 1;
		}
		mutex_exit(&cpu_setup_lock);
	}

	mutex_init(&cp->cpu_cpc_ctxlock, "cpu_cpc_ctxlock", MUTEX_DEFAULT, 0);

	if (kcpc_counts_include_idle)
		return;

	installctx(t, cp, kcpc_idle_save, kcpc_idle_restore,
	    NULL, NULL, NULL, NULL);
}
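For reference, the leaf-0xA check above can be reproduced outside the kernel. The following is a minimal user-space sketch, assuming a GCC/Clang toolchain that provides <cpuid.h> and its __get_cpuid helper (those are toolchain assumptions, not part of the illumos code); it mirrors the kcpc_hw_init() logic for Intel processors, where an Architectural Performance Monitoring version of 3 or later implies per-strand counters.

/*
 * Minimal user-space sketch (assumes GCC/Clang <cpuid.h>): report whether
 * the CPU advertises Architectural Performance Monitoring version >= 3,
 * i.e. per-strand counters, mirroring the kcpc_hw_init() check above.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0 reports the maximum supported basic leaf in EAX. */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 0xa) {
		printf("leaf 0xa not supported: counters shared per core\n");
		return (0);
	}

	/* Leaf 0xA, EAX[7:0] is the architectural perfmon version ID. */
	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
		return (1);

	printf("perfmon version %u: %s\n", eax & 0xff,
	    (eax & 0xff) >= 3 ? "per-strand counters" : "shared per core");
	return (0);
}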
Code example #2
File: pwrnow.c  Project: apprisi/illumos-gate
boolean_t
pwrnow_supported()
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    !is_x86_feature(x86_featureset, X86FSET_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	/*
	 * We currently only support CPU power management of
	 * processors that are P-state TSC invariant
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
		PWRNOW_DEBUG(("No support for CPUs that are not P-state "
		    "TSC invariant.\n"));
		return (B_FALSE);
	}

	/*
	 * We only support the "Fire and Forget" style of PowerNow! (i.e.,
	 * single MSR write to change speed).
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
		PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
		return (B_FALSE);
	}
	return (B_TRUE);
}
Code example #3
File: pwrnow.c  Project: apprisi/illumos-gate
static boolean_t
pwrnow_cpb_supported(void)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    !is_x86_feature(x86_featureset, X86FSET_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	if (!(cpu_regs.cp_edx & AMD_CPUID_CPB))
		return (B_FALSE);

	return (B_TRUE);
}
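Both pwrnow_supported() and pwrnow_cpb_supported() read the same Advanced Power Management leaf (CPUID Fn8000_0007) and test individual EDX bits. Below is a standalone sketch of that probe, again assuming GCC/Clang's <cpuid.h>; the spelled-out bit positions (HwPstate = bit 7, TscInvariant = bit 8, CPB = bit 9) correspond to the driver's AMD_CPUID_* constants but should be verified against the AMD documentation.

/*
 * User-space sketch (assumes GCC/Clang <cpuid.h>): query AMD's Advanced
 * Power Management leaf 0x80000007 and decode the EDX feature bits used
 * by the PowerNow! driver above. Bit positions are taken from AMD
 * documentation and should be double-checked there.
 */
#include <cpuid.h>
#include <stdio.h>

#define	APM_HW_PSTATE		(1u << 7)	/* hardware P-state control */
#define	APM_TSC_INVARIANT	(1u << 8)	/* TSC constant across P-states */
#define	APM_CPB			(1u << 9)	/* core performance boost */

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Extended leaf 0x80000000 reports the max extended leaf in EAX. */
	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
	    eax < 0x80000007)
		return (1);

	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
		return (1);

	printf("TSC invariant: %s\n", (edx & APM_TSC_INVARIANT) ? "yes" : "no");
	printf("HW P-states:   %s\n", (edx & APM_HW_PSTATE) ? "yes" : "no");
	printf("Boost (CPB):   %s\n", (edx & APM_CPB) ? "yes" : "no");
	return (0);
}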
Code example #4
void
xen_hvm_init(void)
{
	struct cpuid_regs cp;
	uint32_t xen_signature[4], base;
	char *xen_str;
	struct xen_add_to_physmap xatp;
	xen_capabilities_info_t caps;
	pfn_t pfn;
	uint64_t msrval, val;
	extern int apix_enable;

	if (xen_hvm_inited != 0)
		return;

	xen_hvm_inited = 1;

	/*
	 * Xen's pseudo-cpuid function returns a string representing
	 * the Xen signature in %ebx, %ecx, and %edx.
	 * Loop over the base values, since it may be different if
	 * the hypervisor has hyper-v emulation switched on.
	 *
	 * %eax contains the maximum supported cpuid function.
	 */
	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cp.cp_eax = base;
		(void) __cpuid_insn(&cp);
		xen_signature[0] = cp.cp_ebx;
		xen_signature[1] = cp.cp_ecx;
		xen_signature[2] = cp.cp_edx;
		xen_signature[3] = 0;
		xen_str = (char *)xen_signature;
		if (strcmp("XenVMMXenVMM", xen_str)  == 0 &&
		    cp.cp_eax >= (base + 2))
			break;
	}
	if (base >= 0x40010000)
		return;

	/*
	 * cpuid function at base + 1 returns the Xen version in %eax.  The
	 * top 16 bits are the major version, the bottom 16 are the minor
	 * version.
	 */
	cp.cp_eax = base + 1;
	(void) __cpuid_insn(&cp);
	xen_major = cp.cp_eax >> 16;
	xen_minor = cp.cp_eax & 0xffff;

	/*
	 * Below version 3.1 we can't do anything special as a HVM domain;
	 * the PV drivers don't work, many hypercalls are not available,
	 * etc.
	 */
	if (xen_major < 3 || (xen_major == 3 && xen_minor < 1))
		return;

	/*
	 * cpuid function at base + 2 returns information about the
	 * hypercall page.  %eax nominally contains the number of pages
	 * with hypercall code, but according to the Xen guys, "I'll
	 * guarantee that remains one forever more, so you can just
	 * allocate a single page and get quite upset if you ever see CPUID
	 * return more than one page."  %ebx contains an MSR we use to ask
	 * Xen to remap each page at a specific pfn.
	 */
	cp.cp_eax = base + 2;
	(void) __cpuid_insn(&cp);

	/*
	 * Let Xen know where we want the hypercall page mapped.  We
	 * already have a page allocated in the .text section to simplify
	 * the wrapper code.
	 */
	pfn = va_to_pfn(&hypercall_page);
	msrval = mmu_ptob(pfn);
	wrmsr(cp.cp_ebx, msrval);

	/* Fill in the xen_info data */
	xen_info = &__xen_info;
	(void) sprintf(xen_info->magic, "xen-%d.%d", xen_major, xen_minor);

	if (hvm_get_param(HVM_PARAM_STORE_PFN, &val) < 0)
		return;
	/*
	 * The first hypercall worked, so mark hypercalls as working.
	 */
	xen_hvm_features |= XEN_HVM_HYPERCALLS;

	xen_info->store_mfn = (mfn_t)val;
	if (hvm_get_param(HVM_PARAM_STORE_EVTCHN, &val) < 0)
		return;
	xen_info->store_evtchn = (mfn_t)val;

	/* Figure out whether the hypervisor is 32-bit or 64-bit.  */
	if ((HYPERVISOR_xen_version(XENVER_capabilities, &caps) == 0)) {
		((char *)(caps))[sizeof (caps) - 1] = '\0';
		if (strstr(caps, "x86_64") != NULL)
			xen_bits = 64;
		else if (strstr(caps, "x86_32") != NULL)
			xen_bits = 32;
	}

	if (xen_bits < 0)
		return;
#ifdef __amd64
	ASSERT(xen_bits == 64);
#endif

	/*
	 * Allocate space for the shared_info page and tell Xen where it
	 * is.
	 */
	xen_shared_info_frame = va_to_pfn(&hypercall_shared_info_page);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = xen_shared_info_frame;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
		return;

	HYPERVISOR_shared_info = (void *)&hypercall_shared_info_page;

	/*
	 * A working HVM tlb flush hypercall was introduced in Xen 3.3.
	 */
	if (xen_major > 3 || (xen_major == 3 && xen_minor >= 3))
		xen_hvm_features |= XEN_HVM_TLBFLUSH;

	/* FIXME Disable apix for the time being */
	apix_enable = 0;
}
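The detection loop at the top of xen_hvm_init() can be exercised on its own: scan the hypervisor CPUID range 0x40000000-0x4000FFFF in steps of 0x100 and compare the EBX/ECX/EDX signature against "XenVMMXenVMM". A minimal user-space sketch follows; it assumes the unchecked __cpuid macro from GCC/Clang's <cpuid.h> (the checked __get_cpuid helper rejects leaves above the basic maximum), and it covers only the signature scan and version read, not the hypercall-page or shared-info setup.

/*
 * User-space sketch (assumes GCC/Clang <cpuid.h>): locate the Xen
 * signature in the hypervisor CPUID leaves and print the Xen version,
 * mirroring the first part of xen_hvm_init() above.
 */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx, base;
	char sig[13];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		__cpuid(base, eax, ebx, ecx, edx);
		/* The signature string is returned in EBX, ECX, EDX. */
		memcpy(sig, &ebx, 4);
		memcpy(sig + 4, &ecx, 4);
		memcpy(sig + 8, &edx, 4);
		sig[12] = '\0';
		if (strcmp(sig, "XenVMMXenVMM") == 0 && eax >= base + 2)
			break;
	}
	if (base >= 0x40010000) {
		printf("not running under Xen\n");
		return (0);
	}

	/* Leaf base + 1: major version in EAX[31:16], minor in EAX[15:0]. */
	__cpuid(base + 1, eax, ebx, ecx, edx);
	printf("Xen %u.%u (signature base 0x%x)\n",
	    eax >> 16, eax & 0xffff, base);
	return (0);
}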
Code example #5
static void
synth_amd_info(uint_t family, uint_t model, uint_t step,
    uint32_t *skt_p, uint32_t *chiprev_p, const char **chiprevstr_p)
{
	const struct amd_rev_mapent *rmp;
	int found = 0;
	int i;

	if (family < 0xf)
		return;

	for (i = 0, rmp = amd_revmap; i < sizeof (amd_revmap) / sizeof (*rmp);
	    i++, rmp++) {
		if (family == rmp->rm_family &&
		    model >= rmp->rm_modello && model <= rmp->rm_modelhi &&
		    step >= rmp->rm_steplo && step <= rmp->rm_stephi) {
			found = 1;
			break;
		}
	}

	if (!found)
		return;

	if (chiprev_p != NULL)
		*chiprev_p = rmp->rm_chiprev;
	if (chiprevstr_p != NULL)
		*chiprevstr_p = rmp->rm_chiprevstr;

	if (skt_p != NULL) {
		int platform;

#ifdef __xpv
		/* PV guest */
		if (!is_controldom()) {
			*skt_p = X86_SOCKET_UNKNOWN;
			return;
		}
#endif
		platform = get_hwenv();

		if ((platform == HW_XEN_HVM) || (platform == HW_VMWARE)) {
			*skt_p = X86_SOCKET_UNKNOWN;
		} else if (family == 0xf) {
			*skt_p = amd_skts[rmp->rm_sktidx][model & 0x3];
		} else {
			/*
			 * Starting with family 10h, socket type is stored in
			 * CPUID Fn8000_0001_EBX
			 */
			struct cpuid_regs cp;
			int idx;

			cp.cp_eax = 0x80000001;
			(void) __cpuid_insn(&cp);

			/* PkgType bits */
			idx = BITX(cp.cp_ebx, 31, 28);

			if (idx > 7) {
				/* Reserved bits */
				*skt_p = X86_SOCKET_UNKNOWN;
			} else if (family == 0x10 &&
			    amd_skts[rmp->rm_sktidx][idx] ==
			    X86_SOCKET_AM) {
				/*
				 * Look at Ddr3Mode bit of DRAM Configuration
				 * High Register to decide whether this is
				 * AM2r2 (aka AM2+) or AM3.
				 */
				uint32_t val;

				val = pci_getl_func(0, 24, 2, 0x94);
				if (BITX(val, 8, 8))
					*skt_p = X86_SOCKET_AM3;
				else
					*skt_p = X86_SOCKET_AM2R2;
			} else {
				*skt_p = amd_skts[rmp->rm_sktidx][idx];
			}
		}
	}
}
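The family >= 10h branch above extracts the socket (package) type from CPUID Fn8000_0001 EBX[31:28]. The sketch below isolates that extraction for illustration, assuming GCC/Clang's <cpuid.h>; the field position follows the code above, and turning the index into a socket name still requires a family-specific table like amd_skts[], which is not reproduced here.

/*
 * User-space sketch (assumes GCC/Clang <cpuid.h>): read the AMD PkgType
 * field from CPUID Fn8000_0001 EBX[31:28], as synth_amd_info() does for
 * family 10h and later. Mapping the index to a socket name is family
 * specific and is left out.
 */
#include <cpuid.h>
#include <stdio.h>

/* Extract bits [high:low] of v, like the kernel's BITX() macro. */
#define	BITX(v, high, low) \
	(((v) >> (low)) & ((1u << ((high) - (low) + 1)) - 1))

int
main(void)
{
	unsigned int eax, ebx, ecx, edx, pkgtype;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return (1);

	pkgtype = BITX(ebx, 31, 28);
	if (pkgtype > 7)
		printf("PkgType index %u (reserved)\n", pkgtype);
	else
		printf("PkgType index %u\n", pkgtype);
	return (0);
}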