/*
 * Walk the immediate children of 'dip' and enable HyperTransport MSI
 * mapping on each one (via npe_enable_htmsi()).  No-op unless the
 * npe_enable_htmsi_flag tunable is set.
 */
void
npe_enable_htmsi_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;

	if (!npe_enable_htmsi_flag)
		return;

	/*
	 * Hypertransport MSI remapping only applies to AMD CPUs using
	 * Hypertransport (K8 and above) and not other platforms with non-AMD
	 * CPUs that may be using Hypertransport internally in the chipset(s)
	 */
	if (!(cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
	    cpuid_getfamily(CPU) >= 0xf))
		return;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS) {
			cmn_err(CE_NOTE, "!npe_enable_htmsi_children: "
			    "pci_config_setup failed for %s",
			    ddi_node_name(cdip));
			/*
			 * Fix: a config-space mapping failure on one child
			 * previously aborted the whole walk with 'return',
			 * leaving HT MSI mapping disabled on all remaining
			 * siblings.  The failure is already logged above;
			 * skip this child and keep going.
			 */
			continue;
		}
		(void) npe_enable_htmsi(cfg_hdl);
		pci_config_teardown(&cfg_hdl);
	}
}
/*
 * Per-CPU initialization of the kernel CPU performance counter (CPC)
 * subsystem for cpu 'cp'.  Decides whether sibling hardware strands must
 * share the performance monitoring hardware, registers a one-time CPU
 * setup callback if so, initializes the per-CPU CPC context lock, and
 * (unless idle cycles are to be counted) installs save/restore context
 * ops on the CPU's idle thread so counters stop while it runs.
 */
void
kcpc_hw_init(cpu_t *cp)
{
	kthread_t *t = cp->cpu_idle_thread;
	uint32_t versionid;
	struct cpuid_regs cpuid;

	/* Assume per-strand counters until proven otherwise. */
	strands_perfmon_shared = 0;
	if (x86_feature & X86_HTT) {
		/*
		 * NOTE(review): vendor is probed on cpu[0], presumably on
		 * the assumption of a homogeneous system — confirm.
		 */
		if (cpuid_getvendor(cpu[0]) == X86_VENDOR_Intel) {
			/*
			 * Intel processors that support Architectural
			 * Performance Monitoring Version 3 have per strand
			 * performance monitoring hardware.
			 * Hence we can allow use of performance counters on
			 * multiple strands on the same core simultaneously.
			 */
			cpuid.cp_eax = 0x0;	/* leaf 0: max leaf in eax */
			(void) __cpuid_insn(&cpuid);
			if (cpuid.cp_eax < 0xa) {
				/* Leaf 0xa (arch perfmon) not available. */
				strands_perfmon_shared = 1;
			} else {
				cpuid.cp_eax = 0xa;
				(void) __cpuid_insn(&cpuid);

				/* Arch perfmon version in eax[7:0]. */
				versionid = cpuid.cp_eax & 0xFF;
				if (versionid < 3) {
					strands_perfmon_shared = 1;
				}
			}
		} else {
			/* Non-Intel HTT: treat counters as shared. */
			strands_perfmon_shared = 1;
		}
	}

	if (strands_perfmon_shared) {
		/*
		 * Register kcpc_cpu_setup() exactly once.  cpu_setup_lock
		 * serializes the check of setup_registered; cpu_lock is
		 * acquired inside it (this ordering must be preserved) as
		 * required by register_cpu_setup_func().
		 */
		mutex_enter(&cpu_setup_lock);
		if (setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(kcpc_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			setup_registered = 1;
		}
		mutex_exit(&cpu_setup_lock);
	}

	mutex_init(&cp->cpu_cpc_ctxlock, "cpu_cpc_ctxlock", MUTEX_DEFAULT, 0);

	/* If idle time should be counted, leave the idle thread alone. */
	if (kcpc_counts_include_idle)
		return;

	/*
	 * Install context save/restore ops on the idle thread so the
	 * counters are stopped while the CPU is idle.
	 */
	installctx(t, cp, kcpc_idle_save, kcpc_idle_restore,
	    NULL, NULL, NULL, NULL);
}
/* * Return 1 if executing on Intel, otherwise 0 (e.g., AMD64). * Cache the result, as the CPU can't change. * * Note: the userland version uses getisax() and checks for an AMD-64-only * feature. The kernel version uses cpuid_getvendor(). */ int arcfour_crypt_on_intel(void) { static int cached_result = -1; if (cached_result == -1) { /* first time */ #ifdef _KERNEL cached_result = (cpuid_getvendor(CPU) == X86_VENDOR_Intel); #else uint_t ui; (void) getisax(&ui, 1); cached_result = ((ui & AV_386_AMD_MMX) == 0); #endif /* _KERNEL */ } return (cached_result); }
static int opt_pcbe_init(void) { amd_event_t *evp; amd_generic_event_t *gevp; amd_family = cpuid_getfamily(CPU); /* * Make sure this really _is_ an Opteron or Athlon 64 system. The kernel * loads this module based on its name in the module directory, but it * could have been renamed. */ if (cpuid_getvendor(CPU) != X86_VENDOR_AMD || amd_family < 0xf) return (-1); if (amd_family == 0xf) /* Some tools expect this string for family 0fh */ snprintf(amd_pcbe_impl_name, sizeof (amd_pcbe_impl_name), "AMD Opteron & Athlon64"); else snprintf(amd_pcbe_impl_name, sizeof (amd_pcbe_impl_name), "AMD Family %02xh%s", amd_family, AMD_PCBE_SUPPORTED(amd_family) ? "" :" (unsupported)"); /* * Figure out processor revision here and assign appropriate * event configuration. */ if (amd_family == 0xf) { uint32_t rev; rev = cpuid_getchiprev(CPU); if (X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_F)) amd_pcbe_cpuref = amd_fam_f_NPT_bkdg; else amd_pcbe_cpuref = amd_fam_f_rev_ae_bkdg; amd_events = family_f_events; amd_generic_events = opt_generic_events; } else if (amd_family == 0x10) { amd_pcbe_cpuref = amd_fam_10h_bkdg; amd_events = family_10h_events; amd_generic_events = family_10h_generic_events; } else if (amd_family == 0x11) { amd_pcbe_cpuref = amd_fam_11h_bkdg; amd_events = family_11h_events; amd_generic_events = opt_generic_events; } else { amd_pcbe_cpuref = amd_generic_bkdg; snprintf(amd_pcbe_cpuref, AMD_CPUREF_SIZE, "See BIOS and Kernel Developer's Guide " \ "(BKDG) For AMD Family %02xh Processors. " \ "(Note that this pcbe does not explicitly " \ "support this family)", amd_family); /* * For families that are not explicitly supported we'll use * events for family 0xf. Even if they are not quite right, * it's OK --- we state that pcbe is unsupported. */ amd_events = family_f_events; amd_generic_events = opt_generic_events; } /* * Construct event list. * * First pass: Calculate size needed. We'll need an additional byte * for the NULL pointer during the last strcat. 
* * Second pass: Copy strings. */ for (evp = amd_events; evp->name != NULL; evp++) evlist_sz += strlen(evp->name) + 1; for (gevp = amd_generic_events; gevp->name != NULL; gevp++) evlist_sz += strlen(gevp->name) + 1; evlist = kmem_alloc(evlist_sz + 1, KM_SLEEP); evlist[0] = '\0'; for (evp = amd_events; evp->name != NULL; evp++) { (void) strcat(evlist, evp->name); (void) strcat(evlist, ","); } for (gevp = amd_generic_events; gevp->name != NULL; gevp++) { (void) strcat(evlist, gevp->name); (void) strcat(evlist, ","); } /* * Remove trailing comma. */ evlist[evlist_sz - 1] = '\0'; return (0); }