static void
init_amd(void)
{

	/*
	 * Work around Erratum 721 for Family 10h and 12h processors.
	 * These processors may incorrectly update the stack pointer
	 * after a long series of push and/or near-call instructions,
	 * or a long series of pop and/or near-return instructions.
	 *
	 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
	 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
	 *
	 * Hypervisors do not provide access to the errata MSR,
	 * causing #GP exception on attempt to apply the errata.  The
	 * MSR write shall be done on host and persist globally
	 * anyway, so do not try to do it when under virtualization.
	 */
	switch (CPUID_TO_FAMILY(cpu_id)) {
	case 0x10:
	case 0x12:
		if ((cpu_feature2 & CPUID2_HV) == 0)
			wrmsr(0xc0011029, rdmsr(0xc0011029) | 1);
		break;
	}
}
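Every snippet in this collection derives the processor family (and often the model) from the CPUID leaf-1 signature via CPUID_TO_FAMILY()/CPUID_TO_MODEL(). As a rough illustration of what those macros compute (a sketch only, not the kernels' actual macro definitions), the base and extended fields combine like this:

/*
 * Illustrative sketch only -- not the kernel's macros.  The CPUID
 * signature (leaf 1, EAX) packs stepping in bits 3:0, base model in
 * 7:4, base family in 11:8, extended model in 19:16 and extended
 * family in 27:20.  The extended fields only contribute when the
 * base family is 0x6 (model) or 0xf (family and model).
 */
#include <stdint.h>

static inline uint32_t
example_cpuid_to_family(uint32_t id)
{
	uint32_t family = (id >> 8) & 0xf;

	if (family == 0xf)
		family += (id >> 20) & 0xff;
	return (family);
}

static inline uint32_t
example_cpuid_to_model(uint32_t id)
{
	uint32_t model = (id >> 4) & 0xf;
	uint32_t family = (id >> 8) & 0xf;

	if (family == 0x6 || family == 0xf)
		model |= ((id >> 16) & 0xf) << 4;
	return (model);
}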
static void
qpi_identify(driver_t *driver, device_t parent)
{

	/* Check CPUID to ensure this is an i7 CPU of some sort. */
	if (!(cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x1a || CPUID_TO_MODEL(cpu_id) == 0x2c)))
		return;

	/* PCI config register access is required. */
	if (pci_cfgregopen() == 0)
		return;

	/* Add a qpi bus device. */
	if (BUS_ADD_CHILD(parent, 20, "qpi", -1) == NULL)
		panic("Failed to add qpi bus");
}
static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		if ((msr & ((uint64_t)1 << 63)) != ((uint64_t)1 << 63)) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);
		switch (family) {
		case 0x11:
			/* fid/did to frequency */
			hwpstate_set[i].freq = 100 * (fid + 0x08) / (1 << did);
			break;
		case 0x10:
			/* fid/did to frequency */
			hwpstate_set[i].freq = 100 * (fid + 0x10) / (1 << did);
			break;
		default:
			HWPSTATE_DEBUG(dev,
			    "get_info_from_msr: AMD family %d CPU's are not implemented yet. sorry.\n",
			    family);
			return (ENXIO);
			break;
		}
		hwpstate_set[i].pstate_id = i;
		/* There was volts calculation, but deleted it. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}
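The fid/did cases above encode the P-state frequency formula for family 10h and 11h parts. A small hypothetical helper (illustrative only, not part of the driver) makes the arithmetic explicit; for example, fid = 0x0b and did = 0 on a family 10h CPU gives 100 * (0x0b + 0x10) / 1 = 2700 MHz:

/*
 * Illustrative helper: compute the P-state frequency in MHz from the
 * CpuFid/CpuDid fields, using the same formula as the switch above.
 * Family 11h uses an offset of 0x08, family 10h uses 0x10.
 */
static int
example_fid_did_to_mhz(int family, int fid, int did)
{
	int offset;

	offset = (family == 0x11) ? 0x08 : 0x10;
	return (100 * (fid + offset) / (1 << did));
}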
static int
km_probe(struct device *dev)
{
	char *desc;

	if (pci_get_vendor(dev) != PCI_VENDOR_AMD)
		return ENXIO;

	switch (pci_get_device(dev)) {
	case PCI_PRODUCT_AMD_AMD64_F10_MISC:
		desc = "AMD Family 10h temperature sensor";
		break;
	case PCI_PRODUCT_AMD_AMD64_F11_MISC:
		desc = "AMD Family 11h temperature sensor";
		break;
	case PCI_PRODUCT_AMD_AMD64_F14_MISC:
		if (CPUID_TO_FAMILY(cpu_id) == 0x12)
			desc = "AMD Family 12h temperature sensor";
		else
			desc = "AMD Family 14h temperature sensor";
		break;
	case PCI_PRODUCT_AMD_AMD64_F15_0x_MISC:
		desc = "AMD Family 15/0xh temperature sensor";
		break;
	case PCI_PRODUCT_AMD_AMD64_F15_1x_MISC:
		desc = "AMD Family 15/1xh temperature sensor";
		break;
	case PCI_PRODUCT_AMD_AMD64_F15_3x_MISC:
		desc = "AMD Family 15/3xh temperature sensor";
		break;
	case PCI_PRODUCT_AMD_AMD64_F16_MISC:
		desc = "AMD Family 16h temperature sensor";
		break;
	default:
		return ENXIO;
	}

	if (device_get_desc(dev) == NULL)
		device_set_desc(dev, desc);
	return 0;
}
void
msi_init(void)
{

	/* Check if we have a supported CPU. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
		break;
	case CPU_VENDOR_CENTAUR:
		if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf)
			break;
		/* FALLTHROUGH */
	default:
		return;
	}

	msi_enabled = 1;
	intr_register_pic(&msi_pic);
	mtx_init(&msi_lock, "msi", NULL, MTX_DEF);
}
static int
tprof_amdpmi_start(tprof_backend_cookie_t *cookie)
{
	struct cpu_info * const ci = curcpu();
	uint64_t xc;

	if (!(cpu_vendor == CPUVENDOR_AMD) ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 0xf) {	/* XXX */
		return ENOTSUP;
	}

	KASSERT(tprof_amdpmi_nmi_handle == NULL);
	tprof_amdpmi_nmi_handle = nmi_establish(tprof_amdpmi_nmi, NULL);

	counter_reset_val = -counter_val + 1;
	xc = xc_broadcast(0, tprof_amdpmi_start_cpu, NULL, NULL);
	xc_wait(xc);

	KASSERT(tprof_cookie == NULL);
	tprof_cookie = cookie;

	return 0;
}
static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
		return;

	/*
	 * Check if hardware pstate enable bit is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}
void printcpuinfo(void) { u_int regs[4], i; char *brand; cpu_class = i386_cpus[cpu].cpu_class; printf("CPU: "); strncpy(cpu_model, i386_cpus[cpu].cpu_name, sizeof (cpu_model)); /* Check for extended CPUID information and a processor name. */ init_exthigh(); if (cpu_exthigh >= 0x80000004) { brand = cpu_brand; for (i = 0x80000002; i < 0x80000005; i++) { do_cpuid(i, regs); memcpy(brand, regs, sizeof(regs)); brand += sizeof(regs); } } if (cpu_vendor_id == CPU_VENDOR_INTEL) { if ((cpu_id & 0xf00) > 0x300) { u_int brand_index; cpu_model[0] = '\0'; switch (cpu_id & 0x3000) { case 0x1000: strcpy(cpu_model, "Overdrive "); break; case 0x2000: strcpy(cpu_model, "Dual "); break; } switch (cpu_id & 0xf00) { case 0x400: strcat(cpu_model, "i486 "); /* Check the particular flavor of 486 */ switch (cpu_id & 0xf0) { case 0x00: case 0x10: strcat(cpu_model, "DX"); break; case 0x20: strcat(cpu_model, "SX"); break; case 0x30: strcat(cpu_model, "DX2"); break; case 0x40: strcat(cpu_model, "SL"); break; case 0x50: strcat(cpu_model, "SX2"); break; case 0x70: strcat(cpu_model, "DX2 Write-Back Enhanced"); break; case 0x80: strcat(cpu_model, "DX4"); break; } break; case 0x500: /* Check the particular flavor of 586 */ strcat(cpu_model, "Pentium"); switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, " A-step"); break; case 0x10: strcat(cpu_model, "/P5"); break; case 0x20: strcat(cpu_model, "/P54C"); break; case 0x30: strcat(cpu_model, "/P24T"); break; case 0x40: strcat(cpu_model, "/P55C"); break; case 0x70: strcat(cpu_model, "/P54C"); break; case 0x80: strcat(cpu_model, "/P55C (quarter-micron)"); break; default: /* nothing */ break; } #if defined(I586_CPU) && !defined(NO_F00F_HACK) /* * XXX - If/when Intel fixes the bug, this * should also check the version of the * CPU, not just that it's a Pentium. */ has_f00f_bug = 1; #endif break; case 0x600: /* Check the particular flavor of 686 */ switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, "Pentium Pro A-step"); break; case 0x10: strcat(cpu_model, "Pentium Pro"); break; case 0x30: case 0x50: case 0x60: strcat(cpu_model, "Pentium II/Pentium II Xeon/Celeron"); cpu = CPU_PII; break; case 0x70: case 0x80: case 0xa0: case 0xb0: strcat(cpu_model, "Pentium III/Pentium III Xeon/Celeron"); cpu = CPU_PIII; break; default: strcat(cpu_model, "Unknown 80686"); break; } break; case 0xf00: strcat(cpu_model, "Pentium 4"); cpu = CPU_P4; break; default: strcat(cpu_model, "unknown"); break; } /* * If we didn't get a brand name from the extended * CPUID, try to look it up in the brand table. */ if (cpu_high > 0 && *cpu_brand == '\0') { brand_index = cpu_procinfo & CPUID_BRAND_INDEX; if (brand_index <= MAX_BRAND_INDEX && cpu_brandtable[brand_index] != NULL) strcpy(cpu_brand, cpu_brandtable[brand_index]); } } } else if (cpu_vendor_id == CPU_VENDOR_AMD) { /* * Values taken from AMD Processor Recognition * http://www.amd.com/K6/k6docs/pdf/20734g.pdf * (also describes ``Features'' encodings. 
*/ strcpy(cpu_model, "AMD "); switch (cpu_id & 0xFF0) { case 0x410: strcat(cpu_model, "Standard Am486DX"); break; case 0x430: strcat(cpu_model, "Enhanced Am486DX2 Write-Through"); break; case 0x470: strcat(cpu_model, "Enhanced Am486DX2 Write-Back"); break; case 0x480: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Through"); break; case 0x490: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Back"); break; case 0x4E0: strcat(cpu_model, "Am5x86 Write-Through"); break; case 0x4F0: strcat(cpu_model, "Am5x86 Write-Back"); break; case 0x500: strcat(cpu_model, "K5 model 0"); tsc_freq = 0; break; case 0x510: strcat(cpu_model, "K5 model 1"); break; case 0x520: strcat(cpu_model, "K5 PR166 (model 2)"); break; case 0x530: strcat(cpu_model, "K5 PR200 (model 3)"); break; case 0x560: strcat(cpu_model, "K6"); break; case 0x570: strcat(cpu_model, "K6 266 (model 1)"); break; case 0x580: strcat(cpu_model, "K6-2"); break; case 0x590: strcat(cpu_model, "K6-III"); break; case 0x5a0: strcat(cpu_model, "Geode LX"); /* * Make sure the TSC runs through suspension, * otherwise we can't use it as timecounter */ wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL); break; default: strcat(cpu_model, "Unknown"); break; } #if defined(I586_CPU) && defined(CPU_WT_ALLOC) if ((cpu_id & 0xf00) == 0x500) { if (((cpu_id & 0x0f0) > 0) && ((cpu_id & 0x0f0) < 0x60) && ((cpu_id & 0x00f) > 3)) enable_K5_wt_alloc(); else if (((cpu_id & 0x0f0) > 0x80) || (((cpu_id & 0x0f0) == 0x80) && (cpu_id & 0x00f) > 0x07)) enable_K6_2_wt_alloc(); else if ((cpu_id & 0x0f0) > 0x50) enable_K6_wt_alloc(); } #endif } else if (cpu_vendor_id == CPU_VENDOR_CYRIX) { strcpy(cpu_model, "Cyrix "); switch (cpu_id & 0xff0) { case 0x440: strcat(cpu_model, "MediaGX"); break; case 0x520: strcat(cpu_model, "6x86"); break; case 0x540: cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); break; case 0x600: strcat(cpu_model, "6x86MX"); break; default: /* * Even though CPU supports the cpuid * instruction, it can be disabled. * Therefore, this routine supports all Cyrix * CPUs. */ switch (cyrix_did & 0xf0) { case 0x00: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486SLC"); break; case 0x01: strcat(cpu_model, "486DLC"); break; case 0x02: strcat(cpu_model, "486SLC2"); break; case 0x03: strcat(cpu_model, "486DLC2"); break; case 0x04: strcat(cpu_model, "486SRx"); break; case 0x05: strcat(cpu_model, "486DRx"); break; case 0x06: strcat(cpu_model, "486SRx2"); break; case 0x07: strcat(cpu_model, "486DRx2"); break; case 0x08: strcat(cpu_model, "486SRu"); break; case 0x09: strcat(cpu_model, "486DRu"); break; case 0x0a: strcat(cpu_model, "486SRu2"); break; case 0x0b: strcat(cpu_model, "486DRu2"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x10: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486S"); break; case 0x01: strcat(cpu_model, "486S2"); break; case 0x02: strcat(cpu_model, "486Se"); break; case 0x03: strcat(cpu_model, "486S2e"); break; case 0x0a: strcat(cpu_model, "486DX"); break; case 0x0b: strcat(cpu_model, "486DX2"); break; case 0x0f: strcat(cpu_model, "486DX4"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x20: if ((cyrix_did & 0x0f) < 8) strcat(cpu_model, "6x86"); /* Where did you get it? 
*/ else strcat(cpu_model, "5x86"); break; case 0x30: strcat(cpu_model, "6x86"); break; case 0x40: if ((cyrix_did & 0xf000) == 0x3000) { cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); } else strcat(cpu_model, "MediaGX"); break; case 0x50: strcat(cpu_model, "6x86MX"); break; case 0xf0: switch (cyrix_did & 0x0f) { case 0x0d: strcat(cpu_model, "Overdrive CPU"); break; case 0x0e: strcpy(cpu_model, "Texas Instruments 486SXL"); break; case 0x0f: strcat(cpu_model, "486SLC/DLC"); break; default: strcat(cpu_model, "Unknown"); break; } break; default: strcat(cpu_model, "Unknown"); break; } break; } } else if (cpu_vendor_id == CPU_VENDOR_RISE) { strcpy(cpu_model, "Rise "); switch (cpu_id & 0xff0) { case 0x500: /* 6401 and 6441 (Kirin) */ case 0x520: /* 6510 (Lynx) */ strcat(cpu_model, "mP6"); break; default: strcat(cpu_model, "Unknown"); } } else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) { switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "IDT WinChip C6"); /* * http://www.centtech.com/c6_data_sheet.pdf * * I-12 RDTSC may return incoherent values in EDX:EAX * I-13 RDTSC hangs when certain event counters are used */ tsc_freq = 0; break; case 0x580: strcpy(cpu_model, "IDT WinChip 2"); break; case 0x590: strcpy(cpu_model, "IDT WinChip 3"); break; case 0x660: strcpy(cpu_model, "VIA C3 Samuel"); break; case 0x670: if (cpu_id & 0x8) strcpy(cpu_model, "VIA C3 Ezra"); else strcpy(cpu_model, "VIA C3 Samuel 2"); break; case 0x680: strcpy(cpu_model, "VIA C3 Ezra-T"); break; case 0x690: strcpy(cpu_model, "VIA C3 Nehemiah"); break; case 0x6a0: case 0x6d0: strcpy(cpu_model, "VIA C7 Esther"); break; case 0x6f0: strcpy(cpu_model, "VIA Nano"); break; default: strcpy(cpu_model, "VIA/IDT Unknown"); } } else if (cpu_vendor_id == CPU_VENDOR_IBM) { strcpy(cpu_model, "Blue Lightning CPU"); } else if (cpu_vendor_id == CPU_VENDOR_NSC) { switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "Geode SC1100"); cpu = CPU_GEODE1100; if ((cpu_id & CPUID_STEPPING) == 0) tsc_freq = 0; break; default: strcpy(cpu_model, "Geode/NSC unknown"); break; } } /* * Replace cpu_model with cpu_brand minus leading spaces if * we have one. */ brand = cpu_brand; while (*brand == ' ') ++brand; if (*brand != '\0') strcpy(cpu_model, brand); printf("%s (", cpu_model); switch(cpu_class) { case CPUCLASS_286: printf("286"); break; case CPUCLASS_386: printf("386"); break; #if defined(I486_CPU) case CPUCLASS_486: printf("486"); break; #endif #if defined(I586_CPU) case CPUCLASS_586: if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } printf("586"); break; #endif #if defined(I686_CPU) case CPUCLASS_686: if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } printf("686"); break; #endif default: printf("Unknown"); /* will panic below... 
*/ } printf("-class CPU)\n"); if(*cpu_vendor) printf(" Origin = \"%s\"",cpu_vendor); if(cpu_id) printf(" Id = 0x%x", cpu_id); if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_TRANSMETA || cpu_vendor_id == CPU_VENDOR_RISE || cpu_vendor_id == CPU_VENDOR_CENTAUR || cpu_vendor_id == CPU_VENDOR_NSC || (cpu_vendor_id == CPU_VENDOR_CYRIX && ((cpu_id & 0xf00) > 0x500))) { printf(" Family = 0x%x", CPUID_TO_FAMILY(cpu_id)); printf(" Model = 0x%x", CPUID_TO_MODEL(cpu_id)); printf(" Stepping = %u", cpu_id & CPUID_STEPPING); if (cpu_vendor_id == CPU_VENDOR_CYRIX) printf("\n DIR=0x%04x", cyrix_did); /* * AMD CPUID Specification * http://support.amd.com/us/Embedded_TechDocs/25481.pdf * * Intel Processor Identification and CPUID Instruction * http://www.intel.com/assets/pdf/appnote/241618.pdf */ if (cpu_high > 0) { /* * Here we should probably set up flags indicating * whether or not various features are available. * The interesting ones are probably VME, PSE, PAE, * and PGE. The code already assumes without bothering * to check that all CPUs >= Pentium have a TSC and * MSRs. */ printf("\n Features=0x%b", cpu_feature, "\020" "\001FPU" /* Integral FPU */ "\002VME" /* Extended VM86 mode support */ "\003DE" /* Debugging Extensions (CR4.DE) */ "\004PSE" /* 4MByte page tables */ "\005TSC" /* Timestamp counter */ "\006MSR" /* Machine specific registers */ "\007PAE" /* Physical address extension */ "\010MCE" /* Machine Check support */ "\011CX8" /* CMPEXCH8 instruction */ "\012APIC" /* SMP local APIC */ "\013oldMTRR" /* Previous implementation of MTRR */ "\014SEP" /* Fast System Call */ "\015MTRR" /* Memory Type Range Registers */ "\016PGE" /* PG_G (global bit) support */ "\017MCA" /* Machine Check Architecture */ "\020CMOV" /* CMOV instruction */ "\021PAT" /* Page attributes table */ "\022PSE36" /* 36 bit address space support */ "\023PN" /* Processor Serial number */ "\024CLFLUSH" /* Has the CLFLUSH instruction */ "\025<b20>" "\026DTS" /* Debug Trace Store */ "\027ACPI" /* ACPI support */ "\030MMX" /* MMX instructions */ "\031FXSR" /* FXSAVE/FXRSTOR */ "\032SSE" /* Streaming SIMD Extensions */ "\033SSE2" /* Streaming SIMD Extensions #2 */ "\034SS" /* Self snoop */ "\035HTT" /* Hyperthreading (see EBX bit 16-23) */ "\036TM" /* Thermal Monitor clock slowdown */ "\037IA64" /* CPU can execute IA64 instructions */ "\040PBE" /* Pending Break Enable */ ); if (cpu_feature2 != 0) { printf("\n Features2=0x%b", cpu_feature2, "\020" "\001SSE3" /* SSE3 */ "\002PCLMULQDQ" /* Carry-Less Mul Quadword */ "\003DTES64" /* 64-bit Debug Trace */ "\004MON" /* MONITOR/MWAIT Instructions */ "\005DS_CPL" /* CPL Qualified Debug Store */ "\006VMX" /* Virtual Machine Extensions */ "\007SMX" /* Safer Mode Extensions */ "\010EST" /* Enhanced SpeedStep */ "\011TM2" /* Thermal Monitor 2 */ "\012SSSE3" /* SSSE3 */ "\013CNXT-ID" /* L1 context ID available */ "\014<b11>" "\015FMA" /* Fused Multiply Add */ "\016CX16" /* CMPXCHG16B Instruction */ "\017xTPR" /* Send Task Priority Messages*/ "\020PDCM" /* Perf/Debug Capability MSR */ "\021<b16>" "\022PCID" /* Process-context Identifiers*/ "\023DCA" /* Direct Cache Access */ "\024SSE4.1" /* SSE 4.1 */ "\025SSE4.2" /* SSE 4.2 */ "\026x2APIC" /* xAPIC Extensions */ "\027MOVBE" /* MOVBE Instruction */ "\030POPCNT" /* POPCNT Instruction */ "\031TSCDLT" /* TSC-Deadline Timer */ "\032AESNI" /* AES Crypto */ "\033XSAVE" /* XSAVE/XRSTOR States */ "\034OSXSAVE" /* OS-Enabled State Management*/ "\035AVX" /* Advanced Vector Extensions */ "\036F16C" /* 
Half-precision conversions */ "\037RDRAND" /* RDRAND Instruction */ "\040HV" /* Hypervisor */ ); } if (amd_feature != 0) { printf("\n AMD Features=0x%b", amd_feature, "\020" /* in hex */ "\001<s0>" /* Same */ "\002<s1>" /* Same */ "\003<s2>" /* Same */ "\004<s3>" /* Same */ "\005<s4>" /* Same */ "\006<s5>" /* Same */ "\007<s6>" /* Same */ "\010<s7>" /* Same */ "\011<s8>" /* Same */ "\012<s9>" /* Same */ "\013<b10>" /* Undefined */ "\014SYSCALL" /* Have SYSCALL/SYSRET */ "\015<s12>" /* Same */ "\016<s13>" /* Same */ "\017<s14>" /* Same */ "\020<s15>" /* Same */ "\021<s16>" /* Same */ "\022<s17>" /* Same */ "\023<b18>" /* Reserved, unknown */ "\024MP" /* Multiprocessor Capable */ "\025NX" /* Has EFER.NXE, NX */ "\026<b21>" /* Undefined */ "\027MMX+" /* AMD MMX Extensions */ "\030<s23>" /* Same */ "\031<s24>" /* Same */ "\032FFXSR" /* Fast FXSAVE/FXRSTOR */ "\033Page1GB" /* 1-GB large page support */ "\034RDTSCP" /* RDTSCP */ "\035<b28>" /* Undefined */ "\036LM" /* 64 bit long mode */ "\0373DNow!+" /* AMD 3DNow! Extensions */ "\0403DNow!" /* AMD 3DNow! */ ); } if (amd_feature2 != 0) { printf("\n AMD Features2=0x%b", amd_feature2, "\020" "\001LAHF" /* LAHF/SAHF in long mode */ "\002CMP" /* CMP legacy */ "\003SVM" /* Secure Virtual Mode */ "\004ExtAPIC" /* Extended APIC register */ "\005CR8" /* CR8 in legacy mode */ "\006ABM" /* LZCNT instruction */ "\007SSE4A" /* SSE4A */ "\010MAS" /* Misaligned SSE mode */ "\011Prefetch" /* 3DNow! Prefetch/PrefetchW */ "\012OSVW" /* OS visible workaround */ "\013IBS" /* Instruction based sampling */ "\014XOP" /* XOP extended instructions */ "\015SKINIT" /* SKINIT/STGI */ "\016WDT" /* Watchdog timer */ "\017<b14>" "\020LWP" /* Lightweight Profiling */ "\021FMA4" /* 4-operand FMA instructions */ "\022<b17>" "\023<b18>" "\024NodeId" /* NodeId MSR support */ "\025<b20>" "\026TBM" /* Trailing Bit Manipulation */ "\027Topology" /* Topology Extensions */ "\030<b23>" "\031<b24>" "\032<b25>" "\033<b26>" "\034<b27>" "\035<b28>" "\036<b29>" "\037<b30>" "\040<b31>" ); } if (via_feature_rng != 0 || via_feature_xcrypt != 0) print_via_padlock_info(); if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_AMD) cpu_feature &= ~CPUID_HTT; /* * If this CPU supports P-state invariant TSC then * mention the capability. */ if (tsc_is_invariant) { printf("\n TSC: P-state invariant"); if (tsc_perf_stat) printf(", performance statistics"); } } } else if (cpu_vendor_id == CPU_VENDOR_CYRIX) { printf(" DIR=0x%04x", cyrix_did); printf(" Stepping=%u", (cyrix_did & 0xf000) >> 12); printf(" Revision=%u", (cyrix_did & 0x0f00) >> 8); #ifndef CYRIX_CACHE_REALLY_WORKS if (cpu == CPU_M1 && (cyrix_did & 0xff00) < 0x1700) printf("\n CPU cache: write-through mode"); #endif }
/* * Initialize the local APIC on the BSP. */ static int madt_setup_local(void) { ACPI_TABLE_DMAR *dmartbl; vm_paddr_t dmartbl_physaddr; const char *reason; char *hw_vendor; u_int p[4]; int user_x2apic; bool bios_x2apic; madt = pmap_mapbios(madt_physaddr, madt_length); if ((cpu_feature2 & CPUID2_X2APIC) != 0) { reason = NULL; /* * Automatically detect several configurations where * x2APIC mode is known to cause troubles. User can * override the setting with hw.x2apic_enable tunable. */ dmartbl_physaddr = acpi_find_table(ACPI_SIG_DMAR); if (dmartbl_physaddr != 0) { dmartbl = acpi_map_table(dmartbl_physaddr, ACPI_SIG_DMAR); if ((dmartbl->Flags & ACPI_DMAR_X2APIC_OPT_OUT) != 0) reason = "by DMAR table"; acpi_unmap_table(dmartbl); } if (vm_guest == VM_GUEST_VMWARE) { vmware_hvcall(VMW_HVCMD_GETVCPU_INFO, p); if ((p[0] & VMW_VCPUINFO_VCPU_RESERVED) != 0 || (p[0] & VMW_VCPUINFO_LEGACY_X2APIC) == 0) reason = "inside VMWare without intr redirection"; } else if (vm_guest == VM_GUEST_XEN) { reason = "due to running under XEN"; } else if (vm_guest == VM_GUEST_NO && CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x2a) { hw_vendor = kern_getenv("smbios.planar.maker"); /* * It seems that some Lenovo and ASUS * SandyBridge-based notebook BIOSes have a * bug which prevents booting AP in x2APIC * mode. Since the only way to detect mobile * CPU is to check northbridge pci id, which * cannot be done that early, disable x2APIC * for all Lenovo and ASUS SandyBridge * machines. */ if (hw_vendor != NULL) { if (!strcmp(hw_vendor, "LENOVO") || !strcmp(hw_vendor, "ASUSTeK Computer Inc.")) { reason = "for a suspected SandyBridge BIOS bug"; } freeenv(hw_vendor); } } bios_x2apic = lapic_is_x2apic(); if (reason != NULL && bios_x2apic) { if (bootverbose) printf("x2APIC should be disabled %s but " "already enabled by BIOS; enabling.\n", reason); reason = NULL; } if (reason == NULL) x2apic_mode = 1; else if (bootverbose) printf("x2APIC available but disabled %s\n", reason); user_x2apic = x2apic_mode; TUNABLE_INT_FETCH("hw.x2apic_enable", &user_x2apic); if (user_x2apic != x2apic_mode) { if (bios_x2apic && !user_x2apic) printf("x2APIC disabled by tunable and " "enabled by BIOS; ignoring tunable."); else x2apic_mode = user_x2apic; } } lapic_init(madt->Address); printf("ACPI APIC Table: <%.*s %.*s>\n", (int)sizeof(madt->Header.OemId), madt->Header.OemId, (int)sizeof(madt->Header.OemTableId), madt->Header.OemTableId); /* * We ignore 64-bit local APIC override entries. Should we * perhaps emit a warning here if we find one? */ return (0); }
void printcpuinfo(void) { u_int regs[4], i; char *brand; cpu_class = amd64_cpus[cpu].cpu_class; printf("CPU: "); strncpy(cpu_model, amd64_cpus[cpu].cpu_name, sizeof (cpu_model)); /* Check for extended CPUID information and a processor name. */ if (cpu_exthigh >= 0x80000004) { brand = cpu_brand; for (i = 0x80000002; i < 0x80000005; i++) { do_cpuid(i, regs); memcpy(brand, regs, sizeof(regs)); brand += sizeof(regs); } } switch (cpu_vendor_id) { case CPU_VENDOR_INTEL: /* Please make up your mind folks! */ strcat(cpu_model, "EM64T"); break; case CPU_VENDOR_AMD: /* * Values taken from AMD Processor Recognition * http://www.amd.com/K6/k6docs/pdf/20734g.pdf * (also describes ``Features'' encodings. */ strcpy(cpu_model, "AMD "); if ((cpu_id & 0xf00) == 0xf00) strcat(cpu_model, "AMD64 Processor"); else strcat(cpu_model, "Unknown"); break; case CPU_VENDOR_CENTAUR: strcpy(cpu_model, "VIA "); if ((cpu_id & 0xff0) == 0x6f0) strcat(cpu_model, "Nano Processor"); else strcat(cpu_model, "Unknown"); break; default: strcat(cpu_model, "Unknown"); break; } /* * Replace cpu_model with cpu_brand minus leading spaces if * we have one. */ brand = cpu_brand; while (*brand == ' ') ++brand; if (*brand != '\0') strcpy(cpu_model, brand); printf("%s (", cpu_model); switch(cpu_class) { case CPUCLASS_K8: if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } printf("K8"); break; default: printf("Unknown"); /* will panic below... */ } printf("-class CPU)\n"); if (*cpu_vendor) printf(" Origin = \"%s\"", cpu_vendor); if (cpu_id) printf(" Id = 0x%x", cpu_id); if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_CENTAUR) { printf(" Family = 0x%x", CPUID_TO_FAMILY(cpu_id)); printf(" Model = 0x%x", CPUID_TO_MODEL(cpu_id)); printf(" Stepping = %u", cpu_id & CPUID_STEPPING); /* * AMD CPUID Specification * http://support.amd.com/us/Embedded_TechDocs/25481.pdf * * Intel Processor Identification and CPUID Instruction * http://www.intel.com/assets/pdf/appnote/241618.pdf */ if (cpu_high > 0) { /* * Here we should probably set up flags indicating * whether or not various features are available. * The interesting ones are probably VME, PSE, PAE, * and PGE. The code already assumes without bothering * to check that all CPUs >= Pentium have a TSC and * MSRs. 
*/ printf("\n Features=0x%b", cpu_feature, "\020" "\001FPU" /* Integral FPU */ "\002VME" /* Extended VM86 mode support */ "\003DE" /* Debugging Extensions (CR4.DE) */ "\004PSE" /* 4MByte page tables */ "\005TSC" /* Timestamp counter */ "\006MSR" /* Machine specific registers */ "\007PAE" /* Physical address extension */ "\010MCE" /* Machine Check support */ "\011CX8" /* CMPEXCH8 instruction */ "\012APIC" /* SMP local APIC */ "\013oldMTRR" /* Previous implementation of MTRR */ "\014SEP" /* Fast System Call */ "\015MTRR" /* Memory Type Range Registers */ "\016PGE" /* PG_G (global bit) support */ "\017MCA" /* Machine Check Architecture */ "\020CMOV" /* CMOV instruction */ "\021PAT" /* Page attributes table */ "\022PSE36" /* 36 bit address space support */ "\023PN" /* Processor Serial number */ "\024CLFLUSH" /* Has the CLFLUSH instruction */ "\025<b20>" "\026DTS" /* Debug Trace Store */ "\027ACPI" /* ACPI support */ "\030MMX" /* MMX instructions */ "\031FXSR" /* FXSAVE/FXRSTOR */ "\032SSE" /* Streaming SIMD Extensions */ "\033SSE2" /* Streaming SIMD Extensions #2 */ "\034SS" /* Self snoop */ "\035HTT" /* Hyperthreading (see EBX bit 16-23) */ "\036TM" /* Thermal Monitor clock slowdown */ "\037IA64" /* CPU can execute IA64 instructions */ "\040PBE" /* Pending Break Enable */ ); if (cpu_feature2 != 0) { printf("\n Features2=0x%b", cpu_feature2, "\020" "\001SSE3" /* SSE3 */ "\002PCLMULQDQ" /* Carry-Less Mul Quadword */ "\003DTES64" /* 64-bit Debug Trace */ "\004MON" /* MONITOR/MWAIT Instructions */ "\005DS_CPL" /* CPL Qualified Debug Store */ "\006VMX" /* Virtual Machine Extensions */ "\007SMX" /* Safer Mode Extensions */ "\010EST" /* Enhanced SpeedStep */ "\011TM2" /* Thermal Monitor 2 */ "\012SSSE3" /* SSSE3 */ "\013CNXT-ID" /* L1 context ID available */ "\014<b11>" "\015FMA" /* Fused Multiply Add */ "\016CX16" /* CMPXCHG16B Instruction */ "\017xTPR" /* Send Task Priority Messages*/ "\020PDCM" /* Perf/Debug Capability MSR */ "\021<b16>" "\022PCID" /* Process-context Identifiers*/ "\023DCA" /* Direct Cache Access */ "\024SSE4.1" /* SSE 4.1 */ "\025SSE4.2" /* SSE 4.2 */ "\026x2APIC" /* xAPIC Extensions */ "\027MOVBE" /* MOVBE Instruction */ "\030POPCNT" /* POPCNT Instruction */ "\031TSCDLT" /* TSC-Deadline Timer */ "\032AESNI" /* AES Crypto */ "\033XSAVE" /* XSAVE/XRSTOR States */ "\034OSXSAVE" /* OS-Enabled State Management*/ "\035AVX" /* Advanced Vector Extensions */ "\036F16C" /* Half-precision conversions */ "\037RDRAND" /* RDRAND Instruction */ "\040HV" /* Hypervisor */ ); } if (amd_feature != 0) { printf("\n AMD Features=0x%b", amd_feature, "\020" /* in hex */ "\001<s0>" /* Same */ "\002<s1>" /* Same */ "\003<s2>" /* Same */ "\004<s3>" /* Same */ "\005<s4>" /* Same */ "\006<s5>" /* Same */ "\007<s6>" /* Same */ "\010<s7>" /* Same */ "\011<s8>" /* Same */ "\012<s9>" /* Same */ "\013<b10>" /* Undefined */ "\014SYSCALL" /* Have SYSCALL/SYSRET */ "\015<s12>" /* Same */ "\016<s13>" /* Same */ "\017<s14>" /* Same */ "\020<s15>" /* Same */ "\021<s16>" /* Same */ "\022<s17>" /* Same */ "\023<b18>" /* Reserved, unknown */ "\024MP" /* Multiprocessor Capable */ "\025NX" /* Has EFER.NXE, NX */ "\026<b21>" /* Undefined */ "\027MMX+" /* AMD MMX Extensions */ "\030<s23>" /* Same */ "\031<s24>" /* Same */ "\032FFXSR" /* Fast FXSAVE/FXRSTOR */ "\033Page1GB" /* 1-GB large page support */ "\034RDTSCP" /* RDTSCP */ "\035<b28>" /* Undefined */ "\036LM" /* 64 bit long mode */ "\0373DNow!+" /* AMD 3DNow! Extensions */ "\0403DNow!" /* AMD 3DNow! 
*/ ); } if (amd_feature2 != 0) { printf("\n AMD Features2=0x%b", amd_feature2, "\020" "\001LAHF" /* LAHF/SAHF in long mode */ "\002CMP" /* CMP legacy */ "\003SVM" /* Secure Virtual Mode */ "\004ExtAPIC" /* Extended APIC register */ "\005CR8" /* CR8 in legacy mode */ "\006ABM" /* LZCNT instruction */ "\007SSE4A" /* SSE4A */ "\010MAS" /* Misaligned SSE mode */ "\011Prefetch" /* 3DNow! Prefetch/PrefetchW */ "\012OSVW" /* OS visible workaround */ "\013IBS" /* Instruction based sampling */ "\014XOP" /* XOP extended instructions */ "\015SKINIT" /* SKINIT/STGI */ "\016WDT" /* Watchdog timer */ "\017<b14>" "\020LWP" /* Lightweight Profiling */ "\021FMA4" /* 4-operand FMA instructions */ "\022<b17>" "\023<b18>" "\024NodeId" /* NodeId MSR support */ "\025<b20>" "\026TBM" /* Trailing Bit Manipulation */ "\027Topology" /* Topology Extensions */ "\030<b23>" "\031<b24>" "\032<b25>" "\033<b26>" "\034<b27>" "\035<b28>" "\036<b29>" "\037<b30>" "\040<b31>" ); } if (cpu_stdext_feature != 0) { printf("\n Standard Extended Features=0x%b", cpu_stdext_feature, "\020" /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ "\001GSFSBASE" "\002TSCADJ" /* Bit Manipulation Instructions */ "\004BMI1" /* Hardware Lock Elision */ "\005HLE" /* Advanced Vector Instructions 2 */ "\006AVX2" /* Supervisor Mode Execution Prot. */ "\010SMEP" /* Bit Manipulation Instructions */ "\011BMI2" "\012ENHMOVSB" /* Invalidate Processor Context ID */ "\013INVPCID" /* Restricted Transactional Memory */ "\014RTM" /* Enhanced NRBG */ "\022RDSEED" /* ADCX + ADOX */ "\023ADX" /* Supervisor Mode Access Prevention */ "\024SMAP" ); } if (via_feature_rng != 0 || via_feature_xcrypt != 0) print_via_padlock_info(); if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_AMD) cpu_feature &= ~CPUID_HTT; /* * If this CPU supports P-state invariant TSC then * mention the capability. */ if (tsc_is_invariant) { printf("\n TSC: P-state invariant"); if (tsc_perf_stat) printf(", performance statistics"); } } } /* Avoid ugly blank lines: only print newline when we have to. */ if (*cpu_vendor || cpu_id) printf("\n"); if (!bootverbose) return; if (cpu_vendor_id == CPU_VENDOR_AMD) print_AMD_info(); }
static int
procfs_getonecpu(int xcpu, struct cpu_info *ci, char *bf, size_t *len)
{
	size_t left, l, size;
	char featurebuf[1024], *p;

	p = featurebuf;
	left = sizeof(featurebuf);
	size = *len;
	procfs_getonecpufeatures(ci, p, &left);

	p = bf;
	left = *len;
	size = 0;

	l = snprintf(p, left,
	    "processor\t: %d\n"
	    "vendor_id\t: %s\n"
	    "cpu family\t: %d\n"
	    "model\t\t: %d\n"
	    "model name\t: %s\n"
	    "stepping\t: ",
	    xcpu,
	    (char *)ci->ci_vendor,
	    CPUID_TO_FAMILY(ci->ci_signature),
	    CPUID_TO_MODEL(ci->ci_signature),
	    cpu_brand_string
	);
	size += l;
	if (l < left) {
		left -= l;
		p += l;
	} else
		left = 0;

	if (cpuid_level >= 0)
		l = snprintf(p, left, "%d\n",
		    CPUID_TO_STEPPING(ci->ci_signature));
	else
		l = snprintf(p, left, "unknown\n");
	size += l;
	if (l < left) {
		left -= l;
		p += l;
	} else
		left = 0;

	if (ci->ci_data.cpu_cc_freq != 0) {
		uint64_t freq, fraq;

		freq = (ci->ci_data.cpu_cc_freq + 4999) / 1000000;
		fraq = ((ci->ci_data.cpu_cc_freq + 4999) / 10000) % 100;
		l = snprintf(p, left,
		    "cpu MHz\t\t: %" PRIu64 ".%02" PRIu64 "\n", freq, fraq);
	} else
		l = snprintf(p, left, "cpu MHz\t\t: unknown\n");
	size += l;
	if (l < left) {
		left -= l;
		p += l;
	} else
		left = 0;

	l = snprintf(p, left,
	    "fdiv_bug\t: %s\n"
	    "fpu\t\t: %s\n"
	    "fpu_exception\t: yes\n"
	    "cpuid level\t: %d\n"
	    "wp\t\t: %s\n"
	    "flags\t\t: %s\n",
	    i386_fpu_fdivbug ? "yes" : "no",	/* an old pentium */
	    i386_fpu_present ? "yes" : "no",	/* not a 486SX */
	    cpuid_level,
	    (rcr0() & CR0_WP) ? "yes" : "no",
	    featurebuf
	);
	size += l;

	left = *len;
	*len = size;
	return left < *len ? -1 : 0;
}
static void
init_amd(void)
{
	uint64_t msr;

	/*
	 * Work around Erratum 721 for Family 10h and 12h processors.
	 * These processors may incorrectly update the stack pointer
	 * after a long series of push and/or near-call instructions,
	 * or a long series of pop and/or near-return instructions.
	 *
	 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
	 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
	 *
	 * Hypervisors do not provide access to the errata MSR,
	 * causing #GP exception on attempt to apply the errata.  The
	 * MSR write shall be done on host and persist globally
	 * anyway, so do not try to do it when under virtualization.
	 */
	switch (CPUID_TO_FAMILY(cpu_id)) {
	case 0x10:
	case 0x12:
		if ((cpu_feature2 & CPUID2_HV) == 0)
			wrmsr(0xc0011029, rdmsr(0xc0011029) | 1);
		break;
	}

	/*
	 * BIOS may fail to set InitApicIdCpuIdLo to 1 as it should per BKDG.
	 * So, do it here or otherwise some tools could be confused by
	 * Initial Local APIC ID reported with CPUID Function 1 in EBX.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(MSR_NB_CFG1);
			msr |= (uint64_t)1 << 54;
			wrmsr(MSR_NB_CFG1, msr);
		}
	}

	/*
	 * BIOS may configure Family 10h processors to convert WC+ cache type
	 * to CD.  That can hurt performance of guest VMs using nested paging.
	 * The relevant MSR bit is not documented in the BKDG,
	 * the fix is borrowed from Linux.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(0xc001102a);
			msr &= ~((uint64_t)1 << 24);
			wrmsr(0xc001102a, msr);
		}
	}

	/*
	 * Work around Erratum 793: Specific Combination of Writes to Write
	 * Combined Memory Types and Locked Instructions May Cause Core Hang.
	 * See Revision Guide for AMD Family 16h Models 00h-0Fh Processors,
	 * revision 3.04 or later, publication 51810.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(0xc0011020);
			msr |= (uint64_t)1 << 15;
			wrmsr(0xc0011020, msr);
		}
	}

	/*
	 * Work around a problem on Ryzen that is triggered by executing
	 * code near the top of user memory, in our case the signal
	 * trampoline code in the shared page on amd64.
	 *
	 * This function is executed once for the BSP before tunables take
	 * effect so the value determined here can be overridden by the
	 * tunable.  This function is then executed again for each AP and
	 * also on resume.  Set a flag the first time so that value set by
	 * the tunable is not overwritten.
	 *
	 * The stepping and/or microcode versions should be checked after
	 * this issue is fixed by AMD so that we don't use this mode if not
	 * needed.
	 */
	if (lower_sharedpage_init == 0) {
		lower_sharedpage_init = 1;
		if (CPUID_TO_FAMILY(cpu_id) == 0x17) {
			hw_lower_amd64_sharedpage = 1;
		}
	}
}
/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
	uint64_t msr;

	/* Check for FXSR and SSE support and enable if available. */
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}

	if (cpu == 0) {
		/* Check if we are running in a hypervisor. */
		vmm_guest = detect_virtual();
		if (vmm_guest == VMM_GUEST_NONE &&
		    (cpu_feature2 & CPUID2_VMM))
			vmm_guest = VMM_GUEST_UNKNOWN;
	}

#if !defined(CPU_DISABLE_AVX)
	/* Check for XSAVE and AVX support and enable if available. */
	if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE) &&
	    (cpu_feature & CPUID_SSE)) {
		load_cr4(rcr4() | CR4_XSAVE);

		/* Adjust size of savefpu in npx.h before adding to mask. */
		xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE |
		    CPU_XFEATURE_YMM, 0);
		cpu_xsave = 1;
	}
#endif

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch ((cpu_id & 0xFF0000)) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by yours truly
			 * (Matthew Dillon).  It is a bug where a sequence
			 * of 5 or more popq's + a retq, under involved
			 * deep recursion circumstances, can cause the %rsp
			 * to not be properly updated, almost always
			 * resulting in a seg-fault soon after.
			 *
			 * Do not install the workaround when we are running
			 * in a virtual machine.
			 */
			if (vmm_guest)
				break;

			msr = rdmsr(MSR_AMD_DE_CFG);
			if ((msr & 1) == 0) {
				if (cpu == 0)
					kprintf("Errata 721 workaround "
						"installed\n");
				msr |= 1;
				wrmsr(MSR_AMD_DE_CFG, msr);
			}
			break;
		}
	}

	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
#if 0 /* JG */
		pg_nx = PG_NX;
#endif
	}
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)
		init_via();

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}
int acpicpu_md_pstate_init(struct acpicpu_softc *sc) { struct cpu_info *ci = sc->sc_ci; struct acpicpu_pstate *ps, msr; uint32_t family, i = 0; (void)memset(&msr, 0, sizeof(struct acpicpu_pstate)); switch (cpu_vendor) { case CPUVENDOR_IDT: case CPUVENDOR_INTEL: /* * If the so-called Turbo Boost is present, * the P0-state is always the "turbo state". * It is shown as the P1 frequency + 1 MHz. * * For discussion, see: * * Intel Corporation: Intel Turbo Boost Technology * in Intel Core(tm) Microarchitectures (Nehalem) * Based Processors. White Paper, November 2008. */ if (sc->sc_pstate_count >= 2 && (sc->sc_flags & ACPICPU_FLAG_P_TURBO) != 0) { ps = &sc->sc_pstate[0]; if (ps->ps_freq == sc->sc_pstate[1].ps_freq + 1) ps->ps_flags |= ACPICPU_FLAG_P_TURBO; } msr.ps_control_addr = MSR_PERF_CTL; msr.ps_control_mask = __BITS(0, 15); msr.ps_status_addr = MSR_PERF_STATUS; msr.ps_status_mask = __BITS(0, 15); break; case CPUVENDOR_AMD: if ((sc->sc_flags & ACPICPU_FLAG_P_FIDVID) != 0) msr.ps_flags |= ACPICPU_FLAG_P_FIDVID; family = CPUID_TO_FAMILY(ci->ci_signature); switch (family) { case 0x0f: msr.ps_control_addr = MSR_0FH_CONTROL; msr.ps_status_addr = MSR_0FH_STATUS; break; case 0x10: case 0x11: case 0x12: case 0x14: case 0x15: msr.ps_control_addr = MSR_10H_CONTROL; msr.ps_control_mask = __BITS(0, 2); msr.ps_status_addr = MSR_10H_STATUS; msr.ps_status_mask = __BITS(0, 2); break; default: /* * If we have an unknown AMD CPU, rely on XPSS. */ if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0) return EOPNOTSUPP; } break; default: return ENODEV; } /* * Fill the P-state structures with MSR addresses that are * known to be correct. If we do not know the addresses, * leave the values intact. If a vendor uses XPSS, we do * not necessarily need to do anything to support new CPUs. */ while (i < sc->sc_pstate_count) { ps = &sc->sc_pstate[i]; if (msr.ps_flags != 0) ps->ps_flags |= msr.ps_flags; if (msr.ps_status_addr != 0) ps->ps_status_addr = msr.ps_status_addr; if (msr.ps_status_mask != 0) ps->ps_status_mask = msr.ps_status_mask; if (msr.ps_control_addr != 0) ps->ps_control_addr = msr.ps_control_addr; if (msr.ps_control_mask != 0) ps->ps_control_mask = msr.ps_control_mask; i++; } return 0; }
uint32_t acpicpu_md_flags(void) { struct cpu_info *ci = curcpu(); struct pci_attach_args pa; uint32_t family, val = 0; uint32_t regs[4]; uint64_t msr; if (acpi_md_ncpus() == 1) val |= ACPICPU_FLAG_C_BM; if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0) val |= ACPICPU_FLAG_C_FFH; /* * By default, assume that the local APIC timer * as well as TSC are stalled during C3 sleep. */ val |= ACPICPU_FLAG_C_APIC | ACPICPU_FLAG_C_TSC; /* * Detect whether TSC is invariant. If it is not, we keep the flag to * note that TSC will not run at constant rate. Depending on the CPU, * this may affect P- and T-state changes, but especially relevant * are C-states; with variant TSC, states larger than C1 may * completely stop the counter. */ if (tsc_is_invariant()) val &= ~ACPICPU_FLAG_C_TSC; switch (cpu_vendor) { case CPUVENDOR_IDT: if ((ci->ci_feat_val[1] & CPUID2_EST) != 0) val |= ACPICPU_FLAG_P_FFH; if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0) val |= ACPICPU_FLAG_T_FFH; break; case CPUVENDOR_INTEL: /* * Bus master control and arbitration should be * available on all supported Intel CPUs (to be * sure, this is double-checked later from the * firmware data). These flags imply that it is * not necessary to flush caches before C3 state. */ val |= ACPICPU_FLAG_C_BM | ACPICPU_FLAG_C_ARB; /* * Check if we can use "native", MSR-based, * access. If not, we have to resort to I/O. */ if ((ci->ci_feat_val[1] & CPUID2_EST) != 0) val |= ACPICPU_FLAG_P_FFH; if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0) val |= ACPICPU_FLAG_T_FFH; /* * Check whether MSR_APERF, MSR_MPERF, and Turbo * Boost are available. Also see if we might have * an invariant local APIC timer ("ARAT"). */ if (cpuid_level >= 0x06) { x86_cpuid(0x00000006, regs); if ((regs[2] & CPUID_DSPM_HWF) != 0) val |= ACPICPU_FLAG_P_HWF; if ((regs[0] & CPUID_DSPM_IDA) != 0) val |= ACPICPU_FLAG_P_TURBO; if ((regs[0] & CPUID_DSPM_ARAT) != 0) val &= ~ACPICPU_FLAG_C_APIC; } break; case CPUVENDOR_AMD: x86_cpuid(0x80000000, regs); if (regs[0] < 0x80000007) break; x86_cpuid(0x80000007, regs); family = CPUID_TO_FAMILY(ci->ci_signature); switch (family) { case 0x0f: /* * Disable C1E if present. */ if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT) val |= ACPICPU_FLAG_C_C1E; /* * Evaluate support for the "FID/VID * algorithm" also used by powernow(4). */ if ((regs[3] & CPUID_APM_FID) == 0) break; if ((regs[3] & CPUID_APM_VID) == 0) break; val |= ACPICPU_FLAG_P_FFH | ACPICPU_FLAG_P_FIDVID; break; case 0x10: case 0x11: /* * Disable C1E if present. */ if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT) val |= ACPICPU_FLAG_C_C1E; /* FALLTHROUGH */ case 0x12: case 0x14: /* AMD Fusion */ case 0x15: /* AMD Bulldozer */ /* * Like with Intel, detect MSR-based P-states, * and AMD's "turbo" (Core Performance Boost), * respectively. */ if ((regs[3] & CPUID_APM_HWP) != 0) val |= ACPICPU_FLAG_P_FFH; if ((regs[3] & CPUID_APM_CPB) != 0) val |= ACPICPU_FLAG_P_TURBO; /* * Also check for APERF and MPERF, * first available in the family 10h. */ if (cpuid_level >= 0x06) { x86_cpuid(0x00000006, regs); if ((regs[2] & CPUID_DSPM_HWF) != 0) val |= ACPICPU_FLAG_P_HWF; } break; } break; } /* * There are several erratums for PIIX4. */ if (pci_find_device(&pa, acpicpu_md_quirk_piix4) != 0) val |= ACPICPU_FLAG_PIIX4; return val; }
/* * Enable LAPIC, configure interrupts. */ void lapic_init(boolean_t bsp) { uint32_t timer; u_int temp; /* * Install vectors * * Since IDT is shared between BSP and APs, these vectors * only need to be installed once; we do it on BSP. */ if (bsp) { if (cpu_vendor_id == CPU_VENDOR_AMD && CPUID_TO_FAMILY(cpu_id) >= 0xf) { uint32_t tcr; /* * Set the LINTEN bit in the HyperTransport * Transaction Control Register. * * This will cause EXTINT and NMI interrupts * routed over the hypertransport bus to be * fed into the LAPIC LINT0/LINT1. If the bit * isn't set, the interrupts will go to the * general cpu INTR/NMI pins. On a dual-core * cpu the interrupt winds up going to BOTH cpus. * The first cpu that does the interrupt ack * cycle will get the correct interrupt. The * second cpu that does it will get a spurious * interrupt vector (typically IRQ 7). */ outl(0x0cf8, (1 << 31) | /* enable */ (0 << 16) | /* bus */ (0x18 << 11) | /* dev (cpu + 0x18) */ (0 << 8) | /* func */ 0x68 /* reg */ ); tcr = inl(0xcfc); if ((tcr & 0x00010000) == 0) { kprintf("LAPIC: AMD LINTEN on\n"); outl(0xcfc, tcr|0x00010000); } outl(0x0cf8, 0); } /* Install a 'Spurious INTerrupt' vector */ setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint, SDT_SYSIGT, SEL_KPL, 0); /* Install a timer vector */ setidt_global(XTIMER_OFFSET, Xtimer, SDT_SYSIGT, SEL_KPL, 0); /* Install an inter-CPU IPI for TLB invalidation */ setidt_global(XINVLTLB_OFFSET, Xinvltlb, SDT_SYSIGT, SEL_KPL, 0); /* Install an inter-CPU IPI for IPIQ messaging */ setidt_global(XIPIQ_OFFSET, Xipiq, SDT_SYSIGT, SEL_KPL, 0); /* Install an inter-CPU IPI for CPU stop/restart */ setidt_global(XCPUSTOP_OFFSET, Xcpustop, SDT_SYSIGT, SEL_KPL, 0); } /* * Setup LINT0 as ExtINT on the BSP. This is theoretically an * aggregate interrupt input from the 8259. The INTA cycle * will be routed to the external controller (the 8259) which * is expected to supply the vector. * * Must be setup edge triggered, active high. * * Disable LINT0 on BSP, if I/O APIC is enabled. * * Disable LINT0 on the APs. It doesn't matter what delivery * mode we use because we leave it masked. */ temp = lapic->lvt_lint0; temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK | APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK); if (bsp) { temp |= APIC_LVT_DM_EXTINT; if (ioapic_enable) temp |= APIC_LVT_MASKED; } else { temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED; } lapic->lvt_lint0 = temp; /* * Setup LINT1 as NMI. * * Must be setup edge trigger, active high. * * Enable LINT1 on BSP, if I/O APIC is enabled. * * Disable LINT1 on the APs. */ temp = lapic->lvt_lint1; temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK | APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK); temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI; if (bsp && ioapic_enable) temp &= ~APIC_LVT_MASKED; lapic->lvt_lint1 = temp; /* * Mask the LAPIC error interrupt, LAPIC performance counter * interrupt. */ lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED; lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED; /* * Set LAPIC timer vector and mask the LAPIC timer interrupt. */ timer = lapic->lvt_timer; timer &= ~APIC_LVTT_VECTOR; timer |= XTIMER_OFFSET; timer |= APIC_LVTT_MASKED; lapic->lvt_timer = timer; /* * Set the Task Priority Register as needed. At the moment allow * interrupts on all cpus (the APs will remain CLId until they are * ready to deal). 
*/ temp = lapic->tpr; temp &= ~APIC_TPR_PRIO; /* clear priority field */ lapic->tpr = temp; /* * Enable the LAPIC */ temp = lapic->svr; temp |= APIC_SVR_ENABLE; /* enable the LAPIC */ temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */ /* * Set the spurious interrupt vector. The low 4 bits of the vector * must be 1111. */ if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F) panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET); temp &= ~APIC_SVR_VECTOR; temp |= XSPURIOUSINT_OFFSET; lapic->svr = temp; /* * Pump out a few EOIs to clean out interrupts that got through * before we were able to set the TPR. */ lapic->eoi = 0; lapic->eoi = 0; lapic->eoi = 0; if (bsp) { lapic_timer_calibrate(); if (lapic_timer_enable) { cputimer_intr_register(&lapic_cputimer_intr); cputimer_intr_select(&lapic_cputimer_intr, 0); } } else { lapic_timer_set_divisor(lapic_timer_divisor_idx); } if (bootverbose) apic_dump("apic_initialize()"); }