static void
qpi_identify(driver_t *driver, device_t parent)
{

	/* Check CPUID to ensure this is an i7 CPU of some sort. */
	if (!(cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x1a || CPUID_TO_MODEL(cpu_id) == 0x2c)))
		return;

	/* PCI config register access is required. */
	if (pci_cfgregopen() == 0)
		return;

	/* Add a qpi bus device. */
	if (BUS_ADD_CHILD(parent, 20, "qpi", -1) == NULL)
		panic("Failed to add qpi bus");
}
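/*
 * Illustrative sketch (not the kernel's actual CPUID_TO_FAMILY/CPUID_TO_MODEL
 * macros): how a raw CPUID leaf-1 signature such as cpu_id decodes into the
 * family and model values tested above.  The extended fields only contribute
 * for base family 0x6 or 0xf, per the Intel/AMD CPUID conventions.
 */
static unsigned int
cpuid_sig_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	if (fam == 0xf)
		fam += (sig >> 20) & 0xff;	/* add extended family */
	return (fam);
}

static unsigned int
cpuid_sig_model(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;
	unsigned int model = (sig >> 4) & 0xf;

	if (fam == 0x6 || fam == 0xf)
		model |= ((sig >> 16) & 0xf) << 4;	/* extended model */
	return (model);
}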
/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	uint64_t fcr;

	/*
	 * Explicitly enable CX8 and PGE on C3.
	 *
	 * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
	 */
	if (CPUID_TO_MODEL(cpu_id) <= 9)
		fcr = (1 << 1) | (1 << 7);
	else
		fcr = 0;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
	if (via_feature_xcrypt != 0)
		fcr |= 1 << 28;

	wrmsr(0x1107, rdmsr(0x1107) | fcr);
}
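/*
 * A minimal user-space sketch of the same PadLock probe that init_via()
 * performs: query VIA's vendor leaf 0xC0000000 and, if leaf 0xC0000001
 * exists, inspect its EDX flags.  Assumes GCC/Clang's <cpuid.h>.  The bit
 * positions used here (RNG present = bit 2, ACE present = bit 6) are taken
 * from common PadLock documentation and should be verified against VIA's
 * programming guide before relying on them.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid(0xc0000000, eax, ebx, ecx, edx);
	if (eax < 0xc0000001) {
		printf("no VIA extended CPUID leaves\n");
		return (0);
	}
	__cpuid(0xc0000001, eax, ebx, ecx, edx);
	printf("PadLock RNG %spresent, ACE %spresent\n",
	    (edx & (1 << 2)) ? "" : "not ",
	    (edx & (1 << 6)) ? "" : "not ");
	return (0);
}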
void
msi_init(void)
{

	/* Check if we have a supported CPU. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
		break;
	case CPU_VENDOR_CENTAUR:
		if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf)
			break;
		/* FALLTHROUGH */
	default:
		return;
	}

	msi_enabled = 1;
	intr_register_pic(&msi_pic);
	mtx_init(&msi_lock, "msi", NULL, MTX_DEF);
}
/*
 * IDT WinChip C6/2/2A/2B/3
 *
 * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
 */
static void
init_winchip(void)
{
	u_int regs[4];
	uint64_t fcr;

	fcr = rdmsr(0x0107);

	/*
	 * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
	 */
	fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
	fcr &= ~(1ULL << 11);

	/*
	 * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
	 */
	if (CPUID_TO_MODEL(cpu_id) >= 8)
		fcr |= (1 << 12) | (1 << 19) | (1 << 20);

	wrmsr(0x0107, fcr);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
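/*
 * Hedged illustration of the read-modify-write pattern applied to the
 * WinChip FCR above, factored into a tiny helper.  msr_set_clear() is a
 * hypothetical name, not a kernel API; rdmsr()/wrmsr() are assumed to be
 * the same primitives used elsewhere in this file.
 */
static void
msr_set_clear(u_int msr, uint64_t set, uint64_t clear)
{
	uint64_t v;

	v = rdmsr(msr);
	v &= ~clear;		/* drop the bits to be cleared */
	v |= set;		/* then turn on the bits to be set */
	wrmsr(msr, v);
}

/* Equivalent to the FCR update in init_winchip() for a C6-class part:	*/
/* msr_set_clear(0x0107,						*/
/*     (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16),		*/
/*     1ULL << 11);							*/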
void printcpuinfo(void) { u_int regs[4], i; char *brand; cpu_class = i386_cpus[cpu].cpu_class; printf("CPU: "); strncpy(cpu_model, i386_cpus[cpu].cpu_name, sizeof (cpu_model)); /* Check for extended CPUID information and a processor name. */ init_exthigh(); if (cpu_exthigh >= 0x80000004) { brand = cpu_brand; for (i = 0x80000002; i < 0x80000005; i++) { do_cpuid(i, regs); memcpy(brand, regs, sizeof(regs)); brand += sizeof(regs); } } if (cpu_vendor_id == CPU_VENDOR_INTEL) { if ((cpu_id & 0xf00) > 0x300) { u_int brand_index; cpu_model[0] = '\0'; switch (cpu_id & 0x3000) { case 0x1000: strcpy(cpu_model, "Overdrive "); break; case 0x2000: strcpy(cpu_model, "Dual "); break; } switch (cpu_id & 0xf00) { case 0x400: strcat(cpu_model, "i486 "); /* Check the particular flavor of 486 */ switch (cpu_id & 0xf0) { case 0x00: case 0x10: strcat(cpu_model, "DX"); break; case 0x20: strcat(cpu_model, "SX"); break; case 0x30: strcat(cpu_model, "DX2"); break; case 0x40: strcat(cpu_model, "SL"); break; case 0x50: strcat(cpu_model, "SX2"); break; case 0x70: strcat(cpu_model, "DX2 Write-Back Enhanced"); break; case 0x80: strcat(cpu_model, "DX4"); break; } break; case 0x500: /* Check the particular flavor of 586 */ strcat(cpu_model, "Pentium"); switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, " A-step"); break; case 0x10: strcat(cpu_model, "/P5"); break; case 0x20: strcat(cpu_model, "/P54C"); break; case 0x30: strcat(cpu_model, "/P24T"); break; case 0x40: strcat(cpu_model, "/P55C"); break; case 0x70: strcat(cpu_model, "/P54C"); break; case 0x80: strcat(cpu_model, "/P55C (quarter-micron)"); break; default: /* nothing */ break; } #if defined(I586_CPU) && !defined(NO_F00F_HACK) /* * XXX - If/when Intel fixes the bug, this * should also check the version of the * CPU, not just that it's a Pentium. */ has_f00f_bug = 1; #endif break; case 0x600: /* Check the particular flavor of 686 */ switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, "Pentium Pro A-step"); break; case 0x10: strcat(cpu_model, "Pentium Pro"); break; case 0x30: case 0x50: case 0x60: strcat(cpu_model, "Pentium II/Pentium II Xeon/Celeron"); cpu = CPU_PII; break; case 0x70: case 0x80: case 0xa0: case 0xb0: strcat(cpu_model, "Pentium III/Pentium III Xeon/Celeron"); cpu = CPU_PIII; break; default: strcat(cpu_model, "Unknown 80686"); break; } break; case 0xf00: strcat(cpu_model, "Pentium 4"); cpu = CPU_P4; break; default: strcat(cpu_model, "unknown"); break; } /* * If we didn't get a brand name from the extended * CPUID, try to look it up in the brand table. */ if (cpu_high > 0 && *cpu_brand == '\0') { brand_index = cpu_procinfo & CPUID_BRAND_INDEX; if (brand_index <= MAX_BRAND_INDEX && cpu_brandtable[brand_index] != NULL) strcpy(cpu_brand, cpu_brandtable[brand_index]); } } } else if (cpu_vendor_id == CPU_VENDOR_AMD) { /* * Values taken from AMD Processor Recognition * http://www.amd.com/K6/k6docs/pdf/20734g.pdf * (also describes ``Features'' encodings. 
*/ strcpy(cpu_model, "AMD "); switch (cpu_id & 0xFF0) { case 0x410: strcat(cpu_model, "Standard Am486DX"); break; case 0x430: strcat(cpu_model, "Enhanced Am486DX2 Write-Through"); break; case 0x470: strcat(cpu_model, "Enhanced Am486DX2 Write-Back"); break; case 0x480: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Through"); break; case 0x490: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Back"); break; case 0x4E0: strcat(cpu_model, "Am5x86 Write-Through"); break; case 0x4F0: strcat(cpu_model, "Am5x86 Write-Back"); break; case 0x500: strcat(cpu_model, "K5 model 0"); tsc_freq = 0; break; case 0x510: strcat(cpu_model, "K5 model 1"); break; case 0x520: strcat(cpu_model, "K5 PR166 (model 2)"); break; case 0x530: strcat(cpu_model, "K5 PR200 (model 3)"); break; case 0x560: strcat(cpu_model, "K6"); break; case 0x570: strcat(cpu_model, "K6 266 (model 1)"); break; case 0x580: strcat(cpu_model, "K6-2"); break; case 0x590: strcat(cpu_model, "K6-III"); break; case 0x5a0: strcat(cpu_model, "Geode LX"); /* * Make sure the TSC runs through suspension, * otherwise we can't use it as timecounter */ wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL); break; default: strcat(cpu_model, "Unknown"); break; } #if defined(I586_CPU) && defined(CPU_WT_ALLOC) if ((cpu_id & 0xf00) == 0x500) { if (((cpu_id & 0x0f0) > 0) && ((cpu_id & 0x0f0) < 0x60) && ((cpu_id & 0x00f) > 3)) enable_K5_wt_alloc(); else if (((cpu_id & 0x0f0) > 0x80) || (((cpu_id & 0x0f0) == 0x80) && (cpu_id & 0x00f) > 0x07)) enable_K6_2_wt_alloc(); else if ((cpu_id & 0x0f0) > 0x50) enable_K6_wt_alloc(); } #endif } else if (cpu_vendor_id == CPU_VENDOR_CYRIX) { strcpy(cpu_model, "Cyrix "); switch (cpu_id & 0xff0) { case 0x440: strcat(cpu_model, "MediaGX"); break; case 0x520: strcat(cpu_model, "6x86"); break; case 0x540: cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); break; case 0x600: strcat(cpu_model, "6x86MX"); break; default: /* * Even though CPU supports the cpuid * instruction, it can be disabled. * Therefore, this routine supports all Cyrix * CPUs. */ switch (cyrix_did & 0xf0) { case 0x00: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486SLC"); break; case 0x01: strcat(cpu_model, "486DLC"); break; case 0x02: strcat(cpu_model, "486SLC2"); break; case 0x03: strcat(cpu_model, "486DLC2"); break; case 0x04: strcat(cpu_model, "486SRx"); break; case 0x05: strcat(cpu_model, "486DRx"); break; case 0x06: strcat(cpu_model, "486SRx2"); break; case 0x07: strcat(cpu_model, "486DRx2"); break; case 0x08: strcat(cpu_model, "486SRu"); break; case 0x09: strcat(cpu_model, "486DRu"); break; case 0x0a: strcat(cpu_model, "486SRu2"); break; case 0x0b: strcat(cpu_model, "486DRu2"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x10: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486S"); break; case 0x01: strcat(cpu_model, "486S2"); break; case 0x02: strcat(cpu_model, "486Se"); break; case 0x03: strcat(cpu_model, "486S2e"); break; case 0x0a: strcat(cpu_model, "486DX"); break; case 0x0b: strcat(cpu_model, "486DX2"); break; case 0x0f: strcat(cpu_model, "486DX4"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x20: if ((cyrix_did & 0x0f) < 8) strcat(cpu_model, "6x86"); /* Where did you get it? 
*/ else strcat(cpu_model, "5x86"); break; case 0x30: strcat(cpu_model, "6x86"); break; case 0x40: if ((cyrix_did & 0xf000) == 0x3000) { cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); } else strcat(cpu_model, "MediaGX"); break; case 0x50: strcat(cpu_model, "6x86MX"); break; case 0xf0: switch (cyrix_did & 0x0f) { case 0x0d: strcat(cpu_model, "Overdrive CPU"); break; case 0x0e: strcpy(cpu_model, "Texas Instruments 486SXL"); break; case 0x0f: strcat(cpu_model, "486SLC/DLC"); break; default: strcat(cpu_model, "Unknown"); break; } break; default: strcat(cpu_model, "Unknown"); break; } break; } } else if (cpu_vendor_id == CPU_VENDOR_RISE) { strcpy(cpu_model, "Rise "); switch (cpu_id & 0xff0) { case 0x500: /* 6401 and 6441 (Kirin) */ case 0x520: /* 6510 (Lynx) */ strcat(cpu_model, "mP6"); break; default: strcat(cpu_model, "Unknown"); } } else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) { switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "IDT WinChip C6"); /* * http://www.centtech.com/c6_data_sheet.pdf * * I-12 RDTSC may return incoherent values in EDX:EAX * I-13 RDTSC hangs when certain event counters are used */ tsc_freq = 0; break; case 0x580: strcpy(cpu_model, "IDT WinChip 2"); break; case 0x590: strcpy(cpu_model, "IDT WinChip 3"); break; case 0x660: strcpy(cpu_model, "VIA C3 Samuel"); break; case 0x670: if (cpu_id & 0x8) strcpy(cpu_model, "VIA C3 Ezra"); else strcpy(cpu_model, "VIA C3 Samuel 2"); break; case 0x680: strcpy(cpu_model, "VIA C3 Ezra-T"); break; case 0x690: strcpy(cpu_model, "VIA C3 Nehemiah"); break; case 0x6a0: case 0x6d0: strcpy(cpu_model, "VIA C7 Esther"); break; case 0x6f0: strcpy(cpu_model, "VIA Nano"); break; default: strcpy(cpu_model, "VIA/IDT Unknown"); } } else if (cpu_vendor_id == CPU_VENDOR_IBM) { strcpy(cpu_model, "Blue Lightning CPU"); } else if (cpu_vendor_id == CPU_VENDOR_NSC) { switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "Geode SC1100"); cpu = CPU_GEODE1100; if ((cpu_id & CPUID_STEPPING) == 0) tsc_freq = 0; break; default: strcpy(cpu_model, "Geode/NSC unknown"); break; } } /* * Replace cpu_model with cpu_brand minus leading spaces if * we have one. */ brand = cpu_brand; while (*brand == ' ') ++brand; if (*brand != '\0') strcpy(cpu_model, brand); printf("%s (", cpu_model); switch(cpu_class) { case CPUCLASS_286: printf("286"); break; case CPUCLASS_386: printf("386"); break; #if defined(I486_CPU) case CPUCLASS_486: printf("486"); break; #endif #if defined(I586_CPU) case CPUCLASS_586: if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } printf("586"); break; #endif #if defined(I686_CPU) case CPUCLASS_686: if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } printf("686"); break; #endif default: printf("Unknown"); /* will panic below... 
*/ } printf("-class CPU)\n"); if(*cpu_vendor) printf(" Origin = \"%s\"",cpu_vendor); if(cpu_id) printf(" Id = 0x%x", cpu_id); if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_TRANSMETA || cpu_vendor_id == CPU_VENDOR_RISE || cpu_vendor_id == CPU_VENDOR_CENTAUR || cpu_vendor_id == CPU_VENDOR_NSC || (cpu_vendor_id == CPU_VENDOR_CYRIX && ((cpu_id & 0xf00) > 0x500))) { printf(" Family = 0x%x", CPUID_TO_FAMILY(cpu_id)); printf(" Model = 0x%x", CPUID_TO_MODEL(cpu_id)); printf(" Stepping = %u", cpu_id & CPUID_STEPPING); if (cpu_vendor_id == CPU_VENDOR_CYRIX) printf("\n DIR=0x%04x", cyrix_did); /* * AMD CPUID Specification * http://support.amd.com/us/Embedded_TechDocs/25481.pdf * * Intel Processor Identification and CPUID Instruction * http://www.intel.com/assets/pdf/appnote/241618.pdf */ if (cpu_high > 0) { /* * Here we should probably set up flags indicating * whether or not various features are available. * The interesting ones are probably VME, PSE, PAE, * and PGE. The code already assumes without bothering * to check that all CPUs >= Pentium have a TSC and * MSRs. */ printf("\n Features=0x%b", cpu_feature, "\020" "\001FPU" /* Integral FPU */ "\002VME" /* Extended VM86 mode support */ "\003DE" /* Debugging Extensions (CR4.DE) */ "\004PSE" /* 4MByte page tables */ "\005TSC" /* Timestamp counter */ "\006MSR" /* Machine specific registers */ "\007PAE" /* Physical address extension */ "\010MCE" /* Machine Check support */ "\011CX8" /* CMPEXCH8 instruction */ "\012APIC" /* SMP local APIC */ "\013oldMTRR" /* Previous implementation of MTRR */ "\014SEP" /* Fast System Call */ "\015MTRR" /* Memory Type Range Registers */ "\016PGE" /* PG_G (global bit) support */ "\017MCA" /* Machine Check Architecture */ "\020CMOV" /* CMOV instruction */ "\021PAT" /* Page attributes table */ "\022PSE36" /* 36 bit address space support */ "\023PN" /* Processor Serial number */ "\024CLFLUSH" /* Has the CLFLUSH instruction */ "\025<b20>" "\026DTS" /* Debug Trace Store */ "\027ACPI" /* ACPI support */ "\030MMX" /* MMX instructions */ "\031FXSR" /* FXSAVE/FXRSTOR */ "\032SSE" /* Streaming SIMD Extensions */ "\033SSE2" /* Streaming SIMD Extensions #2 */ "\034SS" /* Self snoop */ "\035HTT" /* Hyperthreading (see EBX bit 16-23) */ "\036TM" /* Thermal Monitor clock slowdown */ "\037IA64" /* CPU can execute IA64 instructions */ "\040PBE" /* Pending Break Enable */ ); if (cpu_feature2 != 0) { printf("\n Features2=0x%b", cpu_feature2, "\020" "\001SSE3" /* SSE3 */ "\002PCLMULQDQ" /* Carry-Less Mul Quadword */ "\003DTES64" /* 64-bit Debug Trace */ "\004MON" /* MONITOR/MWAIT Instructions */ "\005DS_CPL" /* CPL Qualified Debug Store */ "\006VMX" /* Virtual Machine Extensions */ "\007SMX" /* Safer Mode Extensions */ "\010EST" /* Enhanced SpeedStep */ "\011TM2" /* Thermal Monitor 2 */ "\012SSSE3" /* SSSE3 */ "\013CNXT-ID" /* L1 context ID available */ "\014<b11>" "\015FMA" /* Fused Multiply Add */ "\016CX16" /* CMPXCHG16B Instruction */ "\017xTPR" /* Send Task Priority Messages*/ "\020PDCM" /* Perf/Debug Capability MSR */ "\021<b16>" "\022PCID" /* Process-context Identifiers*/ "\023DCA" /* Direct Cache Access */ "\024SSE4.1" /* SSE 4.1 */ "\025SSE4.2" /* SSE 4.2 */ "\026x2APIC" /* xAPIC Extensions */ "\027MOVBE" /* MOVBE Instruction */ "\030POPCNT" /* POPCNT Instruction */ "\031TSCDLT" /* TSC-Deadline Timer */ "\032AESNI" /* AES Crypto */ "\033XSAVE" /* XSAVE/XRSTOR States */ "\034OSXSAVE" /* OS-Enabled State Management*/ "\035AVX" /* Advanced Vector Extensions */ "\036F16C" /* 
Half-precision conversions */ "\037RDRAND" /* RDRAND Instruction */ "\040HV" /* Hypervisor */ ); } if (amd_feature != 0) { printf("\n AMD Features=0x%b", amd_feature, "\020" /* in hex */ "\001<s0>" /* Same */ "\002<s1>" /* Same */ "\003<s2>" /* Same */ "\004<s3>" /* Same */ "\005<s4>" /* Same */ "\006<s5>" /* Same */ "\007<s6>" /* Same */ "\010<s7>" /* Same */ "\011<s8>" /* Same */ "\012<s9>" /* Same */ "\013<b10>" /* Undefined */ "\014SYSCALL" /* Have SYSCALL/SYSRET */ "\015<s12>" /* Same */ "\016<s13>" /* Same */ "\017<s14>" /* Same */ "\020<s15>" /* Same */ "\021<s16>" /* Same */ "\022<s17>" /* Same */ "\023<b18>" /* Reserved, unknown */ "\024MP" /* Multiprocessor Capable */ "\025NX" /* Has EFER.NXE, NX */ "\026<b21>" /* Undefined */ "\027MMX+" /* AMD MMX Extensions */ "\030<s23>" /* Same */ "\031<s24>" /* Same */ "\032FFXSR" /* Fast FXSAVE/FXRSTOR */ "\033Page1GB" /* 1-GB large page support */ "\034RDTSCP" /* RDTSCP */ "\035<b28>" /* Undefined */ "\036LM" /* 64 bit long mode */ "\0373DNow!+" /* AMD 3DNow! Extensions */ "\0403DNow!" /* AMD 3DNow! */ ); } if (amd_feature2 != 0) { printf("\n AMD Features2=0x%b", amd_feature2, "\020" "\001LAHF" /* LAHF/SAHF in long mode */ "\002CMP" /* CMP legacy */ "\003SVM" /* Secure Virtual Mode */ "\004ExtAPIC" /* Extended APIC register */ "\005CR8" /* CR8 in legacy mode */ "\006ABM" /* LZCNT instruction */ "\007SSE4A" /* SSE4A */ "\010MAS" /* Misaligned SSE mode */ "\011Prefetch" /* 3DNow! Prefetch/PrefetchW */ "\012OSVW" /* OS visible workaround */ "\013IBS" /* Instruction based sampling */ "\014XOP" /* XOP extended instructions */ "\015SKINIT" /* SKINIT/STGI */ "\016WDT" /* Watchdog timer */ "\017<b14>" "\020LWP" /* Lightweight Profiling */ "\021FMA4" /* 4-operand FMA instructions */ "\022<b17>" "\023<b18>" "\024NodeId" /* NodeId MSR support */ "\025<b20>" "\026TBM" /* Trailing Bit Manipulation */ "\027Topology" /* Topology Extensions */ "\030<b23>" "\031<b24>" "\032<b25>" "\033<b26>" "\034<b27>" "\035<b28>" "\036<b29>" "\037<b30>" "\040<b31>" ); } if (via_feature_rng != 0 || via_feature_xcrypt != 0) print_via_padlock_info(); if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_AMD) cpu_feature &= ~CPUID_HTT; /* * If this CPU supports P-state invariant TSC then * mention the capability. */ if (tsc_is_invariant) { printf("\n TSC: P-state invariant"); if (tsc_perf_stat) printf(", performance statistics"); } } } else if (cpu_vendor_id == CPU_VENDOR_CYRIX) { printf(" DIR=0x%04x", cyrix_did); printf(" Stepping=%u", (cyrix_did & 0xf000) >> 12); printf(" Revision=%u", (cyrix_did & 0x0f00) >> 8); #ifndef CYRIX_CACHE_REALLY_WORKS if (cpu == CPU_M1 && (cyrix_did & 0xff00) < 0x1700) printf("\n CPU cache: write-through mode"); #endif }
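/*
 * Sketch of a user-space decoder for the "%b" bit-name strings used by the
 * Features printfs above (FreeBSD printf(9) convention, as understood here):
 * the first byte of the format string selects the numeric output base, and
 * each following control byte is a 1-origin bit number whose name runs until
 * the next control byte.  Simplified and illustrative only: it always prints
 * hex and always emits the angle brackets.
 */
#include <stdio.h>

static void
print_bits(unsigned int value, const char *fmt)
{
	const char *name;
	int bit, first = 1;

	printf("0x%x<", value);
	fmt++;				/* skip the base byte */
	while (*fmt != '\0') {
		bit = *fmt++;		/* 1-origin bit number (control byte) */
		name = fmt;
		while (*fmt > 32)	/* name runs until the next control byte */
			fmt++;
		if (value & (1u << (bit - 1))) {
			printf("%s%.*s", first ? "" : ",",
			    (int)(fmt - name), name);
			first = 0;
		}
	}
	printf(">\n");
}

/* print_bits(0x1f, "\020\001FPU\002VME\003DE\004PSE\005TSC")		*/
/* would print: 0x1f<FPU,VME,DE,PSE,TSC>				*/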
/* * Initialize the local APIC on the BSP. */ static int madt_setup_local(void) { ACPI_TABLE_DMAR *dmartbl; vm_paddr_t dmartbl_physaddr; const char *reason; char *hw_vendor; u_int p[4]; int user_x2apic; bool bios_x2apic; madt = pmap_mapbios(madt_physaddr, madt_length); if ((cpu_feature2 & CPUID2_X2APIC) != 0) { reason = NULL; /* * Automatically detect several configurations where * x2APIC mode is known to cause troubles. User can * override the setting with hw.x2apic_enable tunable. */ dmartbl_physaddr = acpi_find_table(ACPI_SIG_DMAR); if (dmartbl_physaddr != 0) { dmartbl = acpi_map_table(dmartbl_physaddr, ACPI_SIG_DMAR); if ((dmartbl->Flags & ACPI_DMAR_X2APIC_OPT_OUT) != 0) reason = "by DMAR table"; acpi_unmap_table(dmartbl); } if (vm_guest == VM_GUEST_VMWARE) { vmware_hvcall(VMW_HVCMD_GETVCPU_INFO, p); if ((p[0] & VMW_VCPUINFO_VCPU_RESERVED) != 0 || (p[0] & VMW_VCPUINFO_LEGACY_X2APIC) == 0) reason = "inside VMWare without intr redirection"; } else if (vm_guest == VM_GUEST_XEN) { reason = "due to running under XEN"; } else if (vm_guest == VM_GUEST_NO && CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x2a) { hw_vendor = kern_getenv("smbios.planar.maker"); /* * It seems that some Lenovo and ASUS * SandyBridge-based notebook BIOSes have a * bug which prevents booting AP in x2APIC * mode. Since the only way to detect mobile * CPU is to check northbridge pci id, which * cannot be done that early, disable x2APIC * for all Lenovo and ASUS SandyBridge * machines. */ if (hw_vendor != NULL) { if (!strcmp(hw_vendor, "LENOVO") || !strcmp(hw_vendor, "ASUSTeK Computer Inc.")) { reason = "for a suspected SandyBridge BIOS bug"; } freeenv(hw_vendor); } } bios_x2apic = lapic_is_x2apic(); if (reason != NULL && bios_x2apic) { if (bootverbose) printf("x2APIC should be disabled %s but " "already enabled by BIOS; enabling.\n", reason); reason = NULL; } if (reason == NULL) x2apic_mode = 1; else if (bootverbose) printf("x2APIC available but disabled %s\n", reason); user_x2apic = x2apic_mode; TUNABLE_INT_FETCH("hw.x2apic_enable", &user_x2apic); if (user_x2apic != x2apic_mode) { if (bios_x2apic && !user_x2apic) printf("x2APIC disabled by tunable and " "enabled by BIOS; ignoring tunable."); else x2apic_mode = user_x2apic; } } lapic_init(madt->Address); printf("ACPI APIC Table: <%.*s %.*s>\n", (int)sizeof(madt->Header.OemId), madt->Header.OemId, (int)sizeof(madt->Header.OemTableId), madt->Header.OemTableId); /* * We ignore 64-bit local APIC override entries. Should we * perhaps emit a warning here if we find one? */ return (0); }
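/*
 * lapic_is_x2apic(), used above to detect firmware-enabled x2APIC mode, is
 * not shown here.  A hedged sketch of what such a check typically looks
 * like: the IA32_APIC_BASE MSR (0x1b) reports x2APIC mode in the EXTD bit
 * (bit 10).  rdmsr() is assumed to be the kernel primitive used elsewhere
 * in this file; the real routine also verifies the global-enable bit.
 */
static bool
x2apic_currently_enabled(void)
{
	uint64_t apic_base;

	apic_base = rdmsr(0x01b);		/* IA32_APIC_BASE */
	return ((apic_base & (1ULL << 10)) != 0);	/* EXTD: x2APIC mode */
}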
void printcpuinfo(void) { u_int regs[4], i; char *brand; cpu_class = amd64_cpus[cpu].cpu_class; printf("CPU: "); strncpy(cpu_model, amd64_cpus[cpu].cpu_name, sizeof (cpu_model)); /* Check for extended CPUID information and a processor name. */ if (cpu_exthigh >= 0x80000004) { brand = cpu_brand; for (i = 0x80000002; i < 0x80000005; i++) { do_cpuid(i, regs); memcpy(brand, regs, sizeof(regs)); brand += sizeof(regs); } } switch (cpu_vendor_id) { case CPU_VENDOR_INTEL: /* Please make up your mind folks! */ strcat(cpu_model, "EM64T"); break; case CPU_VENDOR_AMD: /* * Values taken from AMD Processor Recognition * http://www.amd.com/K6/k6docs/pdf/20734g.pdf * (also describes ``Features'' encodings. */ strcpy(cpu_model, "AMD "); if ((cpu_id & 0xf00) == 0xf00) strcat(cpu_model, "AMD64 Processor"); else strcat(cpu_model, "Unknown"); break; case CPU_VENDOR_CENTAUR: strcpy(cpu_model, "VIA "); if ((cpu_id & 0xff0) == 0x6f0) strcat(cpu_model, "Nano Processor"); else strcat(cpu_model, "Unknown"); break; default: strcat(cpu_model, "Unknown"); break; } /* * Replace cpu_model with cpu_brand minus leading spaces if * we have one. */ brand = cpu_brand; while (*brand == ' ') ++brand; if (*brand != '\0') strcpy(cpu_model, brand); printf("%s (", cpu_model); switch(cpu_class) { case CPUCLASS_K8: if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } printf("K8"); break; default: printf("Unknown"); /* will panic below... */ } printf("-class CPU)\n"); if (*cpu_vendor) printf(" Origin = \"%s\"", cpu_vendor); if (cpu_id) printf(" Id = 0x%x", cpu_id); if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_CENTAUR) { printf(" Family = 0x%x", CPUID_TO_FAMILY(cpu_id)); printf(" Model = 0x%x", CPUID_TO_MODEL(cpu_id)); printf(" Stepping = %u", cpu_id & CPUID_STEPPING); /* * AMD CPUID Specification * http://support.amd.com/us/Embedded_TechDocs/25481.pdf * * Intel Processor Identification and CPUID Instruction * http://www.intel.com/assets/pdf/appnote/241618.pdf */ if (cpu_high > 0) { /* * Here we should probably set up flags indicating * whether or not various features are available. * The interesting ones are probably VME, PSE, PAE, * and PGE. The code already assumes without bothering * to check that all CPUs >= Pentium have a TSC and * MSRs. 
*/ printf("\n Features=0x%b", cpu_feature, "\020" "\001FPU" /* Integral FPU */ "\002VME" /* Extended VM86 mode support */ "\003DE" /* Debugging Extensions (CR4.DE) */ "\004PSE" /* 4MByte page tables */ "\005TSC" /* Timestamp counter */ "\006MSR" /* Machine specific registers */ "\007PAE" /* Physical address extension */ "\010MCE" /* Machine Check support */ "\011CX8" /* CMPEXCH8 instruction */ "\012APIC" /* SMP local APIC */ "\013oldMTRR" /* Previous implementation of MTRR */ "\014SEP" /* Fast System Call */ "\015MTRR" /* Memory Type Range Registers */ "\016PGE" /* PG_G (global bit) support */ "\017MCA" /* Machine Check Architecture */ "\020CMOV" /* CMOV instruction */ "\021PAT" /* Page attributes table */ "\022PSE36" /* 36 bit address space support */ "\023PN" /* Processor Serial number */ "\024CLFLUSH" /* Has the CLFLUSH instruction */ "\025<b20>" "\026DTS" /* Debug Trace Store */ "\027ACPI" /* ACPI support */ "\030MMX" /* MMX instructions */ "\031FXSR" /* FXSAVE/FXRSTOR */ "\032SSE" /* Streaming SIMD Extensions */ "\033SSE2" /* Streaming SIMD Extensions #2 */ "\034SS" /* Self snoop */ "\035HTT" /* Hyperthreading (see EBX bit 16-23) */ "\036TM" /* Thermal Monitor clock slowdown */ "\037IA64" /* CPU can execute IA64 instructions */ "\040PBE" /* Pending Break Enable */ ); if (cpu_feature2 != 0) { printf("\n Features2=0x%b", cpu_feature2, "\020" "\001SSE3" /* SSE3 */ "\002PCLMULQDQ" /* Carry-Less Mul Quadword */ "\003DTES64" /* 64-bit Debug Trace */ "\004MON" /* MONITOR/MWAIT Instructions */ "\005DS_CPL" /* CPL Qualified Debug Store */ "\006VMX" /* Virtual Machine Extensions */ "\007SMX" /* Safer Mode Extensions */ "\010EST" /* Enhanced SpeedStep */ "\011TM2" /* Thermal Monitor 2 */ "\012SSSE3" /* SSSE3 */ "\013CNXT-ID" /* L1 context ID available */ "\014<b11>" "\015FMA" /* Fused Multiply Add */ "\016CX16" /* CMPXCHG16B Instruction */ "\017xTPR" /* Send Task Priority Messages*/ "\020PDCM" /* Perf/Debug Capability MSR */ "\021<b16>" "\022PCID" /* Process-context Identifiers*/ "\023DCA" /* Direct Cache Access */ "\024SSE4.1" /* SSE 4.1 */ "\025SSE4.2" /* SSE 4.2 */ "\026x2APIC" /* xAPIC Extensions */ "\027MOVBE" /* MOVBE Instruction */ "\030POPCNT" /* POPCNT Instruction */ "\031TSCDLT" /* TSC-Deadline Timer */ "\032AESNI" /* AES Crypto */ "\033XSAVE" /* XSAVE/XRSTOR States */ "\034OSXSAVE" /* OS-Enabled State Management*/ "\035AVX" /* Advanced Vector Extensions */ "\036F16C" /* Half-precision conversions */ "\037RDRAND" /* RDRAND Instruction */ "\040HV" /* Hypervisor */ ); } if (amd_feature != 0) { printf("\n AMD Features=0x%b", amd_feature, "\020" /* in hex */ "\001<s0>" /* Same */ "\002<s1>" /* Same */ "\003<s2>" /* Same */ "\004<s3>" /* Same */ "\005<s4>" /* Same */ "\006<s5>" /* Same */ "\007<s6>" /* Same */ "\010<s7>" /* Same */ "\011<s8>" /* Same */ "\012<s9>" /* Same */ "\013<b10>" /* Undefined */ "\014SYSCALL" /* Have SYSCALL/SYSRET */ "\015<s12>" /* Same */ "\016<s13>" /* Same */ "\017<s14>" /* Same */ "\020<s15>" /* Same */ "\021<s16>" /* Same */ "\022<s17>" /* Same */ "\023<b18>" /* Reserved, unknown */ "\024MP" /* Multiprocessor Capable */ "\025NX" /* Has EFER.NXE, NX */ "\026<b21>" /* Undefined */ "\027MMX+" /* AMD MMX Extensions */ "\030<s23>" /* Same */ "\031<s24>" /* Same */ "\032FFXSR" /* Fast FXSAVE/FXRSTOR */ "\033Page1GB" /* 1-GB large page support */ "\034RDTSCP" /* RDTSCP */ "\035<b28>" /* Undefined */ "\036LM" /* 64 bit long mode */ "\0373DNow!+" /* AMD 3DNow! Extensions */ "\0403DNow!" /* AMD 3DNow! 
*/ ); } if (amd_feature2 != 0) { printf("\n AMD Features2=0x%b", amd_feature2, "\020" "\001LAHF" /* LAHF/SAHF in long mode */ "\002CMP" /* CMP legacy */ "\003SVM" /* Secure Virtual Mode */ "\004ExtAPIC" /* Extended APIC register */ "\005CR8" /* CR8 in legacy mode */ "\006ABM" /* LZCNT instruction */ "\007SSE4A" /* SSE4A */ "\010MAS" /* Misaligned SSE mode */ "\011Prefetch" /* 3DNow! Prefetch/PrefetchW */ "\012OSVW" /* OS visible workaround */ "\013IBS" /* Instruction based sampling */ "\014XOP" /* XOP extended instructions */ "\015SKINIT" /* SKINIT/STGI */ "\016WDT" /* Watchdog timer */ "\017<b14>" "\020LWP" /* Lightweight Profiling */ "\021FMA4" /* 4-operand FMA instructions */ "\022<b17>" "\023<b18>" "\024NodeId" /* NodeId MSR support */ "\025<b20>" "\026TBM" /* Trailing Bit Manipulation */ "\027Topology" /* Topology Extensions */ "\030<b23>" "\031<b24>" "\032<b25>" "\033<b26>" "\034<b27>" "\035<b28>" "\036<b29>" "\037<b30>" "\040<b31>" ); } if (cpu_stdext_feature != 0) { printf("\n Standard Extended Features=0x%b", cpu_stdext_feature, "\020" /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ "\001GSFSBASE" "\002TSCADJ" /* Bit Manipulation Instructions */ "\004BMI1" /* Hardware Lock Elision */ "\005HLE" /* Advanced Vector Instructions 2 */ "\006AVX2" /* Supervisor Mode Execution Prot. */ "\010SMEP" /* Bit Manipulation Instructions */ "\011BMI2" "\012ENHMOVSB" /* Invalidate Processor Context ID */ "\013INVPCID" /* Restricted Transactional Memory */ "\014RTM" /* Enhanced NRBG */ "\022RDSEED" /* ADCX + ADOX */ "\023ADX" /* Supervisor Mode Access Prevention */ "\024SMAP" ); } if (via_feature_rng != 0 || via_feature_xcrypt != 0) print_via_padlock_info(); if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_AMD) cpu_feature &= ~CPUID_HTT; /* * If this CPU supports P-state invariant TSC then * mention the capability. */ if (tsc_is_invariant) { printf("\n TSC: P-state invariant"); if (tsc_perf_stat) printf(", performance statistics"); } } } /* Avoid ugly blank lines: only print newline when we have to. */ if (*cpu_vendor || cpu_id) printf("\n"); if (!bootverbose) return; if (cpu_vendor_id == CPU_VENDOR_AMD) print_AMD_info(); }
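/*
 * Sketch of querying the "Standard Extended Features" word decoded above:
 * CPUID leaf 7, sub-leaf 0, EBX.  A user-space illustration assuming
 * GCC/Clang's <cpuid.h>; bit positions follow the same layout as the
 * cpu_stdext_feature names printed by the kernel (AVX2 = bit 5,
 * SMEP = bit 7, SMAP = bit 20).
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0, NULL) < 7)
		return (1);		/* leaf 7 not supported */
	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	printf("AVX2: %s, SMEP: %s, SMAP: %s\n",
	    (ebx & (1 << 5)) ? "yes" : "no",
	    (ebx & (1 << 7)) ? "yes" : "no",
	    (ebx & (1 << 20)) ? "yes" : "no");
	return (0);
}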
static int procfs_getonecpu(int xcpu, struct cpu_info *ci, char *bf, size_t *len) { size_t left, l, size; char featurebuf[1024], *p; p = featurebuf; left = sizeof(featurebuf); size = *len; procfs_getonecpufeatures(ci, p, &left); p = bf; left = *len; size = 0; l = snprintf(p, left, "processor\t: %d\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" "model\t\t: %d\n" "model name\t: %s\n" "stepping\t: ", xcpu, (char *)ci->ci_vendor, CPUID_TO_FAMILY(ci->ci_signature), CPUID_TO_MODEL(ci->ci_signature), cpu_brand_string ); size += l; if (l < left) { left -= l; p += l; } else left = 0; if (cpuid_level >= 0) l = snprintf(p, left, "%d\n", CPUID_TO_STEPPING(ci->ci_signature)); else l = snprintf(p, left, "unknown\n"); size += l; if (l < left) { left -= l; p += l; } else left = 0; if (ci->ci_data.cpu_cc_freq != 0) { uint64_t freq, fraq; freq = (ci->ci_data.cpu_cc_freq + 4999) / 1000000; fraq = ((ci->ci_data.cpu_cc_freq + 4999) / 10000) % 100; l = snprintf(p, left, "cpu MHz\t\t: %" PRIu64 ".%02" PRIu64 "\n", freq, fraq); } else l = snprintf(p, left, "cpu MHz\t\t: unknown\n"); size += l; if (l < left) { left -= l; p += l; } else left = 0; l = snprintf(p, left, "fdiv_bug\t: %s\n" "fpu\t\t: %s\n" "fpu_exception\t: yes\n" "cpuid level\t: %d\n" "wp\t\t: %s\n" "flags\t\t: %s\n", i386_fpu_fdivbug ? "yes" : "no", /* an old pentium */ i386_fpu_present ? "yes" : "no", /* not a 486SX */ cpuid_level, (rcr0() & CR0_WP) ? "yes" : "no", featurebuf ); size += l; left = *len; *len = size; return left < *len ? -1 : 0; }
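/*
 * procfs_getonecpu() repeats the same bounded-append idiom after every
 * snprintf(): advance the write pointer, shrink the remaining space, and
 * keep accumulating the total size even after truncation.  A hedged sketch
 * of that idiom as a helper (hypothetical name, not part of the NetBSD
 * procfs code):
 */
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

struct strbuf {
	char	*p;	/* next write position */
	size_t	 left;	/* space remaining in the caller's buffer */
	size_t	 size;	/* total length the output would need */
};

static void
sb_appendf(struct strbuf *sb, const char *fmt, ...)
{
	va_list ap;
	int l;

	va_start(ap, fmt);
	l = vsnprintf(sb->p, sb->left, fmt, ap);
	va_end(ap);
	if (l < 0)
		return;				/* encoding error: ignore */
	sb->size += (size_t)l;
	if ((size_t)l < sb->left) {		/* output fit: advance */
		sb->left -= (size_t)l;
		sb->p += l;
	} else					/* truncated: stop writing */
		sb->left = 0;
}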
static void init_amd(void) { uint64_t msr; /* * Work around Erratum 721 for Family 10h and 12h processors. * These processors may incorrectly update the stack pointer * after a long series of push and/or near-call instructions, * or a long series of pop and/or near-return instructions. * * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf * * Hypervisors do not provide access to the errata MSR, * causing #GP exception on attempt to apply the errata. The * MSR write shall be done on host and persist globally * anyway, so do not try to do it when under virtualization. */ switch (CPUID_TO_FAMILY(cpu_id)) { case 0x10: case 0x12: if ((cpu_feature2 & CPUID2_HV) == 0) wrmsr(0xc0011029, rdmsr(0xc0011029) | 1); break; } /* * BIOS may fail to set InitApicIdCpuIdLo to 1 as it should per BKDG. * So, do it here or otherwise some tools could be confused by * Initial Local APIC ID reported with CPUID Function 1 in EBX. */ if (CPUID_TO_FAMILY(cpu_id) == 0x10) { if ((cpu_feature2 & CPUID2_HV) == 0) { msr = rdmsr(MSR_NB_CFG1); msr |= (uint64_t)1 << 54; wrmsr(MSR_NB_CFG1, msr); } } /* * BIOS may configure Family 10h processors to convert WC+ cache type * to CD. That can hurt performance of guest VMs using nested paging. * The relevant MSR bit is not documented in the BKDG, * the fix is borrowed from Linux. */ if (CPUID_TO_FAMILY(cpu_id) == 0x10) { if ((cpu_feature2 & CPUID2_HV) == 0) { msr = rdmsr(0xc001102a); msr &= ~((uint64_t)1 << 24); wrmsr(0xc001102a, msr); } } /* * Work around Erratum 793: Specific Combination of Writes to Write * Combined Memory Types and Locked Instructions May Cause Core Hang. * See Revision Guide for AMD Family 16h Models 00h-0Fh Processors, * revision 3.04 or later, publication 51810. */ if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) { if ((cpu_feature2 & CPUID2_HV) == 0) { msr = rdmsr(0xc0011020); msr |= (uint64_t)1 << 15; wrmsr(0xc0011020, msr); } } /* * Work around a problem on Ryzen that is triggered by executing * code near the top of user memory, in our case the signal * trampoline code in the shared page on amd64. * * This function is executed once for the BSP before tunables take * effect so the value determined here can be overridden by the * tunable. This function is then executed again for each AP and * also on resume. Set a flag the first time so that value set by * the tunable is not overwritten. * * The stepping and/or microcode versions should be checked after * this issue is fixed by AMD so that we don't use this mode if not * needed. */ if (lower_sharedpage_init == 0) { lower_sharedpage_init = 1; if (CPUID_TO_FAMILY(cpu_id) == 0x17) { hw_lower_amd64_sharedpage = 1; } } }
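/*
 * init_amd() applies several "flip an MSR bit, but only on bare metal"
 * workarounds, each guarded by the same CPUID2_HV check.  A hedged sketch
 * of that pattern as a helper (hypothetical name, not an actual kernel
 * routine); the MSR numbers in the usage comments are the ones used above.
 */
static void
amd_errata_msr_update(u_int msr, uint64_t set, uint64_t clear)
{
	uint64_t v;

	/* Hypervisors typically #GP on these model-specific registers. */
	if ((cpu_feature2 & CPUID2_HV) != 0)
		return;
	v = rdmsr(msr);
	v = (v & ~clear) | set;
	wrmsr(msr, v);
}

/* Erratum 721 (families 10h/12h): amd_errata_msr_update(0xc0011029, 1, 0);		*/
/* Erratum 793 (family 16h):  amd_errata_msr_update(0xc0011020, (uint64_t)1 << 15, 0);	*/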
/* * Initialize CPU control registers */ void initializecpu(int cpu) { uint64_t msr; /*Check for FXSR and SSE support and enable if available.*/ if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) { load_cr4(rcr4() | CR4_FXSR | CR4_XMM); cpu_fxsr = hw_instruction_sse = 1; } if (cpu == 0) { /* Check if we are running in a hypervisor. */ vmm_guest = detect_virtual(); if (vmm_guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM)) vmm_guest = VMM_GUEST_UNKNOWN; } #if !defined(CPU_DISABLE_AVX) /*Check for XSAVE and AVX support and enable if available.*/ if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE) && (cpu_feature & CPUID_SSE)) { load_cr4(rcr4() | CR4_XSAVE); /* Adjust size of savefpu in npx.h before adding to mask.*/ xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM, 0); cpu_xsave = 1; } #endif if (cpu_vendor_id == CPU_VENDOR_AMD) { switch((cpu_id & 0xFF0000)) { case 0x100000: case 0x120000: /* * Errata 721 is the cpu bug found by your's truly * (Matthew Dillon). It is a bug where a sequence * of 5 or more popq's + a retq, under involved * deep recursion circumstances, can cause the %rsp * to not be properly updated, almost always * resulting in a seg-fault soon after. * * Do not install the workaround when we are running * in a virtual machine. */ if (vmm_guest) break; msr = rdmsr(MSR_AMD_DE_CFG); if ((msr & 1) == 0) { if (cpu == 0) kprintf("Errata 721 workaround " "installed\n"); msr |= 1; wrmsr(MSR_AMD_DE_CFG, msr); } break; } } if ((amd_feature & AMDID_NX) != 0) { msr = rdmsr(MSR_EFER) | EFER_NXE; wrmsr(MSR_EFER, msr); #if 0 /* JG */ pg_nx = PG_NX; #endif } if (cpu_vendor_id == CPU_VENDOR_CENTAUR && CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) >= 0xf) init_via(); TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable); if (cpu_feature & CPUID_CLFSH) { cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8; if (hw_clflush_enable == 0 || ((hw_clflush_enable == -1) && vmm_guest)) cpu_feature &= ~CPUID_CLFSH; }
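/*
 * Sketch of the CLFLUSH line-size computation used at the end of
 * initializecpu(): CPUID leaf 1 reports the flush line size in EBX bits
 * 15:8, in units of 8 bytes, and EDX bit 19 (CLFSH) indicates whether the
 * instruction exists at all.  A user-space illustration assuming
 * GCC/Clang's <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return (1);
	if (edx & (1 << 19))		/* CLFSH feature bit */
		printf("CLFLUSH line size: %u bytes\n",
		    ((ebx >> 8) & 0xff) * 8);
	else
		printf("CLFLUSH not supported\n");
	return (0);
}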