static void model_10_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 10 Init - a no-op.\n");

	u8 i;
	msr_t msr;
#if CONFIG_LOGICAL_CPUS
	u32 siblings;
#endif

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 6; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	enable_cache();

	/* Enable the local cpu apics */
	setup_lapic();

	/* Set the processor name string */
//	init_processor_name();

#if CONFIG_LOGICAL_CPUS
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
static void model_15_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 15 Init.\n");

	u8 i;
	msr_t msr;
	int msrno;
	unsigned int cpu_idx;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

	//x86_enable_cache();
	//amd_setup_mtrrs();
	//x86_mtrr_check();

	disable_cache();

	/* Enable access to AMD RdDram and WrDram extension bits */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	// BSP: make a0000-bffff UC, c0000-fffff WB, same as OntarioApMtrrSettingsList for APs
	msr.lo = msr.hi = 0;
	wrmsr(0x259, msr);		/* MTRRfix16K_A0000 */
	msr.lo = msr.hi = 0x1e1e1e1e;
	wrmsr(0x250, msr);		/* MTRRfix64K_00000 */
	wrmsr(0x258, msr);		/* MTRRfix16K_80000 */
	for (msrno = 0x268; msrno <= 0x26f; msrno++)	/* MTRRfix4K_C0000..F8000 */
		wrmsr(msrno, msr);

	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	if (acpi_is_wakeup())
		restore_mtrr();

	x86_mtrr_check();
	x86_enable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 6; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	/* Enable the local cpu apics */
	setup_lapic();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) {
		cpu_idx = cpu_info()->index;
		printk(BIOS_INFO, "Initializing SMM for CPU %u\n", cpu_idx);

		/* Set SMM base address for this CPU */
		msr = rdmsr(MSR_SMM_BASE);
		msr.lo = SMM_BASE - (cpu_idx * 0x400);
		wrmsr(MSR_SMM_BASE, msr);

		/* Enable the SMM memory window */
		msr = rdmsr(MSR_SMM_MASK);
		msr.lo |= (1 << 0);	/* Enable ASEG SMRAM Range */
		wrmsr(MSR_SMM_MASK, msr);
	}

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
static void model_15_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 15 Init.\n");

	u8 i;
	msr_t msr;
	int msrno;
#if CONFIG_LOGICAL_CPUS
	u32 siblings;
#endif

	disable_cache();

	/* Enable access to AMD RdDram and WrDram extension bits */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	// BSP: make a0000-bffff UC, c0000-fffff WB
	msr.lo = msr.hi = 0;
	wrmsr(0x259, msr);		/* MTRRfix16K_A0000 */
	msr.lo = msr.hi = 0x1e1e1e1e;
	wrmsr(0x250, msr);		/* MTRRfix64K_00000 */
	wrmsr(0x258, msr);		/* MTRRfix16K_80000 */
	for (msrno = 0x268; msrno <= 0x26f; msrno++)	/* MTRRfix4K_C0000..F8000 */
		wrmsr(msrno, msr);

	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	if (acpi_is_wakeup())
		restore_mtrr();

	x86_mtrr_check();
	x86_enable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 6; i++)
		wrmsr(MCI_STATUS + (i * 4), msr);

	/* Enable the local CPU APICs */
	setup_lapic();

#if CONFIG_LOGICAL_CPUS
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	PSPProgBar3Msr(NULL);

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
static void model_12_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 12 Init.\n");

	u8 i;
	msr_t msr;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

//	struct node_core_id id;
//	id = get_node_core_id(read_nb_cfg_54()); /* nb_cfg_54 can not be set */
//	printk(BIOS_DEBUG, "nodeid = %02d, coreid = %02d\n", id.nodeid, id.coreid);

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 5; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	enable_cache();

	/* Enable the local CPU apics */
	setup_lapic();

	/* Set the processor name string */
//	init_processor_name();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
int init_processor_name(void)
{
	/* variable names taken from fam10 revision guide for clarity */
	u32 BrandId;	/* CPUID Fn8000_0001_EBX */
	u8 String1;	/* BrandID[14:11] */
	u8 String2;	/* BrandID[3:0] */
	u8 Model;	/* BrandID[10:4] */
	u8 Pg;		/* BrandID[15] */
	u8 PkgTyp;	/* BrandID[31:28] */
	u8 NC;		/* CPUID Fn8000_0008_ECX */
	const char *processor_name_string = unknown;
	char program_string[48];
	u32 *p_program_string = (u32 *)program_string;
	msr_t msr;
	int i, j = 0, str2_checkNC = 1;
	const struct str_s *str, *str2;

	/* Find out which CPU brand it is */
	BrandId = cpuid_ebx(0x80000001);
	String1 = (u8)((BrandId >> 11) & 0x0F);
	String2 = (u8)((BrandId >> 0) & 0x0F);
	Model = (u8)((BrandId >> 4) & 0x7F);
	Pg = (u8)((BrandId >> 15) & 0x01);
	PkgTyp = (u8)((BrandId >> 28) & 0x0F);
	NC = (u8)(cpuid_ecx(0x80000008) & 0xFF);

	/* null the string */
	memset(program_string, 0, sizeof(program_string));

	if (!Model) {
		processor_name_string = Pg ? thermal : sample;
		goto done;
	}

	switch (PkgTyp) {
	case 0:		/* F1207 */
		str = String1_socket_F;
		str2 = String2_socket_F;
		str2_checkNC = 0;
		break;
	case 1:		/* AM2 */
		str = String1_socket_AM2;
		str2 = String2_socket_AM2;
		break;
	case 5:		/* C32 */
		str = String1_socket_C32;
		str2 = String2_socket_C32;
		break;
	default:
		goto done;
	}

	/* String1 */
	for (i = 0; str[i].value; i++) {
		if ((str[i].Pg == Pg) && (str[i].NC == NC) &&
		    (str[i].String == String1)) {
			processor_name_string = str[i].value;
			break;
		}
	}

	if (!str[i].value)
		goto done;

	j = strcpymax(program_string, processor_name_string,
		      sizeof(program_string));

	/* Translate Model from 01-99 to ASCII and put it on the end.
	 * Numbers less than 10 should include a leading zero, e.g., 09. */
	if (Model < 100 && j < sizeof(program_string) - 2) {
		program_string[j++] = (Model / 10) + '0';
		program_string[j++] = (Model % 10) + '0';
	}

	processor_name_string = unknown2;

	/* String 2 */
	for (i = 0; str2[i].value; i++) {
		if ((str2[i].Pg == Pg) &&
		    ((str2[i].NC == NC) || !str2_checkNC) &&
		    (str2[i].String == String2)) {
			processor_name_string = str2[i].value;
			break;
		}
	}

done:
	strcpymax(&program_string[j], processor_name_string,
		  sizeof(program_string) - j);

	printk(BIOS_DEBUG, "CPU model: %s\n", program_string);

	/* Write the 48-byte name string to the Processor Name String MSRs
	 * (0xC0010030..0xC0010035). */
	for (i = 0; i < 6; i++) {
		msr.lo = p_program_string[(2 * i) + 0];
		msr.hi = p_program_string[(2 * i) + 1];
		wrmsr_amd(0xC0010030 + i, msr);
	}

	return 0;
}
static void model_16_init(struct device *dev)
{
	printk(BIOS_DEBUG, "Model 16 Init.\n");

	u8 i;
	msr_t msr;
	int num_banks;
	int msrno;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

	//x86_enable_cache();
	//amd_setup_mtrrs();
	//x86_mtrr_check();

	disable_cache();

	/* Enable access to AMD RdDram and WrDram extension bits */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	// BSP: make a0000-bffff UC, c0000-fffff WB, same as OntarioApMtrrSettingsList for APs
	msr.lo = msr.hi = 0;
	wrmsr(MTRR_FIX_16K_A0000, msr);
	msr.lo = msr.hi = 0x1e1e1e1e;
	wrmsr(MTRR_FIX_64K_00000, msr);
	wrmsr(MTRR_FIX_16K_80000, msr);
	for (msrno = MTRR_FIX_4K_C0000; msrno <= MTRR_FIX_4K_F8000; msrno++)
		wrmsr(msrno, msr);

	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	if (acpi_is_wakeup())
		restore_mtrr();

	x86_mtrr_check();
	x86_enable_cache();

	/* zero the machine check error status registers */
	msr = rdmsr(IA32_MCG_CAP);
	num_banks = msr.lo & MCA_BANKS_MASK;
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < num_banks; i++)
		wrmsr(IA32_MC0_STATUS + (i * 4), msr);

	/* Enable the local CPU APICs */
	setup_lapic();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
static void model_14_init(device_t dev)
{
	u32 i;
	msr_t msr;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

	printk(BIOS_DEBUG, "Model 14 Init.\n");

	disable_cache();

	/*
	 * AGESA sets the main MTRRs. The shadow area needs to be set
	 * by coreboot. amd_setup_mtrrs() should work, but needs debugging
	 * on fam14.
	 * TODO:
	 * amd_setup_mtrrs();
	 */

	/* Enable access to AMD RdDram and WrDram extension bits */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	/* Set shadow WB, RdMEM, WrMEM */
	msr.lo = msr.hi = 0;
	wrmsr(0x259, msr);		/* MTRRfix16K_A0000 */
	msr.hi = msr.lo = 0x1e1e1e1e;
	wrmsr(0x250, msr);		/* MTRRfix64K_00000 */
	wrmsr(0x258, msr);		/* MTRRfix16K_80000 */
	for (i = 0x268; i <= 0x26f; i++)	/* MTRRfix4K_C0000..F8000 */
		wrmsr(i, msr);

	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	if (acpi_is_wakeup())
		restore_mtrr();

	x86_mtrr_check();
	x86_enable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 6; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	/* Enable the local cpu apics */
	setup_lapic();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);

	printk(BIOS_SPEW, "%s done.\n", __func__);
}
static void model_10xxx_init(device_t dev)
{
	u8 i;
	msr_t msr;
	struct node_core_id id;
#if CONFIG_LOGICAL_CPUS
	u32 siblings;
#endif

	id = get_node_core_id(read_nb_cfg_54()); /* nb_cfg_54 can not be set */
	printk(BIOS_DEBUG, "nodeid = %02d, coreid = %02d\n", id.nodeid, id.coreid);

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 5; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	enable_cache();

	/* Enable the local cpu apics */
	setup_lapic();

	/* Set the processor name string */
	init_processor_name();

#if CONFIG_LOGICAL_CPUS
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	msr = rdmsr(BU_CFG2_MSR);
	/* Clear ClLinesToNbDis */
	msr.lo &= ~(1 << 15);
	/* Clear bit 35 as per Erratum 343 */
	msr.hi &= ~(1 << (35 - 32));
	wrmsr(BU_CFG2_MSR, msr);

	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) {
		printk(BIOS_DEBUG, "Initializing SMM ASeg memory\n");

		/* Set SMM base address for this CPU */
		msr = rdmsr(SMM_BASE_MSR);
		msr.lo = SMM_BASE - (lapicid() * 0x400);
		wrmsr(SMM_BASE_MSR, msr);

		/* Enable the SMM memory window */
		msr = rdmsr(SMM_MASK_MSR);
		msr.lo |= (1 << 0);	/* Enable ASEG SMRAM Range */
		wrmsr(SMM_MASK_MSR, msr);
	} else {
		printk(BIOS_DEBUG, "Disabling SMM ASeg memory\n");

		/* Set SMM base address for this CPU */
		msr = rdmsr(SMM_BASE_MSR);
		msr.lo = SMM_BASE - (lapicid() * 0x400);
		wrmsr(SMM_BASE_MSR, msr);

		/* Disable the SMM memory window */
		msr.hi = 0x0;
		msr.lo = 0x0;
		wrmsr(SMM_MASK_MSR, msr);
	}

	/* Set SMMLOCK to avoid exploits messing with SMM */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}