/* Pre-DQS-training workarounds for early (rev A) silicon.
 *
 * Applies two erratum workarounds on AMD_DR_A0A/A1B/A2 parts before DQS
 * training runs; a no-op on all later revisions.
 */
void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
{
	/* Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
	 * In TX FIFO
	 * Solution: BIOS should program DRAM Control Register[RdPtrInit] = 5h,
	 * (F2x[1, 0]78[3:0] = 5h).
	 * Silicon Status: Fixed In Rev B0
	 */
	/* Bug#15880: Determine validity of reset settings for DDR PHY timing
	 * regi..
	 * Solution: At least, set WrDqs fine delay to be 0 for DDR2 training.
	 */
	u32 dev;
	u32 reg_off;
	u32 index_reg;
	u32 index;
	u32 reg;
	u32 val;
	uint64_t tmp;
	u32 Channel;

	tmp = pDCTstat->LogicalCPUID;
	/* Rev A silicon only (A0A/A1B/A2); later revisions have the fixes. */
	if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
		dev = pDCTstat->dev_dct;
		index = 0;

		/* Indexed PHY register access on both channels: read index
		 * 0x0d004007, OR in the low 10 bits, write back at index
		 * 0x0d0f4f07. NOTE(review): the differing read/write indices
		 * appear intentional (broadcast-style write) — do not
		 * "fix" them to match.
		 */
		for (Channel = 0; Channel<2; Channel++) {
			index_reg = 0x98 + 0x100 * Channel;
			val = Get_NB32_index_wait(dev, index_reg, 0x0d004007);
			val |= 0x3ff;
			Set_NB32_index_wait(dev, index_reg, 0x0d0f4f07, val);
		}

		/* Bug#15115 fix: F2x[1,0]78[3:0] (RdPtrInit) = 5. Skip the
		 * second channel in ganged mode, where channel 1 mirrors
		 * channel 0.
		 */
		for (Channel = 0; Channel<2; Channel++) {
			if (pDCTstat->GangedMode && Channel)
				break;
			reg_off = 0x100 * Channel;
			reg = 0x78 + reg_off;
			val = Get_NB32(dev, reg);
			val &= ~(0x07);
			val |= 5;
			Set_NB32(dev, reg, val);
		}

		/* Bug#15880 fix: zero the WrDqs fine-delay indexed registers
		 * (indices 0x30..0x45 inclusive) on both channels.
		 */
		for (Channel = 0; Channel<2; Channel++) {
			reg_off = 0x100 * Channel;
			val = 0;
			index_reg = 0x98 + reg_off;
			for ( index = 0x30; index < (0x45 + 1); index++) {
				Set_NB32_index_wait(dev, index_reg, index, val);
			}
		}
	}
}
/* Program extended MCT configuration for rev D parts.
 *
 * Sets F2x11C (MCT extended configuration: write limits, FlushWrOnStpGnt)
 * and F2x1B0 (prefetcher tuning and the speed-dependent DcqBwThrotWm
 * watermark). No-op on non-Dx silicon.
 */
void mct_ExtMCTConfig_Dx(struct DCTStatStruc *pDCTstat)
{
	uint32_t dword;

	if (pDCTstat->LogicalCPUID & AMD_DR_Dx) {
		/* Base value plus FlushWrOnStpGnt (bit 29). */
		dword = 0x0ce00f00 | 0x1 << 29; /* FlushWrOnStpGnt */
		if (!(pDCTstat->GangedMode))
			dword |= 0x18 << 2; /* MctWrLimit = 0x18 for unganged mode */
		else
			dword |= 0x10 << 2; /* MctWrLimit = 0x10 for ganged mode */
		Set_NB32(pDCTstat->dev_dct, 0x11c, dword);

		dword = Get_NB32(pDCTstat->dev_dct, 0x1b0);
		dword &= ~0x3; /* AdapPrefMissRatio = 0x1 */
		dword |= 0x1;
		dword &= ~(0x3 << 2); /* AdapPrefPositiveStep = 0x0 */
		dword &= ~(0x3 << 4); /* AdapPrefNegativeStep = 0x0 */
		dword &= ~(0x7 << 8); /* CohPrefPrbLmt = 0x1 */
		dword |= (0x1 << 8);
		dword |= (0x7 << 22); /* PrefFourConf = 0x7 */
		dword |= (0x7 << 25); /* PrefFiveConf = 0x7 */
		if (!(pDCTstat->GangedMode))
			dword |= (0x1 << 12); /* EnSplitDctLimits = 0x1 */
		else
			dword &= ~(0x1 << 12); /* EnSplitDctLimits = 0x0 */
		/* DcqBwThrotWm scales with memory speed. */
		dword &= ~(0xf << 28); /* DcqBwThrotWm = ... */
		switch (pDCTstat->Speed) {
		case 4:
			dword |= (0x5 << 28); /* ...5 for DDR800 */
			break;
		case 5:
			dword |= (0x6 << 28); /* ...6 for DDR1066 */
			break;
		case 6:
			dword |= (0x8 << 28); /* ...8 for DDR1333 */
			break;
		default:
			dword |= (0x9 << 28); /* ...9 for DDR1600 */
			break;
		}
		Set_NB32(pDCTstat->dev_dct, 0x1b0, dword);
	}
}
/* Select which NB P-state's configuration registers are visible through
 * register 0x10c, by programming its bits 5:4 to `nb_pstate` (masked to
 * two bits). Read-modify-write; all other bits preserved.
 */
static inline void fam15h_switch_nb_pstate_config_reg(uint32_t dev, uint8_t nb_pstate)
{
	uint32_t regval = Get_NB32(dev, 0x10c);

	regval = (regval & ~(0x3 << 4)) | ((uint32_t)(nb_pstate & 0x3) << 4);
	Set_NB32(dev, 0x10c, regval);
}
/* Select the active DCT by programming bit 0 of register 0x10c to the
 * low bit of `dct`. Read-modify-write; all other bits preserved.
 */
static inline void fam15h_switch_dct(uint32_t dev, uint8_t dct)
{
	uint32_t regval = Get_NB32(dev, 0x10c);

	regval = (regval & ~0x1) | (uint32_t)(dct & 0x1);
	Set_NB32(dev, 0x10c, regval);
}
/* Read-modify-write helper: in register `reg_pos` of `fam10_dev`, clear
 * the bits in `mask` and set the bits in `val`. The write is skipped
 * entirely when the register already holds the desired value.
 */
static void set_fam10_ext_cfg_enable_bits(pci_devfn_t fam10_dev, u32 reg_pos, u32 mask, u32 val)
{
	u32 current = Get_NB32(fam10_dev, reg_pos);
	u32 desired = (current & ~mask) | val;

	if (desired != current)
		Set_NB32(fam10_dev, reg_pos, desired);
}
/* Apply platform optimization register settings to node 0 (device 0:18,
 * functions 0/2/3), per section 8.6.4.1 and tables 8-13 .. 8-26 of the
 * referenced platform documentation. Values are verbatim table constants;
 * do not "clean up" the magic numbers — each is keyed to its table.
 */
static void fam10_optimization(void)
{
	pci_devfn_t cpu_f0, cpu_f2, cpu_f3;
	u32 val;

	printk(BIOS_INFO, "fam10_optimization()\n");

	cpu_f0 = PCI_DEV(0, 0x18, 0);
	cpu_f2 = PCI_DEV(0, 0x18, 2);
	cpu_f3 = PCI_DEV(0, 0x18, 3);

	/* 8.6.4.1 */
	/* Table 8-13 */
	pci_write_config32(cpu_f0, 0x90, 0x808502D0);
	/* Table 8-14 */
	pci_write_config32(cpu_f0, 0x94, 0x00000000);

	/* Table 8-15 */
	val = pci_read_config32(cpu_f0, 0x68);
	val |= 1 << 24;
	pci_write_config32(cpu_f0, 0x68, val);

	/* Table 8-16 */
	val = pci_read_config32(cpu_f0, 0x84);
	val &= ~(1 << 12);
	pci_write_config32(cpu_f0, 0x84, val);

	/* Table 8-17 */
	val = pci_read_config32(cpu_f2, 0x90);
	val &= ~(1 << 10);
	pci_write_config32(cpu_f2, 0x90, val);

	/* Table 8-18 */
	pci_write_config32(cpu_f3, 0x6C, 0x60018051);
	/* Table 8-19 */
	pci_write_config32(cpu_f3, 0x70, 0x60321151);
	/* Table 8-20 */
	pci_write_config32(cpu_f3, 0x74, 0x00980101);
	/* Table 8-21 */
	pci_write_config32(cpu_f3, 0x78, 0x00200C14);
	/* Table 8-22 */
	pci_write_config32(cpu_f3, 0x7C, 0x00070811);

	/* TODO: Check if L3 Cache is enabled. */
	/* Table 8-23 */
	Set_NB32(cpu_f3, 0x140, 0x00D33656);
	/* Table 8-24 */
	Set_NB32(cpu_f3, 0x144, 0x00000036);
	/* Table 8-25 */
	Set_NB32(cpu_f3, 0x148, 0x8000832A);
	/* Table 8-26 */
	Set_NB32(cpu_f3, 0x158, 0);

	/* Buffer-count field in bits 8:4 depends on core count and whether
	 * L3 is present; the formulas below reproduce the table:
	 */
	/* L3 Disabled:          L3 Enabled: */
	/* cores:  2  3  4        2  3  4 */
	/* bit8:4 28 26 24       24 20 16 */
	if (!l3_cache()) {
		Set_NB32(cpu_f3, 0x1A0, 4 << 12 | (24 + 2*(4-cpu_core_number())) << 4 | 2);
	} else {
		Set_NB32(cpu_f3, 0x1A0, 4 << 12 | (16 + 4*(4-cpu_core_number())) << 4 | 4);
	}
}
/* Start hardware-assisted DRAM initialization on the given DCT by setting
 * the InitDram bit in DRAM Configuration Low (F2x[1,0]90). This selects
 * the HW (as opposed to SW) DRAM init path.
 */
void mct_DramInit_Hw_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
{
	u32 dev = pDCTstat->dev_dct;
	u32 dram_cfg_lo = 0x90 + 0x100 * dct; /* DRAM Configuration Low */
	u32 dword = Get_NB32(dev, dram_cfg_lo);

	dword |= (1 << InitDram);
	Set_NB32(dev, dram_cfg_lo, dword);
}
static void vErratum414(struct DCTStatStruc *pDCTstat) { int dct=0; for(; dct < 2 ; dct++) { int dRAMConfigHi = Get_NB32(pDCTstat->dev_dct,0x94 + (0x100 * dct)); int powerDown = dRAMConfigHi & (1 << PowerDownEn ); int ddr3 = dRAMConfigHi & (1 << Ddr3Mode ); int dRAMMRS = Get_NB32(pDCTstat->dev_dct,0x84 + (0x100 * dct)); int pchgPDModeSel = dRAMMRS & (1 << PchgPDModeSel); if (powerDown && ddr3 && pchgPDModeSel ) { Set_NB32(pDCTstat->dev_dct,0x84 + (0x100 * dct), dRAMMRS & ~(1 << PchgPDModeSel) ); } } }
void mct_EndDQSTraining_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) { /* Bug#13341: Prefetch is getting killed when the limit is reached in * PrefDramTrainMode * Solution: Explicitly clear the PrefDramTrainMode bit after training * sequence in order to ensure resumption of normal HW prefetch * behavior. * NOTE -- this has been documented with a note at the end of this * section in the BKDG (although, admittedly, the note does not really * stand out). * Silicon Status: Fixed in Rev B ( confirm) * FIXME: check this. */ uint64_t tmp; u32 dev; u32 reg; u32 val; u32 Node; for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) { struct DCTStatStruc *pDCTstat; pDCTstat = pDCTstatA + Node; if (!pDCTstat->NodePresent) break; tmp = pDCTstat->LogicalCPUID; if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) { dev = pDCTstat->dev_dct; reg = 0x11c; val = Get_NB32(dev, reg); val &= ~(1<<PrefDramTrainMode); Set_NB32(dev, reg, val); } } }
/* Pre-DQS-training register setup for rev B parts.
 *
 * For each DCT (channel 1's registers sit at +0x100): write 0x0D004007
 * to the index register (0x98), OR 0x3FF into the data register (0x9C),
 * then write 0x4D0F4F07 to the index register. No-op on non-Bx silicon.
 */
void mct_BeforeDQSTrainSamp(struct DCTStatStruc *pDCTstat)
{
	u32 offset;
	u32 dword;

	if (!(pDCTstat->LogicalCPUID & AMD_DR_Bx))
		return;

	for (offset = 0; offset <= 0x100; offset += 0x100) {
		Set_NB32(pDCTstat->dev_dct, 0x98 + offset, 0x0D004007);
		dword = Get_NB32(pDCTstat->dev_dct, 0x9C + offset);
		dword |= 0x3FF;
		Set_NB32(pDCTstat->dev_dct, 0x9C + offset, dword);
		Set_NB32(pDCTstat->dev_dct, 0x98 + offset, 0x4D0F4F07);
	}
}
/* Program extended MCT configuration (F2x11C) on rev B parts: fixed
 * value 0x0FE40FC0 with FlushWrOnStpGnt (bit 29) set. No-op otherwise.
 */
void mct_ExtMCTConfig_Bx(struct DCTStatStruc *pDCTstat)
{
	if (!(pDCTstat->LogicalCPUID & AMD_DR_Bx))
		return;

	Set_NB32(pDCTstat->dev_dct, 0x11C,
		 0x0FE40FC0 | (1 << 29) /* FlushWrOnStpGnt */);
}