/** * Apply any hardware workarounds that are required upon detach or suspend * of the bridge device. */ static int bhndb_pci_wars_hwdown(struct bhndb_pci_softc *sc) { int error; /* Reduce L1 timer for better power savings. * TODO: We could enable/disable this on demand for better power * savings if we tie this to HT clock request handling */ if (BHNDB_PCIE_QUIRK(sc, L1_TIMER_PERF)) { uint32_t pmt; pmt = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG); pmt &= ~BHND_PCIE_ASPMTIMER_EXTEND; bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG, pmt); } /* Disable clocks */ if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) { if ((error = bhndb_disable_pci_clocks(sc))) { device_printf(sc->dev, "failed to disable clocks\n"); return (error); } } return (0); }
/**
 * Initialize the full bridge configuration.
 *
 * This is called during the DEVICE_ATTACH() process by the bridged bhndb(4)
 * bus, prior to probe/attachment of child cores.
 *
 * At this point, we can introspect the enumerated cores, find our host
 * bridge device, and apply any bridge-level hardware workarounds required
 * for proper operation of the bridged device cores.
 *
 * @param dev		The bhndb_pci bridge device.
 * @param child		The bridged bhnd(4) bus device.
 * @param prio_table	Register window allocation priority table.
 *
 * @retval 0		success
 * @retval ENXIO	if the host bridge core is unrecognized, or if no
 *			active static register window maps the PCI core's
 *			register block.
 * @retval non-zero	if generic bridge initialization, register lookup,
 *			or MDIO child attachment otherwise fails.
 */
static int
bhndb_pci_init_full_config(device_t dev, device_t child,
    const struct bhndb_hw_priority *prio_table)
{
	struct bhnd_core_info		 core;
	const struct bhndb_pci_id	*id;
	struct bhndb_pci_softc		*sc;
	struct bhndb_region		*pcir;
	bhnd_addr_t			 pcir_addr;
	bhnd_size_t			 pcir_size;
	int				 error;

	sc = device_get_softc(dev);

	/* Let bhndb perform full discovery and initialization of the
	 * available register windows and bridge resources. */
	if ((error = bhndb_generic_init_full_config(dev, child, prio_table)))
		return (error);

	/*
	 * Identify our PCI bridge core, its register family, and any
	 * applicable hardware quirks.
	 */
	KASSERT(sc->bhndb.hostb_dev, ("missing hostb device\n"));

	core = bhnd_get_core_info(sc->bhndb.hostb_dev);
	id = bhndb_pci_find_core_id(&core);
	if (id == NULL) {
		/* An unrecognized host bridge core cannot be configured;
		 * previously this fell through and dereferenced the NULL
		 * table entry below. Fail attach instead. */
		device_printf(dev, "%s %s hostb core is not recognized\n",
		    bhnd_vendor_name(core.vendor), bhnd_core_name(&core));
		return (ENXIO);
	}

	sc->regfmt = id->regfmt;

	/* Now that we've identified the PCI bridge core, we can determine the
	 * full set of device quirks */
	sc->quirks = bhndb_pci_discover_quirks(sc, id);

	/*
	 * Determine and save a reference to the bhndb resource and offset
	 * at which the bridge core's device registers are mapped.
	 *
	 * All known bhnd(4) hardware provides a fixed static mapping of
	 * the PCI core's registers. If this changes in the future -- which
	 * is unlikely -- this driver will need to be adjusted to use
	 * dynamic register windows.
	 */

	/* Find base address and size of the PCI core's register block. */
	error = bhnd_get_region_addr(sc->bhndb.hostb_dev, BHND_PORT_DEVICE,
	    0, 0, &pcir_addr, &pcir_size);
	if (error) {
		device_printf(dev, "failed to locate PCI core registers\n");
		return (error);
	}

	/* Find the bhndb_region that statically maps this block */
	pcir = bhndb_find_resource_region(sc->bhndb.bus_res, pcir_addr,
	    pcir_size);
	if (pcir == NULL || pcir->static_regwin == NULL) {
		device_printf(dev,
		    "missing static PCI core register window\n");
		return (ENXIO);
	}

	/* Save borrowed reference to the mapped PCI core registers */
	sc->mem_off = pcir->static_regwin->win_offset;
	sc->mem_res = bhndb_find_regwin_resource(sc->bhndb.bus_res,
	    pcir->static_regwin);
	if (sc->mem_res == NULL || !(rman_get_flags(sc->mem_res) & RF_ACTIVE))
	{
		device_printf(dev,
		    "no active resource maps the PCI core register window\n");
		return (ENXIO);
	}

	/* Configure a direct bhnd_resource wrapper that we can pass to
	 * bhnd_resource APIs */
	sc->bhnd_mem_res = (struct bhnd_resource) {
		.res = sc->mem_res,
		.direct = true
	};

	/*
	 * Attach MMIO device (if this is a PCIe device), which is used for
	 * access to the PCIe SerDes required by the quirk workarounds.
	 */
	if (sc->pci_devclass == BHND_DEVCLASS_PCIE) {
		sc->mdio = device_add_child(dev,
		    devclass_get_name(bhnd_mdio_pci_devclass), 0);
		if (sc->mdio == NULL)
			return (ENXIO);

		if ((error = device_probe_and_attach(sc->mdio))) {
			device_printf(dev, "failed to attach MDIO device\n");
			return (error);
		}
	}

	/* Apply any early one-time quirk workarounds */
	if ((error = bhndb_pci_wars_early_once(sc)))
		return (error);

	/* Apply attach-time quirk workarounds, required before the bridged
	 * bhnd(4) bus itself performs a full attach(). */
	if ((error = bhndb_pci_wars_hwup(sc)))
		return (error);

	return (0);
}

/**
 * Apply any hardware workarounds that must be executed prior to attempting
 * register access on the bridged chipset.
 *
 * This must be called very early in attach() or resume(), after the basic
 * set of applicable device quirks has been determined.
*/ static int bhndb_pci_wars_register_access(struct bhndb_pci_softc *sc) { int error; if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) { if ((error = bhndb_enable_pci_clocks(sc))) { device_printf(sc->dev, "failed to enable clocks\n"); return (error); } } return (0); }
/**
 * Apply any hardware workarounds that are required upon attach or resume
 * of the bridge device.
 */
static int
bhndb_pci_wars_hwup(struct bhndb_pci_softc *sc)
{
	/* Note that the order here matters; these work-arounds
	 * should not be re-ordered without careful review of their
	 * interdependencies */

	/* Fix up any PoR (power-on-reset) defaults on SROMless devices */
	bhndb_init_sromless_pci_config(sc);

	/* Enable PCI prefetch/burst/readmulti flags */
	if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_PREF_BURST) ||
	    BHNDB_PCI_QUIRK(sc, SBTOPCI2_READMULTI))
	{
		uint32_t sbp2;
		sbp2 = BHNDB_PCI_READ_4(sc, BHND_PCI_SBTOPCI2);

		if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_PREF_BURST))
			sbp2 |= (BHND_PCI_SBTOPCI_PREF|BHND_PCI_SBTOPCI_BURST);

		if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_READMULTI))
			sbp2 |= BHND_PCI_SBTOPCI_RC_READMULTI;

		BHNDB_PCI_WRITE_4(sc, BHND_PCI_SBTOPCI2, sbp2);
	}

	/* Disable PCI CLKRUN# */
	if (BHNDB_PCI_QUIRK(sc, CLKRUN_DSBL)) {
		uint32_t ctl;

		ctl = BHNDB_PCI_READ_4(sc, BHND_PCI_CLKRUN_CTL);
		ctl |= BHND_PCI_CLKRUN_DSBL;
		BHNDB_PCI_WRITE_4(sc, BHND_PCI_CLKRUN_CTL, ctl);
	}

	/* Enable TLP unmatched address handling work-around */
	if (BHNDB_PCIE_QUIRK(sc, UR_STATUS_FIX)) {
		uint32_t wrs;
		wrs = bhndb_pcie_read_proto_reg(sc,
		    BHND_PCIE_TLP_WORKAROUNDSREG);
		wrs |= BHND_PCIE_TLP_WORKAROUND_URBIT;
		bhndb_pcie_write_proto_reg(sc, BHND_PCIE_TLP_WORKAROUNDSREG,
		    wrs);
	}

	/* Adjust SerDes CDR tuning to ensure that CDR is stable before
	 * sending data during L0s to L0 exit transitions. */
	if (BHNDB_PCIE_QUIRK(sc, SDR9_L0s_HANG)) {
		uint16_t sdv;

		/* Set RX track/acquire timers to 2.064us/40.96us */
		sdv = BPCI_REG_INSERT(0, PCIE_SDR9_RX_TIMER1_LKTRK,
		    (2064/16));
		sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_TIMER1_LKACQ,
		    (40960/1024));
		MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
		    BHND_PCIE_SDR9_RX_TIMER1, sdv);

		/* Apply CDR frequency workaround */
		sdv = BHND_PCIE_SDR9_RX_CDR_FREQ_OVR_EN;
		sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDR_FREQ_OVR, 0x0);
		MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
		    BHND_PCIE_SDR9_RX_CDR, sdv);

		/* Apply CDR BW tunings */
		sdv = 0;
		sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_INTGTRK, 0x2);
		sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_INTGACQ, 0x4);
		sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_PROPTRK, 0x6);
		sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_PROPACQ, 0x6);
		MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
		    BHND_PCIE_SDR9_RX_CDRBW, sdv);
	}

	/* Force correct SerDes polarity; the required polarity was
	 * pre-computed into sc->sdr9_quirk_polarity */
	if (BHNDB_PCIE_QUIRK(sc, SDR9_POLARITY)) {
		uint16_t rxctl;

		rxctl = MDIO_READREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
		    BHND_PCIE_SDR9_RX_CTRL);

		rxctl |= BHND_PCIE_SDR9_RX_CTRL_FORCE;
		if (sc->sdr9_quirk_polarity.inv)
			rxctl |= BHND_PCIE_SDR9_RX_CTRL_POLARITY_INV;
		else
			rxctl &= ~BHND_PCIE_SDR9_RX_CTRL_POLARITY_INV;

		MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
		    BHND_PCIE_SDR9_RX_CTRL, rxctl);
	}

	/* Disable startup retry on PLL frequency detection failure */
	if (BHNDB_PCIE_QUIRK(sc, SDR9_NO_FREQRETRY)) {
		uint16_t pctl;

		pctl = MDIO_READREG(sc->mdio, BHND_PCIE_PHY_SDR9_PLL,
		    BHND_PCIE_SDR9_PLL_CTRL);
		pctl &= ~BHND_PCIE_SDR9_PLL_CTRL_FREQDET_EN;
		MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_PLL,
		    BHND_PCIE_SDR9_PLL_CTRL, pctl);
	}

	/* Explicitly enable PCI-PM */
	if (BHNDB_PCIE_QUIRK(sc, PCIPM_REQEN)) {
		uint32_t lcreg;
		lcreg = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_LCREG);
		lcreg |= BHND_PCIE_DLLP_LCREG_PCIPM_EN;
		bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_LCREG, lcreg);
	}

	/* Adjust L1 timer to fix slow L1->L0 transitions */
	if (BHNDB_PCIE_QUIRK(sc, L1_IDLE_THRESH)) {
		uint32_t pmt;
		pmt = bhndb_pcie_read_proto_reg(sc,
		    BHND_PCIE_DLLP_PMTHRESHREG);
		pmt = BPCI_REG_INSERT(pmt, PCIE_L1THRESHOLDTIME,
		    BHND_PCIE_L1THRESHOLD_WARVAL);
		bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG,
		    pmt);
	}

	/* Extend L1 timer for better performance.
	 * TODO: We could enable/disable this on demand for better power
	 * savings if we tie this to HT clock request handling */
	if (BHNDB_PCIE_QUIRK(sc, L1_TIMER_PERF)) {
		uint32_t pmt;
		pmt = bhndb_pcie_read_proto_reg(sc,
		    BHND_PCIE_DLLP_PMTHRESHREG);
		pmt |= BHND_PCIE_ASPMTIMER_EXTEND;
		bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG,
		    pmt);
	}

	/* Enable L23READY_EXIT_NOPRST if not already set in SPROM. */
	if (BHNDB_PCIE_QUIRK(sc, SPROM_L23_PCI_RESET)) {
		bus_size_t	reg;
		uint16_t	cfg;

		/* Fetch the misc cfg flags from the SPROM shadow */
		reg = BHND_PCIE_SPROM_SHADOW + BHND_PCIE_SRSH_PCIE_MISC_CONFIG;
		cfg = BHNDB_PCI_READ_2(sc, reg);

		/* Write EXIT_NOPRST flag if not already set in SPROM */
		if (!(cfg & BHND_PCIE_SRSH_L23READY_EXIT_NOPRST)) {
			cfg |= BHND_PCIE_SRSH_L23READY_EXIT_NOPRST;
			BHNDB_PCI_WRITE_2(sc, reg, cfg);
		}
	}

	return (0);
}
	bool	 m_valid;		/**< true if a valid mapping exists, false otherwise */
	struct bhndb_host_resources	*hr;	/**< backing host resources */
};

/* Quirk table for siba(4)-attached PCI (non-PCIe) host bridge cores */
static struct bhndb_pci_quirk bhndb_pci_quirks[] = {
	/* Backplane interrupt flags must be routed via siba-specific
	 * SIBA_CFG0_INTVEC configuration register; the BHNDB_PCI_INT_MASK
	 * PCI configuration register is unsupported. */
	{{ BHND_MATCH_CHIP_TYPE		(SIBA) },
	 { BHND_MATCH_CORE_REV		(HWREV_LTE(5)) },
	    BHNDB_PCI_QUIRK_SIBA_INTVEC },

	/* All PCI core revisions require the SRSH work-around */
	BHNDB_PCI_QUIRK(HWREV_ANY, BHNDB_PCI_QUIRK_SRSH_WAR),
	BHNDB_PCI_QUIRK_END
};

/* Quirk table for PCIe Gen-1 host bridge cores */
static struct bhndb_pci_quirk bhndb_pcie_quirks[] = {
	/* All PCIe-G1 core revisions require the SRSH work-around */
	BHNDB_PCI_QUIRK(HWREV_ANY, BHNDB_PCI_QUIRK_SRSH_WAR),
	BHNDB_PCI_QUIRK_END
};

/* Quirk table for PCIe Gen-2 host bridge cores (no known quirks) */
static struct bhndb_pci_quirk bhndb_pcie2_quirks[] = {
	BHNDB_PCI_QUIRK_END
};

/**
 * Return the device table entry for @p ci, or NULL if none.