Example 1
/**
 * Initialize the full bridge configuration.
 * 
 * This is called during the DEVICE_ATTACH() process by the bridged bhndb(4)
 * bus, prior to probe/attachment of child cores.
 * 
 * At this point, we can introspect the enumerated cores, find our host
 * bridge device, and apply any bridge-level hardware workarounds required
 * for proper operation of the bridged device cores.
 */
static int
bhndb_pci_init_full_config(device_t dev, device_t child,
    const struct bhndb_hw_priority *prio_table)
{
	struct bhnd_core_info		 core;
	const struct bhndb_pci_id	*id;
	struct bhndb_pci_softc		*sc;
	struct bhndb_region		*pcir;
	bhnd_addr_t			 pcir_addr;
	bhnd_size_t			 pcir_size;
	int				 error;

	sc = device_get_softc(dev);

	/* Let bhndb perform full discovery and initialization of the
	 * available register windows and bridge resources. */
	if ((error = bhndb_generic_init_full_config(dev, child, prio_table)))
		return (error);

	/* 
	 * Identify our PCI bridge core, its register family, and any
	 * applicable hardware quirks.
	 */
	KASSERT(sc->bhndb.hostb_dev != NULL,
	    ("missing hostb device"));

	core = bhnd_get_core_info(sc->bhndb.hostb_dev);
	id = bhndb_pci_find_core_id(&core);
	if (id == NULL) {
		/* We cannot safely dereference the ID below */
		device_printf(dev, "%s %s hostb core is not recognized\n",
		    bhnd_vendor_name(core.vendor), bhnd_core_name(&core));
		return (ENXIO);
	}

	sc->regfmt = id->regfmt;

	/* Now that we've identified the PCI bridge core, we can determine the
	 * full set of device quirks */
	sc->quirks = bhndb_pci_discover_quirks(sc, id);

	/*
	 * Determine and save a reference to the bhndb resource and offset
	 * at which the bridge core's device registers are mapped.
	 * 
	 * All known bhnd(4) hardware provides a fixed static mapping of
	 * the PCI core's registers. If this changes in the future -- which
	 * is unlikely -- this driver will need to be adjusted to use
	 * dynamic register windows.
	 */

	/* Find base address and size of the PCI core's register block. */
	error = bhnd_get_region_addr(sc->bhndb.hostb_dev, BHND_PORT_DEVICE, 0,
	    0, &pcir_addr, &pcir_size);
	if (error) {
		device_printf(dev,
		    "failed to locate PCI core registers\n");
		return (error);
	}

	/* Find the bhndb_region that statically maps this block */
	pcir = bhndb_find_resource_region(sc->bhndb.bus_res, pcir_addr,
	    pcir_size);
	if (pcir == NULL || pcir->static_regwin == NULL) {
		device_printf(dev,
		    "missing static PCI core register window\n");
		return (ENXIO);
	}

	/* Save borrowed reference to the mapped PCI core registers */
	sc->mem_off = pcir->static_regwin->win_offset;
	sc->mem_res = bhndb_find_regwin_resource(sc->bhndb.bus_res,
	    pcir->static_regwin);
	if (sc->mem_res == NULL || !(rman_get_flags(sc->mem_res) & RF_ACTIVE)) {
		device_printf(dev,
		    "no active resource maps the PCI core register window\n");
		return (ENXIO);
	}

	/* Configure a direct bhnd_resource wrapper that we can pass to
	 * bhnd_resource APIs */
	sc->bhnd_mem_res = (struct bhnd_resource) {
		.res = sc->mem_res,
		.direct = true
	};

	/*
	 * Attach the MDIO device (if this is a PCIe device); it is used to
	 * access the PCIe SerDes registers required by the quirk
	 * workarounds.
	 */
	if (sc->pci_devclass == BHND_DEVCLASS_PCIE) {
		sc->mdio = device_add_child(dev, 
		    devclass_get_name(bhnd_mdio_pci_devclass), 0);
		if (sc->mdio == NULL)
			return (ENXIO);

		if ((error = device_probe_and_attach(sc->mdio))) {
			device_printf(dev, "failed to attach MDIO device\n");
			return (error);
		}
	}

	/* Apply any early one-time quirk workarounds */
	if ((error = bhndb_pci_wars_early_once(sc)))
		return (error);

	/* Apply attach-time quirk workarounds, required before the bridged
	 * bhnd(4) bus itself performs a full attach(). */
	if ((error = bhndb_pci_wars_hwup(sc)))
		return (error);

	return (0);
}
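
A note on the lookup above: bhndb_pci_find_core_id() presumably matches the
host bridge core's info against a static device ID table mapping core IDs to
their register format. The sketch below illustrates that shape only; the
struct layout, table entries, and helper names are assumptions, not the
driver's actual definitions.

/*
 * Illustrative sketch of the ID-table lookup performed by
 * bhndb_pci_find_core_id(). The struct layout and table entries are
 * assumptions for illustration, not the driver's actual definitions.
 */
struct bhndb_pci_id_sketch {
	uint16_t		device;	/* bhnd core device ID */
	bhnd_pci_regfmt_t	regfmt;	/* host bridge register format */
};

static const struct bhndb_pci_id_sketch bhndb_pci_id_table_sketch[] = {
	{ BHND_COREID_PCI,	BHND_PCI_REGFMT_PCI },
	{ BHND_COREID_PCIE,	BHND_PCI_REGFMT_PCIE },
};

/* Return the table entry matching @p core, or NULL if unrecognized */
static const struct bhndb_pci_id_sketch *
bhndb_pci_find_core_id_sketch(const struct bhnd_core_info *core)
{
	for (size_t i = 0; i < nitems(bhndb_pci_id_table_sketch); i++) {
		if (bhndb_pci_id_table_sketch[i].device == core->device)
			return (&bhndb_pci_id_table_sketch[i]);
	}

	return (NULL);
}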

/**
 * Apply any hardware workarounds that must be executed prior to attempting
 * register access on the bridged chipset.
 * 
 * This must be called very early in attach() or resume(), after the basic
 * set of applicable device quirks has been determined.
 */
static int
bhndb_pci_wars_register_access(struct bhndb_pci_softc *sc)
{
	int error;

	if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) {
		if ((error = bhndb_enable_pci_clocks(sc))) {
			device_printf(sc->dev, "failed to enable clocks\n");
			return (error);
		}
	}

	return (0);
}
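
BHNDB_PCI_QUIRK() above reads as a bitmask test against sc->quirks, which
bhndb_pci_discover_quirks() populated earlier. A minimal sketch of that
machinery follows; the flag value and macro spelling are assumptions for
illustration.

/*
 * Sketch of the quirk-flag test used above, assuming quirks are
 * accumulated as a bitmask in sc->quirks. The flag value shown here is
 * illustrative.
 */
#define	BHNDB_PCI_QUIRK_EXT_CLOCK_GATING	(1 << 0)

#define	BHNDB_PCI_QUIRK(_sc, _name)	\
	(((_sc)->quirks & BHNDB_PCI_QUIRK_ ## _name) != 0)
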
Example 2
/**
 * Parse the next core entry from the EROM table and produce a bcma_corecfg
 * to be owned by the caller.
 * 
 * @param erom EROM read state.
 * @param[out] result On success, the core's device info. The caller inherits
 * ownership of this allocation.
 * 
 * @return Returns 0 on success, or ENOENT if the end of the EROM table
 * is reached. On any other failure, a non-zero error value is returned.
 */
int
bcma_erom_parse_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
{
	struct bcma_corecfg	*cfg;
	struct bcma_erom_core	 core;
	uint8_t			 first_region_type;
	bus_size_t		 initial_offset;
	u_int			 core_index;
	int			 core_unit;
	int			 error;

	cfg = NULL;
	initial_offset = bcma_erom_tell(erom);

	/* Parse the next core entry */
	if ((error = bcma_erom_parse_core(erom, &core)))
		return (error);

	/* Determine the core's index and unit numbers */
	bcma_erom_reset(erom);
	core_unit = 0;
	core_index = 0;
	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
		struct bcma_erom_core prev_core;

		/* Parse next core */
		if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
			return (error);

		if ((error = bcma_erom_parse_core(erom, &prev_core)))
			return (error);

		/* Earlier unit of the same core type? */
		if (core.vendor == prev_core.vendor &&
		    core.device == prev_core.device)
		{
			core_unit++;
		}

		/* Seek to next core */
		if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
			return (error);
	}

	/* Skip the core descriptor we already parsed above */
	if ((error = erom_skip_core(erom)))
		return (error);

	/* Allocate our corecfg */
	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
	    core.device, core.rev);
	if (cfg == NULL)
		return (ENOMEM);
	
	/* These are 5-bit values in the EROM table, and should never be able
	 * to overflow BCMA_PID_MAX. */
	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
	    ("unsupported wport count"));

	if (bootverbose) {
		EROM_LOG(erom, 
		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
		    core_index,
		    bhnd_vendor_name(core.vendor),
		    bhnd_find_core_name(core.vendor, core.device), 
		    core.device, core.rev, core_unit);
	}

	cfg->num_master_ports = core.num_mport;
	cfg->num_dev_ports = 0;		/* determined below */
	cfg->num_bridge_ports = 0;	/* determined below */
	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;

	/* Parse Master Port Descriptors */
	for (uint8_t i = 0; i < core.num_mport; i++) {
		struct bcma_mport	*mport;
		struct bcma_erom_mport	 mpd;
	
		/* Parse the master port descriptor */
		error = bcma_erom_parse_mport(erom, &mpd);
		if (error)
			goto failed;

		/* Initialize a new bus mport structure */
		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
		if (mport == NULL) {
			error = ENOMEM;
			goto failed;
		}
		
		mport->mp_vid = mpd.port_vid;
		mport->mp_num = mpd.port_num;

		/* Append to the corecfg's master port list */
		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
	}

	/*
	 * Determine whether this is a bridge device; if so, we can
	 * expect the first sequence of address region descriptors to
	 * be of BCMA_EROM_REGION_TYPE_BRIDGE instead of
	 * BCMA_EROM_REGION_TYPE_DEVICE.
	 * 
	 * It's unclear whether this is the correct mechanism by which we
	 * should detect/handle bridge devices, but this approach matches
	 * that of (some of) Broadcom's published drivers.
	 */
	if (core.num_dport > 0) {
		uint32_t entry;

		if ((error = bcma_erom_peek32(erom, &entry)))
			goto failed;

		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) ==
		        BCMA_EROM_REGION_TYPE_BRIDGE)
		{
			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
			cfg->num_dev_ports = 0;
			cfg->num_bridge_ports = core.num_dport;
		} else {
			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
			cfg->num_dev_ports = core.num_dport;
			cfg->num_bridge_ports = 0;
		}
	}
	
	/* Device/bridge port descriptors */
	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
		error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
		    first_region_type);

		if (error)
			goto failed;
	}

	/* Wrapper (aka device management) descriptors (for master ports). */
	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
		error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
		    BCMA_EROM_REGION_TYPE_MWRAP);

		if (error)
			goto failed;
	}

	
	/* Wrapper (aka device management) descriptors (for slave ports). */	
	for (uint8_t i = 0; i < core.num_swrap; i++) {
		/* Slave wrapper ports are not numbered distinctly from master
		 * wrapper ports. */

		/*
		 * Two observed configurations motivated deriving the port
		 * number via the conditional expression below, rather than
		 * using core.num_mwrap directly:
		 *
		 * Broadcom DDR1/DDR2 Memory Controller
		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1) ->
		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
		 *
		 * ARM BP135 AMBA3 AXI to APB Bridge
		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1) ->
		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
		 */
		uint8_t sp_num;
		sp_num = (core.num_mwrap > 0) ?
				core.num_mwrap :
				((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
		error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
		    BCMA_EROM_REGION_TYPE_SWRAP);

		if (error)
			goto failed;
	}

	*result = cfg;
	return (0);
	
failed:
	if (cfg != NULL)
		bcma_free_corecfg(cfg);

	return (error);
}
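
For completeness, a sketch of the expected calling pattern: parse corecfg
records until ENOENT marks the end of the EROM table, handing each record
off to a consumer. The callback-driven loop below is illustrative; it is
not the bus's actual enumeration code.

/*
 * Illustrative driver loop for bcma_erom_parse_corecfg(). The consumer
 * callback is a hypothetical stand-in for bus enumeration logic that
 * takes ownership of each parsed corecfg.
 */
static int
bcma_erom_enum_corecfgs_sketch(struct bcma_erom *erom,
    int (*consume)(struct bcma_corecfg *))
{
	struct bcma_corecfg	*cfg;
	int			 error;

	while ((error = bcma_erom_parse_corecfg(erom, &cfg)) == 0) {
		/* We own cfg here; release it if the consumer fails */
		if ((error = consume(cfg)) != 0) {
			bcma_free_corecfg(cfg);
			return (error);
		}
	}

	/* ENOENT signals a clean end-of-table, not a failure */
	if (error == ENOENT)
		return (0);

	return (error);
}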