Example #1
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment, while all
	 * other PHY reads/writes go to the master MDIO bus controller that
	 * comes with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	err = of_mdiobus_register(priv->slave_mii_bus, dn);
	if (err && dn)
		of_node_put(dn);

	return err;
}
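
The comment above refers to the slave MII bus read hook registered a few lines earlier. The hook body is not part of this example; as a rough, hedged sketch, bcm_sf2_sw_mdio_read could consult indir_phy_mask along these lines, assuming an indirect-access helper such as bcm_sf2_sw_indir_rw:

/* Hedged sketch only: shows how the read hook might use indir_phy_mask.
 * bcm_sf2_sw_indir_rw is an assumed helper performing the indirect
 * pseudo-PHY access; it is not shown in these examples.
 */
static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Divert accesses to the flagged pseudo-PHY address towards the
	 * workaround; everything else goes to the master MDIO bus controller.
	 */
	if (priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);

	return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}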
Example #2
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* Latch the pending, unmasked interrupt sources and acknowledge them */
	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
Example #3
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}
Example #4
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	/* Clear the flush bits and enable ACB with the chosen algorithm */
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}
Example #5
static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   phy_interface_t interface,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;

	/* Re-enable the port's MAC-side interface now that the link is up */
	bcm_sf2_sw_mac_link_set(ds, port, interface, true);

	/* Re-initialize EEE when the link is driven by a PHY */
	if (mode == MLO_AN_PHY && phydev)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}
Example #6
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}
Example #7
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all physically present ports, including the IMP
	 * port; the unused ones have already been disabled during
	 * bcm_sf2_sw_setup()
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}
Example #8
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	ds->ops->setup(ds);

	return 0;
}
Example #9
static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
				    phy_interface_t interface, bool link)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	if (!phy_interface_mode_is_rgmii(interface) &&
	    interface != PHY_INTERFACE_MODE_MII &&
	    interface != PHY_INTERFACE_MODE_REVMII)
		return;

	/* If the link is down, just disable the interface to conserve power */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	if (link)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
}
Example #10
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* Latch the pending, unmasked interrupt sources and acknowledge them */
	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	/* Propagate port 7 link state changes to phylink */
	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = true;
		dsa_port_phylink_mac_change(ds, 7, true);
	}
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = false;
		dsa_port_phylink_mac_change(ds, 7, false);
	}

	return IRQ_HANDLED;
}
Example #11
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}
Example #12
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}
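
The wol_ports_mask checked above is presumably populated by a corresponding set_wol operation, which is not part of these examples. A simplified, hypothetical sketch of how such an operation could update the mask, assuming the parent device's ethtool ops perform the actual Wake-on-LAN programming:

/* Hypothetical, simplified sketch (not taken from the examples above) */
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Reject options the parent device does not support */
	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	/* Track which switch ports have Wake-on-LAN enabled */
	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	return p->ethtool_ops->set_wol(p, wol);
}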
Example #13
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		id_mode_dis = 1;
		/* fallthrough */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		port_mode = EXT_REVMII;
		break;
	default:
		/* all other PHYs: internal and MoCA */
		goto force_link;
	}

	/* Clear the id_mode_dis bit and the existing port mode; let
	 * RGMII_MODE_EN be set by mac_link_{up,down}
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (state->pause & MLO_PAUSE_TXRX_MASK) {
		if (state->pause & MLO_PAUSE_TX)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (state->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (state->link)
		reg |= LINK_STS;
	if (state->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);
}
Example #14
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable learning */
	reg = core_readl(priv, CORE_DIS_LEARN);
	reg &= ~BIT(port);
	core_writel(priv, reg, CORE_DIS_LEARN);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in the halted state, and phy_start()
			 * will call resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard reset the PHY
			 * manually here, we need to reset its state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}