Example #1
void bcmgenet_mii_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (priv->phydev) {
		phy_init_hw(priv->phydev);	/* soft reset + re-run config_init */
		phy_start_aneg(priv->phydev);	/* then restart autonegotiation */
	}
}
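For reference, here is a rough sketch of what phy_init_hw() itself does in the Linux PHY core. This is a simplified approximation, not the verbatim implementation; the real function also re-applies registered PHY fixups before config_init:

/* Simplified sketch of phy_init_hw(); the real Linux implementation
 * additionally re-applies registered PHY fixups.
 */
int phy_init_hw_sketch(struct phy_device *phydev)
{
	int ret = 0;

	/* Let the PHY driver perform its soft reset, if it has one */
	if (phydev->drv && phydev->drv->soft_reset)
		ret = phydev->drv->soft_reset(phydev);
	if (ret < 0)
		return ret;

	/* Re-apply the driver's register setup after the reset */
	if (phydev->drv && phydev->drv->config_init)
		ret = phydev->drv->config_init(phydev);

	return ret;
}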
Example #2
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & (1 << port) && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in the halted state, and phy_start()
			 * will call resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard-reset the PHY
			 * manually here, we need to reset the state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Make this port, and only this one, a member of the default
	 * VLAN; if the port belonged to a bridge, restore the membership
	 * it had before it was brought down.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
Example #3
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bcma_mdio_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo;
	u8 i;

	/* For some legacy hardware we do chipset-based PHY initialization
	 * here without even detecting the PHY ID. It's hacky and should be
	 * cleaned up as soon as someone can test it.
	 */
	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b);
			bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100);
			bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
			bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa);
			bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
		}
		return;
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;

		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
			bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284);
			bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
			bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010);
			bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
			bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296);
			bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073);
			bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073);
			bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6);
			bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273);
			bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
		}
		return;
	}

	/* For all other hardware, do the initialization via the PHY subsystem. */
	if (bgmac->net_dev && bgmac->net_dev->phydev)
		phy_init_hw(bgmac->net_dev->phydev);
}
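The two per-chip workaround blocks above are fixed sequences of (register, value) writes replayed on PHY addresses 0-4. Purely as an illustration, they could be made table-driven; the helper below is hypothetical and not part of the driver:

/* Hypothetical helper (not in the driver): replay a table of
 * (register, value) pairs on PHY addresses 0..4.
 */
struct bcma_mdio_reg_val {
	u8 reg;
	u16 val;
};

static void bcma_mdio_phy_write_table(struct bgmac *bgmac,
				      const struct bcma_mdio_reg_val *tbl,
				      size_t len)
{
	u8 phyaddr;
	size_t i;

	for (phyaddr = 0; phyaddr < 5; phyaddr++)
		for (i = 0; i < len; i++)
			bcma_mdio_phy_write(bgmac, phyaddr,
					    tbl[i].reg, tbl[i].val);
}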
Example #4
static int ksz9031_read_status(struct phy_device *phydev)
{
	int err;
	int regval;

	err = genphy_read_status(phydev);
	if (err)
		return err;

	/* Make sure the PHY is not broken. Read idle error count,
	 * and reset the PHY if it is maxed out.
	 */
	regval = phy_read(phydev, MII_STAT1000);
	if (regval < 0)
		return regval;	/* don't misread an MDIO error as a count */

	if ((regval & 0xff) == 0xff) {
		phy_init_hw(phydev);
		phydev->link = 0;
	}

	return 0;
}
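For context, this routine is installed as the read_status callback in the Micrel phy_driver table, so the recovery check runs on every link poll. A sketch of the registration, assuming the usual layout of drivers/net/phy/micrel.c; the fields besides .read_status are from memory:

/* Registration sketch; field values other than .read_status are
 * assumptions based on the usual micrel.c layout.
 */
static struct phy_driver ksz9031_phy_driver = {
	.phy_id		= PHY_ID_KSZ9031,
	.phy_id_mask	= 0x00fffff0,		/* assumed Micrel ID mask */
	.read_status	= ksz9031_read_status,
	/* ... remaining callbacks omitted ... */
};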
Example #5
static int mdio_bus_probe(struct device_d *_dev)
{
	struct phy_device *dev = to_phy_device(_dev);
	struct phy_driver *drv = to_phy_driver(_dev->driver);

	int ret;
	char str[16];

	dev->attached_dev->phydev = dev;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto err;
	}

	if (dev->dev_flags) {
		if (dev->dev_flags & PHYLIB_FORCE_10) {
			dev->speed = SPEED_10;
			dev->duplex = DUPLEX_FULL;
			dev->autoneg = AUTONEG_DISABLE;
			dev->force = 1;
			dev->link = 1;
		} else if (dev->dev_flags & PHYLIB_FORCE_100) {
			dev->speed = SPEED_100;
			dev->duplex = DUPLEX_FULL;
			dev->autoneg = AUTONEG_DISABLE;
			dev->force = 1;
			dev->link = 1;
		}
	}

	/* Start out supporting everything. Eventually,
	 * a controller will attach, and may modify one
	 * or both of these values */
	dev->supported = drv->features;
	dev->advertising = drv->features;

	ret = phy_init_hw(dev);
	if (ret)
		goto err;

	/* Sanitize settings based on PHY capabilities */
	if ((dev->supported & SUPPORTED_Autoneg) == 0)
		dev->autoneg = AUTONEG_DISABLE;

	sprintf(str, "%d", dev->addr);
	dev_add_param_fixed(&dev->dev, "phy_addr", str);

	sprintf(str, "0x%08x", dev->phy_id);
	dev_add_param_fixed(&dev->dev, "phy_id", str);

	dev->cdev.name = asprintf("phy%d", _dev->id);
	dev->cdev.size = 64;
	dev->cdev.ops = &phydev_ops;
	dev->cdev.priv = dev;
	dev->cdev.dev = _dev;
	devfs_create(&dev->cdev);

	return 0;

err:
	dev->attached_dev->phydev = NULL;
	dev->attached_dev = NULL;
	return ret;
}
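The PHYLIB_FORCE_10/PHYLIB_FORCE_100 branches only fire if the MAC driver passed those bits in dev_flags when connecting the PHY. A hedged sketch of that caller side, assuming barebox's phy_device_connect() helper; the exact signature and the -1 "scan the bus" convention are assumptions from memory:

/* Hedged caller-side sketch for a barebox MAC driver; the exact
 * phy_device_connect() signature is an assumption from memory.
 */
static int mymac_open(struct eth_device *edev)
{
	/* Force 10 Mbit/s full duplex instead of autonegotiation */
	return phy_device_connect(edev, &mymac_miibus, -1 /* scan bus */,
				  mymac_adjust_link, PHYLIB_FORCE_10,
				  PHY_INTERFACE_MODE_MII);
}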
Example #6
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable learning */
	reg = core_readl(priv, CORE_DIS_LEARN);
	reg &= ~BIT(port);
	core_writel(priv, reg, CORE_DIS_LEARN);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & (1 << port) && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in the halted state, and phy_start()
			 * will call resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard-reset the PHY
			 * manually here, we need to reset the state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}
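Both versions of bcm_sf2_port_setup() are invoked through the driver's dsa_switch_ops; in newer kernels the function is wired up as the port_enable hook, so it runs each time a user port is brought up. A sketch of that hookup, with surrounding fields omitted:

/* Hookup sketch from bcm_sf2.c; other fields omitted, and
 * .port_disable is assumed to be the matching teardown hook.
 */
static const struct dsa_switch_ops bcm_sf2_ops = {
	/* ... */
	.port_enable	= bcm_sf2_port_setup,
	.port_disable	= bcm_sf2_port_disable,
	/* ... */
};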