/**
 * phylink_start() - start a phylink instance
 * @pl: a pointer to a &struct phylink returned from phylink_create()
 *
 * Start the phylink instance specified by @pl, configuring the MAC for the
 * desired link mode(s) and negotiation style. This should be called from the
 * network device driver's &struct net_device_ops ndo_open() method.
 */
void phylink_start(struct phylink *pl)
{
	/* Caller must hold RTNL (ndo_open runs under it) */
	ASSERT_RTNL();

	netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
		    phylink_an_mode_str(pl->link_an_mode),
		    phy_modes(pl->link_config.interface));

	/* Always set the carrier off */
	netif_carrier_off(pl->netdev);

	/* Apply the link configuration to the MAC when starting. This allows
	 * a fixed-link to start with the correct parameters, and also
	 * ensures that we set the appropriate advertisement for Serdes links.
	 */
	phylink_resolve_flow(pl, &pl->link_config);
	phylink_mac_config(pl, &pl->link_config);

	/* Restart autonegotiation if using 802.3z to ensure that the link
	 * parameters are properly negotiated. This is necessary for DSA
	 * switches using 802.3z negotiation to ensure they see our modes.
	 */
	phylink_mac_an_restart(pl);

	/* Allow the resolver worker to run from now on */
	clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
	phylink_run_resolve(pl);

	/* Fixed links with a link GPIO are polled once a second */
	if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
		mod_timer(&pl->link_poll, jiffies + HZ);
	if (pl->sfp_bus)
		sfp_upstream_start(pl->sfp_bus);
	if (pl->phydev)
		phy_start(pl->phydev);
}
/* Program the AM3352 GMII_SEL register for one slave port: select the
 * MAC-to-PHY interface mode (MII/RMII/RGMII), and optionally enable the
 * RGMII internal-delay mode or the external RMII reference clock input.
 */
static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
				 phy_interface_t phy_mode, int slave)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;
	bool rgmii_id = false;

	reg = readl(priv->gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		/* Any RGMII variant with internal delay uses the ID bit */
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	default:
		dev_warn(priv->dev,
			 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
			 phy_modes(phy_mode));
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	};

	/* 2-bit mode field per slave, plus the per-slave ID-mode and
	 * RMII-clock bits (offsets 6 and 4 respectively — assumed from
	 * the register layout, TODO confirm against the TRM)
	 */
	mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
	mask |= BIT(slave + 4);
	mode <<= slave * 2;

	if (priv->rmii_clock_external) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
		else
			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
	}

	if (rgmii_id) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
		else
			mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
	}

	/* Read-modify-write: clear this slave's bits only */
	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->gmii_sel);
}
/* Program the DRA7xx GMII_SEL register for one slave port: select the
 * MAC-to-PHY interface mode (MII/RMII/RGMII). Unlike AM3352, the two
 * slaves use fixed 4-bit-spaced fields and there is no internal-delay
 * or external RMII clock support.
 *
 * Fix: drop the stray ';' after the switch closing brace — it is a
 * useless null statement that trips checkpatch/-Wextra-semi style
 * checks (removed upstream as well).
 */
static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
				 phy_interface_t phy_mode, int slave)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;

	reg = readl(priv->gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;

	default:
		dev_warn(priv->dev,
			 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
			 phy_modes(phy_mode));
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	}

	/* Slave 0 uses bits [1:0], slave 1 uses bits [5:4] */
	switch (slave) {
	case 0:
		mask = GMII_SEL_MODE_MASK;
		break;
	case 1:
		mask = GMII_SEL_MODE_MASK << 4;
		mode <<= 4;
		break;
	default:
		dev_err(priv->dev, "invalid slave number...\n");
		return;
	}

	if (priv->rmii_clock_external)
		dev_err(priv->dev, "RMII External clock is not supported\n");

	/* Read-modify-write: clear this slave's mode field only */
	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->gmii_sel);
}
/* Log the requested MAC configuration at debug level, then hand the
 * link state to the MAC driver's mac_config() operation.
 */
static void phylink_mac_config(struct phylink *pl,
			       const struct phylink_link_state *state)
{
	netdev_dbg(pl->netdev,
		   "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
		   __func__, phylink_an_mode_str(pl->link_an_mode),
		   phy_modes(state->interface),
		   phy_speed_to_str(state->speed),
		   phy_duplex_to_str(state->duplex),
		   __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
		   state->pause, state->link, state->an_enabled);

	pl->ops->mac_config(pl->netdev, pl->link_an_mode, state);
}
/**
 * of_get_phy_mode - Get phy mode for given device_node
 * @np: Pointer to the given device_node
 *
 * Reads the phy interface string from the 'phy-mode' property, falling
 * back to 'phy-connection-type', and returns its index in the phy_modes
 * table. Returns a negative errno if neither property exists or the
 * string matches no known mode.
 */
int of_get_phy_mode(struct device_node *np)
{
	const char *mode_str;
	int ret;
	int mode;

	ret = of_property_read_string(np, "phy-mode", &mode_str);
	if (ret < 0)
		ret = of_property_read_string(np, "phy-connection-type",
					      &mode_str);
	if (ret < 0)
		return ret;

	for (mode = 0; mode < PHY_INTERFACE_MODE_MAX; mode++) {
		if (strcasecmp(mode_str, phy_modes(mode)) == 0)
			return mode;
	}

	return -ENODEV;
}
/**
 * fwnode_get_phy_mode - Get phy mode for given firmware node
 * @fwnode: Pointer to the given node
 *
 * Reads the phy interface string from the 'phy-mode' property, falling
 * back to 'phy-connection-type', and returns its index in the phy_modes
 * table. Returns a negative errno if neither property exists or the
 * string matches no known mode.
 */
int fwnode_get_phy_mode(struct fwnode_handle *fwnode)
{
	const char *mode_str;
	int ret;
	int mode;

	ret = fwnode_property_read_string(fwnode, "phy-mode", &mode_str);
	if (ret < 0)
		ret = fwnode_property_read_string(fwnode,
						  "phy-connection-type",
						  &mode_str);
	if (ret < 0)
		return ret;

	for (mode = 0; mode < PHY_INTERFACE_MODE_MAX; mode++) {
		if (strcasecmp(mode_str, phy_modes(mode)) == 0)
			return mode;
	}

	return -ENODEV;
}
/* Reprogram the NSS clock divider for the requested link speed.
 * The RX/TX clock gates for this GMAC are disabled while the divider
 * is changed, then re-enabled. Returns 0 or -EINVAL for PHY modes
 * other than RGMII/SGMII.
 */
static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
{
	uint32_t clk_bits, val;
	int div;

	switch (gmac->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		div = get_clk_div_rgmii(gmac, speed);
		clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
			   NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		div = get_clk_div_sgmii(gmac, speed);
		clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
			   NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
		break;

	default:
		dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
			phy_modes(gmac->phy_mode));
		return -EINVAL;
	}

	/* Disable the clocks */
	regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
	val &= ~clk_bits;
	regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);

	/* Set the divider */
	regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
	val &= ~(NSS_COMMON_CLK_DIV_MASK <<
		 NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
	val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
	regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);

	/* Enable the clock back */
	regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
	val |= clk_bits;
	regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);

	return 0;
}
/* Start a phylink instance: apply the stored link configuration to the
 * MAC, enable the resolver worker, and start any attached SFP bus or
 * PHY device. Must be called under RTNL (e.g. from ndo_open()).
 */
void phylink_start(struct phylink *pl)
{
	WARN_ON(!lockdep_rtnl_is_held());

	netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
		    phylink_an_mode_str(pl->link_an_mode),
		    phy_modes(pl->link_config.interface));

	/* Apply the link configuration to the MAC when starting. This allows
	 * a fixed-link to start with the correct parameters, and also
	 * ensures that we set the appropriate advertisement for Serdes links.
	 */
	phylink_resolve_flow(pl, &pl->link_config);
	phylink_mac_config(pl, &pl->link_config);

	/* Allow the resolver worker to run from now on */
	clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
	phylink_run_resolve(pl);

	if (pl->sfp_bus)
		sfp_upstream_start(pl->sfp_bus);
	if (pl->phydev)
		phy_start(pl->phydev);
}
/* PHY library callback: snapshot the PHY's negotiated link parameters
 * into pl->phy_state under the state mutex, then kick the resolver to
 * propagate the change to the MAC. @do_carrier is accepted for the
 * callback signature but not used here.
 */
static void phylink_phy_change(struct phy_device *phydev, bool up,
			       bool do_carrier)
{
	struct phylink *pl = phydev->phylink;

	mutex_lock(&pl->state_mutex);
	pl->phy_state.speed = phydev->speed;
	pl->phy_state.duplex = phydev->duplex;
	/* Rebuild the pause bitmap from the PHY's pause/asym_pause flags */
	pl->phy_state.pause = MLO_PAUSE_NONE;
	if (phydev->pause)
		pl->phy_state.pause |= MLO_PAUSE_SYM;
	if (phydev->asym_pause)
		pl->phy_state.pause |= MLO_PAUSE_ASYM;
	pl->phy_state.interface = phydev->interface;
	pl->phy_state.link = up;
	mutex_unlock(&pl->state_mutex);

	phylink_run_resolve(pl);

	netdev_dbg(pl->netdev, "phy link %s %s/%s/%s\n", up ? "up" : "down",
		   phy_modes(phydev->interface),
		   phy_speed_to_str(phydev->speed),
		   phy_duplex_to_str(phydev->duplex));
}
/* Probe the IPQ806x GMAC: fetch stmmac platform resources and DT config,
 * program the NSS common and QSGMII registers for the configured PHY
 * mode, then hand off to the common stmmac probe.
 *
 * NOTE(review): the error returns after stmmac_probe_config_dt()
 * succeeds (devm_kzalloc failure, DT parse failure, unsupported PHY
 * mode) return without releasing plat_dat — presumably this leaks the
 * DT config; verify against stmmac_remove_config_dt() usage in other
 * dwmac glue drivers.
 */
static int ipq806x_gmac_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	struct device *dev = &pdev->dev;
	struct ipq806x_gmac *gmac;
	int val;
	void *err;	/* NOTE(review): holds an ERR_PTR-or-NULL cookie, not data */

	val = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (val)
		return val;

	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
	if (!gmac)
		return -ENOMEM;

	gmac->pdev = pdev;

	err = ipq806x_gmac_of_parse(gmac);
	if (IS_ERR(err)) {
		dev_err(dev, "device tree parsing error\n");
		return PTR_ERR(err);
	}

	regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
		     QSGMII_PCS_CAL_LCKDT_CTL_RST);

	/* Inter frame gap is set to 12 */
	val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
	      12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
	/* We also initiate an AXI low power exit request */
	val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
	switch (gmac->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
			phy_modes(gmac->phy_mode));
		return -EINVAL;
	}
	regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);

	/* Configure the clock src according to the mode */
	regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
	val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
	switch (gmac->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
			NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
			NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
			phy_modes(gmac->phy_mode));
		return -EINVAL;
	}
	regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);

	/* Enable PTP clock */
	regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
	val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
	regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);

	if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
			     QSGMII_PHY_CDR_EN |
			     QSGMII_PHY_RX_FRONT_EN |
			     QSGMII_PHY_RX_SIGNAL_DETECT_EN |
			     QSGMII_PHY_TX_DRIVER_EN |
			     QSGMII_PHY_QSGMII_EN |
			     0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
			     0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
			     0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
			     0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
			     0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
	}

	plat_dat->has_gmac = true;
	plat_dat->bsp_priv = gmac;
	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;

	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
/* Probe the AMD XGBE platform device: allocate the netdev, map the
 * XGMAC/XPCS register regions, read the MAC address and PHY mode from
 * device properties, configure DMA/coherency/queues, register with
 * MDIO and finally register the net device.
 */
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct xgbe_hw_if *hw_if;
	struct xgbe_desc_if *desc_if;
	struct net_device *netdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	hw_if = pdata->hw_if = &default_xgbe_hw_if;
	desc_if = pdata->desc_if = &default_xgbe_desc_if;

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Prepare to register with MDIO */
	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
	if (!pdata->mii_bus_id) {
		dev_err(dev, "failed to allocate mii bus id\n");
		ret = -ENOMEM;
		goto err_io;
	}
	ret = xgbe_mdio_register(pdata);
	if (ret)
		goto err_bus_id;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_reg_netdev;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_reg_netdev:
	xgbe_mdio_unregister(pdata);

err_bus_id:
	kfree(pdata->mii_bus_id);

err_io:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
/* Parse the DT link management mode for a phylink instance: a
 * "fixed-link" node/property selects MLO_AN_FIXED, while
 * managed = "in-band-status" restricts the supported link modes to the
 * set appropriate for the configured interface and selects SGMII or
 * 802.3z in-band negotiation. Returns 0 or -EINVAL on conflicting or
 * unsupported configuration.
 */
static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
{
	struct device_node *dn;
	const char *managed;

	dn = of_get_child_by_name(np, "fixed-link");
	if (dn || of_find_property(np, "fixed-link", NULL))
		pl->link_an_mode = MLO_AN_FIXED;
	of_node_put(dn);

	if (of_property_read_string(np, "managed", &managed) == 0 &&
	    strcmp(managed, "in-band-status") == 0) {
		/* fixed-link and in-band-status are mutually exclusive */
		if (pl->link_an_mode == MLO_AN_FIXED) {
			netdev_err(pl->netdev,
				   "can't use both fixed-link and in-band-status\n");
			return -EINVAL;
		}

		/* Rebuild the supported mask from scratch for in-band mode */
		linkmode_zero(pl->supported);
		phylink_set(pl->supported, MII);
		phylink_set(pl->supported, Autoneg);
		phylink_set(pl->supported, Asym_Pause);
		phylink_set(pl->supported, Pause);
		pl->link_config.an_enabled = true;

		switch (pl->link_config.interface) {
		case PHY_INTERFACE_MODE_SGMII:
			phylink_set(pl->supported, 10baseT_Half);
			phylink_set(pl->supported, 10baseT_Full);
			phylink_set(pl->supported, 100baseT_Half);
			phylink_set(pl->supported, 100baseT_Full);
			phylink_set(pl->supported, 1000baseT_Half);
			phylink_set(pl->supported, 1000baseT_Full);
			pl->link_an_mode = MLO_AN_SGMII;
			break;

		case PHY_INTERFACE_MODE_1000BASEX:
			phylink_set(pl->supported, 1000baseX_Full);
			pl->link_an_mode = MLO_AN_8023Z;
			break;

		case PHY_INTERFACE_MODE_2500BASEX:
			phylink_set(pl->supported, 2500baseX_Full);
			pl->link_an_mode = MLO_AN_8023Z;
			break;

		case PHY_INTERFACE_MODE_10GKR:
			/* 10GBASE-KR also allows the lower baseT/baseX modes */
			phylink_set(pl->supported, 10baseT_Half);
			phylink_set(pl->supported, 10baseT_Full);
			phylink_set(pl->supported, 100baseT_Half);
			phylink_set(pl->supported, 100baseT_Full);
			phylink_set(pl->supported, 1000baseT_Half);
			phylink_set(pl->supported, 1000baseT_Full);
			phylink_set(pl->supported, 1000baseX_Full);
			phylink_set(pl->supported, 10000baseKR_Full);
			phylink_set(pl->supported, 10000baseCR_Full);
			phylink_set(pl->supported, 10000baseSR_Full);
			phylink_set(pl->supported, 10000baseLR_Full);
			phylink_set(pl->supported, 10000baseLRM_Full);
			phylink_set(pl->supported, 10000baseER_Full);
			pl->link_an_mode = MLO_AN_SGMII;
			break;

		default:
			netdev_err(pl->netdev,
				   "incorrect link mode %s for in-band status\n",
				   phy_modes(pl->link_config.interface));
			return -EINVAL;
		}

		linkmode_copy(pl->link_config.advertising, pl->supported);

		if (phylink_validate(pl, pl->supported, &pl->link_config)) {
			netdev_err(pl->netdev,
				   "failed to validate link configuration for in-band status\n");
			return -EINVAL;
		}
	}

	return 0;
}
/* SFP upstream callback: an SFP module was inserted. Derive the link
 * modes and interface from the module EEPROM, validate the resulting
 * configuration against the MAC, and switch the phylink instance to the
 * new mode if anything changed. Returns 0 or a negative errno.
 */
static int phylink_sfp_module_insert(void *upstream,
				     const struct sfp_eeprom_id *id)
{
	struct phylink *pl = upstream;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
	struct phylink_link_state config;
	phy_interface_t iface;
	int mode, ret = 0;
	bool changed;
	u8 port;

	sfp_parse_support(pl->sfp_bus, id, support);
	port = sfp_parse_port(pl->sfp_bus, id, support);
	iface = sfp_parse_interface(pl->sfp_bus, id);

	WARN_ON(!lockdep_rtnl_is_held());

	/* Only SGMII and 1000BASE-X modules are handled here */
	switch (iface) {
	case PHY_INTERFACE_MODE_SGMII:
		mode = MLO_AN_SGMII;
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
		mode = MLO_AN_8023Z;
		break;
	default:
		return -EINVAL;
	}

	memset(&config, 0, sizeof(config));
	linkmode_copy(config.advertising, support);
	config.interface = iface;
	config.speed = SPEED_UNKNOWN;
	config.duplex = DUPLEX_UNKNOWN;
	config.pause = MLO_PAUSE_AN;
	config.an_enabled = pl->link_config.an_enabled;

	/* Ignore errors if we're expecting a PHY to attach later */
	ret = phylink_validate(pl, support, &config);
	if (ret) {
		netdev_err(pl->netdev,
			   "validation of %s/%s with support %*pb failed: %d\n",
			   phylink_an_mode_str(mode),
			   phy_modes(config.interface),
			   __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
		return ret;
	}

	netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
		   phylink_an_mode_str(mode), phy_modes(config.interface),
		   __ETHTOOL_LINK_MODE_MASK_NBITS, support);

	/* 802.3z in-band negotiation cannot coexist with an attached PHY */
	if (mode == MLO_AN_8023Z && pl->phydev)
		return -EINVAL;

	changed = !bitmap_equal(pl->supported, support,
				__ETHTOOL_LINK_MODE_MASK_NBITS);
	if (changed) {
		linkmode_copy(pl->supported, support);
		linkmode_copy(pl->link_config.advertising, config.advertising);
	}

	if (pl->link_an_mode != mode ||
	    pl->link_config.interface != config.interface) {
		pl->link_config.interface = config.interface;
		pl->link_an_mode = mode;

		changed = true;

		netdev_info(pl->netdev, "switched to %s/%s link mode\n",
			    phylink_an_mode_str(mode),
			    phy_modes(config.interface));
	}

	pl->link_port = port;

	/* Only reprogram the MAC if the instance is running */
	if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
				 &pl->phylink_disable_state))
		phylink_mac_config(pl, &pl->link_config);

	return ret;
}
/* SFP upstream callback (in-band variant): an SFP module was inserted.
 * Parse the module's supported link modes, let the SFP layer select the
 * best interface for them, validate twice (once mode-agnostic with
 * PHY_INTERFACE_MODE_NA, once with the selected interface), and switch
 * the phylink instance to MLO_AN_INBAND on the new interface if
 * anything changed. Returns 0 or a negative errno.
 */
static int phylink_sfp_module_insert(void *upstream,
				     const struct sfp_eeprom_id *id)
{
	struct phylink *pl = upstream;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
	struct phylink_link_state config;
	phy_interface_t iface;
	int ret = 0;
	bool changed;
	u8 port;

	ASSERT_RTNL();

	sfp_parse_support(pl->sfp_bus, id, support);
	port = sfp_parse_port(pl->sfp_bus, id, support);

	memset(&config, 0, sizeof(config));
	linkmode_copy(config.advertising, support);
	config.interface = PHY_INTERFACE_MODE_NA;
	config.speed = SPEED_UNKNOWN;
	config.duplex = DUPLEX_UNKNOWN;
	config.pause = MLO_PAUSE_AN;
	config.an_enabled = pl->link_config.an_enabled;

	/* Ignore errors if we're expecting a PHY to attach later */
	ret = phylink_validate(pl, support, &config);
	if (ret) {
		netdev_err(pl->netdev, "validation with support %*pb failed: %d\n",
			   __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
		return ret;
	}

	/* Let the SFP layer pick the interface for the advertised modes */
	iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
	if (iface == PHY_INTERFACE_MODE_NA) {
		netdev_err(pl->netdev,
			   "selection of interface failed, advertisement %*pb\n",
			   __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising);
		return -EINVAL;
	}

	config.interface = iface;
	/* Re-validate now that a concrete interface is chosen */
	ret = phylink_validate(pl, support, &config);
	if (ret) {
		netdev_err(pl->netdev,
			   "validation of %s/%s with support %*pb failed: %d\n",
			   phylink_an_mode_str(MLO_AN_INBAND),
			   phy_modes(config.interface),
			   __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
		return ret;
	}

	netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
		   phylink_an_mode_str(MLO_AN_INBAND),
		   phy_modes(config.interface),
		   __ETHTOOL_LINK_MODE_MASK_NBITS, support);

	/* 802.3z in-band negotiation cannot coexist with an attached PHY */
	if (phy_interface_mode_is_8023z(iface) && pl->phydev)
		return -EINVAL;

	changed = !bitmap_equal(pl->supported, support,
				__ETHTOOL_LINK_MODE_MASK_NBITS);
	if (changed) {
		linkmode_copy(pl->supported, support);
		linkmode_copy(pl->link_config.advertising, config.advertising);
	}

	if (pl->link_an_mode != MLO_AN_INBAND ||
	    pl->link_config.interface != config.interface) {
		pl->link_config.interface = config.interface;
		pl->link_an_mode = MLO_AN_INBAND;

		changed = true;

		netdev_info(pl->netdev, "switched to %s/%s link mode\n",
			    phylink_an_mode_str(MLO_AN_INBAND),
			    phy_modes(config.interface));
	}

	pl->link_port = port;

	/* Only reprogram the MAC if the instance is running */
	if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
				 &pl->phylink_disable_state))
		phylink_mac_config(pl, &pl->link_config);

	return ret;
}
static int xgbe_probe(struct platform_device *pdev) { struct xgbe_prv_data *pdata; struct net_device *netdev; struct device *dev = &pdev->dev, *phy_dev; struct platform_device *phy_pdev; struct resource *res; const char *phy_mode; unsigned int i, phy_memnum, phy_irqnum; enum dev_dma_attr attr; int ret; DBGPR("--> xgbe_probe\n"); netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data), XGBE_MAX_DMA_CHANNELS); if (!netdev) { dev_err(dev, "alloc_etherdev failed\n"); ret = -ENOMEM; goto err_alloc; } SET_NETDEV_DEV(netdev, dev); pdata = netdev_priv(netdev); pdata->netdev = netdev; pdata->pdev = pdev; pdata->adev = ACPI_COMPANION(dev); pdata->dev = dev; platform_set_drvdata(pdev, netdev); spin_lock_init(&pdata->lock); spin_lock_init(&pdata->xpcs_lock); mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); pdata->msg_enable = netif_msg_init(debug, default_msg_level); set_bit(XGBE_DOWN, &pdata->dev_state); /* Check if we should use ACPI or DT */ pdata->use_acpi = dev->of_node ? 0 : 1; phy_pdev = xgbe_get_phy_pdev(pdata); if (!phy_pdev) { dev_err(dev, "unable to obtain phy device\n"); ret = -EINVAL; goto err_phydev; } phy_dev = &phy_pdev->dev; if (pdev == phy_pdev) { /* New style device tree or ACPI: * The XGBE and PHY resources are grouped together with * the PHY resources listed last */ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3; phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1; } else { /* Old style device tree: * The XGBE and PHY resources are separate */ phy_memnum = 0; phy_irqnum = 0; } /* Set and validate the number of descriptors for a ring */ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); pdata->tx_desc_count = XGBE_TX_DESC_CNT; if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { dev_err(dev, "tx descriptor count (%d) is not valid\n", pdata->tx_desc_count); ret = -EINVAL; goto err_io; } BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT); pdata->rx_desc_count = XGBE_RX_DESC_CNT; if (pdata->rx_desc_count & (pdata->rx_desc_count 
- 1)) { dev_err(dev, "rx descriptor count (%d) is not valid\n", pdata->rx_desc_count); ret = -EINVAL; goto err_io; } /* Obtain the mmio areas for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pdata->xgmac_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->xgmac_regs)) { dev_err(dev, "xgmac ioremap failed\n"); ret = PTR_ERR(pdata->xgmac_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); pdata->xpcs_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->xpcs_regs)) { dev_err(dev, "xpcs ioremap failed\n"); ret = PTR_ERR(pdata->xpcs_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); pdata->rxtx_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->rxtx_regs)) { dev_err(dev, "rxtx ioremap failed\n"); ret = PTR_ERR(pdata->rxtx_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs); res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); pdata->sir0_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->sir0_regs)) { dev_err(dev, "sir0 ioremap failed\n"); ret = PTR_ERR(pdata->sir0_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs); res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); pdata->sir1_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->sir1_regs)) { dev_err(dev, "sir1 ioremap failed\n"); ret = PTR_ERR(pdata->sir1_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs); /* Retrieve the MAC address */ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY, pdata->mac_addr, sizeof(pdata->mac_addr)); if (ret || !is_valid_ether_addr(pdata->mac_addr)) { dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY); if (!ret) 
ret = -EINVAL; goto err_io; } /* Retrieve the PHY mode - it must be "xgmii" */ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY, &phy_mode); if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) { dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY); if (!ret) ret = -EINVAL; goto err_io; } pdata->phy_mode = PHY_INTERFACE_MODE_XGMII; /* Check for per channel interrupt support */ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) pdata->per_channel_irq = 1; /* Retrieve the PHY speedset */ ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY, &pdata->speed_set); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY); goto err_io; } switch (pdata->speed_set) { case XGBE_SPEEDSET_1000_10000: case XGBE_SPEEDSET_2500_10000: break; default: dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY); ret = -EINVAL; goto err_io; } /* Retrieve the PHY configuration properties */ if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_BLWC_PROPERTY, pdata->serdes_blwc, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_BLWC_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_blwc, xgbe_serdes_blwc, sizeof(pdata->serdes_blwc)); } if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_CDR_RATE_PROPERTY, pdata->serdes_cdr_rate, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_CDR_RATE_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate, sizeof(pdata->serdes_cdr_rate)); } if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_PQ_SKEW_PROPERTY, pdata->serdes_pq_skew, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_PQ_SKEW_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew, sizeof(pdata->serdes_pq_skew)); } if 
(device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_TX_AMP_PROPERTY, pdata->serdes_tx_amp, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_TX_AMP_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp, sizeof(pdata->serdes_tx_amp)); } if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_DFE_CFG_PROPERTY, pdata->serdes_dfe_tap_cfg, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_DFE_CFG_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg, sizeof(pdata->serdes_dfe_tap_cfg)); } if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_DFE_ENA_PROPERTY, pdata->serdes_dfe_tap_ena, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_DFE_ENA_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena, sizeof(pdata->serdes_dfe_tap_ena)); } /* Obtain device settings unique to ACPI/OF */ if (pdata->use_acpi) ret = xgbe_acpi_support(pdata); else ret = xgbe_of_support(pdata); if (ret) goto err_io; /* Set the DMA coherency values */ attr = device_get_dma_attr(dev); if (attr == DEV_DMA_NOT_SUPPORTED) { dev_err(dev, "DMA is not supported"); goto err_io; } pdata->coherent = (attr == DEV_DMA_COHERENT); if (pdata->coherent) { pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; pdata->arcache = XGBE_DMA_OS_ARCACHE; pdata->awcache = XGBE_DMA_OS_AWCACHE; } else { pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN; pdata->arcache = XGBE_DMA_SYS_ARCACHE; pdata->awcache = XGBE_DMA_SYS_AWCACHE; } /* Get the device interrupt */ ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(dev, "platform_get_irq 0 failed\n"); goto err_io; } pdata->dev_irq = ret; /* Get the auto-negotiation interrupt */ ret = platform_get_irq(phy_pdev, phy_irqnum++); if (ret < 0) { dev_err(dev, 
"platform_get_irq phy 0 failed\n"); goto err_io; } pdata->an_irq = ret; netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->xgmac_regs; memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); /* Set all the function pointers */ xgbe_init_all_fptrs(pdata); /* Issue software reset to device */ pdata->hw_if.exit(pdata); /* Populate the hardware features */ xgbe_get_all_hw_features(pdata); /* Set default configuration data */ xgbe_default_config(pdata); /* Set the DMA mask */ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(pdata->hw_feat.dma_width)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed\n"); goto err_io; } /* Calculate the number of Tx and Rx rings to be created * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set * the number of Tx queues to the number of Tx channels * enabled * -Rx (DMA) Channels do not map 1-to-1 so use the actual * number of Rx queues */ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), pdata->hw_feat.tx_ch_cnt); pdata->tx_q_count = pdata->tx_ring_count; ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); if (ret) { dev_err(dev, "error setting real tx queue count\n"); goto err_io; } pdata->rx_ring_count = min_t(unsigned int, netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt); pdata->rx_q_count = pdata->hw_feat.rx_q_cnt; ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); if (ret) { dev_err(dev, "error setting real rx queue count\n"); goto err_io; } /* Initialize RSS hash key and lookup table */ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, i % pdata->rx_ring_count); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ pdata->phy_if.phy_init(pdata); /* Set device operations */ 
netdev->netdev_ops = xgbe_get_netdev_ops(); netdev->ethtool_ops = xgbe_get_ethtool_ops(); #ifdef CONFIG_AMD_XGBE_DCB netdev->dcbnl_ops = xgbe_get_dcbnl_ops(); #endif /* Set device features */ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER; if (pdata->hw_feat.rss) netdev->hw_features |= NETIF_F_RXHASH; netdev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; netdev->features |= netdev->hw_features; pdata->netdev_features = netdev->features; netdev->priv_flags |= IFF_UNICAST_FLT; /* Use default watchdog timeout */ netdev->watchdog_timeo = 0; xgbe_init_rx_coalesce(pdata); xgbe_init_tx_coalesce(pdata); netif_carrier_off(netdev); ret = register_netdev(netdev); if (ret) { dev_err(dev, "net device registration failed\n"); goto err_io; } /* Create the PHY/ANEG name based on netdev name */ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", netdev_name(netdev)); /* Create workqueues */ pdata->dev_workqueue = create_singlethread_workqueue(netdev_name(netdev)); if (!pdata->dev_workqueue) { netdev_err(netdev, "device workqueue creation failed\n"); ret = -ENOMEM; goto err_netdev; } pdata->an_workqueue = create_singlethread_workqueue(pdata->an_name); if (!pdata->an_workqueue) { netdev_err(netdev, "phy workqueue creation failed\n"); ret = -ENOMEM; goto err_wq; } xgbe_ptp_register(pdata); xgbe_debugfs_init(pdata); platform_device_put(phy_pdev); netdev_notice(netdev, "net device enabled\n"); DBGPR("<-- xgbe_probe\n"); return 0; err_wq: destroy_workqueue(pdata->dev_workqueue); err_netdev: unregister_netdev(netdev); err_io: platform_device_put(phy_pdev); err_phydev: free_netdev(netdev); err_alloc: dev_notice(dev, "net device not enabled\n"); return ret; }
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; u8 tx_dly_val = 0; switch (dwmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_RXID: /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where * 8ns are exactly one cycle of the 125MHz RGMII TX clock): * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3 */ tx_dly_val = dwmac->tx_delay_ns >> 1; /* fall through */ case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: /* enable RGMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, PRG_ETH0_RGMII_MODE); /* only relevant for RMII mode -> disable in RGMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, tx_dly_val << PRG_ETH0_TXDLY_SHIFT); /* Configure the 125MHz RGMII TX clock, the IP block changes * the output automatically (= without us having to configure * a register) based on the line-speed (125MHz for Gbit speeds, * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s). 
*/ ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000); if (ret) { dev_err(&dwmac->pdev->dev, "failed to set RGMII TX clock\n"); return ret; } ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk); if (ret) { dev_err(&dwmac->pdev->dev, "failed to enable the RGMII TX clock\n"); return ret; } break; case PHY_INTERFACE_MODE_RMII: /* disable RGMII mode -> enables RMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, 0); /* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, PRG_ETH0_INVERTED_RMII_CLK); /* TX clock delay cannot be configured in RMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, 0); break; default: dev_err(&dwmac->pdev->dev, "unsupported phy-mode %s\n", phy_modes(dwmac->phy_mode)); return -EINVAL; } /* enable TX_CLK and PHY_REF_CLK generator */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK, PRG_ETH0_TX_AND_PHY_REF_CLK); return 0; }