static int xgbe_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_remove\n");

	xgbe_debugfs_exit(pdata);

	xgbe_ptp_unregister(pdata);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	unregister_netdev(netdev);

	free_netdev(netdev);

	DBGPR("<--xgbe_remove\n");

	return 0;
}
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	DBGPR("-->xgbe_init_ring\n");

	if (!ring)
		return 0;

	/* Descriptors */
	ring->rdesc_count = rdesc_count;
	ring->rdesc = dma_alloc_coherent(pdata->dev,
					 (sizeof(struct xgbe_ring_desc) *
					  rdesc_count), &ring->rdesc_dma,
					 GFP_KERNEL);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
			      GFP_KERNEL);
	if (!ring->rdata)
		return -ENOMEM;

	DBGPR("  rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
	      ring->rdesc, ring->rdesc_dma, ring->rdata);

	DBGPR("<--xgbe_init_ring\n");

	return 0;
}
static int xgbe_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct phy_device *phydev = pdata->phydev;
	int ret = 0;

	DBGPR("-->xgbe_set_pauseparam\n");

	DBGPR("  autoneg = %d, tx_pause = %d, rx_pause = %d\n",
	      pause->autoneg, pause->tx_pause, pause->rx_pause);

	pdata->pause_autoneg = pause->autoneg;
	if (pause->autoneg) {
		phydev->advertising |= ADVERTISED_Pause;
		phydev->advertising |= ADVERTISED_Asym_Pause;
	} else {
		phydev->advertising &= ~ADVERTISED_Pause;
		phydev->advertising &= ~ADVERTISED_Asym_Pause;

		pdata->tx_pause = pause->tx_pause;
		pdata->rx_pause = pause->rx_pause;
	}

	if (netif_running(netdev))
		ret = phy_start_aneg(phydev);

	DBGPR("<--xgbe_set_pauseparam\n");

	return ret;
}
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma, skb_dma;
	struct sk_buff *skb = NULL;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			/* Allocate skb & assign to each rdesc */
			skb = dev_alloc_skb(pdata->rx_buf_size);
			if (skb == NULL)
				break;
			skb_dma = dma_map_single(pdata->dev, skb->data,
						 pdata->rx_buf_size,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "failed to do the dma map\n");
				dev_kfree_skb_any(skb);
				break;
			}
			rdata->skb = skb;
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = pdata->rx_buf_size;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		ring->rx.realloc_index = 0;
		ring->rx.realloc_threshold = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
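/*
 * For reference, a sketch of the XGBE_GET_DESC_DATA() lookup used above,
 * modeled on the upstream driver: it indexes the rdata array with the
 * index masked by (rdesc_count - 1), which is why the probe paths insist
 * that the descriptor counts are powers of two. Treat the exact
 * definition as an assumption here.
 */
#define XGBE_GET_DESC_DATA(_ring, _idx)				\
	((_ring)->rdata +					\
	 ((_idx) & ((_ring)->rdesc_count - 1)))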
static int xgbe_set_settings(struct net_device *netdev,
			     struct ethtool_cmd *cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct phy_device *phydev = pdata->phydev;
	u32 speed;
	int ret;

	DBGPR("-->xgbe_set_settings\n");

	if (!pdata->phydev)
		return -ENODEV;

	speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	if ((cmd->autoneg != AUTONEG_ENABLE) &&
	    (cmd->autoneg != AUTONEG_DISABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (speed) {
		case SPEED_10000:
		case SPEED_2500:
		case SPEED_1000:
			break;
		default:
			return -EINVAL;
		}

		if (cmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	}

	cmd->advertising &= phydev->supported;
	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
		return -EINVAL;

	ret = 0;
	phydev->autoneg = cmd->autoneg;
	phydev->speed = speed;
	phydev->duplex = cmd->duplex;
	phydev->advertising = cmd->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	if (netif_running(netdev))
		ret = phy_start_aneg(phydev);

	DBGPR("<--xgbe_set_settings\n");

	return ret;
}
static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;

	DBGPR("-->xgbe_alloc_rings\n");

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = devm_kcalloc(pdata->dev, count,
				   sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		return NULL;

	tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
			       sizeof(struct xgbe_ring), GFP_KERNEL);
	if (!tx_ring)
		return NULL;

	rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
			       sizeof(struct xgbe_ring), GFP_KERNEL);
	if (!rx_ring)
		return NULL;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		DBGPR("  %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
		      channel->name, channel->queue_index, channel->dma_regs,
		      channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_count = count;

	DBGPR("<--xgbe_alloc_rings\n");

	return channel_mem;
}
static void xgbe_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_get_pauseparam\n");

	pause->autoneg = pdata->pause_autoneg;
	pause->tx_pause = pdata->tx_pause;
	pause->rx_pause = pdata->rx_pause;

	DBGPR("<--xgbe_get_pauseparam\n");
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->realloc_skb = xgbe_realloc_skb;
	desc_if->unmap_skb = xgbe_unmap_skb;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}
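/*
 * Illustrative sketch (not part of the driver): the rest of the driver
 * dispatches through the populated ops table rather than calling the
 * static helpers directly. The function below is hypothetical; in the
 * driver the table is filled in via xgbe_init_all_fptrs() during probe.
 */
static int example_alloc_and_init_rings(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		return ret;

	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);

	return 0;
}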
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}
static uint64_t
axgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct xgbe_prv_data *pdata = ifp->if_softc;
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (pstats->rxframecount_gb);
	case IFCOUNTER_IERRORS:
		return (pstats->rxframecount_gb -
		    pstats->rxbroadcastframes_g -
		    pstats->rxmulticastframes_g -
		    pstats->rxunicastframes_g);
	case IFCOUNTER_OPACKETS:
		return (pstats->txframecount_gb);
	case IFCOUNTER_OERRORS:
		return (pstats->txframecount_gb - pstats->txframecount_g);
	case IFCOUNTER_IBYTES:
		return (pstats->rxoctetcount_gb);
	case IFCOUNTER_OBYTES:
		return (pstats->txoctetcount_gb);
	default:
		return (if_get_counter_default(ifp, c));
	}
}
void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_mdio_unregister\n");

	pdata->phydev = NULL;

	module_put(pdata->phy_module);
	pdata->phy_module = NULL;

	mdiobus_unregister(pdata->mii);
	pdata->mii->priv = NULL;

	mdiobus_free(pdata->mii);
	pdata->mii = NULL;

	DBGPR("<--xgbe_mdio_unregister\n");
}
static void xgbe_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	u8 *stat;
	int i;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);
	for (i = 0; i < XGBE_STATS_COUNT; i++) {
		stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
		*data++ = *(u64 *)stat;
	}

	DBGPR("<--%s\n", __func__);
}
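/*
 * Sketch of the table shape that xgbe_get_ethtool_stats() walks; the
 * struct name here is hypothetical and only the fields the loop above
 * actually reads are shown. Each entry pairs an ethtool string with a
 * byte offset into struct xgbe_prv_data, so one offset addition reaches
 * any MMC counter, e.g.
 *   { "rx_frames", offsetof(struct xgbe_prv_data, mmc_stats.rxframecount_gb) }
 */
struct xgbe_stats_example {
	char stat_string[ETH_GSTRING_LEN];
	int stat_offset;
};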
static int xgbe_get_settings(struct net_device *netdev,
			     struct ethtool_cmd *cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_get_settings\n");

	if (!pdata->phydev)
		return -ENODEV;

	ret = phy_ethtool_gset(pdata->phydev, cmd);
	cmd->transceiver = XCVR_EXTERNAL;

	DBGPR("<--xgbe_get_settings\n");

	return ret;
}
static int xgbe_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret = 0;

	DBGPR("-->xgbe_resume\n");

	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	if (netif_running(netdev))
		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

	DBGPR("<--xgbe_resume\n");

	return ret;
}
static int xgbe_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	int ret;

	DBGPR("-->xgbe_resume\n");

	if (!netif_running(netdev)) {
		DBGPR("<--xgbe_resume\n");
		return -EINVAL;
	}

	ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

	DBGPR("<--xgbe_resume\n");

	return ret;
}
static void xgbe_get_strings(struct net_device *netdev, u32 stringset,
			     u8 *data)
{
	int i;

	DBGPR("-->%s\n", __func__);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < XGBE_STATS_COUNT; i++) {
			memcpy(data, xgbe_gstring_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}

	DBGPR("<--%s\n", __func__);
}
static void xgbe_realloc_skb(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct sk_buff *skb = NULL;
	dma_addr_t skb_dma;
	int i;

	DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
	      ring->rx.realloc_index);

	for (i = 0; i < ring->dirty; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);

		/* Reset rdata values */
		xgbe_unmap_skb(pdata, rdata);

		/* Allocate skb & assign to each rdesc */
		skb = dev_alloc_skb(pdata->rx_buf_size);
		if (skb == NULL)
			break;
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 pdata->rx_buf_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev,
				     "failed to do the dma map\n");
			dev_kfree_skb_any(skb);
			break;
		}
		rdata->skb = skb;
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = pdata->rx_buf_size;

		hw_if->rx_desc_reset(rdata);

		ring->rx.realloc_index++;
	}
	ring->dirty = 0;

	DBGPR("<--xgbe_realloc_skb\n");
}
static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
{
	int ret;

	DBGPR("-->%s\n", __func__);

	switch (stringset) {
	case ETH_SS_STATS:
		ret = XGBE_STATS_COUNT;
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	DBGPR("<--%s\n", __func__);

	return ret;
}
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_16;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_16;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}
static int xgbe_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_remove\n");

	xgbe_debugfs_exit(pdata);

	unregister_netdev(netdev);

	xgbe_mdio_unregister(pdata);

	kfree(pdata->mii_bus_id);

	free_netdev(netdev);

	DBGPR("<--xgbe_remove\n");

	return 0;
}
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		ring->tx.queue_stopped = 0;

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}
static int xgbe_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int riwt;

	DBGPR("-->xgbe_get_coalesce\n");

	memset(ec, 0, sizeof(struct ethtool_coalesce));

	riwt = pdata->rx_riwt;
	ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
	ec->rx_max_coalesced_frames = pdata->rx_frames;

	ec->tx_coalesce_usecs = pdata->tx_usecs;
	ec->tx_max_coalesced_frames = pdata->tx_frames;

	DBGPR("<--xgbe_get_coalesce\n");

	return 0;
}
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	DBGPR("-->xgbe_alloc_ring_resources\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		DBGPR("  %s - tx_ring:\n", channel->name);
		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		DBGPR("  %s - rx_ring:\n", channel->name);
		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	DBGPR("<--xgbe_alloc_ring_resources\n");

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct xgbe_hw_if *hw_if;
	struct xgbe_desc_if *desc_if;
	struct net_device *netdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	DBGPR("  xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	DBGPR("  xpcs_regs = %p\n", pdata->xpcs_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	hw_if = pdata->hw_if = &default_xgbe_hw_if;
	desc_if = pdata->desc_if = &default_xgbe_desc_if;

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Prepare to register with MDIO */
	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
	if (!pdata->mii_bus_id) {
		dev_err(dev, "failed to allocate mii bus id\n");
		ret = -ENOMEM;
		goto err_io;
	}
	ret = xgbe_mdio_register(pdata);
	if (ret)
		goto err_bus_id;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_reg_netdev;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_reg_netdev:
	xgbe_mdio_unregister(pdata);

err_bus_id:
	kfree(pdata->mii_bus_id);

err_io:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct xgbe_hw_if *hw_if;
	struct xgbe_desc_if *desc_if;
	struct net_device *netdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	const u8 *mac_addr;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the system clock setting */
	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
	if (IS_ERR(pdata->sysclk)) {
		dev_err(dev, "dma devm_clk_get failed\n");
		ret = PTR_ERR(pdata->sysclk);
		goto err_io;
	}

	/* Obtain the PTP clock setting */
	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
	if (IS_ERR(pdata->ptpclk)) {
		dev_err(dev, "ptp devm_clk_get failed\n");
		ret = PTR_ERR(pdata->ptpclk);
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	DBGPR("  xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	DBGPR("  xpcs_regs = %p\n", pdata->xpcs_regs);

	/* Set the DMA mask */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	if (of_property_read_bool(dev->of_node, "dma-coherent")) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq failed\n");
		goto err_io;
	}
	netdev->irq = ret;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);
	hw_if = &pdata->hw_if;
	desc_if = &pdata->desc_if;

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(dev->of_node);
	if (!mac_addr) {
		dev_err(dev, "invalid mac address for this device\n");
		ret = -EINVAL;
		goto err_io;
	}
	memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);

	/* Retrieve the PHY mode - it must be "xgmii" */
	pdata->phy_mode = of_get_phy_mode(dev->of_node);
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "invalid phy-mode specified for this device\n");
		ret = -EINVAL;
		goto err_io;
	}

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Allocate the rings for the DMA channels */
	pdata->channel = xgbe_alloc_rings(pdata);
	if (!pdata->channel) {
		dev_err(dev, "ring allocation failed\n");
		ret = -ENOMEM;
		goto err_io;
	}

	/* Prepare to register with MDIO */
	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
	if (!pdata->mii_bus_id) {
		dev_err(dev, "failed to allocate mii bus id\n");
		ret = -ENOMEM;
		goto err_io;
	}
	ret = xgbe_mdio_register(pdata);
	if (ret)
		goto err_bus_id;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_reg_netdev;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_reg_netdev:
	xgbe_mdio_unregister(pdata);

err_bus_id:
	kfree(pdata->mii_bus_id);

err_io:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
static int xgbe_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rx_frames, rx_riwt, rx_usecs;
	unsigned int tx_frames, tx_usecs;

	DBGPR("-->xgbe_set_coalesce\n");

	/* Check for not supported parameters */
	if ((ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) ||
	    (ec->tx_coalesce_usecs_irq) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->use_adaptive_rx_coalesce) ||
	    (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) ||
	    (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) ||
	    (ec->tx_coalesce_usecs_low) ||
	    (ec->tx_max_coalesced_frames_low) ||
	    (ec->pkt_rate_high) ||
	    (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) ||
	    (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_high) ||
	    (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	/* Can only change rx-frames when interface is down (see
	 * rx_descriptor_init in xgbe-dev.c)
	 */
	rx_frames = pdata->rx_frames;
	if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
		netdev_alert(netdev,
			     "interface must be down to change rx-frames\n");
		return -EINVAL;
	}

	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
	rx_frames = ec->rx_max_coalesced_frames;

	/* Use smallest possible value if conversion resulted in zero */
	if (ec->rx_coalesce_usecs && !rx_riwt)
		rx_riwt = 1;

	/* Check the bounds of values for Rx */
	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
		rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
		netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
			     rx_usecs);
		return -EINVAL;
	}
	if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
		netdev_alert(netdev, "rx-frames is limited to %d frames\n",
			     pdata->channel->rx_ring->rdesc_count);
		return -EINVAL;
	}

	tx_usecs = ec->tx_coalesce_usecs;
	tx_frames = ec->tx_max_coalesced_frames;

	/* Check the bounds of values for Tx */
	if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
		netdev_alert(netdev, "tx-frames is limited to %d frames\n",
			     pdata->channel->tx_ring->rdesc_count);
		return -EINVAL;
	}

	pdata->rx_riwt = rx_riwt;
	pdata->rx_frames = rx_frames;
	hw_if->config_rx_coalesce(pdata);

	pdata->tx_usecs = tx_usecs;
	pdata->tx_frames = tx_frames;
	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_set_coalesce\n");

	return 0;
}
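/*
 * Userspace example (interface name assumed): the two handlers above back
 * the standard ethtool coalescing interface, e.g.
 *
 *   ethtool -c eth0
 *   ethtool -C eth0 rx-usecs 30 tx-frames 16
 *
 * Note that changing rx-frames requires the interface to be down, per the
 * netif_running() check in xgbe_set_coalesce().
 */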
int xgbe_mdio_register(struct xgbe_prv_data *pdata)
{
	struct device_node *phy_node;
	struct mii_bus *mii;
	struct phy_device *phydev;
	int ret = 0;

	DBGPR("-->xgbe_mdio_register\n");

	/* Retrieve the phy-handle */
	phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
	if (!phy_node) {
		dev_err(pdata->dev, "unable to parse phy-handle\n");
		return -EINVAL;
	}

	mii = mdiobus_alloc();
	if (mii == NULL) {
		dev_err(pdata->dev, "mdiobus_alloc failed\n");
		ret = -ENOMEM;
		goto err_node_get;
	}

	/* Register on the MDIO bus (don't probe any PHYs) */
	mii->name = XGBE_PHY_NAME;
	mii->read = xgbe_mdio_read;
	mii->write = xgbe_mdio_write;
	snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
	mii->priv = pdata;
	mii->phy_mask = ~0;
	mii->parent = pdata->dev;
	ret = mdiobus_register(mii);
	if (ret) {
		dev_err(pdata->dev, "mdiobus_register failed\n");
		goto err_mdiobus_alloc;
	}
	DBGPR("  mdiobus_register succeeded for %s\n", pdata->mii_bus_id);

	/* Probe the PCS using Clause 45 */
	phydev = get_phy_device(mii, XGBE_PRTAD, true);
	if (IS_ERR(phydev) || !phydev ||
	    !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
		dev_err(pdata->dev, "get_phy_device failed\n");
		ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
		goto err_mdiobus_register;
	}
	request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
		       MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));

	of_node_get(phy_node);
	phydev->dev.of_node = phy_node;
	ret = phy_device_register(phydev);
	if (ret) {
		dev_err(pdata->dev, "phy_device_register failed\n");
		of_node_put(phy_node);
		goto err_phy_device;
	}

	/* Add a reference to the PHY driver so it can't be unloaded */
	pdata->phy_module = phydev->dev.driver ?
			    phydev->dev.driver->owner : NULL;
	if (!try_module_get(pdata->phy_module)) {
		dev_err(pdata->dev, "try_module_get failed\n");
		ret = -EIO;
		goto err_phy_device;
	}

	pdata->mii = mii;
	pdata->mdio_mmd = MDIO_MMD_PCS;

	phydev->autoneg = pdata->default_autoneg;
	if (phydev->autoneg == AUTONEG_DISABLE) {
		phydev->speed = pdata->default_speed;
		phydev->duplex = DUPLEX_FULL;
		phydev->advertising &= ~ADVERTISED_Autoneg;
	}

	pdata->phydev = phydev;

	of_node_put(phy_node);

	DBGPHY_REGS(pdata);

	DBGPR("<--xgbe_mdio_register\n");

	return 0;

err_phy_device:
	phy_device_free(phydev);

err_mdiobus_register:
	mdiobus_unregister(mii);

err_mdiobus_alloc:
	mdiobus_free(mii);

err_node_get:
	of_node_put(phy_node);

	return ret;
}
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		DBGPR("  TSO packet\n");

		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		rdata->tso_header = 1;

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
		      cur_index, skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		DBGPR("  mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
			      cur_index, skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry */
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_skb(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;
	struct device *dev = &pdev->dev, *phy_dev;
	struct platform_device *phy_pdev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i, phy_memnum, phy_irqnum;
	enum dev_dma_attr attr;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = dev->of_node ? 0 : 1;

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
	} else {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
	}

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir0_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir1_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Retrieve the PHY speedset */
	ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
				       &pdata->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		goto err_io;
	}

	switch (pdata->speed_set) {
	case XGBE_SPEEDSET_1000_10000:
	case XGBE_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY configuration properties */
	if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_BLWC_PROPERTY,
						     pdata->serdes_blwc,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_BLWC_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
		       sizeof(pdata->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_CDR_RATE_PROPERTY,
						     pdata->serdes_cdr_rate,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_CDR_RATE_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
		       sizeof(pdata->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PQ_SKEW_PROPERTY,
						     pdata->serdes_pq_skew,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PQ_SKEW_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
		       sizeof(pdata->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_TX_AMP_PROPERTY,
						     pdata->serdes_tx_amp,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_TX_AMP_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
		       sizeof(pdata->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_CFG_PROPERTY,
						     pdata->serdes_dfe_tap_cfg,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_CFG_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
		       sizeof(pdata->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_ENA_PROPERTY,
						     pdata->serdes_dfe_tap_ena,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_ENA_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
		       sizeof(pdata->serdes_dfe_tap_ena));
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	attr = device_get_dma_attr(dev);
	if (attr == DEV_DMA_NOT_SUPPORTED) {
		dev_err(dev, "DMA is not supported");
		ret = -ENODEV;
		goto err_io;
	}
	pdata->coherent = (attr == DEV_DMA_COHERENT);
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq phy 0 failed\n");
		goto err_io;
	}
	pdata->an_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Issue software reset to device */
	pdata->hw_if.exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->phy_if.phy_init(pdata);

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_io;
	}

	/* Create the PHY/ANEG name based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_netdev;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	platform_device_put(phy_pdev);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_wq:
	destroy_workqueue(pdata->dev_workqueue);

err_netdev:
	unregister_netdev(netdev);

err_io:
	platform_device_put(phy_pdev);

err_phydev:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}