/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}
/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}
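/*
 * Illustrative note, not part of the original driver: every perfect match
 * slot uses the same encoding, both for the device address above (slot 0)
 * and for the multicast entries (slots 1-3) in bcm_enet_set_multicast_list().
 * The hypothetical helper below only restates that split: the low four bytes
 * of the MAC address form the ENET_PML_REG(n) value, and the top two bytes
 * plus the "entry valid" flag form the ENET_PMH_REG(n) value.
 */
static inline void bcm_enet_encode_perfect_match(const u8 *mac,
						 u32 *pml, u32 *pmh)
{
	/* bytes 2..5 of the address -> low word register */
	*pml = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
	/* bytes 0..1 of the address, plus the valid bit -> high word register */
	*pmh = (mac[0] << 8 | mac[1]) | ENET_PMH_DATAVALID_MASK;
}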
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}
	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}
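/*
 * Illustrative sketch, not part of the original driver: bcm_enet_open() above
 * uses the common kernel "unwind in reverse order" error handling.  Each goto
 * label releases only what was acquired before the failing step, so a partial
 * failure never leaks IRQs, descriptor rings or skb arrays.  A minimal
 * standalone version of the pattern, with hypothetical acquire/release
 * helpers, kept compiled out:
 */
#if 0
static int example_open(void)
{
	int ret;

	ret = acquire_irqs();		/* e.g. the request_irq() calls */
	if (ret)
		return ret;

	ret = acquire_rings();		/* e.g. dma_alloc_coherent() */
	if (ret)
		goto out_free_irqs;

	ret = acquire_skb_queues();	/* e.g. the kzalloc()ed skb arrays */
	if (ret)
		goto out_free_rings;

	return 0;

out_free_rings:
	release_rings();
out_free_irqs:
	release_irqs();
	return ret;
}
#endif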