/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	priv->stats.tx_bytes += skb->len;
	priv->stats.tx_packets++;
	dev->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}
/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptor */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			priv->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			priv->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				priv->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				priv->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				priv->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				priv->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
			if (!nskb) {
				/* forget packet, just rearm desc */
				priv->stats.rx_dropped++;
				continue;
			}

			/* since we're copying the data, we can align
			 * them properly */
			skb_reserve(nskb, NET_IP_ALIGN);
			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		dev->last_rx = jiffies;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int irq_requested, i, ret;
	unsigned int size;
	char phy_id[BUS_ID_SIZE];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	priv->rx_desc_cpu = priv->tx_desc_cpu = NULL;
	priv->rx_skb = priv->tx_skb = NULL;

	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	irq_requested = 0;
	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out;
	irq_requested++;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out;
	irq_requested++;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out;
	irq_requested++;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, BCMENET_MAX_RX_SIZE, ENET_RXMAXLEN_REG);
	enet_writel(priv, BCMENET_MAX_TX_SIZE, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	/* only release the phy if we attached one above */
	if (priv->has_phy)
		phy_disconnect(priv->phydev);
	if (irq_requested > 2)
		free_irq(priv->irq_tx, dev);
	if (irq_requested > 1)
		free_irq(priv->irq_rx, dev);
	if (irq_requested > 0)
		free_irq(dev->irq, dev);

	/* rx skb ring may not have been allocated yet */
	if (priv->rx_skb) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			struct bcm_enet_desc *desc;

			if (!priv->rx_skb[i])
				continue;

			desc = &priv->rx_desc_cpu[i];
			dma_unmap_single(kdev, desc->address,
					 BCMENET_MAX_RX_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_skb[i]);
		}
	}
	if (priv->rx_desc_cpu)
		dma_free_coherent(kdev, priv->rx_desc_alloc_size,
				  priv->rx_desc_cpu, priv->rx_desc_dma);
	if (priv->tx_desc_cpu)
		dma_free_coherent(kdev, priv->tx_desc_alloc_size,
				  priv->tx_desc_cpu, priv->tx_desc_dma);
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	return ret;
}