static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_reserve(skb, 2);
		/* Pass the filled skb up the stack and immediately re-arm
		 * the descriptor with the freshly allocated replacement. */
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		/* No replacement skb: recycle the old buffer and drop
		 * the frame. */
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
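/*
 * For reference, a minimal sketch of the descriptor layout that
 * cpmac_rx_one() above depends on.  This is an illustrative assumption
 * (the struct name and field order are not taken from this file; the
 * driver's real struct cpmac_desc is authoritative).  The hw_* fields
 * mirror what the DMA engine reads; the rest is driver bookkeeping.
 */
struct cpmac_desc_sketch {
	u32 hw_next;		/* DMA address of the next descriptor */
	u32 hw_data;		/* DMA address of the data buffer */
	u16 buflen;		/* buffer size offered to the hardware */
	u16 bufflags;
	u16 datalen;		/* bytes actually received */
	u16 dataflags;		/* CPMAC_OWN, CPMAC_SOP, CPMAC_EOP, ... */
	struct sk_buff *skb;	/* skb currently backing hw_data */
	struct cpmac_desc_sketch *next;	/* software ring linkage */
	struct cpmac_desc_sketch *prev;
	dma_addr_t mapping;		/* DMA address of this descriptor */
	dma_addr_t data_mapping;	/* dma_addr_t view of hw_data */
};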
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	/* One descriptor per queue: stop the queue until the TX
	 * completion handler gives this slot back. */
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(dev, queue);
#else
	netif_stop_queue(dev);
#endif

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
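/*
 * Illustrative sketch of the matching TX completion step (not from this
 * file; the handler name and exact flow are assumptions).  Once the
 * hardware clears CPMAC_OWN, the completion path is expected to unmap
 * the buffer, free the skb, and wake the queue that cpmac_start_xmit()
 * stopped above.
 */
static void cpmac_tx_complete_sketch(struct net_device *dev,
				     struct cpmac_desc *desc, int queue)
{
	dma_unmap_single(&dev->dev, desc->data_mapping, desc->buflen,
			 DMA_TO_DEVICE);
	dev_kfree_skb_irq(desc->skb);
	desc->skb = NULL;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_wake_subqueue(dev, queue);
#else
	netif_wake_queue(dev);
#endif
}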
static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *desc;
	int i;

	if (unlikely(!priv->rx_head))
		return;
	desc = priv->rx_head;
	for (i = 0; i < priv->ring_size; i++) {
		if ((desc->dataflags & CPMAC_OWN) == 0) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				printk(KERN_WARNING "%s: packet dropped\n",
				       dev->name);
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, desc);
			/* hand the descriptor back to the hardware */
			desc->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		desc->hw_next = desc->next->mapping;
		desc = desc->next;
	}
	/* terminate the hardware chain just before the ring head */
	priv->rx_head->prev->hw_next = 0;
}
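/*
 * Illustrative sketch of the ring invariant cpmac_clear_rx() restores
 * (assumptions: the RX descriptors sit in one contiguous array, and the
 * helper name is invented for illustration).  Descriptors are circular
 * in software via ->next/->prev, while ->hw_next chains their DMA
 * addresses for the hardware and is broken just before the head so the
 * engine stops after one full pass of the ring.
 */
static void cpmac_link_rx_ring_sketch(struct cpmac_priv *priv)
{
	struct cpmac_desc *desc;
	int i;

	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}
	priv->rx_head->prev->hw_next = 0;
}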
/**
 * stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Check that the MAC address is valid.  If it is not, generate a
	 * random one so the device can still come up; the user may
	 * override it later with:
	 *	ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		pr_warning("%s: generated random MAC address %pM\n", dev->name,
			   dev->dev_addr);
	}

	stmmac_verify_args();

#ifdef CONFIG_STMMAC_TIMER
	priv->tm = kzalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
	if (unlikely(priv->tm == NULL)) {
		pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
		return -ENOMEM;
	}
	priv->tm->freq = tmrate;

	/* Test if the external timer can be actually used.
	 * In case of failure continue without timer. */
	if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
		pr_warning("stmmaceth: cannot attach the external timer.\n");
		priv->tm->freq = 0;
		priv->tm->timer_start = stmmac_no_timer_started;
		priv->tm->timer_stop = stmmac_no_timer_stopped;
	} else
		priv->tm->enable = 1;
#endif
	ret = stmmac_init_phy(dev);
	if (unlikely(ret)) {
		pr_err("%s: Cannot attach to PHY (error: %d)\n",
		       __func__, ret);
		goto open_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
				  priv->dma_tx_phy, priv->dma_rx_phy);
	if (ret < 0) {
		pr_err("%s: DMA initialization failed\n", __func__);
		goto open_error;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* If required, perform hw setup of the bus. */
	if (priv->plat->bus_setup)
		priv->plat->bus_setup(priv->ioaddr);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
	if (priv->rx_coe)
		pr_info("stmmac: Rx Checksum Offload Engine supported\n");
	if (priv->plat->tx_coe)
		pr_info("\tTX Checksum insertion supported\n");
	netdev_update_features(dev);

	/* Initialise the MMC (if present) to disable all interrupts. */
	writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
		       __func__, dev->irq, ret);
		goto open_error;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_enable_mac(priv->ioaddr);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	/* Start the ball rolling... */
	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

#ifdef CONFIG_STMMAC_TIMER
	priv->tm->timer_start(tmrate);
#endif
	/* Dump DMA/MAC registers */
	if (netif_msg_hw(priv)) {
		priv->hw->mac->dump_regs(priv->ioaddr);
		priv->hw->dma->dump_regs(priv->ioaddr);
	}

	if (priv->phydev)
		phy_start(priv->phydev);

	napi_enable(&priv->napi);
	skb_queue_head_init(&priv->rx_recycle);
	netif_start_queue(dev);

	return 0;

open_error:
#ifdef CONFIG_STMMAC_TIMER
	kfree(priv->tm);
#endif
	if (priv->phydev)
		phy_disconnect(priv->phydev);

	return ret;
}
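/*
 * Illustrative sketch of how stmmac_open() is reached: it is installed
 * as the .ndo_open hook of the device's net_device_ops at probe time.
 * The table below is an assumption showing only this wiring; the real
 * driver registers many more callbacks (xmit, stop, ioctl, and so on).
 */
static const struct net_device_ops stmmac_netdev_ops_sketch = {
	.ndo_open = stmmac_open,
	/* .ndo_stop, .ndo_start_xmit, ... omitted */
};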
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static void init_dma_desc_rings(struct net_device *dev)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int bfsize = priv->dma_buf_sz;
	int buff2_needed = 0, dis_ic = 0;

	/* Set the buffer size according to the MTU;
	 * in the jumbo case we need to bump up the buffer sizes. */
	if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
		bfsize = BUF_SIZE_16KiB;
	else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
		bfsize = BUF_SIZE_8KiB;
	else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
		bfsize = BUF_SIZE_4KiB;
	else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
		bfsize = BUF_SIZE_2KiB;
	else
		bfsize = DMA_BUFFER_SIZE;

#ifdef CONFIG_STMMAC_TIMER
	/* Disable interrupts on completion for the reception if timer is on */
	if (likely(priv->tm->enable))
		dis_ic = 1;
#endif
	/* If the MTU exceeds 8k, use the second buffer in the chain */
	if (bfsize >= BUF_SIZE_8KiB)
		buff2_needed = 1;

	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
	    txsize, rxsize, bfsize);

	priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
	priv->rx_skbuff =
	    kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
	priv->dma_rx =
	    (struct dma_desc *)dma_alloc_coherent(priv->device,
						  rxsize *
						  sizeof(struct dma_desc),
						  &priv->dma_rx_phy,
						  GFP_KERNEL);
	priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
				  GFP_KERNEL);
	priv->dma_tx =
	    (struct dma_desc *)dma_alloc_coherent(priv->device,
						  txsize *
						  sizeof(struct dma_desc),
						  &priv->dma_tx_phy,
						  GFP_KERNEL);

	if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
		pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
		return;
	}

	DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
	    dev->name, priv->dma_rx, priv->dma_tx,
	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	/* RX INITIALIZATION */
	DBG(probe, INFO, "stmmac: SKB addresses:\n"
			 "skb\t\tskb data\tdma data\n");

	for (i = 0; i < rxsize; i++) {
		struct dma_desc *p = priv->dma_rx + i;

		skb = netdev_alloc_skb_ip_align(dev, bfsize);
		if (unlikely(skb == NULL)) {
			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
			break;
		}
		priv->rx_skbuff[i] = skb;
		priv->rx_skbuff_dma[i] = dma_map_single(priv->device,
							skb->data, bfsize,
							DMA_FROM_DEVICE);

		p->des2 = priv->rx_skbuff_dma[i];
		if (unlikely(buff2_needed))
			p->des3 = p->des2 + BUF_SIZE_8KiB;
		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
		    priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - rxsize);
	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	/* TX INITIALIZATION */
	for (i = 0; i < txsize; i++) {
		priv->tx_skbuff[i] = NULL;
		priv->dma_tx[i].des2 = 0;
	}
	priv->dirty_tx = 0;
	priv->cur_tx = 0;

	/* Clear the Rx/Tx descriptors */
	priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
	priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);

	if (netif_msg_hw(priv)) {
		pr_info("RX descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
		pr_info("TX descriptor ring:\n");
		display_ring(priv->dma_tx, txsize);
	}
}
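/*
 * The buffer-size ladder at the top of init_dma_desc_rings(), factored
 * out for illustration (hypothetical helper, not part of the driver;
 * the constants are the ones used above).  For example, a standard
 * 1500-byte MTU falls through to the default DMA_BUFFER_SIZE, while a
 * 9000-byte jumbo MTU selects BUF_SIZE_16KiB.
 */
static inline unsigned int stmmac_bfsize_for_mtu_sketch(unsigned int mtu)
{
	if (mtu >= BUF_SIZE_8KiB)
		return BUF_SIZE_16KiB;
	if (mtu >= BUF_SIZE_4KiB)
		return BUF_SIZE_8KiB;
	if (mtu >= BUF_SIZE_2KiB)
		return BUF_SIZE_4KiB;
	if (mtu >= DMA_BUFFER_SIZE)
		return BUF_SIZE_2KiB;
	return DMA_BUFFER_SIZE;
}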
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	struct dma_desc *p = priv->dma_rx + entry;
	struct dma_desc *p_next;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
	}
#endif
	while (!priv->hw->desc->get_rx_owner(p)) {
		int status;

		if (count >= limit)
			break;

		count++;

		next_entry = (++priv->cur_rx) % rxsize;
		p_next = priv->dma_rx + next_entry;
		prefetch(p_next);

		/* read the status of the incoming frame */
		status = (priv->hw->desc->rx_status(&priv->dev->stats,
						    &priv->xstats, p));
		if (unlikely(status == discard_frame))
			priv->dev->stats.rx_errors++;
		else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p);
			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP) */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;
#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					 frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					 p, entry, p->des2);
#endif
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
				       priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			priv->rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)",
					frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(status == csum_none)) {
				/* always for the old mac 10/100 */
				skb_checksum_none_assert(skb);
				netif_receive_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				napi_gro_receive(&priv->napi, skb);
			}

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
		p = p_next;	/* use prefetched values */
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
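/*
 * Illustrative sketch of how stmmac_rx() plugs into NAPI (the poll
 * function name is an assumption; the real callback also reclaims
 * completed TX descriptors and re-enables the DMA interrupts).  The
 * poll handler consumes at most `budget` frames per round and exits
 * polled mode once the ring is drained.
 */
static int stmmac_poll_sketch(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv,
						napi);
	int work_done;

	work_done = stmmac_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		/* hardware-specific: re-enable RX/TX interrupts here */
	}
	return work_done;
}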