/*
 * Queue one skb for transmission on the CPMAC DMA engine.
 *
 * Returns NETDEV_TX_OK when the packet was handed to hardware (or
 * dropped by skb_padto), NETDEV_TX_BUSY when a reset is pending or the
 * selected TX descriptor is still owned by the hardware.
 */
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	/* Refuse new work while a controller reset is in flight. */
	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	/* Pad runt frames to the Ethernet minimum; skb is freed on failure,
	 * so OK (not BUSY) is the right return. */
	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(dev, queue);
#else
	netif_stop_queue(dev);
#endif

	/* One descriptor per queue; CPMAC_OWN set means the hardware has
	 * not finished with the previous packet on this queue. */
	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);

	/* Fill the descriptor before handing ownership to the hardware. */
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;

	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb, skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);

	/* Kick the DMA engine: writing the descriptor's bus address to the
	 * per-queue TX pointer register starts transmission. */
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
	return NETDEV_TX_OK;
}
static void cpmac_hw_start() { int i; //struct cpmac_priv *priv = netdev_priv(dev); //struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); ar7_device_reset(pdata.reset_bit); //for (i = 0; i < 8; i++) { cpmac_write(CPMAC_TX_PTR(i), 0); lock_s(synthlock_0); cpmac_write_CPMAC_RX_PTR(i, 0); //} cpmac_write_CPMAC_RX_PTR(0, rx_head->mapping); cpmac_write(CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST); cpmac_write(CPMAC_BUFFER_OFFSET, 0); //for (i = 0; i < 8; i++) cpmac_write(CPMAC_MAC_ADDR_LO(i), netdev.dev_addr[5]); cpmac_write(CPMAC_MAC_ADDR_MID, netdev.dev_addr[4]); cpmac_write(CPMAC_MAC_ADDR_HI, netdev.dev_addr[0] | (netdev.dev_addr[1] << 8) | (netdev.dev_addr[2] << 16) | (netdev.dev_addr[3] << 24)); cpmac_write(CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); cpmac_write(CPMAC_UNICAST_CLEAR, 0xff); cpmac_write(CPMAC_RX_INT_CLEAR, 0xff); cpmac_write(CPMAC_TX_INT_CLEAR, 0xff); cpmac_write(CPMAC_MAC_INT_CLEAR, 0xff); cpmac_write(CPMAC_UNICAST_ENABLE, 1); cpmac_write(CPMAC_RX_INT_ENABLE, 1); cpmac_write(CPMAC_TX_INT_ENABLE, 0xff); cpmac_write(CPMAC_MAC_INT_ENABLE, 3); cpmac_write(CPMAC_RX_CONTROL, cpmac_read(CPMAC_RX_CONTROL) | 1); cpmac_write(CPMAC_TX_CONTROL, cpmac_read(CPMAC_TX_CONTROL) | 1); cpmac_write(CPMAC_MAC_CONTROL, cpmac_read(CPMAC_MAC_CONTROL) | MAC_MII | MAC_FDX); unlock_s(synthlock_0); }
/*
 * Bring the CPMAC controller out of reset and program it for operation:
 * clear DMA pointers, install the RX ring, set the MAC address and
 * frame limits, clear and enable interrupts, then enable RX/TX DMA and
 * the MAC (MII, full duplex). Register writes are order-dependent.
 */
static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);

	/* Clear all eight TX/RX DMA head pointers before use. */
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	/* Hand the first RX descriptor to the hardware on channel 0. */
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);

	/* Program the station MAC address into all eight address slots. */
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);

	/* Clear any stale interrupt/unicast state, then enable. */
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	/* Enable RX/TX DMA and the MAC itself (MII mode, full duplex). */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL, cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL, cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL, cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | MAC_FDX);
}
static void cpmac_hw_stop(/*struct net_device *dev*/) { int i; //struct cpmac_priv *priv = netdev_priv(dev); //struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); ar7_device_reset(pdata.reset_bit); cpmac_write(CPMAC_RX_CONTROL, cpmac_read(CPMAC_RX_CONTROL) & ~1); cpmac_write(CPMAC_TX_CONTROL, cpmac_read(CPMAC_TX_CONTROL) & ~1); //for (i = 0; i < 8; i++) { cpmac_write(CPMAC_TX_PTR(i), 0); cpmac_write_CPMAC_RX_PTR(i, 0); //} cpmac_write(CPMAC_UNICAST_CLEAR, 0xff); cpmac_write(CPMAC_RX_INT_CLEAR, 0xff); cpmac_write(CPMAC_TX_INT_CLEAR, 0xff); cpmac_write(CPMAC_MAC_INT_CLEAR, 0xff); cpmac_write(CPMAC_MAC_CONTROL, cpmac_read(CPMAC_MAC_CONTROL) & ~MAC_MII); }
/*
 * Quiesce the CPMAC controller: reset the device, disable RX/TX DMA,
 * clear all DMA pointers and pending interrupt state, and take the MAC
 * off the MII bus. Register writes are order-dependent.
 */
static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);

	/* Stop the RX/TX DMA engines before touching the pointers. */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL, cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL, cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);

	/* Clear all eight TX/RX DMA head pointers. */
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}

	/* Acknowledge any pending interrupt/unicast state. */
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);

	/* Detach the MAC from the MII bus. */
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL, cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}