/*
 * (Re)program one register set of the PSC Ethernet-read (RX) DMA channel.
 *
 * @set:  byte offset selecting which of the two channel register sets to
 *        load (the hardware double-buffers RX DMA descriptors).
 * @base: bus address of the RX buffer to receive into.
 *
 * The two command writes bracket the address/length writes; their order is
 * hardware-mandated.  0x0100 presumably halts/resets the channel before
 * reprogramming and 0x9800 re-arms it — magic PSC command values, TODO
 * confirm against PSC documentation.  NOTE(review): N_RX_RING is written to
 * the LEN register; whether the unit is bytes or ring slots is not visible
 * here — verify against the RX interrupt handler.
 */
static void psc_load_rxdma_base(int set, void *base)
{
	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);	/* stop channel before reload */
	psc_write_long(PSC_ENETRD_ADDR + set, (u32)base);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);	/* re-arm DMA */
}
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) { struct mace_data *mp = netdev_priv(dev); unsigned long flags; /* Stop the queue since there's only the one buffer */ local_irq_save(flags); netif_stop_queue(dev); if (!mp->tx_count) { printk(KERN_ERR "macmace: tx queue running but no free buffers.\n"); local_irq_restore(flags); return NETDEV_TX_BUSY; } mp->tx_count--; local_irq_restore(flags); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* We need to copy into our xmit buffer to take care of alignment and caching issues */ skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); /* load the Tx DMA and fire it off */ psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys); psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len); psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800); mp->tx_slot ^= 0x10; dev_kfree_skb(skb); return NETDEV_TX_OK; }
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) { struct mace_data *mp = (struct mace_data *) dev->priv; /* Stop the queue if the buffer is full */ if (!mp->tx_count) { netif_stop_queue(dev); return 1; } mp->tx_count--; mp->stats.tx_packets++; mp->stats.tx_bytes += skb->len; /* We need to copy into our xmit buffer to take care of alignment and caching issues */ memcpy((void *) mp->tx_ring, skb->data, skb->len); /* load the Tx DMA and fire it off */ psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys); psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len); psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800); mp->tx_slot ^= 0x10; dev_kfree_skb(skb); return 0; }
/*
 * Hard-start transmission of one frame.
 *
 * The driver has a single TX bounce buffer, so the queue is stopped for
 * every frame; the TX-DMA completion interrupt is expected to restore
 * tx_count and wake the queue (not visible in this block — confirm
 * against the DMA interrupt handler).
 *
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY when no buffer is free.
 */
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Claim the single buffer with interrupts off so the decrement
	 * cannot race the completion interrupt. */
	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Bounce-copy into our aligned DMA buffer; the skb itself is
	 * never handed to the hardware. */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* Program the write-side PSC DMA channel and fire it off.
	 * 0x9800 is the magic "go" command word — TODO confirm against
	 * PSC documentation. */
	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);
	mp->tx_slot ^= 0x10;	/* alternate between the two register sets */

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/*
 * (Re)program one register set of the PSC Ethernet-read (RX) DMA channel
 * to point at this device's RX ring, and rewind the software tail index.
 *
 * @dev: network device whose private data holds the ring's bus address.
 * @set: byte offset selecting which of the two channel register sets to
 *       load.
 *
 * The command writes bracket the address/length writes; their order is
 * hardware-mandated.  0x0100 presumably halts the channel before
 * reprogramming and 0x9800 re-arms it — magic PSC command values, TODO
 * confirm against PSC documentation.  NOTE(review): N_RX_RING is written
 * to the LEN register; unit (bytes vs. ring slots) is not visible here.
 */
static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);	/* stop channel before reload */
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);	/* re-arm DMA */
	mp->rx_tail = 0;	/* hardware restarts at ring slot 0 */
}
static int mace68k_xmit_start(struct sk_buff *skb, struct net_device *dev) { struct mace68k_data *mp = (struct mace68k_data *) dev->priv; /* * This may need atomic types ??? */ printk("mace68k_xmit_start: mp->tx_count = %d, dev->tbusy = %d, mp->tx_ring = %p (%p)\n", mp->tx_count, dev->tbusy, mp->tx_ring, virt_to_bus(mp->tx_ring)); psc_debug_dump(); if(mp->tx_count == 0) { dev->tbusy=1; mace68k_dma_intr(IRQ_MAC_MACE_DMA, dev, NULL); return 1; } mp->tx_count--; /* * FIXME: * This is hackish. The memcpy probably isnt needed but * the rules for alignment are not known. Ideally we'd like * to just blast the skb directly to ethernet. We also don't * use the ring properly - just a one frame buffer. That * also requires cache pushes ;). */ memcpy((void *)mp->tx_ring, skb, skb->len); psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, virt_to_bus(mp->tx_ring)); psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len); psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800); mp->stats.tx_packets++; mp->stats.tx_bytes+=skb->len; dev_kfree_skb(skb); return 0; }