/*
 * Reclaim transmitted descriptors (the IrDA counterpart of
 * au1000_tx_ack() below): walk the ring until we reach a descriptor
 * the hardware still owns, then either apply a pending speed change
 * or drop back to receive mode once the ring is fully drained.
 */
static void au1k_tx_ack(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *ptxd;

	ptxd = aup->tx_ring[aup->tx_tail];
	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
		update_tx_stats(dev, ptxd->flags,
				(ptxd->count_1 << 8) | ptxd->count_0);
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		wmb();
		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
		ptxd = aup->tx_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	if (aup->tx_tail == aup->tx_head) {
		if (aup->newspeed) {
			au1k_irda_set_speed(dev, aup->newspeed);
			aup->newspeed = 0;
		} else {
			irda_write(aup, IR_CONFIG_1,
				   irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
			irda_write(aup, IR_CONFIG_1,
				   irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
			irda_write(aup, IR_RING_PROMPT, 0);
		}
	}
}
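/*
 * update_tx_stats() is not part of this excerpt. Below is a minimal
 * sketch of what the IrDA variant plausibly does, assuming an
 * IR_TX_ERROR status bit and a net_device_stats struct in the private
 * data -- illustrative only, not the driver's verbatim helper.
 */
static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
	struct au1k_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &aup->stats;

	ps->tx_packets++;
	ps->tx_bytes += pkt_len;

	if (status & IR_TX_ERROR) {	/* assumed error flag */
		ps->tx_errors++;
		ps->tx_aborted_errors++;
	}
}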
/*
 * Au1000 transmit routine.
 */
static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;
	volatile tx_dma_t *ptxd;
	u32 buff_stat;
	db_dest_t *pDB;
	int i;

	if (au1000_debug > 5)
		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
		       dev->name, (unsigned)aup, skb->len,
		       skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
	if (skb->len < ETH_ZLEN) {
		for (i = skb->len; i < ETH_ZLEN; i++) {
			((char *)pDB->vaddr)[i] = 0;
		}
		ptxd->len = ETH_ZLEN;
	} else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	au_sync();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}
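/*
 * Sketch: in the modern kernel a transmit routine like the one above is
 * registered through net_device_ops rather than dev->hard_start_xmit.
 * The struct name and the probe-time assignment are assumptions for
 * illustration, not taken from this excerpt.
 */
static const struct net_device_ops au1000_netdev_ops = {
	.ndo_start_xmit	= au1000_tx,
	/* .ndo_open, .ndo_stop, ... omitted */
};
/* in the probe routine: dev->netdev_ops = &au1000_netdev_ops; */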
/*
 * Au1000 transmit routine (earlier revision: plain int return code,
 * dev->priv, and MAC_MIN_PKT_SIZE padding instead of the later API).
 */
static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile tx_dma_t *ptxd;
	u32 buff_stat;
	db_dest_t *pDB;
	int i;

	if (au1000_debug > 4)
		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
		       dev->name, (unsigned)aup, skb->len,
		       skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return 1;
	} else if (buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	memcpy((void *)pDB->vaddr, skb->data, skb->len);
	if (skb->len < MAC_MIN_PKT_SIZE) {
		for (i = skb->len; i < MAC_MIN_PKT_SIZE; i++) {
			((char *)pDB->vaddr)[i] = 0;
		}
		ptxd->len = MAC_MIN_PKT_SIZE;
	} else
		ptxd->len = skb->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	au_sync();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	dev->trans_start = jiffies;
	return 0;
}
int flux_send (flux_t h, const flux_msg_t *msg, int flags)
{
    if (!h->ops->send) {
        errno = ENOSYS;
        goto fatal;
    }
    flags |= h->flags;
    update_tx_stats (h, msg);
    if (flags & FLUX_O_TRACE)
        flux_msg_fprint (stderr, msg);
    if (h->ops->send (h->impl, msg, flags) < 0)
        goto fatal;
#if HAVE_CALIPER
    profiling_msg_snapshot(h, msg, flags, "send");
#endif
    return 0;
fatal:
    FLUX_FATAL (h);
    return -1;
}
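/*
 * A minimal caller sketch for flux_send(). The surrounding calls
 * (flux_open, flux_msg_create, flux_msg_set_topic, flux_msg_destroy,
 * flux_close) follow flux-core conventions of the same era as the
 * handle type above; the topic string is an arbitrary example.
 * Illustrative only, not from this source file.
 */
#include <flux/core.h>

static int send_example (void)
{
    flux_t h;
    flux_msg_t *msg = NULL;
    int rc = -1;

    if (!(h = flux_open (NULL, 0)))             /* connect to the broker */
        return -1;
    if (!(msg = flux_msg_create (FLUX_MSGTYPE_REQUEST)))
        goto done;
    if (flux_msg_set_topic (msg, "broker.ping") < 0)
        goto done;
    rc = flux_send (h, msg, 0);                 /* tx stats updated inside */
done:
    flux_msg_destroy (msg);
    flux_close (h);
    return rc;
}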
/*
 * Called from the interrupt service routine to acknowledge
 * the TX DONE bits. This is a must if the irq is setup as
 * edge triggered.
 */
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile tx_dma_t *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
		ptxd->buff_stat &= ~TX_T_DONE;
		ptxd->len = 0;
		au_sync();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}
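/*
 * The Ethernet update_tx_stats() referenced above is likewise not part
 * of this excerpt. A sketch of the usual shape, assuming status bits
 * named TX_FRAME_ABORTED, TX_NO_CARRIER and TX_LOSS_CARRIER --
 * illustrative, not the driver's verbatim code. Unlike the IrDA
 * variant, it classifies abort and carrier errors separately.
 */
static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;

	ps->tx_packets++;
	ps->tx_bytes += pkt_len;

	if (status & TX_FRAME_ABORTED) {	/* assumed status bits */
		ps->tx_errors++;
		ps->tx_aborted_errors++;
		if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
			ps->tx_carrier_errors++;
	}
}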
/*
 * Earlier revision of au1k_tx_ack() (direct writel()/read_ir_reg()
 * register access instead of the irda_read()/irda_write() helpers).
 */
static void au1k_tx_ack(struct net_device *dev)
{
	struct au1k_private *aup = (struct au1k_private *) dev->priv;
	volatile ring_dest_t *ptxd;

	ptxd = aup->tx_ring[aup->tx_tail];
	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
		update_tx_stats(dev, ptxd->flags,
				ptxd->count_1 << 8 | ptxd->count_0);
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		au_sync();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
		ptxd = aup->tx_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	if (aup->tx_tail == aup->tx_head) {
		if (aup->newspeed) {
			au1k_irda_set_speed(dev, aup->newspeed);
			aup->newspeed = 0;
		} else {
			writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
			       IR_CONFIG_1);
			au_sync();
			writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
			       IR_CONFIG_1);
			writel(0, IR_RING_PROMPT);
			au_sync();
		}
	}
}