/* hard_xmit interface of irda device */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct pxa_irda *si = netdev_priv(dev);
        int speed = irda_get_next_speed(skb);

        /*
         * Does this packet contain a request to change the interface
         * speed?  If so, remember it until we complete the transmission
         * of this frame.
         */
        if (speed != si->speed && speed != -1)
                si->newspeed = speed;

        /*
         * If this is an empty frame, we can bypass a lot.
         */
        if (skb->len == 0) {
                if (si->newspeed) {
                        si->newspeed = 0;
                        pxa_irda_set_speed(si, speed);
                }
                dev_kfree_skb(skb);
                return 0;
        }

        netif_stop_queue(dev);

        if (!IS_FIR(si)) {
                si->tx_buff.data = si->tx_buff.head;
                si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

                /* Disable STUART interrupts and switch to transmit mode. */
                STIER = 0;
                STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;

                /* enable STUART and transmit interrupts */
                STIER = IER_UUE | IER_TIE;
        } else {
                unsigned long mtt = irda_get_mtt(skb);

                si->dma_tx_buff_len = skb->len;
                skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

                if (mtt)
                        while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
                                cpu_relax();

                /* stop RX DMA, disable FICP */
                DCSR(si->rxdma) &= ~DCSR_RUN;
                ICCR0 = 0;

                pxa_irda_fir_dma_tx_start(si);
                ICCR0 = ICCR0_ITR | ICCR0_TXE;
        }

        dev_kfree_skb(skb);
        dev->trans_start = jiffies;
        return 0;
}
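/*
 * Illustrative sketch, not part of the driver code above: the si->newspeed
 * value remembered in pxa_irda_hard_xmit() is normally applied once the
 * frame has actually left the hardware, i.e. in the TX-complete path.
 * The helper name pxa_irda_tx_done() and its call site are assumptions
 * made purely for illustration.
 */
static void pxa_irda_tx_done(struct pxa_irda *si, struct net_device *dev)
{
        if (si->newspeed) {
                /* apply the speed change deferred by hard_xmit() */
                pxa_irda_set_speed(si, si->newspeed);
                si->newspeed = 0;
        }
        /* transmission finished: let the stack hand us the next frame */
        netif_wake_queue(dev);
}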
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct pxa_irda *si = netdev_priv(dev);
        int speed = irda_get_next_speed(skb);

        if (speed != si->speed && speed != -1)
                si->newspeed = speed;

        if (skb->len == 0) {
                if (si->newspeed) {
                        si->newspeed = 0;
                        pxa_irda_set_speed(si, speed);
                }
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        netif_stop_queue(dev);

        if (!IS_FIR(si)) {
                si->tx_buff.data = si->tx_buff.head;
                si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

                STIER = 0;
                STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;
                STIER = IER_UUE | IER_TIE;
        } else {
                unsigned long mtt = irda_get_mtt(skb);

                si->dma_tx_buff_len = skb->len;
                skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

                if (mtt)
                        while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
                                cpu_relax();

                DCSR(si->rxdma) &= ~DCSR_RUN;
                ICCR0 = 0;

                pxa_irda_fir_dma_tx_start(si);
                ICCR0 = ICCR0_ITR | ICCR0_TXE;
        }

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
/*
 * Called from net/core when new frame is available.
 */
static int kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct kingsun_cb *kingsun;
        int wraplen;
        int ret = 0;

        if (skb == NULL || netdev == NULL)
                return -EINVAL;

        netif_stop_queue(netdev);

        /* the IRDA wrapping routines don't deal with non-linear skb */
        SKB_LINEAR_ASSERT(skb);

        kingsun = netdev_priv(netdev);

        spin_lock(&kingsun->lock);

        /* Append data to the end of whatever data remains to be transmitted */
        wraplen = async_wrap_skb(skb, kingsun->out_buf, KINGSUN_FIFO_SIZE);

        /* Calculate how much data can be transmitted in this urb */
        usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
                         usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
                         kingsun->out_buf, wraplen,
                         kingsun_send_irq, kingsun, 1);

        if ((ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC))) {
                err("kingsun_hard_xmit: failed tx_urb submit: %d", ret);
                switch (ret) {
                case -ENODEV:
                case -EPIPE:
                        break;
                default:
                        kingsun->stats.tx_errors++;
                        netif_start_queue(netdev);
                }
        } else {
                kingsun->stats.tx_packets++;
                kingsun->stats.tx_bytes += skb->len;
        }

        dev_kfree_skb(skb);
        spin_unlock(&kingsun->lock);

        return ret;
}
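/*
 * Illustrative sketch only: the URB completion handler named in
 * usb_fill_int_urb() above.  The kingsun->netdev field is an assumption,
 * as is the exact error handling; the point is that the queue stopped in
 * kingsun_hard_xmit() is restarted here, once the interrupt URB has been
 * transferred.
 */
static void kingsun_send_irq(struct urb *urb)
{
        struct kingsun_cb *kingsun = urb->context;
        struct net_device *netdev = kingsun->netdev;    /* assumed field */

        if (urb->status != 0)
                kingsun->stats.tx_errors++;             /* count the failed transfer */

        if (netif_running(netdev))
                netif_wake_queue(netdev);               /* allow the next frame in */
}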
/*
 * This function is called by the Linux IrDA network subsystem to
 * transmit the Infrared data packet. The TX DMA channel is configured
 * to transfer SK buffer data to the IrDA TX FIFO along with the DMA
 * transfer completion routine.
 *
 * @param skb   The packet that is queued to be sent
 * @param dev   net_device structure.
 *
 * @return The function returns 0 on success and a negative value on
 *         failure.
 */
static int mxc_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mxc_irda *si = netdev_priv(dev);
        int speed = irda_get_next_speed(skb);
        u16 cr;

        /*
         * Does this packet contain a request to change the interface
         * speed?  If so, remember it until we complete the transmission
         * of this frame.
         */
        if (speed != si->speed && speed != -1) {
                si->newspeed = speed;
        }

        /* If this is an empty frame, we can bypass a lot. */
        if (skb->len == 0) {
                if (si->newspeed) {
                        si->newspeed = 0;
                        mxc_irda_set_speed(si, speed);
                }
                dev_kfree_skb(skb);
                return 0;
        }

        if (IS_SIR(si)) {
                /* We must not be transmitting... */
                netif_stop_queue(dev);

                si->tx_buff.data = si->tx_buff.head;
                si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

                /*
                 * Set the transmit interrupt enable.  This will fire
                 * off an interrupt immediately.  Note that we disable
                 * the receiver so we don't get spurious characters
                 * received.
                 */
                cr = readl(si->uart_base + MXC_UARTUCR1);
                cr |= MXC_UARTUCR1_TRDYEN;
                writel(cr, si->uart_base + MXC_UARTUCR1);

                dev_kfree_skb(skb);
        }
        dev->trans_start = jiffies;
        return 0;
}
static netdev_tx_t kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct kingsun_cb *kingsun;
        int wraplen;
        int ret = 0;

        netif_stop_queue(netdev);

        SKB_LINEAR_ASSERT(skb);

        kingsun = netdev_priv(netdev);

        spin_lock(&kingsun->lock);

        wraplen = async_wrap_skb(skb, kingsun->out_buf, KINGSUN_FIFO_SIZE);

        usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
                         usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
                         kingsun->out_buf, wraplen,
                         kingsun_send_irq, kingsun, 1);

        if ((ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC))) {
                err("kingsun_hard_xmit: failed tx_urb submit: %d", ret);
                switch (ret) {
                case -ENODEV:
                case -EPIPE:
                        break;
                default:
                        netdev->stats.tx_errors++;
                        netif_start_queue(netdev);
                }
        } else {
                netdev->stats.tx_packets++;
                netdev->stats.tx_bytes += skb->len;
        }

        dev_kfree_skb(skb);
        spin_unlock(&kingsun->lock);

        return NETDEV_TX_OK;
}
/*
 * Called from net/core when new frame is available.
 */
static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct ksdazzle_cb *kingsun;
        unsigned int wraplen;
        int ret = 0;

        if (skb == NULL || netdev == NULL)
                return -EINVAL;

        netif_stop_queue(netdev);

        /* the IRDA wrapping routines don't deal with non-linear skb */
        SKB_LINEAR_ASSERT(skb);

        kingsun = netdev_priv(netdev);

        spin_lock(&kingsun->lock);
        kingsun->new_speed = irda_get_next_speed(skb);

        /* Append data to the end of whatever data remains to be transmitted */
        wraplen = async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
        kingsun->tx_buf_clear_used = wraplen;

        if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
                err("ksdazzle_hard_xmit: failed tx_urb submit: %d", ret);
                switch (ret) {
                case -ENODEV:
                case -EPIPE:
                        break;
                default:
                        netdev->stats.tx_errors++;
                        netif_start_queue(netdev);
                }
        } else {
                netdev->stats.tx_packets++;
                netdev->stats.tx_bytes += skb->len;
        }

        dev_kfree_skb(skb);
        spin_unlock(&kingsun->lock);

        return ret;
}
static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        int speed = irda_get_next_speed(skb);

        netif_stop_queue(dev);

        /* Remember the mandatory turn-around time for the deferred sender. */
        self->mtt = irda_get_mtt(skb);

        /* A speed-change request is applied after this frame has gone out. */
        if (speed != self->speed && speed != -1)
                self->newspeed = speed;

        /* Wrap the frame for async (SIR) transmission; an empty frame has no payload. */
        self->tx_buff.data = self->tx_buff.head;
        if (skb->len == 0)
                self->tx_buff.len = 0;
        else
                self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);

        /* The actual transmission happens in process context via the work queue. */
        schedule_work(&self->work);
        dev_kfree_skb(skb);

        return 0;
}
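/*
 * Illustrative sketch only: the work handler behind schedule_work() above.
 * The helper bfin_sir_tx_chars() is an assumed name; the idea is that the
 * mandatory turn-around delay and the actual UART transmission happen in
 * process context, outside of hard_xmit().
 */
static void bfin_sir_send_work(struct work_struct *work)
{
        struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);

        /* honour the mean turn-around time recorded in hard_xmit() */
        if (self->mtt)
                udelay(self->mtt);

        /* push the wrapped frame in self->tx_buff out through the UART */
        bfin_sir_tx_chars(self);        /* assumed helper */
}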
static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sa1100_irda *si = dev->priv;
        int speed = irda_get_next_speed(skb);

        /*
         * Does this packet contain a request to change the interface
         * speed?  If so, remember it until we complete the transmission
         * of this frame.
         */
        if (speed != si->speed && speed != -1)
                si->newspeed = speed;

        /*
         * If this is an empty frame, we can bypass a lot.
         */
        if (skb->len == 0) {
                if (si->newspeed) {
                        si->newspeed = 0;
                        sa1100_irda_set_speed(si, speed);
                }
                dev_kfree_skb(skb);
                return 0;
        }

        if (!IS_FIR(si)) {
                netif_stop_queue(dev);

                si->tx_buff.data = si->tx_buff.head;
                si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

                /*
                 * Set the transmit interrupt enable.  This will fire
                 * off an interrupt immediately.  Note that we disable
                 * the receiver so we won't get spurious characters
                 * received.
                 */
                Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;

                dev_kfree_skb(skb);
        } else {
                int mtt = irda_get_mtt(skb);

                /*
                 * We must not be transmitting...
                 */
                if (si->txskb)
                        BUG();

                netif_stop_queue(dev);

                si->txskb = skb;
                si->txbuf_dma = dma_map_single(si->dev, skb->data, skb->len, DMA_TO_DEVICE);

                sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len);

                /*
                 * If we have a mean turn-around time, impose the specified
                 * delay.  We could shorten this by timing from the point
                 * we received the packet.
                 */
                if (mtt)
                        udelay(mtt);

                Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
        }

        dev->trans_start = jiffies;
        return 0;
}
static int pxa250_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct pxa250_irda *si = dev->priv;
        int speed = irda_get_next_speed(skb);
        int mtt;

        __ECHO_IN;

        /*
         * Does this packet contain a request to change the interface
         * speed?  If so, remember it until we complete the transmission
         * of this frame.
         */
        if (speed != si->speed && speed != -1)
                si->newspeed = speed;

        /*
         * If this is an empty frame, we can bypass a lot.
         */
        if (skb->len == 0) {
                if (si->newspeed) {
                        si->newspeed = 0;
                        pxa250_irda_set_speed(dev, speed);
                }
                dev_kfree_skb(skb);
                return 0;
        }

        DBG("stop queue\n");
        netif_stop_queue(dev);

        if (!IS_FIR(si)) {
                si->tx_buff.data = si->tx_buff.head;
                si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

                pxa250_sir_transmit(dev);

                dev_kfree_skb(skb);
                dev->trans_start = jiffies;
                return 0;
        } else { /* FIR */
                DBG("Enter FIR transmit\n");

                /*
                 * We must not be transmitting...
                 */
                if (si->txskb)
                        BUG();

                disable_irq(si->fir_irq);
                netif_stop_queue(dev);
                DBG("queue stopped\n");

                si->txskb = skb;

                /*
                 * We cannot simply map the skb, so we need some tricks:
                 * skb->data may not be DMA capable.  -Sed-
                 */
                if (skb->len > TXBUFF_MAX_SIZE) {
                        printk(KERN_ERR "skb data too large\n");
                        printk(KERN_ERR "len=%d", skb->len);
                        BUG();
                }

                DBG("gonna copy %d bytes to txbuf\n", skb->len);
                memcpy(si->txbuf_dma_virt, skb->data, skb->len);

                /* Actual sending; must not be receiving!!! */
                /* Write data and source address */
                DBG("ICSR1 & RNE =%d\n", (ICSR1 & ICSR1_RNE) ? 1 : 0);

                /* Disable receiver and enable transmitter */
                ICCR0 &= ~ICCR0_RXE;
                if (ICSR1 & ICSR1_TBY)
                        BUG();
                ICCR0 |= ICCR0_TXE;

                DBG("FICP status %x\n", ICSR0);

                if (0) {
                        int i;
                        DBG("sending packet\n");
                        for (i = 0; i < skb->len; i++)
                                (i % 64) ? printk("%2x ", skb->data[i])
                                         : printk("%2x \n", skb->data[i]);
                        DBG(" done\n");
                }

                /*
                 * If we have a mean turn-around time, impose the specified
                 * delay.  We could shorten this by timing from the point
                 * we received the packet.
                 */
                mtt = irda_get_mtt(skb);
                if (mtt)
                        udelay(mtt);

                DCSR(si->txdma_ch) = 0;
                DCSR(si->txdma_ch) = DCSR_NODESC;
                DSADR(si->txdma_ch) = si->txbuf_dma;    /* physical address */
                DTADR(si->txdma_ch) = __PREG(ICDR);
                DCMD(si->txdma_ch) = DCMD_ENDIRQEN | DCMD_INCSRCADDR | DCMD_FLOWTRG |
                                     DCMD_BURST8 | DCMD_WIDTH1 | skb->len;
                DCSR(si->txdma_ch) = DCSR_ENDINTR | DCSR_BUSERR;
                DCSR(si->txdma_ch) = DCSR_RUN | DCSR_NODESC;

                DBG("FICP status %x\n", ICSR0);

                return 0;
        }
}