static irqreturn_t bu92747_irda_irq(int irqno, void *dev_id) { struct bu92747_port *s = dev_id; u32 irq_src = 0; unsigned long len; struct rev_frame_length *f = &(s->rev_frames); dev_dbg(s->dev, "%s\n", __func__); BU92747_IRDA_DBG("line %d, enter %s \n", __LINE__, __FUNCTION__); irq_src = irda_hw_get_irqsrc(); printk("[%s][%d], 0x%x\n",__FUNCTION__,__LINE__, irq_src); /* error */ if (irq_src & (REG_INT_TO| REG_INT_CRC | REG_INT_OE | REG_INT_FE | REG_INT_AC | REG_INT_DECE | REG_INT_RDOE | REG_INT_DEX)) { BU92747_IRDA_DBG("[%s][%d]: do err\n", __FUNCTION__, __LINE__); //BU92725GUW_dump_register(); BU92725GUW_clr_fifo(); BU92725GUW_reset(); if ((BU92725GUW_SEND==irda_hw_get_mode()) || (BU92725GUW_MULTI_SEND==irda_hw_get_mode())) { s->tx_empty = 1; } } if (irq_src & (REG_INT_DRX | FRM_EVT_RX_EOFRX | FRM_EVT_RX_RDE)) { len = bu92747_irda_do_rx(s); if (!IS_FIR(s)) tty_flip_buffer_push(s->port.state->port.tty); else { spin_lock(&s->data_lock); s->cur_frame_length += len; spin_unlock(&s->data_lock); } } if ((irq_src & REG_INT_EOF) && (s->port.state->port.tty != NULL)) { tty_flip_buffer_push(s->port.state->port.tty); if (IS_FIR(s)) { spin_lock(&s->data_lock); if (add_frame_length(f, s->cur_frame_length) == 0) { s->cur_frame_length = 0; //atomic_set(&(s->data_ready), 1); //wake_up(&(s->data_ready_wq) ); } else { printk("line %d: FIR frame length buf full......\n", __LINE__); } spin_unlock(&s->data_lock); } } if (irq_src & (FRM_EVT_TX_TXE | FRM_EVT_TX_WRE)) { s->tx_empty = 1; irda_hw_set_moderx(); } return IRQ_HANDLED; }
/* hard_xmit interface of irda device */ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa_irda *si = netdev_priv(dev); int speed = irda_get_next_speed(skb); /* * Does this packet contain a request to change the interface * speed? If so, remember it until we complete the transmission * of this frame. */ if (speed != si->speed && speed != -1) si->newspeed = speed; /* * If this is an empty frame, we can bypass a lot. */ if (skb->len == 0) { if (si->newspeed) { si->newspeed = 0; pxa_irda_set_speed(si, speed); } dev_kfree_skb(skb); return 0; } netif_stop_queue(dev); if (!IS_FIR(si)) { si->tx_buff.data = si->tx_buff.head; si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize); /* Disable STUART interrupts and switch to transmit mode. */ STIER = 0; STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6; /* enable STUART and transmit interrupts */ STIER = IER_UUE | IER_TIE; } else { unsigned long mtt = irda_get_mtt(skb); si->dma_tx_buff_len = skb->len; skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); if (mtt) while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) cpu_relax(); /* stop RX DMA, disable FICP */ DCSR(si->rxdma) &= ~DCSR_RUN; ICCR0 = 0; pxa_irda_fir_dma_tx_start(si); ICCR0 = ICCR0_ITR | ICCR0_TXE; } dev_kfree_skb(skb); dev->trans_start = jiffies; return 0; }
/*
 * Top-level interrupt handler: dispatch to the FIR or HP-SIR service
 * routine depending on the port's current operating mode.
 */
static void sa1100_irda_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *ndev = dev_id;
	struct sa1100_irda *si = (struct sa1100_irda *)ndev->priv;

	if (IS_FIR(si))
		sa1100_irda_fir_irq(ndev);
	else
		sa1100_irda_hpsir_irq(ndev);
}
/*
 * Top-level interrupt handler: route the IRQ to the FIR or HP-SIR
 * service routine depending on the current operating mode.
 */
static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct sa1100_irda *si = (struct sa1100_irda *)netdev_priv(ndev);

	if (IS_FIR(si))
		sa1100_irda_fir_irq(ndev);
	else
		sa1100_irda_hpsir_irq(ndev);

	return IRQ_HANDLED;
}
/*
 * Top-level interrupt handler: hand the interrupt to the FIR or HP-SIR
 * service routine according to the port's current operating mode.
 */
static irqreturn_t sa1100_irda_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *ndev = dev_id;
	struct sa1100_irda *si = (struct sa1100_irda *)ndev->priv;

	if (IS_FIR(si))
		sa1100_irda_fir_irq(ndev);
	else
		sa1100_irda_hpsir_irq(ndev);

	return IRQ_HANDLED;
}
/*
 * hard_xmit hook for the PXA IrDA netdevice.
 *
 * SIR mode: the frame is async-wrapped into tx_buff and the STUART is
 * switched to transmit with TX interrupts enabled.  FIR mode: the frame
 * is copied to the DMA buffer, the minimum turn-around time is enforced
 * with a busy-wait on OSCR, and the FICP is switched from RX DMA to TX.
 */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/* Remember a speed-change request until this frame completes. */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/* Empty frame: only apply any pending speed change, then free. */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		/* SIR: wrap the frame and hand it to the STUART. */
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		STIER = 0;	/* disable STUART interrupts first */
		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6; /* TX mode */
		STIER = IER_UUE | IER_TIE;	/* enable UART + TX interrupts */
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		/* Honour the minimum turn-around time since the last RX
		 * (OSCR ticks divided by 4 — presumably converting to the
		 * mtt unit; confirm against the OS timer clock rate). */
		if (mtt)
			while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
				cpu_relax();

		/* Stop RX DMA and disable the FICP before switching to TX. */
		DCSR(si->rxdma) &= ~DCSR_RUN;
		ICCR0 = 0;

		pxa_irda_fir_dma_tx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_TXE;
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int pxa250_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) { struct if_irda_req *rq = (struct if_irda_req *)ifreq; struct pxa250_irda *si = dev->priv; int ret = -EOPNOTSUPP; __ECHO_IN; switch (cmd) { case SIOCSBANDWIDTH: if (capable(CAP_NET_ADMIN)) { /* * We are unable to set the speed if the * device is not running. */ if (si->open) { ret = pxa250_irda_set_speed(dev, rq->ifr_baudrate); } else { printk("pxa250_irda_ioctl: SIOCSBANDWIDTH: !netif_running\n"); ret = 0; } } break; case SIOCSMEDIABUSY: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { irda_device_set_media_busy(dev, TRUE); ret = 0; } break; case SIOCGRECEIVING: rq->ifr_receiving = IS_FIR(si) ? 0 : si->rx_buff.state != OUTSIDE_FRAME; break; default: break; } __ECHO_OUT; return ret; }
/*
 * uart_ops.start_tx hook: kick off transmission of pending uart data.
 *
 * In FIR mode transmission is deferred until an explicit start command,
 * so this hook does nothing.  Otherwise, if the transmitter is idle the
 * data is sent immediately; if a send is still in flight the work is
 * queued instead.
 *
 * Fix: deprecated GCC __FUNCTION__ replaced with C99 __func__
 * (identical expansion).
 */
static void bu92747_irda_start_tx(struct uart_port *port)
{
	struct bu92747_port *s = container_of(port, struct bu92747_port, port);

	BU92747_IRDA_DBG("line %d, enter %s \n", __LINE__, __func__);
	dev_dbg(s->dev, "%s\n", __func__);

	/* FIR: wait for start cmd */
	if (IS_FIR(s))
		return;

	if (s->tx_empty)
		bu92747_irda_do_tx(s);
	else
		bu92747_irda_dowork(s);
}
/*
 * Suspend hook: quiesce an open SIR port before power-down.
 *
 * Suspension is refused (returns -1) while the port is in FIR mode.
 * If the device is not open there is nothing to do.
 */
static int pxa250_irda_suspend(struct net_device *dev, int state)
{
	struct pxa250_irda *priv = dev->priv;

	if (!priv || !priv->open)
		return 0;

	/* cannot suspend mid-FIR */
	if (IS_FIR(priv))
		return -1;

	/* Stop the transmit queue and silence both interrupt sources,
	 * then shut the SIR hardware down. */
	netif_stop_queue(dev);
	disable_irq(dev->irq);
	disable_irq(priv->fir_irq);
	pxa250_sir_irda_shutdown(priv);

	return 0;
}
static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) { struct if_irda_req *rq = (struct if_irda_req *)ifreq; struct pxa_irda *si = netdev_priv(dev); int ret; switch (cmd) { case SIOCSBANDWIDTH: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { if (netif_running(dev)) { ret = pxa_irda_set_speed(si, rq->ifr_baudrate); } else { printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n"); ret = 0; } } break; case SIOCSMEDIABUSY: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { irda_device_set_media_busy(dev, TRUE); ret = 0; } break; case SIOCGRECEIVING: ret = 0; rq->ifr_receiving = IS_FIR(si) ? 0 : si->rx_buff.state != OUTSIDE_FRAME; break; default: ret = -EOPNOTSUPP; break; } return ret; }
static int bu92747_irda_do_tx(struct bu92747_port *s) { int i; struct circ_buf *xmit = &s->port.state->xmit; int len = uart_circ_chars_pending(xmit); BU92747_IRDA_DBG("line %d, enter %s \n", __LINE__, __FUNCTION__); if (IS_FIR(s)) { //printk("fir sending.....\n"); irda_hw_tx_enable_irq(BU92725GUW_FIR); } else { //printk("sir sending.....\n"); irda_hw_tx_enable_irq(BU92725GUW_SIR); } BU92747_IRDA_DBG("data:\n"); for (i=0; i<len; i++) { BU92747_IRDA_DBG("%d ", xmit->buf[xmit->tail+i]); } BU92747_IRDA_DBG("\n"); if (len>0) { s->tx_empty = 0; } BU92725GUW_send_data(xmit->buf+xmit->tail, len, NULL, 0); s->port.icount.tx += len; xmit->tail = (xmit->tail + len) & (UART_XMIT_SIZE - 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&s->port); return len; }
/*
 * Set the IrDA communications speed.
 *
 * SIR rates (9600-115200 baud) run on the STUART; 4 Mbps FIR runs on
 * the FICP with DMA.  Crossing between the two quiesces the old block,
 * switches the board transceiver via the platform callback, and
 * re-enables the appropriate clock.  Returns 0 on success, -EINVAL for
 * an unsupported rate.  Register writes below are order-sensitive.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* refer to PXA250/210 Developer's Manual 10-7 */
		/* BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* leaving FIR: quiesce the FICP before touching
			 * the STUART */
			/* stop RX DMA */
			DCSR(si->rxdma) &= ~DCSR_RUN;
			/* disable FICP */
			ICCR0 = 0;
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			si->pdata->transceiver_mode(si->dev, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		STIER = 0;
		/* access DLL & DLH */
		STLCR |= LCR_DLAB;
		STDLL = divisor & 0xff;
		STDLH = divisor >> 8;
		STLCR &= ~LCR_DLAB;

		si->speed = speed;
		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		STIER = 0;
		STISR = 0;
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ICCR0 = 0;

		/* set board transceiver to FIR mode */
		si->pdata->transceiver_mode(si->dev, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) { struct sa1100_irda *si = dev->priv; int speed = irda_get_next_speed(skb); /* * Does this packet contain a request to change the interface * speed? If so, remember it until we complete the transmission * of this frame. */ if (speed != si->speed && speed != -1) si->newspeed = speed; /* * If this is an empty frame, we can bypass a lot. */ if (skb->len == 0) { if (si->newspeed) { si->newspeed = 0; sa1100_irda_set_speed(si, speed); } dev_kfree_skb(skb); return 0; } if (!IS_FIR(si)) { netif_stop_queue(dev); si->tx_buff.data = si->tx_buff.head; si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize); /* * Set the transmit interrupt enable. This will fire * off an interrupt immediately. Note that we disable * the receiver so we won't get spurious characteres * received. */ Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE; dev_kfree_skb(skb); } else { int mtt = irda_get_mtt(skb); /* * We must not be transmitting... */ if (si->txskb) BUG(); netif_stop_queue(dev); si->txskb = skb; si->txbuf_dma = dma_map_single(si->dev, skb->data, skb->len, DMA_TO_DEVICE); sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len); /* * If we have a mean turn-around time, impose the specified * specified delay. We could shorten this by timing from * the point we received the packet. */ if (mtt) udelay(mtt); Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; } dev->trans_start = jiffies; return 0; }
/* * Set the IrDA communications speed. */ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) { unsigned long flags; int brd, ret = -EINVAL; switch (speed) { case 9600: case 19200: case 38400: case 57600: case 115200: brd = 3686400 / (16 * speed) - 1; /* * Stop the receive DMA. */ if (IS_FIR(si)) sa1100_stop_dma(si->rxdma); local_irq_save(flags); Ser2UTCR3 = 0; Ser2HSCR0 = HSCR0_UART; Ser2UTCR1 = brd >> 8; Ser2UTCR2 = brd; /* * Clear status register */ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; if (si->pdata->set_speed) si->pdata->set_speed(si->dev, speed); si->speed = speed; local_irq_restore(flags); ret = 0; break; case 4000000: local_irq_save(flags); si->hscr0 = 0; Ser2HSSR0 = 0xff; Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; Ser2UTCR3 = 0; si->speed = speed; if (si->pdata->set_speed) si->pdata->set_speed(si->dev, speed); sa1100_irda_rx_alloc(si); sa1100_irda_rx_dma_start(si); local_irq_restore(flags); break; default: break; } return ret; }
/*
 * hard_xmit hook for the pxa250 IrDA netdevice.
 *
 * SIR: the frame is async-wrapped and handed to pxa250_sir_transmit().
 * FIR: the frame is copied into the driver's DMA-capable bounce buffer
 * (skb->data itself may not be DMA-capable), the FICP is switched from
 * receive to transmit, the minimum turn-around time is honoured, and a
 * descriptor-less DMA transfer to the ICDR FIFO is programmed and
 * started.  In FIR mode the skb is retained in si->txskb — presumably
 * freed by the DMA-completion handler; confirm against that path.
 */
static int pxa250_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa250_irda *si = dev->priv;
	int speed = irda_get_next_speed(skb);
	int mtt;

	__ECHO_IN;

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa250_irda_set_speed(dev, speed);
		}
		dev_kfree_skb(skb);
		return 0;
	}

	DBG("stop queue\n");
	netif_stop_queue(dev);

	if(!IS_FIR(si))
	{
		/* SIR path: wrap, transmit, free, done. */
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
		pxa250_sir_transmit(dev);
		dev_kfree_skb(skb);
		dev->trans_start = jiffies;
		return 0;
	}
	else /* FIR */
	{
		DBG("Enter FIR transmit\n");
		/*
		 * We must not be transmitting...
		 */
		if (si->txskb)
			BUG();

		disable_irq(si->fir_irq);
		netif_stop_queue(dev);
		DBG("queue stoped\n");
		si->txskb = skb;

		/* we could not just map so we'll need some triks */
		/* skb->data may be not DMA capable -Sed- */
		if (skb->len > TXBUFF_MAX_SIZE) {
			printk (KERN_ERR "skb data too large\n");
			printk (KERN_ERR "len=%d",skb->len);
			BUG();
		}

		DBG("gonna copy %d bytes to txbuf\n",skb->len);
		/* bounce into the DMA-capable buffer */
		memcpy (si->txbuf_dma_virt, skb->data , skb->len);

		/* Actual sending ;must not be receiving !!! */
		DBG("ICSR1 & RNE =%d\n",(ICSR1 & ICSR1_RNE) ? 1 : 0 );

		/* Disable receiver and enable transmitter */
		ICCR0 &= ~ICCR0_RXE;
		if (ICSR1 & ICSR1_TBY)
			BUG();
		ICCR0 |= ICCR0_TXE;

		DBG("FICP status %x\n",ICSR0);

		/* dead debug block, intentionally compiled out */
		if (0){
			int i;
			DBG("sending packet\n");
			for (i=0;i<skb->len;i++)
				(i % 64) ? printk ("%2x ",skb->data[i]) : printk ("%2x \n",skb->data[i]) ;
			DBG(" done\n");
		}

		/*
		 * If we have a mean turn-around time, impose the specified
		 * delay.  We could shorten this by timing from the point
		 * we received the packet.
		 */
		mtt = irda_get_mtt(skb);
		if(mtt)
			udelay(mtt);

		/* Program a descriptor-less DMA transfer: bounce buffer ->
		 * ICDR FIFO, 8-byte bursts, 1-byte width, IRQ on end. */
		DCSR(si->txdma_ch)=0;
		DCSR(si->txdma_ch)=DCSR_NODESC;
		DSADR(si->txdma_ch) = si->txbuf_dma; /* physical address */
		DTADR(si->txdma_ch) = __PREG(ICDR);
		DCMD(si->txdma_ch) = DCMD_ENDIRQEN| DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_BURST8 | DCMD_WIDTH1 | skb->len;
		DCSR(si->txdma_ch) = DCSR_ENDINTR | DCSR_BUSERR;
		DCSR(si->txdma_ch) = DCSR_RUN | DCSR_NODESC ;

		DBG("FICP status %x\n",ICSR0);
		return 0;
	}
}
/* * Set the IrDA communications speed. */ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) { unsigned long flags; int brd, ret = -EINVAL; switch (speed) { case 9600: case 19200: case 38400: case 57600: case 115200: brd = 3686400 / (16 * speed) - 1; /* * Stop the receive DMA. */ if (IS_FIR(si)) sa1100_stop_dma(si->rxdma); local_irq_save(flags); Ser2UTCR3 = 0; Ser2HSCR0 = HSCR0_UART; Ser2UTCR1 = brd >> 8; Ser2UTCR2 = brd; /* * Clear status register */ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; if (machine_is_assabet()) ASSABET_BCR_clear(ASSABET_BCR_IRDA_FSEL); if (machine_is_h3xxx()) clr_h3600_egpio(IPAQ_EGPIO_IR_FSEL); if (machine_is_yopy()) PPSR &= ~GPIO_IRDA_FIR; si->speed = speed; local_irq_restore(flags); ret = 0; break; case 4000000: local_irq_save(flags); si->hscr0 = 0; Ser2HSSR0 = 0xff; Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; Ser2UTCR3 = 0; si->speed = speed; if (machine_is_assabet()) ASSABET_BCR_set(ASSABET_BCR_IRDA_FSEL); if (machine_is_h3xxx()) set_h3600_egpio(IPAQ_EGPIO_IR_FSEL); if (machine_is_yopy()) PPSR |= GPIO_IRDA_FIR; sa1100_irda_rx_alloc(si); sa1100_irda_rx_dma_start(si); local_irq_restore(flags); break; default: break; } return ret; }
/*
 * Interrupt handler for the OMAP IrDA (UART3) port.
 *
 * Handles transmit completion (accounting, deferred speed change,
 * queue wake-up) and receive completion: DMA is stopped, the frame
 * status is checked, and a good frame is copied out of the DMA buffer
 * into a fresh skb and passed to the stack.  RX DMA is re-armed before
 * returning.
 */
static irqreturn_t omap_irda_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct omap_irda *omap_ir = netdev_priv(dev);
	struct sk_buff *skb;
	u8 status;
	int w = 0;

	/* Clear EOF interrupt */
	status = uart_reg_in(UART3_IIR);

	if (status & UART3_IIR_TX_STATUS) {
		u8 mdr2 = uart_reg_in(UART3_MDR2);

		if (mdr2 & UART3_MDR2_IRTX_UNDERRUN)
			printk(KERN_ERR "IrDA Buffer underrun error\n");

		omap_ir->stats.tx_packets++;

		/* apply a speed change deferred until TX completed */
		if (omap_ir->newspeed) {
			omap_irda_set_speed(dev, omap_ir->newspeed);
			omap_ir->newspeed = 0;
		}

		netif_wake_queue(dev);
		if (!(status & UART3_IIR_EOF))
			return IRQ_HANDLED;
	}

	/* Stop DMA and if there are no errors, send frame to upper layer */
	omap_stop_dma(omap_ir->rx_dma_channel);

	status = uart_reg_in(UART3_SFLSR);	/* Take a frame status */

	if (status != 0) {	/* Bad frame? */
		omap_ir->stats.rx_frame_errors++;
		uart_reg_in(UART3_RESUME);
	} else {
		/* We got a frame! */
		skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);

		if (!skb) {
			printk(KERN_ERR "omap_sir: out of memory for RX SKB\n");
			return IRQ_HANDLED;
		}

		/*
		 * Align any IP headers that may be contained
		 * within the frame.
		 */
		skb_reserve(skb, 1);

		/* received length = current DMA address - buffer start */
		w = OMAP_DMA_CDAC_REG(omap_ir->rx_dma_channel);

		if (cpu_is_omap16xx())
			w -= OMAP1_DMA_CDSA_L_REG(omap_ir->rx_dma_channel);
		if (cpu_is_omap24xx())
			w -= OMAP2_DMA_CDSA_REG(omap_ir->rx_dma_channel);

		/* NOTE(review): trailing 2 (SIR) / 4 (FIR) bytes are
		 * dropped — presumably the frame check sequence; there is
		 * no lower-bound check, so w < 2 (or < 4) would underflow
		 * the length passed to skb_put().  Confirm that the DMA
		 * setup guarantees a minimum frame size. */
		if (!IS_FIR(omap_ir))
			/* Copy DMA buffer to skb */
			memcpy(skb_put(skb, w - 2), omap_ir->rx_buf_dma_virt, w - 2);
		else
			/* Copy DMA buffer to skb */
			memcpy(skb_put(skb, w - 4), omap_ir->rx_buf_dma_virt, w - 4);

		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		omap_ir->stats.rx_packets++;
		omap_ir->stats.rx_bytes += skb->len;

		netif_receive_skb(skb);	/* Send data to upper level */
	}

	/* Re-init RX DMA */
	omap_irda_start_rx_dma(omap_ir);

	dev->last_rx = jiffies;

	return IRQ_HANDLED;
}
/*
 * Set the IrDA communications speed.
 *
 * SIR rates (9600-115200 baud) run on the STUART; 4 Mbps FIR runs on
 * the FICP with DMA.  Crossing between the two quiesces the old block,
 * switches the transceiver via pxa_irda_set_mode(), and re-enables the
 * appropriate clock.  Returns 0 on success, -EINVAL for an unsupported
 * rate.  The register writes below are order-sensitive.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* BaudRate = 14.7456 MHz / (16 * Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* leaving FIR: stop RX DMA, disable the FICP and
			 * its clock, then bring up the STUART */
			DCSR(si->rxdma) &= ~DCSR_RUN;
			ICCR0 = 0;
			pxa_irda_disable_clk(si);

			/* transceiver to SIR mode */
			pxa_irda_set_mode(si, IR_SIRMODE);
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART before touching the divisor latch */
		STIER = 0;
		STLCR |= LCR_DLAB;	/* access DLL & DLH */
		STDLL = divisor & 0xff;
		STDLH = divisor >> 8;
		STLCR &= ~LCR_DLAB;

		si->speed = speed;
		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable the STUART */
		STIER = 0;
		STISR = 0;
		pxa_irda_disable_clk(si);

		/* disable FICP first, then switch transceiver to FIR */
		ICCR0 = 0;
		pxa_irda_set_mode(si, IR_FIRMODE);
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}