/*
 * bfin_sir_dma_rx_int - RX row-complete DMA interrupt for the SIR IrDA port.
 *
 * The RX side runs a 2D autobuffer DMA of DMA_SIR_RX_YCNT rows of
 * DMA_SIR_RX_XCNT bytes.  Each interrupt marks one more completed row:
 * advance the tail by a row, drain the new bytes, and wrap the row count
 * when the DMA wraps.  Finally (re)arm the flush timer that picks up any
 * partial row the DMA is still filling.
 */
static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned short irqstat;

	spin_lock(&self->lock);

	/* One more full row is available; tail moves to its end. */
	port->rx_dma_nrows++;
	port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
	bfin_sir_dma_rx_chars(dev);
	if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
		/* The 2D DMA wrapped back to row 0 of the buffer. */
		port->rx_dma_nrows = 0;
		port->rx_dma_buf.tail = 0;
	}
	/* Everything up to tail has been consumed. */
	port->rx_dma_buf.head = port->rx_dma_buf.tail;

	/* Read, then acknowledge, the DMA channel interrupt status. */
	irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
	clear_dma_irqstat(port->rx_dma_channel);
	spin_unlock(&self->lock);

	mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
	return IRQ_HANDLED;
}
/*
 * bfin_serial_dma_rx_int - RX DMA interrupt for the Blackfin UART.
 *
 * Converts the 2D DMA engine's live x/y counters (which count *remaining*
 * transfers) into the current write position inside the circular RX buffer
 * (DMA_RX_YCOUNT rows of DMA_RX_XCOUNT bytes), and drains any bytes that
 * arrived since the last pass.
 */
static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	unsigned short irqstat;
	int x_pos, pos;

	spin_lock(&uart->rx_lock);
	/* Read, then acknowledge, the DMA channel interrupt status. */
	irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
	clear_dma_irqstat(uart->rx_dma_channel);

	/* ycount counts rows still to go; turn it into rows completed. */
	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	/* At the wrap boundary the position is logically row 0 again. */
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
		uart->rx_dma_nrows = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
	/*
	 * Drain only if the DMA write position moved past the tail, or
	 * wrapped around behind it (row index dropped below tail's row).
	 */
	if (pos > uart->rx_dma_buf.tail ||
	    uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock(&uart->rx_lock);
	return IRQ_HANDLED;
}
/*
 * bfin_sir_dma_tx_int - TX DMA completion interrupt for the SIR IrDA port.
 *
 * Once the DMA engine has fully stopped, acknowledge the interrupt, stop
 * the transmitter, account the finished frame, apply any speed change that
 * was queued while the frame was in flight, re-enable the receiver, and
 * let the network stack queue the next frame.
 */
static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	/* Act only after the DMA channel has actually finished running. */
	if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
		clear_dma_irqstat(port->tx_dma_channel);
		bfin_sir_stop_tx(port);

		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.len;
		self->tx_buff.len = 0;
		/* Speed change requested mid-frame takes effect now. */
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		/* Ready for the next frame from the stack. */
		netif_wake_queue(dev);
		port->tx_done = 1;
	}
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
static int ppi_stop(struct ppi_if *ppi) { const struct ppi_info *info = ppi->info; ppi->ppi_control &= ~PORT_EN; switch (info->type) { case PPI_TYPE_PPI: { struct bfin_ppi_regs *reg = info->base; bfin_write16(®->control, ppi->ppi_control); break; } case PPI_TYPE_EPPI: { struct bfin_eppi_regs *reg = info->base; bfin_write32(®->control, ppi->ppi_control); break; } default: return -EINVAL; } clear_dma_irqstat(info->dma_ch); disable_dma(info->dma_ch); SSYNC(); return 0; }
/*
 * ppifcd_irq_error - PPI error interrupt for the frame-capture driver.
 *
 * Clears the PPI error status, acknowledges the DMA interrupt, disables
 * the PPI port, marks the capture as done, signals async readers via
 * SIGIO, and wakes any process sleeping in read().
 */
static irqreturn_t ppifcd_irq_error(int irq, void *dev_id, struct pt_regs *regs)
{
	ppi_device_t *pdev = (ppi_device_t *) dev_id;

	pr_debug("ppifcd_error_irq:\n");
	pr_debug("PPI Status = 0x%X\n", bfin_read_PPI_STATUS());
	/* Clear the latched PPI error status bits. */
	bfin_clear_PPI_STATUS();

	/* Acknowledge DMA Interrupt */
	clear_dma_irqstat(CH_PPI);

	/* disable ppi */
	bfin_write_PPI_CONTROL(pdev->ppi_control & ~PORT_EN);

	pdev->done = 1;

	/* Give a signal to user program. */
	if (pdev->fasyc)
		kill_fasync(&(pdev->fasyc), SIGIO, POLLIN);

	pr_debug("ppifcd_error_irq: wake_up_interruptible pdev->done=%d\n",
		 pdev->done);

	/* wake up read — NOTE(review): rx_avail is passed without '&', so it
	 * is presumably a pointer to a wait_queue_head_t; confirm against
	 * the ppi_device_t definition. */
	wake_up_interruptible(pdev->rx_avail);

	pr_debug("ppifcd_error_irq: return\n");
	return IRQ_HANDLED;
}
/*
 * bfin_serial_dma_tx_int - TX DMA completion interrupt for the Blackfin
 * UART (port.state variant).
 *
 * Once the DMA channel has stopped, acknowledge it, advance the circular
 * TX buffer past the bytes just sent, wake writers if the buffer drained
 * below WAKEUP_CHARS, and kick off the next DMA transfer.
 */
static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	struct circ_buf *xmit = &uart->port.state->xmit;

#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	/* Report a CTS drop observed while hardware flow control is on. */
	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
		uart->scts = 0;
		uart_handle_cts_change(&uart->port, uart->scts);
	}
#endif

	spin_lock(&uart->port.lock);
	/* Act only after the DMA channel has actually finished running. */
	if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
		disable_dma(uart->tx_dma_channel);
		clear_dma_irqstat(uart->tx_dma_channel);
		/* Anomaly notes:
		 *  05000215 - we always clear ETBEI within last UART TX
		 *             interrupt to end a string. It is always set
		 *             when start a new tx.
		 */
		UART_CLEAR_IER(uart, ETBEI);
		/* Consume the bytes the completed DMA transfer sent. */
		xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx += uart->tx_count;

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
			uart_write_wakeup(&uart->port);

		bfin_serial_dma_tx_chars(uart);
	}

	spin_unlock(&uart->port.lock);
	return IRQ_HANDLED;
}
/*
 * bfin_serial_dma_tx_int - TX DMA completion interrupt for the Blackfin
 * UART (older port.info variant).
 *
 * Once the DMA channel has stopped, acknowledge it, clear ETBEI, advance
 * the circular TX buffer past the bytes just sent, wake writers if the
 * buffer drained below WAKEUP_CHARS, and start the next DMA transfer.
 */
static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	struct circ_buf *xmit = &uart->port.info->xmit;

#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	/* Report a CTS drop observed while hardware flow control is on. */
	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
		uart->scts = 0;
		uart_handle_cts_change(&uart->port, uart->scts);
	}
#endif

	spin_lock(&uart->port.lock);
	/* Act only after the DMA channel has actually finished running. */
	if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
		disable_dma(uart->tx_dma_channel);
		clear_dma_irqstat(uart->tx_dma_channel);
		/* ETBEI is cleared at the end of each string and set again
		 * when a new transmission starts. */
		UART_CLEAR_IER(uart, ETBEI);
		/* Consume the bytes the completed DMA transfer sent. */
		xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx += uart->tx_count;

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
			uart_write_wakeup(&uart->port);

		bfin_serial_dma_tx_chars(uart);
	}

	spin_unlock(&uart->port.lock);
	return IRQ_HANDLED;
}
/*
 * bfin_serial_dma_rx_int - RX DMA interrupt (simple variant).
 *
 * Acknowledges the DMA channel interrupt and hands any received bytes
 * up to the serial layer, all under the port lock.
 */
static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	unsigned short status;

	spin_lock(&uart->port.lock);

	/* Read, then acknowledge, the channel's interrupt status. */
	status = get_dma_curr_irqstat(uart->rx_dma_channel);
	clear_dma_irqstat(uart->rx_dma_channel);

	/* Drain whatever the DMA engine has deposited so far. */
	bfin_serial_dma_rx_chars(uart);

	spin_unlock(&uart->port.lock);
	return IRQ_HANDLED;
}
/*
 * sport_rx_irq - SPORT RX DMA interrupt.
 *
 * Acknowledges a completed or errored DMA transfer and then notifies the
 * registered client callback, if any.
 */
static irqreturn_t sport_rx_irq(int irq, void *dev_id)
{
	struct sport_device *sport = dev_id;
	unsigned long irq_stat = get_dma_curr_irqstat(sport->rx_dma_chan);

	if (irq_stat & (DMA_DONE | DMA_ERR)) {
		/* Ack the interrupt and make sure the write has landed. */
		clear_dma_irqstat(sport->rx_dma_chan);
		SSYNC();
	}

	/* Hand control to the client, if one registered a callback. */
	if (sport->rx_callback)
		sport->rx_callback(sport->rx_data);

	return IRQ_HANDLED;
}
static irqreturn_t buffer_full_handler(int irq, void* data) { /* Compute the absolute address of the individual buffer */ current_buffer_pointer = dma_buffer + (current_buffer_index * BUFFER_SIZE); /* Advance the buffer number */ current_buffer_index = (current_buffer_index + 1) % BUFFER_COUNT; complete(&buffer_ready); #ifdef PPIADC_DEBUG printk(KERN_INFO DRIVER_NAME ": 0x%08lX 0x%04X\n", current_buffer_pointer, ((unsigned short*) current_buffer_pointer)[0]); #endif clear_dma_irqstat(CH_PPI); return IRQ_HANDLED; }
/*
 * bfin_serial_dma_tx_int - TX DMA completion interrupt for the Blackfin
 * UART (older port.info variant, no hardware CTS/RTS handling).
 *
 * Once the DMA channel has stopped, acknowledge it, clear ETBEI, advance
 * the circular TX buffer past the bytes just sent, wake writers if the
 * buffer drained below WAKEUP_CHARS, and start the next DMA transfer.
 */
static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	struct circ_buf *xmit = &uart->port.info->xmit;

	spin_lock(&uart->port.lock);
	/* Act only after the DMA channel has actually finished running. */
	if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
		disable_dma(uart->tx_dma_channel);
		clear_dma_irqstat(uart->tx_dma_channel);
		/* ETBEI is cleared at the end of each string and set again
		 * when a new transmission starts. */
		UART_CLEAR_IER(uart, ETBEI);
		/* Consume the bytes the completed DMA transfer sent. */
		xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx += uart->tx_count;

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
			uart_write_wakeup(&uart->port);

		bfin_serial_dma_tx_chars(uart);
	}

	spin_unlock(&uart->port.lock);
	return IRQ_HANDLED;
}
/*
 * coreb_dma_interrupt - memory-stream DMA completion interrupt.
 *
 * Acknowledges the MDMA stream-2 destination channel and wakes anyone
 * sleeping on coreb_dma_wait for the transfer to finish.
 */
static irqreturn_t coreb_dma_interrupt(int irq, void *dev_id,
				       struct pt_regs *regs)
{
	clear_dma_irqstat(CH_MEM_STREAM2_DEST);
	wake_up(&coreb_dma_wait);
	return IRQ_HANDLED;
}