/**
 * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
 * @substream: PCM substream
 * @cmd: Trigger command
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * This function can be used as the PCM trigger callback for dmaengine based PCM
 * driver implementations.
 */
int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		ret = dmaengine_pcm_prepare_and_submit(substream);
		if (ret)
			return ret;
		dma_async_issue_pending(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		dmaengine_resume(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
		/* Pause only if the stream can be resumed later. */
		if (runtime->info & SNDRV_PCM_INFO_RESUME)
			dmaengine_pause(prtd->dma_chan);
		else
			dmaengine_terminate_all(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		dmaengine_pause(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		dmaengine_terminate_all(prtd->dma_chan);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
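The kernel-doc above says this helper is meant to be plugged in as a driver's trigger callback. A minimal sketch of that wiring, assuming a hypothetical foo_* driver: snd_dmaengine_pcm_open(), snd_dmaengine_pcm_close(), and snd_dmaengine_pcm_pointer() are real sound-core API; everything named foo_* is illustrative.

#include <sound/pcm.h>
#include <sound/dmaengine_pcm.h>

/* Assumed driver-specific channel lookup (hypothetical). */
static struct dma_chan *foo_get_dma_chan(struct snd_pcm_substream *substream);

/* Hypothetical open callback: hand the dmaengine helpers their channel. */
static int foo_pcm_open(struct snd_pcm_substream *substream)
{
	return snd_dmaengine_pcm_open(substream, foo_get_dma_chan(substream));
}

/* The dmaengine PCM helpers slot straight into the ops table. */
static const struct snd_pcm_ops foo_pcm_ops = {
	.open		= foo_pcm_open,
	.close		= snd_dmaengine_pcm_close,
	.trigger	= snd_dmaengine_pcm_trigger,
	.pointer	= snd_dmaengine_pcm_pointer,
};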
static int serial_pxa_suspend(struct device *dev)
{
	struct uart_pxa_port *sport = dev_get_drvdata(dev);
	struct uart_pxa_dma *pxa_dma = &sport->uart_dma;
	struct dma_tx_state dma_state;

	if (sport && (sport->ier & UART_IER_DMAE)) {
		int sent = 0;
		unsigned long flags;

		local_irq_save(flags);
		/*
		 * Stop TX and RX for suspend; on resume, the TX startup
		 * path is called and resets tx_stop to 0.
		 */
		pxa_dma->tx_stop = 1;
		pxa_dma->rx_stop = 1;
		pxa_dma->tx_saved_len = 0;
		if (dma_async_is_tx_complete(pxa_dma->txdma_chan,
					     pxa_dma->tx_cookie, NULL, NULL)
		    != DMA_SUCCESS) {
			dmaengine_pause(pxa_dma->txdma_chan);
			dmaengine_tx_status(pxa_dma->txdma_chan,
					    pxa_dma->tx_cookie, &dma_state);
			sent = pxa_dma->tx_size - dma_state.residue;
			pxa_dma->tx_saved_len = dma_state.residue;
			memcpy(pxa_dma->tx_buf_save,
			       pxa_dma->txdma_addr + sent,
			       dma_state.residue);
			stop_dma(sport, PXA_UART_TX);
		}
		if (dma_async_is_tx_complete(pxa_dma->rxdma_chan,
					     pxa_dma->rx_cookie, NULL, NULL)
		    != DMA_SUCCESS) {
			dmaengine_pause(pxa_dma->rxdma_chan);
			pxa_uart_receive_dma_cb(sport);
			stop_dma(sport, PXA_UART_RX);
		}
		local_irq_restore(flags);
	}

	if (sport)
		uart_suspend_port(&serial_pxa_reg, &sport->port);

	/* Remove the UART RX constraint, which would otherwise block the
	 * system from entering D1p. */
	if (del_timer_sync(&sport->pxa_timer))
		pxa_timer_handler((unsigned long)sport);

	return 0;
}
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
	struct s3c24xx_uart_port *ourport = to_ourport(port);
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct tty_port *t = &port->state->port;
	struct dma_tx_state state;
	enum dma_status dma_status;
	unsigned int received;

	if (rx_enabled(port)) {
		dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
		if (s3c24xx_serial_has_interrupt_mask(port))
			s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
					S3C64XX_UINTM);
		else
			disable_irq_nosync(ourport->rx_irq);
		rx_enabled(port) = 0;
	}

	if (dma && dma->rx_chan) {
		/* This is the RX teardown path, so pause the RX channel. */
		dmaengine_pause(dma->rx_chan);
		dma_status = dmaengine_tx_status(dma->rx_chan,
						 dma->rx_cookie, &state);
		if (dma_status == DMA_IN_PROGRESS ||
		    dma_status == DMA_PAUSED) {
			received = dma->rx_bytes_requested - state.residue;
			dmaengine_terminate_all(dma->rx_chan);
			s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
		}
	}
}
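Several of these drivers repeat the same sequence when tearing down an in-flight RX transfer: pause the channel so the residue stops moving, read the residue, and convert it into a received byte count before terminating. A minimal sketch of that pattern, assuming the caller tracks the cookie and the requested length; the helper name and parameters are hypothetical, while dmaengine_pause(), dmaengine_tx_status(), and struct dma_tx_state are the real dmaengine API.

#include <linux/dmaengine.h>

/*
 * Hypothetical helper: pause @chan, then report how many bytes of a
 * @bytes_requested transfer identified by @cookie have completed so far.
 * The channel is left paused; the caller decides whether to resume or
 * terminate it.
 */
static unsigned int pause_and_count_received(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     unsigned int bytes_requested)
{
	struct dma_tx_state state;

	dmaengine_pause(chan);
	dmaengine_tx_status(chan, cookie, &state);
	return bytes_requested - state.residue;
}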
static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (sirfport->tx_dma_chan) {
		if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
			dmaengine_pause(sirfport->tx_dma_chan);
			sirfport->tx_dma_state = TX_DMA_PAUSE;
		} else {
			if (!sirfport->is_atlas7)
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg) &
					~uint_en->sirfsoc_txfifo_empty_en);
			else
				wr_regl(port, SIRFUART_INT_EN_CLR,
					uint_en->sirfsoc_txfifo_empty_en);
		}
	} else {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_txfifo_empty_en);
	}
}
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct omap8250_priv *priv = p->port.private_data;
	struct uart_8250_dma *dma = p->dma;
	struct dma_tx_state state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->rx_dma_lock, flags);

	if (!dma->rx_running) {
		spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
		return;
	}

	ret = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (ret == DMA_IN_PROGRESS) {
		ret = dmaengine_pause(dma->rxchan);
		if (WARN_ON_ONCE(ret))
			priv->rx_dma_broken = true;
	}
	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);

	__dma_rx_do_complete(p);
	dmaengine_terminate_all(dma->rxchan);
}
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;
	struct dma_tx_state state;
	int dma_status;

	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	switch (iir & 0x3f) {
	case UART_IIR_RLSI:
		/* 8250_core handles errors and break interrupts */
		return -EIO;
	case UART_IIR_RX_TIMEOUT:
		/*
		 * If RCVR FIFO trigger level was not reached, complete the
		 * transfer and let 8250_core copy the remaining data.
		 */
		if (dma_status == DMA_IN_PROGRESS) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_complete(p);
		}
		return -ETIMEDOUT;
	default:
		break;
	}

	if (dma_status)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
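The descriptor's completion callback, __dma_rx_complete, is referenced above but its body is not shown. A minimal sketch of what such an RX completion handler typically does, assuming the same uart_8250_dma fields used in this function; this is illustrative only, not the 8250 driver's actual implementation.

/*
 * Illustrative sketch only, not the driver's real __dma_rx_complete:
 * read the residue to learn how many bytes actually landed in the DMA
 * buffer, push them to the tty layer, and flush the flip buffer.
 */
static void example_dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	int count;

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;
	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}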
static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
{
	unsigned int utrstat, ufstat, received;
	struct s3c24xx_uart_port *ourport = dev_id;
	struct uart_port *port = &ourport->port;
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
	struct tty_port *t = &port->state->port;
	unsigned long flags;
	struct dma_tx_state state;

	utrstat = rd_regl(port, S3C2410_UTRSTAT);
	ufstat = rd_regl(port, S3C2410_UFSTAT);

	spin_lock_irqsave(&port->lock, flags);

	if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
		s3c64xx_start_rx_dma(ourport);
		if (ourport->rx_mode == S3C24XX_RX_PIO)
			enable_rx_dma(ourport);
		goto finish;
	}

	if (ourport->rx_mode == S3C24XX_RX_DMA) {
		dmaengine_pause(dma->rx_chan);
		dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
		dmaengine_terminate_all(dma->rx_chan);
		received = dma->rx_bytes_requested - state.residue;
		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);

		enable_rx_pio(ourport);
	}

	s3c24xx_serial_rx_drain_fifo(ourport);

	if (tty) {
		tty_flip_buffer_push(t);
		tty_kref_put(tty);
	}

	wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);

finish:
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
static void sprd_stop_tx_dma(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_tx_state state;
	u32 trans_len;

	dmaengine_pause(sp->tx_dma.chn);

	dmaengine_tx_status(sp->tx_dma.chn, sp->tx_dma.cookie, &state);
	if (state.residue) {
		/*
		 * The residue here encodes the current DMA bus address, so
		 * its distance from the buffer's base address is the number
		 * of bytes already pushed out. Account for them.
		 */
		trans_len = state.residue - sp->tx_dma.phys_addr;
		xmit->tail = (xmit->tail + trans_len) & (UART_XMIT_SIZE - 1);
		port->icount.tx += trans_len;
		dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
				 sp->tx_dma.trans_len, DMA_TO_DEVICE);
	}

	dmaengine_terminate_all(sp->tx_dma.chn);
	sp->tx_dma.trans_len = 0;
}
static void s3c24xx_serial_stop_tx(struct uart_port *port)
{
	struct s3c24xx_uart_port *ourport = to_ourport(port);
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_tx_state state;
	int count;

	if (!tx_enabled(port))
		return;

	if (s3c24xx_serial_has_interrupt_mask(port))
		s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
	else
		disable_irq_nosync(ourport->tx_irq);

	if (dma && dma->tx_chan && ourport->tx_in_progress == S3C24XX_TX_DMA) {
		dmaengine_pause(dma->tx_chan);
		dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
		dmaengine_terminate_all(dma->tx_chan);
		dma_sync_single_for_cpu(ourport->port.dev,
					dma->tx_transfer_addr, dma->tx_size,
					DMA_TO_DEVICE);
		async_tx_ack(dma->tx_desc);
		count = dma->tx_bytes_requested - state.residue;
		xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
		port->icount.tx += count;
	}

	tx_enabled(port) = 0;
	ourport->tx_in_progress = 0;

	if (port->flags & UPF_CONS_FLOW)
		s3c24xx_serial_rx_enable(port);

	ourport->tx_mode = 0;
}
static enum hrtimer_restart
	sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	int count, inserted;
	struct dma_tx_state tx_state;
	struct tty_struct *tty;
	struct sirfsoc_register *ureg;
	struct circ_buf *xmit;
	struct sirfsoc_fifo_status *ufifo_st;
	int max_pio_cnt;

	sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
	port = &sirfport->port;
	inserted = 0;
	tty = port->state->port.tty;
	ureg = &sirfport->uart_reg->uart_reg;
	xmit = &sirfport->rx_dma_items.xmit;
	ufifo_st = &sirfport->uart_reg->fifo_status;

	dmaengine_tx_status(sirfport->rx_dma_chan,
			    sirfport->rx_dma_items.cookie, &tx_state);
	if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
	    sirfport->rx_last_pos) {
		xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
		sirfport->rx_last_pos = xmit->head;
		sirfport->pio_fetch_cnt = 0;
	}
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
				SIRFSOC_RX_DMA_BUF_SIZE);
	while (count > 0) {
		inserted = tty_insert_flip_string(tty->port,
			(const unsigned char *)&xmit->buf[xmit->tail], count);
		if (!inserted)
			goto next_hrt;
		port->icount.rx += inserted;
		xmit->tail = (xmit->tail + inserted) &
			(SIRFSOC_RX_DMA_BUF_SIZE - 1);
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
					SIRFSOC_RX_DMA_BUF_SIZE);
		tty_flip_buffer_push(tty->port);
	}
	/*
	 * If all RX DMA buffer data has been pushed into the tty buffer and
	 * only a little data (less than one DMA transfer unit) is left in
	 * the RX FIFO, fetch it out in PIO mode and switch back to DMA
	 * immediately.
	 */
	if (!inserted && !count &&
	    ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
	      SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
		dmaengine_pause(sirfport->rx_dma_chan);
		/* switch to pio mode */
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
		/*
		 * The UART controller's SWH_DMA_IO register has a
		 * CLEAR_RX_ADDR_EN bit: when it detects a switch from I/O to
		 * DMA mode, it clears the two low bits of the read pointer.
		 * The USP has a similar FRADDR_CLR_EN bit in
		 * USP_RX_DMA_IO_CTRL. Data fetched from the RX FIFO into the
		 * DMA buffer in PIO mode will be fetched again by DMA after
		 * switching back, because of a hardware quirk: after the
		 * switch back to DMA mode, the RX FIFO status still reports
		 * the number of bytes fetched in PIO mode. So record the
		 * fetched count to avoid the repeated fetch.
		 */
		max_pio_cnt = 3;
		while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
			 ufifo_st->ff_empty(port)) && max_pio_cnt--) {
			xmit->buf[xmit->head] =
				rd_regl(port, ureg->sirfsoc_rx_fifo_data);
			xmit->head = (xmit->head + 1) &
				(SIRFSOC_RX_DMA_BUF_SIZE - 1);
			sirfport->pio_fetch_cnt++;
		}
		/* switch back to dma mode */
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
			~SIRFUART_IO_MODE);
		dmaengine_resume(sirfport->rx_dma_chan);
	}
next_hrt:
	hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
	return HRTIMER_RESTART;
}
static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;

	switch (iir & 0x3f) {
	case UART_IIR_RLSI:
		/* 8250_core handles errors and break interrupts */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -EIO;
	case UART_IIR_RX_TIMEOUT:
		/*
		 * If RCVR FIFO trigger level was not reached, complete the
		 * transfer and let 8250_core copy the remaining data.
		 */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -ETIMEDOUT;
	case UART_IIR_RDI:
		/*
		 * The OMAP UART is a special BEAST. If we receive RDI we
		 * _have_ to have a DMA transfer programmed but it didn't
		 * work. One reason is that we were too slow and there were
		 * too many bytes in the FIFO, the UART counted wrong and
		 * never kicked the DMA engine to do anything. That means
		 * once we receive RDI on OMAP then the DMA won't do anything
		 * soon so we have to cancel the DMA transfer and purge the
		 * FIFO manually.
		 */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -ETIMEDOUT;
	default:
		break;
	}

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
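All three interrupt cases above perform the identical pause-then-complete sequence. A possible local refactor that captures it, assuming nothing beyond the fields already used in this function; the helper name is hypothetical and not part of the driver.

/* Hypothetical helper capturing the repeated teardown in the switch above. */
static void omap_8250_rx_force_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_do_complete(p, true);
	}
}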
static inline void dma_receive_chars(struct uart_pxa_port *up, int *status)
{
	struct tty_port *port = &up->port.state->port;
	unsigned char ch;
	int max_count = 256;
	int count = 0;
	unsigned char *tmp;
	unsigned int flag = TTY_NORMAL;
	struct uart_pxa_dma *pxa_dma = &up->uart_dma;
	struct dma_tx_state dma_state;

	/*
	 * Pause DMA channel and deal with the bytes received by DMA
	 */
	dmaengine_pause(pxa_dma->rxdma_chan);
	dmaengine_tx_status(pxa_dma->rxdma_chan, pxa_dma->rx_cookie,
			    &dma_state);
	count = DMA_BLOCK - dma_state.residue;
	tmp = pxa_dma->rxdma_addr;
	if (up->port.sysrq) {
		while (count > 0) {
			if (!uart_handle_sysrq_char(&up->port, *tmp)) {
				uart_insert_char(&up->port, *status, 0,
						 *tmp, flag);
				up->port.icount.rx++;
			}
			tmp++;
			count--;
		}
	} else {
		tty_insert_flip_string(port, tmp, count);
		up->port.icount.rx += count;
	}

	/* deal with the bytes in rx FIFO */
	do {
		ch = serial_in(up, UART_RX);
		flag = TTY_NORMAL;
		up->port.icount.rx++;

		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
					UART_LSR_FE | UART_LSR_OE))) {
			/*
			 * For statistics only
			 */
			if (*status & UART_LSR_BI) {
				*status &= ~(UART_LSR_FE | UART_LSR_PE);
				up->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&up->port))
					goto ignore_char2;
			} else if (*status & UART_LSR_PE) {
				up->port.icount.parity++;
			} else if (*status & UART_LSR_FE) {
				up->port.icount.frame++;
			}
			if (*status & UART_LSR_OE)
				up->port.icount.overrun++;

			/*
			 * Mask off conditions which should be ignored.
			 */
			*status &= up->port.read_status_mask;

#ifdef CONFIG_SERIAL_PXA_CONSOLE
			if (up->port.line == up->port.cons->index) {
				/* Recover the break flag from console xmit */
				*status |= up->lsr_break_flag;
				up->lsr_break_flag = 0;
			}
#endif
			if (*status & UART_LSR_BI)
				flag = TTY_BREAK;
			else if (*status & UART_LSR_PE)
				flag = TTY_PARITY;
			else if (*status & UART_LSR_FE)
				flag = TTY_FRAME;
		}
		if (!uart_handle_sysrq_char(&up->port, ch))
			uart_insert_char(&up->port, *status, UART_LSR_OE,
					 ch, flag);
ignore_char2:
		*status = serial_in(up, UART_LSR);
	} while ((*status & UART_LSR_DR) && (max_count-- > 0));

	tty_schedule_flip(port);
	stop_dma(up, 1);
	if (pxa_dma->rx_stop)
		return;
	pxa_uart_receive_dma_start(up);
}