/*
 * DMA RX completion callback, invoked by the dmaengine when an RX
 * transfer finishes.  Pushes the received data to the 8250 core and,
 * unless the tty layer has throttled the port, queues the next RX
 * transfer.
 */
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct omap8250_priv *priv = p->port.private_data;
	struct uart_8250_dma *dma = p->dma;
	struct dma_tx_state state;
	unsigned long flags;

	/* Serialize against the IRQ/flush paths that also drain RX. */
	spin_lock_irqsave(&p->port.lock, flags);

	/*
	 * If the tx status is not DMA_COMPLETE, then this is a delayed
	 * completion callback. A previous RX timeout flush would have
	 * already pushed the data, so exit.
	 */
	if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
			DMA_COMPLETE) {
		spin_unlock_irqrestore(&p->port.lock, flags);
		return;
	}
	__dma_rx_do_complete(p);

	/* Only re-arm RX DMA while the port is not throttled. */
	if (!priv->throttled)
		omap_8250_rx_dma(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}
/*
 * Cancel an in-flight RX DMA transfer and push whatever data has been
 * received so far to the 8250 core.
 *
 * The rx_dma_lock only covers inspecting/pausing the transfer; the
 * actual drain (__dma_rx_do_complete) and terminate run unlocked.
 */
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct omap8250_priv *priv = p->port.private_data;
	struct uart_8250_dma *dma = p->dma;
	struct dma_tx_state state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->rx_dma_lock, flags);

	/* No transfer queued — nothing to flush. */
	if (!dma->rx_running) {
		spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
		return;
	}

	ret = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (ret == DMA_IN_PROGRESS) {
		/*
		 * Pause the channel before draining it — presumably so the
		 * transfer state seen by __dma_rx_do_complete is stable
		 * (TODO confirm against __dma_rx_do_complete).  A failing
		 * pause means the engine can no longer be trusted for RX,
		 * so flag it broken.
		 */
		ret = dmaengine_pause(dma->rxchan);
		if (WARN_ON_ONCE(ret))
			priv->rx_dma_broken = true;
	}
	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);

	__dma_rx_do_complete(p);
	dmaengine_terminate_all(dma->rxchan);
}
static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir) { struct uart_8250_dma *dma = p->dma; struct dma_async_tx_descriptor *desc; switch (iir & 0x3f) { case UART_IIR_RLSI: /* 8250_core handles errors and break interrupts */ if (dma->rx_running) { dmaengine_pause(dma->rxchan); __dma_rx_do_complete(p, true); } return -EIO; case UART_IIR_RX_TIMEOUT: /* * If RCVR FIFO trigger level was not reached, complete the * transfer and let 8250_core copy the remaining data. */ if (dma->rx_running) { dmaengine_pause(dma->rxchan); __dma_rx_do_complete(p, true); } return -ETIMEDOUT; case UART_IIR_RDI: /* * The OMAP UART is a special BEAST. If we receive RDI we _have_ * a DMA transfer programmed but it didn't work. One reason is * that we were too slow and there were too many bytes in the * FIFO, the UART counted wrong and never kicked the DMA engine * to do anything. That means once we receive RDI on OMAP then * the DMA won't do anything soon so we have to cancel the DMA * transfer and purge the FIFO manually. */ if (dma->rx_running) { dmaengine_pause(dma->rxchan); __dma_rx_do_complete(p, true); } return -ETIMEDOUT; default: break; } if (dma->rx_running) return 0; desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) return -EBUSY; dma->rx_running = 1; desc->callback = __dma_rx_complete; desc->callback_param = p; dma->rx_cookie = dmaengine_submit(desc); dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr, dma->rx_size, DMA_FROM_DEVICE); dma_async_issue_pending(dma->rxchan); return 0; }
/* dmaengine RX completion callback: hand the received data to the core. */
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *up = param;

	__dma_rx_do_complete(up, false);
}
static void __dma_rx_complete(void *param) { __dma_rx_do_complete(param); omap_8250_rx_dma(param); }