/* Called with u->lock taken */
static void tegra_start_next_tx(struct tegra_uart_port *t)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit;

	xmit = &t->uport.state->xmit;
	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	dev_vdbg(t->uport.dev, "+%s %lu %d\n", __func__, count,
		t->tx_in_progress);

	if (count == 0)
		goto out;

	if (!t->use_tx_dma || count < TEGRA_UART_MIN_DMA)
		tegra_start_pio_tx(t, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_start_pio_tx(t, BYTES_TO_ALIGN(tail));
	else
		tegra_start_dma_tx(t, count);

out:
	dev_vdbg(t->uport.dev, "-%s", __func__);
}
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}
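Both Tegra variants above rely on two helpers whose definitions are not shown here. CIRC_CNT_TO_END comes from include/linux/circ_buf.h and returns how many bytes can be consumed before the circular buffer wraps; BYTES_TO_ALIGN is driver-local. A sketch of both, with the BYTES_TO_ALIGN definition inferred from usage (a 4-byte DMA alignment requirement is assumed):

/* From include/linux/circ_buf.h: count of bytes available up to the
 * end of the buffer, reading head and tail only once each so a
 * concurrent update cannot produce an inconsistent result. */
#define CIRC_CNT_TO_END(head, tail, size) \
	({int end = (size) - (tail); \
	  int n = ((head) + end) & ((size)-1); \
	  n < end ? n : end; })

/* Assumed driver-local helper: the low two bits of an address, i.e.
 * its misalignment from a 4-byte boundary. A non-zero result makes
 * the drivers send that many bytes by PIO before trying DMA. */
#define BYTES_TO_ALIGN(x)	((unsigned long)(x) & 0x3)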
/* Called with u->lock taken */
static void tegra_start_next_tx(struct tegra_uart_port *t)
{
	unsigned long tail;
	unsigned long count;
	unsigned long lsr;
	struct uart_port *u = &t->uport;
	struct circ_buf *xmit;

	xmit = &t->uport.state->xmit;
	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	dev_vdbg(t->uport.dev, "+%s %lu %d\n", __func__, count,
		t->tx_in_progress);

	if (count == 0) {
		/* Nothing left to send: for half-duplex IrDA, wait for
		 * the transmitter to drain before turning the link
		 * around and re-enabling the receiver. */
		if (t->is_irda && !irda_loop) {
			do {
				lsr = uart_readb(t, UART_LSR);
				if (lsr & UART_LSR_TEMT)
					break;
			} while (1);
			tegra_start_rx(u);
		}
		goto out;
	}

	/* IrDA is half duplex: stop the receiver while transmitting */
	if (t->is_irda && !irda_loop) {
		if (t->rx_in_progress)
			tegra_stop_rx(u);
	}

	if (!t->use_tx_dma || count < TEGRA_UART_MIN_DMA)
		tegra_start_pio_tx(t, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_start_pio_tx(t, BYTES_TO_ALIGN(tail));
	else
		tegra_start_dma_tx(t, count);

out:
	dev_vdbg(t->uport.dev, "-%s", __func__);
}
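The count == 0 path in this IrDA variant spins on UART_LSR_TEMT with no exit condition while holding the port lock, so a transmitter that never drains would hang the CPU. A bounded poll is the usual defensive alternative; a minimal sketch, where tegra_wait_tx_empty, the iteration cap, and the delay are all illustrative rather than taken from the driver:

/* Sketch: drain the transmitter before turning the half-duplex IrDA
 * link around, but give up after a bounded number of polls instead
 * of spinning forever. Requires <linux/delay.h>; the 10000-iteration
 * cap and 1 us delay are illustrative values. */
static void tegra_wait_tx_empty(struct tegra_uart_port *t)
{
	unsigned int tmout = 10000;

	while (!(uart_readb(t, UART_LSR) & UART_LSR_TEMT)) {
		if (--tmout == 0) {
			dev_err(t->uport.dev, "TX drain timed out\n");
			break;
		}
		udelay(1);
	}
}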
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
			!tran_size)
		return;
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) &
			~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
			uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires the buffer address and buffer length to both be
	 * aligned to 4 bytes, so we use PIO as follows:
	 * 1. if the address is not 4-byte aligned, use PIO for the first
	 *    1~3 bytes, then move to DMA for the remaining part, which is
	 *    now 4-byte aligned
	 * 2. if the buffer length is not 4-byte aligned, use DMA for the
	 *    aligned part first, then move to PIO for the remaining
	 *    1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		/* unaligned or tiny transfer: switch the FIFO to IO mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switches into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) &
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		tran_size &= ~(0x3);
		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		sirfport->transfer_size = tran_size;

		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}
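Stripped of the register traffic, one invocation of this function carves the pending bytes into at most two pieces: a PIO chunk (the unaligned head, or everything when fewer than 4 bytes remain) or a DMA chunk whose length is rounded down to a 4-byte multiple, with any leftover deferred to a later call. A standalone sketch of just that arithmetic (plain user-space C, no hardware; plan_one_pass and its arguments are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same definition the drivers above appear to use: the misalignment
 * of an address relative to a 4-byte boundary. */
#define BYTES_TO_ALIGN(x)	((uintptr_t)(x) & 0x3)

/* Illustrative only: mirror the carve-up sirfsoc_uart_tx_with_dma
 * performs on one invocation. 'buf' and 'len' stand in for
 * xmit->buf + xmit->tail and CIRC_CNT_TO_END(). */
static void plan_one_pass(const char *buf, unsigned long len)
{
	unsigned long pio_now = 0, dma_now = 0;

	if (len < 4 || BYTES_TO_ALIGN(buf)) {
		/* PIO path: unaligned head first, then everything if
		 * fewer than 4 bytes remain afterwards. */
		pio_now = BYTES_TO_ALIGN(buf);
		if (pio_now > len)	/* the real code relies on the PIO
					 * helper capping at what's there */
			pio_now = len;
		if (len - pio_now < 4)
			pio_now = len;
	} else {
		/* DMA path: length rounded down to a 4-byte multiple,
		 * as tran_size &= ~(0x3) does above; the 0~3 leftover
		 * bytes wait for a later pass. */
		dma_now = len & ~0x3UL;
	}
	printf("PIO now: %lu, DMA now: %lu, deferred: %lu\n",
	       pio_now, dma_now, len - pio_now - dma_now);
}

int main(void)
{
	_Alignas(4) char ring[64];	/* force a known alignment */

	plan_one_pass(ring + 1, 17);	/* unaligned start */
	plan_one_pass(ring + 4, 17);	/* aligned start, ragged length */
	return 0;
}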