/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	/* switch the RX data path from PIO to DMA mode */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	sirfport->rx_dma_items.xmit.tail =
		sirfport->rx_dma_items.xmit.head = 0;
	/*
	 * prepare one cyclic descriptor covering the whole RX buffer,
	 * raising an interrupt every half buffer
	 */
	sirfport->rx_dma_items.desc =
		dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
			sirfport->rx_dma_items.dma_addr,
			SIRFSOC_RX_DMA_BUF_SIZE, SIRFSOC_RX_DMA_BUF_SIZE / 2,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items.desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items.desc->callback_param = sirfport;
	sirfport->rx_dma_items.cookie =
		dmaengine_submit(sirfport->rx_dma_items.desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
	/* enable the RX DMA interrupt sources */
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) |
			SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
}
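/*
 * Hedged sketch only: the completion callback registered on the cyclic
 * descriptor above is not part of this snippet. Assuming the
 * rx_dma_items.xmit circ_buf mirrors the DMA buffer, that
 * SIRFSOC_RX_DMA_BUF_SIZE is a power of two, and that port->lock protects
 * the indices, a minimal handler would advance the head by one half-buffer
 * period and push the new data to the tty layer. The driver's real callback
 * may differ (e.g. residue accounting via dmaengine_tx_status()), and in
 * the actual file it would have to appear before its first use.
 */
static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
	struct sirfsoc_uart_port *sirfport = param;
	struct uart_port *port = &sirfport->port;
	struct circ_buf *xmit = &sirfport->rx_dma_items.xmit;
	unsigned long flags;
	int count;

	spin_lock_irqsave(&port->lock, flags);
	/* each cyclic period covers half of the DMA buffer */
	xmit->head = (xmit->head + SIRFSOC_RX_DMA_BUF_SIZE / 2) &
			(SIRFSOC_RX_DMA_BUF_SIZE - 1);
	while (xmit->tail != xmit->head) {
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
					SIRFSOC_RX_DMA_BUF_SIZE);
		port->icount.rx += tty_insert_flip_string(&port->state->port,
					xmit->buf + xmit->tail, count);
		xmit->tail = (xmit->tail + count) &
				(SIRFSOC_RX_DMA_BUF_SIZE - 1);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	tty_flip_buffer_push(&port->state->port);
}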
static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	if (sirfport->rx_dma_chan) {
		/* DMA RX: mask DMA and rx-done interrupts, stop transfers */
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
				uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
				SIRFUART_RX_DMA_INT_EN(port, uint_en) |
				uint_en->sirfsoc_rx_done_en);
		dmaengine_terminate_all(sirfport->rx_dma_chan);
	} else {
		/* PIO RX: mask the RX I/O interrupts only */
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
				SIRFUART_RX_IO_INT_EN(port, uint_en));
	}
}
/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&sirfport->rx_lock, flags);
	sirfport->rx_io_count = 0;
	/* switch the RX data path from PIO to DMA mode */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	/* queue one descriptor per RX loop buffer */
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		sirfsoc_rx_submit_one_dma_desc(port, i);
	sirfport->rx_completed = sirfport->rx_issued = 0;
	spin_lock_irqsave(&sirfport->rx_lock, flags);
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) |
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	int i;

	sirfport->rx_io_count = 0;
	/* switch the RX data path from PIO to DMA mode */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	/* queue one descriptor per RX loop buffer */
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		sirfsoc_rx_submit_one_dma_desc(port, i);
	sirfport->rx_completed = sirfport->rx_issued = 0;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) |
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
}
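/*
 * Hedged sketch only: sirfsoc_rx_submit_one_dma_desc() is called by the
 * loop above but not shown in this snippet. Assuming rx_dma_items[] is an
 * array of per-loop-buffer items (dma_addr, desc, cookie, xmit circ_buf),
 * a minimal implementation would prepare and submit one slave transfer per
 * buffer; the field layout and reuse of the completion callback name are
 * assumptions, not the driver's confirmed implementation.
 */
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	/* reset the software view of this loop buffer */
	sirfport->rx_dma_items[index].xmit.tail =
		sirfport->rx_dma_items[index].xmit.head = 0;
	/* one device-to-memory transfer covering the whole loop buffer */
	sirfport->rx_dma_items[index].desc =
		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
			sirfport->rx_dma_items[index].dma_addr,
			SIRFSOC_RX_DMA_BUF_SIZE, DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT);
	if (!sirfport->rx_dma_items[index].desc) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items[index].desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
	sirfport->rx_dma_items[index].cookie =
		dmaengine_submit(sirfport->rx_dma_items[index].desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
}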