/*
 * Kick off transmission: push characters into the TX FIFO via PIO,
 * start the FIFO, and unmask the TX interrupt so the ISR refills it.
 */
void sirfsoc_uart_start_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	unsigned long int_en;

	/* Prime the FIFO with one batch of characters, then let it drain. */
	sirfsoc_uart_pio_tx_chars(sirfport, 1);
	wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_START);

	/* Unmask the TX interrupt without disturbing other sources. */
	int_en = rd_regl(port, SIRFUART_INT_EN);
	wr_regl(port, SIRFUART_INT_EN, int_en | SIRFUART_TX_INT_EN);
}
static void sirfsoc_uart_start_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) sirfsoc_uart_tx_with_dma(sirfport); else { sirfsoc_uart_pio_tx_chars(sirfport, 1); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); if (!sirfport->is_marco) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)| uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_txfifo_empty_en); } }
static void sirfsoc_uart_start_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (sirfport->tx_dma_chan) sirfsoc_uart_tx_with_dma(sirfport); else { sirfsoc_uart_pio_tx_chars(sirfport, SIRFSOC_UART_IO_TX_REASONABLE_CNT); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)| uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_txfifo_empty_en); } }
static void sirfsoc_uart_start_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (sirfport->tx_dma_chan) sirfsoc_uart_tx_with_dma(sirfport); else { if (sirfport->uart_reg->uart_type == SIRF_USP_UART) wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port, ureg->sirfsoc_tx_rx_en) | SIRFUART_TX_EN); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP); sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)| uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_txfifo_empty_en); } }
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id) { unsigned long intr_status; unsigned long cts_status; unsigned long flag = TTY_NORMAL; struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id; struct uart_port *port = &sirfport->port; struct uart_state *state = port->state; struct circ_buf *xmit = &port->state->xmit; spin_lock(&port->lock); intr_status = rd_regl(port, SIRFUART_INT_STATUS); wr_regl(port, SIRFUART_INT_STATUS, intr_status); intr_status &= rd_regl(port, SIRFUART_INT_EN); if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT))) { if (intr_status & SIRFUART_RXD_BREAK) { if (uart_handle_break(port)) goto recv_char; uart_insert_char(port, intr_status, SIRFUART_RX_OFLOW, 0, TTY_BREAK); spin_unlock(&port->lock); return IRQ_HANDLED; } if (intr_status & SIRFUART_RX_OFLOW) port->icount.overrun++; if (intr_status & SIRFUART_FRM_ERR) { port->icount.frame++; flag = TTY_FRAME; } if (intr_status & SIRFUART_PARITY_ERR) flag = TTY_PARITY; wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET); wr_regl(port, SIRFUART_RX_FIFO_OP, 0); wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START); intr_status &= port->read_status_mask; uart_insert_char(port, intr_status, SIRFUART_RX_OFLOW_INT, 0, flag); } recv_char: if (intr_status & SIRFUART_CTS_INT_EN) { cts_status = !(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS); if (cts_status != 0) { uart_handle_cts_change(port, 1); } else { uart_handle_cts_change(port, 0); wake_up_interruptible(&state->port.delta_msr_wait); } } if (intr_status & SIRFUART_RX_IO_INT_EN) sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT); if (intr_status & SIRFUART_TX_INT_EN) { if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { spin_unlock(&port->lock); return IRQ_HANDLED; } else { sirfsoc_uart_pio_tx_chars(sirfport, SIRFSOC_UART_IO_TX_REASONABLE_CNT); if ((uart_circ_empty(xmit)) && (rd_regl(port, SIRFUART_TX_FIFO_STATUS) & SIRFUART_FIFOEMPTY_MASK(port))) sirfsoc_uart_stop_tx(port); } } 
spin_unlock(&port->lock); return IRQ_HANDLED; }
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id) { unsigned long intr_status; unsigned long cts_status; unsigned long flag = TTY_NORMAL; struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id; struct uart_port *port = &sirfport->port; struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; struct uart_state *state = port->state; struct circ_buf *xmit = &port->state->xmit; spin_lock(&port->lock); intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg); wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status); intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg); if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) { if (intr_status & uint_st->sirfsoc_rxd_brk) { port->icount.brk++; if (uart_handle_break(port)) goto recv_char; } if (intr_status & uint_st->sirfsoc_rx_oflow) port->icount.overrun++; if (intr_status & uint_st->sirfsoc_frm_err) { port->icount.frame++; flag = TTY_FRAME; } if (intr_status & uint_st->sirfsoc_parity_err) flag = TTY_PARITY; wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET); wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START); intr_status &= port->read_status_mask; uart_insert_char(port, intr_status, uint_en->sirfsoc_rx_oflow_en, 0, flag); } recv_char: if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) && (intr_status & SIRFUART_CTS_INT_ST(uint_st)) && !sirfport->tx_dma_state) { cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) & SIRFUART_AFC_CTS_STATUS; if (cts_status != 0) cts_status = 0; else cts_status = 1; uart_handle_cts_change(port, cts_status); wake_up_interruptible(&state->port.delta_msr_wait); } if (sirfport->rx_dma_chan) { if (intr_status & uint_st->sirfsoc_rx_timeout) sirfsoc_uart_handle_rx_tmo(sirfport); if 
(intr_status & uint_st->sirfsoc_rx_done) sirfsoc_uart_handle_rx_done(sirfport); } else { if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st)) sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT); } spin_unlock(&port->lock); tty_flip_buffer_push(&state->port); spin_lock(&port->lock); if (intr_status & uint_st->sirfsoc_txfifo_empty) { if (sirfport->tx_dma_chan) sirfsoc_uart_tx_with_dma(sirfport); else { if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { spin_unlock(&port->lock); return IRQ_HANDLED; } else { sirfsoc_uart_pio_tx_chars(sirfport, SIRFSOC_UART_IO_TX_REASONABLE_CNT); if ((uart_circ_empty(xmit)) && (rd_regl(port, ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_empty(port->line))) sirfsoc_uart_stop_tx(port); } } } spin_unlock(&port->lock); return IRQ_HANDLED; }
/*
 * Transmit the current circular-buffer chunk, using DMA for 4-byte-
 * aligned data and falling back to PIO for unaligned head bytes or
 * chunks shorter than 4 bytes. Resumes a paused DMA transfer instead
 * of starting a new one; a no-op while a transfer is already running.
 */
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	/* Contiguous chunk up to the end of the circular buffer. */
	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
			!tran_size)
		return;
	/* A paused transfer only needs resuming, not a new descriptor. */
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	/* Mask TX-FIFO-empty while DMA (or this function) drives the FIFO. */
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(uint_en->sirfsoc_txfifo_empty_en));
	else
		/*
		 * NOTE(review): other atlas7 paths in this file mask
		 * interrupts via ureg->sirfsoc_int_en_clr_reg; confirm
		 * SIRFUART_INT_EN_CLR names the same register.
		 */
		wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires buffer address and buffer length are both aligned with
	 * 4 bytes, so we use PIO for
	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
	 * bytes, and move to DMA for the left part aligned with 4bytes
	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
	 * part first, move to PIO for the left 1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		/* Unaligned or tiny chunk: switch the FIFO into IO mode. */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			/* PIO out the 1~3 bytes before the aligned part. */
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		/* Re-enable TX-FIFO-empty so the ISR continues in PIO mode. */
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		/* Truncate to a 4-byte multiple; the tail goes out later. */
		tran_size &= ~(0x3);
		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		/* Completion callback unmaps the buffer / advances the ring
		 * (defined elsewhere in this file). */
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		sirfport->transfer_size = tran_size;
		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}
/*
 * Top-level UART interrupt handler (latest revision): acknowledges and
 * demultiplexes RX error conditions, CTS changes, RX data reception and
 * TX-FIFO-empty events. RX data uses RX_DONE/RX_TIMEOUT interrupt
 * toggling in the PIO case (see comment below); TX refills by DMA or
 * PIO depending on whether a TX DMA channel was acquired.
 */
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
	unsigned long intr_status;
	unsigned long cts_status;
	unsigned long flag = TTY_NORMAL;
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct uart_state *state = port->state;
	struct circ_buf *xmit = &port->state->xmit;
	spin_lock(&port->lock);
	/* Read, acknowledge, then keep only the enabled sources. */
	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st,
				sirfport->uart_reg->uart_type)))) {
		if (intr_status & uint_st->sirfsoc_rxd_brk) {
			port->icount.brk++;
			if (uart_handle_break(port))
				goto recv_char;
		}
		if (intr_status & uint_st->sirfsoc_rx_oflow) {
			port->icount.overrun++;
			flag = TTY_OVERRUN;
		}
		if (intr_status & uint_st->sirfsoc_frm_err) {
			port->icount.frame++;
			flag = TTY_FRAME;
		}
		if (intr_status & uint_st->sirfsoc_parity_err) {
			port->icount.parity++;
			flag = TTY_PARITY;
		}
		/* Error recovery: reset and restart the RX FIFO. */
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
		intr_status &= port->read_status_mask;
		uart_insert_char(port, intr_status,
				uint_en->sirfsoc_rx_oflow_en, 0, flag);
	}
recv_char:
	/* CTS handling: real UARTs only, and never while TX DMA is active. */
	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
			!sirfport->tx_dma_state) {
		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
					SIRFUART_AFC_CTS_STATUS;
		if (cts_status != 0)
			cts_status = 0;
		else
			cts_status = 1;
		uart_handle_cts_change(port, cts_status);
		wake_up_interruptible(&state->port.delta_msr_wait);
	}
	if (!sirfport->rx_dma_chan &&
		(intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) {
		/*
		 * chip will trigger continuous RX_TIMEOUT interrupt
		 * in RXFIFO empty and not trigger if RXFIFO recevice
		 * data in limit time, original method use RX_TIMEOUT
		 * will trigger lots of useless interrupt in RXFIFO
		 * empty.RXFIFO received one byte will trigger RX_DONE
		 * interrupt.use RX_DONE to wait for data received
		 * into RXFIFO, use RX_THD/RX_FULL for lots data receive
		 * and use RX_TIMEOUT for the last left data.
		 */
		if (intr_status & uint_st->sirfsoc_rx_done) {
			/* Data arrived: swap RX_DONE for RX_TIMEOUT. */
			if (!sirfport->is_atlas7) {
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_done_en));
				wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)
				| (uint_en->sirfsoc_rx_timeout_en));
			} else {
				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_rx_done_en);
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_timeout_en);
			}
		} else {
			if (intr_status & uint_st->sirfsoc_rx_timeout) {
				/* FIFO drained: swap back to RX_DONE. */
				if (!sirfport->is_atlas7) {
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_timeout_en));
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| (uint_en->sirfsoc_rx_done_en));
				} else {
					wr_regl(port,
						ureg->sirfsoc_int_en_clr_reg,
						uint_en->sirfsoc_rx_timeout_en);
					wr_regl(port, ureg->sirfsoc_int_en_reg,
						uint_en->sirfsoc_rx_done_en);
				}
			}
			sirfsoc_uart_pio_rx_chars(port, port->fifosize);
		}
	}
	/* Push to the line discipline without holding the port lock. */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&state->port);
	spin_lock(&port->lock);
	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
		if (sirfport->tx_dma_chan)
			sirfsoc_uart_tx_with_dma(sirfport);
		else {
			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
				/* Nothing left to send. */
				spin_unlock(&port->lock);
				return IRQ_HANDLED;
			} else {
				sirfsoc_uart_pio_tx_chars(sirfport,
						port->fifosize);
				/* Stop TX once ring and HW FIFO both drain. */
				if ((uart_circ_empty(xmit)) &&
				    (rd_regl(port, ureg->sirfsoc_tx_fifo_status)
				     & ufifo_st->ff_empty(port)))
					sirfsoc_uart_stop_tx(port);
			}
		}
	}
	spin_unlock(&port->lock);
	return IRQ_HANDLED;
}