static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	unsigned int count;
	unsigned long flags;

	spin_lock_irqsave(&sirfport->rx_lock, flags);
	/* Push any completed-but-unconsumed loop-buffer slots to the tty. */
	while (sirfport->rx_completed != sirfport->rx_issued) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
					SIRFSOC_RX_DMA_BUF_SIZE);
		sirfport->rx_completed++;
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	/* Flush the partially filled slot the timeout interrupted. */
	count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
			sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
			SIRFSOC_RX_DMA_BUF_SIZE);
	if (count > 0)
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
	/* Switch the receiver to PIO mode to drain the FIFO residue. */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	if (sirfport->rx_io_count == 4) {
		/* FIFO drained: ack rx_done, mask it and restart DMA RX. */
		spin_lock_irqsave(&sirfport->rx_lock, flags);
		sirfport->rx_io_count = 0;
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
					uint_en->sirfsoc_rx_done_en);
		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
		sirfsoc_uart_start_next_rx_dma(port);
	} else {
		/* More PIO bytes expected: ack and re-enable rx_done. */
		spin_lock_irqsave(&sirfport->rx_lock, flags);
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_done_en);
		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	}
}
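/*
 * A minimal sketch of the CIRC_CNT accounting used above.  CIRC_CNT()
 * comes from <linux/circ_buf.h> and counts the filled slots of a
 * power-of-two ring:
 *
 *	#define CIRC_CNT(head, tail, size)  (((head) - (tail)) & ((size) - 1))
 *
 * The subtraction happens in unsigned arithmetic, so the mask still
 * yields the right count when head has wrapped past tail; for example
 * CIRC_CNT(3, 13, 16) == 6.  The helper below is illustrative only and
 * is not part of the driver.
 */
#include <linux/circ_buf.h>

static unsigned int rx_item_fill_level(struct circ_buf *xmit)
{
	/* SIRFSOC_RX_DMA_BUF_SIZE must be a power of two for the mask
	 * in CIRC_CNT() to act as a valid modulo. */
	return CIRC_CNT(xmit->head, xmit->tail, SIRFSOC_RX_DMA_BUF_SIZE);
}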
static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long flags;

	spin_lock_irqsave(&sirfport->rx_lock, flags);
	while (sirfport->rx_completed != sirfport->rx_issued) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
					SIRFSOC_RX_DMA_BUF_SIZE);
		/* Re-queue the drained slot only while the RX timeout
		 * interrupt is still enabled, i.e. DMA RX is active. */
		if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
				uint_en->sirfsoc_rx_timeout_en)
			sirfsoc_rx_submit_one_dma_desc(port,
					sirfport->rx_completed++);
		else
			sirfport->rx_completed++;
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
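/*
 * Both functions above are tasklet bodies, so they run in softirq
 * context after the hard-IRQ handler defers to them.  A minimal sketch
 * of the wiring, e.g. from the driver's probe path; the tasklet field
 * names and the helper below are assumptions for illustration, not
 * taken from the code above.
 */
#include <linux/interrupt.h>

static void sirfsoc_uart_init_tasklets(struct sirfsoc_uart_port *sirfport)
{
	tasklet_init(&sirfport->rx_tmo_process_tasklet,
			sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
	tasklet_init(&sirfport->rx_dma_complete_tasklet,
			sirfsoc_uart_rx_dma_complete_tl,
			(unsigned long)sirfport);
}

/*
 * The interrupt handler would then schedule the matching tasklet, e.g.:
 *
 *	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
 */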
/*
 * Variant of the completion tasklet that asks the dmaengine for the
 * status of each descriptor's cookie instead of comparing the driver's
 * own rx_completed/rx_issued indices, takes the generic port->lock
 * rather than the private rx_lock, and pushes the flip buffer itself.
 */
static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long flags;
	struct dma_tx_state tx_state;

	spin_lock_irqsave(&port->lock, flags);
	while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
			sirfport->rx_dma_items[sirfport->rx_completed].cookie,
			&tx_state)) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
					SIRFSOC_RX_DMA_BUF_SIZE);
		if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
				uint_en->sirfsoc_rx_timeout_en)
			sirfsoc_rx_submit_one_dma_desc(port,
					sirfport->rx_completed++);
		else
			sirfport->rx_completed++;
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	spin_unlock_irqrestore(&port->lock, flags);
	tty_flip_buffer_push(&port->state->port);
}
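/*
 * A minimal sketch of the per-descriptor completion test driving the
 * loop above: dmaengine_tx_status() from <linux/dmaengine.h> reports
 * the engine's progress for a given cookie, and DMA_COMPLETE means
 * that descriptor has fully finished.  The helper name is illustrative
 * and not part of the driver.
 */
#include <linux/dmaengine.h>

static bool sirfsoc_rx_desc_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state tx_state;

	return dmaengine_tx_status(chan, cookie, &tx_state) == DMA_COMPLETE;
}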