static void sirfsoc_uart_stop_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (sirfport->tx_dma_chan) { if (sirfport->tx_dma_state == TX_DMA_RUNNING) { dmaengine_pause(sirfport->tx_dma_chan); sirfport->tx_dma_state = TX_DMA_PAUSE; } else { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, SIRFUART_INT_EN_CLR, uint_en->sirfsoc_txfifo_empty_en); } } else { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, SIRFUART_INT_EN_CLR, uint_en->sirfsoc_txfifo_empty_en); } }
static unsigned int sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; unsigned int ch, rx_count = 0; struct tty_struct *tty; tty = tty_port_tty_get(&port->state->port); if (!tty) return -ENODEV; while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) & ufifo_st->ff_empty(port->line))) { ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) | SIRFUART_DUMMY_READ; if (unlikely(uart_handle_sysrq_char(port, ch))) continue; uart_insert_char(port, 0, 0, ch, TTY_NORMAL); rx_count++; if (rx_count >= max_rx_count) break; } sirfport->rx_io_count += rx_count; port->icount.rx += rx_count; return rx_count; }
/*
 * Console setup callback: validate the console index, optionally parse
 * "baud,parity,bits,flow" options, and program the port for console use.
 *
 * Fixes: the original indexed sirfsoc_uart_ports[co->index] BEFORE
 * range-checking co->index, reading out of bounds for a bad index.
 */
static int __init sirfsoc_uart_console_setup(struct console *co, char *options)
{
	unsigned int baud = 115200;
	unsigned int bits = 8;
	unsigned int parity = 'n';
	unsigned int flow = 'n';
	struct uart_port *port;
	struct sirfsoc_uart_port *sirfport;
	struct sirfsoc_register *ureg;

	/* validate the index before using it to address the port array */
	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
		return -EINVAL;
	port = &sirfsoc_uart_ports[co->index].port;
	sirfport = to_sirfport(port);
	ureg = &sirfport->uart_reg->uart_reg;
	if (!port->mapbase)
		return -ENODEV;
	/* enable usp in mode1 register */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
				SIRFSOC_USP_ENDIAN_CTRL_LSBF);
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	port->cons = co;
	/* default console tx/rx transfer using io mode */
	sirfport->rx_dma_chan = NULL;
	sirfport->tx_dma_chan = NULL;
	return uart_set_options(port, co, baud, parity, bits, flow);
}
/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long flags;
	int i;

	/* switch the RX path from PIO to DMA mode under the rx lock */
	spin_lock_irqsave(&sirfport->rx_lock, flags);
	sirfport->rx_io_count = 0;
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	/*
	 * Queue one descriptor per loop buffer.  The lock is dropped here,
	 * presumably because descriptor submission must not run under a
	 * spinlock -- TODO confirm against sirfsoc_rx_submit_one_dma_desc().
	 */
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		sirfsoc_rx_submit_one_dma_desc(port, i);
	sirfport->rx_completed = sirfport->rx_issued = 0;
	/*
	 * Finally unmask the RX DMA interrupts.  Marco ports write the
	 * enable mask directly; other ports read-modify-write it.
	 */
	spin_lock_irqsave(&sirfport->rx_lock, flags);
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) |
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	/* take the RX path out of PIO mode so the FIFO feeds the DMA engine */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	/* restart the software circular buffer from the beginning */
	sirfport->rx_dma_items.xmit.tail =
		sirfport->rx_dma_items.xmit.head = 0;
	/* cyclic transfer over the whole buffer, interrupting every half */
	sirfport->rx_dma_items.desc =
		dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
		sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		SIRFSOC_RX_DMA_BUF_SIZE / 2, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items.desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items.desc->callback_param = sirfport;
	sirfport->rx_dma_items.cookie =
		dmaengine_submit(sirfport->rx_dma_items.desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
	/*
	 * Unmask the RX DMA interrupts.  Atlas7 writes the enable mask
	 * directly; other ports read-modify-write the enable register.
	 */
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) |
			SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
}
/*
 * Shut the port down: mask all interrupts, release IRQ lines, stop
 * modem-status / flow-control handling, and wait for the hrtimer-driven
 * RX DMA path to drain before cancelling the timer.
 */
static void sirfsoc_uart_shutdown(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct circ_buf *xmit;

	xmit = &sirfport->rx_dma_items.xmit;
	/* mask every interrupt source before releasing the IRQ line */
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
	else
		wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL);
	free_irq(port->irq, sirfport);
	if (sirfport->ms_enabled)
		sirfsoc_uart_disable_ms(port);
	/* USP ports with HW flow control drive RTS/CTS through GPIOs */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
			sirfport->hw_flow_ctrl) {
		gpio_set_value(sirfport->rts_gpio, 1);
		free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
	}
	if (sirfport->tx_dma_chan)
		sirfport->tx_dma_state = TX_DMA_IDLE;
	if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
		/*
		 * Busy-wait until the RX FIFO holds no more data than the
		 * PIO path already fetched, or the DMA circular buffer has
		 * pending bytes.  NOTE(review): this loop has no timeout --
		 * if the hardware wedges it spins forever; confirm this is
		 * acceptable in the shutdown path.
		 */
		while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
			SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) &&
			!CIRC_CNT(xmit->head, xmit->tail,
			SIRFSOC_RX_DMA_BUF_SIZE))
			;
		sirfport->is_hrt_enabled = false;
		hrtimer_cancel(&sirfport->hrt);
	}
}
static void sirfsoc_uart_stop_rx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); if (sirfport->rx_dma_chan) { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) | uint_en->sirfsoc_rx_done_en)); else wr_regl(port, SIRFUART_INT_EN_CLR, SIRFUART_RX_DMA_INT_EN(port, uint_en)| uint_en->sirfsoc_rx_done_en); dmaengine_terminate_all(sirfport->rx_dma_chan); } else { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)& ~(SIRFUART_RX_IO_INT_EN(port, uint_en))); else wr_regl(port, SIRFUART_INT_EN_CLR, SIRFUART_RX_IO_INT_EN(port, uint_en)); } }
/*
 * Claim the UART's MMIO region so no other driver can map it.
 *
 * Returns 0 on success, -EBUSY if the region is already in use.
 * Improvement: request_mem_region() returns struct resource *, so use
 * that type instead of an opaque void * for the result.
 */
static int sirfsoc_uart_request_port(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
	struct resource *res;

	res = request_mem_region(port->mapbase, SIRFUART_MAP_SIZE,
			uart_param->port_name);
	return res ? 0 : -EBUSY;
}
/* Power-management hook: gate the port clock on power-up/down. */
static void
sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
		unsigned int oldstate)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	/* state == 0 means power the port up; anything else powers it down */
	if (state)
		clk_disable_unprepare(sirfport->clk);
	else
		clk_prepare_enable(sirfport->clk);
}
/* Report TIOCSER_TEMT once the TX FIFO has fully drained, else 0. */
static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	unsigned long status;

	status = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
	if (status & ufifo_st->ff_empty(port))
		return TIOCSER_TEMT;
	return 0;
}
/* Kick off PIO transmission and unmask the TX interrupt. */
void sirfsoc_uart_start_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	unsigned long int_en;

	/* prime the FIFO with one batch of characters, then start it */
	sirfsoc_uart_pio_tx_chars(sirfport, 1);
	wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_START);
	int_en = rd_regl(port, SIRFUART_INT_EN) | SIRFUART_TX_INT_EN;
	wr_regl(port, SIRFUART_INT_EN, int_en);
}
/* Console putchar: spin until the TX FIFO has room, then write @ch. */
static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;

	for (;;) {
		if (!(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
				ufifo_st->ff_full(port->line)))
			break;
		cpu_relax();
	}
	wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch);
}
/* Tear the port down: mask all interrupts, release the IRQ, stop MS. */
static void sirfsoc_uart_shutdown(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	wr_regl(port, SIRFUART_INT_EN, 0);
	free_irq(port->irq, sirfport);
	if (!sirfport->ms_enabled)
		return;
	sirfsoc_uart_disable_ms(port);
	sirfport->ms_enabled = 0;
}
/* Disable hardware flow control and mask the CTS change interrupt. */
static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	unsigned long val;

	sirfport->ms_enabled = 0;
	if (!sirfport->hw_flow_ctrl)
		return;
	/* clear the AFC enable/threshold bits */
	val = rd_regl(port, SIRFUART_AFC_CTRL) & ~0x3FF;
	wr_regl(port, SIRFUART_AFC_CTRL, val);
	val = rd_regl(port, SIRFUART_INT_EN) & ~SIRFUART_CTS_INT_EN;
	wr_regl(port, SIRFUART_INT_EN, val);
}
/* Drive RTS via the AFC RX-threshold bits when HW flow control is on. */
static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	unsigned int afc;

	if (!sirfport->hw_flow_ctrl)
		return;
	/* preserve the upper AFC bits, replace the low control byte */
	afc = rd_regl(port, SIRFUART_AFC_CTRL) & ~0xFF;
	if (mctrl & TIOCM_RTS)
		afc |= SIRFUART_AFC_CTRL_RX_THD;
	wr_regl(port, SIRFUART_AFC_CTRL, afc);
}
/* Set or clear the break condition (real UARTs only). */
static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	unsigned long lcr;

	if (sirfport->uart_reg->uart_type != SIRF_REAL_UART)
		return;
	lcr = rd_regl(port, ureg->sirfsoc_line_ctrl);
	if (break_state)
		lcr |= SIRFUART_SET_BREAK;
	else
		lcr &= ~SIRFUART_SET_BREAK;
	wr_regl(port, ureg->sirfsoc_line_ctrl, lcr);
}
static void sirfsoc_uart_enable_ms(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); unsigned long reg; unsigned long flg; if (!sirfport->hw_flow_ctrl) return; flg = SIRFUART_AFC_RX_EN | SIRFUART_AFC_TX_EN; reg = rd_regl(port, SIRFUART_AFC_CTRL); wr_regl(port, SIRFUART_AFC_CTRL, reg | flg); reg = rd_regl(port, SIRFUART_INT_EN); wr_regl(port, SIRFUART_INT_EN, reg | SIRFUART_CTS_INT_EN); uart_handle_cts_change(port, !(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS)); sirfport->ms_enabled = 1; }
/*
 * Report modem lines.  DCD and DSR are always asserted; CTS reflects the
 * AFC status when hardware flow control is active, and is reported as
 * asserted otherwise so the serial core never stalls transmission.
 *
 * Fixes: when ms_enabled was set but hw_flow_ctrl was not, control fell
 * through both conditions into the "CTS deasserted" return, even though
 * no flow-control line exists on such a port.  The newer variant of this
 * driver asserts CTS in that case; do the same here.
 */
static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	if (!sirfport->ms_enabled || !sirfport->hw_flow_ctrl)
		goto cts_asserted;
	if (!(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS))
		goto cts_asserted;
	return TIOCM_CAR | TIOCM_DSR;
cts_asserted:
	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}
static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); if (!state) { if (sirfport->is_bt_uart) { clk_prepare_enable(sirfport->clk_noc); clk_prepare_enable(sirfport->clk_general); } clk_prepare_enable(sirfport->clk); } else { clk_disable_unprepare(sirfport->clk); if (sirfport->is_bt_uart) { clk_disable_unprepare(sirfport->clk_general); clk_disable_unprepare(sirfport->clk_noc); } } }
static void sirfsoc_uart_start_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) sirfsoc_uart_tx_with_dma(sirfport); else { sirfsoc_uart_pio_tx_chars(sirfport, 1); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); if (!sirfport->is_marco) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)| uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_txfifo_empty_en); } }
static void sirfsoc_uart_start_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (sirfport->tx_dma_chan) sirfsoc_uart_tx_with_dma(sirfport); else { sirfsoc_uart_pio_tx_chars(sirfport, SIRFSOC_UART_IO_TX_REASONABLE_CNT); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)| uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_txfifo_empty_en); } }
static void sirfsoc_uart_shutdown(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, 0); else wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL); free_irq(port->irq, sirfport); if (sirfport->ms_enabled) sirfsoc_uart_disable_ms(port); if (sirfport->uart_reg->uart_type == SIRF_USP_UART && sirfport->hw_flow_ctrl) { gpio_set_value(sirfport->rts_gpio, 1); free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport); } if (sirfport->tx_dma_chan) sirfport->tx_dma_state = TX_DMA_IDLE; }
static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; unsigned int assert = mctrl & TIOCM_RTS; unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0; unsigned int current_val; if (mctrl & TIOCM_LOOP) { if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) wr_regl(port, ureg->sirfsoc_line_ctrl, rd_regl(port, ureg->sirfsoc_line_ctrl) | SIRFUART_LOOP_BACK); else wr_regl(port, ureg->sirfsoc_mode1, rd_regl(port, ureg->sirfsoc_mode1) | SIRFSOC_USP_LOOP_BACK_CTRL); } else { if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) wr_regl(port, ureg->sirfsoc_line_ctrl, rd_regl(port, ureg->sirfsoc_line_ctrl) & ~SIRFUART_LOOP_BACK); else wr_regl(port, ureg->sirfsoc_mode1, rd_regl(port, ureg->sirfsoc_mode1) & ~SIRFSOC_USP_LOOP_BACK_CTRL); } if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled) return; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF; val |= current_val; wr_regl(port, ureg->sirfsoc_afc_ctrl, val); } else { if (!val) gpio_set_value(sirfport->rts_gpio, 1); else gpio_set_value(sirfport->rts_gpio, 0); } }
/*
 * Prepare, submit and kick one RX DMA descriptor for ring slot @index.
 *
 * The slot's circular-buffer indices are reset so the completion callback
 * starts filling from the beginning of its buffer.  On prep failure the
 * slot is left unsubmitted after logging an error.
 */
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	sirfport->rx_dma_items[index].xmit.tail =
		sirfport->rx_dma_items[index].xmit.head = 0;
	sirfport->rx_dma_items[index].desc =
		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
			sirfport->rx_dma_items[index].dma_addr,
			SIRFSOC_RX_DMA_BUF_SIZE, DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT);
	if (!sirfport->rx_dma_items[index].desc) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items[index].desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
	sirfport->rx_dma_items[index].cookie =
		dmaengine_submit(sirfport->rx_dma_items[index].desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
}
/* submit rx dma task into dmaengine */ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; int i; sirfport->rx_io_count = 0; wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) & ~SIRFUART_IO_MODE); for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) sirfsoc_rx_submit_one_dma_desc(port, i); sirfport->rx_completed = sirfport->rx_issued = 0; if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | SIRFUART_RX_DMA_INT_EN(port, uint_en)); else wr_regl(port, ureg->sirfsoc_int_en_reg, SIRFUART_RX_DMA_INT_EN(port, uint_en)); }
static int sirfsoc_uart_startup(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); unsigned int index = port->line; int ret; set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN); ret = request_irq(port->irq, sirfsoc_uart_isr, 0, SIRFUART_PORT_NAME, sirfport); if (ret != 0) { dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n", index, port->irq); goto irq_err; } startup_uart_controller(port); enable_irq(port->irq); irq_err: return ret; }
static void sirfsoc_uart_disable_ms(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (!sirfport->hw_flow_ctrl) return; sirfport->ms_enabled = false; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { wr_regl(port, ureg->sirfsoc_afc_ctrl, rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)& ~uint_en->sirfsoc_cts_en); else wr_regl(port, SIRFUART_INT_EN_CLR, uint_en->sirfsoc_cts_en); } else disable_irq(gpio_to_irq(sirfport->cts_gpio)); }
static void sirfsoc_uart_start_rx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; sirfport->rx_io_count = 0; wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET); wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START); if (sirfport->rx_dma_chan) sirfsoc_uart_start_next_rx_dma(port); else { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | SIRFUART_RX_IO_INT_EN(port, uint_en)); else wr_regl(port, ureg->sirfsoc_int_en_reg, SIRFUART_RX_IO_INT_EN(port, uint_en)); } }
static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled) goto cts_asserted; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) & SIRFUART_AFC_CTS_STATUS)) goto cts_asserted; else goto cts_deasserted; } else { if (!gpio_get_value(sirfport->cts_gpio)) goto cts_asserted; else goto cts_deasserted; } cts_deasserted: return TIOCM_CAR | TIOCM_DSR; cts_asserted: return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; }
static void sirfsoc_uart_enable_ms(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (!sirfport->hw_flow_ctrl) return; sirfport->ms_enabled = true; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { wr_regl(port, ureg->sirfsoc_afc_ctrl, rd_regl(port, ureg->sirfsoc_afc_ctrl) | SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | uint_en->sirfsoc_cts_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_cts_en); } else enable_irq(gpio_to_irq(sirfport->cts_gpio)); }