/*
 * UART interrupt handler (mgos HAL variant).
 *
 * Runs from IRAM with instrumentation disabled. Updates interrupt
 * statistics, handles software flow control (XOFF) when the RX FIFO
 * threshold cannot be raised further, masks the info interrupts and
 * hands further processing off to the (non-ISR) dispatcher.
 */
IRAM NOINSTR static void esp_handle_uart_int(struct mgos_uart_state *us) {
  if (us == NULL) return;
  const int uart_no = us->uart_no;
  /* Since both UARTs use the same int, we need to apply the mask manually. */
  const unsigned int int_st =
      READ_PERI_REG(UART_INT_ST(uart_no)) & READ_PERI_REG(UART_INT_ENA(uart_no));
  const struct mgos_uart_config *cfg = &us->cfg;
  if (int_st == 0) return;
  us->stats.ints++;
  if (int_st & UART_RXFIFO_OVF_INT_ST) us->stats.rx_overflows++;
  if (int_st & UART_CTS_CHG_INT_ST) {
    /* CTS went inactive while we still had data queued: peer throttled us. */
    if (esp_uart_cts(uart_no) != 0 && esp_uart_tx_fifo_len(uart_no) > 0) {
      us->stats.tx_throttles++;
    }
  }
  if (int_st & (UART_RX_INTS | UART_TX_INTS)) {
    int int_ena = UART_INFO_INTS;
    if (int_st & UART_RX_INTS) us->stats.rx_ints++;
    if (int_st & UART_TX_INTS) us->stats.tx_ints++;
    /* Try to postpone the next RX FIFO interrupt by raising the threshold;
     * if that is no longer possible, fall back to flow control. */
    if (adj_rx_fifo_full_thresh(us)) {
      int_ena |= UART_RXFIFO_FULL_INT_ENA;
    } else if (cfg->rx_fc_type == MGOS_UART_FC_SW) {
      /* Send XOFF and keep RX ints disabled */
      /* NOTE: busy-waits in ISR context for one free FIFO slot (127 of 128). */
      while (esp_uart_tx_fifo_len(uart_no) >= 127) {
      }
      esp_uart_tx_byte(uart_no, MGOS_UART_XOFF_CHAR);
      us->xoff_sent = true;
    }
    WRITE_PERI_REG(UART_INT_ENA(uart_no), int_ena);
    mgos_uart_schedule_dispatcher(uart_no, true /* from_isr */);
  }
  /* Acknowledge only the interrupts we actually observed. */
  WRITE_PERI_REG(UART_INT_CLR(uart_no), int_st);
}
/*
 * Moves as many pending bytes as possible from the software TX buffer
 * into the 128-byte hardware TX FIFO, then accounts for them in stats.
 * Stops early once the FIFO is full; remaining bytes stay queued.
 */
void mgos_uart_hal_dispatch_tx_top(struct mgos_uart_state *us) {
  const int uart_no = us->uart_no;
  struct mbuf *txb = &us->tx_buf;
  if (txb->len == 0) return;
  uint32_t written = 0;
  while (txb->len > 0) {
    /* Free space left in the hardware FIFO (depth is 128 bytes). */
    size_t space = 128 - esp_uart_tx_fifo_len(uart_no);
    size_t chunk = MIN(txb->len, space);
    if (chunk == 0) break; /* FIFO full — dispatcher will be rescheduled. */
    const char *p = txb->buf;
    const char *end = p + chunk;
    while (p != end) {
      esp_uart_tx_byte(uart_no, *p++);
    }
    written += chunk;
    mbuf_remove(txb, chunk);
  }
  us->stats.tx_bytes += written;
}
/*
 * UART interrupt handler (older miot HAL variant).
 *
 * Runs from IRAM with instrumentation disabled. Updates interrupt
 * statistics, masks RX/TX interrupts down to the info set and wakes
 * the dispatcher, which will process data and re-enable interrupts.
 */
IRAM NOINSTR static void esp_handle_uart_int(struct miot_uart_state *us) {
  const int uart_no = us->uart_no;
  /* Since both UARTs use the same int, we need to apply the mask manually. */
  const unsigned int int_st =
      READ_PERI_REG(UART_INT_ST(uart_no)) & READ_PERI_REG(UART_INT_ENA(uart_no));
  if (int_st == 0) return;
  us->stats.ints++;
  if (int_st & UART_RXFIFO_OVF_INT_ST) us->stats.rx_overflows++;
  if (int_st & UART_CTS_CHG_INT_ST) {
    /* CTS went inactive while we still had data queued: peer throttled us. */
    if (esp_uart_cts(uart_no) != 0 && esp_uart_tx_fifo_len(uart_no) > 0) {
      us->stats.tx_throttles++;
    }
  }
  if (int_st & (UART_RX_INTS | UART_TX_INTS)) {
    if (int_st & UART_RX_INTS) us->stats.rx_ints++;
    if (int_st & UART_TX_INTS) us->stats.tx_ints++;
    /* Wake up the processor and disable TX and RX ints until it runs. */
    WRITE_PERI_REG(UART_INT_ENA(uart_no), UART_INFO_INTS);
    miot_uart_schedule_dispatcher(uart_no);
  }
  /* Acknowledge only the interrupts we actually observed. */
  WRITE_PERI_REG(UART_INT_CLR(uart_no), int_st);
}
IRAM void miot_uart_dev_dispatch_tx_top(struct miot_uart_state *us) { int uart_no = us->uart_no; cs_rbuf_t *txb = &us->tx_buf; uint32_t txn = 0; /* TX */ if (txb->used > 0) { while (txb->used > 0) { int i; uint8_t *data; uint16_t len; int tx_av = us->cfg->tx_fifo_full_thresh - esp_uart_tx_fifo_len(uart_no); if (tx_av <= 0) break; len = cs_rbuf_get(txb, tx_av, &data); for (i = 0; i < len; i++, data++) { tx_byte(uart_no, *data); } txn += len; cs_rbuf_consume(txb, len); } us->stats.tx_bytes += txn; } }
/* Busy-waits until the hardware TX FIFO has completely drained. */
void mgos_uart_hal_flush_fifo(struct mgos_uart_state *us) {
  const int uart_no = us->uart_no;
  while (esp_uart_tx_fifo_len(uart_no) != 0) {
    /* spin until all queued bytes have been shifted out */
  }
}