/**
 * @brief app_uart event callback for the serialization PHY.
 *
 * Forwards driver events to the PHY layer: reports parity/overrun hardware
 * errors, pumps the next TX byte, and feeds received bytes into the HCI
 * receiver. Ignores a NULL event pointer.
 *
 * @param uart_evt Event delivered by the app_uart driver (may be NULL).
 */
static void ser_phy_uart_evt_callback(app_uart_evt_t * uart_evt)
{
    if (uart_evt == NULL)
    {
        return;
    }

    if (uart_evt->evt_type == APP_UART_COMMUNICATION_ERROR)
    {
        // Only parity and overrun errors are propagated upward. Break and
        // framing errors are always present while the app side is not
        // active, so they are deliberately ignored here.
        uint32_t error_src = uart_evt->data.error_communication;
        if (error_src & (UART_ERRORSRC_PARITY_Msk | UART_ERRORSRC_OVERRUN_Msk))
        {
            callback_hw_error(error_src);
        }
    }
    else if (uart_evt->evt_type == APP_UART_TX_EMPTY)
    {
        // TX register empty: push the next byte of the outgoing packet.
        (void)ser_phy_hci_tx_byte();
    }
    else if (uart_evt->evt_type == APP_UART_DATA)
    {
        // First byte received means the other side is now up: the RX
        // pull-down was only needed before the peer started driving the line.
        if (!m_other_side_active)
        {
            nrf_gpio_cfg_input(comm_params.rx_pin_no, NRF_GPIO_PIN_NOPULL);
            m_other_side_active = true;
        }

        m_rx_byte = uart_evt->data.value;
        ser_phi_hci_rx_byte(m_rx_byte);
    }
    else
    {
        // Any other event type is unexpected for this PHY.
        APP_ERROR_CHECK(NRF_ERROR_INTERNAL);
    }
}
/**
 * @brief USB CDC ACM user event handler for the serialization PHY.
 *
 * Drives the SLIP PHY over a virtual COM port: drains received bytes into
 * the HCI receiver, manages the double-buffered TX path (m_tx_buf0/m_tx_buf1),
 * and notifies the upper layer via m_ser_phy_hci_slip_event_handler.
 *
 * @param p_inst Class instance that generated the event.
 * @param event  CDC ACM user event to handle.
 */
static void cdc_acm_user_ev_handler(app_usbd_class_inst_t const * p_inst,
                                    app_usbd_cdc_acm_user_event_t event)
{
    app_usbd_cdc_acm_t const * p_cdc_acm = app_usbd_cdc_acm_class_get(p_inst);

    switch (event)
    {
        case APP_USBD_CDC_ACM_USER_EVT_PORT_OPEN:
            NRF_LOG_DEBUG("EVT_PORT_OPEN");
            if (!m_port_open)
            {
                ret_code_t ret_code;

                m_port_open = true;

                // Drain any bytes already buffered by the driver; each
                // successful 1-byte read is fed straight to the HCI receiver.
                // NRF_ERROR_IO_PENDING means the next read was armed and will
                // complete later via EVT_RX_DONE.
                do
                {
                    ret_code = app_usbd_cdc_acm_read(p_cdc_acm, &m_rx_byte, 1);
                    if (ret_code == NRF_SUCCESS)
                    {
                        ser_phi_hci_rx_byte(m_rx_byte);
                    }
                    else if (ret_code != NRF_ERROR_IO_PENDING)
                    {
                        APP_ERROR_CHECK(ret_code);
                    }
                } while (ret_code == NRF_SUCCESS);
            }
            break;

        case APP_USBD_CDC_ACM_USER_EVT_PORT_CLOSE:
            NRF_LOG_DEBUG("EVT_PORT_CLOSE");
            // If a transfer was cut short by the port closing, report it as
            // sent so the upper layer's state machine is not left hanging.
            if (m_tx_in_progress)
            {
                m_ser_phy_hci_slip_event.evt_type = SER_PHY_HCI_SLIP_EVT_PKT_SENT;
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
                m_tx_in_progress = false;
            }
            m_port_open = false;
            break;

        case APP_USBD_CDC_ACM_USER_EVT_TX_DONE:
            // If there is a pending transfer (the second buffer is ready to
            // be sent), start it immediately.
            if (m_tx_pending)
            {
                APP_ERROR_CHECK(app_usbd_cdc_acm_write(p_cdc_acm,
                    mp_tx_buf, m_tx_bytes));

                // Switch to the buffer that has just been sent completely
                // and now can be filled again.
                mp_tx_buf = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
                m_tx_bytes = 0;

                // Shift the event types: report the completed chunk's event
                // now, and remember the pending chunk's event for later.
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
                m_tx_evt_type = m_tx_pending_evt_type;
                m_tx_pending  = false;
            }
            else
            {
                m_tx_in_progress = false;
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
            }

            // If needed, notify the upper layer that the packet transfer is
            // complete (note that this notification may result in another
            // packet send request, so everything must be cleaned up above).
            if (m_ser_phy_hci_slip_event.evt_type != NO_EVENT)
            {
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
            }
            // And if the sending process is not yet finished, look what is
            // to be done next.
            if (m_tx_phase != PHASE_IDLE)
            {
                tx_buf_fill();
            }
            break;

        case APP_USBD_CDC_ACM_USER_EVT_RX_DONE:
            {
                ret_code_t ret_code;
                // Consume the byte that just completed, then keep reading as
                // long as the driver returns bytes synchronously; the loop
                // exits once the read goes pending (or fails).
                do
                {
                    ser_phi_hci_rx_byte(m_rx_byte);
                    ret_code = app_usbd_cdc_acm_read(p_cdc_acm, &m_rx_byte, 1);
                } while (ret_code == NRF_SUCCESS);
            }
            break;

        default:
            break;
    }
}
/**
 * @brief nrf_drv_uart event handler for the serialization PHY.
 *
 * Mirrors the CDC ACM handler for the raw UART transport: forwards
 * parity/overrun errors, manages the double-buffered TX path
 * (m_tx_buf0/m_tx_buf1), re-arms single-byte reception, and notifies the
 * upper layer via m_ser_phy_hci_slip_event_handler.
 *
 * @param p_event   UART driver event.
 * @param p_context Unused driver context pointer.
 */
static void uart_event_handler(nrf_drv_uart_event_t * p_event,
                               void * p_context)
{
    (void)p_context;

    switch (p_event->type)
    {
        case NRF_DRV_UART_EVT_ERROR:
            // Process the error only if this is a parity or overrun error.
            // Break and framing errors will always occur before the other
            // side becomes active.
            if (p_event->data.error.error_mask &
                (NRF_UART_ERROR_PARITY_MASK | NRF_UART_ERROR_OVERRUN_MASK))
            {
                // Pass error source to upper layer
                m_ser_phy_hci_slip_event.evt_type = SER_PHY_HCI_SLIP_EVT_HW_ERROR;
                m_ser_phy_hci_slip_event.evt_params.hw_error.error_code =
                    p_event->data.error.error_mask;
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
            }
            // Re-arm reception — an error event aborts the pending RX.
            APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));
            break;

        case NRF_DRV_UART_EVT_TX_DONE:
            // If there is a pending transfer (the second buffer is ready to
            // be sent), start it immediately.
            if (m_tx_pending)
            {
                APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buf, m_tx_bytes));

                // Switch to the buffer that has just been sent completely
                // and now can be filled again.
                mp_tx_buf = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
                m_tx_bytes = 0;

                // Shift the event types: report the completed chunk's event
                // now, and remember the pending chunk's event for later.
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
                m_tx_evt_type = m_tx_pending_evt_type;
                m_tx_pending  = false;
            }
            else
            {
                m_tx_in_progress = false;
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
            }

            // If needed, notify the upper layer that the packet transfer is
            // complete (note that this notification may result in another
            // packet send request, so everything must be cleaned up above).
            if (m_ser_phy_hci_slip_event.evt_type != NO_EVENT)
            {
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
            }
            // And if the sending process is not yet finished, look what is
            // to be done next.
            if (m_tx_phase != PHASE_IDLE)
            {
                tx_buf_fill();
            }
            break;

        case NRF_DRV_UART_EVT_RX_DONE:
            {
                // Copy the byte out first: re-arming RX below may overwrite
                // m_rx_buf before ser_phi_hci_rx_byte() runs.
                uint8_t rx_byte = m_rx_buf[0];
                APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));
                ser_phi_hci_rx_byte(rx_byte);
            }
            break;

        default:
            // Any other event type is unexpected for this PHY.
            APP_ERROR_CHECK(NRF_ERROR_INTERNAL);
    }
}