/**
 * @brief Class specific request data stage setup.
 *
 * Arms an OUT transfer on endpoint 0 into the class context's payload buffer
 * and registers @ref cdc_acm_req_out_data_cb to be called when the data
 * stage completes.
 *
 * @param[in] p_inst    Generic class instance.
 * @param[in] p_setup_ev Setup event.
 *
 * @return Standard error code.
 */
static ret_code_t cdc_acm_req_out_datastage(app_usbd_class_inst_t const * p_inst,
                                            app_usbd_setup_evt_t const *  p_setup_ev)
{
    app_usbd_cdc_acm_t const * p_acm = cdc_acm_get(p_inst);
    app_usbd_cdc_acm_ctx_t *   p_ctx = cdc_acm_ctx_get(p_acm);

    /* Remember which request this data stage belongs to and its length. */
    p_ctx->request.type = p_setup_ev->setup.bmRequest;
    p_ctx->request.len  = p_setup_ev->setup.wLength.w;

    /* Describe an OUT transfer into the context-owned payload buffer. */
    NRF_DRV_USBD_TRANSFER_OUT(transfer,
                              &p_ctx->request.payload,
                              p_ctx->request.len);

    ret_code_t ret;
    CRITICAL_REGION_ENTER();
    ret = app_usbd_core_setup_data_transfer(NRF_DRV_USBD_EPOUT0, &transfer);
    if (ret == NRF_SUCCESS)
    {
        /* Route completion of the data stage to the class callback. */
        const app_usbd_core_setup_data_handler_desc_t desc = {
            .handler   = cdc_acm_req_out_data_cb,
            .p_context = (void *)p_acm
        };
        ret = app_usbd_core_setup_data_handler_set(NRF_DRV_USBD_EPOUT0, &desc);
    }
    CRITICAL_REGION_EXIT();

    return ret;
}

/**
 * @brief Internal SETUP class OUT request handler.
 *
 * Handles SET_LINE_CODING (starts a data stage) and
 * SET_CONTROL_LINE_STATE (updates line state and notifies the
 * application on DTR transitions). Other requests are rejected.
 *
 * @param[in] p_inst     Generic class instance.
 * @param[in] p_setup_ev Setup event.
 *
 * @return Standard error code.
 */
static ret_code_t setup_req_class_out(app_usbd_class_inst_t const * p_inst,
                                      app_usbd_setup_evt_t const *  p_setup_ev)
{
    app_usbd_cdc_acm_t const * p_acm = cdc_acm_get(p_inst);
    app_usbd_cdc_acm_ctx_t *   p_ctx = cdc_acm_ctx_get(p_acm);

    switch (p_setup_ev->setup.bmRequest)
    {
        case APP_USBD_CDC_REQ_SET_LINE_CODING:
        {
            /* Host must send exactly one line coding structure. */
            if (p_setup_ev->setup.wLength.w != sizeof(app_usbd_cdc_line_coding_t))
            {
                return NRF_ERROR_NOT_SUPPORTED;
            }
            return cdc_acm_req_out_datastage(p_inst, p_setup_ev);
        }
        case APP_USBD_CDC_REQ_SET_CONTROL_LINE_STATE:
        {
            /* This request carries no data stage. */
            if (p_setup_ev->setup.wLength.w != 0)
            {
                return NRF_ERROR_NOT_SUPPORTED;
            }
            NRF_LOG_INFO("REQ_SET_CONTROL_LINE_STATE: 0x%x", p_setup_ev->setup.wValue.w);

            bool dtr_before = (0 != (p_ctx->line_state & APP_USBD_CDC_ACM_LINE_STATE_DTR));
            p_ctx->line_state = p_setup_ev->setup.wValue.w;
            bool dtr_after = (0 != (p_ctx->line_state & APP_USBD_CDC_ACM_LINE_STATE_DTR));

            /* Only a DTR transition is reported to the application. */
            if (dtr_before == dtr_after)
            {
                return NRF_SUCCESS;
            }

            user_event_handler(p_inst,
                               dtr_after ? APP_USBD_CDC_ACM_USER_EVT_PORT_OPEN
                                         : APP_USBD_CDC_ACM_USER_EVT_PORT_CLOSE);

            if (!dtr_after)
            {
                /* Port closed - abort any transfer pending on the DATA endpoints. */
                nrf_drv_usbd_ep_t ep;
                ep = data_ep_in_addr_get(p_inst);
                usbd_drv_ep_abort(ep);
                ep = data_ep_out_addr_get(p_inst);
                usbd_drv_ep_abort(ep);
            }
            return NRF_SUCCESS;
        }
        default:
            break;
    }

    return NRF_ERROR_NOT_SUPPORTED;
}
/** * @brief Function for transferring data. * * @note Transmission will be stopped when error or timeout occurs. * * @param[in] p_instance TWI. * @param[in] address Address of specific slave device (only 7 LSB). * @param[in] p_data Pointer to a receive buffer. * @param[in] length Number of bytes to be received. * @param[in] xfer_pending After a specified number of bytes transmission will be * suspended (if xfer_pending is set) or stopped (if not) * @param[in] is_tx Indicate transfer direction (true for master to slave transmission). * * @retval NRF_SUCCESS If the procedure was successful. * @retval NRF_ERROR_BUSY Driver is not ready for new transfer. * @retval NRF_ERROR_INTERNAL NRF_TWI_EVENTS_ERROR or timeout has occured (only in blocking mode). */ static ret_code_t twi_transfer(nrf_drv_twi_t const * const p_instance, uint8_t address, uint8_t const * p_data, uint32_t length, bool xfer_pending, bool is_tx) { ASSERT(m_cb[p_instance->instance_id].state == NRF_DRV_STATE_POWERED_ON); ASSERT(length > 0); volatile transfer_t * p_transfer = &(m_cb[p_instance->instance_id].transfer); bool is_busy = false; CRITICAL_REGION_ENTER(); if (p_transfer->transfer_in_progress) { is_busy = true; } else { p_transfer->transfer_in_progress = true; } CRITICAL_REGION_EXIT(); if (is_busy) { return NRF_ERROR_BUSY; } p_transfer->address = address; p_transfer->length = (uint16_t)length; p_transfer->p_data = (uint8_t *)p_data; p_transfer->count = 0; p_transfer->xfer_pending = xfer_pending; p_transfer->is_tx = is_tx; p_transfer->error_condition = false; state_machine(p_instance, p_transfer->is_tx ? TX_ADDR_REQ : RX_ADDR_REQ); if (!m_handlers[p_instance->instance_id]) { // blocking mode sm_evt_t evt = p_transfer->is_tx ? 
TX_DONE : RX_DONE; do { if (twi_action_wait(p_instance) == false) { nrf_twi_event_clear(p_instance->p_reg, NRF_TWI_EVENTS_ERROR); evt = ON_ERROR; } nrf_twi_event_clear(p_instance->p_reg, p_transfer->end_event); state_machine(p_instance, evt); if (p_transfer->error_condition) { p_transfer->transfer_in_progress = false; return NRF_ERROR_INTERNAL; } } while (p_transfer->count < p_transfer->length); p_transfer->transfer_in_progress = false; } return NRF_SUCCESS; }
uint32_t ser_phy_hci_slip_tx_pkt_send(const ser_phy_hci_pkt_params_t * p_header, const ser_phy_hci_pkt_params_t * p_payload, const ser_phy_hci_pkt_params_t * p_crc) { if (p_header == NULL) { return NRF_ERROR_NULL; } CRITICAL_REGION_ENTER(); // If some packet is already transmitted, schedule this new one to be sent // as next. A critical region is needed here to ensure that the transmission // won't finish before the following assignments are done. if (m_tx_phase != PHASE_IDLE) { m_tx_next_packet.header = *p_header; if (p_payload == NULL) { m_tx_next_packet.payload.p_buffer = NULL; } else { m_tx_next_packet.payload = *p_payload; } if (p_crc == NULL) { m_tx_next_packet.crc.p_buffer = NULL; } else { m_tx_next_packet.crc = *p_crc; } } else { m_tx_curr_packet.header = *p_header; if (p_payload == NULL) { m_tx_curr_packet.payload.p_buffer = NULL; } else { m_tx_curr_packet.payload = *p_payload; } if (p_crc == NULL) { m_tx_curr_packet.crc.p_buffer = NULL; } else { m_tx_curr_packet.crc = *p_crc; } m_tx_phase = PHASE_BEGIN; tx_buf_fill(); } CRITICAL_REGION_EXIT(); return NRF_SUCCESS; }