/**
 * @brief Function for preparing the shortcut register before resuming the transfer.
 *
 * Selects between the BB->SUSPEND and BB->STOP shortcuts depending on whether the
 * byte about to be transferred is the last one of the transaction. When it is the
 * last byte (and no follow-up transfer is pending), the driver is switched over to
 * wait for the STOPPED event instead of the per-byte end event.
 *
 * @param[in] p_instance TWI driver instance.
 */
static void txrx_shorts_set_task_start(volatile nrf_drv_twi_t const * const p_instance)
{
    uint32_t short_mask;
    volatile transfer_t * p_transfer = &(m_cb[p_instance->instance_id].transfer);

    // Start from a clean slate: remove both byte-boundary shortcuts before
    // deciding which one applies to the next byte.
    nrf_twi_shorts_clear(p_instance->p_reg,
                         NRF_TWI_SHORTS_BB_SUSPEND_MASK | NRF_TWI_SHORTS_BB_STOP_MASK);

    // If this is the last byte and no pending transfer follows, prepare to wait
    // for the STOPPED event (hardware generates STOP via the BB->STOP shortcut).
    if (((p_transfer->count + 1) == p_transfer->length) && p_transfer->xfer_pending == false)
    {
        short_mask = NRF_TWI_SHORTS_BB_STOP_MASK;

        // The transfer now completes on STOPPED; clear any stale event first.
        p_transfer->end_event = NRF_TWI_EVENTS_STOPPED;
        nrf_twi_event_clear(p_instance->p_reg, p_transfer->end_event);

        // In handler (non-blocking) mode, swap the interrupt source: mask the
        // previous per-byte interrupt and enable the STOPPED interrupt instead.
        if (m_handlers[p_instance->instance_id])
        {
            nrf_twi_int_disable(p_instance->p_reg, p_transfer->end_int);
            p_transfer->end_int = NRF_TWI_INT_STOPPED_MASK;
            nrf_twi_int_enable(p_instance->p_reg, p_transfer->end_int);
        }
        state_machine(p_instance, TO_STOP);
    }
    else
    {
        // More bytes to go: suspend after each byte so software can intervene.
        short_mask = NRF_TWI_SHORTS_BB_SUSPEND_MASK;
    }
    nrf_twi_shorts_set(p_instance->p_reg, short_mask);

    // Trigger the task that was queued for this step (e.g. STARTRX/STARTTX) and
    // record RESUME as the task to use from now on — subsequent steps only need
    // to resume the suspended transfer.
    nrf_twi_tasks_t prev_task = p_transfer->task;
    p_transfer->task = NRF_TWI_TASKS_RESUME;
    nrf_twi_task_set(p_instance->p_reg, prev_task);
}
/**
 * @brief Disable the TWI peripheral of the given driver instance.
 *
 * Stops any transaction in progress, powers down the peripheral and masks all
 * of its interrupt sources. The driver drops back to the initialized (but not
 * enabled) state; it can be re-enabled without re-initialization.
 *
 * @param[in] p_instance TWI driver instance.
 */
void nrf_drv_twi_disable(nrf_drv_twi_t const * const p_instance)
{
    uint32_t const id = p_instance->instance_id;

    // Disabling an uninitialized instance is a programming error.
    ASSERT(m_cb[id].state != NRF_DRV_STATE_UNINITIALIZED);

    // Force a stop condition, then shut the peripheral down and mask its
    // interrupt sources so no stale event fires after re-enabling.
    nrf_twi_task_set(p_instance->p_reg, NRF_TWI_TASKS_STOP);
    nrf_twi_disable(p_instance->p_reg);
    nrf_twi_int_disable(p_instance->p_reg, DISABLE_MASK);

    // Back to "initialized but not enabled".
    m_cb[id].state = NRF_DRV_STATE_INITIALIZED;
}
/**
 * @brief React to a bus error by aborting the current transfer.
 *
 * Re-targets the transfer to finish on the STOPPED event and forces a stop
 * condition on the bus. The transfer may currently be suspended, so RESUME is
 * triggered before STOP to make the stop task take effect.
 *
 * @param[in] p_instance TWI driver instance.
 */
static void on_error(volatile nrf_drv_twi_t const * const p_instance)
{
    volatile transfer_t * p_xfer = &(m_cb[p_instance->instance_id].transfer);

    // From now on the transfer terminates on STOPPED; clear any stale event.
    p_xfer->end_event = NRF_TWI_EVENTS_STOPPED;
    nrf_twi_event_clear(p_instance->p_reg, p_xfer->end_event);

    // When an event handler is registered (non-blocking mode), swap the
    // interrupt source: mask the previous end-of-step interrupt and enable
    // the STOPPED interrupt in its place.
    if (m_handlers[p_instance->instance_id])
    {
        nrf_twi_int_disable(p_instance->p_reg, p_xfer->end_int);
        p_xfer->end_int = NRF_TWI_INT_STOPPED_MASK;
        nrf_twi_int_enable(p_instance->p_reg, p_xfer->end_int);
    }

    // RESUME first — a suspended transfer would otherwise ignore STOP.
    nrf_twi_task_set(p_instance->p_reg, NRF_TWI_TASKS_RESUME);
    nrf_twi_task_set(p_instance->p_reg, NRF_TWI_TASKS_STOP);
}
/**
 * @brief Set up an RX transfer: program the slave address and arm the RXDREADY path.
 *
 * Queues STARTRX as the next task to trigger, makes RXDREADY the end-of-step
 * event and, in handler mode, switches the enabled interrupt source over to
 * RXDREADY before handing off to rx_prepare().
 *
 * @param[in] p_instance TWI driver instance.
 */
static void rx_address_req(volatile nrf_drv_twi_t const * const p_instance)
{
    volatile transfer_t * p_xfer = &(m_cb[p_instance->instance_id].transfer);

    // Program the 7-bit slave address for the upcoming reception.
    nrf_twi_address_set(p_instance->p_reg, p_xfer->address);

    // STARTRX will be triggered when the transfer is kicked off; each received
    // byte then completes on RXDREADY. Clear any stale event beforehand.
    p_xfer->task = NRF_TWI_TASKS_STARTRX;
    p_xfer->end_event = NRF_TWI_EVENTS_RXDREADY;
    nrf_twi_event_clear(p_instance->p_reg, p_xfer->end_event);

    // In handler (non-blocking) mode, swap the interrupt source: mask the
    // previous one, enable RXDREADY.
    if (m_handlers[p_instance->instance_id])
    {
        nrf_twi_int_disable(p_instance->p_reg, p_xfer->end_int);
        p_xfer->end_int = NRF_TWI_INT_RXDREADY_MASK;
        nrf_twi_int_enable(p_instance->p_reg, p_xfer->end_int);
    }
    rx_prepare(p_instance);
}
/**
 * @brief Shared TWI interrupt handler driving an asynchronous transfer.
 *
 * Processes, in order: bus errors, TX-byte-sent, RX-byte-ready, and finally
 * transfer completion (STOPPED/SUSPENDED). On completion all shortcuts and
 * interrupt sources are disabled and the user handler (if any) is invoked.
 *
 * @param[in] instance_idx Index into m_twi_info / m_twi_instances.
 */
static void twi_irq_handler(uint8_t instance_idx)
{
    twi_info_t *twi_info = &m_twi_info[instance_idx];
    NRF_TWI_Type *twi = m_twi_instances[instance_idx];

    if (nrf_twi_event_check(twi, NRF_TWI_EVENT_ERROR)) {
        nrf_twi_event_clear(twi, NRF_TWI_EVENT_ERROR);

        // In case of an error, force STOP.
        // The current transfer may be suspended (if it is RX), so it must be
        // resumed before the STOP task is triggered.
        nrf_twi_task_trigger(twi, NRF_TWI_TASK_RESUME);
        nrf_twi_task_trigger(twi, NRF_TWI_TASK_STOP);

        // Translate the hardware error source into the mbed I2C event flags
        // reported to the user handler.
        uint32_t errorsrc = nrf_twi_errorsrc_get_and_clear(twi);
        twi_info->events |= I2C_EVENT_ERROR;
        if (errorsrc & NRF_TWI_ERROR_ADDRESS_NACK) {
            twi_info->events |= I2C_EVENT_ERROR_NO_SLAVE;
        }
        if (errorsrc & NRF_TWI_ERROR_DATA_NACK) {
            twi_info->events |= I2C_EVENT_TRANSFER_EARLY_NACK;
        }
    }

    // Set when the transfer is done but no further interrupt will fire to
    // report it (see the completion check at the bottom).
    bool finished = false;

    if (nrf_twi_event_check(twi, NRF_TWI_EVENT_TXDSENT)) {
        nrf_twi_event_clear(twi, NRF_TWI_EVENT_TXDSENT);
        MBED_ASSERT(twi_info->tx_length > 0);
        --(twi_info->tx_length);
        // Send next byte if there is still something to be sent.
        if (twi_info->tx_length > 0) {
            nrf_twi_txd_set(twi, *(twi_info->tx));
            ++(twi_info->tx);
        // If TX is done, start RX if requested.
        } else if (twi_info->rx_length > 0) {
            start_asynch_rx(twi_info, twi);
        // If there is nothing more to do, finalize the transfer.
        } else {
            if (twi_info->stop) {
                // STOPPED will fire and be caught by the completion check.
                nrf_twi_task_trigger(twi, NRF_TWI_TASK_STOP);
            } else {
                // SUSPEND raises no further interrupt here, so flag completion
                // explicitly.
                nrf_twi_task_trigger(twi, NRF_TWI_TASK_SUSPEND);
                finished = true;
            }
            twi_info->events |= I2C_EVENT_TRANSFER_COMPLETE;
        }
    }

    if (nrf_twi_event_check(twi, NRF_TWI_EVENT_RXDREADY)) {
        nrf_twi_event_clear(twi, NRF_TWI_EVENT_RXDREADY);
        MBED_ASSERT(twi_info->rx_length > 0);
        *(twi_info->rx) = nrf_twi_rxd_get(twi);
        ++(twi_info->rx);
        --(twi_info->rx_length);
        if (twi_info->rx_length > 0) {
            // If more bytes should be received, resume the transfer
            // (in case the stop condition should be generated after the next
            // byte, change the shortcuts configuration first).
            if (twi_info->rx_length == 1 && twi_info->stop) {
                nrf_twi_shorts_set(twi, NRF_TWI_SHORT_BB_STOP_MASK);
            }
            nrf_twi_task_trigger(twi, NRF_TWI_TASK_RESUME);
        } else {
            // If all requested bytes were received, finalize the transfer.
            finished = true;
            twi_info->events |= I2C_EVENT_TRANSFER_COMPLETE;
        }
    }

    if (finished ||
        nrf_twi_event_check(twi, NRF_TWI_EVENT_STOPPED) ||
        (nrf_twi_int_enable_check(twi, NRF_TWI_INT_SUSPENDED_MASK) &&
         nrf_twi_event_check(twi, NRF_TWI_EVENT_SUSPENDED))) {
        // There is no need to clear the STOPPED and SUSPENDED events here,
        // they will no longer generate the interrupt - see below.
        nrf_twi_shorts_set(twi, 0);
        // Disable all interrupt sources.
        nrf_twi_int_disable(twi, UINT32_MAX);
        twi_info->active = false;
        if (twi_info->handler) {
            twi_info->handler();
        }
    }
}