/**
 * Common part of the sync/async transceive calls (SPIS = SPI slave).
 *
 * Validates the configuration and buffers, prepares the transfer and waits
 * for the master to complete it. The context lock taken by the caller is
 * released before returning.
 *
 * @return 0 on success, negative errno on failure.
 */
static int transceive(struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs)
{
	struct spi_nrfx_data *dev_data = get_dev_data(dev);
	int error;

	error = configure(dev, spi_cfg);
	if (error != 0) {
		/* Invalid configuration. */
	} else if ((tx_bufs && tx_bufs->count > 1) ||
		   (rx_bufs && rx_bufs->count > 1)) {
		/* EasyDMA takes one contiguous buffer per direction. */
		LOG_ERR("Scattered buffers are not supported");
		error = -ENOTSUP;
	} else if (tx_bufs && tx_bufs->buffers[0].len &&
		   !nrfx_is_in_ram(tx_bufs->buffers[0].buf)) {
		/* EasyDMA cannot read data from flash. */
		LOG_ERR("Only buffers located in RAM are supported");
		error = -ENOTSUP;
	} else {
		spi_context_buffers_setup(&dev_data->ctx, tx_bufs, rx_bufs, 1);

		prepare_for_transfer(dev);

		/* Completion is signaled from the nrfx event handler. */
		error = spi_context_wait_for_completion(&dev_data->ctx);
	}

	spi_context_release(&dev_data->ctx, error);

	return error;
}
/*
 * Hand the current TX/RX buffers over to the SPIS peripheral so it can
 * serve the next transaction started by the master.
 *
 * On failure, or for zero-length buffers, the pending spi_context
 * operation is completed immediately with the corresponding status.
 */
static void prepare_for_transfer(struct device *dev)
{
	struct spi_nrfx_data *dev_data = get_dev_data(dev);
	const struct spi_nrfx_config *dev_config = get_dev_config(dev);
	struct spi_context *ctx = &dev_data->ctx;
	int status;
	size_t buf_len = spi_context_longest_current_buf(ctx);

	if (buf_len > 0) {
		nrfx_err_t result;

		/* Clamp to what the peripheral's length registers can hold. */
		if (buf_len > dev_config->max_buf_len) {
			buf_len = dev_config->max_buf_len;
		}

		result = nrfx_spis_buffers_set(
			&dev_config->spis,
			ctx->tx_buf,
			spi_context_tx_buf_on(ctx) ? buf_len : 0,
			ctx->rx_buf,
			spi_context_rx_buf_on(ctx) ? buf_len : 0);
		if (result == NRFX_SUCCESS) {
			/* Completion will be reported by event_handler(). */
			return;
		}

		/* Cannot prepare for transfer. */
		status = -EIO;
	} else {
		/* Zero-length buffer provided. */
		status = 0;
	}

	spi_context_complete(ctx, status);
}
/*
 * Set the RTC wrap (top) value and the optional callback invoked on wrap.
 *
 * Fails with -EBUSY while any alarm is active, because changing the top
 * value would invalidate pending compare channels.
 */
static int counter_nrfx_set_top_value(struct device *dev,
				      u32_t ticks,
				      counter_top_callback_t callback,
				      void *user_data)
{
	const struct counter_nrfx_config *nrfx_config = get_nrfx_config(dev);
	const nrfx_rtc_t *rtc = &nrfx_config->rtc;
	struct counter_nrfx_data *dev_data = get_dev_data(dev);

	for (int i = 0; i < counter_get_num_of_channels(dev); i++) {
		/* Overflow can be changed only when all alarms are
		 * disabled.
		 */
		if (nrfx_config->ch_data[i].callback) {
			return -EBUSY;
		}
	}

	nrfx_rtc_cc_disable(rtc, TOP_CH);
	nrfx_rtc_counter_clear(rtc);

	dev_data->top_cb = callback;
	dev_data->top_user_data = user_data;
	dev_data->top = ticks;
	/* NOTE(review): when callback is NULL the CC interrupt stays
	 * disabled; unless a (D)PPI wrap channel clears the counter (see
	 * ppi_setup()), it will not wrap at `ticks` — confirm.
	 */
	nrfx_rtc_cc_set(rtc, TOP_CH, ticks, callback ? true : false);

	return 0;
}
/*
 * RTC interrupt dispatcher registered with nrfx.
 *
 * COUNTER_TOP_INT services the wrap (top) compare channel; any higher
 * interrupt type is mapped to a user alarm channel.
 */
static void event_handler(nrfx_rtc_int_type_t int_type, void *p_context)
{
	struct device *dev = p_context;
	struct counter_nrfx_data *data = get_dev_data(dev);

	if (int_type == COUNTER_TOP_INT) {
		/* Manually reset counter if top value is different than max. */
		if ((data->top != COUNTER_MAX_TOP_VALUE)
#if CONFIG_COUNTER_RTC_WITH_PPI_WRAP
		    /* With PPI wrap enabled the hardware clears the counter. */
		    && !get_nrfx_config(dev)->use_ppi
#endif
		    ) {
			nrfx_rtc_counter_clear(&get_nrfx_config(dev)->rtc);
		}

		/* Re-arm the top compare channel for the next period. */
		nrfx_rtc_cc_set(&get_nrfx_config(dev)->rtc,
				TOP_CH, data->top, true);

		if (data->top_cb) {
			data->top_cb(dev, data->top_user_data);
		}
	} else if (int_type > COUNTER_TOP_INT) {
		/* Compare channels above TOP_CH carry user alarms. */
		alarm_event_handler(dev, CC_TO_ID(int_type));
	}
}
/*
 * Initialize the RTC instance: request the 32 kHz clock source and
 * register @p handler with the nrfx RTC driver.
 *
 * @return 0 on success, -ENODEV if the clock device is missing,
 *         -EBUSY if nrfx_rtc_init() fails (e.g. already initialized).
 */
static int init_rtc(struct device *dev,
		    const nrfx_rtc_config_t *config,
		    nrfx_rtc_handler_t handler)
{
	struct device *clock;
	const struct counter_nrfx_config *nrfx_config = get_nrfx_config(dev);
	const nrfx_rtc_t *rtc = &nrfx_config->rtc;

	clock = device_get_binding(DT_NORDIC_NRF_CLOCK_0_LABEL "_32K");
	if (!clock) {
		return -ENODEV;
	}

	/* NOTE(review): return value ignored — presumably a best-effort
	 * LFCLK request; confirm it cannot fail here.
	 */
	clock_control_on(clock, (void *)CLOCK_CONTROL_NRF_K32SRC);

	nrfx_err_t result = nrfx_rtc_init(rtc, config, handler);

	if (result != NRFX_SUCCESS) {
		LOG_INST_ERR(nrfx_config->log, "Failed to initialize device.");
		return -EBUSY;
	}

	/* Default to the maximum top value until set_top_value() is used. */
	get_dev_data(dev)->top = COUNTER_MAX_TOP_VALUE;
	LOG_INST_DBG(nrfx_config->log, "Initialized");

	return 0;
}
/** Install the application's interrupt callback (variant without user data). */
static void uarte_nrfx_irq_callback_set(struct device *dev,
					uart_irq_callback_t cb)
{
	get_dev_data(dev)->cb = cb;
}
/** Interrupt driven FIFO fill function */
static int uarte_nrfx_fifo_fill(struct device *dev,
				const u8_t *tx_data,
				int len)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = get_dev_data(dev);
	const struct uarte_nrfx_config *config = get_dev_config(dev);

	/* Clamp to the size of the driver's DMA-capable RAM buffer. */
	if (len > config->tx_buff_size) {
		len = config->tx_buff_size;
	}

	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);

	/* Copy data to RAM buffer for EasyDMA transfer */
	for (int i = 0; i < len; i++) {
		data->tx_buffer[i] = tx_data[i];
	}

	/* NOTE(review): assumes no TX is in progress (caller checked
	 * irq_tx_ready) — an ongoing transfer would read a buffer that is
	 * being rewritten; confirm against the ISR usage.
	 */
	nrf_uarte_tx_buffer_set(uarte, data->tx_buffer, len);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);

	/* Number of bytes actually queued (possibly clamped). */
	return len;
}
/*
 * Set the TIMER wrap (top) value via a compare channel armed with the
 * clear-on-compare short; the optional callback fires on each wrap.
 *
 * Fails with -EBUSY while any alarm is active.
 */
static int counter_nrfx_set_top_value(struct device *dev,
				      u32_t ticks,
				      counter_top_callback_t callback,
				      void *user_data)
{
	const struct counter_nrfx_config *nrfx_config = get_nrfx_config(dev);
	const nrfx_timer_t *timer = &nrfx_config->timer;
	struct counter_nrfx_data *data = get_dev_data(dev);

	for (int i = 0; i < counter_get_num_of_channels(dev); i++) {
		/* Overflow can be changed only when all alarms are
		 * disabled.
		 */
		if (nrfx_config->ch_data[i].callback) {
			return -EBUSY;
		}
	}

	nrfx_timer_compare_int_disable(timer, TOP_CH);
	nrfx_timer_clear(timer);

	data->top_cb = callback;
	data->top_user_data = user_data;
	/* NOTE(review): unlike the RTC variant, data->top is not updated
	 * here — verify that get_top_value()/relative-alarm math still
	 * observes the new top value.
	 */
	nrfx_timer_extended_compare(timer, TOP_CH, ticks,
				    COUNTER_OVERFLOW_SHORT,
				    callback ? true : false);

	return 0;
}
static int spi_nrfx_transceive(struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { spi_context_lock(&get_dev_data(dev)->ctx, false, NULL); return transceive(dev, spi_cfg, tx_bufs, rx_bufs); }
static void event_handler(const nrfx_spis_evt_t *p_event, void *p_context) { struct device *dev = p_context; struct spi_nrfx_data *dev_data = get_dev_data(dev); if (p_event->evt_type == NRFX_SPIS_XFER_DONE) { spi_context_complete(&dev_data->ctx, p_event->rx_amount); } }
/** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. * * @return N/A */ static void uarte_nrfx_isr(void *arg) { struct device *dev = arg; const struct uarte_nrfx_data *data = get_dev_data(dev); if (data->cb) { data->cb(data->cb_data); } }
static int spi_nrfx_transceive_async(struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, struct k_poll_signal *async) { spi_context_lock(&get_dev_data(dev)->ctx, true, async); return transceive(dev, spi_cfg, tx_bufs, rx_bufs); }
/** Register the interrupt callback together with its user data pointer. */
static void uarte_nrfx_irq_callback_set(struct device *dev,
					uart_irq_callback_user_data_t cb,
					void *cb_data)
{
	struct uarte_nrfx_data *dev_data = get_dev_data(dev);

	dev_data->cb_data = cb_data;
	dev_data->cb = cb;
}
/*
 * Configure a one-shot alarm on RTC compare channel @p chan_id.
 *
 * Relative alarms are based on the current counter value, with the result
 * wrapped back into range when it passes the top value.
 *
 * @return 0 on success, -EINVAL for out-of-range ticks,
 *         -EBUSY if the channel is already in use.
 */
static int counter_nrfx_set_alarm(struct device *dev,
				  u8_t chan_id,
				  const struct counter_alarm_cfg *alarm_cfg)
{
	const struct counter_nrfx_config *nrfx_config = get_nrfx_config(dev);
	const nrfx_rtc_t *rtc = &nrfx_config->rtc;
	u32_t cc_val;

	if (alarm_cfg->ticks > get_dev_data(dev)->top) {
		return -EINVAL;
	}

	if (nrfx_config->ch_data[chan_id].callback) {
		return -EBUSY;
	}

	if (alarm_cfg->absolute) {
		cc_val = alarm_cfg->ticks;
	} else {
		/* As RTC is 24 bit there is no risk of overflow. */
		cc_val = alarm_cfg->ticks + nrfx_rtc_counter_get(rtc);
		/* NOTE(review): wrap subtracts `top`, not `top + 1` —
		 * confirm this matches the counter's actual period.
		 */
		cc_val -= (cc_val > get_dev_data(dev)->top) ?
			  get_dev_data(dev)->top : 0;
	}

	nrfx_config->ch_data[chan_id].callback = alarm_cfg->callback;
	nrfx_config->ch_data[chan_id].user_data = alarm_cfg->user_data;

	if ((cc_val == 0) &&
	    (get_dev_data(dev)->top != counter_get_max_top_value(dev))) {
		/* From Product Specification: If a CC register value is 0 when
		 * a CLEAR task is set, this will not trigger a COMPARE event.
		 */
		LOG_INST_INF(nrfx_config->log,
			     "Attempt to set CC to 0, delayed to 1.");
		cc_val++;
	}

	nrfx_rtc_cc_set(rtc, ID_TO_CC(chan_id), cc_val, true);

	return 0;
}
static int spi_nrfx_release(struct device *dev, const struct spi_config *spi_cfg) { struct spi_nrfx_data *dev_data = get_dev_data(dev); if (!spi_context_configured(&dev_data->ctx, spi_cfg)) { return -EINVAL; } spi_context_unlock_unconditionally(&dev_data->ctx); return 0; }
/*
 * TIMER interrupt dispatcher registered with nrfx.
 */
static void event_handler(nrf_timer_event_t event_type, void *p_context)
{
	struct device *dev = p_context;
	struct counter_nrfx_data *dev_data = get_dev_data(dev);

	if (event_type == COUNTER_TOP_INT) {
		/* Wrap event: the clear-on-compare short armed in
		 * set_top_value() already reset the counter; only the user
		 * callback remains to be invoked.
		 */
		if (dev_data->top_cb) {
			dev_data->top_cb(dev, dev_data->top_user_data);
		}
	} else if (event_type > NRF_TIMER_EVENT_COMPARE1) {
		/* NOTE(review): assumes alarm channels map to COMPARE2 and
		 * above — confirm against COUNTER_EVENT_TO_ID().
		 */
		alarm_event_handler(dev, COUNTER_EVENT_TO_ID(event_type));
	}
}
/** * @brief Poll the device for input. * * @param dev UARTE device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer is empty. */ static int uarte_nrfx_poll_in(struct device *dev, unsigned char *c) { const struct uarte_nrfx_data *data = get_dev_data(dev); NRF_UARTE_Type *uarte = get_uarte_instance(dev); if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { return -1; } *c = data->rx_data; /* clear the interrupt */ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); return 0; }
/*
 * Apply @p spi_cfg to the SPIS peripheral, rejecting settings the
 * hardware cannot provide (slave-only, single line, 8-bit words,
 * peripheral-controlled CSN).
 *
 * @return 0 on success or when already configured, -EINVAL otherwise.
 */
static int configure(struct device *dev, const struct spi_config *spi_cfg)
{
	struct spi_context *ctx = &get_dev_data(dev)->ctx;

	if (spi_context_configured(ctx, spi_cfg)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) {
		LOG_ERR("Master mode is not supported on %s",
			dev->config->name);
		return -EINVAL;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if ((spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
		LOG_ERR("Word sizes other than 8 bits"
			" are not supported");
		return -EINVAL;
	}

	if (spi_cfg->cs) {
		/* CSN is handled by the SPIS peripheral itself. */
		LOG_ERR("CS control via GPIO is not supported");
		return -EINVAL;
	}

	ctx->config = spi_cfg;

	nrf_spis_configure(get_dev_config(dev)->spis.p_reg,
			   get_nrf_spis_mode(spi_cfg->operation),
			   get_nrf_spis_bit_order(spi_cfg->operation));

	return 0;
}
static int init_spis(struct device *dev, const nrfx_spis_config_t *config) { /* This sets only default values of frequency, mode and bit order. * The proper ones are set in configure() when a transfer is started. */ nrfx_err_t result = nrfx_spis_init(&get_dev_config(dev)->spis, config, event_handler, dev); if (result != NRFX_SUCCESS) { LOG_ERR("Failed to initialize device: %s", dev->config->name); return -EBUSY; } spi_context_unlock_unconditionally(&get_dev_data(dev)->ctx); return 0; }
/*
 * Optional hardware wrap support: connect the top-value COMPARE event to
 * the RTC CLEAR task through (D)PPI, so wrapping needs no CPU involvement.
 *
 * No-op unless CONFIG_COUNTER_RTC_WITH_PPI_WRAP is enabled and the
 * instance opted in via use_ppi.
 *
 * @return 0 on success, -ENODEV if no (D)PPI channel could be allocated.
 */
static int ppi_setup(struct device *dev)
{
#if CONFIG_COUNTER_RTC_WITH_PPI_WRAP
	const struct counter_nrfx_config *nrfx_config = get_nrfx_config(dev);
	struct counter_nrfx_data *data = get_dev_data(dev);
	const nrfx_rtc_t *rtc = &nrfx_config->rtc;
	nrfx_err_t result;

	if (!nrfx_config->use_ppi) {
		return 0;
	}

#ifdef DPPI_PRESENT
	result = nrfx_dppi_channel_alloc(&data->ppi_ch);
	if (result != NRFX_SUCCESS) {
		LOG_INST_ERR(nrfx_config->log,
			     "Failed to allocate PPI channel.");
		return -ENODEV;
	}

	/* DPPI uses publish/subscribe registers rather than EP pairs. */
	nrf_rtc_subscribe_set(rtc->p_reg, NRF_RTC_TASK_CLEAR, data->ppi_ch);
	nrf_rtc_publish_set(rtc->p_reg, NRF_RTC_EVENT_COMPARE_0, data->ppi_ch);
	(void)nrfx_dppi_channel_enable(data->ppi_ch);
#else /* DPPI_PRESENT */
	u32_t evt;
	u32_t task;

	evt = nrfx_rtc_event_address_get(rtc, NRF_RTC_EVENT_COMPARE_0);
	task = nrfx_rtc_task_address_get(rtc, NRF_RTC_TASK_CLEAR);

	result = nrfx_ppi_channel_alloc(&data->ppi_ch);
	if (result != NRFX_SUCCESS) {
		LOG_INST_ERR(nrfx_config->log,
			     "Failed to allocate PPI channel.");
		return -ENODEV;
	}

	(void)nrfx_ppi_channel_assign(data->ppi_ch, evt, task);
	(void)nrfx_ppi_channel_enable(data->ppi_ch);
#endif
#endif /* CONFIG_COUNTER_RTC_WITH_PPI_WRAP */
	return 0;
}
/** Interrupt driven FIFO read function */
static int uarte_nrfx_fifo_read(struct device *dev,
				u8_t *rx_data,
				const int size)
{
	NRF_UARTE_Type *regs = get_uarte_instance(dev);

	if (!nrf_uarte_event_check(regs, NRF_UARTE_EVENT_ENDRX)) {
		/* No byte available. */
		return 0;
	}

	/* Acknowledge the event and copy out the single buffered byte. */
	nrf_uarte_event_clear(regs, NRF_UARTE_EVENT_ENDRX);
	rx_data[0] = (u8_t)get_dev_data(dev)->rx_data;

	/* Re-arm the receiver for the next byte. */
	nrf_uarte_task_trigger(regs, NRF_UARTE_TASK_STARTRX);

	return 1;
}
static u32_t counter_nrfx_get_top_value(struct device *dev) { return get_dev_data(dev)->top; }
/*
 * One-time initialization of a UARTE instance: pin setup, frame format,
 * baudrate, then enabling the peripheral and arming single-byte reception.
 *
 * @return 0 on success, or the error from baudrate_set() on failure.
 */
static int uarte_instance_init(struct device *dev,
			       const struct uarte_init_config *config,
			       u8_t interrupts_active)
{
	int err;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = get_dev_data(dev);

	/* TXD idles high; RXD is an input without pull. */
	nrf_gpio_pin_write(config->pseltxd, 1);
	nrf_gpio_cfg_output(config->pseltxd);
	nrf_gpio_cfg_input(config->pselrxd, NRF_GPIO_PIN_NOPULL);

	nrf_uarte_txrx_pins_set(uarte, config->pseltxd, config->pselrxd);

	if (config->hwfc == NRF_UARTE_HWFC_ENABLED) {
		/* RTS idles high; CTS is an input without pull. */
		nrf_gpio_pin_write(config->pselrts, 1);
		nrf_gpio_cfg_output(config->pselrts);
		nrf_gpio_cfg_input(config->pselcts, NRF_GPIO_PIN_NOPULL);

		nrf_uarte_hwfc_pins_set(uarte,
					config->pselrts,
					config->pselcts);
	}

	/* Configure flow control and parity checking */
	nrf_uarte_configure(uarte, config->parity, config->hwfc);

	err = baudrate_set(dev, config->baudrate);
	if (err) {
		return err;
	}

	/* Enable receiver and transmitter */
	nrf_uarte_enable(uarte);

	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);

	/* Arm reception of a single byte into the driver's RAM buffer. */
	nrf_uarte_rx_buffer_set(uarte, &data->rx_data, 1);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);

#if UARTE_INTERRUPT_DRIVEN
	if (interrupts_active) {
		/* Set ENDTX event by requesting fake (zero-length) transfer.
		 * Pointer to RAM variable (data->tx_buffer) is set because
		 * otherwise such operation may result in HardFault or RAM
		 * corruption.
		 */
		nrf_uarte_tx_buffer_set(uarte, data->tx_buffer, 0);
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);

		/* switch off transmitter to save an energy */
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
	}
#endif /* UARTE_INTERRUPT_DRIVEN */

	return 0;
}
static u32_t counter_nrfx_get_max_relative_alarm(struct device *dev) { /* Maybe decreased. */ return get_dev_data(dev)->top; }