/*
 * Allocate an RX buffer from the mcumgr UART slab.
 *
 * @return Pointer to a zero-length buffer on success; NULL if the slab
 *         has no free blocks (allocation uses K_NO_WAIT).
 */
static struct uart_mcumgr_rx_buf *uart_mcumgr_alloc_rx_buf(void)
{
	void *block = NULL;

	if (k_mem_slab_alloc(&uart_mcumgr_slab, &block, K_NO_WAIT) != 0) {
		return NULL;
	}

	struct uart_mcumgr_rx_buf *rx_buf = block;

	rx_buf->length = 0;
	return rx_buf;
}
/* thread entry simply invoke the APIs*/ static void tmslab_api(void *p1, void *p2, void *p3) { void *block[BLK_NUM]; struct k_mem_slab *slab = slabs[atomic_inc(&slab_id) % SLAB_NUM]; int i = LOOP; while (i--) { memset(block, 0, sizeof(block)); for (int i = 0; i < BLK_NUM; i++) { k_mem_slab_alloc(slab, &block[i], TIMEOUT); } for (int i = 0; i < BLK_NUM; i++) { if (block[i]) { k_mem_slab_free(slab, &block[i]); block[i] = NULL; } } } k_sem_give(&sync_sema); }
static int rx_stream_start(struct stream *stream, struct device *dev) { const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); struct i2s_stm32_data *const dev_data = DEV_DATA(dev); int ret; ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block, K_NO_WAIT); if (ret < 0) { return ret; } if (stream->master) { LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_RX); } else { LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_RX); } /* remember active RX DMA channel (used in callback) */ active_dma_rx_channel[stream->dma_channel] = dev; ret = start_dma(dev_data->dev_dma, stream->dma_channel, &stream->dma_cfg, (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), stream->mem_block, stream->cfg.block_size); if (ret < 0) { LOG_ERR("Failed to start RX DMA transfer: %d", ret); return ret; } LL_I2S_EnableDMAReq_RX(cfg->i2s); LL_I2S_EnableIT_ERR(cfg->i2s); LL_I2S_Enable(cfg->i2s); return 0; }
/*
 * RX DMA completion callback. This function is executed in the
 * interrupt context.
 *
 * On each completed transfer: keep the just-filled block aside, swap in
 * a freshly allocated block, re-arm the DMA, invalidate the cache over
 * the filled block, and push it onto the application queue. Any failure
 * sets I2S_STATE_ERROR and falls through to rx_stream_disable().
 */
static void dma_rx_callback(void *arg, u32_t channel, int status)
{
	struct device *dev = get_dev_from_rx_dma_channel(channel);
	const struct i2s_stm32_cfg *cfg = DEV_CFG(dev);
	struct i2s_stm32_data *const dev_data = DEV_DATA(dev);
	struct stream *stream = &dev_data->rx;
	void *mblk_tmp;
	int ret;

	/* Non-zero DMA status is treated as a hard I/O error. */
	if (status != 0) {
		ret = -EIO;
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	__ASSERT_NO_MSG(stream->mem_block != NULL);

	/* Stop reception if there was an error */
	if (stream->state == I2S_STATE_ERROR) {
		goto rx_disable;
	}

	/* Keep the just-filled block; stream->mem_block is replaced below. */
	mblk_tmp = stream->mem_block;

	/* Prepare to receive the next data block */
	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		/* NOTE(review): on this and the later error paths mblk_tmp
		 * is never freed or queued — looks like a slab-block leak
		 * unless rx_stream_disable reclaims it; verify.
		 */
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	ret = reload_dma(dev_data->dev_dma, stream->dma_channel,
			 &stream->dma_cfg,
			 (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
			 stream->mem_block,
			 stream->cfg.block_size);
	if (ret < 0) {
		LOG_DBG("Failed to start RX DMA transfer: %d", ret);
		goto rx_disable;
	}

	/* Assure cache coherency after DMA write operation */
	DCACHE_INVALIDATE(mblk_tmp, stream->cfg.block_size);

	/* All block data received */
	ret = queue_put(&stream->mem_block_queue, mblk_tmp,
			stream->cfg.block_size);
	if (ret < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}
	/* Wake any reader blocked waiting for a filled block. */
	k_sem_give(&stream->sem);

	/* Stop reception if we were requested */
	if (stream->state == I2S_STATE_STOPPING) {
		stream->state = I2S_STATE_READY;
		goto rx_disable;
	}

	return;

rx_disable:
	rx_stream_disable(stream, dev);
}