int qm_spi_dma_channel_config( const qm_spi_t spi, const qm_dma_t dma_ctrl_id, const qm_dma_channel_id_t dma_channel_id, const qm_dma_channel_direction_t dma_channel_direction) { QM_CHECK(spi < QM_SPI_NUM, -EINVAL); QM_CHECK(dma_ctrl_id < QM_DMA_NUM, -EINVAL); QM_CHECK(dma_channel_id < QM_DMA_CHANNEL_NUM, -EINVAL); dma_context_t *dma_context_p = NULL; qm_dma_channel_config_t dma_chan_cfg = {0}; dma_chan_cfg.handshake_polarity = QM_DMA_HANDSHAKE_POLARITY_HIGH; dma_chan_cfg.channel_direction = dma_channel_direction; dma_chan_cfg.client_callback = spi_dma_callback; dma_chan_cfg.transfer_type = QM_DMA_TYPE_SINGLE; /* Every data transfer performed by the DMA core corresponds to an SPI * data frame, the SPI uses the number of bits determined by a previous * qm_spi_set_config call where the frame size was specified. */ switch (dfs[spi]) { case 1: dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_8; break; case 2: dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_16; break; case 4: dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_32; break; default: /* The DMA core cannot handle 3 byte frame sizes. */ return -EINVAL; } dma_chan_cfg.destination_transfer_width = dma_chan_cfg.source_transfer_width; switch (dma_channel_direction) { case QM_DMA_MEMORY_TO_PERIPHERAL: #if (QUARK_SE) dma_chan_cfg.handshake_interface = (QM_SPI_MST_0 == spi) ? DMA_HW_IF_SPI_MASTER_0_TX : DMA_HW_IF_SPI_MASTER_1_TX; #else dma_chan_cfg.handshake_interface = DMA_HW_IF_SPI_MASTER_0_TX; #endif /* The DMA burst length has to fit in the space remaining in the * TX FIFO after the watermark level, DMATDLR. */ dma_chan_cfg.source_burst_length = SPI_DMA_WRITE_BURST_LENGTH; dma_chan_cfg.destination_burst_length = SPI_DMA_WRITE_BURST_LENGTH; dma_context_p = &dma_context_tx[spi]; break; case QM_DMA_PERIPHERAL_TO_MEMORY: #if (QUARK_SE) dma_chan_cfg.handshake_interface = (QM_SPI_MST_0 == spi) ? 
DMA_HW_IF_SPI_MASTER_0_RX : DMA_HW_IF_SPI_MASTER_1_RX; #else dma_chan_cfg.handshake_interface = DMA_HW_IF_SPI_MASTER_0_RX; #endif /* The DMA burst length has to match the value of the receive * watermark level, DMARDLR + 1. */ dma_chan_cfg.source_burst_length = SPI_DMA_READ_BURST_LENGTH; dma_chan_cfg.destination_burst_length = SPI_DMA_READ_BURST_LENGTH; dma_context_p = &dma_context_rx[spi]; break; default: /* Memory to memory not allowed on SPI transfers. */ return -EINVAL; } /* The DMA driver needs a pointer to the client callback function so * that later we can identify to which SPI controller the DMA callback * corresponds to as well as whether we are dealing with a TX or RX * dma_context struct. */ QM_ASSERT(dma_context_p); dma_chan_cfg.callback_context = dma_context_p; /* To be used on received DMA callback. */ dma_context_p->spi_id = spi; dma_context_p->dma_channel_id = dma_channel_id; /* To be used on transfer setup. */ dma_core[spi] = dma_ctrl_id; return qm_dma_channel_set_config(dma_ctrl_id, dma_channel_id, &dma_chan_cfg); }
int main(void) { qm_dma_channel_config_t cfg = {0}; static dma_channel_desc_t chan_desc; int return_code, i; QM_PUTS("Starting: DMA"); /* * Request the required interrupts. Depending on the channel used a * different isr is set: * qm_irq_request(QM_IRQ_DMA_0_INT_<channel>, * qm_dma_0_isr_<channel>) */ qm_irq_request(QM_IRQ_DMA_0_INT_0, qm_dma_0_isr_0); qm_irq_request(QM_IRQ_DMA_0_ERROR_INT, qm_dma_0_error_isr); /* Set the controller and channel IDs. */ chan_desc.controller_id = QM_DMA_0; chan_desc.channel_id = QM_DMA_CHANNEL_0; return_code = qm_dma_init(chan_desc.controller_id); if (return_code) { QM_PUTS("ERROR: qm_dma_init"); } /* Configure DMA channel. */ cfg.channel_direction = QM_DMA_MEMORY_TO_MEMORY; cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_8; cfg.destination_transfer_width = QM_DMA_TRANS_WIDTH_8; cfg.source_burst_length = QM_DMA_BURST_TRANS_LENGTH_1; cfg.destination_burst_length = QM_DMA_BURST_TRANS_LENGTH_1; cfg.client_callback = transfer_callback; cfg.transfer_type = QM_DMA_TYPE_SINGLE; /* * Set the context as the channel descriptor. This will allow the * descriptor to be available in the callback. * The callback context is not actually used in this app. It is * provided as an example. */ cfg.callback_context = (void *)&chan_desc; return_code = qm_dma_channel_set_config(chan_desc.controller_id, chan_desc.channel_id, &cfg); if (return_code) { QM_PUTS("ERROR: qm_dma_channel_set_config"); } /* Do the transfers. */ do_transfer(&chan_desc); QM_PUTS("Each RX buffer should contain the full TX buffer string."); QM_PRINTF("TX data: %s\n", tx_data); /* Print copied data. */ for (i = 0; i < NUM_TRANSFERS; i++) { QM_PRINTF("RX data Loop %d: %s\n", i, rx_data[i]); } /* Configure DMA channel for multiblock usage. */ cfg.transfer_type = QM_DMA_TYPE_MULTI_LL; return_code = qm_dma_channel_set_config(chan_desc.controller_id, chan_desc.channel_id, &cfg); if (return_code) { QM_PUTS("ERROR: qm_dma_channel_set_config"); } /* Do the multiblock transfer. 
*/ do_transfer_multi(&chan_desc); QM_PRINTF("RX data (multiblock transfer):\n"); rx_data[1][0] = '\0'; printf("%s\n", (char *)&rx_data[0][0]); QM_PUTS("Finished: DMA"); return 0; }
/*
 * Zephyr DMA API backend: configure @channel of the QMSI DMA controller
 * behind @dev according to the generic @config.
 *
 * Only single-block transfers are supported. Returns 0 on success,
 * -ENOTSUP for multi-block requests, or an error code propagated from the
 * width/burst lookups or the QMSI driver calls.
 */
static int dma_qmsi_chan_config(struct device *dev, u32_t channel,
				struct dma_config *config)
{
	const struct dma_qmsi_config_info *info = dev->config->config_info;
	struct dma_qmsi_driver_data *data = dev->driver_data;
	qm_dma_transfer_t xfer = { 0 };
	qm_dma_channel_config_t chan_cfg = { 0 };
	u32_t index = 0U;
	int err = 0;

	/* Only single-block transfers are handled by this driver. */
	if (config->block_count != 1) {
		return -ENOTSUP;
	}

	chan_cfg.handshake_interface =
		(qm_dma_handshake_interface_t)config->dma_slot;
	chan_cfg.channel_direction =
		(qm_dma_channel_direction_t)config->channel_direction;

	/* Translate Zephyr data sizes into QMSI transfer-width enums. */
	err = width_index(config->source_data_size, &index);
	if (err != 0) {
		return err;
	}
	chan_cfg.source_transfer_width = (qm_dma_transfer_width_t)index;

	err = width_index(config->dest_data_size, &index);
	if (err != 0) {
		return err;
	}
	chan_cfg.destination_transfer_width = (qm_dma_transfer_width_t)index;

	/* Translate Zephyr burst lengths into QMSI burst-length enums. */
	err = bst_index(config->dest_burst_length, &index);
	if (err != 0) {
		return err;
	}
	chan_cfg.destination_burst_length = (qm_dma_burst_length_t)index;

	err = bst_index(config->source_burst_length, &index);
	if (err != 0) {
		return err;
	}
	chan_cfg.source_burst_length = (qm_dma_burst_length_t)index;

	/* TODO: add support for using other DMA transfer types. */
	chan_cfg.transfer_type = QM_DMA_TYPE_SINGLE;

	/* Record the user callback, then route QMSI through our shim. */
	data->callback_data[channel] = config->callback_arg;
	data->dma_user_callback[channel] = config->dma_callback;

	dma_context[channel].index = channel;
	dma_context[channel].dev = dev;
	chan_cfg.callback_context = &dma_context[channel];
	chan_cfg.client_callback = dma_drv_callback;

	err = qm_dma_channel_set_config(info->instance, channel, &chan_cfg);
	if (err != 0) {
		return err;
	}

	xfer.block_size = config->head_block->block_size;
	xfer.source_address = (u32_t *)config->head_block->source_address;
	xfer.destination_address = (u32_t *)config->head_block->dest_address;

	return qm_dma_transfer_set_config(info->instance, channel, &xfer);
}