Example #1
/* Zephyr DMA shim: start a transfer on the given channel by delegating
 * to the QMSI DMA instance bound to this device. */
static int dma_qmsi_start(struct device *dev, u32_t channel)
{
	const struct dma_qmsi_config_info *info = dev->config->config_info;

	return qm_dma_transfer_start(info->instance, channel);
}
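For context, entry points like dma_qmsi_start() are normally exposed through the driver's API table, declared in Zephyr's <dma.h>. A minimal sketch of that wiring, assuming Zephyr 1.x dma_driver_api field names (an illustration, not code from the project):

static const struct dma_driver_api dma_qmsi_api = {
	.transfer_start = dma_qmsi_start,
	/* .channel_config, .transfer_config, .transfer_stop omitted. */
};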
Example #2
File: main.c Project: jeez/qmsi
static void do_transfer_multi(dma_channel_desc_t *p_chan_desc)
{
	int return_code;
	qm_dma_multi_transfer_t multi_transfer = {0};

	/*
	 * We own the memory in which the driver builds the linked lists;
	 * each call to the DMA transfer configuration function consumes 2
	 * LLIs from this buffer.
	 */
	qm_dma_linked_list_item_t
	    lli_buf[MULTIBLOCK_NUM_BUFFERS * MULTIBLOCK_NUM_LLI_PER_BUFFER];

	/* Clear RX buffer. */
	for (unsigned int i = 0; i < RX_BUFF_SIZE; i++) {
		rx_data[0][i] = '.';
	}

	/*
	 * Linked list multiblock transfer with 4 blocks, using 2 calls to the
	 * DMA transfer configuration function.
	 *
	 *  tx_data:
	 *  <------+ TX Buffer 2 +------><-------+ TX Buffer 1 +------>
	 *  +---------------------------------------------------------+
	 *  |   Block A   |   Block B   |   Block C   |   Block D     |
	 *  +---------------------------------------------------------+
	 *
	 *  RX Buffer:
	 *  +--------------------------+     +------------------------------+
	 *  |  Block C   |   Block D   |.....|  Block A     |   Block B     |
	 *  +--------------------------+     +------------------------------+
	 */

	/* Add LLIs for second half of tx_data (blocks C and D). */
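	/* Each of the 4 blocks (A-D in the diagram) is the same size:
	 * strlen(tx_data_multiblock) / 4 bytes. */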
	multi_transfer.block_size =
	    strlen(tx_data_multiblock) /
	    (MULTIBLOCK_NUM_BUFFERS * MULTIBLOCK_NUM_LLI_PER_BUFFER);
	multi_transfer.num_blocks = MULTIBLOCK_NUM_LLI_PER_BUFFER;
	multi_transfer.source_address =
	    (uint32_t *)&tx_data_multiblock[strlen(tx_data_multiblock) /
					    MULTIBLOCK_NUM_BUFFERS];
	multi_transfer.destination_address = (uint32_t *)&rx_data[0][0];
	multi_transfer.linked_list_first = &lli_buf[0];
	return_code = qm_dma_multi_transfer_set_config(
	    p_chan_desc->controller_id, p_chan_desc->channel_id,
	    &multi_transfer);
	if (return_code) {
		QM_PRINTF("ERROR: qm_dma_multi_transfer_set_config\n");
	}

	/* Add LLIs for first half of tx_data (blocks A and B). */
	multi_transfer.source_address = (uint32_t *)&tx_data_multiblock[0];
	multi_transfer.destination_address =
	    (uint32_t *)&rx_data[0][RX_BUFF_SIZE - (strlen(tx_data_multiblock) /
						    MULTIBLOCK_NUM_BUFFERS)];
	multi_transfer.linked_list_first =
	    &lli_buf[MULTIBLOCK_NUM_LLI_PER_BUFFER];
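	/* This pair of LLIs follows the pair used above; the driver chains
	 * both configurations, so one start call runs all 4 blocks. */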
	return_code = qm_dma_multi_transfer_set_config(
	    p_chan_desc->controller_id, p_chan_desc->channel_id,
	    &multi_transfer);
	if (return_code) {
		QM_PRINTF("ERROR: qm_dma_multi_transfer_set_config\n");
	}

	irq_fired = false;
	return_code = qm_dma_transfer_start(p_chan_desc->controller_id,
					    p_chan_desc->channel_id);
	if (return_code) {
		QM_PRINTF("ERROR: qm_dma_transfer_start\n");
	}

	/* Wait for completion callback. */
	while (!irq_fired)
		;
}
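The busy-wait above relies on a DMA client callback setting irq_fired. A minimal sketch of such a callback, assuming irq_fired is declared volatile and was registered through the channel configuration; the signature follows QMSI's client_callback (context pointer, transferred length, error code), and the error message text is an assumption:

#include "qm_common.h"

static volatile bool irq_fired = false;

/* Invoked by the QMSI DMA driver when the multiblock transfer completes
 * (or fails). */
static void transfer_done_callback(void *callback_context, uint32_t len,
				   int error_code)
{
	if (error_code) {
		QM_PRINTF("ERROR: DMA transfer failed\n");
	}
	irq_fired = true;
}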
Example #3
File: qm_spi.c Project: jeez/qmsi
int qm_spi_dma_transfer(const qm_spi_t spi,
			const qm_spi_async_transfer_t *const xfer)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);
	QM_CHECK(xfer->tx_len
		     ? (xfer->tx &&
			dma_context_tx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	QM_CHECK(xfer->rx_len
		     ? (xfer->rx &&
			dma_context_rx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX ? (xfer->tx && xfer->rx) : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
		     ? (xfer->tx_len == xfer->rx_len)
		     : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->tx_len && !xfer->rx_len)
					      : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->rx_len && !xfer->tx_len)
					      : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
		     ? (xfer->tx_len && xfer->rx_len)
		     : 1,
		 -EINVAL);
	QM_CHECK(dma_core[spi] < QM_DMA_NUM, -EINVAL);

	int ret;
	qm_dma_transfer_t dma_trans = {0};
	qm_spi_reg_t *const controller = QM_SPI[spi];
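	/* A non-zero SSIENR means the controller is still busy with a
	 * previous transfer. */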
	if (0 != controller->ssienr) {
		return -EBUSY;
	}

	/* Mask all SPI interrupts; in DMA mode completion is reported
	 * through the DMA callbacks rather than the SPI ISR. */
	controller->imr = QM_SPI_IMR_MASK_ALL;

	if (xfer->rx_len) {
		dma_trans.block_size = xfer->rx_len;
		dma_trans.source_address = (uint32_t *)&controller->dr[0];
		dma_trans.destination_address = (uint32_t *)xfer->rx;
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_rx[spi].dma_channel_id,
		    &dma_trans);
		if (ret) {
			return ret;
		}

		/* In RX-only or EEPROM-read mode, the ctrlr1 register holds
		 * the number of data frames to receive, minus one. */
		controller->ctrlr1 = xfer->rx_len - 1;
	}

	if (xfer->tx_len) {
		dma_trans.block_size = xfer->tx_len;
		dma_trans.source_address = (uint32_t *)xfer->tx;
		dma_trans.destination_address = (uint32_t *)&controller->dr[0];
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_tx[spi].dma_channel_id,
		    &dma_trans);
		if (ret) {
			return ret;
		}
	}

	/* Keep the transfer pointer so the user callback and transfer
	 * client ID can be retrieved when the DMA completes. */
	spi_async_transfer[spi] = xfer;

	/* Enable the SPI device. */
	controller->ssienr = QM_SPI_SSIENR_SSIENR;

	if (xfer->rx_len) {
		/* Enable receive DMA. */
		controller->dmacr |= QM_SPI_DMACR_RDMAE;

		/* Set the DMA receive threshold. */
		controller->dmardlr = SPI_DMARDLR_DMARDL;

		dma_context_rx[spi].cb_pending = true;

		ret = qm_dma_transfer_start(dma_core[spi],
					    dma_context_rx[spi].dma_channel_id);
		if (ret) {
			dma_context_rx[spi].cb_pending = false;

			/* Disable DMA setting and SPI controller. */
			controller->dmacr = 0;
			controller->ssienr = 0;
			return ret;
		}

		if (!xfer->tx_len) {
			/* In RX-only mode, an initial dummy frame must be
			 * written to start the controller clocking data in. */
			write_frame(spi, (uint8_t *)&tx_dummy_frame);
		}
	}

	if (xfer->tx_len) {
		/* Enable transmit DMA. */
		controller->dmacr |= QM_SPI_DMACR_TDMAE;

		/* Set the DMA transmit threshold. */
		controller->dmatdlr = SPI_DMATDLR_DMATDL;

		dma_context_tx[spi].cb_pending = true;

		ret = qm_dma_transfer_start(dma_core[spi],
					    dma_context_tx[spi].dma_channel_id);
		if (ret) {
			dma_context_tx[spi].cb_pending = false;
			if (xfer->rx_len) {
				/* If a RX transfer was previously started, we
				 * need to stop it - the SPI device will be
				 * disabled when handling the DMA callback. */
				qm_spi_dma_transfer_terminate(spi);
			} else {
				/* Disable DMA setting and SPI controller. */
				controller->dmacr = 0;
				controller->ssienr = 0;
			}
			return ret;
		}
	}

	return 0;
}
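Calling qm_spi_dma_transfer() assumes the TX and RX DMA channels have already been bound to the SPI block, which is what populates dma_context_tx/dma_context_rx. A usage sketch under assumed names and sizes (controller QM_SPI_MST_0, DMA channels 0 and 1, 8-byte buffers; the usual qm_spi_set_config()/qm_spi_slave_select() setup is omitted):

#include "qm_dma.h"
#include "qm_spi.h"

static uint8_t tx_buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
static uint8_t rx_buf[8];

static void spi_dma_example(void)
{
	qm_spi_async_transfer_t xfer = {0};

	/* Bind DMA channels to the SPI block (channel numbers assumed). */
	qm_spi_dma_channel_config(QM_SPI_MST_0, QM_DMA_0, QM_DMA_CHANNEL_0,
				  QM_DMA_MEMORY_TO_PERIPHERAL);
	qm_spi_dma_channel_config(QM_SPI_MST_0, QM_DMA_0, QM_DMA_CHANNEL_1,
				  QM_DMA_PERIPHERAL_TO_MEMORY);

	/* In QM_SPI_TMOD_TX_RX mode, tx_len must equal rx_len (see the
	 * QM_CHECKs above). */
	xfer.tx = tx_buf;
	xfer.tx_len = sizeof(tx_buf);
	xfer.rx = rx_buf;
	xfer.rx_len = sizeof(rx_buf);
	/* xfer.callback / xfer.callback_data would normally be set too. */

	if (qm_spi_dma_transfer(QM_SPI_MST_0, &xfer)) {
		QM_PRINTF("ERROR: qm_spi_dma_transfer\n");
	}
}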