Example no. 1 (score: 0)
File: qm_spi.c — Project: jeez/qmsi
/*
 * Start a DMA-based SPI transfer on controller `spi`.
 *
 * Validates the transfer descriptor against the controller's configured
 * transfer mode (tmode[spi]), programs the RX and/or TX DMA channels,
 * enables the SPI device and kicks off the DMA channel(s).  Completion is
 * reported asynchronously through the DMA callback; this function only
 * returns setup-time errors.
 *
 * Returns 0 on success, -EINVAL on invalid arguments, -EBUSY if the
 * controller is already enabled, or the error code from the failing
 * qm_dma_* call.
 */
int qm_spi_dma_transfer(const qm_spi_t spi,
			const qm_spi_async_transfer_t *const xfer)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);
	/* If a TX leg is requested, its buffer and a previously configured
	 * TX DMA channel (see the dma_context_tx table) must both exist. */
	QM_CHECK(xfer->tx_len
		     ? (xfer->tx &&
			dma_context_tx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	/* Same requirement for the RX leg. */
	QM_CHECK(xfer->rx_len
		     ? (xfer->rx &&
			dma_context_rx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	/* The descriptor must be coherent with the configured transfer
	 * mode: TX_RX needs both buffers with equal lengths, TX-only and
	 * RX-only need exactly one leg, EEPROM-read needs both lengths. */
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX ? (xfer->tx && xfer->rx) : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
		     ? (xfer->tx_len == xfer->rx_len)
		     : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->tx_len && !xfer->rx_len)
					      : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->rx_len && !xfer->tx_len)
					      : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
		     ? (xfer->tx_len && xfer->rx_len)
		     : 1,
		 -EINVAL);
	QM_CHECK(dma_core[spi] < QM_DMA_NUM, -EINVAL);

	int ret;
	qm_dma_transfer_t dma_trans = {0};
	qm_spi_reg_t *const controller = QM_SPI[spi];
	/* A non-zero ssienr means the controller is mid-transfer. */
	if (0 != controller->ssienr) {
		return -EBUSY;
	}

	/* Mask interrupts. */
	controller->imr = QM_SPI_IMR_MASK_ALL;

	/* Configure (but do not yet start) the RX DMA channel:
	 * peripheral data register -> user buffer. */
	if (xfer->rx_len) {
		dma_trans.block_size = xfer->rx_len;
		dma_trans.source_address = (uint32_t *)&controller->dr[0];
		dma_trans.destination_address = (uint32_t *)xfer->rx;
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_rx[spi].dma_channel_id,
		    &dma_trans);
		if (ret) {
			return ret;
		}

		/* In RX-only or EEPROM mode, the ctrlr1 register holds how
		 * many data frames the controller solicits, minus 1. */
		controller->ctrlr1 = xfer->rx_len - 1;
	}

	/* Configure the TX DMA channel: user buffer -> peripheral data
	 * register.  dma_trans is reused; every relevant field is
	 * overwritten here. */
	if (xfer->tx_len) {
		dma_trans.block_size = xfer->tx_len;
		dma_trans.source_address = (uint32_t *)xfer->tx;
		dma_trans.destination_address = (uint32_t *)&controller->dr[0];
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_tx[spi].dma_channel_id,
		    &dma_trans);
		if (ret) {
			return ret;
		}
	}

	/* Transfer pointer kept to extract user callback address and transfer
	 * client id when DMA completes. */
	spi_async_transfer[spi] = xfer;

	/* Enable the SPI device. */
	controller->ssienr = QM_SPI_SSIENR_SSIENR;

	/* Start the RX channel first so no incoming frames are dropped
	 * once the TX side (or the dummy frame below) begins clocking. */
	if (xfer->rx_len) {
		/* Enable receive DMA. */
		controller->dmacr |= QM_SPI_DMACR_RDMAE;

		/* Set the DMA receive threshold. */
		controller->dmardlr = SPI_DMARDLR_DMARDL;

		dma_context_rx[spi].cb_pending = true;

		ret = qm_dma_transfer_start(dma_core[spi],
					    dma_context_rx[spi].dma_channel_id);
		if (ret) {
			dma_context_rx[spi].cb_pending = false;

			/* Disable DMA setting and SPI controller. */
			controller->dmacr = 0;
			controller->ssienr = 0;
			return ret;
		}

		if (!xfer->tx_len) {
			/* In RX-only mode we need to transfer an initial dummy
			 * byte. */
			write_frame(spi, (uint8_t *)&tx_dummy_frame);
		}
	}

	if (xfer->tx_len) {
		/* Enable transmit DMA. */
		controller->dmacr |= QM_SPI_DMACR_TDMAE;

		/* Set the DMA transmit threshold. */
		controller->dmatdlr = SPI_DMATDLR_DMATDL;

		dma_context_tx[spi].cb_pending = true;

		ret = qm_dma_transfer_start(dma_core[spi],
					    dma_context_tx[spi].dma_channel_id);
		if (ret) {
			dma_context_tx[spi].cb_pending = false;
			if (xfer->rx_len) {
				/* If a RX transfer was previously started, we
				 * need to stop it - the SPI device will be
				 * disabled when handling the DMA callback. */
				qm_spi_dma_transfer_terminate(spi);
			} else {
				/* Disable DMA setting and SPI controller. */
				controller->dmacr = 0;
				controller->ssienr = 0;
			}
			return ret;
		}
	}

	return 0;
}
Example no. 2 (score: 0)
/*
 * Translate a generic Zephyr dma_config into QMSI channel and transfer
 * configuration for the given channel, then program both into the QMSI
 * driver.
 *
 * Only single-block transfers are supported (-ENOTSUP otherwise).
 * Returns 0 on success or the error code of the first failing
 * conversion/QMSI call.
 */
static int dma_qmsi_chan_config(struct device *dev, u32_t channel,
				struct dma_config *config)
{
	const struct dma_qmsi_config_info *info = dev->config->config_info;
	struct dma_qmsi_driver_data *data = dev->driver_data;
	qm_dma_channel_config_t chan_cfg = { 0 };
	qm_dma_transfer_t xfer_cfg = { 0 };
	u32_t idx = 0U;
	int err = 0;

	/* This shim only handles a single head block. */
	if (config->block_count != 1) {
		return -ENOTSUP;
	}

	chan_cfg.handshake_interface =
		(qm_dma_handshake_interface_t)config->dma_slot;
	chan_cfg.channel_direction =
		(qm_dma_channel_direction_t)config->channel_direction;

	/* Convert the generic data sizes and burst lengths into the
	 * corresponding QMSI enumeration values; bail out on the first
	 * unsupported value. */
	err = width_index(config->source_data_size, &idx);
	if (err != 0) {
		return err;
	}
	chan_cfg.source_transfer_width = (qm_dma_transfer_width_t)idx;

	err = width_index(config->dest_data_size, &idx);
	if (err != 0) {
		return err;
	}
	chan_cfg.destination_transfer_width = (qm_dma_transfer_width_t)idx;

	err = bst_index(config->dest_burst_length, &idx);
	if (err != 0) {
		return err;
	}
	chan_cfg.destination_burst_length = (qm_dma_burst_length_t)idx;

	err = bst_index(config->source_burst_length, &idx);
	if (err != 0) {
		return err;
	}
	chan_cfg.source_burst_length = (qm_dma_burst_length_t)idx;

	/* TODO: add support for using other DMA transfer types. */
	chan_cfg.transfer_type = QM_DMA_TYPE_SINGLE;

	/* Remember the user's callback so the driver-level callback can
	 * forward completion to it. */
	data->callback_data[channel] = config->callback_arg;
	data->dma_user_callback[channel] = config->dma_callback;

	dma_context[channel].index = channel;
	dma_context[channel].dev = dev;

	chan_cfg.callback_context = &dma_context[channel];
	chan_cfg.client_callback = dma_drv_callback;

	err = qm_dma_channel_set_config(info->instance, channel, &chan_cfg);
	if (err != 0) {
		return err;
	}

	/* Program the (single) block's addresses and size. */
	xfer_cfg.block_size = config->head_block->block_size;
	xfer_cfg.source_address =
		(u32_t *)config->head_block->source_address;
	xfer_cfg.destination_address =
		(u32_t *)config->head_block->dest_address;

	return qm_dma_transfer_set_config(info->instance, channel, &xfer_cfg);
}