Example #1
static int spi_qmsi_transceive(struct device *dev,
			     const void *tx_buf, uint32_t tx_buf_len,
			     void *rx_buf, uint32_t rx_buf_len)
{
	struct spi_qmsi_config *spi_config = dev->config->config_info;
	qm_spi_t spi = spi_config->spi;
	struct spi_qmsi_runtime *context = dev->driver_data;
	qm_spi_config_t *cfg = &context->cfg;
	uint8_t dfs = frame_size_to_dfs(cfg->frame_size);
	qm_spi_async_transfer_t *xfer;
	qm_rc_t rc;

	/* Only one transfer may be pending per controller at a time. */
	if (pending_transfers[spi].dev)
		return -EBUSY;

	pending_transfers[spi].dev = dev;
	xfer = &pending_transfers[spi].xfer;

	/* QMSI lengths are given in frames; dfs is the frame size in bytes. */
	xfer->rx = rx_buf;
	xfer->rx_len = rx_buf_len / dfs;
	xfer->tx = tx_buf;
	xfer->tx_len = tx_buf_len / dfs;
	xfer->id = spi;
	xfer->tx_callback = spi_qmsi_tx_callback;
	xfer->rx_callback = spi_qmsi_rx_callback;
	xfer->err_callback = spi_qmsi_err_callback;

	if (tx_buf_len == 0)
		cfg->transfer_mode = QM_SPI_TMOD_RX;
	else if (rx_buf_len == 0)
		cfg->transfer_mode = QM_SPI_TMOD_TX;
	else {
		/* FIXME: QMSI expects rx_buf_len and tx_buf_len to
		 * have the same size.
		 */
		cfg->transfer_mode = QM_SPI_TMOD_TX_RX;
	}

	/* Loopback is enabled by setting bit 11 of ctrlr0 directly. */
	if (context->loopback)
		QM_SPI[spi]->ctrlr0 |= BIT(11);

	rc = qm_spi_set_config(spi, cfg);
	if (rc != QM_RC_OK)
		return -EINVAL;

	spi_control_cs(dev, true);

	rc = qm_spi_irq_transfer(spi, xfer);
	if (rc != QM_RC_OK) {
		spi_control_cs(dev, false);
		return -EIO;
	}

	device_sync_call_wait(&context->sync);

	return context->rc ? -EIO : 0;
}
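Example #1 only shows the transceive path. The calling thread blocks in device_sync_call_wait() and is woken once the QMSI completion callbacks registered above (spi_qmsi_tx_callback, spi_qmsi_rx_callback, spi_qmsi_err_callback) run. Those callbacks are not part of the excerpt; the sketch below shows one plausible way they could release the pending-transfer slot and wake the caller. The callback signatures, the helper spi_qmsi_transfer_complete() and the device_sync_call_complete() call are assumptions here, not code taken from the driver, and the TX-complete callback is omitted for brevity.

/* Sketch only: the callback signatures below are assumed from how the
 * xfer fields are assigned above; they are not confirmed by the excerpt.
 */
static void spi_qmsi_transfer_complete(qm_spi_t spi, qm_rc_t rc)
{
	struct device *dev = pending_transfers[spi].dev;
	struct spi_qmsi_runtime *context;

	if (!dev)
		return;
	context = dev->driver_data;

	/* Free the pending-transfer slot, deassert chip select, record the
	 * result and wake the thread blocked in device_sync_call_wait().
	 */
	pending_transfers[spi].dev = NULL;
	spi_control_cs(dev, false);
	context->rc = (rc == QM_RC_OK) ? 0 : -EIO;
	device_sync_call_complete(&context->sync);
}

static void spi_qmsi_rx_callback(uint32_t id, uint32_t len)
{
	spi_qmsi_transfer_complete(id, QM_RC_OK);
}

static void spi_qmsi_err_callback(uint32_t id, qm_rc_t err)
{
	spi_qmsi_transfer_complete(id, err);
}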
Example #2
SOL_API int
sol_spi_transfer(struct sol_spi *spi, const uint8_t *tx, uint8_t *rx,
    size_t count, void (*transfer_cb)(void *cb_data, struct sol_spi *spi,
    const uint8_t *tx, uint8_t *rx, ssize_t status), const void *cb_data)
{
    qm_rc_t ret;

    SOL_NULL_CHECK(spi, -EINVAL);
    SOL_INT_CHECK(count, == 0, -EINVAL);

    if (qm_spi_get_status(spi->bus) == QM_SPI_BUSY)
        return -EBUSY;

    spi->xfer.xfer.tx = (uint8_t *)tx;
    spi->xfer.xfer.tx_len = count;
    spi->xfer.xfer.rx = (uint8_t *)rx;
    spi->xfer.xfer.rx_len = count;
    spi->xfer.xfer.tx_callback = tx_callback;
    spi->xfer.xfer.rx_callback = rx_callback;
    spi->xfer.xfer.err_callback = err_callback;
    spi->xfer.xfer.id = spi->bus;

    spi->xfer.cb = transfer_cb;
    spi->xfer.data = cb_data;

    ret = qm_spi_set_config(spi->bus, &spi->config);
    SOL_EXP_CHECK(ret != QM_RC_OK, -EINVAL);

    ret = qm_spi_slave_select(spi->bus, spi->slave);
    SOL_EXP_CHECK(ret != QM_RC_OK, -EINVAL);

    /* Drive the GPIO chip-select line low to assert it. */
    qm_gpio_clear_pin(spi->slave_select.port, spi->slave_select.pin);

    ret = qm_spi_irq_transfer(spi->bus, &spi->xfer.xfer);
    SOL_EXP_CHECK(ret != QM_RC_OK, -EINVAL);

    /* Remember which sol_spi owns this bus so the callbacks can find it. */
    in_transfer[spi->xfer.xfer.id] = spi;

    return 0;
}
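sol_spi_transfer() is fully asynchronous: it returns 0 once the QMSI IRQ transfer has been started, and the user-supplied transfer_cb reports the final status later. A minimal usage sketch based on the prototype above follows; the callback body, the buffer contents and the helper name start_transfer() are illustrative only, and the caller must keep the buffers alive until the callback runs.

/* Completion callback; the parameter list matches the transfer_cb
 * prototype shown in sol_spi_transfer() above.
 */
static void
on_transfer_done(void *cb_data, struct sol_spi *spi,
    const uint8_t *tx, uint8_t *rx, ssize_t status)
{
    if (status < 0) {
        /* A negative status signals a failed transfer. */
        return;
    }
    /* On success, rx holds the bytes clocked in during the transfer. */
}

static int
start_transfer(struct sol_spi *spi)
{
    /* Static so the buffers outlive the call, as the async API requires. */
    static uint8_t tx[4] = { 0x9f, 0x00, 0x00, 0x00 };
    static uint8_t rx[4];

    return sol_spi_transfer(spi, tx, rx, sizeof(tx), on_transfer_done, NULL);
}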
Example #3
/*  QMSI SPI app example */
int main(void)
{
	/*  Variables */
	qm_spi_config_t cfg;
	qm_spi_transfer_t polled_xfer_desc;
	qm_spi_async_transfer_t irq_xfer_desc;
	unsigned int i;

	for (i = 0; i < BUFFERSIZE; i++) {
		tx_buff[i] = i;
	}

/* Mux out SPI tx/rx pins and enable input for rx. */
#if (QUARK_SE)
	qm_pmux_select(QM_PIN_ID_55, QM_PMUX_FN_1); /* SPI0_M SCK */
	qm_pmux_select(QM_PIN_ID_56, QM_PMUX_FN_1); /* SPI0_M MISO */
	qm_pmux_select(QM_PIN_ID_57, QM_PMUX_FN_1); /* SPI0_M MOSI */
	qm_pmux_select(QM_PIN_ID_58, QM_PMUX_FN_1); /* SPI0_M SS0 */
	qm_pmux_select(QM_PIN_ID_59, QM_PMUX_FN_1); /* SPI0_M SS1 */
	qm_pmux_select(QM_PIN_ID_60, QM_PMUX_FN_1); /* SPI0_M SS2 */
	qm_pmux_select(QM_PIN_ID_61, QM_PMUX_FN_1); /* SPI0_M SS3 */
	qm_pmux_input_en(QM_PIN_ID_56, true);

#elif (QUARK_D2000)
	qm_pmux_select(QM_PIN_ID_0, QM_PMUX_FN_2);  /* SS0 */
	qm_pmux_select(QM_PIN_ID_1, QM_PMUX_FN_2);  /* SS1 */
	qm_pmux_select(QM_PIN_ID_2, QM_PMUX_FN_2);  /* SS2 */
	qm_pmux_select(QM_PIN_ID_3, QM_PMUX_FN_2);  /* SS3 */
	qm_pmux_select(QM_PIN_ID_16, QM_PMUX_FN_2); /* SCK */
	qm_pmux_select(QM_PIN_ID_17, QM_PMUX_FN_2); /* TXD */
	qm_pmux_select(QM_PIN_ID_18, QM_PMUX_FN_2); /* RXD */
	qm_pmux_input_en(QM_PIN_ID_18, true);       /* RXD input */
#else
#error("Unsupported / unspecified processor detected.")
#endif

	/* Enable the clock to the controller. */
	clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M0_REGISTER);

	/*  Initialise SPI configuration */
	cfg.frame_size = QM_SPI_FRAME_SIZE_8_BIT;
	cfg.transfer_mode = QM_SPI_TMOD_TX_RX;
	cfg.bus_mode = QM_SPI_BMODE_0;
	cfg.clk_divider = SPI_CLOCK_DIV;

	qm_spi_set_config(QM_SPI_MST_0, &cfg);

	/* Set up loopback mode by RMW directly to the ctrlr0 register. */
	QM_SPI[QM_SPI_MST_0]->ctrlr0 |= BIT(11);

	qm_spi_slave_select(QM_SPI_MST_0, QM_SPI_SS_0);

	QM_PRINTF("\nStatus = 0x%08x", qm_spi_get_status(QM_SPI_MST_0));

	/* Set up the sync transfer struct and call a polled transfer. */
	polled_xfer_desc.tx = tx_buff;
	polled_xfer_desc.rx = rx_buff;
	polled_xfer_desc.tx_len = BUFFERSIZE;
	polled_xfer_desc.rx_len = BUFFERSIZE;

	qm_spi_transfer(QM_SPI_MST_0, &polled_xfer_desc);

	/* Compare RX and TX buffers */
	/* Also reset the buffers for the IRQ example */
	for (i = 0; i < BUFFERSIZE; i++) {
		QM_CHECK(tx_buff[i] == rx_buff[i], QM_RC_EINVAL);
		tx_buff[i] = i;
		rx_buff[i] = 0;
	}

	/* Set up the async transfer struct. */
	irq_xfer_desc.tx = tx_buff;
	irq_xfer_desc.rx = rx_buff;
	irq_xfer_desc.tx_len = BUFFERSIZE;
	irq_xfer_desc.rx_len = BUFFERSIZE;
	irq_xfer_desc.id = SPI_EXAMPLE_IRQ_ID;
	irq_xfer_desc.rx_callback = spi_rx_example_cb;
	irq_xfer_desc.tx_callback = spi_tx_example_cb;
	irq_xfer_desc.err_callback = spi_err_example_cb;

	/* Register driver IRQ. */
	qm_irq_request(QM_IRQ_SPI_MASTER_0, qm_spi_master_0_isr);

	/* Start the async (irq-based) transfer. */
	qm_spi_irq_transfer(QM_SPI_MST_0, &irq_xfer_desc);

	/* Spin here; completion is reported by the IRQ callbacks. */
	while (1)
		;

	return 0;
}
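The interrupt-driven half of this example depends on the three callbacks registered in irq_xfer_desc, which are not shown. The sketch below illustrates what they might do; the callback signatures (an id/length pair for the data callbacks, an id/error-code pair for the error callback) are assumptions made to match the fields set above, not code from the QMSI example.

/* Sketch only: callback signatures are assumed, see the note above. */
static void spi_rx_example_cb(uint32_t id, uint32_t len)
{
	unsigned int i;

	/* RX finished: check the loopback data, as in the polled transfer. */
	for (i = 0; i < BUFFERSIZE; i++) {
		if (tx_buff[i] != rx_buff[i]) {
			QM_PRINTF("\nIRQ transfer: mismatch at index %u", i);
			return;
		}
	}
	QM_PRINTF("\nIRQ transfer complete");
}

static void spi_tx_example_cb(uint32_t id, uint32_t len)
{
	QM_PRINTF("\nIRQ TX done");
}

static void spi_err_example_cb(uint32_t id, qm_rc_t err)
{
	QM_PRINTF("\nSPI error on controller %u: %u", id, (unsigned int)err);
}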
Example #4
static int spi_qmsi_transceive(struct device *dev,
			       const void *tx_buf, u32_t tx_buf_len,
			       void *rx_buf, u32_t rx_buf_len)
{
	const struct spi_qmsi_config *spi_config = dev->config->config_info;
	qm_spi_t spi = spi_config->spi;
	struct spi_qmsi_runtime *context = dev->driver_data;
	qm_spi_config_t *cfg = &context->cfg;
	u8_t dfs = frame_size_to_dfs(cfg->frame_size);
	qm_spi_async_transfer_t *xfer;
	int rc;

	/* The semaphore protects the pending_transfers table while this
	 * controller is being claimed.
	 */
	k_sem_take(&context->sem, K_FOREVER);
	if (pending_transfers[spi].dev) {
		k_sem_give(&context->sem);
		return -EBUSY;
	}
	pending_transfers[spi].dev = dev;
	k_sem_give(&context->sem);

	device_busy_set(dev);

	xfer = &pending_transfers[spi].xfer;

	xfer->rx = rx_buf;
	xfer->rx_len = rx_buf_len / dfs;
	/* This cast is necessary to drop the "const" modifier, since QMSI xfer
	 * does not take a const pointer.
	 */
	xfer->tx = (u8_t *)tx_buf;
	xfer->tx_len = tx_buf_len / dfs;
	xfer->callback_data = dev;
	xfer->callback = transfer_complete;

	if (tx_buf_len == 0) {
		cfg->transfer_mode = QM_SPI_TMOD_RX;
	} else if (rx_buf_len == 0) {
		cfg->transfer_mode = QM_SPI_TMOD_TX;
	} else {
		/* FIXME: QMSI expects rx_buf_len and tx_buf_len to
		 * have the same size.
		 */
		cfg->transfer_mode = QM_SPI_TMOD_TX_RX;
	}

	if (context->loopback) {
		QM_SPI[spi]->ctrlr0 |= BIT(11);
	}

	rc = qm_spi_set_config(spi, cfg);
	if (rc != 0) {
		device_busy_clear(dev);
		return -EINVAL;
	}

	spi_control_cs(dev, true);

	rc = qm_spi_irq_transfer(spi, xfer);
	if (rc != 0) {
		spi_control_cs(dev, false);
		device_busy_clear(dev);
		return -EIO;
	}
	k_sem_take(&context->device_sync_sem, K_FOREVER);

	device_busy_clear(dev);

	return context->rc ? -EIO : 0;
}
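Example #4 is a later revision of the Zephyr shim shown in Example #1: the three per-event callbacks are replaced by a single completion callback with a callback_data pointer, and the blocking wait now uses a kernel semaphore. The transfer_complete() routine itself is not in the excerpt; the sketch below shows one way it could be written. Its parameter list is an assumption derived from how xfer->callback and xfer->callback_data are assigned, not the driver's actual code.

/* Sketch only: the parameter list is assumed, see the note above. */
static void transfer_complete(void *data, int error, qm_spi_status_t status,
			      uint16_t len)
{
	struct device *dev = data;
	const struct spi_qmsi_config *spi_config = dev->config->config_info;
	struct spi_qmsi_runtime *context = dev->driver_data;

	/* Release the pending-transfer slot so a new transceive can start. */
	pending_transfers[spi_config->spi].dev = NULL;

	/* Deassert chip select, record the result and wake the thread
	 * blocked on device_sync_sem in spi_qmsi_transceive().
	 */
	spi_control_cs(dev, false);
	context->rc = error;
	k_sem_give(&context->device_sync_sem);
}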