static int spi_qmsi_transceive(struct device *dev,
			       const void *tx_buf, uint32_t tx_buf_len,
			       void *rx_buf, uint32_t rx_buf_len)
{
	struct spi_qmsi_config *spi_config = dev->config->config_info;
	qm_spi_t spi = spi_config->spi;
	struct spi_qmsi_runtime *context = dev->driver_data;
	qm_spi_config_t *cfg = &context->cfg;
	uint8_t dfs = frame_size_to_dfs(cfg->frame_size);
	qm_spi_async_transfer_t *xfer;
	qm_rc_t rc;

	if (pending_transfers[spi].dev) {
		return -EBUSY;
	}
	pending_transfers[spi].dev = dev;

	xfer = &pending_transfers[spi].xfer;

	xfer->rx = rx_buf;
	xfer->rx_len = rx_buf_len / dfs;
	xfer->tx = tx_buf;
	xfer->tx_len = tx_buf_len / dfs;
	xfer->id = spi;
	xfer->tx_callback = spi_qmsi_tx_callback;
	xfer->rx_callback = spi_qmsi_rx_callback;
	xfer->err_callback = spi_qmsi_err_callback;

	if (tx_buf_len == 0) {
		cfg->transfer_mode = QM_SPI_TMOD_RX;
	} else if (rx_buf_len == 0) {
		cfg->transfer_mode = QM_SPI_TMOD_TX;
	} else {
		/* FIXME: QMSI expects rx_buf_len and tx_buf_len to
		 * have the same size.
		 */
		cfg->transfer_mode = QM_SPI_TMOD_TX_RX;
	}

	if (context->loopback) {
		QM_SPI[spi]->ctrlr0 |= BIT(11);
	}

	rc = qm_spi_set_config(spi, cfg);
	if (rc != QM_RC_OK) {
		return -EINVAL;
	}

	spi_control_cs(dev, true);

	rc = qm_spi_irq_transfer(spi, xfer);
	if (rc != QM_RC_OK) {
		spi_control_cs(dev, false);
		return -EIO;
	}

	device_sync_call_wait(&context->sync);

	return context->rc ? -EIO : 0;
}
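/*
 * Every listing in this section blocks on device_sync_call_wait() and relies
 * on the driver's ISR or completion callback to wake the waiting thread. The
 * completing side is not shown in these excerpts, so the fragment below is a
 * minimal sketch of that pairing. The callback name, its prototype, and the
 * assumption that the QMSI callback receives the controller id are
 * illustrative only; they are not taken from the actual driver.
 */
static void spi_qmsi_tx_callback_sketch(uint32_t id, uint32_t len)
{
	/* Hypothetical lookup of the device that started the transfer */
	struct device *dev = pending_transfers[id].dev;
	struct spi_qmsi_runtime *context = dev->driver_data;

	/* Record the outcome for the waiting thread... */
	context->rc = 0;

	/* ...deassert chip select, free the slot, and release the waiter */
	spi_control_cs(dev, false);
	pending_transfers[id].dev = NULL;
	device_sync_call_complete(&context->sync);
}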
static int spi_dw_transceive(struct device *dev,
			     const void *tx_buf, uint32_t tx_buf_len,
			     void *rx_buf, uint32_t rx_buf_len)
{
	struct spi_dw_config *info = dev->config->config_info;
	struct spi_dw_data *spi = dev->driver_data;
	uint32_t rx_thsld = DW_SPI_RXFTLR_DFLT;

	DBG("%s: %p, %p, %u, %p, %u\n",
	    __func__, dev, tx_buf, tx_buf_len, rx_buf, rx_buf_len);

	/* Check status */
	if (!_spi_dw_is_controller_ready(dev)) {
		DBG("%s: Controller is busy\n", __func__);
		return DEV_USED;
	}

	/* Set buffers info */
	spi->tx_buf = tx_buf;
	spi->tx_buf_len = tx_buf_len / spi->dfs;
	spi->rx_buf = rx_buf;
	spi->rx_buf_len = rx_buf_len / spi->dfs;
	spi->fifo_diff = 0;

	/* Tx threshold, always at default */
	write_txftlr(DW_SPI_TXFTLR_DFLT, info->regs);

	/* Does the Rx threshold need to be lower? */
	if (rx_buf_len && spi->rx_buf_len < DW_SPI_FIFO_DEPTH) {
		rx_thsld = spi->rx_buf_len - 1;
	} else if (!rx_buf_len && spi->tx_buf_len < DW_SPI_FIFO_DEPTH) {
		rx_thsld = spi->tx_buf_len - 1;
	}
	write_rxftlr(rx_thsld, info->regs);

	/* Slave select */
	write_ser(spi->slave, info->regs);
	_spi_control_cs(dev, 1);

	/* Enable interrupts */
	write_imr(DW_SPI_IMR_UNMASK, info->regs);

	/* Enable the controller */
	set_bit_ssienr(info->regs);

	/* Wait for the ISR to signal end of transfer */
	device_sync_call_wait(&spi->sync);

	if (spi->error) {
		spi->error = 0;
		return DEV_FAIL;
	}

	return DEV_OK;
}
static int i2c_dw_transfer(struct device *dev,
			   struct i2c_msg *msgs, uint8_t num_msgs,
			   uint16_t slave_address)
{
	struct i2c_dw_rom_config const * const rom = dev->config->config_info;
	struct i2c_dw_dev_config * const dw = dev->driver_data;
	struct i2c_msg *cur_msg = msgs;
	uint8_t msg_left = num_msgs;
	uint8_t pflags;
	int ret;

	volatile struct i2c_dw_registers * const regs =
		(struct i2c_dw_registers *)rom->base_address;

	/* Why bother processing no messages */
	if (!msgs || !num_msgs) {
		return -ENOTSUP;
	}

	/* First step, check if there is current activity */
	if ((regs->ic_status.bits.activity) || (dw->state & I2C_DW_BUSY)) {
		return -EIO;
	}

	dw->state |= I2C_DW_BUSY;

	ret = _i2c_dw_setup(dev, slave_address);
	if (ret) {
		dw->state = I2C_DW_STATE_READY;
		return ret;
	}

	/* Enable controller */
	regs->ic_enable.bits.enable = 1;

	/*
	 * While waiting at device_sync_call_wait(), the kernel can switch to
	 * the idle task, which in turn can call the _sys_soc_suspend() hook
	 * of the Power Management App (PMA).
	 * The device_busy_set() call here tells the PMA not to apply PM
	 * policies that would turn off this IP block, which would leave an
	 * ongoing hardware transaction in an inconsistent state.
	 * Note: this is just a sample showing a possible use of the API; it
	 * is up to the driver author to decide whether it is actually needed
	 * here, somewhere else, or not at all, since the driver's
	 * suspend()/resume() hooks may handle everything.
	 */
	device_busy_set(dev);

	/* Process all the messages */
	while (msg_left > 0) {
		pflags = dw->xfr_flags;

		dw->xfr_buf = cur_msg->buf;
		dw->xfr_len = cur_msg->len;
		dw->xfr_flags = cur_msg->flags;
		dw->rx_pending = 0;

		/* Need to RESTART if changing transfer direction */
		if ((pflags & I2C_MSG_RW_MASK)
		    != (dw->xfr_flags & I2C_MSG_RW_MASK)) {
			dw->xfr_flags |= I2C_MSG_RESTART;
		}

		/* Send STOP if this is the last message */
		if (msg_left == 1) {
			dw->xfr_flags |= I2C_MSG_STOP;
		}

		dw->state &= ~(I2C_DW_CMD_SEND | I2C_DW_CMD_RECV);

		if ((dw->xfr_flags & I2C_MSG_RW_MASK) == I2C_MSG_WRITE) {
			dw->state |= I2C_DW_CMD_SEND;
			dw->request_bytes = 0;
		} else {
			dw->state |= I2C_DW_CMD_RECV;
			dw->request_bytes = dw->xfr_len;
		}

		/* Enable interrupts to trigger ISR */
		if (regs->ic_con.bits.master_mode) {
			/* Enable necessary interrupts */
			regs->ic_intr_mask.raw = (DW_ENABLE_TX_INT_I2C_MASTER |
						  DW_ENABLE_RX_INT_I2C_MASTER);
		} else {
			/* Enable necessary interrupts */
			regs->ic_intr_mask.raw = DW_ENABLE_TX_INT_I2C_SLAVE;
		}

		/* Wait for transfer to be done */
		device_sync_call_wait(&dw->sync);

		if (dw->state & I2C_DW_CMD_ERROR) {
			ret = -EIO;
			break;
		}

		/* Something is wrong if there is something left to do */
		if (dw->xfr_len > 0) {
			ret = -EIO;
			break;
		}

		cur_msg++;
		msg_left--;
	}

	device_busy_clear(dev);

	dw->state = I2C_DW_STATE_READY;

	return ret;
}
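/*
 * Callers never invoke i2c_dw_transfer() directly; it sits behind the public
 * i2c API, which dispatches to it. The fragment below is a minimal sketch of
 * such a caller: a one-byte register write followed by a one-byte read on
 * the same slave. The device name "I2C_0", the slave address 0x52 and the
 * helper name are assumptions for illustration; the flags come from the
 * i2c_msg handling shown above.
 */
static int read_sensor_register_sketch(uint8_t reg, uint8_t *value)
{
	struct device *i2c_dev = device_get_binding("I2C_0");
	struct i2c_msg msgs[2];

	if (!i2c_dev) {
		return -ENODEV;
	}

	/* First message: write the register address */
	msgs[0].buf = &reg;
	msgs[0].len = 1;
	msgs[0].flags = I2C_MSG_WRITE;

	/* Second message: read one byte back. The driver inserts RESTART
	 * itself because the direction changes, and adds STOP because this
	 * is the last message.
	 */
	msgs[1].buf = value;
	msgs[1].len = 1;
	msgs[1].flags = I2C_MSG_READ;

	return i2c_transfer(i2c_dev, msgs, 2, 0x52);
}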
static int adc_qmsi_read(struct device *dev, struct adc_seq_table *seq_tbl)
{
	int i, ret = 0;
	qm_adc_xfer_t xfer;
	qm_adc_config_t cfg;

	struct adc_info *info = dev->driver_data;

	if (qm_adc_get_config(QM_ADC_0, &cfg) != QM_RC_OK) {
		return -ENOTSUP;
	}

	for (i = 0; i < seq_tbl->num_entries; i++) {
		xfer.ch = (qm_adc_channel_t *)&seq_tbl->entries[i].channel_id;
		/* Just one channel at a time using the Zephyr sequence table */
		xfer.ch_len = 1;
		xfer.samples = (uint32_t *)seq_tbl->entries[i].buffer;
		xfer.samples_len = (seq_tbl->entries[i].buffer_length) >> 2;
		xfer.complete_callback = complete_callback;
		xfer.error_callback = error_callback;

		cfg.window = seq_tbl->entries[i].sampling_delay;

		adc_lock(info);

		if (qm_adc_set_config(QM_ADC_0, &cfg) != QM_RC_OK) {
			ret = -EINVAL;
			adc_unlock(info);
			break;
		}

		/* ADC info used by the callbacks */
		adc_context = info;

		/* This is the interrupt-driven API: it will generate an
		 * interrupt and call the complete_callback function once the
		 * samples have been obtained.
		 */
		if (qm_adc_irq_convert(QM_ADC_0, &xfer) != QM_RC_OK) {
			adc_context = NULL;
			ret = -EIO;
			adc_unlock(info);
			break;
		}

		/* Wait for the interrupt to finish */
		device_sync_call_wait(&info->sync);

		if (info->state == ADC_STATE_ERROR) {
			ret = -EIO;
			adc_unlock(info);
			break;
		}
		adc_context = NULL;

		/* Successful analog-to-digital conversion */
		adc_unlock(info);
	}

	return ret;
}
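/*
 * The sequence table that adc_qmsi_read() walks is supplied by the caller
 * through the public adc API. Below is a minimal sketch of a single-entry
 * table; the device name "ADC_0", the channel number and the delay value are
 * assumptions. Note that the driver derives the sample count by dividing the
 * buffer length by 4, so the buffer is sized in bytes.
 */
static uint8_t adc_buffer[8];

static int sample_one_channel_sketch(void)
{
	struct device *adc_dev = device_get_binding("ADC_0");
	struct adc_seq_entry entry = {
		.sampling_delay = 12,
		.channel_id = 0,
		.buffer = adc_buffer,
		.buffer_length = sizeof(adc_buffer),
	};
	struct adc_seq_table table = {
		.entries = &entry,
		.num_entries = 1,
	};

	if (!adc_dev) {
		return -ENODEV;
	}

	adc_enable(adc_dev);

	/* Blocks in adc_qmsi_read() until the conversion callback fires */
	return adc_read(adc_dev, &table);
}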
static int i2c_qse_ss_intr_transfer(struct device *dev,
				    struct i2c_msg *msgs, uint8_t num_msgs,
				    uint16_t slave_address)
{
	struct i2c_qse_ss_dev_config * const dw = dev->driver_data;
	struct i2c_msg *cur_msg = msgs;
	uint8_t msg_left = num_msgs;
	uint8_t pflags;
	int ret;

	/* Why bother processing no messages */
	if (!msgs || !num_msgs) {
		return DEV_INVALID_OP;
	}

	/* First step, check if device is idle */
	if (_i2c_qse_ss_is_busy(dev) || (dw->state & I2C_QSE_SS_BUSY)) {
		return DEV_USED;
	}

	dw->state |= I2C_QSE_SS_BUSY;

	ret = _i2c_qse_ss_setup(dev, slave_address);
	if (ret) {
		dw->state = I2C_QSE_SS_STATE_READY;
		return ret;
	}

	/* To prevent RESTART for first message */
	dw->xfr_flags = msgs[0].flags;

	/* Enable controller */
	_i2c_qse_ss_reg_write_or(dev, REG_CON, IC_CON_ENABLE);

	/* Process all the messages */
	while (msg_left > 0) {
		pflags = dw->xfr_flags;

		dw->xfr_buf = cur_msg->buf;
		dw->xfr_len = cur_msg->len;
		dw->xfr_flags = cur_msg->flags;
		dw->rx_pending = 0;

		/* Need to RESTART if changing transfer direction */
		if ((pflags & I2C_MSG_RW_MASK)
		    != (dw->xfr_flags & I2C_MSG_RW_MASK)) {
			dw->xfr_flags |= I2C_MSG_RESTART;
		}

		/* Send STOP if this is the last message */
		if (msg_left == 1) {
			dw->xfr_flags |= I2C_MSG_STOP;
		}

		dw->state &= ~(I2C_QSE_SS_CMD_SEND | I2C_QSE_SS_CMD_RECV);

		if ((dw->xfr_flags & I2C_MSG_RW_MASK) == I2C_MSG_WRITE) {
			dw->state |= I2C_QSE_SS_CMD_SEND;
			dw->request_bytes = 0;
		} else {
			dw->state |= I2C_QSE_SS_CMD_RECV;
			dw->request_bytes = dw->xfr_len;
		}

		/* Enable interrupts to trigger ISR */
		_i2c_qse_ss_reg_write(dev, REG_INTR_MASK,
				      (IC_INTR_MASK_TX | IC_INTR_MASK_RX));

		/* Wait for transfer to be done */
		device_sync_call_wait(&dw->sync);

		if (dw->state & I2C_QSE_SS_CMD_ERROR) {
			ret = DEV_FAIL;
			break;
		}

		/* Something wrong if there is something left to do */
		if (dw->xfr_len > 0) {
			ret = DEV_FAIL;
			break;
		}

		cur_msg++;
		msg_left--;
	}

	dw->state = I2C_QSE_SS_STATE_READY;

	return ret;
}
/**
 * @brief Read and/or write a defined amount of data through an SPI driver
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param tx_buf Memory buffer that data should be transferred from
 * @param tx_buf_len Size of the memory buffer available for reading from
 * @param rx_buf Memory buffer that data should be transferred to
 * @param rx_buf_len Size of the memory buffer available for writing to
 *
 * @return DEV_OK if successful, another DEV_* code otherwise.
 */
static int spi_k64_transceive(struct device *dev,
			      uint8_t *tx_buf, uint32_t tx_buf_len,
			      uint8_t *rx_buf, uint32_t rx_buf_len)
{
	struct spi_k64_config *info = dev->config->config_info;
	struct spi_k64_data *spi_data = dev->driver_data;
	uint32_t int_config;	/* interrupt configuration */

	DBG("spi_k64_transceive: dev %p, Tx buf %p, ", dev, tx_buf);
	DBG("Tx len %u, Rx buf %p, Rx len %u\n",
	    tx_buf_len, rx_buf, rx_buf_len);

	/* Check parameters */
	if ((tx_buf_len && (tx_buf == NULL)) ||
	    (rx_buf_len && (rx_buf == NULL))) {
		DBG("spi_k64_transceive: ERROR - NULL buffer\n");
		return DEV_INVALID_OP;
	}

	/* Check Tx FIFO status */
	if (tx_buf_len &&
	    ((sys_read32(info->regs + SPI_K64_REG_SR) &
	      SPI_K64_SR_TFFF) == 0)) {
		DBG("spi_k64_transceive: Tx FIFO is full\n");
		return DEV_USED;
	}

	/* Set buffers info */
	spi_data->tx_buf = tx_buf;
	spi_data->tx_buf_len = tx_buf_len;
	spi_data->rx_buf = rx_buf;
	spi_data->rx_buf_len = rx_buf_len;

	/* Enable transfer operations - must be done before enabling
	 * interrupts.
	 */
	spi_k64_start(dev);

	/*
	 * Enable interrupts:
	 * - Transmit FIFO Fill (Tx FIFO not full); and/or
	 * - Receive FIFO Drain (Rx FIFO not empty);
	 *
	 * Note: DMA requests are not supported.
	 */
	int_config = sys_read32(info->regs + SPI_K64_REG_RSER);

	if (tx_buf_len) {
		int_config |= SPI_K64_RSER_TFFF_RE;
	}

	if (rx_buf_len) {
		int_config |= SPI_K64_RSER_RFDF_RE;
	}

	sys_write32(int_config, (info->regs + SPI_K64_REG_RSER));

	/* Wait for transfer to complete */
	device_sync_call_wait(&spi_data->sync_info);

	/* Check completion status */
	if (spi_data->error) {
		spi_data->error = 0;
		return DEV_FAIL;
	}

	return DEV_OK;
}
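/*
 * As with the other drivers above, spi_k64_transceive() is reached through
 * the public spi API of the same Zephyr generation as these listings. The
 * fragment below is a minimal sketch of such a caller; the device name
 * "SPI_0", the slave number, the clock divider and the exact spi_config
 * field values are assumptions for illustration only.
 */
static int spi_loop_once_sketch(void)
{
	struct device *spi_dev = device_get_binding("SPI_0");
	struct spi_config config = {
		.config = SPI_WORD(8),	/* 8-bit frames, mode 0 (assumed) */
		.max_sys_freq = 256,	/* clock divider, assumed value */
	};
	uint8_t tx[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
	uint8_t rx[4];
	int ret;

	if (!spi_dev) {
		return DEV_FAIL;
	}

	ret = spi_configure(spi_dev, &config);
	if (ret != DEV_OK) {
		return ret;
	}

	spi_slave_select(spi_dev, 1);

	/* Blocks until the driver's ISR path signals completion */
	return spi_transceive(spi_dev, tx, sizeof(tx), rx, sizeof(rx));
}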