static int spi_dw_transceive(struct device *dev, const void *tx_buf, uint32_t tx_buf_len, void *rx_buf, uint32_t rx_buf_len) { struct spi_dw_config *info = dev->config->config_info; struct spi_dw_data *spi = dev->driver_data; uint32_t rx_thsld = DW_SPI_RXFTLR_DFLT; DBG("%s: %p, %p, %u, %p, %u\n", __func__, dev, tx_buf, tx_buf_len, rx_buf, rx_buf_len); /* Check status */ if (!_spi_dw_is_controller_ready(dev)) { DBG("%s: Controller is busy\n", __func__); return DEV_USED; } /* Set buffers info */ spi->tx_buf = tx_buf; spi->tx_buf_len = tx_buf_len/spi->dfs; spi->rx_buf = rx_buf; spi->rx_buf_len = rx_buf_len/spi->dfs; spi->fifo_diff = 0; /* Tx Threshold, always at default */ write_txftlr(DW_SPI_TXFTLR_DFLT, info->regs); /* Does Rx thresholds needs to be lower? */ if (rx_buf_len && spi->rx_buf_len < DW_SPI_FIFO_DEPTH) { rx_thsld = spi->rx_buf_len - 1; } else if (!rx_buf_len && spi->tx_buf_len < DW_SPI_FIFO_DEPTH) { rx_thsld = spi->tx_buf_len - 1; } write_rxftlr(rx_thsld, info->regs); /* Slave select */ write_ser(spi->slave, info->regs); _spi_control_cs(dev, 1); /* Enable interrupts */ write_imr(DW_SPI_IMR_UNMASK, info->regs); /* Enable the controller */ set_bit_ssienr(info->regs); device_sync_call_wait(&spi->sync); if (spi->error) { spi->error = 0; return DEV_FAIL; } return DEV_OK; }
static void push_data(struct device *dev) { struct spi_dw_config *info = dev->config->config_info; struct spi_dw_data *spi = dev->driver_data; uint32_t data = 0; uint32_t f_tx; DBG_COUNTER_INIT(); f_tx = DW_SPI_FIFO_DEPTH - read_txflr(info->regs) - read_rxflr(info->regs) - 1; while (f_tx) { if (spi->tx_buf && spi->tx_buf_len > 0) { switch (spi->dfs) { case 1: data = UNALIGNED_GET((uint8_t *)(spi->tx_buf)); break; case 2: data = UNALIGNED_GET((uint16_t *)(spi->tx_buf)); break; #ifndef CONFIG_ARC case 4: data = UNALIGNED_GET((uint32_t *)(spi->tx_buf)); break; #endif } spi->tx_buf += spi->dfs; spi->tx_buf_len--; } else if (spi->rx_buf && spi->rx_buf_len > 0) { /* No need to push more than necessary */ if (spi->rx_buf_len - spi->fifo_diff <= 0) { break; } data = 0; } else { /* Nothing to push anymore */ break; } write_dr(data, info->regs); f_tx--; spi->fifo_diff++; DBG_COUNTER_INC(); } if (!spi->tx_buf_len && !spi->rx_buf_len) { write_txftlr(0, info->regs); } DBG("Pushed: %d\n", DBG_COUNTER_RESULT()); }
/**
 * Program the TX FIFO threshold register for the next transfer.
 *
 * Master mode always uses the default threshold. In slave mode the
 * threshold is lowered to match a short tx buffer, or zeroed when
 * there is nothing to transmit at all.
 *
 * @param info controller configuration (register base)
 * @param spi  driver run-time data (transfer context)
 */
static void spi_dw_update_txftlr(const struct spi_dw_config *info,
				 struct spi_dw_data *spi)
{
	u32_t threshold = DW_SPI_TXFTLR_DFLT;

	if (spi_dw_is_slave(spi)) {
		u32_t tx_frames = spi->ctx.tx_len;

		if (tx_frames == 0U) {
			threshold = 0U;
		} else if (tx_frames < DW_SPI_TXFTLR_DFLT) {
			threshold = tx_frames - 1;
		}
	}

	LOG_DBG("TxFTLR: %u", threshold);

	write_txftlr(threshold, info->regs);
}
static void push_data(struct device *dev) { const struct spi_dw_config *info = dev->config->config_info; struct spi_dw_data *spi = dev->driver_data; u32_t data = 0; u32_t f_tx; DBG_COUNTER_INIT(); if (spi->rx_buf) { f_tx = DW_SPI_FIFO_DEPTH - read_txflr(info->regs) - read_rxflr(info->regs); if ((int)f_tx < 0) { f_tx = 0; /* if rx-fifo is full, hold off tx */ } } else { f_tx = DW_SPI_FIFO_DEPTH - read_txflr(info->regs); } if (f_tx && (spi->tx_buf_len == 0)) { /* room in fifo, yet nothing to send */ spi->last_tx = 1; /* setting last_tx indicates TX is done */ } while (f_tx) { if (spi->tx_buf && spi->tx_buf_len > 0) { switch (spi->dfs) { case 1: data = UNALIGNED_GET((u8_t *)(spi->tx_buf)); break; case 2: data = UNALIGNED_GET((u16_t *)(spi->tx_buf)); break; #ifndef CONFIG_ARC case 4: data = UNALIGNED_GET((u32_t *)(spi->tx_buf)); break; #endif } spi->tx_buf += spi->dfs; spi->tx_buf_len--; } else if (spi->rx_buf && spi->rx_buf_len > 0) { /* No need to push more than necessary */ if (spi->rx_buf_len - spi->fifo_diff <= 0) { break; } data = 0; } else { /* Nothing to push anymore */ break; } write_dr(data, info->regs); f_tx--; spi->fifo_diff++; DBG_COUNTER_INC(); } if (spi->last_tx) { write_txftlr(0, info->regs); /* prevents any further interrupts demanding TX fifo fill */ } SYS_LOG_DBG("Pushed: %d", DBG_COUNTER_RESULT()); }
/**
 * Transceive entry point for the DW SPI controller.
 *
 * Programs the FIFO thresholds, asserts chip-select, unmasks the
 * relevant interrupts and blocks on the semaphore until the ISR
 * signals completion.
 *
 * @param dev        SPI controller device
 * @param tx_buf     buffer to transmit (may be NULL for rx-only)
 * @param tx_buf_len length of tx_buf in bytes
 * @param rx_buf     buffer to receive into (may be NULL for tx-only)
 * @param rx_buf_len length of rx_buf in bytes
 *
 * @return 0 on success, -EBUSY if the controller is busy,
 *         -EIO on transfer error reported by the ISR.
 */
static int spi_dw_transceive(struct device *dev,
			     const void *tx_buf, u32_t tx_buf_len,
			     void *rx_buf, u32_t rx_buf_len)
{
	const struct spi_dw_config *info = dev->config->config_info;
	struct spi_dw_data *spi = dev->driver_data;
	u32_t rx_thsld = DW_SPI_RXFTLR_DFLT;
	u32_t imask;

	SYS_LOG_DBG("%p, %p, %u, %p, %u",
		    dev, tx_buf, tx_buf_len, rx_buf, rx_buf_len);

	/* Check status */
	if (!_spi_dw_is_controller_ready(dev)) {
		SYS_LOG_DBG("Controller is busy");
		return -EBUSY;
	}

	/* Set buffers info (lengths converted from bytes to frames) */
	spi->tx_buf = tx_buf;
	spi->tx_buf_len = tx_buf_len / spi->dfs;
	spi->rx_buf = rx_buf;
	if (rx_buf) {
		spi->rx_buf_len = rx_buf_len / spi->dfs;
	} else {
		spi->rx_buf_len = 0; /* must be zero if no buffer */
	}
	spi->fifo_diff = 0;
	spi->last_tx = 0;

	/* Tx Threshold */
	write_txftlr(DW_SPI_TXFTLR_DFLT, info->regs);

	/* Does Rx threshold need to be lower?
	 *
	 * Frame counts are unsigned: "count - 1" with count == 0 (a
	 * zero-length tx, or buf_len smaller than one frame) would wrap
	 * to 0xFFFFFFFF and program a bogus threshold; guard the count
	 * explicitly before subtracting.
	 */
	if (spi->rx_buf_len && spi->rx_buf_len < DW_SPI_FIFO_DEPTH) {
		rx_thsld = spi->rx_buf_len - 1;
	} else if (!spi->rx_buf_len && spi->tx_buf_len &&
		   spi->tx_buf_len < DW_SPI_FIFO_DEPTH) {
		rx_thsld = spi->tx_buf_len - 1; /* TODO: why? */
	}

	write_rxftlr(rx_thsld, info->regs);

	/* Slave select */
	write_ser(spi->slave, info->regs);

	_spi_control_cs(dev, 1);

	/* Enable interrupts */
	imask = DW_SPI_IMR_UNMASK;
	if (!rx_buf) {
		/* if there is no rx buffer, keep all rx interrupts masked */
		imask &= DW_SPI_IMR_MASK_RX;
	}

	write_imr(imask, info->regs);

	/* Enable the controller */
	set_bit_ssienr(info->regs);

	/* Block until the ISR wakes us up (success or error) */
	k_sem_take(&spi->device_sync_sem, K_FOREVER);

	if (spi->error) {
		spi->error = 0;
		return -EIO;
	}

	return 0;
}
/**
 * Fill the DW SPI TX FIFO from the current spi_context transfer.
 *
 * Pushes real tx frames while the context still has tx data, then dummy
 * 0x0 frames to keep the clock running while rx data remains, and stops
 * once neither side needs more. fifo_diff tracks frames pushed but not
 * yet popped by the rx path so dummy pushes never exceed what rx expects.
 *
 * @param dev SPI controller device
 */
static void push_data(struct device *dev)
{
	const struct spi_dw_config *info = dev->config->config_info;
	struct spi_dw_data *spi = dev->driver_data;
	u32_t data = 0U;
	u32_t f_tx;

	DBG_COUNTER_INIT();

	if (spi_context_rx_on(&spi->ctx)) {
		/* Free tx room is reduced by the rx FIFO fill level so the
		 * rx FIFO cannot overflow while we keep pushing.
		 */
		f_tx = DW_SPI_FIFO_DEPTH - read_txflr(info->regs) -
			read_rxflr(info->regs);
		if ((int)f_tx < 0) {
			f_tx = 0U; /* if rx-fifo is full, hold off tx */
		}
	} else {
		f_tx = DW_SPI_FIFO_DEPTH - read_txflr(info->regs);
	}

	while (f_tx) {
		if (spi_context_tx_buf_on(&spi->ctx)) {
			/* Read one frame of dfs bytes; UNALIGNED_GET because
			 * the caller's buffer need not be aligned.
			 */
			switch (spi->dfs) {
			case 1:
				data = UNALIGNED_GET((u8_t *)
						     (spi->ctx.tx_buf));
				break;
			case 2:
				data = UNALIGNED_GET((u16_t *)
						     (spi->ctx.tx_buf));
				break;
#ifndef CONFIG_ARC
			case 4:
				data = UNALIGNED_GET((u32_t *)
						     (spi->ctx.tx_buf));
				break;
#endif
			}
		} else if (spi_context_rx_on(&spi->ctx)) {
			/* No need to push more than necessary; the (int)
			 * cast makes the unsigned difference comparable
			 * against zero once fifo_diff catches up.
			 */
			if ((int)(spi->ctx.rx_len - spi->fifo_diff) <= 0) {
				break;
			}

			data = 0U;
		} else if (spi_context_tx_on(&spi->ctx)) {
			/* tx is on but the current buffer is NULL: emit a
			 * dummy frame to keep the transfer clocking.
			 */
			data = 0U;
		} else {
			/* Nothing to push anymore */
			break;
		}

		write_dr(data, info->regs);
		spi_context_update_tx(&spi->ctx, spi->dfs, 1);
		spi->fifo_diff++;

		f_tx--;

		DBG_COUNTER_INC();
	}

	if (!spi_context_tx_on(&spi->ctx)) {
		/* prevents any further interrupts demanding TX fifo fill */
		write_txftlr(0, info->regs);
	}

	LOG_DBG("Pushed: %d", DBG_COUNTER_RESULT());
}