static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { struct omap2_mcspi *mcspi; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi_dma *mcspi_dma; unsigned int count, c; unsigned long base, tx_reg, rx_reg; int word_len, data_type, element_count; int elements; u32 l; u8 * rx; const u8 * tx; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; l = mcspi_cached_chconf0(spi); count = xfer->len; c = count; word_len = cs->word_len; base = cs->phys; tx_reg = base + OMAP2_MCSPI_TX0; rx_reg = base + OMAP2_MCSPI_RX0; rx = xfer->rx_buf; tx = xfer->tx_buf; if (word_len <= 8) { data_type = OMAP_DMA_DATA_TYPE_S8; element_count = count; } else if (word_len <= 16) { data_type = OMAP_DMA_DATA_TYPE_S16; element_count = count >> 1; } else /* word_len <= 32 */ {
static unsigned omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, struct dma_slave_config cfg, unsigned es) { struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; unsigned int count, dma_count; u32 l; int elements = 0; int word_len, element_count; struct omap2_mcspi_cs *cs = spi->controller_state; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; count = xfer->len; dma_count = xfer->len; if (mcspi->fifo_depth == 0) dma_count -= es; word_len = cs->word_len; l = mcspi_cached_chconf0(spi); if (word_len <= 8) element_count = count; else if (word_len <= 16) element_count = count >> 1; else /* word_len <= 32 */
/*
 * Drive (or release) the chip select for @spi by toggling the FORCE bit
 * in CHCONF0, taking a pm_runtime reference around the register access.
 */
static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	u32 chconf;
	int ret;

	/*
	 * The controller handles the inverted chip selects
	 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
	 * the inversion from the core spi_set_cs function.
	 */
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	/* Nothing to program until the channel has been set up. */
	if (!spi->controller_state)
		return;

	ret = pm_runtime_get_sync(mcspi->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(mcspi->dev);
		dev_err(mcspi->dev, "failed to get sync: %d\n", ret);
		return;
	}

	chconf = mcspi_cached_chconf0(spi);
	if (enable)
		chconf &= ~OMAP2_MCSPI_CHCONF_FORCE;
	else
		chconf |= OMAP2_MCSPI_CHCONF_FORCE;
	mcspi_write_chconf0(spi, chconf);

	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
}
/* Manually assert (cs_active != 0) or release the CS FORCE bit in CHCONF0. */
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 chconf = mcspi_cached_chconf0(spi);

	MOD_REG_BIT(chconf, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
	mcspi_write_chconf0(spi, chconf);
}
/*
 * omap2_mcspi_set_fifo - enable or disable the controller FIFO for a transfer
 * @spi:    device the transfer belongs to
 * @t:      the transfer whose length/buffers decide the FIFO setup
 * @enable: nonzero to try enabling the FIFO, 0 to disable it
 *
 * On enable, computes a FIFO depth that evenly divides the transfer
 * (falling back to the disable path when no usable depth exists),
 * programs XFERLEVEL with the word count and per-direction almost-full/
 * almost-empty levels, sets the FFER/FFET bits in CHCONF0 and records
 * the depth in mcspi->fifo_depth.  On disable (or fallback), clears the
 * relevant FFER/FFET bits and zeroes mcspi->fifo_depth.
 */
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
		struct spi_transfer *t, int enable)
{
	struct spi_master *master = spi->master;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	unsigned int wcnt;
	int max_fifo_depth, fifo_depth, bytes_per_word;
	u32 chconf, xferlevel;

	mcspi = spi_master_get_devdata(master);

	chconf = mcspi_cached_chconf0(spi);
	if (enable) {
		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
		/* FIFO can only handle whole words. */
		if (t->len % bytes_per_word != 0)
			goto disable_fifo;

		/* Full-duplex transfers split the FIFO between RX and TX. */
		if (t->rx_buf != NULL && t->tx_buf != NULL)
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
		else
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;

		/* Largest depth that evenly divides the transfer length. */
		fifo_depth = gcd(t->len, max_fifo_depth);
		if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
			goto disable_fifo;

		wcnt = t->len / bytes_per_word;
		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
			goto disable_fifo;

		/* XFERLEVEL: word count in [31:16], AFL in [15:8], AEL in [7:0]. */
		xferlevel = wcnt << 16;
		if (t->rx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFER;
			xferlevel |= (fifo_depth - 1) << 8;
		}
		if (t->tx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFET;
			xferlevel |= fifo_depth - 1;
		}

		/* Program levels before enabling the FIFO bits in CHCONF0. */
		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
		mcspi_write_chconf0(spi, chconf);
		mcspi->fifo_depth = fifo_depth;

		return;
	}

disable_fifo:
	if (t->rx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;

	if (t->tx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;

	mcspi_write_chconf0(spi, chconf);
	mcspi->fifo_depth = 0;
}
/* Manually assert (cs_active != 0) or release the CS FORCE bit in CHCONF0. */
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 chconf = mcspi_cached_chconf0(spi);

	chconf = cs_active ? (chconf | OMAP2_MCSPI_CHCONF_FORCE)
			   : (chconf & ~OMAP2_MCSPI_CHCONF_FORCE);
	mcspi_write_chconf0(spi, chconf);
}
static int omap2_mcspi_set_rxfifo(const struct spi_device *spi, int buf_size, int enable, int bytes_per_wl ) { u32 l, rw, s; unsigned short revert = 0; struct spi_master *master = spi->master; struct omap2_mcspi *mcspi = spi_master_get_devdata(master); buf_size = buf_size/bytes_per_wl; // l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); l = mcspi_cached_chconf0(spi); s = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0); if (enable == 1) { /* FIFO cannot be enabled for both TX and RX * simultaneously */ // if (l & OMAP2_MCSPI_CHCONF_FFET) // return -EPERM; /* Channel needs to be disabled and enabled * for FIFO setting to take affect */ if (s & OMAP2_MCSPI_CHCTRL_EN) { omap2_mcspi_set_enable(spi, 0); revert = 1; } if (buf_size < mcspi->fifo_depth) mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, ((buf_size << 16) | (buf_size - 1) << 8)); else mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, ((buf_size << 16) | //(mcspi->fifo_depth - 1) << 8)); ((mcspi->fifo_depth - 1) << 8) | ((mcspi->fifo_depth - 1) << 0) )); } rw = OMAP2_MCSPI_CHCONF_FFER; MOD_REG_BIT(l, rw, enable); // mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l); mcspi_write_chconf0(spi,l); if (revert) omap2_mcspi_set_enable(spi, 1); return 0; }
/*
 * omap2_mcspi_set_rxfifo - enable or disable the RX FIFO for a transfer
 * @spi:      device the transfer belongs to
 * @buf_size: transfer size in bytes
 * @wl_bytes: bytes per word-length element, used to derive the word count
 * @enable:   1 to enable the RX FIFO; any other value disables it and
 *            resets XFERLEVEL back to just the preserved TX bits
 *
 * Reads XFERLEVEL first so the TX almost-empty level (low byte) set up
 * elsewhere is preserved across the RX update.  The channel must be
 * disabled while FIFO settings change; if it was enabled on entry it is
 * re-enabled before returning.  Always returns 0.
 */
static int omap2_mcspi_set_rxfifo(const struct spi_device *spi, int buf_size,
				  int wl_bytes, int enable)
{
	u32 l, rw, s, xfer_ael;
	unsigned short revert = 0;
	struct spi_master *master = spi->master;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
	u32 wcnt = buf_size/wl_bytes;

	l = mcspi_cached_chconf0(spi);
	s = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);

	/* Read settings for TX FIFO */
	xfer_ael = mcspi_read_reg(master, OMAP2_MCSPI_XFERLEVEL) & 0xff;

	if (enable == 1) {
		/* Channel needs to be disabled and enabled
		 * for FIFO setting to take effect */
		if (s & OMAP2_MCSPI_CHCTRL_EN) {
			omap2_mcspi_set_enable(spi, 0);
			revert = 1;
		}

		/* Almost-full level is capped at the hardware FIFO depth;
		 * the preserved TX level is OR-ed back into the low byte. */
		if (buf_size < mcspi->fifo_depth)
			mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL,
					((wcnt << 16) |
					(buf_size - 1) << 8) | xfer_ael);
		else
			mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL,
					((wcnt << 16) |
					(mcspi->fifo_depth - 1) << 8) |
					xfer_ael);
	} else {
		/* Reset register value for disable case */
		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xfer_ael);
	}

	rw = OMAP2_MCSPI_CHCONF_FFER;
	MOD_REG_BIT(l, rw, enable);
	mcspi_write_chconf0(spi, l);

	if (revert)
		omap2_mcspi_set_enable(spi, 1);

	return 0;
}
/*
 * Enable or disable the DMA request line for one direction of the
 * channel (DMAR for reads when is_read != 0, DMAW for writes).
 */
static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
		int is_read, int enable)
{
	u32 chconf = mcspi_cached_chconf0(spi);
	u32 bit;

	/* 1 is read, 0 write */
	bit = is_read ? OMAP2_MCSPI_CHCONF_DMAR : OMAP2_MCSPI_CHCONF_DMAW;

	MOD_REG_BIT(chconf, bit, enable);
	mcspi_write_chconf0(spi, chconf);
}
/*
 * Manually assert (cs_active != 0) or release the chip select, driving
 * an optional board-defined GPIO and then the CHCONF0 FORCE bit.
 *
 * NOTE(review): a cs_gpios[] entry of 0 is treated as "no GPIO", but 0
 * is a valid GPIO number on some SoCs — confirm boards never use GPIO 0
 * here (gpio_is_valid() would be the usual test).
 * NOTE(review): the GPIO is driven active-low unconditionally; this
 * ignores SPI_CS_HIGH — presumably all users here are active-low.
 */
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 l;
	struct omap2_mcspi* mcspi = spi_master_get_devdata(spi->master);

	/* allow GPIOs as chip select if defined */
	if (mcspi->cs_gpios && mcspi->cs_gpios[spi->chip_select]) {
		int gpio = mcspi->cs_gpios[spi->chip_select];
		gpio_set_value(gpio, !cs_active); /* low active */
	}

	// TXS times out unless we force the CHCONF reg as well
	l = mcspi_cached_chconf0(spi);
	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
	mcspi_write_chconf0(spi, l);
}
static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { struct omap2_mcspi *mcspi; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi_dma *mcspi_dma; unsigned int count, c, bytes_per_transfer; unsigned long base, tx_reg, rx_reg; int word_len, data_type, element_count; int elements = 0, frame_count, sync_type; // LGE_UPDATE_S [email protected] [EBS] For BURST Mode Setting #ifdef LGE_RIL_SPI #ifdef CONFIG_LGE_SPI int chk_set = 0; #endif #endif // LGE_UPDATE_E [email protected] [EBS] u32 l; u8 * rx; const u8 * tx; void __iomem *irqstat_reg; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; irqstat_reg = mcspi->base + mcspi->regs[OMAP2_MCSPI_IRQSTATUS]; l = mcspi_cached_chconf0(spi); count = xfer->len; c = count; word_len = cs->word_len; base = cs->phys; tx_reg = base + mcspi->regs[OMAP2_MCSPI_TX0]; rx_reg = base + mcspi->regs[OMAP2_MCSPI_RX0]; rx = xfer->rx_buf; tx = xfer->tx_buf; if (word_len <= 8) { data_type = OMAP_DMA_DATA_TYPE_S8; element_count = count; bytes_per_transfer = 1; } else if (word_len <= 16) { data_type = OMAP_DMA_DATA_TYPE_S16; element_count = count >> 1; bytes_per_transfer = 2; } else /* word_len <= 32 */ {
static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { struct omap2_mcspi *mcspi; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi_dma *mcspi_dma; unsigned int count, c, bytes_per_transfer; unsigned long base, tx_reg, rx_reg; int word_len, data_type, element_count; int elements = 0, frame_count, sync_type; u32 l; u8 * rx; const u8 * tx; void __iomem *irqstat_reg; long wait_rx = msecs_to_jiffies(1000); long wait_tx = msecs_to_jiffies(1000); dma_addr_t offset_rx, offset_tx; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; irqstat_reg = mcspi->base + mcspi->regs[OMAP2_MCSPI_IRQSTATUS]; l = mcspi_cached_chconf0(spi); count = xfer->len; c = count; word_len = cs->word_len; base = cs->phys; tx_reg = base + mcspi->regs[OMAP2_MCSPI_TX0]; rx_reg = base + mcspi->regs[OMAP2_MCSPI_RX0]; rx = xfer->rx_buf; tx = xfer->tx_buf; if (word_len <= 8) { data_type = OMAP_DMA_DATA_TYPE_S8; element_count = count; bytes_per_transfer = 1; } else if (word_len <= 16) { data_type = OMAP_DMA_DATA_TYPE_S16; element_count = count >> 1; bytes_per_transfer = 2; } else /* word_len <= 32 */ {
static unsigned omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, struct dma_slave_config cfg, unsigned es) { struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; unsigned int count, transfer_reduction = 0; struct scatterlist *sg_out[2]; int nb_sizes = 0, out_mapped_nents[2], ret, x; size_t sizes[2]; u32 l; int elements = 0; int word_len, element_count; struct omap2_mcspi_cs *cs = spi->controller_state; void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; count = xfer->len; /* * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM * it mentions reducing DMA transfer length by one element in master * normal mode. */ if (mcspi->fifo_depth == 0) transfer_reduction = es; word_len = cs->word_len; l = mcspi_cached_chconf0(spi); if (word_len <= 8) element_count = count; else if (word_len <= 16) element_count = count >> 1; else /* word_len <= 32 */