/*
 * omap2_mcspi_txrx_dma() - DMA transmit/receive path for one SPI transfer.
 *
 * NOTE(review): this definition is truncated in the visible chunk (it ends
 * inside the word_len <= 32 branch); only comments were added here.
 */
static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi,
				     struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count, c;
	unsigned long base, tx_reg, rx_reg;
	int word_len, data_type, element_count;
	int elements = 0;
	u32 l;
	u8 * rx;
	const u8 * tx;
	void __iomem *chstat_reg;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	l = mcspi_cached_chconf0(spi);

	/* Channel status register, used later to poll transfer state. */
	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	/* Physical addresses of the TX/RX data registers for the DMA engine. */
	base = cs->phys;
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	/* Pick the DMA element size from the configured word length. */
	if (word_len <= 8) {
		data_type = OMAP_DMA_DATA_TYPE_S8;
		element_count = count;
	} else if (word_len <= 16) {
		data_type = OMAP_DMA_DATA_TYPE_S16;
		element_count = count >> 1;
	} else /* word_len <= 32 */ {
/* spi_bitbang requires custom setup_transfer() to be defined if there is a * custom txrx_bufs(). We have nothing to setup here as the SPI IP block * supports just 8 bits per word, and SPI clock can't be changed in software. * Check for 8 bits per word. Chip select delay calculations could be * added here as soon as bitbang_work() can be made aware of the delay value. */ static int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { u8 bits_per_word; u32 hz; struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; hz = (t) ? t->speed_hz : spi->max_speed_hz; if (bits_per_word != 8) { dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", __func__, bits_per_word); return -EINVAL; } if (hz && xspi->speed_hz > hz) { dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n", __func__, hz); return -EINVAL; } return 0; }
/*
 * omap2_mcspi_rx_dma() - set up the RX half of a DMA transfer.
 *
 * NOTE(review): truncated in the visible chunk (ends at the start of the
 * word_len <= 32 branch); only comments were added here.
 */
static unsigned omap2_mcspi_rx_dma(struct spi_device *spi,
				   struct spi_transfer *xfer,
				   struct dma_slave_config cfg,
				   unsigned es)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count;
	u32 l;
	int elements = 0;
	int word_len, element_count;
	struct omap2_mcspi_cs *cs = spi->controller_state;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;
	word_len = cs->word_len;
	l = mcspi_cached_chconf0(spi);

	/* element_count is in words of the configured size. */
	if (word_len <= 8)
		element_count = count;
	else if (word_len <= 16)
		element_count = count >> 1;
	else /* word_len <= 32 */
/*
 * Platform-device teardown for the Stellaris SPI controller.
 * The unwind sequence is order-sensitive: the bitbang queue must stop before
 * the IRQ and MMIO mapping go away, and priv (devdata of master) must not be
 * touched after spi_master_put() drops the last reference.
 */
static int __devexit spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct spi_stellaris_data *priv = spi_master_get_devdata(master);

	/* Stop the transfer queue before releasing hardware resources. */
	spi_bitbang_stop(&priv->bitbang);
#ifndef POLLING_MODE
	free_irq(priv->irq, priv);
#endif
	iounmap(priv->base);
	spi_master_put(master);
	/* res comes from the platform device, so it is still valid here. */
	release_mem_region(res->start, resource_size(res));
	platform_set_drvdata(pdev, NULL);
	disable_ssi_clock();

	return 0;
}
/* Switch the controller out of its reset-default slave mode into master
 * mode, and record the MODULCTRL value in the per-bus context array
 * (presumably so it can be restored after a context loss -- confirm). */
static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
	u32 l;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	/* setup when switching from (reset default) slave mode
	 * to single-channel master mode based on config value
	 */
	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
	/* Clear system-test mode and the master/slave bit (0 = master). */
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
#ifdef CONFIG_SPI_SW_CS
	/* With software chip select, single-channel mode is opt-in. */
	if (mcspi->force_cs_mode)
		MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
#else
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 0);
#endif
	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
	omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
}
/*
 * Platform-device teardown for the STM SPI controller.
 * Order matters: spi_master_put() may free spi_stm (it is the master's
 * devdata), so it must come after the last use of spi_stm's fields.
 */
static int spi_stm_remove(struct platform_device *pdev)
{
	struct spi_stm *spi_stm;
	struct spi_master *master;

	master = platform_get_drvdata(pdev);
	spi_stm = spi_master_get_devdata(master);

	/* Stop the bitbang queue before releasing hardware resources. */
	spi_bitbang_stop(&spi_stm->bitbang);
	clk_disable(spi_stm->clk);
	stm_pad_release(spi_stm->pad_state);
	free_irq(spi_stm->r_irq.start, spi_stm);
	iounmap(spi_stm->base);
	release_mem_region(spi_stm->r_mem.start,
			   resource_size(&spi_stm->r_mem));
	spi_master_put(spi_stm->bitbang.master);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static int spi_qup_remove(struct platform_device *pdev) { struct spi_master *master = dev_get_drvdata(&pdev->dev); struct spi_qup *controller = spi_master_get_devdata(master); int ret; ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) return ret; ret = spi_qup_set_state(controller, QUP_STATE_RESET); if (ret) return ret; spi_qup_release_dma(master); clk_disable_unprepare(controller->cclk); clk_disable_unprepare(controller->iclk); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; }
/*
 * Kick off one SPI transfer and wait for it to finish.
 * The register writes are order-sensitive: the first data byte must be in
 * TXD before the start bit is set in CR.
 */
static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
{
	struct ppc4xx_spi *hw;
	u8 data;

	dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
		t->tx_buf, t->rx_buf, t->len);

	hw = spi_master_get_devdata(spi->master);

	/* Stash transfer state; presumably consumed by the interrupt
	 * handler that signals hw->done -- confirm against the ISR. */
	hw->tx = t->tx_buf;
	hw->rx = t->rx_buf;
	hw->len = t->len;
	hw->count = 0;

	/* send the first byte */
	data = hw->tx ? hw->tx[0] : 0;
	out_8(&hw->regs->txd, data);
	out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);

	/* NOTE(review): no timeout -- a stuck transfer blocks forever. */
	wait_for_completion(&hw->done);

	/* Number of bytes actually transferred, as counted by the ISR. */
	return hw->count;
}
static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) { struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); if (is_on == BITBANG_CS_INACTIVE) { xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); } else if (is_on == BITBANG_CS_ACTIVE) { u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK; if (spi->mode & SPI_CPHA) cr |= XSPI_CR_CPHA; if (spi->mode & SPI_CPOL) cr |= XSPI_CR_CPOL; xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); xspi->write_fn(~(0x0001 << spi->chip_select), xspi->regs + XSPI_SSR_OFFSET); } }
static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) { struct spi_bitbang_cs *cs = spi->controller_state; unsigned nsecs = cs->nsecs; struct spi_bitbang *bitbang; bitbang = spi_master_get_devdata(spi->master); if (bitbang->set_line_direction) { int err; err = bitbang->set_line_direction(spi, !!(t->tx_buf)); if (err < 0) return err; } if (spi->mode & SPI_3WIRE) { unsigned flags; flags = t->tx_buf ? SPI_MASTER_NO_RX : SPI_MASTER_NO_TX; return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags); } return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0); }
static int mx21_config(struct spi_device *spi, struct spi_imx_config *config) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER; unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18; reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) << MX21_CSPICTRL_DR_SHIFT; reg |= config->bpw - 1; if (spi->mode & SPI_CPHA) reg |= MX21_CSPICTRL_PHA; if (spi->mode & SPI_CPOL) reg |= MX21_CSPICTRL_POL; if (spi->mode & SPI_CS_HIGH) reg |= MX21_CSPICTRL_SSPOL; if (spi->cs_gpio < 0) reg |= (spi->cs_gpio + 32) << MX21_CSPICTRL_CS_SHIFT; writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; }
static int bcm2835aux_spi_prepare_message(struct spi_master *master, struct spi_message *msg) { struct spi_device *spi = msg->spi; struct bcm2835aux_spi *bs = spi_master_get_devdata(master); bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE | BCM2835_AUX_SPI_CNTL0_VAR_WIDTH | BCM2835_AUX_SPI_CNTL0_MSBF_OUT; bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN; /* handle all the modes */ if (spi->mode & SPI_CPOL) { bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL; bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_OUT_RISING; } else { bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_IN_RISING; } bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]); bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]); return 0; }
static int p3_enable_clk(struct p3_dev *p3_device) { int ret_val = 0; struct spi_device *spidev = NULL; struct s3c64xx_spi_driver_data *sdd = NULL; /* for defence MULTI-OPEN */ if (p3_device->enabled_clk) { P3_ERR_MSG("%s - clock was ALREADY enabled!\n", __func__); return -EBUSY; } spin_lock_irq(&p3_device->ese_spi_lock); spidev = spi_dev_get(p3_device->spi); spin_unlock_irq(&p3_device->ese_spi_lock); if (spidev == NULL) { P3_ERR_MSG("%s - Failed to get spi dev.\n", __func__); return -1; } sdd = spi_master_get_devdata(spidev->master); if (!sdd){ P3_ERR_MSG("%s - Failed to get spi dev.\n", __func__); return -EFAULT; } pm_runtime_get_sync(&sdd->pdev->dev); /* Enable clk */ /* set spi clock rate */ clk_set_rate(sdd->src_clk, spidev->max_speed_hz * 2); #ifdef FEATURE_ESE_WAKELOCK wake_lock(&p3_device->ese_lock); #endif p3_device->enabled_clk = true; spi_dev_put(spidev); return ret_val; }
static int sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device, struct spi_transfer *t) { struct sifive_spi *spi = spi_master_get_devdata(master); int poll = sifive_spi_prep_transfer(spi, device, t); const u8 *tx_ptr = t->tx_buf; u8 *rx_ptr = t->rx_buf; unsigned int remaining_words = t->len; while (remaining_words) { unsigned int n_words = min(remaining_words, spi->fifo_depth); unsigned int i; /* Enqueue n_words for transmission */ for (i = 0; i < n_words; i++) sifive_spi_tx(spi, tx_ptr++); if (rx_ptr) { /* Wait for transmission + reception to complete */ sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, n_words - 1); sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll); /* Read out all the data from the RX FIFO */ for (i = 0; i < n_words; i++) sifive_spi_rx(spi, rx_ptr++); } else { /* Wait for transmission to complete */ sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll); } remaining_words -= n_words; } return 0; }
static int p61_set_clk(struct p61_dev *p61_device) { int ret_val = 0; struct spi_device *spidev = NULL; struct s3c64xx_spi_driver_data *sdd = NULL; spin_lock_irq(&p61_device->ese_spi_lock); spidev = spi_dev_get(p61_device->spi); spin_unlock_irq(&p61_device->ese_spi_lock); if (spidev == NULL) { pr_err("%s - Failed to get spi dev\n", __func__); return -1; } spidev->max_speed_hz = P61_SPI_CLOCK; sdd = spi_master_get_devdata(spidev->master); if (!sdd) { pr_err("%s - Failed to get spi dev.\n", __func__); return -1; } pm_runtime_get_sync(&sdd->pdev->dev); /* Enable clk */ /* set spi clock rate */ clk_set_rate(sdd->src_clk, spidev->max_speed_hz * 2); p61_device->enabled_clk = true; spi_dev_put(spidev); //CS enable gpio_set_value(p61_device->cspin, 0); usleep_range(50, 70); if (!wake_lock_active(&p61_device->ese_lock)) { pr_info("%s: [NFC-ESE] wake lock.\n", __func__); wake_lock(&p61_device->ese_lock); } return ret_val; }
/*
 * omap2_mcspi_txrx_dma() - DMA transmit/receive path for one SPI transfer.
 *
 * NOTE(review): truncated in the visible chunk (ends inside the
 * word_len <= 32 branch); only comments were added here.
 */
static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi,
				     struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count, c, bytes_per_transfer;
	unsigned long base, tx_reg, rx_reg;
	int word_len, data_type, element_count;
	int elements = 0, frame_count, sync_type;
	u32 l, irq_enable;
	u8 * rx;
	const u8 * tx;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	l = mcspi_cached_chconf0(spi);

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	/* Physical DMA addresses of the data registers; register offsets
	 * come from a per-variant lookup table here. */
	base = cs->phys;
	tx_reg = base + mcspi->regs[OMAP2_MCSPI_TX0];
	rx_reg = base + mcspi->regs[OMAP2_MCSPI_RX0];
	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	/* Pick the DMA element size from the configured word length. */
	if (word_len <= 8) {
		data_type = OMAP_DMA_DATA_TYPE_S8;
		element_count = count;
		bytes_per_transfer = 1;
	} else if (word_len <= 16) {
		data_type = OMAP_DMA_DATA_TYPE_S16;
		element_count = count >> 1;
		bytes_per_transfer = 2;
	} else /* word_len <= 32 */ {
static int bcm53xxspi_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) { struct bcm53xxspi *b53spi = spi_master_get_devdata(master); u8 *buf; size_t left; if (t->tx_buf) { buf = (u8 *)t->tx_buf; left = t->len; while (left) { size_t to_write = min_t(size_t, 16, left); bool cont = left - to_write > 0; bcm53xxspi_buf_write(b53spi, buf, to_write, cont); left -= to_write; buf += to_write; } } if (t->rx_buf) { buf = (u8 *)t->rx_buf; left = t->len; while (left) { size_t to_read = min_t(size_t, 16 - b53spi->read_offset, left); bool cont = left - to_read > 0; bcm53xxspi_buf_read(b53spi, buf, to_read, cont); left -= to_read; buf += to_read; } } return 0; }
/*
 * Run one transfer through the DSPI hardware and block until the
 * end-of-queue interrupt reports completion.
 */
static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);

	dspi->cur_transfer = t;
	dspi->cur_chip = spi_get_ctldata(spi);
	dspi->cs = spi->chip_select;
	dspi->void_write_data = dspi->cur_chip->void_write_data;

	dspi->dataflags = 0;
	dspi->tx = (void *)t->tx_buf;
	dspi->tx_end = dspi->tx + t->len;
	dspi->rx = t->rx_buf;
	dspi->rx_end = dspi->rx + t->len;
	dspi->len = t->len;

	/* Flag missing buffers so the FIFO code can substitute dummy data. */
	if (!dspi->rx)
		dspi->dataflags |= TRAN_STATE_RX_VOID;
	if (!dspi->tx)
		dspi->dataflags |= TRAN_STATE_TX_VOID;

	writel(dspi->cur_chip->mcr_val, dspi->base + SPI_MCR);
	writel(dspi->cur_chip->ctar_val, dspi->base + SPI_CTAR(dspi->cs));
	/* Enable the end-of-queue interrupt. */
	writel(SPI_RSER_EOQFE, dspi->base + SPI_RSER);
	/* NOTE(review): this rewrites the same ctar_val programmed two lines
	 * up; t->speed_hz is never folded into the value -- confirm intent. */
	if (t->speed_hz)
		writel(dspi->cur_chip->ctar_val,
		       dspi->base + SPI_CTAR(dspi->cs));

	dspi_transfer_write(dspi);

	if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
		dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
	dspi->waitflags = 0;

	/* Bytes actually transferred = requested minus remainder. */
	return t->len - dspi->len;
}
/*
 * Run one transfer in interrupt mode and wait for completion. All shared
 * state (tx_buf/rx_buf/tx_len/rx_len) is handed to the ISR under the lock;
 * the lock must be dropped before blocking on the completion.
 */
static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
	int ret = -EBUSY;

	spin_lock_irq(&ddata->lock);

	/* Only one transfer may be in flight at a time. */
	if (ddata->tx_buf || ddata->rx_buf)
		goto out_unlock;

	ddata->tx_buf = t->tx_buf;
	ddata->rx_buf = t->rx_buf;
	/* Byte count: len scaled by bytes-per-word -- assumes t->len is in
	 * words here; TODO confirm against the caller. */
	ddata->tx_len = ddata->rx_len =
		t->len * DIV_ROUND_UP(t->bits_per_word, 8);

	/* Prime the TX FIFO before enabling interrupts. */
	efm32_spi_filltx(ddata);

	reinit_completion(&ddata->done);

	/* Enable TX-buffer-level and RX-data-valid interrupts. */
	efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);

	spin_unlock_irq(&ddata->lock);

	wait_for_completion(&ddata->done);

	spin_lock_irq(&ddata->lock);

	/* Completed amount = requested minus whatever is still outstanding. */
	ret = t->len - max(ddata->tx_len, ddata->rx_len);

	efm32_spi_write32(ddata, 0, REG_IEN);
	ddata->tx_buf = ddata->rx_buf = NULL;

out_unlock:
	spin_unlock_irq(&ddata->lock);

	return ret;
}
/*
 * omap2_mcspi_rx_dma() - set up the RX half of a DMA transfer.
 *
 * NOTE(review): truncated in the visible chunk (ends at the start of the
 * word_len <= 32 branch); only comments were added here.
 */
static unsigned omap2_mcspi_rx_dma(struct spi_device *spi,
				   struct spi_transfer *xfer,
				   struct dma_slave_config cfg,
				   unsigned es)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count, transfer_reduction = 0;
	struct scatterlist *sg_out[2];
	int nb_sizes = 0, out_mapped_nents[2], ret, x;
	size_t sizes[2];
	u32 l;
	int elements = 0;
	int word_len, element_count;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;

	/*
	 * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
	 * it mentions reducing DMA transfer length by one element in master
	 * normal mode.
	 */
	if (mcspi->fifo_depth == 0)
		transfer_reduction = es;

	word_len = cs->word_len;
	l = mcspi_cached_chconf0(spi);

	/* element_count is in words of the configured size. */
	if (word_len <= 8)
		element_count = count;
	else if (word_len <= 16)
		element_count = count >> 1;
	else /* word_len <= 32 */
/*
 * Queue the TX half of a transfer on the dmaengine channel and trigger the
 * controller's TX DMA request.
 */
static void omap2_mcspi_tx_dma(struct spi_device *spi,
			       struct spi_transfer *xfer,
			       struct dma_slave_config cfg)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count;	/* NOTE(review): assigned but unused here */

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		/* Single-entry scatterlist covering the whole TX buffer. */
		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
					     DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT |
					     DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}

	/* NOTE(review): runs even when mcspi_dma->dma_tx is NULL above --
	 * presumably the caller guarantees a channel exists; confirm. */
	dma_async_issue_pending(mcspi_dma->dma_tx);
	/* Raise the controller's TX DMA request line. */
	omap2_mcspi_set_dma_req(spi, 0, 1);
}
/**
 * zynq_qspi_chipselect - Select or deselect the chip select line
 * @qspi: Pointer to the spi_device structure
 * @is_high: Select(0) or deselect (1) the chip select line
 */
static void zynq_qspi_chipselect(struct spi_device *qspi, bool is_high)
{
	struct zynq_qspi *xqspi = spi_master_get_devdata(qspi->master);
	u32 config_reg;
#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
	u32 lqspi_cfg_reg;
#endif

	config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);

	/* Select upper/lower page before asserting CS */
#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
	lqspi_cfg_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET);
	if (qspi->master->flags & SPI_MASTER_U_PAGE)
		lqspi_cfg_reg |= ZYNQ_QSPI_LCFG_U_PAGE_MASK;
	else
		lqspi_cfg_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE_MASK;
	zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, lqspi_cfg_reg);
#endif

	if (is_high) {
		/* Deselect the slave */
		config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
	} else {
		/* Select the slave */
		config_reg &= ~ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
		/* The select field is active low: set every bit in the
		 * field except the one for this chip select. */
		config_reg |= (((~(BIT(qspi->chip_select))) <<
				ZYNQ_QSPI_SS_SHIFT) &
			       ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
		/* presumably marks that an instruction phase follows --
		 * TODO confirm against the transfer path */
		xqspi->is_instr = 1;
	}

	zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}
/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *davinci_spi;
	struct davinci_spi_platform_data *pdata;	/* NOTE(review): unused below */
	u32 data1_reg_val = 0;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (value == BITBANG_CS_INACTIVE) {
		/* Return every chip select to its default (inactive) level. */
		set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);

		data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		/* Spin until the receive buffer reports empty (RXEMPTY set). */
		while ((ioread32(davinci_spi->base + SPIBUF) &
			SPIBUF_RXEMPTY_MASK) == 0)
			cpu_relax();
	}
}
static int p3_disable_clk(struct p3_dev *p3_device) { int ret_val = 0; //unsigned short clock = 0; struct spi_device *spidev = NULL; struct s3c64xx_spi_driver_data *sdd = NULL; if (!p3_device->enabled_clk) { P3_ERR_MSG("%s - clock was not enabled!\n", __func__); return ret_val; } spin_lock_irq(&p3_device->ese_spi_lock); spidev = spi_dev_get(p3_device->spi); spin_unlock_irq(&p3_device->ese_spi_lock); if (spidev == NULL) { P3_ERR_MSG("%s - Failed to get spi dev!\n", __func__); return -1; } sdd = spi_master_get_devdata(spidev->master); if (!sdd){ P3_ERR_MSG("%s - Failed to get spi dev.\n", __func__); return -EFAULT; } p3_device->enabled_clk = false; pm_runtime_put_sync(&sdd->pdev->dev); /* Disable clock */ spi_dev_put(spidev); #ifdef FEATURE_ESE_WAKELOCK if (wake_lock_active(&p3_device->ese_lock)) wake_unlock(&p3_device->ese_lock); #endif return ret_val; }
static int bcm53xxspi_bcma_probe(struct bcma_device *core) { struct bcm53xxspi *b53spi; struct spi_master *master; int err; if (core->bus->drv_cc.core->id.rev != 42) { pr_err("SPI on SoC with unsupported ChipCommon rev\n"); return -ENOTSUPP; } master = spi_alloc_master(&core->dev, sizeof(*b53spi)); if (!master) return -ENOMEM; b53spi = spi_master_get_devdata(master); b53spi->master = master; b53spi->core = core; master->transfer_one = bcm53xxspi_transfer_one; bcma_set_drvdata(core, b53spi); err = devm_spi_register_master(&core->dev, master); if (err) { spi_master_put(master); bcma_set_drvdata(core, NULL); goto out; } /* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */ spi_new_device(master, &bcm53xx_info); out: return err; }
/**
 * cdns_spi_resume - Resume method for the SPI driver
 * @dev: Address of the platform_device structure
 *
 * This function changes the driver state to "ready" by re-enabling the APB
 * and reference clocks and resuming the SPI master.
 *
 * Return: 0 on success and error value on error
 */
static int __maybe_unused cdns_spi_resume(struct device *dev)
{
	struct platform_device *pdev = container_of(dev,
						    struct platform_device,
						    dev);
	struct spi_master *master = platform_get_drvdata(pdev);
	struct cdns_spi *xspi = spi_master_get_devdata(master);
	int ret = 0;

	ret = clk_prepare_enable(xspi->pclk);
	if (ret) {
		dev_err(dev, "Cannot enable APB clock.\n");
		return ret;
	}

	ret = clk_prepare_enable(xspi->ref_clk);
	if (ret) {
		dev_err(dev, "Cannot enable device clock.\n");
		/* BUGFIX: balance clk_prepare_enable() above; the original
		 * called plain clk_disable(), leaving pclk prepared. */
		clk_disable_unprepare(xspi->pclk);
		return ret;
	}

	spi_master_resume(master);

	return 0;
}
static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master, struct spi_device *spi, struct spi_transfer *tfr) { struct bcm2835aux_spi *bs = spi_master_get_devdata(master); /* update statistics */ bs->count_transfer_irq++; /* fill in registers and fifos before enabling interrupts */ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]); bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]); /* fill in tx fifo with data before enabling interrupts */ while ((bs->tx_len) && (bs->pending < 12) && (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & BCM2835_AUX_SPI_STAT_TX_FULL))) { bcm2835aux_wr_fifo(bs); } /* now run the interrupt mode */ return __bcm2835aux_spi_transfer_one_irq(master, spi, tfr); }
/**
 * cdns_spi_chipselect - Select or deselect the chip select line
 * @spi: Pointer to the spi_device structure
 * @enable: Select (1) or deselect (0) the chip select line
 */
static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
{
	struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
	u32 ctrl;

	ctrl = cdns_spi_read(xspi, CDNS_SPI_CR);

	if (!enable) {
		/* Deselect the slave */
		ctrl |= CDNS_SPI_CR_SSCTRL;
	} else {
		/* Select the slave */
		ctrl &= ~CDNS_SPI_CR_SSCTRL;
		if (xspi->is_decoded_cs)
			/* Decoded mode: the field holds the CS number. */
			ctrl |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
				CDNS_SPI_CR_SSCTRL;
		else
			/* One-hot, active-low: clear only this CS bit. */
			ctrl |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
				 CDNS_SPI_SS_SHIFT) & CDNS_SPI_CR_SSCTRL;
	}

	cdns_spi_write(xspi, CDNS_SPI_CR, ctrl);
}
/*
 * Runtime-resume: bring up the AHB and module clocks, release the reset
 * line, then enable the controller in master mode. Uses goto-based unwind
 * so earlier clocks are disabled when a later step fails.
 */
static int sun6i_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sun6i_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(sspi->hclk);
	if (ret) {
		dev_err(dev, "Couldn't enable AHB clock\n");
		goto out;
	}

	ret = clk_prepare_enable(sspi->mclk);
	if (ret) {
		dev_err(dev, "Couldn't enable module clock\n");
		goto err;
	}

	ret = reset_control_deassert(sspi->rstc);
	if (ret) {
		dev_err(dev, "Couldn't deassert the device from reset\n");
		goto err2;
	}

	/* Enable the bus and put the controller into master mode. */
	sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
			SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER |
			SUN6I_GBL_CTL_TP);

	return 0;

err2:
	clk_disable_unprepare(sspi->mclk);
err:
	clk_disable_unprepare(sspi->hclk);
out:
	return ret;
}
/* Resolve an spi_device to its controller's private s3c24xx_spi state. */
static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev)
{
	struct spi_master *master = sdev->master;

	return spi_master_get_devdata(master);
}