/**
 * cdns_spi_irq - Interrupt service routine of the SPI controller
 * @irq: IRQ number
 * @dev_id: Pointer to the xspi structure
 *
 * This function handles TX empty and Mode Fault interrupts only.
 * On TX empty interrupt this function reads the received data from RX FIFO and
 * fills the TX FIFO if there is any data remaining to be transferred.
 * On Mode Fault interrupt this function indicates that transfer is completed,
 * the SPI subsystem will identify the error as the remaining bytes to be
 * transferred is non-zero.
 *
 * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
 */
static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct cdns_spi *xspi = spi_master_get_devdata(master);
	u32 intr_status, status;

	status = IRQ_NONE;
	/* Read all pending interrupt causes and acknowledge them in one go */
	intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
	cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);

	if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
		/* Indicate that transfer is completed, the SPI subsystem will
		 * identify the error as the remaining bytes to be
		 * transferred is non-zero
		 */
		cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
			       CDNS_SPI_IXR_DEFAULT_MASK);
		spi_finalize_current_transfer(master);
		status = IRQ_HANDLED;
	} else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
		unsigned long trans_cnt;

		/*
		 * Words already clocked through but not yet drained from the
		 * RX FIFO: rx_bytes counts bytes still to be read back,
		 * tx_bytes counts bytes still to be queued for sending.
		 */
		trans_cnt = xspi->rx_bytes - xspi->tx_bytes;

		/* Read out the data from the RX FIFO */
		while (trans_cnt) {
			u8 data;

			data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
			/* rxbuf may be NULL for TX-only transfers */
			if (xspi->rxbuf)
				*xspi->rxbuf++ = data;
			xspi->rx_bytes--;
			trans_cnt--;
		}

		if (xspi->tx_bytes) {
			/* There is more data to send */
			cdns_spi_fill_tx_fifo(xspi);
		} else {
			/* Transfer is completed */
			cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
				       CDNS_SPI_IXR_DEFAULT_MASK);
			spi_finalize_current_transfer(master);
		}
		status = IRQ_HANDLED;
	}

	return status;
}
static int spi_bitbang_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *transfer) { struct spi_bitbang *bitbang = spi_master_get_devdata(master); int status = 0; if (bitbang->setup_transfer) { status = bitbang->setup_transfer(spi, transfer); if (status < 0) goto out; } if (transfer->len) status = bitbang->txrx_bufs(spi, transfer); if (status == transfer->len) status = 0; else if (status >= 0) status = -EREMOTEIO; out: spi_finalize_current_transfer(master); return status; }
/*
 * interrupt_transfer - IRQ-driven data path for the DesignWare SPI core.
 *
 * On FIFO overrun/underrun the interrupt is cleared (via the ICR read) and
 * the transfer is aborted through int_error_stop().  Otherwise the RX FIFO
 * is drained; once the whole RX range has been consumed the TX-empty
 * interrupt is masked and the transfer is finalized.  While data remains,
 * the TX FIFO is refilled under a masked TX-empty interrupt to avoid
 * re-entering mid-fill.
 */
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		/* Reading ICR clears the latched error interrupt causes */
		dw_readl(dws, DW_SPI_ICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	/* rx == rx_end means every expected RX byte has been consumed */
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		/* Mask while refilling so the fill loop is not re-entered */
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Enable TX irq always, it will be disabled when RX finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}
/*
 * int_error_stop - abort the current transfer after a fatal FIFO error.
 * @dws: driver state
 * @msg: human-readable cause, logged at error level
 *
 * Resets the controller, records -EIO on the in-flight message and signals
 * completion to the SPI core.  Called from the interrupt path.
 */
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_reset_chip(dws);
	dev_err(&dws->master->dev, "%s\n", msg);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
}
static int spi_st_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) { struct spi_st *spi_st = spi_master_get_devdata(master); uint32_t ctl = 0; /* Setup transfer */ spi_st->tx_ptr = t->tx_buf; spi_st->rx_ptr = t->rx_buf; if (spi->bits_per_word > 8) { /* * Anything greater than 8 bits-per-word requires 2 * bytes-per-word in the RX/TX buffers */ spi_st->bytes_per_word = 2; spi_st->words_remaining = t->len / 2; } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) { /* * If transfer is even-length, and 8 bits-per-word, then * implement as half-length 16 bits-per-word transfer */ spi_st->bytes_per_word = 2; spi_st->words_remaining = t->len / 2; /* Set SSC_CTL to 16 bits-per-word */ ctl = readl_relaxed(spi_st->base + SSC_CTL); writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL); readl_relaxed(spi_st->base + SSC_RBUF); } else { spi_st->bytes_per_word = 1; spi_st->words_remaining = t->len; } reinit_completion(&spi_st->done); /* Start transfer by writing to the TX FIFO */ ssc_write_tx_fifo(spi_st); writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN); /* Wait for transfer to complete */ wait_for_completion(&spi_st->done); /* Restore SSC_CTL if necessary */ if (ctl) writel_relaxed(ctl, spi_st->base + SSC_CTL); spi_finalize_current_transfer(spi->master); return t->len; }
/*
 * img_spfi_dma_tx_cb - DMA completion callback for the TX channel.
 *
 * Waits for the controller to report all activity done, then clears the
 * TX-busy flag under the lock.  The transfer is finalized only when the RX
 * side has also finished, so whichever callback fires last completes it.
 */
static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *priv = data;
	unsigned long irqflags;

	/* Let the hardware drain fully before declaring TX finished */
	spfi_wait_all_done(priv);

	spin_lock_irqsave(&priv->lock, irqflags);
	priv->tx_dma_busy = false;
	if (!priv->rx_dma_busy)
		spi_finalize_current_transfer(priv->master);
	spin_unlock_irqrestore(&priv->lock, irqflags);
}
static void rockchip_spi_dma_rxcb(void *data) { unsigned long flags; struct rockchip_spi *rs = data; spin_lock_irqsave(&rs->lock, flags); rs->state &= ~RXBUSY; if (!(rs->state & TXBUSY)) spi_finalize_current_transfer(rs->master); spin_unlock_irqrestore(&rs->lock, flags); }
static void rockchip_spi_dma_txcb(void *data) { unsigned long flags; struct rockchip_spi *rs = data; /* Wait until the FIFO data completely. */ wait_for_idle(rs); spin_lock_irqsave(&rs->lock, flags); rs->state &= ~TXBUSY; if (!(rs->state & RXBUSY)) spi_finalize_current_transfer(rs->master); spin_unlock_irqrestore(&rs->lock, flags); }
static int xlp_spi_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) { struct xlp_spi_priv *xspi = spi_master_get_devdata(master); int ret = 0; xspi->cs = spi->chip_select; xspi->dev = spi->dev; if (spi_transfer_is_last(master, t)) xspi->cmd_cont = 0; else xspi->cmd_cont = 1; if (xlp_spi_txrx_bufs(xspi, t)) ret = -EIO; spi_finalize_current_transfer(master); return ret; }
static irqreturn_t spi_clps711x_isr(int irq, void *dev_id) { struct spi_master *master = dev_id; struct spi_clps711x_data *hw = spi_master_get_devdata(master); u8 data; /* Handle RX */ data = readb(hw->syncio); if (hw->rx_buf) *hw->rx_buf++ = data; /* Handle TX */ if (--hw->len > 0) { data = hw->tx_buf ? *hw->tx_buf++ : 0; writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio); } else spi_finalize_current_transfer(master); return IRQ_HANDLED; }
/**
 * zynq_qspi_irq - Interrupt service routine of the QSPI controller
 * @irq: IRQ number
 * @dev_id: Pointer to the xqspi structure
 *
 * This function handles TX empty only.
 * On TX empty interrupt this function reads the received data from RX FIFO and
 * fills the TX FIFO if there is any data remaining to be transferred.
 *
 * Return: IRQ_HANDLED when interrupt is handled; IRQ_NONE otherwise.
 */
static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct zynq_qspi *xqspi = spi_master_get_devdata(master);
	u32 intr_status, rxcount, rxindex = 0;
	/* TXD register offsets for 1-, 2- and 3-byte residual writes */
	u8 offset[3] = {ZYNQ_QSPI_TXD_00_01_OFFSET,
			ZYNQ_QSPI_TXD_00_10_OFFSET,
			ZYNQ_QSPI_TXD_00_11_OFFSET};

	/* Read and acknowledge all pending interrupt causes */
	intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);

	if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
	    (intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
		/*
		 * This bit is set when Tx FIFO has < THRESHOLD entries.
		 * We have the THRESHOLD value set to 1,
		 * so this bit indicates Tx FIFO is empty.
		 */
		u32 data;

		/*
		 * Number of FIFO words waiting in the RX FIFO: bytes already
		 * clocked out but not yet read back, rounded up to whole
		 * 32-bit words.
		 */
		rxcount = xqspi->bytes_to_receive - xqspi->bytes_to_transfer;
		rxcount = (rxcount % 4) ? ((rxcount/4) + 1) : (rxcount/4);

		/* Read out the data from the RX FIFO */
		while ((rxindex < rxcount) &&
		       (rxindex < ZYNQ_QSPI_RX_THRESHOLD)) {
			if (xqspi->bytes_to_receive >= 4) {
				/* Full word: copy straight into rxbuf if set,
				 * otherwise discard into the scratch word */
				if (xqspi->rxbuf) {
					(*(u32 *)xqspi->rxbuf) =
						zynq_qspi_read(xqspi,
							ZYNQ_QSPI_RXD_OFFSET);
					xqspi->rxbuf += 4;
				} else {
					data = zynq_qspi_read(xqspi,
							ZYNQ_QSPI_RXD_OFFSET);
				}
				xqspi->bytes_to_receive -= 4;
			} else {
				/* Partial trailing word: helper extracts the
				 * valid bytes and updates bytes_to_receive */
				data = zynq_qspi_read(xqspi,
						      ZYNQ_QSPI_RXD_OFFSET);
				zynq_qspi_copy_read_data(xqspi, data,
						xqspi->bytes_to_receive);
			}
			rxindex++;
		}

		if (xqspi->bytes_to_transfer) {
			if (xqspi->bytes_to_transfer >= 4) {
				/* There is more data to send */
				zynq_qspi_fill_tx_fifo(xqspi,
						ZYNQ_QSPI_RX_THRESHOLD);
			} else if (intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) {
				/* Fewer than 4 bytes left: pack them and use
				 * the width-specific TXD register */
				int tmp;

				tmp = xqspi->bytes_to_transfer;
				zynq_qspi_copy_write_data(xqspi, &data,
						xqspi->bytes_to_transfer);
				if (!xqspi->is_dual || xqspi->is_instr)
					zynq_qspi_write(xqspi,
							offset[tmp - 1], data);
				else {
					/* Dual-parallel data phase splits the
					 * bytes across both flash devices */
					zynq_qspi_tx_dual_parallel(xqspi,
							data, tmp);
				}
			}
		} else {
			/*
			 * If transfer and receive is completed then only send
			 * complete signal.
			 */
			if (!xqspi->bytes_to_receive) {
				zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET,
						ZYNQ_QSPI_IXR_ALL_MASK);
				spi_finalize_current_transfer(master);
				xqspi->is_instr = 0;
			}
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}