static int spi_qup_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer) { struct spi_qup *controller = spi_master_get_devdata(master); unsigned long timeout, flags; int ret = -EIO; ret = spi_qup_io_config(spi, xfer); if (ret) return ret; timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC); timeout = DIV_ROUND_UP(xfer->len * 8, timeout); timeout = 100 * msecs_to_jiffies(timeout); reinit_completion(&controller->done); spin_lock_irqsave(&controller->lock, flags); controller->xfer = xfer; controller->error = 0; controller->rx_bytes = 0; controller->tx_bytes = 0; spin_unlock_irqrestore(&controller->lock, flags); if (spi_qup_set_state(controller, QUP_STATE_RUN)) { dev_warn(controller->dev, "cannot set RUN state\n"); goto exit; } if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) { dev_warn(controller->dev, "cannot set PAUSE state\n"); goto exit; } spi_qup_fifo_write(controller, xfer); if (spi_qup_set_state(controller, QUP_STATE_RUN)) { dev_warn(controller->dev, "cannot set EXECUTE state\n"); goto exit; } if (!wait_for_completion_timeout(&controller->done, timeout)) ret = -ETIMEDOUT; exit: spi_qup_set_state(controller, QUP_STATE_RESET); spin_lock_irqsave(&controller->lock, flags); controller->xfer = NULL; if (!ret) ret = controller->error; spin_unlock_irqrestore(&controller->lock, flags); return ret; }
/*
 * Prime a PIO transfer: bring the QUP into RUN, drop to PAUSE so the
 * output FIFO can be preloaded, then fill the FIFO from @xfer.
 *
 * Returns 0 on success or the error from spi_qup_set_state().
 */
static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	int err;

	err = spi_qup_set_state(controller, QUP_STATE_RUN);
	if (err) {
		dev_warn(controller->dev, "cannot set RUN state\n");
	} else {
		err = spi_qup_set_state(controller, QUP_STATE_PAUSE);
		if (err)
			dev_warn(controller->dev, "cannot set PAUSE state\n");
		else
			spi_qup_fifo_write(controller, xfer);
	}

	return err;
}
/*
 * QUP interrupt handler.
 *
 * Claims the in-flight transfer from @controller->xfer under the lock
 * (setting it to NULL so a concurrent invocation sees no transfer),
 * reads and acknowledges the QUP/SPI error and operational flags,
 * services the FIFOs in PIO mode, then republishes the transfer and
 * any error before signaling completion.
 */
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	/* Take ownership of the current transfer for the duration of the IRQ. */
	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	/*
	 * Ack exactly the flags just read by writing them back.
	 * NOTE(review): these registers appear to be write-one-to-clear —
	 * confirm against the QUP hardware documentation.
	 */
	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	/* Interrupt with no transfer in flight: ack'd above, just report it. */
	if (!xfer) {
		dev_err_ratelimited(controller->dev,
				    "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	/* Decode and log individual QUP FIFO over/underrun conditions. */
	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	/* SPI clock over/underrun errors also fail the transfer. */
	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	/* PIO mode only: drain/refill the FIFOs as the hardware requests. */
	if (!controller->use_dma) {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_fifo_read(controller, xfer);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_fifo_write(controller, xfer);
	}

	/* Republish the transfer and latch any error for transfer_one(). */
	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	/*
	 * Done when all bytes have been received (rx_bytes is advanced by
	 * the FIFO read path) or on any error.
	 */
	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}