irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
    u32 irq_status;
    void __iomem *reg = drv_data->ioaddr;

    irq_status = read_SSSR(reg) & drv_data->mask_sr;
    if (irq_status & SSSR_ROR) {
        pxa2xx_spi_dma_error_stop(drv_data,
                                  "dma_transfer: fifo overrun");
        return IRQ_HANDLED;
    }

    /* Check for false positive timeout */
    if ((irq_status & SSSR_TINT)
            && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
        write_SSSR(SSSR_TINT, reg);
        return IRQ_HANDLED;
    }

    if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

        /* Clear and disable timeout interrupt, do the rest in
         * dma_transfer_complete */
        if (!pxa25x_ssp_comp(drv_data))
            write_SSTO(0, reg);

        /* finish this transfer, start the next */
        pxa2xx_spi_dma_transfer_complete(drv_data);

        return IRQ_HANDLED;
    }

    /* Oops, problem detected */
    return IRQ_NONE;
}
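
For context on how a handler like this gets invoked: the driver's top-level SSP interrupt routine masks the raw SSSR value and defers to the per-mode handler installed in drv_data->transfer_handler (DMA or PIO). The fragment below is a minimal, hypothetical user-space model of that dispatch pattern, not the driver's actual interrupt code; every sim_* name and constant is invented for illustration.

/*
 * Hypothetical user-space model of the transfer-handler dispatch: the
 * top-level ISR masks the raw status and hands off to whichever handler
 * is currently installed.  All names and values here are illustrative.
 */
#include <stdio.h>

typedef int irqreturn_sim_t;            /* stands in for irqreturn_t */
#define IRQ_SIM_NONE    0
#define IRQ_SIM_HANDLED 1

struct sim_driver_data {
	unsigned int sssr;              /* simulated SSSR contents */
	unsigned int mask_sr;           /* bits this mode cares about */
	irqreturn_sim_t (*transfer_handler)(struct sim_driver_data *);
};

static irqreturn_sim_t sim_dma_transfer(struct sim_driver_data *d)
{
	unsigned int irq_status = d->sssr & d->mask_sr;

	if (!irq_status)
		return IRQ_SIM_NONE;    /* nothing for us: the "Oops" path */

	printf("handling masked status 0x%x\n", irq_status);
	return IRQ_SIM_HANDLED;
}

static irqreturn_sim_t sim_ssp_int(struct sim_driver_data *d)
{
	/* the real ISR does more checking; here we only dispatch */
	return d->transfer_handler(d);
}

int main(void)
{
	struct sim_driver_data d = {
		.sssr             = 0x00000080,
		.mask_sr          = 0x000000c0,
		.transfer_handler = sim_dma_transfer,
	};

	printf("dispatch returned %d\n", sim_ssp_int(&d));
	return 0;
}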
Example #2
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
					     bool error)
{
	struct spi_message *msg = drv_data->master->cur_msg;

	/*
	 * It is possible that one CPU is handling ROR interrupt and other
	 * just gets DMA completion. Calling pump_transfers() twice for the
	 * same transfer leads to problems thus we prevent concurrent calls
	 * by using ->dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error) {
			u32 status = pxa2xx_spi_read(drv_data, SSSR)
				     & drv_data->mask_sr;
			error = status & SSSR_ROR;
		}

		/* Clear status & disable interrupts */
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->dma_cr1);
		write_SSSR_CS(drv_data, drv_data->clear_sr);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		if (!error) {
			msg->actual_length += drv_data->len;
			msg->state = pxa2xx_spi_next_transfer(drv_data);
		} else {
			/* In case we got an error we disable the SSP now */
			pxa2xx_spi_write(drv_data, SSCR0,
					 pxa2xx_spi_read(drv_data, SSCR0)
					 & ~SSCR0_SSE);

			msg->state = ERROR_STATE;
		}

		tasklet_schedule(&drv_data->pump_transfers);
	}
}
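
The ->dma_running comment above describes a guard against running the completion twice: both the DMA completion callback and the ROR interrupt path can reach this function, and atomic_dec_and_test() lets only the first caller do the completion work. Below is a minimal user-space sketch of the same pattern using C11 atomics; the sim_* names, the caller labels, and the assumption that the counter is armed to 1 when a DMA transfer is started are illustrative rather than taken verbatim from the driver.

/*
 * Illustrative model of the ->dma_running guard: two racing completion
 * paths each decrement the counter, and only the caller that drops it
 * to zero performs the completion.  C11 atomics stand in for the
 * kernel's atomic_dec_and_test(); all names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sim_dma_running;

/* kernel-style helper: decrement and report whether we reached zero */
static bool sim_dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) == 1;
}

static void sim_transfer_complete(const char *who)
{
	if (sim_dec_and_test(&sim_dma_running))
		printf("%s: won the race, doing the completion work\n", who);
	else
		printf("%s: already handled on the other path, skipping\n", who);
}

int main(void)
{
	/* assumed to be armed to 1 each time a DMA transfer is started */
	atomic_store(&sim_dma_running, 1);

	sim_transfer_complete("dma-callback");    /* 1 -> 0: completes */
	sim_transfer_complete("ror-interrupt");   /* 0 -> -1: no-op    */
	return 0;
}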
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
                                      const char *msg)
{
    void __iomem *reg = drv_data->ioaddr;

    /* Stop and reset */
    DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
    DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
    write_SSSR_CS(drv_data, drv_data->clear_sr);
    write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
    if (!pxa25x_ssp_comp(drv_data))
        write_SSTO(0, reg);
    pxa2xx_spi_flush(drv_data);
    write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

    pxa2xx_spi_unmap_dma_buffers(drv_data);

    dev_err(&drv_data->pdev->dev, "%s\n", msg);

    drv_data->cur_msg->state = ERROR_STATE;
    tasklet_schedule(&drv_data->pump_transfers);
}
Example #4
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
				      const char *msg)
{
	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	pxa2xx_spi_write(drv_data, SSCR1,
			 pxa2xx_spi_read(drv_data, SSCR1)
			 & ~drv_data->dma_cr1);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}
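
Both variants of the error stop (this one and the legacy one in the previous example) shut the port down with the same read-modify-write idiom: read a control register, clear the relevant bits (the DMA enables in SSCR1, SSCR0_SSE in SSCR0), and write the result back. The stand-alone fragment below models that idiom with an ordinary variable in place of a memory-mapped register; the bit position and all sim_* names are illustrative only.

/*
 * User-space model of the read-modify-write idiom used above to clear
 * control bits without disturbing the rest of the register.  A plain
 * variable stands in for the memory-mapped register; the bit value and
 * names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define SIM_SSE_BIT  (1u << 7)          /* hypothetical "port enable" bit */

static uint32_t sim_sscr0 = 0x000000ff; /* pretend register contents */

static uint32_t sim_read(volatile const uint32_t *reg)
{
	return *reg;
}

static void sim_write(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

int main(void)
{
	/* equivalent of: write(SSCR0, read(SSCR0) & ~SSCR0_SSE) */
	sim_write(&sim_sscr0, sim_read(&sim_sscr0) & ~SIM_SSE_BIT);
	printf("SSCR0 after disabling the port: 0x%08x\n",
	       (unsigned int)sim_sscr0);
	return 0;
}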