/**
 * davinci_spi_irq - interrupt handler for the DaVinci SPI master
 * @irq: IRQ number assigned to this SPI master
 * @context_data: driver state (struct davinci_spi *) registered at request_irq()
 * @ptregs: saved processor registers (unused; pre-2.6.19 handler prototype)
 *
 * Services the controller while SPIFLG reports any enabled event: a pending
 * receive is drained from SPIBUF into the driver's get_rx() hook; when no RX
 * data is pending, the remaining flags are handed to
 * davinci_spi_check_error() (its result deliberately ignored).
 *
 * Returns IRQ_HANDLED if at least one flag was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data,
				   struct pt_regs *ptregs)
{
	struct davinci_spi *davinci_spi = context_data;
	u32 int_status, rx_data = 0;
	irqreturn_t ret = IRQ_NONE;

	int_status = ioread32(davinci_spi->base + SPIFLG);

	/* Re-read SPIFLG each pass; exit once no enabled source is asserted */
	while ((int_status & SPI_SPIFLG_MASK) != 0) {
		ret = IRQ_HANDLED;

		if (likely(int_status & SPI_SPIFLG_RX_INTR_MASK)) {
			rx_data = ioread32(davinci_spi->base + SPIBUF);
			davinci_spi->get_rx(rx_data, davinci_spi);

			/*
			 * Disable Receive Interrupt.
			 * NOTE(review): this stores ~SPI_SPIINT_RX_INTR as the
			 * whole SPIINT value, which also SETS every other
			 * enable bit rather than only clearing RX.  A
			 * read-modify-write (clear_bits()) looks intended —
			 * confirm against the SPIINT register description.
			 */
			iowrite32(~SPI_SPIINT_RX_INTR,
				  davinci_spi->base + SPIINT);
		} else
			/* Ignore errors if have good intr */
			(void)davinci_spi_check_error(davinci_spi, int_status);

		int_status = ioread32(davinci_spi->base + SPIFLG);
	}

	return ret;
}
/**
 * davinci_spi_irq - IRQ handler for DaVinci SPI
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for SPI Master controller davinci_spi
 *
 * Drains received words: while SPIFLG reports a pending RX interrupt, the
 * word is read from SPIBUF and handed to the controller's get_rx() hook.
 *
 * Returns IRQ_HANDLED if at least one word was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
	struct davinci_spi *davinci_spi = context_data;
	u32 int_status, rx_data = 0;
	irqreturn_t ret = IRQ_NONE;

	int_status = ioread32(davinci_spi->base + SPIFLG);

	/*
	 * Fix: the previous revision re-tested SPIFLG_RX_INTR_MASK inside a
	 * loop whose condition was the very same mask, so its else branch
	 * (davinci_spi_check_error()) was unreachable dead code.  The
	 * redundant test and dead branch are removed; behaviour is unchanged.
	 * TODO(review): if ISR-level error reporting is wanted, the loop
	 * mask must be widened to cover the error flags as well.
	 */
	while (int_status & SPIFLG_RX_INTR_MASK) {
		ret = IRQ_HANDLED;

		rx_data = ioread32(davinci_spi->base + SPIBUF);
		davinci_spi->get_rx(rx_data, davinci_spi);

		/*
		 * Disable Receive Interrupt.
		 * NOTE(review): this stores ~(RX|TX) as the whole SPIINT
		 * value, which also sets every other enable bit; a
		 * read-modify-write (clear_io_bits()) looks intended —
		 * confirm against the SPIINT register description.
		 */
		iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
				davinci_spi->base + SPIINT);

		int_status = ioread32(davinci_spi->base + SPIFLG);
	}

	return ret;
}
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) { struct davinci_spi *davinci_spi; int int_status = 0; int count; u8 conv = 1; u8 tmp; u32 data1_reg_val; struct davinci_spi_dma *davinci_spi_dma; int word_len, data_type, ret; unsigned long tx_reg, rx_reg; struct davinci_spi_config_t *spi_cfg; struct davinci_spi_platform_data *pdata; davinci_spi = spi_master_get_devdata(spi->master); pdata = davinci_spi->pdata; BUG_ON(davinci_spi->dma_channels == NULL); davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; /* used for macro defs */ davinci_spi->tx = t->tx_buf; davinci_spi->rx = t->rx_buf; /* convert len to words bbased on bits_per_word */ conv = davinci_spi->slave[spi->chip_select].bytes_per_word; davinci_spi->count = t->len / conv; INIT_COMPLETION(davinci_spi->done); init_completion(&davinci_spi_dma->dma_rx_completion); init_completion(&davinci_spi_dma->dma_tx_completion); word_len = conv * 8; if (word_len <= 8) data_type = DAVINCI_DMA_DATA_TYPE_S8; else if (word_len <= 16) data_type = DAVINCI_DMA_DATA_TYPE_S16; else if (word_len <= 32) data_type = DAVINCI_DMA_DATA_TYPE_S32; else return -1; spi_cfg = (struct davinci_spi_config_t *)spi->controller_data; ret = davinci_spi_bufs_prep(spi, davinci_spi, spi_cfg); if (ret) return ret; /* Put delay val if required */ iowrite32(0, davinci_spi->base + SPIDELAY); count = davinci_spi->count; /* the number of elements */ data1_reg_val = spi_cfg->cs_hold << SPI_SPIDAT1_CSHOLD_SHIFT; /* CD default = 0xFF */ tmp = ~(0x1 << spi->chip_select); if ((pdata->chip_sel != NULL) && (pdata->chip_sel[spi->chip_select] != DAVINCI_SPI_INTERN_CS)) gpio_set_value(pdata->chip_sel[spi->chip_select], 0); else clear_bits(davinci_spi->base + SPIDEF, ~tmp); data1_reg_val |= tmp << SPI_SPIDAT1_CSNR_SHIFT; /* disable all interrupts for dma transfers */ clear_bits(davinci_spi->base + SPIINT, 
SPI_SPIINT_MASKALL); /* Disable SPI to write configuration bits in SPIDAT */ clear_bits(davinci_spi->base + SPIGCR1, SPI_SPIGCR1_SPIENA_MASK); iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); /* Enable SPI */ set_bits(davinci_spi->base + SPIGCR1, SPI_SPIGCR1_SPIENA_MASK); while (1) if (ioread32(davinci_spi->base + SPIBUF) & SPI_SPIBUF_RXEMPTY_MASK) break; if (t->tx_buf != NULL) { t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, DMA_TO_DEVICE); if (dma_mapping_error(t->tx_dma)) { pr_err("%s(): Couldn't DMA map a %d bytes TX buffer\n", __func__, count); return -1; } davinci_set_dma_transfer_params(davinci_spi_dma->dma_tx_channel, data_type, count, 1, 0, ASYNC); davinci_set_dma_dest_params(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); davinci_set_dma_src_params(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); davinci_set_dma_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); davinci_set_dma_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); } else { /* We need TX clocking for RX transaction */ t->tx_dma = dma_map_single(&spi->dev, (void *)davinci_spi->tmp_buf, count + 1, DMA_TO_DEVICE); if (dma_mapping_error(t->tx_dma)) { pr_err("%s(): Couldn't DMA map a %d bytes TX " "tmp buffer\n", __func__, count); return -1; } davinci_set_dma_transfer_params(davinci_spi_dma->dma_tx_channel, data_type, count + 1, 1, 0, ASYNC); davinci_set_dma_dest_params(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); davinci_set_dma_src_params(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); davinci_set_dma_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); davinci_set_dma_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); } if (t->rx_buf != NULL) { /* initiate transaction */ iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, DMA_FROM_DEVICE); if (dma_mapping_error(t->rx_dma)) { pr_err("%s(): Couldn't DMA map a %d bytes RX buffer\n", __func__, count); if 
(t->tx_buf != NULL) dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); return -1; } davinci_set_dma_transfer_params(davinci_spi_dma->dma_rx_channel, data_type, count, 1, 0, ASYNC); davinci_set_dma_src_params(davinci_spi_dma->dma_rx_channel, rx_reg, INCR, W8BIT); davinci_set_dma_dest_params(davinci_spi_dma->dma_rx_channel, t->rx_dma, INCR, W8BIT); davinci_set_dma_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); davinci_set_dma_dest_index(davinci_spi_dma->dma_rx_channel, data_type, 0); } if ((t->tx_buf != NULL) || (t->rx_buf != NULL)) davinci_start_dma(davinci_spi_dma->dma_tx_channel); if (t->rx_buf != NULL) davinci_start_dma(davinci_spi_dma->dma_rx_channel); if ((t->rx_buf != NULL) || (t->tx_buf != NULL)) davinci_spi_set_dma_req(spi, 1); if (t->tx_buf != NULL) wait_for_completion_interruptible( &davinci_spi_dma->dma_tx_completion); if (t->rx_buf != NULL) wait_for_completion_interruptible( &davinci_spi_dma->dma_rx_completion); if (t->tx_buf != NULL) dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); else dma_unmap_single(NULL, t->tx_dma, count + 1, DMA_TO_DEVICE); if (t->rx_buf != NULL) dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); /* * Check for bit error, desync error,parity error,timeout error and * receive overflow errors */ int_status = ioread32(davinci_spi->base + SPIFLG); ret = davinci_spi_check_error(davinci_spi, int_status); if (ret != 0) return ret; /* SPI Framework maintains the count only in bytes so convert back */ davinci_spi->count *= conv; return t->len; }
/**
 * davinci_spi_bufs_pio - PIO transfer of one spi_transfer
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * Feeds the transfer through SPIDAT1 one word at a time.  TX transfers poll
 * the TXFULL flag; RX-only transfers either poll (spi_cfg->poll_mode) or use
 * the RX interrupt, re-armed per word.  Errors are checked once at the end
 * via SPIFLG.
 *
 * Returns t->len on success, or a negative value propagated from
 * davinci_spi_bufs_prep()/davinci_spi_check_error().
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv, tmp;
	u32 tx_data, data1_reg_val;
	struct davinci_spi_config_t *spi_cfg;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;
	INIT_COMPLETION(davinci_spi->done);

	spi_cfg = (struct davinci_spi_config_t *)spi->controller_data;

	ret = davinci_spi_bufs_prep(spi, davinci_spi, spi_cfg);
	if (ret)
		return ret;

	/* Enable SPI */
	set_bits(davinci_spi->base + SPIGCR1, SPI_SPIGCR1_SPIENA_MASK);

	/*
	 * Put delay val if required.
	 * NOTE(review): hard-coded 8-cycle values in bit positions 24/16 —
	 * presumably C2T/T2C delays; confirm against the SPIDELAY layout.
	 */
	iowrite32(0 | (8 << 24) | (8 << 16), davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;
	data1_reg_val = spi_cfg->cs_hold << SPI_SPIDAT1_CSHOLD_SHIFT;
	tmp = ~(0x1 << spi->chip_select);	/* CD default = 0xFF */

	/* check for GPIO: board may route this chip select to a GPIO pin */
	if ((pdata->chip_sel != NULL) &&
	    (pdata->chip_sel[spi->chip_select] != DAVINCI_SPI_INTERN_CS))
		gpio_set_value(pdata->chip_sel[spi->chip_select], 0);
	else
		clear_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPI_SPIDAT1_CSNR_SHIFT;

	/*
	 * Wait for the receiver to drain before starting.
	 * NOTE(review): unbounded busy-wait, no cpu_relax() or timeout.
	 */
	while (1)
		if (ioread32(davinci_spi->base + SPIBUF) &
		    SPI_SPIBUF_RXEMPTY_MASK)
			break;

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		clear_bits(davinci_spi->base + SPIINT, SPI_SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* low 16 bits of SPIDAT1 carry the data word */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPI_SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
					  davinci_spi->base + SPIDAT1);
				count--;
			}
			/* wait for the shifted-in word to arrive */
			while (ioread32(davinci_spi->base + SPIBUF) &
			       SPI_SPIBUF_RXEMPTY_MASK)
				udelay(1);

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (spi_cfg->poll_mode) {
			/* In Polling mode receive */
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF) &
				     SPI_SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						  davinci_spi->base + SPIDAT1);

				/* NOTE(review): unbounded busy-wait */
				while (ioread32(davinci_spi->base + SPIBUF) &
				       SPI_SPIBUF_RXEMPTY_MASK) {
				}

				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
				count--;
				if (count <= 0)
					break;
			}
		} else {
			/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				set_bits(davinci_spi->base + SPIINT,
					 SPI_SPIINT_BITERR_INTR |
					 SPI_SPIINT_OVRRUN_INTR |
					 SPI_SPIINT_RX_INTR);
				iowrite32(data1_reg_val,
					  davinci_spi->base + SPIDAT1);
				/*
				 * NOTE(review): spins while the RX interrupt
				 * ENABLE bit stays set in SPIINT — presumably
				 * the ISR clears it; no timeout here.
				 */
				while (ioread32(davinci_spi->base + SPIINT) &
				       SPI_SPIINT_RX_INTR) {
				}
			}
			/*
			 * Mask 0x0ffcffff clears bits 28-31 and 16-17 —
			 * presumably CSHOLD and CSNR, deasserting chip
			 * select; confirm against the SPIDAT1 layout.
			 */
			iowrite32((data1_reg_val & 0x0ffcffff),
				  davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) { struct davinci_spi *davinci_spi; int int_status = 0; int count, temp_count; u8 conv = 1; u8 tmp; u32 data1_reg_val; struct davinci_spi_dma *davinci_spi_dma; int word_len, data_type, ret; unsigned long tx_reg, rx_reg; struct davinci_spi_platform_data *pdata; struct device *sdev; davinci_spi = spi_master_get_devdata(spi->master); pdata = davinci_spi->pdata; sdev = davinci_spi->bitbang.master->dev.parent; davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; davinci_spi->tx = t->tx_buf; davinci_spi->rx = t->rx_buf; /* convert len to words based on bits_per_word */ conv = davinci_spi->slave[spi->chip_select].bytes_per_word; davinci_spi->count = t->len / conv; INIT_COMPLETION(davinci_spi->done); init_completion(&davinci_spi_dma->dma_rx_completion); init_completion(&davinci_spi_dma->dma_tx_completion); word_len = conv * 8; if (word_len <= 8) data_type = DAVINCI_DMA_DATA_TYPE_S8; else if (word_len <= 16) data_type = DAVINCI_DMA_DATA_TYPE_S16; else if (word_len <= 32) data_type = DAVINCI_DMA_DATA_TYPE_S32; else return -EINVAL; ret = davinci_spi_bufs_prep(spi, davinci_spi); if (ret) return ret; /* Put delay val if required */ iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), davinci_spi->base + SPIDELAY); count = davinci_spi->count; /* the number of elements */ data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; /* CS default = 0xFF */ tmp = ~(0x1 << spi->chip_select); clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; /* disable all interrupts for dma transfers */ clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); /* Disable SPI to write configuration bits in SPIDAT */ clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); iowrite32(data1_reg_val, davinci_spi->base + 
SPIDAT1); /* Enable SPI */ set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); while ((ioread32(davinci_spi->base + SPIBUF) & SPIBUF_RXEMPTY_MASK) == 0) cpu_relax(); if (t->tx_buf) { t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, DMA_TO_DEVICE); if (dma_mapping_error(&spi->dev, t->tx_dma)) { dev_dbg(sdev, "Unable to DMA map a %d bytes" " TX buffer\n", count); return -ENOMEM; } temp_count = count; } else { /* We need TX clocking for RX transaction */ t->tx_dma = dma_map_single(&spi->dev, (void *)davinci_spi->tmp_buf, count + 1, DMA_TO_DEVICE); if (dma_mapping_error(&spi->dev, t->tx_dma)) { dev_dbg(sdev, "Unable to DMA map a %d bytes" " TX tmp buffer\n", count); return -ENOMEM; } temp_count = count + 1; } edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, data_type, temp_count, 1, 0, ASYNC); edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); if (t->rx_buf) { /* initiate transaction */ iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, DMA_FROM_DEVICE); if (dma_mapping_error(&spi->dev, t->rx_dma)) { dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", count); if (t->tx_buf != NULL) dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); return -ENOMEM; } edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, data_type, count, 1, 0, ASYNC); edma_set_src(davinci_spi_dma->dma_rx_channel, rx_reg, INCR, W8BIT); edma_set_dest(davinci_spi_dma->dma_rx_channel, t->rx_dma, INCR, W8BIT); edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); edma_set_dest_index(davinci_spi_dma->dma_rx_channel, data_type, 0); } if ((t->tx_buf) || (t->rx_buf)) edma_start(davinci_spi_dma->dma_tx_channel); if (t->rx_buf) edma_start(davinci_spi_dma->dma_rx_channel); if 
((t->rx_buf) || (t->tx_buf)) davinci_spi_set_dma_req(spi, 1); if (t->tx_buf) wait_for_completion_interruptible( &davinci_spi_dma->dma_tx_completion); if (t->rx_buf) wait_for_completion_interruptible( &davinci_spi_dma->dma_rx_completion); dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); if (t->rx_buf) dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); /* * Check for bit error, desync error,parity error,timeout error and * receive overflow errors */ int_status = ioread32(davinci_spi->base + SPIFLG); ret = davinci_spi_check_error(davinci_spi, int_status); if (ret != 0) return ret; /* SPI Framework maintains the count only in bytes so convert back */ davinci_spi->count *= conv; return t->len; }
/*
 * davinci_spi_bufs_pio - PIO transfer of one spi_transfer
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function puts the data to be transferred into the data register of
 * the SPI controller word by word: TX transfers poll TXFULL, RX-only
 * transfers either poll (pdata->poll_mode) or ride the RX interrupt,
 * re-armed per word.  Errors are checked once at the end via SPIFLG.
 *
 * Returns t->len on success, or a negative value propagated from
 * davinci_spi_bufs_prep()/davinci_spi_check_error().
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv, tmp;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;
	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/* Board-supplied chip-select-to-transmit delays */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;

	/*
	 * NOTE(review): with no controller_data this computes
	 * 0x1 << (1 - chip_select), which only makes sense for chip
	 * selects 0 and 1 — confirm for controllers with more CS lines.
	 */
	if (!spi->controller_data)
		tmp = 0x1 << (1 - spi->chip_select);
	else
		tmp = CS_DEFAULT;

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
	data1_reg_val |= spi->chip_select << SPIDAT1_DFSEL_SHIFT;

	/* Wait for the receiver to drain before starting */
	while ((ioread32(davinci_spi->base + SPIBUF)
				& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* low 16 bits of SPIDAT1 carry the data word */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
					  davinci_spi->base + SPIDAT1);
				count--;
			}
			/* wait for the shifted-in word to arrive */
			while (ioread32(davinci_spi->base + SPIBUF) &
			       SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF) &
				     SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						  davinci_spi->base + SPIDAT1);

				/* NOTE(review): unbounded busy-wait */
				while (ioread32(davinci_spi->base + SPIBUF) &
				       SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
				count--;
				if (count <= 0)
					break;
			}
		} else {
			/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR |
						SPIINT_OVRRUN_INTR |
						SPIINT_RX_INTR);
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);
				/*
				 * NOTE(review): spins while the RX interrupt
				 * ENABLE bit stays set in SPIINT — presumably
				 * the ISR clears it; no timeout here.
				 */
				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			/*
			 * Mask 0x0ffcffff clears bits 28-31 and 16-17 —
			 * presumably CSHOLD and CSNR, deasserting chip
			 * select; confirm against the SPIDAT1 layout.
			 */
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}