/*
 * davinci_spi_dma_tx_callback - EDMA completion callback for the TX channel
 * @lch:       DMA logical channel that completed
 * @ch_status: channel status reported by the EDMA controller
 * @data:      the struct spi_device this transfer belongs to
 *
 * On successful completion the TX channel is stopped; on error the
 * channel is cleaned up instead.  Either way the waiter in the DMA
 * transfer path is woken and the DMA TX request is disabled.
 * Uses the edma_* channel API, consistent with the RX callback.
 */
static void davinci_spi_dma_tx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = (struct spi_device *)data;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;

	davinci_spi = spi_master_get_devdata(spi->master);
	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);

	if (ch_status == DMA_COMPLETE)
		edma_stop(davinci_spi_dma->dma_tx_channel);
	else
		edma_clean_channel(davinci_spi_dma->dma_tx_channel);

	complete(&davinci_spi_dma->dma_tx_completion);
	/* We must disable the DMA TX request */
	davinci_spi_set_dma_req(spi, 0);
}
/*
 * davinci_spi_dma_rx_callback - EDMA completion callback for the RX channel
 * @lch:       DMA logical channel that completed
 * @ch_status: channel status reported by the EDMA controller
 * @data:      the struct spi_device this transfer belongs to
 *
 * On successful completion the RX channel is stopped; on error the
 * channel is cleaned up instead.  Either way the waiter in the DMA
 * transfer path is woken and the DMA RX request is disabled.
 *
 * The unused pdata lookup present in the previous revision was removed.
 */
static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
{
	struct spi_device *spi = (struct spi_device *)data;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;

	davinci_spi = spi_master_get_devdata(spi->master);
	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);

	if (ch_status == DMA_COMPLETE)
		edma_stop(davinci_spi_dma->dma_rx_channel);
	else
		edma_clean_channel(davinci_spi_dma->dma_rx_channel);

	complete(&davinci_spi_dma->dma_rx_completion);
	/* We must disable the DMA RX request */
	davinci_spi_set_dma_req(spi, 0);
}
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) { struct davinci_spi *davinci_spi; int int_status = 0; int count; u8 conv = 1; u8 tmp; u32 data1_reg_val; struct davinci_spi_dma *davinci_spi_dma; int word_len, data_type, ret; unsigned long tx_reg, rx_reg; struct davinci_spi_config_t *spi_cfg; struct davinci_spi_platform_data *pdata; davinci_spi = spi_master_get_devdata(spi->master); pdata = davinci_spi->pdata; BUG_ON(davinci_spi->dma_channels == NULL); davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; /* used for macro defs */ davinci_spi->tx = t->tx_buf; davinci_spi->rx = t->rx_buf; /* convert len to words bbased on bits_per_word */ conv = davinci_spi->slave[spi->chip_select].bytes_per_word; davinci_spi->count = t->len / conv; INIT_COMPLETION(davinci_spi->done); init_completion(&davinci_spi_dma->dma_rx_completion); init_completion(&davinci_spi_dma->dma_tx_completion); word_len = conv * 8; if (word_len <= 8) data_type = DAVINCI_DMA_DATA_TYPE_S8; else if (word_len <= 16) data_type = DAVINCI_DMA_DATA_TYPE_S16; else if (word_len <= 32) data_type = DAVINCI_DMA_DATA_TYPE_S32; else return -1; spi_cfg = (struct davinci_spi_config_t *)spi->controller_data; ret = davinci_spi_bufs_prep(spi, davinci_spi, spi_cfg); if (ret) return ret; /* Put delay val if required */ iowrite32(0, davinci_spi->base + SPIDELAY); count = davinci_spi->count; /* the number of elements */ data1_reg_val = spi_cfg->cs_hold << SPI_SPIDAT1_CSHOLD_SHIFT; /* CD default = 0xFF */ tmp = ~(0x1 << spi->chip_select); if ((pdata->chip_sel != NULL) && (pdata->chip_sel[spi->chip_select] != DAVINCI_SPI_INTERN_CS)) gpio_set_value(pdata->chip_sel[spi->chip_select], 0); else clear_bits(davinci_spi->base + SPIDEF, ~tmp); data1_reg_val |= tmp << SPI_SPIDAT1_CSNR_SHIFT; /* disable all interrupts for dma transfers */ clear_bits(davinci_spi->base + SPIINT, 
SPI_SPIINT_MASKALL); /* Disable SPI to write configuration bits in SPIDAT */ clear_bits(davinci_spi->base + SPIGCR1, SPI_SPIGCR1_SPIENA_MASK); iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); /* Enable SPI */ set_bits(davinci_spi->base + SPIGCR1, SPI_SPIGCR1_SPIENA_MASK); while (1) if (ioread32(davinci_spi->base + SPIBUF) & SPI_SPIBUF_RXEMPTY_MASK) break; if (t->tx_buf != NULL) { t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, DMA_TO_DEVICE); if (dma_mapping_error(t->tx_dma)) { pr_err("%s(): Couldn't DMA map a %d bytes TX buffer\n", __func__, count); return -1; } davinci_set_dma_transfer_params(davinci_spi_dma->dma_tx_channel, data_type, count, 1, 0, ASYNC); davinci_set_dma_dest_params(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); davinci_set_dma_src_params(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); davinci_set_dma_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); davinci_set_dma_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); } else { /* We need TX clocking for RX transaction */ t->tx_dma = dma_map_single(&spi->dev, (void *)davinci_spi->tmp_buf, count + 1, DMA_TO_DEVICE); if (dma_mapping_error(t->tx_dma)) { pr_err("%s(): Couldn't DMA map a %d bytes TX " "tmp buffer\n", __func__, count); return -1; } davinci_set_dma_transfer_params(davinci_spi_dma->dma_tx_channel, data_type, count + 1, 1, 0, ASYNC); davinci_set_dma_dest_params(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); davinci_set_dma_src_params(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); davinci_set_dma_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); davinci_set_dma_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); } if (t->rx_buf != NULL) { /* initiate transaction */ iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, DMA_FROM_DEVICE); if (dma_mapping_error(t->rx_dma)) { pr_err("%s(): Couldn't DMA map a %d bytes RX buffer\n", __func__, count); if 
(t->tx_buf != NULL) dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); return -1; } davinci_set_dma_transfer_params(davinci_spi_dma->dma_rx_channel, data_type, count, 1, 0, ASYNC); davinci_set_dma_src_params(davinci_spi_dma->dma_rx_channel, rx_reg, INCR, W8BIT); davinci_set_dma_dest_params(davinci_spi_dma->dma_rx_channel, t->rx_dma, INCR, W8BIT); davinci_set_dma_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); davinci_set_dma_dest_index(davinci_spi_dma->dma_rx_channel, data_type, 0); } if ((t->tx_buf != NULL) || (t->rx_buf != NULL)) davinci_start_dma(davinci_spi_dma->dma_tx_channel); if (t->rx_buf != NULL) davinci_start_dma(davinci_spi_dma->dma_rx_channel); if ((t->rx_buf != NULL) || (t->tx_buf != NULL)) davinci_spi_set_dma_req(spi, 1); if (t->tx_buf != NULL) wait_for_completion_interruptible( &davinci_spi_dma->dma_tx_completion); if (t->rx_buf != NULL) wait_for_completion_interruptible( &davinci_spi_dma->dma_rx_completion); if (t->tx_buf != NULL) dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); else dma_unmap_single(NULL, t->tx_dma, count + 1, DMA_TO_DEVICE); if (t->rx_buf != NULL) dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); /* * Check for bit error, desync error,parity error,timeout error and * receive overflow errors */ int_status = ioread32(davinci_spi->base + SPIFLG); ret = davinci_spi_check_error(davinci_spi, int_status); if (ret != 0) return ret; /* SPI Framework maintains the count only in bytes so convert back */ davinci_spi->count *= conv; return t->len; }
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) { struct davinci_spi *davinci_spi; int int_status = 0; int count, temp_count; u8 conv = 1; u8 tmp; u32 data1_reg_val; struct davinci_spi_dma *davinci_spi_dma; int word_len, data_type, ret; unsigned long tx_reg, rx_reg; struct davinci_spi_platform_data *pdata; struct device *sdev; davinci_spi = spi_master_get_devdata(spi->master); pdata = davinci_spi->pdata; sdev = davinci_spi->bitbang.master->dev.parent; davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; davinci_spi->tx = t->tx_buf; davinci_spi->rx = t->rx_buf; /* convert len to words based on bits_per_word */ conv = davinci_spi->slave[spi->chip_select].bytes_per_word; davinci_spi->count = t->len / conv; INIT_COMPLETION(davinci_spi->done); init_completion(&davinci_spi_dma->dma_rx_completion); init_completion(&davinci_spi_dma->dma_tx_completion); word_len = conv * 8; if (word_len <= 8) data_type = DAVINCI_DMA_DATA_TYPE_S8; else if (word_len <= 16) data_type = DAVINCI_DMA_DATA_TYPE_S16; else if (word_len <= 32) data_type = DAVINCI_DMA_DATA_TYPE_S32; else return -EINVAL; ret = davinci_spi_bufs_prep(spi, davinci_spi); if (ret) return ret; /* Put delay val if required */ iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), davinci_spi->base + SPIDELAY); count = davinci_spi->count; /* the number of elements */ data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; /* CS default = 0xFF */ tmp = ~(0x1 << spi->chip_select); clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; /* disable all interrupts for dma transfers */ clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); /* Disable SPI to write configuration bits in SPIDAT */ clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); iowrite32(data1_reg_val, davinci_spi->base + 
SPIDAT1); /* Enable SPI */ set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); while ((ioread32(davinci_spi->base + SPIBUF) & SPIBUF_RXEMPTY_MASK) == 0) cpu_relax(); if (t->tx_buf) { t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, DMA_TO_DEVICE); if (dma_mapping_error(&spi->dev, t->tx_dma)) { dev_dbg(sdev, "Unable to DMA map a %d bytes" " TX buffer\n", count); return -ENOMEM; } temp_count = count; } else { /* We need TX clocking for RX transaction */ t->tx_dma = dma_map_single(&spi->dev, (void *)davinci_spi->tmp_buf, count + 1, DMA_TO_DEVICE); if (dma_mapping_error(&spi->dev, t->tx_dma)) { dev_dbg(sdev, "Unable to DMA map a %d bytes" " TX tmp buffer\n", count); return -ENOMEM; } temp_count = count + 1; } edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, data_type, temp_count, 1, 0, ASYNC); edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); if (t->rx_buf) { /* initiate transaction */ iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, DMA_FROM_DEVICE); if (dma_mapping_error(&spi->dev, t->rx_dma)) { dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", count); if (t->tx_buf != NULL) dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); return -ENOMEM; } edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, data_type, count, 1, 0, ASYNC); edma_set_src(davinci_spi_dma->dma_rx_channel, rx_reg, INCR, W8BIT); edma_set_dest(davinci_spi_dma->dma_rx_channel, t->rx_dma, INCR, W8BIT); edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); edma_set_dest_index(davinci_spi_dma->dma_rx_channel, data_type, 0); } if ((t->tx_buf) || (t->rx_buf)) edma_start(davinci_spi_dma->dma_tx_channel); if (t->rx_buf) edma_start(davinci_spi_dma->dma_rx_channel); if 
((t->rx_buf) || (t->tx_buf)) davinci_spi_set_dma_req(spi, 1); if (t->tx_buf) wait_for_completion_interruptible( &davinci_spi_dma->dma_tx_completion); if (t->rx_buf) wait_for_completion_interruptible( &davinci_spi_dma->dma_rx_completion); dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); if (t->rx_buf) dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); /* * Check for bit error, desync error,parity error,timeout error and * receive overflow errors */ int_status = ioread32(davinci_spi->base + SPIFLG); ret = davinci_spi_check_error(davinci_spi, int_status); if (ret != 0) return ret; /* SPI Framework maintains the count only in bytes so convert back */ davinci_spi->count *= conv; return t->len; }