Example #1
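The ALSA PCM trigger callback of the DaVinci audio driver: under the runtime spinlock it starts, pauses, or resumes the EDMA channel that feeds the ASP depending on the trigger command, and for playback it also starts the IRAM ping-pong channel when one is configured.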
static int davinci_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		edma_start(prtd->asp_channel);
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    prtd->ram_channel >= 0) {
			/* copy 1st iram buffer */
			edma_start(prtd->ram_channel);
		}
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		edma_resume(prtd->asp_channel);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		edma_pause(prtd->asp_channel);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}
Example #2
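The matching PCM prepare callback: with an IRAM ping-pong channel it programs the RAM and ASP parameter slots and starts both channels; otherwise it enqueues the first DMA periods, copies the self-linked parameter RAM entry into the master channel, and starts it.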
static int davinci_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	if (prtd->ram_channel >= 0) {
		int ret = ping_pong_dma_setup(substream);
		if (ret < 0)
			return ret;

		edma_write_slot(prtd->ram_channel, &prtd->ram_params);
		edma_write_slot(prtd->asp_channel, &prtd->asp_params);

		print_buf_info(prtd->ram_channel, "ram_channel");
		print_buf_info(prtd->ram_link, "ram_link");
		print_buf_info(prtd->ram_link2, "ram_link2");
		print_buf_info(prtd->asp_channel, "asp_channel");
		print_buf_info(prtd->asp_link[0], "asp_link[0]");
		print_buf_info(prtd->asp_link[1], "asp_link[1]");

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			/* copy 1st iram buffer */
			edma_start(prtd->ram_channel);
		}
		edma_start(prtd->asp_channel);
		return 0;
	}
	prtd->period = 0;
	davinci_pcm_enqueue_dma(substream);

	/* Copy self-linked parameter RAM entry into master channel */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	edma_write_slot(prtd->asp_channel, &prtd->asp_params);
	davinci_pcm_enqueue_dma(substream);
	edma_start(prtd->asp_channel);

	return 0;
}
Example #3
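A blocking memory-to-memory copy built on the legacy DaVinci EDMA private API: it programs source, destination, and transfer parameters, enables completion interrupts on the slot, starts the channel, and waits on a completion that the DMA callback signals.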
static int edma_memtomemcpy(int count, unsigned long src_addr, unsigned long trgt_addr, int dma_ch)
{
	int result = 0;
	struct edmacc_param param_set;

	edma_set_src(dma_ch, src_addr, INCR, W256BIT);
	edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT);
	edma_set_src_index(dma_ch, 1, 1);
	edma_set_dest_index(dma_ch, 1, 1);
	/* A-synchronized transfer mode: one block of one frame of one array of count bytes */
	edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC);

	/* Enable intermediate and final transfer-completion interrupts on this channel */
	edma_read_slot(dma_ch, &param_set);
	param_set.opt |= ITCINTEN;
	param_set.opt |= TCINTEN;
	param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
	edma_write_slot(dma_ch, &param_set);
	irqraised1 = 0u;
	dma_comp.done = 0;
	result = edma_start(dma_ch);

	if (result != 0) {
		printk("%s: edma copy failed \n", DEVICE_NAME);
	}

	wait_for_completion(&dma_comp);

	/* Check the status of the completed transfer */
	if (irqraised1 < 0) {
		printk("%s: edma copy: Event Miss Occured!!!\n", DEVICE_NAME);
		edma_stop(dma_ch);
		result = -EAGAIN;
	}

	return result;
}
Example #4
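A module init routine that configures GPIO, GPMC, and EDMA, starts an EDMA transfer that reads sequence data from an FPGA FIFO over GPMC, polls a flag set by the completion interrupt handler, and then verifies and dumps the received data.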
static int __init fpga_perh_init(void)
{
    unsigned int cnt;
    u32 val = 0;
    int ret = 0;
    int chk = 0;

    gpio_store();     // GPIO initialization
    gpio_config();
    gpmc_config();    // GPMC configuration
    edma_config();    // EDMA configuration

    for(cnt=0; cnt<7; cnt++){
        val = gpmc_cs_read_reg(GPMC_FPGA_CS, GPMC_CS_CONFIG1 + cnt*0x04);
        printk("GPMC_CS3_CONFIG_%d : [%08X]\n", cnt+1, val);
    }

    printk("Gpmc now start reading...\n");

    FPGA_RRST_L;
    _delay_ns(1);   // 1us
    FPGA_RRST_H;


    ret = edma_start(dma_ch);
    
    if (ret != 0) {
        printk ("dm8168_start_dma failed, error:%d", ret);
        return ret;
    }

    // wait for completion ISR
    while(irqraised1 == 0u){
        _delay_ms(10);
 //       break;
    }


    if (ret == 0) {
        for (cnt=0; cnt<FPGA_FIFO_SIZE; cnt++) {
//            fpga_buf[cnt] = readw(fpga_membase);
            if (fpga_buf[cnt] != cnt+1) {            // verify the sequence data
                chk = cnt+1;
                break;
            }
        }
        edma_stop(dma_ch);
        edma_free_channel(dma_ch);
    }

    if (chk == 0){
        printk ("Gpmc&edma reading sequence data check successful!\n");
    }else{
        printk ("Gpmc&edma reading data check error at: %d\n", chk);
    }

    for(cnt=0; cnt<8; cnt++){
        printk("[%04X] [%04X] [%04X] [%04X]\n", fpga_buf[cnt*4], fpga_buf[cnt*4+1], fpga_buf[cnt*4+2], fpga_buf[cnt*4+3]);
    }

//    gpmc_cs_free(GPMC_FPGA_CS);  
    return 0;
}
Example #5
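A memory-copy helper that can be built against either the generic dmaengine API or the legacy EDMA private API (selected by USE_DMA_ENGINE); both paths submit the transfer and then block on the same completion.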
int logi_dma_copy(struct drvr_mem* mem_dev, unsigned long trgt_addr,
		  unsigned long src_addr, int count)
{
	int result = 0;

#ifdef USE_DMA_ENGINE
	struct dma_chan *chan;
	struct dma_device *dev;
	struct dma_async_tx_descriptor *tx;
	unsigned long flags;

	chan = mem_dev->dma.chan;
	dev = chan->device;
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	tx = dev->device_prep_dma_memcpy(chan, trgt_addr, src_addr, count, flags);

	if (!tx) {
		DBG_LOG("device_prep_dma_memcpy failed\n");
		return -ENODEV;
	}

	irqraised1 = 0u;
	dma_comp.done = 0;
	/* set the callback and submit the transaction */
	tx->callback = dma_callback;
	tx->callback_param = mem_dev;
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
#else
	struct edmacc_param param_set;
	int dma_ch = mem_dev->dma.dma_chan;

	edma_set_src(dma_ch, src_addr, INCR, W256BIT);
	edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT);
	edma_set_src_index(dma_ch, 1, 1);
	edma_set_dest_index(dma_ch, 1, 1);
	/* A-synchronized transfer mode: one block of one frame of one array of count bytes */
	edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC);

	/* Enable intermediate and final transfer-completion interrupts on this channel */
	edma_read_slot(dma_ch, &param_set);
	param_set.opt |= ITCINTEN;
	param_set.opt |= TCINTEN;
	param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
	edma_write_slot(dma_ch, &param_set);
	irqraised1 = 0u;
	dma_comp.done = 0;
	result = edma_start(dma_ch);

	if (result != 0) {
		DBG_LOG("edma copy failed\n");
		return result;
	}

#endif /* USE_DMA_ENGINE */

	wait_for_completion(&dma_comp);

	/* Check the status of the completed transfer */

	if (irqraised1 < 0) {
		DBG_LOG("edma copy: Event Miss Occured!!!\n");
#ifdef USE_DMA_ENGINE
		dmaengine_terminate_all(chan);
#else
		edma_stop(dma_ch);
#endif /* USE_DMA_ENGINE */
		result = -EAGAIN;
	}

	return result;
}
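Examples 3 through 5 rely on a completion object (dma_comp), a status flag (irqraised1), and a DMA callback that are defined elsewhere in their drivers. The sketch below shows one common way that supporting code is written against the legacy DaVinci EDMA private API; the header path, the EDMA_CHANNEL_ANY/EVENTQ_0 arguments, and the setup_dma_channel() helper name are assumptions for illustration, and the DMA_COMPLETE/DMA_CC_ERROR status constants vary between kernel versions.

#include <linux/completion.h>
#include <mach/edma.h>	/* legacy DaVinci EDMA private API; path varies by kernel */

static struct completion dma_comp;
static volatile int irqraised1;
static int dma_ch;

/* Called by the EDMA core from interrupt context when the transfer
 * finishes or an error is reported; the examples above test this flag
 * after waiting on dma_comp. */
static void dma_callback(unsigned lch, u16 ch_status, void *data)
{
	switch (ch_status) {
	case DMA_COMPLETE:
		irqraised1 = 1;
		break;
	case DMA_CC_ERROR:
		irqraised1 = -1;
		break;
	default:
		break;
	}
	complete(&dma_comp);
}

/* Hypothetical helper: request any free channel and register the callback. */
static int setup_dma_channel(void)
{
	init_completion(&dma_comp);

	dma_ch = edma_alloc_channel(EDMA_CHANNEL_ANY, dma_callback, NULL, EVENTQ_0);
	if (dma_ch < 0)
		return dma_ch;

	return 0;
}

The channel number obtained here is what the examples pass to edma_start(); edma_free_channel() releases it again, as Example #4 does after its transfer completes.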
Example #6
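The DMA transfer path of the DaVinci SPI driver: it maps the TX (and optionally RX) buffers, programs one EDMA channel per direction, starts the channels, waits for both completions, and finally checks the SPI error flags.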
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count, temp_count;
	u8 conv = 1;
	u8 tmp;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct davinci_spi_platform_data *pdata;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	sdev = davinci_spi->bitbang.master->dev.parent;

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	word_len = conv * 8;

	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -EINVAL;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Put delay val if required */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;	/* the number of elements */
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;

	/* CS default = 0xFF */
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	while ((ioread32(davinci_spi->base + SPIBUF)
				& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();


	if (t->tx_buf) {
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count;
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX tmp buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count + 1;
	}

	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
					data_type, temp_count, 1, 0, ASYNC);
	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

	if (t->rx_buf) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						 count, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
				data_type, count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
				data_type, 0);
	}

	if ((t->tx_buf) || (t->rx_buf))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf)
		edma_start(davinci_spi_dma->dma_rx_channel);

	if ((t->rx_buf) || (t->tx_buf))
		davinci_spi_set_dma_req(spi, 1);

	if (t->tx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

	if (t->rx_buf)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error, parity error, timeout error, and
	 * receive overflow errors.
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}