static void snd_imx_dma_err_callback(int channel, void *data, int err)
{
	struct snd_pcm_substream *substream = data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct imx_pcm_dma_params *dma_params =
		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	int ret;

	pr_err("DMA timeout on channel %d -%s%s%s%s\n",
		 channel,
		 err & IMX_DMA_ERR_BURST ?    " burst" : "",
		 err & IMX_DMA_ERR_REQUEST ?  " request" : "",
		 err & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		 err & IMX_DMA_ERR_BUFFER ?   " buffer" : "");

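	/*
	 * Recover: stop the channel, rebuild the cyclic scatter-gather
	 * transfer and restart it only if the setup succeeded.
	 */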
	imx_dma_disable(iprtd->dma);
	ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
			IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
			substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			DMA_MODE_WRITE : DMA_MODE_READ);
	if (!ret)
		imx_dma_enable(iprtd->dma);
}

static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct imx_pcm_dma_params *dma_params;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	int err;

	dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

	iprtd->substream = substream;
	iprtd->buf = (unsigned int *)substream->dma_buffer.area;
	iprtd->period_cnt = 0;

	pr_debug("%s: buf: %p period: %d periods: %d\n",
			__func__, iprtd->buf, iprtd->period, iprtd->periods);

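	/*
	 * Program the scatterlist as an endless loop (IMX_DMA_LENGTH_LOOP) so
	 * the channel keeps cycling through the audio buffer; the transfer
	 * direction follows the stream type.
	 */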
	err = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
			IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
			substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			DMA_MODE_WRITE : DMA_MODE_READ);
	if (err)
		return err;

	return 0;
}
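The error callback above only fires if it was registered when the DMA channel was allocated. A minimal sketch of that registration with the legacy i.MX DMA v1 API follows; the channel name string and the completion handler snd_imx_dma_irq are assumptions, not part of the excerpt above.

/*
 * Sketch only: registering snd_imx_dma_err_callback with the legacy
 * i.MX DMA v1 API at open time. The channel name "imx-pcm-audio" and
 * the completion handler snd_imx_dma_irq are assumed names, not taken
 * from the excerpt above.
 */
static int snd_imx_open_dma(struct imx_pcm_runtime_data *iprtd,
			    struct snd_pcm_substream *substream)
{
	int ret;

	/* Grab any free DMA channel at high priority */
	iprtd->dma = imx_dma_request_by_prio("imx-pcm-audio", DMA_PRIO_HIGH);
	if (iprtd->dma < 0)
		return iprtd->dma;

	/* Both the completion and the error handler get the substream back */
	ret = imx_dma_setup_handlers(iprtd->dma, snd_imx_dma_irq,
				     snd_imx_dma_err_callback, substream);
	if (ret)
		imx_dma_free(iprtd->dma);

	return ret;
}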
Example #3
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

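	/* Stream transfers have no defined block count; program the maximum */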
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

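	/* Program the SDHC block count and block length registers */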
	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes; we have to use CPU-driven
	 * transfers otherwise. We are in big trouble for non-512-byte transfers
	 * according to the note in paragraph 20.6.7 of the User Manual anyway,
	 * but we need to be able to transfer the SCR at least. The situation is
	 * even more complex in reality: the SDHC is not able to handle partial
	 * FIFO fills and reads well. The length has to be rounded up to a
	 * multiple of the burst size. This is required for the SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable the SCR read: pretend it is one full 512-byte block */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16 *)(page_address(data->sg->page) + data->sg->offset);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

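	/* DMA path: map the scatterlist and point the channel at the SDHC data FIFO */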
	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is here only for consistency checking and can be disabled in the future */
	host->dma_size = 0;
	for (i = 0; i < host->dma_nents; i++)
		host->dma_size += data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc),
			"imxmci_setup_data datasz 0x%x > 0x%x dma_size\n",
			datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

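	/* Order the descriptor and register writes above before the channel is started */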
	wmb();

	if (host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* 64-byte read/write bursts */
	else
		BLR(host->dma) = 16;	/* 16-byte read/write bursts */

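	/* Route the SDHC DMA request line to this channel */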
	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* Start the DMA engine for reads; writes are delayed until after the initial response */
	if (host->dma_dir == DMA_FROM_DEVICE)
		imx_dma_enable(host->dma);
}
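For symmetry, the scatterlist mapped above must be unmapped once the transfer finishes. A minimal sketch follows; the helper name is hypothetical, and only the dma_unmap_sg() parameters are implied by the mapping calls in imxmci_setup_data().

/*
 * Sketch only: the unmap matching the dma_map_sg() calls above.
 * imxmci_unmap_data is a hypothetical name; a real driver must call
 * this only when the DMA path (not the small-transfer CPU path) ran.
 */
static void imxmci_unmap_data(struct imxmci_host *host, struct mmc_data *data)
{
	/* Unmap with the same scatterlist, nents and direction as mapped */
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma_dir);
}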