Example #1
static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct dma_chan *chan = prtd->dma_chan;
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction direction;
	unsigned long flags = DMA_CTRL_ACK;

	direction = snd_pcm_substream_to_dma_direction(substream);

	if (!substream->runtime->no_period_wakeup)
		flags |= DMA_PREP_INTERRUPT;

	prtd->pos = 0;
#if defined(CONFIG_ARCH_SUN9IW1) || defined(CONFIG_ARCH_SUN8IW6) || defined(CONFIG_ARCH_SUN8IW7)
		if (!strcmp(substream->pcm->card->id, "sndhdmiraw")) {
			desc = dmaengine_prep_dma_cyclic(chan,
				substream->runtime->dma_addr,
				2*snd_pcm_lib_buffer_bytes(substream),
				2*snd_pcm_lib_period_bytes(substream), direction, flags);
		} else if (!strcmp(substream->pcm->card->id, "snddaudio")) {
		#ifdef CONFIG_ARCH_SUN9IW1
			desc = dmaengine_prep_dma_cyclic(chan,
				substream->runtime->dma_addr,
				snd_pcm_lib_buffer_bytes(substream),
				snd_pcm_lib_buffer_bytes(substream), direction, flags);
		#else /* CONFIG_ARCH_SUN8IW6 */
			desc = dmaengine_prep_dma_cyclic(chan,
				substream->runtime->dma_addr,
				snd_pcm_lib_buffer_bytes(substream),
				snd_pcm_lib_period_bytes(substream), direction, flags);
		#endif
		} else {
			desc = dmaengine_prep_dma_cyclic(chan,
				substream->runtime->dma_addr,
				snd_pcm_lib_buffer_bytes(substream),
				snd_pcm_lib_period_bytes(substream), direction, flags);
		}
#else
		desc = dmaengine_prep_dma_cyclic(chan,
			substream->runtime->dma_addr,
			snd_pcm_lib_buffer_bytes(substream),
			snd_pcm_lib_period_bytes(substream), direction, flags);
#endif

	if (!desc)
		return -ENOMEM;

	desc->callback = dmaengine_pcm_dma_complete;
	desc->callback_param = substream;
	prtd->cookie = dmaengine_submit(desc);

	return 0;
}
Example #2
File: dma.c Project: 513855417/linux
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	int is_play = rsnd_io_is_play(io);

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	if (dmaengine_submit(desc) < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}
Example #3
/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	sirfport->rx_dma_items.xmit.tail =
		sirfport->rx_dma_items.xmit.head = 0;
	sirfport->rx_dma_items.desc =
		dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
		sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		SIRFSOC_RX_DMA_BUF_SIZE / 2,
		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items.desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items.desc->callback_param = sirfport;
	sirfport->rx_dma_items.cookie =
		dmaengine_submit(sirfport->rx_dma_items.desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
}
Example #4
static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct dma_chan *chan = prtd->dma_chan;
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction direction;
	unsigned long flags = DMA_CTRL_ACK;

	direction = snd_pcm_substream_to_dma_direction(substream);

	if (!substream->runtime->no_period_wakeup)
		flags |= DMA_PREP_INTERRUPT;

	prtd->pos = 0;
	desc = dmaengine_prep_dma_cyclic(chan,
		substream->runtime->dma_addr,
		snd_pcm_lib_buffer_bytes(substream),
		snd_pcm_lib_period_bytes(substream), direction, flags);

	if (!desc)
		return -ENOMEM;

	desc->callback = dmaengine_pcm_dma_complete;
	desc->callback_param = substream;
	prtd->cookie = dmaengine_submit(desc);

	return 0;
}
Example #5
File: core.c Project: 3null/linux
void rsnd_dma_start(struct rsnd_dma *dma)
{
	struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(dma->chan,
					 (dma->addr) ? dma->addr :
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 dma->dir,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
		return;
	}

	desc->callback		= rsnd_dma_complete;
	desc->callback_param	= dma;

	if (dmaengine_submit(desc) < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return;
	}

	dma_async_issue_pending(dma->chan);
}
Example #6
static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct dma_chan *chan = prtd->dma_chan;
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction direction;
	unsigned long flags = DMA_CTRL_ACK;

	direction = snd_pcm_substream_to_dma_direction(substream);

	if (!substream->runtime->no_period_wakeup)
		flags |= DMA_PREP_INTERRUPT;

	prtd->pos = 0;
	desc = dmaengine_prep_dma_cyclic(chan,
		substream->runtime->dma_addr,
		snd_pcm_lib_buffer_bytes(substream),
		snd_pcm_lib_period_bytes(substream), direction, flags);

	if (!desc)
		return -ENOMEM;

	desc->callback = dmaengine_pcm_dma_complete;
	desc->callback_param = substream;
	prtd->cookie = dmaengine_submit(desc);

#ifdef CONFIG_SND_PXA_SSP_DUMP
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
			&& ssp_playback_enable) {
		prtd->playback_totsize = snd_pcm_lib_buffer_bytes(substream);
		prtd->playback_transfer_addr = substream->runtime->dma_addr;
		prtd->playback_dump_addr = substream->runtime->dma_addr;
	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE
			&& ssp_capture_enable) {
		prtd->capture_totsize = snd_pcm_lib_buffer_bytes(substream);
		prtd->capture_transfer_addr = substream->runtime->dma_addr;
		prtd->capture_dump_addr = substream->runtime->dma_addr;
	}
#endif
	return 0;
}
Example #7
static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct dma_chan *chan = prtd->dma_chan;
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction direction;
	unsigned long flags = DMA_CTRL_ACK;

	direction = snd_pcm_substream_to_dma_direction(substream);

	if (!substream->runtime->no_period_wakeup)
		flags |= DMA_PREP_INTERRUPT;

	prtd->pos = 0;
#ifdef CONFIG_ARCH_ROCKCHIP
	//printk("soc dma buffersize = %d , periodsize=%d, periods=%d\n",
	//	snd_pcm_lib_buffer_bytes(substream),
	//	snd_pcm_lib_period_bytes(substream),
	//	snd_pcm_lib_buffer_bytes(substream)/snd_pcm_lib_period_bytes(substream));
	desc = dmaengine_prep_dma_infiniteloop(chan,
		substream->runtime->dma_addr,
		snd_pcm_lib_buffer_bytes(substream),
		snd_pcm_lib_period_bytes(substream),
		direction, flags,
		snd_pcm_lib_buffer_bytes(substream)/snd_pcm_lib_period_bytes(substream));
#else
	desc = dmaengine_prep_dma_cyclic(chan,
		substream->runtime->dma_addr,
		snd_pcm_lib_buffer_bytes(substream),
		snd_pcm_lib_period_bytes(substream), direction, flags);
#endif

	if (!desc)
		return -ENOMEM;

	desc->callback = dmaengine_pcm_dma_complete;
	desc->callback_param = substream;
	prtd->cookie = dmaengine_submit(desc);

	return 0;
}
Example #8
static int samsung_dmadev_prepare(unsigned ch,
			struct samsung_dma_prep_info *info)
{
	struct scatterlist sg;
	struct dma_chan *chan = (struct dma_chan *)ch;
	struct dma_async_tx_descriptor *desc;

	switch (info->cap) {
	case DMA_SLAVE:
		sg_init_table(&sg, 1);
		sg_dma_len(&sg) = info->len;
		sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
			    info->len, offset_in_page(info->buf));
		sg_dma_address(&sg) = info->buf;

		desc = dmaengine_prep_slave_sg(chan,
			&sg, 1, info->direction, DMA_PREP_INTERRUPT);
		break;
	case DMA_CYCLIC:
		desc = dmaengine_prep_dma_cyclic(chan,
			info->buf, info->len, info->period, info->direction);
		break;
	default:
		dev_err(&chan->dev->device, "unsupported format\n");
		return -EFAULT;
	}

	if (!desc) {
		dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
		return -EFAULT;
	}

	desc->callback = info->fp;
	desc->callback_param = info->fp_param;

	dmaengine_submit(desc);

	return 0;
}
Example #9
static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct dma_chan *chan = prtd->dma_chan;
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction direction;

	direction = snd_pcm_substream_to_dma_direction(substream);

	prtd->pos = 0;
	desc = dmaengine_prep_dma_cyclic(chan,
		substream->runtime->dma_addr,
		snd_pcm_lib_buffer_bytes(substream),
		snd_pcm_lib_period_bytes(substream), direction);

	if (!desc)
		return -ENOMEM;

	desc->callback = dmaengine_pcm_dma_complete;
	desc->callback_param = substream;
	dmaengine_submit(desc);

	return 0;
}
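
Note: Examples #8 and #9 above come from older kernels in which dmaengine_prep_dma_cyclic() did not yet take a flags argument. Current kernels use the six-argument form seen in the other examples, where the last parameter carries prep/control flags such as DMA_PREP_INTERRUPT (request a callback per period) and DMA_CTRL_ACK. A minimal sketch of that call follows; the identifiers are illustrative and not taken from any of the projects above.

#include <linux/dmaengine.h>

/*
 * Sketch only: wraps the six-argument dmaengine_prep_dma_cyclic() used by
 * the other examples. buf and buf_len describe the whole ring buffer,
 * period_len the chunk after which the engine invokes the completion
 * callback when DMA_PREP_INTERRUPT is set.
 */
static struct dma_async_tx_descriptor *prep_cyclic_example(
		struct dma_chan *chan, dma_addr_t buf, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir)
{
	return dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len, dir,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}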
Example #10
static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie;
	int ret;

	/* Request DMA RX channel */
	stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
	if (!stm32port->rx_ch) {
		dev_info(dev, "rx dma alloc failed\n");
		return -ENODEV;
	}
	stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
						 &stm32port->rx_dma_buf,
						 GFP_KERNEL);
	if (!stm32port->rx_buf) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
					 stm32port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "rx dma prep cyclic failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	/* No callback as dma buffer is drained on usart interrupt */
	desc->callback = NULL;
	desc->callback_param = NULL;

	/* Push current DMA transaction in the pending queue */
	cookie = dmaengine_submit(desc);

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32port->rx_ch);

	return 0;

config_err:
	dma_free_coherent(&pdev->dev,
			  RX_BUF_L, stm32port->rx_buf,
			  stm32port->rx_dma_buf);

alloc_err:
	dma_release_channel(stm32port->rx_ch);
	stm32port->rx_ch = NULL;

	return ret;
}