Example #1
static irqreturn_t moxart_irq(int irq, void *devid)
{
	struct moxart_host *host = (struct moxart_host *)devid;
	u32 status;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	status = readl(host->base + REG_STATUS);
	if (status & CARD_CHANGE) {
		host->is_removed = status & CARD_DETECT;
		if (host->is_removed && host->have_dma) {
			dmaengine_terminate_all(host->dma_chan_tx);
			dmaengine_terminate_all(host->dma_chan_rx);
		}
		host->mrq = NULL;
		writel(MASK_INTR_PIO, host->base + REG_CLEAR);
		writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
		mmc_detect_change(host->mmc, 0);
	}
	if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
		moxart_transfer_pio(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
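All of the examples on this page follow the same dmaengine lifecycle: prepare a descriptor, submit it, issue the pending work, and fall back to dmaengine_terminate_all() on error, timeout or teardown. Below is a minimal sketch of the submit/issue side, assuming a hypothetical helper whose channel and already-mapped buffer (chan, dma_addr, len) are set up by the caller:

#include <linux/dmaengine.h>

/* Hypothetical helper: start a slave transfer on an already-configured
 * channel. On later failure the owner calls dmaengine_terminate_all(chan),
 * exactly as the examples on this page do. */
static int example_start_dma(struct dma_chan *chan, dma_addr_t dma_addr,
			     size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, dma_addr, len,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}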
Example #2
/**
 * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
 * @substream: PCM substream
 * @cmd: Trigger command
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * This function can be used as the PCM trigger callback for dmaengine based PCM
 * driver implementations.
 */
int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		ret = dmaengine_pcm_prepare_and_submit(substream);
		if (ret)
			return ret;
		dma_async_issue_pending(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		dmaengine_resume(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (runtime->info & SNDRV_PCM_INFO_RESUME)
			dmaengine_pause(prtd->dma_chan);
		else
			dmaengine_terminate_all(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		dmaengine_pause(prtd->dma_chan);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		dmaengine_terminate_all(prtd->dma_chan);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
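As the kernel-doc above says, this helper can be plugged directly into a driver's PCM ops. A minimal sketch of that wiring follows; foo_pcm_open and foo_pcm_hw_params are hypothetical driver-specific callbacks, while the two dmaengine helpers are the real ones exported by sound/core/pcm_dmaengine.c:

#include <sound/pcm.h>
#include <sound/dmaengine_pcm.h>

/* Hypothetical ops table showing where the trigger helper slots in. */
static const struct snd_pcm_ops foo_pcm_ops = {
	.open      = foo_pcm_open,		/* hypothetical */
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = foo_pcm_hw_params,		/* hypothetical */
	.trigger   = snd_dmaengine_pcm_trigger,
	.pointer   = snd_dmaengine_pcm_pointer,
};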
Example #3
static int rockchip_spi_unprepare_message(struct spi_master *master,
					  struct spi_message *msg)
{
	unsigned long flags;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spin_lock_irqsave(&rs->lock, flags);

	/*
	 * For DMA mode, we need to terminate the DMA channel and flush
	 * the FIFO for the next transfer if the DMA transfer times out.
	 * unprepare_message() is called by the core when a transfer
	 * completes or times out, so it is a reasonable place for this
	 * error handling.
	 */
	if (rs->use_dma) {
		if (rs->state & RXBUSY) {
			dmaengine_terminate_all(rs->dma_rx.ch);
			flush_fifo(rs);
		}

		if (rs->state & TXBUSY)
			dmaengine_terminate_all(rs->dma_tx.ch);
	}

	spin_unlock_irqrestore(&rs->lock, flags);

	spi_enable_chip(rs, 0);

	return 0;
}
Example #4
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);
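	/* GNU "a ?: b" evaluates to a if a is non-zero, else b: report the
	 * rxchan error if there was one, otherwise the txchan result. */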
	return ret ?: dmaengine_terminate_all(dma->txchan);
}
Example #5
static void spi_qup_dma_terminate(struct spi_master *master,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(master->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(master->dma_rx);
}
Example #6
static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd)
{
	if (pd->dma_direction == DMA_NONE)
		return;
	else if (pd->dma_direction == DMA_FROM_DEVICE)
		dmaengine_terminate_all(pd->dma_rx);
	else if (pd->dma_direction == DMA_TO_DEVICE)
		dmaengine_terminate_all(pd->dma_tx);

	sh_mobile_i2c_dma_unmap(pd);
}
Example #7
static void usdhi6_dma_kill(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
		__func__, data->sg_len, data->blocks, data->blksz);
	/* Abort DMA */
	if (data->flags & MMC_DATA_READ)
		dmaengine_terminate_all(host->chan_rx);
	else
		dmaengine_terminate_all(host->chan_tx);
}
Example #8
static void
mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
		     int abort)
{
	enum dma_data_direction dma_data_dir;
	struct device *dev = mmc_dev(host->mmc);
	struct dma_chan *c;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		c = host->dma_tx;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		c = host->dma_rx;
	}
	if (c) {
		if (data->error) {
			dmaengine_terminate_all(c);
			/* Claim nothing transferred on error... */
			data->bytes_xfered = 0;
		}
		dev = c->device->dev;
	}
	dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
}
Example #9
static int ux500_dma_channel_abort(struct dma_channel *channel)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct ux500_dma_controller *controller = ux500_channel->controller;
	struct musb *musb = controller->private_data;
	void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
	u16 csr;

	dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
		ux500_channel->ch_num, ux500_channel->is_tx);

	if (channel->status == MUSB_DMA_STATUS_BUSY) {
		if (ux500_channel->is_tx) {
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET |
				 MUSB_TXCSR_DMAENAB |
				 MUSB_TXCSR_DMAMODE);
			musb_writew(epio, MUSB_TXCSR, csr);
		} else {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
				 MUSB_RXCSR_DMAENAB |
				 MUSB_RXCSR_DMAMODE);
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		dmaengine_terminate_all(ux500_channel->dma_chan);
		channel->status = MUSB_DMA_STATUS_FREE;
	}
	return 0;
}
Example #10
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct omap8250_priv	*priv = p->port.private_data;
	struct uart_8250_dma	*dma = p->dma;
	struct dma_tx_state     state;
	unsigned long		flags;
	int ret;

	spin_lock_irqsave(&priv->rx_dma_lock, flags);

	if (!dma->rx_running) {
		spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
		return;
	}

	ret = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (ret == DMA_IN_PROGRESS) {
		ret = dmaengine_pause(dma->rxchan);
		if (WARN_ON_ONCE(ret))
			priv->rx_dma_broken = true;
	}
	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);

	__dma_rx_do_complete(p);
	dmaengine_terminate_all(dma->rxchan);
}
Example #11
static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	if (sirfport->rx_dma_chan) {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
				uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
					SIRFUART_RX_DMA_INT_EN(port, uint_en) |
					uint_en->sirfsoc_rx_done_en);
		dmaengine_terminate_all(sirfport->rx_dma_chan);
	} else {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
					SIRFUART_RX_IO_INT_EN(port, uint_en));
	}
}
Example #12
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
	struct s3c24xx_uart_port *ourport = to_ourport(port);
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct tty_port *t = &port->state->port;
	struct dma_tx_state state;
	enum dma_status dma_status;
	unsigned int received;

	if (rx_enabled(port)) {
		dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
		if (s3c24xx_serial_has_interrupt_mask(port))
			s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
					S3C64XX_UINTM);
		else
			disable_irq_nosync(ourport->rx_irq);
		rx_enabled(port) = 0;
	}
	if (dma && dma->rx_chan) {
		dmaengine_pause(dma->rx_chan);
		dma_status = dmaengine_tx_status(dma->rx_chan,
				dma->rx_cookie, &state);
		if (dma_status == DMA_IN_PROGRESS ||
			dma_status == DMA_PAUSED) {
			received = dma->rx_bytes_requested - state.residue;
			dmaengine_terminate_all(dma->rx_chan);
			s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
		}
	}
}
Example #13
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		if (host->dma_desc_current == next->dma_desc)
			host->dma_desc_current = NULL;

		if (host->dma_current == next->dma_chan) {
			host->dma_in_progress = false;
			host->dma_current = NULL;
		}

		next->dma_desc = NULL;
		next->dma_chan = NULL;
		data->host_cookie = 0;
	}
}
Example #14
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup,
		unsigned long *flags)
{
	struct dma_tx_state state;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;
	struct uart_port *u = &tup->uport;
	int count;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	async_tx_ack(tup->rx_dma_desc);
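	/* state.residue is the byte count the engine did not transfer, so
	 * the data actually received is requested minus residue. */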
	count = tup->rx_bytes_requested - state.residue;

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		spin_unlock_irqrestore(&u->lock, *flags);
		tty_flip_buffer_push(port);
		spin_lock_irqsave(&u->lock, *flags);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
Example #15
/*
 * Flush any TX data submitted for DMA and PIO. Called when the
 * TX circular buffer is reset.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tup->tx_bytes = 0;
	if (tup->tx_dma_chan)
		dmaengine_terminate_all(tup->tx_dma_chan);
}
Example #16
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
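			/* FLUSHFIFO is written twice because the endpoint
			 * FIFO may be double-buffered */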
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}
Example #17
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_in_progress = false;
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}
Example #18
static int mxc_asrc_close(struct inode *inode, struct file *file)
{
	struct asrc_pair_params *params;
	unsigned long lock_flags;

	params = file->private_data;

	if (!params)
		return 0;

	if (params->asrc_active) {
		params->asrc_active = 0;

		dmaengine_terminate_all(params->input_dma_channel);
		dmaengine_terminate_all(params->output_dma_channel);

		asrc_stop_conv(params->index);

		complete(&params->input_complete);
		complete(&params->output_complete);
		complete(&params->lastperiod_complete);
	}

	if (params->pair_hold) {
		spin_lock_irqsave(&pair_lock, lock_flags);
		params->pair_hold = 0;
		spin_unlock_irqrestore(&pair_lock, lock_flags);

		if (params->input_dma_channel)
			dma_release_channel(params->input_dma_channel);
		if (params->output_dma_channel)
			dma_release_channel(params->output_dma_channel);

		mxc_free_dma_buf(params);

		asrc_release_pair(params->index);
		asrc_finish_conv(params->index);
	}

	kfree(params);
	file->private_data = NULL;

	return 0;
}
Example #19
static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
				  struct spi_transfer *xfer)
{
	struct spi_master *master = pic32s->master;
	struct dma_async_tx_descriptor *desc_rx;
	struct dma_async_tx_descriptor *desc_tx;
	dma_cookie_t cookie;
	int ret;

	if (!master->dma_rx || !master->dma_tx)
		return -ENODEV;

	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					  xfer->rx_sg.sgl,
					  xfer->rx_sg.nents,
					  DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		ret = -EINVAL;
		goto err_dma;
	}

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					  xfer->tx_sg.sgl,
					  xfer->tx_sg.nents,
					  DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		ret = -EINVAL;
		goto err_dma;
	}

	/* Put callback on the RX transfer, which should finish last */
	desc_rx->callback = pic32_spi_dma_rx_notify;
	desc_rx->callback_param = pic32s;

	cookie = dmaengine_submit(desc_rx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto err_dma;

	cookie = dmaengine_submit(desc_tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto err_dma_tx;

	dma_async_issue_pending(master->dma_rx);
	dma_async_issue_pending(master->dma_tx);

	return 0;

err_dma_tx:
	dmaengine_terminate_all(master->dma_rx);
err_dma:
	return ret;
}
Example #20
static long asrc_ioctl_stop_conv(struct asrc_pair_params *params,
				void __user *user)
{
	enum asrc_pair_index index;
	long ret;

	ret = copy_from_user(&index, user, sizeof(index));
	if (ret) {
		dev_err(asrc->dev, "failed to get index from user space\n");
		return -EFAULT;
	}

	dmaengine_terminate_all(params->input_dma_channel);
	dmaengine_terminate_all(params->output_dma_channel);

	asrc_stop_conv(index);
	params->asrc_active = 0;

	return 0;
}
Example #21
static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	dmaengine_terminate_all(dmaen->chan);

	return 0;
}
Example #22
static void img_spfi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	unsigned long flags;

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);
}
Example #23
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
		bool dma_to_memory)
{
	if (dma_to_memory) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dma_release_channel(tup->rx_dma_chan);
		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
				tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
		tup->rx_dma_chan = NULL;
		tup->rx_dma_buf_phys = 0;
		tup->rx_dma_buf_virt = NULL;
	} else {
		dmaengine_terminate_all(tup->tx_dma_chan);
		dma_release_channel(tup->tx_dma_chan);
		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
			UART_XMIT_SIZE, DMA_TO_DEVICE);
		tup->tx_dma_chan = NULL;
		tup->tx_dma_buf_phys = 0;
		tup->tx_dma_buf_virt = NULL;
	}
}
Example #24
static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
{
	struct s3c24xx_uart_dma	*dma = p->dma;

	if (dma->rx_chan) {
		dmaengine_terminate_all(dma->rx_chan);
		dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);
		kfree(dma->rx_buf);
		dma_release_channel(dma->rx_chan);
		dma->rx_chan = NULL;
	}

	if (dma->tx_chan) {
		dmaengine_terminate_all(dma->tx_chan);
		dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);
		dma_release_channel(dma->tx_chan);
		dma->tx_chan = NULL;
	}
}
Example #25
/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status != DMA_ERROR && status != DMA_COMPLETE &&
	    wait_for_completion_timeout(&pd->dma_done, HZ))
		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");

	dmaengine_terminate_all(pd->dma_chan);
}
Example #26
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}
Example #27
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_all(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_all(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
Example #28
static void dma_stop_watchdog(struct work_struct *work)
{
	struct jz_pcm_runtime_data *prtd =
		container_of(work, struct jz_pcm_runtime_data, dwork_stop_dma.work);
	struct snd_pcm_substream *substream = prtd->substream;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;

	if (!atomic_dec_if_positive(&prtd->stopped_pending)) {
		DMA_SUBSTREAM_MSG(substream, "stop real\n");
		dmaengine_terminate_all(prtd->dma_chan);
		if (cpu_dai->driver->ops->trigger)
			cpu_dai->driver->ops->trigger(substream, prtd->stopped_cmd, cpu_dai);
	}
}
Example #29
static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	int count;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}
Example #30
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}