Example #1
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	int ret;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
		chan = host->dma_tx;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
		chan = host->dma_rx;
	}

	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
	if (ret)
		return ret;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan,
				       data->sg,
				       host->sg_len,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}
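The examples in this collection all revolve around the same canonical dmaengine slave sequence: configure the channel, prepare a descriptor, optionally attach a completion callback, submit, and finally kick the engine with dma_async_issue_pending(). Below is a minimal sketch of that sequence distilled from the examples; start_rx_dma(), my_dma_done() and their parameters are hypothetical names, not taken from any driver in this file.

/*
 * Minimal slave-DMA sketch: configure -> prep -> submit -> issue_pending.
 * All names are hypothetical; assumes <linux/dmaengine.h> and
 * <linux/completion.h>.
 */
static void my_dma_done(void *param)
{
	complete(param);	/* param is a struct completion * */
}

static int start_rx_dma(struct dma_chan *chan, dma_addr_t dev_fifo_addr,
			dma_addr_t buf_dma, size_t len,
			struct completion *done)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= dev_fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	desc->callback = my_dma_done;
	desc->callback_param = done;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	/* nothing is transferred until the pending queue is issued */
	dma_async_issue_pending(chan);
	return 0;
}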
Example #2
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	int ret;

	if (dma->tx_running)
		return 0;

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
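serial8250_tx_dma() relies on __dma_tx_complete() to account the transmitted bytes and re-arm the next chunk of the circular buffer; the callback itself is not shown above. The sketch below illustrates what such a completion handler typically does, loosely modeled on 8250_dma.c; error handling and runtime-PM details are omitted and the exact upstream body may differ.

/* Sketch of the TX completion callback referenced above (not verbatim). */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct circ_buf		*xmit = &p->port.state->xmit;
	unsigned long		flags;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	/* consume what the engine just transmitted */
	xmit->tail = (xmit->tail + dma->tx_size) & (UART_XMIT_SIZE - 1);
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/* re-arm for the rest of the circular buffer, if any */
	if (!uart_circ_empty(xmit))
		serial8250_tx_dma(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}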
Example #3
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
	u32 len, dir_data, dir_slave;
	unsigned long dma_time;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	if (host->data_len == data->bytes_xfered)
		return;

	if (data->flags & MMC_DATA_WRITE) {
		dma_chan = host->dma_chan_tx;
		dir_data = DMA_TO_DEVICE;
		dir_slave = DMA_MEM_TO_DEV;
	} else {
		dma_chan = host->dma_chan_rx;
		dir_data = DMA_FROM_DEVICE;
		dir_slave = DMA_DEV_TO_MEM;
	}

	len = dma_map_sg(dma_chan->device->dev, data->sg,
			 data->sg_len, dir_data);

	if (len > 0) {
		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
					       len, dir_slave,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
	} else {
		dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
	}

	if (desc) {
		host->tx_desc = desc;
		desc->callback = moxart_dma_complete;
		desc->callback_param = host;
		dmaengine_submit(desc);
		dma_async_issue_pending(dma_chan);
	}

	data->bytes_xfered += host->data_remain;

	dma_time = wait_for_completion_interruptible_timeout(
		   &host->dma_complete, host->timeout);

	dma_unmap_sg(dma_chan->device->dev,
		     data->sg, data->sg_len,
		     dir_data);
}
Example #4
int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;
	struct dma_tx_state		state;
	int				dma_status;

	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	switch (iir & 0x3f) {
	case UART_IIR_RLSI:
		/* 8250_core handles errors and break interrupts */
		return -EIO;
	case UART_IIR_RX_TIMEOUT:
		/*
		 * If RCVR FIFO trigger level was not reached, complete the
		 * transfer and let 8250_core copy the remaining data.
		 */
		if (dma_status == DMA_IN_PROGRESS) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_complete(p);
		}
		return -ETIMEDOUT;
	default:
		break;
	}

	if (dma_status)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
Example #5
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
Example #6
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
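dma_sync_wait() busy-polls a cookie until the transfer leaves DMA_IN_PROGRESS, which makes it a fit for one-off transfers where no completion callback is wanted. A hedged usage sketch follows, assuming a channel with DMA_MEMCPY capability, already-mapped dst/src addresses, and a kernel that provides the dmaengine_prep_dma_memcpy() wrapper; blocking_memcpy() is a hypothetical name.

/* One-shot memcpy through dmaengine, waited on synchronously (sketch). */
static int blocking_memcpy(struct dma_chan *chan, dma_addr_t dst,
			   dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* dma_sync_wait() issues pending work itself and then polls */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)	/* DMA_SUCCESS on older kernels */
		return -EIO;

	return 0;
}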
Example #7
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		return ret;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error:
	media_entity_pipeline_stop(&dma->video.entity);
	return ret;
}
Example #8
static void mmc_omap_start_request(struct mmc_omap_host *host,
				   struct mmc_request *req)
{
	BUG_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use) {
		struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
				host->dma_tx : host->dma_rx;

		dma_async_issue_pending(c);
	}
}
Example #9
static void pxa_uart_receive_dma_start(struct uart_pxa_port *up)
{
	unsigned long flags;
	struct uart_pxa_dma *uart_dma = &up->uart_dma;
	struct dma_slave_config slave_config;
	int ret;

	spin_lock_irqsave(&up->port.lock, flags);
	if (uart_dma->dma_status & RX_DMA_RUNNING) {
		spin_unlock_irqrestore(&up->port.lock, flags);
		return;
	}
	uart_dma->dma_status |= RX_DMA_RUNNING;
	spin_unlock_irqrestore(&up->port.lock, flags);

	slave_config.direction	    = DMA_DEV_TO_MEM;
	slave_config.src_addr	    = up->port.mapbase;
	slave_config.src_maxburst   = 16;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	/*
	 * FIXME: keep slave_id for compatibility.
	 * If mmp_pdma doesn't use it any more, we should remove all code
	 * related with slave_id/drcmr_rx/drcmr_tx.
	 */
	if (uart_dma->drcmr_rx)
		slave_config.slave_id       = uart_dma->drcmr_rx;
	else
		slave_config.slave_id	    = 0;

	ret = dmaengine_slave_config(uart_dma->rxdma_chan, &slave_config);
	if (ret) {
		dev_err(up->port.dev,
			"%s: dmaengine slave config err.\n", __func__);
		return;
	}

	uart_dma->rx_desc = dmaengine_prep_slave_single(uart_dma->rxdma_chan,
		uart_dma->rxdma_addr_phys, DMA_BLOCK, DMA_DEV_TO_MEM, 0);
	/* guard against a failed prep to avoid a NULL dereference */
	if (!uart_dma->rx_desc) {
		dev_err(up->port.dev, "%s: prep_slave_single failed\n", __func__);
		return;
	}
	uart_dma->rx_desc->callback = pxa_uart_receive_dma_cb;
	uart_dma->rx_desc->callback_param = up;

	uart_dma->rx_cookie = dmaengine_submit(uart_dma->rx_desc);
	dma_async_issue_pending(uart_dma->rxdma_chan);
}
Example #10
static void bcm2835_sdhost_transfer_dma(struct bcm2835_host *host)
{
	u32 len, dir_data, dir_slave;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	pr_debug("bcm2835_sdhost_transfer_dma()\n");

	WARN_ON(!host->data);

	if (!host->data)
		return;

	if (host->data->flags & MMC_DATA_READ) {
		dma_chan = host->dma_chan_rx;
		dir_data = DMA_FROM_DEVICE;
		dir_slave = DMA_DEV_TO_MEM;
	} else {
		dma_chan = host->dma_chan_tx;
		dir_data = DMA_TO_DEVICE;
		dir_slave = DMA_MEM_TO_DEV;
	}

	BUG_ON(!dma_chan->device);
	BUG_ON(!dma_chan->device->dev);
	BUG_ON(!host->data->sg);

	len = dma_map_sg(dma_chan->device->dev, host->data->sg,
			 host->data->sg_len, dir_data);
	if (len > 0) {
		desc = dmaengine_prep_slave_sg(dma_chan, host->data->sg,
					       len, dir_slave,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
	}
	if (desc) {
		desc->callback = bcm2835_sdhost_dma_complete;
		desc->callback_param = host;
		dmaengine_submit(desc);
		dma_async_issue_pending(dma_chan);
	}
}
Example #11
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
Example #12
static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;

	if (dev->buf_len <= 0)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);
	sg_dma_len(&dma->sg) = dev->buf_len;
	sg_dma_address(&dma->sg) = dma_addr;

	txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
Example #13
static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;

	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);
	dma->sg.dma_address = dma_addr;
	sg_dma_len(&dma->sg) = dev->buf_len - 2;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
Example #14
static u32 tegra20_fuse_readl(const unsigned int offset)
{
	int ret;
	u32 val = 0;
	struct dma_async_tx_descriptor *dma_desc;

	mutex_lock(&apb_dma_lock);

	dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
	ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
	if (ret)
		goto out;

	dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
			sizeof(u32), DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		goto out;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	reinit_completion(&apb_dma_wait);

	clk_prepare_enable(fuse_clk);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(apb_dma_chan);
	ret = wait_for_completion_timeout(&apb_dma_wait, msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out"))
		dmaengine_terminate_all(apb_dma_chan);
	else
		val = *apb_buffer;

	clk_disable_unprepare(fuse_clk);
out:
	mutex_unlock(&apb_dma_lock);

	return val;
}
Example #15
static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ignore_crc, get_resp, long_resp;
	u32 ctrl0, cmd0, cmd1;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}
Example #16
static void
smi_dma_write_sgl(struct bcm2835_smi_instance *inst,
	struct scatterlist *sgl, size_t sg_len, size_t n_bytes)
{
	struct dma_async_tx_descriptor *desc;

	if (inst->settings.data_width == SMI_WIDTH_8BIT)
		smi_init_programmed_write(inst, n_bytes);
	else
		smi_init_programmed_write(inst, n_bytes / 2);

	desc = smi_dma_submit_sgl(inst, sgl, sg_len, DMA_MEM_TO_DEV, NULL);
	dma_async_issue_pending(inst->dma_chan);

	if (dma_wait_for_async_tx(desc) == DMA_ERROR)
		smi_dump_context_labelled(inst, "DMA timeout!");
	else
		/* Wait for SMI to finish our writes */
		while (!(read_smi_reg(inst, SMICS) & SMICS_DONE))
			cpu_relax();
}
Example #17
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	sirfport->rx_dma_items[index].xmit.tail =
		sirfport->rx_dma_items[index].xmit.head = 0;
	sirfport->rx_dma_items[index].desc =
		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!sirfport->rx_dma_items[index].desc) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items[index].desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
	sirfport->rx_dma_items[index].cookie =
		dmaengine_submit(sirfport->rx_dma_items[index].desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
}
Example #18
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_FROM_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}
Example #19
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
{
	struct s3c24xx_uart_dma *dma = ourport->dma;

	dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
				dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!dma->rx_desc) {
		dev_err(ourport->port.dev, "Unable to get desc for Rx\n");
		return;
	}

	dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete;
	dma->rx_desc->callback_param = ourport;
	dma->rx_bytes_requested = dma->rx_size;

	dma->rx_cookie = dmaengine_submit(dma->rx_desc);
	dma_async_issue_pending(dma->rx_chan);
}
Example #20
static void pxa_uart_transmit_dma_start(struct uart_pxa_port *up, int count)
{
	struct uart_pxa_dma *pxa_dma = &up->uart_dma;
	struct dma_slave_config slave_config;
	int ret;

	slave_config.direction	    = DMA_MEM_TO_DEV;
	slave_config.dst_addr	    = up->port.mapbase;
	slave_config.dst_maxburst   = 16;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	/*
	 * FIXME: keep slave_id for compatibility.
	 * If mmp_pdma doesn't use it any more, we should remove all code
	 * related with slave_id/drcmr_rx/drcmr_tx.
	 */
	if (pxa_dma->drcmr_tx)
		slave_config.slave_id       = pxa_dma->drcmr_tx;
	else
		slave_config.slave_id	    = 0;

	ret = dmaengine_slave_config(pxa_dma->txdma_chan, &slave_config);
	if (ret) {
		dev_err(up->port.dev,
			"%s: dmaengine slave config err.\n", __func__);
		return;
	}

	pxa_dma->tx_size = count;
	pxa_dma->tx_desc = dmaengine_prep_slave_single(pxa_dma->txdma_chan,
		pxa_dma->txdma_addr_phys, count, DMA_MEM_TO_DEV, 0);
	/* guard against a failed prep to avoid a NULL dereference */
	if (!pxa_dma->tx_desc) {
		dev_err(up->port.dev, "%s: prep_slave_single failed\n", __func__);
		return;
	}
	pxa_dma->tx_desc->callback = pxa_uart_transmit_dma_cb;
	pxa_dma->tx_desc->callback_param = up;

	pxa_dma->tx_cookie = dmaengine_submit(pxa_dma->tx_desc);
	pm_qos_update_request(&up->qos_idle[PXA_UART_TX],
			      up->lpm_qos);

	dma_async_issue_pending(pxa_dma->txdma_chan);
}
Example #21
/* apply mode to plane pipe */
void xilinx_drm_plane_commit(struct drm_plane *base_plane)
{
	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
	struct dma_async_tx_descriptor *desc;
	enum dma_ctrl_flags flags;

	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);

	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	desc = dmaengine_prep_interleaved_dma(plane->dma.chan, &plane->dma.xt,
					      flags);
	if (!desc) {
		DRM_ERROR("failed to prepare DMA descriptor\n");
		return;
	}

	/* submit dma desc */
	dmaengine_submit(desc);

	/* start dma with new mode */
	dma_async_issue_pending(plane->dma.chan);
}
Example #22
static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma  *mcspi_dma;
	unsigned int		count;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
		DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}
	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);
}
Example #23
static int stm32_spdifrx_dma_ctrl_start(struct stm32_spdifrx_data *spdifrx)
{
	dma_cookie_t cookie;
	int err;

	spdifrx->desc = dmaengine_prep_slave_single(spdifrx->ctrl_chan,
						    spdifrx->dmab->addr,
						    SPDIFRX_CSR_BUF_LENGTH,
						    DMA_DEV_TO_MEM,
						    DMA_CTRL_ACK);
	if (!spdifrx->desc)
		return -EINVAL;

	spdifrx->desc->callback = stm32_spdifrx_dma_complete;
	spdifrx->desc->callback_param = spdifrx;
	cookie = dmaengine_submit(spdifrx->desc);
	err = dma_submit_error(cookie);
	if (err)
		return -EINVAL;

	dma_async_issue_pending(spdifrx->ctrl_chan);

	return 0;
}
Example #24
static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
{
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction dir;
	struct dma_chan *chan;

	dev_dbg(i2c_dev->dev, "starting DMA for length: %zu\n", len);
	reinit_completion(&i2c_dev->dma_complete);
	dir = i2c_dev->msg_read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	chan = i2c_dev->msg_read ? i2c_dev->rx_dma_chan : i2c_dev->tx_dma_chan;
	dma_desc = dmaengine_prep_slave_single(chan, i2c_dev->dma_phys,
					       len, dir, DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(i2c_dev->dev, "failed to get DMA descriptor\n");
		return -EINVAL;
	}

	dma_desc->callback = tegra_i2c_dma_complete;
	dma_desc->callback_param = i2c_dev;
	dmaengine_submit(dma_desc);
	dma_async_issue_pending(chan);
	return 0;
}
Example #25
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
Example #26
static int spi_pl330_dma_transfer(struct dw_spi *dws, int cs_change)
{
	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
	struct dma_chan *txchan, *rxchan;
	struct dma_slave_config txconf, rxconf;
	u16 dma_ctrl = 0;

	/* 1. setup DMA related registers */
	if (cs_change) {
		spi_enable_chip(dws, 0);
		/* Setup peripheral's burst watermark for TX and RX FIFO */
		dw_writew(dws, DW_SPI_DMARDLR, SSI_DMA_MAXBURST - 1);
		dw_writew(dws, DW_SPI_DMATDLR, SSI_FIFO_DEPTH - SSI_DMA_MAXBURST);

		if (dws->tx_dma)
			dma_ctrl |= 0x2;
		if (dws->rx_dma)
			dma_ctrl |= 0x1;
		dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
		spi_enable_chip(dws, 1);
	}

	dws->dma_chan_done = 0;
	txchan = dws->txchan;
	rxchan = dws->rxchan;

	/* 2. Prepare the TX dma transfer */
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	/*
	 * Note: by default the PL330 driver (drivers/dma/pl330.c) sets the
	 * burst length (dst_maxburst) for DMA_MEM_TO_DEV to 1 and derives
	 * the memory-side width (src_addr_width) from the peripheral's
	 * configuration, so txconf.dst_maxburst and txconf.src_addr_width
	 * can be skipped here. Max DMA width is 16 bit.
	 */
	if (dws->dma_width == 1)
		txconf.dst_addr_width = PL330_DMA_BRSTSZ_1B;
	else
		txconf.dst_addr_width = PL330_DMA_BRSTSZ_2B;

	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
				       (unsigned long) &txconf);

	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
	dws->tx_sgl.dma_address = dws->tx_dma;
	dws->tx_sgl.length = dws->len;

	txdesc = txchan->device->device_prep_slave_sg(txchan,
				&dws->tx_sgl,
				1,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT,
				NULL);
	/* guard against a failed prep to avoid a NULL dereference */
	if (!txdesc)
		return -ENOMEM;
	txdesc->callback = spi_pl330_dma_done;
	txdesc->callback_param = dws;

	/* 3. Prepare the RX dma transfer */
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	/*
	 * Note: by default the PL330 driver sets the burst length
	 * (src_maxburst) for DMA_DEV_TO_MEM to 1 and derives the memory-side
	 * width (dst_addr_width) from the peripheral's configuration, so
	 * rxconf.src_maxburst and rxconf.dst_addr_width can be skipped here.
	 */
	if (dws->dma_width == 1)
		rxconf.src_addr_width = PL330_DMA_BRSTSZ_1B;
	else
		rxconf.src_addr_width = PL330_DMA_BRSTSZ_2B;

	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
				       (unsigned long) &rxconf);

	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
	dws->rx_sgl.dma_address = dws->rx_dma;
	dws->rx_sgl.length = dws->len;

	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
				&dws->rx_sgl,
				1,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT,
				NULL);
	/* guard against a failed prep to avoid a NULL dereference */
	if (!rxdesc)
		return -ENOMEM;
	rxdesc->callback = spi_pl330_dma_done;
	rxdesc->callback_param = dws;

	/* rx must be started before tx: SPI clocks data in and out simultaneously */
	rxdesc->tx_submit(rxdesc);
	dma_async_issue_pending(rxchan);
	txdesc->tx_submit(txdesc);
	dma_async_issue_pending(txchan);

	return 0;
}
Example #27
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
	    (cmd->opcode == SD_IO_RW_EXTENDED))
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}
Example #28
static int vdmafb_setupfb(struct vdmafb_dev *fbdev)
{
	struct fb_var_screeninfo *var = &fbdev->info.var;
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *dma_template = fbdev->dma_template;
	struct xilinx_vdma_config vdma_config;
	int hsize = var->xres * 4;
	u32 frame;
	int ret;

	/* Disable display */
	vdmafb_writereg(fbdev, VDMAFB_CONTROL, 0);

	dmaengine_terminate_all(fbdev->dma);

	/* Setup VDMA address etc */
	memset(&vdma_config, 0, sizeof(vdma_config));
	vdma_config.park = 1;
	vdma_config.coalesc = 255; /* Reduces unused interrupts */
	xilinx_vdma_channel_set_config(fbdev->dma, &vdma_config);

	/*
	 * Interleaved DMA:
	 * Each interleaved frame is a row (hsize) implemented in ONE
	 * chunk (the sgl has length 1).
	 * The number of interleaved frames is the number of rows (vsize).
	 * The icg is used to pack data for the HW, so that the buffer length
	 * is fb->pitches[0], but the actual size for the HW is somewhat less.
	 */
	dma_template->dir = DMA_MEM_TO_DEV;
	dma_template->src_start = fbdev->fb_phys;
	/* the sgl list has just one entry (each interleaved frame has one chunk) */
	dma_template->frame_size = 1;
	/* the number of interleaved frames, each of the size specified in the sgl */
	dma_template->numf = var->yres;
	dma_template->src_sgl = 1;
	dma_template->src_inc = 1;
	/* the vdma IP does not provide any address to the hdmi IP */
	dma_template->dst_inc = 0;
	dma_template->dst_sgl = 0;
	/* horizontal size */
	dma_template->sgl[0].size = hsize;
	/* the vdma driver seems to look at icg, not src_icg */
	dma_template->sgl[0].icg = 0; /* stride - hsize */

	for (frame = 0; frame < fbdev->frames; ++frame) {
		desc = dmaengine_prep_interleaved_dma(fbdev->dma, dma_template, 0);
		if (!desc) {
			pr_err("Failed to prepare DMA descriptor\n");
			return -ENOMEM;
		}
		dmaengine_submit(desc);
	}
	dma_async_issue_pending(fbdev->dma);

	/* Configure IP via registers */
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_TOTAL,
		var->hsync_len + var->left_margin + var->xres + var->right_margin);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_SYNC, var->hsync_len);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_FRONT_PORCH, var->left_margin);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_WIDTH, var->xres);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_BACK_PORCH, var->right_margin);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_POLARITY, 0); /* TODO */
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_TOTAL,
		var->vsync_len + var->upper_margin + var->yres + var->lower_margin);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_SYNC, var->vsync_len);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_FRONT_PORCH, var->upper_margin);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_HEIGHT, var->yres);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_BACK_PORCH, var->lower_margin);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_POLARITY, 0);
	/* Enable output */
	vdmafb_writereg(fbdev, VDMAFB_CONTROL, VDMAFB_CONTROL_ENABLE);

	/* Set brightness */

	vdmafb_writereg(fbdev, VDMAFB_BACKLIGHT_CONTROL, 1);
	vdmafb_writereg(fbdev, VDMAFB_BACKLIGHT_LEVEL_1K, 800);

	return 0;
}
Example #29
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len)
{
	int iov_byte_offset;
	int copy;
	dma_cookie_t dma_cookie = 0;
	int iovec_idx;
	int page_idx;
	int err;

	/* this needs as-yet-unimplemented buf-to-buff, so punt. */
	/* TODO: use dma for this */
	if (!chan || !pinned_list) {
		u8 *vaddr = kmap(page);
		err = memcpy_toiovec(iov, vaddr + offset, len);
		kunmap(page);
		return err;
	}

	iovec_idx = 0;
	while (iovec_idx < pinned_list->nr_iovecs) {
		struct dma_page_list *page_list;

		/* skip already used-up iovecs */
		while (!iov[iovec_idx].iov_len)
			iovec_idx++;

		page_list = &pinned_list->page_list[iovec_idx];

		iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
		page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
			 - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;

		/* break up copies to not cross page boundary */
		while (iov[iovec_idx].iov_len) {
			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
			copy = min_t(int, copy, iov[iovec_idx].iov_len);

			dma_cookie = dma_async_memcpy_pg_to_pg(chan,
					page_list->pages[page_idx],
					iov_byte_offset,
					page,
					offset,
					copy);
			/* poll for a descriptor slot */
			if (unlikely(dma_cookie < 0)) {
				dma_async_issue_pending(chan);
				continue;
			}

			len -= copy;
			iov[iovec_idx].iov_len -= copy;
			iov[iovec_idx].iov_base += copy;

			if (!len)
				return dma_cookie;

			offset += copy;
			iov_byte_offset = 0;
			page_idx++;
		}
		iovec_idx++;
	}

	/* really bad if we ever run out of iovecs */
	BUG();
	return -EFAULT;
}
Example #30
static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
	unsigned long flags;
	struct dma_slave_config rxconf, txconf;
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	spin_lock_irqsave(&rs->lock, flags);
	rs->state &= ~RXBUSY;
	rs->state &= ~TXBUSY;
	spin_unlock_irqrestore(&rs->lock, flags);

	rxdesc = NULL;
	if (rs->rx) {
		rxconf.direction = rs->dma_rx.direction;
		rxconf.src_addr = rs->dma_rx.addr;
		rxconf.src_addr_width = rs->n_bytes;
		rxconf.src_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				rs->dma_rx.ch,
				rs->rx_sg.sgl, rs->rx_sg.nents,
				rs->dma_rx.direction, DMA_PREP_INTERRUPT);
		/* bail out if the channel cannot provide a descriptor */
		if (!rxdesc)
			return;

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = rs;
	}

	txdesc = NULL;
	if (rs->tx) {
		txconf.direction = rs->dma_tx.direction;
		txconf.dst_addr = rs->dma_tx.addr;
		txconf.dst_addr_width = rs->n_bytes;
		txconf.dst_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_tx.ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				rs->dma_tx.ch,
				rs->tx_sg.sgl, rs->tx_sg.nents,
				rs->dma_tx.direction, DMA_PREP_INTERRUPT);
		/* bail out if the channel cannot provide a descriptor */
		if (!txdesc)
			return;

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = rs;
	}

	/* rx must be started before tx: SPI clocks data in and out simultaneously */
	if (rxdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= RXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rs->dma_rx.ch);
	}

	if (txdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= TXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(rs->dma_tx.ch);
	}
}