Example #1
static void rsnd_dma_do_work(struct work_struct *work)
{
	struct rsnd_dma *dma = container_of(work, struct rsnd_dma, work);
	struct rsnd_priv *priv = dma->priv;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t buf;
	size_t len;
	int i;

	for (i = 0; i < dma->submit_loop; i++) {

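		/* ask the upper layer for the next buffer; stop submitting when none is ready */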
		if (dma->inquiry(dma, &buf, &len) < 0)
			return;

		desc = dmaengine_prep_slave_single(
			dma->chan, buf, len, dma->dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
			return;
		}

		desc->callback		= rsnd_dma_complete;
		desc->callback_param	= dma;

		if (dmaengine_submit(desc) < 0) {
			dev_err(dev, "dmaengine_submit() fail\n");
			return;
		}

		dma_async_issue_pending(dma->chan);
	}
}
Example #2
static int omap_8250_rx_dma(struct uart_8250_port *p)
{
	struct omap8250_priv		*priv = p->port.private_data;
	struct uart_8250_dma            *dma = p->dma;
	int				err = 0;
	struct dma_async_tx_descriptor  *desc;
	unsigned long			flags;

	if (priv->rx_dma_broken)
		return -EINVAL;

	spin_lock_irqsave(&priv->rx_dma_lock, flags);

	if (dma->rx_running)
		goto out;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		err = -EBUSY;
		goto out;
	}

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);
out:
	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
	return err;
}
Example #3
static int sprd_uart_dma_submit(struct uart_port *port,
				struct sprd_uart_dma *ud, u32 trans_len,
				enum dma_transfer_direction direction,
				dma_async_tx_callback callback)
{
	struct dma_async_tx_descriptor *dma_des;
	unsigned long flags;

	flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE,
			       SPRD_DMA_NO_TRG,
			       SPRD_DMA_FRAG_REQ,
			       SPRD_DMA_TRANS_INT);

	dma_des = dmaengine_prep_slave_single(ud->chn, ud->phys_addr, trans_len,
					      direction, flags);
	if (!dma_des)
		return -ENODEV;

	dma_des->callback = callback;
	dma_des->callback_param = port;

	ud->cookie = dmaengine_submit(dma_des);
	if (dma_submit_error(ud->cookie))
		return dma_submit_error(ud->cookie);

	dma_async_issue_pending(ud->chn);

	return 0;
}
Example #4
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

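	/* truncate the requested count down to a 16-byte multiple for the DMA */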
	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}
Example #5
static void mtk8250_rx_dma(struct uart_8250_port *up)
{
	struct uart_8250_dma *dma = up->dma;
	struct mtk8250_data *data = up->port.private_data;
	struct dma_async_tx_descriptor	*desc;
	struct dma_tx_state	 state;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("failed to prepare rx slave single\n");
		return;
	}

	desc->callback = mtk8250_dma_rx_complete;
	desc->callback_param = up;

	dma->rx_cookie = dmaengine_submit(desc);

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	data->rx_pos = state.residue;

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);
}
Example #6
int nand_dma_config_start(__u32 rw, dma_addr_t addr, __u32 length)
{
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	dma_conf.direction = rw ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
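	/* the same device-side register address is used for both directions */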
	dma_conf.src_addr = 0x01c03300;
	dma_conf.dst_addr = 0x01c03300;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = rw ? sunxi_slave_id(DRQDST_NAND0, DRQSRC_SDRAM) : sunxi_slave_id(DRQDST_SDRAM, DRQSRC_NAND0);
	dmaengine_slave_config(dma_hdl, &dma_conf);

	dma_desc = dmaengine_prep_slave_single(dma_hdl, addr, length,
				(rw ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM),
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		pr_err("dmaengine prepare failed!\n");
		return -1;
	}

	dma_desc->callback = nand_dma_callback;
	dma_desc->callback_param = NULL;
	dmaengine_submit(dma_desc);

	dma_async_issue_pending(dma_hdl);

	return 0;
}
Example #7
static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
				      unsigned int count)
{
	struct uart_port *port = &ourport->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct s3c24xx_uart_dma *dma = ourport->dma;


	if (ourport->tx_mode != S3C24XX_TX_DMA)
		enable_tx_dma(ourport);

	dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
	dma->tx_transfer_addr = dma->tx_addr + xmit->tail;

	dma_sync_single_for_device(ourport->port.dev, dma->tx_transfer_addr,
				dma->tx_size, DMA_TO_DEVICE);

	dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
				dma->tx_transfer_addr, dma->tx_size,
				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!dma->tx_desc) {
		dev_err(ourport->port.dev, "Unable to get desc for Tx\n");
		return -EIO;
	}

	dma->tx_desc->callback = s3c24xx_serial_tx_dma_complete;
	dma->tx_desc->callback_param = ourport;
	dma->tx_bytes_requested = dma->tx_size;

	ourport->tx_in_progress = S3C24XX_TX_DMA;
	dma->tx_cookie = dmaengine_submit(dma->tx_desc);
	dma_async_issue_pending(dma->tx_chan);
	return 0;
}
Example #8
/* apply mode to plane pipe */
void xilinx_drm_plane_commit(struct drm_plane *base_plane)
{
	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
	struct dma_async_tx_descriptor *desc;
	uint32_t height = plane->vdma.dma_config.hsize;
	int pitch = plane->vdma.dma_config.stride;
	size_t offset;

	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);

	offset = plane->x * plane->bpp + plane->y * pitch;
	desc = dmaengine_prep_slave_single(plane->vdma.chan,
					   plane->paddr + offset,
					   height * pitch, DMA_MEM_TO_DEV, 0);
	if (!desc) {
		DRM_ERROR("failed to prepare DMA descriptor\n");
		return;
	}

	/* submit vdma desc */
	dmaengine_submit(desc);

	/* start vdma with new mode */
	dma_async_issue_pending(plane->vdma.chan);
}
Example #9
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;

	if (dma->tx_running) {
		uart_write_wakeup(&p->port);
		return -EBUSY;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->tx_running = 1;

	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

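	/* flush CPU writes to the circular buffer so the device sees them before the transfer runs */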
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);

	return 0;
}
Example #10
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dir;
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dir = DMA_DEV_TO_MEM;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dir = DMA_MEM_TO_DEV;
	}

	desc = dmaengine_prep_slave_single(dma->dma, buf->addr, buf->length,
					   dir, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	dmaengine_submit(desc);

	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}
Example #11
static int hist_buf_dma(struct ispstat *hist)
{
	dma_addr_t dma_addr = hist->active_buf->dma_addr;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int ret;

	if (unlikely(!dma_addr)) {
		dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
		goto error;
	}

	isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
	isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);
	omap3isp_flush(hist->isp);

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
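	/* src_maxburst is in units of src_addr_width words, so this bursts the whole buffer */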
	cfg.src_maxburst = hist->buf_size / 4;

	ret = dmaengine_slave_config(hist->dma_ch, &cfg);
	if (ret < 0) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave configuration failed\n");
		goto error;
	}

	tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
					 hist->buf_size, DMA_DEV_TO_MEM,
					 DMA_CTRL_ACK);
	if (tx == NULL) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave preparation failed\n");
		goto error;
	}

	tx->callback = hist_dma_cb;
	tx->callback_param = hist;
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
		goto error;
	}

	dma_async_issue_pending(hist->dma_ch);

	return STAT_BUF_WAITING_DMA;

error:
	hist_reset_mem(hist);
	return STAT_NO_BUF;
}
Example #12
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	int ret;

	if (dma->tx_running)
		return 0;

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
Example #13
int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;
	struct dma_tx_state		state;
	int				dma_status;

	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	switch (iir & 0x3f) {
	case UART_IIR_RLSI:
		/* 8250_core handles errors and break interrupts */
		return -EIO;
	case UART_IIR_RX_TIMEOUT:
		/*
		 * If RCVR FIFO trigger level was not reached, complete the
		 * transfer and let 8250_core copy the remaining data.
		 */
		if (dma_status == DMA_IN_PROGRESS) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_complete(p);
		}
		return -ETIMEDOUT;
	default:
		break;
	}

	if (dma_status)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
Example #14
static void pxa_uart_receive_dma_start(struct uart_pxa_port *up)
{
	unsigned long flags;
	struct uart_pxa_dma *uart_dma = &up->uart_dma;
	struct dma_slave_config slave_config = {0};
	int ret;

	spin_lock_irqsave(&up->port.lock, flags);
	if (uart_dma->dma_status & RX_DMA_RUNNING) {
		spin_unlock_irqrestore(&up->port.lock, flags);
		return;
	}
	uart_dma->dma_status |= RX_DMA_RUNNING;
	spin_unlock_irqrestore(&up->port.lock, flags);

	slave_config.direction	    = DMA_DEV_TO_MEM;
	slave_config.src_addr	    = up->port.mapbase;
	slave_config.src_maxburst   = 16;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	/*
	 * FIXME: keep slave_id for compatibility.
	 * If mmp_pdma doesn't use it any more, we should remove all code
	 * related with slave_id/drcmr_rx/drdmr_tx.
	 */
	if (uart_dma->drcmr_rx)
		slave_config.slave_id       = uart_dma->drcmr_rx;
	else
		slave_config.slave_id	    = 0;

	ret = dmaengine_slave_config(uart_dma->rxdma_chan, &slave_config);
	if (ret) {
		dev_err(up->port.dev,
			"%s: dmaengine slave config err.\n", __func__);
		return;
	}

	uart_dma->rx_desc = dmaengine_prep_slave_single(uart_dma->rxdma_chan,
		uart_dma->rxdma_addr_phys, DMA_BLOCK, DMA_DEV_TO_MEM, 0);
	if (!uart_dma->rx_desc) {
		dev_err(up->port.dev,
			"%s: failed to prepare rx DMA descriptor.\n", __func__);
		return;
	}
	uart_dma->rx_desc->callback = pxa_uart_receive_dma_cb;
	uart_dma->rx_desc->callback_param = up;

	uart_dma->rx_cookie = dmaengine_submit(uart_dma->rx_desc);
	dma_async_issue_pending(uart_dma->rxdma_chan);
}
Example #15
static u32 tegra20_fuse_readl(const unsigned int offset)
{
	int ret;
	u32 val = 0;
	struct dma_async_tx_descriptor *dma_desc;

	mutex_lock(&apb_dma_lock);

	dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
	ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
	if (ret)
		goto out;

	dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
			sizeof(u32), DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		goto out;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

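	/* arm the completion before submitting so the DMA callback cannot be missed */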
	reinit_completion(&apb_dma_wait);

	clk_prepare_enable(fuse_clk);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(apb_dma_chan);
	ret = wait_for_completion_timeout(&apb_dma_wait, msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out"))
		dmaengine_terminate_all(apb_dma_chan);
	else
		val = *apb_buffer;

	clk_disable_unprepare(fuse_clk);
out:
	mutex_unlock(&apb_dma_lock);

	return val;
}
Example #16
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}
Example #17
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	sirfport->rx_dma_items[index].xmit.tail =
		sirfport->rx_dma_items[index].xmit.head = 0;
	sirfport->rx_dma_items[index].desc =
		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!sirfport->rx_dma_items[index].desc) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items[index].desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
	sirfport->rx_dma_items[index].cookie =
		dmaengine_submit(sirfport->rx_dma_items[index].desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
}
Example #18
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
{
	struct s3c24xx_uart_dma *dma = ourport->dma;

	dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
				dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!dma->rx_desc) {
		dev_err(ourport->port.dev, "Unable to get desc for Rx\n");
		return;
	}

	dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete;
	dma->rx_desc->callback_param = ourport;
	dma->rx_bytes_requested = dma->rx_size;

	dma->rx_cookie = dmaengine_submit(dma->rx_desc);
	dma_async_issue_pending(dma->rx_chan);
}
Example #19
static void pxa_uart_transmit_dma_start(struct uart_pxa_port *up, int count)
{
	struct uart_pxa_dma *pxa_dma = &up->uart_dma;
	struct dma_slave_config slave_config = {0};
	int ret;

	slave_config.direction	    = DMA_MEM_TO_DEV;
	slave_config.dst_addr	    = up->port.mapbase;
	slave_config.dst_maxburst   = 16;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	/*
	 * FIXME: keep slave_id for compatibility.
	 * If mmp_pdma doesn't use it any more, we should remove all code
	 * related with slave_id/drcmr_rx/drdmr_tx.
	 */
	if (pxa_dma->drcmr_tx)
		slave_config.slave_id       = pxa_dma->drcmr_tx;
	else
		slave_config.slave_id	    = 0;

	ret = dmaengine_slave_config(pxa_dma->txdma_chan, &slave_config);
	if (ret) {
		dev_err(up->port.dev,
			"%s: dmaengine slave config err.\n", __func__);
		return;
	}

	pxa_dma->tx_size = count;
	pxa_dma->tx_desc = dmaengine_prep_slave_single(pxa_dma->txdma_chan,
		pxa_dma->txdma_addr_phys, count, DMA_MEM_TO_DEV, 0);
	if (!pxa_dma->tx_desc) {
		dev_err(up->port.dev,
			"%s: failed to prepare tx DMA descriptor.\n", __func__);
		return;
	}
	pxa_dma->tx_desc->callback = pxa_uart_transmit_dma_cb;
	pxa_dma->tx_desc->callback_param = up;

	pxa_dma->tx_cookie = dmaengine_submit(pxa_dma->tx_desc);
	pm_qos_update_request(&up->qos_idle[PXA_UART_TX],
			      up->lpm_qos);

	dma_async_issue_pending(pxa_dma->txdma_chan);
}
Example #20
static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
{
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction dir;
	struct dma_chan *chan;

	dev_dbg(i2c_dev->dev, "starting DMA for length: %zu\n", len);
	reinit_completion(&i2c_dev->dma_complete);
	dir = i2c_dev->msg_read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	chan = i2c_dev->msg_read ? i2c_dev->rx_dma_chan : i2c_dev->tx_dma_chan;
	dma_desc = dmaengine_prep_slave_single(chan, i2c_dev->dma_phys,
					       len, dir, DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(i2c_dev->dev, "failed to get DMA descriptor\n");
		return -EINVAL;
	}

	dma_desc->callback = tegra_i2c_dma_complete;
	dma_desc->callback_param = i2c_dev;
	dmaengine_submit(dma_desc);
	dma_async_issue_pending(chan);
	return 0;
}
Example #21
static int stm32_spdifrx_dma_ctrl_start(struct stm32_spdifrx_data *spdifrx)
{
	dma_cookie_t cookie;
	int err;

	spdifrx->desc = dmaengine_prep_slave_single(spdifrx->ctrl_chan,
						    spdifrx->dmab->addr,
						    SPDIFRX_CSR_BUF_LENGTH,
						    DMA_DEV_TO_MEM,
						    DMA_CTRL_ACK);
	if (!spdifrx->desc)
		return -EINVAL;

	spdifrx->desc->callback = stm32_spdifrx_dma_complete;
	spdifrx->desc->callback_param = spdifrx;
	cookie = dmaengine_submit(spdifrx->desc);
	err = dma_submit_error(cookie);
	if (err)
		return -EINVAL;

	dma_async_issue_pending(spdifrx->ctrl_chan);

	return 0;
}
Example #22
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
Example #23
static void stm32_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie;
	unsigned int count, i;

	if (stm32port->tx_dma_busy)
		return;

	stm32port->tx_dma_busy = true;

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc) {
		/* fall back to PIO and release the DMA path for the next call */
		stm32port->tx_dma_busy = false;
		for (i = count; i > 0; i--)
			stm32_transmit_chars_pio(port);
		return;
	}

	desc->callback = stm32_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	cookie = dmaengine_submit(desc);

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_clr_bits(port, ofs->isr, USART_SR_TC);
	stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
}
Example #24
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
			!tran_size)
		return;
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires buffer address and buffer length are both aligned with
	 * 4 bytes, so we use PIO for
	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
	 * bytes, and move to DMA for the left part aligned with 4bytes
	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
	 * part first, move to PIO for the left 1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		tran_size &= ~(0x3);

		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		sirfport->transfer_size = tran_size;

		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}
Example #25
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}
Example #26
static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc)) {
			spin_unlock_irqrestore(&musb->lock, flags);
			return;
		}

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
Example #27
static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma            *dma = p->dma;
	struct dma_async_tx_descriptor  *desc;

	switch (iir & 0x3f) {
	case UART_IIR_RLSI:
		/* 8250_core handles errors and break interrupts */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -EIO;
	case UART_IIR_RX_TIMEOUT:
		/*
		 * If RCVR FIFO trigger level was not reached, complete the
		 * transfer and let 8250_core copy the remaining data.
		 */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -ETIMEDOUT;
	case UART_IIR_RDI:
		/*
		 * The OMAP UART is a special BEAST. If we receive RDI we _have_
		 * a DMA transfer programmed but it didn't work. One reason is
		 * that we were too slow and there were too many bytes in the
		 * FIFO, the UART counted wrong and never kicked the DMA engine
		 * to do anything. That means once we receive RDI on OMAP then
		 * the DMA won't do anything soon so we have to cancel the DMA
		 * transfer and purge the FIFO manually.
		 */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -ETIMEDOUT;

	default:
		break;
	}

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);
	return 0;
}
Example #28
static int omap_8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct omap8250_priv		*priv = p->port.private_data;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	unsigned int	skip_byte = 0;
	int ret;

	if (dma->tx_running)
		return 0;
	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {

		/*
		 * Even if no data, we need to return an error for the two cases
		 * below so serial8250_tx_chars() is invoked and properly clears
		 * THRI and/or runtime suspend.
		 */
		if (dma->tx_err || p->capabilities & UART_CAP_RPM) {
			ret = -EBUSY;
			goto err;
		}
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
		return 0;
	}

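	/* transfer only up to the end of the circular buffer; wrapped data is handled by a later call */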
	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (priv->habit & OMAP_DMA_TX_KICK) {
		u8 tx_lvl;

		/*
		 * We need to put the first byte into the FIFO in order to start
		 * the DMA transfer. For transfers smaller than four bytes we
		 * don't bother doing DMA at all. It seem not matter if there
		 * are still bytes in the FIFO from the last transfer (in case
		 * we got here directly from omap_8250_dma_tx_complete()). Bytes
		 * leaving the FIFO seem not to trigger the DMA transfer. It is
		 * really the byte that we put into the FIFO.
		 * If the FIFO is already full then we most likely got here from
		 * omap_8250_dma_tx_complete(). And this means the DMA engine
		 * just completed its work. We don't have to wait the complete
		 * 86us at 115200,8n1 but around 60us (not to mention lower
		 * baudrates). So in that case we take the interrupt and try
		 * again with an empty FIFO.
		 */
		tx_lvl = serial_in(p, UART_OMAP_TX_LVL);
		if (tx_lvl == p->tx_loadsz) {
			ret = -EBUSY;
			goto err;
		}
		if (dma->tx_size < 4) {
			ret = -EINVAL;
			goto err;
		}
		skip_byte = 1;
	}

	desc = dmaengine_prep_slave_single(dma->txchan,
			dma->tx_addr + xmit->tail + skip_byte,
			dma->tx_size - skip_byte, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;

	desc->callback = omap_8250_dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err)
		dma->tx_err = 0;

	if (p->ier & UART_IER_THRI) {
		p->ier &= ~UART_IER_THRI;
		serial_out(p, UART_IER, p->ier);
	}
	if (skip_byte)
		serial_out(p, UART_TX, xmit->buf[xmit->tail]);
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
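Taken together, the examples above all follow the same slave-DMA sequence: optionally configure the channel with dmaengine_slave_config(), build a descriptor with dmaengine_prep_slave_single(), attach a completion callback, queue the descriptor with dmaengine_submit(), and finally start the engine with dma_async_issue_pending(). The sketch below condenses that sequence into one minimal RX function. It is not taken from any of the drivers above; the my_* names, the channel/buffer fields and the FIFO address are hypothetical placeholders.
/*
 * Minimal sketch of the common dmaengine slave pattern shown in the
 * examples above. Everything prefixed with "my_" is a placeholder.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>

struct my_dev {
	struct device *dev;
	struct dma_chan *chan;		/* obtained with dma_request_chan() */
	dma_addr_t buf_phys;		/* DMA-mapped data buffer */
	size_t buf_len;
	dma_addr_t fifo_phys;		/* device data register */
	dma_async_tx_callback done;	/* completion callback */
};

static int my_start_rx_dma(struct my_dev *md)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= md->fifo_phys,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	/* 1. describe the device side of the transfer */
	ret = dmaengine_slave_config(md->chan, &cfg);
	if (ret)
		return ret;

	/* 2. prepare a single-buffer slave descriptor */
	desc = dmaengine_prep_slave_single(md->chan, md->buf_phys, md->buf_len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	/* 3. hook up the completion callback */
	desc->callback = md->done;
	desc->callback_param = md;

	/* 4. queue the descriptor; nothing moves yet */
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	/* 5. kick the channel so queued descriptors start executing */
	dma_async_issue_pending(md->chan);

	return 0;
}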