Example #1
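Single-buffer UART transfer submission: builds Spreadtrum-specific channel flags, prepares a slave descriptor for the requested direction, attaches the caller-supplied completion callback (with the uart_port as its argument), and only kicks the channel with dma_async_issue_pending() once dma_submit_error() reports a valid cookie.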
static int sprd_uart_dma_submit(struct uart_port *port,
				struct sprd_uart_dma *ud, u32 trans_len,
				enum dma_transfer_direction direction,
				dma_async_tx_callback callback)
{
	struct dma_async_tx_descriptor *dma_des;
	unsigned long flags;

	flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE,
			       SPRD_DMA_NO_TRG,
			       SPRD_DMA_FRAG_REQ,
			       SPRD_DMA_TRANS_INT);

	dma_des = dmaengine_prep_slave_single(ud->chn, ud->phys_addr, trans_len,
					      direction, flags);
	if (!dma_des)
		return -ENODEV;

	dma_des->callback = callback;
	dma_des->callback_param = port;

	ud->cookie = dmaengine_submit(dma_des);
	if (dma_submit_error(ud->cookie))
		return dma_submit_error(ud->cookie);

	dma_async_issue_pending(ud->chn);

	return 0;
}
Example #2
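Full-duplex SPI transfer: prepares RX and TX scatter-gather descriptors, puts the completion callback on the RX side (which finishes last), submits both, and issues pending work on both channels only after both submissions succeed; a failed TX submit tears down the already-queued RX descriptor with dmaengine_terminate_all().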
static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
				  struct spi_transfer *xfer)
{
	struct spi_master *master = pic32s->master;
	struct dma_async_tx_descriptor *desc_rx;
	struct dma_async_tx_descriptor *desc_tx;
	dma_cookie_t cookie;
	int ret;

	if (!master->dma_rx || !master->dma_tx)
		return -ENODEV;

	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					  xfer->rx_sg.sgl,
					  xfer->rx_sg.nents,
					  DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		ret = -EINVAL;
		goto err_dma;
	}

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					  xfer->tx_sg.sgl,
					  xfer->tx_sg.nents,
					  DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		ret = -EINVAL;
		goto err_dma;
	}

	/* Put callback on the RX transfer, that should finish last */
	desc_rx->callback = pic32_spi_dma_rx_notify;
	desc_rx->callback_param = pic32s;

	cookie = dmaengine_submit(desc_rx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto err_dma;

	cookie = dmaengine_submit(desc_tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto err_dma_tx;

	dma_async_issue_pending(master->dma_rx);
	dma_async_issue_pending(master->dma_tx);

	return 0;

err_dma_tx:
	dmaengine_terminate_all(master->dma_rx);
err_dma:
	return ret;
}
Example #3
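Direction-agnostic SPI helper: selects the channel and scatterlist from the transfer direction, prepares with DMA_PREP_INTERRUPT | DMA_PREP_FENCE, attaches the caller's callback, and returns the dma_submit_error() result; calling dma_async_issue_pending() is left to the caller.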
static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	unsigned int nents;

	if (dir == DMA_MEM_TO_DEV) {
		chan = master->dma_tx;
		nents = xfer->tx_sg.nents;
		sgl = xfer->tx_sg.sgl;
	} else {
		chan = master->dma_rx;
		nents = xfer->rx_sg.nents;
		sgl = xfer->rx_sg.sgl;
	}

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
Example #4
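Histogram readout on the OMAP3 ISP: resets the hardware read pointer, configures the slave side (source register address, 4-byte bus width, a burst covering the whole buffer in 32-bit words), prepares a single DMA_DEV_TO_MEM transfer, and on any failure resets the statistics memory and reports that no buffer is pending.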
static int hist_buf_dma(struct ispstat *hist)
{
	dma_addr_t dma_addr = hist->active_buf->dma_addr;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int ret;

	if (unlikely(!dma_addr)) {
		dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
		goto error;
	}

	isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
	isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);
	omap3isp_flush(hist->isp);

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = hist->buf_size / 4;

	ret = dmaengine_slave_config(hist->dma_ch, &cfg);
	if (ret < 0) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave configuration failed\n");
		goto error;
	}

	tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
					 hist->buf_size, DMA_DEV_TO_MEM,
					 DMA_CTRL_ACK);
	if (!tx) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave preparation failed\n");
		goto error;
	}

	tx->callback = hist_dma_cb;
	tx->callback_param = hist;
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
		goto error;
	}

	dma_async_issue_pending(hist->dma_ch);

	return STAT_BUF_WAITING_DMA;

error:
	hist_reset_mem(hist);
	return STAT_NO_BUF;
}
Example #5
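I2C transfer with PIO fallback: lazily re-requests the channel if the probe-time request deferred, maps the message buffer, builds a one-entry scatterlist, and silently falls back to PIO (after cleanup) whenever mapping, preparation, or submission fails.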
static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
{
	bool read = pd->msg->flags & I2C_M_RD;
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	struct dma_chan *chan = read ? pd->dma_rx : pd->dma_tx;
	struct dma_async_tx_descriptor *txdesc;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;

	/* The probe-time channel request deferred; retry it now. */
	if (PTR_ERR(chan) == -EPROBE_DEFER) {
		if (read)
			chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
									   pd->res->start + ICDR);
		else
			chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
									   pd->res->start + ICDR);
	}

	if (IS_ERR(chan))
		return;

	dma_addr = dma_map_single(chan->device->dev, pd->dma_buf, pd->msg->len, dir);
	if (dma_mapping_error(chan->device->dev, dma_addr)) {
		dev_dbg(pd->dev, "dma map failed, using PIO\n");
		return;
	}

	sg_dma_len(&pd->sg) = pd->msg->len;
	sg_dma_address(&pd->sg) = dma_addr;

	pd->dma_direction = dir;

	txdesc = dmaengine_prep_slave_sg(chan, &pd->sg, 1,
					 read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n");
		sh_mobile_i2c_cleanup_dma(pd);
		return;
	}

	txdesc->callback = sh_mobile_i2c_dma_callback;
	txdesc->callback_param = pd;

	cookie = dmaengine_submit(txdesc);
	if (dma_submit_error(cookie)) {
		dev_dbg(pd->dev, "submitting dma failed, using PIO\n");
		sh_mobile_i2c_cleanup_dma(pd);
		return;
	}

	dma_async_issue_pending(chan);
}
Example #6
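Per-direction SPI setup: only the RX descriptor gets DMA_PREP_INTERRUPT and a completion callback (TX completion is implied by RX finishing, mirroring Example #2); issuing pending work is again left to the caller.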
static int bcm2835_spi_prepare_sg(struct spi_master *master,
				  struct spi_transfer *tfr,
				  bool is_tx)
{
	struct dma_chan *chan;
	struct scatterlist *sgl;
	unsigned int nents;
	enum dma_transfer_direction dir;
	unsigned long flags;

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (is_tx) {
		dir   = DMA_MEM_TO_DEV;
		chan  = master->dma_tx;
		nents = tfr->tx_sg.nents;
		sgl   = tfr->tx_sg.sgl;
		flags = 0; /* no tx interrupt */
	} else {
		dir   = DMA_DEV_TO_MEM;
		chan  = master->dma_rx;
		nents = tfr->rx_sg.nents;
		sgl   = tfr->rx_sg.sgl;
		flags = DMA_PREP_INTERRUPT;
	}
	/* prepare the channel */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	/* set callback for rx */
	if (!is_tx) {
		desc->callback = bcm2835_spi_dma_done;
		desc->callback_param = master;
	}

	/* submit it to DMA-engine */
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
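Example #7
Small reusable crypto-DMA helper: validates the scatterlist, prepares, attaches the caller's callback and parameter, and submits; like Example #3, it leaves dma_async_issue_pending() to the caller.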
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
Example #8
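S/PDIF control-channel capture: prepares a single-buffer DMA_DEV_TO_MEM descriptor kept in the driver state, normalizes any submit error to -EINVAL, and starts the channel.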
static int stm32_spdifrx_dma_ctrl_start(struct stm32_spdifrx_data *spdifrx)
{
	dma_cookie_t cookie;
	int err;

	spdifrx->desc = dmaengine_prep_slave_single(spdifrx->ctrl_chan,
						    spdifrx->dmab->addr,
						    SPDIFRX_CSR_BUF_LENGTH,
						    DMA_DEV_TO_MEM,
						    DMA_CTRL_ACK);
	if (!spdifrx->desc)
		return -EINVAL;

	spdifrx->desc->callback = stm32_spdifrx_dma_complete;
	spdifrx->desc->callback_param = spdifrx;
	cookie = dmaengine_submit(spdifrx->desc);
	err = dma_submit_error(cookie);
	if (err)
		return -EINVAL;

	dma_async_issue_pending(spdifrx->ctrl_chan);

	return 0;
}
Example #9
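NTB perf memcpy offload: falls back to memcpy_toio() when DMA is disabled, translates the iomem destination to a bus address via the memory-window offset, maps the source page with unmap bookkeeping, retries descriptor preparation while the engine is out of resources, and ties the unmap data to the descriptor with dma_set_unmap() before submitting.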
static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
			 char *src, size_t size)
{
	struct perf_ctx *perf = pctx->perf;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = pctx->dma_chan;
	struct dma_device *device;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	size_t src_off, dst_off;
	struct perf_mw *mw = &perf->mw;
	void __iomem *vbase;
	void __iomem *dst_vaddr;
	dma_addr_t dst_phys;
	int retries = 0;

	if (!use_dma) {
		memcpy_toio(dst, src, size);
		return size;
	}

	if (!chan) {
		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
		return -EINVAL;
	}

	device = chan->device;
	src_off = (uintptr_t)src & ~PAGE_MASK;
	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
		return -ENODEV;

	vbase = mw->vbase;
	dst_vaddr = dst;
	dst_phys = mw->phys_addr + (dst_vaddr - vbase);

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = size;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      src_off, size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	do {
		txd = device->device_prep_dma_memcpy(chan, dst_phys,
						     unmap->addr[0],
						     size, DMA_PREP_INTERRUPT);
		if (!txd) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(DMA_OUT_RESOURCE_TO);
		}
	} while (!txd && (++retries < DMA_RETRIES));

	if (!txd) {
		pctx->dma_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = perf_copy_callback;
	txd->callback_param = pctx;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	atomic_inc(&pctx->dma_sync);
	dma_async_issue_pending(chan);

	return size;

err_set_unmap:
	/*
	 * After a failed submit the completion path will never run, so
	 * the reference taken by dma_set_unmap() must be dropped here;
	 * fall through to drop this function's own reference as well.
	 */
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
	/* Report zero bytes copied on failure. */
	return 0;
}
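All of the examples above follow the same dmaengine submission sequence: prepare a slave descriptor, optionally attach a completion callback, submit the descriptor to obtain a cookie, check the cookie with dma_submit_error(), and finally call dma_async_issue_pending() to start the hardware. Below is a minimal, generic sketch of that sequence; the helper name example_dma_submit_sg() and its parameter list are illustrative, not taken from any of the drivers above.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical helper distilling the pattern shared by the examples:
 * prep -> callback -> submit -> issue_pending.
 */
static int example_dma_submit_sg(struct dma_chan *chan,
				 struct scatterlist *sgl, unsigned int nents,
				 enum dma_transfer_direction dir,
				 unsigned long flags,
				 dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* The prep call returns NULL if the channel cannot build a descriptor. */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	/* The callback is optional; set both fields together if used. */
	desc->callback = cb;
	desc->callback_param = cb_param;

	/* Queue the descriptor; a negative cookie signals a submit error. */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	/* Nothing starts until the pending queue is kicked. */
	dma_async_issue_pending(chan);

	return 0;
}

Note how the examples split this sequence when several descriptors make up one logical operation: Example #2 issues both channels only after both submissions succeed, and Examples #3, #6, and #7 leave dma_async_issue_pending() to the caller entirely, so the engine never starts one half of a full-duplex transfer before the other half is queued.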