Code example #1
File: stm32-usart.c Project: ReneNyffenegger/linux
static void stm32_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie;
	unsigned int count, i;

	if (stm32port->tx_dma_busy)
		return;

	stm32port->tx_dma_busy = true;

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc) {
		for (i = count; i > 0; i--)
			stm32_transmit_chars_pio(port);
		return;
	}

	desc->callback = stm32_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	cookie = dmaengine_submit(desc);

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_clr_bits(port, ofs->isr, USART_SR_TC);
	stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
}
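
A note on the pattern: the function above follows the sequence that recurs in almost every example below: prepare a slave descriptor, attach a completion callback, submit it, then start the channel with dma_async_issue_pending(). The fragment that follows is a minimal sketch of that flow, not taken from any of the quoted drivers; the my_dev structure, its pre-mapped tx_dma_buf and the my_tx_complete() callback are illustrative assumptions, and dmaengine_slave_config() is assumed to have been called at probe time, as in the drivers above.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver context: a channel obtained with dma_request_chan()
 * and a buffer that is already mapped for DMA (e.g. dma_alloc_coherent()). */
struct my_dev {
	struct dma_chan *tx_chan;
	dma_addr_t tx_dma_buf;
};

static void my_tx_complete(void *param)
{
	/* called by the DMA driver once the transfer has completed */
}

static int my_start_tx(struct my_dev *mydev, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(mydev->tx_chan, mydev->tx_dma_buf,
					   len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;	/* no descriptor: fall back to PIO, as above */

	desc->callback = my_tx_complete;
	desc->callback_param = mydev;

	/* push the descriptor into the pending queue ... */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* ... and tell the engine to start processing it */
	dma_async_issue_pending(mydev->tx_chan);
	return 0;
}
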
Code example #2
File: stm32-usart.c Project: ReneNyffenegger/linux
static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie;
	int ret;

	/* Request DMA RX channel */
	stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
	if (!stm32port->rx_ch) {
		dev_info(dev, "rx dma alloc failed\n");
		return -ENODEV;
	}
	stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
						 &stm32port->rx_dma_buf,
						 GFP_KERNEL);
	if (!stm32port->rx_buf) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
					 stm32port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "rx dma prep cyclic failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	/* No callback as dma buffer is drained on usart interrupt */
	desc->callback = NULL;
	desc->callback_param = NULL;

	/* Push current DMA transaction in the pending queue */
	cookie = dmaengine_submit(desc);

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32port->rx_ch);

	return 0;

config_err:
	dma_free_coherent(&pdev->dev,
			  RX_BUF_L, stm32port->rx_buf,
			  stm32port->rx_dma_buf);

alloc_err:
	dma_release_channel(stm32port->rx_ch);
	stm32port->rx_ch = NULL;

	return ret;
}
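
Example #2 programs a cyclic transfer, so the controller keeps refilling the ring buffer indefinitely and the data is drained from the USART interrupt handler rather than from a DMA callback. The sketch below shows one common way to find out how far the engine has written, by querying the transfer residue; the helper name, its parameters and the caller-side bookkeeping are illustrative assumptions, not code from the driver above.

/* Sketch: report how much of a cyclic RX ring the engine has filled.
 * 'cookie' is the value returned by dmaengine_submit() for the cyclic
 * descriptor and 'ring_len' the total buffer length (RX_BUF_L above). */
static size_t my_rx_ring_level(struct dma_chan *chan, dma_cookie_t cookie,
			       size_t ring_len)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		return 0;

	/* residue counts the bytes the engine has not yet written in the
	 * current pass over the ring, so this is the write position */
	return ring_len - state.residue;
}

The interrupt handler would then copy everything between its last read position and the returned write position (wrapping at ring_len) into the tty layer.
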
Code example #3
static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
			struct i2c_msg *msg, uint32_t flags)
{
	struct dma_async_tx_descriptor *desc;
	struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);

	if (msg->flags & I2C_M_RD) {
		i2c->dma_read = 1;
		i2c->addr_data = (msg->addr << 1) | I2C_SMBUS_READ;

		/*
		 * SELECT command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[0] = MXS_CMD_I2C_SELECT;
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[0],
					1, DMA_TRANS_NONE, 0);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto select_init_pio_fail;
		}

		/* Queue the DMA data transfer. */
		sg_init_one(&i2c->sg_io[0], &i2c->addr_data, 1);
		dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");
			goto select_init_dma_fail;
		}

		/*
		 * READ command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[1] = flags | MXS_CMD_I2C_READ |
				MXS_I2C_CTRL0_XFER_COUNT(msg->len);
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[1],
					1, DMA_TRANS_NONE, DMA_PREP_INTERRUPT);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto select_init_dma_fail;
		}

		/* Queue the DMA data transfer. */
		sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
		dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");
			goto read_init_dma_fail;
		}
	} else {
		i2c->dma_read = 0;
		i2c->addr_data = (msg->addr << 1) | I2C_SMBUS_WRITE;

		/*
		 * WRITE command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[0] = flags | MXS_CMD_I2C_WRITE |
				MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1);
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[0],
					1, DMA_TRANS_NONE, 0);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto write_init_pio_fail;
		}

		/* Queue the DMA data transfer. */
		sg_init_table(i2c->sg_io, 2);
		sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
		sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
		dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");
			goto write_init_dma_fail;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_i2c_dma_irq_callback;
	desc->callback_param = i2c;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(i2c->dmach);
	return 0;

/* Read failpath. */
read_init_dma_fail:
	dma_unmap_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
select_init_dma_fail:
	dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
select_init_pio_fail:
	dmaengine_terminate_all(i2c->dmach);
	return -EINVAL;

/* Write failpath. */
write_init_dma_fail:
	dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
write_init_pio_fail:
	dmaengine_terminate_all(i2c->dmach);
	return -EINVAL;
}
Code example #4
File: ntb_perf.c Project: AshishNamdev/linux
static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
			 char *src, size_t size)
{
	struct perf_ctx *perf = pctx->perf;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = pctx->dma_chan;
	struct dma_device *device;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	size_t src_off, dst_off;
	struct perf_mw *mw = &perf->mw;
	void __iomem *vbase;
	void __iomem *dst_vaddr;
	dma_addr_t dst_phys;
	int retries = 0;

	if (!use_dma) {
		memcpy_toio(dst, src, size);
		return size;
	}

	if (!chan) {
		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
		return -EINVAL;
	}

	device = chan->device;
	src_off = (uintptr_t)src & ~PAGE_MASK;
	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
		return -ENODEV;

	vbase = mw->vbase;
	dst_vaddr = dst;
	dst_phys = mw->phys_addr + (dst_vaddr - vbase);

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = size;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      src_off, size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	do {
		txd = device->device_prep_dma_memcpy(chan, dst_phys,
						     unmap->addr[0],
						     size, DMA_PREP_INTERRUPT);
		if (!txd) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(DMA_OUT_RESOURCE_TO);
		}
	} while (!txd && (++retries < DMA_RETRIES));

	if (!txd) {
		pctx->dma_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = perf_copy_callback;
	txd->callback_param = pctx;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	atomic_inc(&pctx->dma_sync);
	dma_async_issue_pending(chan);

	return size;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
	return 0;
}
Code example #5
File: spi-imx.c Project: lixuhui112/myimx6-linux
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	int left = 0;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					tx->sgl, tx->nents, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto tx_nodma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		struct scatterlist *sgl_last = &rx->sgl[rx->nents - 1];
		unsigned int	orig_length = sgl_last->length;
		int	wml_mask = ~(spi_imx->rx_wml - 1);
		/*
		 * Adjust the transfer length of the last scatterlist if there is
		 * some tail data; use a PIO read to get the tail data, since the
		 * DMA sometimes misses the last tail interrupt.
		 */
		left = transfer->len % spi_imx->rx_wml;
		if (left)
			sgl_last->length = orig_length & wml_mask;

		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					rx->sgl, rx->nents, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto rx_nodma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait for the SDMA to finish the data transfer. */
	ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
					  IMX_DMA_TIMEOUT(transfer->len));
	if (!ret) {
		pr_warn("%s %s: I/O Error in DMA TX:%x\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev), transfer->len);
		dmaengine_terminate_all(master->dma_tx);
	} else {
		ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
				IMX_DMA_TIMEOUT(transfer->len));
		if (!ret) {
			pr_warn("%s %s: I/O Error in DMA RX:%x\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev), transfer->len);
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		} else if (left) {
			/* read the tail data by PIO */
			dma_sync_sg_for_cpu(master->dma_rx->device->dev,
					    &rx->sgl[rx->nents - 1], 1,
					    DMA_FROM_DEVICE);
			spi_imx->rx_buf = transfer->rx_buf
						+ (transfer->len - left);
			spi_imx_tail_pio_set(spi_imx, left);
			reinit_completion(&spi_imx->xfer_done);

			spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TCEN);

			ret = wait_for_completion_timeout(&spi_imx->xfer_done,
						IMX_DMA_TIMEOUT(transfer->len));
			if (!ret) {
				pr_warn("%s %s: I/O Error in RX tail\n",
					dev_driver_string(&master->dev),
					dev_name(&master->dev));
			}
		}
	}

	spi_imx->dma_finished = 1;
	if (spi_imx->devtype_data->devtype == IMX6UL_ECSPI)
		spi_imx->devtype_data->trigger(spi_imx);

	if (!ret)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = transfer->len;

	return ret;

rx_nodma:
	dmaengine_terminate_all(master->dma_tx);
tx_nodma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}
Code example #6
int logi_dma_copy(struct drvr_mem* mem_dev, unsigned long trgt_addr,
		  unsigned long src_addr, int count)
{
	int result = 0;

#ifdef USE_DMA_ENGINE
	struct dma_chan *chan;
	struct dma_device *dev;
	struct dma_async_tx_descriptor *tx;
	unsigned long flags;

	chan = mem_dev->dma.chan;
	dev = chan->device;
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	tx = dev->device_prep_dma_memcpy(chan, trgt_addr, src_addr, count, flags);

	if (!tx) {
		DBG_LOG("device_prep_dma_memcpy failed\n");
		return -ENODEV;
	}

	irqraised1 = 0u;
	dma_comp.done = 0;
	/* set the callback and submit the transaction */
	tx->callback = dma_callback;
	tx->callback_param = mem_dev;
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
#else
	struct edmacc_param param_set;
	int dma_ch = mem_dev->dma.dma_chan;

	edma_set_src(dma_ch, src_addr, INCR, W256BIT);
	edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT);
	edma_set_src_index(dma_ch, 1, 1);
	edma_set_dest_index(dma_ch, 1, 1);
	/* A Sync Transfer Mode */
	edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC);//one block of one frame of one array of count bytes

	/* Enable the Interrupts on Channel 1 */
	edma_read_slot(dma_ch, &param_set);
	param_set.opt |= ITCINTEN;
	param_set.opt |= TCINTEN;
	param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
	edma_write_slot(dma_ch, &param_set);
	irqraised1 = 0u;
	dma_comp.done = 0;
	result = edma_start(dma_ch);

	if (result != 0) {
		DBG_LOG("edma copy failed\n");
		return result;
	}

#endif /* USE_DMA_ENGINE */

	wait_for_completion(&dma_comp);

	/* Check the status of the completed transfer */

	if (irqraised1 < 0) {
		DBG_LOG("edma copy: Event Miss Occured!!!\n");
#ifdef USE_DMA_ENGINE
		dmaengine_terminate_all(chan);
#else
		edma_stop(dma_ch);
#endif /* USE_DMA_ENGINE */
		result = -EAGAIN;
	}

	return result;
}
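
Examples #4 and #6 both offload a plain memory-to-memory copy through device_prep_dma_memcpy(). Stripped of the driver-specific error handling, the blocking pattern they share looks roughly like the sketch below; the helper name, the on-stack completion and the assumption that dst and src are already bus addresses valid for the channel are illustrative, not taken from either driver.

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void my_copy_done(void *arg)
{
	complete(arg);
}

/* Sketch: blocking memcpy offload. dst and src must already be DMA
 * addresses usable by 'chan' (e.g. obtained with dma_map_single()). */
static int my_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			 dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_copy_done;
	tx->callback_param = &done;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	wait_for_completion(&done);	/* my_copy_done() completes this */
	return 0;
}
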
Code example #7
File: spi-imx.c Project: GAXUSXX/G935FGaXusKernel2
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	u32 dma;
	int left;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					tx->sgl, tx->nents, DMA_TO_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto no_dma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					rx->sgl, rx->nents, DMA_FROM_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto no_dma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;

	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
	/* Change RX_DMA_LENGTH trigger dma fetch tail data */
	left = transfer->len % spi_imx->rxt_wml;
	if (left)
		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
				spi_imx->base + MX51_ECSPI_DMA);
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait for the SDMA to finish the data transfer. */
	ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						IMX_DMA_TIMEOUT);
	if (!ret) {
		pr_warn("%s %s: I/O Error in DMA TX\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev));
		dmaengine_terminate_all(master->dma_tx);
	} else {
		ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
				IMX_DMA_TIMEOUT);
		if (!ret) {
			pr_warn("%s %s: I/O Error in DMA RX\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev));
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		}
		writel(dma |
		       spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
		       spi_imx->base + MX51_ECSPI_DMA);
	}

	spi_imx->dma_finished = 1;
	spi_imx->devtype_data->trigger(spi_imx);

	if (!ret)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = transfer->len;

	return ret;

no_dma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}
Code example #8
File: spi-rockchip.c Project: Abioy/kasan
static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
	unsigned long flags;
	struct dma_slave_config rxconf, txconf;
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	spin_lock_irqsave(&rs->lock, flags);
	rs->state &= ~RXBUSY;
	rs->state &= ~TXBUSY;
	spin_unlock_irqrestore(&rs->lock, flags);

	rxdesc = NULL;
	if (rs->rx) {
		rxconf.direction = rs->dma_rx.direction;
		rxconf.src_addr = rs->dma_rx.addr;
		rxconf.src_addr_width = rs->n_bytes;
		rxconf.src_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				rs->dma_rx.ch,
				rs->rx_sg.sgl, rs->rx_sg.nents,
				rs->dma_rx.direction, DMA_PREP_INTERRUPT);

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = rs;
	}

	txdesc = NULL;
	if (rs->tx) {
		txconf.direction = rs->dma_tx.direction;
		txconf.dst_addr = rs->dma_tx.addr;
		txconf.dst_addr_width = rs->n_bytes;
		txconf.dst_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_tx.ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				rs->dma_tx.ch,
				rs->tx_sg.sgl, rs->tx_sg.nents,
				rs->dma_tx.direction, DMA_PREP_INTERRUPT);

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = rs;
	}

	/* rx must be started before tx due to the spi controller's intrinsic behaviour */
	if (rxdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= RXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rs->dma_rx.ch);
	}

	if (txdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= TXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(rs->dma_tx.ch);
	}
}
Code example #9
File: 8250_omap.c Project: 383530895/linux
static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma            *dma = p->dma;
	struct dma_async_tx_descriptor  *desc;

	switch (iir & 0x3f) {
	case UART_IIR_RLSI:
		/* 8250_core handles errors and break interrupts */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -EIO;
	case UART_IIR_RX_TIMEOUT:
		/*
		 * If RCVR FIFO trigger level was not reached, complete the
		 * transfer and let 8250_core copy the remaining data.
		 */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -ETIMEDOUT;
	case UART_IIR_RDI:
		/*
		 * The OMAP UART is a special BEAST. If we receive RDI we _have_
		 * a DMA transfer programmed but it didn't work. One reason is
		 * that we were too slow and there were too many bytes in the
		 * FIFO, the UART counted wrong and never kicked the DMA engine
		 * to do anything. That means once we receive RDI on OMAP then
		 * the DMA won't do anything soon so we have to cancel the DMA
		 * transfer and purge the FIFO manually.
		 */
		if (dma->rx_running) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_do_complete(p, true);
		}
		return -ETIMEDOUT;

	default:
		break;
	}

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);
	return 0;
}
Code example #10
File: 8250_omap.c Project: 383530895/linux
static int omap_8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct omap8250_priv		*priv = p->port.private_data;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	unsigned int	skip_byte = 0;
	int ret;

	if (dma->tx_running)
		return 0;
	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {

		/*
		 * Even if no data, we need to return an error for the two cases
		 * below so serial8250_tx_chars() is invoked and properly clears
		 * THRI and/or runtime suspend.
		 */
		if (dma->tx_err || p->capabilities & UART_CAP_RPM) {
			ret = -EBUSY;
			goto err;
		}
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (priv->habit & OMAP_DMA_TX_KICK) {
		u8 tx_lvl;

		/*
		 * We need to put the first byte into the FIFO in order to start
		 * the DMA transfer. For transfers smaller than four bytes we
		 * don't bother doing DMA at all. It does not seem to matter if
		 * there are still bytes in the FIFO from the last transfer (in
		 * case we got here directly from omap_8250_dma_tx_complete()). Bytes
		 * leaving the FIFO seem not to trigger the DMA transfer. It is
		 * really the byte that we put into the FIFO.
		 * If the FIFO is already full then we most likely got here from
		 * omap_8250_dma_tx_complete(). And this means the DMA engine
		 * just completed its work. We don't have to wait the complete
		 * 86us at 115200,8n1 but around 60us (not to mention lower
		 * baudrates). So in that case we take the interrupt and try
		 * again with an empty FIFO.
		 */
		tx_lvl = serial_in(p, UART_OMAP_TX_LVL);
		if (tx_lvl == p->tx_loadsz) {
			ret = -EBUSY;
			goto err;
		}
		if (dma->tx_size < 4) {
			ret = -EINVAL;
			goto err;
		}
		skip_byte = 1;
	}

	desc = dmaengine_prep_slave_single(dma->txchan,
			dma->tx_addr + xmit->tail + skip_byte,
			dma->tx_size - skip_byte, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;

	desc->callback = omap_8250_dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err)
		dma->tx_err = 0;

	if (p->ier & UART_IER_THRI) {
		p->ier &= ~UART_IER_THRI;
		serial_out(p, UART_IER, p->ier);
	}
	if (skip_byte)
		serial_out(p, UART_TX, xmit->buf[xmit->tail]);
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
Code example #11
File: i2c-at91-master.c Project: avagin/linux
static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
Code example #12
File: odp_tx.c Project: kalray/odp-mppa
netdev_tx_t mpodp_start_xmit(struct sk_buff *skb,
			     struct net_device *netdev)
{
	struct mpodp_if_priv *priv = netdev_priv(netdev);
	struct mpodp_tx *tx;
	struct dma_async_tx_descriptor *dma_txd;
	struct mpodp_cache_entry *entry;
	int ret;
	uint8_t fifo_mode;
	int16_t requested_engine;
	struct mpodp_pkt_hdr *hdr;
	uint32_t tx_autoloop_next;
	uint32_t tx_submitted, tx_next, tx_done;
	uint32_t tx_mppa_idx;
	int qidx;
	unsigned long flags = 0;
	struct mpodp_txq *txq;

	/* Fetch HW queue selected by the kernel */
	qidx = skb_get_queue_mapping(skb);
	txq = &priv->txqs[qidx];

	if (atomic_read(&priv->reset) == 1) {
		mpodp_clean_tx_unlocked(priv, txq, -1);
		goto addr_error;
	}

	tx_submitted = atomic_read(&txq->submitted);
	/* Compute txd id */
	tx_next = (tx_submitted + 1);
	if (tx_next == txq->size)
		tx_next = 0;

	/* MPPA H2C Entry to use */
	tx_mppa_idx = atomic_read(&txq->autoloop_cur);

	tx_done = atomic_read(&txq->done);
	if (tx_done != tx_submitted &&
	    ((txq->ring[tx_done].jiffies + msecs_to_jiffies(5) >= jiffies) ||
	     (tx_submitted < tx_done && tx_submitted + txq->size - tx_done >= TX_POLL_THRESHOLD) ||
	     (tx_submitted >= tx_done && tx_submitted - tx_done >= TX_POLL_THRESHOLD))) {
		mpodp_clean_tx_unlocked(priv, txq, -1);
	}

	/* Check if there are txd available */
	if (tx_next == atomic_read(&txq->done)) {
		/* Ring is full */
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d]: ring full \n", txq->id);
		netif_tx_stop_queue(txq->txq);
		return NETDEV_TX_BUSY;
	}

	tx = &(txq->ring[tx_submitted]);
	entry = &(txq->cache[tx_mppa_idx]);

	/* take the time */
	mppa_pcie_time_get(priv->tx_time, &tx->time);

	/* configure channel */
	tx->dst_addr = entry->addr;

	/* Check the provided address */
	ret =
	    mppa_pcie_dma_check_addr(priv->pdata, tx->dst_addr, &fifo_mode,
				     &requested_engine);
	if (ret) {
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: invalid send address %llx\n",
				   txq->id, tx_submitted, tx->dst_addr);
		goto addr_error;
	}
	if (!fifo_mode) {
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: %llx is not a PCI2Noc addres\n",
				   txq->id, tx_submitted, tx->dst_addr);
		goto addr_error;
	}
	if (requested_engine >= MPODP_NOC_CHAN_COUNT) {
		if (netif_msg_tx_err(priv))
			netdev_err(netdev,
				   "txq[%d] tx[%d]: address %llx using NoC engine out of range (%d >= %d)\n",
				   txq->id, tx_submitted, tx->dst_addr,
				   requested_engine, MPODP_NOC_CHAN_COUNT);
		goto addr_error;
	}

	tx->chanidx = requested_engine;

	/* The packet needs a header to determine size, timestamp, etc.
	 * Add it. */
	if (skb_headroom(skb) < sizeof(struct mpodp_pkt_hdr)) {
		struct sk_buff *skb_new;

		skb_new =
			skb_realloc_headroom(skb, sizeof(struct mpodp_pkt_hdr));
		if (!skb_new) {
			netdev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	hdr = (struct mpodp_pkt_hdr *)
		skb_push(skb, sizeof(struct mpodp_pkt_hdr));
	hdr->timestamp = priv->packet_id;
	hdr->info._.pkt_id = priv->packet_id;
	hdr->info.dword = 0ULL;
	hdr->info._.pkt_size = skb->len; /* Also count the header size */
	hdr->info._.pkt_id = priv->packet_id;
	priv->packet_id++;

	/* save skb to free it later */
	tx->skb = skb;
	tx->len = skb->len;

	/* prepare sg */
	if (map_skb(&priv->pdev->dev, skb, tx)){
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "tx %d: failed to map skb to dma\n",
				   tx_submitted);
		goto busy;
	}

	if (priv->n_txqs > MPODP_NOC_CHAN_COUNT)
		spin_lock_irqsave(&priv->tx_lock[requested_engine], flags);

	/* Prepare slave args */
	priv->tx_config[requested_engine].cfg.dst_addr = tx->dst_addr;
	priv->tx_config[requested_engine].requested_engine = requested_engine;
	/* FIFO mode, direction, latency were filled at setup */

	if (dmaengine_slave_config(priv->tx_chan[requested_engine],
				   &priv->tx_config[requested_engine].cfg)) {
		/* board has reset, wait for reset of netdev */
		netif_tx_stop_queue(txq->txq);
		netif_carrier_off(netdev);
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: cannot configure channel\n",
				   txq->id, tx_submitted);
		goto busy;
	}

	/* get transfer descriptor */
	dma_txd =
	    dmaengine_prep_slave_sg(priv->tx_chan[requested_engine], tx->sg,
				    tx->sg_len, DMA_MEM_TO_DEV, 0);
	if (dma_txd == NULL) {
		/* dmaengine_prep_slave_sg failed, retry */
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: cannot get dma descriptor\n",
				   txq->id, tx_submitted);
		goto busy;
	}
	if (netif_msg_tx_queued(priv))
		netdev_info(netdev,
			    "txq[%d] tx[%d]: transfer start (submitted: %d done: %d) len=%d, sg_len=%d\n",
			    txq->id, tx_submitted, tx_next, atomic_read(&txq->done),
			    tx->len, tx->sg_len);

	skb_orphan(skb);

	/* submit and issue descriptor */
	tx->jiffies = jiffies;
	tx->cookie = dmaengine_submit(dma_txd);
	dma_async_issue_pending(priv->tx_chan[requested_engine]);

	if (priv->n_txqs > MPODP_NOC_CHAN_COUNT)
		spin_unlock_irqrestore(&priv->tx_lock[requested_engine], flags);

	/* Count number of bytes on the fly for DQL */
	netdev_tx_sent_queue(txq->txq, skb->len);
	if (test_bit(__QUEUE_STATE_STACK_XOFF, &txq->txq->state)){
		/* We reached over the limit of DQL. Try to clean some
		 * tx so we are rescheduled right now */
		mpodp_clean_tx_unlocked(priv, txq, -1);
	}

	/* Increment tail pointer locally */
	atomic_set(&txq->submitted, tx_next);

	/* Update H2C entry offset */
	tx_autoloop_next = tx_mppa_idx + 1;
	if (tx_autoloop_next == txq->cached_head)
		tx_autoloop_next = 0;
	atomic_set(&txq->autoloop_cur, tx_autoloop_next);

	skb_tx_timestamp(skb);

	/* Check if there is room for another txd
	 * or stop the queue if there is not */
	tx_next = (tx_next + 1);
	if (tx_next == txq->size)
		tx_next = 0;

	if (tx_next == atomic_read(&txq->done)) {
		if (netif_msg_tx_queued(priv))
			netdev_info(netdev, "txq[%d]: ring full \n", txq->id);
		netif_tx_stop_queue(txq->txq);
	}

	return NETDEV_TX_OK;

      busy:
	unmap_skb(&priv->pdev->dev, skb, tx);
	return NETDEV_TX_BUSY;

 addr_error:
	netdev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	/* We can't do anything, just stop the queue artificially */
	netif_tx_stop_queue(txq->txq);
	return NETDEV_TX_OK;
}
Code example #13
File: omap.c Project: AllenDou/linux
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		enum dma_data_direction dma_data_dir;
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *c;
		u32 burst, *bp;
		u16 buf;

		/*
		 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
		 * and 24xx. Use 16 or 32 word frames when the
		 * blocksize is at least that large. Blocksize is
		 * usually 512 bytes; but not for some SD reads.
		 */
		burst = cpu_is_omap15xx() ? 32 : 64;
		if (burst > data->blksz)
			burst = data->blksz;

		burst >>= 1;

		if (data->flags & MMC_DATA_WRITE) {
			c = host->dma_tx;
			bp = &host->dma_tx_burst;
			buf = 0x0f80 | (burst - 1) << 0;
			dma_data_dir = DMA_TO_DEVICE;
		} else {
			c = host->dma_rx;
			bp = &host->dma_rx_burst;
			buf = 0x800f | (burst - 1) << 8;
			dma_data_dir = DMA_FROM_DEVICE;
		}

		if (!c)
			goto use_pio;

		/* Only reconfigure if we have a different burst size */
		if (*bp != burst) {
			struct dma_slave_config cfg;

			cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
			cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
			cfg.src_maxburst = burst;
			cfg.dst_maxburst = burst;

			if (dmaengine_slave_config(c, &cfg))
				goto use_pio;

			*bp = burst;
		}

		host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
					  dma_data_dir);
		if (host->sg_len == 0)
			goto use_pio;

		tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			goto use_pio;

		OMAP_MMC_WRITE(host, BUF, buf);

		tx->callback = mmc_omap_dma_callback;
		tx->callback_param = host;
		dmaengine_submit(tx);
		host->brs_received = 0;
		host->dma_done = 0;
		host->dma_in_use = 1;
		return;
	}
 use_pio:

	/* Revert to PIO? */
	OMAP_MMC_WRITE(host, BUF, 0x1f1f);
	host->total_bytes_left = data->blocks * block_size;
	host->sg_len = sg_len;
	mmc_omap_sg_to_buf(host);
	host->dma_in_use = 0;
}
Code example #14
File: mmci.c Project: Lyude/linux
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	host->dma_in_progress = true;
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}
Code example #15
File: mxs-mmc.c Project: MaxChina/linux
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
	    (cmd->opcode == SD_IO_RW_EXTENDED))
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}
Code example #16
File: spi-img-spfi.c Project: AshishNamdev/linux
static int img_spfi_start_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}

	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}
Code example #17
File: sirfsoc_uart.c Project: 19Dan01/linux
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
			!tran_size)
		return;
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires buffer address and buffer length are both aligned with
	 * 4 bytes, so we use PIO for
	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
	 * bytes, and move to DMA for the left part aligned with 4bytes
	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
	 * part first, move to PIO for the left 1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		tran_size &= ~(0x3);

		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		sirfport->transfer_size = tran_size;

		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}
Code example #18
static int vdmafb_setupfb(struct vdmafb_dev *fbdev)
{
	struct fb_var_screeninfo *var = &fbdev->info.var;
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *dma_template = fbdev->dma_template;
	struct xilinx_vdma_config vdma_config;
	int hsize = var->xres * 4;
	u32 frame;
	int ret;

	/* Disable display */
	vdmafb_writereg(fbdev, VDMAFB_CONTROL, 0);

	dmaengine_terminate_all(fbdev->dma);

	/* Setup VDMA address etc */
	memset(&vdma_config, 0, sizeof(vdma_config));
	vdma_config.park = 1;
	vdma_config.coalesc = 255; /* Reduces unused interrupts */
	xilinx_vdma_channel_set_config(fbdev->dma, &vdma_config);

	/*
	 * Interleaved DMA:
	 * Each interleaved frame is a row (hsize) implemented in ONE
	 * chunk (sgl has len 1).
	 * The number of interleaved frames is the number of rows (vsize).
	 * The icg is used to pack data for the HW, so that the buffer len
	 * is fb->pitches[0], but the actual size for the hw is somewhat less.
	 */
	dma_template->dir = DMA_MEM_TO_DEV;
	dma_template->src_start = fbdev->fb_phys;
	/* the sgl list has just one entry (each interleaved frame has 1 chunk) */
	dma_template->frame_size = 1;
	/* the number of interleaved frames, each with the size specified in sgl */
	dma_template->numf = var->yres;
	dma_template->src_sgl = 1;
	dma_template->src_inc = 1;
	/* the vdma IP does not provide any addr to the hdmi IP */
	dma_template->dst_inc = 0;
	dma_template->dst_sgl = 0;
	/* horizontal size */
	dma_template->sgl[0].size = hsize;
	/* the vdma driver seems to look at icg, and not src_icg */
	dma_template->sgl[0].icg = 0; /* stride - hsize */

	for (frame = 0; frame < fbdev->frames; ++frame) {
		desc = dmaengine_prep_interleaved_dma(fbdev->dma, dma_template, 0);
		if (!desc) {
			pr_err("Failed to prepare DMA descriptor\n");
			return -ENOMEM;
		}
		dmaengine_submit(desc);
	}
	dma_async_issue_pending(fbdev->dma);

	/* Configure IP via registers */
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_TOTAL,
		var->hsync_len + var->left_margin + var->xres + var->right_margin);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_SYNC, var->hsync_len);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_FRONT_PORCH, var->left_margin);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_WIDTH, var->xres);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_BACK_PORCH, var->right_margin);
	vdmafb_writereg(fbdev, VDMAFB_HORIZONTAL_POLARITY, 0); /* TODO */
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_TOTAL,
		var->vsync_len + var->upper_margin + var->yres + var->lower_margin);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_SYNC, var->vsync_len);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_FRONT_PORCH, var->upper_margin);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_HEIGHT, var->yres);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_BACK_PORCH, var->lower_margin);
	vdmafb_writereg(fbdev, VDMAFB_VERTICAL_POLARITY, 0);
	/* Enable output */
	vdmafb_writereg(fbdev, VDMAFB_CONTROL, VDMAFB_CONTROL_ENABLE);

	/* Set brightness */

	vdmafb_writereg(fbdev, VDMAFB_BACKLIGHT_CONTROL, 1);
	vdmafb_writereg(fbdev, VDMAFB_BACKLIGHT_LEVEL_1K, 800);

	return 0;
}
Code example #19
File: spi-imx.c Project: acton393/linux
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(master->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(master->dma_tx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	/* Wait for the SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						transfer_timeout);
	if (!timeout) {
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}