Example #1
static void mtk8250_dma_enable(struct uart_8250_port *up)
{
	struct uart_8250_dma *dma = up->dma;
	struct mtk8250_data *data = up->port.private_data;
	int lcr = serial_in(up, UART_LCR);

	if (data->rx_status != DMA_RX_START)
		return;

	dma->rxconf.direction		= DMA_DEV_TO_MEM;
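	/*
	 * Note: this driver fills the addr_width fields from buffer sizes
	 * rather than from the DMA_SLAVE_BUSWIDTH_* constants.
	 */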
	dma->rxconf.src_addr_width	= dma->rx_size / 1024;
	dma->rxconf.src_addr		= dma->rx_addr;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= MTK_UART_TX_SIZE / 1024;
	dma->txconf.dst_addr		= dma->tx_addr;

	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
		UART_FCR_CLEAR_XMIT);
	serial_out(up, MTK_UART_DMA_EN,
		   MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX);

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
	serial_out(up, UART_EFR, UART_EFR_ECB);
	serial_out(up, UART_LCR, lcr);

	if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0)
		pr_err("failed to configure rx dma channel\n");
	if (dmaengine_slave_config(dma->txchan, &dma->txconf) != 0)
		pr_err("failed to configure tx dma channel\n");

	data->rx_status = DMA_RX_RUNNING;
	data->rx_pos = 0;
	mtk8250_rx_dma(up);
}
Example #2
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
				struct samsung_dma_info *info)
{
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	struct dma_slave_config slave_config;

	dma_cap_zero(mask);
	dma_cap_set(info->cap, mask);

	chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
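	/*
	 * Note: chan may be NULL here; it is configured without a check and
	 * cast to an unsigned handle on return.
	 */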

	if (info->direction == DMA_FROM_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.src_addr = info->fifo;
		slave_config.src_addr_width = info->width;
		slave_config.src_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	} else if (info->direction == DMA_TO_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.dst_addr = info->fifo;
		slave_config.dst_addr_width = info->width;
		slave_config.dst_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	}

	return (unsigned)chan;
}
Example #3
static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
{
	struct dma_slave_config slave_config;
	const __be32 *addr;
	dma_addr_t dma_reg_base;
	int ret;

	/* base address in dma-space */
	addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
	if (!addr) {
		dev_err(dev, "could not get DMA-register address - not using dma mode\n");
		goto err;
	}
	dma_reg_base = be32_to_cpup(addr);

	/* get tx/rx dma */
	master->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!master->dma_tx) {
		dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
		goto err;
	}
	master->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!master->dma_rx) {
		dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
		goto err_release;
	}

	/* configure DMAs */
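	/*
	 * Note: slave_config is never zeroed; only the fields needed for each
	 * direction are assigned, so the TX settings linger when the RX
	 * channel is configured below.
	 */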
	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
	if (ret)
		goto err_config;

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
	if (ret)
		goto err_config;

	/* all went well, so set can_dma */
	master->can_dma = bcm2835_spi_can_dma;
	master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
	/* need to do TX AND RX DMA, so we need dummy buffers */
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;

	return;

err_config:
	dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
		ret);
err_release:
	bcm2835_dma_release(master);
err:
	return;
}
Example #4
static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
{
	int buf_offset = offsetof(struct pic32_spi_regs, buf);
	struct spi_master *master = pic32s->master;
	struct dma_slave_config cfg;
	int ret;

	cfg.device_fc = true;
	cfg.src_addr = pic32s->dma_base + buf_offset;
	cfg.dst_addr = pic32s->dma_base + buf_offset;
	cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */
	cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */
	cfg.src_addr_width = dma_width;
	cfg.dst_addr_width = dma_width;
	/* tx channel */
	cfg.slave_id = pic32s->tx_irq;
	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(master->dma_tx, &cfg);
	if (ret) {
		dev_err(&master->dev, "tx channel setup failed\n");
		return ret;
	}
	/* rx channel */
	cfg.slave_id = pic32s->rx_irq;
	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(master->dma_rx, &cfg);
	if (ret)
		dev_err(&master->dev, "rx channel setup failed\n");

	return ret;
}
Example #5
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	dma_cap_mask_t		mask;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= p->port.mapbase + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= p->port.mapbase + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		dma_release_channel(dma->rxchan);
		return -ENODEV;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		dma_release_channel(dma->rxchan);
		dma_release_channel(dma->txchan);
		return -ENOMEM;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
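	/* Note: the mapping above is not checked with dma_mapping_error(). */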

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
}
Example #6
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master,
			     const struct resource *res)
{
	int ret;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!master->dma_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	spi_imx->tx_config.direction = DMA_MEM_TO_DEV;
	spi_imx->tx_config.dst_addr = res->start + MXC_CSPITXDATA;
	spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	spi_imx->tx_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 4;
	ret = dmaengine_slave_config(master->dma_tx, &spi_imx->tx_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.\n");
		goto err;
	}

	/* Prepare for RX : */
	master->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!master->dma_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	spi_imx->rx_config.direction = DMA_DEV_TO_MEM;
	spi_imx->rx_config.src_addr = res->start + MXC_CSPIRXDATA;
	spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	spi_imx->rx_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_rx, &spi_imx->rx_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;
	spi_imx->rx_wml = spi_imx->rx_config.src_maxburst;
	spi_imx->tx_wml = spi_imx->tx_config.dst_maxburst;
	spi_imx->dma_is_inited = 1;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}
Example #7
static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;
	int ret;

	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;
	config.cs = spi->chip_select;

	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
		spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
		spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
		spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

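	/* Re-apply the slave config only if this transfer will actually use DMA. */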
	if (spi_imx->bitbang.master->can_dma &&
	    spi_imx_can_dma(spi_imx->bitbang.master, spi, t)) {
		ret = dmaengine_slave_config(spi_imx->bitbang.master->dma_tx,
						&spi_imx->tx_config);
		if (ret) {
			dev_err(&spi->dev, "error in TX dma configuration.\n");
			return ret;
		}

		ret = dmaengine_slave_config(spi_imx->bitbang.master->dma_rx,
						&spi_imx->rx_config);
		if (ret) {
			dev_err(&spi->dev, "error in RX dma configuration.\n");
			return ret;
		}
	}

	spi_imx->devtype_data->config(spi_imx, &config);

	return 0;
}
Example #8
static int spi_imx_dma_configure(struct spi_master *master,
				 int bytes_per_word)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (bytes_per_word == spi_imx->bytes_per_word)
		/* Same as last time */
		return 0;

	switch (bytes_per_word) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_tx, &tx);
	if (ret) {
		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_rx, &rx);
	if (ret) {
		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
		return ret;
	}

	spi_imx->bytes_per_word = bytes_per_word;

	return 0;
}
Example #9
static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
	struct spi_qup *spi = spi_master_get_devdata(master);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);

	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
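	/* device_fc = 1: the peripheral, not the DMA controller, acts as flow controller. */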
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(master->dma_tx);
err_tx:
	dma_release_channel(master->dma_rx);
	return ret;
}
Example #10
static int rsnd_dmaen_init(struct rsnd_dai_stream *io,
			   struct rsnd_dma *dma, int id,
			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg = {};
	int is_play = rsnd_io_is_play(io);
	int ret;

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	if (dev->of_node) {
		dmaen->chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	} else {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
						  (void *)id);
	}
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		goto rsnd_dma_channel_err;
	}

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "dma : %pad -> %pad\n",
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(io, dma);
rsnd_dma_channel_err:

	/*
	 * DMA failed; fall back to PIO mode.
	 * See:
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	return -EAGAIN;
}
Example #11
/* this may get called several times by oss emulation */
static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct omap_pcm_dma_data *dma_data;
	struct dma_slave_config config;
	struct dma_chan *chan;
	int err = 0;

	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma_data)
		return 0;

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	chan = snd_dmaengine_pcm_get_chan(substream);
	if (!chan)
		return -EINVAL;

	/* fills in addr_width and direction */
	err = snd_hwparams_to_dma_slave_config(substream, params, &config);
	if (err)
		return err;

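	/* Fill in the FIFO address, burst size and slave_id from the DAI's DMA data. */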
	snd_dmaengine_pcm_set_config_from_dai_data(substream,
			snd_soc_dai_get_dma_data(rtd->cpu_dai, substream),
			&config);

	return dmaengine_slave_config(chan, &config);
}
Example #12
static int bcm2835_smi_dma_setup(struct bcm2835_smi_instance *inst)
{
	int i, rv = 0;

	inst->dma_chan = dma_request_slave_channel(inst->dev, "rx-tx");

	inst->dma_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	inst->dma_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	inst->dma_config.src_addr = inst->smi_regs_busaddr + SMID;
	inst->dma_config.dst_addr = inst->dma_config.src_addr;
	/* Direction unimportant - always overridden by prep_slave_sg */
	inst->dma_config.direction = DMA_DEV_TO_MEM;
	dmaengine_slave_config(inst->dma_chan, &inst->dma_config);
	/* Alloc and map bounce buffers */
	for (i = 0; i < DMA_BOUNCE_BUFFER_COUNT; ++i) {
		inst->bounce.buffer[i] =
		dmam_alloc_coherent(inst->dev, DMA_BOUNCE_BUFFER_SIZE,
				&inst->bounce.phys[i],
				GFP_KERNEL);
		if (!inst->bounce.buffer[i]) {
			dev_err(inst->dev, "Could not allocate buffer!");
			rv = -ENOMEM;
			break;
		}
		smi_scatterlist_from_buffer(
			inst,
			inst->bounce.phys[i],
			DMA_BOUNCE_BUFFER_SIZE,
			&inst->bounce.sgl[i]
		);
	}

	return rv;
}
Example #13
int nand_dma_config_start(__u32 rw, dma_addr_t addr, __u32 length)
{
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

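	/*
	 * dma_hdl is a DMA channel handle defined elsewhere in this driver;
	 * 0x01c03300 is presumably the sunxi NAND controller data register.
	 */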
	dma_conf.direction = DMA_DEV_TO_MEM;
	dma_conf.src_addr = 0x01c03300;
	dma_conf.dst_addr = 0x01c03300;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = rw ? sunxi_slave_id(DRQDST_NAND0, DRQSRC_SDRAM) : sunxi_slave_id(DRQDST_SDRAM, DRQSRC_NAND0);
	dmaengine_slave_config(dma_hdl, &dma_conf);

	dma_desc = dmaengine_prep_slave_single(dma_hdl, addr, length,
				(rw ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM),
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		printk("dmaengine prepare failed!\n");
		return -1;
	}

	dma_desc->callback = nand_dma_callback;
	dma_desc->callback_param = NULL;
	dmaengine_submit(dma_desc);

	dma_async_issue_pending(dma_hdl);

	return 0;
}
Example #14
static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev,
				enum dma_transfer_direction dir, dma_addr_t port_addr)
{
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
	int ret;

	chan = dma_request_slave_channel_reason(dev, chan_name);
	if (IS_ERR(chan)) {
		dev_dbg(dev, "request_channel failed for %s (%ld)\n", chan_name,
			PTR_ERR(chan));
		return chan;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;
	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = port_addr;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else {
		cfg.src_addr = port_addr;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
		dma_release_channel(chan);
		return ERR_PTR(ret);
	}

	dev_dbg(dev, "got DMA channel for %s\n", chan_name);
	return chan;
}
Example #15
static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma  *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
					     xfer->tx_sg.nents,
					     DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}
	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);

}
Example #16
static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
{
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	int ret;

	host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
	dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = start + USDHI6_SD_BUF0;
	cfg.dst_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto e_release_tx;

	host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto e_release_tx;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = cfg.dst_addr;
	cfg.src_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.dst_addr = 0;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto e_release_rx;

	return;

e_release_rx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
e_release_tx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
Example #17
static bool ux500_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
	struct dma_chan *dma_chan = ux500_channel->dma_chan;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct scatterlist sg;
	struct dma_slave_config slave_conf;
	enum dma_slave_buswidth addr_width;
	struct musb *musb = ux500_channel->controller->private_data;
	dma_addr_t usb_fifo_addr = (musb->io.fifo_offset(hw_ep->epnum) +
					ux500_channel->controller->phy_base);

	dev_dbg(musb->controller,
		"packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		packet_sz, mode, (unsigned long long) dma_addr,
		len, ux500_channel->is_tx);

	ux500_channel->cur_len = len;

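	/* Build a one-entry scatterlist around the already-mapped DMA address. */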
	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
					    offset_in_page(dma_addr));
	sg_dma_address(&sg) = dma_addr;
	sg_dma_len(&sg) = len;

	direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
					DMA_SLAVE_BUSWIDTH_4_BYTES;

	slave_conf.direction = direction;
	slave_conf.src_addr = usb_fifo_addr;
	slave_conf.src_addr_width = addr_width;
	slave_conf.src_maxburst = 16;
	slave_conf.dst_addr = usb_fifo_addr;
	slave_conf.dst_addr_width = addr_width;
	slave_conf.dst_maxburst = 16;
	slave_conf.device_fc = false;

	dmaengine_slave_config(dma_chan, &slave_conf);

	dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = ux500_dma_callback;
	dma_desc->callback_param = channel;
	ux500_channel->cookie = dma_desc->tx_submit(dma_desc);

	dma_async_issue_pending(dma_chan);

	return true;
}
Example #18
static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 4;
	host->dma_slave_config.dst_maxburst = 4;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		return -ENXIO;
	}

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
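	/* Note: a timed-out wait is not detected here; 0 is returned regardless. */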

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);

	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
Example #19
static int sprd_tx_dma_config(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	u32 burst = sp->tx_dma.trans_len > SPRD_TX_FIFO_FULL ?
		SPRD_TX_FIFO_FULL : sp->tx_dma.trans_len;
	int ret;
	struct dma_slave_config cfg = {
		.dst_addr = port->mapbase + SPRD_TXD,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst = burst,
	};

	ret = dmaengine_slave_config(sp->tx_dma.chn, &cfg);
	if (ret < 0)
		return ret;

	return sprd_uart_dma_submit(port, &sp->tx_dma, sp->tx_dma.trans_len,
				    DMA_MEM_TO_DEV, sprd_complete_tx_dma);
}

static void sprd_start_tx_dma(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	struct circ_buf *xmit = &port->state->xmit;

	if (port->x_char) {
		serial_out(port, SPRD_TXD, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		sprd_stop_tx_dma(port);
		return;
	}

	if (sp->tx_dma.trans_len)
		return;

	if (sprd_tx_buf_remap(port) || sprd_tx_dma_config(port))
		sp->tx_dma.trans_len = 0;
}

static void sprd_rx_full_thld(struct uart_port *port, u32 thld)
{
	u32 val = serial_in(port, SPRD_CTL2);

	val &= ~THLD_RX_FULL_MASK;
	val |= thld & THLD_RX_FULL_MASK;
	serial_out(port, SPRD_CTL2, val);
}
Example #20
static int hist_buf_dma(struct ispstat *hist)
{
	dma_addr_t dma_addr = hist->active_buf->dma_addr;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int ret;

	if (unlikely(!dma_addr)) {
		dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
		goto error;
	}

	isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
	isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);
	omap3isp_flush(hist->isp);

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = hist->buf_size / 4;

	ret = dmaengine_slave_config(hist->dma_ch, &cfg);
	if (ret < 0) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave configuration failed\n");
		goto error;
	}

	tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
					 hist->buf_size, DMA_DEV_TO_MEM,
					 DMA_CTRL_ACK);
	if (tx == NULL) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave preparation failed\n");
		goto error;
	}

	tx->callback = hist_dma_cb;
	tx->callback_param = hist;
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
		goto error;
	}

	dma_async_issue_pending(hist->dma_ch);

	return STAT_BUF_WAITING_DMA;

error:
	hist_reset_mem(hist);
	return STAT_NO_BUF;
}
Example #21
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg;
	struct rsnd_mod *mod_from;
	struct rsnd_mod *mod_to;
	char dma_name[DMA_NAME_SIZE];
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	rsnd_dma_of_path(dma, is_play, &mod_from, &mod_to);
	rsnd_dma_of_name(mod_from, mod_to, dma_name);

	cfg.slave_id	= id;
	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= rsnd_gen_dma_addr(priv, mod_from, is_play, 1);
	cfg.dst_addr	= rsnd_gen_dma_addr(priv, mod_to,   is_play, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "dma : %s %pad -> %pad\n",
		dma_name, &cfg.src_addr, &cfg.dst_addr);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     dma_name);
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->addr = is_play ? cfg.src_addr : cfg.dst_addr;
	dma->dir = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
Example #22
static int sunxi_pcm_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct device *dev = rtd->platform->dev;
	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
	struct sunxi_dma_params *dmap;
	struct dma_slave_config slave_config;
	int ret;

	dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	ret = snd_hwparams_to_dma_slave_config(substream, params,
						&slave_config);
	if (ret) {
		dev_err(dev, "hw params config failed with err %d\n", ret);
		return ret;
	}
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config.dst_addr = dmap->dma_addr;
		slave_config.src_maxburst = 4;
		slave_config.dst_maxburst = 4;
		slave_config.slave_id = sunxi_slave_id(DRQDST_AUDIO_CODEC, DRQSRC_SDRAM);
	} else {
		slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config.src_addr = dmap->dma_addr;
		slave_config.src_maxburst = 4;
		slave_config.dst_maxburst = 4;
		slave_config.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_AUDIO_CODEC);
	}

	ret = dmaengine_slave_config(chan, &slave_config);
	if (ret < 0) {
		dev_err(dev, "dma slave config failed with err %d\n", ret);
		return ret;
	}

#if defined USED_SRAM_ADDR
/*for a23*/
#if defined CONFIG_ARCH_SUN8IW3
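	/* Points the DMA buffer at what is presumably on-chip SRAM, via hard-coded addresses. */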
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		substream->dma_buffer.addr = 0x00002000;
		substream->dma_buffer.area = (unsigned char *)0xf0002000;
		memset((void *)0xf0002000, 0, 0x4000 - 0x00002000);
	}
#endif
#endif

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	return 0;
}
Example #23
static int ss_dma_src_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int flow = ((ss_comm_ctx_t *)ctx)->flow;
	ss_dma_info_t *info = &req_ctx->dma_src;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_MEM_TO_MEM;
	dma_conf.direction = info->dir;
#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR)
		dma_conf.src_addr_width = dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else
#endif
	dma_conf.src_addr_width = dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SDRAM);
	dmaengine_slave_config(info->chan, &dma_conf);

	ss_sg_table_init(&info->sgt_for_cp, info->sg, len, sss->flows[flow].buf_src, sss->flows[flow].buf_src_dma);
	SS_DBG("chan: 0x%p, info->sgt_for_cp.sgl: 0x%p \n", info->chan, info->sgt_for_cp.sgl);

	info->nents = info->sgt_for_cp.nents;
	SS_DBG("flow: %d, sg num: %d, total len: %d \n", flow, info->nents, len);

	dma_map_sg(&sss->pdev->dev, info->sg, info->nents, info->dir);
	dma_map_sg(&sss->pdev->dev, info->sgt_for_cp.sgl, info->nents, info->dir);

	if (SS_METHOD_IS_HASH(req_ctx->type)) {
		/* Total len is too small, so there is no data for DMA. */
		if (len < SHA1_BLOCK_SIZE)
			return 1;

		ss_hash_padding_sg_prepare(&info->sg[info->nents-1], len);
		ss_hash_padding_sg_prepare(&info->sgt_for_cp.sgl[info->nents-1], len);
	}

	dma_desc = info->chan->device->device_prep_dma_sg(info->chan,
			info->sgt_for_cp.sgl, info->nents,
			info->sg, info->nents, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example #24
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
			   enum dma_transfer_direction dir)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->master->cur_msg->spi);
	struct spi_transfer *xfer = drv_data->cur_transfer;
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int ret;

	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssdr_physical;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = chip->dma_burst_size;

		sgt = &xfer->tx_sg;
		chan = drv_data->master->dma_tx;
	} else {
		cfg.src_addr = drv_data->ssdr_physical;
		cfg.src_addr_width = width;
		cfg.src_maxburst = chip->dma_burst_size;

		sgt = &xfer->rx_sg;
		chan = drv_data->master->dma_rx;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
Example #25
/* ctx - only used for HASH. */
static int ss_dma_src_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int nents = 0;
	int npages = 0;
	ss_dma_info_t *info = &req_ctx->dma_src;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_MEM_TO_DEV;
	dma_conf.direction = info->dir;
	dma_conf.dst_addr = sss->base_addr_phy + SS_REG_RXFIFO;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SS, DRQSRC_SDRAM);
	dmaengine_slave_config(info->chan, &dma_conf);

	npages = ss_sg_cnt(info->sg, len);
	WARN_ON(npages == 0);

	nents = dma_map_sg(&sss->pdev->dev, info->sg, npages, info->dir);
	SS_DBG("npages = %d, nents = %d, len = %d, sg.len = %d \n", npages, nents, len, sg_dma_len(info->sg));
	if (!nents) {
		SS_ERR("dma_map_sg() error\n");
		return -EINVAL;
	}

	info->nents = nents;

	if (SS_METHOD_IS_HASH(req_ctx->type)) {
		ss_hash_padding_sg_prepare(&info->sg[nents-1], len);

		/* Total len is too small, so there is no data for DMA. */
		if (len < SHA1_BLOCK_SIZE)
			return 1;
	}

	dma_desc = dmaengine_prep_slave_sg(info->chan, info->sg, nents,
				info->dir,	DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example #26
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	int ret;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
		chan = host->dma_tx;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
		chan = host->dma_rx;
	}

	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
	if (ret)
		return ret;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan,
				       data->sg,
				       host->sg_len,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}
Example #27
static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_dma_busy = false;

	/* Request DMA TX channel */
	stm32port->tx_ch = dma_request_slave_channel(dev, "tx");
	if (!stm32port->tx_ch) {
		dev_info(dev, "tx dma alloc failed\n");
		return -ENODEV;
	}
	stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
						 &stm32port->tx_dma_buf,
						 GFP_KERNEL);
	if (!stm32port->tx_buf) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	return 0;

config_err:
	dma_free_coherent(&pdev->dev,
			  TX_BUF_L, stm32port->tx_buf,
			  stm32port->tx_dma_buf);

alloc_err:
	dma_release_channel(stm32port->tx_ch);
	stm32port->tx_ch = NULL;

	return ret;
}
Example #28
static struct dma_chan *
sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
			 struct sh_mmcif_plat_data *pdata,
			 enum dma_transfer_direction direction)
{
	struct dma_slave_config cfg = { 0, };
	struct dma_chan *chan;
	void *slave_data = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (pdata)
		slave_data = direction == DMA_MEM_TO_DEV ?
			(void *)pdata->slave_id_tx :
			(void *)pdata->slave_id_rx;

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				slave_data, &host->pd->dev,
				direction == DMA_MEM_TO_DEV ? "tx" : "rx");

	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
		direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);

	if (!chan)
		return NULL;

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}
Example #29
int logi_dma_open(struct drvr_mem* mem_dev, dma_addr_t *physbuf)
{
#ifdef USE_DMA_ENGINE
	struct dma_slave_config	conf;
	dma_cap_mask_t mask;
#endif

	/* Allocate DMA buffer */
	mem_dev->dma.buf = dma_alloc_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES,
					      &dmaphysbuf, 0);

	if (!mem_dev->dma.buf) {
		DBG_LOG("failed to allocate DMA buffer\n");
		return -ENOMEM;
	}

	*physbuf = dmaphysbuf;

#ifdef USE_DMA_ENGINE
	/* Allocate DMA channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	mem_dev->dma.chan = dma_request_channel(mask, NULL, NULL);

	if (!mem_dev->dma.chan)
		return -ENODEV;

	/* Configure DMA channel */
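	/* Note: conf is not zero-initialized; only the direction is set below. */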
	conf.direction = DMA_MEM_TO_MEM;
	/*conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;*/
	dmaengine_slave_config(mem_dev->dma.chan, &conf);

	DBG_LOG("Using Linux DMA Engine API");
#else
	mem_dev->dma.dma_chan = edma_alloc_channel(EDMA_CHANNEL_ANY, dma_callback,
					       NULL, EVENTQ_0);

	if (mem_dev->dma.dma_chan < 0) {
		DBG_LOG("edma_alloc_channel failed for dma_ch, error: %d\n",
			mem_dev->dma.dma_chan);
		return mem_dev->dma.dma_chan;
	}

	DBG_LOG("Using EDMA/DMA Engine");
#endif /* USE_DMA_ENGINE */

	DBG_LOG("EDMA channel %d reserved\n", mem_dev->dma.dma_chan);

	return 0;
}
Example #30
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
				struct samsung_dma_info *info)
{
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	struct dma_slave_config slave_config;
	void *filter_param;

	dma_cap_zero(mask);
	dma_cap_set(info->cap, mask);

	/*
	 * If a dma channel property of a device node from device tree is
	 * specified, use that as the filter parameter.
	 */
	filter_param = (dma_ch == DMACH_DT_PROP) ? (void *)info->dt_dmach_prop :
				(void *)dma_ch;
	chan = dma_request_channel(mask, pl330_filter, filter_param);

	if (info->direction == DMA_FROM_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.src_addr = info->fifo;
		slave_config.src_addr_width = info->width;
		slave_config.src_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	} else if (info->direction == DMA_TO_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.dst_addr = info->fifo;
		slave_config.dst_addr_width = info->width;
		slave_config.dst_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	}

	return (unsigned)chan;
}