Example #1
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
				struct samsung_dma_info *info)
{
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	struct dma_slave_config slave_config;

	dma_cap_zero(mask);
	dma_cap_set(info->cap, mask);

	chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
	if (!chan)
		return 0;

	if (info->direction == DMA_FROM_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.src_addr = info->fifo;
		slave_config.src_addr_width = info->width;
		slave_config.src_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	} else if (info->direction == DMA_TO_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.dst_addr = info->fifo;
		slave_config.dst_addr_width = info->width;
		slave_config.dst_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	}

	return (unsigned)chan;
}
Example #2
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx = dma_request_channel(mask, NULL, host);
	if (!host->dma_tx) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return -ENODEV;
	}

	host->dma_rx = dma_request_channel(mask, NULL, host);
	if (!host->dma_rx) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		goto free_master_write;
	}

	/* Initialize DMA pre request cookie */
	host->next_data.cookie = 1;

	return 0;

free_master_write:
	dma_release_channel(host->dma_tx);
	return -ENODEV;
}
Example #3
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct pxa2xx_spi_master *pdata = drv_data->master_info;
	struct device *dev = &drv_data->pdev->dev;
	struct spi_master *master = drv_data->master;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	master->dma_tx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!master->dma_tx)
		return -ENODEV;

	master->dma_rx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!master->dma_rx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
		return -ENODEV;
	}

	return 0;
}
Example #4
static int rsnd_dmaen_init(struct rsnd_dai_stream *io,
			   struct rsnd_dma *dma, int id,
			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg = {};
	int is_play = rsnd_io_is_play(io);
	int ret;

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	if (dev->of_node) {
		dmaen->chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	} else {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
						  (void *)id);
	}
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		goto rsnd_dma_channel_err;
	}

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "dma : %pad -> %pad\n",
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(io, dma);
rsnd_dma_channel_err:

	/*
	 * DMA failed. try to PIO mode
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	return -EAGAIN;
}
Example #5
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_of_filter_args fargs = {
		.dw = dw,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	fargs.req = dma_spec->args[0];
	fargs.src = dma_spec->args[1];
	fargs.dst = dma_spec->args[2];

	if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
		    fargs.src >= dw->nr_masters ||
		    fargs.dst >= dw->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_of_filter, &fargs);
}
Example #6
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	dma_cap_mask_t		mask;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= p->port.mapbase + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= p->port.mapbase + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		dma_release_channel(dma->rxchan);
		return -ENODEV;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		dma_release_channel(dma->rxchan);
		dma_release_channel(dma->txchan);
		return -ENOMEM;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
}
Example #7
static int __devinit rt_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct rt_dma_chan *rt_chan;
	int err;
	int ret;
	int reg;

	dev_info(&pdev->dev, "%s\n", __func__);

	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
	dma_dev->device_tx_status = rt_dma_status;
	dma_dev->device_issue_pending = rt_dma_issue_pending;
	dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
	dma_dev->dev = &pdev->dev;

	rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
	if (!rt_chan)
		return -ENOMEM;

	spin_lock_init(&rt_chan->lock);
	INIT_LIST_HEAD(&rt_chan->chain);
	INIT_LIST_HEAD(&rt_chan->completed_slots);
	INIT_LIST_HEAD(&rt_chan->all_slots);
	rt_chan->common.device = dma_dev;
	rt_chan->txd.tx_submit = rt_dma_tx_submit;

	list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);

	err = dma_async_device_register(dma_dev);
	if (err) {
		pr_err("ERR_MDMA: device_register failed: %d\n", err);
		return err;
	}

	ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler,
			  IRQF_DISABLED, "Ralink_DMA", NULL);
	if (ret) {
		pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
		dma_async_device_unregister(dma_dev);
		return ret;
	}

	/* Set up the GDMA control register in advance. */
	reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);

	return 0;
}
Example #8
/**
 * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
 * @filter_fn: Filter function used to request the DMA channel
 * @filter_data: Data passed to the DMA filter function
 *
 * Returns NULL or the requested DMA channel.
 *
 * This function requests a DMA channel for use with the dmaengine PCM.
 */
struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
	void *filter_data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_CYCLIC, mask);

	return dma_request_channel(mask, filter_fn, filter_data);
}
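A minimal usage sketch, for orientation only (my_dma_filter, struct my_pcm, and its fields are hypothetical, not taken from the examples above; assumes <linux/dmaengine.h> and <sound/dmaengine_pcm.h>). The returned channel, if any, must eventually be freed with dma_release_channel():

struct my_pcm {
	struct dma_chan *chan;
	void *filter_data;	/* controller-specific data the filter matches on */
};

/* Hypothetical filter: claim a channel whose private data matches ours. */
static bool my_dma_filter(struct dma_chan *chan, void *param)
{
	return chan->private == param;
}

static int my_pcm_open(struct my_pcm *pcm)
{
	pcm->chan = snd_dmaengine_pcm_request_channel(my_dma_filter,
						      pcm->filter_data);
	if (!pcm->chan)
		return -ENXIO;

	return 0;
}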
Example #9
File: core.c Project: 3null/linux
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg = {};
	struct rsnd_mod *mod_from;
	struct rsnd_mod *mod_to;
	char dma_name[DMA_NAME_SIZE];
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	rsnd_dma_of_path(dma, is_play, &mod_from, &mod_to);
	rsnd_dma_of_name(mod_from, mod_to, dma_name);

	cfg.slave_id	= id;
	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= rsnd_gen_dma_addr(priv, mod_from, is_play, 1);
	cfg.dst_addr	= rsnd_gen_dma_addr(priv, mod_to,   is_play, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "dma : %s %pad -> %pad\n",
		dma_name, &cfg.src_addr, &cfg.dst_addr);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     dma_name);
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->addr = is_play ? cfg.src_addr : cfg.dst_addr;
	dma->dir = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
Example #10
/*
 * omap3isp_hist_init - Module Initialization.
 */
int omap3isp_hist_init(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;
	struct omap3isp_hist_config *hist_cfg;
	int ret = -1;

	hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
	if (hist_cfg == NULL)
		return -ENOMEM;

	hist->isp = isp;

	if (HIST_CONFIG_DMA) {
		struct platform_device *pdev = to_platform_device(isp->dev);
		struct resource *res;
		unsigned int sig = 0;
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
						   "hist");
		if (res)
			sig = res->start;

		hist->dma_ch = dma_request_slave_channel_compat_reason(mask,
				omap_dma_filter_fn, &sig, isp->dev, "hist");
		if (IS_ERR(hist->dma_ch)) {
			ret = PTR_ERR(hist->dma_ch);
			if (ret == -EPROBE_DEFER)
				return ret;

			hist->dma_ch = NULL;
			dev_warn(isp->dev,
				 "hist: DMA channel request failed, using PIO\n");
		} else {
			dev_dbg(isp->dev, "hist: using DMA channel %s\n",
				dma_chan_name(hist->dma_ch));
		}
	}

	hist->ops = &hist_ops;
	hist->priv = hist_cfg;
	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;

	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
	if (ret) {
		if (hist->dma_ch)
			dma_release_channel(hist->dma_ch);
	}

	return ret;
}
Example #11
int logi_dma_open(struct drvr_mem* mem_dev, dma_addr_t *physbuf)
{
#ifdef USE_DMA_ENGINE
	struct dma_slave_config	conf = {};
	dma_cap_mask_t mask;
#endif

	/* Allocate DMA buffer */
	mem_dev->dma.buf = dma_alloc_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES,
					      &dmaphysbuf, 0);

	if (!mem_dev->dma.buf) {
		DBG_LOG("failed to allocate DMA buffer\n");
		return -ENOMEM;
	}

	*physbuf = dmaphysbuf;

#ifdef USE_DMA_ENGINE
	/* Allocate DMA channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	mem_dev->dma.chan = dma_request_channel(mask, NULL, NULL);

	if (!mem_dev->dma.chan)
		return -ENODEV;

	/* Configure DMA channel */
	conf.direction = DMA_MEM_TO_MEM;
	/*conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;*/
	dmaengine_slave_config(mem_dev->dma.chan, &conf);

	DBG_LOG("Using Linux DMA Engine API");
#else
	mem_dev->dma.dma_chan = edma_alloc_channel(EDMA_CHANNEL_ANY, dma_callback,
					       NULL, EVENTQ_0);

	if (mem_dev->dma.dma_chan < 0) {
		DBG_LOG("edma_alloc_channel failed for dma_ch, error: %d\n",
			mem_dev->dma.dma_chan);
		return mem_dev->dma.dma_chan;
	}

	DBG_LOG("Using EDMA/DMA Engine");
#endif /* USE_DMA_ENGINE */

	DBG_LOG("EDMA channel %d reserved\n", mem_dev->dma.dma_chan);

	return 0;
}
Example #12
static struct dma_chan *
sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
			 struct sh_mmcif_plat_data *pdata,
			 enum dma_transfer_direction direction)
{
	struct dma_slave_config cfg = { 0, };
	struct dma_chan *chan;
	void *slave_data = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (pdata)
		slave_data = direction == DMA_MEM_TO_DEV ?
			(void *)pdata->slave_id_tx :
			(void *)pdata->slave_id_rx;

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				slave_data, &host->pd->dev,
				direction == DMA_MEM_TO_DEV ? "tx" : "rx");

	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
		direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);

	if (!chan)
		return NULL;

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res) {
		dma_release_channel(chan);
		return NULL;
	}

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}
Example #13
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id,
		  int (*inquiry)(struct rsnd_dma *dma,
				  dma_addr_t *buf, int *len),
		  int (*complete)(struct rsnd_dma *dma))
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg = {};
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     is_play ? "tx" : "rx");
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	cfg.slave_id	= id;
	cfg.dst_addr	= 0; /* use default addr when playback */
	cfg.src_addr	= 0; /* use default addr when capture */
	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	dma->priv = priv;
	dma->inquiry = inquiry;
	dma->complete = complete;
	INIT_WORK(&dma->work, rsnd_dma_do_work);

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
Example #14
/* request dma channel and set callback function */
static int ss_dma_prepare(ss_dma_info_t *info)
{
	dma_cap_mask_t mask;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	info->chan = dma_request_channel(mask, NULL, NULL);
	if (info->chan == NULL) {
		SS_ERR("Request DMA() failed!\n");
		return -EINVAL;
	}

	return 0;
}
Example #15
static int dmaengine_pcm_request_channel(struct dmaengine_pcm_runtime_data *prtd,
	dma_filter_fn filter_fn, void *filter_data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_CYCLIC, mask);
	prtd->dma_chan = dma_request_channel(mask, filter_fn, filter_data);

	if (!prtd->dma_chan)
		return -ENXIO;

	return 0;
}
Example #16
static int mic_request_dma_chans(struct mic_driver *mdrv)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	do {
		chan = dma_request_channel(mask, NULL, NULL);
		if (chan) {
			mdrv->dma_ch[mdrv->num_dma_ch++] = chan;
			if (mdrv->num_dma_ch >= MIC_MAX_DMA_CHAN)
				break;
		}
	} while (chan);
	dev_info(mdrv->dev, "DMA channels # %d\n", mdrv->num_dma_ch);
	return mdrv->num_dma_ch;
}
Example #17
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg = {};
	char dma_name[DMA_NAME_SIZE];
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	rsnd_dma_of_name(dma, is_play, dma_name);
	rsnd_gen_dma_addr(priv, dma, &cfg, is_play, id);

	dev_dbg(dev, "dma name : %s\n", dma_name);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     dma_name);
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
Example #18
static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
Example #19
static int spi_pl330_dma_chan_alloc(struct dw_spi *dws)
{
	struct device_node *np = dws->master->dev.of_node;
	void *filter_param_rx, *filter_param_tx;
	dma_cap_mask_t mask;
	int lenp;

	/* If the DMA channels are already allocated */
	if (dws->rxchan && dws->txchan)
		return 0;

	filter_param_tx = of_find_property(np, "tx-dma-channel", &lenp);
	if (!filter_param_tx)
		return -ENODEV;
	filter_param_rx = of_find_property(np, "rx-dma-channel", &lenp);
	if (!filter_param_rx)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	dws->rxchan = dma_request_channel(mask, pl330_filter, filter_param_rx);
	while (!dws->rxchan) {
		/* all DMA channels are busy, try again */
		msleep(10);
		dws->rxchan = dma_request_channel(mask, pl330_filter, filter_param_rx);
	}

	/* 2. Init tx channel */
	dws->txchan = dma_request_channel(mask, pl330_filter, filter_param_tx);
	while (!dws->txchan) {
		/* all DMA channels are busy, try again */
		msleep(10);
		dws->txchan = dma_request_channel(mask, pl330_filter, filter_param_tx);
	}

	dws->dma_inited = 1;

	return 0;
}
Example #20
static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	u32 id = dma_spec->args[0];
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	if (dma_spec->args_count != 1)
		return NULL;

	dma_cap_zero(mask);
	/* Only slave DMA channels can be allocated via DT */
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
	if (chan)
		to_shdma_chan(chan)->hw_req = id;

	return chan;
}
Example #21
/* request dma channel and set callback function */
int nand_request_dma(void)
{
	dma_cap_mask_t mask;

	printk("request DMA");

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (dma_hdl == NULL) {
		dma_hdl = dma_request_channel(mask, NULL, NULL);
		if (dma_hdl == NULL) {
			printk(KERN_ERR "Request DMA failed!\n");
			return -EINVAL;
		}
	}

	return 0;
}
Example #22
static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
	/* We have to use IDMAC_IC_7 for Bayer / generic data */
	struct dma_chan_request rq = {.mx3_cam = mx3_cam,
				      .id = IDMAC_IC_7};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);
	chan = dma_request_channel(mask, chan_filter, &rq);
	if (!chan)
		return -EBUSY;

	*ichan = to_idmac_chan(chan);
	(*ichan)->client = mx3_cam;

	return 0;
}
Example #23
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = &host->mtd;
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-slc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	return 0;
}
Example #24
/* Function to request PXP DMA channel */
static int pxp_chan_init(cam_data *cam)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	/* Request a free channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);
	chan = dma_request_channel(mask, chan_filter, NULL);
	if (!chan) {
		pr_err("Unsuccessfully request channel!\n");
		return -EBUSY;
	}

	cam->pxp_chan = to_pxp_channel(chan);
	cam->pxp_chan->client = cam;

	init_completion(&cam->pxp_tx_cmpl);

	return 0;
}
Example #25
static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
{
	struct spi_master *master = pic32s->master;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	master->dma_rx = dma_request_slave_channel_compat(mask, NULL, NULL,
							  dev, "spi-rx");
	if (!master->dma_rx) {
		dev_warn(dev, "RX channel not found.\n");
		goto out_err;
	}

	master->dma_tx = dma_request_slave_channel_compat(mask, NULL, NULL,
							  dev, "spi-tx");
	if (!master->dma_tx) {
		dev_warn(dev, "TX channel not found.\n");
		goto out_err;
	}

	if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
		goto out_err;

	/* DMA channels allocated and prepared */
	set_bit(PIC32F_DMA_PREP, &pic32s->flags);

	return;

out_err:
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);

	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
}
Example #26
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
				struct samsung_dma_info *info)
{
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	struct dma_slave_config slave_config;
	void *filter_param;

	dma_cap_zero(mask);
	dma_cap_set(info->cap, mask);

	/*
	 * If a dma channel property of a device node from device tree is
	 * specified, use that as the filter parameter.
	 */
	filter_param = (dma_ch == DMACH_DT_PROP) ? (void *)info->dt_dmach_prop :
				(void *)dma_ch;
	chan = dma_request_channel(mask, pl330_filter, filter_param);
	if (!chan)
		return 0;

	if (info->direction == DMA_FROM_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.src_addr = info->fifo;
		slave_config.src_addr_width = info->width;
		slave_config.src_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	} else if (info->direction == DMA_TO_DEVICE) {
		memset(&slave_config, 0, sizeof(struct dma_slave_config));
		slave_config.direction = info->direction;
		slave_config.dst_addr = info->fifo;
		slave_config.dst_addr_width = info->width;
		slave_config.dst_maxburst = 1;
		dmaengine_slave_config(chan, &slave_config);
	}

	return (unsigned)chan;
}
Example #27
static int apb_dma_init(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!apb_dma_chan)
		return -EPROBE_DEFER;

	apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys,
					GFP_KERNEL);
	if (!apb_buffer) {
		dma_release_channel(apb_dma_chan);
		return -ENOMEM;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

	return 0;
}
Example #28
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
{
	struct s3c24xx_uart_dma	*dma = p->dma;
	dma_cap_mask_t mask;
	unsigned long flags;

	/* Default slave configuration parameters */
	dma->rx_conf.direction		= DMA_DEV_TO_MEM;
	dma->rx_conf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rx_conf.src_addr		= p->port.mapbase + S3C2410_URXH;
	dma->rx_conf.src_maxburst	= 16;

	dma->tx_conf.direction		= DMA_MEM_TO_DEV;
	dma->tx_conf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->tx_conf.dst_addr		= p->port.mapbase + S3C2410_UTXH;
	if (dma_get_cache_alignment() >= 16)
		dma->tx_conf.dst_maxburst = 16;
	else
		dma->tx_conf.dst_maxburst = 1;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dma->rx_chan = dma_request_slave_channel_compat(mask, dma->fn,
					dma->rx_param, p->port.dev, "rx");
	if (!dma->rx_chan)
		return -ENODEV;

	dmaengine_slave_config(dma->rx_chan, &dma->rx_conf);

	dma->tx_chan = dma_request_slave_channel_compat(mask, dma->fn,
					dma->tx_param, p->port.dev, "tx");
	if (!dma->tx_chan) {
		dma_release_channel(dma->rx_chan);
		return -ENODEV;
	}

	dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);

	/* RX buffer */
	dma->rx_size = PAGE_SIZE;

	dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);

	if (!dma->rx_buf) {
		dma_release_channel(dma->rx_chan);
		dma_release_channel(dma->tx_chan);
		return -ENOMEM;
	}

	dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
				dma->rx_size, DMA_FROM_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
				p->port.state->xmit.buf,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_unlock_irqrestore(&p->port.lock, flags);

	return 0;
}
Example #29
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since DMA device is powered off.
	 * Moreover we have no possibility to check if the platform is affected
	 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
	 * unconditionally. On the other hand we can't use
	 * pm_runtime_suspended() because runtime PM framework is not fully
	 * used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
Example #30
static int bcm2835_sdhost_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct clk *clk;
	struct resource *iomem;
	struct bcm2835_host *host;
	struct mmc_host *mmc;
	int ret;

	pr_debug("bcm2835_sdhost_probe\n");
	mmc = mmc_alloc_host(sizeof(*host), dev);
	if (!mmc)
		return -ENOMEM;

	mmc->ops = &bcm2835_sdhost_ops;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pio_timeout = msecs_to_jiffies(500);
	host->max_delay = 1; /* Warn if over 1ms */
	spin_lock_init(&host->lock);

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->ioaddr = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(host->ioaddr)) {
		ret = PTR_ERR(host->ioaddr);
		goto err;
	}

	host->phys_addr = iomem->start + BCM2835_VCMMU_SHIFT;
	pr_debug(" - ioaddr %lx, iomem->start %lx, phys_addr %lx\n",
		 (unsigned long)host->ioaddr,
		 (unsigned long)iomem->start,
		 (unsigned long)host->phys_addr);

	host->allow_dma = ALLOW_DMA;

	if (node) {
		/* Read any custom properties */
		of_property_read_u32(node,
				     "brcm,delay-after-stop",
				     &host->delay_after_stop);
		of_property_read_u32(node,
				     "brcm,overclock-50",
				     &host->overclock_50);
		of_property_read_u32(node,
				     "brcm,pio-limit",
				     &host->pio_limit);
		host->allow_dma = ALLOW_DMA &&
			!of_property_read_bool(node, "brcm,force-pio");
		host->debug = of_property_read_bool(node, "brcm,debug");
	}

	if (host->allow_dma) {
		if (node) {
			host->dma_chan_tx =
				dma_request_slave_channel(dev, "tx");
			host->dma_chan_rx =
				dma_request_slave_channel(dev, "rx");
		} else {
			dma_cap_mask_t mask;

			dma_cap_zero(mask);
			/* we don't care about the channel, any would work */
			dma_cap_set(DMA_SLAVE, mask);
			host->dma_chan_tx =
				dma_request_channel(mask, NULL, NULL);
			host->dma_chan_rx =
				dma_request_channel(mask, NULL, NULL);
		}
	}

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "could not get clk\n");
		ret = PTR_ERR(clk);
		goto err;
	}

	host->max_clk = clk_get_rate(clk);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		dev_err(dev, "get IRQ failed\n");
		ret = -EINVAL;
		goto err;
	}

	pr_debug(" - max_clk %lx, irq %d\n",
		 (unsigned long)host->max_clk,
		 (int)host->irq);

	if (node)
		mmc_of_parse(mmc);
	else
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	ret = bcm2835_sdhost_add_host(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	pr_debug("bcm2835_sdhost_probe -> OK\n");

	return 0;

err:
	pr_debug("bcm2835_sdhost_probe -> err %d\n", ret);
	mmc_free_host(mmc);

	return ret;
}