Example #1
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct pxa2xx_spi_master *pdata = drv_data->master_info;
	struct device *dev = &drv_data->pdev->dev;
	struct spi_master *master = drv_data->master;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	master->dma_tx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!master->dma_tx)
		return -ENODEV;

	master->dma_rx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!master->dma_rx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
		return -ENODEV;
	}

	return 0;
}
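
Example #1 requests the TX channel first and unwinds it if the RX request fails. The matching teardown is symmetric; below is a minimal sketch (the helper name pxa2xx_spi_dma_release and the pointer-clearing convention are assumptions, not code from the driver above):

/* Hypothetical teardown pairing the setup above: release whichever
 * channels were acquired and clear the pointers so a re-setup is safe. */
static void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct spi_master *master = drv_data->master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}
	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}
}
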
Example #2
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	dma_cap_mask_t		mask;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= p->port.mapbase + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= p->port.mapbase + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		dma_release_channel(dma->rxchan);
		return -ENODEV;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		dma_release_channel(dma->rxchan);
		dma_release_channel(dma->txchan);
		return -ENOMEM;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
}
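
Note that this revision checks neither the dmaengine_slave_config() return values nor the dma_map_single() mapping of the TX buffer. Example #11 below is a later version of the same function that validates the mapping with dma_mapping_error() and unwinds the RX buffer and both channels on failure.
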
Example #3
File: core.c Project: 3null/linux
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg;
	struct rsnd_mod *mod_from;
	struct rsnd_mod *mod_to;
	char dma_name[DMA_NAME_SIZE];
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	rsnd_dma_of_path(dma, is_play, &mod_from, &mod_to);
	rsnd_dma_of_name(mod_from, mod_to, dma_name);

	cfg.slave_id	= id;
	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= rsnd_gen_dma_addr(priv, mod_from, is_play, 1);
	cfg.dst_addr	= rsnd_gen_dma_addr(priv, mod_to,   is_play, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "dma : %s %pad -> %pad\n",
		dma_name, &cfg.src_addr, &cfg.dst_addr);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     dma_name);
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->addr = is_play ? cfg.src_addr : cfg.dst_addr;
	dma->dir = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
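
A caveat that applies to Examples #3, #6 and #8: cfg lives on the stack and only a few members are assigned, so the remaining fields of struct dma_slave_config reach dmaengine_slave_config() uninitialized. Example #4 sidesteps this with a zero initializer; an equivalent fix is a memset before filling in the fields (a sketch, not from the driver):

	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));	/* no stack garbage in unused fields */
	cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	/* ...assign only the members the dmaengine driver should act on... */
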
Example #4
File: sh_mmcif.c Project: 3bsa/linux
static struct dma_chan *
sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
			 struct sh_mmcif_plat_data *pdata,
			 enum dma_transfer_direction direction)
{
	struct dma_slave_config cfg = { 0, };
	struct dma_chan *chan;
	void *slave_data = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (pdata)
		slave_data = direction == DMA_MEM_TO_DEV ?
			(void *)pdata->slave_id_tx :
			(void *)pdata->slave_id_rx;

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				slave_data, &host->pd->dev,
				direction == DMA_MEM_TO_DEV ? "tx" : "rx");

	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
		direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);

	if (!chan)
		return NULL;

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}
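
sh_mmcif_request_dma_one() hands back one configured channel per direction. A caller that wants the all-or-nothing policy used elsewhere in this driver might wrap it like this (hypothetical glue code, loosely modeled on Example #10 below):

	host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
	if (!host->chan_tx)
		return;			/* fall back to PIO */

	host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
	if (!host->chan_rx) {
		/* either both directions use DMA or neither does */
		dma_release_channel(host->chan_tx);
		host->chan_tx = NULL;
	}
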
Example #5
static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
{
	struct spi_master *master = pic32s->master;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	master->dma_rx = dma_request_slave_channel_compat(mask, NULL, NULL,
							  dev, "spi-rx");
	if (!master->dma_rx) {
		dev_warn(dev, "RX channel not found.\n");
		goto out_err;
	}

	master->dma_tx = dma_request_slave_channel_compat(mask, NULL, NULL,
							  dev, "spi-tx");
	if (!master->dma_tx) {
		dev_warn(dev, "TX channel not found.\n");
		goto out_err;
	}

	if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
		goto out_err;

	/* DMA channels allocated and prepared */
	set_bit(PIC32F_DMA_PREP, &pic32s->flags);

	return;

out_err:
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);

	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
}
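
Here DMA is strictly optional: on any failure the function releases what it obtained and simply returns, leaving PIC32F_DMA_PREP clear. The transfer path can then branch on that flag, along these lines (a hypothetical dispatch fragment; the helper names are assumptions):

	if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
		ret = pic32_spi_dma_transfer(pic32s, xfer);	/* assumed helper */
	else
		ret = pic32_spi_poll_transfer(pic32s, xfer);	/* assumed helper */
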
Example #6
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id,
		  int (*inquiry)(struct rsnd_dma *dma,
				  dma_addr_t *buf, int *len),
		  int (*complete)(struct rsnd_dma *dma))
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     is_play ? "tx" : "rx");
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	cfg.slave_id	= id;
	cfg.dst_addr	= 0; /* use default addr when playback */
	cfg.src_addr	= 0; /* use default addr when capture */
	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	dma->priv = priv;
	dma->inquiry = inquiry;
	dma->complete = complete;
	INIT_WORK(&dma->work, rsnd_dma_do_work);

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
Example #7
File: isphist.c Project: 020gzh/linux
/*
 * omap3isp_hist_init - Module Initialization.
 */
int omap3isp_hist_init(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;
	struct omap3isp_hist_config *hist_cfg;
	int ret = -1;

	hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
	if (hist_cfg == NULL)
		return -ENOMEM;

	hist->isp = isp;

	if (HIST_CONFIG_DMA) {
		struct platform_device *pdev = to_platform_device(isp->dev);
		struct resource *res;
		unsigned int sig = 0;
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
						   "hist");
		if (res)
			sig = res->start;

		hist->dma_ch = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, isp->dev, "hist");
		if (!hist->dma_ch)
			dev_warn(isp->dev,
				 "hist: DMA channel request failed, using PIO\n");
		else
			dev_dbg(isp->dev, "hist: using DMA channel %s\n",
				dma_chan_name(hist->dma_ch));
	}

	hist->ops = &hist_ops;
	hist->priv = hist_cfg;
	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;

	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
	if (ret) {
		if (hist->dma_ch)
			dma_release_channel(hist->dma_ch);
	}

	return ret;
}
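
As in Example #5, DMA is treated here as an optimization: a failed channel request only logs a warning and the histogram module falls back to PIO, so omap3isp_hist_init() does not propagate the failure. The channel is released only if the later omap3isp_stat_init() call fails.
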
Example #8
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg;
	char dma_name[DMA_NAME_SIZE];
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	rsnd_dma_of_name(dma, is_play, dma_name);
	rsnd_gen_dma_addr(priv, dma, &cfg, is_play, id);

	dev_dbg(dev, "dma name : %s\n", dma_name);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     dma_name);
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
Example #9
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
{
	struct s3c24xx_uart_dma	*dma = p->dma;
	dma_cap_mask_t mask;
	unsigned long flags;

	/* Default slave configuration parameters */
	dma->rx_conf.direction		= DMA_DEV_TO_MEM;
	dma->rx_conf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rx_conf.src_addr		= p->port.mapbase + S3C2410_URXH;
	dma->rx_conf.src_maxburst	= 16;

	dma->tx_conf.direction		= DMA_MEM_TO_DEV;
	dma->tx_conf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->tx_conf.dst_addr		= p->port.mapbase + S3C2410_UTXH;
	if (dma_get_cache_alignment() >= 16)
		dma->tx_conf.dst_maxburst = 16;
	else
		dma->tx_conf.dst_maxburst = 1;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dma->rx_chan = dma_request_slave_channel_compat(mask, dma->fn,
					dma->rx_param, p->port.dev, "rx");
	if (!dma->rx_chan)
		return -ENODEV;

	dmaengine_slave_config(dma->rx_chan, &dma->rx_conf);

	dma->tx_chan = dma_request_slave_channel_compat(mask, dma->fn,
					dma->tx_param, p->port.dev, "tx");
	if (!dma->tx_chan) {
		dma_release_channel(dma->rx_chan);
		return -ENODEV;
	}

	dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);

	/* RX buffer */
	dma->rx_size = PAGE_SIZE;

	dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);

	if (!dma->rx_buf) {
		dma_release_channel(dma->rx_chan);
		dma_release_channel(dma->tx_chan);
		return -ENOMEM;
	}

	dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
				dma->rx_size, DMA_FROM_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
				p->port.state->xmit.buf,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_unlock_irqrestore(&p->port.lock, flags);

	return 0;
}
Example #10
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	int ret;

	host->dma_active = false;

	if (pdata) {
		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
			return;
	} else if (!host->pd->dev.of_node) {
		return;
	}

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				pdata ? (void *)pdata->slave_id_tx : NULL,
				&host->pd->dev, "tx");
	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	/* In the OF case the driver will get the slave ID from the DT */
	if (pdata)
		cfg.slave_id = pdata->slave_id_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = res->start + MMCIF_CE_DATA;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto ecfgtx;

	host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				pdata ? (void *)pdata->slave_id_rx : NULL,
				&host->pd->dev, "rx");
	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto erqrx;

	if (pdata)
		cfg.slave_id = pdata->slave_id_rx;
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = res->start + MMCIF_CE_DATA;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto ecfgrx;

	return;

ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
erqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
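
The shared labels at the end do the unwinding: a TX config failure (ecfgtx) and an RX request failure (erqrx) both fall through to releasing the TX channel, while an RX config failure (ecfgrx) first releases the RX channel. Keeping the labels in reverse order of acquisition is what makes the fall-through correct.
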
Example #11
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
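
This function acquires four resources in sequence: the RX channel, the TX channel, the coherent RX buffer, and the streaming TX mapping. A teardown has to undo them in reverse; a minimal sketch of what that might look like (the function name, the use of dmaengine_terminate_sync(), and the exact ordering are assumptions, not code from the source):

static void serial8250_dma_teardown_sketch(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	/* stop in-flight transfers before freeing their buffers */
	dmaengine_terminate_sync(dma->txchan);
	dmaengine_terminate_sync(dma->rxchan);

	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
			  dma->rx_buf, dma->rx_addr);

	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	dma->txchan = NULL;
	dma->rxchan = NULL;
}
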
Example #12
File: omap.c Project: 7799/linux
static int mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	unsigned sig = 0;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}
	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -EPROBE_DEFER;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host),
			    GFP_KERNEL);
	if (host == NULL)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->virt_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->virt_base))
		return PTR_ERR(host->virt_base);

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->features = host->pdata->slots[0].features;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->irq = irq;
	host->phys_base = res->start;
	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk))
		return PTR_ERR(host->iclk);
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx_burst = -1;
	host->dma_rx_burst = -1;

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (res)
		sig = res->start;
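	/*
	 * &sig is only dereferenced inside the filter callback while the
	 * request runs, so passing a stack variable here is safe; on DT
	 * platforms the "tx"/"rx" names are used instead.
	 */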
	host->dma_tx = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, &pdev->dev, "tx");
	if (!host->dma_tx)
		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (res)
		sig = res->start;
	host->dma_rx = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, &pdev->dev, "rx");
	if (!host->dma_rx)
		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_dma;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	host->reg_shift = (mmc_omap7xx() ? 1 : 2);

	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
	if (!host->mmc_omap_wq) {
		ret = -ENOMEM;
		goto err_plat_cleanup;
	}

	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);

			goto err_destroy_wq;
		}
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_dma:
	if (host->dma_tx)
		dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
	return ret;
}
Example #13
static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret = 0;

	/*
	 * Resource validation, four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap		= host->ports[0];
	ap->ops		= &pxa_ata_port_ops;
	ap->pio_mask	= ATA_PIO4;
	ap->mwdma_mask	= ATA_MWDMA2;

	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
								GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = pdata->dma_dreq;
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel_compat(mask, pxad_filter_fn,
						 &param, &pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;
	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}
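
Distilled from all thirteen examples, the common shape is: build a DMA_SLAVE capability mask, call dma_request_slave_channel_compat() (which tries a DT/ACPI lookup by channel name first and falls back to the legacy filter function with platform data), then apply a dma_slave_config, and release the channel on any later failure. A generic sketch (filter_fn, filter_param, cfg, and the error codes are placeholders, not from any single driver above):

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* DT lookup by name first, legacy filter+param as the fallback */
	chan = dma_request_slave_channel_compat(mask, filter_fn, filter_param,
						dev, "rx");
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0) {
		dma_release_channel(chan);
		return ret;
	}
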