Example #1
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	struct resource *res;
	int ret;
	int i, j;
	struct dma_slave_config slv_cfg = {
		.src_maxburst = 2,
	};
	struct dma_slave_config tx_slv_cfg = {
		.dst_maxburst = 2,
	};
	const struct of_device_id *match;

	match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
	if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
		dev_err(&pdev->dev,
			"Unable to find cell-index in uart node.\n");
		ret = -EFAULT;
		goto err;
	}
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
		pdev->id += ((struct sirfsoc_uart_register *)
				match->data)->uart_param.register_uart_nr;
	sirfport = &sirfsoc_uart_ports[pdev->id];
	port = &sirfport->port;
	port->dev = &pdev->dev;
	port->private_data = sirfport;
	sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

	sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
		"sirf,uart-has-rtscts");
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
		sirfport->uart_reg->uart_type =	SIRF_USP_UART;
		if (!sirfport->hw_flow_ctrl)
			goto usp_no_flow_control;
		if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
			sirfport->cts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "cts-gpios", 0);
		else
			sirfport->cts_gpio = -1;
		if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
			sirfport->rts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "rts-gpios", 0);
		else
			sirfport->rts_gpio = -1;

		if ((!gpio_is_valid(sirfport->cts_gpio) ||
			 !gpio_is_valid(sirfport->rts_gpio))) {
			ret = -EINVAL;
			dev_err(&pdev->dev,
				"Usp flow control must have cts and rts gpio");
			goto err;
		}
		ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
				"usp-cts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request cts gpio");
			goto err;
		}
		gpio_direction_input(sirfport->cts_gpio);
		ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
				"usp-rts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request rts gpio");
			goto err;
		}
		gpio_direction_output(sirfport->rts_gpio, 1);
	}
usp_no_flow_control:
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
		sirfport->is_marco = true;

	if (of_property_read_u32(pdev->dev.of_node,
			"fifosize",
			&port->fifosize)) {
		dev_err(&pdev->dev,
			"Unable to find fifosize in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	tasklet_init(&sirfport->rx_dma_complete_tasklet,
			sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
	tasklet_init(&sirfport->rx_tmo_process_tasklet,
			sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
	port->mapbase = res->start;
	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!port->membase) {
		dev_err(&pdev->dev, "Cannot remap resource.\n");
		ret = -ENOMEM;
		goto err;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->irq = res->start;

	sirfport->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sirfport->clk)) {
		ret = PTR_ERR(sirfport->clk);
		goto err;
	}
	port->uartclk = clk_get_rate(sirfport->clk);

	port->ops = &sirfsoc_uart_ops;
	spin_lock_init(&port->lock);

	platform_set_drvdata(pdev, sirfport);
	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
	if (ret != 0) {
		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
		goto port_err;
	}

	sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
	for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
		sirfport->rx_dma_items[i].xmit.buf =
			dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
			&sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
		if (!sirfport->rx_dma_items[i].xmit.buf) {
			dev_err(port->dev, "Uart alloc bufa failed\n");
			ret = -ENOMEM;
			goto alloc_coherent_err;
		}
		sirfport->rx_dma_items[i].xmit.head =
			sirfport->rx_dma_items[i].xmit.tail = 0;
	}
	if (sirfport->rx_dma_chan)
		dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
	sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
	if (sirfport->tx_dma_chan)
		dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);

	return 0;
alloc_coherent_err:
	for (j = 0; j < i; j++)
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				sirfport->rx_dma_items[j].xmit.buf,
				sirfport->rx_dma_items[j].dma_addr);
	dma_release_channel(sirfport->rx_dma_chan);
port_err:
	clk_put(sirfport->clk);
err:
	return ret;
}

static int sirfsoc_uart_remove(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	clk_put(sirfport->clk);
	uart_remove_one_port(&sirfsoc_uart_drv, port);
	if (sirfport->rx_dma_chan) {
		int i;
		dmaengine_terminate_all(sirfport->rx_dma_chan);
		dma_release_channel(sirfport->rx_dma_chan);
		for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
			dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
					sirfport->rx_dma_items[i].xmit.buf,
					sirfport->rx_dma_items[i].dma_addr);
	}
	if (sirfport->tx_dma_chan) {
		dmaengine_terminate_all(sirfport->tx_dma_chan);
		dma_release_channel(sirfport->tx_dma_chan);
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int
sirfsoc_uart_suspend(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_suspend_port(&sirfsoc_uart_drv, port);
	return 0;
}

static int sirfsoc_uart_resume(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_resume_port(&sirfsoc_uart_drv, port);
	return 0;
}
#endif

static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};

static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = sirfsoc_uart_ids,
		.pm	= &sirfsoc_uart_pm_ops,
	},
};

static int __init sirfsoc_uart_init(void)
{
	int ret = 0;

	ret = uart_register_driver(&sirfsoc_uart_drv);
	if (ret)
		goto out;

	ret = platform_driver_register(&sirfsoc_uart_driver);
	if (ret)
		uart_unregister_driver(&sirfsoc_uart_drv);
out:
	return ret;
}
module_init(sirfsoc_uart_init);

static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
Example #2
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	int ret;

	host->dma_active = false;

	if (!pdata)
		return;

	if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_tx);
	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	cfg.slave_id = pdata->slave_id_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = res->start + MMCIF_CE_DATA;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto ecfgtx;

	host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_rx);
	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto erqrx;

	cfg.slave_id = pdata->slave_id_rx;
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = res->start + MMCIF_CE_DATA;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto ecfgrx;

	init_completion(&host->dma_complete);

	return;

ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
erqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
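
Most of the snippets in this collection follow the same contract: a TX/RX channel pair is only useful as a pair, so if the second request (or any later step) fails, whatever was already acquired is handed back with dma_release_channel(). Below is a minimal sketch of that pattern, not taken from any of the drivers shown here (my_dma_pair and my_request_dma_pair are made-up names):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

struct my_dma_pair {
	struct dma_chan *tx;
	struct dma_chan *rx;
};

/* Request TX and RX together; on failure leave both pointers NULL. */
static int my_request_dma_pair(struct device *dev, struct my_dma_pair *p)
{
	p->tx = dma_request_slave_channel(dev, "tx");
	if (!p->tx)
		return -ENODEV;

	p->rx = dma_request_slave_channel(dev, "rx");
	if (!p->rx) {
		dma_release_channel(p->tx);	/* roll back the TX request */
		p->tx = NULL;
		return -ENODEV;
	}

	return 0;
}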
Example #3
static int __devinit mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	unsigned sig;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}
	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	res = request_mem_region(res->start, resource_size(res),
				 pdev->name);
	if (res == NULL)
		return -EBUSY;

	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
	if (host == NULL) {
		ret = -ENOMEM;
		goto err_free_mem_region;
	}

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->mem_res = res;
	host->irq = irq;
	host->use_dma = 1;
	host->phys_base = host->mem_res->start;
	host->virt_base = ioremap(res->start, resource_size(res));
	if (!host->virt_base)
		goto err_ioremap;

	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk)) {
		ret = PTR_ERR(host->iclk);
		goto err_free_mmc_host;
	}
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx_burst = -1;
	host->dma_rx_burst = -1;

	if (cpu_is_omap24xx())
		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
	else
		sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
	host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
#if 0
	if (!host->dma_tx) {
		dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);
		goto err_dma;
	}
#else
	if (!host->dma_tx)
		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);
#endif
	if (cpu_is_omap24xx())
		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
	else
		sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
	host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
#if 0
	if (!host->dma_rx) {
		dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);
		goto err_dma;
	}
#else
	if (!host->dma_rx)
		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);
#endif

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_dma;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);

	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
	if (!host->mmc_omap_wq)
		goto err_plat_cleanup;

	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);

			goto err_destroy_wq;
		}
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_dma:
	if (host->dma_tx)
		dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
err_free_mmc_host:
	iounmap(host->virt_base);
err_ioremap:
	kfree(host);
err_free_mem_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}
Example #4
File: omap.c Project: 7799/linux
static int mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	unsigned sig = 0;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}
	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -EPROBE_DEFER;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host),
			    GFP_KERNEL);
	if (host == NULL)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->virt_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->virt_base))
		return PTR_ERR(host->virt_base);

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->features = host->pdata->slots[0].features;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->irq = irq;
	host->phys_base = res->start;
	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk))
		return PTR_ERR(host->iclk);
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx_burst = -1;
	host->dma_rx_burst = -1;

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (res)
		sig = res->start;
	host->dma_tx = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, &pdev->dev, "tx");
	if (!host->dma_tx)
		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (res)
		sig = res->start;
	host->dma_rx = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, &pdev->dev, "rx");
	if (!host->dma_rx)
		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_dma;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	host->reg_shift = (mmc_omap7xx() ? 1 : 2);

	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
	if (!host->mmc_omap_wq)
		goto err_plat_cleanup;

	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);

			goto err_destroy_wq;
		}
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_dma:
	if (host->dma_tx)
		dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
	return ret;
}
Example #5
static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;
	u32 max_speed_hz;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt.  In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	/*
	 * Maximum speed supported by spfi is limited to the lower value
	 * between 1/4 of the SPFI clock or to "spfi-max-frequency"
	 * defined in the device tree.
	 * If no value is defined in the device tree assume the maximum
	 * speed supported to be 1/4 of the SPFI clock.
	 */
	if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
				  &max_speed_hz)) {
		if (master->max_speed_hz > max_speed_hz)
			master->max_speed_hz = max_speed_hz;
	}

	master->setup = img_spfi_setup;
	master->cleanup = img_spfi_cleanup;
	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->unprepare_message = img_spfi_unprepare;
	master->handle_err = img_spfi_handle_err;

	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}
Example #6
static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config	config;
	int ret = 0;

	/*
	 * Resource validation, four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap		= host->ports[0];
	ap->ops		= &pxa_ata_port_ops;
	ap->pio_mask	= ATA_PIO4;
	ap->mwdma_mask	= ATA_MWDMA2;

	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
								GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel(&pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;
	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}
Example #7
/* create a plane */
static struct xilinx_drm_plane *
xilinx_drm_plane_create(struct xilinx_drm_plane_manager *manager,
			unsigned int possible_crtcs, bool priv)
{
	struct xilinx_drm_plane *plane;
	struct device *dev = manager->drm->dev;
	char plane_name[16];
	struct device_node *plane_node;
	struct device_node *sub_node;
	uint32_t fmt_in = -1;
	uint32_t fmt_out = -1;
	const char *fmt;
	int i;
	int ret;

	for (i = 0; i < manager->num_planes; i++)
		if (!manager->planes[i])
			break;

	if (i >= manager->num_planes) {
		DRM_ERROR("failed to allocate plane\n");
		return ERR_PTR(-ENODEV);
	}

	snprintf(plane_name, sizeof(plane_name), "plane%d", i);
	plane_node = of_get_child_by_name(manager->node, plane_name);
	if (!plane_node) {
		DRM_ERROR("failed to find a plane node\n");
		return ERR_PTR(-ENODEV);
	}

	plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
	if (!plane) {
		ret = -ENOMEM;
		goto err_out;
	}

	plane->priv = priv;
	plane->id = i;
	plane->prio = i;
	plane->zpos = i;
	plane->alpha = manager->default_alpha;
	plane->dpms = DRM_MODE_DPMS_OFF;
	plane->format = -1;
	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);

	plane->vdma.chan = of_dma_request_slave_channel(plane_node, "vdma");
	if (!plane->vdma.chan) {
		DRM_ERROR("failed to request dma channel\n");
		ret = -ENODEV;
		goto err_out;
	}

	/* probe color space converter */
	sub_node = of_parse_phandle(plane_node, "rgb2yuv", i);
	if (sub_node) {
		plane->rgb2yuv = xilinx_rgb2yuv_probe(dev, sub_node);
		of_node_put(sub_node);
		if (IS_ERR(plane->rgb2yuv)) {
			DRM_ERROR("failed to probe a rgb2yuv\n");
			ret = PTR_ERR(plane->rgb2yuv);
			goto err_dma;
		}

		/* rgb2yuv input format */
		plane->format = DRM_FORMAT_XRGB8888;

		/* rgb2yuv output format */
		fmt_out = DRM_FORMAT_YUV444;
	}

	/* probe chroma resampler */
	sub_node = of_parse_phandle(plane_node, "cresample", i);
	if (sub_node) {
		plane->cresample = xilinx_cresample_probe(dev, sub_node);
		of_node_put(sub_node);
		if (IS_ERR(plane->cresample)) {
			DRM_ERROR("failed to probe a cresample\n");
			ret = PTR_ERR(plane->cresample);
			goto err_dma;
		}

		/* cresample input format */
		fmt = xilinx_cresample_get_input_format_name(plane->cresample);
		ret = xilinx_drm_format_by_name(fmt, &fmt_in);
		if (ret)
			goto err_dma;

		/* format sanity check */
		if ((fmt_out != -1) && (fmt_out != fmt_in)) {
			DRM_ERROR("input/output format mismatch\n");
			ret = -EINVAL;
			goto err_dma;
		}

		if (plane->format == -1)
			plane->format = fmt_in;

		/* cresample output format */
		fmt = xilinx_cresample_get_output_format_name(plane->cresample);
		ret = xilinx_drm_format_by_name(fmt, &fmt_out);
		if (ret)
			goto err_dma;
	}

	/* create an OSD layer when OSD is available */
	if (manager->osd) {
		/* format sanity check */
		if ((fmt_out != -1) && (fmt_out != manager->format)) {
			DRM_ERROR("input/output format mismatch\n");
			ret = -EINVAL;
			goto err_dma;
		}

		/* create an osd layer */
		plane->osd_layer = xilinx_osd_layer_get(manager->osd);
		if (IS_ERR(plane->osd_layer)) {
			DRM_ERROR("failed to create a osd layer\n");
			ret = PTR_ERR(plane->osd_layer);
			plane->osd_layer = NULL;
			goto err_dma;
		}

		if (plane->format == -1)
			plane->format = manager->format;
	}

	/* If there's no IP other than VDMA, pick the manager's format */
	if (plane->format == -1)
		plane->format = manager->format;

	/* initialize drm plane */
	ret = drm_plane_init(manager->drm, &plane->base, possible_crtcs,
			     &xilinx_drm_plane_funcs, &plane->format, 1, priv);
	if (ret) {
		DRM_ERROR("failed to initialize plane\n");
		goto err_init;
	}
	plane->manager = manager;
	manager->planes[i] = plane;

	of_node_put(plane_node);

	return plane;

err_init:
	if (manager->osd) {
		xilinx_osd_layer_disable(plane->osd_layer);
		xilinx_osd_layer_put(plane->osd_layer);
	}
err_dma:
	dma_release_channel(plane->vdma.chan);
err_out:
	of_node_put(plane_node);
	return ERR_PTR(ret);
}
Example #8
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
						dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		dev_err(tup->uport.dev,
			"DMA channel alloc failed: %d\n", ret);
		return ret;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				 &dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		if (dma_mapping_error(tup->uport.dev, dma_phys)) {
			dev_err(tup->uport.dev, "dma_map_single tx failed\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_buf = tup->uport.state->xmit.buf;
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		tegra_uart_dma_channel_free(tup, dma_to_memory);
		return ret;
	}

	return 0;
}
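
Example #8 uses dma_request_slave_channel_reason(), which returns an ERR_PTR instead of NULL, so the caller can tell a hard failure from -EPROBE_DEFER and let the probe be retried later. In newer kernels the same call is spelled dma_request_chan(); a minimal sketch of the idiom (my_request_rx_chan is a hypothetical helper):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical helper: request an RX channel, propagating -EPROBE_DEFER. */
static int my_request_rx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	*out = chan;
	return 0;
}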
Example #9
static int rockchip_spi_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct rockchip_spi *rs;
	struct spi_master *master;
	struct resource *mem;
	u32 rsd_nsecs;

	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	rs = spi_master_get_devdata(master);

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(rs->regs)) {
		ret =  PTR_ERR(rs->regs);
		goto err_ioremap_resource;
	}

	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(rs->apb_pclk)) {
		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
		ret = PTR_ERR(rs->apb_pclk);
		goto err_ioremap_resource;
	}

	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
	if (IS_ERR(rs->spiclk)) {
		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
		ret = PTR_ERR(rs->spiclk);
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->spiclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
		goto err_spiclk_enable;
	}

	spi_enable_chip(rs, 0);

	rs->type = SSI_MOTO_SPI;
	rs->master = master;
	rs->dev = &pdev->dev;
	rs->max_freq = clk_get_rate(rs->spiclk);

	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
				  &rsd_nsecs))
		rs->rsd_nsecs = rsd_nsecs;

	rs->fifo_len = get_fifo_len(rs);
	if (!rs->fifo_len) {
		dev_err(&pdev->dev, "Failed to get fifo length\n");
		ret = -EINVAL;
		goto err_get_fifo_len;
	}

	spin_lock_init(&rs->lock);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->num_chipselect = 2;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	master->set_cs = rockchip_spi_set_cs;
	master->prepare_message = rockchip_spi_prepare_message;
	master->unprepare_message = rockchip_spi_unprepare_message;
	master->transfer_one = rockchip_spi_transfer_one;
	master->handle_err = rockchip_spi_handle_err;

	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
	if (IS_ERR(rs->dma_tx.ch)) {
		/* Check tx to see if we need defer probing driver */
		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_get_fifo_len;
		}
		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
		rs->dma_tx.ch = NULL;
	}

	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
	if (IS_ERR(rs->dma_rx.ch)) {
		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
			dma_release_channel(rs->dma_tx.ch);
			rs->dma_tx.ch = NULL;
			ret = -EPROBE_DEFER;
			goto err_get_fifo_len;
		}
		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
		rs->dma_rx.ch = NULL;
	}

	if (rs->dma_tx.ch && rs->dma_rx.ch) {
		dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
		rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
		rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
		rs->dma_tx.direction = DMA_MEM_TO_DEV;
		rs->dma_rx.direction = DMA_DEV_TO_MEM;

		master->can_dma = rockchip_spi_can_dma;
		master->dma_tx = rs->dma_tx.ch;
		master->dma_rx = rs->dma_rx.ch;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register master\n");
		goto err_register_master;
	}

	return 0;

err_register_master:
	pm_runtime_disable(&pdev->dev);
	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);
err_get_fifo_len:
	clk_disable_unprepare(rs->spiclk);
err_spiclk_enable:
	clk_disable_unprepare(rs->apb_pclk);
err_ioremap_resource:
	spi_master_put(master);

	return ret;
}
Example #10
static void pxa_uart_dma_init(struct uart_pxa_port *up)
{
	struct uart_pxa_dma *pxa_dma = &up->uart_dma;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (NULL == pxa_dma->rxdma_chan) {
		pxa_dma->rxdma_chan = dma_request_slave_channel(up->port.dev,
								"rx");
		if (NULL == pxa_dma->rxdma_chan)
			goto out;
	}

	if (NULL == pxa_dma->txdma_chan) {
		pxa_dma->txdma_chan = dma_request_slave_channel(up->port.dev,
								"tx");
		if (NULL == pxa_dma->txdma_chan)
			goto err_txdma;
	}

	if (NULL == pxa_dma->txdma_addr) {
		pxa_dma->txdma_addr = dma_alloc_coherent(up->port.dev,
			DMA_BLOCK, &pxa_dma->txdma_addr_phys, GFP_KERNEL);
		if (!pxa_dma->txdma_addr)
			goto txdma_err_alloc;
	}

	if (NULL == pxa_dma->rxdma_addr) {
		pxa_dma->rxdma_addr = dma_alloc_coherent(up->port.dev,
			DMA_BLOCK, &pxa_dma->rxdma_addr_phys, GFP_KERNEL);
		if (!pxa_dma->rxdma_addr)
			goto rxdma_err_alloc;
	}

#ifdef CONFIG_PM
	pxa_dma->tx_buf_save = kmalloc(DMA_BLOCK, GFP_KERNEL);
	if (!pxa_dma->tx_buf_save)
		goto buf_err_alloc;
#endif

	pxa_dma->dma_status = 0;
	return;

#ifdef CONFIG_PM
buf_err_alloc:
	dma_free_coherent(up->port.dev, DMA_BLOCK, pxa_dma->rxdma_addr,
			  pxa_dma->rxdma_addr_phys);
	pxa_dma->rxdma_addr = NULL;
#endif
rxdma_err_alloc:
	dma_free_coherent(up->port.dev, DMA_BLOCK, pxa_dma->txdma_addr,
			  pxa_dma->txdma_addr_phys);
	pxa_dma->txdma_addr = NULL;
txdma_err_alloc:
	dma_release_channel(pxa_dma->txdma_chan);
	pxa_dma->txdma_chan = NULL;
err_txdma:
	dma_release_channel(pxa_dma->rxdma_chan);
	pxa_dma->rxdma_chan = NULL;
out:
	return;
}
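
The pxa_uart_dma_init() example above, like the SiRF probes, pairs each requested channel with a coherent DMA buffer and unwinds both in reverse order when something fails. A condensed sketch of that pairing, with hypothetical names (my_rx, my_rx_setup, my_rx_teardown):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/gfp.h>

struct my_rx {
	struct dma_chan	*chan;
	void		*buf;
	dma_addr_t	buf_phys;
	size_t		buf_len;
};

/* Request the channel first, then the buffer; undo in reverse order. */
static int my_rx_setup(struct device *dev, struct my_rx *rx, size_t len)
{
	rx->chan = dma_request_slave_channel(dev, "rx");
	if (!rx->chan)
		return -ENODEV;

	rx->buf = dma_alloc_coherent(dev, len, &rx->buf_phys, GFP_KERNEL);
	if (!rx->buf) {
		dma_release_channel(rx->chan);
		rx->chan = NULL;
		return -ENOMEM;
	}
	rx->buf_len = len;

	return 0;
}

static void my_rx_teardown(struct device *dev, struct my_rx *rx)
{
	dma_free_coherent(dev, rx->buf_len, rx->buf, rx->buf_phys);
	dma_release_channel(rx->chan);
}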
Example #11
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dma_chan) {
		dev_err(tup->uport.dev,
			"Dma channel is not available, will try later\n");
		return -EPROBE_DEFER;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				 &dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		dma_buf = tup->uport.state->xmit.buf;
	}

	dma_sconfig.slave_id = tup->dma_req_sel;
	if (dma_to_memory) {
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
	} else {
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		goto scrub;
	}

	if (dma_to_memory) {
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}
	return 0;

scrub:
	dma_release_channel(dma_chan);
	return ret;
}
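
Both Tegra variants finish by programming the channel with dmaengine_slave_config() and release it again if the controller rejects the configuration. A stripped-down sketch of that step, assuming a device-to-memory channel; the FIFO address and burst size are placeholder values:

#include <linux/dmaengine.h>

/* Configure a channel for device-to-memory transfers; release on failure. */
static int my_config_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 4,
	};
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0)
		dma_release_channel(chan);

	return ret;
}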
Example #12
int serial8250_request_dma(struct uart_8250_port *p)
{
    struct uart_8250_dma	*dma = p->dma;
    dma_cap_mask_t		mask;

    /* Default slave configuration parameters */
    dma->rxconf.direction		= DMA_DEV_TO_MEM;
    dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
    dma->rxconf.src_addr		= p->port.mapbase + UART_RX;

    dma->txconf.direction		= DMA_MEM_TO_DEV;
    dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
    dma->txconf.dst_addr		= p->port.mapbase + UART_TX;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* Get a channel for RX */
    dma->rxchan = dma_request_slave_channel_compat(mask,
                  dma->fn, dma->rx_param,
                  p->port.dev, "rx");
    if (!dma->rxchan)
        return -ENODEV;

    dmaengine_slave_config(dma->rxchan, &dma->rxconf);

    /* Get a channel for TX */
    dma->txchan = dma_request_slave_channel_compat(mask,
                  dma->fn, dma->tx_param,
                  p->port.dev, "tx");
    if (!dma->txchan) {
        dma_release_channel(dma->rxchan);
        return -ENODEV;
    }

    dmaengine_slave_config(dma->txchan, &dma->txconf);

    /* RX buffer */
    if (!dma->rx_size)
        dma->rx_size = PAGE_SIZE;

    dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
                                     &dma->rx_addr, GFP_KERNEL);
    if (!dma->rx_buf)
        goto err;

    /* TX buffer */
    dma->tx_addr = dma_map_single(dma->txchan->device->dev,
                                  p->port.state->xmit.buf,
                                  UART_XMIT_SIZE,
                                  DMA_TO_DEVICE);
    if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
        dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
                          dma->rx_buf, dma->rx_addr);
        goto err;
    }

    dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

    return 0;
err:
    dma_release_channel(dma->rxchan);
    dma_release_channel(dma->txchan);

    return -ENOMEM;
}
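
serial8250_request_dma() maps the already-existing transmit buffer with dma_map_single() rather than allocating a coherent buffer, so the result has to be checked with dma_mapping_error() and the channels released if the mapping fails. A reduced sketch of that TX-side step (my_map_tx, buf and len are placeholders):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

/* Map an existing buffer for TX; release the channel if the mapping fails. */
static int my_map_tx(struct device *dev, struct dma_chan *chan,
		     void *buf, size_t len, dma_addr_t *addr)
{
	*addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	return 0;
}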
Example #13
void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}
Example #14
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	struct resource *res;
	int ret;
	struct dma_slave_config slv_cfg = {
		.src_maxburst = 1,
	};
	struct dma_slave_config tx_slv_cfg = {
		.dst_maxburst = 2,
	};
	const struct of_device_id *match;

	match = of_match_node(sirfsoc_uart_ids, np);
	sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
	if (!sirfport) {
		ret = -ENOMEM;
		goto err;
	}
	sirfport->port.line = of_alias_get_id(np, "serial");
	sirf_ports[sirfport->port.line] = sirfport;
	sirfport->port.iotype = UPIO_MEM;
	sirfport->port.flags = UPF_BOOT_AUTOCONF;
	port = &sirfport->port;
	port->dev = &pdev->dev;
	port->private_data = sirfport;
	sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

	sirfport->hw_flow_ctrl =
		of_property_read_bool(np, "uart-has-rtscts") ||
		of_property_read_bool(np, "sirf,uart-has-rtscts") /* deprecated */;
	if (of_device_is_compatible(np, "sirf,prima2-uart") ||
		of_device_is_compatible(np, "sirf,atlas7-uart"))
		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
	if (of_device_is_compatible(np, "sirf,prima2-usp-uart") ||
	    of_device_is_compatible(np, "sirf,atlas7-usp-uart")) {
		sirfport->uart_reg->uart_type =	SIRF_USP_UART;
		if (!sirfport->hw_flow_ctrl)
			goto usp_no_flow_control;
		if (of_find_property(np, "cts-gpios", NULL))
			sirfport->cts_gpio =
				of_get_named_gpio(np, "cts-gpios", 0);
		else
			sirfport->cts_gpio = -1;
		if (of_find_property(np, "rts-gpios", NULL))
			sirfport->rts_gpio =
				of_get_named_gpio(np, "rts-gpios", 0);
		else
			sirfport->rts_gpio = -1;

		if ((!gpio_is_valid(sirfport->cts_gpio) ||
			 !gpio_is_valid(sirfport->rts_gpio))) {
			ret = -EINVAL;
			dev_err(&pdev->dev,
				"Usp flow control must have cts and rts gpio");
			goto err;
		}
		ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
				"usp-cts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request cts gpio");
			goto err;
		}
		gpio_direction_input(sirfport->cts_gpio);
		ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
				"usp-rts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request rts gpio");
			goto err;
		}
		gpio_direction_output(sirfport->rts_gpio, 1);
	}
usp_no_flow_control:
	if (of_device_is_compatible(np, "sirf,atlas7-uart") ||
	    of_device_is_compatible(np, "sirf,atlas7-usp-uart"))
		sirfport->is_atlas7 = true;

	if (of_property_read_u32(np, "fifosize", &port->fifosize)) {
		dev_err(&pdev->dev,
			"Unable to find fifosize in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->mapbase = res->start;
	port->membase = devm_ioremap(&pdev->dev,
			res->start, resource_size(res));
	if (!port->membase) {
		dev_err(&pdev->dev, "Cannot remap resource.\n");
		ret = -ENOMEM;
		goto err;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->irq = res->start;

	sirfport->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sirfport->clk)) {
		ret = PTR_ERR(sirfport->clk);
		goto err;
	}
	port->uartclk = clk_get_rate(sirfport->clk);

	port->ops = &sirfsoc_uart_ops;
	spin_lock_init(&port->lock);

	platform_set_drvdata(pdev, sirfport);
	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
	if (ret != 0) {
		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
		goto err;
	}

	sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
	sirfport->rx_dma_items.xmit.buf =
		dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
		&sirfport->rx_dma_items.dma_addr, GFP_KERNEL);
	if (!sirfport->rx_dma_items.xmit.buf) {
		dev_err(port->dev, "Uart alloc bufa failed\n");
		ret = -ENOMEM;
		goto alloc_coherent_err;
	}
	sirfport->rx_dma_items.xmit.head =
		sirfport->rx_dma_items.xmit.tail = 0;
	if (sirfport->rx_dma_chan)
		dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
	sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
	if (sirfport->tx_dma_chan)
		dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
	if (sirfport->rx_dma_chan) {
		hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback;
		sirfport->is_hrt_enabled = false;
	}

	return 0;
alloc_coherent_err:
	dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
			sirfport->rx_dma_items.xmit.buf,
			sirfport->rx_dma_items.dma_addr);
	dma_release_channel(sirfport->rx_dma_chan);
err:
	return ret;
}

static int sirfsoc_uart_remove(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_remove_one_port(&sirfsoc_uart_drv, port);
	if (sirfport->rx_dma_chan) {
		dmaengine_terminate_all(sirfport->rx_dma_chan);
		dma_release_channel(sirfport->rx_dma_chan);
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				sirfport->rx_dma_items.xmit.buf,
				sirfport->rx_dma_items.dma_addr);
	}
	if (sirfport->tx_dma_chan) {
		dmaengine_terminate_all(sirfport->tx_dma_chan);
		dma_release_channel(sirfport->tx_dma_chan);
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int
sirfsoc_uart_suspend(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_suspend_port(&sirfsoc_uart_drv, port);
	return 0;
}

static int sirfsoc_uart_resume(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_resume_port(&sirfsoc_uart_drv, port);
	return 0;
}
#endif

static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};

static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.of_match_table = sirfsoc_uart_ids,
		.pm	= &sirfsoc_uart_pm_ops,
	},
};

static int __init sirfsoc_uart_init(void)
{
	int ret = 0;

	ret = uart_register_driver(&sirfsoc_uart_drv);
	if (ret)
		goto out;

	ret = platform_driver_register(&sirfsoc_uart_driver);
	if (ret)
		uart_unregister_driver(&sirfsoc_uart_drv);
out:
	return ret;
}
module_init(sirfsoc_uart_init);

static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
Example #15
static int mxs_mmc_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	struct resource *iores;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	struct mxs_ssp *ssp;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ssp->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id) of_id->data;

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_mmc_free;

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!ssp->dmach) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = -ENODEV;
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_clk_disable;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	spin_lock_init(&host->lock);

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	if (ssp->dmach)
		dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}
Example #16
/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	struct mtd_part_parser_data ppdata = {};
	int res;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (rc == NULL) {
		dev_err(&pdev->dev, "No memory resource found for device\n");
		return -EBUSY;
	}

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	host->io_base_dma = rc->start;

	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
			host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	mtd = &host->mtd;
	chip = &host->nand_chip;
	chip->priv = host;
	mtd->priv = chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock failure\n");
		res = -ENOENT;
		goto err_exit1;
	}
	clk_enable(host->clk);

	/* Set NAND IO addresses and command/ready functions */
	chip->IO_ADDR_R = SLC_DATA(host->io_base);
	chip->IO_ADDR_W = SLC_DATA(host->io_base);
	chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->dev_ready = lpc32xx_nand_device_ready;
	chip->chip_delay = 20; /* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->read_byte = lpc32xx_nand_read_byte;
	chip->read_buf = lpc32xx_nand_read_buf;
	chip->write_buf = lpc32xx_nand_write_buf;
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
	chip->ecc.correct = nand_correct_data;
	chip->ecc.strength = 1;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;

	/* bitflip_threshold's default is defined as ecc_strength anyway.
	 * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
	 * being 0, it causes bad block table scanning errors in
	 * nand_scan_tail(), so preparing it here already. */
	mtd->bitflip_threshold = chip->ecc.strength;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
				      GFP_KERNEL);
	if (host->data_buf == NULL) {
		res = -ENOMEM;
		goto err_exit2;
	}

	res = lpc32xx_nand_dma_setup(host);
	if (res) {
		res = -EIO;
		goto err_exit2;
	}

	/* Find NAND device */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_exit3;
	}

	/* OOB and ECC CPU and DMA work areas */
	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		chip->ecc.layout = &lpc32xx_nand_oob_16;

	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = chip->ecc.postpad = 0;

	/* Avoid extra scan if using BBT, setup BBT support */
	if (host->ncfg->use_bbt) {
		chip->bbt_options |= NAND_BBT_USE_FLASH;

		/*
		 * Use a custom BBT marker setup for small page FLASH that
		 * won't interfere with the ECC layout. Large and huge page
		 * FLASH use the standard layout.
		 */
		if (mtd->writesize <= 512) {
			chip->bbt_td = &bbt_smallpage_main_descr;
			chip->bbt_md = &bbt_smallpage_mirror_descr;
		}
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit3;
	}

	mtd->name = "nxp_lpc3220_slc";
	ppdata.of_node = pdev->dev.of_node;
	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
					host->ncfg->num_parts);
	if (!res)
		return res;

	nand_release(mtd);

err_exit3:
	dma_release_channel(host->dma_chan);
err_exit2:
	clk_disable(host->clk);
err_exit1:
	lpc32xx_wp_enable(host);

	return res;
}
Example #17
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}
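
The at91_twi code above treats DMA as strictly optional: any error other than -EPROBE_DEFER just means "carry on without DMA". A minimal sketch of that decision for a single TX channel (my_optional_tx_chan is hypothetical):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/*
 * Returns a channel, NULL (fall back to PIO) or ERR_PTR(-EPROBE_DEFER)
 * (retry probe once the DMA controller has shown up).
 */
static struct dma_chan *my_optional_tx_chan(struct device *dev)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
		dev_warn(dev, "no TX DMA channel, using PIO\n");
		return NULL;
	}

	return chan;
}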
Example #18
static int stm32_spdifrx_probe(struct platform_device *pdev)
{
	struct stm32_spdifrx_data *spdifrx;
	struct reset_control *rst;
	const struct snd_dmaengine_pcm_config *pcm_config = NULL;
	int ret;

	spdifrx = devm_kzalloc(&pdev->dev, sizeof(*spdifrx), GFP_KERNEL);
	if (!spdifrx)
		return -ENOMEM;

	spdifrx->pdev = pdev;
	init_completion(&spdifrx->cs_completion);
	spin_lock_init(&spdifrx->lock);

	platform_set_drvdata(pdev, spdifrx);

	ret = stm_spdifrx_parse_of(pdev, spdifrx);
	if (ret)
		return ret;

	spdifrx->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "kclk",
						    spdifrx->base,
						    spdifrx->regmap_conf);
	if (IS_ERR(spdifrx->regmap)) {
		dev_err(&pdev->dev, "Regmap init failed\n");
		return PTR_ERR(spdifrx->regmap);
	}

	ret = devm_request_irq(&pdev->dev, spdifrx->irq, stm32_spdifrx_isr, 0,
			       dev_name(&pdev->dev), spdifrx);
	if (ret) {
		dev_err(&pdev->dev, "IRQ request returned %d\n", ret);
		return ret;
	}

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(rst)) {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	ret = devm_snd_soc_register_component(&pdev->dev,
					      &stm32_spdifrx_component,
					      stm32_spdifrx_dai,
					      ARRAY_SIZE(stm32_spdifrx_dai));
	if (ret)
		return ret;

	ret = stm32_spdifrx_dma_ctrl_register(&pdev->dev, spdifrx);
	if (ret)
		goto error;

	pcm_config = &stm32_spdifrx_pcm_config;
	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, pcm_config, 0);
	if (ret) {
		dev_err(&pdev->dev, "PCM DMA register returned %d\n", ret);
		goto error;
	}

	return 0;

error:
	if (spdifrx->ctrl_chan)
		dma_release_channel(spdifrx->ctrl_chan);
	if (spdifrx->dmab)
		snd_dma_free_pages(spdifrx->dmab);

	return ret;
}
Example #19
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_info("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	return 0;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

	return rc;
}
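
Unlike the slave-DMA examples, ntb_perf_thread() asks for a generic memcpy-capable channel by capability mask rather than by device binding; the channel is still returned with dma_release_channel() on the error path. A minimal sketch without a filter function, so any DMA_MEMCPY channel is accepted:

#include <linux/dmaengine.h>

/* Grab any memcpy-capable channel; caller must dma_release_channel() it. */
static struct dma_chan *my_get_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	return dma_request_channel(mask, NULL, NULL);
}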
Example #20
/**
 * i2s_dma_start - prepare and reserve dma channels
 * @drv_data: intel_mid_i2s_hdl pointer to the driver data (context)
 *
 * "ssp open" context and dmac1 should already be filled in drv_data
 *
 * Output parameters
 *      int : should be zero, else it means error code
 */
static int i2s_dma_start(struct intel_mid_i2s_hdl *drv_data)
{
	struct intel_mid_dma_slave *rxs, *txs;
	struct pci_dev *l_pdev;
	struct intel_mid_i2s_settings *ssp_settings =
						&(drv_data->current_settings);
	dma_cap_mask_t mask;
	int retval = 0;
	int temp = 0;

	dev_dbg(&drv_data->pdev->dev, "DMAC1 start\n");
	drv_data->txchan = NULL;
	drv_data->rxchan = NULL;
	l_pdev = drv_data->pdev;

	if (ssp_settings->ssp_active_rx_slots_map) {
		/* 1. init rx channel */
		rxs = &drv_data->dmas_rx;
		rxs->dma_slave.direction = DMA_FROM_DEVICE;
		rxs->hs_mode = LNW_DMA_HW_HS;
		rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
		temp = i2s_compute_dma_width(ssp_settings->data_size,
					&rxs->dma_slave.src_addr_width);

		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"RX DMA Channel Bad data_size = %d\n",
				ssp_settings->data_size);
			retval = -1;
			goto err_exit;

		}
		rxs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		temp = i2s_compute_dma_msize(
					ssp_settings->ssp_rx_fifo_threshold,
					&rxs->dma_slave.src_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"RX DMA Channel Bad RX FIFO Threshold src= %d\n",
				ssp_settings->ssp_rx_fifo_threshold);
			retval = -2;
			goto err_exit;

		}

		temp = i2s_compute_dma_msize(
					ssp_settings->ssp_rx_fifo_threshold,
					&rxs->dma_slave.dst_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"RX DMA Channel Bad RX FIFO Threshold dst= %d\n",
				ssp_settings->ssp_rx_fifo_threshold);
			retval = -3;
			goto err_exit;

		}

		rxs->device_instance = drv_data->device_instance;
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		dma_cap_set(DMA_SLAVE, mask);
		drv_data->rxchan = dma_request_channel(mask,
							chan_filter, drv_data);
		if (!drv_data->rxchan) {
			dev_err(&(drv_data->pdev->dev),
				"Could not get Rx channel\n");
			retval = -4;
			goto err_exit;
		}

		temp = drv_data->rxchan->device->device_control(
					drv_data->rxchan, DMA_SLAVE_CONFIG,
					(unsigned long) &rxs->dma_slave);
		if (temp) {
			dev_err(&(drv_data->pdev->dev),
				"Rx slave control failed\n");
			retval = -5;
			goto err_exit;
		}

	}

	if (ssp_settings->ssp_active_tx_slots_map) {
		/* 2. init tx channel */
		txs = &drv_data->dmas_tx;
		txs->dma_slave.direction = DMA_TO_DEVICE;
		txs->hs_mode = LNW_DMA_HW_HS;
		txs->cfg_mode = LNW_DMA_MEM_TO_PER;

		txs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		temp = i2s_compute_dma_width(ssp_settings->data_size,
					&txs->dma_slave.dst_addr_width);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"TX DMA Channel Bad data_size = %d\n",
				ssp_settings->data_size);
			retval = -6;
			goto err_exit;

		}

		temp = i2s_compute_dma_msize(
				ssp_settings->ssp_tx_fifo_threshold + 1,
				&txs->dma_slave.src_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"TX DMA Channel Bad TX FIFO Threshold src= %d\n",
				ssp_settings->ssp_tx_fifo_threshold);
			retval = -7;
			goto err_exit;

		}

		temp = i2s_compute_dma_msize(
				ssp_settings->ssp_tx_fifo_threshold + 1,
						&txs->dma_slave.dst_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"TX DMA Channel Bad TX FIFO Threshold dst= %d\n",
				ssp_settings->ssp_tx_fifo_threshold);
			retval = -8;
			goto err_exit;

		}

		txs->device_instance = drv_data->device_instance;
		dma_cap_set(DMA_SLAVE, mask);
		dma_cap_set(DMA_MEMCPY, mask);
		drv_data->txchan = dma_request_channel(mask,
							chan_filter, drv_data);

		if (!drv_data->txchan) {
			dev_err(&(drv_data->pdev->dev),
				"Could not get Tx channel\n");
			retval = -10;
			goto err_exit;
		}

		temp = drv_data->txchan->device->device_control(
					drv_data->txchan, DMA_SLAVE_CONFIG,
					(unsigned long) &txs->dma_slave);
		if (temp) {
			dev_err(&(drv_data->pdev->dev),
				"Tx slave control failed\n");
			retval = -9;
			goto err_exit;
		}
	}

	return retval;

err_exit:
	if (drv_data->txchan)
		dma_release_channel(drv_data->txchan);
	if (drv_data->rxchan)
		dma_release_channel(drv_data->rxchan);
	drv_data->rxchan = NULL;
	drv_data->txchan = NULL;
	return retval;
}