Example #1
static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
{
    struct ux500_dma_channel *ux500_channel = NULL;
    struct musb *musb = controller->private_data;
    struct device *dev = musb->controller;
    struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
    struct ux500_musb_board_data *data;
    struct dma_channel *dma_channel = NULL;
    char **chan_names;
    u32 ch_num;
    u8 dir;
    u8 is_tx = 0;

    void **param_array;
    struct ux500_dma_channel *channel_array;
    dma_cap_mask_t mask;

    if (!plat) {
        dev_err(musb->controller, "No platform data\n");
        return -EINVAL;
    }

    data = plat->board_data;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* Prepare the loop for RX channels */
    channel_array = controller->rx_channel;
    param_array = data ? data->dma_rx_param_array : NULL;
    chan_names = (char **)iep_chan_names;

    for (dir = 0; dir < 2; dir++) {
        for (ch_num = 0;
                ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
                ch_num++) {
            ux500_channel = &channel_array[ch_num];
            ux500_channel->controller = controller;
            ux500_channel->ch_num = ch_num;
            ux500_channel->is_tx = is_tx;

            dma_channel = &(ux500_channel->channel);
            dma_channel->private_data = ux500_channel;
            dma_channel->status = MUSB_DMA_STATUS_FREE;
            dma_channel->max_len = SZ_16M;

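            /* prefer a named slave channel (DT/ACPI); fall back to the legacy filter + parameter lookup */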
            ux500_channel->dma_chan =
                dma_request_slave_channel(dev, chan_names[ch_num]);

            if (!ux500_channel->dma_chan)
                ux500_channel->dma_chan =
                    dma_request_channel(mask,
                                        data ?
                                        data->dma_filter :
                                        NULL,
                                        param_array[ch_num]);

            if (!ux500_channel->dma_chan) {
                ERR("Dma pipe allocation error dir=%d ch=%d\n",
                    dir, ch_num);

                /* Release already allocated channels */
                ux500_dma_controller_stop(controller);

                return -EBUSY;
            }

        }

        /* Prepare the loop for TX channels */
        channel_array = controller->tx_channel;
        param_array = data ? data->dma_tx_param_array : NULL;
        chan_names = (char **)oep_chan_names;
        is_tx = 1;
    }

    return 0;
}
Example #2
File: mmp_tdma.c, Project: AK101111/linux
static int mmp_tdma_probe(struct platform_device *pdev)
{
	enum mmp_tdma_type type;
	const struct of_device_id *of_id;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;
	struct gen_pool *pool = NULL;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
		type = (enum mmp_tdma_type) of_id->data;
	else
		type = platform_get_device_id(pdev)->driver_data;

	/* always have couple channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;

	for (i = 0; i < chan_num; i++) {
		if (platform_get_irq(pdev, i) > 0)
			irq_num++;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(tdev->base))
		return PTR_ERR(tdev->base);

	INIT_LIST_HEAD(&tdev->device.channels);

	if (pdev->dev.of_node)
		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
	else
		pool = sram_get_gpool("asram");
	if (!pool) {
		dev_err(&pdev->dev, "asram pool not available\n");
		return -ENOMEM;
	}

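	/* fewer IRQs than channels: register one shared interrupt for the whole device */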
	if (irq_num != chan_num) {
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
			mmp_tdma_int_handler, 0, "tdma", tdev);
		if (ret)
			return ret;
	}

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_config = mmp_tdma_config;
	tdev->device.device_pause = mmp_tdma_pause_chan;
	tdev->device.device_resume = mmp_tdma_resume_chan;
	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dma_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
							mmp_tdma_xlate, tdev);
		if (ret) {
			dev_err(tdev->device.dev,
				"failed to register controller\n");
			dma_async_device_unregister(&tdev->device);
		}
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
Example #3
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	/*
	 * All known devices, which use DT for configuration, support
	 * memory-to-memory transfers. So enable it by default.
	 */
	pdata->is_memcpy = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

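	/* "data-width" gives widths in bytes; the legacy "data_width" property stores the power-of-two exponent */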
	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since DMA device is powered off.
	 * Moreover we have no possibility to check if the platform is affected
	 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
	 * unconditionally. On the other hand we can't use
	 * pm_runtime_suspended() because runtime PM framework is not fully
	 * used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
Example #4
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	int ret;

	host->dma_active = false;

	if (pdata) {
		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
			return;
	} else if (!host->pd->dev.of_node) {
		return;
	}

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

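	/* platform data supplies slave IDs for the shdma filter; with DT the "tx"/"rx" channel names are used */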
	host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				pdata ? (void *)pdata->slave_id_tx : NULL,
				&host->pd->dev, "tx");
	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	/* In the OF case the driver will get the slave ID from the DT */
	if (pdata)
		cfg.slave_id = pdata->slave_id_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = res->start + MMCIF_CE_DATA;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto ecfgtx;

	host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				pdata ? (void *)pdata->slave_id_rx : NULL,
				&host->pd->dev, "rx");
	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto erqrx;

	if (pdata)
		cfg.slave_id = pdata->slave_id_rx;
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = res->start + MMCIF_CE_DATA;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto ecfgrx;

	return;

ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
erqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
Example #5
static int ux500_dma_controller_start(struct dma_controller *c)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);
	struct ux500_dma_channel *ux500_channel = NULL;
	struct musb *musb = controller->private_data;
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct ux500_musb_board_data *data = plat->board_data;
	struct dma_channel *dma_channel = NULL;
	u32 ch_num;
	u8 dir;
	u8 is_tx = 0;

	void **param_array;
	struct ux500_dma_channel *channel_array;
	u32 ch_count;
	dma_cap_mask_t mask;

	if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) ||
		(data->num_tx_channels > UX500_MUSB_DMA_NUM_TX_CHANNELS))
		return -EINVAL;

	controller->num_rx_channels = data->num_rx_channels;
	controller->num_tx_channels = data->num_tx_channels;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Prepare the loop for RX channels */
	channel_array = controller->rx_channel;
	ch_count = data->num_rx_channels;
	param_array = data->dma_rx_param_array;

	for (dir = 0; dir < 2; dir++) {
		for (ch_num = 0; ch_num < ch_count; ch_num++) {
			ux500_channel = &channel_array[ch_num];
			ux500_channel->controller = controller;
			ux500_channel->ch_num = ch_num;
			ux500_channel->is_tx = is_tx;

			dma_channel = &(ux500_channel->channel);
			dma_channel->private_data = ux500_channel;
			dma_channel->status = MUSB_DMA_STATUS_FREE;
			dma_channel->max_len = SZ_16M;

			ux500_channel->dma_chan = dma_request_channel(mask,
							data->dma_filter,
							param_array[ch_num]);
			if (!ux500_channel->dma_chan) {
				ERR("Dma pipe allocation error dir=%d ch=%d\n",
					dir, ch_num);

				/* Release already allocated channels */
				ux500_dma_controller_stop(c);

				return -EBUSY;
			}

		}

		/* Prepare the loop for TX channels */
		channel_array = controller->tx_channel;
		ch_count = data->num_tx_channels;
		param_array = data->dma_tx_param_array;
		is_tx = 1;
	}

	return 0;
}
Example #6
static int __init sdma_probe(struct platform_device *pdev)
{
	int ret;
	int irq;
	struct resource *iores;
	struct sdma_platform_data *pdata = pdev->dev.platform_data;
	int i;
	struct sdma_engine *sdma;

	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	sdma->dev = &pdev->dev;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0 || !pdata) {
		ret = -EINVAL;
		goto err_irq;
	}

	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	sdma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdma->clk)) {
		ret = PTR_ERR(sdma->clk);
		goto err_clk;
	}

	sdma->regs = ioremap(iores->start, resource_size(iores));
	if (!sdma->regs) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
	if (ret)
		goto err_request_irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	sdma->version = pdata->sdma_version;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		sdmac->channel = i;

		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also means
		 * that channel 0 in dmaengine counting matches sdma channel 1.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
					&sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	if (pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version);

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_control = sdma_control;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	dev_info(sdma->dev, "initialized\n");

	return 0;

err_init:
	kfree(sdma->script_addrs);
err_alloc:
	free_irq(irq, sdma);
err_request_irq:
	iounmap(sdma->regs);
err_ioremap:
	clk_put(sdma->clk);
err_clk:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
err_irq:
	kfree(sdma);
	return ret;
}
Example #7
static int __devinit mmp_tdma_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	enum mmp_tdma_type type = id->driver_data;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0;
	int chan_num = TDMA_CHANNEL_NUM;

	/* always have couple channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;
	iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!iores)
		return -EINVAL;

	if (resource_size(iores) != chan_num)
		tdev->irq = iores->start;
	else
		irq = iores->start;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
	if (!tdev->base)
		return -EADDRNOTAVAIL;

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);

	INIT_LIST_HEAD(&tdev->device.channels);

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		ret = mmp_tdma_chan_init(tdev, i, irq, type);
		if (ret)
			return ret;
	}

	if (tdev->irq) {
		ret = devm_request_irq(&pdev->dev, tdev->irq,
			mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
		if (ret)
			return ret;
	}

	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_control = mmp_tdma_control;
	tdev->device.copy_align = TDMA_ALIGNMENT;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dma_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
Example #8
File: omap.c, Project: AllenDou/linux
static int __devinit mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	unsigned sig;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}
	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	res = request_mem_region(res->start, resource_size(res),
				 pdev->name);
	if (res == NULL)
		return -EBUSY;

	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
	if (host == NULL) {
		ret = -ENOMEM;
		goto err_free_mem_region;
	}

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->mem_res = res;
	host->irq = irq;
	host->use_dma = 1;
	host->phys_base = host->mem_res->start;
	host->virt_base = ioremap(res->start, resource_size(res));
	if (!host->virt_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk)) {
		ret = PTR_ERR(host->iclk);
		goto err_free_mmc_host;
	}
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx_burst = -1;
	host->dma_rx_burst = -1;

	if (cpu_is_omap24xx())
		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
	else
		sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
	host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
#if 0
	if (!host->dma_tx) {
		dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);
		goto err_dma;
	}
#else
	if (!host->dma_tx)
		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);
#endif
	if (cpu_is_omap24xx())
		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
	else
		sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
	host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
#if 0
	if (!host->dma_rx) {
		dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);
		goto err_dma;
	}
#else
	if (!host->dma_rx)
		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);
#endif

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_dma;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);

	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
	if (!host->mmc_omap_wq) {
		ret = -ENOMEM;
		goto err_plat_cleanup;
	}

	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);

			goto err_destroy_wq;
		}
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_dma:
	if (host->dma_tx)
		dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
err_free_mmc_host:
	iounmap(host->virt_base);
err_ioremap:
	kfree(host);
err_free_mem_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}
Example #9
static int __init iop3xx_adma_cap_init(void)
{
	#ifdef CONFIG_ARCH_IOP32X /* the 32x DMA does not perform CRC32C */
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
	#else
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
	#endif

	#ifdef CONFIG_ARCH_IOP32X /* the 32x DMA does not perform CRC32C */
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
	#else
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
	#endif

	#ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
	#else
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
	#endif

	return 0;
}
Example #10
File: pata_pxa.c, Project: 0-T-0/ps4-linux
static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret = 0;

	/*
	 * Resource validation: four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap		= host->ports[0];
	ap->ops		= &pxa_ata_port_ops;
	ap->pio_mask	= ATA_PIO4;
	ap->mwdma_mask	= ATA_MWDMA2;

	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
								GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

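	/* slave config: 16-bit accesses, bursts of 32, both directions pointed at the DMA port base */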
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = pdata->dma_dreq;
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel_compat(mask, pxad_filter_fn,
						 &param, &pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;
	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}
Example #11
File: omap.c, Project: 7799/linux
static int mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	unsigned sig = 0;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}
	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -EPROBE_DEFER;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host),
			    GFP_KERNEL);
	if (host == NULL)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->virt_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->virt_base))
		return PTR_ERR(host->virt_base);

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->features = host->pdata->slots[0].features;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->irq = irq;
	host->phys_base = res->start;
	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk))
		return PTR_ERR(host->iclk);
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx_burst = -1;
	host->dma_rx_burst = -1;

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (res)
		sig = res->start;
	host->dma_tx = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, &pdev->dev, "tx");
	if (!host->dma_tx)
		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (res)
		sig = res->start;
	host->dma_rx = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, &pdev->dev, "rx");
	if (!host->dma_rx)
		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_dma;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	host->reg_shift = (mmc_omap7xx() ? 1 : 2);

	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
	if (!host->mmc_omap_wq) {
		ret = -ENOMEM;
		goto err_plat_cleanup;
	}

	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);

			goto err_destroy_wq;
		}
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_dma:
	if (host->dma_tx)
		dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
	return ret;
}
Example #12
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dma_chan) {
		dev_err(tup->uport.dev,
			"Dma channel is not available, will try later\n");
		return -EPROBE_DEFER;
	}

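	/* RX uses a dedicated coherent buffer; TX maps the port's circular xmit buffer for device reads */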
	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				 &dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		dma_buf = tup->uport.state->xmit.buf;
	}

	dma_sconfig.slave_id = tup->dma_req_sel;
	if (dma_to_memory) {
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
	} else {
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		goto scrub;
	}

	if (dma_to_memory) {
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}
	return 0;

scrub:
	dma_release_channel(dma_chan);
	return ret;
}
Example #13
static void pxa_uart_dma_init(struct uart_pxa_port *up)
{
	struct uart_pxa_dma *pxa_dma = &up->uart_dma;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (NULL == pxa_dma->rxdma_chan) {
		pxa_dma->rxdma_chan = dma_request_slave_channel(up->port.dev,
								"rx");
		if (NULL == pxa_dma->rxdma_chan)
			goto out;
	}

	if (NULL == pxa_dma->txdma_chan) {
		pxa_dma->txdma_chan = dma_request_slave_channel(up->port.dev,
								"tx");
		if (NULL == pxa_dma->txdma_chan)
			goto err_txdma;
	}

	if (NULL == pxa_dma->txdma_addr) {
		pxa_dma->txdma_addr = dma_alloc_coherent(up->port.dev,
			DMA_BLOCK, &pxa_dma->txdma_addr_phys, GFP_KERNEL);
		if (!pxa_dma->txdma_addr)
			goto txdma_err_alloc;
	}

	if (NULL == pxa_dma->rxdma_addr) {
		pxa_dma->rxdma_addr = dma_alloc_coherent(up->port.dev,
			DMA_BLOCK, &pxa_dma->rxdma_addr_phys, GFP_KERNEL);
		if (!pxa_dma->rxdma_addr)
			goto rxdma_err_alloc;
	}

#ifdef CONFIG_PM
	pxa_dma->tx_buf_save = kmalloc(DMA_BLOCK, GFP_KERNEL);
	if (!pxa_dma->tx_buf_save)
		goto buf_err_alloc;
#endif

	pxa_dma->dma_status = 0;
	return;

#ifdef CONFIG_PM
buf_err_alloc:
	dma_free_coherent(up->port.dev, DMA_BLOCK, pxa_dma->rxdma_addr,
			  pxa_dma->rxdma_addr_phys);
	pxa_dma->rxdma_addr = NULL;
#endif
rxdma_err_alloc:
	dma_free_coherent(up->port.dev, DMA_BLOCK, pxa_dma->txdma_addr,
			  pxa_dma->txdma_addr_phys);
	pxa_dma->txdma_addr = NULL;
txdma_err_alloc:
	dma_release_channel(pxa_dma->txdma_chan);
	pxa_dma->txdma_chan = NULL;
err_txdma:
	dma_release_channel(pxa_dma->rxdma_chan);
	pxa_dma->rxdma_chan = NULL;
out:
	return;
}
Example #14
int serial8250_request_dma(struct uart_8250_port *p)
{
    struct uart_8250_dma	*dma = p->dma;
    dma_cap_mask_t		mask;

    /* Default slave configuration parameters */
    dma->rxconf.direction		= DMA_DEV_TO_MEM;
    dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
    dma->rxconf.src_addr		= p->port.mapbase + UART_RX;

    dma->txconf.direction		= DMA_MEM_TO_DEV;
    dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
    dma->txconf.dst_addr		= p->port.mapbase + UART_TX;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* Get a channel for RX */
    dma->rxchan = dma_request_slave_channel_compat(mask,
                  dma->fn, dma->rx_param,
                  p->port.dev, "rx");
    if (!dma->rxchan)
        return -ENODEV;

    dmaengine_slave_config(dma->rxchan, &dma->rxconf);

    /* Get a channel for TX */
    dma->txchan = dma_request_slave_channel_compat(mask,
                  dma->fn, dma->tx_param,
                  p->port.dev, "tx");
    if (!dma->txchan) {
        dma_release_channel(dma->rxchan);
        return -ENODEV;
    }

    dmaengine_slave_config(dma->txchan, &dma->txconf);

    /* RX buffer */
    if (!dma->rx_size)
        dma->rx_size = PAGE_SIZE;

    dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
                                     &dma->rx_addr, GFP_KERNEL);
    if (!dma->rx_buf)
        goto err;

    /* TX buffer */
    dma->tx_addr = dma_map_single(dma->txchan->device->dev,
                                  p->port.state->xmit.buf,
                                  UART_XMIT_SIZE,
                                  DMA_TO_DEVICE);
    if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
        dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
                          dma->rx_buf, dma->rx_addr);
        goto err;
    }

    dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

    return 0;
err:
    dma_release_channel(dma->rxchan);
    dma_release_channel(dma->txchan);

    return -ENOMEM;
}
Example #15
static int __init iop3xx_adma_cap_init(void)
{
	#ifdef CONFIG_ARCH_IOP32X /* the 32x DMA does not perform CRC32C */
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
	#else
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
	#endif

	#ifdef CONFIG_ARCH_IOP32X /* the 32x DMA does not perform CRC32C */
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
	#else
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
	#endif

	#ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
	#else
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
	#endif

	return 0;
}
Example #16
static int tegra_adma_probe(struct platform_device *pdev)
{
	const struct tegra_adma_chip_data *cdata;
	struct tegra_adma *tdma;
	struct resource	*res;
	struct clk *clk;
	int ret, i;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "device match data not found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			    sizeof(struct tegra_adma_chan), GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->nr_channels = cdata->nr_channels;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

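	/* manage the "d_audio" clock through the PM clock framework so runtime PM can gate it */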
	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	clk = clk_get(&pdev->dev, "d_audio");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "ADMA clock not found\n");
		ret = PTR_ERR(clk);
		goto clk_destroy;
	}

	ret = pm_clk_add_clk(&pdev->dev, clk);
	if (ret) {
		clk_put(clk);
		goto clk_destroy;
	}

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto rpm_disable;

	ret = tegra_adma_init(tdma);
	if (ret)
		goto rpm_put;

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < tdma->nr_channels; i++) {
		struct tegra_adma_chan *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);

		tdc->irq = of_irq_get(pdev->dev.of_node, i);
		if (tdc->irq < 0) {
			ret = tdc->irq;
			goto irq_dispose;
		}

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_adma_desc_free;
		tdc->tdma = tdma;
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_adma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_adma_free_chan_resources;
	tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_adma_slave_config;
	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
		goto irq_dispose;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
		goto dma_remove;
	}

	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
		 tdma->nr_channels);

	return 0;

dma_remove:
	dma_async_device_unregister(&tdma->dma_dev);
irq_dispose:
	while (--i >= 0)
		irq_dispose_mapping(tdma->channels[i].irq);
rpm_put:
	pm_runtime_put_sync(&pdev->dev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
clk_destroy:
	pm_clk_destroy(&pdev->dev);

	return ret;
}
Example #17
static int __devinit rt_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct rt_dma_chan *rt_chan;
	int err;
	int ret;
#ifdef CONFIG_RT_DMA_HSDMA
	unsigned long reg_int_mask=0;
#else
	int reg;
#endif

	//printk("%s\n",__FUNCTION__);
	
	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;


	INIT_LIST_HEAD(&dma_dev->channels);
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	//dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
	dma_dev->device_tx_status = rt_dma_status;
	dma_dev->device_issue_pending = rt_dma_issue_pending;
	dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
	dma_dev->dev = &pdev->dev;

	rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
	if (!rt_chan)
		return -ENOMEM;

	spin_lock_init(&rt_chan->lock);
	INIT_LIST_HEAD(&rt_chan->chain);
	INIT_LIST_HEAD(&rt_chan->completed_slots);
	INIT_LIST_HEAD(&rt_chan->all_slots);
	rt_chan->common.device = dma_dev;
	rt_chan->txd.tx_submit = rt_dma_tx_submit;

	list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);

	err = dma_async_device_register(dma_dev);

	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		return 1;
	}
	
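	/* hook the DMA completion interrupt (HSDMA or GDMA, depending on the configuration) */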
#ifdef CONFIG_RT_DMA_HSDMA
	ret = request_irq(SURFBOARDINT_HSGDMA, rt_dma_interrupt_handler, IRQF_DISABLED, "HS_DMA", NULL);
#else
	ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler, IRQF_DISABLED, "GDMA", NULL);
#endif
	if(ret){
		pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
		return 1;
	}

#ifdef CONFIG_RT_DMA_HSDMA
		sysRegWrite(HSDMA_INT_MASK, reg_int_mask & ~(HSDMA_FE_INT_TX));	/* disable TX DONE interrupt */
		sysRegWrite(HSDMA_INT_MASK, reg_int_mask & ~(HSDMA_FE_INT_RX));	/* disable RX DONE interrupt */
		printk("reg_int_mask=%lu, INT_MASK= %x\n", reg_int_mask, sysRegRead(HSDMA_INT_MASK));
		HSDMA_init();
#else
	//set GDMA register in advance.
	reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);
#endif

	return 0;
}
Example #18
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.src_master = dma_spec->args[1];
	slave.dst_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.src_master >= dw->nr_masters ||
		    slave.dst_master >= dw->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.src_master = 1,
		.dst_master = 0,
	};

	return dw_dma_filter(chan, &slave);
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
		return NULL;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32(np, "dma-masters", &tmp)) {
		if (tmp > DW_DMA_MAX_NR_MASTERS)
			return NULL;

		pdata->nr_masters = tmp;
	}

	if (!of_property_read_u32_array(np, "data_width", arr,
				pdata->nr_masters))
		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct acpi_device_id *id;
	struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);
	if (!pdata && has_acpi_companion(dev)) {
		id = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (id)
			pdata = (struct dw_dma_platform_data *)id->driver_data;
	}

	chip->dev = dev;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip, pdata);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
Example #19
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_debug("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

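	/* wait for all worker threads to reach this point before starting the transfer */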
	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	atomic_inc(&perf->tdone);
	wake_up(pctx->wq);
	rc = 0;
	goto done;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

done:
	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return rc;
}
Example #20
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
Example #21
File: dma.c, Project: 513855417/linux
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			   struct rsnd_dma *dma, int id,
			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg = {};
	int is_play = rsnd_io_is_play(io);
	int ret;

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

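	/* with DT the channel comes from dma-names; otherwise fall back to the shdma filter keyed by id */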
	if (dev->of_node) {
		dmaen->chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	} else {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
						  (void *)(uintptr_t)id);
	}
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		goto rsnd_dma_channel_err;
	}

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "%s[%d] %pad -> %pad\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_attach_err;

	dmac->dmaen_num++;

	return 0;

rsnd_dma_attach_err:
	rsnd_dmaen_remove(mod, io, priv);
rsnd_dma_channel_err:

	/*
	 * DMA failed. try to PIO mode
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	return -EAGAIN;
}
Example #22
static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	dma_cap_mask_t dma_mask;
	struct dma_slave_config tx_slv_cfg = {
		.dst_maxburst = 2,
	};

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_SLAVE, dma_mask);
	sirfport->tx_dma_chan = dma_request_channel(dma_mask,
		(dma_filter_fn)sirfsoc_dma_filter_id,
		(void *)sirfport->tx_dma_no);
	if (!sirfport->tx_dma_chan) {
		dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
					sirfport->tx_dma_no);
		return  -EPROBE_DEFER;
	}
	dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);

	return 0;
}

static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	dma_cap_mask_t dma_mask;
	int ret;
	int i, j;
	struct dma_slave_config slv_cfg = {
		.src_maxburst = 2,
	};

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_SLAVE, dma_mask);
	sirfport->rx_dma_chan = dma_request_channel(dma_mask,
					(dma_filter_fn)sirfsoc_dma_filter_id,
					(void *)sirfport->rx_dma_no);
	if (!sirfport->rx_dma_chan) {
		dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
				sirfport->rx_dma_no);
		ret = -EPROBE_DEFER;
		goto request_err;
	}
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
		sirfport->rx_dma_items[i].xmit.buf =
			dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
			&sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
		if (!sirfport->rx_dma_items[i].xmit.buf) {
			dev_err(port->dev, "Uart alloc bufa failed\n");
			ret = -ENOMEM;
			goto alloc_coherent_err;
		}
		sirfport->rx_dma_items[i].xmit.head =
			sirfport->rx_dma_items[i].xmit.tail = 0;
	}
	dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);

	return 0;
alloc_coherent_err:
	for (j = 0; j < i; j++)
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				sirfport->rx_dma_items[j].xmit.buf,
				sirfport->rx_dma_items[j].dma_addr);
	dma_release_channel(sirfport->rx_dma_chan);
request_err:
	return ret;
}
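
The RX setup above allocates a small ring of coherent buffers and unwinds the ones already allocated if a later allocation fails. The same allocate-or-roll-back pattern in isolation is sketched below; RING_CNT, RING_BUF_SIZE and struct ring_buf are placeholders chosen for the sketch.

#include <linux/dma-mapping.h>

#define RING_CNT	4
#define RING_BUF_SIZE	4096

struct ring_buf {
	void		*cpu;
	dma_addr_t	dma;
};

static int alloc_ring(struct device *dev, struct ring_buf *ring)
{
	int i, j;

	for (i = 0; i < RING_CNT; i++) {
		ring[i].cpu = dma_alloc_coherent(dev, RING_BUF_SIZE,
						 &ring[i].dma, GFP_KERNEL);
		if (!ring[i].cpu)
			goto unwind;
	}
	return 0;

unwind:
	/* Free only the buffers that were successfully allocated. */
	for (j = 0; j < i; j++)
		dma_free_coherent(dev, RING_BUF_SIZE, ring[j].cpu, ring[j].dma);
	return -ENOMEM;
}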

static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
{
	dmaengine_terminate_all(sirfport->tx_dma_chan);
	dma_release_channel(sirfport->tx_dma_chan);
}

static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
{
	int i;
	struct uart_port *port = &sirfport->port;
	dmaengine_terminate_all(sirfport->rx_dma_chan);
	dma_release_channel(sirfport->rx_dma_chan);
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				sirfport->rx_dma_items[i].xmit.buf,
				sirfport->rx_dma_items[i].dma_addr);
}
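
Both uninit helpers stop the channel before releasing it, and only then free memory the engine may still have been writing to. A condensed, hedged sketch of that teardown ordering follows; the helper name and arguments are illustrative.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static void teardown_rx_dma(struct device *dev, struct dma_chan *chan,
			    void *buf, dma_addr_t addr, size_t size)
{
	dmaengine_terminate_all(chan);		/* stop any in-flight transfer */
	dma_release_channel(chan);		/* give the channel back */
	dma_free_coherent(dev, size, buf, addr);/* only now free the buffer */
}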
void __init at91_add_device_hdmac(void)
{
    dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
    dma_cap_set(DMA_SLAVE, atdma_pdata.cap_mask);
    platform_device_register(&at_hdmac_device);
}
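
at91_add_device_hdmac() advertises DMA_MEMCPY and DMA_SLAVE for the controller it registers. Below is a hedged sketch of what a memcpy-offload client of such a controller might look like; offload_memcpy() is an illustrative helper and dst/src are assumed to be already DMA-mapped bus addresses.

#include <linux/dmaengine.h>

static int offload_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);	/* any memcpy channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	dma_sync_wait(chan, cookie);	/* busy-wait until the copy completes */

	dma_release_channel(chan);
	return 0;
}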
Example #24
void __init iop13xx_platform_init(void)
{
	int i;
	u32 uart_idx, i2c_idx, adma_idx, plat_idx;
	struct platform_device *iop13xx_devices[IQ81340_MAX_PLAT_DEVICES];

	
	iop13xx_set_atu_mmr_bases();

	memset(iop13xx_devices, 0, sizeof(iop13xx_devices));

	if (init_uart == IOP13XX_INIT_UART_DEFAULT) {
		switch (iop13xx_dev_id()) {
		
		case 0x3380:
		case 0x3384:
		case 0x3388:
		case 0x338c:
			init_uart |= IOP13XX_INIT_UART_0;
			init_uart |= IOP13XX_INIT_UART_1;
			break;
		
		default:
			init_uart |= IOP13XX_INIT_UART_1;
		}
	}

	if (init_i2c == IOP13XX_INIT_I2C_DEFAULT) {
		switch (iop13xx_dev_id()) {
		
		case 0x3380:
		case 0x3384:
		case 0x3388:
		case 0x338c:
		case 0x3382:
		case 0x3386:
		case 0x338a:
		case 0x338e:
			init_i2c |= IOP13XX_INIT_I2C_0;
			init_i2c |= IOP13XX_INIT_I2C_1;
			init_i2c |= IOP13XX_INIT_I2C_2;
			break;
		
		default:
			init_i2c |= IOP13XX_INIT_I2C_1;
			init_i2c |= IOP13XX_INIT_I2C_2;
		}
	}

	if (init_adma == IOP13XX_INIT_ADMA_DEFAULT) {
		init_adma |= IOP13XX_INIT_ADMA_0;
		init_adma |= IOP13XX_INIT_ADMA_1;
		init_adma |= IOP13XX_INIT_ADMA_2;
	}

	plat_idx = 0;
	uart_idx = 0;
	i2c_idx = 0;

	
	if (init_uart & IOP13XX_INIT_UART_1) {
		PRINTK("Adding uart1 to platform device list\n");
		iop13xx_uart1.id = uart_idx++;
		iop13xx_devices[plat_idx++] = &iop13xx_uart1;
	}
	if (init_uart & IOP13XX_INIT_UART_0) {
		PRINTK("Adding uart0 to platform device list\n");
		iop13xx_uart0.id = uart_idx++;
		iop13xx_devices[plat_idx++] = &iop13xx_uart0;
	}

	for (i = 0; i < IQ81340_NUM_I2C; i++) {
		if ((init_i2c & (1 << i)) && IOP13XX_SETUP_DEBUG)
			printk("Adding i2c%d to platform device list\n", i);
		switch (init_i2c & (1 << i)) {
		case IOP13XX_INIT_I2C_0:
			iop13xx_i2c_0_controller.id = i2c_idx++;
			iop13xx_devices[plat_idx++] =
				&iop13xx_i2c_0_controller;
			break;
		case IOP13XX_INIT_I2C_1:
			iop13xx_i2c_1_controller.id = i2c_idx++;
			iop13xx_devices[plat_idx++] =
				&iop13xx_i2c_1_controller;
			break;
		case IOP13XX_INIT_I2C_2:
			iop13xx_i2c_2_controller.id = i2c_idx++;
			iop13xx_devices[plat_idx++] =
				&iop13xx_i2c_2_controller;
			break;
		}
	}

	
	adma_idx = 0;
	for (i = 0; i < IQ81340_NUM_ADMA; i++) {
		struct iop_adma_platform_data *plat_data;
		if ((init_adma & (1 << i)) && IOP13XX_SETUP_DEBUG)
			printk(KERN_INFO
				"Adding adma%d to platform device list\n", i);
		switch (init_adma & (1 << i)) {
		case IOP13XX_INIT_ADMA_0:
			iop13xx_adma_0_channel.id = adma_idx++;
			iop13xx_devices[plat_idx++] = &iop13xx_adma_0_channel;
			plat_data = &iop13xx_adma_0_data;
			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
			dma_cap_set(DMA_XOR, plat_data->cap_mask);
			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
			break;
		case IOP13XX_INIT_ADMA_1:
			iop13xx_adma_1_channel.id = adma_idx++;
			iop13xx_devices[plat_idx++] = &iop13xx_adma_1_channel;
			plat_data = &iop13xx_adma_1_data;
			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
			dma_cap_set(DMA_XOR, plat_data->cap_mask);
			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
			break;
		case IOP13XX_INIT_ADMA_2:
			iop13xx_adma_2_channel.id = adma_idx++;
			iop13xx_devices[plat_idx++] = &iop13xx_adma_2_channel;
			plat_data = &iop13xx_adma_2_data;
			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
			dma_cap_set(DMA_XOR, plat_data->cap_mask);
			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
			dma_cap_set(DMA_PQ, plat_data->cap_mask);
			dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
			break;
		}
	}

#ifdef CONFIG_MTD_PHYSMAP
	iq8134x_flash_resource.end = iq8134x_flash_resource.start +
				iq8134x_probe_flash_size() - 1;
	if (iq8134x_flash_resource.end > iq8134x_flash_resource.start)
		iop13xx_devices[plat_idx++] = &iq8134x_flash;
	else
		printk(KERN_ERR "%s: Failed to probe flash size\n", __func__);
#endif

	platform_add_devices(iop13xx_devices, plat_idx);
}
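
The ADMA channels registered above advertise DMA_XOR (and, on channel 2, DMA_PQ) through their capability masks, which is what RAID-style clients key on. Below is a hedged sketch of requesting an XOR-capable channel and running a single operation; xor_offload_once() and its arguments are illustrative, and the source addresses are assumed to be pre-mapped.

#include <linux/dmaengine.h>

static int xor_offload_once(dma_addr_t dst, dma_addr_t *srcs,
			    unsigned int src_cnt, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_XOR, mask);

	chan = dma_request_channel(mask, NULL, NULL);	/* any XOR-capable channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_xor(chan, dst, srcs, src_cnt, len,
				    DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	dma_sync_wait(chan, cookie);	/* wait for the XOR to complete */

	dma_release_channel(chan);
	return 0;
}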
Example #25
static int bcm2835_sdhost_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct clk *clk;
	struct resource *iomem;
	struct bcm2835_host *host;
	struct mmc_host *mmc;
	int ret;

	pr_debug("bcm2835_sdhost_probe\n");
	mmc = mmc_alloc_host(sizeof(*host), dev);
	if (!mmc)
		return -ENOMEM;

	mmc->ops = &bcm2835_sdhost_ops;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pio_timeout = msecs_to_jiffies(500);
	host->max_delay = 1; /* Warn if over 1ms */
	spin_lock_init(&host->lock);

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->ioaddr = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(host->ioaddr)) {
		ret = PTR_ERR(host->ioaddr);
		goto err;
	}

	host->phys_addr = iomem->start + BCM2835_VCMMU_SHIFT;
	pr_debug(" - ioaddr %lx, iomem->start %lx, phys_addr %lx\n",
		 (unsigned long)host->ioaddr,
		 (unsigned long)iomem->start,
		 (unsigned long)host->phys_addr);

	host->allow_dma = ALLOW_DMA;

	if (node) {
		/* Read any custom properties */
		of_property_read_u32(node,
				     "brcm,delay-after-stop",
				     &host->delay_after_stop);
		of_property_read_u32(node,
				     "brcm,overclock-50",
				     &host->overclock_50);
		of_property_read_u32(node,
				     "brcm,pio-limit",
				     &host->pio_limit);
		host->allow_dma = ALLOW_DMA &&
			!of_property_read_bool(node, "brcm,force-pio");
		host->debug = of_property_read_bool(node, "brcm,debug");
	}

	if (host->allow_dma) {
		if (node) {
			host->dma_chan_tx =
				dma_request_slave_channel(dev, "tx");
			host->dma_chan_rx =
				dma_request_slave_channel(dev, "rx");
		} else {
			dma_cap_mask_t mask;

			dma_cap_zero(mask);
			/* we don't care about the channel, any would work */
			dma_cap_set(DMA_SLAVE, mask);
			host->dma_chan_tx =
				dma_request_channel(mask, NULL, NULL);
			host->dma_chan_rx =
				dma_request_channel(mask, NULL, NULL);
		}
	}

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "could not get clk\n");
		ret = PTR_ERR(clk);
		goto err;
	}

	host->max_clk = clk_get_rate(clk);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		dev_err(dev, "get IRQ failed\n");
		ret = -EINVAL;
		goto err;
	}

	pr_debug(" - max_clk %lx, irq %d\n",
		 (unsigned long)host->max_clk,
		 (int)host->irq);

	if (node)
		mmc_of_parse(mmc);
	else
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	ret = bcm2835_sdhost_add_host(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	pr_debug("bcm2835_sdhost_probe -> OK\n");

	return 0;

err:
	pr_debug("bcm2835_sdhost_probe -> err %d\n", ret);
	mmc_free_host(mmc);

	return ret;
}
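
In the probe above the DMA channels are optional, so either request may legitimately come back NULL; once granted, however, a channel is a resource like any other. A small hedged sketch of a release helper that an error or remove path could call follows; the helper name is illustrative and this is not the driver's own cleanup code.

#include <linux/dmaengine.h>

static void release_optional_dma(struct dma_chan *tx, struct dma_chan *rx)
{
	/* Both channels are optional, so either pointer may be NULL. */
	if (tx)
		dma_release_channel(tx);
	if (rx)
		dma_release_channel(rx);
}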
Example #26
int __init p7_init_dma(void)
{
	int         err;
	int const   rev = p7_chiprev();

	pr_debug("p7: registering %s...\n", p7_dma_dev.dev.init_name);

	if (rev == P7_CHIPREV_R1) {
		/* The P7 first revision initializes the DMA controller in
		 * non-secure mode when coming out of reset, and it is not
		 * possible to switch back to secure mode. Since Linux, the
		 * cores and the L2 cache controller run in secure mode, and
		 * all DMA controller transactions going through the ACP port
		 * are flagged as non-secure, CPU and DMA accesses to the same
		 * address won't point to the same L2 cache internal location.
		 * Therefore, we must disable DMA RAM-to-RAM ACP accesses (and
		 * bypass the L2 cache) so transactions go directly onto the
		 * main AXI system bus (the one behind the L2 cache).
		 */
#define P7_NIC_REMAP    P7_NIC
#define NIC_NOACP       (1U << 7)
		__raw_writel(NIC_NOACP, MMIO_P2V(P7_NIC_REMAP));

		/* On R1, DMA interrupts are not shared */
		p7_dma_dev.irq[0] = P7_R1_DMA_ABORT_IRQ;
		p7_dma_dev.irq[1] = P7_R1_DMA5_IRQ;
		p7_dma_pdata.flushp = true;
	}
	else if (rev == P7_CHIPREV_R2 ||
	         rev == P7_CHIPREV_R3) {
		/*
		 * P7_NIC_REMAP is write-only, we can't check the REMAP_DRAM bit
		 * value. The assumption is made that it is already set at this point,
		 * so we add it to our bitmask.
		 */
#define NIC_REMAP_DRAM  (1U)
		__raw_writel(NIC_REMAP_DRAM | NIC_NOACP, MMIO_P2V(P7_NIC_REMAP));

		p7_dma_pdata.flushp = true;
	}

	dma_cap_set(DMA_MEMCPY, p7_dma_pdata.cap_mask);
	dma_cap_set(DMA_SLAVE, p7_dma_pdata.cap_mask);
	dma_cap_set(DMA_CYCLIC, p7_dma_pdata.cap_mask);

	err = amba_device_register(&p7_dma_dev, &iomem_resource);
	if (err)
		panic("p7: failed to register %s (%d)\n",
		      p7_dma_dev.dev.init_name,
		      err);

	/*
	 * We want to store the controller microcode in internal RAM for
	 * performance reasons.
	 * As amba_device holds a single resource and the pl330 driver does not
	 * handle multiple memory resources, we have to reserve the microcode
	 * memory region here.
	 * The related device must have been initialized (amba_device_register)
	 * before calling dma_declare_coherent_memory.
	 * Moreover, dma_declare_coherent_memory must be called before the pl330
	 * driver is loaded, since the driver allocates the microcode region at
	 * probe time.
	 */
	if (! (dma_declare_coherent_memory(&p7_dma_dev.dev,
	                                   p7_dma_ucode_addr(),
	                                   p7_dma_ucode_addr(),
	                                   p7_dma_ucode_sz(),
	                                   DMA_MEMORY_MAP |
	                                   DMA_MEMORY_EXCLUSIVE) &
	       DMA_MEMORY_MAP))
		/* Failure: will use DMA zone located in system RAM. */
		panic("p7: failed to map DMA controller microcode memory region [%08x:%08x]\n",
		      p7_dma_ucode_addr(),
		      p7_dma_ucode_addr() + p7_dma_ucode_sz() - 1);

	dev_info(&p7_dma_dev.dev,
			 "mapped microcode memory region [%08x:%08x]\n",
			 p7_dma_ucode_addr(),
			 p7_dma_ucode_addr() + p7_dma_ucode_sz() - 1);
	return 0;
}
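
Once dma_declare_coherent_memory() has attached the microcode region to the device, ordinary dma_alloc_coherent() calls against that device are expected to be served from that region rather than from system RAM. A hedged sketch of the consumer side under that assumption; the helper name and size are placeholders.

#include <linux/dma-mapping.h>

static void *grab_ucode_buffer(struct device *dev, size_t size,
			       dma_addr_t *bus_addr)
{
	/*
	 * Because the device declared its own coherent region above, this
	 * allocation should come from the internal RAM window rather than
	 * from the system-wide DMA zone.
	 */
	return dma_alloc_coherent(dev, size, bus_addr, GFP_KERNEL);
}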
Example #27
static int __init exynos_dma_init(void)
{
	if (of_have_populated_dt())
		return 0;

	if (soc_is_exynos4210()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4210_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4210_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4212_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4212_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
	} else if (soc_is_exynos4415()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4415_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos4415_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4415_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos4415_pdma1_peri;
	} else if (soc_is_exynos5250()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5250_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos5250_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5250_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos5250_pdma1_peri;
	} else if (soc_is_exynos5410()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5410_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos5410_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5410_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos5410_pdma1_peri;
	} else if (soc_is_exynos5420()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5420_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos5420_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5420_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos5420_pdma1_peri;
		exynos_adma0_pdata.nr_valid_peri =
			ARRAY_SIZE(adma0_peri);
		exynos_adma0_pdata.peri_id = adma0_peri;
	} else if (soc_is_exynos5260()) {
		exynos5260_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5260_pdma0_peri);
		exynos5260_pdma0_pdata.peri_id = exynos5260_pdma0_peri;
	} else if (soc_is_exynos3470()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos3470_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos3470_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos3470_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos3470_pdma1_peri;
	} else if (soc_is_exynos3250()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos3250_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos3250_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos3250_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos3250_pdma1_peri;
	}

	if (soc_is_exynos4210() || soc_is_exynos4212() ||
		soc_is_exynos4412() || soc_is_exynos3470()) {
		exynos_pdma0_device.res.start = EXYNOS4_PA_PDMA0;
		exynos_pdma0_device.res.end = EXYNOS4_PA_PDMA0 + SZ_4K;
		exynos_pdma0_device.irq[0] = EXYNOS4_IRQ_PDMA0;
		exynos_pdma1_device.res.start = EXYNOS4_PA_PDMA1;
		exynos_pdma1_device.res.end = EXYNOS4_PA_PDMA1 + SZ_4K;
		exynos_pdma1_device.irq[0] = EXYNOS4_IRQ_PDMA1;
		exynos_mdma_device.res.start = EXYNOS4_PA_MDMA1;
		exynos_mdma_device.res.end = EXYNOS4_PA_MDMA1 + SZ_4K;
		exynos_mdma_device.irq[0] = EXYNOS4_IRQ_MDMA1;
	} else if (soc_is_exynos5410() || soc_is_exynos5420()) {
		exynos_mdma_device.res.start = EXYNOS5_PA_MDMA0;
		exynos_mdma_device.res.end = EXYNOS5_PA_MDMA0 + SZ_4K;
		exynos_mdma_device.irq[0] = EXYNOS5_IRQ_MDMA0;
	} else if (soc_is_exynos5260()) {
		exynos5260_pdma0_device.res.start = EXYNOS5260_PA_PDMA0;
		exynos5260_pdma0_device.res.end = EXYNOS5260_PA_PDMA0 + SZ_4K;
		exynos5260_pdma0_device.irq[0] = EXYNOS5260_IRQ_PDMA0;
		exynos5260_mdma_device.res.start = EXYNOS5260_PA_NS_MDMA0;
		exynos5260_mdma_device.res.end = EXYNOS5260_PA_NS_MDMA0 + SZ_4K;
		exynos5260_mdma_device.irq[0] = EXYNOS5260_IRQ_MDMA_1;
		exynos5260_adma_device.res.start = EXYNOS5260_PA_ADMA0;
		exynos5260_adma_device.res.end = EXYNOS5260_PA_ADMA0 + SZ_4K;
		exynos5260_adma_device.irq[0] = EXYNOS5260_IRQ_ADMA;
	} else if (soc_is_exynos4415()) {
		exynos_pdma0_device.res.start = EXYNOS4_PA_PDMA0;
		exynos_pdma0_device.res.end = EXYNOS4_PA_PDMA0 + SZ_4K;
		exynos_pdma0_device.irq[0] = EXYNOS4_IRQ_PDMA0;
		exynos_pdma1_device.res.start = EXYNOS4_PA_PDMA1;
		exynos_pdma1_device.res.end = EXYNOS4_PA_PDMA1 + SZ_4K;
		exynos_pdma1_device.irq[0] = EXYNOS4_IRQ_PDMA1;
		exynos_mdma_device.res.start = EXYNOS4_PA_MDMA1;
		exynos_mdma_device.res.end = EXYNOS4_PA_MDMA1 + SZ_4K;
		exynos_mdma_device.irq[0] = EXYNOS4_IRQ_MDMA1;
	} else if (soc_is_exynos3250()) {
		exynos_pdma0_device.res.start = EXYNOS3_PA_PDMA0;
		exynos_pdma0_device.res.end = EXYNOS3_PA_PDMA0 + SZ_4K;
		exynos_pdma0_device.irq[0] = EXYNOS3_IRQ_PDMA0;
		exynos_pdma1_device.res.start = EXYNOS3_PA_PDMA1;
		exynos_pdma1_device.res.end = EXYNOS3_PA_PDMA1 + SZ_4K;
		exynos_pdma1_device.irq[0] = EXYNOS3_IRQ_PDMA1;
	}

	if (soc_is_exynos5260()) {
		dma_cap_set(DMA_SLAVE, exynos5260_pdma0_pdata.cap_mask);
		dma_cap_set(DMA_CYCLIC, exynos5260_pdma0_pdata.cap_mask);
		amba_device_register(&exynos5260_pdma0_device, &iomem_resource);

		dma_cap_set(DMA_MEMCPY, exynos5260_mdma_pdata.cap_mask);
		amba_device_register(&exynos5260_mdma_device, &iomem_resource);

		dma_cap_set(DMA_SLAVE, exynos5260_adma_pdata.cap_mask);
		dma_cap_set(DMA_CYCLIC, exynos5260_adma_pdata.cap_mask);
		amba_device_register(&exynos5260_adma_device, &iomem_resource);
	} else if (soc_is_exynos3250()) {
		dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
		dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
		amba_device_register(&exynos_pdma0_device, &iomem_resource);

		dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
		dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
		amba_device_register(&exynos_pdma1_device, &iomem_resource);
	} else {
		dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
		dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
		amba_device_register(&exynos_pdma0_device, &iomem_resource);

		dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
		dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
		amba_device_register(&exynos_pdma1_device, &iomem_resource);

		dma_cap_set(DMA_MEMCPY, exynos_mdma_pdata.cap_mask);
		amba_device_register(&exynos_mdma_device, &iomem_resource);
	}

	if (soc_is_exynos5420()) {
		dma_cap_set(DMA_SLAVE, exynos_adma0_pdata.cap_mask);
		dma_cap_set(DMA_CYCLIC, exynos_adma0_pdata.cap_mask);
		amba_device_register(&exynos_adma0_device, &iomem_resource);
	}

	return 0;
}
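
Most of the PDMA/ADMA controllers above advertise DMA_CYCLIC, the capability audio-style clients rely on. Below is a hedged sketch of preparing and kicking off a cyclic transfer on such a channel; the buffer layout, direction and callback are illustrative.

#include <linux/dmaengine.h>

static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	desc->callback = period_done;	/* invoked once per period */
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* transfer loops until terminated */
	return 0;
}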
Example #28
/**
 * i2s_dma_start - prepare and reserve dma channels
 * @drv_data: intel_mid_i2s_hdl pointer holding the driver data (context)
 *
 * The "ssp open" context and dmac1 should already be filled in drv_data.
 *
 * Output parameters
 *      int : zero on success, else an error code
 */
static int i2s_dma_start(struct intel_mid_i2s_hdl *drv_data)
{
	struct intel_mid_dma_slave *rxs, *txs;
	struct pci_dev *l_pdev;
	struct intel_mid_i2s_settings *ssp_settings =
						&(drv_data->current_settings);
	dma_cap_mask_t mask;
	int retval = 0;
	int temp = 0;

	dev_dbg(&drv_data->pdev->dev, "DMAC1 start\n");
	drv_data->txchan = NULL;
	drv_data->rxchan = NULL;
	l_pdev = drv_data->pdev;

	if (ssp_settings->ssp_active_rx_slots_map) {
		/* 1. init rx channel */
		rxs = &drv_data->dmas_rx;
		rxs->dma_slave.direction = DMA_FROM_DEVICE;
		rxs->hs_mode = LNW_DMA_HW_HS;
		rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
		temp = i2s_compute_dma_width(ssp_settings->data_size,
					&rxs->dma_slave.src_addr_width);

		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"RX DMA Channel Bad data_size = %d\n",
				ssp_settings->data_size);
			retval = -1;
			goto err_exit;

		}
		rxs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		temp = i2s_compute_dma_msize(
					ssp_settings->ssp_rx_fifo_threshold,
					&rxs->dma_slave.src_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"RX DMA Channel Bad RX FIFO Threshold src= %d\n",
				ssp_settings->ssp_rx_fifo_threshold);
			retval = -2;
			goto err_exit;

		}

		temp = i2s_compute_dma_msize(
					ssp_settings->ssp_rx_fifo_threshold,
					&rxs->dma_slave.dst_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"RX DMA Channel Bad RX FIFO Threshold dst= %d\n",
				ssp_settings->ssp_rx_fifo_threshold);
			retval = -3;
			goto err_exit;

		}

		rxs->device_instance = drv_data->device_instance;
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		dma_cap_set(DMA_SLAVE, mask);
		drv_data->rxchan = dma_request_channel(mask,
							chan_filter, drv_data);
		if (!drv_data->rxchan) {
			dev_err(&(drv_data->pdev->dev),
				"Could not get Rx channel\n");
			retval = -4;
			goto err_exit;
		}

		temp = drv_data->rxchan->device->device_control(
					drv_data->rxchan, DMA_SLAVE_CONFIG,
					(unsigned long) &rxs->dma_slave);
		if (temp) {
			dev_err(&(drv_data->pdev->dev),
				"Rx slave control failed\n");
			retval = -5;
			goto err_exit;
		}

	}

	if (ssp_settings->ssp_active_tx_slots_map) {
		/* 2. init tx channel */
		txs = &drv_data->dmas_tx;
		txs->dma_slave.direction = DMA_TO_DEVICE;
		txs->hs_mode = LNW_DMA_HW_HS;
		txs->cfg_mode = LNW_DMA_MEM_TO_PER;

		txs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		temp = i2s_compute_dma_width(ssp_settings->data_size,
					&txs->dma_slave.dst_addr_width);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"TX DMA Channel Bad data_size = %d\n",
				ssp_settings->data_size);
			retval = -6;
			goto err_exit;

		}

		temp = i2s_compute_dma_msize(
				ssp_settings->ssp_tx_fifo_threshold + 1,
				&txs->dma_slave.src_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"TX DMA Channel Bad TX FIFO Threshold src= %d\n",
				ssp_settings->ssp_tx_fifo_threshold);
			retval = -7;
			goto err_exit;

		}

		temp = i2s_compute_dma_msize(
				ssp_settings->ssp_tx_fifo_threshold + 1,
						&txs->dma_slave.dst_maxburst);
		if (temp != 0) {
			dev_err(&(drv_data->pdev->dev),
				"TX DMA Channel Bad TX FIFO Threshold dst= %d\n",
				ssp_settings->ssp_tx_fifo_threshold);
			retval = -8;
			goto err_exit;

		}

		txs->device_instance = drv_data->device_instance;
		dma_cap_zero(mask);	/* mask may be uninitialized if RX was skipped */
		dma_cap_set(DMA_SLAVE, mask);
		dma_cap_set(DMA_MEMCPY, mask);
		drv_data->txchan = dma_request_channel(mask,
							chan_filter, drv_data);

		if (!drv_data->txchan) {
			dev_err(&(drv_data->pdev->dev),
				"Could not get Tx channel\n");
			retval = -10;
			goto err_exit;
		}

		temp = drv_data->txchan->device->device_control(
					drv_data->txchan, DMA_SLAVE_CONFIG,
					(unsigned long) &txs->dma_slave);
		if (temp) {
			dev_err(&(drv_data->pdev->dev),
				"Tx slave control failed\n");
			retval = -9;
			goto err_exit;
		}
	}

	return retval;

err_exit:
	if (drv_data->txchan)
		dma_release_channel(drv_data->txchan);
	if (drv_data->rxchan)
		dma_release_channel(drv_data->rxchan);
	drv_data->rxchan = NULL;
	drv_data->txchan = NULL;
	return retval;
}
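
The driver above still configures its channels through the legacy chan->device->device_control(DMA_SLAVE_CONFIG, ...) hook. In current kernels the same step goes through dmaengine_slave_config() and struct dma_slave_config; here is a hedged sketch of the equivalent RX-side configuration, with placeholder arguments standing in for the widths and bursts computed above.

#include <linux/dmaengine.h>

static int config_rx_chan(struct dma_chan *chan, dma_addr_t fifo_addr,
			  enum dma_slave_buswidth width, u32 burst)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= width,
		.src_maxburst	= burst,
	};

	/* Replaces chan->device->device_control(chan, DMA_SLAVE_CONFIG, ...) */
	return dmaengine_slave_config(chan, &cfg);
}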