Beispiel #1
0
static int ti_dma_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dma_xbar_data *xbar;
	struct resource *res;
	u32 safe_val;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	idr_init(&xbar->map_idr);

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dma_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_XBAR_OUTPUTS);
		xbar->dma_requests = TI_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_XBAR_INPUTS);
		xbar->xbar_requests = TI_XBAR_INPUTS;
	}

	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dma_xbar_free;
	xbar->dma_offset = (u32)match->data;

	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++)
		ti_dma_xbar_write(xbar->iomem, i, xbar->safe_val);

	ret = of_dma_router_register(node, ti_dma_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++)
			ti_dma_xbar_write(xbar->iomem, i, i);
	}

	return ret;
}
Beispiel #2
0
/*
 * stm32_dmamux_probe() - probe the STM32 DMA request multiplexer.
 *
 * Walks the "dma-masters" phandle list to sum the request lines of each
 * DMA controller behind the mux, allocates the in-use bitmap, maps the
 * mux registers, resets every channel configuration register and registers
 * the router with the DMA OF core.
 *
 * Returns 0 on success or a negative errno.
 */
static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	/* Number of entries in "dma-masters" = number of DMA controllers */
	count = device_property_read_u32_array(&pdev->dev, "dma-masters",
					       NULL, 0);
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	/* dma_reqs[] is a flexible tail: slot 0 holds the master count */
	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);
		if (!dma_node) {
			/* Entry present per count above but not a valid phandle */
			dev_err(&pdev->dev, "Can't get DMA master node %d\n",
				i - 1);
			return -ENODEV;
		}

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	/* Clock is optional below; only -EPROBE_DEFER gets the info print */
	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk)) {
		ret = PTR_ERR(stm32_dmamux->clk);
		if (ret == -EPROBE_DEFER)
			dev_info(&pdev->dev, "Missing controller clock\n");
		return ret;
	}

	/* Optional reset line: pulse it if present */
	stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(stm32_dmamux->rst)) {
		reset_control_assert(stm32_dmamux->rst);
		udelay(2);
		reset_control_deassert(stm32_dmamux->rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);

	if (!IS_ERR(stm32_dmamux->clk)) {
		ret = clk_prepare_enable(stm32_dmamux->clk);
		if (ret < 0) {
			dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
			return ret;
		}
	}

	/* Reset the dmamux: clear every channel configuration register */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	/* NOTE(review): clock stays prepared on purpose, only disabled here */
	if (!IS_ERR(stm32_dmamux->clk))
		clk_disable(stm32_dmamux->clk);

	return of_dma_router_register(node, stm32_dmamux_route_allocate,
				     &stm32_dmamux->dmarouter);
}
Beispiel #3
0
/*
 * ti_dra7_xbar_probe() - probe the TI DRA7 DMA crossbar router.
 *
 * Reads the crossbar topology from the device tree, marks any reserved
 * request ranges in the in-use bitmap, maps the crossbar registers, parks
 * every unreserved output on the safe value and registers the router with
 * the DMA OF core.
 *
 * Returns 0 on success or a negative errno.
 */
static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	struct resource *res;
	u32 safe_val;
	size_t sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		/* Drop the reference taken by of_parse_phandle() */
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	/* Optional: value written to park unused outputs (defaults to 0) */
	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;


	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			/* Don't leak the temporary array on a parse error */
			kfree(rsv_events);
			return ret;
		}

		/* Mark each (start, count) range as in use */
		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	xbar->dma_offset = (u32)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar: park every unreserved output on the safe value */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults (identity mapping) for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}
Beispiel #4
0
/*
 * omap_dma_xbar_probe() - probe the OMAP DMA crossbar.
 *
 * Maps the crossbar registers, builds the request-line map (marking any
 * reserved lines from "ti,dmas-reserved"), precomputes the non-linear
 * register offsets, parks every free line on the safe map and registers
 * the router with the DMA OF core.
 *
 * Returns 0 on success or a negative errno.
 */
static int omap_dma_xbar_probe(struct platform_device *pdev)
{
	int i, j, reserved = 0;
	const __be32 *dmar;
	uint max, size, entry, range;
	struct resource *res;
	struct dma_xbar_device *xbar;

	pd_xbar = pdev;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	xbar->ops = &dma_xbar_ops;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xbar->dma_xbar_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xbar->dma_xbar_base))
		return PTR_ERR(xbar->dma_xbar_base);

	/*
	 * Check the read result: otherwise a missing property would leave
	 * 'max' uninitialized.  The error message now names the property
	 * actually being read ("ti,dma-reqs", not "ti,dma-irqs").
	 */
	if (of_property_read_u32(pdev->dev.of_node, "ti,dma-reqs", &max) ||
	    !max) {
		pr_err("missing 'ti,dma-reqs' property\n");
		return -EINVAL;
	}

	xbar->dma_map = devm_kzalloc(&pdev->dev, max * sizeof(int), GFP_KERNEL);
	if (!xbar->dma_map)
		return -ENOMEM;

	xbar->dma_max = max;

	for (i = 0; i < max; i++)
		xbar->dma_map[i] = DMASIGNAL_FREE;

	/* Get and mark reserved dma req lines */
	dmar = of_get_property(pdev->dev.of_node, "ti,dmas-reserved", &size);
	if (dmar) {
		size /= sizeof(__be32);

		/* Entries come in (entry, range) pairs; i advances by two */
		for (i = 0; i < size; i++) {
			of_property_read_u32_index(pdev->dev.of_node,
						   "ti,dmas-reserved",
						   i++, &entry);
			of_property_read_u32_index(pdev->dev.of_node,
						   "ti,dmas-reserved",
						   i, &range);
			if ((entry + range > max) ||
			    ((entry + range) <= entry)) {
				pr_err("Invalid reserved entry\n");
				return -ENODEV;
			}

			/*
			 * NOTE(review): this treats 'range' as an absolute
			 * end index, while the bounds check above treats it
			 * as a count (entry + range) -- confirm which the DT
			 * binding intends before changing either.
			 */
			for (j = entry; j <= range; j++)
				xbar->dma_map[j] = DMASIGNAL_RESERVED;

			/* For a single entry */
			if (!range)
				xbar->dma_map[entry] = DMASIGNAL_RESERVED;
		}
	}

	xbar->reg_offs = devm_kzalloc(&pdev->dev, max * sizeof(int),
				      GFP_KERNEL);
	if (!xbar->reg_offs)
		return -ENOMEM;

	/*
	 * Check the read result: 'size' was reused above for the reserved
	 * list, so an absent "ti,reg-size" would leave a stale value that
	 * could accidentally match a valid width.
	 */
	if (of_property_read_u32(pdev->dev.of_node, "ti,reg-size", &size)) {
		pr_err("missing 'ti,reg-size' property\n");
		return -ENODEV;
	}

	switch (size) {
	case 1:
		xbar->write = dma_xbar_writeb;
		break;
	case 2:
		xbar->write = dma_xbar_writew;
		break;
	case 4:
		xbar->write = dma_xbar_writel;
		break;
	default:
		pr_err("Invalid reg-size property\n");
		return -ENODEV;
	}

	/*
	 * Register offsets are not linear because of the
	 * reserved lines. so find and store the offsets once.
	 */
	for (i = 0; i < max; i++) {
		if (xbar->dma_map[i] == DMASIGNAL_RESERVED)
			continue;

		xbar->reg_offs[i] = reserved;
		reserved += size;
	}

	/* Optional; safe_map stays 0 (from kzalloc) if the property is absent */
	of_property_read_u32(pdev->dev.of_node, "ti,dma-safe-map",
			     &xbar->safe_map);
	/* Initialize the crossbar with safe map to start with */
	for (i = 0; i < max; i++) {
		if (xbar->dma_map[i] == DMASIGNAL_RESERVED)
			continue;

		xbar->write(i, xbar->safe_map, xbar);
	}

	if (of_dma_router_register(pdev->dev.of_node, xbar))
		return -ENODEV;

	dev_info(&pdev->dev, "OMAP DMA Crossbar driver\n");

	return 0;
}