Example #1
int of_platform_bus_probe(struct device_node *root,
			  struct of_device_id *matches,
			  struct device *parent)
{
	struct device_node *child;
	struct of_device *dev;
	int rc = 0;

	if (matches == NULL)
		matches = of_default_bus_ids;
	if (matches == OF_NO_DEEP_PROBE)
		return -EINVAL;
	if (root == NULL)
		root = of_find_node_by_path("/");
	else
		of_node_get(root);

	pr_debug("of_platform_bus_probe()\n");
	pr_debug(" starting at: %s\n", root->full_name);

	/* Do a self check of bus type, if there's a match, create
	 * children
	 */
	if (of_match_node(matches, root)) {
		pr_debug(" root match, create all sub devices\n");
		dev = of_platform_device_create(root, NULL, parent);
		if (dev == NULL) {
			rc = -ENOMEM;
			goto bail;
		}
		pr_debug(" create all sub busses\n");
		rc = of_platform_bus_create(root, matches, &dev->dev);
		goto bail;
	}
	for (child = NULL; (child = of_get_next_child(root, child)); ) {
		if (!of_match_node(matches, child))
			continue;

		pr_debug("  match: %s\n", child->full_name);
		dev = of_platform_device_create(child, NULL, parent);
		if (dev == NULL)
			rc = -ENOMEM;
		else
			rc = of_platform_bus_create(child, matches, &dev->dev);
		if (rc) {
			of_node_put(child);
			break;
		}
	}
 bail:
	of_node_put(root);
	return rc;
}
Example #2
static int __init cell_publish_devices(void)
{
	struct device_node *root = of_find_node_by_path("/");
	struct device_node *np;
	int node;

	/* Publish OF platform devices for southbridge IOs */
	of_platform_bus_probe(NULL, cell_bus_ids, NULL);

	/* On spider based blades, we need to manually create the OF
	 * platform devices for the PCI host bridges
	 */
	for_each_child_of_node(root, np) {
		if (np->type == NULL || (strcmp(np->type, "pci") != 0 &&
					 strcmp(np->type, "pciex") != 0))
			continue;
		of_platform_device_create(np, NULL, NULL);
	}

	/* There is no device for the MIC memory controller, thus we create
	 * a platform device for it to attach the EDAC driver to.
	 */
	for_each_online_node(node) {
		if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
			continue;
		platform_device_register_simple("cbe-mic", node, NULL, 0);
	}

	return 0;
}
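The cell_bus_ids table passed to of_platform_bus_probe() above is defined elsewhere in the platform setup file and is not part of this example. As a rough sketch (the name example_bus_ids and the individual entries are illustrative, not taken from the source), such a match table is a NULL-terminated array of struct of_device_id entries, matched against each node's type or compatible property:

static const struct of_device_id example_bus_ids[] __initconst = {
	{ .type = "soc", },
	{ .compatible = "soc", },
	{ .type = "spider", },
	{ .type = "axon", },
	{ .compatible = "simple-bus", },
	{}, /* sentinel terminates the table */
};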
Example #3
/**
 * of_platform_bus_create() - Create a device for a node and its children.
 * @bus: device node of the bus to instantiate
 * @matches: match table for bus nodes
 * @parent: parent for new device, or NULL for top level.
 *
 * Creates a platform_device for the provided device_node, and optionally
 * recursively create devices for all the child nodes.
 */
static int of_platform_bus_create(struct device_node *bus,
				const struct of_device_id *matches,
				struct device_d *parent)
{
	struct device_node *child;
	struct device_d *dev;
	int rc = 0;

	/* Make sure it has a compatible property */
	if (!of_get_property(bus, "compatible", NULL)) {
		pr_debug("%s() - skipping %s, no compatible prop\n",
			__func__, bus->full_name);
		return 0;
	}

	if (of_device_is_compatible(bus, "arm,primecell")) {
		of_amba_device_create(bus);
		return 0;
	}

	dev = of_platform_device_create(bus, parent);
	if (!dev || !of_match_node(matches, bus))
		return 0;

	for_each_child_of_node(bus, child) {
		pr_debug("   create child: %s\n", child->full_name);
		rc = of_platform_bus_create(child, matches, dev);
		if (rc)
			break;
	}

	return rc;
}
Example #4
static void mbigen_create_domain(struct mbigen_device *mgn_dev, struct device_node *np)
{
	struct device *parent = platform_bus_type.dev_root;
	struct device *dev = &mgn_dev->pdev->dev;
	struct platform_device *child;
	struct irq_domain *domain;
	u32 num_pins;

	if (!of_property_read_bool(np, "interrupt-controller"))
		goto err;

	if (of_property_read_u32(np, "num-pins", &num_pins))
		goto err;

	child = of_platform_device_create(np, NULL, parent);
	/* of_platform_device_create() returns NULL on failure, not an ERR_PTR() */
	if (!child)
		goto err;

	domain = platform_msi_create_device_domain(&child->dev, num_pins,
						   mbigen_write_msg,
						   &mbigen_domain_ops,
						   mgn_dev);
	if (!domain)
		goto err;

	dev_info(dev, "%s domain created\n", np->full_name);

	return;
err:
	dev_err(dev, "unable to create %s domain\n", np->full_name);
}
Example #5
/**
 * of_platform_bus_create - Create an OF device for a bus node and all its
 * children. Optionally recursively instantiate matching busses.
 * @bus: device node of the bus to instantiate
 * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
 * disallow recursive creation of child busses
 */
static int of_platform_bus_create(struct device_node *bus,
				  struct of_device_id *matches,
				  struct device *parent)
{
	struct device_node *child;
	struct of_device *dev;
	int rc = 0;

	for (child = NULL; (child = of_get_next_child(bus, child)); ) {
		pr_debug("   create child: %s\n", child->full_name);
		dev = of_platform_device_create(child, NULL, parent);
		if (dev == NULL)
			rc = -ENOMEM;
		else if (!of_match_node(matches, child))
			continue;
		if (rc == 0) {
			pr_debug("   and sub busses\n");
			rc = of_platform_bus_create(child, matches, &dev->dev);
		}
		if (rc) {
			of_node_put(child);
			break;
		}
	}
	return rc;
}
Example #6
static int __init hwspinlocks_init(void)
{
	int ret;

#ifdef CONFIG_OF
	struct device_node *np;
	struct platform_device *pdev;

	np = of_find_node_by_name(NULL, "hwspinlock1");
	if (!np) {
		pr_warn("Can't get the hwspinlock1 node!\n");
		return -ENODEV;
	}

	pdev = of_platform_device_create(np, NULL, NULL);
	if (!pdev) {
		pr_warn("register hwspinlock1 failed!\n");
		return -ENODEV;
	}
	pr_info("*****hwspinlock1's name is %s\n", pdev->name);
	pr_info("SPRD register hwspinlock1 ok!\n");
//	of_detach_node(np);

	return 0;

#else
	ret = platform_device_register(&sprd_hwspinlock_device1);
	if (WARN(ret != 0, "register hwspinlock device error!!"))
		platform_device_unregister(&sprd_hwspinlock_device1);

	return ret;

#endif
}
Example #7
static void opal_i2c_create_devs(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "ibm,opal-i2c")
		of_platform_device_create(np, NULL, NULL);
}
Example #8
/**
 * of_platform_bus_create - Create an OF device for a bus node and all its
 * children. Optionally recursively instantiate matching busses.
 * @bus: device node of the bus to instantiate
 * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
 * disallow recursive creation of child busses
 */
static int of_platform_bus_create(const struct device_node *bus,
				  const struct of_device_id *matches,
				  struct device *parent)
{
	struct device_node *child;
	struct platform_device *dev;
	int rc = 0;

	for_each_child_of_node(bus, child) {
		pr_debug("   create child: %s\n", child->full_name);
		dev = of_platform_device_create(child, NULL, parent);
		if (dev == NULL)
			continue;

		if (!of_match_node(matches, child))
			continue;
		if (rc == 0) {
			pr_debug("   and sub busses\n");
			rc = of_platform_bus_create(child, matches, &dev->dev);
		}
		if (rc) {
			of_node_put(child);
			break;
		}
	}

	return rc;
}
Example #9
File: setup.c Project: 08opt/linux
static int __init cell_publish_devices(void)
{
	struct device_node *root = of_find_node_by_path("/");
	struct device_node *np;
	int node;

	/* Publish OF platform devices for southbridge IOs */
	of_platform_bus_probe(NULL, cell_bus_ids, NULL);

	/* On spider based blades, we need to manually create the OF
	 * platform devices for the PCI host bridges
	 */
	for_each_child_of_node(root, np) {
		if (np->type == NULL || (strcmp(np->type, "pci") != 0 &&
					 strcmp(np->type, "pciex") != 0))
			continue;
		of_platform_device_create(np, NULL, NULL);
	}

	/* There is no device for the MIC memory controller, thus we create
	 * a platform device for it to attach the EDAC driver to.
	 */
	for_each_online_node(node) {
		if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
			continue;
		platform_device_register_simple("cbe-mic", node, NULL, 0);
	}

	return 0;
}
Example #10
/* Call with ams_info.lock held! */
int ams_sensor_attach(void)
{
	int result;
	const u32 *prop;

	/* Get orientation */
	prop = of_get_property(ams_info.of_node, "orientation", NULL);
	if (!prop)
		return -ENODEV;
	ams_info.orient1 = *prop;
	ams_info.orient2 = *(prop + 1);

	/* Register freefall interrupt handler */
	result = pmf_register_irq_client(ams_info.of_node,
			"accel-int-1",
			&ams_freefall_client);
	if (result < 0)
		return -ENODEV;

	/* Reset saved irqs */
	ams_info.worker_irqs = 0;

	/* Register shock interrupt handler */
	result = pmf_register_irq_client(ams_info.of_node,
			"accel-int-2",
			&ams_shock_client);
	if (result < 0)
		goto release_freefall;

	/* Create device */
	ams_info.of_dev = of_platform_device_create(ams_info.of_node, "ams", NULL);
	if (!ams_info.of_dev) {
		result = -ENODEV;
		goto release_shock;
	}

	/* Create attributes */
	result = device_create_file(&ams_info.of_dev->dev, &dev_attr_current);
	if (result)
		goto release_of;

	ams_info.vflag = !!(ams_info.get_vendor() & 0x10);

	/* Init input device */
	result = ams_input_init();
	if (result)
		goto release_device_file;

	return result;
release_device_file:
	device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
release_of:
	of_device_unregister(ams_info.of_dev);
release_shock:
	pmf_unregister_irq_client(&ams_shock_client);
release_freefall:
	pmf_unregister_irq_client(&ams_freefall_client);
	return result;
}
Example #11
static void opal_ipmi_init(struct device_node *opal_node)
{
	struct device_node *np;

	for_each_child_of_node(opal_node, np)
		if (of_device_is_compatible(np, "ibm,opal-ipmi"))
			of_platform_device_create(np, NULL, NULL);
}
Example #12
struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
				       struct ion_of_heap *compatible)
{
	int num_heaps, ret;
	const struct device_node *dt_node = pdev->dev.of_node;
	struct device_node *node;
	struct ion_platform_heap *heaps;
	struct ion_platform_data *data;
	int i = 0;

	num_heaps = of_get_available_child_count(dt_node);

	if (!num_heaps)
		return ERR_PTR(-EINVAL);

	heaps = devm_kzalloc(&pdev->dev,
			     sizeof(struct ion_platform_heap) * num_heaps,
			     GFP_KERNEL);
	if (!heaps)
		return ERR_PTR(-ENOMEM);

	data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data),
			    GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	for_each_available_child_of_node(dt_node, node) {
		struct platform_device *heap_pdev;

		ret = ion_parse_dt_heap_common(node, &heaps[i], compatible);
		if (ret)
			return ERR_PTR(ret);

		heap_pdev = of_platform_device_create(node, heaps[i].name,
						      &pdev->dev);
		if (!heap_pdev)
			return ERR_PTR(-ENOMEM);
		heap_pdev->dev.platform_data = &heaps[i];

		heaps[i].priv = &heap_pdev->dev;

		ret = ion_setup_heap_common(pdev, node, &heaps[i]);
		if (ret)
			goto out_err;
		i++;
	}

	data->heaps = heaps;
	data->nr = num_heaps;
	return data;

out_err:
	for ( ; i >= 0; i--)
		if (heaps[i].priv)
			of_device_unregister(to_platform_device(heaps[i].priv));

	return ERR_PTR(ret);
}
Example #13
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	int rc;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("Device node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Create i2c platform devices */
	opal_i2c_create_devs();

	/* Setup a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

	/* Find all OPAL interrupts and request them */
	opal_irq_init(opal_node);

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	/* Initialize OPAL IPMI backend */
	opal_ipmi_init(opal_node);

	return 0;
}
Example #14
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	opal_irq_count = irqlen / 4;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup code update interface */
		opal_flash_init();
	}

	return 0;
}
Example #15
static int __init pmac_declare_of_platform_devices(void)
{
	struct device_node *np;

	np = find_devices("u3");
	if (np) {
		for (np = np->child; np != NULL; np = np->sibling)
			if (strncmp(np->name, "i2c", 3) == 0) {
				of_platform_device_create(np, "u3-i2c");
				break;
			}
	}

	return 0;
}
Example #16
int __init opal_sensor_init(void)
{
	struct platform_device *pdev;
	struct device_node *sensor;

	sensor = of_find_node_by_path("/ibm,opal/sensors");
	if (!sensor) {
		pr_err("Opal node 'sensors' not found\n");
		return -ENODEV;
	}

	pdev = of_platform_device_create(sensor, "opal-sensor", NULL);
	of_node_put(sensor);
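	/* Note: of_platform_device_create() returns NULL on failure rather than
	 * an ERR_PTR(), so PTR_ERR_OR_ZERO() below reports success even when no
	 * device was created; an explicit NULL check would catch that case. */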

	return PTR_ERR_OR_ZERO(pdev);
}
Example #17
static int __init wii_device_probe(void)
{
	struct device_node *np;

	if (!machine_is(wii))
		return 0;

	of_platform_bus_probe(NULL, wii_of_bus, NULL);

	np = of_find_compatible_node(NULL, NULL, "nintendo,hollywood-mem2");
	if (np) {
		of_platform_device_create(np, NULL, NULL);
		of_node_put(np);
	}

	return 0;
}
Example #18
static int vfe_probe(struct platform_device *pdev)
{
	struct vfe_parent_device *vfe_parent_dev;
	int rc = 0;
	struct device_node *node;
	struct platform_device *new_dev = NULL;
	const struct device_node *dt_node = pdev->dev.of_node;

	vfe_parent_dev = kzalloc(sizeof(struct vfe_parent_device),
		GFP_KERNEL);
	if (!vfe_parent_dev) {
		pr_err("%s: not enough memory\n", __func__);
		rc = -ENOMEM;
		goto end;
	}

	vfe_parent_dev->common_sd = kzalloc(
		sizeof(struct msm_vfe_common_subdev), GFP_KERNEL);
	if (!vfe_parent_dev->common_sd) {
		pr_err("%s: not enough memory\n", __func__);
		rc = -ENOMEM;
		goto probe_fail1;
	}

	vfe_parent_dev->common_sd->common_data = &vfe_common_data;
	memset(&vfe_common_data, 0, sizeof(vfe_common_data));
	spin_lock_init(&vfe_common_data.common_dev_data_lock);
	spin_lock_init(&vfe_common_data.common_dev_axi_lock);

	for_each_available_child_of_node(dt_node, node) {
		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
		if (!new_dev) {
			pr_err("Failed to create device %s\n", node->name);
			goto probe_fail2;
		}
		vfe_parent_dev->child_list[vfe_parent_dev->num_hw_sd++] =
			new_dev;
		new_dev->dev.platform_data =
			(void *)vfe_parent_dev->common_sd->common_data;

		pr_debug("%s: device creation done\n", __func__);
	}
Example #19
/*
 * Probe routine for each detected JobR subsystem. It assumes that
 * property detection was picked up externally.
 */
int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
		  int ring)
{
	struct device *ctrldev, *jrdev;
	struct platform_device *jr_pdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	int error;
	/* FIXME: perhaps "struct resource *" for OF and non? */
	u32 *jroffset, *irqres;
#ifndef CONFIG_OF
	char *rname, rinst;
#endif

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);

	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
			 GFP_KERNEL);
	if (jrpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
			ring);
		return -ENOMEM;
	}
	jrpriv->parentdev = ctrldev; /* point back to parent */
	jrpriv->ridx = ring; /* save ring identity relative to detection */

	/*
	 * Derive a pointer to the detected JobRs regs
	 * Driver has already iomapped the entire space, we just
	 * need to add in the offset to this JobR. Don't know if I
	 * like this long-term, but it'll run
	 */
#ifdef CONFIG_OF
	jroffset = (u32 *)of_get_property(np, "reg", NULL);
#else
	rname = kmalloc(strlen(JR_MEMRES_NAME_ROOT) + 1, 0);
	if (rname == NULL) {
		dev_err(ctrldev, "can't alloc resource detection buffer %d\n",
			ring);
		kfree(jrpriv);
		return -ENOMEM;
	}
	rname[0] = 0;
	rinst = '0' + ring;
	strcat(rname, JR_MEMRES_NAME_ROOT);
	strncat(rname, &rinst, 1);
	jroffset = (u32 *)platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       rname);
	kfree(rname);
#endif
	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
							 + *jroffset);

	/* Build a local dev for each detected queue */
#ifdef CONFIG_OF
	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
#else
	jr_pdev = platform_device_register_data(ctrldev, "caam_jr", ring,
						jrpriv,
					sizeof(struct caam_drv_private_jr));
#endif
	if (jr_pdev == NULL) {
		kfree(jrpriv);
		return -EINVAL;
	}
	jrdev = &jr_pdev->dev;
	dev_set_drvdata(jrdev, jrpriv);
	ctrlpriv->jrdev[ring] = jrdev;

	/* Identify the interrupt */
#ifdef CONFIG_OF
	jrpriv->irq = of_irq_to_resource(np, 0, NULL);
#else
	rname = kmalloc(strlen(JR_IRQRES_NAME_ROOT) + 1, 0);
	if (rname == NULL) {
		dev_err(ctrldev, "can't alloc resource detection buffer %d\n",
			ring);
		kfree(jrpriv);
		return -ENOMEM;
	}
	rname[0] = 0;
	strcat(rname, JR_IRQRES_NAME_ROOT);
	strncat(rname, &rinst, 1);
	irqres = (u32 *)platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						     rname);
	jrpriv->irq = *irqres;
	kfree(rname);
#endif

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		kfree(jrpriv);
		return error;
	}

	return error;
}
Example #20
int caam_secvio_startup(struct platform_device *pdev)
{
	struct device *ctrldev, *svdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_secvio *svpriv;
	struct platform_device *svpdev;
	struct device_node *np;
	const void *prop;
	int i, error, secvio_inten_src;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	/*
	 * Set up the private block for secure memory
	 * Only one instance is possible
	 */
	svpriv = kzalloc(sizeof(struct caam_drv_private_secvio), GFP_KERNEL);
	if (svpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for secvio\n");
		return -ENOMEM;
	}
	svpriv->parentdev = ctrldev;

	/* Create the security violation dev */
#ifdef CONFIG_OF

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
	if (!np)
		return -ENODEV;

	ctrlpriv->secvio_irq = of_irq_to_resource(np, 0, NULL);

	prop = of_get_property(np, "secvio_src", NULL);
	if (prop)
		secvio_inten_src = of_read_ulong(prop, 1);
	else
		secvio_inten_src = HP_SECVIO_INTEN_ALL;

	printk(KERN_ERR "secvio_inten_src = %x\n", secvio_inten_src);

	svpdev = of_platform_device_create(np, NULL, ctrldev);
	if (!svpdev)
		return -ENODEV;

#else
	svpdev = platform_device_register_data(ctrldev, "caam_secvio", 0,
					       svpriv,
				sizeof(struct caam_drv_private_secvio));

	secvio_inten_src = HP_SECVIO_INTEN_ALL;
#endif
	if (svpdev == NULL) {
		kfree(svpriv);
		return -EINVAL;
	}
	svdev = &svpdev->dev;
	dev_set_drvdata(svdev, svpriv);
	ctrlpriv->secviodev = svdev;
	svpriv->svregs = ctrlpriv->snvs;

	/*
	 * Now we have all the dev data set up. Init interrupt
	 * source descriptions
	 */
	for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
		svpriv->intsrc[i].intname = violation_src_name[i];
		svpriv->intsrc[i].handler = caam_secvio_default;
	}

	/* Connect main handler */
	for_each_possible_cpu(i)
		tasklet_init(&svpriv->irqtask[i], caam_secvio_dispatch,
			     (unsigned long)svdev);

	error = request_irq(ctrlpriv->secvio_irq, caam_secvio_interrupt,
			    IRQF_SHARED, "caam_secvio", svdev);
	if (error) {
		dev_err(svdev, "can't connect secvio interrupt\n");
		irq_dispose_mapping(ctrlpriv->secvio_irq);
		ctrlpriv->secvio_irq = 0;
		return -EINVAL;
	}

	/* Enable all sources */
	wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, secvio_inten_src);

	dev_info(svdev, "security violation service handlers armed\n");

	return 0;
}
Example #21
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base)
		return -EINVAL;

	/* On ThunderX these are identical */
	host->dma_base = host->base;

	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk))
		return PTR_ERR(host->clk);

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret)
				goto error;
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	clk_disable_unprepare(host->clk);
	return ret;
}
Example #22
static struct ion_platform_data *sprd_ion_parse_dt(struct platform_device *pdev)
{
	int i = 0, ret = 0;
	const struct device_node *parent = pdev->dev.of_node;
	struct device_node *child = NULL;
	struct ion_platform_data *pdata = NULL;
	struct ion_platform_heap *ion_heaps = NULL;
	struct platform_device *new_dev = NULL;
	uint32_t val = 0, type = 0;
	const char *name;
	uint32_t out_values[2];

	for_each_child_of_node(parent, child)
		num_heaps++;
	if (!num_heaps)
		return ERR_PTR(-EINVAL);

	pr_info("%s: num_heaps=%d\n", __func__, num_heaps);

	pdata = kzalloc(sizeof(struct ion_platform_data), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	ion_heaps = kzalloc(sizeof(struct ion_platform_heap)*num_heaps, GFP_KERNEL);
	if (!ion_heaps) {
		kfree(pdata);
		return ERR_PTR(-ENOMEM);
	}

	pdata->heaps = ion_heaps;
	pdata->nr = num_heaps;

	for_each_child_of_node(parent, child) {
		new_dev = of_platform_device_create(child, NULL, &pdev->dev);
		if (!new_dev) {
			pr_err("Failed to create device %s\n", child->name);
			goto out;
		}

		pdata->heaps[i].priv = &new_dev->dev;

		ret = of_property_read_u32(child, "reg", &val);
		if (ret) {
			pr_err("%s: Unable to find reg key, ret=%d", __func__, ret);
			goto out;
		}
		pdata->heaps[i].id = val;

		ret = of_property_read_string(child, "reg-names", &name);
		if (ret) {
			pr_err("%s: Unable to find reg-names key, ret=%d", __func__, ret);
			goto out;
		}
		pdata->heaps[i].name = name;

		ret = of_property_read_u32(child, "sprd,ion-heap-type", &type);
		if (ret) {
			pr_err("%s: Unable to find ion-heap-type key, ret=%d", __func__, ret);
			goto out;
		}
		pdata->heaps[i].type = type;

		ret = of_property_read_u32_array(child, "sprd,ion-heap-mem",
				out_values, 2);
		if (!ret) {
			pdata->heaps[i].base = out_values[0];
			pdata->heaps[i].size = out_values[1];
		}

		pr_info("%s: heaps[%d]: %s type: %d base: %lu size %u\n",
				__func__, i, pdata->heaps[i].name, pdata->heaps[i].type,
				pdata->heaps[i].base, pdata->heaps[i].size);
		++i;
	}
Example #23
static int esd_fg_init(struct lcd_info *lcd)
{
	struct device *dev;
	struct device_node *np;
	struct platform_device *pdev;
	enum of_gpio_flags flags;
	unsigned int irqf_type = IRQF_TRIGGER_RISING;
	unsigned int active_level = 1;
	int ret = 0;
	lcd->esd_fg_gpio = -EINVAL;

	if (!lcd->connected)
		return ret;

	np = of_parse_phandle(lcd->dsim->dev->of_node, "lcd_info", 0);
	pdev = of_platform_device_create(np, NULL, lcd->dsim->dev);
	dev = &pdev->dev;

	lcd->esd_fg_gpio = of_get_named_gpio_flags(np, "gpio-tcon-int", 0, &flags);
	if (lcd->esd_fg_gpio < 0) {
		dev_err(&lcd->ld->dev, "failed to get proper gpio number\n");
		return -EINVAL;
	}

	if (flags & OF_GPIO_ACTIVE_LOW) {
		irqf_type = IRQF_TRIGGER_FALLING;
		active_level = 0;
	}

	if (gpio_get_value(lcd->esd_fg_gpio) == active_level) {
		dev_err(&lcd->ld->dev, "%s: gpio(%d) is already %s\n",
			__func__, lcd->esd_fg_gpio, active_level ? "high" : "low");
		return ret;
	}

	lcd->esd_fg_pins = devm_pinctrl_get(dev);
	if (IS_ERR(lcd->esd_fg_pins)) {
		dev_err(&lcd->ld->dev, "failed to get pinctrl\n");
		return -EINVAL;
	}

	lcd->esd_fg_pins_state[0] = pinctrl_lookup_state(lcd->esd_fg_pins, "errfg_off");
	if (IS_ERR(lcd->esd_fg_pins_state[0])) {
		dev_err(&lcd->ld->dev, "failed to get proper errfg off pinctrl\n");
		return -EINVAL;
	}

	lcd->esd_fg_pins_state[1] = pinctrl_lookup_state(lcd->esd_fg_pins, "errfg_on");
	if (IS_ERR(lcd->esd_fg_pins_state[1])) {
		dev_err(&lcd->ld->dev, "failed to get proper errfg on pinctrl\n");
		return -EINVAL;
	}

	lcd->esd_fg_irq = gpio_to_irq(lcd->esd_fg_gpio);
	if (!lcd->esd_fg_irq) {
		dev_err(&lcd->ld->dev, "failed to get proper irq number\n");
		return -EINVAL;
	}

	spin_lock_init(&lcd->esd_fg_lock);

	INIT_DELAYED_WORK(&lcd->esd_fg, esd_fg_work);

	irq_set_status_flags(lcd->esd_fg_irq, IRQ_NOAUTOEN);

	ret = request_irq(lcd->esd_fg_irq, esd_fg_isr, irqf_type, "esd_fg", lcd);
	if (ret) {
		dev_err(&lcd->ld->dev, "esd_fg irq request failed\n");
		return ret;
	}

	esd_fg_enable(lcd, 1);

	dev_info(&lcd->ld->dev, "%s: gpio(%d) is active %s(%d), %s edge\n",
		__func__, lcd->esd_fg_gpio, active_level ? "high" : "low",
		gpio_get_value(lcd->esd_fg_gpio),
		(irqf_type == IRQF_TRIGGER_RISING) ? "rising" : "falling");

	return ret;
}
Example #24
/*
 * Probe routine for each detected JobR subsystem. It assumes that
 * property detection was picked up externally.
 */
int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
		  int ring)
{
	struct device *ctrldev, *jrdev;
	struct platform_device *jr_pdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	u32 *jroffset;
	int error;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);

	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
			 GFP_KERNEL);
	if (jrpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
			ring);
		return -ENOMEM;
	}
	jrpriv->parentdev = ctrldev; /* point back to parent */
	jrpriv->ridx = ring; /* save ring identity relative to detection */

	/*
	 * Derive a pointer to the detected JobRs regs
	 * Driver has already iomapped the entire space, we just
	 * need to add in the offset to this JobR. Don't know if I
	 * like this long-term, but it'll run
	 */
	jroffset = (u32 *)of_get_property(np, "reg", NULL);
	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
							 + *jroffset);

	/* Build a local dev for each detected queue */
	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
	if (jr_pdev == NULL) {
		kfree(jrpriv);
		return -EINVAL;
	}
	jrdev = &jr_pdev->dev;
	dev_set_drvdata(jrdev, jrpriv);
	ctrlpriv->jrdev[ring] = jrdev;

	if (sizeof(dma_addr_t) == sizeof(u64))
		if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
			dma_set_mask(jrdev, DMA_BIT_MASK(40));
		else
			dma_set_mask(jrdev, DMA_BIT_MASK(36));
	else
		dma_set_mask(jrdev, DMA_BIT_MASK(32));

	/* Identify the interrupt */
	jrpriv->irq = of_irq_to_resource(np, 0, NULL);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		kfree(jrpriv);
		return error;
	}

	return error;
}