static int __devinit msm_rng_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct msm_rng_device *msm_rng_dev = NULL;
	void __iomem *base = NULL;
	int error = 0;
	int ret = 0;
	struct device *dev;

	struct msm_bus_scale_pdata *qrng_platform_support = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "invalid address\n");
		error = -EFAULT;
		goto err_exit;
	}

	msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
	if (!msm_rng_dev) {
		dev_err(&pdev->dev, "cannot allocate memory\n");
		error = -ENOMEM;
		goto err_exit;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		error = -ENOMEM;
		goto err_iomap;
	}
	msm_rng_dev->base = base;

	msm_rng_dev->drbg_ctx = kzalloc(sizeof(struct fips_drbg_ctx_s),
					GFP_KERNEL);
	if (!msm_rng_dev->drbg_ctx) {
		dev_err(&pdev->dev, "cannot allocate memory\n");
		error = -ENOMEM;
		goto err_clk_get;
	}

	/* create a handle for clock control */
	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
					"qcom,msm-rng-iface-clk")))
		msm_rng_dev->prng_clk = clk_get(&pdev->dev,
							"iface_clk");
	else
		msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(msm_rng_dev->prng_clk)) {
		dev_err(&pdev->dev, "failed to register clock source\n");
		error = -EPERM;
		goto err_clk_get;
	}

	/* save away pdev and register driver data */
	msm_rng_dev->pdev = pdev;
	platform_set_drvdata(pdev, msm_rng_dev);

	if (pdev->dev.of_node) {
		/* Register bus client */
		qrng_platform_support = msm_bus_cl_get_pdata(pdev);
		msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
						qrng_platform_support);
		msm_rng_device_info.qrng_perf_client =
					msm_rng_dev->qrng_perf_client;
		if (!msm_rng_dev->qrng_perf_client)
			pr_err("Unable to register bus client\n");
	}

	/* Enable rng h/w */
	error = msm_rng_enable_hw(msm_rng_dev);

	if (error)
		goto rollback_clk;

	/* register with hwrng framework */
	msm_rng.priv = (unsigned long) msm_rng_dev;
	error = hwrng_register(&msm_rng);
	if (error) {
		dev_err(&pdev->dev, "failed to register hwrng\n");
		error = -EPERM;
		goto rollback_clk;
	}
	ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);

	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
	if (IS_ERR(msm_rng_class)) {
		pr_err("class_create failed\n");
		error = PTR_ERR(msm_rng_class);
		goto unregister_chrdev;
	}

	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
				NULL, "msm-rng");
	if (IS_ERR(dev)) {
		pr_err("Device create failed\n");
		error = PTR_ERR(dev);
		goto unregister_chrdev;
	}
	cdev_init(&msm_rng_cdev, &msm_rng_fops);

	sema_init(&msm_rng_dev->drbg_sem, 1);

	_first_msm_drbg_init(msm_rng_dev);

	return error;

unregister_chrdev:
	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
rollback_clk:
	clk_put(msm_rng_dev->prng_clk);
err_clk_get:
	iounmap(msm_rng_dev->base);
err_iomap:
	kzfree(msm_rng_dev->drbg_ctx);
	kzfree(msm_rng_dev);
err_exit:
	return error;
}
Example no. 2
static int xhci_plat_probe(struct platform_device *pdev)
{
	const struct hc_driver	*driver;
	struct xhci_hcd		*xhci;
	struct resource         *res;
	struct usb_hcd		*hcd;
	int			ret;
	int			irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd_to_bus(hcd)->skip_resume = true;
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		ret = -EBUSY;
		goto put_hcd;
	}

	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto release_mem_region;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto unmap_registers;

	/* USB 2.0 roothub is stored in the platform_device now. */
	hcd = dev_get_drvdata(&pdev->dev);
	xhci = hcd_to_xhci(hcd);
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
	/*
	 * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
	 * is called by usb_add_hcd().
	 */
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	phy = usb_get_transceiver();
	/* Register with OTG if present, ignore USB2 OTG using other PHY */
	if (phy && phy->otg && !(phy->flags & ENABLE_SECONDARY_PHY)) {
		dev_dbg(&pdev->dev, "%s otg support available\n", __func__);
		ret = otg_set_host(phy->otg, &hcd->self);
		if (ret) {
			dev_err(&pdev->dev, "%s otg_set_host failed\n",
				__func__);
			usb_put_transceiver(phy);
			goto put_usb3_hcd;
		}
	} else {
		pm_runtime_no_callbacks(&pdev->dev);
	}

	pm_runtime_put(&pdev->dev);

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

unmap_registers:
	iounmap(hcd->regs);

release_mem_region:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
Example no. 3
static int __init mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}
	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	res = request_mem_region(res->start, resource_size(res),
				 pdev->name);
	if (res == NULL)
		return -EBUSY;

	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
	if (host == NULL) {
		ret = -ENOMEM;
		goto err_free_mem_region;
	}

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->mem_res = res;
	host->irq = irq;

	host->use_dma = 1;
	host->dev->dma_mask = &pdata->dma_mask;
	host->dma_ch = -1;

	host->irq = irq;
	host->phys_base = host->mem_res->start;
	host->virt_base = ioremap(res->start, resource_size(res));
	if (!host->virt_base)
		goto err_ioremap;

	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk)) {
		ret = PTR_ERR(host->iclk);
		goto err_free_mmc_host;
	}
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_fclk;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);

			goto err_plat_cleanup;
		}
	}

	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);

	return 0;

err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_fclk:
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
err_free_mmc_host:
	iounmap(host->virt_base);
err_ioremap:
	kfree(host);
err_free_mem_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}
Example no. 4
static int __init exynos_sata_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info pi = ahci_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct exynos_sata *sata;
	struct ata_host *host;
	struct resource *mem;
	int n_ports, i, ret;

	sata = devm_kzalloc(dev, sizeof(*sata), GFP_KERNEL);
	if (!sata) {
		dev_err(dev, "can't alloc sata\n");
		return -EINVAL;
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		ret = -ENOMEM;
		goto err1;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		ret = -EINVAL;
		goto err2;
	}

	sata->irq = platform_get_irq(pdev, 0);
	if (sata->irq <= 0) {
		dev_err(dev, "no irq\n");
		ret = -EINVAL;
		goto err2;
	}

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		ret = -ENOMEM;
		goto err2;
	}

	exynos_sata_parse_dt(dev->of_node, sata);
	if (!sata->freq) {
		dev_err(dev, "can't determine sata frequency \n");
		ret = -ENOMEM;
		goto err2;
	}

	sata->sclk = devm_clk_get(dev, "sclk_sata");
	if (IS_ERR(sata->sclk)) {
		dev_err(dev, "failed to get sclk_sata\n");
		ret = PTR_ERR(sata->sclk);
		goto err3;
	}
	clk_enable(sata->sclk);

	clk_set_rate(sata->sclk, sata->freq * MHZ);

	sata->clk = devm_clk_get(dev, "sata");
	if (IS_ERR(sata->clk)) {
		dev_err(dev, "failed to get sata clock\n");
		ret = PTR_ERR(sata->clk);
		goto err4;
	}
	clk_enable(sata->clk);

	/*  Get a gen 3 PHY controller */

	sata->phy = sata_get_phy(SATA_PHY_GENERATION3);
	if (!sata->phy) {
		dev_err(dev, "failed to get sata phy\n");
		ret = -EPROBE_DEFER;
		goto err5;
	}

	/* Initialize the controller */

	ret = sata_init_phy(sata->phy);
	if (ret < 0) {
		dev_err(dev, "failed to initialize sata phy\n");
		goto err6;
	}

	ahci_save_initial_config(dev, hpriv, 0, 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		ret = -ENOMEM;
		goto err7;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		pr_info("ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	ret = ahci_reset_controller(host);
	if (ret)
		goto err7;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	ret = ata_host_activate(host, sata->irq, ahci_interrupt, IRQF_SHARED,
				&ahci_platform_sht);
	if (ret)
		goto err7;

	platform_set_drvdata(pdev, sata);

	return 0;

 err7:
	sata_shutdown_phy(sata->phy);

 err6:
	sata_put_phy(sata->phy);

 err5:
	clk_disable(sata->clk);
	devm_clk_put(dev, sata->clk);

 err4:
	clk_disable(sata->sclk);
	devm_clk_put(dev, sata->sclk);

 err3:
	devm_iounmap(dev, hpriv->mmio);

 err2:
	devm_kfree(dev, hpriv);

 err1:
	devm_kfree(dev, sata);

	return ret;
}
Example no. 5
static int omap4_keypad_probe(struct platform_device *pdev)
{
	struct omap4_keypad *keypad_data;
	struct input_dev *input_dev;
	struct resource *res;
	unsigned int max_keys;
	int rev;
	int irq;
	int error;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no base address specified\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no keyboard irq assigned\n");
		return -EINVAL;
	}

	keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
	if (!keypad_data) {
		dev_err(&pdev->dev, "keypad_data memory allocation failed\n");
		return -ENOMEM;
	}

	keypad_data->irq = irq;

	error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
	if (error)
		return error;

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "can't request mem region\n");
		error = -EBUSY;
		goto err_free_keypad;
	}

	keypad_data->base = ioremap(res->start, resource_size(res));
	if (!keypad_data->base) {
		dev_err(&pdev->dev, "can't ioremap mem resource\n");
		error = -ENOMEM;
		goto err_release_mem;
	}


	/*
	 * Enable clocks for the keypad module so that we can read
	 * revision register.
	 */
	pm_runtime_enable(&pdev->dev);
	error = pm_runtime_get_sync(&pdev->dev);
	if (error) {
		dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
		goto err_unmap;
	}
	rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
	rev &= 0x03 << 30;
	rev >>= 30;
	switch (rev) {
	case KBD_REVISION_OMAP4:
		keypad_data->reg_offset = 0x00;
		keypad_data->irqreg_offset = 0x00;
		break;
	case KBD_REVISION_OMAP5:
		keypad_data->reg_offset = 0x10;
		keypad_data->irqreg_offset = 0x0c;
		break;
	default:
		dev_err(&pdev->dev,
			"Keypad reports unsupported revision %d", rev);
		error = -EINVAL;
		goto err_pm_put_sync;
	}

	/* input device allocation */
	keypad_data->input = input_dev = input_allocate_device();
	if (!input_dev) {
		error = -ENOMEM;
		goto err_pm_put_sync;
	}

	input_dev->name = pdev->name;
	input_dev->dev.parent = &pdev->dev;
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0001;

	input_dev->open = omap4_keypad_open;
	input_dev->close = omap4_keypad_close;

	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
	if (!keypad_data->no_autorepeat)
		__set_bit(EV_REP, input_dev->evbit);

	input_set_drvdata(input_dev, keypad_data);

	keypad_data->row_shift = get_count_order(keypad_data->cols);
	max_keys = keypad_data->rows << keypad_data->row_shift;
	keypad_data->keymap = kzalloc(max_keys * sizeof(keypad_data->keymap[0]),
				      GFP_KERNEL);
	if (!keypad_data->keymap) {
		dev_err(&pdev->dev, "Not enough memory for keymap\n");
		error = -ENOMEM;
		goto err_free_input;
	}

	error = matrix_keypad_build_keymap(NULL, NULL,
					   keypad_data->rows, keypad_data->cols,
					   keypad_data->keymap, input_dev);
	if (error) {
		dev_err(&pdev->dev, "failed to build keymap\n");
		goto err_free_keymap;
	}

	error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
				     omap4_keypad_irq_thread_fn, 0,
				     "omap4-keypad", keypad_data);
	if (error) {
		dev_err(&pdev->dev, "failed to register interrupt\n");
		goto err_free_input;
	}

	device_init_wakeup(&pdev->dev, true);
	pm_runtime_put_sync(&pdev->dev);

	error = input_register_device(keypad_data->input);
	if (error < 0) {
		dev_err(&pdev->dev, "failed to register input device\n");
		goto err_pm_disable;
	}

	platform_set_drvdata(pdev, keypad_data);
	return 0;

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
	free_irq(keypad_data->irq, keypad_data);
err_free_keymap:
	kfree(keypad_data->keymap);
err_free_input:
	input_free_device(input_dev);
err_pm_put_sync:
	pm_runtime_put_sync(&pdev->dev);
err_unmap:
	iounmap(keypad_data->base);
err_release_mem:
	release_mem_region(res->start, resource_size(res));
err_free_keypad:
	kfree(keypad_data);
	return error;
}
static int __devinit c_can_plat_probe(struct platform_device *pdev)
{
	int ret;
	void __iomem *addr;
	struct net_device *dev;
	struct c_can_priv *priv;
	const struct platform_device_id *id;
	struct resource *mem;
	int irq;
	struct clk *clk;

	/* get the appropriate clk */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "no clock defined\n");
		ret = -ENODEV;
		goto exit;
	}

	/* get the platform data */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq <= 0) {
		ret = -ENODEV;
		goto exit_free_clk;
	}

	if (!request_mem_region(mem->start, resource_size(mem),
				KBUILD_MODNAME)) {
		dev_err(&pdev->dev, "resource unavailable\n");
		ret = -ENODEV;
		goto exit_free_clk;
	}

	addr = ioremap(mem->start, resource_size(mem));
	if (!addr) {
		dev_err(&pdev->dev, "failed to map can port\n");
		ret = -ENOMEM;
		goto exit_release_mem;
	}

	/* allocate the c_can device */
	dev = alloc_c_can_dev();
	if (!dev) {
		ret = -ENOMEM;
		goto exit_iounmap;
	}

	priv = netdev_priv(dev);
	id = platform_get_device_id(pdev);
	switch (id->driver_data) {
	case C_CAN_DEVTYPE:
		priv->regs = reg_map_c_can;
		switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
		case IORESOURCE_MEM_32BIT:
			priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
			priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
			break;
		case IORESOURCE_MEM_16BIT:
		default:
			priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
			priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
			break;
		}
		break;
	case D_CAN_DEVTYPE:
		priv->regs = reg_map_d_can;
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
		priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
		priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
		break;
	default:
		ret = -EINVAL;
		goto exit_free_device;
	}

	dev->irq = irq;
	priv->base = addr;
	priv->can.clock.freq = clk_get_rate(clk);
	priv->priv = clk;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_c_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto exit_free_device;
	}

	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
		 KBUILD_MODNAME, priv->base, dev->irq);
	return 0;

exit_free_device:
	platform_set_drvdata(pdev, NULL);
	free_c_can_dev(dev);
exit_iounmap:
	iounmap(addr);
exit_release_mem:
	release_mem_region(mem->start, resource_size(mem));
exit_free_clk:
	clk_put(clk);
exit:
	dev_err(&pdev->dev, "probe failed\n");

	return ret;
}
Example no. 7
static int mddi_probe(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;
	struct platform_device *mdp_dev = NULL;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size;
	u32 clk_rate;

	if ((pdev->id == 0) && (pdev->num_resources >= 0)) {
		mddi_pdata = pdev->dev.platform_data;

		size =  resource_size(&pdev->resource[0]);
		msm_pmdh_base =  ioremap(pdev->resource[0].start, size);

		MSM_FB_INFO("primary mddi base phy_addr = 0x%x virt = 0x%x\n",
				pdev->resource[0].start, (int) msm_pmdh_base);

		if (unlikely(!msm_pmdh_base))
			return -ENOMEM;

		if (mddi_pdata && mddi_pdata->mddi_power_save)
			mddi_pdata->mddi_power_save(1);

		mddi_resource_initialized = 1;
		return 0;
	}

	if (!mddi_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	mdp_dev = platform_device_alloc("mdp", pdev->id);
	if (!mdp_dev)
		return -ENOMEM;

	/*
	 * link to the latest pdev
	 */
	mfd->pdev = mdp_dev;
	mfd->dest = DISPLAY_LCD;

	/*
	 * alloc panel device data
	 */
	if (platform_device_add_data
	    (mdp_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mddi_probe: platform_device_add_data failed!\n");
		platform_device_put(mdp_dev);
		return -ENOMEM;
	}
	/*
	 * data chain
	 */
	pdata = mdp_dev->dev.platform_data;
	pdata->on = mddi_on;
	pdata->off = mddi_off;
	pdata->next = pdev;
	pdata->clk_func = pmdh_clk_func;
	/*
	 * get/set panel specific fb info
	 */
	mfd->panel_info = pdata->panel_info;

	if (mfd->index == 0)
		mfd->fb_imgType = MSMFB_DEFAULT_TYPE;
	else
		mfd->fb_imgType = MDP_RGB_565;

#ifdef CONFIG_MACH_ES209RA
	clk_rate = mfd->panel_info.clk_rate;
#else
	clk_rate = mfd->panel_info.clk_max;
#endif
	if (mddi_pdata &&
	    mddi_pdata->mddi_sel_clk &&
	    mddi_pdata->mddi_sel_clk(&clk_rate))
			printk(KERN_ERR
			  "%s: can't select mddi io clk targate rate = %d\n",
			  __func__, clk_rate);

	if (clk_set_max_rate(mddi_clk, clk_rate) < 0)
		printk(KERN_ERR "%s: clk_set_max_rate failed\n", __func__);
	mfd->panel_info.clk_rate = mfd->panel_info.clk_min;

	if (!mddi_client_type)
		mddi_client_type = mfd->panel_info.lcd.rev;
	else if (!mfd->panel_info.lcd.rev)
		printk(KERN_ERR
		"%s: mddi client is trying to revert back to type 1	!!!\n",
		__func__);

	/*
	 * set driver data
	 */
	platform_set_drvdata(mdp_dev, mfd);
	rc = pm_runtime_set_active(&pdev->dev);
	if (rc < 0)
		printk(KERN_ERR "pm_runtime: fail to set active\n");

	rc = 0;
	pm_runtime_enable(&pdev->dev);
	/*
	 * register in mdp driver
	 */
	rc = platform_device_add(mdp_dev);
	if (rc)
		goto mddi_probe_err;

	pdev_list[pdev_list_cnt++] = pdev;

#ifdef CONFIG_HAS_EARLYSUSPEND
	mfd->mddi_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	mfd->mddi_early_suspend.suspend = mddi_early_suspend;
	mfd->mddi_early_suspend.resume = mddi_early_resume;
	register_early_suspend(&mfd->mddi_early_suspend);
#endif

	return 0;

mddi_probe_err:
	platform_device_put(mdp_dev);
	return rc;
}
static int __devinit pil_modem_driver_probe(struct platform_device *pdev)
{
	struct modem_data *drv;
	struct resource *res;
	struct pil_desc *desc;
	int ret;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	platform_set_drvdata(pdev, drv);

	drv->irq = platform_get_irq(pdev, 0);
	if (drv->irq < 0)
		return drv->irq;

	drv->xo = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(drv->xo))
		return PTR_ERR(drv->xo);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	drv->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!drv->base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	drv->wdog = devm_request_and_ioremap(&pdev->dev, res);
	if (!drv->wdog)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res)
		return -EINVAL;

	drv->cbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!drv->cbase)
		return -ENOMEM;

	desc = &drv->pil_desc;
	desc->name = "modem";
	desc->dev = &pdev->dev;
	desc->owner = THIS_MODULE;
	desc->proxy_timeout = 10000;

	if (pas_supported(PAS_MODEM) > 0) {
		desc->ops = &pil_modem_ops_trusted;
		dev_info(&pdev->dev, "using secure boot\n");
	} else {
		desc->ops = &pil_modem_ops;
		dev_info(&pdev->dev, "using non-secure boot\n");
	}
	ret = pil_desc_init(desc);
	if (ret)
		return ret;

	drv->notifier.notifier_call = modem_notif_handler;
	ret = modem_register_notifier(&drv->notifier);
	if (ret)
		goto err_notify;

	drv->subsys_desc.name = "modem";
	drv->subsys_desc.depends_on = "adsp";
	drv->subsys_desc.dev = &pdev->dev;
	drv->subsys_desc.owner = THIS_MODULE;
	drv->subsys_desc.shutdown = modem_shutdown;
	drv->subsys_desc.powerup = modem_powerup;
	drv->subsys_desc.ramdump = modem_ramdump;
	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;

	INIT_WORK(&drv->fatal_work, modem_fatal_fn);
	INIT_DELAYED_WORK(&drv->unlock_work, modem_unlock_timeout);

	drv->subsys = subsys_register(&drv->subsys_desc);
	if (IS_ERR(drv->subsys)) {
		ret = PTR_ERR(drv->subsys);
		goto err_subsys;
	}

	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
	if (!drv->ramdump_dev) {
		ret = -ENOMEM;
		goto err_ramdump;
	}

	scm_pas_init(MSM_BUS_MASTER_SPS);

	ret = devm_request_irq(&pdev->dev, drv->irq, modem_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "modem_watchdog", drv);
	if (ret)
		goto err_irq;
	disable_irq(drv->irq);
	return 0;

err_irq:
	destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
	subsys_unregister(drv->subsys);
err_subsys:
	modem_unregister_notifier(&drv->notifier);
err_notify:
	pil_desc_release(desc);
	return ret;
}
Example no. 9
/*
 * Set up a CCU.  Call the provided ccu_clks_setup callback to
 * initialize the array of clocks provided by the CCU.
 */
void __init kona_dt_ccu_setup(struct device_node *node,
			int (*ccu_clks_setup)(struct ccu_data *))
{
	struct ccu_data *ccu;
	struct resource res = { 0 };
	resource_size_t range;
	int ret;

	ccu = kzalloc(sizeof(*ccu), GFP_KERNEL);
	if (ccu)
		ccu->name = kstrdup(node->name, GFP_KERNEL);
	if (!ccu || !ccu->name) {
		pr_err("%s: unable to allocate CCU struct for %s\n",
			__func__, node->name);
		kfree(ccu);

		return;
	}

	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		pr_err("%s: no valid CCU registers found for %s\n", __func__,
			node->name);
		goto out_err;
	}

	range = resource_size(&res);
	if (range > (resource_size_t)U32_MAX) {
		pr_err("%s: address range too large for %s\n", __func__,
			node->name);
		goto out_err;
	}

	ccu->range = (u32)range;
	ccu->base = ioremap(res.start, ccu->range);
	if (!ccu->base) {
		pr_err("%s: unable to map CCU registers for %s\n", __func__,
			node->name);
		goto out_err;
	}

	spin_lock_init(&ccu->lock);
	INIT_LIST_HEAD(&ccu->links);
	ccu->node = of_node_get(node);

	list_add_tail(&ccu->links, &ccu_list);

	/* Set up clocks array (in ccu->data) */
	if (ccu_clks_setup(ccu))
		goto out_err;

	ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->data);
	if (ret) {
		pr_err("%s: error adding ccu %s as provider (%d)\n", __func__,
				node->name, ret);
		goto out_err;
	}

	if (!kona_ccu_init(ccu))
		pr_err("Broadcom %s initialization had errors\n", node->name);

	return;
out_err:
	kona_ccu_teardown(ccu);
	pr_err("Broadcom %s setup aborted\n", node->name);
}
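
A minimal usage sketch for the helper above: the ccu_clks_setup callback is expected to populate ccu->data (the onecell data handed to of_clk_add_provider()) with the clocks the CCU exports, and the setup routine is normally wired to a device-tree match via CLK_OF_DECLARE. The callback body, the example_ccu names and the "brcm,example-ccu" compatible string are illustrative assumptions, not taken from a real SoC; struct ccu_data is assumed to come from the driver's own header.

#include <linux/clk-provider.h>
#include <linux/of.h>

/* Hypothetical callback: register this CCU's clocks and stash them in ccu->data. */
static int __init example_ccu_clks_setup(struct ccu_data *ccu)
{
	/* ... create each clk and fill the onecell array ... */
	return 0;
}

static void __init example_ccu_init(struct device_node *node)
{
	kona_dt_ccu_setup(node, example_ccu_clks_setup);
}
CLK_OF_DECLARE(example_ccu, "brcm,example-ccu", example_ccu_init);
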
static int s3c24xx_spi_probe(struct platform_device *pdev)
{
	struct s3c2410_spi_info *pdata;
	struct s3c24xx_spi *hw;
	struct spi_master *master;
	struct resource *res;
	int err = 0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "No memory for spi_master\n");
		err = -ENOMEM;
		goto err_nomem;
	}

	hw = spi_master_get_devdata(master);
	memset(hw, 0, sizeof(struct s3c24xx_spi));

	hw->master = master;
	hw->pdata = pdata = dev_get_platdata(&pdev->dev);
	hw->dev = &pdev->dev;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "No platform data supplied\n");
		err = -ENOENT;
		goto err_no_pdata;
	}

	platform_set_drvdata(pdev, hw);
	init_completion(&hw->done);

	/* initialise fiq handler */

	s3c24xx_spi_initfiq(hw);

	/* setup the master state. */

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->num_chipselect = hw->pdata->num_cs;
	master->bus_num = pdata->bus_num;

	/* setup the state for the bitbang driver */

	hw->bitbang.master         = hw->master;
	hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer;
	hw->bitbang.chipselect     = s3c24xx_spi_chipsel;
	hw->bitbang.txrx_bufs      = s3c24xx_spi_txrx;

	hw->master->setup  = s3c24xx_spi_setup;
	hw->master->cleanup = s3c24xx_spi_cleanup;

	dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);

	/* find and map our resources */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		err = -ENOENT;
		goto err_no_iores;
	}

	hw->ioarea = request_mem_region(res->start, resource_size(res),
					pdev->name);

	if (hw->ioarea == NULL) {
		dev_err(&pdev->dev, "Cannot reserve region\n");
		err = -ENXIO;
		goto err_no_iores;
	}

	hw->regs = ioremap(res->start, resource_size(res));
	if (hw->regs == NULL) {
		dev_err(&pdev->dev, "Cannot map IO\n");
		err = -ENXIO;
		goto err_no_iomap;
	}

	hw->irq = platform_get_irq(pdev, 0);
	if (hw->irq < 0) {
		dev_err(&pdev->dev, "No IRQ specified\n");
		err = -ENOENT;
		goto err_no_irq;
	}

	err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw);
	if (err) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		goto err_no_irq;
	}

	hw->clk = clk_get(&pdev->dev, "spi");
	if (IS_ERR(hw->clk)) {
		dev_err(&pdev->dev, "No clock for device\n");
		err = PTR_ERR(hw->clk);
		goto err_no_clk;
	}

	/* setup any gpio we can */

	if (!pdata->set_cs) {
		if (pdata->pin_cs < 0) {
			dev_err(&pdev->dev, "No chipselect pin\n");
			err = -EINVAL;
			goto err_register;
		}

		err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev));
		if (err) {
			dev_err(&pdev->dev, "Failed to get gpio for cs\n");
			goto err_register;
		}

		hw->set_cs = s3c24xx_spi_gpiocs;
		gpio_direction_output(pdata->pin_cs, 1);
	} else
		hw->set_cs = pdata->set_cs;

	s3c24xx_spi_initialsetup(hw);

	/* register our spi controller */

	err = spi_bitbang_start(&hw->bitbang);
	if (err) {
		dev_err(&pdev->dev, "Failed to register SPI master\n");
		goto err_register;
	}

	return 0;

 err_register:
	if (hw->set_cs == s3c24xx_spi_gpiocs)
		gpio_free(pdata->pin_cs);

	clk_disable(hw->clk);
	clk_put(hw->clk);

 err_no_clk:
	free_irq(hw->irq, hw);

 err_no_irq:
	iounmap(hw->regs);

 err_no_iomap:
	release_resource(hw->ioarea);
	kfree(hw->ioarea);

 err_no_iores:
 err_no_pdata:
	spi_master_put(hw->master);

 err_nomem:
	return err;
}
static __devinit int tegra_i2s_platform_probe(struct platform_device *pdev)
{
	struct tegra_i2s *i2s;
	struct resource *mem, *memregion, *dmareq;
	u32 of_dma[2];
	u32 dma_ch;
	int ret;

	i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra_i2s), GFP_KERNEL);
	if (!i2s) {
		dev_err(&pdev->dev, "Can't allocate tegra_i2s\n");
		ret = -ENOMEM;
		goto err;
	}
	dev_set_drvdata(&pdev->dev, i2s);

	i2s->dai = tegra_i2s_dai_template;
	i2s->dai.name = dev_name(&pdev->dev);

	i2s->clk_i2s = clk_get(&pdev->dev, NULL);
	if (IS_ERR(i2s->clk_i2s)) {
		dev_err(&pdev->dev, "Can't retrieve i2s clock\n");
		ret = PTR_ERR(i2s->clk_i2s);
		goto err;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "No memory resource\n");
		ret = -ENODEV;
		goto err_clk_put;
	}

	dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmareq) {
		if (of_property_read_u32_array(pdev->dev.of_node,
					"nvidia,dma-request-selector",
					of_dma, 2) < 0) {
			dev_err(&pdev->dev, "No DMA resource\n");
			ret = -ENODEV;
			goto err_clk_put;
		}
		dma_ch = of_dma[1];
	} else {
		dma_ch = dmareq->start;
	}

	memregion = devm_request_mem_region(&pdev->dev, mem->start,
					    resource_size(mem), DRV_NAME);
	if (!memregion) {
		dev_err(&pdev->dev, "Memory region already claimed\n");
		ret = -EBUSY;
		goto err_clk_put;
	}

	i2s->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!i2s->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err_clk_put;
	}

	i2s->capture_dma_data.addr = mem->start + TEGRA_I2S_FIFO2;
	i2s->capture_dma_data.wrap = 4;
	i2s->capture_dma_data.width = 32;
	i2s->capture_dma_data.req_sel = dma_ch;

	i2s->playback_dma_data.addr = mem->start + TEGRA_I2S_FIFO1;
	i2s->playback_dma_data.wrap = 4;
	i2s->playback_dma_data.width = 32;
	i2s->playback_dma_data.req_sel = dma_ch;

	i2s->reg_ctrl = TEGRA_I2S_CTRL_FIFO_FORMAT_PACKED;

	ret = snd_soc_register_dai(&pdev->dev, &i2s->dai);
	if (ret) {
		dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
		ret = -ENOMEM;
		goto err_clk_put;
	}

	tegra_i2s_debug_add(i2s);

	return 0;

err_clk_put:
	clk_put(i2s->clk_i2s);
err:
	return ret;
}
/**
 * usb_hcd_ppc_soc_probe - initialize On-Chip HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller.
 *
 * Call this from the HCD's platform_driver probe() routine.
 */
static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
			  struct platform_device *pdev)
{
	int retval;
	struct usb_hcd *hcd;
	struct ohci_hcd	*ohci;
	struct resource *res;
	int irq;

	pr_debug("initializing PPC-SOC USB Controller\n");

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_debug("%s: no irq\n", __FILE__);
		return -ENODEV;
	}
	irq = res->start;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_debug("%s: no reg addr\n", __FILE__);
		return -ENODEV;
	}

	hcd = usb_create_hcd(driver, &pdev->dev, "PPC-SOC USB");
	if (!hcd)
		return -ENOMEM;
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		pr_debug("%s: request_mem_region failed\n", __FILE__);
		retval = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		pr_debug("%s: ioremap failed\n", __FILE__);
		retval = -ENOMEM;
		goto err2;
	}

	ohci = hcd_to_ohci(hcd);
	ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;

#ifdef CONFIG_PPC_MPC52xx
	/* MPC52xx doesn't need frame_no shift */
	ohci->flags |= OHCI_QUIRK_FRAME_NO;
#endif
	ohci_hcd_init(ohci);

	retval = usb_add_hcd(hcd, irq, 0);
	if (retval == 0)
		return retval;

	pr_debug("Removing PPC-SOC USB Controller\n");

	iounmap(hcd->regs);
 err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 err1:
	usb_put_hcd(hcd);
	return retval;
}
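
For context, the helper above takes the hc_driver as an argument, so it is not stored as a probe() pointer directly; the platform_driver glue calls it. A minimal sketch of such a wrapper is shown below, assuming an ohci_ppc_soc_hc_driver definition elsewhere in the same file (the name is illustrative).

static int ohci_hcd_ppc_soc_drv_probe(struct platform_device *pdev)
{
	/* Bail out early if USB support is disabled on the kernel command line. */
	if (usb_disabled())
		return -ENODEV;

	return usb_hcd_ppc_soc_probe(&ohci_ppc_soc_hc_driver, pdev);
}
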
Example no. 13
static int __init ilc_probe(struct platform_device *pdev)
{
	struct stm_plat_ilc3_data *pdata = pdev->dev.platform_data;
	struct resource *memory;
	int memory_size;
	struct ilc *ilc;
	int i;
	int error = 0;

	memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!memory)
		return -EINVAL;
	memory_size = resource_size(memory);

	if (!request_mem_region(memory->start, memory_size, pdev->name))
		return -EBUSY;

	ilc = kzalloc(sizeof(struct ilc), GFP_KERNEL);
	if (!ilc)
		return -ENOMEM;
	platform_set_drvdata(pdev, ilc);

	ilc->clk = clk_get(&pdev->dev, "comms_clk");
	clk_enable(ilc->clk); /* Just to increase the usage_counter */

	spin_lock_init(&ilc->lock);

	ilc->name = dev_name(&pdev->dev);
	ilc->inputs_num = pdata->inputs_num;
	ilc->outputs_num = pdata->outputs_num;
	ilc->first_irq = pdata->first_irq;
	ilc->disable_wakeup = pdata->disable_wakeup;

	ilc->base = ioremap(memory->start, memory_size);
	if (!ilc->base)
		return -ENOMEM;

	ilc->irqs = kzalloc(sizeof(struct ilc_irq) * ilc->inputs_num,
			GFP_KERNEL);
	if (!ilc->irqs)
		return -ENOMEM;

	for (i = 0; i < ilc->inputs_num; ++i) {
		ilc->irqs[i].priority = 7;
		ilc->irqs[i].trigger_mode = ILC_TRIGGERMODE_HIGH;
	}

	ilc->priority = kzalloc(ilc->outputs_num * sizeof(long*), GFP_KERNEL);
	if (!ilc->priority)
		return -ENOMEM;

	for (i = 0; i < ilc->outputs_num; ++i) {
		ilc->priority[i] = kzalloc(sizeof(long) *
				DIV_ROUND_UP(ilc->inputs_num, 32), GFP_KERNEL);
		if (!ilc->priority[i])
			return -ENOMEM;
	}

	ilc_demux_init(pdev);

	list_add(&ilc->list, &ilcs_list);

	/* sysdev doesn't appear to like ids of -1 */
	ilc->sysdev.id = (pdev->id == -1) ? 0 : pdev->id;

	ilc->sysdev.cls = &ilc_sysdev_class;
	error = sysdev_register(&ilc->sysdev);

	return error;
}
static int __init davinci_vc_probe(struct platform_device *pdev)
{
	struct davinci_vc *davinci_vc;
	struct resource *res, *mem;
	struct mfd_cell *cell = NULL;
	int ret;

	davinci_vc = kzalloc(sizeof(struct davinci_vc), GFP_KERNEL);
	if (!davinci_vc) {
		dev_dbg(&pdev->dev,
			    "could not allocate memory for private data\n");
		return -ENOMEM;
	}

	davinci_vc->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(davinci_vc->clk)) {
		dev_dbg(&pdev->dev,
			    "could not get the clock for voice codec\n");
		ret = -ENODEV;
		goto fail1;
	}
	clk_enable(davinci_vc->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no mem resource\n");
		ret = -ENODEV;
		goto fail2;
	}

	davinci_vc->pbase = res->start;
	davinci_vc->base_size = resource_size(res);

	mem = request_mem_region(davinci_vc->pbase, davinci_vc->base_size,
				 pdev->name);
	if (!mem) {
		dev_err(&pdev->dev, "VCIF region already claimed\n");
		ret = -EBUSY;
		goto fail2;
	}

	davinci_vc->base = ioremap(davinci_vc->pbase, davinci_vc->base_size);
	if (!davinci_vc->base) {
		dev_err(&pdev->dev, "can't ioremap mem resource.\n");
		ret = -ENOMEM;
		goto fail3;
	}

	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(&pdev->dev, "no DMA resource\n");
		ret = -ENXIO;
		goto fail4;
	}

	davinci_vc->davinci_vcif.dma_tx_channel = res->start;
	davinci_vc->davinci_vcif.dma_tx_addr =
		(dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_WFIFO);

	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!res) {
		dev_err(&pdev->dev, "no DMA resource\n");
		ret = -ENXIO;
		goto fail4;
	}

	davinci_vc->davinci_vcif.dma_rx_channel = res->start;
	davinci_vc->davinci_vcif.dma_rx_addr =
		(dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_RFIFO);

	davinci_vc->dev = &pdev->dev;
	davinci_vc->pdev = pdev;

	/* Voice codec interface client */
	cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
	cell->name = "davinci-vcif";
	cell->mfd_data = davinci_vc;

	/* Voice codec CQ93VC client */
	cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
	cell->name = "cq93vc-codec";
	cell->mfd_data = davinci_vc;

	ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
			      DAVINCI_VC_CELLS, NULL, 0);
	if (ret != 0) {
		dev_err(&pdev->dev, "fail to register client devices\n");
		goto fail4;
	}

	return 0;

fail4:
	iounmap(davinci_vc->base);
fail3:
	release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
fail2:
	clk_disable(davinci_vc->clk);
	clk_put(davinci_vc->clk);
	davinci_vc->clk = NULL;
fail1:
	kfree(davinci_vc);

	return ret;
}
Example no. 15
static long iTCO_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	int new_options, retval = -EINVAL;
	int new_heartbeat;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static struct watchdog_info ident = {
		.options =		WDIOF_SETTIMEOUT |
					WDIOF_KEEPALIVEPING |
					WDIOF_MAGICCLOSE,
		.firmware_version =	0,
		.identity =		DRV_NAME,
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
	{
		if (get_user(new_options, p))
			return -EFAULT;

		if (new_options & WDIOS_DISABLECARD) {
			iTCO_wdt_stop();
			retval = 0;
		}
		if (new_options & WDIOS_ENABLECARD) {
			iTCO_wdt_keepalive();
			iTCO_wdt_start();
			retval = 0;
		}
		return retval;
	}
	case WDIOC_KEEPALIVE:
		iTCO_wdt_keepalive();
		return 0;

	case WDIOC_SETTIMEOUT:
	{
		if (get_user(new_heartbeat, p))
			return -EFAULT;
		if (iTCO_wdt_set_heartbeat(new_heartbeat))
			return -EINVAL;
		iTCO_wdt_keepalive();
		/* fall through */
	}
	case WDIOC_GETTIMEOUT:
		return put_user(heartbeat, p);
	case WDIOC_GETTIMELEFT:
	{
		int time_left;
		if (iTCO_wdt_get_timeleft(&time_left))
			return -EINVAL;
		return put_user(time_left, p);
	}
	default:
		return -ENOTTY;
	}
}

/*
 *	Kernel Interfaces
 */

static const struct file_operations iTCO_wdt_fops = {
	.owner =		THIS_MODULE,
	.llseek =		no_llseek,
	.write =		iTCO_wdt_write,
	.unlocked_ioctl =	iTCO_wdt_ioctl,
	.open =			iTCO_wdt_open,
	.release =		iTCO_wdt_release,
};

static struct miscdevice iTCO_wdt_miscdev = {
	.minor =	WATCHDOG_MINOR,
	.name =		"watchdog",
	.fops =		&iTCO_wdt_fops,
};

/*
 *	Init & exit routines
 */

static void __devexit iTCO_wdt_cleanup(void)
{
	/* Stop the timer before we leave */
	if (!nowayout)
		iTCO_wdt_stop();

	/* Deregister */
	misc_deregister(&iTCO_wdt_miscdev);

	/* release resources */
	release_region(iTCO_wdt_private.tco_res->start,
			resource_size(iTCO_wdt_private.tco_res));
	release_region(iTCO_wdt_private.smi_res->start,
			resource_size(iTCO_wdt_private.smi_res));
	if (iTCO_wdt_private.iTCO_version == 2) {
		iounmap(iTCO_wdt_private.gcs);
		release_mem_region(iTCO_wdt_private.gcs_res->start,
				resource_size(iTCO_wdt_private.gcs_res));
	}

	iTCO_wdt_private.tco_res = NULL;
	iTCO_wdt_private.smi_res = NULL;
	iTCO_wdt_private.gcs_res = NULL;
	iTCO_wdt_private.gcs = NULL;
}

static int __devinit iTCO_wdt_probe(struct platform_device *dev)
{
	int ret = -ENODEV;
	unsigned long val32;
	struct lpc_ich_info *ich_info = dev->dev.platform_data;

	if (!ich_info)
		goto out;

	spin_lock_init(&iTCO_wdt_private.io_lock);

	iTCO_wdt_private.tco_res =
		platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO);
	if (!iTCO_wdt_private.tco_res)
		goto out;

	iTCO_wdt_private.smi_res =
		platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI);
	if (!iTCO_wdt_private.smi_res)
		goto out;

	iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
	iTCO_wdt_private.dev = dev;
	iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);

	/*
	 * Get the Memory-Mapped GCS register, we need it for the
	 * NO_REBOOT flag (TCO v2).
	 */
	if (iTCO_wdt_private.iTCO_version == 2) {
		iTCO_wdt_private.gcs_res = platform_get_resource(dev,
							IORESOURCE_MEM,
							ICH_RES_MEM_GCS);

		if (!iTCO_wdt_private.gcs_res)
			goto out;

		if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
			resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
			ret = -EBUSY;
			goto out;
		}
		iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
			resource_size(iTCO_wdt_private.gcs_res));
		if (!iTCO_wdt_private.gcs) {
			ret = -EIO;
			goto unreg_gcs;
		}
	}

	/* Check chipset's NO_REBOOT bit */
	if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
		pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
		ret = -ENODEV;	/* Cannot reset NO_REBOOT bit */
		goto unmap_gcs;
	}

	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
	iTCO_wdt_set_NO_REBOOT_bit();

	/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
	if (!request_region(iTCO_wdt_private.smi_res->start,
			resource_size(iTCO_wdt_private.smi_res), dev->name)) {
		pr_err("I/O address 0x%04llx already in use, device disabled\n",
		       SMI_EN);
		ret = -EBUSY;
		goto unmap_gcs;
	}
	if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
		/*
		 * Bit 13: TCO_EN -> 0
		 * Disables TCO logic generating an SMI#
		 */
		val32 = inl(SMI_EN);
		val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
		outl(val32, SMI_EN);
	}

	if (!request_region(iTCO_wdt_private.tco_res->start,
			resource_size(iTCO_wdt_private.tco_res), dev->name)) {
		pr_err("I/O address 0x%04llx already in use, device disabled\n",
		       TCOBASE);
		ret = -EBUSY;
		goto unreg_smi;
	}

	pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
		ich_info->name, ich_info->iTCO_version, TCOBASE);

	/* Clear out the (probably old) status */
	outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
	outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
	outw(0x0004, TCO2_STS);	/* Clear BOOT_STS bit */

	/* Make sure the watchdog is not running */
	iTCO_wdt_stop();

	/* Check that the heartbeat value is within its range;
	   if not, reset to the default */
	if (iTCO_wdt_set_heartbeat(heartbeat)) {
		iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
		pr_info("timeout value out of range, using %d\n", heartbeat);
	}

	ret = misc_register(&iTCO_wdt_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto unreg_tco;
	}

	pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
		heartbeat, nowayout);

	return 0;

unreg_tco:
	release_region(iTCO_wdt_private.tco_res->start,
			resource_size(iTCO_wdt_private.tco_res));
unreg_smi:
	release_region(iTCO_wdt_private.smi_res->start,
			resource_size(iTCO_wdt_private.smi_res));
unmap_gcs:
	if (iTCO_wdt_private.iTCO_version == 2)
		iounmap(iTCO_wdt_private.gcs);
unreg_gcs:
	if (iTCO_wdt_private.iTCO_version == 2)
		release_mem_region(iTCO_wdt_private.gcs_res->start,
				resource_size(iTCO_wdt_private.gcs_res));
out:
	iTCO_wdt_private.tco_res = NULL;
	iTCO_wdt_private.smi_res = NULL;
	iTCO_wdt_private.gcs_res = NULL;
	iTCO_wdt_private.gcs = NULL;

	return ret;
}

static int __devexit iTCO_wdt_remove(struct platform_device *dev)
{
	if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res)
		iTCO_wdt_cleanup();

	return 0;
}

static void iTCO_wdt_shutdown(struct platform_device *dev)
{
	iTCO_wdt_stop();
}

#define iTCO_wdt_suspend NULL
#define iTCO_wdt_resume  NULL

static struct platform_driver iTCO_wdt_driver = {
	.probe          = iTCO_wdt_probe,
	.remove         = __devexit_p(iTCO_wdt_remove),
	.shutdown       = iTCO_wdt_shutdown,
	.suspend        = iTCO_wdt_suspend,
	.resume         = iTCO_wdt_resume,
	.driver         = {
		.owner  = THIS_MODULE,
		.name   = DRV_NAME,
	},
};

static int __init iTCO_wdt_init_module(void)
{
	int err;

	pr_info("Intel TCO WatchDog Timer Driver v%s\n", DRV_VERSION);

	err = platform_driver_register(&iTCO_wdt_driver);
	if (err)
		return err;

	return 0;
}
static int __devinit msm_ssbi_probe(struct platform_device *pdev)
{
	const struct msm_ssbi_platform_data *pdata = pdev->dev.platform_data;
	struct resource *mem_res;
	struct msm_ssbi *ssbi;
	int ret = 0;

	if (!pdata) {
		pr_err("missing platform data\n");
		return -EINVAL;
	}

	pr_debug("%s\n", pdata->slave.name);

	ssbi = kzalloc(sizeof(struct msm_ssbi), GFP_KERNEL);
	if (!ssbi) {
		pr_err("can not allocate ssbi_data\n");
		return -ENOMEM;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		pr_err("missing mem resource\n");
		ret = -EINVAL;
		goto err_get_mem_res;
	}

	ssbi->base = ioremap(mem_res->start, resource_size(mem_res));
	if (!ssbi->base) {
		pr_err("ioremap of 0x%p failed\n", (void *)mem_res->start);
		ret = -EINVAL;
		goto err_ioremap;
	}
	ssbi->dev = &pdev->dev;
	platform_set_drvdata(pdev, ssbi);

	ssbi->controller_type = pdata->controller_type;
	if (ssbi->controller_type == MSM_SBI_CTRL_PMIC_ARBITER) {
		ssbi->read = msm_ssbi_pa_read_bytes;
		ssbi->write = msm_ssbi_pa_write_bytes;
	} else {
		ssbi->read = msm_ssbi_read_bytes;
		ssbi->write = msm_ssbi_write_bytes;
	}

	spin_lock_init(&ssbi->lock);

	ret = msm_ssbi_add_slave(ssbi, &pdata->slave);
	if (ret)
		goto err_ssbi_add_slave;

	return 0;

err_ssbi_add_slave:
	platform_set_drvdata(pdev, NULL);
	iounmap(ssbi->base);
err_ioremap:
err_get_mem_res:
	kfree(ssbi);
	return ret;
}
Example no. 17
static int sh_keysc_probe(struct platform_device *pdev)
{
	struct sh_keysc_priv *priv;
	struct sh_keysc_info *pdata;
	struct resource *res;
	struct input_dev *input;
	int i;
	int irq, error;

	if (!pdev->dev.platform_data) {
		dev_err(&pdev->dev, "no platform data defined\n");
		error = -EINVAL;
		goto err0;
	}

	error = -ENXIO;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq\n");
		goto err0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		error = -ENOMEM;
		goto err0;
	}

	platform_set_drvdata(pdev, priv);
	memcpy(&priv->pdata, pdev->dev.platform_data, sizeof(priv->pdata));
	pdata = &priv->pdata;

	priv->iomem_base = ioremap_nocache(res->start, resource_size(res));
	if (priv->iomem_base == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		error = -ENXIO;
		goto err1;
	}

	priv->input = input_allocate_device();
	if (!priv->input) {
		dev_err(&pdev->dev, "failed to allocate input device\n");
		error = -ENOMEM;
		goto err2;
	}

	input = priv->input;
	input->evbit[0] = BIT_MASK(EV_KEY);

	input->name = pdev->name;
	input->phys = "sh-keysc-keys/input0";
	input->dev.parent = &pdev->dev;

	input->id.bustype = BUS_HOST;
	input->id.vendor = 0x0001;
	input->id.product = 0x0001;
	input->id.version = 0x0100;

	input->keycode = pdata->keycodes;
	input->keycodesize = sizeof(pdata->keycodes[0]);
	input->keycodemax = ARRAY_SIZE(pdata->keycodes);

	error = request_threaded_irq(irq, NULL, sh_keysc_isr, IRQF_ONESHOT,
				     dev_name(&pdev->dev), pdev);
	if (error) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto err3;
	}

	for (i = 0; i < SH_KEYSC_MAXKEYS; i++)
		__set_bit(pdata->keycodes[i], input->keybit);
	__clear_bit(KEY_RESERVED, input->keybit);

	error = input_register_device(input);
	if (error) {
		dev_err(&pdev->dev, "failed to register input device\n");
		goto err4;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	sh_keysc_write(priv, KYCR1, (sh_keysc_mode[pdata->mode].kymd << 8) |
		       pdata->scan_timing);
	sh_keysc_level_mode(priv, 0);

	device_init_wakeup(&pdev->dev, 1);

	return 0;

 err4:
	free_irq(irq, pdev);
 err3:
	input_free_device(input);
 err2:
	iounmap(priv->iomem_base);
 err1:
	kfree(priv);
 err0:
	return error;
}
Example no. 18
	edac_dev->ctl_name = pdata->name;
	edac_dev->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "L2 err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}
Example no. 19
unsigned int cpm_pic_init(void)
{
	struct device_node *np = NULL;
	struct resource res;
	unsigned int sirq = 0, hwirq, eirq;
	int ret;

	pr_debug("cpm_pic_init\n");

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
	if (np == NULL)
		np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
		return sirq;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto end;

	cpic_reg = ioremap(res.start, resource_size(&res));
	if (cpic_reg == NULL)
		goto end;

	sirq = irq_of_parse_and_map(np, 0);
	if (!sirq)
		goto end;

	/* Initialize the CPM interrupt controller. */
	hwirq = (unsigned int)virq_to_hw(sirq);
	out_be32(&cpic_reg->cpic_cicr,
	    (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
		((hwirq/2) << 13) | CICR_HP_MASK);

	out_be32(&cpic_reg->cpic_cimr, 0);

	cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
	if (cpm_pic_host == NULL) {
		printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
		sirq = 0;
		goto end;
	}

	/* Install our own error handler. */
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
	if (np == NULL)
		np = of_find_node_by_type(NULL, "cpm");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
		goto end;
	}

	eirq = irq_of_parse_and_map(np, 0);
	if (!eirq)
		goto end;

	if (setup_irq(eirq, &cpm_error_irqaction))
		printk(KERN_ERR "Could not allocate CPM error IRQ!");

	setbits32(&cpic_reg->cpic_cicr, CICR_IEN);

end:
	of_node_put(np);
	return sirq;
}
Example no. 20
static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci;
	struct mpc85xx_pci_pdata *pdata;
	struct resource r;
	int res = 0;

	if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
	if (!pci)
		return -ENOMEM;

	pdata = pci->pvt_info;
	pdata->name = "mpc85xx_pci_err";
	pdata->irq = NO_IRQ;
	dev_set_drvdata(&op->dev, pci);
	pci->dev = &op->dev;
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;
	pci->dev_name = dev_name(&op->dev);

	if (edac_op_state == EDAC_OPSTATE_POLL)
		pci->edac_check = mpc85xx_pci_check;

	pdata->edac_idx = edac_pci_idx++;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
					pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	orig_pci_err_cap_dr =
	    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);

	/* PCI master abort is expected during config cycles */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);

	orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);

	/* disable master abort reporting */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_pci_isr, IRQF_DISABLED,
				       "[EDAC] PCI err", pci);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to requiest irq %d for "
			       "MPC85xx PCI err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
	debugf3("%s(): success\n", __func__);
	printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");

	return 0;

err2:
	edac_pci_del_device(&op->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&op->dev, mpc85xx_pci_err_probe);
	return res;
}
Example no. 21
static int ehci_msm_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	int ret;

	dev_dbg(&pdev->dev, "ehci_msm proble\n");

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &msm_ehci_dma_mask;
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	hcd = usb_create_hcd(&ehci_msm_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		return  -ENOMEM;
	}

	hcd_to_bus(hcd)->skip_resume = true;

	hcd->irq = platform_get_irq(pdev, 0);
	if (hcd->irq < 0) {
		dev_err(&pdev->dev, "Unable to get IRQ resource\n");
		ret = hcd->irq;
		goto put_hcd;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Unable to get memory resource\n");
		ret = -ENODEV;
		goto put_hcd;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto put_hcd;
	}

	/*
	 * OTG driver takes care of PHY initialization, clock management,
	 * powering up VBUS, mapping of registers address space and power
	 * management.
	 */
	phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(phy)) {
		dev_err(&pdev->dev, "unable to find transceiver\n");
		ret = -ENODEV;
		goto put_hcd;
	}

	ret = otg_set_host(phy->otg, &hcd->self);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register with transceiver\n");
		goto put_hcd;
	}

	hcd->phy = phy;
	device_init_wakeup(&pdev->dev, 1);
	pm_runtime_enable(&pdev->dev);

	msm_bam_set_usb_host_dev(&pdev->dev);

	return 0;

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
Example no. 22
static int __devinit omap4_keypad_probe(struct platform_device *pdev)
{
	const struct omap4_keypad_platform_data *pdata;
	struct omap4_keypad *keypad_data;
	struct input_dev *input_dev;
	struct resource *res;
	resource_size_t size;
	unsigned int row_shift, max_keys;
	int irq;
	int error;

	/* platform data */
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no base address specified\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no keyboard irq assigned\n");
		return -EINVAL;
	}

	if (!pdata->keymap_data) {
		dev_err(&pdev->dev, "no keymap data defined\n");
		return -EINVAL;
	}

	row_shift = get_count_order(pdata->cols);
	max_keys = pdata->rows << row_shift;

	keypad_data = kzalloc(sizeof(struct omap4_keypad) +
				max_keys * sizeof(keypad_data->keymap[0]),
			      GFP_KERNEL);
	if (!keypad_data) {
		dev_err(&pdev->dev, "keypad_data memory allocation failed\n");
		return -ENOMEM;
	}

	size = resource_size(res);

	res = request_mem_region(res->start, size, pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "can't request mem region\n");
		error = -EBUSY;
		goto err_free_keypad;
	}

	keypad_data->base = ioremap(res->start, resource_size(res));
	if (!keypad_data->base) {
		dev_err(&pdev->dev, "can't ioremap mem resource\n");
		error = -ENOMEM;
		goto err_release_mem;
	}

	keypad_data->irq = irq;
	keypad_data->row_shift = row_shift;
	keypad_data->rows = pdata->rows;
	keypad_data->cols = pdata->cols;

	/* input device allocation */
	keypad_data->input = input_dev = input_allocate_device();
	if (!input_dev) {
		error = -ENOMEM;
		goto err_unmap;
	}

	input_dev->name = pdev->name;
	input_dev->dev.parent = &pdev->dev;
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0001;

	input_dev->open = omap4_keypad_open;
	input_dev->close = omap4_keypad_close;

	input_dev->keycode	= keypad_data->keymap;
	input_dev->keycodesize	= sizeof(keypad_data->keymap[0]);
	input_dev->keycodemax	= max_keys;

	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(EV_REP, input_dev->evbit);

	input_set_capability(input_dev, EV_MSC, MSC_SCAN);

	input_set_drvdata(input_dev, keypad_data);

	matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
			input_dev->keycode, input_dev->keybit);

	error = request_irq(keypad_data->irq, omap4_keypad_interrupt,
			     IRQF_TRIGGER_RISING,
			     "omap4-keypad", keypad_data);
	if (error) {
		dev_err(&pdev->dev, "failed to register interrupt\n");
		goto err_free_input;
	}

	pm_runtime_enable(&pdev->dev);

	error = input_register_device(keypad_data->input);
	if (error < 0) {
		dev_err(&pdev->dev, "failed to register input device\n");
		goto err_pm_disable;
	}

	platform_set_drvdata(pdev, keypad_data);
	return 0;

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	free_irq(keypad_data->irq, keypad_data);
err_free_input:
	input_free_device(input_dev);
err_unmap:
	iounmap(keypad_data->base);
err_release_mem:
	release_mem_region(res->start, size);
err_free_keypad:
	kfree(keypad_data);
	return error;
}
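The keymap sizing above (row_shift = get_count_order(cols); max_keys = rows << row_shift) reserves a power-of-two stride per row so a scan code is simply (row << row_shift) + col. A short illustration with hypothetical dimensions, 8 rows by 6 columns; the numbers exist only to make the arithmetic concrete:

#include <linux/log2.h>
#include <linux/input/matrix_keypad.h>

void keymap_sizing_example(void)
{
	unsigned int rows = 8, cols = 6;
	unsigned int row_shift = get_count_order(cols);		/* 3 */
	unsigned int max_keys = rows << row_shift;		/* 64 entries */
	/* Scan code of the key at row 2, column 5 in that keymap. */
	unsigned int code = MATRIX_SCAN_CODE(2, 5, row_shift);	/* 21 */

	(void)max_keys;
	(void)code;
}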
static int __devinit ehci_msm_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	struct msm_usb_host_platform_data *pdata;
	int retval;
	struct msmusb_hcd *mhcd;

	hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return  -ENOMEM;

	hcd->irq = platform_get_irq(pdev, 0);
	if (hcd->irq < 0) {
		usb_put_hcd(hcd);
		return hcd->irq;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		usb_put_hcd(hcd);
		return -ENODEV;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	mhcd = hcd_to_mhcd(hcd);
	spin_lock_init(&mhcd->lock);
	mhcd->in_lpm = 0;
	mhcd->running = 0;
	device_init_wakeup(&pdev->dev, 1);

	pdata = pdev->dev.platform_data;
	if (PHY_TYPE(pdata->phy_info) == USB_PHY_UNDEFINED) {
		usb_put_hcd(hcd);
		return -ENODEV;
	}
	hcd->power_budget = pdata->power_budget;
	mhcd->pdata = pdata;
	INIT_WORK(&mhcd->lpm_exit_work, usb_lpm_exit_w);

	wake_lock_init(&mhcd->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
	pdata->ebi1_clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(pdata->ebi1_clk))
		pdata->ebi1_clk = NULL;
	else
		clk_set_rate(pdata->ebi1_clk, INT_MAX);

#ifdef CONFIG_USB_HOST_NOTIFY
	if (pdata->host_notify) {
		hcd->host_notify = pdata->host_notify;
		hcd->ndev.name = dev_name(&pdev->dev);
		retval = host_notify_dev_register(&hcd->ndev);
		if (retval) {
			dev_err(&pdev->dev, "host_notify_dev_register failed\n");
			return -ENODEV;
		}
	}
#endif

#ifdef CONFIG_USB_SEC_WHITELIST
	if (pdata->sec_whlist_table_num)
		hcd->sec_whlist_table_num = pdata->sec_whlist_table_num;
#endif

	retval = msm_xusb_init_host(pdev, mhcd);

	if (retval < 0) {
		wake_lock_destroy(&mhcd->wlock);
		usb_put_hcd(hcd);
		clk_put(pdata->ebi1_clk);
	}

	pm_runtime_enable(&pdev->dev);

	return retval;
}
static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
	struct uio_dmem_genirq_pdata *pdata = pdev->dev.platform_data;
	struct uio_info *uioinfo = &pdata->uioinfo;
	struct uio_dmem_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		int irq;

		/* alloc uioinfo for one device */
		uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
		if (!uioinfo) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "unable to kmalloc\n");
			goto bad2;
		}
		uioinfo->name = pdev->dev.of_node->name;
		uioinfo->version = "devicetree";

		/* Multiple IRQs are not supported */
		irq = platform_get_irq(pdev, 0);
		if (irq == -ENXIO)
			uioinfo->irq = UIO_IRQ_NONE;
		else
			uioinfo->irq = irq;
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		goto bad0;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		goto bad0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "unable to kmalloc\n");
		goto bad0;
	}

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;
	mutex_init(&priv->alloc_lock);

	if (!uioinfo->irq) {
		ret = platform_get_irq(pdev, 0);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get IRQ\n");
			goto bad0;
		}
		uioinfo->irq = ret;
	}
	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		++uiomem;
	}

	priv->dmem_region_start = i;
	priv->num_dmem_regions = pdata->num_dynamic_regions;

	for (i = 0; i < pdata->num_dynamic_regions; ++i) {
		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" dynamic and fixed memory regions.\n");
			break;
		}
		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = DMEM_MAP_ERROR;
		uiomem->size = pdata->dynamic_region_sizes[i];
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_dmem_genirq_handler;
	uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
	uioinfo->open = uio_dmem_genirq_open;
	uioinfo->release = uio_dmem_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		goto bad1;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
 bad1:
	kfree(priv);
	pm_runtime_disable(&pdev->dev);
 bad0:
	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(uioinfo);
 bad2:
	return ret;
}
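The two comments inside uio_dmem_genirq_probe() describe the split interrupt model: the kernel handler only masks the interrupt, and user space performs the hardware-specific acknowledge and re-arming, with the hardware powered up from the UIO open() path. A minimal user-space sketch of that loop, assuming the device registered as /dev/uio0; read() blocks until the next interrupt and returns a 32-bit event count, and because the driver supplies an irqcontrol() hook, writing a 32-bit 1 re-enables the interrupt:

#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	uint32_t count, enable = 1;
	int fd = open("/dev/uio0", O_RDWR);

	if (fd < 0)
		return 1;

	for (;;) {
		/* Block until the (now masked) interrupt fires. */
		if (read(fd, &count, sizeof(count)) != (ssize_t)sizeof(count))
			break;

		/* ... acknowledge the device via its mapped registers ... */

		/* Ask irqcontrol() to re-enable the interrupt. */
		if (write(fd, &enable, sizeof(enable)) != (ssize_t)sizeof(enable))
			break;
	}

	close(fd);
	return 0;
}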
Example no. 25
static int __devinit s5p_ehci_probe(struct platform_device *pdev)
{
	struct s5p_ehci_platdata *pdata;
	struct s5p_ehci_hcd *s5p_ehci;
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct resource *res;
	int irq;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data defined\n");
		return -EINVAL;
	}

	s5p_ehci = kzalloc(sizeof(struct s5p_ehci_hcd), GFP_KERNEL);
	if (!s5p_ehci)
		return -ENOMEM;
	s5p_ehci->dev = &pdev->dev;

	hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev,
					dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		err = -ENOMEM;
		goto fail_hcd;
	}

	s5p_ehci->hcd = hcd;
	s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");

	if (IS_ERR(s5p_ehci->clk)) {
		dev_err(&pdev->dev, "Failed to get usbhost clock\n");
		err = PTR_ERR(s5p_ehci->clk);
		goto fail_clk;
	}

	err = clk_enable(s5p_ehci->clk);
	if (err)
		goto fail_clken;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get I/O memory\n");
		err = -ENXIO;
		goto fail_io;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(res->start, resource_size(res));
	if (!hcd->regs) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto fail_io;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get IRQ\n");
		err = irq;
		goto fail;
	}

	platform_set_drvdata(pdev, s5p_ehci);

	s5p_ehci_phy_init(pdev);

	ehci = hcd_to_ehci(hcd);
	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = readl(&ehci->caps->hcs_params);

	err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
	if (err) {
		dev_err(&pdev->dev, "Failed to add USB HCD\n");
		goto fail;
	}

	create_ehci_sys_file(ehci);
	s5p_ehci->power_on = 1;

#ifdef CONFIG_USB_SUSPEND
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
#endif
#ifdef CONFIG_MDM_HSIC_PM
/*
	set_host_stat(hsic_pm_dev, POWER_ON);
*/
	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&hcd->self.root_hub->dev, 0);

	pm_runtime_forbid(&pdev->dev);
	enable_periodic(ehci);
#endif
#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB)
	if (num_possible_cpus() > 1) {
		s5p_ehci_irq_no = irq;
		s5p_ehci_irq_cpu = s5p_ehci_cpus[num_possible_cpus() - 1];
		irq_set_affinity(s5p_ehci_irq_no, cpumask_of(s5p_ehci_irq_cpu));
		register_cpu_notifier(&s5p_ehci_cpu_notifier);
	}

	/* for cp enumeration */
	pm_runtime_forbid(&pdev->dev);
	/*HSIC IPC control the ACTIVE_STATE*/
	if (pdata && pdata->noti_host_states)
		pdata->noti_host_states(pdev, S5P_HOST_ON);
#endif

	return 0;

fail:
	iounmap(hcd->regs);
fail_io:
	clk_disable(s5p_ehci->clk);
fail_clken:
	clk_put(s5p_ehci->clk);
fail_clk:
	usb_put_hcd(hcd);
fail_hcd:
	kfree(s5p_ehci);
	return err;
}
Example no. 26
static int ehci_mxc_drv_probe(struct platform_device *pdev)
{
	struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
	struct usb_hcd *hcd;
	struct resource *res;
	int irq, ret;
	unsigned int flags;
	struct ehci_mxc_priv *priv;
	struct device *dev = &pdev->dev;
	struct ehci_hcd *ehci;

	dev_info(&pdev->dev, "initializing i.MX USB Controller\n");

	if (!pdata) {
		dev_err(dev, "No platform data given, bailing out.\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);

	hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
	if (!hcd)
		return -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Found HC with no register addr. Check setup!\n");
		ret = -ENODEV;
		goto err_get_resource;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		dev_dbg(dev, "controller already in use\n");
		ret = -EBUSY;
		goto err_request_mem;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(dev, "error mapping memory\n");
		ret = -EFAULT;
		goto err_ioremap;
	}

	/* enable clocks */
	priv->usbclk = clk_get(dev, "usb");
	if (IS_ERR(priv->usbclk)) {
		ret = PTR_ERR(priv->usbclk);
		goto err_clk;
	}
	clk_enable(priv->usbclk);

	if (!cpu_is_mx35() && !cpu_is_mx25()) {
		priv->ahbclk = clk_get(dev, "usb_ahb");
		if (IS_ERR(priv->ahbclk)) {
			ret = PTR_ERR(priv->ahbclk);
			goto err_clk_ahb;
		}
		clk_enable(priv->ahbclk);
	}

	/* "dr" device has its own clock on i.MX51 */
	if (cpu_is_mx51() && (pdev->id == 0)) {
		priv->phy1clk = clk_get(dev, "usb_phy1");
		if (IS_ERR(priv->phy1clk)) {
			ret = PTR_ERR(priv->phy1clk);
			goto err_clk_phy;
		}
		clk_enable(priv->phy1clk);
	}


	/* call platform specific init function */
	if (pdata->init) {
		ret = pdata->init(pdev);
		if (ret) {
			dev_err(dev, "platform init failed\n");
			goto err_init;
		}
		/* platforms need some time to settle changed IO settings */
		mdelay(10);
	}

	ehci = hcd_to_ehci(hcd);

	/* EHCI registers start at offset 0x100 */
	ehci->caps = hcd->regs + 0x100;
	ehci->regs = hcd->regs + 0x100 +
	    HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));

	/* set up the PORTSCx register */
	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);

	/* is this really needed? */
	msleep(10);

	/* Initialize the transceiver */
	if (pdata->otg) {
		pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
		ret = otg_init(pdata->otg);
		if (ret) {
			dev_err(dev, "unable to init transceiver, probably missing\n");
			ret = -ENODEV;
			goto err_add;
		}
		ret = otg_set_vbus(pdata->otg, 1);
		if (ret) {
			dev_err(dev, "unable to enable vbus on transceiver\n");
			goto err_add;
		}
	}

	priv->hcd = hcd;
	platform_set_drvdata(pdev, priv);

	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
	if (ret)
		goto err_add;

	if (pdata->otg) {
		/*
		 * efikamx and efikasb have some hardware bug which is
		 * preventing usb to work unless CHRGVBUS is set.
		 * It's in violation of USB specs
		 */
		if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
			flags = otg_io_read(pdata->otg, ULPI_OTG_CTRL);
			flags |= ULPI_OTG_CTRL_CHRGVBUS;
			ret = otg_io_write(pdata->otg, flags, ULPI_OTG_CTRL);
			if (ret) {
				dev_err(dev, "unable to set CHRVBUS\n");
				goto err_add;
			}
		}
	}

	return 0;

err_add:
	if (pdata && pdata->exit)
		pdata->exit(pdev);
err_init:
	if (priv->phy1clk) {
		clk_disable(priv->phy1clk);
		clk_put(priv->phy1clk);
	}
err_clk_phy:
	if (priv->ahbclk) {
		clk_disable(priv->ahbclk);
		clk_put(priv->ahbclk);
	}
err_clk_ahb:
	clk_disable(priv->usbclk);
	clk_put(priv->usbclk);
err_clk:
	iounmap(hcd->regs);
err_ioremap:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_request_mem:
err_get_resource:
	kfree(priv);
err_alloc:
	usb_put_hcd(hcd);
	return ret;
}
int cp_watchdog_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	int ret = -EINVAL;
	u32 type;

	cp_watchdog = devm_kzalloc(&pdev->dev,
		sizeof(struct cp_watchdog), GFP_KERNEL);

	if (!cp_watchdog)
		return -ENOMEM;

	if (of_property_read_u32(np, "watchdog-type", &type)) {
		dev_err(&pdev->dev, "%s: no watchdog type defined\n", __func__);
		ret = -EINVAL;
		goto freemem;
	}

	dev_info(&pdev->dev, "%s: watchdog type %d\n", __func__, type);

	switch (type) {
	case cp_wdt_type_wdt_timer:

		cp_watchdog->data = devm_kzalloc(&pdev->dev,
			sizeof(struct wdt_timer), GFP_KERNEL);
		if (!cp_watchdog->data) {
			ret = -ENOMEM;
			goto freemem;
		}
		cp_watchdog->ops = &wdt_timer_ops;
		break;

	case cp_wdt_type_soc_timer:

		cp_watchdog->data = devm_kzalloc(&pdev->dev,
			sizeof(struct soc_timer), GFP_KERNEL);
		if (!cp_watchdog->data) {
			ret = -ENOMEM;
			goto freemem;
		}
		cp_watchdog->ops = &soc_timer_ops;

		if (of_property_read_u32(np, "timer-num",
				&cp_watchdog->timer_num)) {
			dev_err(&pdev->dev,
				"%s: no timer num defined\n", __func__);
			ret = -EINVAL;
			goto freemem;
		}
		if (of_property_read_u32(np, "match-num",
				&cp_watchdog->match_num)) {
			dev_err(&pdev->dev,
				"%s: no match num defined\n", __func__);
			ret = -EINVAL;
			goto freemem;
		}
		dev_info(&pdev->dev,
			"%s: timer-num %u, match-num %u\n", __func__,
			cp_watchdog->timer_num, cp_watchdog->match_num);
		break;

	default:
		dev_err(&pdev->dev, "%s: wrong watchdog type %u\n",
			__func__, type);
		ret = -EINVAL;
		goto freemem;
	}

	cp_watchdog->irq = platform_get_irq(pdev, 0);
	if (cp_watchdog->irq < 0) {
		dev_err(&pdev->dev, "%s: no irq defined\n", __func__);
		ret = -ENXIO;
		goto freemem;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "%s: no iomem defined\n", __func__);
		ret = -ENOMEM;
		goto freemem;
	}

#ifdef REQUEST_MEM_REGION
	if (!devm_request_mem_region(&pdev->dev, res->start,
			resource_size(res), "cp-watchdog")) {
		dev_err(&pdev->dev,
			"%s: can't request region for resource %pR\n",
			__func__, res);
		ret = -EINVAL;
		goto freemem;
	}
#endif

	cp_watchdog->base = devm_ioremap_nocache(&pdev->dev,
		res->start, resource_size(res));
	if (!cp_watchdog->base) {
		dev_err(&pdev->dev, "%s: map res %lx - %lx failed\n", __func__,
			(unsigned long)res->start, (unsigned long)res->end);
		ret = -ENOMEM;
		goto freememreg;
	}

	if (cp_watchdog->ops->init && cp_watchdog->ops->init(cp_watchdog) < 0) {
		pr_err("%s: init watchdog error\n", __func__);
		ret = -ENODEV;
		goto freeiomap;
	}

	platform_set_drvdata(pdev, cp_watchdog);

	dev_info(&pdev->dev, "%s: init watchdog success\n", __func__);

	return 0;

freeiomap:
	devm_iounmap(&pdev->dev, cp_watchdog->base);
freememreg:
#ifdef REQUEST_MEM_REGION
	devm_release_mem_region(&pdev->dev, res->start,
		resource_size(res));
#endif
freemem:
	devm_kfree(&pdev->dev, cp_watchdog->data);
	devm_kfree(&pdev->dev, cp_watchdog);
	cp_watchdog = NULL;
	return ret;
}
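cp_watchdog_probe() picks an operations table (wdt_timer_ops or soc_timer_ops) based on the devicetree "watchdog-type" property and stashes type-specific state in cp_watchdog->data. Those structures are not part of this excerpt; the sketch below is a hypothetical illustration of the shape such an ops table and its dispatch could take, not the driver's real definitions:

#include <linux/io.h>

struct cp_watchdog_sketch;

struct cp_watchdog_sketch_ops {
	int  (*init)(struct cp_watchdog_sketch *wdt);
	void (*enable)(struct cp_watchdog_sketch *wdt);
	void (*disable)(struct cp_watchdog_sketch *wdt);
};

struct cp_watchdog_sketch {
	void __iomem *base;
	const struct cp_watchdog_sketch_ops *ops;
	void *data;	/* wdt_timer- or soc_timer-specific state */
};

int cp_watchdog_sketch_start(struct cp_watchdog_sketch *wdt)
{
	int ret = 0;

	/* Dispatch through whichever ops table probe selected. */
	if (wdt->ops->init)
		ret = wdt->ops->init(wdt);
	if (!ret && wdt->ops->enable)
		wdt->ops->enable(wdt);
	return ret;
}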
Example no. 28
static int __devinit mxs_auart_probe(struct platform_device *pdev)
{
	struct mxs_auart_port *s;
	u32 version;
	int ret = 0;
	struct resource *r;

	s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
	if (!s) {
		ret = -ENOMEM;
		goto out;
	}

	s->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(s->clk)) {
		ret = PTR_ERR(s->clk);
		goto out_free;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		ret = -ENXIO;
		goto out_free_clk;
	}

	s->port.mapbase = r->start;
	s->port.membase = ioremap(r->start, resource_size(r));
	s->port.ops = &mxs_auart_ops;
	s->port.iotype = UPIO_MEM;
	s->port.line = pdev->id < 0 ? 0 : pdev->id;
	s->port.fifosize = 16;
	s->port.uartclk = clk_get_rate(s->clk);
	s->port.type = PORT_IMX;
	s->port.dev = s->dev = get_device(&pdev->dev);

	s->flags = 0;
	s->ctrl = 0;

	s->irq = platform_get_irq(pdev, 0);
	s->port.irq = s->irq;
	ret = request_irq(s->irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s);
	if (ret)
		goto out_free_clk;

	platform_set_drvdata(pdev, s);

	auart_port[pdev->id] = s;

	mxs_auart_reset(&s->port);

	ret = uart_add_one_port(&auart_driver, &s->port);
	if (ret)
		goto out_free_irq;

	version = readl(s->port.membase + AUART_VERSION);
	dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
	       (version >> 24) & 0xff,
	       (version >> 16) & 0xff, version & 0xffff);

	return 0;

out_free_irq:
	auart_port[pdev->id] = NULL;
	free_irq(s->irq, s);
out_free_clk:
	clk_put(s->clk);
out_free:
	kfree(s);
out:
	return ret;
}
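The dev_info() above decodes the AUART_VERSION register into major/minor/step fields. A quick stand-alone illustration of that field split using a made-up register value (0x03010002 is hypothetical, chosen only for the arithmetic):

#include <stdio.h>

int main(void)
{
	unsigned int version = 0x03010002;	/* hypothetical raw value */

	printf("Found APPUART %u.%u.%u\n",
	       (version >> 24) & 0xff,		/* major: 3 */
	       (version >> 16) & 0xff,		/* minor: 1 */
	       version & 0xffff);		/* step:  2 */
	return 0;
}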
Example no. 29
static int opencores_kbd_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	struct opencores_kbd *opencores_kbd;
	struct resource *res;
	int irq, i, error;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "missing board memory resource\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "missing board IRQ resource\n");
		return -EINVAL;
	}

	opencores_kbd = kzalloc(sizeof(*opencores_kbd), GFP_KERNEL);
	input = input_allocate_device();
	if (!opencores_kbd || !input) {
		dev_err(&pdev->dev, "failed to allocate device structures\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	opencores_kbd->addr_res = res;
	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "failed to request I/O memory\n");
		error = -EBUSY;
		goto err_free_mem;
	}

	opencores_kbd->addr = ioremap(res->start, resource_size(res));
	if (!opencores_kbd->addr) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		error = -ENXIO;
		goto err_rel_mem;
	}

	opencores_kbd->input = input;
	opencores_kbd->irq = irq;

	input->name = pdev->name;
	input->phys = "opencores-kbd/input0";
	input->dev.parent = &pdev->dev;

	input_set_drvdata(input, opencores_kbd);

	input->id.bustype = BUS_HOST;
	input->id.vendor = 0x0001;
	input->id.product = 0x0001;
	input->id.version = 0x0100;

	input->keycode = opencores_kbd->keycodes;
	input->keycodesize = sizeof(opencores_kbd->keycodes[0]);
	input->keycodemax = ARRAY_SIZE(opencores_kbd->keycodes);

	__set_bit(EV_KEY, input->evbit);

	for (i = 0; i < ARRAY_SIZE(opencores_kbd->keycodes); i++) {
		/*
		 * OpenCores controller happens to have scancodes match
		 * our KEY_* definitions.
		 */
		opencores_kbd->keycodes[i] = i;
		__set_bit(opencores_kbd->keycodes[i], input->keybit);
	}
	__clear_bit(KEY_RESERVED, input->keybit);

	error = request_irq(irq, &opencores_kbd_isr,
			    IRQF_TRIGGER_RISING, pdev->name, opencores_kbd);
	if (error) {
		dev_err(&pdev->dev, "unable to claim irq %d\n", irq);
		goto err_unmap_mem;
	}

	error = input_register_device(input);
	if (error) {
		dev_err(&pdev->dev, "unable to register input device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, opencores_kbd);

	return 0;

 err_free_irq:
	free_irq(irq, opencores_kbd);
 err_unmap_mem:
	iounmap(opencores_kbd->addr);
 err_rel_mem:
	release_mem_region(res->start, resource_size(res));
 err_free_mem:
	input_free_device(input);
	kfree(opencores_kbd);

	return error;
}
Example no. 30
/**
  @brief wpalDeviceInit provides a mechanism to initialize the DXE
         platform adaptation

  @param  deviceCB:  Implementation-specific device control block

  @see    wpalDeviceClose

  @return SUCCESS if the DXE abstraction was opened
*/
wpt_status wpalDeviceInit
(
   void * devHandle
)
{
   struct device *wcnss_device = (struct device *)devHandle;
   struct resource *wcnss_memory;
   int tx_irq;
   int rx_irq;

   if (NULL != gpEnv) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: invoked  after subsystem initialized",
                 __func__);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if (NULL == wcnss_device) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: invalid device",
                 __func__);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   wcnss_memory = wcnss_wlan_get_memory_map(wcnss_device);
   if (NULL == wcnss_memory) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: WCNSS memory map unavailable",
                 __func__);
      return eWLAN_PAL_STATUS_E_FAILURE;
   }

   tx_irq = wcnss_wlan_get_dxe_tx_irq(wcnss_device);
   if (0 > tx_irq) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: WCNSS TX IRQ unavailable",
                 __func__);
      return eWLAN_PAL_STATUS_E_FAILURE;
   }

   rx_irq = wcnss_wlan_get_dxe_rx_irq(wcnss_device);
   if (0 > rx_irq) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: WCNSS RX IRQ unavailable",
                 __func__);
      return eWLAN_PAL_STATUS_E_FAILURE;
   }

   gpEnv = &gEnv;
   if (NULL == gpEnv) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: memory allocation failure",
                 __func__);
      return eWLAN_PAL_STATUS_E_NOMEM;
   }

   memset(gpEnv, 0, sizeof(*gpEnv));

   gpEnv->wcnss_memory = wcnss_memory;
   gpEnv->tx_irq = tx_irq;
   gpEnv->rx_irq = rx_irq;

   /* note that we don't invoke request_mem_region().
      the memory described by wcnss_memory encompasses the entire
      register space (including BT and FM) and we do not want
      exclusive access to that memory */

   gpEnv->mmio = ioremap(wcnss_memory->start, resource_size(wcnss_memory));

   if (NULL == gpEnv->mmio) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: memory remap failure",
                 __func__);
      goto err_ioremap;
   }

   gpEnv->tx_registered = 0;
   gpEnv->rx_registered = 0;

   /* successfully allocated environment, memory and IRQs */
   return eWLAN_PAL_STATUS_SUCCESS;

 err_ioremap:
   gpEnv = NULL;

   return eWLAN_PAL_STATUS_E_FAILURE;

}
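Because the probe maps the WCNSS register space with a plain ioremap() and deliberately skips request_mem_region() (the region is shared with BT and FM), the teardown referenced by "@see wpalDeviceClose" only has to unmap and clear the module state. The function below is an assumption-laden sketch of that path, not the real wpalDeviceClose(), which may also need to free any registered DXE IRQs:

/* Sketch only: teardown assumed to pair with wpalDeviceInit above. */
void wpalDeviceCloseSketch(void)
{
   if (NULL == gpEnv)
      return;

   if (NULL != gpEnv->mmio)
      iounmap(gpEnv->mmio);

   gpEnv = NULL;
}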