Example #1
File: pxa.c Project: ReneNyffenegger/linux
static int serial_pxa_probe(struct platform_device *dev)
{
	struct uart_pxa_port *sport;
	struct resource *mmres, *irqres;
	int ret;

	mmres = platform_get_resource(dev, IORESOURCE_MEM, 0);
	irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0);
	if (!mmres || !irqres)
		return -ENODEV;

	sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	sport->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(sport->clk)) {
		ret = PTR_ERR(sport->clk);
		goto err_free;
	}

	ret = clk_prepare(sport->clk);
	if (ret) {
		clk_put(sport->clk);
		goto err_free;
	}

	sport->port.type = PORT_PXA;
	sport->port.iotype = UPIO_MEM;
	sport->port.mapbase = mmres->start;
	sport->port.irq = irqres->start;
	sport->port.fifosize = 64;
	sport->port.ops = &serial_pxa_pops;
	sport->port.dev = &dev->dev;
	sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
	sport->port.uartclk = clk_get_rate(sport->clk);

	ret = serial_pxa_probe_dt(dev, sport);
	if (ret > 0)
		sport->port.line = dev->id;
	else if (ret < 0)
		goto err_clk;
	snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1);

	sport->port.membase = ioremap(mmres->start, resource_size(mmres));
	if (!sport->port.membase) {
		ret = -ENOMEM;
		goto err_clk;
	}

	serial_pxa_ports[sport->port.line] = sport;

	uart_add_one_port(&serial_pxa_reg, &sport->port);
	platform_set_drvdata(dev, sport);

	return 0;

 err_clk:
	clk_unprepare(sport->clk);
	clk_put(sport->clk);
 err_free:
	kfree(sport);
	return ret;
}
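A quick note on the pattern above: every successful clk_prepare() must be balanced by clk_unprepare() on both the error path and the teardown path, just as clk_get() is balanced by clk_put(). A minimal sketch of that contract, using a hypothetical foo driver (all names below are illustrative, not taken from the example):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	struct clk *clk;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	ret = clk_prepare(priv->clk);	/* may sleep */
	if (ret)
		goto err_put;

	platform_set_drvdata(pdev, priv);
	return 0;

err_put:
	clk_put(priv->clk);	/* balance clk_get() */
	return ret;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	clk_unprepare(priv->clk);	/* balance clk_prepare() */
	clk_put(priv->clk);		/* balance clk_get() */
	return 0;
}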
Example #2
static int bdisp_probe(struct platform_device *pdev)
{
	struct bdisp_dev *bdisp;
	struct resource *res;
	struct device *dev = &pdev->dev;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	bdisp = devm_kzalloc(dev, sizeof(struct bdisp_dev), GFP_KERNEL);
	if (!bdisp)
		return -ENOMEM;

	bdisp->pdev = pdev;
	bdisp->dev = dev;
	platform_set_drvdata(pdev, bdisp);

	if (dev->of_node)
		bdisp->id = of_alias_get_id(pdev->dev.of_node, BDISP_NAME);
	else
		bdisp->id = pdev->id;

	init_waitqueue_head(&bdisp->irq_queue);
	INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
	bdisp->work_queue = create_workqueue(BDISP_NAME);

	spin_lock_init(&bdisp->slock);
	mutex_init(&bdisp->lock);

	/* get resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdisp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(bdisp->regs)) {
		dev_err(dev, "failed to get regs\n");
		return PTR_ERR(bdisp->regs);
	}

	bdisp->clock = devm_clk_get(dev, BDISP_NAME);
	if (IS_ERR(bdisp->clock)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(bdisp->clock);
	}

	ret = clk_prepare(bdisp->clock);
	if (ret < 0) {
		dev_err(dev, "clock prepare failed\n");
		bdisp->clock = ERR_PTR(-EINVAL);
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "failed to get IRQ resource\n");
		ret = -EINVAL;	/* ret still holds 0 from clk_prepare() */
		goto err_clk;
	}

	ret = devm_request_threaded_irq(dev, res->start, bdisp_irq_handler,
					bdisp_irq_thread, IRQF_ONESHOT,
					pdev->name, bdisp);
	if (ret) {
		dev_err(dev, "failed to install irq\n");
		goto err_clk;
	}

	/* v4l2 register */
	ret = v4l2_device_register(dev, &bdisp->v4l2_dev);
	if (ret) {
		dev_err(dev, "failed to register\n");
		goto err_clk;
	}

	/* Debug */
	ret = bdisp_debugfs_create(bdisp);
	if (ret) {
		dev_err(dev, "failed to create debugfs\n");
		goto err_v4l2;
	}

	/* Power management */
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "failed to set PM\n");
		goto err_dbg;
	}

	/* Continuous memory allocator */
	bdisp->alloc_ctx = vb2_dma_contig_init_ctx(dev);
	if (IS_ERR(bdisp->alloc_ctx)) {
		ret = PTR_ERR(bdisp->alloc_ctx);
		goto err_pm;
	}

	/* Filters */
	if (bdisp_hw_alloc_filters(bdisp->dev)) {
		dev_err(bdisp->dev, "no memory for filters\n");
		ret = -ENOMEM;
		goto err_vb2_dma;
	}

	/* Register */
	ret = bdisp_register_device(bdisp);
	if (ret) {
		dev_err(dev, "failed to register\n");
		goto err_filter;
	}

	dev_info(dev, "%s%d registered as /dev/video%d\n", BDISP_NAME,
		 bdisp->id, bdisp->vdev.num);

	pm_runtime_put(dev);

	return 0;

err_filter:
	bdisp_hw_free_filters(bdisp->dev);
err_vb2_dma:
	vb2_dma_contig_cleanup_ctx(bdisp->alloc_ctx);
err_pm:
	pm_runtime_put(dev);
err_dbg:
	bdisp_debugfs_remove(bdisp);
err_v4l2:
	v4l2_device_unregister(&bdisp->v4l2_dev);
err_clk:
	if (!IS_ERR(bdisp->clock))
		clk_unprepare(bdisp->clock);

	return ret;
}
Example #3
File: core.c Project: 19Dan01/linux
void rsnd_mod_quit(struct rsnd_mod *mod)
{
	if (mod->clk)
		clk_unprepare(mod->clk);
}
Example #4
static void kbase_rk_clk_term(struct kbase_device *kbdev)
{
	struct kbase_rk *kbase_rk = kbdev->platform_context;

	clk_unprepare(kbase_rk->clk);
}
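Example #5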
int vcap_clk_powerup(struct vcap_dev *dev, struct device *ddev,
		unsigned long rate)
{
	int ret = 0;

	dev->vcap_clk = clk_get(ddev, "core_clk");
	if (IS_ERR(dev->vcap_clk)) {
		pr_err("%s: Could not clk_get core_clk\n", __func__);
		dev->vcap_clk = NULL;	/* never clk_put() an ERR_PTR */
		return -EINVAL;
	}

	clk_prepare(dev->vcap_clk);
	ret = clk_enable(dev->vcap_clk);
	if (ret) {
		pr_err("%s: Failed core clk_enable %d\n", __func__, ret);
		goto fail_vcap_clk_unprep;
	}

	rate = clk_round_rate(dev->vcap_clk, rate);
	if ((long)rate < 0) {	/* clk_round_rate() returns a signed long */
		pr_err("%s: Failed core rnd_rate\n", __func__);
		goto fail_vcap_clk;
	}
	ret = clk_set_rate(dev->vcap_clk, rate);
	if (ret < 0) {
		pr_err("%s: Failed core set_rate %d\n", __func__, ret);
		goto fail_vcap_clk;
	}

	dev->vcap_npl_clk = clk_get(ddev, "vcap_npl_clk");
	if (IS_ERR(dev->vcap_npl_clk)) {
		pr_err("%s: Could not clk_get npl\n", __func__);
		dev->vcap_npl_clk = NULL;
		goto fail_vcap_clk;
	}

	clk_prepare(dev->vcap_npl_clk);
	ret = clk_enable(dev->vcap_npl_clk);
	if (ret) {
		pr_err("%s:Failed npl clk_enable %d\n", __func__, ret);
		goto fail_vcap_npl_clk_unprep;
	}

	dev->vcap_p_clk = clk_get(ddev, "iface_clk");
	if (IS_ERR(dev->vcap_p_clk)) {
		pr_err("%s: Could not clk_get pix(AHB)\n", __func__);
		dev->vcap_p_clk = NULL;
		goto fail_vcap_npl_clk;
	}

	clk_prepare(dev->vcap_p_clk);
	ret = clk_enable(dev->vcap_p_clk);
	if (ret) {
		pr_err("%s: Failed pix(AHB) clk_enable %d\n", __func__, ret);
		goto fail_vcap_p_clk_unprep;
	}
	return 0;

fail_vcap_p_clk_unprep:
	clk_unprepare(dev->vcap_p_clk);
	clk_put(dev->vcap_p_clk);
	dev->vcap_p_clk = NULL;

fail_vcap_npl_clk:
	clk_disable(dev->vcap_npl_clk);
fail_vcap_npl_clk_unprep:
	clk_unprepare(dev->vcap_npl_clk);
	clk_put(dev->vcap_npl_clk);
	dev->vcap_npl_clk = NULL;

fail_vcap_clk:
	clk_disable(dev->vcap_clk);
fail_vcap_clk_unprep:
	clk_unprepare(dev->vcap_clk);
	clk_put(dev->vcap_clk);
	dev->vcap_clk = NULL;
	return -EINVAL;
}
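Example #6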
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct iommu_pmon *pmon_info;
	struct msm_iommu_drvdata *drvdata;
	struct resource *r;
	int ret, needs_alt_core_clk, needs_alt_iface_clk;
	int global_cfg_irq, global_client_irq;
	u32 temp;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
	if (!r)
		return -EINVAL;

	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!drvdata->base)
		return -ENOMEM;

	drvdata->phys_base = r->start;

	if (IS_ENABLED(CONFIG_MSM_IOMMU_VBIF_CHECK)) {
		drvdata->vbif_base =
			ioremap(drvdata->phys_base - (phys_addr_t) 0x4000,
				0x1000);
		WARN_ON_ONCE(!drvdata->vbif_base);
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"smmu_local_base");
	if (r) {
		drvdata->smmu_local_base =
			devm_ioremap(&pdev->dev, r->start, resource_size(r));
		if (!drvdata->smmu_local_base)
			return -ENOMEM;
	}

	drvdata->glb_base = drvdata->base;

	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-mmu-500"))
		drvdata->model = MMU_500;

	if (of_get_property(pdev->dev.of_node, "vdd-supply", NULL)) {

		drvdata->gdsc = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(drvdata->gdsc))
			return PTR_ERR(drvdata->gdsc);

		drvdata->alt_gdsc = devm_regulator_get(&pdev->dev,
							"qcom,alt-vdd");
		if (IS_ERR(drvdata->alt_gdsc))
			drvdata->alt_gdsc = NULL;
	} else {
		pr_debug("Warning: No regulator specified for IOMMU\n");
	}

	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(drvdata->pclk)) {
		ret = PTR_ERR(drvdata->pclk);
		drvdata->pclk = NULL;
		goto fail;
	}

	ret = clk_prepare(drvdata->pclk);
	if (ret)
		return ret;

	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(drvdata->clk)) {
		ret = PTR_ERR(drvdata->clk);
		drvdata->clk = NULL;
		goto fail;
	}

	ret = clk_prepare(drvdata->clk);
	if (ret)
		goto fail;

	needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
						   "qcom,needs-alt-core-clk");
	if (needs_alt_core_clk) {
		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
		if (IS_ERR(drvdata->aclk)) {
			ret =  PTR_ERR(drvdata->aclk);
			drvdata->aclk = NULL;
			goto fail;
		}

		ret =  clk_prepare(drvdata->aclk);
		if (ret)
			goto fail;
	}

	needs_alt_iface_clk = of_property_read_bool(pdev->dev.of_node,
						   "qcom,needs-alt-iface-clk");
	if (needs_alt_iface_clk) {
		drvdata->aiclk = devm_clk_get(&pdev->dev, "alt_iface_clk");
		if (IS_ERR(drvdata->aiclk)) {
			ret = PTR_ERR(drvdata->aiclk);
			drvdata->aiclk = NULL;
			goto fail;
		}

		ret =  clk_prepare(drvdata->aiclk);
		if (ret)
			goto fail;
	}

	if (!of_property_read_u32(pdev->dev.of_node,
				"qcom,cb-base-offset",
				&temp))
		drvdata->cb_base = drvdata->base + temp;
	else
		drvdata->cb_base = drvdata->base + 0x8000;

	if (clk_get_rate(drvdata->clk) == 0) {
		ret = clk_round_rate(drvdata->clk, 1000);
		clk_set_rate(drvdata->clk, ret);
	}

	if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
		ret = clk_round_rate(drvdata->aclk, 1000);
		clk_set_rate(drvdata->aclk, ret);
	}

	if (drvdata->aiclk && clk_get_rate(drvdata->aiclk) == 0) {
		ret = clk_round_rate(drvdata->aiclk, 1000);
		clk_set_rate(drvdata->aiclk, ret);
	}

	ret = msm_iommu_parse_dt(pdev, drvdata);
	if (ret)
		goto fail;	/* unwind the prepared clocks */

	dev_info(&pdev->dev,
		"device %s (model: %d) mapped at %p, with %d ctx banks\n",
		drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);

	platform_set_drvdata(pdev, drvdata);

	pmon_info = msm_iommu_pm_alloc(&pdev->dev);
	if (pmon_info != NULL) {
		ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
		if (ret) {
			msm_iommu_pm_free(&pdev->dev);
			pr_info("%s: pmon not available.\n", drvdata->name);
		} else {
			pmon_info->iommu.base = drvdata->base;
			pmon_info->iommu.ops = msm_get_iommu_access_ops();
			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
			pmon_info->iommu.iommu_name = drvdata->name;
			ret = msm_iommu_pm_iommu_register(pmon_info);
			if (ret) {
				pr_err("%s iommu register fail\n",
								drvdata->name);
				msm_iommu_pm_free(&pdev->dev);
			} else {
				pr_debug("%s iommu registered for pmon\n",
						pmon_info->iommu.iommu_name);
			}
		}
	}

	global_cfg_irq =
		platform_get_irq_byname(pdev, "global_cfg_NS_irq");
	if (global_cfg_irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, global_cfg_irq,
				NULL,
				msm_iommu_global_fault_handler,
				IRQF_ONESHOT | IRQF_SHARED |
				IRQF_TRIGGER_RISING,
				"msm_iommu_global_cfg_irq", pdev);
		if (ret < 0)
			pr_err("Request Global CFG IRQ %d failed with ret=%d\n",
					global_cfg_irq, ret);
	}

	global_client_irq =
		platform_get_irq_byname(pdev, "global_client_NS_irq");
	if (global_client_irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, global_client_irq,
				NULL,
				msm_iommu_global_fault_handler,
				IRQF_ONESHOT | IRQF_SHARED |
				IRQF_TRIGGER_RISING,
				"msm_iommu_global_client_irq", pdev);
		if (ret < 0)
			pr_err("Request Global Client IRQ %d failed with ret=%d\n",
					global_client_irq, ret);
	}

	ret = of_platform_populate(pdev->dev.of_node, msm_iommu_ctx_match_table,
				   NULL, &pdev->dev);
fail:
	if (ret) {
		clk_unprepare(drvdata->clk);
		clk_unprepare(drvdata->pclk);
		clk_unprepare(drvdata->aclk);
		clk_unprepare(drvdata->aiclk);
		pr_err("Failed to create iommu context device\n");
	}

	return ret;
}
Example #7
static int nop_usb_xceiv_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct nop_usb_xceiv_platform_data *pdata = pdev->dev.platform_data;
    struct nop_usb_xceiv	*nop;
    enum usb_phy_type	type = USB_PHY_TYPE_USB2;
    int err;
    u32 clk_rate = 0;
    bool needs_vcc = false;
    bool needs_reset = false;

    nop = devm_kzalloc(&pdev->dev, sizeof(*nop), GFP_KERNEL);
    if (!nop)
        return -ENOMEM;

    nop->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*nop->phy.otg),
                                GFP_KERNEL);
    if (!nop->phy.otg)
        return -ENOMEM;

    if (dev->of_node) {
        struct device_node *node = dev->of_node;

        if (of_property_read_u32(node, "clock-frequency", &clk_rate))
            clk_rate = 0;

        needs_vcc = of_property_read_bool(node, "vcc-supply");
        needs_reset = of_property_read_bool(node, "reset-supply");

    } else if (pdata) {
        type = pdata->type;
        clk_rate = pdata->clk_rate;
        needs_vcc = pdata->needs_vcc;
        needs_reset = pdata->needs_reset;
    }

    nop->clk = devm_clk_get(&pdev->dev, "main_clk");
    if (IS_ERR(nop->clk)) {
        dev_dbg(&pdev->dev, "Can't get phy clock: %ld\n",
                PTR_ERR(nop->clk));
    }

    if (!IS_ERR(nop->clk) && clk_rate) {
        err = clk_set_rate(nop->clk, clk_rate);
        if (err) {
            dev_err(&pdev->dev, "Error setting clock rate\n");
            return err;
        }
    }

    if (!IS_ERR(nop->clk)) {
        err = clk_prepare(nop->clk);
        if (err) {
            dev_err(&pdev->dev, "Error preparing clock\n");
            return err;
        }
    }

    nop->vcc = devm_regulator_get(&pdev->dev, "vcc");
    if (IS_ERR(nop->vcc)) {
        dev_dbg(&pdev->dev, "Error getting vcc regulator: %ld\n",
                PTR_ERR(nop->vcc));
        if (needs_vcc)
            return -EPROBE_DEFER;
    }

    nop->reset = devm_regulator_get(&pdev->dev, "reset");
    if (IS_ERR(nop->reset)) {
        dev_dbg(&pdev->dev, "Error getting reset regulator: %ld\n",
                PTR_ERR(nop->reset));
        if (needs_reset)
            return -EPROBE_DEFER;
    }

    nop->dev		= &pdev->dev;
    nop->phy.dev		= nop->dev;
    nop->phy.label		= "nop-xceiv";
    nop->phy.set_suspend	= nop_set_suspend;
    nop->phy.init		= nop_init;
    nop->phy.shutdown	= nop_shutdown;
    nop->phy.state		= OTG_STATE_UNDEFINED;
    nop->phy.type		= type;

    nop->phy.otg->phy		= &nop->phy;
    nop->phy.otg->set_host		= nop_set_host;
    nop->phy.otg->set_peripheral	= nop_set_peripheral;

    err = usb_add_phy_dev(&nop->phy);
    if (err) {
        dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
                err);
        goto err_add;
    }

    platform_set_drvdata(pdev, nop);

    return 0;

err_add:
    if (!IS_ERR(nop->clk))
        clk_unprepare(nop->clk);
    return err;
}
static int serial_pxa_probe(struct platform_device *dev)
{
	struct uart_pxa_port *sport;
	struct resource *mmres, *irqres, *dmares;
	int ret;
	struct uart_pxa_dma *pxa_dma;

	mmres = platform_get_resource(dev, IORESOURCE_MEM, 0);
	irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0);
	if (!mmres || !irqres)
		return -ENODEV;

	sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	sport->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(sport->clk)) {
		ret = PTR_ERR(sport->clk);
		goto err_free;
	}

	ret = clk_prepare(sport->clk);
	if (ret) {
		clk_put(sport->clk);
		goto err_free;
	}

	sport->port.type = PORT_PXA;
	sport->port.iotype = UPIO_MEM;
	sport->port.mapbase = mmres->start;
	sport->port.irq = irqres->start;
	sport->port.fifosize = 64;
	sport->port.ops = &serial_pxa_pops;
	sport->port.dev = &dev->dev;
	sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
	sport->port.uartclk = clk_get_rate(sport->clk);

	pxa_dma = &sport->uart_dma;
	pxa_dma->drcmr_rx = 0;
	pxa_dma->drcmr_tx = 0;
	pxa_dma->txdma_chan = NULL;
	pxa_dma->rxdma_chan = NULL;
	pxa_dma->txdma_addr = NULL;
	pxa_dma->rxdma_addr = NULL;
	sport->dma_enable = 0;

	sport->edge_wakeup_gpio = -1;

	ret = serial_pxa_probe_dt(dev, sport);
	if (ret > 0)
		sport->port.line = dev->id;
	else if (ret < 0)
		goto err_clk;
	snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1);

	if (ret > 0 && uart_dma) {
		/* Get Rx DMA mapping value */
		dmares = platform_get_resource(dev, IORESOURCE_DMA, 0);
		if (dmares) {
			pxa_dma->drcmr_rx = dmares->start;

			/* Get Tx DMA mapping value */
			dmares = platform_get_resource(dev, IORESOURCE_DMA, 1);
			if (dmares) {
				pxa_dma->drcmr_tx = dmares->start;
				sport->dma_enable = 1;
			}
		}
	}

	sport->qos_idle[PXA_UART_TX].name = kasprintf(GFP_KERNEL,
					    "%s%s", "tx", sport->name);
	if (!sport->qos_idle[PXA_UART_TX].name) {
		ret = -ENOMEM;
		goto err_clk;
	}

	sport->qos_idle[PXA_UART_RX].name = kasprintf(GFP_KERNEL,
					    "%s%s", "rx", sport->name);
	if (!sport->qos_idle[PXA_UART_RX].name) {
		ret = -ENOMEM;
		kfree(sport->qos_idle[PXA_UART_TX].name);
		goto err_clk;
	}

	pm_qos_add_request(&sport->qos_idle[PXA_UART_TX], PM_QOS_CPUIDLE_BLOCK,
			   PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
	pm_qos_add_request(&sport->qos_idle[PXA_UART_RX], PM_QOS_CPUIDLE_BLOCK,
			   PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
	if (sport->edge_wakeup_gpio >= 0) {
		ret = request_mfp_edge_wakeup(sport->edge_wakeup_gpio,
					      uart_edge_wakeup_handler,
					      sport, &dev->dev);
		if (ret) {
			dev_err(&dev->dev, "failed to request edge wakeup.\n");
			goto err_qos;
		}
	}

	sport->port.membase = ioremap(mmres->start, resource_size(mmres));
	if (!sport->port.membase) {
		ret = -ENOMEM;
		goto err_map;
	}

	INIT_WORK(&sport->uart_tx_lpm_work, uart_tx_lpm_handler);

	init_timer(&sport->pxa_timer);
	sport->pxa_timer.function = pxa_timer_handler;
	sport->pxa_timer.data = (long)sport;
	serial_pxa_ports[sport->port.line] = sport;

	uart_add_one_port(&serial_pxa_reg, &sport->port);
	platform_set_drvdata(dev, sport);

	return 0;

 err_map:
	if (sport->edge_wakeup_gpio >= 0)
		remove_mfp_edge_wakeup(sport->edge_wakeup_gpio);
 err_qos:
	kfree(sport->qos_idle[PXA_UART_TX].name);
	kfree(sport->qos_idle[PXA_UART_RX].name);
	pm_qos_remove_request(&sport->qos_idle[PXA_UART_RX]);
	pm_qos_remove_request(&sport->qos_idle[PXA_UART_TX]);
 err_clk:
	clk_unprepare(sport->clk);
	clk_put(sport->clk);
 err_free:
	kfree(sport);
	return ret;
}
Example #9
File: sh_tmu.c Project: 168519/linux
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
Example #10
static void da8xx_usb0_clk48_unprepare(struct clk_hw *hw)
{
	struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);

	clk_unprepare(usb0->fck);
}
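This function is the .unprepare half of a composite clock implemented with the common clock framework. A sketch of how such a hook is typically wired into a struct clk_ops (the matching .prepare shown here is a simplified assumption, not the real da8xx implementation):

/* Sketch: forward prepare/unprepare of the composite clock to its parent
 * functional clock. Simplified; the real driver does more in .prepare. */
static int da8xx_usb0_clk48_prepare_sketch(struct clk_hw *hw)
{
	struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);

	return clk_prepare(usb0->fck);
}

static const struct clk_ops da8xx_usb0_clk48_ops = {
	.prepare	= da8xx_usb0_clk48_prepare_sketch,
	.unprepare	= da8xx_usb0_clk48_unprepare,
	/* .enable, .disable, .recalc_rate, ... omitted in this sketch */
};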
Example #11
/**
 * cdns_uart_probe - Platform driver probe
 * @pdev: Pointer to the platform device structure
 *
 * Return: 0 on success, negative errno otherwise
 */
static int cdns_uart_probe(struct platform_device *pdev)
{
	int rc, id, irq;
	struct uart_port *port;
	struct resource *res;
	struct cdns_uart *cdns_uart_data;
	const struct of_device_id *match;

	cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
			GFP_KERNEL);
	if (!cdns_uart_data)
		return -ENOMEM;

	match = of_match_node(cdns_uart_of_match, pdev->dev.of_node);
	if (match && match->data) {
		const struct cdns_platform_data *data = match->data;

		cdns_uart_data->quirks = data->quirks;
	}

	cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(cdns_uart_data->pclk)) {
		cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "aper_clk");
		if (!IS_ERR(cdns_uart_data->pclk))
			dev_err(&pdev->dev, "clock name 'aper_clk' is deprecated.\n");
	}
	if (IS_ERR(cdns_uart_data->pclk)) {
		dev_err(&pdev->dev, "pclk clock not found.\n");
		return PTR_ERR(cdns_uart_data->pclk);
	}

	cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "uart_clk");
	if (IS_ERR(cdns_uart_data->uartclk)) {
		cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "ref_clk");
		if (!IS_ERR(cdns_uart_data->uartclk))
			dev_err(&pdev->dev, "clock name 'ref_clk' is deprecated.\n");
	}
	if (IS_ERR(cdns_uart_data->uartclk)) {
		dev_err(&pdev->dev, "uart_clk clock not found.\n");
		return PTR_ERR(cdns_uart_data->uartclk);
	}

	rc = clk_prepare(cdns_uart_data->pclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
		return rc;
	}
	rc = clk_prepare(cdns_uart_data->uartclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto err_out_clk_dis_pclk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		rc = -ENODEV;
		goto err_out_clk_disable;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		rc = -ENXIO;
		goto err_out_clk_disable;
	}

#ifdef CONFIG_COMMON_CLK
	cdns_uart_data->clk_rate_change_nb.notifier_call =
			cdns_uart_clk_notifier_cb;
	if (clk_notifier_register(cdns_uart_data->uartclk,
				&cdns_uart_data->clk_rate_change_nb))
		dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
#endif
	/* Look for a serialN alias */
	id = of_alias_get_id(pdev->dev.of_node, "serial");
	if (id < 0)
		id = 0;

	/* Initialize the port structure */
	port = cdns_uart_get_port(id);

	if (!port) {
		dev_err(&pdev->dev, "Cannot get uart_port structure\n");
		rc = -ENODEV;
		goto err_out_notif_unreg;
	} else {
		/* Register the port.
		 * This function also registers this device with the tty layer
		 * and triggers invocation of the config_port() entry point.
		 */
		port->mapbase = res->start;
		port->irq = irq;
		port->dev = &pdev->dev;
		port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
		port->private_data = cdns_uart_data;
		cdns_uart_data->port = port;
		platform_set_drvdata(pdev, port);
		rc = uart_add_one_port(&cdns_uart_uart_driver, port);
		if (rc) {
			dev_err(&pdev->dev,
				"uart_add_one_port() failed; err=%i\n", rc);
			goto err_out_notif_unreg;
		}
		return 0;
	}

err_out_notif_unreg:
#ifdef CONFIG_COMMON_CLK
	clk_notifier_unregister(cdns_uart_data->uartclk,
			&cdns_uart_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
	clk_unprepare(cdns_uart_data->uartclk);
err_out_clk_dis_pclk:
	clk_unprepare(cdns_uart_data->pclk);

	return rc;
}
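Example #12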
int md_cd_power_off(struct ccci_modem *md, unsigned int timeout)
{
    int ret = 0;
    unsigned int reg_value;
#ifdef FEATURE_RF_CLK_BUF    
    mutex_lock(&clk_buf_ctrl_lock); 
#endif
    // power off MD_INFRA and MODEM_TOP
    switch(md->index)
    {
        case MD_SYS1:
#if defined(CONFIG_MTK_LEGACY)
        ret = md_power_off(SYS_MD1, timeout);
#else
        clk_disable(clk_scp_sys_md1_main);
        #ifdef FEATURE_RF_CLK_BUF
        mutex_unlock(&clk_buf_ctrl_lock);
        #endif
        clk_unprepare(clk_scp_sys_md1_main); // cannot be called in mutex context
        #ifdef FEATURE_RF_CLK_BUF
        mutex_lock(&clk_buf_ctrl_lock);
        #endif
#endif
        kicker_pbm_by_md(MD1,false);
        CCCI_INF_MSG(md->index, TAG, "Call end kicker_pbm_by_md(0,false)\n"); 
        break;
    }
#ifdef FEATURE_RF_CLK_BUF
    // config RFICx as AP SPM control
    CCCI_INF_MSG(md->index, TAG, "clock buffer, AP SPM control mode\n"); 
    mt_set_gpio_mode(GPIO_RFIC0_BSI_CK,  GPIO_MODE_04); 
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D0,  GPIO_MODE_04);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D1,  GPIO_MODE_04);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D2,  GPIO_MODE_04);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_CS,  GPIO_MODE_04);
	mutex_unlock(&clk_buf_ctrl_lock);
#endif

#ifdef FEATURE_VLTE_SUPPORT
    // Turn off VLTE
    //if(!(mt6325_upmu_get_swcid()==PMIC6325_E1_CID_CODE ||
    //     mt6325_upmu_get_swcid()==PMIC6325_E2_CID_CODE))
    {    
    /*
     * [Notes] Moved into the md cmos flow because of a hardware issue, so this
     * is disabled on denlai; bring-up needs confirmation with MD DE & SPM.
     */
    //reg_value = ccci_read32(infra_ao_base,0x338); 
    //reg_value &= ~(0x40); //bit[6] =>1'b0
    //reg_value |= 0x40;//bit[6] =>1'b1
    //ccci_write32(infra_ao_base,0x338,reg_value);
    //CCCI_INF_MSG(md->index, CORE, "md_cd_power_off: set SRCLKEN infra_misc(0x1000_0338)=0x%x, bit[6]=0x%x\n",ccci_read32(infra_ao_base,0x338),(ccci_read32(infra_ao_base,0x338)&0x40));
    
    CCCI_INF_MSG(md->index, CORE, "md_cd_power_off:set VLTE on,bit0=0\n");
    pmic_config_interface(0x04D6, 0x0, 0x1, 0); //bit[0] =>1'b0
    }
    mt_set_gpio_out(GPIO_LTE_VSRAM_EXT_POWER_EN_PIN,0);
    CCCI_INF_MSG(md->index, CORE, "md_cd_power_off:mt_set_gpio_out(GPIO_LTE_VSRAM_EXT_POWER_EN_PIN,0)\n");
#endif
    return ret;
}
Example #13
static int dc_i2c_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dc_i2c *i2c;
	struct resource *r;
	int ret = 0, irq;

	i2c = devm_kzalloc(&pdev->dev, sizeof(struct dc_i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				 &i2c->frequency))
		i2c->frequency = DEFAULT_FREQ;

	i2c->dev = &pdev->dev;
	platform_set_drvdata(pdev, i2c);

	spin_lock_init(&i2c->lock);
	init_completion(&i2c->done);

	i2c->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(i2c->clk))
		return PTR_ERR(i2c->clk);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i2c->regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(i2c->regs))
		return PTR_ERR(i2c->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, dc_i2c_irq, 0,
			       dev_name(&pdev->dev), i2c);
	if (ret < 0)
		return ret;

	strlcpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
		sizeof(i2c->adap.name));
	i2c->adap.owner = THIS_MODULE;
	i2c->adap.algo = &dc_i2c_algorithm;
	i2c->adap.dev.parent = &pdev->dev;
	i2c->adap.dev.of_node = np;
	i2c->adap.algo_data = i2c;

	ret = dc_i2c_init_hw(i2c);
	if (ret)
		return ret;

	ret = clk_prepare_enable(i2c->clk);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&i2c->adap);
	if (ret < 0) {
		clk_disable(i2c->clk);	/* balance clk_prepare_enable() above */
		clk_unprepare(i2c->clk);
		return ret;
	}

	return 0;
}
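Example #14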
static int spear_kbd_probe(struct platform_device *pdev)
{
	struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
	struct spear_kbd *kbd;
	struct input_dev *input_dev;
	struct resource *res;
	int irq;
	int error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "not able to get irq for the device\n");
		return irq;
	}

	kbd = devm_kzalloc(&pdev->dev, sizeof(*kbd), GFP_KERNEL);
	if (!kbd) {
		dev_err(&pdev->dev, "not enough memory for driver data\n");
		return -ENOMEM;
	}

	input_dev = devm_input_allocate_device(&pdev->dev);
	if (!input_dev) {
		dev_err(&pdev->dev, "unable to allocate input device\n");
		return -ENOMEM;
	}

	kbd->input = input_dev;
	kbd->irq = irq;

	if (!pdata) {
		error = spear_kbd_parse_dt(pdev, kbd);
		if (error)
			return error;
	} else {
		kbd->mode = pdata->mode;
		kbd->rep = pdata->rep;
		kbd->suspended_rate = pdata->suspended_rate;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	kbd->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(kbd->io_base))
		return PTR_ERR(kbd->io_base);

	kbd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(kbd->clk))
		return PTR_ERR(kbd->clk);

	input_dev->name = "Spear Keyboard";
	input_dev->phys = "keyboard/input0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->open = spear_kbd_open;
	input_dev->close = spear_kbd_close;

	error = matrix_keypad_build_keymap(keymap, NULL, NUM_ROWS, NUM_COLS,
					   kbd->keycodes, input_dev);
	if (error) {
		dev_err(&pdev->dev, "Failed to build keymap\n");
		return error;
	}

	if (kbd->rep)
		__set_bit(EV_REP, input_dev->evbit);
	input_set_capability(input_dev, EV_MSC, MSC_SCAN);

	input_set_drvdata(input_dev, kbd);

	error = devm_request_irq(&pdev->dev, irq, spear_kbd_interrupt, 0,
			"keyboard", kbd);
	if (error) {
		dev_err(&pdev->dev, "request_irq failed\n");
		return error;
	}

	error = clk_prepare(kbd->clk);
	if (error)
		return error;

	error = input_register_device(input_dev);
	if (error) {
		dev_err(&pdev->dev, "Unable to register keyboard device\n");
		clk_unprepare(kbd->clk);
		return error;
	}

	device_init_wakeup(&pdev->dev, 1);
	platform_set_drvdata(pdev, kbd);

	return 0;
}
Example #15
void mdss_dsi_unprepare_clocks(void)
{
	clk_unprepare(dsi_esc_clk);
	clk_unprepare(dsi_pixel_clk);
	clk_unprepare(dsi_byte_clk);
}
Example #16
File: core.c Project: lfd/PreemptRT
void rsnd_mod_quit(struct rsnd_mod *mod)
{
	clk_unprepare(mod->clk);
	mod->clk = NULL;
}
Example #17
/**
 * clk_bulk_unprepare - undo preparation of a set of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being unprepared
 *
 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
 */
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
	while (--num_clks >= 0)
		clk_unprepare(clks[num_clks].clk);
}
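A short usage sketch to pair with this helper: clk_bulk_prepare() unwinds itself on failure, so the caller only owes a matching clk_bulk_unprepare() on teardown (the clock names below are illustrative):

#include <linux/clk.h>
#include <linux/kernel.h>

static struct clk_bulk_data foo_bulk[] = {
	{ .id = "bus" },	/* illustrative clock names */
	{ .id = "core" },
};

static int foo_clks_init(struct device *dev)
{
	int ret;

	ret = clk_bulk_get(dev, ARRAY_SIZE(foo_bulk), foo_bulk);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(ARRAY_SIZE(foo_bulk), foo_bulk);
	if (ret)	/* clk_bulk_prepare() already unwound itself */
		clk_bulk_put(ARRAY_SIZE(foo_bulk), foo_bulk);
	return ret;
}

static void foo_clks_exit(struct device *dev)
{
	clk_bulk_unprepare(ARRAY_SIZE(foo_bulk), foo_bulk);
	clk_bulk_put(ARRAY_SIZE(foo_bulk), foo_bulk);
}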
Example #18
static int g2d_probe(struct platform_device *pdev)
{
	struct g2d_dev *dev;
	struct video_device *vfd;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret = 0;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->ctrl_lock);
	mutex_init(&dev->mutex);
	atomic_set(&dev->num_inst, 0);
	init_waitqueue_head(&dev->irq_queue);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs))
		return PTR_ERR(dev->regs);

	dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "failed to get g2d clock\n");
		return -ENXIO;
	}

	ret = clk_prepare(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare g2d clock\n");
		goto put_clk;
	}

	dev->gate = clk_get(&pdev->dev, "fimg2d");
	if (IS_ERR(dev->gate)) {
		dev_err(&pdev->dev, "failed to get g2d clock gate\n");
		ret = -ENXIO;
		goto unprep_clk;
	}

	ret = clk_prepare(dev->gate);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare g2d clock gate\n");
		goto put_clk_gate;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to find IRQ\n");
		ret = -ENXIO;
		goto unprep_clk_gate;
	}

	dev->irq = res->start;

	ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
						0, pdev->name, dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to install IRQ\n");
		goto unprep_clk_gate;	/* dev->gate is already prepared */
	}

	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
	if (IS_ERR(dev->alloc_ctx)) {
		ret = PTR_ERR(dev->alloc_ctx);
		goto unprep_clk_gate;
	}

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		goto alloc_ctx_cleanup;
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto unreg_v4l2_dev;
	}
	*vfd = g2d_videodev;
	vfd->lock = &dev->mutex;
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		goto rel_vdev;
	}
	video_set_drvdata(vfd, dev);
	snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
	dev->vfd = vfd;
	v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
								vfd->num);
	platform_set_drvdata(pdev, dev);
	dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(dev->m2m_dev);
		goto unreg_video_dev;
	}

	def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;

	if (!pdev->dev.of_node) {
		dev->variant = g2d_get_drv_data(pdev);
	} else {
		of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
		if (!of_id) {
			ret = -ENODEV;
			goto unreg_video_dev;
		}
		dev->variant = (struct g2d_variant *)of_id->data;
	}

	return 0;

unreg_video_dev:
	video_unregister_device(dev->vfd);
rel_vdev:
	video_device_release(vfd);
unreg_v4l2_dev:
	v4l2_device_unregister(&dev->v4l2_dev);
alloc_ctx_cleanup:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
unprep_clk_gate:
	clk_unprepare(dev->gate);
put_clk_gate:
	clk_put(dev->gate);
unprep_clk:
	clk_unprepare(dev->clk);
put_clk:
	clk_put(dev->clk);

	return ret;
}
Example #19
int __init acpuclk_cortex_init(struct platform_device *pdev,
	struct acpuclk_drv_data *data)
{
	unsigned long max_cpu_khz = 0;
	int i, rc;

	acpuclk_init_data = data;
	mutex_init(&acpuclk_init_data->lock);

	bus_perf_client = msm_bus_scale_register_client(
		acpuclk_init_data->bus_scale);
	if (!bus_perf_client) {
		pr_err("Unable to register bus client\n");
		BUG();
	}

	for (i = 0; i < NUM_SRC; i++) {
		if (!acpuclk_init_data->src_clocks[i].name)
			continue;
		acpuclk_init_data->src_clocks[i].clk =
			clk_get(&pdev->dev,
				acpuclk_init_data->src_clocks[i].name);
		BUG_ON(IS_ERR(acpuclk_init_data->src_clocks[i].clk));
		/*
		 * Prepare the PLLs because we enable/disable them
		 * in atomic context during power collapse/restore.
		 */
		BUG_ON(clk_prepare(acpuclk_init_data->src_clocks[i].clk));
	}

	/* Improve boot time by ramping up CPU immediately */
	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0 &&
			acpuclk_init_data->freq_tbl[i].use_for_scaling; i++)
		max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;

	/* Initialize regulators */
	rc = increase_vdd(acpuclk_init_data->freq_tbl[i].vdd_cpu,
		acpuclk_init_data->freq_tbl[i].vdd_mem);
	if (rc)
		goto err_vdd;

	rc = regulator_enable(acpuclk_init_data->vdd_mem);
	if (rc) {
		dev_err(&pdev->dev, "regulator_enable for mem failed\n");
		goto err_vdd;
	}

	rc = regulator_enable(acpuclk_init_data->vdd_cpu);
	if (rc) {
		dev_err(&pdev->dev, "regulator_enable for cpu failed\n");
		goto err_vdd_cpu;
	}

	acpuclk_cortex_set_rate(0, max_cpu_khz, SETRATE_INIT);

	acpuclk_register(&acpuclk_cortex_data);
	cpufreq_table_init();

	return 0;

err_vdd_cpu:
	regulator_disable(acpuclk_init_data->vdd_mem);
err_vdd:
	regulator_put(acpuclk_init_data->vdd_mem);
	regulator_put(acpuclk_init_data->vdd_cpu);

	for (i = 0; i < NUM_SRC; i++) {
		if (!acpuclk_init_data->src_clocks[i].name)
			continue;
		clk_unprepare(acpuclk_init_data->src_clocks[i].clk);
		clk_put(acpuclk_init_data->src_clocks[i].clk);
	}
	return rc;
}
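The comment in this example names the reason the prepare/unprepare pair exists at all: clk_prepare()/clk_unprepare() may sleep, while clk_enable()/clk_disable() are safe in atomic context. A minimal sketch of that split (function names hypothetical):

#include <linux/clk.h>

/* Process context: clk_prepare() may sleep, so do it up front. */
static int foo_pll_init(struct clk *pll)
{
	return clk_prepare(pll);
}

/* Atomic context (e.g. during power collapse/restore): only the
 * non-sleeping half of the clk API is legal here. */
static int foo_pll_restore(struct clk *pll)
{
	return clk_enable(pll);
}

static void foo_pll_collapse(struct clk *pll)
{
	clk_disable(pll);
}

/* Process context again: balance the clk_prepare(). */
static void foo_pll_exit(struct clk *pll)
{
	clk_unprepare(pll);
}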
Example #20
void mipi_dsi_unprepare_ahb_clocks(void)
{
	clk_unprepare(dsi_m_pclk);
	clk_unprepare(dsi_s_pclk);
	clk_unprepare(amp_pclk);
}
Example #21
static void exynos_adc_unprepare_clk(struct exynos_adc *info)
{
	if (info->data->needs_sclk)
		clk_unprepare(info->sclk);
	clk_unprepare(info->clk);
}
Example #22
void mipi_dsi_unprepare_clocks(void)
{
	clk_unprepare(dsi_esc_clk);
	clk_unprepare(dsi_byte_div_clk);
}
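Example #23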
int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
		struct clk **clk_ptr, int num_clk, int enable)
{
	int i;
	int rc = 0;
	if (enable) {
		for (i = 0; i < num_clk; i++) {
			clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
			if (IS_ERR(clk_ptr[i])) {
				pr_err("%s get failed\n", clk_info[i].clk_name);
				rc = PTR_ERR(clk_ptr[i]);
				goto cam_clk_get_err;
			}
			if (clk_info[i].clk_rate >= 0) {
				rc = clk_set_rate(clk_ptr[i],
							clk_info[i].clk_rate);
				if (rc < 0) {
					pr_err("%s set failed\n",
						   clk_info[i].clk_name);
					goto cam_clk_set_err;
				}
			}
			rc = clk_prepare(clk_ptr[i]);
			if (rc < 0) {
				pr_err("%s prepare failed\n",
					   clk_info[i].clk_name);
				goto cam_clk_prepare_err;
			}

			rc = clk_enable(clk_ptr[i]);
			if (rc < 0) {
				pr_err("%s enable failed\n",
					   clk_info[i].clk_name);
				goto cam_clk_enable_err;
			}
			if (clk_info[i].delay > 20) {
				msleep(clk_info[i].delay);
			} else if (clk_info[i].delay) {
				usleep_range(clk_info[i].delay * 1000,
					(clk_info[i].delay * 1000) + 1000);
			}
		}
	} else {
		for (i = num_clk - 1; i >= 0; i--) {
			if (clk_ptr[i] != NULL) {
				clk_disable(clk_ptr[i]);
				clk_unprepare(clk_ptr[i]);
				clk_put(clk_ptr[i]);
			}
		}
	}
	return rc;


cam_clk_enable_err:
	clk_unprepare(clk_ptr[i]);
cam_clk_prepare_err:
cam_clk_set_err:
	clk_put(clk_ptr[i]);
cam_clk_get_err:
	for (i--; i >= 0; i--) {
		if (clk_ptr[i] != NULL) {
			clk_disable(clk_ptr[i]);
			clk_unprepare(clk_ptr[i]);
			clk_put(clk_ptr[i]);
		}
	}
	return rc;
}
Example #24
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
				      struct device_node *node)
{
	const struct rockchip_domain_info *pd_info;
	struct rockchip_pm_domain *pd;
	struct device_node *qos_node;
	struct clk *clk;
	int clk_cnt;
	int i, j;
	u32 id;
	int error;

	error = of_property_read_u32(node, "reg", &id);
	if (error) {
		dev_err(pmu->dev,
			"%s: failed to retrieve domain id (reg): %d\n",
			node->name, error);
		return -EINVAL;
	}

	if (id >= pmu->info->num_domains) {
		dev_err(pmu->dev, "%s: invalid domain id %d\n",
			node->name, id);
		return -EINVAL;
	}

	pd_info = &pmu->info->domain_info[id];
	if (!pd_info) {
		dev_err(pmu->dev, "%s: undefined domain id %d\n",
			node->name, id);
		return -EINVAL;
	}

	clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
	pd = devm_kzalloc(pmu->dev,
			  sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
			  GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->info = pd_info;
	pd->pmu = pmu;

	for (i = 0; i < clk_cnt; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			dev_err(pmu->dev,
				"%s: failed to get clk at index %d: %d\n",
				node->name, i, error);
			goto err_out;
		}

		error = clk_prepare(clk);
		if (error) {
			dev_err(pmu->dev,
				"%s: failed to prepare clk %pC (index %d): %d\n",
				node->name, clk, i, error);
			clk_put(clk);
			goto err_out;
		}

		pd->clks[pd->num_clks++] = clk;

		dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
			clk, node->name);
	}

	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
						 NULL);

	if (pd->num_qos > 0) {
		pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
					      sizeof(*pd->qos_regmap),
					      GFP_KERNEL);
		if (!pd->qos_regmap) {
			error = -ENOMEM;
			goto err_out;
		}

		for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
			pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
							    pd->num_qos,
							    sizeof(u32),
							    GFP_KERNEL);
			if (!pd->qos_save_regs[j]) {
				error = -ENOMEM;
				goto err_out;
			}
		}

		for (j = 0; j < pd->num_qos; j++) {
			qos_node = of_parse_phandle(node, "pm_qos", j);
			if (!qos_node) {
				error = -ENODEV;
				goto err_out;
			}
			pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
			if (IS_ERR(pd->qos_regmap[j])) {
				error = -ENODEV;
				of_node_put(qos_node);
				goto err_out;
			}
			of_node_put(qos_node);
		}
	}

	error = rockchip_pd_power(pd, true);
	if (error) {
		dev_err(pmu->dev,
			"failed to power on domain '%s': %d\n",
			node->name, error);
		goto err_out;
	}

	pd->genpd.name = node->name;
	pd->genpd.power_off = rockchip_pd_power_off;
	pd->genpd.power_on = rockchip_pd_power_on;
	pd->genpd.attach_dev = rockchip_pd_attach_dev;
	pd->genpd.detach_dev = rockchip_pd_detach_dev;
	pd->genpd.flags = GENPD_FLAG_PM_CLK;
	pm_genpd_init(&pd->genpd, NULL, false);

	pmu->genpd_data.domains[id] = &pd->genpd;
	return 0;

err_out:
	while (--i >= 0) {
		clk_unprepare(pd->clks[i]);
		clk_put(pd->clks[i]);
	}
	return error;
}
Example #25
static int g2d_probe(struct platform_device *pdev)
{
    struct g2d_dev *dev;
    struct video_device *vfd;
    struct resource *res;
    int ret = 0;

    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    if (!dev)
        return -ENOMEM;
    spin_lock_init(&dev->ctrl_lock);
    mutex_init(&dev->mutex);
    atomic_set(&dev->num_inst, 0);
    init_waitqueue_head(&dev->irq_queue);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&pdev->dev, "failed to find registers\n");
        ret = -ENOENT;
        goto free_dev;
    }

    dev->res_regs = request_mem_region(res->start, resource_size(res),
                                       dev_name(&pdev->dev));

    if (!dev->res_regs) {
        dev_err(&pdev->dev, "failed to obtain register region\n");
        ret = -ENOENT;
        goto free_dev;
    }

    dev->regs = ioremap(res->start, resource_size(res));
    if (!dev->regs) {
        dev_err(&pdev->dev, "failed to map registers\n");
        ret = -ENOENT;
        goto rel_res_regs;
    }

    dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
    if (IS_ERR_OR_NULL(dev->clk)) {
        dev_err(&pdev->dev, "failed to get g2d clock\n");
        ret = -ENXIO;
        goto unmap_regs;
    }

    ret = clk_prepare(dev->clk);
    if (ret) {
        dev_err(&pdev->dev, "failed to prepare g2d clock\n");
        goto put_clk;
    }

    dev->gate = clk_get(&pdev->dev, "fimg2d");
    if (IS_ERR_OR_NULL(dev->gate)) {
        dev_err(&pdev->dev, "failed to get g2d clock gate\n");
        ret = -ENXIO;
        goto unprep_clk;
    }

    ret = clk_prepare(dev->gate);
    if (ret) {
        dev_err(&pdev->dev, "failed to prepare g2d clock gate\n");
        goto put_clk_gate;
    }

    res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (!res) {
        dev_err(&pdev->dev, "failed to find IRQ\n");
        ret = -ENXIO;
        goto unprep_clk_gate;
    }

    dev->irq = res->start;

    ret = request_irq(dev->irq, g2d_isr, 0, pdev->name, dev);
    if (ret) {
        dev_err(&pdev->dev, "failed to install IRQ\n");
        goto unprep_clk_gate; /* dev->gate is already prepared */
    }

    dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
    if (IS_ERR(dev->alloc_ctx)) {
        ret = PTR_ERR(dev->alloc_ctx);
        goto rel_irq;
    }

    ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
    if (ret)
        goto alloc_ctx_cleanup;
    vfd = video_device_alloc();
    if (!vfd) {
        v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
        ret = -ENOMEM;
        goto unreg_v4l2_dev;
    }
    *vfd = g2d_videodev;
    vfd->lock = &dev->mutex;
    ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
    if (ret) {
        v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
        goto rel_vdev;
    }
    video_set_drvdata(vfd, dev);
    snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
    dev->vfd = vfd;
    v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
              vfd->num);
    platform_set_drvdata(pdev, dev);
    dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
    if (IS_ERR(dev->m2m_dev)) {
        v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
        ret = PTR_ERR(dev->m2m_dev);
        goto unreg_video_dev;
    }

    def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;

    return 0;

unreg_video_dev:
    video_unregister_device(dev->vfd);
rel_vdev:
    video_device_release(vfd);
unreg_v4l2_dev:
    v4l2_device_unregister(&dev->v4l2_dev);
alloc_ctx_cleanup:
    vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
rel_irq:
    free_irq(dev->irq, dev);
unprep_clk_gate:
    clk_unprepare(dev->gate);
put_clk_gate:
    clk_put(dev->gate);
unprep_clk:
    clk_unprepare(dev->clk);
put_clk:
    clk_put(dev->clk);
unmap_regs:
    iounmap(dev->regs);
rel_res_regs:
    release_resource(dev->res_regs);
free_dev:
    kfree(dev);
    return ret;
}
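Example #26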
gceSTATUS
_SetClock(
    IN gckPLATFORM Platform,
    IN gceCORE GPU,
    IN gctBOOL Enable
    )
{
    struct imx_priv* priv = Platform->priv;
    struct clk *clk_3dcore = priv->clk_3d_core;
    struct clk *clk_3dshader = priv->clk_3d_shader;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
    struct clk *clk_3d_axi = priv->clk_3d_axi;
#endif
    struct clk *clk_2dcore = priv->clk_2d_core;
    struct clk *clk_2d_axi = priv->clk_2d_axi;
    struct clk *clk_vg_axi = priv->clk_vg_axi;


#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    if (Enable) {
        switch (GPU) {
        case gcvCORE_MAJOR:
            clk_enable(clk_3dcore);
            if (cpu_is_mx6q())
                clk_enable(clk_3dshader);
            break;
        case gcvCORE_2D:
            clk_enable(clk_2dcore);
            clk_enable(clk_2d_axi);
            break;
        case gcvCORE_VG:
            clk_enable(clk_2dcore);
            clk_enable(clk_vg_axi);
            break;
        default:
            break;
        }
    } else {
        switch (GPU) {
        case gcvCORE_MAJOR:
            if (cpu_is_mx6q())
                clk_disable(clk_3dshader);
            clk_disable(clk_3dcore);
            break;
        case gcvCORE_2D:
            clk_disable(clk_2dcore);
            clk_disable(clk_2d_axi);
            break;
        case gcvCORE_VG:
            clk_disable(clk_2dcore);
            clk_disable(clk_vg_axi);
            break;
        default:
            break;
        }
    }
#else
    if (Enable) {
        switch (GPU) {
        case gcvCORE_MAJOR:
            clk_prepare(clk_3dcore);
            clk_enable(clk_3dcore);
            clk_prepare(clk_3dshader);
            clk_enable(clk_3dshader);
            clk_prepare(clk_3d_axi);
            clk_enable(clk_3d_axi);
            break;
        case gcvCORE_2D:
            clk_prepare(clk_2dcore);
            clk_enable(clk_2dcore);
            clk_prepare(clk_2d_axi);
            clk_enable(clk_2d_axi);
            break;
        case gcvCORE_VG:
            clk_prepare(clk_2dcore);
            clk_enable(clk_2dcore);
            clk_prepare(clk_vg_axi);
            clk_enable(clk_vg_axi);
            break;
        default:
            break;
        }
    } else {
        switch (GPU) {
        case gcvCORE_MAJOR:
            clk_disable(clk_3dshader);
            clk_unprepare(clk_3dshader);
            clk_disable(clk_3dcore);
            clk_unprepare(clk_3dcore);
            clk_disable(clk_3d_axi);
            clk_unprepare(clk_3d_axi);
            break;
        case gcvCORE_2D:
            clk_disable(clk_2dcore);
            clk_unprepare(clk_2dcore);
            clk_disable(clk_2d_axi);
            clk_unprepare(clk_2d_axi);
            break;
        case gcvCORE_VG:
            clk_disable(clk_2dcore);
            clk_unprepare(clk_2dcore);
            clk_disable(clk_vg_axi);
            clk_unprepare(clk_vg_axi);
            break;
        default:
            break;
        }
    }
#endif

    return gcvSTATUS_OK;
}
Example #27
void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
{
	clk_unprepare(edp_drv->mdp_core_clk);
	clk_unprepare(edp_drv->aux_clk);
	clk_unprepare(edp_drv->ahb_clk);
}
Example #28
/*****************************************************************************
* FUNCTION
*  hal_btif_clk_ctrl
* DESCRIPTION
*  control clock output enable/disable of DMA module
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_dma_clk_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_CLOCK_CTRL flag)
{
/*In MTK DMA BTIF channel, there's only one global CG on AP_DMA, no sub channel's CG bit*/
/*according to Artis's comment, clock of DMA and BTIF is default off, so we assume it to be off by default*/
	int i_ret = 0;
	unsigned long irq_flag = 0;

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	static atomic_t s_clk_ref = ATOMIC_INIT(0);
#else
	static ENUM_CLOCK_CTRL status = CLK_OUT_DISABLE;
#endif

#if defined(CONFIG_MTK_LEGACY)
	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
#endif

#if MTK_BTIF_ENABLE_CLK_CTL

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

	if (CLK_OUT_ENABLE == flag) {
		if (1 == atomic_inc_return(&s_clk_ref)) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			clk_prepare(clk_btif_apdma);
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_enable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			BTIF_INFO_FUNC("[CCF]enable clk_btif_apdma\n");
#endif /* defined(CONFIG_MTK_LEGACY) */
		}
	} else if (CLK_OUT_DISABLE == flag) {
		if (0 == atomic_dec_return(&s_clk_ref)) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_disable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			clk_unprepare(clk_btif_apdma);
			BTIF_INFO_FUNC("[CCF] clk_disable_unprepare(clk_btif_apdma) calling\n");
#endif /* defined(CONFIG_MTK_LEGACY) */

		}
	} else {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
	}

#else

	if (status == flag) {
		i_ret = 0;
		BTIF_DBG_FUNC("dma clock already %s\n",
			      CLK_OUT_ENABLE ==
			      status ? "enabled" : "disabled");
	} else {
		if (CLK_OUT_ENABLE == flag) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			clk_prepare(clk_btif_apdma);
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_enable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			BTIF_INFO_FUNC("[CCF]enable clk_btif_apdma\n");
#endif /* defined(CONFIG_MTK_LEGACY) */

		} else if (CLK_OUT_DISABLE == flag) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_disable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			clk_unprepare(clk_btif_apdma);
			BTIF_INFO_FUNC("[CCF] clk_disable_unprepare(clk_btif_apdma) calling\n");
#endif /* defined(CONFIG_MTK_LEGACY) */

		} else {
			i_ret = ERR_INVALID_PAR;
			BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
		}
	}
#endif

#else

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

#else

	status = flag;
#endif

	i_ret = 0;
#endif

#if defined(CONFIG_MTK_LEGACY)
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
#endif

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable",
			      i_ret);
	}
#else

	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable",
			      i_ret);
	}
#endif
#if defined(CONFIG_MTK_LEGACY)
	BTIF_DBG_FUNC("DMA's clock is %s\n",
		      (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) ? "off" : "on");
#endif
	return i_ret;
}
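A hedged usage sketch for this reference-counted control: every CLK_OUT_ENABLE must be balanced by a CLK_OUT_DISABLE or the AP_DMA clock gate stays open (the transfer function below is hypothetical):

/* Sketch: balanced clock control around one DMA transaction. */
static int btif_dma_do_transfer(P_MTK_DMA_INFO_STR p_dma_info)
{
	int i_ret;

	i_ret = hal_btif_dma_clk_ctrl(p_dma_info, CLK_OUT_ENABLE);
	if (i_ret)
		return i_ret;

	/* ... program and kick the DMA channel here ... */

	return hal_btif_dma_clk_ctrl(p_dma_info, CLK_OUT_DISABLE);
}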
Example #29
File: sh_tmu.c Project: 7799/linux
static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_tmu_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	ret = clk_prepare(p->clk);
	if (ret < 0)
		goto err2;

	p->cs_enabled = false;
	p->enable_count = 0;

	ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret < 0)
		goto err3;

	return 0;

 err3:
	clk_unprepare(p->clk);
 err2:
	clk_put(p->clk);
 err1:
	iounmap(p->mapbase);
 err0:
	return ret;
}
Example #30
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
    struct sh_timer_config *cfg = pdev->dev.platform_data;
    struct resource *res, *res2;
    int irq, ret;
    ret = -ENXIO;

    memset(p, 0, sizeof(*p));
    p->pdev = pdev;

    if (!cfg) {
        dev_err(&p->pdev->dev, "missing platform data\n");
        goto err0;
    }

    res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&p->pdev->dev, "failed to get I/O memory\n");
        goto err0;
    }

    /* optional resource for the shared timer start/stop register */
    res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);

    irq = platform_get_irq(p->pdev, 0);
    if (irq < 0) {
        dev_err(&p->pdev->dev, "failed to get irq\n");
        goto err0;
    }

    /* map memory, let mapbase point to our channel */
    p->mapbase = ioremap_nocache(res->start, resource_size(res));
    if (p->mapbase == NULL) {
        dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
        goto err0;
    }

    /* map second resource for CMSTR */
    p->mapbase_str = ioremap_nocache(res2 ? res2->start :
                                     res->start - cfg->channel_offset,
                                     res2 ? resource_size(res2) : 2);
    if (p->mapbase_str == NULL) {
        dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
        goto err1;
    }

    /* request irq using setup_irq() (too early for request_irq()) */
    p->irqaction.name = dev_name(&p->pdev->dev);
    p->irqaction.handler = sh_cmt_interrupt;
    p->irqaction.dev_id = p;
    p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
                         IRQF_IRQPOLL  | IRQF_NOBALANCING;

    /* get hold of clock */
    p->clk = clk_get(&p->pdev->dev, "cmt_fck");
    if (IS_ERR(p->clk)) {
        dev_err(&p->pdev->dev, "cannot get clock\n");
        ret = PTR_ERR(p->clk);
        goto err2;
    }

    ret = clk_prepare(p->clk);
    if (ret < 0)
        goto err3;

    if (res2 && (resource_size(res2) == 4)) {
        /* assume both CMSTR and CMCSR to be 32-bit */
        p->read_control = sh_cmt_read32;
        p->write_control = sh_cmt_write32;
    } else {
        p->read_control = sh_cmt_read16;
        p->write_control = sh_cmt_write16;
    }

    if (resource_size(res) == 6) {
        p->width = 16;
        p->read_count = sh_cmt_read16;
        p->write_count = sh_cmt_write16;
        p->overflow_bit = 0x80;
        p->clear_bits = ~0x80;
    } else {
        p->width = 32;
        p->read_count = sh_cmt_read32;
        p->write_count = sh_cmt_write32;
        p->overflow_bit = 0x8000;
        p->clear_bits = ~0xc000;
    }

    if (p->width == (sizeof(p->max_match_value) * 8))
        p->max_match_value = ~0;
    else
        p->max_match_value = (1 << p->width) - 1;

    p->match_value = p->max_match_value;
    raw_spin_lock_init(&p->lock);

    ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
                          cfg->clockevent_rating,
                          cfg->clocksource_rating);
    if (ret) {
        dev_err(&p->pdev->dev, "registration failed\n");
        goto err4;
    }
    p->cs_enabled = false;

    ret = setup_irq(irq, &p->irqaction);
    if (ret) {
        dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
        goto err4;
    }

    platform_set_drvdata(pdev, p);

    return 0;
err4:
    clk_unprepare(p->clk);
err3:
    clk_put(p->clk);
err2:
    iounmap(p->mapbase_str);
err1:
    iounmap(p->mapbase);
err0:
    return ret;
}