Example 1: dwc3_core_init_for_resume() deasserts the controller reset, then prepares and enables the bulk clocks, unwinding each step through goto labels when a later one fails.
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
	int ret;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
	if (ret)
		goto assert_reset;

	ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
	if (ret)
		goto unprepare_clks;

	ret = dwc3_core_init(dwc);
	if (ret)
		goto disable_clks;

	return 0;

disable_clks:
	clk_bulk_disable(dwc->num_clks, dwc->clks);
unprepare_clks:
	clk_bulk_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
	reset_control_assert(dwc->reset);

	return ret;
}
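The prepare/enable pair above, and its mirror-image teardown, can also be written with the combined clk_bulk helpers clk_bulk_prepare_enable() and clk_bulk_disable_unprepare(). A minimal sketch under that assumption (the function name is hypothetical; the fields are the ones used above):

static int dwc3_core_init_for_resume_combined(struct dwc3 *dwc)
{
	int ret;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	/* Combined helper: prepares and enables in one call, so a single
	 * error label covers both steps. */
	ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks);
	if (ret)
		goto assert_reset;

	ret = dwc3_core_init(dwc);
	if (ret)
		goto disable_unprepare_clks;

	return 0;

disable_unprepare_clks:
	clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
	reset_control_assert(dwc->reset);

	return ret;
}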
Example 2: device_run() enables the bulk clocks and takes a runtime-PM reference before running the codec job, cancelling the job if either step fails.
static void device_run(void *priv)
{
	struct rockchip_vpu_ctx *ctx = priv;
	int ret;

	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
	if (ret)
		goto err_cancel_job;
	ret = pm_runtime_get_sync(ctx->dev->dev);
	if (ret < 0)
		goto err_cancel_job;

	ctx->codec_ops->run(ctx);
	return;

err_cancel_job:
	rockchip_vpu_job_finish(ctx->dev, ctx, 0, VB2_BUF_STATE_ERROR);
}
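Note that in this excerpt a failure of pm_runtime_get_sync() jumps straight to err_cancel_job, so the clocks stay enabled and the runtime-PM usage count stays raised. A fuller unwind is sketched below; the err_disable_clocks label is hypothetical, the rest follows the original:

static void device_run(void *priv)
{
	struct rockchip_vpu_ctx *ctx = priv;
	int ret;

	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
	if (ret)
		goto err_cancel_job;

	ret = pm_runtime_get_sync(ctx->dev->dev);
	if (ret < 0)
		goto err_disable_clocks;

	ctx->codec_ops->run(ctx);
	return;

err_disable_clocks:
	/* pm_runtime_get_sync() raises the usage count even on failure,
	 * so drop it before turning the clocks back off. */
	pm_runtime_put_noidle(ctx->dev->dev);
	clk_bulk_disable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
err_cancel_job:
	rockchip_vpu_job_finish(ctx->dev, ctx, 0, VB2_BUF_STATE_ERROR);
}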
Example 3: rockchip_pd_power() enables the bulk clocks only for the duration of a power-domain transition, handling the NIU idle request and QoS save/restore around it, and disables them again before returning.
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
{
	struct rockchip_pmu *pmu = pd->pmu;
	int ret;

	mutex_lock(&pmu->mutex);

	if (rockchip_pmu_domain_is_on(pd) != power_on) {
		ret = clk_bulk_enable(pd->num_clks, pd->clks);
		if (ret < 0) {
			dev_err(pmu->dev, "failed to enable clocks\n");
			mutex_unlock(&pmu->mutex);
			return ret;
		}

		if (!power_on) {
			rockchip_pmu_save_qos(pd);

			/* if powering down, idle request to NIU first */
			rockchip_pmu_set_idle_request(pd, true);
		}

		rockchip_do_pmu_set_power_domain(pd, power_on);

		if (power_on) {
			/* if powering up, leave idle mode */
			rockchip_pmu_set_idle_request(pd, false);

			rockchip_pmu_restore_qos(pd);
		}

		clk_bulk_disable(pd->num_clks, pd->clks);
	}

	mutex_unlock(&pmu->mutex);
	return 0;
}
Example 4: dwc3_probe() acquires the clocks with clk_bulk_get(), deasserts the reset, prepares and enables the clocks, and then performs the rest of probe, with an error ladder that unwinds all the way back to clk_bulk_put().
static int dwc3_probe(struct platform_device *pdev)
{
	struct device		*dev = &pdev->dev;
	struct resource		*res, dwc_res;
	struct dwc3		*dwc;
	int			ret;
	u32			mdwidth;
	void __iomem		*regs;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->clks = devm_kmemdup(dev, dwc3_core_clks, sizeof(dwc3_core_clks),
				 GFP_KERNEL);
	if (!dwc->clks)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs	= regs;
	dwc->regs_size	= resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	dwc->reset = devm_reset_control_get_optional_shared(dev, NULL);
	if (IS_ERR(dwc->reset))
		return PTR_ERR(dwc->reset);

	if (dev->of_node) {
		dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);

		ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
		if (ret == -EPROBE_DEFER)
			return ret;
		/*
		 * Clocks are optional, but new DT platforms should support all
		 * clocks as required by the DT-binding.
		 */
		if (ret)
			dwc->num_clks = 0;
	}

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto put_clks;

	ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
	if (ret)
		goto assert_reset;

	ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
	if (ret)
		goto unprepare_clks;

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	spin_lock_init(&dwc->lock);

	/* Set dma coherent mask to DMA BUS data width */
	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
	dev_dbg(dev, "Enabling %d-bit DMA addresses.\n", mdwidth);
	dma_set_coherent_mask(dev, DMA_BIT_MASK(mdwidth));

	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err1;

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err3;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err4;
	}

	dwc3_check_params(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err5;

	dwc3_debugfs_init(dwc);
	pm_runtime_put(dev);

	return 0;

err5:
	dwc3_event_buffers_cleanup(dwc);

err4:
	dwc3_free_scratch_buffers(dwc);

err3:
	dwc3_free_event_buffers(dwc);

err2:
	pm_runtime_allow(&pdev->dev);

err1:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	clk_bulk_disable(dwc->num_clks, dwc->clks);
unprepare_clks:
	clk_bulk_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
	reset_control_assert(dwc->reset);
put_clks:
	clk_bulk_put(dwc->num_clks, dwc->clks);

	return ret;
}
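Because the clocks above come from clk_bulk_get(), every error path has to end in clk_bulk_put(). If the managed variant devm_clk_bulk_get() is acceptable for the platform, that last step disappears; a minimal sketch of just the acquisition, assuming the rest of probe stays the same:

	if (dev->of_node) {
		dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);

		/* devm_clk_bulk_get() ties the references to the device's
		 * lifetime, so no put_clks label is needed on error and no
		 * clk_bulk_put() in remove(). */
		ret = devm_clk_bulk_get(dev, dwc->num_clks, dwc->clks);
		if (ret == -EPROBE_DEFER)
			return ret;
		if (ret)
			dwc->num_clks = 0;
	}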
Example 5: dwc3_of_simple_runtime_resume() simply re-enables the already-prepared bulk clocks on runtime resume.
static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
{
	struct dwc3_of_simple	*simple = dev_get_drvdata(dev);

	return clk_bulk_enable(simple->num_clocks, simple->clks);
}
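The resume callback only has to enable the clocks because they remain prepared across runtime suspend; the matching suspend callback just drops the enable count again. A minimal sketch, assuming the same dwc3_of_simple fields:

static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
	struct dwc3_of_simple	*simple = dev_get_drvdata(dev);

	/* Clocks stay prepared; only the enable count is dropped, so the
	 * resume path above can turn them back on cheaply. */
	clk_bulk_disable(simple->num_clocks, simple->clks);

	return 0;
}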