static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
	int ret;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
	if (ret)
		goto assert_reset;

	ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
	if (ret)
		goto unprepare_clks;

	ret = dwc3_core_init(dwc);
	if (ret)
		goto disable_clks;

	return 0;

disable_clks:
	clk_bulk_disable(dwc->num_clks, dwc->clks);
unprepare_clks:
	clk_bulk_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
	reset_control_assert(dwc->reset);

	return ret;
}
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
	struct dwc3_of_simple *simple = dev_get_drvdata(dev);

	clk_bulk_disable(simple->num_clocks, simple->clks);

	return 0;
}
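/*
 * For context, the matching runtime-resume callback only needs to re-ungate
 * the clocks: clk_bulk_unprepare() is never called on the suspend path above,
 * so the clocks stay prepared across runtime suspend. A minimal sketch,
 * assuming the same simple->clks table:
 */
static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
{
	struct dwc3_of_simple *simple = dev_get_drvdata(dev);

	/* Clocks were left prepared by suspend; just enable them again. */
	return clk_bulk_enable(simple->num_clocks, simple->clks);
}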
static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
				    struct rockchip_vpu_ctx *ctx,
				    unsigned int bytesused,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;
	size_t avail_size;

	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);
	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	dst->field = src->field;
	if (src->flags & V4L2_BUF_FLAG_TIMECODE)
		dst->timecode = src->timecode;
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
			V4L2_BUF_FLAG_TIMECODE);
	dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
				    V4L2_BUF_FLAG_TIMECODE);

	avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
		     ctx->vpu_dst_fmt->header_size;
	if (bytesused <= avail_size) {
		if (ctx->bounce_buf) {
			memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
			       ctx->vpu_dst_fmt->header_size,
			       ctx->bounce_buf, bytesused);
		}
		dst->vb2_buf.planes[0].bytesused =
			ctx->vpu_dst_fmt->header_size + bytesused;
	} else {
		result = VB2_BUF_STATE_ERROR;
	}

	v4l2_m2m_buf_done(src, result);
	v4l2_m2m_buf_done(dst, result);

	v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
}
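/*
 * For balance, the run side of the mem2mem job takes the references that
 * rockchip_vpu_job_finish() drops above: it enables the bulk clocks and grabs
 * a runtime-PM reference before starting the hardware. A hedged sketch; the
 * callback name and the error handling here are assumptions, not the driver's
 * verbatim code:
 */
static void rockchip_vpu_device_run(void *priv)
{
	struct rockchip_vpu_ctx *ctx = priv;
	struct rockchip_vpu_dev *vpu = ctx->dev;
	int ret;

	/* Ungate the clocks that job_finish() will gate again. */
	ret = clk_bulk_enable(vpu->variant->num_clocks, vpu->clocks);
	if (ret)
		return;

	/* Matched by pm_runtime_put_autosuspend() in job_finish(). */
	pm_runtime_get_sync(vpu->dev);

	/* ... program the hardware and kick off the job ... */
}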
static void dwc3_core_exit(struct dwc3 *dwc)
{
	usb_phy_shutdown(dwc->usb2_phy);
	usb_phy_shutdown(dwc->usb3_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);

	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	phy_power_off(dwc->usb2_generic_phy);
	phy_power_off(dwc->usb3_generic_phy);
	clk_bulk_disable(dwc->num_clks, dwc->clks);
	clk_bulk_unprepare(dwc->num_clks, dwc->clks);
	reset_control_assert(dwc->reset);
}
/**
 * clk_bulk_enable - ungate a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being ungated
 *
 * clk_bulk_enable must not sleep.
 * Returns 0 on success, a negative errno otherwise.
 */
int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;
	int i;

	for (i = 0; i < num_clks; i++) {
		ret = clk_enable(clks[i].clk);
		if (ret) {
			pr_err("Failed to enable clk '%s': %d\n",
			       clks[i].id, ret);
			goto err;
		}
	}

	return 0;

err:
	clk_bulk_disable(i, clks);

	return ret;
}
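/*
 * The err label above passes the partial count i so that only the clocks
 * already enabled are unwound, in reverse order. A sketch of the inverse
 * helper consistent with that contract:
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
{
	/* Walk the table backwards, gating each previously ungated clock. */
	while (--num_clks >= 0)
		clk_disable(clks[num_clks].clk);
}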
/**
 * clk_bulk_enable - ungate a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being ungated
 *
 * clk_bulk_enable must not sleep.
 * Returns 0 on success, a negative VMM_Exxx error code otherwise.
 */
int clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;
	int i;

	for (i = 0; i < num_clks; i++) {
		ret = clk_enable(clks[i].clk);
		if (ret) {
			vmm_lerror(__func__,
				   "Failed to enable clk '%s': %d\n",
				   clks[i].id, ret);
			goto err;
		}
	}

	return 0;

err:
	clk_bulk_disable(i, clks);

	return ret;
}
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
{
	struct rockchip_pmu *pmu = pd->pmu;
	int ret;

	mutex_lock(&pmu->mutex);

	if (rockchip_pmu_domain_is_on(pd) != power_on) {
		ret = clk_bulk_enable(pd->num_clks, pd->clks);
		if (ret < 0) {
			dev_err(pmu->dev, "failed to enable clocks\n");
			mutex_unlock(&pmu->mutex);
			return ret;
		}

		if (!power_on) {
			rockchip_pmu_save_qos(pd);

			/* if powering down, idle request to NIU first */
			rockchip_pmu_set_idle_request(pd, true);
		}

		rockchip_do_pmu_set_power_domain(pd, power_on);

		if (power_on) {
			/* if powering up, leave idle mode */
			rockchip_pmu_set_idle_request(pd, false);

			rockchip_pmu_restore_qos(pd);
		}

		clk_bulk_disable(pd->num_clks, pd->clks);
	}

	mutex_unlock(&pmu->mutex);

	return 0;
}
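/*
 * Note that rockchip_pd_power() pairs clk_bulk_enable() only with
 * clk_bulk_disable(); this works because the domain's clocks are
 * bulk-prepared once when the domain is set up. A hedged sketch of that
 * attach-time step (the helper name is hypothetical):
 */
static int rockchip_pd_prepare_clks(struct rockchip_pm_domain *pd)
{
	/* Prepare once at setup so the power path only gates/ungates. */
	return clk_bulk_prepare(pd->num_clks, pd->clks);
}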
static int dwc3_probe(struct platform_device *pdev)
{
	struct device		*dev = &pdev->dev;
	struct resource		*res, dwc_res;
	struct dwc3		*dwc;

	int			ret;

	u32			mdwidth;
	void __iomem		*regs;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->clks = devm_kmemdup(dev, dwc3_core_clks, sizeof(dwc3_core_clks),
				 GFP_KERNEL);
	if (!dwc->clks)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs	= regs;
	dwc->regs_size	= resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	dwc->reset = devm_reset_control_get_optional_shared(dev, NULL);
	if (IS_ERR(dwc->reset))
		return PTR_ERR(dwc->reset);

	if (dev->of_node) {
		dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);

		ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
		if (ret == -EPROBE_DEFER)
			return ret;
		/*
		 * Clocks are optional, but new DT platforms should support all
		 * clocks as required by the DT-binding.
		 */
		if (ret)
			dwc->num_clks = 0;
	}

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto put_clks;

	ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
	if (ret)
		goto assert_reset;

	ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
	if (ret)
		goto unprepare_clks;

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	spin_lock_init(&dwc->lock);

	/* Set dma coherent mask to DMA BUS data width */
	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
	dev_dbg(dev, "Enabling %d-bit DMA addresses.\n", mdwidth);
	dma_set_coherent_mask(dev, DMA_BIT_MASK(mdwidth));

	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err1;

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err3;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err4;
	}

	dwc3_check_params(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err5;

	dwc3_debugfs_init(dwc);
	pm_runtime_put(dev);

	return 0;

err5:
	dwc3_event_buffers_cleanup(dwc);

err4:
	dwc3_free_scratch_buffers(dwc);

err3:
	dwc3_free_event_buffers(dwc);

err2:
	pm_runtime_allow(&pdev->dev);

err1:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	clk_bulk_disable(dwc->num_clks, dwc->clks);
unprepare_clks:
	clk_bulk_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
	reset_control_assert(dwc->reset);
put_clks:
	clk_bulk_put(dwc->num_clks, dwc->clks);

	return ret;
}
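/*
 * A teardown mirror of this probe would release what the success path
 * acquired, in reverse. A hedged sketch based on the error labels above,
 * not the driver's verbatim remove path; dwc3_core_exit(), shown earlier,
 * already disables and unprepares the clocks and asserts the reset:
 */
static int dwc3_remove(struct platform_device *pdev)
{
	struct dwc3	*dwc = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	dwc3_debugfs_exit(dwc);
	dwc3_core_exit(dwc);	/* clk_bulk_disable/unprepare + reset assert */

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	clk_bulk_put(dwc->num_clks, dwc->clks);
	dwc3_free_event_buffers(dwc);

	return 0;
}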