void dss_clk_enable(enum dss_clock clks)
{
	dss_clk_enable_no_ctx(clks);

	if (cpu_is_omap34xx() && dss_need_ctx_restore())
		restore_all_ctx();
	else if (cpu_is_omap44xx() && dss_need_ctx_restore())
		restore_all_ctx();
}
void dss_clk_enable(enum dss_clock clks)
{
	/*
	 * Sample the counter before enabling: if any clock was already
	 * on, the DSS stayed powered and the context cannot be lost.
	 */
	bool check_ctx = core.num_clks_enabled == 0;

	dss_clk_enable_no_ctx(clks);

	if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
		restore_all_ctx();
}
void dss_clk_enable(enum dss_clock clks)
{
	dss_clk_enable_no_ctx(clks);

	if (cpu_is_omap34xx()) {
		unsigned num_clks = count_clk_bits(clks);

		/*
		 * If the total enabled count equals the clocks we just
		 * turned on, this call enabled the first clocks and the
		 * saved register context must be restored.
		 */
		if (dss.num_clks_enabled == num_clks)
			restore_all_ctx();
	}
}
void dss_clk_enable(enum dss_clock clks)
{
	bool restore = false;

	if (core.num_clks_enabled == 0)
		restore = true;

	dss_clk_enable_no_ctx(clks);

	/*
	 * Restore only when this call turned on the first clock and the
	 * platform actually saves/restores the DSS register context.
	 */
	if (restore && cpu_is_omap34xx() && dss_need_ctx_restore())
		restore_all_ctx();
}
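/*
 * Illustrative caller for the dss_clk_enable() variants above: a minimal
 * sketch, not code from the driver.  It assumes the dss_clk_disable()
 * counterpart and the DSS_CLK_ICK/DSS_CLK_FCK1 mask names used by some
 * versions of the driver, plus a hypothetical dss_read_example_reg()
 * accessor.  The point is only that register access is bracketed by
 * enable/disable, so a lost context is restored before the first access.
 */
static u32 example_read_dss_reg(void)
{
	u32 val;

	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
	val = dss_read_example_reg();	/* hypothetical register accessor */
	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);

	return val;
}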
/*
 * Enables mainclk (DSS clocks on OMAP4) if any device is enabled.
 * Returns 0 on success.
 */
int dss_mainclk_state_enable(void)
{
	int r;
	struct bus_type *bus = dss_get_bus();

	r = bus_for_each_dev(bus, NULL, NULL, dss_check_state_disabled);
	if (r) {
		/* At least one device is enabled, so the clocks are needed */
		r = dss_mainclk_enable();
		if (!r)
			restore_all_ctx();
		return r;
	} else {
		/* All devices are disabled/suspended */
		return -EAGAIN;
	}
}
/*
 * Enables mainclk (DSS clocks on OMAP4) if any device is enabled.
 * Returns 0 on success.
 */
int dss_clken_restore_ctx(void)
{
	int r;
	struct bus_type *bus = dss_get_bus();

	DSSDBG("%s enter\n", __func__);

	r = bus_for_each_dev(bus, NULL, NULL, dss_check_state_disabled);
	if (r) {
		r = dsi_runtime_get();
		/* Restore the context exactly once after it has been lost */
		if (!r && (atomic_cmpxchg(&context_state,
				DSS_CONTEXT_NOT_RESTORED,
				DSS_CONTEXT_RESTORED) ==
				DSS_CONTEXT_NOT_RESTORED))
			restore_all_ctx();
		return r;
	} else {
		/* All devices are disabled/suspended */
		return -EAGAIN;
	}
}
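/*
 * Sketch of the dss_check_state_disabled() callback that the two
 * functions above pass to bus_for_each_dev(); the driver's real
 * implementation is not shown here, so the field and state names
 * (dssdev->state, OMAP_DSS_DISPLAY_DISABLED/SUSPENDED) are assumptions
 * based on the omapdss panel API.  Returning non-zero for an active
 * device stops the iteration and makes bus_for_each_dev() return
 * non-zero, which the callers above read as "at least one device
 * needs the clocks".
 */
static int dss_check_state_disabled(struct device *dev, void *data)
{
	struct omap_dss_device *dssdev = to_dss_device(dev);

	if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
	    dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
		return 0;	/* this device does not need the DSS clocks */

	return -EBUSY;		/* active device found; stop iterating */
}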