/*
 * display_block_clock_off - gate the display block clocks if currently on.
 *
 * Serialized against display_block_clock_on() by pm_status.clk_lock.
 * The clock_enabled flag is cleared only when __display_block_clock_off()
 * reports success (returns 0); on failure the flag is left set so a later
 * attempt can retry.
 */
void display_block_clock_off(struct display_driver *dispdrv)
{
	mutex_lock(&dispdrv->pm_status.clk_lock);

	if (dispdrv->pm_status.clock_enabled) {
		pm_debug("+");
		if (__display_block_clock_off(dispdrv) == 0)
			dispdrv->pm_status.clock_enabled = 0;
		pm_debug("-");
	}

	mutex_unlock(&dispdrv->pm_status.clk_lock);
}
int display_hibernation_power_off(struct display_driver *dispdrv) { int ret = 0; struct s3c_fb *sfb = dispdrv->decon_driver.sfb; disp_pm_gate_lock(dispdrv, true); mutex_lock(&dispdrv->pm_status.pm_lock); if (sfb->power_state == POWER_DOWN) { pr_info("%s, DECON are already power off state\n", __func__); goto done; } if (atomic_read(&dispdrv->pm_status.lock_count) > GATE_LOCK_CNT) { pr_info("%s, DECON does not need power-off\n", __func__); goto done; } if (get_display_line_count(dispdrv)) { pm_debug("wait until last frame is totally transferred %d:", get_display_line_count(dispdrv)); goto done; } pm_info("##### +"); sfb->power_state = POWER_HIBER_DOWN; __display_hibernation_power_off(dispdrv); disp_pm_runtime_put_sync(dispdrv); request_dynamic_hotplug(true); pm_info("##### -\n"); done: mutex_unlock(&dispdrv->pm_status.pm_lock); disp_pm_gate_lock(dispdrv, false); return ret; }
int disp_pm_runtime_enable(struct display_driver *dispdrv) { #ifdef DISP_RUNTIME_PM_DEBUG pm_debug("runtime pm for disp-driver enabled\n"); #endif pm_runtime_enable(dispdrv->display_driver); return 0; }
/*
 * display_block_clock_on - ungate the display block clocks if off.
 *
 * If the display power domain is down, grab a runtime-PM reference
 * first; if the domain still reports off afterwards the hardware is in
 * an unrecoverable state, so BUG() out. Clock state itself is guarded
 * by pm_status.clk_lock, pairing with display_block_clock_off().
 */
void display_block_clock_on(struct display_driver *dispdrv)
{
	if (!get_display_power_status()) {
		pm_info("Requested a pm_runtime_get_sync, but power still off");
		pm_runtime_get_sync(dispdrv->display_driver);
		if (!get_display_power_status())
			BUG();
	}

	mutex_lock(&dispdrv->pm_status.clk_lock);

	if (!dispdrv->pm_status.clock_enabled) {
		pm_debug("+");
		__display_block_clock_on(dispdrv);
		dispdrv->pm_status.clock_enabled = 1;
		pm_debug("-");
	}

	mutex_unlock(&dispdrv->pm_status.clk_lock);
}
/*
 * decon_clock_gating_handler - kthread work deferred from the TE handler.
 *
 * Gates the block clocks once the idle counter has crossed
 * MAX_CLK_GATING_COUNT, then resets the idle counter and releases the
 * gate lock taken by disp_pm_te_triggered() when this work was queued.
 */
static void decon_clock_gating_handler(struct kthread_work *work)
{
	struct display_driver *dispdrv = get_display_driver();

	if (dispdrv->pm_status.clk_idle_count > MAX_CLK_GATING_COUNT)
		display_block_clock_off(dispdrv);

	init_gating_idle_count(dispdrv);
	disp_pm_gate_lock(dispdrv, false);
	pm_debug("display_block_clock_off -");
}
/*
 * enable_mask - mask the hardware trigger, once.
 *
 * No-op if the trigger is already masked; otherwise records the masked
 * state and masks the HW trigger.
 */
static void enable_mask(struct display_driver *dispdrv)
{
	if (dispdrv->pm_status.trigger_masked)
		return;

	dispdrv->pm_status.trigger_masked = 1;

	/* MASK */
	set_hw_trigger_mask(dispdrv->decon_driver.sfb, true);
	pm_debug("Enable mask");
}
/*
 * disable_mask - unmask the hardware trigger, once.
 *
 * No-op if the trigger is already unmasked; otherwise clears the masked
 * state and unmasks the HW trigger. Counterpart of enable_mask().
 */
static void disable_mask(struct display_driver *dispdrv)
{
	if (!dispdrv->pm_status.trigger_masked)
		return;

	dispdrv->pm_status.trigger_masked = 0;

	/* UNMASK */
	set_hw_trigger_mask(dispdrv->decon_driver.sfb, false);
	pm_debug("Disable mask");
}
/*
 * __display_block_clock_off - gate DECON/MIC/DSIM clocks.
 *
 * Returns -EBUSY without touching any clock while a frame is still being
 * scanned out; otherwise gates the sub-block clocks in DECON -> MIC -> DSIM
 * order and returns 0.
 *
 * NOTE(review): a second static definition of __display_block_clock_off
 * (which additionally deactivates IOVMM) appears later in this file; two
 * static definitions of the same name in one translation unit will not
 * compile — one of the two must be removed. Confirm which version is
 * current before building.
 */
static int __display_block_clock_off(struct display_driver *dispdrv)
{
	if (get_display_line_count(dispdrv)) {
		/* Scan-out in progress: caller should retry later. */
		pm_debug("wait until last frame is totally transferred %d:",
			get_display_line_count(dispdrv));
		return -EBUSY;
	}

	/* DECON -> MIC -> DSIM */
	call_pm_ops(dispdrv, decon_driver, clk_off, dispdrv);
#ifdef CONFIG_DECON_MIC
	call_pm_ops(dispdrv, mic_driver, clk_off, dispdrv);
#endif
	call_pm_ops(dispdrv, dsi_driver, clk_off, dispdrv);

	return 0;
}
/*
 * __display_block_clock_off - gate display clocks, deactivating IOVMM first.
 *
 * Returns -EBUSY without touching anything while a frame is still being
 * scanned out. Otherwise deactivates the SMMU mapping (when ION is enabled
 * and the platform is past DISP_STATUS_PM0) before gating the sub-block
 * clocks in DECON -> MIC -> DSIM order, then returns 0.
 *
 * NOTE(review): this is the SECOND static definition of
 * __display_block_clock_off in this file (an earlier one, without the
 * IOVMM handling, appears above); duplicate static definitions will not
 * compile — one of the two must be removed. Confirm which version is
 * current before building.
 */
static int __display_block_clock_off(struct display_driver *dispdrv)
{
	if (get_display_line_count(dispdrv)) {
		/* Scan-out in progress: caller should retry later. */
		pm_debug("wait until last frame is totally transferred %d:",
			get_display_line_count(dispdrv));
		return -EBUSY;
	}

	/* SMMU -> DECON -> MIC -> DSIM */
#ifdef CONFIG_ION_EXYNOS
	if (dispdrv->platform_status > DISP_STATUS_PM0)
		iovmm_deactivate(dispdrv->decon_driver.sfb->dev);
#endif
	call_pm_ops(dispdrv, decon_driver, clk_off, dispdrv);
#ifdef CONFIG_DECON_MIC
	call_pm_ops(dispdrv, mic_driver, clk_off, dispdrv);
#endif
	call_pm_ops(dispdrv, dsi_driver, clk_off, dispdrv);

	return 0;
}
/* disp_pm_te_triggered - check clock gating or not.
 * this function is called in the TE interrupt handler
 *
 * On every TE (tearing-effect) interrupt: when gating is permitted
 * (clock gating enabled, platform past DISP_STATUS_PM0, no gate-lock
 * holders), advance the relevant idle counter and, once it crosses its
 * threshold, queue the matching kthread work — clock gating while clocks
 * are on, power gating while they are already off. Runs under
 * pm_status.slock in interrupt context.
 */
void disp_pm_te_triggered(struct display_driver *dispdrv)
{
	te_count++;

	if (!dispdrv->pm_status.clock_gating_on)
		return;

	spin_lock(&dispdrv->pm_status.slock);
	if (dispdrv->platform_status > DISP_STATUS_PM0 &&
		atomic_read(&dispdrv->pm_status.lock_count) == 0) {
		/* NOTE(review): this first enable_mask() pass is redundant
		 * whenever MAX_CLK_GATING_COUNT > 0, since the branch below
		 * repeats the identical masked check — confirm whether
		 * MAX_CLK_GATING_COUNT can be <= 0 before removing it. */
		if (dispdrv->pm_status.clock_enabled) {
			if (!dispdrv->pm_status.trigger_masked)
				enable_mask(dispdrv);
		}
		if (dispdrv->pm_status.clock_enabled &&
			MAX_CLK_GATING_COUNT > 0) {
			if (!dispdrv->pm_status.trigger_masked) {
				enable_mask(dispdrv);
			}
			++dispdrv->pm_status.clk_idle_count;
			if (dispdrv->pm_status.clk_idle_count >
				MAX_CLK_GATING_COUNT) {
				/* Gate lock is released by
				 * decon_clock_gating_handler(). */
				disp_pm_gate_lock(dispdrv, true);
				pm_debug("display_block_clock_off +");
				queue_kthread_work(
					&dispdrv->pm_status.control_clock_gating,
					&dispdrv->pm_status.control_clock_gating_work);
			}
		} else {
			/* Clocks already gated: count toward power gating. */
			++dispdrv->pm_status.pwr_idle_count;
			if (dispdrv->pm_status.power_gating_on &&
				dispdrv->pm_status.pwr_idle_count >
				MAX_PWR_GATING_COUNT) {
				queue_kthread_work(
					&dispdrv->pm_status.control_power_gating,
					&dispdrv->pm_status.control_power_gating_work);
			}
		}
	}
	spin_unlock(&dispdrv->pm_status.slock);
}