/*
 * kgsl_power() - toggle the graphics (GRP) power rail.
 * @on: true to power the rail up, false to power it down.
 *
 * Power-up forces the rail into manual control before asserting it;
 * power-down deasserts the rail and then hands control back to the
 * automatic rail manager.
 */
static void kgsl_power(bool on)
{
	if (on) {
		internal_pwr_rail_mode(PWR_RAIL_GRP_CLK, PWR_RAIL_CTL_MANUAL);
		internal_pwr_rail_ctl(PWR_RAIL_GRP_CLK, 1);
		return;
	}

	internal_pwr_rail_ctl(PWR_RAIL_GRP_CLK, 0);
	internal_pwr_rail_mode(PWR_RAIL_GRP_CLK, PWR_RAIL_CTL_AUTO);
}
int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, unsigned int pwrflag) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; switch (pwrflag) { case KGSL_PWRFLAGS_POWER_OFF: if (pwr->power_flags & KGSL_PWRFLAGS_POWER_ON) { internal_pwr_rail_ctl(pwr->pwr_rail, KGSL_FALSE); if (pwr->gpu_reg) regulator_disable(pwr->gpu_reg); pwr->power_flags &= ~(KGSL_PWRFLAGS_POWER_ON); pwr->power_flags |= KGSL_PWRFLAGS_POWER_OFF; } return KGSL_SUCCESS; case KGSL_PWRFLAGS_POWER_ON: if (pwr->power_flags & KGSL_PWRFLAGS_POWER_OFF) { internal_pwr_rail_mode(pwr->pwr_rail, PWR_RAIL_CTL_MANUAL); internal_pwr_rail_ctl(pwr->pwr_rail, KGSL_TRUE); if (pwr->gpu_reg) regulator_enable(pwr->gpu_reg); pwr->power_flags &= ~(KGSL_PWRFLAGS_POWER_OFF); pwr->power_flags |= KGSL_PWRFLAGS_POWER_ON; } return KGSL_SUCCESS; default: return KGSL_FAILURE; } }
int kgsl_pwrctrl_init(struct kgsl_device *device) { int i, result = 0; struct clk *clk; struct platform_device *pdev = container_of(device->parentdev, struct platform_device, dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data; struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data; const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name, pdata_dev->clk.name.clk, pdata_dev->clk.name.pclk, pdata_dev->imem_clk_name.clk, pdata_dev->imem_clk_name.pclk}; /*acquire clocks */ for (i = 1; i < KGSL_MAX_CLKS; i++) { if (clk_names[i]) { clk = clk_get(&pdev->dev, clk_names[i]); if (IS_ERR(clk)) goto clk_err; pwr->grp_clks[i] = clk; } } /* Make sure we have a source clk for freq setting */ clk = clk_get(&pdev->dev, clk_names[0]); pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk; /* put the AXI bus into asynchronous mode with the graphics cores */ if (pdata_pwr->set_grp_async != NULL) pdata_pwr->set_grp_async(); if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) { KGSL_PWR_ERR(device, "invalid power level count: %d\n", pdata_pwr->num_levels); result = -EINVAL; goto done; } pwr->num_pwrlevels = pdata_pwr->num_levels; pwr->active_pwrlevel = pdata_pwr->init_level; for (i = 0; i < pdata_pwr->num_levels; i++) { pwr->pwrlevels[i].gpu_freq = (pdata_pwr->pwrlevel[i].gpu_freq > 0) ? clk_round_rate(pwr->grp_clks[0], pdata_pwr->pwrlevel[i]. 
gpu_freq) : 0; pwr->pwrlevels[i].bus_freq = pdata_pwr->pwrlevel[i].bus_freq; } /* Do not set_rate for targets in sync with AXI */ if (pwr->pwrlevels[0].gpu_freq > 0) clk_set_rate(pwr->grp_clks[0], pwr-> pwrlevels[pwr->num_pwrlevels - 1].gpu_freq); pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name); if (IS_ERR(pwr->gpu_reg)) pwr->gpu_reg = NULL; if (internal_pwr_rail_mode(device->pwrctrl.pwr_rail, PWR_RAIL_CTL_MANUAL)) { KGSL_PWR_ERR(device, "internal_pwr_rail_mode failed\n"); result = -EINVAL; goto done; } pwr->power_flags = 0; pwr->nap_allowed = pdata_pwr->nap_allowed; pwr->interval_timeout = pdata_pwr->idle_timeout; pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk"); if (IS_ERR(pwr->ebi1_clk)) pwr->ebi1_clk = NULL; else clk_set_rate(pwr->ebi1_clk, pwr->pwrlevels[pwr->active_pwrlevel]. bus_freq); if (pdata_dev->clk.bus_scale_table != NULL) { pwr->pcl = msm_bus_scale_register_client(pdata_dev->clk. bus_scale_table); if (!pwr->pcl) { KGSL_PWR_ERR(device, "msm_bus_scale_register_client failed: " "id %d table %p", device->id, pdata_dev->clk.bus_scale_table); result = -EINVAL; goto done; } } /*acquire interrupt */ pwr->interrupt_num = platform_get_irq_byname(pdev, pwr->irq_name); if (pwr->interrupt_num <= 0) { KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n", pwr->interrupt_num); result = -EINVAL; goto done; } register_early_suspend(&device->display_off); return result; clk_err: result = PTR_ERR(clk); KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n", clk_names[i], result); done: return result; }
/*
 * res_trk_enable_pwr_rail() - power up the MFC (video core) rail and
 * acquire its pclk/hclk/hclk_div2 clocks under resource_context.lock.
 *
 * NOTE(review): this definition is truncated in the visible source (its
 * tail and closing brace are not in view); it appears to be the pre-fix
 * version of res_trk_enable_videocore() below, which replaces the
 * repeated unlock/return error handling with goto-based cleanup.
 *
 * NOTE(review): if internal_pwr_rail_ctl() or clk_reset() fails below,
 * all three clk references leak (no clk_put before returning false).
 */
static u32 res_trk_enable_pwr_rail(void) { mutex_lock(&resource_context.lock); if (!resource_context.rail_enabled) { int rc = -1; rc = internal_pwr_rail_mode(PWR_RAIL_MFC_CLK, PWR_RAIL_CTL_MANUAL); if (rc) { VCDRES_MSG_ERROR("%s(): internal_pwr_rail_mode \ failed %d\n", __func__, rc); mutex_unlock(&resource_context.lock); return false; } VCDRES_MSG_LOW("%s(): internal_pwr_rail_mode Success %d\n", __func__, rc); resource_context.pclk = clk_get(resource_context.device, "mfc_pclk"); if (IS_ERR(resource_context.pclk)) { VCDRES_MSG_ERROR("%s(): mfc_pclk get failed\n" , __func__); mutex_unlock(&resource_context.lock); return false; } resource_context.hclk = clk_get(resource_context.device, "mfc_clk"); if (IS_ERR(resource_context.hclk)) { VCDRES_MSG_ERROR("%s(): mfc_clk get failed\n" , __func__); clk_put(resource_context.pclk); mutex_unlock(&resource_context.lock); return false; } resource_context.hclk_div2 = clk_get(resource_context.device, "mfc_div2_clk"); /* BUG(review): copy-paste error — this re-checks pclk instead of hclk_div2, so a clk_get failure for mfc_div2_clk goes undetected; fixed in res_trk_enable_videocore() below */ if (IS_ERR(resource_context.pclk)) { VCDRES_MSG_ERROR("%s(): mfc_div2_clk get failed\n" , __func__); clk_put(resource_context.pclk); clk_put(resource_context.hclk); mutex_unlock(&resource_context.lock); return false; } rc = internal_pwr_rail_ctl(PWR_RAIL_MFC_CLK, 1); if (rc) { VCDRES_MSG_ERROR("\n internal_pwr_rail_ctl failed %d\n" , rc); mutex_unlock(&resource_context.lock); return false; } VCDRES_MSG_LOW("%s(): internal_pwr_rail_ctl Success %d\n" , __func__, rc); msleep(20); rc = clk_reset(resource_context.pclk, CLK_RESET_DEASSERT); if (rc) { VCDRES_MSG_ERROR("\n clk_reset failed %d\n", rc); mutex_unlock(&resource_context.lock); return false; } msleep(20); }
//static u32 res_trk_enable_pwr_rail(void) static u32 res_trk_enable_videocore(void) /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ { mutex_lock(&resource_context.lock); if (!resource_context.rail_enabled) { int rc = -1; rc = internal_pwr_rail_mode(PWR_RAIL_MFC_CLK, PWR_RAIL_CTL_MANUAL); if (rc) { VCDRES_MSG_ERROR("%s(): internal_pwr_rail_mode \ failed %d\n", __func__, rc); mutex_unlock(&resource_context.lock); return false; } VCDRES_MSG_LOW("%s(): internal_pwr_rail_mode Success %d\n", __func__, rc); resource_context.pclk = clk_get(resource_context.device, "mfc_pclk"); if (IS_ERR(resource_context.pclk)) { VCDRES_MSG_ERROR("%s(): mfc_pclk get failed\n" , __func__); /* 20110511 Y.Yagami CHG-S msm: vidc: Fix video core clock power up/down sequence */ // mutex_unlock(&resource_context.lock); // return false; goto bail_out; /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ } resource_context.hclk = clk_get(resource_context.device, "mfc_clk"); if (IS_ERR(resource_context.hclk)) { VCDRES_MSG_ERROR("%s(): mfc_clk get failed\n" , __func__); /* 20110511 Y.Yagami CHG-S msm: vidc: Fix video core clock power up/down sequence */ // clk_put(resource_context.pclk); // mutex_unlock(&resource_context.lock); // return false; goto release_pclk; /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ } resource_context.hclk_div2 = clk_get(resource_context.device, "mfc_div2_clk"); /* 20110511 Y.Yagami CHG-S msm: vidc: Fix video core clock power up/down sequence */ // if (IS_ERR(resource_context.pclk)) { if (IS_ERR(resource_context.hclk_div2)) { /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ VCDRES_MSG_ERROR("%s(): mfc_div2_clk get failed\n" , __func__); /* 20110511 Y.Yagami CHG-S msm: vidc: Fix video core clock power up/down sequence */ // clk_put(resource_context.pclk); // clk_put(resource_context.hclk); // mutex_unlock(&resource_context.lock); // 
return false; goto release_hclk_pclk; /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ } #ifdef PATCH0330 /* 20110511 Y.Yagami CHG-S msm: vidc: Fix video core clock power up/down sequence */ // clk_set_rate(resource_context.hclk, // mfc_clk_freq_table[0]); // // clk_enable(resource_context.pclk); // clk_enable(resource_context.hclk); // clk_enable(resource_context.hclk_div2); if (clk_set_rate(resource_context.hclk, mfc_clk_freq_table[0])) { VCDRES_MSG_ERROR("\n pwr_rail_enable:" " set clk rate failed\n"); goto release_all_clks; } if (clk_enable(resource_context.pclk)) { VCDRES_MSG_ERROR("vidc pclk Enable failed\n"); goto release_all_clks; } if (clk_enable(resource_context.hclk)) { VCDRES_MSG_ERROR("vidc hclk Enable failed\n"); goto disable_pclk; } if (clk_enable(resource_context.hclk_div2)) { VCDRES_MSG_ERROR("vidc hclk_div2 Enable failed\n"); goto disable_hclk_pclk; } /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ #endif /* PATCH0330 */ rc = internal_pwr_rail_ctl(PWR_RAIL_MFC_CLK, 1); if (rc) { VCDRES_MSG_ERROR("\n internal_pwr_rail_ctl failed %d\n" , rc); /* 20110511 Y.Yagami CHG-S msm: vidc: Fix video core clock power up/down sequence */ // mutex_unlock(&resource_context.lock); // return false; goto disable_and_release_all_clks; /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ } VCDRES_MSG_LOW("%s(): internal_pwr_rail_ctl Success %d\n" , __func__, rc); msleep(20); rc = clk_reset(resource_context.pclk, CLK_RESET_DEASSERT); if (rc) { VCDRES_MSG_ERROR("\n clk_reset failed %d\n", rc); /* 20110511 Y.Yagami CHG-S msm: vidc: Fix video core clock power up/down sequence */ // mutex_unlock(&resource_context.lock); // return false; goto disable_and_release_all_clks; /* 20110511 Y.Yagami CHG-E msm: vidc: Fix video core clock power up/down sequence */ } msleep(20); #ifdef PATCH0330 clk_disable(resource_context.pclk); clk_disable(resource_context.hclk); 
clk_disable(resource_context.hclk_div2); #endif /* PATCH0330 */ }