/*
 * Vote for SCM PAS bandwidth and enable the SCM bus clock for the first
 * user; subsequent callers only bump the refcount under scm_pas_bw_mutex.
 *
 * Returns 0 on success or a negative errno.  On any failure the
 * bandwidth vote is dropped again so no stale request is left behind
 * (the devote is issued even when the vote itself failed, which is
 * harmless).
 */
static int scm_pas_enable_bw(void)
{
	int ret = 0;

	if (!scm_perf_client)
		return -EINVAL;

	mutex_lock(&scm_pas_bw_mutex);
	if (!scm_pas_bw_count) {
		/* First user: raise the bus vote, then enable the clock. */
		ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
		if (ret) {
			pr_err("bandwidth request failed (%d)\n", ret);
		} else if (scm_bus_clk) {
			ret = clk_prepare_enable(scm_bus_clk);
			if (ret)
				pr_err("clock enable failed\n");
		}
	}
	if (ret)
		msm_bus_scale_client_update_request(scm_perf_client, 0);
	else
		scm_pas_bw_count++;
	mutex_unlock(&scm_pas_bw_mutex);
	return ret;
}
int kgsl_pwrctrl_axi(struct kgsl_device *device, unsigned int pwrflag) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; switch (pwrflag) { case KGSL_PWRFLAGS_AXI_OFF: if (pwr->power_flags & KGSL_PWRFLAGS_AXI_ON) { if (pwr->clk_freq[KGSL_AXI_HIGH] && pwr->ebi1_clk) clk_disable(pwr->ebi1_clk); if (pwr->pcl) msm_bus_scale_client_update_request(pwr->pcl, BW_INIT); pwr->power_flags &= ~(KGSL_PWRFLAGS_AXI_ON); pwr->power_flags |= KGSL_PWRFLAGS_AXI_OFF; } return KGSL_SUCCESS; case KGSL_PWRFLAGS_AXI_ON: if (pwr->power_flags & KGSL_PWRFLAGS_AXI_OFF) { if (pwr->clk_freq[KGSL_AXI_HIGH] && pwr->ebi1_clk) clk_enable(pwr->ebi1_clk); if (pwr->pcl) msm_bus_scale_client_update_request(pwr->pcl, BW_MAX); pwr->power_flags &= ~(KGSL_PWRFLAGS_AXI_OFF); pwr->power_flags |= KGSL_PWRFLAGS_AXI_ON; } return KGSL_SUCCESS; default: return KGSL_FAILURE; } }
/*
 * Switch the GPU AXI bus vote on or off.  test_and_{set,clear}_bit on
 * power_flags makes the transitions idempotent.  Note the deliberately
 * asymmetric ordering: on the off path the EBI1 rate is dropped to 0
 * before the clock is disabled, while on the on path the clock is
 * enabled before the rate is raised to the active level's bus_freq.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device, "axi off, device %d\n",
				device->id);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device, "axi on, device %d\n",
				device->id);
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
		}
	}
}
/**
 * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
 * @cl: Handle to the client
 *
 * If the client still holds non-zero vectors, warn, temporarily zero the
 * vectors so the hardware vote is cleanly dropped, then restore them so
 * the pdata remains usable for re-registration.
 */
void msm_bus_scale_unregister_client(uint32_t cl)
{
	int i;
	struct msm_bus_client *client = (struct msm_bus_client *)(cl);
	bool warn = false;

	if (IS_ERR_OR_NULL(client))
		return;

	for (i = 0; i < client->pdata->usecase->num_paths; i++) {
		if ((client->pdata->usecase[0].vectors[i].ab) ||
			(client->pdata->usecase[0].vectors[i].ib)) {
			warn = true;
			break;
		}
	}

	if (warn) {
		int num_paths = client->pdata->usecase->num_paths;
		/*
		 * Save area must be u64: the vector ab/ib fields are 64-bit
		 * bandwidth values and the previous int buffers silently
		 * truncated them on save/restore.
		 */
		u64 ab[num_paths], ib[num_paths];

		WARN(1, "%s called unregister with non-zero vectors\n",
			client->pdata->name);

		/*
		 * Save client values and zero them out to
		 * cleanly unregister
		 */
		for (i = 0; i < num_paths; i++) {
			ab[i] = client->pdata->usecase[0].vectors[i].ab;
			ib[i] = client->pdata->usecase[0].vectors[i].ib;
			client->pdata->usecase[0].vectors[i].ab = 0;
			client->pdata->usecase[0].vectors[i].ib = 0;
		}

		msm_bus_scale_client_update_request(cl, 0);

		/* Restore client vectors if required for re-registering. */
		for (i = 0; i < num_paths; i++) {
			client->pdata->usecase[0].vectors[i].ab = ab[i];
			client->pdata->usecase[0].vectors[i].ib = ib[i];
		}
	} else if (client->curr != 0)
		msm_bus_scale_client_update_request(cl, 0);

	MSM_BUS_DBG("Unregistering client %d\n", cl);

#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_lock(&msm_bus_lock);
#else
	mutex_lock(&msm_bus_lock);
#endif
	msm_bus_scale_client_reset_pnodes(cl);
	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_unlock(&msm_bus_lock);
#else
	mutex_unlock(&msm_bus_lock);
#endif
	kfree(client->src_pnode);
	kfree(client);
}
static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev) { unsigned long val = 0; unsigned long reg_val = 0; int ret = 0; if (msm_rng_dev->qrng_perf_client) { ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 1); if (ret) pr_err("bus_scale_client_update_req failed!\n"); } /* Enable the PRNG CLK */ ret = clk_prepare_enable(msm_rng_dev->prng_clk); if (ret) { dev_err(&(msm_rng_dev->pdev)->dev, "failed to enable clock in probe\n"); return -EPERM; } /* Enable PRNG h/w only if it is NOT ON */ val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) & PRNG_HW_ENABLE; /* PRNG H/W is not ON */ if (val != PRNG_HW_ENABLE) { val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET); val &= PRNG_LFSR_CFG_MASK; val |= PRNG_LFSR_CFG_CLOCKS; writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET); /* The PRNG CONFIG register should be first written */ mb(); reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) & PRNG_CONFIG_MASK; reg_val |= PRNG_HW_ENABLE; writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET); /* The PRNG clk should be disabled only after we enable the * PRNG h/w by writing to the PRNG CONFIG register. */ mb(); } clk_disable_unprepare(msm_rng_dev->prng_clk); if (msm_rng_dev->qrng_perf_client) { ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 0); if (ret) pr_err("bus_scale_client_update_req failed!\n"); } return 0; }
void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting) { static uint32_t bus_perf_client; int rc = 0; switch (perf_setting) { case S_INIT: bus_perf_client = msm_bus_scale_register_client(&cam_bus_client_pdata); if (!bus_perf_client) { pr_err("%s: Registration Failed!!!\n", __func__); bus_perf_client = 0; return; } pr_info("%s: S_INIT rc = %u\n", __func__, bus_perf_client); break; case S_EXIT: if (bus_perf_client) { pr_info("%s: S_EXIT\n", __func__); msm_bus_scale_unregister_client(bus_perf_client); } else pr_err("%s: Bus Client NOT Registered!!!\n", __func__); break; case S_PREVIEW: if (bus_perf_client) { rc = msm_bus_scale_client_update_request( bus_perf_client, 1); pr_info("%s: S_PREVIEW rc = %d\n", __func__, rc); } else pr_err("%s: Bus Client NOT Registered!!!\n", __func__); break; case S_VIDEO: if (bus_perf_client) { rc = msm_bus_scale_client_update_request( bus_perf_client, 2); pr_info("%s: S_VIDEO rc = %d\n", __func__, rc); } else pr_err("%s: Bus Client NOT Registered!!!\n", __func__); break; case S_CAPTURE: if (bus_perf_client) { rc = msm_bus_scale_client_update_request( bus_perf_client, 3); pr_info("%s: S_CAPTURE rc = %d\n", __func__, rc); } else pr_err("%s: Bus Client NOT Registered!!!\n", __func__); break; case S_DEFAULT: break; default: pr_info("%s: INVALID CASE\n", __func__); } }
int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client) { int rc = 0; mutex_lock(&bandwidth_mgr_mutex); isp_bandwidth_mgr.client_info[client].active = 1; if (isp_bandwidth_mgr.use_count++) { mutex_unlock(&bandwidth_mgr_mutex); return rc; } isp_bandwidth_mgr.bus_client = msm_bus_scale_register_client(&msm_isp_bus_client_pdata); if (!isp_bandwidth_mgr.bus_client) { pr_err("%s: client register failed\n", __func__); mutex_unlock(&bandwidth_mgr_mutex); return -EINVAL; } isp_bandwidth_mgr.bus_vector_active_idx = 1; msm_bus_scale_client_update_request( isp_bandwidth_mgr.bus_client, isp_bandwidth_mgr.bus_vector_active_idx); mutex_unlock(&bandwidth_mgr_mutex); return 0; }
/*
 * Power down the DTV path: panel off, TV encoder/DAC/HDMI (and optional
 * MDP TV) clocks off, panel power-save and GPIO teardown, then drop the
 * bus vote (or, without bus scaling, relax the pm_qos request).
 *
 * Note: ret from panel_next_off() is overwritten by lcdc_gpio_config(0)
 * when that hook exists; only the last failure is reported.
 */
static int dtv_off(struct platform_device *pdev)
{
	int ret = 0;

	ret = panel_next_off(pdev);
	pr_info("%s\n", __func__);

	clk_disable(tv_enc_clk);
	clk_disable(tv_dac_clk);
	clk_disable(hdmi_clk);
	if (mdp_tv_clk)
		clk_disable(mdp_tv_clk);

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(0);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(0);
#ifdef CONFIG_MSM_BUS_SCALING
	if (dtv_bus_scale_handle > 0)
		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
							0);
#else
	if (pm_qos_req)
		pm_qos_update_request(pm_qos_req, PM_QOS_DEFAULT_VALUE);
#endif
	return ret;
}
/*
 * Power down the DTV path (helper used when dtv_pdev is cached
 * module-side): panel off, HDMI and optional MDP TV clocks off, panel
 * power-save and GPIO teardown, then drop the bus vote (or the EBI1
 * clock without bus scaling) and clear the external-display flag.
 *
 * Returns -EINVAL if no DTV platform device is registered; otherwise
 * the status of the last teardown hook that ran.
 */
static int dtv_off_sub(void)
{
	int ret = 0;

	if (!dtv_pdev) {
		pr_err("%s: FAILED: invalid arg\n", __func__);
		return -EINVAL;
	}

	ret = panel_next_off(dtv_pdev);
	pr_info("%s\n", __func__);

	clk_disable_unprepare(hdmi_clk);
	if (mdp_tv_clk)
		clk_disable_unprepare(mdp_tv_clk);

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(0);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(0);
#ifdef CONFIG_MSM_BUS_SCALING
	if (dtv_bus_scale_handle > 0)
		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
							0);
#else
	if (ebi1_clk)
		clk_disable_unprepare(ebi1_clk);
#endif
	mdp4_extn_disp = 0;
	return ret;
}
void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client) { mutex_lock(&bandwidth_mgr_mutex); memset(&isp_bandwidth_mgr.client_info[client], 0, sizeof(struct msm_isp_bandwidth_info)); if (--isp_bandwidth_mgr.use_count) { mutex_unlock(&bandwidth_mgr_mutex); return; } if (!isp_bandwidth_mgr.bus_client) { /*QCT_PATCH S, fix lockup when start camera with 13M resolution, 2013-10-31, [email protected] */ pr_err("%s:%d error: bus client invalid\n", __func__, __LINE__); mutex_unlock(&bandwidth_mgr_mutex); /*QCT_PATCH E, fix lockup when start camera with 13M resolution, 2013-10-31, [email protected] */ return; /*QCT_PATCH S, fix lockup when start camera with 13M resolution, 2013-10-31, [email protected] */ } /*QCT_PATCH E, fix lockup when start camera with 13M resolution, 2013-10-31, [email protected] */ msm_bus_scale_client_update_request( isp_bandwidth_mgr.bus_client, 0); msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client); isp_bandwidth_mgr.bus_client = 0; mutex_unlock(&bandwidth_mgr_mutex); }
/* Apply bus-scale usecase @idx, if a bus client was registered. */
static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
{
	if (!mdp4_dtv_encoder->bsc)
		return;

	DBG("set bus scaling: %d", idx);
	msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
}
/*
 * Vote for new_ib/new_ab (MBps) of bandwidth.  AB is split evenly over
 * num_paths.  The request is written into the inactive slot of the
 * double-buffered bw_levels array so the bus driver always observes a
 * usecase change.  A request matching the current vote is a no-op.
 *
 * Returns 0 on success or the bus-scale error code.
 */
static int set_bw(int new_ib, int new_ab)
{
	static int cur_idx, cur_ab, cur_ib;
	int slot, ret;

	if (cur_ib == new_ib && cur_ab == new_ab)
		return 0;

	/* Fill the inactive double-buffer slot with the new vectors. */
	slot = (cur_idx + 1) % ARRAY_SIZE(bw_levels);
	bw_levels[slot].vectors[0].ib = new_ib * MBYTE;
	bw_levels[slot].vectors[0].ab = new_ab / num_paths * MBYTE;
	bw_levels[slot].vectors[1].ib = new_ib * MBYTE;
	bw_levels[slot].vectors[1].ab = new_ab / num_paths * MBYTE;

	pr_debug("BW MBps: AB: %d IB: %d\n", new_ab, new_ib);

	ret = msm_bus_scale_client_update_request(bus_client, slot);
	if (ret) {
		pr_err("bandwidth request failed (%d)\n", ret);
		return ret;
	}

	cur_idx = slot;
	cur_ib = new_ib;
	cur_ab = new_ab;
	return 0;
}
/* Apply bus-scale usecase @idx for the GPU, if a bus client exists. */
static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (!gpu->bsc)
		return;

	DBG("set bus scaling: %d", idx);
	msm_bus_scale_client_update_request(gpu->bsc, idx);
}
/*
 * Update the MDP bus bandwidth vote.  A zero ab and ib selects usecase 0
 * (idle); otherwise the quotas are written into one of the remaining
 * usecases, rotating through them so the bus driver always sees a
 * vector change.  If the requested quotas match the currently
 * programmed vectors the call is skipped.
 *
 * Returns 0 on success (or skip), a negative errno on failure.
 */
int mdss_mdp_bus_scale_set_quota(u32 ab_quota, u32 ib_quota)
{
	static int current_bus_idx;
	int bus_idx;

	if (mdss_res->bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
		return -EINVAL;
	}
	if ((ab_quota | ib_quota) == 0) {
		bus_idx = 0;	/* idle usecase */
	} else {
		int num_cases = mdp_bus_scale_table.num_usecases;
		struct msm_bus_vectors *vect = NULL;

		/* Rotate through usecases 1..num_cases-1. */
		bus_idx = (current_bus_idx % (num_cases - 1)) + 1;

		/* NOTE(review): only vectors[0] is compared/updated here --
		 * assumes one vector per usecase; confirm against the
		 * platform's mdp_bus_scale_table. */
		vect = mdp_bus_scale_table.usecase[current_bus_idx].vectors;
		if ((ab_quota == vect->ab) && (ib_quota == vect->ib)) {
			pr_debug("skip bus scaling, no change in vectors\n");
			return 0;
		}

		vect = mdp_bus_scale_table.usecase[bus_idx].vectors;
		vect->ab = ab_quota;
		vect->ib = ib_quota;

		pr_debug("bus scale idx=%d ab=%u ib=%u\n", bus_idx,
			vect->ab, vect->ib);
	}
	current_bus_idx = bus_idx;
	return msm_bus_scale_client_update_request(mdss_res->bus_hdl, bus_idx);
}
static void set_bw(int mbps) { static int cur_idx, cur_ab, cur_ib; int new_ab, new_ib; int i, ret; if (!io_percent) io_percent = 1; new_ab = roundup(mbps, bw_step); new_ib = mbps * 100 / io_percent; new_ib = roundup(new_ib, bw_step); if (cur_ib == new_ib && cur_ab == new_ab) return; i = (cur_idx + 1) % ARRAY_SIZE(bw_levels); bw_levels[i].vectors[0].ib = new_ib * 1000000ULL; bw_levels[i].vectors[0].ab = new_ab * 1000000ULL; bw_levels[i].vectors[1].ib = new_ib * 1000000ULL; bw_levels[i].vectors[1].ab = new_ab * 1000000ULL; pr_debug("BW MBps: Req: %d AB: %d IB: %d\n", mbps, new_ab, new_ib); ret = msm_bus_scale_client_update_request(bus_client, i); if (ret) pr_err("bandwidth request failed (%d)\n", ret); else { cur_idx = i; cur_ib = new_ib; cur_ab = new_ab; } }
/*
 * Select bus-scale usecase @index for this MHI device's bus client.
 * Returns the bus driver's status code.
 */
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt, int index)
{
	mhi_log(MHI_MSG_INFO, "Setting bus request to index %d\n", index);

	return msm_bus_scale_client_update_request(mhi_dev_ctxt->bus_client,
						   index);
}
/*
 * Vote for new_ib/new_ab (MBps) of bandwidth on behalf of @dev.  AB is
 * split evenly over the device's paths.  The request is written into
 * the inactive slot of the per-device double-buffered bw_levels so the
 * bus driver always observes a usecase change; matching requests are
 * no-ops.
 *
 * Returns 0 on success or the bus-scale error code.
 */
static int set_bw(struct device *dev, int new_ib, int new_ab)
{
	struct dev_data *d = dev_get_drvdata(dev);
	int slot, ret;

	if (d->cur_ib == new_ib && d->cur_ab == new_ab)
		return 0;

	slot = (d->cur_idx + 1) % DBL_BUF;
	d->bw_levels[slot].vectors[0].ib = new_ib * MBYTE;
	d->bw_levels[slot].vectors[0].ab = new_ab / d->num_paths * MBYTE;
	d->bw_levels[slot].vectors[1].ib = new_ib * MBYTE;
	d->bw_levels[slot].vectors[1].ab = new_ab / d->num_paths * MBYTE;

	dev_dbg(dev, "BW MBps: AB: %d IB: %d\n", new_ab, new_ib);

	ret = msm_bus_scale_client_update_request(d->bus_client, slot);
	if (ret) {
		dev_err(dev, "bandwidth request failed (%d)\n", ret);
		return ret;
	}

	d->cur_idx = slot;
	d->cur_ib = new_ib;
	d->cur_ab = new_ab;
	return 0;
}
/*
 * Take the proxy votes needed while the PIL peripheral boots: proxy
 * regulators, proxy clocks, and (when a bus client exists) a bus
 * bandwidth vote.  On failure everything acquired so far is released
 * in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int pil_make_proxy_vote(struct pil_desc *pil)
{
	struct pil_tz_data *d = desc_to_data(pil);
	int rc;

	rc = enable_regulators(pil->dev, d->proxy_regs, d->proxy_reg_count);
	if (rc)
		return rc;

	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
				   d->proxy_clk_count);
	if (rc)
		goto err_regs;

	if (!d->bus_client)
		return 0;

	rc = msm_bus_scale_client_update_request(d->bus_client, 1);
	if (!rc)
		return 0;

	dev_err(pil->dev, "bandwidth request failed\n");
	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
err_regs:
	disable_regulators(d->proxy_regs, d->proxy_reg_count);
	return rc;
}
/*
 * Recompute and apply the video-core bus vote for @perf_level.
 *
 * The requested perf levels of all open clients are summed per type
 * (dec_perf_level is accumulated but only enc_perf_level influences
 * the result); client_type is 1 for decode-only workloads.  perf_level
 * maps to one of three tiers, and the final usecase index is
 * (tier << 1) + (client_type + 1) -- presumably the bus-scale table
 * interleaves encode/decode entries per tier; TODO confirm against the
 * platform's msm_bus_paths table.
 */
int res_trk_update_bus_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_level)
{
	struct vcd_clnt_ctxt *cctxt_itr = NULL;
	u32 enc_perf_level = 0, dec_perf_level = 0;
	u32 bus_clk_index, client_type = 0;
	int rc = 0;

	/* Sum requested perf levels across all open client contexts. */
	cctxt_itr = dev_ctxt->cctxt_list_head;
	while (cctxt_itr) {
		if (cctxt_itr->decoding)
			dec_perf_level += cctxt_itr->reqd_perf_lvl;
		else
			enc_perf_level += cctxt_itr->reqd_perf_lvl;
		cctxt_itr = cctxt_itr->next;
	}

	if (!enc_perf_level)
		client_type = 1;	/* decode-only */

	/* Map perf_level to a bandwidth tier. */
	if (perf_level <= RESTRK_1080P_VGA_PERF_LEVEL)
		bus_clk_index = 0;
	else if (perf_level <= RESTRK_1080P_720P_PERF_LEVEL)
		bus_clk_index = 1;
	else
		bus_clk_index = 2;

	/* No outstanding or current perf request: force the top tier. */
	if (dev_ctxt->reqd_perf_lvl + dev_ctxt->curr_perf_lvl == 0)
		bus_clk_index = 2;

	bus_clk_index = (bus_clk_index << 1) + (client_type + 1);
	VCDRES_MSG_LOW("%s(), bus_clk_index = %d", __func__, bus_clk_index);
	VCDRES_MSG_LOW("%s(),context.pcl = %x", __func__,
		resource_context.pcl);
	VCDRES_MSG_LOW("%s(), bus_perf_level = %x", __func__, perf_level);
	rc = msm_bus_scale_client_update_request(resource_context.pcl,
		bus_clk_index);
	return rc;
}
static int ath6kl_hsic_remove(struct platform_device *pdev) { struct ath6kl_platform_data *pdata = platform_get_drvdata(pdev); if (machine_is_apq8064_dma()) { ath6kl_toggle_radio(pdev->dev.platform_data, 0); } else { msm_bus_scale_client_update_request(bus_perf_client, 1); if (bus_perf_client) msm_bus_scale_unregister_client(bus_perf_client); if (pdata->wifi_chip_pwd != NULL) { int ret; ret = ath6kl_platform_power(pdata, 0); if (pdata->wifi_chip_pwd->reg) regulator_put(pdata->wifi_chip_pwd->reg); if (pdata->wifi_vddpa != NULL && pdata->wifi_vddpa->reg) regulator_put(pdata->wifi_vddpa->reg); if (pdata->wifi_vddio != NULL && pdata->wifi_vddio->reg) regulator_put(pdata->wifi_vddio->reg); if (ret == 0 && ath6kl_bt_on == 0) ath6kl_hsic_bind(0); } } return 0; }
/*
 * Update the aggregate ISP bandwidth request for @client to (@ab, @ib).
 * The totals across all active clients (on top of the MSM_ISP_MIN_AB /
 * MSM_ISP_MIN_IB floor) are written into the inactive bus vector
 * (double-buffered via ALT_VECTOR_IDX) and voted.
 *
 * Returns 0 on success, -EINVAL if the bandwidth manager is inactive.
 */
int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
	uint64_t ab, uint64_t ib)
{
	int i;
	struct msm_bus_paths *path;

	mutex_lock(&bandwidth_mgr_mutex);
	if (!isp_bandwidth_mgr.use_count ||
		!isp_bandwidth_mgr.bus_client) {
		pr_err("%s: bandwidth manager inactive\n", __func__);
		/*
		 * Fix: this early-error path previously returned with
		 * bandwidth_mgr_mutex still held, deadlocking every
		 * subsequent caller.
		 */
		mutex_unlock(&bandwidth_mgr_mutex);
		return -EINVAL;
	}

	isp_bandwidth_mgr.client_info[client].ab = ab;
	isp_bandwidth_mgr.client_info[client].ib = ib;
	ALT_VECTOR_IDX(isp_bandwidth_mgr.bus_vector_active_idx);
	path = &(msm_isp_bus_client_pdata.usecase[
		isp_bandwidth_mgr.bus_vector_active_idx]);
	path->vectors[0].ab = MSM_ISP_MIN_AB;
	path->vectors[0].ib = MSM_ISP_MIN_IB;
	for (i = 0; i < MAX_ISP_CLIENT; i++) {
		if (isp_bandwidth_mgr.client_info[i].active) {
			path->vectors[0].ab +=
				isp_bandwidth_mgr.client_info[i].ab;
			path->vectors[0].ib +=
				isp_bandwidth_mgr.client_info[i].ib;
		}
	}
	msm_bus_scale_client_update_request(isp_bandwidth_mgr.bus_client,
		isp_bandwidth_mgr.bus_vector_active_idx);
	mutex_unlock(&bandwidth_mgr_mutex);
	return 0;
}
/*
 * Power down the DTV path: panel off, HDMI and optional MDP TV clocks
 * off, panel power-save and GPIO teardown, then drop the bus vote (or
 * the EBI1 clock without bus scaling) and clear the external-display
 * flag.
 *
 * Note: ret from panel_next_off() is overwritten by lcdc_gpio_config(0)
 * when that hook exists; only the last failure is reported.
 */
static int dtv_off(struct platform_device *pdev)
{
	int ret = 0;

	ret = panel_next_off(pdev);
	pr_info("%s\n", __func__);

	clk_disable_unprepare(hdmi_clk);
	if (mdp_tv_clk)
		clk_disable_unprepare(mdp_tv_clk);

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(0);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(0);
#ifdef CONFIG_MSM_BUS_SCALING
	if (dtv_bus_scale_handle > 0)
		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
							0);
#else
	if (ebi1_clk)
		clk_disable_unprepare(ebi1_clk);
#endif
	mdp4_extn_disp = 0;
	return ret;
}
/*
 * Change the GPU's active power level to @new_level if it is in range,
 * within the thermal limit (new_level >= thermal_pwrlevel -- presumably
 * levels are ordered fastest-first so smaller indices are hotter;
 * confirm against the pwrlevel table) and different from the current
 * level.  The core clock is only reprogrammed while clocks are on or
 * the device is napping; the bus vote/EBI1 rate only while AXI is on.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client over raw EBI1 rate. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
					pwr->active_pwrlevel);
	}
}
/*
 * Power down the TV encoder: panel off, encoder clock off, video
 * regulator off, then drop the bus vote (or disable EBI1 without bus
 * scaling) and clear the external-display flag.
 *
 * Note: ret from panel_next_off() is overwritten by pm_vid_en(0) when
 * that hook exists; only the last failure is returned.
 */
static int tvenc_off(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;

	mfd = platform_get_drvdata(pdev);

	ret = panel_next_off(pdev);
	if (ret)
		pr_err("%s: tvout_off failed! %d\n", __func__, ret);

	tvenc_set_clock(CLOCK_OFF);

	if (tvenc_pdata && tvenc_pdata->pm_vid_en)
		ret = tvenc_pdata->pm_vid_en(0);
#ifdef CONFIG_MSM_BUS_SCALING
	if (tvenc_bus_scale_handle > 0)
		msm_bus_scale_client_update_request(tvenc_bus_scale_handle,
							0);
#else
	if (mfd->ebi1_clk)
		clk_disable(mfd->ebi1_clk);
#endif
	if (ret)
		pr_err("%s: pm_vid_en(off) failed! %d\n", __func__, ret);

	mdp4_extn_disp = 0;
	return ret;
}
/*
 * Change the GPU's active power level to @new_level if it is in range,
 * within the thermal limit (new_level >= thermal_pwrlevel -- presumably
 * levels are ordered fastest-first; confirm against the pwrlevel
 * table) and different from the current level.  The core clock is only
 * reprogrammed while clocks are on or the device is napping; the bus
 * vote/EBI1 rate only while AXI is on.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];

		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * On some platforms, instability is caused on
			 * changing clock freq when the core is busy.
			 * Idle the gpu core before changing the clock freq.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device,
						KGSL_TIMEOUT_DEFAULT);
			clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
		}
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client over raw EBI1 rate. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				    pwrlevel->gpu_freq);
	}
}
/*
 * debugfs write handler that toggles HSIC bus voting.  Writing "enable"
 * merely arms voting (the HSIC driver decides when to actually vote);
 * any other input disarms voting and immediately drops an existing bus
 * vote.
 */
static ssize_t ehci_hsic_msm_bus_write(struct file *file,
	const char __user *ubuf, size_t count, loff_t *ppos)
{
	char buf[8];
	int ret;
	struct seq_file *s = file->private_data;
	struct msm_hsic_hcd *mehci = s->private;

	memset(buf, 0x00, sizeof(buf));

	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	if (!strncmp(buf, "enable", 6)) {
		/* Do not vote here. Let hsic driver decide when to vote */
		debug_bus_voting_enabled = true;
		return count;
	}

	debug_bus_voting_enabled = false;
	if (mehci->bus_perf_client) {
		ret = msm_bus_scale_client_update_request(
				mehci->bus_perf_client, 0);
		if (ret)
			dev_err(mehci->dev, "%s: Failed to devote "
				"for bus bw %d\n", __func__, ret);
	}

	return count;
}
/*
 * Power up the DTV/HDMI path.  Raises the bus vote (or, without bus
 * scaling, programs and enables the EBI1 clock at a rate derived from
 * the panel pixel clock), powers the panel, sets the TV source clock
 * (with a 27.03MHz -> 27MHz fallback), optionally waits for an HDCP AN
 * read to complete, then enables and reset-pulses the HDMI clock before
 * calling the panel's own on handler.
 */
static int dtv_on(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	unsigned long panel_pixclock_freq , pm_qos_rate;

	mfd = platform_get_drvdata(pdev);
	panel_pixclock_freq = mfd->fbi->var.pixclock;
	/* pm_qos_rate is in kHz (pixclock / 1000), floored at 58000. */
	if (panel_pixclock_freq > 58000000)
		pm_qos_rate = panel_pixclock_freq / 1000 ;
	else
		pm_qos_rate = 58000;

	mdp4_extn_disp = 1;
#ifdef CONFIG_MSM_BUS_SCALING
	if (dtv_bus_scale_handle > 0)
		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
							1);
#else
	if (ebi1_clk) {
		clk_set_rate(ebi1_clk, pm_qos_rate * 1000);
		clk_prepare_enable(ebi1_clk);
	}
#endif

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(1);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(1);

	mfd = platform_get_drvdata(pdev);

	ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	if (ret) {
		pr_info("%s: clk_set_rate(%d) failed\n", __func__,
			mfd->fbi->var.pixclock);
		/* Some clock trees cannot make 27.03MHz; retry at 27MHz. */
		if (mfd->fbi->var.pixclock == 27030000)
			mfd->fbi->var.pixclock = 27000000;
		ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	}
	pr_info("%s: tv_src_clk=%dkHz, pm_qos_rate=%ldkHz, [%d]\n", __func__,
		mfd->fbi->var.pixclock/1000, pm_qos_rate, ret);
	mfd->panel_info.clk_rate = mfd->fbi->var.pixclock;

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
	/* Wait until any in-progress HDCP AN read completes. */
	while (atomic_read(&read_an_complete))
		msleep(1);
#endif
	clk_prepare_enable(hdmi_clk);
	clk_reset(hdmi_clk, CLK_RESET_ASSERT);
	udelay(20);
	clk_reset(hdmi_clk, CLK_RESET_DEASSERT);

	if (mdp_tv_clk)
		clk_prepare_enable(mdp_tv_clk);

	ret = panel_next_on(pdev);
	return ret;
}
/*
 * Power up the DTV path (tv_enc/tv_dac variant).  Raises the bus vote
 * (or a pm_qos request) sized from the panel pixel clock, sets the TV
 * source clock with a 27.03MHz -> 27MHz fallback, enables the TV
 * encoder, DAC, HDMI and optional MDP TV clocks (with an HDMI reset
 * pulse), powers the panel, then calls the panel's own on handler.
 */
static int dtv_on(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	unsigned long panel_pixclock_freq , pm_qos_rate;

	mfd = platform_get_drvdata(pdev);
	panel_pixclock_freq = mfd->fbi->var.pixclock;

#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
	pm_qos_rate = MSM_AXI_FLOW_MDP_DTV_720P_2BPP;
#else
	if (panel_pixclock_freq > 58000000)
		/* pm_qos_rate should be in Khz */
		pm_qos_rate = panel_pixclock_freq / 1000 ;
	else
		pm_qos_rate = 58000;
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	if (dtv_bus_scale_handle > 0)
		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
							1);
#else
	if (pm_qos_req)
		pm_qos_update_request(pm_qos_req, pm_qos_rate);
#endif
	mfd = platform_get_drvdata(pdev);

	ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	if (ret) {
		pr_info("%s: clk_set_rate(%d) failed\n", __func__,
			mfd->fbi->var.pixclock);
		/* Some clock trees cannot make 27.03MHz; retry at 27MHz. */
		if (mfd->fbi->var.pixclock == 27030000)
			mfd->fbi->var.pixclock = 27000000;
		ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	}
	pr_info("%s: tv_src_clk=%dkHz, pm_qos_rate=%ldkHz, [%d]\n", __func__,
		mfd->fbi->var.pixclock/1000, pm_qos_rate, ret);

	clk_enable(tv_enc_clk);
	clk_enable(tv_dac_clk);
	clk_enable(hdmi_clk);
	clk_reset(hdmi_clk, CLK_RESET_ASSERT);
	udelay(20);
	clk_reset(hdmi_clk, CLK_RESET_DEASSERT);

	if (mdp_tv_clk)
		clk_enable(mdp_tv_clk);

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(1);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(1);

	ret = panel_next_on(pdev);
	return ret;
}
/*
 * Power up the DTV/HDMI path.  Waits for any in-flight power-down work
 * first, raises the bus vote (or, without bus scaling, programs and
 * enables the EBI1 clock from the panel pixel clock), powers the panel,
 * sets the TV source clock with a 27.03MHz -> 27MHz fallback, then
 * enables and reset-pulses the HDMI clock before calling the panel's
 * own on handler.
 */
static int dtv_on(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	unsigned long panel_pixclock_freq , pm_qos_rate;

	/* If a power down is already underway, wait for it to finish */
	flush_work_sync(&dtv_off_work);

	mfd = platform_get_drvdata(pdev);
	panel_pixclock_freq = mfd->fbi->var.pixclock;

	if (panel_pixclock_freq > 58000000)
		/* pm_qos_rate should be in Khz */
		pm_qos_rate = panel_pixclock_freq / 1000 ;
	else
		pm_qos_rate = 58000;

	mdp4_extn_disp = 1;
#ifdef CONFIG_MSM_BUS_SCALING
	if (dtv_bus_scale_handle > 0)
		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
							1);
#else
	if (ebi1_clk) {
		clk_set_rate(ebi1_clk, pm_qos_rate * 1000);
		clk_prepare_enable(ebi1_clk);
	}
#endif

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(1);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(1);

	mfd = platform_get_drvdata(pdev);

	ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	if (ret) {
		pr_info("%s: clk_set_rate(%d) failed\n", __func__,
			mfd->fbi->var.pixclock);
		/* Some clock trees cannot make 27.03MHz; retry at 27MHz. */
		if (mfd->fbi->var.pixclock == 27030000)
			mfd->fbi->var.pixclock = 27000000;
		ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	}
	pr_info("%s: tv_src_clk=%dkHz, pm_qos_rate=%ldkHz, [%d]\n", __func__,
		mfd->fbi->var.pixclock/1000, pm_qos_rate, ret);
	mfd->panel_info.clk_rate = mfd->fbi->var.pixclock;

	clk_prepare_enable(hdmi_clk);
	clk_reset(hdmi_clk, CLK_RESET_ASSERT);
	udelay(20);
	clk_reset(hdmi_clk, CLK_RESET_DEASSERT);

	if (mdp_tv_clk)
		clk_prepare_enable(mdp_tv_clk);

	ret = panel_next_on(pdev);
	return ret;
}
/*
 * Drop one SCM PAS bandwidth reference; the last user devotes the bus
 * request and turns off the SCM bus clock.
 *
 * Fixes relative to the previous version: the refcount no longer
 * underflows when called without a matching scm_pas_enable_bw(), the
 * clock is NULL-checked to mirror the enable path, and the disable now
 * balances clk_prepare_enable() with clk_disable_unprepare() (plain
 * clk_disable() leaked a prepare count).
 */
static void scm_pas_disable_bw(void)
{
	mutex_lock(&scm_pas_bw_mutex);
	if (scm_pas_bw_count && --scm_pas_bw_count == 0) {
		msm_bus_scale_client_update_request(scm_perf_client, 0);
		if (scm_bus_clk)
			clk_disable_unprepare(scm_bus_clk);
	}
	mutex_unlock(&scm_pas_bw_mutex);
}