void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device, "axi off, device %d\n",
				device->id);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				"kgsl_3d", PM_QOS_DEFAULT_VALUE);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device, "axi on, device %d\n",
				device->id);
			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				"kgsl_3d",
				pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq/1000);
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
			}
		}
	}
}
static void msm72k_pm_qos_update(int vote)
{
	if (vote) {
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
				DRIVER_NAME, 0);
		pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				DRIVER_NAME, MSM_AXI_MAX_FREQ);
	} else {
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
				DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
		pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
	}
}
static int kgsl_last_release_locked(void)
{
	BUG_ON(kgsl_driver.grp_clk == NULL);
	BUG_ON(kgsl_driver.imem_clk == NULL);

	disable_irq(kgsl_driver.interrupt_num);

	kgsl_yamato_stop(&kgsl_driver.yamato_device);

	/* close devices */
	kgsl_yamato_close(&kgsl_driver.yamato_device);

	/* shutdown memory apertures */
	kgsl_sharedmem_close(&kgsl_driver.shmem);

	if (kgsl_driver.grp_pclk)
		clk_disable(kgsl_driver.grp_pclk);
	clk_disable(kgsl_driver.grp_clk);
	clk_disable(kgsl_driver.imem_clk);

	max_axi_freq_set = 0;
	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				  DRIVER_NAME, PM_QOS_DEFAULT_VALUE);

	return 0;
}
static int tovis_qvga_disp_on(struct platform_device *pdev)
{
	/* fixed lcd tearing during playing video by bongkyu.kim */
	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "ebi2_lcd", 65000);

	if (!disp_initialized)
		tovis_qvga_disp_init(pdev);

	if (!display_on) {
		mdelay(10);
		gpio_set_value(102, 0);
		mdelay(1);
		gpio_set_value(102, 1);
		mdelay(5);

		display_on = TRUE;
		do_tovis_init();
		/*
		// (d) -> (a)
		EBI2_WRITE16C(DISP_CMD_PORT, 0xff); // CSX Falling Edge
		EBI2_WRITE16C(DISP_CMD_PORT, 0xff); // CSX Falling Edge
		mdelay(5);
		EBI2_WRITE16C(DISP_CMD_PORT, 0xff); // CSX Falling Edge
		EBI2_WRITE16C(DISP_CMD_PORT, 0xff); // CSX Falling Edge
		EBI2_WRITE16C(DISP_CMD_PORT, 0xff); // CSX Falling Edge
		EBI2_WRITE16C(DISP_CMD_PORT, 0xff); // CSX Falling Edge
		mdelay(10);
		*/
	}
	return 0;
}
static void msm_xusb_pm_qos_update(struct msmusb_hcd *mhcd, int vote)
{
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);

	if (vote) {
		if (mhcd->pdata->max_axi_khz)
			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
					(char *)hcd->self.bus_name,
					mhcd->pdata->max_axi_khz);
	} else {
		if (mhcd->pdata->max_axi_khz)
			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
					(char *)hcd->self.bus_name,
					PM_QOS_DEFAULT_VALUE);
	}
}
static int dtv_off(struct platform_device *pdev)
{
	int ret = 0;

	ret = panel_next_off(pdev);
	pr_info("%s\n", __func__);

	clk_disable(tv_enc_clk);
	clk_disable(tv_dac_clk);
	clk_disable(hdmi_clk);
	if (mdp_tv_clk)
		clk_disable(mdp_tv_clk);

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(0);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(0);

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "dtv",
				  PM_QOS_DEFAULT_VALUE);

	return ret;
}
static int lcdc_on(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	unsigned long panel_pixclock_freq, pm_qos_freq;

	mfd = platform_get_drvdata(pdev);
	panel_pixclock_freq = mfd->fbi->var.pixclock;

	if (panel_pixclock_freq > 58000000)
		/* pm_qos_freq should be in kHz */
		pm_qos_freq = panel_pixclock_freq / 1000;
	else
		pm_qos_freq = 58000;

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "lcdc",
				  pm_qos_freq);

	mfd = platform_get_drvdata(pdev);

	clk_enable(mdp_lcdc_pclk_clk);
	clk_enable(mdp_lcdc_pad_pclk_clk);

	if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
		lcdc_pdata->lcdc_power_save(1);
	if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config)
		ret = lcdc_pdata->lcdc_gpio_config(1);

	clk_set_rate(mdp_lcdc_pclk_clk, mfd->fbi->var.pixclock);
	clk_set_rate(mdp_lcdc_pad_pclk_clk, mfd->fbi->var.pixclock);
	mdp_lcdc_pclk_clk_rate = clk_get_rate(mdp_lcdc_pclk_clk);
	mdp_lcdc_pad_pclk_clk_rate = clk_get_rate(mdp_lcdc_pad_pclk_clk);

	ret = panel_next_on(pdev);

	return ret;
}
static int mddi_resume(struct platform_device *pdev)
{
	mddi_host_type host_idx = MDDI_HOST_PRIM;

	if (!mddi_is_in_suspend)
		return 0;

	mddi_is_in_suspend = 0;

	if (mddi_power_locked)
		return 0;

#ifdef CONFIG_SHLCDC_BOARD
	if (no_set_power_flag == FALSE) {
		shlcdc_api_set_power_mode(SHLCDC_DEV_TYPE_MDDI,
					  SHLCDC_DEV_PWR_ON);
	}
#endif

	enable_irq(INT_MDDI_PRI);

#ifdef CONFIG_SHLCDC_BOARD
	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "mddi", 117000);
#endif

	clk_enable(mddi_clk);
	mddi_host_reg_out(PAD_CTL, mddi_pad_ctrl);

	if (mddi_host_timer.function)
		mddi_host_timer_service(0);

	return 0;
}
static int mddi_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;

	mfd = platform_get_drvdata(pdev);

	if (mddi_pdata && mddi_pdata->mddi_power_save)
		mddi_pdata->mddi_power_save(1);

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

	if (mddi_pdata && mddi_pdata->mddi_sel_clk &&
	    mddi_pdata->mddi_sel_clk(&clk_rate))
		printk(KERN_ERR
		       "%s: can't select mddi io clk target rate = %d\n",
		       __func__, clk_rate);

	if (clk_set_min_rate(mddi_clk, clk_rate) < 0)
		printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "mddi", 65000);

	ret = panel_next_on(pdev);

	return ret;
}
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				  unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (new_level < (pwr->num_pwrlevels - 1) &&
	    new_level >= pwr->thermal_pwrlevel &&
	    new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
		    (device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				"kgsl_3d",
				pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq/1000);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
			      pwr->active_pwrlevel);
	}
}
/* Resume function */
static int kgsl_resume(struct platform_device *dev)
{
	if (max_axi_freq_set) {
		pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
					  DRIVER_NAME, max_axi_freq);
	}
	return 0;
}
int update_axi_qos(uint32_t rate)
{
	int rc = 0;

	rc = pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				       MSM_AXI_QOS_NAME, rate);
	if (rc < 0)
		CDBG("update AXI bus QOS fails. rc = %d\n", rc);

	return rc;
}
/*
 * Manage the rate of the uart bus clock on pnx
 */
inline unsigned int pnx_serial_pm_qos_up(struct pnx_uart *uart)
{
	pm_qos_update_requirement(PM_QOS_PCLK2_THROUGHPUT,
				  (char *)uart->uart_name, 52);
	uart->pm_qos_status = PNX_UART_PM_QOS_UP_FORCE;
	return 0;
}
static void msm_xusb_pm_qos_update(struct msmusb_hcd *mhcd, int vote)
{
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);

	if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_SERIAL_PMIC)
		goto vote_for_axi;

	if (!depends_on_axi_freq(mhcd->xceiv))
		return;

vote_for_axi:
	if (vote) {
		pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				(char *)hcd->self.bus_name,
				MSM_AXI_MAX_FREQ);
	} else {
		pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				(char *)hcd->self.bus_name,
				PM_QOS_DEFAULT_VALUE);
	}
}
/* MJA patch for uart on TAT */
int pnx_serial_set_activity_detect(struct pnx_uart *uart)
{
	int ret;

	uart->enable_detection = 0;

	/* console activity detection is only available for UART1 */
	if (strcmp(uart->uart_name, "UART1") != 0)
		return 0;

	console_uart = uart;
	uart->enable_detection = 1;
	uart->rx_activity = 0;
	uart->tx_activity = 0;
	uart->rx_triggered = 0;
	uart->tx_triggered = 0;
	uart->clock_enable = 0;

	INIT_DELAYED_WORK(&(uart->rx_work), rx_timeout_work);
	INIT_DELAYED_WORK(&(uart->tx_work), tx_timeout_work);

	uart->workqueue = create_singlethread_workqueue("kserd");
	if (!(uart->workqueue))
		goto irq_free;

	wake_lock_init(&(uart->lock), WAKE_LOCK_SUSPEND, "console activity");

	if (!tatonuart) { /* if not in TAT uart */
		if (set_irq_type(IRQ_EXTINT(18), IRQ_TYPE_EDGE_BOTH)) {
			printk(KERN_WARNING "failed setirq type\n");
			goto irq_free;
		}
		ret = request_irq(IRQ_EXTINT(18),
				  pnx_serial_rx_activity_detected, 0,
				  " serial (detect)", NULL);
		if (ret) {
			printk(KERN_WARNING "request_irq failed\n");
			goto irq_free;
		}
	} else {
		pm_qos_update_requirement(PM_QOS_PCLK2_THROUGHPUT,
					  (char *)uart->uart_name, 52);
		uart->pm_qos_status = PNX_UART_PM_QOS_UP_XFER;
	}

irq_free:
	return 0;
}
static int mddi_off(struct platform_device *pdev)
{
	int ret = 0;

	ret = panel_next_off(pdev);

	if (mddi_pdata && mddi_pdata->mddi_power_save)
		mddi_pdata->mddi_power_save(0);

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "mddi",
				  PM_QOS_DEFAULT_VALUE);

	return ret;
}
inline unsigned int pnx_serial_pm_update_qos(struct pnx_uart *uart)
{
	if (uart->pm_qos_status == PNX_UART_PM_QOS_DOWN) {
		pm_qos_update_requirement(PM_QOS_PCLK2_THROUGHPUT,
					  (char *)uart->uart_name, 52);
		/* start guard timer */
		mod_timer(&(uart->pm_qos_timer), (jiffies + 1*HZ));
	}

	if (uart->pm_qos_status != PNX_UART_PM_QOS_UP_FORCE)
		uart->pm_qos_status = PNX_UART_PM_QOS_UP_XFER;

	return 0;
}
static void pnx_serial_pm_qos_timeout(unsigned long arg)
{
	struct pnx_uart *uart = (struct pnx_uart *)arg;

	if (uart->pm_qos_status == PNX_UART_PM_QOS_UP_XFER) {
		mod_timer(&(uart->pm_qos_timer), (jiffies + 1*HZ));
		uart->pm_qos_status = PNX_UART_PM_QOS_UP;
	} else if (uart->pm_qos_status != PNX_UART_PM_QOS_UP_FORCE) {
		pm_qos_update_requirement(PM_QOS_PCLK2_THROUGHPUT,
					  (char *)uart->uart_name,
					  PM_QOS_DEFAULT_VALUE);
		uart->pm_qos_status = PNX_UART_PM_QOS_DOWN;
	}
}
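/*
 * The guard-timer handler above only fires if pm_qos_timer was wired to it
 * when the port was set up; that setup is not part of this collection.  A
 * minimal sketch of how it could look with the old-style kernel timer API
 * these snippets use (the pnx_serial_pm_qos_init helper name is an
 * illustrative assumption, not code from the source tree):
 */
static void pnx_serial_pm_qos_init(struct pnx_uart *uart)
{
	/* Hypothetical helper: start with the QoS vote released and bind the
	 * per-port guard timer to pnx_serial_pm_qos_timeout().  setup_timer()
	 * is the legacy API matching the unsigned-long callback argument. */
	uart->pm_qos_status = PNX_UART_PM_QOS_DOWN;
	setup_timer(&uart->pm_qos_timer, pnx_serial_pm_qos_timeout,
		    (unsigned long)uart);
}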
int update_axi_qos(uint32_t freq)
{
	int rc = 0;

	if (axi_qos_requested) {
		rc = pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
					       MSM_AXI_QOS_NAME, freq);
		if (rc < 0)
			CDBG("update AXI bus QOS fails. rc = %d\n", rc);
		else
			CDBG("%s: request successful\n", __func__);
	}
	return rc;
}
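/*
 * Both update_axi_qos() variants assume a named PM_QOS_SYSTEM_BUS_FREQ
 * requirement was registered earlier (the axi_qos_requested flag guards
 * exactly that).  A minimal sketch of the surrounding lifecycle with the
 * legacy pm_qos_params add/remove calls; the add_axi_qos()/release_axi_qos()
 * helper names are illustrative assumptions, not necessarily what the
 * source tree uses:
 */
static int add_axi_qos(void)
{
	int rc;

	/* Register the named requirement at the default (no-vote) value;
	 * later update_axi_qos() calls adjust it. */
	rc = pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				    MSM_AXI_QOS_NAME, PM_QOS_DEFAULT_VALUE);
	if (!rc)
		axi_qos_requested = 1;
	return rc;
}

static void release_axi_qos(void)
{
	if (!axi_qos_requested)
		return;
	/* Drop the named requirement entirely when the client goes away. */
	pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ, MSM_AXI_QOS_NAME);
	axi_qos_requested = 0;
}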
static int lcdc_on(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	unsigned long panel_pixclock_freq, pm_qos_rate;

	mfd = platform_get_drvdata(pdev);
	panel_pixclock_freq = mfd->fbi->var.pixclock;

#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
	pm_qos_rate = MSM_AXI_FLOW_MDP_LCDC_WVGA_2BPP;
#else
#ifdef CONFIG_HUAWEI_KERNEL
	if (panel_pixclock_freq > 58000000)
		/* pm_qos_rate should be in kHz */
		pm_qos_rate = panel_pixclock_freq / 1000;
	else
		pm_qos_rate = 58000;
#else
	if (panel_pixclock_freq > 65000000)
		pm_qos_rate = panel_pixclock_freq / 1000;
	else
		pm_qos_rate = 65000;
#endif
#endif

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "lcdc",
				  pm_qos_rate);

	mfd = platform_get_drvdata(pdev);

	ret = clk_set_rate(pixel_mdp_clk, mfd->fbi->var.pixclock);
	if (ret) {
		pr_err("%s: Can't set MDP LCDC pixel clock to rate %u\n",
		       __func__, mfd->fbi->var.pixclock);
		goto out;
	}
	clk_enable(pixel_mdp_clk);
	clk_enable(pixel_lcdc_clk);

	if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
		lcdc_pdata->lcdc_power_save(1);
	if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config)
		ret = lcdc_pdata->lcdc_gpio_config(1);

	ret = panel_next_on(pdev);

out:
	return ret;
}
static int dtv_on(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	unsigned long panel_pixclock_freq, pm_qos_rate;

	mfd = platform_get_drvdata(pdev);
	panel_pixclock_freq = mfd->fbi->var.pixclock;

#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
	pm_qos_rate = MSM_AXI_FLOW_MDP_DTV_720P_2BPP;
#else
	pm_qos_rate = MSM_AXI_QOS_DTV_ON;
#endif

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "dtv",
				  pm_qos_rate);

	mfd = platform_get_drvdata(pdev);

	ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	if (ret) {
		pr_info("%s: clk_set_rate(%d) failed\n",
			__func__, mfd->fbi->var.pixclock);
		if (mfd->fbi->var.pixclock == 27030000)
			mfd->fbi->var.pixclock = 27000000;
		ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
	}
	pr_info("%s: tv_src_clk=%dkHz, pm_qos_rate=%ldkHz, [%d]\n", __func__,
		mfd->fbi->var.pixclock/1000, pm_qos_rate, ret);

	clk_enable(tv_enc_clk);
	clk_enable(tv_dac_clk);
	clk_enable(hdmi_clk);
	if (mdp_tv_clk)
		clk_enable(mdp_tv_clk);

	if (dtv_pdata && dtv_pdata->lcdc_power_save)
		dtv_pdata->lcdc_power_save(1);
	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
		ret = dtv_pdata->lcdc_gpio_config(1);

	ret = panel_next_on(pdev);

	return ret;
}
static int lcdc_off(struct platform_device *pdev)
{
	int ret = 0;

	ret = panel_next_off(pdev);

	clk_disable(mdp_lcdc_pclk_clk);
	clk_disable(mdp_lcdc_pad_pclk_clk);

	if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
		lcdc_pdata->lcdc_power_save(0);

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "lcdc",
				  PM_QOS_DEFAULT_VALUE);

	return ret;
}
void mddi_disable(int lock)
{
	mddi_host_type host_idx = MDDI_HOST_PRIM;

	if (mddi_power_locked)
		return;

	if (lock)
		mddi_power_locked = 1;

	if (mddi_host_timer.function)
		del_timer_sync(&mddi_host_timer);

	mddi_pad_ctrl = mddi_host_reg_in(PAD_CTL);
	mddi_host_reg_out(PAD_CTL, 0x0);

	if (clk_set_min_rate(mddi_clk, 0) < 0)
		printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);

	clk_disable(mddi_clk);
	if (mddi_pclk)
		clk_disable(mddi_pclk);

#ifdef CONFIG_SHLCDC_BOARD
	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "mddi",
				  PM_QOS_DEFAULT_VALUE);
#endif

	disable_irq(INT_MDDI_PRI);

	if (mddi_pdata && mddi_pdata->mddi_power_save)
		mddi_pdata->mddi_power_save(0);

#ifdef CONFIG_SHLCDC_BOARD
	if (no_set_power_flag == FALSE) {
		shlcdc_api_set_power_mode(SHLCDC_DEV_TYPE_MDDI,
					  SHLCDC_DEV_PWR_OFF);
	}
#endif
}
/* file operations */
static int kgsl_first_open_locked(void)
{
	int result = 0;

	BUG_ON(kgsl_driver.grp_clk == NULL);
	BUG_ON(kgsl_driver.imem_clk == NULL);

	if (max_axi_freq) {
		max_axi_freq_set = 1;
		pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
					  DRIVER_NAME, max_axi_freq);
	}

	if (kgsl_driver.grp_pclk)
		clk_enable(kgsl_driver.grp_pclk);
	clk_enable(kgsl_driver.grp_clk);
	clk_enable(kgsl_driver.imem_clk);

	/* init memory apertures */
	result = kgsl_sharedmem_init(&kgsl_driver.shmem);
	if (result != 0)
		goto done;

	/* init devices */
	result = kgsl_yamato_init(&kgsl_driver.yamato_device,
				  &kgsl_driver.yamato_config);
	if (result != 0)
		goto done;

	result = kgsl_yamato_start(&kgsl_driver.yamato_device, 0);
	if (result != 0)
		goto done;

	enable_irq(kgsl_driver.interrupt_num);
done:
	return result;
}
static int lcdc_off(struct platform_device *pdev)
{
	int ret = 0;

	ret = panel_next_off(pdev);

	/* LGE_CHANGE, change clock disable sequence, dclk off -> vsync off */
	clk_disable(pixel_lcdc_clk);
	msleep(16);
	clk_disable(pixel_mdp_clk);

	if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
		lcdc_pdata->lcdc_power_save(0);
	if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config)
		ret = lcdc_pdata->lcdc_gpio_config(0);

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "lcdc",
				  PM_QOS_DEFAULT_VALUE);

	return ret;
}
static int tovis_qvga_disp_off(struct platform_device *pdev)
{
	if (!disp_initialized)
		tovis_qvga_disp_init(pdev);

	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, "ebi2_lcd",
				  PM_QOS_DEFAULT_VALUE);

	if (display_on) {
		// (b) -> (a)
		EBI2_WRITE16C(DISP_CMD_PORT, 0x28); // ScreenOff
		mdelay(20);
		EBI2_WRITE16C(DISP_CMD_PORT, 0x10); // AMP Off
		mdelay(120);

		// (a) -> (d)
		EBI2_WRITE16C(DISP_CMD_PORT, 0xb1);
		EBI2_WRITE16D(DISP_DATA_PORT, 0x01);

		display_on = FALSE;
	}
	return 0;
}
u32 res_trk_set_perf_level(u32 n_req_perf_lvl, u32 *pn_set_perf_lvl,
			   struct vcd_clnt_ctxt_type_t *p_cctxt)
{
	u32 axi_freq = 0, mfc_freq = 0, calc_mfc_freq = 0;
	int rc = -1;

	if (!pn_set_perf_lvl) {
		VCDRES_MSG_ERROR("%s(): pn_perf_lvl is NULL\n", __func__);
		return FALSE;
	}

	VCDRES_MSG_LOW("%s(), n_req_perf_lvl = %d", __func__, n_req_perf_lvl);

	if (p_cctxt) {
		calc_mfc_freq = res_trk_convert_perf_lvl_to_freq(
			(u64)n_req_perf_lvl);

		if (calc_mfc_freq < VCD_RESTRK_MIN_FREQ_POINT)
			calc_mfc_freq = VCD_RESTRK_MIN_FREQ_POINT;
		else if (calc_mfc_freq > VCD_RESTRK_MAX_FREQ_POINT)
			calc_mfc_freq = VCD_RESTRK_MAX_FREQ_POINT;

		if (!p_cctxt->b_decoding) {
			if (n_req_perf_lvl >= VGA_PERF_LEVEL) {
				mfc_freq = mfc_clk_freq_table[2];
				axi_freq = axi_clk_freq_table_enc[1];
			} else {
				mfc_freq = mfc_clk_freq_table[0];
				axi_freq = axi_clk_freq_table_enc[0];
			}
			VCDRES_MSG_HIGH("\n ENCODER: axi_freq = %u"
				", mfc_freq = %u, calc_mfc_freq = %u,"
				" n_req_perf_lvl = %u", axi_freq,
				mfc_freq, calc_mfc_freq, n_req_perf_lvl);
		} else {
			if (n_req_perf_lvl <= QVGA_PERF_LEVEL) {
				mfc_freq = mfc_clk_freq_table[0];
				axi_freq = axi_clk_freq_table_dec[0];
			} else {
				axi_freq = axi_clk_freq_table_dec[0];
				if (n_req_perf_lvl <= VGA_PERF_LEVEL)
					mfc_freq = mfc_clk_freq_table[0];
				else if (n_req_perf_lvl <= WVGA_PERF_LEVEL)
					mfc_freq = mfc_clk_freq_table[1];
				else {
					mfc_freq = mfc_clk_freq_table[2];
					axi_freq = axi_clk_freq_table_dec[1];
				}
			}
			VCDRES_MSG_HIGH("\n DECODER: axi_freq = %u"
				", mfc_freq = %u, calc_mfc_freq = %u,"
				" n_req_perf_lvl = %u", axi_freq,
				mfc_freq, calc_mfc_freq, n_req_perf_lvl);
		}
	} else {
		VCDRES_MSG_HIGH("%s() WARNING:: p_cctxt is NULL", __func__);
		return TRUE;
	}

#ifdef AXI_CLK_SCALING
	if (n_req_perf_lvl != VCD_RESTRK_MIN_PERF_LEVEL) {
		VCDRES_MSG_HIGH("\n %s(): Setting AXI freq to %u",
			__func__, axi_freq);
		rc = pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
			MSM_AXI_QOS_NAME, axi_freq);
		if (rc < 0) {
			VCDRES_MSG_ERROR("\n Update AXI bus QOS fails,"
				"rc = %d\n", rc);
			return FALSE;
		}
	}
#endif

#ifdef USE_RES_TRACKER
	if (n_req_perf_lvl != VCD_RESTRK_MIN_PERF_LEVEL) {
		VCDRES_MSG_HIGH("\n %s(): Setting MFC freq to %u",
			__func__, mfc_freq);
		if (!vid_c_sel_clk_rate(mfc_freq)) {
			VCDRES_MSG_ERROR("%s(): vid_c_sel_clk_rate FAILED\n",
				__func__);
			*pn_set_perf_lvl = 0;
			return FALSE;
		}
	}
#endif

	*pn_set_perf_lvl = res_trk_convert_freq_to_perf_lvl((u64)mfc_freq);
	return TRUE;
}
/* Suspend function */
static int kgsl_suspend(struct platform_device *dev, pm_message_t state)
{
	pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME,
				  PM_QOS_DEFAULT_VALUE);
	return 0;
}
static int msm_i2c_xfer(struct i2c_adapter *adap,
			struct i2c_msg msgs[], int num)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	struct msm_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;
	int rem = num;
	uint16_t addr;
	long timeout;
	unsigned long flags;
	int check_busy = 1;

	mutex_lock(&dev->mlock);
	if (dev->suspended) {
		mutex_unlock(&dev->mlock);
		return -EIO;
	}

	/* Don't allow power collapse until we release remote spinlock */
	pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "msm_i2c",
				  dev->pdata->pm_lat);
	msm_i2c_rmutex_lock(dev);
	if (adap == &dev->adap_pri)
		writel(0, dev->base + I2C_INTERFACE_SELECT);
	else
		writel(I2C_INTERFACE_SELECT_INTF_SELECT,
		       dev->base + I2C_INTERFACE_SELECT);
	enable_irq(dev->irq);

	while (rem) {
		addr = msgs->addr << 1;
		if (msgs->flags & I2C_M_RD)
			addr |= 1;

		spin_lock_irqsave(&dev->lock, flags);
		dev->msg = msgs;
		dev->rem = rem;
		dev->pos = 0;
		dev->err = 0;
		dev->flush_cnt = 0;
		dev->cnt = msgs->len;
		dev->complete = &complete;
		spin_unlock_irqrestore(&dev->lock, flags);

		if (check_busy) {
			ret = msm_i2c_poll_notbusy(dev);
			if (ret)
				ret = msm_i2c_recover_bus_busy(dev, adap);
			if (ret) {
				dev_err(dev->dev,
					"Error waiting for notbusy\n");
				goto out_err;
			}
			check_busy = 0;
		}

		if (rem == 1 && msgs->len == 0)
			addr |= I2C_WRITE_DATA_LAST_BYTE;

		/* Wait for WR buffer not full */
		ret = msm_i2c_poll_writeready(dev);
		if (ret) {
			ret = msm_i2c_recover_bus_busy(dev, adap);
			if (ret) {
				dev_err(dev->dev,
				"Error waiting for write ready before addr\n");
				goto out_err;
			}
		}

		/* special case for doing 1 byte read.
		 * There should be no scheduling between I2C controller
		 * becoming ready to read and writing LAST-BYTE to I2C
		 * controller. This will avoid potential of I2C controller
		 * starting to latch another extra byte.
		 */
		if ((msgs->len == 1) && (msgs->flags & I2C_M_RD)) {
			uint32_t retries = 0;

			spin_lock_irqsave(&dev->lock, flags);

			writel(I2C_WRITE_DATA_ADDR_BYTE | addr,
			       dev->base + I2C_WRITE_DATA);

			/* Poll for I2C controller going into RX_DATA mode to
			 * ensure controller goes into receive mode.
			 * Just checking write_buffer_full may not work since
			 * there is delay between the write-buffer becoming
			 * empty and the slave sending ACK to ensure I2C
			 * controller goes in receive mode to receive data.
			 */
			while (retries != 2000) {
				uint32_t status =
					readl(dev->base + I2C_STATUS);

				if ((status & I2C_STATUS_RX_DATA_STATE) ==
				    I2C_STATUS_RX_DATA_STATE)
					break;
				retries++;
			}
			if (retries >= 2000) {
				dev->rd_acked = 0;
				spin_unlock_irqrestore(&dev->lock, flags);
				/* 1-byte-reads from slow devices in
				 * interrupt context
				 */
				goto wait_for_int;
			}

			dev->rd_acked = 1;
			writel(I2C_WRITE_DATA_LAST_BYTE,
			       dev->base + I2C_WRITE_DATA);
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			writel(I2C_WRITE_DATA_ADDR_BYTE | addr,
			       dev->base + I2C_WRITE_DATA);
		}

		/* Polling and waiting for write_buffer_empty is not
		 * necessary. Even worse, if we do, it can result in invalid
		 * status and error if interrupt(s) occur while polling.
		 */

		/*
		 * Now that we've setup the xfer, the ISR will transfer the
		 * data and wake us up with dev->err set if there was an error
		 */
wait_for_int:
		timeout = wait_for_completion_timeout(&complete, HZ);
		if (!timeout) {
			dev_err(dev->dev, "Transaction timed out\n");
			writel(I2C_WRITE_DATA_LAST_BYTE,
			       dev->base + I2C_WRITE_DATA);
			msleep(100);
			/* FLUSH */
			readl(dev->base + I2C_READ_DATA);
			readl(dev->base + I2C_STATUS);
			ret = -ETIMEDOUT;
			goto out_err;
		}
		if (dev->err) {
			dev_err(dev->dev,
				"Error during data xfer (%d)\n", dev->err);
			ret = dev->err;
			goto out_err;
		}

		if (msgs->flags & I2C_M_RD)
			check_busy = 1;

		msgs++;
		rem--;
	}

	ret = num;
out_err:
	spin_lock_irqsave(&dev->lock, flags);
	dev->complete = NULL;
	dev->msg = NULL;
	dev->rem = 0;
	dev->pos = 0;
	dev->err = 0;
	dev->flush_cnt = 0;
	dev->cnt = 0;
	spin_unlock_irqrestore(&dev->lock, flags);
	disable_irq(dev->irq);
	msm_i2c_rmutex_unlock(dev);
	pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "msm_i2c",
				  PM_QOS_DEFAULT_VALUE);
	mutex_unlock(&dev->mlock);
	return ret;
}