/*****************************************************************************
 Description  : Triggered when the 1s frequency control release timer expires.
                Releases the minimum CPU and DDR frequency lock. Runs on the
                frequency control workqueue.
 Prototype    : void k3v2_release_freq_lock_work(struct work_struct *work)
 Input Param  :
 Output Param :
 Return Value :
******************************************************************************/
static void k3v2_release_freq_lock_work(struct work_struct *work)
{
	freq_lock_control_t *fl_control_ptr =
		container_of(work, freq_lock_control_t, release_lock_work);

	lfprintf("k3v2_release_freq_lock_work call enter.\n");

#ifdef DEBUG_WIFI_FREQ_LOCK
	pre_release_jiffies = jiffies;
#endif

	if (NULL == fl_control_ptr) {
		printk("k3v2_release_freq_lock_work NULL pointer error!\n");
		return;
	}

	mutex_lock(&fl_control_ptr->lock_freq_mtx);
	if (LR_SHOULD_DROP_MAGIC == fl_control_ptr->release_work_state) {
		lfprintf("k3v2_release_freq_lock_work work has been dropped, skip lock release, return here.\n");
		fl_control_ptr->release_work_state = LR_DISABLE_MAGIC;
		mutex_unlock(&fl_control_ptr->lock_freq_mtx);
		return;
	}

	if (FREQ_LOCK_ENABLE == fl_control_ptr->lock_mod) {
#ifdef CONFIG_CPU_FREQ_GOV_K3HOTPLUG
		/* Lock-freq timer expired: release the frequency lock here. */
		pm_qos_remove_request(&fl_control_ptr->cpu_qos_request);
		if (fl_control_ptr->lock_level >= START_DDR_FREQ_LOCK_LEVEL)
			pm_qos_remove_request(&fl_control_ptr->ddr_qos_request);
#endif
		fl_control_ptr->release_work_state = LR_DISABLE_MAGIC;
		fl_control_ptr->lock_mod = FREQ_LOCK_DISABLE;
	}
	mutex_unlock(&fl_control_ptr->lock_freq_mtx);
}
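/*
 * Hypothetical companion sketch (assumption, not from the original driver):
 * the 1s lock-release timer described above would typically just queue the
 * release work on the frequency control workqueue, so the PM QoS removal in
 * k3v2_release_freq_lock_work() runs in process context where it may sleep.
 * Only the field names (freq_ctl_wq, release_lock_work) appear in the
 * frequency-lock snippets in this collection; the function name, the cast of
 * the timer data argument, and the old-style (pre-4.15) timer callback
 * signature are assumptions.
 */
static void example_lock_freq_timer_fn(unsigned long data)
{
	freq_lock_control_t *fl = (freq_lock_control_t *)data;

	if (fl && fl->freq_ctl_wq)
		queue_work(fl->freq_ctl_wq, &fl->release_lock_work);
}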
static int gpu_pm_qos_command(struct exynos_context *platform, gpu_pmqos_state state)
{
#ifdef CONFIG_BUS_DEVFREQ
	switch (state) {
	case GPU_CONTROL_PM_QOS_INIT:
		pm_qos_add_request(&exynos5_g3d_mif_qos, PM_QOS_BUS_THROUGHPUT, 0);
		pm_qos_add_request(&exynos5_g3d_int_qos, PM_QOS_DEVICE_THROUGHPUT, 0);
		pm_qos_add_request(&exynos5_g3d_cpu_qos, PM_QOS_KFC_FREQ_MIN, 0);
		break;
	case GPU_CONTROL_PM_QOS_DEINIT:
		pm_qos_remove_request(&exynos5_g3d_mif_qos);
		pm_qos_remove_request(&exynos5_g3d_int_qos);
		pm_qos_remove_request(&exynos5_g3d_cpu_qos);
		break;
	case GPU_CONTROL_PM_QOS_SET:
		if (platform->step < 0)
			return -1;
		pm_qos_update_request(&exynos5_g3d_mif_qos, platform->table[platform->step].mem_freq);
		pm_qos_update_request(&exynos5_g3d_int_qos, platform->table[platform->step].int_freq);
		pm_qos_update_request(&exynos5_g3d_cpu_qos, platform->table[platform->step].cpu_freq);
		break;
	case GPU_CONTROL_PM_QOS_RESET:
		pm_qos_update_request(&exynos5_g3d_mif_qos, 0);
		pm_qos_update_request(&exynos5_g3d_int_qos, 0);
		pm_qos_update_request(&exynos5_g3d_cpu_qos, 0);
	default:
		break;
	}
#endif /* CONFIG_BUS_DEVFREQ */
	return 0;
}
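/*
 * Hypothetical usage sketch (assumption, not from the original driver):
 * drive the PM QoS votes of gpu_pm_qos_command() over one DVFS lifecycle.
 * The caller name is invented; it only assumes platform->step has already
 * been selected by the DVFS code, as the SET case above requires.
 */
static int example_gpu_pm_qos_cycle(struct exynos_context *platform)
{
	int ret;

	gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_INIT);		/* register the requests once */

	ret = gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_SET);	/* vote for the current step */
	if (ret)
		return ret;

	gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_RESET);	/* drop the votes back to the defaults */
	gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_DEINIT);	/* unregister on teardown */
	return 0;
}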
static int fimg2d_remove(struct platform_device *pdev)
{
#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	pm_qos_remove_request(&exynos5_g2d_cpu_qos);
	pm_qos_remove_request(&exynos5_g2d_mif_qos);
#endif
	misc_deregister(&fimg2d_dev);

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_disable(&pdev->dev);
#else
	fimg2d_clk_off(ctrl);
#endif

	fimg2d_clk_release(ctrl);
	free_irq(ctrl->irq, NULL);

	if (ctrl->mem) {
		iounmap(ctrl->regs);
		release_resource(ctrl->mem);
		kfree(ctrl->mem);
	}

#ifdef BLIT_WORKQUE
	destroy_workqueue(ctrl->work_q);
#endif
	mutex_destroy(&ctrl->drvlock);
	kfree(ctrl);
	return 0;
}
static int __init tegra_auto_hotplug_debug_init(void)
{
	if (!tegra3_cpu_lock)
		return -ENOENT;

	hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL);
	if (!hp_debugfs_root)
		return -ENOMEM;

	pm_qos_add_request(&min_cpu_req, PM_QOS_MIN_ONLINE_CPUS,
			   PM_QOS_DEFAULT_VALUE);
	pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
			   PM_QOS_DEFAULT_VALUE);

	if (!debugfs_create_file(
		"min_cpus", S_IRUGO, hp_debugfs_root, NULL, &min_cpus_fops))
		goto err_out;

	if (!debugfs_create_file(
		"max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops))
		goto err_out;

	if (!debugfs_create_file(
		"stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(hp_debugfs_root);
	pm_qos_remove_request(&min_cpu_req);
	pm_qos_remove_request(&max_cpu_req);
	return -ENOMEM;
}
void fimg2d_pm_qos_remove(struct fimg2d_control *ctrl)
{
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ) || \
	defined(CONFIG_FIMG2D_USE_BUS_DEVFREQ)
	struct fimg2d_platdata *pdata;
#ifdef CONFIG_OF
	pdata = ctrl->pdata;
#else
	pdata = to_fimg2d_plat(ctrl->dev);
#endif
#endif

#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
	if (pdata->cpu_min)
		pm_qos_remove_request(&ctrl->exynos5_g2d_cpu_qos);
	if (pdata->kfc_min)
		pm_qos_remove_request(&ctrl->exynos5_g2d_kfc_qos);
#endif
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
	if (pdata->mif_min)
		pm_qos_remove_request(&ctrl->exynos5_g2d_mif_qos);
	if (pdata->int_min)
		pm_qos_remove_request(&ctrl->exynos5_g2d_int_qos);
#endif
}
int proactive_pm_qos_command(struct exynos_context *platform, gpu_pmqos_state state)
{
	DVFS_ASSERT(platform);

	if (!platform->devfreq_status)
		return 0;

	switch (state) {
	case GPU_CONTROL_PM_QOS_INIT:
		pm_qos_add_request(&proactive_mif_min_qos, PM_QOS_BUS_THROUGHPUT, 0);
		pm_qos_add_request(&proactive_apollo_min_qos, PM_QOS_CLUSTER0_FREQ_MIN, 0);
		pm_qos_add_request(&proactive_atlas_min_qos, PM_QOS_CLUSTER1_FREQ_MIN, 0);
		if (!platform->pmqos_int_disable)
			pm_qos_add_request(&proactive_int_min_qos, PM_QOS_DEVICE_THROUGHPUT, 0);
		break;
	case GPU_CONTROL_PM_QOS_DEINIT:
		pm_qos_remove_request(&proactive_mif_min_qos);
		pm_qos_remove_request(&proactive_apollo_min_qos);
		pm_qos_remove_request(&proactive_atlas_min_qos);
		if (!platform->pmqos_int_disable)
			pm_qos_remove_request(&proactive_int_min_qos);
		break;
	case GPU_CONTROL_PM_QOS_RESET:
		pm_qos_update_request(&proactive_mif_min_qos, 0);
		pm_qos_update_request(&proactive_apollo_min_qos, 0);
		pm_qos_update_request(&proactive_atlas_min_qos, 0);
	default:
		break;
	}
	return 0;
}
void tegra_auto_hotplug_exit(void)
{
	destroy_workqueue(hotplug_wq);
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(hp_debugfs_root);
	pm_qos_remove_request(&min_cpu_req);
	pm_qos_remove_request(&max_cpu_req);
#endif
}
static int gpu_pm_qos_command(struct exynos_context *platform, gpu_pmqos_state state)
{
#ifdef CONFIG_BUS_DEVFREQ
	switch (state) {
	case GPU_CONTROL_PM_QOS_INIT:
		pm_qos_add_request(&exynos5_g3d_mif_qos, PM_QOS_BUS_THROUGHPUT, 0);
		pm_qos_add_request(&exynos5_g3d_int_qos, PM_QOS_DEVICE_THROUGHPUT, 0);
		pm_qos_add_request(&exynos5_g3d_cpu_kfc_min_qos, PM_QOS_KFC_FREQ_MIN, 0);
		pm_qos_add_request(&exynos5_g3d_cpu_egl_max_qos, PM_QOS_CPU_FREQ_MAX,
				   PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE);
#if SLSI_INTEGRATION
#if defined(SET_MINLOCK)
		pm_qos_add_request(&exynos5_g3d_cpu_egl_min_qos, PM_QOS_CPU_FREQ_MIN, 0);
		platform->custom_cpu_max_lock = 0;
#endif
#endif
		break;
	case GPU_CONTROL_PM_QOS_DEINIT:
		pm_qos_remove_request(&exynos5_g3d_mif_qos);
		pm_qos_remove_request(&exynos5_g3d_int_qos);
		pm_qos_remove_request(&exynos5_g3d_cpu_kfc_min_qos);
		pm_qos_remove_request(&exynos5_g3d_cpu_egl_max_qos);
#if SLSI_INTEGRATION
#if defined(SET_MINLOCK)
		pm_qos_remove_request(&exynos5_g3d_cpu_egl_min_qos);
#endif
#endif
		break;
	case GPU_CONTROL_PM_QOS_SET:
		if (platform->step < 0)
			return -1;
		pm_qos_update_request(&exynos5_g3d_mif_qos, platform->table[platform->step].mem_freq);
		pm_qos_update_request(&exynos5_g3d_int_qos, platform->table[platform->step].int_freq);
		pm_qos_update_request(&exynos5_g3d_cpu_kfc_min_qos, platform->table[platform->step].cpu_freq);
#if SLSI_INTEGRATION
#if defined(SET_MINLOCK)
		if (platform->custom_cpu_max_lock)
			pm_qos_update_request(&exynos5_g3d_cpu_egl_max_qos,
					      platform->custom_cpu_max_lock);
		else
#endif
#endif
			pm_qos_update_request(&exynos5_g3d_cpu_egl_max_qos,
					      platform->table[platform->step].cpu_max_freq);
		break;
	case GPU_CONTROL_PM_QOS_RESET:
		pm_qos_update_request(&exynos5_g3d_mif_qos, 0);
		pm_qos_update_request(&exynos5_g3d_int_qos, 0);
		pm_qos_update_request(&exynos5_g3d_cpu_kfc_min_qos, 0);
		pm_qos_update_request(&exynos5_g3d_cpu_egl_max_qos,
				      PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE);
	default:
		break;
	}
#endif /* CONFIG_BUS_DEVFREQ */
	return 0;
}
/**
 * Stops the Sleep-Mode Protocol on the Host.
 */
static void bluetooth_pm_sleep_stop(void)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&rw_lock, irq_flags);

	if (!test_bit(BT_PROTO, &flags)) {
		spin_unlock_irqrestore(&rw_lock, irq_flags);
		return;
	}

	printk("%s\n", __func__);

	del_timer(&tx_timer);
	//BT_S : [CONBT-1475] LGC_BT_COMMON_IMP_KERNEL_UART_HCI_COMMAND_TIMEOUT
	uart_off_jiffies = 0;
	uart_on_jiffies = 0;
	del_timer(&uart_control_timer);
	//BT_E : [CONBT-1475] LGC_BT_COMMON_IMP_KERNEL_UART_HCI_COMMAND_TIMEOUT

	clear_bit(BT_PROTO, &flags);

	if (test_bit(BT_ASLEEP, &flags)) {
		clear_bit(BT_ASLEEP, &flags);
	}
#ifdef UART_CONTROL_MSM
	else {
		/*Deactivate UART*/
		//hsuart_power(0); //block uart power off
	}
#endif /*UART_CONTROL_MSM*/

	atomic_inc(&open_count);

	spin_unlock_irqrestore(&rw_lock, irq_flags);

#ifdef QOS_REQUEST_MSM
	if (bsi->dma_qos_request == REQUESTED) {
		pm_qos_remove_request(&bsi->dma_qos);
		bsi->dma_qos_request = NOT_REQUESTED;
	}
#endif /* QOS_REQUEST_MSM */

#ifdef QOS_REQUEST_TEGRA
	if (bsi->resume_min_frequency)
		pm_qos_remove_request(&bsi->resume_cpu_freq_req);
#endif /* QOS_REQUEST_TEGRA */

	if (disable_irq_wake(bsi->host_wake_irq))
		printk("%s, Couldn't disable hostwake IRQ wakeup mode\n", __func__);
	free_irq(bsi->host_wake_irq, NULL);

	wake_lock_timeout(&bsi->wake_lock, HZ / 2);
}
static void __exit cfboost_exit(void)
{
	/* stop input events */
	input_unregister_handler(&cfb_input_handler);
	/* cancel pending work requests */
	cancel_work_sync(&boost);
	cancel_delayed_work_sync(&unboost);
	/* clean up */
	destroy_workqueue(cfb_wq);
	pm_qos_remove_request(&freq_req);
	pm_qos_remove_request(&core_req);
}
void kbase_platform_dvfs_term(void)
{
	if (mali_dvfs_wq)
		destroy_workqueue(mali_dvfs_wq);

#if defined(CONFIG_ARM_EXYNOS5420_BUS_DEVFREQ)
	pm_qos_remove_request(&exynos5_g3d_mif_qos);
	pm_qos_remove_request(&exynos5_g3d_int_qos);
	pm_qos_remove_request(&exynos5_g3d_cpu_qos);
#endif

	mali_dvfs_wq = NULL;
}
/* gpu power clock deinit */
int sec_gpu_pwr_clk_deinit(void)
{
	int ret = -1;

	gpu_clks_put();

	ret = gpu_regulator_disable();
	if (ret)
		PVR_DPF((PVR_DBG_ERROR, "gpu_regulator_disable error[%d]", ret));

#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
	pm_qos_remove_request(&exynos5_g3d_cpu_qos);
	pm_qos_remove_request(&exynos5_g3d_int_qos);
	pm_qos_remove_request(&exynos5_g3d_mif_qos);
#endif
	return ret;
}
void fimg2d_pm_qos_remove(struct fimg2d_control *ctrl)
{
	struct fimg2d_platdata *pdata = to_fimg2d_plat(ctrl->dev);

#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	if (pdata->cpu_min)
		pm_qos_remove_request(&ctrl->exynos5_g2d_cpu_qos);
#endif
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
	if (pdata->mif_min)
		pm_qos_remove_request(&ctrl->exynos5_g2d_mif_qos);
	if (pdata->int_min)
		pm_qos_remove_request(&ctrl->exynos5_g2d_int_qos);
#endif
}
int mv_otg_remove(struct platform_device *pdev)
{
	struct mv_otg *mvotg = platform_get_drvdata(pdev);

	device_init_wakeup(&pdev->dev, 0);
	sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);

	if (mvotg->qwork) {
		flush_workqueue(mvotg->qwork);
		destroy_workqueue(mvotg->qwork);
	}

	if (mvotg->pdata->extern_attr
	    & (MV_USB_HAS_VBUS_DETECTION | MV_USB_HAS_IDPIN_DETECTION))
		pxa_usb_unregister_notifier(mvotg->pdata->id, &mvotg->notifier);

	mv_otg_disable(mvotg);
	clk_unprepare(mvotg->clk);
	pm_qos_remove_request(&mvotg->qos_idle);

	usb_remove_phy(&mvotg->phy);

	return 0;
}
void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}

	ieee80211_unregister_hw(hw);
	pm_qos_remove_request(&ath9k_pm_qos_req);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
	kfree(sc->sec_wiphy);
}
static void debugfs_remove_request(int users)
{
	struct debugfs_pm_qos_user *user = NULL;
	int i;

	if (users > pm_qos_users) {
		pr_info("[DDR DEVFREQ DEBUGFS] no such user\n");
		return;
	}

	users--;
	user = pm_qos_user[users];
	pm_qos_remove_request(&user->req);
	kfree(user);
	pm_qos_user[users] = NULL;

	for (i = users + 1; i < PM_QOS_USERS; i++) {
		pm_qos_user[i - 1] = pm_qos_user[i];
		pm_qos_user[i] = NULL;
	}

	pm_qos_users--;
	return;
}
static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_pxa *pxa = pltfm_host->priv;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;

	sdhci_remove_host(host, 1);

	if (pdata && pdata->flags & PXA_FLAG_EN_PM_RUNTIME)
		pm_runtime_disable(&pdev->dev);

	if (pdata)
		pm_qos_remove_request(&pdata->qos_idle);

	clk_disable_unprepare(pltfm_host->clk);
	clk_put(pltfm_host->clk);

	if (pdata && pdata->cd_type == PXA_SDHCI_CD_GPIO &&
	    gpio_is_valid(pdata->ext_cd_gpio))
		mmc_gpio_free_cd(host->mmc);

	sdhci_pltfm_free(pdev);
	kfree(pxa);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
				    struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
	int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
	int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
	int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;

	mutex_lock(&mcpdm->mutex);

	if (!dai->active) {
		if (omap_mcpdm_active(mcpdm)) {
			omap_mcpdm_stop(mcpdm);
			omap_mcpdm_close_streams(mcpdm);
			mcpdm->config[0].link_mask = 0;
			mcpdm->config[1].link_mask = 0;
		}
	}

	if (mcpdm->latency[stream2])
		pm_qos_update_request(&mcpdm->pm_qos_req,
				      mcpdm->latency[stream2]);
	else if (mcpdm->latency[stream1])
		pm_qos_remove_request(&mcpdm->pm_qos_req);

	mcpdm->latency[stream1] = 0;

	mutex_unlock(&mcpdm->mutex);
}
static void usb_load(struct work_struct *work)
{
	int cpu;
	unsigned int num_irqs = 0;
	static unsigned int old_num_irqs = UINT_MAX;

	for_each_online_cpu(cpu)
		num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu);

	if ((num_irqs > old_num_irqs) &&
	    (num_irqs - old_num_irqs) > USB_LIMIT) {
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, "usb", 125);
		if (!usb_pm_qos_is_latency_0) {
			usb_pm_qos_latency =
				pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 0);
			usb_pm_qos_is_latency_0 = true;
		}
	} else {
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, "usb", 25);
		if (usb_pm_qos_is_latency_0) {
			pm_qos_remove_request(usb_pm_qos_latency);
			usb_pm_qos_is_latency_0 = false;
		}
	}

	old_num_irqs = num_irqs;

	schedule_delayed_work_on(0, &work_usb_workaround,
				 msecs_to_jiffies(USB_PROBE_DELAY));
}
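/*
 * Note (assumption about kernel generations): usb_load() above uses the older
 * PM QoS interface in which pm_qos_add_request() allocates the request and
 * returns a handle, while most other snippets in this collection use the later
 * interface that takes a caller-owned request object. The two shapes, roughly,
 * with invented variable names (they belong to different kernel versions and
 * cannot coexist in one build):
 *
 *	// older, handle-based (as in usb_load() above)
 *	struct pm_qos_request_list *h;
 *	h = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 0);
 *	pm_qos_remove_request(h);
 *
 *	// newer, caller-owned (as in wlan_load() further below)
 *	static struct pm_qos_request req;
 *	pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 0);
 *	pm_qos_remove_request(&req);
 */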
static int msm_mctl_release(struct msm_cam_media_controller *p_mctl)
{
	int rc = 0;
	struct msm_sync *sync = &(p_mctl->sync);

	v4l2_subdev_call(p_mctl->ispif_sdev, core, ioctl,
		VIDIOC_MSM_ISPIF_RELEASE, NULL);

	if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_release)
		p_mctl->isp_sdev->isp_release(&p_mctl->sync);

	v4l2_subdev_call(p_mctl->csid_sdev, core, ioctl,
		VIDIOC_MSM_CSID_RELEASE, NULL);

	v4l2_subdev_call(p_mctl->csiphy_sdev, core, ioctl,
		VIDIOC_MSM_CSIPHY_RELEASE, NULL);

	if (p_mctl->sync.actctrl.a_power_down)
		p_mctl->sync.actctrl.a_power_down(sync->sdata->actuator_info);

	if (p_mctl->sync.sctrl.s_release)
		p_mctl->sync.sctrl.s_release();

	rc = msm_camio_sensor_clk_off(sync->pdev);
	if (rc < 0)
		pr_err("%s: msm_camio_sensor_clk_off failed:%d\n",
			__func__, rc);

	pm_qos_update_request(&p_mctl->pm_qos_req_list, PM_QOS_DEFAULT_VALUE);
	pm_qos_remove_request(&p_mctl->pm_qos_req_list);

	wake_unlock(&p_mctl->sync.wake_lock);
	return rc;
}
static int broadcast_tdmb_fc8080_remove(struct spi_device *spi)
{
	printk("broadcast_tdmb_fc8080_remove \n");

#ifdef FEATURE_DMB_USE_WORKQUEUE
	if (fc8080_ctrl_info.spi_wq) {
		flush_workqueue(fc8080_ctrl_info.spi_wq);
		destroy_workqueue(fc8080_ctrl_info.spi_wq);
	}
#endif

#ifdef FEATURE_DMB_USE_BUS_SCALE
	msm_bus_scale_unregister_client(fc8080_ctrl_info.bus_scale_client_id);
#endif

	free_irq(spi->irq, &fc8080_ctrl_info);
	mutex_destroy(&fc8080_ctrl_info.mutex);
	wake_lock_destroy(&fc8080_ctrl_info.wake_lock);

#ifdef FEATURE_DMB_USE_PM_QOS
	pm_qos_remove_request(&fc8080_ctrl_info.pm_req_list);
#endif

	memset((unsigned char *)&fc8080_ctrl_info, 0x0,
	       sizeof(struct tdmb_fc8080_ctrl_blk));
	return 0;
}
static int exynos7_devfreq_disp_remove(struct platform_device *pdev)
{
	struct devfreq_data_disp *data = platform_get_drvdata(pdev);

	devfreq_remove_device(data->devfreq);

	pm_qos_remove_request(&min_disp_thermal_qos);
	pm_qos_remove_request(&exynos7_disp_qos);
	pm_qos_remove_request(&boot_disp_qos);

	regulator_put(data->vdd_disp_cam0);
	kfree(data);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static void gpio_key_set_dvfs_off(struct work_struct *work)
{
	struct gpio_button_data *bdata =
		container_of(work, struct gpio_button_data, key_work_dvfs_off.work);

	mutex_lock(&bdata->key_dvfs_lock);

	pm_qos_remove_request(&bdata->key_cpu_qos);
	pm_qos_remove_request(&bdata->key_mif_qos);
	pm_qos_remove_request(&bdata->key_int_qos);

	bdata->key_dvfs_lock_status = false;
	mutex_unlock(&bdata->key_dvfs_lock);

	printk(KERN_DEBUG "[key] DVFS Off\n");
}
static void __exit secmem_exit(void)
{
#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
	pm_qos_remove_request(&exynos5_secmem_mif_qos);
#endif
	misc_deregister(&secmem);
}
void release_axi_qos(void)
{
	/*
	if (!&pm_qos_req) {
		CDBG("add_axi_qos() has not been called\n");
	}
	*/
	pm_qos_remove_request(&pm_qos_req);
}
void bimc_boot_completed(struct work_struct *work)
{
	pr_info("%s\n", __FUNCTION__);

	if (pm_qos_request_active(&bimc_min_qos))
		pm_qos_remove_request(&bimc_min_qos);

	msm_set_pmqos_bimc_min_freq();
}
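/*
 * Hypothetical helper (assumption, not from the drivers above): factor out
 * the guarded-removal pattern used in bimc_boot_completed(), so a request
 * that was never added, or was already removed, is not passed to
 * pm_qos_remove_request() and does not trigger the PM QoS core's warning
 * about removing an unknown request.
 */
static void example_pm_qos_remove_if_active(struct pm_qos_request *req)
{
	if (pm_qos_request_active(req))
		pm_qos_remove_request(req);
}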
static void __exit secmem_exit(void)
{
#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
	pm_qos_remove_request(&exynos5_secmem_mif_qos);
#endif
	__pm_runtime_disable(secmem.this_device, false);
	misc_deregister(&secmem);
}
static void wlan_load(struct work_struct *work)
{
	int cpu;
	unsigned int num_irqs = 0;
	static unsigned int old_num_irqs = UINT_MAX;

	for_each_online_cpu(cpu)
		num_irqs += kstat_irqs_cpu(IRQ_DB8500_SDMMC1, cpu);

	if ((num_irqs > old_num_irqs) &&
	    (num_irqs - old_num_irqs) > wlan_limit) {
		if (wlan_arm_khz)
			prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
						     "wlan", wlan_arm_khz);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					     "wlan", PRCMU_QOS_MAX_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP,
					     "wlan", PRCMU_QOS_MAX_VALUE);

		if (!wlan_pm_qos_is_latency_0) {
			/*
			 * The wake up latency is set to 0 to prevent
			 * the system from going to sleep. This improves
			 * the wlan throughput in DMA mode.
			 * The wake up latency from sleep adds ~5% overhead
			 * for TX in some cases.
			 * This change doesn't increase performance for wlan
			 * PIO since the CPU usage prevents sleep in this mode.
			 */
			pm_qos_add_request(&wlan_pm_qos_latency,
					   PM_QOS_CPU_DMA_LATENCY, 0);
			wlan_pm_qos_is_latency_0 = true;
		}
	} else {
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "wlan", PRCMU_QOS_DEFAULT_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					     "wlan", PRCMU_QOS_DEFAULT_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP,
					     "wlan", PRCMU_QOS_DEFAULT_VALUE);
		if (wlan_pm_qos_is_latency_0) {
			pm_qos_remove_request(&wlan_pm_qos_latency);
			wlan_pm_qos_is_latency_0 = false;
		}
	}

	old_num_irqs = num_irqs;

	schedule_delayed_work_on(0, &work_wlan_workaround,
				 msecs_to_jiffies(wlan_probe_delay));
}
/*****************************************************************************
 Description  : Releases the workqueue, frees memory, etc. while the Wi-Fi
                driver is closing.
 Prototype    : int v9r1_freq_ctrl_destroy(void)
 Input Param  :
 Output Param :
 Return Value :
******************************************************************************/
int v9r1_freq_ctrl_destroy(void)
{
#ifdef CONFIG_CPU_FREQ_GOV_K3HOTPLUG
	lfprintf("v9r1_freq_ctrl_destroy enter\n");

	if (NULL == freq_lock_control_ptr) {
		lfprintf("freq_lock_control_ptr NULL pointer!\n");
		return 0;
	}

	mutex_lock(&freq_lock_control_ptr->lock_freq_mtx);
	del_timer_sync(&freq_lock_control_ptr->lock_freq_timer_list);

	/* If the Wi-Fi driver is closing but the frequency lock release timer
	 * has not expired yet, the lock must be released directly here! */
	if (FREQ_LOCK_ENABLE == freq_lock_control_ptr->lock_mod) {
		pm_qos_remove_request(&freq_lock_control_ptr->cpu_qos_request);
		if (freq_lock_control_ptr->lock_level >= START_DDR_FREQ_LOCK_LEVEL)
			pm_qos_remove_request(&freq_lock_control_ptr->ddr_qos_request);

		freq_lock_control_ptr->release_work_state = LR_DISABLE_MAGIC;
		freq_lock_control_ptr->lock_mod = FREQ_LOCK_DISABLE;
		lfprintf("v9r1_freq_ctrl_destroy freq lock released here!\n");
	}
#ifdef DEBUG_WIFI_FREQ_LOCK
	else {
		lfprintf("v9r1_freq_ctrl_destroy freq lock has already been released!\n");
	}
#endif
	mutex_unlock(&freq_lock_control_ptr->lock_freq_mtx);

	/* Cancel outstanding work only after dropping the mutex: the release
	 * work itself takes lock_freq_mtx, so cancelling it synchronously
	 * while holding the mutex could deadlock. */
	cancel_work_sync(&freq_lock_control_ptr->release_lock_work);
	cancel_work_sync(&freq_lock_control_ptr->do_freq_lock_work);
	destroy_workqueue(freq_lock_control_ptr->freq_ctl_wq);

	mutex_destroy(&freq_lock_control_ptr->lock_freq_mtx);

	kfree(freq_lock_control_ptr);
	freq_lock_control_ptr = NULL;

	printk("wl monitor exit ok!\n");
#endif /* CONFIG_CPU_FREQ_GOV_K3HOTPLUG */
	return 0;
}
static int exynos5_devfreq_int_remove(struct platform_device *pdev)
{
	struct devfreq_data_int *data = platform_get_drvdata(pdev);

	devfreq_remove_device(data->devfreq);

	pm_qos_remove_request(&min_int_thermal_qos);
	pm_qos_remove_request(&exynos5_int_qos);
	pm_qos_remove_request(&boot_int_qos);
	pm_qos_remove_request(&exynos5_int_bts_qos);

	regulator_put(data->vdd_int);
	kfree(data);

	platform_set_drvdata(pdev, NULL);

	return 0;
}