void cpufreq_stats_reset(void) { unsigned int cpu; cpufreq_unregister_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER); cpufreq_unregister_notifier(¬ifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) { cpufreq_stats_free_sysfs(cpu); cpufreq_stats_free_table(cpu); } cpufreq_register_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER); cpufreq_register_notifier(¬ifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) { cpufreq_update_policy(cpu); } return; }
/*
 * cpufreq_stats_init - module init: hook the cpufreq notifiers and
 * create the per-CPU and aggregate statistics tables.
 *
 * Returns 0 on success or a negative errno when notifier registration
 * fails; a failed sysfs file creation is only warned about.
 *
 * BUGFIX: restored "&notifier_..." arguments that had been corrupted to
 * "¬ifier_..." by HTML-entity mangling (&not; -> ¬).
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_register_notifier(&notifier_policy_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	ret = cpufreq_register_notifier(&notifier_trans_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* Roll back the policy notifier on failure. */
		cpufreq_unregister_notifier(&notifier_policy_block,
					    CPUFREQ_POLICY_NOTIFIER);
		return ret;
	}

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	create_all_freq_table();
	ret = sysfs_create_file(cpufreq_global_kobject,
				&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Error creating sysfs file for cpufreq stats\n");

	return 0;
}
/*
 * cpufreq_stats_init - module init: hook the cpufreq notifiers, trigger
 * a policy update per online CPU, and create the overall-stats sysfs
 * group.  Returns 0 on success or a negative errno on notifier failure.
 *
 * BUGFIX: restored "&notifier_..." arguments that had been corrupted to
 * "¬ifier_..." by HTML-entity mangling, and the sysfs_create_group()
 * result — previously assigned but silently dropped — is now reported.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_register_notifier(&notifier_policy_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	ret = cpufreq_register_notifier(&notifier_trans_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* Roll back the policy notifier on failure. */
		cpufreq_unregister_notifier(&notifier_policy_block,
					    CPUFREQ_POLICY_NOTIFIER);
		return ret;
	}

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	ret = sysfs_create_group(cpufreq_global_kobject,
				 &overall_stats_attr_group);
	if (ret)
		pr_warn("Error creating sysfs group for cpufreq stats\n");

	return 0;
}
/*
 * cpufreq_stats_init - module init: create per-CPU stats tables, hook
 * the cpufreq notifiers, and expose the aggregate stats sysfs files.
 *
 * Returns 0 on success or a negative errno when notifier registration
 * fails (the already-created tables are freed on that path).
 *
 * BUGFIX: restored "&notifier_..." arguments that had been corrupted to
 * "¬ifier_..." by HTML-entity mangling (&not; -> ¬).
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_register_notifier(&notifier_policy_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	for_each_online_cpu(cpu)
		cpufreq_stats_create_table(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* Roll back the policy notifier and the tables. */
		cpufreq_unregister_notifier(&notifier_policy_block,
					    CPUFREQ_POLICY_NOTIFIER);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	create_all_freq_table();
	ret = cpufreq_sysfs_create_file(&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq stats\n");
	ret = cpufreq_sysfs_create_file(&_attr_current_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

	return 0;
}
/*
 * cpufreq_stats_init - module init: hook the cpufreq notifiers, then
 * replay a CPU_ONLINE event for every online CPU so their stats tables
 * are created.  Returns 0 on success or a negative errno.
 *
 * BUGFIX: restored "&notifier_..." arguments that had been corrupted to
 * "¬ifier_..." by HTML-entity mangling; also moved the assignments out
 * of the if-conditions for readability.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_register_notifier(&notifier_policy_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	ret = cpufreq_register_notifier(&notifier_trans_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* Roll back the policy notifier on failure. */
		cpufreq_unregister_notifier(&notifier_policy_block,
					    CPUFREQ_POLICY_NOTIFIER);
		return ret;
	}

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
					  CPU_ONLINE, (void *)(long)cpu);
	}
	return 0;
}
static int ltc3577_attach( struct i2c_adapter *adap ) { struct ltc3577_i2c_driver *driver = <c3577_driver; /* YUCK! However no other way to get it. */ int rc; /* Now initialize the semaphore. */ init_MUTEX( <c3577_register_cache_lock ); /* Now initialize and register the client. */ i2c_set_clientdata( &driver->client, driver ); driver->client.adapter=adap; rc=i2c_attach_client( &driver->client ); if( rc ) { printk( KERN_ERR "LTC3577: Error registring i2C driver to device core. Aborting.\n" ); return -ENODEV; } down( <c3577_register_cache_lock ); /* Set PWM mode */ ltc3577_set_swmode( ltc3577_write, LTC3577_PWM ); if( rc < 0 ) { printk( KERN_ERR "LTC3577: Error writing initial value to PMIC. Aborting.\n" ); i2c_detach_client( &driver->client ); up( <c3577_register_cache_lock ); return -ENODEV; } /* Register the usbmode statechange listener. This will inform us when to change the */ /* PMIC's settings. */ if( IO_HaveUsbBusPowered() ) { rc=add_usb_state_change_listener( ltc3577_state_change_listener, driver ); if( rc != 0 ) { printk( KERN_ERR "LTC3577: Can't register USBMODE change state listener. Aborting.\n" ); i2c_detach_client( &driver->client ); up( <c3577_register_cache_lock ); return -ENODEV; } } atomic_set(&driver->suspended, 0); ltc3577_i2c_commit( &driver->client, ltc3577_write, NULL, 0); up( <c3577_register_cache_lock ); #if defined CONFIG_CPU_FREQ && (defined CONFIG_S3C24XX_DVS_CPUFREQ || defined S3C24XX_DFS_CPUFREQ) /* Register the CPUFREQ entries. These will set the right voltage when the frequency */ /* changes. */ cpufreq_register_notifier( &driver->freq_transition, CPUFREQ_TRANSITION_NOTIFIER ); cpufreq_register_notifier( &driver->freq_policy, CPUFREQ_POLICY_NOTIFIER ); #endif /* Done initializing. */ return 0; }
/*
 * cpufreq_stats_init - module init: hook the policy notifier, trigger a
 * policy update per online CPU, then hook the transition notifier.
 * Fully unwinds (notifiers and tables) if the second registration fails.
 *
 * BUGFIX: restored "&notifier_..." arguments that had been corrupted to
 * "¬ifier_..." by HTML-entity mangling (&not; -> ¬).
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_register_notifier(&notifier_policy_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* Roll back everything registered above. */
		cpufreq_unregister_notifier(&notifier_policy_block,
					    CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	return 0;
}
/* Wire the PWM device's transition callback into the cpufreq chain. */
static inline int pwm_cpufreq_notifier_register(struct pwm_device *p)
{
	p->freq_transition.notifier_call = pwm_freq_transition_notifier_cb;

	return cpufreq_register_notifier(&p->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
static inline int mfc_cpufreq_register(void) { int ret; unsigned long rate; bus_clk = clk_get(pm->device, MFC0_BUS_CLK_NAME); if (IS_ERR(bus_clk)) { printk(KERN_ERR "failed to get bus clock\n"); ret = -ENOENT; goto err_bus_clk; } prev_bus_rate = clk_get_rate(bus_clk); rate = clk_get_rate(pm->clock); if (rate != prev_bus_rate) clk_set_rate(pm->op_clk, prev_bus_rate); pm->freq_transition.notifier_call = mfc_cpufreq_transition; return cpufreq_register_notifier(&pm->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); err_bus_clk: return ret; }
static int cpu_boost_init(void) { int cpu, ret; struct cpu_sync *s; cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER); cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0); if (!cpu_boost_wq) return -EFAULT; INIT_WORK(&input_boost_work, do_input_boost); for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); s->cpu = cpu; init_waitqueue_head(&s->sync_wq); spin_lock_init(&s->lock); INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem); INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem); s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu, "boost_sync/%d", cpu); #if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8974PRO) kthread_bind(s->thread, cpu); #endif } atomic_notifier_chain_register(&migration_notifier_head, &boost_migration_nb); ret = input_register_handler(&cpuboost_input_handler); return 0; }
static int cpu_boost_init(void) { int cpu; struct cpu_sync *s; cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER); boost_rem_wq = alloc_workqueue("cpuboost_rem_wq", WQ_HIGHPRI, 0); if (!boost_rem_wq) return -EFAULT; for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); s->cpu = cpu; init_waitqueue_head(&s->sync_wq); spin_lock_init(&s->lock); INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem); s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu, "boost_sync/%d", cpu); } atomic_notifier_chain_register(&migration_notifier_head, &boost_migration_nb); return 0; }
/*
 * Platform init for MSM thermal mitigation: validate the sensor id,
 * hook the cpufreq policy notifier, kick off periodic temperature
 * polling, and (on SMP) watch CPU hotplug events.
 */
int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
{
	int rc;

	BUG_ON(!pdata);
	tsens_get_max_sensor_num(&max_tsens_num);
	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));

	if (create_sensor_id_map() ||
	    check_sensor_id(msm_thermal_info.sensor_id))
		return -EINVAL;

	enabled = 1;
	rc = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
				       CPUFREQ_POLICY_NOTIFIER);
	if (rc)
		pr_err("%s: cannot register cpufreq notifier\n",
		       KBUILD_MODNAME);

	INIT_DELAYED_WORK(&check_temp_work, check_temp);
	schedule_delayed_work(&check_temp_work, 0);

	if (num_possible_cpus() > 1)
		register_cpu_notifier(&msm_thermal_cpu_notifier);

	return rc;
}
/*
 * Module init for MSM thermal: register the policy and hotplug
 * notifiers, then start the temperature polling work.  Returns the
 * first registration error, or 0 on success.
 */
static int __init msm_thermal_init(void)
{
	int ret;

	enabled = 1;
	mutex_init(&policy_mutex);
	INIT_DELAYED_WORK(&check_temp_work, check_temp);

	ret = cpufreq_register_notifier(&msm_thermal_notifier_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret) {
		pr_err("%s: cpufreq_register_notifier FAIL: %d\n",
		       __func__, ret);
		return ret;
	}

	ret = register_hotcpu_notifier(&msm_thermal_hotcpu_notify);
	if (ret) {
		pr_err("%s: register_hotcpu_notifier FAIL: %d\n",
		       __func__, ret);
		return ret;
	}

	schedule_delayed_work(&check_temp_work, 0);
	return 0;
}
static int osiris_dvs_probe(struct platform_device *pdev) { int ret; dev_info(&pdev->dev, "initialising\n"); ret = gpio_request(OSIRIS_GPIO_DVS, "osiris-dvs"); if (ret) { dev_err(&pdev->dev, "cannot claim gpio\n"); goto err_nogpio; } /* start with dvs disabled */ gpio_direction_output(OSIRIS_GPIO_DVS, 1); ret = cpufreq_register_notifier(&osiris_dvs_nb, CPUFREQ_TRANSITION_NOTIFIER); if (ret) { dev_err(&pdev->dev, "failed to register with cpufreq\n"); goto err_nofreq; } osiris_dvs_tps_setdvs(true); return 0; err_nofreq: gpio_free(OSIRIS_GPIO_DVS); err_nogpio: return ret; }
static int cpu_boost_init(void) { int cpu, ret; struct cpu_sync *s; cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0); if (!cpu_boost_wq) return -EFAULT; INIT_WORK(&input_boost_work, do_input_boost); for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); s->cpu = cpu; init_waitqueue_head(&s->sync_wq); atomic_set(&s->being_woken, 0); spin_lock_init(&s->lock); INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem); INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem); s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu, "boost_sync/%d", cpu); set_cpus_allowed(s->thread, *cpumask_of(cpu)); } cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER); atomic_notifier_chain_register(&migration_notifier_head, &boost_migration_nb); ret = input_register_handler(&cpuboost_input_handler); return 0; }
/*
 * Allocate and seed the conservative governor's tunables with their
 * defaults; when @notify is set, also hook the transition notifier.
 * Returns 0, or -ENOMEM if the tunables cannot be allocated.
 */
static int cs_init(struct dbs_data *dbs_data, bool notify)
{
	struct cs_dbs_tuners *tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);

	if (tuners == NULL) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->freq_step = DEF_FREQUENCY_STEP;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
				      jiffies_to_usecs(10);

	if (notify)
		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}
static int cpu_boost_init(void) { int cpu, ret; struct cpu_sync *s; cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER); cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0); if (!cpu_boost_wq) return -EFAULT; INIT_WORK(&input_boost_work, do_input_boost); for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); s->cpu = cpu; spin_lock_init(&s->lock); INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem); INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem); } atomic_notifier_chain_register(&migration_notifier_head, &boost_migration_nb); ret = smpboot_register_percpu_thread(&cpuboost_threads); if (ret) pr_err("Cannot register cpuboost threads.\n"); ret = input_register_handler(&cpuboost_input_handler); if (ret) pr_err("Cannot register cpuboost input handler.\n"); return ret; }
/* Attach the UART port's clock-change callback to the cpufreq chain. */
static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
{
	port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition;

	return cpufreq_register_notifier(&port->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
/*
 * Hook cpufreq transitions only when the TWD event devices exist and
 * the TWD clock was successfully obtained; otherwise do nothing.
 */
static int twd_cpufreq_init(void)
{
	if (!twd_evt || !*__this_cpu_ptr(twd_evt) || IS_ERR(twd_clk))
		return 0;

	return cpufreq_register_notifier(&twd_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
/* Follow CPU frequency changes only when the TWD clock was found. */
static int twd_cpufreq_init(void)
{
	if (IS_ERR_OR_NULL(twd_clk))
		return 0;

	return cpufreq_register_notifier(&twd_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
/* Hook watchdog clock adjustment to cpufreq; skipped on Exynos5250. */
static inline int s3c2410wdt_cpufreq_register(void)
{
	if (soc_is_exynos5250())
		return 0;

	return cpufreq_register_notifier(&s3c2410wdt_cpufreq_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
/*
 * Register the ACPI thermal policy notifier; mark the module
 * initialized only if registration succeeded.
 */
void acpi_thermal_cpufreq_init(void)
{
	int ret;

	ret = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret == 0)
		acpi_thermal_cpufreq_is_init = 1;
}
/*
 * Register the Processor Platform Limit policy notifier, recording
 * success in acpi_processor_ppc_status; log when unsupported.
 */
void acpi_processor_ppc_init(void)
{
	int ret = cpufreq_register_notifier(&acpi_ppc_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		printk(KERN_DEBUG
		       "Warning: Processor Platform Limit not supported.\n");
	else
		acpi_processor_ppc_status |= PPC_REGISTERED;
}
static inline int ak98sdio_cpufreq_register(struct ak98_mci_host *host) { // use for requst and cpufreq init_MUTEX(&host->freq_lock); host->freq_transition.notifier_call = ak98sdio_cpufreq_transition; return cpufreq_register_notifier(&host->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); }
/*
 * cpu_iboost_init - set up the input-boost driver: workqueue, policy
 * notifier, framebuffer client, per-CPU boost state/threads, migration
 * notifier, input handler and the sysfs interface.
 *
 * BUGFIX: when kobject_create_and_add() failed, the function fell
 * through to the err label with ret still 0 and reported success
 * despite the missing sysfs directory; it now returns -ENOMEM.
 */
static int __init cpu_iboost_init(void)
{
	struct boost_policy *b;
	int cpu, ret;

	boost_wq = alloc_workqueue("cpu_iboost_wq", WQ_HIGHPRI, 0);
	if (!boost_wq) {
		pr_err("Failed to allocate workqueue\n");
		ret = -EFAULT;
		goto err;
	}

	cpufreq_register_notifier(&cpu_do_boost_nb, CPUFREQ_POLICY_NOTIFIER);

	INIT_DELAYED_WORK(&fb_boost_work, fb_boost_fn);
	fb_register_client(&fb_boost_nb);

	for_each_possible_cpu(cpu) {
		b = &per_cpu(boost_info, cpu);
		b->cpu = cpu;
		INIT_DELAYED_WORK(&b->ib_restore_work, ib_restore_main);
		init_waitqueue_head(&b->sync_wq);
		atomic_set(&b->being_woken, 0);
		spin_lock_init(&b->lock);
		INIT_DELAYED_WORK(&b->mig_boost_rem, do_mig_boost_rem);
		b->thread = kthread_run(boost_mig_sync_thread, (void *)cpu,
					"boost_sync/%d", cpu);
		set_cpus_allowed(b->thread, *cpumask_of(cpu));
	}
	atomic_notifier_chain_register(&migration_notifier_head,
				       &boost_migration_nb);

	INIT_WORK(&boost_work, ib_boost_main);

	ret = input_register_handler(&cpu_iboost_input_handler);
	if (ret) {
		pr_err("Failed to register input handler, err: %d\n", ret);
		goto err;
	}

	cpu_iboost_kobject = kobject_create_and_add("cpu_input_boost",
						    kernel_kobj);
	if (!cpu_iboost_kobject) {
		pr_err("Failed to create kobject\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sysfs_create_group(cpu_iboost_kobject, &cpu_iboost_attr_group);
	if (ret) {
		pr_err("Failed to create sysfs interface\n");
		kobject_put(cpu_iboost_kobject);
	}
err:
	return ret;
}
/*
 * msm_sleep_info_init - set up per-CPU sleep statistics reporting.
 *
 * Creates the stats workqueue, hooks the idle enter/exit callbacks,
 * initializes the run-queue work items, and then, for every possible
 * CPU, sets up a wakeup work item, an hrtimer, a cpufreq policy
 * notifier and sysfs objects.
 *
 * NOTE(review): a per-CPU setup failure only logs and disables that
 * CPU (cpu = -1); the function still returns 0.  The cleanup path also
 * calls cpufreq_unregister_notifier() even when registration itself
 * was the step that failed — presumably harmless, but worth confirming.
 */
static int __init msm_sleep_info_init(void)
{
	int err = 0;
	int cpu;
	struct sleep_data *sleep_info = NULL;

	msm_stats_wq = create_workqueue("msm_stats_wq");
	if (!msm_stats_wq) {
		printk(KERN_ERR "Creation of msm_stats_wq failed!!\n");
		return -EINVAL;
	}

	/* Register callback from idle for all cpus */
	msm_idle_register_cb(idle_enter, idle_exit);

	INIT_DELAYED_WORK_DEFERRABLE(&rq_info.rq_work, rq_work_fn);
	INIT_DELAYED_WORK_DEFERRABLE(&rq_info.def_timer_work, def_work_fn);
	init_rq_attribs();

	for_each_possible_cpu(cpu) {
		printk(KERN_INFO "msm_sleep_stats: Initializing sleep stats "
				"for CPU[%d]\n", cpu);
		sleep_info = &per_cpu(core_sleep_info, cpu);
		sleep_info->cpu = cpu;

		INIT_WORK(&sleep_info->work, notify_uspace_work_fn);

		/* Initialize high resolution timer */
		hrtimer_init(&sleep_info->timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
		sleep_info->timer.function = timer_func;

		/* Register for cpufreq policy changes */
		sleep_info->nb.notifier_call = policy_change_notifier;
		err = cpufreq_register_notifier(&sleep_info->nb,
			CPUFREQ_POLICY_NOTIFIER);
		if (err)
			goto cleanup;

		/* Create sysfs object */
		err = add_sysfs_objects(sleep_info);
		if (err)
			goto cleanup;
		/* This CPU is fully set up; skip the cleanup label. */
		continue;
cleanup:
		printk(KERN_INFO "msm_sleep_stats: Failed to initialize sleep "
				"stats for CPU[%d]\n", cpu);
		sleep_info->cpu = -1;
		cpufreq_unregister_notifier(&sleep_info->nb,
			CPUFREQ_POLICY_NOTIFIER);
		remove_sysfs_objects(sleep_info);
	}
	return 0;
}
/*
 * Register the Cell PMI handler; if present, also hook cpufreq policy
 * updates.  Returns -ENODEV when no PMI handler could be registered.
 */
static int __init cbe_cpufreq_pmi_init(void)
{
	cbe_cpufreq_has_pmi = (pmi_register_handler(&cbe_pmi_handler) == 0);
	if (!cbe_cpufreq_has_pmi)
		return -ENODEV;

	cpufreq_register_notifier(&pmi_notifier_block,
				  CPUFREQ_POLICY_NOTIFIER);

	return 0;
}
/*
 * Bring the APBH and APBX DMA controllers out of soft reset and ungate
 * their clocks; with CPU_FREQ enabled, also track frequency changes.
 */
void __init stmp3xxx_dma_init(void)
{
	stmp3xxx_clearl(BM_APBH_CTRL0_CLKGATE | BM_APBH_CTRL0_SFTRST,
			REGS_APBH_BASE + HW_APBH_CTRL0);
	stmp3xxx_clearl(BM_APBX_CTRL0_CLKGATE | BM_APBX_CTRL0_SFTRST,
			REGS_APBX_BASE + HW_APBX_CTRL0);
#ifdef CONFIG_CPU_FREQ
	cpufreq_register_notifier(&dma_cpufreq_nb.nb,
				  CPUFREQ_TRANSITION_NOTIFIER);
#endif /* CONFIG_CPU_FREQ */
}
/*
 * Register the PCMCIA socket driver for cpufreq transition events,
 * logging any failure.  Returns the registration result.
 */
static int soc_pcmcia_cpufreq_register(void)
{
	int err = cpufreq_register_notifier(&soc_pcmcia_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

	if (err < 0)
		printk(KERN_ERR "Unable to register CPU frequency change "
			"notifier for PCMCIA (%d)\n", err);
	return err;
}
/*
 * Zero every CPU's thermal reduction percentage, then register the
 * policy notifier; mark the module initialized only on success.
 */
void acpi_thermal_cpufreq_init(void)
{
	int cpu;
	int ret;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpufreq_thermal_reduction_pctg[cpu] = 0;

	ret = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret == 0)
		acpi_thermal_cpufreq_is_init = 1;
}