/*
 * Sysfs store handler: enable/disable core control from userspace.
 * Any integer is accepted; non-zero enables.  Serialized against the
 * hotplug callback by core_control_mutex.  Always consumes the write.
 */
static ssize_t __cpuinit store_cc_enabled(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	int input = 0;

	mutex_lock(&core_control_mutex);

	err = kstrtoint(buf, 10, &input);
	if (err) {
		pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
		goto done_store_cc;
	}

	/* Already in the requested state: nothing to do. */
	if (core_control_enabled == !!input)
		goto done_store_cc;

	core_control_enabled = !!input;
	if (core_control_enabled) {
		pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
		register_cpu_notifier(&msm_thermal_cpu_notifier);
		/* Re-apply the current offline mask now that we manage cores. */
		update_offline_cores(cpus_offlined);
	} else {
		pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
		unregister_cpu_notifier(&msm_thermal_cpu_notifier);
	}

done_store_cc:
	mutex_unlock(&core_control_mutex);
	return count;
}
/*
 * Deactivate one clock source and, if it was active, detach its clock
 * rate-change notifier.  Shared teardown for the GPU and EMC sources.
 */
static void quadd_power_clk_source_stop(struct power_clk_source *s)
{
	/* cmpxchg returns the old value: non-zero means we deactivated it. */
	if (atomic_cmpxchg(&s->active, 1, 0)) {
#ifdef CONFIG_COMMON_CLK
		if (s->clkp)
			clk_notifier_unregister(s->clkp, &s->nb);
#endif
	}
}

/*
 * Stop power-clock sampling: cancel the poll timer (if one was armed),
 * detach the GPU/EMC clock notifiers, and drop the CPU source's
 * hotplug notifier.  No-op when power-rate sampling was never enabled.
 */
void quadd_power_clk_stop(void)
{
	struct power_clk_source *s;

	if (power_ctx.quadd_ctx->param.power_rate_freq == 0)
		return;

	if (power_ctx.period > 0)
		del_timer_sync(&power_ctx.timer);

	/* GPU and EMC share identical teardown; the CPU source differs. */
	quadd_power_clk_source_stop(&power_ctx.gpu);
	quadd_power_clk_source_stop(&power_ctx.emc);

	s = &power_ctx.cpu;
	if (atomic_cmpxchg(&s->active, 1, 0)) {
		pr_info("power_clk: stop\n");
		unregister_cpu_notifier(&s->nb);
	}
}
/*
 * Sysfs store handler: toggle core control.  When enabling, also kick
 * the hotplug worker so it re-evaluates the current core state.
 * Always consumes the whole write, even on bad input.
 */
static ssize_t __ref store_cc_enabled(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	int input = 0;

	err = kstrtoint(buf, 10, &input);
	if (err) {
		pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
		goto done_store_cc;
	}

	if (core_control_enabled == !!input)
		goto done_store_cc;	/* already in requested state */

	core_control_enabled = !!input;
	if (!core_control_enabled) {
		pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
		unregister_cpu_notifier(&msm_thermal_cpu_notifier);
		goto done_store_cc;
	}

	pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
	register_cpu_notifier(&msm_thermal_cpu_notifier);
	if (hotplug_task)
		complete(&hotplug_notify_complete);
	else
		pr_err("%s: Hotplug task is not initialized\n",
				KBUILD_MODNAME);

done_store_cc:
	return count;
}
/*
 * Module exit: tear down in reverse order of init — remove the CPU
 * hotplug notifier, uninit the per-CPU idle devices, then unregister
 * the cpuidle driver.
 *
 * Fix: dropped the redundant trailing `return;` in a void function.
 */
static void __exit pseries_processor_idle_exit(void)
{
	unregister_cpu_notifier(&setup_hotplug_notifier);
	pseries_idle_devices_uninit();
	cpuidle_unregister_driver(&pseries_idle_driver);
}
/*
 * Undo boost-MSR setup.  A non-NULL `msrs` is the marker that boost
 * support was initialized; otherwise there is nothing to release.
 */
static void acpi_cpufreq_boost_exit(void)
{
	if (!msrs)
		return;

	unregister_cpu_notifier(&boost_nb);
	msrs_free(msrs);
	msrs = NULL;	/* guard against double teardown */
}
/*
 * Module exit: destroy the per-CPU MSR device nodes, then release the
 * device class, the char-dev major, and the CPU hotplug notifier.
 */
static void __exit msr_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));

	class_destroy(msr_class);
	unregister_chrdev(MSR_MAJOR, "cpu/msr");
	unregister_cpu_notifier(&msr_class_cpu_notifier);
}
/*
 * Module exit: remove the per-CPU cpuid device nodes, then release the
 * simple class, the char-dev major, and the CPU hotplug notifier.
 */
static void __exit cpuid_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		class_simple_device_remove(MKDEV(CPUID_MAJOR, cpu));

	class_simple_destroy(cpuid_class);
	unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
	unregister_cpu_notifier(&cpuid_class_cpu_notifier);
}
void vmmr0_exit_srcu(void) { int cpu; #ifdef CONFIG_HOTPLUG_CPU unregister_cpu_notifier(&cpu_nfb); #endif /* CONFIG_HOTPLUG_CPU */ for_each_online_cpu(cpu) if (per_cpu(sync_thread, cpu)) kthread_stop(per_cpu(sync_thread, cpu)); }
/*
 * Tear down oprofile NMI support.  The sysfs entries and (on SMP) the
 * CPU hotplug notifier exist only when NMI mode was actually used;
 * the CPU model's own exit hook runs regardless, if it provides one.
 */
void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
#ifdef CONFIG_SMP
		unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
	}
	if (model->exit)
		model->exit();
}
/*
 * Init: register the CPU-PMU hotplug notifier and the platform driver.
 * The notifier stays registered only if the driver registers too;
 * otherwise it is rolled back.  Returns 0 or a negative errno.
 */
static int __init register_pmu_driver(void)
{
	int ret;

	ret = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
	if (ret)
		return ret;

	ret = platform_driver_register(&cpu_pmu_driver);
	if (ret)
		unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

	return ret;
}
/*
 * Platform-driver remove callback for the S5P EHCI host controller.
 * Tears down runtime PM, the HCD, optional IRQ-distribution state,
 * notifies the HSIC link layer, exits the PHY, then releases MMIO,
 * clock and memory.  Always returns 0.
 */
static int __devexit s5p_ehci_remove(struct platform_device *pdev)
{
	struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;

	/* pm_runtime_disable called twice during pdev unregistering
	 * it causes disable_depth mismatching, so rpm for this device
	 * cannot works from disable_depth count
	 * replace it to runtime forbid. */
#ifdef CONFIG_USB_SUSPEND
#ifdef CONFIG_MDM_HSIC_PM
	pm_runtime_forbid(&pdev->dev);
#else
	pm_runtime_disable(&pdev->dev);
#endif
#endif
	s5p_ehci->power_on = 0;
	remove_ehci_sys_file(hcd_to_ehci(hcd));
	usb_remove_hcd(hcd);

#ifdef CONFIG_EHCI_IRQ_DISTRIBUTION
	/* Undo the IRQ-affinity bookkeeping; only set up on SMP systems. */
	if (num_possible_cpus() > 1) {
		s5p_ehci_irq_no = 0;
		s5p_ehci_irq_cpu = 0;
		unregister_cpu_notifier(&s5p_ehci_cpu_notifier);
	}
#endif

#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB)
	/*HSIC IPC control the ACTIVE_STATE*/
	if (pdata && pdata->noti_host_states)
		pdata->noti_host_states(pdev, S5P_HOST_OFF);
#endif

	/* Board hooks are optional: platform data may be absent entirely. */
	if (pdata && pdata->phy_exit)
		pdata->phy_exit(pdev, S5P_USB_PHY_HOST);

	iounmap(hcd->regs);

	clk_disable(s5p_ehci->clk);
	clk_put(s5p_ehci->clk);

	usb_put_hcd(hcd);
	kfree(s5p_ehci);

	return 0;
}
/*
 * Module exit: detach the hotplug notifier first so no further CPU
 * events fire, then unregister and clear every per-CPU LED trigger,
 * and finally drop the syscore ops.
 */
static void __exit ledtrig_cpu_exit(void)
{
	int cpu;

	unregister_cpu_notifier(&ledtrig_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);

		led_trigger_unregister_simple(trig->_trig);
		trig->_trig = NULL;
		memset(trig->name, 0, MAX_NAME_LEN);
	}

	unregister_syscore_ops(&ledtrig_cpu_syscore_ops);
}
/*
 * Module exit: unhook the cpufreq policy/transition notifiers and the
 * CPU hotplug notifier, then — with hotplug locked out — invoke the
 * hotplug callback with CPU_DEAD for every online CPU to free its
 * per-CPU statistics.
 *
 * Fix: the '&not' prefix of both notifier-block arguments had been
 * mangled into the U+00AC character ('¬ifier_...'), which does not
 * compile; restored '&notifier_policy_block' / '&notifier_trans_block'.
 */
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);

	lock_cpu_hotplug();
	for_each_online_cpu(cpu) {
		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
				CPU_DEAD, (void *)(long)cpu);
	}
	unlock_cpu_hotplug();
}
/*
 * Shut down NMI profiling on all CPUs.  The ordering is load-bearing:
 * per-CPU shutdown runs with hotplug held off, the enable flags are
 * cleared and made globally visible (smp_mb) before the NMI handler is
 * removed, and only then are the saved MSRs restored and freed.
 */
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");

	/* get_cpu_var disables preemption while the model restores MSRs */
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
void ehca_destroy_comp_pool(void) { int i; if (!ehca_scaling_code) return; #ifdef CONFIG_HOTPLUG_CPU unregister_cpu_notifier(&comp_pool_callback_nb); #endif for (i = 0; i < NR_CPUS; i++) { if (cpu_online(i)) destroy_comp_task(pool, i); } free_percpu(pool->cpu_comp_tasks); kfree(pool); }
/*
 * Platform-driver remove callback for the Hisilicon K3 watchdog.
 * Stops the kick thread and its hotplug notifier, removes the misc
 * device, then releases MMIO, clock, memory and the mem region.
 * Always returns 0.
 */
static int __devexit hisik3_wdt_remove(struct platform_device *pdev)
{
	struct resource *res;

	/* begin: add by wufan w00163571 for use kernel thread kick watchdog 20121201 */
	/* NOTE(review): cast presumably drops a const qualifier on
	 * k3wdt_cpu_nfb — confirm against its declaration. */
	unregister_cpu_notifier((struct notifier_block *)&k3wdt_cpu_nfb);
	k3_wdt_kick_stop();
	/* end: add by wufan w00163571 for use kernel thread kick watchdog 20121201 */

	misc_deregister(&hisik3_wdt_miscdev);

	iounmap(wdt->base);

	clk_put(wdt->clk);

	kfree(wdt);
	wdt = NULL;	/* module-global; clear to avoid dangling use */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}
/*
 * Release per-CPU PMU state: detach the hotplug notifier before
 * freeing the per-CPU hardware-event storage it references.
 */
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
	free_percpu(cpu_pmu->hw_events);
}
/*
 * Native teardown of ring-0 MP notifications on Linux: unhook the CPU
 * hotplug notifier, saving/restoring EFLAGS.AC around the kernel call.
 */
DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
{
	IPRT_LINUX_SAVE_EFL_AC();
	unregister_cpu_notifier(&g_NotifierBlock);
	IPRT_LINUX_RESTORE_EFL_AC();
}
/*
 * Register the architected timer: allocate per-CPU clock-event devices,
 * request the timer PPIs (virtual, or secure physical plus optional
 * non-secure physical), install the CPU hotplug notifier and CPU-PM
 * hook, and configure the timer on the boot CPU.  On failure the steps
 * already taken are unwound in reverse order via the goto ladder.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			/* Roll back the secure IRQ if the non-secure one failed. */
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_unreg_notify:
	unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}
out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
/*
 * Platform-driver probe for the Hisilicon K3 watchdog.  Acquires the
 * MMIO region, clock and mapping, programs the default timeout, starts
 * the kernel kick thread with its CPU hotplug notifier, registers the
 * misc device, and finally enables the watchdog.  Every failure path
 * unwinds what was already acquired, in reverse order, via the goto
 * ladder.  Returns 0 on success or a negative errno.
 */
static int __devinit hisik3_wdt_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		dev_warn(&pdev->dev, "WDT memory resource not defined\n");
		goto err;
	}

	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
		dev_warn(&pdev->dev, "WDT failed to get memory region resource\n");
		ret = -ENOENT;
		goto err;
	}

	/* wdt is the module-global device state, freed on remove/error. */
	wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
	if (!wdt) {
		dev_warn(&pdev->dev, "WDT kzalloc failed\n");
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	wdt->clk = clk_get(NULL,"clk_wd");
	if (IS_ERR(wdt->clk)) {
		dev_warn(&pdev->dev, "WDT clock not found\n");
		ret = PTR_ERR(wdt->clk);
		goto err_clk_get;
	}

	wdt->base = ioremap(res->start, resource_size(res));
	if (!wdt->base) {
		ret = -ENOMEM;
		dev_warn(&pdev->dev, "WDT ioremap fail\n");
		goto err_ioremap;
	}

	spin_lock_init(&wdt->lock);

	/* This checks if system booted after watchdog reset or not */
	ret = clk_enable(wdt->clk);
	if (ret) {
		dev_warn(&pdev->dev, "clock enable fail");
		goto err_clk_enable;
	}

	wdt->pdev = pdev;

	wdt_default_init(DEFAULT_TIMEOUT);
	wdt_default_config();

	/* begin: add by wufan w00163571 for use kernel thread kick watchdog 20121201 */
	ret = k3_wdt_kick_start();
	if(ret)
		goto err_create_thread;
	register_cpu_notifier((struct notifier_block *)&k3wdt_cpu_nfb);
	/* end: add by wufan w00163571 for use kernel thread kick watchdog 20121201 */

	ret = misc_register(&hisik3_wdt_miscdev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "WDT cannot register misc device\n");
		goto err_misc_register;
	}

	wdt_enable();
	dev_warn(&pdev->dev,"WDT probing has been finished\n");

	return 0;

	/* Error unwinding: each label releases what was acquired above it. */
err_misc_register:
	/* begin: add by wufan w00163571 for use kernel thread kick watchdog 20121201 */
	unregister_cpu_notifier((struct notifier_block *)&k3wdt_cpu_nfb);
err_create_thread:
	k3_wdt_kick_stop();
	/* end: add by wufan w00163571 for use kernel thread kick watchdog 20121201 */
	clk_disable(wdt->clk);
err_clk_enable:
	iounmap(wdt->base);
err_ioremap:
	clk_put(wdt->clk);
err_clk_get:
	kfree(wdt);
	wdt = NULL;
err_kzalloc:
	release_mem_region(res->start, resource_size(res));
err:
	dev_warn(&pdev->dev, "WDT probe failed!!!\n");
	return ret;
}
/*
 * Module exit: remove the trace-counter CPU hotplug notifier.
 * Always returns 0.
 */
int __exit exit_tracecounters(void)
{
	unregister_cpu_notifier(&tracectr_cpu_hotplug_notifier_block);
	return 0;
}
/* Tear down tzdev migration support: remove its CPU hotplug notifier. */
void tzdev_fini_migration(void)
{
	unregister_cpu_notifier(&tzdev_cpu_notifier);
}
/*
 * Module exit: deregister the event-channel misc device before
 * dropping the CPU hotplug notifier.
 */
static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
	unregister_cpu_notifier(&evtchn_cpu_nfb);
}
/*
 * Native teardown of ring-0 MP notifications on Linux: unhook the CPU
 * hotplug notifier registered at init.
 */
DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
{
	unregister_cpu_notifier(&g_NotifierBlock);
}