/*
 * cpufreq_stats_init - module init for cpufreq statistics.
 *
 * Registers a cpufreq policy notifier and a transition notifier, then a
 * CPU hotplug notifier, and finally forces a policy update on every
 * online CPU so stats tables are created for CPUs that are already up.
 *
 * Fix: the notifier arguments had been corrupted by HTML-entity
 * mangling ("&not..." rendered as U+00AC "¬..."), which is not valid C;
 * the intended address-of expressions are restored here.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_register_notifier(&notifier_policy_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	ret = cpufreq_register_notifier(&notifier_trans_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* unwind the policy notifier before bailing out */
		cpufreq_unregister_notifier(&notifier_policy_block,
					    CPUFREQ_POLICY_NOTIFIER);
		return ret;
	}

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_update_policy(cpu);
	}
	return 0;
}
/*
 * ina3221_probe - I2C probe for the INA3221 3-channel power monitor.
 *
 * Resets the chip, creates its sysfs attributes, hooks a CPU hotplug
 * notifier, registers with hwmon, and finally powers the chip down
 * until a reading is requested.
 *
 * Fix: the original error paths taken after register_hotcpu_notifier()
 * and hwmon_device_register() jumped straight to exit_remove, leaking
 * the live notifier and the hwmon device. Dedicated unwind labels now
 * release them in reverse order of acquisition.
 */
static int __devinit ina3221_probe(struct i2c_client *client,
				   const struct i2c_device_id *id)
{
	struct ina3221_data *data;
	int ret, i;

	data = devm_kzalloc(&client->dev, sizeof(struct ina3221_data),
			    GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto exit;
	}
	i2c_set_clientdata(client, data);
	data->plat_data = client->dev.platform_data;
	mutex_init(&data->mutex);
	data->mode = TRIGGERED;
	data->shutdown_complete = 0;

	/* reset ina3221 */
	ret = i2c_smbus_write_word_data(client, INA3221_CONFIG,
		__constant_cpu_to_be16((INA3221_RESET)));
	if (ret < 0) {
		dev_err(&client->dev, "ina3221 reset failure status: 0x%x\n",
			ret);
		goto exit_free;
	}

	for (i = 0; i < ARRAY_SIZE(ina3221); i++) {
		ret = device_create_file(&client->dev, &ina3221[i].dev_attr);
		if (ret) {
			dev_err(&client->dev, "device_create_file failed.\n");
			goto exit_remove;
		}
	}

	data->client = client;
	data->nb.notifier_call = ina3221_hotplug_notify;
	register_hotcpu_notifier(&(data->nb));

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		ret = PTR_ERR(data->hwmon_dev);
		goto exit_notifier;	/* notifier is live: unhook it */
	}

	/* set ina3221 to power down mode */
	ret = __locked_power_down_ina3221(client);
	if (ret < 0)
		goto exit_hwmon;

	return 0;

exit_hwmon:
	hwmon_device_unregister(data->hwmon_dev);
exit_notifier:
	unregister_hotcpu_notifier(&(data->nb));
exit_remove:
	/* removes only the attributes that were actually created */
	while (i--)
		device_remove_file(&client->dev, &ina3221[i].dev_attr);
exit_free:
	devm_kfree(&client->dev, data);
exit:
	return ret;
}
/*
 * blk_softirq_init - boot-time setup for the block-completion softirq.
 * Initializes every possible CPU's completion list, wires up the
 * softirq handler, and tracks CPU hotplug so dead CPUs' work migrates.
 */
static __init int blk_softirq_init(void)
{
	int cpu;

	/* Per-CPU completion lists start out empty. */
	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, cpu));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}
/*
 * tboot_late_init - late init hooks for Intel TXT (tboot).
 * Does nothing unless the platform booted through tboot; otherwise
 * arms the AP wait-for-SIPI counter, watches CPU hotplug, and routes
 * ACPI sleep preparation through tboot.
 */
static __init int tboot_late_init(void)
{
	if (tboot_enabled()) {
		atomic_set(&ap_wfs_count, 0);
		register_hotcpu_notifier(&tboot_cpu_notifier);
		acpi_os_set_prepare_sleep(&tboot_sleep);
	}
	return 0;
}
/*
 * blk_iopoll_setup - boot-time setup for block iopoll.
 * Mirrors blk_softirq_init: empty per-CPU iopoll lists, softirq
 * handler registration, and CPU hotplug tracking.
 */
static __init int blk_iopoll_setup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, cpu));

	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq, NULL);
	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
	return 0;
}
static void acpi_processor_install_hotplug_notify(void) { #ifdef CONFIG_ACPI_HOTPLUG_CPU int action = INSTALL_NOTIFY_HANDLER; acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, processor_walk_namespace_cb, &action, NULL); #endif register_hotcpu_notifier(&acpi_cpu_notifier); }
/*
 * jtag_mm_etm_probe - per-CPU probe for the ETM JTAG save/restore driver.
 *
 * Allocates and maps the ETM context for @cpu, registers the hotplug
 * notifier on the first probed core, then queries the ETM architecture
 * on the target CPU itself to decide whether driver-side save/restore
 * is needed (vs. TrustZone handling it).
 *
 * Returns 0 on success or a negative errno on allocation/resource
 * failure. NOTE(review): 'cnt' and 'etm[]' appear to be file-scope
 * state shared across probes — confirm against the rest of the file.
 */
static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
{
	struct etm_ctx *etmdata;
	struct resource *res;
	struct device *dev = &pdev->dev;

	/* Allocate memory per cpu */
	etmdata = devm_kzalloc(dev, sizeof(struct etm_ctx), GFP_KERNEL);
	if (!etmdata)
		return -ENOMEM;

	etm[cpu] = etmdata;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "etm-base");
	if (!res)
		return -ENODEV;

	etmdata->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!etmdata->base)
		return -EINVAL;

	/* Allocate etm state save space per core */
	etmdata->state = devm_kzalloc(dev,
				      MAX_ETM_STATE_SIZE * sizeof(uint64_t),
				      GFP_KERNEL);
	if (!etmdata->state)
		return -ENOMEM;

	spin_lock_init(&etmdata->spinlock);
	mutex_init(&etmdata->mutex);

	/* Only the first probed core registers the hotplug notifier. */
	if (cnt++ == 0)
		register_hotcpu_notifier(&jtag_mm_etm_notifier);

	/* smp_call_function_single() returns 0 on success, so init is
	 * only marked true when the arch query actually ran on @cpu. */
	if (!smp_call_function_single(cpu, etm_init_arch_data, etmdata, 1))
		etmdata->init = true;

	if (etmdata->init) {
		mutex_lock(&etmdata->mutex);
		if (etm_arch_supported(etmdata->arch)) {
			/* Old TZ versions can't save/restore ETM state;
			 * the driver must do it itself in that case. */
			if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
			    TZ_DBG_ETM_VER)
				etmdata->save_restore_enabled = true;
			else
				pr_info("etm save-restore supported by TZ\n");
		} else
			pr_info("etm arch %u not supported\n", etmdata->arch);
		etmdata->enable = true;
		mutex_unlock(&etmdata->mutex);
	}
	return 0;
}
/*
 * topology_sysfs_init - populate CPU topology sysfs entries.
 * Replays a CPU_ONLINE event for each CPU already up, then registers
 * the notifier so later hotplug events are handled the same way.
 */
static int __cpuinit topology_sysfs_init(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		topology_cpu_callback(&topology_cpu_notifier, CPU_ONLINE,
				      (void *)(long)cpu);

	register_hotcpu_notifier(&topology_cpu_notifier);
	return 0;
}
/*
 * err_inject_init - set up the CPU-notifier error-injection debugfs dir
 * and hook the injecting notifier; tears the directory down again if
 * registration fails.
 */
static int err_inject_init(void)
{
	int ret;

	dir = notifier_err_inject_init("cpu", notifier_err_inject_dir,
				       &cpu_notifier_err_inject, priority);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	ret = register_hotcpu_notifier(&cpu_notifier_err_inject.nb);
	if (ret)
		debugfs_remove_recursive(dir);

	return ret;
}
/*
 * platform_hotplug_init - cache ACPU system-controller register
 * addresses for up to 8 cores and (on 32-bit ARM) register the CPU
 * up/down hotplug notifiers.
 */
static int __init platform_hotplug_init(void)
{
	/* Resolve the SCTRL virtual base once; each per-core control
	 * register address is derived from it. */
	acpu_sctrl_base_addr =
		(unsigned long)HISI_VA_ADDRESS(SOC_ACPU_SCTRL_BASE_ADDR);

	g_acpu_core_sc_baseaddr[0] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU0_CTRL_ADDR(acpu_sctrl_base_addr);
	g_acpu_core_sc_baseaddr[1] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU1_CTRL_ADDR(acpu_sctrl_base_addr);
	g_acpu_core_sc_baseaddr[2] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU2_CTRL_ADDR(acpu_sctrl_base_addr);
	g_acpu_core_sc_baseaddr[3] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU3_CTRL_ADDR(acpu_sctrl_base_addr);
	g_acpu_core_sc_baseaddr[4] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU4_CTRL_ADDR(acpu_sctrl_base_addr);
	g_acpu_core_sc_baseaddr[5] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU5_CTRL_ADDR(acpu_sctrl_base_addr);
	g_acpu_core_sc_baseaddr[6] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU6_CTRL_ADDR(acpu_sctrl_base_addr);
	g_acpu_core_sc_baseaddr[7] =
		SOC_ACPU_SCTRL_ACPU_SC_CPU7_CTRL_ADDR(acpu_sctrl_base_addr);

	printk("%s: %lu %lu %lu %lu\n", __FUNCTION__,
	       g_acpu_core_sc_baseaddr[0], g_acpu_core_sc_baseaddr[1],
	       g_acpu_core_sc_baseaddr[2], g_acpu_core_sc_baseaddr[3]);
	printk("%s: %lu %lu %lu %lu\n", __FUNCTION__,
	       g_acpu_core_sc_baseaddr[4], g_acpu_core_sc_baseaddr[5],
	       g_acpu_core_sc_baseaddr[6], g_acpu_core_sc_baseaddr[7]);

#ifndef CONFIG_ARM64
	register_hotcpu_notifier(&platform_cpu_up_notifier);
	register_hotcpu_notifier(&platform_cpu_down_notifier);
#endif
	return 0;
}
/*
 * pnpmgr_init - create the /sys/power/pnpmgr hierarchy and attribute
 * groups, then register the CPU hotplug notifier.
 *
 * Fix: the original returned straight out of every failure branch,
 * leaking pnpmgr_kobj and any child kobjects that had already been
 * created. Error paths now unwind via kobject_put() (a NULL-safe
 * no-op), releasing whatever was allocated.
 */
static int __init pnpmgr_init(void)
{
	int ret;

	init_timer(&app_timer);
	app_timer.function = app_timeout_handler;
	INIT_DELAYED_WORK(&touch_boost_work, touch_boost_handler);

	pnpmgr_kobj = kobject_create_and_add("pnpmgr", power_kobj);
	if (!pnpmgr_kobj) {
		pr_err("%s: Can not allocate enough memory for pnpmgr.\n",
		       __func__);
		return -ENOMEM;
	}

	cpufreq_kobj = kobject_create_and_add("cpufreq", pnpmgr_kobj);
	hotplug_kobj = kobject_create_and_add("hotplug", pnpmgr_kobj);
	thermal_kobj = kobject_create_and_add("thermal", pnpmgr_kobj);
	apps_kobj = kobject_create_and_add("apps", pnpmgr_kobj);
	sysinfo_kobj = kobject_create_and_add("sysinfo", pnpmgr_kobj);
	battery_kobj = kobject_create_and_add("battery", pnpmgr_kobj);
	adaptive_policy_kobj = kobject_create_and_add("adaptive_policy",
						      power_kobj);
	if (!cpufreq_kobj || !hotplug_kobj || !thermal_kobj || !apps_kobj ||
	    !sysinfo_kobj || !battery_kobj || !adaptive_policy_kobj) {
		pr_err("%s: Can not allocate enough memory.\n", __func__);
		ret = -ENOMEM;
		goto err_put;
	}

	/* OR-ing preserves "any failure" semantics; the individual codes
	 * are all negative errnos. */
	ret = sysfs_create_group(cpufreq_kobj, &cpufreq_attr_group);
	ret |= sysfs_create_group(hotplug_kobj, &hotplug_attr_group);
	ret |= sysfs_create_group(thermal_kobj, &thermal_attr_group);
	ret |= sysfs_create_group(apps_kobj, &apps_attr_group);
	ret |= sysfs_create_group(sysinfo_kobj, &sysinfo_attr_group);
	ret |= sysfs_create_group(battery_kobj, &battery_attr_group);
	ret |= sysfs_create_group(adaptive_policy_kobj, &adaptive_attr_group);
	if (ret) {
		pr_err("%s: sysfs_create_group failed\n", __func__);
		goto err_put;
	}

#ifdef CONFIG_HOTPLUG_CPU
	register_hotcpu_notifier(&cpu_hotplug_notifier);
#endif
	return 0;

err_put:
	/* kobject_put(NULL) is a no-op, so partial creation is safe. */
	kobject_put(adaptive_policy_kobj);
	kobject_put(battery_kobj);
	kobject_put(sysinfo_kobj);
	kobject_put(apps_kobj);
	kobject_put(thermal_kobj);
	kobject_put(hotplug_kobj);
	kobject_put(cpufreq_kobj);
	kobject_put(pnpmgr_kobj);
	return ret;
}
int oprofile_timer_init(struct oprofile_operations *ops) { int rc; rc = register_hotcpu_notifier(&oprofile_cpu_notifier); if (rc) return rc; ops->create_files = NULL; ops->setup = NULL; ops->shutdown = NULL; ops->start = oprofile_hrtimer_start; ops->stop = oprofile_hrtimer_stop; ops->cpu_type = "timer"; printk(KERN_INFO "oprofile: using timer interrupt.\n"); return 0; }
/*
 * acpi_processor_driver_init - register the ACPI processor driver and
 * its auxiliary subsystems (thermal-cpufreq, PPC, throttling).
 * A disabled ACPI stack is not an error: the init simply does nothing.
 */
static int __init acpi_processor_driver_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	ret = driver_register(&acpi_processor_driver);
	if (ret < 0)
		return ret;

	register_hotcpu_notifier(&acpi_cpu_notifier);
	acpi_thermal_cpufreq_init();
	acpi_processor_ppc_init();
	acpi_processor_throttling_init();
	return 0;
}
/*
 * err_inject_init - init for the IA-64 error injection driver.
 * Replays CPU_ONLINE for every CPU already up so their sysfs entries
 * appear, then registers the notifier for future hotplug events.
 */
static int __init err_inject_init(void)
{
	int cpu;

#ifdef ERR_INJ_DEBUG
	printk(KERN_INFO "Enter error injection driver.\n");
#endif
	for_each_online_cpu(cpu)
		err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
					(void *)(long)cpu);

	register_hotcpu_notifier(&err_inject_cpu_notifier);
	return 0;
}
static int cpufreq_stats_setup(void) { int ret; unsigned int cpu; spin_lock_init(&cpufreq_stats_lock); ret = cpufreq_register_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER); if (ret) return ret; register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) cpufreq_update_policy(cpu); ret = cpufreq_register_notifier(¬ifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); if (ret) { cpufreq_unregister_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER); unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) cpufreq_stats_free_table(cpu); return ret; } create_all_freq_table(); ret = sysfs_create_file(cpufreq_global_kobject, &_attr_all_time_in_state.attr); if (ret) pr_warn("Cannot create sysfs file for cpufreq stats\n"); ret = sysfs_create_file(cpufreq_global_kobject, &_attr_current_in_state.attr); if (ret) pr_warn("Cannot create sysfs file for cpufreq current stats\n"); create_bL_freq_table(); ret = sysfs_create_file(cpufreq_global_kobject, &_attr_bL_all_time_in_state.attr); if (ret) pr_warn("Error creating sysfs file for bL cpufreq stats\n"); return 0; }
/*
 * palinfo_init - create /proc/pal entries for the IA-64 PAL facility.
 *
 * Fix: proc_mkdir() can fail and return NULL; the original never
 * checked, so create_palinfo_proc_entries() would have operated on a
 * NULL parent. Bail out with -ENOMEM instead.
 */
static int __init palinfo_init(void)
{
	int i = 0;

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
	palinfo_dir = proc_mkdir("pal", NULL);
	if (!palinfo_dir)
		return -ENOMEM;

	/* Create palinfo dirs in /proc for all online cpus */
	for_each_online_cpu(i) {
		create_palinfo_proc_entries(i);
	}

	/* Register for future delivery via notify registration */
	register_hotcpu_notifier(&palinfo_cpu_notifier);
	return 0;
}
static void irq_affinity_init(struct work_struct *dummy) { int ret, cpu, i; char irq_affinity_key[48]; memset(irq_affinity_records, 0, sizeof(irq_affinity_records)); for (i = 0; i < NR_SOC_IRQS; i++) { snprintf(irq_affinity_key, sizeof(irq_affinity_key), irq_affinity_key_prefix, i); ret = get_hw_config_int(irq_affinity_key, &cpu, NULL); if (ret == false) continue; if (cpu < 0) continue; if (cpu > NR_CPUS) continue; irq_affinity_register_sysctl_table(i, cpu); /*init the records. as default, irqs binds to CPU0*/ irq_affinity_records[i] = cpu; ret = irq_set_affinity(i, cpumask_of(cpu)); if (ret < 0) printk(KERN_ERR "%s %d : failed to irq_set_affinity %d errno %d\r\n", __FUNCTION__, __LINE__, i, ret); } ret = register_hotcpu_notifier(&cpu_up_notifier); if (ret < 0){ printk(KERN_ERR"%s : register_hotcpu_notifier failed %d !\n",__FUNCTION__, ret); } ret = register_pm_notifier(&post_suspend_notifier); if (ret < 0){ printk(KERN_ERR"%s : register_pm_notifier failed %d !\n",__FUNCTION__, ret); } kfree(dummy); return; }
static __init int tboot_late_init(void) { if (!tboot_enabled()) return 0; tboot_create_trampoline(); atomic_set(&ap_wfs_count, 0); register_hotcpu_notifier(&tboot_cpu_notifier); #ifdef CONFIG_DEBUG_FS debugfs_create_file("tboot_log", S_IRUSR, arch_debugfs_dir, NULL, &tboot_log_fops); #endif acpi_os_set_prepare_sleep(&tboot_sleep); acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep); return 0; }
/*
 * cpuid_init - register the /dev/cpu/X/cpuid character devices.
 * Grabs the major, creates the class and one device per online CPU
 * (under get_online_cpus() so hotplug cannot race), then hooks the
 * hotplug notifier. Unwinds in reverse order on any failure.
 */
static int __init cpuid_init(void)
{
	int cpu = 0;
	int err = 0;

	if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid",
			      &cpuid_fops)) {
		printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
		       CPUID_MAJOR);
		err = -EBUSY;
		goto out;
	}

	cpuid_class = class_create(THIS_MODULE, "cpuid");
	if (IS_ERR(cpuid_class)) {
		err = PTR_ERR(cpuid_class);
		goto out_chrdev;
	}
	cpuid_class->devnode = cpuid_devnode;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		err = cpuid_device_create(cpu);
		if (err != 0)
			goto out_class;
	}
	register_hotcpu_notifier(&cpuid_class_cpu_notifier);
	put_online_cpus();

	err = 0;
	goto out;

out_class:
	cpu = 0;
	for_each_online_cpu(cpu) {
		cpuid_device_destroy(cpu);
	}
	put_online_cpus();
	class_destroy(cpuid_class);
out_chrdev:
	__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
out:
	return err;
}
/*
 * softirq_init - boot-time softirq setup.
 * Points each CPU's tasklet list tails at their heads (empty lists),
 * initializes the remote-softirq work lists, hooks CPU hotplug, and
 * opens the two tasklet softirqs.
 */
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int nr;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (nr = 0; nr < NR_SOFTIRQS; nr++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[nr], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
/*
 * pnpmgr_init - create the /sys/power/pnpmgr hierarchy (cpufreq,
 * hotplug, thermal, apps plus adaptive_policy under power), populate
 * each with its attribute group, and register the hotplug notifier.
 */
static int __init pnpmgr_init(void)
{
	int rc;

	init_waitqueue_head(&sysfs_state_wq);

	/* Root directory: /sys/power/pnpmgr */
	pnpmgr_kobj = kobject_create_and_add("pnpmgr", power_kobj);
	if (!pnpmgr_kobj) {
		pr_err("%s: Can not allocate enough memory for pnpmgr.\n",
		       __func__);
		return -ENOMEM;
	}

	cpufreq_kobj = kobject_create_and_add("cpufreq", pnpmgr_kobj);
	hotplug_kobj = kobject_create_and_add("hotplug", pnpmgr_kobj);
	thermal_kobj = kobject_create_and_add("thermal", pnpmgr_kobj);
	apps_kobj = kobject_create_and_add("apps", pnpmgr_kobj);
	adaptive_policy_kobj = kobject_create_and_add("adaptive_policy",
						      power_kobj);
	if (!cpufreq_kobj || !hotplug_kobj || !thermal_kobj ||
	    !apps_kobj || !adaptive_policy_kobj) {
		pr_err("%s: Can not allocate enough memory.\n", __func__);
		return -ENOMEM;
	}

	rc = sysfs_create_group(cpufreq_kobj, &cpufreq_attr_group);
	rc |= sysfs_create_group(hotplug_kobj, &hotplug_attr_group);
	rc |= sysfs_create_group(thermal_kobj, &thermal_attr_group);
	rc |= sysfs_create_group(apps_kobj, &apps_attr_group);
	rc |= sysfs_create_group(adaptive_policy_kobj, &adaptive_attr_group);
	if (rc) {
		pr_err("%s: sysfs_create_group failed\n", __func__);
		return rc;
	}

#ifdef CONFIG_HOTPLUG_CPU
	register_hotcpu_notifier(&cpu_hotplug_notifier);
#endif
	return 0;
}
static int cpu_boost_init(void) { int cpu, ret; struct cpu_sync *s; cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER); cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0); if (!cpu_boost_wq) return -EFAULT; INIT_WORK(&input_boost_work, do_input_boost); for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); s->cpu = cpu; init_waitqueue_head(&s->sync_wq); spin_lock_init(&s->lock); INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem); INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem); s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu, "boost_sync/%d", cpu); set_cpus_allowed(s->thread, *cpumask_of(cpu)); } atomic_notifier_chain_register(&migration_notifier_head, &boost_migration_nb); ret = input_register_handler(&cpuboost_input_handler); return 0; if (ret) pr_err("Cannot register cpuboost input handler.\n"); ret = register_hotcpu_notifier(&cpu_nblk); if (ret) pr_err("Cannot register cpuboost hotplug handler.\n"); notif.notifier_call = fb_notifier_callback; return ret; }
/*
 * start_tbs - start timer-based sampling on all CPUs.
 * @is_start_paused: when true, sampling is armed but left paused.
 * Returns 0 on success or the hotplug-notifier registration error.
 */
int start_tbs(bool is_start_paused)
{
	int ret = register_hotcpu_notifier(&cpu_notifier_for_timer);

	if (ret)
		return ret;

	tbs_running = !is_start_paused;
	on_each_cpu(__start_tbs, NULL, 1);
	return 0;
}
/*
 * cpufreq_stats_init - module init for cpufreq statistics (variant
 * that also builds per-CPU all-frequency stats and a sorted global
 * frequency table).
 *
 * Fix: the cpufreq notifier arguments had been corrupted by
 * HTML-entity mangling ("&not..." rendered as U+00AC "¬..."), which is
 * not valid C; the intended address-of expressions are restored.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_register_notifier(&notifier_policy_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* unwind everything set up above */
		cpufreq_unregister_notifier(&notifier_policy_block,
					    CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	create_all_freq_table();
	for_each_possible_cpu(cpu) {
		cpufreq_allstats_create(cpu);
	}

	/* keep the aggregated table sorted for stable sysfs output */
	if (all_freq_table && all_freq_table->freq_table)
		sort(all_freq_table->freq_table, all_freq_table->table_size,
		     sizeof(unsigned int), &compare_for_sort, NULL);

	ret = sysfs_create_file(cpufreq_global_kobject,
				&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Error creating sysfs file for cpufreq stats\n");

	return 0;
}
/*
 * msr_init - register the /dev/cpu/X/msr character devices.
 * Same shape as cpuid_init: major registration, class creation, one
 * device per online CPU under hotplug protection, then the notifier.
 * Every failure unwinds what was built before it.
 */
static int __init msr_init(void)
{
	int cpu = 0;
	int err = 0;

	if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
		printk(KERN_ERR "msr: unable to get major %d for msr\n",
		       MSR_MAJOR);
		err = -EBUSY;
		goto out;
	}

	msr_class = class_create(THIS_MODULE, "msr");
	if (IS_ERR(msr_class)) {
		err = PTR_ERR(msr_class);
		goto out_chrdev;
	}
	msr_class->devnode = msr_devnode;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		err = msr_device_create(cpu);
		if (err != 0)
			goto out_class;
	}
	register_hotcpu_notifier(&msr_class_cpu_notifier);
	put_online_cpus();

	err = 0;
	goto out;

out_class:
	cpu = 0;
	for_each_online_cpu(cpu)
		msr_device_destroy(cpu);
	put_online_cpus();
	class_destroy(msr_class);
out_chrdev:
	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
out:
	return err;
}
static __init int thermal_throttle_init_device(void) { unsigned int cpu = 0; int err; if (!atomic_read(&therm_throt_en)) return 0; register_hotcpu_notifier(&thermal_throttle_cpu_notifier); #ifdef CONFIG_HOTPLUG_CPU mutex_lock(&therm_cpu_lock); #endif /* connect live CPUs to sysfs */ for_each_online_cpu(cpu) { err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); WARN_ON(err); } #ifdef CONFIG_HOTPLUG_CPU mutex_unlock(&therm_cpu_lock); #endif return 0; }
/*
 * init_hotplug_rtb - hook the RTB (register trace buffer) CPU hotplug
 * notifier; the registration result is the init result.
 */
static int __init init_hotplug_rtb(void)
{
	int rc;

	rc = register_hotcpu_notifier(&hotplug_rtb_notifier);
	return rc;
}
/*
 * etm_probe - AMBA probe for a CoreSight ETM trace source.
 *
 * Allocates driver data, pulls the bound CPU from the (optional) DT
 * platform data, maps the ETM registers, runs the OS-unlock and
 * arch-discovery routines on the target CPU itself, registers the
 * shared hotplug notifier on the first probed ETM (refcounted via
 * etm_count), and finally registers the device with the CoreSight
 * framework.
 *
 * Returns 0 on success or a negative errno; the error path drops the
 * etm_count reference and unregisters the notifier when this was the
 * only ETM.
 */
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
		drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	/* No DT platform data means this ETM is assumed to trace CPU0. */
	drvdata->cpu = pdata ? pdata->cpu : 0;

	/* Hold hotplug off while the per-CPU slot and the cross-CPU
	 * init calls are in flight. */
	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	/* smp_call_function_single() returns 0 on success. */
	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* First ETM instance registers the shared hotplug notifier. */
	if (!etm_count++)
		register_hotcpu_notifier(&etm_cpu_notifier);
	put_online_cpus();

	if (etm_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm_init_default_data(drvdata);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);

	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	/* Drop our refcount; last one out unregisters the notifier. */
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);
	return ret;
}
/*
 * register_shx3_cpu_notifier - hook the SH-X3 CPU hotplug notifier at
 * init time; registration itself is assumed to succeed.
 */
static int __cpuinit register_shx3_cpu_notifier(void)
{
	register_hotcpu_notifier(&shx3_cpu_notifier);

	return 0;
}
/*
 * ina230_probe - I2C probe for the INA230 power monitor.
 *
 * Resets the chip, creates its sysfs attributes, registers with hwmon
 * and the CPU hotplug notifier, clears the mask register, and powers
 * the chip down until needed.
 *
 * Fixes: (1) failures after hwmon_device_register() and
 * register_hotcpu_notifier() previously jumped to exit_remove,
 * leaking the hwmon device and leaving the notifier registered;
 * a dedicated unwind label now releases both. (2) the removal loop
 * iterated over the full attribute array even when creation failed
 * partway, removing sysfs files that were never created; 'while (i--)'
 * removes exactly the ones that exist.
 */
static int __devinit ina230_probe(struct i2c_client *client,
				  const struct i2c_device_id *id)
{
	struct ina230_data *data;
	int err;
	u8 i;

	data = devm_kzalloc(&client->dev, sizeof(struct ina230_data),
			    GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto exit;
	}

	i2c_set_clientdata(client, data);
	data->pdata = client->dev.platform_data;
	data->running = false;
	data->nb.notifier_call = ina230_hotplug_notify;
	data->client = client;
	mutex_init(&data->mutex);

	err = i2c_smbus_write_word_data(client, INA230_CONFIG,
		__constant_cpu_to_be16(INA230_RESET));
	if (err < 0) {
		dev_err(&client->dev, "ina230 reset failure status: 0x%x\n",
			err);
		goto exit;
	}

	for (i = 0; i < ARRAY_SIZE(ina230); i++) {
		err = device_create_file(&client->dev, &ina230[i].dev_attr);
		if (err) {
			dev_err(&client->dev, "device_create_file failed.\n");
			goto exit_remove;
		}
	}

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove;
	}

	register_hotcpu_notifier(&(data->nb));

	err = i2c_smbus_write_word_data(client, INA230_MASK, 0);
	if (err < 0) {
		dev_err(&client->dev, "mask write failure sts: 0x%x\n", err);
		goto exit_unwind;
	}

	/* set ina230 to power down mode */
	err = i2c_smbus_write_word_data(client, INA230_CONFIG,
		__constant_cpu_to_be16(INA230_POWER_DOWN));
	if (err < 0) {
		dev_err(&client->dev, "power down failure sts: 0x%x\n", err);
		goto exit_unwind;
	}

	return 0;

exit_unwind:
	unregister_hotcpu_notifier(&(data->nb));
	hwmon_device_unregister(data->hwmon_dev);
exit_remove:
	/* i is ARRAY_SIZE on the unwind path, so all files are removed */
	while (i--)
		device_remove_file(&client->dev, &ina230[i].dev_attr);
exit:
	return err;
}