/*
 * Tear down everything cstate init set up: drop both CPU hotplug states
 * first, then unregister whichever PMUs were actually registered.
 */
static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	/* Only the PMUs that were registered during init get unregistered. */
	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}
/*
 * Install the Xen HVM guest CPU hotplug callbacks: a PREPARE-stage pair
 * supplied by the caller plus a dynamic online callback.  On failure of
 * the second registration the first is rolled back.
 *
 * Returns 0 on success or a negative error code.
 */
int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
		    int (*cpu_dead_cb)(unsigned int))
{
	int rc;

	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
				       "x86/xen/hvm_guest:prepare",
				       cpu_up_prepare_cb, cpu_dead_cb);
	if (rc < 0)
		return rc;

	rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
				       "x86/xen/hvm_guest:online",
				       xen_cpu_up_online, NULL);
	if (rc < 0) {
		/* Undo the PREPARE-stage registration on failure. */
		cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
		return rc;
	}

	/* The dynamic state id is positive on success; callers expect 0. */
	return 0;
}
static int etm_probe(struct amba_device *adev, const struct amba_id *id) { int ret; void __iomem *base; struct device *dev = &adev->dev; struct coresight_platform_data *pdata = NULL; struct etm_drvdata *drvdata; struct resource *res = &adev->res; struct coresight_desc *desc; struct device_node *np = adev->dev.of_node; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; if (np) { pdata = of_get_coresight_platform_data(dev, np); if (IS_ERR(pdata)) return PTR_ERR(pdata); adev->dev.platform_data = pdata; drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14"); } drvdata->dev = &adev->dev; dev_set_drvdata(dev, drvdata); /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); drvdata->base = base; spin_lock_init(&drvdata->spinlock); drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ if (!IS_ERR(drvdata->atclk)) { ret = clk_prepare_enable(drvdata->atclk); if (ret) return ret; } drvdata->cpu = pdata ? 
pdata->cpu : 0; get_online_cpus(); etmdrvdata[drvdata->cpu] = drvdata; if (smp_call_function_single(drvdata->cpu, etm_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); if (!etm_count++) { cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING, "AP_ARM_CORESIGHT_STARTING", etm_starting_cpu, etm_dying_cpu); ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "AP_ARM_CORESIGHT_ONLINE", etm_online_cpu, NULL); if (ret < 0) goto err_arch_supported; hp_online = ret; } put_online_cpus(); if (etm_arch_supported(drvdata->arch) == false) { ret = -EINVAL; goto err_arch_supported; } etm_init_trace_id(drvdata); etm_set_default(&drvdata->config); desc->type = CORESIGHT_DEV_TYPE_SOURCE; desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; desc->ops = &etm_cs_ops; desc->pdata = pdata; desc->dev = dev; desc->groups = coresight_etm_groups; drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); goto err_arch_supported; } ret = etm_perf_symlink(drvdata->csdev, true); if (ret) { coresight_unregister(drvdata->csdev); goto err_arch_supported; } pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized\n", (char *)id->data); if (boot_enable) { coresight_enable(drvdata->csdev); drvdata->boot_enable = true; } return 0; err_arch_supported: if (--etm_count == 0) { cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); if (hp_online) cpuhp_remove_state_nocalls(hp_online); } return ret; }
/* Remove the dynamically allocated boost hotplug state, if it was set up. */
static void acpi_cpufreq_boost_exit(void)
{
	/* Negative means the dynamic state was never registered. */
	if (acpi_cpufreq_online < 0)
		return;

	cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
/*
 * Module exit: cstate_cleanup() already removes both the
 * CPUHP_AP_PERF_X86_CSTATE_ONLINE and CPUHP_AP_PERF_X86_CSTATE_STARTING
 * hotplug states before unregistering the PMUs, so removing them here as
 * well (as the original code did) removed the same states twice.
 */
static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
/*
 * Driver teardown: quiesce the counters on every CPU, drop the hotplug
 * callback, then restore the perf IRQ handler that was saved at init.
 * The order matters: counters must be stopped before the handler swap.
 */
static void loongson3_exit(void)
{
	on_each_cpu(reset_counters, NULL, 1);
	cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
	perf_irq = save_perf_irq;
}
/* Module exit: drop the hotplug callback, then unregister the PMU. */
static void __exit amd_power_pmu_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
	perf_pmu_unregister(&pmu_class);
}
/*
 * b15_rac_init - probe and enable the Broadcom Brahma-B15/B53 read-ahead
 * cache (RAC) found in the BIU control block.
 *
 * Fixes vs. original:
 *  - The out_cpu_dead label removed CPUHP_AP_ARM_CACHE_B15_RAC_DYING, but
 *    that label is reached when registering the DYING state *failed*; it
 *    must remove the successfully-installed ..._RAC_DEAD state instead.
 *  - The cpu_dn-missing and unsupported-CPU error paths returned without
 *    unmapping b15_rac_base; the unwind labels are restructured so every
 *    acquired resource (mapping, reboot notifier, DEAD state) is released
 *    exactly once on its own failure path.
 */
static int __init b15_rac_init(void)
{
	struct device_node *dn, *cpu_dn;
	int ret = 0, cpu;
	u32 reg, en_mask = 0;

	dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!dn)
		return -ENODEV;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		goto out;

	b15_rac_base = of_iomap(dn, 0);
	if (!b15_rac_base) {
		pr_err("failed to remap BIU control base\n");
		ret = -ENOMEM;
		goto out;
	}

	cpu_dn = of_get_cpu_node(0, NULL);
	if (!cpu_dn) {
		ret = -ENODEV;
		goto out_unmap;
	}

	/* The RAC flush register offset differs between B15 and B53. */
	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
		rac_flush_offset = B15_RAC_FLUSH_REG;
	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
		rac_flush_offset = B53_RAC_FLUSH_REG;
	else {
		pr_err("Unsupported CPU\n");
		of_node_put(cpu_dn);
		ret = -EINVAL;
		goto out_unmap;
	}
	of_node_put(cpu_dn);

	/* The RAC must be flushed/disabled across reboot. */
	ret = register_reboot_notifier(&b15_rac_reboot_nb);
	if (ret) {
		pr_err("failed to register reboot notifier\n");
		goto out_unmap;
	}

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
					"arm/cache-b15-rac:dead",
					NULL, b15_rac_dead_cpu);
		if (ret)
			goto out_unreg;

		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
					"arm/cache-b15-rac:dying",
					NULL, b15_rac_dying_cpu);
		if (ret)
			goto out_cpu_dead;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP))
		register_syscore_ops(&b15_rac_syscore_ops);

	spin_lock(&rac_lock);
	reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	for_each_possible_cpu(cpu)
		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
	WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");

	b15_rac_enable();
	set_bit(RAC_ENABLED, &b15_rac_flags);
	spin_unlock(&rac_lock);

	pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
		b15_rac_base + RAC_CONFIG0_REG);

	goto out;

out_cpu_dead:
	/* The DEAD state registered above, not DYING, must be undone here. */
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD);
out_unreg:
	unregister_reboot_notifier(&b15_rac_reboot_nb);
out_unmap:
	iounmap(b15_rac_base);
out:
	of_node_put(dn);
	return ret;
}