for_each_vcpu ( d, v )
{
    spinlock_t *lock;

    vcpudata = v->sched_priv;

    migrate_timer(&v->periodic_timer, new_p);
    migrate_timer(&v->singleshot_timer, new_p);
    migrate_timer(&v->poll_timer, new_p);

    cpumask_setall(v->cpu_hard_affinity);
    cpumask_setall(v->cpu_soft_affinity);

    lock = vcpu_schedule_lock_irq(v);
    v->processor = new_p;
    /*
     * With v->processor modified we must not
     * - make any further changes assuming we hold the scheduler lock,
     * - use vcpu_schedule_unlock_irq().
     */
    spin_unlock_irq(lock);

    v->sched_priv = vcpu_priv[v->vcpu_id];
    if ( !d->is_dying )
        sched_move_irqs(v);

    new_p = cpumask_cycle(new_p, c->cpu_valid);

    SCHED_OP(c->sched, insert_vcpu, v);

    SCHED_OP(old_ops, free_vdata, vcpudata);
}
int arm_pmu_device_probe(struct platform_device *pdev,
                         const struct of_device_id *of_table,
                         const struct pmu_probe_info *probe_table)
{
    const struct of_device_id *of_id;
    armpmu_init_fn init_fn;
    struct device_node *node = pdev->dev.of_node;
    struct arm_pmu *pmu;
    int ret = -ENODEV;

    pmu = armpmu_alloc();
    if (!pmu)
        return -ENOMEM;

    pmu->plat_device = pdev;

    ret = pmu_parse_irqs(pmu);
    if (ret)
        goto out_free;

    if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
        init_fn = of_id->data;

        pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
                                                   "secure-reg-access");

        /* arm64 systems boot only as non-secure */
        if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
            pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
            pmu->secure_access = false;
        }

        ret = init_fn(pmu);
    } else if (probe_table) {
        cpumask_setall(&pmu->supported_cpus);
        ret = probe_current_pmu(pmu, probe_table);
    }

    if (ret) {
        pr_info("%pOF: failed to probe PMU!\n", node);
        goto out_free;
    }

    ret = armpmu_request_irqs(pmu);
    if (ret)
        goto out_free_irqs;

    ret = armpmu_register(pmu);
    if (ret)
        goto out_free;

    return 0;

out_free_irqs:
    armpmu_free_irqs(pmu);
out_free:
    pr_info("%pOF: failed to register PMU devices!\n", node);
    armpmu_free(pmu);
    return ret;
}
void __init x3_bpc_mgmt_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_MACH_VU10)
    int tegra_bpc_gpio;
    int int_gpio;

    if (x3_get_hw_rev_pcb_version() == hw_rev_pcb_type_A) {
        tegra_bpc_gpio = TEGRA_GPIO_PJ6;
    } else {
        tegra_bpc_gpio = TEGRA_GPIO_PX6;
    }
    bpc_mgmt_platform_data.gpio_trigger = tegra_bpc_gpio;

    int_gpio = TEGRA_GPIO_TO_IRQ(tegra_bpc_gpio);
    tegra_gpio_enable(tegra_bpc_gpio);
#else
    int int_gpio = TEGRA_GPIO_TO_IRQ(TEGRA_BPC_TRIGGER);

    tegra_gpio_enable(TEGRA_BPC_TRIGGER);
#endif
    cpumask_setall(&(bpc_mgmt_platform_data.affinity_mask));

    irq_set_affinity_hint(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
    irq_set_affinity(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
#endif
    platform_device_register(&x3_bpc_mgmt_device);

    return;
}
static void __init init_irq_default_affinity(void)
{
    if (!cpumask_available(irq_default_affinity))
        zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
    if (cpumask_empty(irq_default_affinity))
        cpumask_setall(irq_default_affinity);
}
static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
    struct irq_desc *desc;
    unsigned long flags;

    desc = irq_to_desc(irq);
    if (!desc) {
        WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
        return;
    }

    /* Ensure we don't have left over values from a previous use of this irq */
    raw_spin_lock_irqsave(&desc->lock, flags);
    desc->status = IRQ_DISABLED;
    desc->chip = &no_irq_chip;
    desc->handle_irq = handle_bad_irq;
    desc->depth = 1;
    desc->msi_desc = NULL;
    desc->handler_data = NULL;
    if (!keep_chip_data)
        desc->chip_data = NULL;
    desc->action = NULL;
    desc->irq_count = 0;
    desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
    cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
    cpumask_clear(desc->pending_mask);
#endif
#endif
    raw_spin_unlock_irqrestore(&desc->lock, flags);
}
static int _stp_data_open_trace(struct inode *inode, struct file *file)
{
    struct _stp_iterator *iter = inode->i_private;
#ifdef STP_BULKMODE
    int cpu_file = iter->cpu_file;
#endif

    /* We only allow for one reader per cpu */
    dbug_trans(1, "trace attach\n");
#ifdef STP_BULKMODE
    if (!cpumask_test_cpu(cpu_file, _stp_relay_data.trace_reader_cpumask))
        cpumask_set_cpu(cpu_file, _stp_relay_data.trace_reader_cpumask);
    else {
        dbug_trans(1, "returning EBUSY\n");
        return -EBUSY;
    }
#else
    if (!cpumask_empty(_stp_relay_data.trace_reader_cpumask)) {
        dbug_trans(1, "returning EBUSY\n");
        return -EBUSY;
    }
    cpumask_setall(_stp_relay_data.trace_reader_cpumask);
#endif
    file->private_data = inode->i_private;
    return 0;
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm)
{
    int global;
    int cpu;

    /*
     * If there is only one vcore, and it's currently running,
     * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
     * we can use tlbiel as long as we mark all other physical
     * cores as potentially having stale TLB entries for this lpid.
     * Otherwise, don't use tlbiel.
     */
    if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
        global = 0;
    else
        global = 1;

    if (!global) {
        /* any other core might now have stale TLB entries... */
        smp_wmb();
        cpumask_setall(&kvm->arch.need_tlb_flush);
        cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
        /*
         * On POWER9, threads are independent but the TLB is shared,
         * so use the bit for the first thread to represent the core.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
            cpu = cpu_first_thread_sibling(cpu);
        cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
    }

    return global;
}
/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
    int i;

    memset(cp, 0, sizeof(*cp));
    raw_spin_lock_init(&cp->lock);
    cp->size = 0;

    cp->elements = kcalloc(nr_cpu_ids, sizeof(struct cpudl_item), GFP_KERNEL);
    if (!cp->elements)
        return -ENOMEM;

    if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
        kfree(cp->elements);
        return -ENOMEM;
    }

    for_each_possible_cpu(i)
        cp->elements[i].idx = IDX_INVALID;

    cpumask_setall(cp->free_cpus);

    return 0;
}
void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
                                        struct cpumask *slow)
{
    struct device_node *cn = NULL;
    int cpu;

    cpumask_clear(fast);
    cpumask_clear(slow);

    /*
     * Use the config options if they are given. This helps testing
     * HMP scheduling on systems without a big.LITTLE architecture.
     */
    if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
        if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
            WARN(1, "Failed to parse HMP fast cpu mask!\n");
        if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
            WARN(1, "Failed to parse HMP slow cpu mask!\n");
        return;
    }

    /*
     * Else, parse device tree for little cores.
     */
    while ((cn = of_find_node_by_type(cn, "cpu"))) {
        const u32 *mpidr;
        int len;

        mpidr = of_get_property(cn, "reg", &len);
        if (!mpidr || len != 4) {
            pr_err("* %s missing reg property\n", cn->full_name);
            continue;
        }

        cpu = get_logical_index(be32_to_cpup(mpidr));
        if (cpu == -EINVAL) {
            pr_err("couldn't get logical index for mpidr %x\n",
                   be32_to_cpup(mpidr));
            break;
        }

        if (is_little_cpu(cn))
            cpumask_set_cpu(cpu, slow);
        else
            cpumask_set_cpu(cpu, fast);
    }

    if (!cpumask_empty(fast) && !cpumask_empty(slow))
        return;

    /*
     * We didn't find both big and little cores so let's call all cores
     * fast as this will keep the system running, with all cores being
     * treated equal.
     */
    cpumask_setall(fast);
    cpumask_clear(slow);
}
// ARM10C 20141004
static void __init init_irq_default_affinity(void)
{
    // GFP_NOWAIT: 0
    // alloc_cpumask_var(&irq_default_affinity, 0): 0
    alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
    cpumask_setall(irq_default_affinity);
    // irq_default_affinity->bits[0]: 0xF
}
static void __init init_irq_default_affinity(void)
{
    alloc_bootmem_cpumask_var(&irq_default_affinity);
#if defined(CONFIG_MIPS_BRCM)
    cpumask_set_cpu(0, (cpumask_t *)&irq_default_affinity);
#else
    cpumask_setall(irq_default_affinity);
#endif
}
void __init init_xenbus_allowed_cpumask(void)
{
    if (!alloc_cpumask_var(&xenbus_allowed_cpumask, GFP_KERNEL))
        BUG();
    cpumask_copy(xenbus_allowed_cpumask, cpu_present_mask);

    if (!alloc_cpumask_var(&local_allowed_cpumask, GFP_KERNEL))
        BUG();
    cpumask_setall(local_allowed_cpumask);
}
static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
    if (!irq_default_affinity)
        zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
    if (cpumask_empty(irq_default_affinity))
        cpumask_setall(irq_default_affinity);
}
static void __init init_irq_default_affinity(void)
{
    alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#ifdef CONFIG_SCHED_HMP
    cpumask_copy(irq_default_affinity, &hmp_slow_cpu_mask);
#else
    cpumask_setall(irq_default_affinity);
#endif
}
int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
    struct domain *d = v->domain;

    /*
     * Initialize processor and affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
     */
    v->processor = processor;
    if ( is_idle_domain(d) || d->is_pinned )
        cpumask_copy(v->cpu_hard_affinity, cpumask_of(processor));
    else
        cpumask_setall(v->cpu_hard_affinity);

    cpumask_setall(v->cpu_soft_affinity);

    /* Initialise the per-vcpu timers. */
    init_timer(&v->periodic_timer, vcpu_periodic_timer_fn, v, v->processor);
    init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn, v, v->processor);
    init_timer(&v->poll_timer, poll_timer_fn, v, v->processor);

    /* Idle VCPUs are scheduled immediately. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        v->is_running = 1;
    }

    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);

    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
    if ( v->sched_priv == NULL )
        return 1;

    SCHED_OP(DOM2OP(d), insert_vcpu, v);

    return 0;
}
void tzdev_init_migration(void)
{
    cpumask_setall(&tzdev_cpu_mask[CLUSTER_BIG]);
    cpumask_clear(&tzdev_cpu_mask[CLUSTER_LITTLE]);

    if (strlen(CONFIG_HMP_FAST_CPU_MASK))
        cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, &tzdev_cpu_mask[CLUSTER_BIG]);
    else
        pr_notice("All CPUs are equal, core migration will do nothing.\n");

    cpumask_andnot(&tzdev_cpu_mask[CLUSTER_LITTLE], cpu_present_mask,
                   &tzdev_cpu_mask[CLUSTER_BIG]);

    register_cpu_notifier(&tzdev_cpu_notifier);
}
/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
    int i;

    memset(cp, 0, sizeof(*cp));
    raw_spin_lock_init(&cp->lock);
    cp->size = 0;
    for (i = 0; i < NR_CPUS; i++)
        cp->cpu_to_idx[i] = IDX_INVALID;

    if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
        return -ENOMEM;
    cpumask_setall(cp->free_cpus);

    return 0;
}
void __init enterprise_bpc_mgmt_init(void)
{
    int int_gpio = TEGRA_GPIO_TO_IRQ(TEGRA_BPC_TRIGGER);

    tegra_gpio_enable(TEGRA_BPC_TRIGGER);

#ifdef CONFIG_SMP
    cpumask_setall(&(bpc_mgmt_platform_data.affinity_mask));

    irq_set_affinity_hint(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
    irq_set_affinity(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
#endif
    platform_device_register(&enterprise_bpc_mgmt_device);

    return;
}
int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
{
    int cpu_dest;

    /* timer and ipi have to always be received on all CPUs */
    if (CHECK_IRQ_PER_CPU(irq)) {
        /* Bad linux design decision.  The mask has already
         * been set; we must reset it */
        cpumask_setall(irq_desc[irq].affinity);
        return -EINVAL;
    }

    /* whatever mask they set, we just allow one CPU */
    cpu_dest = first_cpu(*dest);

    return cpu_dest;
}
static int mc_timer_init(void)
{
    cpumask_t cpu;

    mc_timer_thread = kthread_create(kthread_worker_fn, &mc_timer_worker,
                                     "mc_timer");
    if (IS_ERR(mc_timer_thread)) {
        mc_timer_thread = NULL;
        pr_err("%s: timer thread creation failed!", __func__);
        return -EFAULT;
    }

    wake_up_process(mc_timer_thread);

    cpumask_setall(&cpu);
    cpumask_clear_cpu(DEFAULT_BIG_CORE, &cpu);
    set_cpus_allowed(mc_timer_thread, cpu);

    hrtimer_init(&mc_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    mc_hrtimer.function = mc_hrtimer_func;
    return 0;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
    struct cpufreq_frequency_table *freq_table;
    struct private_data *priv;
    struct device *cpu_dev;
    struct clk *cpu_clk;
    struct dev_pm_opp *suspend_opp;
    unsigned int transition_latency;
    bool opp_v1 = false;
    const char *name;
    int ret;

    cpu_dev = get_cpu_device(policy->cpu);
    if (!cpu_dev) {
        pr_err("failed to get cpu%d device\n", policy->cpu);
        return -ENODEV;
    }

    cpu_clk = clk_get(cpu_dev, NULL);
    if (IS_ERR(cpu_clk)) {
        ret = PTR_ERR(cpu_clk);
        dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
        return ret;
    }

    /* Get OPP-sharing information from "operating-points-v2" bindings */
    ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
    if (ret) {
        /*
         * operating-points-v2 not supported, fallback to old method of
         * finding shared-OPPs for backward compatibility.
         */
        if (ret == -ENOENT)
            opp_v1 = true;
        else
            goto out_put_clk;
    }

    /*
     * OPP layer will be taking care of regulators now, but it needs to know
     * the name of the regulator first.
     */
    name = find_supply_name(cpu_dev);
    if (name) {
        ret = dev_pm_opp_set_regulator(cpu_dev, name);
        if (ret) {
            dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
                    policy->cpu, ret);
            goto out_put_clk;
        }
    }

    /*
     * Initialize OPP tables for all policy->cpus. They will be shared by
     * all CPUs which have marked their CPUs shared with OPP bindings.
     *
     * For platforms not using operating-points-v2 bindings, we do this
     * before updating policy->cpus. Otherwise, we will end up creating
     * duplicate OPPs for policy->cpus.
     *
     * OPPs might be populated at runtime, don't check for error here
     */
    dev_pm_opp_of_cpumask_add_table(policy->cpus);

    /*
     * But we need OPP table to function so if it is not there let's
     * give platform code chance to provide it for us.
     */
    ret = dev_pm_opp_get_opp_count(cpu_dev);
    if (ret <= 0) {
        dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
        ret = -EPROBE_DEFER;
        goto out_free_opp;
    }

    if (opp_v1) {
        struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();

        if (!pd || !pd->independent_clocks)
            cpumask_setall(policy->cpus);

        /*
         * OPP tables are initialized only for policy->cpu, do it for
         * others as well.
         */
        ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
        if (ret)
            dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                    __func__, ret);
    }

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        ret = -ENOMEM;
        goto out_free_opp;
    }

    priv->reg_name = name;

    ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
    if (ret) {
        dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
        goto out_free_priv;
    }

    priv->cpu_dev = cpu_dev;
    policy->driver_data = priv;
    policy->clk = cpu_clk;

    rcu_read_lock();
    suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
    if (suspend_opp)
        policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
    rcu_read_unlock();

    ret = cpufreq_table_validate_and_show(policy, freq_table);
    if (ret) {
        dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, ret);
        goto out_free_cpufreq_table;
    }

    /* Support turbo/boost mode */
    if (policy_has_boost_freq(policy)) {
        /* This gets disabled by core on driver unregister */
        ret = cpufreq_enable_boost_support();
        if (ret)
            goto out_free_cpufreq_table;
        cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
    }

    transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
    if (!transition_latency)
        transition_latency = CPUFREQ_ETERNAL;

    policy->cpuinfo.transition_latency = transition_latency;

    return 0;

out_free_cpufreq_table:
    dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
    kfree(priv);
out_free_opp:
    dev_pm_opp_of_cpumask_remove_table(policy->cpus);
    if (name)
        dev_pm_opp_put_regulator(cpu_dev);
out_put_clk:
    clk_put(cpu_clk);

    return ret;
}
static void __init init_irq_default_affinity(void)
{
    alloc_bootmem_cpumask_var(&irq_default_affinity);
    cpumask_setall(irq_default_affinity);
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
    struct cpufreq_dt_platform_data *pd;
    struct cpufreq_frequency_table *freq_table;
    struct thermal_cooling_device *cdev;
    struct device_node *np;
    struct private_data *priv;
    struct device *cpu_dev;
    struct regulator *cpu_reg;
    struct clk *cpu_clk;
    unsigned long min_uV = ~0, max_uV = 0;
    unsigned int transition_latency;
    int ret;

    ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
    if (ret) {
        pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
        return ret;
    }

    np = of_node_get(cpu_dev->of_node);
    if (!np) {
        dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
        ret = -ENOENT;
        goto out_put_reg_clk;
    }

    /* OPPs might be populated at runtime, don't check for error here */
    of_init_opp_table(cpu_dev);

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        ret = -ENOMEM;
        goto out_put_node;
    }

    of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

    if (of_property_read_u32(np, "clock-latency", &transition_latency))
        transition_latency = CPUFREQ_ETERNAL;

    if (!IS_ERR(cpu_reg)) {
        unsigned long opp_freq = 0;

        /*
         * Disable any OPPs where the connected regulator isn't able to
         * provide the specified voltage and record minimum and maximum
         * voltage levels.
         */
        while (1) {
            struct dev_pm_opp *opp;
            unsigned long opp_uV, tol_uV;

            rcu_read_lock();
            opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
            if (IS_ERR(opp)) {
                rcu_read_unlock();
                break;
            }
            opp_uV = dev_pm_opp_get_voltage(opp);
            rcu_read_unlock();

            tol_uV = opp_uV * priv->voltage_tolerance / 100;
            if (regulator_is_supported_voltage(cpu_reg, opp_uV,
                                               opp_uV + tol_uV)) {
                if (opp_uV < min_uV)
                    min_uV = opp_uV;
                if (opp_uV > max_uV)
                    max_uV = opp_uV;
            } else {
                dev_pm_opp_disable(cpu_dev, opp_freq);
            }

            opp_freq++;
        }

        ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
        if (ret > 0)
            transition_latency += ret * 1000;
    }

    ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
    if (ret) {
        pr_err("failed to init cpufreq table: %d\n", ret);
        goto out_free_priv;
    }

    /*
     * For now, just loading the cooling device;
     * thermal DT code takes care of matching them.
     */
    if (of_find_property(np, "#cooling-cells", NULL)) {
        cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
        if (IS_ERR(cdev))
            dev_err(cpu_dev,
                    "running cpufreq without cooling device: %ld\n",
                    PTR_ERR(cdev));
        else
            priv->cdev = cdev;
    }

    priv->cpu_dev = cpu_dev;
    priv->cpu_reg = cpu_reg;
    policy->driver_data = priv;

    policy->clk = cpu_clk;
    ret = cpufreq_table_validate_and_show(policy, freq_table);
    if (ret) {
        dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, ret);
        goto out_cooling_unregister;
    }

    policy->cpuinfo.transition_latency = transition_latency;

    pd = cpufreq_get_driver_data();
    if (!pd || !pd->independent_clocks)
        cpumask_setall(policy->cpus);

    of_node_put(np);

    return 0;

out_cooling_unregister:
    cpufreq_cooling_unregister(priv->cdev);
    dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
    kfree(priv);
out_put_node:
    of_node_put(np);
out_put_reg_clk:
    clk_put(cpu_clk);
    if (!IS_ERR(cpu_reg))
        regulator_put(cpu_reg);

    return ret;
}
static void __init init_irq_default_affinity(void)
{
    alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
    cpumask_setall(irq_default_affinity);
}
static int pmu_parse_irqs(struct arm_pmu *pmu)
{
    int i = 0, num_irqs;
    struct platform_device *pdev = pmu->plat_device;
    struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

    num_irqs = platform_irq_count(pdev);
    if (num_irqs < 0) {
        pr_err("unable to count PMU IRQs\n");
        return num_irqs;
    }

    /*
     * In this case we have no idea which CPUs are covered by the PMU.
     * To match our prior behaviour, we assume all CPUs in this case.
     */
    if (num_irqs == 0) {
        pr_warn("no irqs for PMU, sampling events not supported\n");
        pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
        cpumask_setall(&pmu->supported_cpus);
        return 0;
    }

    if (num_irqs == 1) {
        int irq = platform_get_irq(pdev, 0);
        if (irq && irq_is_percpu(irq))
            return pmu_parse_percpu_irq(pmu, irq);
    }

    if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
        pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
                pdev->dev.of_node);
    }

    /*
     * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
     * special platdata function that attempts to demux them.
     */
    if (dev_get_platdata(&pdev->dev))
        cpumask_setall(&pmu->supported_cpus);

    for (i = 0; i < num_irqs; i++) {
        int cpu, irq;

        irq = platform_get_irq(pdev, i);
        if (WARN_ON(irq <= 0))
            continue;

        if (irq_is_percpu(irq)) {
            pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
            return -EINVAL;
        }

        cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
        if (cpu < 0)
            return cpu;
        if (cpu >= nr_cpu_ids)
            continue;

        if (per_cpu(hw_events->irq, cpu)) {
            pr_warn("multiple PMU IRQs for the same CPU detected\n");
            return -EINVAL;
        }

        per_cpu(hw_events->irq, cpu) = irq;
        cpumask_set_cpu(cpu, &pmu->supported_cpus);
    }

    return 0;
}