static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity = d->affinity;
        struct irq_chip *c;
        bool ret = false;

        /*
         * If this is a per-CPU interrupt, or the affinity does not
         * include this CPU, then we have nothing to do.
         */
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;

        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                affinity = cpu_online_mask;
                ret = true;
        }

        c = irq_data_get_irq_chip(d);
        if (c->irq_set_affinity)
                c->irq_set_affinity(d, affinity, true);
        else
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);

        return ret;
}
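/*
 * Editor's illustrative sketch (not from the kernel tree): every snippet in
 * this collection leans on the same cpumask_any_and() convention -- it
 * returns the index of some CPU present in both masks, or a value >=
 * nr_cpu_ids when the intersection is empty.  The standalone userspace C
 * program below mimics that convention with plain bitmasks; the names
 * mask_any_and() and NR_CPUS_DEMO are hypothetical stand-ins for the
 * kernel's cpumask API and nr_cpu_ids.
 */
#include <stdio.h>

#define NR_CPUS_DEMO 8                          /* stand-in for nr_cpu_ids */

/* Return the index of any bit set in both masks, or NR_CPUS_DEMO if none. */
static unsigned int mask_any_and(unsigned long a, unsigned long b)
{
        unsigned long both = a & b;
        unsigned int bit;

        for (bit = 0; bit < NR_CPUS_DEMO; bit++)
                if (both & (1UL << bit))
                        return bit;
        return NR_CPUS_DEMO;
}

int main(void)
{
        unsigned long affinity = 0x0c;          /* IRQ may target CPUs 2 and 3 */
        unsigned long online   = 0x03;          /* but only CPUs 0 and 1 are online */

        if (mask_any_and(affinity, online) >= NR_CPUS_DEMO) {
                /* Same case the snippets handle: no online target remains,
                 * so fall back to a broader mask ("break affinity"). */
                affinity = online;
                printf("affinity broken, retargeting to the online mask\n");
        }
        return 0;
}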
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
                                      const struct cpumask *dest, bool force)
{
        unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);

        set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
        return 0;
}
void fixup_irqs(void)
{
        unsigned int irq, vector;
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                raw_spin_lock(&desc->lock);

                data = irq_desc_get_irq_data(desc);
                affinity = data->affinity;
                if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
                    cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                irq_force_complete_move(irq);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }

                chip = irq_data_get_irq_chip(data);
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);

                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, affinity, true);
                else if (!(warned++))
                        set_affinity = 0;

                if (!irqd_can_move_in_process_context(data) &&
                    !irqd_irq_masked(data) && chip->irq_unmask)
                        chip->irq_unmask(data);

                raw_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }
}
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq, vector;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                spin_lock(&desc->lock);

                affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        spin_unlock(&desc->lock);
                        continue;
                }

                /*
                 * Complete the irq move. This cpu is going down and for
                 * non intr-remapping case, we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
                irq_force_complete_move(irq);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }

                if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
                        desc->chip->mask(irq);

                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (!(warned++))
                        set_affinity = 0;

                if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
                        desc->chip->unmask(irq);

                spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }
}
/**
 * speedstep_target - set a new CPUFreq policy
 * @policy: new policy
 * @index: index of target frequency
 *
 * Sets a new CPUFreq policy.
 */
static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
        unsigned int policy_cpu;

        policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);

        smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
                                 true);

        return 0;
}
void irq_move_masked_irq(struct irq_data *idata)
{
        struct irq_desc *desc = irq_data_to_desc(idata);
        struct irq_chip *chip = idata->chip;

        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;

        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
        if (!irqd_can_balance(&desc->irq_data)) {
                WARN_ON(1);
                return;
        }

        irqd_clr_move_pending(&desc->irq_data);

        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;

        if (!chip->irq_set_affinity)
                return;

        assert_raw_spin_locked(&desc->lock);

        /*
         * If there was a valid mask to work with, please
         * do the disable, re-program, enable sequence.
         * This is *not* particularly important for level triggered
         * but in an edge trigger case, we might be setting rte
         * when an active trigger is coming in. This could
         * cause some ioapics to malfunction.
         * Being paranoid, I guess!
         *
         * For correct operation this depends on the caller
         * masking the irqs.
         */
        if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids)) {
                int ret = chip->irq_set_affinity(&desc->irq_data,
                                                 desc->pending_mask, false);
                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
                        /* fall through */
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                }
        }

        cpumask_clear(desc->pending_mask);
}
void move_masked_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;

        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                WARN_ON(1);
                return;
        }

        desc->status &= ~IRQ_MOVE_PENDING;

        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;

        if (!desc->chip->set_affinity)
                return;

        assert_spin_locked(&desc->lock);

        /*
         * If there was a valid mask to work with, please
         * do the disable, re-program, enable sequence.
         * This is *not* particularly important for level triggered
         * but in an edge trigger case, we might be setting rte
         * when an active trigger is coming in. This could
         * cause some ioapics to malfunction.
         * Being paranoid, I guess!
         *
         * For correct operation this depends on the caller
         * masking the irqs.
         */
        if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids)) {
                int ret = desc->chip->set_affinity(irq, desc->pending_mask);

                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(desc->affinity, desc->pending_mask);
                        /* fall through */
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                }
        }

        cpumask_clear(desc->pending_mask);
}
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                atomic_spin_lock(&desc->lock);

                affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        atomic_spin_unlock(&desc->lock);
                        continue;
                }

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }

                if (desc->chip->mask)
                        desc->chip->mask(irq);

                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (!(warned++))
                        set_affinity = 0;

                if (desc->chip->unmask)
                        desc->chip->unmask(irq);

                atomic_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }
}
static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity = d->affinity;
        bool ret = false;

        /*
         * If this is a per-CPU interrupt, or the affinity does not
         * include this CPU, then we have nothing to do.
         */
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;

        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                affinity = cpu_online_mask;
                ret = true;
        }

        return ret;
}
static bool migrate_one_irq(struct irq_data *d)
{
        unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
        bool ret = false;

        if (cpu >= nr_cpu_ids) {
                cpu = cpumask_any(cpu_online_mask);
                ret = true;
        }

        pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);

        d->chip->irq_set_affinity(d, cpumask_of(cpu), true);

        return ret;
}
/*
 * Since cpu_online_mask is already updated, we just need to check for
 * affinity that has zeros
 */
static void migrate_irqs(void)
{
        int irq, new_cpu;

        for (irq = 0; irq < NR_IRQS; irq++) {
                struct irq_desc *desc = irq_to_desc(irq);
                struct irq_data *data = irq_desc_get_irq_data(desc);
                struct irq_chip *chip = irq_data_get_irq_chip(data);

                if (irqd_irq_disabled(data))
                        continue;

                /*
                 * No handling for now.
                 * TBD: Implement a disable function so we can tell the CPU
                 * not to respond to these local intr sources, such as
                 * ITV, CPEI, MCA etc.
                 */
                if (irqd_is_per_cpu(data))
                        continue;

                if (cpumask_any_and(data->affinity, cpu_online_mask)
                    >= nr_cpu_ids) {
                        /*
                         * Save it for phase 2 processing
                         */
                        vectors_in_migration[irq] = irq;

                        new_cpu = cpumask_any(cpu_online_mask);

                        /*
                         * All three are essential; currently WARN_ON, maybe panic?
                         */
                        if (chip && chip->irq_disable &&
                            chip->irq_enable && chip->irq_set_affinity) {
                                chip->irq_disable(data);
                                chip->irq_set_affinity(data,
                                                       cpumask_of(new_cpu), false);
                                chip->irq_enable(data);
                        } else {
                                WARN_ON((!chip || !chip->irq_disable ||
                                         !chip->irq_enable ||
                                         !chip->irq_set_affinity));
                        }
                }
        }
}
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
        unsigned int policy_cpu;
        struct get_freqs gf;

        /* only run on CPU to be set, or on its sibling */
#ifdef CONFIG_SMP
        cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
        policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);

        /* detect low and high frequency and transition latency */
        gf.policy = policy;
        smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
        if (gf.ret)
                return gf.ret;

        return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int shift = (gic_irq(d) % 4) * 8;
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        u32 val, mask, bit;

        if (cpu >= 8 || cpu >= nr_cpu_ids)
                return -EINVAL;

        mask = 0xff << shift;
        bit = 1 << (cpu_logical_map(cpu) + shift);

        raw_spin_lock(&irq_controller_lock);
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        raw_spin_unlock(&irq_controller_lock);

        return IRQ_SET_MASK_OK;
}
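/*
 * Editor's illustrative sketch (not from the kernel tree): the GIC
 * distributor's GIC_DIST_TARGET block packs one 8-bit CPU-target field per
 * interrupt, four interrupts to a 32-bit register, which is what the
 * "& ~3" / "% 4 * 8" arithmetic in gic_set_affinity() above selects.  The
 * helper name gic_target_field() below is hypothetical; the program only
 * checks the offset/shift math in isolation.
 */
#include <assert.h>

static void gic_target_field(unsigned int hwirq,
                             unsigned int *reg_offset, unsigned int *shift)
{
        *reg_offset = hwirq & ~3u;      /* word-aligned register holding this IRQ */
        *shift = (hwirq % 4) * 8;       /* byte lane of this IRQ within that word */
}

int main(void)
{
        unsigned int off, shift;

        gic_target_field(42, &off, &shift);
        assert(off == 40 && shift == 16);       /* IRQ 42: byte 2 of register 40 */
        return 0;
}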
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
                if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }

        cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
        desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
                                              desc->irq_data.affinity, false);

        return 0;
}
static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity = d->affinity;
        struct irq_chip *c;
        bool ret = false;

        if (irqd_is_per_cpu(d) ||
            !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;

        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                affinity = cpu_online_mask;
                ret = true;
        }

        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
        else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);

        return ret;
}
int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
{
        int idx = 0;
        int task_pri = convert_prio(p->prio);

        if (task_pri >= MAX_RT_PRIO)
                return 0;

        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
                int skip = 0;

                if (!atomic_read(&(vec)->count))
                        skip = 1;

                smp_rmb();

                if (skip)
                        continue;

                if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                        continue;

                if (lowest_mask) {
                        cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

                        if (cpumask_any(lowest_mask) >= nr_cpu_ids)
                                continue;
                }

                return 1;
        }

        return 0;
}
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq, vector;
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;
        int ret;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                raw_spin_lock(&desc->lock);

                data = irq_desc_get_irq_data(desc);
                affinity = irq_data_get_affinity_mask(data);
                if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
                    cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                /*
                 * Complete the irq move. This cpu is going down and for
                 * non intr-remapping case, we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
                irq_force_complete_move(desc);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_online_mask;
                }

                chip = irq_data_get_irq_chip(data);
                /*
                 * The interrupt descriptor might have been cleaned up
                 * already, but it is not yet removed from the radix tree
                 */
                if (!chip) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);

                if (chip->irq_set_affinity) {
                        ret = chip->irq_set_affinity(data, affinity, true);
                        if (ret == -ENOSPC)
                                pr_crit("IRQ %d set affinity failed because there are no available vectors. The device assigned to this IRQ is unstable.\n",
                                        irq);
                } else {
                        if (!(warned++))
                                set_affinity = 0;
                }

                /*
                 * We unmask if the irq was not marked masked by the
                 * core code. That respects the lazy irq disable
                 * behaviour.
                 */
                if (!irqd_can_move_in_process_context(data) &&
                    !irqd_irq_masked(data) && chip->irq_unmask)
                        chip->irq_unmask(data);

                raw_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        pr_notice("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        pr_notice("Cannot set affinity for irq %i\n", irq);
        }
}
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq, vector;
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                raw_spin_lock(&desc->lock);

                data = irq_desc_get_irq_data(desc);
                affinity = data->affinity;
                if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
                    cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                /*
                 * Complete the irq move. This cpu is going down and for
                 * non intr-remapping case, we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
                irq_force_complete_move(irq);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_online_mask;
                }

                chip = irq_data_get_irq_chip(data);
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);

                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, affinity, true);
                else if (!(warned++))
                        set_affinity = 0;

                /*
                 * We unmask if the irq was not marked masked by the
                 * core code. That respects the lazy irq disable
                 * behaviour.
                 */
                if (!irqd_can_move_in_process_context(data) &&
                    !irqd_irq_masked(data) && chip->irq_unmask)
                        chip->irq_unmask(data);

                raw_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        pr_notice("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        pr_notice("Cannot set affinity for irq %i\n", irq);
        }
}
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
{
        int idx = 0;
        int task_pri = convert_prio(p->prio);

        if (task_pri >= MAX_RT_PRIO)
                return 0;

        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
                int skip = 0;

                if (!atomic_read(&(vec)->count))
                        skip = 1;

                /*
                 * When looking at the vector, we need to read the counter,
                 * do a memory barrier, then read the mask.
                 *
                 * Note: This is still all racy, but we can deal with it.
                 * Ideally, we only want to look at masks that are set.
                 *
                 * If a mask is not set, then the only thing wrong is that we
                 * did a little more work than necessary.
                 *
                 * If we read a zero count but the mask is set, because of the
                 * memory barriers, that can only happen when the highest prio
                 * task for a run queue has left the run queue, in which case,
                 * it will be followed by a pull. If the task we are processing
                 * fails to find a proper place to go, that pull request will
                 * pull this task if the run queue is running at a lower
                 * priority.
                 */
                smp_rmb();

                /* Need to do the rmb for every iteration */
                if (skip)
                        continue;

                if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                        continue;

                if (lowest_mask) {
                        cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

                        /*
                         * We have to ensure that we have at least one bit
                         * still set in the array, since the map could have
                         * been concurrently emptied between the first and
                         * second reads of vec->mask. If we hit this
                         * condition, simply act as though we never hit this
                         * priority level and continue on.
                         */
                        if (cpumask_any(lowest_mask) >= nr_cpu_ids)
                                continue;
                }

                return 1;
        }

        return 0;
}
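/*
 * Editor's illustrative sketch (not from the kernel tree): a userspace C11
 * rendering of the ordering the comment in cpupri_find() above describes --
 * the updater publishes the mask bit before bumping the per-priority
 * counter, so a reader that observes a non-zero count (with the matching
 * barrier, expressed here as acquire/release) is guaranteed to also observe
 * the mask bit.  The names vec_count, vec_mask, vec_publish and vec_has_cpu
 * are hypothetical stand-ins for the cpupri_vec fields and the
 * cpupri_set()/cpupri_find() logic.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int vec_count;
static atomic_ulong vec_mask;

/* Updater: set the mask bit first, then make the vector "visible". */
static void vec_publish(unsigned int cpu)
{
        atomic_fetch_or_explicit(&vec_mask, 1UL << cpu, memory_order_relaxed);
        atomic_fetch_add_explicit(&vec_count, 1, memory_order_release);
}

/* Reader: mirrors "read the counter, barrier, read the mask" above. */
static bool vec_has_cpu(unsigned int cpu)
{
        if (!atomic_load_explicit(&vec_count, memory_order_acquire))
                return false;   /* skip an apparently empty vector */

        return atomic_load_explicit(&vec_mask, memory_order_relaxed) &
               (1UL << cpu);
}

int main(void)
{
        vec_publish(3);
        return vec_has_cpu(3) ? 0 : 1;  /* single-threaded smoke test */
}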