static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->common->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity) {
		pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);
	} else {
		int r = irq_do_set_affinity(d, affinity, false);

		if (r)
			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
					    d->irq, r);
	}

	return ret;
}
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}
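For context, a minimal sketch of the CPU-offline caller that is expected to drive migrate_one_irq(): it walks every descriptor under the descriptor lock with local interrupts off and reports IRQs whose affinity had to be broken. The exact warning text and locking shown here follow the usual ARM migrate_irqs() pattern and should be treated as an assumption, not a verbatim listing.

/*
 * Sketch of the expected caller, run on the CPU going offline:
 * take each descriptor lock, attempt the migration, and warn if
 * the user-set affinity could not be preserved.
 */
void migrate_irqs(void)
{
	unsigned int i;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    i, smp_processor_id());
	}

	local_irq_restore(flags);
}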
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}
}
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	if (irqd_is_per_cpu(d))
		return -EINVAL;

	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE, per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	return ret;
}
/*
 * Since cpu_online_mask is already updated, we just need to check for
 * affinity that has zeros
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can now
		 * tell CPU not to respond to these local intr sources.
		 * such as ITV,CPEI,MCA etc.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * Al three are essential, currently WARN_ON.. maybe panic?
			 */
			if (chip && chip->irq_disable &&
			    chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					 !chip->irq_enable ||
					 !chip->irq_set_affinity));
			}
		}
	}
}
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in a edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
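The comment above stresses that correct operation depends on the caller masking the interrupt first. As a hedged illustration, here is a sketch of such a caller in the shape of the generic irq_move_irq() wrapper: mask if not already masked, perform the pending move, then unmask. Treat the disabled-state early return as an assumption about the particular kernel version rather than a guaranteed detail.

/*
 * Sketch of a caller that provides the masking irq_move_masked_irq()
 * relies on: mask the line (unless already masked), do the deferred
 * affinity change, then restore the previous mask state.
 */
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful with already-masked interrupts: only unmask at the
	 * end if we were the ones who masked here.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}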
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupt's are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}
}
/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpu_clear(this_cpu, online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpu_clear(this_cpu, affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpu's vector_irq[] (for example IOAPIC cluster
			 * mode).  In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is
			 * this the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is the down'd cpu is the
			 * last online cpu in a user set affinity mask.
			 */
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] < 0)
				count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
	int cpu;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
		/*
		 * Protect against concurrent action removal, affinity
		 * changes etc.
		 */
		raw_spin_lock(&desc->lock);
		data = irq_desc_get_irq_data(desc);
		cpumask_copy(&affinity_new,
			     irq_data_get_affinity_mask(data));
		cpumask_clear_cpu(this_cpu, &affinity_new);

		/* Do not count inactive or per-cpu irqs. */
		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		raw_spin_unlock(&desc->lock);
		/*
		 * A single irq may be mapped to multiple cpu's
		 * vector_irq[] (for example IOAPIC cluster mode).  In
		 * this case we have two possibilities:
		 *
		 * 1) the resulting affinity mask is empty; that is
		 * this the down'd cpu is the last cpu in the irq's
		 * affinity mask, or
		 *
		 * 2) the resulting affinity mask is no longer a
		 * subset of the online cpus but the affinity mask is
		 * not zero; that is the down'd cpu is the last online
		 * cpu in a user set affinity mask.
		 */
		if (cpumask_empty(&affinity_new) ||
		    !cpumask_subset(&affinity_new, &online_new))
			this_count++;
	}
	/* No need to check any further. */
	if (!this_count)
		return 0;

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 *
		 * As this is an inaccurate snapshot anyway, we can do
		 * this w/o holding vector_lock.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < FIRST_SYSTEM_VECTOR; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
				if (++count == this_count)
					return 0;
			}
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
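For orientation, a sketch of where this check sits in the x86 CPU-offline path: the hotplug hook is expected to bail out before tearing anything down if the remaining CPUs cannot absorb this CPU's vectors. The surrounding APIC teardown calls (clear_local_APIC(), cpu_disable_common()) are shown as assumptions about that era's native_cpu_disable(), not as an authoritative listing.

/*
 * Sketch of the hotplug hook that consumes the check above: refuse to
 * offline the CPU when the remaining CPUs lack free vectors, otherwise
 * proceed with the usual teardown.
 */
int native_cpu_disable(void)
{
	int ret;

	ret = check_irq_vectors_for_cpu_disable();
	if (ret)
		return ret;

	clear_local_APIC();
	cpu_disable_common();
	return 0;
}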
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	int ret;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupt's are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = irq_data_get_affinity_mask(data);
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(desc);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		/*
		 * The interrupt descriptor might have been cleaned up
		 * already, but it is not yet removed from the radix tree
		 */
		if (!chip) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity) {
			ret = chip->irq_set_affinity(data, affinity, true);
			if (ret == -ENOSPC)
				pr_crit("IRQ %d set affinity failed because there are no available vectors. The device assigned to this IRQ is unstable.\n",
					irq);
		} else {
			if (!(warned++))
				set_affinity = 0;
		}

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}
}