static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->common->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity) {
		pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);
	} else {
		int r = irq_do_set_affinity(d, affinity, false);

		if (r)
			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
					    d->irq, r);
	}

	return ret;
}
/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc:	pointer to irq_desc of the IRQ
 * @cpu:	destination CPU, must be in the destination mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
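/*
 * Hedged usage sketch (not part of the snippet above): the driver-facing
 * wrapper resolves the descriptor from a Linux irq number and performs the
 * sanity check unconditionally before delegating to __ipi_send_single().
 * Shown roughly as in kernel/irq/ipi.c; treat the details as approximate.
 */
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;

	return __ipi_send_single(desc, cpu);
}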
static void intel_mid_irq_handler(struct irq_desc *desc)
{
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	struct intel_mid_gpio *priv = gpiochip_get_data(gc);
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, gpio, mask;
	unsigned long pending;
	void __iomem *gedr;

	/* check the GPIO controller to see which pin triggered the interrupt */
	for (base = 0; base < priv->chip.ngpio; base += 32) {
		gedr = gpio_reg(&priv->chip, base, GEDR);
		while ((pending = readl(gedr))) {
			gpio = __ffs(pending);
			mask = BIT(gpio);
			/* Clear before handling so we can't lose an edge */
			writel(mask, gedr);
			generic_handle_irq(irq_find_mapping(gc->irqdomain,
							    base + gpio));
		}
	}

	chip->irq_eoi(data);
}
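/*
 * Hedged sketch: how a chained GPIO demux handler like the one above is
 * typically installed at probe time. example_install_chained_handler() and
 * parent_irq are illustrative names, not taken from the driver above.
 */
static void example_install_chained_handler(struct gpio_chip *gc,
					    unsigned int parent_irq)
{
	/*
	 * Attach the demux handler to the parent interrupt and stash the
	 * gpio_chip so irq_desc_get_handler_data() can retrieve it later.
	 */
	irq_set_chained_handler_and_data(parent_irq, intel_mid_irq_handler, gc);
}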
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
	void __iomem *pio = at91_gpio->regbase;
	unsigned long isr;
	int n;

	chained_irq_enter(chip, desc);
	for (;;) {
		/* Reading ISR acks pending (edge triggered) GPIO interrupts.
		 * When none are pending, we're finished unless we need to
		 * process multiple banks (like ID_PIOCDE on sam9263).
		 */
		isr = __raw_readl(pio + PIO_ISR) & __raw_readl(pio + PIO_IMR);
		if (!isr) {
			if (!at91_gpio->next)
				break;

			at91_gpio = at91_gpio->next;
			pio = at91_gpio->regbase;
			continue;
		}

		n = find_first_bit(&isr, BITS_PER_LONG);
		while (n < BITS_PER_LONG) {
			generic_handle_irq(irq_find_mapping(at91_gpio->domain, n));
			n = find_next_bit(&isr, BITS_PER_LONG, n + 1);
		}
	}
	chained_irq_exit(chip, desc);
	/* now it may re-trigger */
}
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
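/*
 * Hedged usage sketch: irq_move_irq() is meant to be called from an
 * irqchip's ack path so that a pending affinity change is carried out from
 * interrupt context. example_chip_ack() and the omitted register write are
 * purely illustrative, not a real irqchip.
 */
static void example_chip_ack(struct irq_data *data)
{
	/* perform any pending irq migration before acking at the chip */
	irq_move_irq(data);
	/* ... chip-specific ack (register write) would go here ... */
}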
int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
static void altera_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct altera_gpio_chip *altera_gc = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct of_mm_gpio_chip *mm_gc = &altera_gc->mmchip;
	unsigned long status;
	int base;

	chip->irq_mask(&desc->irq_data);

	if (altera_gc->level_trigger) {
		status = readl(mm_gc->regs + ALTERA_GPIO_DATA);
	} else {
		status = readl(mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
		writel(status, mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
	}

	status &= readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);

	for (base = 0; base < mm_gc->gc.ngpio; base++) {
		if ((1 << base) & status) {
			generic_handle_irq(
				irq_linear_revmap(altera_gc->irq, base));
		}
	}

	chip->irq_eoi(irq_desc_get_irq_data(desc));
	chip->irq_unmask(&desc->irq_data);
}
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}
/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc:	pointer to irq_desc of the IRQ
 * @dest:	dest CPU(s), must be a subset of the mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		unsigned int base = data->irq;

		for_each_cpu(cpu, dest) {
			unsigned irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}
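/*
 * Hedged sketch of the corresponding virq-level wrapper, roughly as in
 * kernel/irq/ipi.c: resolve the descriptor, verify the request, then
 * delegate to __ipi_send_mask(). Details approximated.
 */
int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;

	return __ipi_send_mask(desc, dest);
}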
int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}
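/*
 * Hedged usage sketch: irq_activate() runs on the request path before the
 * interrupt is started, so non-managed interrupts get their domain
 * resources activated exactly once. example_setup() is illustrative; the
 * real setup path's locking and error unwinding are omitted, and the bare
 * bool arguments stand in for the resend/force flags.
 */
static int example_setup(struct irq_desc *desc)
{
	int ret = irq_activate(desc);

	if (ret)
		return ret;
	/* no resend, no forced startup: the irq was never started before */
	return irq_startup(desc, false, false);
}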
static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;

	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
}
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}
}
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
	int current_cpu = smp_processor_id();
	const struct cpumask *aff;
	struct irq_data *idata;

	idata = irq_desc_get_irq_data(c->irq_desc);
	aff = irq_data_get_affinity_mask(idata);
	return cpumask_test_cpu(current_cpu, aff);
}
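/*
 * Hedged usage sketch: a NAPI poll routine can use the affinity check above
 * to stop polling early once the IRQ has been moved to another CPU, so the
 * budget is not burned on the wrong core. example_should_keep_polling() is
 * an illustrative condensation of the pattern, not the driver's poll loop.
 */
static bool example_should_keep_polling(struct mlx5e_channel *c,
					int work_done, int budget)
{
	/* keep polling only while busy and still on the affine CPU */
	return work_done == budget && mlx5e_channel_no_affinity_change(c);
}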
static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
	chained_irq_exit(core_chip, desc);
}
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @mask:	cpumask
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
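/*
 * Hedged usage sketch: pinning an interrupt to one CPU with the helper
 * above. example_pin_irq() is illustrative; cpumask_of() yields a const
 * mask containing exactly the given CPU.
 */
static int example_pin_irq(unsigned int irq, unsigned int cpu)
{
	return irq_set_affinity(irq, cpumask_of(cpu));
}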
static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned long mask;
	int pin;

	chip->irq_mask_ack(data);

	mask = __raw_readw(KEYDETR);
	for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
		generic_handle_irq(irq_linear_revmap(x3proto_irq_domain, pin));

	chip->irq_unmask(data);
}
static void at91_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(idata);

	if (chip == &gpio_irqchip) {
		/* renamed from "chip" to avoid shadowing the irq_chip above */
		struct at91_gpio_chip *gpio = &gpio_chip[(irq - PIN_BASE) / 32];
		struct at91_gpio_bank *bank = gpio->bank;

		if (ipd != &ipipe_root && --(*gpio->nonroot_gpios) == 0)
			__ipipe_irqbits[(bank->id / BITS_PER_LONG)] |=
				(1 << (bank->id % BITS_PER_LONG));
	}
}
/*
 * Since cpu_online_mask is already updated, we just need to check for
 * affinity that has zeros
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the CPU
		 * not to respond to these local intr sources, such as
		 * ITV, CPEI and MCA.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential; currently WARN_ON,
			 * maybe panic?
			 */
			if (chip && chip->irq_disable &&
			    chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu),
						       false);
				chip->irq_enable(data);
			} else {
				WARN_ON(!chip || !chip->irq_disable ||
					!chip->irq_enable ||
					!chip->irq_set_affinity);
			}
		}
	}
}
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector],
						 VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, pin, mask;
	void __iomem *reg;
	u32 pending;
	unsigned virq;
	int looplimit = 0;

	/* check from GPIO controller which pin triggered the interrupt */
	for (base = 0; base < vg->chip.ngpio; base += 32) {
		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);

		while ((pending = readl(reg))) {
			pin = __ffs(pending);
			mask = BIT(pin);
			/* Clear before handling so we can't lose an edge */
			writel(mask, reg);

			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
			generic_handle_irq(virq);

			/* In case the BIOS or the user sets triggering
			 * incorrectly a pin might remain in "interrupt
			 * triggered" state.
			 */
			if (looplimit++ > 32) {
				dev_err(&vg->pdev->dev,
					"Gpio %d interrupt flood, disabling\n",
					base + pin);

				reg = byt_gpio_reg(&vg->chip, base + pin,
						   BYT_CONF0_REG);
				mask = readl(reg);
				mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
					  BYT_TRIG_LVL);
				writel(mask, reg);
				mask = readl(reg); /* flush */
				break;
			}
		}
	}
	chip->irq_eoi(data);
}
static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}
static void s3c2440_eint_demux(unsigned int irq, struct irq_desc *desc)
{
	int i;
	unsigned long eintpend = ioread32(REG_GPIO(EINTPEND));
	unsigned long eintmask = ioread32(REG_GPIO(EINTMASK));
	struct irq_data *data = irq_desc_get_irq_data(desc);

	eintpend &= ~eintmask;

	s3c2440_irq_mask(data);

	for (i = 4; i < 24; i++) {
		if (eintpend & (1 << i)) {
			generic_handle_irq(IRQ_EINT4 + i - 4);
			break;
		}
	}

	s3c2440_irq_ack(data);
	s3c2440_irq_unmask(data);
}
/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
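/*
 * Hedged usage sketch: a hypervisor-side caller (e.g. KVM's posted-
 * interrupt wiring) hands chip-specific vCPU data down through the helper
 * above. struct example_vcpu_data and its fields are illustrative only;
 * the real layout is defined by the irqchip (such as the Intel VT-d
 * posted-interrupt code), and passing NULL tears the association down.
 */
struct example_vcpu_data {
	u64 pi_desc_addr;	/* where the chip should post interrupts */
	u32 vector;		/* guest vector to post */
};

static int example_wire_posted_interrupt(unsigned int host_irq,
					 struct example_vcpu_data *vdata)
{
	return irq_set_vcpu_affinity(host_irq, vdata);
}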
static void s3c2440_uart_demux(unsigned int irq, struct irq_desc *desc)
{
	unsigned long subsrcpnd;
	int offset;
	int i;
	struct irq_data *data = irq_desc_get_irq_data(desc);

	s3c2440_irq_mask(data);
	s3c2440_irq_ack(data);

	subsrcpnd = ioread32(REG_INT(SUBSRCPND));

	offset = irq - IRQ_UART0;
	offset *= 3;

	for (i = 0; i < 3; i++) {
		if (subsrcpnd & (1 << (offset + i)))
			generic_handle_irq(IRQ_RXD0 + offset + i);
	}

	s3c2440_irq_unmask(data);
}
static void balloon3_irq_handler(struct irq_desc *desc)
{
	unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
				balloon3_irq_enabled;

	do {
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned int irq;

		/* clear useless edge notification */
		if (chip->irq_ack)
			chip->irq_ack(d);

		while (pending) {
			irq = BALLOON3_IRQ(0) + __ffs(pending);
			generic_handle_irq(irq);
			pending &= pending - 1;
		}
		pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
			  balloon3_irq_enabled;
	} while (pending);
}
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	unsigned pin;
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(idata);
	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
	void __iomem *pio = at91_gpio->regbase;
	u32 isr;

	/* temporarily mask (level sensitive) parent IRQ */
	chip->irq_ack(idata);
	for (;;) {
		/* Reading ISR acks pending (edge triggered) GPIO interrupts.
		 * When none are pending, we're finished unless we need to
		 * process multiple banks (like ID_PIOCDE on sam9263).
		 */
		isr = __raw_readl(pio + PIO_ISR) & __raw_readl(pio + PIO_IMR);
		if (!isr) {
			if (!at91_gpio->next)
				break;

			at91_gpio = at91_gpio->next;
			pio = at91_gpio->regbase;
			continue;
		}

		pin = at91_gpio->chip.base;

		while (isr) {
			if (isr & 1)
				ipipe_handle_demuxed_irq(pin);
			pin++;
			isr >>= 1;
		}
	}
	chip->irq_unmask(idata);
	/* now it may re-trigger */
}
static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
	unsigned long pending;
	int i;
	int bitnr;
	u8 pin;

	for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
		intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
		pending = pin;

		if (pending) {
			for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
				generic_handle_irq(mg->irq_base +
						   (i * BITS_PER_BYTE) + bitnr);
		}
	}

	chip->irq_eoi(data);
}
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}
}