/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc:	pointer to irq_desc of the IRQ
 * @cpu:	destination CPU, must be in the destination mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
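For comparison, a minimal sketch of the driver-facing wrapper, modeled on the exported ipi_send_single() in kernel/irq/ipi.c: it resolves the descriptor from a virq and always runs ipi_send_verify(), since its callers are not assumed to be trusted arch or core code.

/*
 * Sketch of the exported wrapper around __ipi_send_single(). Unlike the
 * trusted fast path above, the verification here is unconditional.
 */
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;

	return __ipi_send_single(desc, cpu);
}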
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->common->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity) {
		pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);
	} else {
		int r = irq_do_set_affinity(d, affinity, false);

		if (r)
			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
					    d->irq, r);
	}

	return ret;
}
/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc:	pointer to irq_desc of the IRQ
 * @dest:	dest CPU(s), must be a subset of the mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		unsigned int base = data->irq;

		for_each_cpu(cpu, dest) {
			unsigned irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}
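And the matching mask-based wrapper, again a sketch modeled on the exported ipi_send_mask() in kernel/irq/ipi.c; as above, verification is unconditional on this untrusted path.

int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	/* Unlike __ipi_send_mask(), verify unconditionally: callers here
	 * may be drivers rather than trusted arch/core code. */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;

	return __ipi_send_mask(desc, dest);
}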
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}
static void intel_mid_irq_handler(struct irq_desc *desc)
{
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	struct intel_mid_gpio *priv = gpiochip_get_data(gc);
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, gpio, mask;
	unsigned long pending;
	void __iomem *gedr;

	/* check from GPIO controller which pin triggered the interrupt */
	for (base = 0; base < priv->chip.ngpio; base += 32) {
		gedr = gpio_reg(&priv->chip, base, GEDR);
		while ((pending = readl(gedr))) {
			gpio = __ffs(pending);
			mask = BIT(gpio);
			/* Clear before handling so we can't lose an edge */
			writel(mask, gedr);
			generic_handle_irq(irq_find_mapping(gc->irqdomain,
							    base + gpio));
		}
	}

	chip->irq_eoi(data);
}
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
			/* fall through */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
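irq_set_affinity_locked() is core-internal; a hedged sketch of how a driver reaches it through the public API (the example_* function name and the choice of CPU 2 are illustrative):

/*
 * Sketch of a driver-side caller. irq_set_affinity() takes the
 * descriptor lock and delegates to irq_set_affinity_locked() above.
 */
static int example_pin_irq_to_cpu2(unsigned int irq)
{
	return irq_set_affinity(irq, cpumask_of(2));
}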
/* Called from the FIQ asm handler */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;
	struct pt_regs context_regs;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();
	flush_cache_all();
	outer_flush_all();

	pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x", __func__,
	       msm_dump_cpu_ctx.usr_r0);
	pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x usr_r1:0x%x usr_r2:0x%x "
	       "usr_r3:0x%x usr_r4:0x%x usr_r5:0x%x usr_r6:0x%x usr_r7:0x%x "
	       "usr_r8:0x%x usr_r9:0x%x usr_r10:0x%x usr_r11:0x%x "
	       "usr_r12:0x%x usr_r13:0x%x usr_r14:0x%x irq_spsr:0x%x "
	       "irq_r13:0x%x irq_r14:0x%x svc_spsr:0x%x svc_r13:0x%x "
	       "svc_r14:0x%x abt_spsr:0x%x abt_r13:0x%x abt_r14:0x%x "
	       "und_spsr:0x%x und_r13:0x%x und_r14:0x%x fiq_spsr:0x%x "
	       "fiq_r8:0x%x fiq_r9:0x%x fiq_r10:0x%x fiq_r11:0x%x "
	       "fiq_r12:0x%x fiq_r13:0x%x fiq_r14:0x%x\n", __func__,
	       msm_dump_cpu_ctx.usr_r0, msm_dump_cpu_ctx.usr_r1,
	       msm_dump_cpu_ctx.usr_r2, msm_dump_cpu_ctx.usr_r3,
	       msm_dump_cpu_ctx.usr_r4, msm_dump_cpu_ctx.usr_r5,
	       msm_dump_cpu_ctx.usr_r6, msm_dump_cpu_ctx.usr_r7,
	       msm_dump_cpu_ctx.usr_r8, msm_dump_cpu_ctx.usr_r9,
	       msm_dump_cpu_ctx.usr_r10, msm_dump_cpu_ctx.usr_r11,
	       msm_dump_cpu_ctx.usr_r12, msm_dump_cpu_ctx.usr_r13,
	       msm_dump_cpu_ctx.usr_r14, msm_dump_cpu_ctx.irq_spsr,
	       msm_dump_cpu_ctx.irq_r13, msm_dump_cpu_ctx.irq_r14,
	       msm_dump_cpu_ctx.svc_spsr, msm_dump_cpu_ctx.svc_r13,
	       msm_dump_cpu_ctx.svc_r14, msm_dump_cpu_ctx.abt_spsr,
	       msm_dump_cpu_ctx.abt_r13, msm_dump_cpu_ctx.abt_r14,
	       msm_dump_cpu_ctx.und_spsr, msm_dump_cpu_ctx.und_r13,
	       msm_dump_cpu_ctx.und_r14, msm_dump_cpu_ctx.fiq_spsr,
	       msm_dump_cpu_ctx.fiq_r8, msm_dump_cpu_ctx.fiq_r9,
	       msm_dump_cpu_ctx.fiq_r10, msm_dump_cpu_ctx.fiq_r11,
	       msm_dump_cpu_ctx.fiq_r12, msm_dump_cpu_ctx.fiq_r13,
	       msm_dump_cpu_ctx.fiq_r14);

	context_regs.ARM_sp = msm_dump_cpu_ctx.svc_r13;
	context_regs.ARM_lr = msm_dump_cpu_ctx.svc_r14;
	/* in svc mode, r11 is the same as the usr mode r11 */
	context_regs.ARM_fp = msm_dump_cpu_ctx.usr_r11;
	context_regs.ARM_pc = msm_dump_cpu_ctx.svc_r14;
	/* dump_stack(); */
	unwind_backtrace(&context_regs, current);
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif
}
static void mcuio_soft_hc_irq_unmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct mcuio_soft_hc *shc =
		container_of(chip, struct mcuio_soft_hc, chip);

	shc->irq_enabled = 1;
}
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *)v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	if (i > nr_irqs)
		return 0;

	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	data = irq_get_irq_data(i);
	chip = irq_data_get_irq_chip(data);
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %14s", chip->name);
	seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}
}
static void clear_pending_spi(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_chip *c = irq_data_get_irq_chip(d);

	c->irq_mask(d);
	local_irq_disable();
	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(irq);
	local_irq_enable();
}
static void at91_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(idata);

	if (chip == &gpio_irqchip) {
		struct at91_gpio_chip *chip = &gpio_chip[(irq - PIN_BASE) / 32];
		struct at91_gpio_bank *bank = chip->bank;

		if (ipd != &ipipe_root && --(*chip->nonroot_gpios) == 0)
			__ipipe_irqbits[(bank->id / BITS_PER_LONG)] |=
				(1 << (bank->id % BITS_PER_LONG));
	}
}
static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned long mask;
	int pin;

	chip->irq_mask_ack(data);

	mask = __raw_readw(KEYDETR);
	for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
		generic_handle_irq(irq_linear_revmap(x3proto_irq_domain, pin));

	chip->irq_unmask(data);
}
/*
 * Since cpu_online_mask is already updated, we just need to check for
 * affinity that has zeros
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can now
		 * tell CPU not to respond to these local intr sources.
		 * such as ITV,CPEI,MCA etc.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential, currently WARN_ON.. maybe panic?
			 */
			if (chip && chip->irq_disable &&
			    chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					 !chip->irq_enable ||
					 !chip->irq_set_affinity));
			}
		}
	}
}
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector],
						 VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, pin, mask;
	void __iomem *reg;
	u32 pending;
	unsigned virq;
	int looplimit = 0;

	/* check from GPIO controller which pin triggered the interrupt */
	for (base = 0; base < vg->chip.ngpio; base += 32) {
		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);

		while ((pending = readl(reg))) {
			pin = __ffs(pending);
			mask = BIT(pin);
			/* Clear before handling so we can't lose an edge */
			writel(mask, reg);

			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
			generic_handle_irq(virq);

			/* In case BIOS or user sets triggering incorrectly a
			 * pin might remain in "interrupt triggered" state.
			 */
			if (looplimit++ > 32) {
				dev_err(&vg->pdev->dev,
					"Gpio %d interrupt flood, disabling\n",
					base + pin);

				reg = byt_gpio_reg(&vg->chip, base + pin,
						   BYT_CONF0_REG);
				mask = readl(reg);
				mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
					  BYT_TRIG_LVL);
				writel(mask, reg);
				mask = readl(reg); /* flush */
				break;
			}
		}
	}
	chip->irq_eoi(data);
}
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
/* Called from the FIQ asm handler */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();
	flush_cache_all();
	outer_flush_all();
}
/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
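For context, a hedged sketch of a caller on the KVM -> IOMMU path that the kernel-doc mentions, loosely modeled on x86 posted-interrupt setup; the example_* name and the field values are illustrative assumptions.

/*
 * Sketch of posting a host interrupt directly to a guest vCPU. The
 * pi_desc address and vector passed in are illustrative; struct
 * vcpu_data is the vCPU-specific payload the chip callback consumes.
 */
static int example_post_irq_to_vcpu(unsigned int host_irq, u64 pi_desc_pa,
				    u32 vector)
{
	struct vcpu_data vcpu_info = {
		.pi_desc_addr = pi_desc_pa,	/* posted-interrupt descriptor */
		.vector = vector,		/* guest interrupt vector */
	};

	/* Passing NULL instead tears the vCPU binding down again. */
	return irq_set_vcpu_affinity(host_irq, &vcpu_info);
}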
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	unsigned pin;
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(idata);
	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
	void __iomem *pio = at91_gpio->regbase;
	u32 isr;

	/* temporarily mask (level sensitive) parent IRQ */
	chip->irq_ack(idata);
	for (;;) {
		/* Reading ISR acks pending (edge triggered) GPIO interrupts.
		 * When none are pending, we're finished unless we need to
		 * process multiple banks (like ID_PIOCDE on sam9263).
		 */
		isr = __raw_readl(pio + PIO_ISR) & __raw_readl(pio + PIO_IMR);
		if (!isr) {
			if (!at91_gpio->next)
				break;

			at91_gpio = at91_gpio->next;
			pio = at91_gpio->regbase;
			continue;
		}

		pin = at91_gpio->chip.base;

		while (isr) {
			if (isr & 1)
				ipipe_handle_demuxed_irq(pin);
			pin++;
			isr >>= 1;
		}
	}
	chip->irq_unmask(idata);
	/* now it may re-trigger */
}
/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
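A hedged sketch of the other side of this contract: an irqchip supplying the .irq_cpu_offline callback that irq_cpu_offline() invokes. The example_* names and the register helper are hypothetical; the callback slot and flag are the real struct irq_chip interface.

/* Hypothetical hardware helper, assumed for illustration. */
static void example_mask_irq_on_this_cpu(irq_hw_number_t hwirq) { }

static void example_chip_cpu_offline(struct irq_data *d)
{
	/* Stop routing this interrupt to the CPU that is going away. */
	example_mask_irq_on_this_cpu(d->hwirq);
}

static struct irq_chip example_chip = {
	.name			= "example",
	.irq_cpu_offline	= example_chip_cpu_offline,
	/* With this flag set, the callback only runs while the irq is
	 * enabled (see the irqd_irq_disabled() check above). */
	.flags			= IRQCHIP_ONOFFLINE_ENABLED,
};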
int irq_select_affinity(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip;
	static int last_cpu;
	int cpu = last_cpu + 1;

	if (!data)
		return 1;
	chip = irq_data_get_irq_chip(data);

	if (!chip->irq_set_affinity || irq_user_affinity[irq])
		return 1;

	while (!cpu_possible(cpu) ||
	       !cpumask_test_cpu(cpu, irq_default_affinity))
		cpu = (cpu < (NR_CPUS - 1) ? cpu + 1 : 0);
	last_cpu = cpu;

	cpumask_copy(data->affinity, cpumask_of(cpu));
	chip->irq_set_affinity(data, cpumask_of(cpu), false);
	return 0;
}
static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
	int i;
	int bitnr;
	u8 pin;
	unsigned long pending = 0;

	for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
		intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
		pending = pin;

		if (pending) {
			for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
				generic_handle_irq(mg->irq_base +
						   (i * BITS_PER_BYTE) + bitnr);
		}
	}

	chip->irq_eoi(data);
}
/* Called from the FIQ asm handler */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;
	struct pt_regs ctx_regs;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();

	ctx_regs.ARM_pc = msm_dump_cpu_ctx.fiq_r14;
	ctx_regs.ARM_lr = msm_dump_cpu_ctx.svc_r14;
	ctx_regs.ARM_sp = msm_dump_cpu_ctx.svc_r13;
	ctx_regs.ARM_fp = msm_dump_cpu_ctx.usr_r11;

#ifdef CONFIG_SEC_DEBUG
	do {
		extern void sec_save_final_context(void);
		sec_save_final_context();
	} while (0);
#endif

	unwind_backtrace(&ctx_regs, current);
#ifdef CONFIG_SMP
	smp_send_all_cpu_backtrace();
#endif
	flush_cache_all();
	outer_flush_all();
}
static inline struct em_gio_priv *irq_to_priv(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);

	return container_of(chip, struct em_gio_priv, irq_chip);
}
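This container_of() pattern (also used by mcuio_soft_hc_irq_unmask above) lets chip callbacks recover driver state when the irq_chip is embedded in the driver's private structure. A hedged sketch of a callback built on irq_to_priv(); the register offset and the example_* name are hypothetical stand-ins.

/* Hypothetical mask register offset, assumed for illustration. */
#define EXAMPLE_MASK_REG	0x10

static void example_em_gio_irq_mask(struct irq_data *d)
{
	/* Walk back from the embedded irq_chip to the driver state. */
	struct em_gio_priv *p = irq_to_priv(d);

	/* Set the mask bit for this hardware irq line. */
	iowrite32(BIT(irqd_to_hwirq(d)), p->base0 + EXAMPLE_MASK_REG);
}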
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}
}
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}

/*
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	int ret;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = irq_data_get_affinity_mask(data);
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(desc);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		/*
		 * The interrupt descriptor might have been cleaned up
		 * already, but it is not yet removed from the radix tree
		 */
		if (!chip) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity) {
			ret = chip->irq_set_affinity(data, affinity, true);
			if (ret == -ENOSPC)
				pr_crit("IRQ %d set affinity failed because there are no available vectors.  The device assigned to this IRQ is unstable.\n",
					irq);
		} else {
			if (!(warned++))
				set_affinity = 0;
		}

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}
}