static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(irq_data_get_affinity_mask(d), affinity);

	return ret;
}
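For context, migrate_one_irq() is normally driven by an arch-level loop over all descriptors during CPU offline. A minimal sketch of such a caller, modelled on the ARM migrate_irqs() helper (the descriptor lock and local-IRQ discipline are the important parts):

/* Sketch of a typical caller, modelled on the ARM migrate_irqs() helper. */
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned long flags;
	unsigned int i;

	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    i, smp_processor_id());
	}

	local_irq_restore(flags);
}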
int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			/* Start it up and let the core pick an affinity. */
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			/* Managed affinity: apply the preset mask first. */
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			/* All managed target CPUs are offline: stay down. */
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}
static void ics_rtas_unmask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int call_status;
	int server;

	pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR
			"%s: ibm_set_xive irq %u server %x returned %d\n",
			__func__, hw_irq, server, call_status);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
	if (call_status != 0) {
		printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
			__func__, hw_irq, call_status);
		return;
	}
}
static int sn_set_msi_irq_affinity(struct irq_data *data,
				   const struct cpumask *cpu_mask, bool force)
{
	struct msi_msg msg;
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;
	unsigned int cpu, irq = data->irq;

	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return -1;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */
	__get_cached_msi_msg(irq_data_get_msi_desc(data), &msg);
	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[irq].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);

	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[irq].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return -1;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					new_irq_info->irq_xtalkaddr,
					sizeof(new_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);

	sn_msi_info[irq].pci_addr = bus_addr;
	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(data), cpu_mask);

	return 0;
}
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
	int current_cpu = smp_processor_id();
	const struct cpumask *aff;
	struct irq_data *idata;

	idata = irq_desc_get_irq_data(c->irq_desc);
	aff = irq_data_get_affinity_mask(idata);
	return cpumask_test_cpu(current_cpu, aff);
}
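A plausible consumer of this helper is the channel's NAPI poll routine: while the channel's IRQ is still affine to the polling CPU it is safe to keep polling, otherwise NAPI should complete so the next interrupt (and the polling) migrates to the new CPU. A hedged sketch, assuming an mlx5e-style poll loop with a 'busy' flag; example_napi_poll() is illustrative, not the driver's actual function:

/* Sketch only: assumes an mlx5e-style poll function with a 'busy' flag. */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	bool busy = false;

	/* ... poll TX/RX queues, setting 'busy' if work remains ... */

	if (busy) {
		/* Still affine to this CPU: keep polling. */
		if (likely(mlx5e_channel_no_affinity_change(c)))
			return budget;
		/*
		 * Affinity changed: fall through and complete NAPI so the
		 * next interrupt lands on the new CPU.
		 */
	}

	napi_complete_done(napi, 0);
	/* ... re-arm the completion queues here ... */
	return 0;
}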
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	/*
	 * Honour an explicit mask if one was passed in; otherwise fall
	 * back to the recorded affinity, but only if it was ever set.
	 */
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}
static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(irq_data_get_affinity_mask(d), dest);

	return 0;
}
/**
 * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
 * @irq: linux irq number
 * @cpu: the target cpu
 *
 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
 * the hwirq it needs to use to receive and send IPIs.
 *
 * Returns hwirq value on success and INVALID_HWIRQ on failure.
 */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;

	/* Valid CPU ids run from 0 to nr_cpu_ids - 1, hence '>='. */
	if (!data || !ipimask || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	if (!cpumask_test_cpu(cpu, ipimask))
		return INVALID_HWIRQ;

	/*
	 * Get the real hardware irq number if the underlying implementation
	 * uses a separate irq per cpu. If the underlying implementation uses
	 * a single hardware irq for all cpus then the IPI send mechanism
	 * needs to take care of the cpu destinations.
	 */
	if (irq_domain_is_ipi_per_cpu(data->domain))
		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);

	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
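As the kernel-doc above suggests, a coprocessor driver would use this to learn the raw doorbell number for each target CPU. A hypothetical sketch; coproc_setup_ipis() and coproc_program_doorbell() are made up for illustration:

/* Hypothetical caller: hand each per-CPU hwirq to a coprocessor. */
static int coproc_setup_ipis(unsigned int virq, const struct cpumask *dest)
{
	unsigned int cpu;

	for_each_cpu(cpu, dest) {
		irq_hw_number_t hwirq = ipi_get_hwirq(virq, cpu);

		if (hwirq == INVALID_HWIRQ)
			return -EINVAL;
		coproc_program_doorbell(cpu, hwirq); /* made-up helper */
	}
	return 0;
}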
static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
			   const struct cpumask *dest, unsigned int cpu)
{
	struct cpumask *ipimask = irq_data_get_affinity_mask(data);

	if (!chip || !ipimask)
		return -EINVAL;

	if (!chip->ipi_send_single && !chip->ipi_send_mask)
		return -EINVAL;

	/* Valid CPU ids run from 0 to nr_cpu_ids - 1, hence '>='. */
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (dest) {
		if (!cpumask_subset(dest, ipimask))
			return -EINVAL;
	} else {
		if (!cpumask_test_cpu(cpu, ipimask))
			return -EINVAL;
	}
	return 0;
}
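This verifier is meant to gate the send path before dispatching. A sketch of an __ipi_send_single()-style wrapper that consumes it; note the chip's ipi_send_single/ipi_send_mask hooks return void, so the wrapper reports only the verification result:

/* Sketch of a send path that uses the verifier before dispatching. */
static int example_ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = ipi_send_verify(chip, data, NULL, cpu);
	if (ret)
		return ret;

	/* Prefer the single-CPU hook, fall back to the mask variant. */
	if (chip->ipi_send_single)
		chip->ipi_send_single(data, cpu);
	else
		chip->ipi_send_mask(data, cpumask_of(cpu));
	return 0;
}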
int irq_select_affinity(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip;
	static int last_cpu;
	int cpu = last_cpu + 1;

	if (!data)
		return 1;
	chip = irq_data_get_irq_chip(data);

	if (!chip->irq_set_affinity || irq_user_affinity[irq])
		return 1;

	/* Round-robin to the next CPU that is possible and allowed. */
	while (!cpu_possible(cpu) ||
	       !cpumask_test_cpu(cpu, irq_default_affinity))
		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
	last_cpu = cpu;

	cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
	chip->irq_set_affinity(data, cpumask_of(cpu), false);
	return 0;
}
/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq: linux irq number to be destroyed
 * @dest: cpumask of cpus which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system,
 * destroying all virqs associated with them.
 *
 * Return 0 on success or error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data || !ipimask)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	if (WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target.
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
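irq_destroy_ipi() pairs with irq_reserve_ipi(). A minimal lifecycle sketch, assuming an IPI-capable domain is already at hand; the reserve call's failure convention has varied across kernel versions (0 vs. a negative errno), so the check below hedges by treating anything <= 0 as failure:

/* Sketch: reserve an IPI for a set of CPUs, use it, then tear it down. */
static int example_ipi_lifecycle(struct irq_domain *ipi_domain)
{
	int virq;

	virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
	if (virq <= 0)
		return -ENODEV;

	/* ... request_irq() / IPI sends happen here ... */

	return irq_destroy_ipi(virq, cpu_possible_mask);
}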
/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus. Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
	int cpu;

	this_cpu = smp_processor_id();
	/* online_new and affinity_new are file-scope scratch cpumasks. */
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
		/*
		 * Protect against concurrent action removal, affinity
		 * changes etc.
		 */
		raw_spin_lock(&desc->lock);
		data = irq_desc_get_irq_data(desc);
		cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data));
		cpumask_clear_cpu(this_cpu, &affinity_new);

		/* Do not count inactive or per-cpu irqs. */
		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		raw_spin_unlock(&desc->lock);
		/*
		 * A single irq may be mapped to multiple cpus'
		 * vector_irq[] (for example IOAPIC cluster mode). In
		 * this case we have two possibilities:
		 *
		 * 1) the resulting affinity mask is empty; that is,
		 * the down'd cpu is the last cpu in the irq's
		 * affinity mask, or
		 *
		 * 2) the resulting affinity mask is no longer a
		 * subset of the online cpus but the affinity mask is
		 * not zero; that is, the down'd cpu is the last online
		 * cpu in a user-set affinity mask.
		 */
		if (cpumask_empty(&affinity_new) ||
		    !cpumask_subset(&affinity_new, &online_new))
			this_count++;
	}
	/* No need to check any further. */
	if (!this_count)
		return 0;

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 *
		 * As this is an inaccurate snapshot anyway, we can do
		 * this w/o holding vector_lock.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < FIRST_SYSTEM_VECTOR; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
				if (++count == this_count)
					return 0;
			}
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}

	return 0;
}
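This check slots into the x86 CPU-offline path; a sketch of how the hotplug code consumes its result, modelled on native_cpu_disable():

/* Sketch of the consumer, modelled on x86's native_cpu_disable(). */
int native_cpu_disable(void)
{
	int ret;

	ret = check_irq_vectors_for_cpu_disable();
	if (ret)
		return ret;	/* abort the offline: not enough vectors */

	clear_local_APIC();
	cpu_disable_common();
	return 0;
}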
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	int ret;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* Interrupts are disabled at this point. */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = irq_data_get_affinity_mask(data);
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(desc);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		/*
		 * The interrupt descriptor might have been cleaned up
		 * already, but it is not yet removed from the radix tree
		 */
		if (!chip) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity) {
			ret = chip->irq_set_affinity(data, affinity, true);
			if (ret == -ENOSPC)
				pr_crit("IRQ %d set affinity failed because there are no available vectors. The device assigned to this IRQ is unstable.\n",
					irq);
		} else {
			if (!(warned++))
				set_affinity = 0;
		}

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}
}