static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	unsigned int irq = idata->irq;

	if (irq_prepare_move(irq, cpu))
		return -1;

	__get_cached_msi_msg(idata->msi_desc, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	cpumask_copy(idata->affinity, cpumask_of(cpu));

	return 0;
}
/*
 * For the moment we only implement delivery to all cpus or one cpu.
 *
 * If the requested affinity is cpu_all_mask, we set global affinity.
 * If not we set it to the first cpu in the mask, even if multiple cpus
 * are set. This is so things like irqbalance (which set core and package
 * wide affinities) do the right thing.
 *
 * We need to fix this to implement support for the links
 */
int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
			unsigned int strict_check)
{
	if (!distribute_irqs)
		return xics_default_server;

	if (!cpumask_subset(cpu_possible_mask, cpumask)) {
		int server = cpumask_first_and(cpu_online_mask, cpumask);

		if (server < nr_cpu_ids)
			return get_hard_smp_processor_id(server);

		if (strict_check)
			return -1;
	}

	/*
	 * Workaround issue with some versions of JS20 firmware that
	 * deliver interrupts to cpus which haven't been started. This
	 * happens when using the maxcpus= boot option.
	 */
	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
		return xics_default_distrib_server;

	return xics_default_server;
}
static int sn_set_affinity_irq(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	unsigned int irq = data->irq;
	nasid_t nasid;
	int slice;

	nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask));
	slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask));

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);

	return 0;
}
static int sn_set_msi_irq_affinity(struct irq_data *data,
				   const struct cpumask *cpu_mask, bool force)
{
	struct msi_msg msg;
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;
	unsigned int cpu, irq = data->irq;

	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return -1;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */
	__get_cached_msi_msg(data->msi_desc, &msg);
	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[irq].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);

	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[irq].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return -1;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
						   new_irq_info->irq_xtalkaddr,
						   sizeof(new_irq_info->irq_xtalkaddr),
						   SN_DMA_MSI|SN_DMA_ADDR_XIO);

	sn_msi_info[irq].pci_addr = bus_addr;
	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(data->affinity, cpu_mask);

	return 0;
}
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = cpumask_first_and(dest, cpu_online_mask);

	return cpu_dest;
}
static int tzdev_get_destination_cpu(void)
{
	int cpu;
	cpumask_t *migration_mask;

	if (nr_big_cluster_requests)
		migration_mask = &tzdev_cpu_mask[CLUSTER_BIG];
	else
		migration_mask = &tzdev_cpu_mask[CLUSTER_LITTLE];

	cpu = cpumask_first_and(migration_mask, cpu_active_mask);
	if (cpu >= nr_cpu_ids) {
		pr_warn("No active CPUs ready for migration, migration failed.\n");
		return -1;
	}

	pr_notice("Found destination CPU:%d.\n", cpu);

	return cpu;
}
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(nasid, hd->bit);
}
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(data->affinity, mask);

	return 0;
}
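All of the handlers above share one idiom: intersect the requested affinity mask with the set of online (or active) CPUs, take the first set bit, and fail or fall back when the intersection is empty, since cpumask_first_and() returns nr_cpu_ids in that case. The stand-alone sketch below illustrates only that selection logic with plain bitmasks; first_online_cpu_in_mask(), MAX_CPUS and the sample masks are hypothetical stand-ins for the kernel's cpumask API, not code from any of the drivers quoted here.

#include <stdio.h>

#define MAX_CPUS 8			/* hypothetical stand-in for nr_cpu_ids */

/*
 * Return the first CPU that is both requested and online, or MAX_CPUS
 * if the intersection is empty -- mirroring the behaviour of
 * cpumask_first_and(mask, cpu_online_mask) in the snippets above.
 */
static int first_online_cpu_in_mask(unsigned int requested, unsigned int online)
{
	unsigned int both = requested & online;
	int cpu;

	for (cpu = 0; cpu < MAX_CPUS; cpu++)
		if (both & (1u << cpu))
			return cpu;

	return MAX_CPUS;		/* "no CPU" sentinel, like nr_cpu_ids */
}

int main(void)
{
	unsigned int online = 0x0f;	/* CPUs 0-3 online */

	/* Requested mask overlaps the online set: CPU 2 is chosen. */
	printf("%d\n", first_online_cpu_in_mask(0x0c, online));

	/* No overlap: the caller must fall back or fail, as the drivers above do. */
	printf("%d\n", first_online_cpu_in_mask(0xf0, online));

	return 0;
}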