/*
 * set_msi_affinity - retarget an MSI/MSI-X vector at a new CPU.
 * @vector:   vector number; also used as the IRQ for affinity bookkeeping
 * @cpu_mask: requested affinity; only the first CPU in the mask is honoured
 *
 * Rewrites the destination-ID field of the interrupt's low address word
 * (PCI config space for plain MSI, the MSI-X table for MSI-X) so the
 * device delivers the interrupt to the first CPU of @cpu_mask, and
 * records the new target in the descriptor and the IRQ info.
 * Silently returns if the vector has no live descriptor.
 */
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	struct msg_address address;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (entry == NULL || entry->dev == NULL)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);

		if (!pos)
			return;

		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
				      &address.lo_address.value);
		/* Keep every field except the destination ID, then insert it. */
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
					     MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
				       address.lo_address.value);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		/* Low address word of this entry in the MSI-X table. */
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			     PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address.lo_address.value = readl(entry->mask_base + offset);
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
					     MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
		writel(address.lo_address.value, entry->mask_base + offset);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}
}
static void init_evtchn_cpu_bindings(void) { int i; /* By default all event channels notify CPU#0. */ for (i = 0; i < NR_IRQS; i++) set_native_irq_info(i, cpumask_of_cpu(0)); memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); for_each_possible_cpu(i) memset(cpu_evtchn_mask[i], (i == 0) ? ~0 : 0, sizeof(cpu_evtchn_mask[i])); }
/*
 * bind_evtchn_to_cpu - route event channel @chn to @cpu.
 *
 * The channel must currently be masked in the shared info page
 * (BUG otherwise); rebinding a live channel would race with delivery.
 * Updates the bound IRQ's affinity info (if the channel maps to an
 * IRQ), moves the channel's bit from its previous CPU's mask to the
 * new CPU's mask, and records the new binding.
 */
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	int irq = evtchn_to_irq[chn];
	unsigned int old_cpu = cpu_evtchn[chn];

	BUG_ON(!test_bit(chn, s->evtchn_mask));

	if (irq != -1)
		set_native_irq_info(irq, cpumask_of_cpu(cpu));

	/* Hand the channel over from its previous owner to @cpu. */
	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[old_cpu]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}
/*
 * ia64_set_msi_irq_affinity - redirect an MSI IRQ to a new CPU (ia64).
 * @irq:      the MSI interrupt to retarget
 * @cpu_mask: requested affinity; only the first CPU in the mask is used
 *
 * Reads the current MSI message, replaces only the destination-ID
 * field of the low address word with the physical ID of the first CPU
 * in @cpu_mask, writes the message back, and updates the IRQ info.
 */
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 target = MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));

	read_msi_msg(irq, &msg);

	/* Preserve all non-destination bits, then insert the new target. */
	msg.address_lo = (msg.address_lo & MSI_ADDR_DESTID_MASK) | target;

	write_msi_msg(irq, &msg);
	set_native_irq_info(irq, cpu_mask);
}
/*
 * set_msi_affinity - retarget an MSI/MSI-X vector at a new CPU.
 * @vector:   vector number; also used as the IRQ for affinity bookkeeping
 * @cpu_mask: requested affinity; only the first CPU in the mask is honoured
 *
 * Reads the interrupt's address pair (PCI config space for plain MSI,
 * the MSI-X table for MSI-X), lets the platform's msi_ops->target hook
 * rewrite the destination, writes the pair back, and updates the IRQ
 * affinity info.  The whole update runs under msi_lock so the
 * descriptor cannot be torn down underneath us; vectors with no live,
 * enabled descriptor are ignored.
 */
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	u32 address_hi, address_lo;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || entry->msi_attrib.state == 0)
		goto out_unlock;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);

		if (!pos)
			goto out_unlock;

		pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
				      &address_hi);
		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
				      &address_lo);

		/* Platform hook rewrites the destination in the address pair. */
		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
				       address_hi);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
				       address_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		/* Upper/lower address words of this entry in the MSI-X table. */
		int base = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
		int offset_hi = base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
		int offset_lo = base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address_hi = readl(entry->mask_base + offset_hi);
		address_lo = readl(entry->mask_base + offset_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		writel(address_hi, entry->mask_base + offset_hi);
		writel(address_lo, entry->mask_base + offset_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}

out_unlock:
	spin_unlock_irqrestore(&msi_lock, flags);
}