/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update(of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 *
 * Returns 0 on success, -EINVAL for a UP kernel or an all-offline mask,
 * -EBUSY when the irq has no interrupt-remapping table entry, or the
 * error from vector/APIC-id assignment.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	/* Affinity is meaningless without SMP support. */
	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	/* The requested mask must contain at least one online CPU. */
	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	/* Snapshot the current remapping entry; fail if there is none. */
	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		/*
		 * APIC-id lookup failed after the vector was already moved:
		 * try to roll the vector back to the previous affinity.
		 */
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	/* Stage the new vector/destination into the local IRTE copy. */
	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	/* Record the mask that is now in effect. */
	cpumask_copy(data->affinity, mask);
	return 0;
}
static int msi_init(void) { static int status = -ENOMEM; if (!status) return status; if (pci_msi_quirk) { pci_msi_enable = 0; printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n"); status = -EINVAL; return status; } if ((status = msi_cache_init()) < 0) { pci_msi_enable = 0; printk(KERN_WARNING "PCI: MSI cache init failed\n"); return status; } last_alloc_vector = assign_irq_vector(AUTO_ASSIGN); if (last_alloc_vector < 0) { pci_msi_enable = 0; printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n"); status = -EBUSY; return status; } vector_irq[last_alloc_vector] = 0; nr_released_vectors++; return status; }
static int msi_init(void) { static int status = -ENOMEM; if (!status) return status; if ((status = msi_cache_init()) < 0) { pci_msi_enable = 0; printk(KERN_INFO "WARNING: MSI INIT FAILURE\n"); return status; } last_alloc_vector = assign_irq_vector(AUTO_ASSIGN); if (last_alloc_vector < 0) { pci_msi_enable = 0; printk(KERN_INFO "WARNING: ALL VECTORS ARE BUSY\n"); status = -EBUSY; return status; } vector_irq[last_alloc_vector] = 0; nr_released_vectors++; printk(KERN_INFO "MSI INIT SUCCESS\n"); return status; }
/*
 * The serial driver boot-time initialization code!
 *
 * Registers the HP simulator serial tty driver, wiring each known port
 * to an interrupt vector (allocating one if the table entry has none).
 * Returns 0 on success, -ENODEV on non-hpsim platforms, -ENOMEM when
 * the tty driver cannot be allocated; panics on vector exhaustion or
 * driver-registration failure.
 */
static int __init simrs_init (void)
{
	struct serial_state *state;
	int i;

	if (!ia64_platform_is("hpsim"))
		return -ENODEV;

	hp_simserial_driver = alloc_tty_driver(1);
	if (!hp_simserial_driver)
		return -ENOMEM;

	show_serial_version();

	/* Fill in the tty_driver template before registration. */
	hp_simserial_driver->owner           = THIS_MODULE;
	hp_simserial_driver->driver_name     = "simserial";
	hp_simserial_driver->name            = "ttyS";
	hp_simserial_driver->major           = TTY_MAJOR;
	hp_simserial_driver->minor_start     = 64;
	hp_simserial_driver->type            = TTY_DRIVER_TYPE_SERIAL;
	hp_simserial_driver->subtype         = SERIAL_TYPE_NORMAL;
	hp_simserial_driver->init_termios    = tty_std_termios;
	hp_simserial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	hp_simserial_driver->flags           = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hp_simserial_driver, &hp_ops);

	/*
	 * Let's have a little bit of fun !
	 */
	for (i = 0; i < NR_PORTS; i++) {
		state = &rs_table[i];

		if (state->type == PORT_UNKNOWN)
			continue;

		/* Ports with no irq yet get a freshly assigned vector. */
		if (!state->irq) {
			int vec = assign_irq_vector(AUTO_ASSIGN);

			if (vec < 0)
				panic("%s: out of interrupt vectors!\n",
				      __FUNCTION__);
			state->irq = vec;
			ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
		}

		printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n",
		       state->line, state->port, state->irq,
		       uart_config[state->type].name);
	}

	if (tty_register_driver(hp_simserial_driver))
		panic("Couldn't register simserial driver\n");

	return 0;
}
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) { struct irq_cfg *cfg; struct ht_irq_msg msg; unsigned dest; int err; if (disable_apic) return -ENXIO; cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, apic->target_cpus()); if (err) return err; err = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(), &dest); if (err) return err; msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); msg.address_lo = HT_IRQ_LOW_BASE | HT_IRQ_LOW_DEST_ID(dest) | HT_IRQ_LOW_VECTOR(cfg->vector) | ((apic->irq_dest_mode == 0) ? HT_IRQ_LOW_DM_PHYSICAL : HT_IRQ_LOW_DM_LOGICAL) | HT_IRQ_LOW_RQEOI_EDGE | ((apic->irq_delivery_mode != dest_LowestPrio) ? HT_IRQ_LOW_MT_FIXED : HT_IRQ_LOW_MT_ARBITRATED) | HT_IRQ_LOW_IRQ_MASKED; write_ht_irq_msg(irq, &msg); irq_set_chip_and_handler_name(irq, &ht_irq_chip, handle_edge_irq, "edge"); dev_dbg(&dev->dev, "irq %d for HT\n", irq); return 0; }
static int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { const struct cpumask *eligible_cpu = cpumask_of(cpu); struct irq_cfg *cfg = irq_get_chip_data(irq); unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; int mmr_pnode, err; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); err = assign_irq_vector(irq, cfg, eligible_cpu); if (err != 0) return err; if (limit == UV_AFFINITY_CPU) irq_set_status_flags(irq, IRQ_NO_BALANCING); else irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, irq_name); mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; entry->vector = cfg->vector; entry->delivery_mode = apic->irq_delivery_mode; entry->dest_mode = apic->irq_dest_mode; entry->polarity = 0; entry->trigger = 0; entry->mask = 0; entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); if (cfg->move_in_progress) send_cleanup_vector(cfg); return irq; }
/*
 * Allocate an interrupt vector for MSI use.
 *
 * While fresh vectors remain (new_vector_avail), vectors come from
 * assign_irq_vector(AUTO_ASSIGN).  Once LAST_DEVICE_VECTOR has been
 * handed out, allocation switches permanently to reusing vectors that
 * hotplug removal released back into vector_irq[] (value 0).
 *
 * Returns the allocated vector, or -EBUSY when no released vector is
 * available in reuse mode.
 */
static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;	/* cleared once fresh vectors run out */
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is provided to ensure that successful allocation of MSI
	 * vector is assigned unique among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);
	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSI have no associated IRQ) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		/*
		 * Scan for a released slot, preferring one with no stale
		 * msi_desc.  If every released slot still has a stale
		 * descriptor, the last one seen is used and cleaned below.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		/* Claim the slot for MSI before dropping the lock. */
		vector_irq[free_vector] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assign */
			/*
			 * NOTE(review): the lock is re-taken per iteration
			 * only to read dev/tail; msi_free_vector() runs
			 * unlocked — presumably it takes msi_lock itself.
			 * Confirm against msi_free_vector's locking rules.
			 */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}
		return free_vector;
	}
	/* Fresh-vector path: hand out a brand-new vector. */
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	/* Handing out the last fresh vector flips us into reuse mode. */
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}
/*
 * PHYSDEVOP_map_pirq handler: bind a GSI or MSI source to a pirq in
 * domain @map->domid (DOMID_SELF for the caller's own domain).
 *
 * For GSI, the vector is looked up from the caller's irq table; for
 * MSI, map->index is either an explicit vector or -1 to auto-assign
 * one.  A pirq is then verified (map->pirq >= 0) or allocated
 * (map->pirq < 0) under pcidevs_lock + the domain's event_lock, and
 * map_domain_pirq() installs the binding.  On success map->pirq is
 * written back to the caller.  An auto-assigned MSI vector is released
 * again on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int vector, pirq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    /* Only privileged (dom0/toolstack) domains may remap interrupts. */
    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( !map )
        return -EINVAL;

    if ( map->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(map->domid);

    if ( d == NULL )
    {
        ret = -ESRCH;
        /*
         * NOTE(review): this jumps to free_domain with d == NULL, so
         * rcu_unlock_domain(d) runs on a NULL pointer — confirm that
         * rcu_unlock_domain tolerates NULL, or return directly here.
         */
        goto free_domain;
    }

    /* Verify or get vector. */
    switch ( map->type )
    {
        case MAP_PIRQ_TYPE_GSI:
            if ( map->index < 0 || map->index >= NR_IRQS )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                        d->domain_id, map->index);
                ret = -EINVAL;
                goto free_domain;
            }
            /* GSI must already have a vector in the caller's domain. */
            vector = domain_irq_to_vector(current->domain, map->index);
            if ( !vector )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }
            break;
        case MAP_PIRQ_TYPE_MSI:
            /* index == -1 requests a freshly assigned vector. */
            vector = map->index;
            if ( vector == -1 )
                vector = assign_irq_vector(AUTO_ASSIGN_IRQ);

            if ( vector < 0 || vector >= NR_VECTORS )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }

            /* Stash the MSI parameters for map_domain_pirq(). */
            _msi.bus = map->bus;
            _msi.devfn = map->devfn;
            _msi.entry_nr = map->entry_nr;
            _msi.table_base = map->table_base;
            _msi.vector = vector;
            map_data = &_msi;
            break;
        default:
            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                    d->domain_id, map->type);
            ret = -EINVAL;
            goto free_domain;
    }

    /* Lock order: pcidevs_lock strictly before the domain event_lock. */
    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_vector_to_irq(d, vector);
    if ( map->pirq < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq, pirq);
            /*
             * NOTE(review): we only reach here when pirq is non-zero,
             * and for a positive pirq this guard never fires, so the
             * already-mapped case falls through and remaps the same
             * pirq.  Verify whether domain_vector_to_irq() can return
             * a negative value; otherwise -EBUSY looks unreachable.
             */
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            /* No existing mapping: allocate a fresh pirq. */
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        /* Caller chose a pirq: it must match any existing mapping. */
        if ( pirq && pirq != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: vector %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    /* Release an auto-assigned MSI vector if the mapping failed. */
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        free_irq_vector(vector);
free_domain:
    rcu_unlock_domain(d);
    return ret;
}