void systemasic_irq_init(void) { int i, nid = cpu_to_node(boot_cpu_data); /* Assign all virtual IRQs to the System ASIC int. handler */ for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++) { unsigned int irq; irq = create_irq_nr(i, nid); if (unlikely(irq == 0)) { pr_err("%s: failed hooking irq %d for systemasic\n", __func__, i); return; } if (unlikely(irq != i)) { pr_err("%s: got irq %d but wanted %d, bailing.\n", __func__, irq, i); destroy_irq(irq); return; } set_irq_chip_and_handler(i, &systemasic_int, handle_level_irq); } }
/*
 * Tear down an SN (Altix) MSI interrupt: unmap the MSI target address
 * from PCI bus space, release the PROM-allocated interrupt resources,
 * and finally free the Linux IRQ.  Must mirror sn_setup_msi_irq().
 */
void sn_teardown_msi_irq(unsigned int irq)
{
	nasid_t nasid;
	int widget;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft;
	struct sn_pcibus_provider *provider;

	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	/* irq_int_bit == -1 marks an MSI irq (set at setup); skip others. */
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;

	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	/* Undo the dma_map_consistent() done when the MSI was set up. */
	(*provider->dma_unmap)(pdev,
			       sn_msi_info[irq].pci_addr,
			       PCI_DMA_FROMDEVICE);
	sn_msi_info[irq].pci_addr = 0;

	bussoft = SN_PCIDEV_BUSSOFT(pdev);
	nasid = NASID_GET(bussoft->bs_base);
	/* NOTE(review): odd nasids appear to denote TIO widgets — confirm. */
	widget = (nasid & 1) ?
		TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
		SWIN_WIDGETNUM(bussoft->bs_base);

	sn_intr_free(nasid, widget, sn_irq_info);
	sn_msi_info[irq].sn_irq_info = NULL;

	destroy_irq(irq);
}
/*
 * x3proto_gpio_setup - register the baseboard GPIO chip and its IRQs.
 *
 * Enables the ILSEL key interrupt line, registers the GPIO chip, and
 * creates one virtual IRQ per baseboard GPIO, all demuxed through a
 * chained handler on the ILSEL line.
 *
 * Returns 0 on success or a negative errno, unwinding any
 * partially-created IRQs and the GPIO chip registration on failure.
 */
int __init x3proto_gpio_setup(void)
{
	int ilsel;
	int ret, i;

	ilsel = ilsel_enable(ILSEL_KEY);
	if (unlikely(ilsel < 0))
		return ilsel;

	ret = gpiochip_add(&x3proto_gpio_chip);
	if (unlikely(ret))
		goto err_gpio;

	for (i = 0; i < NR_BASEBOARD_GPIOS; i++) {
		unsigned long flags;
		int irq = create_irq();

		if (unlikely(irq < 0)) {
			ret = -EINVAL;
			goto err_irq;
		}

		spin_lock_irqsave(&x3proto_gpio_lock, flags);
		x3proto_gpio_irq_map[i] = irq;
		irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
					      handle_simple_irq, "gpio");
		spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
	}

	/*
	 * The chip covers GPIOs base .. base + ngpio - 1 inclusive;
	 * the upper bound previously overstated the range by one.
	 */
	pr_info("registering '%s' support, handling GPIOs %u -> %u, "
		"bound to IRQ %u\n",
		x3proto_gpio_chip.label, x3proto_gpio_chip.base,
		x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio - 1,
		ilsel);

	irq_set_chained_handler(ilsel, x3proto_gpio_irq_handler);
	irq_set_irq_wake(ilsel, 1);

	return 0;

err_irq:
	/*
	 * Destroy every virtual IRQ created so far.  Entry i was never
	 * assigned on this failure path, so the non-zero guard skips it.
	 */
	for (; i >= 0; --i)
		if (x3proto_gpio_irq_map[i])
			destroy_irq(x3proto_gpio_irq_map[i]);

	/*
	 * Do not assign gpiochip_remove()'s result to ret: a successful
	 * removal would overwrite the real error with 0 and make this
	 * failure path report success.
	 */
	if (unlikely(gpiochip_remove(&x3proto_gpio_chip)))
		pr_err("Failed deregistering GPIO\n");

err_gpio:
	synchronize_irq(ilsel);
	ilsel_disable(ILSEL_KEY);

	return ret;
}
/*
 * hvc_tile_remove - unregister the hypervisor console and free its IRQ.
 *
 * The IRQ number (stored in hp->data) is cached before calling
 * hvc_remove(): once the HVC core has been asked to remove the console
 * it may drop its reference to @hp, so dereferencing it afterwards
 * risks a use-after-free.
 */
static int hvc_tile_remove(struct platform_device *pdev)
{
	struct hvc_struct *hp = dev_get_drvdata(&pdev->dev);
	int irq = hp->data;
	int rc;

	rc = hvc_remove(hp);
	if (rc == 0)
		destroy_irq(irq);

	return rc;
}
/*
 * Tear down the TILE-Gx OHCI controller in the reverse order of probe:
 * unregister and release the HCD, stop the controller, destroy the
 * GXIO context, and finally free the IRQ created at probe time.
 */
static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);

	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
	tilegx_stop_ohc();
	gxio_usb_host_destroy(&pdata->usb_ctx);
	destroy_irq(pdata->irq);

	return 0;
}
/*
 * setup_hd64461 - boot-time initialisation of the HD64461 companion chip.
 *
 * Masks all HD64461 interrupt sources, reserves the 16 virtual IRQs the
 * chip demuxes onto (HD64461_IRQBASE .. HD64461_IRQBASE + 15), and
 * chains the demux handler onto the CPU-side interrupt line.
 *
 * Returns 0 on success (or when no HD64461 is present), -EBUSY/-EINVAL
 * if the virtual IRQ range could not be reserved at the expected numbers.
 */
int __init setup_hd64461(void)
{
	int i, nid = cpu_to_node(boot_cpu_data);

	if (!MACH_HD64461)
		return 0;

	printk(KERN_INFO
	       "HD64461 configured at 0x%x on irq %d(mapped into %d to %d)\n",
	       HD64461_IOBASE, CONFIG_HD64461_IRQ, HD64461_IRQBASE,
	       HD64461_IRQBASE + 15);

	/* Should be at processor specific part.. */
#if defined(CONFIG_CPU_SUBTYPE_SH7709)
	__raw_writew(0x2240, INTC_ICR1);
#endif
	/* Mask every HD64461 source until the demux handler is wired up. */
	__raw_writew(0xffff, HD64461_NIMR);

	/* IRQ 80 -> 95 belongs to HD64461 */
	for (i = HD64461_IRQBASE; i < HD64461_IRQBASE + 16; i++) {
		unsigned int irq;

		irq = create_irq_nr(i, nid);
		if (unlikely(irq == 0)) {
			pr_err("%s: failed hooking irq %d for HD64461\n",
			       __func__, i);
			return -EBUSY;
		}

		/* The demux relies on a 1:1 hw->virq mapping; bail otherwise. */
		if (unlikely(irq != i)) {
			pr_err("%s: got irq %d but wanted %d, bailing.\n",
			       __func__, irq, i);
			destroy_irq(irq);
			return -EINVAL;
		}

		irq_set_chip_and_handler(i, &hd64461_irq_chip,
					 handle_level_irq);
	}

	irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux);
	irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW);

#ifdef CONFIG_HD64461_ENABLER
	printk(KERN_INFO "HD64461: enabling PCMCIA devices\n");
	__raw_writeb(0x4c, HD64461_PCC1CSCIER);
	__raw_writeb(0x00, HD64461_PCC1CSCR);
#endif

	return 0;
}
/*
 * Allocate a kernel IRQ and bind it to the given UV hub MMR.  On
 * success the allocated IRQ number is returned; if the architecture
 * layer fails to enable it, the IRQ is torn back down and the error
 * code from arch_enable_uv_irq() is returned instead.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, unsigned long mmr_offset)
{
	int irq, ret;

	irq = create_irq();
	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
	if (ret == irq)
		return irq;

	destroy_irq(irq);
	return ret;
}
/*
 * init_se7724_IRQ - wire up the Solution Engine 7724 FPGA interrupts.
 *
 * Masks and acks all three FPGA interrupt banks, reserves the FPGA's
 * virtual IRQ range at the expected numbers, and chains the FPGA demux
 * handler onto the three CPU-side IRQ lines.
 */
void __init init_se7724_IRQ(void)
{
	int i, nid = cpu_to_node(boot_cpu_data);

	__raw_writew(0xffff, IRQ0_MR);	/* mask all sources in bank 0 */
	__raw_writew(0xffff, IRQ1_MR);	/* mask all sources in bank 1 */
	__raw_writew(0xffff, IRQ2_MR);	/* mask all sources in bank 2 */
	__raw_writew(0x0000, IRQ0_SR);	/* ack any pending bank 0 irqs */
	__raw_writew(0x0000, IRQ1_SR);	/* ack any pending bank 1 irqs */
	__raw_writew(0x0000, IRQ2_SR);	/* ack any pending bank 2 irqs */
	/* NOTE(review): 0x002a presumably selects the irq sense mode — confirm. */
	__raw_writew(0x002a, IRQ_MODE);

	for (i = 0; i < SE7724_FPGA_IRQ_NR; i++) {
		int irq, wanted;

		wanted = SE7724_FPGA_IRQ_BASE + i;

		irq = create_irq_nr(wanted, nid);
		if (unlikely(irq == 0)) {
			pr_err("%s: failed hooking irq %d for FPGA\n",
			       __func__, wanted);
			return;
		}

		/* The demux assumes a 1:1 hw->virq mapping; give up otherwise. */
		if (unlikely(irq != wanted)) {
			pr_err("%s: got irq %d but wanted %d, bailing.\n",
			       __func__, irq, wanted);
			destroy_irq(irq);
			return;
		}

		irq_set_chip_and_handler_name(irq, &se7724_irq_chip,
					      handle_level_irq, "level");
	}

	irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux);
	irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);

	irq_set_chained_handler(IRQ1_IRQ, se7724_irq_demux);
	irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);

	irq_set_chained_handler(IRQ2_IRQ, se7724_irq_demux);
	irq_set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW);
}
/*
 * Allocate a kernel IRQ and bind it to the given UV hub MMR.  On
 * success the irq -> MMR mapping is recorded (for uv_teardown_irq());
 * on failure the freshly allocated IRQ is destroyed again and the
 * error from arch_enable_uv_irq() is propagated.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int ret;
	int irq = create_irq_nr(NR_IRQS_LEGACY,
				uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 limit);
	if (ret != irq) {
		destroy_irq(irq);
		return ret;
	}

	uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	return ret;
}
static int hvc_tile_probe(struct platform_device *pdev) { struct hvc_struct *hp; int tile_hvc_irq; /* Create our IRQ and register it. */ tile_hvc_irq = create_irq(); if (tile_hvc_irq < 0) return -ENXIO; tile_irq_activate(tile_hvc_irq, TILE_IRQ_PERCPU); hp = hvc_alloc(0, tile_hvc_irq, &hvc_tile_get_put_ops, 128); if (IS_ERR(hp)) { destroy_irq(tile_hvc_irq); return PTR_ERR(hp); } dev_set_drvdata(&pdev->dev, hp); return 0; }
void uv_teardown_irq(unsigned int irq) { struct uv_irq_2_mmr_pnode *e; struct rb_node *n; unsigned long irqflags; spin_lock_irqsave(&uv_irq_lock, irqflags); n = uv_irq_root.rb_node; while (n) { e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); if (e->irq == irq) { arch_disable_uv_irq(e->pnode, e->offset); rb_erase(n, &uv_irq_root); kfree(e); break; } if (irq < e->irq) n = n->rb_left; else n = n->rb_right; } spin_unlock_irqrestore(&uv_irq_lock, irqflags); destroy_irq(irq); }
/*
 * ohci_hcd_tilegx_drv_probe - bring up the TILE-Gx OHCI controller.
 *
 * Opens the GXIO USB context, creates the HCD, starts the controller,
 * allocates and configures the per-cpu downcall IRQ, registers client
 * memory with the hypervisor, and finally adds the HCD.  Each failure
 * path unwinds exactly the steps completed so far.
 */
static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	pte_t pte = { 0 };
	int my_cpu = smp_processor_id();
	int ret;

	if (usb_disabled())
		return -ENODEV;

	/*
	 * Try to initialize our GXIO context; if we can't, the device
	 * doesn't exist.
	 */
	if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 0) != 0)
		return -ENXIO;

	hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		ret = -ENOMEM;
		goto err_hcd;
	}

	/*
	 * We don't use rsrc_start to map in our registers, but seems like
	 * we ought to set it to something, so we use the register VA.
	 */
	hcd->rsrc_start =
		(ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
	hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
	hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);

	tilegx_start_ohc();

	/* Create our IRQs and register them. */
	pdata->irq = create_irq();
	if (pdata->irq < 0) {
		ret = -ENXIO;
		goto err_no_irq;
	}

	tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);

	/* Configure interrupts. */
	ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
					  cpu_x(my_cpu), cpu_y(my_cpu),
					  KERNEL_PL, pdata->irq);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	/* Register all of our memory. */
	pte = pte_set_home(pte, PAGE_HOME_HASH);
	ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	ohci_hcd_init(hcd_to_ohci(hcd));

	ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
	if (ret == 0) {
		platform_set_drvdata(pdev, hcd);
		return ret;
	}

err_have_irq:
	destroy_irq(pdata->irq);
err_no_irq:
	tilegx_stop_ohc();
	usb_put_hcd(hcd);
err_hcd:
	gxio_usb_host_destroy(&pdata->usb_ctx);
	return ret;
}
/*
 * sn_setup_msi_irq - allocate and program an MSI vector on SN (Altix).
 *
 * Reserves a Linux IRQ, asks the PROM (via sn_intr_alloc) to allocate
 * the hub interrupt resources, maps the interrupt's XIO address into
 * PCI bus space, and programs the resulting address/data pair into the
 * device's MSI capability.  All partial allocations are undone on
 * failure.
 */
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	/* Only 64-bit-address capable MSI is supported here. */
	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
		return irq;

	nasid = NASID_GET(bussoft->bs_base);
	/* NOTE(review): odd nasids appear to denote TIO widgets — confirm. */
	widget = (nasid & 1) ?
		TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
		SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	/* irq_int_bit == -1 marks an MSI irq (checked at teardown). */
	sn_irq_info->irq_int_bit = -1;
	sn_irq_fixup(pdev, sn_irq_info);

	/* Fields the PROM does not fill in for us. */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/* Map the interrupt's XIO address into PCI bus space. */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/* Bit 16 is the SN "send vector" bit; it must be set for the
	 * vector to move through the system. */
	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}
/* ia64 MSI teardown hook: release the Linux IRQ backing the MSI vector. */
void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
/*
 * physdev_map_pirq - PHYSDEVOP_map_pirq handler.
 *
 * Validates (or allocates) the physical IRQ named by @map and binds it
 * to a pirq in the target domain.  For MSI requests with index == -1 a
 * fresh IRQ is created here and destroyed again if the final mapping
 * fails.  Locking order: pcidevs_lock, then the domain's event_lock.
 */
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int pirq, irq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    ret = rcu_lock_target_domain_by_id(map->domid, &d);
    if ( ret )
        return ret;

    /* HVM self-mapping takes a separate, simpler path. */
    if ( map->domid == DOMID_SELF && is_hvm_domain(d) )
    {
        ret = physdev_hvm_map_pirq(d, map);
        goto free_domain;
    }

    if ( !IS_PRIV_FOR(current->domain, d) )
    {
        ret = -EPERM;
        goto free_domain;
    }

    /* Verify or get irq. */
    switch ( map->type )
    {
        case MAP_PIRQ_TYPE_GSI:
            if ( map->index < 0 || map->index >= nr_irqs_gsi )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                        d->domain_id, map->index);
                ret = -EINVAL;
                goto free_domain;
            }

            irq = domain_pirq_to_irq(current->domain, map->index);
            if ( irq <= 0 )
            {
                /* Only a privileged caller may name a raw GSI directly. */
                if ( IS_PRIV(current->domain) )
                    irq = map->index;
                else {
                    dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                            d->domain_id);
                    ret = -EINVAL;
                    goto free_domain;
                }
            }
            break;

        case MAP_PIRQ_TYPE_MSI:
            /* index == -1 means "allocate a new irq for this MSI". */
            irq = map->index;
            if ( irq == -1 )
                irq = create_irq();

            if ( irq < 0 || irq >= nr_irqs )
            {
                dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }

            _msi.bus = map->bus;
            _msi.devfn = map->devfn;
            _msi.entry_nr = map->entry_nr;
            _msi.table_base = map->table_base;
            _msi.irq = irq;
            map_data = &_msi;
            break;

        default:
            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                    d->domain_id, map->type);
            ret = -EINVAL;
            goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( map->pirq < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq, pirq);
            /* NOTE(review): a negative pirq apparently marks a conflicting
             * existing mapping; a positive one is reused — confirm. */
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, irq, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    /* Destroy an MSI irq we created above if the mapping failed. */
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        destroy_irq(irq);
free_domain:
    rcu_unlock_domain(d);
    return ret;
}
/*
 * Release a UV IRQ: disable it at the hub MMR it was wired to by
 * uv_setup_irq(), then free the Linux IRQ number.
 */
void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
{
	arch_disable_uv_irq(mmr_blade, mmr_offset);
	destroy_irq(irq);
}
/*
 * physdev_map_pirq - PHYSDEVOP_map_pirq handler.
 *
 * Validates (or allocates) the physical IRQ selected by @type/@index
 * and binds it to a pirq in domain @domid, returning the pirq through
 * @pirq_p.  For MSI requests with *index == -1 a fresh IRQ is created
 * here and destroyed again if the final mapping fails.  Locking order:
 * pcidevs_lock, then the domain's event_lock.
 */
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            /* Only the hardware domain may name a raw GSI directly. */
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        /* *index == -1 means "allocate a new irq for this MSI". */
        irq = *index;
        if ( irq == -1 )
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            /* NOTE(review): a negative pirq apparently marks a conflicting
             * existing mapping; a positive one is reused — confirm. */
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != *pirq_p )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, *index, *pirq_p);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = *pirq_p;
    }

    ret = map_domain_pirq(d, pirq, irq, type, map_data);
    if ( ret == 0 )
        *pirq_p = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    /* Destroy an MSI irq we created above if the mapping failed. */
    if ( (ret != 0) && (type == MAP_PIRQ_TYPE_MSI) && (*index == -1) )
        destroy_irq(irq);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
/* * This routine finds the first virtqueue described in the configuration of * this device and sets it up. */ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name) { struct kvm_device *kdev = to_kvmdev(vdev); struct kvm_vqinfo *vqi; struct kvm_vqconfig *config; struct virtqueue *vq; long irq; int err = -EINVAL; if (index >= kdev->desc->num_vq) return ERR_PTR(-ENOENT); vqi = kzalloc(sizeof(*vqi), GFP_KERNEL); if (!vqi) return ERR_PTR(-ENOMEM); config = kvm_vq_config(kdev->desc)+index; vqi->config = config; vqi->pages = generic_remap_prot(config->pa, vring_size(config->num, KVM_TILE_VIRTIO_RING_ALIGN), 0, io_prot()); if (!vqi->pages) { err = -ENOMEM; goto out; } vq = vring_new_virtqueue(config->num, KVM_TILE_VIRTIO_RING_ALIGN, vdev, 0, vqi->pages, kvm_notify, callback, name); if (!vq) { err = -ENOMEM; goto unmap; } /* * Trigger the IPI interrupt in SW way. * TODO: We do not need to create one irq for each vq. A bit wasteful. */ irq = create_irq(); if (irq < 0) { err = -ENXIO; goto del_virtqueue; } tile_irq_activate(irq, TILE_IRQ_SW_CLEAR); if (request_irq(irq, vring_interrupt, 0, dev_name(&vdev->dev), vq)) { err = -ENXIO; destroy_irq(irq); goto del_virtqueue; } config->irq = irq; vq->priv = vqi; return vq; del_virtqueue: vring_del_virtqueue(vq); unmap: vunmap(vqi->pages); out: return ERR_PTR(err); }
/* Arch MSI teardown hook: release the Linux IRQ backing the MSI vector. */
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
/*
 * sn_setup_msi_irq - allocate and program an MSI vector on SN (Altix).
 *
 * Reserves a Linux IRQ, asks the PROM (via sn_intr_alloc) to allocate
 * the hub interrupt resources, maps the interrupt's XIO address into
 * PCI bus space, and programs the resulting address/data pair into the
 * device's MSI capability.  All partial allocations are undone on
 * failure.
 */
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	/* Only 64-bit-address capable MSI is supported here. */
	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */
	nasid = NASID_GET(bussoft->bs_base);
	/* NOTE(review): odd nasids appear to denote TIO widgets — confirm. */
	widget = (nasid & 1) ?
		TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
		SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;	/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}