Example #1
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg	msg;
	unsigned long	dest_phys_id;
	int	irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DESTMODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DESTID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}
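
The matching teardown on ia64 is just the release of the vector allocated above; a minimal sketch of that counterpart (the function name and the bare destroy_irq() body are assumed from the create_irq()/destroy_irq() pairing visible throughout these examples):

void ia64_teardown_msi_irq(unsigned int irq)
{
	/* Release the irq/vector that create_irq() handed out in the setup path. */
	destroy_irq(irq);
}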
Example #2
/*
 * Initialize IRQ setting
 */
void __init init_se7722_IRQ(void)
{
	int i, irq;

	ctrl_outw(0, IRQ01_MASK);       /* disable all irqs */
	ctrl_outw(0x2000, 0xb03fffec);  /* mrshpc irq enable */

	for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
		irq = create_irq();
		if (irq < 0)
			return;
		se7722_fpga_irq[i] = irq;

		set_irq_chip_and_handler_name(se7722_fpga_irq[i],
					      &se7722_irq_chip,
					      handle_level_irq, "level");

		set_irq_chip_data(se7722_fpga_irq[i], (void *)i);
	}

	set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
	set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);

	set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux);
	set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
}
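
The two chained handlers registered above are what fan IRQ0/IRQ1 out to the per-bit virtual IRQs created with create_irq(). A minimal sketch of such a demux routine, assuming a hypothetical IRQ01_STS status register whose bit i corresponds to se7722_fpga_irq[i]:

static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	unsigned short pending = ctrl_inw(IRQ01_STS);	/* assumed FPGA status register */
	int i;

	/* Hand every pending FPGA source to the virtual irq allocated for it. */
	for (i = 0; i < SE7722_FPGA_IRQ_NR; i++)
		if (pending & (1 << i))
			generic_handle_irq(se7722_fpga_irq[i]);
}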
Example #3
void __init init_7343se_IRQ(void)
{
	int i, irq;

	__raw_writew(0, PA_CPLD_IMSK);		/* disable all irqs */
	__raw_writew(0x2000, 0xb03fffec);	/* mrshpc irq enable */

	for (i = 0; i < SE7343_FPGA_IRQ_NR; i++) {
		irq = create_irq();
		if (irq < 0)
			return;
		se7343_fpga_irq[i] = irq;

		irq_set_chip_and_handler_name(se7343_fpga_irq[i],
					      &se7343_irq_chip,
					      handle_level_irq,
					      "level");

		irq_set_chip_data(se7343_fpga_irq[i], (void *)i);
	}

	irq_set_chained_handler(IRQ0_IRQ, se7343_irq_demux);
	irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
	irq_set_chained_handler(IRQ1_IRQ, se7343_irq_demux);
	irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
	irq_set_chained_handler(IRQ4_IRQ, se7343_irq_demux);
	irq_set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW);
	irq_set_chained_handler(IRQ5_IRQ, se7343_irq_demux);
	irq_set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW);
}
Example #4
File: gpio.c Project: 08opt/linux
int __init x3proto_gpio_setup(void)
{
	int ilsel;
	int ret, i;

	ilsel = ilsel_enable(ILSEL_KEY);
	if (unlikely(ilsel < 0))
		return ilsel;

	ret = gpiochip_add(&x3proto_gpio_chip);
	if (unlikely(ret))
		goto err_gpio;

	for (i = 0; i < NR_BASEBOARD_GPIOS; i++) {
		unsigned long flags;
		int irq = create_irq();

		if (unlikely(irq < 0)) {
			ret = -EINVAL;
			goto err_irq;
		}

		spin_lock_irqsave(&x3proto_gpio_lock, flags);
		x3proto_gpio_irq_map[i] = irq;
		irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
					      handle_simple_irq, "gpio");
		spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
	}

	pr_info("registering '%s' support, handling GPIOs %u -> %u, "
		"bound to IRQ %u\n",
		x3proto_gpio_chip.label, x3proto_gpio_chip.base,
		x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio,
		ilsel);

	irq_set_chained_handler(ilsel, x3proto_gpio_irq_handler);
	irq_set_irq_wake(ilsel, 1);

	return 0;

err_irq:
	for (; i >= 0; --i)
		if (x3proto_gpio_irq_map[i])
			destroy_irq(x3proto_gpio_irq_map[i]);

	ret = gpiochip_remove(&x3proto_gpio_chip);
	if (unlikely(ret))
		pr_err("Failed deregistering GPIO\n");

err_gpio:
	synchronize_irq(ilsel);

	ilsel_disable(ILSEL_KEY);

	return ret;
}
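
A teardown counterpart would run the same cleanup as the error path above, after first detaching the chained handler; a rough sketch (the function name and the saved x3proto_gpio_ilsel irq are hypothetical, the calls are the ones already used in the setup):

void x3proto_gpio_teardown(void)
{
	int i;

	/* Detach the chained handler before tearing the per-GPIO irqs down. */
	irq_set_chained_handler(x3proto_gpio_ilsel, NULL);	/* hypothetical saved ilsel value */
	synchronize_irq(x3proto_gpio_ilsel);

	for (i = 0; i < NR_BASEBOARD_GPIOS; i++)
		if (x3proto_gpio_irq_map[i])
			destroy_irq(x3proto_gpio_irq_map[i]);

	if (gpiochip_remove(&x3proto_gpio_chip))
		pr_err("Failed deregistering GPIO\n");

	ilsel_disable(ILSEL_KEY);
}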
Example #5
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset)
{
	int irq;
	int ret;

	irq = create_irq();
	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
	if (ret != irq)
		destroy_irq(irq);

	return ret;
}
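
The matching teardown presumably disables the MMR entry and then frees the irq; a sketch under the assumption that an arch_disable_uv_irq() helper mirrors the arch_enable_uv_irq() call above:

void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
{
	/* arch_disable_uv_irq() is assumed here as the inverse of arch_enable_uv_irq(). */
	arch_disable_uv_irq(mmr_blade, mmr_offset);
	destroy_irq(irq);
}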
Example #6
static int hvc_tile_probe(struct platform_device *pdev)
{
	struct hvc_struct *hp;
	int tile_hvc_irq;

	/* Create our IRQ and register it. */
	tile_hvc_irq = create_irq();
	if (tile_hvc_irq < 0)
		return -ENXIO;

	tile_irq_activate(tile_hvc_irq, TILE_IRQ_PERCPU);
	hp = hvc_alloc(0, tile_hvc_irq, &hvc_tile_get_put_ops, 128);
	if (IS_ERR(hp)) {
		destroy_irq(tile_hvc_irq);
		return PTR_ERR(hp);
	}
	dev_set_drvdata(&pdev->dev, hp);

	return 0;
}
Example #7
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	int id, irq = create_irq();
	struct msi_msg msg;

	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);

	msg.address_hi = 0x0;
	msg.address_lo = IOP13XX_MU_MIMR_PCI;

	id = iop13xx_cpu_id();
	msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq);

	return 0;
}
Example #8
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
    int irq = create_irq();
    int msi_irq;
    struct msi_msg msg;

    if (irq < 0)
        return irq;

    msi_irq = irq - IRQ_AURORA_MSI_START;
    irq_set_msi_desc(irq, desc);

    msg.address_hi = 0x0;
    msg.address_lo = AXP_SW_TRIG_IRQ_PHYS;
    msg.data = (0x1 << AXP_SW_TRIG_IRQ_CPU_TARGET_OFFS) |
               ((msi_irq + NR_PRIVATE_MSI_GROUP) & AXP_SW_TRIG_IRQ_INITID_MASK);

    write_msi_msg(irq, &msg);
    irq_set_chip_and_handler(irq, &armada_msi_irq_chip, handle_edge_irq);

    return 0;
}
Example #9
File: msi.c Project: Moretti0/gg
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	int irq = create_irq();
	struct msi_msg msg;

	if (irq < 0)
		return irq;

	irq_set_msi_desc(irq, desc);

	msg.address_hi = 0x0;
	msg.address_lo = MSI_MATCH_ADDR;
	/* 16-bit msg.data: set the cpu type in the upper 8 bits */
	msg.data = (mxc_cpu_type << 8) | ((irq - IRQ_IMX_MSI_0) & 0xFF);

	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &imx_msi_chip, handle_simple_irq);
	set_irq_flags(irq, IRQF_VALID);
	pr_info("IMX-PCIe: MSI 0x%04x @%#x:%#x, irq = %d\n",
			msg.data, msg.address_hi,
			msg.address_lo, irq);
	return 0;
}
Example #10
static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	pte_t pte = { 0 };
	int my_cpu = smp_processor_id();
	int ret;

	if (usb_disabled())
		return -ENODEV;

	/*
	 * Try to initialize our GXIO context; if we can't, the device
	 * doesn't exist.
	 */
	if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 0) != 0)
		return -ENXIO;

	hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		ret = -ENOMEM;
		goto err_hcd;
	}

	/*
	 * We don't use rsrc_start to map in our registers, but seems like
	 * we ought to set it to something, so we use the register VA.
	 */
	hcd->rsrc_start =
		(ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
	hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
	hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);

	tilegx_start_ohc();

	/* Create our IRQs and register them. */
	pdata->irq = create_irq();
	if (pdata->irq < 0) {
		ret = -ENXIO;
		goto err_no_irq;
	}

	tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);

	/* Configure interrupts. */
	ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
					  cpu_x(my_cpu), cpu_y(my_cpu),
					  KERNEL_PL, pdata->irq);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	/* Register all of our memory. */
	pte = pte_set_home(pte, PAGE_HOME_HASH);
	ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	ohci_hcd_init(hcd_to_ohci(hcd));

	ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
	if (ret == 0) {
		platform_set_drvdata(pdev, hcd);
		return ret;
	}

err_have_irq:
	destroy_irq(pdata->irq);
err_no_irq:
	tilegx_stop_ohc();
	usb_put_hcd(hcd);
err_hcd:
	gxio_usb_host_destroy(&pdata->usb_ctx);
	return ret;
}
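
The driver's remove path would undo the probe in the reverse of the error-label order above; a sketch that assumes the usual usb_remove_hcd() step and otherwise reuses only the cleanup calls already present in the probe:

static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);

	usb_remove_hcd(hcd);
	destroy_irq(pdata->irq);
	tilegx_stop_ohc();
	usb_put_hcd(hcd);
	gxio_usb_host_destroy(&pdata->usb_ctx);
	platform_set_drvdata(pdev, NULL);

	return 0;
}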
Example #11
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        if ( !msi->table_base )
            msi->entry_nr = 1;
        irq = *index;
        if ( irq == -1 )
    case MAP_PIRQ_TYPE_MULTI_MSI:
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else if ( type == MAP_PIRQ_TYPE_MULTI_MSI )
        {
            if ( msi->entry_nr <= 0 || msi->entry_nr > 32 )
                ret = -EDOM;
            else if ( msi->entry_nr != 1 && !iommu_intremap )
                ret = -EOPNOTSUPP;
            else
            {
                while ( msi->entry_nr & (msi->entry_nr - 1) )
                    msi->entry_nr += msi->entry_nr & -msi->entry_nr;
                pirq = get_free_pirqs(d, msi->entry_nr);
                if ( pirq < 0 )
                {
                    while ( (msi->entry_nr >>= 1) > 1 )
                        if ( get_free_pirqs(d, msi->entry_nr) > 0 )
                            break;
                    dprintk(XENLOG_G_ERR, "dom%d: no block of %d free pirqs\n",
                            d->domain_id, msi->entry_nr << 1);
                    ret = pirq;
                }
            }
            if ( ret < 0 )
                goto done;
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
Example #12
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */

	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}
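
A teardown path would mirror the error handling above (sn_intr_free(), kfree(), destroy_irq()); a rough sketch that recomputes nasid/widget from the saved bridge pointer and deliberately ignores any extra per-irq bookkeeping the platform may need:

void sn_teardown_msi_irq(unsigned int irq)
{
	struct sn_irq_info *sn_irq_info = sn_msi_info[irq].sn_irq_info;
	u64 bridge = (u64)sn_irq_info->irq_bridge;
	nasid_t nasid = NASID_GET(bridge);
	int widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bridge) : SWIN_WIDGETNUM(bridge);

	/* Undo the setup path: prom interrupt, bookkeeping, then the irq itself. */
	sn_intr_free(nasid, widget, sn_irq_info);
	sn_msi_info[irq].sn_irq_info = NULL;
	kfree(sn_irq_info);
	destroy_irq(irq);
}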
Example #13
/*
 * This routine finds the first virtqueue described in the configuration of
 * this device and sets it up.
 */
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct kvm_device *kdev = to_kvmdev(vdev);
	struct kvm_vqinfo *vqi;
	struct kvm_vqconfig *config;
	struct virtqueue *vq;
	long irq;
	int err = -EINVAL;

	if (index >= kdev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	vqi = kzalloc(sizeof(*vqi), GFP_KERNEL);
	if (!vqi)
		return ERR_PTR(-ENOMEM);

	config = kvm_vq_config(kdev->desc)+index;

	vqi->config = config;
	vqi->pages = generic_remap_prot(config->pa,
				vring_size(config->num,
					KVM_TILE_VIRTIO_RING_ALIGN),
					0, io_prot());
	if (!vqi->pages) {
		err = -ENOMEM;
		goto out;
	}

	vq = vring_new_virtqueue(config->num, KVM_TILE_VIRTIO_RING_ALIGN,
				 vdev, 0, vqi->pages,
				 kvm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/*
	 * Trigger the IPI interrupt in SW way.
	 * TODO: We do not need to create one irq for each vq. A bit wasteful.
	 */
	irq = create_irq();
	if (irq < 0) {
		err = -ENXIO;
		goto del_virtqueue;
	}

	tile_irq_activate(irq, TILE_IRQ_SW_CLEAR);

	if (request_irq(irq, vring_interrupt, 0, dev_name(&vdev->dev), vq)) {
		err = -ENXIO;
		destroy_irq(irq);
		goto del_virtqueue;
	}

	config->irq = irq;

	vq->priv = vqi;
	return vq;

del_virtqueue:
	vring_del_virtqueue(vq);
unmap:
	vunmap(vqi->pages);
out:
	return ERR_PTR(err);
}
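
Freeing such a virtqueue would walk the same error path from the bottom of the function up; a sketch of that counterpart (the function name is hypothetical, the calls are the ones used above plus free_irq()):

static void kvm_del_vq(struct virtqueue *vq)
{
	struct kvm_vqinfo *vqi = vq->priv;

	/* Release the per-vq irq first, then the ring and its mapping. */
	free_irq(vqi->config->irq, vq);
	destroy_irq(vqi->config->irq);
	vring_del_virtqueue(vq);
	vunmap(vqi->pages);
	kfree(vqi);
}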
Example #14
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        irq = *index;
        if ( irq == -1 )
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != *pirq_p )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, *index, *pirq_p);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = *pirq_p;
    }

    ret = map_domain_pirq(d, pirq, irq, type, map_data);
    if ( ret == 0 )
        *pirq_p = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (type == MAP_PIRQ_TYPE_MSI) && (*index == -1) )
        destroy_irq(irq);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
Example #15
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int pirq, irq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    ret = rcu_lock_target_domain_by_id(map->domid, &d);
    if ( ret )
        return ret;

    if ( map->domid == DOMID_SELF && is_hvm_domain(d) )
    {
        ret = physdev_hvm_map_pirq(d, map);
        goto free_domain;
    }

    if ( !IS_PRIV_FOR(current->domain, d) )
    {
        ret = -EPERM;
        goto free_domain;
    }

    /* Verify or get irq. */
    switch ( map->type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( map->index < 0 || map->index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, map->index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, map->index);
        if ( irq <= 0 )
        {
            if ( IS_PRIV(current->domain) )
                irq = map->index;
            else {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        irq = map->index;
        if ( irq == -1 )
            irq = create_irq();

        if ( irq < 0 || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        _msi.bus = map->bus;
        _msi.devfn = map->devfn;
        _msi.entry_nr = map->entry_nr;
        _msi.table_base = map->table_base;
        _msi.irq = irq;
        map_data = &_msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, map->type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( map->pirq < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq,
                    pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, irq, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        destroy_irq(irq);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
Example #16
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
	return irq;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */
	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}