Example #1
/**
 * sn_dma_map_single_attrs - map a single page for DMA
 * @dev: device to map for
 * @cpu_addr: kernel virtual address of the region to map
 * @size: size of the region
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region pointed to by @cpu_addr for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
                   size_t size, int direction,
                   struct dma_attrs *attrs)
{
    dma_addr_t dma_addr;
    unsigned long phys_addr;
    struct pci_dev *pdev = to_pci_dev(dev);
    struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
    int dmabarr;

    dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

    BUG_ON(dev->bus != &pci_bus_type);

    phys_addr = __pa(cpu_addr);
    if (dmabarr)
        dma_addr = provider->dma_map_consistent(pdev, phys_addr,
                            size, SN_DMA_ADDR_PHYS);
    else
        dma_addr = provider->dma_map(pdev, phys_addr, size,
                         SN_DMA_ADDR_PHYS);

    if (!dma_addr) {
        printk(KERN_ERR "%s: out of ATEs\n", __func__);
        return 0;
    }
    return dma_addr;
}
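A minimal usage sketch for the write-barrier path described in the comment above. The caller below is hypothetical; it assumes the old struct dma_attrs API from <linux/dma-attrs.h> (DEFINE_DMA_ATTRS()/dma_set_attr()) that sn_dma_map_single_attrs() was written against.

#include <linux/dma-attrs.h>

/* Hypothetical caller: map @buf so writes force a flush of pending DMA. */
static dma_addr_t map_with_write_barrier(struct device *dev, void *buf,
					 size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	/* Selects the dma_map_consistent() path inside the mapper. */
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	/* Returns 0 when the bridge is out of ATEs. */
	return sn_dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, &attrs);
}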
Example #2
/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sg: scatterlist to map
 * @nhwentries: number of entries
 * @direction: direction of the DMA transaction
 *
 * Maps each entry of @sg for DMA.
 */
int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  int direction)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for (i = 0; i < nhwentries; i++, sg++) {
		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		sg->dma_address = provider->dma_map(pdev,
						    phys_addr, sg->length);

		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, direction);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
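A hedged sketch of a caller for sn_dma_map_sg(); the function name and error policy below are illustrative, not from the original source. Note that on failure the mapper has already unwound any partial mappings, so the caller only has to report the error.

/* Hypothetical caller: map @nents entries, bail out cleanly on failure. */
static int start_sg_dma(struct device *dev, struct scatterlist *sg, int nents)
{
	int mapped = sn_dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;	/* out of ATEs; partial mappings already freed */

	/* ... hand sg[0..mapped-1].dma_address to the hardware ... */
	return mapped;
}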
Example #3
void sn_teardown_msi_irq(unsigned int irq)
{
	nasid_t nasid;
	int widget;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft;
	struct sn_pcibus_provider *provider;

	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;

	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	(*provider->dma_unmap)(pdev,
			       sn_msi_info[irq].pci_addr,
			       PCI_DMA_FROMDEVICE);
	sn_msi_info[irq].pci_addr = 0;

	bussoft = SN_PCIDEV_BUSSOFT(pdev);
	nasid = NASID_GET(bussoft->bs_base);
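	/* On SN, TIO nodes have odd NASIDs, hence the TIO widget decode below. */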
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_intr_free(nasid, widget, sn_irq_info);
	sn_msi_info[irq].sn_irq_info = NULL;

	destroy_irq(irq);
}
Example #4
static int sn_set_msi_irq_affinity(struct irq_data *data,
				   const struct cpumask *cpu_mask, bool force)
{
	struct msi_msg msg;
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;
	unsigned int cpu, irq = data->irq;

	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return -1;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */

	__get_cached_msi_msg(data->msi_desc, &msg);
	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[irq].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);

	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[irq].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return -1;

	/*
	 * Map the xio address into bus space
	 */

	bus_addr = (*provider->dma_map_consistent)(pdev,
					new_irq_info->irq_xtalkaddr,
					sizeof(new_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);

	sn_msi_info[irq].pci_addr = bus_addr;
	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(data->affinity, cpu_mask);

	return 0;
}
Example #5
/**
 * sn_dma_unmap_single - unmap a DMA mapped page
 * @dev: device to sync
 * @dma_addr: DMA address to sync
 * @size: size of region
 * @direction: DMA direction
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain.  On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			 int direction)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, direction);
}
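For completeness, a hypothetical map/unmap pairing: every streaming mapping created by sn_dma_map_single() (Example #12 below) is released through this routine once the transfer completes.

/* Hypothetical pairing: map for one transfer, then release the ATEs. */
static void one_shot_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = sn_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (!handle)
		return;		/* out of ATEs */

	/* ... run the transfer and wait for completion ... */

	sn_dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}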
Example #6
/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t dma_handle)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
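Drivers normally reach this through the generic DMA API rather than calling it directly. A hypothetical teardown sketch (the matching allocation appears in Example #9):

/* Hypothetical driver teardown: the generic wrapper dispatches here. */
static void free_cmd_ring(struct pci_dev *pdev, size_t size,
			  void *ring, dma_addr_t ring_dma)
{
	dma_free_coherent(&pdev->dev, size, ring, ring_dma);
}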
Example #7
/**
 * sn_dma_unmap_page - unmap a DMA mapped page
 * @dev: device to sync
 * @dma_addr: DMA address to sync
 * @size: size of region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain.  On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, dir);
}
Example #8
static void
sn_msi_target(unsigned int vector, unsigned int cpu,
	      u32 *addr_hi, u32 *addr_lo)
{
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;

	sn_irq_info = sn_msi_info[vector].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */

	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(*addr_hi) << 32 | (u64)(*addr_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[vector].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);

	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[vector].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return;

	/*
	 * Map the xio address into bus space
	 */

	bus_addr = (*provider->dma_map_consistent)(pdev,
					new_irq_info->irq_xtalkaddr,
					sizeof(new_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);

	sn_msi_info[vector].pci_addr = bus_addr;
	*addr_hi = (u32)(bus_addr >> 32);
	*addr_lo = (u32)(bus_addr & 0x00000000ffffffff);
}
Example #9
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t * dma_handle, gfp_t flags)
{
    void *cpuaddr;
    unsigned long phys_addr;
    int node;
    struct pci_dev *pdev = to_pci_dev(dev);
    struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

    BUG_ON(dev->bus != &pci_bus_type);

    /*
     * Allocate the memory.
     */
    node = pcibus_to_node(pdev->bus);
    if (likely(node >= 0)) {
        struct page *p = alloc_pages_node(node, flags, get_order(size));

        if (likely(p))
            cpuaddr = page_address(p);
        else
            return NULL;
    } else
        cpuaddr = (void *)__get_free_pages(flags, get_order(size));

    if (unlikely(!cpuaddr))
        return NULL;

    memset(cpuaddr, 0x0, size);

    /* physical addr. of the memory we just got */
    phys_addr = __pa(cpuaddr);

    /*
     * 64 bit address translations should never fail.
     * 32 bit translations can fail if there are insufficient mapping
     * resources.
     */

    *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                           SN_DMA_ADDR_PHYS);
    if (!*dma_handle) {
        printk(KERN_ERR "%s: out of ATEs\n", __func__);
        free_pages((unsigned long)cpuaddr, get_order(size));
        return NULL;
    }

    return cpuaddr;
}
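A hypothetical allocation to go with the kernel-doc above: a driver asks the generic API for a command ring, and on SN the request is routed to sn_dma_alloc_coherent().

/* Hypothetical driver setup: allocate a zeroed, coherent command ring. */
static void *alloc_cmd_ring(struct pci_dev *pdev, size_t size,
			    dma_addr_t *ring_dma)
{
	return dma_alloc_coherent(&pdev->dev, size, ring_dma, GFP_KERNEL);
}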
Example #10
/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sg: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @direction: DMA direction
 *
 * Unmap a set of streaming mode DMA translations.
 */
void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		     int nhwentries, int direction)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	for (i = 0; i < nhwentries; i++, sg++) {
		provider->dma_unmap(pdev, sg->dma_address, direction);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}
Example #11
/**
 * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
               int nhwentries, int direction,
               struct dma_attrs *attrs)
{
    int i;
    struct pci_dev *pdev = to_pci_dev(dev);
    struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
    struct scatterlist *sg;

    BUG_ON(dev->bus != &pci_bus_type);

    for_each_sg(sgl, sg, nhwentries, i) {
        provider->dma_unmap(pdev, sg->dma_address, direction);
        sg->dma_address = (dma_addr_t) NULL;
        sg->dma_length = 0;
    }
}
Example #12
/**
 * sn_dma_map_single - map a single page for DMA
 * @dev: device to map for
 * @cpu_addr: kernel virtual address of the region to map
 * @size: size of the region
 * @direction: DMA direction
 *
 * Map the region pointed to by @cpu_addr for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
			     int direction)
{
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	dma_addr = provider->dma_map(pdev, phys_addr, size);
	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
		return 0;
	}
	return dma_addr;
}
Example #13
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */

	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}
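The address_hi/address_lo split at the end mirrors how sn_set_msi_irq_affinity() (Example #4) reassembles the bus address; a hypothetical helper makes the round trip explicit.

/* Hypothetical helper: the inverse of the address_hi/address_lo split above. */
static u64 sn_msi_msg_to_bus_addr(const struct msi_msg *msg)
{
	return ((u64)msg->address_hi << 32) | msg->address_lo;
}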
Example #14
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
		return irq;

	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}
Example #15
int
sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
	     u32 *addr_hi, u32 *addr_lo, u32 *data)
{
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */

	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info)
		return -ENOMEM;

	status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		return -ENOMEM;
	}

	sn_msi_info[vector].sn_irq_info = sn_irq_info;
	sn_msi_info[vector].pci_addr = bus_addr;

	*addr_hi = (u32)(bus_addr >> 32);
	*addr_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	*data = 0x100 + (unsigned int)vector;

#ifdef CONFIG_SMP
	set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0);
#endif

	return 0;
}