Example #1
void
mlreset(int slave)
{
	if (!slave) {
		/*
		 * We are the master cpu and node.
		 */ 
		master_nasid = get_nasid();
		set_master_bridge_base();

		/* We're the master processor */
		master_procid = smp_processor_id();
		master_nasid = cpuid_to_nasid(master_procid);

		/*
		 * master_nasid we get back better be same as one from
		 * get_nasid()
		 */
		ASSERT_ALWAYS(master_nasid == get_nasid());

		/* early initialization of iograph */
		iograph_early_init();

		/* Initialize Hub Pseudodriver Management */
		hubdev_init();

	} else { /* slave != 0 */
		/*
		 * This code is performed ONLY by slave processors.
		 */

	}
}
Example #2
static int sn_set_msi_irq_affinity(struct irq_data *data,
				   const struct cpumask *cpu_mask, bool force)
{
	struct msi_msg msg;
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;
	unsigned int cpu, irq = data->irq;

	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return -1;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */

	__get_cached_msi_msg(data->msi_desc, &msg);
	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[irq].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);

	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[irq].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return -1;

	/*
	 * Map the xio address into bus space
	 */

	bus_addr = (*provider->dma_map_consistent)(pdev,
					new_irq_info->irq_xtalkaddr,
					sizeof(new_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);

	sn_msi_info[irq].pci_addr = bus_addr;
	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(data->affinity, cpu_mask);

	return 0;
}
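
A hedged sketch of how such a handler is typically wired up (not part of the example above; the field names follow the generic Linux struct irq_chip interface from <linux/irq.h>, and the chip name here is an assumption):

/* needs <linux/irq.h> for struct irq_chip */
static struct irq_chip sn_msi_irq_chip_sketch = {
	.name			= "SN-PCI-MSI",		/* assumed name */
	.irq_set_affinity	= sn_set_msi_irq_affinity,
	/* mask/unmask/ack callbacks omitted from this sketch */
};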
Example #3
int
nasid_slice_to_cpuid(int nasid, int slice)
{
	long cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpuid_to_nasid(cpu) == nasid &&
					cpuid_to_slice(cpu) == slice)
			return cpu;

	return -1;
}
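
A minimal hedged self-check sketch (not part of the original source; assumes the usual SN2 headers plus cpu_online() are available): since nasid_slice_to_cpuid() is the inverse lookup of cpuid_to_nasid()/cpuid_to_slice(), translating an online cpu's id to its (nasid, slice) pair and back should yield the same cpu.

static void
nasid_slice_roundtrip_check(void)
{
	long cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;	/* skip holes in the cpu map */
		if (nasid_slice_to_cpuid(cpuid_to_nasid(cpu),
					 cpuid_to_slice(cpu)) != cpu)
			printk("cpu %ld does not round-trip\n", cpu);
	}
}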
Example #4
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	nasid_t nasid;
	int slice;

	nasid = cpuid_to_nasid(first_cpu(mask));
	slice = cpuid_to_slice(first_cpu(mask));

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
Example #5
static void
sn_msi_target(unsigned int vector, unsigned int cpu,
	      u32 *addr_hi, u32 *addr_lo)
{
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;

	sn_irq_info = sn_msi_info[vector].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */

	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(*addr_hi) << 32 | (u64)(*addr_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[vector].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);

	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[vector].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return;

	/*
	 * Map the xio address into bus space
	 */

	bus_addr = (*provider->dma_map_consistent)(pdev,
					new_irq_info->irq_xtalkaddr,
					sizeof(new_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);

	sn_msi_info[vector].pci_addr = bus_addr;
	*addr_hi = (u32)(bus_addr >> 32);
	*addr_lo = (u32)(bus_addr & 0x00000000ffffffff);
}
Example #6
static int sn_set_affinity_irq(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	unsigned int irq = data->irq;
	nasid_t nasid;
	int slice;

	nasid = cpuid_to_nasid(cpumask_first(mask));
	slice = cpuid_to_slice(cpumask_first(mask));

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);

	return 0;
}
Example #7
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	int cpuid, cpuphys;

	cpuid = first_cpu(mask);
	cpuphys = cpu_physical_id(cpuid);

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list) {
		u64 bridge;
		int local_widget, status;
		nasid_t local_nasid;
		struct sn_irq_info *new_irq_info;
		struct sn_pcibus_provider *pci_provider;

		new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
		if (new_irq_info == NULL)
			break;
		memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

		bridge = (u64) new_irq_info->irq_bridge;
		if (!bridge) {
			kfree(new_irq_info);
			break; /* irq is not a device interrupt */
		}

		local_nasid = NASID_GET(bridge);

		if (local_nasid & 1)
			local_widget = TIO_SWIN_WIDGETNUM(bridge);
		else
			local_widget = SWIN_WIDGETNUM(bridge);

		/* Free the old PROM new_irq_info structure */
		sn_intr_free(local_nasid, local_widget, new_irq_info);
		/* Update kernels new_irq_info with new target info */
		unregister_intr_pda(new_irq_info);

		/* allocate a new PROM new_irq_info struct */
		status = sn_intr_alloc(local_nasid, local_widget,
				       __pa(new_irq_info), irq,
				       cpuid_to_nasid(cpuid),
				       cpuid_to_slice(cpuid));

		/* SAL call failed */
		if (status) {
			kfree(new_irq_info);
			break;
		}

		new_irq_info->irq_cpuid = cpuid;
		register_intr_pda(new_irq_info);

		pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
		if (pci_provider && pci_provider->target_interrupt)
			(pci_provider->target_interrupt)(new_irq_info);

		spin_lock(&sn_irq_info_lock);
		list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
		spin_unlock(&sn_irq_info_lock);
		call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

#ifdef CONFIG_SMP
		set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
	}
}
Example #8
static int
xp_cpu_to_nasid_sn2(int cpuid)
{
	return cpuid_to_nasid(cpuid);
}
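
A minimal hedged usage sketch (an assumption, not taken from the original source; it would have to live in the same file, since the wrapper is static): ask which NASID the executing cpu's node has. smp_processor_id() is only stable here if the caller has preemption disabled.

static int
xp_local_nasid_example(void)
{
	/* translate the executing cpu's id into its node's NASID */
	return xp_cpu_to_nasid_sn2(smp_processor_id());
}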
Example #9
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, nasid_array = 0;
	int i, ret;


	/* get the local reserved page's address */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->nasids_size = 128;
	}
	xp_nasid_mask_bytes = rp->nasids_size;
	xp_nasid_mask_words = xp_nasid_mask_bytes / 8;

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
	xpc_vars = XPC_RP_VARS(rp);
	xpc_vars_part = XPC_RP_VARS_PART(rp);

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded.  To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu.  This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				uncached_free_page(__IA64_UNCACHED_OFFSET |
						   TO_PHYS((u64) amos_page));
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	/* clear xpc_vars */
	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
	xpc_vars->amos_page = amos_page;  /* save for next load of XPC */


	/* clear xpc_vars_part */
	memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
							XP_MAX_PARTITIONS);

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < xp_nasid_mask_words; i++) {
		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
	}

	/* initialize the engaged remote partitions related AMO variables */
	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* timestamp of when reserved page was setup by XPC */
	rp->stamp = CURRENT_TIME;

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
Example #10
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, next_cl, nasid_array = 0;
	int i, ret;


	/* get the local reserved page's address */

	rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
					(u64) xpc_remote_copy_buffer,
						XPC_RSVD_PAGE_ALIGNED_SIZE);
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/*
	 * Place the XPC variables on the cache line following the
	 * reserved page structure.
	 */
	next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
	xpc_vars = (struct xpc_vars *) next_cl;

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded.  To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) mspec_kalloc_page(0);
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu.  This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				mspec_kfree_page((unsigned long) amos_page);
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	/*
	 * Place the XPC per partition specific variables on the cache line
	 * following the XPC variables structure.
	 */
	next_cl += XPC_VARS_ALIGNED_SIZE;
	memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
							XP_MAX_PARTITIONS);
	xpc_vars_part = (struct xpc_vars_part *) next_cl;
	xpc_vars->vars_part_pa = __pa(next_cl);

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->amos_page = amos_page;  /* save for next load of XPC */


	/*
	 * Initialize the activation related AMO variables.
	 */
	xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS);
	for (i = 1; i < XP_NASID_MASK_WORDS; i++) {
		xpc_IPI_init(i + XP_MAX_PARTITIONS);
	}
	/* export AMO page's physical address to other partitions */
	xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
Example #11
void
setclear_mask_a(int irq, int cpuid, int set)
{
	int synergy;
	int nasid;
	int reg_num;
	unsigned long mask;
	unsigned long addr;
	unsigned long reg;
	unsigned long val;
	int my_cnode, my_synergy;
	int target_cnode, target_synergy;

	/*
	 * Perform some idiot checks ..
	 */
	if ((irq < 0) || (irq > 255) ||
	    (cpuid < 0) || (cpuid > 512)) {
		printk("clear_mask_a: Invalid parameter irq %d cpuid %d\n", irq, cpuid);
		return;
	}

	target_cnode = cpuid_to_cnodeid(cpuid);
	target_synergy = cpuid_to_synergy(cpuid);
	my_cnode = cpuid_to_cnodeid(smp_processor_id());
	my_synergy = cpuid_to_synergy(smp_processor_id());

	reg_num = irq / 64;
	mask = 1;
	mask <<= (irq % 64);
	switch (reg_num) {
		case 0: 
			reg = VEC_MASK0A;
			addr = VEC_MASK0A_ADDR;
			break;
		case 1: 
			reg = VEC_MASK1A;
			addr = VEC_MASK1A_ADDR;
			break;
		case 2: 
			reg = VEC_MASK2A;
			addr = VEC_MASK2A_ADDR;
			break;
		case 3: 
			reg = VEC_MASK3A;
			addr = VEC_MASK3A_ADDR;
			break;
		default:
			reg = addr = 0;
			break;
	}
	if (my_cnode == target_cnode && my_synergy == target_synergy) {
		// local synergy
		val = READ_LOCAL_SYNERGY_REG(addr);
		if (set) {
			val |= mask;
		} else {
			val &= ~mask;
		}
		WRITE_LOCAL_SYNERGY_REG(addr, val);
		val = READ_LOCAL_SYNERGY_REG(addr);
	} else { /* remote synergy */
		synergy = cpuid_to_synergy(cpuid);
		nasid = cpuid_to_nasid(cpuid);
		val = REMOTE_SYNERGY_LOAD(nasid, synergy, reg);
		if (set) {
			val |= mask;
		} else {
			val &= ~mask;
		}
		REMOTE_SYNERGY_STORE(nasid, synergy, reg, val);
	}
}
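
A hedged usage sketch (an assumption based only on the function's name and parameters, not on the original source): set and then clear vector irq's bit in the A-side mask registers of the synergy that services cpuid.

static void
setclear_mask_a_example(int irq, int cpuid)
{
	setclear_mask_a(irq, cpuid, 1);		/* set the vector's mask bit */
	setclear_mask_a(irq, cpuid, 0);		/* clear it again */
}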
Example #12
void
synergy_perf_update(int cpu)
{
	nasid_t		nasid;
	cnodeid_t       cnode;
	struct nodepda_s *npdap;

	/*
	 * synergy_perf_initialized is set by synergy_perf_init()
	 * which is called last thing by sn_mp_setup(), i.e. well
	 * after nodepda has been initialized.
	 */
	if (!synergy_perf_initialized)
		return;

	cnode = cpuid_to_cnodeid(cpu);
	npdap = NODEPDA(cnode);

	if (npdap == NULL || cnode < 0 || cnode >= numnodes)
		/* this should not happen: still in early io init */
		return;

#if 0
	/* use this to check nodepda initialization */
	if (((uint64_t)npdap) & 0x7) {
		printk("\nERROR on cpu %d : cnode=%d, npdap == %p, not aligned\n", cpu, cnode, npdap);
		BUG();
	}
#endif

	if (npdap->synergy_perf_enabled == 0 || npdap->synergy_perf_data == NULL) {
		/* Not enabled, or no events to monitor */
		return;
	}

	if (npdap->synergy_inactive_intervals++ % npdap->synergy_perf_freq != 0) {
		/* don't multiplex on every timer interrupt */
		return;
	}

	/*
	 * Read registers for last interval and increment counters.
	 * Hold the per-node synergy_perf_lock so concurrent readers get
	 * consistent values.
	 */
	spin_lock_irq(&npdap->synergy_perf_lock);

	nasid = cpuid_to_nasid(cpu);
	npdap->synergy_active_intervals++;
	npdap->synergy_perf_data->intervals++;
	npdap->synergy_perf_data->total_intervals = npdap->synergy_active_intervals;

	npdap->synergy_perf_data->counts[0] += 0xffffffffffUL &
		REMOTE_SYNERGY_LOAD(nasid, 0, PERF_CNTR0_A);

	npdap->synergy_perf_data->counts[1] += 0xffffffffffUL &
		REMOTE_SYNERGY_LOAD(nasid, 1, PERF_CNTR0_B);

	/* skip to next in circular list */
	npdap->synergy_perf_data = npdap->synergy_perf_data->next;

	spin_unlock_irq(&npdap->synergy_perf_lock);

	/* set the counter 0 selection modes for both A and B */
	REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTL0_A, npdap->synergy_perf_data->modesel);
	REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTL0_B, npdap->synergy_perf_data->modesel);

	/* and reset the counter registers to zero */
	REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTR0_A, 0UL);
	REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTR0_B, 0UL);
}
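
A hedged sketch of the expected call site (an assumption; the original caller is not shown): the updater is driven periodically on each cpu, e.g. from the timer tick, and is passed the current cpu's id.

static void
synergy_perf_tick_example(void)
{
	synergy_perf_update(smp_processor_id());
}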
Example #13
/*
 * Allocate resources required for an interrupt as specified in dev_desc.
 * Returns a hub interrupt handle on success, or 0 on failure.
 */
static hub_intr_t
do_hub_intr_alloc(devfs_handle_t dev,		/* which crosstalk device */
		  device_desc_t dev_desc,	/* device descriptor */
		  devfs_handle_t owner_dev,	/* owner of this interrupt, if known */
		  int uncond_nothread)		/* unconditionally non-threaded */
{
	cpuid_t cpu = (cpuid_t)0;			/* cpu to receive interrupt */
        int cpupicked = 0;
	int bit;			/* interrupt vector */
	/*REFERENCED*/
	int intr_resflags = 0;
	hub_intr_t intr_hdl;
	cnodeid_t nodeid;		/* node to receive interrupt */
	/*REFERENCED*/
	nasid_t nasid;			/* nasid to receive interrupt */
	struct xtalk_intr_s *xtalk_info;
	iopaddr_t xtalk_addr;		/* xtalk addr on hub to set intr */
	xwidget_info_t xwidget_info;	/* standard crosstalk widget info handle */
	char *intr_name = NULL;
	ilvl_t intr_swlevel = (ilvl_t)0;
	extern int default_intr_pri;
	extern void synergy_intr_alloc(int, int);


	if (dev_desc) {
		if (dev_desc->flags & D_INTR_ISERR) {
			intr_resflags = II_ERRORINT;
		} else if (!uncond_nothread && !(dev_desc->flags & D_INTR_NOTHREAD)) {
			intr_resflags = II_THREADED;
		} else {
			/* Neither an error nor a thread. */
			intr_resflags = 0;
		}
	} else {
		intr_swlevel = default_intr_pri;
		if (!uncond_nothread)
			intr_resflags = II_THREADED;
	}

	/* XXX - Need to determine if the interrupt should be threaded. */

	/* If the cpu has not been picked already then choose a candidate 
	 * interrupt target and reserve the interrupt bit 
	 */
	if (!cpupicked) {
		cpu = intr_heuristic(dev,dev_desc,allocate_my_bit,
				     intr_resflags,owner_dev,
				     intr_name,&bit);
	}

	/* At this point we SHOULD have a valid cpu */
	if (cpu == CPU_NONE) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
		printk(KERN_WARNING  "%v hub_intr_alloc could not allocate interrupt\n",
			owner_dev);
#else
		printk(KERN_WARNING  "%p hub_intr_alloc could not allocate interrupt\n",
			(void *)owner_dev);
#endif
		return(0);

	}

	/* If the cpu has been picked already (due to the bridge data 
	 * corruption bug) then try to reserve an interrupt bit .
	 */
	if (cpupicked) {
		bit = intr_reserve_level(cpu, allocate_my_bit, 
					 intr_resflags, 
					 owner_dev, intr_name);
		if (bit < 0) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
			printk(KERN_WARNING  "Could not reserve an interrupt bit for cpu "
				" %d and dev %v\n",
				cpu,owner_dev);
#else
			printk(KERN_WARNING  "Could not reserve an interrupt bit for cpu "
				" %d and dev %p\n",
				(int)cpu, (void *)owner_dev);
#endif
				
			return(0);
		}
	}

	nodeid = cpuid_to_cnodeid(cpu);
	nasid = cpuid_to_nasid(cpu);
	xtalk_addr = HUBREG_AS_XTALKADDR(nasid, PIREG(PI_INT_PEND_MOD, cpuid_to_subnode(cpu)));

	/*
	 * Allocate an interrupt handle, and fill it in.  There are two
	 * pieces to an interrupt handle: the piece needed by generic
	 * xtalk code which is used by crosstalk device drivers, and
	 * the piece needed by low-level IP27 hardware code.
	 */
	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, nodeid);
	ASSERT_ALWAYS(intr_hdl);

	/* 
	 * Fill in xtalk information for generic xtalk interfaces that
	 * operate on xtalk_intr_hdl's.
	 */
	xtalk_info = &intr_hdl->i_xtalk_info;
	xtalk_info->xi_dev = dev;
	xtalk_info->xi_vector = bit;
	xtalk_info->xi_addr = xtalk_addr;

	/*
	 * Regardless of which CPU we ultimately interrupt, a given crosstalk
	 * widget always handles interrupts (and PIO and DMA) through its 
	 * designated "master" crosstalk provider.
	 */
	xwidget_info = xwidget_info_get(dev);
	if (xwidget_info)
		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);

	/* Fill in low level hub information for hub_* interrupt interface */
	intr_hdl->i_swlevel = intr_swlevel;
	intr_hdl->i_cpuid = cpu;
	intr_hdl->i_bit = bit;
	intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;

	/* Store the actual interrupt priority level & interrupt target
	 * cpu back in the device descriptor.
	 */
	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
	synergy_intr_alloc((int)bit, (int)cpu);
	return(intr_hdl);
}