Example #1
static void
sn1_handle_irq(int irq, void *dummy, struct pt_regs *regs)
{
	int bit, cnode;
	struct sn1_cnode_action_list *alp;
	struct sn1_intr_action *ap;
	void (*handler)(int, void *, struct pt_regs *);
	unsigned long flags = 0;
	int cpuid = smp_processor_id();


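	/*
	 * Translate the irq to its interrupt-pend bit, ack it on the local
	 * hub, then run every handler registered on this node's action list
	 * for this irq.
	 */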
	bit = irq_to_bit_pos(irq);
	LOCAL_HUB_CLR_INTR(bit);
	cnode = cpuid_to_cnodeid(cpuid);
	alp = sn1_node_actions[cnode];
	ap = alp[irq].action_list;
	if (ap == NULL) {
		return;
	}
	while (ap) {
		flags |= ap->flags;
		handler = ap->handler;
		(*handler)(irq, ap->intr_arg, regs);
		ap = ap->next;
	}
	if ((flags & SA_SAMPLE_RANDOM) != 0)
		add_interrupt_randomness(irq);

	return;
}
Example #2
static hub_intr_t
do_hub_intr_alloc(devfs_handle_t dev,
		device_desc_t dev_desc,
		devfs_handle_t owner_dev,
		int uncond_nothread)
{
	cpuid_t		cpu = 0;
	int		vector;
	hub_intr_t	intr_hdl;
	cnodeid_t	cnode;
	int		cpuphys, slice;
	int		nasid;
	iopaddr_t	xtalk_addr;
	struct xtalk_intr_s	*xtalk_info;
	xwidget_info_t	xwidget_info;
	ilvl_t		intr_swlevel = 0;

	cpu = intr_heuristic(dev, dev_desc, -1, 0, owner_dev, NULL, &vector);

	if (cpu == CPU_NONE) {
		printk("Unable to allocate interrupt for 0x%p\n", (void *)owner_dev);
		return(0);
	}

	cpuphys = cpu_physical_id(cpu);
	slice = cpu_physical_id_to_slice(cpuphys);
	nasid = cpu_physical_id_to_nasid(cpuphys);
	cnode = cpuid_to_cnodeid(cpu);

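	/*
	 * Build the interrupt target address: the INT0 or INT1 register
	 * (chosen by cpu slice) in the destination node's global MMR space.
	 */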
	if (slice) {
		xtalk_addr = SH_II_INT1 | GLOBAL_MMR_SPACE |
			((unsigned long)nasid << 36) | (1UL << 47);
	} else {
		xtalk_addr = SH_II_INT0 | GLOBAL_MMR_SPACE |
			((unsigned long)nasid << 36) | (1UL << 47);
	}

	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, cnode);
	ASSERT_ALWAYS(intr_hdl);

	xtalk_info = &intr_hdl->i_xtalk_info;
	xtalk_info->xi_dev = dev;
	xtalk_info->xi_vector = vector;
	xtalk_info->xi_addr = xtalk_addr;

	xwidget_info = xwidget_info_get(dev);
	if (xwidget_info) {
		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
	}

	intr_hdl->i_swlevel = intr_swlevel;
	intr_hdl->i_cpuid = cpu;
	intr_hdl->i_bit = vector;
	intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;

	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
	return(intr_hdl);
}
Example #3
/*
 * Set up the platform-dependent fields in the processor pda.
 * Must be done _after_ init_platform_nodepda().
 * If we need a lock here, something else is wrong!
 */
void init_platform_pda(cpuid_t cpu)
{
#if defined(CONFIG_IA64_SGI_SN1)
	hub_intmasks_t *intmasks;
	int i, subnode;
	cnodeid_t	cnode;
	synergy_da_t	*sda;
	int	which_synergy;


	cnode = cpuid_to_cnodeid(cpu);
	which_synergy = cpuid_to_synergy(cpu);

	sda = Synergy_da_indr[(cnode * 2) + which_synergy];
	intmasks = &sda->s_intmasks;

	/* Clear INT_PEND0 masks. */
	for (i = 0; i < N_INTPEND0_MASKS; i++)
		intmasks->intpend0_masks[i] = 0;

	/* Set up pointer to the vector block in the nodepda. */
	/* (Can't use SUBNODEPDA - not working yet) */
	subnode = cpuid_to_subnode(cpu);
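	/*
	 * Compute the dispatch pointers both directly from the nodepda and
	 * via SUBNODEPDA, and panic if they disagree; the SUBNODEPDA values
	 * are the ones that are kept.
	 */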
	intmasks->dispatch0 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch0;
	intmasks->dispatch1 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch1;
	if (intmasks->dispatch0 != &SUBNODEPDA(cnode, subnode)->intr_dispatch0 ||
	    intmasks->dispatch1 != &SUBNODEPDA(cnode, subnode)->intr_dispatch1)
		panic("init_platform_pda: intr_dispatch pointer mismatch");
	intmasks->dispatch0 = &SUBNODEPDA(cnode, subnode)->intr_dispatch0;
	intmasks->dispatch1 = &SUBNODEPDA(cnode, subnode)->intr_dispatch1;

	/* Clear INT_PEND1 masks. */
	for (i = 0; i < N_INTPEND1_MASKS; i++)
		intmasks->intpend1_masks[i] = 0;
#endif	/* CONFIG_IA64_SGI_SN1 */
}
Example #4
int          
sn1_request_irq (unsigned int requested_irq, void (*handler)(int, void *, struct pt_regs *),
             unsigned long irqflags, const char * devname, void *dev_id)
{ 
	devfs_handle_t curr_dev;
	devfs_handle_t dev;
	pciio_intr_t intr_handle;
	pciio_intr_line_t line;
	device_desc_t dev_desc;
	int cpuid, bit, cnode;
	struct sn1_intr_action *ap, *new_ap;
	struct sn1_cnode_action_list *alp;
	int irq;

	if ( (requested_irq & 0xff) == 0 ) {
		int ret;

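		/*
		 * The low byte of the irq is zero: this interrupt belongs to a
		 * PCI device.  Look up the device, allocate a pciio interrupt
		 * and append a new entry to the per-node action list before
		 * connecting the handler.
		 */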
		sgi_pci_intr_support(requested_irq,
			&dev_desc, &dev, &line, &curr_dev);
		intr_handle = pciio_intr_alloc(curr_dev, NULL, line, curr_dev);
		bit = intr_handle->pi_irq;
		cpuid = intr_handle->pi_cpu;
		irq = bit_pos_to_irq(bit);
		cnode = cpuid_to_cnodeid(cpuid);
		new_ap = (struct sn1_intr_action *)kmalloc(
			sizeof(struct sn1_intr_action), GFP_KERNEL);
		if (!new_ap)
			return(-ENOMEM);
		irq_desc[irq].status = 0;
		new_ap->handler = handler;
		new_ap->intr_arg = dev_id;
		new_ap->flags = irqflags;
		new_ap->next = NULL;
		alp = sn1_node_actions[cnode];

		spin_lock(&alp[irq].action_list_lock);
		ap = alp[irq].action_list;
		/* check action list for "share" consistency */
		while (ap) {
			if (!(ap->flags & irqflags & SA_SHIRQ)) {
				spin_unlock(&alp[irq].action_list_lock);
				kfree(new_ap);
				return(-EBUSY);
			}
			ap = ap->next;
		}
		ap = alp[irq].action_list;
		if (ap) {
			while (ap->next) {
				ap = ap->next;
			}
			ap->next = new_ap;
		} else {
			alp[irq].action_list = new_ap;
		}
		ret = pciio_intr_connect(intr_handle, (intr_func_t)handler, dev_id, NULL);
		if (ret) { /* connect failed, unlink and free the entry we added. */
			ap = alp[irq].action_list;
			if (ap == new_ap) {
				alp[irq].action_list = NULL;
			} else {
				while (ap->next != new_ap)
					ap = ap->next;
				ap->next = NULL;
			}
			kfree(new_ap);
		}
			
		spin_unlock(&alp[irq].action_list_lock);
		return(ret);
	} else {
		return(request_irq(requested_irq, handler, irqflags, devname, dev_id));
	}
}
Example #5
File: module.c  Project: nhanh0/hah
elsc_t *get_elsc(void)
{
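	/* Map the executing cpu to its compact node id and return the elsc
	 * structure embedded in that node's module. */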
	return &NODEPDA(cpuid_to_cnodeid(smp_processor_id()))->module->elsc;
}
Example #6
void
setclear_mask_a(int irq, int cpuid, int set)
{
	int synergy;
	int nasid;
	int reg_num;
	unsigned long mask;
	unsigned long addr;
	unsigned long reg;
	unsigned long val;
	int my_cnode, my_synergy;
	int target_cnode, target_synergy;

	/*
	 * Perform some sanity checks.
	 */
	if ((irq < 0) || (irq > 255) ||
	    (cpuid < 0) || (cpuid > 512)) {
		printk("setclear_mask_a: Invalid parameter irq %d cpuid %d\n", irq, cpuid);
		return;
	}

	target_cnode = cpuid_to_cnodeid(cpuid);
	target_synergy = cpuid_to_synergy(cpuid);
	my_cnode = cpuid_to_cnodeid(smp_processor_id());
	my_synergy = cpuid_to_synergy(smp_processor_id());

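	/*
	 * Each mask-A register covers 64 interrupt vectors: pick the register
	 * (and its local-access address) and the bit within it for this irq.
	 */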
	reg_num = irq / 64;
	mask = 1;
	mask <<= (irq % 64);
	switch (reg_num) {
		case 0: 
			reg = VEC_MASK0A;
			addr = VEC_MASK0A_ADDR;
			break;
		case 1: 
			reg = VEC_MASK1A;
			addr = VEC_MASK1A_ADDR;
			break;
		case 2: 
			reg = VEC_MASK2A;
			addr = VEC_MASK2A_ADDR;
			break;
		case 3: 
			reg = VEC_MASK3A;
			addr = VEC_MASK3A_ADDR;
			break;
		default:
			reg = addr = 0;
			break;
	}
	if (my_cnode == target_cnode && my_synergy == target_synergy) {
		// local synergy
		val = READ_LOCAL_SYNERGY_REG(addr);
		if (set) {
			val |= mask;
		} else {
			val &= ~mask;
		}
		WRITE_LOCAL_SYNERGY_REG(addr, val);
		val = READ_LOCAL_SYNERGY_REG(addr);
	} else { /* remote synergy */
		synergy = cpuid_to_synergy(cpuid);
		nasid = cpuid_to_nasid(cpuid);
		val = REMOTE_SYNERGY_LOAD(nasid, synergy, reg);
		if (set) {
			val |= mask;
		} else {
			val &= ~mask;
		}
		REMOTE_SYNERGY_STORE(nasid, synergy, reg, val);
	}
}
Example #7
void
synergy_perf_update(int cpu)
{
	nasid_t		nasid;
	cnodeid_t       cnode;
	struct nodepda_s *npdap;

	/*
	 * synergy_perf_initialized is set by synergy_perf_init()
	 * which is called last thing by sn_mp_setup(), i.e. well
	 * after nodepda has been initialized.
	 */
	if (!synergy_perf_initialized)
		return;

	cnode = cpuid_to_cnodeid(cpu);
	if (cnode < 0 || cnode >= numnodes) {
		/* this should not happen: still in early io init */
		return;
	}

	npdap = NODEPDA(cnode);
	if (npdap == NULL)
		return;

#if 0
	/* use this to check nodepda initialization */
	if (((uint64_t)npdap) & 0x7) {
		printk("\nERROR on cpu %d : cnode=%d, npdap == %p, not aligned\n", cpu, cnode, npdap);
		BUG();
	}
#endif

	if (npdap->synergy_perf_enabled == 0 || npdap->synergy_perf_data == NULL) {
		/* Not enabled, or no events to monitor */
		return;
	}

	if (npdap->synergy_inactive_intervals++ % npdap->synergy_perf_freq != 0) {
		/* don't multiplex on every timer interrupt */
		return;
	}

	/*
	 * Read registers for last interval and increment counters.
	 * Hold the per-node synergy_perf_lock so concurrent readers get
	 * consistent values.
	 */
	spin_lock_irq(&npdap->synergy_perf_lock);

	nasid = cpuid_to_nasid(cpu);
	npdap->synergy_active_intervals++;
	npdap->synergy_perf_data->intervals++;
	npdap->synergy_perf_data->total_intervals = npdap->synergy_active_intervals;

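	/* Accumulate the 40-bit counter values (0xffffffffff masks off
	 * anything above bit 39). */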
	npdap->synergy_perf_data->counts[0] += 0xffffffffffUL &
		REMOTE_SYNERGY_LOAD(nasid, 0, PERF_CNTR0_A);

	npdap->synergy_perf_data->counts[1] += 0xffffffffffUL &
		REMOTE_SYNERGY_LOAD(nasid, 1, PERF_CNTR0_B);

	/* skip to next in circular list */
	npdap->synergy_perf_data = npdap->synergy_perf_data->next;

	spin_unlock_irq(&npdap->synergy_perf_lock);

	/* set the counter 0 selection modes for both A and B */
	REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTL0_A, npdap->synergy_perf_data->modesel);
	REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTL0_B, npdap->synergy_perf_data->modesel);

	/* and reset the counter registers to zero */
	REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTR0_A, 0UL);
	REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTR0_B, 0UL);
}
Example #8
/*
 * Allocate resources required for an interrupt as specified in dev_desc.
 * Returns a hub interrupt handle on success, or 0 on failure.
 */
static hub_intr_t
do_hub_intr_alloc(devfs_handle_t dev,		/* which crosstalk device */
		  device_desc_t dev_desc,	/* device descriptor */
		  devfs_handle_t owner_dev,	/* owner of this interrupt, if known */
		  int uncond_nothread)		/* unconditionally non-threaded */
{
	cpuid_t cpu = (cpuid_t)0;	/* cpu to receive interrupt */
	int cpupicked = 0;
	int bit;			/* interrupt vector */
	/*REFERENCED*/
	int intr_resflags = 0;
	hub_intr_t intr_hdl;
	cnodeid_t nodeid;		/* node to receive interrupt */
	/*REFERENCED*/
	nasid_t nasid;			/* nasid to receive interrupt */
	struct xtalk_intr_s *xtalk_info;
	iopaddr_t xtalk_addr;		/* xtalk addr on hub to set intr */
	xwidget_info_t xwidget_info;	/* standard crosstalk widget info handle */
	char *intr_name = NULL;
	ilvl_t intr_swlevel = (ilvl_t)0;
	extern int default_intr_pri;
	extern void synergy_intr_alloc(int, int);


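	/*
	 * Derive the resource flags and software level from the device
	 * descriptor: error interrupts get II_ERRORINT, everything else may
	 * be threaded unless threading is explicitly suppressed.
	 */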
	if (dev_desc) {
		if (dev_desc->flags & D_INTR_ISERR) {
			intr_resflags = II_ERRORINT;
		} else if (!uncond_nothread && !(dev_desc->flags & D_INTR_NOTHREAD)) {
			intr_resflags = II_THREADED;
		} else {
			/* Neither an error nor a thread. */
			intr_resflags = 0;
		}
	} else {
		intr_swlevel = default_intr_pri;
		if (!uncond_nothread)
			intr_resflags = II_THREADED;
	}

	/* XXX - Need to determine if the interrupt should be threaded. */

	/* If the cpu has not been picked already then choose a candidate 
	 * interrupt target and reserve the interrupt bit 
	 */
	if (!cpupicked) {
		cpu = intr_heuristic(dev,dev_desc,allocate_my_bit,
				     intr_resflags,owner_dev,
				     intr_name,&bit);
	}

	/* At this point we SHOULD have a valid cpu */
	if (cpu == CPU_NONE) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
		printk(KERN_WARNING  "%v hub_intr_alloc could not allocate interrupt\n",
			owner_dev);
#else
		printk(KERN_WARNING  "%p hub_intr_alloc could not allocate interrupt\n",
			(void *)owner_dev);
#endif
		return(0);

	}

	/* If the cpu has been picked already (due to the bridge data 
	 * corruption bug) then try to reserve an interrupt bit.
	 */
	if (cpupicked) {
		bit = intr_reserve_level(cpu, allocate_my_bit, 
					 intr_resflags, 
					 owner_dev, intr_name);
		if (bit < 0) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
			printk(KERN_WARNING  "Could not reserve an interrupt bit for cpu "
				" %d and dev %v\n",
				cpu,owner_dev);
#else
			printk(KERN_WARNING  "Could not reserve an interrupt bit for cpu "
				" %d and dev %p\n",
				(int)cpu, (void *)owner_dev);
#endif
				
			return(0);
		}
	}

	nodeid = cpuid_to_cnodeid(cpu);
	nasid = cpuid_to_nasid(cpu);
	xtalk_addr = HUBREG_AS_XTALKADDR(nasid, PIREG(PI_INT_PEND_MOD, cpuid_to_subnode(cpu)));

	/*
	 * Allocate an interrupt handle, and fill it in.  There are two
	 * pieces to an interrupt handle: the piece needed by generic
	 * xtalk code which is used by crosstalk device drivers, and
	 * the piece needed by low-level IP27 hardware code.
	 */
	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, nodeid);
	ASSERT_ALWAYS(intr_hdl);

	/* 
	 * Fill in xtalk information for generic xtalk interfaces that
	 * operate on xtalk_intr_hdl's.
	 */
	xtalk_info = &intr_hdl->i_xtalk_info;
	xtalk_info->xi_dev = dev;
	xtalk_info->xi_vector = bit;
	xtalk_info->xi_addr = xtalk_addr;

	/*
	 * Regardless of which CPU we ultimately interrupt, a given crosstalk
	 * widget always handles interrupts (and PIO and DMA) through its 
	 * designated "master" crosstalk provider.
	 */
	xwidget_info = xwidget_info_get(dev);
	if (xwidget_info)
		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);

	/* Fill in low level hub information for hub_* interrupt interface */
	intr_hdl->i_swlevel = intr_swlevel;
	intr_hdl->i_cpuid = cpu;
	intr_hdl->i_bit = bit;
	intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;

	/* Store the actual interrupt priority level & interrupt target
	 * cpu back in the device descriptor.
	 */
	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
	synergy_intr_alloc((int)bit, (int)cpu);
	return(intr_hdl);
}
Example #9
static int sn_topology_show(struct seq_file *s, void *d)
{
	int sz;
	int pt;
	int e = 0;
	int i;
	int j;
	const char *slabname;
	int ordinal;
	cpumask_t cpumask;
	char slice;
	struct cpuinfo_ia64 *c;
	struct sn_hwperf_port_info *ptdata;
	struct sn_hwperf_object_info *p;
	struct sn_hwperf_object_info *obj = d;	/* this object */
	struct sn_hwperf_object_info *objs = s->private; /* all objects */
	int rack, bay, slot, slab;
	u8 shubtype;
	u8 system_size;
	u8 sharing_size;
	u8 partid;
	u8 coher;
	u8 nasid_shift;
	u8 region_size;
	u16 nasid_mask;
	int nasid_msb;
	int pci_bus_ordinal = 0;

	if (obj == objs) {
		seq_printf(s, "# sn_topology version 2\n");
		seq_printf(s, "# objtype ordinal location partition"
			" [attribute value [, ...]]\n");

		if (ia64_sn_get_sn_info(0,
			&shubtype, &nasid_mask, &nasid_shift, &system_size,
			&sharing_size, &partid, &coher, &region_size))
			BUG();
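		/* Find the most significant bit set in the effective nasid mask. */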
		for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
			if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
				break;
		}
		seq_printf(s, "partition %u %s local "
			"shubtype %s, "
			"nasid_mask 0x%016lx, "
			"nasid_bits %d:%d, "
			"system_size %d, "
			"sharing_size %d, "
			"coherency_domain %d, "
			"region_size %d\n",

			partid, system_utsname.nodename,
			shubtype ? "shub2" : "shub1", 
			(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
			system_size, sharing_size, coher, region_size);
	}

	if (SN_HWPERF_FOREIGN(obj)) {
		/* private in another partition: not interesting */
		return 0;
	}

	for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
		if (obj->name[i] == ' ')
			obj->name[i] = '_';
	}

	slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
	seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
		obj->sn_hwp_this_part ? "local" : "shared", obj->name);

	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
		seq_putc(s, '\n');
	else {
		seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
		for (i=0; i < numionodes; i++) {
			seq_printf(s, i ? ":%d" : ", dist %d",
				node_distance(ordinal, i));
		}
		seq_putc(s, '\n');

		/*
		 * CPUs on this node, if any
		 */
		cpumask = node_to_cpumask(ordinal);
		for_each_online_cpu(i) {
			if (cpu_isset(i, cpumask)) {
				slice = 'a' + cpuid_to_slice(i);
				c = cpu_data(i);
				seq_printf(s, "cpu %d %s%c local"
					" freq %luMHz, arch ia64",
					i, obj->location, slice,
					c->proc_freq / 1000000);
				for_each_online_cpu(j) {
					seq_printf(s, j ? ":%d" : ", dist %d",
						node_distance(
						    cpuid_to_cnodeid(i),
						    cpuid_to_cnodeid(j)));
				}
				seq_putc(s, '\n');
			}
		}

		/*
		 * PCI busses attached to this node, if any
		 */
		if (sn_hwperf_location_to_bpos(obj->location,
			&rack, &bay, &slot, &slab)) {
			/* export pci bus info */
			print_pci_topology(s, obj, &pci_bus_ordinal,
				rack, bay, slot, slab);

		}
	}

	if (obj->ports) {
		/*
		 * numalink ports
		 */
		sz = obj->ports * sizeof(struct sn_hwperf_port_info);
		if ((ptdata = vmalloc(sz)) == NULL)
			return -ENOMEM;
		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				      SN_HWPERF_ENUM_PORTS, obj->id, sz,
				      (u64) ptdata, 0, 0, NULL);
		if (e != SN_HWPERF_OP_OK) {
			vfree(ptdata);
			return -EINVAL;
		}
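		/* Port ordinals run consecutively across all preceding local objects. */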
		for (ordinal=0, p=objs; p != obj; p++) {
			if (!SN_HWPERF_FOREIGN(p))
				ordinal += p->ports;
		}
		for (pt = 0; pt < obj->ports; pt++) {
			for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
				if (ptdata[pt].conn_id == p->id) {
					break;
				}
			}
			seq_printf(s, "numalink %d %s-%d",
			    ordinal+pt, obj->location, ptdata[pt].port);

			if (i >= sn_hwperf_obj_cnt) {
				/* no connection */
				seq_puts(s, " local endpoint disconnected"
					    ", protocol unknown\n");
				continue;
			}

			if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
				/* both ends local to this partition */
				seq_puts(s, " local");
			else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
				/* both ends of the link in a foreign partition */
				seq_puts(s, " foreign");
			else
				/* link straddles a partition */
				seq_puts(s, " shared");

			/*
			 * Unlikely, but strictly should query the LLP config
			 * registers because an NL4R can be configured to run
			 * NL3 protocol, even when not talking to an NL3 router.
			 * Ditto for node-node.
			 */
			seq_printf(s, " endpoint %s-%d, protocol %s\n",
				p->location, ptdata[pt].conn_port,
				(SN_HWPERF_IS_NL3ROUTER(obj) ||
				SN_HWPERF_IS_NL3ROUTER(p)) ?  "LLP3" : "LLP4");
		}
		vfree(ptdata);
	}

	return 0;
}
Example #10
void
hub_error_init(cnodeid_t cnode)
{
	nasid_t nasid;

    nasid = cnodeid_to_nasid(cnode);
    hub_error_clear(nasid);

#ifdef ajm
    if (cnode == 0) {
	/*
	 * Allocate log for storing the node specific error info
	 */
	for (i = 0; i < numnodes; i++) {
	    kl_error_log[i]  = kmem_zalloc_node(sizeof(sn0_error_log_t), 
						KM_NOSLEEP, i);
	    hub_err_count[i] = kmem_zalloc_node(sizeof(hub_errcnt_t),
						VM_DIRECT | KM_NOSLEEP, i);
	    ASSERT_ALWAYS(kl_error_log[i] && hub_err_count[i]);
	}
    }

    /*
     * Assumption: only one cpu will initialize a given hub.  We need to
     * set up the II and each PI error interrupt.
     * The SN1 hub (bedrock) has two PIs, each serving up to two processors.
     */

    if (cpuid_to_cnodeid(smp_processor_id()) == cnode) { 
	int generic_intr_mask = PI_ERR_GENERIC; /* These interrupts are sent to only 1 CPU per NODE */

	ASSERT_ALWAYS(kl_error_log[cnode]);
	ASSERT_ALWAYS(hub_err_count[cnode]);
	MD_ERR_LOG_INIT(kl_error_log[cnode]);

	/* One for each CPU */
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 0));
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 1));
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 2));
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 3));

	/*
	 * Setup error intr masks.
	 */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		int cpuA_present = REMOTE_HUB_PI_L(nasid, sn, PI_CPU_ENABLE_A);
		int cpuB_present = REMOTE_HUB_PI_L(nasid, sn, PI_CPU_ENABLE_B);

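		/* The generic (per-node) error interrupt goes to the first cpu
		 * found present; generic_intr_mask is cleared once assigned. */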
		if (cpuA_present) {
			if (cpuB_present) {		/* A && B */
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A,
					(PI_FATAL_ERR_CPU_B | PI_MISC_ERR_CPU_A|generic_intr_mask));
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B,
					(PI_FATAL_ERR_CPU_A | PI_MISC_ERR_CPU_B));

			} else {			/* A && !B */
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A,
					(PI_FATAL_ERR_CPU_A | PI_MISC_ERR_CPU_A|generic_intr_mask));
			}
			generic_intr_mask = 0;
		} else {
			if (cpuB_present) {		/* !A && B */
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B,
					(PI_FATAL_ERR_CPU_B | PI_MISC_ERR_CPU_B|generic_intr_mask));
				generic_intr_mask = 0;

			} else {			/* !A && !B */
				/* nothing to set up */
			}
		}
	}

	/*
	 * Turn off UNCAC_UNCORR interrupt in the masks. Anyone interested
	 * in these errors will peek at the int pend register to see if it's
	 * set.
	 */ 
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		misc = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_INT_MASK_A);
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A, (misc & ~PI_ERR_UNCAC_UNCORR_A));
		misc = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_INT_MASK_B);
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B, (misc & ~PI_ERR_UNCAC_UNCORR_B));
	}

	/*
	 * enable all error indicators to turn on, in case of errors.
	 *
	 * This is not good on single cpu node boards.
	 **** LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
	 */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS1_A_CLR, 0);
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS1_B_CLR, 0);
	}

	/* Set up stack for each present processor */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		if (REMOTE_HUB_PI_L(nasid, sn, PI_CPU_PRESENT_A)) {
	    	SN0_ERROR_LOG(cnode)->el_spool_cur_addr[0] =
			SN0_ERROR_LOG(cnode)->el_spool_last_addr[0] =
		    	REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_A);
		}
	    
		if (REMOTE_HUB_PI_L(nasid, sn, PI_CPU_PRESENT_B)) {
	    	SN0_ERROR_LOG(cnode)->el_spool_cur_addr[1] =
			SN0_ERROR_LOG(cnode)->el_spool_last_addr[1] =
		    	REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_B);
		}
	}


	PI_SPOOL_SIZE_BYTES = 
	    ERR_STACK_SIZE_BYTES(REMOTE_HUB_L(nasid, PI_ERR_STACK_SIZE));

#ifdef BRINGUP
/* BRINGUP: The following code looks like a check to make sure
the prom set up the error spool correctly for 2 processors.  I
don't think it is needed.  */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		if (REMOTE_HUB_PI_L(nasid, sn, PI_CPU_PRESENT_B)) {
			__psunsigned_t addr_a = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_A);
			__psunsigned_t addr_b = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_B);
			if ((addr_a & ~0xff) == (addr_b & ~0xff)) {
			    REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STACK_ADDR_B, 	
					addr_b + PI_SPOOL_SIZE_BYTES);
	
			    SN0_ERROR_LOG(cnode)->el_spool_cur_addr[1] =
				SN0_ERROR_LOG(cnode)->el_spool_last_addr[1] =
				    REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_B);
	
		    }
		}
	}
#endif /* BRINGUP */

	/* Programming our own hub.  Enable the error_int_pend interrupt.
	 * If both are present, CPU A takes CPU B's error interrupts and any
	 * generic ones; CPU B takes CPU A's error interrupts.
	 */
	if (cause_intr_connect (SRB_ERR_IDX,
				(intr_func_t)(hubpi_eint_handler),
				SR_ALL_MASK|SR_IE)) {
	    cmn_err(ERR_WARN, 
		    "hub_error_init: cause_intr_connect failed on %d", cnode);
	}
    }
    else {
	/* Programming a remote hub.  The only valid reason for this to be
	 * called is on headless hubs: no interrupts are set up.
	 */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A, 0); /* not necessary */
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B, 0); /* not necessary */
	}
    }
#endif /* ajm */
    /*
     * Now setup the hub ii and ni error interrupt handler.
     */

    hubii_eint_init(cnode);
    hubni_eint_init(cnode);

#ifdef ajm
    /*** XXX FIXME XXX resolve the following***/
    /* INT_PEND1 bits set up for one hub only:
     *	SHUTDOWN_INTR
     *	MD_COR_ERR_INTR
     *  COR_ERR_INTR_A and COR_ERR_INTR_B should be sent to the
     *  appropriate CPU only.
     */

    if (cnode == 0) {
	    error_consistency_check.eps_state = 0;
	    error_consistency_check.eps_cpuid = -1;
	    spinlock_init(&error_consistency_check.eps_lock, "error_dump_lock");
    }
#endif

    nodepda->huberror_ticks = HUB_ERROR_PERIOD;
    return;
}