Example No. 1
void __init build_cnode_tables(void)
{
	int nasid;
	int node;
	lboard_t *brd;

	memset(physical_node_map, -1, sizeof(physical_node_map));
	memset(sn_cnodeid_to_nasid, -1,
			sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));

	/*
	 * First populate the tables with C/M bricks. This ensures that
	 * cnode == node for all C & M bricks.
	 */
	for_each_online_node(node) {
		nasid = pxm_to_nasid(node_to_pxm(node));
		sn_cnodeid_to_nasid[node] = nasid;
		physical_node_map[nasid] = node;
	}

	/*
	 * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node
	 * limit on the number of nodes, we can't use the generic node numbers 
	 * for this. Note that num_cnodes is incremented below as TIOs or
	 * headless/memoryless nodes are discovered.
	 */
	num_cnodes = num_online_nodes();

	/* fakeprom does not support klgraph */
	if (IS_RUNNING_ON_FAKE_PROM())
		return;

	/* Find TIOs & headless/memoryless nodes and add them to the tables */
	for_each_online_node(node) {
		kl_config_hdr_t *klgraph_header;
		nasid = cnodeid_to_nasid(node);
		klgraph_header = ia64_sn_get_klconfig_addr(nasid);
		BUG_ON(klgraph_header == NULL);
		brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
		while (brd) {
			if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
				sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid;
				physical_node_map[brd->brd_nasid] = num_cnodes++;
			}
			brd = find_lboard_next(brd);
		}
	}
}
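
The two tables filled in above form a bidirectional map: sn_cnodeid_to_nasid translates a compact node id to its NASID, and physical_node_map inverts the relation. The memset with -1 marks every slot as "no mapping", which works at any integer width because -1 is all-one bits. Below is a minimal, standalone sketch of the same idiom; the sizes and names are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <string.h>

#define MAX_CNODES 16
#define MAX_NASIDS 16

static short cnode_to_nasid[MAX_CNODES];	/* compact node id -> NASID */
static short nasid_to_cnode[MAX_NASIDS];	/* NASID -> compact node id */

static void add_node(int cnode, int nasid)
{
	cnode_to_nasid[cnode] = nasid;
	nasid_to_cnode[nasid] = cnode;
}

int main(void)
{
	/* -1 in every byte reads back as -1 in every slot */
	memset(cnode_to_nasid, -1, sizeof(cnode_to_nasid));
	memset(nasid_to_cnode, -1, sizeof(nasid_to_cnode));

	add_node(0, 0);		/* C/M brick: cnode == node */
	add_node(1, 2);		/* NASIDs need not be dense */
	add_node(2, 5);		/* e.g. a TIO discovered later */

	printf("cnode 2 -> nasid %d\n", cnode_to_nasid[2]);	/* 5 */
	printf("nasid 5 -> cnode %d\n", nasid_to_cnode[5]);	/* 2 */
	return 0;
}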
Example No. 2
static int __init tiocx_init(void)
{
	cnodeid_t cnodeid;
	int found_tiocx_device = 0;

	if (!ia64_platform_is("sn2"))
		return 0;

	bus_register(&tiocx_bus_type);

	for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
		nasid_t nasid;
		int bt;

		nasid = cnodeid_to_nasid(cnodeid);

		if ((nasid & 0x1) && is_fpga_tio(nasid, &bt)) {
			struct hubdev_info *hubdev;
			struct xwidget_info *widgetp;

			DBG("Found TIO at nasid 0x%x\n", nasid);

			hubdev =
			    (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);

			widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];

			/* The CE hangs off of the CX port but is not an FPGA */
			if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM)
				continue;

			tio_corelet_reset(nasid, TIOCX_CORELET);
			tio_conveyor_enable(nasid);

			if (cx_device_register
			    (nasid, widgetp->xwi_hwid.part_num,
			     widgetp->xwi_hwid.mfg_num, hubdev, bt) < 0)
				return -ENXIO;
			else
				found_tiocx_device++;
		}
	}

	/* It's ok if we find zero devices. */
	DBG("found_tiocx_device= %d\n", found_tiocx_device);

	return 0;
}
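
The (nasid & 0x1) test above exploits the SN2 convention that TIO bricks get odd NASIDs, so a one-instruction bit test screens out compute nodes before the costlier is_fpga_tio() probe. A tiny sketch of that predicate, assuming only the odd-NASID convention visible in the loop:

#include <stdio.h>

/* Illustrative only: on SN2, TIO (I/O) bricks use odd NASIDs. */
static int nasid_may_be_tio(int nasid)
{
	return nasid & 0x1;	/* low bit set => TIO candidate */
}

int main(void)
{
	printf("%d %d\n", nasid_may_be_tio(4), nasid_may_be_tio(5));	/* 0 1 */
	return 0;
}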
Example No. 3
void
hub_error_init(cnodeid_t cnode)
{
	nasid_t nasid;

	nasid = cnodeid_to_nasid(cnode);
	hub_error_clear(nasid);

	/*
	 * Now set up the hub ii error interrupt handler.
	 */
	hubii_eint_init(cnode);

	return;
}
Example No. 4
/* ARGSUSED */
static void __init
klhwg_add_all_routers(vertex_hdl_t hwgraph_root)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd;
	vertex_hdl_t node_vertex;
	char path_buffer[100];
	int rv;

	for (cnode = 0; cnode < numnodes; cnode++) {
		nasid = cnodeid_to_nasid(cnode);
		brd = find_lboard_class_any((lboard_t *)KL_CONFIG_INFO(nasid),
				KLTYPE_ROUTER);

		if (!brd)
			/* No routers stored in this node's memory */
			continue;

		do {
			ASSERT(brd);

			/* Don't add duplicate boards. */
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;

			/* Generate a hardware graph path for this board. */
			board_to_path(brd, path_buffer);

			/* Add the router */
			rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
			if (rv != GRAPH_SUCCESS) {
				printk("Router vertex creation "
						  "failed.  Path == %s", path_buffer);
				return;
			}
			HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, node_vertex, NULL, "Created router path.\n");

		/* Find the rest of the routers stored on this node. */
		} while ( (brd = find_lboard_class_any(KLCF_NEXT_ANY(brd),
			 KLTYPE_ROUTER)) );
	}

}
Example No. 5
int __init
prominfo_init(void)
{
	struct proc_dir_entry **entp;
	struct proc_dir_entry *p;
	cnodeid_t cnodeid;
	nasid_t nasid;
	char name[NODE_NAME_LEN];

	if (!ia64_platform_is("sn2"))
		return 0;

	TRACE();

	DPRINTK("running on cpu %d\n", smp_processor_id());
	DPRINTK("numnodes %d\n", numnodes);

	proc_entries = kmalloc(numnodes * sizeof(struct proc_dir_entry *),
			       GFP_KERNEL);

	sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);

	for (cnodeid = 0, entp = proc_entries;
	     cnodeid < numnodes;
	     cnodeid++, entp++) {
		sprintf(name, "node%d", cnodeid);
		*entp = proc_mkdir(name, sgi_prominfo_entry);
		nasid = cnodeid_to_nasid(cnodeid);
		p = create_proc_read_entry(
			"fit", 0, *entp, read_fit_entry,
			lookup_fit(nasid));
		if (p)
			p->owner = THIS_MODULE;
		p = create_proc_read_entry(
			"version", 0, *entp, read_version_entry,
			lookup_fit(nasid));
		if (p)
			p->owner = THIS_MODULE;
	}

	return 0;
}
Example No. 6
/* ARGSUSED */
static void __init
klhwg_add_disabled_cpu(vertex_hdl_t node_vertex, cnodeid_t cnode, klcpu_t *cpu, slotid_t slot)
{
	vertex_hdl_t my_cpu;
	char name[120];
	cpuid_t cpu_id;
	nasid_t nasid;

	nasid = cnodeid_to_nasid(cnode);
	cpu_id = nasid_slice_to_cpuid(nasid, cpu->cpu_info.physid);
	if (cpu_id != -1) {
		snprintf(name, 120, "%s/%s/%c", EDGE_LBL_DISABLED, EDGE_LBL_CPU, 'a' + cpu->cpu_info.physid);
		(void) hwgraph_path_add(node_vertex, name, &my_cpu);

		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, my_cpu, NULL, "Created path for disabled cpu slice.\n");

		mark_cpuvertex_as_cpu(my_cpu, cpu_id);
		device_master_set(my_cpu, node_vertex);
		return;
	}
}
Example No. 7
void __init
klhwg_add_all_nodes(vertex_hdl_t hwgraph_root)
{
	cnodeid_t	cnode;

	for (cnode = 0; cnode < numionodes; cnode++) {
		klhwg_add_node(hwgraph_root, cnode);
	}

	for (cnode = 0; cnode < numionodes; cnode++) {
		klhwg_add_xbow(cnode, cnodeid_to_nasid(cnode));
	}

	/*
	 * As for router hardware inventory information, we set this
	 * up in router.c. 
	 */
	
	klhwg_add_all_routers(hwgraph_root);
	klhwg_connect_routers(hwgraph_root);
	klhwg_connect_hubs(hwgraph_root);
}
Example No. 8
int __init prominfo_init(void)
{
	struct proc_dir_entry **entp;
	struct proc_dir_entry *p;
	cnodeid_t cnodeid;
	unsigned long nasid;
	char name[NODE_NAME_LEN];

	if (!ia64_platform_is("sn2"))
		return 0;

	proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *),
			       GFP_KERNEL);

	sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);

	entp = proc_entries;
	for_each_online_node(cnodeid) {
		sprintf(name, "node%d", cnodeid);
		*entp = proc_mkdir(name, sgi_prominfo_entry);
		nasid = cnodeid_to_nasid(cnodeid);
		p = create_proc_read_entry(
			"fit", 0, *entp, read_fit_entry,
			(void *)nasid);
		if (p)
			p->owner = THIS_MODULE;
		p = create_proc_read_entry(
			"version", 0, *entp, read_version_entry,
			(void *)nasid);
		if (p)
			p->owner = THIS_MODULE;
		entp++;
	}

	return 0;
}
Example No. 9
static void __init
klhwg_connect_hubs(vertex_hdl_t hwgraph_root)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd;
	klhub_t *hub;
	lboard_t *dest_brd;
	vertex_hdl_t hub_hndl;
	vertex_hdl_t dest_hndl;
	char path_buffer[50];
	char dest_path[50];
	graph_error_t rc;
	int port;

	for (cnode = 0; cnode < numionodes; cnode++) {
		nasid = cnodeid_to_nasid(cnode);

		brd = find_lboard_any((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);

		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
		ASSERT(hub);

		for (port = 1; port <= MAX_NI_PORTS; port++) {
			if (hub->hub_port[port].port_nasid == INVALID_NASID) {
				continue; /* Port not active */
			}

			if (nasid_to_cnodeid(hub->hub_port[port].port_nasid) == INVALID_CNODEID)
				continue;

			/* Generate a hardware graph path for this board. */
			board_to_path(brd, path_buffer);
			rc = hwgraph_traverse(hwgraph_root, path_buffer, &hub_hndl);

			if (rc != GRAPH_SUCCESS)
				printk(KERN_WARNING "Can't find hub: %s", path_buffer);

			dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					hub->hub_port[port].port_nasid,
					hub->hub_port[port].port_offset);

			/* Generate a hardware graph path for this board. */
			board_to_path(dest_brd, dest_path);

			rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);

			if (rc != GRAPH_SUCCESS) {
				if (KL_CONFIG_DUPLICATE_BOARD(dest_brd))
					continue;
				printk("Can't find board: %s", dest_path);
				return;
			} else {
				char buf[1024];

				rc = hwgraph_path_add(hub_hndl, EDGE_LBL_INTERCONNECT, &hub_hndl);

				HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, hub_hndl, NULL, "Created link path.\n");

				sprintf(buf,"%s/%s",path_buffer,EDGE_LBL_INTERCONNECT);
				rc = hwgraph_traverse(hwgraph_root, buf, &hub_hndl);
				sprintf(buf,"%d",port);
				rc = hwgraph_edge_add(hub_hndl, dest_hndl, buf);

				HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, hub_hndl, dest_hndl, "Created edge %s from vhdl1 to vhdl2.\n", buf);

				if (rc != GRAPH_SUCCESS) {
					printk("Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
							path_buffer, dest_path, (void *)dest_hndl, rc);
					return;
				}
			}
		}
	}
}
Example No. 10
void
hub_error_init(cnodeid_t cnode)
{
	nasid_t nasid;

    nasid = cnodeid_to_nasid(cnode);
    hub_error_clear(nasid);

#ifdef ajm
    if (cnode == 0) {
	/*
	 * Allocate log for storing the node specific error info
	 */
	for (i = 0; i < numnodes; i++) {
	    kl_error_log[i]  = kmem_zalloc_node(sizeof(sn0_error_log_t), 
						KM_NOSLEEP, i);
	    hub_err_count[i] = kmem_zalloc_node(sizeof(hub_errcnt_t),
						VM_DIRECT | KM_NOSLEEP, i);
	    ASSERT_ALWAYS(kl_error_log[i] && hub_err_count[i]);
	}
    }

    /*
     * Assumption: there will be only one cpu that initializes
     * a hub. We need to set up the ii and each pi error interrupt.
     * The SN1 hub (bedrock) has two PIs, each serving up to two processors.
     */

    if (cpuid_to_cnodeid(smp_processor_id()) == cnode) { 
	int generic_intr_mask = PI_ERR_GENERIC; /* These interrupts are sent to only 1 CPU per NODE */

	ASSERT_ALWAYS(kl_error_log[cnode]);
	ASSERT_ALWAYS(hub_err_count[cnode]);
	MD_ERR_LOG_INIT(kl_error_log[cnode]);

	/* One for each CPU */
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 0));
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 1));
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 2));
	recover_error_init(RECOVER_ERROR_TABLE(cnode, 3));

	/*
	 * Setup error intr masks.
	 */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		int cpuA_present = REMOTE_HUB_PI_L(nasid, sn, PI_CPU_ENABLE_A);
		int cpuB_present = REMOTE_HUB_PI_L(nasid, sn, PI_CPU_ENABLE_B);

		if (cpuA_present) {
			if (cpuB_present) {		/* A && B */
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A,
					(PI_FATAL_ERR_CPU_B | PI_MISC_ERR_CPU_A|generic_intr_mask));
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B,
					(PI_FATAL_ERR_CPU_A | PI_MISC_ERR_CPU_B));

			} else {			/* A && !B */
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A,
					(PI_FATAL_ERR_CPU_A | PI_MISC_ERR_CPU_A|generic_intr_mask));
			}
			generic_intr_mask = 0;
		} else {
			if (cpuB_present) {		/* !A && B */
	    			REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B,
					(PI_FATAL_ERR_CPU_B | PI_MISC_ERR_CPU_B|generic_intr_mask));
				generic_intr_mask = 0;

			} else {			/* !A && !B */
				/* nothing to set up */
			}
		}
	}

	/*
	 * Turn off the UNCAC_UNCORR interrupt in the masks. Anyone interested
	 * in these errors will peek at the int pend register to see if it's
	 * set.
	 */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		misc = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_INT_MASK_A);
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A, (misc & ~PI_ERR_UNCAC_UNCORR_A));
		misc = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_INT_MASK_B);
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B, (misc & ~PI_ERR_UNCAC_UNCORR_B));
	}

	/*
	 * enable all error indicators to turn on, in case of errors.
	 *
	 * This is not good on single cpu node boards.
	 **** LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
	 */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS1_A_CLR, 0);
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS1_B_CLR, 0);
	}

	/* Set up stack for each present processor */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		if (REMOTE_HUB_PI_L(nasid, sn, PI_CPU_PRESENT_A)) {
	    	SN0_ERROR_LOG(cnode)->el_spool_cur_addr[0] =
			SN0_ERROR_LOG(cnode)->el_spool_last_addr[0] =
		    	REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_A);
		}
	    
		if (REMOTE_HUB_PI_L(nasid, sn, PI_CPU_PRESENT_B)) {
	    	SN0_ERROR_LOG(cnode)->el_spool_cur_addr[1] =
			SN0_ERROR_LOG(cnode)->el_spool_last_addr[1] =
		    	REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_B);
		}
	}


	PI_SPOOL_SIZE_BYTES = 
	    ERR_STACK_SIZE_BYTES(REMOTE_HUB_L(nasid, PI_ERR_STACK_SIZE));

#ifdef BRINGUP
/* BRINGUP: The following code looks like a check to make sure
the prom set up the error spool correctly for 2 processors.  I
don't think it is needed.  */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		if (REMOTE_HUB_PI_L(nasid, sn, PI_CPU_PRESENT_B)) {
			__psunsigned_t addr_a = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_A);
			__psunsigned_t addr_b = REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_B);
			if ((addr_a & ~0xff) == (addr_b & ~0xff)) {
			    REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STACK_ADDR_B, 	
					addr_b + PI_SPOOL_SIZE_BYTES);
	
			    SN0_ERROR_LOG(cnode)->el_spool_cur_addr[1] =
				SN0_ERROR_LOG(cnode)->el_spool_last_addr[1] =
				    REMOTE_HUB_PI_L(nasid, sn, PI_ERR_STACK_ADDR_B);
	
		    }
		}
	}
#endif /* BRINGUP */

	/* Programming our own hub. Enable the error_int_pend intr.
	 * If both are present, CPU A takes CPU B's error interrupts and any
	 * generic ones; CPU B takes CPU A's error ints.
	 */
	if (cause_intr_connect (SRB_ERR_IDX,
				(intr_func_t)(hubpi_eint_handler),
				SR_ALL_MASK|SR_IE)) {
	    cmn_err(ERR_WARN, 
		    "hub_error_init: cause_intr_connect failed on %d", cnode);
	}
    }
    else {
	/* Programming a remote hub. The only valid reason this is
	 * called is for headless hubs. No interrupts.
	 */
	for(sn=0; sn<NUM_SUBNODES; sn++) {
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_A, 0); /* not necessary */
		REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_MASK_B, 0); /* not necessary */
	}
    }
#endif /* ajm */
    /*
     * Now set up the hub ii and ni error interrupt handlers.
     */

    hubii_eint_init(cnode);
    hubni_eint_init(cnode);

#ifdef ajm
    /*** XXX FIXME XXX resolve the following***/
    /* INT_PEND1 bits set up for one hub only:
     *	SHUTDOWN_INTR
     *	MD_COR_ERR_INTR
     *  COR_ERR_INTR_A and COR_ERR_INTR_B should be sent to the
     *  appropriate CPU only.
     */

    if (cnode == 0) {
	    error_consistency_check.eps_state = 0;
	    error_consistency_check.eps_cpuid = -1;
	    spinlock_init(&error_consistency_check.eps_lock, "error_dump_lock");
    }
#endif

    nodepda->huberror_ticks = HUB_ERROR_PERIOD;
    return;
}
Example No. 11
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __cpuinit sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	cpuid = smp_processor_id();
	if (cpuid == 0 && IS_MEDUSA()) {
		if (ia64_sn_is_fake_prom())
			sn_prom_type = 2;
		else
			sn_prom_type = 1;
		printk(KERN_INFO "Running on medusa with %s PROM\n",
		       (sn_prom_type == 1) ? "real" : "fake");
	}

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
				&sn_hub_info->nasid_bitmask,
				&sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size,
				&sn_partition_id, &sn_coherency_id,
				&sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * Don't check status. The SAL call is not supported on all PROMs
	 * but a failure is harmless.
	 */
	(void) ia64_sn_set_cpu_number(cpuid);

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
			break;

	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i=0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	sn_nodepda = nodepdaindr[cnode];

	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
		memcpy(sn_cnodeid_to_nasid,
		       (&per_cpu(__sn_cnodeid_to_nasid, 0)),
		       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses this cpu's
	 * sn_cnodeid_to_nasid table which was just initialized if this
	 * isn't cpu 0.
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr =
		   (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
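
The write-status block above selects one of four per-slice registers from a table whose layout differs between SHUB1 and SHUB2. A standalone sketch of that selection pattern follows; the offsets are dummies standing in for the SH1_/SH2_ register constants.

#include <stdio.h>

int main(void)
{
	/* per-slice register offsets; values are made up for illustration */
	unsigned long pio1[] = { 0x100, 0, 0x110, 0 };		/* shub1 */
	unsigned long pio2[] = { 0x200, 0x220, 0x210, 0x230 };	/* shub2 */
	int is_shub1 = 1;	/* would come from the hub revision */
	int slice = 2;		/* this cpu's slice on its node */

	unsigned long *pio = is_shub1 ? pio1 : pio2;
	printf("write-status register offset: 0x%lx\n", pio[slice]);
	return 0;
}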
Example No. 12
/*
 * scdrv_init
 *
 * Called at boot time to initialize the system controller communication
 * facility.
 */
int __init
scdrv_init(void)
{
	geoid_t geoid;
	cnodeid_t cnode;
	char devname[32];
	char *devnamep;
	struct sysctl_data_s *scd;
	void *salbuf;
	dev_t first_dev, dev;
	nasid_t event_nasid = ia64_sn_get_console_nasid();

	if (alloc_chrdev_region(&first_dev, 0, numionodes,
				SYSCTL_BASENAME) < 0) {
		printk("%s: failed to register SN system controller device\n",
		       __FUNCTION__);
		return -ENODEV;
	}
	snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);

	for (cnode = 0; cnode < numionodes; cnode++) {
			geoid = cnodeid_get_geoid(cnode);
			devnamep = devname;
			format_module_id(devnamep, geo_module(geoid),
					 MODULE_FORMAT_BRIEF);
			devnamep = devname + strlen(devname);
			sprintf(devnamep, "#%d", geo_slab(geoid));

			/* allocate sysctl device data */
			scd = kmalloc(sizeof (struct sysctl_data_s),
				      GFP_KERNEL);
			if (!scd) {
				printk("%s: failed to allocate device info"
				       "for %s/%s\n", __FUNCTION__,
				       SYSCTL_BASENAME, devname);
				continue;
			}
			memset(scd, 0, sizeof (struct sysctl_data_s));

			/* initialize sysctl device data fields */
			scd->scd_nasid = cnodeid_to_nasid(cnode);
			if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
				printk("%s: failed to allocate driver buffer"
				       "(%s%s)\n", __FUNCTION__,
				       SYSCTL_BASENAME, devname);
				kfree(scd);
				continue;
			}

			if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
					      SCDRV_BUFSZ) < 0) {
				printk
				    ("%s: failed to initialize SAL for"
				     " system controller communication"
				     " (%s/%s): outdated PROM?\n",
				     __FUNCTION__, SYSCTL_BASENAME, devname);
				kfree(scd);
				kfree(salbuf);
				continue;
			}

			dev = first_dev + cnode;
			cdev_init(&scd->scd_cdev, &scdrv_fops);
			if (cdev_add(&scd->scd_cdev, dev, 1)) {
				printk("%s: failed to register system"
				       " controller device (%s%s)\n",
				       __FUNCTION__, SYSCTL_BASENAME, devname);
				kfree(scd);
				kfree(salbuf);
				continue;
			}

			class_device_create(snsc_class, dev, NULL,
						"%s", devname);

			ia64_sn_irtr_intr_enable(scd->scd_nasid,
						 0 /*ignored */ ,
						 SAL_IROUTER_INTR_RECV);

			/* on the console nasid, prepare to receive
			 * system controller environmental events
			 */
			if (scd->scd_nasid == event_nasid) {
				scdrv_event_init(scd);
			}
	}
	return 0;
}
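
The device name above is built in two steps: format_module_id() writes the module label, then "#<slab>" is appended at strlen(devname). A hedged sketch of the same concatenation, with a plain snprintf standing in for format_module_id() and made-up module/slab numbers:

#include <stdio.h>

int main(void)
{
	char devname[32];
	int module = 1, slab = 3;	/* stand-ins for geo_module()/geo_slab() */
	int n;

	n = snprintf(devname, sizeof(devname), "%03d", module);
	snprintf(devname + n, sizeof(devname) - n, "#%d", slab);

	printf("%s\n", devname);	/* -> "001#3" */
	return 0;
}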
Example No. 13
/*
 * scdrv_init
 *
 * Called at boot time to initialize the system controller communication
 * facility.
 */
int __init
scdrv_init(void)
{
	geoid_t geoid;
	cmoduleid_t cmod;
	int i;
	char devname[32];
	char *devnamep;
	module_t *m;
	struct sysctl_data_s *scd;
	void *salbuf;
	struct class_simple *snsc_class;
	dev_t first_dev, dev;

	if (alloc_chrdev_region(&first_dev, 0, (MAX_SLABS*nummodules),
				SYSCTL_BASENAME) < 0) {
		printk("%s: failed to register SN system controller device\n",
		       __FUNCTION__);
		return -ENODEV;
	}
	snsc_class = class_simple_create(THIS_MODULE, SYSCTL_BASENAME);

	for (cmod = 0; cmod < nummodules; cmod++) {
		m = sn_modules[cmod];
		for (i = 0; i <= MAX_SLABS; i++) {

			if (m->nodes[i] == -1) {
				/* node is not alive in module */
				continue;
			}

			geoid = m->geoid[i];
			devnamep = devname;
			format_module_id(devnamep, geo_module(geoid),
					 MODULE_FORMAT_BRIEF);
			devnamep = devname + strlen(devname);
			sprintf(devnamep, "#%d", geo_slab(geoid));

			/* allocate sysctl device data */
			scd = kmalloc(sizeof (struct sysctl_data_s),
				      GFP_KERNEL);
			if (!scd) {
				printk("%s: failed to allocate device info"
				       "for %s/%s\n", __FUNCTION__,
				       SYSCTL_BASENAME, devname);
				continue;
			}
			memset(scd, 0, sizeof (struct sysctl_data_s));

			/* initialize sysctl device data fields */
			scd->scd_nasid = cnodeid_to_nasid(m->nodes[i]);
			if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
				printk("%s: failed to allocate driver buffer"
				       "(%s%s)\n", __FUNCTION__,
				       SYSCTL_BASENAME, devname);
				kfree(scd);
				continue;
			}

			if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
					      SCDRV_BUFSZ) < 0) {
				printk
				    ("%s: failed to initialize SAL for"
				     " system controller communication"
				     " (%s/%s): outdated PROM?\n",
				     __FUNCTION__, SYSCTL_BASENAME, devname);
				kfree(scd);
				kfree(salbuf);
				continue;
			}

			dev = first_dev + m->nodes[i];
			cdev_init(&scd->scd_cdev, &scdrv_fops);
			if (cdev_add(&scd->scd_cdev, dev, 1)) {
				printk("%s: failed to register system"
				       " controller device (%s%s)\n",
				       __FUNCTION__, SYSCTL_BASENAME, devname);
				kfree(scd);
				kfree(salbuf);
				continue;
			}

			class_simple_device_add(snsc_class, dev, NULL,
						"%s", devname);

			ia64_sn_irtr_intr_enable(scd->scd_nasid,
						 0 /*ignored */ ,
						 SAL_IROUTER_INTR_RECV);
		}
	}
	return 0;
}
Example No. 14
/*
 * Second part error handler.  Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
void
bte_error_handler(unsigned long _nodepda)
{
	struct nodepda_s *err_nodepda = (struct nodepda_s *) _nodepda;
	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
	nasid_t nasid;
	int i;
	int valid_crbs;
	unsigned long irq_flags;
	volatile u64 *notify;
	bte_result_t bh_error;
	ii_imem_u_t imem;	/* II IMEM Register */
	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
	ii_ibcr_u_t ibcr;
	ii_icmr_u_t icmr;
	ii_ieclr_u_t ieclr;


	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
		    smp_processor_id()));

	spin_lock_irqsave(recovery_lock, irq_flags);

	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
			    smp_processor_id()));
		spin_unlock_irqrestore(recovery_lock, irq_flags);
		return;
	}
	/*
	 * Lock all interfaces on this node to prevent new transfers
	 * from being queued.
	 */
	for (i = 0; i < BTES_PER_NODE; i++) {
		if (err_nodepda->bte_if[i].cleanup_active) {
			continue;
		}
		spin_lock(&err_nodepda->bte_if[i].spinlock);
		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
			    smp_processor_id(), i));
		err_nodepda->bte_if[i].cleanup_active = 1;
	}

	/* Determine information about our hub */
	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);


	/*
	 * A BTE transfer can use multiple CRBs.  We need to make sure
	 * that all the BTE CRBs are complete (or timed out) before
	 * attempting to clean up the error.  Resetting the BTE while
	 * there are still BTE CRBs active will hang the BTE.
	 * We should look at all the CRBs to see if they are allocated
	 * to the BTE and see if they are still active.  When none
	 * are active, we can continue with the cleanup.
	 *
	 * We also want to make sure that the local NI port is up.
	 * When a router resets the NI port can go down, while it
	 * goes through the LLP handshake, but then comes back up.
	 */
	icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
	if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
		/*
		 * There are errors which still need to be cleaned up by
		 * hubiio_crb_error_handler
		 */
		mod_timer(recovery_timer, jiffies + (HZ * 5));
		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
			    smp_processor_id()));
		spin_unlock_irqrestore(recovery_lock, irq_flags);
		return;
	}
	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {

		valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;

		for (i = 0; i < IIO_NUM_CRBS; i++) {
			if (!((1 << i) & valid_crbs)) {
				/* This crb was not marked as valid, ignore */
				continue;
			}
			icrbd.ii_icrb0_d_regval =
			    REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
			if (icrbd.d_bteop) {
				mod_timer(recovery_timer, jiffies + (HZ * 5));
				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
					 err_nodepda, smp_processor_id(), i));
				spin_unlock_irqrestore(recovery_lock,
						       irq_flags);
				return;
			}
		}
	}


	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda,
		    smp_processor_id()));
	/* Reenable both bte interfaces */
	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);

	/* Clear IBLS0/1 error bits */
	ieclr.ii_ieclr_regval = 0;
	if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
		ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
	if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
		ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
	REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);

	/* Reinitialize both BTE state machines. */
	ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);


	for (i = 0; i < BTES_PER_NODE; i++) {
		bh_error = err_nodepda->bte_if[i].bh_error;
		if (bh_error != BTE_SUCCESS) {
			/* There is an error which needs to be notified */
			notify = err_nodepda->bte_if[i].most_rcnt_na;
			BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
				    err_nodepda->bte_if[i].bte_cnode,
				    err_nodepda->bte_if[i].bte_num,
				    IBLS_ERROR | (u64) bh_error));
			*notify = IBLS_ERROR | bh_error;
			err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
		}

		err_nodepda->bte_if[i].cleanup_active = 0;
		BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
			    smp_processor_id(), i));
		spin_unlock(&err_nodepda->bte_if[i].spinlock);
	}

	del_timer(recovery_timer);

	spin_unlock_irqrestore(recovery_lock, irq_flags);
}
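
The recovery gate above walks the whole CRB array: each set bit in the i_crb_vld mask names a CRB whose register must be read, and if any valid CRB still belongs to the BTE the reset is deferred via the timer. A standalone sketch of that bitmask scan; NUM_CRBS and the bteop array are stand-ins for the hub's CRB registers.

#include <stdio.h>

#define NUM_CRBS 15

/* returns 1 if recovery must be retried later, 0 if it is safe to reset */
static int any_bte_crb_active(unsigned int valid_crbs, const int bteop[NUM_CRBS])
{
	int i;

	for (i = 0; i < NUM_CRBS; i++) {
		if (!((1u << i) & valid_crbs))
			continue;	/* CRB not marked valid, ignore */
		if (bteop[i])
			return 1;	/* a BTE-owned CRB is still active */
	}
	return 0;
}

int main(void)
{
	int bteop[NUM_CRBS] = { 0 };

	bteop[3] = 1;
	printf("%d\n", any_bte_crb_active(1u << 3, bteop));	/* 1: retry later */
	printf("%d\n", any_bte_crb_active(1u << 4, bteop));	/* 0: safe to reset */
	return 0;
}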
Example No. 15
/*
 * ioctl for "sn_hwperf" misc device
 */
static int
sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
{
	struct sn_hwperf_ioctl_args a;
	struct cpuinfo_ia64 *cdata;
	struct sn_hwperf_object_info *objs;
	struct sn_hwperf_object_info *cpuobj;
	struct sn_hwperf_op_info op_info;
	void *p = NULL;
	int nobj;
	char slice;
	int node;
	int r;
	int v0;
	int i;
	int j;

	unlock_kernel();

	/* only user requests are allowed here */
	if ((op & SN_HWPERF_OP_MASK) < 10) {
		r = -EINVAL;
		goto error;
	}
	r = copy_from_user(&a, (const void __user *)arg,
		sizeof(struct sn_hwperf_ioctl_args));
	if (r != 0) {
		r = -EFAULT;
		goto error;
	}

	/*
	 * Allocate memory to hold a kernel copy of the user buffer. The
	 * buffer contents are either copied in or out (or both) of user
	 * space depending on the flags encoded in the requested operation.
	 */
	if (a.ptr) {
		p = vmalloc(a.sz);
		if (!p) {
			r = -ENOMEM;
			goto error;
		}
	}

	if (op & SN_HWPERF_OP_MEM_COPYIN) {
		r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
		if (r != 0) {
			r = -EFAULT;
			goto error;
		}
	}

	switch (op) {
	case SN_HWPERF_GET_CPU_INFO:
		if (a.sz == sizeof(u64)) {
			/* special case to get size needed */
			*(u64 *) p = (u64) num_online_cpus() *
				sizeof(struct sn_hwperf_object_info);
		} else
		if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) {
			r = -ENOMEM;
			goto error;
		} else
		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
			memset(p, 0, a.sz);
			for (i = 0; i < nobj; i++) {
				node = sn_hwperf_obj_to_cnode(objs + i);
				for_each_online_cpu(j) {
					if (node != cpu_to_node(j))
						continue;
					cpuobj = (struct sn_hwperf_object_info *) p + j;
					slice = 'a' + cpuid_to_slice(j);
					cdata = cpu_data(j);
					cpuobj->id = j;
					snprintf(cpuobj->name,
						 sizeof(cpuobj->name),
						 "CPU %luMHz %s",
						 cdata->proc_freq / 1000000,
						 cdata->vendor);
					snprintf(cpuobj->location,
						 sizeof(cpuobj->location),
						 "%s%c", objs[i].location,
						 slice);
				}
			}

			vfree(objs);
		}
		break;

	case SN_HWPERF_GET_NODE_NASID:
		if (a.sz != sizeof(u64) ||
		   (node = a.arg) < 0 || node >= numionodes) {
			r = -EINVAL;
			goto error;
		}
		*(u64 *)p = (u64)cnodeid_to_nasid(node);
		break;

	case SN_HWPERF_GET_OBJ_NODE:
		if (a.sz != sizeof(u64) || a.arg < 0) {
			r = -EINVAL;
			goto error;
		}
		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
			if (a.arg >= nobj) {
				r = -EINVAL;
				vfree(objs);
				goto error;
			}
			if (objs[(i = a.arg)].id != a.arg) {
				for (i = 0; i < nobj; i++) {
					if (objs[i].id == a.arg)
						break;
				}
			}
			if (i == nobj) {
				r = -EINVAL;
				vfree(objs);
				goto error;
			}
			*(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
			vfree(objs);
		}
		break;

	case SN_HWPERF_GET_MMRS:
	case SN_HWPERF_SET_MMRS:
	case SN_HWPERF_OBJECT_DISTANCE:
		op_info.p = p;
		op_info.a = &a;
		op_info.v0 = &v0;
		op_info.op = op;
		r = sn_hwperf_op_cpu(&op_info);
		if (r) {
			r = sn_hwperf_map_err(r);
			a.v0 = v0;
			goto error;
		}
		break;

	default:
		/* all other ops are a direct SAL call */
		r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
			      a.arg, a.sz, (u64) p, 0, 0, &v0);
		if (r) {
			r = sn_hwperf_map_err(r);
			goto error;
		}
		a.v0 = v0;
		break;
	}

	/* all done, copy out results */
	if (op & SN_HWPERF_OP_MEM_COPYOUT) {
		r = copy_to_user((void __user *)a.ptr, p, a.sz);
		if (r != 0) {
			r = -EFAULT;
			goto error;
		}
	}

error:
	vfree(p);

	lock_kernel();
	return r;
}
Example No. 16
static int sn_topology_show(struct seq_file *s, void *d)
{
	int sz;
	int pt;
	int e = 0;
	int i;
	int j;
	const char *slabname;
	int ordinal;
	cpumask_t cpumask;
	char slice;
	struct cpuinfo_ia64 *c;
	struct sn_hwperf_port_info *ptdata;
	struct sn_hwperf_object_info *p;
	struct sn_hwperf_object_info *obj = d;	/* this object */
	struct sn_hwperf_object_info *objs = s->private; /* all objects */
	int rack, bay, slot, slab;
	u8 shubtype;
	u8 system_size;
	u8 sharing_size;
	u8 partid;
	u8 coher;
	u8 nasid_shift;
	u8 region_size;
	u16 nasid_mask;
	int nasid_msb;
	int pci_bus_ordinal = 0;

	if (obj == objs) {
		seq_printf(s, "# sn_topology version 2\n");
		seq_printf(s, "# objtype ordinal location partition"
			" [attribute value [, ...]]\n");

		if (ia64_sn_get_sn_info(0,
			&shubtype, &nasid_mask, &nasid_shift, &system_size,
			&sharing_size, &partid, &coher, &region_size))
			BUG();
		for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
			if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
				break;
		}
		seq_printf(s, "partition %u %s local "
			"shubtype %s, "
			"nasid_mask 0x%016lx, "
			"nasid_bits %d:%d, "
			"system_size %d, "
			"sharing_size %d, "
			"coherency_domain %d, "
			"region_size %d\n",

			partid, system_utsname.nodename,
			shubtype ? "shub2" : "shub1", 
			(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
			system_size, sharing_size, coher, region_size);
	}

	if (SN_HWPERF_FOREIGN(obj)) {
		/* private in another partition: not interesting */
		return 0;
	}

	for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
		if (obj->name[i] == ' ')
			obj->name[i] = '_';
	}

	slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
	seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
		obj->sn_hwp_this_part ? "local" : "shared", obj->name);

	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
		seq_putc(s, '\n');
	else {
		seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
		for (i=0; i < numionodes; i++) {
			seq_printf(s, i ? ":%d" : ", dist %d",
				node_distance(ordinal, i));
		}
		seq_putc(s, '\n');

		/*
		 * CPUs on this node, if any
		 */
		cpumask = node_to_cpumask(ordinal);
		for_each_online_cpu(i) {
			if (cpu_isset(i, cpumask)) {
				slice = 'a' + cpuid_to_slice(i);
				c = cpu_data(i);
				seq_printf(s, "cpu %d %s%c local"
					" freq %luMHz, arch ia64",
					i, obj->location, slice,
					c->proc_freq / 1000000);
				for_each_online_cpu(j) {
					seq_printf(s, j ? ":%d" : ", dist %d",
						node_distance(
						    cpuid_to_cnodeid(i),
						    cpuid_to_cnodeid(j)));
				}
				seq_putc(s, '\n');
			}
		}

		/*
		 * PCI busses attached to this node, if any
		 */
		if (sn_hwperf_location_to_bpos(obj->location,
			&rack, &bay, &slot, &slab)) {
			/* export pci bus info */
			print_pci_topology(s, obj, &pci_bus_ordinal,
				rack, bay, slot, slab);

		}
	}

	if (obj->ports) {
		/*
		 * numalink ports
		 */
		sz = obj->ports * sizeof(struct sn_hwperf_port_info);
		if ((ptdata = vmalloc(sz)) == NULL)
			return -ENOMEM;
		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				      SN_HWPERF_ENUM_PORTS, obj->id, sz,
				      (u64) ptdata, 0, 0, NULL);
		if (e != SN_HWPERF_OP_OK) {
			vfree(ptdata);
			return -EINVAL;
		}
		for (ordinal=0, p=objs; p != obj; p++) {
			if (!SN_HWPERF_FOREIGN(p))
				ordinal += p->ports;
		}
		for (pt = 0; pt < obj->ports; pt++) {
			for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
				if (ptdata[pt].conn_id == p->id) {
					break;
				}
			}
			seq_printf(s, "numalink %d %s-%d",
			    ordinal+pt, obj->location, ptdata[pt].port);

			if (i >= sn_hwperf_obj_cnt) {
				/* no connection */
				seq_puts(s, " local endpoint disconnected"
					    ", protocol unknown\n");
				continue;
			}

			if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
				/* both ends local to this partition */
				seq_puts(s, " local");
			else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
				/* both ends of the link in foreign partition */
				seq_puts(s, " foreign");
			else
				/* link straddles a partition */
				seq_puts(s, " shared");

			/*
			 * Unlikely, but strictly should query the LLP config
			 * registers because an NL4R can be configured to run
			 * NL3 protocol, even when not talking to an NL3 router.
			 * Ditto for node-node.
			 */
			seq_printf(s, " endpoint %s-%d, protocol %s\n",
				p->location, ptdata[pt].conn_port,
				(SN_HWPERF_IS_NL3ROUTER(obj) ||
				SN_HWPERF_IS_NL3ROUTER(p)) ?  "LLP3" : "LLP4");
		}
		vfree(ptdata);
	}

	return 0;
}
Example No. 17
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __init sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
				&sn_coherency_id, &sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	cpuid = smp_processor_id();
	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i=0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	pda->p_nodepda = nodepdaindr[cnode];
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		memcpy(pda->cnodeid_to_nasid_table,
		       pdacpu(0)->cnodeid_to_nasid_table,
		       sizeof(pda->cnodeid_to_nasid_table));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i].
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, 
			SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
Example No. 18
void
intr_init_vecblk( nodepda_t *npda,
                  cnodeid_t node,
                  int sn)
{
    int 			nasid = cnodeid_to_nasid(node);
    sh_ii_int0_config_u_t	ii_int_config;
    cpuid_t			cpu;
    cpuid_t			cpu0, cpu1;
    nodepda_t		*lnodepda;
    sh_ii_int0_enable_u_t	ii_int_enable;
    sh_int_node_id_config_u_t	node_id_config;
    sh_local_int5_config_u_t	local5_config;
    sh_local_int5_enable_u_t	local5_enable;
    extern void sn_init_cpei_timer(void);
    static int timer_added = 0;


    if (is_headless_node(node) ) {
        int cnode;
        struct ia64_sal_retval ret_stuff;

        // retarget all interrupts on this node to the master node.
        node_id_config.sh_int_node_id_config_regval = 0;
        node_id_config.sh_int_node_id_config_s.node_id = master_nasid;
        node_id_config.sh_int_node_id_config_s.id_sel = 1;
        HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG),
               node_id_config.sh_int_node_id_config_regval);
        cnode = nasid_to_cnodeid(master_nasid);
        lnodepda = NODEPDA(cnode);
        cpu = lnodepda->node_first_cpu;
        cpu = cpu_physical_id(cpu);
        SAL_CALL(ret_stuff, SN_SAL_REGISTER_CE, nasid, cpu, master_nasid,0,0,0,0);
        if (ret_stuff.status < 0) {
            printk("%s: SN_SAL_REGISTER_CE SAL_CALL failed\n",__FUNCTION__);
        }
    } else {
        lnodepda = NODEPDA(node);
        cpu = lnodepda->node_first_cpu;
        cpu = cpu_physical_id(cpu);
    }

    // Get the physical ids of the cpus on this node.
    cpu0 = nasid_slice_to_cpu_physical_id(nasid, 0);
    cpu1 = nasid_slice_to_cpu_physical_id(nasid, 2);

    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_ERROR_MASK), 0);
    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_CRBP_ERROR_MASK), 0);

    // Config and enable UART interrupt, all nodes.

    local5_config.sh_local_int5_config_regval = 0;
    local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
    local5_config.sh_local_int5_config_s.pid = cpu;
    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
           local5_config.sh_local_int5_config_regval);

    local5_enable.sh_local_int5_enable_regval = 0;
    local5_enable.sh_local_int5_enable_s.uart_int = 1;
    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
           local5_enable.sh_local_int5_enable_regval);


    // The II_INT_CONFIG register for cpu 0.
    ii_int_config.sh_ii_int0_config_regval = 0;
    ii_int_config.sh_ii_int0_config_s.type = 0;
    ii_int_config.sh_ii_int0_config_s.agt = 0;
    ii_int_config.sh_ii_int0_config_s.pid = cpu0;
    ii_int_config.sh_ii_int0_config_s.base = 0;

    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_CONFIG),
          ii_int_config.sh_ii_int0_config_regval);


    // The II_INT_CONFIG register for cpu 1.
    ii_int_config.sh_ii_int0_config_regval = 0;
    ii_int_config.sh_ii_int0_config_s.type = 0;
    ii_int_config.sh_ii_int0_config_s.agt = 0;
    ii_int_config.sh_ii_int0_config_s.pid = cpu1;
    ii_int_config.sh_ii_int0_config_s.base = 0;

    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_CONFIG),
          ii_int_config.sh_ii_int0_config_regval);


    // Enable interrupts for II_INT0 and 1.
    ii_int_enable.sh_ii_int0_enable_regval = 0;
    ii_int_enable.sh_ii_int0_enable_s.ii_enable = 1;

    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_ENABLE),
          ii_int_enable.sh_ii_int0_enable_regval);
    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_ENABLE),
          ii_int_enable.sh_ii_int0_enable_regval);


    if (!timer_added) { // can only init the timer once.
        timer_added = 1;
        sn_init_cpei_timer();
    }
}
Example No. 19
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, next_cl, nasid_array = 0;
	int i, ret;


	/* get the local reserved page's address */

	rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
					(u64) xpc_remote_copy_buffer,
						XPC_RSVD_PAGE_ALIGNED_SIZE);
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/*
	 * Place the XPC variables on the cache line following the
	 * reserved page structure.
	 */
	next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
	xpc_vars = (struct xpc_vars *) next_cl;

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded.  To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) mspec_kalloc_page(0);
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu.  This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				mspec_kfree_page((unsigned long) amos_page);
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	/*
	 * Place the XPC per partition specific variables on the cache line
	 * following the XPC variables structure.
	 */
	next_cl += XPC_VARS_ALIGNED_SIZE;
	memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
							XP_MAX_PARTITIONS);
	xpc_vars_part = (struct xpc_vars_part *) next_cl;
	xpc_vars->vars_part_pa = __pa(next_cl);

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->amos_page = amos_page;  /* save for next load of XPC */


	/*
	 * Initialize the activation related AMO variables.
	 */
	xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS);
	for (i = 1; i < XP_NASID_MASK_WORDS; i++) {
		xpc_IPI_init(i + XP_MAX_PARTITIONS);
	}
	/* export AMO page's physical address to other partitions */
	xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	*((volatile u64 *) &rp->vars_pa) = __pa(xpc_vars);

	return rp;
}
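
The layout arithmetic above places xpc_vars on the cache line following the reserved page and the per-partition array on the line after that, using precomputed *_ALIGNED_SIZE constants. A sketch of the same carving with the rounding written out; the 128-byte line size and the struct sizes are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define CACHELINE 128

/* round sz up to a whole number of cache lines */
static uint64_t cl_aligned(uint64_t sz)
{
	return (sz + CACHELINE - 1) & ~(uint64_t)(CACHELINE - 1);
}

int main(void)
{
	uint64_t rp = 0x1000;			/* pretend reserved-page address */
	uint64_t rp_sz = 72, vars_sz = 200;	/* made-up structure sizes */

	uint64_t vars = rp + cl_aligned(rp_sz);		/* next cache line */
	uint64_t vars_part = vars + cl_aligned(vars_sz);

	printf("rp=0x%llx vars=0x%llx vars_part=0x%llx\n",
	       (unsigned long long)rp, (unsigned long long)vars,
	       (unsigned long long)vars_part);
	return 0;
}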
Example No. 20
/* ARGSUSED */
static void __init
klhwg_add_node(vertex_hdl_t hwgraph_root, cnodeid_t cnode)
{
	nasid_t nasid;
	lboard_t *brd;
	klhub_t *hub;
	vertex_hdl_t node_vertex = NULL;
	char path_buffer[100];
	int rv;
	char *s;
	int board_disabled = 0;
	klcpu_t *cpu;
	vertex_hdl_t cpu_dir;

	nasid = cnodeid_to_nasid(cnode);
	brd = find_lboard_any((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
	ASSERT(brd);

	/* Generate a hardware graph path for this board. */
	board_to_path(brd, path_buffer);
	rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
	if (rv != GRAPH_SUCCESS) {
		printk("Node vertex creation failed.  Path == %s", path_buffer);
		return;
	}

	HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, node_vertex, NULL, "Created path for SHUB node.\n");
	hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
	ASSERT(hub);
	if(hub->hub_info.flags & KLINFO_ENABLE)
		board_disabled = 0;
	else
		board_disabled = 1;
		
	if(!board_disabled) {
		mark_nodevertex_as_node(node_vertex, cnode);
		s = dev_to_name(node_vertex, path_buffer, sizeof(path_buffer));
		NODEPDA(cnode)->hwg_node_name =
					kmalloc(strlen(s) + 1, GFP_KERNEL);
		if (NODEPDA(cnode)->hwg_node_name == NULL) {
			printk("%s: no memory\n", __FUNCTION__);
			return;
		}
		strcpy(NODEPDA(cnode)->hwg_node_name, s);
		hubinfo_set(node_vertex, NODEPDA(cnode)->pdinfo);
		NODEPDA(cnode)->slotdesc = brd->brd_slot;
		NODEPDA(cnode)->geoid = brd->brd_geoid;
		NODEPDA(cnode)->module = module_lookup(geo_module(brd->brd_geoid));
		klhwg_add_hub(node_vertex, hub, cnode);
	}

	/*
	 * If there's at least 1 CPU, add a "cpu" directory to represent
	 * the collection of all CPUs attached to this node.
	 */
	cpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
	if (cpu) {
		graph_error_t rv;

		rv = hwgraph_path_add(node_vertex, EDGE_LBL_CPU, &cpu_dir);
		if (rv != GRAPH_SUCCESS) {
			printk("klhwg_add_node: Cannot create CPU directory\n");
			return;
		}
		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, cpu_dir, NULL, "Created cpu directory on SHUB node.\n");

	}

	while (cpu) {
		cpuid_t cpu_id;
		cpu_id = nasid_slice_to_cpuid(nasid,cpu->cpu_info.physid);
		if (cpu_online(cpu_id))
			klhwg_add_cpu(node_vertex, cnode, cpu);
		else
			klhwg_add_disabled_cpu(node_vertex, cnode, cpu, brd->brd_slot);

		cpu = (klcpu_t *)
			find_component(brd, (klinfo_t *)cpu, KLSTRUCT_CPU);
	}
}
Example No. 21
/*
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
int shub1_bte_error_handler(unsigned long _nodepda)
{
	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
	nasid_t nasid;
	int i;
	int valid_crbs;
	ii_imem_u_t imem;	/* II IMEM Register */
	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
	ii_ibcr_u_t ibcr;
	ii_icmr_u_t icmr;
	ii_ieclr_u_t ieclr;

	BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
		    smp_processor_id()));

	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
			    smp_processor_id()));
		return 1;
	}

	/* Determine information about our hub */
	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);

	/*
	 * A BTE transfer can use multiple CRBs.  We need to make sure
	 * that all the BTE CRBs are complete (or timed out) before
	 * attempting to clean up the error.  Resetting the BTE while
	 * there are still BTE CRBs active will hang the BTE.
	 * We should look at all the CRBs to see if they are allocated
	 * to the BTE and see if they are still active.  When none
	 * are active, we can continue with the cleanup.
	 *
	 * We also want to make sure that the local NI port is up.
	 * When a router resets the NI port can go down, while it
	 * goes through the LLP handshake, but then comes back up.
	 */
	icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
	if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
		/*
		 * There are errors which still need to be cleaned up by
		 * hubiio_crb_error_handler
		 */
		mod_timer(recovery_timer, jiffies + (HZ * 5));
		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
			    smp_processor_id()));
		return 1;
	}
	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {

		valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;

		for (i = 0; i < IIO_NUM_CRBS; i++) {
			if (!((1 << i) & valid_crbs)) {
				/* This crb was not marked as valid, ignore */
				continue;
			}
			icrbd.ii_icrb0_d_regval =
			    REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
			if (icrbd.d_bteop) {
				mod_timer(recovery_timer, jiffies + (HZ * 5));
				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
					    err_nodepda, smp_processor_id(),
					    i));
				return 1;
			}
		}
	}

	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
	/* Re-enable both bte interfaces */
	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);

	/* Clear BTE0/1 error bits */
	ieclr.ii_ieclr_regval = 0;
	if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
		ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
	if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
		ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
	REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);

	/* Reinitialize both BTE state machines. */
	ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);

	del_timer(recovery_timer);
	return 0;
}
Exemplo n.º 22
0
/*
 * Initialize all I/O on the specified node.
 */
static void
io_init_node(cnodeid_t cnodeid)
{
	/*REFERENCED*/
	vertex_hdl_t hubv, switchv, widgetv;
	struct xwidget_hwid_s hwid;
	hubinfo_t hubinfo;
	int is_xswitch;
	nodepda_t	*npdap;
	struct semaphore *peer_sema = 0;
	uint32_t	widget_partnum;

	npdap = NODEPDA(cnodeid);

	/*
	 * Get the "top" vertex for this node's hardware
	 * graph; it will carry the per-hub hub-specific
	 * data, and act as the crosstalk provider master.
	 * It's canonical path is probably something of the
	 * form /hw/module/%M/slot/%d/node
	 */
	hubv = cnodeid_to_vertex(cnodeid);
	DBG("io_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n", cnodeid, hubv, npdap);

	ASSERT(hubv != GRAPH_VERTEX_NONE);

	/* 
	 * If nothing connected to this hub's xtalk port, we're done.
	 */
	early_probe_for_widget(hubv, &hwid);
	if (hwid.part_num == XWIDGET_PART_NUM_NONE) {
		DBG("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n", hubv);
		return;
		/* NOTREACHED */
	}

	/* 
	 * attach our hub_provider information to hubv,
	 * so we can use it as a crosstalk provider "master"
	 * vertex.
	 */
	xtalk_provider_register(hubv, &hub_provider);
	xtalk_provider_startup(hubv);

	/*
	 * Create a vertex to represent the crosstalk bus
	 * attached to this hub, and a vertex to be used
	 * as the connect point for whatever is out there
	 * on the other side of our crosstalk connection.
	 *
	 * Crosstalk Switch drivers "climb up" from their
	 * connection point to try and take over the switch
	 * point.
	 *
	 * Of course, the edges and vertices may already
	 * exist, in which case our net effect is just to
	 * associate the "xtalk_" driver with the connection
	 * point for the device.
	 */

	(void)hwgraph_path_add(hubv, EDGE_LBL_XTALK, &switchv);

	DBG("io_init_node: Created 'xtalk' entry to '../node/' xtalk vertex 0x%p\n", switchv);

	ASSERT(switchv != GRAPH_VERTEX_NONE);

	(void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);

	DBG("io_init_node: Created symlink 'io' from ../node/io to ../node/xtalk \n");

	/*
	 * We need to find the widget id and update the basew_id field
	 * accordingly. In particular, SN00 has a direct-connected bridge,
	 * and hence the widget id is not 0.
	 */
	widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE
			(cnodeid_to_nasid(cnodeid), 0) + 
			WIDGET_ID))) & WIDGET_PART_NUM) 
			>> WIDGET_PART_NUM_SHFT;

	if ((widget_partnum == XBOW_WIDGET_PART_NUM) ||
			(widget_partnum == XXBOW_WIDGET_PART_NUM) ||
			(widget_partnum == PXBOW_WIDGET_PART_NUM) ) {
		/* 
		 * Xbow control register does not have the widget ID field.
		 * So, hard code the widget ID to be zero.
		 */
		DBG("io_init_node: Found XBOW widget_partnum= 0x%x\n", widget_partnum);
		npdap->basew_id = 0;

	} else {
		void	*bridge;

		bridge = (void *)NODE_SWIN_BASE(cnodeid_to_nasid(cnodeid), 0);
		npdap->basew_id = pcireg_bridge_control_get(bridge) & WIDGET_WIDGET_ID;

		printk(" ****io_init_node: Unknown Widget Part Number 0x%x Widget ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, (void *)hubv);
		return;
	}
	{
		char widname[10];
		sprintf(widname, "%x", npdap->basew_id);
		(void)hwgraph_path_add(switchv, widname, &widgetv);
		DBG("io_init_node: Created '%s' to '..node/xtalk/' vertex 0x%p\n", widname, widgetv);
		ASSERT(widgetv != GRAPH_VERTEX_NONE);
	}
	
	nodepda->basew_xc = widgetv;

	is_xswitch = xwidget_hwid_is_xswitch(&hwid);

	/* 
	 * Try to become the master of the widget.  If this is an xswitch
	 * with multiple hubs connected, only one will succeed.  Mastership
	 * of an xswitch is used only when touching registers on that xswitch.
	 * The slave xwidgets connected to the xswitch can be owned by various
	 * masters.
	 */
	if (device_master_set(widgetv, hubv) == 0) {

		/* Only one hub (thread) per Crosstalk device or switch makes
		 * it to here.
		 */

		/* 
		 * Initialize whatever xwidget is hanging off our hub.
		 * Whatever it is, it's accessible through widgetnum 0.
		 */
		hubinfo_get(hubv, &hubinfo);

		(void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid);

		/* 
		 * Special handling for Crosstalk Switches (e.g. xbow).
		 * We need to do things in roughly the following order:
		 *	1) Initialize xswitch hardware (done above)
		 *	2) Determine which hubs are available to be widget masters
		 *	3) Discover which links are active from the xswitch
		 *	4) Assign xwidgets hanging off the xswitch to hubs
		 *	5) Initialize all xwidgets on the xswitch
		 */

		volunteer_for_widgets(switchv, hubv);

		/* If there's someone else on this crossbow, recognize him */
		if (npdap->xbow_peer != INVALID_NASID) {
			nodepda_t *peer_npdap = NODEPDA(nasid_to_cnodeid(npdap->xbow_peer));
			peer_sema = &peer_npdap->xbow_sema;
			volunteer_for_widgets(switchv, peer_npdap->node_vertex);
		}

		assign_widgets_to_volunteers(switchv, hubv);

		/* Signal that we're done */
		if (peer_sema) {
			up(peer_sema);
		}
		
	}
	else {
		/* Wait 'til master is done assigning widgets. */
		down(&npdap->xbow_sema);
	}

	/* Now both nodes can safely initialize widgets */
	io_init_xswitch_widgets(switchv, cnodeid);

	DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
}