static irqreturn_t hub_eint_handler(int irq, void *arg)
{
	struct hubdev_info *hubdev_info;
	struct ia64_sal_retval ret_stuff;
	nasid_t nasid;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	hubdev_info = (struct hubdev_info *)arg;
	nasid = hubdev_info->hdi_nasid;

	if (is_shub1()) {
		SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
				(u64) nasid, 0, 0, 0, 0, 0, 0);

		if ((int)ret_stuff.v0)
			panic("%s: Fatal %s Error", __func__,
			      ((nasid & 1) ? "TIO" : "HUBII"));

		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
			(void)hubiio_crb_error_handler(hubdev_info);
	} else if (nasid & 1) {	/* TIO errors */
		SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
				(u64) nasid, 0, 0, 0, 0, 0, 0);

		if ((int)ret_stuff.v0)
			panic("%s: Fatal TIO Error", __func__);
	} else
		bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));

	return IRQ_HANDLED;
}
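/*
 * Illustration (not from the original source): the nasid & 1 tests above
 * rely on the SN2 convention that TIO nodes sit at odd NASIDs and hub
 * (compute) nodes at even NASIDs.  A minimal, self-contained userspace
 * sketch of that decode; the sample values are hypothetical.
 */
#include <stdio.h>

typedef short nasid_t;

static const char *nasid_kind(nasid_t nasid)
{
	/* same parity test hub_eint_handler() uses to pick the TIO path */
	return (nasid & 1) ? "TIO" : "HUBII";
}

int main(void)
{
	nasid_t samples[] = { 0, 1, 4, 7 };	/* made-up NASIDs */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nasid %d -> %s\n", samples[i], nasid_kind(samples[i]));
	return 0;
}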
static inline int mspec_zero_block(unsigned long addr, int len)
{
	int status;

	if (is_sn2) {
		if (is_shub2()) {
			int nid;
			void *p;
			int i;

			/* On shub2, flush the AMO cache lines by doing
			 * fetchop loads through this node's scratch page
			 * before the BTE zero-fill. */
			nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
			p = (void *)TO_AMO(scratch_page[nid]);

			for (i = 0; i < SH2_AMO_CACHE_ENTRIES; i++) {
				FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
				p += FETCHOP_VAR_SIZE;
			}
		}
		status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
				  BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
	} else {
		memset((char *) addr, 0, len);
		status = 0;
	}
	return status;
}
/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility.
 */
static int __init mspec_init(void)
{
	int ret;
	int nid;

	/*
	 * The fetchop device only works on SN2 hardware, uncached and cached
	 * memory drivers should both be valid on all ia64 hardware
	 */
#ifdef CONFIG_SGI_SN
	if (ia64_platform_is("sn2")) {
		is_sn2 = 1;
		if (is_shub2()) {
			ret = -ENOMEM;
			for_each_node_state(nid, N_ONLINE) {
				int actual_nid;
				int nasid;
				unsigned long phys;

				scratch_page[nid] = uncached_alloc_page(nid, 1);
				if (scratch_page[nid] == 0)
					goto free_scratch_pages;
				phys = __pa(scratch_page[nid]);
				nasid = get_node_number(phys);
				actual_nid = nasid_to_cnodeid(nasid);
				if (actual_nid != nid)
					goto free_scratch_pages;
			}
		}
inline int
check_nasid_equiv(nasid_t nasida, nasid_t nasidb)
{
	if ((nasida == nasidb) ||
	    (nasida == NODEPDA(nasid_to_cnodeid(nasidb))->xbow_peer))
		return 1;
	else
		return 0;
}
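/*
 * Illustration (not from the original source): check_nasid_equiv() treats
 * two NASIDs as equivalent when they are the same node or when one is the
 * other's crossbow peer (the second hub sharing an xbow).  A simplified
 * userspace model of that test; the peer table is hypothetical and is
 * indexed directly by NASID instead of going through NODEPDA().
 */
#include <stdio.h>

typedef short nasid_t;
#define INVALID_NASID ((nasid_t)-1)

/* made-up topology: nasid 0 and nasid 2 share one crossbow */
static nasid_t xbow_peer[4] = { 2, INVALID_NASID, 0, INVALID_NASID };

static int check_nasid_equiv(nasid_t nasida, nasid_t nasidb)
{
	return (nasida == nasidb) || (nasida == xbow_peer[nasidb]);
}

int main(void)
{
	printf("%d %d %d\n",
	       check_nasid_equiv(0, 0),	/* same node -> 1 */
	       check_nasid_equiv(0, 2),	/* xbow peers -> 1 */
	       check_nasid_equiv(0, 1));	/* unrelated -> 0 */
	return 0;
}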
/*
 * per_hub_init
 *
 * This code is executed once for each Hub chip.
 */
static void per_hub_init(cnodeid_t cnode)
{
	nasid_t nasid;
	nodepda_t *npdap;
	ii_icmr_u_t ii_icmr;
	ii_ibcr_u_t ii_ibcr;
	ii_ilcsr_u_t ii_ilcsr;

	nasid = cnodeid_to_nasid(cnode);
	ASSERT(nasid != INVALID_NASID);
	ASSERT(nasid_to_cnodeid(nasid) == cnode);

	npdap = NODEPDA(cnode);

	/* Disable the request and reply errors. */
	REMOTE_HUB_S(nasid, IIO_IWEIM, 0xC000);

	/*
	 * Set the total number of CRBs that can be used.
	 */
	ii_icmr.ii_icmr_regval = 0x0;
	ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xf;
	if (enable_shub_wars_1_1()) {
		/* Set bit one of ICMR to prevent II from sending
		 * interrupt for II bug. */
		ii_icmr.ii_icmr_regval |= 0x1;
	}
	REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);

	/*
	 * Set the number of CRBs that both of the BTEs combined
	 * can use minus 1.
	 */
	ii_ibcr.ii_ibcr_regval = 0x0;
	ii_ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
	if (ii_ilcsr.ii_ilcsr_fld_s.i_llp_stat & LNK_STAT_WORKING) {
		ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
	} else {
		/*
		 * if the LLP is down, there is no attached I/O, so
		 * give BTE all the CRBs.
		 */
		ii_ibcr.ii_ibcr_fld_s.i_count = 0x14;
	}
	REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);

	/*
	 * Set CRB timeout to be 10ms.
	 */
	REMOTE_HUB_S(nasid, IIO_ICTP, 0xffffff);
	REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);

	/* Initialize error interrupts for this hub. */
	hub_error_init(cnode);
}
static int tiocx_btchar_get(int nasid)
{
	moduleid_t module_id;
	geoid_t geoid;
	int cnodeid;

	cnodeid = nasid_to_cnodeid(nasid);
	geoid = cnodeid_get_geoid(cnodeid);
	module_id = geo_module(geoid);
	return MODULE_GET_BTCHAR(module_id);
}
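/*
 * Illustration (not from the original source): tiocx_btchar_get() chains
 * four lookups: nasid -> compact node id -> geographic id -> module id ->
 * brick-type character.  A self-contained userspace model of that chain;
 * every table below is a hypothetical stand-in for the real firmware data.
 */
#include <stdio.h>

static int cnode_of[4] = { 0, -1, 1, -1 };		/* nasid -> cnodeid */
static int geoid_of[2] = { 10, 11 };			/* cnodeid -> geoid */
static int module_of[12] = { [10] = 3, [11] = 4 };	/* geoid -> module */
static char btchar_of[5] = { [3] = 'x', [4] = 'p' };	/* module -> type */

static char btchar_get(int nasid)
{
	return btchar_of[module_of[geoid_of[cnode_of[nasid]]]];
}

int main(void)
{
	printf("nasid 0 brick type: %c\n", btchar_get(0));	/* prints 'x' */
	return 0;
}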
/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility.
 */
static int __init mspec_init(void)
{
	int ret;
	int nid;

	/*
	 * The fetchop device only works on SN2 hardware, uncached and cached
	 * memory drivers should both be valid on all ia64 hardware
	 */
#ifdef CONFIG_SGI_SN
	if (ia64_platform_is("sn2")) {
		is_sn2 = 1;
		if (is_shub2()) {
			ret = -ENOMEM;
			for_each_online_node(nid) {
				int actual_nid;
				int nasid;
				unsigned long phys;

				scratch_page[nid] = uncached_alloc_page(nid);
				if (scratch_page[nid] == 0)
					goto free_scratch_pages;
				phys = __pa(scratch_page[nid]);
				nasid = get_node_number(phys);
				actual_nid = nasid_to_cnodeid(nasid);
				if (actual_nid != nid)
					goto free_scratch_pages;
			}
		}

		ret = misc_register(&fetchop_miscdev);
		if (ret) {
			printk(KERN_ERR "%s: failed to register device %i\n",
			       FETCHOP_ID, ret);
			goto free_scratch_pages;
		}
	}
/*
 * Initialize all I/O on the specified node.
 */
static void io_init_node(cnodeid_t cnodeid)
{
	/*REFERENCED*/
	vertex_hdl_t hubv, switchv, widgetv;
	struct xwidget_hwid_s hwid;
	hubinfo_t hubinfo;
	int is_xswitch;
	nodepda_t *npdap;
	struct semaphore *peer_sema = 0;
	uint32_t widget_partnum;

	npdap = NODEPDA(cnodeid);

	/*
	 * Get the "top" vertex for this node's hardware
	 * graph; it will carry the per-hub hub-specific
	 * data, and act as the crosstalk provider master.
	 * Its canonical path is probably something of the
	 * form /hw/module/%M/slot/%d/node
	 */
	hubv = cnodeid_to_vertex(cnodeid);
	DBG("io_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n",
	    cnodeid, hubv, npdap);
	ASSERT(hubv != GRAPH_VERTEX_NONE);

	/*
	 * If nothing is connected to this hub's xtalk port, we're done.
	 */
	early_probe_for_widget(hubv, &hwid);
	if (hwid.part_num == XWIDGET_PART_NUM_NONE) {
		DBG("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n",
		    hubv);
		return;
		/* NOTREACHED */
	}

	/*
	 * Attach our hub_provider information to hubv,
	 * so we can use it as a crosstalk provider "master"
	 * vertex.
	 */
	xtalk_provider_register(hubv, &hub_provider);
	xtalk_provider_startup(hubv);

	/*
	 * Create a vertex to represent the crosstalk bus
	 * attached to this hub, and a vertex to be used
	 * as the connect point for whatever is out there
	 * on the other side of our crosstalk connection.
	 *
	 * Crosstalk Switch drivers "climb up" from their
	 * connection point to try and take over the switch
	 * point.
	 *
	 * Of course, the edges and vertices may already
	 * exist, in which case our net effect is just to
	 * associate the "xtalk_" driver with the connection
	 * point for the device.
	 */
	(void)hwgraph_path_add(hubv, EDGE_LBL_XTALK, &switchv);
	DBG("io_init_node: Created 'xtalk' entry to '../node/' xtalk vertex 0x%p\n",
	    switchv);
	ASSERT(switchv != GRAPH_VERTEX_NONE);
	(void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);
	DBG("io_init_node: Created symlink 'io' from ../node/io to ../node/xtalk \n");

	/*
	 * We need to find the widget id and update the basew_id field
	 * accordingly. In particular, SN00 has a direct connected bridge,
	 * and hence the widget id is not 0.
	 */
	widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE
						  (cnodeid_to_nasid(cnodeid), 0)
						  + WIDGET_ID))) & WIDGET_PART_NUM)
	    >> WIDGET_PART_NUM_SHFT;

	if ((widget_partnum == XBOW_WIDGET_PART_NUM) ||
	    (widget_partnum == XXBOW_WIDGET_PART_NUM) ||
	    (widget_partnum == PXBOW_WIDGET_PART_NUM)) {
		/*
		 * Xbow control register does not have the widget ID field.
		 * So, hard code the widget ID to be zero.
		 */
		DBG("io_init_node: Found XBOW widget_partnum= 0x%x\n",
		    widget_partnum);
		npdap->basew_id = 0;
	} else {
		void *bridge;

		bridge = (void *)NODE_SWIN_BASE(cnodeid_to_nasid(cnodeid), 0);
		npdap->basew_id = pcireg_bridge_control_get(bridge) &
		    WIDGET_WIDGET_ID;

		printk(" ****io_init_node: Unknown Widget Part Number 0x%x Widget ID 0x%x attached to Hubv 0x%p ****\n",
		       widget_partnum, npdap->basew_id, (void *)hubv);
		return;
	}

	{
		char widname[10];

		sprintf(widname, "%x", npdap->basew_id);
		(void)hwgraph_path_add(switchv, widname, &widgetv);
		DBG("io_init_node: Created '%s' to '..node/xtalk/' vertex 0x%p\n",
		    widname, widgetv);
		ASSERT(widgetv != GRAPH_VERTEX_NONE);
	}

	nodepda->basew_xc = widgetv;

	is_xswitch = xwidget_hwid_is_xswitch(&hwid);

	/*
	 * Try to become the master of the widget. If this is an xswitch
	 * with multiple hubs connected, only one will succeed. Mastership
	 * of an xswitch is used only when touching registers on that xswitch.
	 * The slave xwidgets connected to the xswitch can be owned by various
	 * masters.
	 */
	if (device_master_set(widgetv, hubv) == 0) {
		/* Only one hub (thread) per Crosstalk device or switch makes
		 * it to here.
		 */

		/*
		 * Initialize whatever xwidget is hanging off our hub.
		 * Whatever it is, it's accessible through widgetnum 0.
		 */
		hubinfo_get(hubv, &hubinfo);
		(void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv,
				       hubinfo->h_widgetid);

		/*
		 * Special handling for Crosstalk Switches (e.g. xbow).
		 * We need to do things in roughly the following order:
		 *	1) Initialize xswitch hardware (done above)
		 *	2) Determine which hubs are available to be widget masters
		 *	3) Discover which links are active from the xswitch
		 *	4) Assign xwidgets hanging off the xswitch to hubs
		 *	5) Initialize all xwidgets on the xswitch
		 */
		volunteer_for_widgets(switchv, hubv);

		/* If there's someone else on this crossbow, recognize him */
		if (npdap->xbow_peer != INVALID_NASID) {
			nodepda_t *peer_npdap =
			    NODEPDA(nasid_to_cnodeid(npdap->xbow_peer));
			peer_sema = &peer_npdap->xbow_sema;
			volunteer_for_widgets(switchv, peer_npdap->node_vertex);
		}

		assign_widgets_to_volunteers(switchv, hubv);

		/* Signal that we're done */
		if (peer_sema) {
			up(peer_sema);
		}
	} else {
		/* Wait 'til master is done assigning widgets. */
		down(&npdap->xbow_sema);
	}

	/* Now both nodes can safely initialize widgets */
	io_init_xswitch_widgets(switchv, cnodeid);

	DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n",
	    cnodeid);
}
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
{
	nasid_t nasid;
	ii_icrb0_a_u_t icrba;	/* II CRB Register A */
	ii_icrb0_b_u_t icrbb;	/* II CRB Register B */
	ii_icrb0_c_u_t icrbc;	/* II CRB Register C */
	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
	ii_icrb0_e_u_t icrbe;	/* II CRB Register E */
	int i;
	int num_errors = 0;	/* Num of errors handled */
	ioerror_t ioerror;

	nasid = hubdev_info->hdi_nasid;

	/*
	 * XXX - Add locking for any recovery actions
	 */

	/*
	 * Scan through all CRBs in the Hub, and handle the errors
	 * in any of the CRBs marked.
	 */
	for (i = 0; i < IIO_NUM_CRBS; i++) {
		/* Check this crb entry to see if it is in error. */
		icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));

		if (icrbb.b_mark == 0) {
			continue;
		}

		icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));

		IOERROR_INIT(&ioerror);

		/* read other CRB error registers. */
		icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
		icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
		icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));

		IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);

		/* Check if this error is due to BTE operation,
		 * and handle it separately.
		 */
		if (icrbd.d_bteop ||
		    ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
		      icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
		     (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
		      icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {

			int bte_num;

			if (icrbd.d_bteop)
				bte_num = icrbc.c_btenum;
			else	/* b_initiator bit 2 gives BTE number */
				bte_num = (icrbb.b_initiator & 0x4) >> 2;

			hubiio_crb_free(hubdev_info, i);
			bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num,
					      i, &ioerror, icrbd.d_bteop);
			num_errors++;
			continue;
		}
	}
}
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __cpuinit sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	cpuid = smp_processor_id();
	if (cpuid == 0 && IS_MEDUSA()) {
		if (ia64_sn_is_fake_prom())
			sn_prom_type = 2;
		else
			sn_prom_type = 1;
		printk(KERN_INFO "Running on medusa with %s PROM\n",
		       (sn_prom_type == 1) ? "real" : "fake");
	}

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
				&sn_hub_info->nasid_bitmask,
				&sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size,
				&sn_partition_id, &sn_coherency_id,
				&sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * Don't check status. The SAL call is not supported on all PROMs
	 * but a failure is harmless.
	 */
	(void) ia64_sn_set_cpu_number(cpuid);

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
			break;

	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	sn_nodepda = nodepdaindr[cnode];

	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
		memcpy(sn_cnodeid_to_nasid,
		       (&per_cpu(__sn_cnodeid_to_nasid, 0)),
		       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses this cpu's
	 * sn_cnodeid_to_nasid table which was just initialized if this
	 * isn't cpu 0.
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0,
			      SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
			      SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;

		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
		pda->pio_write_status_val =
		    is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;

		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
static void __init scan_for_ionodes(void)
{
	int nasid = 0;
	lboard_t *brd;

	/* Setup ionodes with memory */
	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
		char *klgraph_header;
		cnodeid_t cnodeid;

		if (physical_node_map[nasid] == -1)
			continue;

		cnodeid = -1;
		klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid));
		if (!klgraph_header) {
			if (IS_RUNNING_ON_SIMULATOR())
				continue;
			BUG();	/* All nodes must have klconfig tables! */
		}
		cnodeid = nasid_to_cnodeid(nasid);
		root_lboard[cnodeid] = (lboard_t *)
		    NODE_OFFSET_TO_LBOARD((nasid),
					  ((kl_config_hdr_t *)
					   (klgraph_header))->ch_board_info);
	}

	/* Scan headless/memless IO Nodes. */
	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
		/* if there's no nasid, don't try to read the klconfig on
		 * the node */
		if (physical_node_map[nasid] == -1)
			continue;
		brd = find_lboard_any((lboard_t *)
				      root_lboard[nasid_to_cnodeid(nasid)],
				      KLTYPE_SNIA);
		if (brd) {
			brd = KLCF_NEXT_ANY(brd); /* Skip this node's lboard */
			if (!brd)
				continue;
		}

		brd = find_lboard_any(brd, KLTYPE_SNIA);

		while (brd) {
			pda->cnodeid_to_nasid_table[numionodes] =
			    brd->brd_nasid;
			physical_node_map[brd->brd_nasid] = numionodes;
			root_lboard[numionodes] = brd;
			numionodes++;
			brd = KLCF_NEXT_ANY(brd);
			if (!brd)
				break;

			brd = find_lboard_any(brd, KLTYPE_SNIA);
		}
	}

	/* Scan for TIO nodes. */
	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
		/* if there's no nasid, don't try to read the klconfig on
		 * the node */
		if (physical_node_map[nasid] == -1)
			continue;
		brd = find_lboard_any((lboard_t *)
				      root_lboard[nasid_to_cnodeid(nasid)],
				      KLTYPE_TIO);
		while (brd) {
			pda->cnodeid_to_nasid_table[numionodes] =
			    brd->brd_nasid;
			physical_node_map[brd->brd_nasid] = numionodes;
			root_lboard[numionodes] = brd;
			numionodes++;
			brd = KLCF_NEXT_ANY(brd);
			if (!brd)
				break;

			brd = find_lboard_any(brd, KLTYPE_TIO);
		}
	}
}
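/*
 * Illustration (not from the original source): the scans above step NASIDs
 * by two because hub/compute nodes occupy even NASIDs, and they hand out
 * compact node ids (cnodeids) in discovery order.  A small userspace model
 * of that numbering; the populated slots and MAX_PHYSNODE_ID value are
 * hypothetical.
 */
#include <stdio.h>

#define MAX_PHYSNODE_ID 16	/* made-up bound for the sketch */

int main(void)
{
	int physical_node_map[MAX_PHYSNODE_ID];
	int numnodes = 0;
	int nasid;

	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid++)
		physical_node_map[nasid] = -1;
	/* pretend firmware reported nodes at NASIDs 0, 4 and 6 */
	physical_node_map[0] = physical_node_map[4] = physical_node_map[6] = 0;

	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
		if (physical_node_map[nasid] == -1)
			continue;
		physical_node_map[nasid] = numnodes++;
		printf("nasid %d -> cnodeid %d\n", nasid,
		       physical_node_map[nasid]);
	}
	return 0;
}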
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __init sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
				&sn_hub_info->nasid_bitmask,
				&sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size,
				&sn_partition_id, &sn_coherency_id,
				&sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	cpuid = smp_processor_id();
	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	pda->p_nodepda = nodepdaindr[cnode];
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		memcpy(pda->cnodeid_to_nasid_table,
		       pdacpu(0)->cnodeid_to_nasid_table,
		       sizeof(pda->cnodeid_to_nasid_table));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses
	 * pda.cnodeid_to_nasid_table[i].
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0,
			      SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1,
			      SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;

		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr =
		    (volatile unsigned long *)LOCAL_MMR_ADDR(pio[slice]);
		pda->pio_write_status_val =
		    is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;

		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
static void __init klhwg_connect_hubs(vertex_hdl_t hwgraph_root)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd;
	klhub_t *hub;
	lboard_t *dest_brd;
	vertex_hdl_t hub_hndl;
	vertex_hdl_t dest_hndl;
	char path_buffer[50];
	char dest_path[50];
	graph_error_t rc;
	int port;

	for (cnode = 0; cnode < numionodes; cnode++) {
		nasid = cnodeid_to_nasid(cnode);

		brd = find_lboard_any((lboard_t *)KL_CONFIG_INFO(nasid),
				      KLTYPE_SNIA);

		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
		ASSERT(hub);

		for (port = 1; port <= MAX_NI_PORTS; port++) {
			if (hub->hub_port[port].port_nasid == INVALID_NASID) {
				continue;	/* Port not active */
			}

			if (nasid_to_cnodeid(hub->hub_port[port].port_nasid)
			    == INVALID_CNODEID)
				continue;

			/* Generate a hardware graph path for this board. */
			board_to_path(brd, path_buffer);

			rc = hwgraph_traverse(hwgraph_root, path_buffer,
					      &hub_hndl);

			if (rc != GRAPH_SUCCESS)
				printk(KERN_WARNING "Can't find hub: %s",
				       path_buffer);

			dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					hub->hub_port[port].port_nasid,
					hub->hub_port[port].port_offset);

			/* Generate a hardware graph path for this board. */
			board_to_path(dest_brd, dest_path);

			rc = hwgraph_traverse(hwgraph_root, dest_path,
					      &dest_hndl);

			if (rc != GRAPH_SUCCESS) {
				if (KL_CONFIG_DUPLICATE_BOARD(dest_brd))
					continue;
				printk("Can't find board: %s", dest_path);
				return;
			} else {
				char buf[1024];

				rc = hwgraph_path_add(hub_hndl,
						      EDGE_LBL_INTERCONNECT,
						      &hub_hndl);
				HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__,
					      hub_hndl, NULL,
					      "Created link path.\n");

				sprintf(buf, "%s/%s", path_buffer,
					EDGE_LBL_INTERCONNECT);
				rc = hwgraph_traverse(hwgraph_root, buf,
						      &hub_hndl);

				sprintf(buf, "%d", port);
				rc = hwgraph_edge_add(hub_hndl, dest_hndl, buf);
				HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__,
					      hub_hndl, dest_hndl,
					      "Created edge %s from vhdl1 to vhdl2.\n",
					      buf);

				if (rc != GRAPH_SUCCESS) {
					printk("Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
					       path_buffer, dest_path,
					       (void *)dest_hndl, rc);
					return;
				}
			}
		}
	}
}
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft,
		struct pci_controller *controller)
{
	int nasid, cnode, j;
	cnodeid_t near_cnode;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_kernel *sn_flush_device_kernel;
	struct sn_flush_device_common *common;

	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */
	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base =
	    (((u64) soft->pbi_buscommon.bs_base << 4) >> 4) |
	    __IA64_UNCACHED_OFFSET;
	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIASIC_ERROR, (void *)pcibr_error_intr_handler,
			IRQF_SHARED, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}

	/*
	 * Update the Bridge with the "kernel" pagesize
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_kernel) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_kernel++) {
				common = sn_flush_device_kernel->common;
				if (common->sfdl_slot == -1)
					continue;
				if ((common->sfdl_persistent_segment ==
				     soft->pbi_buscommon.bs_persist_segment) &&
				    (common->sfdl_persistent_busnum ==
				     soft->pbi_buscommon.bs_persist_busnum))
					common->sfdl_pcibus_info = soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);

	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
		/* TIO PCI Bridge: find nearest node with CPUs */
		int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);

		if (e < 0) {
			near_cnode = (cnodeid_t)-1;	/* use any node */
			printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
			       "near node with CPUs to TIO node %d, err=%d\n",
			       cnode, e);
		}
		controller->node = near_cnode;
	} else
		controller->node = cnode;
	return soft;
}
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft,
		struct pci_controller *controller)
{
	int nasid, cnode, j;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_list *sn_flush_device_list;

	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */
	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base =
	    (((u64) soft->pbi_buscommon.bs_base << 4) >> 4) |
	    __IA64_UNCACHED_OFFSET;
	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
			SA_SHIRQ, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}

	/*
	 * Update the Bridge with the "kernel" pagesize
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_list) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_list++) {
				if (sn_flush_device_list->sfdl_slot == -1)
					continue;
				if (sn_flush_device_list->sfdl_persistent_busnum ==
				    soft->pbi_buscommon.bs_persist_busnum)
					sn_flush_device_list->sfdl_pcibus_info =
					    soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
	if (!soft->pbi_int_ate_resource.ate) {
		/* don't memset or hand out a NULL ATE map on
		 * allocation failure */
		kfree(soft);
		return NULL;
	}
	memset(soft->pbi_int_ate_resource.ate, 0,
	       (soft->pbi_int_ate_size * sizeof(uint64_t)));

	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP)
		/*
		 * TIO PCI Bridge with no closest node information.
		 * FIXME: Find another way to determine the closest node
		 */
		controller->node = -1;
	else
		controller->node = cnode;
	return soft;
}
void sn_dma_flush(u64 addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	unsigned long flags;
	u64 itte;
	struct hubdev_info *hubinfo;
	struct sn_flush_device_kernel *p;
	struct sn_flush_device_common *common;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	BUG_ON(!hubinfo);

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (!TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
		common = p->common;
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (common->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= common->sfdl_bar_list[j].start &&
			    addr <= common->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE &&
		    common->sfdl_bar_list[j].start != 0)
			break;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	if (is_tio) {
		u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		u32 revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
					     (common->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&p->sfdl_flush_lock, flags);
		*common->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile u32 *)(common->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(common->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
	}
	return;
}
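/*
 * Illustration (not from the original source): the device search in
 * sn_dma_flush() walks each device's BAR list, which is terminated by a
 * zero start address, until one BAR contains the faulting address.  A
 * self-contained userspace model of that loop; the sizes and sample BARs
 * are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define DEV_PER_WIDGET 4	/* made-up sizes for the sketch */
#define BARS_PER_DEV 6

struct bar { uint64_t start, end; };

static int find_device_for_addr(struct bar bars[][BARS_PER_DEV], uint64_t addr)
{
	int i, j;

	for (i = 0; i < DEV_PER_WIDGET; i++) {
		for (j = 0; j < BARS_PER_DEV; j++) {
			if (bars[i][j].start == 0)
				break;	/* end of this device's BAR list */
			if (addr >= bars[i][j].start && addr <= bars[i][j].end)
				return i;	/* device i claims addr */
		}
	}
	return -1;	/* no device claims this address */
}

int main(void)
{
	struct bar bars[DEV_PER_WIDGET][BARS_PER_DEV] = {
		{ { 0x1000, 0x1fff } },
		{ { 0x8000, 0x8fff }, { 0x9000, 0x9fff } },
	};

	printf("addr 0x9100 -> device %d\n",
	       find_device_for_addr(bars, 0x9100));	/* prints device 1 */
	return 0;
}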
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft,
		struct pci_controller *controller)
{
	int nasid, cnode, j;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_kernel *sn_flush_device_kernel;
	struct sn_flush_device_common *common;

	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */
	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base = (unsigned long)
	    ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
		    sizeof(struct pic));
	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
			IRQF_SHARED, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}
	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);

	/*
	 * Update the Bridge with the "kernel" pagesize
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_kernel) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_kernel++) {
				common = sn_flush_device_kernel->common;
				if (common->sfdl_slot == -1)
					continue;
				if ((common->sfdl_persistent_segment ==
				     soft->pbi_buscommon.bs_persist_segment) &&
				    (common->sfdl_persistent_busnum ==
				     soft->pbi_buscommon.bs_persist_busnum))
					common->sfdl_pcibus_info = soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);

	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	return soft;
}
/*
 * If this PIC is attached to two Cbricks ("dual-ported") then
 * attach each bus to opposite Cbricks.
 *
 * If successful, return a new vertex suitable for attaching the PIC bus.
 * If not successful, return zero and both buses will attach to the
 * vertex passed into pic_attach().
 */
static vertex_hdl_t
pic_bus1_redist(nasid_t nasid, vertex_hdl_t conn_v)
{
	cnodeid_t cnode = nasid_to_cnodeid(nasid);
	cnodeid_t xbow_peer = -1;
	char pathname[256], peer_path[256], tmpbuf[256];
	char *p;
	int rc;
	vertex_hdl_t peer_conn_v, hubv;
	int pos;
	slabid_t slab;

	if (NODEPDA(cnode)->xbow_peer >= 0) {	/* if dual-ported */
		/* create a path for this widget on the peer Cbrick */
		/* pcibr widget hw/module/001c11/slab/0/Pbrick/xtalk/12 */
		/* sprintf(pathname, "%v", conn_v); */
		xbow_peer = nasid_to_cnodeid(NODEPDA(cnode)->xbow_peer);
		pos = hwgfs_generate_path(conn_v, tmpbuf, 256);
		strcpy(pathname, &tmpbuf[pos]);
		p = pathname + strlen("hw/module/001c01/slab/0/");
		memset(tmpbuf, 0, 16);
		format_module_id(tmpbuf,
				 geo_module((NODEPDA(xbow_peer))->geoid),
				 MODULE_FORMAT_BRIEF);
		slab = geo_slab((NODEPDA(xbow_peer))->geoid);
		sprintf(peer_path, "module/%s/slab/%d/%s",
			tmpbuf, (int)slab, p);

		/* Look for the vertex for this widget on the peer Cbrick.
		 * Expect GRAPH_NOT_FOUND.
		 */
		rc = hwgraph_traverse(hwgraph_root, peer_path, &peer_conn_v);
		if (GRAPH_SUCCESS == rc)
			printk("pic_attach: found unexpected vertex: 0x%lx\n",
			       (uint64_t)peer_conn_v);
		else if (GRAPH_NOT_FOUND != rc) {
			printk("pic_attach: hwgraph_traverse unexpectedly"
			       " returned 0x%x\n", rc);
		} else {
			/* try to add the widget vertex to the peer Cbrick */
			rc = hwgraph_path_add(hwgraph_root, peer_path,
					      &peer_conn_v);
			if (GRAPH_SUCCESS != rc)
				printk("pic_attach: hwgraph_path_add"
				       " failed with 0x%x\n", rc);
			else {
				PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
						    "pic_bus1_redist: added vertex %v\n",
						    peer_conn_v));

				/* Now hang appropriate stuff off of the new
				 * vertex.  We bail out if we cannot add
				 * something.  In that case, we don't remove
				 * the newly added vertex but that should be
				 * safe and we don't really expect the
				 * additions to fail anyway.
				 */
				if (!pic_bus1_widget_info_dup(conn_v,
							      peer_conn_v,
							      xbow_peer,
							      peer_path))
					return 0;

				hubv = cnodeid_to_vertex(xbow_peer);
				ASSERT(hubv != GRAPH_VERTEX_NONE);
				device_master_set(peer_conn_v, hubv);
				xtalk_provider_register(hubv, &hub_provider);
				xtalk_provider_startup(hubv);
				return peer_conn_v;
			}
		}
	}
	return 0;
}
static void
io_xswitch_widget_init(vertex_hdl_t xswitchv,
		       vertex_hdl_t hubv,
		       xwidgetnum_t widgetnum)
{
	xswitch_info_t xswitch_info;
	xwidgetnum_t hub_widgetid;
	vertex_hdl_t widgetv;
	cnodeid_t cnode;
	widgetreg_t widget_id;
	nasid_t nasid, peer_nasid;
	struct xwidget_hwid_s hwid;
	hubinfo_t hubinfo;
	/*REFERENCED*/
	int rc;
	char pathname[128];
	lboard_t *board = NULL;
	char buffer[16];
	char bt;
	moduleid_t io_module;
	slotid_t get_widget_slotnum(int xbow, int widget);

	DBG("\nio_xswitch_widget_init: hubv 0x%p, xswitchv 0x%p, widgetnum 0x%x\n",
	    hubv, xswitchv, widgetnum);

	/*
	 * Verify that xswitchv is indeed an attached xswitch.
	 */
	xswitch_info = xswitch_info_get(xswitchv);
	ASSERT(xswitch_info != NULL);

	hubinfo_get(hubv, &hubinfo);
	nasid = hubinfo->h_nasid;
	cnode = nasid_to_cnodeid(nasid);
	hub_widgetid = hubinfo->h_widgetid;

	/*
	 * Check that the widget is an io widget and is enabled
	 * on this nasid or the `peer' nasid.  The peer nasid
	 * is the other hub/bedrock connected to the xbow.
	 */
	peer_nasid = NODEPDA(cnode)->xbow_peer;
	if (peer_nasid == INVALID_NASID)
		/* If I don't have a peer, use myself. */
		peer_nasid = nasid;
	if (!xbow_port_io_enabled(nasid, widgetnum) &&
	    !xbow_port_io_enabled(peer_nasid, widgetnum)) {
		return;
	}

	if (xswitch_info_link_ok(xswitch_info, widgetnum)) {
		char name[4];
		lboard_t dummy;

		/*
		 * If the current hub is not supposed to be the master
		 * for this widgetnum, then skip this widget.
		 */
		if (xswitch_info_master_assignment_get(xswitch_info,
						       widgetnum) != hubv) {
			return;
		}

		board = find_lboard_class_nasid(
				(lboard_t *)KL_CONFIG_INFO(nasid),
				nasid, KLCLASS_IOBRICK);
		if (!board && NODEPDA(cnode)->xbow_peer != INVALID_NASID) {
			board = find_lboard_class_nasid(
					(lboard_t *)KL_CONFIG_INFO(
						NODEPDA(cnode)->xbow_peer),
					NODEPDA(cnode)->xbow_peer,
					KLCLASS_IOBRICK);
		}

		if (board) {
			DBG("io_xswitch_widget_init: Found KLTYPE_IOBRICK Board 0x%p brd_type 0x%x\n",
			    board, board->brd_type);
		} else {
			DBG("io_xswitch_widget_init: FIXME did not find IOBOARD\n");
			board = &dummy;
		}

		/* Copy over the nodes' geoid info */
		{
			lboard_t *brd;

			brd = find_lboard_any((lboard_t *)KL_CONFIG_INFO(nasid),
					      KLTYPE_SNIA);
			if (brd != (lboard_t *)0) {
				board->brd_geoid = brd->brd_geoid;
			}
		}

		/*
		 * Make sure we really want to say xbrick, pbrick,
		 * etc. rather than XIO, graphics, etc.
		 */
		memset(buffer, 0, 16);
		format_module_id(buffer, geo_module(board->brd_geoid),
				 MODULE_FORMAT_BRIEF);
		sprintf(pathname,
			EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%s" "/%s/%d",
			buffer,
			geo_slab(board->brd_geoid),
			(board->brd_type == KLTYPE_PXBRICK) ? EDGE_LBL_PXBRICK :
			(board->brd_type == KLTYPE_IXBRICK) ? EDGE_LBL_IXBRICK :
			(board->brd_type == KLTYPE_CGBRICK) ? EDGE_LBL_CGBRICK :
			(board->brd_type == KLTYPE_OPUSBRICK) ? EDGE_LBL_OPUSBRICK :
			"?brick",
			EDGE_LBL_XTALK, widgetnum);

		DBG("io_xswitch_widget_init: path= %s\n", pathname);
		rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);

		ASSERT(rc == GRAPH_SUCCESS);

		/* This is needed to let user programs map the
		 * module,slot numbers to the corresponding widget numbers
		 * on the crossbow.
		 */
		device_master_set(hwgraph_connectpt_get(widgetv), hubv);

		sprintf(name, "%d", widgetnum);
		DBG("io_xswitch_widget_init: FIXME hwgraph_edge_add %s xswitchv 0x%p, widgetv 0x%p\n",
		    name, xswitchv, widgetv);
		rc = hwgraph_edge_add(xswitchv, widgetv, name);

		/*
		 * crosstalk switch code tracks which
		 * widget is attached to each link.
		 */
		xswitch_info_vhdl_set(xswitch_info, widgetnum, widgetv);

		/*
		 * Peek at the widget to get its crosstalk part and
		 * mfgr numbers, then present it to the generic xtalk
		 * bus provider to have its driver attach routine
		 * called (or not).
		 */
		widget_id = XWIDGET_ID_READ(nasid, widgetnum);
		hwid.part_num = XWIDGET_PART_NUM(widget_id);
		hwid.rev_num = XWIDGET_REV_NUM(widget_id);
		hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);

		(void)xwidget_register(&hwid, widgetv, widgetnum,
				       hubv, hub_widgetid);

		io_module = iomoduleid_get(nasid);
		if (io_module >= 0) {
			char buffer[16];
			vertex_hdl_t to, from;
			char *brick_name;
			extern char *iobrick_L1bricktype_to_name(int type);

			memset(buffer, 0, 16);
			format_module_id(buffer, geo_module(board->brd_geoid),
					 MODULE_FORMAT_BRIEF);
			if (isupper(MODULE_GET_BTCHAR(io_module))) {
				bt = tolower(MODULE_GET_BTCHAR(io_module));
			} else {
				bt = MODULE_GET_BTCHAR(io_module);
			}
			brick_name = iobrick_L1bricktype_to_name(bt);

			/* Add a helper vertex so xbow monitoring
			 * can identify the brick type.  It's simply
			 * an edge from the widget 0 vertex to the
			 * brick vertex.
			 */
			sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
				EDGE_LBL_SLAB "/%d/" EDGE_LBL_NODE "/"
				EDGE_LBL_XTALK "/" "0",
				buffer, geo_slab(board->brd_geoid));
			from = hwgraph_path_to_vertex(pathname);
			ASSERT_ALWAYS(from);

			sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
				EDGE_LBL_SLAB "/%d/" "%s",
				buffer, geo_slab(board->brd_geoid), brick_name);
			to = hwgraph_path_to_vertex(pathname);
			ASSERT_ALWAYS(to);

			rc = hwgraph_edge_add(from, to, EDGE_LBL_INTERCONNECT);
			if (rc != -EEXIST && rc != GRAPH_SUCCESS) {
				printk("%s: Unable to establish link"
				       " for xbmon.", pathname);
			}
		}
	}
}
/* ARGSUSED */
static void __init
klhwg_connect_one_router(vertex_hdl_t hwgraph_root, lboard_t *brd,
			 cnodeid_t cnode, nasid_t nasid)
{
	klrou_t *router;
	char path_buffer[50];
	char dest_path[50];
	vertex_hdl_t router_hndl;
	vertex_hdl_t dest_hndl;
	int rc;
	int port;
	lboard_t *dest_brd;

	/* Don't add duplicate boards. */
	if (brd->brd_flags & DUPLICATE_BOARD) {
		return;
	}

	/* Generate a hardware graph path for this board. */
	board_to_path(brd, path_buffer);

	rc = hwgraph_traverse(hwgraph_root, path_buffer, &router_hndl);

	if (rc != GRAPH_SUCCESS) {
		printk(KERN_WARNING "Can't find router: %s", path_buffer);
		return;
	}

	/* We don't know what to do with multiple router components */
	if (brd->brd_numcompts != 1) {
		printk("klhwg_connect_one_router: %d cmpts on router\n",
		       brd->brd_numcompts);
		return;
	}

	/* Convert component 0 to klrou_t ptr */
	router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd),
					      brd->brd_compts[0]);

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		/* See if the port's active */
		if (router->rou_port[port].port_nasid == INVALID_NASID) {
			GRPRINTF(("klhwg_connect_one_router: port %d inactive.\n",
				  port));
			continue;
		}
		if (nasid_to_cnodeid(router->rou_port[port].port_nasid)
		    == INVALID_CNODEID) {
			continue;
		}

		dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
				router->rou_port[port].port_nasid,
				router->rou_port[port].port_offset);

		/* Generate a hardware graph path for this board. */
		board_to_path(dest_brd, dest_path);

		rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);

		if (rc != GRAPH_SUCCESS) {
			if (KL_CONFIG_DUPLICATE_BOARD(dest_brd))
				continue;
			printk("Can't find router: %s", dest_path);
			return;
		}

		sprintf(dest_path, "%d", port);

		rc = hwgraph_edge_add(router_hndl, dest_hndl, dest_path);

		if (rc == GRAPH_DUP) {
			GRPRINTF(("Skipping port %d. nasid %d %s/%s\n",
				  port, router->rou_port[port].port_nasid,
				  path_buffer, dest_path));
			continue;
		}

		if (rc != GRAPH_SUCCESS) {
			printk("Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
			       path_buffer, dest_path, (void *)dest_hndl, rc);
			return;
		}
		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, router_hndl,
			      dest_hndl, "Created edge %s from vhdl1 to vhdl2.\n",
			      dest_path);
	}
}
void intr_init_vecblk(nodepda_t *npda, cnodeid_t node, int sn)
{
	int nasid = cnodeid_to_nasid(node);
	sh_ii_int0_config_u_t ii_int_config;
	cpuid_t cpu;
	cpuid_t cpu0, cpu1;
	nodepda_t *lnodepda;
	sh_ii_int0_enable_u_t ii_int_enable;
	sh_int_node_id_config_u_t node_id_config;
	sh_local_int5_config_u_t local5_config;
	sh_local_int5_enable_u_t local5_enable;
	extern void sn_init_cpei_timer(void);
	static int timer_added = 0;

	if (is_headless_node(node)) {
		int cnode;
		struct ia64_sal_retval ret_stuff;

		/* retarget all interrupts on this node to the master node. */
		node_id_config.sh_int_node_id_config_regval = 0;
		node_id_config.sh_int_node_id_config_s.node_id = master_nasid;
		node_id_config.sh_int_node_id_config_s.id_sel = 1;
		HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG),
		      node_id_config.sh_int_node_id_config_regval);
		cnode = nasid_to_cnodeid(master_nasid);
		lnodepda = NODEPDA(cnode);
		cpu = lnodepda->node_first_cpu;
		cpu = cpu_physical_id(cpu);
		SAL_CALL(ret_stuff, SN_SAL_REGISTER_CE, nasid, cpu,
			 master_nasid, 0, 0, 0, 0);
		if (ret_stuff.status < 0) {
			printk("%s: SN_SAL_REGISTER_CE SAL_CALL failed\n",
			       __FUNCTION__);
		}
	} else {
		lnodepda = NODEPDA(node);
		cpu = lnodepda->node_first_cpu;
		cpu = cpu_physical_id(cpu);
	}

	/* Get the physical ids of the cpus on this node. */
	cpu0 = nasid_slice_to_cpu_physical_id(nasid, 0);
	cpu1 = nasid_slice_to_cpu_physical_id(nasid, 2);

	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_ERROR_MASK), 0);
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_CRBP_ERROR_MASK), 0);

	/* Config and enable UART interrupt, all nodes. */
	local5_config.sh_local_int5_config_regval = 0;
	local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
	local5_config.sh_local_int5_config_s.pid = cpu;
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
	      local5_config.sh_local_int5_config_regval);

	local5_enable.sh_local_int5_enable_regval = 0;
	local5_enable.sh_local_int5_enable_s.uart_int = 1;
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
	      local5_enable.sh_local_int5_enable_regval);

	/* The II_INT_CONFIG register for cpu 0. */
	ii_int_config.sh_ii_int0_config_regval = 0;
	ii_int_config.sh_ii_int0_config_s.type = 0;
	ii_int_config.sh_ii_int0_config_s.agt = 0;
	ii_int_config.sh_ii_int0_config_s.pid = cpu0;
	ii_int_config.sh_ii_int0_config_s.base = 0;

	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_CONFIG),
	      ii_int_config.sh_ii_int0_config_regval);

	/* The II_INT_CONFIG register for cpu 1. */
	ii_int_config.sh_ii_int0_config_regval = 0;
	ii_int_config.sh_ii_int0_config_s.type = 0;
	ii_int_config.sh_ii_int0_config_s.agt = 0;
	ii_int_config.sh_ii_int0_config_s.pid = cpu1;
	ii_int_config.sh_ii_int0_config_s.base = 0;

	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_CONFIG),
	      ii_int_config.sh_ii_int0_config_regval);

	/* Enable interrupts for II_INT0 and 1. */
	ii_int_enable.sh_ii_int0_enable_regval = 0;
	ii_int_enable.sh_ii_int0_enable_s.ii_enable = 1;

	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_ENABLE),
	      ii_int_enable.sh_ii_int0_enable_regval);
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_ENABLE),
	      ii_int_enable.sh_ii_int0_enable_regval);

	if (!timer_added) {	/* can only init the timer once. */
		timer_added = 1;
		sn_init_cpei_timer();
	}
}
int hubiio_crb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo)
{
	cnodeid_t cnode;
	nasid_t nasid;
	ii_icrb0_a_u_t icrba;	/* II CRB Register A */
	ii_icrb0_b_u_t icrbb;	/* II CRB Register B */
	ii_icrb0_c_u_t icrbc;	/* II CRB Register C */
	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
	ii_icrb0_e_u_t icrbe;	/* II CRB Register E */
	int i;
	int num_errors = 0;	/* Num of errors handled */
	ioerror_t ioerror;
	int rc;

	nasid = hinfo->h_nasid;
	cnode = nasid_to_cnodeid(nasid);

	/*
	 * XXX - Add locking for any recovery actions
	 */

	/*
	 * Scan through all CRBs in the Hub, and handle the errors
	 * in any of the CRBs marked.
	 */
	for (i = 0; i < IIO_NUM_CRBS; i++) {
		/* Check this crb entry to see if it is in error. */
		icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));

		if (icrbb.b_mark == 0) {
			continue;
		}

		icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));

		IOERROR_INIT(&ioerror);

		/* read other CRB error registers. */
		icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
		icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
		icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));

		IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);

		/* Check if this error is due to BTE operation,
		 * and handle it separately.
		 */
		if (icrbd.d_bteop ||
		    ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
		      icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
		     (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
		      icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {

			int bte_num;

			if (icrbd.d_bteop)
				bte_num = icrbc.c_btenum;
			else	/* b_initiator bit 2 gives BTE number */
				bte_num = (icrbb.b_initiator & 0x4) >> 2;

			hubiio_crb_free(hinfo, i);
			bte_crb_error_handler(hub_v, bte_num, i, &ioerror,
					      icrbd.d_bteop);
			num_errors++;
			continue;
		}

		/*
		 * XXX
		 * Assuming the only other error that would reach here is
		 * crosstalk errors.
		 * If CRB times out on a message from Xtalk, it changes
		 * the message type to CRB.
		 *
		 * If we get here due to other errors (SN0net/CRB)
		 * what's the action?
		 */

		/*
		 * Pick out the useful fields in CRB, and
		 * tuck them away into ioerror structure.
		 */
		IOERROR_SETVALUE(&ioerror, xtalkaddr,
				 icrba.a_addr << IIO_ICRB_ADDR_SHFT);
		IOERROR_SETVALUE(&ioerror, widgetnum, icrba.a_sidn);

		if (icrba.a_iow) {
			/*
			 * XXX We shouldn't really have BRIDGE-specific code
			 * here, but alas....
			 *
			 * The BRIDGE (or XBRIDGE) sets the upper bit of TNUM
			 * to indicate a WRITE operation.  It sets the next
			 * bit to indicate an INTERRUPT operation.  The bottom
			 * 3 bits of TNUM indicate which device was responsible.
			 */
			IOERROR_SETVALUE(&ioerror, widgetdev,
					 TNUM_TO_WIDGET_DEV(icrba.a_tnum));
			/*
			 * The encoding of TNUM (see comments above) is
			 * different for PIC.  So we'll save TNUM here and
			 * deal with the differences later when we can
			 * determine if we're using a Bridge or the PIC.
			 *
			 * XXX: We may be able to remove saving the widgetdev
			 * above and just sort it out of TNUM later.
			 */
			IOERROR_SETVALUE(&ioerror, tnum, icrba.a_tnum);
		}

		if (icrbb.b_error) {
			/*
			 * CRB 'i' has some error.  Identify the type of error,
			 * and try to handle it.
			 */
			switch (icrbb.b_ecode) {
			case IIO_ICRB_ECODE_PERR:
			case IIO_ICRB_ECODE_WERR:
			case IIO_ICRB_ECODE_AERR:
			case IIO_ICRB_ECODE_PWERR:
			case IIO_ICRB_ECODE_TOUT:
			case IIO_ICRB_ECODE_XTERR:
				printk("Shub II CRB %d: error %s on hub cnodeid: %d",
				       i, hubiio_crb_errors[icrbb.b_ecode],
				       cnode);
				/*
				 * Any sort of write error is mostly due to
				 * bad programming (Note it's not a timeout.)
				 * So, invoke hub_iio_error_handler with
				 * appropriate information.
				 */
				IOERROR_SETVALUE(&ioerror, errortype,
						 icrbb.b_ecode);

				/* Go through the error bit lookup phase */
				if (error_state_set(hub_v, ERROR_STATE_LOOKUP)
				    == ERROR_RETURN_CODE_CANNOT_SET_STATE)
					return (IOERROR_UNHANDLED);
				rc = hub_ioerror_handler(hub_v,
							 DMA_WRITE_ERROR,
							 MODE_DEVERROR,
							 &ioerror);
				if (rc == IOERROR_HANDLED) {
					rc = hub_ioerror_handler(hub_v,
								 DMA_WRITE_ERROR,
								 MODE_DEVREENABLE,
								 &ioerror);
				} else {
					printk("Unable to handle %s on hub %d",
					       hubiio_crb_errors[icrbb.b_ecode],
					       cnode);
					/* panic; */
				}
				/* Go to Next error */
				print_crb_fields(i, icrba, icrbb, icrbc,
						 icrbd, icrbe);
				hubiio_crb_free(hinfo, i);
				continue;

			case IIO_ICRB_ECODE_PRERR:
			case IIO_ICRB_ECODE_DERR:
				printk("Shub II CRB %d: error %s on hub : %d",
				       i, hubiio_crb_errors[icrbb.b_ecode],
				       cnode);
				/* panic */
			default:
				printk("Shub II CRB error (code : %d) on hub : %d",
				       icrbb.b_ecode, cnode);
				/* panic */
			}
		}

		/*
		 * Error is not indicated via the errcode field.
		 * Check other error indications in this register.
		 */
		if (icrbb.b_xerr) {
			printk("Shub II CRB %d: Xtalk Packet with error bit set to hub %d",
			       i, cnode);
			/* panic */
		}
		if (icrbb.b_lnetuce) {
			printk("Shub II CRB %d: Uncorrectable data error detected on data "
			       " from NUMAlink to node %d", i, cnode);
			/* panic */
		}
		print_crb_fields(i, icrba, icrbb, icrbc, icrbd, icrbe);

		if (icrbb.b_error) {
			/*
			 * CRB 'i' has some error.  Identify the type of error,
			 * and try to handle it.
			 */
			switch (icrbb.b_ecode) {
			case IIO_ICRB_ECODE_PERR:
			case IIO_ICRB_ECODE_WERR:
			case IIO_ICRB_ECODE_AERR:
			case IIO_ICRB_ECODE_PWERR:
				printk("%s on hub cnodeid: %d",
				       hubiio_crb_errors[icrbb.b_ecode],
				       cnode);
				/*
				 * Any sort of write error is mostly due to
				 * bad programming (Note it's not a timeout.)
				 * So, invoke hub_iio_error_handler with
				 * appropriate information.
				 */
				IOERROR_SETVALUE(&ioerror, errortype,
						 icrbb.b_ecode);

				rc = hub_ioerror_handler(hub_v,
							 DMA_WRITE_ERROR,
							 MODE_DEVERROR,
							 &ioerror);
				if (rc == IOERROR_HANDLED) {
					rc = hub_ioerror_handler(hub_v,
								 DMA_WRITE_ERROR,
								 MODE_DEVREENABLE,
								 &ioerror);
					ASSERT(rc == IOERROR_HANDLED);
				} else {
					panic("Unable to handle %s on hub %d",
					      hubiio_crb_errors[icrbb.b_ecode],
					      cnode);
					/*NOTREACHED*/
				}
				/* Go to Next error */
				hubiio_crb_free(hinfo, i);
				continue;

			case IIO_ICRB_ECODE_PRERR:
			case IIO_ICRB_ECODE_TOUT:
			case IIO_ICRB_ECODE_XTERR:
			case IIO_ICRB_ECODE_DERR:
				panic("Fatal %s on hub : %d",
				      hubiio_crb_errors[icrbb.b_ecode], cnode);
				/*NOTREACHED*/
			default:
				panic("Fatal error (code : %d) on hub : %d",
				      icrbb.b_ecode, cnode);
				/*NOTREACHED*/
			}
		}		/* if (icrbb.b_error) */

		/*
		 * Error is not indicated via the errcode field.
		 * Check other error indications in this register.
		 */
		if (icrbb.b_xerr) {
			panic("Xtalk Packet with error bit set to hub %d",
			      cnode);
			/*NOTREACHED*/
		}
		if (icrbb.b_lnetuce) {
			panic("Uncorrectable data error detected on data "
			      " from Craylink to node %d", cnode);
			/*NOTREACHED*/
		}
	}
void sn_dma_flush(uint64_t addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	uint64_t flags;
	uint64_t itte;
	struct hubdev_info *hubinfo;
	volatile struct sn_flush_device_list *p;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	if (!hubinfo) {
		BUG();
	}

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (!TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}

	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++) {
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (p->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= p->sfdl_bar_list[j].start &&
			    addr <= p->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
			break;
		p++;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
	 * register since it ensures the data has entered the coherence
	 * domain, unlike PIC.
	 */
	if (is_tio) {
		/*
		 * Note: devices behind TIOCE should never be matched in the
		 * above code, and so the following code is PIC/CP centric.
		 * If CE ever needs the sn_dma_flush mechanism, we will have
		 * to account for that here and in tioce_bus_fixup().
		 */
		uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
					     (p->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
				  sfdl_flush_lock, flags);

		*p->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(p->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* okay, everything is synched up. */
		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
				       flags);
	}
	return;
}
static void __init klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
{
	lboard_t *brd;
	klxbow_t *xbow_p;
	nasid_t hub_nasid;
	cnodeid_t hub_cnode;
	int widgetnum;
	vertex_hdl_t xbow_v, hubv;
	/*REFERENCED*/
	graph_error_t err;

	if (!(brd = find_lboard_nasid((lboard_t *)KL_CONFIG_INFO(nasid),
				      nasid, KLTYPE_IOBRICK_XBOW)))
		return;

	if (KL_CONFIG_DUPLICATE_BOARD(brd))
		return;

	if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
	    == NULL)
		return;

	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
	     widgetnum++) {
		if (!XBOW_PORT_TYPE_HUB(xbow_p, widgetnum))
			continue;

		hub_nasid = XBOW_PORT_NASID(xbow_p, widgetnum);
		if (hub_nasid == INVALID_NASID) {
			printk(KERN_WARNING "hub widget %d, skipping xbow graph\n",
			       widgetnum);
			continue;
		}

		hub_cnode = nasid_to_cnodeid(hub_nasid);

		if (hub_cnode == INVALID_CNODEID) {
			continue;
		}

		hubv = cnodeid_to_vertex(hub_cnode);

		err = hwgraph_path_add(hubv, EDGE_LBL_XTALK, &xbow_v);
		if (err != GRAPH_SUCCESS) {
			if (err == GRAPH_DUP)
				printk(KERN_WARNING "klhwg_add_xbow: Check for "
				       "working routers and router links!");

			printk("klhwg_add_xbow: Failed to add "
			       "edge: vertex 0x%p to vertex 0x%p,"
			       "error %d\n",
			       (void *)hubv, (void *)xbow_v, err);
			return;
		}

		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, xbow_v, NULL,
			      "Created path for xtalk.\n");

		xswitch_vertex_init(xbow_v);

		NODEPDA(hub_cnode)->xbow_vhdl = xbow_v;

		/*
		 * XXX - This won't work if we ever hook up two hubs
		 * by crosstown through a crossbow.
		 */
		if (hub_nasid != nasid) {
			NODEPDA(hub_cnode)->xbow_peer = nasid;
			NODEPDA(nasid_to_cnodeid(nasid))->xbow_peer =
			    hub_nasid;
		}
	}
}