Example #1
/*ARGSUSED*/
int
mem_refcnt_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp)
{
        cnodeid_t node;
#ifndef CONFIG_IA64_SGI_SN1
	extern int numnodes;
#endif
        
        ASSERT( (hubspc_subdevice_t)(ulong)device_info_get(*devp) == HUBSPC_REFCOUNTERS );

        if (!cap_able(CAP_MEMORY_MGT)) {
                return (EPERM);
        }

        node = master_node_get(*devp);

        ASSERT( (node >= 0) && (node < numnodes) );

        if (NODEPDA(node)->migr_refcnt_counterbuffer == NULL) {
                return (ENODEV);
        }

        ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
        ASSERT( NODEPDA(node)->migr_refcnt_cbsize != (size_t)0 );

        return (0);
}
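The open routine above distills a pattern that recurs throughout these examples: map a device handle to its master node, then consult that node's per-node data area (nodepda) before touching anything. A condensed sketch with a hypothetical helper name (not part of the original driver):

static int
hubspc_refcnt_ready(devfs_handle_t dev)
{
        cnodeid_t node = master_node_get(dev);

        /* a NULL buffer means the migration refcounters were
         * never initialized on this node */
        return NODEPDA(node)->migr_refcnt_counterbuffer != NULL;
}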
Example #2
/*
 * For each PCI bridge connected to the xswitch, add a link from the
 * board's klconfig info to the bridge's hwgraph vertex.  This lets
 * the FRU analyzer find the bridge without traversing the hardware
 * graph and risking hangs.
 */
static void
io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
{
	xwidgetnum_t		widgetnum;
	char 			pathname[128];
	devfs_handle_t		vhdl;
	nasid_t			nasid, peer_nasid;
	lboard_t		*board;



	/* This node's nasid and its xbow peer's nasid */
	nasid = COMPACT_TO_NASID_NODEID(cnodeid);
	peer_nasid = NODEPDA(cnodeid)->xbow_peer;

	/* 
	 * Look for paths matching "<widgetnum>/pci" under xswitchv.
	 * For every widget, init. its lboard's hwgraph link.  If the
	 * board has a PCI bridge, point the link to it.
	 */
	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
		 widgetnum++) {
		sprintf(pathname, "%d", widgetnum);
		if (hwgraph_traverse(xswitchv, pathname, &vhdl) !=
		    GRAPH_SUCCESS)
			continue;

		board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid),
				NODEPDA(cnodeid)->module_id);
		if (board == NULL && peer_nasid != INVALID_NASID) {
			/*
			 * Try to find the board on our peer
			 */
			board = find_lboard_module(
				(lboard_t *)KL_CONFIG_INFO(peer_nasid),
				NODEPDA(cnodeid)->module_id);
		}
		if (board == NULL) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
			printk(KERN_WARNING  "Could not find PROM info for vertex %v, "
				"FRU analyzer may fail",
				vhdl);
#else
			printk(KERN_WARNING  "Could not find PROM info for vertex 0x%p, "
				"FRU analyzer may fail",
				(void *)vhdl);
#endif
			return;
		}

		sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
		if (hwgraph_traverse(xswitchv, pathname, &vhdl) == 
		    GRAPH_SUCCESS)
			board->brd_graph_link = vhdl;
		else
			board->brd_graph_link = GRAPH_VERTEX_NONE;
	}
}
Example #3
elsc_t *get_elsc(void)
{
#ifdef BRINGUP
	return(Elsc[get_nasid()]);
#else
	if ( NODEPDA(get_nasid())->module == (module_t *)0 ) {
		printf("get_elsc() for nasid %d fails\n", get_nasid());
//		return((elsc_t *)0);
	}
	return &NODEPDA(get_nasid())->module->elsc;

//	return &NODEPDA(NASID_TO_COMPACT_NODEID(0))->module->elsc;
#endif
}
Example #4
static irqreturn_t hub_eint_handler(int irq, void *arg)
{
	struct hubdev_info *hubdev_info;
	struct ia64_sal_retval ret_stuff;
	nasid_t nasid;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	hubdev_info = (struct hubdev_info *)arg;
	nasid = hubdev_info->hdi_nasid;

	if (is_shub1()) {
		SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
			(u64) nasid, 0, 0, 0, 0, 0, 0);

		if ((int)ret_stuff.v0)
			panic("%s: Fatal %s Error", __func__,
				((nasid & 1) ? "TIO" : "HUBII"));

		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
			(void)hubiio_crb_error_handler(hubdev_info);
	} else
		if (nasid & 1) {	/* TIO errors */
			SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
				(u64) nasid, 0, 0, 0, 0, 0, 0);

			if ((int)ret_stuff.v0)
				panic("%s: Fatal TIO Error", __func__);
		} else
			bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));

	return IRQ_HANDLED;
}
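Both branches above key off (nasid & 1): on this platform an odd NASID denotes a TIO ASIC rather than a hub, a convention also relied on by tiocx_init() and sn_dma_flush() below. A hedged one-line wrapper makes the intent explicit (hypothetical helper, not in the original source):

static inline int nasid_is_tio(nasid_t nasid)
{
	return nasid & 1;	/* odd NASIDs are TIO nodes */
}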
Example #5
void
update_node_information(cnodeid_t cnodeid)
{
    nodepda_t *npda = NODEPDA(cnodeid);
    nodepda_router_info_t *npda_rip;

    /* Go through the list of router info
     * structures and copy some frequently
     * accessed info from the info hanging
     * off the corresponding router vertices
     */
    npda_rip = npda->npda_rip_first;
    while(npda_rip) {
        if (npda_rip->router_infop) {
            npda_rip->router_portmask =
                npda_rip->router_infop->ri_portmask;
            npda_rip->router_slot =
                npda_rip->router_infop->ri_slotnum;
        } else {
            /* No router, no ports. */
            npda_rip->router_portmask = 0;
        }
        npda_rip = npda_rip->router_next;
    }
}
Example #6
int
check_nasid_equiv(nasid_t nasida, nasid_t nasidb)
{
	if ((nasida == nasidb) || (nasida == NODEPDA(NASID_TO_COMPACT_NODEID(nasidb))->xbow_peer))
		return 1;
	else
		return 0;
}
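A hedged usage sketch (hypothetical caller, not from the original source): since klhwg_add_xbow() in Example #23 records xbow_peer on both hubs, the equivalence test works whichever hub initiates it.

	nasid_t mine = COMPACT_TO_NASID_NODEID(cnodeid);
	nasid_t peer = NODEPDA(cnodeid)->xbow_peer;

	if (peer != INVALID_NASID && check_nasid_equiv(mine, peer)) {
		/* both hubs hang off the same crossbow */
	}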
Example #7
/*ARGSUSED*/
int
mem_refcnt_mmap(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
{
        cnodeid_t node;
        int errcode;
        char* buffer;
        size_t blen;
#ifndef CONFIG_IA64_SGI_SN1
	extern int numnodes;
#endif
        
        ASSERT( (hubspc_subdevice_t)(ulong)device_info_get(dev) == HUBSPC_REFCOUNTERS );

        node = master_node_get(dev);

        ASSERT( (node >= 0) && (node < numnodes) );

        ASSERT( NODEPDA(node)->migr_refcnt_counterbuffer != NULL);
        ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
        ASSERT( NODEPDA(node)->migr_refcnt_cbsize != 0 );

        /*
         * XXXX deal with prot's somewhere around here....
         */

        buffer = NODEPDA(node)->migr_refcnt_counterbuffer;
        blen = NODEPDA(node)->migr_refcnt_cbsize;

        /*
         * Force offset to be a multiple of sizeof(refcnt_t), rounding up:
         * e.g. if sizeof(refcnt_t) is 8, offsets 1..8 all map to 8 and
         * 9 maps to 16.  (off == 0 also maps to 8 because of "off - 1".)
         */

        off = (((off - 1)/sizeof(refcnt_t)) + 1) * sizeof(refcnt_t);

        if ( ((buffer + blen) - (buffer + off + len)) < 0 ) {
                return (EPERM);
        }

        errcode = v_mapphys(vt,
                            buffer + off,
                            len);

        return errcode;
}
Example #8
inline int
check_nasid_equiv(nasid_t nasida, nasid_t nasidb)
{
	if ((nasida == nasidb)
	    || (nasida == NODEPDA(nasid_to_cnodeid(nasidb))->xbow_peer))
		return 1;
	else
		return 0;
}
Example #9
/*
 * per_hub_init
 *
 * 	This code is executed once for each Hub chip.
 */
static void
per_hub_init(cnodeid_t cnode)
{
	nasid_t nasid;
	nodepda_t *npdap;
	ii_icmr_u_t ii_icmr;
	ii_ibcr_u_t ii_ibcr;
	ii_ilcsr_u_t ii_ilcsr;

	nasid = cnodeid_to_nasid(cnode);

	ASSERT(nasid != INVALID_NASID);
	ASSERT(nasid_to_cnodeid(nasid) == cnode);

	npdap = NODEPDA(cnode);

	/* Disable the request and reply errors. */
	REMOTE_HUB_S(nasid, IIO_IWEIM, 0xC000);

	/*
	 * Set the total number of CRBs that can be used.
	 */
	ii_icmr.ii_icmr_regval = 0x0;
	ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xf;
	if (enable_shub_wars_1_1()) {
		// Set bit one of ICMR to prevent II from sending interrupt for II bug.
		ii_icmr.ii_icmr_regval |= 0x1;
	}
	REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);

	/*
	 * Set the number of CRBs that both of the BTEs combined
	 * can use minus 1.
	 */
	ii_ibcr.ii_ibcr_regval = 0x0;
	ii_ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
	if (ii_ilcsr.ii_ilcsr_fld_s.i_llp_stat & LNK_STAT_WORKING) {
		ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
	} else {
		/*
		 * if the LLP is down, there is no attached I/O, so
		 * give BTE all the CRBs.
		 */
		ii_ibcr.ii_ibcr_fld_s.i_count = 0x14;
	}
	REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);

	/*
	 * Set CRB timeout to be 10ms.
	 */
	REMOTE_HUB_S(nasid, IIO_ICTP, 0xffffff);
	REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);

	/* Initialize error interrupts for this hub. */
	hub_error_init(cnode);
}
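The ii_icmr_u_t, ii_ibcr_u_t and ii_ilcsr_u_t types used above follow the usual SN register-union idiom: a raw 64-bit register image overlaid with a bitfield view, so code can modify individual fields and store the whole register in one access. A minimal sketch of the idiom (field widths illustrative, not the real hardware layout):

typedef union ii_icmr_u {
	u64	ii_icmr_regval;			/* raw register image */
	struct {
		u64	i_rsvd:52;		/* illustrative layout only */
		u64	i_c_cnt:12;		/* total CRBs usable */
	} ii_icmr_fld_s;
} ii_icmr_u_t;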
Example #10
/*
 * copy xwidget_info_t from conn_v to peer_conn_v
 */
static int
pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
					cnodeid_t xbow_peer, char *peer_path)
{
	xwidget_info_t widget_info, peer_widget_info;
	vertex_hdl_t peer_hubv;
	hubinfo_t peer_hub_info;

	/* get the peer hub's widgetid */
	peer_hubv = NODEPDA(xbow_peer)->node_vertex;
	peer_hub_info = NULL;
	hubinfo_get(peer_hubv, &peer_hub_info);
	if (peer_hub_info == NULL)
		return 0;

	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_XWIDGET,
			(arbitrary_info_t *)&widget_info) == GRAPH_SUCCESS) {
		peer_widget_info = kmalloc(sizeof (*(peer_widget_info)), GFP_KERNEL);
		if ( !peer_widget_info ) {
			return -ENOMEM;
		}
		memset(peer_widget_info, 0, sizeof (*(peer_widget_info)));

		peer_widget_info->w_fingerprint = widget_info_fingerprint;
		peer_widget_info->w_vertex = peer_conn_v;
		peer_widget_info->w_id = widget_info->w_id;
		peer_widget_info->w_master = peer_hubv;
		peer_widget_info->w_masterid = peer_hub_info->h_widgetid;
		/* structure copy */
		peer_widget_info->w_hwid = widget_info->w_hwid;
		peer_widget_info->w_efunc = 0;
		peer_widget_info->w_einfo = 0;
		peer_widget_info->w_name = kmalloc(strlen(peer_path) + 1, GFP_KERNEL);
		if (!peer_widget_info->w_name) {
			kfree(peer_widget_info);
			return -ENOMEM;
		}
		strcpy(peer_widget_info->w_name, peer_path);

		if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_XWIDGET,
			(arbitrary_info_t)peer_widget_info) != GRAPH_SUCCESS) {
			kfree(peer_widget_info->w_name);
			kfree(peer_widget_info);
			return 0;
		}

		xwidget_info_set(peer_conn_v, peer_widget_info);

		return 1;
	}

	printk("pic_bus1_widget_info_dup: "
			"cannot get INFO_LBL_XWIDGET from 0x%lx\n", (uint64_t)conn_v);
	return 0;
}
Example #11
static void
synergy_perf_set_freq(int freq)
{
	int		cnode;
	nodepda_t	*npdap;

	for (cnode=0; cnode < numnodes; cnode++) {
		if ((npdap = NODEPDA(cnode)) != NULL)
			npdap->synergy_perf_freq = freq;
	}
}
Example #12
static void
synergy_perf_set_enable(int enable)
{
	int		cnode;
	nodepda_t	*npdap;

	for (cnode=0; cnode < numnodes; cnode++) {
		if ((npdap = NODEPDA(cnode)) != NULL)
			npdap->synergy_perf_enabled = enable;
	}
	printk("NOTICE: synergy perf counting %sabled on all nodes\n", enable ? "en" : "dis");
}
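Examples #11 and #12 share one shape: walk every compact node id, skip nodes whose pda pointer is not yet set up, and poke a field. A hedged generic sketch of that pattern (hypothetical helper, not in the original driver):

static void
for_each_nodepda(void (*fn)(nodepda_t *npdap, int arg), int arg)
{
	int		cnode;
	nodepda_t	*npdap;

	for (cnode = 0; cnode < numnodes; cnode++) {
		if ((npdap = NODEPDA(cnode)) != NULL)
			fn(npdap, arg);
	}
}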
Example #13
/*
 * Transfer the bte_test_buffer from our node to the specified
 * destination and print out timing results.
 */
static void
brt_time_xfer(int dest_node, int iterations, int xfer_lines)
{
	int iteration;
	char *src, *dst;
	u64 xfer_len, src_phys, dst_phys;
	u64 itc_before, itc_after, mem_intvl, bte_intvl;


	xfer_len = xfer_lines * L1_CACHE_BYTES;

	src = nodepda->bte_if[0].bte_test_buf;
	src_phys = __pa(src);
	dst = NODEPDA(dest_node)->bte_if[1].bte_test_buf;
	dst_phys = __pa(dst);
	mem_intvl = 0;

	for (iteration = 0; iteration < iterations; iteration++) {
		if (tm_memcpy) {
			itc_before = ia64_get_itc();
			memcpy(dst, src, xfer_len);
			itc_after = ia64_get_itc();
			mem_intvl = itc_after - itc_before;
		}

		itc_before = ia64_get_itc();
		bte_copy(src_phys, dst_phys, xfer_len, BTE_NOTIFY, NULL);
		itc_after = ia64_get_itc();
		bte_intvl = itc_after - itc_before;

		if (tm_memcpy) {
			printk("%3d,%3d,%3d,%5d,%4ld,%7ld,%3ld,"
			       "%7ld,%7ld,%7ld\n",
			       smp_processor_id(), NASID_GET(src),
			       NASID_GET(dst), xfer_lines,
			       NSEC(bte_setup_time),
			       NSEC(bte_transfer_time),
			       NSEC(bte_tear_down_time),
			       NSEC(bte_execute_time), NSEC(bte_intvl),
			       NSEC(mem_intvl));
		} else {
			printk("%3d,%3d,%3d,%5d,%4ld,%7ld,%3ld,"
			       "%7ld,%7ld\n",
			       smp_processor_id(), NASID_GET(src),
			       NASID_GET(dst), xfer_lines,
			       NSEC(bte_setup_time),
			       NSEC(bte_transfer_time),
			       NSEC(bte_tear_down_time),
			       NSEC(bte_execute_time), NSEC(bte_intvl));
		}
	}

}
Example #14
/*
 * First part error handler.  This is called whenever any error CRB interrupt
 * is generated by the II.
 */
void
bte_crb_error_handler(cnodeid_t cnode, int btenum,
                      int crbnum, ioerror_t * ioe, int bteop)
{
	struct bteinfo_s *bte;


	bte = &(NODEPDA(cnode)->bte_if[btenum]);

	/*
	 * The caller has already figured out the error type, we save that
	 * in the bte handle structure for the thread exercising the
	 * interface to consume.
	 */
	bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
	bte->bte_error_count++;

	BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
		bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
	bte_error_handler((unsigned long) NODEPDA(cnode));
}
Example #15
/*
 * Set up the platform-dependent fields in the processor pda.
 * Must be done _after_ init_platform_nodepda().
 * If we need a lock here, something else is wrong!
 */
void init_platform_pda(cpuid_t cpu)
{
#if defined(CONFIG_IA64_SGI_SN1)
	hub_intmasks_t *intmasks;
	int i, subnode;
	cnodeid_t	cnode;
	synergy_da_t	*sda;
	int	which_synergy;


	cnode = cpuid_to_cnodeid(cpu);
	which_synergy = cpuid_to_synergy(cpu);

	sda = Synergy_da_indr[(cnode * 2) + which_synergy];
	intmasks = &sda->s_intmasks;

	/* Clear INT_PEND0 masks. */
	for (i = 0; i < N_INTPEND0_MASKS; i++)
		intmasks->intpend0_masks[i] = 0;

	/* Set up pointer to the vector block in the nodepda. */
	/* (Can't use SUBNODEPDA - not working yet) */
	subnode = cpuid_to_subnode(cpu);
	intmasks->dispatch0 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch0;
	intmasks->dispatch1 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch1;
	if (intmasks->dispatch0 !=  &SUBNODEPDA(cnode, subnode)->intr_dispatch0 ||
	   intmasks->dispatch1 !=  &SUBNODEPDA(cnode, subnode)->intr_dispatch1)
	   	panic("xxx");
	intmasks->dispatch0 = &SUBNODEPDA(cnode, subnode)->intr_dispatch0;
	intmasks->dispatch1 = &SUBNODEPDA(cnode, subnode)->intr_dispatch1;

	/* Clear INT_PEND1 masks. */
	for (i = 0; i < N_INTPEND1_MASKS; i++)
		intmasks->intpend1_masks[i] = 0;
#endif	/* CONFIG_IA64_SGI_SN1 */
}
Example #16
static int __init tiocx_init(void)
{
	cnodeid_t cnodeid;
	int found_tiocx_device = 0;

	if (!ia64_platform_is("sn2"))
		return 0;

	bus_register(&tiocx_bus_type);

	for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
		nasid_t nasid;
		int bt;

		nasid = cnodeid_to_nasid(cnodeid);

		if ((nasid & 0x1) && is_fpga_tio(nasid, &bt)) {
			struct hubdev_info *hubdev;
			struct xwidget_info *widgetp;

			DBG("Found TIO at nasid 0x%x\n", nasid);

			hubdev =
			    (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);

			widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];

			/* The CE hangs off of the CX port but is not an FPGA */
			if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM)
				continue;

			tio_corelet_reset(nasid, TIOCX_CORELET);
			tio_conveyor_enable(nasid);

			if (cx_device_register
			    (nasid, widgetp->xwi_hwid.part_num,
			     widgetp->xwi_hwid.mfg_num, hubdev, bt) < 0)
				return -ENXIO;
			else
				found_tiocx_device++;
		}
	}

	/* It's ok if we find zero devices. */
	DBG("found_tiocx_device= %d\n", found_tiocx_device);

	return 0;
}
Example #17
/*
 * copy xwidget_info_t from conn_v to peer_conn_v
 */
int
pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
                         cnodeid_t xbow_peer)
{
    xwidget_info_t widget_info, peer_widget_info;
    char peer_path[256];
    vertex_hdl_t peer_hubv;
    hubinfo_t peer_hub_info;

    /* get the peer hub's widgetid */
    peer_hubv = NODEPDA(xbow_peer)->node_vertex;
    peer_hub_info = NULL;
    hubinfo_get(peer_hubv, &peer_hub_info);
    if (peer_hub_info == NULL)
        return 0;

    if (hwgraph_info_get_LBL(conn_v, INFO_LBL_XWIDGET,
                             (arbitrary_info_t *)&widget_info) == GRAPH_SUCCESS) {
        NEW(peer_widget_info);
        peer_widget_info->w_vertex = peer_conn_v;
        peer_widget_info->w_id = widget_info->w_id;
        peer_widget_info->w_master = peer_hubv;
        peer_widget_info->w_masterid = peer_hub_info->h_widgetid;
        /* structure copy */
        peer_widget_info->w_hwid = widget_info->w_hwid;
        peer_widget_info->w_efunc = 0;
        peer_widget_info->w_einfo = 0;
        peer_widget_info->w_name = kmalloc(strlen(peer_path) + 1, GFP_KERNEL);
        if (!peer_widget_info->w_name) {
            DEL(peer_widget_info);
            return 0;
        }
        strcpy(peer_widget_info->w_name, peer_path);

        if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_XWIDGET,
                                 (arbitrary_info_t)peer_widget_info) != GRAPH_SUCCESS) {
            DEL(peer_widget_info);
            return 0;
        }

        xwidget_info_set(peer_conn_v, peer_widget_info);

        return 1;
    }

    printk("pic_bus1_widget_info_dup: "
           "cannot get INFO_LBL_XWIDGET from 0x%lx\n", (uint64_t)conn_v);
    return 0;
}
Example #18
void sn_dma_flush(u64 addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	unsigned long flags;
	u64 itte;
	struct hubdev_info *hubinfo;
	struct sn_flush_device_kernel *p;
	struct sn_flush_device_common *common;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	BUG_ON(!hubinfo);

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (! TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	
	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
		common = p->common;
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (common->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= common->sfdl_bar_list[j].start
			    && addr <= common->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
			break;
	}

	
	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush register,
	 * since it ensures the data has entered the coherence domain,
	 * unlike PIC.
	 */
	if (is_tio) {
		u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		u32 revnum = XWIDGET_PART_REV_NUM(tio_id);

		
		/* TIOCP BRINGUP WAR (PV907516): don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
					     (common->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&p->sfdl_flush_lock, flags);
		*common->sfdl_flush_addr = 0;

		
		/* force an interrupt. */
		*(volatile u32 *)(common->sfdl_force_int_addr) = 1;

		
		/* wait for the interrupt to come back. */
		while (*(common->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		
		/* okay, everything is synched up. */
		spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
	}
	return;
}
Example #19
static int
synergy_perf_ioctl(struct inode *inode, struct file *file,
        unsigned int cmd, unsigned long arg)
{
	int             cnode;
	nodepda_t       *npdap;
	synergy_perf_t	*p;
	int		intarg;
	int		fsb;
	uint64_t	longarg;
	uint64_t	*stats;
	int		n;
	devfs_handle_t	d;
	arbitrary_info_t info;
	
	if ((d = devfs_get_handle_from_inode(inode)) == NULL)
		return -ENODEV;
	info = hwgraph_fastinfo_get(d);

	cnode = SYNERGY_PERF_INFO_CNODE(info);
	fsb = SYNERGY_PERF_INFO_FSB(info);
	npdap = NODEPDA(cnode);

	switch (cmd) {
	case SNDRV_GET_SYNERGY_VERSION:
		/* return int, version of data structure for SNDRV_GET_SYNERGYINFO */
		intarg = 1; /* version 1 */
		if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
		    return -EFAULT;
		break;

	case SNDRV_GET_INFOSIZE:
		/* return int, sizeof buf needed for SYNERGY_PERF_GET_STATS */
		intarg = synergy_perf_size(npdap);
		if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
		    return -EFAULT;
		break;

	case SNDRV_GET_SYNERGYINFO:
		/* return array of event/value pairs, this node only */
		if ((intarg = synergy_perf_size(npdap)) <= 0)
			return -ENODATA;
		if ((stats = (uint64_t *)kmalloc(intarg, GFP_KERNEL)) == NULL)
			return -ENOMEM;
		spin_lock_irq(&npdap->synergy_perf_lock);
		for (n=0, p = npdap->synergy_perf_first; p;) {
			stats[n++] = p->modesel;
			if (p->intervals > 0)
			    stats[n++] = p->counts[fsb] * p->total_intervals / p->intervals;
			else
			    stats[n++] = 0;
			p = p->next;
			if (p == npdap->synergy_perf_first)
				break;
		}
		spin_unlock_irq(&npdap->synergy_perf_lock);

		if (copy_to_user((void *)arg, stats, intarg)) {
		    kfree(stats);
		    return -EFAULT;
		}

		kfree(stats);
		break;

	case SNDRV_SYNERGY_APPEND:
		/* reads 64bit event, append synergy perf event to all nodes  */
		if (copy_from_user(&longarg, (void *)arg, sizeof(longarg)))
		    return -EFAULT;
		return synergy_perf_append(longarg);
		break;

	case SNDRV_GET_SYNERGY_STATUS:
		/* return int, 1 if enabled else 0 */
		intarg = npdap->synergy_perf_enabled;
		if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
		    return -EFAULT;
		break;

	case SNDRV_SYNERGY_ENABLE:
		/* read int, if true enable counting else disable */
		if (copy_from_user(&intarg, (void *)arg, sizeof(intarg)))
		    return -EFAULT;
		synergy_perf_set_enable(intarg);
		break;

	case SNDRV_SYNERGY_FREQ:
		/* read int, set jiffies per update */ 
		if (copy_from_user(&intarg, (void *)arg, sizeof(intarg)))
		    return -EFAULT;
		if (intarg < 0 || intarg >= HZ)
			return -EINVAL;
		synergy_perf_set_freq(intarg);
		break;

	default:
		printk("Warning: invalid ioctl %d on synergy mon for cnode=%d fsb=%d\n", cmd, cnode, fsb);
		return -EINVAL;
	}
	return(0);
}
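A hedged user-space sketch of driving this interface (the device path is hypothetical; the command names and the size/buffer handshake come from the switch above):

	int fd = open("/hw/synergy_mon", O_RDONLY);	/* hypothetical path */
	int version, size;
	uint64_t *stats;

	ioctl(fd, SNDRV_GET_SYNERGY_VERSION, &version);
	ioctl(fd, SNDRV_GET_INFOSIZE, &size);
	stats = malloc(size);			/* modesel/value pairs */
	ioctl(fd, SNDRV_GET_SYNERGYINFO, stats);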
Example #20
File: module.c Project: nhanh0/hah
elsc_t *get_elsc(void)
{
	return &NODEPDA(cpuid_to_cnodeid(smp_processor_id()))->module->elsc;
}
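Unlike the get_elsc() in Example #3, this variant derives the node from whichever CPU is executing, so it is only meaningful when the caller wants the ELSC of its own node:

	elsc_t *e = get_elsc();		/* ELSC of the local node */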
Example #21
void
synergy_perf_update(int cpu)
{
	nasid_t		nasid;
	cnodeid_t       cnode;
	struct nodepda_s *npdap;

	/*
	 * synergy_perf_initialized is set by synergy_perf_init()
	 * which is called last thing by sn_mp_setup(), i.e. well
	 * after nodepda has been initialized.
	 */
	if (!synergy_perf_initialized)
		return;

	cnode = cpuid_to_cnodeid(cpu);
	if (cnode < 0 || cnode >= numnodes)
		/* this should not happen: still in early io init */
		return;

	npdap = NODEPDA(cnode);
	if (npdap == NULL)
		return;

#if 0
	/* use this to check nodepda initialization */
	if (((uint64_t)npdap) & 0x7) {
		printk("\nERROR on cpu %d : cnode=%d, npdap == %p, not aligned\n", cpu, cnode, npdap);
		BUG();
	}
#endif

	if (npdap->synergy_perf_enabled == 0 || npdap->synergy_perf_data == NULL) {
		/* Not enabled, or no events to monitor */
		return;
	}

	if (npdap->synergy_inactive_intervals++ % npdap->synergy_perf_freq != 0) {
		/* don't multiplex on every timer interrupt */
		return;
	}

	/*
	 * Read registers for last interval and increment counters.
	 * Hold the per-node synergy_perf_lock so concurrent readers get
	 * consistent values.
	 */
	spin_lock_irq(&npdap->synergy_perf_lock);

	nasid = cpuid_to_nasid(cpu);
	npdap->synergy_active_intervals++;
	npdap->synergy_perf_data->intervals++;
	npdap->synergy_perf_data->total_intervals = npdap->synergy_active_intervals;

	npdap->synergy_perf_data->counts[0] += 0xffffffffffUL &
		REMOTE_SYNERGY_LOAD(nasid, 0, PERF_CNTR0_A);

	npdap->synergy_perf_data->counts[1] += 0xffffffffffUL &
		REMOTE_SYNERGY_LOAD(nasid, 1, PERF_CNTR0_B);

	/* skip to next in circular list */
	npdap->synergy_perf_data = npdap->synergy_perf_data->next;

	spin_unlock_irq(&npdap->synergy_perf_lock);

	/* set the counter 0 selection modes for both A and B */
	REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTL0_A, npdap->synergy_perf_data->modesel);
	REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTL0_B, npdap->synergy_perf_data->modesel);

	/* and reset the counter registers to zero */
	REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTR0_A, 0UL);
	REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTR0_B, 0UL);
}
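The counters live on a circular list that the update routine advances once per sampled interval, multiplexing the two hardware counters across all registered events. A hedged sketch of how an event might be appended to one node's ring (hypothetical helper; the real synergy_perf_append() used by the ioctl in Example #19 updates every node and takes the per-node lock):

static int
synergy_perf_append_one(nodepda_t *npdap, uint64_t modesel)
{
	synergy_perf_t *p;

	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	memset(p, 0, sizeof(*p));
	p->modesel = modesel;

	if (npdap->synergy_perf_first == NULL) {
		p->next = p;		/* one-element ring */
		npdap->synergy_perf_first = p;
		npdap->synergy_perf_data = p;
	} else {
		/* insert just after the list head */
		p->next = npdap->synergy_perf_first->next;
		npdap->synergy_perf_first->next = p;
	}
	return 0;
}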
Example #22
/* ARGSUSED */
static void __init
klhwg_add_node(vertex_hdl_t hwgraph_root, cnodeid_t cnode)
{
	nasid_t nasid;
	lboard_t *brd;
	klhub_t *hub;
	vertex_hdl_t node_vertex = NULL;
	char path_buffer[100];
	int rv;
	char *s;
	int board_disabled = 0;
	klcpu_t *cpu;
	vertex_hdl_t cpu_dir;

	nasid = cnodeid_to_nasid(cnode);
	brd = find_lboard_any((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
	ASSERT(brd);

	/* Generate a hardware graph path for this board. */
	board_to_path(brd, path_buffer);
	rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
	if (rv != GRAPH_SUCCESS) {
		printk("Node vertex creation failed.  Path == %s", path_buffer);
		return;
	}

	HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, node_vertex, NULL, "Created path for SHUB node.\n");
	hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
	ASSERT(hub);
	if(hub->hub_info.flags & KLINFO_ENABLE)
		board_disabled = 0;
	else
		board_disabled = 1;
		
	if(!board_disabled) {
		mark_nodevertex_as_node(node_vertex, cnode);
		s = dev_to_name(node_vertex, path_buffer, sizeof(path_buffer));
		NODEPDA(cnode)->hwg_node_name =
					kmalloc(strlen(s) + 1, GFP_KERNEL);
		if (NODEPDA(cnode)->hwg_node_name == NULL) {
			printk("%s: no memory\n", __FUNCTION__);
			return;
		}
		strcpy(NODEPDA(cnode)->hwg_node_name, s);
		hubinfo_set(node_vertex, NODEPDA(cnode)->pdinfo);
		NODEPDA(cnode)->slotdesc = brd->brd_slot;
		NODEPDA(cnode)->geoid = brd->brd_geoid;
		NODEPDA(cnode)->module = module_lookup(geo_module(brd->brd_geoid));
		klhwg_add_hub(node_vertex, hub, cnode);
	}

	/*
	 * If there's at least 1 CPU, add a "cpu" directory to represent
	 * the collection of all CPUs attached to this node.
	 */
	cpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
	if (cpu) {
		graph_error_t rv;

		rv = hwgraph_path_add(node_vertex, EDGE_LBL_CPU, &cpu_dir);
		if (rv != GRAPH_SUCCESS) {
			printk("klhwg_add_node: Cannot create CPU directory\n");
			return;
		}
		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, cpu_dir, NULL, "Created cpu directory on SHUB node.\n");

	}

	while (cpu) {
		cpuid_t cpu_id;
		cpu_id = nasid_slice_to_cpuid(nasid,cpu->cpu_info.physid);
		if (cpu_online(cpu_id))
			klhwg_add_cpu(node_vertex, cnode, cpu);
		else
			klhwg_add_disabled_cpu(node_vertex, cnode, cpu, brd->brd_slot);

		cpu = (klcpu_t *)
			find_component(brd, (klinfo_t *)cpu, KLSTRUCT_CPU);
	}
}
Example #23
static void __init
klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
{
	lboard_t *brd;
	klxbow_t *xbow_p;
	nasid_t hub_nasid;
	cnodeid_t hub_cnode;
	int widgetnum;
	vertex_hdl_t xbow_v, hubv;
	/*REFERENCED*/
	graph_error_t err;

	if (!(brd = find_lboard_nasid((lboard_t *)KL_CONFIG_INFO(nasid), 
			nasid, KLTYPE_IOBRICK_XBOW)))
		return;

	if (KL_CONFIG_DUPLICATE_BOARD(brd))
	    return;

	if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
	    == NULL)
	    return;

	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
		if (!XBOW_PORT_TYPE_HUB(xbow_p, widgetnum)) 
		    continue;

		hub_nasid = XBOW_PORT_NASID(xbow_p, widgetnum);
		if (hub_nasid == INVALID_NASID) {
			printk(KERN_WARNING  "hub widget %d, skipping xbow graph\n", widgetnum);
			continue;
		}

		hub_cnode = nasid_to_cnodeid(hub_nasid);

		if (hub_cnode == INVALID_CNODEID) {
			continue;
		}
			
		hubv = cnodeid_to_vertex(hub_cnode);

		err = hwgraph_path_add(hubv, EDGE_LBL_XTALK, &xbow_v);
                if (err != GRAPH_SUCCESS) {
                        if (err == GRAPH_DUP)
                                printk(KERN_WARNING  "klhwg_add_xbow: Check for "
                                        "working routers and router links!");

                        printk("klhwg_add_xbow: Failed to add "
                                "edge: vertex 0x%p to vertex 0x%p,"
                                "error %d\n",
                                (void *)hubv, (void *)xbow_v, err);
			return;
                }

		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, xbow_v, NULL, "Created path for xtalk.\n");

		xswitch_vertex_init(xbow_v); 

		NODEPDA(hub_cnode)->xbow_vhdl = xbow_v;

		/*
		 * XXX - This won't work if we ever hook up two hubs
		 * by crosstown through a crossbow.
		 */
		if (hub_nasid != nasid) {
			NODEPDA(hub_cnode)->xbow_peer = nasid;
			NODEPDA(nasid_to_cnodeid(nasid))->xbow_peer =
				hub_nasid;
		}
	}
}
Example #24
void
intr_init_vecblk( nodepda_t *npda,
                  cnodeid_t node,
                  int sn)
{
    int 			nasid = cnodeid_to_nasid(node);
    sh_ii_int0_config_u_t	ii_int_config;
    cpuid_t			cpu;
    cpuid_t			cpu0, cpu1;
    nodepda_t		*lnodepda;
    sh_ii_int0_enable_u_t	ii_int_enable;
    sh_int_node_id_config_u_t	node_id_config;
    sh_local_int5_config_u_t	local5_config;
    sh_local_int5_enable_u_t	local5_enable;
    extern void sn_init_cpei_timer(void);
    static int timer_added = 0;


    if (is_headless_node(node) ) {
        int cnode;
        struct ia64_sal_retval ret_stuff;

        // retarget all interrupts on this node to the master node.
        node_id_config.sh_int_node_id_config_regval = 0;
        node_id_config.sh_int_node_id_config_s.node_id = master_nasid;
        node_id_config.sh_int_node_id_config_s.id_sel = 1;
        HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG),
               node_id_config.sh_int_node_id_config_regval);
        cnode = nasid_to_cnodeid(master_nasid);
        lnodepda = NODEPDA(cnode);
        cpu = lnodepda->node_first_cpu;
        cpu = cpu_physical_id(cpu);
        SAL_CALL(ret_stuff, SN_SAL_REGISTER_CE, nasid, cpu, master_nasid,0,0,0,0);
        if (ret_stuff.status < 0) {
            printk("%s: SN_SAL_REGISTER_CE SAL_CALL failed\n",__FUNCTION__);
        }
    } else {
        lnodepda = NODEPDA(node);
        cpu = lnodepda->node_first_cpu;
        cpu = cpu_physical_id(cpu);
    }

    // Get the physical id's of the cpu's on this node.
    cpu0 = nasid_slice_to_cpu_physical_id(nasid, 0);
    cpu1 = nasid_slice_to_cpu_physical_id(nasid, 2);

    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_ERROR_MASK), 0);
    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_CRBP_ERROR_MASK), 0);

    // Config and enable UART interrupt, all nodes.

    local5_config.sh_local_int5_config_regval = 0;
    local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
    local5_config.sh_local_int5_config_s.pid = cpu;
    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
           local5_config.sh_local_int5_config_regval);

    local5_enable.sh_local_int5_enable_regval = 0;
    local5_enable.sh_local_int5_enable_s.uart_int = 1;
    HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
           local5_enable.sh_local_int5_enable_regval);


    // The II_INT_CONFIG register for cpu 0.
    ii_int_config.sh_ii_int0_config_regval = 0;
    ii_int_config.sh_ii_int0_config_s.type = 0;
    ii_int_config.sh_ii_int0_config_s.agt = 0;
    ii_int_config.sh_ii_int0_config_s.pid = cpu0;
    ii_int_config.sh_ii_int0_config_s.base = 0;

    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_CONFIG),
          ii_int_config.sh_ii_int0_config_regval);


    // The II_INT_CONFIG register for cpu 1.
    ii_int_config.sh_ii_int0_config_regval = 0;
    ii_int_config.sh_ii_int0_config_s.type = 0;
    ii_int_config.sh_ii_int0_config_s.agt = 0;
    ii_int_config.sh_ii_int0_config_s.pid = cpu1;
    ii_int_config.sh_ii_int0_config_s.base = 0;

    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_CONFIG),
          ii_int_config.sh_ii_int0_config_regval);


    // Enable interrupts for II_INT0 and 1.
    ii_int_enable.sh_ii_int0_enable_regval = 0;
    ii_int_enable.sh_ii_int0_enable_s.ii_enable = 1;

    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_ENABLE),
          ii_int_enable.sh_ii_int0_enable_regval);
    HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_ENABLE),
          ii_int_enable.sh_ii_int0_enable_regval);


    if (!timer_added) { // can only init the timer once.
        timer_added = 1;
        sn_init_cpei_timer();
    }
}
Example #25
/*
 * One of these threads is started per cpu.  Each thread is responsible
 * for loading that cpu's bte interface and then writing to the
 * test buffer.  The transfers are set in a round-robin fashion.
 * The end result is that each test buffer is being written into
 * by the previous node and both cpu's at the same time as the
 * local bte is transferring it to the next node.
 */
static int
brt_notify_thrd(void *__bind_cpu)
{
	int bind_cpu = (long int)__bind_cpu;
	int cpu = cpu_logical_map(bind_cpu);
	nodepda_t *nxt_node;
	long tmout_itc_intvls;
	long tmout;
	long passes;
	long good_xfer_cnt;
	u64 src_phys, dst_phys;
	int i;
	volatile char *src_buf;
	u64 *notify;

	atomic_inc(&brt_thread_cnt);
	daemonize();
	set_user_nice(current, 19);
	sigfillset(&current->blocked);

	/* Migrate to the right CPU */
	set_cpus_allowed(current, 1UL << cpu);

	/* Calculate the uSec timeout itc offset. */
	tmout_itc_intvls = local_cpu_data->cyc_per_usec * hang_usec;

	if (local_cnodeid() == (numnodes - 1)) {
		nxt_node = NODEPDA(0);
	} else {
		nxt_node = NODEPDA(local_cnodeid() + 1);
	}

	src_buf = nodepda->bte_if[0].bte_test_buf;
	src_phys = __pa(src_buf);
	dst_phys = __pa(nxt_node->bte_if[0].bte_test_buf);

	notify = kmalloc(L1_CACHE_BYTES, GFP_KERNEL);
	ASSERT(!((u64) notify & L1_CACHE_MASK));

	printk("BTE Hang %d xfer 0x%lx -> 0x%lx, Notify=0x%lx\n",
	       smp_processor_id(), src_phys, dst_phys, (u64) notify);

	passes = 0;
	good_xfer_cnt = 0;

	/* Loop until signalled to exit. */
	while (!brt_exit_flag) {
		/*
		 * A hang will prevent further transfers.
		 * NOTE: Sometimes, it appears like a hang occurred and
		 * then transfers begin again.  This just means that
		 * there is NUMA congestion and the hang_usec param
		 * should be increased.
		 */
		if (!(*notify & IBLS_BUSY)) {
			if ((bte_copy(src_phys,
				      dst_phys,
				      4UL * L1_CACHE_BYTES,
				      BTE_NOTIFY,
				      (void *)notify)) != BTE_SUCCESS) {
				printk("<0>Cpu %d Could not "
				       "allocate a bte.\n",
				       smp_processor_id());
				continue;
			}

			tmout = ia64_get_itc() + tmout_itc_intvls;

			while ((*notify & IBLS_BUSY) &&
			       (ia64_get_itc() < tmout)) {


				/* Push data out with the processor. */
				for (i = 0; i < (4 * L1_CACHE_BYTES);
				     i += L1_CACHE_BYTES) {
					src_buf[i] = (passes % 128);
				}
			};

			if (*notify & IBLS_BUSY) {
				printk("<0>Cpu %d BTE appears to have "
				       "hung.\n", smp_processor_id());
			} else {
				good_xfer_cnt++;
			}
		}

		/* Every x passes, take a little break. */
		if (!(++passes % 40)) {
			passes = 0;
			schedule_timeout(HZ / 100);
		}
	}

	kfree(notify);

	printk("Cpu %d had %ld good passes\n",
	       smp_processor_id(), good_xfer_cnt);

	atomic_dec(&brt_thread_cnt);
	return (0);
}
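The thread polls a dedicated cache line rather than blocking: bte_copy() is passed BTE_NOTIFY plus the notify pointer, and IBLS_BUSY is cleared there when the transfer completes. A hedged polling-wrapper sketch using the same primitives as above (hypothetical helper, not in the original test):

static int
bte_wait_notify(volatile u64 *notify, long tmout_itc_intvls)
{
	long tmout = ia64_get_itc() + tmout_itc_intvls;

	while ((*notify & IBLS_BUSY) && (ia64_get_itc() < tmout))
		cpu_relax();

	return (*notify & IBLS_BUSY) ? -1 : 0;	/* -1: presumed hang */
}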
Example #26
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	int nasid, cnode, j;
	cnodeid_t near_cnode;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_kernel *sn_flush_device_kernel;
	struct sn_flush_device_common *common;

	if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base =
	    (((u64) soft->pbi_buscommon.bs_base << 4) >> 4) |
	    __IA64_UNCACHED_OFFSET;

	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIASIC_ERROR, (void *)pcibr_error_intr_handler,
			IRQF_SHARED, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}

	/* 
	 * Update the Bridge with the "kernel" pagesize 
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_kernel) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_kernel++) {
				common = sn_flush_device_kernel->common;
				if (common->sfdl_slot == -1)
					continue;
				if ((common->sfdl_persistent_segment ==
				     soft->pbi_buscommon.bs_persist_segment) &&
				     (common->sfdl_persistent_busnum ==
				     soft->pbi_buscommon.bs_persist_busnum))
					common->sfdl_pcibus_info =
					    soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);

	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
		/* TIO PCI Bridge: find nearest node with CPUs */
		int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);

		if (e < 0) {
			near_cnode = (cnodeid_t)-1; /* use any node */
			printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
				"near node with CPUs to TIO node %d, err=%d\n",
				cnode, e);
		}
		controller->node = near_cnode;
	}
	else
		controller->node = cnode;
	return soft;
}
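The bs_base arithmetic above strips the top four bits of the PROM-supplied address (the ia64 region bits) and retargets the result into the uncached window. An equivalent, more explicit formulation (illustrative only):

	u64 phys = prom_bussoft->bs_base & ~(0xfUL << 60);	/* clear region bits */
	soft->pbi_buscommon.bs_base = phys | __IA64_UNCACHED_OFFSET;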
Example #27
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	int nasid, cnode, j;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_list *sn_flush_device_list;

	if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base =
	    (((u64) soft->pbi_buscommon.bs_base << 4) >> 4) |
	    __IA64_UNCACHED_OFFSET;

	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
			SA_SHIRQ, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}

	/* 
	 * Update the Bridge with the "kernel" pagesize 
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_list) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_list++) {
				if (sn_flush_device_list->sfdl_slot == -1)
					continue;
				if (sn_flush_device_list->
				    sfdl_persistent_busnum ==
				    soft->pbi_buscommon.bs_persist_busnum)
					sn_flush_device_list->sfdl_pcibus_info =
					    soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}
	memset(soft->pbi_int_ate_resource.ate, 0,
	       (soft->pbi_int_ate_size * sizeof(uint64_t)));

	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP)
		/*
		 * TIO PCI Bridge with no closest node information.
		 * FIXME: Find another way to determine the closest node
		 */
		controller->node = -1;
	else
		controller->node = cnode;
	return soft;
}
Example #28
/*
 * Initialize all I/O on the specified node.
 */
static void
io_init_node(cnodeid_t cnodeid)
{
	/*REFERENCED*/
	vertex_hdl_t hubv, switchv, widgetv;
	struct xwidget_hwid_s hwid;
	hubinfo_t hubinfo;
	int is_xswitch;
	nodepda_t	*npdap;
	struct semaphore *peer_sema = 0;
	uint32_t	widget_partnum;
	cpuid_t	c = 0;

	npdap = NODEPDA(cnodeid);

	/*
	 * Get the "top" vertex for this node's hardware
	 * graph; it will carry the per-hub hub-specific
	 * data, and act as the crosstalk provider master.
	 * Its canonical path is probably something of the
	 * form /hw/module/%M/slot/%d/node
	 */
	hubv = cnodeid_to_vertex(cnodeid);
	DBG("io_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n", cnodeid, hubv, npdap);

	ASSERT(hubv != GRAPH_VERTEX_NONE);

	/*
	 * Read mfg info on this hub
	 */

	/* 
	 * If nothing connected to this hub's xtalk port, we're done.
	 */
	early_probe_for_widget(hubv, &hwid);
	if (hwid.part_num == XWIDGET_PART_NUM_NONE) {
#ifdef PROBE_TEST
		if ((cnodeid == 1) || (cnodeid == 2)) {
			int index;

			for (index = 0; index < 600; index++)
				DBG("Interfering with device probing!!!\n");
		}
#endif
		/* io_init_done takes cpu cookie as 2nd argument 
		 * to do a restorenoderun for the setnoderun done 
		 * at the start of this thread 
		 */
		
		DBG("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n", hubv);
		return;
		/* NOTREACHED */
	}

	/* 
	 * attach our hub_provider information to hubv,
	 * so we can use it as a crosstalk provider "master"
	 * vertex.
	 */
	xtalk_provider_register(hubv, &hub_provider);
	xtalk_provider_startup(hubv);

	/*
	 * Create a vertex to represent the crosstalk bus
	 * attached to this hub, and a vertex to be used
	 * as the connect point for whatever is out there
	 * on the other side of our crosstalk connection.
	 *
	 * Crosstalk Switch drivers "climb up" from their
	 * connection point to try and take over the switch
	 * point.
	 *
	 * Of course, the edges and vertices may already
	 * exist, in which case our net effect is just to
	 * associate the "xtalk_" driver with the connection
	 * point for the device.
	 */

	(void)hwgraph_path_add(hubv, EDGE_LBL_XTALK, &switchv);

	DBG("io_init_node: Created 'xtalk' entry to '../node/' xtalk vertex 0x%p\n", switchv);

	ASSERT(switchv != GRAPH_VERTEX_NONE);

	(void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);

	DBG("io_init_node: Created symlink 'io' from ../node/io to ../node/xtalk \n");

	/*
	 * We need to find the widget id and update the basew_id field
	 * accordingly. In particular, SN00 has a directly connected bridge,
	 * and hence the widget id is not 0.
	 */

	widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + WIDGET_ID))) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;

	if (widget_partnum == BRIDGE_WIDGET_PART_NUM ||
				widget_partnum == XBRIDGE_WIDGET_PART_NUM){
		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);

		DBG("io_init_node: Found XBRIDGE widget_partnum= 0x%x\n", widget_partnum);

	} else if ((widget_partnum == XBOW_WIDGET_PART_NUM) ||
			(widget_partnum == XXBOW_WIDGET_PART_NUM) ||
			(widget_partnum == PXBOW_WIDGET_PART_NUM) ) {
		/* 
		 * Xbow control register does not have the widget ID field.
		 * So, hard code the widget ID to be zero.
		 */
		DBG("io_init_node: Found XBOW widget_partnum= 0x%x\n", widget_partnum);
		npdap->basew_id = 0;

	} else {
		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);

		panic(" ****io_init_node: Unknown Widget Part Number 0x%x Widget ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, (void *)hubv);

		/*NOTREACHED*/
	}
	{
		char widname[10];
		sprintf(widname, "%x", npdap->basew_id);
		(void)hwgraph_path_add(switchv, widname, &widgetv);
		DBG("io_init_node: Created '%s' to '..node/xtalk/' vertex 0x%p\n", widname, widgetv);
		ASSERT(widgetv != GRAPH_VERTEX_NONE);
	}
	
	nodepda->basew_xc = widgetv;

	is_xswitch = xwidget_hwid_is_xswitch(&hwid);

	/* 
	 * Try to become the master of the widget.  If this is an xswitch
	 * with multiple hubs connected, only one will succeed.  Mastership
	 * of an xswitch is used only when touching registers on that xswitch.
	 * The slave xwidgets connected to the xswitch can be owned by various
	 * masters.
	 */
	if (device_master_set(widgetv, hubv) == 0) {

		/* Only one hub (thread) per Crosstalk device or switch makes
		 * it to here.
		 */

		/* 
		 * Initialize whatever xwidget is hanging off our hub.
		 * Whatever it is, it's accessible through widgetnum 0.
		 */
		hubinfo_get(hubv, &hubinfo);

		(void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid);

		if (!is_xswitch) {
			/* io_init_done takes cpu cookie as 2nd argument 
			 * to do a restorenoderun for the setnoderun done 
			 * at the start of this thread 
			 */
			io_init_done(cnodeid,c);
			/* NOTREACHED */
		}

		/* 
		 * Special handling for Crosstalk Switches (e.g. xbow).
		 * We need to do things in roughly the following order:
		 *	1) Initialize xswitch hardware (done above)
		 *	2) Determine which hubs are available to be widget masters
		 *	3) Discover which links are active from the xswitch
		 *	4) Assign xwidgets hanging off the xswitch to hubs
		 *	5) Initialize all xwidgets on the xswitch
		 */

		DBG("call volunteer_for_widgets\n");

		volunteer_for_widgets(switchv, hubv);

		/* If there's someone else on this crossbow, recognize him */
		if (npdap->xbow_peer != INVALID_NASID) {
			nodepda_t *peer_npdap = NODEPDA(NASID_TO_COMPACT_NODEID(npdap->xbow_peer));
			peer_sema = &peer_npdap->xbow_sema;
			DBG("call volunteer_for_widgets again\n");
			volunteer_for_widgets(switchv, peer_npdap->node_vertex);
		}

		assign_widgets_to_volunteers(switchv, hubv);

		/* Signal that we're done */
		if (peer_sema) {
			up(peer_sema);
		}
		
	}
	else {
	    /* Wait 'til master is done assigning widgets. */
	    down(&npdap->xbow_sema);
	}

#ifdef PROBE_TEST
	if ((cnodeid == 1) || (cnodeid == 2)) {
		int index;

		for (index = 0; index < 500; index++)
			DBG("Interfering with device probing!!!\n");
	}
#endif
	/* Now both nodes can safely initialize widgets */
	io_init_xswitch_widgets(switchv, cnodeid);
	io_link_xswitch_widgets(switchv, cnodeid);

	/* io_init_done takes cpu cookie as 2nd argument 
	 * to do a restorenoderun for the setnoderun done 
	 * at the start of this thread 
	 */
	io_init_done(cnodeid,c);

	DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
}
Example #29
void sn_dma_flush(uint64_t addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	uint64_t flags;
	uint64_t itte;
	struct hubdev_info *hubinfo;
	volatile struct sn_flush_device_list *p;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	if (!hubinfo) {
		BUG();
	}

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (! TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++) {
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (p->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= p->sfdl_bar_list[j].start
			    && addr <= p->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
			break;
		p++;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
	 * register since it ensures the data has entered the coherence
	 * domain, unlike PIC.
	 */
	if (is_tio) {
		/*
		 * Note:  devices behind TIOCE should never be matched in the
		 * above code, and so the following code is PIC/CP centric.
		 * If CE ever needs the sn_dma_flush mechanism, we will have
		 * to account for that here and in tioce_bus_fixup().
		 */
		uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
					     (p->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
				  sfdl_flush_lock, flags);

		*p->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(p->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* okay, everything is synched up. */
		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
	}
	return;
}
void
io_xswitch_widget_init(vertex_hdl_t  	xswitchv,
		       vertex_hdl_t	hubv,
		       xwidgetnum_t	widgetnum)
{
	xswitch_info_t		xswitch_info;
	xwidgetnum_t		hub_widgetid;
	vertex_hdl_t		widgetv;
	cnodeid_t		cnode;
	widgetreg_t		widget_id;
	nasid_t			nasid, peer_nasid;
	struct xwidget_hwid_s 	hwid;
	hubinfo_t		hubinfo;
	/*REFERENCED*/
	int			rc;
	char 			pathname[128];
	lboard_t		*board = NULL;
	char			buffer[16];
	char			bt;
	moduleid_t		io_module;
	slotid_t get_widget_slotnum(int xbow, int widget);
	
	DBG("\nio_xswitch_widget_init: hubv 0x%p, xswitchv 0x%p, widgetnum 0x%x\n", hubv, xswitchv, widgetnum);

	/*
	 * Verify that xswitchv is indeed an attached xswitch.
	 */
	xswitch_info = xswitch_info_get(xswitchv);
	ASSERT(xswitch_info != NULL);

	hubinfo_get(hubv, &hubinfo);
	nasid = hubinfo->h_nasid;
	cnode = NASID_TO_COMPACT_NODEID(nasid);
	hub_widgetid = hubinfo->h_widgetid;

	/*
	 * Check that the widget is an io widget and is enabled
	 * on this nasid or the `peer' nasid.  The peer nasid
	 * is the other hub/bedrock connected to the xbow.
	 */
	peer_nasid = NODEPDA(cnode)->xbow_peer;
	if (peer_nasid == INVALID_NASID)
		/* If I don't have a peer, use myself. */
		peer_nasid = nasid;
	if (!xbow_port_io_enabled(nasid, widgetnum) &&
	    !xbow_port_io_enabled(peer_nasid, widgetnum)) {
		return;
	}

	if (xswitch_info_link_ok(xswitch_info, widgetnum)) {
		char			name[4];
		lboard_t dummy;


		/*
		 * If the current hub is not supposed to be the master 
		 * for this widgetnum, then skip this widget.
		 */

		if (xswitch_info_master_assignment_get(xswitch_info, widgetnum) != hubv) {
			return;
		}

		board = find_lboard_class(
				(lboard_t *)KL_CONFIG_INFO(nasid),
				KLCLASS_IOBRICK);
		if (!board && NODEPDA(cnode)->xbow_peer != INVALID_NASID) {
		    	board = find_lboard_class(
					(lboard_t *)KL_CONFIG_INFO( NODEPDA(cnode)->xbow_peer),
						KLCLASS_IOBRICK);
		}

		if (board) {
			DBG("io_xswitch_widget_init: Found KLTYPE_IOBRICK Board 0x%p brd_type 0x%x\n", board, board->brd_type);
		} else {
			DBG("io_xswitch_widget_init: FIXME did not find IOBOARD\n");
			board = &dummy;
		}


		/* Copy over the nodes' geoid info */
		{
			lboard_t *brd;

			brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
			if ( brd != (lboard_t *)0 ) {
				board->brd_geoid = brd->brd_geoid;
			}
		}

		/*
		 * Make sure we really want to say xbrick, pbrick,
		 * etc. rather than XIO, graphics, etc.
		 */

		memset(buffer, 0, 16);
		format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);

		sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%s" "/%s/%d",
			buffer,
			geo_slab(board->brd_geoid),
			(board->brd_type == KLTYPE_IBRICK) ? EDGE_LBL_IBRICK :
			(board->brd_type == KLTYPE_PBRICK) ? EDGE_LBL_PBRICK :
			(board->brd_type == KLTYPE_PXBRICK) ? EDGE_LBL_PXBRICK :
			(board->brd_type == KLTYPE_IXBRICK) ? EDGE_LBL_IXBRICK :
			(board->brd_type == KLTYPE_CGBRICK) ? EDGE_LBL_CGBRICK :
			(board->brd_type == KLTYPE_OPUSBRICK) ? EDGE_LBL_OPUSBRICK :
			(board->brd_type == KLTYPE_XBRICK) ? EDGE_LBL_XBRICK : "?brick",
			EDGE_LBL_XTALK, widgetnum);
		
		DBG("io_xswitch_widget_init: path= %s\n", pathname);
		rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
		
		ASSERT(rc == GRAPH_SUCCESS);

		/* This is needed to let user programs map the
		 * module/slot numbers to the corresponding widget numbers
		 * on the crossbow.
		 */
		device_master_set(hwgraph_connectpt_get(widgetv), hubv);
		sprintf(name, "%d", widgetnum);
		DBG("io_xswitch_widget_init: FIXME hwgraph_edge_add %s xswitchv 0x%p, widgetv 0x%p\n", name, xswitchv, widgetv);
		rc = hwgraph_edge_add(xswitchv, widgetv, name);
		
		/*
		 * crosstalk switch code tracks which
		 * widget is attached to each link.
		 */
		xswitch_info_vhdl_set(xswitch_info, widgetnum, widgetv);
		
		/*
		 * Peek at the widget to get its crosstalk part and
		 * mfgr numbers, then present it to the generic xtalk
		 * bus provider to have its driver attach routine
		 * called (or not).
		 */
		widget_id = XWIDGET_ID_READ(nasid, widgetnum);
		hwid.part_num = XWIDGET_PART_NUM(widget_id);
		hwid.rev_num = XWIDGET_REV_NUM(widget_id);
		hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);
		/* Store some inventory information about
		 * the xwidget in the hardware graph.
		 */
		xwidget_inventory_add(widgetv,board,hwid);

		(void)xwidget_register(&hwid, widgetv, widgetnum,
				       hubv, hub_widgetid);

		ia64_sn_sysctl_iobrick_module_get(nasid, &io_module);
		if (io_module >= 0) {
			char			buffer[16];
			vertex_hdl_t		to, from;
			char           		*brick_name;
			extern char *iobrick_L1bricktype_to_name(int type);


			memset(buffer, 0, 16);
			format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);

			if ( isupper(MODULE_GET_BTCHAR(io_module)) ) {
				bt = tolower(MODULE_GET_BTCHAR(io_module));
			}
			else {
				bt = MODULE_GET_BTCHAR(io_module);
			}

			brick_name = iobrick_L1bricktype_to_name(bt);

			/* Add a helper vertex so xbow monitoring
			 * can identify the brick type.  It's simply
			 * an edge from the widget 0 vertex to the
			 * brick vertex.
			 */

			sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
				EDGE_LBL_SLAB "/%d/"
				EDGE_LBL_NODE "/" EDGE_LBL_XTALK "/"
				"0",
				buffer, geo_slab(board->brd_geoid));
			DBG("io_xswitch_widget_init: FROM path '%s'\n", pathname);

			from = hwgraph_path_to_vertex(pathname);
			ASSERT_ALWAYS(from);

			sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
				EDGE_LBL_SLAB "/%d/"
				"%s",
				buffer, geo_slab(board->brd_geoid), brick_name);
			DBG("io_xswitch_widget_init: TO path '%s'\n", pathname);
			to = hwgraph_path_to_vertex(pathname);
			ASSERT_ALWAYS(to);
			rc = hwgraph_edge_add(from, to,
				EDGE_LBL_INTERCONNECT);
			if (rc == -EEXIST)
				goto link_done;
			if (rc != GRAPH_SUCCESS) {
				printk("%s: Unable to establish link"
					" for xbmon.", pathname);
			}
link_done:
			;
		}

#ifdef	SN0_USE_BTE
		bte_bpush_war(cnode, (void *)board);
#endif
	}
}