Example #1
/*
 * Change protections to allow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 */
void
xpc_allow_IPI_ops(void)
{
	int node;
	int nasid;


	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

	if (is_shub2()) {
		xpc_sh2_IPI_access0 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
								-1UL);
		}

	} else {
Example #2
/* Check for an RTC interrupt pending */
static int mmtimer_int_pending(int comparator)
{
	if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
			SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
		return 1;
	else
		return 0;
}
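
A minimal caller sketch, not from the original source: poll every comparator and service whichever has an RTC interrupt pending. NUM_COMPARATORS and mmtimer_service() are assumed names here.

/* Hypothetical helper built on mmtimer_int_pending() above. */
static void mmtimer_poll_all(void)
{
	int i;

	for (i = 0; i < NUM_COMPARATORS; i++)	/* assumed constant */
		if (mmtimer_int_pending(i))
			mmtimer_service(i);	/* assumed handler */
}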
Example #3
static void sn_ack_irq(unsigned int irq)
{
	u64 event_occurred, mask;

	irq = irq & 0xff;
	event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
	mask = event_occurred & SH_ALL_INT_MASK;
	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

	move_native_irq(irq);
}
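
The acknowledge above depends on SH_EVENT_OCCURRED_ALIAS behaving as a write-to-clear view of SH_EVENT_OCCURRED. A sketch of that idiom in isolation (the helper name is an assumption): writing back only the bits actually observed, rather than ~0, means events that latch between the read and the write are not lost.

/* Sketch: clear exactly the event bits we saw, nothing more. */
static inline void sn_clear_observed_events(u64 observed)
{
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
	      observed & SH_ALL_INT_MASK);
}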
Example #4
static void intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend,
	int base_level)
{
	volatile hubreg_t bits;
	int i;

	/* Check pending interrupts */
	if ((bits = HUB_L(pend)) != 0)
		for (i = 0; i < N_INTPEND_BITS; i++)
			if (bits & (1 << i))
				LOCAL_HUB_CLR_INTR(base_level + i);
}
Example #5
/**
 * hub_pio_map  -  establish a HUB PIO mapping
 *
 * @cnode:	compact node ID of the hub to perform the PIO mapping on
 * @widget:	widget ID to perform PIO mapping for
 * @xtalk_addr:	xtalk_address that needs to be mapped
 * @size:	size of the PIO mapping
 *
 **/
unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
			  unsigned long xtalk_addr, size_t size)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
	volatile hubreg_t junk;
	unsigned i;

	/* use small-window mapping if possible */
	if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
		return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);

	if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
		printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
				" too big (%ld)\n",
				nasid, widget, xtalk_addr, size);
		return 0;
	}

	xtalk_addr &= ~(BWIN_SIZE-1);
	for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
		if (test_and_set_bit(i, hub_data(cnode)->h_bigwin_used))
			continue;

		/*
		 * The code below does a PIO write to setup an ITTE entry.
		 *
		 * We need to prevent other CPUs from seeing our updated
		 * memory shadow of the ITTE (in the piomap) until the ITTE
		 * entry is actually set up; otherwise, another CPU might
		 * attempt a PIO prematurely.
		 *
		 * Also, the only way we can know that an entry has been
		 * received  by the hub and can be used by future PIO reads/
		 * writes is by reading back the ITTE entry after writing it.
		 *
		 * For these two reasons, we PIO read back the ITTE entry
		 * after we write it.
		 */
		IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
		junk = HUB_L(IIO_ITTE_GET(nasid, i));

		return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
	}

	printk(KERN_WARNING "unable to establish PIO mapping for at"
			" hub %d widget %d addr 0x%lx\n",
			nasid, widget, xtalk_addr);
	return 0;
}
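
hub_pio_map() returns 0 when no window can be set up, so callers must check the result before dereferencing. A hedged caller sketch (wrapper name and error convention are assumptions):

/* Hypothetical wrapper: map a 32-bit register and read it once. */
static int hub_pio_read32(cnodeid_t cnode, xwidgetnum_t widget,
			  unsigned long xtalk_addr, u32 *val)
{
	unsigned long vaddr;

	vaddr = hub_pio_map(cnode, widget, xtalk_addr, sizeof(u32));
	if (!vaddr)
		return -EINVAL;	/* span too big, or no big window free */
	*val = *(volatile u32 *)vaddr;
	return 0;
}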
Example #6
/*ARGSUSED*/
int
cpuprom_map(devfs_handle_t dev, vhandl_t *vt, off_t addr, size_t len)
{
    int 		errcode;
    caddr_t 	kvaddr;
    devfs_handle_t		node;
    cnodeid_t 	cnode;

    node = prominfo_nodeget(dev);

    if (!node)
        return EIO;


    kvaddr = hubdev_prombase_get(node);
    cnode  = hubdev_cnodeid_get(node);
#ifdef	HUBSPC_DEBUG
    printf("cpuprom_map: hubnode %d kvaddr 0x%x\n", node, kvaddr);
#endif

    if (len > RBOOT_SIZE)
        len = RBOOT_SIZE;
    /*
     * Map in the prom space
     */
    errcode = v_mapphys(vt, kvaddr, len);

    if (errcode == 0 ) {
        /*
         * Set the MD configuration registers suitably.
         */
        nasid_t		nasid;
        uint64_t	value;
        volatile hubreg_t	*regaddr;

        nasid = COMPACT_TO_NASID_NODEID(cnode);
        regaddr = REMOTE_HUB_ADDR(nasid, FPROM_CONFIG_ADDR);
        value = HUB_L(regaddr);
        value &= ~(FPROM_SETUP_MASK | FPROM_ENABLE_MASK);
        {
            value |= (((long)CONFIG_FPROM_SETUP << FPROM_SETUP_SHFT) |
                      ((long)CONFIG_FPROM_ENABLE << FPROM_ENABLE_SHFT));
        }
        HUB_S(regaddr, value);

    }
    return (errcode);
}
Example #7
static void sn_ack_irq(unsigned int irq)
{
	uint64_t event_occurred, mask = 0;
	int nasid;

	irq = irq & 0xff;
	nasid = get_nasid();
	event_occurred =
	    HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
	mask = event_occurred & SH_ALL_INT_MASK;
	HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
		 mask);
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

	move_irq(irq);
}
Example #8
static void sn_end_irq(unsigned int irq)
{
	int ivec;
	u64 event_occurred;

	ivec = irq & 0xff;
	if (ivec == SGI_UART_VECTOR) {
		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
		/* If the UART bit is set here, we may have received an
		 * interrupt from the UART that the driver missed.  To
		 * make sure, we IPI ourselves to force us to look again.
		 */
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
					  IA64_IPI_DM_INT, 0);
		}
	}
	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}
Example #9
static void
sn_end_irq(unsigned int irq)
{
	int nasid;
	int ivec;
	unsigned long event_occurred;

	ivec = irq & 0xff;
	if (ivec == SGI_UART_VECTOR) {
		nasid = smp_physical_node_id();
		event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
		// If the UART bit is set here, we may have received an interrupt from the
		// UART that the driver missed.  To make sure, we IPI ourselves to force us
		// to look again.
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
				platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0);
		}
	}
	clear_bit(ivec, (volatile void *)pda.sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}
Example #10
static void
sn_end_irq(unsigned int irq)
{
#ifdef CONFIG_IA64_SGI_SN1
	unsigned long long intpend_val, mask = 0x70L;
	int subnode;
#endif
	int nasid;
#ifdef CONFIG_IA64_SGI_SN2
	unsigned long event_occurred;
#endif

	irq = irq & 0xff;
#ifdef CONFIG_IA64_SGI_SN1
	if (irq == SGI_UART_IRQ) {
		nasid = smp_physical_node_id();
		subnode = cpuid_to_subnode(smp_processor_id());
		intpend_val = REMOTE_HUB_PI_L(nasid, subnode, PI_INT_PEND0);
		if (intpend_val & mask) {
			platform_send_ipi(smp_processor_id(), SGI_UART_IRQ, IA64_IPI_DM_INT, 0);
		}
	}
#endif
#ifdef CONFIG_IA64_SGI_SN2
	if (irq == SGI_UART_VECTOR) {
		nasid = smp_physical_node_id();
		event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
		// If the UART bit is set here, we may have received an interrupt from the
		// UART that the driver missed.  To make sure, we IPI ourselves to force us
		// to look again.
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
				platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0);
		}
	}
#endif

}
Example #11
static void
sn_ack_irq(unsigned int irq)
{
	unsigned long event_occurred, mask = 0;
	int nasid;

	irq = irq & 0xff;
	nasid = smp_physical_node_id();
	event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
	}
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask );
	__set_bit(irq, (volatile void *)pda.sn_in_service_ivecs);
}
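
The four if-blocks above translate each pending event into its shifted acknowledge bit; the same mask can be built table-driven. A sketch using only the constants already seen above (the function and table names are assumptions):

static const struct {
	unsigned long mask;	/* pending bit in SH_EVENT_OCCURRED */
	int shft;		/* bit to write back to the ALIAS register */
} sn_ack_events[] = {
	{ SH_EVENT_OCCURRED_UART_INT_MASK, SH_EVENT_OCCURRED_UART_INT_SHFT },
	{ SH_EVENT_OCCURRED_IPI_INT_MASK,  SH_EVENT_OCCURRED_IPI_INT_SHFT },
	{ SH_EVENT_OCCURRED_II_INT0_MASK,  SH_EVENT_OCCURRED_II_INT0_SHFT },
	{ SH_EVENT_OCCURRED_II_INT1_MASK,  SH_EVENT_OCCURRED_II_INT1_SHFT },
};

static unsigned long sn_ack_mask(unsigned long event_occurred)
{
	unsigned long mask = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sn_ack_events); i++)
		if (event_occurred & sn_ack_events[i].mask)
			mask |= 1UL << sn_ack_events[i].shft;
	return mask;
}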
Example #12
void sn_dma_flush(uint64_t addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	uint64_t flags;
	uint64_t itte;
	struct hubdev_info *hubinfo;
	volatile struct sn_flush_device_list *p;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	if (!hubinfo) {
		BUG();
	}

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (! TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++) {
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (p->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= p->sfdl_bar_list[j].start
			    && addr <= p->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
			break;
		p++;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
	 * register since it ensures the data has entered the coherence
	 * domain, unlike PIC.
	 */
	if (is_tio) {
		/*
	 	 * Note:  devices behind TIOCE should never be matched in the
		 * above code, and so the following code is PIC/CP centric.
		 * If CE ever needs the sn_dma_flush mechanism, we will have
		 * to account for that here and in tioce_bus_fixup().
	 	 */
		uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
					     (p->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
				  sfdl_flush_lock, flags);

		*p->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(p->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* okay, everything is synched up. */
		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
	}
	return;
}
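
A usage sketch, assumed rather than taken from the source: a driver flushes by DMA bus address once the device signals completion, and only then reads the buffer.

/* Hypothetical completion path: drain posted writes, then read. */
static u8 mydev_dma_first_byte(uint64_t dma_addr, const u8 *cpu_buf)
{
	sn_dma_flush(dma_addr);	/* push in-flight writes into the coherence domain */
	return cpu_buf[0];	/* buffer contents are now safe to read */
}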
Example #13
/* 
 * >>> bte_crb_error_handler needs to be broken into two parts.  The
 * first should cleanup the CRB.  The second should wait until all bte
 * related CRB's are complete and then do the error reset.
 */
void
bte_crb_error_handler(devfs_handle_t hub_v, int btenum, 
		      int crbnum, ioerror_t *ioe, int bteop)
/*
 * Function: 	bte_crb_error_handler
 * Purpose:	Process a CRB for a specific HUB/BTE
 * Parameters:	hub_v	- vertex of hub in HW graph
 *		btenum	- bte number on hub (0 == a, 1 == b)
 *		crbnum	- crb number being processed
 * Notes: 
 *	This routine assumes serialization at a higher level. A CRB 
 *	should not be processed more than once. The error recovery 
 *	follows the following sequence - if you change this, be real
 *	sure about what you are doing. 
 *
 */
{
        hubinfo_t	hinfo;
	icrba_t		crba; 
	icrbb_t		crbb; 
	nasid_t		n;
	hubreg_t	iidsr, imem, ieclr;

	hubinfo_get(hub_v, &hinfo);


	n = hinfo->h_nasid;
	

	/*
	 * The following 10 lines (or so) are adapted from IRIX's
	 * bte_crb_error function.  No clear documentation tells
	 * why the crb needs to complete normally in order for
	 * the BTE to resume normal operations.  This first step
	 * appears vital!
	 */

	/*
	 * Zero error and error code to prevent error_dump complaining
	 * about these CRBs. Copy the CRB to the notification line.
	 * The crb address is in shub format (physical address shifted
	 * right by cacheline size).
	 */
	crbb.ii_icrb0_b_regval = REMOTE_HUB_L(n, IIO_ICRB_B(crbnum));
	crbb.b_error=0;
	crbb.b_ecode=0;
	REMOTE_HUB_S(n, IIO_ICRB_B(crbnum), crbb.ii_icrb0_b_regval);

	crba.ii_icrb0_a_regval = REMOTE_HUB_L(n, IIO_ICRB_A(crbnum));
	crba.a_addr = TO_PHYS((u64)&nodepda->bte_if[btenum].notify) >> 3;
	crba.a_valid = 1;
	REMOTE_HUB_S(n, IIO_ICRB_A(crbnum), crba.ii_icrb0_a_regval);

	REMOTE_HUB_S(n, IIO_ICCR, 
		     IIO_ICCR_PENDING | IIO_ICCR_CMD_FLUSH | crbnum);

	/* Spin until the hub reports the CRB flush complete. */
	while (REMOTE_HUB_L(n, IIO_ICCR) & IIO_ICCR_PENDING)
	    ;


	/* Terminate the BTE. */
	/* >>> The other bte transfer will need to be restarted. */
	HUB_L((shubreg_t *)((nodepda->bte_if[btenum].bte_base_addr +
		       IIO_IBCT0 - IIO_IBLS0)));

	imem = REMOTE_HUB_L(n, IIO_IMEM);
	ieclr = REMOTE_HUB_L(n, IIO_IECLR);
	if (btenum == 0) {
		imem |= IIO_IMEM_W0ESD | IIO_IMEM_B0ESD;
		ieclr|= IECLR_BTE0;
	} else {
		imem |= IIO_IMEM_W0ESD | IIO_IMEM_B1ESD;
		ieclr|= IECLR_BTE1;
	}
	REMOTE_HUB_S(n, IIO_IMEM, imem);
	REMOTE_HUB_S(n, IIO_IECLR, ieclr);
		
	iidsr  = REMOTE_HUB_L(n, IIO_IIDSR);
	iidsr &= ~IIO_IIDSR_SENT_MASK;
	iidsr |= IIO_IIDSR_ENB_MASK;
	REMOTE_HUB_S(n, IIO_IIDSR, iidsr);


 	bte_reset_nasid(n);

	*nodepda->bte_if[btenum].most_rcnt_na = IBLS_ERROR;
}
Example #14
void sn_dma_flush(u64 addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	unsigned long flags;
	u64 itte;
	struct hubdev_info *hubinfo;
	struct sn_flush_device_kernel *p;
	struct sn_flush_device_common *common;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	BUG_ON(!hubinfo);

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (! TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	
	for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
		common = p->common;
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (common->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= common->sfdl_bar_list[j].start
			    && addr <= common->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
			break;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	if (is_tio) {
		u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		u32 revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
					     (common->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&p->sfdl_flush_lock, flags);
		*common->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile u32 *)(common->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(common->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* okay, everything is synched up. */
		spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
	}
	return;
}
Example #15
static void
sn_ack_irq(unsigned int irq)
{
#ifdef CONFIG_IA64_SGI_SN1
	int bit = -1;
	unsigned long long intpend_val;
	int subnode;
#endif
#ifdef CONFIG_IA64_SGI_SN2
	unsigned long event_occurred, mask = 0;
#endif
	int nasid;

	irq = irq & 0xff;
	nasid = smp_physical_node_id();
#ifdef CONFIG_IA64_SGI_SN1
	subnode = cpuid_to_subnode(smp_processor_id());
	if (irq == SGI_UART_IRQ) {
		intpend_val = REMOTE_HUB_PI_L(nasid, subnode, PI_INT_PEND0);
		if (intpend_val & (1L<<GFX_INTR_A) ) {
			bit = GFX_INTR_A;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if ( intpend_val & (1L<<GFX_INTR_B) ) {
			bit = GFX_INTR_B;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if (intpend_val & (1L<<PG_MIG_INTR) ) {
			bit = PG_MIG_INTR;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if (intpend_val & (1L<<CC_PEND_A)) {
			bit = CC_PEND_A;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if (intpend_val & (1L<<CC_PEND_B)) {
			bit = CC_PEND_B;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		return;
	}
	bit = irq_to_bit_pos(irq);
	REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
#endif

#ifdef CONFIG_IA64_SGI_SN2
	event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
	}
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask );
#endif
}
Example #16
/* ARGSUSED */
hub_piomap_t
hub_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
                 device_desc_t dev_desc,	/* device descriptor */
                 iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
                 size_t byte_count,
                 size_t byte_count_max, 	/* maximum size of a mapping */
                 unsigned flags)		/* defined in sys/pio.h */
{
    xwidget_info_t widget_info = xwidget_info_get(dev);
    xwidgetnum_t widget = xwidget_info_id_get(widget_info);
    devfs_handle_t hubv = xwidget_info_master_get(widget_info);
    hubinfo_t hubinfo;
    hub_piomap_t bw_piomap;
    int bigwin, free_bw_index;
    nasid_t nasid;
    volatile hubreg_t junk;
    int s;

    /* sanity check */
    if (byte_count_max > byte_count)
        return(NULL);

    hubinfo_get(hubv, &hubinfo);

    /* If xtalk_addr range is mapped by a small window, we don't have
     * to do much
     */
    if (xtalk_addr + byte_count <= SWIN_SIZE)
        return(hubinfo_swin_piomap_get(hubinfo, (int)widget));

    /* We need to use a big window mapping.  */

    /*
     * TBD: Allow requests that would consume multiple big windows --
     * split the request up and use multiple mapping entries.
     * For now, reject requests that span big windows.
     */
    if ((xtalk_addr % BWIN_SIZE) + byte_count > BWIN_SIZE)
        return(NULL);


    /* Round xtalk address down for big window alignment */
    xtalk_addr = xtalk_addr & ~(BWIN_SIZE-1);

    /*
     * Check to see if an existing big window mapping will suffice.
     */
tryagain:
    free_bw_index = -1;
    s = mutex_spinlock(&hubinfo->h_bwlock);
    for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
        bw_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);

        /* If mapping is not valid, skip it */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)) {
            free_bw_index = bigwin;
            continue;
        }

        /*
         * If mapping is UNFIXED, skip it.  We don't allow sharing
         * of UNFIXED mappings, because this would allow starvation.
         */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED))
            continue;

        if ( xtalk_addr == bw_piomap->hpio_xtalk_info.xp_xtalk_addr &&
                widget == bw_piomap->hpio_xtalk_info.xp_target) {
            bw_piomap->hpio_holdcnt++;
            mutex_spinunlock(&hubinfo->h_bwlock, s);
            return(bw_piomap);
        }
    }

    /*
     * None of the existing big window mappings will work for us --
     * we need to establish a new mapping.
     */

    /* Ensure that we don't consume all big windows with FIXED mappings */
    if (flags & PIOMAP_FIXED) {
        if (hubinfo->h_num_big_window_fixed < HUB_NUM_BIG_WINDOW-1) {
            ASSERT(free_bw_index >= 0);
            hubinfo->h_num_big_window_fixed++;
        } else {
            bw_piomap = NULL;
            goto done;
        }
    } else { /* PIOMAP_UNFIXED */
        if (free_bw_index < 0) {
            if (flags & PIOMAP_NOSLEEP) {
                bw_piomap = NULL;
                goto done;
            }

            sv_wait(&hubinfo->h_bwwait, PZERO, &hubinfo->h_bwlock, s);
            goto tryagain;
        }
    }


    /* OK!  Allocate big window free_bw_index for this mapping. */
    /*
     * The code below does a PIO write to setup an ITTE entry.
     * We need to prevent other CPUs from seeing our updated memory
     * shadow of the ITTE (in the piomap) until the ITTE entry is
     * actually set up; otherwise, another CPU might attempt a PIO
     * prematurely.
     *
     * Also, the only way we can know that an entry has been received
     * by the hub and can be used by future PIO reads/writes is by
     * reading back the ITTE entry after writing it.
     *
     * For these two reasons, we PIO read back the ITTE entry after
     * we write it.
     */

    nasid = hubinfo->h_nasid;
    IIO_ITTE_PUT(nasid, free_bw_index, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
    junk = HUB_L(IIO_ITTE_GET(nasid, free_bw_index));

    bw_piomap = hubinfo_bwin_piomap_get(hubinfo, free_bw_index);
    bw_piomap->hpio_xtalk_info.xp_dev = dev;
    bw_piomap->hpio_xtalk_info.xp_target = widget;
    bw_piomap->hpio_xtalk_info.xp_xtalk_addr = xtalk_addr;
    bw_piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)NODE_BWIN_BASE(nasid, free_bw_index);
    bw_piomap->hpio_holdcnt++;
    bw_piomap->hpio_bigwin_num = free_bw_index;

    if (flags & PIOMAP_FIXED)
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED;
    else
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID;

done:
    mutex_spinunlock(&hubinfo->h_bwlock, s);
    return(bw_piomap);
}
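
A caller sketch (the helper name and the null device descriptor are assumptions): the kernel virtual address for the mapping comes back in hpio_xtalk_info.xp_kvaddr, and NULL signals that every big window is busy or the request spans windows.

/* Hypothetical helper: obtain a PIO kvaddr for a widget range. */
static caddr_t widget_pio_kvaddr(devfs_handle_t dev, iopaddr_t xtalk_addr,
                                 size_t size)
{
    hub_piomap_t map;

    map = hub_piomap_alloc(dev, (device_desc_t)0, xtalk_addr,
                           size, size, PIOMAP_FIXED);
    if (map == NULL)
        return (caddr_t)0;	/* no big window available */
    return map->hpio_xtalk_info.xp_kvaddr;
}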
Example #17
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
								-1UL);
		}

	} else {
		xpc_sh1_IPI_access =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
								-1UL);

			/*
			 * Since the BIST collides with memory operations on
			 * SHUB 1.1 sn_change_memprotect() cannot be used.
			 */
			if (enable_shub_wars_1_1()) {
				/* open up everything */
				xpc_prot_vec[node] = (u64) HUB_L((u64 *)
						GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0));