/*
 * The general purpose timer ticks at 1 MHz independent of
 * the rest of the system.
 */
static void sibyte_set_mode(enum clock_event_mode mode,
                           struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	void __iomem *cfg, *init;

	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		__raw_writeq(0, cfg);
		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
			     cfg);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		/* Stop the timer until we actually program a shot */
	case CLOCK_EVT_MODE_SHUTDOWN:
		__raw_writeq(0, cfg);
		break;

	case CLOCK_EVT_MODE_UNUSED:	/* silence gcc warning */
	case CLOCK_EVT_MODE_RESUME:
		;
	}
}
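For context: a set_mode callback like this is wired into a struct clock_event_device and handed to the clockevents core. A minimal sketch under the pre-4.x API follows; the device name and feature flags are illustrative, and the real driver also fills in rating, irq, cpumask and the mult/shift scaling.
static struct clock_event_device sibyte_hpt_clockevent = {
	.name		= "sb1250-counter",	/* illustrative name */
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= sibyte_set_mode,	/* the callback above */
};

/* hypothetical registration helper, shown only to place the struct */
static void sibyte_clockevent_register(void)
{
	sibyte_hpt_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(&sibyte_hpt_clockevent);
}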
Example #2
void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
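The same descriptor-plus-poll pattern extends to page zeroing on the SB1250 data mover. A hedged sketch follows, assuming M_DM_DSCRA_ZERO_MEM and a clear_page_cpu() fallback exist alongside the definitions used above:
void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, fall back to the CPU loop */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/* same busy-wait completion detection as copy_page() above */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}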
Example #3
static void check_bus_watcher(void)
{
	uint32_t status, l2_err, memio_err;
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
	uint64_t l2_tag;
#endif

	/* Destructive read, clears register and interrupt */
	status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
	/* Bit 31 is always on, but there's no #define for that */
	if (status & ~(1UL << 31)) {
		l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
		l2_tag = in64(IO_SPACE_BASE | A_L2_ECC_TAG);
#endif
		memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
		prom_printf("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
		prom_printf("\nLast recorded signature:\n");
		prom_printf("Request %02x from %d, answered by %d with Dcode %d\n",
		       (unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
		       (int)(G_SCD_BERR_TID(status) >> 6),
		       (int)G_SCD_BERR_RID(status),
		       (int)G_SCD_BERR_DCODE(status));
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
		prom_printf("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
#endif
	} else {
		prom_printf("Bus watcher indicates no error\n");
	}
}
Example #4
static void r8169_rx( struct netif * const netif ) {
	r8169_device_t *dev = netif->state;

	if( r8169_debug ) {
		printk("Packet Received\n");  
	}

	while( (inb(IOADDR(dev, R8169_CR)) & RxBufEmpty) == 0 ) {
		unsigned int rx_entry = dev->cur_rx % RX_RING_SIZE;
		struct Desc *rx_desc = dev->rx_ring + rx_entry;
		int32_t pkt_len = (rx_desc->opts1 & 0x00001fff);
		uint64_t paddr = (uint64_t)rx_desc->addr_hi << 32 | rx_desc->addr_lo;
		uint8_t *pkt_data = (uint8_t *)(__va(paddr));

		if( rx_desc->opts1 & DescOwn ) {
			if( r8169_debug ) {
				printk("RX processing finished\n");
			}
			break;
		}

		if( r8169_debug ) {
			printk("Packet RX Size = %u\n", pkt_len);
			uint16_t i;
			for (i = 0; i < pkt_len; i++){
				printk(" %x ", *((uint8_t *)(pkt_data + i)));
			}
			printk("\n");
		}

		struct pbuf * p = pbuf_alloc( PBUF_RAW, pkt_len, PBUF_POOL );
		if( !p ) {
			printk( KERN_ERR "%s: Unable to allocate pbuf! dropping\n", __func__);
			return;
		}
    
		memcpy(p->payload, pkt_data, pkt_len);
		p->tot_len = pkt_len;
		p->next = NULL;
   
		rx_desc->opts1 |= DescOwn;
		outw(RxAckBits, IOADDR(dev, R8169_ISR));

		dev->cur_rx++;

		if( netif->input( p, netif ) != ERR_OK ) {
			printk( KERN_ERR "%s: Packet receive failed!\n", __func__);
			pbuf_free( p );
			return;
		}

		if( r8169_debug ) {
			printk("Packet Processed.  cur_rx = %d\n", dev->cur_rx);
		}
	}
}
Example #5
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
{
	unsigned int pending;

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	pending = read_c0_cause() & read_c0_status();

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	if (pending & CAUSEF_IP7)	/* Cpu performance counter interrupt */
		sbprof_cpu_intr(exception_epc(regs));
	else
#endif

	if (pending & CAUSEF_IP4)
		bcm1480_timer_interrupt(regs);

#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		bcm1480_mailbox_interrupt(regs);
#endif

#ifdef CONFIG_KGDB
	else if (pending & CAUSEF_IP6)
		bcm1480_kgdb_interrupt(regs);		/* KGDB (uart 1) */
#endif

	else if (pending & CAUSEF_IP2) {
		unsigned long long mask_h, mask_l;
		unsigned long base;

		/*
		 * Default...we've hit an IP[2] interrupt, which means we've
		 * got to check the 1480 interrupt registers to figure out what
		 * to do.  Need to detect which CPU we're on, now that
		 * smp_affinity is supported.
		 */
		base = A_BCM1480_IMR_MAPPER(smp_processor_id());
		mask_h = __raw_readq(
			IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
		mask_l = __raw_readq(
			IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));

		if (mask_h) {
			if (mask_h ^ 1)
				do_IRQ(fls64(mask_h) - 1, regs);
			else
				do_IRQ(63 + fls64(mask_l), regs);
		}
	}
}
Example #6
static int sibyte_set_periodic(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	void __iomem *cfg, *init;

	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));

	__raw_writeq(0, cfg);
	__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
	return 0;
}
Example #7
void sb1250_unmask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints;

	raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
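sb1250_unmask_irq has a masking counterpart, referenced by sb1250_time_init in Example #10. A sketch that simply mirrors the function above, setting the mask bit instead of clearing it:
void sb1250_mask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints;

	raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	cur_ints |= (((u64) 1) << irq);	/* set the bit to mask the source */
	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}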
Example #8
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
{
	unsigned int cpu = smp_processor_id();
	void __iomem *cfg, *init;

	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));

	__raw_writeq(0, cfg);
	__raw_writeq(delta - 1, init);
	__raw_writeq(M_SCD_TIMER_ENABLE, cfg);

	return 0;
}
Example #9
static void ack_bcm1480_irq(unsigned int irq)
{
	u64 pending;
	unsigned int irq_dirty;
	int k;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}
	for (k=0; k<2; k++) { /* Loop through high and low LDT interrupts */
		pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq],
						R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING))));
		pending &= ((u64)1 << (irq_dirty));
		if (pending) {
#ifdef CONFIG_SMP
			int i;
			for (i=0; i<NR_CPUS; i++) {
				/*
				 * Clear for all CPUs so an affinity switch
				 * doesn't find an old status
				 */
				__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(cpu_logical_map(i),
								R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
			}
#else
			__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
#endif

			/*
			 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
			 * Pass 2, the LDT world may be edge-triggered, but
			 * this EOI shouldn't hurt.  If they are
			 * level-sensitive, the EOI is required.
			 */
#ifdef CONFIG_PCI
			if (ht_eoi_space)
				*(uint32_t *)(ht_eoi_space+(irq<<16)+(7<<2)) = 0;
#endif
		}
	}
	bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}
Example #10
void sb1250_time_init(void)
{
	int cpu = smp_processor_id();
	int irq = K_INT_TIMER_0+cpu;

	/* Only have 4 general purpose timers */
	if (cpu > 3) {
		BUG();
	}

	if (!cpu) {
		/* Use our own gettimeoffset() routine */
		do_gettimeoffset = sb1250_gettimeoffset;
	}

	sb1250_mask_irq(cpu, irq);

	/* Map the timer interrupt to ip[4] of this cpu */
	bus_writeq(IMR_IP4_VAL,
		   IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
			  (irq << 3)));

	/* The general purpose timer ticks at 1 MHz independent of the rest of the system */
	/* Disable the timer and set up the count */
	bus_writeq(0, IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
#ifdef CONFIG_SIMULATION
	bus_writeq(50000 / HZ,
		   IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)));
#else
	bus_writeq(1000000/HZ,
		   IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)));
#endif

	/* Set the timer running */
	bus_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
		   IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));

	sb1250_unmask_irq(cpu, irq);
	sb1250_steal_irq(irq);
	/*
	 * This interrupt is "special" in that it doesn't use the request_irq
	 * way to hook the irq line.  The timer interrupt is initialized early
	 * enough to make this a major pain, and it's also firing often
	 * enough to warrant a bit of special-case code.  sb1250_timer_interrupt
	 * is called directly from irq_handler.S when IP[4] is set during an
	 * interrupt.
	 */
}
Example #11
/*
 * We use our own do_gettimeoffset() instead of the generic one,
 * because the generic one does not work in the SMP case.
 * In addition, since we use general timer 0 for system time,
 * we can get an accurate intra-jiffy offset without calibration.
 */
unsigned long sb1250_gettimeoffset(void)
{
	unsigned long count =
		bus_readq(IOADDR(A_SCD_TIMER_REGISTER(0, R_SCD_TIMER_CNT)));

	return 1000000/HZ - count;
}
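To make the arithmetic concrete: the timer reloads at 1000000/HZ and counts down, so the offset into the current jiffy is the reload value minus the raw count. A tiny self-contained check with an assumed HZ of 100 (plain C, no kernel context needed):
#include <assert.h>

#define HZ 100				/* assumed tick rate */

static unsigned long offset_from_count(unsigned long count)
{
	return 1000000 / HZ - count;	/* microseconds into the jiffy */
}

int main(void)
{
	assert(offset_from_count(10000) == 0);	/* timer just reloaded */
	assert(offset_from_count(9990) == 10);	/* 10 us into the jiffy */
	return 0;
}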
Example #12
void bcm1480_unmask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints,hl_spacing;

	spin_lock_irqsave(&bcm1480_imr_lock, flags);
	hl_spacing = 0;
	if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
		hl_spacing = BCM1480_IMR_HL_SPACING;
		irq -= BCM1480_NR_IRQS_HALF;
	}
	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}
Example #13
0
static cycle_t bcm1480_hpt_read(void)
{
	/* We assume this function is called with xtime_lock held. */
	unsigned long count =
		__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(0, R_SCD_TIMER_CNT)));
	return (jiffies + 1) * (BCM1480_HPT_VALUE / HZ) - count;
}
Example #14
int
cs_ssextio_probe(device_t parent, cfdata_t cf, void *aux)
{
	struct s3c2xx0_attach_args *sa = aux;
	bus_space_tag_t iot = sa->sa_iot;
	bus_space_handle_t ioh;
	struct cs_softc sc;
	int rv = 0, have_io = 0;
	vaddr_t ioaddr;

	if (sa->sa_intr == SSEXTIOCF_INTR_DEFAULT)
		sa->sa_intr = 9;
	if (sa->sa_addr == SSEXTIOCF_ADDR_DEFAULT)
		sa->sa_addr = S3C2410_BANK_START(3);

	/*
	 * Map the I/O space.
	 */
	ioaddr = IOADDR(sa->sa_addr);
	if (bus_space_map(iot, ioaddr, CS8900_IOSIZE, 0, &ioh))
		goto out;
	have_io = 1;

	memset(&sc, 0, sizeof sc);
	sc.sc_iot = iot;
	sc.sc_ioh = ioh;

	if (0) {
		int i;

		for (i=0; i <=PKTPG_IND_ADDR; i += 2) {
			if (i % 16 == 0)
				printf( "\n%04x: ", i);
			printf("%04x ", CS_READ_PACKET_PAGE_IO(&sc, i));
		}

	}

	/* Verify that it's a Crystal product. */
	if (CS_READ_PACKET_PAGE_IO(&sc, PKTPG_EISA_NUM) != EISA_NUM_CRYSTAL)
		goto out;

	/*
	 * Verify that it's a supported chip.
	 */
	switch (CS_READ_PACKET_PAGE_IO(&sc, PKTPG_PRODUCT_ID) & PROD_ID_MASK) {
	case PROD_ID_CS8900:
#ifdef notyet
	case PROD_ID_CS8920:
	case PROD_ID_CS8920M:
#endif
		rv = 1;
	}

 out:
	if (have_io)
		bus_space_unmap(iot, ioh, CS8900_IOSIZE);

	return (rv);
}
Example #15
void __init sb1250_clocksource_init(void)
{
	struct clocksource *cs = &bcm1250_clocksource;

	/* Setup hpt using timer #3 but do not enable irq for it */
	__raw_writeq(0,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_CFG)));
	__raw_writeq(SB1250_HPT_VALUE,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_INIT)));
	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_CFG)));

	clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
}
Example #16
static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
{
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;
	unsigned int irq_dirty;

	i = first_cpu(mask);
	if (next_cpu(i, mask) < NR_CPUS) {	/* more than one CPU in mask */
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return;
	}

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&desc->lock, flags);
	spin_lock(&bcm1480_imr_lock);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	spin_unlock(&bcm1480_imr_lock);
	spin_unlock_irqrestore(&desc->lock, flags);
}
Example #17
static cycle_t sb1250_hpt_read(struct clocksource *cs)
{
	unsigned int count;

	count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT))));

	return SB1250_HPT_VALUE - count;
}
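sb1250_hpt_read serves as the .read hook of the clocksource registered below. A minimal sketch of that structure; the name, rating and 23-bit mask are illustrative assumptions:
static struct clocksource bcm1250_clocksource = {
	.name	= "bcm1250-counter-3",	/* illustrative */
	.rating	= 200,			/* illustrative */
	.read	= sb1250_hpt_read,
	.mask	= CLOCKSOURCE_MASK(23),	/* assumed counter width */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};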
Example #18
void __init sb1250_clocksource_init(void)
{
	struct clocksource *cs = &bcm1250_clocksource;

	/* Setup hpt using timer #3 but do not enable irq for it */
	__raw_writeq(0,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_CFG)));
	__raw_writeq(SB1250_HPT_VALUE,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_INIT)));
	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_CFG)));

	clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
}
Example #19
static uint32_t pci_conf2_read_config32(struct bus *pbus, int bus, int devfn, int where)
{
	uint32_t value;
	SET(bus, devfn);
	value = inl(IOADDR(devfn, where));
	outb(0, 0xCF8);
	return value;
}
Example #20
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	/*
	 * What a pain. We have to be really careful saving the upper 32 bits
	 * of any register across function calls if we don't want them
	 * trashed--since we're running in -o32, the calling routine never
	 * saves the full 64 bits of a register across a function call.  Being
	 * the interrupt handler, we're guaranteed that interrupts are disabled
	 * during this code so we don't have to worry about random interrupts
	 * blasting the high 32 bits.
	 */

	pending = read_c0_cause() & read_c0_status() & ST0_IM;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	if (pending & CAUSEF_IP7) /* Cpu performance counter interrupt */
		sbprof_cpu_intr();
	else
#endif

	if (pending & CAUSEF_IP4)
		sb1250_timer_interrupt();

#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		sb1250_mailbox_interrupt();
#endif

#ifdef CONFIG_KGDB
	else if (pending & CAUSEF_IP6)			/* KGDB (uart 1) */
		sb1250_kgdb_interrupt();
#endif

	else if (pending & CAUSEF_IP2) {
		unsigned long long mask;

		/*
		 * Default...we've hit an IP[2] interrupt, which means we've
		 * got to check the 1250 interrupt registers to figure out what
		 * to do.  Need to detect which CPU we're on, now that
		 * smp_affinity is supported.
		 */
		mask = __raw_readq(IOADDR(A_IMR_REGISTER(smp_processor_id(),
		                              R_IMR_INTERRUPT_STATUS_BASE)));
		if (mask)
			do_IRQ(fls64(mask) - 1);
		else
			spurious_interrupt();
	} else
		spurious_interrupt();
}
Example #21
static void check_bus_watcher(void)              
{                               
	uint32_t status, l2_err, memio_err;

	/* Destructive read, clears register and interrupt */
	status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
	/* Bit 31 is always on, but there's no #define for that */
	if (status & ~(1UL << 31)) {  
		l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
		memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
		prom_printf("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
		prom_printf("\nLast recorded signature:\n");
		prom_printf("Request %02x from %d, answered by %d with Dcode %d\n",
		       (unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
		       (int)(G_SCD_BERR_TID(status) >> 6),
		       (int)G_SCD_BERR_RID(status),
		       (int)G_SCD_BERR_DCODE(status));
	} else {
		prom_printf("Bus watcher indicates no error\n");
	}
}
Example #22
static irqreturn_t r8169_interrupt( int vector, void *priv ) {
	r8169_device_t *dev = &r8169_state; 
	uint16_t status = inw(IOADDR(dev, R8169_ISR));

	if( r8169_debug ) {
//		printk("Interrupt Received: 0x%x\n", status);
		if( status & PKT_RX )  printk("Receive OK\n");
		if( status & RX_ERR )  printk("Receive Error\n");
		if( status & TX_OK )   printk("Transmit OK\n");
		if( status & TX_ERR )  printk("Transmit Error\n");
		if( status & RX_RDU )  printk("Receive Descriptor Unavailable\n");
		if( status & RX_LC )   printk("Link Change\n");
		if( status & RX_FOVW ) printk("Receive FIFO Overflow\n");
		if( status & TX_TDU )  printk("Transmit Descriptor Unavailable\n");
		if( status & SW_INT )  printk("Software Interrupt\n");
		if( status & RX_FEMP ) printk("Receive FIFO Empty\n");
	}

	if( status & PKT_RX ) {
		r8169_clear_irq(dev, status);
		r8169_rx(&r8169_netif );
		if( r8169_debug ) {
			if( inb(IOADDR(dev, R8169_CR)) & RxBufEmpty ) {
				printk("Receive Buffer Empty\n");
			} else {
				printk("Packet present in RX buffer\n");
			}
		}
	}

	if( status & TX_OK ) {
		if( r8169_debug ) {
			printk("Packet successfully transmitted\n");
		}
		r8169_clear_irq(dev, TX_OK);
	}

	if( status & RX_LC ) {
		printk("PHY Status : %02x\n", inb(IOADDR(dev, R8169_PHYSTAT)));
	}

	r8169_clear_irq(dev, status);
	return IRQ_HANDLED;
}
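r8169_clear_irq is called above but not shown. Assuming the write-1-to-clear ISR semantics already visible in r8169_rx (the RxAckBits write to R8169_ISR), a plausible sketch:
static void r8169_clear_irq(r8169_device_t *dev, uint16_t mask)
{
	/* ISR bits are write-1-to-clear, as with RxAckBits in r8169_rx() */
	outw(mask, IOADDR(dev, R8169_ISR));
}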
Example #23
void __init arch_init_irq(void)
{
	unsigned int i;
	u64 tmp;
	unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
		STATUSF_IP1 | STATUSF_IP0;

	/* Default everything to IP2 */
	for (i = 0; i < SB1250_NR_IRQS; i++) {	/* was I0 */
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(0,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(1,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
	}

	init_sb1250_irqs();

	/*
	 * Map the high 16 bits of the mailbox registers to IP[3], for
	 * inter-cpu messages
	 */
	/* Was I1 */
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));

	/* Clear the mailboxes.  The firmware may leave them dirty */
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU)));

	/* Mask everything except the mailbox registers for both cpus */
	tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0);
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));

	/*
	 * Note that the timer interrupts are also mapped, but this is
	 * done in sb1250_time_init().  Also, the profiling driver
	 * does its own management of IP7.
	 */

	/* Enable necessary IPs, disable the rest */
	change_c0_status(ST0_IM, imask);
}
Example #24
static void ack_sb1250_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SIBYTE_HAS_LDT
	u64 pending;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
						    R_IMR_LDT_INTERRUPT)));
	pending &= ((u64)1 << (irq));
	if (pending) {
		int i;
		for (i=0; i<NR_CPUS; i++) {
			int cpu;
#ifdef CONFIG_SMP
			cpu = cpu_logical_map(i);
#else
			cpu = i;
#endif
			/*
			 * Clear for all CPUs so an affinity switch
			 * doesn't find an old status
			 */
			__raw_writeq(pending,
				     IOADDR(A_IMR_REGISTER(cpu,
						R_IMR_LDT_INTERRUPT_CLR)));
		}

		/*
		 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
		 * Pass 2, the LDT world may be edge-triggered, but
		 * this EOI shouldn't hurt.  If they are
		 * level-sensitive, the EOI is required.
		 */
		*(uint32_t *)(ldt_eoi_space+(irq<<16)+(7<<2)) = 0;
	}
#endif
	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}
Example #25
static void sb1250_set_affinity(unsigned int irq, cpumask_t mask)
{
	int i = 0, old_cpu, cpu, int_on;
	u64 cur_ints;
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	i = first_cpu(mask);

	if (cpus_weight(mask) > 1) {
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return;
	}

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&desc->lock, flags);
	spin_lock(&sb1250_imr_lock);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = sb1250_irq_owner[irq];
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	int_on = !(cur_ints & (((u64) 1) << irq));
	if (int_on) {
		/* If it was on, mask it */
		cur_ints |= (((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	sb1250_irq_owner[irq] = cpu;
	if (int_on) {
		/* unmask for the new CPU */
		cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
		cur_ints &= ~(((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	spin_unlock(&sb1250_imr_lock);
	spin_unlock_irqrestore(&desc->lock, flags);
}
Example #26
static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	unsigned int irq_dirty, irq = d->irq;
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);

	return 0;
}
Example #27
static int sibyte_shutdown(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	void __iomem *cfg;

	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));

	/* Stop the timer until we actually program a shot */
	__raw_writeq(0, cfg);
	return 0;
}
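sibyte_set_periodic (Example #6), sibyte_next_event (Example #8) and sibyte_shutdown together replace the single set_mode callback of Example #1. A hedged sketch of how they slot into the post-4.x clockevents API; the name and features are illustrative:
static struct clock_event_device sibyte_hpt_clockevent = {
	.name			= "sb1250-counter",	/* illustrative */
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_periodic	= sibyte_set_periodic,
	.set_state_shutdown	= sibyte_shutdown,
	.set_state_oneshot	= sibyte_shutdown, /* stop until a shot is set */
	.set_next_event		= sibyte_next_event,
};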
Example #28
void
cs_ssextio_attach(device_t parent, device_t self, void *aux)
{
	struct cs_softc *sc = device_private(self);
	struct s3c2xx0_attach_args *sa = aux;
	vaddr_t ioaddr;
#ifdef	SMDK24X0_ETHER_ADDR_FIXED
	static uint8_t enaddr[ETHER_ADDR_LEN] = {SMDK24X0_ETHER_ADDR_FIXED};
#else
#define enaddr NULL
#endif

	sc->sc_dev = self;
	sc->sc_iot = sc->sc_memt = sa->sa_iot;
	/* sc_irq is an IRQ number in ISA world. set 10 for INTRQ0 of CS8900A */
	sc->sc_irq = 10;

	/*
	 * Map the device.
	 */
	ioaddr = IOADDR(sa->sa_addr);
	if (bus_space_map(sc->sc_iot, ioaddr, CS8900_IOSIZE, 0, &sc->sc_ioh)) {
		aprint_error(": unable to map i/o space\n");
		return;
	}

	if (bus_space_map(sc->sc_iot, sa->sa_addr, CS8900_MEMSIZE,
			  0, &sc->sc_memh))
		aprint_error(": unable to map memory space");
	else {
		sc->sc_cfgflags |= CFGFLG_MEM_MODE;
		sc->sc_pktpgaddr = sa->sa_addr;
	}

	/* CS8900A is very slow. (nOE->Data valid: 135ns max.)
	   We need to use the IOCHRDY signal */
	sc->sc_cfgflags |= CFGFLG_IOCHRDY;

	sc->sc_ih = s3c2410_extint_establish(sa->sa_intr, IPL_NET, IST_EDGE_RISING,
	    cs_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to establish interrupt\n");
		return;
	}

	aprint_normal("\n");

	/* SMDK24X0 doesn't have an EEPROM hooked to the CS8900A */
	sc->sc_cfgflags |= CFGFLG_NOT_EEPROM;

	cs_attach(sc, enaddr, cs_media, 
	    sizeof(cs_media) / sizeof(cs_media[0]), IFM_ETHER|IFM_10_T);
}
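A hedged sketch of the usual NetBSD autoconf glue tying cs_ssextio_probe (Example #14) and cs_ssextio_attach together; the attachment name is inferred from the function names:
CFATTACH_DECL_NEW(cs_ssextio, sizeof(struct cs_softc),
    cs_ssextio_probe, cs_ssextio_attach, NULL, NULL);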
Example #29
static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
			       bool force)
{
	int i = 0, old_cpu, cpu, int_on;
	unsigned int irq = d->irq;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	raw_spin_lock_irqsave(&sb1250_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = sb1250_irq_owner[irq];
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	int_on = !(cur_ints & (((u64) 1) << irq));
	if (int_on) {
		/* If it was on, mask it */
		cur_ints |= (((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	sb1250_irq_owner[irq] = cpu;
	if (int_on) {
		/* unmask for the new CPU */
		cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
		cur_ints &= ~(((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);

	return 0;
}
Example #30
static void init_duart_port(uart_state_t *port, int line)
{
	if (!(port->flags & DUART_INITIALIZED)) {
		port->line = line;
		port->status = (u32 *)(IOADDR(A_DUART_CHANREG(line, R_DUART_STATUS)));
		port->imr = (u32 *)(IOADDR(A_DUART_IMRREG(line)));
		port->tx_hold = (u32 *)(IOADDR(A_DUART_CHANREG(line, R_DUART_TX_HOLD)));
		port->rx_hold = (u32 *)(IOADDR(A_DUART_CHANREG(line, R_DUART_RX_HOLD)));
		port->mode_1 = (u32 *)(IOADDR(A_DUART_CHANREG(line, R_DUART_MODE_REG_1)));
		port->mode_2 = (u32 *)(IOADDR(A_DUART_CHANREG(line, R_DUART_MODE_REG_2)));
		port->clk_sel = (u32 *)(IOADDR(A_DUART_CHANREG(line, R_DUART_CLK_SEL)));
		port->cmd = (u32 *)(IOADDR(A_DUART_CHANREG(line, R_DUART_CMD)));
		port->flags |= DUART_INITIALIZED;
	}
}
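A hedged usage sketch: once init_duart_port() has cached the register pointers, callers poll them directly. duart_tx_ready() is a hypothetical helper, M_DUART_TX_RDY is assumed from the SB1250 DUART register definitions, and the cast matches the u32 * fields above:
static int duart_tx_ready(uart_state_t *port)
{
	/* hypothetical helper: test the transmitter-ready status bit */
	return csr_in32((void __iomem *)port->status) & M_DUART_TX_RDY;
}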