Example #1
0
int
qe_put(struct iodesc *desc, void *pkt, size_t len) {
	int j;

	bcopy(pkt, (char *)sc->qeout, len);
	sc->tring[0].qe_buf_len = -len/2;
	sc->tring[0].qe_flag = sc->tring[0].qe_status1 = QE_NOTYET;
	sc->tring[1].qe_flag = sc->tring[1].qe_status1 = QE_NOTYET;

	QE_WCSR(QE_CSR_XMTL, LOWORD(sc->tring));
	QE_WCSR(QE_CSR_XMTH, HIWORD(sc->tring));

	for (j = 0; (j < 0x10000) && ((QE_RCSR(QE_CSR_CSR) & QE_XMIT_INT) == 0); j++)
		;

	if ((QE_RCSR(QE_CSR_CSR) & QE_XMIT_INT) == 0) {
		char eaddr[6];

		qe_init(eaddr);
		return -1;
	}
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RCV_INT);

	if (sc->tring[0].qe_status1 & 0xc000) {
		char eaddr[6];

		qe_init(eaddr);
		return -1;
	}
	return len;
}
Example #2
0
int cpu_init_r (void)
{
#ifdef CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00100000; /* QE immr base */
	qe_init(qe_base);
	qe_reset();
#endif
	return 0;
}
Example #3
0
static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}
Example #4
0
int
qeopen(struct open_file *f, int adapt, int ctlr, int unit, int part) {
	u_char eaddr[6];

	if (askname == 0)
		addr = bootrpb.csrphy;	/* Autoboot; use RPB instead */
	else {
		addr = 0x20000000;
		if (unit == 0)
			addr += qereg(0774440); /* XQA0 */
		else if (unit == 1)
			addr += qereg(0174460); /* XQB0 */
		else
			return ECTLR;
	}

	qe_init(eaddr);

	net_devinit(f, &qe_driver, eaddr);
	return 0;
}
Example #5
0
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (! tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}
Example #6
0
/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}
Example #7
0
int cpu_init_r(void)
{
#if defined(CONFIG_CLEAR_LAW0) || defined(CONFIG_L2_CACHE)
	volatile immap_t    *immap = (immap_t *)CFG_IMMR;
#endif
#ifdef CONFIG_CLEAR_LAW0
	volatile ccsr_local_ecm_t *ecm = &immap->im_local_ecm;

	/* clear alternate boot location LAW (used for sdram, or ddr bank) */
	ecm->lawar0 = 0;
#endif

#if defined(CONFIG_L2_CACHE)
	volatile ccsr_l2cache_t *l2cache = &immap->im_l2cache;
	volatile uint cache_ctl;
	uint svr, ver;
	uint l2srbar;

	svr = get_svr();
	ver = SVR_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

	switch (cache_ctl & 0x30000000) {
	case 0x20000000:
		if (ver == SVR_8548 || ver == SVR_8548_E ||
		    ver == SVR_8544 || ver == SVR_8568_E) {
			printf ("L2 cache 512KB:");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		} else {
			printf ("L2 cache 256KB:");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 Kbyte) */
			cache_ctl = 0xc8000000;
		}
		break;
	case 0x10000000:
		printf ("L2 cache 256KB:");
		if (ver == SVR_8544 || ver == SVR_8544_E) {
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x30000000:
	case 0x00000000:
	default:
		printf ("L2 cache unknown size (0x%08x)\n", cache_ctl);
		return -1;
	}

	if (l2cache->l2ctl & 0x80000000) {
		printf(" already enabled.");
		l2srbar = l2cache->l2srbar0;
#ifdef CFG_INIT_L2_ADDR
		if (l2cache->l2ctl & 0x00010000 && l2srbar >= CFG_FLASH_BASE) {
			l2srbar = CFG_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf("  Moving to 0x%08x", CFG_INIT_L2_ADDR);
		}
#endif /* CFG_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		printf(" enabled\n");
	}
#else
	printf("L2 cache: disabled\n");
#endif
#ifdef CONFIG_QE
	uint qe_base = CFG_IMMR + 0x00080000; /* QE immr base */
	qe_init(qe_base);
	qe_reset();
#endif

	return 0;
}