Example #1
static int
vgbenewrx(Ctlr* ctlr, int i)
{
	Block* block;
	RxDesc* desc;

	/*
	 * allocate a receive Block.  we're maintaining
	 * a private pool of Blocks, so we don't want freeb
	 * to actually free them, thus we set block->free.
	 */
	block = allocb(RxSize);
	block->free = noop;

	/* Remember that block. */
	ctlr->rx_blocks[i] = block;

	/* Initialize Rx descriptor. (TODO: 48/64 bits support ?) */
	desc = &ctlr->rx_ring[i];
	desc->status = htole32(RxDesc_Status_Own);
	desc->control = htole32(0);

	desc->addr_lo = htole32((ulong)PCIWADDR(block->rp));
	desc->addr_hi = htole16(0);
	desc->length = htole16(RxSize | 0x8000);

	return 0;
}
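
A note on the private-pool trick above: setting block->free makes freeb hand the Block back to the driver instead of releasing it. The callback itself is not part of this excerpt; a minimal sketch of what the driver's noop might look like (an assumption, not quoted from the vgbe source) is:

/*
 * Sketch only: a do-nothing free callback.  With block->free set,
 * freeb calls this and returns, so the Block stays in the driver's
 * private receive pool instead of going back to the allocator.
 */
static void
noop(Block*)
{
}
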
Example #2
static void
igbereplenish(Ctlr* ctlr)
{
	int rdt;
	Block *bp;
	Rdesc *rdesc;

	rdt = ctlr->rdt;
	while(NEXT(rdt, Nrdesc) != ctlr->rdh){
		rdesc = &ctlr->rdba[rdt];
		if(ctlr->rb[rdt] != nil){
			/* nothing to do */
		}
		else if((bp = iallocb(2048)) != nil){
			ctlr->rb[rdt] = bp;
			rdesc->addr[0] = PCIWADDR(bp->rp);
			rdesc->addr[1] = 0;
		}
		else
			break;
		rdesc->status = 0;

		rdt = NEXT(rdt, Nrdesc);
	}
	ctlr->rdt = rdt;
	csr32w(ctlr, Rdt, rdt);
}
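
igbereplenish, and the txstart routines in the later examples, walk their descriptor rings with NEXT and PREV index helpers that are not shown in these excerpts. A plausible form, matching the usual Plan 9 driver idiom but written here as an assumption, is:

/* Assumed ring-index helpers: advance or back up an index, wrapping at len entries. */
#define NEXT(x, len)	(((x)+1)%(len))
#define PREV(x, len)	(((x) == 0) ? (len)-1 : (x)-1)
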
Example #3
static void
txstart(Ether *edev)
{
	int tdh, tdt;
	Ctlr *ctlr = edev->ctlr;
	Block *bp;
	Tdesc *tdesc;

	/*
	 * Try to fill the ring back up, moving buffers from the transmit q.
	 */
	tdh = PREV(ctlr->tdh, Ntdesc);
	for(tdt = ctlr->tdt; tdt != tdh; tdt = NEXT(tdt, Ntdesc)){
		/* pull off the head of the transmission queue */
		if((bp = ctlr->bqhead) == nil)	/* was qget(edev->oq) */
			break;
		ctlr->bqhead = bp->next;
		if (ctlr->bqtail == bp)
			ctlr->bqtail = nil;

		/* set up a descriptor for it */
		tdesc = &ctlr->tdba[tdt];
		tdesc->addr[0] = PCIWADDR(bp->rp);
		tdesc->addr[1] = 0;
		tdesc->control = /* Ide | */ Rs | Ifcs | Teop | BLEN(bp);

		ctlr->tb[tdt] = bp;
	}
	ctlr->tdt = tdt;
	csr32w(ctlr, Tdt, tdt);
	i82563im(ctlr, Txdw);
}
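
txstart sizes each descriptor with BLEN(bp), the count of valid bytes queued in a Block. The macro comes from the kernel headers rather than from this driver; the usual definition, quoted here as an assumption, is:

/* Assumed definition: bytes held in a Block, i.e. the span between its read and write pointers. */
#define BLEN(s)	((s)->wp - (s)->rp)
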
Example #4
static char*
initring(Ctlr *ctlr)
{
	RXQ *rx;
	TXQ *tx;
	int i, q;

	rx = &ctlr->rx;
	if(rx->b == nil)
		rx->b = malloc(sizeof(Block*) * Nrx);
	if(rx->p == nil)
		rx->p = mallocalign(sizeof(u32int) * Nrx, 16 * 1024, 0, 0);
	if(rx->b == nil || rx->p == nil)
		return "no memory for rx ring";
	for(i = 0; i<Nrx; i++){
		rx->p[i] = 0;
		if(rx->b[i] != nil){
			freeb(rx->b[i]);
			rx->b[i] = nil;
		}
		if(rbplant(ctlr, i) < 0)
			return "no memory for rx descriptors";
	}
	rx->i = 0;

	if(ctlr->shared == nil)
		ctlr->shared = mallocalign(4096, 4096, 0, 0);
	if(ctlr->shared == nil)
		return "no memory for shared buffer";
	memset(ctlr->shared, 0, 4096);

	for(q=0; q<nelem(ctlr->tx); q++){
		tx = &ctlr->tx[q];
		if(tx->b == nil)
			tx->b = malloc(sizeof(Block*) * Ntx);
		if(tx->d == nil)
			tx->d = mallocalign(Tdscsize * Ntx, 16 * 1024, 0, 0);
		if(tx->c == nil)
			tx->c = mallocalign(Tcmdsize * Ntx, 4, 0, 0);
		if(tx->b == nil || tx->d == nil || tx->c == nil)
			return "no memory for tx ring";
		memset(tx->d, 0, Tdscsize * Ntx);
		memset(tx->c, 0, Tcmdsize * Ntx);
		for(i=0; i<Ntx; i++){
			if(tx->b[i] != nil){
				freeb(tx->b[i]);
				tx->b[i] = nil;
			}
		}
		ctlr->shared->txbase[q] = PCIWADDR(tx->d);
		tx->i = 0;
		tx->n = 0;
		tx->lastcmd = 0;
	}
	return nil;
}
Example #5
static void
txstart(Ether *edev)
{
	int tdh, tdt, len, olen;
	Ctlr *ctlr = edev->ctlr;
	Block *bp;
	Tdesc *tdesc;

	/*
	 * Try to fill the ring back up, moving buffers from the transmit q.
	 */
	tdh = PREV(ctlr->tdh, Ntdesc);
	for(tdt = ctlr->tdt; tdt != tdh; tdt = NEXT(tdt, Ntdesc)){
		/* pull off the head of the transmission queue */
		if((bp = ctlr->bqhead) == nil)		/* was qget(edev->oq) */
			break;
		ctlr->bqhead = bp->next;
		if (ctlr->bqtail == bp)
			ctlr->bqtail = nil;
		len = olen = BLEN(bp);

		/*
		 * if the packet is too short, make it longer rather than relying
		 * on the ethernet interface to pad it, and complain so the caller
		 * can be fixed.  I don't think Psp is working right, or it's
		 * getting cleared.
		 */
		if (len < ETHERMINTU) {
			if (bp->rp + ETHERMINTU <= bp->lim)
				bp->wp = bp->rp + ETHERMINTU;
			else
				bp->wp = bp->lim;
			len = BLEN(bp);
			print("txstart: extended short pkt %d -> %d bytes\n",
				olen, len);
		}

		/* set up a descriptor for it */
		tdesc = &ctlr->tdba[tdt];
		tdesc->addr[0] = PCIWADDR(bp->rp);
		tdesc->addr[1] = 0;
		tdesc->control = /* Ide| */ Rs|Dext|Ifcs|Teop|DtypeDD|len;
		tdesc->status = 0;

		ctlr->tb[tdt] = bp;
	}
	ctlr->tdt = tdt;
	csr32w(ctlr, Tdt, tdt);
	igbeim(ctlr, Txdw);
}
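
One detail worth noting about the short-packet fix above: advancing bp->wp transmits whatever stale bytes happen to follow the packet in the buffer. A variant that zero-fills the padding could look like the sketch below (an illustration only; the name padshort is hypothetical and not part of the driver):

/*
 * Sketch only: pad a short frame out to ETHERMINTU with zero bytes
 * rather than leftover buffer contents; does nothing if the Block
 * has no room.  Hypothetical helper, not taken from the driver.
 */
static void
padshort(Block *bp)
{
	long len;

	len = BLEN(bp);
	if(len >= ETHERMINTU || bp->rp + ETHERMINTU > bp->lim)
		return;
	memset(bp->wp, 0, ETHERMINTU - len);
	bp->wp = bp->rp + ETHERMINTU;
}
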
Example #6
static int
rbplant(Ctlr *ctlr, int i)
{
	Block *b;

	b = iallocb(Rbufsize+127);
	if(b == nil)
		return -1;
	b->rp = b->wp = (uchar*)((((uintptr)b->base+127)&~127));
	memset(b->rp, 0, Rdscsize);
	coherence();
	ctlr->rx.b[i] = b;
	ctlr->rx.p[i] = PCIWADDR(b->rp);
	return 0;
}
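
rbplant aligns the receive buffer to a 128-byte boundary by hand with the add-and-mask expression above. The same round-up can be written as a generic helper; the macro below is hypothetical, not part of the driver, and requires a power-of-two alignment:

/* Hypothetical helper: round pointer p up to the next multiple of a (a must be a power of two). */
#define ALIGNUP(p, a)	(((uintptr)(p) + ((a)-1)) & ~(uintptr)((a)-1))
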
Example #7
static void
vt6102receive(Ether* edev)
{
	Ds *ds;
	Block *bp;
	Ctlr *ctlr;
	int i, len;

	ctlr = edev->ctlr;

	ds = ctlr->rdh;
	while(!(ds->status & Own) && ds->status != 0){
		if(ds->status & Rerr){
			for(i = 0; i < Nrxstats; i++){
				if(ds->status & (1<<i))
					ctlr->rxstats[i]++;
			}
		}
		else if(bp = iallocb(Rdbsz+3)){
			len = ((ds->status & LengthMASK)>>LengthSHIFT)-4;
			ds->bp->wp = ds->bp->rp+len;
			etheriq(edev, ds->bp, 1);
			bp->rp = (uchar*)ROUNDUP((ulong)bp->rp, 4);
			ds->addr = PCIWADDR(bp->rp);
			ds->bp = bp;
		}
		ds->control = Rdbsz;
		ds->branch = 0;
		ds->status = 0;

		ds->prev->branch = PCIWADDR(ds);
		coherence();
		ds->prev->status = Own;

		ds = ds->next;
	}
	ctlr->rdh = ds;	/* resume here on the next call */
}
Example #8
static void
txstart(Ether* ether)
{
	Ctlr *ctlr;
	Block *bp;
	Des *des;
	int control;

	ctlr = ether->ctlr;
	while(ctlr->ntq < (ctlr->ntdr-1)){
		if(ctlr->setupbp){
			bp = ctlr->setupbp;
			ctlr->setupbp = 0;
			control = Ic|Set|BLEN(bp);
		}
		else{
			bp = qget(ether->oq);
			if(bp == nil)
				break;
			control = Ic|Lseg|Fseg|BLEN(bp);
		}

		ctlr->tdr[PREV(ctlr->tdrh, ctlr->ntdr)].control &= ~Ic;
		des = &ctlr->tdr[ctlr->tdrh];
		des->bp = bp;
		des->addr = PCIWADDR(bp->rp);
		des->control |= control;
		ctlr->ntq++;
		coherence();
		des->status = Own;
		csr32w(ctlr, 1, 0);
		ctlr->tdrh = NEXT(ctlr->tdrh, ctlr->ntdr);
	}

	if(ctlr->ntq > ctlr->ntqmax)
		ctlr->ntqmax = ctlr->ntq;
}
Example #9
static int
igbeinit(Ether* edev)
{
	int csr, i, r, ctrl, timeo;
	MiiPhy *phy;
	Ctlr *ctlr;

	ctlr = edev->ctlr;

	/*
	 * Set up the receive addresses.
	 * There are 16 addresses. The first should be the MAC address.
	 * The others are cleared and not marked valid (MS bit of Rah).
	 */
	csr = (edev->ea[3]<<24)|(edev->ea[2]<<16)|(edev->ea[1]<<8)|edev->ea[0];
	csr32w(ctlr, Ral, csr);
	csr = 0x80000000|(edev->ea[5]<<8)|edev->ea[4];
	csr32w(ctlr, Rah, csr);
	for(i = 1; i < 16; i++){
		csr32w(ctlr, Ral+i*8, 0);
		csr32w(ctlr, Rah+i*8, 0);
	}

	/*
	 * Clear the Multicast Table Array.
	 * It's a 4096 bit vector accessed as 128 32-bit registers.
	 */
	for(i = 0; i < 128; i++)
		csr32w(ctlr, Mta+i*4, 0);

	/*
	 * Receive initialisation.
	 * Mostly defaults from the datasheet, will
	 * need some tuning for performance:
	 *	Rctl	descriptor minimum threshold size
	 *		discard pause frames
	 *		strip CRC
	 * 	Rdtr	interrupt delay
	 * 	Rxdctl	all the thresholds
	 */
	csr32w(ctlr, Rctl, 0);

	/*
	 * Allocate the descriptor ring and load its
	 * address and length into the NIC.
	 */
	ctlr->rdba = xspanalloc(Nrdesc*sizeof(Rdesc), 128 /* was 16 */, 0);
	csr32w(ctlr, Rdbal, PCIWADDR(ctlr->rdba));
	csr32w(ctlr, Rdbah, 0);
	csr32w(ctlr, Rdlen, Nrdesc*sizeof(Rdesc));

	/*
	 * Initialise the ring head and tail pointers and
	 * populate the ring with Blocks.
	 * The datasheet says the tail pointer is set to beyond the last
	 * descriptor hardware can process, which implies the initial
	 * condition is Rdh == Rdt. However, experience shows Rdt must
	 * always be 'behind' Rdh; the replenish routine ensures this.
	 */
	ctlr->rdh = 0;
	csr32w(ctlr, Rdh, ctlr->rdh);
	ctlr->rdt = 0;
	csr32w(ctlr, Rdt, ctlr->rdt);
	ctlr->rb = malloc(sizeof(Block*)*Nrdesc);
	igbereplenish(ctlr);

	/*
	 * Set up Rctl but don't enable receiver (yet).
	 */
	csr32w(ctlr, Rdtr, 0);
	switch(ctlr->id){
	case i82540em:
	case i82540eplp:
	case i82541gi:
	case i82541pi:
	case i82546gb:
	case i82546eb:
	case i82547gi:
		csr32w(ctlr, Radv, 64);
		break;
	}
	csr32w(ctlr, Rxdctl, (8<<WthreshSHIFT)|(8<<HthreshSHIFT)|4);
	/*
	 * Enable checksum offload.
	 */
	csr32w(ctlr, Rxcsum, Tuofl|Ipofl|(ETHERHDRSIZE<<PcssSHIFT));

	csr32w(ctlr, Rctl, Dpf|Bsize2048|Bam|RdtmsHALF);
	/*
	 * VirtualBox does not like Rxt0,
	 * it continually interrupts.
	 */
	ctlr->im |= /*Rxt0|*/Rxo|Rxdmt0|Rxseq;

	/*
	 * Transmit initialisation.
	 * Mostly defaults from the datasheet, will
	 * need some tuning for performance. The normal mode will
	 * be full-duplex and things to tune for half-duplex are
	 *	Tctl	re-transmit on late collision
	 *	Tipg	all IPG times
	 *	Tbt	burst timer
	 *	Ait	adaptive IFS throttle
	 * and in general
	 *	Txdmac	packet prefetching
	 *	Ett	transmit early threshold
	 *	Tidv	interrupt delay value
	 *	Txdctl	all the thresholds
	 */
	csr32w(ctlr, Tctl, (0x0F<<CtSHIFT)|Psp|(66<<ColdSHIFT));	/* Fd */
	switch(ctlr->id){
	default:
		r = 6;
		break;
	case i82543gc:
	case i82544ei:
	case i82547ei:
	case i82540em:
	case i82540eplp:
	case i82541gi:
	case i82541pi:
	case i82546gb:
	case i82546eb:
	case i82547gi:
		r = 8;
		break;
	}
	csr32w(ctlr, Tipg, (6<<20)|(8<<10)|r);
	csr32w(ctlr, Ait, 0);
	csr32w(ctlr, Txdmac, 0);
	csr32w(ctlr, Tidv, 128);

	/*
	 * Allocate the descriptor ring and load its
	 * address and length into the NIC.
	 */
	ctlr->tdba = xspanalloc(Ntdesc*sizeof(Tdesc), 128 /* was 16 */, 0);
	csr32w(ctlr, Tdbal, PCIWADDR(ctlr->tdba));
	csr32w(ctlr, Tdbah, 0);
	csr32w(ctlr, Tdlen, Ntdesc*sizeof(Tdesc));

	/*
	 * Initialise the ring head and tail pointers.
	 */
	ctlr->tdh = 0;
	csr32w(ctlr, Tdh, ctlr->tdh);
	ctlr->tdt = 0;
	csr32w(ctlr, Tdt, ctlr->tdt);
	ctlr->tb = malloc(sizeof(Block*)*Ntdesc);
//	ctlr->im |= Txqe|Txdw;

	r = (4<<WthreshSHIFT)|(4<<HthreshSHIFT)|(8<<PthreshSHIFT);
	switch(ctlr->id){
	default:
		break;
	case i82540em:
	case i82540eplp:
	case i82547gi:
	case i82541pi:
	case i82546gb:
	case i82546eb:
	case i82541gi:
		r = csr32r(ctlr, Txdctl);
		r &= ~WthreshMASK;
		r |= Gran|(4<<WthreshSHIFT);

		csr32w(ctlr, Tadv, 64);
		break;
	}
	csr32w(ctlr, Txdctl, r);

	r = csr32r(ctlr, Tctl);
	r |= Ten;
	csr32w(ctlr, Tctl, r);

	igbeim(ctlr, ctlr->im);

	if(ctlr->mii == nil || ctlr->mii->curphy == nil) {
		print("igbe: no mii (yet)\n");
		return 0;
	}
	/* wait for the link to come up */
	for(timeo = 0; timeo < 3500; timeo++){
		if(miistatus(ctlr->mii) == 0)
			break;
		delay(10);
	}
	print("igbe: phy: ");
	phy = ctlr->mii->curphy;
	if (phy->fd)
		print("full duplex");
	else
		print("half duplex");
	print(", %d Mb/s\n", phy->speed);

	/*
	 * Flow control.
	 */
	ctrl = csr32r(ctlr, Ctrl);
	if(phy->rfc)
		ctrl |= Rfce;
	if(phy->tfc)
		ctrl |= Tfce;
	csr32w(ctlr, Ctrl, ctrl);

	return 0;
}
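
igbeinit finishes by loading the accumulated interrupt mask through igbeim, which txstart in Example #5 also calls. Its definition is not included in these excerpts; a likely shape, given here as an assumption, is a small locked read-modify-write of the Ims (Interrupt Mask Set) register:

/*
 * Assumed sketch of igbeim: merge bits into the software copy of the
 * interrupt mask and write the result to the Ims register.
 */
static void
igbeim(Ctlr* ctlr, int im)
{
	ilock(&ctlr->imlock);
	ctlr->im |= im;
	csr32w(ctlr, Ims, ctlr->im);
	iunlock(&ctlr->imlock);
}
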
Example #10
static void
interrupt(Ureg*, void* arg)
{
	Ctlr *ctlr;
	Ether *ether;
	int len, status;
	Des *des;
	Block *bp;

	ether = arg;
	ctlr = ether->ctlr;

	while((status = csr32r(ctlr, 5)) & (Nis|Ais)){
		/*
		 * Acknowledge the interrupts and mask-out
		 * the ones that are implicitly handled.
		 */
		csr32w(ctlr, 5, status);
		status &= (ctlr->mask & ~(Nis|Ti));

		if(status & Ais){
			if(status & Tps)
				ctlr->tps++;
			if(status & Tu)
				ctlr->tu++;
			if(status & Tjt)
				ctlr->tjt++;
			if(status & Ru)
				ctlr->ru++;
			if(status & Rps)
				ctlr->rps++;
			if(status & Rwt)
				ctlr->rwt++;
			status &= ~(Ais|Rwt|Rps|Ru|Tjt|Tu|Tps);
		}

		/*
		 * Received packets.
		 */
		if(status & Ri){
			des = &ctlr->rdr[ctlr->rdrx];
			while(!(des->status & Own)){
				if(des->status & Es){
					if(des->status & Of)
						ctlr->of++;
					if(des->status & Ce)
						ctlr->ce++;
					if(des->status & Cs)
						ctlr->cs++;
					if(des->status & Tl)
						ctlr->tl++;
					if(des->status & Rf)
						ctlr->rf++;
					if(des->status & De)
						ctlr->de++;
				}
				else if(bp = iallocb(Rbsz)){
					len = ((des->status & Fl)>>16)-4;
					des->bp->wp = des->bp->rp+len;
					etheriq(ether, des->bp, 1);
					des->bp = bp;
					des->addr = PCIWADDR(bp->rp);
				}

				des->control &= Er;
				des->control |= Rbsz;
				coherence();
				des->status = Own;

				ctlr->rdrx = NEXT(ctlr->rdrx, ctlr->nrdr);
				des = &ctlr->rdr[ctlr->rdrx];
			}
			status &= ~Ri;
		}

		/* transmit completion and error handling in the full driver are omitted from this excerpt */
	}
}
Example #11
static char*
qcmd(Ctlr *ctlr, uint qid, uint code, uchar *data, int size, Block *block)
{
	uchar *d, *c;
	int pad;
	TXQ *q;

	assert(qid < nelem(ctlr->tx));
	assert(size <= Tcmdsize-4);

	ilock(ctlr);
	q = &ctlr->tx[qid];
	while(q->n >= Ntx && !ctlr->broken){
		iunlock(ctlr);
		qlock(q);
		if(!waserror()){
			tsleep(q, txqready, q, 10);
			poperror();
		}
		qunlock(q);
		ilock(ctlr);
	}
	if(ctlr->broken){
		iunlock(ctlr);
		return "qcmd: broken";
	}
	q->n++;

	q->lastcmd = code;
	q->b[q->i] = block;
	c = q->c + q->i * Tcmdsize;
	d = q->d + q->i * Tdscsize;

	/* build command */
	c[0] = code;
	c[1] = 0;	/* flags */
	c[2] = q->i;
	c[3] = qid;

	if(size > 0)
		memmove(c+4, data, size);
	size += 4;

	memset(d, 0, Tdscsize);

	pad = size - 4;
	if(block != nil)
		pad += BLEN(block);
	pad = ((pad + 3) & ~3) - pad;

	put32(d, (pad << 28) | ((1 + (block != nil)) << 24)), d += 4;
	put32(d, PCIWADDR(c)), d += 4;
	put32(d, size), d += 4;

	if(block != nil){
		size = BLEN(block);
		put32(d, PCIWADDR(block->rp)), d += 4;
		put32(d, size), d += 4;
	}

	USED(d);

	coherence();

	q->i = (q->i+1) % Ntx;
	csr32w(ctlr, HbusTargWptr, (qid<<8) | q->i);

	iunlock(ctlr);

	return nil;
}
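
qcmd builds the hardware descriptor with put32, which stores a 32-bit value in the little-endian byte order the device expects regardless of host endianness. The helper is not part of this excerpt; a plausible definition, written as an assumption, is:

/* Assumed helper: store v at p in little-endian byte order. */
static void
put32(uchar *p, u32int v)
{
	p[0] = v;
	p[1] = v>>8;
	p[2] = v>>16;
	p[3] = v>>24;
}
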
Example #12
static char*
boot(Ctlr *ctlr)
{
	int i, n, size;
	uchar *dma, *p;
	FWImage *fw;
	char *err;

	fw = ctlr->fw;
	/* 16 byte padding may not be necessary. */
	size = ROUND(fw->init.data.size, 16) + ROUND(fw->init.text.size, 16);
	dma = mallocalign(size, 16, 0, 0);
	if(dma == nil)
		return "no memory for dma";

	if((err = niclock(ctlr)) != nil){
		free(dma);
		return err;
	}

	p = dma;
	memmove(p, fw->init.data.data, fw->init.data.size);
	coherence();
	prphwrite(ctlr, BsmDramDataAddr, PCIWADDR(p));
	prphwrite(ctlr, BsmDramDataSize, fw->init.data.size);
	p += ROUND(fw->init.data.size, 16);
	memmove(p, fw->init.text.data, fw->init.text.size);
	coherence();
	prphwrite(ctlr, BsmDramTextAddr, PCIWADDR(p));
	prphwrite(ctlr, BsmDramTextSize, fw->init.text.size);

	nicunlock(ctlr);
	if((err = niclock(ctlr)) != nil){
		free(dma);
		return err;
	}

	/* Copy microcode image into NIC memory. */
	p = fw->boot.text.data;
	n = fw->boot.text.size/4;
	for(i=0; i<n; i++, p += 4)
		prphwrite(ctlr, BsmSramBase+i*4, get32(p));

	prphwrite(ctlr, BsmWrMemSrc, 0);
	prphwrite(ctlr, BsmWrMemDst, 0);
	prphwrite(ctlr, BsmWrDwCount, n);

	/* Start boot load now. */
	prphwrite(ctlr, BsmWrCtrl, 1<<31);

	/* Wait for transfer to complete. */
	for(i=0; i<1000; i++){
		if((prphread(ctlr, BsmWrCtrl) & (1<<31)) == 0)
			break;
		delay(10);
	}
	if(i == 1000){
		nicunlock(ctlr);
		free(dma);
		return "bootcode timeout";
	}

	/* Enable boot after power up. */
	prphwrite(ctlr, BsmWrCtrl, 1<<30);
	nicunlock(ctlr);

	/* Now press "execute". */
	csr32w(ctlr, Reset, 0);

	/* Wait at most one second for first alive notification. */
	if(irqwait(ctlr, Ierr|Ialive, 5000) != Ialive){
		free(dma);
		return "init firmware boot failed";
	}
	free(dma);

	size = ROUND(fw->main.data.size, 16) + ROUND(fw->main.text.size, 16);
	dma = mallocalign(size, 16, 0, 0);
	if(dma == nil)
		return "no memory for dma";
	if((err = niclock(ctlr)) != nil){
		free(dma);
		return err;
	}
	p = dma;
	memmove(p, fw->main.data.data, fw->main.data.size);
	coherence();
	prphwrite(ctlr, BsmDramDataAddr, PCIWADDR(p));
	prphwrite(ctlr, BsmDramDataSize, fw->main.data.size);
	p += ROUND(fw->main.data.size, 16);
	memmove(p, fw->main.text.data, fw->main.text.size);
	coherence();
	prphwrite(ctlr, BsmDramTextAddr, PCIWADDR(p));
	prphwrite(ctlr, BsmDramTextSize, fw->main.text.size | (1<<31));
	nicunlock(ctlr);

	if(irqwait(ctlr, Ierr|Ialive, 5000) != Ialive){
		free(dma);
		return "main firmware boot failed";
	}
	free(dma);
	return postboot(ctlr);
}
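
The microcode copy loop in boot reads the image with get32, the counterpart of put32 above: it assembles a host-order value from four little-endian bytes. Again a sketch of the assumed definition:

/* Assumed helper: load a little-endian 32-bit value from p. */
static u32int
get32(uchar *p)
{
	return (u32int)p[3]<<24 | p[2]<<16 | p[1]<<8 | p[0];
}
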
Example #13
static char*
reset(Ctlr *ctlr)
{
	uchar rev;
	char *err;
	int i;

	if(ctlr->power)
		poweroff(ctlr);
	if((err = initring(ctlr)) != nil)
		return err;
	if((err = poweron(ctlr)) != nil)
		return err;

	/* Select VMAIN power source. */
	if((err = niclock(ctlr)) != nil)
		return err;
	prphwrite(ctlr, ApmgPs, (prphread(ctlr, ApmgPs) & ~PwrSrcMask) | PwrSrcVMain);
	nicunlock(ctlr);
	/* Spin until VMAIN gets selected. */
	for(i = 0; i < 5000; i++){
		if(csr32r(ctlr, GpioIn) & (1 << 9))
			break;
		delay(10);
	}

	/* Perform adapter initialization. */
	rev = ctlr->pdev->rid;
	if((rev & 0xc0) == 0x40)
		csr32w(ctlr, Cfg, csr32r(ctlr, Cfg) | AlmMb);
	else if(!(rev & 0x80))
		csr32w(ctlr, Cfg, csr32r(ctlr, Cfg) | AlmMm);

	if(ctlr->eeprom.cap == 0x80)
		csr32w(ctlr, Cfg, csr32r(ctlr, Cfg) | SkuMrc);

	if((ctlr->eeprom.rev & 0xf0) == 0xd0)
		csr32w(ctlr, Cfg, csr32r(ctlr, Cfg) | RevD);
	else
		csr32w(ctlr, Cfg, csr32r(ctlr, Cfg) & ~RevD);

	if(ctlr->eeprom.type > 1)
		csr32w(ctlr, Cfg, csr32r(ctlr, Cfg) | TypeB);

	/* Initialize RX ring. */
	if((err = niclock(ctlr)) != nil)
		return err;

	coherence();
	csr32w(ctlr, FhRxBase, PCIWADDR(ctlr->rx.p));
	csr32w(ctlr, FhRxRptrAddr, PCIWADDR(&ctlr->shared->next));
	csr32w(ctlr, FhRxWptr, 0);
	csr32w(ctlr, FhRxConfig,
		FhRxConfigDmaEna |
		FhRxConfigRdrbdEna |
		FhRxConfigWrstatusEna |
		FhRxConfigMaxfrag |
		(Nrxlog << FhRxConfigNrdbShift) |
		FhRxConfigIrqDstHost |
		(1 << FhRxConfigIrqRbthShift));
	USED(csr32r(ctlr, FhRssrTbl));
	csr32w(ctlr, FhRxWptr, (Nrx-1) & ~7);
	nicunlock(ctlr);

	/* Initialize TX rings. */
	if((err = niclock(ctlr)) != nil)
		return err;
	prphwrite(ctlr, AlmSchedMode, 2);
	prphwrite(ctlr, AlmSchedArastat, 1);
	prphwrite(ctlr, AlmSchedTxfact, 0x3f);
	prphwrite(ctlr, AlmSchedBP1, 0x10000);
	prphwrite(ctlr, AlmSchedBP2, 0x30002);
	prphwrite(ctlr, AlmSchedTxf4mf, 4);
	prphwrite(ctlr, AlmSchedTxf5mf, 5);
	csr32w(ctlr, FhTxBase, PCIWADDR(ctlr->shared));
	csr32w(ctlr, FhMsgConfig, 0xffff05a5);
	for(i = 0; i < 6; i++){
		csr32w(ctlr, FhCbbcCtrl+i*8, 0);
		csr32w(ctlr, FhCbbcBase+i*8, 0);
		csr32w(ctlr, FhTxConfig+i*32, 0x80200008);
	}
	nicunlock(ctlr);
	USED(csr32r(ctlr, FhTxBase));

	csr32w(ctlr, UcodeGp1Clr, UcodeGp1RfKill);
	csr32w(ctlr, UcodeGp1Clr, UcodeGp1CmdBlocked);

	ctlr->broken = 0;
	ctlr->wait.m = 0;
	ctlr->wait.w = 0;

	ctlr->ie = Idefmask;
	csr32w(ctlr, Imr, ctlr->ie);
	csr32w(ctlr, Isr, ~0);

	csr32w(ctlr, UcodeGp1Clr, UcodeGp1RfKill);
	csr32w(ctlr, UcodeGp1Clr, UcodeGp1RfKill);

	return nil;
}
Example #14
static void
vt6102transmit(Ether* edev)
{
	Block *bp;
	Ctlr *ctlr;
	Ds *ds, *next;
	int control, i, o, prefix, size, tdused, timeo;

	ctlr = edev->ctlr;

	ilock(&ctlr->tlock);

	/*
	 * Free any completed packets
	 */
	ds = ctlr->tdh;
	for(tdused = ctlr->tdused; tdused > 0; tdused--){
		/*
		 * For some errors the chip will turn the Tx engine
		 * off. Wait for that to happen.
		 * Could reset and re-init the chip here if it doesn't
		 * play fair.
		 * To do: adjust Tx FIFO threshold on underflow.
		 */
		if(ds->status & (Abt|Tbuff|Udf)){
			for(timeo = 0; timeo < 1000; timeo++){
				if(!(csr16r(ctlr, Cr) & Txon))
					break;
				microdelay(1);
			}
			ds->status = Own;
			csr32w(ctlr, Txdaddr, PCIWADDR(ds));
		}

		if(ds->status & Own)
			break;
		ds->addr = 0;
		ds->branch = 0;

		if(ds->bp != nil){
			freeb(ds->bp);
			ds->bp = nil;
		}
		for(i = 0; i < Ntxstats-1; i++){
			if(ds->status & (1<<i))
				ctlr->txstats[i]++;
		}
		ctlr->txstats[i] += (ds->status & NcrMASK)>>NcrSHIFT;

		ds = ds->next;
	}
	ctlr->tdh = ds;

	/*
	 * Try to fill the ring back up.
	 */
	ds = ctlr->tdt;
	while(tdused < ctlr->ntd-2){
		if((bp = qget(edev->oq)) == nil)
			break;
		tdused++;

		size = BLEN(bp);
		prefix = 0;

		if(o = (((int)bp->rp) & 0x03)){
			prefix = Txcopy-o;
			if(prefix > size)
				prefix = size;
			memmove(ds->bounce, bp->rp, prefix);
			ds->addr = PCIWADDR(ds->bounce);
			bp->rp += prefix;
			size -= prefix;
		}

		next = ds->next;
		ds->branch = PCIWADDR(ds->next);

		if(size){
			if(prefix){
				next->bp = bp;
				next->addr = PCIWADDR(bp->rp);
				next->branch = PCIWADDR(next->next);
				next->control = Edp|Chain|((size<<TbsSHIFT) & TbsMASK);

				control = Stp|Chain|((prefix<<TbsSHIFT) & TbsMASK);

				next = next->next;
				tdused++;
				ctlr->tsplit++;
			}
			else{
				ds->bp = bp;
				ds->addr = PCIWADDR(bp->rp);
				control = Edp|Stp|((size<<TbsSHIFT) & TbsMASK);
				ctlr->taligned++;
			}
		}
		else{
			freeb(bp);
			control = Edp|Stp|((prefix<<TbsSHIFT) & TbsMASK);
			ctlr->tcopied++;
		}

		ds->control = control;
		if(tdused >= ctlr->ntd-2){
			ds->control |= Ic;
			ctlr->txdw++;
		}
		coherence();
		ds->status = Own;

		ds = next;
	}
	ctlr->tdt = ds;
	ctlr->tdused = tdused;
	if(ctlr->tdused)
		csr16w(ctlr, Cr, Tdmd|ctlr->cr);

	iunlock(&ctlr->tlock);
}
Example #15
static void
vt6102attach(Ether* edev)
{
	int dsz, i;
	Ctlr *ctlr;
	Ds *ds, *prev;
	uchar *alloc, *bounce;
	char name[KNAMELEN];

	ctlr = edev->ctlr;
	qlock(&ctlr->alock);
	if(ctlr->alloc != nil){
		qunlock(&ctlr->alock);
		return;
	}

	/*
	 * Descriptor and bounce-buffer space.
	 * Must all be aligned on a 4-byte boundary,
	 * but try to align on cache-lines.
	 */
	ctlr->nrd = Nrd;
	ctlr->ntd = Ntd;
	dsz = ROUNDUP(sizeof(Ds), ctlr->cls);
	alloc = mallocalign((ctlr->nrd+ctlr->ntd)*dsz + ctlr->ntd*Txcopy, dsz, 0, 0);
	if(alloc == nil){
		qunlock(&ctlr->alock);
		error(Enomem);
	}
	ctlr->alloc = alloc;

	ctlr->rd = (Ds*)alloc;

	if(waserror()){
		ds = ctlr->rd;
		for(i = 0; i < ctlr->nrd; i++){
			if(ds->bp != nil){
				freeb(ds->bp);
				ds->bp = nil;
			}
			if((ds = ds->next) == nil)
				break;
		}
		free(ctlr->alloc);
		ctlr->alloc = nil;
		qunlock(&ctlr->alock);
		nexterror();
	}

	prev = ctlr->rd + ctlr->nrd-1;
	for(i = 0; i < ctlr->nrd; i++){
		ds = (Ds*)alloc;
		alloc += dsz;

		ds->control = Rdbsz;
		ds->branch = PCIWADDR(alloc);

		ds->bp = iallocb(Rdbsz+3);
		if(ds->bp == nil)
			error("vt6102: can't allocate receive ring\n");
		ds->bp->rp = (uchar*)ROUNDUP((ulong)ds->bp->rp, 4);
		ds->addr = PCIWADDR(ds->bp->rp);

		ds->next = (Ds*)alloc;
		ds->prev = prev;
		prev = ds;

		ds->status = Own;
	}
	prev->branch = 0;
	prev->next = ctlr->rd;
	prev->status = 0;
	ctlr->rdh = ctlr->rd;

	ctlr->td = (Ds*)alloc;
	prev = ctlr->td + ctlr->ntd-1;
	bounce = alloc + ctlr->ntd*dsz;
	for(i = 0; i < ctlr->ntd; i++){
		ds = (Ds*)alloc;
		alloc += dsz;

		ds->bounce = bounce;
		bounce += Txcopy;
		ds->next = (Ds*)alloc;
		ds->prev = prev;
		prev = ds;
	}
	prev->next = ctlr->td;
	ctlr->tdh = ctlr->tdt = ctlr->td;
	ctlr->tdused = 0;

	ctlr->cr = Dpoll|Rdmd|Txon|Rxon|Strt;
	/*Srci|Abti|Norbf|Pktrace|Ovfi|Udfi|Be|Ru|Tu|Txe|Rxe|Ptx|Prx*/
	ctlr->imr = Abti|Norbf|Pktrace|Ovfi|Udfi|Be|Ru|Tu|Txe|Rxe|Ptx|Prx;

	ilock(&ctlr->clock);
	csr32w(ctlr, Rxdaddr, PCIWADDR(ctlr->rd));
	csr32w(ctlr, Txdaddr, PCIWADDR(ctlr->td));
	csr16w(ctlr, Isr, ~0);
	csr16w(ctlr, Imr, ctlr->imr);
	csr16w(ctlr, Cr, ctlr->cr);
	iunlock(&ctlr->clock);

	snprint(name, KNAMELEN, "#l%dlproc", edev->ctlrno);
	kproc(name, vt6102lproc, edev);

	qunlock(&ctlr->alock);
	poperror();
}
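
vt6102attach, like vt6102receive in Example #7, leans on ROUNDUP to keep descriptors and receive pointers 4-byte (or cache-line) aligned. The macro lives in the kernel headers rather than in this driver; its usual power-of-two form, quoted here as an assumption, is:

/* Assumed definition: round s up to a multiple of sz, where sz is a power of two. */
#define ROUNDUP(s, sz)	(((s)+((sz)-1)) & ~((sz)-1))
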
Example #16
static void
i82563init(Ether* edev)
{
	Ctlr *ctlr;
	u32int r, rctl;

	ctlr = edev->ctlr;

	rctl = Dpf | Bsize2048 | Bam | RdtmsHALF;
	if(ctlr->type == i82575){
		/*
		 * Setting Qenable in Rxdctl does not
		 * appear to stick unless Ren is on.
		 */
		csr32w(ctlr, Rctl, Ren|rctl);
		r = csr32r(ctlr, Rxdctl);
		r |= Qenable;
		csr32w(ctlr, Rxdctl, r);
	}
	csr32w(ctlr, Rctl, rctl);
	ctlr->rdba = mallocalign(Nrdesc*sizeof(Rdesc), 128, 0, 0);
	csr32w(ctlr, Rdbal, PCIWADDR(ctlr->rdba));
	csr32w(ctlr, Rdbah, 0);
	csr32w(ctlr, Rdlen, Nrdesc*sizeof(Rdesc));
	ctlr->rdh = 0;
	csr32w(ctlr, Rdh, ctlr->rdh);
	ctlr->rdt = 0;
	csr32w(ctlr, Rdt, ctlr->rdt);
	ctlr->rb = malloc(sizeof(Block*)*Nrdesc);
	i82563replenish(ctlr);
	csr32w(ctlr, Rdtr, 0);
	csr32w(ctlr, Radv, 0);

	if(ctlr->type == i82573)
		csr32w(ctlr, Ert, 1024/8);
	if(ctlr->type == i82566 || ctlr->type == i82567)
		csr32w(ctlr, Pbs, 16);
	i82563im(ctlr, Rxt0 | Rxo | Rxdmt0 | Rxseq | Ack);

	csr32w(ctlr, Tctl, 0x0F<<CtSHIFT | Psp | 0x3f<<ColdSHIFT | Mulr);
	csr32w(ctlr, Tipg, 6<<20 | 8<<10 | 8);
	csr32w(ctlr, Tidv, 1);

	ctlr->tdba = mallocalign(Ntdesc*sizeof(Tdesc), 128, 0, 0);
	memset(ctlr->tdba, 0, Ntdesc*sizeof(Tdesc));
	csr32w(ctlr, Tdbal, PCIWADDR(ctlr->tdba));

	csr32w(ctlr, Tdbah, 0);
	csr32w(ctlr, Tdlen, Ntdesc*sizeof(Tdesc));
	ctlr->tdh = 0;
	csr32w(ctlr, Tdh, ctlr->tdh);
	ctlr->tdt = 0;
	csr32w(ctlr, Tdt, ctlr->tdt);
	ctlr->tb = malloc(sizeof(Block*)*Ntdesc);

	r = csr32r(ctlr, Txdctl);
	r &= ~(WthreshMASK|PthreshMASK);
	r |= 4<<WthreshSHIFT | 4<<PthreshSHIFT;
	if(ctlr->type == i82575)
		r |= Qenable;
	csr32w(ctlr, Txdctl, r);

	/*
	 * Don't enable checksum offload.  In practice, it interferes with
	 * tftp booting on at least the 82575.
	 */
//	csr32w(ctlr, Rxcsum, Tuofl | Ipofl | ETHERHDRSIZE<<PcssSHIFT);
	csr32w(ctlr, Rxcsum, 0);
	r = csr32r(ctlr, Tctl);
	r |= Ten;
	csr32w(ctlr, Tctl, r);
}
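
All of these drivers reach the NIC through csr32r and csr32w accessors over a memory-mapped register window. The exact definitions vary from driver to driver; a representative sketch, assuming nic is a u32int* mapping of the device's register BAR and r is a byte offset, is:

/* Sketch of the register accessors the examples assume: read or write a 32-bit CSR at byte offset r. */
#define csr32r(c, r)	(*((c)->nic+((r)/4)))
#define csr32w(c, r, v)	(*((c)->nic+((r)/4)) = (v))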