/*
 * Handle a receive-complete event from the wavelan card: read the
 * frame header, copy the payload into a Block and hand it to the
 * ethernet input queue.  Also maintains smoothed signal/noise
 * estimates from the per-frame quality info.
 */
static void
w_rxdone(Ether* ether)
{
	Ctlr* ctlr = (Ctlr*) ether->ctlr;
	int len, sp;
	WFrame f;
	Block* bp=0;
	Etherpkt* ep;

	/* id of the card buffer holding the received frame */
	sp = csr_ins(ctlr, WR_RXId);
	len = w_read(ctlr, sp, 0, &f, sizeof(f));
	if(len == 0){
		DEBUG("wavelan: read frame error\n");
		goto rxerror;
	}
	if(f.sts&WF_Err){
		goto rxerror;
	}
	switch(f.sts){
	/* encapsulated 802.11 frames: rebuild an ethernet header first */
	case WF_1042:
	case WF_Tunnel:
	case WF_WMP:
		len = f.dlen + WSnapHdrLen;
		bp = iallocb(ETHERHDRSIZE + len + 2);
		if(!bp)
			goto rxerror;
		ep = (Etherpkt*) bp->wp;
		memmove(ep->d, f.addr1, Eaddrlen);
		memmove(ep->s, f.addr2, Eaddrlen);
		memmove(ep->type,&f.type,2);
		bp->wp += ETHERHDRSIZE;
		if(w_read(ctlr, sp, WF_802_11_Off, bp->wp, len+2) == 0){
			DEBUG("wavelan: read 802.11 error\n");
			goto rxerror;
		}
		/* trim to ethernet header plus the actual data length */
		bp->wp = bp->rp+(ETHERHDRSIZE+f.dlen);
		break;
	default:
		/* 802.3 frame: arrives with an ethernet header already */
		len = ETHERHDRSIZE + f.dlen + 2;
		bp = iallocb(len);
		if(!bp)
			goto rxerror;
		if(w_read(ctlr, sp, WF_802_3_Off, bp->wp, len) == 0){
			DEBUG("wavelan: read 800.3 error\n");
			goto rxerror;
		}
		bp->wp += len;
	}
	ctlr->nrx++;
	etheriq(ether,bp,1);
	/* exponential moving average: 15/16 old value, 1/16 new sample */
	ctlr->signal = ((ctlr->signal*15)+((f.qinfo>>8) & 0xFF))/16;
	ctlr->noise = ((ctlr->noise*15)+(f.qinfo & 0xFF))/16;
	return;
rxerror:
	/* bp may still be 0 here; presumably freeb is nil-safe —
	 * the early gotos rely on that */
	freeb(bp);
	ctlr->nrxerr++;
}
/*
 * Transmit a packet on ether ctlrno: copy it into a fresh block,
 * stamp the card's station address into the source field, queue it
 * on the output queue and kick the transmitter.
 * Returns 1 on success, 0 if the controller can't be attached, the
 * output queue is backed up, or no buffer is available.
 */
int
ethertxpkt(int ctlrno, Etherpkt *pkt, int len, int)
{
	Ctlr *ctlr;
	Block *b;
	int s;

	if((ctlr = attach(ctlrno)) == 0)
		return 0;

	if(qlen(ctlr->oq) > 16*1024){
		print("ether%d: tx queue full\n", ctlrno);
		return 0;
	}

	/* BUG FIX: iallocb can fail; don't dereference a nil block */
	b = iallocb(sizeof(Etherpkt));
	if(b == 0)
		return 0;
	memmove(b->wp, pkt, len);
	/* force our station address into the source field */
	memmove(((Etherpkt*)b->wp)->s, ctlr->card.ea, Eaddrlen);
	b->wp += len;
	qbwrite(ctlr->oq, b);

	s = splhi();
	(*ctlr->card.transmit)(ctlr);
	splx(s);

	return 1;
}
/* * used by print() to write to a queue. Since we may be splhi or not in * a process, don't qlock. */ int qiwrite(struct queue *q, void *vp, int len) { int n, sofar, dowakeup; struct block *b; uint8_t *p = vp; dowakeup = 0; sofar = 0; do { n = len - sofar; if (n > Maxatomic) n = Maxatomic; b = iallocb(n); if (b == NULL) break; /* TODO consider extra_data */ memmove(b->wp, p + sofar, n); /* this adjusts BLEN to be n, or at least it should */ b->wp += n; assert(n == BLEN(b)); qibwrite(q, b); sofar += n; } while (sofar < len && (q->state & Qmsg) == 0); return sofar; }
/*
 * Top up the receive descriptor ring with 2048-byte buffers,
 * stopping one slot short of the hardware's head index, then
 * publish the new tail to the chip via the Rdt register.
 */
static void
igbereplenish(Ctlr* ctlr)
{
	int rdt;
	Block *bp;
	Rdesc *rdesc;

	rdt = ctlr->rdt;
	while(NEXT(rdt, Nrdesc) != ctlr->rdh){
		rdesc = &ctlr->rdba[rdt];
		if(ctlr->rb[rdt] != nil){
			/* nothing to do */
		}
		else if((bp = iallocb(2048)) != nil){
			/* remember the block and point the descriptor at its data */
			ctlr->rb[rdt] = bp;
			rdesc->addr[0] = PCIWADDR(bp->rp);
			rdesc->addr[1] = 0;
		}
		else
			break;	/* out of memory: retry on a later call */
		/* clear status to hand the descriptor back to the hardware */
		rdesc->status = 0;
		rdt = NEXT(rdt, Nrdesc);
	}
	ctlr->rdt = rdt;
	csr32w(ctlr, Rdt, rdt);
}
/*
 * used by print() to write to a queue. Since we may be splhi or not in
 * a process, don't qlock.
 *
 * NOTE(review): an earlier version of this comment claimed adjacent
 * blocks are merged when block n+1 fits in block n's free space; the
 * code below does no such merging — each chunk gets its own block.
 * Writes are silently dropped once the queue holds 16K bytes.
 */
int
qiwrite(Queue *q, void *vp, int len)
{
	int n, sofar, dowakeup;
	Block *b;
	uchar *p = vp;

	dowakeup = 0;
	sofar = 0;
	do {
		/* at most Maxatomic bytes per block */
		n = len-sofar;
		if(n > Maxatomic)
			n = Maxatomic;

		b = iallocb(n);
		if(b == nil)
			break;
		memmove(b->wp, p+sofar, n);
		b->wp += n;

		ilock(q);

		/* we use an artificially high limit for kernel prints since anything
		 * over the limit gets dropped */
		if(q->dlen >= 16*1024){
			iunlock(q);
			freeb(b);
			break;
		}

		QDEBUG checkb(b, "qiwrite");
		if(q->bfirst)
			q->blast->next = b;
		else
			q->bfirst = b;
		q->blast = b;
		q->len += BALLOC(b);
		q->dlen += n;

		if(q->state & Qstarve){
			q->state &= ~Qstarve;
			dowakeup = 1;
		}

		iunlock(q);

		/* wake a sleeping reader outside the lock */
		if(dowakeup){
			if(q->kick)
				q->kick(q->arg);
			wakeup(&q->rr);
		}

		sofar += n;
	} while(sofar < len && (q->state & Qmsg) == 0);

	return sofar;
}
/*
 * Put a single character on a queue.  The character is silently
 * dropped if no interrupt-time buffer is available — callers of
 * this best-effort path cannot handle failure.
 */
void
qbputc(Queue *q, int c)
{
	Block *b;

	/* BUG FIX: iallocb can return nil; don't dereference it */
	b = iallocb(1);
	if(b == 0)
		return;
	*b->wp++ = c;
	qbwrite(q, b);
}
/*
 * Non-blocking enqueue for a producer: add len bytes to q, failing
 * rather than waiting.  Returns len on success, -1 if the queue is
 * closed, over its limit, or no buffer can be allocated.
 */
int
qproduce(Queue *q, void *vp, int len)
{
	Block *nb;
	int wake;
	uchar *src = vp;

	/* sync with qread */
	wake = 0;
	lock(&q->l);

	if(q->state & Qclosed){
		unlock(&q->l);
		return -1;
	}

	/* no waiting receivers, room in buffer? */
	if(q->len >= q->limit){
		q->state |= Qflow;
		unlock(&q->l);
		return -1;
	}

	/* copy the data into a fresh block */
	nb = iallocb(len);
	if(nb == 0){
		unlock(&q->l);
		print("qproduce: iallocb failed\n");
		return -1;
	}
	memmove(nb->wp, src, len);
	nb->wp += len;

	/* append to the block chain */
	if(q->bfirst == 0)
		q->bfirst = nb;
	else
		q->blast->next = nb;
	q->blast = nb;
	/* nb->next = 0; done by allocb() */
	q->len += BALLOC(nb);
	q->dlen += BLEN(nb);
	QDEBUG checkb(nb, "qproduce");

	if(q->state & Qstarve){
		q->state &= ~Qstarve;
		wake = 1;
	}
	if(q->len >= q->limit)
		q->state |= Qflow;
	unlock(&q->l);

	/* wake a starving reader outside the lock */
	if(wake)
		Wakeup(&q->rr);

	return len;
}
/*
 * Allocate a block of the given size, raising an error via
 * exhausted() when the allocator has no memory.
 */
Block*
allocb(int size)
{
	Block *b;

	if((b = iallocb(size)) == 0)
		exhausted("allocb");
	return b;
}
/*
 * used by print() to write to a queue.  Since we may be splhi or
 * not in a process, don't qlock.  Data is split into blocks of at
 * most Maxatomic bytes; for message queues (Qmsg) only the first
 * chunk is written.  Returns the number of bytes queued.
 */
int
qiwrite(Queue *q, void *vp, int len)
{
	Block *nb;
	uchar *src = vp;
	int chunk, done, wake;

	done = 0;
	wake = 0;
	do {
		chunk = len - done;
		if(chunk > Maxatomic)
			chunk = Maxatomic;

		nb = iallocb(chunk);
		if (nb == 0) {
			print("qiwrite: iallocb failed\n");
			break;
		}
		memmove(nb->wp, src + done, chunk);
		nb->wp += chunk;

		lock(&q->l);
		QDEBUG checkb(nb, "qiwrite");
		if(q->bfirst == 0)
			q->bfirst = nb;
		else
			q->blast->next = nb;
		q->blast = nb;
		q->len += BALLOC(nb);
		q->dlen += chunk;
		if(q->state & Qstarve){
			q->state &= ~Qstarve;
			wake = 1;
		}
		unlock(&q->l);

		/* kick/wake a reader outside the lock */
		if(wake){
			if(q->kick)
				q->kick(q->arg);
			Wakeup(&q->rr);
		}

		done += chunk;
	} while(done < len && (q->state & Qmsg) == 0);

	return done;
}
/*
 * Read one received packet out of the chip's FIFO and pass it to
 * the ethernet input queue.
 * Assumes ctlr is locked and bank 2 is selected;
 * leaves bank 2 selected on return.
 */
static void
receive(Ether* ether)
{
	int port;
	Block* bp;
	int pktno, status, len;

	port = ether->port;
	pktno = ins(port + FifoPorts);
	if (pktno & FpRxEmpty)
		return;

	/* read the status word and byte count from the packet header */
	outs(port + Pointer, PtrRead | PtrRcv | PtrAutoInc);
	status = ins(port + Data1);
	/*
	 * BUG FIX: '-' binds tighter than '&', so the original
	 * expression "ins(...) & RxLenMask - HdrSize" masked the
	 * count with (RxLenMask - HdrSize).  The byte count must be
	 * masked first, then the header size subtracted.
	 */
	len = (ins(port + Data1) & RxLenMask) - HdrSize;
	if (status & RsOddFrame)
		len++;

	/* on receive error, or with no buffer available, drop the packet */
	if ((status & RsError) || (bp = iallocb(len)) == 0) {
		if (status & RsAlgnErr)
			ether->frames++;
		if (status & (RsTooShort | RsTooLong))
			ether->buffs++;
		if (status & RsBadCrc)
			ether->crcs++;
		outs(port + MmuCmd, McRelease);
		return;
	}

	/* packet length is padded to word */
	inss(port + Data1, bp->rp, len / 2);
	bp->wp = bp->rp + (len & ~1);
	if (len & 1) {
		*bp->wp = inb(port + Data1);
		bp->wp++;
	}
	etheriq(ether, bp, 1);
	ether->inpackets++;

	outs(port + MmuCmd, McRelease);
}
/*
 * Allocate and install a receive buffer for ring slot i.
 * Returns 0 on success, -1 if no memory is available.
 */
static int
rbplant(Ctlr *ctlr, int i)
{
	Block *b;

	/* over-allocate so the pointers can be rounded up to a
	 * 128-byte boundary */
	b = iallocb(Rbufsize+127);
	if(b == nil)
		return -1;
	b->rp = b->wp = (uchar*)((((uintptr)b->base+127)&~127));
	/* NOTE(review): clears the first Rdscsize bytes of the buffer —
	 * presumably the device deposits a descriptor/status header
	 * there; confirm against the hardware manual */
	memset(b->rp, 0, Rdscsize);
	coherence();	/* ensure the cleared memory is visible before DMA */
	ctlr->rx.b[i] = b;
	ctlr->rx.p[i] = PCIWADDR(b->rp);
	return 0;
}
/*
 * Send the base station scan info to any readers: drain the scan
 * result from the card, format one line per sample and qpass it to
 * every netfile that is in scan mode.
 */
static void
w_scaninfo(Ether* ether, Ctlr *ctlr, int len)
{
	int i, j;
	Netfile **ep, *f, **fp;
	Block *bp;
	WScan *wsp;
	ushort *scanbuf;

	/* len is in 16-bit words; read the whole result from the card */
	scanbuf = malloc(len*2);
	if(scanbuf == nil)
		return;
	for (i = 0; i < len ; i++)
		scanbuf[i] = csr_ins(ctlr, WR_Data1);

	/* calculate number of samples: each one is 25 words */
	len /= 25;
	if(len == 0)
		goto out;

	/* ether->scan counts scanning readers; stop once all are served */
	i = ether->scan;
	/*
	 * BUG FIX: the address-of expression had been mangled by an
	 * HTML-entity corruption ("ðer->f[Ntypes]"); restored to
	 * &ether->f[Ntypes].
	 */
	ep = &ether->f[Ntypes];
	for(fp = ether->f; fp < ep && i > 0; fp++){
		f = *fp;
		if(f == nil || f->scan == 0)
			continue;

		/* 100 bytes per sample is ample for one formatted line */
		bp = iallocb(100*len);
		if(bp == nil)
			break;
		for(j = 0; j < len; j++){
			wsp = (WScan*)(&scanbuf[j*25]);
			if(wsp->ssid_len > 32)
				wsp->ssid_len = 32;	/* clamp untrusted length */
			bp->wp = (uchar*)seprint((char*)bp->wp, (char*)bp->lim,
				"ssid=%.*s;bssid=%E;signal=%d;noise=%d;chan=%d%s\n",
				wsp->ssid_len, wsp->ssid, wsp->bssid, wsp->signal,
				wsp->noise, wsp->chan, (wsp->capinfo&(1<<4))?";wep":"");
		}
		qpass(f->in, bp);
		i--;
	}
out:
	free(scanbuf);
}
static void vt6102receive(Ether* edev) { Ds *ds; Block *bp; Ctlr *ctlr; int i, len; ctlr = edev->ctlr; ds = ctlr->rdh; while(!(ds->status & Own) && ds->status != 0){ if(ds->status & Rerr){ for(i = 0; i < Nrxstats; i++){ if(ds->status & (1<<i)) ctlr->rxstats[i]++; } } else if(bp = iallocb(Rdbsz+3)){ len = ((ds->status & LengthMASK)>>LengthSHIFT)-4; ds->bp->wp = ds->bp->rp+len; etheriq(edev, ds->bp, 1); bp->rp = (uchar*)ROUNDUP((ulong)bp->rp, 4); ds->addr = PCIWADDR(bp->rp); ds->bp = bp; } ds->control = Rdbsz; ds->branch = 0; ds->status = 0; ds->prev->branch = PCIWADDR(ds); coherence(); ds->prev->status = Own; ds = ds->next; }
static void interrupt(Ureg*, void* arg) { Ctlr *ctlr; Ether *ether; int len, status; Des *des; Block *bp; ether = arg; ctlr = ether->ctlr; while((status = csr32r(ctlr, 5)) & (Nis|Ais)){ /* * Acknowledge the interrupts and mask-out * the ones that are implicitly handled. */ csr32w(ctlr, 5, status); status &= (ctlr->mask & ~(Nis|Ti)); if(status & Ais){ if(status & Tps) ctlr->tps++; if(status & Tu) ctlr->tu++; if(status & Tjt) ctlr->tjt++; if(status & Ru) ctlr->ru++; if(status & Rps) ctlr->rps++; if(status & Rwt) ctlr->rwt++; status &= ~(Ais|Rwt|Rps|Ru|Tjt|Tu|Tps); } /* * Received packets. */ if(status & Ri){ des = &ctlr->rdr[ctlr->rdrx]; while(!(des->status & Own)){ if(des->status & Es){ if(des->status & Of) ctlr->of++; if(des->status & Ce) ctlr->ce++; if(des->status & Cs) ctlr->cs++; if(des->status & Tl) ctlr->tl++; if(des->status & Rf) ctlr->rf++; if(des->status & De) ctlr->de++; } else if(bp = iallocb(Rbsz)){ len = ((des->status & Fl)>>16)-4; des->bp->wp = des->bp->rp+len; etheriq(ether, des->bp, 1); des->bp = bp; des->addr = PCIWADDR(bp->rp); } des->control &= Er; des->control |= Rbsz; coherence(); des->status = Own; ctlr->rdrx = NEXT(ctlr->rdrx, ctlr->nrdr); des = &ctlr->rdr[ctlr->rdrx]; } status &= ~Ri; }
/*
 * Interrupt service for the saturn ethernet: read and acknowledge
 * the event register, then handle transmit-done, receive-done and
 * transmit-retry events in turn.
 */
static void
interrupt(Ureg*, void*arg)
{
	Ctlr*ctlr;
	Ether*ether = arg;
	Etherpkt*pkt;
	ushort ie;
	int rx, len;
	Block *b;

	ctlr = ether->ctlr;
	if(!ctlr->active)
		return;	/* not ours */
	ctlr->interrupts++;
	ilock(ctlr);

	/* read and acknowledge all pending events */
	ie = *eisr;
	*eisr = ie;
	intack();
	if(ie==0)
		iprint("interrupt: no interrupt source?\n");

	if(ie&Ei_txdone){
		/* only meaningful if the transmitter has actually stopped */
		if((*etcr&Etcr_txstart)==0){
			if(ctlr->txbusy){
				ctlr->txbusy = 0;
				ctlr->ntx--;
				/* NOTE(review): txfull looks like a wrapping
				 * index into the Ntx-slot tx ring — confirm */
				ctlr->txfull++;
				if(ctlr->txfull==Ntx)
					ctlr->txfull = 0;
			}
			txrestart(ctlr);
			txfill(ether, ctlr);
			txrestart(ctlr);
		}
		else
			iprint("interrupt: bogus tx interrupt\n");
		ie &= ~Ei_txdone;
	}

	if(ie&Ei_rxdone){
		/* consume frames until we catch up with the hardware pointer */
		rx=*ersr&Ersr_rxfpmask;
		while(ctlr->rxlast!=rx){
			ctlr->rxlast++;
			if(ctlr->rxlast >= Nrx)
				ctlr->rxlast = 0;
			pkt = (Etherpkt*)(Ethermem+ctlr->rxlast*Etherfsize);
			/* each frame slot starts with a ushort byte count */
			len = *(ushort*)pkt;
			if((b = iallocb(len+sizeof(ushort))) != nil){
				memmove(b->wp, pkt, len+sizeof(ushort));
				b->rp += sizeof(ushort);	/* skip length prefix */
				b->wp = b->rp + len;
				etheriq(ether, b, 1);
			}else
				ether->soverflows++;
			/* re-sample: more frames may have arrived meanwhile */
			rx=*ersr&Ersr_rxfpmask;
		}
		ie &= ~Ei_rxdone;
	}

	if(ie&Ei_txretry){
		iprint("ethersaturn: txretry!\n");
		ie &= ~Ei_txretry;
		ctlr->txbusy = 0;
		txrestart(ctlr);
	}

	/* carrier-sense transitions are deliberately ignored */
	ie &= ~Ei_txcrs;
	if(ie)
		iprint("interrupt: unhandled interrupts %.4uX\n", ie);
	iunlock(ctlr);
}
/*
 * Non-blocking producer enqueue: add len bytes to q without ever
 * waiting.  Returns len on success, -1 when the queue is over its
 * limit, and 0 when no block can be allocated.  When Qcoalesce is
 * set, data is appended to the last block if it has room, saving
 * per-block overhead.
 */
int qproduce(struct queue *q, void *vp, int len)
{
	struct block *b;
	int dowakeup;
	uint8_t *p = vp;

	/* sync with qread */
	dowakeup = 0;
	spin_lock_irqsave(&q->lock);

	/* no waiting receivers, room in buffer? */
	if (q->len >= q->limit) {
		q->state |= Qflow;
		spin_unlock_irqsave(&q->lock);
		return -1;
	}

	/* save in buffer */
	/* use Qcoalesce here to save storage */
	// TODO: Consider removing the Qcoalesce flag and force a coalescing
	// strategy by default.
	b = q->blast;
	/* safe: when bfirst is NULL the condition short-circuits
	 * before the stale b is dereferenced */
	if ((q->state & Qcoalesce) == 0 || q->bfirst == NULL
	    || b->lim - b->wp < len) {
		/* need a new block */
		b = iallocb(len);
		if (b == 0) {
			spin_unlock_irqsave(&q->lock);
			return 0;
		}
		if (q->bfirst)
			q->blast->next = b;
		else
			q->bfirst = b;
		q->blast = b;
		/* b->next = 0; done by iallocb() */
		q->len += BALLOC(b);
	}
	PANIC_EXTRA(b);
	memmove(b->wp, p, len);
	producecnt += len;
	b->wp += len;
	q->dlen += len;
	QDEBUG checkb(b, "qproduce");

	if (q->state & Qstarve) {
		q->state &= ~Qstarve;
		dowakeup = 1;
	}

	if (q->len >= q->limit)
		q->state |= Qflow;
	spin_unlock_irqsave(&q->lock);

	/* wake a sleeping reader and fire read taps outside the lock */
	if (dowakeup) {
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	return len;
}
/*
 * Bring the interface up on first attach: allocate the receive and
 * transmit descriptor rings plus per-descriptor transmit bounce
 * buffers, point the chip at them, enable interrupts and start the
 * kproc that services the device.  Subsequent attaches return
 * immediately.
 */
static void
vt6102attach(Ether* edev)
{
	int dsz, i;
	Ctlr *ctlr;
	Ds *ds, *prev;
	uchar *alloc, *bounce;
	char name[KNAMELEN];

	ctlr = edev->ctlr;
	qlock(&ctlr->alock);
	if(ctlr->alloc != nil){
		/* already attached */
		qunlock(&ctlr->alock);
		return;
	}

	/*
	 * Descriptor and bounce-buffer space.
	 * Must all be aligned on a 4-byte boundary,
	 * but try to align on cache-lines.
	 */
	ctlr->nrd = Nrd;
	ctlr->ntd = Ntd;
	dsz = ROUNDUP(sizeof(Ds), ctlr->cls);
	alloc = mallocalign((ctlr->nrd+ctlr->ntd)*dsz + ctlr->ntd*Txcopy, dsz, 0, 0);
	if(alloc == nil){
		qunlock(&ctlr->alock);
		error(Enomem);
	}
	ctlr->alloc = alloc;

	ctlr->rd = (Ds*)alloc;

	if(waserror()){
		/* free any receive blocks already planted, then the rings */
		ds = ctlr->rd;
		for(i = 0; i < ctlr->nrd; i++){
			if(ds->bp != nil){
				freeb(ds->bp);
				ds->bp = nil;
			}
			if((ds = ds->next) == nil)
				break;
		}
		free(ctlr->alloc);
		ctlr->alloc = nil;
		qunlock(&ctlr->alock);
		nexterror();
	}

	/* receive ring: a circular doubly-linked list of descriptors */
	prev = ctlr->rd + ctlr->nrd-1;
	for(i = 0; i < ctlr->nrd; i++){
		ds = (Ds*)alloc;
		alloc += dsz;

		ds->control = Rdbsz;
		ds->branch = PCIWADDR(alloc);

		/* over-allocate by 3 so rp can be 4-byte aligned */
		ds->bp = iallocb(Rdbsz+3);
		if(ds->bp == nil)
			error("vt6102: can't allocate receive ring\n");
		ds->bp->rp = (uchar*)ROUNDUP((ulong)ds->bp->rp, 4);
		ds->addr = PCIWADDR(ds->bp->rp);

		ds->next = (Ds*)alloc;
		ds->prev = prev;
		prev = ds;

		ds->status = Own;	/* descriptor belongs to the chip */
	}
	/* close the ring; the last descriptor stays owned by us */
	prev->branch = 0;
	prev->next = ctlr->rd;
	prev->status = 0;

	ctlr->rdh = ctlr->rd;

	/* transmit ring: each descriptor gets a Txcopy bounce buffer */
	ctlr->td = (Ds*)alloc;
	prev = ctlr->td + ctlr->ntd-1;
	bounce = alloc + ctlr->ntd*dsz;

	for(i = 0; i < ctlr->ntd; i++){
		ds = (Ds*)alloc;
		alloc += dsz;

		ds->bounce = bounce;
		bounce += Txcopy;
		ds->next = (Ds*)alloc;
		ds->prev = prev;
		prev = ds;
	}
	prev->next = ctlr->td;

	ctlr->tdh = ctlr->tdt = ctlr->td;
	ctlr->tdused = 0;

	ctlr->cr = Dpoll|Rdmd|Txon|Rxon|Strt;
	/*Srci|Abti|Norbf|Pktrace|Ovfi|Udfi|Be|Ru|Tu|Txe|Rxe|Ptx|Prx*/
	ctlr->imr = Abti|Norbf|Pktrace|Ovfi|Udfi|Be|Ru|Tu|Txe|Rxe|Ptx|Prx;

	/* point the chip at the rings, clear and enable interrupts, go */
	ilock(&ctlr->clock);
	csr32w(ctlr, Rxdaddr, PCIWADDR(ctlr->rd));
	csr32w(ctlr, Txdaddr, PCIWADDR(ctlr->td));
	csr16w(ctlr, Isr, ~0);
	csr16w(ctlr, Imr, ctlr->imr);
	csr16w(ctlr, Cr, ctlr->cr);
	iunlock(&ctlr->clock);

	snprint(name, KNAMELEN, "#l%dlproc", edev->ctlrno);
	kproc(name, vt6102lproc, edev);

	qunlock(&ctlr->alock);
	poperror();
}
static void interrupt(Ureg*, void *arg) { int len, status, rcvd, xmtd, restart; ushort events; Ctlr *ctlr; BD *dre; Block *b, *nb; Ether *ether = arg; ctlr = ether->ctlr; if(!ctlr->active) return; /* not ours */ /* * Acknowledge all interrupts and whine about those that shouldn't * happen. */ events = ctlr->fcc->fcce; ctlr->fcc->fcce = events; /* clear events */ #ifdef DBG ehisto[events & 0x7f]++; #endif ctlr->interrupts++; if(events & BSY) ctlr->overrun++; if(events & TXE) ether->oerrs++; #ifdef DBG rcvd = xmtd = 0; #endif /* * Receiver interrupt: run round the descriptor ring logging * errors and passing valid receive data up to the higher levels * until we encounter a descriptor still owned by the chip. */ if(events & RXF){ dre = &ctlr->rdr[ctlr->rdrx]; dczap(dre, sizeof(BD)); while(((status = dre->status) & BDEmpty) == 0){ rcvd++; if(status & RxError || (status & (BDFirst|BDLast)) != (BDFirst|BDLast)){ if(status & (RxeLG|RxeSH)) ether->buffs++; if(status & RxeNO) ether->frames++; if(status & RxeCR) ether->crcs++; if(status & RxeOV) ether->overflows++; print("eth rx: %ux\n", status); }else{ /* * We have a packet. Read it in. */ len = dre->length-4; b = ctlr->rcvbufs[ctlr->rdrx]; assert(dre->addr == PADDR(b->rp)); dczap(b->rp, len); if(nb = iallocb(Bufsize)){ b->wp += len; etheriq(ether, b, 1); b = nb; b->rp = (uchar*)(((ulong)b->rp + CACHELINESZ-1) & ~(CACHELINESZ-1)); b->wp = b->rp; ctlr->rcvbufs[ctlr->rdrx] = b; ctlr->rdr[ctlr->rdrx].addr = PADDR(b->wp); }else ether->soverflows++; } /* * Finished with this descriptor, reinitialise it, * give it back to the chip, then on to the next... */ dre->length = 0; dre->status = (status & BDWrap) | BDEmpty | BDInt; dcflush(dre, sizeof(BD)); ctlr->rdrx = NEXT(ctlr->rdrx, Nrdre); dre = &ctlr->rdr[ctlr->rdrx]; dczap(dre, sizeof(BD)); } } /* * Transmitter interrupt: handle anything queued for a free descriptor. 
*/ if(events & (TXB|TXE)){ ilock(ctlr); restart = 0; while(ctlr->ntq){ dre = &ctlr->tdr[ctlr->tdri]; dczap(dre, sizeof(BD)); status = dre->status; if(status & BDReady) break; if(status & TxeDEF) ctlr->deferred++; if(status & TxeHB) ctlr->heartbeat++; if(status & TxeLC) ctlr->latecoll++; if(status & TxeRL) ctlr->retrylim++; if(status & TxeUN) ctlr->underrun++; if(status & TxeCSL) ctlr->carrierlost++; if(status & (TxeLC|TxeRL|TxeUN)) restart = 1; ctlr->retrycount += (status>>2)&0xF; b = ctlr->txb[ctlr->tdri]; if(b == nil) panic("fcce/interrupt: bufp"); ctlr->txb[ctlr->tdri] = nil; freeb(b); ctlr->ntq--; ctlr->tdri = NEXT(ctlr->tdri, Ntdre); xmtd++; } if(restart){ ctlr->fcc->gfmr &= ~ENT; delay(10); ctlr->fcc->gfmr |= ENT; cpmop(RestartTx, ctlr->fccid, 0xc); } txstart(ether); iunlock(ctlr); }