/*
 * Copy one received frame out of the RX descriptor's DMA buffer into a
 * freshly allocated mblk.  Returns the mblk on success, or NULL if the
 * frame is undersized/oversized or no buffer could be allocated (the
 * relevant error counters are bumped in each case).
 *
 * Caller must hold efe_intrlock.
 */
mblk_t *
efe_recv_pkt(efe_t *efep, efe_desc_t *dp)
{
	efe_ring_t *ring;
	efe_buf_t *buf;
	mblk_t *pkt;
	uint16_t pktlen;
	uint16_t rxstat;

	ASSERT(mutex_owned(&efep->efe_intrlock));

	ring = efep->efe_rx_ring;

	/* Hardware length includes the FCS, which we strip. */
	pktlen = GETDESC16(ring, &dp->d_len) - ETHERFCSL;

	/* Reject runts. */
	if (pktlen < ETHERMIN) {
		efep->efe_ierrors++;
		efep->efe_runt_errors++;
		return (NULL);
	}

	/* Reject frames too long even with a VLAN tag. */
	if (pktlen > ETHERMAX + VLAN_TAGSZ) {
		efep->efe_ierrors++;
		efep->efe_toolong_errors++;
		return (NULL);
	}

	pkt = allocb(pktlen, 0);
	if (pkt == NULL) {
		efep->efe_ierrors++;
		efep->efe_norcvbuf++;
		return (NULL);
	}
	pkt->b_wptr = pkt->b_rptr + pktlen;

	/* Sync the DMA buffer for CPU access before copying out. */
	buf = GETBUF(ring, efep->efe_rx_desc);
	SYNCBUF(buf, DDI_DMA_SYNC_FORKERNEL);
	bcopy(buf->b_kaddr, pkt->b_rptr, pktlen);

	efep->efe_ipackets++;
	efep->efe_rbytes += pktlen;

	/* Classify broadcast vs. multicast from the descriptor status. */
	rxstat = GETDESC16(ring, &dp->d_status);
	if (rxstat & RXSTAT_BAR) {
		efep->efe_brdcstrcv++;
	} else if (rxstat & RXSTAT_MAR) {
		efep->efe_multircv++;
	}

	return (pkt);
}
/*
 * Queue one outbound message on the TX ring.
 *
 * Returns DDI_SUCCESS when the message has been consumed (transmitted,
 * or dropped because it was oversized), and DDI_FAILURE when the ring
 * is full (device still owns the descriptor) and the caller should
 * retry later; in that case mp is NOT freed.
 *
 * Caller must hold efe_txlock.
 */
int
efe_send(efe_t *efep, mblk_t *mp)
{
	efe_ring_t *ring;
	efe_desc_t *desc;
	efe_buf_t *buf;
	uint16_t pktlen;
	uint16_t txstat;

	ASSERT(mutex_owned(&efep->efe_txlock));

	ring = efep->efe_tx_ring;
	pktlen = msgsize(mp);

	/* Oversized frames are counted and dropped, not sent. */
	if (pktlen > ETHERMAX + VLAN_TAGSZ) {
		efep->efe_oerrors++;
		efep->efe_macxmt_errors++;
		freemsg(mp);
		return (DDI_SUCCESS);
	}

	desc = GETDESC(ring, efep->efe_tx_desc);
	SYNCDESC(ring, efep->efe_tx_desc, DDI_DMA_SYNC_FORKERNEL);
	txstat = GETDESC16(ring, &desc->d_status);

	/* Stop if device owns descriptor */
	if (txstat & TXSTAT_OWNER)
		return (DDI_FAILURE);

	buf = GETBUF(ring, efep->efe_tx_desc);
	mcopymsg(mp, buf->b_kaddr);

	/*
	 * Packets must contain at least ETHERMIN octets.
	 * Padded octets are zeroed out prior to sending.
	 */
	if (pktlen < ETHERMIN) {
		bzero(buf->b_kaddr + pktlen, ETHERMIN - pktlen);
		pktlen = ETHERMIN;
	}
	SYNCBUF(buf, DDI_DMA_SYNC_FORDEV);

	/* Hand the descriptor back to the device. */
	PUTDESC16(ring, &desc->d_status, TXSTAT_OWNER);
	PUTDESC16(ring, &desc->d_len, pktlen);
	PUTDESC16(ring, &desc->d_control, TXCTL_LASTDESCR);
	SYNCDESC(ring, efep->efe_tx_desc, DDI_DMA_SYNC_FORDEV);

	efep->efe_opackets++;
	efep->efe_obytes += pktlen;

	/* I/G bit set in destination: broadcast or multicast frame. */
	if (*buf->b_kaddr & 0x01) {
		if (bcmp(buf->b_kaddr, efe_broadcast, ETHERADDRL) == 0)
			efep->efe_brdcstxmt++;
		else
			efep->efe_multixmt++;
	}

	efep->efe_tx_desc = NEXTDESC(ring, efep->efe_tx_desc);

	return (DDI_SUCCESS);
}
/*
 * Drain all completed frames from the RX descriptor ring, starting at
 * pcn_rxhead, and return them as a b_next-linked mblk chain (NULL if
 * nothing was received).  Each descriptor is handed back to the device
 * (OWN bit set) after processing, whether the frame was accepted,
 * errored, or dropped for lack of buffers.
 */
static mblk_t *
pcn_receive(pcn_t *pcnp)
{
	uint32_t len;
	pcn_buf_t *rxb;
	pcn_rx_desc_t *rmd;
	mblk_t *mpchain, **mpp, *mp;
	int head, cnt;

	mpchain = NULL;
	mpp = &mpchain;
	head = pcnp->pcn_rxhead;

	/* Walk at most one full ring's worth of descriptors. */
	for (cnt = 0; cnt < PCN_RXRING; cnt++) {
		rmd = &pcnp->pcn_rxdescp[head];
		rxb = pcnp->pcn_rxbufs[head];

		/* Sync before we inspect the descriptor the device wrote. */
		SYNCRXDESC(pcnp, head, DDI_DMA_SYNC_FORKERNEL);
		if (rmd->pcn_rxstat & PCN_RXSTAT_OWN)
			break;	/* device still owns it; nothing more ready */

		/* Hardware length includes the FCS, which we strip. */
		len = rmd->pcn_rxlen - ETHERFCSL;

		if (rmd->pcn_rxstat & PCN_RXSTAT_ERR) {
			pcnp->pcn_errrcv++;
			if (rmd->pcn_rxstat & PCN_RXSTAT_FRAM)
				pcnp->pcn_align_errors++;
			if (rmd->pcn_rxstat & PCN_RXSTAT_OFLOW)
				pcnp->pcn_overflow++;
			if (rmd->pcn_rxstat & PCN_RXSTAT_CRC)
				pcnp->pcn_fcs_errors++;
		} else if (len > ETHERVLANMTU) {
			pcnp->pcn_errrcv++;
			pcnp->pcn_toolong_errors++;
		} else {
			mp = allocb(len + PCN_HEADROOM, 0);
			if (mp == NULL) {
				pcnp->pcn_errrcv++;
				pcnp->pcn_norcvbuf++;
				goto skip;
			}
			SYNCBUF(rxb, len, DDI_DMA_SYNC_FORKERNEL);

			mp->b_rptr += PCN_HEADROOM;
			mp->b_wptr = mp->b_rptr + len;
			bcopy((char *)rxb->pb_buf, mp->b_rptr, len);

			pcnp->pcn_ipackets++;
			/*
			 * FIX: byte counter must advance by the frame
			 * length, not by one per packet (matches the
			 * pcn_obytes += len accounting on the TX side).
			 */
			pcnp->pcn_rbytes += len;

			/*
			 * FIX: & binds tighter than |, so the original
			 * (stat & LAFM | BAM) was always true and every
			 * frame was counted as broadcast or multicast.
			 * Test both status bits with explicit parens.
			 */
			if (rmd->pcn_rxstat &
			    (PCN_RXSTAT_LAFM | PCN_RXSTAT_BAM)) {
				if (rmd->pcn_rxstat & PCN_RXSTAT_BAM)
					pcnp->pcn_brdcstrcv++;
				else
					pcnp->pcn_multircv++;
			}

			*mpp = mp;
			mpp = &mp->b_next;
		}

skip:
		/* Return the descriptor to the device. */
		rmd->pcn_rxstat = PCN_RXSTAT_OWN;
		SYNCRXDESC(pcnp, head, DDI_DMA_SYNC_FORDEV);

		head = (head + 1) % PCN_RXRING;
	}
	pcnp->pcn_rxhead = head;

	return (mpchain);
}
/*
 * Queue one outbound message on the TX ring.
 *
 * Returns B_TRUE when the message has been consumed (sent, or dropped
 * because it was oversized) and B_FALSE when the ring is full — in the
 * latter case mp is NOT freed, the "want writes" flag is set, and the
 * TX interrupt is enabled so the caller can retry after reclaim.
 *
 * Caller must hold pcn_xmtlock.
 */
static boolean_t
pcn_send(pcn_t *pcnp, mblk_t *mp)
{
	size_t len;
	pcn_buf_t *txb;
	pcn_tx_desc_t *tmd;
	int slot;

	ASSERT(mutex_owned(&pcnp->pcn_xmtlock));
	ASSERT(mp != NULL);

	len = msgsize(mp);

	/* Oversized frames are counted and dropped, not sent. */
	if (len > ETHERVLANMTU) {
		pcnp->pcn_macxmt_errors++;
		freemsg(mp);
		return (B_TRUE);
	}

	/* Try to reclaim completed descriptors when running low. */
	if (pcnp->pcn_txavail < PCN_TXRECLAIM)
		pcn_reclaim(pcnp);

	if (pcnp->pcn_txavail == 0) {
		pcnp->pcn_wantw = B_TRUE;

		/* enable tx interrupt */
		PCN_CSR_SETBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_LTINTEN);
		return (B_FALSE);
	}

	slot = pcnp->pcn_txsend;

	/*
	 * We copy the packet to a single buffer. NetBSD sources suggest
	 * that if multiple segements are ever used, VMware has a bug that
	 * will only allow 8 segments to be used, while the physical chips
	 * allow 16.
	 */
	txb = pcnp->pcn_txbufs[slot];
	mcopymsg(mp, txb->pb_buf);	/* frees mp! */

	pcnp->pcn_opackets++;
	pcnp->pcn_obytes += len;

	/* I/G bit set in destination: broadcast or multicast frame. */
	if (txb->pb_buf[0] & 0x1) {
		if (bcmp(txb->pb_buf, pcn_broadcast, ETHERADDRL) == 0)
			pcnp->pcn_brdcstxmt++;
		else
			pcnp->pcn_multixmt++;
	}

	tmd = &pcnp->pcn_txdescp[slot];
	SYNCBUF(txb, len, DDI_DMA_SYNC_FORDEV);

	tmd->pcn_txstat = 0;
	tmd->pcn_tbaddr = txb->pb_paddr;

	/* PCNet wants the 2's complement of the length of the buffer */
	tmd->pcn_txctl = ((~(len) + 1) & PCN_TXCTL_BUFSZ) | PCN_TXCTL_MBO |
	    PCN_TXCTL_STP | PCN_TXCTL_ENP | PCN_TXCTL_ADD_FCS |
	    PCN_TXCTL_OWN | PCN_TXCTL_MORE_LTINT;

	SYNCTXDESC(pcnp, slot, DDI_DMA_SYNC_FORDEV);

	pcnp->pcn_txavail--;
	pcnp->pcn_txsend = (slot + 1) % PCN_TXRING;
	pcnp->pcn_txstall_time = gethrtime() + (5 * 1000000000ULL);

	/* Kick the transmitter. */
	pcn_csr_write(pcnp, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);

	return (B_TRUE);
}