static void
receive_packet(uint8_t *buffer, int length)
{
    struct ifnet *ifp = &arpcom.ac_if;
    struct mbuf *m;
    struct ether_header *eh;
    uint32_t computed_crc, net_crc;

    if (length < 64) {
        printk("Warning: Ethernet packet too short\n");
        return;
    }

    length -= 4;        /* strip CRC */
    net_crc = ((uint32_t)buffer[length])
        | ((uint32_t)buffer[length + 1] << 8)
        | ((uint32_t)buffer[length + 2] << 16)
        | ((uint32_t)buffer[length + 3] << 24);

    length -= 8;        /* strip preamble */
    computed_crc = ether_crc32_le(&buffer[8], length) ^ 0xffffffff;

    if (computed_crc == net_crc) {
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        length -= sizeof(struct ether_header);      /* strip Ethernet header */
        memcpy(m->m_data, &buffer[8 + sizeof(struct ether_header)], length);
        m->m_len = m->m_pkthdr.len = length;
        m->m_pkthdr.rcvif = ifp;
        eh = (struct ether_header *)&buffer[8];
        ether_input(ifp, eh, m);
    } else
        printk("Ethernet CRC error: got %08x expected %08x (len=%d)\n",
               net_crc, computed_crc, length);
}
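/*
 * The driver above validates the trailing FCS by computing a little-endian
 * (reflected) CRC-32 over the frame and comparing it with the CRC appended
 * on the wire.  The sketch below is a minimal, self-contained illustration
 * of such a helper, assuming the standard Ethernet polynomial 0xEDB88320 in
 * reflected form; it is not the ether_crc32_le() used by the driver, just a
 * reference implementation of the same calculation.  Like the driver's
 * helper, it returns the raw CRC register, so the caller applies the final
 * XOR with 0xffffffff, as receive_packet() does.
 */
static uint32_t
example_ether_crc32_le(const uint8_t *buf, size_t len)
{
    uint32_t crc = 0xffffffff;
    size_t i;
    int bit;

    for (i = 0; i < len; i++) {
        crc ^= buf[i];
        for (bit = 0; bit < 8; bit++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
    }
    return crc;     /* caller XORs with 0xffffffff */
}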
static void
virtif_worker(void *arg)
{
    struct ifnet *ifp = arg;
    struct virtif_sc *sc = ifp->if_softc;
    struct mbuf *m;
    size_t plen = ETHER_MAX_LEN_JUMBO+1;
    ssize_t n;
    int error;

    for (;;) {
        m = m_gethdr(M_WAIT, MT_DATA);
        MEXTMALLOC(m, plen, M_WAIT);

        n = rumpuser_read(sc->sc_tapfd, mtod(m, void *), plen, &error);
        KASSERT(n < ETHER_MAX_LEN_JUMBO);
        if (n <= 0) {
            m_freem(m);
            break;
        }
        m->m_len = m->m_pkthdr.len = n;
        m->m_pkthdr.rcvif = ifp;
        ether_input(ifp, m);
    }

    panic("virtif_workin is a lazy boy %d\n", error);
}
static void
netmap_read(evutil_socket_t fd, short event, void *data)
{
    char *buf;
    int err, i, pkts, rx_rings;
    struct netmap_if *ifp;
    struct netmap_ring *nring;
    struct nm_if *nmif;

    nmif = (struct nm_if *)data;
    ifp = nmif->nm_if_ifp;
    rx_rings = ifp->ni_rx_rings;
    if (!nohostring && !nmif->nm_if_vale)
        rx_rings++;
    pkts = 0;
    for (i = 0; i < rx_rings; i++) {
        nring = NETMAP_RXRING(ifp, i);
        while (!nm_ring_empty(nring)) {
            buf = NETMAP_GET_BUF(nring);
            err = ether_input(nmif, i, buf, NETMAP_SLOT_LEN(nring));
            /* Send the packet to hw <-> host bridge. */
            if (!nohostring && err == 1)
                err = ether_bridge(nmif, i, buf, NETMAP_SLOT_LEN(nring));
            NETMAP_RING_NEXT(nring);
            if (err < 0 || ++pkts == burst)
                goto done;
        }
    }
done:
    if_netmap_txsync();
}
Static void
usbintr(void)
{
    struct ether_header *eh;
    struct mbuf *m;
    struct usb_qdat *q;
    struct ifnet *ifp;
    int s;

    s = splimp();

    /* Check the RX queue */
    while (1) {
        IF_DEQUEUE(&usbq_rx, m);
        if (m == NULL)
            break;
        eh = mtod(m, struct ether_header *);
        q = (struct usb_qdat *)m->m_pkthdr.rcvif;
        ifp = q->ifp;
        m->m_pkthdr.rcvif = ifp;
        m_adj(m, sizeof(struct ether_header));
        ether_input(ifp, eh, m);

        /* Re-arm the receiver */
        (*q->if_rxstart)(ifp);
        if (!IFQ_IS_EMPTY(&ifp->if_snd))
            (*ifp->if_start)(ifp);
    }

    /* Check the TX queue */
    while (1) {
        IF_DEQUEUE(&usbq_tx, m);
        if (m == NULL)
            break;
        ifp = m->m_pkthdr.rcvif;
        m_freem(m);
        if (!IFQ_IS_EMPTY(&ifp->if_snd))
            (*ifp->if_start)(ifp);
    }

    splx(s);
    return;
}
/*
 * Pass a packet up to the higher levels.
 */
static inline void
elread(struct el_softc *sc, caddr_t buf, int len)
{
    register struct ether_header *eh;
    struct mbuf *m;

    eh = (struct ether_header *)buf;

#if NBPFILTER > 0
    /*
     * Check if there's a bpf filter listening on this interface.
     * If so, hand off the raw packet to bpf.
     */
    if (sc->bpf) {
        bpf_tap(sc->bpf, buf, len + sizeof(struct ether_header));

        /*
         * Note that the interface cannot be in promiscuous mode if
         * there are no bpf listeners.  And if we are in promiscuous
         * mode, we have to check if this packet is really ours.
         *
         * This test does not support multicasts.
         */
        if ((sc->arpcom.ac_if.if_flags & IFF_PROMISC)
            && bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
                    sizeof(eh->ether_dhost)) != 0
            && bcmp(eh->ether_dhost, etherbroadcastaddr,
                    sizeof(eh->ether_dhost)) != 0)
            return;
    }
#endif

    /*
     * Pull packet off interface.
     */
    m = elget(buf, len, 0, &sc->arpcom.ac_if);
    if (m == 0)
        return;

    ether_input(&sc->arpcom.ac_if, eh, m);
}
/* Handle the recv of otherwise unhandled packets */
void default_packet_rec(struct default_rec_st *rec_info)
{
    card_st *cdst;
    char *rxbuf;
    word_t value;
    IO_Rec rxrecs[2];
    int header_size = 128;
    int mtu = 1514;       /* XXX hardwired constants */
    int i;
    intf_st *intfp;
    iphost_st *host_st;
    bool_t ok = False;
    Net_IPHostAddr ipaddr;
    Net_IPHostAddr *ipaddrp;
    char buf[32];
    uint32_t rec_recs;    /* number of recs received in the call */

    TRC(printf("packet recv running\n"));
    cdst = rec_info->mycard;
    host_st = rec_info->myhost;
    intfp = rec_info->intf;

    sprintf(buf, "svc>net>%s>ipaddr", intfp->name);
    ipaddrp = NAME_FIND(buf, Net_IPHostAddrP);
    ipaddr = *ipaddrp;

    intfp->def_txfilt = LMPFMod$NewTXDef(cdst->pfmod, cdst->mac, ipaddr.a);
    getDxIOs(cdst->netif, intfp->def_txfilt, "DEF",
             /* RETURNS: */
             &intfp->def_rxhdl, &intfp->def_txhdl,
             &intfp->io_rx, &intfp->io_tx,
             &intfp->def_rxoff, &intfp->def_txoff,
             &intfp->def_heap);

    /* sanity check what we got */
    if (intfp->def_rxoff == NULL || intfp->def_rxhdl == 0)
        printf("flowman: error: bad recv handle or offer\n");
    if (intfp->io_tx == NULL)
        printf("flowman: default handler: tx bind failed\n");
    if (intfp->io_rx == NULL)
        printf("flowman: default handler: rx bind failed\n");
    if (intfp->def_heap == NULL)
        printf("flowman: default handler: def_heap == NULL\n");

    ok = Netif$SetTxFilter(cdst->netif, intfp->def_txhdl, intfp->def_txfilt);
    if (!ok)
        printf("flowman: cannot set tx filter\n");

    /* install default filter */
    ok = LMPFCtl$SetDefault(cdst->rx_pfctl, intfp->def_rxhdl);
    if (!ok)
        printf("flowman: cannot set rx default filter\n");

    /* set xxstat to 0 */
    memset(&host_st->ipstat, 0, sizeof(ipstat_st));
    memset(&host_st->tcpstat, 0, sizeof(tcpstat_st));
    memset(&host_st->udpstat, 0, sizeof(udpstat_st));

    /* Thiemo: should be something with time of day, so that it grows
     * after crashing */
    host_st->ip_id = NOW() & 0xffff;

    mtu = ALIGN4(mtu);

#define PIPEDEPTH 16
    rxbuf = Heap$Malloc(intfp->def_heap, PIPEDEPTH * (mtu + header_size));

    for (i = 0; i < PIPEDEPTH; i++) {
        /* chop up memory */
        rxrecs[0].base = rxbuf + i*(mtu+header_size);
        rxrecs[0].len  = header_size;
        rxrecs[1].base = rxbuf + i*(mtu+header_size) + header_size;
        rxrecs[1].len  = mtu;

        /* send recs */
        // TRC(printf("prime : %p+%d %p+%d \n",
        //            rxrecs[0].base, rxrecs[0].len,
        //            rxrecs[1].base, rxrecs[1].len));

        /* Actually, want to skip the first 2 bytes of header, so Ethernet
         * frames land mis-aligned, but IP layer and up is correctly
         * aligned */
        ((char *)rxrecs[0].base) += 2;
        rxrecs[0].len -= 2;

        if (!IO$PutPkt(intfp->io_rx, 2, rxrecs, 0, 0))
            printf("flowman: default prime %d failed\n", i);
        // TRC(printf("prime %d sent\n", i));
    }

    while (1) {
        /* loop and get incoming packets */
        DTRC(printf("flowman: default: waiting for packet..\n"));
        IO$GetPkt(intfp->io_rx, 2, rxrecs, FOREVER, &rec_recs, &value);

        ((char *)rxrecs[0].base) -= 2;
        rxrecs[0].len += 2;

        DTRC(printf("flowman: got packet on default channel, "
                    "nr of IO_Recs %d, rec[0].len=%d\n",
                    rec_recs, rxrecs[0].len));

        ether_input(rxrecs, rec_recs, cdst->mac, host_st);

        /* send down an empty packet after adapting to orig. size */
        /* XXX check base */
        rxrecs[0].len = header_size;
        rxrecs[1].len = mtu - header_size;

        /* again, send down the version which is advanced slightly */
        ((char *)rxrecs[0].base) += 2;
        rxrecs[0].len -= 2;
        IO$PutPkt(intfp->io_rx, 2, rxrecs, 0, 0);
    }
}
static void
open_eth_rxDaemon (void *arg)
{
    struct ether_header *eh;
    struct open_eth_softc *dp = (struct open_eth_softc *) &oc;
    struct ifnet *ifp = &dp->arpcom.ac_if;
    struct mbuf *m;
    unsigned int len;
    uint32_t len_status;
    unsigned int bad;
    rtems_event_set events;

    for (;;) {
        rtems_bsdnet_event_receive (INTERRUPT_EVENT,
                                    RTEMS_WAIT | RTEMS_EVENT_ANY,
                                    RTEMS_NO_TIMEOUT, &events);
#ifdef OPEN_ETH_DEBUG
        printf ("r\n");
#endif

        while (!((len_status = dp->regs->xd[dp->rx_ptr + dp->txbufs].len_status)
                 & OETH_RX_BD_EMPTY)) {
            bad = 0;
            if (len_status & (OETH_RX_BD_TOOLONG | OETH_RX_BD_SHORT)) {
                dp->rxLengthError++;
                bad = 1;
            }
            if (len_status & OETH_RX_BD_DRIBBLE) {
                dp->rxNonOctet++;
                bad = 1;
            }
            if (len_status & OETH_RX_BD_CRCERR) {
                dp->rxBadCRC++;
                bad = 1;
            }
            if (len_status & OETH_RX_BD_OVERRUN) {
                dp->rxOverrun++;
                bad = 1;
            }
            if (len_status & OETH_RX_BD_MISS) {
                dp->rxMiss++;
                bad = 1;
            }
            if (len_status & OETH_RX_BD_LATECOL) {
                dp->rxCollision++;
                bad = 1;
            }

            if (!bad) {
                /* pass on the packet in the receive buffer */
                len = len_status >> 16;
                m = (struct mbuf *) (dp->rxdesc[dp->rx_ptr].m);
                m->m_len = m->m_pkthdr.len = len - sizeof (struct ether_header);
                eh = mtod (m, struct ether_header *);
                m->m_data += sizeof (struct ether_header);
#ifdef CPU_U32_FIX
                ipalign(m);     /* Align packet on 32-bit boundary */
#endif
                ether_input (ifp, eh, m);

                /* get a new mbuf */
                MGETHDR (m, M_WAIT, MT_DATA);
                MCLGET (m, M_WAIT);
                m->m_pkthdr.rcvif = ifp;
                dp->rxdesc[dp->rx_ptr].m = m;
                dp->regs->xd[dp->rx_ptr + dp->txbufs].addr =
                    (uint32_t*) mtod (m, void *);
                dp->rxPackets++;
            }

            dp->regs->xd[dp->rx_ptr + dp->txbufs].len_status =
                (dp->regs->xd[dp->rx_ptr + dp->txbufs].len_status
                 & ~OETH_TX_BD_STATS) | OETH_TX_BD_READY;
            dp->rx_ptr = (dp->rx_ptr + 1) % dp->rxbufs;
        }
    }
}
static void
mcf5272_enet_rxDaemon (void *arg)
{
    struct mcf5272_enet_struct *sc = (struct mcf5272_enet_struct *)arg;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mbuf *m;
    uint16_t status;
    bd_t *rxBd;
    int rxBdIndex;

    /*
     * Allocate space for incoming packets and start reception
     */
    for (rxBdIndex = 0 ; ;) {
        rxBd = sc->rxBdBase + rxBdIndex;
        MGETHDR (m, M_WAIT, MT_DATA);
        MCLGET (m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
        sc->rxMbuf[rxBdIndex] = m;
        rxBd->buffer = mtod (m, void *);
        rxBd->status = MCF5272_BD_EMPTY;
        g_enet_regs->rdar = 0x1000000;
        if (++rxBdIndex == sc->rxBdCount) {
            rxBd->status |= MCF5272_BD_WRAP;
            break;
        }
    }

    /*
     * Input packet handling loop
     */
    rxBdIndex = 0;
    for (;;) {
        rxBd = sc->rxBdBase + rxBdIndex;

        /* Wait for packet if there's not one ready */
        if ((status = rxBd->status) & MCF5272_BD_EMPTY) {
            /* Clear old events */
            g_enet_regs->eir = MCF5272_ENET_EIR_RXF;

            /*
             * Wait for packet.  Note that the buffer descriptor is
             * checked *before* the event wait -- this catches the
             * possibility that a packet arrived between the `if'
             * above, and the clearing of the event register.
             */
            while ((status = rxBd->status) & MCF5272_BD_EMPTY) {
                rtems_event_set events;

                /* Unmask RXF (Full frame received) event */
                g_enet_regs->eir |= MCF5272_ENET_EIR_RXF;

                rtems_bsdnet_event_receive (INTERRUPT_EVENT,
                                            RTEMS_WAIT|RTEMS_EVENT_ANY,
                                            RTEMS_NO_TIMEOUT,
                                            &events);
                cp;
            }
        }
        cp;

        /* Check that packet is valid */
        if (status & MCF5272_BD_LAST) {
            /*
             * Pass the packet up the chain.
             * FIXME: Packet filtering hook could be done here.
             */
            struct ether_header *eh;

            m = sc->rxMbuf[rxBdIndex];
            m->m_len = m->m_pkthdr.len = (rxBd->length -
                                          sizeof(uint32_t) -
                                          sizeof(struct ether_header));
            eh = mtod (m, struct ether_header *);
            m->m_data += sizeof(struct ether_header);
            ether_input (ifp, eh, m);

            /* Allocate a new mbuf */
            MGETHDR (m, M_WAIT, MT_DATA);
            MCLGET (m, M_WAIT);
            m->m_pkthdr.rcvif = ifp;
            sc->rxMbuf[rxBdIndex] = m;
            rxBd->buffer = mtod (m, void *);
        }
        else {
            /* Something went wrong with the reception */
            if (!(status & MCF5272_BD_LAST)) { sc->rxNotLast++; }
            if (status & MCF5272_BD_LONG) { sc->rxGiant++; }
            if (status & MCF5272_BD_NONALIGNED) { sc->rxNonOctet++; }
            if (status & MCF5272_BD_SHORT) { sc->rxRunt++; }
            if (status & MCF5272_BD_CRC_ERROR) { sc->rxBadCRC++; }
            if (status & MCF5272_BD_OVERRUN) { sc->rxOverrun++; }
            if (status & MCF5272_BD_TRUNCATED) { sc->rxTruncated++; }
        }

        /* Reenable the buffer descriptor */
        rxBd->status = (status & MCF5272_BD_WRAP) | MCF5272_BD_EMPTY;
        g_enet_regs->rdar = 0x1000000;

        /* Move to next buffer descriptor */
        if (++rxBdIndex == sc->rxBdCount) {
            rxBdIndex = 0;
        }
    }
}
/*
 * SCC reader task
 */
static void
scc_rxDaemon (void *arg)
{
    struct scc_softc *sc = (struct scc_softc *)arg;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mbuf *m;
    uint16_t status;
    volatile m360BufferDescriptor_t *rxBd;
    int rxBdIndex;

    /*
     * Allocate space for incoming packets and start reception
     */
    for (rxBdIndex = 0 ; ;) {
        rxBd = sc->rxBdBase + rxBdIndex;
        MGETHDR (m, M_WAIT, MT_DATA);
        MCLGET (m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
        sc->rxMbuf[rxBdIndex] = m;
        rxBd->buffer = mtod (m, void *);
        if (++rxBdIndex == sc->rxBdCount) {
            rxBd->status = M360_BD_EMPTY | M360_BD_INTERRUPT | M360_BD_WRAP;
            break;
        }
        rxBd->status = M360_BD_EMPTY | M360_BD_INTERRUPT;
    }

    /*
     * Input packet handling loop
     */
    rxBdIndex = 0;
    for (;;) {
        rxBd = sc->rxBdBase + rxBdIndex;

        /* Wait for packet if there's not one ready */
        if ((status = rxBd->status) & M360_BD_EMPTY) {
            /* Clear old events */
            m360.scc1.scce = 0x8;

            /*
             * Wait for packet.  Note that the buffer descriptor is
             * checked *before* the event wait -- this catches the
             * possibility that a packet arrived between the `if'
             * above, and the clearing of the event register.
             */
            while ((status = rxBd->status) & M360_BD_EMPTY) {
                rtems_interrupt_level level;
                rtems_event_set events;

                /* Unmask RXF (Full frame received) event */
                rtems_interrupt_disable (level);
                m360.scc1.sccm |= 0x8;
                rtems_interrupt_enable (level);

                rtems_bsdnet_event_receive (INTERRUPT_EVENT,
                                            RTEMS_WAIT|RTEMS_EVENT_ANY,
                                            RTEMS_NO_TIMEOUT,
                                            &events);
            }
        }

        /* Check that packet is valid */
        if ((status & (M360_BD_LAST |
                       M360_BD_FIRST_IN_FRAME |
                       M360_BD_LONG |
                       M360_BD_NONALIGNED |
                       M360_BD_SHORT |
                       M360_BD_CRC_ERROR |
                       M360_BD_OVERRUN |
                       M360_BD_COLLISION)) ==
            (M360_BD_LAST | M360_BD_FIRST_IN_FRAME)) {
            /*
             * Pass the packet up the chain.
             * FIXME: Packet filtering hook could be done here.
             */
            struct ether_header *eh;

            m = sc->rxMbuf[rxBdIndex];
            m->m_len = m->m_pkthdr.len = rxBd->length -
                sizeof(uint32_t) -
                sizeof(struct ether_header);
            eh = mtod (m, struct ether_header *);
            m->m_data += sizeof(struct ether_header);
            ether_input (ifp, eh, m);

            /* Allocate a new mbuf */
            MGETHDR (m, M_WAIT, MT_DATA);
            MCLGET (m, M_WAIT);
            m->m_pkthdr.rcvif = ifp;
            sc->rxMbuf[rxBdIndex] = m;
            rxBd->buffer = mtod (m, void *);
        }
        else {
            /* Something went wrong with the reception */
            if (!(status & M360_BD_LAST))
                sc->rxNotLast++;
            if (!(status & M360_BD_FIRST_IN_FRAME))
                sc->rxNotFirst++;
            if (status & M360_BD_LONG)
                sc->rxGiant++;
            if (status & M360_BD_NONALIGNED)
                sc->rxNonOctet++;
            if (status & M360_BD_SHORT)
                sc->rxRunt++;
            if (status & M360_BD_CRC_ERROR)
                sc->rxBadCRC++;
            if (status & M360_BD_OVERRUN)
                sc->rxOverrun++;
            if (status & M360_BD_COLLISION)
                sc->rxCollision++;
        }

        /* Reenable the buffer descriptor */
        rxBd->status = (status & (M360_BD_WRAP | M360_BD_INTERRUPT)) |
            M360_BD_EMPTY;

        /* Move to next buffer descriptor */
        if (++rxBdIndex == sc->rxBdCount)
            rxBdIndex = 0;
    }
}
static void
fec_rxDaemon (void *arg)
{
    volatile struct mcf5282_enet_struct *sc = (volatile struct mcf5282_enet_struct *)arg;
    struct ifnet *ifp = (struct ifnet *)&sc->arpcom.ac_if;
    struct mbuf *m;
    uint16_t status;
    volatile mcf5282BufferDescriptor_t *rxBd;
    int rxBdIndex;

    /*
     * Allocate space for incoming packets and start reception
     */
    for (rxBdIndex = 0 ; ;) {
        rxBd = sc->rxBdBase + rxBdIndex;
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
        sc->rxMbuf[rxBdIndex] = m;
        rxBd->buffer = mtod(m, void *);
        rxBd->status = MCF5282_FEC_RxBD_E;
        if (++rxBdIndex == sc->rxBdCount) {
            rxBd->status |= MCF5282_FEC_RxBD_W;
            break;
        }
    }

    /*
     * Input packet handling loop
     */
    MCF5282_FEC_RDAR = 0;
    rxBdIndex = 0;
    for (;;) {
        rxBd = sc->rxBdBase + rxBdIndex;

        /* Wait for packet if there's not one ready */
        if ((status = rxBd->status) & MCF5282_FEC_RxBD_E) {
            /* Clear old events. */
            MCF5282_FEC_EIR = MCF5282_FEC_EIR_RXF;

            /*
             * Wait for packet to arrive.
             * Check the buffer descriptor before waiting for the event.
             * This catches the case when a packet arrives between the
             * `if' above, and the clearing of the RXF bit in the EIR.
             */
            while ((status = rxBd->status) & MCF5282_FEC_RxBD_E) {
                rtems_event_set events;
                int level;

                rtems_interrupt_disable(level);
                MCF5282_FEC_EIMR |= MCF5282_FEC_EIMR_RXF;
                rtems_interrupt_enable(level);
                rtems_bsdnet_event_receive (RX_INTERRUPT_EVENT,
                                            RTEMS_WAIT|RTEMS_EVENT_ANY,
                                            RTEMS_NO_TIMEOUT,
                                            &events);
            }
        }

        /* Check that packet is valid */
        if (status & MCF5282_FEC_RxBD_L) {
            /*
             * Pass the packet up the chain.
             * FIXME: Packet filtering hook could be done here.
             */
            struct ether_header *eh;
            int len = rxBd->length - sizeof(uint32_t);

            m = sc->rxMbuf[rxBdIndex];
#ifdef RTEMS_MCF5282_BSP_ENABLE_DATA_CACHE
            /*
             * Invalidate the cache.  The cache is so small that it's
             * reasonable to simply invalidate the whole thing.
             */
            rtems_cache_invalidate_entire_data();
#endif
            m->m_len = m->m_pkthdr.len = len - sizeof(struct ether_header);
            eh = mtod(m, struct ether_header *);
            m->m_data += sizeof(struct ether_header);
            ether_input(ifp, eh, m);

            /* Allocate a new mbuf */
            MGETHDR(m, M_WAIT, MT_DATA);
            MCLGET(m, M_WAIT);
            m->m_pkthdr.rcvif = ifp;
            sc->rxMbuf[rxBdIndex] = m;
            rxBd->buffer = mtod(m, void *);
        }

        /* Reenable the buffer descriptor */
        rxBd->status = (status & MCF5282_FEC_RxBD_W) | MCF5282_FEC_RxBD_E;
        MCF5282_FEC_RDAR = 0;

        /* Move to next buffer descriptor */
        if (++rxBdIndex == sc->rxBdCount)
            rxBdIndex = 0;
    }
}
/* ne2000_rcv ---------------------------------------------------------------*/
int ne2000_rcv(char* buf, int len)
{
    int flag;
    /* rcv_header_t header; ethernet_t ether; unsigned int page_no, curr, next; */
    char* buf2 = buf;
    int len2;
    void ether_input(char*, int);

    /* page 0 */
    outb(IO_DP8390 + DP_CR, CR_STA);

    /* rx status register */
    flag = inb(IO_DP8390 + DP_RSR);
    if ((flag & RSR_FO) != 0)
        return 0;

    /* rx ring buffer over flow */
    if ((inb(IO_DP8390 + DP_ISR) & ISR_OVW) != 0)
        return 0;

    /* rx success */
    if ((flag & RSR_PRX) == 0)
        return 0;               /* no received packet */

    bound_page = inb(IO_DP8390 + DP_BNRY) + 1;
    if (bound_page == PAGE_RX_END)
        bound_page = PAGE_RX_START;

    outb(IO_DP8390 + DP_CR, CR_PS_P1);
    curr_page = inb(IO_DP8390 + DP_CURR);
    outb(IO_DP8390 + DP_CR, CR_PS_P0);
    if (curr_page == PAGE_RX_END)
        curr_page = PAGE_RX_START;
    if (curr_page == bound_page)
        return 0;

    ne2000_memread((unsigned int)(bound_page << 8), (unsigned char*)&rb, 4);
    len2 = rb.len;
#if 0
    printk("[%x,%x:%x,%x,%x]", bound_page, curr_page,
           rb.status, rb.bound, rb.len);
#endif
    rx_start = (bound_page << 8) + 4;
    rx_len = rb.len - 4;
    rx_bound = rb.bound;

    if ((rb.status & RSR_PRX) != 0 && rx_len >= 14 && rx_len <= 1514) {
        rx_remain_len = rx_len;
        rx_sub_len = PAGE_RX_END * 256 - rx_start;
        if (rx_sub_len < rx_len) {
            ne2000_memread(rx_start, buf, rx_sub_len);
            rx_start = PAGE_RX_START * 256;
            buf += rx_sub_len;
            rx_remain_len = rx_len - rx_sub_len;
        }
        /* printk("pre-read"); */
        ne2000_memread(rx_start, buf, rx_remain_len);
        ether_input(buf2, len2);
    }

#if 0
    {
        printk("dst = ");
        printk("%x:", buf2[0]); printk("%x:", buf2[1]); printk("%x:", buf2[2]);
        printk("%x:", buf2[3]); printk("%x:", buf2[4]); printk("%x ", buf2[5]);
        printk("src = ");
        printk("%x:", buf2[6]); printk("%x:", buf2[7]); printk("%x:", buf2[8]);
        printk("%x:", buf2[9]); printk("%x:", buf2[10]); printk("%x\n", buf2[11]);
    }
#endif

    bound_page = rx_bound;
    if (bound_page == PAGE_RX_START)
        bound_page = PAGE_RX_END;
    bound_page --;
    outb(IO_DP8390 + DP_BNRY, bound_page);

    /* clear interrupt status */
    outb(IO_DP8390 + DP_ISR, 0xff);

#if 0
    printk("ne2000:rcv\n");
    page_no = inb(IO_DP8390 + DP_BNRY) + 1;
    if (page_no == ed.stoppage)
        page_no = ed.startpage;
    flag = 1;
    do {
        outb(IO_DP8390 + DP_CR, CR_PS_P1);
        curr = inb(IO_DP8390 + DP_CURR);
        outb(IO_DP8390 + DP_CR, CR_PS_P0);
        if (curr == page_no)
            break;
        ne2000_getblock(page_no, 0, sizeof(header), (char*)&header);
        ne2000_getblock(page_no, sizeof(rcv_header_t), sizeof(ethernet_t),
                        (char*)&ether);
        len = (header.rbcl | (header.rbch << 8)) - sizeof(rcv_header_t);
        printk("page_no = %x, curr = %x, length = %x\n", page_no, curr, len);
        printk("dst = ");
        printk("%x:", ether.dst[0]); printk("%x:", ether.dst[1]);
        printk("%x:", ether.dst[2]); printk("%x:", ether.dst[3]);
        printk("%x:", ether.dst[4]); printk("%x\n", ether.dst[5]);
        printk("src = ");
        printk("%x:", ether.src[0]); printk("%x:", ether.src[1]);
        printk("%x:", ether.src[2]); printk("%x:", ether.src[3]);
        printk("%x:", ether.src[4]); printk("%x\n", ether.src[5]);
        printk("type= %x\n", ether.type);
        next = header.next;
        if ((header.status & RSR_PRX)) {
            flag = 0;
        } else
            next = curr;
        if (next == ed.startpage)
            outb(IO_DP8390 + DP_BNRY, ed.stoppage - 1);
        else
            outb(IO_DP8390 + DP_BNRY, next - 1);
        page_no = next;
    } while (flag);
#endif

    return 0;
}
/* Async. stream output */
static void
fwe_as_input(struct fw_xferq *xferq)
{
    struct mbuf *m, *m0;
    struct ifnet *ifp;
    struct fwe_softc *fwe;
    struct fw_bulkxfer *sxfer;
    struct fw_pkt *fp;
    u_char *c;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
    struct ether_header *eh;
#endif

    fwe = (struct fwe_softc *)xferq->sc;
    ifp = fwe->eth_softc.ifp;

    /* We do not need a lock here because the bottom half is serialized */
    while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
        STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
        fp = mtod(sxfer->mbuf, struct fw_pkt *);
        if (fwe->fd.fc->irx_post != NULL)
            fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld);
        m = sxfer->mbuf;

        /* insert new rbuf */
        sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m0 != NULL) {
            m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
            STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
        } else
            printf("%s: m_getcl failed\n", __FUNCTION__);

        if (sxfer->resp != 0 || fp->mode.stream.len <
            ETHER_ALIGN + sizeof(struct ether_header)) {
            m_freem(m);
            ifp->if_ierrors ++;
            continue;
        }

        m->m_data += HDR_LEN + ETHER_ALIGN;
        c = mtod(m, u_char *);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
        eh = (struct ether_header *)c;
        m->m_data += sizeof(struct ether_header);
        m->m_len = m->m_pkthdr.len = fp->mode.stream.len -
            ETHER_ALIGN - sizeof(struct ether_header);
#else
        m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN;
#endif
        m->m_pkthdr.rcvif = ifp;
#if 0
        FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n",
                 c[0], c[1], c[2], c[3], c[4], c[5],
                 c[6], c[7], c[8], c[9], c[10], c[11],
                 c[12], c[13], c[14], c[15],
                 c[16], c[17], c[18], c[19],
                 c[20], c[21], c[22], c[23],
                 c[20], c[21], c[22], c[23]);
#endif
#if defined(__DragonFly__) || __FreeBSD_version < 500000
        ether_input(ifp, eh, m);
#else
        (*ifp->if_input)(ifp, m);
#endif
        ifp->if_ipackets ++;
    }
    if (STAILQ_FIRST(&xferq->stfree) != NULL)
        fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}
void xilTemacRxThreadSingle(struct ifnet* ifp)
{
    struct XilTemac* xilTemac = ifp->if_softc;
    uint32_t npkts = 0;

#ifdef DEBUG
    printk("%s: rxthread, packet rx on interface %s\n",
           DRIVER_PREFIX, xilTemac->iUnitName);
#endif

    uint32_t base = xilTemac->iAddr;

    /* While RECV_DONE_MASK in ipisr stays set */
    while (IN32(base + XTE_IPISR_OFFSET) & XTE_IPXR_RECV_DONE_MASK) {
        /* 1) Read the length of the packet */
        uint32_t bytes = IN32(base + XTE_RPLR_OFFSET);

        /* 2) Read the Read Status Register (which contains no information).
         * When all of these in the fifo have been read, then
         * XTE_IPXR_RECV_DONE_MASK will stay turned off, after it's
         * written to */
        IN32(base + XTE_RSR_OFFSET);

        npkts++;

        struct mbuf* m;
        struct ether_header* eh;

        /* 3) Get some memory from the ip stack to store the packet in */
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;

        /* 4) Copy the packet into the ip stack's memory */
        xilTemacFifoRead64(base, mtod(m, uint32_t*), bytes);

        m->m_len = bytes - sizeof(struct ether_header);
        m->m_pkthdr.len = bytes - sizeof(struct ether_header);

        eh = mtod(m, struct ether_header*);
        m->m_data += sizeof(struct ether_header);

        /* 5) Tell the ip stack about the received packet */
        ether_input(ifp, eh, m);

        /* 6) Try and turn off XTE_IPXR_RECV_DONE bit in the ipisr.  If
         * there are still more packets (ie RSR ! empty), then it will stay
         * asserted.  If there are no more packets, this will turn it off. */
        OUT32(base + XTE_IPISR_OFFSET, XTE_IPXR_RECV_DONE_MASK);
    }

    /* End) All Rx packets serviced, re-enable rx interrupt */
    uint32_t ipier = IN32(base + XTE_IPIER_OFFSET);
    ipier |= XTE_IPXR_RECV_DONE_MASK;
    OUT32(base + XTE_IPIER_OFFSET, ipier);

#ifdef DEBUG
    printk("%s: rxthread, retrieved %d packets\n", DRIVER_PREFIX, npkts);
#endif

    if (npkts > xilTemac->iStats.iRxMaxDrained) {
        xilTemac->iStats.iRxMaxDrained = npkts;
    }

    /* ??) Very very occasionally, under extremely high stress, I get a
     * situation where we process no packets.  That is, the rx thread was
     * evented, but there was no packet available.  I'm not sure how this
     * happens.  Ideally, it shouldn't occur, and I suspect a minor bug in
     * the driver.  However, for me it's happening 3 times in several
     * hundred million interrupts.  Nothing bad happens, as long as we
     * don't read from the rx fifo's if nothing is there.  It is just not
     * as efficient as possible (rx thread being evented pointlessly) and
     * a bit disconcerting about how it's occurring.
     * The best way to reproduce this is to have two clients run:
     * $ ping <host> -f -s 65507
     * This flood pings the device from two clients with the maximum size
     * ping packet.  It absolutely hammers the device under test.
     * Eventually, (if you leave it running overnight for instance), you'll
     * get a couple of these stray rx events. */
    if (npkts == 0) {
        /*printk("%s: RxThreadSingle: fatal error: event received, but no packets available\n",
               DRIVER_PREFIX);
        assert(0); */
        xilTemac->iStats.iRxStrayEvents++;
    }
}
err_t ether_output(struct netif *netif, struct pbuf *p, struct ip_addr *ipaddr)
{
    struct pbuf *q;
    struct eth_hdr *ethhdr;
    struct eth_addr *dest, mcastaddr;
    struct ip_addr *queryaddr;
    err_t err;
    int i;
    int loopback = 0;

    //kprintf("ether: xmit %d bytes, %d bufs\n", p->tot_len, pbuf_clen(p));

    if ((netif->flags & NETIF_UP) == 0)
        return -ENETDOWN;

    if (pbuf_header(p, ETHER_HLEN)) {
        kprintf(KERN_ERR "ether_output: not enough room for Ethernet header in pbuf\n");
        stats.link.err++;
        return -EBUF;
    }

    // Construct the Ethernet header.  Start by deciding which MAC address
    // to use as the destination address.  Broadcasts and multicasts are
    // special, all other addresses are looked up in the ARP table.
    queryaddr = ipaddr;
    if (ip_addr_isany(ipaddr) || ip_addr_isbroadcast(ipaddr, &netif->netmask)) {
        dest = (struct eth_addr *) &ethbroadcast;
    } else if (ip_addr_ismulticast(ipaddr)) {
        // Hash IP multicast address to MAC address.
        mcastaddr.addr[0] = 0x01;
        mcastaddr.addr[1] = 0x0;
        mcastaddr.addr[2] = 0x5e;
        mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f;
        mcastaddr.addr[4] = ip4_addr3(ipaddr);
        mcastaddr.addr[5] = ip4_addr4(ipaddr);
        dest = &mcastaddr;
    } else if (ip_addr_cmp(ipaddr, &netif->ipaddr)) {
        dest = &netif->hwaddr;
        loopback = 1;
    } else {
        if (ip_addr_maskcmp(ipaddr, &netif->ipaddr, &netif->netmask)) {
            // Use destination IP address if the destination is on the same subnet as we are.
            queryaddr = ipaddr;
        } else {
            // Otherwise we use the default router as the address to send the Ethernet frame to.
            queryaddr = &netif->gw;
        }
        dest = arp_lookup(queryaddr);
    }

    // If the arp_lookup() didn't find an address, we send out an ARP query for the IP address.
    if (dest == NULL) {
        q = arp_query(netif, &netif->hwaddr, queryaddr);
        if (q != NULL) {
            err = dev_transmit((dev_t) netif->state, q);
            if (err < 0) {
                kprintf(KERN_ERR "ether: error %d sending arp packet\n", err);
                pbuf_free(q);
                stats.link.drop++;
                return err;
            }
        }

        // Queue packet for transmission, when the ARP reply returns
        err = arp_queue(netif, p, queryaddr);
        if (err < 0) {
            kprintf(KERN_ERR "ether: error %d queueing packet\n", err);
            stats.link.drop++;
            stats.link.memerr++;
            return err;
        }

        return 0;
    }

    ethhdr = p->payload;

    for (i = 0; i < 6; i++) {
        ethhdr->dest.addr[i] = dest->addr[i];
        ethhdr->src.addr[i] = netif->hwaddr.addr[i];
    }
    ethhdr->type = htons(ETHTYPE_IP);

    if (loopback) {
        struct pbuf *q;

        q = pbuf_dup(PBUF_RAW, p);
        if (!q)
            return -ENOMEM;

        err = ether_input(netif, q);
        if (err < 0) {
            pbuf_free(q);
            return err;
        }
    } else {
        err = dev_transmit((dev_t) netif->state, p);
        if (err < 0) {
            kprintf(KERN_ERR "ether: error %d sending packet\n", err);
            return err;
        }
    }

    return 0;
}
static void
fec_rxDaemon (void *arg)
{
    struct m8xx_fec_enet_struct *sc = (struct m8xx_fec_enet_struct *)arg;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mbuf *m;
    uint16_t status;
    m8xxBufferDescriptor_t *rxBd;
    int rxBdIndex;

    /*
     * Allocate space for incoming packets and start reception
     */
    for (rxBdIndex = 0 ; ;) {
        rxBd = sc->rxBdBase + rxBdIndex;
        MGETHDR (m, M_WAIT, MT_DATA);
        MCLGET (m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
        sc->rxMbuf[rxBdIndex] = m;
        rxBd->buffer = mtod (m, void *);
        rxBd->status = M8xx_BD_EMPTY;
        m8xx.fec.r_des_active = 0x1000000;
        if (++rxBdIndex == sc->rxBdCount) {
            rxBd->status |= M8xx_BD_WRAP;
            break;
        }
    }

    /*
     * Input packet handling loop
     */
    rxBdIndex = 0;
    for (;;) {
        rxBd = sc->rxBdBase + rxBdIndex;

        /* Wait for packet if there's not one ready */
        if ((status = rxBd->status) & M8xx_BD_EMPTY) {
            /* Clear old events */
            m8xx.fec.ievent = M8xx_FEC_IEVENT_RFINT;

            /*
             * Wait for packet.  Note that the buffer descriptor is
             * checked *before* the event wait -- this catches the
             * possibility that a packet arrived between the `if'
             * above, and the clearing of the event register.
             */
            while ((status = rxBd->status) & M8xx_BD_EMPTY) {
                rtems_event_set events;

                /* Unmask RXF (Full frame received) event */
                m8xx.fec.ievent |= M8xx_FEC_IEVENT_RFINT;

                rtems_bsdnet_event_receive (INTERRUPT_EVENT,
                                            RTEMS_WAIT|RTEMS_EVENT_ANY,
                                            RTEMS_NO_TIMEOUT,
                                            &events);
            }
        }

        /* Check that packet is valid */
        if (status & M8xx_BD_LAST) {
            /*
             * Pass the packet up the chain.
             * FIXME: Packet filtering hook could be done here.
             */
            struct ether_header *eh;

            /* Invalidate the buffer for this descriptor */
            rtems_cache_invalidate_multiple_data_lines((const void *)rxBd->buffer,
                                                       rxBd->length);

            m = sc->rxMbuf[rxBdIndex];
            m->m_len = m->m_pkthdr.len = rxBd->length -
                sizeof(uint32_t) -
                sizeof(struct ether_header);
            eh = mtod (m, struct ether_header *);
            m->m_data += sizeof(struct ether_header);
            ether_input (ifp, eh, m);

            /* Allocate a new mbuf */
            MGETHDR (m, M_WAIT, MT_DATA);
            MCLGET (m, M_WAIT);
            m->m_pkthdr.rcvif = ifp;
            sc->rxMbuf[rxBdIndex] = m;
            rxBd->buffer = mtod (m, void *);
        }
        else {
            /* Something went wrong with the reception */
            if (!(status & M8xx_BD_LAST))
                sc->rxNotLast++;
            if (status & M8xx_BD_LONG)
                sc->rxGiant++;
            if (status & M8xx_BD_NONALIGNED)
                sc->rxNonOctet++;
            if (status & M8xx_BD_SHORT)
                sc->rxRunt++;
            if (status & M8xx_BD_CRC_ERROR)
                sc->rxBadCRC++;
            if (status & M8xx_BD_OVERRUN)
                sc->rxOverrun++;
            if (status & M8xx_BD_COLLISION)
                sc->rxCollision++;
        }

        /* Reenable the buffer descriptor */
        rxBd->status = (status & M8xx_BD_WRAP) | M8xx_BD_EMPTY;
        m8xx.fec.r_des_active = 0x1000000;

        /* Move to next buffer descriptor */
        if (++rxBdIndex == sc->rxBdCount)
            rxBdIndex = 0;
    }
}
static void
rxDaemon(void *arg)
{
    struct bfin_ethernetSoftc *sc;
    struct ifnet *ifp;
    struct mbuf *m;
    struct mbuf *rxPacket;
    void *dataPtr;
    rtems_event_set events;
    struct ether_header *eh;
    rxStatusT *status;
    uint32_t rxStatus;
    int head;
    int prevHead;
    int length;
    void *ethBase;
    void *rxdmaBase;

    sc = (struct bfin_ethernetSoftc *) arg;
    rxdmaBase = sc->rxdmaBase;
    ethBase = sc->ethBase;
    ifp = &sc->arpcom.ac_if;

    prevHead = sc->rxDescCount - 1;
    head = 0;

    BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_RE;

    while (1) {
        status = sc->rx[head].status.addr;
        rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
        while (status->status != 0) {
            if (status->status & EMAC_RX_STAT_RX_OK) {
                /* get new cluster to replace this one */
                MGETHDR(m, M_WAIT, MT_DATA);
                MCLGET(m, M_WAIT);
                m->m_pkthdr.rcvif = ifp;
            } else
                m = NULL;

            rxStatus = status->status;
            /* update statistics */

            if (m) {
                /* save received packet to send up a little later */
                rxPacket = sc->rx[head].m;
                dataPtr = sc->rx[head].data.addr;

                /* setup dma for new cluster */
                sc->rx[head].m = m;
                sc->rx[head].data.addr =
                    (void *) (((intptr_t) m->m_data + 3) & ~3);
                /* invalidate cache for new data buffer, in case any
                   lines are dirty from previous owner */
                rtems_cache_invalidate_multiple_data_lines(
                    sc->rx[head].data.addr,
                    BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
            } else
                rxPacket = NULL;

            sc->rx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
            rtems_cache_flush_multiple_data_lines(&sc->rx[head],
                                                  sizeof(sc->rx[head]));

            /* mark descriptor as empty */
            status->status = 0;
            rtems_cache_flush_multiple_data_lines(&status->status,
                                                  sizeof(status->status));

            /* allow dma to continue from previous descriptor into this
               one */
            sc->rx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
            rtems_cache_flush_multiple_data_lines(
                &sc->rx[prevHead].status.dmaConfig,
                sizeof(sc->rx[prevHead].status.dmaConfig));

            if (rxPacket) {
                /* send it up */
                eh = (struct ether_header *) ((intptr_t) dataPtr + 2);
                rxPacket->m_data = (caddr_t) ((intptr_t) dataPtr + 2 + 14);
                length = (rxStatus & EMAC_RX_STAT_RX_FRLEN_MASK) >>
                    EMAC_RX_STAT_RX_FRLEN_SHIFT;
                rxPacket->m_len = length - 14;
                rxPacket->m_pkthdr.len = rxPacket->m_len;
                /* invalidate packet buffer cache again (even though it
                   was invalidated prior to giving it to dma engine),
                   because speculative reads might cause cache lines to
                   be filled at any time */
                rtems_cache_invalidate_multiple_data_lines(eh, length);
                ether_input(ifp, eh, rxPacket);
            }

            if (++prevHead == sc->rxDescCount)
                prevHead = 0;
            if (++head == sc->rxDescCount)
                head = 0;
            status = sc->rx[head].status.addr;
            rtems_cache_invalidate_multiple_data_lines(status,
                                                       sizeof(*status));
        }

        /* if dma stopped before the next descriptor, restart it */
        if ((BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) &
             DMA_IRQ_STATUS_DMA_RUN) == 0 &&
            BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
            (uint32_t) &sc->rx[head].data) {
            BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
        }

        rtems_bsdnet_event_receive(INTERRUPT_EVENT,
                                   RTEMS_WAIT | RTEMS_EVENT_ANY,
                                   RTEMS_NO_TIMEOUT, &events);
    }
}
int
ether_input(struct nm_if *nmif, int ring, char *buf, int len)
{
    int err;
    struct ether_header *eh;
    struct ether_vlan_header *evl;
    struct nm_if_vlan *vlan;

    if (len < ETHER_HDR_LEN) {
        DPRINTF("%s: discarding packet, too short.\n", __func__);
        pktcnt.rx_drop++;
        return (-1);
    }
    err = 0;
    eh = (struct ether_header *)buf;
    switch (ntohs(eh->ether_type)) {
    case ETHERTYPE_ARP:
        pktcnt.rx_arp++;
        err = arp_input(nmif, ring, buf + ETHER_HDR_LEN, len - ETHER_HDR_LEN);
        break;
    case ETHERTYPE_IP:
        pktcnt.rx_ip++;
        err = ip_input(nmif, ring, buf + ETHER_HDR_LEN, len - ETHER_HDR_LEN);
        break;
    case ETHERTYPE_VLAN:
        //pktcnt.rx_vlan++;
        if (len < ETHER_VLAN_ENCAP_LEN) {
            DPRINTF("%s: discarding vlan packet, too short.\n", __func__);
            pktcnt.rx_drop++;
            return (-1);
        }
        evl = (struct ether_vlan_header *)buf;
        vlan = if_find_vlan(nmif, ntohs(evl->evl_tag));
        if (vlan == NULL) {
            pktcnt.rx_drop++;
            DPRINTF("%s: unknown vlan tag %d, discarding packet.\n",
                    __func__, ntohs(evl->evl_tag));
            return (-1);
        }
        memmove(buf + ETHER_VLAN_ENCAP_LEN, buf, ETHER_ADDR_LEN * 2);
        err = ether_input(vlan->nmif, ring, buf + ETHER_VLAN_ENCAP_LEN,
                          len - ETHER_VLAN_ENCAP_LEN);
        if (!nohostring && err == 1) {
            memmove(buf, buf + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN * 2);
            evl = (struct ether_vlan_header *)buf;
            evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
            evl->evl_tag = htons(vlan->nmif->nm_if_vtag);
            ether_bridge(vlan->nmif, ring, buf, len);
            return (0);
        }
        break;
    default:
        pktcnt.rx_drop++;
        DPRINTF("%s: protocol %#04x not supported, discarding packet.\n",
                __func__, ntohs(eh->ether_type));
        err = -1;
    }

    return (err);
}
/* reader task */
void mc9328mxl_enet_rx_task(void *arg)
{
    mc9328mxl_enet_softc_t *sc = (mc9328mxl_enet_softc_t *)arg;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mbuf *m;
    struct ether_header *eh;
    rtems_event_set events;
    int pktlen;
    uint16_t rsw;
    uint16_t bc;
    uint16_t cbyte;
    int i;
    uint16_t int_reg;

    /* Input packet handling loop */
    while (1) {
        rtems_bsdnet_event_receive(START_RECEIVE_EVENT,
                                   RTEMS_EVENT_ANY | RTEMS_WAIT,
                                   RTEMS_NO_TIMEOUT, &events);

        /* Configure for reads from RX data area */
        lan91c11x_write_reg(LAN91C11X_PTR, (LAN91C11X_PTR_AUTOINC |
                                            LAN91C11X_PTR_RCV |
                                            LAN91C11X_PTR_READ));

        /* read the receive status word */
        rsw = lan91c11x_read_reg(LAN91C11X_DATA);
        /* TBD: Need to check rsw here */

        /* read the byte count */
        bc = lan91c11x_read_reg(LAN91C11X_DATA);
        pktlen = (bc & 0x7ff) - 6;

        /* get an mbuf for this packet */
        MGETHDR(m, M_WAIT, MT_DATA);

        /* now get a cluster pointed to by the mbuf */
        /* since an mbuf by itself is too small */
        MCLGET(m, M_WAIT);

        lan91c11x_lock();

        /* Copy the received packet into an mbuf */
        for (i = 0; i < (pktlen / 2); i++) {
            ((uint16_t*)m->m_ext.ext_buf)[i] =
                lan91c11x_read_reg_fast(LAN91C11X_DATA);
        }

        cbyte = lan91c11x_read_reg_fast(LAN91C11X_DATA);
        if (cbyte & LAN91C11X_PKT_CTRL_ODD) {
            ((uint16_t*)m->m_ext.ext_buf)[i] = cbyte;
            pktlen++;
        }
        lan91c11x_unlock();

        /* Release the packet's memory */
        lan91c11x_write_reg(LAN91C11X_MMUCMD, LAN91C11X_MMUCMD_REMTOP);

        /* set the receiving interface */
        m->m_pkthdr.rcvif = ifp;
        m->m_nextpkt = 0;

        /* set the length of the mbuf */
        m->m_len = pktlen - (sizeof(struct ether_header));
        m->m_pkthdr.len = m->m_len;

        /* strip off the ethernet header from the mbuf */
        /* but save the pointer to it */
        eh = mtod (m, struct ether_header *);
        m->m_data += sizeof(struct ether_header);

        softc.stats.rx_packets++;

        /* give all this stuff to the stack */
        ether_input(ifp, eh, m);

        /* re-enable RX interrupts */
        int_reg = lan91c11x_read_reg(LAN91C11X_INT);
        int_reg |= LAN91C11X_INT_RXMASK;
        lan91c11x_write_reg(LAN91C11X_INT, int_reg);
    }
} /* mc9328mxl_enet_rx_task */
static void
wd_rxDaemon (void *arg)
{
    unsigned int tport;
    struct ether_header *eh;
    struct wd_softc *dp = (struct wd_softc *)&wd_softc[0];
    struct ifnet *ifp = &dp->arpcom.ac_if;
    struct mbuf *m;
    unsigned int i2;
    unsigned int len;
    volatile unsigned char start, next, current;
    unsigned char *shp, *temp;
    unsigned short *real_short_ptr;
    rtems_event_set events;

    tport = wd_softc[0].port;
    for (;;) {
        rtems_bsdnet_event_receive (INTERRUPT_EVENT,
                                    RTEMS_WAIT|RTEMS_EVENT_ANY,
                                    RTEMS_NO_TIMEOUT,
                                    &events);
        for (;;) {
            inport_byte(tport+BNRY, start);

            outport_byte(tport+CMDR, MSK_PG1 + MSK_RD2);
            inport_byte(tport+CURR, current);
            outport_byte(tport+CMDR, MSK_PG0 + MSK_RD2);

            start += 1;
            if (start >= OUTPAGE) {
                start = 0;
            }

            if (current == start)
                break;

            /* real_short_ptr avoids cast on lvalue which gcc no longer allows */
            shp = dp->base + 1 + (SHAPAGE * start);
            next = *shp++;
            real_short_ptr = (unsigned short *)shp;
            len = *(real_short_ptr)++ - 4;

            if (next >= OUTPAGE) {
                next = 0;
            }

            MGETHDR (m, M_WAIT, MT_DATA);
            MCLGET (m, M_WAIT);
            m->m_pkthdr.rcvif = ifp;

            temp = (unsigned char *) m->m_data;
            m->m_len = m->m_pkthdr.len = len - sizeof(struct ether_header);

            if ((i2 = (OUTPAGE - start) * SHAPAGE - 4) < len) {
                memcpy(temp, shp, i2);
                len -= i2;
                temp += i2;
                shp = dp->base;
            }
            memcpy(temp, shp, len);

            eh = mtod (m, struct ether_header *);
            m->m_data += sizeof(struct ether_header);
            ether_input (ifp, eh, m);

            outport_byte(tport+BNRY, next-1);
        }

        /*
         * Ring overwrite
         */
        if (overrun) {
            outport_byte(tport+ISR, MSK_OVW);   /* reset IR */
            outport_byte(tport+TCR, 0);         /* out of loopback */
            if (resend == 1)
                outport_byte(tport+CMDR, MSK_TXP + MSK_RD2);  /* resend */
            resend = 0;
            overrun = 0;
        }

        outport_byte(tport+IMR, 0x15);  /* re-enable IT rx */
    }
}
/* EMAC reader task */
void at91rm9200_emac_rxDaemon(void *arg)
{
    at91rm9200_emac_softc_t *sc = (at91rm9200_emac_softc_t *)arg;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mbuf *m;
    struct ether_header *eh;
    rtems_event_set events;
    int pktlen;

    /* Input packet handling loop */
    for (;;) {
        /* turn on RX interrupts, then wait for one */
        EMAC_REG(EMAC_IER) = (EMAC_INT_RCOM |   /* Receive complete */
                              EMAC_INT_RBNA |   /* Receive buf not available */
                              EMAC_INT_ROVR);   /* Receive overrun */

        rtems_bsdnet_event_receive(START_RECEIVE_EVENT,
                                   RTEMS_EVENT_ANY | RTEMS_WAIT,
                                   RTEMS_NO_TIMEOUT, &events);

        if (EMAC_REG(EMAC_RSR) & EMAC_RSR_BNA) {
            printk("1: EMAC_BNA\n");
        }
        if (EMAC_REG(EMAC_RSR) & EMAC_RSR_OVR) {
            printk("1: EMAC_OVR\n");
        }

        /* clear the receive status as we do not use it anyway */
        EMAC_REG(EMAC_RSR) = (EMAC_RSR_REC | EMAC_RSR_OVR | EMAC_RSR_BNA);

        /* scan the buffer descriptors looking for any with data in them */
        while (rxbuf_hdrs[sc->rx_buf_idx].address & RXBUF_ADD_OWNED) {
            pktlen = rxbuf_hdrs[sc->rx_buf_idx].status & RXBUF_STAT_LEN_MASK;

            /* get an mbuf for this packet */
            MGETHDR(m, M_WAIT, MT_DATA);

            /* now get a cluster pointed to by the mbuf */
            /* since an mbuf by itself is too small */
            MCLGET(m, M_WAIT);

            /* set the type of mbuf to ifp (ethernet I/F) */
            m->m_pkthdr.rcvif = ifp;
            m->m_nextpkt = 0;

            /* copy the packet into the cluster pointed to by the mbuf */
            memcpy((char *)m->m_ext.ext_buf,
                   (char *)(rxbuf_hdrs[sc->rx_buf_idx].address & 0xfffffffc),
                   pktlen);

            /* Release the buffer ASAP back to the EMAC */
            rxbuf_hdrs[sc->rx_buf_idx].address &= ~RXBUF_ADD_OWNED;

            /* set the length of the mbuf */
            m->m_len = pktlen - (sizeof(struct ether_header) + 4);
            m->m_pkthdr.len = m->m_len;

            /* strip off the ethernet header from the mbuf */
            /* but save the pointer to it */
            eh = mtod (m, struct ether_header *);
            m->m_data += sizeof(struct ether_header);

            /* increment the buffer index */
            sc->rx_buf_idx++;
            if (sc->rx_buf_idx >= NUM_RXBDS) {
                sc->rx_buf_idx = 0;
            }

            /* give all this stuff to the stack */
            ether_input(ifp, eh, m);
        } /* while ADD_OWNED = 0 */

        if (EMAC_REG(EMAC_RSR) & EMAC_RSR_BNA) {
            printk("2:EMAC_BNA\n");
        }
        if (EMAC_REG(EMAC_RSR) & EMAC_RSR_OVR) {
            printk("2:EMAC_OVR\n");
        }
    } /* for (;;) */
} /* at91rm9200_emac_rxDaemon() */
/* Async. stream output */
static void
fwe_as_input(struct fw_xferq *xferq)
{
    struct mbuf *m;
    struct ether_header *eh;
    struct ifnet *ifp;
    struct fw_xfer *xfer;
    struct fwe_softc *fwe;
    u_char *c;
    int len;
    caddr_t p;

    fwe = (struct fwe_softc *)xferq->sc;
    ifp = &fwe->fwe_if;
#if 0
    FWE_POLL_REGISTER(fwe_poll, fwe, ifp);
#endif
    while ((xfer = STAILQ_FIRST(&xferq->q)) != NULL) {
        STAILQ_REMOVE_HEAD(&xferq->q, link);
        xferq->queued --;
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL) {
            printf("MGETHDR failed\n");
            fw_xfer_free(xfer);
            return;
        }
        len = xfer->recv.off + xfer->recv.len;
        FWEDEBUG("fwe_as_input len=%d\n", len);
#if __FreeBSD_version >= 500000
        MEXTADD(m, xfer->recv.buf, len, fwe_free, NULL, 0, EXT_NET_DRV);
#else
        m->m_flags |= M_EXT;
        m->m_ext.ext_buf = xfer->recv.buf;
        m->m_ext.ext_size = len;
        m->m_ext.ext_free = fwe_free;
        m->m_ext.ext_ref = fwe_ref;
        *((int *)m->m_ext.ext_buf) = 1;  /* XXX refcount */
#endif
        p = xfer->recv.buf + xfer->recv.off + HDR_LEN + ALIGN_PAD;
        eh = (struct ether_header *)p;
#if __FreeBSD_version >= 500000
        len -= xfer->recv.off + HDR_LEN + ALIGN_PAD;
#else
        p += sizeof(struct ether_header);
        len -= xfer->recv.off + HDR_LEN + ALIGN_PAD +
            sizeof(struct ether_header);
#endif
        m->m_data = p;
        m->m_len = m->m_pkthdr.len = len;
        m->m_pkthdr.rcvif = ifp;
        c = (char *)eh;
#if 0
        FWEDEBUG("%02x %02x %02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n"
                 "%02x %02x %02x %02x\n",
                 c[0], c[1], c[2], c[3], c[4], c[5],
                 c[6], c[7], c[8], c[9], c[10], c[11],
                 c[12], c[13], c[14], c[15],
                 c[16], c[17], c[18], c[19],
                 c[20], c[21], c[22], c[23],
                 c[20], c[21], c[22], c[23]);
#endif
#if __FreeBSD_version >= 500000
        (*ifp->if_input)(ifp, m);
#else
        ether_input(ifp, eh, m);
#endif
        ifp->if_ipackets ++;
        xfer->recv.buf = NULL;
        fw_xfer_free(xfer);
    }
}