/*
 * Pull one received frame out of the driver buffer, verify its FCS and,
 * on success, hand the payload up the stack via ether_input().
 *
 * NOTE(review): the assumed buffer layout is 8 bytes of preamble, then
 * the Ethernet header and payload, then a 4-byte CRC stored little-endian
 * at the tail -- confirm against the MAC's actual DMA format.
 */
static void receive_packet(uint8_t *buffer, int length)
{
  struct ifnet *ifp = &arpcom.ac_if;
  struct mbuf *m;
  struct ether_header *eh;
  uint32_t computed_crc, net_crc;

  /* Drop runt frames (below the 64-byte minimum, preamble+CRC included). */
  if(length < 64)
    {
      printk("Warning: Ethernet packet too short\n");
      return;
    }

  length -= 4; /* strip CRC */
  /* Read the trailing CRC in little-endian byte order. */
  net_crc = ((uint32_t)buffer[length]) |
            ((uint32_t)buffer[length+1] << 8) |
            ((uint32_t)buffer[length+2] << 16) |
            ((uint32_t)buffer[length+3] << 24);

  length -= 8; /* strip preamble */
  /* CRC is computed over everything after the preamble, then inverted. */
  computed_crc = ether_crc32_le(&buffer[8], length) ^ 0xffffffff;

  if(computed_crc == net_crc)
    {
      /* M_WAIT: allocation blocks until an mbuf/cluster is available. */
      MGETHDR(m, M_WAIT, MT_DATA);
      MCLGET(m, M_WAIT);

      length -= sizeof(struct ether_header); /* strip Ethernet header */
      /* Copy only the payload; the header is passed separately below. */
      memcpy(m->m_data, &buffer[8+sizeof(struct ether_header)], length);
      m->m_len = m->m_pkthdr.len = length;
      m->m_pkthdr.rcvif = ifp;

      eh = (struct ether_header *)&buffer[8];
      ether_input(ifp, eh, m);
    }
  else
    printk("Ethernet CRC error: got %08x expected %08x (len=%d)\n",
           net_crc, computed_crc, length);
}
unsigned char* Ecos_MemPool_Alloc ( unsigned long Length, int Type) { struct mbuf *pMBuf = NULL; switch (Type) { case MemPool_TYPE_Header: MGETHDR(pMBuf, M_DONTWAIT, MT_DATA); break; case MemPool_TYPE_CLUSTER: MGETHDR(pMBuf, M_DONTWAIT, MT_DATA); if (pMBuf== NULL) return NULL; MCLGET(pMBuf, M_DONTWAIT); if ((pMBuf->m_flags & M_EXT) == 0) { m_freem(pMBuf); return NULL; } break; default: DBGPRINT(RT_DEBUG_ERROR, ("%s: Unknown Type %d\n", __FUNCTION__, Type)); break; } return pMBuf; }
/*
 * SSCF-UNI to SSCOP glue: deliver signal 'sig' (plus optional data 'm'
 * and argument 'arg') to this node's lower (sscop) hook.  A sscop_arg
 * record carrying the signal is placed in front of the data.
 */
static void
sscfu_send_lower(struct sscfu *sscf, void *p, enum sscop_aasig sig,
    struct mbuf *m, u_int arg)
{
	node_p node = (node_p)p;
	struct priv *priv = NG_NODE_PRIVATE(node);
	int error;
	struct sscop_arg *a;

	/* No lower hook connected: silently drop any supplied data. */
	if (priv->lower == NULL) {
		if (m != NULL)
			m_freem(m);
		return;
	}

	/*
	 * Prepend the sscop_arg.  With no data supplied, allocate a fresh
	 * header-only mbuf for it; otherwise M_PREPEND in front of the
	 * existing chain.  Either path fails silently on allocation error.
	 */
	if (m == NULL) {
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL)
			return;
		m->m_len = sizeof(struct sscop_arg);
		m->m_pkthdr.len = m->m_len;
	} else {
		M_PREPEND(m, sizeof(struct sscop_arg), M_NOWAIT);
		if (m == NULL)
			return;
	}

	a = mtod(m, struct sscop_arg *);
	a->sig = sig;
	a->arg = arg;

	NG_SEND_DATA_ONLY(error, priv->lower, m);
}
struct mbuf* dme_alloc_receive_buffer(struct ifnet *ifp, unsigned int frame_length) { struct dme_softc *sc = ifp->if_softc; struct mbuf *m; int pad; MGETHDR(m, M_DONTWAIT, MT_DATA); m->m_pkthdr.rcvif = ifp; /* Ensure that we always allocate an even number of * bytes in order to avoid writing beyond the buffer */ m->m_pkthdr.len = frame_length + (frame_length % sc->sc_data_width); pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header); /* All our frames have the CRC attached */ m->m_flags |= M_HASFCS; if (m->m_pkthdr.len + pad > MHLEN ) MCLGET(m, M_DONTWAIT); m->m_data += pad; m->m_len = frame_length + (frame_length % sc->sc_data_width); return m; }
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, olen;

	/* Walk to the mbuf that contains the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);	/* chain shorter than len0 */
	remain = m->m_len - len;	/* bytes of 'm' that belong to the tail */

	if (m0->m_flags & M_PKTHDR) {
		/* The tail becomes its own packet; give it a header. */
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		if (m_dup_pkthdr(n, m0, wait)) {
			m_freem(n);
			return (NULL);
		}
		n->m_pkthdr.len -= len0;
		olen = m0->m_pkthdr.len;	/* saved for rollback below */
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			/* Recurse to split 'm' itself; hang result off 'n'. */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				m0->m_pkthdr.len = olen;	/* rollback */
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split lands exactly on an mbuf boundary: just unlink. */
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share external storage rather than copying it. */
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
static void sscop_send_manage(struct sscop *sscop, void *p, enum sscop_maasig sig, struct SSCOP_MBUF_T *m, u_int err, u_int cnt) { node_p node = (node_p)p; struct priv *priv = NG_NODE_PRIVATE(node); int error; struct sscop_merr *e; struct sscop_marg *a; if (priv->manage == NULL) { if (m != NULL) m_freem(m); priv->stats.maa_dropped++; return; } if (sig == SSCOP_MERROR_indication) { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return; m->m_len = sizeof(*e); m->m_pkthdr.len = m->m_len; e = mtod(m, struct sscop_merr *); e->sig = sig; e->err = err; e->cnt = cnt; priv->stats.errors++; } else if (m == NULL) {
/*
 * Transmit-side bottom half for virtio-net: drain up to 'budget' buffer
 * chains from the guest TX virtqueue and forward each packet to the
 * virtual switch as an mbuf.
 */
static void virtio_net_tx_lazy(struct vmm_netport *port, void *arg, int budget)
{
	u16 head = 0;
	u32 iov_cnt = 0, pkt_len = 0, total_len = 0;
	struct virtio_net_dev *ndev = arg;
	struct virtio_device *dev = ndev->vdev;
	struct virtio_queue *vq = &ndev->vqs[VIRTIO_NET_TX_QUEUE];
	struct virtio_iovec *iov = ndev->tx_iov;
	struct vmm_mbuf *mb;

	while ((budget > 0) && virtio_queue_available(vq)) {
		head = virtio_queue_get_iovec(vq, iov, &iov_cnt, &total_len);

		/* iov[0] is offload info */
		pkt_len = total_len - iov[0].len;

		/* Oversized packets are skipped but still consumed below. */
		if (pkt_len <= VIRTIO_NET_MTU) {
			MGETHDR(mb, 0, 0);
			MEXTMALLOC(mb, pkt_len, M_WAIT);
			/* Copy guest payload (past the offload header) into
			 * the freshly allocated mbuf data area. */
			virtio_iovec_to_buf_read(dev, &iov[1], iov_cnt - 1,
						 M_BUFADDR(mb), pkt_len);

			mb->m_len = mb->m_pktlen = pkt_len;
			vmm_port2switch_xfer_mbuf(ndev->port, mb);
		}

		/* Return the descriptor chain to the guest. */
		virtio_queue_set_used_elem(vq, head, total_len);
		budget--;
	}

	/* Interrupt the guest only if it requested a completion signal. */
	if (virtio_queue_should_signal(vq)) {
		dev->tra->notify(dev, VIRTIO_NET_TX_QUEUE);
	}
}
/*
 * Helper for sbappendchainaddr: prepend a struct sockaddr* to
 * an mbuf chain.  Returns the new chain head, or NULL if no mbuf
 * could be allocated (the original chain is left untouched).
 */
static inline struct mbuf *
m_prepend_sockaddr(struct sockbuf *sb, struct mbuf *m0,
		   const struct sockaddr *asa)
{
	struct mbuf *m;
	const int salen = asa->sa_len;

	/* Caller must hold the socket lock. */
	KASSERT(solocked(sb->sb_so));

	/* only the first in each chain need be a pkthdr */
	MGETHDR(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	MCLAIM(m, sb->sb_mowner);

#ifdef notyet
	/* Disabled: external storage for oversized addresses. */
	if (salen > MHLEN) {
		MEXTMALLOC(m, salen, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
#else
	/* For now the address must fit in the mbuf header itself. */
	KASSERT(salen <= MHLEN);
#endif

	/* Copy the sockaddr in and link the original chain behind it. */
	m->m_len = salen;
	memcpy(mtod(m, void *), asa, salen);
	m->m_next = m0;
	/* The new head carries the packet length for the whole chain. */
	m->m_pkthdr.len = salen + m0->m_pkthdr.len;

	return m;
}
/*
 * DON'T use free_sent_buffers to drop the queue!
 */
/*
 * Refill the receive ring: allocate cluster mbufs and attach them to
 * free rx buffer descriptors until the software rx queue is full or an
 * allocation fails.
 */
static void
alloc_rx_buffers(struct sbsh_softc *sc)
{
	unsigned cur_rbd = sc->regs->LRDR & 0x7f;
	struct mbuf *m;

	while (sc->tail_rq != ((sc->head_rq - 1) & (RQLEN - 1))) {
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (!m) {
			if_printf (&sc->arpcom.ac_if,
				"unable to get mbuf.\n");
			return;
		}

		if (SBNI16_MAX_FRAME > MHLEN) {
			MCLGET(m, M_NOWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m);
				if_printf (&sc->arpcom.ac_if,
					"unable to get mbuf cluster.\n");
				return;
			}
			m->m_pkthdr.len = m->m_len = MCLBYTES;
		}
		/* NOTE(review): if SBNI16_MAX_FRAME <= MHLEN the mbuf's
		 * m_len/m_pkthdr.len are never initialized here -- confirm
		 * the rx completion path sets them. */

		m_adj(m, 2);	/* align ip on longword boundaries */

		sc->rq[sc->tail_rq++] = m;
		sc->tail_rq &= (RQLEN - 1);

		/* Hand the buffer's physical address to the controller. */
		sc->rbd[cur_rbd].address = vtophys(mtod(m, vm_offset_t));
		sc->rbd[cur_rbd].length  = 0;
		sc->regs->LRDR = cur_rbd = (cur_rbd + 1) & 0x7f;
	}
}
/* * Insert IP options into preformed packet. Adjust IP destination as * required for IP source routing, as indicated by a non-zero in_addr at the * start of the options. * * XXX This routine assumes that the packet has no options in place. */ struct mbuf * ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen) { struct ipoption *p = mtod(opt, struct ipoption *); struct mbuf *n; struct ip *ip = mtod(m, struct ip *); unsigned optlen; optlen = opt->m_len - sizeof(p->ipopt_dst); if (optlen + ip->ip_len > IP_MAXPACKET) { *phlen = 0; return (m); /* XXX should fail */ } if (p->ipopt_dst.s_addr) ip->ip_dst = p->ipopt_dst; if (m->m_flags & M_EXT || m->m_data - optlen < m->m_pktdat) { MGETHDR(n, M_DONTWAIT, MT_DATA); if (n == NULL) { *phlen = 0; return (m); } M_MOVE_PKTHDR(n, m); n->m_pkthdr.rcvif = NULL; n->m_pkthdr.len += optlen; m->m_len -= sizeof(struct ip); m->m_data += sizeof(struct ip); n->m_next = m; m = n; m->m_len = optlen + sizeof(struct ip); m->m_data += max_linkhdr; bcopy(ip, mtod(m, void *), sizeof(struct ip)); } else {
Static int url_newbuf(struct url_softc *sc, struct url_chain *c, struct mbuf *m) { struct mbuf *m_new = NULL; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->sc_dev)); return (ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->sc_dev)); m_freem(m_new); return (ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); c->url_mbuf = m_new; return (0); }
/*
 * Function: mcf548x_fec_rx_bd_init
 *
 * Description: Initialize the receive buffer descriptor ring.
 *
 * Returns: void
 *
 * Notes: Space for the buffers of rx BDs is allocated by the rx deamon
 *
 */
static void mcf548x_fec_rx_bd_init(struct mcf548x_enet_struct *sc) {
  int rxBdIndex;
  struct mbuf *m;
  struct ifnet *ifp = &sc->arpcom.ac_if;

  /*
   * Fill RX buffer descriptor ring.
   */
  for( rxBdIndex = 0; rxBdIndex < sc->rxBdCount; rxBdIndex++ ) {
    /* M_WAIT: block until an mbuf and cluster become available. */
    MGETHDR (m, M_WAIT, MT_DATA);
    MCLGET (m, M_WAIT);

    m->m_pkthdr.rcvif = ifp;
    sc->rxMbuf[rxBdIndex] = m;
    /* Invalidate the cache so the CPU sees DMA-written data later. */
    rtems_cache_invalidate_multiple_data_lines(mtod(m,const void *),
                                               ETHER_MAX_LEN);
    SET_BD_BUFFER(sc->rxBd+rxBdIndex,mtod(m, void *));
    SET_BD_LENGTH(sc->rxBd+rxBdIndex,ETHER_MAX_LEN);
    /* Mark the descriptor empty for the FEC; the last one wraps the ring. */
    SET_BD_STATUS(sc->rxBd+rxBdIndex,
                  MCF548X_FEC_RBD_EMPTY |
                  MCF548X_FEC_RBD_INT |
                  ((rxBdIndex == sc->rxBdCount-1) ? MCF548X_FEC_RBD_WRAP : 0));
  }
}
/* * Add a receive buffer to the indicated descriptor. */ int ni_add_rxbuf(struct ni_softc *sc, struct ni_dg *data, int idx) { struct ni_bbd *bd = &bbd[idx]; struct mbuf *m; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); return (ENOBUFS); } m->m_data += 2; bd->nb_len = (m->m_ext.ext_size - 2); bd->nb_pte = (long)kvtopte(m->m_ext.ext_buf); bd->nb_status = 2 | NIBD_VALID; bd->nb_key = 1; data->bufs[0]._offset = 0; data->bufs[0]._len = bd->nb_len; data->bufs[0]._index = idx; data->nd_cmdref = (long)m; return (0); }
/*
 * Allocate a packet-header mbuf large enough to hold 'len' bytes,
 * attaching a cluster when len exceeds MHLEN.  m_len and m_pkthdr.len
 * are set to 'len' on success.  Returns NULL for invalid lengths or
 * allocation failure.
 *
 * Fix: the original returned early (len <= MHLEN) without initializing
 * m_len/m_pkthdr.len, while the cluster path did set them -- callers got
 * an mbuf with stale/uninitialized lengths in the small case.  Both
 * paths now set the length fields consistently.
 */
struct mbuf*
wan_mbuf_alloc(int len)
{
	struct mbuf	*m;

	/* XXX handle len > MCLBYTES */
	if (len <= 0 || len > MCLBYTES)
		return (NULL);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	if (len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (NULL);
		}
	}

	m->m_pkthdr.len = len;
	m->m_len = len;

	return (m);
}
/* * Initialize an RX descriptor and attach an MBUF cluster. */ int kue_newbuf(struct kue_softc *sc, struct kue_chain *c, struct mbuf *m) { struct mbuf *m_new = NULL; DPRINTFN(10,("%s: %s: enter\n", sc->kue_dev.dv_xname,__func__)); if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("%s: no memory for rx list " "-- packet dropped!\n", sc->kue_dev.dv_xname); return (ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("%s: no memory for rx list " "-- packet dropped!\n", sc->kue_dev.dv_xname); m_freem(m_new); return (ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } c->kue_mbuf = m_new; return (0); }
/* Add a receive buffer to the indiciated descriptor. */ int bce_add_rxbuf(struct bce_softc *sc, int idx) { struct mbuf *m; int error; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); return (ENOBUFS); } if (sc->bce_cdata.bce_rx_chain[idx] != NULL) bus_dmamap_unload(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx]); sc->bce_cdata.bce_rx_chain[idx] = m; error = bus_dmamap_load(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT); if (error) return (error); bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], 0, sc->bce_cdata.bce_rx_map[idx]->dm_mapsize, BUS_DMASYNC_PREREAD); BCE_INIT_RXDESC(sc, idx); return (0); }
static struct mbuf * repack(struct sbsh_softc *sc, struct mbuf *m) { struct mbuf *m_new; MGETHDR(m_new, M_NOWAIT, MT_DATA); if (!m_new) { if_printf (&sc->arpcom.ac_if, "unable to get mbuf.\n"); return (NULL); } if (m->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_NOWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); if_printf (&sc->arpcom.ac_if, "unable to get mbuf cluster.\n"); return (NULL); } } m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; m_freem(m); return (m_new); }
/*
 * send data by simply allocating an MBUF packet
 * header and pointing it to our data region.
 *
 * Optionally, the caller may supply 'reference'
 * and 'free' procs. (The latter may call the
 * user back once the networking stack has
 * released the buffer).
 *
 * The callbacks are provided with the 'closure'
 * pointer and the 'buflen' argument.
 */
ssize_t
sendto_nocpy (
		int s,
		const void *buf, size_t buflen,
		int flags,
		const struct sockaddr *toaddr, int tolen,
		void *closure,
		void (*freeproc)(caddr_t, u_int),
		void (*refproc)(caddr_t, u_int)
)
{
	int		error;
	struct socket	*so;
	struct mbuf	*to, *m;
	int		ret = -1;

	/* All BSD stack work happens under the network semaphore. */
	rtems_bsdnet_semaphore_obtain ();
	if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
		rtems_bsdnet_semaphore_release ();
		return -1;
	}

	/* Build a sockaddr mbuf for the destination address. */
	error = sockaddrtombuf (&to, toaddr, tolen);
	if (error) {
		errno = error;
		rtems_bsdnet_semaphore_release ();
		return -1;
	}

	/* Wrap the caller's buffer zero-copy as external mbuf storage. */
	MGETHDR(m, M_WAIT, MT_DATA);
	m->m_pkthdr.len = 0;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	m->m_flags |= M_EXT;
	m->m_ext.ext_buf = closure ? closure : (void*)buf;
	m->m_ext.ext_size = buflen;
	/* we _must_ supply non-null procs; otherwise,
	 * the kernel code assumes it's a mbuf cluster */
	m->m_ext.ext_free = freeproc ? freeproc : dummyproc;
	m->m_ext.ext_ref = refproc ? refproc : dummyproc;
	m->m_pkthdr.len += buflen;
	m->m_len = buflen;
	m->m_data = (void*)buf;

	error = sosend (so, to, NULL, m, NULL, flags);
	if (error) {
		/* Interrupted or would-block sends are not fatal. */
		if (/*auio.uio_resid != len &&*/ (error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	if (error)
		errno = error;
	else
		ret = buflen;

	if (to)
		m_freem(to);

	rtems_bsdnet_semaphore_release ();

	return (ret);
}
/*
 * Convert a received SSO/PIP work-queue entry into an mbuf without
 * copying the payload: the packet data is attached as external storage.
 * Returns 0 on success (mbuf in *rm), 1 if no mbuf could be allocated.
 */
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		/* Packet data lives inline in the work-queue entry itself. */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		/* Packet data is in a separate 2 KB packet buffer. */
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* Attach the hardware buffer as external mbuf storage. */
	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* Frame length is carried in the upper bits of WORD1. */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;
	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
/*
 * Pull read data off a interface.
 * Len is length of data, with local net header stripped.
 *
 * Builds an mbuf chain by copying from the circular buffer 'buf'
 * (wrapping at buf+totlen).  Returns the chain head or NULL on
 * allocation failure.
 *
 * Fix: "mp = &top;" had been corrupted to "mp = ⊤" -- the "&top;"
 * text was interpreted as the HTML named entity for U+22A4 somewhere in
 * the file's history, leaving invalid C.  Restored the address-of
 * initialization of the chain-link pointer.
 */
static struct mbuf *
elget(caddr_t buf, int totlen, struct ifnet *ifp)
{
	struct mbuf *top, **mp, *m;
	int len;
	caddr_t cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;

	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;
	top = 0;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, MB_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;	/* wrap around the source buffer */
	}
	return (top);
}
/*
 * (Re)fill RX ring slot 'i' with a fresh cluster mbuf.  On allocation
 * failure the previous buffer is re-armed instead (packet effectively
 * dropped, ring keeps running) and ENOBUFS is returned.
 * NOTE(review): a bus_dmamap_load_mbuf failure is only logged; 'error'
 * is cleared to 0 right after -- confirm this is intentional.
 */
int
cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
{
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	const u_int h = RXDESC_PREV(i);
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	int error = ENOBUFS;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		goto reuse;
	}

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		goto reuse;
	}

	/* We have a new buffer, prepare it for the ring. */
	if (rdp->rx_mb[i] != NULL)
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	rdp->rx_mb[i] = m;

	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i],
	    rdp->rx_mb[i], BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("can't load rx DMA map %d: %d\n", i, error);
	}

	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	error = 0;

reuse:
	/* (re-)setup the descriptor */
	bd.next = 0;
	bd.bufptr = rdp->rx_dm[i]->dm_segs[0].ds_addr;
	bd.bufoff = 0;
	bd.buflen = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
	bd.pktlen = 0;
	bd.flags = CPDMA_BD_OWNER;

	cpsw_set_rxdesc(sc, i, &bd);
	/* and link onto ring */
	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));

	return error;
}
/*
 * Write on the tap device: each write() becomes exactly one packet
 * injected into the tap interface as if it had been received from
 * the wire.
 */
static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	/*
	 * Copy the user data MHLEN bytes at a time, growing the chain with
	 * plain mbufs.  On the first pass *mp == m, so the header mbuf
	 * allocated above is filled before any MGET happens.
	 */
	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m_set_rcvif(m, ifp);

	bpf_mtap(ifp, m);
	s = splnet();
	if_input(ifp, m);
	splx(s);

	return (0);
}
/* * munge the received packet into an mbuf chain */ static inline struct mbuf * sonic_get(struct sn_softc *sc, void *pkt, int datalen) { struct mbuf *m, *top, **mp; int len; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == 0) return 0; m->m_pkthdr.rcvif = &sc->sc_if; m->m_pkthdr.len = datalen; len = MHLEN; top = 0; mp = ⊤ while (datalen > 0) { if (top) { MGET(m, M_DONTWAIT, MT_DATA); if (m == 0) { m_freem(top); return 0; } len = MLEN; } if (datalen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { if (top) m_freem(top); return 0; } len = MCLBYTES; } if (mp == &top) { char *newdata = (char *) ALIGN((char *)m->m_data + sizeof(struct ether_header)) - sizeof(struct ether_header); len -= newdata - m->m_data; m->m_data = newdata; } m->m_len = len = min(datalen, len); memcpy(mtod(m, void *), pkt, (unsigned) len); pkt = (char *)pkt + len; datalen -= len; *mp = m; mp = &m->m_next; } return top; }
/*
 * Build and transmit one IGMP message of 'type' for multicast group
 * 'inm'; a non-zero 'addr' overrides the IP destination address.
 */
static void
igmp_sendpkt(struct in_multi *inm, int type, unsigned long addr)
{
	struct mbuf *m;
	struct igmp *igmp;
	struct ip *ip;
	struct ip_moptions imo;

	MGETHDR(m, M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return;

	m->m_pkthdr.rcvif = loif;
	m->m_pkthdr.len = sizeof(struct ip) + IGMP_MINLEN;
	/* Reserve space for the whole packet, then skip past where the
	 * IP header will go so the IGMP payload can be filled first. */
	MH_ALIGN(m, IGMP_MINLEN + sizeof(struct ip));
	m->m_data += sizeof(struct ip);
	m->m_len = IGMP_MINLEN;
	igmp = mtod(m, struct igmp *);
	igmp->igmp_type = type;
	igmp->igmp_code = 0;
	igmp->igmp_group = inm->inm_addr;
	igmp->igmp_cksum = 0;
	/* Checksum covers only the IGMP portion (IP header not exposed). */
	igmp->igmp_cksum = in_cksum(m, IGMP_MINLEN);

	/* Now expose the IP header space again and fill it in. */
	m->m_data -= sizeof(struct ip);
	m->m_len += sizeof(struct ip);
	ip = mtod(m, struct ip *);
	ip->ip_tos = 0;
	ip->ip_len = sizeof(struct ip) + IGMP_MINLEN;
	ip->ip_off = 0;
	ip->ip_p = IPPROTO_IGMP;
	ip->ip_src.s_addr = INADDR_ANY;
	ip->ip_dst.s_addr = addr ? addr : igmp->igmp_group.s_addr;

	imo.imo_multicast_ifp = inm->inm_ifp;
	imo.imo_multicast_ttl = 1;
	imo.imo_multicast_vif = -1;
	/*
	 * Request loopback of the report if we are acting as a multicast
	 * router, so that the process-level routing demon can hear it.
	 */
	imo.imo_multicast_loop = (ip_mrouter != NULL);

	/*
	 * XXX
	 * Do we have to worry about reentrancy here? Don't think so.
	 */
	ip_output(m, router_alert, &igmprt, 0, &imo);

	++igmpstat.igps_snd_reports;
}
/*---------------------------------------------------------------------------* * allocate D-channel mbuf space *---------------------------------------------------------------------------*/ struct mbuf* i4b_Dgetmbuf(int len) { struct mbuf *m; if(len > MCLBYTES) /* if length > max extension size */ { #ifdef I4B_MBUF_DEBUG printf("i4b_getmbuf: error - len(%d) > MCLBYTES(%d)\n", len, MCLBYTES); #endif return(NULL); } MGETHDR(m, M_DONTWAIT, MT_I4B_D); /* get mbuf with pkthdr */ /* did we actually get the mbuf ? */ if(!m) { #ifdef I4B_MBUF_DEBUG printf("i4b_getbuf: error - MGETHDR failed!\n"); #endif return(NULL); } if(len >= MHLEN) { MCLGET(m, M_DONTWAIT); if(!(m->m_flags & M_EXT)) { m_freem(m); #ifdef I4B_MBUF_DEBUG printf("i4b_getbuf: error - MCLGET failed, len(%d)\n", len); #endif return (NULL); } } m->m_len = len; return(m); }
/*
 * Pull read data off a interface. Len is length of data, with local net
 * header stripped. We copy the data into mbufs. When full cluster sized
 * units are present we copy into clusters.
 */
struct mbuf *
elget(struct el_softc *sc, int totlen)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct mbuf *m, *m0, *newm;
	int len;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == 0)
		return (0);
	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	/* Reset the board's packet-buffer read pointer. */
	bus_space_write_1(iot, ioh, EL_GPBL, 0);
	bus_space_write_1(iot, ioh, EL_GPBH, 0);

	while (totlen > 0) {
		/* Upgrade to a cluster when enough data remains. */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto bad;
			len = MCLBYTES;
		}

		/* Read a chunk directly from the board into the mbuf. */
		m->m_len = len = min(totlen, len);
		bus_space_read_multi_1(iot, ioh, EL_BUF,
		    mtod(m, u_int8_t *), len);

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == 0)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	/* Acknowledge the packet and re-enable receive. */
	bus_space_write_1(iot, ioh, EL_RBC, 0);
	bus_space_write_1(iot, ioh, EL_AC, EL_AC_RX);

	return (m0);

bad:
	m_freem(m0);
	return (0);
}
/*
 * Retreive packet from shared memory and send to the next level up via
 * ether_input().
 */
static void
ed_get_packet(struct ed_softc *sc, bus_size_t buf, u_short len)
{
	struct ifnet *ifp = sc->ifp;
	struct ether_header *eh;
	struct mbuf *m;

	/* Allocate a header mbuf */
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len;

	/*
	 * We always put the received packet in a single buffer -
	 * either with just an mbuf header or in a cluster attached
	 * to the header. The +2 is to compensate for the alignment
	 * fixup below.
	 */
	if ((len + 2) > MHLEN) {
		/* Attach an mbuf cluster */
		MCLGET(m, M_NOWAIT);

		/* Insist on getting a cluster */
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return;
		}
	}

	/*
	 * The +2 is to longword align the start of the real packet.
	 * This is important for NFS.
	 */
	m->m_data += 2;
	eh = mtod(m, struct ether_header *);

	/*
	 * Get packet, including link layer address, from interface.
	 */
	ed_ring_copy(sc, buf, (char *)eh, len);

	m->m_pkthdr.len = m->m_len = len;

	/* Drop the driver lock across the upcall into the stack.
	 * NOTE(review): presumably to avoid lock recursion if the stack
	 * re-enters the driver -- confirm. */
	ED_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	ED_LOCK(sc);
}
int mveth_tx(struct mv64340_private *mp, char *data, int len, int nbufs) { int rval = -1,l; char *p; struct mbuf *m; char *emsg = 0; rtems_bsdnet_semaphore_obtain(); MGETHDR(m, M_WAIT, MT_DATA); if ( !m ) { emsg="Unable to allocate header\n"; goto bail; } MCLGET(m, M_WAIT); if ( !(m->m_flags & M_EXT) ) { m_freem(m); emsg="Unable to allocate cluster\n"; goto bail; } p = mtod(m, char *); l = 0; switch (nbufs) { case 3: default: emsg="nbufs arg must be 1..3\n"; goto bail; case 1: l += sizeof(BcHeader); memcpy(p, &BcHeader, sizeof(BcHeader)); p += sizeof(BcHeader); case 2: memcpy(p,data,len); l += len; m->m_len = m->m_pkthdr.len = l; if ( 2 == nbufs ) { M_PREPEND(m, sizeof (BcHeader), M_WAIT); if (!m) { emsg = "Unable to prepend\n"; goto bail; } p = mtod(m, char*); memcpy(p,&BcHeader,sizeof(BcHeader)); l += sizeof(BcHeader); } break; }
/*
 * Allocate a packet-header mbuf with a cluster attached.
 * Returns the mbuf, or 0 if either allocation fails.
 */
struct mbuf *
getmcl()
{
	struct mbuf *mp;

	MGETHDR(mp, M_DONTWAIT, MT_DATA);
	if (mp == NULL)
		return 0;

	MCLGET(mp, M_DONTWAIT);
	if ((mp->m_flags & M_EXT) != 0)
		return mp;

	/* cluster attach failed: release the header mbuf */
	m_freem(mp);
	return 0;
}
static int epe_intr(void *arg) { struct epe_softc *sc = (struct epe_softc *)arg; struct ifnet * ifp = &sc->sc_ec.ec_if; uint32_t ndq = 0, irq, *cur; irq = EPE_READ(IntStsC); begin: cur = (uint32_t *)(EPE_READ(RXStsQCurAdd) - sc->ctrlpage_dsaddr + (char*)sc->ctrlpage); CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(uint32_t), RX_QLEN * 4 * sizeof(uint32_t), BUS_DMASYNC_PREREAD); while (sc->RXStsQ_cur != cur) { if ((sc->RXStsQ_cur[0] & (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) == (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) { uint32_t bi = (sc->RXStsQ_cur[1] >> 16) & 0x7fff; uint32_t fl = sc->RXStsQ_cur[1] & 0xffff; struct mbuf *m; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m != NULL) MCLGET(m, M_DONTWAIT); if (m != NULL && (m->m_flags & M_EXT)) { bus_dmamap_unload(sc->sc_dmat, sc->rxq[bi].m_dmamap); sc->rxq[bi].m->m_pkthdr.rcvif = ifp; sc->rxq[bi].m->m_pkthdr.len = sc->rxq[bi].m->m_len = fl; bpf_mtap(ifp, sc->rxq[bi].m); (*ifp->if_input)(ifp, sc->rxq[bi].m); sc->rxq[bi].m = m; bus_dmamap_load(sc->sc_dmat, sc->rxq[bi].m_dmamap, m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT); sc->RXDQ[bi * 2] = sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr; } else { /* Drop packets until we can get replacement * empty mbufs for the RXDQ. */ if (m != NULL) { m_freem(m); } ifp->if_ierrors++; } } else {