/* Enqueue a packet 'm' to a queue 'q' and add timestamp to that packet. * Return 1 when unable to add timestamp, otherwise return 0 */ static int codel_enqueue(struct fq_codel_flow *q, struct mbuf *m, struct fq_codel_si *si) { uint64_t len; len = m->m_pkthdr.len; /* finding maximum packet size */ if (len > q->cst.maxpkt_size) q->cst.maxpkt_size = len; /* Add timestamp to mbuf as MTAG */ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) { m_freem(m); goto drop; } *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); mq_append(&q->mq, m); fq_update_stats(q, si, len, 0); return 0; drop: fq_update_stats(q, si, len, 1); m_freem(m); return 1; }
/*
 * ALTQ CoDel enqueue: stamp 'm' with the current machine clock (carried in
 * an MTAG_CODEL mbuf tag) and append it to queue 'q'.
 *
 * Returns 0 on success; -1 (with the mbuf freed) when the queue is at its
 * limit or the timestamp tag cannot be allocated.
 */
int
codel_addq(struct codel *c, class_queue_t *q, struct mbuf *m)
{
	struct m_tag *t;

	/* Tail-drop when the queue is already full. */
	if (qlen(q) >= qlimit(q)) {
		c->drop_overlimit++;
		m_freem(m);
		return (-1);
	}

	/* Reuse an existing enqueue-time tag if one is attached. */
	t = m_tag_locate(m, MTAG_CODEL, 0, NULL);
	if (t == NULL)
		t = m_tag_alloc(MTAG_CODEL, 0, sizeof(uint64_t), M_NOWAIT);
	if (t == NULL) {
		m_freem(m);
		return (-1);
	}

	/* Tag payload lives immediately after the m_tag header. */
	*(uint64_t *)(t + 1) = read_machclk();
	m_tag_prepend(m, t);
	_addq(q, m);
	return (0);
}
/**
 * Called to deliver a frame to either the host, the wire or both.
 *
 * @param   pThis     The instance data.
 * @param   pvIfData  Unused.
 * @param   pSG       The (scatter/)gather list holding the frame.
 * @param   fDst      Destination mask: INTNETTRUNKDIR_WIRE and/or
 *                    INTNETTRUNKDIR_HOST.
 * @returns VINF_SUCCESS, or VERR_NO_MEMORY when an mbuf or mbuf tag
 *          cannot be allocated.
 */
int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
{
    NOREF(pvIfData);
    struct ifnet *ifp;
    struct mbuf *m;
    struct m_tag *mtag;
    int rc = VINF_SUCCESS;

    ifp = ASMAtomicUoReadPtrT(&pThis->u.s.ifp, struct ifnet *);
    VBOXCURVNET_SET(ifp->if_vnet);

    if (fDst & INTNETTRUNKDIR_WIRE)
    {
        m = vboxNetFltFreeBSDSGMBufFromSG(pThis, pSG);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
        /* m_pullup() frees the chain itself when it fails. */
        m = m_pullup(m, ETHER_HDR_LEN);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
        m->m_flags |= M_PKTHDR;
        ether_output_frame(ifp, m);
    }

    if (fDst & INTNETTRUNKDIR_HOST)
    {
        m = vboxNetFltFreeBSDSGMBufFromSG(pThis, pSG);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
        m = m_pullup(m, ETHER_HDR_LEN);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
        /*
         * Delivering packets to the host will be captured by the
         * input hook. Tag the packet with a mbuf tag so that we
         * can skip re-delivery of the packet to the guest during
         * input hook processing.
         */
        mtag = m_tag_alloc(MTAG_VBOX, PACKET_TAG_VBOX, 0, M_NOWAIT);
        if (mtag == NULL)
        {
            m_freem(m);
            rc = VERR_NO_MEMORY;
            goto out;
        }
        /* NOTE(review): m_tag_init() resets the mbuf's tag list; this
         * assumes the SG conversion attached no tags worth keeping —
         * confirm against vboxNetFltFreeBSDSGMBufFromSG(). */
        m_tag_init(m);
        m_tag_prepend(m, mtag);
        m->m_flags |= M_PKTHDR;
        m->m_pkthdr.rcvif = ifp;
        ifp->if_input(ifp, m);
    }

out:
    /*
     * BUG FIX: the original returned straight from the error paths,
     * skipping VBOXCURVNET_RESTORE() after VBOXCURVNET_SET() and thus
     * leaving the current vnet context set.  Restore on every exit.
     * (Also dropped the unused locals input_f, fActive and error.)
     */
    VBOXCURVNET_RESTORE();
    return rc;
}
/*
 * Attach an MTAG_PROMISCINET/L2INFO mbuf tag to 'm' carrying a copy of
 * the link-layer info in '*l2i'.
 *
 * Returns 0 on success, or ENOMEM when the tag cannot be allocated.
 */
int
if_promiscinet_add_tag(struct mbuf *m, struct in_l2info *l2i)
{
	struct ifl2info *tag;

	tag = (struct ifl2info *)m_tag_alloc(MTAG_PROMISCINET,
	    MTAG_PROMISCINET_L2INFO, MTAG_PROMISCINET_L2INFO_LEN, M_NOWAIT);
	if (tag == NULL)
		return (ENOMEM);

	in_promisc_l2info_copy(&tag->ifl2i_info, l2i);
	m_tag_prepend(m, &tag->ifl2i_mtag);
	return (0);
}
int ieee80211_add_callback(struct mbuf* m, void (*func)(struct ieee80211_node*, void*, int), void* arg) { struct m_tag* mtag; struct ieee80211_cb* cb; mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_CALLBACK, sizeof(struct ieee80211_cb), M_NOWAIT); if (mtag == NULL) return 0; cb = (struct ieee80211_cb*)(mtag+1); cb->func = func; cb->arg = arg; m_tag_prepend(m, mtag); m->m_flags |= M_TXCB; return 1; }
static int firewire_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct fw_com *fc = IFP2FWC(ifp); int error, type; struct m_tag *mtag; union fw_encap *enc; struct fw_hwaddr *destfw; uint8_t speed; uint16_t psize, fsize, dsize; struct mbuf *mtail; int unicast, dgl, foff; static int next_dgl; #if defined(INET) || defined(INET6) struct llentry *lle; #endif #ifdef MAC error = mac_ifnet_check_transmit(ifp, m); if (error) goto bad; #endif if (!((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))) { error = ENETDOWN; goto bad; } /* * For unicast, we make a tag to store the lladdr of the * destination. This might not be the first time we have seen * the packet (for instance, the arp code might be trying to * re-send it after receiving an arp reply) so we only * allocate a tag if there isn't one there already. For * multicast, we will eventually use a different tag to store * the channel number. */ unicast = !(m->m_flags & (M_BCAST | M_MCAST)); if (unicast) { mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, NULL); if (!mtag) { mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, sizeof (struct fw_hwaddr), M_NOWAIT); if (!mtag) { error = ENOMEM; goto bad; } m_tag_prepend(m, mtag); } destfw = (struct fw_hwaddr *)(mtag + 1); } else { destfw = 0; } switch (dst->sa_family) { #ifdef INET case AF_INET: /* * Only bother with arp for unicast. Allocation of * channels etc. for firewire is quite different and * doesn't fit into the arp model. */ if (unicast) { error = arpresolve(ifp, ro ? ro->ro_rt : NULL, m, dst, (u_char *) destfw, &lle); if (error) return (error == EWOULDBLOCK ? 0 : error); } type = ETHERTYPE_IP; break; case AF_ARP: { struct arphdr *ah; ah = mtod(m, struct arphdr *); ah->ar_hrd = htons(ARPHRD_IEEE1394); type = ETHERTYPE_ARP; if (unicast) *destfw = *(struct fw_hwaddr *) ar_tha(ah); /* * The standard arp code leaves a hole for the target * hardware address which we need to close up. 
*/ bcopy(ar_tpa(ah), ar_tha(ah), ah->ar_pln); m_adj(m, -ah->ar_hln); break; } #endif #ifdef INET6 case AF_INET6: if (unicast) { error = nd6_storelladdr(fc->fc_ifp, m, dst, (u_char *) destfw, &lle); if (error) return (error); } type = ETHERTYPE_IPV6; break; #endif default: if_printf(ifp, "can't handle af%d\n", dst->sa_family); error = EAFNOSUPPORT; goto bad; } /* * Let BPF tap off a copy before we encapsulate. */ if (bpf_peers_present(ifp->if_bpf)) { struct fw_bpfhdr h; if (unicast) bcopy(destfw, h.firewire_dhost, 8); else bcopy(&firewire_broadcastaddr, h.firewire_dhost, 8); bcopy(&fc->fc_hwaddr, h.firewire_shost, 8); h.firewire_type = htons(type); bpf_mtap2(ifp->if_bpf, &h, sizeof(h), m); } /* * Punt on MCAP for now and send all multicast packets on the * broadcast channel. */ if (m->m_flags & M_MCAST) m->m_flags |= M_BCAST; /* * Figure out what speed to use and what the largest supported * packet size is. For unicast, this is the minimum of what we * can speak and what they can hear. For broadcast, lets be * conservative and use S100. We could possibly improve that * by examining the bus manager's speed map or similar. We * also reduce the packet size for broadcast to account for * the GASP header. */ if (unicast) { speed = min(fc->fc_speed, destfw->sspd); psize = min(512 << speed, 2 << destfw->sender_max_rec); } else { speed = 0; psize = 512 - 2*sizeof(uint32_t); } /* * Next, we encapsulate, possibly fragmenting the original * datagram if it won't fit into a single packet. */ if (m->m_pkthdr.len <= psize - sizeof(uint32_t)) { /* * No fragmentation is necessary. */ M_PREPEND(m, sizeof(uint32_t), M_NOWAIT); if (!m) { error = ENOBUFS; goto bad; } enc = mtod(m, union fw_encap *); enc->unfrag.ether_type = type; enc->unfrag.lf = FW_ENCAP_UNFRAG; enc->unfrag.reserved = 0; /* * Byte swap the encapsulation header manually. */ enc->ul[0] = htonl(enc->ul[0]); error = (ifp->if_transmit)(ifp, m); return (error); } else {
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this even some time ago and
 * we cannot trust that he even still exists. The node however is being
 * held with a reference by the queueing code and guarantied to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct mbuf *m;
	struct ng_mesg *response;
	struct uio auio;
	int s, flags, error;

	/* Block network interrupts while we manipulate the socket. */
	s = splnet();

	/* so = priv->so; */ /* XXX could have derived this like so */
	KASSERT(so == priv->so, ("%s: wrong socket", __func__));

	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			soclrstate(so, SS_ISCONNECTING);
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			/* Notify the originator of the connect result. */
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t),
			    M_WAITOK | M_NULLOK);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/*
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node,
				    response, priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later. When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL) {
		splx(s);
		return;
	}

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	/* Effectively unbounded (~1 GB) so one soreceive() drains a packet. */
	auio.uio_resid = 1000000000;
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *n;

		/* Try to get next packet from socket */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				kfree(sa, M_SONAME);
			break;
		}

		/*
		 * Don't trust the various socket layers to get the
		 * packet header and length correct (e.g. kern/15175).
		 *
		 * Also, do not trust that soreceive() will clear m_nextpkt
		 * for us (e.g. kern/84952, kern/82413).
		 */
		m->m_pkthdr.csum_flags = 0;
		for (n = m, m->m_pkthdr.len = 0; n != NULL; n = n->m_next) {
			m->m_pkthdr.len += n->m_len;
			n->m_nextpkt = NULL;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag *stag;

			/* Tag payload: our node ID followed by the sockaddr. */
			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR,
			    sizeof(ng_ID_t) + sa->sa_len, MB_DONTWAIT);
			if (stag == NULL) {
				/* Tag is optional; still forward the data. */
				kfree(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			kfree(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE &&
	    !(priv->flags & KSF_EOFSEEN)) {
		MGETHDR(m, MB_DONTWAIT, MT_DATA);
		if (m != NULL) {
			m->m_len = m->m_pkthdr.len = 0;
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		}
		priv->flags |= KSF_EOFSEEN;
	}
	splx(s);
}
/*
 * Build and transmit one LMI status-enquiry frame on sc->lmi_channel.
 * 'full' selects a full status request (report type 0x00) versus a
 * partial/keepalive one (0x01).  Frame layout depends on which LMI
 * dialect (Annex A / Annex D / Group-of-4) the link is using.
 * After LMI_PATIENCE unanswered enquiries, all UP DLCIs are marked DOWN.
 */
static void
nglmi_inquire(sc_p sc, int full)
{
	struct mbuf *m;
	struct ng_tag_prio *ptag;
	char *cptr, *start;
	int error;

	if (sc->lmi_channel == NULL)
		return;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		log(LOG_ERR, "nglmi: unable to start up LMI processing\n");
		return;
	}
	m->m_pkthdr.rcvif = NULL;

	/* Attach a tag to packet, marking it of link level state priority, so
	 * that device driver would put it in the beginning of queue */
	ptag = (struct ng_tag_prio *)m_tag_alloc(NGM_GENERIC_COOKIE,
	    NG_TAG_PRIO, (sizeof(struct ng_tag_prio) - sizeof(struct m_tag)),
	    M_NOWAIT);
	if (ptag != NULL) { /* if it failed, well, it was optional anyhow */
		ptag->priority = NG_PRIO_LINKSTATE;
		ptag->discardability = -1;
		m_tag_prepend(m, &ptag->tag);
	}

	m->m_data += 4;		/* leave some room for a header */
	cptr = start = mtod(m, char *);
	/* add in the header for an LMI inquiry. */
	*cptr++ = 0x03;		/* UI frame */
	if (GROUP4(sc))
		*cptr++ = 0x09;	/* proto discriminator */
	else
		*cptr++ = 0x08;	/* proto discriminator */
	*cptr++ = 0x00;		/* call reference */
	*cptr++ = 0x75;		/* inquiry */

	/* If we are Annex-D, add locking shift to codeset 5. */
	if (ANNEXD(sc))
		*cptr++ = 0x95;	/* locking shift */
	/* Add a request type */
	if (ANNEXA(sc))
		*cptr++ = 0x51;	/* report type */
	else
		*cptr++ = 0x01;	/* report type */
	*cptr++ = 0x01;		/* size = 1 */
	if (full)
		*cptr++ = 0x00;	/* full */
	else
		*cptr++ = 0x01;	/* partial */
	/* Add a link verification IE */
	if (ANNEXA(sc))
		*cptr++ = 0x53;	/* verification IE */
	else
		*cptr++ = 0x03;	/* verification IE */
	*cptr++ = 0x02;		/* 2 extra bytes */
	/* Sequence numbers let the peer detect lost enquiries. */
	*cptr++ = sc->local_seq;
	*cptr++ = sc->remote_seq;
	sc->seq_retries++;

	/* Send it */
	m->m_len = m->m_pkthdr.len = cptr - start;
	NG_SEND_DATA_ONLY(error, sc->lmi_channel, m);

	/* If we've been sending requests for long enough, and there has
	 * been no response, then mark as DOWN, any DLCIs that are UP.
	 */
	if (sc->seq_retries == LMI_PATIENCE) {
		int count;

		for (count = 0; count < MAXDLCI; count++)
			if (sc->dlci_state[count] == DLCI_UP)
				sc->dlci_state[count] = DLCI_DOWN;
	}
}
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this even some time ago and
 * we cannot trust that he even still exists. The node however is being
 * held with a reference by the queueing code and guarantied to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *response;
	struct uio auio;
	int flags, error;

	KASSERT(so == priv->so, ("%s: wrong socket", __func__));

	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			so->so_state &= ~SS_ISCONNECTING;
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			/* Notify the originator of the connect result. */
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t), M_NOWAIT);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/*
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node,
				    response, priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later. When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL)
		return;

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	auio.uio_resid = MJUMPAGESIZE;	/* XXXGL: sane limit? */
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *m;

		/* Try to get next packet from socket */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				free(sa, M_SONAME);
			break;
		}

		KASSERT(m->m_nextpkt == NULL, ("%s: nextpkt", __func__));

		/*
		 * Stream sockets do not have packet boundaries, so
		 * we have to allocate a header mbuf and attach the
		 * stream of data to it.
		 */
		if (so->so_type == SOCK_STREAM) {
			struct mbuf *mh;

			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL) {
				m_freem(m);
				if (sa != NULL)
					free(sa, M_SONAME);
				break;
			}

			/* m_gethdr() starts pkthdr.len at 0; sum the chain. */
			mh->m_next = m;
			for (; m; m = m->m_next)
				mh->m_pkthdr.len += m->m_len;
			m = mh;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag *stag;

			/* Tag payload: our node ID followed by the sockaddr. */
			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR,
			    sizeof(ng_ID_t) + sa->sa_len, M_NOWAIT);
			if (stag == NULL) {
				/* Tag is optional; still forward the data. */
				free(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			free(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE &&
	    !(priv->flags & KSF_EOFSEEN)) {
		struct mbuf *m;

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m != NULL)
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		priv->flags |= KSF_EOFSEEN;
	}
}