int
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
    struct mbuf *m, *n, *mlast;
    int space;

    SOCKBUF_LOCK_ASSERT(sb);

    if (control == 0)
        panic("sbappendcontrol_locked");
    space = m_length(control, &n) + m_length(m0, NULL);

    if (space > sbspace(sb))
        return (0);
    n->m_next = m0;			/* concatenate data to control */

    SBLASTRECORDCHK(sb);

    for (m = control; m->m_next; m = m->m_next)
        sballoc(sb, m);
    sballoc(sb, m);
    mlast = m;
    SBLINKRECORD(sb, control);

    sb->sb_mbtail = mlast;
    SBLASTMBUFCHK(sb);

    SBLASTRECORDCHK(sb);
    return (1);
}
void *
nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
{
    struct mbuf *m = nbuf->nb_mbuf;
    const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
    const int tlen = off + len;
    bool head_buf;

    KASSERT(off < m_length(nbuf->nb_mbuf0));

    if (!M_UNWRITABLE(m, tlen)) {
        return nbuf->nb_nptr;
    }
    head_buf = (nbuf->nb_mbuf0 == m);
    if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
        memset(nbuf, 0, sizeof(nbuf_t));
        return NULL;
    }
    if (head_buf) {
        KASSERT(m_flags_p(m, M_PKTHDR));
        KASSERT(off < m_length(m));
        nbuf->nb_mbuf0 = m;
    }
    nbuf->nb_mbuf = m;
    nbuf->nb_nptr = mtod(m, uint8_t *) + off;

    return nbuf->nb_nptr;
}
size_t
nbuf_offset(const nbuf_t *nbuf)
{
    const struct mbuf *m = nbuf->nb_mbuf;
    const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
    const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;

    return poff;
}
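/*
 * Worked example of the arithmetic in nbuf_offset() above (values are
 * illustrative): with a 64-byte head mbuf followed by a 128-byte mbuf,
 * and nb_nptr pointing 10 bytes into the second one, the packet-level
 * offset is
 *
 *     m_length(head) - m_length(current) + off
 *   = (64 + 128)     - 128               + 10  = 74.
 *
 * This assumes the NetBSD-style single-argument m_length(), which sums
 * the bytes remaining in a chain from the given mbuf onwards.
 */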
/*
 * nbuf_ensure_contig: check whether the specified length from the current
 * point in the nbuf is contiguous.  If not, rearrange the chain to be so.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure and nbuf becomes invalid.
 */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
    const struct mbuf * const n = nbuf->nb_mbuf;
    const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

    KASSERT(off <= m_buflen(n));

    if (__predict_false(m_buflen(n) < (off + len))) {
        struct mbuf *m = nbuf->nb_mbuf0;
        const size_t foff = nbuf_offset(nbuf);
        const size_t plen = m_length(m);
        const size_t mlen = m_buflen(m);
        size_t target;
        bool success;

        //npf_stats_inc(npf, NPF_STAT_NBUF_NONCONTIG);

        /* Attempt to round-up to NBUF_ENSURE_ALIGN bytes. */
        if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
            target = foff + len;
        }

        /* Rearrange the chain to be contiguous. */
        KASSERT(m_flags_p(m, M_PKTHDR));
        success = m_ensure_contig(&m, target);
        KASSERT(m != NULL);

        /* If no change in the chain: return what we have. */
        if (m == nbuf->nb_mbuf0 && m_buflen(m) == mlen) {
            return success ? nbuf->nb_nptr : NULL;
        }

        /*
         * The mbuf chain was re-arranged.  Update the pointers
         * accordingly and indicate that the references to the data
         * might need a reset.
         */
        KASSERT(m_flags_p(m, M_PKTHDR));
        nbuf->nb_mbuf0 = m;
        nbuf->nb_mbuf = m;

        KASSERT(foff < m_buflen(m) && foff < m_length(m));
        nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
        nbuf->nb_flags |= NBUF_DATAREF_RESET;

        if (!success) {
            //npf_stats_inc(npf, NPF_STAT_NBUF_CONTIG_FAIL);
            return NULL;
        }
    }
    return nbuf->nb_nptr;
}
Complex FIRFilter::run(const Complex &input)
{
    // Start at beginning of history
    Complex *ptrHist, *ptrHist1;
    ptrHist = ptrHist1 = &_z[0];

    // Point to last coefficient
    size_t m_length(_filtCoeff.size());
    const Real *ptrCoef = &_filtCoeff[m_length - 1];

    // Form output accumulation
    Complex output = *ptrHist++ * *ptrCoef--;
    for (size_t ii = 2; ii < m_length; ++ii) {
        // Update history array
        *ptrHist1++ = *ptrHist;
        output += *ptrHist++ * *ptrCoef--;
    }

    // Input tap
    output += input * *ptrCoef;
    // Last history
    *ptrHist1 = input;

    return output;
}
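/*
 * A self-contained C sketch (assumed names, not the FIRFilter class API)
 * of the same direct-form FIR step the method above performs:
 * y[n] = sum_k h[k] * x[n-k], with the delay line shifted by one sample
 * on every call.  `hist` holds the previous ntaps-1 inputs, oldest
 * first, matching the pointer walk in FIRFilter::run(); ntaps >= 2 is
 * assumed.
 */
static double
toy_fir_step(double input, const double *coef, double *hist, size_t ntaps)
{
    /* Oldest sample pairs with the last coefficient. */
    double acc = hist[0] * coef[ntaps - 1];
    size_t i;

    for (i = 1; i < ntaps - 1; i++) {
        hist[i - 1] = hist[i];              /* shift the delay line */
        acc += hist[i] * coef[ntaps - 1 - i];
    }
    acc += input * coef[0];                 /* input tap */
    hist[ntaps - 2] = input;                /* store the newest sample */
    return acc;
}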
static void
ntb_start(struct ifnet *ifp)
{
    struct mbuf *m_head;
    struct ntb_netdev *nt = ifp->if_softc;
    int rc;

    mtx_lock(&nt->tx_lock);
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    CTR0(KTR_NTB, "TX: ntb_start");
    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        CTR1(KTR_NTB, "TX: start mbuf %p", m_head);
        rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
            m_length(m_head, NULL));
        if (rc != 0) {
            CTR1(KTR_NTB,
                "TX: could not tx mbuf %p. Returning to snd q",
                m_head);
            if (rc == EAGAIN) {
                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                callout_reset(&nt->qp->queue_full, hz / 1000,
                    ntb_qp_full, ifp);
            }
            break;
        }
    }
    mtx_unlock(&nt->tx_lock);
}
struct mbuf *
m_pullup(struct mbuf *bp)
{
    /* Put it all in one contiguous (aligned) mbuf */

    if (bp != NULL) {
        if (bp->m_next != NULL) {
            struct mbuf *nbp;
            u_char *cp;

            nbp = m_get(m_length(bp), bp->m_type);

            for (cp = MBUF_CTOP(nbp); bp; bp = m_free(bp)) {
                memcpy(cp, MBUF_CTOP(bp), bp->m_len);
                cp += bp->m_len;
            }
            bp = nbp;
        }
#ifndef __i386__	/* Do any other archs not care about alignment ? */
        else if ((bp->m_offset & (sizeof(long) - 1)) != 0) {
            bcopy(MBUF_CTOP(bp), bp + 1, bp->m_len);
            bp->m_offset = 0;
        }
#endif
    }

    return bp;
}
void
link_PushPacket(struct link *l, struct mbuf *bp, struct bundle *b,
    int pri, u_short proto)
{
    int layer;

    /*
     * When we ``push'' a packet into the link, it gets processed by the
     * ``push'' function in each layer starting at the top.
     * We never expect the result of a ``push'' to be more than one
     * packet (as we do with ``pull''s).
     */

    if (pri < 0 || (unsigned)pri >= LINK_QUEUES(l))
        pri = 0;

    for (layer = l->nlayers; layer && bp; layer--)
        if (l->layer[layer - 1]->push != NULL)
            bp = (*l->layer[layer - 1]->push)(b, l, bp, pri, &proto);

    if (bp) {
        link_AddOutOctets(l, m_length(bp));
        log_Printf(LogDEBUG, "link_PushPacket: Transmit proto 0x%04x\n",
            proto);
        m_enqueue(l->Queue + pri, m_pullup(bp));
    }
}
struct mbuf *
m_adj(struct mbuf *bp, ssize_t n)
{
    if (n > 0) {
        /* Trim n bytes from the front of the chain. */
        while (bp) {
            if ((size_t)n < bp->m_len) {
                bp->m_len -= n;		/* keep only the remainder */
                bp->m_offset += n;
                return bp;
            }
            n -= bp->m_len;
            bp = m_free(bp);
        }
    } else {
        /* Trim -n bytes from the back; n becomes the length to keep. */
        if ((n = m_length(bp) + n) <= 0) {
            m_freem(bp);
            return NULL;
        }
        for (; bp; bp = bp->m_next, n -= bp->m_len)
            if ((size_t)n < bp->m_len) {
                bp->m_len = n;
                m_freem(bp->m_next);
                bp->m_next = NULL;
                break;
            }
    }

    return bp;
}
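/*
 * Usage sketch for the ppp-style m_adj() above (illustrative values):
 * given a chain holding 100 bytes, m_adj(bp, 40) drops the first 40
 * bytes (freeing whole leading mbufs on the way) and m_adj(bp, -40)
 * drops the last 40, leaving 60 bytes in both cases.  If the trim
 * consumes everything, the chain is freed and NULL is returned.
 */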
OM_uint32
gss_get_mic(OM_uint32 *minor_status, const gss_ctx_id_t ctx,
    gss_qop_t qop_req, const gss_buffer_t message_buffer,
    gss_buffer_t message_token)
{
    OM_uint32 maj_stat;
    struct mbuf *m, *mic;

    if (!ctx) {
        *minor_status = 0;
        return (GSS_S_NO_CONTEXT);
    }

    MGET(m, M_WAITOK, MT_DATA);
    if (message_buffer->length > MLEN)
        MCLGET(m, M_WAITOK);
    m_append(m, message_buffer->length, message_buffer->value);

    maj_stat = KGSS_GET_MIC(ctx, minor_status, qop_req, m, &mic);

    m_freem(m);
    if (maj_stat == GSS_S_COMPLETE) {
        message_token->length = m_length(mic, NULL);
        message_token->value = malloc(message_token->length,
            M_GSSAPI, M_WAITOK);
        m_copydata(mic, 0, message_token->length,
            message_token->value);
        m_freem(mic);
    }

    return (maj_stat);
}
static struct mbuf *
tcpmss_Check(struct bundle *bundle, struct mbuf *bp)
{
    struct ip *pip;
    size_t hlen, plen;

    if (!Enabled(bundle, OPT_TCPMSSFIXUP))
        return bp;

    bp = m_pullup(bp);
    plen = m_length(bp);
    pip = (struct ip *)MBUF_CTOP(bp);
    hlen = pip->ip_hl << 2;

    /*
     * Check for MSS option only for TCP packets with zero fragment offsets
     * and correct total and header lengths.
     */
    if (pip->ip_p == IPPROTO_TCP &&
        (ntohs(pip->ip_off) & IP_OFFMASK) == 0 &&
        ntohs(pip->ip_len) == plen && hlen <= plen &&
        plen >= sizeof(struct tcphdr) + hlen)
        MSSFixup((struct tcphdr *)(MBUF_CTOP(bp) + hlen), plen - hlen,
            MAXMSS(bundle->iface->mtu));

    return bp;
}
void
link_PendingLowPriorityData(struct link *l, size_t *pkts, size_t *octets)
{
    struct mqueue *queue, *highest;
    struct mbuf *m;
    size_t len;

    /*
     * This is all rfc1989 stuff... because our LQR packet is going to bypass
     * everything that's not in the highest priority queue, we must be able to
     * subtract that data from our outgoing packet/octet counts.  However,
     * we've already async-encoded our data at this point, but the async
     * encodings MUSTn't be a part of the LQR-reported payload :(  So, we have
     * the async layer record how much it's padded the packet in the mbuf's
     * priv field, and when we calculate our outgoing LQR values we subtract
     * this value for each packet from the octet count sent.
     */

    highest = LINK_HIGHQ(l);
    *pkts = *octets = 0;
    for (queue = l->Queue; queue < highest; queue++) {
        len = queue->len;
        *pkts += len;
        for (m = queue->top; len--; m = m->m_nextpkt)
            *octets += m_length(m) - m->priv;
    }
}
/*
 * Convert from mbufs to vbox scatter-gather data structure
 */
static void
vboxNetFltFreeBSDMBufToSG(PVBOXNETFLTINS pThis, struct mbuf *m,
    PINTNETSG pSG, unsigned int cSegs, unsigned int segOffset)
{
    static uint8_t const s_abZero[128] = {0};
    unsigned int i;
    struct mbuf *m0;

    IntNetSgInitTempSegs(pSG, m_length(m, NULL), cSegs, 0 /*cSegsUsed*/);

    for (m0 = m, i = segOffset; m0; m0 = m0->m_next) {
        if (m0->m_len == 0)
            continue;

        pSG->aSegs[i].cb = m0->m_len;
        pSG->aSegs[i].pv = mtod(m0, uint8_t *);
        pSG->aSegs[i].Phys = NIL_RTHCPHYS;
        i++;
    }

#ifdef PADD_RUNT_FRAMES_FROM_HOST
    if (pSG->cbTotal < 60) {
        pSG->aSegs[i].Phys = NIL_RTHCPHYS;
        pSG->aSegs[i].pv = (void *)&s_abZero[0];
        pSG->aSegs[i].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        i++;
    }
#endif

    pSG->cSegsUsed = i;
}
u_int
m_fixhdr(struct mbuf *m0)
{
    u_int len;

    len = m_length(m0, NULL);
    m0->m_pkthdr.len = len;
    return (len);
}
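/*
 * A minimal user-space sketch of the FreeBSD-style two-argument
 * m_length() contract assumed by m_fixhdr() and most callers in this
 * file: walk the chain from m0, sum the per-mbuf lengths, and
 * optionally report the final mbuf through *last.  The struct below is
 * a stand-in for illustration, not the real struct mbuf.
 */
#include <stddef.h>

struct toy_mbuf {
    struct toy_mbuf *m_next;    /* next buffer in the chain */
    unsigned int m_len;         /* bytes of data in this buffer */
};

static unsigned int
toy_m_length(struct toy_mbuf *m0, struct toy_mbuf **last)
{
    struct toy_mbuf *m;
    unsigned int len = 0;

    for (m = m0; m != NULL; m = m->m_next) {
        len += m->m_len;
        if (m->m_next == NULL && last != NULL)
            *last = m;          /* remember the tail for the caller */
    }
    return (len);
}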
/*
 * Try and write() to the socket, whatever doesn't get written
 * append to the buffer... for a host with a fast net connection,
 * this prevents an unnecessary copy of the data
 * (the socket is non-blocking, so we won't hang)
 */
void
sbappend(PNATState pData, struct socket *so, struct mbuf *m)
{
    int ret = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIOSBAppend_pf, a);
    LogFlow(("sbappend: so = %lx, m = %lx, m->m_len = %d\n",
            (long)so, (long)m, m ? m->m_len : 0));
    STAM_COUNTER_INC(&pData->StatIOSBAppend);

    /* Shouldn't happen, but... e.g. foreign host closes connection */
    mlen = m_length(m, NULL);
    if (mlen <= 0) {
        STAM_COUNTER_INC(&pData->StatIOSBAppend_zm);
        goto done;
    }

    /*
     * If there is urgent data, call sosendoob
     * if not all was sent, sowrite will take care of the rest
     * (The rest of this function is just an optimisation)
     */
    if (so->so_urgc) {
        sbappendsb(pData, &so->so_rcv, m);
        m_freem(pData, m);
        sosendoob(so);
        return;
    }

    /*
     * We only write if there's nothing in the buffer,
     * otherwise it'll arrive out of order, and hence corrupt
     */
    if (so->so_rcv.sb_cc == 0) {
        caddr_t buf = NULL;

        if (m->m_next) {
            buf = RTMemAlloc(mlen);
            if (buf == NULL) {
                ret = 0;
                goto no_sent;
            }
            m_copydata(m, 0, mlen, buf);
        } else
            buf = mtod(m, char *);

        ret = send(so->s, buf, mlen, 0);

        if (m->m_next)
            RTMemFree(buf);
    }
/**
 * Output a UDP packet.
 *
 * @note This function will finally free m!
 */
int
udp_output2(PNATState pData, struct socket *so, struct mbuf *m,
    struct sockaddr_in *saddr, struct sockaddr_in *daddr, int iptos)
{
    register struct udpiphdr *ui;
    int error;
    int mlen = 0;

    LogFlowFunc(("ENTER: so = %R[natsock], m = %p, saddr = %RTnaipv4, daddr = %RTnaipv4\n",
                so, m, saddr->sin_addr.s_addr, daddr->sin_addr.s_addr));
    /* in case of built-in service so might be NULL */
    if (so)
        Assert(so->so_type == IPPROTO_UDP);

    /*
     * Adjust for header
     */
    m->m_data -= sizeof(struct udpiphdr);
    m->m_len += sizeof(struct udpiphdr);
    mlen = m_length(m, NULL);

    /*
     * Fill in mbuf with extended UDP header
     * and addresses and length put into network format.
     */
    ui = mtod(m, struct udpiphdr *);
    memset(ui->ui_x1, 0, 9);
    ui->ui_pr = IPPROTO_UDP;
    ui->ui_len = RT_H2N_U16((uint16_t)(mlen - sizeof(struct ip)));
    /* XXXXX Check for from-one-location sockets, or from-any-location sockets */
    ui->ui_src = saddr->sin_addr;
    ui->ui_dst = daddr->sin_addr;
    ui->ui_sport = saddr->sin_port;
    ui->ui_dport = daddr->sin_port;
    ui->ui_ulen = ui->ui_len;

    /*
     * Stuff checksum and output datagram.
     */
    ui->ui_sum = 0;
    if (udpcksum) {
        /* A UDP checksum of 0 means "none", so a computed 0 is sent as 0xffff. */
        if ((ui->ui_sum = cksum(m, /* sizeof (struct udpiphdr) + */ mlen)) == 0)
            ui->ui_sum = 0xffff;
    }
    ((struct ip *)ui)->ip_len = mlen;
    ((struct ip *)ui)->ip_ttl = ip_defttl;
    ((struct ip *)ui)->ip_tos = iptos;

    udpstat.udps_opackets++;

    error = ip_output(pData, so, m);

    return error;
}
struct mbuf *
lqr_RecvEcho(struct fsm *fp, struct mbuf *bp)
{
    struct hdlc *hdlc = &link2physical(fp->link)->hdlc;
    struct lcp *lcp = fsm2lcp(fp);
    struct echolqr lqr;

    if (m_length(bp) >= sizeof lqr) {
        m_freem(mbuf_Read(bp, &lqr, sizeof lqr));
        bp = NULL;
        lqr.magic = ntohl(lqr.magic);
        lqr.signature = ntohl(lqr.signature);
        lqr.sequence = ntohl(lqr.sequence);

        /* Tolerate echo replies with either magic number */
        if (lqr.magic != 0 && lqr.magic != lcp->his_magic &&
            lqr.magic != lcp->want_magic) {
            log_Printf(LogWARN, "%s: lqr_RecvEcho: Bad magic: expected 0x%08x,"
                " got 0x%08x\n", fp->link->name, lcp->his_magic, lqr.magic);
            /*
             * XXX: We should send a terminate request.  But poor
             * implementations may die as a result.
             */
        }
        if (lqr.signature == SIGNATURE ||
            lqr.signature == lcp->want_magic) {
            /* some implementations return the wrong magic */
            /* careful not to update lqm.echo.seq_recv with older values */
            if ((hdlc->lqm.echo.seq_recv > (u_int32_t)0 - 5 && lqr.sequence < 5) ||
                (hdlc->lqm.echo.seq_recv <= (u_int32_t)0 - 5 &&
                 lqr.sequence > hdlc->lqm.echo.seq_recv))
                hdlc->lqm.echo.seq_recv = lqr.sequence;
        } else
            log_Printf(LogWARN, "lqr_RecvEcho: Got sig 0x%08lx, not 0x%08lx !\n",
                (u_long)lqr.signature, (u_long)SIGNATURE);
    } else
        log_Printf(LogWARN, "lqr_RecvEcho: Got packet size %zd, expecting %ld !\n",
            m_length(bp), (long)sizeof(struct echolqr));
    return bp;
}
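/*
 * A small sketch of the wrap-tolerant "is this sequence number newer?"
 * test buried in lqr_RecvEcho() above.  A reply is accepted either when
 * the stored counter sits within 5 of the 32-bit wrap point and the new
 * value has just wrapped past zero, or when it is simply greater.  The
 * helper name is illustrative, not part of the ppp sources.
 */
#include <stdint.h>
#include <stdbool.h>

static bool
toy_seq_is_newer(uint32_t last_recv, uint32_t seq)
{
    return (last_recv > (uint32_t)0 - 5 && seq < 5) ||
           (last_recv <= (uint32_t)0 - 5 && seq > last_recv);
}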
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
    struct mdchain *mdp)
{
    struct mbuf *m, *m0;
    int len;

    m0 = m_split(mtop, offset, M_WAIT);
    len = m_length(m0, &m);
    m->m_len -= len - count;
    if (mdp->md_top == NULL) {
        md_initm(mdp, m0);
    } else
        m_cat(mdp->md_top, m0);
    return 0;
}
/*
 * Copy the data from m into sb.
 * The caller is responsible to make sure there's enough room.
 */
void
sbappendsb(PNATState pData, struct sbuf *sb, struct mbuf *m)
{
    int len, n, nn;
#ifndef VBOX_WITH_STATISTICS
    NOREF(pData);
#endif

    len = m_length(m, NULL);

    STAM_COUNTER_INC(&pData->StatIOSBAppendSB);
    if (sb->sb_wptr < sb->sb_rptr) {
        STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_l_r);
        n = sb->sb_rptr - sb->sb_wptr;
        if (n > len)
            n = len;
        m_copydata(m, 0, n, sb->sb_wptr);
    } else {
        STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_ge_r);
        /* Do the right edge first */
        n = sb->sb_data + sb->sb_datalen - sb->sb_wptr;
        if (n > len)
            n = len;
        m_copydata(m, 0, n, sb->sb_wptr);
        len -= n;
        if (len) {
            /* Now the left edge */
            nn = sb->sb_rptr - sb->sb_data;
            if (nn > len)
                nn = len;
            m_copydata(m, n, nn, sb->sb_data);
            n += nn;
        }
    }

    sb->sb_cc += n;
    sb->sb_wptr += n;
    if (sb->sb_wptr >= sb->sb_data + sb->sb_datalen) {
        STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_alter);
        sb->sb_wptr -= sb->sb_datalen;
    }
}
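/*
 * A self-contained sketch of the wrap-around copy strategy used by
 * sbappendsb() above: when the write pointer is at or past the read
 * pointer, fill the right edge (up to the end of the backing store)
 * first, then wrap to the left edge.  The ring here is a plain byte
 * array; the names are illustrative, not the slirp sbuf API.
 */
#include <string.h>

struct toy_ring {
    char  *data;
    size_t datalen;     /* capacity of the backing store */
    size_t w, r;        /* write/read offsets into data */
};

static size_t
toy_ring_put(struct toy_ring *rb, const char *src, size_t len)
{
    size_t n, nn;

    if (rb->w < rb->r) {
        /* Free space is one contiguous run. */
        n = rb->r - rb->w;
        if (n > len)
            n = len;
        memcpy(rb->data + rb->w, src, n);
    } else {
        /* Do the right edge first... */
        n = rb->datalen - rb->w;
        if (n > len)
            n = len;
        memcpy(rb->data + rb->w, src, n);
        len -= n;
        if (len) {
            /* ...then wrap to the left edge. */
            nn = rb->r;
            if (nn > len)
                nn = len;
            memcpy(rb->data, src + n, nn);
            n += nn;
        }
    }
    rb->w += n;
    if (rb->w >= rb->datalen)
        rb->w -= rb->datalen;   /* wrap the write offset */
    return n;                   /* bytes actually stored */
}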
void
mbuf_Write(struct mbuf *bp, const void *ptr, size_t m_len)
{
    size_t plen;
    int nb;

    plen = m_length(bp);
    if (plen < m_len)
        m_len = plen;

    while (m_len > 0) {
        /* Copy at most this mbuf's worth, then advance both sides. */
        nb = (m_len < bp->m_len) ? m_len : bp->m_len;
        memcpy(MBUF_CTOP(bp), ptr, nb);
        ptr = (const char *)ptr + nb;
        m_len -= nb;
        bp = bp->m_next;
    }
}
void FIRFilter::run(void)
{
    // Set up coefficients
    const Real *startCoef = &_filtCoeff[0];
    size_t m_length(_filtCoeff.size());
    size_t lenCoef2 = (m_length + 1) / 2;

    // Set up input data pointers
    const Complex *endIn = &vIn[vIn.size() - 1];
    const Complex *ptrIn = &vIn[lenCoef2 - 1];

    // Initial value of accumulation length for startup
    size_t lenAcc = lenCoef2;

    for (size_t ii = 0; ii < vIn.size(); ++ii) {
        // Set up pointers for accumulation
        const Complex *ptrData = ptrIn;
        const Real *ptrCoef = startCoef;

        // Do accumulation and write result
        Complex acc = *ptrCoef++ * *ptrData--;
        for (size_t jj = 1; jj < lenAcc; jj++)
            acc += *ptrCoef++ * *ptrData--;
        vOut[ii] = acc;

        // Check for end case
        if (ptrIn == endIn) {
            // One shorter each time
            lenAcc--;
            // Next coefficient each time
            startCoef++;
        } else {
            // Check for startup
            if (lenAcc < m_length) {
                // Add to input pointer
                lenAcc++;
            }
            ptrIn++;
        }
    }
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
    struct mbuf *m, *n, *nlast;
    int space = asa->sa_len;

    SOCKBUF_LOCK_ASSERT(sb);

    if (m0 && (m0->m_flags & M_PKTHDR) == 0)
        panic("sbappendaddr_locked");
    if (m0)
        space += m0->m_pkthdr.len;
    space += m_length(control, &n);

    if (space > sbspace(sb))
        return (0);
#if MSIZE <= 256
    if (asa->sa_len > MLEN)
        return (0);
#endif
    MGET(m, M_DONTWAIT, MT_SONAME);
    if (m == 0)
        return (0);
    m->m_len = asa->sa_len;
    bcopy(asa, mtod(m, caddr_t), asa->sa_len);
    if (n) {
        CHECK_ADD_LINKCNT(n, m0, NULL, "sbappendaddr_locked");
        n->m_next = m0;		/* concatenate data to control */
    } else
        control = m0;
    CHECK_ADD_LINKCNT(m, control, NULL, "sbappendaddr_locked");
    m->m_next = control;
    for (n = m; n->m_next != NULL; n = n->m_next)
        sballoc(sb, n);
    sballoc(sb, n);
    nlast = n;
    SBLINKRECORD(sb, m);

    sb->sb_mbtail = nlast;
    SBLASTMBUFCHK(sb);

    SBLASTRECORDCHK(sb);
    return (1);
}
static struct mbuf *
sync_LayerPull(struct bundle *b, struct link *l, struct mbuf *bp,
    u_short *proto)
{
    struct physical *p = link2physical(l);

    if (!p)
        log_Printf(LogERROR, "Can't Pull a sync packet from a logical link\n");
    else {
        log_DumpBp(LogSYNC, "Read", bp);

        /* Either done here or by the HDLC layer */
        p->hdlc.lqm.SaveInOctets += m_length(bp) + 1;
        p->hdlc.lqm.SaveInPackets++;

        m_settype(bp, MB_SYNCIN);
    }

    return bp;
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
    struct mbuf *ctrl_last;
    int space = asa->sa_len;

    SOCKBUF_LOCK_ASSERT(sb);

    if (m0 && (m0->m_flags & M_PKTHDR) == 0)
        panic("sbappendaddr_locked");
    if (m0)
        space += m0->m_pkthdr.len;
    space += m_length(control, &ctrl_last);

    if (space > sbspace(sb))
        return (0);
    return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}
/*
 * newnfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 */
int
newnfs_realign(struct mbuf **pm, int how)
{
    struct mbuf *m, *n;
    int off, space;

    ++nfs_realign_test;
    while ((m = *pm) != NULL) {
        if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
            /*
             * NB: we can't depend on m_pkthdr.len to help us
             * decide what to do here.  May not be worth doing
             * the m_length calculation as m_copyback will
             * expand the mbuf chain below as needed.
             */
            space = m_length(m, NULL);
            if (space >= MINCLSIZE) {
                /* NB: m_copyback handles space > MCLBYTES */
                n = m_getcl(how, MT_DATA, 0);
            } else
                n = m_get(how, MT_DATA);
            if (n == NULL)
                return (ENOMEM);
            /*
             * Align the remainder of the mbuf chain.
             */
            n->m_len = 0;
            off = 0;
            while (m != NULL) {
                m_copyback(n, off, m->m_len, mtod(m, caddr_t));
                off += m->m_len;
                m = m->m_next;
            }
            m_freem(*pm);
            *pm = n;
            ++nfs_realign_count;
            break;
        }
        pm = &m->m_next;
    }
    return (0);
}
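/*
 * Sketch of the 4-byte alignment test newnfs_realign() applies to each
 * mbuf: realignment is triggered when either the length or the data
 * address is not a multiple of 4, since XDR operates on 32-bit words.
 * The helper below is illustrative, not kernel API.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

static bool
toy_needs_realign(const void *data, size_t len)
{
    return ((len & 0x3) != 0 || ((uintptr_t)data & 0x3) != 0);
}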
size_t
link_QueueBytes(struct link *l)
{
    unsigned i;
    size_t len, bytes;
    struct mbuf *m;

    bytes = 0;
    for (i = 0, len = 0; i < LINK_QUEUES(l); i++) {
        len = l->Queue[i].len;
        m = l->Queue[i].top;
        while (len--) {
            bytes += m_length(m);
            m = m->m_nextpkt;
        }
    }

    return bytes;
}
static void
if_pcap_send(void *arg)
{
    struct mbuf *m;
    struct if_pcap_softc *sc = (struct if_pcap_softc *)arg;
    struct ifnet *ifp = sc->ifp;
    uint8_t copybuf[2048];
    uint8_t *pkt;
    unsigned int pktlen;

    if (sc->uif->cpu >= 0)
        sched_bind(sc->tx_thread, sc->uif->cpu);

    while (1) {
        mtx_lock(&sc->tx_lock);
        while (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
            ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
            mtx_sleep(&ifp->if_drv_flags, &sc->tx_lock, 0, "wtxlk", 0);
        }
        mtx_unlock(&sc->tx_lock);

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
            IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

            pktlen = m_length(m, NULL);

            ifp->if_opackets++;
            if (!sc->isfile && (pktlen <= sizeof(copybuf))) {
                if (NULL == m->m_next) {
                    /* all in one piece - avoid copy */
                    pkt = mtod(m, uint8_t *);
                    ifp->if_ozcopies++;
                } else {
                    pkt = copybuf;
                    m_copydata(m, 0, pktlen, pkt);
                    ifp->if_ocopies++;
                }
                if (0 != if_pcap_sendpacket(sc->pcap_host_ctx, pkt, pktlen))
                    ifp->if_oerrors++;
            } else {
                if (sc->isfile)
int
ipv6cp_PushPacket(struct ipv6cp *ipv6cp, struct link *l)
{
    struct bundle *bundle = ipv6cp->fsm.bundle;
    struct mqueue *queue;
    struct mbuf *bp;
    int m_len;
    u_int32_t secs = 0;
    unsigned alivesecs = 0;

    if (ipv6cp->fsm.state != ST_OPENED)
        return 0;

    /*
     * If ccp is not open but is required, do nothing.
     */
    if (l->ccp.fsm.state != ST_OPENED && ccp_Required(&l->ccp)) {
        log_Printf(LogPHASE, "%s: Not transmitting... waiting for CCP\n",
            l->name);
        return 0;
    }

    queue = ipv6cp->Queue + IPV6CP_QUEUES(ipv6cp) - 1;
    do {
        if (queue->top) {
            bp = m_dequeue(queue);
            bp = mbuf_Read(bp, &secs, sizeof secs);
            bp = m_pullup(bp);
            m_len = m_length(bp);
            if (!FilterCheck(MBUF_CTOP(bp), AF_INET6, &bundle->filter.alive,
                    &alivesecs)) {
                if (secs == 0)
                    secs = alivesecs;
                bundle_StartIdleTimer(bundle, secs);
            }
            link_PushPacket(l, bp, bundle, 0, PROTO_IPV6);
            ipv6cp_AddOutOctets(ipv6cp, m_len);
            return 1;
        }
    } while (queue-- != ipv6cp->Queue);

    return 0;
}
void
xdrmbuf_append(XDR *xdrs, struct mbuf *madd)
{
    struct mbuf *m;

    KASSERT(xdrs->x_ops == &xdrmbuf_ops && xdrs->x_op == XDR_ENCODE,
        ("xdrmbuf_append: invalid XDR stream"));

    if (m_length(madd, NULL) == 0) {
        m_freem(madd);
        return;
    }

    m = (struct mbuf *) xdrs->x_private;
    m->m_next = madd;

    m = m_last(madd);
    xdrs->x_private = m;
    xdrs->x_handy = m->m_len;
}
OM_uint32
gss_wrap(OM_uint32 *minor_status, const gss_ctx_id_t ctx,
    int conf_req_flag, gss_qop_t qop_req,
    const gss_buffer_t input_message_buffer, int *conf_state,
    gss_buffer_t output_message_buffer)
{
    OM_uint32 maj_stat;
    struct mbuf *m;

    if (!ctx) {
        *minor_status = 0;
        return (GSS_S_NO_CONTEXT);
    }

    MGET(m, M_WAITOK, MT_DATA);
    if (input_message_buffer->length > MLEN)
        MCLGET(m, M_WAITOK);
    m_append(m, input_message_buffer->length, input_message_buffer->value);

    maj_stat = KGSS_WRAP(ctx, minor_status, conf_req_flag, qop_req, &m,
        conf_state);

    /*
     * On success, m is the wrapped message, on failure, m is
     * freed.
     */
    if (maj_stat == GSS_S_COMPLETE) {
        output_message_buffer->length = m_length(m, NULL);
        output_message_buffer->value = malloc(output_message_buffer->length,
            M_GSSAPI, M_WAITOK);
        m_copydata(m, 0, output_message_buffer->length,
            output_message_buffer->value);
        m_freem(m);
    }

    return (maj_stat);
}