/**
 * Sums the backing-buffer capacity (mbuf_maxlen) of every mbuf in a chain.
 *
 * @param mbuf  Head of the mbuf chain; may be NULL.
 * @return Total capacity of the chain in bytes; 0 for an empty/NULL chain.
 */
size_t MbufUtils::mbufTotalMaxLength(mbuf_t mbuf) {
    size_t len = 0;
    // while-loop (not do/while) so a NULL head is handled instead of
    // being passed to mbuf_maxlen(), which would dereference it.
    while (mbuf) {
        len += mbuf_maxlen(mbuf);
        mbuf = mbuf_next(mbuf);
    }
    return len;
}
/**
 * Calculates the number of segments required to represent the mbuf.
 *
 * @returns Number of segments.
 * @param pThis     The instance.
 * @param pMBuf     The mbuf.
 * @param pvFrame   The frame pointer, optional.
 */
DECLINLINE(unsigned) vboxNetFltDarwinMBufCalcSGSegs(PVBOXNETFLTINS pThis, mbuf_t pMBuf, void *pvFrame)
{
    NOREF(pThis);

    /*
     * Count the buffers in the chain.
     */
    unsigned cSegs = 0;
    for (mbuf_t pCur = pMBuf; pCur; pCur = mbuf_next(pCur))
        if (mbuf_len(pCur))
            cSegs++;
        /* A zero-length leading buffer may still contain the frame header:
           pvFrame points into the head mbuf's data area.  It needs a segment
           of its own (mirrors the empty-buffer case in vboxNetFltDarwinMBufToSG). */
        else if (   !cSegs
                 && pvFrame
                 && (uintptr_t)pvFrame - (uintptr_t)mbuf_datastart(pMBuf) < mbuf_maxlen(pMBuf))
            cSegs++;

#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add one buffer if the total is less than the ethernet minimum 60 bytes.
     * This may allocate a segment too much if the ethernet header is separated,
     * but that shouldn't harm us much.
     */
    if (mbuf_pkthdr_len(pMBuf) < 60)
        cSegs++;
#endif

#ifdef VBOXNETFLT_DARWIN_TEST_SEG_SIZE
    /* maximize the number of segments. */
    cSegs = RT_MAX(VBOXNETFLT_DARWIN_MAX_SEGS - 1, cSegs);
#endif

    /* Callers expect at least one segment even for an all-empty chain. */
    return cSegs ? cSegs : 1;
}
/**
 * Tries to grow the mbuf's length toward targetLength, never past its
 * backing-buffer capacity.
 *
 * @param mbuf          The mbuf to adjust.
 * @param targetLength  Desired length in bytes.
 * @return The mbuf's length after the attempt (unchanged when it was
 *         already long enough or the buffer is full).
 */
size_t MbufUtils::attemptToSetLength(mbuf_t mbuf, size_t targetLength) {
    size_t curLen = mbuf_len(mbuf);
    size_t capacity = mbuf_maxlen(mbuf);
    // Nothing to do when already long enough, or no room left to grow.
    if (targetLength <= curLen || capacity == curLen) {
        return curLen;
    }
    size_t grownLen = min_macro(targetLength, capacity);
    mbuf_setlen(mbuf, grownLen);
    return grownLen;
}
/*
 * Returns the total backing-buffer capacity of an mbuf chain,
 * i.e. the sum of mbuf_maxlen() over every mbuf reachable from m.
 */
__private_extern__ size_t
mbuf_pkthdr_maxlen(mbuf_t m)
{
	size_t total = 0;
	mbuf_t cur;

	for (cur = m; cur != NULL; cur = mbuf_next(cur))
		total += mbuf_maxlen(cur);

	return (total);
}
/*
 * Points the mbuf's data at an arbitrary location inside its own backing
 * buffer and sets its length.
 *
 * Returns EINVAL unless [data, data + len) lies entirely within
 * [datastart, datastart + maxlen); 0 on success.
 */
errno_t
mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
{
	size_t start = (size_t)((char *)mbuf_datastart(mbuf));
	size_t maxlen = mbuf_maxlen(mbuf);
	size_t offset;

	/*
	 * The naive check "(size_t)data + len > start + maxlen" can wrap
	 * around for large len and wrongly accept an out-of-range pair, so
	 * rephrase it with no overflowing intermediate values:
	 * require data >= start, offset <= maxlen, len <= maxlen - offset.
	 */
	if ((size_t)data < start)
		return (EINVAL);
	offset = (size_t)data - start;
	if (offset > maxlen || len > maxlen - offset)
		return (EINVAL);

	mbuf->m_data = data;
	mbuf->m_len = len;

	return (0);
}
/*
 * Adjusts the mbuf's length by amount (positive or negative).
 *
 * Returns EINVAL when growing would exceed the trailing space of the
 * buffer, or when shrinking would make m_len negative; 0 on success.
 */
errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		/*
		 * Keep the occupied-byte count in a size_t: storing the
		 * pointer difference in an int could truncate, and
		 * "amount + used" could overflow int before the cast.
		 */
		size_t used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    (size_t)m->m_len;

		if (used > mbuf_maxlen(m) ||
		    (size_t)amount > mbuf_maxlen(m) - used)
			return (EINVAL);
	} else if ((long long)m->m_len + amount < 0) {
		/* widened compare avoids the UB of negating INT_MIN */
		return (EINVAL);
	}

	m->m_len += amount;
	return (0);
}
/*
 * Issues one SMB request through the SMBIOC_REQUEST ioctl and copies the
 * reply metadata back into *header.
 *
 * words/bytes carry the transmit parameter words and data bytes; the reply
 * body is written into response's buffer.  Returns 0 on success or errno
 * from the failed ioctl.
 */
static int
smb_ioc_request(
	void *              hContext,
	struct smb_header * header,
	const mbuf_t        words,
	const mbuf_t        bytes,
	mbuf_t              response)
{
	struct smb_ctx *ctx = (struct smb_ctx *)hContext;
	struct smbioc_rq ioc;

	/* bzero (not an initializer) so padding bytes are zeroed too --
	 * this struct crosses the kernel ABI boundary. */
	bzero(&ioc, sizeof(ioc));
	ioc.ioc_version = SMB_IOC_STRUCT_VERSION;
	ioc.ioc_cmd = header->command;

	/* XXX For large I/O requests where the uint16_t byte count
	 * (ioc_tbc) wraps to 0, this interface will get horribly
	 * confused. I don't think we can fix this without revving the
	 * ioctl version -- jpeach
	 */

	/* Transmit parameter words ... */
	ioc.ioc_twc = mbuf_len(words) / sizeof(uint16_t);
	ioc.ioc_twords = mbuf_data(words);

	/* Transmit data bytes ... */
	ioc.ioc_tbc = mbuf_len(bytes);
	ioc.ioc_tbytes = mbuf_data(bytes);

	/* Receive buffer, reserving space for the word count and
	 * byte count ... */
	ioc.ioc_rpbufsz = (int32_t)mbuf_maxlen(response);
	ioc.ioc_rpbuf = mbuf_data(response);

	if (smb_ioctl_call(ctx->ct_fd, SMBIOC_REQUEST, &ioc) == -1) {
		return errno;
	}

	/* Propagate reply status back to the caller's header. */
	header->flags = ioc.ioc_flags;
	header->flags2 = ioc.ioc_flags2;
	header->status = ioc.ioc_ntstatus;

	/* The kernel rewrote ioc_rpbufsz with the actual reply length. */
	mbuf_setlen(response, ioc.ioc_rpbufsz);
	return 0;
}
/*
 * Copies `copy` bytes between an IOMemoryDescriptor and an mbuf chain.
 *
 * Direction is taken from the descriptor: kIODirectionOut means the
 * descriptor is the source and the mbuf chain is written (isWrite),
 * otherwise the mbuf chain is read into the descriptor.
 *
 * skip_buffer / skip_mbuf are starting offsets into the descriptor and the
 * mbuf chain respectively.  Returns true only if prepare/complete succeed
 * and all `copy` bytes were transferred.
 */
static bool mbuf_buffer(IOMemoryDescriptor *buffer, int skip_buffer, mbuf_t m, int skip_mbuf, int copy)
{
	int offset = 0;
	bool isWrite = (buffer->getDirection() == kIODirectionOut);

	/* Wire the descriptor's memory before touching it. */
	if (buffer->prepare() != kIOReturnSuccess) {
		KINFO("buffer prepare failed");
		return false;
	}

	/* When writing into the chain, grow the packet header length up front
	 * so it covers the region we are about to fill. */
	if (isWrite && mbuf_pkthdr_len(m) < skip_mbuf + copy)
		mbuf_pkthdr_setlen(m, skip_mbuf + copy);

	for (; m; m = mbuf_next(m)) {
		/* Writing: extend this mbuf's length into its trailing space
		 * (capped at its capacity) so the copy below has room. */
		if (isWrite && mbuf_len(m) < skip_mbuf + copy && mbuf_trailingspace(m))
			mbuf_setlen(m, min(mbuf_maxlen(m), skip_mbuf + copy));

		UInt32 available = mbuf_len(m);
		//KDEBUG("available=%d, skip_mbuf=%d", available, skip_mbuf);

		/* Still skipping: consume this whole mbuf and move on. */
		if (skip_mbuf >= available) {
			skip_mbuf -= available;
			continue;
		}

		UInt8 *buf = (UInt8 *)mbuf_data(m) + skip_mbuf;
		/* Transfer size = min(requested, left in this mbuf, left in descriptor). */
		IOByteCount len = copy; // remaining requested
		len = min(len, available - skip_mbuf); // available in mbuf
		len = min(len, buffer->getLength() - offset); // available in iomd
		IOByteCount wrote = 0;

		if (!len) {
			KDEBUG("no space, %d-%d, %d-%d", available, skip_mbuf, buffer->getLength(), offset);
			break;
		}

		//KDEBUG("COPY: skip_buffer=%d, offset=%d, len=%d (remaining=%d)", skip_buffer, offset, len, copy);
		/* readBytes pulls from the descriptor into the mbuf;
		 * writeBytes pushes the mbuf's bytes into the descriptor. */
		if (isWrite)
			wrote = buffer->readBytes(skip_buffer + offset, buf, len);
		else
			wrote = buffer->writeBytes(skip_buffer + offset, buf, len);

		if (wrote != len) {
			KINFO("short IO");
			break;
		}

		offset += len;
		copy -= len;
		/* Offset only applies within the first touched mbuf. */
		skip_mbuf = 0;
	}

	/* Undo prepare(); must happen on every path after a successful prepare. */
	if (buffer->complete() != kIOReturnSuccess) {
		KINFO("buffer complete failed");
		return false;
	}

	if (copy > 0) {
		KINFO("failed to copy requested data: %d remaining", copy);
		return false;
	}

	return true;
}
errno_t kn_tcp_pkt_from_params(mbuf_t *data, u_int8_t tcph_flags, u_int32_t iph_saddr, u_int32_t iph_daddr, u_int16_t tcph_sport, u_int16_t tcph_dport, u_int32_t tcph_seq, u_int32_t tcph_ack, const char* payload, size_t payload_len) { int retval = 0; size_t tot_data_len, tot_buf_len, max_len; // mac osx thing.. to be safe, leave out 14 bytes for ethernet header. void *buf = NULL; struct ip* o_iph; struct tcphdr* o_tcph; u_int16_t csum; mbuf_csum_request_flags_t csum_flags = 0; boolean_t pkt_allocated = FALSE; tot_data_len = sizeof(struct ip) + sizeof(struct tcphdr) + payload_len; tot_buf_len = tot_data_len + ETHHDR_LEN; // allocate the packet retval = mbuf_allocpacket(MBUF_DONTWAIT, tot_buf_len, NULL, data); if (retval != 0) { kn_debug("mbuf_allocpacket returned error %d\n", retval); goto FAILURE; } else { pkt_allocated = TRUE; } max_len = mbuf_maxlen(*data); if (max_len < tot_buf_len) { kn_debug("no enough buffer space, try to request more.\n"); retval = mbuf_prepend(data, tot_buf_len - max_len, MBUF_DONTWAIT); if (retval != 0) { kn_debug("mbuf_prepend returned error %d\n", retval); goto FAILURE; } } mbuf_pkthdr_setlen(*data, tot_data_len); retval = mbuf_pkthdr_setrcvif(*data, NULL); if (retval != 0) { kn_debug("mbuf_pkthdr_setrcvif returned error %d\n", retval); goto FAILURE; } mbuf_setlen(*data, tot_data_len); retval = mbuf_setdata(*data, (mbuf_datastart(*data) + ETHHDR_LEN), tot_data_len); if (retval != 0) { kn_debug("mbuf_setdata returned error %d\n", retval); goto FAILURE; } buf = mbuf_data(*data); mbuf_pkthdr_setheader(*data, buf); o_iph = (struct ip*)buf; memset(o_iph, 0, sizeof(struct ip)); // setup IPv4 header o_iph->ip_hl = sizeof(struct ip) / 4; o_iph->ip_v = 4; o_iph->ip_tos = 0; o_iph->ip_id = 0; o_iph->ip_off = htons(IP_DF); o_iph->ip_p = IPPROTO_TCP; o_iph->ip_len = htons(tot_data_len); o_iph->ip_sum = 0; o_iph->ip_ttl = 64; o_iph->ip_src.s_addr = iph_saddr; o_iph->ip_dst.s_addr = iph_daddr; o_tcph = (struct tcphdr*)((char*)o_iph + sizeof(struct 
ip)); memset(o_tcph, 0, sizeof(struct tcphdr)); o_tcph->th_sport = tcph_sport; o_tcph->th_dport = tcph_dport; o_tcph->th_seq = tcph_seq; o_tcph->th_ack = tcph_ack; o_tcph->th_flags = tcph_flags; o_tcph->th_win = 0xffffU; o_tcph->th_off = sizeof(struct tcphdr) / 4; o_tcph->th_sum = 0; o_tcph->th_urp = 0; if (payload_len > 0) { memcpy((char*)o_tcph + sizeof(struct tcphdr), payload, payload_len); } mbuf_clear_csum_performed(*data); csum_flags |= MBUF_CSUM_REQ_IP; retval = mbuf_get_csum_requested(*data, &csum_flags, NULL); if (retval != 0) { kn_debug("mbuf_get_csum_requested returned error %d\n", retval); goto FAILURE; } /* calculate TCP checksum */ csum = kn_tcp_sum_calc(sizeof(struct tcphdr) + payload_len, (u_int16_t*)&o_iph->ip_src.s_addr, (u_int16_t*)&o_iph->ip_dst.s_addr, (u_int16_t*)o_tcph); o_tcph->th_sum = csum; return 0; FAILURE: if (pkt_allocated == TRUE) { mbuf_free(*data); } return retval; }
/**
 * Initializes a SG list from an mbuf.
 *
 * @returns Number of segments.
 * @param pThis     The instance.
 * @param pMBuf     The mbuf.
 * @param pSG       The SG.
 * @param pvFrame   The frame pointer, optional.
 * @param cSegs     The number of segments allocated for the SG.
 *                  This should match the number in the mbuf exactly!
 * @param fSrc      The source of the frame.
 */
DECLINLINE(void) vboxNetFltDarwinMBufToSG(PVBOXNETFLTINS pThis, mbuf_t pMBuf, void *pvFrame, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    NOREF(pThis);

    /*
     * Walk the chain and convert the buffers to segments.  Works INTNETSG::cbTotal.
     */
    unsigned iSeg = 0;
    IntNetSgInitTempSegs(pSG, 0 /*cbTotal*/, cSegs, 0 /*cSegsUsed*/);
    for (mbuf_t pCur = pMBuf; pCur; pCur = mbuf_next(pCur))
    {
        size_t cbSeg = mbuf_len(pCur);
        if (cbSeg)
        {
            void *pvSeg = mbuf_data(pCur);

            /* deal with pvFrame */
            /* If the caller supplied a frame pointer that starts before the
               first non-empty buffer's data, extend this first segment
               backwards to cover the frame header as well. */
            if (!iSeg && pvFrame && pvFrame != pvSeg)
            {
                void     *pvStart   = mbuf_datastart(pMBuf);
                uintptr_t offSeg    = (uintptr_t)pvSeg - (uintptr_t)pvStart;
                uintptr_t offSegEnd = offSeg + cbSeg;
                Assert(pvStart && pvSeg && offSeg < mbuf_maxlen(pMBuf) && offSegEnd <= mbuf_maxlen(pMBuf)); NOREF(offSegEnd);
                uintptr_t offFrame  = (uintptr_t)pvFrame - (uintptr_t)pvStart;
                if (RT_LIKELY(offFrame < offSeg))
                {
                    pvSeg = pvFrame;
                    cbSeg += offSeg - offFrame;
                }
                else
                    AssertMsgFailed(("pvFrame=%p pvStart=%p pvSeg=%p offSeg=%p cbSeg=%#zx offSegEnd=%p offFrame=%p maxlen=%#zx\n",
                                     pvFrame, pvStart, pvSeg, offSeg, cbSeg, offSegEnd, offFrame, mbuf_maxlen(pMBuf)));
                pvFrame = NULL;
            }

            AssertBreak(iSeg < cSegs);
            pSG->cbTotal += cbSeg;
            pSG->aSegs[iSeg].cb = cbSeg;
            pSG->aSegs[iSeg].pv = pvSeg;
            pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
            iSeg++;
        }
        /* The pvFrame might be in a now empty buffer. */
        /* (Mirrors the counting rule in vboxNetFltDarwinMBufCalcSGSegs: the
           header occupies the head mbuf's data area even though m_len is 0,
           so emit a segment spanning from pvFrame to the end of that buffer.) */
        else if (   !iSeg
                 && pvFrame
                 && (uintptr_t)pvFrame - (uintptr_t)mbuf_datastart(pMBuf) < mbuf_maxlen(pMBuf))
        {
            cbSeg = (uintptr_t)mbuf_datastart(pMBuf) + mbuf_maxlen(pMBuf) - (uintptr_t)pvFrame;
            pSG->cbTotal += cbSeg;
            pSG->aSegs[iSeg].cb = cbSeg;
            pSG->aSegs[iSeg].pv = pvFrame;
            pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
            iSeg++;
            pvFrame = NULL;
        }
    }

    Assert(iSeg && iSeg <= cSegs);
    pSG->cSegsUsed = iSeg;

#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded.  The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        AssertReturnVoid(iSeg < cSegs);

        static uint8_t const s_abZero[128] = {0};
        pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
        pSG->aSegs[iSeg].pv = (void *)&s_abZero[0];
        pSG->aSegs[iSeg].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif

#ifdef VBOXNETFLT_DARWIN_TEST_SEG_SIZE
    /*
     * Redistribute the segments.
     */
    /* Testing aid: shift the real segments to the back of the array, then
       re-slice them from the front into chunks of at most
       VBOXNETFLT_DARWIN_TEST_SEG_SIZE bytes each. */
    if (pSG->cSegsUsed < pSG->cSegsAlloc)
    {
        /* copy the segments to the end. */
        int iSrc = pSG->cSegsUsed;
        int iDst = pSG->cSegsAlloc;
        while (iSrc > 0)
        {
            iDst--;
            iSrc--;
            pSG->aSegs[iDst] = pSG->aSegs[iSrc];
        }

        /* create small segments from the start. */
        pSG->cSegsUsed = pSG->cSegsAlloc;
        iSrc = iDst;
        iDst = 0;
        while (   iDst < iSrc
               && iDst < pSG->cSegsAlloc)
        {
            pSG->aSegs[iDst].Phys = NIL_RTHCPHYS;
            pSG->aSegs[iDst].pv = pSG->aSegs[iSrc].pv;
            pSG->aSegs[iDst].cb = RT_MIN(pSG->aSegs[iSrc].cb, VBOXNETFLT_DARWIN_TEST_SEG_SIZE);
            if (pSG->aSegs[iDst].cb != pSG->aSegs[iSrc].cb)
            {
                /* only part of the source segment was consumed: advance it */
                pSG->aSegs[iSrc].cb -= pSG->aSegs[iDst].cb;
                pSG->aSegs[iSrc].pv = (uint8_t *)pSG->aSegs[iSrc].pv + pSG->aSegs[iDst].cb;
            }
            else if (++iSrc >= pSG->cSegsAlloc)
            {
                pSG->cSegsUsed = iDst + 1;
                break;
            }
            iDst++;
        }
    }
#endif

    /* pvFrame must have been consumed by one of the two branches above. */
    AssertMsg(!pvFrame, ("pvFrame=%p pMBuf=%p iSeg=%d\n", pvFrame, pMBuf, iSeg));
}
/**
 * Internal worker that create a darwin mbuf for a (scatter/)gather list.
 *
 * @returns Pointer to the mbuf.
 * @param pThis     The instance.
 * @param pSG       The (scatter/)gather list.
 */
static mbuf_t vboxNetFltDarwinMBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG)
{
    /// @todo future? mbuf_how_t How = preemption enabled ? MBUF_DONTWAIT : MBUF_WAITOK;
    mbuf_how_t How = MBUF_WAITOK;

    /*
     * We need some way of getting back to our instance data when
     * the mbuf is freed, so use pvUserData for this.
     *  -- this is not relevant anylonger! --
     */
    Assert(!pSG->pvUserData || pSG->pvUserData == pThis);
    Assert(!pSG->pvUserData2);
    pSG->pvUserData = pThis;

    /*
     * Allocate a packet and copy over the data.
     *
     * Using mbuf_attachcluster() here would've been nice but there are two
     * issues with it: (1) it's 10.5.x only, and (2) the documentation indicates
     * that it's not supposed to be used for really external buffers. The 2nd
     * point might be argued against considering that the only m_clattach user
     * is mallocs memory for the ext mbuf and not doing what's stated in the docs.
     * However, it's hard to tell if these m_clattach buffers actually makes it
     * to the NICs or not, and even if they did, the NIC would need the physical
     * addresses for the pages they contain and might end up copying the data
     * to a new mbuf anyway.
     *
     * So, in the end it's better to just do it the simple way that will work
     * 100%, even if it involves some extra work (alloc + copy) we really wished
     * to avoid.
     *
     * Note. We can't make use of the physical addresses on darwin because the
     * way the mbuf / cluster stuff works (see mbuf_data_to_physical and
     * mcl_to_paddr).
     */
    mbuf_t pPkt = NULL;
    errno_t err = mbuf_allocpacket(How, pSG->cbTotal, NULL, &pPkt);
    if (!err)
    {
        /* Skip zero sized memory buffers (paranoia). */
        mbuf_t pCur = pPkt;
        while (pCur && !mbuf_maxlen(pCur))
            pCur = mbuf_next(pCur);
        Assert(pCur);

        /* Set the required packet header attributes. */
        mbuf_pkthdr_setlen(pPkt, pSG->cbTotal);
        mbuf_pkthdr_setheader(pPkt, mbuf_data(pCur));

        /* Special case the single buffer copy. */
        /* BUG FIX: the condition previously read "mbuf_next(pCur)", i.e. it
           took this single-buffer path exactly when there WAS a follow-up
           buffer, copying everything into the first mbuf and leaving the
           rest of the chain with unset lengths.  The single-buffer fast path
           requires the chain to end here AND the buffer to fit the frame. */
        if (    !mbuf_next(pCur)
            &&  mbuf_maxlen(pCur) >= pSG->cbTotal)
        {
            mbuf_setlen(pCur, pSG->cbTotal);
            IntNetSgRead(pSG, mbuf_data(pCur));
        }
        else
        {
            /* Multi buffer copying. */
            size_t cbLeft = pSG->cbTotal;
            size_t offSrc = 0;
            while (cbLeft > 0 && pCur)
            {
                size_t cb = mbuf_maxlen(pCur);
                if (cb > cbLeft)
                    cb = cbLeft;
                mbuf_setlen(pCur, cb);
                IntNetSgReadEx(pSG, offSrc, cb, mbuf_data(pCur));

                /* advance */
                offSrc += cb;
                cbLeft -= cb;
                pCur = mbuf_next(pCur);
            }
            Assert(cbLeft == 0);
        }
        if (!err)
        {
            /*
             * Tag the packet and return successfully.
             */
            PVBOXNETFLTTAG pTagData;
            err = mbuf_tag_allocate(pPkt, g_idTag, 0 /* type */, sizeof(VBOXNETFLTTAG) /* tag len */, How, (void **)&pTagData);
            if (!err)
            {
                Assert(pSG->aSegs[0].cb >= sizeof(pTagData->EthHdr));
                memcpy(&pTagData->EthHdr, pSG->aSegs[0].pv, sizeof(pTagData->EthHdr));
                return pPkt;
            }

            /* bailout: */
            AssertMsg(err == ENOMEM || err == EWOULDBLOCK, ("err=%d\n", err));
        }

        mbuf_freem(pPkt);
    }
    else
        AssertMsg(err == ENOMEM || err == EWOULDBLOCK, ("err=%d\n", err));
    pSG->pvUserData = NULL;

    return NULL;
}
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 *
 * Copies len bytes from data into the chain starting at byte offset off,
 * extending the chain with new (zeroed / cluster-backed) mbufs as needed.
 * Returns 0, EINVAL for bad arguments, or ENOBUFS on allocation failure.
 */
errno_t
mbuf_copyback(
	mbuf_t		m,
	size_t		off,
	size_t		len,
	const void	*data,
	mbuf_how_t	how)
{
	size_t	mlen;
	mbuf_t	m_start = m;
	mbuf_t	n;
	int	totlen = 0;   /* bytes of the chain spanned so far, for pkthdr fixup */
	errno_t	result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL)
		return (EINVAL);

	/*
	 * Phase 1: walk (and, when the chain is too short, extend with
	 * zero-filled mbufs) until `off` lands inside the current mbuf.
	 */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	/*
	 * Phase 2: copy, growing the last mbuf into its trailing space and
	 * appending (cluster-backed when large) mbufs as the data demands.
	 */
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			size_t	grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		/* fold the leading offset back in: totlen tracks absolute span */
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	/* Keep the packet header length consistent if the chain grew. */
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen))
		m_start->m_pkthdr.len = totlen;
	return (result);
}