/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * This is used by the macro NFSM_DISSECT for tough
 * cases.
 *
 * Returns a pointer to siz contiguous bytes at the current parse position,
 * advancing nd->nd_md/nd->nd_dpos past them, or NULL if the chain runs out
 * of data or an mbuf allocation fails.  'how' is the allocation wait flag
 * passed to MGET.
 */
APPLESTATIC void *
nfsm_dissct(struct nfsrv_descript *nd, int siz, int how)
{
	mbuf_t mp2;
	int siz2, xfer;
	caddr_t p;
	int left;
	caddr_t retp;

	retp = NULL;
	/* Bytes remaining in the current mbuf past the parse position. */
	left = NFSMTOD(nd->nd_md, caddr_t) + mbuf_len(nd->nd_md) - nd->nd_dpos;
	while (left == 0) {
		/* Skip over empty mbufs; a NULL next means no more data. */
		nd->nd_md = mbuf_next(nd->nd_md);
		if (nd->nd_md == NULL)
			return (retp);
		left = mbuf_len(nd->nd_md);
		nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
	}
	if (left >= siz) {
		/* Fast path: the requested bytes are already contiguous. */
		retp = nd->nd_dpos;
		nd->nd_dpos += siz;
	} else if (mbuf_next(nd->nd_md) == NULL) {
		/* Not enough data left in the chain. */
		return (retp);
	} else if (siz > ncl_mbuf_mhlen) {
		/* A dissect request must fit in one ordinary mbuf. */
		panic("nfs S too big");
	} else {
		/*
		 * Slow path: splice a fresh mbuf after the current one and
		 * copy the straddling bytes into it so they become contiguous.
		 */
		MGET(mp2, MT_DATA, how);
		if (mp2 == NULL)
			return (NULL);
		mbuf_setnext(mp2, mbuf_next(nd->nd_md));
		mbuf_setnext(nd->nd_md, mp2);
		/* Trim the partial tail off the old current mbuf. */
		mbuf_setlen(nd->nd_md, mbuf_len(nd->nd_md) - left);
		nd->nd_md = mp2;
		retp = p = NFSMTOD(mp2, caddr_t);
		NFSBCOPY(nd->nd_dpos, p, left);	/* Copy what was left */
		siz2 = siz - left;
		p += left;
		mp2 = mbuf_next(mp2);
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (NULL);
			xfer = (siz2 > mbuf_len(mp2)) ? mbuf_len(mp2) : siz2;
			if (xfer > 0) {
				NFSBCOPY(NFSMTOD(mp2, caddr_t), p, xfer);
				/* Advance mp2's data pointer past the copied bytes. */
				NFSM_DATAP(mp2, xfer);
				mbuf_setlen(mp2, mbuf_len(mp2) - xfer);
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mbuf_next(mp2);
		}
		/* The spliced mbuf now holds exactly siz contiguous bytes. */
		mbuf_setlen(nd->nd_md, siz);
		nd->nd_md = mp2;
		nd->nd_dpos = NFSMTOD(mp2, caddr_t);
	}
	return (retp);
}
/* Network Interface functions */
/*
 * Demux callback for the ipsec interface: classify an inbound packet as
 * IPv4 or IPv6 by inspecting the IP version nibble of the first data byte.
 *
 * Returns ENOENT if the chain holds no data at all; otherwise 0.
 * NOTE(review): in the default case (version is neither 4 nor 6) this still
 * returns 0 with *protocol left unset -- confirm the caller tolerates that.
 */
static errno_t
ipsec_demux(__unused ifnet_t interface,
			mbuf_t data,
			__unused char *frame_header,
			protocol_family_t *protocol)
{
	struct ip *ip;
	u_int ip_version;

	/* Skip leading empty mbufs to find the first byte of payload. */
	while (data != NULL && mbuf_len(data) < 1) {
		data = mbuf_next(data);
	}

	if (data == NULL)
		return ENOENT;

	/*
	 * Only the version nibble (first byte) is read here; the loop above
	 * guarantees at least one byte is present in this mbuf.
	 */
	ip = mtod(data, struct ip *);
	ip_version = ip->ip_v;

	switch(ip_version) {
		case 4:
			*protocol = PF_INET;
			return 0;
		case 6:
			*protocol = PF_INET6;
			return 0;
		default:
			break;
	}

	return 0;
}
/*
 * Sum the storage capacity (mbuf_maxlen) of every link in the chain.
 * The head mbuf is always examined once before the chain is walked,
 * mirroring the original do/while behaviour.
 */
size_t MbufUtils::mbufTotalMaxLength(mbuf_t mbuf)
{
    size_t total = 0;
    for (;;) {
        total += mbuf_maxlen(mbuf);
        mbuf = mbuf_next(mbuf);
        if (mbuf == NULL) {
            break;
        }
    }
    return total;
}
/**
 * Calculates the number of segments required to represent the mbuf.
 *
 * @returns Number of segments.
 * @param   pThis       The instance.
 * @param   pMBuf       The mbuf.
 * @param   pvFrame     The frame pointer, optional.
 */
DECLINLINE(unsigned) vboxNetFltDarwinMBufCalcSGSegs(PVBOXNETFLTINS pThis, mbuf_t pMBuf, void *pvFrame)
{
    NOREF(pThis);

    /*
     * Count the buffers in the chain.
     */
    unsigned cSegs = 0;
    for (mbuf_t pCur = pMBuf; pCur; pCur = mbuf_next(pCur))
        if (mbuf_len(pCur))
            cSegs++;
        /* An empty head buffer still yields a segment when pvFrame points
           inside the head mbuf's backing storage (frame header case). */
        else if (   !cSegs
                 && pvFrame
                 && (uintptr_t)pvFrame - (uintptr_t)mbuf_datastart(pMBuf) < mbuf_maxlen(pMBuf))
            cSegs++;

#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add one buffer if the total is less than the ethernet minimum 60 bytes.
     * This may allocate a segment too much if the ethernet header is separated,
     * but that shouldn't harm us much.
     */
    if (mbuf_pkthdr_len(pMBuf) < 60)
        cSegs++;
#endif

#ifdef VBOXNETFLT_DARWIN_TEST_SEG_SIZE
    /* maximize the number of segments. */
    cSegs = RT_MAX(VBOXNETFLT_DARWIN_MAX_SEGS - 1, cSegs);
#endif

    /* Report at least one segment even for a fully empty chain. */
    return cSegs ? cSegs : 1;
}
/*
 * Socket-filter data-in callback: snapshot the inbound data chain into the
 * tracker's request buffer and hand the tracker to the worker queue.
 *
 * Always returns 0 so the data continues up the stack unmodified.
 *
 * Fix: the original logged tracker->proc_name BEFORE the tracker==NULL
 * check, dereferencing a NULL cookie; validation now happens first.
 */
errno_t IOWebFilterClass::tl_data_in_func(void *cookie,
                                          socket_t so,
                                          const struct sockaddr *from,
                                          mbuf_t *data,
                                          mbuf_t *control,
                                          sflt_data_flag_t flags)
{
    SocketTracker *tracker = (SocketTracker*)cookie;

    // Validate the cookie and data pointers before touching tracker fields.
    if(tracker==NULL || data==NULL ||
       (tracker->magic&(kSocketTrackerInvalid|kSocketTrackerDetach))!=0)
    {
        LOG(LOG_DEBUG, "in return process");
        return 0;
    }

    LOG(LOG_DEBUG, "I am in, %s, magic=%ld", tracker->proc_name, tracker->magic);

    // A tracker without its lock can never be used safely; poison it.
    if(tracker->lock==NULL)
    {
        tracker->magic=kSocketTrackerInvalid;
        return 0;
    }

    IOLockLock(tracker->lock);
    mbuf_t head = *data;
    uint64_t len=0;
    if(head==NULL)
    {
        tracker->magic=kSocketTrackerInvalid;
        IOLockUnlock(tracker->lock);
        return 0;
    }

    // Total byte count across the whole mbuf chain.
    while(head)
    {
        len += mbuf_len(head);
        head = mbuf_next(head);
    }

    // Refuse data that cannot fit (keep one byte for NUL termination).
    if(len>sizeof(tracker->request_meg)-1)
    {
        tracker->magic=kSocketTrackerInvalid;
        IOLockUnlock(tracker->lock);
        return 0;
    }

    bzero(tracker->request_meg, sizeof(tracker->request_meg));
    mbuf_copydata(*data, 0, len, tracker->request_meg);

    //todo: sync to shared memory, record a new request
    if(_queue)
    {
        LOG(LOG_DEBUG, "enter queue");
        _queue->EnqueueTracker((DataArgs*)tracker);
    }
    IOLockUnlock(tracker->lock);
    return 0;
}
/*
 * Return the total capacity of an mbuf chain: the sum of mbuf_maxlen()
 * over every link reachable from m.  A NULL m yields 0.
 */
__private_extern__ size_t
mbuf_pkthdr_maxlen(mbuf_t m)
{
	size_t total = 0;
	mbuf_t cur;

	for (cur = m; cur != NULL; cur = mbuf_next(cur))
		total += mbuf_maxlen(cur);
	return (total);
}
/*
 * Distribute targetLength bytes of length across the links of an mbuf
 * chain, front to back.  Fails with kIOReturnNoMemory when the chain's
 * total capacity is insufficient, or kIOReturnInternalError if the chain
 * ends before the requested length is consumed.
 */
IOReturn MbufUtils::setChainLength(mbuf_t mbuf, size_t targetLength)
{
    // Reject requests the chain cannot possibly satisfy.
    if (targetLength > MbufUtils::mbufTotalMaxLength(mbuf)) {
        return kIOReturnNoMemory;
    }
    for (mbuf_t cur = mbuf; targetLength != 0; cur = mbuf_next(cur)) {
        if (NULL == cur) {
            // Ran out of links with bytes still unassigned.
            return kIOReturnInternalError;
        }
        targetLength -= MbufUtils::attemptToSetLength(cur, targetLength);
    }
    return kIOReturnSuccess;
}
/* Network Interface functions */
/*
 * Demux callback for the utun interface: the protocol family is carried
 * in-band as a 4-byte header at the front of the packet payload.
 *
 * Returns ENOENT when the chain contains no data; otherwise 0 with
 * *protocol set from the first four payload bytes.
 *
 * NOTE(review): the loop only guarantees >= 1 byte in the selected mbuf,
 * but a u_int32_t (4 bytes) is read from it -- confirm callers always
 * deliver the 4-byte protocol header contiguously in the first non-empty
 * mbuf.
 */
static errno_t
utun_demux(
	__unused ifnet_t interface,
	mbuf_t data,
	__unused char *frame_header,
	protocol_family_t *protocol)
{
	/* Skip leading empty mbufs. */
	while (data != NULL && mbuf_len(data) < 1) {
		data = mbuf_next(data);
	}

	if (data == NULL)
		return ENOENT;

	/* The protocol family is stored host-order at the payload start. */
	*protocol = *(u_int32_t *)mbuf_data(data);
	return 0;
}
/* -----------------------------------------------------------------------------
 * Hex/ASCII dump of an entire mbuf chain to the system log, 16 bytes per
 * line, prefixed with 'msg'.  Bytes are gathered across mbuf boundaries so
 * lines are always full except possibly the last.  No-op when m is NULL.
----------------------------------------------------------------------------- */
void ppp_comp_logmbuf(char *msg, mbuf_t m)
{
    int i, lcount, copycount, count;
    char lbuf[16], *data;

    if (m == NULL)
        return;

    IOLog("%s: \n", msg);
    /* 'count'/'data' track the unread remainder of the current mbuf. */
    for (count = mbuf_len(m), data = mbuf_data(m); m != NULL; ) {
        /* build a line of output */
        for(lcount = 0; lcount < sizeof(lbuf); lcount += copycount) {
            if (!count) {
                /* Current mbuf exhausted; advance to the next one. */
                m = mbuf_next(m);
                if (m == NULL)
                    break;
                count = mbuf_len(m);
                data = mbuf_data(m);
            }
            /* Copy as much as fits in the line buffer from this mbuf. */
            copycount = (count > sizeof(lbuf) - lcount) ? sizeof(lbuf) - lcount : count;
            bcopy(data, &lbuf[lcount], copycount);
            data += copycount;
            count -= copycount;
        }

        /* output line (hex 1st, then ascii) */
        IOLog("%s: 0x ", msg);
        for(i = 0; i < lcount; i++) {
            if (i == 8)
                IOLog(" ");
            IOLog("%02x ", (u_char)lbuf[i]);
        }
        /* Pad a short final line so the ASCII column stays aligned. */
        for( ; i < sizeof(lbuf); i++) {
            if (i == 8)
                IOLog(" ");
            IOLog(" ");
        }
        IOLog(" '");
        /* Printable range 040..0176 (space..'~'); others shown as '.'. */
        for(i = 0; i < lcount; i++)
            IOLog("%c",(lbuf[i]>=040 && lbuf[i]<=0176)?lbuf[i]:'.');
        IOLog("'\n");
    }
}
/* * Advance the position in the mbuf chain. * If offs == 0, this is a no-op, but it is simpler to just return from * here than check for offs > 0 for all calls to nfsm_advance. * If left == -1, it should be calculated here. */ APPLESTATIC int nfsm_advance(struct nfsrv_descript *nd, int offs, int left) { int error = 0; if (offs == 0) goto out; /* * A negative offs should be considered a serious problem. */ if (offs < 0) panic("nfsrv_advance"); /* * If left == -1, calculate it here. */ if (left == -1) left = NFSMTOD(nd->nd_md, caddr_t) + mbuf_len(nd->nd_md) - nd->nd_dpos; /* * Loop around, advancing over the mbuf data. */ while (offs > left) { offs -= left; nd->nd_md = mbuf_next(nd->nd_md); if (nd->nd_md == NULL) { error = EBADRPC; goto out; } left = mbuf_len(nd->nd_md); nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t); } nd->nd_dpos += offs; out: NFSEXITCODE(error); return (error); }
/*
 * Transmit path: copy the outgoing mbuf chain into the pre-allocated DMA
 * buffer for the next TPD descriptor, fill in the descriptor, advance the
 * ring and notify the hardware.  The packet is always freed here; returns
 * kIOReturnOutputSuccess or kIOReturnOutputDropped.
 */
UInt32 AtherosL1Ethernet::outputPacket(mbuf_t m, void *prm)
{
	u32 buf_len;
	at_adapter *adapter=&adapter_;
	u16 next_to_use;
	u16 tpd_req = 1;
	TpdDescr *pTpd ;
	struct at_buffer *buffer_info;

	if(tpd_avail(&adapter->tpd_ring) < tpd_req)
	{
		// no enough descriptor
		DbgPrint("no enough resource!!\n");
		freePacket(m);
		return kIOReturnOutputDropped;
	}

	// init tpd flags
	struct at_tpd_ring* tpd_ring = &adapter->tpd_ring;
	pTpd = AT_TPD_DESC(tpd_ring, ((u16)atomic_read(&tpd_ring->next_to_use)));
	//memset(pTpd, 0, sizeof(TpdDescr));
	// Clear every descriptor field except the DMA address, which is
	// set up once at ring initialisation.
	memset(((u8*)pTpd + sizeof(pTpd->addr)), 0, (sizeof(TpdDescr) - sizeof(pTpd->addr)));	//addr don't clear

	next_to_use = (u16)atomic_read(&tpd_ring->next_to_use);
	buffer_info = tpd_ring->buffer_info+next_to_use;

	if (!buffer_info->memDesc)
	{
		DbgPrint("Tx buffer is null!!\n");
		freePacket(m);
		return kIOReturnOutputDropped;
	}

	// Drop anything larger than the per-descriptor staging buffer.
	if (mbuf_pkthdr_len(m) <= AT_TX_BUF_LEN) buf_len = mbuf_pkthdr_len(m);
	else
	{
		DbgPrint("Tx Packet size is too big, droping\n");
		freePacket(m);
		return kIOReturnOutputDropped;
	}

	DbgPrint("outputPacket() length %d next_to_use=%d\n", buf_len, next_to_use);

	UInt8 *data_ptr = (UInt8 *)buffer_info->memDesc->getBytesNoCopy();
	UInt32 pkt_snd_len = 0;
	mbuf_t cur_buf = m;

	// Flatten the chain into the staging buffer.  The while-condition
	// checks the NEXT mbuf's length against buf_len before copying it,
	// so the staging buffer is never overrun.
	// NOTE(review): data_ptr/pkt_snd_len still advance by mbuf_len() even
	// when mbuf_data() is NULL -- confirm that case cannot occur here.
	do
	{
		if (mbuf_data(cur_buf)) bcopy(mbuf_data(cur_buf), data_ptr, mbuf_len(cur_buf));
		data_ptr += mbuf_len(cur_buf);
		pkt_snd_len += mbuf_len(cur_buf);
	}
	while(((cur_buf = mbuf_next(cur_buf)) != NULL) && ((pkt_snd_len + mbuf_len(cur_buf)) <= buf_len));

	buf_len = pkt_snd_len;
	buffer_info->length = (UInt16)buf_len;
	pTpd->buf_len= OSSwapHostToLittleInt16((UInt16)buf_len);
	pTpd->eop = 1;	// single-descriptor packet: end-of-packet flag

	// Advance the producer index with wrap-around.
	if(++next_to_use == tpd_ring->count) next_to_use = 0;
	atomic_set(&tpd_ring->next_to_use, next_to_use);

	// update mailbox
	at_update_mailbox(adapter);

	OSSynchronizeIO();
	freePacket(m);
	return kIOReturnOutputSuccess;
}
/*
 * Decide whether the packet can be coalesced into a new mbuf chain whose
 * total physical-segment count fits within maxSegs, and build that chain.
 *
 * On success the new chain is spliced after 'packet' (whose length is set
 * to 0) and true is returned; on any failure the partial chain is freed
 * and false is returned.  segsPerMBuf[] caches per-mbuf segment counts for
 * the first mbufsInCache links; beyond that, counts are recomputed.
 */
static inline bool analyseSegments(
    mbuf_t packet,              /* input packet mbuf */
    const UInt32 mbufsInCache,  /* number of entries in segsPerMBuf[] */
    const UInt32 segsPerMBuf[], /* segments required per mbuf */
    SInt32 numSegs,             /* total number of segments */
    const UInt32 maxSegs)       /* max controller segments per mbuf */
{
    mbuf_t newPacket;           // output mbuf chain.
    mbuf_t out;                 // current output mbuf link.
    SInt32 outSize;             // size of current output mbuf link.
    SInt32 outSegs;             // segments for current output mbuf link.
    SInt32 doneSegs;            // segments for output mbuf chain.
    SInt32 outLen;              // remaining length of input buffer.

    mbuf_t in = packet;         // save the original input packet pointer.
    UInt32 inIndex = 0;
    const uint32_t c_mlen = mbuf_get_mlen();

    // Allocate a mbuf (non header mbuf) to begin the output mbuf chain.
    if(mbuf_get(MBUF_DONTWAIT, MT_DATA, &newPacket))
    {
        ERROR_LOG("analyseSegments: MGET() 1 error\n");
        return false;
    }

    /* Initialise outgoing packet controls */
    out = newPacket;
    outSize = c_mlen;
    doneSegs = outSegs = outLen = 0;

    // numSegs stores the delta between the total and the max. For each
    // input mbuf consumed, we decrement numSegs.
    //
    numSegs -= maxSegs;

    // Loop through the input packet mbuf 'in' and construct a new mbuf chain
    // large enough to make (numSegs + doneSegs + outSegs) less than or
    // equal to zero.
    //
    do {
        uintptr_t vmo;
        outLen += mbuf_len(in);

        while (outLen > outSize)
        {
            // Oh dear the current outgoing length is too big.
            if (outSize != MCLBYTES)
            {
                // Current mbuf is not yet a cluster so promote, then
                // check for error.
                if(mbuf_mclget(MBUF_DONTWAIT, MT_DATA, &out)
                   || !(mbuf_flags(out) & MBUF_EXT) )
                {
                    ERROR_LOG("analyseSegments: MCLGET() error\n");
                    goto bombAnalysis;
                }

                outSize = MCLBYTES;

                continue;
            }

            // Count the physical pages this full cluster spans.
            vmo = (uintptr_t)mbuf_data(out);
            mbuf_setlen(out, MCLBYTES);   /* Fill in target copy size */
            doneSegs += (round_page(vmo + MCLBYTES) - trunc_page(vmo)) / PAGE_SIZE;

            // If the number of segments of the output chain, plus
            // the segment for the mbuf we are about to allocate is greater
            // than maxSegs, then abort.
            //
            if (doneSegs + 1 > (int) maxSegs)
            {
                ERROR_LOG("analyseSegments: maxSegs limit 1 reached! %ld %ld\n",
                          doneSegs, maxSegs);
                goto bombAnalysis;
            }

            mbuf_t tempmbuf;
            if(mbuf_get(MBUF_DONTWAIT, MT_DATA, &tempmbuf))
            {
                ERROR_LOG("analyseSegments: MGET() error\n");
                goto bombAnalysis;
            }
            mbuf_setnext(out, tempmbuf);
            out = tempmbuf;
            outSize = c_mlen;
            outLen -= MCLBYTES;
        }

        // Compute number of segment in current outgoing mbuf.
        vmo = (uintptr_t)mbuf_data(out);
        outSegs = ((SInt32)round_page(vmo + outLen) - (SInt32)trunc_page(vmo)) / (SInt32)PAGE_SIZE;
        if (doneSegs + outSegs > (int) maxSegs)
        {
            ERROR_LOG("analyseSegments: maxSegs limit 2 reached! %ld %ld %ld\n",
                      doneSegs, outSegs, maxSegs);
            goto bombAnalysis;
        }

        // Get the number of segments in the current inbuf
        if (inIndex < mbufsInCache)
            numSegs -= segsPerMBuf[inIndex];    // Yeah, in cache
        else
        {
            // Hmm, we have to recompute from scratch. Copy code from genPhys.
            int thisLen = 0, mbufLen;

            vmo = (uintptr_t)mbuf_data(in);
            for (mbufLen = (SInt32)mbuf_len(in); mbufLen; mbufLen -= thisLen)
            {
                thisLen = MIN((SInt32)next_page(vmo), (SInt32)(vmo + mbufLen)) - (SInt32)vmo;
                vmo += thisLen;
                numSegs--;
            }
        }

        // Walk the incoming buffer on one.
        in = mbuf_next(in);
        inIndex++;

        // continue looping until the total number of segments has dropped
        // to an acceptable level, or if we ran out of mbuf links.
    } while (in && ((numSegs + doneSegs + outSegs) > 0));

    if ( (int) (numSegs + doneSegs + outSegs) <= 0)
    {   // success

        mbuf_setlen(out, outLen);    // Set last mbuf with the remaining length.

        // The amount to copy is determine by the segment length in each
        // mbuf linked to newPacket. The sum can be smaller than
        // packet->pkthdr.len;
        //
        coalesceSegments(packet, newPacket);

        // The initial header mbuf is preserved, its length set to zero, and
        // linked to the new packet chain.

        // coalesceSegments() has already freed the mbufs that it coalesced into the newPacket chain.
        // It also hooked the remaining chain pointed to by "in" to the end of the newPacket chain.
        // All that remains is to set packet's len to 0 (to "free" the contents that coalesceSegments copied out)
        // and make it the head of the new chain.

        mbuf_setlen(packet , 0 );
        mbuf_setnext(packet, newPacket);

        return true;
    }

bombAnalysis:

    mbuf_freem(newPacket);
    return false;
}
/*
 * Translate an mbuf chain into physical segments via the outSeg functor.
 * Fast paths: a single page-contained mbuf (one segment), and a forced
 * one-segment coalesce into a fresh cluster.  Otherwise every mbuf is
 * walked page-by-page; if the segment count exceeds maxSegs and
 * coalescing is allowed, analyseSegments() rebuilds the chain and the
 * walk is retried once.  Returns the segment count, or 0 on failure.
 */
UInt32 IOMbufMemoryCursor::genPhysicalSegments(mbuf_t packet, void *vector,
                                               UInt32 maxSegs, bool doCoalesce)
{
    bool doneCoalesce = false;

    if (!packet || !(mbuf_flags(packet) & MBUF_PKTHDR))
        return 0;

    if (!maxSegs)
    {
        maxSegs = maxNumSegments;
        if (!maxSegs) return 0;
    }

    if ( mbuf_next(packet) == 0 )
    {
        uintptr_t src;
        struct IOPhysicalSegment physSeg;

        /*
         * the packet consists of only 1 mbuf
         * so if the data buffer doesn't span a page boundary
         * we can take the simple way out
         */
        src = (uintptr_t)mbuf_data(packet);

        if ( trunc_page(src) == trunc_page(src + mbuf_len(packet) - 1) )
        {
            physSeg.location = (IOPhysicalAddress) mbuf_data_to_physical((char *)src);
            if ( physSeg.location )
            {
                physSeg.length = mbuf_len(packet);
                (*outSeg)(physSeg, vector, 0);
                return 1;
            }

            // Physical translation failed: fall through and force a
            // single-segment coalesce if permitted.
            maxSegs = 1;
            if ( doCoalesce == false ) return 0;
        }
    }

    if ( doCoalesce == true && maxSegs == 1 )
    {
        uintptr_t src;
        uintptr_t dst;
        mbuf_t m;
        mbuf_t mnext;
        mbuf_t out;
        UInt32 len = 0;
        struct IOPhysicalSegment physSeg;

        // One cluster is the most we can coalesce into.
        if ( mbuf_pkthdr_len(packet) > MCLBYTES ) return 0;

        m = packet;

        // Allocate a non-header mbuf + cluster.
        if (mbuf_getpacket( MBUF_DONTWAIT, &out )) return 0;
        mbuf_setflags( out, mbuf_flags( out ) & ~MBUF_PKTHDR );
        dst = (uintptr_t)mbuf_data(out);

        // Copy the entire chain into the new cluster.
        do
        {
            src = (uintptr_t)mbuf_data(m);
            BCOPY( src, dst, mbuf_len(m) );
            dst += mbuf_len(m);
            len += mbuf_len(m);
        } while ( (m = mbuf_next(m)) != 0 );

        mbuf_setlen(out , len);

        dst = (uintptr_t)mbuf_data(out);
        physSeg.location = (IOPhysicalAddress) mbuf_data_to_physical((char *)dst);
        if (!physSeg.location)
        {
            mbuf_free(out);
            return 0;
        }
        physSeg.length = mbuf_len(out);
        (*outSeg)(physSeg, vector, 0);

        // Free the original chain tail; only the header mbuf survives.
        m = mbuf_next(packet);
        while (m != 0)
        {
            mnext = mbuf_next(m);
            mbuf_free(m);
            m = mnext;
        }

        // The initial header mbuf is preserved, its length set to zero,
        // and linked to the new packet chain.
        mbuf_setlen(packet , 0);
        mbuf_setnext(packet , out);
        mbuf_setnext(out , 0);

        return 1;
    }

    //
    // Iterate over the mbuf, translating segments where allowed. When we
    // are not allowed to translate segments then accumulate segment
    // statistics up to kMBufDataCacheSize of mbufs. Finally
    // if we overflow our cache just count how many segments this
    // packet represents.
    //
    UInt32 segsPerMBuf[kMBufDataCacheSize];

tryAgain:
    UInt32 curMBufIndex = 0;
    UInt32 curSegIndex  = 0;
    UInt32 lastSegCount = 0;
    mbuf_t m = packet;

    // For each mbuf in incoming packet.
    do {
        vm_size_t mbufLen, thisLen = 0;
        uintptr_t src;

        // Step through each segment in the current mbuf
        for (mbufLen = mbuf_len(m), src = (uintptr_t)mbuf_data(m);
             mbufLen;
             src += thisLen, mbufLen -= thisLen)
        {
            // If maxSegmentSize is atleast PAGE_SIZE, then
            // thisLen = MIN(next_page(src), src + mbufLen) - src;
            thisLen = MIN(mbufLen, maxSegmentSize);
            thisLen = MIN(next_page(src), src + thisLen) - src;

            // If room left then find the current segment addr and output
            if (curSegIndex < maxSegs) {
                struct IOPhysicalSegment physSeg;

                physSeg.location = (IOPhysicalAddress) mbuf_data_to_physical((char *)src);
                if ( physSeg.location == 0 )
                {
                    // Translation failed mid-walk: restart with a forced
                    // one-segment coalesce, or give up.
                    return doCoalesce ? genPhysicalSegments(packet, vector, 1, true) : 0;
                }
                physSeg.length = thisLen;
                (*outSeg)(physSeg, vector, curSegIndex);
            }

            // Count segments if we are coalescing.
            curSegIndex++;
        }

        // Cache the segment count data if room is available.
        if (curMBufIndex < kMBufDataCacheSize) {
            segsPerMBuf[curMBufIndex] = curSegIndex - lastSegCount;
            lastSegCount = curSegIndex;
        }

        // Move on to next imcoming mbuf
        curMBufIndex++;
        m = mbuf_next(m);
    } while (m);

    // If we finished cleanly return number of segments found
    if (curSegIndex <= maxSegs)
        return curSegIndex;
    if (!doCoalesce)
        return 0;   // if !coalescing we've got a problem.

    // If we are coalescing and it is possible then attempt coalesce,
    if (!doneCoalesce
        && (UInt) mbuf_pkthdr_len(packet) <= maxSegs * maxSegmentSize) {
        // Hmm, we have to do some coalescing.
        bool analysisRet;

        analysisRet = analyseSegments(packet,
                                      MIN(curMBufIndex, kMBufDataCacheSize),
                                      segsPerMBuf,
                                      curSegIndex,
                                      maxSegs);
        if (analysisRet) {
            doneCoalesce = true;
            coalesceCount++;
            goto tryAgain;
        }
    }

    assert(!doneCoalesce);  // Problem in Coalesce code.
    packetTooBigErrors++;
    return 0;
}
/**
 * Initializes a SG list from an mbuf.
 *
 * @returns Number of segments.
 * @param   pThis       The instance.
 * @param   pMBuf       The mbuf.
 * @param   pSG         The SG.
 * @param   pvFrame     The frame pointer, optional.
 * @param   cSegs       The number of segments allocated for the SG.
 *                      This should match the number in the mbuf exactly!
 * @param   fSrc        The source of the frame.
 */
DECLINLINE(void) vboxNetFltDarwinMBufToSG(PVBOXNETFLTINS pThis, mbuf_t pMBuf, void *pvFrame, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    NOREF(pThis);

    /*
     * Walk the chain and convert the buffers to segments.  Works INTNETSG::cbTotal.
     */
    unsigned iSeg = 0;
    IntNetSgInitTempSegs(pSG, 0 /*cbTotal*/, cSegs, 0 /*cSegsUsed*/);
    for (mbuf_t pCur = pMBuf; pCur; pCur = mbuf_next(pCur))
    {
        size_t cbSeg = mbuf_len(pCur);
        if (cbSeg)
        {
            void *pvSeg = mbuf_data(pCur);

            /* deal with pvFrame */
            /* If the caller supplied a frame pointer that precedes the first
               segment's data pointer (the frame header sits in front of the
               payload within the same buffer), extend the first segment
               backwards to include it. */
            if (!iSeg && pvFrame && pvFrame != pvSeg)
            {
                void     *pvStart   = mbuf_datastart(pMBuf);
                uintptr_t offSeg    = (uintptr_t)pvSeg - (uintptr_t)pvStart;
                uintptr_t offSegEnd = offSeg + cbSeg;
                Assert(pvStart && pvSeg && offSeg < mbuf_maxlen(pMBuf) && offSegEnd <= mbuf_maxlen(pMBuf)); NOREF(offSegEnd);
                uintptr_t offFrame  = (uintptr_t)pvFrame - (uintptr_t)pvStart;
                if (RT_LIKELY(offFrame < offSeg))
                {
                    pvSeg = pvFrame;
                    cbSeg += offSeg - offFrame;
                }
                else
                    AssertMsgFailed(("pvFrame=%p pvStart=%p pvSeg=%p offSeg=%p cbSeg=%#zx offSegEnd=%p offFrame=%p maxlen=%#zx\n",
                                     pvFrame, pvStart, pvSeg, offSeg, cbSeg, offSegEnd, offFrame, mbuf_maxlen(pMBuf)));
                pvFrame = NULL;
            }

            AssertBreak(iSeg < cSegs);
            pSG->cbTotal += cbSeg;
            pSG->aSegs[iSeg].cb = cbSeg;
            pSG->aSegs[iSeg].pv = pvSeg;
            pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
            iSeg++;
        }
        /* The pvFrame might be in a now empty buffer. */
        else if (   !iSeg
                 && pvFrame
                 && (uintptr_t)pvFrame - (uintptr_t)mbuf_datastart(pMBuf) < mbuf_maxlen(pMBuf))
        {
            /* Emit the frame header alone: from pvFrame to the end of the
               head buffer's storage. */
            cbSeg = (uintptr_t)mbuf_datastart(pMBuf) + mbuf_maxlen(pMBuf) - (uintptr_t)pvFrame;
            pSG->cbTotal += cbSeg;
            pSG->aSegs[iSeg].cb = cbSeg;
            pSG->aSegs[iSeg].pv = pvFrame;
            pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
            iSeg++;
            pvFrame = NULL;
        }
    }

    Assert(iSeg && iSeg <= cSegs);
    pSG->cSegsUsed = iSeg;

#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded. The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        AssertReturnVoid(iSeg < cSegs);

        static uint8_t const s_abZero[128] = {0};
        pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
        pSG->aSegs[iSeg].pv = (void *)&s_abZero[0];
        pSG->aSegs[iSeg].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif

#ifdef VBOXNETFLT_DARWIN_TEST_SEG_SIZE
    /*
     * Redistribute the segments.
     */
    if (pSG->cSegsUsed < pSG->cSegsAlloc)
    {
        /* copy the segments to the end. */
        int iSrc = pSG->cSegsUsed;
        int iDst = pSG->cSegsAlloc;
        while (iSrc > 0)
        {
            iDst--;
            iSrc--;
            pSG->aSegs[iDst] = pSG->aSegs[iSrc];
        }

        /* create small segments from the start. */
        pSG->cSegsUsed = pSG->cSegsAlloc;
        iSrc = iDst;
        iDst = 0;
        while (     iDst < iSrc
               &&   iDst < pSG->cSegsAlloc)
        {
            pSG->aSegs[iDst].Phys = NIL_RTHCPHYS;
            pSG->aSegs[iDst].pv = pSG->aSegs[iSrc].pv;
            pSG->aSegs[iDst].cb = RT_MIN(pSG->aSegs[iSrc].cb, VBOXNETFLT_DARWIN_TEST_SEG_SIZE);
            if (pSG->aSegs[iDst].cb != pSG->aSegs[iSrc].cb)
            {
                pSG->aSegs[iSrc].cb -= pSG->aSegs[iDst].cb;
                pSG->aSegs[iSrc].pv = (uint8_t *)pSG->aSegs[iSrc].pv + pSG->aSegs[iDst].cb;
            }
            else if (++iSrc >= pSG->cSegsAlloc)
            {
                pSG->cSegsUsed = iDst + 1;
                break;
            }
            iDst++;
        }
    }
#endif

    AssertMsg(!pvFrame, ("pvFrame=%p pMBuf=%p iSeg=%d\n", pvFrame, pMBuf, iSeg));
}
/*
 * Copy the data from the source chain 'srcm' into the destination chain
 * 'dstm', consuming (freeing) source mbufs as they are drained -- except
 * the original head, which the caller re-links.  When the destination
 * chain runs out, any unconsumed source remainder is hooked onto its end.
 * Destination link lengths must have been pre-set by the caller
 * (analyseSegments); source and destination totals are assumed to line up.
 */
static inline void coalesceSegments(mbuf_t srcm, mbuf_t dstm)
{
    uintptr_t src, dst;
    SInt32 srcLen, dstLen;
    mbuf_t temp;
    mbuf_t head = srcm;   // never freed here; caller keeps it as chain head

    srcLen = (SInt32)mbuf_len( srcm );
    src = (uintptr_t) mbuf_data(srcm);

    dstLen = (SInt32)mbuf_len( dstm );
    dst = (uintptr_t) mbuf_data( dstm );

    for (;;) {

        if (srcLen < dstLen) {

            // Copy remainder of src mbuf to current dst.
            BCOPY(src, dst, srcLen);
            dst += srcLen;
            dstLen -= srcLen;

            // Move on to the next source mbuf.
            temp = mbuf_next( srcm ); assert(temp);
            if(srcm != head)
                mbuf_free(srcm);
            srcm = temp;

            srcLen = (SInt32)mbuf_len( srcm );
            src = (uintptr_t)mbuf_data(srcm);
        }
        else if (srcLen > dstLen) {

            // Copy some of src mbuf to remaining space in dst mbuf.
            BCOPY(src, dst, dstLen);
            src += dstLen;
            srcLen -= dstLen;

            // Move on to the next destination mbuf.
            temp = mbuf_next( dstm ); assert(temp);
            dstm = temp;

            dstLen = (SInt32)mbuf_len( dstm );
            dst = (uintptr_t)mbuf_data( dstm );
        }
        else {  /* (srcLen == dstLen) */

            // copy remainder of src into remaining space of current dst
            BCOPY(src, dst, srcLen);

            // Free current mbuf and move the current onto the next
            temp = mbuf_next( srcm );
            if(srcm != head)
                mbuf_free(srcm);
            srcm = temp;

            // Do we have any more dest buffers to copy to?
            if (! mbuf_next ( dstm ))
            {
                // nope- hook the remainder of source chain to end of dest chain
                mbuf_setnext(dstm, srcm);
                break;
            }
            dstm = mbuf_next ( dstm );

            assert(srcm);
            dstLen = (SInt32)mbuf_len ( dstm );
            dst = (uintptr_t)mbuf_data( dstm );

            srcLen = (SInt32)mbuf_len( srcm );
            src = (uintptr_t)mbuf_data( srcm );
        }
    }
}
/*
 * Transmit entry point: validate the mbuf, merge chained mbufs into a
 * single buffer and (eventually) hand it to the 802.11 layer.  Currently
 * the actual xmit call is stubbed out, so every packet ends up dropped.
 *
 * Fix: the interface-state guard was inverted.  The original tested
 * (getFlags() & IFF_RUNNING) != 0, which dropped packets while the
 * interface was UP and let them through when it was down; it now drops
 * only when the interface is not running.
 */
UInt32 darwin_iwi3945::outputPacket(mbuf_t m, void * param)
{
	//IOLog("outputPacket\n");
	// Drop when the interface is not running, or no packet was supplied.
	if((fNetif->getFlags() & IFF_RUNNING)==0 || m==NULL)
	{
		if (m)
		if (!(mbuf_type(m) == MBUF_TYPE_FREE) ) freePacket(m);
		m=NULL;
		netStats->outputErrors++;
		return kIOReturnOutputDropped;
	}

	mbuf_t nm;
	int ret = kIOReturnOutputDropped;

	//checking supported packet
	IWI_DEBUG("outputPacket t: %d f:%04x\n",mbuf_type(m),mbuf_flags(m));

	//drop mbuf is not PKTHDR
	if (!(mbuf_flags(m) & MBUF_PKTHDR) ){
		IWI_ERR("BUG: dont support mbuf without pkthdr and dropped \n");
		netStats->outputErrors++;
		goto finish;
	}

	if(mbuf_type(m) == MBUF_TYPE_FREE){
		IWI_ERR("BUG: this is freed packet and dropped \n");
		netStats->outputErrors++;
		goto finish;
	}

	// Coalesce the chain into a single mbuf for the hardware.
	nm = mergePacket(m);
	if (nm==NULL)
	{
		netStats->outputErrors++;
		goto finish;
	}

	if(mbuf_next(nm)){
		IWI_ERR("BUG: dont support chains mbuf\n");
		IWI_ERR("BUG: tx packet is not single mbuf mbuf_len(%d) mbuf_pkthdr_len(%d)\n",mbuf_len(nm) , mbuf_pkthdr_len(nm) );
		IWI_ERR("BUG: next mbuf size %d\n",mbuf_len(mbuf_next(nm)));
	}

	IWI_DEBUG_FULL("call ieee80211_xmit - not done yet\n");
	//ret = ieee80211_xmit(nm,priv->net_dev);
	//struct ieee80211_tx_control ctrl;
	//ret=ipw_tx_skb(priv, nm, &ctrl);

finish:
	/* free finished packet */
	//freePacket(m);
	//m=NULL;
	if (ret == kIOReturnOutputDropped) {
		//if (nm)
		//if (!(mbuf_type(nm) == MBUF_TYPE_FREE) ) freePacket(nm);
		//nm=NULL;
	}
	return ret;
}
//callback for incoming data // only interested in DNS responses for IP:URL mappings // code inspired by: https://github.com/williamluke/peerguardian-linux/blob/master/pgosx/kern/ppfilter.c static errno_t data_in(void *cookie, socket_t so, const struct sockaddr *from, mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags) { //dbg msg IOLog("LULU: in %s\n", __FUNCTION__); //port in_port_t port = 0; //peer name struct sockaddr_in6 peerName = {0}; //mem buffer mbuf_t memBuffer = NULL; //response size size_t responseSize = 0; //dns header dnsHeader* dnsHeader = NULL; //firewall event firewallEvent event = {0}; //destination socket ('from') might be null? // if so, grab it via 'getpeername' from the socket if(NULL == from) { //lookup remote socket info if(0 != sock_getpeername(so, (struct sockaddr*)&peerName, sizeof(peerName))) { //err msg IOLog("LULU ERROR: sock_getpeername() failed\n"); //bail goto bail; } //now, assign from = (const struct sockaddr*)&peerName; } //get port switch(from->sa_family) { //IPv4 case AF_INET: port = ntohs(((const struct sockaddr_in*)from)->sin_port); break; //IPv6 case AF_INET6: port = ntohs(((const struct sockaddr_in6*)from)->sin6_port); break; default: break; } //ignore non-DNS if(53 != port) { //bail goto bail; } //init memory buffer memBuffer = *data; if(NULL == memBuffer) { //bail goto bail; } //get memory buffer while(MBUF_TYPE_DATA != mbuf_type(memBuffer)) { //get next memBuffer = mbuf_next(memBuffer); if(NULL == memBuffer) { //bail goto bail; } } //sanity check length if(mbuf_len(memBuffer) <= sizeof(struct dnsHeader)) { //bail goto bail; } //get data // should be a DNS header dnsHeader = (struct dnsHeader*)mbuf_data(memBuffer); //ignore everything that isn't a DNS response // top bit flag will be 0x1, for "a name service response" if(0 == ((ntohs(dnsHeader->flags)) & (1<<(15)))) { //bail goto bail; } //ignore any errors // bottom (4) bits will be 0x0 for "successful response" if(0 != ((ntohs(dnsHeader->flags)) & (1<<(0)))) { //bail 
goto bail; } //ignore any packets that don't have answers if(0 == ntohs(dnsHeader->ancount)) { //bail goto bail; } //zero out event struct bzero(&event, sizeof(firewallEvent)); //set type event.dnsResponseEvent.type = EVENT_DNS_RESPONSE; //set size // max, 512 responseSize = MIN(sizeof(event.dnsResponseEvent.response), mbuf_len(memBuffer)); //copy response memcpy(event.dnsResponseEvent.response, mbuf_data(memBuffer), responseSize); //queue it up sharedDataQueue->enqueue_tail(&event, sizeof(firewallEvent)); bail: return kIOReturnSuccess; }
/*
 * Copy 'copy' bytes between an IOMemoryDescriptor and an mbuf chain.
 * Direction follows the descriptor: kIODirectionOut means descriptor ->
 * mbuf ("write" to the wire), otherwise mbuf -> descriptor.
 * skip_buffer/skip_mbuf are starting offsets on each side.  Returns false
 * on prepare/complete failure, short I/O, or if bytes remain uncopied.
 */
static bool mbuf_buffer(IOMemoryDescriptor *buffer, int skip_buffer, mbuf_t m, int skip_mbuf, int copy)
{
	int offset = 0;
	bool isWrite = (buffer->getDirection() == kIODirectionOut);

	if (buffer->prepare() != kIOReturnSuccess)
	{
		KINFO("buffer prepare failed");
		return false;
	}

	// For writes, grow the packet header length so the chain can hold
	// everything we are about to place into it.
	if (isWrite && mbuf_pkthdr_len(m) < skip_mbuf + copy)
		mbuf_pkthdr_setlen(m, skip_mbuf + copy);

	for (; m; m = mbuf_next(m))
	{
		// Extend this link's length into its trailing space as needed.
		if (isWrite && mbuf_len(m) < skip_mbuf + copy && mbuf_trailingspace(m))
			mbuf_setlen(m, min(mbuf_maxlen(m), skip_mbuf + copy));

		UInt32 available = mbuf_len(m);
		//KDEBUG("available=%d, skip_mbuf=%d", available, skip_mbuf);
		// Consume the mbuf-side offset before any copying starts.
		if (skip_mbuf >= available)
		{
			skip_mbuf -= available;
			continue;
		}

		UInt8 *buf = (UInt8 *)mbuf_data(m) + skip_mbuf;
		// Clamp the transfer to what both sides can provide.
		IOByteCount len = copy; // remaining requested
		len = min(len, available - skip_mbuf); // available in mbuf
		len = min(len, buffer->getLength() - offset); // available in iomd
		IOByteCount wrote = 0;

		if (!len)
		{
			KDEBUG("no space, %d-%d, %d-%d", available, skip_mbuf, buffer->getLength(), offset);
			break;
		}

		//KDEBUG("COPY: skip_buffer=%d, offset=%d, len=%d (remaining=%d)", skip_buffer, offset, len, copy);

		if (isWrite)
			wrote = buffer->readBytes(skip_buffer + offset, buf, len);
		else
			wrote = buffer->writeBytes(skip_buffer + offset, buf, len);

		if (wrote != len)
		{
			KINFO("short IO");
			break;
		}

		offset += len;
		copy -= len;
		skip_mbuf = 0; // offset applies only to the first touched mbuf
	}

	if (buffer->complete() != kIOReturnSuccess)
	{
		KINFO("buffer complete failed");
		return false;
	}

	if (copy > 0)
	{
		KINFO("failed to copy requested data: %d remaining", copy);
		return false;
	}

	return true;
}
/*
 * copies mbuf chain to the uio scatter/gather list
 *
 * Transfers 'siz' bytes starting at the current parse position
 * (nd->nd_md / nd->nd_dpos) into uiop's iovecs, updating both positions.
 * Afterwards the parse position is advanced past XDR round-up padding.
 * Returns 0 or EBADRPC when either side runs out prematurely.
 */
int
nfsm_mbufuio(struct nfsrv_descript *nd, struct uio *uiop, int siz)
{
	char *mbufcp, *uiocp;
	int xfer, left, len;
	mbuf_t mp;
	long uiosiz, rem;
	int error = 0;

	mp = nd->nd_md;
	mbufcp = nd->nd_dpos;
	/* Bytes remaining in the current mbuf past the parse position. */
	len = NFSMTOD(mp, caddr_t) + mbuf_len(mp) - mbufcp;
	/* XDR pads data to a 4-byte multiple; 'rem' is the pad to skip. */
	rem = NFSM_RNDUP(siz) - siz;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL) {
			error = EBADRPC;
			goto out;
		}
		/* Fill at most one iovec (capped at 'siz') per outer pass. */
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			while (len == 0) {
				/* Current mbuf drained; step to the next. */
				mp = mbuf_next(mp);
				if (mp == NULL) {
					error = EBADRPC;
					goto out;
				}
				mbufcp = NFSMTOD(mp, caddr_t);
				len = mbuf_len(mp);
				KASSERT(len >= 0,
				    ("len %d, corrupted mbuf?", len));
			}
			xfer = (left > len) ? len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(mbufcp, uiocp, xfer);
			else
#endif
			/* Kernel-space targets get a plain copy; user space
			   goes through copyout(). */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				NFSBCOPY(mbufcp, uiocp, xfer);
			else
				copyout(mbufcp, CAST_USER_ADDR_T(uiocp), xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* Advance or partially consume the current iovec. */
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base = (void *)
			    ((char *)uiop->uio_iov->iov_base + uiosiz);
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	nd->nd_dpos = mbufcp;
	nd->nd_md = mp;
	/* Skip the XDR round-up padding, crossing mbufs if necessary. */
	if (rem > 0) {
		if (len < rem)
			error = nfsm_advance(nd, rem, len);
		else
			nd->nd_dpos += rem;
	}

out:
	NFSEXITCODE2(error, nd);
	return (error);
}
// data in is currently used for PASV FTP support static errno_t ppfilter_data_in (__unused void *cookie, socket_t so, const struct sockaddr *from, mbuf_t *data, __unused mbuf_t *control, __unused sflt_data_flag_t flags) { in_addr_t ip4; in_port_t port; if (!from) { struct sockaddr_in6 local; if (0 != sock_getpeername(so, (struct sockaddr*)&local, sizeof(local))) bzero(&local, sizeof(local)); from = (const struct sockaddr*)&local; } if (AF_INET == from->sa_family) { port = ntohs(((const struct sockaddr_in*)from)->sin_port); } else if (AF_INET6 == from->sa_family) { const struct sockaddr_in6* addr6 = (const struct sockaddr_in6*)from; if (IN6_IS_ADDR_LOOPBACK(&addr6->sin6_addr) || !IN6_IS_ADDR_V4MAPPED(&addr6->sin6_addr)) return (0); // tables do not contain native ip6 addreses port = ntohs(addr6->sin6_port); } else return (0); // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // Short-circuit optimization for ftp filter -- if any other filters are ever added, // this will have to be removed. if (21 != port) return (0); // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX mbuf_t mdata = *data; while (mdata && MBUF_TYPE_DATA != mbuf_type(mdata)) { mdata = mbuf_next(mdata); } if (!mdata) return (0); char *pkt = mbuf_data(mdata); if (!pkt) return (0); size_t len = mbuf_len(mdata); ip4 = INADDR_NONE; int block, i; pp_data_filter_t filt = pp_data_filters[0]; for (i = 1; filt; ++i) { block = filt(pkt, len, &ip4, &port); if (INADDR_NONE != ip4) { // add to dynamic list pp_dynentries_lock(); pp_dyn_entry_t e = pp_dyn_entry_get(); if (e) { e->addr = ip4; e->port = port; e->block = block; } pp_dynentries_unlock(); break; } filt = pp_data_filters[i]; } return (0); }
// Temporarily stuff a vlan tag back into a packet so that tag shows up to bpf.
// We do it by creating a temp header mbuf with the enet/vlan header in it and
// then point its next field to the proper place (after the dest+src addresses) in the original
// mbuf.
//
// mt          - original packet (left byte-identical on return: the data
//               pointer of one interior mbuf is shifted forward for the tap
//               call and restored before returning)
// vlan_tag    - host-order VLAN tag to splice in
// inputPacket - nonzero feeds the input tap, zero the output tap
void IOEthernetInterface::_fixupVlanPacket(mbuf_t mt, u_int16_t vlan_tag, int inputPacket)
{
	mbuf_t newmb;
	mbuf_t chain;
	size_t remainingBytes;
	size_t copyBytes = 0;  //initialize to prevent annoying, incorrect warning that it's used uninitialized
	char * destptr;

	if( mbuf_gethdr(MBUF_DONTWAIT, MT_DATA, &newmb) )
		return;

	//init enough of the mbuf to keep bpf happy
	mbuf_setlen(newmb, ETHER_ADDR_LEN*2 + VLAN_HEADER_LEN);
	mbuf_pkthdr_setlen(newmb, mbuf_pkthdr_len( mt ) + VLAN_HEADER_LEN);
	mbuf_pkthdr_setrcvif(newmb, mbuf_pkthdr_rcvif( mt ) );

	//now walk the incoming mbuf to copy out its dst & src address and
	//locate the type/len field in the packet.
	chain = mt;
	remainingBytes = ETHER_ADDR_LEN*2;
	destptr = (char *)mbuf_data( newmb );

	while(chain && remainingBytes)
	{
		// Copy as much of the 12 address bytes as this mbuf holds.
		copyBytes = remainingBytes > mbuf_len( chain ) ? mbuf_len( chain ): remainingBytes;
		remainingBytes -= copyBytes;
		bcopy( mbuf_data( chain ), destptr, copyBytes);
		destptr += copyBytes;

		if (mbuf_len( chain ) == copyBytes) //we've completely drained this mbuf
		{
			chain = mbuf_next( chain );  //advance to next
			copyBytes = 0; //if we break out of loop now, make sure the offset is correct
		}
	}

	// chain points to the mbuf that contains the packet data with type/len field
	// and copyBytes indicates the offset it's at.
	if(chain==0 || remainingBytes)
	{
		mbuf_freem( newmb );
		return; //if we can't munge the packet, just return
	}

	//patch mbuf so its data points after the dst+src address
	mbuf_setdata(chain, (char *)mbuf_data( chain ) + copyBytes, mbuf_len( chain ) - copyBytes );

	//finish setting up our head mbuf
	*(short *)(destptr) = htons(ETHERTYPE_VLAN); //vlan magic number
	*(short *)(destptr + 2) = htons( vlan_tag ); // and the tag's value
	mbuf_setnext( newmb, chain ); //stick it infront of the rest of the packet

	// feed the tap
	if(inputPacket)
		super::feedPacketInputTap( newmb );
	else
		super::feedPacketOutputTap( newmb );

	//release the fake header
	mbuf_setnext( newmb, NULL );  // detach first so freeing newmb cannot free the real packet
	mbuf_freem( newmb );

	//and repair our old mbuf
	mbuf_setdata( chain, (char *)mbuf_data( chain ) - copyBytes, mbuf_len( chain ) + copyBytes );
}
// Collapse an mbuf chain into a single contiguous mbuf.
// On success the original chain is freed and the new mbuf returned; on any
// failure both packets are released and NULL is returned.  A single,
// already-4-byte-aligned mbuf is returned untouched.
mbuf_t darwin_iwi3945::mergePacket(mbuf_t m)
{
	mbuf_t nm,nm2;
	int offset;
	if(!mbuf_next(m))
	{
		// Single mbuf: only realignment could be needed.
		offset = (4 - ((int)(mbuf_data(m)) & 3)) % 4;    //packet needs to be 4 byte aligned
		if (offset==0) return m;
		IWI_DEBUG_FULL("this packet dont have mbuf_next, merge is not required\n");
		// NOTE(review): the copy_packet path uses mbuf_dup(), which copies
		// the chain as-is; it is not obvious that the duplicate's data is
		// any better aligned than the original -- confirm.
		goto copy_packet;
	}

	/* allocate and Initialize New mbuf */
	nm = allocatePacket(mbuf_pkthdr_len(m));
	if (nm==0) return NULL;
	//if (mbuf_getpacket(MBUF_WAITOK, &nm)!=0) return NULL;
	mbuf_setlen(nm,0);
	mbuf_pkthdr_setlen(nm,0);
	if( mbuf_next(nm)) IWI_ERR("merged mbuf_next\n");

	/* merging chains to single mbuf */
	for (nm2 = m; nm2; nm2 = mbuf_next(nm2)) {
		memcpy (skb_put (nm, mbuf_len(nm2)), (UInt8*)mbuf_data(nm2), mbuf_len(nm2));
	}

	/* checking if merged or not. */
	if( mbuf_len(nm) == mbuf_pkthdr_len(m) )
	{
		// Success: release the original chain (guarding against a
		// double free of an already-freed mbuf) and hand back the copy.
		if (m!=NULL)
			if (!(mbuf_type(m) == MBUF_TYPE_FREE)) freePacket(m);
		m=NULL;
		return nm;
	}
	/* merging is not completed. */
	IWI_LOG("mergePacket is failed: data copy dont work collectly\n");
	//IWI_LOG("orig_len %d orig_pktlen %d new_len %d new_pktlen %d\n",
	//		mbuf_len(m),mbuf_pkthdr_len(m),
	//		mbuf_len(nm),mbuf_pkthdr_len(nm) );
	// Byte counts disagree: drop both the original and the partial copy.
	if (m!=NULL)
		if (!(mbuf_type(m) == MBUF_TYPE_FREE)) freePacket(m);
	m=NULL;
	if (nm!=NULL)
		if (!(mbuf_type(nm) == MBUF_TYPE_FREE) ) freePacket(nm);
	nm=NULL;
	return NULL;

copy_packet:
		// Duplicate the (single) mbuf and release the original.
		if (mbuf_dup(m, MBUF_WAITOK , &nm)!=0)
		{
			if (m!=NULL)
				if (!(mbuf_type(m) == MBUF_TYPE_FREE)) freePacket(m);
			m=NULL;
			return NULL;
		}
		if (m!=NULL)
			if (!(mbuf_type(m) == MBUF_TYPE_FREE) ) freePacket(m);
		m=NULL;
		return nm;
	//return copyPacket(m, 0);
}
/**
 * Internal worker that creates a darwin mbuf for a (scatter/)gather list.
 *
 * @returns Pointer to the mbuf on success, NULL on allocation/tagging failure.
 * @param   pThis   The instance.
 * @param   pSG     The (scatter/)gather list.
 */
static mbuf_t vboxNetFltDarwinMBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG)
{
    /// @todo future? mbuf_how_t How = preemption enabled ? MBUF_DONTWAIT : MBUF_WAITOK;
    mbuf_how_t How = MBUF_WAITOK;

    /*
     * We need some way of getting back to our instance data when
     * the mbuf is freed, so use pvUserData for this.
     *  -- this is not relevant anylonger! --
     */
    Assert(!pSG->pvUserData || pSG->pvUserData == pThis);
    Assert(!pSG->pvUserData2);
    pSG->pvUserData = pThis;

    /*
     * Allocate a packet and copy over the data.
     * (mbuf_attachcluster() was considered and rejected: 10.5+ only and not
     * intended for truly external buffers; physical addresses can't be used
     * either, see mbuf_data_to_physical / mcl_to_paddr.  A plain alloc+copy
     * is the 100% reliable option.)
     */
    mbuf_t pPkt = NULL;
    errno_t err = mbuf_allocpacket(How, pSG->cbTotal, NULL, &pPkt);
    if (!err)
    {
        /* Skip zero sized memory buffers (paranoia). */
        mbuf_t pCur = pPkt;
        while (pCur && !mbuf_maxlen(pCur))
            pCur = mbuf_next(pCur);
        Assert(pCur);

        /* Set the required packet header attributes. */
        mbuf_pkthdr_setlen(pPkt, pSG->cbTotal);
        mbuf_pkthdr_setheader(pPkt, mbuf_data(pCur));

        /* Special case the single buffer copy. */
        /* NOTE(review): the condition below reads as "has a NEXT buffer and
         * the first one fits everything", which contradicts the comment; a
         * genuinely single-buffer chain falls through to the multi-buffer
         * loop (which still handles it correctly).  Looks like it may have
         * been meant as !mbuf_next(pCur) -- confirm against upstream. */
        if (    mbuf_next(pCur)
            &&  mbuf_maxlen(pCur) >= pSG->cbTotal)
        {
            mbuf_setlen(pCur, pSG->cbTotal);
            IntNetSgRead(pSG, mbuf_data(pCur));
        }
        else
        {
            /* Multi buffer copying. */
            size_t  cbLeft = pSG->cbTotal;
            size_t  offSrc = 0;
            while (cbLeft > 0 && pCur)
            {
                size_t cb = mbuf_maxlen(pCur);
                if (cb > cbLeft)
                    cb = cbLeft;
                mbuf_setlen(pCur, cb);
                IntNetSgReadEx(pSG, offSrc, cb, mbuf_data(pCur));

                /* advance */
                offSrc += cb;
                cbLeft -= cb;
                pCur = mbuf_next(pCur);
            }
            Assert(cbLeft == 0);
        }
        /* NOTE(review): err is necessarily 0 here (unchanged since the
         * successful mbuf_allocpacket), so this check is always true. */
        if (!err)
        {
            /*
             * Tag the packet and return successfully.
             */
            PVBOXNETFLTTAG pTagData;
            err = mbuf_tag_allocate(pPkt, g_idTag, 0 /* type */, sizeof(VBOXNETFLTTAG) /* tag len */, How,
                                    (void **)&pTagData);
            if (!err)
            {
                Assert(pSG->aSegs[0].cb >= sizeof(pTagData->EthHdr));
                memcpy(&pTagData->EthHdr, pSG->aSegs[0].pv, sizeof(pTagData->EthHdr));
                return pPkt;
            }

            /* bailout: */
            AssertMsg(err == ENOMEM || err == EWOULDBLOCK, ("err=%d\n", err));
        }

        mbuf_freem(pPkt);
    }
    else
        AssertMsg(err == ENOMEM || err == EWOULDBLOCK, ("err=%d\n", err));
    pSG->pvUserData = NULL;

    return NULL;
}
/*
 * Transmit one packet: copy the mbuf chain into a free buffer from the
 * output pool, prepend an RNDIS data header, and queue an async USB write.
 * Returns an IOKit output code (kIOReturnOutputSuccess / OutputDropped / an
 * IOReturn error).  The mbuf is always consumed (freed) on every path.
 */
UInt32 HoRNDIS::outputPacket(mbuf_t packet, void *param) {
	mbuf_t m;
	size_t pktlen = 0;
	IOReturn ior = kIOReturnSuccess;
	UInt32 poolIndx;
	int i;

	LOG(V_DEBUG, "");

	/* Count the total size of this packet */
	m = packet;
	while (m) {
		pktlen += mbuf_len(m);
		m = mbuf_next(m);
	}

	LOG(V_DEBUG, "%ld bytes", pktlen);

	if (pktlen > (mtu + 14)) {
		LOG(V_ERROR, "packet too large (%ld bytes, but I told you you could have %d!)", pktlen, mtu);
		fpNetStats->outputErrors++;
		/*
		 * BUG FIX: this path used to leak the mbuf and 'return false'
		 * (0), which a UInt32 IOKit output method reports as success.
		 * Consume the packet and report a proper drop instead.
		 */
		freePacket(packet);
		return kIOReturnOutputDropped;
	}

	/* Find an output buffer in the pool */
	IOLockLock(outbuf_lock);
	for (i = 0; i < OUT_BUF_MAX_TRIES; i++) {
		AbsoluteTime ivl, deadl;

		for (poolIndx = 0; poolIndx < N_OUT_BUFS; poolIndx++)
			if (!outbufs[poolIndx].inuse) {
				outbufs[poolIndx].inuse = true;
				break;
			}
		if (poolIndx != N_OUT_BUFS)
			break;

		/* "while", not "if".  See Symphony X's seminal work on this topic, /Paradise Lost/ (2007). */
		nanoseconds_to_absolutetime(OUT_BUF_WAIT_TIME, &ivl);
		clock_absolutetime_interval_to_deadline(ivl, &deadl);
		LOG(V_NOTE, "waiting for buffer...");
		IOLockSleepDeadline(outbuf_lock, outbufs, deadl, THREAD_INTERRUPTIBLE);
	}
	IOLockUnlock(outbuf_lock);

	if (poolIndx == N_OUT_BUFS) {
		LOG(V_ERROR, "timed out waiting for buffer");
		/* BUG FIX: the mbuf was leaked here; a packet we cannot send
		 * must still be consumed. */
		freePacket(packet);
		return kIOReturnOutputDropped;
	}

	/* Start filling in the send buffer */
	struct rndis_data_hdr *hdr;
	hdr = (struct rndis_data_hdr *)outbufs[poolIndx].buf;

	outbufs[poolIndx].inuse = true;
	outbufs[poolIndx].mdp->setLength(pktlen + sizeof *hdr);

	memset(hdr, 0, sizeof *hdr);
	hdr->msg_type = RNDIS_MSG_PACKET;
	hdr->msg_len = cpu_to_le32(pktlen + sizeof *hdr);
	hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
	hdr->data_len = cpu_to_le32(pktlen);
	mbuf_copydata(packet, 0, pktlen, hdr + 1);

	freePacket(packet);

	/* Now, fire it off! */
	outbufs[poolIndx].comp.target    = this;
	/* Widen through uintptr_t so the UInt32 -> pointer cast is well
	 * defined on 64-bit builds. */
	outbufs[poolIndx].comp.parameter = (void *)(uintptr_t)poolIndx;
	outbufs[poolIndx].comp.action    = dataWriteComplete;

	ior = fOutPipe->Write(outbufs[poolIndx].mdp, &outbufs[poolIndx].comp);
	if (ior != kIOReturnSuccess) {
		LOG(V_ERROR, "write failed");
		if (ior == kIOUSBPipeStalled) {
			fOutPipe->Reset();
			ior = fOutPipe->Write(outbufs[poolIndx].mdp, &outbufs[poolIndx].comp);
			if (ior != kIOReturnSuccess) {
				LOG(V_ERROR, "write really failed");
				fpNetStats->outputErrors++;
				/*
				 * BUG FIX: the pool slot stayed marked inuse
				 * forever on this path, leaking a buffer per
				 * failure until the pool was exhausted.
				 */
				IOLockLock(outbuf_lock);
				outbufs[poolIndx].inuse = false;
				IOLockUnlock(outbuf_lock);
				return ior;
			}
		}
	}

	fpNetStats->outputPackets++;

	return kIOReturnOutputSuccess;
}
/*
 * Transmit one packet by copying the mbuf chain into the circular TX
 * descriptor ring (tx_pkt_header_t followed by the payload), then kicking
 * the hardware write index.  The mbuf is consumed on every path; returns
 * kIOReturnOutputSuccess or kIOReturnOutputDropped.
 */
UInt32 AttansicL2Ethernet::outputPacket(mbuf_t m, void *prm)
{
	at_adapter *adapter=&adapter_;

	tx_pkt_header_t* txph;
	u32 offset, copy_len;
	int txs_unused;
	int txbuf_unused;
	u32 buf_len;

	if (mbuf_pkthdr_len(m) <= MAX_TX_BUF_LEN)
		buf_len = mbuf_pkthdr_len(m);
	else
	{
		DbgPrint("Tx Packet size is too big, droping\n");
		freePacket(m);
		return kIOReturnOutputDropped;
	}

	/* Make sure there is a free status slot and enough ring bytes. */
	txs_unused = TxsFreeUnit(adapter);
	txbuf_unused = TxdFreeBytes(adapter);

	if (txs_unused < 1  || buf_len > txbuf_unused)
	{
		// no enough resource
		DbgPrint("no enough resource!!\n");
		freePacket(m);
		return kIOReturnOutputDropped;
	}

	offset = adapter->txd_write_ptr;

	DbgPrint("outputPacket() begin, txd_write_ptr %d txs_next_clear %d length %d \n" ,
				adapter->txd_write_ptr,adapter->txs_next_clear,buf_len);

	/* The packet header lives at the current write offset; fill it last,
	 * after the final length is known. */
	txph = (tx_pkt_header_t*) (((u8*)adapter->txd_ring)+offset);

	offset += 4;  // skip past the 4-byte tx_pkt_header_t
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;

	u32 pkt_snd_len = 0;
	mbuf_t cur_buf = m;

	/* Copy each mbuf into the ring, splitting at the wrap point.
	 * The while-condition advances cur_buf first (short-circuit keeps
	 * mbuf_len from being called on NULL), and stops early if the next
	 * fragment would exceed the header-declared packet length. */
	do
	{
		if (mbuf_data(cur_buf)){
			copy_len = adapter->txd_ring_size - offset;

			if (copy_len >=mbuf_len(cur_buf))
			{
				memcpy((u8*)adapter->txd_ring+offset, mbuf_data(cur_buf), mbuf_len(cur_buf));
			}
			else
			{
				// wrap: tail of this mbuf continues at ring start
				memcpy((u8*)adapter->txd_ring+offset, mbuf_data(cur_buf), copy_len);
				memcpy((u8*)adapter->txd_ring, ((u8*)mbuf_data(cur_buf))+copy_len, mbuf_len(cur_buf)-copy_len);
			}

			offset += mbuf_len(cur_buf);
			if (offset >= adapter->txd_ring_size)
				offset -= adapter->txd_ring_size;

			pkt_snd_len += mbuf_len(cur_buf);
		}
	}
	while(((cur_buf = mbuf_next(cur_buf)) != NULL) && ((pkt_snd_len + mbuf_len(cur_buf)) <= buf_len));

	buf_len = pkt_snd_len;

	/* Now fill in the descriptor header (clear all flag bits first). */
	*(u32*)txph = 0;
	txph->pkt_size = buf_len;

	/* Keep the write pointer 4-byte aligned. */
	offset = ((offset+3)&~3);
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;
	adapter->txd_write_ptr = offset;

	// clear txs before send
	adapter->txs_ring[adapter->txs_next_clear].update = 0;
	if (++adapter->txs_next_clear == adapter->txs_ring_size)
		adapter->txs_next_clear = 0;

	/* Tell the NIC where the new write pointer is (in dword units). */
	AT_WRITE_REGW(  &adapter->hw, REG_MB_TXD_WR_IDX, (adapter->txd_write_ptr>>2));

	OSSynchronizeIO();

	freePacket(m);

	return kIOReturnOutputSuccess;
}