//////////////////////////////////////////////////////////////////////////////// // // inet_firewire_pre_output // // IN: ifnet_t ifp // IN: struct mbuf **m0 // IN: struct sockaddr dst_netaddr // IN: caddr_t route // OUT: char *type // OUT: char *edst // IN: u_long dl_tag // // Invoked by : // Invoked by dlil.c for dlil_output=>(*proto)->dl_pre_output=> // inet_firewire_pre_output=> // // Process a received firewire ARP/IP packet, the packet is in the mbuf // chain m // //////////////////////////////////////////////////////////////////////////////// int inet_firewire_pre_output( ifnet_t interface, __unused protocol_family_t protocol_family, mbuf_t *m0, const struct sockaddr *dst_netaddr, void* route, char *type, char *edst) { mbuf_t m = *m0; errno_t result = 0; if ((ifnet_flags(interface) & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) return ENETDOWN; // Tell firewire_frameout it's ok to loop packet unless negated below. mbuf_setflags(m, mbuf_flags(m) | MBUF_LOOP); switch (dst_netaddr->sa_family) { case AF_INET: { struct sockaddr_dl ll_dest; result = inet_arp_lookup(interface, (const struct sockaddr_in*)dst_netaddr, &ll_dest, sizeof(ll_dest), (route_t)route, *m0); if (result == 0) { bcopy(LLADDR(&ll_dest), edst, FIREWIRE_ADDR_LEN); *(u_int16_t*)type = htons(FWTYPE_IP); } } break; case AF_UNSPEC: { mbuf_setflags(m, mbuf_flags(m) & ~MBUF_LOOP); register struct firewire_header *fwh = (struct firewire_header *)dst_netaddr->sa_data; (void)memcpy(edst, fwh->fw_dhost, FIREWIRE_ADDR_LEN); *(u_short *)type = fwh->fw_type; } break; default: return EAFNOSUPPORT; } return result; }
//------------------------------------------------------------------------------
// genPhysicalSegments
//
// Translate an mbuf packet chain into physical-address segments suitable
// for DMA, delivering each one through the (*outSeg) callback into 'vector'.
//
// packet     - head of the mbuf chain; must carry a packet header
//              (MBUF_PKTHDR), since mbuf_pkthdr_len() is consulted below
// vector     - opaque cursor-specific destination for emitted segments
// maxSegs    - segment budget; 0 means "use the cursor's maxNumSegments"
// doCoalesce - if true, the chain may be copied into a single cluster mbuf
//              when it would otherwise exceed the segment budget
//
// Returns the number of segments produced, or 0 on failure.
//------------------------------------------------------------------------------
UInt32 IOMbufMemoryCursor::genPhysicalSegments(mbuf_t packet, void *vector,
                                               UInt32 maxSegs, bool doCoalesce)
{
    bool doneCoalesce = false;

    if (!packet || !(mbuf_flags(packet) & MBUF_PKTHDR))
        return 0;

    if (!maxSegs)
    {
        // Fall back to the cursor-wide limit; zero means the cursor is unusable.
        maxSegs = maxNumSegments;
        if (!maxSegs) return 0;
    }

    if ( mbuf_next(packet) == 0 )
    {
        uintptr_t                src;
        struct IOPhysicalSegment physSeg;

        /*
         * Fast path: the packet consists of only 1 mbuf, so if the data
         * buffer doesn't span a page boundary we can take the simple way
         * out and emit a single segment.
         */
        src = (uintptr_t)mbuf_data(packet);

        if ( trunc_page(src) == trunc_page(src + mbuf_len(packet) - 1) )
        {
            physSeg.location = (IOPhysicalAddress) mbuf_data_to_physical((char *)src);
            if ( physSeg.location )
            {
                physSeg.length = mbuf_len(packet);
                (*outSeg)(physSeg, vector, 0);
                return 1;
            }
            // Physical translation failed; only a coalesced copy can help.
            maxSegs = 1;
            if ( doCoalesce == false ) return 0;
        }
    }

    if ( doCoalesce == true && maxSegs == 1 )
    {
        // Coalesce the whole chain into one freshly allocated cluster mbuf.
        uintptr_t src;
        uintptr_t dst;
        mbuf_t    m;
        mbuf_t    mnext;
        mbuf_t    out;
        UInt32    len = 0;
        struct IOPhysicalSegment physSeg;

        // The entire payload must fit inside a single cluster.
        if ( mbuf_pkthdr_len(packet) > MCLBYTES ) return 0;

        m = packet;

        // Allocate a non-header mbuf + cluster.
        if (mbuf_getpacket( MBUF_DONTWAIT, &out ))
            return 0;
        mbuf_setflags( out, mbuf_flags( out ) & ~MBUF_PKTHDR );
        dst = (uintptr_t)mbuf_data(out);

        // Copy every mbuf's payload back to back into the cluster.
        do
        {
            src = (uintptr_t)mbuf_data(m);
            BCOPY( src, dst, mbuf_len(m) );
            dst += mbuf_len(m);
            len += mbuf_len(m);
        } while ( (m = mbuf_next(m)) != 0 );

        mbuf_setlen(out , len);

        dst = (uintptr_t)mbuf_data(out);
        physSeg.location = (IOPhysicalAddress) mbuf_data_to_physical((char *)dst);
        if (!physSeg.location)
        {
            mbuf_free(out);
            return 0;
        }
        physSeg.length = mbuf_len(out);
        (*outSeg)(physSeg, vector, 0);

        // Free the original chain past the leading header mbuf.
        m = mbuf_next(packet);
        while (m != 0)
        {
            mnext = mbuf_next(m);
            mbuf_free(m);
            m = mnext;
        }

        // The initial header mbuf is preserved, its length set to zero,
        // and linked to the new packet chain.
        mbuf_setlen(packet , 0);
        mbuf_setnext(packet , out);
        mbuf_setnext(out , 0);

        return 1;
    }

    //
    // Iterate over the mbuf chain, translating segments where allowed.
    // When we are not allowed to translate segments then accumulate segment
    // statistics up to kMBufDataCacheSize of mbufs.  Finally if we overflow
    // our cache just count how many segments this packet represents.
    //
    UInt32 segsPerMBuf[kMBufDataCacheSize];

tryAgain:
    UInt32 curMBufIndex = 0;
    UInt32 curSegIndex  = 0;
    UInt32 lastSegCount = 0;
    mbuf_t m = packet;

    // For each mbuf in incoming packet.
    do
    {
        vm_size_t mbufLen, thisLen = 0;
        uintptr_t src;

        // Step through each segment in the current mbuf.
        for (mbufLen = mbuf_len(m), src = (uintptr_t)mbuf_data(m);
             mbufLen;
             src += thisLen, mbufLen -= thisLen)
        {
            // Clip each segment to the segment-size limit and to the page
            // containing src (physical contiguity is only guaranteed within
            // a page).
            // If maxSegmentSize is atleast PAGE_SIZE, then
            // thisLen = MIN(next_page(src), src + mbufLen) - src;
            thisLen = MIN(mbufLen, maxSegmentSize);
            thisLen = MIN(next_page(src), src + thisLen) - src;

            // If room left then find the current segment addr and output.
            if (curSegIndex < maxSegs)
            {
                struct IOPhysicalSegment physSeg;

                physSeg.location = (IOPhysicalAddress) mbuf_data_to_physical((char *)src);
                if ( physSeg.location == 0 )
                {
                    // Translation failed mid-chain: retry with a forced
                    // single-segment coalesce, or give up.
                    return doCoalesce ?
                        genPhysicalSegments(packet, vector, 1, true) : 0;
                }
                physSeg.length = thisLen;
                (*outSeg)(physSeg, vector, curSegIndex);
            }

            // Count segments even past the budget, so the total is known
            // if we end up coalescing.
            curSegIndex++;
        }

        // Cache the per-mbuf segment count if room is available.
        if (curMBufIndex < kMBufDataCacheSize)
        {
            segsPerMBuf[curMBufIndex] = curSegIndex - lastSegCount;
            lastSegCount = curSegIndex;
        }

        // Move on to next incoming mbuf.
        curMBufIndex++;
        m = mbuf_next(m);
    } while (m);

    // If we finished cleanly return number of segments found.
    if (curSegIndex <= maxSegs)
        return curSegIndex;
    if (!doCoalesce)
        return 0;    // if !coalescing we've got a problem.

    // If we are coalescing and it is possible then attempt coalesce.
    if (!doneCoalesce
    &&  (UInt) mbuf_pkthdr_len(packet) <= maxSegs * maxSegmentSize)
    {
        // Hmm, we have to do some coalescing.
        bool analysisRet;

        analysisRet = analyseSegments(packet,
                                      MIN(curMBufIndex, kMBufDataCacheSize),
                                      segsPerMBuf, curSegIndex, maxSegs);
        if (analysisRet)
        {
            doneCoalesce = true;
            coalesceCount++;
            goto tryAgain;
        }
    }

    assert(!doneCoalesce);    // Problem in Coalesce code.
    packetTooBigErrors++;
    return 0;
}