void tcp_lro_init(void) { int i; bzero(lro_flow_list, sizeof (struct lro_flow) * TCP_LRO_NUM_FLOWS); for (i = 0; i < TCP_LRO_FLOW_MAP; i++) { lro_flow_map[i] = TCP_LRO_FLOW_UNINIT; } /* * allocate lock group attribute, group and attribute for tcp_lro_lock */ tcp_lro_mtx_grp_attr = lck_grp_attr_alloc_init(); tcp_lro_mtx_grp = lck_grp_alloc_init("tcplro", tcp_lro_mtx_grp_attr); tcp_lro_mtx_attr = lck_attr_alloc_init(); lck_mtx_init(&tcp_lro_lock, tcp_lro_mtx_grp, tcp_lro_mtx_attr); tcp_lro_timer = thread_call_allocate(tcp_lro_timer_proc, NULL); if (tcp_lro_timer == NULL) { panic_plain("%s: unable to allocate lro timer", __func__); } return; }
/*
 * Assertion failure handler (debugger-aware variant).
 *
 * No-op when assertions are disabled via the mach_assert tunable.
 * Otherwise panics with the failing file, line, and expression text.
 * When a debugger is configured, return_on_panic is raised so the
 * panic is resumable and a developer can continue past the assert;
 * with no debugger attached we deliberately leave return_on_panic
 * alone, because resuming from an assert with nothing watching the
 * system is unsafe (see comment below).
 *
 * Fix: the original definition omitted the return type, relying on
 * implicit int (invalid since C99); the function returns no value,
 * so it is declared void to match its companion definition.
 */
void
Assert(
	const char	*file,
	int		line,
	const char	*expression
)
{
	int saved_return_on_panic;

	if (!mach_assert) {
		return;
	}

	saved_return_on_panic = return_on_panic;

	/*
	 * If we don't have a debugger configured, returning from an
	 * assert is a bad, bad idea; there is no guarantee that we
	 * didn't simply assert before we were able to restart the
	 * platform.
	 */
	if (current_debugger != NO_CUR_DB)
		return_on_panic = 1;

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);

	/* Only reached if the panic was resumed under a debugger. */
	return_on_panic = saved_return_on_panic;
}
void Assert( const char *file, int line, const char *expression ) { int saved_return_on_panic; if (!mach_assert) { return; } saved_return_on_panic = return_on_panic; return_on_panic = 1; panic_plain("%s:%d Assertion failed: %s", file, line, expression); return_on_panic = saved_return_on_panic; }
static void tcp_lro_coalesce(int flow_id, struct mbuf *lro_mb, struct tcphdr *tcphdr, int payload_len, int drop_hdrlen, struct tcpopt *topt, u_int32_t* tsval, u_int32_t* tsecr, int thflags) { struct lro_flow *flow = NULL; struct mbuf *last; struct ip *ip = NULL; flow = &lro_flow_list[flow_id]; if (flow->lr_mhead) { if (lrodebug) printf("%s: lr_mhead %x %d \n", __func__, flow->lr_seq, payload_len); m_adj(lro_mb, drop_hdrlen); last = flow->lr_mtail; while (last->m_next != NULL) { last = last->m_next; } last->m_next = lro_mb; flow->lr_mtail = lro_mb; ip = mtod(flow->lr_mhead, struct ip *); ip->ip_len += lro_mb->m_pkthdr.len; flow->lr_mhead->m_pkthdr.len += lro_mb->m_pkthdr.len; if (flow->lr_len == 0) { panic_plain("%s: Inconsistent LRO flow state", __func__); } flow->lr_len += payload_len; flow->lr_seq += payload_len; /* * This bit is re-OR'd each time a packet is added to the * large coalesced packet. */ flow->lr_mhead->m_pkthdr.aux_flags |= MAUXF_SW_LRO_PKT; flow->lr_mhead->m_pkthdr.lro_npkts++; /* for tcpstat.tcps_rcvpack */ if (flow->lr_mhead->m_pkthdr.lro_pktlen < lro_mb->m_pkthdr.lro_pktlen) { /* * For TCP Inter Arrival Jitter calculation, return max * size encountered while coalescing a stream of pkts. */ flow->lr_mhead->m_pkthdr.lro_pktlen = lro_mb->m_pkthdr.lro_pktlen; } /* Update the timestamp value */ if (topt->to_flags & TOF_TS) { if ((flow->lr_tsval) && (TSTMP_GT(topt->to_tsval, ntohl(*(flow->lr_tsval))))) { *(flow->lr_tsval) = htonl(topt->to_tsval); } if ((flow->lr_tsecr) && (topt->to_tsecr != 0) && (TSTMP_GT(topt->to_tsecr, ntohl(*(flow->lr_tsecr))))) { if (lrodebug >= 2) { printf("%s: instantaneous RTT = %d \n", __func__, topt->to_tsecr - ntohl(*(flow->lr_tsecr))); } *(flow->lr_tsecr) = htonl(topt->to_tsecr); } } /* Coalesce the flags */ if (thflags) { flow->lr_tcphdr->th_flags |= thflags; } /* Update receive window */ flow->lr_tcphdr->th_win = tcphdr->th_win; } else { if (lro_mb) {