void
khttpd_log_put(struct khttpd_log *log, struct mbuf *m)
{

	mtx_lock(&khttpd_log_lock);

	/* Apply back-pressure: sleep while the log is choking or its queue is full. */
	while (log->choking || mbufq_full(&log->queue))
		mtx_sleep(log, &khttpd_log_lock, 0, "choke", 0);

	if (log->fd == -1) {
		/* The log has no open file descriptor; drop the message. */
		m_freem(m);
	} else {
		if (mbufq_len(&log->queue) == 0) {
			/*
			 * The queue was empty, so this log is about to
			 * become busy.  Wake the logger if the busy list
			 * was empty, then put this log on it.
			 */
			if (TAILQ_EMPTY(&khttpd_busy_logs))
				wakeup(&khttpd_busy_logs);
			TAILQ_INSERT_HEAD(&khttpd_busy_logs, log, link);
		}
		mbufq_enqueue(&log->queue, m);
	}

	mtx_unlock(&khttpd_log_lock);
}
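/*
 * Minimal sketch (not part of the original file) of the logger thread that
 * pairs with khttpd_log_put() above, inferred only from how that function
 * uses khttpd_busy_logs and its sleep/wakeup channels.  The helper
 * khttpd_log_flush() is hypothetical and stands in for whatever writes the
 * queued mbufs to log->fd; the real consumer may be structured differently.
 */
static void
khttpd_log_main(void *arg)
{
	struct khttpd_log *log;

	mtx_lock(&khttpd_log_lock);
	for (;;) {
		/* khttpd_log_put() wakes this channel when a log becomes busy. */
		while (TAILQ_EMPTY(&khttpd_busy_logs))
			mtx_sleep(&khttpd_busy_logs, &khttpd_log_lock, 0,
			    "logidle", 0);

		log = TAILQ_FIRST(&khttpd_busy_logs);
		TAILQ_REMOVE(&khttpd_busy_logs, log, link);

		mtx_unlock(&khttpd_log_lock);
		khttpd_log_flush(log);	/* hypothetical: drain log->queue to log->fd */
		mtx_lock(&khttpd_log_lock);

		/* Unblock producers sleeping on the "choke" channel. */
		wakeup(log);
	}
}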
void
tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
{
	struct mbuf *n = NULL, *mhead;

	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(queue, ("%s: called with queue == NULL", __func__));

	/* We only care about data packets. */
	while (m && m->m_type != MT_DATA)
		m = m->m_next;

	/* We only need to do something if we still have an mbuf. */
	if (!m)
		return;

	/* If we are not saving mbufs, return now. */
	if (queue->mq_maxlen == 0)
		return;

	/*
	 * Check to see if we will need to recycle mbufs.
	 *
	 * If we need to get rid of mbufs to stay below
	 * our packet count, try to reuse the mbuf. Once
	 * we already have a new mbuf (n), then we can
	 * simply free subsequent mbufs.
	 *
	 * Note that most of the logic in here is to deal
	 * with the reuse. If we are fine with constant
	 * mbuf allocs/deallocs, we could ditch this logic.
	 * But, it only seems to make sense to reuse
	 * mbufs we already have.
	 */
	while (mbufq_full(queue)) {
		mhead = mbufq_dequeue(queue);

		if (n) {
			tcp_pcap_m_freem(mhead);
		} else {
			/*
			 * If this held an external cluster, try to
			 * detach the cluster. But, if we held the
			 * last reference, go through the normal
			 * free-ing process.
			 */
			if (mhead->m_flags & M_EXT) {
				switch (mhead->m_ext.ext_type) {
				case EXT_SFBUF:
					/* Don't mess around with these. */
					tcp_pcap_m_freem(mhead);
					continue;
				default:
					if (atomic_fetchadd_int(
					    mhead->m_ext.ext_cnt, -1) == 1) {
						/*
						 * We held the last reference
						 * on this cluster. Restore
						 * the reference count and put
						 * it back in the pool.
						 */
						*(mhead->m_ext.ext_cnt) = 1;
						tcp_pcap_m_freem(mhead);
						continue;
					}
					/*
					 * We were able to cleanly free the
					 * reference.
					 */
					atomic_subtract_int(
					    &tcp_pcap_clusters_referenced_cur,
					    1);
					tcp_pcap_alloc_reuse_ext++;
					break;
				}
			} else {
				tcp_pcap_alloc_reuse_mbuf++;
			}

			n = mhead;
			tcp_pcap_m_freem(n->m_next);
			m_init(n, NULL, 0, M_NOWAIT, MT_DATA, 0);
		}
	}

	/* Check to see if we need to get a new mbuf. */
	if (!n) {
		if (!(n = m_get(M_NOWAIT, MT_DATA)))
			return;
		tcp_pcap_alloc_new_mbuf++;
	}

	/*
	 * What are we dealing with? If a cluster, attach it. Otherwise,
	 * try to copy the data from the beginning of the mbuf to the
	 * end of data. (There may be data between the start of the data
	 * area and the current data pointer. We want to get this, because
	 * it may contain header information that is useful.)
	 * In cases where that isn't possible, settle for what we can
	 * get.
	 */
	if ((m->m_flags & M_EXT) && tcp_pcap_take_cluster_reference()) {
		n->m_data = m->m_data;
		n->m_len = m->m_len;
		mb_dupcl(n, m);
	} else if (((m->m_data + m->m_len) - M_START(m)) <= M_SIZE(n)) {
		/*
		 * At this point, n is guaranteed to be a normal mbuf
		 * with no cluster and no packet header. Because the
		 * logic in this code block requires this, the assert
		 * is here to catch any instances where someone
		 * changes the logic to invalidate that assumption.
		 */
		KASSERT((n->m_flags & (M_EXT | M_PKTHDR)) == 0,
		    ("%s: Unexpected flags (%#x) for mbuf", __func__,
		    n->m_flags));
		n->m_data = n->m_dat + M_LEADINGSPACE_NOWRITE(m);
		n->m_len = m->m_len;
		bcopy(M_START(m), n->m_dat,
		    m->m_len + M_LEADINGSPACE_NOWRITE(m));
	} else {
		/*
		 * This is the case where we need to "settle for what
		 * we can get". The most probable way to this code
		 * path is that we've already taken references to the
		 * maximum number of mbuf clusters we can, and the data
		 * is too long to fit in an mbuf's internal storage.
		 * Try for a "best fit".
		 */
		tcp_pcap_copy_bestfit(th, m, n);

		/* Don't try to get additional data. */
		goto add_to_queue;
	}

	if (m->m_next) {
		n->m_next = m_copym(m->m_next, 0, M_COPYALL, M_NOWAIT);
		tcp_pcap_adj_cluster_reference(n->m_next, 1);
	}

add_to_queue:
	/* Add the new mbuf to the list. */
	if (mbufq_enqueue(queue, n)) {
		/* This shouldn't happen. If INVARIANTS is defined, panic. */
		KASSERT(0, ("%s: mbufq was unexpectedly full!", __func__));
		tcp_pcap_m_freem(n);
	}
}
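/*
 * Usage sketch (not part of the original file): a caller keeps a
 * length-limited mbufq and records each segment as it is processed.
 * In FreeBSD this is done from the TCP input/output paths under the
 * TCPPCAP kernel option; the queue and function names below are
 * illustrative only, not the in-tree ones.
 */
static struct mbufq tcp_pcap_example_queue;

static void
tcp_pcap_example_setup(void)
{
	/* Keep at most 16 captured segments; older ones are recycled above. */
	mbufq_init(&tcp_pcap_example_queue, 16);
}

static void
tcp_pcap_example_record(struct tcphdr *th, struct mbuf *m)
{
	/* Copies or references the segment data; does not consume m. */
	tcp_pcap_add(th, m, &tcp_pcap_example_queue);
}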