Example No. 1
/*
 * Return every mbuf cached in 'store' to the mbufq supplied as 'arg'
 * (a uma_release()-style callback).
 */
static void
nd_buf_release(void *arg, void **store, int count)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = store[i];
		(void)mbufq_enqueue(q, m);
	}
}
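The callback above deliberately ignores mbufq_enqueue()'s return value (it returns ENOBUFS when the queue is full, 0 otherwise). For context, here is a minimal sketch of how such a queue might be set up and torn down; the queue name and limit are illustrative, not taken from the driver above:

#include <sys/param.h>
#include <sys/mbuf.h>

/* Hypothetical owner of the queue that nd_buf_release() refills. */
static struct mbufq nd_free_queue;

static void
nd_queue_setup(void)
{
	/* Bound the queue; mbufq_enqueue() returns ENOBUFS once it is full. */
	mbufq_init(&nd_free_queue, 256);
}

static void
nd_queue_teardown(void)
{
	/* mbufq_drain() dequeues and frees every mbuf still on the queue. */
	mbufq_drain(&nd_free_queue);
}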
Example No. 2
/*
 * Enqueue one log record, sleeping while the log is choking or its
 * queue is full; the record is freed unread if the log has no open fd.
 */
void
khttpd_log_put(struct khttpd_log *log, struct mbuf *m)
{

	mtx_lock(&khttpd_log_lock);

	while (log->choking || mbufq_full(&log->queue))
		mtx_sleep(log, &khttpd_log_lock, 0, "choke", 0);

	if (log->fd == -1) {
		m_freem(m);
	} else {
		if (mbufq_len(&log->queue) == 0) {
			if (TAILQ_EMPTY(&khttpd_busy_logs))
				wakeup(&khttpd_busy_logs);
			TAILQ_INSERT_HEAD(&khttpd_busy_logs, log, link);
		}

		mbufq_enqueue(&log->queue, m);
	}

	mtx_unlock(&khttpd_log_lock);
}
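A consumer for this producer would drain the queue and wake the sleepers. A minimal sketch under the same lock discipline (the function is hypothetical; khttpd's real drain loop writes each record to log->fd rather than freeing it):

/*
 * Hypothetical consumer: detach the whole queue while holding the lock,
 * wake any producer sleeping in khttpd_log_put(), then process the
 * records without the lock held.
 */
static void
khttpd_log_drain_sketch(struct khttpd_log *log)
{
	struct mbuf *m, *n;

	mtx_lock(&khttpd_log_lock);
	/* mbufq_flush() empties the queue and returns the m_nextpkt chain. */
	m = mbufq_flush(&log->queue);
	wakeup(log);		/* matches mtx_sleep(log, ...) above */
	mtx_unlock(&khttpd_log_lock);

	for (; m != NULL; m = n) {
		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);	/* a real consumer would write m out first */
	}
}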
Example No. 3
/*
 * Save a copy of the data portion of packet 'm' on the bounded capture
 * queue, recycling the oldest captured mbuf once the queue is full.
 */
void
tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
{
	struct mbuf *n = NULL, *mhead;

	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(queue, ("%s: called with queue == NULL", __func__));

	/* We only care about data packets. */
	while (m && m->m_type != MT_DATA)
		m = m->m_next;

	/* We only need to do something if we still have an mbuf. */
	if (!m)
		return;

	/* If we are not saving mbufs, return now. */
	if (queue->mq_maxlen == 0)
		return;

	/*
	 * Check to see if we will need to recycle mbufs.
	 *
	 * If we need to get rid of mbufs to stay below
	 * our packet count, try to reuse the mbuf. Once
	 * we already have a new mbuf (n), then we can
	 * simply free subsequent mbufs.
	 *
	 * Note that most of the logic in here is to deal
	 * with the reuse. If we are fine with constant
	 * mbuf allocs/deallocs, we could ditch this logic.
	 * But, it only seems to make sense to reuse
	 * mbufs we already have.
	 */
	while (mbufq_full(queue)) {
		mhead = mbufq_dequeue(queue);

		if (n) {
			tcp_pcap_m_freem(mhead);
		}
		else {
			/*
			 * If this held an external cluster, try to
			 * detach the cluster. But, if we held the
			 * last reference, go through the normal
			 * free-ing process.
			 */
			if (mhead->m_flags & M_EXT) {
				switch (mhead->m_ext.ext_type) {
				case EXT_SFBUF:
					/* Don't mess around with these. */
					tcp_pcap_m_freem(mhead);
					continue;
				default:
					if (atomic_fetchadd_int(
						mhead->m_ext.ext_cnt, -1) == 1)
					{
						/*
						 * We held the last reference
						 * on this cluster. Restore
						 * the reference count and put
						 * it back in the pool.
						 */
						*(mhead->m_ext.ext_cnt) = 1;
						tcp_pcap_m_freem(mhead);
						continue;
					}
					/*
					 * We were able to cleanly free the
					 * reference.
					 */
					atomic_subtract_int(
					    &tcp_pcap_clusters_referenced_cur,
					    1);
					tcp_pcap_alloc_reuse_ext++;
					break;
				}
			}
			else {
				tcp_pcap_alloc_reuse_mbuf++;
			}

			n = mhead;
			tcp_pcap_m_freem(n->m_next);
			m_init(n, NULL, 0, M_NOWAIT, MT_DATA, 0);
		}
	}

	/* Check to see if we need to get a new mbuf. */
	if (!n) {
		if (!(n = m_get(M_NOWAIT, MT_DATA)))
			return;
		tcp_pcap_alloc_new_mbuf++;
	}

	/*
	 * What are we dealing with? If a cluster, attach it. Otherwise,
	 * try to copy the data from the beginning of the mbuf to the
	 * end of data. (There may be data between the start of the data
	 * area and the current data pointer. We want to get this, because
	 * it may contain header information that is useful.)
	 * In cases where that isn't possible, settle for what we can
	 * get.
	 */
	if ((m->m_flags & M_EXT) && tcp_pcap_take_cluster_reference()) {
		n->m_data = m->m_data;
		n->m_len = m->m_len;
		mb_dupcl(n, m);
	}
	else if (((m->m_data + m->m_len) - M_START(m)) <= M_SIZE(n)) {
		/*
		 * At this point, n is guaranteed to be a normal mbuf
		 * with no cluster and no packet header. Because the
		 * logic in this code block requires this, the assert
		 * is here to catch any instances where someone
		 * changes the logic to invalidate that assumption.
		 */
		KASSERT((n->m_flags & (M_EXT | M_PKTHDR)) == 0,
			("%s: Unexpected flags (%#x) for mbuf",
			__func__, n->m_flags));
		n->m_data = n->m_dat + M_LEADINGSPACE_NOWRITE(m);
		n->m_len = m->m_len;
		bcopy(M_START(m), n->m_dat,
			m->m_len + M_LEADINGSPACE_NOWRITE(m));
	}
	else {
		/*
		 * This is the case where we need to "settle for what
		 * we can get". The most probable way to this code
		 * path is that we've already taken references to the
		 * maximum number of mbuf clusters we can, and the data
		 * is too long to fit in an mbuf's internal storage.
		 * Try for a "best fit".
		 */
		tcp_pcap_copy_bestfit(th, m, n);

		/* Don't try to get additional data. */
		goto add_to_queue;
	}

	if (m->m_next) {
		n->m_next = m_copym(m->m_next, 0, M_COPYALL, M_NOWAIT);
		tcp_pcap_adj_cluster_reference(n->m_next, 1);
	}

add_to_queue:
	/* Add the new mbuf to the list. */
	if (mbufq_enqueue(queue, n)) {
		/* This shouldn't happen. If INVARIANTS is defined, panic. */
		KASSERT(0, ("%s: mbufq was unexpectedly full!", __func__));
		tcp_pcap_m_freem(n);
	}
}
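Stripped of the cluster-reuse bookkeeping, the core of tcp_pcap_add() is a bounded capture ring: evict the oldest packet when the queue is full, then enqueue a copy of the new one. A minimal sketch of just that pattern (the function name is ours, not FreeBSD's):

static void
capture_add_sketch(struct mbufq *queue, struct mbuf *m)
{
	struct mbuf *n;

	/* A zero mq_maxlen means capture is disabled. */
	if (queue->mq_maxlen == 0)
		return;

	/* Copy the packet; the caller keeps ownership of 'm'. */
	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
	if (n == NULL)
		return;

	/* Make room by dropping the oldest captured packet(s). */
	while (mbufq_full(queue))
		m_freem(mbufq_dequeue(queue));

	if (mbufq_enqueue(queue, n) != 0)
		m_freem(n);	/* unreachable after the drain above */
}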
Example No. 4
/*
 * Turn each queued iSCSI PDU into a work request (immediate data or
 * DSGL) and hand it to the hardware, stopping when credits run out.
 */
void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	u_int adjusted_plen, ulp_submode;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	int tx_credits, shove;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	struct mbufq *pduq = &toep->ulp_pduq;
	static const u_int ulp_extra_len[] = {0, 4, 4, 8};

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	if (drop)
		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);

	while ((sndptr = mbufq_first(pduq)) != NULL) {
		M_ASSERTPKTHDR(sndptr);

		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/*
			 * This mbuf would send us _over_ the nsegs limit.
			 * Suspend tx because the PDU can't be sent out.
			 */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/*
		 * We have a PDU to send.  All of it goes out in one WR so 'm'
		 * is NULL.  A PDU's length is always a multiple of 4.
		 */
		MPASS(m == NULL);
		MPASS((plen & 3) == 0);
		MPASS(sndptr->m_pkthdr.len == plen);

		shove = !(tp->t_flags & TF_MORETOCOME);
		ulp_submode = mbuf_ulp_submode(sndptr);
		MPASS(ulp_submode < nitems(ulp_extra_len));

		/*
		 * plen doesn't include header and data digests, which are
		 * generated and inserted in the right places by the TOE, but
		 * they do occupy TCP sequence space and need to be accounted
		 * for.
		 */
		adjusted_plen = plen + ulp_extra_len[ulp_submode];
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		m = mbufq_dequeue(pduq);
		MPASS(m == sndptr);
		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += adjusted_plen;
		tp->snd_max += adjusted_plen;

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	}

	/* Send a FIN if requested, but only if there are no more PDUs to send */
	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
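Note the ordering: each PDU is examined via mbufq_first() and only removed with mbufq_dequeue() after its work request has been allocated, so a transient shortage of credits or memory leaves the queue intact for a retry. A generic sketch of that peek-then-commit pattern (the names and the stage() callback are illustrative, not part of the cxgbe driver):

static void
push_queue_sketch(struct mbufq *q, struct mbufq *reclaimq,
    int (*stage)(struct mbuf *))
{
	struct mbuf *m;

	while ((m = mbufq_first(q)) != NULL) {
		/* Stage the head PDU; on failure it stays queued for retry. */
		if (stage(m) != 0)
			return;
		/* Commit: remove it and park it until the hardware is done. */
		m = mbufq_dequeue(q);
		(void)mbufq_enqueue(reclaimq, m);
	}
}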