Example #1
void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;	/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
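	/*
	 * If the inp has been dropped then tp may be stale (see the note at
	 * its declaration above), so use the snd_nxt the caller passed in.
	 */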
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}
Example #2
/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
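	/* LEN16 is the WR length in 16-byte units; FLOWID carries the tid. */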
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}
Example #3
/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
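/*
 * DONT_OFFLOAD_ACTIVE_OPEN() is a macro defined elsewhere in this file; it
 * records __LINE__ in 'reason', sets 'rc', and jumps to 'failed' below.
 */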
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct tom_data *td = tod_td(tod);
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct port_info *pi;
	int mtu_idx, rscale, qid_atid, rc, isipv6;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	if (rt_ifp->if_type == IFT_ETHER)
		pi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_COOKIE(rt_ifp);

		pi = ifp->if_softc;
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	toep = alloc_toepcb(pi, -1, -1, M_NOWAIT);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->l2te = t4_l2t_get(pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0)
		set_tcpddp_ulp_mode(toep);
	else
		toep->ulp_mode = ULP_MODE_NONE;
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
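	/* rcv_bufsiz is expressed in 1KB units, hence the >> 10. */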
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, 0);
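	/*
	 * Pack the rx queue's absolute id with the atid so the CPL reply is
	 * steered back to our queue; the atid occupies the low 14 bits.
	 */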
	qid_atid = (toep->ofld_rxq->iq.abs_id << 14) | toep->tid;

	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);

		if ((inp->inp_vflag & INP_IPV6) == 0) {
			/* XXX think about this a bit more */
			log(LOG_ERR,
			    "%s: time to think about AF_INET6 + vflag 0x%x.\n",
			    __func__, inp->inp_vflag);
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
		}

		toep->ce = hold_lip(td, &inp->in6p_laddr);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

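		/*
		 * T5 uses a larger CPL that also carries the initial send
		 * sequence number; act_open_cpl_size() accounted for this.
		 */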
		if (is_t4(sc)) {
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(pi, toep->l2te);
		} else {
			struct cpl_t5_act_open_req6 *c5 = (void *)cpl;

			INIT_TP_WR(c5, 0);
			c5->iss = htobe32(tp->iss);
			c5->params = select_ntuple(pi, toep->l2te);
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode);
		cpl->opt2 = calc_opt2a(so, toep);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);

		if (is_t4(sc)) {
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(pi, toep->l2te);
		} else {
			struct cpl_t5_act_open_req *c5 = (void *)cpl;

			INIT_TP_WR(c5, 0);
			c5->iss = htobe32(tp->iss);
			c5->params = select_ntuple(pi, toep->l2te);
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode);
		cpl->opt2 = calc_opt2a(so, toep);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			release_lip(td, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
Example #4
/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;
	struct offload_settings settings;
	uint16_t vid = 0xfff, pcp = 0;

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_TRUNKDEV(rt_ifp);

		vi = ifp->if_softc;
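		/* The vlan tag and priority feed the policy lookup below. */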
		VLAN_TAG(rt_ifp, &vid);
		VLAN_PCP(rt_ifp, &pcp);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL,
	    EVL_MAKETAG(vid, pcp, 0), inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
	qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
	    V_TID_COOKIE(CPL_COOKIE_TOM);

	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

		toep->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			t4_release_lip(sc, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
Example #5
/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 */
static void
t4_push_frames(struct adapter *sc, struct toepcb *toep)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	if (__predict_false(toep->ulp_mode != ULP_MODE_NONE &&
	    toep->ulp_mode != ULP_MODE_TCPDDP))
		CXGBE_UNIMPLEMENTED("ulp_mode");

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED))
		return;

	do {
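		/*
		 * Convert the tx credits still available into limits: how
		 * much payload fits as immediate data and how many DSGL
		 * segments a work request may carry.
		 */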
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					SOCKBUF_UNLOCK(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

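		/*
		 * Grow an auto-sizing send buffer once its free space drops
		 * below 7/8 of the high-water mark.
		 */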
		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbspace(sb) < sb->sb_hiwat / 8 * 7) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else {
				sowwakeup_locked(so);	/* room available */
				SOCKBUF_UNLOCK_ASSERT(sb);
				goto unlocked;
			}
		}
		SOCKBUF_UNLOCK(sb);
unlocked:

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		if (plen <= max_imm) {

			/* Immediate data tx */
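			/* Payload rides in the WR itself, after the header. */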

			wr = alloc_wrqe(roundup(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits,
			    tp->t_flags & TF_MORETOCOME);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
		} else {
			int wr_len;

			/* DSGL tx */
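			/*
			 * SGL sizing: the first segment is part of ulptx_sgl
			 * itself; each further pair of segments takes 3 flits
			 * (24 bytes) and a lone trailing segment takes 2.
			 */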

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits,
			    tp->t_flags & TF_MORETOCOME);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		toep->tx_credits -= credits;

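		/*
		 * Advance the sequence space here; the chip performs the
		 * actual (re)transmission for an offloaded connection.
		 */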
		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
Example #6
void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	u_int adjusted_plen, ulp_submode;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	int tx_credits, shove;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	struct mbufq *pduq = &toep->ulp_pduq;
	static const u_int ulp_extra_len[] = {0, 4, 4, 8};

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	if (drop)
		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);

	while ((sndptr = mbufq_first(pduq)) != NULL) {
		M_ASSERTPKTHDR(sndptr);

		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/*
			 * This mbuf would send us _over_ the nsegs limit.
			 * Suspend tx because the PDU can't be sent out.
			 */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/*
		 * We have a PDU to send.  All of it goes out in one WR so 'm'
		 * is NULL.  A PDU's length is always a multiple of 4.
		 */
		MPASS(m == NULL);
		MPASS((plen & 3) == 0);
		MPASS(sndptr->m_pkthdr.len == plen);

		shove = !(tp->t_flags & TF_MORETOCOME);
		ulp_submode = mbuf_ulp_submode(sndptr);
		MPASS(ulp_submode < nitems(ulp_extra_len));

		/*
		 * plen doesn't include header and data digests, which are
		 * generated and inserted in the right places by the TOE, but
		 * they do occupy TCP sequence space and need to be accounted
		 * for.
		 */
		adjusted_plen = plen + ulp_extra_len[ulp_submode];
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		m = mbufq_dequeue(pduq);
		MPASS(m == sndptr);
		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
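		/*
		 * Request a completion when credits are low and plenty has
		 * been sent without one, so that credits flow back before
		 * tx stalls.
		 */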
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += adjusted_plen;
		tp->snd_max += adjusted_plen;

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	}

	/* Send a FIN if requested, but only if there are no more PDUs to send */
	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}