Code example #1
static inline int
sdp_nagle_off(struct sdp_sock *ssk, struct mbuf *mb)
{

	struct sdp_bsdh *h;

	h = mtod(mb, struct sdp_bsdh *);
	int send_now =
#ifdef SDP_ZCOPY
		BZCOPY_STATE(mb) ||
#endif
		unlikely(h->mid != SDP_MID_DATA) ||
		(ssk->flags & SDP_NODELAY) ||
		!ssk->nagle_last_unacked ||
		mb->m_pkthdr.len >= ssk->xmit_size_goal / 4 ||
		(mb->m_flags & M_PUSH);

	if (send_now) {
		unsigned long mseq = ring_head(ssk->tx_ring);
		ssk->nagle_last_unacked = mseq;
	} else {
		if (!callout_pending(&ssk->nagle_timer)) {
			callout_reset(&ssk->nagle_timer, SDP_NAGLE_TIMEOUT,
			    sdp_nagle_timeout, ssk);
			sdp_dbg_data(ssk->socket, "Starting nagle timer\n");
		}
	}
	sdp_dbg_data(ssk->socket, "send_now = %d last_unacked = %ld\n",
		send_now, ssk->nagle_last_unacked);

	return send_now;
}
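Note: the send_now expression above is a pure predicate over header and socket state, so it can be studied in isolation. Below is a minimal userspace sketch of the same disjunction, using simplified stand-in fields for h->mid, ssk->flags, mb->m_pkthdr.len and friends; it is an illustration under those assumptions, not the kernel code itself.

#include <stdio.h>

/* Simplified stand-ins for the fields sdp_nagle_off() consults. */
struct nagle_input {
	int is_data;                /* h->mid == SDP_MID_DATA */
	int nodelay;                /* ssk->flags & SDP_NODELAY */
	unsigned long last_unacked; /* ssk->nagle_last_unacked */
	int pkt_len;                /* mb->m_pkthdr.len */
	int xmit_size_goal;         /* ssk->xmit_size_goal */
	int push;                   /* mb->m_flags & M_PUSH */
};

/* Send immediately unless this is a small, unpushed data message while
 * an earlier message is still unacknowledged. */
static int send_now(const struct nagle_input *in)
{
	return !in->is_data || in->nodelay || !in->last_unacked ||
	    in->pkt_len >= in->xmit_size_goal / 4 || in->push;
}

int main(void)
{
	struct nagle_input held = { 1, 0, 7, 100, 4096, 0 };
	struct nagle_input sent = { 1, 0, 7, 2048, 4096, 0 };

	printf("small unpushed data -> %d (held, nagle timer armed)\n",
	    send_now(&held));
	printf("large data          -> %d (sent immediately)\n",
	    send_now(&sent));
	return 0;
}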
Code example #2
File: sdp_tx.c Project: AhmadTux/freebsd
static inline void
sdp_process_tx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
{

	if (likely(wc->wr_id & SDP_OP_SEND)) {
		sdp_handle_send_comp(ssk, wc);
		return;
	}

#ifdef SDP_ZCOPY
	if (wc->wr_id & SDP_OP_RDMA) {
		/* TODO: handle failed RDMA read cqe */

		sdp_dbg_data(ssk->socket,
		    "TX comp: RDMA read. status: %d\n", wc->status);
		sdp_prf1(ssk->socket, NULL, "TX comp: RDMA read");

		if (!ssk->tx_ring.rdma_inflight) {
			sdp_warn(ssk->socket, "ERROR: unexpected RDMA read\n");
			return;
		}

		if (!ssk->tx_ring.rdma_inflight->busy) {
			sdp_warn(ssk->socket,
			    "ERROR: too many RDMA read completions\n");
			return;
		}

		/* Only the last RDMA read WR is signalled, and ordering is
		 * guaranteed, so once the last RDMA read WR completes, all
		 * the others have completed as well. */
		ssk->tx_ring.rdma_inflight->busy = 0;
		sowwakeup(ssk->socket);
		sdp_dbg_data(ssk->socket, "woke up sleepers\n");
		return;
	}
#endif

	/* Keepalive probe sent cleanup */
	sdp_cnt(sdp_keepalive_probes_sent);

	if (likely(!wc->status))
		return;

	sdp_dbg(ssk->socket, " %s consumes KEEPALIVE status %d\n",
			__func__, wc->status);

	if (wc->status == IB_WC_WR_FLUSH_ERR)
		return;

	sdp_notify(ssk, ECONNRESET);
}
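The dispatch above works because each 64-bit work-request ID carries an operation tag alongside the sequence number (example #11 posts sends with tx_wr.wr_id = mseq | SDP_OP_SEND). A hedged sketch of that encoding follows; the OP_* bit positions here are illustrative assumptions, not the driver's real SDP_OP_* values.

#include <stdint.h>
#include <stdio.h>

/* Illustrative tag bits only; the real SDP_OP_* constants live in the
 * driver headers and may use different positions. */
#define OP_SEND  (1ULL << 63)
#define OP_RDMA  (1ULL << 62)
#define SEQ_MASK (~(OP_SEND | OP_RDMA))

static void dispatch_tx_wc(uint64_t wr_id)
{
	if (wr_id & OP_SEND)
		printf("send completion, mseq %llu\n",
		    (unsigned long long)(wr_id & SEQ_MASK));
	else if (wr_id & OP_RDMA)
		printf("RDMA read completion\n");
	else
		printf("keepalive completion\n");
}

int main(void)
{
	dispatch_tx_wc(42 | OP_SEND); /* ordinary data send */
	dispatch_tx_wc(7 | OP_RDMA);  /* zero-copy RDMA read */
	dispatch_tx_wc(0);            /* keepalive probe */
	return 0;
}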
Code example #3
File: sdp_rx.c Project: Lxg1582/freebsd
static inline struct mbuf *
sdp_sock_queue_rcv_mb(struct socket *sk, struct mbuf *mb)
{
	struct sdp_sock *ssk = sdp_sk(sk);
	struct sdp_bsdh *h;

	h = mtod(mb, struct sdp_bsdh *);

#ifdef SDP_ZCOPY
	SDP_SKB_CB(mb)->seq = rcv_nxt(ssk);
	if (h->mid == SDP_MID_SRCAVAIL) {
		struct sdp_srcah *srcah = (struct sdp_srcah *)(h+1);
		struct rx_srcavail_state *rx_sa;
		int mb_len;

		ssk->srcavail_cancel_mseq = 0;

		/* TODO: kzalloc(..., M_NOWAIT) may fail; a NULL check is
		 * missing before rx_sa is dereferenced below. */
		ssk->rx_sa = rx_sa = RX_SRCAVAIL_STATE(mb) = kzalloc(
				sizeof(struct rx_srcavail_state), M_NOWAIT);

		rx_sa->mseq = ntohl(h->mseq);
		rx_sa->used = 0;
		rx_sa->len = mb_len = ntohl(srcah->len);
		rx_sa->rkey = ntohl(srcah->rkey);
		rx_sa->vaddr = be64_to_cpu(srcah->vaddr);
		rx_sa->flags = 0;

		if (ssk->tx_sa) {
			sdp_dbg_data(ssk->socket, "got RX SrcAvail while waiting "
					"for TX SrcAvail. waking up TX SrcAvail"
					"to be aborted\n");
			wake_up(sk->sk_sleep);
		}

		atomic_add(mb->m_pkthdr.len, &ssk->rcv_nxt);
		sdp_dbg_data(sk, "queueing SrcAvail. mb_len = %d vaddr = %lld\n",
			mb_len, rx_sa->vaddr);
	} else
#endif
	{
		atomic_add(mb->m_pkthdr.len, &ssk->rcv_nxt);
	}

	m_adj(mb, SDP_HEAD_SIZE);
	SOCKBUF_LOCK(&sk->so_rcv);
	if (unlikely(h->flags & SDP_OOB_PRES))
		sdp_urg(ssk, mb);
	sbappend_locked(&sk->so_rcv, mb);
	sorwakeup_locked(sk);
	return mb;
}
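Every multi-byte field in the BSDH and SrcAvail headers arrives in network byte order, which is why the code above converts with ntohl()/be64_to_cpu() before use. A standalone sketch of that parsing against a hypothetical on-wire layout (the real struct sdp_srcah is defined by the driver):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable big-endian 64-bit load, standing in for be64_to_cpu(). */
static uint64_t be64_load(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* 16 bytes as they might arrive on the wire: len, rkey, vaddr. */
	unsigned char wire[16] = {
		0x00, 0x00, 0x10, 0x00,  /* len  = 4096       */
		0x12, 0x34, 0x56, 0x78,  /* rkey = 0x12345678 */
		0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0x00, 0x08, /* vaddr */
	};
	uint32_t len, rkey;

	memcpy(&len, wire, 4);
	memcpy(&rkey, wire + 4, 4);
	printf("len   = %u\n", ntohl(len));
	printf("rkey  = 0x%x\n", ntohl(rkey));
	printf("vaddr = 0x%llx\n", (unsigned long long)be64_load(wire + 8));
	return 0;
}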
Code example #4
File: sdp_tx.c Project: AhmadTux/freebsd
static int
sdp_handle_send_comp(struct sdp_sock *ssk, struct ib_wc *wc)
{
	struct mbuf *mb = NULL;
	struct sdp_bsdh *h;

	if (unlikely(wc->status)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			sdp_prf(ssk->socket, mb, "Send completion with error. "
				"Status %d", wc->status);
			sdp_dbg_data(ssk->socket, "Send completion with error. "
				"Status %d\n", wc->status);
			sdp_notify(ssk, ECONNRESET);
		}
	}

	mb = sdp_send_completion(ssk, wc->wr_id);
	if (unlikely(!mb))
		return -1;

	h = mtod(mb, struct sdp_bsdh *);
	sdp_prf1(ssk->socket, mb, "tx completion. mseq:%d", ntohl(h->mseq));
	sdp_dbg(ssk->socket, "tx completion. %p %d mseq:%d\n",
	    mb, mb->m_pkthdr.len, ntohl(h->mseq));
	m_freem(mb);

	return 0;
}
Code example #5
File: sdp_bcopy.c Project: u9621071/kernel-uek-UEK3
static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb)
{
	struct sdp_bsdh *h = (struct sdp_bsdh *)skb_transport_header(skb);
	int send_now =
		BZCOPY_STATE(skb) ||
		unlikely(h->mid != SDP_MID_DATA) ||
		(ssk->nonagle & TCP_NAGLE_OFF) ||
		!ssk->nagle_last_unacked ||
		skb->next != (struct sk_buff *)&sk_ssk(ssk)->sk_write_queue ||
		skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
		(SDP_SKB_CB(skb)->flags & TCPHDR_PSH) ||
		(SDP_SKB_CB(skb)->flags & TCPHDR_URG);

	if (send_now) {
		unsigned long mseq = ring_head(ssk->tx_ring);
		ssk->nagle_last_unacked = mseq;
	} else {
		if (!timer_pending(&ssk->nagle_timer) && ssk->qp_active) {
			mod_timer(&ssk->nagle_timer,
					jiffies + SDP_NAGLE_TIMEOUT);
			sdp_dbg_data(sk_ssk(ssk), "Starting nagle timer\n");
		}
	}

	return send_now;
}
Code example #6
static void
sdp_nagle_timeout(void *data)
{
	struct sdp_sock *ssk = (struct sdp_sock *)data;
	struct socket *sk = ssk->socket;

	sdp_dbg_data(sk, "last_unacked = %ld\n", ssk->nagle_last_unacked);

	if (!callout_active(&ssk->nagle_timer))
		return;
	callout_deactivate(&ssk->nagle_timer);

	if (!ssk->nagle_last_unacked)
		goto out;
	if (ssk->state == TCPS_CLOSED)
		return;
	ssk->nagle_last_unacked = 0;
	sdp_post_sends(ssk, M_NOWAIT);

	sowwakeup(ssk->socket);
out:
	if (sk->so_snd.sb_sndptr)
		callout_reset(&ssk->nagle_timer, SDP_NAGLE_TIMEOUT,
		    sdp_nagle_timeout, ssk);
}
Code example #7
File: sdp_tx.c Project: AhmadTux/freebsd
static void
sdp_tx_irq(struct ib_cq *cq, void *cq_context)
{
	struct sdp_sock *ssk;

	ssk = cq_context;
	sdp_prf1(ssk->socket, NULL, "tx irq");
	sdp_dbg_data(ssk->socket, "Got tx comp interrupt\n");
	SDPSTATS_COUNTER_INC(tx_int_count);
	SDP_WLOCK(ssk);
	sdp_poll_tx(ssk);
	SDP_WUNLOCK(ssk);
}
Code example #8
File: sdp_rx.c Project: Lxg1582/freebsd
/* called only from irq */
static struct mbuf *
sdp_process_rx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
{
	struct mbuf *mb;
	struct sdp_bsdh *h;
	struct socket *sk = ssk->socket;
	int mseq;

	mb = sdp_recv_completion(ssk, wc->wr_id);
	if (unlikely(!mb))
		return NULL;

	if (unlikely(wc->status)) {
		if (ssk->qp_active && sk) {
			sdp_dbg(sk, "Recv completion with error. "
					"Status %d, vendor: %d\n",
				wc->status, wc->vendor_err);
			sdp_abort(sk);
			ssk->qp_active = 0;
		}
		m_freem(mb);
		return NULL;
	}

	sdp_dbg_data(sk, "Recv completion. ID %d Length %d\n",
			(int)wc->wr_id, wc->byte_len);
	if (unlikely(wc->byte_len < sizeof(struct sdp_bsdh))) {
		sdp_warn(sk, "SDP BUG! byte_len %d < %zd\n",
				wc->byte_len, sizeof(struct sdp_bsdh));
		m_freem(mb);
		return NULL;
	}
	/* Use m_adj to trim the tail of data we didn't use. */
	m_adj(mb, -(mb->m_pkthdr.len - wc->byte_len));
	h = mtod(mb, struct sdp_bsdh *);

	SDP_DUMP_PACKET(ssk->socket, "RX", mb, h);

	ssk->rx_packets++;
	ssk->rx_bytes += mb->m_pkthdr.len;

	mseq = ntohl(h->mseq);
	atomic_set(&ssk->mseq_ack, mseq);
	if (mseq != (int)wc->wr_id)
		sdp_warn(sk, "SDP BUG! mseq %d != wrid %d\n",
				mseq, (int)wc->wr_id);

	return mb;
}
Code example #9
File: sdp_bcopy.c Project: u9621071/kernel-uek-UEK3
void sdp_nagle_timeout(unsigned long data)
{
	struct sdp_sock *ssk = (struct sdp_sock *)data;
	struct sock *sk = sk_ssk(ssk);

	SDPSTATS_COUNTER_INC(nagle_timer);
	sdp_dbg_data(sk, "last_unacked = %u\n", ssk->nagle_last_unacked);

	if (!ssk->nagle_last_unacked)
		goto out2;

	/* Only process if the socket is not in use */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sdp_dbg_data(sk, "socket is busy - will try later\n");
		goto out;
	}

	if (sk->sk_state == TCP_CLOSE) {
		bh_unlock_sock(sk);
		return;
	}

	ssk->nagle_last_unacked = 0;
	sdp_post_sends(ssk, GFP_ATOMIC);

	if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)))
		sk_stream_write_space(sk);
out:
	bh_unlock_sock(sk);
out2:
	if (sk->sk_send_head && ssk->qp_active) {
		/* If has pending sends - rearm */
		mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT);
	}
}
Code example #10
File: sdp_tx.c Project: u9621071/kernel-uek-UEK3
static void sdp_tx_irq(struct ib_cq *cq, void *cq_context)
{
	struct sock *sk = cq_context;
	struct sdp_sock *ssk = sdp_sk(sk);

	sdp_prf1(sk, NULL, "tx irq");
	sdp_dbg_data(sk, "Got tx comp interrupt\n");

	SDPSTATS_COUNTER_INC(tx_int_count);

	ssk->tx_compl_pending = 1;

	if (sdp_tx_handler_select(ssk) && likely(ssk->qp_active &&
				sk->sk_state != TCP_CLOSE)) {
		sdp_prf1(sk, NULL, "poll and post from tasklet");
		mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);
		tasklet_schedule(&ssk->tx_ring.tasklet);
	}
}
Code example #11
File: sdp_tx.c Project: AhmadTux/freebsd
void
sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb)
{
	struct sdp_buf *tx_req;
	struct sdp_bsdh *h;
	unsigned long mseq;
	struct ib_device *dev;
	struct ib_send_wr *bad_wr;
	struct ib_sge ibsge[SDP_MAX_SEND_SGES];
	struct ib_sge *sge;
	struct ib_send_wr tx_wr = { NULL };
	int i, rc;
	u64 addr;

	if (!ssk->qp_active) {
		m_freem(mb);
		return;
	}

	mseq = ring_head(ssk->tx_ring);
	h = mtod(mb, struct sdp_bsdh *);
	/* h must point at the BSDH header before the stats macros read
	 * h->mid. */
	SDPSTATS_COUNTER_MID_INC(post_send, h->mid);
	SDPSTATS_HIST(send_size, mb->m_pkthdr.len);
	ssk->tx_packets++;
	ssk->tx_bytes += mb->m_pkthdr.len;

#ifdef SDP_ZCOPY
	if (unlikely(h->mid == SDP_MID_SRCAVAIL)) {
		struct tx_srcavail_state *tx_sa = TX_SRCAVAIL_STATE(mb);
		if (ssk->tx_sa != tx_sa) {
			sdp_dbg_data(ssk->socket, "SrcAvail cancelled "
					"before being sent!\n");
			WARN_ON(1);
			m_freem(mb);
			return;
		}
		TX_SRCAVAIL_STATE(mb)->mseq = mseq;
	}
#endif

	if (unlikely(mb->m_flags & M_URG))
		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
	else
		h->flags = 0;

	mb->m_flags |= M_RDONLY; /* Don't allow compression once sent. */
	h->bufs = htons(rx_ring_posted(ssk));
	h->len = htonl(mb->m_pkthdr.len);
	h->mseq = htonl(mseq);
	h->mseq_ack = htonl(mseq_ack(ssk));

	sdp_prf1(ssk->socket, mb, "TX: %s bufs: %d mseq:%ld ack:%d",
			mid2str(h->mid), rx_ring_posted(ssk), mseq,
			ntohl(h->mseq_ack));

	SDP_DUMP_PACKET(ssk->socket, "TX", mb, h);

	tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
	tx_req->mb = mb;
	dev = ssk->ib_device;
	sge = &ibsge[0];
	for (i = 0;  mb != NULL; i++, mb = mb->m_next, sge++) {
		addr = ib_dma_map_single(dev, mb->m_data, mb->m_len,
		    DMA_TO_DEVICE);
		/* TODO: proper error handling */
		BUG_ON(ib_dma_mapping_error(dev, addr));
		BUG_ON(i >= SDP_MAX_SEND_SGES);
		tx_req->mapping[i] = addr;
		sge->addr = addr;
		sge->length = mb->m_len;
		sge->lkey = ssk->sdp_dev->mr->lkey;
	}
	tx_wr.next = NULL;
	tx_wr.wr_id = mseq | SDP_OP_SEND;
	tx_wr.sg_list = ibsge;
	tx_wr.num_sge = i;
	tx_wr.opcode = IB_WR_SEND;
	tx_wr.send_flags = IB_SEND_SIGNALED;
	if (unlikely(tx_req->mb->m_flags & M_URG))
		tx_wr.send_flags |= IB_SEND_SOLICITED;

	rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);
	if (unlikely(rc)) {
		sdp_dbg(ssk->socket,
				"ib_post_send failed with status %d.\n", rc);

		sdp_cleanup_sdp_buf(ssk, tx_req, DMA_TO_DEVICE);

		sdp_notify(ssk, ECONNRESET);
		m_freem(tx_req->mb);
		return;
	}

	atomic_inc(&ssk->tx_ring.head);
	atomic_dec(&ssk->tx_ring.credits);
	atomic_set(&ssk->remote_credits, rx_ring_posted(ssk));

	return;
}
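Both sdp_post_send variants locate the per-message bookkeeping slot with mseq & (SDP_TX_SIZE - 1). That is equivalent to mseq % SDP_TX_SIZE only because the ring size is a power of two, which is also why it must stay one. A tiny sketch (ring size illustrative) demonstrating the equivalence:

#include <assert.h>
#include <stdio.h>

#define TX_SIZE 16 /* must be a power of two for the mask to work */

int main(void)
{
	unsigned long mseq;

	/* The mask maps any monotonically increasing sequence number
	 * onto a ring slot, wrapping automatically. */
	for (mseq = 12; mseq < 20; mseq++) {
		unsigned slot = mseq & (TX_SIZE - 1);

		assert(slot == mseq % TX_SIZE);
		printf("mseq %lu -> slot %u\n", mseq, slot);
	}
	return 0;
}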
Code example #12
File: sdp_tx.c Project: u9621071/kernel-uek-UEK3
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
{
	struct sdp_buf *tx_req;
	struct sdp_bsdh *h = (struct sdp_bsdh *)skb_transport_header(skb);
	unsigned long mseq = ring_head(ssk->tx_ring);
	int i, rc, frags;
	u64 addr;
	struct ib_device *dev;
	struct ib_send_wr *bad_wr;

	struct ib_sge ibsge[SDP_MAX_SEND_SGES];
	struct ib_sge *sge = ibsge;
	struct ib_send_wr tx_wr = { NULL };
	u32 send_flags = IB_SEND_SIGNALED;

	SDPSTATS_COUNTER_MID_INC(post_send, h->mid);
	SDPSTATS_HIST(send_size, skb->len);

	if (!ssk->qp_active)
		goto err;

	ssk->tx_packets++;

	if (h->mid != SDP_MID_SRCAVAIL &&
			h->mid != SDP_MID_DATA &&
			h->mid != SDP_MID_SRCAVAIL_CANCEL) {
		struct sock *sk = sk_ssk(ssk);

		sk->sk_wmem_queued += skb->truesize;
		sk_mem_charge(sk, skb->truesize);
	}

	if (unlikely(h->mid == SDP_MID_SRCAVAIL)) {
		struct tx_srcavail_state *tx_sa = TX_SRCAVAIL_STATE(skb);
		if (ssk->tx_sa != tx_sa) {
			sdp_dbg_data(sk_ssk(ssk), "SrcAvail cancelled "
					"before being sent!\n");
			SDP_WARN_ON(1);
			sk_wmem_free_skb(sk_ssk(ssk), skb);
			return;
		}
		TX_SRCAVAIL_STATE(skb)->mseq = mseq;
	}

	if (unlikely(SDP_SKB_CB(skb)->flags & TCPHDR_URG))
		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
	else
		h->flags = 0;

	h->bufs = htons(rx_ring_posted(ssk));
	h->len = htonl(skb->len);
	h->mseq = htonl(mseq);
	h->mseq_ack = htonl(mseq_ack(ssk));

	sdp_prf(sk_ssk(ssk), skb, "TX: %s bufs: %d mseq:%ld ack:%d c: %d",
			mid2str(h->mid), rx_ring_posted(ssk), mseq,
			ntohl(h->mseq_ack), tx_credits(ssk));

	SDP_DUMP_PACKET(sk_ssk(ssk), "TX", skb, h);

	tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
	tx_req->skb = skb;
	dev = ssk->ib_device;

	if (skb->len <= ssk->inline_thresh && !skb_shinfo(skb)->nr_frags) {
		SDPSTATS_COUNTER_INC(inline_sends);
		sge->addr = (u64) skb->data;
		sge->length = skb->len;
		sge->lkey = 0;
		frags = 0;
		tx_req->mapping[0] = 0; /* Nothing to be cleaned up by sdp_cleanup_sdp_buf() */
		send_flags |= IB_SEND_INLINE;
	} else {
		addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
				DMA_TO_DEVICE);
		tx_req->mapping[0] = addr;

		/* TODO: proper error handling */
		BUG_ON(ib_dma_mapping_error(dev, addr));

		sge->addr = addr;
		sge->length = skb->len - skb->data_len;
		sge->lkey = ssk->sdp_dev->mr->lkey;
		frags = skb_shinfo(skb)->nr_frags;
		for (i = 0; i < frags; ++i) {
			++sge;
			addr = ib_dma_map_page(dev,
					skb_shinfo(skb)->frags[i].page.p,
					skb_shinfo(skb)->frags[i].page_offset,
					skb_shinfo(skb)->frags[i].size,
					DMA_TO_DEVICE);
			BUG_ON(ib_dma_mapping_error(dev, addr));
			tx_req->mapping[i + 1] = addr;
			sge->addr = addr;
			sge->length = skb_shinfo(skb)->frags[i].size;
			sge->lkey = ssk->sdp_dev->mr->lkey;
		}
	}

	tx_wr.next = NULL;
	tx_wr.wr_id = ring_head(ssk->tx_ring) | SDP_OP_SEND;
	tx_wr.sg_list = ibsge;
	tx_wr.num_sge = frags + 1;
	tx_wr.opcode = IB_WR_SEND;
	tx_wr.send_flags = send_flags;
	if (unlikely(SDP_SKB_CB(skb)->flags & TCPHDR_URG))
		tx_wr.send_flags |= IB_SEND_SOLICITED;

	rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);
	if (unlikely(rc)) {
		sdp_dbg(sk_ssk(ssk),
				"ib_post_send failed with status %d.\n", rc);

		sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len, DMA_TO_DEVICE);

		sdp_set_error(sk_ssk(ssk), -ECONNRESET);

		goto err;
	}

	atomic_inc(&ssk->tx_ring.head);
	atomic_dec(&ssk->tx_ring.credits);
	atomic_set(&ssk->remote_credits, rx_ring_posted(ssk));

	return;

err:
	sk_wmem_free_skb(sk_ssk(ssk), skb);
}
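When the payload fits within ssk->inline_thresh and the skb has no page fragments, the code above skips DMA mapping entirely and lets the HCA copy the bytes out of the work request (IB_SEND_INLINE). A simplified sketch of that path selection; the threshold value here is illustrative, not the driver's:

#include <stdio.h>

#define INLINE_THRESH 128 /* illustrative; the driver uses ssk->inline_thresh */

/* Inline send is possible only for a small, linear payload: no page
 * fragments to map, and few enough bytes to embed in the WQE. */
static int use_inline(int len, int nr_frags)
{
	return len <= INLINE_THRESH && nr_frags == 0;
}

int main(void)
{
	printf("64 B linear:     %d (inline)\n",  use_inline(64, 0));
	printf("64 B fragmented: %d (DMA map)\n", use_inline(64, 2));
	printf("4096 B linear:   %d (DMA map)\n", use_inline(4096, 0));
	return 0;
}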
Code example #13
File: sdp_bcopy.c Project: u9621071/kernel-uek-UEK3
int sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
{
	/* TODO: nonagle? */
	struct sk_buff *skb;
	int post_count = 0;
	struct sock *sk = sk_ssk(ssk);

	if (unlikely(!ssk->id)) {
		if (sk->sk_send_head) {
			sdp_dbg(sk, "Send on socket without cmid ECONNRESET\n");
			/* TODO: flush send queue? */
			sdp_reset(sk);
		}
		return -ECONNRESET;
	}
again:
	if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
		sdp_xmit_poll(ssk, 1);

	/* Ran out of credits; check whether a credit update arrived */
	if (unlikely(tx_credits(ssk) <= SDP_MIN_TX_CREDITS)) {
		sdp_poll_rx_cq(ssk);

		if (unlikely(sdp_should_rearm(sk) || !posts_handler(ssk)))
			sdp_arm_rx_cq(sk);
	}

	if (unlikely((ssk->sa_post_rdma_rd_compl || ssk->sa_post_sendsm) &&
			tx_credits(ssk) < SDP_MIN_TX_CREDITS)) {
		sdp_dbg_data(sk, "Run out of credits, can't abort SrcAvail. "
			"RdmaRdCompl: %d SendSm: %d\n",
			ssk->sa_post_rdma_rd_compl, ssk->sa_post_sendsm);
	}

	if (ssk->sa_post_rdma_rd_compl && tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		int unreported = ssk->sa_post_rdma_rd_compl;

		skb = sdp_alloc_skb_rdmardcompl(sk, unreported, gfp);
		if (!skb)
			goto no_mem;
		sdp_post_send(ssk, skb);
		post_count++;
		ssk->sa_post_rdma_rd_compl = 0;
	}

	if (ssk->sa_post_sendsm && tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		skb = sdp_alloc_skb_sendsm(sk, gfp);
		if (unlikely(!skb))
			goto no_mem;
		sdp_post_send(ssk, skb);
		ssk->sa_post_sendsm = 0;
		post_count++;
	}

	if (ssk->recv_request &&
	    ring_tail(ssk->rx_ring) >= SDP_MIN_TX_CREDITS &&
	    tx_credits(ssk) >= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk)) {
		skb = sdp_alloc_skb_chrcvbuf_ack(sk,
				ssk->recv_frags * PAGE_SIZE, gfp);
		if (!skb)
			goto no_mem;
		ssk->recv_request = 0;
		sdp_post_send(ssk, skb);
		post_count++;
	}

	if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
	       sdp_tx_ring_slots_left(ssk) &&
	       sk->sk_send_head &&
		sdp_nagle_off(ssk, sk->sk_send_head)) {
		SDPSTATS_COUNTER_INC(send_miss_no_credits);
	}

	while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
	       sdp_tx_ring_slots_left(ssk) &&
	       (skb = sk->sk_send_head) &&
		sdp_nagle_off(ssk, skb)) {
		update_send_head(sk, skb);
		__skb_dequeue(&sk->sk_write_queue);

		sdp_post_send(ssk, skb);

		post_count++;
	}

	if (credit_update_needed(ssk) &&
	    likely((1 << sk->sk_state) &
		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {

		skb = sdp_alloc_skb_data(sk, 0, gfp);
		if (!skb)
			goto no_mem;

		sk->sk_wmem_queued += skb->truesize;
		sk_mem_charge(sk, skb->truesize);

		sdp_post_send(ssk, skb);
		SDPSTATS_COUNTER_INC(post_send_credits);
		post_count++;
	}

	/* send DisConn if needed
	 * Do not send DisConn if there is only 1 credit. Compliance with CA4-82
	 * If one credit is available, an implementation shall only send SDP
	 * messages that provide additional credits and also do not contain ULP
	 * payload. */
	if (unlikely(ssk->sdp_disconnect) &&
			!sk->sk_send_head &&
			tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		skb = sdp_alloc_skb_disconnect(sk, gfp);
		if (!skb)
			goto no_mem;
		ssk->sdp_disconnect = 0;
		sdp_post_send(ssk, skb);
		post_count++;
	}

	if (!sdp_tx_ring_slots_left(ssk) || post_count) {
		if (sdp_xmit_poll(ssk, 1))
			goto again;
	}

no_mem:
	return post_count;
}
Code example #14
File: sdp_rx.c Project: Lxg1582/freebsd
static int
sdp_process_rx_mb(struct sdp_sock *ssk, struct mbuf *mb)
{
	struct socket *sk;
	struct sdp_bsdh *h;
	unsigned long mseq_ack;
	int credits_before;

	h = mtod(mb, struct sdp_bsdh *);
	sk = ssk->socket;
	/*
	 * If another thread is in so_pcbfree this may be partially torn
	 * down but no further synchronization is required as the destroying
	 * thread will wait for receive to shutdown before discarding the
	 * socket.
	 */
	if (sk == NULL) {
		m_freem(mb);
		return 0;
	}

	SDPSTATS_HIST_LINEAR(credits_before_update, tx_credits(ssk));

	mseq_ack = ntohl(h->mseq_ack);
	credits_before = tx_credits(ssk);
	atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head(ssk->tx_ring) +
			1 + ntohs(h->bufs));
	if (mseq_ack >= ssk->nagle_last_unacked)
		ssk->nagle_last_unacked = 0;

	sdp_prf1(ssk->socket, mb, "RX %s +%d c:%d->%d mseq:%d ack:%d\n",
		mid2str(h->mid), ntohs(h->bufs), credits_before,
		tx_credits(ssk), ntohl(h->mseq), ntohl(h->mseq_ack));

	if (unlikely(h->mid == SDP_MID_DATA &&
	    mb->m_pkthdr.len == SDP_HEAD_SIZE)) {
		/* Credit update is valid even after RCV_SHUTDOWN */
		m_freem(mb);
		return 0;
	}

	if ((h->mid != SDP_MID_DATA && h->mid != SDP_MID_SRCAVAIL) ||
	    TCPS_HAVERCVDFIN(ssk->state)) {
		sdp_prf(sk, NULL, "Control mb - queing to control queue");
#ifdef SDP_ZCOPY
		if (h->mid == SDP_MID_SRCAVAIL_CANCEL) {
			sdp_dbg_data(sk, "Got SrcAvailCancel. "
					"seq: 0x%x seq_ack: 0x%x\n",
					ntohl(h->mseq), ntohl(h->mseq_ack));
			ssk->srcavail_cancel_mseq = ntohl(h->mseq);
		}

		if (h->mid == SDP_MID_RDMARDCOMPL) {
			struct sdp_rrch *rrch = (struct sdp_rrch *)(h+1);
			sdp_dbg_data(sk, "RdmaRdCompl message arrived\n");
			sdp_handle_rdma_read_compl(ssk, ntohl(h->mseq_ack),
					ntohl(rrch->len));
		}
#endif
		mb->m_nextpkt = NULL;
		if (ssk->rx_ctl_tail)
			ssk->rx_ctl_tail->m_nextpkt = mb;
		else
			ssk->rx_ctl_q = mb;
		ssk->rx_ctl_tail = mb;

		return 0;
	}

	sdp_prf1(sk, NULL, "queueing %s mb\n", mid2str(h->mid));
	mb = sdp_sock_queue_rcv_mb(sk, mb);

	return 0;
}
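The credit refresh above, atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head + 1 + ntohs(h->bufs)), can be read as: the peer's advertised receive buffers minus the messages still in flight (those with sequence numbers between mseq_ack and the ring head). A hedged standalone sketch of that arithmetic:

#include <stdio.h>

/* The peer acknowledged everything up to mseq_ack and advertises
 * 'bufs' free receive buffers; ring_head is the next sequence number
 * we will assign.  Messages in (mseq_ack, ring_head) are in flight
 * and each consumes one advertised buffer. */
static int tx_credits(unsigned long mseq_ack, unsigned long ring_head,
    unsigned int bufs)
{
	unsigned long inflight = ring_head - 1 - mseq_ack;

	/* Equivalent to mseq_ack - ring_head + 1 + bufs above. */
	return (int)(bufs - inflight);
}

int main(void)
{
	/* Peer acked mseq 10, we have posted through 12 (head = 13) and
	 * the peer advertises 8 buffers: 2 in flight, 6 credits left. */
	printf("credits = %d\n", tx_credits(10, 13, 8));
	return 0;
}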
Code example #15
File: sdp_rx.c Project: Lxg1582/freebsd
/* socket lock should be taken before calling this */
static int
sdp_process_rx_ctl_mb(struct sdp_sock *ssk, struct mbuf *mb)
{
	struct sdp_bsdh *h;
	struct socket *sk;

	SDP_WLOCK_ASSERT(ssk);
	sk = ssk->socket;
	h = mtod(mb, struct sdp_bsdh *);
	switch (h->mid) {
	case SDP_MID_DATA:
	case SDP_MID_SRCAVAIL:
		sdp_dbg(sk, "DATA after socket rcv was shutdown\n");

		/* got data in RCV_SHUTDOWN */
		if (ssk->state == TCPS_FIN_WAIT_1) {
			sdp_dbg(sk, "RX data when state = FIN_WAIT1\n");
			sdp_notify(ssk, ECONNRESET);
		}
		m_freem(mb);

		break;
#ifdef SDP_ZCOPY
	case SDP_MID_RDMARDCOMPL:
		m_freem(mb);
		break;
	case SDP_MID_SENDSM:
		sdp_handle_sendsm(ssk, ntohl(h->mseq_ack));
		m_freem(mb);
		break;
	case SDP_MID_SRCAVAIL_CANCEL:
		sdp_dbg_data(sk, "Handling SrcAvailCancel\n");
		sdp_prf(sk, NULL, "Handling SrcAvailCancel");
		if (ssk->rx_sa) {
			ssk->srcavail_cancel_mseq = ntohl(h->mseq);
			ssk->rx_sa->flags |= RX_SA_ABORTED;
			ssk->rx_sa = NULL; /* TODO: change it into SDP_MID_DATA and get 
			                      the dirty logic from recvmsg */
		} else {
			sdp_dbg(sk, "Got SrcAvailCancel - "
					"but no SrcAvail in process\n");
		}
		m_freem(mb);
		break;
	case SDP_MID_SINKAVAIL:
		sdp_dbg_data(sk, "Got SinkAvail - not supported: ignored\n");
		sdp_prf(sk, NULL, "Got SinkAvail - not supported: ignored");
		/* FALLTHROUGH */
#endif
	case SDP_MID_ABORT:
		sdp_dbg_data(sk, "Handling ABORT\n");
		sdp_prf(sk, NULL, "Handling ABORT");
		sdp_notify(ssk, ECONNRESET);
		m_freem(mb);
		break;
	case SDP_MID_DISCONN:
		sdp_dbg_data(sk, "Handling DISCONN\n");
		sdp_prf(sk, NULL, "Handling DISCONN");
		sdp_handle_disconn(ssk);
		break;
	case SDP_MID_CHRCVBUF:
		sdp_dbg_data(sk, "Handling RX CHRCVBUF\n");
		sdp_handle_resize_request(ssk, (struct sdp_chrecvbuf *)(h+1));
		m_freem(mb);
		break;
	case SDP_MID_CHRCVBUF_ACK:
		sdp_dbg_data(sk, "Handling RX CHRCVBUF_ACK\n");
		sdp_handle_resize_ack(ssk, (struct sdp_chrecvbuf *)(h+1));
		m_freem(mb);
		break;
	default:
		/* TODO: Handle other messages */
		sdp_warn(sk, "SDP: FIXME MID %d\n", h->mid);
		m_freem(mb);
	}

	return 0;
}