Example #1
static struct mbuf *
sdp_send_completion(struct sdp_sock *ssk, int mseq)
{
	struct ib_device *dev;
	struct sdp_buf *tx_req;
	struct mbuf *mb = NULL;
	struct sdp_tx_ring *tx_ring = &ssk->tx_ring;

	/* Completions must arrive in ring order; anything else is bogus. */
	if (unlikely(mseq != ring_tail(*tx_ring))) {
		printk(KERN_WARNING "Bogus send completion id %d tail %d\n",
			mseq, ring_tail(*tx_ring));
		goto out;
	}

	dev = ssk->ib_device;
	/* SDP_TX_SIZE is a power of two, so the mask wraps the ring index. */
	tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
	mb = tx_req->mb;
	/* Unmap the DMA mapping before handing the mbuf back to the caller. */
	sdp_cleanup_sdp_buf(ssk, tx_req, DMA_TO_DEVICE);

#ifdef SDP_ZCOPY
	/* TODO: AIO and real zcopy code; add their context support here */
	if (BZCOPY_STATE(mb))
		BZCOPY_STATE(mb)->busy--;
#endif

	/* Retire the slot; advancing the tail lets the sender reuse it. */
	atomic_inc(&tx_ring->tail);

out:
	return mb;
}
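For context, a minimal caller sketch for the FreeBSD variant: a TX completion handler that frees each mbuf the function hands back. sdp_handle_tx_wc() and the wr_id-carries-mseq convention are assumptions for illustration, not code from the driver.

static void
sdp_handle_tx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
{
	struct mbuf *mb;

	/* Assumed convention: wr_id holds the ring sequence of the send. */
	mb = sdp_send_completion(ssk, (int)wc->wr_id);
	if (mb != NULL)
		m_freem(mb);	/* DMA already unmapped; safe to free */
}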
Example #2
static struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
{
	struct ib_device *dev;
	struct sdp_buf *tx_req;
	struct sk_buff *skb = NULL;
	struct sdp_tx_ring *tx_ring = &ssk->tx_ring;

	if (unlikely(mseq != ring_tail(*tx_ring))) {
		printk(KERN_WARNING "Bogus send completion id %d tail %d\n",
			mseq, ring_tail(*tx_ring));
		goto out;
	}

	dev = ssk->ib_device;
	tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
	skb = tx_req->skb;
	if (!skb)
		goto skip; /* This slot was used by RDMA WR */

	/* skb->len - skb->data_len is skb_headlen(): the linear DMA span. */
	sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len, DMA_TO_DEVICE);

	tx_ring->una_seq += SDP_SKB_CB(skb)->end_seq;

	/* TODO: AIO and real zcopy code; add their context support here */
	if (BZCOPY_STATE(skb))
		BZCOPY_STATE(skb)->busy--;

skip:
	atomic_inc(&tx_ring->tail);

out:
	return skb;
}
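A rough sketch of the Linux-side polling loop that could drive this function; ib_poll_cq() is the stock verbs API, while sdp_poll_tx_cq() and the omission of locking and error teardown are simplifications for illustration.

static void sdp_poll_tx_cq(struct sdp_sock *ssk, struct ib_cq *cq)
{
	struct ib_wc wc;
	struct sk_buff *skb;

	/* Drain one completion at a time; real code batches and locks. */
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			continue;	/* real code would tear down the QP */
		skb = sdp_send_completion(ssk, (int)wc.wr_id);
		if (skb)
			__kfree_skb(skb);
	}
}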
Example #3
static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb)
{
	struct sdp_bsdh *h = (struct sdp_bsdh *)skb_transport_header(skb);
	/* Send immediately unless Nagle says to keep coalescing. */
	int send_now =
		BZCOPY_STATE(skb) ||
		unlikely(h->mid != SDP_MID_DATA) ||
		(ssk->nonagle & TCP_NAGLE_OFF) ||
		!ssk->nagle_last_unacked ||
		skb->next != (struct sk_buff *)&sk_ssk(ssk)->sk_write_queue ||
		skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
		(SDP_SKB_CB(skb)->flags & TCPHDR_PSH) ||
		(SDP_SKB_CB(skb)->flags & TCPHDR_URG);

	if (send_now) {
		unsigned long mseq = ring_head(ssk->tx_ring);
		ssk->nagle_last_unacked = mseq;
	} else {
		if (!timer_pending(&ssk->nagle_timer) && ssk->qp_active) {
			mod_timer(&ssk->nagle_timer,
					jiffies + SDP_NAGLE_TIMEOUT);
			sdp_dbg_data(sk_ssk(ssk), "Starting nagle timer\n");
		}
	}

	return send_now;
}
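The timer armed above needs an expiry handler that forces the held segment out. A plausible sketch, assuming the legacy (unsigned long) timer callback signature and sdp_post_sends() as the transmit entry point; the driver's real handler would also deal with locking and socket state.

static void sdp_nagle_timeout(unsigned long data)
{
	struct sdp_sock *ssk = (struct sdp_sock *)data;

	ssk->nagle_last_unacked = 0;	/* next sdp_nagle_off() sends now */
	sdp_post_sends(ssk, GFP_ATOMIC);	/* assumed flush routine */
}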
Example #4
static inline int
sdp_nagle_off(struct sdp_sock *ssk, struct mbuf *mb)
{
	struct sdp_bsdh *h;
	int send_now;

	h = mtod(mb, struct sdp_bsdh *);
	/* Send immediately unless Nagle says to keep coalescing. */
	send_now =
#ifdef SDP_ZCOPY
	    BZCOPY_STATE(mb) ||
#endif
	    unlikely(h->mid != SDP_MID_DATA) ||
	    (ssk->flags & SDP_NODELAY) ||
	    !ssk->nagle_last_unacked ||
	    mb->m_pkthdr.len >= ssk->xmit_size_goal / 4 ||
	    (mb->m_flags & M_PUSH);

	if (send_now) {
		unsigned long mseq = ring_head(ssk->tx_ring);
		ssk->nagle_last_unacked = mseq;
	} else {
		if (!callout_pending(&ssk->nagle_timer)) {
			callout_reset(&ssk->nagle_timer, SDP_NAGLE_TIMEOUT,
			    sdp_nagle_timeout, ssk);
			sdp_dbg_data(ssk->socket, "Starting nagle timer\n");
		}
	}
	sdp_dbg_data(ssk->socket, "send_now = %d last_unacked = %ld\n",
		send_now, ssk->nagle_last_unacked);

	return send_now;
}
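On the FreeBSD side the callout plays the same role; a minimal sketch of the handler callout_reset() arms above, again assuming sdp_post_sends() as the flush routine and eliding locking and callout re-arming.

static void
sdp_nagle_timeout(void *data)
{
	struct sdp_sock *ssk = data;

	ssk->nagle_last_unacked = 0;	/* force send_now on next check */
	sdp_post_sends(ssk, M_NOWAIT);	/* assumed transmit entry point */
}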