Example #1
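/*
 * Timer callback for the TX poll timer.  It reaps TX completions when the
 * socket is not owned by user context, posts further sends when anything
 * was reaped, and re-arms the timer while send WQEs remain posted.
 */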
static void sdp_poll_tx_timeout(unsigned long data)
{
	struct sdp_sock *ssk = (struct sdp_sock *)data;
	struct sock *sk = sk_ssk(ssk);
	u32 inflight, wc_processed;

	sdp_prf1(sk_ssk(ssk), NULL, "TX timeout: inflight=%d, head=%d tail=%d",
		(u32) tx_ring_posted(ssk),
		ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));

	/* Only process if the socket is not in use */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sdp_prf(sk_ssk(ssk), NULL, "TX comp: socket is busy");

		if (sdp_tx_handler_select(ssk) && sk->sk_state != TCP_CLOSE &&
				likely(ssk->qp_active)) {
			sdp_prf1(sk, NULL, "schedule a timer");
			mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);
		}

		SDPSTATS_COUNTER_INC(tx_poll_busy);
		goto out;
	}

	if (unlikely(!ssk->qp || sk->sk_state == TCP_CLOSE)) {
		SDPSTATS_COUNTER_INC(tx_poll_no_op);
		goto out;
	}

	wc_processed = sdp_process_tx_cq(ssk);
	if (!wc_processed)
		SDPSTATS_COUNTER_INC(tx_poll_miss);
	else {
		sdp_post_sends(ssk, GFP_ATOMIC);
		SDPSTATS_COUNTER_INC(tx_poll_hit);
	}

	inflight = (u32) tx_ring_posted(ssk);
	sdp_prf1(sk_ssk(ssk), NULL, "finished tx proccessing. inflight = %d",
			tx_ring_posted(ssk));

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && likely(ssk->qp_active))
		mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);

out:
	if (ssk->tx_ring.rdma_inflight && ssk->tx_ring.rdma_inflight->busy) {
		sdp_prf1(sk, NULL, "RDMA is inflight - arming irq");
		sdp_arm_tx_cq(sk);
	}

	bh_unlock_sock(sk);
}
Example #2
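/*
 * Drain the TX completion queue: poll the CQ in batches of SDP_NUM_WC work
 * completions and hand each one to sdp_process_tx_wc().  If anything was
 * reaped, post further sends and wake up writers blocked on the socket.
 * Returns the number of completions processed.
 */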
static int
sdp_process_tx_cq(struct sdp_sock *ssk)
{
	struct ib_wc ibwc[SDP_NUM_WC];
	int n, i;
	int wc_processed = 0;

	SDP_WLOCK_ASSERT(ssk);

	if (!ssk->tx_ring.cq) {
		sdp_dbg(ssk->socket, "tx irq on destroyed tx_cq\n");
		return 0;
	}

	do {
		n = ib_poll_cq(ssk->tx_ring.cq, SDP_NUM_WC, ibwc);
		for (i = 0; i < n; ++i) {
			sdp_process_tx_wc(ssk, ibwc + i);
			wc_processed++;
		}
	} while (n == SDP_NUM_WC);

	if (wc_processed) {
		sdp_post_sends(ssk, M_DONTWAIT);
		sdp_prf1(sk, NULL, "Waking sendmsg. inflight=%d", 
				(u32) tx_ring_posted(ssk));
		sowwakeup(ssk->socket);
	}

	return wc_processed;
}
Example #3
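/*
 * Free every mbuf still posted on the TX ring, reclaiming entries from the
 * ring tail until none remain or a slot yields no completion.
 */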
static void
sdp_tx_ring_purge(struct sdp_sock *ssk)
{
	while (tx_ring_posted(ssk)) {
		struct mbuf *mb;
		mb = sdp_send_completion(ssk, ring_tail(ssk->tx_ring));
		if (!mb)
			break;
		m_freem(mb);
	}
}
Example #4
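/*
 * Variant of the TX completion handler that, after draining the CQ in
 * batches of SDP_NUM_WC completions, reclaims socket memory, wakes writers
 * waiting for send space, and re-arms the TX CQ when a write is still
 * pending with no room in the send queue.
 */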
static int sdp_process_tx_cq(struct sdp_sock *ssk)
{
	struct ib_wc ibwc[SDP_NUM_WC];
	int n, i;
	int wc_processed = 0;

	if (!ssk->tx_ring.cq) {
		sdp_dbg(sk_ssk(ssk), "tx irq on destroyed tx_cq\n");
		return 0;
	}

	do {
		n = ib_poll_cq(ssk->tx_ring.cq, SDP_NUM_WC, ibwc);
		for (i = 0; i < n; ++i) {
			sdp_process_tx_wc(ssk, ibwc + i);
			wc_processed++;
		}
	} while (n == SDP_NUM_WC);

	if (wc_processed) {
		struct sock *sk = sk_ssk(ssk);
		sdp_prf1(sk, NULL, "Waking sendmsg. inflight=%d",
				(u32) tx_ring_posted(ssk));

		sk_mem_reclaim(sk);

		sk_stream_write_space(sk);
		if (sk->sk_write_pending &&
				test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
				tx_ring_posted(ssk)) {
			/* a write is pending and still no room in tx queue,
			 * arm tx cq
			 */
			sdp_prf(sk_ssk(ssk), NULL, "pending tx - rearming");
			sdp_arm_tx_cq(sk);
		}

	}

	return wc_processed;
}
Example #5
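/*
 * Poll the TX ring: bail out on closed sockets, reap completions (counting
 * poll hits and misses), and reschedule the poll timer while send WQEs are
 * still in flight.  With SDP_ZCOPY, the TX CQ is re-armed while an RDMA
 * transfer is still marked busy.
 */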
static void
sdp_poll_tx(struct sdp_sock *ssk)
{
	struct socket *sk = ssk->socket;
	u32 inflight, wc_processed;

	sdp_prf1(ssk->socket, NULL, "TX timeout: inflight=%d, head=%d tail=%d", 
		(u32) tx_ring_posted(ssk),
		ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));

	if (unlikely(ssk->state == TCPS_CLOSED)) {
		sdp_warn(sk, "Socket is closed\n");
		goto out;
	}

	wc_processed = sdp_process_tx_cq(ssk);
	if (!wc_processed)
		SDPSTATS_COUNTER_INC(tx_poll_miss);
	else
		SDPSTATS_COUNTER_INC(tx_poll_hit);

	inflight = (u32) tx_ring_posted(ssk);
	sdp_prf1(ssk->socket, NULL, "finished tx proccessing. inflight = %d",
	    inflight);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight)
		callout_reset(&ssk->tx_ring.timer, SDP_TX_POLL_TIMEOUT,
		    sdp_poll_tx_timeout, ssk);
out:
#ifdef SDP_ZCOPY
	if (ssk->tx_ring.rdma_inflight && ssk->tx_ring.rdma_inflight->busy) {
		sdp_prf1(sk, NULL, "RDMA is inflight - arming irq");
		sdp_arm_tx_cq(ssk);
	}
#endif
	return;
}
Example #6
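/*
 * Flush deferred work on the socket: process queued RX control mbufs,
 * replenish the receive ring, poll the TX ring when sends are posted, post
 * pending sends, and force an extra TX poll plus a credit update when
 * transmit credits run low.
 */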
void
sdp_do_posts(struct sdp_sock *ssk)
{
	struct socket *sk = ssk->socket;
	int xmit_poll_force;
	struct mbuf *mb;

	SDP_WLOCK_ASSERT(ssk);
	if (!ssk->qp_active) {
		sdp_dbg(sk, "QP is deactivated\n");
		return;
	}

	while ((mb = ssk->rx_ctl_q)) {
		ssk->rx_ctl_q = mb->m_nextpkt;
		mb->m_nextpkt = NULL;
		sdp_process_rx_ctl_mb(ssk, mb);
	}

	if (ssk->state == TCPS_TIME_WAIT)
		return;

	if (!ssk->rx_ring.cq || !ssk->tx_ring.cq)
		return;

	sdp_post_recvs(ssk);

	if (tx_ring_posted(ssk))
		sdp_xmit_poll(ssk, 1);

	sdp_post_sends(ssk, M_NOWAIT);

	xmit_poll_force = tx_credits(ssk) < SDP_MIN_TX_CREDITS;

	if (credit_update_needed(ssk) || xmit_poll_force) {
		/* If sends are pending because we ran out of tx credits, transmit them. */
		sdp_prf(sk, NULL, "Processing to free pending sends");
		sdp_xmit_poll(ssk, xmit_poll_force);
		sdp_prf(sk, NULL, "Sending credit update");
		sdp_post_sends(ssk, M_NOWAIT);
	}

}