Example #1
int
sdp_process_rx(struct sdp_sock *ssk)
{
	int wc_processed = 0;
	int credits_before;

	if (!rx_ring_trylock(&ssk->rx_ring)) {
		sdp_dbg(ssk->socket, "ring destroyed. not polling it\n");
		return 0;
	}

	credits_before = tx_credits(ssk);

	wc_processed = sdp_poll_rx_cq(ssk);
	sdp_prf(ssk->socket, NULL, "processed %d", wc_processed);

	if (wc_processed) {
		sdp_prf(ssk->socket, NULL, "credits:  %d -> %d",
				credits_before, tx_credits(ssk));
		/* Hand the completed receives to the rx completion
		 * workqueue for socket-level processing. */
		queue_work(rx_comp_wq, &ssk->rx_comp_work);
	}
	/* Re-arm the CQ so the next completion raises an interrupt. */
	sdp_arm_rx_cq(ssk);

	rx_ring_unlock(&ssk->rx_ring);

	return (wc_processed);
}
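
Polling and then arming a completion queue is racy in general: a completion that lands between the last poll and the arm raises no interrupt. A common verbs idiom closes that window by polling once more when the notify request reports missed events. The sketch below is illustrative only, written against the generic ib_poll_cq()/ib_req_notify_cq() kernel API rather than the driver's own helpers:

#include <rdma/ib_verbs.h>

/* Sketch: drain a CQ, re-arm it, then drain again if completions
 * slipped in while the CQ was being armed. */
static int drain_and_arm(struct ib_cq *cq)
{
	struct ib_wc wc;
	int n = 0;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		n++;			/* consume completed work requests */

	/* With IB_CQ_REPORT_MISSED_EVENTS, ib_req_notify_cq() returns > 0
	 * if completions arrived before the CQ was actually armed. */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
		while (ib_poll_cq(cq, 1, &wc) > 0)
			n++;

	return n;
}
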
Example #2
int
sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
{
	struct ib_cq *rx_cq;
	int rc = 0;

	sdp_dbg(ssk->socket, "creating rx ring\n");
	INIT_WORK(&ssk->rx_comp_work, sdp_rx_comp_work);
	atomic_set(&ssk->rx_ring.head, 1);
	atomic_set(&ssk->rx_ring.tail, 1);

	ssk->rx_ring.buffer = kmalloc(
			sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE, GFP_KERNEL);
	if (!ssk->rx_ring.buffer) {
		sdp_warn(ssk->socket,
			"Unable to allocate RX ring of size %zu.\n",
			 sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE);

		return -ENOMEM;
	}

	rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
			  ssk->socket, SDP_RX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED);

	if (IS_ERR(rx_cq)) {
		rc = PTR_ERR(rx_cq);
		sdp_warn(ssk->socket, "Unable to allocate RX CQ: %d.\n", rc);
		goto err_cq;
	}

	ssk->rx_ring.cq = rx_cq;
	sdp_arm_rx_cq(ssk);

	return 0;

err_cq:
	kfree(ssk->rx_ring.buffer);
	ssk->rx_ring.buffer = NULL;
	return rc;
}
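
A create path like this implies a matching teardown that releases resources in reverse order of allocation. The routine below is a hypothetical sketch (the driver's actual destroy function is not shown here) that assumes only the rx_ring fields used above:

/* Hypothetical teardown mirroring the create path: destroy the CQ
 * before freeing the buffer its completions point into. */
static void rx_ring_teardown(struct sdp_sock *ssk)
{
	if (ssk->rx_ring.cq) {
		ib_destroy_cq(ssk->rx_ring.cq);
		ssk->rx_ring.cq = NULL;
	}
	kfree(ssk->rx_ring.buffer);	/* kfree(NULL) is a no-op */
	ssk->rx_ring.buffer = NULL;
}
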
Example #3
int sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
{
	/* TODO: nonagle? */
	struct sk_buff *skb;
	int post_count = 0;
	struct sock *sk = sk_ssk(ssk);

	if (unlikely(!ssk->id)) {
		if (sk->sk_send_head) {
			sdp_dbg(sk, "Send on socket without cmid ECONNRESET\n");
			/* TODO: flush send queue? */
			sdp_reset(sk);
		}
		return -ECONNRESET;
	}
again:
	if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
		sdp_xmit_poll(ssk, 1);

	/* Ran out of credits; check whether a credit update arrived */
	if (unlikely(tx_credits(ssk) <= SDP_MIN_TX_CREDITS)) {
		sdp_poll_rx_cq(ssk);

		if (unlikely(sdp_should_rearm(sk) || !posts_handler(ssk)))
			sdp_arm_rx_cq(sk);
	}

	if (unlikely((ssk->sa_post_rdma_rd_compl || ssk->sa_post_sendsm) &&
			tx_credits(ssk) < SDP_MIN_TX_CREDITS)) {
		sdp_dbg_data(sk, "Out of credits; can't abort SrcAvail. "
			"RdmaRdCompl: %d SendSm: %d\n",
			ssk->sa_post_rdma_rd_compl, ssk->sa_post_sendsm);
	}

	if (ssk->sa_post_rdma_rd_compl && tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		int unreported = ssk->sa_post_rdma_rd_compl;

		skb = sdp_alloc_skb_rdmardcompl(sk, unreported, gfp);
		if (!skb)
			goto no_mem;
		sdp_post_send(ssk, skb);
		post_count++;
		ssk->sa_post_rdma_rd_compl = 0;
	}

	if (ssk->sa_post_sendsm && tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		skb = sdp_alloc_skb_sendsm(sk, gfp);
		if (unlikely(!skb))
			goto no_mem;
		sdp_post_send(ssk, skb);
		ssk->sa_post_sendsm = 0;
		post_count++;
	}

	if (ssk->recv_request &&
	    ring_tail(ssk->rx_ring) >= SDP_MIN_TX_CREDITS &&
	    tx_credits(ssk) >= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk)) {
		skb = sdp_alloc_skb_chrcvbuf_ack(sk,
				ssk->recv_frags * PAGE_SIZE, gfp);
		if (!skb)
			goto no_mem;
		ssk->recv_request = 0;
		sdp_post_send(ssk, skb);
		post_count++;
	}

	if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk) &&
	    sk->sk_send_head &&
	    sdp_nagle_off(ssk, sk->sk_send_head)) {
		SDPSTATS_COUNTER_INC(send_miss_no_credits);
	}

	while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
	       sdp_tx_ring_slots_left(ssk) &&
	       (skb = sk->sk_send_head) &&
	       sdp_nagle_off(ssk, skb)) {
		update_send_head(sk, skb);
		__skb_dequeue(&sk->sk_write_queue);

		sdp_post_send(ssk, skb);

		post_count++;
	}

	if (credit_update_needed(ssk) &&
	    likely((1 << sk->sk_state) &
		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {

		skb = sdp_alloc_skb_data(sk, 0, gfp);
		if (!skb)
			goto no_mem;

		sk->sk_wmem_queued += skb->truesize;
		sk_mem_charge(sk, skb->truesize);

		sdp_post_send(ssk, skb);
		SDPSTATS_COUNTER_INC(post_send_credits);
		post_count++;
	}

	/* Send DisConn if needed.
	 * Do not send DisConn if there is only one credit, to comply with
	 * CA4-82: if one credit is available, an implementation shall only
	 * send SDP messages that provide additional credits and also do not
	 * contain ULP payload. */
	if (unlikely(ssk->sdp_disconnect) &&
			!sk->sk_send_head &&
			tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		skb = sdp_alloc_skb_disconnect(sk, gfp);
		if (!skb)
			goto no_mem;
		ssk->sdp_disconnect = 0;
		sdp_post_send(ssk, skb);
		post_count++;
	}

	if (!sdp_tx_ring_slots_left(ssk) || post_count) {
		if (sdp_xmit_poll(ssk, 1))
			goto again;
	}

no_mem:
	return post_count;
}
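
Every post in the function above is gated on the same two resources: peer credits (tx_credits()) and local send-ring slots (sdp_tx_ring_slots_left()), with SDP_MIN_TX_CREDITS held in reserve so credit updates can still flow. Payload-bearing sends require strictly more than the reserve, while control messages (SendSM, RdmaRdCompl, DisConn) may run the balance down to it. Two hypothetical helpers, named here for illustration only, make that split explicit:

/* Illustrative only: the gating rules sdp_post_sends() applies. */
static inline int sdp_can_post_payload(struct sdp_sock *ssk)
{
	/* Data must leave the credit reserve intact and needs a TX slot. */
	return tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
	       sdp_tx_ring_slots_left(ssk) > 0;
}

static inline int sdp_can_post_ctrl(struct sdp_sock *ssk)
{
	/* Pure control messages may consume the last reserve credits. */
	return tx_credits(ssk) >= SDP_MIN_TX_CREDITS;
}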