Example #1
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	dprintk("RPC:       %s: rep %p status %X opcode %X length %u\n",
		__func__, rep, wc->status, wc->opcode, wc->byte_len);

	if (wc->status != IB_WC_SUCCESS) {
		rep->rr_len = ~0U;
		goto out_schedule;
	}
	if (wc->opcode != IB_WC_RECV)
		return;

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
			rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);

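	/* The fixed RPC/RDMA header (xid, vers, credit, type) is four
	 * 32-bit words, i.e. 16 bytes; only replies at least that long
	 * can carry a valid credit field. */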
	if (rep->rr_len >= 16) {
		struct rpcrdma_msg *p = (struct rpcrdma_msg *)rep->rr_base;
		unsigned int credits = ntohl(p->rm_credit);

		if (credits == 0)
			credits = 1;	/* don't deadlock */
		else if (credits > rep->rr_buffer->rb_max_requests)
			credits = rep->rr_buffer->rb_max_requests;
		atomic_set(&rep->rr_buffer->rb_credits, credits);
	}

out_schedule:
	rpcrdma_schedule_tasklet(rep);
}
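For context, this handler is driven from a completion-queue polling loop. The sketch below is not the kernel's actual poller; the budget constants and the per-endpoint scratch array ep->rep_recv_wcs are assumptions used to show how ib_poll_cq() might feed work completions to it in batches.

/* A minimal sketch, assuming illustrative constants and a per-endpoint
 * scratch array; not the kernel's actual polling code. */
static int
rpcrdma_recvcq_poll_sketch(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;	/* assumed constants */
	do {
		wcs = ep->rep_recv_wcs;		/* assumed scratch array */

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}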
Example #2
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC:       %s: rep %p: %s\n",
		       __func__, rep, ib_wc_status_msg(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}
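In this variant the handler no longer schedules the tasklet itself; it only queues the rep on a caller-supplied list. The following is a hedged sketch of how the polling loop might drain the completion queue and hand the whole batch off at once; the constants, the scratch array, and rpcrdma_schedule_tasklet() taking a list are assumptions inferred from the sched_list parameter above.

/* A minimal sketch, not the actual kernel poller: collect completed reps
 * on a local list, then schedule them for reply processing in one pass. */
static int
rpcrdma_recvcq_poll_batched_sketch(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;	/* assumed constants */
	do {
		wcs = ep->rep_recv_wcs;		/* assumed scratch array */

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);	/* assumed to take a list */
	return rc;
}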
Example #3
File: verbs.c Project: 274914765/C
/*
 * Unregister and destroy buffer memory. Need to deal with
 * partial initialization, so it's callable from failed create.
 * Must be called before destroying endpoint, as registrations
 * reference it.
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
    int rc, i;
    struct rpcrdma_ia *ia = rdmab_to_ia(buf);

    /* clean up in reverse order from create
     *   1.  recv mr memory (mr free, then kfree)
     *   1a. bind mw memory
     *   2.  send mr memory (mr free, then kfree)
     *   3.  padding (if any) [moved to rpcrdma_ep_destroy]
     *   4.  arrays
     */
    dprintk("RPC:       %s: entering\n", __func__);

    for (i = 0; i < buf->rb_max_requests; i++) {
        if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) {
            rpcrdma_deregister_internal(ia,
                    buf->rb_recv_bufs[i]->rr_handle,
                    &buf->rb_recv_bufs[i]->rr_iov);
            kfree(buf->rb_recv_bufs[i]);
        }
        if (buf->rb_send_bufs && buf->rb_send_bufs[i]) {
            while (!list_empty(&buf->rb_mws)) {
                struct rpcrdma_mw *r;
                r = list_entry(buf->rb_mws.next,
                    struct rpcrdma_mw, mw_list);
                list_del(&r->mw_list);
                switch (ia->ri_memreg_strategy) {
                case RPCRDMA_MTHCAFMR:
                    rc = ib_dealloc_fmr(r->r.fmr);
                    if (rc)
                        dprintk("RPC:       %s:"
                            " ib_dealloc_fmr"
                            " failed %i\n",
                            __func__, rc);
                    break;
                case RPCRDMA_MEMWINDOWS_ASYNC:
                case RPCRDMA_MEMWINDOWS:
                    rc = ib_dealloc_mw(r->r.mw);
                    if (rc)
                        dprintk("RPC:       %s:"
                            " ib_dealloc_mw"
                            " failed %i\n",
                            __func__, rc);
                    break;
                default:
                    break;
                }
            }
            rpcrdma_deregister_internal(ia,
                    buf->rb_send_bufs[i]->rl_handle,
                    &buf->rb_send_bufs[i]->rl_iov);
            kfree(buf->rb_send_bufs[i]);
        }
    }

    kfree(buf->rb_pool);
}
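The single kfree(buf->rb_pool) at the end works because the create path carves both pointer arrays out of one allocation. Below is a hedged sketch of that layout only; the helper name is made up, and the real create path does considerably more than this.

/* A minimal sketch (not the actual rpcrdma_buffer_create()) showing why
 * one kfree(buf->rb_pool) releases both pointer arrays walked above. */
static int
rpcrdma_buffer_alloc_arrays_sketch(struct rpcrdma_buffer *buf)
{
    size_t len;
    char *p;

    len = buf->rb_max_requests *
          (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
    p = kzalloc(len, GFP_KERNEL);
    if (p == NULL)
        return -ENOMEM;

    buf->rb_pool = p;    /* freed as one block in rpcrdma_buffer_destroy */
    buf->rb_send_bufs = (struct rpcrdma_req **)p;
    p = (char *)&buf->rb_send_bufs[buf->rb_max_requests];
    buf->rb_recv_bufs = (struct rpcrdma_rep **)p;
    return 0;
}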
Example #4
static inline
void rpcrdma_event_process(struct ib_wc *wc)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long) wc->wr_id;

	dprintk("RPC:       %s: event rep %p status %X opcode %X length %u\n",
		__func__, rep, wc->status, wc->opcode, wc->byte_len);

	if (!rep) /* send or bind completion that we don't care about */
		return;

	if (IB_WC_SUCCESS != wc->status) {
		dprintk("RPC:       %s: %s WC status %X, connection lost\n",
			__func__, (wc->opcode & IB_WC_RECV) ? "recv" : "send",
			 wc->status);
		rep->rr_len = ~0U;
		rpcrdma_schedule_tasklet(rep);
		return;
	}

	switch (wc->opcode) {
	case IB_WC_RECV:
		rep->rr_len = wc->byte_len;
		ib_dma_sync_single_for_cpu(
			rdmab_to_ia(rep->rr_buffer)->ri_id->device,
			rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);
		/* Keep (only) the most recent credits, after checking validity */
		if (rep->rr_len >= 16) {
			struct rpcrdma_msg *p =
					(struct rpcrdma_msg *) rep->rr_base;
			unsigned int credits = ntohl(p->rm_credit);
			if (credits == 0) {
				dprintk("RPC:       %s: server"
					" dropped credits to 0!\n", __func__);
				/* don't deadlock */
				credits = 1;
			} else if (credits > rep->rr_buffer->rb_max_requests) {
				dprintk("RPC:       %s: server"
					" over-crediting: %d (%d)\n",
					__func__, credits,
					rep->rr_buffer->rb_max_requests);
				credits = rep->rr_buffer->rb_max_requests;
			}
			atomic_set(&rep->rr_buffer->rb_credits, credits);
		}
		/* fall through */
	case IB_WC_BIND_MW:
		rpcrdma_schedule_tasklet(rep);
		break;
	default:
		dprintk("RPC:       %s: unexpected WC event %X\n",
			__func__, wc->opcode);
		break;
	}
}
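The credit value stored in rb_credits is later consumed on the send side to bound the number of outstanding RPCs. The sketch below is illustrative only, loosely modeled on how a reply handler could fold the grant into the RPC congestion window; only xprt->cwnd, RPC_CWNDSHIFT, and xprt_release_rqst_cong() come from the generic sunrpc layer, the rest is assumed.

/* A minimal sketch: widen the transport congestion window when the server
 * grants more credits, and wake a congestion-blocked request if the window
 * opened up. Not the actual xprtrdma reply handler. */
static void
rpcrdma_update_cwnd_sketch(struct rpc_xprt *xprt, struct rpcrdma_rep *rep,
			   struct rpc_task *task)
{
	unsigned long cwnd = xprt->cwnd;

	xprt->cwnd = atomic_read(&rep->rr_buffer->rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(task);
}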