Пример #1
0
/**
 * xprt_rdma_free_slot - release an rpc_rqst
 * @xprt: controlling RPC transport
 * @rqst: rpc_rqst to release
 *
 * Scrubs the rqst and returns the owning rpcrdma_req to the buffer
 * pool, then wakes the next task (if any) waiting on the transport's
 * backlog queue for a free slot.
 */
static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
	/* Clear the rqst so the next user of this slot starts from a
	 * clean state. NOTE(review): this assumes rpcr_to_rdmar() is
	 * pure pointer arithmetic (container_of-style) and does not
	 * read the contents of *rqst -- confirm against its definition.
	 */
	memset(rqst, 0, sizeof(*rqst));
	rpcrdma_buffer_put(rpcr_to_rdmar(rqst));
	/* A slot just became free; give it to the next backlogged task. */
	rpc_wake_up_next(&xprt->backlog);
}
Пример #2
0
/*
 * xprt_rdma_free - release the RPC buffer and all RDMA resources
 * @buffer: send buffer previously handed out by xprt_rdma_allocate
 *          (may be NULL, in which case this is a no-op)
 *
 * Recovers the owning rpcrdma_req from the registered buffer,
 * deregisters any externally registered memory chunks, and returns
 * the request to the transport's buffer pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_regbuf *rb;
	int i;

	if (buffer == NULL)
		return;

	/* The caller's buffer is rg_base[] of a rpcrdma_regbuf; walk
	 * back from it to the regbuf, then to its owning request and
	 * finally to the transport that owns the buffer pool.
	 */
	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
	req = rb->rg_owner;
	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);

	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);

	/* Deregister every remaining chunk. Each call returns how many
	 * rl_segments[] entries it consumed, so "i" advances by that
	 * amount while rl_nchunks counts the chunks still to process.
	 * NOTE(review): this relies on rpcrdma_deregister_external()
	 * returning a positive segment count -- confirm, since a zero
	 * return with rl_nchunks still nonzero would respin on the
	 * same segment.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt);
	}

	/* All RDMA resources released; return the req to the pool. */
	rpcrdma_buffer_put(req);
}
Пример #3
0
/*
 * xprt_rdma_free - release the RPC buffer and all RDMA resources
 * @buffer: send buffer previously handed out by the transport's
 *          allocate method (may be NULL, in which case this is a no-op)
 *
 * Recovers the owning rpcrdma_req from the registered buffer, unmaps
 * any memory registrations still attached to it, and returns the
 * request to the transport's buffer pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_regbuf *rb;

	if (buffer == NULL)
		return;

	/* The caller's buffer is rg_base[] of a rpcrdma_regbuf; walk
	 * back from it to the regbuf and then to its owning request.
	 */
	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
	req = rb->rg_owner;
	/* Backchannel requests are not returned here; presumably they
	 * are owned and released by the backchannel code -- TODO
	 * confirm against the backchannel teardown path.
	 */
	if (req->rl_backchannel)
		return;

	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);

	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);

	/* Unmap MRs before the req is reused. The second argument
	 * selects synchronous behavior only for synchronous RPC tasks
	 * (!RPC_IS_ASYNC); async tasks must not sleep here.
	 */
	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req,
					    !RPC_IS_ASYNC(req->rl_task));

	/* All registrations released; return the req to the pool. */
	rpcrdma_buffer_put(req);
}
Пример #4
0
/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence.
 *
 * The RPC layer allocates both send and receive buffers in the same call
 * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
 * We may register rq_rcv_buf when using reply chunks.
 */
/*
 * xprt_rdma_allocate - acquire an rpcrdma_req and a send buffer
 * @task: RPC task requesting the buffer
 * @size: minimum number of bytes the caller needs
 *
 * Returns a pointer to the send buffer (rg_base of rl_sendbuf) on
 * success, or NULL if no req is available or allocation fails.
 * Lazily allocates the req's RDMA header buffer and (re)allocates the
 * send buffer when it is missing or too small.
 *
 * Control flow note: the function body is structured as a fast path
 * followed by goto targets. "out_rdmabuf" deliberately falls through
 * into "out_sendbuf", so a req that needed an rdmabuf also gets a
 * fresh sendbuf allocated.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t min_size;
	gfp_t flags;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (req == NULL)
		return NULL;

	/* Default: no-I/O allocation; swap-out tasks get emergency
	 * reserves but must not sleep waiting for memory.
	 */
	flags = GFP_NOIO | __GFP_NOWARN;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	/* Fast path: both buffers exist and the sendbuf is big enough. */
	if (req->rl_rdmabuf == NULL)
		goto out_rdmabuf;
	if (req->rl_sendbuf == NULL)
		goto out_sendbuf;
	if (size > req->rl_sendbuf->rg_size)
		goto out_sendbuf;

out:
	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_sendbuf->rg_base;

out_rdmabuf:
	/* Allocate the RPC/RDMA header buffer, sized to the inline
	 * write threshold. Falls through to allocate the sendbuf too.
	 */
	min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

out_sendbuf:
	/* XDR encoding and RPC/RDMA marshaling of this request has not
	 * yet occurred. Thus a lower bound is needed to prevent buffer
	 * overrun during marshaling.
	 *
	 * RPC/RDMA marshaling may choose to send payload bearing ops
	 * inline, if the result is smaller than the inline threshold.
	 * The value of the "size" argument accounts for header
	 * requirements but not for the payload in these cases.
	 *
	 * Likewise, allocate enough space to receive a reply up to the
	 * size of the inline threshold.
	 *
	 * It's unlikely that both the send header and the received
	 * reply will be large, but slush is provided here to allow
	 * flexibility when marshaling.
	 */
	min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
	min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	if (size < min_size)
		size = min_size;

	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	rb->rg_owner = req;

	/* Account for the replacement, then swap in the new sendbuf.
	 * NOTE(review): the old rl_sendbuf is freed before the new one
	 * is assigned -- presumably no concurrent user can observe the
	 * stale pointer here; confirm req is exclusively owned by this
	 * task at this point.
	 */
	r_xprt->rx_stats.hardway_register_count += size;
	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
	req->rl_sendbuf = rb;
	goto out;

out_fail:
	/* Allocation failed: return the req to the pool and count the
	 * failure. The caller sees NULL.
	 */
	rpcrdma_buffer_put(req);
	r_xprt->rx_stats.failed_marshal_count++;
	return NULL;
}