Example #1
/**
 * xprt_rdma_allocate - allocate transport resources for an RPC
 * @task: RPC task
 *
 * Return values:
 *        0:	Success; rq_buffer points to RPC buffer to use
 *   ENOMEM:	Out of memory, call again later
 *      EIO:	A permanent error occurred, do not retry
 */
static int
xprt_rdma_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	gfp_t flags;

	flags = RPCRDMA_DEF_GFP;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags))
		goto out_fail;
	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
		goto out_fail;

	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
	trace_xprtrdma_allocate(task, req);
	return 0;

out_fail:
	trace_xprtrdma_allocate(task, NULL);
	return -ENOMEM;
}
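The detail worth noting in this example is the GFP flag selection. A task that is swapping out pages (RPC_IS_SWAPPER()) must not sleep waiting for memory it may itself be trying to reclaim, so it allocates with GFP_NOWAIT and is allowed to dip into the memory reserves via __GFP_MEMALLOC; every other task uses the transport default. A minimal sketch of that decision factored into a standalone helper (rpcrdma_task_gfp() is a hypothetical name, not part of the code above):

/* Hypothetical helper: choose GFP flags for an RPC buffer allocation.
 * RPC_IS_SWAPPER() comes from <linux/sunrpc/sched.h>, the GFP flags
 * from <linux/gfp.h>, and RPCRDMA_DEF_GFP from the transport's
 * private xprt_rdma.h header.
 */
static gfp_t rpcrdma_task_gfp(const struct rpc_task *task)
{
	if (RPC_IS_SWAPPER(task))
		return __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
	return RPCRDMA_DEF_GFP;
}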
Example #2
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
int
rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_rpcwait) {
		if (task->tk_rpcwait != queue) {
			printk(KERN_WARNING "RPC: doubly enqueued task!\n");
			return -EWOULDBLOCK;
		}
		return 0;
	}
	if (RPC_IS_SWAPPER(task))
		rpc_insert_list(&queue->task, task);
	else
		rpc_append_list(&queue->task, task);
	task->tk_rpcwait = queue;

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));

	return 0;
}
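The helpers rpc_insert_list() and rpc_append_list() are not shown here; the policy is simply head insertion for swapper tasks versus tail insertion (FIFO) for everyone else. The sketch below expresses the same policy with the generic struct list_head helpers, as an illustration only; the historical code above used the RPC task's own linked-list fields rather than <linux/list.h>.

#include <linux/list.h>
#include <linux/types.h>

/* Illustration only: swapper tasks go to the head of the queue so
 * they are served first, everyone else is appended to preserve
 * FIFO ordering.
 */
static void rpc_queue_task_sketch(struct list_head *queue_head,
				  struct list_head *task_entry,
				  bool is_swapper)
{
	if (is_swapper)
		list_add(task_entry, queue_head);	/* head: served first */
	else
		list_add_tail(task_entry, queue_head);	/* tail: FIFO order */
}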
Example #3
/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence.
 *
 * The RPC layer allocates both send and receive buffers in the same call
 * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
 * We may register rq_rcv_buf when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t min_size;
	gfp_t flags;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (req == NULL)
		return NULL;

	flags = GFP_NOIO | __GFP_NOWARN;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	if (req->rl_rdmabuf == NULL)
		goto out_rdmabuf;
	if (req->rl_sendbuf == NULL)
		goto out_sendbuf;
	if (size > req->rl_sendbuf->rg_size)
		goto out_sendbuf;

out:
	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_sendbuf->rg_base;

out_rdmabuf:
	min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

out_sendbuf:
	/* XDR encoding and RPC/RDMA marshaling of this request has not
	 * yet occurred. Thus a lower bound is needed to prevent buffer
	 * overrun during marshaling.
	 *
	 * RPC/RDMA marshaling may choose to send payload bearing ops
	 * inline, if the result is smaller than the inline threshold.
	 * The value of the "size" argument accounts for header
	 * requirements but not for the payload in these cases.
	 *
	 * Likewise, allocate enough space to receive a reply up to the
	 * size of the inline threshold.
	 *
	 * It's unlikely that both the send header and the received
	 * reply will be large, but slush is provided here to allow
	 * flexibility when marshaling.
	 */
	min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
	min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	if (size < min_size)
		size = min_size;

	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	rb->rg_owner = req;

	r_xprt->rx_stats.hardway_register_count += size;
	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
	req->rl_sendbuf = rb;
	goto out;

out_fail:
	rpcrdma_buffer_put(req);
	r_xprt->rx_stats.failed_marshal_count++;
	return NULL;
}
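This older variant hands a bare buffer pointer back to the RPC layer, so the matching free routine has to work backwards from that pointer to the owning rpcrdma_req; that is what the rb->rg_owner assignment above is for. A sketch of just that recovery step, assuming rg_base is the in-line data area at the end of struct rpcrdma_regbuf (as the code above implies); deregistration of any reply chunk and other error handling are omitted:

/* Sketch only: recover the owning request from the buffer pointer
 * that xprt_rdma_allocate() returned, then hand the request back to
 * the transport's buffer pool.  container_of() is from
 * <linux/kernel.h>.
 */
static void xprt_rdma_free_sketch(void *buffer)
{
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	if (buffer == NULL)
		return;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
	req = rb->rg_owner;		/* set by the allocate path above */
	rpcrdma_buffer_put(req);	/* return the request to the pool */
}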