Code example #1
File: ib_fmr.c  Project: 020gzh/linux
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (fmr->fmr)
			ib_dealloc_fmr(fmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
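
The FMR returned by rds_ib_alloc_fmr() is allocated but not yet mapped to any pages. As a minimal sketch only (not code from the RDS driver), a caller could map a DMA page list into it with ib_map_phys_fmr(); the variables dma_pages, page_cnt and io_addr below are hypothetical inputs:

/*
 * Illustrative sketch: map an array of page-aligned DMA addresses into an
 * FMR obtained from rds_ib_alloc_fmr(). "dma_pages", "page_cnt" and
 * "io_addr" are placeholder inputs supplied by the caller.
 */
static int example_map_fmr(struct rds_ib_mr *ibmr, u64 *dma_pages,
			   int page_cnt, u64 io_addr)
{
	int ret;

	ret = ib_map_phys_fmr(ibmr->u.fmr.fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		return ret;

	/* The mapped region is now addressable via ibmr->u.fmr.fmr->rkey. */
	return 0;
}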
Code example #2
File: fmr_ops.c  Project: BWhitten/linux-stable
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}

static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del_init(&mw->fmr.fm_mr->list);
	return rc;
}
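
__fmr_unmap() above flushes a single FMR, but ib_unmap_fmr() takes a list head, so several FMRs can be flushed with one call. A sketch of that batched form, assuming the caller has a hypothetical array of rpcrdma_mw pointers to flush together (not part of the xprtrdma code above):

/*
 * Illustrative sketch: flush several FMRs with a single ib_unmap_fmr()
 * call. "mws" and "count" are hypothetical inputs.
 */
static int example_unmap_many(struct rpcrdma_mw **mws, int count)
{
	LIST_HEAD(l);
	int i, rc;

	for (i = 0; i < count; i++)
		list_add_tail(&mws[i]->fmr.fm_mr->list, &l);

	rc = ib_unmap_fmr(&l);

	/* Re-initialize each entry so it no longer points at the stack list. */
	for (i = 0; i < count; i++)
		list_del_init(&mws[i]->fmr.fm_mr->list);
	return rc;
}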
Code example #3
/*
 * Initialize buffer memory
 */
int
rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
	struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata)
{
	char *p;
	size_t len;
	int i, rc;
	struct rpcrdma_mw *r;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);
	atomic_set(&buf->rb_credits, 1);

	/* Need to allocate:
	 *   1.  arrays for send and recv pointers
	 *   2.  arrays of struct rpcrdma_req to fill in pointers
	 *   3.  array of struct rpcrdma_rep for replies
	 *   4.  padding, if any
	 *   5.  mw's, fmr's or frmr's, if any
	 * Send/recv buffers in req/rep need to be registered
	 */

	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
	len += cdata->padding;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		len += buf->rb_max_requests * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
	case RPCRDMA_MEMWINDOWS:
		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	default:
		break;
	}

	/* allocate 1, 4 and 5 in one shot */
	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC:       %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];

	/*
	 * Register the zeroed pad buffer, if any.
	 */
	if (cdata->padding) {
		rc = rpcrdma_register_internal(ia, p, cdata->padding,
					    &ep->rep_pad_mr, &ep->rep_pad);
		if (rc)
			goto out;
	}
	p += cdata->padding;

	/*
	 * Allocate the fmr's, or mw's for mw_bind chunk registration.
	 * We "cycle" the mw's in order to minimize rkey reuse,
	 * and also reduce unbind-to-bind collision.
	 */
	INIT_LIST_HEAD(&buf->rb_mws);
	r = (struct rpcrdma_mw *)p;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		for (i = buf->rb_max_requests * RPCRDMA_MAX_SEGS; i; i--) {
			r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
							 RPCRDMA_MAX_SEGS);
			if (IS_ERR(r->r.frmr.fr_mr)) {
				rc = PTR_ERR(r->r.frmr.fr_mr);
				dprintk("RPC:       %s: ib_alloc_fast_reg_mr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			r->r.frmr.fr_pgl =
				ib_alloc_fast_reg_page_list(ia->ri_id->device,
							    RPCRDMA_MAX_SEGS);
			if (IS_ERR(r->r.frmr.fr_pgl)) {
				rc = PTR_ERR(r->r.frmr.fr_pgl);
				dprintk("RPC:       %s: "
					"ib_alloc_fast_reg_page_list "
					"failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
			static struct ib_fmr_attr fa =
				{ RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT };
			r->r.fmr = ib_alloc_fmr(ia->ri_pd,
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
				&fa);
			if (IS_ERR(r->r.fmr)) {
				rc = PTR_ERR(r->r.fmr);
				dprintk("RPC:       %s: ib_alloc_fmr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
	case RPCRDMA_MEMWINDOWS:
		/* Allocate one extra request's worth, for full cycling */
		for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
			r->r.mw = ib_alloc_mw(ia->ri_pd);
			if (IS_ERR(r->r.mw)) {
				rc = PTR_ERR(r->r.mw);
				dprintk("RPC:       %s: ib_alloc_mw"
					" failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	default:
		break;
	}

	/*
	 * Allocate/init the request/reply buffers. Doing this
	 * using kmalloc for now -- one for each buf.
	 */
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		len = cdata->inline_wsize + sizeof(struct rpcrdma_req);
		/* RPC layer requests *double* size + 1K RPC_SLACK_SPACE! */
		/* Typical ~2400b, so rounding up saves work later */
		if (len < 4096)
			len = 4096;
		req = kmalloc(len, GFP_KERNEL);
		if (req == NULL) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = -ENOMEM;
			goto out;
		}
		memset(req, 0, sizeof(struct rpcrdma_req));
		buf->rb_send_bufs[i] = req;
		buf->rb_send_bufs[i]->rl_buffer = buf;

		rc = rpcrdma_register_internal(ia, req->rl_base,
				len - offsetof(struct rpcrdma_req, rl_base),
				&buf->rb_send_bufs[i]->rl_handle,
				&buf->rb_send_bufs[i]->rl_iov);
		if (rc)
			goto out;

		buf->rb_send_bufs[i]->rl_size = len-sizeof(struct rpcrdma_req);

		len = cdata->inline_rsize + sizeof(struct rpcrdma_rep);
		rep = kmalloc(len, GFP_KERNEL);
		if (rep == NULL) {
			dprintk("RPC:       %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = -ENOMEM;
			goto out;
		}
		memset(rep, 0, sizeof(struct rpcrdma_rep));
		buf->rb_recv_bufs[i] = rep;
		buf->rb_recv_bufs[i]->rr_buffer = buf;
		init_waitqueue_head(&rep->rr_unbind);

		rc = rpcrdma_register_internal(ia, rep->rr_base,
				len - offsetof(struct rpcrdma_rep, rr_base),
				&buf->rb_recv_bufs[i]->rr_handle,
				&buf->rb_recv_bufs[i]->rr_iov);
		if (rc)
			goto out;

	}
	dprintk("RPC:       %s: max_requests %d\n",
		__func__, buf->rb_max_requests);
	/* done */
	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}
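
The error path above defers all cleanup to rpcrdma_buffer_destroy(). For the RPCRDMA_MTHCAFMR strategy that teardown essentially has to walk rb_mws and release each FMR; the mw structures themselves come out of the single rb_pool allocation, so only the FMRs need individual release. A rough sketch of that shape (not the actual rpcrdma_buffer_destroy(), which also handles the FRMR and memory-window variants and the registered buffers):

/*
 * Illustrative sketch: release the FMRs threaded onto buf->rb_mws by the
 * RPCRDMA_MTHCAFMR branch above. Entries are only on the list after a
 * successful ib_alloc_fmr(), and the mw array is freed with rb_pool.
 */
static void example_release_fmrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r, *tmp;

	list_for_each_entry_safe(r, tmp, &buf->rb_mws, mw_list) {
		list_del(&r->mw_list);
		ib_dealloc_fmr(r->r.fmr);
	}
}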
Code example #4
File: fmr_ops.c  Project: Chong-Li/cse522
static int
fmr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int i, rc;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC:       %s: initalizing %d FMRs\n", __func__, i);

	rc = -ENOMEM;
	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			goto out;

		r->r.fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
					     sizeof(u64), GFP_KERNEL);
		if (!r->r.fmr.physaddrs)
			goto out_free;

		r->r.fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
		if (IS_ERR(r->r.fmr.fmr))
			goto out_fmr_err;

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;

out_fmr_err:
	rc = PTR_ERR(r->r.fmr.fmr);
	dprintk("RPC:       %s: ib_alloc_fmr status %i\n", __func__, rc);
	kfree(r->r.fmr.physaddrs);
out_free:
	kfree(r);
out:
	return rc;
}

static int
__fmr_unmap(struct rpcrdma_mw *r)
{
	LIST_HEAD(l);

	list_add(&r->r.fmr.fmr->list, &l);
	return ib_unmap_fmr(&l);
}
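
Note the difference from the __fmr_unmap() in example #2: this older variant leaves the FMR's embedded list entry linked to the on-stack list head after ib_unmap_fmr() returns, whereas the later version calls list_del_init() so the entry does not keep pointing at a stack frame that has gone out of scope.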