static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt) { struct ib_mr *mr; struct ib_fast_reg_page_list *pl; struct svc_rdma_fastreg_mr *frmr; frmr = kmalloc(sizeof(*frmr), GFP_KERNEL); if (!frmr) goto err; mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); if (IS_ERR(mr)) goto err_free_frmr; pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device, RPCSVC_MAXPAGES); if (IS_ERR(pl)) goto err_free_mr; frmr->mr = mr; frmr->page_list = pl; INIT_LIST_HEAD(&frmr->frmr_list); return frmr; err_free_mr: ib_dereg_mr(mr); err_free_frmr: kfree(frmr); err: return ERR_PTR(-ENOMEM); }
/*
 * rds_iw_send_init_ring - initialize every entry of the iWARP send ring.
 * @ic: connection whose i_sends array (i_send_ring.w_nr entries) is set up.
 *
 * For each ring slot: clears the per-send bookkeeping pointers, fills in a
 * template IB_WR_SEND work request (wr_id = slot index, one SGE), points
 * the header SGE at this slot's portion of the DMA-mapped header array,
 * and allocates a fast-reg MR plus page list for fastreg_message_size
 * pages.
 *
 * NOTE(review): if ib_alloc_fast_reg_page_list() fails, the s_mr just
 * allocated for this slot is not deregistered here, and `break` leaves the
 * remaining slots uninitialized with no error reported to the caller —
 * presumably ring teardown reclaims the partially built state; confirm
 * against the connection shutdown path before changing this.
 */
void rds_iw_send_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		/* No message, op, or DMA mapping attached yet. */
		send->s_rm = NULL;
		send->s_op = NULL;
		send->s_mapping = NULL;

		/* Template work request; per-send fields are set at post time. */
		send->s_wr.next = NULL;
		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		/* Data SGE: address/length are filled in when a message is
		 * queued; only the lkey is primed here. */
		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->lkey = 0;

		/* Header SGE: each slot owns one rds_header within the
		 * DMA-mapped header block. */
		sge = rds_iw_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;

		send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size);
		if (IS_ERR(send->s_mr)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n");
			break;
		}

		send->s_page_list = ib_alloc_fast_reg_page_list(
			ic->i_cm_id->device, fastreg_message_size);
		if (IS_ERR(send->s_page_list)) {
			/* NOTE(review): s_mr leaks here; see function comment. */
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed\n");
			break;
		}
	}
}
/*
 * Initialize buffer memory
 */
/*
 * rpcrdma_buffer_create - allocate and register the transport's buffer pool.
 * @buf:   buffer pool to populate (rb_* fields are written here).
 * @ep:    endpoint; receives the registered pad buffer, if any.
 * @ia:    interface adapter supplying the PD, CM id, and memreg strategy.
 * @cdata: connection parameters (max_requests, padding, inline sizes).
 *
 * Carves one kzalloc'd region into the send/recv pointer arrays, the
 * optional pad buffer, and the mw/fmr/frmr array (strategy-dependent),
 * then allocates and DMA-registers one req and one rep buffer per
 * request slot.  Returns 0 on success or a negative errno; on any
 * failure the partially built pool is torn down via
 * rpcrdma_buffer_destroy() before returning.
 */
int
rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
	struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata)
{
	char *p;
	size_t len;
	int i, rc;
	struct rpcrdma_mw *r;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);
	atomic_set(&buf->rb_credits, 1);	/* single credit until connected */

	/* Need to allocate:
	 *   1. arrays for send and recv pointers
	 *   2. arrays of struct rpcrdma_req to fill in pointers
	 *   3. array of struct rpcrdma_rep for replies
	 *   4. padding, if any
	 *   5. mw's, fmr's or frmr's, if any
	 * Send/recv buffers in req/rep need to be registered
	 */

	/* Size the single pooled allocation.  FRMR sizes for exactly
	 * rb_max_requests requests; the other strategies allocate one extra
	 * request's worth of entries (see the cycling comment below). */
	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
	len += cdata->padding;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		len += buf->rb_max_requests * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
	case RPCRDMA_MEMWINDOWS:
		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	default:
		break;
	}

	/* allocate 1, 4 and 5 in one shot */
	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	/* Carve the pool: send-pointer array, then recv-pointer array;
	 * p ends up at the pad buffer / mw array region. */
	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];

	/*
	 * Register the zeroed pad buffer, if any.
	 */
	if (cdata->padding) {
		rc = rpcrdma_register_internal(ia, p, cdata->padding,
					    &ep->rep_pad_mr, &ep->rep_pad);
		if (rc)
			goto out;
	}
	p += cdata->padding;

	/*
	 * Allocate the fmr's, or mw's for mw_bind chunk registration.
	 * We "cycle" the mw's in order to minimize rkey reuse,
	 * and also reduce unbind-to-bind collision.
	 */
	INIT_LIST_HEAD(&buf->rb_mws);
	r = (struct rpcrdma_mw *)p;	/* rest of the pool is the mw array */
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		/* One fast-reg MR + page list per mw entry.  On failure the
		 * entries already on rb_mws are reclaimed by
		 * rpcrdma_buffer_destroy() at "out". */
		for (i = buf->rb_max_requests * RPCRDMA_MAX_SEGS; i; i--) {
			r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
							 RPCRDMA_MAX_SEGS);
			if (IS_ERR(r->r.frmr.fr_mr)) {
				rc = PTR_ERR(r->r.frmr.fr_mr);
				dprintk("RPC: %s: ib_alloc_fast_reg_mr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			r->r.frmr.fr_pgl =
				ib_alloc_fast_reg_page_list(ia->ri_id->device,
							    RPCRDMA_MAX_SEGS);
			if (IS_ERR(r->r.frmr.fr_pgl)) {
				rc = PTR_ERR(r->r.frmr.fr_pgl);
				/* NOTE(review): fr_mr of this entry is not on
				 * rb_mws yet — verify destroy reclaims it. */
				dprintk("RPC: %s: "
					"ib_alloc_fast_reg_page_list "
					"failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
			/* max pages, max maps (1), page size */
			static struct ib_fmr_attr fa =
				{ RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT };
			r->r.fmr = ib_alloc_fmr(ia->ri_pd,
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
				&fa);
			if (IS_ERR(r->r.fmr)) {
				rc = PTR_ERR(r->r.fmr);
				dprintk("RPC: %s: ib_alloc_fmr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
	case RPCRDMA_MEMWINDOWS:
		/* Allocate one extra request's worth, for full cycling */
		for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
			r->r.mw = ib_alloc_mw(ia->ri_pd);
			if (IS_ERR(r->r.mw)) {
				rc = PTR_ERR(r->r.mw);
				dprintk("RPC: %s: ib_alloc_mw"
					" failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	default:
		break;
	}

	/*
	 * Allocate/init the request/reply buffers. Doing this
	 * using kmalloc for now -- one for each buf.
	 */
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		len = cdata->inline_wsize + sizeof(struct rpcrdma_req);
		/* RPC layer requests *double* size + 1K RPC_SLACK_SPACE! */
		/* Typical ~2400b, so rounding up saves work later */
		if (len < 4096)
			len = 4096;
		req = kmalloc(len, GFP_KERNEL);
		if (req == NULL) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = -ENOMEM;
			goto out;
		}
		/* Only the struct header is zeroed; the trailing send
		 * payload area is registered below, not pre-cleared. */
		memset(req, 0, sizeof(struct rpcrdma_req));
		buf->rb_send_bufs[i] = req;
		buf->rb_send_bufs[i]->rl_buffer = buf;

		rc = rpcrdma_register_internal(ia, req->rl_base,
				len - offsetof(struct rpcrdma_req, rl_base),
				&buf->rb_send_bufs[i]->rl_handle,
				&buf->rb_send_bufs[i]->rl_iov);
		if (rc)
			goto out;

		buf->rb_send_bufs[i]->rl_size = len-sizeof(struct rpcrdma_req);

		len = cdata->inline_rsize + sizeof(struct rpcrdma_rep);
		rep = kmalloc(len, GFP_KERNEL);
		if (rep == NULL) {
			dprintk("RPC: %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = -ENOMEM;
			goto out;
		}
		memset(rep, 0, sizeof(struct rpcrdma_rep));
		buf->rb_recv_bufs[i] = rep;
		buf->rb_recv_bufs[i]->rr_buffer = buf;
		init_waitqueue_head(&rep->rr_unbind);

		rc = rpcrdma_register_internal(ia, rep->rr_base,
				len - offsetof(struct rpcrdma_rep, rr_base),
				&buf->rb_recv_bufs[i]->rr_handle,
				&buf->rb_recv_bufs[i]->rr_iov);
		if (rc)
			goto out;

	}
	dprintk("RPC: %s: max_requests %d\n",
		__func__, buf->rb_max_requests);
	/* done */
	return 0;
out:
	/* Tears down everything built so far, including registered bufs. */
	rpcrdma_buffer_destroy(buf);
	return rc;
}