static void
fmr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(r);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       r, rc);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);

	kfree(r);
}
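/* For reference: __fmr_unmap() is not shown in this excerpt. A minimal
 * sketch, assuming the ib_unmap_fmr() verb (which takes a list of FMRs
 * linked through struct ib_fmr's "list" member), could look like this:
 */
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mw->fmr.fm_mr->list);
	return rc;
}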
/*
 * Unregister and destroy buffer memory. Need to deal with
 * partial initialization, so it's callable from failed create.
 * Must be called before destroying endpoint, as registrations
 * reference it.
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	int rc, i;
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);

	/* clean up in reverse order from create
	 *   1.  recv mr memory (mr free, then kfree)
	 *   1a. bind mw memory
	 *   2.  send mr memory (mr free, then kfree)
	 *   3.  padding (if any) [moved to rpcrdma_ep_destroy]
	 *   4.  arrays
	 */
	dprintk("RPC: %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) {
			rpcrdma_deregister_internal(ia,
					buf->rb_recv_bufs[i]->rr_handle,
					&buf->rb_recv_bufs[i]->rr_iov);
			kfree(buf->rb_recv_bufs[i]);
		}
		if (buf->rb_send_bufs && buf->rb_send_bufs[i]) {
			while (!list_empty(&buf->rb_mws)) {
				struct rpcrdma_mw *r;

				r = list_entry(buf->rb_mws.next,
					       struct rpcrdma_mw, mw_list);
				list_del(&r->mw_list);
				switch (ia->ri_memreg_strategy) {
				case RPCRDMA_MTHCAFMR:
					rc = ib_dealloc_fmr(r->r.fmr);
					if (rc)
						dprintk("RPC: %s:"
							" ib_dealloc_fmr"
							" failed %i\n",
							__func__, rc);
					break;
				case RPCRDMA_MEMWINDOWS_ASYNC:
				case RPCRDMA_MEMWINDOWS:
					rc = ib_dealloc_mw(r->r.mw);
					if (rc)
						dprintk("RPC: %s:"
							" ib_dealloc_mw"
							" failed %i\n",
							__func__, rc);
					break;
				default:
					break;
				}
			}
			rpcrdma_deregister_internal(ia,
					buf->rb_send_bufs[i]->rl_handle,
					&buf->rb_send_bufs[i]->rl_iov);
			kfree(buf->rb_send_bufs[i]);
		}
	}

	kfree(buf->rb_pool);
}
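/* For reference: rpcrdma_deregister_internal() is not shown above. A
 * minimal sketch, assuming it pairs a DMA unmap against the device
 * behind ia->ri_id with ib_dereg_mr() for the internally registered
 * buffer (the ri_id field is an assumption, not from this excerpt):
 */
int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
			    struct ib_mr *mr, struct ib_sge *iov)
{
	int rc;

	ib_dma_unmap_single(ia->ri_id->device,
			    iov->addr, iov->length, DMA_BIDIRECTIONAL);

	/* buffers registered via the local DMA lkey carry no MR */
	if (mr == NULL)
		return 0;

	rc = ib_dereg_mr(mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr failed %i\n", __func__, rc);
	return rc;
}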
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_ATOMIC),
				&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (fmr->fmr)
			ib_dealloc_fmr(fmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
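/* Usage sketch: an FMR returned by rds_ib_alloc_fmr() is only useful
 * once it is mapped over a DMA address list. This illustrative helper
 * (its name and parameters are hypothetical, not from the file) shows
 * the ib_map_phys_fmr() call that binds a page list to the FMR:
 */
static int
example_map_fmr(struct rds_ib_fmr *fmr, u64 *dma_pages,
		int page_cnt, u64 io_addr)
{
	int ret;

	/* bind the physical page list to the FMR starting at io_addr */
	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		pr_warn("RDS/IB: ib_map_phys_fmr failed (err=%d)\n", ret);
	return ret;
}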
static void
fmr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int rc;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		kfree(r->r.fmr.physaddrs);
		rc = ib_dealloc_fmr(r->r.fmr.fmr);
		if (rc)
			dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
				__func__, rc);
		kfree(r);
	}
}
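/* For context: each MW torn down above was set up at init time as the
 * mirror image of this loop. A minimal sketch, assuming a PD and an
 * ib_fmr_attr are at hand (the helper name, access flags, and the
 * RPCRDMA_MAX_FMR_SGES page budget are illustrative assumptions):
 */
static struct rpcrdma_mw *
example_fmr_mw_init(struct ib_pd *pd, struct ib_fmr_attr *fmr_attr,
		    struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	/* per-MW scratch array of page addresses, freed in fmr_op_destroy */
	r->r.fmr.physaddrs = kmalloc_array(RPCRDMA_MAX_FMR_SGES,
					   sizeof(u64), GFP_KERNEL);
	if (!r->r.fmr.physaddrs)
		goto out_free;

	r->r.fmr.fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ, fmr_attr);
	if (IS_ERR(r->r.fmr.fmr))
		goto out_free_physaddrs;

	/* rb_all threads every MW through the teardown loop above */
	list_add(&r->mw_all, &buf->rb_all);
	return r;

out_free_physaddrs:
	kfree(r->r.fmr.physaddrs);
out_free:
	kfree(r);
	return NULL;
}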