int isert_prepare_rdma(struct isert_cmnd *isert_pdu,
		       struct isert_connection *isert_conn,
		       enum isert_wr_op op)
{
	struct isert_buf *isert_buf = &isert_pdu->rdma_buf;
	struct isert_device *isert_dev = isert_conn->isert_dev;
	struct ib_device *ib_dev = isert_dev->ib_dev;
	int err;
	int buff_offset;
	int sg_offset, sg_cnt;
	int wr_cnt, i;

	isert_buf_init_sg(isert_buf, isert_pdu->iscsi.sg,
			  isert_pdu->iscsi.sg_cnt,
			  isert_pdu->iscsi.bufflen);

	if (op == ISER_WR_RDMA_WRITE)
		isert_buf->dma_dir = DMA_TO_DEVICE;
	else
		isert_buf->dma_dir = DMA_FROM_DEVICE;

	if (unlikely(isert_buf->sg_cnt > isert_pdu->n_sge)) {
		wr_cnt = isert_alloc_for_rdma(isert_pdu, isert_buf->sg_cnt,
					      isert_conn);
		if (unlikely(wr_cnt))
			goto out;
	}

	err = ib_dma_map_sg(ib_dev, isert_buf->sg, isert_buf->sg_cnt,
			    isert_buf->dma_dir);
	if (unlikely(!err)) {
		pr_err("Failed to DMA map iser sg:%p len:%d\n",
		       isert_buf->sg, isert_buf->sg_cnt);
		wr_cnt = -EFAULT;
		goto out;
	}

	buff_offset = 0;
	sg_cnt = 0;
	for (wr_cnt = 0, sg_offset = 0; sg_offset < isert_buf->sg_cnt; ++wr_cnt) {
		sg_cnt = min((int)isert_conn->max_sge,
			     isert_buf->sg_cnt - sg_offset);
		err = isert_wr_init(&isert_pdu->wr[wr_cnt], op, isert_buf,
				    isert_conn, isert_pdu, isert_pdu->sg_pool,
				    sg_offset, sg_cnt, buff_offset);
		if (unlikely(err < 0)) {
			wr_cnt = err;
			goto out;
		}
		buff_offset = err;
		sg_offset += sg_cnt;
	}

	for (i = 1; i < wr_cnt; ++i)
		isert_link_send_wrs(&isert_pdu->wr[i - 1], &isert_pdu->wr[i]);

out:
	TRACE_EXIT_RES(wr_cnt);
	return wr_cnt;
}
/**
 * iser_start_rdma_unaligned_sg - copy an unaligned scatterlist into a
 * contiguous bounce buffer and DMA map it for RDMA.
 */
int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				 enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
	unsigned long cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_NOIO,
			ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_NOIO);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sg = (struct scatterlist *)data->buf;
		int i;
		char *p, *from;

		for (p = mem, i = 0; i < data->size; i++) {
			from = kmap_atomic(sg[i].page, KM_USER0);
			memcpy(p, from + sg[i].offset, sg[i].length);
			kunmap_atomic(from, KM_USER0);
			p += sg[i].length;
		}
	}

	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_ctask->data_copy[cmd_dir].buf =
		&iser_ctask->data_copy[cmd_dir].sg_single;
	iser_ctask->data_copy[cmd_dir].size = 1;
	iser_ctask->data_copy[cmd_dir].copy_buf = mem;

	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_ctask->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}
/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		     struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		     u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	if (is_pci_p2pdma_page(sg_page(sg)))
		ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
	else
		ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);

	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
					  sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
					   remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
					     remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
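/*
 * A minimal, hedged usage sketch (not taken from any in-tree ULP) of how a
 * caller is expected to drive the rdma_rw_ctx API documented above: build the
 * WR chain with rdma_rw_ctx_init(), post it with rdma_rw_ctx_post(), and
 * release the mapping with rdma_rw_ctx_destroy(). Declarations live in
 * <rdma/rw.h>. The function name, the done_cqe completion handle, and the
 * inline teardown on the error path are illustrative assumptions; a real ULP
 * would normally destroy the context from its completion handler.
 */
static int ulp_issue_rdma_write(struct ib_qp *qp, u8 port_num,
				struct ib_cqe *done_cqe,
				struct scatterlist *sg, u32 sg_cnt,
				u64 remote_addr, u32 rkey)
{
	struct rdma_rw_ctx ctx;
	int ret;

	/* Map @sg and build the RDMA WRITE work request chain. */
	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sg, sg_cnt, 0,
			       remote_addr, rkey, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	/* Post the chain; completion is signalled through @done_cqe. */
	ret = rdma_rw_ctx_post(&ctx, qp, port_num, done_cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(&ctx, qp, port_num, sg, sg_cnt,
				    DMA_TO_DEVICE);
	return ret;
}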
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}
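/*
 * For symmetry with iser_dma_map_task_data() above, a sketch of the matching
 * unmap step. It mirrors the shape of the iser initiator's unmap helper, but
 * is written here purely as an illustration; treat the helper name and the
 * exact struct layout (ib_conn, device, ib_device) as assumptions rather
 * than a verified copy of the driver.
 */
static void iser_dma_unmap_task_data_sketch(struct iscsi_iser_task *iser_task,
					    struct iser_data_buf *data,
					    enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	/* Undo the ib_dma_map_sg() performed in iser_dma_map_task_data(). */
	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
}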
int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
		   struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] = (dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);
	return ret;
}
/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			       u8 port_num, struct scatterlist *sg, u32 sg_cnt,
			       struct scatterlist *prot_sg, u32 prot_sg_cnt,
			       struct ib_sig_attrs *sig_attrs,
			       u64 remote_addr, u32 rkey,
			       enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	struct ib_rdma_wr *rdma_wr;
	struct ib_send_wr *prev_wr = NULL;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large\n");
		return -EINVAL;
	}

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
	if (!ret) {
		ret = -ENOMEM;
		goto out_unmap_sg;
	}
	prot_sg_cnt = ret;

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
	if (!ctx->sig) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
	if (ret < 0)
		goto out_free_ctx;
	count += ret;
	prev_wr = &ctx->sig->data.reg_wr.wr;

	if (prot_sg_cnt) {
		ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
					  prot_sg, prot_sg_cnt, 0);
		if (ret < 0)
			goto out_destroy_data_mr;
		count += ret;

		if (ctx->sig->prot.inv_wr.next)
			prev_wr->next = &ctx->sig->prot.inv_wr;
		else
			prev_wr->next = &ctx->sig->prot.reg_wr.wr;
		prev_wr = &ctx->sig->prot.reg_wr.wr;
	} else {
		ctx->sig->prot.mr = NULL;
	}

	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->sig->sig_mr) {
		ret = -EAGAIN;
		goto out_destroy_prot_mr;
	}

	if (ctx->sig->sig_mr->need_inval) {
		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));

		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;

		prev_wr->next = &ctx->sig->sig_inv_wr;
		prev_wr = &ctx->sig->sig_inv_wr;
	}

	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	ctx->sig->sig_wr.wr.wr_cqe = NULL;
	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
	ctx->sig->sig_wr.wr.num_sge = 1;
	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	ctx->sig->sig_wr.sig_attrs = sig_attrs;
	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
	if (prot_sg_cnt)
		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
	prev_wr->next = &ctx->sig->sig_wr.wr;
	prev_wr = &ctx->sig->sig_wr.wr;
	count++;

	ctx->sig->sig_sge.addr = 0;
	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;

	rdma_wr = &ctx->sig->data.wr;
	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	prev_wr->next = &rdma_wr->wr;
	prev_wr = &rdma_wr->wr;
	count++;

	return count;

out_destroy_prot_mr:
	if (prot_sg_cnt)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
out_destroy_data_mr:
	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
out_free_ctx:
	kfree(ctx->sig);
out_unmap_prot_sg:
	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
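/*
 * A hedged sketch of how a ULP might consume rdma_rw_ctx_signature_init():
 * initialize the signature context, post the chain with rdma_rw_ctx_post(),
 * and release it with rdma_rw_ctx_destroy_signature() once the transfer
 * completes. Declarations live in <rdma/rw.h>. The function name and the
 * caller-provided sig_attrs (protocol specific, e.g. T10-PI) are assumptions
 * for illustration only, not code taken from an in-tree consumer.
 */
static int ulp_issue_pi_read(struct ib_qp *qp, u8 port_num,
			     struct ib_cqe *done_cqe,
			     struct scatterlist *sg, u32 sg_cnt,
			     struct scatterlist *prot_sg, u32 prot_sg_cnt,
			     struct ib_sig_attrs *sig_attrs,
			     u64 remote_addr, u32 rkey)
{
	struct rdma_rw_ctx ctx;
	int ret;

	/* Map data and protection SGLs and build the signature WR chain. */
	ret = rdma_rw_ctx_signature_init(&ctx, qp, port_num, sg, sg_cnt,
					 prot_sg, prot_sg_cnt, sig_attrs,
					 remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	ret = rdma_rw_ctx_post(&ctx, qp, port_num, done_cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy_signature(&ctx, qp, port_num, sg, sg_cnt,
					      prot_sg, prot_sg_cnt,
					      DMA_FROM_DEVICE);
	return ret;
}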
static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
			   struct rds_ib_mr_pool *pool,
			   struct rds_ib_mr *ibmr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	int i;
	u32 len;
	int ret = 0;

	/* We want to teardown old ibmr values here and fill it up with
	 * new sg values
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = sg;
	ibmr->sg_len = sg_len;
	ibmr->sg_dma_len = 0;
	frmr->sg_byte_len = 0;
	WARN_ON(ibmr->sg_dma_len);

	ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
					 DMA_BIDIRECTIONAL);
	if (unlikely(!ibmr->sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	frmr->sg_byte_len = 0;
	frmr->dma_npages = 0;
	len = 0;

	ret = -EINVAL;
	for (i = 0; i < ibmr->sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);

		frmr->sg_byte_len += dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < ibmr->sg_dma_len - 1)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		len += dma_len;
	}
	frmr->dma_npages += len >> PAGE_SHIFT;

	if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
		ret = -EMSGSIZE;
		goto out_unmap;
	}

	ret = rds_ib_post_reg_frmr(ibmr);
	if (ret)
		goto out_unmap;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

	return ret;

out_unmap:
	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
			DMA_BIDIRECTIONAL);
	ibmr->sg_dma_len = 0;
	return ret;
}
/* Issue an RDMA_READ using an FRMR to map the data sink */
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *head,
			 int *page_no, u32 *page_offset,
			 u32 rs_handle, u32 rs_length,
			 u64 rs_offset, bool last)
{
	struct ib_rdma_wr read_wr;
	struct ib_send_wr inv_wr;
	struct ib_reg_wr reg_wr;
	u8 key;
	int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
	int ret, read, pno, dma_nents, n;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	if (IS_ERR(frmr))
		return -ENOMEM;

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->frmr = frmr;
	nents = min_t(unsigned int, nents, xprt->sc_frmr_pg_list_len);
	read = min_t(int, (nents << PAGE_SHIFT) - *page_offset, rs_length);

	frmr->direction = DMA_FROM_DEVICE;
	frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
	frmr->sg_nents = nents;

	for (pno = 0; pno < nents; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		if (!pg_off)
			head->count++;

		sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no],
			    len, pg_off);

		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
				  frmr->sg, frmr->sg_nents, frmr->direction);
	if (!dma_nents) {
		pr_err("svcrdma: failed to dma map sg %p\n", frmr->sg);
		return -ENOMEM;
	}
	atomic_inc(&xprt->sc_dma_used);

	n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
		pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
		       frmr->mr, n, frmr->sg_nents);
		return n < 0 ? n : -EINVAL;
	}

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	ctxt->sge[0].addr = frmr->mr->iova;
	ctxt->sge[0].lkey = frmr->mr->lkey;
	ctxt->sge[0].length = frmr->mr->length;
	ctxt->count = 1;
	ctxt->read_hdr = head;

	/* Prepare REG WR */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = 0;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->lkey;
	reg_wr.access = frmr->access_flags;
	reg_wr.wr.next = &read_wr.wr;

	/* Prepare RDMA_READ */
	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.wr.send_flags = IB_SEND_SIGNALED;
	read_wr.rkey = rs_handle;
	read_wr.remote_addr = rs_offset;
	read_wr.wr.sg_list = ctxt->sge;
	read_wr.wr.num_sge = 1;
	if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
		read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
		read_wr.wr.wr_id = (unsigned long)ctxt;
		read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
	} else {
		read_wr.wr.opcode = IB_WR_RDMA_READ;
		read_wr.wr.next = &inv_wr;
		/* Prepare invalidate */
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = (unsigned long)ctxt;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
		inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
	}
	ctxt->wr_op = read_wr.wr.opcode;

	/* Post the chain */
	ret = svc_rdma_send(xprt, &reg_wr.wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	ib_dma_unmap_sg(xprt->sc_cm_id->device,
			frmr->sg, frmr->sg_nents, frmr->direction);
	svc_rdma_put_context(ctxt, 0);
	svc_rdma_put_frmr(xprt, frmr);
	return ret;
}
/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n;
	u8 key;

	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_defer_recovery(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-ENOBUFS);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page, seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frwr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frwr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	frwr->fr_state = FRWR_IS_INVALID;
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frwr->fr_mr, n, mr->mr_nents);
	rpcrdma_mr_defer_recovery(mr);
	return ERR_PTR(-EIO);

out_senderr:
	pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc);
	rpcrdma_mr_defer_recovery(mr);
	return ERR_PTR(-ENOTCONN);
}