/** * iser_reg_rdma_mem - Registers memory intended for RDMA, * obtaining rkey and va * * returns 0 on success, errno code on failure */ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, enum iser_data_dir cmd_dir) { struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; struct iser_regd_buf *regd_buf; int aligned_len; int err; regd_buf = &iser_ctask->rdma_regd[cmd_dir]; aligned_len = iser_data_buf_aligned_len(mem); if (aligned_len != mem->size) { iser_err("rdma alignment violation %d/%d aligned\n", aligned_len, mem->size); iser_data_buf_dump(mem); /* allocate copy buf, if we are writing, copy the */ /* unaligned scatterlist, dma map the copy */ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0) return -ENOMEM; mem = &iser_ctask->data_copy[cmd_dir]; } iser_page_vec_build(mem, ib_conn->page_vec); err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); if (err) return err; /* take a reference on this regd buf such that it will not be released * * (eg in send dto completion) before we get the scsi response */ atomic_inc(®d_buf->ref_count); return 0; }
static int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, struct iser_data_buf *mem, struct iser_reg_resources *rsc, struct iser_mem_reg *reg) { struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; struct iser_page_vec *page_vec = rsc->page_vec; struct ib_fmr_pool *fmr_pool = rsc->fmr_pool; struct ib_pool_fmr *fmr; int ret, plen; page_vec->npages = 0; page_vec->fake_mr.page_size = SIZE_4K; plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg, mem->size, NULL, iser_set_page); if (unlikely(plen < mem->size)) { iser_err("page vec too short to hold this SG\n"); iser_data_buf_dump(mem, device->ib_device); iser_dump_page_vec(page_vec); return -EINVAL; } fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages, page_vec->npages, page_vec->pages[0]); if (IS_ERR(fmr)) { ret = PTR_ERR(fmr); iser_err("ib_fmr_pool_map_phys failed: %d\n", ret); return ret; } reg->sge.lkey = fmr->fmr->lkey; reg->rkey = fmr->fmr->rkey; reg->sge.addr = page_vec->fake_mr.iova; reg->sge.length = page_vec->fake_mr.length; reg->mem_h = fmr; iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx," " length=0x%x\n", reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length); return 0; }
/*
 * iser_page_vec_build - translate a dma-mapped SG list into @page_vec.
 *
 * Fills page_vec->pages via iser_sg_to_page_vec() and records the
 * resulting length. BUG()s if the vector cannot cover the data size
 * (an aligned SG list must always fit).
 */
static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec)
{
	int nents;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	nents = iser_sg_to_page_vec(data,page_vec);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,nents);

	page_vec->length = nents;

	/* sanity: the produced vector must span at least data_size bytes */
	if (nents * PAGE_SIZE < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}
/** * iser_reg_rdma_mem - Registers memory intended for RDMA, * obtaining rkey and va * * returns 0 on success, errno code on failure */ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, enum iser_data_dir cmd_dir) { struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; struct ib_device *ibdev = device->ib_device; struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; struct iser_regd_buf *regd_buf; int aligned_len; int err; int i; struct scatterlist *sg; regd_buf = &iser_ctask->rdma_regd[cmd_dir]; aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { iser_err("rdma alignment violation %d/%d aligned\n", aligned_len, mem->size); iser_data_buf_dump(mem, ibdev); /* unmap the command data before accessing it */ iser_dma_unmap_task_data(iser_ctask); /* allocate copy buf, if we are writing, copy the */ /* unaligned scatterlist, dma map the copy */ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0) return -ENOMEM; mem = &iser_ctask->data_copy[cmd_dir]; } /* if there a single dma entry, FMR is not needed */ if (mem->dma_nents == 1) { sg = (struct scatterlist *)mem->buf; regd_buf->reg.lkey = device->mr->lkey; regd_buf->reg.rkey = device->mr->rkey; regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]); regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]); regd_buf->reg.is_fmr = 0; iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " "va: 0x%08lX sz: %ld]\n", (unsigned int)regd_buf->reg.lkey, (unsigned int)regd_buf->reg.rkey, (unsigned long)regd_buf->reg.va, (unsigned long)regd_buf->reg.len); } else { /* use FMR for multiple dma entries */ iser_page_vec_build(mem, ib_conn->page_vec, ibdev); err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); if (err) { iser_data_buf_dump(mem, ibdev); iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, ntoh24(iser_ctask->desc.iscsi_header.dlength)); iser_err("page_vec: data_size = 0x%x, length = %d, 
offset = 0x%x\n", ib_conn->page_vec->data_size, ib_conn->page_vec->length, ib_conn->page_vec->offset); for (i=0 ; i<ib_conn->page_vec->length ; i++) iser_err("page_vec[%d] = 0x%llx\n", i, (unsigned long long) ib_conn->page_vec->pages[i]); return err; } } /* take a reference on this regd buf such that it will not be released * * (eg in send dto completion) before we get the scsi response */ atomic_inc(®d_buf->ref_count); return 0; }