/**
 * iser_task_rdma_finalize - deallocate registered memory
 */
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int is_rdma_aligned = 1;
	struct iser_regd_buf *regd;

	/* if we were reading, copy back to unaligned sglist,
	 * anyway dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
	}
	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		regd = &iser_task->rdma_regd[ISER_DIR_IN];
		if (regd->reg.is_fmr)
			iser_unreg_mem(&regd->reg);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
		if (regd->reg.is_fmr)
			iser_unreg_mem(&regd->reg);
	}

	/* if the data was unaligned, it was already unmapped and then copied */
	if (is_rdma_aligned)
		iser_dma_unmap_task_data(iser_task);
}
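/*
 * Illustrative aside (not part of the driver): a minimal, self-contained
 * user-space sketch of the cleanup-ownership rule that
 * iser_task_rdma_finalize() enforces above. All names below (example_task,
 * example_task_finalize, ...) are hypothetical. The invariant: when an
 * unaligned sg list was bounced through an aligned copy, the copy path
 * already unmapped and freed that memory, so the common finalize step must
 * skip its own unmap to avoid releasing the mapping twice.
 */
#include <stdbool.h>
#include <stdlib.h>

struct example_task {
	void *mapping;		/* stands in for the DMA-mapped data */
	bool  used_copy_buf;	/* true when a bounce buffer was substituted */
};

static void example_finalize_unaligned(struct example_task *t)
{
	/* the copy path owns the mapping and releases it itself,
	 * mirroring iser_finalize_rdma_unaligned_sg() */
	free(t->mapping);
	t->mapping = NULL;
}

static void example_task_finalize(struct example_task *t)
{
	if (t->used_copy_buf)
		example_finalize_unaligned(t);
	else
		free(t->mapping);	/* mirrors iser_dma_unmap_task_data() */
	/* either way the mapping is released exactly once */
}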
/**
 * Decrements the reference count for the registered buffer
 * and releases it when the count reaches zero
 *
 * returns 0 if released, 1 if deferred
 */
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
	struct device *dma_device;

	if ((atomic_read(&regd_buf->ref_count) == 0) ||
	    atomic_dec_and_test(&regd_buf->ref_count)) {
		/* if we used the dma mr, unreg is just NOP */
		if (regd_buf->reg.rkey != 0)
			iser_unreg_mem(&regd_buf->reg);

		if (regd_buf->dma_addr) {
			dma_device = regd_buf->device->ib_device->dma_device;
			dma_unmap_single(dma_device,
					 regd_buf->dma_addr,
					 regd_buf->data_size,
					 regd_buf->direction);
		}
		/* else this regd buf is associated with a task whose
		 * data we dma_unmap_single/sg later */
		return 0;
	} else {
		iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
		return 1;
	}
}
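/*
 * Illustrative aside (not part of the driver): a standalone user-space
 * sketch of the release pattern above, using C11 atomics in place of the
 * kernel's atomic_t. All names (example_buf, example_buf_release, ...) are
 * hypothetical. A buffer whose count is already zero was never referenced
 * and is released immediately; otherwise only the call that drops the last
 * reference performs the release, and earlier calls report that the
 * release was deferred.
 */
#include <stdatomic.h>
#include <stdio.h>

struct example_buf {
	atomic_int ref_count;
};

/* returns 0 if released, 1 if deferred -- same contract as above */
static int example_buf_release(struct example_buf *buf)
{
	/* atomic_fetch_sub() returns the old value, so old == 1 means the
	 * count just hit zero, matching atomic_dec_and_test() semantics */
	if (atomic_load(&buf->ref_count) == 0 ||
	    atomic_fetch_sub(&buf->ref_count, 1) == 1) {
		/* the real code unregisters the MR and dma-unmaps here */
		printf("released: %p\n", (void *)buf);
		return 0;
	}
	printf("deferred: %p\n", (void *)buf);
	return 1;
}

int main(void)
{
	struct example_buf buf = { .ref_count = 2 };

	example_buf_release(&buf);	/* deferred: one holder remains */
	example_buf_release(&buf);	/* released: last reference dropped */
	return 0;
}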