static ssize_t fi_ibv_msg_ep_atomic_readwritemsg(struct fid_ep *ep_fid, const struct fi_msg_atomic *msg, struct fi_ioc *resultv, void **result_desc, size_t result_count, uint64_t flags) { struct fi_ibv_msg_ep *ep; struct ibv_send_wr wr; size_t count_copy; int ret; if (msg->iov_count != 1 || msg->msg_iov->count != 1) return -FI_E2BIG; count_copy = msg->iov_count; ret = fi_ibv_msg_ep_atomic_readwritevalid(ep_fid, msg->datatype, msg->op, &count_copy); if (ret) return ret; ep = container_of(ep_fid, struct fi_ibv_msg_ep, ep_fid); memset(&wr, 0, sizeof(wr)); switch (msg->op) { case FI_ATOMIC_READ: wr.opcode = IBV_WR_RDMA_READ; wr.wr.rdma.remote_addr = msg->rma_iov->addr; wr.wr.rdma.rkey = (uint32_t) (uintptr_t) msg->rma_iov->key; break; case FI_SUM: wr.opcode = IBV_WR_ATOMIC_FETCH_AND_ADD; wr.wr.atomic.remote_addr = msg->rma_iov->addr; wr.wr.atomic.compare_add = (uintptr_t) msg->addr; wr.wr.atomic.swap = 0; wr.wr.atomic.rkey = (uint32_t) (uintptr_t) msg->rma_iov->key; break; default: return -ENOSYS; } wr.send_flags = VERBS_COMP_FLAGS(ep, flags) | IBV_SEND_FENCE; if (flags & FI_REMOTE_CQ_DATA) wr.imm_data = htonl((uint32_t) msg->data); return fi_ibv_send_buf(ep, &wr, resultv->addr, sizeof(uint64_t), result_desc[0], msg->context); }
/*
 * Fill the work request's scatter-gather list from an iovec and post it.
 * When memory descriptors are supplied their registered keys are used;
 * otherwise the payload is set up for inline transmission.  The total
 * payload length feeds the inject/completion flag selection.
 */
ssize_t fi_ibv_send_iov_flags(struct fi_ibv_msg_ep *ep, struct ibv_send_wr *wr,
			      const struct iovec *iov, void **desc, int count,
			      void *context, uint64_t flags)
{
	size_t total = 0;

	if (desc)
		fi_ibv_set_sge_iov(wr->sg_list, iov, count, desc, total);
	else
		fi_ibv_set_sge_iov_inline(wr->sg_list, iov, count, total);

	wr->send_flags = VERBS_INJECT_FLAGS(ep, total, flags) |
			 VERBS_COMP_FLAGS(ep, flags);

	return fi_ibv_send(ep, wr, total, count, context);
}
static ssize_t fi_ibv_msg_ep_atomic_writemsg(struct fid_ep *ep_fid, const struct fi_msg_atomic *msg, uint64_t flags) { struct fi_ibv_msg_ep *ep; struct ibv_send_wr wr; size_t count_copy; int ret; if (msg->iov_count != 1 || msg->msg_iov->count != 1) return -FI_E2BIG; count_copy = msg->iov_count; ret = fi_ibv_msg_ep_atomic_writevalid(ep_fid, msg->datatype, msg->op, &count_copy); if (ret) return ret; memset(&wr, 0, sizeof(wr)); switch (msg->op) { case FI_ATOMIC_WRITE: if (flags & FI_REMOTE_CQ_DATA) { wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM; wr.imm_data = htonl((uint32_t)msg->data); } else { wr.opcode = IBV_WR_RDMA_WRITE; } wr.wr.rdma.remote_addr = msg->rma_iov->addr; wr.wr.rdma.rkey = (uint32_t) (uintptr_t) msg->rma_iov->key; break; default: return -ENOSYS; } ep = container_of(ep_fid, struct fi_ibv_msg_ep, ep_fid); wr.send_flags = VERBS_INJECT_FLAGS(ep, sizeof(uint64_t), flags) | VERBS_COMP_FLAGS(ep, flags) | IBV_SEND_FENCE; return fi_ibv_send_buf(ep, &wr, msg->msg_iov->addr, sizeof(uint64_t), msg->desc[0], msg->context); }
static ssize_t fi_ibv_msg_ep_atomic_compwritemsg(struct fid_ep *ep_fid, const struct fi_msg_atomic *msg, const struct fi_ioc *comparev, void **compare_desc, size_t compare_count, struct fi_ioc *resultv, void **result_desc, size_t result_count, uint64_t flags) { struct fi_ibv_msg_ep *ep; struct ibv_send_wr wr; size_t count_copy; int ret; if (msg->iov_count != 1 || msg->msg_iov->count != 1) return -FI_E2BIG; count_copy = msg->iov_count; ret = fi_ibv_msg_ep_atomic_compwritevalid(ep_fid, msg->datatype, msg->op, &count_copy); if (ret) return ret; memset(&wr, 0, sizeof(wr)); wr.opcode = IBV_WR_ATOMIC_CMP_AND_SWP; wr.wr.atomic.remote_addr = msg->rma_iov->addr; wr.wr.atomic.compare_add = (uintptr_t) comparev->addr; wr.wr.atomic.swap = (uintptr_t) msg->addr; wr.wr.atomic.rkey = (uint32_t) (uintptr_t) msg->rma_iov->key; ep = container_of(ep_fid, struct fi_ibv_msg_ep, ep_fid); wr.send_flags = VERBS_COMP_FLAGS(ep, flags) | IBV_SEND_FENCE; if (flags & FI_REMOTE_CQ_DATA) wr.imm_data = htonl((uint32_t) msg->data); return fi_ibv_send_buf(ep, &wr, resultv->addr, sizeof(uint64_t), result_desc[0], msg->context); }