static ssize_t smr_generic_atomic(struct fid_ep *ep_fid,
			const struct fi_ioc *ioc, void **desc, size_t count,
			const struct fi_ioc *compare_ioc, void **compare_desc,
			size_t compare_count, struct fi_ioc *result_ioc,
			void **result_desc, size_t result_count,
			fi_addr_t addr, const struct fi_rma_ioc *rma_ioc,
			size_t rma_count, enum fi_datatype datatype,
			enum fi_op atomic_op, void *context, uint32_t op)
{
	struct smr_ep *ep;
	struct smr_domain *domain;
	struct smr_region *peer_smr;
	struct smr_inject_buf *tx_buf;
	struct smr_cmd *cmd;
	struct iovec iov[SMR_IOV_LIMIT];
	struct iovec compare_iov[SMR_IOV_LIMIT];
	struct iovec result_iov[SMR_IOV_LIMIT];
	int peer_id, err = 0;
	uint16_t flags = 0;
	ssize_t ret = 0;
	size_t msg_len, total_len;

	assert(count <= SMR_IOV_LIMIT);
	assert(result_count <= SMR_IOV_LIMIT);
	assert(compare_count <= SMR_IOV_LIMIT);
	assert(rma_count <= SMR_IOV_LIMIT);

	ep = container_of(ep_fid, struct smr_ep, util_ep.ep_fid.fid);
	domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain);

	peer_id = (int) addr;
	ret = smr_verify_peer(ep, peer_id);
	if (ret)
		return ret;

	peer_smr = smr_peer_region(ep->region, peer_id);
	fastlock_acquire(&peer_smr->lock);

	/* An atomic may consume two command queue slots: the atomic command
	 * itself plus a trailing command carrying the RMA iocs. */
	if (peer_smr->cmd_cnt < 2) {
		ret = -FI_EAGAIN;
		goto unlock_region;
	}

	fastlock_acquire(&ep->util_ep.tx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->util_ep.tx_cq->cirq)) {
		ret = -FI_EAGAIN;
		goto unlock_cq;
	}

	cmd = ofi_cirque_tail(smr_cmd_queue(peer_smr));
	msg_len = total_len = ofi_datatype_size(datatype) *
			      ofi_total_ioc_cnt(ioc, count);

	/* Convert the ioc lists to iovecs and size the payload.  Fetch and
	 * compare operations need a result buffer; when fast_rma is disabled
	 * the peer is asked to deliver the result back (SMR_RMA_REQ). */
	switch (op) {
	case ofi_op_atomic_compare:
		assert(compare_ioc);
		ofi_ioc_to_iov(compare_ioc, compare_iov, compare_count,
			       ofi_datatype_size(datatype));
		total_len *= 2;
		/* fall through */
	case ofi_op_atomic_fetch:
		assert(result_ioc);
		ofi_ioc_to_iov(result_ioc, result_iov, result_count,
			       ofi_datatype_size(datatype));
		if (!domain->fast_rma)
			flags |= SMR_RMA_REQ;
		/* fall through */
	case ofi_op_atomic:
		if (atomic_op != FI_ATOMIC_READ) {
			assert(ioc);
			ofi_ioc_to_iov(ioc, iov, count,
				       ofi_datatype_size(datatype));
		} else {
			count = 0;
		}
		break;
	default:
		break;
	}

	/* Small payloads travel inline in the command; larger ones are staged
	 * in an inject buffer taken from the peer region's pool. */
	if (total_len <= SMR_MSG_DATA_LEN && !(flags & SMR_RMA_REQ)) {
		smr_format_inline_atomic(cmd, smr_peer_addr(ep->region)[peer_id].addr,
					 iov, count, compare_iov, compare_count,
					 op, datatype, atomic_op);
	} else if (total_len <= SMR_INJECT_SIZE) {
		tx_buf = smr_freestack_pop(smr_inject_pool(peer_smr));
		smr_format_inject_atomic(cmd, smr_peer_addr(ep->region)[peer_id].addr,
					 iov, count, result_iov, result_count,
					 compare_iov, compare_count, op, datatype,
					 atomic_op, peer_smr, tx_buf);
	} else {
		FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "message too large\n");
		ret = -FI_EINVAL;
		goto unlock_cq;
	}

	cmd->msg.hdr.op_flags |= flags;

	ofi_cirque_commit(smr_cmd_queue(peer_smr));
	peer_smr->cmd_cnt--;

	if (op != ofi_op_atomic) {
		if (flags & SMR_RMA_REQ) {
			smr_post_fetch_resp(ep, cmd,
					    (const struct iovec *) result_iov,
					    result_count);
			goto format_rma;
		}
		err = smr_fetch_result(ep, peer_smr, result_iov, result_count,
				       rma_ioc, rma_count, datatype, msg_len);
		if (err)
			FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
				"unable to fetch results\n");
	}

	ret = ep->tx_comp(ep, context, ofi_tx_cq_flags(op), err);
	if (ret) {
		FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
			"unable to process tx completion\n");
	}

format_rma:
	/* Queue the second command describing the target RMA iocs. */
	cmd = ofi_cirque_tail(smr_cmd_queue(peer_smr));
	smr_format_rma_ioc(cmd, rma_ioc, rma_count);
	ofi_cirque_commit(smr_cmd_queue(peer_smr));
	peer_smr->cmd_cnt--;
unlock_cq:
	fastlock_release(&ep->util_ep.tx_cq->cq_lock);
unlock_region:
	fastlock_release(&peer_smr->lock);
	return ret;
}
static ssize_t rxm_ep_atomic_common(struct rxm_ep *rxm_ep, struct rxm_conn *rxm_conn,
				    const struct fi_msg_atomic *msg,
				    const struct fi_ioc *comparev, void **compare_desc,
				    size_t compare_iov_count, struct fi_ioc *resultv,
				    void **result_desc, size_t result_iov_count,
				    uint32_t op, uint64_t flags)
{
	struct rxm_tx_atomic_buf *tx_buf;
	struct rxm_atomic_hdr *atomic_hdr;
	struct iovec buf_iov[RXM_IOV_LIMIT];
	struct iovec cmp_iov[RXM_IOV_LIMIT];
	size_t datatype_sz = ofi_datatype_size(msg->datatype);
	size_t buf_len = 0;
	size_t cmp_len = 0;
	size_t tot_len;
	ssize_t ret;

	assert(msg->iov_count <= RXM_IOV_LIMIT &&
	       msg->rma_iov_count <= RXM_IOV_LIMIT);

	if (flags & FI_REMOTE_CQ_DATA) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"atomic with remote CQ data not supported\n");
		return -FI_EINVAL;
	}

	if (msg->op != FI_ATOMIC_READ) {
		assert(msg->msg_iov);
		ofi_ioc_to_iov(msg->msg_iov, buf_iov, msg->iov_count,
			       datatype_sz);
		buf_len = ofi_total_iov_len(buf_iov, msg->iov_count);
	}

	if (op == ofi_op_atomic_compare) {
		assert(comparev);
		ofi_ioc_to_iov(comparev, cmp_iov, compare_iov_count,
			       datatype_sz);
		cmp_len = ofi_total_iov_len(cmp_iov, compare_iov_count);
		assert(buf_len == cmp_len);
	}

	tot_len = buf_len + cmp_len + sizeof(struct rxm_atomic_hdr) +
		  sizeof(struct rxm_pkt);

	if (tot_len > rxm_eager_limit) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"atomic data too large %zu\n", tot_len);
		return -FI_EINVAL;
	}

	ofi_ep_lock_acquire(&rxm_ep->util_ep);
	tx_buf = (struct rxm_tx_atomic_buf *)
		 rxm_tx_buf_alloc(rxm_ep, RXM_BUF_POOL_TX_ATOMIC);
	if (OFI_UNLIKELY(!tx_buf)) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"Ran out of buffers from Atomic buffer pool\n");
		ret = -FI_EAGAIN;
		goto unlock;
	}

	rxm_ep_format_atomic_pkt_hdr(rxm_conn, tx_buf, tot_len, op,
				     msg->datatype, msg->op, flags, msg->data,
				     msg->rma_iov, msg->rma_iov_count);
	tx_buf->pkt.ctrl_hdr.msg_id = ofi_buf_index(tx_buf);
	tx_buf->app_context = msg->context;

	atomic_hdr = (struct rxm_atomic_hdr *) tx_buf->pkt.data;

	ofi_copy_from_iov(atomic_hdr->data, buf_len, buf_iov,
			  msg->iov_count, 0);
	if (cmp_len)
		ofi_copy_from_iov(atomic_hdr->data + buf_len, cmp_len,
				  cmp_iov, compare_iov_count, 0);

	tx_buf->result_iov_count = result_iov_count;
	if (resultv)
		ofi_ioc_to_iov(resultv, tx_buf->result_iov, result_iov_count,
			       datatype_sz);

	ret = rxm_ep_send_atomic_req(rxm_ep, rxm_conn, tx_buf, tot_len);
	if (ret)
		ofi_buf_free(tx_buf);
unlock:
	ofi_ep_lock_release(&rxm_ep->util_ep);
	return ret;
}
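/*
 * Example (not part of the provider sources): a minimal sketch of how an
 * application reaches the paths above through the public libfabric atomic
 * API.  It assumes an endpoint that is already created, enabled, and bound
 * to completion queues, a resolved fi_addr_t for the peer, a remote
 * address/key pair exchanged out of band, and a domain that does not
 * require local memory registration (desc arguments passed as NULL).
 * Error handling and completion reaping are omitted; the local buffers
 * must stay valid until the corresponding completions are read.
 */
#include <rdma/fabric.h>
#include <rdma/fi_atomic.h>

static int example_fetch_add_and_cswap(struct fid_ep *ep, fi_addr_t peer,
					uint64_t remote_addr, uint64_t remote_key)
{
	static uint64_t operand = 1, compare = 0, result = 0;
	ssize_t ret;

	/* Remote fetch-and-add: the prior value of the target is written
	 * into 'result'.  This is serviced as ofi_op_atomic_fetch in the
	 * provider functions above. */
	ret = fi_fetch_atomic(ep, &operand, 1, NULL, &result, NULL, peer,
			      remote_addr, remote_key, FI_UINT64, FI_SUM, NULL);
	if (ret)
		return (int) ret;

	/* Remote compare-and-swap: maps to ofi_op_atomic_compare. */
	ret = fi_compare_atomic(ep, &operand, 1, NULL, &compare, NULL,
				&result, NULL, peer, remote_addr, remote_key,
				FI_UINT64, FI_CSWAP, NULL);
	return (int) ret;
}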