static void smr_format_inline_atomic(struct smr_cmd *cmd, fi_addr_t peer_id,
				     const struct iovec *iov, size_t count,
				     const struct iovec *compv,
				     size_t comp_count, uint32_t op,
				     enum fi_datatype datatype,
				     enum fi_op atomic_op)
{
	size_t comp_size;

	smr_generic_format(cmd, peer_id, op, 0, datatype, atomic_op, 0, 0);
	cmd->msg.hdr.op_src = smr_src_inline;

	switch (op) {
	case ofi_op_atomic:
	case ofi_op_atomic_fetch:
		cmd->msg.hdr.size = ofi_copy_from_iov(cmd->msg.data.msg,
						      SMR_MSG_DATA_LEN,
						      iov, count, 0);
		break;
	case ofi_op_atomic_compare:
		cmd->msg.hdr.size = ofi_copy_from_iov(cmd->msg.data.buf,
						      SMR_MSG_DATA_LEN,
						      iov, count, 0);
		comp_size = ofi_copy_from_iov(cmd->msg.data.comp,
					      SMR_MSG_DATA_LEN,
					      compv, comp_count, 0);
		if (comp_size != cmd->msg.hdr.size)
			FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
				"atomic and compare buffer size mismatch\n");
		break;
	default:
		break;
	}
}
void rxd_init_data_pkt(struct rxd_ep *ep, struct rxd_x_entry *tx_entry,
		       struct rxd_pkt_entry *pkt_entry)
{
	struct rxd_data_pkt *data_pkt = (struct rxd_data_pkt *) (pkt_entry->pkt);
	uint32_t seg_size;

	seg_size = tx_entry->cq_entry.len - tx_entry->bytes_done;
	seg_size = MIN(rxd_ep_domain(ep)->max_seg_sz, seg_size);

	data_pkt->base_hdr.version = RXD_PROTOCOL_VERSION;
	data_pkt->base_hdr.type = (tx_entry->cq_entry.flags &
				   (FI_READ | FI_REMOTE_READ)) ?
				  RXD_DATA_READ : RXD_DATA;

	data_pkt->ext_hdr.rx_id = tx_entry->rx_id;
	data_pkt->ext_hdr.tx_id = tx_entry->tx_id;
	data_pkt->ext_hdr.seg_no = tx_entry->next_seg_no++;
	data_pkt->base_hdr.peer = ep->peers[tx_entry->peer].peer_addr;

	pkt_entry->pkt_size = ofi_copy_from_iov(data_pkt->msg, seg_size,
						tx_entry->iov,
						tx_entry->iov_count,
						tx_entry->bytes_done);
	pkt_entry->peer = tx_entry->peer;

	tx_entry->bytes_done += pkt_entry->pkt_size;

	pkt_entry->pkt_size += sizeof(*data_pkt) + ep->tx_prefix_size;
}
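For illustration, the per-packet seg_size computation above implies a simple segmentation of the whole transfer. A minimal sketch, assuming only the MIN() macro already used above; count_segments_sketch() is a hypothetical helper, not part of the rxd provider.

static uint64_t count_segments_sketch(uint64_t total_len, uint64_t max_seg_sz)
{
	/* Mirror the rxd_init_data_pkt() arithmetic: each pass takes at most
	 * max_seg_sz bytes until the transfer is consumed, e.g.
	 * count_segments_sketch(10000, 4096) == 3 (4096 + 4096 + 1808). */
	uint64_t bytes_done = 0, segs = 0;

	while (bytes_done < total_len) {
		bytes_done += MIN(max_seg_sz, total_len - bytes_done);
		segs++;
	}
	return segs;
}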
void smr_format_inline(struct smr_cmd *cmd, fi_addr_t peer_id,
		       const struct iovec *iov, size_t count, uint32_t op,
		       uint64_t tag, uint64_t data, uint64_t op_flags)
{
	smr_generic_format(cmd, peer_id, op, tag, 0, 0, data, op_flags);
	cmd->msg.hdr.op_src = smr_src_inline;
	cmd->msg.hdr.size = ofi_copy_from_iov(cmd->msg.data.msg,
					      SMR_MSG_DATA_LEN, iov, count, 0);
}
static size_t rxd_init_msg(void **ptr, const struct iovec *iov,
			   size_t iov_count, size_t total_len,
			   size_t avail_len)
{
	size_t done;

	done = ofi_copy_from_iov(*ptr, MIN(total_len, avail_len),
				 iov, iov_count, 0);

	*ptr = (char *) (*ptr) + done;

	return done;
}
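The helpers above rely on ofi_copy_from_iov() gathering from a scattered iov into one contiguous buffer. Below is a minimal sketch of the assumed semantics (skip iov_offset bytes, copy at most bufsize bytes, return the byte count); it is an illustrative re-implementation, not the ofi util routine itself, and assumes <string.h>, <stdint.h> and <sys/uio.h> are available as they are in the surrounding sources.

static uint64_t copy_from_iov_sketch(void *buf, uint64_t bufsize,
				     const struct iovec *iov, size_t iov_count,
				     uint64_t iov_offset)
{
	/* Treat the iov as one contiguous byte stream: skip iov_offset bytes,
	 * then gather up to bufsize bytes into buf and report how many were
	 * copied (assumed ofi_copy_from_iov() behavior). */
	uint64_t done = 0, len;
	size_t i;

	for (i = 0; i < iov_count && done < bufsize; i++) {
		if (iov_offset >= iov[i].iov_len) {
			iov_offset -= iov[i].iov_len;	/* skip whole entry */
			continue;
		}

		len = iov[i].iov_len - iov_offset;
		if (len > bufsize - done)
			len = bufsize - done;

		memcpy((char *) buf + done,
		       (char *) iov[i].iov_base + iov_offset, len);
		done += len;
		iov_offset = 0;
	}
	return done;
}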
void smr_format_inject(struct smr_cmd *cmd, fi_addr_t peer_id,
		       const struct iovec *iov, size_t count, uint32_t op,
		       uint64_t tag, uint64_t data, uint64_t op_flags,
		       struct smr_region *smr, struct smr_inject_buf *tx_buf)
{
	smr_generic_format(cmd, peer_id, op, tag, 0, 0, data, op_flags);
	cmd->msg.hdr.op_src = smr_src_inject;
	cmd->msg.hdr.src_data = (char **) tx_buf - (char **) smr;
	cmd->msg.hdr.size = ofi_copy_from_iov(tx_buf->data, SMR_INJECT_SIZE,
					      iov, count, 0);
}
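Taken together with smr_format_inline() above, the natural use of these two formatters is a size-based choice. A minimal sketch, assuming ofi_total_iov_len() drives the decision; format_cmd_sketch() is a hypothetical wrapper, and the real selection happens in the smr send path.

static void format_cmd_sketch(struct smr_cmd *cmd, fi_addr_t peer_id,
			      const struct iovec *iov, size_t count,
			      uint32_t op, uint64_t tag, uint64_t data,
			      uint64_t op_flags, struct smr_region *smr,
			      struct smr_inject_buf *tx_buf)
{
	/* Payloads that fit the command's inline region avoid the shared
	 * inject buffer entirely; larger payloads are staged in tx_buf. */
	if (ofi_total_iov_len(iov, count) <= SMR_MSG_DATA_LEN)
		smr_format_inline(cmd, peer_id, iov, count, op, tag, data,
				  op_flags);
	else
		smr_format_inject(cmd, peer_id, iov, count, op, tag, data,
				  op_flags, smr, tx_buf);
}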
static void smr_format_inject_atomic(struct smr_cmd *cmd, fi_addr_t peer_id,
				     const struct iovec *iov, size_t count,
				     const struct iovec *resultv,
				     size_t result_count,
				     const struct iovec *compv,
				     size_t comp_count, uint32_t op,
				     enum fi_datatype datatype,
				     enum fi_op atomic_op,
				     struct smr_region *smr,
				     struct smr_inject_buf *tx_buf)
{
	size_t comp_size;

	smr_generic_format(cmd, peer_id, op, 0, datatype, atomic_op, 0, 0);
	cmd->msg.hdr.op_src = smr_src_inject;
	cmd->msg.hdr.src_data = (char **) tx_buf - (char **) smr;

	switch (op) {
	case ofi_op_atomic:
	case ofi_op_atomic_fetch:
		if (atomic_op == FI_ATOMIC_READ)
			cmd->msg.hdr.size = ofi_total_iov_len(resultv,
							      result_count);
		else
			cmd->msg.hdr.size = ofi_copy_from_iov(tx_buf->data,
							      SMR_INJECT_SIZE,
							      iov, count, 0);
		break;
	case ofi_op_atomic_compare:
		cmd->msg.hdr.size = ofi_copy_from_iov(tx_buf->buf,
						      SMR_COMP_INJECT_SIZE,
						      iov, count, 0);
		comp_size = ofi_copy_from_iov(tx_buf->comp,
					      SMR_COMP_INJECT_SIZE,
					      compv, comp_count, 0);
		if (comp_size != cmd->msg.hdr.size)
			FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
				"atomic and compare buffer size mismatch\n");
		break;
	default:
		break;
	}
}
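The compare branch above writes the operand into tx_buf->buf and the compare data into tx_buf->comp, each bounded by SMR_COMP_INJECT_SIZE. A sketch of the inject-buffer layout this assumes; the authoritative definition of struct smr_inject_buf lives in the provider headers, so this union is illustrative only.

union smr_inject_buf_sketch {
	uint8_t data[SMR_INJECT_SIZE];		/* plain ops use the full region */
	struct {
		uint8_t buf[SMR_COMP_INJECT_SIZE];	/* operand half */
		uint8_t comp[SMR_COMP_INJECT_SIZE];	/* compare half */
	};
};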
static ssize_t rxm_ep_readv(struct fid_ep *ep_fid, const struct iovec *iov,
			    void **desc, size_t count, fi_addr_t src_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = ofi_total_iov_len(iov, count), .key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov, .desc = desc, .iov_count = count,
		.addr = src_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};

	return rxm_ep_rma_common(rxm_ep, &msg, rxm_ep_tx_flags(rxm_ep),
				 fi_readmsg, FI_READ);
}

static ssize_t rxm_ep_read(struct fid_ep *ep_fid, void *buf, size_t len,
			   void *desc, fi_addr_t src_addr, uint64_t addr,
			   uint64_t key, void *context)
{
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = &desc, .iov_count = 1,
		.addr = src_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_rma_common(rxm_ep, &msg, rxm_ep_tx_flags(rxm_ep),
				 fi_readmsg, FI_READ);
}

static inline void
rxm_ep_format_rma_msg(struct rxm_rma_buf *rma_buf,
		      const struct fi_msg_rma *orig_msg,
		      struct iovec *rxm_iov, struct fi_msg_rma *rxm_msg)
{
	rxm_msg->context = rma_buf;
	rxm_msg->addr = orig_msg->addr;
	rxm_msg->data = orig_msg->data;

	ofi_copy_from_iov(rma_buf->pkt.data, rma_buf->pkt.hdr.size,
			  orig_msg->msg_iov, orig_msg->iov_count, 0);

	rxm_iov->iov_base = &rma_buf->pkt.data;
	rxm_iov->iov_len = rma_buf->pkt.hdr.size;
	rxm_msg->msg_iov = rxm_iov;
	rxm_msg->desc = &rma_buf->hdr.desc;
	rxm_msg->iov_count = 1;

	rxm_msg->rma_iov = orig_msg->rma_iov;
	rxm_msg->rma_iov_count = orig_msg->rma_iov_count;
}

static inline ssize_t
rxm_ep_rma_emulate_inject_msg(struct rxm_ep *rxm_ep, struct rxm_conn *rxm_conn,
			      size_t total_size, const struct fi_msg_rma *msg,
			      uint64_t flags)
{
	struct rxm_rma_buf *rma_buf;
	ssize_t ret;
	struct iovec rxm_msg_iov = { 0 };
	struct fi_msg_rma rxm_rma_msg = { 0 };

	assert(msg->rma_iov_count <= rxm_ep->rxm_info->tx_attr->rma_iov_limit);

	ofi_ep_lock_acquire(&rxm_ep->util_ep);
	rma_buf = rxm_rma_buf_alloc(rxm_ep);
	if (OFI_UNLIKELY(!rma_buf)) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"Ran out of buffers from RMA buffer pool\n");
		ret = -FI_ENOMEM;
		goto unlock;
	}

	rma_buf->pkt.hdr.size = total_size;
	rma_buf->app_context = msg->context;
	rma_buf->flags = flags;
	rxm_ep_format_rma_msg(rma_buf, msg, &rxm_msg_iov, &rxm_rma_msg);

	flags = (flags & ~FI_INJECT) | FI_COMPLETION;

	ret = fi_writemsg(rxm_conn->msg_ep, &rxm_rma_msg, flags);
	if (OFI_UNLIKELY(ret)) {
		if (ret == -FI_EAGAIN)
			rxm_ep_do_progress(&rxm_ep->util_ep);
		ofi_buf_free(rma_buf);
	}
unlock:
	ofi_ep_lock_release(&rxm_ep->util_ep);
	return ret;
}

static inline ssize_t
rxm_ep_rma_emulate_inject(struct rxm_ep *rxm_ep, struct rxm_conn *rxm_conn,
			  const void *buf, size_t len, uint64_t data,
			  fi_addr_t dest_addr, uint64_t addr, uint64_t key,
			  uint64_t flags)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = NULL, .iov_count = 1,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = NULL, .data = data,
	};

	return rxm_ep_rma_emulate_inject_msg(rxm_ep, rxm_conn, len, &msg, flags);
}

static inline ssize_t
rxm_ep_rma_inject_common(struct rxm_ep *rxm_ep, const struct fi_msg_rma *msg,
			 uint64_t flags)
{
	struct rxm_conn *rxm_conn;
	size_t total_size = ofi_total_iov_len(msg->msg_iov, msg->iov_count);
	ssize_t ret;

	assert(total_size <= rxm_ep->rxm_info->tx_attr->inject_size);

	ret = rxm_ep_prepare_tx(rxm_ep, msg->addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	if ((total_size <= rxm_ep->msg_info->tx_attr->inject_size) &&
	    !(flags & FI_COMPLETION) &&
	    (msg->iov_count == 1) && (msg->rma_iov_count == 1)) {
		if (flags & FI_REMOTE_CQ_DATA) {
			ret = fi_inject_writedata(rxm_conn->msg_ep,
						  msg->msg_iov->iov_base,
						  msg->msg_iov->iov_len,
						  msg->data, msg->addr,
						  msg->rma_iov->addr,
						  msg->rma_iov->key);
		} else {
			ret = fi_inject_write(rxm_conn->msg_ep,
					      msg->msg_iov->iov_base,
					      msg->msg_iov->iov_len, msg->addr,
					      msg->rma_iov->addr,
					      msg->rma_iov->key);
		}
		if (OFI_LIKELY(!ret)) {
			ofi_ep_wr_cntr_inc(&rxm_ep->util_ep);
		} else {
			FI_DBG(&rxm_prov, FI_LOG_EP_DATA,
			       "fi_inject_write* for MSG provider failed with ret - %" PRId64"\n",
			       ret);
			if (OFI_LIKELY(ret == -FI_EAGAIN))
				rxm_ep_progress(&rxm_ep->util_ep);
		}
		return ret;
	} else {
		return rxm_ep_rma_emulate_inject_msg(rxm_ep, rxm_conn,
						     total_size, msg, flags);
	}
}

static inline ssize_t
rxm_ep_generic_writemsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg,
			uint64_t flags)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	if (flags & FI_INJECT)
		return rxm_ep_rma_inject_common(rxm_ep, msg, flags);
	else
		return rxm_ep_rma_common(rxm_ep, msg, flags,
					 fi_writemsg, FI_WRITE);
}

static inline ssize_t
rxm_ep_writemsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg,
		uint64_t flags)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, msg,
				       flags | rxm_ep->util_ep.tx_msg_flags);
}

static ssize_t rxm_ep_writev(struct fid_ep *ep_fid, const struct iovec *iov,
			     void **desc, size_t count, fi_addr_t dest_addr,
			     uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = ofi_total_iov_len(iov, count), .key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov, .desc = desc, .iov_count = count,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_writedata(struct fid_ep *ep_fid, const void *buf,
				size_t len, void *desc, uint64_t data,
				fi_addr_t dest_addr, uint64_t addr,
				uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = &desc, .iov_count = 1,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = data,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, &msg,
				       rxm_ep_tx_flags(rxm_ep) |
				       FI_REMOTE_CQ_DATA);
}

static ssize_t rxm_ep_write(struct fid_ep *ep_fid, const void *buf,
			    size_t len, void *desc, fi_addr_t dest_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = &desc, .iov_count = 1,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_inject_write(struct fid_ep *ep_fid, const void *buf,
				   size_t len, fi_addr_t dest_addr,
				   uint64_t addr, uint64_t key)
{
	ssize_t ret;
	struct rxm_conn *rxm_conn;
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	ret = rxm_ep_prepare_tx(rxm_ep, dest_addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	if (len <= rxm_ep->msg_info->tx_attr->inject_size) {
		ret = fi_inject_write(rxm_conn->msg_ep, buf, len,
				      dest_addr, addr, key);
		if (OFI_LIKELY(!ret)) {
			ofi_ep_wr_cntr_inc(&rxm_ep->util_ep);
		} else {
			FI_DBG(&rxm_prov, FI_LOG_EP_DATA,
			       "fi_inject_write for MSG provider failed with ret - %" PRId64"\n",
			       ret);
			if (OFI_LIKELY(ret == -FI_EAGAIN))
				rxm_ep_progress(&rxm_ep->util_ep);
		}
		return ret;
	} else {
		return rxm_ep_rma_emulate_inject(rxm_ep, rxm_conn, buf, len, 0,
						 dest_addr, addr, key,
						 FI_INJECT);
	}
}

static ssize_t rxm_ep_inject_writedata(struct fid_ep *ep_fid, const void *buf,
				       size_t len, uint64_t data,
				       fi_addr_t dest_addr, uint64_t addr,
				       uint64_t key)
{
	ssize_t ret;
	struct rxm_conn *rxm_conn;
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	ret = rxm_ep_prepare_tx(rxm_ep, dest_addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	if (len <= rxm_ep->msg_info->tx_attr->inject_size) {
		ret = fi_inject_writedata(rxm_conn->msg_ep, buf, len, data,
					  dest_addr, addr, key);
		if (OFI_LIKELY(!ret)) {
			ofi_ep_wr_cntr_inc(&rxm_ep->util_ep);
		} else {
			FI_DBG(&rxm_prov, FI_LOG_EP_DATA,
			       "fi_inject_writedata for MSG provider failed with ret - %" PRId64"\n",
			       ret);
			if (OFI_LIKELY(ret == -FI_EAGAIN))
				rxm_ep_progress(&rxm_ep->util_ep);
		}
		return ret;
	} else {
		return rxm_ep_rma_emulate_inject(rxm_ep, rxm_conn, buf, len,
						 data, dest_addr, addr, key,
						 FI_REMOTE_CQ_DATA | FI_INJECT);
	}
}

struct fi_ops_rma rxm_ops_rma = {
	.size = sizeof(struct fi_ops_rma),
	.read = rxm_ep_read,
	.readv = rxm_ep_readv,
	.readmsg = rxm_ep_readmsg,
	.write = rxm_ep_write,
	.writev = rxm_ep_writev,
	.writemsg = rxm_ep_writemsg,
	.inject = rxm_ep_inject_write,
	.writedata = rxm_ep_writedata,
	.injectdata = rxm_ep_inject_writedata,
};
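For context, the fi_ops_rma table above is what application-level RMA calls dispatch through: fi_write() lands in rxm_ep_write() and fi_inject_write() in rxm_ep_inject_write(). A minimal application-side usage sketch follows; the endpoint, registered buffer, destination address, remote address/key, and the inject_size threshold are assumed to have been set up by the caller, and post_write_sketch() is a hypothetical helper.

#include <rdma/fabric.h>
#include <rdma/fi_rma.h>

static ssize_t post_write_sketch(struct fid_ep *ep, const void *buf,
				 size_t len, void *desc, fi_addr_t dest,
				 uint64_t remote_addr, uint64_t rkey,
				 size_t inject_size, void *context)
{
	/* Small payloads can use the inject path: no completion is generated
	 * and the buffer is reusable on return. Larger payloads take the
	 * regular write path and complete asynchronously. */
	if (len <= inject_size)
		return fi_inject_write(ep, buf, len, dest, remote_addr, rkey);

	return fi_write(ep, buf, len, desc, dest, remote_addr, rkey, context);
}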
static ssize_t rxm_ep_readv(struct fid_ep *ep_fid, const struct iovec *iov,
			    void **desc, size_t count, fi_addr_t src_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = ofi_total_iov_len(iov, count), .key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov, .desc = desc, .iov_count = count,
		.addr = src_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_readmsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_read(struct fid_ep *ep_fid, void *buf, size_t len,
			   void *desc, fi_addr_t src_addr, uint64_t addr,
			   uint64_t key, void *context)
{
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = &desc, .iov_count = 1,
		.addr = src_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_readmsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_rma_inject(struct fid_ep *msg_ep, struct rxm_ep *rxm_ep,
				 const struct fi_msg_rma *msg, uint64_t flags)
{
	struct rxm_tx_entry *tx_entry;
	struct rxm_tx_buf *tx_buf;
	struct fi_msg_rma msg_rma;
	struct iovec iov;
	size_t size;
	ssize_t ret;

	size = ofi_total_iov_len(msg->msg_iov, msg->iov_count);
	if (size > rxm_ep->rxm_info->tx_attr->inject_size)
		return -FI_EMSGSIZE;

	/* Use fi_inject_write instead of fi_writemsg since the latter
	 * generates completion by default */
	if (size <= rxm_ep->msg_info->tx_attr->inject_size &&
	    !(flags & FI_COMPLETION)) {
		if (flags & FI_REMOTE_CQ_DATA)
			return fi_inject_writedata(msg_ep,
						   msg->msg_iov->iov_base,
						   msg->msg_iov->iov_len,
						   msg->data, msg->addr,
						   msg->rma_iov->addr,
						   msg->rma_iov->key);
		else
			return fi_inject_write(msg_ep, msg->msg_iov->iov_base,
					       msg->msg_iov->iov_len,
					       msg->addr, msg->rma_iov->addr,
					       msg->rma_iov->key);
	}

	tx_buf = rxm_tx_buf_get(rxm_ep, RXM_BUF_POOL_TX_MSG);
	if (!tx_buf) {
		FI_WARN(&rxm_prov, FI_LOG_CQ, "TX queue full!\n");
		rxm_ep_progress_multi(&rxm_ep->util_ep);
		return -FI_EAGAIN;
	}

	tx_entry = rxm_tx_entry_get(&rxm_ep->send_queue);
	if (!tx_entry) {
		rxm_ep_progress_multi(&rxm_ep->util_ep);
		ret = -FI_EAGAIN;
		goto err1;
	}

	tx_entry->state = RXM_TX;
	tx_entry->flags = flags;
	tx_entry->comp_flags = FI_RMA | FI_WRITE;
	tx_entry->tx_buf = tx_buf;

	ofi_copy_from_iov(tx_buf->pkt.data, size, msg->msg_iov,
			  msg->iov_count, 0);

	iov.iov_base = &tx_buf->pkt.data;
	iov.iov_len = size;

	msg_rma.msg_iov = &iov;
	msg_rma.desc = &tx_buf->hdr.desc;
	msg_rma.iov_count = 1;
	msg_rma.addr = msg->addr;
	msg_rma.rma_iov = msg->rma_iov;
	msg_rma.rma_iov_count = msg->rma_iov_count;
	msg_rma.context = tx_entry;
	msg_rma.data = msg->data;
	flags = (flags & ~FI_INJECT) | FI_COMPLETION;

	ret = fi_writemsg(msg_ep, &msg_rma, flags);
	if (ret) {
		if (ret == -FI_EAGAIN)
			rxm_ep_progress_multi(&rxm_ep->util_ep);
		goto err2;
	}
	return 0;
err2:
	rxm_tx_entry_release(&rxm_ep->send_queue, tx_entry);
err1:
	rxm_tx_buf_release(rxm_ep, tx_buf);
	return ret;
}

static ssize_t rxm_ep_writemsg(struct fid_ep *ep_fid,
			       const struct fi_msg_rma *msg, uint64_t flags)
{
	struct util_cmap_handle *handle;
	struct rxm_conn *rxm_conn;
	struct rxm_ep *rxm_ep;
	int ret;

	rxm_ep = container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);

	ret = ofi_cmap_get_handle(rxm_ep->util_ep.cmap, msg->addr, &handle);
	if (OFI_UNLIKELY(ret))
		return ret;
	rxm_conn = container_of(handle, struct rxm_conn, handle);

	if (flags & FI_INJECT)
		return rxm_ep_rma_inject(rxm_conn->msg_ep, rxm_ep, msg, flags);
	else
		return rxm_ep_rma_common(rxm_conn->msg_ep, rxm_ep, msg, flags,
					 fi_writemsg, FI_WRITE);
}

static ssize_t rxm_ep_writev(struct fid_ep *ep_fid, const struct iovec *iov,
			     void **desc, size_t count, fi_addr_t dest_addr,
			     uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = ofi_total_iov_len(iov, count), .key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov, .desc = desc, .iov_count = count,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_writedata(struct fid_ep *ep_fid, const void *buf,
				size_t len, void *desc, uint64_t data,
				fi_addr_t dest_addr, uint64_t addr,
				uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = &desc, .iov_count = 1,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = data,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg,
			       rxm_ep_tx_flags(rxm_ep) | FI_REMOTE_CQ_DATA);
}

static ssize_t rxm_ep_write(struct fid_ep *ep_fid, const void *buf,
			    size_t len, void *desc, fi_addr_t dest_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = &desc, .iov_count = 1,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = context, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_inject_write(struct fid_ep *ep_fid, const void *buf,
				   size_t len, fi_addr_t dest_addr,
				   uint64_t addr, uint64_t key)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = NULL, .iov_count = 1,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = NULL, .data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg,
			       (rxm_ep_tx_flags(rxm_ep) & ~FI_COMPLETION) |
			       FI_INJECT);
}

static ssize_t rxm_ep_inject_writedata(struct fid_ep *ep_fid, const void *buf,
				       size_t len, uint64_t data,
				       fi_addr_t dest_addr, uint64_t addr,
				       uint64_t key)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr, .len = len, .key = key,
	};
	struct iovec iov = {
		.iov_base = (void *) buf, .iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov, .desc = NULL, .iov_count = 1,
		.addr = dest_addr, .rma_iov = &rma_iov, .rma_iov_count = 1,
		.context = NULL, .data = data,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg,
			       (rxm_ep_tx_flags(rxm_ep) & ~FI_COMPLETION) |
			       FI_INJECT | FI_REMOTE_CQ_DATA);
}

struct fi_ops_rma rxm_ops_rma = {
	.size = sizeof(struct fi_ops_rma),
	.read = rxm_ep_read,
	.readv = rxm_ep_readv,
	.readmsg = rxm_ep_readmsg,
	.write = rxm_ep_write,
	.writev = rxm_ep_writev,
	.writemsg = rxm_ep_writemsg,
	.inject = rxm_ep_inject_write,
	.writedata = rxm_ep_writedata,
	.injectdata = rxm_ep_inject_writedata,
};
static ssize_t rxm_ep_atomic_common(struct rxm_ep *rxm_ep,
				    struct rxm_conn *rxm_conn,
				    const struct fi_msg_atomic *msg,
				    const struct fi_ioc *comparev,
				    void **compare_desc,
				    size_t compare_iov_count,
				    struct fi_ioc *resultv, void **result_desc,
				    size_t result_iov_count, uint32_t op,
				    uint64_t flags)
{
	struct rxm_tx_atomic_buf *tx_buf;
	struct rxm_atomic_hdr *atomic_hdr;
	struct iovec buf_iov[RXM_IOV_LIMIT];
	struct iovec cmp_iov[RXM_IOV_LIMIT];
	size_t datatype_sz = ofi_datatype_size(msg->datatype);
	size_t buf_len = 0;
	size_t cmp_len = 0;
	size_t tot_len;
	ssize_t ret;

	assert(msg->iov_count <= RXM_IOV_LIMIT &&
	       msg->rma_iov_count <= RXM_IOV_LIMIT);

	if (flags & FI_REMOTE_CQ_DATA) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"atomic with remote CQ data not supported\n");
		return -FI_EINVAL;
	}

	if (msg->op != FI_ATOMIC_READ) {
		assert(msg->msg_iov);
		ofi_ioc_to_iov(msg->msg_iov, buf_iov, msg->iov_count,
			       datatype_sz);
		buf_len = ofi_total_iov_len(buf_iov, msg->iov_count);
	}

	if (op == ofi_op_atomic_compare) {
		assert(comparev);
		ofi_ioc_to_iov(comparev, cmp_iov, compare_iov_count,
			       datatype_sz);
		cmp_len = ofi_total_iov_len(cmp_iov, compare_iov_count);
		assert(buf_len == cmp_len);
	}

	tot_len = buf_len + cmp_len + sizeof(struct rxm_atomic_hdr) +
		  sizeof(struct rxm_pkt);

	if (tot_len > rxm_eager_limit) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"atomic data too large %zu\n", tot_len);
		return -FI_EINVAL;
	}

	ofi_ep_lock_acquire(&rxm_ep->util_ep);
	tx_buf = (struct rxm_tx_atomic_buf *)
		 rxm_tx_buf_alloc(rxm_ep, RXM_BUF_POOL_TX_ATOMIC);
	if (OFI_UNLIKELY(!tx_buf)) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"Ran out of buffers from Atomic buffer pool\n");
		ret = -FI_EAGAIN;
		goto unlock;
	}

	rxm_ep_format_atomic_pkt_hdr(rxm_conn, tx_buf, tot_len, op,
				     msg->datatype, msg->op, flags, msg->data,
				     msg->rma_iov, msg->rma_iov_count);
	tx_buf->pkt.ctrl_hdr.msg_id = ofi_buf_index(tx_buf);
	tx_buf->app_context = msg->context;

	atomic_hdr = (struct rxm_atomic_hdr *) tx_buf->pkt.data;

	ofi_copy_from_iov(atomic_hdr->data, buf_len, buf_iov,
			  msg->iov_count, 0);
	if (cmp_len)
		ofi_copy_from_iov(atomic_hdr->data + buf_len, cmp_len,
				  cmp_iov, compare_iov_count, 0);

	tx_buf->result_iov_count = result_iov_count;
	if (resultv)
		ofi_ioc_to_iov(resultv, tx_buf->result_iov, result_iov_count,
			       datatype_sz);

	ret = rxm_ep_send_atomic_req(rxm_ep, rxm_conn, tx_buf, tot_len);
	if (ret)
		ofi_buf_free(tx_buf);
unlock:
	ofi_ep_lock_release(&rxm_ep->util_ep);
	return ret;
}
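The two copy calls above pack the operand data and, for compare operations, the compare data back-to-back into atomic_hdr->data. A minimal sketch of how a receiver could locate the two regions, assuming the same buf_len/cmp_len values; unpack_atomic_payload_sketch() is illustrative, not the provider's actual receive path.

static void unpack_atomic_payload_sketch(char *data, size_t buf_len,
					 size_t cmp_len, char **operand,
					 char **compare)
{
	*operand = data;				/* first buf_len bytes */
	*compare = cmp_len ? data + buf_len : NULL;	/* next cmp_len bytes */
}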