/* Complete a rendezvous receive once the ACK has been sent: advance the
 * buffer's state machine, drop any internally registered MRs, and report
 * the receive completion to the application.
 */
static inline int rxm_finish_send_rndv_ack(struct rxm_rx_buf *rx_buf)
{
	/* Record the ACK-sent -> finish transition (log, then update state). */
	RXM_LOG_STATE(FI_LOG_CQ, rx_buf->pkt, RXM_RNDV_ACK_SENT, RXM_RNDV_FINISH);
	rx_buf->hdr.state = RXM_RNDV_FINISH;

	/* MRs were registered by the provider only when the app did not
	 * supply local descriptors; release them in that case. */
	if (rx_buf->ep->rxm_mr_local == 0) {
		rxm_ep_msg_mr_closev(rx_buf->mr,
				     rx_buf->recv_entry->rxm_iov.count);
	}

	/* Deliver the receive completion for the full rendezvous payload. */
	return rxm_finish_recv(rx_buf, rx_buf->recv_entry->total_len);
}
/*
 * rxm_ep_rma_common() - shared post path for RMA reads and writes.
 *
 * Copies the caller's fi_msg_rma so the descriptor array and context can be
 * replaced, resolves the destination connection, allocates an internal RMA
 * buffer, registers the local iov when required, and posts the operation on
 * the underlying msg endpoint through @rma_msg.
 *
 * @rxm_ep:     rxm endpoint to post on
 * @msg:        caller-supplied RMA message (not modified)
 * @flags:      operation flags, passed through to @rma_msg
 * @rma_msg:    fi_readmsg/fi_writemsg-style function pointer that performs
 *              the actual post
 * @comp_flags: FI_READ or FI_WRITE; selects MR access when registering the
 *              local iov
 *
 * Returns 0 on success or a negative fabric error code.  On failure the RMA
 * buffer (and any MRs registered here) are released before returning.
 */
static inline ssize_t rxm_ep_rma_common(struct rxm_ep *rxm_ep, const struct fi_msg_rma *msg, uint64_t flags, rxm_rma_msg_fn rma_msg, uint64_t comp_flags)
{
	struct rxm_rma_buf *rma_buf;
	/* Local copy: desc and context are overwritten before posting. */
	struct fi_msg_rma msg_rma = *msg;
	struct rxm_conn *rxm_conn;
	void *mr_desc[RXM_IOV_LIMIT] = { 0 };
	int ret;

	assert(msg->rma_iov_count <= rxm_ep->rxm_info->tx_attr->rma_iov_limit);

	/* Resolve (and if needed establish) the msg connection for msg->addr. */
	ret = rxm_ep_prepare_tx(rxm_ep, msg->addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	ofi_ep_lock_acquire(&rxm_ep->util_ep);
	rma_buf = rxm_rma_buf_alloc(rxm_ep);
	if (OFI_UNLIKELY(!rma_buf)) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"Ran out of buffers from RMA buffer pool\n");
		ret = -FI_ENOMEM;
		goto unlock;
	}

	/* Stash the app's context/flags so the completion handler can
	 * report them; the msg-layer context below becomes rma_buf. */
	rma_buf->app_context = msg->context;
	rma_buf->flags = flags;

	/* Register the local iov (fills mr_desc with descriptors for the
	 * post).  NOTE(review): registration presumably happens only when
	 * msg_mr_local && !rxm_mr_local -- matches the cleanup guard below. */
	ret = rxm_ep_rma_reg_iov(rxm_ep, msg_rma.msg_iov, msg_rma.desc, mr_desc,
				 msg_rma.iov_count,
				 comp_flags & (FI_WRITE | FI_READ), rma_buf);
	if (OFI_UNLIKELY(ret))
		goto release;

	msg_rma.desc = mr_desc;
	msg_rma.context = rma_buf;

	ret = rma_msg(rxm_conn->msg_ep, &msg_rma, flags);
	if (OFI_LIKELY(!ret))
		goto unlock;

	/* Post failed: undo the MR registration done above before freeing. */
	if ((rxm_ep->msg_mr_local) && (!rxm_ep->rxm_mr_local))
		rxm_ep_msg_mr_closev(rma_buf->mr.mr, rma_buf->mr.count);
release:
	ofi_buf_free(rma_buf);
unlock:
	ofi_ep_lock_release(&rxm_ep->util_ep);
	return ret;
}
static int rxm_rndv_tx_finish(struct rxm_ep *rxm_ep, struct rxm_tx_rndv_buf *tx_buf) { int ret; RXM_LOG_STATE_TX(FI_LOG_CQ, tx_buf, RXM_RNDV_FINISH); tx_buf->hdr.state = RXM_RNDV_FINISH; tx_buf->conn->rndv_tx_credits++; if (!rxm_ep->rxm_mr_local) rxm_ep_msg_mr_closev(tx_buf->mr, tx_buf->count); ret = rxm_cq_tx_comp_write(rxm_ep, ofi_tx_cq_flags(tx_buf->pkt.hdr.op), tx_buf->app_context, tx_buf->flags); assert(ofi_tx_cq_flags(tx_buf->pkt.hdr.op) & FI_SEND); ofi_ep_tx_cntr_inc(&rxm_ep->util_ep); ofi_buf_free(tx_buf); return ret; }
/* Finish an RMA operation: write the tx completion, bump the matching
 * read/write counter, release any MRs this provider registered for the
 * transfer, and free the internal RMA buffer.
 *
 * Returns the result of the completion write.
 */
static inline int rxm_finish_rma(struct rxm_ep *rxm_ep, struct rxm_rma_buf *rma_buf, uint64_t comp_flags)
{
	int ret;

	ret = rxm_cq_tx_comp_write(rxm_ep, comp_flags, rma_buf->app_context,
				   rma_buf->flags);

	/* Exactly one of FI_WRITE / FI_READ must be set. */
	assert(!!(comp_flags & FI_WRITE) ^ !!(comp_flags & FI_READ));

	if (comp_flags & FI_WRITE) {
		ofi_ep_wr_cntr_inc(&rxm_ep->util_ep);
	} else {
		ofi_ep_rd_cntr_inc(&rxm_ep->util_ep);
	}

	/* MRs were registered in the RMA post path only when the msg ep
	 * needs local MRs, the app supplied none, and the transfer was not
	 * injected -- release them under the same conditions. */
	if (rxm_ep->msg_mr_local && !rxm_ep->rxm_mr_local &&
	    !(rma_buf->flags & FI_INJECT)) {
		rxm_ep_msg_mr_closev(rma_buf->mr.mr, rma_buf->mr.count);
	}

	ofi_buf_free(rma_buf);
	return ret;
}