int ofi_ep_bind_cq(struct util_ep *ep, struct util_cq *cq, uint64_t flags) { int ret; ret = ofi_check_bind_cq_flags(ep, cq, flags); if (ret) return ret; if (flags & FI_TRANSMIT) { ep->tx_cq = cq; if (!(flags & FI_SELECTIVE_COMPLETION)) { ep->tx_op_flags |= FI_COMPLETION; ep->tx_msg_flags = FI_COMPLETION; } ofi_atomic_inc32(&cq->ref); } if (flags & FI_RECV) { ep->rx_cq = cq; if (!(flags & FI_SELECTIVE_COMPLETION)) { ep->rx_op_flags |= FI_COMPLETION; ep->rx_msg_flags = FI_COMPLETION; } ofi_atomic_inc32(&cq->ref); } if (flags & (FI_TRANSMIT | FI_RECV)) { return fid_list_insert(&cq->ep_list, &cq->ep_list_lock, &ep->ep_fid.fid); } return FI_SUCCESS; }
void sock_cntr_add_rx_ctx(struct sock_cntr *cntr, struct sock_rx_ctx *rx_ctx) { int ret; struct fid *fid = &rx_ctx->ctx.fid; ret = fid_list_insert(&cntr->rx_list, &cntr->list_lock, fid); if (ret) SOCK_LOG_ERROR("Error in adding ctx to progress list\n"); }
static int mlx_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { struct mlx_ep *ep; struct util_cq *cq; ep = container_of(fid, struct mlx_ep, ep.ep_fid.fid); int status = FI_SUCCESS; switch (bfid->fclass) { case FI_CLASS_CQ: /* TODO: check rest flags for send/recv ECs */ do { cq = container_of(bfid, struct util_cq, cq_fid.fid); if ( ((flags & FI_TRANSMIT) && ep->ep.tx_cq)|| ((flags & FI_RECV) && ep->ep.rx_cq)) { FI_WARN( &mlx_prov, FI_LOG_EP_CTRL, "CQ already binded\n"); status = -FI_EINVAL; break; } if (flags & FI_TRANSMIT) { ep->ep.tx_cq = cq; ofi_atomic_inc32(&(cq->ref)); } if (flags & FI_RECV) { ep->ep.rx_cq = cq; ofi_atomic_inc32(&(cq->ref)); status = fid_list_insert( &cq->ep_list, &cq->ep_list_lock, &ep->ep.ep_fid.fid); if (status) break; } if (flags & FI_SELECTIVE_COMPLETION) { ep->ep.flags |= FI_SELECTIVE_COMPLETION; } } while (0); break; case FI_CLASS_AV: if (ep->av) { FI_WARN( &mlx_prov, FI_LOG_EP_CTRL, "AV already binded\n"); status = -FI_EINVAL; break; } ep->av = container_of(bfid, struct mlx_av, av.fid); ep->av->ep = ep; break; default: status = -FI_EINVAL; break; } return status; }
void sock_cntr_add_tx_ctx(struct sock_cntr *cntr, struct sock_tx_ctx *tx_ctx) { int ret; struct fid *fid = &tx_ctx->fid.ctx.fid; ret = fid_list_insert(&cntr->tx_list, &cntr->list_lock, fid); if (ret) SOCK_LOG_ERROR("Error in adding ctx to progress list\n"); else atomic_inc(&cntr->ref); }
int ofi_ep_bind_cntr(struct util_ep *ep, struct util_cntr *cntr, uint64_t flags) { if (flags & ~(FI_TRANSMIT | FI_RECV | FI_READ | FI_WRITE | FI_REMOTE_READ | FI_REMOTE_WRITE)) { FI_WARN(ep->domain->fabric->prov, FI_LOG_EP_CTRL, "Unsupported bind flags\n"); return -FI_EBADFLAGS; } if (((flags & FI_TRANSMIT) && ep->tx_cntr) || ((flags & FI_RECV) && ep->rx_cntr) || ((flags & FI_READ) && ep->rd_cntr) || ((flags & FI_WRITE) && ep->wr_cntr) || ((flags & FI_REMOTE_READ) && ep->rem_rd_cntr) || ((flags & FI_REMOTE_WRITE) && ep->rem_wr_cntr)) { FI_WARN(ep->domain->fabric->prov, FI_LOG_EP_CTRL, "Duplicate counter binding\n"); return -FI_EINVAL; } if (flags & FI_TRANSMIT) { ep->tx_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_RECV) { ep->rx_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_READ) { ep->rd_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_WRITE) { ep->wr_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_REMOTE_READ) { ep->rem_rd_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_REMOTE_WRITE) { ep->rem_wr_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } ep->flags |= OFI_CNTR_ENABLED; return fid_list_insert(&cntr->ep_list, &cntr->ep_list_lock, &ep->ep_fid.fid); }
static int smr_ep_bind_cq(struct smr_ep *ep, struct util_cq *cq, uint64_t flags) { int ret = 0; if (flags & ~(FI_TRANSMIT | FI_RECV)) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unsupported flags\n"); return -FI_EBADFLAGS; } if (((flags & FI_TRANSMIT) && ep->util_ep.tx_cq) || ((flags & FI_RECV) && ep->util_ep.rx_cq)) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "duplicate CQ binding\n"); return -FI_EINVAL; } if (flags & FI_TRANSMIT) { ep->util_ep.tx_cq = cq; ofi_atomic_inc32(&cq->ref); ep->tx_comp = cq->wait ? smr_tx_comp_signal : smr_tx_comp; } if (flags & FI_RECV) { ep->util_ep.rx_cq = cq; ofi_atomic_inc32(&cq->ref); if (cq->wait) { ep->rx_comp = (cq->domain->info_domain_caps & FI_SOURCE) ? smr_rx_src_comp_signal : smr_rx_comp_signal; } else { ep->rx_comp = (cq->domain->info_domain_caps & FI_SOURCE) ? smr_rx_src_comp : smr_rx_comp; } } ret = fid_list_insert(&cq->ep_list, &cq->ep_list_lock, &ep->util_ep.ep_fid.fid); return ret; }
int ofi_ep_bind_cntr(struct util_ep *ep, struct util_cntr *cntr, uint64_t flags) { int ret; ret = ofi_check_bind_cntr_flags(ep, cntr, flags); if (ret) return ret; if (flags & FI_TRANSMIT) { ep->tx_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_RECV) { ep->rx_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_READ) { ep->rd_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_WRITE) { ep->wr_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_REMOTE_READ) { ep->rem_rd_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } if (flags & FI_REMOTE_WRITE) { ep->rem_wr_cntr = cntr; ofi_atomic_inc32(&cntr->ref); } return fid_list_insert(&cntr->ep_list, &cntr->ep_list_lock, &ep->ep_fid.fid); }
static int rxm_ep_bind_cq(struct rxm_ep *rxm_ep, struct util_cq *util_cq, uint64_t flags) { int ret; if (flags & ~(FI_TRANSMIT | FI_RECV)) { FI_WARN(&rxm_prov, FI_LOG_EP_CTRL, "unsupported flags\n"); return -FI_EBADFLAGS; } if (((flags & FI_TRANSMIT) && rxm_ep->util_ep.tx_cq) || ((flags & FI_RECV) && rxm_ep->util_ep.rx_cq)) { FI_WARN(&rxm_prov, FI_LOG_EP_CTRL, "duplicate CQ binding\n"); return -FI_EINVAL; } if (flags & FI_TRANSMIT) { rxm_ep->util_ep.tx_cq = util_cq; if (!(flags & FI_SELECTIVE_COMPLETION)) rxm_ep->rxm_info->tx_attr->op_flags |= FI_COMPLETION; atomic_inc(&util_cq->ref); } if (flags & FI_RECV) { rxm_ep->util_ep.rx_cq = util_cq; if (!(flags & FI_SELECTIVE_COMPLETION)) rxm_ep->rxm_info->rx_attr->op_flags |= FI_COMPLETION; atomic_inc(&util_cq->ref); } if (flags & (FI_TRANSMIT | FI_RECV)) { ret = fid_list_insert(&util_cq->ep_list, &util_cq->ep_list_lock, &rxm_ep->util_ep.ep_fid.fid); if (ret) return ret; } return 0; }
/*
 * Bind a resource (EQ, CQ, counter, AV, MR, or shared TX/RX context) to a
 * sockets-provider endpoint or scalable endpoint, dispatching on the class
 * of the bound fid.  CQ and counter bindings fan out across every allocated
 * TX/RX context on the endpoint; AV bindings additionally register the
 * endpoint on the AV's endpoint list.
 *
 * Returns 0 on success, a negative fi_errno on failure, or -ENOSYS for an
 * unsupported fid class.
 */
static int sock_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	int ret;
	size_t i;
	struct sock_ep *ep;
	struct sock_eq *eq;
	struct sock_cq *cq;
	struct sock_av *av;
	struct sock_cntr *cntr;
	struct sock_tx_ctx *tx_ctx;
	struct sock_rx_ctx *rx_ctx;

	/* Generic sanity check of (class, flags) combinations. */
	ret = ofi_ep_bind_valid(&sock_prov, bfid, flags);
	if (ret)
		return ret;

	/* Both regular and scalable endpoints share struct sock_ep. */
	switch (fid->fclass) {
	case FI_CLASS_EP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	case FI_CLASS_SEP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	default:
		return -FI_EINVAL;
	}

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		eq = container_of(bfid, struct sock_eq, eq.fid);
		ep->attr->eq = eq;
		break;
	case FI_CLASS_MR:
		/* MR bindings require no endpoint-side state. */
		return 0;
	case FI_CLASS_CQ:
		cq = container_of(bfid, struct sock_cq, cq_fid.fid);
		/* CQ and EP must belong to the same domain. */
		if (ep->attr->domain != cq->domain)
			return -FI_EINVAL;
		/* Propagate the binding to every allocated TX context. */
		if (flags & FI_SEND) {
			for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->attr->tx_array[i];
				if (!tx_ctx)
					continue;
				ret = sock_ctx_bind_cq(&tx_ctx->fid.ctx.fid,
						       bfid, flags);
				if (ret)
					return ret;
			}
		}
		/* Propagate the binding to every allocated RX context. */
		if (flags & FI_RECV) {
			for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->attr->rx_array[i];
				if (!rx_ctx)
					continue;
				ret = sock_ctx_bind_cq(&rx_ctx->ctx.fid,
						       bfid, flags);
				if (ret)
					return ret;
			}
		}
		break;
	case FI_CLASS_CNTR:
		cntr = container_of(bfid, struct sock_cntr, cntr_fid.fid);
		if (ep->attr->domain != cntr->domain)
			return -FI_EINVAL;
		/* TX-side events: sends and locally initiated RMA. */
		if (flags & FI_SEND || flags & FI_WRITE || flags & FI_READ) {
			for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->attr->tx_array[i];
				if (!tx_ctx)
					continue;
				ret = sock_ctx_bind_cntr(&tx_ctx->fid.ctx.fid,
							 bfid, flags);
				if (ret)
					return ret;
			}
		}
		/* RX-side events: receives and remotely initiated RMA. */
		if (flags & FI_RECV || flags & FI_REMOTE_READ ||
		    flags & FI_REMOTE_WRITE) {
			for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->attr->rx_array[i];
				if (!rx_ctx)
					continue;
				ret = sock_ctx_bind_cntr(&rx_ctx->ctx.fid,
							 bfid, flags);
				if (ret)
					return ret;
			}
		}
		break;
	case FI_CLASS_AV:
		av = container_of(bfid, struct sock_av, av_fid.fid);
		if (ep->attr->domain != av->domain)
			return -FI_EINVAL;
		ep->attr->av = av;
		ofi_atomic_inc32(&av->ref);
		/* Attach the AV to the default contexts when they are real
		 * (non-shared) TX/RX contexts. */
		if (ep->attr->tx_ctx &&
		    ep->attr->tx_ctx->fid.ctx.fid.fclass == FI_CLASS_TX_CTX) {
			ep->attr->tx_ctx->av = av;
		}
		if (ep->attr->rx_ctx &&
		    ep->attr->rx_ctx->ctx.fid.fclass == FI_CLASS_RX_CTX)
			ep->attr->rx_ctx->av = av;
		/* ...and to every allocated context of a scalable EP. */
		for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
			if (ep->attr->tx_array[i])
				ep->attr->tx_array[i]->av = av;
		}
		for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
			if (ep->attr->rx_array[i])
				ep->attr->rx_array[i]->av = av;
		}
		/* Register the EP on the AV's list; note the insert itself
		 * is serialized by the EP lock, the AV list by list_lock. */
		fastlock_acquire(&av->list_lock);
		ret = fid_list_insert(&av->ep_list, &ep->attr->lock,
				      &ep->ep.fid);
		if (ret) {
			SOCK_LOG_ERROR("Error in adding fid in the EP list\n");
			fastlock_release(&av->list_lock);
			return ret;
		}
		fastlock_release(&av->list_lock);
		break;
	case FI_CLASS_STX_CTX:
		/* Shared TX context: link the EP onto the context's list and
		 * redirect the EP's TX context to the shared one. */
		tx_ctx = container_of(bfid, struct sock_tx_ctx, fid.stx.fid);
		fastlock_acquire(&tx_ctx->lock);
		dlist_insert_tail(&ep->attr->tx_ctx_entry, &tx_ctx->ep_list);
		fastlock_release(&tx_ctx->lock);
		ep->attr->tx_ctx->use_shared = 1;
		ep->attr->tx_ctx->stx_ctx = tx_ctx;
		break;
	case FI_CLASS_SRX_CTX:
		/* Shared RX context: same pattern as the STX case. */
		rx_ctx = container_of(bfid, struct sock_rx_ctx, ctx);
		fastlock_acquire(&rx_ctx->lock);
		dlist_insert_tail(&ep->attr->rx_ctx_entry, &rx_ctx->ep_list);
		fastlock_release(&rx_ctx->lock);
		ep->attr->rx_ctx->use_shared = 1;
		ep->attr->rx_ctx->srx_ctx = rx_ctx;
		break;
	default:
		return -ENOSYS;
	}
	return 0;
}