int ofi_ep_bind(struct util_ep *util_ep, struct fid *fid, uint64_t flags) { int ret; struct util_av *av; struct util_cq *cq; struct util_eq *eq; struct util_cntr *cntr; ret = ofi_ep_bind_valid(util_ep->domain->prov, fid, flags); if (ret) return ret; switch (fid->fclass) { case FI_CLASS_CQ: cq = container_of(fid, struct util_cq, cq_fid.fid); return ofi_ep_bind_cq(util_ep, cq, flags); case FI_CLASS_EQ: eq = container_of(fid, struct util_eq, eq_fid.fid); return ofi_ep_bind_eq(util_ep, eq); case FI_CLASS_AV: av = container_of(fid, struct util_av, av_fid.fid); return ofi_ep_bind_av(util_ep, av); case FI_CLASS_CNTR: cntr = container_of(fid, struct util_cntr, cntr_fid.fid); return ofi_ep_bind_cntr(util_ep, cntr, flags); } return -FI_EINVAL; }
static int mlxm_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { struct mlxm_fid_ep *fid_ep; int ret; fid_ep = container_of(fid, struct mlxm_fid_ep, ep.fid); ret = ofi_ep_bind_valid(&mlxm_prov, bfid, flags); if (ret) return ret; switch (bfid->fclass) { case FI_CLASS_CQ: /* TODO: check ress flags for send/recv ECs */ fid_ep->cq = container_of(bfid, struct mlxm_fid_cq, cq.fid); break; case FI_CLASS_AV: fid_ep->av = container_of(bfid, struct mlxm_fid_av, av.fid); fid_ep->domain = fid_ep->av->domain; fid_ep->av->ep = fid_ep; break; default: return -ENOSYS; } return 0; }
static int fi_ibv_msg_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { struct fi_ibv_msg_ep *ep; int ret; ep = container_of(fid, struct fi_ibv_msg_ep, ep_fid.fid); ret = ofi_ep_bind_valid(&fi_ibv_prov, bfid, flags); if (ret) return ret; switch (bfid->fclass) { case FI_CLASS_CQ: /* Must bind a CQ to either RECV or SEND completions, and * the FI_SELECTIVE_COMPLETION flag is only valid when binding the * FI_SEND CQ. */ if (!(flags & (FI_RECV|FI_SEND)) || (flags & (FI_SEND|FI_SELECTIVE_COMPLETION)) == FI_SELECTIVE_COMPLETION) { return -EINVAL; } if (flags & FI_RECV) { if (ep->rcq) return -EINVAL; ep->rcq = container_of(bfid, struct fi_ibv_cq, cq_fid.fid); } if (flags & FI_SEND) { if (ep->scq) return -EINVAL; ep->scq = container_of(bfid, struct fi_ibv_cq, cq_fid.fid); if (flags & FI_SELECTIVE_COMPLETION) ep->ep_flags |= FI_SELECTIVE_COMPLETION; else ep->info->tx_attr->op_flags |= FI_COMPLETION; ep->ep_id = ep->scq->send_signal_wr_id | ep->scq->ep_cnt++; } break; case FI_CLASS_EQ: ep->eq = container_of(bfid, struct fi_ibv_eq, eq_fid.fid); ret = rdma_migrate_id(ep->id, ep->eq->channel); if (ret) return -errno; break; case FI_CLASS_SRX_CTX: ep->srq_ep = container_of(bfid, struct fi_ibv_srq_ep, ep_fid.fid); break; default: return -EINVAL; } return 0; }
static int sock_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { int ret, i; struct sock_ep *ep; struct sock_eq *eq; struct sock_cq *cq; struct sock_av *av; struct sock_cntr *cntr; struct sock_tx_ctx *tx_ctx; struct sock_rx_ctx *rx_ctx; ret = ofi_ep_bind_valid(&sock_prov, bfid, flags); if (ret) return ret; switch (fid->fclass) { case FI_CLASS_EP: ep = container_of(fid, struct sock_ep, ep.fid); break; case FI_CLASS_SEP: ep = container_of(fid, struct sock_ep, ep.fid); break; default: return -FI_EINVAL; } switch (bfid->fclass) { case FI_CLASS_EQ: eq = container_of(bfid, struct sock_eq, eq.fid); ep->attr->eq = eq; break; case FI_CLASS_MR: return 0; case FI_CLASS_CQ: cq = container_of(bfid, struct sock_cq, cq_fid.fid); if (ep->attr->domain != cq->domain) return -FI_EINVAL; if (flags & FI_SEND) { for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) { tx_ctx = ep->attr->tx_array[i]; if (!tx_ctx) continue; ret = sock_ctx_bind_cq(&tx_ctx->fid.ctx.fid, bfid, flags); if (ret) return ret; } } if (flags & FI_RECV) { for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) { rx_ctx = ep->attr->rx_array[i]; if (!rx_ctx) continue; ret = sock_ctx_bind_cq(&rx_ctx->ctx.fid, bfid, flags); if (ret) return ret; } } break; case FI_CLASS_CNTR: cntr = container_of(bfid, struct sock_cntr, cntr_fid.fid); if (ep->attr->domain != cntr->domain) return -FI_EINVAL; if (flags & FI_SEND || flags & FI_WRITE || flags & FI_READ) { for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) { tx_ctx = ep->attr->tx_array[i]; if (!tx_ctx) continue; ret = sock_ctx_bind_cntr(&tx_ctx->fid.ctx.fid, bfid, flags); if (ret) return ret; } } if (flags & FI_RECV || flags & FI_REMOTE_READ || flags & FI_REMOTE_WRITE) { for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) { rx_ctx = ep->attr->rx_array[i]; if (!rx_ctx) continue; ret = sock_ctx_bind_cntr(&rx_ctx->ctx.fid, bfid, flags); if (ret) return ret; } } break; case FI_CLASS_AV: av = container_of(bfid, struct sock_av, av_fid.fid); if (ep->attr->domain != av->domain) 
return -FI_EINVAL; ep->attr->av = av; atomic_inc(&av->ref); if (ep->attr->tx_ctx && ep->attr->tx_ctx->fid.ctx.fid.fclass == FI_CLASS_TX_CTX) { ep->attr->tx_ctx->av = av; } if (ep->attr->rx_ctx && ep->attr->rx_ctx->ctx.fid.fclass == FI_CLASS_RX_CTX) ep->attr->rx_ctx->av = av; for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) { if (ep->attr->tx_array[i]) ep->attr->tx_array[i]->av = av; } for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) { if (ep->attr->rx_array[i]) ep->attr->rx_array[i]->av = av; } break; case FI_CLASS_STX_CTX: tx_ctx = container_of(bfid, struct sock_tx_ctx, fid.stx.fid); fastlock_acquire(&tx_ctx->lock); dlist_insert_tail(&ep->attr->tx_ctx_entry, &tx_ctx->ep_list); fastlock_release(&tx_ctx->lock); ep->attr->tx_ctx->use_shared = 1; ep->attr->tx_ctx->stx_ctx = tx_ctx; break; case FI_CLASS_SRX_CTX: rx_ctx = container_of(bfid, struct sock_rx_ctx, ctx); fastlock_acquire(&rx_ctx->lock); dlist_insert_tail(&ep->attr->rx_ctx_entry, &rx_ctx->ep_list); fastlock_release(&rx_ctx->lock); ep->attr->rx_ctx->use_shared = 1; ep->attr->rx_ctx->srx_ctx = rx_ctx; break; default: return -ENOSYS; } return 0; }
/*
 * Bind a fabric object to a PSM2 endpoint.  Supported classes: CQ, CNTR,
 * AV, MR, and shared TX context (STX); EQ binds are rejected with
 * -FI_ENOSYS.  Returns 0 on success or a negative fi_errno value.
 */
static int psmx2_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	struct psmx2_fid_ep *ep;
	struct psmx2_fid_av *av;
	struct psmx2_fid_cq *cq;
	struct psmx2_fid_cntr *cntr;
	struct psmx2_fid_stx *stx;
	int err;

	ep = container_of(fid, struct psmx2_fid_ep, ep.fid);
	err = ofi_ep_bind_valid(&psmx2_prov, bfid, flags);
	if (err)
		return err;

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		return -FI_ENOSYS;

	case FI_CLASS_CQ:
		cq = container_of(bfid, struct psmx2_fid_cq, cq.fid);
		/* CQ and EP must belong to the same domain. */
		if (ep->domain != cq->domain)
			return -FI_EINVAL;
		if (flags & FI_SEND) {
			/* Add the TX context to the CQ's poll list before
			 * publishing the binding in ep->send_cq. */
			err = psmx2_add_poll_ctxt(&cq->poll_list, ep->tx);
			if (err)
				return err;
			ep->send_cq = cq;
			if (flags & FI_SELECTIVE_COMPLETION)
				ep->send_selective_completion = 1;
		}
		if (flags & FI_RECV) {
			err = psmx2_add_poll_ctxt(&cq->poll_list, ep->rx);
			if (err)
				return err;
			ep->recv_cq = cq;
			if (flags & FI_SELECTIVE_COMPLETION)
				ep->recv_selective_completion = 1;
		}
		/* Re-evaluate the op tables now that bindings changed. */
		psmx2_ep_optimize_ops(ep);
		break;

	case FI_CLASS_CNTR:
		cntr = container_of(bfid, struct psmx2_fid_cntr, cntr.fid);
		if (ep->domain != cntr->domain)
			return -FI_EINVAL;
		/* TX-side events go through ep->tx, RX-side (including
		 * remote access) through ep->rx. */
		if (flags & (FI_SEND | FI_WRITE | FI_READ)) {
			err = psmx2_add_poll_ctxt(&cntr->poll_list, ep->tx);
			if (err)
				return err;
		}
		if (flags & (FI_RECV | FI_REMOTE_WRITE | FI_REMOTE_READ)) {
			err = psmx2_add_poll_ctxt(&cntr->poll_list, ep->rx);
			if (err)
				return err;
		}
		/* One counter may serve any subset of the event types. */
		if (flags & FI_SEND)
			ep->send_cntr = cntr;
		if (flags & FI_RECV)
			ep->recv_cntr = cntr;
		if (flags & FI_WRITE)
			ep->write_cntr = cntr;
		if (flags & FI_READ)
			ep->read_cntr = cntr;
		if (flags & FI_REMOTE_WRITE)
			ep->remote_write_cntr = cntr;
		if (flags & FI_REMOTE_READ)
			ep->remote_read_cntr = cntr;
		break;

	case FI_CLASS_AV:
		av = container_of(bfid, struct psmx2_fid_av, av.fid);
		if (ep->domain != av->domain)
			return -FI_EINVAL;
		ep->av = av;
		psmx2_ep_optimize_ops(ep);
		/* Register the endpoint's TX/RX contexts with the AV;
		 * connections are established eagerly unless lazy_conn
		 * is enabled. */
		if (ep->tx)
			psmx2_av_add_trx_ctxt(av, ep->tx, !psmx2_env.lazy_conn);
		if (ep->rx && ep->rx != ep->tx)
			psmx2_av_add_trx_ctxt(av, ep->rx, !psmx2_env.lazy_conn);
		break;

	case FI_CLASS_MR:
		/* Delegate to the MR's own bind operation. */
		if (!bfid->ops || !bfid->ops->bind)
			return -FI_EINVAL;
		err = bfid->ops->bind(bfid, fid, flags);
		if (err)
			return err;
		break;

	case FI_CLASS_STX_CTX:
		stx = container_of(bfid, struct psmx2_fid_stx, stx.fid);
		if (ep->domain != stx->domain)
			return -FI_EINVAL;
		/* Reject if the EP already owns a TX context or an STX. */
		if (ep->tx || ep->stx)
			return -FI_EINVAL;
		ep->tx = stx->tx;
		ep->stx = stx;
		err = psmx2_domain_enable_ep(ep->domain, ep);
		if (err)
			return err;
		ofi_atomic_inc32(&stx->ref);
		break;

	default:
		return -FI_ENOSYS;
	}

	return 0;
}
static int psmx_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { struct psmx_fid_ep *ep; struct psmx_fid_av *av; struct psmx_fid_cq *cq; struct psmx_fid_cntr *cntr; struct psmx_fid_stx *stx; int err; ep = container_of(fid, struct psmx_fid_ep, ep.fid); err = ofi_ep_bind_valid(&psmx_prov, bfid, flags); if (err) return err; switch (bfid->fclass) { case FI_CLASS_EQ: return -FI_ENOSYS; case FI_CLASS_CQ: cq = container_of(bfid, struct psmx_fid_cq, cq.fid); if (ep->domain != cq->domain) return -FI_EINVAL; if (flags & FI_SEND) { ep->send_cq = cq; if (flags & FI_SELECTIVE_COMPLETION) ep->send_selective_completion = 1; } if (flags & FI_RECV) { ep->recv_cq = cq; if (flags & FI_SELECTIVE_COMPLETION) ep->recv_selective_completion = 1; } psmx_ep_optimize_ops(ep); break; case FI_CLASS_CNTR: cntr = container_of(bfid, struct psmx_fid_cntr, cntr.fid); if (ep->domain != cntr->domain) return -FI_EINVAL; if (flags & FI_SEND) ep->send_cntr = cntr; if (flags & FI_RECV) ep->recv_cntr = cntr; if (flags & FI_WRITE) ep->write_cntr = cntr; if (flags & FI_READ) ep->read_cntr = cntr; if (flags & FI_REMOTE_WRITE) ep->remote_write_cntr = cntr; if (flags & FI_REMOTE_READ) ep->remote_read_cntr = cntr; break; case FI_CLASS_AV: av = container_of(bfid, struct psmx_fid_av, av.fid); if (ep->domain != av->domain) return -FI_EINVAL; ep->av = av; psmx_ep_optimize_ops(ep); break; case FI_CLASS_MR: if (!bfid->ops || !bfid->ops->bind) return -FI_EINVAL; err = bfid->ops->bind(bfid, fid, flags); if (err) return err; break; case FI_CLASS_STX_CTX: stx = container_of(bfid, struct psmx_fid_stx, stx.fid); if (ep->domain != stx->domain) return -FI_EINVAL; break; default: return -FI_ENOSYS; } return 0; }
static int fi_ibv_rdm_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { struct fi_ibv_rdm_ep *ep; struct fi_ibv_rdm_cq *cq; struct fi_ibv_av *av; struct fi_ibv_rdm_cntr *cntr; int ret; ep = container_of(fid, struct fi_ibv_rdm_ep, ep_fid.fid); ret = ofi_ep_bind_valid(&fi_ibv_prov, bfid, flags); if (ret) return ret; switch (bfid->fclass) { case FI_CLASS_CQ: cq = container_of(bfid, struct fi_ibv_rdm_cq, cq_fid); if (ep->domain != cq->domain) { return -FI_EINVAL; } if (flags & FI_RECV) { if (ep->fi_rcq) return -EINVAL; ep->fi_rcq = cq; ep->rx_selective_completion = (flags & FI_SELECTIVE_COMPLETION) ? 1 : 0; } if (flags & FI_SEND) { if (ep->fi_scq) return -EINVAL; ep->fi_scq = cq; ep->tx_selective_completion = (flags & FI_SELECTIVE_COMPLETION) ? 1 : 0; } /* TODO: this is wrong. CQ to EP is 1:n */ cq->ep = ep; break; case FI_CLASS_AV: av = container_of(bfid, struct fi_ibv_av, av_fid.fid); if (ep->domain != av->domain) { return -FI_EINVAL; } ep->av = av; /* TODO: this is wrong, AV to EP is 1:n */ ep->av->ep = ep; break; case FI_CLASS_CNTR: cntr = container_of(bfid, struct fi_ibv_rdm_cntr, fid.fid); if (ep->domain != cntr->domain) { return -FI_EINVAL; } if ((flags & FI_REMOTE_READ) || (flags & FI_REMOTE_WRITE)) { return -FI_ENOSYS; } if (flags & FI_SEND) { ep->send_cntr = cntr; atomic_inc(&ep->send_cntr->ep_ref); } if (flags & FI_RECV) { ep->recv_cntr = cntr; atomic_inc(&ep->recv_cntr->ep_ref); } if (flags & FI_READ) { ep->read_cntr = cntr; atomic_inc(&ep->read_cntr->ep_ref); } if (flags & FI_WRITE) { ep->write_cntr = cntr; atomic_inc(&ep->write_cntr->ep_ref); } break; default: return -EINVAL; } return 0; }