/*
 * Bind a resource to a tx/rx context fid.
 *
 * Dispatches on the class of the object being bound: completion queues
 * and counters are delegated to their dedicated bind helpers, memory
 * regions need no per-context state (accepted as a no-op), and any
 * other class is rejected.
 *
 * Returns 0 on success or a negative fabric error code.
 */
static int sock_ctx_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	if (bfid->fclass == FI_CLASS_MR)
		return 0;

	if (bfid->fclass == FI_CLASS_CQ)
		return sock_ctx_bind_cq(fid, bfid, flags);

	if (bfid->fclass == FI_CLASS_CNTR)
		return sock_ctx_bind_cntr(fid, bfid, flags);

	SOCK_LOG_ERROR("Invalid bind()\n");
	return -FI_EINVAL;
}
/*
 * Bind a resource (EQ, CQ, counter, AV, shared tx/rx context, or MR) to
 * an endpoint or scalable endpoint.
 *
 * Flags select which directions/operations the binding applies to
 * (FI_SEND/FI_RECV/FI_READ/FI_WRITE/FI_REMOTE_*); they are validated up
 * front by ofi_ep_bind_valid(). CQ and counter bindings are fanned out
 * to every allocated tx/rx context via sock_ctx_bind_cq/cntr.
 *
 * Returns 0 on success or a negative fabric error code.
 */
static int sock_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	int ret, i;
	struct sock_ep *ep;
	struct sock_eq *eq;
	struct sock_cq *cq;
	struct sock_av *av;
	struct sock_cntr *cntr;
	struct sock_tx_ctx *tx_ctx;
	struct sock_rx_ctx *rx_ctx;

	/* Reject flag combinations that are invalid for this bind class. */
	ret = ofi_ep_bind_valid(&sock_prov, bfid, flags);
	if (ret)
		return ret;

	/* Both regular and scalable endpoints share the same layout, so the
	 * container_of() is identical for the two accepted classes. */
	switch (fid->fclass) {
	case FI_CLASS_EP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	case FI_CLASS_SEP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	default:
		return -FI_EINVAL;
	}

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		eq = container_of(bfid, struct sock_eq, eq.fid);
		ep->attr->eq = eq;
		break;
	case FI_CLASS_MR:
		/* MR bindings require no endpoint-side state. */
		return 0;
	case FI_CLASS_CQ:
		cq = container_of(bfid, struct sock_cq, cq_fid.fid);
		/* CQ must belong to the same domain as the endpoint. */
		if (ep->attr->domain != cq->domain)
			return -FI_EINVAL;
		/* Propagate the CQ to every allocated tx context for sends
		 * and to every rx context for receives; unallocated slots
		 * (NULL entries) are skipped. */
		if (flags & FI_SEND) {
			for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->attr->tx_array[i];
				if (!tx_ctx)
					continue;
				ret = sock_ctx_bind_cq(&tx_ctx->fid.ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}
		if (flags & FI_RECV) {
			for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->attr->rx_array[i];
				if (!rx_ctx)
					continue;
				ret = sock_ctx_bind_cq(&rx_ctx->ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}
		break;
	case FI_CLASS_CNTR:
		cntr = container_of(bfid, struct sock_cntr, cntr_fid.fid);
		/* Counter must belong to the same domain as the endpoint. */
		if (ep->attr->domain != cntr->domain)
			return -FI_EINVAL;
		/* Tx-side events (send/read/write) bind the counter to the
		 * tx contexts; rx-side events (recv/remote read/remote write)
		 * bind it to the rx contexts. */
		if (flags & FI_SEND || flags & FI_WRITE || flags & FI_READ) {
			for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->attr->tx_array[i];
				if (!tx_ctx)
					continue;
				ret = sock_ctx_bind_cntr(&tx_ctx->fid.ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}
		if (flags & FI_RECV || flags & FI_REMOTE_READ || flags & FI_REMOTE_WRITE) {
			for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->attr->rx_array[i];
				if (!rx_ctx)
					continue;
				ret = sock_ctx_bind_cntr(&rx_ctx->ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}
		break;
	case FI_CLASS_AV:
		av = container_of(bfid, struct sock_av, av_fid.fid);
		/* AV must belong to the same domain as the endpoint. */
		if (ep->attr->domain != av->domain)
			return -FI_EINVAL;
		ep->attr->av = av;
		atomic_inc(&av->ref);
		/* Only attach the AV to the endpoint's own tx/rx context when
		 * it is a real (non-shared) context, then to every context in
		 * the tx/rx arrays. */
		if (ep->attr->tx_ctx &&
		    ep->attr->tx_ctx->fid.ctx.fid.fclass == FI_CLASS_TX_CTX) {
			ep->attr->tx_ctx->av = av;
		}
		if (ep->attr->rx_ctx &&
		    ep->attr->rx_ctx->ctx.fid.fclass == FI_CLASS_RX_CTX)
			ep->attr->rx_ctx->av = av;
		for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
			if (ep->attr->tx_array[i])
				ep->attr->tx_array[i]->av = av;
		}
		for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
			if (ep->attr->rx_array[i])
				ep->attr->rx_array[i]->av = av;
		}
		break;
	case FI_CLASS_STX_CTX:
		/* Shared tx context: enqueue this endpoint on the shared
		 * context's ep_list under its lock, then mark the endpoint's
		 * own tx context as delegating to the shared one.
		 * NOTE(review): ep->attr->tx_ctx is dereferenced without a
		 * NULL check here — presumably guaranteed non-NULL by
		 * endpoint creation; confirm against ep alloc path. */
		tx_ctx = container_of(bfid, struct sock_tx_ctx, fid.stx.fid);
		fastlock_acquire(&tx_ctx->lock);
		dlist_insert_tail(&ep->attr->tx_ctx_entry, &tx_ctx->ep_list);
		fastlock_release(&tx_ctx->lock);
		ep->attr->tx_ctx->use_shared = 1;
		ep->attr->tx_ctx->stx_ctx = tx_ctx;
		break;
	case FI_CLASS_SRX_CTX:
		/* Shared rx context: mirror of the STX case above. */
		rx_ctx = container_of(bfid, struct sock_rx_ctx, ctx);
		fastlock_acquire(&rx_ctx->lock);
		dlist_insert_tail(&ep->attr->rx_ctx_entry, &rx_ctx->ep_list);
		fastlock_release(&rx_ctx->lock);
		ep->attr->rx_ctx->use_shared = 1;
		ep->attr->rx_ctx->srx_ctx = rx_ctx;
		break;
	default:
		return -ENOSYS;
	}
	return 0;
}
/*
 * Bind a resource (EQ, CQ, counter, AV, shared tx/rx context, or MR) to
 * an endpoint or scalable endpoint.
 *
 * This variant records per-operation completion objects directly in
 * ep->comp based on the bind flags, then fans the binding out to the
 * endpoint's tx/rx context arrays. Domain mismatches are checked only
 * via assert() (debug builds), not returned as errors.
 *
 * Returns 0 on success or a negative error code.
 */
static int sock_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	int ret, i;
	struct sock_ep *ep;
	struct sock_eq *eq;
	struct sock_cq *cq;
	struct sock_av *av;
	struct sock_cntr *cntr;
	struct sock_tx_ctx *tx_ctx;
	struct sock_rx_ctx *rx_ctx;

	/* Both regular and scalable endpoints share the same layout, so the
	 * container_of() is identical for the two accepted classes. */
	switch(fid->fclass) {
	case FI_CLASS_EP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	case FI_CLASS_SEP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	default:
		return -FI_EINVAL;
	}

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		eq = container_of(bfid, struct sock_eq, eq.fid);
		ep->eq = eq;
		break;
	case FI_CLASS_MR:
		/* MR bindings require no endpoint-side state. */
		return 0;
	case FI_CLASS_CQ:
		cq = container_of(bfid, struct sock_cq, cq_fid.fid);
		/* Debug-only domain check — no runtime error in release builds. */
		assert(ep->domain == cq->domain);
		/* Record the CQ for each operation class selected by flags;
		 * FI_COMPLETION additionally switches that class to
		 * per-operation completion reporting. */
		if (flags & FI_SEND) {
			ep->comp.send_cq = cq;
			if (flags & FI_COMPLETION)
				ep->comp.send_cq_event = 1;
		}
		if (flags & FI_READ) {
			ep->comp.read_cq = cq;
			if (flags & FI_COMPLETION)
				ep->comp.read_cq_event = 1;
		}
		if (flags & FI_WRITE) {
			ep->comp.write_cq = cq;
			if (flags & FI_COMPLETION)
				ep->comp.write_cq_event = 1;
		}
		if (flags & FI_RECV) {
			ep->comp.recv_cq = cq;
			if (flags & FI_COMPLETION)
				ep->comp.recv_cq_event = 1;
		}
		/* Fan out to allocated tx contexts for any tx-side flag. */
		if (flags & FI_SEND || flags & FI_WRITE || flags & FI_READ) {
			for (i=0; i < ep->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->tx_array[i];
				if (!tx_ctx)
					continue;
				if ((ret = sock_ctx_bind_cq(&tx_ctx->fid.ctx.fid, bfid, flags)))
					return ret;
			}
		}
		if (flags & FI_RECV) {
			for (i = 0; i < ep->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->rx_array[i];
				if (!rx_ctx)
					continue;
				/* A shared rx context is not bound through
				 * sock_ctx_bind_cq(); instead the context is
				 * linked onto the CQ's rx_list under the CQ's
				 * list lock. */
				if (rx_ctx->ctx.fid.fclass == FI_CLASS_SRX_CTX) {
					if (flags & FI_RECV) {
						ep->comp.recv_cq = cq;
						if (flags & FI_COMPLETION)
							ep->comp.recv_cq_event = 1;
					}
					fastlock_acquire(&cq->list_lock);
					dlist_insert_tail(&rx_ctx->cq_entry, &cq->rx_list);
					fastlock_release(&cq->list_lock);
					continue;
				}
				if ((ret = sock_ctx_bind_cq(&rx_ctx->ctx.fid, bfid, flags)))
					return ret;
			}
		}
		break;
	case FI_CLASS_CNTR:
		cntr = container_of(bfid, struct sock_cntr, cntr_fid.fid);
		/* Debug-only domain check — no runtime error in release builds. */
		assert(ep->domain == cntr->domain);
		/* Record the counter for each operation class selected by flags. */
		if (flags & FI_SEND)
			ep->comp.send_cntr = cntr;
		if (flags & FI_RECV)
			ep->comp.recv_cntr = cntr;
		if (flags & FI_READ)
			ep->comp.read_cntr = cntr;
		if (flags & FI_WRITE)
			ep->comp.write_cntr = cntr;
		if (flags & FI_REMOTE_READ)
			ep->comp.rem_read_cntr = cntr;
		if (flags & FI_REMOTE_WRITE)
			ep->comp.rem_write_cntr = cntr;
		/* Fan out to allocated tx contexts for tx-side flags. */
		if (flags & FI_SEND || flags & FI_WRITE || flags & FI_READ) {
			for (i = 0; i < ep->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->tx_array[i];
				if (!tx_ctx)
					continue;
				if ((ret = sock_ctx_bind_cntr(&tx_ctx->fid.ctx.fid, bfid, flags)))
					return ret;
			}
		}
		if (flags & FI_RECV || flags & FI_REMOTE_READ || flags & FI_REMOTE_WRITE) {
			for (i = 0; i < ep->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->rx_array[i];
				if (!rx_ctx)
					continue;
				/* Shared rx context: set its completion
				 * counters directly and link it onto the
				 * counter's rx_list under the list lock,
				 * rather than going through
				 * sock_ctx_bind_cntr(). */
				if (rx_ctx->ctx.fid.fclass == FI_CLASS_SRX_CTX) {
					if (flags & FI_RECV)
						rx_ctx->comp.recv_cntr = cntr;
					if (flags & FI_REMOTE_READ)
						rx_ctx->comp.rem_read_cntr = cntr;
					if (flags & FI_REMOTE_WRITE)
						rx_ctx->comp.rem_write_cntr = cntr;
					fastlock_acquire(&cntr->list_lock);
					dlist_insert_tail(&rx_ctx->cntr_entry, &cntr->rx_list);
					fastlock_release(&cntr->list_lock);
					continue;
				}
				if ((ret = sock_ctx_bind_cntr(&rx_ctx->ctx.fid, bfid, flags)))
					return ret;
			}
		}
		break;
	case FI_CLASS_AV:
		av = container_of(bfid, struct sock_av, av_fid.fid);
		/* Debug-only domain check — no runtime error in release builds. */
		assert(ep->domain == av->domain);
		ep->av = av;
		av->cmap = &av->domain->r_cmap;
		/* Only attach the AV to the endpoint's own tx/rx context when
		 * it is a real (non-shared) context, then to every context in
		 * the tx/rx arrays. */
		if (ep->tx_ctx &&
		    ep->tx_ctx->fid.ctx.fid.fclass == FI_CLASS_TX_CTX) {
			ep->tx_ctx->av = av;
		}
		if (ep->rx_ctx &&
		    ep->rx_ctx->ctx.fid.fclass == FI_CLASS_RX_CTX)
			ep->rx_ctx->av = av;
		for (i = 0; i < ep->ep_attr.tx_ctx_cnt; i++) {
			if (ep->tx_array[i])
				ep->tx_array[i]->av = av;
		}
		for (i = 0; i < ep->ep_attr.rx_ctx_cnt; i++) {
			if (ep->rx_array[i])
				ep->rx_array[i]->av = av;
		}
		break;
	case FI_CLASS_STX_CTX:
		/* Shared tx context: link the endpoint onto the shared
		 * context's ep_list and make it the endpoint's active tx
		 * context (slot 0).
		 * NOTE(review): unlike the ep->attr variant in this file,
		 * the list insert here is not protected by tx_ctx->lock —
		 * verify callers serialize binds. */
		tx_ctx = container_of(bfid, struct sock_tx_ctx, fid.stx.fid);
		dlist_insert_tail(&ep->tx_ctx_entry, &tx_ctx->ep_list);
		ep->tx_ctx = tx_ctx;
		ep->tx_array[0] = tx_ctx;
		break;
	case FI_CLASS_SRX_CTX:
		/* Shared rx context: mirror of the STX case above (also
		 * unlocked — see note there). */
		rx_ctx = container_of(bfid, struct sock_rx_ctx, ctx);
		dlist_insert_tail(&ep->rx_ctx_entry, &rx_ctx->ep_list);
		ep->rx_ctx = rx_ctx;
		ep->rx_array[0] = rx_ctx;
		break;
	default:
		return -ENOSYS;
	}
	return 0;
}