/*
 * Endpoint control operations: aliasing, op-flag set/get, and enable.
 * Returns 0 on success or a negative fi_errno value.
 */
static int psmx2_ep_control(fid_t fid, int command, void *arg)
{
	struct fi_alias *alias;
	struct psmx2_fid_ep *ep, *new_ep;
	int err;

	ep = container_of(fid, struct psmx2_fid_ep, ep.fid);

	switch (command) {
	case FI_ALIAS:
		/* Shallow copy: the alias shares the base EP's resources but
		 * carries its own op flags. */
		new_ep = (struct psmx2_fid_ep *) calloc(1, sizeof *ep);
		if (!new_ep)
			return -FI_ENOMEM;
		alias = arg;
		*new_ep = *ep;
		err = psmx2_ep_set_flags(new_ep, alias->flags);
		if (err) {
			free(new_ep);
			return err;
		}
		new_ep->base_ep = ep;
		/* Keep the base EP alive as long as the alias exists. */
		ofi_atomic_inc32(&ep->ref);
		psmx2_ep_optimize_ops(new_ep);
		*alias->fid = &new_ep->ep.fid;
		return 0;

	case FI_SETOPSFLAG:
		err = psmx2_ep_set_flags(ep, *(uint64_t *)arg);
		if (err)
			return err;
		/* Flags may change which fast-path ops are usable. */
		psmx2_ep_optimize_ops(ep);
		return 0;

	case FI_GETOPSFLAG:
		if (!arg)
			return -FI_EINVAL;
		return psmx2_ep_get_flags(ep, arg);

	case FI_ENABLE:
		ep->enabled = 1;
		return 0;

	default:
		return -FI_ENOSYS;
	}
}
/*
 * Endpoint control operations (legacy variant): aliasing and fid-flag
 * set/get. Returns 0 on success or a negative fi_errno value.
 */
static int psmx2_ep_control(fid_t fid, int command, void *arg)
{
	struct fi_alias *alias;
	struct psmx2_fid_ep *ep, *new_ep;

	ep = container_of(fid, struct psmx2_fid_ep, ep.fid);

	switch (command) {
	case FI_ALIAS:
		/* Shallow copy of the endpoint with the caller's flags. */
		new_ep = (struct psmx2_fid_ep *) calloc(1, sizeof *ep);
		if (!new_ep)
			return -FI_ENOMEM;
		alias = arg;
		*new_ep = *ep;
		new_ep->flags = alias->flags;
		psmx2_ep_optimize_ops(new_ep);
		*alias->fid = &new_ep->ep.fid;
		return 0;

	case FI_SETFIDFLAG:
		ep->flags = *(uint64_t *)arg;
		/* Re-select fast-path ops for the new flags. */
		psmx2_ep_optimize_ops(ep);
		return 0;

	case FI_GETFIDFLAG:
		if (!arg)
			return -FI_EINVAL;
		*(uint64_t *)arg = ep->flags;
		return 0;

	case FI_ENABLE:
		/* Nothing to do; the endpoint is usable at creation. */
		return 0;

	default:
		return -FI_ENOSYS;
	}
}
int psmx2_ep_open_internal(struct psmx2_fid_domain *domain_priv, struct fi_info *info, struct psmx2_fid_ep **ep_out, void *context, struct psmx2_trx_ctxt *trx_ctxt) { struct psmx2_fid_ep *ep_priv; uint64_t ep_cap; int err = -FI_EINVAL; if (info) ep_cap = info->caps; else ep_cap = FI_TAGGED; if (info && info->ep_attr && info->ep_attr->auth_key) { if (info->ep_attr->auth_key_size != sizeof(psm2_uuid_t)) { FI_WARN(&psmx2_prov, FI_LOG_EP_CTRL, "Invalid auth_key_len %"PRIu64 ", should be %"PRIu64".\n", info->ep_attr->auth_key_size, sizeof(psm2_uuid_t)); goto errout; } if (memcmp(domain_priv->fabric->uuid, info->ep_attr->auth_key, sizeof(psm2_uuid_t))) { FI_WARN(&psmx2_prov, FI_LOG_EP_CTRL, "Invalid auth_key: %s\n", psmx2_uuid_to_string((void *)info->ep_attr->auth_key)); goto errout; } } ep_priv = (struct psmx2_fid_ep *) calloc(1, sizeof *ep_priv); if (!ep_priv) { err = -FI_ENOMEM; goto errout; } ep_priv->ep.fid.fclass = FI_CLASS_EP; ep_priv->ep.fid.context = context; ep_priv->ep.fid.ops = &psmx2_fi_ops; ep_priv->ep.ops = &psmx2_ep_ops; ep_priv->ep.cm = &psmx2_cm_ops; ep_priv->domain = domain_priv; ep_priv->rx = trx_ctxt; if (!(info && info->ep_attr && info->ep_attr->tx_ctx_cnt == FI_SHARED_CONTEXT)) ep_priv->tx = trx_ctxt; ofi_atomic_initialize32(&ep_priv->ref, 0); PSMX2_CTXT_TYPE(&ep_priv->nocomp_send_context) = PSMX2_NOCOMP_SEND_CONTEXT; PSMX2_CTXT_EP(&ep_priv->nocomp_send_context) = ep_priv; PSMX2_CTXT_TYPE(&ep_priv->nocomp_tsend_context) = PSMX2_NOCOMP_TSEND_CONTEXT; PSMX2_CTXT_EP(&ep_priv->nocomp_tsend_context) = ep_priv; if (ep_cap & FI_TAGGED) ep_priv->ep.tagged = &psmx2_tagged_ops; if (ep_cap & FI_MSG) ep_priv->ep.msg = &psmx2_msg_ops; if (ep_cap & FI_RMA) ep_priv->ep.rma = &psmx2_rma_ops; if (ep_cap & FI_ATOMICS) ep_priv->ep.atomic = &psmx2_atomic_ops; ep_priv->caps = ep_cap; err = psmx2_domain_enable_ep(domain_priv, ep_priv); if (err) goto errout_free_ep; psmx2_domain_acquire(domain_priv); if (info) { if (info->tx_attr) ep_priv->tx_flags = 
info->tx_attr->op_flags; if (info->rx_attr) ep_priv->rx_flags = info->rx_attr->op_flags; } psmx2_ep_optimize_ops(ep_priv); PSMX2_EP_INIT_OP_CONTEXT(ep_priv); *ep_out = ep_priv; return 0; errout_free_ep: free(ep_priv); errout: return err; }
static int psmx2_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { struct psmx2_fid_ep *ep; struct psmx2_fid_av *av; struct psmx2_fid_cq *cq; struct psmx2_fid_cntr *cntr; struct psmx2_fid_stx *stx; int err; ep = container_of(fid, struct psmx2_fid_ep, ep.fid); err = ofi_ep_bind_valid(&psmx2_prov, bfid, flags); if (err) return err; switch (bfid->fclass) { case FI_CLASS_EQ: return -FI_ENOSYS; case FI_CLASS_CQ: cq = container_of(bfid, struct psmx2_fid_cq, cq.fid); if (ep->domain != cq->domain) return -FI_EINVAL; if (flags & FI_SEND) { err = psmx2_add_poll_ctxt(&cq->poll_list, ep->tx); if (err) return err; ep->send_cq = cq; if (flags & FI_SELECTIVE_COMPLETION) ep->send_selective_completion = 1; } if (flags & FI_RECV) { err = psmx2_add_poll_ctxt(&cq->poll_list, ep->rx); if (err) return err; ep->recv_cq = cq; if (flags & FI_SELECTIVE_COMPLETION) ep->recv_selective_completion = 1; } psmx2_ep_optimize_ops(ep); break; case FI_CLASS_CNTR: cntr = container_of(bfid, struct psmx2_fid_cntr, cntr.fid); if (ep->domain != cntr->domain) return -FI_EINVAL; if (flags & (FI_SEND | FI_WRITE | FI_READ)) { err = psmx2_add_poll_ctxt(&cntr->poll_list, ep->tx); if (err) return err; } if (flags & (FI_RECV | FI_REMOTE_WRITE | FI_REMOTE_READ)) { err = psmx2_add_poll_ctxt(&cntr->poll_list, ep->rx); if (err) return err; } if (flags & FI_SEND) ep->send_cntr = cntr; if (flags & FI_RECV) ep->recv_cntr = cntr; if (flags & FI_WRITE) ep->write_cntr = cntr; if (flags & FI_READ) ep->read_cntr = cntr; if (flags & FI_REMOTE_WRITE) ep->remote_write_cntr = cntr; if (flags & FI_REMOTE_READ) ep->remote_read_cntr = cntr; break; case FI_CLASS_AV: av = container_of(bfid, struct psmx2_fid_av, av.fid); if (ep->domain != av->domain) return -FI_EINVAL; ep->av = av; psmx2_ep_optimize_ops(ep); if (ep->tx) psmx2_av_add_trx_ctxt(av, ep->tx, !psmx2_env.lazy_conn); if (ep->rx && ep->rx != ep->tx) psmx2_av_add_trx_ctxt(av, ep->rx, !psmx2_env.lazy_conn); break; case FI_CLASS_MR: if (!bfid->ops || !bfid->ops->bind) 
return -FI_EINVAL; err = bfid->ops->bind(bfid, fid, flags); if (err) return err; break; case FI_CLASS_STX_CTX: stx = container_of(bfid, struct psmx2_fid_stx, stx.fid); if (ep->domain != stx->domain) return -FI_EINVAL; if (ep->tx || ep->stx) return -FI_EINVAL; ep->tx = stx->tx; ep->stx = stx; err = psmx2_domain_enable_ep(ep->domain, ep); if (err) return err; ofi_atomic_inc32(&stx->ref); break; default: return -FI_ENOSYS; } return 0; }
int psmx2_ep_open(struct fid_domain *domain, struct fi_info *info, struct fid_ep **ep, void *context) { struct psmx2_fid_domain *domain_priv; struct psmx2_fid_ep *ep_priv; struct psmx2_context *item; uint8_t vlane; uint64_t ep_cap; int err = -FI_EINVAL; int i; if (info) ep_cap = info->caps; else ep_cap = FI_TAGGED; domain_priv = container_of(domain, struct psmx2_fid_domain, domain.fid); if (!domain_priv) goto errout; err = psmx2_domain_check_features(domain_priv, ep_cap); if (err) goto errout; err = psmx2_alloc_vlane(domain_priv, &vlane); if (err) goto errout; ep_priv = (struct psmx2_fid_ep *) calloc(1, sizeof *ep_priv); if (!ep_priv) { err = -FI_ENOMEM; goto errout_free_vlane; } ep_priv->ep.fid.fclass = FI_CLASS_EP; ep_priv->ep.fid.context = context; ep_priv->ep.fid.ops = &psmx2_fi_ops; ep_priv->ep.ops = &psmx2_ep_ops; ep_priv->ep.cm = &psmx2_cm_ops; ep_priv->domain = domain_priv; ep_priv->vlane = vlane; PSMX2_CTXT_TYPE(&ep_priv->nocomp_send_context) = PSMX2_NOCOMP_SEND_CONTEXT; PSMX2_CTXT_EP(&ep_priv->nocomp_send_context) = ep_priv; PSMX2_CTXT_TYPE(&ep_priv->nocomp_recv_context) = PSMX2_NOCOMP_RECV_CONTEXT; PSMX2_CTXT_EP(&ep_priv->nocomp_recv_context) = ep_priv; if (ep_cap & FI_TAGGED) ep_priv->ep.tagged = &psmx2_tagged_ops; if (ep_cap & FI_MSG) ep_priv->ep.msg = &psmx2_msg_ops; if (ep_cap & FI_RMA) ep_priv->ep.rma = &psmx2_rma_ops; if (ep_cap & FI_ATOMICS) ep_priv->ep.atomic = &psmx2_atomic_ops; ep_priv->caps = ep_cap; err = psmx2_domain_enable_ep(domain_priv, ep_priv); if (err) goto errout_free_ep; psmx2_domain_acquire(domain_priv); domain_priv->eps[ep_priv->vlane] = ep_priv; if (info) { if (info->tx_attr) ep_priv->flags = info->tx_attr->op_flags; if (info->rx_attr) ep_priv->flags |= info->rx_attr->op_flags; } psmx2_ep_optimize_ops(ep_priv); slist_init(&ep_priv->free_context_list); fastlock_init(&ep_priv->context_lock); #define PSMX2_FREE_CONTEXT_LIST_SIZE 64 for (i=0; i<PSMX2_FREE_CONTEXT_LIST_SIZE; i++) { item = calloc(1, sizeof(*item)); if (!item) { 
FI_WARN(&psmx2_prov, FI_LOG_EP_CTRL, "out of memory.\n"); exit(-1); } slist_insert_tail(&item->list_entry, &ep_priv->free_context_list); } *ep = &ep_priv->ep; return 0; errout_free_ep: free(ep_priv); errout_free_vlane: psmx2_free_vlane(domain_priv, vlane); errout: return err; }
static int psmx2_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags) { struct psmx2_fid_ep *ep; struct psmx2_fid_av *av; struct psmx2_fid_cq *cq; struct psmx2_fid_cntr *cntr; struct psmx2_fid_stx *stx; int err; ep = container_of(fid, struct psmx2_fid_ep, ep.fid); if (!bfid) return -FI_EINVAL; switch (bfid->fclass) { case FI_CLASS_EQ: return -FI_ENOSYS; case FI_CLASS_CQ: cq = container_of(bfid, struct psmx2_fid_cq, cq.fid); if (ep->domain != cq->domain) return -FI_EINVAL; if (flags & FI_SEND) { ep->send_cq = cq; if (flags & FI_SELECTIVE_COMPLETION) ep->send_selective_completion = 1; } if (flags & FI_RECV) { ep->recv_cq = cq; if (flags & FI_SELECTIVE_COMPLETION) ep->recv_selective_completion = 1; } psmx2_ep_optimize_ops(ep); break; case FI_CLASS_CNTR: cntr = container_of(bfid, struct psmx2_fid_cntr, cntr.fid); if (ep->domain != cntr->domain) return -FI_EINVAL; if (flags & FI_SEND) ep->send_cntr = cntr; if (flags & FI_RECV) ep->recv_cntr = cntr; if (flags & FI_WRITE) ep->write_cntr = cntr; if (flags & FI_READ) ep->read_cntr = cntr; if (flags & FI_REMOTE_WRITE) ep->remote_write_cntr = cntr; if (flags & FI_REMOTE_READ) ep->remote_read_cntr = cntr; break; case FI_CLASS_AV: av = container_of(bfid, struct psmx2_fid_av, av.fid); if (ep->domain != av->domain) return -FI_EINVAL; ep->av = av; psmx2_ep_optimize_ops(ep); break; case FI_CLASS_MR: if (!bfid->ops || !bfid->ops->bind) return -FI_EINVAL; err = bfid->ops->bind(bfid, fid, flags); if (err) return err; break; case FI_CLASS_STX_CTX: stx = container_of(bfid, struct psmx2_fid_stx, stx.fid); if (ep->domain != stx->domain) return -FI_EINVAL; break; default: return -FI_ENOSYS; } return 0; }
int psmx2_ep_open(struct fid_domain *domain, struct fi_info *info, struct fid_ep **ep, void *context) { struct psmx2_fid_domain *domain_priv; struct psmx2_fid_ep *ep_priv; uint8_t vlane; uint64_t ep_cap; int err = -FI_EINVAL; if (info) ep_cap = info->caps; else ep_cap = FI_TAGGED; domain_priv = container_of(domain, struct psmx2_fid_domain, domain.fid); if (!domain_priv) goto errout; err = psmx2_domain_check_features(domain_priv, ep_cap); if (err) goto errout; err = psmx2_alloc_vlane(domain_priv, &vlane); if (err) goto errout; ep_priv = (struct psmx2_fid_ep *) calloc(1, sizeof *ep_priv); if (!ep_priv) { err = -FI_ENOMEM; goto errout_free_vlane; } ep_priv->ep.fid.fclass = FI_CLASS_EP; ep_priv->ep.fid.context = context; ep_priv->ep.fid.ops = &psmx2_fi_ops; ep_priv->ep.ops = &psmx2_ep_ops; ep_priv->ep.cm = &psmx2_cm_ops; ep_priv->domain = domain_priv; ep_priv->vlane = vlane; PSMX2_CTXT_TYPE(&ep_priv->nocomp_send_context) = PSMX2_NOCOMP_SEND_CONTEXT; PSMX2_CTXT_EP(&ep_priv->nocomp_send_context) = ep_priv; PSMX2_CTXT_TYPE(&ep_priv->nocomp_recv_context) = PSMX2_NOCOMP_RECV_CONTEXT; PSMX2_CTXT_EP(&ep_priv->nocomp_recv_context) = ep_priv; if (ep_cap & FI_TAGGED) ep_priv->ep.tagged = &psmx2_tagged_ops; if (ep_cap & FI_MSG) ep_priv->ep.msg = &psmx2_msg_ops; if (ep_cap & FI_RMA) ep_priv->ep.rma = &psmx2_rma_ops; if (ep_cap & FI_ATOMICS) ep_priv->ep.atomic = &psmx2_atomic_ops; ep_priv->caps = ep_cap; err = psmx2_domain_enable_ep(domain_priv, ep_priv); if (err) goto errout_free_ep; psmx2_domain_acquire(domain_priv); domain_priv->eps[ep_priv->vlane] = ep_priv; if (info) { if (info->tx_attr) ep_priv->flags = info->tx_attr->op_flags; if (info->rx_attr) ep_priv->flags |= info->rx_attr->op_flags; } psmx2_ep_optimize_ops(ep_priv); *ep = &ep_priv->ep; return 0; errout_free_ep: free(ep_priv); errout_free_vlane: psmx2_free_vlane(domain_priv, vlane); errout: return err; }