int rxm_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { struct rxm_domain *rxm_domain; struct rxm_cq *rxm_cq; int ret; rxm_cq = calloc(1, sizeof(*rxm_cq)); if (!rxm_cq) return -FI_ENOMEM; rxm_domain = container_of(domain, struct rxm_domain, util_domain.domain_fid); ret = fi_cq_open(rxm_domain->msg_domain, attr, &rxm_cq->msg_cq, context); if (ret) { FI_WARN(&rxm_prov, FI_LOG_CQ, "Unable to open MSG CQ\n"); goto err1; } ret = ofi_cq_init(&rxm_prov, domain, attr, &rxm_cq->util_cq, &rxm_cq_progress, context); if (ret) goto err2; *cq_fid = &rxm_cq->util_cq.cq_fid; /* Override util_cq_fi_ops */ (*cq_fid)->fid.ops = &rxm_cq_fi_ops; return 0; err2: fi_close(&rxm_cq->msg_cq->fid); err1: free(rxm_cq); return ret; }
int smr_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { struct util_cq *util_cq; int ret; if (attr->wait_obj != FI_WAIT_NONE) { FI_INFO(&smr_prov, FI_LOG_CQ, "CQ wait not yet supported\n"); return -FI_ENOSYS; } util_cq = calloc(1, sizeof(*util_cq)); if (!util_cq) return -FI_ENOMEM; ret = ofi_cq_init(&smr_prov, domain, attr, util_cq, ofi_cq_progress, context); if (ret) goto free; (*cq_fid) = &util_cq->cq_fid; return 0; free: free(util_cq); return ret; }
int rxd_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { int ret; struct rxd_cq *cq; cq = calloc(1, sizeof(*cq)); if (!cq) return -FI_ENOMEM; ret = ofi_cq_init(&rxd_prov, domain, attr, &cq->util_cq, &ofi_cq_progress, context); if (ret) goto free; switch (attr->format) { case FI_CQ_FORMAT_UNSPEC: case FI_CQ_FORMAT_CONTEXT: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_ctx_signal : rxd_cq_write_ctx; break; case FI_CQ_FORMAT_MSG: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_msg_signal : rxd_cq_write_msg; break; case FI_CQ_FORMAT_DATA: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_data_signal : rxd_cq_write_data; break; case FI_CQ_FORMAT_TAGGED: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_tagged_signal : rxd_cq_write_tagged; break; default: ret = -FI_EINVAL; goto cleanup; } *cq_fid = &cq->util_cq.cq_fid; (*cq_fid)->fid.ops = &rxd_cq_fi_ops; (*cq_fid)->ops = &rxd_cq_ops; return 0; cleanup: ofi_cq_cleanup(&cq->util_cq); free: free(cq); return ret; }
/*
 * udpx_cq_open() - create a completion queue for the UDP provider.
 *
 * A plain util CQ suffices; no provider-specific state is attached.
 *
 * Returns 0 on success or a negative fi_errno value.
 */
int udpx_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr,
		 struct fid_cq **cq_fid, void *context)
{
	struct util_cq *util_cq;
	int ret;

	util_cq = calloc(1, sizeof(*util_cq));
	if (!util_cq)
		return -FI_ENOMEM;

	ret = ofi_cq_init(&udpx_prov, domain, attr, util_cq,
			  &ofi_cq_progress, context);
	if (ret)
		goto err;

	*cq_fid = &util_cq->cq_fid;
	return 0;

err:
	free(util_cq);
	return ret;
}
int rxd_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { int ret; struct rxd_cq *cq; struct rxd_domain *rxd_domain; cq = calloc(1, sizeof(*cq)); if (!cq) return -FI_ENOMEM; ret = ofi_cq_init(&rxd_prov, domain, attr, &cq->util_cq, &rxd_cq_progress, context); if (ret) goto err1; switch (attr->format) { case FI_CQ_FORMAT_UNSPEC: case FI_CQ_FORMAT_CONTEXT: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_ctx_signal : rxd_cq_write_ctx; break; case FI_CQ_FORMAT_MSG: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_msg_signal : rxd_cq_write_msg; break; case FI_CQ_FORMAT_DATA: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_data_signal : rxd_cq_write_data; break; case FI_CQ_FORMAT_TAGGED: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_tagged_signal : rxd_cq_write_tagged; break; default: ret = -FI_EINVAL; goto err2; } rxd_domain = container_of(domain, struct rxd_domain, util_domain.domain_fid); attr->format = FI_CQ_FORMAT_MSG; ret = fi_cq_open(rxd_domain->dg_domain, attr, &cq->dg_cq, context); if (ret) goto err2; cq->unexp_pool = util_buf_pool_create( RXD_EP_MAX_UNEXP_PKT * sizeof (struct rxd_unexp_cq_entry), RXD_BUF_POOL_ALIGNMENT, 0, RXD_EP_MAX_UNEXP_PKT); if (!cq->unexp_pool) { ret = -FI_ENOMEM; goto err3; } dlist_init(&cq->dom_entry); dlist_init(&cq->unexp_list); fastlock_init(&cq->lock); fastlock_acquire(&rxd_domain->lock); dlist_insert_tail(&cq->dom_entry, &rxd_domain->cq_list); fastlock_release(&rxd_domain->lock); *cq_fid = &cq->util_cq.cq_fid; (*cq_fid)->fid.ops = &rxd_cq_fi_ops; (*cq_fid)->ops = &rxd_cq_ops; cq->domain = rxd_domain; return 0; err3: ofi_cq_cleanup(&cq->util_cq); err2: fi_close(&cq->dg_cq->fid); err1: free(cq); return ret; }
static int tcpx_buf_pools_create(struct tcpx_buf_pool *buf_pools) { int i, ret; struct ofi_bufpool_attr attr = { .size = sizeof(struct tcpx_xfer_entry), .alignment = 16, .chunk_cnt = 1024, .init_fn = tcpx_buf_pool_init, }; for (i = 0; i < TCPX_OP_CODE_MAX; i++) { buf_pools[i].op_type = i; attr.context = &buf_pools[i]; ret = ofi_bufpool_create_attr(&attr, &buf_pools[i].pool); if (ret) { FI_WARN(&tcpx_prov, FI_LOG_EP_CTRL, "Unable to create buf pool\n"); goto err; } } return 0; err: while (i--) ofi_bufpool_destroy(buf_pools[i].pool); return -ret; } int tcpx_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { int ret; struct tcpx_cq *tcpx_cq; tcpx_cq = calloc(1, sizeof(*tcpx_cq)); if (!tcpx_cq) return -FI_ENOMEM; if (!attr->size) attr->size = TCPX_DEF_CQ_SIZE; ret = tcpx_buf_pools_create(tcpx_cq->buf_pools); if (ret) goto free_cq; ret = ofi_cq_init(&tcpx_prov, domain, attr, &tcpx_cq->util_cq, &ofi_cq_progress, context); if (ret) goto destroy_pool; *cq_fid = &tcpx_cq->util_cq.cq_fid; (*cq_fid)->fid.ops = &tcpx_cq_fi_ops; return 0; destroy_pool: tcpx_buf_pools_destroy(tcpx_cq->buf_pools); free_cq: free(tcpx_cq); return ret; }