/*
 * Release a UDP provider CQ: tear down the util_cq state and free the
 * object. Returns 0 on success or the ofi_cq_cleanup() error, in which
 * case the CQ is left allocated.
 */
static int udpx_cq_close(struct fid *fid)
{
	struct util_cq *cq = container_of(fid, struct util_cq, cq_fid.fid);
	int ret = ofi_cq_cleanup(cq);

	if (ret)
		return ret;

	free(cq);
	return 0;
}
/*
 * Release a TCP provider CQ: destroy its buffer pools, tear down the
 * embedded util_cq, then free the object. On ofi_cq_cleanup() failure
 * the error is returned and the CQ object is not freed.
 */
static int tcpx_cq_close(struct fid *fid)
{
	struct tcpx_cq *cq = container_of(fid, struct tcpx_cq,
					  util_cq.cq_fid.fid);
	int ret;

	tcpx_buf_pools_destroy(cq->buf_pools);

	ret = ofi_cq_cleanup(&cq->util_cq);
	if (ret)
		return ret;

	free(cq);
	return 0;
}
int rxd_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { int ret; struct rxd_cq *cq; cq = calloc(1, sizeof(*cq)); if (!cq) return -FI_ENOMEM; ret = ofi_cq_init(&rxd_prov, domain, attr, &cq->util_cq, &ofi_cq_progress, context); if (ret) goto free; switch (attr->format) { case FI_CQ_FORMAT_UNSPEC: case FI_CQ_FORMAT_CONTEXT: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_ctx_signal : rxd_cq_write_ctx; break; case FI_CQ_FORMAT_MSG: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_msg_signal : rxd_cq_write_msg; break; case FI_CQ_FORMAT_DATA: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_data_signal : rxd_cq_write_data; break; case FI_CQ_FORMAT_TAGGED: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_tagged_signal : rxd_cq_write_tagged; break; default: ret = -FI_EINVAL; goto cleanup; } *cq_fid = &cq->util_cq.cq_fid; (*cq_fid)->fid.ops = &rxd_cq_fi_ops; (*cq_fid)->ops = &rxd_cq_ops; return 0; cleanup: ofi_cq_cleanup(&cq->util_cq); free: free(cq); return ret; }
/*
 * Close an RXM CQ: tear down the util_cq, close the underlying MSG CQ,
 * and free the object. Both teardown steps are always attempted; if
 * both fail, the MSG CQ close error is the one returned.
 */
static int rxm_cq_close(struct fid *fid)
{
	struct rxm_cq *cq = container_of(fid, struct rxm_cq,
					 util_cq.cq_fid.fid);
	int err = 0;
	int ret;

	ret = ofi_cq_cleanup(&cq->util_cq);
	if (ret)
		err = ret;

	ret = fi_close(&cq->msg_cq->fid);
	if (ret) {
		FI_WARN(&rxm_prov, FI_LOG_CQ, "Unable to close MSG CQ\n");
		err = ret;
	}

	free(cq);
	return err;
}
/*
 * Close an RXD CQ: unlink it from the owning domain's CQ list, close the
 * underlying datagram CQ, tear down the util_cq, and release the
 * unexpected-entry pool and the CQ object itself.
 *
 * NOTE(review): if fi_close() or ofi_cq_cleanup() fails, this returns
 * early after the CQ has already been removed from the domain list and
 * its lock destroyed, leaking unexp_pool and the cq object — confirm
 * this matches the provider's error-handling convention.
 */
static int rxd_cq_close(struct fid *fid)
{
	int ret;
	struct rxd_cq *cq;

	cq = container_of(fid, struct rxd_cq, util_cq.cq_fid.fid);

	/* Detach from the domain's CQ list under the domain lock. */
	fastlock_acquire(&cq->domain->lock);
	dlist_remove(&cq->dom_entry);
	fastlock_release(&cq->domain->lock);

	fastlock_destroy(&cq->lock);

	/* Close the underlying datagram CQ before util_cq teardown. */
	ret = fi_close(&cq->dg_cq->fid);
	if (ret)
		return ret;

	ret = ofi_cq_cleanup(&cq->util_cq);
	if (ret)
		return ret;

	util_buf_pool_destroy(cq->unexp_pool);
	free(cq);
	return 0;
}
int rxd_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { int ret; struct rxd_cq *cq; struct rxd_domain *rxd_domain; cq = calloc(1, sizeof(*cq)); if (!cq) return -FI_ENOMEM; ret = ofi_cq_init(&rxd_prov, domain, attr, &cq->util_cq, &rxd_cq_progress, context); if (ret) goto err1; switch (attr->format) { case FI_CQ_FORMAT_UNSPEC: case FI_CQ_FORMAT_CONTEXT: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_ctx_signal : rxd_cq_write_ctx; break; case FI_CQ_FORMAT_MSG: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_msg_signal : rxd_cq_write_msg; break; case FI_CQ_FORMAT_DATA: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_data_signal : rxd_cq_write_data; break; case FI_CQ_FORMAT_TAGGED: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_tagged_signal : rxd_cq_write_tagged; break; default: ret = -FI_EINVAL; goto err2; } rxd_domain = container_of(domain, struct rxd_domain, util_domain.domain_fid); attr->format = FI_CQ_FORMAT_MSG; ret = fi_cq_open(rxd_domain->dg_domain, attr, &cq->dg_cq, context); if (ret) goto err2; cq->unexp_pool = util_buf_pool_create( RXD_EP_MAX_UNEXP_PKT * sizeof (struct rxd_unexp_cq_entry), RXD_BUF_POOL_ALIGNMENT, 0, RXD_EP_MAX_UNEXP_PKT); if (!cq->unexp_pool) { ret = -FI_ENOMEM; goto err3; } dlist_init(&cq->dom_entry); dlist_init(&cq->unexp_list); fastlock_init(&cq->lock); fastlock_acquire(&rxd_domain->lock); dlist_insert_tail(&cq->dom_entry, &rxd_domain->cq_list); fastlock_release(&rxd_domain->lock); *cq_fid = &cq->util_cq.cq_fid; (*cq_fid)->fid.ops = &rxd_cq_fi_ops; (*cq_fid)->ops = &rxd_cq_ops; cq->domain = rxd_domain; return 0; err3: ofi_cq_cleanup(&cq->util_cq); err2: fi_close(&cq->dg_cq->fid); err1: free(cq); return ret; }