/*
 * Criterion unit test for the gnix reference-count helpers.
 *
 * Verifies that _gnix_ref_init() seeds the counter, _gnix_ref_get()/
 * _gnix_ref_put() return the post-operation count, and that the
 * destructor callback (test_destruct, which sets test.destructed)
 * fires exactly when the count reaches zero.
 */
Test(utils, references)
{
	int refs;
	struct gnix_reference_tester test;

	/* initialize test structure */
	_gnix_ref_init(&test.ref_cnt, 1, test_destruct);
	test.destructed = 0;

	/* check for validity */
	cr_assert(atomic_get(&test.ref_cnt.references) == 1);
	cr_assert(test.destructed == 0);

	/* increment refs and check */
	refs = _gnix_ref_get(&test);
	cr_assert(refs == 2);
	cr_assert(atomic_get(&test.ref_cnt.references) == 2);
	cr_assert(test.destructed == 0);

	/* decrement refs and check */
	refs = _gnix_ref_put(&test);
	cr_assert(refs == 1);
	cr_assert(atomic_get(&test.ref_cnt.references) == 1);
	cr_assert(test.destructed == 0);

	/* decrement and destruct, check for validity */
	refs = _gnix_ref_put(&test);
	cr_assert(refs == 0);
	cr_assert(atomic_get(&test.ref_cnt.references) == 0);
	cr_assert(test.destructed == 1);
}
static int __gnix_deregister_region( void *handle, void *context) { struct gnix_fid_mem_desc *mr = (struct gnix_fid_mem_desc *) handle; gni_return_t ret; struct gnix_fid_domain *domain; struct gnix_nic *nic; domain = mr->domain; nic = mr->nic; COND_ACQUIRE(nic->requires_lock, &nic->lock); ret = GNI_MemDeregister(nic->gni_nic_hndl, &mr->mem_hndl); COND_RELEASE(nic->requires_lock, &nic->lock); if (ret == GNI_RC_SUCCESS) { /* release reference to domain */ _gnix_ref_put(domain); /* release reference to nic */ _gnix_ref_put(nic); } else { GNIX_INFO(FI_LOG_MR, "failed to deregister memory" " region, entry=%p ret=%i\n", handle, ret); } return ret; }
/**
 * Close a shared transmit context.
 *
 * @param[in] fid  fid of a previously allocated gnix_fid_stx structure
 *
 * @return FI_SUCCESS  shared tx context successfully closed
 * @return -FI_EINVAL  invalid arg(s) supplied
 *
 * @note The underlying structure is not actually freed until every
 *       holder of a reference to the stx has dropped its reference.
 */
static int gnix_stx_close(fid_t fid)
{
	struct gnix_fid_stx *stx =
		container_of(fid, struct gnix_fid_stx, stx_fid.fid);

	GNIX_TRACE(FI_LOG_DOMAIN, "\n");

	/* reject anything that is not actually an STX fid */
	if (stx->stx_fid.fid.fclass != FI_CLASS_STX_CTX)
		return -FI_EINVAL;

	/* drop the domain reference taken at stx creation, then our own */
	_gnix_ref_put(stx->domain);
	_gnix_ref_put(stx);

	return FI_SUCCESS;
}
int gnix_wait_close(struct fid *wait) { struct gnix_fid_wait *wait_priv; GNIX_TRACE(WAIT_SUB, "\n"); wait_priv = container_of(wait, struct gnix_fid_wait, wait.fid); if (!slist_empty(&wait_priv->set)) { GNIX_WARN(WAIT_SUB, "resources still connected to wait set.\n"); return -FI_EBUSY; } if (wait_priv->type == FI_WAIT_FD) { close(wait_priv->fd[WAIT_READ]); close(wait_priv->fd[WAIT_WRITE]); } _gnix_ref_put(wait_priv->fabric); free(wait_priv); return FI_SUCCESS; }
/**
 * Closes and deallocates a libfabric memory registration in the internal cache
 *
 * @param[in] fid  libfabric memory registration fid
 *
 * @return FI_SUCCESS on success
 *         -FI_EINVAL on invalid fid
 *         -FI_NOENT when there isn't a matching registration for the
 *           provided fid
 *         Otherwise, GNI_RC_* ret codes converted to FI_* err codes
 */
static int fi_gnix_mr_close(fid_t fid)
{
	struct gnix_fid_mem_desc *mr;
	/* dereg_mr returns FI_* codes (compared against FI_SUCCESS below),
	 * so this must be a plain int, not gni_return_t */
	int ret;
	struct gnix_fid_domain *domain;

	GNIX_TRACE(FI_LOG_MR, "\n");

	if (unlikely(fid->fclass != FI_CLASS_MR))
		return -FI_EINVAL;

	mr = container_of(fid, struct gnix_fid_mem_desc, mr_fid.fid);

	/* save the domain pointer before the descriptor can be torn down */
	domain = mr->domain;

	/* call cache deregister op */
	fastlock_acquire(&domain->mr_cache_lock);
	ret = domain->mr_ops->dereg_mr(domain, mr);
	fastlock_release(&domain->mr_cache_lock);

	/* check retcode */
	if (likely(ret == FI_SUCCESS)) {
		/* release references to the domain and nic */
		_gnix_ref_put(domain);
	} else {
		GNIX_INFO(FI_LOG_MR, "failed to deregister memory, "
			  "ret=%i\n", ret);
	}

	return ret;
}
static void __domain_destruct(void *obj) { int ret = FI_SUCCESS; struct gnix_fid_domain *domain = (struct gnix_fid_domain *) obj; GNIX_TRACE(FI_LOG_DOMAIN, "\n"); ret = _gnix_close_cache(domain); if (ret != FI_SUCCESS) GNIX_FATAL(FI_LOG_MR, "failed to close memory registration cache\n"); ret = _gnix_notifier_close(domain->mr_cache_attr.notifier); if (ret != FI_SUCCESS) GNIX_FATAL(FI_LOG_MR, "failed to close MR notifier\n"); /* * remove from the list of cdms attached to fabric */ dlist_remove_init(&domain->list); _gnix_ref_put(domain->fabric); memset(domain, 0, sizeof *domain); free(domain); }
/*
 * Close a domain fid.
 *
 * Flushes both (RO and RW) memory-registration caches under the cache
 * lock, then drops the caller's domain reference.  If other objects
 * (eps, mrs, an eq) still hold the domain, destruction is deferred
 * until their references are released.
 */
static int gnix_domain_close(fid_t fid)
{
	int rc = FI_SUCCESS;
	int remaining;
	struct gnix_fid_domain *domain;

	GNIX_TRACE(FI_LOG_DOMAIN, "\n");

	domain = container_of(fid, struct gnix_fid_domain, domain_fid.fid);
	if (domain->domain_fid.fid.fclass != FI_CLASS_DOMAIN) {
		rc = -FI_EINVAL;
		goto err;
	}

	/* before checking the refcnt, flush the memory registration cache */
	if (domain->mr_cache_ro) {
		fastlock_acquire(&domain->mr_cache_lock);
		rc = _gnix_mr_cache_flush(domain->mr_cache_ro);
		fastlock_release(&domain->mr_cache_lock);
		if (rc != FI_SUCCESS) {
			GNIX_WARN(FI_LOG_DOMAIN,
				  "failed to flush memory cache on domain close\n");
			goto err;
		}
	}

	if (domain->mr_cache_rw) {
		fastlock_acquire(&domain->mr_cache_lock);
		rc = _gnix_mr_cache_flush(domain->mr_cache_rw);
		fastlock_release(&domain->mr_cache_lock);
		if (rc != FI_SUCCESS) {
			GNIX_WARN(FI_LOG_DOMAIN,
				  "failed to flush memory cache on domain close\n");
			goto err;
		}
	}

	/*
	 * if non-zero refcnt, there are eps, mrs, and/or an eq associated
	 * with this domain which have not been closed.
	 */
	remaining = _gnix_ref_put(domain);
	if (remaining)
		GNIX_INFO(FI_LOG_DOMAIN, "failed to fully close domain due to "
			  "lingering references. references=%i dom=%p\n",
			  remaining, domain);

	GNIX_INFO(FI_LOG_DOMAIN, "gnix_domain_close invoked returning %d\n",
		  rc);
err:
	return rc;
}
/*
 * Release the caller's reference on a cm_nic.
 *
 * @param[in] cm_nic  cm_nic to release; must not be NULL
 *
 * @return FI_SUCCESS on success, -FI_EINVAL on a NULL argument.
 *
 * Actual destruction happens in the refcount destructor once the last
 * reference is dropped.
 */
int _gnix_cm_nic_free(struct gnix_cm_nic *cm_nic)
{
	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (!cm_nic)
		return -FI_EINVAL;

	_gnix_ref_put(cm_nic);

	return FI_SUCCESS;
}
/*
 * Reference-count destructor for a tx/rx context belonging to a
 * scalable endpoint.  Drops the underlying ep reference and, only when
 * that was the last ep reference, the scalable ep's cm_nic reference;
 * then drops the sep reference and frees the trx.
 */
static void __trx_destruct(void *obj)
{
	struct gnix_fid_trx *trx = (struct gnix_fid_trx *) obj;
	struct gnix_fid_ep *ep_priv = trx->ep;
	struct gnix_fid_sep *sep_priv = trx->sep;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	assert(ep_priv != NULL);
	assert(sep_priv != NULL);

	/* the cm_nic hold is tied to the last reference on the ep */
	if (_gnix_ref_put(ep_priv) == 0)
		_gnix_ref_put(sep_priv->cm_nic);

	_gnix_ref_put(sep_priv);

	free(trx);
}
/*
 * Close a fabric fid.  Drops the caller's reference; if other objects
 * still reference the fabric, destruction is deferred and a note is
 * logged.  Always returns FI_SUCCESS.
 */
static int gnix_fabric_close(fid_t fid)
{
	int remaining;
	struct gnix_fid_fabric *fab =
		container_of(fid, struct gnix_fid_fabric, fab_fid);

	remaining = _gnix_ref_put(fab);
	if (remaining)
		GNIX_INFO(FI_LOG_FABRIC, "failed to fully close fabric due "
			  "to lingering references. references=%i fabric=%p\n",
			  remaining, fab);

	return FI_SUCCESS;
}
/*
 * Close a passive endpoint fid.  Drops the caller's reference; actual
 * destruction (in __pep_destruct) is deferred while other references
 * remain.  Always returns FI_SUCCESS.
 */
static int gnix_pep_close(fid_t fid)
{
	struct gnix_fid_pep *pep =
		container_of(fid, struct gnix_fid_pep, pep_fid.fid);
	int remaining;

	remaining = _gnix_ref_put(pep);
	if (remaining)
		GNIX_INFO(FI_LOG_EP_CTRL, "failed to fully close pep due "
			  "to lingering references. references=%i pep=%p\n",
			  remaining, pep);

	return FI_SUCCESS;
}
/*
 * Reference-count destructor for a passive endpoint: destroys its lock,
 * closes the listening socket (if one was opened), detaches from and
 * releases the bound EQ, then frees the structure.
 */
static void __pep_destruct(void *obj)
{
	struct gnix_fid_pep *pep = (struct gnix_fid_pep *)obj;

	GNIX_DEBUG(FI_LOG_EP_CTRL, "Destroying PEP: %p\n", pep);

	fastlock_destroy(&pep->lock);

	/* listen_fd is < 0 when no listener was ever set up */
	if (pep->listen_fd >= 0)
		close(pep->listen_fd);

	if (pep->eq) {
		/* stop the EQ from polling this fid before dropping it */
		_gnix_eq_poll_obj_rem(pep->eq, &pep->pep_fid.fid);
		_gnix_ref_put(pep->eq);
	}

	free(pep);
}
/*
 * Reference-count destructor for a cm_nic: unlinks it from the global
 * cm_nic list, frees its datagram handle and address-to-ep hash table,
 * drops its nic reference, clears the owning domain's back pointer, and
 * frees the structure.  Sub-teardown failures are logged, not fatal.
 */
static void __cm_nic_destruct(void *obj)
{
	int ret;
	struct gnix_cm_nic *cm_nic = (struct gnix_cm_nic *)obj;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	/* remove from the global cm_nic list under its lock */
	pthread_mutex_lock(&gnix_cm_nic_list_lock);
	dlist_remove(&cm_nic->cm_nic_list);
	pthread_mutex_unlock(&gnix_cm_nic_list_lock);

	if (cm_nic->dgram_hndl != NULL) {
		ret = _gnix_dgram_hndl_free(cm_nic->dgram_hndl);
		if (ret != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_CTRL,
				  "gnix_dgram_hndl_free returned %d\n",
				  ret);
	}

	if (cm_nic->addr_to_ep_ht != NULL) {
		ret = _gnix_ht_destroy(cm_nic->addr_to_ep_ht);
		if (ret != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_CTRL,
				  "gnix_ht_destroy returned %d\n",
				  ret);
		free(cm_nic->addr_to_ep_ht);
		cm_nic->addr_to_ep_ht = NULL;
	}

	if (cm_nic->nic != NULL) {
		_gnix_ref_put(cm_nic->nic);
		cm_nic->nic = NULL;
	}

	/* the owning domain no longer has a cm_nic after this point */
	cm_nic->domain->cm_nic = NULL;

	free(cm_nic);
}
/*
 * Close an address vector fid.  Validates the argument, then drops the
 * caller's reference; destruction is deferred while other objects still
 * reference the AV.
 */
static int gnix_av_close(fid_t fid)
{
	struct gnix_fid_av *av;
	int remaining;

	GNIX_TRACE(FI_LOG_AV, "\n");

	if (!fid)
		return -FI_EINVAL;

	av = container_of(fid, struct gnix_fid_av, av_fid.fid);

	remaining = _gnix_ref_put(av);
	if (remaining)
		GNIX_INFO(FI_LOG_AV,
			  "failed to fully close av due to lingering "
			  "references. references=%i av=%p\n",
			  remaining, av);

	return FI_SUCCESS;
}
/*
 * Attach a wait object to a CQ based on the requested wait_obj kind.
 * For provider-managed kinds a new wait object is opened on the fabric;
 * for FI_WAIT_SET the CQ is added to the application-supplied set.
 */
static int gnix_cq_set_wait(struct gnix_fid_cq *cq)
{
	int ret = FI_SUCCESS;

	GNIX_TRACE(FI_LOG_CQ, "\n");

	struct fi_wait_attr requested = {
		.wait_obj = cq->attr.wait_obj,
		.flags = 0
	};

	switch (cq->attr.wait_obj) {
	case FI_WAIT_UNSPEC:
	case FI_WAIT_FD:
	case FI_WAIT_MUTEX_COND:
		/* provider-managed wait object: create one on the fabric */
		ret = gnix_wait_open(&cq->domain->fabric->fab_fid,
				&requested, &cq->wait);
		break;
	case FI_WAIT_SET:
		/* application-supplied wait set: attach this CQ to it */
		ret = _gnix_wait_set_add(cq->attr.wait_set, &cq->cq_fid.fid);
		if (!ret)
			cq->wait = cq->attr.wait_set;
		break;
	default:
		break;
	}

	return ret;
}

/* Queue callback: free one CQ entry and its payload buffer. */
static void free_cq_entry(struct slist_entry *item)
{
	struct gnix_cq_entry *entry;

	entry = container_of(item, struct gnix_cq_entry, item);

	free(entry->the_entry);
	free(entry);
}

/*
 * Queue callback: allocate one CQ entry plus a payload buffer of
 * `size` bytes.  Returns the embedded slist link, or NULL on OOM.
 */
static struct slist_entry *alloc_cq_entry(size_t size)
{
	struct gnix_cq_entry *entry = malloc(sizeof(*entry));

	if (!entry) {
		GNIX_DEBUG(FI_LOG_CQ, "out of memory\n");
		goto err;
	}

	entry->the_entry = malloc(size);
	if (!entry->the_entry) {
		GNIX_DEBUG(FI_LOG_CQ, "out of memory\n");
		goto cleanup;
	}

	return &entry->item;

cleanup:
	free(entry);
err:
	return NULL;
}

/* Drive progress on everything registered with this CQ's progress set. */
static int __gnix_cq_progress(struct gnix_fid_cq *cq)
{
	return _gnix_prog_progress(&cq->pset);
}

/*******************************************************************************
 * Exposed helper functions
 ******************************************************************************/

/*
 * Post a completion event to the CQ.  When the ep runs in
 * FI_NOTIFY_FLAGS_ONLY mode, the reported flags are masked down to the
 * notification-relevant bits.  Signals the wait object, if any.
 */
ssize_t _gnix_cq_add_event(struct gnix_fid_cq *cq, struct gnix_fid_ep *ep,
			   void *op_context, uint64_t flags, size_t len,
			   void *buf, uint64_t data, uint64_t tag,
			   fi_addr_t src_addr)
{
	struct gnix_cq_entry *event;
	struct slist_entry *item;
	uint64_t mask;
	ssize_t ret = FI_SUCCESS;

	if (ep) {
		if (ep->info && ep->info->mode & FI_NOTIFY_FLAGS_ONLY) {
			mask = (FI_REMOTE_CQ_DATA | FI_MULTI_RECV);

			if (flags & FI_RMA_EVENT) {
				mask |= (FI_REMOTE_READ |
					 FI_REMOTE_WRITE |
					 FI_RMA);
			}

			flags &= mask;
		}
	}

	COND_ACQUIRE(cq->requires_lock, &cq->lock);

	item = _gnix_queue_get_free(cq->events);
	if (!item) {
		GNIX_DEBUG(FI_LOG_CQ, "error creating cq_entry\n");
		ret = -FI_ENOMEM;
		goto err;
	}

	event = container_of(item, struct gnix_cq_entry, item);

	assert(event->the_entry);

	/* format-specific writer chosen by the CQ's configured format */
	fill_function[cq->attr.format](event->the_entry, op_context, flags,
			len, buf, data, tag);
	event->src_addr = src_addr;

	_gnix_queue_enqueue(cq->events, &event->item);
	GNIX_DEBUG(FI_LOG_CQ, "Added event: %lx\n", op_context);

	if (cq->wait)
		_gnix_signal_wait_obj(cq->wait);

err:
	COND_RELEASE(cq->requires_lock, &cq->lock);

	return ret;
}

/*
 * Post an error completion to the CQ's error queue and signal the wait
 * object, if any.  Returns -FI_ENOMEM when no free error entry exists.
 */
ssize_t _gnix_cq_add_error(struct gnix_fid_cq *cq, void *op_context,
			   uint64_t flags, size_t len, void *buf,
			   uint64_t data, uint64_t tag, size_t olen,
			   int err, int prov_errno, void *err_data,
			   size_t err_data_size)
{
	struct fi_cq_err_entry *error;
	struct gnix_cq_entry *event;
	struct slist_entry *item;

	ssize_t ret = FI_SUCCESS;

	GNIX_INFO(FI_LOG_CQ, "creating error event entry\n");

	COND_ACQUIRE(cq->requires_lock, &cq->lock);

	item = _gnix_queue_get_free(cq->errors);
	if (!item) {
		GNIX_WARN(FI_LOG_CQ, "error creating error entry\n");
		ret = -FI_ENOMEM;
		goto err;
	}

	event = container_of(item, struct gnix_cq_entry, item);
	error = event->the_entry;

	error->op_context = op_context;
	error->flags = flags;
	error->len = len;
	error->buf = buf;
	error->data = data;
	error->tag = tag;
	error->olen = olen;
	error->err = err;
	error->prov_errno = prov_errno;
	error->err_data = err_data;
	error->err_data_size = err_data_size;

	_gnix_queue_enqueue(cq->errors, &event->item);

	if (cq->wait)
		_gnix_signal_wait_obj(cq->wait);

err:
	COND_RELEASE(cq->requires_lock, &cq->lock);

	return ret;
}

/* Register an object/progress function pair with the CQ's progress set. */
int _gnix_cq_poll_obj_add(struct gnix_fid_cq *cq, void *obj,
			  int (*prog_fn)(void *data))
{
	return _gnix_prog_obj_add(&cq->pset, obj, prog_fn);
}

/* Remove an object/progress function pair from the CQ's progress set. */
int _gnix_cq_poll_obj_rem(struct gnix_fid_cq *cq, void *obj,
			  int (*prog_fn)(void *data))
{
	return _gnix_prog_obj_rem(&cq->pset, obj, prog_fn);
}

/*
 * Reference-count destructor for a CQ: releases the domain reference,
 * detaches/closes the wait object as appropriate for its kind, tears
 * down the progress set and both queues, and frees the heap-allocated
 * ops tables along with the CQ itself.
 */
static void __cq_destruct(void *obj)
{
	struct gnix_fid_cq *cq = (struct gnix_fid_cq *) obj;

	_gnix_ref_put(cq->domain);

	switch (cq->attr.wait_obj) {
	case FI_WAIT_NONE:
		break;
	case FI_WAIT_SET:
		/* application owns the set; just detach this CQ */
		_gnix_wait_set_remove(cq->wait, &cq->cq_fid.fid);
		break;
	case FI_WAIT_UNSPEC:
	case FI_WAIT_FD:
	case FI_WAIT_MUTEX_COND:
		/* provider-created wait object; we own and close it */
		assert(cq->wait);
		gnix_wait_close(&cq->wait->fid);
		break;
	default:
		GNIX_WARN(FI_LOG_CQ, "format: %d unsupported.\n",
			  cq->attr.wait_obj);
		break;
	}

	_gnix_prog_fini(&cq->pset);

	_gnix_queue_destroy(cq->events);
	_gnix_queue_destroy(cq->errors);

	fastlock_destroy(&cq->lock);
	free(cq->cq_fid.ops);
	free(cq->cq_fid.fid.ops);
	free(cq);
}

/*******************************************************************************
 * API functions.
 ******************************************************************************/

/*
 * Close a CQ fid.  Drops the caller's reference; destruction is
 * deferred while other objects still reference the CQ.
 */
static int gnix_cq_close(fid_t fid)
{
	struct gnix_fid_cq *cq;
	int references_held;

	GNIX_TRACE(FI_LOG_CQ, "\n");

	cq = container_of(fid, struct gnix_fid_cq, cq_fid);

	references_held = _gnix_ref_put(cq);

	if (references_held) {
		GNIX_INFO(FI_LOG_CQ,
			  "failed to fully close cq due to lingering "
			  "references. references=%i cq=%p\n",
			  references_held, cq);
	}

	return FI_SUCCESS;
}

/*
 * Drain up to `count` completions into `buf` (and source addresses into
 * `src_addr` when non-NULL).  Progresses the CQ first; returns
 * -FI_EAVAIL if errors are pending, -FI_EAGAIN when nothing was read.
 */
static ssize_t __gnix_cq_readfrom(struct fid_cq *cq, void *buf,
				  size_t count, fi_addr_t *src_addr)
{
	struct gnix_fid_cq *cq_priv;
	struct gnix_cq_entry *event;
	struct slist_entry *temp;

	ssize_t read_count = 0;

	if (!cq || !buf || !count)
		return -FI_EINVAL;

	cq_priv = container_of(cq, struct gnix_fid_cq, cq_fid);

	__gnix_cq_progress(cq_priv);

	/* errors must be consumed via readerr before normal reads resume */
	if (_gnix_queue_peek(cq_priv->errors))
		return -FI_EAVAIL;

	COND_ACQUIRE(cq_priv->requires_lock, &cq_priv->lock);

	while (_gnix_queue_peek(cq_priv->events) && count--) {
		temp = _gnix_queue_dequeue(cq_priv->events);
		event = container_of(temp, struct gnix_cq_entry, item);

		assert(event->the_entry);
		memcpy(buf, event->the_entry, cq_priv->entry_size);
		if (src_addr)
			memcpy(&src_addr[read_count], &event->src_addr,
			       sizeof(fi_addr_t));

		_gnix_queue_enqueue_free(cq_priv->events, &event->item);

		buf = (void *) ((uint8_t *) buf + cq_priv->entry_size);

		read_count++;
	}

	COND_RELEASE(cq_priv->requires_lock, &cq_priv->lock);

	/* GNU ?: — entries read, or -FI_EAGAIN when none were available */
	return read_count ?: -FI_EAGAIN;
}

/*
 * Common implementation behind the (s)read(from) entry points.  When
 * `blocking`, waits on the CQ's wait object before reading; blocking
 * reads require a provider-managed wait object (not an FI_WAIT_SET).
 */
static ssize_t __gnix_cq_sreadfrom(int blocking, struct fid_cq *cq,
				   void *buf, size_t count,
				   fi_addr_t *src_addr, const void *cond,
				   int timeout)
{
	struct gnix_fid_cq *cq_priv;

	cq_priv = container_of(cq, struct gnix_fid_cq, cq_fid);
	if ((blocking && !cq_priv->wait) ||
	    (blocking && cq_priv->attr.wait_obj == FI_WAIT_SET))
		return -FI_EINVAL;

	if (_gnix_queue_peek(cq_priv->errors))
		return -FI_EAVAIL;

	if (cq_priv->wait)
		gnix_wait_wait((struct fid_wait *)cq_priv->wait, timeout);

	return __gnix_cq_readfrom(cq, buf, count, src_addr);
}

/* Blocking read with source addresses. */
DIRECT_FN STATIC ssize_t gnix_cq_sreadfrom(struct fid_cq *cq, void *buf,
					   size_t count, fi_addr_t *src_addr,
					   const void *cond, int timeout)
{
	return __gnix_cq_sreadfrom(1, cq, buf, count, src_addr, cond,
				   timeout);
}

/* Non-blocking read without source addresses. */
DIRECT_FN STATIC ssize_t gnix_cq_read(struct fid_cq *cq,
				      void *buf,
				      size_t count)
{
	return __gnix_cq_sreadfrom(0, cq, buf, count, NULL, NULL, 0);
}

/* Blocking read without source addresses. */
DIRECT_FN STATIC ssize_t gnix_cq_sread(struct fid_cq *cq, void *buf,
				       size_t count, const void *cond,
				       int timeout)
{
	return __gnix_cq_sreadfrom(1, cq, buf, count, NULL, cond, timeout);
}

/* Non-blocking read with source addresses. */
DIRECT_FN STATIC ssize_t gnix_cq_readfrom(struct fid_cq *cq, void *buf,
					  size_t count, fi_addr_t *src_addr)
{
	return __gnix_cq_sreadfrom(0, cq, buf, count, src_addr, NULL, 0);
}

/*
 * Pop one error completion into `buf`.  err_data handling depends on
 * the negotiated api version: < 1.5 (or err_data_size == 0) copies into
 * a provider-owned buffer, >= 1.5 copies into the caller-supplied
 * buffer, bounded by buf->err_data_size.
 */
DIRECT_FN STATIC ssize_t gnix_cq_readerr(struct fid_cq *cq,
					 struct fi_cq_err_entry *buf,
					 uint64_t flags)
{
	struct gnix_fid_cq *cq_priv;
	struct gnix_cq_entry *event;
	struct slist_entry *entry;
	size_t err_data_cpylen;
	struct fi_cq_err_entry *gnix_cq_err;

	ssize_t read_count = 0;

	if (!cq || !buf)
		return -FI_EINVAL;

	cq_priv = container_of(cq, struct gnix_fid_cq, cq_fid);

	/*
	 * we need to progress cq.  some apps may be only using
	 * cq to check for errors.
	 */
	_gnix_prog_progress(&cq_priv->pset);

	COND_ACQUIRE(cq_priv->requires_lock, &cq_priv->lock);

	entry = _gnix_queue_dequeue(cq_priv->errors);
	if (!entry) {
		read_count = -FI_EAGAIN;
		goto err;
	}

	event = container_of(entry, struct gnix_cq_entry, item);
	gnix_cq_err = event->the_entry;

	buf->op_context = gnix_cq_err->op_context;
	buf->flags = gnix_cq_err->flags;
	buf->len = gnix_cq_err->len;
	buf->buf = gnix_cq_err->buf;
	buf->data = gnix_cq_err->data;
	buf->tag = gnix_cq_err->tag;
	buf->olen = gnix_cq_err->olen;
	buf->err = gnix_cq_err->err;
	buf->prov_errno = gnix_cq_err->prov_errno;

	if (gnix_cq_err->err_data != NULL) {
		/*
		 * Note: If the api version is >= 1.5 then copy err_data into
		 * buf->err_data and copy at most buf->err_data_size.
		 * If buf->err_data_size is zero or the api version is < 1.5,
		 * use the old method of allocating space in provider.
		 */
		if (FI_VERSION_LT(cq_priv->domain->fabric->fab_fid.api_version,
		    FI_VERSION(1, 5)) || buf->err_data_size == 0) {
			err_data_cpylen = sizeof(cq_priv->err_data);

			memcpy(cq_priv->err_data, gnix_cq_err->err_data,
				err_data_cpylen);

			buf->err_data = cq_priv->err_data;
		} else {
			if (buf->err_data == NULL)
				return -FI_EINVAL;

			err_data_cpylen = MIN(buf->err_data_size,
						gnix_cq_err->err_data_size);
			memcpy(buf->err_data, gnix_cq_err->err_data,
			       err_data_cpylen);
			buf->err_data_size = err_data_cpylen;
		}
		free(gnix_cq_err->err_data);
		gnix_cq_err->err_data = NULL;
	} else {
		if (FI_VERSION_LT(cq_priv->domain->fabric->fab_fid.api_version,
		    FI_VERSION(1, 5))) {
			buf->err_data = NULL;
		} else {
			buf->err_data_size = 0;
		}
	}

	_gnix_queue_enqueue_free(cq_priv->errors, &event->item);

	read_count++;

err:
	COND_RELEASE(cq_priv->requires_lock, &cq_priv->lock);

	return read_count;
}

/* No provider-specific error-string translation is offered. */
DIRECT_FN STATIC const char *gnix_cq_strerror(struct fid_cq *cq,
					      int prov_errno,
					      const void *prov_data,
					      char *buf, size_t len)
{
	return NULL;
}

/* Wake up anything blocked on this CQ's wait object. */
DIRECT_FN STATIC int gnix_cq_signal(struct fid_cq *cq)
{
	struct gnix_fid_cq *cq_priv;

	cq_priv = container_of(cq, struct gnix_fid_cq, cq_fid);

	if (cq_priv->wait)
		_gnix_signal_wait_obj(cq_priv->wait);

	return FI_SUCCESS;
}

/* fi_control for CQs: FI_GETWAIT is recognized but not implemented. */
static int gnix_cq_control(struct fid *cq, int command, void *arg)
{
	switch (command) {
	case FI_GETWAIT:
		return -FI_ENOSYS;
	default:
		return -FI_EINVAL;
	}
}

/*
 * Open a completion queue on a domain.
 *
 * Allocates per-CQ copies of the ops tables (freed by __cq_destruct),
 * validates the requested attributes, wires up the wait object, and
 * creates the event and error queues.
 */
DIRECT_FN int gnix_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr,
			   struct fid_cq **cq, void *context)
{
	struct gnix_fid_domain *domain_priv;
	struct gnix_fid_cq *cq_priv;
	struct fi_ops_cq *cq_ops;
	struct fi_ops *fi_cq_ops;

	int ret = FI_SUCCESS;

	GNIX_TRACE(FI_LOG_CQ, "\n");

	cq_ops = calloc(1, sizeof(*cq_ops));
	if (!cq_ops) {
		return -FI_ENOMEM;
	}

	fi_cq_ops = calloc(1, sizeof(*fi_cq_ops));
	if (!fi_cq_ops) {
		ret = -FI_ENOMEM;
		goto free_cq_ops;
	}

	*cq_ops = gnix_cq_ops;
	*fi_cq_ops = gnix_cq_fi_ops;

	ret = verify_cq_attr(attr, cq_ops, fi_cq_ops);
	if (ret)
		goto free_fi_cq_ops;

	domain_priv = container_of(domain, struct gnix_fid_domain, domain_fid);
	if (!domain_priv) {
		ret = -FI_EINVAL;
		goto free_fi_cq_ops;
	}

	cq_priv = calloc(1, sizeof(*cq_priv));
	if (!cq_priv) {
		ret = -FI_ENOMEM;
		goto free_fi_cq_ops;
	}

	/* FI_THREAD_COMPLETION lets the app serialize; otherwise lock */
	cq_priv->requires_lock = (domain_priv->thread_model !=
			FI_THREAD_COMPLETION);

	cq_priv->domain = domain_priv;
	cq_priv->attr = *attr;

	_gnix_ref_init(&cq_priv->ref_cnt, 1, __cq_destruct);
	_gnix_ref_get(cq_priv->domain);

	_gnix_prog_init(&cq_priv->pset);

	cq_priv->cq_fid.fid.fclass = FI_CLASS_CQ;
	cq_priv->cq_fid.fid.context = context;
	cq_priv->cq_fid.fid.ops = fi_cq_ops;
	cq_priv->cq_fid.ops = cq_ops;

	/*
	 * Although we don't need to store entry_size since we're already
	 * storing the format, this might provide a performance benefit
	 * when allocating storage.
	 */
	cq_priv->entry_size = format_sizes[cq_priv->attr.format];

	fastlock_init(&cq_priv->lock);
	ret = gnix_cq_set_wait(cq_priv);
	if (ret)
		goto free_cq_priv;

	ret = _gnix_queue_create(&cq_priv->events, alloc_cq_entry,
				 free_cq_entry, cq_priv->entry_size,
				 cq_priv->attr.size);
	if (ret)
		goto free_cq_priv;

	ret = _gnix_queue_create(&cq_priv->errors, alloc_cq_entry,
				 free_cq_entry, sizeof(struct fi_cq_err_entry),
				 0);
	if (ret)
		goto free_gnix_queue;

	*cq = &cq_priv->cq_fid;
	return ret;

free_gnix_queue:
	_gnix_queue_destroy(cq_priv->events);

free_cq_priv:
	_gnix_ref_put(cq_priv->domain);
	fastlock_destroy(&cq_priv->lock);
	free(cq_priv);

free_fi_cq_ops:
	free(fi_cq_ops);

free_cq_ops:
	free(cq_ops);

	return ret;
}

/*******************************************************************************
 * FI_OPS_* data structures.
 ******************************************************************************/
static const struct fi_ops gnix_cq_fi_ops = {
	.size = sizeof(struct fi_ops),
	.close = gnix_cq_close,
	.bind = fi_no_bind,
	.control = gnix_cq_control,
	.ops_open = fi_no_ops_open
};

static const struct fi_ops_cq gnix_cq_ops = {
	.size = sizeof(struct fi_ops_cq),
	.read = gnix_cq_read,
	.readfrom = gnix_cq_readfrom,
	.readerr = gnix_cq_readerr,
	.sread = gnix_cq_sread,
	.sreadfrom = gnix_cq_sreadfrom,
	.signal = gnix_cq_signal,
	.strerror = gnix_cq_strerror
};
/*
 * Register [address, address+length) with uGNI on behalf of a domain.
 *
 * Finds an existing gnix_nic for the domain's ptag (taking a reference
 * on it) or allocates a fresh one, performs GNI_MemRegister under the
 * nic lock, and on success records the nic and domain in @md and takes
 * a reference on the domain.
 *
 * @return @md on success, NULL on any failure (nic allocation/lookup or
 *         uGNI registration error).
 */
static inline void *__gnix_generic_register(
		struct gnix_fid_domain *domain,
		struct gnix_fid_mem_desc *md,
		void *address,
		size_t length,
		gni_cq_handle_t dst_cq_hndl,
		int flags,
		int vmdh_index)
{
	struct gnix_nic *nic;
	gni_return_t grc = GNI_RC_SUCCESS;
	int rc;

	pthread_mutex_lock(&gnix_nic_list_lock);

	/* If the nic list is empty, create a nic */
	if (unlikely((dlist_empty(&gnix_nic_list_ptag[domain->ptag])))) {
		/* release the lock because we are not checking the list after
		   this point. Additionally, gnix_nic_alloc takes the lock to
		   add the nic. */
		pthread_mutex_unlock(&gnix_nic_list_lock);

		rc = gnix_nic_alloc(domain, NULL, &nic);
		if (rc) {
			GNIX_INFO(FI_LOG_MR,
				  "could not allocate nic to do mr_reg,"
				  " ret=%i\n", rc);
			return NULL;
		}
	} else {
		nic = dlist_first_entry(&gnix_nic_list_ptag[domain->ptag],
					struct gnix_nic, ptag_nic_list);
		if (unlikely(nic == NULL)) {
			GNIX_ERR(FI_LOG_MR, "Failed to find nic on "
				 "ptag list\n");
			pthread_mutex_unlock(&gnix_nic_list_lock);
			return NULL;
		}
		/* hold the nic while it is used for this registration;
		   must happen before the list lock is dropped */
		_gnix_ref_get(nic);
		pthread_mutex_unlock(&gnix_nic_list_lock);
	}

	COND_ACQUIRE(nic->requires_lock, &nic->lock);
	grc = GNI_MemRegister(nic->gni_nic_hndl, (uint64_t) address,
			      length, dst_cq_hndl, flags,
			      vmdh_index, &md->mem_hndl);
	COND_RELEASE(nic->requires_lock, &nic->lock);

	if (unlikely(grc != GNI_RC_SUCCESS)) {
		GNIX_INFO(FI_LOG_MR, "failed to register memory with uGNI, "
			  "ret=%s\n", gni_err_str[grc]);
		/* undo the nic hold taken above */
		_gnix_ref_put(nic);

		return NULL;
	}

	/* set up the mem desc */
	md->nic = nic;
	md->domain = domain;

	/* take references on domain */
	_gnix_ref_get(md->domain);

	return md;
}
/*
 * Reference-count destructor for an endpoint.
 *
 * Unwinds everything the ep holds: its entry in the cm_nic address
 * hash table and its VC hash table (FI_EP_RDM only), its CQ/counter
 * poll registrations and references, its stx/domain/av references, and
 * finally its nic, cm_nic, and request freelists.
 */
static void __ep_destruct(void *obj)
{
	int __attribute__((unused)) ret;
	struct gnix_fid_domain *domain;
	struct gnix_nic *nic;
	struct gnix_fid_av *av;
	struct gnix_cm_nic *cm_nic;
	gnix_ht_key_t *key_ptr;
	struct gnix_fid_ep *ep = (struct gnix_fid_ep *) obj;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	/*
	 * clean up any vc hash table or vector,
	 * remove entry from addr_to_ep ht.
	 * any outstanding GNI internal requests on
	 * the VC's will be completed prior to
	 * destroying the VC entries in the ht.
	 */
	if (ep->type == FI_EP_RDM) {
		/* the ep's gnix address doubles as its hash key */
		key_ptr = (gnix_ht_key_t *)&ep->my_name.gnix_addr;
		ret = _gnix_ht_remove(ep->cm_nic->addr_to_ep_ht,
				      *key_ptr);
		if (ep->vc_ht != NULL) {
			ret = _gnix_ht_destroy(ep->vc_ht);
			if (ret == FI_SUCCESS) {
				free(ep->vc_ht);
				ep->vc_ht = NULL;
			} else {
				GNIX_WARN(FI_LOG_EP_CTRL,
					  "_gnix_ht_destroy returned %s\n",
					  fi_strerror(-ret));
			}
		}
	}

	/* stop CQ/counter polling of this ep's nic, then drop references */
	if (ep->send_cq) {
		_gnix_cq_poll_nic_rem(ep->send_cq, ep->nic);
		_gnix_ref_put(ep->send_cq);
	}

	if (ep->recv_cq) {
		_gnix_cq_poll_nic_rem(ep->recv_cq, ep->nic);
		_gnix_ref_put(ep->recv_cq);
	}

	if (ep->send_cntr) {
		_gnix_cntr_poll_nic_rem(ep->send_cntr, ep->nic);
		_gnix_ref_put(ep->send_cntr);
	}

	if (ep->recv_cntr) {
		_gnix_cntr_poll_nic_rem(ep->recv_cntr, ep->nic);
		_gnix_ref_put(ep->recv_cntr);
	}

	if (ep->read_cntr) {
		_gnix_cntr_poll_nic_rem(ep->read_cntr, ep->nic);
		_gnix_ref_put(ep->read_cntr);
	}

	if (ep->write_cntr) {
		_gnix_cntr_poll_nic_rem(ep->write_cntr, ep->nic);
		_gnix_ref_put(ep->write_cntr);
	}

	if (ep->stx_ctx)
		_gnix_ref_put(ep->stx_ctx);

	domain = ep->domain;
	assert(domain != NULL);
	_gnix_ref_put(domain);

	cm_nic = ep->cm_nic;
	assert(cm_nic != NULL);

	nic = ep->nic;
	assert(nic != NULL);

	av = ep->av;
	if (av != NULL)
		_gnix_ref_put(av);

	/* There is no other choice here, we need to assert if we can't free */
	ret = _gnix_nic_free(nic);
	assert(ret == FI_SUCCESS);

	ep->nic = NULL;

	/* This currently always returns FI_SUCCESS */
	ret = _gnix_cm_nic_free(cm_nic);
	assert(ret == FI_SUCCESS);

	/*
	 * Free fab_reqs
	 */
	__fr_freelist_destroy(ep);

	free(ep);
}