/*
 * Close a usNIC domain object.
 *
 * Fails with -FI_EBUSY while any child resource still holds a reference
 * (dom_refcnt > 0).  On success the underlying usd device is closed, the
 * domain is unlinked from its fabric, and all domain-owned state is freed.
 * Returns 0 on success, -FI_EBUSY if busy, or the usd_close() error.
 */
static int usdf_domain_close(fid_t fid)
{
	struct usdf_domain *udp;
	int ret;

	USDF_TRACE_SYS(DOMAIN, "\n");

	udp = container_of(fid, struct usdf_domain, dom_fid.fid);

	/* Refuse to tear down while endpoints/CQs/etc. still reference us. */
	if (ofi_atomic_get32(&udp->dom_refcnt) > 0) {
		return -FI_EBUSY;
	}

	/* Close the underlying usNIC device first; abort on failure so the
	 * domain is left intact for a retry. */
	if (udp->dom_dev != NULL) {
		ret = usd_close(udp->dom_dev);
		if (ret != 0) {
			return ret;
		}
	}

	usdf_dom_rdc_free_data(udp);

	/* Drop the references this domain held on its bound EQ and fabric. */
	if (udp->dom_eq != NULL) {
		ofi_atomic_dec32(&udp->dom_eq->eq_refcnt);
	}
	ofi_atomic_dec32(&udp->dom_fabric->fab_refcnt);
	LIST_REMOVE(udp, dom_link);
	fi_freeinfo(udp->dom_info);
	free(udp);

	return 0;
}
/*
 * Remove a CQ or counter fid from a poll set.
 *
 * Scans the poll set's fid list for the first entry matching event_fid,
 * unlinks it, drops the reference the poll set held on the underlying
 * CQ/counter, and frees the list node.  A fid that is not found is silently
 * ignored (no error), matching fi_poll_del() semantics.  flags is unused.
 *
 * BUG FIX: the previous version fell off the end of this non-void function
 * without a return statement (undefined behavior when the caller uses the
 * result); it now returns 0.
 */
static int sock_poll_del(struct fid_poll *pollset, struct fid *event_fid,
			 uint64_t flags)
{
	struct sock_poll *poll;
	struct sock_fid_list *list_item;
	struct dlist_entry *p, *head;
	struct sock_cq *cq;
	struct sock_cntr *cntr;

	poll = container_of(pollset, struct sock_poll, poll_fid.fid);
	head = &poll->fid_list;

	for (p = head->next; p != head; p = p->next) {
		list_item = container_of(p, struct sock_fid_list, entry);
		if (list_item->fid == event_fid) {
			dlist_remove(p);
			/* Release the reference taken when the fid was
			 * added to the poll set. */
			switch (list_item->fid->fclass) {
			case FI_CLASS_CQ:
				cq = container_of(list_item->fid,
						  struct sock_cq, cq_fid);
				ofi_atomic_dec32(&cq->ref);
				break;
			case FI_CLASS_CNTR:
				cntr = container_of(list_item->fid,
						    struct sock_cntr, cntr_fid);
				ofi_atomic_dec32(&cntr->ref);
				break;
			default:
				SOCK_LOG_ERROR("Invalid fid class\n");
				break;
			}
			free(list_item);
			break;
		}
	}
	return 0;
}
/*
 * Close a utility EQ.
 *
 * Fails with -FI_EBUSY while the EQ is still referenced.  Otherwise frees
 * all queued events, detaches from (and, if internally created, closes)
 * the associated wait set, releases the fabric reference, and frees the
 * EQ itself.
 */
static int util_eq_close(struct fid *fid)
{
	struct util_eq *eq;
	struct slist_entry *item;

	eq = container_of(fid, struct util_eq, eq_fid.fid);

	if (ofi_atomic_get32(&eq->ref))
		return -FI_EBUSY;

	/* Discard any events still queued on the EQ. */
	while (!slist_empty(&eq->list)) {
		item = slist_remove_head(&eq->list);
		free(container_of(item, struct util_event, entry));
	}

	if (eq->wait) {
		fi_poll_del(&eq->wait->pollset->poll_fid, &eq->eq_fid.fid, 0);
		/* Only close the wait set if this EQ created it. */
		if (eq->internal_wait)
			fi_close(&eq->wait->wait_fid.fid);
	}

	fastlock_destroy(&eq->lock);
	ofi_atomic_dec32(&eq->fabric->ref);
	free(eq);

	return 0;
}
/*
 * Tear down an MR cache created by ofi_mr_cache_init().
 *
 * Logs usage statistics, drains pending monitor events, evicts and frees
 * every entry remaining on the LRU list (all must be unused), then destroys
 * the storage backend, detaches from the memory monitor, releases the
 * domain reference, and destroys the entry pool.
 */
void ofi_mr_cache_cleanup(struct ofi_mr_cache *cache)
{
	struct ofi_mr_entry *entry;
	struct dlist_entry *tmp;

	FI_INFO(cache->domain->prov, FI_LOG_MR, "MR cache stats: "
		"searches %zu, deletes %zu, hits %zu\n",
		cache->search_cnt, cache->delete_cnt, cache->hit_cnt);

	/* Process any notifications queued by the memory monitor before
	 * walking the LRU list. */
	util_mr_cache_process_events(cache);

	/* Safe iteration: entries are removed while walking. */
	dlist_foreach_container_safe(&cache->lru_list, struct ofi_mr_entry,
				     entry, lru_entry, tmp) {
		/* Only idle entries may sit on the LRU list. */
		assert(entry->use_cnt == 0);
		util_mr_uncache_entry(cache, entry);
		dlist_remove_init(&entry->lru_entry);
		util_mr_free_entry(cache, entry);
	}
	cache->mr_storage.destroy(&cache->mr_storage);
	ofi_monitor_del_queue(&cache->nq);
	ofi_atomic_dec32(&cache->domain->ref);
	util_buf_pool_destroy(cache->entry_pool);
	/* All cached regions must have been released by now. */
	assert(cache->cached_cnt == 0);
	assert(cache->cached_size == 0);
}
/*
 * Unbind an RX context from a CQ: unlink it from the CQ's context list and
 * drop the reference it held.  The CQ list lock serializes this against
 * concurrent walkers of that list.
 */
void sock_cq_remove_rx_ctx(struct sock_cq *cq, struct sock_rx_ctx *rx_ctx)
{
	fastlock_acquire(&cq->list_lock);
	dlist_remove(&rx_ctx->cq_entry);
	ofi_atomic_dec32(&cq->ref);
	fastlock_release(&cq->list_lock);
}
/*
 * Remove a file descriptor from a wait set.
 *
 * fd entries are reference counted (the same fd may be added more than
 * once); the entry is only removed from the epoll set and freed when the
 * last reference is dropped.
 *
 * Returns 0 on success, -FI_EINVAL if fd is not in the wait list.
 */
int ofi_wait_fd_del(struct util_wait *wait, int fd)
{
	int ret = 0;
	struct ofi_wait_fd_entry *fd_entry;
	struct dlist_entry *entry;
	struct util_wait_fd *wait_fd = container_of(wait, struct util_wait_fd,
						    util_wait);

	fastlock_acquire(&wait_fd->lock);
	entry = dlist_find_first_match(&wait_fd->fd_list, ofi_wait_fd_match,
				       &fd);
	if (!entry) {
		FI_INFO(wait->prov, FI_LOG_FABRIC,
			"Given fd (%d) not found in wait list - %p\n",
			fd, wait_fd);
		ret = -FI_EINVAL;
		goto out;
	}
	fd_entry = container_of(entry, struct ofi_wait_fd_entry, entry);
	/* NOTE(review): assumes ofi_atomic_dec32 returns the post-decrement
	 * value — nonzero means other users remain, so keep the entry. */
	if (ofi_atomic_dec32(&fd_entry->ref))
		goto out;
	dlist_remove(&fd_entry->entry);
	fi_epoll_del(wait_fd->epoll_fd, fd_entry->fd);
	free(fd_entry);
out:
	fastlock_release(&wait_fd->lock);
	return ret;
}
/*
 * Close an MLX (UCX-backed) endpoint: flush and destroy the UCP worker,
 * then release the references held on the bound TX/RX CQs and the owning
 * domain, and free the endpoint.  Always returns FI_SUCCESS.
 */
static int mlx_ep_close(fid_t fid)
{
	struct mlx_ep *fid_ep;

	fid_ep = container_of(fid, struct mlx_ep, ep.ep_fid.fid);

	/* Flush outstanding worker operations before destroying it. */
	ucp_worker_flush(fid_ep->worker);
	ucp_worker_destroy(fid_ep->worker);

	if (fid_ep->ep.tx_cq) {
		ofi_atomic_dec32(&(fid_ep->ep.tx_cq->ref));
	}
	if (fid_ep->ep.rx_cq) {
		ofi_atomic_dec32(&(fid_ep->ep.rx_cq->ref));
	}
	ofi_atomic_dec32(&(fid_ep->ep.domain->ref));
	free(fid_ep);
	return FI_SUCCESS;
}
/*
 * Bind an EQ to a utility endpoint.  Any previously bound EQ loses the
 * reference the endpoint held on it; the new EQ gains one.  Always
 * returns 0.
 */
int ofi_ep_bind_eq(struct util_ep *ep, struct util_eq *eq)
{
	/* Release the EQ bound earlier, if any. */
	if (ep->eq)
		ofi_atomic_dec32(&ep->eq->ref);

	ofi_atomic_inc32(&eq->ref);
	ep->eq = eq;
	return 0;
}
/*
 * Close one TX/RX context of a scalable endpoint.  The context only holds
 * a reference on its base endpoint; dropping that reference is the entire
 * close operation.  Always returns 0.
 */
static int psmx2_sep_ctxt_close(fid_t fid)
{
	struct psmx2_fid_ep *ep_priv;

	ep_priv = container_of(fid, struct psmx2_fid_ep, ep.fid);

	if (ep_priv->base_ep)
		ofi_atomic_dec32(&ep_priv->base_ep->ref);

	return 0;
}
/*
 * Close a utility domain.
 *
 * Fails with -FI_EBUSY while child resources still reference the domain.
 * Otherwise unlinks the domain from its fabric (under the fabric lock),
 * frees domain-owned state, and drops the fabric reference.  The caller
 * owns and frees the containing provider structure.
 */
int ofi_domain_close(struct util_domain *domain)
{
	if (ofi_atomic_get32(&domain->ref))
		return -FI_EBUSY;

	/* Remove from the fabric's domain list under the fabric lock. */
	fastlock_acquire(&domain->fabric->lock);
	dlist_remove(&domain->list_entry);
	fastlock_release(&domain->fabric->lock);

	free(domain->name);
	fastlock_destroy(&domain->lock);
	ofi_atomic_dec32(&domain->fabric->ref);
	return 0;
}
/*
 * Clean up a utility wait set.
 *
 * Fails with -FI_EBUSY while the wait set is still referenced.  Closes the
 * internal poll set and drops the fabric reference.  Returns 0, -FI_EBUSY,
 * or the fi_close() error.  The caller frees the containing structure.
 */
int fi_wait_cleanup(struct util_wait *wait)
{
	int ret;

	if (ofi_atomic_get32(&wait->ref))
		return -FI_EBUSY;

	ret = fi_close(&wait->pollset->poll_fid.fid);
	if (ret)
		return ret;

	ofi_atomic_dec32(&wait->fabric->ref);
	return 0;
}
/*
 * Close a PSM2 endpoint.
 *
 * An alias endpoint (base_ep set) merely drops its reference on the base
 * endpoint.  A real endpoint fails with -FI_EBUSY while referenced;
 * otherwise it releases its shared TX context reference, removes its name
 * server entry, unlinks its RX context from the domain list, and frees the
 * endpoint followed by the TRX context.
 */
static int psmx2_ep_close(fid_t fid)
{
	struct psmx2_fid_ep *ep;
	struct psmx2_ep_name ep_name;
	struct psmx2_trx_ctxt *trx_ctxt;

	ep = container_of(fid, struct psmx2_fid_ep, ep.fid);

	/* Alias endpoints only hold a reference on the base endpoint. */
	if (ep->base_ep) {
		ofi_atomic_dec32(&ep->base_ep->ref);
		return 0;
	}

	if (ofi_atomic_get32(&ep->ref))
		return -FI_EBUSY;

	if (ep->stx)
		ofi_atomic_dec32(&ep->stx->ref);

	/* Remove the name server entry keyed by this EP's epid.  Only the
	 * epid field of ep_name is initialized here. */
	if (ep->rx) {
		ep_name.epid = ep->rx->psm2_epid;

		ofi_ns_del_local_name(&ep->domain->fabric->name_server,
				      &ep->service, &ep_name);
	}

	/* Unlink the RX context from the domain's TRX context list. */
	if (ep->rx) {
		psmx2_lock(&ep->domain->trx_ctxt_lock, 1);
		dlist_remove(&ep->rx->entry);
		psmx2_unlock(&ep->domain->trx_ctxt_lock, 1);
	}

	/* Save the TRX context pointer before the EP is freed, then release
	 * it afterwards. */
	trx_ctxt = ep->rx;
	psmx2_ep_close_internal(ep);
	psmx2_trx_ctxt_free(trx_ctxt);
	return 0;
}
/*
 * Create an MLX (UCX-backed) endpoint on the given domain.
 *
 * Initializes the utility endpoint, creates a multi-threaded UCP worker,
 * and installs the provider's ops tables.  On success *fid receives the
 * new endpoint and FI_SUCCESS is returned; on failure a negative error
 * code is returned and no endpoint is created.
 */
int mlx_ep_open( struct fid_domain *domain, struct fi_info *info,
		struct fid_ep **fid, void *context)
{
	struct mlx_ep *ep;
	struct mlx_domain *u_domain;
	int ofi_status = FI_SUCCESS;
	ucs_status_t status = UCS_OK;
	ucp_worker_params_t worker_params;

	/* Only thread_mode is configured; field_mask tells UCX which
	 * fields of worker_params are valid. */
	worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
	worker_params.thread_mode = UCS_THREAD_MODE_MULTI;

	u_domain = container_of( domain, struct mlx_domain, u_domain.domain_fid);

	ep = (struct mlx_ep *) calloc(1, sizeof (struct mlx_ep));
	if (!ep) {
		return -ENOMEM;
	}

	ofi_status = ofi_endpoint_init(domain, &mlx_util_prov, info,
				       &ep->ep, context, mlx_ep_progress);
	if (ofi_status) {
		goto free_ep;
	}

	status = ucp_worker_create( u_domain->context, &worker_params,
				    &(ep->worker));
	if (status != UCS_OK) {
		ofi_status = MLX_TRANSLATE_ERRCODE(status);
		/* Roll back the domain reference taken by ofi_endpoint_init.
		 * NOTE(review): other state set up by ofi_endpoint_init
		 * (e.g. the EP lock) does not appear to be torn down here —
		 * confirm whether ofi_endpoint_close should be used instead. */
		ofi_atomic_dec32(&(u_domain->u_domain.ref));
		goto free_ep;
	}

	ep->ep.ep_fid.fid.ops = &mlx_fi_ops;
	ep->ep.ep_fid.ops = &mlx_ep_ops;
	ep->ep.ep_fid.cm = &mlx_cm_ops;
	ep->ep.ep_fid.tagged = &mlx_tagged_ops;
	ep->ep.flags = info->mode;
	ep->ep.caps = u_domain->u_domain.info_domain_caps;

	*fid = &(ep->ep.ep_fid);

	return FI_SUCCESS;
free_ep:
	free(ep);
	return ofi_status;
}
/*
 * Close a sockets-provider CQ.
 *
 * Fails with -FI_EBUSY while the CQ is still referenced.  Closes the
 * internally created wait set when one was signaled via FI_WAIT_MUTEX_COND,
 * frees the completion/error/addr ring buffers, destroys the locks, drops
 * the domain reference, and frees the CQ.
 */
static int sock_cq_close(struct fid *fid)
{
	struct sock_cq *cq;

	cq = container_of(fid, struct sock_cq, cq_fid.fid);
	if (ofi_atomic_get32(&cq->ref))
		return -FI_EBUSY;

	/* Internally created wait sets are owned (and closed) by the CQ. */
	if (cq->signal && cq->attr.wait_obj == FI_WAIT_MUTEX_COND)
		sock_wait_close(&cq->waitset->fid);

	ofi_rbfree(&cq->addr_rb);
	ofi_rbfree(&cq->cqerr_rb);
	ofi_rbfdfree(&cq->cq_rbfd);

	fastlock_destroy(&cq->lock);
	fastlock_destroy(&cq->list_lock);
	ofi_atomic_dec32(&cq->domain->ref);

	free(cq);
	return 0;
}
/*
 * Close a PSM endpoint.
 *
 * An alias endpoint (base_ep set) merely drops its reference on the base
 * endpoint.  A real endpoint fails with -FI_EBUSY while referenced;
 * otherwise it removes its name server entry, detaches from the domain,
 * releases the domain, and frees itself.
 */
static int psmx_ep_close(fid_t fid)
{
	struct psmx_fid_ep *ep;

	ep = container_of(fid, struct psmx_fid_ep, ep.fid);

	/* Alias endpoints only hold a reference on the base endpoint. */
	if (ep->base_ep) {
		ofi_atomic_dec32(&ep->base_ep->ref);
		return 0;
	}

	if (ofi_atomic_get32(&ep->ref))
		return -FI_EBUSY;

	ofi_ns_del_local_name(&ep->domain->fabric->name_server,
			      &ep->service, &ep->domain->psm_epid);
	psmx_domain_disable_ep(ep->domain, ep);
	psmx_domain_release(ep->domain);
	free(ep);
	return 0;
}
/*
 * Initialize an MR cache for a domain.
 *
 * The caller must have set cache->add_region and cache->delete_region.
 * Takes a reference on the domain, registers with the memory monitor, and
 * creates the entry buffer pool.  On failure all acquired resources are
 * rolled back in reverse order.  Returns 0 or a negative error code.
 */
int ofi_mr_cache_init(struct util_domain *domain,
		      struct ofi_mem_monitor *monitor,
		      struct ofi_mr_cache *cache)
{
	int ret;

	assert(cache->add_region && cache->delete_region);

	ret = ofi_mr_cache_init_storage(cache);
	if (ret)
		return ret;

	cache->domain = domain;
	ofi_atomic_inc32(&domain->ref);

	dlist_init(&cache->lru_list);
	cache->cached_cnt = 0;
	cache->cached_size = 0;
	/* Unlimited cache size unless the caller configured a cap. */
	if (!cache->max_cached_size)
		cache->max_cached_size = SIZE_MAX;
	cache->search_cnt = 0;
	cache->delete_cnt = 0;
	cache->hit_cnt = 0;
	ofi_monitor_add_queue(monitor, &cache->nq);

	/* Pool entries carry entry_data_size bytes of provider data after
	 * the ofi_mr_entry header. */
	ret = util_buf_pool_create(&cache->entry_pool,
				   sizeof(struct ofi_mr_entry) +
				   cache->entry_data_size,
				   16, 0, cache->max_cached_cnt);
	if (ret)
		goto err;
	return 0;
err:
	/* Roll back in reverse order of acquisition. */
	ofi_atomic_dec32(&cache->domain->ref);
	ofi_monitor_del_queue(&cache->nq);
	cache->mr_storage.destroy(&cache->mr_storage);
	return ret;
}
/*
 * Close a sockets-provider TX/RX context of any class.
 *
 * Regular TX/RX contexts are detached from the progress engine, decrement
 * their endpoint's context count and the domain reference, then are closed
 * and freed.  Shared (STX/SRX) contexts have no owning endpoint counter and
 * no close step.  Returns 0, or -FI_EINVAL for an unknown fid class.
 */
static int sock_ctx_close(struct fid *fid)
{
	struct sock_tx_ctx *tx_ctx;
	struct sock_rx_ctx *rx_ctx;

	switch (fid->fclass) {
	case FI_CLASS_TX_CTX:
		tx_ctx = container_of(fid, struct sock_tx_ctx, fid.ctx.fid);
		sock_pe_remove_tx_ctx(tx_ctx);
		ofi_atomic_dec32(&tx_ctx->ep_attr->num_tx_ctx);
		ofi_atomic_dec32(&tx_ctx->domain->ref);
		sock_tx_ctx_close(tx_ctx);
		sock_tx_ctx_free(tx_ctx);
		break;

	case FI_CLASS_RX_CTX:
		rx_ctx = container_of(fid, struct sock_rx_ctx, ctx.fid);
		sock_pe_remove_rx_ctx(rx_ctx);
		ofi_atomic_dec32(&rx_ctx->ep_attr->num_rx_ctx);
		ofi_atomic_dec32(&rx_ctx->domain->ref);
		sock_rx_ctx_close(rx_ctx);
		sock_rx_ctx_free(rx_ctx);
		break;

	case FI_CLASS_STX_CTX:
		/* Shared TX context: embedded as fid.stx, not fid.ctx. */
		tx_ctx = container_of(fid, struct sock_tx_ctx, fid.stx.fid);
		ofi_atomic_dec32(&tx_ctx->domain->ref);
		sock_pe_remove_tx_ctx(tx_ctx);
		sock_tx_ctx_free(tx_ctx);
		break;

	case FI_CLASS_SRX_CTX:
		rx_ctx = container_of(fid, struct sock_rx_ctx, ctx.fid);
		ofi_atomic_dec32(&rx_ctx->domain->ref);
		sock_pe_remove_rx_ctx(rx_ctx);
		sock_rx_ctx_free(rx_ctx);
		break;

	default:
		SOCK_LOG_ERROR("Invalid fid\n");
		return -FI_EINVAL;
	}
	return 0;
}
/*
 * Complete an atomic (AMO) fabric request.
 *
 * Deregisters the auto-registered local MR if the request created one,
 * decrements the VC's outstanding-TX count, kicks the VC TX scheduler,
 * and returns the request to its endpoint's free list.
 */
static void __gnix_amo_fr_complete(struct gnix_fab_req *req)
{
	int rc;

	/* FI_LOCAL_MR marks an MR that was auto-registered on behalf of
	 * this request and must be released with it. */
	if (req->flags & FI_LOCAL_MR) {
		GNIX_INFO(FI_LOG_EP_DATA, "freeing auto-reg MR: %p\n",
			  req->amo.loc_md);
		rc = fi_close(&req->amo.loc_md->mr_fid.fid);
		if (rc != FI_SUCCESS) {
			GNIX_ERR(FI_LOG_DOMAIN,
				 "failed to deregister auto-registered region, "
				 "rc=%d\n", rc);
		}
		req->flags &= ~FI_LOCAL_MR;
	}

	ofi_atomic_dec32(&req->vc->outstanding_tx_reqs);

	/* Schedule VC TX queue in case the VC is 'fenced'. */
	_gnix_vc_tx_schedule(req->vc);

	_gnix_fr_free(req->vc->ep, req);
}
/*
 * Detach a notification queue from its memory monitor.
 *
 * The queue must be idle: no pending notifications and no outstanding
 * subscriptions (debug-asserted).  Drops the monitor reference taken by
 * ofi_monitor_add_queue() and destroys the queue lock.
 */
void ofi_monitor_del_queue(struct ofi_notification_queue *nq)
{
	assert(dlist_empty(&nq->list) && (nq->refcnt == 0));
	ofi_atomic_dec32(&nq->monitor->refcnt);
	fastlock_destroy(&nq->lock);
}
/*
 * Close a sockets-provider endpoint (regular or scalable).
 *
 * Alias endpoints just drop their reference on the shared attributes.
 * A real endpoint fails with -FI_EBUSY while referenced or while any
 * TX/RX contexts remain.  Teardown order: stop the CM listener thread
 * (FI_EP_MSG), detach from the AV, unlink from shared contexts, stop the
 * connection listener, purge this EP's events from the EQ, close/free the
 * implicit TX/RX contexts (non-SEP), free addresses, destroy the
 * connection map, and finally free the attributes and the endpoint.
 */
static int sock_ep_close(struct fid *fid)
{
	struct sock_ep *sock_ep;
	char c = 0;

	switch (fid->fclass) {
	case FI_CLASS_EP:
		sock_ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	case FI_CLASS_SEP:
		sock_ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	default:
		return -FI_EINVAL;
	}

	/* Alias endpoints only hold a reference on the shared attrs. */
	if (sock_ep->is_alias) {
		ofi_atomic_dec32(&sock_ep->attr->ref);
		return 0;
	}
	if (ofi_atomic_get32(&sock_ep->attr->ref) ||
	    ofi_atomic_get32(&sock_ep->attr->num_rx_ctx) ||
	    ofi_atomic_get32(&sock_ep->attr->num_tx_ctx))
		return -FI_EBUSY;

	if (sock_ep->attr->ep_type == FI_EP_MSG) {
		/* Wake the CM listener thread via its signal pipe and join
		 * it before closing the signal sockets. */
		sock_ep->attr->cm.do_listen = 0;
		if (ofi_write_socket(sock_ep->attr->cm.signal_fds[0],
				     &c, 1) != 1)
			SOCK_LOG_DBG("Failed to signal\n");

		if (sock_ep->attr->cm.listener_thread &&
		    pthread_join(sock_ep->attr->cm.listener_thread, NULL)) {
			SOCK_LOG_ERROR("pthread join failed (%d)\n",
				       ofi_syserr());
		}
		ofi_close_socket(sock_ep->attr->cm.signal_fds[0]);
		ofi_close_socket(sock_ep->attr->cm.signal_fds[1]);
	} else {
		if (sock_ep->attr->av)
			ofi_atomic_dec32(&sock_ep->attr->av->ref);
	}
	if (sock_ep->attr->av) {
		fastlock_acquire(&sock_ep->attr->av->list_lock);
		fid_list_remove(&sock_ep->attr->av->ep_list,
				&sock_ep->attr->lock, &sock_ep->ep.fid);
		fastlock_release(&sock_ep->attr->av->list_lock);
	}

	/* Unlink from shared TX/RX context EP lists under the progress
	 * engine list lock. */
	pthread_mutex_lock(&sock_ep->attr->domain->pe->list_lock);
	if (sock_ep->attr->tx_shared) {
		fastlock_acquire(&sock_ep->attr->tx_ctx->lock);
		dlist_remove(&sock_ep->attr->tx_ctx_entry);
		fastlock_release(&sock_ep->attr->tx_ctx->lock);
	}
	if (sock_ep->attr->rx_shared) {
		fastlock_acquire(&sock_ep->attr->rx_ctx->lock);
		dlist_remove(&sock_ep->attr->rx_ctx_entry);
		fastlock_release(&sock_ep->attr->rx_ctx->lock);
	}
	pthread_mutex_unlock(&sock_ep->attr->domain->pe->list_lock);

	/* Stop listening for incoming connections. */
	if (sock_ep->attr->conn_handle.do_listen) {
		fastlock_acquire(&sock_ep->attr->domain->conn_listener.signal_lock);
		fi_epoll_del(sock_ep->attr->domain->conn_listener.emap,
			     sock_ep->attr->conn_handle.sock);
		fastlock_release(&sock_ep->attr->domain->conn_listener.signal_lock);
		ofi_close_socket(sock_ep->attr->conn_handle.sock);
		sock_ep->attr->conn_handle.do_listen = 0;
	}
	fastlock_destroy(&sock_ep->attr->cm.lock);

	/* Drop any EQ events that still reference this endpoint. */
	if (sock_ep->attr->eq) {
		fastlock_acquire(&sock_ep->attr->eq->lock);
		sock_ep_clear_eq_list(&sock_ep->attr->eq->list,
				      &sock_ep->ep);
		/* Any err_data if present would be freed by
		 * sock_eq_clean_err_data_list when EQ is closed */
		sock_ep_clear_eq_list(&sock_ep->attr->eq->err_list,
				      &sock_ep->ep);
		fastlock_release(&sock_ep->attr->eq->lock);
	}

	/* Non-SEP endpoints own implicit context [0]; scalable endpoints'
	 * contexts were closed individually. */
	if (sock_ep->attr->fclass != FI_CLASS_SEP) {
		if (!sock_ep->attr->tx_shared)
			sock_pe_remove_tx_ctx(sock_ep->attr->tx_array[0]);
		sock_tx_ctx_close(sock_ep->attr->tx_array[0]);
		sock_tx_ctx_free(sock_ep->attr->tx_array[0]);
	}

	if (sock_ep->attr->fclass != FI_CLASS_SEP) {
		if (!sock_ep->attr->rx_shared)
			sock_pe_remove_rx_ctx(sock_ep->attr->rx_array[0]);
		sock_rx_ctx_close(sock_ep->attr->rx_array[0]);
		sock_rx_ctx_free(sock_ep->attr->rx_array[0]);
	}

	free(sock_ep->attr->tx_array);
	free(sock_ep->attr->rx_array);

	if (sock_ep->attr->src_addr)
		free(sock_ep->attr->src_addr);
	if (sock_ep->attr->dest_addr)
		free(sock_ep->attr->dest_addr);

	/* Destroy per-EP connection state under the progress engine lock. */
	fastlock_acquire(&sock_ep->attr->domain->pe->lock);
	ofi_idm_reset(&sock_ep->attr->av_idm);
	sock_conn_map_destroy(sock_ep->attr);
	fastlock_release(&sock_ep->attr->domain->pe->lock);

	ofi_atomic_dec32(&sock_ep->attr->domain->ref);
	fastlock_destroy(&sock_ep->attr->lock);
	free(sock_ep->attr);
	free(sock_ep);
	return 0;
}
int ofi_endpoint_close(struct util_ep *util_ep) { if (util_ep->tx_cq) { fid_list_remove(&util_ep->tx_cq->ep_list, &util_ep->tx_cq->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->tx_cq->ref); } if (util_ep->rx_cq) { fid_list_remove(&util_ep->rx_cq->ep_list, &util_ep->rx_cq->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->rx_cq->ref); } if (util_ep->rx_cntr) { fid_list_remove(&util_ep->rx_cntr->ep_list, &util_ep->rx_cntr->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->rx_cntr->ref); } if (util_ep->tx_cntr) { fid_list_remove(&util_ep->tx_cntr->ep_list, &util_ep->tx_cntr->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->tx_cntr->ref); } if (util_ep->rd_cntr) { fid_list_remove(&util_ep->rd_cntr->ep_list, &util_ep->rd_cntr->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->rd_cntr->ref); } if (util_ep->wr_cntr) { fid_list_remove(&util_ep->wr_cntr->ep_list, &util_ep->wr_cntr->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->wr_cntr->ref); } if (util_ep->rem_rd_cntr) { fid_list_remove(&util_ep->rem_rd_cntr->ep_list, &util_ep->rem_rd_cntr->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->rem_rd_cntr->ref); } if (util_ep->rem_wr_cntr) { fid_list_remove(&util_ep->rem_wr_cntr->ep_list, &util_ep->rem_wr_cntr->ep_list_lock, &util_ep->ep_fid.fid); ofi_atomic_dec32(&util_ep->rem_wr_cntr->ref); } if (util_ep->av) { fastlock_acquire(&util_ep->av->ep_list_lock); dlist_remove(&util_ep->av_entry); fastlock_release(&util_ep->av->ep_list_lock); ofi_atomic_dec32(&util_ep->av->ref); } if (util_ep->eq) ofi_atomic_dec32(&util_ep->eq->ref); ofi_atomic_dec32(&util_ep->domain->ref); fastlock_destroy(&util_ep->lock); return 0; }