/*
 * Close a scalable endpoint (SEP).
 *
 * Validates that @fid really is a SEP, then releases the references taken
 * at SEP creation time (address vector, the SEP's own count, the owning
 * domain), frees the duplicated fi_info, and finally frees the backing
 * allocation that contains the SEP structure itself.
 *
 * Returns 0 on success, or the error from the first failing check /
 * reference decrement.  NOTE(review): a failure partway through leaves
 * earlier reference counts already decremented — this matches the
 * pre-existing close semantics of this provider.
 */
static int fi_bgq_close_sep(fid_t fid)
{
	int ret;
	struct fi_bgq_sep *bgq_sep =
		container_of(fid, struct fi_bgq_sep, ep_fid);

	ret = fi_bgq_fid_check(fid, FI_CLASS_SEP, "scalable endpoint");
	if (ret)
		return ret;

	ret = fi_bgq_ref_dec(&bgq_sep->av->ref_cnt, "address vector");
	if (ret)
		return ret;

	ret = fi_bgq_ref_finalize(&bgq_sep->ref_cnt, "scalable endpoint");
	if (ret)
		return ret;

	ret = fi_bgq_ref_dec(&bgq_sep->domain->ref_cnt, "domain");
	if (ret)
		return ret;

	/*
	 * Guard the info dereference: free(NULL) is a harmless no-op, but
	 * reading bgq_sep->info->ep_attr when info is NULL is undefined
	 * behavior.  The original code dereferenced unconditionally.
	 */
	if (bgq_sep->info) {
		free(bgq_sep->info->ep_attr);
		free(bgq_sep->info);
	}

	/*
	 * The SEP structure lives inside this allocation; copy the pointer
	 * out first so we do not touch bgq_sep after it is freed.
	 */
	void *memptr = bgq_sep->memptr;
	free(memptr);

	return 0;
}
/*
 * Tear down the progress subsystem of a domain.
 *
 * Preconditions (asserted): no progress thread is still active, and every
 * per-thread slot is disabled and inactive.  For each slot the back-pointer
 * to the domain is cleared and the domain reference taken when the slot was
 * set up is dropped; the backing allocation is then released.
 *
 * Returns 0 on success, or the first error reported by fi_bgq_ref_dec()
 * (the original code silently discarded these errors).  Cleanup still runs
 * to completion on error so the progress memory is not leaked.
 */
int fi_bgq_progress_fini(struct fi_bgq_domain *bgq_domain)
{
	int i;
	int err = 0;

	assert(0 == bgq_domain->progress.num_threads_active);

	for (i = 0; i < bgq_domain->progress.max_threads; ++i) {
		assert(0 == bgq_domain->progress.thread[i].enabled);
		assert(0 == bgq_domain->progress.thread[i].active);

		bgq_domain->progress.thread[i].bgq_domain = NULL;

		/* Record the first decrement failure instead of ignoring
		 * it, but keep releasing the remaining slots. */
		int ret = fi_bgq_ref_dec(&bgq_domain->ref_cnt, "domain");
		if (ret && !err)
			err = ret;
	}

	free(bgq_domain->progress.memptr);
	bgq_domain->progress.memptr = NULL;

	return err;
}