Test(gnix_hashtable_advanced, insert_1024_lookup_all)
{
	int ret, i;
	gnix_test_element_t test_elements[1024];
	gnix_test_element_t *item;
	gnix_test_element_t *found = NULL;

	srand(time(NULL));

	for (i = 0; i < 1024; ++i) {
		item = &test_elements[i];
		item->key = i;
		item->val = rand() % (1024 * 1024);
		item->magic = __GNIX_MAGIC_VALUE;
	}

	for (i = 0; i < 1024; ++i) {
		item = &test_elements[i];
		ret = _gnix_ht_insert(test_ht, item->key, item);
		cr_assert(ret == 0);
		cr_assert(atomic_get(&test_ht->ht_elements) == (i + 1));
	}

	cr_assert(atomic_get(&test_ht->ht_elements) == 1024);

	for (i = 0; i < 1024; ++i) {
		found = _gnix_ht_lookup(test_ht, test_elements[i].key);
		cr_assert(found != NULL);
		cr_assert(found == &test_elements[i]);
		cr_assert(found->magic == __GNIX_MAGIC_VALUE);
	}
}
Test(utils, references)
{
	int refs;
	struct gnix_reference_tester test;

	/* initialize test structure */
	_gnix_ref_init(&test.ref_cnt, 1, test_destruct);
	test.destructed = 0;

	/* check for validity */
	cr_assert(atomic_get(&test.ref_cnt.references) == 1);
	cr_assert(test.destructed == 0);

	/* increment refs and check */
	refs = _gnix_ref_get(&test);
	cr_assert(refs == 2);
	cr_assert(atomic_get(&test.ref_cnt.references) == 2);
	cr_assert(test.destructed == 0);

	/* decrement refs and check */
	refs = _gnix_ref_put(&test);
	cr_assert(refs == 1);
	cr_assert(atomic_get(&test.ref_cnt.references) == 1);
	cr_assert(test.destructed == 0);

	/* decrement and destruct, check for validity */
	refs = _gnix_ref_put(&test);
	cr_assert(refs == 0);
	cr_assert(atomic_get(&test.ref_cnt.references) == 0);
	cr_assert(test.destructed == 1);
}
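/*
 * A minimal, self-contained sketch (not the libfabric implementation) of the
 * reference-counting pattern the test above exercises: an atomic counter plus
 * a destructor callback that fires when the count drops to zero. The names
 * ref_t, ref_init, ref_get, and ref_put are hypothetical.
 */
#include <stdatomic.h>

typedef struct ref {
	atomic_int references;
	void (*destruct)(struct ref *ref);
} ref_t;

static inline void ref_init(ref_t *ref, int initial,
			    void (*destruct)(ref_t *))
{
	atomic_init(&ref->references, initial);
	ref->destruct = destruct;
}

static inline int ref_get(ref_t *ref)
{
	/* fetch_add returns the previous value, so add 1 for the new count */
	return atomic_fetch_add(&ref->references, 1) + 1;
}

static inline int ref_put(ref_t *ref)
{
	int refs = atomic_fetch_sub(&ref->references, 1) - 1;

	if (refs == 0 && ref->destruct)
		ref->destruct(ref);
	return refs;
}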
void su_thread_disposable(su_state *s)
{
	if (atomic_get(&s->thread_indisposable)) {
		while ((atomic_get(&s->msi->interrupt) & ISCOLLECT) == ISCOLLECT)
			thread_sleep(0);
		atomic_set(&s->thread_indisposable, 0);
	}
}
static __inline__ void nmp_scheduler_find_best_loop(nmp_scheduler_t *scheduler)
{
	nmp_watch_loop_t *loop;
	int index, best_watches, watches;

	if (scheduler->loop_count == 1) {
		scheduler->next_loop = 0;
		return;
	}

	scheduler->next_loop = 0;
	loop = &scheduler->loops[0];
	best_watches = atomic_get(&loop->watches_count);

	for (index = 1; index < scheduler->loop_count; ++index) {
		loop = &scheduler->loops[index];
		watches = atomic_get(&loop->watches_count);
		if (watches < best_watches) {
			best_watches = watches;
			scheduler->next_loop = index;
		}
	}
}
void su_close(su_state *s)
{
	int i;

	s->stack_top = 0;
	su_thread_indisposable(s);
	while (atomic_get(&s->msi->thread_count) > 1)
		thread_sleep(0);

	for (i = 0; i < SU_OPT_MAX_THREADS; i++) {
		su_state *thread = &s->msi->threads[i];
		memset(thread->string_cache, 0, sizeof(thread->string_cache));
		if (thread->string_builder)
			thread->alloc(s->string_builder, 0);
		if (thread->fstdin != stdin)
			fclose(thread->fstdin);
		if (thread->fstdout != stdout)
			fclose(thread->fstdout);
		if (thread->fstderr != stderr)
			fclose(thread->fstderr);
	}

	while (atomic_get(&s->msi->num_objects) > 1)
		su_gc(s);

	gc_free_object(s, s->msi->gc_root);
	if (s->msi->c_lambdas)
		s->alloc(s->msi->c_lambdas, 0);
	s->alloc(s->msi, 0);
}
int _lru_free_mem(cache_t *c, segment_t *pseg, ex_off_t bytes_to_free)
{
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
	cache_segment_t *s;
	cache_page_t *p;
	page_lru_t *lp;
	Stack_ele_t *ele;
	apr_thread_mutex_t *plock;
	ex_off_t total_bytes, pending_bytes;
	int gotlock, count, bits, err;

	total_bytes = 0;
	err = 0;

	log_printf(15, "START seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " stack_size=%d\n",
		   segment_id(pseg), bytes_to_free, cp->bytes_used, stack_size(cp->stack));

	move_to_bottom(cp->stack);
	ele = get_ptr(cp->stack);

	while ((total_bytes < bytes_to_free) && (ele != NULL) && (err == 0)) {
		p = (cache_page_t *)get_stack_ele_data(ele);
		lp = (page_lru_t *)p->priv;
		plock = p->seg->lock;
		gotlock = apr_thread_mutex_trylock(plock);
		if ((gotlock == APR_SUCCESS) || (p->seg == pseg)) {
			bits = atomic_get(p->bit_fields);
			if ((bits & C_TORELEASE) == 0) {  //** Skip it if already flagged for removal
				count = atomic_get(p->access_pending[CACHE_READ]) +
					atomic_get(p->access_pending[CACHE_WRITE]) +
					atomic_get(p->access_pending[CACHE_FLUSH]);
				if (count == 0) {  //** No one is using it
					s = (cache_segment_t *)p->seg->priv;
					if ((bits & C_ISDIRTY) == 0) {  //** Don't have to flush it
						total_bytes += s->page_size;
						log_printf(15, "lru_free_mem: freeing page seg=" XIDT " p->offset=" XOT " bits=%d\n",
							   segment_id(p->seg), p->offset, bits);
						list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
						delete_current(cp->stack, 1, 0);
						if (p->data[0].ptr) free(p->data[0].ptr);
						if (p->data[1].ptr) free(p->data[1].ptr);
						free(lp);
					} else {  //** Got to flush the page first
						err = 1;
					}
				} else {
					err = 1;
				}
			}

			if (gotlock == APR_SUCCESS) apr_thread_mutex_unlock(plock);
		} else {
			err = 1;
		}

		if ((total_bytes < bytes_to_free) && (err == 0)) ele = get_ptr(cp->stack);
	}

	cp->bytes_used -= total_bytes;
	pending_bytes = bytes_to_free - total_bytes;

	log_printf(15, "END seg=" XIDT " bytes_to_free=" XOT " pending_bytes=" XOT " bytes_used=" XOT "\n",
		   segment_id(pseg), bytes_to_free, pending_bytes, cp->bytes_used);

	return(pending_bytes);
}
static int sock_cntr_wait(struct fid_cntr *cntr, uint64_t threshold, int timeout)
{
	int ret = 0;
	uint64_t start_ms = 0, end_ms = 0;
	struct sock_cntr *_cntr;

	_cntr = container_of(cntr, struct sock_cntr, cntr_fid);
	pthread_mutex_lock(&_cntr->mut);

	if (_cntr->err_flag) {
		ret = -FI_EAVAIL;
		goto out;
	}

	if (atomic_get(&_cntr->value) >= threshold) {
		ret = 0;
		goto out;
	}

	if (_cntr->is_waiting) {
		ret = -FI_EBUSY;
		goto out;
	}

	_cntr->is_waiting = 1;
	atomic_set(&_cntr->threshold, threshold);

	if (_cntr->domain->progress_mode == FI_PROGRESS_MANUAL) {
		pthread_mutex_unlock(&_cntr->mut);

		if (timeout >= 0) {
			start_ms = fi_gettime_ms();
			end_ms = start_ms + timeout;
		}

		while (atomic_get(&_cntr->value) < threshold) {
			sock_cntr_progress(_cntr);
			if (timeout >= 0 && fi_gettime_ms() >= end_ms) {
				ret = FI_ETIMEDOUT;
				break;
			}
		}

		pthread_mutex_lock(&_cntr->mut);
	} else {
		ret = fi_wait_cond(&_cntr->cond, &_cntr->mut, timeout);
	}

	_cntr->is_waiting = 0;
	atomic_set(&_cntr->threshold, ~0);
	pthread_mutex_unlock(&_cntr->mut);
	sock_cntr_check_trigger_list(_cntr);

	return (_cntr->err_flag) ? -FI_EAVAIL : -ret;

out:
	pthread_mutex_unlock(&_cntr->mut);
	return ret;
}
int sock_cntr_inc(struct sock_cntr *cntr)
{
	pthread_mutex_lock(&cntr->mut);
	atomic_inc(&cntr->value);
	if (atomic_get(&cntr->value) >= atomic_get(&cntr->threshold))
		pthread_cond_signal(&cntr->cond);
	pthread_mutex_unlock(&cntr->mut);
	return 0;
}
Test(gnix_hashtable_advanced, insert_8K_lookup_128K_random)
{
	int ret, i, index;
	gnix_test_element_t *test_elements;
	gnix_test_element_t *found = NULL, *to_find = NULL;
	gnix_test_element_t *item;
	gnix_bitmap_t allocated = {0};
	int test_size = 8 * 1024;
	int bitmap_size = 64 * test_size;
	int lookups = 128 * 1024;

	test_elements = calloc(test_size, sizeof(gnix_test_element_t));
	cr_assert(test_elements != NULL);

	ret = _gnix_alloc_bitmap(&allocated, bitmap_size);
	cr_assert(ret == 0);

	srand(time(NULL));

	for (i = 0; i < test_size; ++i) {
		do {
			index = rand() % bitmap_size;
		} while (_gnix_test_and_set_bit(&allocated, index));

		item = &test_elements[i];
		item->key = index;
		item->val = rand() % lookups;
		item->magic = __GNIX_MAGIC_VALUE;
	}

	for (i = 0; i < test_size; ++i) {
		item = &test_elements[i];
		ret = _gnix_ht_insert(test_ht, item->key, item);
		cr_assert(ret == 0);
		cr_assert(atomic_get(&test_ht->ht_elements) == (i + 1));
	}

	cr_assert(atomic_get(&test_ht->ht_elements) == test_size);

	for (i = 0; i < lookups; ++i) {
		to_find = &test_elements[rand() % test_size];
		found = _gnix_ht_lookup(test_ht, to_find->key);
		cr_assert(found != NULL);
		cr_assert(found == to_find);
		cr_assert(found->magic == __GNIX_MAGIC_VALUE);
	}

	ret = _gnix_free_bitmap(&allocated);
	cr_expect(ret == 0);

	free(test_elements);
}
static int sock_poll_poll(struct fid_poll *pollset, void **context, int count)
{
	struct sock_poll *poll;
	struct sock_cq *cq;
	struct sock_eq *eq;
	struct sock_cntr *cntr;
	struct sock_fid_list *list_item;
	struct dlist_entry *p, *head;
	int ret_count = 0;

	poll = container_of(pollset, struct sock_poll, poll_fid.fid);
	head = &poll->fid_list;

	for (p = head->next; p != head && ret_count < count; p = p->next) {
		list_item = container_of(p, struct sock_fid_list, entry);
		switch (list_item->fid->fclass) {
		case FI_CLASS_CQ:
			cq = container_of(list_item->fid, struct sock_cq, cq_fid);
			sock_cq_progress(cq);
			fastlock_acquire(&cq->lock);
			if (rbfdused(&cq->cq_rbfd)) {
				*context++ = cq->cq_fid.fid.context;
				ret_count++;
			}
			fastlock_release(&cq->lock);
			break;

		case FI_CLASS_CNTR:
			cntr = container_of(list_item->fid, struct sock_cntr, cntr_fid);
			sock_cntr_progress(cntr);
			fastlock_acquire(&cntr->mut);
			if (atomic_get(&cntr->value) >= atomic_get(&cntr->threshold)) {
				*context++ = cntr->cntr_fid.fid.context;
				ret_count++;
			}
			fastlock_release(&cntr->mut);
			break;

		case FI_CLASS_EQ:
			eq = container_of(list_item->fid, struct sock_eq, eq);
			fastlock_acquire(&eq->lock);
			if (!dlistfd_empty(&eq->list)) {
				*context++ = eq->eq.fid.context;
				ret_count++;
			}
			fastlock_release(&eq->lock);
			break;

		default:
			break;
		}
	}

	return ret_count;
}
void lru_pages_destroy(cache_t *c, cache_page_t **page, int n_pages, int remove_from_segment)
{
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
	cache_segment_t *s;
	page_lru_t *lp;
	cache_page_t *p;
//	cache_cond_t *cache_cond;
	int i;
	int cr, cw, cf, count;

	cache_lock(c);

	log_printf(15, " START cp->bytes_used=" XOT "\n", cp->bytes_used);

	for (i = 0; i < n_pages; i++) {
		p = page[i];
		s = (cache_segment_t *)p->seg->priv;

		cr = atomic_get(p->access_pending[CACHE_READ]);
		cw = atomic_get(p->access_pending[CACHE_WRITE]);
		cf = atomic_get(p->access_pending[CACHE_FLUSH]);
		count = cr + cw + cf;

//		cache_cond = (cache_cond_t *)pigeon_coop_hole_data(&(p->cond_pch));
//		if (cache_cond == NULL) {  //** No one listening so free normally
		if (count == 0) {  //** No one is listening
			log_printf(15, "lru_pages_destroy i=%d p->offset=" XOT " seg=" XIDT " remove_from_segment=%d limbo=%d\n",
				   i, p->offset, segment_id(p->seg), remove_from_segment, cp->limbo_pages);
			cp->bytes_used -= s->page_size;
			lp = (page_lru_t *)p->priv;

			if (lp->ele != NULL) {
				move_to_ptr(cp->stack, lp->ele);
				delete_current(cp->stack, 0, 0);
			}

			if (remove_from_segment == 1) {
				s = (cache_segment_t *)p->seg->priv;
				list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
			}

			if (p->data[0].ptr) free(p->data[0].ptr);
			if (p->data[1].ptr) free(p->data[1].ptr);
			free(lp);
		} else {  //** Someone is listening so trigger them and also clear the bits so it will be released
			atomic_set(p->bit_fields, C_TORELEASE);
			log_printf(15, "lru_pages_destroy i=%d p->offset=" XOT " seg=" XIDT " remove_from_segment=%d cr=%d cw=%d cf=%d limbo=%d\n",
				   i, p->offset, segment_id(p->seg), remove_from_segment, cr, cw, cf, cp->limbo_pages);
		}
	}

	log_printf(15, " AFTER LOOP cp->bytes_used=" XOT "\n", cp->bytes_used);
	log_printf(15, " END cp->bytes_used=" XOT "\n", cp->bytes_used);

	cache_unlock(c);
}
static int sock_cntr_set(struct fid_cntr *cntr, uint64_t value)
{
	struct sock_cntr *_cntr;

	_cntr = container_of(cntr, struct sock_cntr, cntr_fid);
	pthread_mutex_lock(&_cntr->mut);
	atomic_set(&_cntr->value, value);
	if (atomic_get(&_cntr->value) >= atomic_get(&_cntr->threshold))
		pthread_cond_signal(&_cntr->cond);
	pthread_mutex_unlock(&_cntr->mut);
	return 0;
}
static uint64 get_spinlock_counter(spinlock* lock)
{
	uint32 high;
	uint32 low;

	do {
		high = (uint32)atomic_get(&lock->count_high);
		low = (uint32)atomic_get(&lock->count_low);
	} while (high != atomic_get(&lock->count_high));

	return ((uint64)high << 32) | low;
}
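/*
 * Generic illustration of the torn-read guard used above: a 64-bit counter is
 * kept in two 32-bit atomics, the reader retries until the high word is stable
 * across the read of the low word, and the (assumed single) writer bumps the
 * high word only when the low word wraps. This is a standalone C11 sketch, not
 * the original spinlock code; split_counter_t and its helpers are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	atomic_uint_least32_t count_low;
	atomic_uint_least32_t count_high;
} split_counter_t;

static uint64_t split_counter_read(split_counter_t *c)
{
	uint32_t high, low;

	do {
		high = atomic_load(&c->count_high);
		low = atomic_load(&c->count_low);
	} while (high != atomic_load(&c->count_high));

	return ((uint64_t)high << 32) | low;
}

static void split_counter_inc(split_counter_t *c)
{
	/* single-writer assumption: only one thread ever increments */
	uint32_t old_low = atomic_fetch_add(&c->count_low, 1);

	if (old_low == UINT32_MAX)
		atomic_fetch_add(&c->count_high, 1);
}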
Test(gnix_hashtable_advanced, insert_duplicate)
{
	int ret;

	ret = _gnix_ht_insert(test_ht, simple_element->key, simple_element);
	cr_assert(ret == 0);
	cr_assert(atomic_get(&test_ht->ht_elements) == 1);

	ret = _gnix_ht_insert(test_ht, simple_element->key, simple_element);
	cr_assert(ret == -FI_ENOSPC);
	cr_assert(atomic_get(&test_ht->ht_elements) == 1);
}
Test(gnix_hashtable_locked, iterate)
{
	int ret, i;
	gnix_test_element_t test_elements[1024];
	gnix_test_element_t *item;
	char test_elements_found[1024] = {0};

	srand(time(NULL));

	for (i = 0; i < 1024; ++i) {
		item = &test_elements[i];
		item->key = i;
		item->val = rand() % (1024 * 1024);
		item->magic = __GNIX_MAGIC_VALUE;
	}

	for (i = 0; i < 1024; ++i) {
		item = &test_elements[i];
		ret = _gnix_ht_insert(test_ht, item->key, item);
		cr_assert(ret == 0);
		cr_assert(atomic_get(&test_ht->ht_elements) == (i + 1));
	}

	{
		GNIX_HASHTABLE_ITERATOR(test_ht, iter);

		for (i = 0; i < 1024; ++i) {
			item = (gnix_test_element_t *)
				_gnix_ht_iterator_next(&iter);
			cr_assert(item);
			cr_assert(!test_elements_found[item->key]);
			test_elements_found[item->key] = 1;
		}
	}

	for (i = 1023; i >= 0; --i) {
		item = &test_elements[i];

		cr_assert(i == item->key);

		ret = _gnix_ht_remove(test_ht, item->key);
		cr_assert(ret == 0);
		cr_assert(atomic_get(&test_ht->ht_elements) == i);
	}

	cr_assert(atomic_get(&test_ht->ht_elements) == 0);
}
/** Halt wrapper
 *
 * Set halt flag and halt the CPU.
 *
 */
void halt()
{
#if (defined(CONFIG_DEBUG)) && (defined(CONFIG_KCONSOLE))
	bool rundebugger = false;

	if (!atomic_get(&haltstate)) {
		atomic_set(&haltstate, 1);
		rundebugger = true;
	}
#else
	atomic_set(&haltstate, 1);
#endif

	interrupts_disable();

#if (defined(CONFIG_DEBUG)) && (defined(CONFIG_KCONSOLE))
	if ((rundebugger) && (kconsole_check_poll()))
		kconsole("panic", "\nLast resort kernel console ready.\n", false);
#endif

	if (CPU)
		printf("cpu%u: halted\n", CPU->id);
	else
		printf("cpu: halted\n");

	cpu_halt();
}
void __gnix_hashtable_test_destroyed_clean(void)
{
	cr_assert(test_ht->ht_state == GNIX_HT_STATE_DEAD);
	cr_assert(atomic_get(&test_ht->ht_elements) == 0);
	cr_assert(test_ht->ht_size == 0);
	cr_assert(test_ht->ht_lf_tbl == NULL);
}
void __gnix_hashtable_test_initialized(void)
{
	cr_assert(test_ht->ht_state == GNIX_HT_STATE_READY);
	cr_assert(atomic_get(&test_ht->ht_elements) == 0);
	cr_assert(test_ht->ht_size == test_ht->ht_attr.ht_initial_size);
	cr_assert(test_ht->ht_lf_tbl != NULL);
}
static int sock_av_close(struct fid *fid)
{
	struct sock_av *av;
	int i;

	av = container_of(fid, struct sock_av, av_fid.fid);
	if (atomic_get(&av->ref))
		return -FI_EBUSY;

	for (i = 0; i < av->table_hdr->stored; i++) {
		if (idm_lookup(&av->addr_idm, i))
			idm_clear(&av->addr_idm, i);
	}

	if (!av->name) {
		free(av->table_hdr);
	} else {
		shm_unlink(av->name);
		free(av->name);
		munmap(av->table_hdr, sizeof(struct sock_av_table_hdr) +
		       av->attr.count * sizeof(struct sock_av_addr));
		close(av->shared_fd);
	}

	atomic_dec(&av->domain->ref);
	free(av->key);
	free(av);
	return 0;
}
void psmx2_cntr_check_trigger(struct psmx2_fid_cntr *cntr)
{
	struct psmx2_fid_domain *domain = cntr->domain;
	struct psmx2_trigger *trigger;

	if (!cntr->trigger)
		return;

	pthread_mutex_lock(&cntr->trigger_lock);

	trigger = cntr->trigger;
	while (trigger) {
		if (atomic_get(&cntr->counter) < trigger->threshold)
			break;

		cntr->trigger = trigger->next;

		if (domain->am_initialized) {
			fastlock_acquire(&domain->trigger_queue.lock);
			slist_insert_tail(&trigger->list_entry,
					  &domain->trigger_queue.list);
			fastlock_release(&domain->trigger_queue.lock);
		} else {
			psmx2_process_trigger(domain, trigger);
		}

		trigger = cntr->trigger;
	}

	pthread_mutex_unlock(&cntr->trigger_lock);
}
static int sock_dom_close(struct fid *fid)
{
	struct sock_domain *dom;
	void *res;
	int c;

	dom = container_of(fid, struct sock_domain, dom_fid.fid);
	if (atomic_get(&dom->ref)) {
		return -FI_EBUSY;
	}

	dom->listening = 0;
	write(dom->signal_fds[0], &c, 1);
	if (pthread_join(dom->listen_thread, &res)) {
		SOCK_LOG_ERROR("could not join listener thread, errno = %d\n",
			       errno);
		return -FI_EBUSY;
	}

	if (dom->r_cmap.size)
		sock_conn_map_destroy(&dom->r_cmap);
	fastlock_destroy(&dom->r_cmap.lock);

	sock_pe_finalize(dom->pe);
	fastlock_destroy(&dom->lock);
	free(dom);
	return 0;
}
static void *continuous_remove(void *data)
{
	int pos, n, ret;
	int expected_state;
	fi_addr_t *addresses = ((fi_addr_t **) data)[0];
	atomic_t *fe = ((atomic_t **) data)[1];
	int num_addrs = (int) ((uint64_t *) data)[2];
	atomic_t *done = ((atomic_t **) data)[3];

	ret = pthread_barrier_wait(&mtbar);
	if ((ret != PTHREAD_BARRIER_SERIAL_THREAD) && (ret != 0)) {
		pthread_exit((void *) 1UL);
	}

	pos = 0;
	while (!atomic_get(done)) {
		n = (pos++) % num_addrs;
		expected_state = state_full;
		if (atomic_cas_weak(&fe[n], &expected_state, state_locked)) {
			av_lock();
			ret = fi_av_remove(av, &addresses[n], 1, 0);
			av_unlock();
			if (ret != FI_SUCCESS) {
				/* flag shutdown to avoid deadlock */
				atomic_set(done, 1);
				pthread_exit((void *) 1UL);
			}
			atomic_set(&fe[n], state_empty);
		}
	}

	pthread_exit((void *) NULL);
}
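/*
 * Standalone sketch of the per-slot state machine the thread above relies on:
 * each slot holds one of three states, and a consumer claims a FULL slot with
 * a weak compare-and-swap to LOCKED before doing its work and releasing the
 * slot back to EMPTY. This is a generic C11 illustration under assumed names
 * (slot_state_t, slot_try_consume); it is not the test harness's atomic_t API.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum { STATE_EMPTY, STATE_FULL, STATE_LOCKED };

typedef atomic_int slot_state_t;

static bool slot_try_consume(slot_state_t *slot, void (*work)(int), int idx)
{
	int expected = STATE_FULL;

	/* a weak CAS may fail spuriously; the caller simply retries on the
	 * next pass, exactly like the polling loop above */
	if (!atomic_compare_exchange_weak(slot, &expected, STATE_LOCKED))
		return false;

	work(idx);				/* slot is exclusively ours here */
	atomic_store(slot, STATE_EMPTY);	/* hand the slot back */
	return true;
}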
static int usdf_fabric_close(fid_t fid)
{
	struct usdf_fabric *fp;
	int ret;
	void *rv;

	fp = fab_fidtou(fid);
	if (atomic_get(&fp->fab_refcnt) > 0) {
		return -FI_EBUSY;
	}

	/* Tell progression thread to exit */
	fp->fab_exit = 1;
	ret = usdf_fabric_wake_thread(fp);
	if (ret != 0) {
		return ret;
	}
	pthread_join(fp->fab_thread, &rv);
	usdf_timer_deinit(fp);

	close(fp->fab_eventfd);
	close(fp->fab_epollfd);
	close(fp->fab_arp_sockfd);
	free(fp);

	return 0;
}
ssize_t usdf_eq_write_internal(struct usdf_eq *eq, uint32_t event,
			       const void *buf, size_t len, uint64_t flags)
{
	uint64_t val;
	int ret;
	int n;

	pthread_spin_lock(&eq->eq_lock);

	/* EQ full? */
	if (atomic_get(&eq->eq_num_events) == eq->eq_ev_ring_size) {
		ret = -FI_EAGAIN;
		goto done;
	}

	ret = usdf_eq_write_event(eq, event, buf, len, flags);

	/* If successful, post to eventfd */
	if (ret >= 0 && eq->eq_wait_obj == FI_WAIT_FD) {
		val = 1;
		n = write(eq->eq_fd, &val, sizeof(val));
		if (n != sizeof(val)) {
			ret = -FI_EIO;
		}
	}

done:
	pthread_spin_unlock(&eq->eq_lock);
	return ret;
}
static ssize_t usdf_eq_write_fd(struct fid_eq *feq, uint32_t event,
				const void *buf, size_t len, uint64_t flags)
{
	struct usdf_eq *eq;
	uint64_t val;
	int ret;
	int n;

	eq = eq_ftou(feq);

	pthread_spin_lock(&eq->eq_lock);

	/* EQ full? */
	if (atomic_get(&eq->eq_num_events) == eq->eq_ev_ring_size) {
		ret = -FI_EAGAIN;
		goto done;
	}

	ret = usdf_eq_write_event(eq, event, buf, len, flags);

	/* If successful, post to eventfd */
	if (ret >= 0) {
		val = 1;
		n = write(eq->eq_fd, &val, sizeof(val));
		if (n != sizeof(val)) {
			ret = -FI_EIO;
		}
		/* XXX unpost event? */
	}

done:
	pthread_spin_unlock(&eq->eq_lock);
	return ret;
}
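/*
 * Minimal standalone demonstration of the eventfd signalling pattern used by
 * the two EQ writers above: the producer writes an 8-byte counter value to the
 * eventfd to wake any waiter, and the waiter's read() blocks until at least
 * one event has been posted, then drains the count. This is a generic Linux
 * eventfd(2) sketch, not part of the usNIC provider.
 */
#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int efd = eventfd(0, 0);

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	/* producer side: post one event */
	val = 1;
	if (write(efd, &val, sizeof(val)) != sizeof(val))
		perror("write");

	/* consumer side: read() returns the accumulated count and resets it */
	if (read(efd, &val, sizeof(val)) == sizeof(val))
		printf("drained %llu event(s)\n", (unsigned long long)val);

	close(efd);
	return 0;
}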
Test(domain, many_domains)
{
	int i, ret;
	const int num_doms = 7919;
	struct fid_domain *doms[num_doms];
	struct gnix_fid_domain *gdom;
	struct gnix_fid_fabric *gfab;

	memset(doms, 0, num_doms * sizeof(struct fid_domain *));

	gfab = container_of(fabric, struct gnix_fid_fabric, fab_fid);
	for (i = 0; i < num_doms; i++) {
		ret = fi_domain(fabric, fi, &doms[i], NULL);
		cr_assert(ret == FI_SUCCESS, "fi_domain");

		gdom = container_of(doms[i], struct gnix_fid_domain,
				    domain_fid);
		cr_assert(gdom, "domain not allocated");
		cr_assert(gdom->fabric == gfab, "Incorrect fabric");
		cr_assert(atomic_get(&gdom->ref_cnt.references) == 1,
			  "Incorrect ref_cnt");
	}

	for (i = num_doms - 1; i >= 0; i--) {
		ret = fi_close(&doms[i]->fid);
		cr_assert(ret == FI_SUCCESS, "fi_close domain");
	}
}
static uint64_t sock_cntr_read(struct fid_cntr *cntr)
{
	struct sock_cntr *_cntr;

	_cntr = container_of(cntr, struct sock_cntr, cntr_fid);
	sock_cntr_progress(_cntr);
	return atomic_get(&_cntr->value);
}
Test(gnix_hashtable_advanced, insert_1_remove_1)
{
	int ret;

	srand(time(NULL));

	ret = _gnix_ht_insert(test_ht, simple_element->key, simple_element);
	cr_assert(ret == 0);
	cr_assert(atomic_get(&test_ht->ht_elements) == 1);

	ret = _gnix_ht_remove(test_ht, simple_element->key);
	cr_assert(ret == 0);
	cr_assert(atomic_get(&test_ht->ht_elements) == 0);
}
static int usdf_eq_close(fid_t fid)
{
	struct usdf_eq *eq;

	eq = eq_fidtou(fid);

	if (atomic_get(&eq->eq_refcnt) > 0) {
		return -FI_EBUSY;
	}
	atomic_dec(&eq->eq_fabric->fab_refcnt);

	/* release wait obj */
	switch (eq->eq_wait_obj) {
	case FI_WAIT_FD:
		close(eq->eq_fd);
		break;
	default:
		break;
	}

	free(eq);

	return 0;
}
static int usdf_domain_close(fid_t fid)
{
	struct usdf_domain *udp;
	int ret;

	USDF_TRACE_SYS(DOMAIN, "\n");

	udp = container_of(fid, struct usdf_domain, dom_fid.fid);
	if (atomic_get(&udp->dom_refcnt) > 0) {
		return -FI_EBUSY;
	}

	if (udp->dom_dev != NULL) {
		ret = usd_close(udp->dom_dev);
		if (ret != 0) {
			return ret;
		}
	}

	usdf_dom_rdc_free_data(udp);

	if (udp->dom_eq != NULL) {
		atomic_dec(&udp->dom_eq->eq_refcnt);
	}
	atomic_dec(&udp->dom_fabric->fab_refcnt);
	LIST_REMOVE(udp, dom_link);
	fi_freeinfo(udp->dom_info);
	free(udp);

	return 0;
}