/*
 * dapl_rmr_free
 *
 * DAPL Requirements Version xxx, 6.6.4.2
 *
 * Destroy an instance of the Remote Memory Region
 *
 * Input:
 *	rmr_handle
 *
 * Output:
 *	none
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 */
DAT_RETURN
dapl_rmr_free(IN DAT_RMR_HANDLE rmr_handle)
{
	DAPL_RMR *rmr_ptr;
	DAT_RETURN status;

	if (DAPL_BAD_HANDLE(rmr_handle, DAPL_MAGIC_RMR)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_RMR));
	}
	rmr_ptr = (DAPL_RMR *)rmr_handle;

	/*
	 * If the user did not perform an unbind op, release
	 * counts here.
	 */
	if (rmr_ptr->param.lmr_triplet.virtual_address != 0) {
		(void) dapl_os_atomic_dec(&rmr_ptr->lmr->lmr_ref_count);
		rmr_ptr->param.lmr_triplet.virtual_address = 0;
	}

	/*
	 * Release the underlying memory window; on failure the RMR is
	 * left intact (and its error status returned to the caller).
	 */
	status = dapls_ib_mw_free(rmr_ptr);
	if (status != DAT_SUCCESS) {
		return (status);
	}

	dapl_os_atomic_dec(&rmr_ptr->pz->pz_ref_count);
	dapl_rmr_dealloc(rmr_ptr);

	return (DAT_SUCCESS);
}
/*
 * dapls_osd_fork_cleanup
 *
 * Tear down all provider state in the child after a fork(). Walks the
 * global provider list and destructively frees every IA and all of its
 * dependent objects (RMRs, SPs, EPs, LMRs, CRs, PZs, EVDs). Memory-only
 * cleanup: IB handles are deliberately left alone because the underlying
 * IB subsystem does not deal with fork at all.
 *
 * Input:
 *	none (operates on g_dapl_provider_list)
 *
 * Output:
 *	none
 *
 * Returns:
 *	void
 */
void
dapls_osd_fork_cleanup(void)
{
	DAPL_PROVIDER_LIST_NODE *cur_node;
	DAPL_HCA *hca_ptr;
	DAPL_IA *ia_ptr;
	DAPL_LMR *lmr_ptr;
	DAPL_RMR *rmr_ptr;
	DAPL_PZ *pz_ptr;
	DAPL_CR *cr_ptr;
	DAPL_EP *ep_ptr;
	DAPL_EVD *evd_ptr;
	DAT_EP_PARAM *param;
	DAPL_SP *sp_ptr;

	while (NULL != g_dapl_provider_list.head) {
		/* Detach the node first; it is freed at the bottom. */
		cur_node = g_dapl_provider_list.head;
		g_dapl_provider_list.head = cur_node->next;

		hca_ptr = (DAPL_HCA *) cur_node->data.extension;

		/*
		 * Walk the list of IA ptrs & clean up. This is purposely
		 * a destructive list walk, we really don't want to preserve
		 * any of it.
		 */
		while (!dapl_llist_is_empty(&hca_ptr->ia_list_head)) {
			ia_ptr = (DAPL_IA *)
			    dapl_llist_peek_head(&hca_ptr->ia_list_head);

			/*
			 * The rest of the cleanup code is similar to
			 * dapl_ia_close, the big difference is that we don't
			 * release IB resources, only memory; the underlying
			 * IB subsystem doesn't deal with fork at all, so
			 * leave IB handles alone.
			 */

			/*
			 * RMRs: drop the bound-LMR refcount if the user never
			 * unbound, then drop the PZ refcount and free.
			 */
			while (!dapl_llist_is_empty(&ia_ptr->rmr_list_head)) {
				rmr_ptr = (DAPL_RMR *)
				    dapl_llist_peek_head(&ia_ptr->
				    rmr_list_head);
				if (rmr_ptr->param.lmr_triplet.
				    virtual_address != 0) {
					dapl_os_atomic_dec(&rmr_ptr->lmr->
					    lmr_ref_count);
					rmr_ptr->param.lmr_triplet.
					    virtual_address = 0;
				}
				dapl_os_atomic_dec(&rmr_ptr->pz->pz_ref_count);
				dapl_ia_unlink_rmr(rmr_ptr->header.owner_ia,
				    rmr_ptr);
				dapl_rmr_dealloc(rmr_ptr);
			}

			/* Reserved service points: release EVD ref and free. */
			while (!dapl_llist_is_empty(&ia_ptr->rsp_list_head)) {
				sp_ptr = (DAPL_SP *)
				    dapl_llist_peek_head(&ia_ptr->
				    rsp_list_head);
				dapl_os_atomic_dec(&
				    ((DAPL_EVD *) sp_ptr->
				    evd_handle)->evd_ref_count);
				dapls_ia_unlink_sp(ia_ptr, sp_ptr);
				dapls_sp_free_sp(sp_ptr);
			}

			/*
			 * Endpoints: release every refcount the EP holds
			 * (PZ plus the three EVD handles), then free.
			 */
			while (!dapl_llist_is_empty(&ia_ptr->ep_list_head)) {
				ep_ptr = (DAPL_EP *)
				    dapl_llist_peek_head(&ia_ptr->ep_list_head);
				param = &ep_ptr->param;
				if (param->pz_handle != NULL) {
					dapl_os_atomic_dec(&
					    ((DAPL_PZ *) param->
					    pz_handle)->
					    pz_ref_count);
				}
				if (param->recv_evd_handle != NULL) {
					dapl_os_atomic_dec(&
					    ((DAPL_EVD *) param->
					    recv_evd_handle)->
					    evd_ref_count);
				}
				if (param->request_evd_handle) {
					dapl_os_atomic_dec(&
					    ((DAPL_EVD *) param->
					    request_evd_handle)->
					    evd_ref_count);
				}
				if (param->connect_evd_handle != NULL) {
					dapl_os_atomic_dec(&
					    ((DAPL_EVD *) param->
					    connect_evd_handle)->
					    evd_ref_count);
				}
				/* ...and free the resource */
				dapl_ia_unlink_ep(ia_ptr, ep_ptr);
				dapl_ep_dealloc(ep_ptr);
			}

			/*
			 * LMRs: remove from the HCA's context hash table,
			 * release the PZ refcount, and free.
			 */
			while (!dapl_llist_is_empty(&ia_ptr->lmr_list_head)) {
				lmr_ptr = (DAPL_LMR *)
				    dapl_llist_peek_head(&ia_ptr->
				    lmr_list_head);
				(void)dapls_hash_remove(lmr_ptr->header.
				    owner_ia->hca_ptr->
				    lmr_hash_table,
				    lmr_ptr->param.
				    lmr_context, NULL);
				pz_ptr = (DAPL_PZ *) lmr_ptr->param.pz_handle;
				dapl_os_atomic_dec(&pz_ptr->pz_ref_count);
				dapl_ia_unlink_lmr(lmr_ptr->header.owner_ia,
				    lmr_ptr);
				dapl_lmr_dealloc(lmr_ptr);
			}

			/*
			 * Public service points: free any pending CRs queued
			 * on each SP, then release the EVD ref and free.
			 */
			while (!dapl_llist_is_empty(&ia_ptr->psp_list_head)) {
				sp_ptr = (DAPL_SP *)
				    dapl_llist_peek_head(&ia_ptr->
				    psp_list_head);
				while (!dapl_llist_is_empty
				    (&sp_ptr->cr_list_head)) {
					cr_ptr = (DAPL_CR *)
					    dapl_llist_peek_head(&sp_ptr->
					    cr_list_head);
					dapl_sp_remove_cr(sp_ptr, cr_ptr);
					dapls_cr_free(cr_ptr);
				}
				dapls_ia_unlink_sp(ia_ptr, sp_ptr);
				dapl_os_atomic_dec(&
				    ((DAPL_EVD *) sp_ptr->
				    evd_handle)->evd_ref_count);
				dapls_sp_free_sp(sp_ptr);
			}

			/*
			 * PZs: all refcounts were released above, so the
			 * zones can simply be unlinked and freed now.
			 */
			while (!dapl_llist_is_empty(&ia_ptr->pz_list_head)) {
				pz_ptr = (DAPL_PZ *)
				    dapl_llist_peek_head(&ia_ptr->pz_list_head);
				dapl_ia_unlink_pz(pz_ptr->header.owner_ia,
				    pz_ptr);
				dapl_pz_dealloc(pz_ptr);
			}

			/* EVDs last: everything referencing them is gone. */
			while (!dapl_llist_is_empty(&ia_ptr->evd_list_head)) {
				evd_ptr = (DAPL_EVD *)
				    dapl_llist_peek_head(&ia_ptr->
				    evd_list_head);
				dapl_ia_unlink_evd(evd_ptr->header.owner_ia,
				    evd_ptr);
				/* reset the cq_handle to avoid having it removed */
				evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;
				dapls_evd_dealloc(evd_ptr);
			}

			dapl_hca_unlink_ia(ia_ptr->hca_ptr, ia_ptr);
			/* async error evd was taken care of above, reset the pointer */
			ia_ptr->async_error_evd = NULL;
			dapls_ia_free(ia_ptr);
		} /* end while ( ia_ptr != NULL ) */

		dapl_os_free(cur_node, sizeof(DAPL_PROVIDER_LIST_NODE));
	} /* end while (NULL != g_dapl_provider_list.head) */
}