/*
 * dapli_hca_cleanup
 *
 * Tear down a partially allocated HCA: close the verbs device,
 * invalidate the handle, and optionally drop one handle reference.
 * Exists purely to keep error-path cleanup in the callers simple.
 */
void dapli_hca_cleanup(DAPL_HCA * hca_ptr, DAT_BOOLEAN dec_ref)
{
	dapls_ib_close_hca(hca_ptr);
	hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;

	/* Caller tells us whether it had taken a reference on this HCA. */
	if (dec_ref != DAT_TRUE)
		return;

	dapl_os_atomic_dec(&hca_ptr->handle_ref_count);
}
/*
 * dapl_rmr_free
 *
 * DAPL Requirements Version xxx, 6.6.4.2
 *
 * Destroy an instance of the Remote Memory Region.
 *
 * Input:
 *	rmr_handle
 *
 * Output:
 *	none
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_PARAMETER
 */
DAT_RETURN dapl_rmr_free(IN DAT_RMR_HANDLE rmr_handle)
{
	DAPL_RMR *rmr;
	DAT_RETURN dat_status;

	if (DAPL_BAD_HANDLE(rmr_handle, DAPL_MAGIC_RMR))
		return DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_RMR);

	rmr = (DAPL_RMR *)rmr_handle;

	/*
	 * The user may not have performed an unbind; if the window is
	 * still bound, release the LMR reference taken at bind time.
	 */
	if (rmr->param.lmr_triplet.virtual_address != 0) {
		(void) dapl_os_atomic_dec(&rmr->lmr->lmr_ref_count);
		rmr->param.lmr_triplet.virtual_address = 0;
	}

	/* Destroy the underlying memory window before touching the PZ. */
	dat_status = dapls_ib_mw_free(rmr);
	if (dat_status != DAT_SUCCESS)
		return dat_status;

	dapl_os_atomic_dec(&rmr->pz->pz_ref_count);
	dapl_rmr_dealloc(rmr);

	return DAT_SUCCESS;
}
/*
 * dapli_get_sp_ep
 *
 * Passive side of a connection is now fully established. Clean
 * up resources and obtain the EP pointer associated with a CR in
 * the SP.
 *
 * Input:
 *	ib_cm_handle,
 *	sp_ptr
 *	dat_event_num	connection event driving the cleanup
 *
 * Output:
 *	none
 *
 * Returns:
 *	ep_ptr		the EP recorded in the matching CR, or NULL if
 *			the SP is gone, no CR matches, or the EP handle
 *			is no longer valid
 */
DAPL_EP *dapli_get_sp_ep(IN dp_ib_cm_handle_t ib_cm_handle,
			 IN DAPL_SP * sp_ptr, IN DAT_EVENT_NUMBER dat_event_num)
{
	DAPL_CR *cr_ptr;
	DAPL_EP *ep_ptr;

	/*
	 * acquire the lock, we may be racing with other threads here
	 */
	dapl_os_lock(&sp_ptr->header.lock);

	/* Verify under lock that the SP is still valid */
	if (sp_ptr->header.magic == DAPL_MAGIC_INVALID) {
		dapl_os_unlock(&sp_ptr->header.lock);
		return NULL;
	}

	/*
	 * There are potentially multiple connections in progress. Need to
	 * go through the list and find the one we are interested
	 * in. There is no guarantee of order. dapl_sp_search_cr
	 * leaves the CR on the SP queue.
	 */
	cr_ptr = dapl_sp_search_cr(sp_ptr, ib_cm_handle);
	if (cr_ptr == NULL) {
		dapl_os_unlock(&sp_ptr->header.lock);
		return NULL;
	}

	ep_ptr = (DAPL_EP *) cr_ptr->param.local_ep_handle;

	/* Quick check to ensure our EP is still valid */
	if ((DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP))) {
		ep_ptr = NULL;
	}

	/* The CR record is discarded in all except for the CONNECTED case,
	 * as it will have no further relevance.
	 */
	if (dat_event_num != DAT_CONNECTION_EVENT_ESTABLISHED) {
		/* Remove the CR from the queue */
		dapl_sp_remove_cr(sp_ptr, cr_ptr);

		if (ep_ptr != NULL) {
			ep_ptr->cr_ptr = NULL;
		}

		/*
		 * If this SP has been removed from service, free it
		 * up after the last CR is removed
		 */
		if (sp_ptr->listening != DAT_TRUE &&
		    sp_ptr->cr_list_count == 0 &&
		    sp_ptr->state != DAPL_SP_STATE_FREE &&
		    sp_ptr->state != DAPL_SP_STATE_RSP_LISTENING) {
			dapl_dbg_log(DAPL_DBG_TYPE_CM,
				     "--> dapli_get_sp_ep! disconnect dump sp: %p \n",
				     sp_ptr);
			/* Decrement the ref count on the EVD */
			if (sp_ptr->evd_handle) {
				dapl_os_atomic_dec(&
						   ((DAPL_EVD *) sp_ptr->
						    evd_handle)->evd_ref_count);
				sp_ptr->evd_handle = NULL;
			}
			sp_ptr->state = DAPL_SP_STATE_FREE;
			/*
			 * Drop the lock before calling out of the module;
			 * from here on nobody else can reach this SP
			 * (state is FREE), so touching it unlocked is safe.
			 */
			dapl_os_unlock(&sp_ptr->header.lock);
			(void)dapls_ib_remove_conn_listener(sp_ptr->header.
							    owner_ia, sp_ptr);
			dapls_ia_unlink_sp((DAPL_IA *) sp_ptr->header.owner_ia,
					   sp_ptr);
			dapls_sp_free_sp(sp_ptr);
			dapls_cr_free(cr_ptr);
			/* sp_ptr and cr_ptr are gone; skip the normal path */
			goto skip_unlock;
		}
		dapl_os_unlock(&sp_ptr->header.lock);
		/* free memory outside of the lock */
		dapls_cr_free(cr_ptr);
	} else {
		dapl_os_unlock(&sp_ptr->header.lock);
	}

      skip_unlock:
	return ep_ptr;
}
/*
 * dapls_osd_fork_cleanup
 *
 * Destructively tear down every provider/HCA/IA object after a fork()
 * in the child process. Only host memory is released; IB handles are
 * deliberately left untouched because the underlying IB subsystem does
 * not support fork, and the handles belong to the parent.
 * (NOTE(review): the previous header comment described an env-var
 * helper and did not match this function.)
 *
 * Input:
 *	none
 *
 * Returns:
 *	none
 */
void dapls_osd_fork_cleanup(void)
{
	DAPL_PROVIDER_LIST_NODE *cur_node;
	DAPL_HCA *hca_ptr;
	DAPL_IA *ia_ptr;
	DAPL_LMR *lmr_ptr;
	DAPL_RMR *rmr_ptr;
	DAPL_PZ *pz_ptr;
	DAPL_CR *cr_ptr;
	DAPL_EP *ep_ptr;
	DAPL_EVD *evd_ptr;
	DAT_EP_PARAM *param;
	DAPL_SP *sp_ptr;

	/* Consume the provider list one node at a time. */
	while (NULL != g_dapl_provider_list.head) {
		cur_node = g_dapl_provider_list.head;
		g_dapl_provider_list.head = cur_node->next;
		hca_ptr = (DAPL_HCA *) cur_node->data.extension;

		/*
		 * Walk the list of IA ptrs & clean up. This is purposely
		 * a destructive list walk, we really don't want to preserve
		 * any of it.
		 */
		while (!dapl_llist_is_empty(&hca_ptr->ia_list_head)) {
			ia_ptr = (DAPL_IA *)
			    dapl_llist_peek_head(&hca_ptr->ia_list_head);

			/*
			 * The rest of the cleanup code is similar to
			 * dapl_ia_close, the big difference is that we don't
			 * release IB resources, only memory; the underlying
			 * IB subsystem doesn't deal with fork at all, so
			 * leave IB handles alone.
			 */

			/* RMRs: release the bind ref (if bound) and PZ ref. */
			while (!dapl_llist_is_empty(&ia_ptr->rmr_list_head)) {
				rmr_ptr = (DAPL_RMR *)
				    dapl_llist_peek_head(&ia_ptr->
							 rmr_list_head);
				if (rmr_ptr->param.lmr_triplet.
				    virtual_address != 0) {
					dapl_os_atomic_dec(&rmr_ptr->lmr->
							   lmr_ref_count);
					rmr_ptr->param.lmr_triplet.
					    virtual_address = 0;
				}
				dapl_os_atomic_dec(&rmr_ptr->pz->pz_ref_count);
				dapl_ia_unlink_rmr(rmr_ptr->header.owner_ia,
						   rmr_ptr);
				dapl_rmr_dealloc(rmr_ptr);
			}

			/* Reserved SPs: drop the EVD ref and free. */
			while (!dapl_llist_is_empty(&ia_ptr->rsp_list_head)) {
				sp_ptr = (DAPL_SP *)
				    dapl_llist_peek_head(&ia_ptr->
							 rsp_list_head);
				dapl_os_atomic_dec(&
						   ((DAPL_EVD *) sp_ptr->
						    evd_handle)->evd_ref_count);
				dapls_ia_unlink_sp(ia_ptr, sp_ptr);
				dapls_sp_free_sp(sp_ptr);
			}

			/* EPs: drop PZ/EVD refs the EP may hold, then free. */
			while (!dapl_llist_is_empty(&ia_ptr->ep_list_head)) {
				ep_ptr = (DAPL_EP *)
				    dapl_llist_peek_head(&ia_ptr->ep_list_head);
				param = &ep_ptr->param;
				if (param->pz_handle != NULL) {
					dapl_os_atomic_dec(&
							   ((DAPL_PZ *) param->
							    pz_handle)->
							   pz_ref_count);
				}
				if (param->recv_evd_handle != NULL) {
					dapl_os_atomic_dec(&
							   ((DAPL_EVD *) param->
							    recv_evd_handle)->
							   evd_ref_count);
				}
				if (param->request_evd_handle) {
					dapl_os_atomic_dec(&
							   ((DAPL_EVD *) param->
							    request_evd_handle)->
							   evd_ref_count);
				}
				if (param->connect_evd_handle != NULL) {
					dapl_os_atomic_dec(&
							   ((DAPL_EVD *) param->
							    connect_evd_handle)->
							   evd_ref_count);
				}
				/* ...and free the resource */
				dapl_ia_unlink_ep(ia_ptr, ep_ptr);
				dapl_ep_dealloc(ep_ptr);
			}

			/* LMRs: remove from the HCA hash, drop PZ ref, free. */
			while (!dapl_llist_is_empty(&ia_ptr->lmr_list_head)) {
				lmr_ptr = (DAPL_LMR *)
				    dapl_llist_peek_head(&ia_ptr->
							 lmr_list_head);
				(void)dapls_hash_remove(lmr_ptr->header.
							owner_ia->hca_ptr->
							lmr_hash_table,
							lmr_ptr->param.
							lmr_context, NULL);
				pz_ptr = (DAPL_PZ *) lmr_ptr->param.pz_handle;
				dapl_os_atomic_dec(&pz_ptr->pz_ref_count);
				dapl_ia_unlink_lmr(lmr_ptr->header.owner_ia,
						   lmr_ptr);
				dapl_lmr_dealloc(lmr_ptr);
			}

			/* PSPs: free any queued CRs first, then the SP. */
			while (!dapl_llist_is_empty(&ia_ptr->psp_list_head)) {
				sp_ptr = (DAPL_SP *)
				    dapl_llist_peek_head(&ia_ptr->
							 psp_list_head);
				while (!dapl_llist_is_empty
				       (&sp_ptr->cr_list_head)) {
					cr_ptr = (DAPL_CR *)
					    dapl_llist_peek_head(&sp_ptr->
								 cr_list_head);
					dapl_sp_remove_cr(sp_ptr, cr_ptr);
					dapls_cr_free(cr_ptr);
				}
				dapls_ia_unlink_sp(ia_ptr, sp_ptr);
				dapl_os_atomic_dec(&
						   ((DAPL_EVD *) sp_ptr->
						    evd_handle)->evd_ref_count);
				dapls_sp_free_sp(sp_ptr);
			}

			/* PZs: all references were dropped above. */
			while (!dapl_llist_is_empty(&ia_ptr->pz_list_head)) {
				pz_ptr = (DAPL_PZ *)
				    dapl_llist_peek_head(&ia_ptr->pz_list_head);
				dapl_ia_unlink_pz(pz_ptr->header.owner_ia,
						  pz_ptr);
				dapl_pz_dealloc(pz_ptr);
			}

			/* EVDs last: everything holding refs is gone. */
			while (!dapl_llist_is_empty(&ia_ptr->evd_list_head)) {
				evd_ptr = (DAPL_EVD *)
				    dapl_llist_peek_head(&ia_ptr->
							 evd_list_head);
				dapl_ia_unlink_evd(evd_ptr->header.owner_ia,
						   evd_ptr);
				/* reset the cq_handle to avoid having it removed */
				evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;
				dapls_evd_dealloc(evd_ptr);
			}

			dapl_hca_unlink_ia(ia_ptr->hca_ptr, ia_ptr);
			/* asycn error evd was taken care of above, reset the
			 * pointer */
			ia_ptr->async_error_evd = NULL;
			dapls_ia_free(ia_ptr);
		}		/* end while ( ia_ptr != NULL ) */

		dapl_os_free(cur_node, sizeof(DAPL_PROVIDER_LIST_NODE));
	}			/* end while (NULL != g_dapl_provider_list.head) */
}
static DAT_RETURN dapli_rmr_bind_fuse( IN DAPL_RMR *rmr, IN const DAT_LMR_TRIPLET* lmr_triplet, IN DAT_MEM_PRIV_FLAGS mem_priv, IN DAPL_EP *ep_ptr, IN DAT_RMR_COOKIE user_cookie, IN DAT_COMPLETION_FLAGS completion_flags, OUT DAT_RMR_CONTEXT *rmr_context) { DAPL_LMR *lmr; DAPL_COOKIE *cookie; DAT_RETURN dat_status; dat_status = dapls_hash_search( rmr->header.owner_ia->hca_ptr->lmr_hash_table, lmr_triplet->lmr_context, (DAPL_HASH_DATA *) &lmr); if (DAT_SUCCESS != dat_status) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); goto bail; } /* * if the ep in unconnected return an error. IB requires that the * QP be connected to change a memory window binding since: * * - memory window bind operations are WQEs placed on a QP's * send queue * * - QP's only process WQEs on the send queue when the QP is in * the RTS state */ if (DAT_EP_STATE_CONNECTED != ep_ptr->param.ep_state) { dat_status = DAT_ERROR(DAT_INVALID_STATE, dapls_ep_state_subtype(ep_ptr)); goto bail; } if (DAT_FALSE == dapl_mr_bounds_check( dapl_mr_get_address(lmr->param.region_desc, lmr->param.mem_type), lmr->param.length, lmr_triplet->virtual_address, lmr_triplet->segment_length)) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); goto bail; } /* If the LMR, RMR, and EP are not in the same PZ, there is an error */ if ((ep_ptr->param.pz_handle != lmr->param.pz_handle) || (ep_ptr->param.pz_handle != rmr->param.pz_handle)) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); goto bail; } if (!dapl_rmr_validate_completion_flag(DAT_COMPLETION_SUPPRESS_FLAG, ep_ptr->param.ep_attr.request_completion_flags, completion_flags) || !dapl_rmr_validate_completion_flag(DAT_COMPLETION_UNSIGNALLED_FLAG, ep_ptr->param.ep_attr.request_completion_flags, completion_flags) || !dapl_rmr_validate_completion_flag( DAT_COMPLETION_BARRIER_FENCE_FLAG, ep_ptr->param.ep_attr.request_completion_flags, completion_flags)) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); goto 
bail; } dat_status = dapls_rmr_cookie_alloc(&ep_ptr->req_buffer, rmr, user_cookie, &cookie); if (DAT_SUCCESS != dat_status) { goto bail; } dat_status = dapls_ib_mw_bind(rmr, lmr_triplet->lmr_context, ep_ptr, cookie, lmr_triplet->virtual_address, lmr_triplet->segment_length, mem_priv, completion_flags); if (DAT_SUCCESS != dat_status) { dapls_cookie_dealloc(&ep_ptr->req_buffer, cookie); goto bail; } (void) dapl_os_atomic_inc(&lmr->lmr_ref_count); /* if the RMR was previously bound */ if (NULL != rmr->lmr) { (void) dapl_os_atomic_dec(&rmr->lmr->lmr_ref_count); } rmr->param.mem_priv = mem_priv; rmr->param.lmr_triplet = *lmr_triplet; rmr->ep = ep_ptr; rmr->lmr = lmr; dapl_os_atomic_inc(&ep_ptr->req_count); if (NULL != rmr_context) { *rmr_context = rmr->param.rmr_context; } bail: return (dat_status); }
/*
 * dapli_rmr_bind_unfuse
 *
 * Unbind an RMR (memory window) by posting an unbind WQE on the given
 * connected EP. On success the window's triplet is cleared and the
 * reference on the previously bound LMR (if any) is released.
 */
static DAT_RETURN dapli_rmr_bind_unfuse(IN DAPL_RMR *rmr,
					IN const DAT_LMR_TRIPLET *lmr_triplet,
					IN DAPL_EP *ep_ptr,
					IN DAT_RMR_COOKIE user_cookie,
					IN DAT_COMPLETION_FLAGS completion_flags)
{
	DAPL_COOKIE *cookie;
	DAT_RETURN status;

	/*
	 * An unbind, like a bind, is a WQE on the QP's send queue and
	 * only executes while the QP is in RTS — the EP must be
	 * connected.
	 */
	if (DAT_EP_STATE_CONNECTED != ep_ptr->param.ep_state)
		return DAT_ERROR(DAT_INVALID_STATE,
				 dapls_ep_state_subtype(ep_ptr));

	/* RMR and EP must share a protection zone. */
	if (ep_ptr->param.pz_handle != rmr->param.pz_handle)
		return DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);

	/* Each completion-flag class must be legal for this EP. */
	if (!dapl_rmr_validate_completion_flag(DAT_COMPLETION_SUPPRESS_FLAG,
					       ep_ptr->param.ep_attr.
					       request_completion_flags,
					       completion_flags) ||
	    !dapl_rmr_validate_completion_flag(DAT_COMPLETION_UNSIGNALLED_FLAG,
					       ep_ptr->param.ep_attr.
					       request_completion_flags,
					       completion_flags) ||
	    !dapl_rmr_validate_completion_flag(
					       DAT_COMPLETION_BARRIER_FENCE_FLAG,
					       ep_ptr->param.ep_attr.
					       request_completion_flags,
					       completion_flags))
		return DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);

	status = dapls_rmr_cookie_alloc(&ep_ptr->req_buffer, rmr,
					user_cookie, &cookie);
	if (DAT_SUCCESS != status)
		return status;

	status = dapls_ib_mw_unbind(rmr, lmr_triplet->lmr_context,
				    ep_ptr, cookie, completion_flags);
	if (DAT_SUCCESS != status) {
		/* Unbind failed: return the cookie we just allocated. */
		dapls_cookie_dealloc(&ep_ptr->req_buffer, cookie);
		return status;
	}

	/* Release the reference held by the previous binding, if any. */
	if (NULL != rmr->lmr)
		(void) dapl_os_atomic_dec(&rmr->lmr->lmr_ref_count);

	rmr->param.mem_priv = DAT_MEM_PRIV_NONE_FLAG;
	rmr->param.lmr_triplet.lmr_context = 0;
	rmr->param.lmr_triplet.virtual_address = 0;
	rmr->param.lmr_triplet.segment_length = 0;
	rmr->ep = ep_ptr;
	rmr->lmr = NULL;
	dapl_os_atomic_inc(&ep_ptr->req_count);

	return DAT_SUCCESS;
}
/*
 * dapl_psp_create_any
 *
 * uDAPL: User Direct Access Program Library Version 1.1, 6.4.3.3
 *
 * Create a persistent Public Service Point that can receive multiple
 * requests for connections and generate multiple connection request
 * instances that will be delivered to the specified Event Dispatcher
 * in a notification event. Differs from dapl_psp_create() in that
 * the conn_qual is selected by the implementation and returned to
 * the user.
 *
 * Input:
 *	ia_handle
 *	evd_handle
 *	psp_flags
 *
 * Output:
 *	conn_qual
 *	psp_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_PARAMETER
 *	DAT_CONN_QUAL_IN_USE
 *	DAT_MODEL_NOT_SUPPORTED
 */
DAT_RETURN DAT_API dapl_psp_create_any(IN DAT_IA_HANDLE ia_handle,
				       OUT DAT_CONN_QUAL * conn_qual,
				       IN DAT_EVD_HANDLE evd_handle,
				       IN DAT_PSP_FLAGS psp_flags,
				       OUT DAT_PSP_HANDLE * psp_handle)
{
	DAPL_IA *ia_ptr;
	DAPL_SP *sp_ptr;
	DAPL_EVD *evd_ptr;
	DAT_RETURN dat_status;
	/*
	 * Rolling hint so successive calls start probing after the last
	 * conn_qual handed out.
	 * NOTE(review): shared across threads without synchronization —
	 * presumably benign because it is only a starting hint; confirm.
	 */
	static DAT_CONN_QUAL hint_conn_qual = 1024;	/* seed value */
	DAT_CONN_QUAL lcl_conn_qual;
	DAT_CONN_QUAL limit_conn_qual;

	ia_ptr = (DAPL_IA *) ia_handle;
	dat_status = DAT_SUCCESS;

	if (DAPL_BAD_HANDLE(ia_ptr, DAPL_MAGIC_IA)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA);
		goto bail;
	}
	if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_CR);
		goto bail;
	}
	if (psp_handle == NULL) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG5);
		goto bail;
	}
	if (conn_qual == NULL) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
		goto bail;
	}

	/* The EVD must be a connection-request EVD. */
	evd_ptr = (DAPL_EVD *) evd_handle;
	if (!(evd_ptr->evd_flags & DAT_EVD_CR_FLAG)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_CR);
		goto bail;
	}
	if (psp_flags != DAT_PSP_CONSUMER_FLAG &&
	    psp_flags != DAT_PSP_PROVIDER_FLAG) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG4);
		goto bail;
	}

	/* Allocate PSP */
	sp_ptr = dapls_sp_alloc(ia_ptr, DAT_TRUE);
	if (sp_ptr == NULL) {
		dat_status =
		    DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
		goto bail;
	}

	DAPL_CNTR(ia_ptr, DCNT_IA_PSP_CREATE_ANY);

	/*
	 * Fill out the args for a PSP
	 */
	sp_ptr->evd_handle = evd_handle;
	sp_ptr->psp_flags = psp_flags;
	sp_ptr->ep_handle = NULL;

	/*
	 * Take a reference on the EVD handle
	 */
	dapl_os_atomic_inc(&((DAPL_EVD *) evd_handle)->evd_ref_count);

	/* Link it onto the IA */
	dapl_ia_link_psp(ia_ptr, sp_ptr);

	/*
	 * Set up a listener for a connection. Connections can arrive
	 * even before this call returns!
	 */
	sp_ptr->state = DAPL_SP_STATE_PSP_LISTENING;
	sp_ptr->listening = DAT_TRUE;

	/*
	 * Probe conn_quals starting at the hint until one is free or we
	 * give up.
	 */
	limit_conn_qual = 0;
	lcl_conn_qual = hint_conn_qual;
	dat_status = ~DAT_SUCCESS;	/* force at least one iteration */
	while (dat_status != DAT_SUCCESS) {
		dat_status = dapls_ib_setup_conn_listener(ia_ptr,
							  lcl_conn_qual,
							  sp_ptr);

		lcl_conn_qual++;

		if (dat_status == DAT_CONN_QUAL_IN_USE) {
			/*
			 * If we have a big number of tries and we still haven't
			 * found a service_ID we can use, bail out with an error,
			 * something is wrong!
			 */
			if (limit_conn_qual++ > 100000) {
				dat_status = DAT_CONN_QUAL_UNAVAILABLE;
				break;
			}
		} else if (dat_status != DAT_SUCCESS) {
			/*
			 * BUG FIX: any failure other than a conn_qual
			 * collision will not be cured by trying the next
			 * conn_qual; the old code looped forever here.
			 */
			break;
		}
	}
	hint_conn_qual = lcl_conn_qual;

	if (dat_status != DAT_SUCCESS) {
		/*
		 * Have a problem setting up the connection, something wrong!
		 * Undo the EVD reference and release the SP.
		 */
		dapl_os_atomic_dec(&((DAPL_EVD *) evd_handle)->evd_ref_count);
		sp_ptr->evd_handle = NULL;
		dapls_ia_unlink_sp(ia_ptr, sp_ptr);
		dapls_sp_free_sp(sp_ptr);
		dapl_os_printf
		    ("--> dapl_psp_create cannot set up conn listener: %x\n",
		     dat_status);
		goto bail;
	}

	/* lcl_conn_qual was post-incremented after the successful setup. */
	sp_ptr->conn_qual = lcl_conn_qual - 1;

	/*
	 * Return handle to the user
	 */
	*conn_qual = sp_ptr->conn_qual;
	*psp_handle = (DAT_PSP_HANDLE) sp_ptr;

      bail:
	return dat_status;
}
/*
 * dapl_ep_free
 *
 * DAPL Requirements Version xxx, 6.5.3
 *
 * Destroy an instance of the Endpoint
 *
 * Input:
 *	ep_handle
 *
 * Output:
 *	none
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_PARAMETER
 *	DAT_INVALID_STATE
 */
DAT_RETURN dapl_ep_free(IN DAT_EP_HANDLE ep_handle)
{
	DAPL_EP *ep_ptr;
	DAPL_IA *ia_ptr;
	DAT_EP_PARAM *param;
	DAT_RETURN dat_status = DAT_SUCCESS;

	dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_ep_free (%p)\n", ep_handle);

	ep_ptr = (DAPL_EP *) ep_handle;
	param = &ep_ptr->param;

	/*
	 * Verify parameter & state. A handle already marked EP_EXIT with
	 * a DISCONNECTED state is the disconnect callback finishing the
	 * free, and is accepted.
	 */
	if (DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP) &&
	    !(ep_ptr->header.magic == DAPL_MAGIC_EP_EXIT &&
	      ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP);
		goto bail;
	}

	if (ep_ptr->param.ep_state == DAT_EP_STATE_RESERVED ||
	    ep_ptr->param.ep_state == DAT_EP_STATE_PASSIVE_CONNECTION_PENDING
	    || ep_ptr->param.ep_state ==
	    DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING) {
		dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			     "--> dapl_ep_free: invalid state: %x, ep %p\n",
			     ep_ptr->param.ep_state, ep_ptr);
		dat_status = DAT_ERROR(DAT_INVALID_STATE,
				       dapls_ep_state_subtype(ep_ptr));
		goto bail;
	}

	ia_ptr = ep_ptr->header.owner_ia;

	/*
	 * If we are connected, issue a disconnect. If we are in the
	 * disconnect_pending state, disconnect with the ABRUPT flag
	 * set. Do verification of parameters and the state change
	 * atomically.
	 */
	dapl_os_lock(&ep_ptr->header.lock);
	if (ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED ||
	    ep_ptr->param.ep_state == DAT_EP_STATE_ACTIVE_CONNECTION_PENDING ||
	    ep_ptr->param.ep_state == DAT_EP_STATE_COMPLETION_PENDING ||
	    ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECT_PENDING) {
		/*
		 * Issue the disconnect and return. The DISCONNECT callback
		 * will invoke this routine and finish the job
		 */
		ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECT_PENDING;
		dapl_os_unlock(&ep_ptr->header.lock);
		dapl_dbg_log(DAPL_DBG_TYPE_EP,
			     "--> dapl_ep_free: disconnecting EP: %x, ep %p\n",
			     ep_ptr->param.ep_state, ep_ptr);
		dat_status = dapls_ib_disconnect(ep_ptr, DAT_CLOSE_ABRUPT_FLAG);
		ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECT_PENDING;
		ep_ptr->header.magic = DAPL_MAGIC_EP_EXIT;
	} else {
		dapl_os_unlock(&ep_ptr->header.lock);
	}

	/*
	 * Release all reference counts and unlink this structure. If we
	 * got here from a callback, don't repeat this step
	 */
	if (!(ep_ptr->header.magic == DAPL_MAGIC_EP_EXIT &&
	      ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED)) {
		/* Remove link from the IA */
		dapl_ia_unlink_ep(ia_ptr, ep_ptr);
	}

	/*
	 * If the EP is disconnected tear everything down. Otherwise,
	 * disconnect the EP but leave the QP and basic EP structure
	 * intact; the callback code will finish the job.
	 */
	dapl_os_lock(&ep_ptr->header.lock);
	if (ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED ||
	    ep_ptr->param.ep_state == DAT_EP_STATE_UNCONNECTED) {
		/*
		 * Update ref counts. Note the user may have used ep_modify
		 * to set handles to NULL.
		 */
		if (param->pz_handle != NULL) {
			dapl_os_atomic_dec(&((DAPL_PZ *) param->pz_handle)->
					   pz_ref_count);
			param->pz_handle = NULL;
		}
		if (param->recv_evd_handle != NULL) {
			dapl_os_atomic_dec(&
					   ((DAPL_EVD *) param->
					    recv_evd_handle)->evd_ref_count);
			param->recv_evd_handle = NULL;
		}
		if (param->request_evd_handle != NULL) {
			dapl_os_atomic_dec(&
					   ((DAPL_EVD *) param->
					    request_evd_handle)->evd_ref_count);
			param->request_evd_handle = NULL;
		}
		if (param->connect_evd_handle != NULL) {
			dapl_os_atomic_dec(&
					   ((DAPL_EVD *) param->
					    connect_evd_handle)->evd_ref_count);
			param->connect_evd_handle = NULL;
		}
		if (param->srq_handle != NULL) {
			dapl_os_atomic_dec(&((DAPL_SRQ *) param->srq_handle)->
					   srq_ref_count);
			param->srq_handle = NULL;
		}

		dapl_dbg_log(DAPL_DBG_TYPE_EP,
			     "--> dapl_ep_free: Free EP: %x, ep %p\n",
			     ep_ptr->param.ep_state, ep_ptr);

		/*
		 * Free the QP. If the EP has never been used,
		 * the QP is invalid
		 */
		if (ep_ptr->qp_handle != IB_INVALID_HANDLE) {
			dat_status = dapls_ib_qp_free(ia_ptr, ep_ptr);
			/*
			 * This should always succeed, but report to the user
			 * if there is a problem.
			 *
			 * BUG FIX: the original code jumped to bail while
			 * still holding ep_ptr->header.lock, leaking the
			 * lock on this error path.
			 */
			if (dat_status != DAT_SUCCESS) {
				dapl_os_unlock(&ep_ptr->header.lock);
				goto bail;
			}
			ep_ptr->qp_handle = IB_INVALID_HANDLE;
		}
		dapl_os_unlock(&ep_ptr->header.lock);

		/* Free the resource */
		dapl_ep_dealloc(ep_ptr);
	} else {
		dapl_os_unlock(&ep_ptr->header.lock);
	}

      bail:
	return (dat_status);
}
/*
 * dapl_psp_create
 *
 * uDAPL: User Direct Access Program Library Version 1.1, 6.4.1.1
 *
 * Create a persistent Public Service Point that can receive multiple
 * requests for connections and generate multiple connection request
 * instances that will be delivered to the specified Event Dispatcher
 * in a notification event.
 *
 * Input:
 *	ia_handle
 *	conn_qual
 *	evd_handle
 *	psp_flags
 *
 * Output:
 *	psp_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_PARAMETER
 *	DAT_CONN_QUAL_IN_USE
 *	DAT_MODEL_NOT_SUPPORTED
 */
DAT_RETURN DAT_API dapl_psp_create(IN DAT_IA_HANDLE ia_handle,
				   IN DAT_CONN_QUAL conn_qual,
				   IN DAT_EVD_HANDLE evd_handle,
				   IN DAT_PSP_FLAGS psp_flags,
				   OUT DAT_PSP_HANDLE * psp_handle)
{
	DAPL_IA *ia_ptr;
	DAPL_SP *sp_ptr;
	DAPL_EVD *evd_ptr;
	DAT_BOOLEAN sp_found;	/* TRUE when reusing a quiescent SP */
	DAT_RETURN dat_status;

	ia_ptr = (DAPL_IA *) ia_handle;
	dat_status = DAT_SUCCESS;

	/* Validate handles and arguments before touching any state. */
	if (DAPL_BAD_HANDLE(ia_ptr, DAPL_MAGIC_IA)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA);
		goto bail;
	}
	if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_CR);
		goto bail;
	}
	if (psp_handle == NULL) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG5);
		goto bail;
	}
	/* The EVD must be a connection-request EVD. */
	evd_ptr = (DAPL_EVD *) evd_handle;
	if (!(evd_ptr->evd_flags & DAT_EVD_CR_FLAG)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_CR);
		goto bail;
	}
	if (psp_flags != DAT_PSP_CONSUMER_FLAG &&
	    psp_flags != DAT_PSP_PROVIDER_FLAG) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG4);
		goto bail;
	}

	DAPL_CNTR(ia_ptr, DCNT_IA_PSP_CREATE);

	/*
	 * See if we have a quiescent listener to use for this PSP, else
	 * create one and set it listening
	 */
	sp_ptr = dapls_ia_sp_search(ia_ptr, conn_qual, DAT_TRUE);
	sp_found = DAT_TRUE;
	if (sp_ptr == NULL) {
		/* Allocate PSP */
		sp_found = DAT_FALSE;
		sp_ptr = dapls_sp_alloc(ia_ptr, DAT_TRUE);
		if (sp_ptr == NULL) {
			dat_status =
			    DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
				      DAT_RESOURCE_MEMORY);
			goto bail;
		}
	} else if (sp_ptr->listening == DAT_TRUE) {
		/* Found an SP, but it is already in use on this conn_qual. */
		dat_status = DAT_ERROR(DAT_CONN_QUAL_IN_USE, 0);
		goto bail;
	}

	/*
	 * Fill out the args for a PSP
	 */
	sp_ptr->conn_qual = conn_qual;
	sp_ptr->evd_handle = evd_handle;
	sp_ptr->psp_flags = psp_flags;
	sp_ptr->ep_handle = NULL;

	/*
	 * Take a reference on the EVD handle
	 */
	dapl_os_atomic_inc(&((DAPL_EVD *) evd_handle)->evd_ref_count);

	/*
	 * Set up a listener for a connection. Connections can arrive
	 * even before this call returns!
	 */
	sp_ptr->state = DAPL_SP_STATE_PSP_LISTENING;
	sp_ptr->listening = DAT_TRUE;

	/*
	 * If this is a new sp we need to add it to the IA queue, and set up
	 * a conn_listener. A reused quiescent SP already has both.
	 */
	if (sp_found == DAT_FALSE) {
		/* Link it onto the IA before enabling it to receive conn
		 * requests */
		dapl_ia_link_psp(ia_ptr, sp_ptr);
		dat_status =
		    dapls_ib_setup_conn_listener(ia_ptr, conn_qual, sp_ptr);
		if (dat_status != DAT_SUCCESS) {
			/*
			 * Have a problem setting up the connection, something
			 * wrong! Decrements the EVD refcount & release it.
			 */
			dapl_os_atomic_dec(&((DAPL_EVD *) evd_handle)->
					   evd_ref_count);
			sp_ptr->evd_handle = NULL;
			dapls_ia_unlink_sp(ia_ptr, sp_ptr);
			dapls_sp_free_sp(sp_ptr);
			dapl_dbg_log(DAPL_DBG_TYPE_CM,
				     "--> dapl_psp_create setup_conn_listener failed: %x\n",
				     dat_status);
			goto bail;
		}
	}

	/*
	 * Return handle to the user
	 */
	*psp_handle = (DAT_PSP_HANDLE) sp_ptr;

      bail:
	return dat_status;
}
/*
 * dapli_open_query_ext
 *
 * Direct link to provider for quick provider query without full IA
 * device open.
 *
 * Input:
 *	name		provider name
 *	ia_mask, ia_attr
 *	pr_mask, pr_attr
 *
 * Output:
 *	ia_handle_ptr	opened (query-mode) IA handle
 *	ia_attr
 *	pr_attr
 *
 * Return Values:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_PARAMETER
 *	DAT_INVALID_HANDLE
 *	DAT_PROVIDER_NOT_FOUND (returned by dat registry if necessary)
 */
DAT_RETURN dapli_open_query_ext(IN const DAT_NAME_PTR name,
				OUT DAT_IA_HANDLE * ia_handle_ptr,
				IN DAT_IA_ATTR_MASK ia_mask,
				OUT DAT_IA_ATTR * ia_attr,
				IN DAT_PROVIDER_ATTR_MASK pr_mask,
				OUT DAT_PROVIDER_ATTR * pr_attr)
{
	DAT_RETURN dat_status = DAT_SUCCESS;
	DAT_PROVIDER *provider;
	DAPL_HCA *hca_ptr = NULL;
	DAT_IA_HANDLE ia_ptr = NULL;

	dapl_log(DAPL_DBG_TYPE_EXTENSION,
		 "dapli_open_query_ext (%s, 0x%llx, %p, 0x%x, %p)\n",
		 name, ia_mask, ia_attr, pr_mask, pr_attr);

	/* Resolve the named provider from the global provider list. */
	dat_status = dapl_provider_list_search(name, &provider);
	if (DAT_SUCCESS != dat_status) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG1);
		goto bail;
	}

	/* At least one of the two attribute OUT buffers must be given. */
	if ((ia_attr == NULL) && (pr_attr == NULL)) {
		return DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG5);
	}

	/* initialize the caller's OUT param */
	/* NOTE(review): ia_handle_ptr is dereferenced without a NULL
	 * check -- presumably guaranteed non-NULL by callers; confirm. */
	*ia_handle_ptr = DAT_HANDLE_NULL;

	/* get the hca_ptr */
	hca_ptr = (DAPL_HCA *) provider->extension;

	/* log levels could be reset and set between open_query calls */
	if (dapl_os_get_env_val("DAPL_DBG_TYPE", 0))
		g_dapl_dbg_type = dapl_os_get_env_val("DAPL_DBG_TYPE", 0);

	/*
	 * Open the HCA if it has not been done before.
	 */
	dapl_os_lock(&hca_ptr->lock);
	if (hca_ptr->ib_hca_handle == IB_INVALID_HANDLE) {
		/* open in query mode */
		dat_status = dapls_ib_open_hca(hca_ptr->name,
					       hca_ptr, DAPL_OPEN_QUERY);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_ERR,
				     "dapls_ib_open_hca failed %x\n",
				     dat_status);
			dapl_os_unlock(&hca_ptr->lock);
			goto bail;
		}
	}
	/* Take a reference on the hca_handle */
	dapl_os_atomic_inc(&hca_ptr->handle_ref_count);
	dapl_os_unlock(&hca_ptr->lock);

	/* Allocate and initialize ia structure */
	ia_ptr = (DAT_IA_HANDLE) dapl_ia_alloc(provider, hca_ptr);
	if (!ia_ptr) {
		dat_status =
		    DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
		goto cleanup;
	}

	dat_status = dapl_ia_query(ia_ptr, NULL, ia_mask, ia_attr,
				   pr_mask, pr_attr);
	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
			     "dapls_ib_query_hca failed %x\n", dat_status);
		goto cleanup;
	}

	*ia_handle_ptr = ia_ptr;
	return DAT_SUCCESS;

      cleanup:
	/* close device and release HCA reference */
	if (ia_ptr) {
		/* ia_close drops the HCA reference taken above */
		dapl_ia_close(ia_ptr, DAT_CLOSE_ABRUPT_FLAG);
	} else {
		/* no IA yet: undo the open/reference by hand, under lock */
		dapl_os_lock(&hca_ptr->lock);
		dapls_ib_close_hca(hca_ptr);
		hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;
		dapl_os_atomic_dec(&hca_ptr->handle_ref_count);
		dapl_os_unlock(&hca_ptr->lock);
	}

      bail:
	return dat_status;
}
/*
 * dapli_post_ext
 *
 * Post an extended (atomic / immediate-data) send operation on the EP.
 * Allocates a DTO cookie, takes a request reference, and hands the
 * operation to the provider via dapls_ib_post_ext_send(); on a post
 * failure both the reference and the cookie are rolled back.
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 *	any error from cookie allocation or the provider post
 */
DAT_RETURN dapli_post_ext(IN DAT_EP_HANDLE ep_handle,
			  IN DAT_UINT64 cmp_add,
			  IN DAT_UINT64 swap,
			  IN DAT_UINT32 immed_data,
			  IN DAT_COUNT segments,
			  IN DAT_LMR_TRIPLET * local_iov,
			  IN DAT_DTO_COOKIE user_cookie,
			  IN const DAT_RMR_TRIPLET * remote_iov,
			  IN int op_type,
			  IN DAT_COMPLETION_FLAGS flags,
			  IN DAT_IB_ADDR_HANDLE * ah)
{
	DAPL_EP *ep_ptr;
	DAPL_COOKIE *cookie = NULL;
	DAT_RETURN dat_status = DAT_SUCCESS;

	dapl_dbg_log(DAPL_DBG_TYPE_API,
		     " post_ext_op: ep %p cmp_val %d "
		     "swap_val %d cookie 0x%x, r_iov %p, flags 0x%x, ah %p\n",
		     ep_handle, (unsigned)cmp_add, (unsigned)swap,
		     (unsigned)user_cookie.as_64, remote_iov, flags, ah);

	if (DAPL_BAD_HANDLE(ep_handle, DAPL_MAGIC_EP))
		return (DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP));

	/* (removed unused local qp_ptr = ep_ptr->qp_handle) */
	ep_ptr = (DAPL_EP *) ep_handle;

	/*
	 * Synchronization ok since this buffer is only used for send
	 * requests, which aren't allowed to race with each other.
	 */
	dat_status = dapls_dto_cookie_alloc(&ep_ptr->req_buffer,
					    DAPL_DTO_TYPE_EXTENSION,
					    user_cookie, &cookie);
	if (dat_status != DAT_SUCCESS)
		goto bail;

	/*
	 * Take reference before posting to avoid race conditions with
	 * completions
	 */
	dapl_os_atomic_inc(&ep_ptr->req_count);

	/*
	 * Invoke provider specific routine to post DTO
	 */
	dat_status = dapls_ib_post_ext_send(ep_ptr, op_type, cookie,
					    segments,	/* data segments */
					    local_iov, remote_iov,
					    immed_data,	/* immed data */
					    cmp_add,	/* compare or add */
					    swap,	/* swap */
					    flags, ah);

	if (dat_status != DAT_SUCCESS) {
		/* Post failed: undo the reference and return the cookie. */
		dapl_os_atomic_dec(&ep_ptr->req_count);
		dapls_cookie_dealloc(&ep_ptr->req_buffer, cookie);
	}

      bail:
	return dat_status;
}