/*
 * dapl_hca_link_ia
 *
 * Add an ia to the HCA structure
 *
 * Input:
 *	hca_ptr
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 */
void dapl_hca_link_ia(IN DAPL_HCA * hca_ptr, IN DAPL_IA * ia_ptr)
{
	dapl_os_lock(&hca_ptr->lock);
	dapl_llist_add_head(&hca_ptr->ia_list_head,
			    &ia_ptr->hca_ia_list_entry, ia_ptr);
	dapl_os_unlock(&hca_ptr->lock);
}
/*
 * dapl_evd_set_unwaitable
 *
 * DAPL Requirements Version 1.1, 6.3.4.7
 *
 * Transition the Event Dispatcher into an unwaitable state
 *
 * Input:
 *	evd_handle
 *
 * Output:
 *	none
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 */
DAT_RETURN DAT_API dapl_evd_set_unwaitable(IN DAT_EVD_HANDLE evd_handle)
{
	DAPL_EVD *evd_ptr;
	DAT_RETURN dat_status;

	evd_ptr = (DAPL_EVD *) evd_handle;
	dat_status = DAT_SUCCESS;

	if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE, 0);
		goto bail;
	}
	dapl_os_lock(&evd_ptr->header.lock);
	evd_ptr->evd_waitable = DAT_FALSE;

	/*
	 * If this evd is waiting, wake it up. There is an obvious race
	 * condition here where we may wakeup the waiter before it goes to
	 * sleep; but the wait_object allows this and will do the right
	 * thing.
	 */
	if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED) {
		if (evd_ptr->evd_flags &
		    (DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG))
			dapls_evd_dto_wakeup(evd_ptr);
		else
			dapl_os_wait_object_wakeup(&evd_ptr->wait_object);
	}
	dapl_os_unlock(&evd_ptr->header.lock);
bail:
	return dat_status;
}
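/*
 * Consumer-side usage sketch (not part of the provider code): one common way
 * to use dat_evd_set_unwaitable() is to unblock a thread parked in
 * dat_evd_wait() before the EVD is torn down.  This is illustrative only and
 * assumes the standard uDAPL consumer API from <dat/udat.h>; the function
 * name below is hypothetical.
 */
#include <dat/udat.h>

static void example_shutdown_waiter(DAT_EVD_HANDLE evd)
{
	/*
	 * Mark the EVD unwaitable; a waiter blocked in dat_evd_wait()
	 * is woken and typically returns an error such as
	 * DAT_INVALID_STATE, after which the consumer's event thread
	 * can exit and the EVD can be freed.
	 */
	(void) dat_evd_set_unwaitable(evd);

	/* ... join the consumer's event thread here ... */

	(void) dat_evd_free(evd);
}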
/*
 * dapl_sp_link_cr
 *
 * Add a cr to a PSP structure
 *
 * Input:
 *	sp_ptr
 *	cr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 */
void
dapl_sp_link_cr(
	IN DAPL_SP *sp_ptr,
	IN DAPL_CR *cr_ptr)
{
	dapl_os_lock(&sp_ptr->header.lock);
	dapl_llist_add_tail(&sp_ptr->cr_list_head,
	    &cr_ptr->header.ia_list_entry, cr_ptr);
	sp_ptr->cr_list_count++;
	dapl_os_unlock(&sp_ptr->header.lock);
}
/*
 * dapls_sp_free_sp
 *
 * Free the passed in PSP structure.
 *
 * Input:
 *	sp_ptr		service point to free
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 */
void
dapls_sp_free_sp(
	IN DAPL_SP *sp_ptr)
{
	dapl_os_assert(sp_ptr->header.magic == DAPL_MAGIC_PSP ||
	    sp_ptr->header.magic == DAPL_MAGIC_RSP);
	dapl_os_assert(dapl_llist_is_empty(&sp_ptr->cr_list_head));

	dapl_os_lock(&sp_ptr->header.lock);
	/* reset magic to prevent reuse */
	sp_ptr->header.magic = DAPL_MAGIC_INVALID;
	dapl_os_unlock(&sp_ptr->header.lock);
	dapl_os_free(sp_ptr, sizeof (DAPL_SP));
}
/*
 * dapl_hca_unlink_ia
 *
 * Remove an ia from the hca info structure
 *
 * Input:
 *	hca_ptr
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 */
void dapl_hca_unlink_ia(IN DAPL_HCA * hca_ptr, IN DAPL_IA * ia_ptr)
{
	dapl_os_lock(&hca_ptr->lock);
	/*
	 * If an error occurred when we were opening the IA it
	 * will not be linked on the list; don't unlink an unlinked
	 * list!
	 */
	if (!dapl_llist_is_empty(&hca_ptr->ia_list_head)) {
		dapl_llist_remove_entry(&hca_ptr->ia_list_head,
					&ia_ptr->hca_ia_list_entry);
	}
	dapl_os_unlock(&hca_ptr->lock);
}
DAT_RETURN dapl_ep_dup_connect( IN DAT_EP_HANDLE ep_handle, IN DAT_EP_HANDLE ep_dup_handle, IN DAT_TIMEOUT timeout, IN DAT_COUNT private_data_size, IN const DAT_PVOID private_data, IN DAT_QOS qos) { DAPL_EP *ep_dup_ptr; DAT_RETURN dat_status; DAT_IA_ADDRESS_PTR remote_ia_address_ptr; DAT_CONN_QUAL remote_conn_qual; ep_dup_ptr = (DAPL_EP *)ep_dup_handle; /* * Verify the dup handle, which must be connected. All other * parameters will be verified by dapl_ep_connect */ if (DAPL_BAD_HANDLE(ep_dup_handle, DAPL_MAGIC_EP)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); goto bail; } /* * Check both the EP state and the QP state: if we don't have a QP * there is a problem. Do this under a lock and pull out * the connection parameters for atomicity. */ dapl_os_lock(&ep_dup_ptr->header.lock); if (ep_dup_ptr->param.ep_state != DAT_EP_STATE_CONNECTED) { dapl_os_unlock(&ep_dup_ptr->header.lock); dat_status = DAT_ERROR(DAT_INVALID_STATE, dapls_ep_state_subtype(ep_dup_ptr)); goto bail; } remote_ia_address_ptr = ep_dup_ptr->param.remote_ia_address_ptr; remote_conn_qual = ep_dup_ptr->param.remote_port_qual; dapl_os_unlock(&ep_dup_ptr->header.lock); dat_status = dapl_ep_connect(ep_handle, remote_ia_address_ptr, remote_conn_qual, timeout, private_data_size, private_data, qos, DAT_CONNECT_DEFAULT_FLAG); bail: return (dat_status); }
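/*
 * Consumer-side sketch (illustrative only): dat_ep_dup_connect() lets a new
 * EP reuse the remote address and connection qualifier of an EP that is
 * already connected, which is exactly what the code above does internally.
 * Names are hypothetical; assumes <dat/udat.h>.
 */
#include <dat/udat.h>

static DAT_RETURN example_dup_connect(DAT_EP_HANDLE new_ep,
				      DAT_EP_HANDLE connected_ep)
{
	/* No private data, best-effort QoS, 10 second timeout */
	return dat_ep_dup_connect(new_ep, connected_ep,
				  10 * 1000000,	/* DAT_TIMEOUT is in microseconds */
				  0, NULL, DAT_QOS_BEST_EFFORT);
}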
/* * dapl_cno_free * * DAPL Requirements Version xxx, 6.3.2.2 * * Destroy a consumer notification object instance * * Input: * cno_handle * * Output: * none * * Returns: * DAT_SUCCESS * DAT_INVALID_HANDLE * DAT_INVALID_STATE */ DAT_RETURN dapl_cno_free( IN DAT_CNO_HANDLE cno_handle) /* cno_handle */ { DAPL_CNO *cno_ptr; DAT_RETURN dat_status; dat_status = DAT_SUCCESS; cno_ptr = (DAPL_CNO *)cno_handle; if (DAPL_BAD_HANDLE(cno_handle, DAPL_MAGIC_CNO)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_CNO); goto bail; } if (cno_ptr->cno_ref_count != 0 || cno_ptr->cno_waiters != 0) { dat_status = DAT_ERROR(DAT_INVALID_STATE, DAT_INVALID_STATE_CNO_IN_USE); goto bail; } dapl_os_lock(&cno_ptr->header.lock); if (!dapl_llist_is_empty(&cno_ptr->evd_list_head)) { dapl_dbg_log(DAPL_DBG_TYPE_UTIL, "cno_free: evd list not empty!\n"); dapl_os_unlock(&cno_ptr->header.lock); dat_status = DAT_ERROR(DAT_INVALID_STATE, DAT_INVALID_STATE_CNO_IN_USE); goto bail; } dapl_os_unlock(&cno_ptr->header.lock); dat_status = dapls_ib_cno_free(cno_ptr); if (dat_status != DAT_SUCCESS) { goto bail; } dapl_ia_unlink_cno(cno_ptr->header.owner_ia, cno_ptr); dapl_cno_dealloc(cno_ptr); bail: return (dat_status); }
/*
 * dapl_sp_remove_cr
 *
 * Remove the CR from the PSP. Done prior to freeing the CR resource.
 *
 * The caller is expected to hold sp_ptr->header.lock (see dapli_get_sp_ep
 * and dapli_connection_request below), so no locking is done here.
 *
 * Input:
 *	sp_ptr
 *	cr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	void
 */
void
dapl_sp_remove_cr(
	IN DAPL_SP *sp_ptr,
	IN DAPL_CR *cr_ptr)
{
	if (dapl_llist_is_empty(&sp_ptr->cr_list_head)) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "***dapl_sp_remove_cr: removing from empty queue! sp %p\n",
		    sp_ptr);
		return;
	}

	(void) dapl_llist_remove_entry(&sp_ptr->cr_list_head,
	    &cr_ptr->header.ia_list_entry);
	sp_ptr->cr_list_count--;
}
/*
 * dapl_sp_search_cr
 *
 * Search for a CR on the PSP cr_list with a matching cm_handle. The CR is
 * left on the list; the caller removes it when appropriate.
 *
 * The caller is expected to hold sp_ptr->header.lock (see dapli_get_sp_ep
 * below), so no locking is done here.
 *
 * Input:
 *	sp_ptr
 *	ib_cm_handle
 *
 * Output:
 *	none
 *
 * Returns:
 *	cr_ptr_fnd	Pointer to matching DAPL_CR, or NULL
 */
DAPL_CR *
dapl_sp_search_cr(
	IN DAPL_SP *sp_ptr,
	IN ib_cm_handle_t ib_cm_handle)
{
	DAPL_CR *cr_ptr;
	DAPL_CR *cr_ptr_fnd;

	/* Guard against an empty list before dereferencing the head */
	if (dapl_llist_is_empty(&sp_ptr->cr_list_head)) {
		return (NULL);
	}

	cr_ptr_fnd = NULL;
	cr_ptr = (DAPL_CR *) dapl_llist_peek_head(&sp_ptr->cr_list_head);

	do {
		if (cr_ptr->ib_cm_handle == ib_cm_handle) {
			cr_ptr_fnd = cr_ptr;
			break;
		}
		cr_ptr = cr_ptr->header.ia_list_entry.flink->data;
	} while ((void *)cr_ptr != (void *)sp_ptr->cr_list_head->data);

	return (cr_ptr_fnd);
}
/*
 * dapli_open_query_ext
 *
 * Direct link to provider for quick provider query without full IA device open
 *
 * Input:
 *	provider name
 *	ia_attr
 *	provider_attr
 *
 * Output:
 *	ia_handle_ptr
 *	ia_attr
 *	provider_attr
 *
 * Return Values:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_PARAMETER
 *	DAT_INVALID_HANDLE
 *	DAT_PROVIDER_NOT_FOUND	(returned by dat registry if necessary)
 */
DAT_RETURN dapli_open_query_ext(IN const DAT_NAME_PTR name,
				OUT DAT_IA_HANDLE * ia_handle_ptr,
				IN DAT_IA_ATTR_MASK ia_mask,
				OUT DAT_IA_ATTR * ia_attr,
				IN DAT_PROVIDER_ATTR_MASK pr_mask,
				OUT DAT_PROVIDER_ATTR * pr_attr)
{
	DAT_RETURN dat_status = DAT_SUCCESS;
	DAT_PROVIDER *provider;
	DAPL_HCA *hca_ptr = NULL;
	DAT_IA_HANDLE ia_ptr = NULL;

	dapl_log(DAPL_DBG_TYPE_EXTENSION,
		 "dapli_open_query_ext (%s, 0x%llx, %p, 0x%x, %p)\n",
		 name, ia_mask, ia_attr, pr_mask, pr_attr);

	dat_status = dapl_provider_list_search(name, &provider);
	if (DAT_SUCCESS != dat_status) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG1);
		goto bail;
	}

	/* at least one of ia_attr and pr_attr must be non-NULL */
	if ((ia_attr == NULL) && (pr_attr == NULL)) {
		return DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG5);
	}

	/* initialize the caller's OUT param */
	*ia_handle_ptr = DAT_HANDLE_NULL;

	/* get the hca_ptr */
	hca_ptr = (DAPL_HCA *) provider->extension;

	/* log levels could be reset and set between open_query calls */
	if (dapl_os_get_env_val("DAPL_DBG_TYPE", 0))
		g_dapl_dbg_type = dapl_os_get_env_val("DAPL_DBG_TYPE", 0);

	/*
	 * Open the HCA if it has not been done before.
	 */
	dapl_os_lock(&hca_ptr->lock);
	if (hca_ptr->ib_hca_handle == IB_INVALID_HANDLE) {
		/* open in query mode */
		dat_status = dapls_ib_open_hca(hca_ptr->name, hca_ptr,
					       DAPL_OPEN_QUERY);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_ERR,
				     "dapls_ib_open_hca failed %x\n",
				     dat_status);
			dapl_os_unlock(&hca_ptr->lock);
			goto bail;
		}
	}
	/* Take a reference on the hca_handle */
	dapl_os_atomic_inc(&hca_ptr->handle_ref_count);
	dapl_os_unlock(&hca_ptr->lock);

	/* Allocate and initialize ia structure */
	ia_ptr = (DAT_IA_HANDLE) dapl_ia_alloc(provider, hca_ptr);
	if (!ia_ptr) {
		dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
				       DAT_RESOURCE_MEMORY);
		goto cleanup;
	}

	dat_status = dapl_ia_query(ia_ptr, NULL, ia_mask, ia_attr,
				   pr_mask, pr_attr);
	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
			     "dapls_ib_query_hca failed %x\n", dat_status);
		goto cleanup;
	}

	*ia_handle_ptr = ia_ptr;
	return DAT_SUCCESS;

cleanup:
	/* close device and release HCA reference */
	if (ia_ptr) {
		dapl_ia_close(ia_ptr, DAT_CLOSE_ABRUPT_FLAG);
	} else {
		dapl_os_lock(&hca_ptr->lock);
		dapls_ib_close_hca(hca_ptr);
		hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;
		dapl_os_atomic_dec(&hca_ptr->handle_ref_count);
		dapl_os_unlock(&hca_ptr->lock);
	}
bail:
	return dat_status;
}
/*
 * dapls_cr_callback
 *
 * The callback function registered with verbs for the passive side of
 * connection requests. The interface is specified by cm_api.h
 *
 * Input:
 *	ib_cm_handle		Handle to CM
 *	ib_cm_event		Specific CM event
 *	private_data_ptr	Private data with DAT ADDRESS header
 *	private_data_size	Size of the private data
 *	context			SP pointer
 *
 * Output:
 *	None
 */
void dapls_cr_callback(IN dp_ib_cm_handle_t ib_cm_handle,
		       IN const ib_cm_events_t ib_cm_event,
		       IN const void *private_data_ptr,
		       IN const int private_data_size,
		       IN const void *context)
{
	DAPL_EP *ep_ptr;
	DAPL_EVD *evd_ptr;
	DAPL_SP *sp_ptr;
	DAPL_PRIVATE *prd_ptr;
	DAT_EVENT_NUMBER dat_event_num;
	DAT_RETURN dat_status;

	dapl_dbg_log(DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,
		     "--> dapl_cr_callback! context: %p event: %x cm_handle %p\n",
		     context, ib_cm_event, (void *)ib_cm_handle);

	/*
	 * Passive side of the connection, context is a SP and
	 * we need to look up the EP.
	 */
	sp_ptr = (DAPL_SP *) context;
	/*
	 * The context pointer could have been cleaned up in a racing
	 * CM callback, check to see if we should just exit here
	 */
	if (sp_ptr->header.magic == DAPL_MAGIC_INVALID) {
		return;
	}
	dapl_os_assert(sp_ptr->header.magic == DAPL_MAGIC_PSP ||
		       sp_ptr->header.magic == DAPL_MAGIC_RSP);

	/* Obtain the event number from the provider layer */
	dat_event_num = dapls_ib_get_dat_event(ib_cm_event, DAT_FALSE);

	/*
	 * CONNECT_REQUEST events create an event on the PSP
	 * EVD, which will trigger connection processing. The
	 * sequence is:
	 *	CONNECT_REQUEST		Event to SP
	 *	CONNECTED		Event to EP
	 *	DISCONNECT		Event to EP
	 *
	 * Obtain the EP if required and set an event up on the correct
	 * EVD.
	 */
	if (dat_event_num == DAT_CONNECTION_REQUEST_EVENT) {
		ep_ptr = NULL;
		evd_ptr = sp_ptr->evd_handle;
	} else {
		/* see if there is an EP connected with this CM handle */
		ep_ptr = dapli_get_sp_ep(ib_cm_handle, sp_ptr, dat_event_num);

		/* if we lost a race with the CM just exit. */
		if (ep_ptr == NULL) {
			return;
		}

		evd_ptr = (DAPL_EVD *) ep_ptr->param.connect_evd_handle;
		/* if something has happened to our EVD, bail. */
		if (evd_ptr == NULL) {
			return;
		}
	}

	prd_ptr = (DAPL_PRIVATE *) private_data_ptr;

	dat_status = DAT_INTERNAL_ERROR;	/* init to ERR */

	switch (dat_event_num) {
	case DAT_CONNECTION_REQUEST_EVENT:
	{
		/*
		 * Requests arriving on a disabled SP are immediately rejected
		 */
		dapl_os_lock(&sp_ptr->header.lock);
		if (sp_ptr->listening == DAT_FALSE) {
			dapl_os_unlock(&sp_ptr->header.lock);
			dapl_log(DAPL_DBG_TYPE_CM_WARN,
				 " cr_callback: CR event on non-listening SP\n");
			(void)dapls_ib_reject_connection(ib_cm_handle,
					DAT_CONNECTION_EVENT_UNREACHABLE,
					0, NULL);
			return;
		}

		if (sp_ptr->header.handle_type == DAT_HANDLE_TYPE_RSP) {
			/*
			 * RSP connections only allow a single connection. Close
			 * it down NOW so we reject any further connections.
			 */
			sp_ptr->listening = DAT_FALSE;
		}
		dapl_os_unlock(&sp_ptr->header.lock);

		/*
		 * Only occurs on the passive side of a connection
		 * dapli_connection_request will post the connection
		 * event if appropriate.
		 */
		dat_status = dapli_connection_request(ib_cm_handle,
						      sp_ptr, prd_ptr,
						      private_data_size,
						      evd_ptr);
		/* Set evd_ptr = NULL so we don't generate an event below */
		evd_ptr = NULL;
		break;
	}
	case DAT_CONNECTION_EVENT_ESTABLISHED:
	{
		/* This is just a notification the connection is now
		 * established, there isn't any private data to deal with.
		 *
		 * Update the EP state and cache a copy of the cm handle,
		 * then let the user know we are ready to go.
		 */
		dapl_os_lock(&ep_ptr->header.lock);
		if (ep_ptr->header.magic != DAPL_MAGIC_EP ||
		    ep_ptr->param.ep_state !=
		    DAT_EP_STATE_COMPLETION_PENDING) {
			/* If someone pulled the plug on the EP or connection,
			 * just exit
			 */
			dapl_os_unlock(&ep_ptr->header.lock);
			dat_status = DAT_SUCCESS;
			/* Set evd_ptr = NULL so we don't generate an event below */
			evd_ptr = NULL;
			break;
		}

		ep_ptr->param.ep_state = DAT_EP_STATE_CONNECTED;
		dapl_os_unlock(&ep_ptr->header.lock);
		break;
	}
	case DAT_CONNECTION_EVENT_DISCONNECTED:
	{
		/*
		 * EP is now fully disconnected; initiate any post processing
		 * to reset the underlying QP and get the EP ready for
		 * another connection
		 */
		dapl_os_lock(&ep_ptr->header.lock);
		if (ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED) {
			/* The disconnect has already occurred, we are now
			 * cleaned up and ready to exit
			 */
			dapl_os_unlock(&ep_ptr->header.lock);
			return;
		}
		ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
		dapls_ib_disconnect_clean(ep_ptr, DAT_FALSE, ib_cm_event);
		dapl_os_unlock(&ep_ptr->header.lock);
		break;
	}
	case DAT_CONNECTION_EVENT_NON_PEER_REJECTED:
	case DAT_CONNECTION_EVENT_PEER_REJECTED:
	case DAT_CONNECTION_EVENT_UNREACHABLE:
	{
		/*
		 * After posting an accept the requesting node has
		 * stopped talking.
		 */
		dapl_os_lock(&ep_ptr->header.lock);
		ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
		dapls_ib_disconnect_clean(ep_ptr, DAT_FALSE, ib_cm_event);
		dapl_os_unlock(&ep_ptr->header.lock);
		break;
	}
	case DAT_CONNECTION_EVENT_BROKEN:
	{
		dapl_os_lock(&ep_ptr->header.lock);
		ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
		dapls_ib_disconnect_clean(ep_ptr, DAT_FALSE, ib_cm_event);
		dapl_os_unlock(&ep_ptr->header.lock);
		break;
	}
	default:
	{
		evd_ptr = NULL;
		dapl_os_assert(0);	/* shouldn't happen */
		break;
	}
	}

	if (evd_ptr != NULL) {
		dat_status = dapls_evd_post_connection_event(evd_ptr,
							     dat_event_num,
							     (DAT_HANDLE) ep_ptr,
							     0, NULL);
	}

	if (dat_status != DAT_SUCCESS) {
		/* The event post failed; take appropriate action. */
		(void)dapls_ib_reject_connection(ib_cm_handle,
						 DAT_CONNECTION_EVENT_BROKEN,
						 0, NULL);
		return;
	}
}
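/*
 * Consumer-side sketch of the passive path that dapls_cr_callback() feeds:
 * the CONNECT_REQUEST surfaces as a DAT_CONNECTION_REQUEST_EVENT on the PSP
 * EVD, and the consumer answers with dat_cr_accept() or dat_cr_reject().
 * Illustrative only; assumes <dat/udat.h> (the single-argument DAT 1.2
 * dat_cr_reject signature) and handles set up elsewhere.
 */
#include <dat/udat.h>

static void example_psp_loop(DAT_EVD_HANDLE cr_evd, DAT_EP_HANDLE ep)
{
	DAT_EVENT event;
	DAT_COUNT nmore;

	for (;;) {
		if (dat_evd_wait(cr_evd, DAT_TIMEOUT_INFINITE, 1,
				 &event, &nmore) != DAT_SUCCESS)
			break;

		if (event.event_number == DAT_CONNECTION_REQUEST_EVENT) {
			DAT_CR_HANDLE cr =
			    event.event_data.cr_arrival_event_data.cr_handle;

			/* Accept on a pre-created EP; reject on failure */
			if (dat_cr_accept(cr, ep, 0, NULL) != DAT_SUCCESS)
				(void) dat_cr_reject(cr);
		}
	}
}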
/* * dapli_get_sp_ep * * Passive side of a connection is now fully established. Clean * up resources and obtain the EP pointer associated with a CR in * the SP * * Input: * ib_cm_handle, * sp_ptr * connection_event * * Output: * none * * Returns * ep_ptr * */ DAPL_EP *dapli_get_sp_ep(IN dp_ib_cm_handle_t ib_cm_handle, IN DAPL_SP * sp_ptr, IN DAT_EVENT_NUMBER dat_event_num) { DAPL_CR *cr_ptr; DAPL_EP *ep_ptr; /* * acquire the lock, we may be racing with other threads here */ dapl_os_lock(&sp_ptr->header.lock); /* Verify under lock that the SP is still valid */ if (sp_ptr->header.magic == DAPL_MAGIC_INVALID) { dapl_os_unlock(&sp_ptr->header.lock); return NULL; } /* * There are potentially multiple connections in progress. Need to * go through the list and find the one we are interested * in. There is no guarantee of order. dapl_sp_search_cr * leaves the CR on the SP queue. */ cr_ptr = dapl_sp_search_cr(sp_ptr, ib_cm_handle); if (cr_ptr == NULL) { dapl_os_unlock(&sp_ptr->header.lock); return NULL; } ep_ptr = (DAPL_EP *) cr_ptr->param.local_ep_handle; /* Quick check to ensure our EP is still valid */ if ((DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP))) { ep_ptr = NULL; } /* The CR record is discarded in all except for the CONNECTED case, * as it will have no further relevance. */ if (dat_event_num != DAT_CONNECTION_EVENT_ESTABLISHED) { /* Remove the CR from the queue */ dapl_sp_remove_cr(sp_ptr, cr_ptr); if (ep_ptr != NULL) { ep_ptr->cr_ptr = NULL; } /* * If this SP has been removed from service, free it * up after the last CR is removed */ if (sp_ptr->listening != DAT_TRUE && sp_ptr->cr_list_count == 0 && sp_ptr->state != DAPL_SP_STATE_FREE && sp_ptr->state != DAPL_SP_STATE_RSP_LISTENING) { dapl_dbg_log(DAPL_DBG_TYPE_CM, "--> dapli_get_sp_ep! disconnect dump sp: %p \n", sp_ptr); /* Decrement the ref count on the EVD */ if (sp_ptr->evd_handle) { dapl_os_atomic_dec(& ((DAPL_EVD *) sp_ptr-> evd_handle)->evd_ref_count); sp_ptr->evd_handle = NULL; } sp_ptr->state = DAPL_SP_STATE_FREE; dapl_os_unlock(&sp_ptr->header.lock); (void)dapls_ib_remove_conn_listener(sp_ptr->header. owner_ia, sp_ptr); dapls_ia_unlink_sp((DAPL_IA *) sp_ptr->header.owner_ia, sp_ptr); dapls_sp_free_sp(sp_ptr); dapls_cr_free(cr_ptr); goto skip_unlock; } dapl_os_unlock(&sp_ptr->header.lock); /* free memory outside of the lock */ dapls_cr_free(cr_ptr); } else { dapl_os_unlock(&sp_ptr->header.lock); } skip_unlock: return ep_ptr; }
/* * dapli_connection_request * * Process a connection request on the Passive side of a connection. * Create a CR record and link it on to the SP so we can update it * and free it later. Create an EP if specified by the PSP flags. * * Input: * ib_cm_handle, * sp_ptr * event_ptr * prd_ptr * * Output: * None * * Returns * DAT_INSUFFICIENT_RESOURCES * DAT_SUCCESS * */ DAT_RETURN dapli_connection_request(IN dp_ib_cm_handle_t ib_cm_handle, IN DAPL_SP * sp_ptr, IN DAPL_PRIVATE * prd_ptr, IN int private_data_size, IN DAPL_EVD * evd_ptr) { DAT_RETURN dat_status; DAPL_CR *cr_ptr; DAPL_EP *ep_ptr; DAPL_IA *ia_ptr; DAT_SP_HANDLE sp_handle; cr_ptr = dapls_cr_alloc(sp_ptr->header.owner_ia); if (cr_ptr == NULL) { /* Invoking function will call dapls_ib_cm_reject() */ return DAT_INSUFFICIENT_RESOURCES; } /* * Set up the CR */ cr_ptr->sp_ptr = sp_ptr; /* maintain sp_ptr in case of reject */ cr_ptr->param.remote_port_qual = 0; cr_ptr->ib_cm_handle = ib_cm_handle; #ifdef IBHOSTS_NAMING /* * Special case: pull the remote HCA address from the private data * prefix. This is a spec violation as it introduces a protocol, but * some implementations may find it necessary for a time. */ cr_ptr->remote_ia_address = prd_ptr->hca_address; #endif /* IBHOSTS_NAMING */ cr_ptr->param.remote_ia_address_ptr = (DAT_IA_ADDRESS_PTR) & cr_ptr->remote_ia_address; /* * Copy the remote address and private data out of the private_data * payload and put them in a local structure */ /* Private data size will be determined by the provider layer */ cr_ptr->param.private_data = cr_ptr->private_data; cr_ptr->param.private_data_size = private_data_size; if (cr_ptr->param.private_data_size > 0) { dapl_os_memcpy(cr_ptr->private_data, prd_ptr->private_data, DAPL_MIN(cr_ptr->param.private_data_size, DAPL_MAX_PRIVATE_DATA_SIZE)); } /* EP will be NULL unless RSP service point */ ep_ptr = (DAPL_EP *) sp_ptr->ep_handle; if (sp_ptr->psp_flags == DAT_PSP_PROVIDER_FLAG) { /* * Never true for RSP connections * * Create an EP for the user. If we can't allocate an * EP we are out of resources and need to tell the * requestor that we cant help them. */ ia_ptr = sp_ptr->header.owner_ia; ep_ptr = dapl_ep_alloc(ia_ptr, NULL); if (ep_ptr == NULL) { dapls_cr_free(cr_ptr); /* Invoking function will call dapls_ib_cm_reject() */ return DAT_INSUFFICIENT_RESOURCES; } ep_ptr->param.ia_handle = ia_ptr; ep_ptr->param.local_ia_address_ptr = (DAT_IA_ADDRESS_PTR) & ia_ptr->hca_ptr->hca_address; /* Link the EP onto the IA */ dapl_ia_link_ep(ia_ptr, ep_ptr); } cr_ptr->param.local_ep_handle = ep_ptr; if (ep_ptr != NULL) { /* Assign valid EP fields: RSP and PSP_PROVIDER_FLAG only */ if (sp_ptr->psp_flags == DAT_PSP_PROVIDER_FLAG) { ep_ptr->param.ep_state = DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING; } else { /* RSP */ dapl_os_assert(sp_ptr->header.handle_type == DAT_HANDLE_TYPE_RSP); ep_ptr->param.ep_state = DAT_EP_STATE_PASSIVE_CONNECTION_PENDING; } dapl_ep_link_cm(ep_ptr, ib_cm_handle); } /* link the CR onto the SP so we can pick it up later */ dapl_sp_link_cr(sp_ptr, cr_ptr); /* Post the event. 
*/ /* assign sp_ptr to union to avoid typecast errors from some compilers */ sp_handle.psp_handle = (DAT_PSP_HANDLE) sp_ptr; dat_status = dapls_evd_post_cr_arrival_event(evd_ptr, DAT_CONNECTION_REQUEST_EVENT, sp_handle, (DAT_IA_ADDRESS_PTR) & sp_ptr->header.owner_ia-> hca_ptr->hca_address, sp_ptr->conn_qual, (DAT_CR_HANDLE) cr_ptr); if (dat_status != DAT_SUCCESS) { (void)dapls_ib_reject_connection(ib_cm_handle, DAT_CONNECTION_EVENT_BROKEN, 0, NULL); /* Take the CR off the list, we can't use it */ dapl_os_lock(&sp_ptr->header.lock); dapl_sp_remove_cr(sp_ptr, cr_ptr); dapl_os_unlock(&sp_ptr->header.lock); dapls_cr_free(cr_ptr); return DAT_INSUFFICIENT_RESOURCES; } return DAT_SUCCESS; }
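/*
 * Consumer-side sketch of how the two PSP flavors referenced above come into
 * being.  With DAT_PSP_CONSUMER_FLAG the consumer supplies the EP at
 * dat_cr_accept() time; with DAT_PSP_PROVIDER_FLAG dapli_connection_request()
 * allocates an EP for each request.  Illustrative only; assumes <dat/udat.h>.
 */
#include <dat/udat.h>

static DAT_RETURN example_psp_create(DAT_IA_HANDLE ia,
				     DAT_EVD_HANDLE cr_evd,	/* created with DAT_EVD_CR_FLAG */
				     DAT_CONN_QUAL conn_qual,
				     DAT_PSP_HANDLE *psp)
{
	/* Consumer-managed EPs; pass DAT_PSP_PROVIDER_FLAG instead to have
	 * the provider allocate an EP per connection request. */
	return dat_psp_create(ia, conn_qual, cr_evd,
			      DAT_PSP_CONSUMER_FLAG, psp);
}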
/*
 * dapl_ep_connect
 *
 * DAPL Requirements Version xxx, 6.5.7
 *
 * Request a connection be established between the local Endpoint
 * and a remote Endpoint. This operation is used by the active/client
 * side of a connection
 *
 * Input:
 *	ep_handle
 *	remote_ia_address
 *	remote_conn_qual
 *	timeout
 *	private_data_size
 *	private_data
 *	qos
 *	connect_flags
 *
 * Output:
 *	None
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_PARAMETER
 *	DAT_MODEL_NOT_SUPPORTED
 */
DAT_RETURN
dapl_ep_connect(
	IN DAT_EP_HANDLE ep_handle,
	IN DAT_IA_ADDRESS_PTR remote_ia_address,
	IN DAT_CONN_QUAL remote_conn_qual,
	IN DAT_TIMEOUT timeout,
	IN DAT_COUNT private_data_size,
	IN const DAT_PVOID private_data,
	IN DAT_QOS qos,
	IN DAT_CONNECT_FLAGS connect_flags)
{
	DAPL_EP *ep_ptr;
	DAPL_PRIVATE prd;
	DAPL_EP alloc_ep;
	DAT_RETURN dat_status;

	dapl_dbg_log(DAPL_DBG_TYPE_API | DAPL_DBG_TYPE_CM,
	    "dapl_ep_connect (%p, {%u.%u.%u.%u}, %X, %d, %d, %p, %x, %x)\n",
	    ep_handle,
	    remote_ia_address->sa_data[2],
	    remote_ia_address->sa_data[3],
	    remote_ia_address->sa_data[4],
	    remote_ia_address->sa_data[5],
	    remote_conn_qual, timeout, private_data_size,
	    private_data, qos, connect_flags);

	dat_status = DAT_SUCCESS;
	ep_ptr = (DAPL_EP *) ep_handle;

	/*
	 * Verify parameter & state. The connection handle must be good
	 * at this point.
	 */
	if (DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_EP);
		goto bail;
	}

	if (DAPL_BAD_HANDLE(ep_ptr->param.connect_evd_handle,
	    DAPL_MAGIC_EVD)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_EVD_CONN);
		goto bail;
	}

	/*
	 * If the endpoint needs a QP, associate the QP with it.
	 * This needs to be done carefully, in order to:
	 *	* Avoid allocating under a lock.
	 *	* Not step on data structures being altered by
	 *	  routines with which we are racing.
	 * So we:
	 *	* Confirm that a new QP is needed and is not forbidden by the
	 *	  current state.
	 *	* Allocate it into a separate EP.
	 *	* Take the EP lock.
	 *	* Reconfirm that the EP is in a state where it needs a QP.
	 *	* Assign the QP and release the lock.
	 */
	if (ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED) {
		if (ep_ptr->param.pz_handle == NULL ||
		    DAPL_BAD_HANDLE(ep_ptr->param.pz_handle, DAPL_MAGIC_PZ)) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_EP_NOTREADY);
			goto bail;
		}
		alloc_ep = *ep_ptr;

		dat_status = dapls_ib_qp_alloc(ep_ptr->header.owner_ia,
		    &alloc_ep, ep_ptr);
		if (dat_status != DAT_SUCCESS) {
			dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
			    DAT_RESOURCE_MEMORY);
			goto bail;
		}

		dapl_os_lock(&ep_ptr->header.lock);
		/*
		 * PZ shouldn't have changed since we're only racing with
		 * dapl_cr_accept()
		 */
		if (ep_ptr->qp_state != DAPL_QP_STATE_UNATTACHED) {
			/* Bail, cleaning up. */
			dapl_os_unlock(&ep_ptr->header.lock);
			dat_status = dapls_ib_qp_free(ep_ptr->header.owner_ia,
			    &alloc_ep);
			if (dat_status != DAT_SUCCESS) {
				dapl_dbg_log(DAPL_DBG_TYPE_WARN,
				    "ep_connect: ib_qp_free failed with %x\n",
				    dat_status);
			}
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    dapls_ep_state_subtype(ep_ptr));
			goto bail;
		}

		ep_ptr->qp_handle = alloc_ep.qp_handle;
		ep_ptr->qpn = alloc_ep.qpn;
		ep_ptr->qp_state = alloc_ep.qp_state;

		dapl_os_unlock(&ep_ptr->header.lock);
	}

	/*
	 * We do state checks and transitions under lock.
	 * The only code we're racing against is dapl_cr_accept.
	 */
	dapl_os_lock(&ep_ptr->header.lock);

	/*
	 * Verify the attributes of the EP handle before we connect it. Test
	 * all of the handles to make sure they are currently valid.
	 * Specifically:
	 *	pz_handle		required
	 *	recv_evd_handle		optional, but must be valid
	 *	request_evd_handle	optional, but must be valid
	 *	connect_evd_handle	required
	 */
	if (ep_ptr->param.pz_handle == NULL ||
	    DAPL_BAD_HANDLE(ep_ptr->param.pz_handle, DAPL_MAGIC_PZ) ||
	    ep_ptr->param.connect_evd_handle == NULL ||
	    DAPL_BAD_HANDLE(ep_ptr->param.connect_evd_handle, DAPL_MAGIC_EVD) ||
	    !(((DAPL_EVD *)ep_ptr->param.connect_evd_handle)->evd_flags &
	    DAT_EVD_CONNECTION_FLAG) ||
	    (ep_ptr->param.recv_evd_handle != DAT_HANDLE_NULL &&
	    (DAPL_BAD_HANDLE(ep_ptr->param.recv_evd_handle, DAPL_MAGIC_EVD))) ||
	    (ep_ptr->param.request_evd_handle != DAT_HANDLE_NULL &&
	    (DAPL_BAD_HANDLE(ep_ptr->param.request_evd_handle, DAPL_MAGIC_EVD)))) {
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_INVALID_STATE,
		    DAT_INVALID_STATE_EP_NOTREADY);
		goto bail;
	}

	/*
	 * Check both the EP state and the QP state: if we don't have a QP
	 * we need to attach one now.
	 */
	if (ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED) {
		dat_status = dapls_ib_qp_alloc(ep_ptr->header.owner_ia,
		    ep_ptr, ep_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_os_unlock(&ep_ptr->header.lock);
			dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
			    DAT_RESOURCE_TEP);
			goto bail;
		}
	}

	if (ep_ptr->param.ep_state != DAT_EP_STATE_UNCONNECTED) {
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_INVALID_STATE,
		    dapls_ep_state_subtype(ep_ptr));
		goto bail;
	}

	if (qos != DAT_QOS_BEST_EFFORT ||
	    connect_flags != DAT_CONNECT_DEFAULT_FLAG) {
		/*
		 * At this point we only support one QOS level
		 */
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_MODEL_NOT_SUPPORTED, 0);
		goto bail;
	}

	/*
	 * Verify the private data size doesn't exceed the max
	 */
	if (private_data_size > DAPL_CONSUMER_MAX_PRIVATE_DATA_SIZE) {
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG5);
		goto bail;
	}

	/*
	 * transition the state before requesting a connection to avoid
	 * race conditions
	 */
	ep_ptr->param.ep_state = DAT_EP_STATE_ACTIVE_CONNECTION_PENDING;

	/*
	 * At this point we're committed, and done with the endpoint
	 * except for the connect, so we can drop the lock.
	 */
	dapl_os_unlock(&ep_ptr->header.lock);

	/*
	 * fill in the private data
	 */
	(void) dapl_os_memzero(&prd, sizeof (DAPL_PRIVATE));
	if (private_data_size > 0)
		(void) dapl_os_memcpy(prd.private_data, private_data,
		    private_data_size);

	/* Copy the connection qualifiers */
	(void) dapl_os_memcpy(ep_ptr->param.remote_ia_address_ptr,
	    remote_ia_address, sizeof (DAT_SOCK_ADDR6));
	ep_ptr->param.remote_port_qual = remote_conn_qual;

	dat_status = dapls_ib_connect(ep_handle, remote_ia_address,
	    remote_conn_qual, private_data_size, &prd, timeout);
	if (dat_status != DAT_SUCCESS) {
		DAPL_EVD *evd_ptr;

		if (dat_status == DAT_ERROR(DAT_INVALID_ADDRESS,
		    DAT_INVALID_ADDRESS_UNREACHABLE)) {
			/* Unreachable IP address */
			evd_ptr = (DAPL_EVD *)ep_ptr->param.connect_evd_handle;
			if (evd_ptr != NULL) {
				(void) dapls_evd_post_connection_event(evd_ptr,
				    DAT_CONNECTION_EVENT_UNREACHABLE,
				    (DAT_HANDLE) ep_ptr, 0, 0);
			}
			ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
			dat_status = DAT_SUCCESS;
		} else if (dat_status == DAT_ERROR(DAT_INVALID_PARAMETER,
		    DAT_INVALID_ADDRESS_UNREACHABLE)) {
			/* Non-existent connection qualifier */
			evd_ptr = (DAPL_EVD *)ep_ptr->param.connect_evd_handle;
			if (evd_ptr != NULL) {
				(void) dapls_evd_post_connection_event(evd_ptr,
				    DAT_CONNECTION_EVENT_NON_PEER_REJECTED,
				    (DAT_HANDLE) ep_ptr, 0, 0);
			}
			ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
			dat_status = DAT_SUCCESS;
		} else {
			ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED;
		}
	}
bail:
	dapl_dbg_log(DAPL_DBG_TYPE_RTN | DAPL_DBG_TYPE_CM,
	    "dapl_ep_connect () returns 0x%x\n", dat_status);

	return (dat_status);
}
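/*
 * Consumer-side sketch of the active path handled by dapl_ep_connect():
 * connect an already-created EP and wait on its connect EVD for
 * DAT_CONNECTION_EVENT_ESTABLISHED.  Illustrative only; assumes <dat/udat.h>
 * and that remote_addr/conn_qual were resolved elsewhere.
 */
#include <dat/udat.h>

static DAT_RETURN example_active_connect(DAT_EP_HANDLE ep,
					 DAT_EVD_HANDLE conn_evd,
					 DAT_IA_ADDRESS_PTR remote_addr,
					 DAT_CONN_QUAL conn_qual)
{
	DAT_EVENT event;
	DAT_COUNT nmore;
	DAT_RETURN status;

	status = dat_ep_connect(ep, remote_addr, conn_qual,
				DAT_TIMEOUT_INFINITE,
				0, NULL,	/* no private data */
				DAT_QOS_BEST_EFFORT,
				DAT_CONNECT_DEFAULT_FLAG);
	if (status != DAT_SUCCESS)
		return status;

	/* Block until the connection either establishes or fails */
	status = dat_evd_wait(conn_evd, DAT_TIMEOUT_INFINITE, 1,
			      &event, &nmore);
	if (status == DAT_SUCCESS &&
	    event.event_number != DAT_CONNECTION_EVENT_ESTABLISHED)
		status = DAT_ERROR(DAT_INVALID_STATE, 0);	/* rejected, unreachable, ... */

	return status;
}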
/* * dapl_ia_open * * DAPL Requirements Version xxx, 6.2.1.1 * * Open a provider and return a handle. The handle enables the user * to invoke operations on this provider. * * The dat_ia_open call is actually part of the DAT registration module. * That function maps the DAT_NAME parameter of dat_ia_open to a DAT_PROVIDER, * and calls this function. * * Input: * provider * async_evd_qlen * async_evd_handle_ptr * * Output: * async_evd_handle * ia_handle * * Return Values: * DAT_SUCCESS * DAT_INSUFFICIENT_RESOURCES * DAT_INVALID_PARAMETER * DAT_INVALID_HANDLE * DAT_PROVIDER_NOT_FOUND (returned by dat registry if necessary) */ DAT_RETURN DAT_API dapl_ia_open(IN const DAT_NAME_PTR name, IN DAT_COUNT async_evd_qlen, INOUT DAT_EVD_HANDLE * async_evd_handle_ptr, OUT DAT_IA_HANDLE * ia_handle_ptr) { DAT_RETURN dat_status; DAT_PROVIDER *provider; DAPL_HCA *hca_ptr; DAPL_IA *ia_ptr; DAPL_EVD *evd_ptr; dat_status = DAT_SUCCESS; hca_ptr = NULL; ia_ptr = NULL; dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_ia_open (%s, %d, %p, %p)\n", name, async_evd_qlen, async_evd_handle_ptr, ia_handle_ptr); dat_status = dapl_provider_list_search(name, &provider); if (DAT_SUCCESS != dat_status) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG1); goto bail; } /* ia_handle_ptr and async_evd_handle_ptr cannot be NULL */ if (ia_handle_ptr == NULL) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); goto bail; } if (async_evd_handle_ptr == NULL) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); goto bail; } /* initialize the caller's OUT param */ *ia_handle_ptr = DAT_HANDLE_NULL; /* get the hca_ptr */ hca_ptr = (DAPL_HCA *) provider->extension; /* log levels could be reset and set between opens */ g_dapl_dbg_type = dapl_os_get_env_val("DAPL_DBG_TYPE", DAPL_DBG_TYPE_ERR | DAPL_DBG_TYPE_WARN); /* * Open the HCA if it has not been done before. */ dapl_os_lock(&hca_ptr->lock); if (hca_ptr->ib_hca_handle == IB_INVALID_HANDLE) { /* register with the HW */ dat_status = dapls_ib_open_hca(hca_ptr->name, hca_ptr, DAPL_OPEN_NORMAL); if (dat_status != DAT_SUCCESS) { dapl_dbg_log(DAPL_DBG_TYPE_ERR, "dapls_ib_open_hca failed %x\n", dat_status); dapl_os_unlock(&hca_ptr->lock); goto bail; } /* * Obtain the IP address associated with this name and HCA. */ #ifdef IBHOSTS_NAMING dapli_assign_hca_ip_address(hca_ptr, name); #endif /* IBHOSTS_NAMING */ /* * Obtain IA attributes from the HCA to limit certain * operations. * If using DAPL_ATS naming, ib_query_hca will also set the ip * address. 
*/ dat_status = dapls_ib_query_hca(hca_ptr, &hca_ptr->ia_attr, NULL, &hca_ptr->hca_address); if (dat_status != DAT_SUCCESS) { dapli_hca_cleanup(hca_ptr, DAT_FALSE); dapl_os_unlock(&hca_ptr->lock); goto bail; } } /* Take a reference on the hca_handle */ dapl_os_atomic_inc(&hca_ptr->handle_ref_count); dapl_os_unlock(&hca_ptr->lock); /* Allocate and initialize ia structure */ ia_ptr = dapl_ia_alloc(provider, hca_ptr); if (!ia_ptr) { dapl_os_lock(&hca_ptr->lock); dapli_hca_cleanup(hca_ptr, DAT_TRUE); dapl_os_unlock(&hca_ptr->lock); dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); goto bail; } /* we need an async EVD for this IA * use the one passed in (if non-NULL) or create one */ evd_ptr = (DAPL_EVD *) * async_evd_handle_ptr; if (evd_ptr) { if (DAPL_BAD_HANDLE(evd_ptr, DAPL_MAGIC_EVD) || !(evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_ASYNC); goto bail; } /* InfiniBand allows only 1 asychronous event handler per HCA */ /* (see InfiniBand Spec, release 1.1, vol I, section 11.5.2, */ /* page 559). */ /* */ /* We only need to make sure that this EVD's CQ belongs to */ /* the same HCA as is being opened. */ if (evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle != hca_ptr->ib_hca_handle) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_ASYNC); goto bail; } ia_ptr->cleanup_async_error_evd = DAT_FALSE; ia_ptr->async_error_evd = evd_ptr; } else { /* Verify we have >0 length, and let the provider check the size */ if (async_evd_qlen <= 0) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); goto bail; } dat_status = dapls_evd_internal_create(ia_ptr, NULL, /* CNO ptr */ async_evd_qlen, DAT_EVD_ASYNC_FLAG, &evd_ptr); if (dat_status != DAT_SUCCESS) { goto bail; } dapl_os_atomic_inc(&evd_ptr->evd_ref_count); dapl_os_lock(&hca_ptr->lock); if (hca_ptr->async_evd != (DAPL_EVD *) 0) { dapl_os_unlock(&hca_ptr->lock); } else { hca_ptr->async_evd = evd_ptr; dapl_os_unlock(&hca_ptr->lock); /* Register the handlers associated with the async EVD. */ dat_status = dapls_ia_setup_callbacks(ia_ptr, evd_ptr); if (dat_status != DAT_SUCCESS) { /* Assign the EVD so it gets cleaned up */ ia_ptr->cleanup_async_error_evd = DAT_TRUE; ia_ptr->async_error_evd = evd_ptr; goto bail; } } ia_ptr->cleanup_async_error_evd = DAT_TRUE; ia_ptr->async_error_evd = evd_ptr; } dat_status = DAT_SUCCESS; *ia_handle_ptr = ia_ptr; *async_evd_handle_ptr = evd_ptr; #if DAPL_COUNTERS dapli_start_counters((DAT_HANDLE)ia_ptr); #endif bail: if (dat_status != DAT_SUCCESS) { if (ia_ptr) { /* This will release the async EVD if needed. */ dapl_ia_close(ia_ptr, DAT_CLOSE_ABRUPT_FLAG); } } dapl_dbg_log(DAPL_DBG_TYPE_RTN, "dapl_ia_open () returns 0x%x\n", dat_status); return dat_status; }
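/*
 * Consumer-side sketch of what reaches dapl_ia_open() through the DAT
 * registry: dat_ia_open() with a provider name from dat.conf, letting the
 * provider create the async EVD (the *async_evd_handle == DAT_HANDLE_NULL
 * case above).  Illustrative only; the provider name is an example and the
 * sketch assumes <dat/udat.h>.
 */
#include <dat/udat.h>

static DAT_RETURN example_ia_open(DAT_IA_HANDLE *ia_out)
{
	DAT_EVD_HANDLE async_evd = DAT_HANDLE_NULL;	/* ask provider to create it */
	DAT_RETURN status;

	status = dat_ia_open("ofa-v2-ib0",	/* example dat.conf entry name */
			     8,			/* async EVD queue length */
			     &async_evd, ia_out);
	if (status != DAT_SUCCESS)
		return status;

	/* ... create PZ, EVDs, and EPs against *ia_out here ... */

	return dat_ia_close(*ia_out, DAT_CLOSE_GRACEFUL_FLAG);
}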
/* * dapl_ep_free * * DAPL Requirements Version xxx, 6.5.3 * * Destroy an instance of the Endpoint * * Input: * ep_handle * * Output: * none * * Returns: * DAT_SUCCESS * DAT_INVALID_PARAMETER * DAT_INVALID_STATE */ DAT_RETURN dapl_ep_free( IN DAT_EP_HANDLE ep_handle) { DAPL_EP *ep_ptr; DAPL_IA *ia_ptr; DAT_EP_PARAM *param; DAT_RETURN dat_status = DAT_SUCCESS; dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_ep_free (%p)\n", ep_handle); ep_ptr = (DAPL_EP *) ep_handle; param = &ep_ptr->param; /* * Verify parameter & state */ if (DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP) && !(ep_ptr->header.magic == DAPL_MAGIC_EP_EXIT && ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); goto bail; } if (ep_ptr->param.ep_state == DAT_EP_STATE_RESERVED || ep_ptr->param.ep_state == DAT_EP_STATE_PASSIVE_CONNECTION_PENDING || ep_ptr->param.ep_state == DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING) { dapl_dbg_log(DAPL_DBG_TYPE_WARN, "--> dapl_ep_free: invalid state: %x, ep %p\n", ep_ptr->param.ep_state, ep_ptr); dat_status = DAT_ERROR(DAT_INVALID_STATE, dapls_ep_state_subtype(ep_ptr)); goto bail; } ia_ptr = ep_ptr->header.owner_ia; /* * If we are connected, issue a disconnect. If we are in the * disconnect_pending state, disconnect with the ABRUPT flag * set. */ /* * Do verification of parameters and the state change atomically. */ dapl_os_lock(&ep_ptr->header.lock); if (ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED || ep_ptr->param.ep_state == DAT_EP_STATE_ACTIVE_CONNECTION_PENDING || ep_ptr->param.ep_state == DAT_EP_STATE_COMPLETION_PENDING || ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECT_PENDING) { /* * Issue the disconnect and return. The DISCONNECT callback * will invoke this routine and finish the job */ ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECT_PENDING; dapl_os_unlock(&ep_ptr->header.lock); dapl_dbg_log(DAPL_DBG_TYPE_EP, "--> dapl_ep_free: disconnecting EP: %x, ep %p\n", ep_ptr->param.ep_state, ep_ptr); dat_status = dapls_ib_disconnect(ep_ptr, DAT_CLOSE_ABRUPT_FLAG); ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECT_PENDING; ep_ptr->header.magic = DAPL_MAGIC_EP_EXIT; } else { dapl_os_unlock(&ep_ptr->header.lock); } /* * Release all reference counts and unlink this structure. If we * got here from a callback, don't repeat this step */ if (!(ep_ptr->header.magic == DAPL_MAGIC_EP_EXIT && ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED)) { /* Remove link from the IA */ dapl_ia_unlink_ep(ia_ptr, ep_ptr); } /* * If the EP is disconnected tear everything down. Otherwise, * disconnect the EP but leave the QP and basic EP structure * intact; the callback code will finish the job. */ dapl_os_lock(&ep_ptr->header.lock); if (ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED || ep_ptr->param.ep_state == DAT_EP_STATE_UNCONNECTED) { /* * Update ref counts. Note the user may have used ep_modify * to set handles to NULL. 
*/ if (param->pz_handle != NULL) { dapl_os_atomic_dec(&((DAPL_PZ *) param->pz_handle)->pz_ref_count); param->pz_handle = NULL; } if (param->recv_evd_handle != NULL) { dapl_os_atomic_dec(&((DAPL_EVD *) param->recv_evd_handle)->evd_ref_count); param->recv_evd_handle = NULL; } if (param->request_evd_handle != NULL) { dapl_os_atomic_dec(&((DAPL_EVD *) param->request_evd_handle)->evd_ref_count); param->request_evd_handle = NULL; } if (param->connect_evd_handle != NULL) { dapl_os_atomic_dec(&((DAPL_EVD *) param->connect_evd_handle)->evd_ref_count); param->connect_evd_handle = NULL; } if (param->srq_handle != NULL) { dapl_os_atomic_dec(&((DAPL_SRQ *) param->srq_handle)->srq_ref_count); param->srq_handle = NULL; } dapl_dbg_log(DAPL_DBG_TYPE_EP, "--> dapl_ep_free: Free EP: %x, ep %p\n", ep_ptr->param.ep_state, ep_ptr); /* * Free the QP. If the EP has never been used, * the QP is invalid */ if (ep_ptr->qp_handle != IB_INVALID_HANDLE) { dat_status = dapls_ib_qp_free(ia_ptr, ep_ptr); /* * This should always succeed, but report to the user if * there is a problem */ if (dat_status != DAT_SUCCESS) { goto bail; } ep_ptr->qp_handle = IB_INVALID_HANDLE; } dapl_os_unlock(&ep_ptr->header.lock); /* Free the resource */ dapl_ep_dealloc(ep_ptr); } else { dapl_os_unlock(&ep_ptr->header.lock); } bail: return (dat_status); }
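/*
 * Consumer-side sketch of the teardown order dapl_ep_free() expects: the EP
 * should be disconnected (and ideally the disconnect event drained) before it
 * is freed; otherwise the free is completed later by the DISCONNECT callback.
 * Illustrative only; assumes <dat/udat.h>.
 */
#include <dat/udat.h>

static void example_ep_teardown(DAT_EP_HANDLE ep, DAT_EVD_HANDLE conn_evd)
{
	DAT_EVENT event;
	DAT_COUNT nmore;

	(void) dat_ep_disconnect(ep, DAT_CLOSE_ABRUPT_FLAG);

	/* Drain the DAT_CONNECTION_EVENT_DISCONNECTED event (best effort) */
	(void) dat_evd_wait(conn_evd, 5 * 1000000, 1, &event, &nmore);

	(void) dat_ep_free(ep);
}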
DAT_RETURN DAT_API dapl_evd_resize(IN DAT_EVD_HANDLE evd_handle, IN DAT_COUNT evd_qlen) { DAPL_IA *ia_ptr; DAPL_EVD *evd_ptr; DAT_COUNT pend_cnt; DAT_RETURN dat_status; dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_evd_resize (%p, %d)\n", evd_handle, evd_qlen); if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE1); goto bail; } evd_ptr = (DAPL_EVD *) evd_handle; ia_ptr = evd_ptr->header.owner_ia; if (evd_qlen == evd_ptr->qlen) { dat_status = DAT_SUCCESS; goto bail; } if (evd_qlen > ia_ptr->hca_ptr->ia_attr.max_evd_qlen) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); goto bail; } dapl_os_lock(&evd_ptr->header.lock); /* Don't try to resize if we are actively waiting */ if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED) { dapl_os_unlock(&evd_ptr->header.lock); dat_status = DAT_ERROR(DAT_INVALID_STATE, 0); goto bail; } pend_cnt = dapls_rbuf_count(&evd_ptr->pending_event_queue); if (pend_cnt > evd_qlen) { dapl_os_unlock(&evd_ptr->header.lock); dat_status = DAT_ERROR(DAT_INVALID_STATE, 0); goto bail; } if (evd_ptr->ib_cq_handle) { dat_status = dapls_ib_cq_resize(evd_ptr->header.owner_ia, evd_ptr, &evd_qlen); if (dat_status != DAT_SUCCESS) { dapl_os_unlock(&evd_ptr->header.lock); goto bail; } } dat_status = dapls_evd_event_realloc(evd_ptr, evd_qlen); if (dat_status != DAT_SUCCESS) { dapl_os_unlock(&evd_ptr->header.lock); goto bail; } dapl_os_unlock(&evd_ptr->header.lock); bail: return dat_status; }
/* * dapl_cno_wait * * DAPL Requirements Version xxx, 6.3.2.3 * * Wait for a consumer notification event * * Input: * cno_handle * timeout * evd_handle * * Output: * evd_handle * * Returns: * DAT_SUCCESS * DAT_INVALID_HANDLE * DAT_QUEUE_EMPTY * DAT_INVALID_PARAMETER */ DAT_RETURN DAT_API dapl_cno_wait(IN DAT_CNO_HANDLE cno_handle, /* cno_handle */ IN DAT_TIMEOUT timeout, /* agent */ OUT DAT_EVD_HANDLE * evd_handle) { /* ia_handle */ DAPL_CNO *cno_ptr; DAT_RETURN dat_status; if (DAPL_BAD_HANDLE(cno_handle, DAPL_MAGIC_CNO)) { dat_status = DAT_INVALID_HANDLE | DAT_INVALID_HANDLE_CNO; goto bail; } dat_status = DAT_SUCCESS; cno_ptr = (DAPL_CNO *) cno_handle; if (cno_ptr->cno_state == DAPL_CNO_STATE_DEAD) { dat_status = DAT_ERROR(DAT_INVALID_STATE, DAT_INVALID_STATE_CNO_DEAD); goto bail; } dapl_os_lock(&cno_ptr->header.lock); if (cno_ptr->cno_state == DAPL_CNO_STATE_TRIGGERED) { cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED; *evd_handle = cno_ptr->cno_evd_triggered; cno_ptr->cno_evd_triggered = NULL; dapl_os_unlock(&cno_ptr->header.lock); goto bail; } while (cno_ptr->cno_state == DAPL_CNO_STATE_UNTRIGGERED && DAT_GET_TYPE(dat_status) != DAT_TIMEOUT_EXPIRED) { cno_ptr->cno_waiters++; dapl_os_unlock(&cno_ptr->header.lock); dat_status = dapl_os_wait_object_wait(&cno_ptr->cno_wait_object, timeout); dapl_os_lock(&cno_ptr->header.lock); cno_ptr->cno_waiters--; } if (cno_ptr->cno_state == DAPL_CNO_STATE_DEAD) { dat_status = DAT_ERROR(DAT_INVALID_STATE, DAT_INVALID_STATE_CNO_DEAD); } else if (dat_status == DAT_SUCCESS) { /* * After the first triggering, this will be a valid handle. * If we're racing with wakeups of other CNO waiters, * that's ok. */ dapl_os_assert(cno_ptr->cno_state == DAPL_CNO_STATE_TRIGGERED); cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED; *evd_handle = cno_ptr->cno_evd_triggered; cno_ptr->cno_evd_triggered = NULL; } else if (DAT_GET_TYPE(dat_status) == DAT_TIMEOUT_EXPIRED) { cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED; *evd_handle = NULL; dat_status = DAT_QUEUE_EMPTY; } else { /* * The only other reason we could have made it out of * the loop is an interrupted system call. */ dapl_os_assert(DAT_GET_TYPE(dat_status) == DAT_INTERRUPTED_CALL); } dapl_os_unlock(&cno_ptr->header.lock); bail: return dat_status; }
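/*
 * Consumer-side sketch of the CNO path serviced by dapl_cno_wait(): attach a
 * CNO to an EVD at creation time, block on the CNO, then drain whichever EVD
 * it reports.  Illustrative only; assumes <dat/udat.h> and the uDAT
 * DAT_OS_WAIT_PROXY_AGENT_NULL constant.
 */
#include <dat/udat.h>

static DAT_RETURN example_cno_wait(DAT_IA_HANDLE ia)
{
	DAT_CNO_HANDLE cno;
	DAT_EVD_HANDLE dto_evd, triggered;
	DAT_EVENT event;
	DAT_RETURN status;

	status = dat_cno_create(ia, DAT_OS_WAIT_PROXY_AGENT_NULL, &cno);
	if (status != DAT_SUCCESS)
		return status;

	status = dat_evd_create(ia, 64, cno, DAT_EVD_DTO_FLAG, &dto_evd);
	if (status != DAT_SUCCESS)
		goto out;

	/* Returns DAT_QUEUE_EMPTY on timeout, per the code above */
	status = dat_cno_wait(cno, 10 * 1000000, &triggered);
	if (status == DAT_SUCCESS)
		while (dat_evd_dequeue(triggered, &event) == DAT_SUCCESS)
			/* process event */ ;

	/* Free the EVD first so the CNO ref count drops to zero */
	(void) dat_evd_free(dto_evd);
out:
	(void) dat_cno_free(cno);
	return status;
}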
/* * dapl_cr_accept * * DAPL Requirements Version xxx, 6.4.2.1 * * Establish a connection between active remote side requesting Endpoint * and passic side local Endpoint. * * Input: * cr_handle * ep_handle * private_data_size * private_data * * Output: * none * * Returns: * DAT_SUCCESS * DAT_INVALID_PARAMETER * DAT_INVALID_ATTRIBUTE */ DAT_RETURN dapl_cr_accept( IN DAT_CR_HANDLE cr_handle, IN DAT_EP_HANDLE ep_handle, IN DAT_COUNT private_data_size, IN const DAT_PVOID private_data) { DAPL_EP *ep_ptr; DAT_RETURN dat_status; DAPL_PRIVATE prd; DAPL_CR *cr_ptr; DAT_EP_STATE entry_ep_state; DAT_EP_HANDLE entry_ep_handle; dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_cr_accept(%p, %p, %d, %p)\n", cr_handle, ep_handle, private_data_size, private_data); if (DAPL_BAD_HANDLE(cr_handle, DAPL_MAGIC_CR)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_CR); goto bail; } cr_ptr = (DAPL_CR *) cr_handle; /* * Return an error if we have an ep_handle and the CR already has an * EP, indicating this is an RSP connection or PSP_PROVIDER_FLAG was * specified. */ if (ep_handle != NULL && (DAPL_BAD_HANDLE(ep_handle, DAPL_MAGIC_EP) || cr_ptr->param.local_ep_handle != NULL)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); goto bail; } if ((0 != private_data_size) && (NULL == private_data)) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); goto bail; } /* * Verify the private data size doesn't exceed the max */ if (private_data_size > DAPL_CONSUMER_MAX_PRIVATE_DATA_SIZE) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); goto bail; } /* * ep_handle is NULL if the user specified DAT_PSP_PROVIDER_FLAG * OR this is an RSP connection; retrieve it from the cr. */ if (ep_handle == NULL) { ep_handle = cr_ptr->param.local_ep_handle; if ((((DAPL_EP *) ep_handle)->param.ep_state != DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING) && (((DAPL_EP *)ep_handle)->param.ep_state != DAT_EP_STATE_PASSIVE_CONNECTION_PENDING)) { return (DAT_INVALID_STATE); } } else { /* ensure this EP isn't connected or in use */ if (((DAPL_EP *)ep_handle)->param.ep_state != DAT_EP_STATE_UNCONNECTED) { return (DAT_INVALID_STATE); } } ep_ptr = (DAPL_EP *) ep_handle; /* * Verify the attributes of the EP handle before we connect it. Test * all of the handles to make sure they are currently valid. * Specifically: * pz_handle required * recv_evd_handle optional, but must be valid * request_evd_handle optional, but must be valid * connect_evd_handle required * We do all verification and state change under lock, at which * point the EP state should protect us from most races. */ dapl_os_lock(&ep_ptr->header.lock); if ((ep_ptr->param.pz_handle == NULL) || DAPL_BAD_HANDLE(ep_ptr->param.pz_handle, DAPL_MAGIC_PZ) || (ep_ptr->param.connect_evd_handle == NULL) || DAPL_BAD_HANDLE(ep_ptr->param.connect_evd_handle, DAPL_MAGIC_EVD) || !(((DAPL_EVD *)ep_ptr->param.connect_evd_handle)->evd_flags & DAT_EVD_CONNECTION_FLAG) || (ep_ptr->param.recv_evd_handle != DAT_HANDLE_NULL && (DAPL_BAD_HANDLE(ep_ptr->param.recv_evd_handle, DAPL_MAGIC_EVD))) || (ep_ptr->param.request_evd_handle != DAT_HANDLE_NULL && (DAPL_BAD_HANDLE(ep_ptr->param.request_evd_handle, DAPL_MAGIC_EVD)))) { dapl_os_unlock(&ep_ptr->header.lock); dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); goto bail; } if (ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED) { /* * If we are lazy attaching the QP then we may need to * hook it up here. 
Typically, we run this code only for * DAT_PSP_PROVIDER_FLAG */ dat_status = dapls_ib_qp_alloc(cr_ptr->header.owner_ia, ep_ptr, ep_ptr); if (dat_status != DAT_SUCCESS) { /* This is not a great error code, but spec allows */ dapl_os_unlock(&ep_ptr->header.lock); dat_status = DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); goto bail; } } entry_ep_state = ep_ptr->param.ep_state; entry_ep_handle = cr_ptr->param.local_ep_handle; ep_ptr->param.ep_state = DAT_EP_STATE_COMPLETION_PENDING; ep_ptr->cm_handle = cr_ptr->ib_cm_handle; ep_ptr->cr_ptr = cr_ptr; ep_ptr->param.remote_ia_address_ptr = cr_ptr->param. remote_ia_address_ptr; cr_ptr->param.local_ep_handle = ep_handle; /* * private data */ (void) dapl_os_memcpy(prd.private_data, private_data, private_data_size); (void) dapl_os_memzero(prd.private_data + private_data_size, sizeof (DAPL_PRIVATE) - private_data_size); dapl_os_unlock(&ep_ptr->header.lock); dat_status = dapls_ib_accept_connection(cr_handle, ep_handle, &prd); /* * If the provider failed, unwind the damage so we are back at * the initial state. */ if (dat_status != DAT_SUCCESS) { ep_ptr->param.ep_state = entry_ep_state; cr_ptr->param.local_ep_handle = entry_ep_handle; } else { /* * Make this CR invalid. We need to hang on to it until * the connection terminates, but it's destroyed from * the app point of view. */ cr_ptr->header.magic = DAPL_MAGIC_CR_DESTROYED; } bail: return (dat_status); }
DAT_RETURN dapl_evd_dequeue( IN DAT_EVD_HANDLE evd_handle, OUT DAT_EVENT *event) { DAPL_EVD *evd_ptr; DAT_EVENT *local_event; DAT_RETURN dat_status; dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_evd_dequeue (%p, %p)\n", evd_handle, event); evd_ptr = (DAPL_EVD *)evd_handle; dat_status = DAT_SUCCESS; if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) { dat_status = DAT_ERROR(DAT_INVALID_HANDLE, 0); goto bail; } if (event == NULL) { dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); goto bail; } /* * We need to dequeue under lock, as the IB OS Access API * restricts us from having multiple threads in CQ poll, and the * DAPL 1.1 API allows multiple threads in dat_evd_dequeue() */ dapl_os_lock(&evd_ptr->header.lock); /* * Make sure there are no other waiters and the evd is active. * Currently this means only the OPEN state is allowed. */ if (evd_ptr->evd_state != DAPL_EVD_STATE_OPEN || evd_ptr->catastrophic_overflow) { dapl_os_unlock(&evd_ptr->header.lock); dat_status = DAT_ERROR(DAT_INVALID_STATE, 0); goto bail; } /* * Try the EVD rbuf first; poll from the CQ only if that's empty. * This keeps events in order if dat_evd_wait() has copied events * from CQ to EVD. */ if (evd_ptr->pending_event_queue.head != evd_ptr->pending_event_queue.tail) { local_event = (DAT_EVENT *) dapls_rbuf_remove(&evd_ptr->pending_event_queue); if (local_event != NULL) { *event = *local_event; dat_status = dapls_rbuf_add(&evd_ptr->free_event_queue, local_event); } else { /* should never happen */ dat_status = DAT_ERROR(DAT_INTERNAL_ERROR, 0); } } else if (evd_ptr->ib_cq_handle == IB_INVALID_HANDLE) { dat_status = DAT_ERROR(DAT_QUEUE_EMPTY, 0); } else if ((evd_ptr->evd_flags & (DAT_EVD_CONNECTION_FLAG | DAT_EVD_CR_FLAG | DAT_EVD_ASYNC_FLAG)) == 0) { /* * No need to drop into kernel, just check the CQ. */ dat_status = dapls_evd_cq_poll_to_event(evd_ptr, event); } else { /* poll for events with threshold and timeout both 0 */ evd_ptr->threshold = 0; dapl_os_unlock(&evd_ptr->header.lock); dat_status = dapls_evd_copy_events(evd_ptr, 0); if (dat_status != DAT_SUCCESS) { dat_status = DAT_ERROR(DAT_QUEUE_EMPTY, 0); goto bail; } dapl_os_lock(&evd_ptr->header.lock); local_event = (DAT_EVENT *)dapls_rbuf_remove( &evd_ptr->pending_event_queue); if (local_event != NULL) { *event = *local_event; dat_status = dapls_rbuf_add(&evd_ptr->free_event_queue, local_event); } else { /* still didn't find anything */ dat_status = DAT_ERROR(DAT_QUEUE_EMPTY, 0); } } dapl_os_unlock(&evd_ptr->header.lock); bail: dapl_dbg_log(DAPL_DBG_TYPE_RTN, "dapl_evd_dequeue () returns 0x%x\n", dat_status); return (dat_status); }
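/*
 * Consumer-side sketch of polling with dat_evd_dequeue(), which the code
 * above serves: spin on DAT_QUEUE_EMPTY until a DTO completion shows up.
 * Illustrative only; assumes <dat/udat.h>.
 */
#include <dat/udat.h>

static DAT_RETURN example_poll_dto(DAT_EVD_HANDLE dto_evd, DAT_EVENT *event)
{
	DAT_RETURN status;

	do {
		status = dat_evd_dequeue(dto_evd, event);
	} while (DAT_GET_TYPE(status) == DAT_QUEUE_EMPTY);

	if (status == DAT_SUCCESS &&
	    event->event_number == DAT_DTO_COMPLETION_EVENT) {
		/* examine event->event_data.dto_completion_event_data here */
	}
	return status;
}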