/*
 * dat_registry_add_provider
 *
 * Provider-side registry entry point: record a provider's ia_open
 * function and its describing info in the dynamic registry.
 *
 * Returns DAT_INVALID_PARAMETER for bad pointers, DAT_INVALID_STATE
 * if the registry is not initialized, otherwise the result of
 * dat_dr_insert().
 */
DAT_RETURN dat_registry_add_provider(
	IN DAT_PROVIDER *provider,
	IN const DAT_PROVIDER_INFO *provider_info)
{
	DAT_DR_ENTRY dr_entry;

	dat_os_dbg_print(DAT_OS_DBG_TYPE_PROVIDER_API,
	    "DAT Registry: dat_registry_add_provider() called\n");

	/* Validate both pointer arguments before touching them. */
	if (UDAT_IS_BAD_POINTER(provider)) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG1));
	}
	if (UDAT_IS_BAD_POINTER(provider_info)) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2));
	}

	/* The registry must be in a usable state. */
	if (DAT_FALSE == udat_check_state()) {
		return (DAT_ERROR(DAT_INVALID_STATE, 0));
	}

	/* Build the registry entry and hand it to the dynamic registry. */
	dr_entry.ref_count = 0;
	dr_entry.ia_open_func = provider->ia_open_func;
	dr_entry.info = *provider_info;

	return (dat_dr_insert(provider_info, &dr_entry));
}
/*
 * dapl_cr_query
 *
 * Copy the Connection Request parameters out to the consumer.
 * The param mask is deliberately not consulted: the whole parameter
 * structure is cheap to copy, so all fields are always returned.
 *
 * Returns DAT_SUCCESS, DAT_INVALID_HANDLE, or DAT_INVALID_PARAMETER.
 */
DAT_RETURN dapl_cr_query(
	IN DAT_CR_HANDLE cr_handle,
	IN DAT_CR_PARAM_MASK cr_param_mask,
	OUT DAT_CR_PARAM *cr_param)
{
	DAPL_CR *cr;

	dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_cr_query (%p, %x, %p)\n",
	    cr_handle, cr_param_mask, cr_param);

	if (DAPL_BAD_HANDLE(cr_handle, DAPL_MAGIC_CR)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_CR));
	}
	if (NULL == cr_param) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3));
	}

	cr = (DAPL_CR *)cr_handle;
	/* since the arguments are easily accessible, ignore the mask */
	(void) dapl_os_memcpy(cr_param, &cr->param, sizeof (DAT_CR_PARAM));

	return (DAT_SUCCESS);
}
/*
 * dapl_cno_query
 *
 * DAPL Requirements Version xxx, 6.3.2.5
 *
 * Return the consumer parameters of the CNO: its owning IA and the
 * OS proxy wait agent registered at creation time.
 *
 * Input:
 *	cno_handle
 *	cno_param_mask
 *
 * Output:
 *	cno_param
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_PARAMETER
 */
DAT_RETURN
dapl_cno_query(
	IN	DAT_CNO_HANDLE		cno_handle,	/* cno_handle */
	IN	DAT_CNO_PARAM_MASK	cno_param_mask,	/* cno_param_mask */
	OUT	DAT_CNO_PARAM		*cno_param)	/* cno_param */
{
	DAPL_CNO *cno;

	if (DAPL_BAD_HANDLE(cno_handle, DAPL_MAGIC_CNO)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_CNO));
	}

	/* reject any bits outside the defined field set */
	if (cno_param_mask & ~DAT_CNO_FIELD_ALL) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2));
	}

	if (NULL == cno_param) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3));
	}

	cno = (DAPL_CNO *)cno_handle;
	cno_param->ia_handle = cno->header.owner_ia;
	cno_param->agent = cno->cno_wait_agent;

	return (DAT_SUCCESS);
}
/*
 * dapl_ep_recv_query
 *
 * uDAPL Version 1.2, 6.6.11
 *
 * Query the Endpoint for its receive-buffer usage counts
 * (nbufs_allocate / bufs_alloc_span).  NOTE(review): the previous
 * header said "Destroy an instance of the Endpoint" — a copy/paste
 * error.  This provider does not implement the query: after handle
 * validation it always reports DAT_NOT_IMPLEMENTED.
 *
 * Input:
 *	ep_handle
 *
 * Output:
 *	nbufs_allocate, bufs_alloc_span (never written here — not implemented)
 *
 * Returns:
 *	DAT_INVALID_HANDLE
 *	DAT_NOT_IMPLEMENTED (DAT_NO_SUBTYPE)
 */
DAT_RETURN DAT_API dapl_ep_recv_query(IN DAT_EP_HANDLE ep_handle,
				      OUT DAT_COUNT * nbufs_allocate,
				      OUT DAT_COUNT * bufs_alloc_span)
{
	DAPL_EP *ep_ptr;
	DAT_RETURN dat_status;

	dat_status = DAT_SUCCESS;
	dapl_dbg_log(DAPL_DBG_TYPE_API,
		     "dapl_ep_recv_query (%p, %p, %p)\n",
		     ep_handle, nbufs_allocate, bufs_alloc_span);

	ep_ptr = (DAPL_EP *) ep_handle;

	/*
	 * Verify parameter & state
	 */
	if (DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP);
		goto bail;
	}

	/* unconditional: this provider does not support the query */
	dat_status = DAT_ERROR(DAT_NOT_IMPLEMENTED, DAT_NO_SUBTYPE);

      bail:
	return dat_status;
}
/*
 * dapl_ep_query
 *
 * DAPL Requirements Version xxx, 6.5.5
 *
 * Provide the consumer parameters, including attributes and status of
 * the Endpoint.
 *
 * Input:
 *	ep_handle
 *	ep_param_mask
 *
 * Output:
 *	ep_param
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_PARAMETER
 */
DAT_RETURN DAT_API dapl_ep_query(IN DAT_EP_HANDLE ep_handle,
				 IN DAT_EP_PARAM_MASK ep_param_mask,
				 OUT DAT_EP_PARAM * ep_param)
{
	DAPL_EP *ep_ptr;
	DAT_RETURN dat_status;

	dapl_dbg_log(DAPL_DBG_TYPE_API,
		     "dapl_ep_query (%p, %x, %p)\n",
		     ep_handle, ep_param_mask, ep_param);

	dat_status = DAT_SUCCESS;
	ep_ptr = (DAPL_EP *) ep_handle;

	/*
	 * Verify parameter & state
	 */
	if (DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP);
		goto bail;
	}

	if (ep_param == NULL) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3);
		goto bail;
	}

	/*
	 * Fill in according to user request
	 *
	 * N.B. Just slam all values into the user structure, there
	 *	is nothing to be gained by checking for each bit.
	 */
	if (ep_param_mask & DAT_EP_FIELD_ALL) {
		/* only attempt to get remote IA address if consumer
		 * requested it; the CM lookup is only meaningful on a
		 * connected EP.
		 */
		if (ep_param_mask & DAT_EP_FIELD_REMOTE_IA_ADDRESS_PTR) {
			if (ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED) {
				/* obtain the remote IP address.
				 * NOTE(review): a failure status from this
				 * call is intentionally allowed to propagate
				 * as the function's return value, while the
				 * rest of the param copy still happens.
				 */
				dat_status =
				    dapls_ib_cm_remote_addr((DAT_HANDLE)
							    ep_handle,
							    &ep_ptr->
							    remote_ia_address);
			}
			/* pointer aimed at the EP's own cached copy, even
			 * when not connected (stale/zero contents then)
			 */
			ep_ptr->param.remote_ia_address_ptr =
			    (DAT_IA_ADDRESS_PTR) & ep_ptr->remote_ia_address;
		}
		*ep_param = ep_ptr->param;
		/* translate the internal IA pointer to the consumer-visible
		 * IA handle
		 */
		dats_get_ia_handle(ep_ptr->param.ia_handle,
				   &ep_param->ia_handle);
	}

      bail:
	return dat_status;
}
/*
 * dapl_os_wait_object_destroy
 *
 * Tear down a wait object: destroy the condition variable first and,
 * only if that succeeded, the protecting mutex (matching the original
 * short-circuit behavior).
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INTERNAL_ERROR
 */
DAT_RETURN dapl_os_wait_object_destroy(IN DAPL_OS_WAIT_OBJECT * wait_obj)
{
	int rc;

	rc = pthread_cond_destroy(&wait_obj->cv);
	if (rc == 0) {
		rc = pthread_mutex_destroy(&wait_obj->lock);
	}

	return (rc != 0) ? DAT_ERROR(DAT_INTERNAL_ERROR, 0) : DAT_SUCCESS;
}
/*
 * dat_registry_list_providers
 *
 * Consumer entry point: list installed providers.  With
 * max_to_return == 0 the call (per spec) fills *entries_returned with
 * the installed-provider count but still fails with
 * DAT_INVALID_PARAMETER/ARG1.
 */
DAT_RETURN dat_registry_list_providers(
	IN DAT_COUNT max_to_return,
	OUT DAT_COUNT *entries_returned,
	OUT DAT_PROVIDER_INFO *(dat_provider_list[]))
{
	dat_os_dbg_print(DAT_OS_DBG_TYPE_CONSUMER_API,
	    "DAT Registry: dat_registry_list_providers() called\n");

	if (DAT_FALSE == udat_check_state()) {
		return (DAT_ERROR(DAT_INVALID_STATE, 0));
	}
	if ((UDAT_IS_BAD_POINTER(entries_returned))) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2));
	}
	/* the list pointer only matters when entries were requested */
	if (0 != max_to_return && (UDAT_IS_BAD_POINTER(dat_provider_list))) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3));
	}

	if (0 == max_to_return) {
		/*
		 * the user is allowed to call with max_to_return set to
		 * zero, in which case we simply return (in
		 * *entries_returned) the number of providers currently
		 * installed.  We must also (per spec) return an error.
		 */
#ifndef DAT_NO_STATIC_REGISTRY
		(void) dat_sr_size(entries_returned);
#else
		(void) dat_dr_size(entries_returned);
#endif
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG1));
	}

#ifndef DAT_NO_STATIC_REGISTRY
	return (dat_sr_list(max_to_return, entries_returned,
	    dat_provider_list));
#else
	return (dat_dr_list(max_to_return, entries_returned,
	    dat_provider_list));
#endif
}
/*
 * dapl_ep_dup_connect
 *
 * Connect ep_handle to the same remote endpoint that ep_dup_handle is
 * already connected to.  The duplicate handle is validated and must be
 * in the CONNECTED state; its remote address and connection qualifier
 * are snapshotted under the EP lock for atomicity, then the actual
 * connect is delegated to dapl_ep_connect(), which validates all
 * remaining arguments.
 */
DAT_RETURN dapl_ep_dup_connect(
	IN DAT_EP_HANDLE ep_handle,
	IN DAT_EP_HANDLE ep_dup_handle,
	IN DAT_TIMEOUT timeout,
	IN DAT_COUNT private_data_size,
	IN const DAT_PVOID private_data,
	IN DAT_QOS qos)
{
	DAPL_EP *dup_ep;
	DAT_IA_ADDRESS_PTR peer_addr;
	DAT_CONN_QUAL peer_qual;

	dup_ep = (DAPL_EP *)ep_dup_handle;

	/*
	 * Verify the dup handle; everything else is checked downstream
	 * by dapl_ep_connect.
	 */
	if (DAPL_BAD_HANDLE(ep_dup_handle, DAPL_MAGIC_EP)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_EP));
	}

	/*
	 * Pull the connection parameters out under the lock so the
	 * state check and the snapshot are atomic.
	 */
	dapl_os_lock(&dup_ep->header.lock);
	if (dup_ep->param.ep_state != DAT_EP_STATE_CONNECTED) {
		dapl_os_unlock(&dup_ep->header.lock);
		return (DAT_ERROR(DAT_INVALID_STATE,
		    dapls_ep_state_subtype(dup_ep)));
	}
	peer_addr = dup_ep->param.remote_ia_address_ptr;
	peer_qual = dup_ep->param.remote_port_qual;
	dapl_os_unlock(&dup_ep->header.lock);

	return (dapl_ep_connect(ep_handle,
	    peer_addr,
	    peer_qual,
	    timeout,
	    private_data_size,
	    private_data,
	    qos,
	    DAT_CONNECT_DEFAULT_FLAG));
}
/*
 * dapl_get_handle_type
 *
 * Classify an opaque DAT handle by inspecting the DAPL_HEADER magic
 * every DAPL object carries.  Rejects NULL/bad pointers and any magic
 * not in the known set.
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 */
DAT_RETURN DAT_API dapl_get_handle_type(IN DAT_HANDLE dat_handle,
					OUT DAT_HANDLE_TYPE * handle_type)
{
	DAPL_HEADER *hdr;

	hdr = (DAPL_HEADER *) dat_handle;

	if ((hdr == NULL) || DAPL_BAD_PTR(hdr)) {
		return DAT_ERROR(DAT_INVALID_HANDLE, 0);
	}

	switch (hdr->magic) {
	case DAPL_MAGIC_IA:
	case DAPL_MAGIC_EVD:
	case DAPL_MAGIC_EP:
	case DAPL_MAGIC_LMR:
	case DAPL_MAGIC_RMR:
	case DAPL_MAGIC_PZ:
	case DAPL_MAGIC_PSP:
	case DAPL_MAGIC_RSP:
	case DAPL_MAGIC_CR:
		/* known object: report its declared handle type */
		*handle_type = hdr->handle_type;
		return DAT_SUCCESS;
	default:
		return DAT_ERROR(DAT_INVALID_HANDLE, 0);
	}
}
/*
 * dapl_evd_set_unwaitable
 *
 * DAPL Requirements Version 1.1, 6.3.4.7
 *
 * Transition the Event Dispatcher into an unwaitable state and wake
 * any thread currently blocked in a wait on this EVD.
 *
 * Input:
 *	evd_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 */
DAT_RETURN DAT_API dapl_evd_set_unwaitable(IN DAT_EVD_HANDLE evd_handle)
{
	DAPL_EVD *evd;

	if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) {
		return DAT_ERROR(DAT_INVALID_HANDLE, 0);
	}

	evd = (DAPL_EVD *) evd_handle;

	dapl_os_lock(&evd->header.lock);
	evd->evd_waitable = DAT_FALSE;

	/*
	 * If this evd is waiting, wake it up.  There is an obvious race
	 * condition here where we may wakeup the waiter before it goes
	 * to sleep; but the wait_object allows this and will do the
	 * right thing.
	 */
	if (evd->evd_state == DAPL_EVD_STATE_WAITED) {
		/* DTO/RMR-bind EVDs block in the CQ path; others on the
		 * software wait object
		 */
		if (evd->evd_flags &
		    (DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG))
			dapls_evd_dto_wakeup(evd);
		else
			dapl_os_wait_object_wakeup(&evd->wait_object);
	}
	dapl_os_unlock(&evd->header.lock);

	return DAT_SUCCESS;
}
/*
 * dapl_os_thread_create
 *
 * Create a detached thread for dapl.  A small heap-allocated
 * thread_draft carries {func, data} to dapli_thread_init, which runs
 * OS setup before invoking the user routine.
 *
 * Input:
 *	func	function to invoke in the new thread
 *	data	argument passed to func
 *
 * Output:
 *	thread_id	handle for the created thread
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INTERNAL_ERROR		attribute setup failed
 *	DAT_INSUFFICIENT_RESOURCES	allocation or thread creation failed
 */
DAT_RETURN dapl_os_thread_create(IN void (*func) (void *),
				 IN void *data,
				 OUT DAPL_OS_THREAD * thread_id)
{
	pthread_attr_t thread_attr;
	struct thread_draft *thread_draft;
	int status;

	/*
	 * Get default set of thread attributes
	 */
	status = pthread_attr_init(&thread_attr);
	if (status != 0) {
		return DAT_ERROR(DAT_INTERNAL_ERROR, 0);
	}

	/* Create dapl threads as detached from this process */
	status = pthread_attr_setdetachstate(&thread_attr,
					     PTHREAD_CREATE_DETACHED);
	if (status != 0) {
		(void)pthread_attr_destroy(&thread_attr);
		return DAT_ERROR(DAT_INTERNAL_ERROR, 0);
	}

	thread_draft = dapl_os_alloc(sizeof(struct thread_draft));
	/* BUGFIX: the allocation result was previously dereferenced
	 * without a NULL check.
	 */
	if (thread_draft == NULL) {
		(void)pthread_attr_destroy(&thread_attr);
		return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
				 DAT_RESOURCE_MEMORY);
	}

	thread_draft->func = func;
	thread_draft->data = data;

	/* Create the thread.  Observe that we first invoke a local
	 * routine to set up OS parameters, before invoking the user
	 * specified routine.
	 */
	status = pthread_create(thread_id,
				&thread_attr,
				(void *(*)(void *))dapli_thread_init,
				(void *)thread_draft);

	/* clean up resources */
	(void)pthread_attr_destroy(&thread_attr);

	if (status != 0) {
		/* BUGFIX: thread_draft was previously leaked on this path */
		dapl_os_free(thread_draft, sizeof(struct thread_draft));
		return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, 0);
	}

	return DAT_SUCCESS;
}
/*
 * dapli_ns_create_gid_map()
 *
 * Read MAP_FILE (e.g. /usr/local/etc/ibhosts) to obtain host names and
 * GIDs, resolving each hostname to an IPv4 address and loading the
 * {IP, GID} pair into the gid map table for later lookups.
 *
 * This implementation is a simple method providing name services when
 * more advanced mechanisms (e.g. IPoIB) do not exist.
 *
 * Hosts that fail DNS lookup or are not AF_INET get a dummy IP value
 * (0x01020304) so the rest of the table still loads.
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INTERNAL_ERROR	map file could not be opened
 */
DAT_RETURN dapli_ns_create_gid_map(void)
{
	FILE *f;
	ib_gid_t gid;
	char hostname[128];
	int rc;
	struct addrinfo *addr;
	struct sockaddr_in *si;
	DAPL_GID_MAP gmt;

	f = fopen(MAP_FILE, "r");
	if (f == NULL) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ERROR: Must have file <%s> "
		    "for IP/GID mappings\n", MAP_FILE);
		return (DAT_ERROR(DAT_INTERNAL_ERROR, 0));
	}

	/*
	 * BUGFIX: use a field width ("%127s") so a long hostname cannot
	 * overflow hostname[128], and loop only on a full 3-field match
	 * ("rc == 3") instead of "rc != EOF", which could spin forever
	 * or act on partially-filled variables for a malformed line.
	 */
	rc = fscanf(f, "%127s " F64x " " F64x, hostname,
	    &gid.gid_prefix, &gid.gid_guid);
	while (rc == 3) {
		rc = dapls_osd_getaddrinfo(hostname, &addr);
		if (rc != 0) {
			/*
			 * hostname not registered in DNS,
			 * provide a dummy value
			 */
			dapl_dbg_log(DAPL_DBG_TYPE_ERR,
			    "WARNING: <%s> not registered in "
			    "DNS, using dummy IP value\n", hostname);
			gmt.ip_address = 0x01020304;
		} else {
			/*
			 * Load into the ip/gid mapping table
			 */
			si = (struct sockaddr_in *)addr->ai_addr;
			if (AF_INET == addr->ai_addr->sa_family) {
				gmt.ip_address = si->sin_addr.s_addr;
			} else {
				dapl_dbg_log(DAPL_DBG_TYPE_ERR,
				    "WARNING: <%s> Address family "
				    "not supported, using dummy "
				    "IP value\n", hostname);
				gmt.ip_address = 0x01020304;
			}
			dapls_osd_freeaddrinfo(addr);
		}
		gmt.gid.gid_prefix = gid.gid_prefix;
		gmt.gid.gid_guid = gid.gid_guid;

		dapli_ns_add_address(&gmt);
		rc = fscanf(f, "%127s " F64x " " F64x, hostname,
		    &gid.gid_prefix, &gid.gid_guid);
	}

	(void) fclose(f);
	return (DAT_SUCCESS);
}
/*
 * dapl_cno_create
 *
 * DAPL Requirements Version xxx, 6.3.4.1
 *
 * Create a consumer notification object instance.
 *
 * Input:
 *	ia_handle
 *	wait_agent
 *
 * Output:
 *	cno_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_PARAMETER
 */
DAT_RETURN
dapl_cno_create(
	IN	DAT_IA_HANDLE		ia_handle,	/* ia_handle */
	IN	DAT_OS_WAIT_PROXY_AGENT	wait_agent,	/* agent */
	OUT	DAT_CNO_HANDLE		*cno_handle)	/* cno_handle */
{
	DAPL_IA *ia_ptr;
	DAPL_CNO *cno_ptr;
	DAT_RETURN dat_status;

	ia_ptr = (DAPL_IA *)ia_handle;
	cno_ptr = NULL;
	dat_status = DAT_SUCCESS;

	if (DAPL_BAD_HANDLE(ia_handle, DAPL_MAGIC_IA)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_IA);
		goto bail;
	}

	/*
	 * BUGFIX: the output pointer was previously dereferenced
	 * (*cno_handle = ...) without validation.
	 */
	if (NULL == cno_handle) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER,
		    DAT_INVALID_ARG3);
		goto bail;
	}

	cno_ptr = dapl_cno_alloc(ia_ptr, wait_agent);
	if (!cno_ptr) {
		dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
		    DAT_RESOURCE_MEMORY);
		goto bail;
	}

	cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED;

	dat_status = dapls_ib_cno_alloc(ia_ptr, cno_ptr);
	if (dat_status != DAT_SUCCESS) {
		goto bail;
	}

	dapl_ia_link_cno(ia_ptr, cno_ptr);

	*cno_handle = cno_ptr;

bail:
	/* undo the allocation on any failure after dapl_cno_alloc */
	if (dat_status != DAT_SUCCESS && cno_ptr != NULL) {
		dapl_cno_dealloc(cno_ptr);
	}
	return (dat_status);
}
/*
 * dat_registry_remove_provider
 *
 * Provider-side registry entry point: remove a previously registered
 * provider, keyed by its provider_info, from the dynamic registry.
 *
 * Returns DAT_INVALID_PARAMETER for bad pointers, DAT_INVALID_STATE
 * if the registry is not initialized, otherwise the result of
 * dat_dr_remove().
 */
DAT_RETURN dat_registry_remove_provider(
	IN DAT_PROVIDER *provider,
	IN const DAT_PROVIDER_INFO *provider_info)
{
	dat_os_dbg_print(DAT_OS_DBG_TYPE_PROVIDER_API,
	    "DAT Registry: dat_registry_remove_provider() called\n");

	if (UDAT_IS_BAD_POINTER(provider)) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG1));
	}

	/*
	 * BUGFIX: provider_info is the removal key passed to
	 * dat_dr_remove() but was never validated, unlike in
	 * dat_registry_add_provider().
	 */
	if (UDAT_IS_BAD_POINTER(provider_info)) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2));
	}

	if (DAT_FALSE == udat_check_state()) {
		return (DAT_ERROR(DAT_INVALID_STATE, 0));
	}

	return (dat_dr_remove(provider_info));
}
/*
 * dapl_cno_free
 *
 * DAPL Requirements Version xxx, 6.3.2.2
 *
 * Destroy a consumer notification object instance.  Fails if the CNO
 * is still referenced, has waiters, or still has EVDs attached.
 *
 * Input:
 *	cno_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_STATE
 */
DAT_RETURN
dapl_cno_free(
	IN	DAT_CNO_HANDLE	cno_handle)	/* cno_handle */
{
	DAPL_CNO *cno;
	DAT_RETURN status;

	cno = (DAPL_CNO *)cno_handle;

	if (DAPL_BAD_HANDLE(cno_handle, DAPL_MAGIC_CNO)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_CNO));
	}

	/* still referenced or being waited upon: in use */
	if (cno->cno_ref_count != 0 || cno->cno_waiters != 0) {
		return (DAT_ERROR(DAT_INVALID_STATE,
		    DAT_INVALID_STATE_CNO_IN_USE));
	}

	/* EVD attachment is checked under the object lock */
	dapl_os_lock(&cno->header.lock);
	if (!dapl_llist_is_empty(&cno->evd_list_head)) {
		dapl_dbg_log(DAPL_DBG_TYPE_UTIL,
		    "cno_free: evd list not empty!\n");
		dapl_os_unlock(&cno->header.lock);
		return (DAT_ERROR(DAT_INVALID_STATE,
		    DAT_INVALID_STATE_CNO_IN_USE));
	}
	dapl_os_unlock(&cno->header.lock);

	status = dapls_ib_cno_free(cno);
	if (status != DAT_SUCCESS) {
		return (status);
	}

	dapl_ia_unlink_cno(cno->header.owner_ia, cno);
	dapl_cno_dealloc(cno);

	return (DAT_SUCCESS);
}
/*
 * dapl_os_wait_object_wakeup
 *
 * Wake a thread waiting on the wait object: set the signaled flag
 * under the lock, then signal the condition variable.
 *
 * Input:
 *	wait_obj
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INTERNAL_ERROR
 */
DAT_RETURN dapl_os_wait_object_wakeup(IN DAPL_OS_WAIT_OBJECT * wait_obj)
{
	int rc;

	/* flag must be flipped under the mutex so waiters see it */
	pthread_mutex_lock(&wait_obj->lock);
	wait_obj->signaled = DAT_TRUE;
	pthread_mutex_unlock(&wait_obj->lock);

	rc = pthread_cond_signal(&wait_obj->cv);
	if (rc != 0) {
		return DAT_ERROR(DAT_INTERNAL_ERROR, 0);
	}

	return DAT_SUCCESS;
}
/*
 * dapl_os_wait_object_init
 *
 * Initialize a wait object: clear the signaled flag, create the
 * condition variable, then the mutex.
 *
 * Input:
 *	wait_obj
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INTERNAL_ERROR
 */
DAT_RETURN dapl_os_wait_object_init(IN DAPL_OS_WAIT_OBJECT * wait_obj)
{
	wait_obj->signaled = DAT_FALSE;

	if (pthread_cond_init(&wait_obj->cv, NULL) != 0) {
		return DAT_ERROR(DAT_INTERNAL_ERROR, 0);
	}

	/* pthread_mutex_init with default attributes always returns 0 */
	(void) pthread_mutex_init(&wait_obj->lock, NULL);

	return DAT_SUCCESS;
}
/*
 * dapl_pz_query
 *
 * DAPL Requirements Version xxx, 6.6.2.1
 *
 * Return the IA associated with the protection zone.
 *
 * Input:
 *	pz_handle
 *	pz_param_mask
 *
 * Output:
 *	pz_param
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_PARAMETER
 */
DAT_RETURN
dapl_pz_query(
	IN DAT_PZ_HANDLE pz_handle,
	IN DAT_PZ_PARAM_MASK pz_param_mask,
	OUT DAT_PZ_PARAM *pz_param)
{
	DAPL_PZ *pz_ptr;

	dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_pz_query (%p, %x, %p)\n",
	    pz_handle, pz_param_mask, pz_param);

	if (DAPL_BAD_HANDLE(pz_handle, DAPL_MAGIC_PZ)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_PZ));
	}
	if (NULL == pz_param) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3));
	}
	/* reject any bits outside the defined field set */
	if (pz_param_mask & ~DAT_PZ_FIELD_ALL) {
		return (DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2));
	}

	pz_ptr = (DAPL_PZ *) pz_handle;

	/* Since the DAT_PZ_ARGS values are easily accessible, */
	/* don't bother checking the DAT_PZ_ARGS_MASK value */
	pz_param->ia_handle = (DAT_IA_HANDLE) pz_ptr->header.owner_ia;

	return (DAT_SUCCESS);
}
/* * dapl_rmr_bind * * DAPL Requirements Version xxx, 6.6.4.4 * * Bind the RMR to the specified memory region within the LMR and * provide a new rmr_context value. * * Input: * Output: */ DAT_RETURN dapl_rmr_bind( IN DAT_RMR_HANDLE rmr_handle, IN const DAT_LMR_TRIPLET *lmr_triplet, IN DAT_MEM_PRIV_FLAGS mem_priv, IN DAT_EP_HANDLE ep_handle, IN DAT_RMR_COOKIE user_cookie, IN DAT_COMPLETION_FLAGS completion_flags, OUT DAT_RMR_CONTEXT *rmr_context) { DAPL_RMR *rmr; DAPL_EP *ep_ptr; if (DAPL_BAD_HANDLE(rmr_handle, DAPL_MAGIC_RMR)) { return (DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_RMR)); } if (DAPL_BAD_HANDLE(ep_handle, DAPL_MAGIC_EP)) { return (DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP)); } rmr = (DAPL_RMR *) rmr_handle; ep_ptr = (DAPL_EP *) ep_handle; /* if the rmr should be bound */ if (0 != lmr_triplet->segment_length) { return (dapli_rmr_bind_fuse(rmr, lmr_triplet, mem_priv, ep_ptr, user_cookie, completion_flags, rmr_context)); } else { /* the rmr should be unbound */ return (dapli_rmr_bind_unfuse(rmr, lmr_triplet, ep_ptr, user_cookie, completion_flags)); } }
/*
 * dapl_srq_query
 *
 * DAPL Requirements Version 1.2, 6.5.6
 *
 * Return SRQ parameters to the consumer.
 *
 * Input:
 *	srq_handle
 *	srq_param_mask
 *
 * Output:
 *	srq_param
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_PARAMETER
 */
DAT_RETURN DAT_API dapl_srq_query(IN DAT_SRQ_HANDLE srq_handle,
				  IN DAT_SRQ_PARAM_MASK srq_param_mask,
				  OUT DAT_SRQ_PARAM * srq_param)
{
	DAPL_SRQ *srq;

	dapl_dbg_log(DAPL_DBG_TYPE_API,
		     "dapl_srq_query (%p, %x, %p)\n",
		     srq_handle, srq_param_mask, srq_param);

	if (DAPL_BAD_HANDLE(srq_handle, DAPL_MAGIC_SRQ)) {
		return DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_SRQ);
	}
	if (srq_param == NULL) {
		return DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG3);
	}

	srq = (DAPL_SRQ *) srq_handle;

	/*
	 * XXX Need to calculate available_dto_count and
	 * outstanding_dto_count; until then report them as unknown.
	 */
	srq->param.available_dto_count = DAT_VALUE_UNKNOWN;
	srq->param.outstanding_dto_count = DAT_VALUE_UNKNOWN;

	*srq_param = srq->param;
	/* translate the internal IA pointer to the consumer handle */
	dats_get_ia_handle(srq->header.owner_ia, &srq_param->ia_handle);

	return DAT_SUCCESS;
}
/*
 * dapli_ns_add_address
 *
 * Append an entry to the gid_map_table.  The table is scanned for the
 * first slot whose ip_address is zero (the terminating sentinel); the
 * new entry is written there.
 *
 * Input:
 *	gme	entry (IP address + GID) to insert
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES	table is full
 */
DAT_RETURN dapli_ns_add_address(
	IN DAPL_GID_MAP *gme)
{
	DAPL_GID_MAP *gmt;
	int count;

	/* find the first free slot; count is its index */
	for (count = 0, gmt = g_gid_map_table; gmt->ip_address; gmt++) {
		count++;
	}

	/*
	 * BUGFIX: was "count > MAX_GID_ENTRIES", which allowed a write
	 * into slot MAX_GID_ENTRIES itself — one past the last usable
	 * entry, clobbering the zero sentinel the scan above relies on.
	 */
	if (count >= MAX_GID_ENTRIES) {
		return (DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, 0));
	}

	*gmt = *gme;
	return (DAT_SUCCESS);
}
/*
 * dapl_evd_disable
 *
 * Mark the Event Dispatcher as disabled (no CNO notification).
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 */
DAT_RETURN DAT_API dapl_evd_disable(IN DAT_EVD_HANDLE evd_handle)
{
	DAPL_EVD *evd;

	if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) {
		return DAT_ERROR(DAT_INVALID_HANDLE, 0);
	}

	evd = (DAPL_EVD *) evd_handle;
	evd->evd_enabled = DAT_FALSE;

	return DAT_SUCCESS;
}
/*
 * dapl_cr_handoff
 *
 * Hand a connection request off to another connection qualifier, then
 * remove the CR from its service point and free it.  The handoff
 * status is returned regardless of the cleanup.
 */
DAT_RETURN dapl_cr_handoff(
	IN DAT_CR_HANDLE cr_handle,
	IN DAT_CONN_QUAL cr_handoff)	/* handoff */
{
	DAPL_CR *cr;
	DAT_RETURN status;

	if (DAPL_BAD_HANDLE(cr_handle, DAPL_MAGIC_CR)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_CR));
	}

	cr = (DAPL_CR *)cr_handle;
	status = dapls_ib_handoff_connection(cr, cr_handoff);

	/* Remove the CR from the queue, then free it */
	dapl_sp_remove_cr(cr->sp_ptr, cr);
	dapls_cr_free(cr);

	return (status);
}
/*
 * dapl_rmr_free
 *
 * DAPL Requirements Version xxx, 6.6.4.2
 *
 * Destroy an instance of the Remote Memory Region.  If the consumer
 * never unbound the RMR, the LMR reference taken at bind time is
 * dropped here first.
 *
 * Input:
 *	rmr_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_PARAMETER
 */
DAT_RETURN dapl_rmr_free(IN DAT_RMR_HANDLE rmr_handle)
{
	DAPL_RMR *rmr;
	DAT_RETURN status;

	if (DAPL_BAD_HANDLE(rmr_handle, DAPL_MAGIC_RMR)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_RMR));
	}

	rmr = (DAPL_RMR *)rmr_handle;

	/*
	 * If the user did not perform an unbind op, release
	 * counts here.
	 */
	if (rmr->param.lmr_triplet.virtual_address != 0) {
		(void) dapl_os_atomic_dec(&rmr->lmr->lmr_ref_count);
		rmr->param.lmr_triplet.virtual_address = 0;
	}

	status = dapls_ib_mw_free(rmr);
	if (status != DAT_SUCCESS) {
		return (status);
	}

	/* drop the PZ reference and release the object */
	dapl_os_atomic_dec(&rmr->pz->pz_ref_count);
	dapl_rmr_dealloc(rmr);

	return (DAT_SUCCESS);
}
/*
 * dapls_ns_lookup_address
 *
 * Look up the provided IA_ADDRESS (treated as a sockaddr_in) in the
 * gid_map_table and return the matching GID.
 *
 * Input:
 *	ia_ptr			unused here
 *	remote_ia_address	remote IP address
 *	timeout			unused here
 *
 * Output:
 *	gid			filled in GID on success
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_PARAMETER	address not found in the table
 */
DAT_RETURN
dapls_ns_lookup_address(
	IN DAPL_IA *ia_ptr,
	IN DAT_IA_ADDRESS_PTR remote_ia_address,
	IN DAT_TIMEOUT timeout,
	OUT ib_gid_t *gid)
{
	DAPL_GID_MAP *entry;
	struct sockaddr_in *sin;

	/* unused here */
	ia_ptr = ia_ptr;

	sin = (struct sockaddr_in *)remote_ia_address;

	/* linear scan; the table is terminated by a zero ip_address */
	for (entry = g_gid_map_table; entry->ip_address; entry++) {
		if (entry->ip_address == sin->sin_addr.s_addr) {
			gid->gid_guid = entry->gid.gid_guid;
			gid->gid_prefix = entry->gid.gid_prefix;
			return (DAT_SUCCESS);
		}
	}

	return (DAT_ERROR(DAT_INVALID_PARAMETER, 0));
}
/*
 * dapl_os_wait_object_wait
 *
 * Block until the wait object is signaled or the timeout (in
 * microseconds) expires.  DAT_TIMEOUT_INFINITE selects a plain
 * pthread_cond_wait loop; otherwise the timeout is converted to an
 * absolute timespec for pthread_cond_timedwait.  The signaled flag is
 * consumed (reset) on a successful wakeup.
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_TIMEOUT_EXPIRED
 *	DAT_INTERRUPTED_CALL
 *	DAT_INTERNAL_ERROR
 */
DAT_RETURN dapl_os_wait_object_wait(IN DAPL_OS_WAIT_OBJECT * wait_obj,
				    IN DAT_TIMEOUT timeout_val)
{
	DAT_RETURN dat_status;
	int pthread_status;
	struct timespec future;

	dat_status = DAT_SUCCESS;
	pthread_status = 0;

	if (timeout_val != DAT_TIMEOUT_INFINITE) {
		struct timeval now;
		struct timezone tz;
		unsigned int microsecs;

		/* compute the absolute deadline: now + timeout_val usec */
		gettimeofday(&now, &tz);

#define	USEC_PER_SEC 1000000
		microsecs = now.tv_usec + timeout_val;
		now.tv_sec = now.tv_sec + microsecs / USEC_PER_SEC;
		now.tv_usec = microsecs % USEC_PER_SEC;

		/* Convert timeval to timespec */
		future.tv_sec = now.tv_sec;
		future.tv_nsec = now.tv_usec * 1000;

		pthread_mutex_lock(&wait_obj->lock);
		/* loop guards against spurious wakeups */
		while (wait_obj->signaled == DAT_FALSE &&
		       pthread_status == 0) {
			pthread_status =
			    pthread_cond_timedwait(&wait_obj->cv,
						   &wait_obj->lock, &future);

			/*
			 * No need to reset &future if we go around the
			 * loop; It's an absolute time.
			 */
		}
		/* Reset the signaled status if we were woken up. */
		if (pthread_status == 0) {
			wait_obj->signaled = DAT_FALSE;
		}
		pthread_mutex_unlock(&wait_obj->lock);
	} else {
		/* infinite wait: no deadline computation needed */
		pthread_mutex_lock(&wait_obj->lock);
		while (wait_obj->signaled == DAT_FALSE &&
		       pthread_status == 0) {
			pthread_status =
			    pthread_cond_wait(&wait_obj->cv,
					      &wait_obj->lock);
		}
		/* Reset the signaled status if we were woken up. */
		if (pthread_status == 0) {
			wait_obj->signaled = DAT_FALSE;
		}
		pthread_mutex_unlock(&wait_obj->lock);
	}

	/* map pthread errors onto DAT status codes */
	if (ETIMEDOUT == pthread_status) {
		dat_status = DAT_ERROR(DAT_TIMEOUT_EXPIRED, 0);
	} else if (EINTR == pthread_status) {
		dat_status = DAT_ERROR(DAT_INTERRUPTED_CALL, 0);
	} else if (0 != pthread_status) {
		dat_status = DAT_ERROR(DAT_INTERNAL_ERROR, 0);
	}

	return dat_status;
}
/*
 * dapl_evd_resize
 *
 * Grow an Event Dispatcher's queue to hold at least req_evd_qlen
 * events.  The actual size is rounded up to a power of two starting
 * from DAPL_MIN_RESZ_QLEN, capped at the IA's max_evd_qlen.  New
 * event storage and ring buffers are built first; pending events are
 * migrated; only then are the old structures swapped out, so failure
 * before the swap leaves the EVD untouched.
 *
 * NOTE(review): shrinking is rejected (DAT_INVALID_STATE) and no EVD
 * lock is taken here — presumably callers serialize against event
 * delivery; confirm against the rest of the EVD code.
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_STATE
 *	DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN dapl_evd_resize(
	IN DAT_EVD_HANDLE evd_handle,
	IN DAT_COUNT req_evd_qlen)
{
	int i;
	DAPL_EVD *evd_ptr;
	DAT_EVENT *event_ptr;
	DAT_EVENT *eventp;
	DAT_EVENT *event;
	DAT_EVENT *new_event;
	DAPL_RING_BUFFER free_event_queue;
	DAPL_RING_BUFFER pending_event_queue;
	DAT_RETURN dat_status;
	DAT_COUNT max_evd_qlen;
	DAT_COUNT evd_qlen;

	evd_ptr = (DAPL_EVD *)evd_handle;
	dat_status = DAT_SUCCESS;

	if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) {
		return (DAT_ERROR(DAT_INVALID_HANDLE, 0));
	}

	/* shrinking is not supported */
	if (req_evd_qlen < evd_ptr->qlen) {
		return (DAT_ERROR(DAT_INVALID_STATE, 0));
	}
	/* nothing to do if the size is unchanged */
	if (req_evd_qlen == evd_ptr->qlen) {
		return (DAT_SUCCESS);
	}

	max_evd_qlen = evd_ptr->header.owner_ia->hca_ptr->ia_attr.max_evd_qlen;
	if (req_evd_qlen > max_evd_qlen) {
		return (DAT_ERROR(DAT_INVALID_STATE, 0));
	}

	/* round up to the next power of two, capped at max_evd_qlen */
	evd_qlen = DAPL_MIN_RESZ_QLEN;
	while (req_evd_qlen > evd_qlen) {
		evd_qlen <<= 1;
		if (evd_qlen > max_evd_qlen)
			evd_qlen = max_evd_qlen;
	}

	/* Allocate EVENTs */
	event_ptr = (DAT_EVENT *) dapl_os_alloc(evd_qlen *
	    sizeof (DAT_EVENT));
	if (!event_ptr) {
		return (DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
		    DAT_RESOURCE_MEMORY));
	}

	/* allocate free event queue */
	dat_status = dapls_rbuf_alloc(&free_event_queue, evd_qlen);
	if (dat_status != DAT_SUCCESS) {
		goto bail;
	}

	/* allocate pending event queue */
	dat_status = dapls_rbuf_alloc(&pending_event_queue, evd_qlen);
	if (dat_status != DAT_SUCCESS) {
		goto bail;
	}

	/* need to resize the cq only for DTO/BIND evds */
	if (0 != (evd_ptr->evd_flags & ~ (DAT_EVD_SOFTWARE_FLAG |
	    DAT_EVD_CONNECTION_FLAG | DAT_EVD_CR_FLAG))) {
		dat_status = dapls_ib_cq_resize(evd_ptr, evd_qlen);
		if (dat_status != DAT_SUCCESS)
			goto bail;
	}

	/* add events to free event queue */
	for (i = 0, eventp = event_ptr; i < evd_qlen; i++) {
		(void) dapls_rbuf_add(&free_event_queue, (void *)eventp);
		eventp++;
	}

	/*
	 * copy pending events from evd to the new pending event queue;
	 * each drained old event slot is returned to the OLD free
	 * queue so the old rings stay consistent until destroyed.
	 */
	while (event = (DAT_EVENT *)
	    dapls_rbuf_remove(&evd_ptr->pending_event_queue)) {
		new_event = (DAT_EVENT *)
		    dapls_rbuf_remove(&free_event_queue);
		dapl_os_assert(new_event);
		(void) dapl_os_memcpy(new_event, event, sizeof (DAT_EVENT));

		dat_status = dapls_rbuf_add(&pending_event_queue, new_event);
		dapl_os_assert(dat_status == DAT_SUCCESS);

		dat_status = dapls_rbuf_add(&evd_ptr->free_event_queue,
		    event);
		dapl_os_assert(dat_status == DAT_SUCCESS);
	}

	/* swap: tear down the old rings and install the new storage */
	dapls_rbuf_destroy(&evd_ptr->free_event_queue);
	dapls_rbuf_destroy(&evd_ptr->pending_event_queue);
	if (evd_ptr->events) {
		dapl_os_free(evd_ptr->events,
		    evd_ptr->qlen * sizeof (DAT_EVENT));
	}
	evd_ptr->events = event_ptr;
	evd_ptr->free_event_queue = free_event_queue;
	evd_ptr->pending_event_queue = pending_event_queue;
	evd_ptr->qlen = evd_qlen;

	return (DAT_SUCCESS);
bail:
	/*
	 * If we are here means event_ptr was allocd but something else
	 * failed
	 */
	dapl_os_free(event_ptr, evd_qlen * sizeof (DAT_EVENT));
	dapls_rbuf_destroy(&free_event_queue);
	dapls_rbuf_destroy(&pending_event_queue);
	return (dat_status);
}
/*
 * dapl_evd_create
 *
 * Create an Event Dispatcher.  Validates the IA/CNO handles and the
 * queue length, checks the requested combination of EVD flags against
 * the provider's evd_stream_merging_supported matrix, then delegates
 * the actual construction to dapls_evd_internal_create().
 *
 * Input:
 *	ia_handle, evd_min_qlen, cno_handle, evd_flags
 *
 * Output:
 *	evd_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INVALID_HANDLE
 *	DAT_INVALID_PARAMETER
 *	DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN DAT_API dapl_evd_create(IN DAT_IA_HANDLE ia_handle,
				   IN DAT_COUNT evd_min_qlen,
				   IN DAT_CNO_HANDLE cno_handle,
				   IN DAT_EVD_FLAGS evd_flags,
				   OUT DAT_EVD_HANDLE * evd_handle)
{
	DAPL_IA *ia_ptr;
	DAPL_EVD *evd_ptr;
	DAPL_CNO *cno_ptr;
	DAT_RETURN dat_status;
	DAT_PROVIDER_ATTR provider_attr;
	int i;
	int j;
	int flag_mask[6];

	dapl_dbg_log(DAPL_DBG_TYPE_API,
		     "dapl_evd_create (%p, %d, %p, 0x%x, %p)\n",
		     ia_handle, evd_min_qlen, cno_handle, evd_flags,
		     evd_handle);

	ia_ptr = (DAPL_IA *) ia_handle;
	cno_ptr = (DAPL_CNO *) cno_handle;
	evd_ptr = NULL;
	*evd_handle = NULL;
	dat_status = DAT_SUCCESS;

	if (DAPL_BAD_HANDLE(ia_handle, DAPL_MAGIC_IA)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA);
		goto bail;
	}
	DAPL_CNTR(ia_ptr, DCNT_IA_EVD_CREATE);

	/* queue length must be positive and within the IA's limit */
	if (evd_min_qlen <= 0) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
		goto bail;
	}
	if (evd_min_qlen > ia_ptr->hca_ptr->ia_attr.max_evd_qlen) {
		dat_status =
		    DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
			      DAT_RESOURCE_TEVD);
		goto bail;
	}

	/* CNO is optional; validate only when supplied */
	if (cno_handle != DAT_HANDLE_NULL
	    && DAPL_BAD_HANDLE(cno_handle, DAPL_MAGIC_CNO)) {
		dat_status =
		    DAT_ERROR(DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_CNO);
		goto bail;
	}

	/*
	 * Check the merging attributes to ensure the combination of
	 * flags requested is supported.
	 */
	dapl_ia_query(ia_handle, NULL,
		      0, NULL, DAT_PROVIDER_FIELD_ALL, &provider_attr);

	/* Set up an array of flags to compare against; the EVD bits are
	 * a sparse array that need to be mapped to the merging flags
	 */
	flag_mask[0] = DAT_EVD_SOFTWARE_FLAG;
	flag_mask[1] = DAT_EVD_CR_FLAG;
	flag_mask[2] = DAT_EVD_DTO_FLAG;
	flag_mask[3] = DAT_EVD_CONNECTION_FLAG;
	flag_mask[4] = DAT_EVD_RMR_BIND_FLAG;
	flag_mask[5] = DAT_EVD_ASYNC_FLAG;

	/* every pair of requested stream types must be mergeable per
	 * the provider's matrix
	 */
	for (i = 0; i < 6; i++) {
		if (flag_mask[i] & evd_flags) {
			for (j = 0; j < 6; j++) {
				if (flag_mask[j] & evd_flags) {
					if (provider_attr.
					    evd_stream_merging_supported[i][j]
					    == DAT_FALSE) {
						dat_status =
						    DAT_ERROR
						    (DAT_INVALID_PARAMETER,
						     DAT_INVALID_ARG4);
						goto bail;
					}
				}
			}	/* end for j */
		}
	}			/* end for i */

	dat_status = dapls_evd_internal_create(ia_ptr,
					       cno_ptr,
					       evd_min_qlen,
					       evd_flags, &evd_ptr);
	if (dat_status != DAT_SUCCESS) {
		goto bail;
	}

	evd_ptr->evd_state = DAPL_EVD_STATE_OPEN;

	*evd_handle = (DAT_EVD_HANDLE) evd_ptr;

      bail:
	/* destroy a partially constructed EVD on any failure */
	if (dat_status != DAT_SUCCESS) {
		if (evd_ptr) {
			dapl_evd_free(evd_ptr);
		}
	}

	dapl_dbg_log(DAPL_DBG_TYPE_RTN,
		     "dapl_evd_create () returns 0x%x\n", dat_status);

	return dat_status;
}
/*
 * dapl_psp_create
 *
 * uDAPL: User Direct Access Program Library Version 1.1, 6.4.1.1
 *
 * Create a persistent Public Service Point that can receive multiple
 * requests for connections and generate multiple connection request
 * instances that will be delivered to the specified Event Dispatcher
 * in a notification event.  A quiescent listener for the same
 * connection qualifier is reused when one exists; otherwise a new SP
 * is allocated, linked to the IA, and a connection listener is set up.
 *
 * Input:
 *	ia_handle
 *	conn_qual
 *	evd_handle	must be a CR EVD
 *	psp_flags
 *
 * Output:
 *	psp_handle
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_PARAMETER
 *	DAT_CONN_QUAL_IN_USE
 *	DAT_MODEL_NOT_SUPPORTED
 */
DAT_RETURN
dapl_psp_create(
	IN DAT_IA_HANDLE ia_handle,
	IN DAT_CONN_QUAL conn_qual,
	IN DAT_EVD_HANDLE evd_handle,
	IN DAT_PSP_FLAGS psp_flags,
	OUT DAT_PSP_HANDLE *psp_handle)
{
	DAPL_IA *ia_ptr;
	DAPL_SP *sp_ptr;
	DAPL_EVD *evd_ptr;
	DAT_BOOLEAN sp_found;
	DAT_RETURN dat_status;

	ia_ptr = (DAPL_IA *)ia_handle;
	dat_status = DAT_SUCCESS;

	if (DAPL_BAD_HANDLE(ia_ptr, DAPL_MAGIC_IA)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_IA);
		goto bail;
	}
	if (DAPL_BAD_HANDLE(evd_handle, DAPL_MAGIC_EVD)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_EVD_CR);
		goto bail;
	}
	if (psp_handle == NULL) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER,
		    DAT_INVALID_ARG5);
		goto bail;
	}

	/* check for invalid psp flags */
	if ((psp_flags != DAT_PSP_CONSUMER_FLAG) &&
	    (psp_flags != DAT_PSP_PROVIDER_FLAG)) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER,
		    DAT_INVALID_ARG4);
		goto bail;
	}

	/* the EVD must be capable of delivering CR events */
	evd_ptr = (DAPL_EVD *)evd_handle;
	if (!(evd_ptr->evd_flags & DAT_EVD_CR_FLAG)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_EVD_CR);
		goto bail;
	}

	/*
	 * check for connection qualifier eq 0
	 * in IB this is called Null Service ID, use of it in CM is invalid.
	 * in tcp/udp, port number 0 is reserved.
	 */
	if (!conn_qual) {
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER,
		    DAT_INVALID_ARG2);
		goto bail;
	}

	/*
	 * See if we have a quiescent listener to use for this PSP, else
	 * create one and set it listening
	 */
	sp_ptr = dapls_ia_sp_search(ia_ptr, conn_qual, DAT_TRUE);
	sp_found = DAT_TRUE;
	if (sp_ptr == NULL) {
		/* Allocate PSP */
		sp_found = DAT_FALSE;
		sp_ptr = dapls_sp_alloc(ia_ptr, DAT_TRUE);
		if (sp_ptr == NULL) {
			dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
			    DAT_RESOURCE_MEMORY);
			goto bail;
		}
	} else if (sp_ptr->listening == DAT_TRUE) {
		/* an active listener already owns this conn_qual */
		dat_status = DAT_ERROR(DAT_CONN_QUAL_IN_USE, 0);
		goto bail;
	}

	/*
	 * Fill out the args for a PSP
	 */
	sp_ptr->ia_handle = ia_handle;
	sp_ptr->conn_qual = conn_qual;
	sp_ptr->evd_handle = evd_handle;
	sp_ptr->psp_flags = psp_flags;
	sp_ptr->ep_handle = NULL;

	/*
	 * Take a reference on the EVD handle
	 */
	(void) dapl_os_atomic_inc(&((DAPL_EVD *)evd_handle)->evd_ref_count);

	/*
	 * Set up a listener for a connection.  Connections can arrive
	 * even before this call returns!
	 */
	sp_ptr->state = DAPL_SP_STATE_PSP_LISTENING;
	sp_ptr->listening = DAT_TRUE;

	/*
	 * If this is a new sp we need to add it to the IA queue, and set up
	 * a conn_listener.
	 */
	if (sp_found == DAT_FALSE) {
		/* Link it onto the IA */
		dapl_ia_link_psp(ia_ptr, sp_ptr);

		dat_status = dapls_ib_setup_conn_listener(ia_ptr,
		    conn_qual, sp_ptr);
		if (dat_status != DAT_SUCCESS) {
			/*
			 * Have a problem setting up the connection, something
			 * wrong!  The psp_free decrements the EVD refcount for
			 * us; we don't need to do that.
			 * But we want to set the listener bits to false,
			 * as we know that call failed.
			 */
			sp_ptr->state = DAPL_SP_STATE_FREE;
			sp_ptr->listening = DAT_FALSE;
			(void) dapl_psp_free((DAT_PSP_HANDLE) sp_ptr);

			dapl_dbg_log(DAPL_DBG_TYPE_CM,
			    "--> dapl_psp_create setup_conn_listener failed: "
			    "%x\n", dat_status);
			goto bail;
		}
	}

	/*
	 * Return handle to the user
	 */
	*psp_handle = (DAT_PSP_HANDLE)sp_ptr;

bail:
	return (dat_status);
}
/*
 * dapl_ep_connect
 *
 * DAPL Requirements Version xxx, 6.5.7
 *
 * Request a connection be established between the local Endpoint
 * and a remote Endpoint. This operation is used by the active/client
 * side of a connection
 *
 * Input:
 *	ep_handle
 *	remote_ia_address
 *	remote_conn_qual
 *	timeout
 *	private_data_size
 *	private_data
 *	qos
 *	connect_flags
 *
 * Output:
 *	None
 *
 * Returns:
 *	DAT_SUCCESS
 *	DAT_INSUFFICIENT_RESOURCES
 *	DAT_INVALID_PARAMETER
 *	DAT_MODEL_NOT_SUPPORTED
 */
DAT_RETURN
dapl_ep_connect(
	IN DAT_EP_HANDLE ep_handle,
	IN DAT_IA_ADDRESS_PTR remote_ia_address,
	IN DAT_CONN_QUAL remote_conn_qual,
	IN DAT_TIMEOUT timeout,
	IN DAT_COUNT private_data_size,
	IN const DAT_PVOID private_data,
	IN DAT_QOS qos,
	IN DAT_CONNECT_FLAGS connect_flags)
{
	DAPL_EP *ep_ptr;
	DAPL_PRIVATE prd;	/* local copy of the consumer private data */
	DAPL_EP alloc_ep;	/* scratch EP used to allocate a QP unlocked */
	DAT_RETURN dat_status;

	/*
	 * NOTE(review): this trace dereferences remote_ia_address before
	 * any argument validation; a NULL address would fault here rather
	 * than return DAT_INVALID_PARAMETER — confirm callers guarantee
	 * a valid pointer.
	 */
	dapl_dbg_log(DAPL_DBG_TYPE_API | DAPL_DBG_TYPE_CM,
	    "dapl_ep_connect (%p, {%u.%u.%u.%u}, %X, %d, %d, %p, %x, %x)\n",
	    ep_handle, remote_ia_address->sa_data[2],
	    remote_ia_address->sa_data[3], remote_ia_address->sa_data[4],
	    remote_ia_address->sa_data[5], remote_conn_qual,
	    timeout, private_data_size, private_data, qos, connect_flags);

	dat_status = DAT_SUCCESS;
	ep_ptr = (DAPL_EP *) ep_handle;

	/*
	 * Verify parameter & state. The connection handle must be good
	 * at this point.
	 */
	if (DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_EP);
		goto bail;
	}
	if (DAPL_BAD_HANDLE(ep_ptr->param.connect_evd_handle,
	    DAPL_MAGIC_EVD)) {
		dat_status = DAT_ERROR(DAT_INVALID_HANDLE,
		    DAT_INVALID_HANDLE_EVD_CONN);
		goto bail;
	}

	/*
	 * If the endpoint needs a QP, associated the QP with it.
	 * This needs to be done carefully, in order to:
	 *	* Avoid allocating under a lock.
	 *	* Not step on data structures being altered by
	 *	  routines with which we are racing.
	 * So we:
	 *	* Confirm that a new QP is needed and is not forbidden by the
	 *	  current state.
	 *	* Allocate it into a separate EP.
	 *	* Take the EP lock.
	 *	* Reconfirm that the EP is in a state where it needs a QP.
	 *	* Assign the QP and release the lock.
	 */
	if (ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED) {
		/* A QP cannot be created without a valid PZ. */
		if (ep_ptr->param.pz_handle == NULL ||
		    DAPL_BAD_HANDLE(ep_ptr->param.pz_handle, DAPL_MAGIC_PZ)) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_EP_NOTREADY);
			goto bail;
		}

		/* Allocate into a scratch copy, outside the EP lock. */
		alloc_ep = *ep_ptr;

		dat_status = dapls_ib_qp_alloc(ep_ptr->header.owner_ia,
		    &alloc_ep, ep_ptr);
		if (dat_status != DAT_SUCCESS) {
			dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
			    DAT_RESOURCE_MEMORY);
			goto bail;
		}

		dapl_os_lock(&ep_ptr->header.lock);
		/*
		 * PZ shouldn't have changed since we're only racing with
		 * dapl_cr_accept()
		 */
		if (ep_ptr->qp_state != DAPL_QP_STATE_UNATTACHED) {
			/*
			 * Lost the race: someone else attached a QP while
			 * we were allocating.  Bail, cleaning up the one
			 * we made.
			 */
			dapl_os_unlock(&ep_ptr->header.lock);
			dat_status = dapls_ib_qp_free(ep_ptr->header.owner_ia,
			    &alloc_ep);
			if (dat_status != DAT_SUCCESS) {
				dapl_dbg_log(DAPL_DBG_TYPE_WARN,
				    "ep_connect: ib_qp_free failed with %x\n",
				    dat_status);
			}
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    dapls_ep_state_subtype(ep_ptr));
			goto bail;
		}
		/* Publish the new QP into the real EP, then drop the lock. */
		ep_ptr->qp_handle = alloc_ep.qp_handle;
		ep_ptr->qpn = alloc_ep.qpn;
		ep_ptr->qp_state = alloc_ep.qp_state;

		dapl_os_unlock(&ep_ptr->header.lock);
	}

	/*
	 * We do state checks and transitions under lock.
	 * The only code we're racing against is dapl_cr_accept.
	 */
	dapl_os_lock(&ep_ptr->header.lock);

	/*
	 * Verify the attributes of the EP handle before we connect it. Test
	 * all of the handles to make sure they are currently valid.
	 * Specifically:
	 *	pz_handle		required
	 *	recv_evd_handle		optional, but must be valid
	 *	request_evd_handle	optional, but must be valid
	 *	connect_evd_handle	required, must accept connection events
	 */
	if (ep_ptr->param.pz_handle == NULL ||
	    DAPL_BAD_HANDLE(ep_ptr->param.pz_handle, DAPL_MAGIC_PZ) ||
	    ep_ptr->param.connect_evd_handle == NULL ||
	    DAPL_BAD_HANDLE(ep_ptr->param.connect_evd_handle,
	    DAPL_MAGIC_EVD) ||
	    !(((DAPL_EVD *)ep_ptr->param.connect_evd_handle)->evd_flags &
	    DAT_EVD_CONNECTION_FLAG) ||
	    (ep_ptr->param.recv_evd_handle != DAT_HANDLE_NULL &&
	    (DAPL_BAD_HANDLE(ep_ptr->param.recv_evd_handle,
	    DAPL_MAGIC_EVD))) ||
	    (ep_ptr->param.request_evd_handle != DAT_HANDLE_NULL &&
	    (DAPL_BAD_HANDLE(ep_ptr->param.request_evd_handle,
	    DAPL_MAGIC_EVD)))) {
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_INVALID_STATE,
		    DAT_INVALID_STATE_EP_NOTREADY);
		goto bail;
	}

	/*
	 * Check both the EP state and the QP state: if we don't have a QP
	 * we need to attach one now.  (The unlocked path above may have
	 * been skipped; this time we allocate directly under the lock.)
	 */
	if (ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED) {
		dat_status = dapls_ib_qp_alloc(ep_ptr->header.owner_ia,
		    ep_ptr, ep_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_os_unlock(&ep_ptr->header.lock);
			dat_status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
			    DAT_RESOURCE_TEP);
			goto bail;
		}
	}

	if (ep_ptr->param.ep_state != DAT_EP_STATE_UNCONNECTED) {
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_INVALID_STATE,
		    dapls_ep_state_subtype(ep_ptr));
		goto bail;
	}

	if (qos != DAT_QOS_BEST_EFFORT ||
	    connect_flags != DAT_CONNECT_DEFAULT_FLAG) {
		/*
		 * At this point we only support one QOS level
		 */
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_MODEL_NOT_SUPPORTED, 0);
		goto bail;
	}

	/*
	 * Verify the private data size doesn't exceed the max.
	 * NOTE(review): if DAT_COUNT is signed, a negative
	 * private_data_size passes this check — confirm upstream
	 * validation or the signedness of DAT_COUNT.
	 */
	if (private_data_size > DAPL_CONSUMER_MAX_PRIVATE_DATA_SIZE) {
		dapl_os_unlock(&ep_ptr->header.lock);
		dat_status = DAT_ERROR(DAT_INVALID_PARAMETER,
		    DAT_INVALID_ARG5);
		goto bail;
	}

	/*
	 * transition the state before requesting a connection to avoid
	 * race conditions
	 */
	ep_ptr->param.ep_state = DAT_EP_STATE_ACTIVE_CONNECTION_PENDING;

	/*
	 * At this point we're committed, and done with the endpoint
	 * except for the connect, so we can drop the lock.
	 */
	dapl_os_unlock(&ep_ptr->header.lock);

	/*
	 * fill in the private data
	 */
	(void) dapl_os_memzero(&prd, sizeof (DAPL_PRIVATE));
	if (private_data_size > 0)
		(void) dapl_os_memcpy(prd.private_data, private_data,
		    private_data_size);

	/* Copy the connection qualifiers */
	(void) dapl_os_memcpy(ep_ptr->param.remote_ia_address_ptr,
	    remote_ia_address, sizeof (DAT_SOCK_ADDR6));
	ep_ptr->param.remote_port_qual = remote_conn_qual;

	dat_status = dapls_ib_connect(ep_handle, remote_ia_address,
	    remote_conn_qual, private_data_size, &prd, timeout);

	if (dat_status != DAT_SUCCESS) {
		DAPL_EVD *evd_ptr;

		if (dat_status == DAT_ERROR(DAT_INVALID_ADDRESS,
		    DAT_INVALID_ADDRESS_UNREACHABLE)) {
			/*
			 * Unreachable IP address: report it to the consumer
			 * as a connection event, not a call failure.
			 */
			evd_ptr = (DAPL_EVD *)ep_ptr->param.connect_evd_handle;
			if (evd_ptr != NULL) {
				(void) dapls_evd_post_connection_event(evd_ptr,
				    DAT_CONNECTION_EVENT_UNREACHABLE,
				    (DAT_HANDLE) ep_ptr, 0, 0);
			}
			ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
			dat_status = DAT_SUCCESS;
		} else if (dat_status == DAT_ERROR(DAT_INVALID_PARAMETER,
		    DAT_INVALID_ADDRESS_UNREACHABLE)) {
			/* Non-existent connection qualifier: same treatment. */
			evd_ptr = (DAPL_EVD *)ep_ptr->param.connect_evd_handle;
			if (evd_ptr != NULL) {
				(void) dapls_evd_post_connection_event(evd_ptr,
				    DAT_CONNECTION_EVENT_NON_PEER_REJECTED,
				    (DAT_HANDLE) ep_ptr, 0, 0);
			}
			ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
			dat_status = DAT_SUCCESS;
		} else {
			/* Any other failure: roll the EP state back. */
			ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED;
		}
	}

bail:
	dapl_dbg_log(DAPL_DBG_TYPE_RTN | DAPL_DBG_TYPE_CM,
	    "dapl_ep_connect () returns 0x%x\n", dat_status);
	return (dat_status);
}