/*
 * Look up, or create and initialize, the uDAPL proc structure tracking
 * BTL-specific state for the given ompi_proc_t peer.
 *
 * Returns the existing structure when one has already been created for
 * this peer; otherwise allocates a new one, pulls the peer's published
 * uDAPL addresses out of the modex, and allocates the endpoint array.
 *
 * @param ompi_proc (IN)  peer process
 * @return proc structure on success, NULL on any failure (modex error,
 *         malformed address blob, zero addresses, or allocation failure)
 */
mca_btl_udapl_proc_t* mca_btl_udapl_proc_create(ompi_proc_t* ompi_proc)
{
    mca_btl_udapl_proc_t* udapl_proc = NULL;
    size_t size;
    int rc;

    /* Check if we have already created a uDAPL proc
     * structure for this ompi process */
    udapl_proc = mca_btl_udapl_proc_lookup_ompi(ompi_proc);
    if(udapl_proc != NULL) {
        return udapl_proc;
    }

    /* create a new udapl proc out of the ompi_proc ...
     * NOTE(review): presumably the mca_btl_udapl_proc_t constructor
     * registers the object in a component-level list (lookup above finds
     * it later) — not visible in this file, confirm before restructuring
     * the OBJ_RELEASE error paths below. */
    udapl_proc = OBJ_NEW(mca_btl_udapl_proc_t);
    udapl_proc->proc_endpoint_count = 0;
    udapl_proc->proc_ompi = ompi_proc;

    /* query for the peer address info published during modex;
     * ompi_modex_recv allocates proc_addrs on success */
    rc = ompi_modex_recv(
        &mca_btl_udapl_component.super.btl_version,
        ompi_proc,
        (void*)&udapl_proc->proc_addrs,
        &size);
    if(OMPI_SUCCESS != rc) {
        BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
            ("ompi_modex_recv failed for peer %s",
            ORTE_NAME_PRINT(&ompi_proc->proc_name)));
        OBJ_RELEASE(udapl_proc);
        return NULL;
    }

    /* the blob must be a whole number of address entries */
    if((size % sizeof(mca_btl_udapl_addr_t)) != 0) {
        BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
            ("invalid udapl address for peer %s",
            ORTE_NAME_PRINT(&ompi_proc->proc_name)));
        OBJ_RELEASE(udapl_proc);
        return NULL;
    }

    udapl_proc->proc_addr_count = size/sizeof(mca_btl_udapl_addr_t);
    if (0 == udapl_proc->proc_addr_count) {
        udapl_proc->proc_endpoints = NULL;
    } else {
        udapl_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
            malloc(udapl_proc->proc_addr_count *
                sizeof(mca_btl_base_endpoint_t*));
    }
    /* NOTE: a peer that published zero addresses also lands here and is
     * treated the same as a malloc failure (NULL return) — apparently
     * deliberate, since such a peer is unreachable via this BTL */
    if(NULL == udapl_proc->proc_endpoints) {
        OBJ_RELEASE(udapl_proc);
        return NULL;
    }
    return udapl_proc;
}
/**
 * RDMA get entry point for the uDAPL BTL.
 *
 * Remote reads are not supported by this transport; the call is a stub
 * that traces at developer verbosity and reports "not implemented".
 *
 * @param btl      (IN) BTL module (unused)
 * @param endpoint (IN) peer endpoint (unused)
 * @param des      (IN) transfer descriptor (unused)
 * @return OMPI_ERR_NOT_IMPLEMENTED always
 */
int mca_btl_udapl_get(
    mca_btl_base_module_t* btl,
    mca_btl_base_endpoint_t* endpoint,
    mca_btl_base_descriptor_t* des)
{
    /* developer-level breadcrumb only; no work is performed */
    BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_DEVELOPER, ("udapl_get\n"));

    return OMPI_ERR_NOT_IMPLEMENTED;
}
int mca_btl_udapl_finalize(struct mca_btl_base_module_t* base_btl) { mca_btl_udapl_module_t* udapl_btl = (mca_btl_udapl_module_t*) base_btl; int32_t i; /* * Cleaning up the endpoints here because mca_btl_udapl_del_procs * is never called by upper layers. * Note: this is only looking at those endpoints which are available * off of the btl module rdma list. */ for (i=0; i < udapl_btl->udapl_eager_rdma_endpoint_count; i++) { mca_btl_udapl_endpoint_t* endpoint = opal_pointer_array_get_item(udapl_btl->udapl_eager_rdma_endpoints, i); OBJ_DESTRUCT(endpoint); } /* release uDAPL resources */ dat_evd_free(udapl_btl->udapl_evd_dto); dat_evd_free(udapl_btl->udapl_evd_conn); dat_pz_free(udapl_btl->udapl_pz); dat_ia_close(udapl_btl->udapl_ia, DAT_CLOSE_GRACEFUL_FLAG); /* destroy objects */ OBJ_DESTRUCT(&udapl_btl->udapl_lock); OBJ_DESTRUCT(&udapl_btl->udapl_frag_eager); OBJ_DESTRUCT(&udapl_btl->udapl_frag_eager_recv); OBJ_DESTRUCT(&udapl_btl->udapl_frag_max); OBJ_DESTRUCT(&udapl_btl->udapl_frag_max_recv); OBJ_DESTRUCT(&udapl_btl->udapl_frag_user); OBJ_DESTRUCT(&udapl_btl->udapl_frag_control); OBJ_DESTRUCT(&udapl_btl->udapl_eager_rdma_lock); /* destroy mpool */ if (OMPI_SUCCESS != mca_mpool_base_module_destroy(udapl_btl->super.btl_mpool)) { BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_INFORM, ("WARNING: Failed to release mpool")); return OMPI_ERROR; } free(udapl_btl); return OMPI_SUCCESS; }
/*
 * Return a descriptor to the free list it came from, keyed off the
 * fragment size recorded at allocation time.
 *
 *   size == 0                -> user fragment (deregister memory first)
 *   size == eager frag size  -> eager free list
 *   size == max frag size    -> max free list
 *   anything else            -> invalid descriptor
 *
 * @param btl (IN) BTL module owning the free lists
 * @param des (IN) descriptor being released
 * @return OMPI_SUCCESS, or OMPI_ERR_BAD_PARAM for an unrecognized size
 */
int mca_btl_udapl_free(
    struct mca_btl_base_module_t* btl,
    mca_btl_base_descriptor_t* des)
{
    mca_btl_udapl_frag_t* frag = (mca_btl_udapl_frag_t*)des;
    size_t frag_size = frag->size;

    if (0 == frag_size) {
        /* user fragment: drop any memory registration before recycling */
        if (NULL != frag->registration) {
            btl->btl_mpool->mpool_deregister(btl->btl_mpool,
                &(frag->registration->base));
            frag->registration = NULL;
        }
        MCA_BTL_UDAPL_FRAG_RETURN_USER(btl, frag);
        return OMPI_SUCCESS;
    }

    if (frag_size == mca_btl_udapl_component.udapl_eager_frag_size) {
        MCA_BTL_UDAPL_FRAG_RETURN_EAGER(btl, frag);
        return OMPI_SUCCESS;
    }

    if (frag_size == mca_btl_udapl_component.udapl_max_frag_size) {
        MCA_BTL_UDAPL_FRAG_RETURN_MAX(btl, frag);
        return OMPI_SUCCESS;
    }

    /* size matched no known pool: refuse rather than corrupt a list */
    BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_DIAGNOSE,
        ("mca_btl_udapl_free: invalid descriptor\n"));
    return OMPI_ERR_BAD_PARAM;
}
/*
 * Find and assign system netmask for the address of the uDAPL BTL
 * module, but only if udapl_if_mask has not been set by the "--mca
 * btl_udapl_if_mask" parameter. This routine will either find
 * the system netmask or set the value to 0.
 *
 * @param udapl_btl (IN) BTL module
 *
 * @return OMPI_SUCCESS or OMPI_ERROR
 */
static int mca_btl_udapl_assign_netmask(mca_btl_udapl_module_t* udapl_btl)
{
    struct sockaddr *saddr;
    struct sockaddr_in *btl_addr;
    char btl_addr_string[INET_ADDRSTRLEN];
    char btl_ifname[INET_ADDRSTRLEN];

    /* Setting if_mask to 0 informs future steps to assume all
     * addresses are reachable.
     */
    udapl_btl->udapl_if_mask = 0;

    if (mca_btl_udapl_component.udapl_compare_subnet) {
        /* go get system netmask value */

        /* use generic address to find address family */
        saddr = (struct sockaddr *)&(udapl_btl->udapl_addr.addr);

        if (saddr->sa_family == AF_INET) {

            btl_addr = (struct sockaddr_in *)saddr;

            /*
             * Retrieve the netmask of the udapl btl address. To
             * accomplish this requires 4 steps and the use of an opal
             * utility. This same utility is used by the tcp oob.
             * Steps:
             *   1. Get string value of known udapl btl module address.
             *   2. Use string value to find the interface name of address.
             *   3. Use interface name to find its index.
             *   4. From the index get the netmask.
             */

            /* step 1: retrieve string value of udapl btl address */
            inet_ntop(AF_INET, (void *) &btl_addr->sin_addr,
                btl_addr_string, INET_ADDRSTRLEN);

            /* step 2: use address string to retrieve associated
             * interface name */
            if (OPAL_SUCCESS !=
                opal_ifaddrtoname(btl_addr_string,
                    btl_ifname, INET_ADDRSTRLEN)) {
                BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
                    ("help-mpi-btl-udapl.txt", "interface not found",
                        true, ompi_process_info.nodename, btl_addr_string));
                return OMPI_ERROR;
            }

            /* steps 3 and 4: use interface name to retrieve index; then
             * use index to retrieve udapl btl address netmask */
            if (OPAL_SUCCESS !=
                opal_ifindextomask(opal_ifnametoindex(btl_ifname),
                    &(udapl_btl->udapl_if_mask),
                    sizeof(udapl_btl->udapl_if_mask))) {
                BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
                    ("help-mpi-btl-udapl.txt", "netmask not found",
                        true, ompi_process_info.nodename, btl_addr_string));
                return OMPI_ERROR;
            }

            /* report if_mask used by address */
            BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_INFORM,
                ("uDAPL BTL address %s : if_mask = %d",
                btl_addr_string, udapl_btl->udapl_if_mask));

        } else {
            /* current uDAPL BTL does not support IPv6 */
            BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
                ("help-mpi-btl-udapl.txt", "IPv4 only",
                    true, ompi_process_info.nodename));
            return OMPI_ERROR;
        }
    }

    return OMPI_SUCCESS;
}
/*
 * Open and initialize all uDAPL resources for one BTL module, in order:
 * IA open (with a Solaris relaxed-ordering retry), protection zone,
 * IA query for address/limits, netmask assignment, DTO and connection
 * EVDs, public service point, endpoint parameters, port publication,
 * memory pool, free lists, and eager-rdma bookkeeping.
 *
 * @param ia_name (IN)     DAT interface adapter name from the registry
 * @param btl     (IN/OUT) BTL module being initialized
 * @return OMPI_SUCCESS, OMPI_ERR_OUT_OF_RESOURCE (asprintf failure), or
 *         OMPI_ERROR on any DAT/mpool failure
 *
 * NOTE(review): the `failure:` path only closes the IA. Resources created
 * before the failing step (pz, dto/conn EVDs, psp) are not individually
 * freed — presumably dat_ia_close(ABRUPT) reclaims them; confirm against
 * the DAT provider before relying on it. The return value of
 * mca_btl_udapl_assign_netmask() is also ignored (if_mask stays 0 on
 * failure, which downstream treats as "all reachable").
 */
int mca_btl_udapl_init(DAT_NAME_PTR ia_name, mca_btl_udapl_module_t* btl)
{
    mca_mpool_base_resources_t res;
    DAT_CONN_QUAL port;
    DAT_RETURN rc;

    /* open the uDAPL interface */
    btl->udapl_evd_async = DAT_HANDLE_NULL;
    rc = dat_ia_open(ia_name, btl->udapl_async_evd_qlen,
            &btl->udapl_evd_async, &btl->udapl_ia);

    if(DAT_SUCCESS != rc) {
        char* major;
        char* minor;

        dat_strerror(rc, (const char**)&major,
            (const char**)&minor);

#if defined(__SVR4) && defined(__sun)
        if (strcmp(major, "DAT_INVALID_PARAMETER") == 0
            && strcmp(minor, "DAT_INVALID_RO_COOKIE") == 0) {
            /* Some platforms that Solaris runs on implement the PCI
             * standard for relaxed ordering(RO). Using RDMA with
             * polling on a memory location as the uDAPL (and openib
             * by the way) BTL does for short messages with
             * relaxed ordering could potentially produce silent data
             * corruption. For this reason we need to take extra
             * steps and this is accomplished by setting
             * "ro_aware_system = 1" and handling as required.
             *
             * The uDAPL standard does not provide an interface to
             * inform users of this scenario so Sun has implemented the
             * following: If a platform supports relaxed ordering
             * when the interface name is passed into the
             * dat_ia_open() call, the call will return
             * DAT_INVALID_PARAMETER and DAT_INVALID_RO_COOKIE.
             * DAT_INVALID_RO_COOKIE is not part of the uDAPL standard
             * at this time. The only way to open this interface is
             * to prefix the following cookie "RO_AWARE_" to the ia
             * name that was retreived from the dat registry.
             *
             * Example: ia_name = "ib0", new expected name will be
             * "RO_AWARE_ib0".
             *
             * Here, since our first ia open attempt failed in the
             * standard way, add the cookie and try to open again.
             */
            DAT_NAME_PTR ro_ia_name;

            /* prefix relaxed order cookie to ia_name */
            asprintf(&ro_ia_name, "RO_AWARE_%s", ia_name);
            if (NULL == ro_ia_name) {
                return OMPI_ERR_OUT_OF_RESOURCE;
            }

            /* because this is not standard inform user in some way */
            BTL_UDAPL_VERBOSE_HELP(VERBOSE_INFORM,
                ("help-mpi-btl-udapl.txt", "relaxed order support",
                true, ia_name, ro_ia_name));

            /* try and open again */
            btl->udapl_evd_async = DAT_HANDLE_NULL;
            rc = dat_ia_open(ro_ia_name, btl->udapl_async_evd_qlen,
                &btl->udapl_evd_async, &btl->udapl_ia);

            dat_strerror(rc, (const char**)&major,
                (const char**)&minor);

            if (DAT_SUCCESS == rc) {
                mca_btl_udapl_component.ro_aware_system = 1;
                free(ro_ia_name);
            } else {
                BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
                    ("help-mpi-btl-udapl.txt",
                    "dat_ia_open fail RO", true, ro_ia_name,
                    major, minor, ia_name));

                free(ro_ia_name);
                return OMPI_ERROR;
            }
        } else {
#endif
            BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
                ("help-mpi-btl-udapl.txt",
                "dat_ia_open fail", true, ia_name, major, minor));

            return OMPI_ERROR;
#if defined(__SVR4) && defined(__sun)
        }
#endif
    }

    /* create a protection zone */
    rc = dat_pz_create(btl->udapl_ia, &btl->udapl_pz);
    if(DAT_SUCCESS != rc) {
        char* major;
        char* minor;

        dat_strerror(rc, (const char**)&major,
            (const char**)&minor);
        BTL_ERROR(("ERROR: %s %s %s\n", "dat_pz_create", major, minor));
        goto failure;
    }

    /* query to get address information */
    rc = dat_ia_query(btl->udapl_ia, &btl->udapl_evd_async,
            DAT_IA_ALL, &(btl->udapl_ia_attr), 0, NULL);
    if(DAT_SUCCESS != rc) {
        char* major;
        char* minor;

        dat_strerror(rc, (const char**)&major,
            (const char**)&minor);
        BTL_ERROR(("ERROR: %s %s %s\n", "dat_ia_query", major, minor));
        goto failure;
    }

    memcpy(&btl->udapl_addr.addr,
        (btl->udapl_ia_attr).ia_address_ptr, sizeof(DAT_SOCK_ADDR));

    /* determine netmask (return value intentionally ignored; see NOTE
     * in the header comment) */
    mca_btl_udapl_assign_netmask(btl);

    /* check evd qlen against adapter max, clamping if necessary */
    if (btl->udapl_dto_evd_qlen > (btl->udapl_ia_attr).max_evd_qlen) {
        BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
            ("help-mpi-btl-udapl.txt",
            "evd_qlen adapter max", true,
            "btl_udapl_dto_evd_qlen",
            btl->udapl_dto_evd_qlen,
            (btl->udapl_ia_attr).max_evd_qlen));
        btl->udapl_dto_evd_qlen = btl->udapl_ia_attr.max_evd_qlen;
    }
    if (btl->udapl_conn_evd_qlen > (btl->udapl_ia_attr).max_evd_qlen) {
        BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
            ("help-mpi-btl-udapl.txt",
            "evd_qlen adapter max", true,
            "btl_udapl_conn_evd_qlen",
            btl->udapl_conn_evd_qlen,
            (btl->udapl_ia_attr).max_evd_qlen));
        btl->udapl_conn_evd_qlen = btl->udapl_ia_attr.max_evd_qlen;
    }

    /* set up evd's: one for data transfer completions (+ RMR binds),
     * one for connection events */
    rc = dat_evd_create(btl->udapl_ia,
        btl->udapl_dto_evd_qlen, DAT_HANDLE_NULL,
        DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG, &btl->udapl_evd_dto);
    if(DAT_SUCCESS != rc) {
        char* major;
        char* minor;

        dat_strerror(rc, (const char**)&major,
            (const char**)&minor);
        BTL_ERROR(("ERROR: %s %s %s\n",
            "dat_evd_create (dto)", major, minor));
        goto failure;
    }

    rc = dat_evd_create(btl->udapl_ia,
            btl->udapl_conn_evd_qlen, DAT_HANDLE_NULL,
            DAT_EVD_CR_FLAG | DAT_EVD_CONNECTION_FLAG, &btl->udapl_evd_conn);
    if(DAT_SUCCESS != rc) {
        char* major;
        char* minor;

        dat_strerror(rc, (const char**)&major,
            (const char**)&minor);
        BTL_ERROR(("ERROR: %s %s %s\n",
            "dat_evd_create (conn)", major, minor));
        goto failure;
    }

    /* create our public service point */
    rc = dat_psp_create_any(btl->udapl_ia, &port, btl->udapl_evd_conn,
        DAT_PSP_CONSUMER_FLAG, &btl->udapl_psp);
    if(DAT_SUCCESS != rc) {
        char* major;
        char* minor;

        dat_strerror(rc, (const char**)&major,
            (const char**)&minor);
        BTL_ERROR(("ERROR: %s %s %s\n",
            "dat_psp_create_any", major, minor));
        goto failure;
    }

    /* establish endpoint parameters */
    rc = mca_btl_udapl_endpoint_get_params(btl, &(btl->udapl_ep_param));
    if(OMPI_SUCCESS != rc) {
        /* by not erroring out here we can try to continue with
         * the default endpoint parameter values
         */
        BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
            ("help-mpi-btl-udapl.txt",
            "use default endpoint params", true));
    }

    /* Save the port with the address information */
    /* TODO - since we're doing the hack below, do we need our own
       port? */
    btl->udapl_addr.port = port;

    /* Using dat_ep_query to obtain the remote port would be ideal but
     * since the current udapl implementations don't seem to support
     * this we store the port in udapl_addr and explictly exchange the
     * information later.
     */
    ((struct sockaddr_in*)&btl->udapl_addr.addr)->sin_port = htons(port);

    /* initialize the memory pool */
    res.pool_name = "udapl";
    res.reg_data = btl;
    res.sizeof_reg = sizeof(mca_btl_udapl_reg_t);
    res.register_mem = udapl_reg_mr;
    res.deregister_mem = udapl_dereg_mr;
    btl->super.btl_mpool = mca_mpool_base_module_create(
            mca_btl_udapl_component.udapl_mpool_name, &btl->super, &res);
    if (NULL == btl->super.btl_mpool) {
        BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_INFORM,
            ("WARNING: Failed to create mpool."));
        goto failure;
    }

    /* initialize objects */
    OBJ_CONSTRUCT(&btl->udapl_frag_eager, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->udapl_frag_eager_recv, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->udapl_frag_max, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->udapl_frag_max_recv, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->udapl_frag_user, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->udapl_frag_control, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->udapl_lock, opal_mutex_t);

    /* check buffer alignment against dat library; warn on mismatch but
     * proceed with the user-configured value */
    if (mca_btl_udapl_component.udapl_buffer_alignment !=
        DAT_OPTIMAL_ALIGNMENT) {
        BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
            ("help-mpi-btl-udapl.txt",
            "optimal buffer alignment mismatch",
            true, DAT_OPTIMAL_ALIGNMENT,
            mca_btl_udapl_component.udapl_buffer_alignment,
            DAT_OPTIMAL_ALIGNMENT));
    }

    /* initialize free lists; send/recv lists are backed by the mpool so
     * their buffers are registered, the user list is not */
    ompi_free_list_init_ex_new(&btl->udapl_frag_eager,
        sizeof(mca_btl_udapl_frag_eager_t) +
            mca_btl_udapl_component.udapl_eager_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        OBJ_CLASS(mca_btl_udapl_frag_eager_t),
        mca_btl_udapl_component.udapl_eager_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        mca_btl_udapl_component.udapl_free_list_num,
        mca_btl_udapl_component.udapl_free_list_max,
        mca_btl_udapl_component.udapl_free_list_inc,
        btl->super.btl_mpool,
        NULL,
        NULL);

    ompi_free_list_init_ex_new(&btl->udapl_frag_eager_recv,
        sizeof(mca_btl_udapl_frag_eager_t) +
            mca_btl_udapl_component.udapl_eager_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        OBJ_CLASS(mca_btl_udapl_frag_eager_t),
        mca_btl_udapl_component.udapl_eager_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        mca_btl_udapl_component.udapl_free_list_num,
        mca_btl_udapl_component.udapl_free_list_max,
        mca_btl_udapl_component.udapl_free_list_inc,
        btl->super.btl_mpool,
        NULL,
        NULL);

    ompi_free_list_init_ex_new(&btl->udapl_frag_max,
        sizeof(mca_btl_udapl_frag_max_t) +
            mca_btl_udapl_component.udapl_max_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        OBJ_CLASS(mca_btl_udapl_frag_max_t),
        mca_btl_udapl_component.udapl_max_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        mca_btl_udapl_component.udapl_free_list_num,
        mca_btl_udapl_component.udapl_free_list_max,
        mca_btl_udapl_component.udapl_free_list_inc,
        btl->super.btl_mpool,
        NULL,
        NULL);

    ompi_free_list_init_ex_new(&btl->udapl_frag_max_recv,
        sizeof(mca_btl_udapl_frag_max_t) +
            mca_btl_udapl_component.udapl_max_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        OBJ_CLASS(mca_btl_udapl_frag_max_t),
        mca_btl_udapl_component.udapl_max_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        mca_btl_udapl_component.udapl_free_list_num,
        mca_btl_udapl_component.udapl_free_list_max,
        mca_btl_udapl_component.udapl_free_list_inc,
        btl->super.btl_mpool,
        NULL,
        NULL);

    ompi_free_list_init_ex_new(&btl->udapl_frag_user,
        sizeof(mca_btl_udapl_frag_user_t),
        mca_btl_udapl_component.udapl_buffer_alignment,
        OBJ_CLASS(mca_btl_udapl_frag_user_t),
        0,0,
        mca_btl_udapl_component.udapl_free_list_num,
        mca_btl_udapl_component.udapl_free_list_max,
        mca_btl_udapl_component.udapl_free_list_inc,
        NULL,
        NULL,
        NULL);

    /* control list has no upper bound (-1) so credit messages can
     * always be sent */
    ompi_free_list_init_ex_new(&btl->udapl_frag_control,
        sizeof(mca_btl_udapl_frag_eager_t) +
            mca_btl_udapl_component.udapl_eager_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        OBJ_CLASS(mca_btl_udapl_frag_eager_t),
        mca_btl_udapl_component.udapl_eager_frag_size,
        mca_btl_udapl_component.udapl_buffer_alignment,
        mca_btl_udapl_component.udapl_free_list_num,
        -1,
        mca_btl_udapl_component.udapl_free_list_inc,
        btl->super.btl_mpool,
        NULL,
        NULL);

    /* initialize eager rdma buffer info */
    btl->udapl_eager_rdma_endpoints = OBJ_NEW(opal_pointer_array_t);
    opal_pointer_array_init(btl->udapl_eager_rdma_endpoints,
        mca_btl_udapl_component.udapl_max_eager_rdma_peers,
        mca_btl_udapl_component.udapl_max_eager_rdma_peers,
        0);
    btl->udapl_eager_rdma_endpoint_count = 0;
    OBJ_CONSTRUCT(&btl->udapl_eager_rdma_lock, opal_mutex_t);

    /* initialize miscellaneous variables */
    btl->udapl_async_events = 0;
    btl->udapl_connect_inprogress = 0;
    btl->udapl_num_peers = 0;

    /* TODO - Set up SRQ when it is supported */
    return OMPI_SUCCESS;

failure:
    dat_ia_close(btl->udapl_ia, DAT_CLOSE_ABRUPT_FLAG);
    return OMPI_ERROR;
}
/*
 * Find an address on the peer_process which matches stated criteria
 * to the udapl btl module address information. Return in peer_addr_idx
 * the index to the peer_process address that matches the btl module
 * address. Where match criteria is:
 * - the address is not already in use
 * - compare addresses using netmask, the netmask value can be modified
 *   with "--mca btl_udapl_if_mask"
 *
 * Note: since this is called from mca_btl_udapl_proc_insert() it
 * is assumed that the process lock is locked when entered.
 *
 * @param udapl_btl (IN)      BTL module
 * @param peer_proc (IN)      BTL peer process
 * @param peer_addr_idx(IN/OUT) Index of address on peer_process
 *                            which matches the udapl_btl address data.
 *                            On success should be >= 0.
 * @return OMPI_SUCCESS or error status on failure
 */
static int mca_btl_udapl_proc_address_match(
    mca_btl_udapl_module_t* udapl_btl,
    mca_btl_udapl_proc_t* peer_proc,
    int* peer_addr_idx)
{
    int i;
    struct sockaddr *saddr;
    struct sockaddr_in *btl_addr;
    struct sockaddr_in *peer_addr;
    char btl_addr_string[INET_ADDRSTRLEN];
    char peer_addr_string[INET_ADDRSTRLEN];

    *peer_addr_idx = MCA_BTL_UDAPL_INVALID_PEER_ADDR_IDX;

    /* peer_addr_string is only rendered when verbosity is high enough;
     * make sure both buffers are always NUL-terminated so no message
     * below can ever read uninitialized stack data */
    btl_addr_string[0] = '\0';
    peer_addr_string[0] = '\0';

    /* use generic address to find address family */
    saddr = (struct sockaddr *)&(udapl_btl->udapl_addr.addr);

    if (saddr->sa_family == AF_INET) {

        btl_addr = (struct sockaddr_in *)saddr;

        /* Always render the local address string: it is needed both for
         * the VERBOSE_INFORM messages in the loop and for the
         * "no network match" help message at the end. (Previously it was
         * only set when verbosity >= VERBOSE_INFORM, so that help message
         * printed an uninitialized buffer.) */
        inet_ntop(AF_INET, (void *) &btl_addr->sin_addr,
            btl_addr_string, INET_ADDRSTRLEN);

        /* Loop thru peer process addresses looking for match.
         * Match criteria:
         * - address should not be "inuse"
         * - both udapl btl module and peer address should be on
         *   the same subnet (compare with if_mask value)
         */
        for(i = 0; i < (int) peer_proc->proc_addr_count; i++) {
            peer_addr =
                (struct sockaddr_in *)&(peer_proc->proc_addrs[i].addr);

            if (VERBOSE_INFORM <=
                mca_btl_udapl_component.udapl_verbosity) {
                /* retrieve peer address string for reporting */
                inet_ntop(AF_INET, (void *) &peer_addr->sin_addr,
                    peer_addr_string, INET_ADDRSTRLEN);
            }

            if ((false == peer_proc->proc_addrs[i].inuse) &&
                (opal_net_samenetwork((struct sockaddr *)btl_addr,
                    (struct sockaddr *)peer_addr,
                    udapl_btl->udapl_if_mask))) {

                /* capture index of remote address where match found */
                *peer_addr_idx = i;

                /* mark this address as now being used */
                peer_proc->proc_addrs[i].inuse = true;

                /* report what address was found to match */
                BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_INFORM,
                    ("uDAPL BTL module(%s) matched %s",
                    btl_addr_string, peer_addr_string));
                break;
            } else {
                /* peer address already used by another udapl btl
                 * module or netmask check not successful so skip */
                BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_INFORM,
                    ("uDAPL BTL module(%s) either skipped because it "
                    "is already in use or match criteria not successful "
                    "for peer address %s",
                    btl_addr_string, peer_addr_string));
            }
        }

    } else {
        /* current uDAPL BTL only supports IPv4 */
        BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
            ("help-mpi-btl-udapl.txt", "IPv4 only",
                true, orte_process_info.nodename));
        return OMPI_ERROR;
    }

    if (MCA_BTL_UDAPL_INVALID_PEER_ADDR_IDX == *peer_addr_idx) {
        BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
            ("help-mpi-btl-udapl.txt", "no network match",
                true, btl_addr_string, orte_process_info.nodename,
                peer_proc->proc_ompi->proc_hostname));
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    return OMPI_SUCCESS;
}
int mca_btl_udapl_component_progress() { mca_btl_udapl_module_t* btl; static int32_t inprogress = 0; DAT_EVENT event; size_t i; int32_t j, rdma_ep_count; int count = 0, btl_ownership; mca_btl_udapl_frag_t* frag; mca_btl_base_endpoint_t* endpoint; /* prevent deadlock - only one thread should be 'progressing' at a time */ if(OPAL_THREAD_ADD32(&inprogress, 1) > 1) { OPAL_THREAD_ADD32(&inprogress, -1); return OMPI_SUCCESS; } /* check for work to do on each uDAPL btl */ OPAL_THREAD_LOCK(&mca_btl_udapl_component.udapl_lock); for(i = 0; i < mca_btl_udapl_component.udapl_num_btls; i++) { btl = mca_btl_udapl_component.udapl_btls[i]; /* Check DTO EVD */ while(DAT_SUCCESS == dat_evd_dequeue(btl->udapl_evd_dto, &event)) { DAT_DTO_COMPLETION_EVENT_DATA* dto; switch(event.event_number) { case DAT_DTO_COMPLETION_EVENT: dto = &event.event_data.dto_completion_event_data; frag = dto->user_cookie.as_ptr; /* Was the DTO successful? */ if(DAT_DTO_SUCCESS != dto->status) { if (DAT_DTO_ERR_FLUSHED == dto->status) { BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_INFORM, ("DAT_DTO_ERR_FLUSHED: probably OK if occurs during MPI_Finalize().\n")); } else { BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL, ("ERROR: DAT_DTO_COMPLETION_EVENT: %d %d %lu %p.\n", dto->status, frag->type, (unsigned long)frag->size, dto->ep_handle)); } return OMPI_ERROR; } endpoint = frag->endpoint; btl_ownership = (frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP); switch(frag->type) { case MCA_BTL_UDAPL_RDMA_WRITE: { assert(frag->base.des_src == &frag->segment); assert(frag->base.des_src_cnt == 1); assert(frag->base.des_dst == NULL); assert(frag->base.des_dst_cnt == 0); assert(frag->type == MCA_BTL_UDAPL_RDMA_WRITE); frag->base.des_cbfunc(&btl->super, endpoint, &frag->base, OMPI_SUCCESS); if( btl_ownership ) { mca_btl_udapl_free(&btl->super, &frag->base); } OPAL_THREAD_ADD32(&(endpoint->endpoint_lwqe_tokens[BTL_UDAPL_EAGER_CONNECTION]), 1); mca_btl_udapl_frag_progress_pending(btl, endpoint, BTL_UDAPL_EAGER_CONNECTION); break; } 
case MCA_BTL_UDAPL_SEND: { int connection = BTL_UDAPL_EAGER_CONNECTION; assert(frag->base.des_src == &frag->segment); assert(frag->base.des_src_cnt == 1); assert(frag->base.des_dst == NULL); assert(frag->base.des_dst_cnt == 0); assert(frag->type == MCA_BTL_UDAPL_SEND); if(frag->size != mca_btl_udapl_component.udapl_eager_frag_size) { assert(frag->size == mca_btl_udapl_component.udapl_max_frag_size); connection = BTL_UDAPL_MAX_CONNECTION; } frag->base.des_cbfunc(&btl->super, endpoint, &frag->base, OMPI_SUCCESS); if( btl_ownership ) { mca_btl_udapl_free(&btl->super, &frag->base); } OPAL_THREAD_ADD32(&(endpoint->endpoint_lwqe_tokens[connection]), 1); mca_btl_udapl_frag_progress_pending(btl, endpoint, connection); break; } case MCA_BTL_UDAPL_RECV: { mca_btl_active_message_callback_t* reg; int cntrl_msg = -1; assert(frag->base.des_dst == &frag->segment); assert(frag->base.des_dst_cnt == 1); assert(frag->base.des_src == NULL); assert(frag->base.des_src_cnt == 0); assert(frag->type == MCA_BTL_UDAPL_RECV); assert(frag->triplet.virtual_address == (DAT_VADDR)(uintptr_t)frag->segment.seg_addr.pval); assert(frag->triplet.segment_length == frag->size); assert(frag->btl == btl); /* setup frag ftr location and do callback */ frag->segment.seg_len = dto->transfered_length - sizeof(mca_btl_udapl_footer_t); frag->ftr = (mca_btl_udapl_footer_t *) ((char *)frag->segment.seg_addr.pval + frag->segment.seg_len); cntrl_msg = frag->ftr->tag; reg = mca_btl_base_active_message_trigger + frag->ftr->tag; OPAL_THREAD_UNLOCK(&mca_btl_udapl_component.udapl_lock); reg->cbfunc(&btl->super, frag->ftr->tag, &frag->base, reg->cbdata); OPAL_THREAD_LOCK(&mca_btl_udapl_component.udapl_lock); /* Repost the frag */ frag->ftr = frag->segment.seg_addr.pval; frag->segment.seg_len = (frag->size - sizeof(mca_btl_udapl_footer_t) - sizeof(mca_btl_udapl_rdma_footer_t)); frag->base.des_flags = 0; if(frag->size == mca_btl_udapl_component.udapl_eager_frag_size) { 
OPAL_THREAD_ADD32(&(frag->endpoint->endpoint_sr_credits[BTL_UDAPL_EAGER_CONNECTION]), 1); dat_ep_post_recv(frag->endpoint->endpoint_eager, 1, &frag->triplet, dto->user_cookie, DAT_COMPLETION_DEFAULT_FLAG); if (frag->endpoint->endpoint_sr_credits[BTL_UDAPL_EAGER_CONNECTION] >= mca_btl_udapl_component.udapl_sr_win) { mca_btl_udapl_endpoint_send_sr_credits(frag->endpoint, BTL_UDAPL_EAGER_CONNECTION); } if (MCA_BTL_TAG_UDAPL == cntrl_msg) { mca_btl_udapl_frag_progress_pending(btl, frag->endpoint, BTL_UDAPL_EAGER_CONNECTION); } } else { assert(frag->size == mca_btl_udapl_component.udapl_max_frag_size); OPAL_THREAD_ADD32(&(frag->endpoint->endpoint_sr_credits[BTL_UDAPL_MAX_CONNECTION]), 1); dat_ep_post_recv(frag->endpoint->endpoint_max, 1, &frag->triplet, dto->user_cookie, DAT_COMPLETION_DEFAULT_FLAG); if (frag->endpoint->endpoint_sr_credits[BTL_UDAPL_MAX_CONNECTION] >= mca_btl_udapl_component.udapl_sr_win) { mca_btl_udapl_endpoint_send_sr_credits(frag->endpoint, BTL_UDAPL_MAX_CONNECTION); } if (MCA_BTL_TAG_UDAPL == cntrl_msg) { mca_btl_udapl_frag_progress_pending(btl, frag->endpoint, BTL_UDAPL_MAX_CONNECTION); } } break; } case MCA_BTL_UDAPL_PUT: { assert(frag->base.des_src == &frag->segment); assert(frag->base.des_src_cnt == 1); assert(frag->base.des_dst_cnt == 1); assert(frag->type == MCA_BTL_UDAPL_PUT); frag->base.des_cbfunc(&btl->super, endpoint, &frag->base, OMPI_SUCCESS); if( btl_ownership ) { mca_btl_udapl_free(&btl->super, &frag->base); } OPAL_THREAD_ADD32(&(endpoint->endpoint_lwqe_tokens[BTL_UDAPL_MAX_CONNECTION]), 1); OPAL_THREAD_ADD32(&(endpoint->endpoint_sr_tokens[BTL_UDAPL_MAX_CONNECTION]), 1); mca_btl_udapl_frag_progress_pending(btl, endpoint, BTL_UDAPL_MAX_CONNECTION); break; } case MCA_BTL_UDAPL_CONN_RECV: mca_btl_udapl_endpoint_finish_connect(btl, frag->segment.seg_addr.pval, (int32_t *)((char *)frag->segment.seg_addr.pval + sizeof(mca_btl_udapl_addr_t)), event.event_data.connect_event_data.ep_handle); /* No break - fall through to free */ case 
MCA_BTL_UDAPL_CONN_SEND: frag->segment.seg_len = mca_btl_udapl_module.super.btl_eager_limit; mca_btl_udapl_free(&btl->super, &frag->base); break; default: BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_DIAGNOSE, ("WARNING: unknown frag type: %d\n", frag->type)); } count++; break; default: BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_DIAGNOSE, ("WARNING: DTO event: %s (%d)\n", mca_btl_udapl_dat_event_to_string(event.event_number), event.event_number)); } } /* Check connection EVD */ while((btl->udapl_connect_inprogress > 0) && (DAT_SUCCESS == dat_evd_dequeue(btl->udapl_evd_conn, &event))) { switch(event.event_number) { case DAT_CONNECTION_REQUEST_EVENT: /* Accept a new connection */ mca_btl_udapl_accept_connect(btl, event.event_data.cr_arrival_event_data.cr_handle); count++; break; case DAT_CONNECTION_EVENT_ESTABLISHED: /* Both the client and server side of a connection generate this event */ if (mca_btl_udapl_component.udapl_conn_priv_data) { /* private data is only valid at this point if this * event is from a dat_ep_connect call, not an accept */ mca_btl_udapl_endpoint_pd_established_conn(btl, event.event_data.connect_event_data.ep_handle); } else { /* explicitly exchange process data */ mca_btl_udapl_sendrecv(btl, event.event_data.connect_event_data.ep_handle); } count++; break; case DAT_CONNECTION_EVENT_PEER_REJECTED: case DAT_CONNECTION_EVENT_NON_PEER_REJECTED: case DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR: case DAT_CONNECTION_EVENT_DISCONNECTED: case DAT_CONNECTION_EVENT_BROKEN: case DAT_CONNECTION_EVENT_TIMED_OUT: /* handle this case specially? if we have finite timeout, we might want to try connecting again here. 
*/ case DAT_CONNECTION_EVENT_UNREACHABLE: /* Need to set the BTL endpoint to MCA_BTL_UDAPL_FAILED See dat_ep_connect documentation pdf pg 198 */ BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL, ("WARNING: connection event not handled : %s (%d)\n", mca_btl_udapl_dat_event_to_string(event.event_number), event.event_number)); break; default: BTL_ERROR(("ERROR: connection event : %s (%d)", mca_btl_udapl_dat_event_to_string(event.event_number), event.event_number)); } } /* Check async EVD */ if (btl->udapl_async_events == mca_btl_udapl_component.udapl_async_events) { btl->udapl_async_events = 0; while(DAT_SUCCESS == dat_evd_dequeue(btl->udapl_evd_async, &event)) { switch(event.event_number) { case DAT_ASYNC_ERROR_EVD_OVERFLOW: case DAT_ASYNC_ERROR_IA_CATASTROPHIC: case DAT_ASYNC_ERROR_EP_BROKEN: case DAT_ASYNC_ERROR_TIMED_OUT: case DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR: BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL, ("WARNING: async event ignored : %s (%d)", mca_btl_udapl_dat_event_to_string(event.event_number), event.event_number)); break; default: BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL, ("WARNING: %s (%d)\n", mca_btl_udapl_dat_event_to_string(event.event_number), event.event_number)); } } } else { btl->udapl_async_events++; } /* * Check eager rdma segments */ /* find the number of endpoints with rdma buffers */ rdma_ep_count = btl->udapl_eager_rdma_endpoint_count; for (j = 0; j < rdma_ep_count; j++) { mca_btl_udapl_endpoint_t* endpoint; mca_btl_udapl_frag_t *local_rdma_frag; endpoint = opal_pointer_array_get_item(btl->udapl_eager_rdma_endpoints, j); OPAL_THREAD_LOCK(&endpoint->endpoint_eager_rdma_local.lock); local_rdma_frag = MCA_BTL_UDAPL_GET_LOCAL_RDMA_FRAG(endpoint, endpoint->endpoint_eager_rdma_local.head); if (local_rdma_frag->rdma_ftr->active == 1) { int pad = 0; mca_btl_active_message_callback_t* reg; MCA_BTL_UDAPL_RDMA_NEXT_INDEX(endpoint->endpoint_eager_rdma_local.head); OPAL_THREAD_UNLOCK(&endpoint->endpoint_eager_rdma_local.lock); /* compute pad as needed */ 
MCA_BTL_UDAPL_FRAG_CALC_ALIGNMENT_PAD(pad, (local_rdma_frag->rdma_ftr->size + sizeof(mca_btl_udapl_footer_t))); /* set fragment information */ local_rdma_frag->ftr = (mca_btl_udapl_footer_t *) ((char *)local_rdma_frag->rdma_ftr - pad - sizeof(mca_btl_udapl_footer_t)); local_rdma_frag->segment.seg_len = local_rdma_frag->rdma_ftr->size; local_rdma_frag->segment.seg_addr.pval = (unsigned char *) ((char *)local_rdma_frag->ftr - local_rdma_frag->segment.seg_len); /* trigger callback */ reg = mca_btl_base_active_message_trigger + local_rdma_frag->ftr->tag; reg->cbfunc(&btl->super, local_rdma_frag->ftr->tag, &local_rdma_frag->base, reg->cbdata); /* repost */ local_rdma_frag->rdma_ftr->active = 0; local_rdma_frag->segment.seg_len = mca_btl_udapl_module.super.btl_eager_limit; local_rdma_frag->base.des_flags = 0; /* increment local rdma credits */ OPAL_THREAD_ADD32(&(endpoint->endpoint_eager_rdma_local.credits), 1); if (endpoint->endpoint_eager_rdma_local.credits >= mca_btl_udapl_component.udapl_eager_rdma_win) { mca_btl_udapl_endpoint_send_eager_rdma_credits(endpoint); } count++; } else { OPAL_THREAD_UNLOCK(&endpoint->endpoint_eager_rdma_local.lock); } } /* end of rdma_count loop */ } /* unlock and return */ OPAL_THREAD_UNLOCK(&mca_btl_udapl_component.udapl_lock); OPAL_THREAD_ADD32(&inprogress, -1); return count; }