/*
 * Fill portals_procs[0..nprocs-1] with the Portals process id published
 * by each peer through the modex.  The nid/pid arrive in network byte
 * order and are converted to host order here.
 *
 * Returns OMPI_SUCCESS, the modex error code, or OMPI_ERROR when a peer
 * published data of an unexpected size.
 */
int
ompi_common_portals_get_procs(size_t nprocs,
                              struct ompi_proc_t **procs,
                              ptl_process_id_t *portals_procs)
{
    size_t i, size;
    int ret;
    ptl_process_id_t *info;

    for (i = 0 ; i < nprocs ; ++i) {
        ret = ompi_modex_recv(&portals_component,
                              procs[i], (void**) &info, &size);
        if (OMPI_SUCCESS != ret) {
            opal_output(0, "%5d: ompi_modex_recv failed: %d", 
                        getpid(), ret);
            return ret;
        } else if (sizeof(ptl_process_id_t) != size) {
            /* cast to int: passing size_t for %d is undefined behavior */
            opal_output(0, "%5d: ompi_modex_recv returned size %d, expected %d", 
                        getpid(), (int) size, (int) sizeof(ptl_process_id_t));
            return OMPI_ERROR;
        }

        portals_procs[i].nid = ntohl(info->nid);
        portals_procs[i].pid = ntohl(info->pid);

        /* the modex buffer is owned by the receiver (see the sibling code
         * that does free(info) after use) -- release it to avoid a leak */
        free(info);
    }

    return OMPI_SUCCESS;
}
/*
 * Fill portals_procs[0..nprocs-1] with the Portals process id published
 * by each peer through the modex.  Values are copied verbatim (no
 * byte-order conversion in this variant).
 *
 * Returns OMPI_SUCCESS, the modex error code, or OMPI_ERROR when a peer
 * published data of an unexpected size.
 */
int
ompi_common_portals_get_procs(size_t nprocs,
                              struct ompi_proc_t **procs,
                              ptl_process_id_t *portals_procs)
{
    size_t i, size;
    int ret;
    ptl_process_id_t *ptl_process_id;
    
    for (i = 0 ; i < nprocs ; ++i) {
        ret = ompi_modex_recv(&portals_component,
                              procs[i], (void**) &ptl_process_id, &size);
        if (OMPI_SUCCESS != ret) {
            opal_output(0, "ompi_modex_recv failed: %d", ret);
            return ret;
        } else if (sizeof(ptl_process_id_t) != size) {
            opal_output(0, "ompi_modex_recv returned size %d, expected %d", 
                        (int) size, (int) sizeof(ptl_process_id_t));
            return OMPI_ERROR;
        }
        
        /* copy the value out, then release the modex buffer -- it is
         * owned by the receiver and was previously leaked here */
        portals_procs[i] = *ptl_process_id;
        free(ptl_process_id);
    }
    
    return OMPI_SUCCESS;
}
/* Compatibility shim: forward a modex receive for the usnic BTL,
 * reporting the status through *rc instead of the return value.
 * On success *modexes/*size describe the received blob. */
void usnic_compat_modex_recv(int *rc,
                             mca_base_component_t *component,
                             opal_proc_t *proc,
                             struct opal_btl_usnic_modex_t **modexes,
                             size_t *size)
{
    int ret;

    ret = ompi_modex_recv(component, proc, (void**) modexes, size);
    *rc = ret;
}
/* ---- Example #4 ---- */
/* Receive the endpoint address that PML yalla published for @proc.
 * On success *address_p/*addrlen_p describe the received blob; on
 * failure an error is logged and the modex error code is returned. */
static int recv_ep_address(ompi_proc_t *proc, void **address_p, size_t *addrlen_p)
{
    int rc;

    rc = ompi_modex_recv(&mca_pml_yalla_component.pmlm_version, proc,
                         address_p, addrlen_p);
    if (0 > rc) {
        PML_YALLA_ERROR("Failed to receive EP address");
    }
    return rc;
}
/* ---- Example #5 ---- */
/*
 * Look up (or create and register) the SCTP proc structure for @ompi_proc.
 * The proc is cached in the component hash table so repeated lookups for
 * the same peer return the same object.  Returns NULL on failure.
 */
mca_btl_sctp_proc_t* mca_btl_sctp_proc_create(ompi_proc_t* ompi_proc)
{
    int rc;
    size_t size;
    mca_btl_sctp_proc_t* btl_proc;
    uint64_t hash = orte_util_hash_name(&ompi_proc->proc_name);

    OPAL_THREAD_LOCK(&mca_btl_sctp_component.sctp_lock);
    rc = opal_hash_table_get_value_uint64(&mca_btl_sctp_component.sctp_procs, 
                                          hash, (void**)&btl_proc);
    if(OMPI_SUCCESS == rc) {
        OPAL_THREAD_UNLOCK(&mca_btl_sctp_component.sctp_lock);
        return btl_proc;
    }

    btl_proc = OBJ_NEW(mca_btl_sctp_proc_t);
    if(NULL == btl_proc) {
        /* fix: the lock was still held on this early return, which would
         * deadlock every subsequent caller */
        OPAL_THREAD_UNLOCK(&mca_btl_sctp_component.sctp_lock);
        return NULL;
    }
    btl_proc->proc_ompi = ompi_proc;
    btl_proc->proc_name = ompi_proc->proc_name;

    /* add to hash table of all proc instance */
    opal_hash_table_set_value_uint64(&mca_btl_sctp_component.sctp_procs,
                                     hash, btl_proc);
    OPAL_THREAD_UNLOCK(&mca_btl_sctp_component.sctp_lock);

    /* lookup sctp parameters exported by this proc */
    rc = ompi_modex_recv( &mca_btl_sctp_component.super.btl_version,
            ompi_proc,
            (void**)&btl_proc->proc_addrs,
            &size );
    if(rc != OMPI_SUCCESS) {
        BTL_ERROR(("mca_base_modex_recv: failed with return value=%d", rc));
        OBJ_RELEASE(btl_proc);
        return NULL;
    }
    if(0 != (size % sizeof(mca_btl_sctp_addr_t))) {
        BTL_ERROR(("mca_base_modex_recv: invalid size %" PRIsize_t "\n", size));
        /* fix: this path leaked btl_proc (the sibling error path above
         * releases it) */
        OBJ_RELEASE(btl_proc);
        return NULL;
    }
    btl_proc->proc_addr_count = size / sizeof(mca_btl_sctp_addr_t);

    /* allocate space for endpoint array - one for each exported address */
    btl_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
        malloc(btl_proc->proc_addr_count * sizeof(mca_btl_base_endpoint_t*));
    if(NULL == btl_proc->proc_endpoints) {
        OBJ_RELEASE(btl_proc);
        return NULL;
    }
    if(NULL == mca_btl_sctp_component.sctp_local && ompi_proc == ompi_proc_local()) {
        mca_btl_sctp_component.sctp_local = btl_proc;
    }
    return btl_proc;
}
/* ---- Example #6 ---- */
/*
 * Look up (or create) the uDAPL proc structure for @ompi_proc, fetching
 * the peer's published address list from the modex.  Returns NULL on
 * allocation failure, modex failure, or malformed address data.
 */
mca_btl_udapl_proc_t* mca_btl_udapl_proc_create(ompi_proc_t* ompi_proc)
{
    mca_btl_udapl_proc_t* udapl_proc = NULL;
    size_t size;
    int rc;

    /* Check if we have already created a uDAPL proc
     * structure for this ompi process */
    udapl_proc = mca_btl_udapl_proc_lookup_ompi(ompi_proc);
    if(udapl_proc != NULL) {
        return udapl_proc;
    }

    /* create a new udapl proc out of the ompi_proc ... */
    udapl_proc = OBJ_NEW(mca_btl_udapl_proc_t);
    if(NULL == udapl_proc) {
        /* fix: the OBJ_NEW result was dereferenced without a NULL check */
        return NULL;
    }
    udapl_proc->proc_endpoint_count = 0;
    udapl_proc->proc_ompi = ompi_proc;

    /* query for the peer address info */
    rc = ompi_modex_recv(
                 &mca_btl_udapl_component.super.btl_version,
                 ompi_proc,
                 (void*)&udapl_proc->proc_addrs,
                 &size); 
    if(OMPI_SUCCESS != rc) {
        BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
            ("ompi_modex_recv failed for peer %s",
            ORTE_NAME_PRINT(&ompi_proc->proc_name)));
        OBJ_RELEASE(udapl_proc);
        return NULL;
    }

    if((size % sizeof(mca_btl_udapl_addr_t)) != 0) {
        BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
            ("invalid udapl address for peer %s",
            ORTE_NAME_PRINT(&ompi_proc->proc_name)));
        OBJ_RELEASE(udapl_proc);
        return NULL;
    }

    udapl_proc->proc_addr_count = size/sizeof(mca_btl_udapl_addr_t);
    if (0 == udapl_proc->proc_addr_count) {
        udapl_proc->proc_endpoints = NULL;
    } else {
        udapl_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
            malloc(udapl_proc->proc_addr_count * sizeof(mca_btl_base_endpoint_t*));
    }
    /* note: a peer with zero addresses also lands here (NULL endpoints) and
     * is rejected, matching the original behavior */
    if(NULL == udapl_proc->proc_endpoints) {
        OBJ_RELEASE(udapl_proc);
        return NULL;
    }
    return udapl_proc;
}
/* ---- Example #7 ---- */
/*
 * Populate mtl_peer_data[0..nprocs-1] with the ptl_process_t each peer
 * published through the modex.  Heterogeneous peers are rejected.
 *
 * Returns OMPI_SUCCESS, OMPI_ERR_NOT_SUPPORTED (heterogeneous peer),
 * OMPI_ERR_OUT_OF_RESOURCE, OMPI_ERR_BAD_PARAM (bad modex size), or the
 * modex error code.
 */
int
ompi_mtl_portals4_add_procs(struct mca_mtl_base_module_t *mtl,
                            size_t nprocs,
                            struct ompi_proc_t** procs, 
                            struct mca_mtl_base_endpoint_t **mtl_peer_data)
{
    int ret;
    size_t i;

    /* Get the list of ptl_process_id_t from the runtime and copy into structure */
    for (i = 0 ; i < nprocs ; ++i) {
        ptl_process_t *id;
        size_t size;

        if (procs[i]->proc_arch != ompi_proc_local()->proc_arch) {
            opal_output_verbose(1, ompi_mtl_base_output,
                                "Portals 4 MTL does not support heterogeneous operations.");
            opal_output_verbose(1, ompi_mtl_base_output,
                                "Proc %s architecture %x, mine %x.",
                                ORTE_NAME_PRINT(&procs[i]->proc_name), 
                                procs[i]->proc_arch, ompi_proc_local()->proc_arch);
            return OMPI_ERR_NOT_SUPPORTED;
        }

        mtl_peer_data[i] = malloc(sizeof(struct mca_mtl_base_endpoint_t));
        if (NULL == mtl_peer_data[i]) {
            /* fix: the old message printed 'ret', which was still
             * uninitialized on this path (undefined behavior) */
            opal_output_verbose(1, ompi_mtl_base_output,
                                "%s:%d: malloc failed\n",
                                __FILE__, __LINE__);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }

        ret = ompi_modex_recv(&mca_mtl_portals4_component.mtl_version,
                              procs[i], (void**) &id, &size);
        if (OMPI_SUCCESS != ret) {
            opal_output_verbose(1, ompi_mtl_base_output,
                                "%s:%d: ompi_modex_recv failed: %d\n",
                                __FILE__, __LINE__, ret);
            /* fix: release the endpoint allocated above (was leaked) */
            free(mtl_peer_data[i]);
            mtl_peer_data[i] = NULL;
            return ret;
        } else if (sizeof(ptl_process_t) != size) {
            /* fix: report the offending size -- the old message printed
             * 'ret', which is OMPI_SUCCESS on this branch */
            opal_output_verbose(1, ompi_mtl_base_output,
                                "%s:%d: ompi_modex_recv returned %d bytes, expected %d\n",
                                __FILE__, __LINE__, (int) size,
                                (int) sizeof(ptl_process_t));
            free(mtl_peer_data[i]);
            mtl_peer_data[i] = NULL;
            return OMPI_ERR_BAD_PARAM;
        }

        mtl_peer_data[i]->ptl_proc = *id;
    }

    return OMPI_SUCCESS;
}
/* ---- Example #8 ---- */
static int init_vader_endpoint (struct mca_btl_base_endpoint_t *ep, struct ompi_proc_t *proc, int remote_rank) {
    const int fbox_in_offset = MCA_BTL_VADER_LOCAL_RANK - (MCA_BTL_VADER_LOCAL_RANK > remote_rank);
    const int fbox_out_offset = remote_rank - (MCA_BTL_VADER_LOCAL_RANK < remote_rank);
    mca_btl_vader_component_t *component = &mca_btl_vader_component;
    struct vader_modex_t *modex;
    size_t msg_size;
    int rc;

    ep->peer_smp_rank = remote_rank;

    if (remote_rank != MCA_BTL_VADER_LOCAL_RANK) {
        if (OMPI_SUCCESS != (rc = ompi_modex_recv(&component->super.btl_version,
                                                  proc, (void *)&modex, &msg_size))) {
            return rc;
        }

        /* attatch to the remote segment */
#if OMPI_BTL_VADER_HAVE_XPMEM
        /* always use xpmem if it is available */
        ep->apid = xpmem_get (modex->seg_id, XPMEM_RDWR, XPMEM_PERMIT_MODE, (void *) 0666);
        ep->rcache = mca_rcache_base_module_create("vma");
        (void) vader_get_registation (ep, modex->segment_base, mca_btl_vader_component.segment_size,
                                      MCA_MPOOL_FLAGS_PERSIST, (void **) &ep->segment_base);
#else
        int offset = offsetof (opal_shmem_ds_t, seg_name);

        memcpy (&ep->seg_ds, modex->buffer, offset);
        memcpy (&ep->seg_ds.seg_base_addr, modex->buffer + offset, sizeof (ep->seg_ds.seg_base_addr));
        offset += sizeof (ep->seg_ds.seg_base_addr);
        strncpy (ep->seg_ds.seg_name, modex->buffer + offset, OPAL_PATH_MAX);

        ep->segment_base = opal_shmem_segment_attach (&ep->seg_ds);
        if (NULL == ep->segment_base) {
            return rc;
        }
#endif

        free (modex);

        ep->next_fbox_out = 0;
        ep->next_fbox_in  = 0;
        ep->next_sequence = 0;
        ep->expected_sequence = 0;

        ep->fbox_in  = (struct mca_btl_vader_fbox_t * restrict) (ep->segment_base + MCA_BTL_VADER_FIFO_SIZE +
                                                                 fbox_in_offset * MCA_BTL_VADER_FBOX_PEER_SIZE);
        ep->fbox_out = (struct mca_btl_vader_fbox_t * restrict) (component->my_segment + MCA_BTL_VADER_FIFO_SIZE +
                                                                 fbox_out_offset * MCA_BTL_VADER_FBOX_PEER_SIZE);
    } else {
/* ---- Example #9 ---- */
/*
 * Look up (or create) the Elan proc structure for @ompi_proc, fetching
 * the peer's published rail position ids from the modex.
 * Returns NULL on allocation or modex failure.
 */
mca_btl_elan_proc_t* mca_btl_elan_proc_create(ompi_proc_t* ompi_proc)
{
    int rc;
    size_t size;
    mca_btl_elan_proc_t* module_proc = NULL;
    /* Check if we have already created a Elan proc
     * structure for this ompi process */
    module_proc = mca_btl_elan_proc_lookup_ompi(ompi_proc);
    if(module_proc != NULL) {
        /* Gotcha! */
        return module_proc;
    }
    /* Oops! First time, gotta create a new Elan proc
     * out of the ompi_proc ... */
    module_proc = OBJ_NEW(mca_btl_elan_proc_t);
    if(NULL == module_proc)
        return NULL;
    /* Initialize number of peer */
    module_proc->proc_endpoint_count = 0;
    module_proc->proc_ompi = ompi_proc;

    /* build a unique identifier (of arbitrary
     * size) to represent the proc */
    module_proc->proc_guid = ompi_proc->proc_name;
    rc = ompi_modex_recv( &mca_btl_elan_component.super.btl_version,
                          ompi_proc,
                          (void**)&module_proc->position_id_array,
                          &size );
    if(rc != OMPI_SUCCESS) {
        BTL_ERROR(("mca_base_modex_recv: failed with return value=%d", rc));
        OBJ_RELEASE(module_proc);
        return NULL;
    }
    /* one unsigned int per rail (stray ';;' removed) */
    module_proc->proc_rail_count = size / sizeof(unsigned int);
    /* XXX: Right now, there can be only 1 peer associated
     * with a proc. Needs a little bit change in 
     * mca_btl_elan_proc_t to allow on demand increasing of
     * number of endpoints for this proc 
     */

    module_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
        malloc((1+module_proc->proc_rail_count )* sizeof(mca_btl_base_endpoint_t*));
    if(NULL == module_proc->proc_endpoints) {
        OBJ_RELEASE(module_proc);
        return NULL;
    }
    return module_proc;
}
/* ---- Example #10 ---- */
/*
 * Create an MX endpooint for @ompi_proc: fetch its single published MX
 * address from the modex and establish an mx_connect() link, retrying
 * on timeout up to mx_retries times.  Returns NULL on any failure.
 */
mca_mtl_mx_endpoint_t* mca_mtl_mx_endpoint_create(ompi_proc_t* ompi_proc) { 
    mca_mtl_mx_endpoint_t* mtl_mx_endpoint = NULL;
    int rc; 
    mca_mtl_mx_addr_t *mx_peer; 
    size_t size;
    mx_return_t mx_return;
    int num_retry = 0;
    /* get the remote proc's address (only one) */
    rc = ompi_modex_recv(&mca_mtl_mx_component.super.mtl_version, 
                                 ompi_proc, (void**)&mx_peer, &size);
    if( rc != OMPI_SUCCESS || size != sizeof(mca_mtl_mx_addr_t)) { 
        return NULL; 
    }
    
    mtl_mx_endpoint = (mca_mtl_mx_endpoint_t*) OBJ_NEW(mca_mtl_mx_endpoint_t);
    if( NULL == mtl_mx_endpoint ) {
        /* fix: the OBJ_NEW result was dereferenced without a NULL check */
        return NULL;
    }
    mtl_mx_endpoint->mx_peer = mx_peer;
    
 retry_connect:
    mx_return = mx_connect(ompi_mtl_mx.mx_endpoint, 
                           mx_peer->nic_id, 
                           mx_peer->endpoint_id, 
                           ompi_mtl_mx.mx_filter, 
                           ompi_mtl_mx.mx_timeout, 
                           &mtl_mx_endpoint->mx_peer_addr);
    if(MX_SUCCESS != mx_return) { 
        char peer_name[MX_MAX_HOSTNAME_LEN];
        if(MX_TIMEOUT == mx_return) { 
            if( num_retry++ < ompi_mtl_mx.mx_retries ) { 
                goto retry_connect;
            }
        }
        
        if(MX_SUCCESS != mx_nic_id_to_hostname( mx_peer->nic_id, peer_name)) { 
            sprintf( peer_name, "unknown %lx nic_id", (long)mx_peer->nic_id ); 
        }
        opal_output(ompi_mtl_base_output, 
                    "mx_connect fail for %s with key %x (error %s)\n", 
                    peer_name, ompi_mtl_mx.mx_filter, mx_strerror(mx_return) );
        /* fix: the endpoint object was leaked on this failure path */
        OBJ_RELEASE(mtl_mx_endpoint);
        return NULL;
    }
    
    
    return mtl_mx_endpoint;
    
}
/*
 * Initialize the vader (shared memory) endpoint @ep for peer @remote_rank.
 * For a remote peer, fetch its modex blob, attach to its XPMEM segment,
 * and set up the fifo/fast-box pointers; for ourselves, just point at the
 * local segment.  Returns OMPI_SUCCESS or the modex error code.
 */
static int init_vader_endpoint (struct mca_btl_base_endpoint_t *ep, struct ompi_proc_t *proc, int remote_rank) {
    const int fbox_in_offset = MCA_BTL_VADER_LOCAL_RANK - (MCA_BTL_VADER_LOCAL_RANK > remote_rank);
    const int fbox_out_offset = remote_rank - (MCA_BTL_VADER_LOCAL_RANK < remote_rank);
    mca_btl_vader_component_t *component = &mca_btl_vader_component;
    struct vader_modex_t *modex;
    size_t msg_size;
    int rc;

    ep->peer_smp_rank = remote_rank;

    if (remote_rank != MCA_BTL_VADER_LOCAL_RANK) {
        if (OMPI_SUCCESS != (rc = ompi_modex_recv(&component->super.btl_version,
                                                  proc, (void *)&modex, &msg_size))) {
            return rc;
        }

        ep->apid = xpmem_get (modex->seg_id, XPMEM_RDWR, XPMEM_PERMIT_MODE, (void *) 0666);
        ep->rcache = mca_rcache_base_module_create("vma");
        ep->next_fbox_out = 0;
        ep->next_fbox_in  = 0;

        /* attatch to the remote segment */
        (void) vader_get_registation (ep, modex->segment_base, mca_btl_vader_component.segment_size,
                                      MCA_MPOOL_FLAGS_PERSIST, (void **) &ep->segment_base);

        /* fix: the modex buffer is owned by the receiver and was never
         * freed here (the other variant of this function frees it) */
        free (modex);

        ep->fifo     = (struct vader_fifo_t *) ep->segment_base;
        ep->fbox_in  = ep->segment_base + 4096 + fbox_in_offset * MCA_BTL_VADER_FBOX_PEER_SIZE;
        ep->fbox_out = component->my_segment + 4096 + fbox_out_offset * MCA_BTL_VADER_FBOX_PEER_SIZE;
    } else {
        /* set up the segment base so we can calculate a virtual to real for local pointers */
        ep->segment_base = component->my_segment;
        ep->fifo = (struct vader_fifo_t *) ep->segment_base;
    }

    return OMPI_SUCCESS;
}
/* ---- Example #12 ---- */
/*
 * Collect the PSM endpoint id each peer published through the modex and
 * connect to all of them with a single psm_ep_connect() call.  On success
 * an mca_mtl_psm_endpoint_t is attached to every proc.
 *
 * Returns OMPI_SUCCESS, OMPI_ERR_OUT_OF_RESOURCE, or OMPI_ERROR.
 */
int
ompi_mtl_psm_add_procs(struct mca_mtl_base_module_t *mtl,
                      size_t nprocs,
                      struct ompi_proc_t** procs)
{
    int i,j; 
    int rc;
    psm_epid_t   *epids_in = NULL;
    psm_epid_t   *epid;
    psm_epaddr_t *epaddrs_out = NULL;
    psm_error_t  *errs_out = NULL, err;
    size_t size;
    int proc_errors[PSM_ERROR_LAST] = { 0 };
    int timeout_in_secs;

    assert(mtl == &ompi_mtl_psm.super);
    rc = OMPI_ERR_OUT_OF_RESOURCE;

    errs_out = (psm_error_t *) malloc(nprocs * sizeof(psm_error_t));
    if (errs_out == NULL) {
        goto bail;
    }
    epids_in = (psm_epid_t *) malloc(nprocs * sizeof(psm_epid_t));
    if (epids_in == NULL) {
        goto bail;
    }
    epaddrs_out = (psm_epaddr_t *) malloc(nprocs * sizeof(psm_epaddr_t));
    if (epaddrs_out == NULL) {
        goto bail;
    }
    rc = OMPI_SUCCESS;

    /* Get the epids for all the processes from modex */
    for (i = 0; i < (int) nprocs; i++) {
        rc = ompi_modex_recv(&mca_mtl_psm_component.super.mtl_version, 
                             procs[i], (void**)&epid, &size);
        if (rc != OMPI_SUCCESS || size != sizeof(psm_epid_t)) {
            /* fix: the old code returned directly here, leaking the three
             * arrays allocated above -- go through the cleanup path */
            rc = OMPI_ERROR;
            goto bail;
        }
        epids_in[i] = *epid;
    }

    timeout_in_secs = max(ompi_mtl_psm.connect_timeout, 0.5 * nprocs);

    /* Suppress PSM's own error handling during the connect phase so we
     * can report the per-endpoint errors ourselves. */
    psm_error_register_handler(ompi_mtl_psm.ep, PSM_ERRHANDLER_NOP);

    err = psm_ep_connect(ompi_mtl_psm.ep,
                         nprocs,
                         epids_in,
                         NULL, /* connect all */
                         errs_out,
                         epaddrs_out,
                         timeout_in_secs * 1e9);
    if (err) {
        char *errstr = (char *) ompi_mtl_psm_connect_error_msg(err);
        if (errstr == NULL) {
            opal_output(0, "PSM returned unhandled/unknown connect error: %s\n",
                        psm_error_get_string(err));
        }
        /* Report each distinct error class once, listing the hosts that
         * failed with it. */
        for (i = 0; i < (int) nprocs; i++) {
            psm_error_t thiserr = errs_out[i];
            errstr = (char *) ompi_mtl_psm_connect_error_msg(thiserr);
            if (proc_errors[thiserr] == 0) {
                proc_errors[thiserr] = 1;
                opal_output(0, "PSM EP connect error (%s):", 
                            errstr ? errstr : "unknown connect error");
                for (j = 0; j < (int) nprocs; j++) {
                    if (errs_out[j] == thiserr) {
                        opal_output(0, " %s", (NULL == procs[j]->proc_hostname) ?
                                    "unknown" : procs[j]->proc_hostname);
                    }
                }
                opal_output(0, "\n");
            }
        }

        rc = OMPI_ERROR;
    }
    else {
        /* Default error handling is enabled, errors will not be returned to
         * user.  PSM prints the error and the offending endpoint's hostname
         * and exits with -1 */
        psm_error_register_handler(ompi_mtl_psm.ep, PSM_ERRHANDLER_DEFAULT);

        /* Fill in endpoint data */
        for (i = 0; i < (int) nprocs; i++) { 
            mca_mtl_psm_endpoint_t *endpoint = 
                (mca_mtl_psm_endpoint_t *) OBJ_NEW(mca_mtl_psm_endpoint_t);
            endpoint->peer_epid = epids_in[i];
            endpoint->peer_addr = epaddrs_out[i];
            procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_MTL] = endpoint;
        }

        rc = OMPI_SUCCESS;
    }
    
bail:
    /* free(NULL) is a no-op, so unconditional frees are safe here */
    free(epids_in);
    free(errs_out);
    free(epaddrs_out);

    return rc;
}
/* ---- Example #13 ---- */
/*
 * Look up (or create) the unreliable-datagram IB proc structure for
 * @ompi_proc, fetching its published address list from the modex.
 * Returns NULL on allocation failure, modex failure, malformed data, or
 * a peer with zero addresses.
 */
mca_btl_ud_proc_t* mca_btl_ud_proc_create(ompi_proc_t* ompi_proc)
{
    mca_btl_ud_proc_t* module_proc = NULL;
    size_t size;
    int rc;

    /* Check if we have already created a IB proc
     * structure for this ompi process */
    module_proc = mca_btl_ud_proc_lookup_ompi(ompi_proc);

    if(module_proc != NULL) {
        /* Gotcha! */
        return module_proc;
    }

    /* Oops! First time, gotta create a new IB proc out of the ompi_proc ... */
    module_proc = OBJ_NEW(mca_btl_ud_proc_t);
    if(NULL == module_proc) {
        /* fix: the OBJ_NEW result was dereferenced without a NULL check */
        return NULL;
    }
    /* Initialize number of peer */
    module_proc->proc_endpoint_count = 0;
    module_proc->proc_ompi = ompi_proc;

    /* build a unique identifier (of arbitrary size) to represent the proc */
    module_proc->proc_guid = ompi_proc->proc_name;


    /* query for the peer address info */
    rc = ompi_modex_recv(&mca_btl_ofud_component.super.btl_version,
                                 ompi_proc, (void*)&module_proc->proc_addrs,
                                 &size);

    if(OMPI_SUCCESS != rc) {
        opal_output(0,
                "[%s:%d] ompi_modex_recv failed for peer %s",
                __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
        OBJ_RELEASE(module_proc);
        return NULL;
    }

    if((size % sizeof(mca_btl_ud_addr_t)) != 0) {
        opal_output(0, "[%s:%d] invalid module address for peer %s",
                __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
        OBJ_RELEASE(module_proc);
        return NULL;
    }


    module_proc->proc_addr_count = size / sizeof(mca_btl_ud_addr_t);


    if (0 == module_proc->proc_addr_count) {
        module_proc->proc_endpoints = NULL;
    } else {
        module_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
            malloc(module_proc->proc_addr_count *
                    sizeof(mca_btl_base_endpoint_t*));
    }

    /* note: zero published addresses also lands here (NULL endpoints) and
     * is rejected, matching the original behavior */
    if(NULL == module_proc->proc_endpoints) {
        OBJ_RELEASE(module_proc);
        return NULL;
    }
    return module_proc;
}
/* ---- Example #14 ---- */
/*
 * Look up (or create and register) the TCP proc structure for @ompi_proc.
 * Fetches the peer's exported address list from the modex and normalizes
 * the address-family constants to the OS values.  Returns NULL on failure.
 */
mca_btl_tcp2_proc_t* mca_btl_tcp2_proc_create(ompi_proc_t* ompi_proc)
{
    int rc;
    size_t size;
    mca_btl_tcp2_proc_t* btl_proc;
    uint64_t hash = orte_util_hash_name(&ompi_proc->proc_name);

    OPAL_THREAD_LOCK(&mca_btl_tcp2_component.tcp_lock);
    rc = opal_hash_table_get_value_uint64(&mca_btl_tcp2_component.tcp_procs, 
                                          hash, (void**)&btl_proc);
    if(OMPI_SUCCESS == rc) {
        OPAL_THREAD_UNLOCK(&mca_btl_tcp2_component.tcp_lock);
        return btl_proc;
    }

    btl_proc = OBJ_NEW(mca_btl_tcp2_proc_t);
    if(NULL == btl_proc) {
        /* fix: the lock was still held on this early return, which would
         * deadlock every subsequent caller */
        OPAL_THREAD_UNLOCK(&mca_btl_tcp2_component.tcp_lock);
        return NULL;
    }
    btl_proc->proc_ompi = ompi_proc;
    
    /* add to hash table of all proc instance */
    opal_hash_table_set_value_uint64(&mca_btl_tcp2_component.tcp_procs,
                                     hash, btl_proc);
    OPAL_THREAD_UNLOCK(&mca_btl_tcp2_component.tcp_lock);

    /* lookup tcp parameters exported by this proc */
    rc = ompi_modex_recv( &mca_btl_tcp2_component.super.btl_version,
                                  ompi_proc,
                                  (void**)&btl_proc->proc_addrs,
                                  &size );
    if(rc != OMPI_SUCCESS) {
        BTL_ERROR(("mca_base_modex_recv: failed with return value=%d", rc));
        OBJ_RELEASE(btl_proc);
        return NULL;
    }
    if(0 != (size % sizeof(mca_btl_tcp2_addr_t))) {
        BTL_ERROR(("mca_base_modex_recv: invalid size %lu: btl-size: %lu\n",
          (unsigned long) size, (unsigned long)sizeof(mca_btl_tcp2_addr_t)));
        /* fix: this path leaked btl_proc (the sibling error path above
         * releases it) */
        OBJ_RELEASE(btl_proc);
        return NULL;
    }
    btl_proc->proc_addr_count = size / sizeof(mca_btl_tcp2_addr_t);

    /* allocate space for endpoint array - one for each exported address */
    btl_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
        malloc((1 + btl_proc->proc_addr_count) *
                sizeof(mca_btl_base_endpoint_t*));
    if(NULL == btl_proc->proc_endpoints) {
        OBJ_RELEASE(btl_proc);
        return NULL;
    }
    if(NULL == mca_btl_tcp2_component.tcp_local && ompi_proc == ompi_proc_local()) {
        mca_btl_tcp2_component.tcp_local = btl_proc;
    }
    {
        /* convert the OMPI addr_family field to OS constants,
         * so we can check for AF_INET (or AF_INET6) and don't have
         * to deal with byte ordering anymore.
         */
        unsigned int i;
        for (i = 0; i < btl_proc->proc_addr_count; i++) {
            if (MCA_BTL_TCP_AF_INET == btl_proc->proc_addrs[i].addr_family) {
                btl_proc->proc_addrs[i].addr_family = AF_INET;
            }
#if OPAL_WANT_IPV6
            if (MCA_BTL_TCP_AF_INET6 == btl_proc->proc_addrs[i].addr_family) {
                btl_proc->proc_addrs[i].addr_family = AF_INET6;
            }
#endif
        }
    }
    return btl_proc;
}
/*
 * Initialize (once) the shared Portals network interface handle.  When
 * UTCP parameter setup is requested, gather every peer's nid/pid from the
 * modex, export them through PTL_NIDMAP/PTL_PIDMAP environment variables,
 * and bring up the interface with PtlInit/PtlNIInit.
 *
 * Returns OMPI_SUCCESS on success, otherwise an OMPI error code.
 */
int
ompi_common_portals_ni_initialize(ptl_handle_ni_t *ni_handle, bool *accel)
{
    int ret;

    *accel = false;

    OPAL_THREAD_ADD32(&ni_usage_count, 1);
    if (PTL_INVALID_HANDLE != active_ni_h) {
        /* interface already up -- hand out the cached handle */
        *ni_handle = active_ni_h;
        return OMPI_SUCCESS;
    }

    if (setup_utcp_params) {
        ompi_proc_t **procs;
        int my_rid = 0;
        ptl_process_id_t *info;
        char *nidmap = NULL, *pidmap = NULL;
        char *nid_str, *pid_str;
        size_t map_size = 0;
        size_t nprocs, size, i;
        char *tmp;
        ompi_proc_t* proc_self = ompi_proc_local();
        int max_interfaces;

        /* get our world */
        procs = ompi_proc_world(&nprocs);

        map_size = nprocs * 12 + 1; /* 12 is max length of long in decimal */
        nidmap = malloc(map_size);
        pidmap = malloc(map_size);
        nid_str = malloc(12 + 1);
        pid_str = malloc(12 + 1);
        if (NULL == nidmap || NULL == pidmap || 
            NULL == nid_str || NULL == pid_str) {
            /* fix: release whichever allocations succeeded (free(NULL) is
             * a no-op); the old code leaked them */
            free(nidmap); free(pidmap); free(nid_str); free(pid_str);
            return OMPI_ERROR;
        }
         
        for (i = 0 ; i < nprocs ; ++i) {
            if (proc_self == procs[i]) my_rid = i;

            ret = ompi_modex_recv(&portals_component,
                                  procs[i], (void**) &info, &size);
            if (OMPI_SUCCESS != ret) {
                opal_output(0, "%5d: ompi_modex_recv failed: %d", 
                            getpid(), ret);
                free(nidmap); free(pidmap); free(nid_str); free(pid_str);
                return ret;
            } else if (sizeof(ptl_process_id_t) != size) {
                /* cast to int: passing size_t for %d is undefined behavior */
                opal_output(0, "%5d: ompi_modex_recv returned size %d, expected %d", 
                            getpid(), (int) size,
                            (int) sizeof(ptl_process_id_t));
                free(info);
                free(nidmap); free(pidmap); free(nid_str); free(pid_str);
                return OMPI_ERROR;
            }

            /* maps are ":"-separated lists of host-order nids/pids */
            if (i == 0) {
                snprintf(nidmap, map_size, "%u", ntohl(info->nid));
                snprintf(pidmap, map_size, "%u", ntohl(info->pid));
            } else {
                snprintf(nid_str, 12 + 1, ":%u", ntohl(info->nid));
                snprintf(pid_str, 12 + 1, ":%u", ntohl(info->pid));
                strncat(nidmap, nid_str, 12);
                strncat(pidmap, pid_str, 12);
            }

            free(info);
        }

        /* note: strings handed to putenv() become part of the environment
         * and must not be freed */
        asprintf(&tmp, "PTL_MY_RID=%u", my_rid);
        putenv(tmp);
        asprintf(&tmp, "PTL_NIDMAP=%s", nidmap);
        putenv(tmp);
        asprintf(&tmp, "PTL_PIDMAP=%s", pidmap);
        putenv(tmp);
        asprintf(&tmp, "PTL_IFACE=%s", ptl_ifname);
        putenv(tmp);

        free(pidmap);
        free(nidmap);
        free(pid_str);
        free(nid_str);

        /*
         * Initialize Portals
         */

        ret = PtlInit(&max_interfaces);
        if (PTL_OK != ret) {
            opal_output(0, "%5d: PtlInit failed, returning %d\n", 
                        getpid(), ret);
            return OMPI_ERR_NOT_AVAILABLE;
        }
        init_called = true;

        /* tell the UTCP runtime code to read the env variables */
        PtlSetRank(PTL_INVALID_HANDLE, -1, -1);

        /* Initialize a network device */
        ret = PtlNIInit(PTL_IFACE_DEFAULT, /* interface to initialize */
                        PTL_PID_ANY,       /* let library assign our pid */
                        NULL,              /* no desired limits */
                        NULL,              /* no need to have limits around */
                        &active_ni_h       /* our interface handle */
                        );
        if (PTL_OK != ret) {
            opal_output(0, "%5d: PtlNIInit failed, returning %d\n", 
                        getpid(), ret);
            return OMPI_ERR_FATAL;
        }

        *ni_handle = active_ni_h;

        return OMPI_SUCCESS;
    }

    /* shouldn't ever be able to get here */
    return OMPI_ERROR;
}
/* ---- Example #16 ---- */
/*
 * Create an ompi_btl_usnic_proc_t and initialize it with modex info
 * and an empty array of endpoints.
 *
 * Returns OMPI_ERR_UNREACH if we can't reach the peer (i.e., we can't
 * find their modex data).
 */
/*
 * Build an ompi_btl_usnic_proc_t for @ompi_proc: fetch its modex blob
 * and allocate the (initially empty) endpoint bookkeeping arrays.
 * On success the new proc is stored in *usnic_proc.
 *
 * Returns OMPI_ERR_UNREACH when the peer published no usnic modex data
 * (i.e. it is not running the usnic BTL).
 */
static int create_proc(ompi_proc_t *ompi_proc, 
                       ompi_btl_usnic_proc_t **usnic_proc)
{
    ompi_btl_usnic_proc_t *new_proc;
    size_t modex_size;
    int ret;

    *usnic_proc = NULL;

    /* Allocate a fresh proc object */
    new_proc = OBJ_NEW(ompi_btl_usnic_proc_t);
    if (NULL == new_proc) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    new_proc->proc_endpoint_count = 0;
    new_proc->proc_ompi = ompi_proc;

    /* Pull the peer's published address blob out of the modex */
    ret = ompi_modex_recv(&mca_btl_usnic_component.super.btl_version,
                          ompi_proc, (void*)&new_proc->proc_modex,
                          &modex_size);

    if (OPAL_ERR_DATA_VALUE_NOT_FOUND == ret) {
        /* Peer has no usnic modex key: it is not running this BTL.
           Not an error -- simply unreachable via usnic. */
        OBJ_RELEASE(new_proc);
        return OMPI_ERR_UNREACH;
    }
    if (OMPI_SUCCESS != ret) {
        opal_show_help("help-mpi-btl-usnic.txt",
                       "internal error during init",
                       true,
                       ompi_process_info.nodename,
                       "<none>", 0,
                       "ompi_modex_recv() failed", __FILE__, __LINE__,
                       opal_strerror(ret));
        OBJ_RELEASE(new_proc);
        return OMPI_ERROR;
    }

    /* The blob must be a whole number of address records */
    if (0 != (modex_size % sizeof(ompi_btl_usnic_addr_t))) {
        char msg[1024];

        snprintf(msg, sizeof(msg), 
                 "sizeof(modex for peer %s data) == %d, expected multiple of %d",
                 OMPI_NAME_PRINT(&ompi_proc->proc_name),
                 (int) modex_size, (int) sizeof(ompi_btl_usnic_addr_t));
        opal_show_help("help-mpi-btl-usnic.txt", "internal error during init",
                       true,
                       ompi_process_info.nodename,
                       "<none>", 0,
                       "invalid modex data", __FILE__, __LINE__,
                       msg);

        OBJ_RELEASE(new_proc);
        return OMPI_ERR_VALUE_OUT_OF_BOUNDS;
    }

    new_proc->proc_modex_count = modex_size / sizeof(ompi_btl_usnic_addr_t);
    if (0 == new_proc->proc_modex_count) {
        /* No addresses published -- treat the peer as unreachable */
        new_proc->proc_endpoints = NULL;
        OBJ_RELEASE(new_proc);
        return OMPI_ERR_UNREACH;
    }

    new_proc->proc_modex_claimed = (bool*) 
        calloc(new_proc->proc_modex_count, sizeof(bool));
    if (NULL == new_proc->proc_modex_claimed) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        OBJ_RELEASE(new_proc);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    new_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
        calloc(new_proc->proc_modex_count, sizeof(mca_btl_base_endpoint_t*));
    if (NULL == new_proc->proc_endpoints) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        OBJ_RELEASE(new_proc);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    *usnic_proc = new_proc;
    return OMPI_SUCCESS;
}
/* ---- Example #17 ---- */
/**
 * Create a MX process structure. There is a one-to-one correspondence
 * between a ompi_proc_t and a mca_btl_mx_proc_t instance. We cache
 * additional data (specifically the list of mca_btl_mx_endpoint_t instances, 
 * and published addresses) associated w/ a given destination on this
 * datastructure.
 */
mca_btl_mx_proc_t* mca_btl_mx_proc_create(ompi_proc_t* ompi_proc)
{
    mca_btl_mx_proc_t* module_proc = NULL;
    mca_btl_mx_addr_t  *mx_peers;
    int i, j, rc, mx_peers_count, *mx_routing;
    bool at_least_one_route = false;
    size_t size;

    /* Check if we have already created a MX proc
     * structure for this ompi process */
    module_proc = mca_btl_mx_proc_lookup_ompi(ompi_proc);
    if( module_proc != NULL ) {
        return module_proc;  /* Gotcha! */
    }

    /* query for the peer address info */
    rc = ompi_modex_recv( &mca_btl_mx_component.super.btl_version,
				  ompi_proc, (void*)&mx_peers, &size );
    if( OMPI_SUCCESS != rc ) {
        opal_output( 0, "mca_pml_base_modex_recv failed for peer %s",
		     OMPI_NAME_PRINT(&ompi_proc->proc_name) );
	return NULL;
    }

    if( size < sizeof(mca_btl_mx_addr_t) ) {  /* no available connection */
        free( mx_peers );  /* fix: the modex buffer was leaked here */
        return NULL;
    }
    if( (size % sizeof(mca_btl_mx_addr_t)) != 0 ) {
        opal_output( 0, "invalid mx address for peer %s",
		     OMPI_NAME_PRINT(&ompi_proc->proc_name) );
        free( mx_peers );  /* fix: the modex buffer was leaked here */
	return NULL;
    }
    /* Let's see if we have a way to connect to the remote proc using MX.
     * Without the routing information from the mapper, it is pretty
     * to do this. Right now, we base this connection detection on the last
     * 6 digits of the mapper MAC.
     */
    mx_peers_count = size / sizeof(mca_btl_mx_addr_t);
    mx_routing = (int*)malloc( mx_peers_count * sizeof(int) );
    if( NULL == mx_routing ) {  /* fix: malloc result was unchecked */
        free( mx_peers );
        return NULL;
    }
    for( i = 0; i < mx_peers_count; mx_routing[i++] = -1 );

    for( i = 0; i < mx_peers_count; i++ ) {
        mca_btl_mx_module_t* mx_btl;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
        /* fix: convert the entry being inspected this iteration -- the old
         * code indexed with 'rc' (constant OMPI_SUCCESS), so only entry 0
         * was ever byte-swapped */
        BTL_MX_ADDR_NTOH(mx_peers[i]);
#endif
	for( j = 0; j < mca_btl_mx_component.mx_num_btls; j++ ) {
	    mx_btl = mca_btl_mx_component.mx_btls[j];
            /* fix: compare against the remote peer entry 'i' -- the old
             * code used mx_peers[j], which indexes by local BTL and can
             * read past the peer array when mx_num_btls > mx_peers_count */
            if( mx_btl->mx_unique_network_id == mx_peers[i].unique_network_id ) {
                /* There is at least one connection between these two nodes */
	        if( -1 == mx_routing[j] ) {
		    /* First connection */
		    mx_routing[j] = i;
		    at_least_one_route = true;
		    break;
		}
		/* If multiple remote endpoints match mine, we keep going. As a
		 * result we will match them in order, i.e. remote endpoint 0
		 * will be connected to local endpoint 0.
		 */
            }
        }
    }
    if( false == at_least_one_route ) {
        free(mx_routing);
        free(mx_peers);  /* fix: the modex buffer was leaked here */
	return NULL;
    }

    module_proc = OBJ_NEW(mca_btl_mx_proc_t);
    module_proc->proc_ompi      = ompi_proc;
    module_proc->mx_peers_count = mx_peers_count;
    module_proc->mx_peers       = mx_peers;
    module_proc->mx_routing     = mx_routing;
    return module_proc;
}
示例#18
0
/*
 * Build a new ompi_btl_usnic_proc_t for the given ompi proc: pull the
 * peer's modex blob and set up (empty) per-address bookkeeping arrays.
 * Returns NULL on any failure; the partially-built proc is released.
 */
static ompi_btl_usnic_proc_t *create_proc(ompi_proc_t *ompi_proc)
{
    size_t modex_size = 0;
    int ret;

    /* Fresh proc object; bail out immediately if allocation fails */
    ompi_btl_usnic_proc_t *proc = OBJ_NEW(ompi_btl_usnic_proc_t);
    if (NULL == proc) {
        return NULL;
    }

    /* No endpoints yet; remember which ompi proc we belong to */
    proc->proc_endpoint_count = 0;
    proc->proc_ompi = ompi_proc;

    /* Receive the peer's published address blob into proc_modex */
    ret = ompi_modex_recv(&mca_btl_usnic_component.super.btl_version,
                          ompi_proc, (void*)&proc->proc_modex,
                          &modex_size);
    if (OMPI_SUCCESS != ret) {
        opal_show_help("help-mpi-btl-usnic.txt", "internal error during init",
                       true,
                       ompi_process_info.nodename,
                       "<none>", 0,
                       "ompi_modex_recv() failed", __FILE__, __LINE__,
                       opal_strerror(ret));
        OBJ_RELEASE(proc);
        return NULL;
    }

    /* The blob must contain a whole number of address entries */
    if (0 != (modex_size % sizeof(ompi_btl_usnic_addr_t))) {
        char msg[1024];

        snprintf(msg, sizeof(msg), 
                 "sizeof(modex for peer %s data) == %d, expected multiple of %d",
                 OMPI_NAME_PRINT(&ompi_proc->proc_name),
                 (int) modex_size, (int) sizeof(ompi_btl_usnic_addr_t));
        opal_show_help("help-mpi-btl-usnic.txt", "internal error during init",
                       true,
                       ompi_process_info.nodename,
                       "<none>", 0,
                       "invalid modex data", __FILE__, __LINE__,
                       msg);

        OBJ_RELEASE(proc);
        return NULL;
    }

    /* An empty modex means this peer is unreachable via usnic */
    proc->proc_modex_count = modex_size / sizeof(ompi_btl_usnic_addr_t);
    if (0 == proc->proc_modex_count) {
        proc->proc_endpoints = NULL;
        OBJ_RELEASE(proc);
        return NULL;
    }

    /* One "claimed" flag per modex entry, all initially false */
    proc->proc_modex_claimed =
        (bool *) calloc(proc->proc_modex_count, sizeof(bool));
    if (NULL == proc->proc_modex_claimed) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        OBJ_RELEASE(proc);
        return NULL;
    }

    /* One (initially NULL) endpoint slot per modex entry */
    proc->proc_endpoints = (mca_btl_base_endpoint_t **)
        calloc(proc->proc_modex_count, sizeof(mca_btl_base_endpoint_t *));
    if (NULL == proc->proc_endpoints) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        OBJ_RELEASE(proc);
        return NULL;
    }

    return proc;
}