static struct ompi_proc_t *ompi_group_dense_lookup_raw (ompi_group_t *group, const int peer_id)
{
    if (OPAL_UNLIKELY(ompi_proc_is_sentinel (group->grp_proc_pointers[peer_id]))) {
        ompi_proc_t *proc =
            (ompi_proc_t *) ompi_proc_lookup (ompi_proc_sentinel_to_name ((intptr_t) group->grp_proc_pointers[peer_id]));
        if (NULL != proc) {
            /* replace sentinel value with an actual ompi_proc_t */
            group->grp_proc_pointers[peer_id] = proc;
            /* retain the proc */
            OBJ_RETAIN(group->grp_proc_pointers[peer_id]);
        }
    }

    return group->grp_proc_pointers[peer_id];
}
/**
 * This PML monitors only the processes in MPI_COMM_WORLD. As OMPI now adds
 * peers lazily on the first call to add_procs, we need to check how many
 * processes are in MPI_COMM_WORLD to create the storage with the right size.
 */
int mca_pml_monitoring_add_procs(struct ompi_proc_t **procs,
                                 size_t nprocs)
{
    opal_process_name_t tmp, wp_name;
    size_t i, peer_rank, nprocs_world;
    uint64_t key;

    if(NULL == translation_ht) {
        translation_ht = OBJ_NEW(opal_hash_table_t);
        opal_hash_table_init(translation_ht, 2048);
        /* get my rank in MPI_COMM_WORLD */
        my_rank = ompi_comm_rank((ompi_communicator_t*)&ompi_mpi_comm_world);
    }

    nprocs_world = ompi_comm_size((ompi_communicator_t*)&ompi_mpi_comm_world);

    /* For all procs in the same MPI_COMM_WORLD we need to add them to the hash table */
    for( i = 0; i < nprocs; i++ ) {

        /* Extract the peer procname from the procs array */
        if( ompi_proc_is_sentinel(procs[i]) ) {
            tmp = ompi_proc_sentinel_to_name((uintptr_t)procs[i]);
        } else {
            tmp = procs[i]->super.proc_name;
        }
        if( tmp.jobid != ompi_proc_local_proc->super.proc_name.jobid )
            continue;

        /* Find the rank of the peer in MPI_COMM_WORLD */
        for( peer_rank = 0; peer_rank < nprocs_world; peer_rank++ ) {
            wp_name = ompi_group_get_proc_name(((ompi_communicator_t*)&ompi_mpi_comm_world)->c_remote_group, peer_rank);
            if( 0 != opal_compare_proc( tmp, wp_name) )
                continue;

            /* Store the rank (in MPI_COMM_WORLD) of the process, keyed by its
             * name (a unique opal ID), in the hash table */
            key = *((uint64_t*)&tmp);
            if( OPAL_SUCCESS != opal_hash_table_set_value_uint64(translation_ht, key,
                                                                 (void*)(uintptr_t)peer_rank) ) {
                return OMPI_ERR_OUT_OF_RESOURCE;  /* failed to allocate memory or to grow the hash table */
            }
            break;
        }
    }
    return pml_selected_module.pml_add_procs(procs, nprocs);
}
bool ompi_group_have_remote_peers (ompi_group_t *group)
{
    for (int i = 0 ; i < group->grp_proc_count ; ++i) {
        ompi_proc_t *proc = NULL;
#if OMPI_GROUP_SPARSE
        proc = ompi_group_peer_lookup (group, i);
#else
        proc = ompi_group_get_proc_ptr_raw (group, i);
        if (ompi_proc_is_sentinel (proc)) {
            /* the proc must be stored in the group or cached in the proc
             * hash table if the process resides on the local node
             * (see ompi_proc_complete_init) */
            return true;
        }
#endif
        if (!OPAL_PROC_ON_LOCAL_NODE(proc->super.proc_flags)) {
            return true;
        }
    }

    return false;
}
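/*
 * A minimal usage sketch, not taken from the original sources: components that
 * can only service node-local communicators typically guard their selection
 * with ompi_group_have_remote_peers() on the communicator's local group. The
 * helper name select_if_node_local() and the chosen error code are
 * illustrative assumptions.
 */
static int select_if_node_local (struct ompi_communicator_t *comm)
{
    if (ompi_group_have_remote_peers (comm->c_local_group)) {
        /* at least one peer lives on another node: decline the selection */
        return OMPI_ERR_NOT_SUPPORTED;
    }

    return OMPI_SUCCESS;
}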
int mca_pml_monitoring_add_procs(struct ompi_proc_t **procs,
                                 size_t nprocs)
{
    /**
     * Create the monitoring hashtable only for my MPI_COMM_WORLD. For now we
     * choose to ignore all other processes.
     */
    if(NULL == translation_ht) {
        size_t i;
        uint64_t key;
        opal_process_name_t tmp;

        nbprocs = nprocs;

        translation_ht = OBJ_NEW(opal_hash_table_t);
        opal_hash_table_init(translation_ht, 2048);

        for( i = 0; i < nprocs; i++ ) {
            /* my rank is the position of ompi_proc_local_proc in procs */
            if( procs[i] == ompi_proc_local_proc )
                my_rank = i;

            /* Extract the peer procname from the procs array */
            if( ompi_proc_is_sentinel(procs[i]) ) {
                tmp = ompi_proc_sentinel_to_name((uintptr_t)procs[i]);
            } else {
                tmp = procs[i]->super.proc_name;
            }

            /* Store the rank (in MPI_COMM_WORLD) of the process, keyed by its
             * name (a unique opal ID), in the hash table */
            key = *((uint64_t*)&tmp);
            if( OPAL_SUCCESS != opal_hash_table_set_value_uint64(translation_ht, key,
                                                                 (void*)(uintptr_t)i) ) {
                return OMPI_ERR_OUT_OF_RESOURCE;  /* failed to allocate memory or to grow the hash table */
            }
        }
    }
    return pml_selected_module.pml_add_procs(procs, nprocs);
}
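/*
 * A minimal sketch, not part of the original code: once add_procs has filled
 * translation_ht, the monitoring path can translate any peer back into its
 * MPI_COMM_WORLD rank by rebuilding the same 64-bit key from the process name
 * and querying the hash table. The helper name lookup_world_rank() is a
 * hypothetical illustration.
 */
static int lookup_world_rank (struct ompi_proc_t *proc, size_t *world_rank)
{
    opal_process_name_t name;
    uint64_t key;
    void *value;

    /* recover the process name whether the slot holds a sentinel or a real proc */
    if( ompi_proc_is_sentinel(proc) ) {
        name = ompi_proc_sentinel_to_name((uintptr_t)proc);
    } else {
        name = proc->super.proc_name;
    }

    /* same key construction as in add_procs: the opal process name viewed as a uint64_t */
    key = *((uint64_t*)&name);

    if( OPAL_SUCCESS != opal_hash_table_get_value_uint64(translation_ht, key, &value) ) {
        return OMPI_ERR_NOT_FOUND;  /* the peer is not part of my MPI_COMM_WORLD */
    }

    *world_rank = (size_t)(uintptr_t)value;
    return OMPI_SUCCESS;
}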