/* Manufacture the hidden intracommunicator ("local comm") that backs the
 * local group of an intercommunicator.
 *
 * The new communicator reuses the intercomm's receive context id, tagged
 * with the IS_LOCALCOMM field so it cannot collide with the parent's ids,
 * and duplicates the intercomm's local-to-local rank mapping.  On success,
 * intercomm_ptr->local_comm points at the committed intracommunicator.
 *
 * Returns MPI_SUCCESS or an MPI error class on allocation/commit failure.
 */
int MPIR_Setup_intercomm_localcomm(MPID_Comm * intercomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *lcomm;
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_SETUP_INTERCOMM_LOCALCOMM);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_SETUP_INTERCOMM_LOCALCOMM);

    lcomm = (MPID_Comm *) MPIU_Handle_obj_alloc(&MPID_Comm_mem);
    MPIR_ERR_CHKANDJUMP(!lcomm, mpi_errno, MPI_ERR_OTHER, "**nomem");

    /* get sensible default values for most fields (usually zeros) */
    mpi_errno = MPIR_Comm_init(lcomm);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* use the parent intercomm's recv ctx as the basis for our ctx */
    lcomm->recvcontext_id = MPID_CONTEXT_SET_FIELD(IS_LOCALCOMM, intercomm_ptr->recvcontext_id, 1);
    lcomm->context_id = lcomm->recvcontext_id;

    MPIU_DBG_MSG_FMT(COMM, TYPICAL,
                     (MPIU_DBG_FDEST,
                      "setup_intercomm_localcomm ic=%p ic->context_id=%d ic->recvcontext_id=%d lc->recvcontext_id=%d",
                      intercomm_ptr, intercomm_ptr->context_id,
                      intercomm_ptr->recvcontext_id, lcomm->recvcontext_id));

    /* Save the kind of the communicator */
    lcomm->comm_kind = MPID_INTRACOMM;

    /* Sizes and rank: viewed locally, the local comm is exactly the
     * intercomm's local group (remote == local for an intracomm). */
    lcomm->local_size = intercomm_ptr->local_size;
    lcomm->remote_size = intercomm_ptr->local_size;
    lcomm->rank = intercomm_ptr->rank;

    MPIR_Comm_map_dup(lcomm, intercomm_ptr, MPIR_COMM_MAP_DIR_L2L);

    /* TODO More advanced version: if the group is available, dup it by
     * increasing the reference count instead of recreating it later */
    /* FIXME : No coll_fns functions for the collectives */
    /* FIXME : No local functions for the topology routines */

    intercomm_ptr->local_comm = lcomm;

    /* sets up the SMP-aware sub-communicators and tables */
    mpi_errno = MPIR_Comm_commit(lcomm);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_fail:
    /* success path falls through here as well; mpi_errno carries the result */
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_SETUP_INTERCOMM_LOCALCOMM);
    return mpi_errno;
}
/* Nonblocking-schedule callback: commit the communicator carried in the
 * context-id-allocation (gcn) state.  The comm and tag parameters are part
 * of the scheduler callback signature and are not used here. */
static int sched_cb_commit_comm(MPIR_Comm * comm, int tag, void *state)
{
    struct gcn_state *st = (struct gcn_state *) state;
    int mpi_errno = MPI_SUCCESS;

    mpi_errno = MPIR_Comm_commit(st->new_comm);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_fail:
    return mpi_errno;
}
/* psp (pscom) device implementation of MPID_Init.
 *
 * Phases, in order:
 *   1. PMI process-manager init (rank, size, appnum, has_parent)
 *   2. pscom library init (thread-safe variant only for MPI_THREAD_MULTIPLE)
 *   3. read the PSP_* environment switches into MPIDI_Process.env
 *   4. open/listen on the pscom socket and establish all connections
 *      (eager or on-demand, depending on PSP_ONDEMAND)
 *   5. optional node-id exchange for topology-aware collectives
 *   6. set up the MPI_COMM_WORLD and MPI_COMM_SELF objects
 *   7. connect back to the parent port if this process was spawned
 *
 * threadlevel_requested - thread level asked for by the user
 * threadlevel_provided  - out: granted level, capped at MPICH_THREAD_LEVEL
 * has_args/has_env      - out: always set to 1
 *
 * NOTE: on any failure this function calls _exit(1) (see fn_fail), so an
 * error value is never actually returned to the caller.
 */
int MPID_Init(int *argc, char ***argv, int threadlevel_requested, int *threadlevel_provided,
              int *has_args, int *has_env)
{
    int mpi_errno = MPI_SUCCESS;
    int pg_rank, pg_size, pg_id_sz;
    int appnum = -1;
    /* int universe_size; */
    int has_parent;
    pscom_socket_t *socket;
    pscom_err_t rc;
    char *pg_id_name;
    char *parent_port;

    /* Call any and all MPID_Init type functions */
    MPIR_Err_init();
    MPIR_Datatype_init();
    MPIR_Group_init();

    mpid_debug_init();

    assert(PSCOM_ANYPORT == -1);        /* all code places which depend on it are marked with: "assert(PSP_ANYPORT == -1);" */

    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_INIT);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_INIT);

    /* Phase 1: query the process manager. */
    PMICALL(PMI_Init(&has_parent));
    PMICALL(PMI_Get_rank(&pg_rank));
    PMICALL(PMI_Get_size(&pg_size));
    PMICALL(PMI_Get_appnum(&appnum));

    *has_args = 1;
    *has_env = 1;

    /* without PMI_Get_universe_size() we see pmi error:
       '[unset]: write_line error; fd=-1' in PMI_KVS_Get()! */
    /* PMICALL(PMI_Get_universe_size(&universe_size)); */

    /* Fall back to sane singleton values if PMI reported nothing usable. */
    if (pg_rank < 0)
        pg_rank = 0;
    if (pg_size <= 0)
        pg_size = 1;

    /* Phase 2: initialize pscom.  The thread-safe variant is only needed
     * (and only paid for) when MPI_THREAD_MULTIPLE was requested. */
    if (
#ifndef MPICH_IS_THREADED
           1
#else
           threadlevel_requested < MPI_THREAD_MULTIPLE
#endif
) {
        rc = pscom_init(PSCOM_VERSION);
        if (rc != PSCOM_SUCCESS) {
            fprintf(stderr, "pscom_init(0x%04x) failed : %s\n", PSCOM_VERSION, pscom_err_str(rc));
            exit(1);
        }
    } else {
        rc = pscom_init_thread(PSCOM_VERSION);
        if (rc != PSCOM_SUCCESS) {
            fprintf(stderr, "pscom_init_thread(0x%04x) failed : %s\n", PSCOM_VERSION, pscom_err_str(rc));
            exit(1);
        }
    }

    /* Phase 3: Initialize the switches */
    pscom_env_get_uint(&MPIDI_Process.env.enable_collectives, "PSP_COLLECTIVES");

#ifdef PSCOM_HAS_ON_DEMAND_CONNECTIONS
    /* if (pg_size > 32) MPIDI_Process.env.enable_ondemand = 1; */
    pscom_env_get_uint(&MPIDI_Process.env.enable_ondemand, "PSP_ONDEMAND");
#else
    MPIDI_Process.env.enable_ondemand = 0;
#endif
    /* enable_ondemand_spawn defaults to enable_ondemand */
    MPIDI_Process.env.enable_ondemand_spawn = MPIDI_Process.env.enable_ondemand;
    pscom_env_get_uint(&MPIDI_Process.env.enable_ondemand_spawn, "PSP_ONDEMAND_SPAWN");

    /* take SMP-related locality information into account (e.g., for MPI_Win_allocate_shared) */
    pscom_env_get_uint(&MPIDI_Process.env.enable_smp_awareness, "PSP_SMP_AWARENESS");

    /* take MSA-related topology information into account */
    pscom_env_get_uint(&MPIDI_Process.env.enable_msa_awareness, "PSP_MSA_AWARENESS");
    if (MPIDI_Process.env.enable_msa_awareness) {
        pscom_env_get_uint(&MPIDI_Process.msa_module_id, "PSP_MSA_MODULE_ID");
    }

#ifdef MPID_PSP_TOPOLOGY_AWARE_COLLOPS
    /* use hierarchy-aware collectives on SMP level */
    pscom_env_get_uint(&MPIDI_Process.env.enable_smp_aware_collops, "PSP_SMP_AWARE_COLLOPS");

    /* use hierarchy-aware collectives on MSA level (disables SMP-aware collops / FIX ME!) */
    pscom_env_get_uint(&MPIDI_Process.env.enable_msa_aware_collops, "PSP_MSA_AWARE_COLLOPS");
    if (MPIDI_Process.env.enable_msa_aware_collops)
        MPIDI_Process.env.enable_smp_aware_collops = 0;
#endif

#ifdef MPID_PSP_CREATE_HISTOGRAM
    /* collect statistics information and print them at the end of a run */
    pscom_env_get_uint(&MPIDI_Process.env.enable_histogram, "PSP_HISTOGRAM");
    pscom_env_get_uint(&MPIDI_Process.histo.max_size, "PSP_HISTOGRAM_MAX");
    pscom_env_get_uint(&MPIDI_Process.histo.min_size, "PSP_HISTOGRAM_MIN");
    pscom_env_get_uint(&MPIDI_Process.histo.step_width, "PSP_HISTOGRAM_SHIFT");
#endif
    /* Collective tuning knobs, currently disabled:
       pscom_env_get_uint(&mpir_allgather_short_msg, "PSP_ALLGATHER_SHORT_MSG");
       pscom_env_get_uint(&mpir_allgather_long_msg, "PSP_ALLGATHER_LONG_MSG");
       pscom_env_get_uint(&mpir_allreduce_short_msg, "PSP_ALLREDUCE_SHORT_MSG");
       pscom_env_get_uint(&mpir_alltoall_short_msg, "PSP_ALLTOALL_SHORT_MSG");
       pscom_env_get_uint(&mpir_alltoall_medium_msg, "PSP_ALLTOALL_MEDIUM_MSG");
       pscom_env_get_uint(&mpir_alltoall_throttle, "PSP_ALLTOALL_THROTTLE");
       pscom_env_get_uint(&mpir_bcast_short_msg, "PSP_BCAST_SHORT_MSG");
       pscom_env_get_uint(&mpir_bcast_long_msg, "PSP_BCAST_LONG_MSG");
       pscom_env_get_uint(&mpir_bcast_min_procs, "PSP_BCAST_MIN_PROCS");
       pscom_env_get_uint(&mpir_gather_short_msg, "PSP_GATHER_SHORT_MSG");
       pscom_env_get_uint(&mpir_gather_vsmall_msg, "PSP_GATHER_VSMALL_MSG");
       pscom_env_get_uint(&mpir_redscat_commutative_long_msg, "PSP_REDSCAT_COMMUTATIVE_LONG_MSG");
       pscom_env_get_uint(&mpir_redscat_noncommutative_short_msg, "PSP_REDSCAT_NONCOMMUTATIVE_SHORT_MSG");
       pscom_env_get_uint(&mpir_reduce_short_msg, "PSP_REDUCE_SHORT_MSG");
       pscom_env_get_uint(&mpir_scatter_short_msg, "PSP_SCATTER_SHORT_MSG");
     */

    /* Phase 4: open the listening socket. */
    socket = pscom_open_socket(0, 0);

    if (!MPIDI_Process.env.enable_ondemand) {
        socket->ops.con_accept = mpid_con_accept;
    }

    {
        /* Short socket name derived from our rank: "r" + 7 digits + NUL fits in 10. */
        char name[10];
        snprintf(name, sizeof(name), "r%07u", (unsigned) pg_rank);
        pscom_socket_set_name(socket, name);
    }

    rc = pscom_listen(socket, PSCOM_ANYPORT);
    if (rc != PSCOM_SUCCESS) {
        PRINTERROR("pscom_listen(PSCOM_ANYPORT)");
        goto fn_fail;
    }

    /* Note that if pmi is not availble, the value of MPI_APPNUM is not set */
    /* if (appnum != -1) { */
    MPIR_Process.attrs.appnum = appnum;
    /* } */
#if 0
    // see mpiimpl.h:
    // typedef struct PreDefined_attrs {
    //     int appnum;          /* Application number provided by mpiexec (MPI-2) */
    //     int host;            /* host */
    //     int io;              /* standard io allowed */
    //     int lastusedcode;    /* last used error code (MPI-2) */
    //     int tag_ub;          /* Maximum message tag */
    //     int universe;        /* Universe size from mpiexec (MPI-2) */
    //     int wtime_is_global; /* Wtime is global over processes in COMM_WORLD */
    // } PreDefined_attrs;
#endif
    MPIR_Process.attrs.tag_ub = MPIDI_TAG_UB;

    /* obtain the id of the process group */
    PMICALL(PMI_KVS_Get_name_length_max(&pg_id_sz));

    pg_id_name = MPL_malloc(pg_id_sz + 1, MPL_MEM_STRINGS);
    if (!pg_id_name) {
        PRINTERROR("MPL_malloc()");
        goto fn_fail;
    }
    PMICALL(PMI_KVS_Get_my_name(pg_id_name, pg_id_sz));        /* safe */

    /* MPIDI_Process.socket = socket; */
    MPIDI_Process.my_pg_rank = pg_rank;
    MPIDI_Process.my_pg_size = pg_size;
    MPIDI_Process.pg_id_name = pg_id_name;

    if (!MPIDI_Process.env.enable_ondemand) {
        /* Create and establish all connections */
        if (InitPortConnections(socket) != MPI_SUCCESS)
            goto fn_fail;
    } else {
        /* Create all connections as "on demand" connections. */
        if (InitPscomConnections(socket) != MPI_SUCCESS)
            goto fn_fail;
    }

#ifdef MPID_PSP_TOPOLOGY_AWARE_COLLOPS
    /* Phase 5: determine a node id for this process and exchange the full
     * table via a hand-rolled gather-at-rank-0 + bcast over raw pscom
     * point-to-point messages (the MPI collectives are not usable yet). */
    {
        int grank;
        int my_node_id = -1;
        int remote_node_id = -1;
        int *node_id_table;

        if (MPIDI_Process.env.enable_msa_awareness && MPIDI_Process.env.enable_msa_aware_collops) {
            my_node_id = MPIDI_Process.msa_module_id;
            assert(my_node_id > -1);
        } else if (MPIDI_Process.env.enable_smp_awareness && MPIDI_Process.env.enable_smp_aware_collops) {
            if (!MPIDI_Process.env.enable_ondemand) {
                /* In the PSP_ONDEMAND=0 case, we can just check the pscom connection types: */
                /* The node id becomes the lowest rank reachable via shared memory
                 * (or our own rank), so all ranks on one node agree on it. */
                for (grank = 0; grank < pg_size; grank++) {
                    pscom_connection_t *con = grank2con_get(grank);
                    if ((con->type == PSCOM_CON_TYPE_SHM) || (pg_rank == grank)) {
                        my_node_id = grank;
                        break;
                    }
                }
            } else {
                /* In the PSP_ONDEMAND=1 case, we have to use a hash of the host name: */
                my_node_id = MPID_PSP_get_host_hash();
                if (my_node_id < 0)
                    my_node_id *= -1;
            }
            assert(my_node_id > -1);
        } else {
            /* No hierarchy-awareness requested */
            assert(my_node_id == -1);
        }

        if (my_node_id > -1) {
            /* NOTE(review): MPL_malloc() result is used unchecked below;
             * an allocation failure would dereference NULL — verify intent. */
            node_id_table = MPL_malloc(pg_size * sizeof(int), MPL_MEM_OBJECT);

            if (pg_rank != 0) {
                /* gather: */
                pscom_connection_t *con = grank2con_get(0);
                assert(con);
                pscom_send(con, NULL, 0, &my_node_id, sizeof(int));

                /* bcast: */
                rc = pscom_recv_from(con, NULL, 0, node_id_table, pg_size * sizeof(int));
                assert(rc == PSCOM_SUCCESS);
            } else {
                /* gather: */
                node_id_table[0] = my_node_id;
                for (grank = 1; grank < pg_size; grank++) {
                    pscom_connection_t *con = grank2con_get(grank);
                    assert(con);
                    rc = pscom_recv_from(con, NULL, 0, &remote_node_id, sizeof(int));
                    assert(rc == PSCOM_SUCCESS);
                    node_id_table[grank] = remote_node_id;
                }
                /* bcast: */
                for (grank = 1; grank < pg_size; grank++) {
                    pscom_connection_t *con = grank2con_get(grank);
                    pscom_send(con, NULL, 0, node_id_table, pg_size * sizeof(int));
                }
            }
            MPIDI_Process.node_id_table = node_id_table;
        } else {
            /* No hierarchy-awareness requested */
            assert(MPIDI_Process.node_id_table == NULL);
        }
    }
#endif

    /*
     * Phase 6a: Initialize the MPI_COMM_WORLD object
     */
    {
        MPIR_Comm *comm;
        int grank;
        MPIDI_PG_t *pg_ptr;
        int pg_id_num;
        MPIDI_VCRT_t *vcrt;

        comm = MPIR_Process.comm_world;

        comm->rank = pg_rank;
        comm->remote_size = pg_size;
        comm->local_size = pg_size;
        comm->pscom_socket = socket;

        vcrt = MPIDI_VCRT_Create(comm->remote_size);
        assert(vcrt);
        MPID_PSP_comm_set_vcrt(comm, vcrt);

        MPIDI_PG_Convert_id(pg_id_name, &pg_id_num);
        MPIDI_PG_Create(pg_size, pg_id_num, &pg_ptr);
        assert(pg_ptr == MPIDI_Process.my_pg);

        for (grank = 0; grank < pg_size; grank++) {
            /* MPIR_CheckDisjointLpids() in mpi/comm/intercomm_create.c expect
               lpid to be smaller than 4096!!!
               Else you will see an "Fatal error in MPI_Intercomm_create" */
            pscom_connection_t *con = grank2con_get(grank);

            pg_ptr->vcr[grank] = MPIDI_VC_Create(pg_ptr, grank, con, grank);
            comm->vcr[grank] = MPIDI_VC_Dup(pg_ptr->vcr[grank]);
        }

        mpi_errno = MPIR_Comm_commit(comm);
        assert(mpi_errno == MPI_SUCCESS);
    }

    /*
     * Phase 6b: Initialize the MPI_COMM_SELF object
     */
    {
        MPIR_Comm *comm;
        MPIDI_VCRT_t *vcrt;

        comm = MPIR_Process.comm_self;
        comm->rank = 0;
        comm->remote_size = 1;
        comm->local_size = 1;
        comm->pscom_socket = socket;

        vcrt = MPIDI_VCRT_Create(comm->remote_size);
        assert(vcrt);
        MPID_PSP_comm_set_vcrt(comm, vcrt);

        /* Reuse our own COMM_WORLD connection entry for SELF. */
        comm->vcr[0] = MPIDI_VC_Dup(MPIR_Process.comm_world->vcr[pg_rank]);

        mpi_errno = MPIR_Comm_commit(comm);
        assert(mpi_errno == MPI_SUCCESS);
    }

    /* ToDo: move MPID_enable_receive_dispach to bg thread */
    MPID_enable_receive_dispach(socket);

    if (threadlevel_provided) {
        *threadlevel_provided = (MPICH_THREAD_LEVEL < threadlevel_requested) ?
            MPICH_THREAD_LEVEL : threadlevel_requested;
    }

    /* Phase 7: if we were spawned, connect back to the parent. */
    if (has_parent) {
        MPIR_Comm *comm;

        mpi_errno = MPID_PSP_GetParentPort(&parent_port);
        assert(mpi_errno == MPI_SUCCESS);

        /* printf("%s:%u:%s Child with Parent: %s\n", __FILE__, __LINE__, __func__, parent_port); */

        mpi_errno = MPID_Comm_connect(parent_port, NULL, 0, MPIR_Process.comm_world, &comm);
        if (mpi_errno != MPI_SUCCESS) {
            fprintf(stderr, "MPI_Comm_connect(parent) failed!\n");
            goto fn_fail;
        }

        assert(comm != NULL);
        MPL_strncpy(comm->name, "MPI_COMM_PARENT", MPI_MAX_OBJECT_NAME);
        MPIR_Process.comm_parent = comm;
    }

    MPID_PSP_shm_rma_init();

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_INIT);
    return mpi_errno;
    /* --- */
  fn_fail:
    /* A failing MPI_Init() didn't call the MPI error handler, which
       mostly calls abort().  That would cause MPI_Init() to return the
       mpi_errno, which nobody checks, causing segfaults, double frees
       and so on.  To prevent strange error messages, we now call
       _exit(1) here. */
    _exit(1);
}
/* Duplicate comm_ptr into a new communicator covering its first `size`
 * ranks (size == local_size for a plain dup).  Collective over comm_ptr:
 * every process must call it because context-id allocation is collective.
 * Processes with rank >= size get *outcomm_ptr == 0 and MPI_SUCCESS.
 *
 * comm_ptr    - communicator to copy (intra- or intercomm)
 * size        - number of (local) ranks kept in the copy
 * outcomm_ptr - out: new communicator, or 0 for excluded ranks
 */
int MPIR_Comm_copy(MPID_Comm * comm_ptr, int size, MPID_Comm ** outcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIU_Context_id_t new_context_id, new_recvcontext_id;
    MPID_Comm *newcomm_ptr = NULL;
    MPIR_Comm_map_t *map;
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_COMM_COPY);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_COMM_COPY);

    /* Get a new context first.  We need this to be collective over the
     * input communicator */
    /* If there is a context id cache in oldcomm, use it here.  Otherwise,
     * use the appropriate algorithm to get a new context id.  Be careful
     * of intercomms here */
    if (comm_ptr->comm_kind == MPID_INTERCOMM) {
        mpi_errno = MPIR_Get_intercomm_contextid(comm_ptr, &new_context_id, &new_recvcontext_id);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    } else {
        mpi_errno = MPIR_Get_contextid_sparse(comm_ptr, &new_context_id, FALSE);
        /* intracomm: send and receive context ids are the same */
        new_recvcontext_id = new_context_id;
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        MPIU_Assert(new_context_id != 0);
    }

    /* This is the local size, not the remote size, in the case of
     * an intercomm */
    if (comm_ptr->rank >= size) {
        *outcomm_ptr = 0;
        /* always free the recvcontext ID, never the "send" ID */
        MPIR_Free_contextid(new_recvcontext_id);
        goto fn_exit;
    }

    /* We're left with the processes that will have a non-null communicator.
     * Create the object, initialize the data, and return the result */
    mpi_errno = MPIR_Comm_create(&newcomm_ptr);
    if (mpi_errno)
        goto fn_fail;

    newcomm_ptr->context_id = new_context_id;
    newcomm_ptr->recvcontext_id = new_recvcontext_id;

    /* Save the kind of the communicator */
    newcomm_ptr->comm_kind = comm_ptr->comm_kind;
    newcomm_ptr->local_comm = 0;

    /* There are two cases here - size is the same as the old communicator,
     * or it is smaller.  If the size is the same, we can just add a reference.
     * Otherwise, we need to create a new network address mapping.  Note that this is the
     * test that matches the test on rank above. */
    if (size == comm_ptr->local_size) {
        /* Duplicate the network address mapping */
        if (comm_ptr->comm_kind == MPID_INTRACOMM)
            MPIR_Comm_map_dup(newcomm_ptr, comm_ptr, MPIR_COMM_MAP_DIR_L2L);
        else
            MPIR_Comm_map_dup(newcomm_ptr, comm_ptr, MPIR_COMM_MAP_DIR_R2R);
    } else {
        int i;

        if (comm_ptr->comm_kind == MPID_INTRACOMM)
            MPIR_Comm_map_irregular(newcomm_ptr, comm_ptr, NULL, size, MPIR_COMM_MAP_DIR_L2L, &map);
        else
            MPIR_Comm_map_irregular(newcomm_ptr, comm_ptr, NULL, size, MPIR_COMM_MAP_DIR_R2R, &map);
        for (i = 0; i < size; i++) {
            /* For rank i in the new communicator, find the corresponding
             * rank in the input communicator */
            map->src_mapping[i] = i;
        }
    }

    /* If it is an intercomm, duplicate the local network address references */
    if (comm_ptr->comm_kind == MPID_INTERCOMM) {
        MPIR_Comm_map_dup(newcomm_ptr, comm_ptr, MPIR_COMM_MAP_DIR_L2L);
    }

    /* Set the sizes and ranks */
    newcomm_ptr->rank = comm_ptr->rank;
    if (comm_ptr->comm_kind == MPID_INTERCOMM) {
        newcomm_ptr->local_size = comm_ptr->local_size;
        newcomm_ptr->remote_size = comm_ptr->remote_size;
        newcomm_ptr->is_low_group = comm_ptr->is_low_group;
    } else {
        newcomm_ptr->local_size = size;
        newcomm_ptr->remote_size = size;
    }

    /* Inherit the error handler (if any) */
    MPID_THREAD_CS_ENTER(POBJ, MPIR_THREAD_POBJ_COMM_MUTEX(comm_ptr));
    newcomm_ptr->errhandler = comm_ptr->errhandler;
    if (comm_ptr->errhandler) {
        MPIR_Errhandler_add_ref(comm_ptr->errhandler);
    }
    MPID_THREAD_CS_EXIT(POBJ, MPIR_THREAD_POBJ_COMM_MUTEX(comm_ptr));

    /* FIXME do we want to copy coll_fns here? */

    mpi_errno = MPIR_Comm_commit(newcomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Start with no attributes on this communicator */
    newcomm_ptr->attributes = 0;

    /* Copy over the info hints from the original communicator. */
    /* NOTE(review): comm_ptr->info is passed without a NULL check —
     * presumably MPIR_Info_dup_impl tolerates NULL; verify. */
    mpi_errno = MPIR_Info_dup_impl(comm_ptr->info, &(newcomm_ptr->info));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    mpi_errno = MPIR_Comm_apply_hints(newcomm_ptr, newcomm_ptr->info);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    *outcomm_ptr = newcomm_ptr;

  fn_fail:
  fn_exit:
    /* fn_fail falls through to fn_exit; mpi_errno carries the result */
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_COMM_COPY);
    return mpi_errno;
}
/* Merge an intercommunicator into a single intracommunicator
 * (implements MPI_Intercomm_merge).  `high` selects which group is
 * ordered last; if both groups pass the same value, rank-0 gpids break
 * the tie.  The merge is done in two passes: first a temporary
 * communicator built on a context id derived from the intercomm (so the
 * collective context-id allocation has something to run on), then the
 * real communicator with a freshly allocated context id. */
int MPIR_Intercomm_merge_impl(MPID_Comm *comm_ptr, int high, MPID_Comm **new_intracomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    int local_high, remote_high, new_size;
    MPIU_Context_id_t new_context_id;
    MPIR_Errflag_t errflag = MPIR_ERR_NONE;
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_INTERCOMM_MERGE_IMPL);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_INTERCOMM_MERGE_IMPL);
    /* Make sure that we have a local intercommunicator */
    if (!comm_ptr->local_comm) {
        /* Manufacture the local communicator */
        mpi_errno = MPIR_Setup_intercomm_localcomm( comm_ptr );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

    /* Find the "high" value of the other group of processes.  This
       will be used to determine which group is ordered first in
       the generated communicator.  high is logical */
    local_high = high;
    if (comm_ptr->rank == 0) {
        /* This routine allows use to use the collective communication
           context rather than the point-to-point context. */
        mpi_errno = MPIC_Sendrecv( &local_high, 1, MPI_INT, 0, 0,
                                   &remote_high, 1, MPI_INT, 0, 0, comm_ptr,
                                   MPI_STATUS_IGNORE, &errflag );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        /* If local_high and remote_high are the same, then order is arbitrary.
           we use the gpids of the rank 0 member of the local and remote
           groups to choose an order in this case. */
        if (local_high == remote_high) {
            MPID_Gpid ingpid, outgpid;

            mpi_errno = MPID_GPID_Get( comm_ptr, 0, &ingpid );
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);

            mpi_errno = MPIC_Sendrecv( &ingpid, sizeof(MPID_Gpid), MPI_BYTE, 0, 1,
                                       &outgpid, sizeof(MPID_Gpid), MPI_BYTE, 0, 1, comm_ptr,
                                       MPI_STATUS_IGNORE, &errflag );
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);

            /* Note that the gpids cannot be the same because we are
               starting from a valid intercomm */
            int rc = memcmp(&ingpid,&outgpid,sizeof(MPID_Gpid));
            if(rc < 0)
                local_high = 1;
            else if(rc > 0)
                local_high = 0;
            else {
                /* req#3930: The merge algorithm will deadlock if the
                   gpids are inadvertently the same due to implementation
                   bugs in the MPID_GPID_Get() function */
                MPIU_Assert(rc != 0);
            }
        }
    }

    /* All processes in the local group now need to get the value of
       local_high, which may have changed if both groups of processes
       had the same value for high */
    mpi_errno = MPIR_Bcast_impl( &local_high, 1, MPI_INT, 0, comm_ptr->local_comm, &errflag );
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIR_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");

    /* Pass 1: build a temporary communicator on a context id derived
       from the intercomm. */
    mpi_errno = MPIR_Comm_create( new_intracomm_ptr );
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    new_size = comm_ptr->local_size + comm_ptr->remote_size;

    /* FIXME: For the intracomm, we need a consistent context id.
       That means that one of the two groups needs to use
       the recvcontext_id and the other must use the context_id */
    if (local_high) {
        (*new_intracomm_ptr)->context_id = comm_ptr->recvcontext_id + 2; /* See below */
    } else {
        (*new_intracomm_ptr)->context_id = comm_ptr->context_id + 2; /* See below */
    }
    (*new_intracomm_ptr)->recvcontext_id = (*new_intracomm_ptr)->context_id;
    (*new_intracomm_ptr)->remote_size = (*new_intracomm_ptr)->local_size = new_size;
    (*new_intracomm_ptr)->rank = -1;
    (*new_intracomm_ptr)->comm_kind = MPID_INTRACOMM;

    /* Now we know which group comes first.  Build the new mapping
       from the existing comm */
    mpi_errno = create_and_map(comm_ptr, local_high, (*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* We've setup a temporary context id, based on the context id
       used by the intercomm.  This allows us to perform the allreduce
       operations within the context id algorithm, since we already
       have a valid (almost - see comm_create_hook) communicator. */
    mpi_errno = MPIR_Comm_commit((*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* printf( "About to get context id \n" ); fflush( stdout ); */
    /* In the multi-threaded case, MPIR_Get_contextid_sparse assumes that the
       calling routine already holds the single criticial section */
    new_context_id = 0;
    mpi_errno = MPIR_Get_contextid_sparse( (*new_intracomm_ptr), &new_context_id, FALSE );
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIU_Assert(new_context_id != 0);

    /* We release this communicator that was involved just to
     * get valid context id and create true one */
    mpi_errno = MPIR_Comm_release(*new_intracomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Pass 2: build the real communicator on the fresh context id. */
    mpi_errno = MPIR_Comm_create( new_intracomm_ptr );
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    (*new_intracomm_ptr)->remote_size = (*new_intracomm_ptr)->local_size = new_size;
    (*new_intracomm_ptr)->rank = -1;
    (*new_intracomm_ptr)->comm_kind = MPID_INTRACOMM;
    (*new_intracomm_ptr)->context_id = new_context_id;
    (*new_intracomm_ptr)->recvcontext_id = new_context_id;

    mpi_errno = create_and_map(comm_ptr, local_high, (*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    mpi_errno = MPIR_Comm_commit((*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_INTERCOMM_MERGE_IMPL);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* comm create impl for intracommunicators, assumes that the standard error
 * checking has already taken place in the calling function.
 *
 * Collective over comm_ptr (context-id allocation requires all ranks).
 * Ranks with group_ptr->rank == MPI_UNDEFINED participate in the id
 * allocation but get *newcomm_ptr == NULL.  On success the new
 * communicator holds two references on group_ptr (local and remote
 * group are the same for an intracomm). */
int MPIR_Comm_create_intra(MPIR_Comm *comm_ptr, MPIR_Group *group_ptr, MPIR_Comm **newcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t new_context_id = 0;
    int *mapping = NULL;
    int n;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPIR_COMM_CREATE_INTRA);

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPIR_COMM_CREATE_INTRA);

    MPIR_Assert(comm_ptr->comm_kind == MPIR_COMM_KIND__INTRACOMM);

    n = group_ptr->size;
    *newcomm_ptr = NULL;

    /* Create a new communicator from the specified group members */

    /* Creating the context id is collective over the *input* communicator,
       so it must be created before we decide if this process is a member of
       the group */
    /* In the multi-threaded case, MPIR_Get_contextid_sparse assumes that the
       calling routine already holds the single criticial section */
    mpi_errno = MPIR_Get_contextid_sparse( comm_ptr, &new_context_id, group_ptr->rank == MPI_UNDEFINED );
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIR_Assert(new_context_id != 0);

    if (group_ptr->rank != MPI_UNDEFINED) {
        MPIR_Comm *mapping_comm = NULL;

        mpi_errno = MPII_Comm_create_calculate_mapping(group_ptr, comm_ptr, &mapping, &mapping_comm);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        /* Get the new communicator structure and context id */
        mpi_errno = MPIR_Comm_create( newcomm_ptr );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        (*newcomm_ptr)->recvcontext_id = new_context_id;
        (*newcomm_ptr)->rank = group_ptr->rank;
        (*newcomm_ptr)->comm_kind = comm_ptr->comm_kind;
        /* Since the group has been provided, let the new communicator know
           about the group */
        (*newcomm_ptr)->local_comm = 0;
        (*newcomm_ptr)->local_group = group_ptr;
        MPIR_Group_add_ref( group_ptr );

        (*newcomm_ptr)->remote_group = group_ptr;
        MPIR_Group_add_ref( group_ptr );
        (*newcomm_ptr)->context_id = (*newcomm_ptr)->recvcontext_id;
        (*newcomm_ptr)->remote_size = (*newcomm_ptr)->local_size = n;
        (*newcomm_ptr)->pof2 = MPL_pof2(n);

        /* Setup the communicator's network address mapping.  This is for the
           remote group, which is the same as the local group for
           intracommunicators */
        mpi_errno = MPII_Comm_create_map(n, 0, mapping, NULL, mapping_comm, *newcomm_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIR_Comm_commit(*newcomm_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    } else {
        /* This process is not in the group */
        new_context_id = 0;
    }

  fn_exit:
    if (mapping)
        MPL_free(mapping);
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPIR_COMM_CREATE_INTRA);
    return mpi_errno;
  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    if (*newcomm_ptr != NULL) {
        MPIR_Comm_release(*newcomm_ptr);
        new_context_id = 0; /* MPIR_Comm_release frees the new ctx id */
    }
    if (new_context_id != 0 && group_ptr->rank != MPI_UNDEFINED) {
        MPIR_Free_contextid(new_context_id);
    }
    /* --END ERROR HANDLING-- */
    goto fn_exit;
}
/* comm create group impl; assumes that the standard error checking
 * has already taken place in the calling function.
 *
 * Unlike MPIR_Comm_create_intra, the context-id allocation here is
 * collective over the *group* only (via the group/tag variant), so
 * processes with group_ptr->rank == MPI_UNDEFINED do not participate
 * at all; they simply get *newcomm_ptr == NULL. */
int MPIR_Comm_create_group(MPIR_Comm * comm_ptr, MPIR_Group * group_ptr, int tag,
                           MPIR_Comm ** newcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t new_context_id = 0;
    int *mapping = NULL;
    int n;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPIR_COMM_CREATE_GROUP);

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPIR_COMM_CREATE_GROUP);

    MPIR_Assert(comm_ptr->comm_kind == MPIR_COMM_KIND__INTRACOMM);

    n = group_ptr->size;
    *newcomm_ptr = NULL;

    /* Create a new communicator from the specified group members */

    if (group_ptr->rank != MPI_UNDEFINED) {
        MPIR_Comm *mapping_comm = NULL;

        /* For this routine, creation of the id is collective over the input
           *group*, so processes not in the group do not participate. */
        mpi_errno = MPIR_Get_contextid_sparse_group( comm_ptr, group_ptr, tag, &new_context_id, 0 );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        MPIR_Assert(new_context_id != 0);

        mpi_errno = MPII_Comm_create_calculate_mapping(group_ptr, comm_ptr, &mapping, &mapping_comm);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        /* Get the new communicator structure and context id */
        mpi_errno = MPIR_Comm_create( newcomm_ptr );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        (*newcomm_ptr)->recvcontext_id = new_context_id;
        (*newcomm_ptr)->rank = group_ptr->rank;
        (*newcomm_ptr)->comm_kind = comm_ptr->comm_kind;
        /* Since the group has been provided, let the new communicator know
           about the group */
        (*newcomm_ptr)->local_comm = 0;
        (*newcomm_ptr)->local_group = group_ptr;
        MPIR_Group_add_ref( group_ptr );

        (*newcomm_ptr)->remote_group = group_ptr;
        MPIR_Group_add_ref( group_ptr );
        (*newcomm_ptr)->context_id = (*newcomm_ptr)->recvcontext_id;
        (*newcomm_ptr)->remote_size = (*newcomm_ptr)->local_size = n;

        /* Setup the communicator's vc table.  This is for the remote group,
           which is the same as the local group for intracommunicators */
        mpi_errno = MPII_Comm_create_map(n, 0, mapping, NULL, mapping_comm, *newcomm_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIR_Comm_commit(*newcomm_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    } else {
        /* This process is not in the group */
        new_context_id = 0;
    }

  fn_exit:
    if (mapping)
        MPL_free(mapping);
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPIR_COMM_CREATE_GROUP);
    return mpi_errno;
  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    if (*newcomm_ptr != NULL) {
        MPIR_Comm_release(*newcomm_ptr);
        new_context_id = 0; /* MPIR_Comm_release frees the new ctx id */
    }
    if (new_context_id != 0)
        MPIR_Free_contextid(new_context_id);
    /* --END ERROR HANDLING-- */
    goto fn_exit;
}
/* Implements MPI_Intercomm_create: build an intercommunicator from two
 * disjoint intracommunicators whose leaders can talk over peer_comm_ptr.
 *
 * Protocol: (1) leaders exchange the remote group map, (2) every process
 * allocates a recv context id collectively over local_comm_ptr, (3) the
 * leaders swap context ids over the peer comm and broadcast the remote
 * ("send") id to their local group, (4) all local processes construct
 * the intercomm object.  The user-supplied tag is ignored as of MPI 3.0;
 * a fixed tag in the tagged-collective space is used instead. */
int MPIR_Intercomm_create_impl(MPIR_Comm *local_comm_ptr, int local_leader,
                               MPIR_Comm *peer_comm_ptr, int remote_leader, int tag,
                               MPIR_Comm **new_intercomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t final_context_id, recvcontext_id;
    int remote_size = 0, *remote_lpids = NULL;
    int comm_info[3];
    int is_low_group = 0;
    int cts_tag;
    MPIR_Errflag_t errflag = MPIR_ERR_NONE;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPIR_COMM_KIND__INTERCOMM_CREATE_IMPL);

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPIR_COMM_KIND__INTERCOMM_CREATE_IMPL);

    /* Shift tag into the tagged coll space (tag provided by the user
       is ignored as of MPI 3.0) */
    cts_tag = MPIR_COMM_KIND__INTERCOMM_CREATE_TAG | MPIR_Process.tagged_coll_mask;

    mpi_errno = MPID_Intercomm_exchange_map(local_comm_ptr, local_leader,
                                            peer_comm_ptr, remote_leader,
                                            &remote_size, &remote_lpids, &is_low_group);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /*
     * Create the contexts.  Each group will have a context for sending
     * to the other group.  All processes must be involved.  Because
     * we know that the local and remote groups are disjoint, this
     * step will complete
     */
    MPL_DBG_MSG_FMT(MPIR_DBG_COMM,VERBOSE,
                    (MPL_DBG_FDEST,"About to get contextid (local_size=%d) on rank %d",
                     local_comm_ptr->local_size, local_comm_ptr->rank ));
    /* In the multi-threaded case, MPIR_Get_contextid_sparse assumes that the
       calling routine already holds the single criticial section */
    /* TODO: Make sure this is tag-safe */
    mpi_errno = MPIR_Get_contextid_sparse( local_comm_ptr, &recvcontext_id, FALSE );
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIR_Assert(recvcontext_id != 0);
    MPL_DBG_MSG_FMT(MPIR_DBG_COMM,VERBOSE, (MPL_DBG_FDEST,"Got contextid=%d", recvcontext_id));

    /* Leaders can now swap context ids and then broadcast the value
       to the local group of processes */
    if (local_comm_ptr->rank == local_leader) {
        MPIR_Context_id_t remote_context_id;

        mpi_errno = MPIC_Sendrecv( &recvcontext_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, cts_tag,
                                   &remote_context_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, cts_tag,
                                   peer_comm_ptr, MPI_STATUS_IGNORE, &errflag );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        final_context_id = remote_context_id;

        /* Now, send all of our local processes the remote_lpids,
           along with the final context id */
        comm_info[0] = final_context_id;
        MPL_DBG_MSG(MPIR_DBG_COMM,VERBOSE,"About to bcast on local_comm");
        mpi_errno = MPID_Bcast( comm_info, 1, MPI_INT, local_leader, local_comm_ptr, &errflag );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        MPIR_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");
        MPL_DBG_MSG_D(MPIR_DBG_COMM,VERBOSE,"end of bcast on local_comm of size %d",
                      local_comm_ptr->local_size );
    } else {
        /* we're the other processes */
        MPL_DBG_MSG(MPIR_DBG_COMM,VERBOSE,"About to receive bcast on local_comm");
        mpi_errno = MPID_Bcast( comm_info, 1, MPI_INT, local_leader, local_comm_ptr, &errflag );
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        MPIR_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");

        /* Extract the context and group sign information */
        final_context_id = comm_info[0];
    }

    /* At last, we now have the information that we need to build the
       intercommunicator */

    /* All processes in the local_comm now build the communicator */

    mpi_errno = MPIR_Comm_create( new_intercomm_ptr );
    if (mpi_errno)
        goto fn_fail;

    (*new_intercomm_ptr)->context_id = final_context_id;
    (*new_intercomm_ptr)->recvcontext_id = recvcontext_id;
    (*new_intercomm_ptr)->remote_size = remote_size;
    (*new_intercomm_ptr)->local_size = local_comm_ptr->local_size;
    (*new_intercomm_ptr)->pof2 = local_comm_ptr->pof2;
    (*new_intercomm_ptr)->rank = local_comm_ptr->rank;
    (*new_intercomm_ptr)->comm_kind = MPIR_COMM_KIND__INTERCOMM;
    (*new_intercomm_ptr)->local_comm = 0;
    (*new_intercomm_ptr)->is_low_group = is_low_group;

    mpi_errno = MPID_Create_intercomm_from_lpids( *new_intercomm_ptr, remote_size, remote_lpids );
    if (mpi_errno)
        goto fn_fail;

    MPIR_Comm_map_dup(*new_intercomm_ptr, local_comm_ptr, MPIR_COMM_MAP_DIR__L2L);

    /* Inherit the error handler (if any) */
    MPID_THREAD_CS_ENTER(POBJ, MPIR_THREAD_POBJ_COMM_MUTEX(local_comm_ptr));
    (*new_intercomm_ptr)->errhandler = local_comm_ptr->errhandler;
    if (local_comm_ptr->errhandler) {
        MPIR_Errhandler_add_ref( local_comm_ptr->errhandler );
    }
    MPID_THREAD_CS_EXIT(POBJ, MPIR_THREAD_POBJ_COMM_MUTEX(local_comm_ptr));

    mpi_errno = MPIR_Comm_commit(*new_intercomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    if (remote_lpids) {
        MPL_free(remote_lpids);
        remote_lpids = NULL;
    }
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPIR_COMM_KIND__INTERCOMM_CREATE_IMPL);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Intercomm_merge_impl - implement MPI_Intercomm_merge: build a single
 * intracommunicator out of the two groups of an intercommunicator.
 *
 * comm_ptr           - [in]  the intercommunicator to merge
 * high               - [in]  logical flag; the group(s) passing high=true are
 *                            ordered after the group(s) passing high=false
 * new_intracomm_ptr  - [out] receives the newly created intracommunicator
 *
 * Returns MPI_SUCCESS or an MPI error code (error-path cleanup via fn_fail).
 *
 * Strategy (two phases):
 *   1. Agree on ordering ("high") across both groups, then build a TEMPORARY
 *      intracomm whose context id is derived from the intercomm's ids with
 *      both sub-communicator flag bits set (see long comment below) so it is
 *      unique; this temporary comm is only good enough to run the context-id
 *      allocation algorithm.
 *   2. Allocate a real context id over that temporary comm, release it, and
 *      rebuild the final intracomm with the proper id.
 */
int MPIR_Intercomm_merge_impl(MPIR_Comm * comm_ptr, int high, MPIR_Comm ** new_intracomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    int local_high, remote_high, new_size;
    MPIR_Context_id_t new_context_id;
    MPIR_Errflag_t errflag = MPIR_ERR_NONE;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPIR_COMM_KIND__INTERCOMM_MERGE_IMPL);

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPIR_COMM_KIND__INTERCOMM_MERGE_IMPL);
    /* Make sure that we have a local intercommunicator; the local-group
     * broadcast below requires it */
    if (!comm_ptr->local_comm) {
        /* Manufacture the local communicator */
        mpi_errno = MPII_Setup_intercomm_localcomm(comm_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

    /* Find the "high" value of the other group of processes.  This
     * will be used to determine which group is ordered first in
     * the generated communicator.  high is logical */
    local_high = high;
    if (comm_ptr->rank == 0) {
        /* Only the local rank-0 processes exchange "high"; everyone else
         * learns the result via the broadcast below.  This routine allows
         * us to use the collective communication context rather than the
         * point-to-point context. */
        mpi_errno = MPIC_Sendrecv(&local_high, 1, MPI_INT, 0, 0,
                                  &remote_high, 1, MPI_INT, 0, 0, comm_ptr,
                                  MPI_STATUS_IGNORE, &errflag);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        /* If local_high and remote_high are the same, then order is arbitrary.
         * we use the is_low_group in the intercomm in this case to break the
         * tie deterministically and consistently on both sides. */
        MPL_DBG_MSG_FMT(MPIR_DBG_COMM, VERBOSE,
                        (MPL_DBG_FDEST, "local_high=%d remote_high=%d is_low_group=%d",
                         local_high, remote_high, comm_ptr->is_low_group));
        if (local_high == remote_high) {
            local_high = !(comm_ptr->is_low_group);
        }
    }

    /*
     * All processes in the local group now need to get the
     * value of local_high, which may have changed if both groups
     * of processes had the same value for high
     */
    mpi_errno = MPIR_Bcast(&local_high, 1, MPI_INT, 0, comm_ptr->local_comm, &errflag);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIR_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");

    /*
     * For the intracomm, we need a consistent context id.
     * That means that one of the two groups needs to use
     * the recvcontext_id and the other must use the context_id
     * The recvcontext_id is unique on each process, but another
     * communicator may use the context_id. Therefore, we do a small hack.
     * We set both flags indicating a sub-communicator (intra-node and
     * inter-node) to one. This is normally not possible (a communicator
     * is either intra- or inter-node) - which makes this context_id unique.
     *
     */

    new_size = comm_ptr->local_size + comm_ptr->remote_size;

    /* Phase 1: build the temporary intracomm with the hacked context id */
    mpi_errno = MPIR_Comm_create(new_intracomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Value 3 sets both sub-communicator flag bits (see comment above);
     * the "high" group derives from recvcontext_id, the other from
     * context_id, so the two sides end up with matching ids */
    if (local_high) {
        (*new_intracomm_ptr)->context_id =
            MPIR_CONTEXT_SET_FIELD(SUBCOMM, comm_ptr->recvcontext_id, 3);
    } else {
        (*new_intracomm_ptr)->context_id =
            MPIR_CONTEXT_SET_FIELD(SUBCOMM, comm_ptr->context_id, 3);
    }
    (*new_intracomm_ptr)->recvcontext_id = (*new_intracomm_ptr)->context_id;
    (*new_intracomm_ptr)->remote_size = (*new_intracomm_ptr)->local_size = new_size;
    (*new_intracomm_ptr)->pof2 = MPL_pof2(new_size);
    /* rank is filled in by create_and_map below */
    (*new_intracomm_ptr)->rank = -1;
    (*new_intracomm_ptr)->comm_kind = MPIR_COMM_KIND__INTRACOMM;

    /* Now we know which group comes first.  Build the new mapping
     * from the existing comm */
    mpi_errno = create_and_map(comm_ptr, local_high, (*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* We've setup a temporary context id, based on the context id
     * used by the intercomm.  This allows us to perform the allreduce
     * operations within the context id algorithm, since we already
     * have a valid (almost - see comm_create_hook) communicator.
     */
    mpi_errno = MPIR_Comm_commit((*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Phase 2: obtain a properly allocated context id using the
     * temporary communicator */
    /* In the multi-threaded case, MPIR_Get_contextid_sparse assumes that the
     * calling routine already holds the single critical section */
    new_context_id = 0;
    mpi_errno = MPIR_Get_contextid_sparse((*new_intracomm_ptr), &new_context_id, FALSE);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIR_Assert(new_context_id != 0);

    /* We release this communicator that was involved just to
     * get valid context id and create true one
     */
    mpi_errno = MPIR_Comm_release(*new_intracomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Rebuild the intracomm from scratch with the real context id */
    mpi_errno = MPIR_Comm_create(new_intracomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    (*new_intracomm_ptr)->remote_size = (*new_intracomm_ptr)->local_size = new_size;
    (*new_intracomm_ptr)->pof2 = MPL_pof2(new_size);
    (*new_intracomm_ptr)->rank = -1;
    (*new_intracomm_ptr)->comm_kind = MPIR_COMM_KIND__INTRACOMM;
    (*new_intracomm_ptr)->context_id = new_context_id;
    (*new_intracomm_ptr)->recvcontext_id = new_context_id;

    mpi_errno = create_and_map(comm_ptr, local_high, (*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    mpi_errno = MPIR_Comm_commit((*new_intracomm_ptr));
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPIR_COMM_KIND__INTERCOMM_MERGE_IMPL);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Comm_create_intra - comm create impl for intracommunicators; assumes
 * that the standard error checking has already taken place in the calling
 * function.
 *
 * comm_ptr  - [in]  parent intracommunicator (context-id allocation is
 *                   collective over this comm, so ALL of its processes must
 *                   call this routine, members of group_ptr or not)
 * group_ptr - [in]  group of processes that form the new communicator
 * newcomm   - [out] handle of the new communicator, or MPI_COMM_NULL on
 *                   processes whose rank in group_ptr is MPI_UNDEFINED
 *
 * Returns MPI_SUCCESS or an MPI error code; on failure the partially built
 * communicator and the context id are released (see fn_fail). */
PMPI_LOCAL int MPIR_Comm_create_intra(MPID_Comm *comm_ptr, MPID_Group *group_ptr,
                                      MPI_Comm *newcomm)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t new_context_id = 0;
    MPID_Comm *newcomm_ptr = NULL;
    int *mapping = NULL;
    int n;
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_COMM_CREATE_INTRA);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_COMM_CREATE_INTRA);

    MPIU_Assert(comm_ptr->comm_kind == MPID_INTRACOMM);

    n = group_ptr->size;
    *newcomm = MPI_COMM_NULL;

    /* Create a new communicator from the specified group members */

    /* Creating the context id is collective over the *input* communicator,
     * so it must be created before we decide if this process is a
     * member of the group */
    /* In the multi-threaded case, MPIR_Get_contextid assumes that the
     * calling routine already holds the single critical section */
    /* TODO should be converted to use MPIR_Get_contextid_sparse instead */
    mpi_errno = MPIR_Get_contextid( comm_ptr, &new_context_id );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    MPIU_Assert(new_context_id != 0);

    if (group_ptr->rank != MPI_UNDEFINED) {
        MPID_VCR *mapping_vcr = NULL;

        /* Compute the mapping from ranks in the new group to VCs of the
         * parent comm */
        mpi_errno = MPIR_Comm_create_calculate_mapping(group_ptr, comm_ptr,
                                                       &mapping_vcr, &mapping);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        /* Get the new communicator structure and context id */

        mpi_errno = MPIR_Comm_create( &newcomm_ptr );
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        newcomm_ptr->recvcontext_id = new_context_id;
        newcomm_ptr->rank           = group_ptr->rank;
        newcomm_ptr->comm_kind      = comm_ptr->comm_kind;
        /* Since the group has been provided, let the new communicator know
         * about the group.  The same group serves as both local and remote
         * group (this is an intracomm), so it is referenced twice. */
        newcomm_ptr->local_comm     = 0;
        newcomm_ptr->local_group    = group_ptr;
        MPIR_Group_add_ref( group_ptr );

        newcomm_ptr->remote_group   = group_ptr;
        MPIR_Group_add_ref( group_ptr );
        /* intracomms use the same id for sending and receiving */
        newcomm_ptr->context_id     = newcomm_ptr->recvcontext_id;
        newcomm_ptr->remote_size    = newcomm_ptr->local_size = n;

        /* Setup the communicator's vc table.  This is for the remote group,
         * which is the same as the local group for intracommunicators */
        mpi_errno = MPIR_Comm_create_create_and_map_vcrt(n, mapping, mapping_vcr,
                                                         &newcomm_ptr->vcrt,
                                                         &newcomm_ptr->vcr);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        /* Notify the device of this new communicator */
        MPID_Dev_comm_create_hook( newcomm_ptr );
        mpi_errno = MPIR_Comm_commit(newcomm_ptr);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        /* Publish the handle only after the comm is fully constructed */
        MPIU_OBJ_PUBLISH_HANDLE(*newcomm, newcomm_ptr->handle);
    }
    else {
        /* This process is not in the group; it still participated in the
         * collective id allocation above, so give the id back */
        MPIR_Free_contextid( new_context_id );
        new_context_id = 0;
    }

  fn_exit:
    if (mapping)
        MPIU_Free(mapping);
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_COMM_CREATE_INTRA);
    return mpi_errno;
  fn_fail:
    if (newcomm_ptr != NULL) {
        MPIR_Comm_release(newcomm_ptr, 0/*isDisconnect*/);
        new_context_id = 0; /* MPIR_Comm_release frees the new ctx id */
    }
    /* Only free the id directly if no comm object owned it yet */
    if (new_context_id != 0)
        MPIR_Free_contextid(new_context_id);
    *newcomm = MPI_COMM_NULL;
    goto fn_exit;
}