Example 1
0
/* comm create impl for intercommunicators, assumes that the standard error
 * checking has already taken place in the calling function */
PMPI_LOCAL int MPIR_Comm_create_inter(MPID_Comm *comm_ptr, MPID_Group *group_ptr, MPI_Comm *newcomm)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t new_context_id;
    MPID_Comm *newcomm_ptr = NULL;
    MPI_Comm comm = comm_ptr->handle;
    int *mapping = NULL;
    int *remote_mapping = NULL;
    int remote_size = -1;
    int rinfo[2];
    MPID_VCR *mapping_vcr = NULL;
    MPID_VCR *remote_mapping_vcr = NULL;
    int errflag = FALSE;
    MPIU_CHKLMEM_DECL(1);
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_COMM_CREATE_INTER);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_COMM_CREATE_INTER);

    MPIU_Assert(comm_ptr->comm_kind == MPID_INTERCOMM);

    /* Create a new communicator from the specified group members */

    /* If there is a context id cache in oldcomm, use it here.  Otherwise,
       use the appropriate algorithm to get a new context id. 
       Creating the context id is collective over the *input* communicator,
       so it must be created before we decide if this process is a 
       member of the group */
    /* In the multi-threaded case, MPIR_Get_contextid assumes that the
       calling routine already holds the single critical section */
    if (!comm_ptr->local_comm) {
        mpi_errno = MPIR_Setup_intercomm_localcomm( comm_ptr );
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
    mpi_errno = MPIR_Get_contextid( comm_ptr->local_comm, &new_context_id );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    MPIU_Assert(new_context_id != 0);
    MPIU_Assert(new_context_id != comm_ptr->recvcontext_id);

    remote_mapping_vcr = comm_ptr->vcr;

    mpi_errno = MPIR_Comm_create_calculate_mapping(group_ptr, comm_ptr, 
						   &mapping_vcr, &mapping);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    if (group_ptr->rank != MPI_UNDEFINED) {
        /* Get the new communicator structure and context id */
        mpi_errno = MPIR_Comm_create( &newcomm_ptr );
        if (mpi_errno) goto fn_fail;

        newcomm_ptr->recvcontext_id = new_context_id;
        newcomm_ptr->rank           = group_ptr->rank;
        newcomm_ptr->comm_kind      = comm_ptr->comm_kind;
        /* Since the group has been provided, let the new communicator know
           about the group */
        newcomm_ptr->local_comm     = 0;
        newcomm_ptr->local_group    = group_ptr;
        MPIR_Group_add_ref( group_ptr );

        newcomm_ptr->local_size   = group_ptr->size;
        newcomm_ptr->remote_group = 0;

        newcomm_ptr->is_low_group = comm_ptr->is_low_group;
    }

    /* There is an additional step.  We must communicate the information
       on the local context id and the group members, given by their ranks,
       so that the remote processes can construct the appropriate VCRT.
       First we exchange group sizes and context ids.  Then we exchange
       the ranks in the remote group, from which the remote VCRT can
       be constructed.  We need to use the "collective" context in the
       original intercommunicator. */
    if (comm_ptr->rank == 0) {
        int info[2];
        info[0] = new_context_id;
        info[1] = group_ptr->size;

        mpi_errno = MPIC_Sendrecv(info, 2, MPI_INT, 0, 0,
                                  rinfo, 2, MPI_INT, 0, 0,
                                  comm, MPI_STATUS_IGNORE );
        if (mpi_errno) { MPIU_ERR_POP( mpi_errno ); }
        if (newcomm_ptr != NULL) {
            newcomm_ptr->context_id = rinfo[0];
        }
        remote_size = rinfo[1];

        MPIU_CHKLMEM_MALLOC(remote_mapping,int*,
                            remote_size*sizeof(int),
                            mpi_errno,"remote_mapping");

        /* Populate and exchange the ranks */
        mpi_errno = MPIC_Sendrecv( mapping, group_ptr->size, MPI_INT, 0, 0,
                                   remote_mapping, remote_size, MPI_INT, 0, 0,
                                   comm, MPI_STATUS_IGNORE );
        if (mpi_errno) { MPIU_ERR_POP( mpi_errno ); }

        /* Broadcast to the other members of the local group */
        mpi_errno = MPIR_Bcast_impl( rinfo, 2, MPI_INT, 0,
                                     comm_ptr->local_comm, &errflag);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        mpi_errno = MPIR_Bcast_impl( remote_mapping, remote_size, MPI_INT, 0,
                                     comm_ptr->local_comm, &errflag);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        MPIU_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");
    }
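
For context, the user-level entry point that reaches MPIR_Comm_create_inter is MPI_Comm_create called on an intercommunicator. The following is a minimal sketch, not taken from the source above; the way the intercommunicator is built from two halves of MPI_COMM_WORLD is only illustrative and assumes at least two processes.

/* Illustrative only: build an intercommunicator from two halves of
 * MPI_COMM_WORLD, then call MPI_Comm_create on it.  Internally this is the
 * path that ends up in a routine such as MPIR_Comm_create_inter. */
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm half, intercomm, newcomm;
    MPI_Group local_group;
    int rank, size, color, remote_leader;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Split the world into two halves; rank 0 of each half is the local
     * leader, and the remote leader is addressed by its rank in
     * MPI_COMM_WORLD (requires at least two processes). */
    color = (rank < size / 2) ? 0 : 1;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &half);
    remote_leader = (color == 0) ? size / 2 : 0;
    MPI_Intercomm_create(half, 0, MPI_COMM_WORLD, remote_leader, 42, &intercomm);

    /* MPI_Comm_create on an intercommunicator: the group argument is a
     * subgroup of the local group; here we simply reuse the whole local
     * group, so every process ends up in the new intercommunicator. */
    MPI_Comm_group(half, &local_group);
    MPI_Comm_create(intercomm, local_group, &newcomm);

    if (newcomm != MPI_COMM_NULL)
        MPI_Comm_free(&newcomm);
    MPI_Group_free(&local_group);
    MPI_Comm_free(&intercomm);
    MPI_Comm_free(&half);
    MPI_Finalize();
    return 0;
}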
Example 2
0
int MPIDI_Comm_connect(const char *port_name, MPID_Info *info, int root, 
		       MPID_Comm *comm_ptr, MPID_Comm **newcomm)
{
    int mpi_errno=MPI_SUCCESS;
    int j, i, rank, recv_ints[3], send_ints[3], context_id;
    int remote_comm_size=0;
    MPID_Comm *tmp_comm = NULL;
    MPIDI_VC_t *new_vc = NULL;
    int sendtag=100, recvtag=100, n_remote_pgs;
    int n_local_pgs=1, local_comm_size;
    pg_translation *local_translation = NULL, *remote_translation = NULL;
    pg_node *pg_list = NULL;
    MPIDI_PG_t **remote_pg = NULL;
    MPIR_Context_id_t recvcontext_id = MPIR_INVALID_CONTEXT_ID;
    int errflag = FALSE;
    MPIU_CHKLMEM_DECL(3);
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_COMM_CONNECT);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_COMM_CONNECT);

    /* Get the context ID here because we need to send it to the remote side */
    mpi_errno = MPIR_Get_contextid( comm_ptr, &recvcontext_id );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    rank = comm_ptr->rank;
    local_comm_size = comm_ptr->local_size;

    if (rank == root)
    {
	/* Establish a communicator to communicate with the root on the 
	   other side. */
	mpi_errno = MPIDI_Create_inter_root_communicator_connect(
	    port_name, &tmp_comm, &new_vc);
	if (mpi_errno != MPI_SUCCESS) {
	    MPIU_ERR_POP_LABEL(mpi_errno, no_port);
	}

	/* Make an array to translate local ranks to process group index 
	   and rank */
	MPIU_CHKLMEM_MALLOC(local_translation,pg_translation*,
			    local_comm_size*sizeof(pg_translation),
			    mpi_errno,"local_translation");

	/* Make a list of the local communicator's process groups and encode 
	   them in strings to be sent to the other side.
	   The encoded string for each process group contains the process 
	   group id, size and all its KVS values */
	mpi_errno = ExtractLocalPGInfo( comm_ptr, local_translation, 
					&pg_list, &n_local_pgs );
        MPIU_ERR_CHKINTERNAL(mpi_errno, mpi_errno, "Can't extract local PG info.");

	/* Send the remote root: n_local_pgs, local_comm_size,
           Recv from the remote root: n_remote_pgs, remote_comm_size,
           recvcontext_id for newcomm */

        send_ints[0] = n_local_pgs;
        send_ints[1] = local_comm_size;
        send_ints[2] = recvcontext_id;

	MPIU_DBG_MSG_FMT(CH3_CONNECT,VERBOSE,(MPIU_DBG_FDEST,
		  "sending 3 ints, %d, %d and %d, and receiving 3 ints", 
                  send_ints[0], send_ints[1], send_ints[2]));
        mpi_errno = MPIC_Sendrecv(send_ints, 3, MPI_INT, 0,
                                  sendtag++, recv_ints, 3, MPI_INT,
                                  0, recvtag++, tmp_comm->handle,
                                  MPI_STATUS_IGNORE);
        if (mpi_errno != MPI_SUCCESS) {
            /* this is a no_port error because we may fail to connect
               on the send if the port name is invalid */
	    MPIU_ERR_POP_LABEL(mpi_errno, no_port);
	}
    }
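
MPIDI_Comm_connect is the device-level side of MPI_Comm_connect. For context, here is a minimal sketch of the user-level connect/accept pattern it serves; it is not part of the source above, and how the port name reaches the client (here via a command-line argument) is an assumption.

/* Minimal sketch of the user-level MPI_Comm_connect / MPI_Comm_accept
 * pattern.  Run one job as "server" and pass the printed port name to a
 * second job started as the client. */
#include <mpi.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    MPI_Comm intercomm;
    char port_name[MPI_MAX_PORT_NAME];

    MPI_Init(&argc, &argv);

    if (argc > 1 && strcmp(argv[1], "server") == 0) {
        /* Server: open a port and wait for a client to connect. */
        MPI_Open_port(MPI_INFO_NULL, port_name);
        printf("server port: %s\n", port_name);
        fflush(stdout);
        MPI_Comm_accept(port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm);
        MPI_Close_port(port_name);
    } else if (argc > 2) {
        /* Client: connect using the port name passed as argv[2];
         * the root of the connect is rank 0 of MPI_COMM_WORLD. */
        MPI_Comm_connect(argv[2], MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm);
    } else {
        MPI_Finalize();
        return 1;
    }

    /* The resulting intercommunicator links the two independently started
     * MPI jobs; disconnect when done. */
    MPI_Comm_disconnect(&intercomm);
    MPI_Finalize();
    return 0;
}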
Example 3
0
/* comm create impl for intracommunicators, assumes that the standard error
 * checking has already taken place in the calling function */
PMPI_LOCAL int MPIR_Comm_create_intra(MPID_Comm *comm_ptr, MPID_Group *group_ptr, MPI_Comm *newcomm)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t new_context_id = 0;
    MPID_Comm *newcomm_ptr = NULL;
    int *mapping = NULL;
    int n;
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_COMM_CREATE_INTRA);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_COMM_CREATE_INTRA);

    MPIU_Assert(comm_ptr->comm_kind == MPID_INTRACOMM);

    n = group_ptr->size;
    *newcomm = MPI_COMM_NULL;

    /* Create a new communicator from the specified group members */

    /* Creating the context id is collective over the *input* communicator,
       so it must be created before we decide if this process is a
       member of the group */
    /* In the multi-threaded case, MPIR_Get_contextid assumes that the
       calling routine already holds the single critical section */
    /* TODO should be converted to use MPIR_Get_contextid_sparse instead */
    mpi_errno = MPIR_Get_contextid( comm_ptr, &new_context_id );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    MPIU_Assert(new_context_id != 0);

    if (group_ptr->rank != MPI_UNDEFINED) {
        MPID_VCR *mapping_vcr = NULL;

        mpi_errno = MPIR_Comm_create_calculate_mapping(group_ptr, comm_ptr, 
						       &mapping_vcr, &mapping);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        /* Get the new communicator structure and context id */

        mpi_errno = MPIR_Comm_create( &newcomm_ptr );
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        newcomm_ptr->recvcontext_id = new_context_id;
        newcomm_ptr->rank           = group_ptr->rank;
        newcomm_ptr->comm_kind      = comm_ptr->comm_kind;
        /* Since the group has been provided, let the new communicator know
           about the group */
        newcomm_ptr->local_comm     = 0;
        newcomm_ptr->local_group    = group_ptr;
        MPIR_Group_add_ref( group_ptr );

        newcomm_ptr->remote_group   = group_ptr;
        MPIR_Group_add_ref( group_ptr );
        newcomm_ptr->context_id     = newcomm_ptr->recvcontext_id;
        newcomm_ptr->remote_size    = newcomm_ptr->local_size = n;

        /* Set up the communicator's VC table.  This is for the remote group,
           which is the same as the local group for intracommunicators. */
        mpi_errno = MPIR_Comm_create_create_and_map_vcrt(n,
                                                         mapping,
                                                         mapping_vcr,
                                                         &newcomm_ptr->vcrt,
                                                         &newcomm_ptr->vcr);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        /* Notify the device of this new communicator */
        MPID_Dev_comm_create_hook( newcomm_ptr );
        mpi_errno = MPIR_Comm_commit(newcomm_ptr);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        MPIU_OBJ_PUBLISH_HANDLE(*newcomm, newcomm_ptr->handle);
    }
    else {
        /* This process is not in the group */
        MPIR_Free_contextid( new_context_id );
        new_context_id = 0;
    }

fn_exit:
    if (mapping)
        MPIU_Free(mapping);

    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_COMM_CREATE_INTRA);
    return mpi_errno;
fn_fail:
    if (newcomm_ptr != NULL) {
        MPIR_Comm_release(newcomm_ptr, 0/*isDisconnect*/);
        new_context_id = 0; /* MPIR_Comm_release frees the new ctx id */
    }
    if (new_context_id != 0)
        MPIR_Free_contextid(new_context_id);
    *newcomm = MPI_COMM_NULL;

    goto fn_exit;
}
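
For context, the user-level call that reaches MPIR_Comm_create_intra is MPI_Comm_create on an intracommunicator. The sketch below is not from the source above; it creates a communicator containing only the even ranks of MPI_COMM_WORLD, so odd ranks take the group_ptr->rank == MPI_UNDEFINED path and receive MPI_COMM_NULL.

/* Minimal sketch: MPI_Comm_create restricted to the even ranks of
 * MPI_COMM_WORLD.  The call is collective over the input communicator,
 * matching the comment above about context id creation. */
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Group world_group, even_group;
    MPI_Comm even_comm;
    int rank, size, i, n, *ranks;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Build the group of even-numbered ranks. */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    n = (size + 1) / 2;
    ranks = malloc(n * sizeof(int));
    for (i = 0; i < n; i++)
        ranks[i] = 2 * i;
    MPI_Group_incl(world_group, n, ranks, &even_group);

    /* Collective over MPI_COMM_WORLD; processes not in even_group get
     * MPI_COMM_NULL back. */
    MPI_Comm_create(MPI_COMM_WORLD, even_group, &even_comm);

    if (even_comm != MPI_COMM_NULL)
        MPI_Comm_free(&even_comm);
    MPI_Group_free(&even_group);
    MPI_Group_free(&world_group);
    free(ranks);
    MPI_Finalize();
    return 0;
}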