Example #1
int MPI_Group_free(MPI_Group *group)
{
    int ret;

    /* check to make sure we don't free GROUP_NULL */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ((NULL == group) ||
            (MPI_GROUP_NULL == *group) || (NULL == *group) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
                                          FUNC_NAME);
        }

    }

    ret = ompi_group_free(group);
    OMPI_ERRHANDLER_CHECK(ret, MPI_COMM_WORLD, ret, FUNC_NAME);

    return MPI_SUCCESS;
}
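
For context, MPI_Group_free is normally called on a group handle previously obtained from a communicator; per the MPI standard, a successful free releases the group and sets the caller's handle to MPI_GROUP_NULL. A minimal usage sketch, using only the standard MPI API:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Group world_group;

    MPI_Init(&argc, &argv);

    /* Obtain the group underlying MPI_COMM_WORLD. */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    /* Release it; on success the handle is reset to MPI_GROUP_NULL. */
    MPI_Group_free(&world_group);

    if (MPI_GROUP_NULL == world_group) {
        printf("group handle reset to MPI_GROUP_NULL\n");
    }

    MPI_Finalize();
    return 0;
}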
Example #2
int MPI_Group_free(MPI_Group *group)
{
    int ret;

    /* check to make sure we don't free GROUP_NULL.  Note that we *do*
       allow freeing GROUP_EMPTY after much debate in the OMPI core
       group.  The final thread about this, and the decision to
       support freeing GROUP_EMPTY can be found here:

       http://www.open-mpi.org/community/lists/devel/2007/12/2750.php

       The short version: other MPIs allow it (LAM/MPI, CT6, MPICH2),
       probably mainly because the Intel MPI test suite expects it,
       and there are now several years' worth of expectations built
       around that behavior.  Rather than have to explain every time
       why OMPI is the only one that completely adheres to the
       standard / fails the Intel tests, it seemed easier to just let
       this one slide.  It's not really that important, after all! */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ((NULL == group) ||
            (MPI_GROUP_NULL == *group) || (NULL == *group) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
                                          FUNC_NAME);
        }

    }

    OPAL_CR_ENTER_LIBRARY();

    ret = ompi_group_free(group);
    OMPI_ERRHANDLER_CHECK(ret, MPI_COMM_WORLD, ret, FUNC_NAME);

    OPAL_CR_EXIT_LIBRARY();
    return MPI_SUCCESS;
}
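
As the long comment above records, this version deliberately tolerates freeing MPI_GROUP_EMPTY, even though freeing a predefined group is erroneous under a strict reading of the standard. A sketch of the tolerated call (implementation-specific behavior per the comment above; portable code should not rely on it):

/* Tolerated by Open MPI (and, per the comment above, by LAM/MPI,
 * CT6, and MPICH2); strictly conforming programs should never free
 * a predefined group. */
MPI_Group g = MPI_GROUP_EMPTY;
int rc = MPI_Group_free(&g);   /* expected to return MPI_SUCCESS here */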
Example #3
/*
 * Invoked when a new communicator has been created.  Look at the
 * communicator and decide which set of functions and priority we
 * want to return.
 */
mca_scoll_base_module_t *
mca_scoll_mpi_comm_query(oshmem_group_t *osh_group, int *priority)
{
    mca_scoll_base_module_t *module;
    mca_scoll_mpi_module_t *mpi_module;
    int err, i;
    int tag;
    ompi_group_t *parent_group, *new_group;
    ompi_communicator_t *newcomm = NULL;
    mca_scoll_mpi_component_t *cm = &mca_scoll_mpi_component;
    int *ranks;

    *priority = 0;
    if (!cm->mpi_enable){
        return NULL;
    }
    if ((osh_group->proc_count < 2) || (osh_group->proc_count < cm->mpi_np)) {
        return NULL;
    }
    /* Create an ompi communicator object and store a pointer to it in the group object */
    if (NULL == oshmem_group_all) {
        osh_group->ompi_comm = &(ompi_mpi_comm_world.comm);
    } else {
        int my_rank = MPI_UNDEFINED;

        err = ompi_comm_group(&(ompi_mpi_comm_world.comm), &parent_group);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            return NULL;
        }
        ranks = (int*) malloc(osh_group->proc_count * sizeof(int));
        if (OPAL_UNLIKELY(NULL == ranks)) {
            return NULL;
        }
        tag = 1;

        for (i = 0; i < osh_group->proc_count; i++) {
            ompi_proc_t* ompi_proc;
            /* Translate this oshmem proc to its rank in the parent
             * (world) group by matching proc names; every proc in
             * osh_group is assumed to be present in the parent group. */
            for( int j = 0; j < ompi_group_size(parent_group); j++ ) {
                ompi_proc = ompi_group_peer_lookup(parent_group, j);
                if( 0 == opal_compare_proc(ompi_proc->super.proc_name, osh_group->proc_array[i]->super.proc_name)) {
                    ranks[i] = j;
                    break;
                }
            }
            /* NTH: keep track of my rank in the new group for the workaround below */
            if (ranks[i] == ompi_comm_rank (&ompi_mpi_comm_world.comm)) {
                my_rank = i;
            }
        }

        err = ompi_group_incl(parent_group, osh_group->proc_count, ranks, &new_group);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            free(ranks);
            return NULL;
        }

        /* NTH: XXX -- WORKAROUND -- The oshmem code overwrites ompi_proc_local_proc with its
         * own proc but does not update the proc list in comm world or comm self. This causes
         * the code in ompi_group_incl that updates grp_my_rank to fail. This will cause failures
         * here and when an application attempts to mix oshmem and mpi so it will really need to
         * be fixed in oshmem/proc and not here. For now we need to work around a new jenkins
         * failure so set my group ranking so we do not crash when running ompi_comm_create_group. */
        new_group->grp_my_rank = my_rank;

        err = ompi_comm_create_group(&(ompi_mpi_comm_world.comm), new_group, tag, &newcomm);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            free(ranks);
            return NULL;
        }
        err = ompi_group_free(&new_group);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            free(ranks);
            return NULL;
        }

        free(ranks);
        osh_group->ompi_comm = newcomm;
    }
    mpi_module = OBJ_NEW(mca_scoll_mpi_module_t);
    if (!mpi_module){
        return NULL;
    }
    mpi_module->comm = osh_group->ompi_comm;

    mpi_module->super.scoll_module_enable = mca_scoll_mpi_module_enable;
    mpi_module->super.scoll_barrier = mca_scoll_mpi_barrier;
    mpi_module->super.scoll_broadcast = mca_scoll_mpi_broadcast;
    mpi_module->super.scoll_reduce = mca_scoll_mpi_reduce;
    mpi_module->super.scoll_collect = mca_scoll_mpi_collect;

    *priority = cm->mpi_priority;
    module = &mpi_module->super;

    return module;
}
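
The name-matching loop above plays roughly the role that MPI_Group_translate_ranks plays at the user level, and ompi_comm_create_group is the internal counterpart of MPI-3's MPI_Comm_create_group. A minimal user-level sketch of the same group-incl / comm-create-group pattern (the subgroup choice, the first n ranks, is illustrative only):

#include <mpi.h>
#include <stdlib.h>

/* Build a communicator over the first n ranks of comm: the MPI-level
 * analogue of the ompi_group_incl + ompi_comm_create_group sequence
 * above.  Processes outside the subgroup get MPI_COMM_NULL. */
static int build_subcomm(MPI_Comm comm, int n, MPI_Comm *newcomm)
{
    MPI_Group parent_group, new_group;
    int my_rank, tag = 1;   /* fixed tag, as in the component code */
    int *ranks = malloc(n * sizeof(int));

    if (NULL == ranks) {
        return MPI_ERR_NO_MEM;
    }
    for (int i = 0; i < n; i++) {
        ranks[i] = i;
    }

    MPI_Comm_rank(comm, &my_rank);
    MPI_Comm_group(comm, &parent_group);
    MPI_Group_incl(parent_group, n, ranks, &new_group);

    /* MPI_Comm_create_group is collective only over the new group,
     * so only its members may call it. */
    if (my_rank < n) {
        MPI_Comm_create_group(comm, new_group, tag, newcomm);
    } else {
        *newcomm = MPI_COMM_NULL;
    }

    MPI_Group_free(&new_group);
    MPI_Group_free(&parent_group);
    free(ranks);
    return MPI_SUCCESS;
}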
Example #4
/*
 * Invoked when a new communicator has been created.  Look at the
 * communicator and decide which set of functions and priority we
 * want to return.
 */
mca_scoll_base_module_t *
mca_scoll_mpi_comm_query(oshmem_group_t *osh_group, int *priority)
{
    mca_scoll_base_module_t *module;
    mca_scoll_mpi_module_t *mpi_module;
    int err, i;
    int tag;
    ompi_group_t *parent_group, *new_group;
    ompi_communicator_t *newcomm = NULL;
    mca_scoll_mpi_component_t *cm = &mca_scoll_mpi_component;
    int *ranks;

    *priority = 0;
    if (!cm->mpi_enable){
        return NULL;
    }
    if ((osh_group->proc_count < 2) || (osh_group->proc_count < cm->mpi_np)) {
        return NULL;
    }
    /* Create an ompi communicator object and store a pointer to it in the group object */
    if (NULL == oshmem_group_all) {
        osh_group->ompi_comm = &(ompi_mpi_comm_world.comm);
    } else {
        err = ompi_comm_group(&(ompi_mpi_comm_world.comm), &parent_group);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            return NULL;
        }
        ranks = (int*) malloc(osh_group->proc_count * sizeof(int));
        if (OPAL_UNLIKELY(NULL == ranks)) {
            return NULL;
        }
        tag = 1;

        for (i = 0; i < osh_group->proc_count; i++) {
            ompi_proc_t* ompi_proc;
            /* Translate this oshmem proc to its rank in the parent
             * (world) group by matching proc names; every proc in
             * osh_group is assumed to be present in the parent group. */
            for( int j = 0; j < ompi_group_size(parent_group); j++ ) {
                ompi_proc = ompi_group_peer_lookup(parent_group, j);
                if( 0 == opal_compare_proc(ompi_proc->super.proc_name, osh_group->proc_array[i]->super.proc_name)) {
                    ranks[i] = j;
                    break;
                }
            }
        }

        err = ompi_group_incl(parent_group, osh_group->proc_count, ranks, &new_group);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            free(ranks);
            return NULL;
        }
        err = ompi_comm_create_group(&(ompi_mpi_comm_world.comm), new_group, tag, &newcomm);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            free(ranks);
            return NULL;
        }
        err = ompi_group_free(&new_group);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
            free(ranks);
            return NULL;
        }

        free(ranks);
        osh_group->ompi_comm = newcomm;
    }
    mpi_module = OBJ_NEW(mca_scoll_mpi_module_t);
    if (!mpi_module){
        return NULL;
    }
    mpi_module->comm = osh_group->ompi_comm;

    mpi_module->super.scoll_module_enable = mca_scoll_mpi_module_enable;
    mpi_module->super.scoll_barrier = mca_scoll_mpi_barrier;
    mpi_module->super.scoll_broadcast = mca_scoll_mpi_broadcast;
    mpi_module->super.scoll_reduce = mca_scoll_mpi_reduce;
    mpi_module->super.scoll_collect = mca_scoll_mpi_collect;
    mpi_module->super.scoll_alltoall = NULL;

    *priority = cm->mpi_priority;
    module = &mpi_module->super;

    return module;
}
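
One design note on the error paths above: every failure after the malloc repeats free(ranks); return NULL;. A common C alternative is a single-exit goto cleanup; the skeleton below is a hypothetical refactoring of that control flow (step_one and step_two are stand-ins, not component functions), not the component's actual code:

#include <stdlib.h>

/* Stand-ins so the sketch compiles on its own; the point here is
 * the control flow, not the bodies. */
static int step_one(int *ranks) { (void) ranks; return 0; }
static int step_two(void)       { return 0; }

/* Hypothetical single-exit variant of the error handling above:
 * every failure after the malloc funnels through one cleanup label,
 * so free(ranks) appears exactly once instead of in each branch. */
static int run_steps(int count)
{
    int err;
    int *ranks = malloc(count * sizeof(int));

    if (NULL == ranks) {
        return -1;
    }
    if (0 != (err = step_one(ranks))) {
        goto cleanup;
    }
    err = step_two();

cleanup:
    free(ranks);
    return err;
}

int main(void)
{
    return run_steps(4);
}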