Example no. 1
0
/* Initialize MPI for the NA test harness (global-state variant).
 *
 * server: NA_TRUE when this process acts as a server (listener).
 *
 * Side effects: may initialize MPI, may split MPI_COMM_WORLD into
 * na_test_comm_g when static MPMD mode is requested, and always fills
 * na_test_comm_rank_g / na_test_comm_size_g on success. Sets
 * mpi_internally_initialized so the matching finalize call knows this
 * module owns the MPI runtime.
 */
static void
na_test_mpi_init(na_bool_t server)
{
    int mpi_initialized = 0;
    int mpi_finalized = 0;

    /* If the application already initialized MPI, just query rank/size */
    MPI_Initialized(&mpi_initialized);
    if (mpi_initialized) goto done;

    /* MPI cannot be re-initialized after MPI_Finalize; calling any MPI
     * routine (including MPI_Comm_rank below) would then be erroneous,
     * so bail out entirely rather than falling through to done: */
    MPI_Finalized(&mpi_finalized);
    if (mpi_finalized) {
        fprintf(stderr, "MPI was already finalized\n");
        return;
    }

#ifdef NA_MPI_HAS_GNI_SETUP
    /* Setup GNI job before initializing MPI */
    if (NA_MPI_Gni_job_setup() != NA_SUCCESS) {
        fprintf(stderr, "Could not setup GNI job\n");
        return;
    }
#endif
    if (server || na_test_use_static_mpi_g) {
        int provided;

        /* Servers (and static MPMD runs) need full thread support */
        MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &provided);
        if (provided != MPI_THREAD_MULTIPLE) {
            fprintf(stderr, "MPI_THREAD_MULTIPLE cannot be set\n");
        }

        /* Only if we do static MPMD MPI */
        if (na_test_use_static_mpi_g) {
            int mpi_ret, color, global_rank;

            MPI_Comm_rank(MPI_COMM_WORLD, &global_rank);
            /* Color is 1 for server, 2 for client */
            color = (server) ? 1 : 2;

            /* Assume that the application did not split MPI_COMM_WORLD already */
            mpi_ret = MPI_Comm_split(MPI_COMM_WORLD, color, global_rank,
                    &na_test_comm_g);
            if (mpi_ret != MPI_SUCCESS) {
                fprintf(stderr, "Could not split communicator\n");
            }
#ifdef NA_HAS_MPI
            /* Set init comm that will be used to setup NA MPI */
            NA_MPI_Set_init_intra_comm(na_test_comm_g);
#endif
        }
    } else {
        MPI_Init(NULL, NULL);
    }
    /* Remember that we (not the application) own MPI teardown */
    mpi_internally_initialized = NA_TRUE;

done:
    MPI_Comm_rank(na_test_comm_g, &na_test_comm_rank_g);
    MPI_Comm_size(na_test_comm_g, &na_test_comm_size_g);
}
Example no. 2
0
/* Initialize MPI for the NA test harness.
 *
 * na_test_info: harness state; mpi_comm defaults to MPI_COMM_WORLD and is
 * replaced by a split communicator in static MPMD mode. On return,
 * mpi_comm_rank / mpi_comm_size describe this process within mpi_comm.
 */
static void
na_test_mpi_init(struct na_test_info *na_test_info)
{
    int already_init = 0;
    int already_finalized = 0;

    /* Default to the world communicator */
    na_test_info->mpi_comm = MPI_COMM_WORLD;

    MPI_Initialized(&already_init);
    if (already_init) {
        NA_LOG_WARNING("MPI was already initialized");
        goto done;
    }
    MPI_Finalized(&already_finalized);
    if (already_finalized) {
        NA_LOG_ERROR("MPI was already finalized");
        goto done;
    }

#ifdef NA_MPI_HAS_GNI_SETUP
    /* GNI job setup must happen before MPI is brought up */
    if (NA_MPI_Gni_job_setup() != NA_SUCCESS) {
        NA_LOG_ERROR("Could not setup GNI job");
        return;
    }
#endif
    if (!(na_test_info->listen || na_test_info->mpi_static)) {
        /* Plain client: default thread level is sufficient */
        MPI_Init(NULL, NULL);
    } else {
        int mpi_thread_level;

        /* Listeners and static MPMD runs need full thread support */
        MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &mpi_thread_level);
        if (mpi_thread_level != MPI_THREAD_MULTIPLE) {
            NA_LOG_ERROR("MPI_THREAD_MULTIPLE cannot be set");
        }

        /* Static MPMD: carve a private communicator out of the world */
        if (na_test_info->mpi_static) {
            int world_rank, split_color, rc;

            MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
            /* Color is 1 for server, 2 for client */
            split_color = (na_test_info->listen) ? 1 : 2;

            /* Assume that the application did not split MPI_COMM_WORLD already */
            rc = MPI_Comm_split(MPI_COMM_WORLD, split_color, world_rank,
                &na_test_info->mpi_comm);
            if (rc != MPI_SUCCESS) {
                NA_LOG_ERROR("Could not split communicator");
            }
#ifdef NA_HAS_MPI
            /* Set init comm that will be used to setup NA MPI */
            NA_MPI_Set_init_intra_comm(na_test_info->mpi_comm);
#endif
        }
    }

done:
    MPI_Comm_rank(na_test_info->mpi_comm, &na_test_info->mpi_comm_rank);
    MPI_Comm_size(na_test_info->mpi_comm, &na_test_info->mpi_comm_size);
}