Code Example #1
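This example overlaps a nonblocking MPI_Comm_idup of MPI_COMM_WORLD with a blocking MPI_Comm_create_group, issuing the two calls in opposite orders on even and odd ranks. Both resulting communicators are then exercised with a barrier, so the test hangs if the pending idup cannot make progress during the blocking creation.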
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int size, rank;
    MPI_Group world_group;
    MPI_Comm group_comm, idup_comm;
    MPI_Request req;
    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (size % 2) {
        fprintf(stderr, "this program requires an even number of processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Create some groups */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    if (rank % 2 == 0) {
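        /* Even ranks: blocking create_group first, then nonblocking idup.
         * Odd ranks issue the same calls in the opposite order, so the
         * pending idup must make progress during the blocking creation. */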
        MPI_Comm_create_group(MPI_COMM_WORLD, world_group, 0, &group_comm);
        MPI_Comm_idup(MPI_COMM_WORLD, &idup_comm, &req);
    }
    else {
        MPI_Comm_idup(MPI_COMM_WORLD, &idup_comm, &req);
        MPI_Comm_create_group(MPI_COMM_WORLD, world_group, 0, &group_comm);
    }

    MPI_Wait(&req, MPI_STATUS_IGNORE);
    /* Test both new communicators with a barrier */
    MPI_Barrier(idup_comm);
    MPI_Barrier(group_comm);

    MPI_Group_free(&world_group);
    MPI_Comm_free(&idup_comm);
    MPI_Comm_free(&group_comm);
    if (rank == 0)
        printf(" No Errors\n");

    MPI_Finalize();
    return 0;
}
Code Example #2
File: too_many_icomms.c  Project: Niharikareddy/mpich
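This test duplicates MPI_COMM_WORLD with MPI_Comm_idup until either MAX_NCOMM communicators exist or the implementation runs out of resources (typically context IDs). Because MPI_ERRORS_RETURN is installed on MPI_COMM_WORLD, exhaustion should surface through the return code checked at MPI_Wait rather than by aborting the program.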
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"

#define MAX_NCOMM 10000 /* assumed bound; the original defines this elsewhere */
static int verbose = 0; /* assumed debug flag; defined elsewhere in the original */

int main(int argc, char **argv)
{
    int rank, nproc, mpi_errno;
    int i, ncomm;
    int errors = 0;
    MPI_Comm *comm_hdls;
    MPI_Request req;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        /* Note: the comms we create are all dups of MPI_COMM_WORLD */
        MPI_Comm_idup(MPI_COMM_WORLD, &comm_hdls[i], &req);
        mpi_errno = MPI_Wait(&req, MPI_STATUS_IGNORE);
        if (mpi_errno == MPI_SUCCESS) {
            ncomm++;
        }
        else {
            /* Running out of context IDs is the expected way for this loop
             * to end, so it is not counted as a test failure. */
            if (verbose)
                printf("%d: Error creating comm %d\n", rank, i);
            break;
        }
    }

    for (i = 0; i < ncomm; i++)
        MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    MTest_Finalize(errors);
    MPI_Finalize();

    return 0;
}
Code Example #3
File: idup_deadlock.c  Project: NexMirror/MPICH
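A thread body from a multithreaded deadlock test: each thread duplicates its own communicator, comms[tid], a global array set up by the spawning code (along with start_idup and NUM_THREADS). Even-numbered threads on even ranks delay their idup until at least half of the threads have completed theirs, so the duplications are issued in different orders on different processes.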
MTEST_THREAD_RETURN_TYPE test_comm_dup(void *arg)
{
    int rank;
    int i;
    int wait;
    int tid = *(int *) arg;
    MPI_Comm comm;
    MPI_Request req;

    MPI_Comm_rank(comms[tid], &rank);

    if (tid % 2 == 0 && rank % 2 == 0) {
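        /* Spin until at least half of the threads have cleared their
         * start_idup flags (i.e., finished their idup), forcing the
         * duplications to be issued in a different order on these ranks. */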
        do {
            wait = 0;
            for (i = 0; i < NUM_THREADS; i++)
                wait += start_idup[i];
        } while (wait > NUM_THREADS / 2);
    }

    MPI_Comm_idup(comms[tid], &comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    start_idup[tid] = 0;
    MPI_Comm_free(&comm);
    return (MTEST_THREAD_RETURN_TYPE) 0;
}
Code Example #4
File: comm_idup_mul.c  Project: Julio-Anjos/simgrid
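The simplest usage pattern: start NUM_ITER nonblocking duplications of MPI_COMM_WORLD back to back, then complete them all with a single MPI_Waitall.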
#include <mpi.h>
#include <stdio.h>

#define NUM_ITER 10     /* assumed value; the original defines NUM_ITER elsewhere */

int main(int argc, char **argv)
{
    int i, rank;
    MPI_Comm comms[NUM_ITER];
    MPI_Request req[NUM_ITER];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    for (i = 0; i < NUM_ITER; i++)
        MPI_Comm_idup(MPI_COMM_WORLD, &comms[i], &req[i]);

    MPI_Waitall(NUM_ITER, req, MPI_STATUSES_IGNORE);

    for (i = 0; i < NUM_ITER; i++)
        MPI_Comm_free(&comms[i]);

    if (rank == 0)
        printf(" No Errors\n");

    MPI_Finalize();

    return 0;
}
Code Example #5
File: comm_idup_comm.c  Project: ParaStation/psmpi2
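This MPICH test-suite program starts NUM_IDUPS pending MPI_Comm_idup operations and, while they are outstanding, exercises the other communicator-creation paths: MPI_Comm_dup, MPI_Comm_split, MPI_Comm_create, MPI_Comm_create_group, and MPI_Intercomm_create followed by MPI_Intercomm_merge. Every communicator produced, including each completed idup, is validated with the MTest helper routines.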
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"

#define NUM_IDUPS 5     /* assumed value; the original defines NUM_IDUPS elsewhere */

int main(int argc, char **argv)
{
    int errs = 0;
    int i;
    int rank, size;
    int *excl;
    int ranges[1][3];
    int isLeft, rleader;
    MPI_Group world_group, high_group, even_group;
    MPI_Comm local_comm, inter_comm, test_comm, outcomm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    if (size < 2) {
        printf("this test requires at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Idup MPI_COMM_WORLD multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(MPI_COMM_WORLD, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */

    /* Comm_dup */
    MPI_Comm_dup(MPI_COMM_WORLD, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, size - rank, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of MPI_COMM_WORLD */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(world_group, 1, ranges, &high_group);
    MPI_Comm_create(MPI_COMM_WORLD, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of MPI_COMM_WORLD */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &outcomm);
    } else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);

    if (rank == 0) {
        rleader = size / 2;
    } else if (rank == size / 2) {
        rleader = 0;
    } else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }

    MPI_Group_free(&world_group);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Code Example #6
File: idup_comm_gen.c  Project: Niharikareddy/mpich
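A threaded variant of the previous test: each thread runs the same overlap sequence on its own input communicator, comms[tid], and accumulates failures in the global errs[tid]. comms, errs, NUM_IDUPS, and the MTest helpers are defined elsewhere in the original file.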
MTEST_THREAD_RETURN_TYPE test_idup(void *arg)
{
    int i;
    int size, rank;
    int ranges[1][3];
    int rleader, isLeft;
    int *excl = NULL;
    int tid = *(int *) arg;

    MPI_Group ingroup, high_group, even_group;
    MPI_Comm local_comm, inter_comm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MPI_Comm outcomm;
    MPI_Comm incomm = comms[tid];

    MPI_Comm_size(incomm, &size);
    MPI_Comm_rank(incomm, &rank);
    MPI_Comm_group(incomm, &ingroup);

    /* Idup incomm multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(incomm, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */
    /* Comm_dup */
    MPI_Comm_dup(incomm, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(incomm, rank % 2, size - rank, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of incomm */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(ingroup, 1, ranges, &high_group);
    MPI_Comm_create(incomm, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of incomm */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(ingroup, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(incomm, even_group, 0, &outcomm);
    }
    else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(incomm, (rank < size / 2), rank, &local_comm);
    if (rank == 0) {
        rleader = size / 2;
    }
    else if (rank == size / 2) {
        rleader = 0;
    }
    else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, incomm, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs[tid] += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs[tid] += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }
    MPI_Group_free(&ingroup);
    return NULL;
}